summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/removed/sysfs-bus-nfit17
-rw-r--r--Documentation/ABI/testing/evm13
-rw-r--r--Documentation/ABI/testing/ima_policy2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio9
-rw-r--r--Documentation/ABI/testing/sysfs-bus-nfit19
-rw-r--r--Documentation/ABI/testing/sysfs-class-mtd8
-rw-r--r--Documentation/PCI/pci-error-recovery.txt35
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst72
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt19
-rw-r--r--Documentation/auxdisplay/lcd-panel-cgram.txt (renamed from Documentation/misc-devices/lcd-panel-cgram.txt)0
-rw-r--r--Documentation/blockdev/zram.txt25
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,g3dsys.txt30
-rw-r--r--Documentation/devicetree/bindings/arm/ux500/boards.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/actions,s900-cmu.txt47
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt26
-rw-r--r--Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.txt100
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt22
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,videocc.txt19
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt10
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip.txt77
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi-ccu.txt3
-rw-r--r--Documentation/devicetree/bindings/dma/k3dma.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/ti-edma.txt1
-rw-r--r--Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt4
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca953x.txt34
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt3
-rw-r--r--Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt9
-rw-r--r--Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/mcp320x.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt (renamed from Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt)0
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt6
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt7
-rw-r--r--Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt26
-rw-r--r--Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt41
-rw-r--r--Documentation/devicetree/bindings/iio/afe/voltage-divider.txt53
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ltc2632.txt14
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ti,dac5571.txt24
-rw-r--r--Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt12
-rw-r--r--Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt9
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt2
-rw-r--r--Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt47
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt (renamed from Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt)0
-rw-r--r--Documentation/devicetree/bindings/mfd/axp20x.txt3
-rw-r--r--Documentation/devicetree/bindings/mfd/bd9571mwv.txt21
-rw-r--r--Documentation/devicetree/bindings/mips/lantiq/rcu.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmi-nand.txt5
-rw-r--r--Documentation/devicetree/bindings/mtd/ibm,ndfc.txt (renamed from Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt)0
-rw-r--r--Documentation/devicetree/bindings/mtd/mtk-nand.txt28
-rw-r--r--Documentation/devicetree/bindings/mtd/partition.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/brcm,bcm947xx-cfe-partitions.txt42
-rw-r--r--Documentation/devicetree/bindings/mtd/sunxi-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/net/ibm,emac.txt (renamed from Documentation/devicetree/bindings/powerpc/4xx/emac.txt)0
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ravb.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt24
-rw-r--r--Documentation/devicetree/bindings/pci/mobiveil-pcie.txt73
-rw-r--r--Documentation/devicetree/bindings/pci/pci-armada8k.txt5
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci.txt6
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt62
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie-host.txt (renamed from Documentation/devicetree/bindings/pci/rockchip-pcie.txt)0
-rw-r--r--Documentation/devicetree/bindings/pci/xgene-pci.txt7
-rw-r--r--Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt16
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt18
-rw-r--r--Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt10
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq27xxx.txt1
-rw-r--r--Documentation/devicetree/bindings/pps/pps-gpio.txt1
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/pfuze100.txt6
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt45
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.txt10
-rw-r--r--Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt126
-rw-r--r--Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt23
-rw-r--r--Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt (renamed from Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt)0
-rw-r--r--Documentation/devicetree/bindings/rng/sparc_sun_oracle_rng.txt (renamed from Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt)0
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt2
-rw-r--r--Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt37
-rw-r--r--Documentation/devicetree/bindings/submitting-patches.txt9
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt6
-rw-r--r--Documentation/devicetree/bindings/timer/altr,timer-1.0.txt (renamed from Documentation/devicetree/bindings/nios2/timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/arm,arch_timer.txt (renamed from Documentation/devicetree/bindings/arm/arch_timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/arm,armv7m-systick.txt (renamed from Documentation/devicetree/bindings/arm/armv7m_systick.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/arm,global_timer.txt (renamed from Documentation/devicetree/bindings/arm/global_timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/arm,twd.txt (renamed from Documentation/devicetree/bindings/arm/twd.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/fsl,gtm.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/gtm.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/mrvl,mmp-timer.txt (renamed from Documentation/devicetree/bindings/arm/mrvl/timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/qcom,msm-timer.txt (renamed from Documentation/devicetree/bindings/arm/msm/timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/st,spear-timer.txt (renamed from Documentation/devicetree/bindings/arm/spear-timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt (renamed from Documentation/devicetree/bindings/c6x/timer64.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/ti,timer.txt (renamed from Documentation/devicetree/bindings/arm/omap/timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/via,vt8500-timer.txt (renamed from Documentation/devicetree/bindings/arm/vt8500/via,vt8500-timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt7
-rw-r--r--Documentation/driver-api/clk.rst2
-rw-r--r--Documentation/driver-api/gpio/board.rst16
-rw-r--r--Documentation/driver-api/gpio/drivers-on-gpio.rst4
-rw-r--r--Documentation/driver-api/index.rst1
-rw-r--r--Documentation/driver-api/scsi.rst2
-rw-r--r--Documentation/driver-api/target.rst64
-rw-r--r--Documentation/features/vm/pte_special/arch-support.txt2
-rw-r--r--Documentation/filesystems/00-INDEX6
-rw-r--r--Documentation/filesystems/autofs-mount-control.txt (renamed from Documentation/filesystems/autofs4-mount-control.txt)9
-rw-r--r--Documentation/filesystems/autofs.txt (renamed from Documentation/filesystems/autofs4.txt)10
-rw-r--r--Documentation/filesystems/automount-support.txt2
-rw-r--r--Documentation/filesystems/ncpfs.txt12
-rw-r--r--Documentation/filesystems/path-lookup.md2
-rw-r--r--Documentation/ioctl/ioctl-number.txt2
-rw-r--r--Documentation/livepatch/livepatch.txt24
-rw-r--r--MAINTAINERS109
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/include/asm/pgtable.h2
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi2
-rw-r--r--arch/arm/boot/dts/imx7s.dtsi2
-rw-r--r--arch/arm/common/mcpm_entry.c2
-rw-r--r--arch/arm/include/asm/kvm_host.h12
-rw-r--r--arch/arm/include/asm/kvm_mmu.h5
-rw-r--r--arch/arm/include/asm/pgtable-3level.h1
-rw-r--r--arch/arm/kernel/entry-common.S25
-rw-r--r--arch/arm/kernel/perf_event_v6.c4
-rw-r--r--arch/arm/kernel/perf_event_v7.c3
-rw-r--r--arch/arm/kernel/perf_event_xscale.c6
-rw-r--r--arch/arm/kernel/signal.c14
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c28
-rw-r--r--arch/arm/tools/syscall.tbl1
-rw-r--r--arch/arm/xen/enlighten.c11
-rw-r--r--arch/arm64/Kconfig16
-rw-r--r--arch/arm64/configs/defconfig3
-rw-r--r--arch/arm64/include/asm/acpi.h4
-rw-r--r--arch/arm64/include/asm/cache.h6
-rw-r--r--arch/arm64/include/asm/cmpxchg.h4
-rw-r--r--arch/arm64/include/asm/cpucaps.h3
-rw-r--r--arch/arm64/include/asm/cpufeature.h22
-rw-r--r--arch/arm64/include/asm/elf.h13
-rw-r--r--arch/arm64/include/asm/fpsimdmacros.h12
-rw-r--r--arch/arm64/include/asm/kvm_asm.h30
-rw-r--r--arch/arm64/include/asm/kvm_host.h26
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h25
-rw-r--r--arch/arm64/include/asm/pgtable.h2
-rw-r--r--arch/arm64/include/asm/processor.h5
-rw-r--r--arch/arm64/include/asm/thread_info.h1
-rw-r--r--arch/arm64/include/asm/topology.h6
-rw-r--r--arch/arm64/include/uapi/asm/auxvec.h3
-rw-r--r--arch/arm64/kernel/Makefile1
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c3
-rw-r--r--arch/arm64/kernel/asm-offsets.c1
-rw-r--r--arch/arm64/kernel/cacheinfo.c15
-rw-r--r--arch/arm64/kernel/cpu_errata.c182
-rw-r--r--arch/arm64/kernel/cpufeature.c10
-rw-r--r--arch/arm64/kernel/entry-fpsimd.S2
-rw-r--r--arch/arm64/kernel/entry.S30
-rw-r--r--arch/arm64/kernel/fpsimd.c18
-rw-r--r--arch/arm64/kernel/hibernate.c11
-rw-r--r--arch/arm64/kernel/perf_event.c3
-rw-r--r--arch/arm64/kernel/ptrace.c5
-rw-r--r--arch/arm64/kernel/signal.c57
-rw-r--r--arch/arm64/kernel/ssbd.c110
-rw-r--r--arch/arm64/kernel/suspend.c8
-rw-r--r--arch/arm64/kernel/topology.c104
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S20
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S38
-rw-r--r--arch/arm64/kvm/hyp/switch.c42
-rw-r--r--arch/arm64/kvm/reset.c4
-rw-r--r--arch/arm64/mm/dma-mapping.c5
-rw-r--r--arch/arm64/mm/fault.c46
-rw-r--r--arch/microblaze/include/asm/pci.h4
-rw-r--r--arch/microblaze/pci/pci-common.c61
-rw-r--r--arch/mips/pci/pci-legacy.c8
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/pte-common.h3
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/entry_32.S7
-rw-r--r--arch/powerpc/kernel/entry_64.S8
-rw-r--r--arch/powerpc/kernel/signal.c3
-rw-r--r--arch/riscv/Kconfig1
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h3
-rw-r--r--arch/riscv/kernel/cacheinfo.c1
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/pgtable.h1
-rw-r--r--arch/s390/mm/pgalloc.c21
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/include/asm/pgtable.h2
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/pgtable_64.h3
-rw-r--r--arch/sparc/kernel/leon_pci.c62
-rw-r--r--arch/sparc/kernel/pci.c136
-rw-r--r--arch/sparc/kernel/pci_common.c31
-rw-r--r--arch/sparc/kernel/pci_msi.c10
-rw-r--r--arch/sparc/kernel/pcic.c94
-rw-r--r--arch/x86/Kconfig6
-rw-r--r--arch/x86/Kconfig.debug3
-rw-r--r--arch/x86/boot/compressed/kaslr_64.c5
-rw-r--r--arch/x86/entry/common.c3
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/mcsafe_test.h75
-rw-r--r--arch/x86/include/asm/page_types.h8
-rw-r--r--arch/x86/include/asm/pgtable_64.h4
-rw-r--r--arch/x86/include/asm/pgtable_types.h1
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h2
-rw-r--r--arch/x86/include/asm/x86_init.h1
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/apic/vector.c45
-rw-r--r--arch/x86/kernel/cpu/bugs.c13
-rw-r--r--arch/x86/kernel/cpu/common.c9
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c6
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c33
-rw-r--r--arch/x86/kernel/i8237.c25
-rw-r--r--arch/x86/kernel/idt.c7
-rw-r--r--arch/x86/kernel/platform-quirks.c7
-rw-r--r--arch/x86/kernel/signal.c6
-rw-r--r--arch/x86/kvm/cpuid.c10
-rw-r--r--arch/x86/kvm/svm.c8
-rw-r--r--arch/x86/lib/memcpy_64.S10
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c3
-rw-r--r--arch/x86/mm/pgtable.c10
-rw-r--r--arch/x86/pci/early.c19
-rw-r--r--arch/x86/pci/fixup.c4
-rw-r--r--arch/x86/platform/uv/tlb_uv.c2
-rw-r--r--arch/x86/platform/uv/uv_irq.c7
-rw-r--r--arch/x86/xen/mmu.c60
-rw-r--r--arch/x86/xen/xen-pvh.S47
-rw-r--r--arch/xtensa/include/asm/pci.h2
-rw-r--r--arch/xtensa/kernel/pci.c69
-rw-r--r--block/bio.c18
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-flush.c11
-rw-r--r--block/ioprio.c22
-rw-r--r--block/partitions/cmdline.c57
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/nfit/core.c11
-rw-r--r--drivers/acpi/pci_root.c17
-rw-r--r--drivers/acpi/pptt.c655
-rw-r--r--drivers/acpi/tables.c2
-rw-r--r--drivers/ata/libata-scsi.c12
-rw-r--r--drivers/auxdisplay/Kconfig16
-rw-r--r--drivers/auxdisplay/arm-charlcd.c4
-rw-r--r--drivers/auxdisplay/cfag12864b.c16
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c20
-rw-r--r--drivers/auxdisplay/charlcd.c119
-rw-r--r--drivers/auxdisplay/hd44780.c6
-rw-r--r--drivers/auxdisplay/ht16k33.c10
-rw-r--r--drivers/auxdisplay/ks0108.c16
-rw-r--r--drivers/auxdisplay/panel.c6
-rw-r--r--drivers/base/cacheinfo.c157
-rw-r--r--drivers/block/loop.c71
-rw-r--r--drivers/block/nbd.c13
-rw-r--r--drivers/block/zram/Kconfig14
-rw-r--r--drivers/block/zram/zram_drv.c165
-rw-r--r--drivers/block/zram/zram_drv.h14
-rw-r--r--drivers/char/Kconfig12
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/adi.c239
-rw-r--r--drivers/char/tpm/Makefile10
-rw-r--r--drivers/char/tpm/eventlog/acpi.c (renamed from drivers/char/tpm/tpm_eventlog_acpi.c)3
-rw-r--r--drivers/char/tpm/eventlog/common.c195
-rw-r--r--drivers/char/tpm/eventlog/common.h35
-rw-r--r--drivers/char/tpm/eventlog/efi.c (renamed from drivers/char/tpm/tpm_eventlog_efi.c)6
-rw-r--r--drivers/char/tpm/eventlog/of.c (renamed from drivers/char/tpm/tpm_eventlog_of.c)11
-rw-r--r--drivers/char/tpm/eventlog/tpm1.c (renamed from drivers/char/tpm/tpm1_eventlog.c)200
-rw-r--r--drivers/char/tpm/eventlog/tpm2.c (renamed from drivers/char/tpm/tpm2_eventlog.c)3
-rw-r--r--drivers/char/tpm/st33zp24/spi.c4
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c2
-rw-r--r--drivers/char/tpm/tpm-dev-common.c40
-rw-r--r--drivers/char/tpm/tpm-dev.h2
-rw-r--r--drivers/char/tpm/tpm-interface.c5
-rw-r--r--drivers/char/tpm/tpm.h32
-rw-r--r--drivers/char/tpm/tpm2-space.c3
-rw-r--r--drivers/char/tpm/tpm_crb.c10
-rw-r--r--drivers/char/tpm/tpm_tis_core.c58
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/actions/Kconfig14
-rw-r--r--drivers/clk/actions/Makefile12
-rw-r--r--drivers/clk/actions/owl-common.c89
-rw-r--r--drivers/clk/actions/owl-common.h41
-rw-r--r--drivers/clk/actions/owl-composite.c199
-rw-r--r--drivers/clk/actions/owl-composite.h124
-rw-r--r--drivers/clk/actions/owl-divider.c94
-rw-r--r--drivers/clk/actions/owl-divider.h75
-rw-r--r--drivers/clk/actions/owl-factor.c222
-rw-r--r--drivers/clk/actions/owl-factor.h83
-rw-r--r--drivers/clk/actions/owl-fixed-factor.h28
-rw-r--r--drivers/clk/actions/owl-gate.c77
-rw-r--r--drivers/clk/actions/owl-gate.h73
-rw-r--r--drivers/clk/actions/owl-mux.c60
-rw-r--r--drivers/clk/actions/owl-mux.h61
-rw-r--r--drivers/clk/actions/owl-pll.c194
-rw-r--r--drivers/clk/actions/owl-pll.h92
-rw-r--r--drivers/clk/actions/owl-s900.c721
-rw-r--r--drivers/clk/at91/clk-pll.c13
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c42
-rw-r--r--drivers/clk/bcm/clk-sr.c135
-rw-r--r--drivers/clk/berlin/berlin2-avpll.c13
-rw-r--r--drivers/clk/berlin/berlin2-avpll.h13
-rw-r--r--drivers/clk/berlin/berlin2-div.c13
-rw-r--r--drivers/clk/berlin/berlin2-div.h13
-rw-r--r--drivers/clk/berlin/berlin2-pll.c13
-rw-r--r--drivers/clk/berlin/berlin2-pll.h13
-rw-r--r--drivers/clk/berlin/bg2.c13
-rw-r--r--drivers/clk/berlin/bg2q.c13
-rw-r--r--drivers/clk/berlin/common.h13
-rw-r--r--drivers/clk/clk-aspeed.c57
-rw-r--r--drivers/clk/clk-bulk.c5
-rw-r--r--drivers/clk/clk-npcm7xx.c656
-rw-r--r--drivers/clk/clk-si544.c1
-rw-r--r--drivers/clk/clk-stm32mp1.c12
-rw-r--r--drivers/clk/clk.c171
-rw-r--r--drivers/clk/davinci/pll-da830.c5
-rw-r--r--drivers/clk/davinci/pll-da850.c37
-rw-r--r--drivers/clk/davinci/pll-dm355.c22
-rw-r--r--drivers/clk/davinci/pll-dm365.c9
-rw-r--r--drivers/clk/davinci/pll-dm644x.c9
-rw-r--r--drivers/clk/davinci/pll-dm646x.c11
-rw-r--r--drivers/clk/davinci/pll.c314
-rw-r--r--drivers/clk/davinci/pll.h41
-rw-r--r--drivers/clk/davinci/psc-da830.c3
-rw-r--r--drivers/clk/davinci/psc-dm355.c7
-rw-r--r--drivers/clk/davinci/psc-dm365.c22
-rw-r--r--drivers/clk/davinci/psc-dm644x.c3
-rw-r--r--drivers/clk/davinci/psc-dm646x.c3
-rw-r--r--drivers/clk/davinci/psc.c72
-rw-r--r--drivers/clk/davinci/psc.h12
-rw-r--r--drivers/clk/hisilicon/Kconfig13
-rw-r--r--drivers/clk/hisilicon/crg-hi3798cv200.c17
-rw-r--r--drivers/clk/imx/clk-imx6q.c2
-rw-r--r--drivers/clk/imx/clk-imx6sl.c2
-rw-r--r--drivers/clk/imx/clk-imx6sx.c17
-rw-r--r--drivers/clk/imx/clk-imx6ul.c20
-rw-r--r--drivers/clk/imx/clk-imx7d.c17
-rw-r--r--drivers/clk/ingenic/cgu.c8
-rw-r--r--drivers/clk/ingenic/cgu.h4
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c49
-rw-r--r--drivers/clk/mediatek/Kconfig6
-rw-r--r--drivers/clk/mediatek/Makefile1
-rw-r--r--drivers/clk/mediatek/clk-mt2701-g3d.c95
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c8
-rw-r--r--drivers/clk/meson/Kconfig8
-rw-r--r--drivers/clk/meson/Makefile3
-rw-r--r--drivers/clk/meson/axg-aoclk.c164
-rw-r--r--drivers/clk/meson/axg-aoclk.h29
-rw-r--r--drivers/clk/meson/axg.c4
-rw-r--r--drivers/clk/meson/clk-audio-divider.c13
-rw-r--r--drivers/clk/meson/clk-mpll.c76
-rw-r--r--drivers/clk/meson/clk-pll.c13
-rw-r--r--drivers/clk/meson/clk-regmap.c6
-rw-r--r--drivers/clk/meson/clk-regmap.h8
-rw-r--r--drivers/clk/meson/clkc.h16
-rw-r--r--drivers/clk/meson/gxbb-aoclk-32k.c3
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c147
-rw-r--r--drivers/clk/meson/gxbb-aoclk.h8
-rw-r--r--drivers/clk/meson/gxbb.c129
-rw-r--r--drivers/clk/meson/gxbb.h58
-rw-r--r--drivers/clk/meson/meson-aoclk.c81
-rw-r--r--drivers/clk/meson/meson-aoclk.h34
-rw-r--r--drivers/clk/meson/meson8b.c77
-rw-r--r--drivers/clk/meson/meson8b.h18
-rw-r--r--drivers/clk/mvebu/clk-corediv.c2
-rw-r--r--drivers/clk/qcom/Kconfig27
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c302
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h18
-rw-r--r--drivers/clk/qcom/clk-branch.c7
-rw-r--r--drivers/clk/qcom/clk-branch.h1
-rw-r--r--drivers/clk/qcom/clk-rcg.h17
-rw-r--r--drivers/clk/qcom/clk-rcg2.c168
-rw-r--r--drivers/clk/qcom/common.c32
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c8
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c2834
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c3465
-rw-r--r--drivers/clk/qcom/gdsc.c102
-rw-r--r--drivers/clk/qcom/gdsc.h6
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c22
-rw-r--r--drivers/clk/qcom/videocc-sdm845.c358
-rw-r--r--drivers/clk/renesas/Kconfig10
-rw-r--r--drivers/clk/renesas/Makefile2
-rw-r--r--drivers/clk/renesas/r8a7743-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7745-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77470-cpg-mssr.c229
-rw-r--r--drivers/clk/renesas/r8a7791-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7792-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7794-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c4
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c289
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c24
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h2
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-rockchip.c98
-rw-r--r--drivers/clk/rockchip/clk.c16
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c6
-rw-r--r--drivers/clk/socfpga/clk-s10.c40
-rw-r--r--drivers/clk/spear/spear6xx_clock.c2
-rw-r--r--drivers/clk/sunxi-ng/Kconfig5
-rw-r--r--drivers/clk/sunxi-ng/Makefile1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c207
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h19
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c72
-rw-r--r--drivers/clk/tegra/clk-dfll.c42
-rw-r--r--drivers/clk/tegra/clk-tegra114.c2
-rw-r--r--drivers/clk/tegra/clk-tegra124.c2
-rw-r--r--drivers/clk/tegra/clk-tegra20.c52
-rw-r--r--drivers/clk/tegra/clk-tegra210.c2
-rw-r--r--drivers/clk/tegra/clk-tegra30.c2
-rw-r--r--drivers/clk/tegra/clk.c5
-rw-r--r--drivers/clk/tegra/clk.h2
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c5
-rw-r--r--drivers/clocksource/timer-sp804.c3
-rw-r--r--drivers/dax/super.c33
-rw-r--r--drivers/dma/Kconfig31
-rw-r--r--drivers/dma/Makefile5
-rw-r--r--drivers/dma/at_hdmac.c9
-rw-r--r--drivers/dma/at_xdmac.c9
-rw-r--r--drivers/dma/dma-axi-dmac.c2
-rw-r--r--drivers/dma/dmatest.c16
-rw-r--r--drivers/dma/dw/platform.c6
-rw-r--r--drivers/dma/fsldma.c6
-rw-r--r--drivers/dma/idma64.c6
-rw-r--r--drivers/dma/imx-dma.c26
-rw-r--r--drivers/dma/imx-sdma.c29
-rw-r--r--drivers/dma/mxs-dma.c14
-rw-r--r--drivers/dma/pl330.c209
-rw-r--r--drivers/dma/qcom/bam_dma.c10
-rw-r--r--drivers/dma/qcom/hidma.c3
-rw-r--r--drivers/dma/qcom/hidma_mgmt_sys.c6
-rw-r--r--drivers/dma/sh/shdmac.c50
-rw-r--r--drivers/dma/sprd-dma.c349
-rw-r--r--drivers/dma/ste_dma40.c12
-rw-r--r--drivers/dma/stm32-mdma.c100
-rw-r--r--drivers/dma/ti/Kconfig37
-rw-r--r--drivers/dma/ti/Makefile5
-rw-r--r--drivers/dma/ti/cppi41.c (renamed from drivers/dma/cppi41.c)2
-rw-r--r--drivers/dma/ti/dma-crossbar.c (renamed from drivers/dma/ti-dma-crossbar.c)0
-rw-r--r--drivers/dma/ti/edma.c (renamed from drivers/dma/edma.c)4
-rw-r--r--drivers/dma/ti/omap-dma.c (renamed from drivers/dma/omap-dma.c)2
-rw-r--r--drivers/dma/txx9dmac.c8
-rw-r--r--drivers/gpio/Kconfig12
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c2
-rw-r--r--drivers/gpio/gpio-104-idi-48.c2
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c9
-rw-r--r--drivers/gpio/gpio-aspeed.c24
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-dwapb.c57
-rw-r--r--drivers/gpio/gpio-eic-sprd.c73
-rw-r--r--drivers/gpio/gpio-ge.c4
-rw-r--r--drivers/gpio/gpio-gpio-mm.c2
-rw-r--r--drivers/gpio/gpio-ingenic.c4
-rw-r--r--drivers/gpio/gpio-loongson.c116
-rw-r--r--drivers/gpio/gpio-lp3943.c2
-rw-r--r--drivers/gpio/gpio-lp873x.c2
-rw-r--r--drivers/gpio/gpio-lpc32xx.c3
-rw-r--r--drivers/gpio/gpio-lynxpoint.c2
-rw-r--r--drivers/gpio/gpio-max730x.c2
-rw-r--r--drivers/gpio/gpio-mc33880.c2
-rw-r--r--drivers/gpio/gpio-mc9s08dz60.c2
-rw-r--r--drivers/gpio/gpio-ml-ioh.c2
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c2
-rw-r--r--drivers/gpio/gpio-mockup.c7
-rw-r--r--drivers/gpio/gpio-msic.c2
-rw-r--r--drivers/gpio/gpio-mvebu.c20
-rw-r--r--drivers/gpio/gpio-mxc.c54
-rw-r--r--drivers/gpio/gpio-mxs.c32
-rw-r--r--drivers/gpio/gpio-octeon.c2
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-palmas.c8
-rw-r--r--drivers/gpio/gpio-pca953x.c81
-rw-r--r--drivers/gpio/gpio-pcf857x.c2
-rw-r--r--drivers/gpio/gpio-pch.c2
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c58
-rw-r--r--drivers/gpio/gpio-pxa.c10
-rw-r--r--drivers/gpio/gpio-stmpe.c9
-rw-r--r--drivers/gpio/gpio-syscon.c9
-rw-r--r--drivers/gpio/gpio-ts4900.c7
-rw-r--r--drivers/gpio/gpio-vf610.c4
-rw-r--r--drivers/gpio/gpio-xlp.c9
-rw-r--r--drivers/gpio/gpio-xra1403.c15
-rw-r--r--drivers/gpio/gpio-zynq.c14
-rw-r--r--drivers/gpio/gpiolib-of.c7
-rw-r--r--drivers/gpio/gpiolib.c153
-rw-r--r--drivers/gpio/gpiolib.h2
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile7
-rw-r--r--drivers/hid/Kconfig16
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-alps.c30
-rw-r--r--drivers/hid/hid-core.c21
-rw-r--r--drivers/hid/hid-generic.c15
-rw-r--r--drivers/hid/hid-gfrm.c2
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-input.c123
-rw-r--r--drivers/hid/hid-magicmouse.c6
-rw-r--r--drivers/hid/hid-multitouch.c232
-rw-r--r--drivers/hid/hid-plantronics.c6
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-redragon.c86
-rw-r--r--drivers/hid/hid-rmi.c20
-rw-r--r--drivers/hid/hid-steam.c1115
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c33
-rw-r--r--drivers/hid/wacom_wac.c74
-rw-r--r--drivers/hsi/clients/cmt_speech.c2
-rw-r--r--drivers/hv/ring_buffer.c2
-rw-r--r--drivers/ide/alim15x3.c2
-rw-r--r--drivers/ide/ide-io.c4
-rw-r--r--drivers/ide/ide-iops.c13
-rw-r--r--drivers/ide/ide-taskfile.c10
-rw-r--r--drivers/iio/Kconfig2
-rw-r--r--drivers/iio/Makefile2
-rw-r--r--drivers/iio/accel/Kconfig24
-rw-r--r--drivers/iio/accel/Makefile2
-rw-r--r--drivers/iio/accel/adis16201.c321
-rw-r--r--drivers/iio/accel/adis16209.c (renamed from drivers/staging/iio/accel/adis16209.c)52
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c23
-rw-r--r--drivers/iio/accel/mma8452.c24
-rw-r--r--drivers/iio/accel/sca3000.c13
-rw-r--r--drivers/iio/accel/st_accel_i2c.c1
-rw-r--r--drivers/iio/adc/ad7791.c87
-rw-r--r--drivers/iio/adc/imx7d_adc.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c83
-rw-r--r--drivers/iio/adc/stm32-adc-core.c66
-rw-r--r--drivers/iio/adc/stm32-adc.c47
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c17
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c26
-rw-r--r--drivers/iio/adc/stx104.c11
-rw-r--r--drivers/iio/adc/ti-ads8688.c48
-rw-r--r--drivers/iio/afe/Kconfig19
-rw-r--r--drivers/iio/afe/Makefile6
-rw-r--r--drivers/iio/afe/iio-rescale.c359
-rw-r--r--drivers/iio/amplifiers/ad8366.c6
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c14
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c3
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c9
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c9
-rw-r--r--drivers/iio/dac/Kconfig33
-rw-r--r--drivers/iio/dac/Makefile3
-rw-r--r--drivers/iio/dac/ad5686-spi.c119
-rw-r--r--drivers/iio/dac/ad5686.c428
-rw-r--r--drivers/iio/dac/ad5686.h145
-rw-r--r--drivers/iio/dac/ad5696-i2c.c102
-rw-r--r--drivers/iio/dac/ltc2632.c89
-rw-r--r--drivers/iio/dac/ti-dac5571.c439
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c284
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c86
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h22
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c47
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c12
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c52
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c10
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c7
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c5
-rw-r--r--drivers/iio/light/Kconfig8
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c3
-rw-r--r--drivers/iio/light/tsl2583.c2
-rw-r--r--drivers/iio/light/tsl2772.c1800
-rw-r--r--drivers/iio/magnetometer/mag3110.c158
-rw-r--r--drivers/iio/potentiometer/mcp4018.c41
-rw-r--r--drivers/iio/potentiometer/mcp4531.c145
-rw-r--r--drivers/iio/potentiostat/lmp91000.c2
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c3
-rw-r--r--drivers/iio/resolver/Kconfig17
-rw-r--r--drivers/iio/resolver/Makefile5
-rw-r--r--drivers/iio/resolver/ad2s1200.c (renamed from drivers/staging/iio/resolver/ad2s1200.c)167
-rw-r--r--drivers/infiniband/Kconfig11
-rw-r--r--drivers/infiniband/core/Makefile9
-rw-r--r--drivers/infiniband/core/addr.c172
-rw-r--r--drivers/infiniband/core/cache.c112
-rw-r--r--drivers/infiniband/core/cm.c62
-rw-r--r--drivers/infiniband/core/cma.c36
-rw-r--r--drivers/infiniband/core/core_priv.h3
-rw-r--r--drivers/infiniband/core/device.c4
-rw-r--r--drivers/infiniband/core/mad.c12
-rw-r--r--drivers/infiniband/core/nldev.c122
-rw-r--r--drivers/infiniband/core/restrack.c9
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c2
-rw-r--r--drivers/infiniband/core/security.c4
-rw-r--r--drivers/infiniband/core/ucma.c6
-rw-r--r--drivers/infiniband/core/umem.c13
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c111
-rw-r--r--drivers/infiniband/core/uverbs_main.c42
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c3
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c157
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c23
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c4
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig1
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h5
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c501
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile10
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c497
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h10
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c82
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h15
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h13
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c292
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h102
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c53
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c39
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.h24
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c375
-rw-r--r--drivers/infiniband/hw/hfi1/fault.h109
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c8
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h61
-rw-r--r--drivers/infiniband/hw/hfi1/init.c47
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c37
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c32
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c44
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c10
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c154
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c12
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c43
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h3
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h160
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c61
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c11
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c45
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c188
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c76
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c67
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c34
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c60
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c50
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c57
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c511
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h36
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c43
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib.h4
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c13
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c154
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c20
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig2
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c74
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.h6
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c151
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_cq.h35
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c35
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c20
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c93
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c12
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h6
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c21
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c37
-rw-r--r--drivers/iommu/Kconfig5
-rw-r--r--drivers/iommu/amd_iommu.c82
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/iommu/dmar.c8
-rw-r--r--drivers/iommu/intel-iommu.c104
-rw-r--r--drivers/iommu/intel-svm.c2
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c5
-rw-r--r--drivers/iommu/io-pgtable-arm.c18
-rw-r--r--drivers/iommu/iommu.c7
-rw-r--r--drivers/iommu/irq_remapping.c5
-rw-r--r--drivers/iommu/irq_remapping.h2
-rw-r--r--drivers/iommu/qcom_iommu.c6
-rw-r--r--drivers/iommu/tegra-gart.c15
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c3
-rw-r--r--drivers/irqchip/irq-stm32-exti.c4
-rw-r--r--drivers/lightnvm/pblk-gc.c3
-rw-r--r--drivers/lightnvm/pblk-init.c2
-rw-r--r--drivers/mailbox/Kconfig22
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c2
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c2
-rw-r--r--drivers/mailbox/stm32-ipcc.c402
-rw-r--r--drivers/md/dm-linear.c16
-rw-r--r--drivers/md/dm-log-writes.c15
-rw-r--r--drivers/md/dm-stripe.c21
-rw-r--r--drivers/md/dm.c47
-rw-r--r--drivers/md/md.c212
-rw-r--r--drivers/md/md.h22
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c10
-rw-r--r--drivers/md/raid5.c12
-rw-r--r--drivers/md/raid5.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h4
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptfc.c2
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/mfd/bd9571mwv.c2
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c3
-rw-r--r--drivers/misc/pci_endpoint_test.c29
-rw-r--r--drivers/mtd/bcm47xxpart.c29
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c62
-rw-r--r--drivers/mtd/chips/cfi_probe.c42
-rw-r--r--drivers/mtd/cmdlinepart.c5
-rw-r--r--drivers/mtd/devices/docg3.c3
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c2
-rw-r--r--drivers/mtd/inftlmount.c23
-rw-r--r--drivers/mtd/maps/pismo.c1
-rw-r--r--drivers/mtd/mtdcore.c24
-rw-r--r--drivers/mtd/mtdcore.h1
-rw-r--r--drivers/mtd/mtdpart.c44
-rw-r--r--drivers/mtd/nand/onenand/samsung.c6
-rw-r--r--drivers/mtd/nand/raw/Kconfig8
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c25
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c13
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c29
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c27
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c54
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c188
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h25
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c35
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c38
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c26
-rw-r--r--drivers/mtd/nand/raw/mtk_ecc.c7
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c10
-rw-r--r--drivers/mtd/nand/raw/nand_base.c89
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c91
-rw-r--r--drivers/mtd/nftlmount.c23
-rw-r--r--drivers/mtd/spi-nor/Kconfig6
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c96
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c15
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c12
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c80
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c33
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c2
-rw-r--r--drivers/mtd/ubi/build.c4
-rw-r--r--drivers/mtd/ubi/eba.c111
-rw-r--r--drivers/mtd/ubi/fastmap.c20
-rw-r--r--drivers/mtd/ubi/ubi-media.h22
-rw-r--r--drivers/mtd/ubi/ubi.h11
-rw-r--r--drivers/mtd/ubi/vmt.c1
-rw-r--r--drivers/mtd/ubi/vtbl.c16
-rw-r--r--drivers/mtd/ubi/wl.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c75
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c7
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c18
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nvdimm/bus.c19
-rw-r--r--drivers/nvdimm/e820.c41
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c46
-rw-r--r--drivers/nvdimm/region_devs.c3
-rw-r--r--drivers/nvme/host/core.c4
-rw-r--r--drivers/nvme/host/fabrics.c3
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/nvme.h1
-rw-r--r--drivers/nvme/host/pci.c55
-rw-r--r--drivers/nvme/host/rdma.c5
-rw-r--r--drivers/nvme/target/admin-cmd.c15
-rw-r--r--drivers/nvme/target/configfs.c14
-rw-r--r--drivers/of/of_numa.c1
-rw-r--r--drivers/of/platform.c16
-rw-r--r--drivers/of/resolver.c5
-rw-r--r--drivers/of/unittest.c8
-rw-r--r--drivers/pci/Kconfig12
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/ats.c3
-rw-r--r--drivers/pci/dwc/Kconfig88
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c19
-rw-r--r--drivers/pci/dwc/pci-imx6.c2
-rw-r--r--drivers/pci/dwc/pci-keystone.c2
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c21
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c6
-rw-r--r--drivers/pci/dwc/pcie-designware-ep.c19
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c80
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c155
-rw-r--r--drivers/pci/dwc/pcie-designware.c22
-rw-r--r--drivers/pci/dwc/pcie-designware.h1
-rw-r--r--drivers/pci/dwc/pcie-qcom.c13
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c35
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c23
-rw-r--r--drivers/pci/host/Kconfig55
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-aardvark.c7
-rw-r--r--drivers/pci/host/pci-ftpci100.c6
-rw-r--r--drivers/pci/host/pci-host-common.c13
-rw-r--r--drivers/pci/host/pci-host-generic.c1
-rw-r--r--drivers/pci/host/pci-hyperv.c162
-rw-r--r--drivers/pci/host/pci-mvebu.c2
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c2
-rw-r--r--drivers/pci/host/pci-tegra.c2
-rw-r--r--drivers/pci/host/pci-v3-semi.c5
-rw-r--r--drivers/pci/host/pci-versatile.c5
-rw-r--r--drivers/pci/host/pci-xgene.c5
-rw-r--r--drivers/pci/host/pcie-altera.c7
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c5
-rw-r--r--drivers/pci/host/pcie-mediatek.c236
-rw-r--r--drivers/pci/host/pcie-mobiveil.c866
-rw-r--r--drivers/pci/host/pcie-rcar.c284
-rw-r--r--drivers/pci/host/pcie-rockchip-ep.c642
-rw-r--r--drivers/pci/host/pcie-rockchip-host.c1142
-rw-r--r--drivers/pci/host/pcie-rockchip.c1580
-rw-r--r--drivers/pci/host/pcie-rockchip.h338
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c6
-rw-r--r--drivers/pci/host/pcie-xilinx.c6
-rw-r--r--drivers/pci/host/vmd.c91
-rw-r--r--drivers/pci/hotplug/Kconfig5
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c45
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c84
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c84
-rw-r--r--drivers/pci/hotplug/pnv_php.c8
-rw-r--r--drivers/pci/hotplug/shpchp.h12
-rw-r--r--drivers/pci/hotplug/shpchp_core.c14
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c8
-rw-r--r--drivers/pci/iov.c42
-rw-r--r--drivers/pci/of.c63
-rw-r--r--drivers/pci/pci-acpi.c55
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci-pf-stub.c54
-rw-r--r--drivers/pci/pci-sysfs.c15
-rw-r--r--drivers/pci/pci.c89
-rw-r--r--drivers/pci/pci.h45
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c11
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h32
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c397
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c38
-rw-r--r--drivers/pci/pcie/aspm.c9
-rw-r--r--drivers/pci/pcie/dpc.c74
-rw-r--r--drivers/pci/pcie/err.c388
-rw-r--r--drivers/pci/pcie/portdrv.h5
-rw-r--r--drivers/pci/pcie/portdrv_acpi.c57
-rw-r--r--drivers/pci/pcie/portdrv_core.c71
-rw-r--r--drivers/pci/probe.c96
-rw-r--r--drivers/pci/quirks.c1002
-rw-r--r--drivers/pci/setup-bus.c82
-rw-r--r--drivers/perf/Kconfig36
-rw-r--r--drivers/perf/arm-cci.c47
-rw-r--r--drivers/perf/arm-ccn.c22
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/perf/arm_spe_pmu.c6
-rw-r--r--drivers/pinctrl/Kconfig1
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/Kconfig15
-rw-r--r--drivers/pinctrl/actions/Makefile2
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c785
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.h162
-rw-r--r--drivers/pinctrl/actions/pinctrl-s900.c1888
-rw-r--r--drivers/pinctrl/bcm/Kconfig1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c100
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2.c5
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2cd.c5
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2q.c5
-rw-r--r--drivers/pinctrl/berlin/berlin-bg4ct.c13
-rw-r--r--drivers/pinctrl/berlin/berlin.c5
-rw-r--r--drivers/pinctrl/berlin/berlin.h5
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c42
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h6
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c27
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.c15
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.h6
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx21.c15
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx23.c19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx25.c28
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx27.c19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx28.c19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx35.c24
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx50.c19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx51.c21
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx53.c21
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6dl.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6q.c21
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sl.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sll.c8
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sx.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6ul.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7d.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7ulp.c17
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c13
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.h8
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c15
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c4
-rw-r--r--drivers/pinctrl/mediatek/Kconfig6
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c492
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h106
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt2701.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt2712.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c143
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8127.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8135.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8173.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c608
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.h13
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c107
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c23
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c32
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c22
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c246
-rw-r--r--drivers/pinctrl/pinctrl-single.c72
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c92
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c114
-rw-r--r--drivers/pinctrl/samsung/Kconfig10
-rw-r--r--drivers/pinctrl/samsung/Makefile1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c30
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c20
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos5440.c1005
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c29
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig10
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile2
-rw-r--r--drivers/pinctrl/sh-pfc/core.c12
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77470.c2343
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c8
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c8
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c1592
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77970.c32
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77980.c52
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c2695
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h10
-rw-r--r--drivers/pinctrl/sunxi/Kconfig4
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c128
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c11
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h11
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c30
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c49
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c54
-rw-r--r--drivers/power/reset/gpio-poweroff.c4
-rw-r--r--drivers/power/supply/ab8500_bmdata.c63
-rw-r--r--drivers/power/supply/ab8500_btemp.c93
-rw-r--r--drivers/power/supply/ab8500_charger.c133
-rw-r--r--drivers/power/supply/ab8500_fg.c14
-rw-r--r--drivers/power/supply/abx500_chargalg.c62
-rw-r--r--drivers/power/supply/axp288_charger.c26
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c27
-rw-r--r--drivers/power/supply/bq27xxx_battery.c9
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c2
-rw-r--r--drivers/power/supply/charger-manager.c5
-rw-r--r--drivers/power/supply/gpio-charger.c3
-rw-r--r--drivers/power/supply/power_supply_sysfs.c86
-rw-r--r--drivers/power/supply/s3c_adc_battery.c8
-rw-r--r--drivers/regulator/Kconfig18
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/ab8500.c1407
-rw-r--r--drivers/regulator/anatop-regulator.c22
-rw-r--r--drivers/regulator/arizona-ldo1.c19
-rw-r--r--drivers/regulator/axp20x-regulator.c2
-rw-r--r--drivers/regulator/bd71837-regulator.c640
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c127
-rw-r--r--drivers/regulator/core.c244
-rw-r--r--drivers/regulator/cpcap-regulator.c2
-rw-r--r--drivers/regulator/internal.h37
-rw-r--r--drivers/regulator/lm363x-regulator.c20
-rw-r--r--drivers/regulator/lp87565-regulator.c17
-rw-r--r--drivers/regulator/lp8788-ldo.c32
-rw-r--r--drivers/regulator/ltc3676.c20
-rw-r--r--drivers/regulator/max8952.c18
-rw-r--r--drivers/regulator/max8973-regulator.c54
-rw-r--r--drivers/regulator/max8998.c3
-rw-r--r--drivers/regulator/mc13783-regulator.c18
-rw-r--r--drivers/regulator/mc13892-regulator.c18
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c28
-rw-r--r--drivers/regulator/of_regulator.c193
-rw-r--r--drivers/regulator/pfuze100-regulator.c40
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c133
-rw-r--r--drivers/regulator/s5m8767.c26
-rw-r--r--drivers/regulator/sy8106a-regulator.c167
-rw-r--r--drivers/regulator/tps65090-regulator.c50
-rw-r--r--drivers/regulator/tps6586x-regulator.c1
-rw-r--r--drivers/regulator/twl-regulator.c2
-rw-r--r--drivers/regulator/wm8350-regulator.c1
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/rtc/interface.c5
-rw-r--r--drivers/rtc/nvmem.c2
-rw-r--r--drivers/rtc/rtc-88pm80x.c58
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c30
-rw-r--r--drivers/rtc/rtc-at91rm9200.c2
-rw-r--r--drivers/rtc/rtc-bq4802.c6
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c20
-rw-r--r--drivers/rtc/rtc-cmos.c155
-rw-r--r--drivers/rtc/rtc-cros-ec.c23
-rw-r--r--drivers/rtc/rtc-ds1216.c6
-rw-r--r--drivers/rtc/rtc-ds1307.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c9
-rw-r--r--drivers/rtc/rtc-ds1553.c15
-rw-r--r--drivers/rtc/rtc-ds1685.c66
-rw-r--r--drivers/rtc/rtc-ds1742.c8
-rw-r--r--drivers/rtc/rtc-ftrtc010.c36
-rw-r--r--drivers/rtc/rtc-lpc32xx.c16
-rw-r--r--drivers/rtc/rtc-ls1x.c32
-rw-r--r--drivers/rtc/rtc-m48t59.c41
-rw-r--r--drivers/rtc/rtc-mrst.c45
-rw-r--r--drivers/rtc/rtc-mv.c3
-rw-r--r--drivers/rtc/rtc-mxc.c34
-rw-r--r--drivers/rtc/rtc-mxc_v2.c29
-rw-r--r--drivers/rtc/rtc-pcap.c15
-rw-r--r--drivers/rtc/rtc-pxa.c2
-rw-r--r--drivers/rtc/rtc-rx8581.c210
-rw-r--r--drivers/rtc/rtc-sc27xx.c63
-rw-r--r--drivers/rtc/rtc-sh.c15
-rw-r--r--drivers/rtc/rtc-snvs.c13
-rw-r--r--drivers/rtc/rtc-st-lpc.c26
-rw-r--r--drivers/rtc/rtc-stk17ta8.c15
-rw-r--r--drivers/rtc/rtc-stm32.c339
-rw-r--r--drivers/rtc/rtc-sun6i.c4
-rw-r--r--drivers/rtc/rtc-sunxi.c23
-rw-r--r--drivers/rtc/rtc-test.c208
-rw-r--r--drivers/rtc/rtc-tps6586x.c55
-rw-r--r--drivers/rtc/rtc-tps65910.c26
-rw-r--r--drivers/rtc/rtc-vr41xx.c35
-rw-r--r--drivers/rtc/rtc-zynqmp.c10
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c90
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h21
-rw-r--r--drivers/s390/scsi/zfcp_erp.c194
-rw-r--r--drivers/s390/scsi/zfcp_ext.h16
-rw-r--r--drivers/s390/scsi/zfcp_fc.c11
-rw-r--r--drivers/s390/scsi/zfcp_fc.h22
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c61
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h6
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c141
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c5
-rw-r--r--drivers/scsi/3w-9xxx.c5
-rw-r--r--drivers/scsi/3w-xxxx.c3
-rw-r--r--drivers/scsi/Kconfig14
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/a100u2w.c13
-rw-r--r--drivers/scsi/am53c974.c13
-rw-r--r--drivers/scsi/cxlflash/Kconfig2
-rw-r--r--drivers/scsi/cxlflash/Makefile4
-rw-r--r--drivers/scsi/cxlflash/backend.h55
-rw-r--r--drivers/scsi/cxlflash/common.h12
-rw-r--r--drivers/scsi/cxlflash/cxl_hw.c13
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c4
-rw-r--r--drivers/scsi/cxlflash/main.c97
-rw-r--r--drivers/scsi/cxlflash/main.h21
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c1436
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.h77
-rw-r--r--drivers/scsi/cxlflash/sislite.h41
-rw-r--r--drivers/scsi/cxlflash/superpipe.c23
-rw-r--r--drivers/scsi/cxlflash/vlun.c3
-rw-r--r--drivers/scsi/dpt_i2o.c21
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c5
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h52
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c638
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c164
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c284
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c452
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/ips.c78
-rw-r--r--drivers/scsi/ips.h11
-rw-r--r--drivers/scsi/isci/init.c3
-rw-r--r--drivers/scsi/iscsi_tcp.c1
-rw-r--r--drivers/scsi/libsas/sas_ata.c5
-rw-r--r--drivers/scsi/libsas/sas_discover.c1
-rw-r--r--drivers/scsi/lpfc/lpfc.h9
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c124
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c98
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c153
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c238
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/megaraid.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c27
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h9
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h30
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h2
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c477
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h60
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c33
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c491
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c3
-rw-r--r--drivers/scsi/mvumi.c20
-rw-r--r--drivers/scsi/osd/osd_initiator.c16
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c2
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.c2
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/drv_scsi_fw_funcs.c2
-rw-r--r--drivers/scsi/qedf/drv_scsi_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/qedf.h6
-rw-r--r--drivers/scsi/qedf/qedf_attr.c2
-rw-r--r--drivers/scsi/qedf/qedf_dbg.c4
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c2
-rw-r--r--drivers/scsi/qedf/qedf_els.c35
-rw-r--r--drivers/scsi/qedf/qedf_fip.c5
-rw-r--r--drivers/scsi/qedf/qedf_hsi.h2
-rw-r--r--drivers/scsi/qedf/qedf_io.c87
-rw-r--r--drivers/scsi/qedf/qedf_main.c130
-rw-r--r--drivers/scsi/qedf/qedf_version.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c105
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c192
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c37
-rw-r--r--drivers/scsi/qlogicpti.c6
-rw-r--r--drivers/scsi/scsi_debugfs.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c31
-rw-r--r--drivers/scsi/scsi_dh.c5
-rw-r--r--drivers/scsi/scsi_error.c7
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c4
-rw-r--r--drivers/scsi/sd.h12
-rw-r--r--drivers/scsi/sd_zbc.c10
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/snic/snic_scsi.c6
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/scsi/storvsc_drv.c85
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c308
-rw-r--r--drivers/scsi/ufs/ufshcd.h21
-rw-r--r--drivers/scsi/wd719x.c13
-rw-r--r--drivers/scsi/zorro_esp.c1172
-rw-r--r--drivers/soc/rockchip/pm_domains.c4
-rw-r--r--drivers/soc/tegra/pmc.c3
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/android/Kconfig11
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/TODO9
-rw-r--r--drivers/staging/android/ashmem.h6
-rw-r--r--drivers/staging/android/ion/Kconfig2
-rw-r--r--drivers/staging/android/ion/ion.c22
-rw-r--r--drivers/staging/android/ion/ion.h2
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c5
-rw-r--r--drivers/staging/android/uapi/ashmem.h6
-rw-r--r--drivers/staging/android/uapi/ion.h2
-rw-r--r--drivers/staging/android/uapi/vsoc_shm.h295
-rw-r--r--drivers/staging/android/vsoc.c1152
-rw-r--r--drivers/staging/board/armadillo800eva.c10
-rw-r--r--drivers/staging/board/board.c5
-rw-r--r--drivers/staging/comedi/comedi_compat32.c12
-rw-r--r--drivers/staging/comedi/comedi_usb.h5
-rw-r--r--drivers/staging/comedi/comedidev.h2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c6
-rw-r--r--drivers/staging/emxx_udc/Kconfig2
-rw-r--r--drivers/staging/emxx_udc/TODO2
-rw-r--r--drivers/staging/fbtft/TODO4
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c13
-rw-r--r--drivers/staging/fsl-dpaa2/Kconfig8
-rw-r--r--drivers/staging/fsl-dpaa2/Makefile5
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c111
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h49
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c23
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h13
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw.c42
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw.h6
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c39
-rw-r--r--drivers/staging/fsl-dpaa2/rtc/Makefile7
-rw-r--r--drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h137
-rw-r--r--drivers/staging/fsl-dpaa2/rtc/dprtc.c701
-rw-r--r--drivers/staging/fsl-dpaa2/rtc/dprtc.h164
-rw-r--r--drivers/staging/fsl-dpaa2/rtc/rtc.c229
-rw-r--r--drivers/staging/fsl-dpaa2/rtc/rtc.h14
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-driver.c28
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/qbman-portal.h4
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-io.h2
-rw-r--r--drivers/staging/gdm724x/gdm_endian.c14
-rw-r--r--drivers/staging/gdm724x/gdm_endian.h14
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c16
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h14
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c14
-rw-r--r--drivers/staging/gdm724x/gdm_mux.h14
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c14
-rw-r--r--drivers/staging/gdm724x/gdm_tty.h14
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c14
-rw-r--r--drivers/staging/gdm724x/gdm_usb.h14
-rw-r--r--drivers/staging/gdm724x/hci.h14
-rw-r--r--drivers/staging/gdm724x/hci_packet.h14
-rw-r--r--drivers/staging/gdm724x/netlink_k.c14
-rw-r--r--drivers/staging/gdm724x/netlink_k.h14
-rw-r--r--drivers/staging/goldfish/Kconfig9
-rw-r--r--drivers/staging/goldfish/Makefile1
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c441
-rw-r--r--drivers/staging/goldfish/goldfish_nand_reg.h76
-rw-r--r--drivers/staging/greybus/TODO5
-rw-r--r--drivers/staging/greybus/arche-platform.c9
-rw-r--r--drivers/staging/greybus/audio_codec.h5
-rw-r--r--drivers/staging/greybus/audio_topology.c2
-rw-r--r--drivers/staging/greybus/camera.c17
-rw-r--r--drivers/staging/greybus/svc.c3
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/Makefile1
-rw-r--r--drivers/staging/iio/TODO9
-rw-r--r--drivers/staging/iio/accel/Kconfig24
-rw-r--r--drivers/staging/iio/accel/Makefile2
-rw-r--r--drivers/staging/iio/accel/adis16201.c385
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c6
-rw-r--r--drivers/staging/iio/adc/ad7780.c2
-rw-r--r--drivers/staging/iio/cdc/ad7746.c44
-rw-r--r--drivers/staging/iio/light/Kconfig14
-rw-r--r--drivers/staging/iio/light/Makefile5
-rw-r--r--drivers/staging/iio/light/tsl2x7x.c1889
-rw-r--r--drivers/staging/iio/light/tsl2x7x.h99
-rw-r--r--drivers/staging/iio/meter/Kconfig42
-rw-r--r--drivers/staging/iio/meter/Makefile8
-rw-r--r--drivers/staging/iio/meter/ade7753.c630
-rw-r--r--drivers/staging/iio/meter/ade7754.c664
-rw-r--r--drivers/staging/iio/meter/ade7758.h183
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c955
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c177
-rw-r--r--drivers/staging/iio/meter/ade7758_trigger.c108
-rw-r--r--drivers/staging/iio/meter/ade7759.c558
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c238
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c268
-rw-r--r--drivers/staging/iio/meter/ade7854.c40
-rw-r--r--drivers/staging/iio/meter/ade7854.h16
-rw-r--r--drivers/staging/iio/resolver/Kconfig12
-rw-r--r--drivers/staging/iio/resolver/Makefile1
-rw-r--r--drivers/staging/ipx/Kconfig61
-rw-r--r--drivers/staging/ipx/Makefile8
-rw-r--r--drivers/staging/ipx/TODO4
-rw-r--r--drivers/staging/ipx/af_ipx.c2082
-rw-r--r--drivers/staging/ipx/ipx_proc.c303
-rw-r--r--drivers/staging/ipx/ipx_route.c293
-rw-r--r--drivers/staging/ipx/pe2.c36
-rw-r--r--drivers/staging/ipx/sysctl_net_ipx.c40
-rw-r--r--drivers/staging/ks7010/eap_packet.h76
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c632
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.h164
-rw-r--r--drivers/staging/ks7010/ks_hostif.c1043
-rw-r--r--drivers/staging/ks7010/ks_hostif.h465
-rw-r--r--drivers/staging/ks7010/ks_wlan.h278
-rw-r--r--drivers/staging/ks7010/ks_wlan_ioctl.h7
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c1118
-rw-r--r--drivers/staging/ks7010/michael_mic.c28
-rw-r--r--drivers/staging/ks7010/michael_mic.h11
-rw-r--r--drivers/staging/lustre/Kconfig3
-rw-r--r--drivers/staging/lustre/Makefile2
-rw-r--r--drivers/staging/lustre/README.txt83
-rw-r--r--drivers/staging/lustre/TODO302
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h85
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h135
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h295
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h205
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h167
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h191
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h866
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h82
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h200
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h100
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_time.h79
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h133
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h78
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h103
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h212
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h644
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h666
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h87
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h149
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h141
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h149
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h669
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h134
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h556
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h119
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h44
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h261
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h293
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h72
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h2688
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h231
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h94
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h236
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h94
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h1325
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h27
-rw-r--r--drivers/staging/lustre/lnet/Kconfig46
-rw-r--r--drivers/staging/lustre/lnet/Makefile1
-rw-r--r--drivers/staging/lustre/lnet/klnds/Makefile1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile5
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c2952
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h1038
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c3751
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c287
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/Makefile6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c2918
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h705
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c2592
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c533
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c184
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c810
-rw-r--r--drivers/staging/lustre/lnet/libcfs/Makefile19
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c458
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c142
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c2064
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c228
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c152
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c167
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c556
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c1079
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c139
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c443
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h30
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c145
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c197
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c257
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c604
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c1191
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h263
-rw-r--r--drivers/staging/lustre/lnet/lnet/Makefile10
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c501
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c2307
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c1234
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c426
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c463
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c274
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c2388
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c625
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c987
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c586
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c105
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c223
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c1023
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c1258
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c456
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c1800
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c909
-rw-r--r--drivers/staging/lustre/lnet/selftest/Makefile7
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c526
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c799
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c1397
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h143
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c2101
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h244
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c1786
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c165
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c228
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c1682
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h295
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h623
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c244
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.h50
-rw-r--r--drivers/staging/lustre/lustre/Kconfig45
-rw-r--r--drivers/staging/lustre/lustre/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/fid/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h47
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c88
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c440
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c226
-rw-r--r--drivers/staging/lustre/lustre/fld/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c517
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h171
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c471
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c155
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h2463
-rw-r--r--drivers/staging/lustre/lustre/include/interval_tree.h119
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h67
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h672
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h1335
-rw-r--r--drivers/staging/lustre/lustre/include/lu_ref.h178
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_acl.h46
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h81
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h52
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h153
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h1354
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h402
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_errno.h198
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h257
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h676
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h138
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h61
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h93
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h368
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_intent.h69
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_kernelcomm.h56
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h124
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h93
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h174
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h382
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h229
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mds.h63
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h2359
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h718
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h71
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_obdo.h55
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_patchless_compat.h67
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h307
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h1070
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h109
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h1101
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h153
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h1607
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h545
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h200
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c599
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c74
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c259
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c495
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c69
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h342
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c843
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c2146
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c1163
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c68
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c1023
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c2080
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c1369
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile11
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c300
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c1706
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c3600
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c206
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c293
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c186
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h1337
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c2666
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c478
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c375
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c1684
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c1202
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.c240
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.h83
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c1214
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c641
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c1577
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c185
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c159
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c659
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h321
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c1374
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c87
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c305
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c523
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c638
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c523
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c96
-rw-r--r--drivers/staging/lustre/lustre/lmv/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c82
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c517
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h164
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c3131
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c173
-rw-r--r--drivers/staging/lustre/lustre/lov/Makefile9
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h641
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c384
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c332
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h283
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c1023
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c348
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c107
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c1448
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c1625
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c271
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c400
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c136
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c586
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c356
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c147
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c81
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c180
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c68
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c299
-rw-r--r--drivers/staging/lustre/lustre/mdc/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c231
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h144
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c497
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c1202
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c419
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c2754
-rw-r--r--drivers/staging/lustre/lustre/mgc/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c69
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h58
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c1844
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_internal.h95
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c1152
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c275
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c1061
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c1046
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c535
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c96
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c1514
-rw-r--r--drivers/staging/lustre/lustre/obdclass/kernelcomm.c238
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linkea.c249
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c531
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c162
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c523
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c236
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_internal.h79
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c225
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c412
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c133
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c1810
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c2058
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ref.c47
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c240
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c214
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c1559
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c1244
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c181
-rw-r--r--drivers/staging/lustre/lustre/obdclass/statfs_pack.c58
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c47
-rw-r--r--drivers/staging/lustre/lustre/obdecho/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c1724
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_internal.h42
-rw-r--r--drivers/staging/lustre/lustre/osc/Makefile6
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c843
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c3306
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h683
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c246
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h236
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c918
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c1231
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c474
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c1094
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c284
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c2899
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/Makefile23
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c3269
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c234
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/errno.c381
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c585
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c1676
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c2234
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c340
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c69
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c1332
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c770
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c1612
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c269
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c2313
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c72
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c477
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h371
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c182
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c912
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c374
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c2383
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c572
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c850
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c190
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c195
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c459
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c1023
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c2804
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c4210
-rw-r--r--drivers/staging/lustre/sysfs-fs-lustre654
-rw-r--r--drivers/staging/most/cdev/cdev.c16
-rw-r--r--drivers/staging/most/core.c68
-rw-r--r--drivers/staging/most/core.h4
-rw-r--r--drivers/staging/most/dim2/Kconfig2
-rw-r--r--drivers/staging/most/dim2/dim2.c397
-rw-r--r--drivers/staging/most/dim2/dim2.h21
-rw-r--r--drivers/staging/most/dim2/hal.c9
-rw-r--r--drivers/staging/most/dim2/reg.h84
-rw-r--r--drivers/staging/most/i2c/i2c.c140
-rw-r--r--drivers/staging/most/sound/sound.c123
-rw-r--r--drivers/staging/most/usb/usb.c55
-rw-r--r--drivers/staging/most/video/video.c32
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts3
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi24
-rw-r--r--drivers/staging/mt7621-eth/ethtool.c54
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7620.h4
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7621.c10
-rw-r--r--drivers/staging/mt7621-eth/mdio.c6
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c44
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.h33
-rw-r--r--drivers/staging/mt7621-eth/soc_mt7621.c7
-rw-r--r--drivers/staging/mt7621-gpio/TODO2
-rw-r--r--drivers/staging/mt7621-gpio/gpio-mt7621.c200
-rw-r--r--drivers/staging/mt7621-gpio/mediatek,mt7621-gpio.txt68
-rw-r--r--drivers/staging/mt7621-mmc/board.h82
-rw-r--r--drivers/staging/mt7621-mmc/dbg.c398
-rw-r--r--drivers/staging/mt7621-mmc/dbg.h97
-rw-r--r--drivers/staging/mt7621-mmc/mt6575_sd.h1087
-rw-r--r--drivers/staging/mt7621-mmc/sd.c4458
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c481
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c1
-rw-r--r--drivers/staging/mt7621-spi/spi-mt7621.c4
-rw-r--r--drivers/staging/ncpfs/Kconfig108
-rw-r--r--drivers/staging/ncpfs/Makefile17
-rw-r--r--drivers/staging/ncpfs/TODO4
-rw-r--r--drivers/staging/ncpfs/dir.c1220
-rw-r--r--drivers/staging/ncpfs/file.c263
-rw-r--r--drivers/staging/ncpfs/getopt.c76
-rw-r--r--drivers/staging/ncpfs/getopt.h17
-rw-r--r--drivers/staging/ncpfs/inode.c1067
-rw-r--r--drivers/staging/ncpfs/ioctl.c923
-rw-r--r--drivers/staging/ncpfs/mmap.c125
-rw-r--r--drivers/staging/ncpfs/ncp_fs.h101
-rw-r--r--drivers/staging/ncpfs/ncp_fs_i.h31
-rw-r--r--drivers/staging/ncpfs/ncp_fs_sb.h174
-rw-r--r--drivers/staging/ncpfs/ncplib_kernel.c1326
-rw-r--r--drivers/staging/ncpfs/ncplib_kernel.h215
-rw-r--r--drivers/staging/ncpfs/ncpsign_kernel.c128
-rw-r--r--drivers/staging/ncpfs/ncpsign_kernel.h27
-rw-r--r--drivers/staging/ncpfs/sock.c855
-rw-r--r--drivers/staging/ncpfs/symlink.c182
-rw-r--r--drivers/staging/nvec/nvec.c81
-rw-r--r--drivers/staging/nvec/nvec.h2
-rw-r--r--drivers/staging/olpc_dcon/TODO4
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433.txt6
-rw-r--r--drivers/staging/pi433/Documentation/pi433.txt10
-rw-r--r--drivers/staging/pi433/Kconfig2
-rw-r--r--drivers/staging/pi433/pi433_if.c39
-rw-r--r--drivers/staging/pi433/rf69.c182
-rw-r--r--drivers/staging/pi433/rf69.h28
-rw-r--r--drivers/staging/rtl8188eu/TODO5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c72
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c26
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c20
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c38
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c100
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c34
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c16
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c12
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c4
-rw-r--r--drivers/staging/rtl8192e/dot11d.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c14
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c467
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c4
-rw-r--r--drivers/staging/rtl8723bs/Makefile2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_eeprom.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_odm.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_rf.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/Mp_Precomp.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_phy.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_pwr_seq.c (renamed from drivers/staging/rtl8723bs/hal/Hal8723BPwrSeq.c)12
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_sdio.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_AntDiv.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_AntDiv.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_PathDiv.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_PathDiv.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RTL8723B.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RTL8723B.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_debug.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_debug.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_interface.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_precomp.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_reg.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_types.h10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c40
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c10
-rw-r--r--drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h10
-rw-r--r--drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h10
-rw-r--r--drivers/staging/rtl8723bs/include/HalVerDef.h10
-rw-r--r--drivers/staging/rtl8723bs/include/autoconf.h10
-rw-r--r--drivers/staging/rtl8723bs/include/basic_types.h10
-rw-r--r--drivers/staging/rtl8723bs/include/cmd_osdep.h10
-rw-r--r--drivers/staging/rtl8723bs/include/drv_conf.h10
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h10
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types_sdio.h10
-rw-r--r--drivers/staging/rtl8723bs/include/ethernet.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_btcoex.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_h2c.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_phycfg.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_reg.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_data.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pg.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy_cfg.h (renamed from drivers/staging/rtl8723bs/include/Hal8723BPhyCfg.h)10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy_reg.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy_reg_8723b.h (renamed from drivers/staging/rtl8723bs/include/Hal8723BPhyReg.h)10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pwr_seq.h (renamed from drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h)0
-rw-r--r--drivers/staging/rtl8723bs/include/hal_sdio.h10
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h10
-rw-r--r--drivers/staging/rtl8723bs/include/ioctl_cfg80211.h10
-rw-r--r--drivers/staging/rtl8723bs/include/mlme_osdep.h10
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_intf.h10
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h10
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h10
-rw-r--r--drivers/staging/rtl8723bs/include/recv_osdep.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8192c_recv.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8192c_rf.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_cmd.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_dm.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h283
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_recv.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_rf.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_spec.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_xmit.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ap.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_beamforming.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_br_ext.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_btcoex.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_byteorder.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_debug.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_eeprom.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_efuse.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_event.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ht.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_io.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ioctl.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ioctl_set.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mp.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_odm.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_pwrctrl.h35
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_qos.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_rf.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_security.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_version.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h10
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_hal.h10
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_ops.h10
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_ops_linux.h10
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_osintf.h10
-rw-r--r--drivers/staging/rtl8723bs/include/sta_info.h10
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h22
-rw-r--r--drivers/staging/rtl8723bs/include/wlan_bssdef.h10
-rw-r--r--drivers/staging/rtl8723bs/include/xmit_osdep.h10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c12
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.h10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c10
-rw-r--r--drivers/staging/rtlwifi/base.c13
-rw-r--r--drivers/staging/rtlwifi/base.h13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbt_precomp.h13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.c13
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.h13
-rw-r--r--drivers/staging/rtlwifi/cam.c13
-rw-r--r--drivers/staging/rtlwifi/cam.h13
-rw-r--r--drivers/staging/rtlwifi/core.c13
-rw-r--r--drivers/staging/rtlwifi/core.h13
-rw-r--r--drivers/staging/rtlwifi/debug.c32
-rw-r--r--drivers/staging/rtlwifi/debug.h13
-rw-r--r--drivers/staging/rtlwifi/efuse.c13
-rw-r--r--drivers/staging/rtlwifi/efuse.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_2_platform.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit2.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_info.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg2.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_type.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_usb_reg.h13
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.c13
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.h13
-rw-r--r--drivers/staging/rtlwifi/pci.c13
-rw-r--r--drivers/staging/rtlwifi/pci.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/mp_precomp.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_beamforming.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dfs.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_features.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_iqk.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_pre_define.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_precomp.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.c17
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_reg.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_types.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c47
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c13
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h13
-rw-r--r--drivers/staging/rtlwifi/ps.c13
-rw-r--r--drivers/staging/rtlwifi/ps.h13
-rw-r--r--drivers/staging/rtlwifi/pwrseqcmd.h13
-rw-r--r--drivers/staging/rtlwifi/rc.c13
-rw-r--r--drivers/staging/rtlwifi/rc.h13
-rw-r--r--drivers/staging/rtlwifi/regd.c13
-rw-r--r--drivers/staging/rtlwifi/regd.h13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/def.h13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.h13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.c13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.h13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.c13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.h13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.c13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.h13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/reg.h13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.c13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.h13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.c13
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.h13
-rw-r--r--drivers/staging/rtlwifi/stats.c13
-rw-r--r--drivers/staging/rtlwifi/stats.h13
-rw-r--r--drivers/staging/rtlwifi/wifi.h13
-rw-r--r--drivers/staging/sm750fb/TODO5
-rw-r--r--drivers/staging/speakup/buffers.c4
-rw-r--r--drivers/staging/speakup/main.c6
-rw-r--r--drivers/staging/speakup/speakup.h3
-rw-r--r--drivers/staging/speakup/speakup_dummy.c1
-rw-r--r--drivers/staging/speakup/speakup_soft.c3
-rw-r--r--drivers/staging/speakup/spk_types.h2
-rw-r--r--drivers/staging/speakup/varhandlers.c1
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c25
-rw-r--r--drivers/staging/vboxvideo/vbox_main.c3
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c2
-rw-r--r--drivers/staging/vc04_services/Kconfig1
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c64
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c43
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h12
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/TODO11
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c402
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-common.h9
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h13
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c345
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h15
-rw-r--r--drivers/staging/vc04_services/interface/vchi/TODO9
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c96
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c55
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h1
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c137
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c1
-rw-r--r--drivers/staging/vt6655/card.c5
-rw-r--r--drivers/staging/vt6655/device_main.c150
-rw-r--r--drivers/staging/wilc1000/TODO12
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c140
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.h37
-rw-r--r--drivers/staging/wilc1000/host_interface.c549
-rw-r--r--drivers/staging/wilc1000/host_interface.h6
-rw-r--r--drivers/staging/wilc1000/linux_mon.c90
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c378
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c2
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c109
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c99
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c1049
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.h18
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h83
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c26
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h37
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c1
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h16
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c7
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c8
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h26
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h2
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h2
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h2
-rw-r--r--drivers/staging/wlan-ng/p80211metastruct.h90
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h2
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h2
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c8
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h8
-rw-r--r--drivers/staging/wlan-ng/p80211req.h2
-rw-r--r--drivers/staging/wlan-ng/p80211types.h33
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c52
-rw-r--r--drivers/target/target_core_configfs.c25
-rw-r--r--drivers/target/target_core_file.c137
-rw-r--r--drivers/target/target_core_file.h1
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_pscsi.c26
-rw-r--r--drivers/target/target_core_transport.c64
-rw-r--r--drivers/target/target_core_user.c160
-rw-r--r--drivers/xen/privcmd.c135
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c5
-rw-r--r--fs/9p/v9fs.c29
-rw-r--r--fs/Kconfig5
-rw-r--r--fs/Makefile1
-rw-r--r--fs/aio.c18
-rw-r--r--fs/autofs/Kconfig20
-rw-r--r--fs/autofs/Makefile7
-rw-r--r--fs/autofs/autofs_i.h (renamed from fs/autofs4/autofs_i.h)92
-rw-r--r--fs/autofs/dev-ioctl.c (renamed from fs/autofs4/dev-ioctl.c)31
-rw-r--r--fs/autofs/expire.c (renamed from fs/autofs4/expire.c)133
-rw-r--r--fs/autofs/init.c (renamed from fs/autofs4/init.c)12
-rw-r--r--fs/autofs/inode.c (renamed from fs/autofs4/inode.c)54
-rw-r--r--fs/autofs/root.c (renamed from fs/autofs4/root.c)277
-rw-r--r--fs/autofs/symlink.c (renamed from fs/autofs4/symlink.c)16
-rw-r--r--fs/autofs/waitq.c (renamed from fs/autofs4/waitq.c)59
-rw-r--r--fs/autofs4/Kconfig46
-rw-r--r--fs/autofs4/Makefile4
-rw-r--r--fs/binfmt_misc.c12
-rw-r--r--fs/block_dev.c2
-rw-r--r--fs/cifs/cifsacl.h14
-rw-r--r--fs/cifs/cifsencrypt.c17
-rw-r--r--fs/cifs/cifsfs.c22
-rw-r--r--fs/cifs/cifsglob.h6
-rw-r--r--fs/cifs/cifsproto.h7
-rw-r--r--fs/cifs/connect.c32
-rw-r--r--fs/cifs/link.c6
-rw-r--r--fs/cifs/misc.c17
-rw-r--r--fs/cifs/smb2file.c3
-rw-r--r--fs/cifs/smb2inode.c3
-rw-r--r--fs/cifs/smb2misc.c4
-rw-r--r--fs/cifs/smb2ops.c73
-rw-r--r--fs/cifs/smb2pdu.c24
-rw-r--r--fs/cifs/smb2proto.h2
-rw-r--r--fs/cifs/smb2transport.c12
-rw-r--r--fs/cifs/smbdirect.c121
-rw-r--r--fs/cifs/smbdirect.h2
-rw-r--r--fs/cifs/transport.c34
-rw-r--r--fs/compat.c72
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/dax.c214
-rw-r--r--fs/exec.c1
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/hpfs/hpfs_fn.h3
-rw-r--r--fs/iomap.c1
-rw-r--r--fs/ocfs2/dlmglue.c119
-rw-r--r--fs/ocfs2/dlmglue.h1
-rw-r--r--fs/ocfs2/file.c10
-rw-r--r--fs/ocfs2/file.h2
-rw-r--r--fs/ocfs2/ioctl.c2
-rw-r--r--fs/ocfs2/mmap.c44
-rw-r--r--fs/ocfs2/namei.c3
-rw-r--r--fs/ocfs2/ocfs2_fs.h8
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc/base.c265
-rw-r--r--fs/proc/fd.c2
-rw-r--r--fs/proc/internal.h2
-rw-r--r--fs/proc/page.c2
-rw-r--r--fs/proc/task_mmu.c26
-rw-r--r--fs/ubifs/file.c11
-rw-r--r--fs/ubifs/gc.c2
-rw-r--r--fs/ubifs/journal.c5
-rw-r--r--fs/ubifs/log.c6
-rw-r--r--fs/ubifs/lpt_commit.c2
-rw-r--r--fs/ubifs/replay.c5
-rw-r--r--fs/ubifs/ubifs.h2
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--fs/userfaultfd.c22
-rw-r--r--fs/xfs/xfs_file.c72
-rw-r--r--fs/xfs/xfs_inode.h16
-rw-r--r--fs/xfs/xfs_ioctl.c8
-rw-r--r--fs/xfs/xfs_iops.c16
-rw-r--r--fs/xfs/xfs_pnfs.c15
-rw-r--r--fs/xfs/xfs_pnfs.h5
-rw-r--r--include/asm-generic/int-ll64.h19
-rw-r--r--include/dt-bindings/clock/actions,s900-cmu.h129
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h4
-rw-r--r--include/dt-bindings/clock/axg-aoclkc.h26
-rw-r--r--include/dt-bindings/clock/bcm-sr.h24
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h2
-rw-r--r--include/dt-bindings/clock/histb-clock.h8
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h4
-rw-r--r--include/dt-bindings/clock/imx6sx-clock.h6
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h33
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h4
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h1
-rw-r--r--include/dt-bindings/clock/mt2701-clk.h20
-rw-r--r--include/dt-bindings/clock/nuvoton,npcm7xx-clock.h44
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h208
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h239
-rw-r--r--include/dt-bindings/clock/qcom,rpmh.h22
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sdm845.h35
-rw-r--r--include/dt-bindings/clock/r8a77470-cpg-mssr.h36
-rw-r--r--include/dt-bindings/clock/r8a77990-cpg-mssr.h62
-rw-r--r--include/dt-bindings/clock/sun50i-h6-r-ccu.h24
-rw-r--r--include/dt-bindings/pinctrl/mt7623-pinfunc.h90
-rw-r--r--include/dt-bindings/reset/axg-aoclkc.h20
-rw-r--r--include/dt-bindings/reset/mt2701-resets.h3
-rw-r--r--include/dt-bindings/reset/sun50i-h6-r-ccu.h17
-rw-r--r--include/linux/acpi.h22
-rw-r--r--include/linux/aer.h1
-rw-r--r--include/linux/arm-smccc.h10
-rw-r--r--include/linux/bio.h1
-rw-r--r--include/linux/cacheinfo.h25
-rw-r--r--include/linux/cfag12864b.h16
-rw-r--r--include/linux/clk-provider.h23
-rw-r--r--include/linux/clk/davinci.h40
-rw-r--r--include/linux/dax.h16
-rw-r--r--include/linux/device-mapper.h5
-rw-r--r--include/linux/dma/sprd-dma.h61
-rw-r--r--include/linux/fs.h16
-rw-r--r--include/linux/gfp.h14
-rw-r--r--include/linux/gpio/consumer.h10
-rw-r--r--include/linux/gpio/machine.h31
-rw-r--r--include/linux/hid.h19
-rw-r--r--include/linux/hmm.h8
-rw-r--r--include/linux/hyperv.h12
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h24
-rw-r--r--include/linux/iio/adc/stm32-dfsdm-adc.h2
-rw-r--r--include/linux/iio/common/cros_ec_sensors_core.h (renamed from drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h)3
-rw-r--r--include/linux/iio/iio.h24
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/ioprio.h9
-rw-r--r--include/linux/irq.h7
-rw-r--r--include/linux/kernel.h17
-rw-r--r--include/linux/ks0108.h16
-rw-r--r--include/linux/ksm.h17
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/log2.h35
-rw-r--r--include/linux/memcontrol.h51
-rw-r--r--include/linux/memfd.h16
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/memremap.h36
-rw-r--r--include/linux/mfd/abx500.h1
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h2
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h4
-rw-r--r--include/linux/mfd/bd9571mwv.h5
-rw-r--r--include/linux/mfd/lp8788.h16
-rw-r--r--include/linux/mfd/samsung/core.h4
-rw-r--r--include/linux/mfd/tps65090.h8
-rw-r--r--include/linux/mfd/tps6586x.h1
-rw-r--r--include/linux/mlx5/device.h12
-rw-r--r--include/linux/mlx5/fs.h4
-rw-r--r--include/linux/mlx5/mlx5_ifc.h52
-rw-r--r--include/linux/mm.h91
-rw-r--r--include/linux/mm_types.h239
-rw-r--r--include/linux/mpi.h61
-rw-r--r--include/linux/mtd/nand.h3
-rw-r--r--include/linux/mtd/rawnand.h35
-rw-r--r--include/linux/mtd/spi-nor.h2
-rw-r--r--include/linux/of_clk.h30
-rw-r--r--include/linux/of_pci.h34
-rw-r--r--include/linux/page-flags.h51
-rw-r--r--include/linux/page_counter.h26
-rw-r--r--include/linux/pci-ecam.h1
-rw-r--r--include/linux/pci-epc.h8
-rw-r--r--include/linux/pci-epf.h4
-rw-r--r--include/linux/pci.h21
-rw-r--r--include/linux/pci_hotplug.h18
-rw-r--r--include/linux/pci_ids.h9
-rw-r--r--include/linux/perf/arm_pmu.h2
-rw-r--r--include/linux/pfn_t.h4
-rw-r--r--include/linux/platform_data/gpio-dwapb.h3
-rw-r--r--include/linux/platform_data/tsl2772.h101
-rw-r--r--include/linux/power/bq27xxx_battery.h3
-rw-r--r--include/linux/rcupdate.h4
-rw-r--r--include/linux/regulator/ab8500.h157
-rw-r--r--include/linux/regulator/arizona-ldo1.h3
-rw-r--r--include/linux/regulator/consumer.h1
-rw-r--r--include/linux/regulator/driver.h27
-rw-r--r--include/linux/regulator/machine.h4
-rw-r--r--include/linux/regulator/max8952.h1
-rw-r--r--include/linux/rtc.h2
-rw-r--r--include/linux/sched.h134
-rw-r--r--include/linux/sched/mm.h4
-rw-r--r--include/linux/shmem_fs.h13
-rw-r--r--include/linux/slab_def.h7
-rw-r--r--include/linux/slub_def.h1
-rw-r--r--include/linux/syscalls.h4
-rw-r--r--include/linux/types.h34
-rw-r--r--include/linux/uio.h2
-rw-r--r--include/linux/userfaultfd_k.h6
-rw-r--r--include/ras/ras_event.h22
-rw-r--r--include/rdma/ib_addr.h20
-rw-r--r--include/rdma/ib_cache.h1
-rw-r--r--include/rdma/ib_verbs.h102
-rw-r--r--include/rdma/rdma_cm.h3
-rw-r--r--include/rdma/rdma_vt.h7
-rw-r--r--include/rdma/rdmavt_cq.h5
-rw-r--r--include/rdma/rdmavt_qp.h1
-rw-r--r--include/rdma/restrack.h22
-rw-r--r--include/rdma/uverbs_ioctl.h11
-rw-r--r--include/scsi/scsi_device.h2
-rw-r--r--include/scsi/scsi_devinfo.h75
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/rseq.h57
-rw-r--r--include/uapi/linux/aio_abi.h1
-rw-r--r--include/uapi/linux/audit.h1
-rw-r--r--include/uapi/linux/auto_fs.h169
-rw-r--r--include/uapi/linux/auto_fs4.h153
-rw-r--r--include/uapi/linux/kernel-page-flags.h2
-rw-r--r--include/uapi/linux/ncp.h202
-rw-r--r--include/uapi/linux/ncp_fs.h147
-rw-r--r--include/uapi/linux/ncp_mount.h72
-rw-r--r--include/uapi/linux/ncp_no.h20
-rw-r--r--include/uapi/linux/pci_regs.h6
-rw-r--r--include/uapi/linux/rseq.h133
-rw-r--r--include/uapi/linux/target_core_user.h11
-rw-r--r--include/uapi/linux/types_32_64.h50
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h21
-rw-r--r--include/uapi/rdma/ib_user_verbs.h63
-rw-r--r--include/uapi/rdma/mlx5-abi.h30
-rw-r--r--include/uapi/rdma/rdma_netlink.h30
-rw-r--r--include/uapi/xen/privcmd.h11
-rw-r--r--include/xen/interface/memory.h66
-rw-r--r--include/xen/interface/xen.h7
-rw-r--r--include/xen/xen-ops.h24
-rw-r--r--init/Kconfig23
-rw-r--r--kernel/Makefile4
-rw-r--r--kernel/crash_core.c1
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/hung_task.c11
-rw-r--r--kernel/iomem.c167
-rw-r--r--kernel/irq/manage.c38
-rw-r--r--kernel/irq/migration.c31
-rw-r--r--kernel/memremap.c210
-rw-r--r--kernel/resource.c1
-rw-r--r--kernel/rseq.c357
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/signal.c24
-rw-r--r--kernel/sys.c10
-rw-r--r--kernel/sys_ni.c3
-rw-r--r--kernel/workqueue.c1
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/bitmap.c5
-rw-r--r--lib/bucket_locks.c5
-rw-r--r--lib/idr.c10
-rw-r--r--lib/mpi/mpi-internal.h75
-rw-r--r--lib/percpu_ida.c63
-rw-r--r--lib/ucs2_string.c2
-rw-r--r--mm/Kconfig8
-rw-r--r--mm/Makefile1
-rw-r--r--mm/backing-dev.c4
-rw-r--r--mm/filemap.c8
-rw-r--r--mm/gup.c78
-rw-r--r--mm/hmm.c13
-rw-r--r--mm/huge_memory.c7
-rw-r--r--mm/hugetlb.c44
-rw-r--r--mm/hugetlb_cgroup.c6
-rw-r--r--mm/init-mm.c1
-rw-r--r--mm/ksm.c11
-rw-r--r--mm/memblock.c27
-rw-r--r--mm/memcontrol.c342
-rw-r--r--mm/memfd.c345
-rw-r--r--mm/memory.c43
-rw-r--r--mm/memory_hotplug.c23
-rw-r--r--mm/mmap.c4
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/page_alloc.c74
-rw-r--r--mm/page_counter.c100
-rw-r--r--mm/shmem.c382
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slob.c4
-rw-r--r--mm/slub.c112
-rw-r--r--mm/sparse.c6
-rw-r--r--mm/swap.c3
-rw-r--r--mm/swap_slots.c10
-rw-r--r--mm/swap_state.c3
-rw-r--r--mm/userfaultfd.c22
-rw-r--r--mm/util.c6
-rw-r--r--mm/vmalloc.c41
-rw-r--r--mm/vmpressure.c35
-rw-r--r--mm/vmscan.c38
-rw-r--r--net/9p/client.c13
-rw-r--r--net/9p/trans_xen.c1
-rw-r--r--samples/auxdisplay/cfag12864b-example.c16
-rw-r--r--scripts/Makefile.lib3
-rwxr-xr-xscripts/checkpatch.pl12
-rw-r--r--scripts/dtc/checks.c199
-rw-r--r--scripts/dtc/dtc-lexer.l7
-rw-r--r--scripts/dtc/dtc-parser.y39
-rw-r--r--scripts/dtc/dtc.h4
-rw-r--r--scripts/dtc/livetree.c26
-rw-r--r--scripts/dtc/version_gen.h2
-rwxr-xr-xscripts/get_maintainer.pl17
-rw-r--r--scripts/selinux/mdp/mdp.c1
-rwxr-xr-xscripts/tags.sh6
-rw-r--r--security/integrity/evm/Kconfig11
-rw-r--r--security/integrity/evm/evm.h7
-rw-r--r--security/integrity/evm/evm_crypto.c10
-rw-r--r--security/integrity/evm/evm_main.c79
-rw-r--r--security/integrity/evm/evm_secfs.c206
-rw-r--r--security/integrity/iint.c18
-rw-r--r--security/integrity/ima/ima.h1
-rw-r--r--security/integrity/ima/ima_fs.c18
-rw-r--r--security/integrity/ima/ima_kexec.c2
-rw-r--r--security/integrity/ima/ima_main.c19
-rw-r--r--security/integrity/ima/ima_policy.c70
-rw-r--r--security/integrity/ima/ima_template_lib.c2
-rw-r--r--security/integrity/integrity.h2
-rw-r--r--security/smack/smack_lsm.c12
-rw-r--r--tools/iio/iio_generic_buffer.c13
-rw-r--r--tools/objtool/check.c22
-rw-r--r--tools/perf/Documentation/perf-list.txt6
-rw-r--r--tools/perf/Documentation/perf-record.txt3
-rw-r--r--tools/perf/Documentation/perf-script-python.txt26
-rw-r--r--tools/perf/Documentation/perf-stat.txt40
-rw-r--r--tools/perf/arch/common.c4
-rw-r--r--tools/perf/arch/common.h4
-rw-r--r--tools/perf/builtin-annotate.c36
-rw-r--r--tools/perf/builtin-c2c.c2
-rw-r--r--tools/perf/builtin-kvm.c2
-rw-r--r--tools/perf/builtin-probe.c3
-rw-r--r--tools/perf/builtin-report.c39
-rw-r--r--tools/perf/builtin-sched.c14
-rw-r--r--tools/perf/builtin-script.c12
-rw-r--r--tools/perf/builtin-stat.c28
-rw-r--r--tools/perf/builtin-top.c48
-rw-r--r--tools/perf/builtin-trace.c2
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/scripts/python/bin/powerpc-hcalls-record2
-rw-r--r--tools/perf/scripts/python/bin/powerpc-hcalls-report2
-rw-r--r--tools/perf/scripts/python/powerpc-hcalls.py200
-rw-r--r--tools/perf/tests/code-reading.c1
-rw-r--r--tools/perf/tests/kmod-path.c16
-rw-r--r--tools/perf/tests/parse-events.c4
-rw-r--r--tools/perf/tests/python-use.c3
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh2
-rw-r--r--tools/perf/ui/browsers/annotate.c21
-rw-r--r--tools/perf/ui/browsers/hists.c43
-rw-r--r--tools/perf/ui/browsers/hists.h3
-rw-r--r--tools/perf/ui/gtk/annotate.c2
-rw-r--r--tools/perf/ui/gtk/hists.c5
-rw-r--r--tools/perf/ui/hist.c2
-rw-r--r--tools/perf/ui/stdio/hist.c4
-rw-r--r--tools/perf/util/Build1
-rw-r--r--tools/perf/util/annotate.c165
-rw-r--r--tools/perf/util/annotate.h53
-rw-r--r--tools/perf/util/cgroup.c9
-rw-r--r--tools/perf/util/dso.c2
-rw-r--r--tools/perf/util/evsel.c4
-rw-r--r--tools/perf/util/evsel.h5
-rw-r--r--tools/perf/util/header.c24
-rw-r--r--tools/perf/util/hist.c15
-rw-r--r--tools/perf/util/hist.h26
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c23
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h9
-rw-r--r--tools/perf/util/intel-pt.c5
-rw-r--r--tools/perf/util/map.c26
-rw-r--r--tools/perf/util/map.h1
-rw-r--r--tools/perf/util/parse-events.l18
-rw-r--r--tools/perf/util/parse-events.y14
-rw-r--r--tools/perf/util/probe-event.c3
-rw-r--r--tools/perf/util/quote.c62
-rw-r--r--tools/perf/util/quote.h31
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c250
-rw-r--r--tools/perf/util/session.c2
-rw-r--r--tools/perf/util/sort.c81
-rw-r--r--tools/perf/util/sort.h7
-rw-r--r--tools/perf/util/symbol.c3
-rw-r--r--tools/perf/util/symbol.h3
-rw-r--r--tools/perf/util/top.h3
-rw-r--r--tools/testing/nvdimm/test/nfit.c104
-rw-r--r--tools/testing/selftests/Makefile2
-rw-r--r--tools/testing/selftests/lib.mk4
-rw-r--r--tools/testing/selftests/proc/.gitignore3
-rw-r--r--tools/testing/selftests/proc/Makefile5
-rw-r--r--tools/testing/selftests/proc/fd-001-lookup.c168
-rw-r--r--tools/testing/selftests/proc/fd-002-posix-eq.c57
-rw-r--r--tools/testing/selftests/proc/fd-003-kthread.c178
-rw-r--r--tools/testing/selftests/proc/proc-uptime.h16
-rw-r--r--tools/testing/selftests/proc/proc.h39
-rw-r--r--tools/testing/selftests/proc/read.c17
-rw-r--r--tools/testing/selftests/rseq/.gitignore6
-rw-r--r--tools/testing/selftests/rseq/Makefile30
-rw-r--r--tools/testing/selftests/rseq/basic_percpu_ops_test.c312
-rw-r--r--tools/testing/selftests/rseq/basic_test.c56
-rw-r--r--tools/testing/selftests/rseq/param_test.c1260
-rw-r--r--tools/testing/selftests/rseq/rseq-arm.h715
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc.h671
-rw-r--r--tools/testing/selftests/rseq/rseq-skip.h65
-rw-r--r--tools/testing/selftests/rseq/rseq-x86.h1132
-rw-r--r--tools/testing/selftests/rseq/rseq.c117
-rw-r--r--tools/testing/selftests/rseq/rseq.h147
-rw-r--r--tools/testing/selftests/rseq/run_param_test.sh121
-rw-r--r--tools/testing/selftests/sparc64/Makefile46
-rw-r--r--tools/testing/selftests/sparc64/drivers/.gitignore1
-rw-r--r--tools/testing/selftests/sparc64/drivers/Makefile15
-rw-r--r--tools/testing/selftests/sparc64/drivers/adi-test.c721
-rwxr-xr-xtools/testing/selftests/sparc64/drivers/drivers_test.sh30
-rwxr-xr-xtools/testing/selftests/sparc64/run.sh3
-rw-r--r--tools/vm/page-types.c1
-rw-r--r--virt/kvm/arm/arm.c4
-rw-r--r--virt/kvm/arm/psci.c18
2519 files changed, 83476 insertions, 247802 deletions
diff --git a/Documentation/ABI/removed/sysfs-bus-nfit b/Documentation/ABI/removed/sysfs-bus-nfit
new file mode 100644
index 000000000000..ae8c1ca53828
--- /dev/null
+++ b/Documentation/ABI/removed/sysfs-bus-nfit
@@ -0,0 +1,17 @@
+What: /sys/bus/nd/devices/regionX/nfit/ecc_unit_size
+Date: Aug, 2017
+KernelVersion: v4.14 (Removed v4.18)
+Contact: linux-nvdimm@lists.01.org
+Description:
+ (RO) Size of a write request to a DIMM that will not incur a
+ read-modify-write cycle at the memory controller.
+
+ When the nfit driver initializes it runs an ARS (Address Range
+ Scrub) operation across every pmem range. Part of that process
+ involves determining the ARS capabilities of a given address
+ range. One of the capabilities that is reported is the 'Clear
+ Uncorrectable Error Range Length Unit Size' (see: ACPI 6.2
+ section 9.20.7.4 Function Index 1 - Query ARS Capabilities).
+ This property indicates the boundary at which the NVDIMM may
+ need to perform read-modify-write cycles to maintain ECC (Error
+ Correcting Code) blocks.
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
index d12cb2eae9ee..201d10319fa1 100644
--- a/Documentation/ABI/testing/evm
+++ b/Documentation/ABI/testing/evm
@@ -57,3 +57,16 @@ Description:
dracut (via 97masterkey and 98integrity) and systemd (via
core/ima-setup) have support for loading keys at boot
time.
+
+What: security/integrity/evm/evm_xattrs
+Date: April 2018
+Contact: Matthew Garrett <mjg59@google.com>
+Description:
+ Shows the set of extended attributes used to calculate or
+ validate the EVM signature, and allows additional attributes
+ to be added at runtime. Any signatures generated after
+			additional attributes are added (and on files possessing those
+ additional attributes) will only be valid if the same
+ additional attributes are configured on system boot. Writing
+ a single period (.) will lock the xattr list from any further
+ modification.
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index b8465e00ba5f..74c6702de74e 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -21,7 +21,7 @@ Description:
audit | hash | dont_hash
condition:= base | lsm [option]
base: [[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
- [euid=] [fowner=]]
+ [euid=] [fowner=] [fsname=]]
lsm: [[subj_user=] [subj_role=] [subj_type=]
[obj_user=] [obj_role=] [obj_type=]]
option: [[appraise_type=]] [permit_directio]
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 6a5f34b4d5b9..731146c3b138 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -190,6 +190,13 @@ Description:
but should match other such assignments on device).
Units after application of scale and offset are m/s^2.
+What: /sys/bus/iio/devices/iio:deviceX/in_angl_raw
+KernelVersion: 4.17
+Contact: linux-iio@vger.kernel.org
+Description:
+ Angle of rotation. Units after application of scale and offset
+ are radians.
+
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_raw
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_raw
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_raw
@@ -297,6 +304,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_pressure_offset
What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_offset
What: /sys/bus/iio/devices/iio:deviceX/in_magn_offset
What: /sys/bus/iio/devices/iio:deviceX/in_rot_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_angl_offset
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -350,6 +358,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale
What: /sys/bus/iio/devices/iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_scale
What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_scale
What: /sys/bus/iio/devices/iio:deviceX/in_countY_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_angl_scale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
diff --git a/Documentation/ABI/testing/sysfs-bus-nfit b/Documentation/ABI/testing/sysfs-bus-nfit
index 619eb8ca0f99..a1cb44dcb908 100644
--- a/Documentation/ABI/testing/sysfs-bus-nfit
+++ b/Documentation/ABI/testing/sysfs-bus-nfit
@@ -212,22 +212,3 @@ Description:
range. Used by NVDIMM Region Mapping Structure to uniquely refer
to this structure. Value of 0 is reserved and not used as an
index.
-
-
-What: /sys/bus/nd/devices/regionX/nfit/ecc_unit_size
-Date: Aug, 2017
-KernelVersion: v4.14
-Contact: linux-nvdimm@lists.01.org
-Description:
- (RO) Size of a write request to a DIMM that will not incur a
- read-modify-write cycle at the memory controller.
-
- When the nfit driver initializes it runs an ARS (Address Range
- Scrub) operation across every pmem range. Part of that process
- involves determining the ARS capabilities of a given address
- range. One of the capabilities that is reported is the 'Clear
- Uncorrectable Error Range Length Unit Size' (see: ACPI 6.2
- section 9.20.7.4 Function Index 1 - Query ARS Capabilities).
- This property indicates the boundary at which the NVDIMM may
- need to perform read-modify-write cycles to maintain ECC (Error
- Correcting Code) blocks.
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
index f34e592301d1..3bc7c0a95c92 100644
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -232,3 +232,11 @@ Description:
of the parent (another partition or a flash device) in bytes.
This attribute is absent on flash devices, so it can be used
to distinguish them from partitions.
+
+What: /sys/class/mtd/mtdX/oobavail
+Date: April 2018
+KernelVersion: 4.16
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Number of bytes available for a client to place data into
+ the out of band area.
diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt
index 0b6bb3ef449e..688b69121e82 100644
--- a/Documentation/PCI/pci-error-recovery.txt
+++ b/Documentation/PCI/pci-error-recovery.txt
@@ -110,7 +110,7 @@ The actual steps taken by a platform to recover from a PCI error
event will be platform-dependent, but will follow the general
sequence described below.
-STEP 0: Error Event
+STEP 0: Error Event: ERR_NONFATAL
-------------------
A PCI bus error is detected by the PCI hardware. On powerpc, the slot
is isolated, in that all I/O is blocked: all reads return 0xffffffff,
@@ -228,13 +228,7 @@ proceeds to either STEP3 (Link Reset) or to STEP 5 (Resume Operations).
If any driver returned PCI_ERS_RESULT_NEED_RESET, then the platform
proceeds to STEP 4 (Slot Reset)
-STEP 3: Link Reset
-------------------
-The platform resets the link. This is a PCI-Express specific step
-and is done whenever a fatal error has been detected that can be
-"solved" by resetting the link.
-
-STEP 4: Slot Reset
+STEP 3: Slot Reset
------------------
In response to a return value of PCI_ERS_RESULT_NEED_RESET, the
@@ -320,7 +314,7 @@ Failure).
>>> However, it probably should.
-STEP 5: Resume Operations
+STEP 4: Resume Operations
-------------------------
The platform will call the resume() callback on all affected device
drivers if all drivers on the segment have returned
@@ -332,7 +326,7 @@ a result code.
At this point, if a new error happens, the platform will restart
a new error recovery sequence.
-STEP 6: Permanent Failure
+STEP 5: Permanent Failure
-------------------------
A "permanent failure" has occurred, and the platform cannot recover
the device. The platform will call error_detected() with a
@@ -355,6 +349,27 @@ errors. See the discussion in powerpc/eeh-pci-error-recovery.txt
for additional detail on real-life experience of the causes of
software errors.
+STEP 0: Error Event: ERR_FATAL
+-------------------
+A PCI bus error is detected by the PCI hardware. On powerpc, the slot is
+isolated, in that all I/O is blocked: all reads return 0xffffffff, all
+writes are ignored.
+
+STEP 1: Remove devices
+--------------------
+The platform removes the affected devices. Depending on the error agent,
+this could be this port for all subordinates, or an upstream component
+(likely a downstream port).
+
+STEP 2: Reset link
+--------------------
+The platform resets the link. This is a PCI-Express specific step and is
+done whenever a fatal error has been detected that can be "solved" by
+resetting the link.
+
+STEP 3: Re-enumerate the devices
+--------------------
+Initiates the re-enumeration.
Conclusion; General Remarks
---------------------------
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 74cdeaed9f7a..8a2c52d5c53b 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1001,14 +1001,44 @@ PAGE_SIZE multiple when read back.
The total amount of memory currently being used by the cgroup
and its descendants.
+ memory.min
+ A read-write single value file which exists on non-root
+ cgroups. The default is "0".
+
+ Hard memory protection. If the memory usage of a cgroup
+ is within its effective min boundary, the cgroup's memory
+ won't be reclaimed under any conditions. If there is no
+ unprotected reclaimable memory available, OOM killer
+ is invoked.
+
+ Effective min boundary is limited by memory.min values of
+ all ancestor cgroups. If there is memory.min overcommitment
+ (child cgroup or cgroups are requiring more protected memory
+ than parent will allow), then each child cgroup will get
+ the part of parent's protection proportional to its
+ actual memory usage below memory.min.
+
+ Putting more memory than generally available under this
+ protection is discouraged and may lead to constant OOMs.
+
+ If a memory cgroup is not populated with processes,
+ its memory.min is ignored.
+
memory.low
A read-write single value file which exists on non-root
cgroups. The default is "0".
- Best-effort memory protection. If the memory usages of a
- cgroup and all its ancestors are below their low boundaries,
- the cgroup's memory won't be reclaimed unless memory can be
- reclaimed from unprotected cgroups.
+ Best-effort memory protection. If the memory usage of a
+ cgroup is within its effective low boundary, the cgroup's
+ memory won't be reclaimed unless memory can be reclaimed
+ from unprotected cgroups.
+
+ Effective low boundary is limited by memory.low values of
+ all ancestor cgroups. If there is memory.low overcommitment
+ (child cgroup or cgroups are requiring more protected memory
+ than parent will allow), then each child cgroup will get
+ the part of parent's protection proportional to its
+ actual memory usage below memory.low.
Putting more memory than generally available under this
protection is discouraged.
@@ -1199,6 +1229,27 @@ PAGE_SIZE multiple when read back.
Swap usage hard limit. If a cgroup's swap usage reaches this
limit, anonymous memory of the cgroup will not be swapped out.
+ memory.swap.events
+ A read-only flat-keyed file which exists on non-root cgroups.
+ The following entries are defined. Unless specified
+ otherwise, a value change in this file generates a file
+ modified event.
+
+ max
+ The number of times the cgroup's swap usage was about
+ to go over the max boundary and swap allocation
+ failed.
+
+ fail
+ The number of times swap allocation failed either
+ because of running out of swap system-wide or max
+ limit.
+
+ When reduced under the current usage, the existing swap
+ entries are reclaimed gradually and the swap usage may stay
+ higher than the limit for an extended period of time. This
+ reduces the impact on the workload and memory management.
+
Usage Guidelines
~~~~~~~~~~~~~~~~
@@ -1934,17 +1985,8 @@ system performance due to overreclaim, to the point where the feature
becomes self-defeating.
The memory.low boundary on the other hand is a top-down allocated
-reserve. A cgroup enjoys reclaim protection when it and all its
-ancestors are below their low boundaries, which makes delegation of
-subtrees possible. Secondly, new cgroups have no reserve per default
-and in the common case most cgroups are eligible for the preferred
-reclaim pass. This allows the new low boundary to be efficiently
-implemented with just a minor addition to the generic reclaim code,
-without the need for out-of-band data structures and reclaim passes.
-Because the generic reclaim code considers all cgroups except for the
-ones running low in the preferred first reclaim pass, overreclaim of
-individual groups is eliminated as well, resulting in much better
-overall workload performance.
+reserve. A cgroup enjoys reclaim protection when it's within its low,
+which makes delegation of subtrees possible.
The original high boundary, the hard limit, is defined as a strict
limit that can not budge, even if the OOM killer has to be called.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1beb30d8d7fc..638342d0a095 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3162,6 +3162,8 @@
on: Turn realloc on
realloc same as realloc=on
noari do not use PCIe ARI.
+ noats [PCIE, Intel-IOMMU, AMD-IOMMU]
+ do not use PCIe ATS (and IOMMU device IOTLB).
pcie_scan_all Scan all possible PCIe devices. Otherwise we
only look for one device below a PCIe downstream
port.
@@ -4104,6 +4106,23 @@
expediting. Set to zero to disable automatic
expediting.
+ ssbd= [ARM64,HW]
+ Speculative Store Bypass Disable control
+
+ On CPUs that are vulnerable to the Speculative
+ Store Bypass vulnerability and offer a
+ firmware based mitigation, this parameter
+ indicates how the mitigation should be used:
+
+			force-on:  Unconditionally enable mitigation
+				   for both kernel and userspace
+			force-off: Unconditionally disable mitigation
+				   for both kernel and userspace
+ kernel: Always enable mitigation in the
+ kernel, and offer a prctl interface
+ to allow userspace to register its
+ interest in being mitigated too.
+
stack_guard_gap= [MM]
override the default stack gap protection. The value
is in page units and it defines how many pages prior
diff --git a/Documentation/misc-devices/lcd-panel-cgram.txt b/Documentation/auxdisplay/lcd-panel-cgram.txt
index 7f82c905763d..7f82c905763d 100644
--- a/Documentation/misc-devices/lcd-panel-cgram.txt
+++ b/Documentation/auxdisplay/lcd-panel-cgram.txt
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 257e65714c6a..875b2b56b87f 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -218,6 +218,7 @@ line of text and contains the following stats separated by whitespace:
same_pages the number of same element filled pages written to this disk.
No memory is allocated for such pages.
pages_compacted the number of pages freed during compaction
+ huge_pages the number of incompressible pages
9) Deactivate:
swapoff /dev/zram0
@@ -242,5 +243,29 @@ to backing storage rather than keeping it in memory.
User should set up backing device via /sys/block/zramX/backing_dev
before disksize setting.
+= memory tracking
+
+With CONFIG_ZRAM_MEMORY_TRACKING, user can know information of the
+zram block. It could be useful to catch cold or incompressible
+pages of the process with pagemap.
+If you enable the feature, you could see block state via
+/sys/kernel/debug/zram/zram0/block_state. The output is as follows:
+
+ 300 75.033841 .wh
+ 301 63.806904 s..
+ 302 63.806919 ..h
+
+First column is zram's block index.
+Second column is access time since the system was booted
+Third column is state of the block.
+(s: same page
+w: written page to backing store
+h: huge page)
+
+The first line of the above example says that the 300th block was accessed
+at 75.033841 sec and that the block's state is huge, so it was written back
+to the backing storage. It's a debugging feature, so no one should rely on
+it to work properly.
+
Nitin Gupta
ngupta@vflare.org
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,g3dsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,g3dsys.txt
new file mode 100644
index 000000000000..7de43bf41fdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,g3dsys.txt
@@ -0,0 +1,30 @@
+MediaTek g3dsys controller
+============================
+
+The MediaTek g3dsys controller provides various clocks and reset controller to
+the GPU.
+
+Required Properties:
+
+- compatible: Should be:
+ - "mediatek,mt2701-g3dsys", "syscon":
+ for MT2701 SoC
+ - "mediatek,mt7623-g3dsys", "mediatek,mt2701-g3dsys", "syscon":
+ for MT7623 SoC
+- #clock-cells: Must be 1
+- #reset-cells: Must be 1
+
+The g3dsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+g3dsys: clock-controller@13000000 {
+ compatible = "mediatek,mt7623-g3dsys",
+ "mediatek,mt2701-g3dsys",
+ "syscon";
+ reg = <0 0x13000000 0 0x200>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/ux500/boards.txt b/Documentation/devicetree/bindings/arm/ux500/boards.txt
index 7334c24625fc..0fa429534f49 100644
--- a/Documentation/devicetree/bindings/arm/ux500/boards.txt
+++ b/Documentation/devicetree/bindings/arm/ux500/boards.txt
@@ -26,7 +26,7 @@ interrupt-controller:
see binding for interrupt-controller/arm,gic.txt
timer:
- see binding for arm/twd.txt
+ see binding for timer/arm,twd.txt
clocks:
see binding for clocks/ux500.txt
diff --git a/Documentation/devicetree/bindings/clock/actions,s900-cmu.txt b/Documentation/devicetree/bindings/clock/actions,s900-cmu.txt
new file mode 100644
index 000000000000..93e4fb827cd6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/actions,s900-cmu.txt
@@ -0,0 +1,47 @@
+* Actions S900 Clock Management Unit (CMU)
+
+The Actions S900 clock management unit generates and supplies clock to various
+controllers within the SoC. The clock binding described here is applicable to
+S900 SoC.
+
+Required Properties:
+
+- compatible: should be "actions,s900-cmu"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- clocks: Reference to the parent clocks ("hosc", "losc")
+- #clock-cells: should be 1.
+
+Each clock is assigned an identifier, and client nodes can use this identifier
+to specify the clock which they consume.
+
+All available clocks are defined as preprocessor macros in
+dt-bindings/clock/actions,s900-cmu.h header and can be used in device
+tree sources.
+
+External clocks:
+
+The hosc clock used as input for the plls is generated outside the SoC. It is
+expected that it is defined using standard clock bindings as "hosc".
+
+Actions S900 CMU also requires one more clock:
+ - "losc" - internal low frequency oscillator
+
+Example: Clock Management Unit node:
+
+ cmu: clock-controller@e0160000 {
+ compatible = "actions,s900-cmu";
+ reg = <0x0 0xe0160000 0x0 0x1000>;
+ clocks = <&hosc>, <&losc>;
+ #clock-cells = <1>;
+ };
+
+Example: UART controller node that consumes clock generated by the clock
+management unit:
+
+ uart: serial@e012a000 {
+ compatible = "actions,s900-uart", "actions,owl-uart";
+ reg = <0x0 0xe012a000 0x0 0x2000>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_UART5>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
index 786dc39ca904..3a880528030e 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
@@ -9,6 +9,7 @@ Required Properties:
- GXBB (S905) : "amlogic,meson-gxbb-aoclkc"
- GXL (S905X, S905D) : "amlogic,meson-gxl-aoclkc"
- GXM (S912) : "amlogic,meson-gxm-aoclkc"
+ - AXG (A113D, A113X) : "amlogic,meson-axg-aoclkc"
followed by the common "amlogic,meson-gx-aoclkc"
- #clock-cells: should be 1.
diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
index f8e4a93466cb..ab730ea0a560 100644
--- a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
@@ -276,36 +276,38 @@ These clock IDs are defined in:
clk_ts_500_ref genpll2 2 BCM_SR_GENPLL2_TS_500_REF_CLK
clk_125_nitro genpll2 3 BCM_SR_GENPLL2_125_NITRO_CLK
clk_chimp genpll2 4 BCM_SR_GENPLL2_CHIMP_CLK
- clk_nic_flash genpll2 5 BCM_SR_GENPLL2_NIC_FLASH
+ clk_nic_flash genpll2 5 BCM_SR_GENPLL2_NIC_FLASH_CLK
+ clk_fs genpll2 6 BCM_SR_GENPLL2_FS_CLK
genpll3 crystal 0 BCM_SR_GENPLL3
clk_hsls genpll3 1 BCM_SR_GENPLL3_HSLS_CLK
clk_sdio genpll3 2 BCM_SR_GENPLL3_SDIO_CLK
genpll4 crystal 0 BCM_SR_GENPLL4
- ccn genpll4 1 BCM_SR_GENPLL4_CCN_CLK
+ clk_ccn genpll4 1 BCM_SR_GENPLL4_CCN_CLK
clk_tpiu_pll genpll4 2 BCM_SR_GENPLL4_TPIU_PLL_CLK
- noc_clk genpll4 3 BCM_SR_GENPLL4_NOC_CLK
+ clk_noc genpll4 3 BCM_SR_GENPLL4_NOC_CLK
clk_chclk_fs4 genpll4 4 BCM_SR_GENPLL4_CHCLK_FS4_CLK
clk_bridge_fscpu genpll4 5 BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK
-
genpll5 crystal 0 BCM_SR_GENPLL5
- fs4_hf_clk genpll5 1 BCM_SR_GENPLL5_FS4_HF_CLK
- crypto_ae_clk genpll5 2 BCM_SR_GENPLL5_CRYPTO_AE_CLK
- raid_ae_clk genpll5 3 BCM_SR_GENPLL5_RAID_AE_CLK
+ clk_fs4_hf genpll5 1 BCM_SR_GENPLL5_FS4_HF_CLK
+ clk_crypto_ae genpll5 2 BCM_SR_GENPLL5_CRYPTO_AE_CLK
+ clk_raid_ae genpll5 3 BCM_SR_GENPLL5_RAID_AE_CLK
genpll6 crystal 0 BCM_SR_GENPLL6
- 48_usb genpll6 1 BCM_SR_GENPLL6_48_USB_CLK
+ clk_48_usb genpll6 1 BCM_SR_GENPLL6_48_USB_CLK
lcpll0 crystal 0 BCM_SR_LCPLL0
clk_sata_refp lcpll0 1 BCM_SR_LCPLL0_SATA_REFP_CLK
clk_sata_refn lcpll0 2 BCM_SR_LCPLL0_SATA_REFN_CLK
- clk_usb_ref lcpll0 3 BCM_SR_LCPLL0_USB_REF_CLK
- sata_refpn lcpll0 3 BCM_SR_LCPLL0_SATA_REFPN_CLK
+ clk_sata_350 lcpll0 3 BCM_SR_LCPLL0_SATA_350_CLK
+ clk_sata_500 lcpll0 4 BCM_SR_LCPLL0_SATA_500_CLK
lcpll1 crystal 0 BCM_SR_LCPLL1
- wan lcpll1 1 BCM_SR_LCPLL0_WAN_CLK
+ clk_wan lcpll1 1 BCM_SR_LCPLL1_WAN_CLK
+ clk_usb_ref lcpll1 2 BCM_SR_LCPLL1_USB_REF_CLK
+ clk_crmu_ts lcpll1 3 BCM_SR_LCPLL1_CRMU_TS_CLK
lcpll_pcie crystal 0 BCM_SR_LCPLL_PCIE
- pcie_phy_ref lcpll1 1 BCM_SR_LCPLL_PCIE_PHY_REF_CLK
+ clk_pcie_phy_ref lcpll1 1 BCM_SR_LCPLL_PCIE_PHY_REF_CLK
diff --git a/Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.txt b/Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.txt
new file mode 100644
index 000000000000..f82064546d11
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nuvoton,npcm750-clk.txt
@@ -0,0 +1,100 @@
+* Nuvoton NPCM7XX Clock Controller
+
+Nuvoton Poleg BMC NPCM7XX contains an integrated clock controller, which
+generates and supplies clocks to all modules within the BMC.
+
+External clocks:
+
+There are six fixed clocks that are generated outside the BMC. All clocks are of
+a known fixed value that cannot be changed. clk_refclk, clk_mcbypck and
+clk_sysbypck are inputs to the clock controller.
+clk_rg1refck, clk_rg2refck and clk_xin are external clocks supplying the
+network. They are set on the device tree, but not used by the clock module. The
+network devices use them directly.
+Example can be found below.
+
+All available clocks are defined as preprocessor macros in:
+dt-bindings/clock/nuvoton,npcm7xx-clock.h
+and can be reused as DT sources.
+
+Required Properties of clock controller:
+
+ - compatible: "nuvoton,npcm750-clk" : for clock controller of Nuvoton
+ Poleg BMC NPCM750
+
+ - reg: physical base address of the clock controller and length of
+ memory mapped region.
+
+ - #clock-cells: should be 1.
+
+Example: Clock controller node:
+
+ clk: clock-controller@f0801000 {
+ compatible = "nuvoton,npcm750-clk";
+ #clock-cells = <1>;
+ reg = <0xf0801000 0x1000>;
+ clock-names = "refclk", "sysbypck", "mcbypck";
+ clocks = <&clk_refclk>, <&clk_sysbypck>, <&clk_mcbypck>;
+ };
+
+Example: Required external clocks for network:
+
+ /* external reference clock */
+ clk_refclk: clk-refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "refclk";
+ };
+
+ /* external reference clock for cpu. float in normal operation */
+ clk_sysbypck: clk-sysbypck {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <800000000>;
+ clock-output-names = "sysbypck";
+ };
+
+ /* external reference clock for MC. float in normal operation */
+ clk_mcbypck: clk-mcbypck {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <800000000>;
+ clock-output-names = "mcbypck";
+ };
+
+ /* external clock signal rg1refck, supplied by the phy */
+ clk_rg1refck: clk-rg1refck {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "clk_rg1refck";
+ };
+
+ /* external clock signal rg2refck, supplied by the phy */
+ clk_rg2refck: clk-rg2refck {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "clk_rg2refck";
+ };
+
+ clk_xin: clk-xin {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = "clk_xin";
+ };
+
+
+Example: GMAC controller node that consumes two clocks: a generated clk by the
+clock controller and a fixed clock from DT (clk_rg1refck).
+
+ ethernet0: ethernet@f0802000 {
+ compatible = "snps,dwmac";
+ reg = <0xf0802000 0x2000>;
+ interrupts = <0 14 4>;
+ interrupt-names = "macirq";
+ clocks = <&clk_rg1refck>, <&clk NPCM7XX_CLK_AHB>;
+ clock-names = "stmmaceth", "clk_gmac";
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 551d03be9665..664ea1fd6c76 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -17,7 +17,9 @@ Required properties :
"qcom,gcc-msm8974pro-ac"
"qcom,gcc-msm8994"
"qcom,gcc-msm8996"
+ "qcom,gcc-msm8998"
"qcom,gcc-mdm9615"
+ "qcom,gcc-sdm845"
- reg : shall contain base register location and length
- #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
new file mode 100644
index 000000000000..3c007653da31
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
@@ -0,0 +1,22 @@
+Qualcomm Technologies, Inc. RPMh Clocks
+-------------------------------------------------------
+
+Resource Power Manager Hardened (RPMh) manages shared resources on
+some Qualcomm Technologies Inc. SoCs. It accepts clock requests from
+other hardware subsystems via RSC to control clocks.
+
+Required properties :
+- compatible : shall contain "qcom,sdm845-rpmh-clk"
+
+- #clock-cells : must contain 1
+
+Example :
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+
+ &apps_rsc {
+ rpmhcc: clock-controller {
+ compatible = "qcom,sdm845-rpmh-clk";
+ #clock-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.txt b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
new file mode 100644
index 000000000000..e7c035afa778
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
@@ -0,0 +1,19 @@
+Qualcomm Video Clock & Reset Controller Binding
+-----------------------------------------------
+
+Required properties :
+- compatible : shall contain "qcom,sdm845-videocc"
+- reg : shall contain base register location and length
+- #clock-cells : from common clock binding, shall contain 1.
+- #power-domain-cells : from generic power domain binding, shall contain 1.
+
+Optional properties :
+- #reset-cells : from common reset binding, shall contain 1.
+
+Example:
+ videocc: clock-controller@ab00000 {
+ compatible = "qcom,sdm845-videocc";
+ reg = <0xab00000 0x10000>;
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
index 773a5226342f..db542abadb75 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
@@ -15,6 +15,7 @@ Required Properties:
- compatible: Must be one of:
- "renesas,r8a7743-cpg-mssr" for the r8a7743 SoC (RZ/G1M)
- "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E)
+ - "renesas,r8a77470-cpg-mssr" for the r8a77470 SoC (RZ/G1C)
- "renesas,r8a7790-cpg-mssr" for the r8a7790 SoC (R-Car H2)
- "renesas,r8a7791-cpg-mssr" for the r8a7791 SoC (R-Car M2-W)
- "renesas,r8a7792-cpg-mssr" for the r8a7792 SoC (R-Car V2H)
@@ -25,6 +26,7 @@ Required Properties:
- "renesas,r8a77965-cpg-mssr" for the r8a77965 SoC (R-Car M3-N)
- "renesas,r8a77970-cpg-mssr" for the r8a77970 SoC (R-Car V3M)
- "renesas,r8a77980-cpg-mssr" for the r8a77980 SoC (R-Car V3H)
+ - "renesas,r8a77990-cpg-mssr" for the r8a77990 SoC (R-Car E3)
- "renesas,r8a77995-cpg-mssr" for the r8a77995 SoC (R-Car D3)
- reg: Base address and length of the memory resource used by the CPG/MSSR
@@ -33,10 +35,12 @@ Required Properties:
- clocks: References to external parent clocks, one entry for each entry in
clock-names
- clock-names: List of external parent clock names. Valid names are:
- - "extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7792, r8a7793, r8a7794,
- r8a7795, r8a7796, r8a77965, r8a77970, r8a77980, r8a77995)
+ - "extal" (r8a7743, r8a7745, r8a77470, r8a7790, r8a7791, r8a7792,
+ r8a7793, r8a7794, r8a7795, r8a7796, r8a77965, r8a77970,
+ r8a77980, r8a77990, r8a77995)
- "extalr" (r8a7795, r8a7796, r8a77965, r8a77970, r8a77980)
- - "usb_extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7793, r8a7794)
+ - "usb_extal" (r8a7743, r8a7745, r8a77470, r8a7790, r8a7791, r8a7793,
+ r8a7794)
- #clock-cells: Must be 2
- For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
diff --git a/Documentation/devicetree/bindings/clock/rockchip.txt b/Documentation/devicetree/bindings/clock/rockchip.txt
deleted file mode 100644
index 22f6769e5d4a..000000000000
--- a/Documentation/devicetree/bindings/clock/rockchip.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-Device Tree Clock bindings for arch-rockchip
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-== Gate clocks ==
-
-These bindings are deprecated!
-Please use the soc specific CRU bindings instead.
-
-The gate registers form a continuos block which makes the dt node
-structure a matter of taste, as either all gates can be put into
-one gate clock spanning all registers or they can be divided into
-the 10 individual gates containing 16 clocks each.
-The code supports both approaches.
-
-Required properties:
-- compatible : "rockchip,rk2928-gate-clk"
-- reg : shall be the control register address(es) for the clock.
-- #clock-cells : from common clock binding; shall be set to 1
-- clock-output-names : the corresponding gate names that the clock controls
-- clocks : should contain the parent clock for each individual gate,
- therefore the number of clocks elements should match the number of
- clock-output-names
-
-Example using multiple gate clocks:
-
- clk_gates0: gate-clk@200000d0 {
- compatible = "rockchip,rk2928-gate-clk";
- reg = <0x200000d0 0x4>;
- clocks = <&dummy>, <&dummy>,
- <&dummy>, <&dummy>,
- <&dummy>, <&dummy>,
- <&dummy>, <&dummy>,
- <&dummy>, <&dummy>,
- <&dummy>, <&dummy>,
- <&dummy>, <&dummy>,
- <&dummy>, <&dummy>;
-
- clock-output-names =
- "gate_core_periph", "gate_cpu_gpll",
- "gate_ddrphy", "gate_aclk_cpu",
- "gate_hclk_cpu", "gate_pclk_cpu",
- "gate_atclk_cpu", "gate_i2s0",
- "gate_i2s0_frac", "gate_i2s1",
- "gate_i2s1_frac", "gate_i2s2",
- "gate_i2s2_frac", "gate_spdif",
- "gate_spdif_frac", "gate_testclk";
-
- #clock-cells = <1>;
- };
-
- clk_gates1: gate-clk@200000d4 {
- compatible = "rockchip,rk2928-gate-clk";
- reg = <0x200000d4 0x4>;
- clocks = <&xin24m>, <&xin24m>,
- <&xin24m>, <&dummy>,
- <&dummy>, <&xin24m>,
- <&xin24m>, <&dummy>,
- <&xin24m>, <&dummy>,
- <&xin24m>, <&dummy>,
- <&xin24m>, <&dummy>,
- <&xin24m>, <&dummy>;
-
- clock-output-names =
- "gate_timer0", "gate_timer1",
- "gate_timer2", "gate_jtag",
- "gate_aclk_lcdc1_src", "gate_otgphy0",
- "gate_otgphy1", "gate_ddr_gpll",
- "gate_uart0", "gate_frac_uart0",
- "gate_uart1", "gate_frac_uart1",
- "gate_uart2", "gate_frac_uart2",
- "gate_uart3", "gate_frac_uart3";
-
- #clock-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index 460ef27b1008..47d2e902ced4 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -21,6 +21,7 @@ Required properties :
- "allwinner,sun50i-a64-r-ccu"
- "allwinner,sun50i-h5-ccu"
- "allwinner,sun50i-h6-ccu"
+ - "allwinner,sun50i-h6-r-ccu"
- "nextthing,gr8-ccu"
- reg: Must contain the registers base address and length
@@ -35,7 +36,7 @@ Required properties :
For the main CCU on H6, one more clock is needed:
- "iosc": the SoC's internal frequency oscillator
-For the PRCM CCUs on A83T/H3/A64, two more clocks are needed:
+For the PRCM CCUs on A83T/H3/A64/H6, two more clocks are needed:
- "pll-periph": the SoC's peripheral PLL from the main CCU
- "iosc": the SoC's internal frequency oscillator
diff --git a/Documentation/devicetree/bindings/dma/k3dma.txt b/Documentation/devicetree/bindings/dma/k3dma.txt
index 23f8d712c3ce..4945aeac4dc4 100644
--- a/Documentation/devicetree/bindings/dma/k3dma.txt
+++ b/Documentation/devicetree/bindings/dma/k3dma.txt
@@ -23,7 +23,6 @@ Controller:
dma-requests = <27>;
interrupts = <0 12 4>;
clocks = <&pclk>;
- status = "disable";
};
Client:
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 61315eaa7660..b1ba639554c0 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -29,6 +29,7 @@ Required Properties:
- "renesas,dmac-r8a77965" (R-Car M3-N)
- "renesas,dmac-r8a77970" (R-Car V3M)
- "renesas,dmac-r8a77980" (R-Car V3H)
+ - "renesas,dmac-r8a77995" (R-Car D3)
- reg: base address and length of the registers block for the DMAC
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index 9dc935e24e55..482e54362d3e 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -12,6 +12,8 @@ Required Properties:
- "renesas,r8a7795-usb-dmac" (R-Car H3)
- "renesas,r8a7796-usb-dmac" (R-Car M3-W)
- "renesas,r8a77965-usb-dmac" (R-Car M3-N)
+ - "renesas,r8a77990-usb-dmac" (R-Car E3)
+ - "renesas,r8a77995-usb-dmac" (R-Car D3)
- reg: base address and length of the registers block for the DMAC
- interrupts: interrupt specifiers for the DMAC, one for each entry in
interrupt-names.
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 66026dcf53e1..3f15f6644527 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -190,7 +190,6 @@ mmc0: mmc@23000000 {
power-domains = <&k2g_pds 0xb>;
clocks = <&k2g_clks 0xb 1>, <&k2g_clks 0xb 2>;
clock-names = "fck", "mmchsdb_fck";
- status = "disabled";
};
------------------------------------------------------------------------------
diff --git a/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt b/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
index a767259dedad..1e442450747f 100644
--- a/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
+++ b/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
@@ -11,6 +11,10 @@ Optional properties:
- trans-gpios = <gpio-descriptor>; : GPIO for voltage translator enable
- mux-gpios = <gpio-descriptor>; : GPIO for pin multiplexing with other
functions (eg, external FSI masters)
+ - no-gpio-delays; : Don't add extra delays between GPIO
+ accesses. This is useful when the HW
+ GPIO block is running at a low enough
+ frequency.
Examples:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
index d2a937682836..88f228665507 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
@@ -31,10 +31,15 @@ Required properties:
ti,tca9554
onnn,pca9654
exar,xra1202
+ - gpio-controller: if used as gpio expander.
+ - #gpio-cells: if used as gpio expander.
+ - interrupt-controller: if to be used as interrupt expander.
+ - #interrupt-cells: if to be used as interrupt expander.
Optional properties:
- reset-gpios: GPIO specification for the RESET input. This is an
active low signal to the PCA953x.
+ - vcc-supply: power supply regulator.
Example:
@@ -47,3 +52,32 @@ Example:
interrupt-parent = <&gpio3>;
interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
};
+
+
+Example with Interrupts:
+
+
+ gpio99: gpio@22 {
+ compatible = "nxp,pcal6524";
+ reg = <0x22>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>; /* gpio6_161 */
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vcc-supply = <&vdds_1v8_main>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names =
+ "hdmi-ct-hpd", "hdmi.ls-oe", "p02", "p03", "vibra", "fault2", "p06", "p07",
+ "en-usb", "en-host1", "en-host2", "chg-int", "p14", "p15", "mic-int", "en-modem",
+ "shdn-hs-amp", "chg-status+red", "green", "blue", "en-esata", "fault1", "p26", "p27";
+ };
+
+ ts3a227@3b {
+ compatible = "ti,ts3a227e";
+ reg = <0x3b>;
+ interrupt-parent = <&gpio99>;
+ interrupts = <14 IRQ_TYPE_EDGE_RISING>;
+ ti,micbias = <0>; /* 2.1V */
+ };
+
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 9474138d776e..378f1322211e 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -5,6 +5,7 @@ Required Properties:
- compatible: should contain one or more of the following:
- "renesas,gpio-r8a7743": for R8A7743 (RZ/G1M) compatible GPIO controller.
- "renesas,gpio-r8a7745": for R8A7745 (RZ/G1E) compatible GPIO controller.
+ - "renesas,gpio-r8a77470": for R8A77470 (RZ/G1C) compatible GPIO controller.
- "renesas,gpio-r8a7778": for R8A7778 (R-Car M1) compatible GPIO controller.
- "renesas,gpio-r8a7779": for R8A7779 (R-Car H1) compatible GPIO controller.
- "renesas,gpio-r8a7790": for R8A7790 (R-Car H2) compatible GPIO controller.
@@ -14,7 +15,9 @@ Required Properties:
- "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
- "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
- "renesas,gpio-r8a7796": for R8A7796 (R-Car M3-W) compatible GPIO controller.
+ - "renesas,gpio-r8a77965": for R8A77965 (R-Car M3-N) compatible GPIO controller.
- "renesas,gpio-r8a77970": for R8A77970 (R-Car V3M) compatible GPIO controller.
+ - "renesas,gpio-r8a77990": for R8A77990 (R-Car E3) compatible GPIO controller.
- "renesas,gpio-r8a77995": for R8A77995 (R-Car D3) compatible GPIO controller.
- "renesas,rcar-gen1-gpio": for a generic R-Car Gen1 GPIO controller.
- "renesas,rcar-gen2-gpio": for a generic R-Car Gen2 or RZ/G1 GPIO controller.
diff --git a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt b/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
index 4a75da7051bd..3c1118bc67f5 100644
--- a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
@@ -26,8 +26,13 @@ controller.
the second encodes the triger flags encoded as described in
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- interrupt-parent : The parent interrupt controller.
-- interrupts : The interrupt to the parent controller raised when GPIOs
- generate the interrupts.
+- interrupts : The interrupts to the parent controller raised when GPIOs
+ generate the interrupts. If the controller provides one combined interrupt
+ for all GPIOs, specify a single interrupt. If the controller provides one
+ interrupt for each GPIO, provide a list of interrupts that correspond to each
+ of the GPIO pins. When specifying multiple interrupts, if any are unconnected,
+ use the interrupts-extended property to specify the interrupts and set the
+ interrupt controller handle for unused interrupts to 0.
- snps,nr-gpios : The number of pins in the port, a single cell.
- resets : Reset line for the controller.
diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
index 1e6ee3deb4fa..d1acd5ea2737 100644
--- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
@@ -7,6 +7,7 @@ Required properties:
- "amlogic,meson-gxbb-saradc" for GXBB
- "amlogic,meson-gxl-saradc" for GXL
- "amlogic,meson-gxm-saradc" for GXM
+ - "amlogic,meson-axg-saradc" for AXG
along with the generic "amlogic,meson-saradc"
- reg: the physical base address and length of the registers
- interrupts: the interrupt indicating end of sampling
diff --git a/Documentation/devicetree/bindings/iio/adc/mcp320x.txt b/Documentation/devicetree/bindings/iio/adc/mcp320x.txt
index 7d64753df949..56373d643f76 100644
--- a/Documentation/devicetree/bindings/iio/adc/mcp320x.txt
+++ b/Documentation/devicetree/bindings/iio/adc/mcp320x.txt
@@ -49,7 +49,7 @@ Required properties:
Examples:
spi_controller {
mcp3x0x@0 {
- compatible = "mcp3002";
+ compatible = "microchip,mcp3002";
reg = <0>;
spi-max-frequency = <1000000>;
vref-supply = <&vref_reg>;
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
index 6c49db7f8ad2..6c49db7f8ad2 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
index e8bb8243e92c..f1ead43a1a95 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -24,8 +24,11 @@ Required properties:
- compatible: Should be one of:
"st,stm32f4-adc-core"
"st,stm32h7-adc-core"
+ "st,stm32mp1-adc-core"
- reg: Offset and length of the ADC block register set.
-- interrupts: Must contain the interrupt for ADC block.
+- interrupts: One or more interrupts for ADC block. Some parts like stm32f4
+ and stm32h7 share a common ADC interrupt line. stm32mp1 has two separate
+ interrupt lines, one for each ADC within ADC block.
- clocks: Core can use up to two clocks, depending on part used:
- "adc" clock: for the analog circuitry, common to all ADCs.
It's required on stm32f4.
@@ -53,6 +56,7 @@ Required properties:
- compatible: Should be one of:
"st,stm32f4-adc"
"st,stm32h7-adc"
+ "st,stm32mp1-adc"
- reg: Offset of ADC instance in ADC block (e.g. may be 0x0, 0x100, 0x200).
- clocks: Input clock private to this ADC instance. It's required only on
stm32f4, that has per instance clock input for registers access.
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
index ed7520d1d051..75ba25d062e1 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
@@ -8,14 +8,16 @@ It is mainly targeted for:
- PDM microphones (audio digital microphone)
It features up to 8 serial digital interfaces (SPI or Manchester) and
-up to 4 filters on stm32h7.
+up to 4 filters on stm32h7 or 6 filters on stm32mp1.
Each child node match with a filter instance.
Contents of a STM32 DFSDM root node:
------------------------------------
Required properties:
-- compatible: Should be "st,stm32h7-dfsdm".
+- compatible: Should be one of:
+ "st,stm32h7-dfsdm"
+ "st,stm32mp1-dfsdm"
- reg: Offset and length of the DFSDM block register set.
- clocks: IP and serial interfaces clocking. Should be set according
to rcc clock ID and "clock-names".
@@ -45,6 +47,7 @@ Required properties:
"st,stm32-dfsdm-adc" for sigma delta ADCs
"st,stm32-dfsdm-dmic" for audio digital microphone.
- reg: Specifies the DFSDM filter instance used.
+ Valid values are from 0 to 3 on stm32h7, 0 to 5 on stm32mp1.
- interrupts: IRQ lines connected to each DFSDM filter instance.
- st,adc-channels: List of single-ended channels muxed for this ADC.
valid values:
diff --git a/Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt b/Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt
new file mode 100644
index 000000000000..821b61b8c542
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt
@@ -0,0 +1,26 @@
+Current Sense Amplifier
+=======================
+
+When an io-channel measures the output voltage from a current sense
+amplifier, the interesting measurement is almost always the current
+through the sense resistor, not the voltage output. This binding
+describes such a current sense circuit.
+
+Required properties:
+- compatible : "current-sense-amplifier"
+- io-channels : Channel node of a voltage io-channel.
+- sense-resistor-micro-ohms : The sense resistance in microohms.
+
+Optional properties:
+- sense-gain-mult: Amplifier gain multiplier. The default is <1>.
+- sense-gain-div: Amplifier gain divider. The default is <1>.
+
+Example:
+
+sysi {
+ compatible = "current-sense-amplifier";
+ io-channels = <&tiadc 0>;
+
+ sense-resistor-micro-ohms = <20000>;
+	sense-gain-mult = <50>;
+};
diff --git a/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt b/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt
new file mode 100644
index 000000000000..0f67108a07b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt
@@ -0,0 +1,41 @@
+Current Sense Shunt
+===================
+
+When an io-channel measures the voltage over a current sense shunt,
+the interesting measurement is almost always the current through the
+shunt, not the voltage over it. This binding describes such a current
+sense circuit.
+
+Required properties:
+- compatible : "current-sense-shunt"
+- io-channels : Channel node of a voltage io-channel.
+- shunt-resistor-micro-ohms : The shunt resistance in microohms.
+
+Example:
+The system current is measured by measuring the voltage over a
+3.3 ohms shunt resistor.
+
+sysi {
+ compatible = "current-sense-shunt";
+ io-channels = <&tiadc 0>;
+
+ /* Divide the voltage by 3300000/1000000 (or 3.3) for the current. */
+ shunt-resistor-micro-ohms = <3300000>;
+};
+
+&i2c {
+ tiadc: adc@48 {
+ compatible = "ti,ads1015";
+ reg = <0x48>;
+ #io-channel-cells = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 { /* IN0,IN1 differential */
+ reg = <0>;
+ ti,gain = <1>;
+ ti,datarate = <4>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/iio/afe/voltage-divider.txt b/Documentation/devicetree/bindings/iio/afe/voltage-divider.txt
new file mode 100644
index 000000000000..b452a8406107
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/afe/voltage-divider.txt
@@ -0,0 +1,53 @@
+Voltage divider
+===============
+
+When an io-channel measures the midpoint of a voltage divider, the
+interesting voltage is often the voltage over the full resistance
+of the divider. This binding describes the voltage divider in such
+a circuit.
+
+ Vin ----.
+ |
+ .-----.
+ | R |
+ '-----'
+ |
+ +---- Vout
+ |
+ .-----.
+ | Rout|
+ '-----'
+ |
+ GND
+
+Required properties:
+- compatible : "voltage-divider"
+- io-channels : Channel node of a voltage io-channel measuring Vout.
+- output-ohms : Resistance Rout over which the output voltage is measured.
+ See full-ohms.
+- full-ohms : Resistance R + Rout for the full divider. The io-channel
+ is scaled by the Rout / (R + Rout) quotient.
+
+Example:
+The system voltage is circa 12V, but divided down with a 22/222
+voltage divider (R = 200 Ohms, Rout = 22 Ohms) and fed to an ADC.
+
+sysv {
+ compatible = "voltage-divider";
+ io-channels = <&maxadc 1>;
+
+ /* Scale the system voltage by 22/222 to fit the ADC range. */
+ output-ohms = <22>;
+ full-ohms = <222>; /* 200 + 22 */
+};
+
+&spi {
+ maxadc: adc@0 {
+ compatible = "maxim,max1027";
+ reg = <0>;
+ #io-channel-cells = <1>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <15 IRQ_TYPE_EDGE_RISING>;
+ spi-max-frequency = <1000000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/iio/dac/ltc2632.txt b/Documentation/devicetree/bindings/iio/dac/ltc2632.txt
index eb911e5a8ab4..e0d5fea33031 100644
--- a/Documentation/devicetree/bindings/iio/dac/ltc2632.txt
+++ b/Documentation/devicetree/bindings/iio/dac/ltc2632.txt
@@ -12,12 +12,26 @@ Required properties:
Property rules described in Documentation/devicetree/bindings/spi/spi-bus.txt
apply. In particular, "reg" and "spi-max-frequency" properties must be given.
+Optional properties:
+ - vref-supply: Phandle to the external reference voltage supply. This should
+ only be set if there is an external reference voltage connected to the VREF
+ pin. If the property is not set the internal reference is used.
+
Example:
+ vref: regulator-vref {
+ compatible = "regulator-fixed";
+ regulator-name = "vref-ltc2632";
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ };
+
spi_master {
dac: ltc2632@0 {
compatible = "lltc,ltc2632-l12";
reg = <0>; /* CS0 */
spi-max-frequency = <1000000>;
+ vref-supply = <&vref>; /* optional */
};
};
diff --git a/Documentation/devicetree/bindings/iio/dac/ti,dac5571.txt b/Documentation/devicetree/bindings/iio/dac/ti,dac5571.txt
new file mode 100644
index 000000000000..03af6b9a4d07
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/ti,dac5571.txt
@@ -0,0 +1,24 @@
+* Texas Instruments DAC5571 Family
+
+Required properties:
+ - compatible: Should contain
+ "ti,dac5571"
+ "ti,dac6571"
+ "ti,dac7571"
+ "ti,dac5574"
+ "ti,dac6574"
+ "ti,dac7574"
+ "ti,dac5573"
+ "ti,dac6573"
+ "ti,dac7573"
+ - reg: Should contain the DAC I2C address
+
+Optional properties:
+ - vref-supply: The regulator supply for DAC reference voltage
+
+Example:
+dac@0 {
+ compatible = "ti,dac5571";
+ reg = <0x4C>;
+ vref-supply = <&vdd_supply>;
+};
diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
index 2b4514592f83..5f4777e8cc9e 100644
--- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
+++ b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
@@ -8,10 +8,16 @@ Required properties:
"invensense,mpu6500"
"invensense,mpu9150"
"invensense,mpu9250"
+ "invensense,mpu9255"
"invensense,icm20608"
- reg : the I2C address of the sensor
- interrupt-parent : should be the phandle for the interrupt controller
- - interrupts : interrupt mapping for GPIO IRQ
+ - interrupts: interrupt mapping for IRQ. It should be configured with flags
+ IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
+ IRQ_TYPE_EDGE_FALLING.
+
+ Refer to interrupt-controller/interrupts.txt for generic interrupt client node
+ bindings.
Optional properties:
- mount-matrix: an optional 3x3 mounting rotation matrix
@@ -24,7 +30,7 @@ Example:
compatible = "invensense,mpu6050";
reg = <0x68>;
interrupt-parent = <&gpio1>;
- interrupts = <18 1>;
+ interrupts = <18 IRQ_TYPE_EDGE_RISING>;
mount-matrix = "-0.984807753012208", /* x0 */
"0", /* y0 */
"-0.173648177666930", /* z0 */
@@ -41,7 +47,7 @@ Example:
compatible = "invensense,mpu9250";
reg = <0x68>;
interrupt-parent = <&gpio3>;
- interrupts = <21 1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>;
i2c-gate {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
index 1ff1af799c76..ef8a8566c63f 100644
--- a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+++ b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
@@ -6,6 +6,7 @@ Required properties:
"st,lsm6ds3h"
"st,lsm6dsl"
"st,lsm6dsm"
+ "st,ism330dlc"
- reg: i2c address of the sensor / spi cs line
Optional properties:
diff --git a/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
index b9b621e94cd7..e6d0c2eb345c 100644
--- a/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
+++ b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
@@ -1,10 +1,13 @@
-* Texas Instruments LMP91000 potentiostat
+* Texas Instruments LMP91000 series of potentiostats
-http://www.ti.com/lit/ds/symlink/lmp91000.pdf
+LMP91000: http://www.ti.com/lit/ds/symlink/lmp91000.pdf
+LMP91002: http://www.ti.com/lit/ds/symlink/lmp91002.pdf
Required properties:
- - compatible: should be "ti,lmp91000"
+ - compatible: should be one of the following:
+ "ti,lmp91000"
+ "ti,lmp91002"
- reg: the I2C address of the device
- io-channels: the phandle of the iio provider
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
index 16964f0c1773..6e8a9ab0fdae 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
@@ -10,6 +10,8 @@ platforms.
Definition: must be one of:
"qcom,msm8916-apcs-kpss-global",
"qcom,msm8996-apcs-hmss-global"
+ "qcom,msm8998-apcs-hmss-global"
+ "qcom,sdm845-apss-shared"
- reg:
Usage: required
diff --git a/Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt b/Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt
new file mode 100644
index 000000000000..1d2b7fee7b85
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt
@@ -0,0 +1,47 @@
+* STMicroelectronics STM32 IPCC (Inter-Processor Communication Controller)
+
+The IPCC block provides a non blocking signaling mechanism to post and
+retrieve messages in an atomic way between two processors.
+It provides the signaling for N bidirectional channels. The number of channels
+(N) can be read from a dedicated register.
+
+Required properties:
+- compatible: Must be "st,stm32mp1-ipcc"
+- reg: Register address range (base address and length)
+- st,proc-id: Processor id using the mailbox (0 or 1)
+- clocks: Input clock
+- interrupt-names: List of names for the interrupts described by the interrupt
+ property. Must contain the following entries:
+ - "rx"
+ - "tx"
+ - "wakeup"
+- interrupts: Interrupt specifiers for "rx channel occupied", "tx channel
+ free" and "system wakeup".
+- #mbox-cells: Number of cells required for the mailbox specifier. Must be 1.
+ The data contained in the mbox specifier of the "mboxes"
+ property in the client node is the mailbox channel index.
+
+Optional properties:
+- wakeup-source: Flag to indicate whether this device can wake up the system
+
+
+
+Example:
+ ipcc: mailbox@4c001000 {
+ compatible = "st,stm32mp1-ipcc";
+ #mbox-cells = <1>;
+ reg = <0x4c001000 0x400>;
+ st,proc-id = <0>;
+ interrupts-extended = <&intc GIC_SPI 100 IRQ_TYPE_NONE>,
+ <&intc GIC_SPI 101 IRQ_TYPE_NONE>,
+ <&aiec 62 1>;
+ interrupt-names = "rx", "tx", "wakeup";
+ clocks = <&rcc_clk IPCC>;
+ wakeup-source;
+	};
+
+Client:
+ mbox_test {
+ ...
+ mboxes = <&ipcc 0>, <&ipcc 1>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt
index f9632bacbd04..f9632bacbd04 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index 9455503b0299..d1762f3b30af 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -43,7 +43,7 @@ Optional properties:
regulator to drive the OTG VBus, rather then
as an input pin which signals whether the
board is driving OTG VBus or not.
- (axp221 / axp223 / axp813 only)
+			  (axp221 / axp223 / axp803 / axp813 only)
- x-powers,master-mode: Boolean (axp806 only). Set this when the PMIC is
wired for master mode. The default is slave mode.
@@ -132,6 +132,7 @@ FLDO2 : LDO : fldoin-supply : shared supply
LDO_IO0 : LDO : ips-supply : GPIO 0
LDO_IO1 : LDO : ips-supply : GPIO 1
RTC_LDO : LDO : ips-supply : always on
+DRIVEVBUS : Enable output : drivevbus-supply : external regulator
AXP806 regulators, type, and corresponding input supply names:
diff --git a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
index 9ab216a851d5..25d1f697eb25 100644
--- a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
+++ b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
@@ -25,6 +25,25 @@ Required properties:
Each child node is defined using the standard
binding for regulators.
+Optional properties:
+ - rohm,ddr-backup-power : Value to use for DDR-Backup Power (default 0).
+ This is a bitmask that specifies which DDR power
+ rails need to be kept powered when backup mode is
+ entered, for system suspend:
+ - bit 0: DDR0
+ - bit 1: DDR1
+ - bit 2: DDR0C
+ - bit 3: DDR1C
+ These bits match the KEEPON_DDR* bits in the
+ documentation for the "BKUP Mode Cnt" register.
+ - rohm,rstbmode-level: The RSTB signal is configured for level mode, to
+ accommodate a toggle power switch (the RSTBMODE pin is
+ strapped low).
+ - rohm,rstbmode-pulse: The RSTB signal is configured for pulse mode, to
+ accommodate a momentary power switch (the RSTBMODE pin
+ is strapped high).
+ The two properties above are mutually exclusive.
+
Example:
pmic: pmic@30 {
@@ -36,6 +55,8 @@ Example:
#interrupt-cells = <2>;
gpio-controller;
#gpio-cells = <2>;
+ rohm,ddr-backup-power = <0xf>;
+ rohm,rstbmode-pulse;
regulators {
dvfs: dvfs {
diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
index a086f1e1cdd7..7f0822b4beae 100644
--- a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
+++ b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
@@ -61,7 +61,6 @@ Example of the RCU bindings on a xRX200 SoC:
usb_phy0: usb2-phy@18 {
compatible = "lantiq,xrx200-usb2-phy";
reg = <0x18 4>, <0x38 4>;
- status = "disabled";
resets = <&reset1 4 4>, <&reset0 4 4>;
reset-names = "phy", "ctrl";
@@ -71,7 +70,6 @@ Example of the RCU bindings on a xRX200 SoC:
usb_phy1: usb2-phy@34 {
compatible = "lantiq,xrx200-usb2-phy";
reg = <0x34 4>, <0x3C 4>;
- status = "disabled";
resets = <&reset1 5 4>, <&reset0 4 4>;
reset-names = "phy", "ctrl";
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index ee978c95189d..839f469f4525 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -69,7 +69,6 @@ Example: R8A7790 (R-Car H2) SDHI controller nodes
max-frequency = <195000000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
resets = <&cpg 314>;
- status = "disabled";
};
sdhi1: sd@ee120000 {
@@ -83,7 +82,6 @@ Example: R8A7790 (R-Car H2) SDHI controller nodes
max-frequency = <195000000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
resets = <&cpg 313>;
- status = "disabled";
};
sdhi2: sd@ee140000 {
@@ -97,7 +95,6 @@ Example: R8A7790 (R-Car H2) SDHI controller nodes
max-frequency = <97500000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
resets = <&cpg 312>;
- status = "disabled";
};
sdhi3: sd@ee160000 {
@@ -111,5 +108,4 @@ Example: R8A7790 (R-Car H2) SDHI controller nodes
max-frequency = <97500000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
resets = <&cpg 311>;
- status = "disabled";
};
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
index b289ef3c1b7e..393588385c6e 100644
--- a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
@@ -47,6 +47,11 @@ Optional properties:
partitions written from Linux with this feature
turned on may not be accessible by the BootROM
code.
+ - nand-ecc-strength: integer representing the number of bits to correct
+ per ECC step. Needs to be a multiple of 2.
+ - nand-ecc-step-size: integer representing the number of data bytes
+ that are covered by a single ECC step. The driver
+ supports 512 and 1024.
The device tree may optionally contain sub-nodes describing partitions of the
address space. See partition.txt for more detail.
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt b/Documentation/devicetree/bindings/mtd/ibm,ndfc.txt
index 869f0b5f16e8..869f0b5f16e8 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt
+++ b/Documentation/devicetree/bindings/mtd/ibm,ndfc.txt
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
index 1c88526dedfc..4d3ec5e4ff8a 100644
--- a/Documentation/devicetree/bindings/mtd/mtk-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -20,7 +20,6 @@ Required NFI properties:
- interrupts: Interrupts of NFI.
- clocks: NFI required clocks.
- clock-names: NFI clocks internal name.
-- status: Disabled default. Then set "okay" by platform.
- ecc-engine: Required ECC Engine node.
- #address-cells: NAND chip index, should be 1.
- #size-cells: Should be 0.
@@ -34,7 +33,6 @@ Example:
clocks = <&pericfg CLK_PERI_NFI>,
<&pericfg CLK_PERI_NFI_PAD>;
clock-names = "nfi_clk", "pad_clk";
- status = "disabled";
ecc-engine = <&bch>;
#address-cells = <1>;
#size-cells = <0>;
@@ -50,14 +48,19 @@ Optional:
- nand-on-flash-bbt: Store BBT on NAND Flash.
- nand-ecc-mode: the NAND ecc mode (check driver for supported modes)
- nand-ecc-step-size: Number of data bytes covered by a single ECC step.
- valid values: 512 and 1024.
+ valid values:
+ 512 and 1024 on mt2701 and mt2712.
+ 512 only on mt7622.
1024 is recommended for large page NANDs.
- nand-ecc-strength: Number of bits to correct per ECC step.
- The valid values that the controller supports are: 4, 6,
- 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 40, 44,
- 48, 52, 56, 60.
+ The valid values that each controller supports:
+ mt2701: 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28,
+ 32, 36, 40, 44, 48, 52, 56, 60.
+ mt2712: 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28,
+ 32, 36, 40, 44, 48, 52, 56, 60, 68, 72, 80.
+ mt7622: 4, 6, 8, 10, 12, 14, 16.
The strength should be calculated as follows:
- E = (S - F) * 8 / 14
+ E = (S - F) * 8 / B
S = O / (P / Q)
E : nand-ecc-strength.
S : spare size per sector.
@@ -66,6 +69,15 @@ Optional:
O : oob size.
P : page size.
Q : nand-ecc-step-size.
+ B : number of parity bits needed to correct
+ 1 bitflip.
+ According to MTK NAND controller design,
+ this number depends on max ecc step size
+ that MTK NAND controller supports.
+ If max ecc step size supported is 1024,
+ then it should be always 14. And if max
+ ecc step size is 512, then it should be
+ always 13.
If the result does not match any one of the listed
choices above, please select the smaller valid value from
the list.
@@ -152,7 +164,6 @@ Required BCH properties:
- interrupts: Interrupts of ECC.
- clocks: ECC required clocks.
- clock-names: ECC clocks internal name.
-- status: Disabled default. Then set "okay" by platform.
Example:
@@ -162,5 +173,4 @@ Example:
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_NFI_ECC>;
clock-names = "nfiecc_clk";
- status = "disabled";
};
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
index 36f3b769a626..a8f382642ba9 100644
--- a/Documentation/devicetree/bindings/mtd/partition.txt
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -14,7 +14,7 @@ method is used for a given flash device. To describe the method there should be
a subnode of the flash device that is named 'partitions'. It must have a
'compatible' property, which is used to identify the method to use.
-We currently only document a binding for fixed layouts.
+Available bindings are listed in the "partitions" subdirectory.
Fixed Partitions
diff --git a/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm947xx-cfe-partitions.txt b/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm947xx-cfe-partitions.txt
new file mode 100644
index 000000000000..1d61a029395e
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm947xx-cfe-partitions.txt
@@ -0,0 +1,42 @@
+Broadcom BCM47xx Partitions
+===========================
+
+Broadcom is one of hardware manufacturers providing SoCs (BCM47xx) used in
+home routers. Their BCM947xx boards using CFE bootloader have several partitions
+without any on-flash partition table. On some devices their sizes and/or
+meanings can also vary so fixed partitioning can't be used.
+
+Discovering partitions on these devices is possible thanks to having a special
+header and/or magic signature at the beginning of each of them. They are also
+block aligned, which is important for determining a size.
+
+Most of partitions use ASCII text based magic for determining a type. More
+complex partitions (like TRX with its HDR0 magic) may include extra header
+containing some details, including a length.
+
+A list of supported partitions includes:
+1) Bootloader with Broadcom's CFE (Common Firmware Environment)
+2) NVRAM with configuration/calibration data
+3) Device manufacturer's data with some default values (e.g. SSIDs)
+4) TRX firmware container which can hold up to 4 subpartitions
+5) Backup TRX firmware used after failed upgrade
+
+As mentioned earlier, role of some partitions may depend on extra configuration.
+For example both: main firmware and backup firmware use the same TRX format with
+the same header. To distinguish currently used firmware a CFE's environment
+variable "bootpartition" is used.
+
+
+Devices using Broadcom partitions described above should have a flash node
+with a subnode named "partitions" using following properties:
+
+Required properties:
+- compatible : must be "brcm,bcm947xx-cfe-partitions"
+
+Example:
+
+flash@0 {
+ partitions {
+ compatible = "brcm,bcm947xx-cfe-partitions";
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
index 0734f03bf3d3..dcd5a5d80dc0 100644
--- a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -22,8 +22,6 @@ Optional properties:
- reset : phandle + reset specifier pair
- reset-names : must contain "ahb"
- allwinner,rb : shall contain the native Ready/Busy ids.
- or
-- rb-gpios : shall contain the gpios used as R/B pins.
- nand-ecc-mode : one of the supported ECC modes ("hw", "soft", "soft_bch" or
"none")
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt b/Documentation/devicetree/bindings/net/ibm,emac.txt
index 44b842b6ca15..44b842b6ca15 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
+++ b/Documentation/devicetree/bindings/net/ibm,emac.txt
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 890526dbfc26..fac897d54423 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -21,6 +21,7 @@ Required properties:
- "renesas,etheravb-r8a77965" for the R8A77965 SoC.
- "renesas,etheravb-r8a77970" for the R8A77970 SoC.
- "renesas,etheravb-r8a77980" for the R8A77980 SoC.
+ - "renesas,etheravb-r8a77990" for the R8A77990 SoC.
- "renesas,etheravb-r8a77995" for the R8A77995 SoC.
- "renesas,etheravb-rcar-gen3" as a fallback for the above
R-Car Gen3 devices.
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index 1da7ade3183c..c124f9bc11f3 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -1,7 +1,9 @@
* Synopsys DesignWare PCIe interface
Required properties:
-- compatible: should contain "snps,dw-pcie" to identify the core.
+- compatible:
+ "snps,dw-pcie" for RC mode;
+ "snps,dw-pcie-ep" for EP mode;
- reg: Should contain the configuration address space.
- reg-names: Must be "config" for the PCIe configuration space.
(The old way of getting the configuration address space from "ranges"
@@ -41,11 +43,11 @@ EP mode:
Example configuration:
- pcie: pcie@dffff000 {
+ pcie: pcie@dfc00000 {
compatible = "snps,dw-pcie";
- reg = <0xdffff000 0x1000>, /* Controller registers */
- <0xd0000000 0x2000>; /* PCI config space */
- reg-names = "ctrlreg", "config";
+ reg = <0xdfc00000 0x0001000>, /* IP registers */
+ <0xd0000000 0x0002000>; /* Configuration space */
+ reg-names = "dbi", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
@@ -54,5 +56,15 @@ Example configuration:
interrupts = <25>, <24>;
#interrupt-cells = <1>;
num-lanes = <1>;
- num-viewport = <3>;
+ };
+or
+ pcie: pcie@dfc00000 {
+ compatible = "snps,dw-pcie-ep";
+ reg = <0xdfc00000 0x0001000>, /* IP registers 1 */
+ <0xdfc01000 0x0001000>, /* IP registers 2 */
+ <0xd0000000 0x2000000>; /* Configuration space */
+ reg-names = "dbi", "dbi2", "addr_space";
+ num-ib-windows = <6>;
+ num-ob-windows = <2>;
+ num-lanes = <1>;
};
diff --git a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
new file mode 100644
index 000000000000..65038aa642e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
@@ -0,0 +1,73 @@
+* Mobiveil AXI PCIe Root Port Bridge DT description
+
+Mobiveil's GPEX 4.0 is a PCIe Gen4 root port bridge IP. This configurable IP
+has up to 8 outbound and inbound windows for the address translation.
+
+Required properties:
+- #address-cells: Address representation for root ports, set to <3>
+- #size-cells: Size representation for root ports, set to <2>
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- compatible: Should contain "mbvl,gpex40-pcie"
+- reg: Should contain PCIe registers location and length
+ "config_axi_slave": PCIe controller registers
+ "csr_axi_slave" : Bridge config registers
+ "gpio_slave" : GPIO registers to control slot power
+ "apb_csr" : MSI registers
+
+- device_type: must be "pci"
+- apio-wins : number of requested apio outbound windows
+ default 2 outbound windows are configured -
+ 1. Config window
+ 2. Memory window
+- ppio-wins : number of requested ppio inbound windows
+ default 1 inbound memory window is configured.
+- bus-range: PCI bus numbers covered
+- interrupt-controller: identifies the node as an interrupt controller
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- interrupt-parent : phandle to the interrupt controller that
+ it is attached to, it should be set to gic to point to
+ ARM's Generic Interrupt Controller node in system DT.
+- interrupts: The interrupt line of the PCIe controller
+ last cell of this field is set to 4 to
+ denote it as IRQ_TYPE_LEVEL_HIGH type interrupt.
+- interrupt-map-mask,
+ interrupt-map: standard PCI properties to define the mapping of the
+ PCI interface to interrupt numbers.
+- ranges: ranges for the PCI memory regions (I/O space region is not
+ supported by hardware)
+ Please refer to the standard PCI bus binding document for a more
+ detailed explanation
+
+
+Example:
+++++++++
+ pcie0: pcie@a0000000 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ compatible = "mbvl,gpex40-pcie";
+ reg = <0xa0000000 0x00001000>,
+ <0xb0000000 0x00010000>,
+ <0xff000000 0x00200000>,
+ <0xb0010000 0x00001000>;
+ reg-names = "config_axi_slave",
+ "csr_axi_slave",
+ "gpio_slave",
+ "apb_csr";
+ device_type = "pci";
+ apio-wins = <2>;
+ ppio-wins = <1>;
+ bus-range = <0x00000000 0x000000ff>;
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <1>;
+ interrupts = < 0 89 4 >;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 0 &pci_express 0>,
+ <0 0 0 1 &pci_express 1>,
+ <0 0 0 2 &pci_express 2>,
+ <0 0 0 3 &pci_express 3>;
+ ranges = < 0x83000000 0 0x00000000 0xa8000000 0 0x8000000>;
+
+ };
diff --git a/Documentation/devicetree/bindings/pci/pci-armada8k.txt b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
index c1e4c3d10a74..9e3fc15e1af8 100644
--- a/Documentation/devicetree/bindings/pci/pci-armada8k.txt
+++ b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
@@ -12,7 +12,10 @@ Required properties:
- "ctrl" for the control register region
- "config" for the config space region
- interrupts: Interrupt specifier for the PCIe controler
-- clocks: reference to the PCIe controller clock
+- clocks: reference to the PCIe controller clocks
+- clock-names: mandatory if there is a second clock, in this case the
+ name must be "core" for the first clock and "reg" for the second
+ one
Example:
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 1fb614e615da..a5f7fc62d10e 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -8,6 +8,7 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
"renesas,pcie-r8a7793" for the R8A7793 SoC;
"renesas,pcie-r8a7795" for the R8A7795 SoC;
"renesas,pcie-r8a7796" for the R8A7796 SoC;
+ "renesas,pcie-r8a77980" for the R8A77980 SoC;
"renesas,pcie-rcar-gen2" for a generic R-Car Gen2 or
RZ/G1 compatible device.
"renesas,pcie-rcar-gen3" for a generic R-Car Gen3 compatible device.
@@ -32,6 +33,11 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
and PCIe bus clocks.
- clock-names: from common clock binding: should be "pcie" and "pcie_bus".
+Optional properties:
+- phys: from common PHY binding: PHY phandle and specifier (only make sense
+ for R-Car gen3 SoCs where the PCIe PHYs have their own register blocks).
+- phy-names: from common PHY binding: should be "pcie".
+
Example:
SoC-specific DT Entry:
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt
new file mode 100644
index 000000000000..778467307a93
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt
@@ -0,0 +1,62 @@
+* Rockchip AXI PCIe Endpoint Controller DT description
+
+Required properties:
+- compatible: Should contain "rockchip,rk3399-pcie-ep"
+- reg: Two register ranges as listed in the reg-names property
+- reg-names: Must include the following names
+ - "apb-base"
+ - "mem-base"
+- clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - "aclk"
+ - "aclk-perf"
+ - "hclk"
+ - "pm"
+- resets: Must contain seven entries, one for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following names
+ - "core"
+ - "mgmt"
+ - "mgmt-sticky"
+ - "pipe"
+ - "pm"
+ - "aclk"
+ - "pclk"
+- pinctrl-names : The pin control state names
+- pinctrl-0: The "default" pinctrl state
+- phys: Must contain a phandle to a PHY for each entry in phy-names.
+- phy-names: Must include 4 entries for all 4 lanes even if some of
+ them won't be used for your cases. Entries are of the form "pcie-phy-N",
+ where N ranges from 0 to 3.
+ (see example below and you MUST also refer to ../phy/rockchip-pcie-phy.txt
+ for changing the #phy-cells of phy node to support it)
+- rockchip,max-outbound-regions: Maximum number of outbound regions
+
+Optional Property:
+- num-lanes: number of lanes to use
+- max-functions: Maximum number of functions that can be configured (default 1).
+
+pcie0-ep: pcie@f8000000 {
+ compatible = "rockchip,rk3399-pcie-ep";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ rockchip,max-outbound-regions = <16>;
+ clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
+ <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
+ clock-names = "aclk", "aclk-perf",
+ "hclk", "pm";
+ max-functions = /bits/ 8 <8>;
+ num-lanes = <4>;
+ reg = <0x0 0xfd000000 0x0 0x1000000>, <0x0 0x80000000 0x0 0x20000>;
+ reg-names = "apb-base", "mem-base";
+ resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> ,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
+ phys = <&pcie_phy 0>, <&pcie_phy 1>, <&pcie_phy 2>, <&pcie_phy 3>;
+ phy-names = "pcie-phy-0", "pcie-phy-1", "pcie-phy-2", "pcie-phy-3";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreq>;
+};
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie-host.txt
index af34c65773fd..af34c65773fd 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie-host.txt
diff --git a/Documentation/devicetree/bindings/pci/xgene-pci.txt b/Documentation/devicetree/bindings/pci/xgene-pci.txt
index 6fd2decfa66c..92490330dc1c 100644
--- a/Documentation/devicetree/bindings/pci/xgene-pci.txt
+++ b/Documentation/devicetree/bindings/pci/xgene-pci.txt
@@ -25,8 +25,6 @@ Optional properties:
Example:
-SoC-specific DT Entry:
-
pcie0: pcie@1f2b0000 {
status = "disabled";
device_type = "pci";
@@ -50,8 +48,3 @@ SoC-specific DT Entry:
clocks = <&pcie0clk 0>;
};
-
-Board-specific DT Entry:
- &pcie0 {
- status = "ok";
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
index fb87c7d74f2e..8fb5a53775e8 100644
--- a/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
@@ -8,6 +8,17 @@ Required Properties:
- reg: Should contain the register base address and size of
the pin controller.
- clocks: phandle of the clock feeding the pin controller
+- gpio-controller: Marks the device node as a GPIO controller.
+- gpio-ranges: Specifies the mapping between gpio controller and
+ pin-controller pins.
+- #gpio-cells: Should be two. The first cell is the gpio pin number
+ and the second cell is used for optional parameters.
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt. Shall be set to 2. The first cell
+ defines the interrupt number, the second encodes
+ the trigger flags described in
+ bindings/interrupt-controller/interrupts.txt
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices, including the meaning of the
@@ -164,6 +175,11 @@ Example:
compatible = "actions,s900-pinctrl";
reg = <0x0 0xe01b0000 0x0 0x1000>;
clocks = <&cmu CLK_GPIO>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 146>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
uart2-default: uart2-default {
pinmux {
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 64bc5c2a76da..258a4648ab81 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -28,6 +28,7 @@ Required properties:
"allwinner,sun50i-a64-r-pinctrl"
"allwinner,sun50i-h5-pinctrl"
"allwinner,sun50i-h6-pinctrl"
+ "allwinner,sun50i-h6-r-pinctrl"
"nextthing,gr8-pinctrl"
- reg: Should contain the register physical address and length for the
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
index 2569866c692f..3fac0a061bcc 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
@@ -36,6 +36,24 @@ listed. In other words, a subnode that lists only a mux function implies no
information about any pull configuration. Similarly, a subnode that lists only
a pull parameter implies no information about the mux function.
+The BCM2835 pin configuration and multiplexing supports the generic bindings.
+For details on each property, you can refer to ./pinctrl-bindings.txt.
+
+Required sub-node properties:
+ - pins
+ - function
+
+Optional sub-node properties:
+ - bias-disable
+ - bias-pull-up
+ - bias-pull-down
+ - output-high
+ - output-low
+
+Legacy pin configuration and multiplexing binding:
+*** (Its use is deprecated, use generic multiplexing and configuration
+bindings instead)
+
Required subnode-properties:
- brcm,pins: An array of cells. Each cell contains the ID of a pin. Valid IDs
are the integer GPIO IDs; 0==GPIO0, 1==GPIO1, ... 53==GPIO53.
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 2c12f9789116..54ecb8ab7788 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -3,8 +3,10 @@
Required properties for the root node:
- compatible: one of "amlogic,meson8-cbus-pinctrl"
"amlogic,meson8b-cbus-pinctrl"
+ "amlogic,meson8m2-cbus-pinctrl"
"amlogic,meson8-aobus-pinctrl"
"amlogic,meson8b-aobus-pinctrl"
+ "amlogic,meson8m2-aobus-pinctrl"
"amlogic,meson-gxbb-periphs-pinctrl"
"amlogic,meson-gxbb-aobus-pinctrl"
"amlogic,meson-gxl-periphs-pinctrl"
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
index a5a8322a31bd..a677145ae6d1 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
@@ -18,7 +18,9 @@ Required properties:
removed.
- #gpio-cells : Should be two.
- first cell is the pin number
- - second cell is used to specify flags. Flags are currently unused.
+ - second cell is used to specify flags as described in
+ 'Documentation/devicetree/bindings/gpio/gpio.txt'. Allowed values defined by
+ 'include/dt-bindings/gpio/gpio.h' (e.g. GPIO_ACTIVE_LOW).
- gpio-controller : Marks the device node as a GPIO controller.
- reg : For an address on its bus. I2C uses this as the I2C address of the chip.
SPI uses this to specify the chipselect line which the chip is
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
index f18ed99f6e14..def8fcad8941 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
@@ -9,6 +9,16 @@ Required properties for the root node:
- #gpio-cells: Should be two. The first cell is the pin number and the
second is the GPIO flags.
+Optional properties:
+- interrupt-controller : Marks the device node as an interrupt controller
+
+If the property interrupt-controller is defined, following property is required
+- reg-names: A string describing the "reg" entries. Must contain "eint".
+- interrupts : The interrupt output from the controller.
+- #interrupt-cells: Should be two.
+- interrupt-parent: Phandle of the interrupt parent to which the external
+ GPIO interrupts are forwarded to.
+
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices, including the meaning of the
phrase "pin configuration node".
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 892d8fd7b700..abd8fbcf1e62 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -15,6 +15,7 @@ Required Properties:
- "renesas,pfc-r8a7740": for R8A7740 (R-Mobile A1) compatible pin-controller.
- "renesas,pfc-r8a7743": for R8A7743 (RZ/G1M) compatible pin-controller.
- "renesas,pfc-r8a7745": for R8A7745 (RZ/G1E) compatible pin-controller.
+ - "renesas,pfc-r8a77470": for R8A77470 (RZ/G1C) compatible pin-controller.
- "renesas,pfc-r8a7778": for R8A7778 (R-Car M1) compatible pin-controller.
- "renesas,pfc-r8a7779": for R8A7779 (R-Car H1) compatible pin-controller.
- "renesas,pfc-r8a7790": for R8A7790 (R-Car H2) compatible pin-controller.
@@ -27,6 +28,7 @@ Required Properties:
- "renesas,pfc-r8a77965": for R8A77965 (R-Car M3-N) compatible pin-controller.
- "renesas,pfc-r8a77970": for R8A77970 (R-Car V3M) compatible pin-controller.
- "renesas,pfc-r8a77980": for R8A77980 (R-Car V3H) compatible pin-controller.
+ - "renesas,pfc-r8a77990": for R8A77990 (R-Car E3) compatible pin-controller.
- "renesas,pfc-r8a77995": for R8A77995 (R-Car D3) compatible pin-controller.
- "renesas,pfc-sh73a0": for SH73A0 (SH-Mobile AG5) compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
index a01a3b8a2363..0919db294c17 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
@@ -20,6 +20,7 @@ defined as gpio sub-nodes of the pinmux controller.
Required properties for iomux controller:
- compatible: should be
+ "rockchip,px30-pinctrl": for Rockchip PX30
"rockchip,rv1108-pinctrl": for Rockchip RV1108
"rockchip,rk2928-pinctrl": for Rockchip RK2928
"rockchip,rk3066a-pinctrl": for Rockchip RK3066a
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
index 615c1cb6889f..37994fdb18ca 100644
--- a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
@@ -25,6 +25,7 @@ Required properties:
* "ti,bq27545" - BQ27545
* "ti,bq27421" - BQ27421
* "ti,bq27425" - BQ27425
+ * "ti,bq27426" - BQ27426
* "ti,bq27441" - BQ27441
* "ti,bq27621" - BQ27621
- reg: integer, I2C address of the fuel gauge.
diff --git a/Documentation/devicetree/bindings/pps/pps-gpio.txt b/Documentation/devicetree/bindings/pps/pps-gpio.txt
index 0de23b793657..3683874832ae 100644
--- a/Documentation/devicetree/bindings/pps/pps-gpio.txt
+++ b/Documentation/devicetree/bindings/pps/pps-gpio.txt
@@ -20,5 +20,4 @@ Example:
assert-falling-edge;
compatible = "pps-gpio";
- status = "okay";
};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt b/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
index 2e53324fb720..5ccfcc82da08 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
@@ -2,7 +2,7 @@
Required properties:
- compatible: Shall contain "ti,omap-dmtimer-pwm".
-- ti,timers: phandle to PWM capable OMAP timer. See arm/omap/timer.txt for info
+- ti,timers: phandle to PWM capable OMAP timer. See timer/ti,timer.txt for info
about these timers.
- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index c6dd3f5e485b..f0ada3b14d70 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -21,7 +21,7 @@ Each regulator is defined using the standard binding for regulators.
Example 1: PFUZE100
- pmic: pfuze100@8 {
+ pfuze100: pmic@8 {
compatible = "fsl,pfuze100";
reg = <0x08>;
@@ -122,7 +122,7 @@ Example 1: PFUZE100
Example 2: PFUZE200
- pmic: pfuze200@8 {
+ pfuze200: pmic@8 {
compatible = "fsl,pfuze200";
reg = <0x08>;
@@ -216,7 +216,7 @@ Example 2: PFUZE200
Example 3: PFUZE3000
- pmic: pfuze3000@8 {
+ pfuze3000: pmic@8 {
compatible = "fsl,pfuze3000";
reg = <0x08>;
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index 57d2c65899df..406f2e570c50 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -110,6 +110,11 @@ Qualcomm SPMI Regulators
Definition: Reference to regulator supplying the input pin, as
described in the data sheet.
+- qcom,saw-reg:
+ Usage: optional
+ Value type: <phandle>
+ Description: Reference to syscon node defining the SAW registers.
+
The regulator node houses sub-nodes for each regulator within the device. Each
sub-node is identified using the node's name, with valid values listed for each
@@ -201,6 +206,17 @@ see regulator.txt - with additional custom properties described below:
2 = 0.55 uA
3 = 0.75 uA
+- qcom,saw-slave:
+ Usage: optional
+ Value type: <bool>
+ Description: SAW controlled gang slave. Will not be configured.
+
+- qcom,saw-leader:
+ Usage: optional
+ Value type: <bool>
+ Description: SAW controlled gang leader. Will be configured as
+ SAW regulator.
+
Example:
regulators {
@@ -221,3 +237,32 @@ Example:
....
};
+
+Example 2:
+
+ saw3: syscon@9A10000 {
+ compatible = "syscon";
+ reg = <0x9A10000 0x1000>;
+ };
+
+ ...
+
+ spm-regulators {
+ compatible = "qcom,pm8994-regulators";
+ qcom,saw-reg = <&saw3>;
+ s8 {
+ qcom,saw-slave;
+ };
+ s9 {
+ qcom,saw-slave;
+ };
+ s10 {
+ qcom,saw-slave;
+ };
+ pm8994_s11_saw: s11 {
+ qcom,saw-leader;
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1140000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index 2babe15b618d..a7cd36877bfe 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -59,6 +59,11 @@ Optional properties:
- regulator-initial-mode: initial operating mode. The set of possible operating
modes depends on the capabilities of every hardware so each device binding
documentation explains which values the regulator supports.
+- regulator-allowed-modes: list of operating modes that software is allowed to
+ configure for the regulator at run-time. Elements may be specified in any
+ order. The set of possible operating modes depends on the capabilities of
+ every hardware so each device binding document explains which values the
+ regulator supports.
- regulator-system-load: Load in uA present on regulator that is not captured by
any consumer request.
- regulator-pull-down: Enable pull down resistor when the regulator is disabled.
@@ -68,6 +73,11 @@ Optional properties:
0: Disable active discharge.
1: Enable active discharge.
Absence of this property will leave configuration to default.
+- regulator-coupled-with: Regulators with which the regulator
+ is coupled. The linkage is 2-way - all coupled regulators should be linked
+ with each other. A regulator should not be coupled with its supplier.
+- regulator-coupled-max-spread: Max spread between voltages of coupled regulators
+ in microvolts.
Deprecated properties:
- regulator-compatible: If a regulator chip contains multiple
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
new file mode 100644
index 000000000000..4edf3137d9f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
@@ -0,0 +1,126 @@
+ROHM BD71837 Power Management Integrated Circuit (PMIC) regulator bindings
+
+BD71837MWV is a programmable Power Management
+IC (PMIC) for powering single-core, dual-core, and
+quad-core SoCs such as the NXP i.MX 8M. It is optimized
+for low BOM cost and compact solution footprint. It
+integrates 8 Buck regulators and 7 LDOs to provide all
+the power rails required by the SoC and the commonly
+used peripherals.
+
+Required properties:
+ - regulator-name: should be "buck1", ..., "buck8" and "ldo1", ..., "ldo7"
+
+List of regulators provided by this controller. BD71837 regulators node
+should be sub node of the BD71837 MFD node. See BD71837 MFD bindings at
+Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt
+Regulator nodes should be named to BUCK_<number> and LDO_<number>. The
+definition for each of these nodes is defined using the standard
+binding for regulators at
+Documentation/devicetree/bindings/regulator/regulator.txt.
+Note that if BD71837 starts at RUN state you probably want to use
+regulator-boot-on at least for BUCK6 and BUCK7 so that those are not
+disabled by driver at startup. LDO5 and LDO6 are supplied by those and
+if they are disabled at startup the voltage monitoring for LDO5/LDO6 will
+cause PMIC to reset.
+
+The valid names for regulator nodes are:
+BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6, BUCK7, BUCK8
+LDO1, LDO2, LDO3, LDO4, LDO5, LDO6, LDO7
+
+Optional properties:
+- Any optional property defined in bindings/regulator/regulator.txt
+
+Example:
+regulators {
+ buck1: BUCK1 {
+ regulator-name = "buck1";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-ramp-delay = <1250>;
+ };
+ buck2: BUCK2 {
+ regulator-name = "buck2";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <1250>;
+ };
+ buck3: BUCK3 {
+ regulator-name = "buck3";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ };
+ buck4: BUCK4 {
+ regulator-name = "buck4";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ };
+ buck5: BUCK5 {
+ regulator-name = "buck5";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-boot-on;
+ };
+ buck6: BUCK6 {
+ regulator-name = "buck6";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+ buck7: BUCK7 {
+ regulator-name = "buck7";
+ regulator-min-microvolt = <1605000>;
+ regulator-max-microvolt = <1995000>;
+ regulator-boot-on;
+ };
+ buck8: BUCK8 {
+ regulator-name = "buck8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ };
+
+ ldo1: LDO1 {
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+ ldo2: LDO2 {
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ };
+ ldo3: LDO3 {
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ ldo4: LDO4 {
+ regulator-name = "ldo4";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ ldo5: LDO5 {
+ regulator-name = "ldo5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ ldo6: LDO6 {
+ regulator-name = "ldo6";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ ldo7_reg: LDO7 {
+ regulator-name = "ldo7";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+
diff --git a/Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt b/Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt
new file mode 100644
index 000000000000..39a8ca73f572
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt
@@ -0,0 +1,23 @@
+SY8106A Voltage regulator
+
+Required properties:
+- compatible: Must be "silergy,sy8106a"
+- reg: I2C slave address - must be <0x65>
+- silergy,fixed-microvolt - the voltage when I2C regulating is disabled (set
+ by external resistor like a fixed voltage)
+
+Any property defined as part of the core regulator binding, defined in
+./regulator.txt, can also be used.
+
+Example:
+
+ sy8106a {
+ compatible = "silergy,sy8106a";
+ reg = <0x65>;
+ regulator-name = "sy8106a-vdd";
+ silergy,fixed-microvolt = <1200000>;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt b/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt
index a13fbdb4bd88..a13fbdb4bd88 100644
--- a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+++ b/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt
diff --git a/Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt b/Documentation/devicetree/bindings/rng/sparc_sun_oracle_rng.txt
index b0b211194c71..b0b211194c71 100644
--- a/Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt
+++ b/Documentation/devicetree/bindings/rng/sparc_sun_oracle_rng.txt
diff --git a/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt b/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt
index 5cbc0b145a61..811124a36d16 100644
--- a/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt
+++ b/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt
@@ -9,7 +9,7 @@ Optional properties:
Example:
-rtc: nxp,rtc-pcf2123@3 {
+pcf2123: rtc@3 {
compatible = "nxp,rtc-pcf2123"
reg = <3>
spi-cs-high;
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
index a66692a08ace..c920e2736991 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
@@ -1,23 +1,29 @@
STM32 Real Time Clock
Required properties:
-- compatible: can be either "st,stm32-rtc" or "st,stm32h7-rtc", depending on
- the device is compatible with stm32(f4/f7) or stm32h7.
+- compatible: can be one of the following:
+ - "st,stm32-rtc" for devices compatible with stm32(f4/f7).
+ - "st,stm32h7-rtc" for devices compatible with stm32h7.
+ - "st,stm32mp1-rtc" for devices compatible with stm32mp1.
- reg: address range of rtc register set.
- clocks: can use up to two clocks, depending on part used:
- "rtc_ck": RTC clock source.
- It is required on stm32(f4/f7) and stm32h7.
- "pclk": RTC APB interface clock.
It is not present on stm32(f4/f7).
- It is required on stm32h7.
+ It is required on stm32(h7/mp1).
- clock-names: must be "rtc_ck" and "pclk".
- It is required only on stm32h7.
+ It is required on stm32(h7/mp1).
- interrupt-parent: phandle for the interrupt controller.
-- interrupts: rtc alarm interrupt.
-- st,syscfg: phandle for pwrcfg, mandatory to disable/enable backup domain
- (RTC registers) write protection.
+ It is required on stm32(f4/f7/h7).
+- interrupts: rtc alarm interrupt. On stm32mp1, a second interrupt is required
+ for rtc alarm wakeup interrupt.
+- st,syscfg: phandle/offset/mask triplet. The phandle to pwrcfg used to
+ access control register at offset, and change the dbp (Disable Backup
+ Protection) bit represented by the mask, mandatory to disable/enable backup
+ domain (RTC registers) write protection.
+ It is required on stm32(f4/f7/h7).
-Optional properties (to override default rtc_ck parent clock):
+Optional properties (to override default rtc_ck parent clock on stm32(f4/f7/h7)):
- assigned-clocks: reference to the rtc_ck clock entry.
- assigned-clock-parents: phandle of the new parent clock of rtc_ck.
@@ -31,7 +37,7 @@ Example:
assigned-clock-parents = <&rcc 1 CLK_LSE>;
interrupt-parent = <&exti>;
interrupts = <17 1>;
- st,syscfg = <&pwrcfg>;
+ st,syscfg = <&pwrcfg 0x00 0x100>;
};
rtc: rtc@58004000 {
@@ -44,5 +50,14 @@ Example:
interrupt-parent = <&exti>;
interrupts = <17 1>;
interrupt-names = "alarm";
- st,syscfg = <&pwrcfg>;
+ st,syscfg = <&pwrcfg 0x00 0x100>;
+ };
+
+ rtc: rtc@5c004000 {
+ compatible = "st,stm32mp1-rtc";
+ reg = <0x5c004000 0x400>;
+ clocks = <&rcc RTCAPB>, <&rcc RTC>;
+ clock-names = "pclk", "rtc_ck";
+ interrupts-extended = <&intc GIC_SPI 3 IRQ_TYPE_NONE>,
+ <&exti 19 1>;
};
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index 274058c583dd..de0d6090c0fd 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -6,7 +6,14 @@ I. For patch submitters
0) Normal patch submission rules from Documentation/process/submitting-patches.rst
applies.
- 1) The Documentation/ portion of the patch should be a separate patch.
+ 1) The Documentation/ and include/dt-bindings/ portion of the patch should
+ be a separate patch. The preferred subject prefix for binding patches is:
+
+ "dt-bindings: <binding dir>: ..."
+
+ The 80 characters of the subject are precious. It is recommended to not
+ use "Documentation" or "doc" because that is implied. All bindings are
+ docs. Repeating "binding" again should also be avoided.
2) Submit the entire series to the devicetree mailinglist at
diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
index fdf5caa6229b..39e7d4e61a63 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
@@ -27,9 +27,9 @@ Example:
tsc: thermal@e6198000 {
compatible = "renesas,r8a7795-thermal";
- reg = <0 0xe6198000 0 0x68>,
- <0 0xe61a0000 0 0x5c>,
- <0 0xe61a8000 0 0x5c>;
+ reg = <0 0xe6198000 0 0x100>,
+ <0 0xe61a0000 0 0x100>,
+ <0 0xe61a8000 0 0x100>;
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/nios2/timer.txt b/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
index 904a5846d7ac..904a5846d7ac 100644
--- a/Documentation/devicetree/bindings/nios2/timer.txt
+++ b/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/timer/arm,arch_timer.txt
index 68301b77e854..68301b77e854 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer.txt
diff --git a/Documentation/devicetree/bindings/arm/armv7m_systick.txt b/Documentation/devicetree/bindings/timer/arm,armv7m-systick.txt
index 7cf4a24601eb..7cf4a24601eb 100644
--- a/Documentation/devicetree/bindings/arm/armv7m_systick.txt
+++ b/Documentation/devicetree/bindings/timer/arm,armv7m-systick.txt
diff --git a/Documentation/devicetree/bindings/arm/global_timer.txt b/Documentation/devicetree/bindings/timer/arm,global_timer.txt
index bdae3a818793..bdae3a818793 100644
--- a/Documentation/devicetree/bindings/arm/global_timer.txt
+++ b/Documentation/devicetree/bindings/timer/arm,global_timer.txt
diff --git a/Documentation/devicetree/bindings/arm/twd.txt b/Documentation/devicetree/bindings/timer/arm,twd.txt
index 383ea19c2bf0..383ea19c2bf0 100644
--- a/Documentation/devicetree/bindings/arm/twd.txt
+++ b/Documentation/devicetree/bindings/timer/arm,twd.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt b/Documentation/devicetree/bindings/timer/fsl,gtm.txt
index 9a33efded4bc..9a33efded4bc 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt
+++ b/Documentation/devicetree/bindings/timer/fsl,gtm.txt
diff --git a/Documentation/devicetree/bindings/arm/mrvl/timer.txt b/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.txt
index 9a6e251462e7..9a6e251462e7 100644
--- a/Documentation/devicetree/bindings/arm/mrvl/timer.txt
+++ b/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.txt
diff --git a/Documentation/devicetree/bindings/arm/msm/timer.txt b/Documentation/devicetree/bindings/timer/qcom,msm-timer.txt
index 5e10c345548f..5e10c345548f 100644
--- a/Documentation/devicetree/bindings/arm/msm/timer.txt
+++ b/Documentation/devicetree/bindings/timer/qcom,msm-timer.txt
diff --git a/Documentation/devicetree/bindings/arm/spear-timer.txt b/Documentation/devicetree/bindings/timer/st,spear-timer.txt
index c0017221cf55..c0017221cf55 100644
--- a/Documentation/devicetree/bindings/arm/spear-timer.txt
+++ b/Documentation/devicetree/bindings/timer/st,spear-timer.txt
diff --git a/Documentation/devicetree/bindings/c6x/timer64.txt b/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
index 95911fe70224..95911fe70224 100644
--- a/Documentation/devicetree/bindings/c6x/timer64.txt
+++ b/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
diff --git a/Documentation/devicetree/bindings/arm/omap/timer.txt b/Documentation/devicetree/bindings/timer/ti,timer.txt
index d02e27c764ec..d02e27c764ec 100644
--- a/Documentation/devicetree/bindings/arm/omap/timer.txt
+++ b/Documentation/devicetree/bindings/timer/ti,timer.txt
diff --git a/Documentation/devicetree/bindings/arm/vt8500/via,vt8500-timer.txt b/Documentation/devicetree/bindings/timer/via,vt8500-timer.txt
index 901c73f0d8ef..901c73f0d8ef 100644
--- a/Documentation/devicetree/bindings/arm/vt8500/via,vt8500-timer.txt
+++ b/Documentation/devicetree/bindings/timer/via,vt8500-timer.txt
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 36003832c2a8..4b38f3373f43 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -32,6 +32,7 @@ andestech Andes Technology Corporation
apm Applied Micro Circuits Corporation (APM)
aptina Aptina Imaging
arasan Arasan Chip Systems
+archermind ArcherMind Technology (Nanjing) Co., Ltd.
arctic Arctic Sand
aries Aries Embedded GmbH
arm ARM Ltd.
@@ -47,6 +48,7 @@ auvidea Auvidea GmbH
avago Avago Technologies
avia avia semiconductor
avic Shanghai AVIC Optoelectronics Co., Ltd.
+avnet Avnet, Inc.
axentia Axentia Technologies AB
axis Axis Communications AB
bananapi BIPAI KEJI LIMITED
@@ -186,6 +188,7 @@ khadas Khadas
kiebackpeter Kieback & Peter GmbH
kinetic Kinetic Technologies
kingnovel Kingnovel Technology Co., Ltd.
+koe Kaohsiung Opto-Electronics Inc.
kosagi Sutajio Ko-Usagi PTE Ltd.
kyo Kyocera Corporation
lacie LaCie
@@ -200,11 +203,13 @@ linaro Linaro Limited
linksys Belkin International, Inc. (Linksys)
linux Linux-specific binding
lltc Linear Technology Corporation
+logicpd Logic PD, Inc.
lsi LSI Corp. (LSI Logic)
lwn Liebherr-Werk Nenzing GmbH
macnica Macnica Americas
marvell Marvell Technology Group Ltd.
maxim Maxim Integrated Products
+mbvl Mobiveil Inc.
mcube mCube
meas Measurement Specialties
mediatek MediaTek Inc.
@@ -319,6 +324,7 @@ sgx SGX Sensortech
sharp Sharp Corporation
shimafuji Shimafuji Electric, Inc.
si-en Si-En Technology Ltd.
+sifive SiFive, Inc.
sigma Sigma Designs, Inc.
sii Seiko Instruments, Inc.
sil Silicon Image
@@ -394,6 +400,7 @@ vot Vision Optical Technology Co., Ltd.
wd Western Digital Corp.
wetek WeTek Electronics, limited.
wexler Wexler
+wi2wi Wi2Wi, Inc.
winbond Winbond Electronics corp.
winstar Winstar Display Corp.
wlf Wolfson Microelectronics
diff --git a/Documentation/driver-api/clk.rst b/Documentation/driver-api/clk.rst
index 511628bb3d3a..593cca5058b1 100644
--- a/Documentation/driver-api/clk.rst
+++ b/Documentation/driver-api/clk.rst
@@ -96,7 +96,7 @@ the operations defined in clk-provider.h::
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
void (*init)(struct clk_hw *hw);
- int (*debug_init)(struct clk_hw *hw,
+ void (*debug_init)(struct clk_hw *hw,
struct dentry *dentry);
};
diff --git a/Documentation/driver-api/gpio/board.rst b/Documentation/driver-api/gpio/board.rst
index 25d62b2e9fd0..2c112553df84 100644
--- a/Documentation/driver-api/gpio/board.rst
+++ b/Documentation/driver-api/gpio/board.rst
@@ -177,3 +177,19 @@ mapping and is thus transparent to GPIO consumers.
A set of functions such as gpiod_set_value() is available to work with
the new descriptor-oriented interface.
+
+Boards using platform data can also hog GPIO lines by defining GPIO hog tables.
+
+.. code-block:: c
+
+ struct gpiod_hog gpio_hog_table[] = {
+ GPIO_HOG("gpio.0", 10, "foo", GPIO_ACTIVE_LOW, GPIOD_OUT_HIGH),
+ { }
+ };
+
+And the table can be added to the board code as follows::
+
+ gpiod_add_hogs(gpio_hog_table);
+
+The line will be hogged as soon as the gpiochip is created or - in case the
+chip was created earlier - when the hog table is registered.
diff --git a/Documentation/driver-api/gpio/drivers-on-gpio.rst b/Documentation/driver-api/gpio/drivers-on-gpio.rst
index 7da0c1dd1f7a..f3a189320e11 100644
--- a/Documentation/driver-api/gpio/drivers-on-gpio.rst
+++ b/Documentation/driver-api/gpio/drivers-on-gpio.rst
@@ -85,6 +85,10 @@ hardware descriptions such as device tree or ACPI:
any other serio bus to the system and makes it possible to connect drivers
for e.g. keyboards and other PS/2 protocol based devices.
+- cec-gpio: drivers/media/platform/cec-gpio/ is used to interact with a CEC
+ Consumer Electronics Control bus using only GPIO. It is used to communicate
+ with devices on the HDMI bus.
+
Apart from this there are special GPIO drivers in subsystems like MMC/SD to
read card detect and write protect GPIO lines, and in the TTY serial subsystem
to emulate MCTRL (modem control) signals CTS/RTS by using two GPIO lines. The
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index f4180e7c7ed5..6d9f2f9fe20e 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -36,6 +36,7 @@ available subsections can be seen below.
edac
scsi
libata
+ target
mtdnand
miscellaneous
w1
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index 31ad0fed6763..64b231d125e0 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -334,5 +334,5 @@ todo
~~~~
Parallel (fast/wide/ultra) SCSI, USB, SATA, SAS, Fibre Channel,
-FireWire, ATAPI devices, Infiniband, I2O, iSCSI, Parallel ports,
+FireWire, ATAPI devices, Infiniband, I2O, Parallel ports,
netlink...
diff --git a/Documentation/driver-api/target.rst b/Documentation/driver-api/target.rst
new file mode 100644
index 000000000000..4363611dd86d
--- /dev/null
+++ b/Documentation/driver-api/target.rst
@@ -0,0 +1,64 @@
+=================================
+target and iSCSI Interfaces Guide
+=================================
+
+Introduction and Overview
+=========================
+
+TBD
+
+Target core device interfaces
+=============================
+
+.. kernel-doc:: drivers/target/target_core_device.c
+ :export:
+
+Target core transport interfaces
+================================
+
+.. kernel-doc:: drivers/target/target_core_transport.c
+ :export:
+
+Target-supported userspace I/O
+==============================
+
+.. kernel-doc:: drivers/target/target_core_user.c
+ :doc: Userspace I/O
+
+.. kernel-doc:: include/uapi/linux/target_core_user.h
+ :doc: Ring Design
+
+iSCSI helper functions
+======================
+
+.. kernel-doc:: drivers/scsi/libiscsi.c
+ :export:
+
+
+iSCSI boot information
+======================
+
+.. kernel-doc:: drivers/scsi/iscsi_boot_sysfs.c
+ :export:
+
+
+iSCSI transport class
+=====================
+
+The file drivers/scsi/scsi_transport_iscsi.c defines transport
+attributes for the iSCSI class, which sends SCSI packets over TCP/IP
+connections.
+
+.. kernel-doc:: drivers/scsi/scsi_transport_iscsi.c
+ :export:
+
+
+iSCSI TCP interfaces
+====================
+
+.. kernel-doc:: drivers/scsi/iscsi_tcp.c
+ :internal:
+
+.. kernel-doc:: drivers/scsi/libiscsi_tcp.c
+ :export:
+
diff --git a/Documentation/features/vm/pte_special/arch-support.txt b/Documentation/features/vm/pte_special/arch-support.txt
index 6a608a6dcf71..a8378424bc98 100644
--- a/Documentation/features/vm/pte_special/arch-support.txt
+++ b/Documentation/features/vm/pte_special/arch-support.txt
@@ -1,6 +1,6 @@
#
# Feature name: pte_special
-# Kconfig: __HAVE_ARCH_PTE_SPECIAL
+# Kconfig: ARCH_HAS_PTE_SPECIAL
# description: arch supports the pte_special()/pte_mkspecial() VM APIs
#
-----------------------
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index b7bd6c9009cc..0937bade1099 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -10,8 +10,8 @@ afs.txt
- info and examples for the distributed AFS (Andrew File System) fs.
affs.txt
- info and mount options for the Amiga Fast File System.
-autofs4-mount-control.txt
- - info on device control operations for autofs4 module.
+autofs-mount-control.txt
+ - info on device control operations for autofs module.
automount-support.txt
- information about filesystem automount support.
befs.txt
@@ -89,8 +89,6 @@ locks.txt
- info on file locking implementations, flock() vs. fcntl(), etc.
mandatory-locking.txt
- info on the Linux implementation of Sys V mandatory file locking.
-ncpfs.txt
- - info on Novell Netware(tm) filesystem using NCP protocol.
nfs/
- nfs-related documentation.
nilfs2.txt
diff --git a/Documentation/filesystems/autofs4-mount-control.txt b/Documentation/filesystems/autofs-mount-control.txt
index e5177cb31a04..45edad6933cc 100644
--- a/Documentation/filesystems/autofs4-mount-control.txt
+++ b/Documentation/filesystems/autofs-mount-control.txt
@@ -1,5 +1,5 @@
-Miscellaneous Device control operations for the autofs4 kernel module
+Miscellaneous Device control operations for the autofs kernel module
====================================================================
The problem
@@ -164,7 +164,7 @@ possibility for future development due to the requirements of the
message bus architecture.
-autofs4 Miscellaneous Device mount control interface
+autofs Miscellaneous Device mount control interface
====================================================
The control interface is opening a device node, typically /dev/autofs.
@@ -244,7 +244,7 @@ The device node ioctl operations implemented by this interface are:
AUTOFS_DEV_IOCTL_VERSION
------------------------
-Get the major and minor version of the autofs4 device ioctl kernel module
+Get the major and minor version of the autofs device ioctl kernel module
implementation. It requires an initialized struct autofs_dev_ioctl as an
input parameter and sets the version information in the passed in structure.
It returns 0 on success or the error -EINVAL if a version mismatch is
@@ -254,7 +254,7 @@ detected.
AUTOFS_DEV_IOCTL_PROTOVER_CMD and AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD
------------------------------------------------------------------
-Get the major and minor version of the autofs4 protocol version understood
+Get the major and minor version of the autofs protocol version understood
by loaded module. This call requires an initialized struct autofs_dev_ioctl
with the ioctlfd field set to a valid autofs mount point descriptor
and sets the requested version number in version field of struct args_protover
@@ -404,4 +404,3 @@ type is also given we are looking for a particular autofs mount and if
a match isn't found a fail is returned. If the the located path is the
root of a mount 1 is returned along with the super magic of the mount
or 0 otherwise.
-
diff --git a/Documentation/filesystems/autofs4.txt b/Documentation/filesystems/autofs.txt
index f10dd590f69f..373ad25852d3 100644
--- a/Documentation/filesystems/autofs4.txt
+++ b/Documentation/filesystems/autofs.txt
@@ -30,15 +30,15 @@ key advantages:
Context
-------
-The "autofs4" filesystem module is only one part of an autofs system.
+The "autofs" filesystem module is only one part of an autofs system.
There also needs to be a user-space program which looks up names
and mounts filesystems. This will often be the "automount" program,
-though other tools including "systemd" can make use of "autofs4".
+though other tools including "systemd" can make use of "autofs".
This document describes only the kernel module and the interactions
required with any user-space program. Subsequent text refers to this
as the "automount daemon" or simply "the daemon".
-"autofs4" is a Linux kernel module with provides the "autofs"
+"autofs" is a Linux kernel module with provides the "autofs"
filesystem type. Several "autofs" filesystems can be mounted and they
can each be managed separately, or all managed by the same daemon.
@@ -215,7 +215,7 @@ of expiry.
The VFS also supports "expiry" of mounts using the MNT_EXPIRE flag to
the `umount` system call. Unmounting with MNT_EXPIRE will fail unless
a previous attempt had been made, and the filesystem has been inactive
-and untouched since that previous attempt. autofs4 does not depend on
+and untouched since that previous attempt. autofs does not depend on
this but has its own internal tracking of whether filesystems were
recently used. This allows individual names in the autofs directory
to expire separately.
@@ -415,7 +415,7 @@ which can be used to communicate directly with the autofs filesystem.
It requires CAP_SYS_ADMIN for access.
The `ioctl`s that can be used on this device are described in a separate
-document `autofs4-mount-control.txt`, and are summarized briefly here.
+document `autofs-mount-control.txt`, and are summarized briefly here.
Each ioctl is passed a pointer to an `autofs_dev_ioctl` structure:
struct autofs_dev_ioctl {
diff --git a/Documentation/filesystems/automount-support.txt b/Documentation/filesystems/automount-support.txt
index 7eb762eb3136..b0afd3d55eaf 100644
--- a/Documentation/filesystems/automount-support.txt
+++ b/Documentation/filesystems/automount-support.txt
@@ -9,7 +9,7 @@ also be requested by userspace.
IN-KERNEL AUTOMOUNTING
======================
-See section "Mount Traps" of Documentation/filesystems/autofs4.txt
+See section "Mount Traps" of Documentation/filesystems/autofs.txt
Then from userspace, you can just do something like:
diff --git a/Documentation/filesystems/ncpfs.txt b/Documentation/filesystems/ncpfs.txt
deleted file mode 100644
index 5af164f4b37b..000000000000
--- a/Documentation/filesystems/ncpfs.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-The ncpfs filesystem understands the NCP protocol, designed by the
-Novell Corporation for their NetWare(tm) product. NCP is functionally
-similar to the NFS used in the TCP/IP community.
-To mount a NetWare filesystem, you need a special mount program, which
-can be found in the ncpfs package. The home site for ncpfs is
-ftp.gwdg.de/pub/linux/misc/ncpfs, but sunsite and its many mirrors
-will have it as well.
-
-Related products are linware and mars_nwe, which will give Linux partial
-NetWare server functionality.
-
-mars_nwe can be found on ftp.gwdg.de/pub/linux/misc/ncpfs.
diff --git a/Documentation/filesystems/path-lookup.md b/Documentation/filesystems/path-lookup.md
index 1933ef734e63..e2edd45c4bc0 100644
--- a/Documentation/filesystems/path-lookup.md
+++ b/Documentation/filesystems/path-lookup.md
@@ -460,7 +460,7 @@ this retry process in the next article.
Automount points are locations in the filesystem where an attempt to
lookup a name can trigger changes to how that lookup should be
handled, in particular by mounting a filesystem there. These are
-covered in greater detail in autofs4.txt in the Linux documentation
+covered in greater detail in autofs.txt in the Linux documentation
tree, but a few notes specifically related to path lookup are in order
here.
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index a12488d45c40..480c8609dc58 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -151,7 +151,7 @@ Code Seq#(hex) Include File Comments
'J' 00-1F drivers/scsi/gdth_ioctl.h
'K' all linux/kd.h
'L' 00-1F linux/loop.h conflict!
-'L' 10-1F drivers/scsi/mpt2sas/mpt2sas_ctl.h conflict!
+'L' 10-1F drivers/scsi/mpt3sas/mpt3sas_ctl.h conflict!
'L' 20-2F linux/lightnvm.h
'L' E0-FF linux/ppdd.h encrypted disk device driver
<http://linux01.gwdg.de/~alatham/ppdd.html>
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index 1ae2de758c08..2d7ed09dbd59 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -429,30 +429,6 @@ See Documentation/ABI/testing/sysfs-kernel-livepatch for more details.
The current Livepatch implementation has several limitations:
-
- + The patch must not change the semantic of the patched functions.
-
- The current implementation guarantees only that either the old
- or the new function is called. The functions are patched one
- by one. It means that the patch must _not_ change the semantic
- of the function.
-
-
- + Data structures can not be patched.
-
- There is no support to version data structures or anyhow migrate
- one structure into another. Also the simple consistency model does
- not allow to switch more functions atomically.
-
- Once there is more complex consistency mode, it will be possible to
- use some workarounds. For example, it will be possible to use a hole
- for a new member because the data structure is aligned. Or it will
- be possible to use an existing member for something else.
-
- There are no plans to add more generic support for modified structures
- at the moment.
-
-
+ Only functions that can be traced could be patched.
Livepatch is based on the dynamic ftrace. In particular, functions
diff --git a/MAINTAINERS b/MAINTAINERS
index dc241b04d1bd..2c7069037a15 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -796,6 +796,14 @@ M: Michael Hanselmann <linux-kernel@hansmi.ch>
S: Supported
F: drivers/macintosh/ams/
+ANALOG DEVICES INC AD5686 DRIVER
+M: Stefan Popa <stefan.popa@analog.com>
+L: linux-pm@vger.kernel.org
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/iio/dac/ad5686*
+F: drivers/iio/dac/ad5696*
+
ANALOG DEVICES INC AD9389B DRIVER
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
@@ -1135,10 +1143,12 @@ F: arch/arm/mach-actions/
F: arch/arm/boot/dts/owl-*
F: arch/arm64/boot/dts/actions/
F: drivers/clocksource/owl-*
+F: drivers/pinctrl/actions/*
F: drivers/soc/actions/
F: include/dt-bindings/power/owl-*
F: include/linux/soc/actions/
F: Documentation/devicetree/bindings/arm/actions.txt
+F: Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
F: Documentation/devicetree/bindings/power/actions,owl-sps.txt
F: Documentation/devicetree/bindings/timer/actions,owl-timer.txt
@@ -2531,8 +2541,6 @@ F: kernel/audit*
AUXILIARY DISPLAY DRIVERS
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: drivers/auxdisplay/
F: include/linux/cfag12864b.h
@@ -3382,16 +3390,12 @@ F: include/linux/usb/wusb*
CFAG12864B LCD DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: drivers/auxdisplay/cfag12864b.c
F: include/linux/cfag12864b.h
CFAG12864BFB LCD FRAMEBUFFER DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: drivers/auxdisplay/cfag12864bfb.c
F: include/linux/cfag12864b.h
@@ -3583,6 +3587,7 @@ F: drivers/clk/
X: drivers/clk/clkdev.c
F: include/linux/clk-pr*
F: include/linux/clk/
+F: include/linux/of_clk.h
COMMON INTERNET FILE SYSTEM (CIFS)
M: Steve French <sfrench@samba.org>
@@ -4425,6 +4430,12 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/staging/fsl-dpaa2/ethsw
+DPAA2 PTP CLOCK DRIVER
+M: Yangbo Lu <yangbo.lu@nxp.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/staging/fsl-dpaa2/rtc
+
DPT_I2O SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <aacraid@microsemi.com>
L: linux-scsi@vger.kernel.org
@@ -6972,6 +6983,15 @@ F: drivers/staging/iio/
F: include/linux/iio/
F: tools/iio/
+IIO UNIT CONVERTER
+M: Peter Rosin <peda@axentia.se>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt
+F: Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt
+F: Documentation/devicetree/bindings/iio/afe/voltage-divider.txt
+F: drivers/iio/afe/iio-rescale.c
+
IKANOS/ADI EAGLE ADSL USB DRIVER
M: Matthieu Castet <castet.matthieu@free.fr>
M: Stanislaw Gruszka <stf_xl@wp.pl>
@@ -7727,11 +7747,11 @@ W: https://linuxtv.org
S: Maintained
F: drivers/media/radio/radio-keene*
-KERNEL AUTOMOUNTER v4 (AUTOFS4)
+KERNEL AUTOMOUNTER
M: Ian Kent <raven@themaw.net>
L: autofs@vger.kernel.org
S: Maintained
-F: fs/autofs4/
+F: fs/autofs/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <yamada.masahiro@socionext.com>
@@ -7975,8 +7995,6 @@ F: kernel/kprobes.c
KS0108 LCD CONTROLLER DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: Documentation/auxdisplay/ks0108
F: drivers/auxdisplay/ks0108.c
@@ -8034,6 +8052,13 @@ S: Maintained
F: Documentation/misc-devices/eeprom
F: drivers/misc/eeprom/eeprom.c
+LEGO MINDSTORMS EV3
+R: David Lechner <david@lechnology.com>
+S: Maintained
+F: arch/arm/boot/dts/da850-lego-ev3.dts
+F: Documentation/devicetree/bindings/power/supply/lego_ev3_battery.txt
+F: drivers/power/supply/lego_ev3_battery.c
+
LEGO USB Tower driver
M: Juergen Stuber <starblue@users.sourceforge.net>
L: legousb-devel@lists.sourceforge.net
@@ -8421,7 +8446,6 @@ L: linux-scsi@vger.kernel.org
W: http://www.avagotech.com/support/
S: Supported
F: drivers/message/fusion/
-F: drivers/scsi/mpt2sas/
F: drivers/scsi/mpt3sas/
LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers
@@ -9028,6 +9052,13 @@ L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt7601u/
+MEDIATEK NAND CONTROLLER DRIVER
+M: Xiaolei Li <xiaolei.li@mediatek.com>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: drivers/mtd/nand/raw/mtk_*
+F: Documentation/devicetree/bindings/mtd/mtk-nand.txt
+
MEDIATEK RANDOM NUMBER GENERATOR SUPPORT
M: Sean Wang <sean.wang@mediatek.com>
S: Maintained
@@ -9484,6 +9515,13 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/dvb-frontends/mn88473*
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F: drivers/pci/host/pcie-mobiveil.c
+
MODULE SUPPORT
M: Jessica Yu <jeyu@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -9665,6 +9703,7 @@ F: drivers/net/ethernet/myricom/myri10ge/
NAND FLASH SUBSYSTEM
M: Boris Brezillon <boris.brezillon@bootlin.com>
+M: Miquel Raynal <miquel.raynal@bootlin.com>
R: Richard Weinberger <richard@nod.at>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
@@ -10509,12 +10548,14 @@ F: drivers/infiniband/ulp/opa_vnic
OPEN FIRMWARE AND DEVICE TREE OVERLAYS
M: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
+M: Frank Rowand <frowand.list@gmail.com>
L: devicetree@vger.kernel.org
S: Maintained
F: Documentation/devicetree/dynamic-resolution-notes.txt
F: Documentation/devicetree/overlay-notes.txt
F: drivers/of/overlay.c
F: drivers/of/resolver.c
+K: of_overlay_notifier_
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh+dt@kernel.org>
@@ -10826,9 +10867,9 @@ F: Documentation/devicetree/bindings/pci/cdns,*.txt
F: drivers/pci/cadence/pcie-cadence*
PCI DRIVER FOR FREESCALE LAYERSCAPE
-M: Minghuan Lian <minghuan.Lian@freescale.com>
-M: Mingkai Hu <mingkai.hu@freescale.com>
-M: Roy Zang <tie-fei.zang@freescale.com>
+M: Minghuan Lian <minghuan.Lian@nxp.com>
+M: Mingkai Hu <mingkai.hu@nxp.com>
+M: Roy Zang <roy.zang@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org
@@ -11054,8 +11095,8 @@ M: Shawn Lin <shawn.lin@rock-chips.com>
L: linux-pci@vger.kernel.org
L: linux-rockchip@lists.infradead.org
S: Maintained
-F: Documentation/devicetree/bindings/pci/rockchip-pcie.txt
-F: drivers/pci/host/pcie-rockchip.c
+F: Documentation/devicetree/bindings/pci/rockchip-pcie*
+F: drivers/pci/host/pcie-rockchip*
PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC
M: Linus Walleij <linus.walleij@linaro.org>
@@ -11218,6 +11259,7 @@ L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
+F: drivers/pinctrl/mediatek/mtk-eint.*
F: drivers/pinctrl/mediatek/pinctrl-mtk-common.*
F: drivers/pinctrl/mediatek/pinctrl-mt2701.c
F: drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -11778,6 +11820,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.g
S: Supported
F: arch/hexagon/
+QUALCOMM HIDMA DRIVER
+M: Sinan Kaya <okaya@codeaurora.org>
+L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-msm@vger.kernel.org
+L: dmaengine@vger.kernel.org
+S: Supported
+F: drivers/dma/qcom/hidma*
+
QUALCOMM IOMMU
M: Rob Clark <robdclark@gmail.com>
L: iommu@lists.linux-foundation.org
@@ -12083,6 +12133,18 @@ F: include/dt-bindings/reset/
F: include/linux/reset.h
F: include/linux/reset-controller.h
+RESTARTABLE SEQUENCES SUPPORT
+M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+M: Peter Zijlstra <peterz@infradead.org>
+M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: Boqun Feng <boqun.feng@gmail.com>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: kernel/rseq.c
+F: include/uapi/linux/rseq.h
+F: include/trace/events/rseq.h
+F: tools/testing/selftests/rseq/
+
RFKILL
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
@@ -13413,15 +13475,6 @@ S: Odd Fixes
F: Documentation/devicetree/bindings/staging/iio/
F: drivers/staging/iio/
-STAGING - LUSTRE PARALLEL FILESYSTEM
-M: Oleg Drokin <oleg.drokin@intel.com>
-M: Andreas Dilger <andreas.dilger@intel.com>
-M: James Simmons <jsimmons@infradead.org>
-L: lustre-devel@lists.lustre.org (moderated for non-subscribers)
-W: http://wiki.lustre.org/
-S: Maintained
-F: drivers/staging/lustre
-
STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
M: Marc Dietrich <marvin24@gmx.de>
L: ac100@lists.launchpad.net (moderated for non-subscribers)
@@ -13589,6 +13642,12 @@ S: Supported
F: net/switchdev/
F: include/net/switchdev.h
+SY8106A REGULATOR DRIVER
+M: Icenowy Zheng <icenowy@aosc.io>
+S: Maintained
+F: drivers/regulator/sy8106a-regulator.c
+F: Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt
+
SYNC FILE FRAMEWORK
M: Sumit Semwal <sumit.semwal@linaro.org>
R: Gustavo Padovan <gustavo@padovan.org>
diff --git a/arch/Kconfig b/arch/Kconfig
index 8a7f7e1f2ca7..86ae4c4edd6f 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -272,6 +272,13 @@ config HAVE_REGS_AND_STACK_ACCESS_API
declared in asm/ptrace.h
For example the kprobes-based event tracer needs this API.
+config HAVE_RSEQ
+ bool
+ depends on HAVE_REGS_AND_STACK_ACCESS_API
+ help
+ This symbol should be selected by an architecture if it
+ supports an implementation of restartable sequences.
+
config HAVE_CLK
bool
help
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 89d47eac18b2..e81bcd271be7 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -48,6 +48,7 @@ config ARC
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
+ select ARCH_HAS_PTE_SPECIAL
config MIGHT_HAVE_PCI
bool
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 08fe33830d4b..8ec5599a0957 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -320,8 +320,6 @@ PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ));
-#define __HAVE_ARCH_PTE_SPECIAL
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8f460bdd4be1..94d222545920 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -8,6 +8,7 @@ config ARM
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
+ select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
@@ -90,6 +91,7 @@ config ARM
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
select HAVE_VIRT_CPU_ACCOUNTING_GEN
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 200714e3feea..d74dd7f19507 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -120,7 +120,7 @@
<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_ENET_AXI_ROOT_CLK>,
+ clocks = <&clks IMX7D_ENET2_IPG_ROOT_CLK>,
<&clks IMX7D_ENET_AXI_ROOT_CLK>,
<&clks IMX7D_ENET2_TIME_ROOT_CLK>,
<&clks IMX7D_PLL_ENET_MAIN_125M_CLK>,
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index ce85b3ca1a55..69436b9a404c 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -1092,7 +1092,7 @@
<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_ENET_AXI_ROOT_CLK>,
+ clocks = <&clks IMX7D_ENET1_IPG_ROOT_CLK>,
<&clks IMX7D_ENET_AXI_ROOT_CLK>,
<&clks IMX7D_ENET1_TIME_ROOT_CLK>,
<&clks IMX7D_PLL_ENET_MAIN_125M_CLK>,
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 2b913f17d50f..ad574d20415c 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
@@ -174,6 +175,7 @@ bool mcpm_is_available(void)
{
return (platform_ops) ? true : false;
}
+EXPORT_SYMBOL_GPL(mcpm_is_available);
/*
* We can't use regular spinlocks. In the switcher case, it is possible
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 343fc9e6f78d..2d75e77bf7bb 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -325,6 +325,18 @@ static inline bool kvm_arm_harden_branch_predictor(void)
}
}
+#define KVM_SSBD_UNKNOWN -1
+#define KVM_SSBD_FORCE_DISABLE 0
+#define KVM_SSBD_KERNEL 1
+#define KVM_SSBD_FORCE_ENABLE 2
+#define KVM_SSBD_MITIGATED 3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+ /* No way to detect it yet, pretend it is not there. */
+ return KVM_SSBD_UNKNOWN;
+}
+
static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index c94d291fd1a8..8553d68b7c8a 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -356,6 +356,11 @@ static inline int kvm_map_vectors(void)
return 0;
}
+static inline int hyp_map_aux_data(void)
+{
+ return 0;
+}
+
#define kvm_phys_to_vttbr(addr) (addr)
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 2a4836087358..6d50a11d7793 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -219,7 +219,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
pte_val(pte) |= L_PTE_SPECIAL;
return pte;
}
-#define __HAVE_ARCH_PTE_SPECIAL
#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 20df608bf343..106a1466518d 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -39,12 +39,13 @@ saved_pc .req lr
.section .entry.text,"ax",%progbits
.align 5
-#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
+ IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
* This is the fast syscall return path. We do as little as possible here,
* such as avoiding writing r0 to the stack. We only use this path if we
- * have tracing and context tracking disabled - the overheads from those
- * features make this path too inefficient.
+ * have tracing, context tracking and rseq debug disabled - the overheads
+ * from those features make this path too inefficient.
*/
ret_fast_syscall:
UNWIND(.fnstart )
@@ -71,14 +72,20 @@ fast_work_pending:
/* fall through to work_pending */
#else
/*
- * The "replacement" ret_fast_syscall for when tracing or context tracking
- * is enabled. As we will need to call out to some C functions, we save
- * r0 first to avoid needing to save registers around each C function call.
+ * The "replacement" ret_fast_syscall for when tracing, context tracking,
+ * or rseq debug is enabled. As we will need to call out to some C functions,
+ * we save r0 first to avoid needing to save registers around each C function
+ * call.
*/
ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+ /* do_rseq_syscall needs interrupts enabled. */
+ mov r0, sp @ 'regs'
+ bl do_rseq_syscall
+#endif
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
@@ -113,6 +120,12 @@ ENDPROC(ret_fast_syscall)
*/
ENTRY(ret_to_user)
ret_slow_syscall:
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+ /* do_rseq_syscall needs interrupts enabled. */
+ enable_irq_notrace @ enable interrupts
+ mov r0, sp @ 'regs'
+ bl do_rseq_syscall
+#endif
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r2, [tsk, #TI_ADDR_LIMIT]
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 1d7061a38922..be42c4f66a40 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -303,12 +303,10 @@ static void armv6pmu_enable_event(struct perf_event *event)
}
static irqreturn_t
-armv6pmu_handle_irq(int irq_num,
- void *dev)
+armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 870b66c1e4ef..57f01e059f39 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -946,11 +946,10 @@ static void armv7pmu_disable_event(struct perf_event *event)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
u32 pmnc;
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index fcf218da660e..88d1a76f5367 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -142,11 +142,10 @@ xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
}
static irqreturn_t
-xscale1pmu_handle_irq(int irq_num, void *dev)
+xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
unsigned long pmnc;
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -489,11 +488,10 @@ xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
}
static irqreturn_t
-xscale2pmu_handle_irq(int irq_num, void *dev)
+xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index bd8810d4acb3..f09e9d66d605 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -541,6 +541,12 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
int ret;
/*
+ * Increment event counter and perform fixup for the pre-signal
+ * frame.
+ */
+ rseq_signal_deliver(regs);
+
+ /*
* Set up the stack frame
*/
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
@@ -660,6 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(regs);
}
}
local_irq_disable();
@@ -703,3 +710,10 @@ asmlinkage void addr_limit_check_failed(void)
{
addr_limit_user_check();
}
+
+#ifdef CONFIG_DEBUG_RSEQ
+asmlinkage void do_rseq_syscall(struct pt_regs *regs)
+{
+ rseq_syscall(regs);
+}
+#endif
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index f00988705408..5aa472892465 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
+#include <linux/gpio/machine.h>
#include <linux/mfd/wm831x/irq.h>
#include <linux/mfd/wm831x/gpio.h>
@@ -206,9 +207,6 @@ static const struct i2c_board_info wm1277_devs[] = {
};
static struct arizona_pdata wm5102_reva_pdata = {
- .ldo1 = {
- .ldoena = S3C64XX_GPN(7),
- },
.gpio_base = CODEC_GPIO_BASE,
.irq_flags = IRQF_TRIGGER_HIGH,
.micd_pol_gpio = CODEC_GPIO_BASE + 4,
@@ -237,10 +235,16 @@ static struct spi_board_info wm5102_reva_spi_devs[] = {
},
};
-static struct arizona_pdata wm5102_pdata = {
- .ldo1 = {
- .ldoena = S3C64XX_GPN(7),
+static struct gpiod_lookup_table wm5102_reva_gpiod_table = {
+ .dev_id = "spi0.1", /* SPI device name */
+ .table = {
+ GPIO_LOOKUP("GPION", 7,
+ "wlf,ldoena", GPIO_ACTIVE_HIGH),
+ { },
},
+};
+
+static struct arizona_pdata wm5102_pdata = {
.gpio_base = CODEC_GPIO_BASE,
.irq_flags = IRQF_TRIGGER_HIGH,
.micd_pol_gpio = CODEC_GPIO_BASE + 2,
@@ -264,6 +268,15 @@ static struct spi_board_info wm5102_spi_devs[] = {
},
};
+static struct gpiod_lookup_table wm5102_gpiod_table = {
+ .dev_id = "spi0.1", /* SPI device name */
+ .table = {
+ GPIO_LOOKUP("GPION", 7,
+ "wlf,ldo1ena", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static struct spi_board_info wm5110_spi_devs[] = {
[0] = {
.modalias = "wm5110",
@@ -366,6 +379,9 @@ static int wlf_gf_module_probe(struct i2c_client *i2c,
rev == gf_mods[i].rev))
break;
+ gpiod_add_lookup_table(&wm5102_reva_gpiod_table);
+ gpiod_add_lookup_table(&wm5102_gpiod_table);
+
if (i < ARRAY_SIZE(gf_mods)) {
dev_info(&i2c->dev, "%s revision %d\n",
gf_mods[i].name, rev + 1);
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 0bb0e9c6376c..fbc74b5fa3ed 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -412,3 +412,4 @@
395 common pkey_alloc sys_pkey_alloc
396 common pkey_free sys_pkey_free
397 common statx sys_statx
+398 common rseq sys_rseq
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index ba7f4c8f5c3e..8073625371f5 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -89,6 +89,17 @@ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
+/* Not used by XENFEAT_auto_translated guests. */
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid, struct page **pages)
+{
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
static void xen_read_wallclock(struct timespec64 *ts)
{
u32 version;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b25ed7834f6c..9795b59aa28a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -7,16 +7,19 @@ config ARM64
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
select ACPI_MCFG if ACPI
select ACPI_SPCR_TABLE if ACPI
+ select ACPI_PPTT if ACPI
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
select ARCH_HAS_MEMBARRIER_SYNC_CORE
+ select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
@@ -922,6 +925,15 @@ config HARDEN_EL2_VECTORS
If unsure, say Y.
+config ARM64_SSBD
+ bool "Speculative Store Bypass Disable" if EXPERT
+ default y
+ help
+ This enables mitigation of the bypassing of previous stores
+ by speculative loads.
+
+ If unsure, say Y.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
@@ -1033,6 +1045,7 @@ config ARM64_PAN
config ARM64_LSE_ATOMICS
bool "Atomic instructions"
+ default y
help
As part of the Large System Extensions, ARMv8.1 introduces new
atomic instructions that are designed specifically to scale in
@@ -1041,7 +1054,8 @@ config ARM64_LSE_ATOMICS
Say Y here to make use of these instructions for the in-kernel
atomic routines. This incurs a small overhead on CPUs that do
not support these instructions and requires the kernel to be
- built with binutils >= 2.25.
+ built with binutils >= 2.25 in order for the new instructions
+ to be used.
config ARM64_VHE
bool "Enable support for Virtualization Host Extensions (VHE)"
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index ecf613761e78..17ea72b1b389 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -78,7 +78,8 @@ CONFIG_PCIE_ARMADA_8K=y
CONFIG_PCI_AARDVARK=y
CONFIG_PCI_TEGRA=y
CONFIG_PCIE_RCAR=y
-CONFIG_PCIE_ROCKCHIP=m
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCI_XGENE=y
CONFIG_PCI_HOST_THUNDER_PEM=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 32f465a80e4e..0db62a4cbce2 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -86,6 +86,10 @@ static inline bool acpi_has_cpu_in_madt(void)
}
struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
+static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
+{
+ return acpi_cpu_get_madt_gicc(cpu)->uid;
+}
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 9bbffc7a301f..5df5cfe1c143 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -33,7 +33,7 @@
#define ICACHE_POLICY_VIPT 2
#define ICACHE_POLICY_PIPT 3
-#define L1_CACHE_SHIFT 7
+#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/*
@@ -43,7 +43,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN (128)
#ifndef __ASSEMBLY__
@@ -77,7 +77,7 @@ static inline u32 cache_type_cwg(void)
static inline int cache_line_size(void)
{
u32 cwg = cache_type_cwg();
- return cwg ? 4 << cwg : L1_CACHE_BYTES;
+ return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 4f5fd2a36e6e..3b0938281541 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -204,7 +204,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \
unsigned long tmp; \
\
asm volatile( \
- " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
+ " sevl\n" \
+ " wfe\n" \
+ " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
" eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
" cbnz %" #w "[tmp], 1f\n" \
" wfe\n" \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index bc51b72fafd4..8a699c708fc9 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -48,7 +48,8 @@
#define ARM64_HAS_CACHE_IDC 27
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
+#define ARM64_SSBD 30
-#define ARM64_NCAPS 30
+#define ARM64_NCAPS 31
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 09b0f2a80c8f..55bc1f073bfb 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -537,6 +537,28 @@ static inline u64 read_zcr_features(void)
return zcr;
}
+#define ARM64_SSBD_UNKNOWN -1
+#define ARM64_SSBD_FORCE_DISABLE 0
+#define ARM64_SSBD_KERNEL 1
+#define ARM64_SSBD_FORCE_ENABLE 2
+#define ARM64_SSBD_MITIGATED 3
+
+static inline int arm64_get_ssbd_state(void)
+{
+#ifdef CONFIG_ARM64_SSBD
+ extern int ssbd_state;
+ return ssbd_state;
+#else
+ return ARM64_SSBD_UNKNOWN;
+#endif
+}
+
+#ifdef CONFIG_ARM64_SSBD
+void arm64_set_ssbd_mitigation(bool state);
+#else
+static inline void arm64_set_ssbd_mitigation(bool state) {}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index fac1c4de7898..433b9554c6a1 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -121,6 +121,9 @@
#ifndef __ASSEMBLY__
+#include <linux/bug.h>
+#include <asm/processor.h> /* for signal_minsigstksz, used by ARCH_DLINFO */
+
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
@@ -148,6 +151,16 @@ typedef struct user_fpsimd_state elf_fpregset_t;
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(elf_addr_t)current->mm->context.vdso); \
+ \
+ /* \
+ * Should always be nonzero unless there's a kernel bug. \
+ * If we haven't determined a sensible value to give to \
+ * userspace, omit the entry: \
+ */ \
+ if (likely(signal_minsigstksz)) \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, signal_minsigstksz); \
+ else \
+ NEW_AUX_ENT(AT_IGNORE, 0); \
} while (0)
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index e050d765ca9e..46843515d77b 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -207,12 +207,14 @@
str w\nxtmp, [\xpfpsr, #4]
.endm
-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp
+.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
mrs_s x\nxtmp, SYS_ZCR_EL1
- bic x\nxtmp, x\nxtmp, ZCR_ELx_LEN_MASK
- orr x\nxtmp, x\nxtmp, \xvqminus1
- msr_s SYS_ZCR_EL1, x\nxtmp // self-synchronising
-
+ bic \xtmp2, x\nxtmp, ZCR_ELx_LEN_MASK
+ orr \xtmp2, \xtmp2, \xvqminus1
+ cmp \xtmp2, x\nxtmp
+ b.eq 921f
+ msr_s SYS_ZCR_EL1, \xtmp2 // self-synchronising
+921:
_for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
_sve_ldr_p 0, \nxbase
_sve_wrffr 0
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index f6648a3e4152..951b2076a5e2 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -20,6 +20,9 @@
#include <asm/virt.h>
+#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
+#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
+
#define ARM_EXIT_WITH_SERROR_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_SERROR_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
@@ -71,14 +74,37 @@ extern u32 __kvm_get_mdcr_el2(void);
extern u32 __init_stage2_translation(void);
+/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+#define __hyp_this_cpu_ptr(sym) \
+ ({ \
+ void *__ptr = hyp_symbol_addr(sym); \
+ __ptr += read_sysreg(tpidr_el2); \
+ (typeof(&sym))__ptr; \
+ })
+
+#define __hyp_this_cpu_read(sym) \
+ ({ \
+ *__hyp_this_cpu_ptr(sym); \
+ })
+
#else /* __ASSEMBLY__ */
-.macro get_host_ctxt reg, tmp
- adr_l \reg, kvm_host_cpu_state
+.macro hyp_adr_this_cpu reg, sym, tmp
+ adr_l \reg, \sym
mrs \tmp, tpidr_el2
add \reg, \reg, \tmp
.endm
+.macro hyp_ldr_this_cpu reg, sym, tmp
+ adr_l \reg, \sym
+ mrs \tmp, tpidr_el2
+ ldr \reg, [\reg, \tmp]
+.endm
+
+.macro get_host_ctxt reg, tmp
+ hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+.endm
+
.macro get_vcpu_ptr vcpu, ctxt
get_host_ctxt \ctxt, \vcpu
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 469de8acd06f..95d8a0e15b5f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -216,6 +216,9 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
+ /* State of various workarounds, see kvm_asm.h for bit assignment */
+ u64 workaround_flags;
+
/* Guest debug state */
u64 debug_flags;
@@ -452,6 +455,29 @@ static inline bool kvm_arm_harden_branch_predictor(void)
return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}
+#define KVM_SSBD_UNKNOWN -1
+#define KVM_SSBD_FORCE_DISABLE 0
+#define KVM_SSBD_KERNEL 1
+#define KVM_SSBD_FORCE_ENABLE 2
+#define KVM_SSBD_MITIGATED 3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+ switch (arm64_get_ssbd_state()) {
+ case ARM64_SSBD_FORCE_DISABLE:
+ return KVM_SSBD_FORCE_DISABLE;
+ case ARM64_SSBD_KERNEL:
+ return KVM_SSBD_KERNEL;
+ case ARM64_SSBD_FORCE_ENABLE:
+ return KVM_SSBD_FORCE_ENABLE;
+ case ARM64_SSBD_MITIGATED:
+ return KVM_SSBD_MITIGATED;
+ case ARM64_SSBD_UNKNOWN:
+ default:
+ return KVM_SSBD_UNKNOWN;
+ }
+}
+
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6128992c2ded..fb9a7127bb75 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -72,7 +72,6 @@
#ifdef __ASSEMBLY__
#include <asm/alternative.h>
-#include <asm/cpufeature.h>
/*
* Convert a kernel VA into a HYP VA.
@@ -473,6 +472,30 @@ static inline int kvm_map_vectors(void)
}
#endif
+#ifdef CONFIG_ARM64_SSBD
+DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+static inline int hyp_map_aux_data(void)
+{
+ int cpu, err;
+
+ for_each_possible_cpu(cpu) {
+ u64 *ptr;
+
+ ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
+ err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+#else
+static inline int hyp_map_aux_data(void)
+{
+ return 0;
+}
+#endif
+
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7c4c8f318ba9..9f82d6b53851 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -306,8 +306,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#define __HAVE_ARCH_PTE_SPECIAL
-
static inline pte_t pgd_pte(pgd_t pgd)
{
return __pte(pgd_val(pgd));
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 767598932549..65ab83e8926e 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -35,6 +35,8 @@
#ifdef __KERNEL__
#include <linux/build_bug.h>
+#include <linux/cache.h>
+#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>
@@ -244,6 +246,9 @@ void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
+extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
+extern void __init minsigstksz_setup(void);
+
/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL() sve_get_current_vl()
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 740aa03c5f0d..cbcf11b5e637 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -94,6 +94,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_32BIT 22 /* 32bit process */
#define TIF_SVE 23 /* Scalable Vector Extension in use */
#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
+#define TIF_SSBD 25 /* Wants SSB mitigation */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index c4f2d50491eb..df48212f767b 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -7,14 +7,16 @@
struct cpu_topology {
int thread_id;
int core_id;
- int cluster_id;
+ int package_id;
+ int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
+ cpumask_t llc_siblings;
};
extern struct cpu_topology cpu_topology[NR_CPUS];
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h
index ec0a86d484e1..743c0b84fd30 100644
--- a/arch/arm64/include/uapi/asm/auxvec.h
+++ b/arch/arm64/include/uapi/asm/auxvec.h
@@ -19,7 +19,8 @@
/* vDSO location */
#define AT_SYSINFO_EHDR 33
+#define AT_MINSIGSTKSZ 51 /* stack needed for signal delivery */
-#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index bf825f38d206..0025f8691046 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
+arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 6e47fc3ab549..97d45d5151d4 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
+#include <linux/uaccess.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
@@ -20,8 +21,6 @@
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/kprobes.h>
-#include <linux/uaccess.h>
-#include <asm/cpufeature.h>
#define CREATE_TRACE_POINTS
#include "trace-events-emulation.h"
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 5bdda651bd05..323aeb5f2fe6 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -136,6 +136,7 @@ int main(void)
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
+ DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 380f2e2fbed5..0bf0a835122f 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/of.h>
@@ -46,7 +47,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
static int __init_cache_level(unsigned int cpu)
{
- unsigned int ctype, level, leaves, of_level;
+ unsigned int ctype, level, leaves, fw_level;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
@@ -59,15 +60,19 @@ static int __init_cache_level(unsigned int cpu)
leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
}
- of_level = of_find_last_cache_level(cpu);
- if (level < of_level) {
+ if (acpi_disabled)
+ fw_level = of_find_last_cache_level(cpu);
+ else
+ fw_level = acpi_find_last_cache_level(cpu);
+
+ if (level < fw_level) {
/*
* some external caches not specified in CLIDR_EL1
* the information may be available in the device tree
* only unified external caches are considered here
*/
- leaves += (of_level - level);
- level = of_level;
+ leaves += (fw_level - level);
+ level = fw_level;
}
this_cpu_ci->num_levels = level;
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index e4a1182deff7..1d2b6d768efe 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -16,6 +16,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
+#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
@@ -232,6 +234,178 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+#ifdef CONFIG_ARM64_SSBD
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+
+static const struct ssbd_options {
+ const char *str;
+ int state;
+} ssbd_options[] = {
+ { "force-on", ARM64_SSBD_FORCE_ENABLE, },
+ { "force-off", ARM64_SSBD_FORCE_DISABLE, },
+ { "kernel", ARM64_SSBD_KERNEL, },
+};
+
+static int __init ssbd_cfg(char *buf)
+{
+ int i;
+
+ if (!buf || !buf[0])
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
+ int len = strlen(ssbd_options[i].str);
+
+ if (strncmp(buf, ssbd_options[i].str, len))
+ continue;
+
+ ssbd_state = ssbd_options[i].state;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+early_param("ssbd", ssbd_cfg);
+
+void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+{
+ u32 insn;
+
+ BUG_ON(nr_inst != 1);
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ insn = aarch64_insn_get_hvc_value();
+ break;
+ case PSCI_CONDUIT_SMC:
+ insn = aarch64_insn_get_smc_value();
+ break;
+ default:
+ return;
+ }
+
+ *updptr = cpu_to_le32(insn);
+}
+
+void __init arm64_enable_wa2_handling(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+{
+ BUG_ON(nr_inst != 1);
+ /*
+ * Only allow mitigation on EL1 entry/exit and guest
+ * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
+ * be flipped.
+ */
+ if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
+ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+void arm64_set_ssbd_mitigation(bool state)
+{
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ struct arm_smccc_res res;
+ bool required = true;
+ s32 val;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+ ssbd_state = ARM64_SSBD_UNKNOWN;
+ return false;
+ }
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+ break;
+
+ default:
+ ssbd_state = ARM64_SSBD_UNKNOWN;
+ return false;
+ }
+
+ val = (s32)res.a0;
+
+ switch (val) {
+ case SMCCC_RET_NOT_SUPPORTED:
+ ssbd_state = ARM64_SSBD_UNKNOWN;
+ return false;
+
+ case SMCCC_RET_NOT_REQUIRED:
+ pr_info_once("%s mitigation not required\n", entry->desc);
+ ssbd_state = ARM64_SSBD_MITIGATED;
+ return false;
+
+ case SMCCC_RET_SUCCESS:
+ required = true;
+ break;
+
+ case 1: /* Mitigation not required on this CPU */
+ required = false;
+ break;
+
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ switch (ssbd_state) {
+ case ARM64_SSBD_FORCE_DISABLE:
+ pr_info_once("%s disabled from command-line\n", entry->desc);
+ arm64_set_ssbd_mitigation(false);
+ required = false;
+ break;
+
+ case ARM64_SSBD_KERNEL:
+ if (required) {
+ __this_cpu_write(arm64_ssbd_callback_required, 1);
+ arm64_set_ssbd_mitigation(true);
+ }
+ break;
+
+ case ARM64_SSBD_FORCE_ENABLE:
+ pr_info_once("%s forced from command-line\n", entry->desc);
+ arm64_set_ssbd_mitigation(true);
+ required = true;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return required;
+}
+#endif /* CONFIG_ARM64_SSBD */
+
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -488,6 +662,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
},
#endif
+#ifdef CONFIG_ARM64_SSBD
+ {
+ .desc = "Speculative Store Bypass Disable",
+ .capability = ARM64_SSBD,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_ssbd_mitigation,
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9d1b06d67c53..d2856b129097 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1606,7 +1606,6 @@ static void __init setup_system_capabilities(void)
void __init setup_cpu_features(void)
{
u32 cwg;
- int cls;
setup_system_capabilities();
mark_const_caps_ready();
@@ -1619,6 +1618,7 @@ void __init setup_cpu_features(void)
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
sve_setup();
+ minsigstksz_setup();
/* Advertise that we have computed the system capabilities */
set_sys_caps_initialised();
@@ -1627,13 +1627,9 @@ void __init setup_cpu_features(void)
* Check for sane CTR_EL0.CWG value.
*/
cwg = cache_type_cwg();
- cls = cache_line_size();
if (!cwg)
- pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
- cls);
- if (L1_CACHE_BYTES < cls)
- pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
- L1_CACHE_BYTES, cls);
+ pr_warn("No Cache Writeback Granule information, assuming %d\n",
+ ARCH_DMA_MINALIGN);
}
static bool __maybe_unused
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 73f17bffcd23..12d4958e6429 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -49,7 +49,7 @@ ENTRY(sve_save_state)
ENDPROC(sve_save_state)
ENTRY(sve_load_state)
- sve_load 0, x1, x2, 3
+ sve_load 0, x1, x2, 3, x4
ret
ENDPROC(sve_load_state)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ec2ee720e33e..28ad8799406f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -18,6 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
@@ -137,6 +138,25 @@ alternative_else_nop_endif
add \dst, \dst, #(\sym - .entry.tramp.text)
.endm
+ // This macro corrupts x0-x3. It is the caller's duty
+ // to save/restore them if required.
+ .macro apply_ssbd, state, targ, tmp1, tmp2
+#ifdef CONFIG_ARM64_SSBD
+alternative_cb arm64_enable_wa2_handling
+ b \targ
+alternative_cb_end
+ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
+ cbz \tmp2, \targ
+ ldr \tmp2, [tsk, #TSK_TI_FLAGS]
+ tbnz \tmp2, #TIF_SSBD, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+alternative_cb arm64_update_smccc_conduit
+ nop // Patched to SMC/HVC #0
+alternative_cb_end
+#endif
+ .endm
+
.macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
@@ -163,6 +183,14 @@ alternative_else_nop_endif
ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
+ apply_ssbd 1, 1f, x22, x23
+
+#ifdef CONFIG_ARM64_SSBD
+ ldp x0, x1, [sp, #16 * 0]
+ ldp x2, x3, [sp, #16 * 1]
+#endif
+1:
+
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
@@ -303,6 +331,8 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
+ apply_ssbd 0, 5f, x0, x1
+5:
.endif
msr elr_el1, x21 // set up the return data
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 4bcdd0318729..3b527ae46e49 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -31,7 +31,6 @@
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
-#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
@@ -129,7 +128,7 @@ static int sve_default_vl = -1;
#ifdef CONFIG_ARM64_SVE
/* Maximum supported vector length across all CPUs (initially poisoned) */
-int __ro_after_init sve_max_vl = -1;
+int __ro_after_init sve_max_vl = SVE_VL_MIN;
/* Set of available vector lengths, as vq_to_bit(vq): */
static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
static void __percpu *efi_sve_state;
@@ -360,22 +359,13 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
return ret;
/* Writing -1 has the special meaning "set to max": */
- if (vl == -1) {
- /* Fail safe if sve_max_vl wasn't initialised */
- if (WARN_ON(!sve_vl_valid(sve_max_vl)))
- vl = SVE_VL_MIN;
- else
- vl = sve_max_vl;
-
- goto chosen;
- }
+ if (vl == -1)
+ vl = sve_max_vl;
if (!sve_vl_valid(vl))
return -EINVAL;
- vl = find_supported_vector_length(vl);
-chosen:
- sve_default_vl = vl;
+ sve_default_vl = find_supported_vector_length(vl);
return 0;
}
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 1ec5f28c39fc..6b2686d54411 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -313,6 +313,17 @@ int swsusp_arch_suspend(void)
sleep_cpu = -EINVAL;
__cpu_suspend_exit();
+
+ /*
+ * Just in case the boot kernel did turn the SSBD
+ * mitigation off behind our back, let's set the state
+ * to what we expect it to be.
+ */
+ switch (arm64_get_ssbd_state()) {
+ case ARM64_SSBD_FORCE_ENABLE:
+ case ARM64_SSBD_KERNEL:
+ arm64_set_ssbd_mitigation(true);
+ }
}
local_daif_restore(flags);
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 85a251b6dfa8..33147aacdafd 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -670,11 +670,10 @@ static void armv8pmu_disable_event(struct perf_event *event)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
+static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
u32 pmovsr;
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 7ff81fed46e1..bd732644c2f6 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -766,9 +766,6 @@ static void sve_init_header_from_task(struct user_sve_header *header,
vq = sve_vq_from_vl(header->vl);
header->max_vl = sve_max_vl;
- if (WARN_ON(!sve_vl_valid(sve_max_vl)))
- header->max_vl = header->vl;
-
header->size = SVE_PT_SIZE(vq, header->flags);
header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
SVE_PT_REGS_SVE);
@@ -1046,8 +1043,6 @@ static const struct user_regset_view user_aarch64_view = {
};
#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
-
enum compat_regset {
REGSET_COMPAT_GPR,
REGSET_COMPAT_VFP,
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 154b7d30145d..511af13e8d8f 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -570,8 +571,15 @@ badframe:
return 0;
}
-/* Determine the layout of optional records in the signal frame */
-static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
+/*
+ * Determine the layout of optional records in the signal frame
+ *
+ * add_all: if true, lays out the biggest possible signal frame for
+ * this task; otherwise, generates a layout for the current state
+ * of the task.
+ */
+static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
+ bool add_all)
{
int err;
@@ -581,7 +589,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
return err;
/* fault information, if valid */
- if (current->thread.fault_code) {
+ if (add_all || current->thread.fault_code) {
err = sigframe_alloc(user, &user->esr_offset,
sizeof(struct esr_context));
if (err)
@@ -591,8 +599,14 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
if (system_supports_sve()) {
unsigned int vq = 0;
- if (test_thread_flag(TIF_SVE))
- vq = sve_vq_from_vl(current->thread.sve_vl);
+ if (add_all || test_thread_flag(TIF_SVE)) {
+ int vl = sve_max_vl;
+
+ if (!add_all)
+ vl = current->thread.sve_vl;
+
+ vq = sve_vq_from_vl(vl);
+ }
err = sigframe_alloc(user, &user->sve_offset,
SVE_SIG_CONTEXT_SIZE(vq));
@@ -603,7 +617,6 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
return sigframe_alloc_end(user);
}
-
static int setup_sigframe(struct rt_sigframe_user_layout *user,
struct pt_regs *regs, sigset_t *set)
{
@@ -701,7 +714,7 @@ static int get_sigframe(struct rt_sigframe_user_layout *user,
int err;
init_user_layout(user);
- err = setup_sigframe_layout(user);
+ err = setup_sigframe_layout(user, false);
if (err)
return err;
@@ -830,11 +843,12 @@ static void do_signal(struct pt_regs *regs)
unsigned long continue_addr = 0, restart_addr = 0;
int retval = 0;
struct ksignal ksig;
+ bool syscall = in_syscall(regs);
/*
* If we were from a system call, check for system call restarting...
*/
- if (in_syscall(regs)) {
+ if (syscall) {
continue_addr = regs->pc;
restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
retval = regs->regs[0];
@@ -886,7 +900,7 @@ static void do_signal(struct pt_regs *regs)
* Handle restarting a different system call. As above, if a debugger
* has chosen to restart at a different PC, ignore the restart.
*/
- if (in_syscall(regs) && regs->pc == restart_addr) {
+ if (syscall && regs->pc == restart_addr) {
if (retval == -ERESTART_RESTARTBLOCK)
setup_restart_syscall(regs);
user_rewind_single_step(current);
@@ -936,3 +950,28 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
thread_flags = READ_ONCE(current_thread_info()->flags);
} while (thread_flags & _TIF_WORK_MASK);
}
+
+unsigned long __ro_after_init signal_minsigstksz;
+
+/*
+ * Determine the stack space required for guaranteed signal devliery.
+ * This function is used to populate AT_MINSIGSTKSZ at process startup.
+ * cpufeatures setup is assumed to be complete.
+ */
+void __init minsigstksz_setup(void)
+{
+ struct rt_sigframe_user_layout user;
+
+ init_user_layout(&user);
+
+ /*
+ * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
+ * be big enough, but it's our best guess:
+ */
+ if (WARN_ON(setup_sigframe_layout(&user, true)))
+ return;
+
+ signal_minsigstksz = sigframe_size(&user) +
+ round_up(sizeof(struct frame_record), 16) +
+ 16; /* max alignment padding */
+}
diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
new file mode 100644
index 000000000000..3432e5ef9f41
--- /dev/null
+++ b/arch/arm64/kernel/ssbd.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+
+#include <asm/cpufeature.h>
+
+/*
+ * prctl interface for SSBD
+ * FIXME: Drop the below ifdefery once merged in 4.18.
+ */
+#ifdef PR_SPEC_STORE_BYPASS
+static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+ int state = arm64_get_ssbd_state();
+
+ /* Unsupported */
+ if (state == ARM64_SSBD_UNKNOWN)
+ return -EINVAL;
+
+ /* Treat the unaffected/mitigated state separately */
+ if (state == ARM64_SSBD_MITIGATED) {
+ switch (ctrl) {
+ case PR_SPEC_ENABLE:
+ return -EPERM;
+ case PR_SPEC_DISABLE:
+ case PR_SPEC_FORCE_DISABLE:
+ return 0;
+ }
+ }
+
+ /*
+ * Things are a bit backward here: the arm64 internal API
+ * *enables the mitigation* when the userspace API *disables
+ * speculation*. So much fun.
+ */
+ switch (ctrl) {
+ case PR_SPEC_ENABLE:
+ /* If speculation is force disabled, enable is not allowed */
+ if (state == ARM64_SSBD_FORCE_ENABLE ||
+ task_spec_ssb_force_disable(task))
+ return -EPERM;
+ task_clear_spec_ssb_disable(task);
+ clear_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_DISABLE:
+ if (state == ARM64_SSBD_FORCE_DISABLE)
+ return -EPERM;
+ task_set_spec_ssb_disable(task);
+ set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ if (state == ARM64_SSBD_FORCE_DISABLE)
+ return -EPERM;
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_force_disable(task);
+ set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssbd_prctl_set(task, ctrl);
+ default:
+ return -ENODEV;
+ }
+}
+
+static int ssbd_prctl_get(struct task_struct *task)
+{
+ switch (arm64_get_ssbd_state()) {
+ case ARM64_SSBD_UNKNOWN:
+ return -EINVAL;
+ case ARM64_SSBD_FORCE_ENABLE:
+ return PR_SPEC_DISABLE;
+ case ARM64_SSBD_KERNEL:
+ if (task_spec_ssb_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+ if (task_spec_ssb_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ case ARM64_SSBD_FORCE_DISABLE:
+ return PR_SPEC_ENABLE;
+ default:
+ return PR_SPEC_NOT_AFFECTED;
+ }
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssbd_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+}
+#endif /* PR_SPEC_STORE_BYPASS */
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index a307b9e13392..70c283368b64 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -62,6 +62,14 @@ void notrace __cpu_suspend_exit(void)
*/
if (hw_breakpoint_restore)
hw_breakpoint_restore(cpu);
+
+ /*
+ * On resume, firmware implementing dynamic mitigation will
+ * have turned the mitigation on. If the user has forcefully
+ * disabled it, make sure their wishes are obeyed.
+ */
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+ arm64_set_ssbd_mitigation(false);
}
/*
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 21868530018e..f845a8617812 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -11,7 +11,9 @@
* for more details.
*/
+#include <linux/acpi.h>
#include <linux/arch_topology.h>
+#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
@@ -22,6 +24,7 @@
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
+#include <linux/smp.h>
#include <linux/string.h>
#include <asm/cpu.h>
@@ -47,7 +50,7 @@ static int __init get_cpu_for_node(struct device_node *node)
return cpu;
}
-static int __init parse_core(struct device_node *core, int cluster_id,
+static int __init parse_core(struct device_node *core, int package_id,
int core_id)
{
char name[10];
@@ -63,7 +66,7 @@ static int __init parse_core(struct device_node *core, int cluster_id,
leaf = false;
cpu = get_cpu_for_node(t);
if (cpu >= 0) {
- cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
} else {
@@ -85,7 +88,7 @@ static int __init parse_core(struct device_node *core, int cluster_id,
return -EINVAL;
}
- cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].core_id = core_id;
} else if (leaf) {
pr_err("%pOF: Can't get CPU for leaf core\n", core);
@@ -101,7 +104,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
bool leaf = true;
bool has_cores = false;
struct device_node *c;
- static int cluster_id __initdata;
+ static int package_id __initdata;
int core_id = 0;
int i, ret;
@@ -140,7 +143,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
}
if (leaf) {
- ret = parse_core(c, cluster_id, core_id++);
+ ret = parse_core(c, package_id, core_id++);
} else {
pr_err("%pOF: Non-leaf cluster with core %s\n",
cluster, name);
@@ -158,7 +161,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
pr_warn("%pOF: empty cluster\n", cluster);
if (leaf)
- cluster_id++;
+ package_id++;
return 0;
}
@@ -194,7 +197,7 @@ static int __init parse_dt_topology(void)
* only mark cores described in the DT as possible.
*/
for_each_possible_cpu(cpu)
- if (cpu_topology[cpu].cluster_id == -1)
+ if (cpu_topology[cpu].package_id == -1)
ret = -EINVAL;
out_map:
@@ -212,7 +215,14 @@ EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &cpu_topology[cpu].core_sibling;
+ const cpumask_t *core_mask = &cpu_topology[cpu].core_sibling;
+
+ if (cpu_topology[cpu].llc_id != -1) {
+ if (cpumask_subset(&cpu_topology[cpu].llc_siblings, core_mask))
+ core_mask = &cpu_topology[cpu].llc_siblings;
+ }
+
+ return core_mask;
}
static void update_siblings_masks(unsigned int cpuid)
@@ -224,7 +234,12 @@ static void update_siblings_masks(unsigned int cpuid)
for_each_possible_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
- if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+ if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+ cpumask_set_cpu(cpu, &cpuid_topo->llc_siblings);
+ cpumask_set_cpu(cpuid, &cpu_topo->llc_siblings);
+ }
+
+ if (cpuid_topo->package_id != cpu_topo->package_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
@@ -245,7 +260,7 @@ void store_cpu_topology(unsigned int cpuid)
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
u64 mpidr;
- if (cpuid_topo->cluster_id != -1)
+ if (cpuid_topo->package_id != -1)
goto topology_populated;
mpidr = read_cpuid_mpidr();
@@ -259,19 +274,19 @@ void store_cpu_topology(unsigned int cpuid)
/* Multiprocessor system : Multi-threads per core */
cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
+ cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
} else {
/* Multiprocessor system : Single-thread per core */
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+ cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
}
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
- cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
+ cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
cpuid_topo->thread_id, mpidr);
topology_populated:
@@ -287,7 +302,11 @@ static void __init reset_cpu_topology(void)
cpu_topo->thread_id = -1;
cpu_topo->core_id = 0;
- cpu_topo->cluster_id = -1;
+ cpu_topo->package_id = -1;
+
+ cpu_topo->llc_id = -1;
+ cpumask_clear(&cpu_topo->llc_siblings);
+ cpumask_set_cpu(cpu, &cpu_topo->llc_siblings);
cpumask_clear(&cpu_topo->core_sibling);
cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
@@ -296,6 +315,59 @@ static void __init reset_cpu_topology(void)
}
}
+#ifdef CONFIG_ACPI
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
+static int __init parse_acpi_topology(void)
+{
+ bool is_threaded;
+ int cpu, topology_id;
+
+ is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+
+ for_each_possible_cpu(cpu) {
+ int i, cache_id;
+
+ topology_id = find_acpi_cpu_topology(cpu, 0);
+ if (topology_id < 0)
+ return topology_id;
+
+ if (is_threaded) {
+ cpu_topology[cpu].thread_id = topology_id;
+ topology_id = find_acpi_cpu_topology(cpu, 1);
+ cpu_topology[cpu].core_id = topology_id;
+ } else {
+ cpu_topology[cpu].thread_id = -1;
+ cpu_topology[cpu].core_id = topology_id;
+ }
+ topology_id = find_acpi_cpu_topology_package(cpu);
+ cpu_topology[cpu].package_id = topology_id;
+
+ i = acpi_find_last_cache_level(cpu);
+
+ if (i > 0) {
+ /*
+ * this is the only part of cpu_topology that has
+ * a direct relationship with the cache topology
+ */
+ cache_id = find_acpi_cpu_cache_topology(cpu, i);
+ if (cache_id > 0)
+ cpu_topology[cpu].llc_id = cache_id;
+ }
+ }
+
+ return 0;
+}
+
+#else
+static inline int __init parse_acpi_topology(void)
+{
+ return -EINVAL;
+}
+#endif
+
void __init init_cpu_topology(void)
{
reset_cpu_topology();
@@ -304,6 +376,8 @@ void __init init_cpu_topology(void)
* Discard anything that was parsed if we hit an error so we
* don't use partial information.
*/
- if (of_have_populated_dt() && parse_dt_topology())
+ if (!acpi_disabled && parse_acpi_topology())
+ reset_cpu_topology();
+ else if (of_have_populated_dt() && parse_dt_topology())
reset_cpu_topology();
}
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 0221aca6493d..605d1b60469c 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -34,25 +34,25 @@ jiffies = jiffies_64;
* 4 KB (see related ASSERT() below) \
*/ \
. = ALIGN(SZ_4K); \
- VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
+ __hyp_idmap_text_start = .; \
*(.hyp.idmap.text) \
- VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
- VMLINUX_SYMBOL(__hyp_text_start) = .; \
+ __hyp_idmap_text_end = .; \
+ __hyp_text_start = .; \
*(.hyp.text) \
- VMLINUX_SYMBOL(__hyp_text_end) = .;
+ __hyp_text_end = .;
#define IDMAP_TEXT \
. = ALIGN(SZ_4K); \
- VMLINUX_SYMBOL(__idmap_text_start) = .; \
+ __idmap_text_start = .; \
*(.idmap.text) \
- VMLINUX_SYMBOL(__idmap_text_end) = .;
+ __idmap_text_end = .;
#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT \
. = ALIGN(SZ_4K); \
- VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
+ __hibernate_exit_text_start = .; \
*(.hibernate_exit.text) \
- VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
+ __hibernate_exit_text_end = .;
#else
#define HIBERNATE_TEXT
#endif
@@ -60,10 +60,10 @@ jiffies = jiffies_64;
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT \
. = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \
+ __entry_tramp_text_start = .; \
*(.entry.tramp.text) \
. = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+ __entry_tramp_text_end = .;
#else
#define TRAMP_TEXT
#endif
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index bffece27b5c1..05d836979032 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -106,8 +106,44 @@ el1_hvc_guest:
*/
ldr x1, [sp] // Guest's x0
eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+ cbz w1, wa_epilogue
+
+ /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
+ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
+ ARM_SMCCC_ARCH_WORKAROUND_2)
cbnz w1, el1_trap
- mov x0, x1
+
+#ifdef CONFIG_ARM64_SSBD
+alternative_cb arm64_enable_wa2_handling
+ b wa2_end
+alternative_cb_end
+ get_vcpu_ptr x2, x0
+ ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
+
+ // Sanitize the argument and update the guest flags
+ ldr x1, [sp, #8] // Guest's x1
+ clz w1, w1 // Murphy's device:
+ lsr w1, w1, #5 // w1 = !!w1 without using
+ eor w1, w1, #1 // the flags...
+ bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
+ str x0, [x2, #VCPU_WORKAROUND_FLAGS]
+
+ /* Check that we actually need to perform the call */
+ hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
+ cbz x0, wa2_end
+
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ smc #0
+
+ /* Don't leak data from the SMC call */
+ mov x3, xzr
+wa2_end:
+ mov x2, xzr
+ mov x1, xzr
+#endif
+
+wa_epilogue:
+ mov x0, xzr
add sp, sp, #16
eret
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index d9645236e474..c50cedc447f1 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -15,6 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
@@ -389,6 +390,39 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
+static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
+{
+ if (!cpus_have_const_cap(ARM64_SSBD))
+ return false;
+
+ return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
+}
+
+static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ARM64_SSBD
+ /*
+ * The host runs with the workaround always present. If the
+ * guest wants it disabled, so be it...
+ */
+ if (__needs_ssbd_off(vcpu) &&
+ __hyp_this_cpu_read(arm64_ssbd_callback_required))
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
+#endif
+}
+
+static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ARM64_SSBD
+ /*
+ * If the guest has disabled the workaround, bring it back on.
+ */
+ if (__needs_ssbd_off(vcpu) &&
+ __hyp_this_cpu_read(arm64_ssbd_callback_required))
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
+#endif
+}
+
/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
@@ -409,6 +443,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
+ __set_guest_arch_workaround_state(vcpu);
+
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt);
@@ -416,6 +452,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
+ __set_host_arch_workaround_state(vcpu);
+
fp_enabled = fpsimd_enabled_vhe();
sysreg_save_guest_state_vhe(guest_ctxt);
@@ -465,6 +503,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
__sysreg_restore_state_nvhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
+ __set_guest_arch_workaround_state(vcpu);
+
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt);
@@ -472,6 +512,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
+ __set_host_arch_workaround_state(vcpu);
+
fp_enabled = __fpsimd_enabled_nvhe();
__sysreg_save_state_nvhe(guest_ctxt);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 3256b9228e75..a74311beda35 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset PMU */
kvm_pmu_vcpu_reset(vcpu);
+ /* Default workaround setup is enabled (if supported) */
+ if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
+ vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
+
/* Reset timer */
return kvm_timer_vcpu_reset(vcpu);
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index db01f2709842..49e217ac7e1e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -504,6 +504,11 @@ static int __init arm64_dma_init(void)
max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
swiotlb = 1;
+ WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
+ TAINT_CPU_OUT_OF_SPEC,
+ "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
+ ARCH_DMA_MINALIGN, cache_line_size());
+
return atomic_pool_init();
}
arch_initcall(arm64_dma_init);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 576f15153080..b8eecc7b9531 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -235,8 +235,9 @@ static bool is_el1_instruction_abort(unsigned int esr)
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
-static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
- unsigned long addr)
+static inline bool is_el1_permission_fault(unsigned int esr,
+ struct pt_regs *regs,
+ unsigned long addr)
{
unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
@@ -254,6 +255,22 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
return false;
}
+static void die_kernel_fault(const char *msg, unsigned long addr,
+ unsigned int esr, struct pt_regs *regs)
+{
+ bust_spinlocks(1);
+
+ pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
+ addr);
+
+ mem_abort_decode(esr);
+
+ show_pte(addr);
+ die("Oops", regs, esr);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+}
+
static void __do_kernel_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -266,9 +283,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
return;
- bust_spinlocks(1);
-
- if (is_permission_fault(esr, regs, addr)) {
+ if (is_el1_permission_fault(esr, regs, addr)) {
if (esr & ESR_ELx_WNR)
msg = "write to read-only memory";
else
@@ -279,15 +294,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
msg = "paging request";
}
- pr_alert("Unable to handle kernel %s at virtual address %08lx\n", msg,
- addr);
-
- mem_abort_decode(esr);
-
- show_pte(addr);
- die("Oops", regs, esr);
- bust_spinlocks(0);
- do_exit(SIGKILL);
+ die_kernel_fault(msg, addr, esr, regs);
}
static void __do_user_fault(struct siginfo *info, unsigned int esr)
@@ -447,16 +454,19 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
+ if (addr < TASK_SIZE && is_el1_permission_fault(esr, regs, addr)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
- die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
+ die_kernel_fault("access to user memory with fs=KERNEL_DS",
+ addr, esr, regs);
if (is_el1_instruction_abort(esr))
- die("Attempting to execute userspace memory", regs, esr);
+ die_kernel_fault("execution of user memory",
+ addr, esr, regs);
if (!search_exception_tables(regs->pc))
- die("Accessing user space memory outside uaccess.h routines", regs, esr);
+ die_kernel_fault("access to user memory outside uaccess routines",
+ addr, esr, regs);
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 66cf3a5a2f83..859c19828dd4 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -61,10 +61,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
#define HAVE_PCI_LEGACY 1
-extern void pcibios_claim_one_bus(struct pci_bus *b);
-
-extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
-
extern void pcibios_resource_survey(void);
struct file;
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 161f9758c631..f34346d56095 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -915,67 +915,6 @@ void __init pcibios_resource_survey(void)
pci_assign_unassigned_resources();
}
-/* This is used by the PCI hotplug driver to allocate resource
- * of newly plugged busses. We can try to consolidate with the
- * rest of the code later, for now, keep it as-is as our main
- * resource allocation function doesn't deal with sub-trees yet.
- */
-void pcibios_claim_one_bus(struct pci_bus *bus)
-{
- struct pci_dev *dev;
- struct pci_bus *child_bus;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- int i;
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *r = &dev->resource[i];
-
- if (r->parent || !r->start || !r->flags)
- continue;
-
- pr_debug("PCI: Claiming %s: ", pci_name(dev));
- pr_debug("Resource %d: %016llx..%016llx [%x]\n",
- i, (unsigned long long)r->start,
- (unsigned long long)r->end,
- (unsigned int)r->flags);
-
- if (pci_claim_resource(dev, i) == 0)
- continue;
-
- pci_claim_bridge_resource(dev, i);
- }
- }
-
- list_for_each_entry(child_bus, &bus->children, node)
- pcibios_claim_one_bus(child_bus);
-}
-EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
-
-
-/* pcibios_finish_adding_to_bus
- *
- * This is to be called by the hotplug code after devices have been
- * added to a bus, this include calling it for a PHB that is just
- * being added
- */
-void pcibios_finish_adding_to_bus(struct pci_bus *bus)
-{
- pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
- pci_domain_nr(bus), bus->number);
-
- /* Allocate bus and devices resources */
- pcibios_allocate_bus_resources(bus);
- pcibios_claim_one_bus(bus);
-
- /* Add new devices to global lists. Register in proc, sysfs. */
- pci_bus_add_devices(bus);
-
- /* Fixup EEH */
- /* eeh_add_device_tree_late(bus); */
-}
-EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
-
static void pcibios_setup_phb_resources(struct pci_controller *hose,
struct list_head *resources)
{
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 0c65c38e05d6..f1e92bf743c2 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -263,9 +263,8 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
if (!r->start && r->end) {
- printk(KERN_ERR "PCI: Device %s not available "
- "because of resource collisions\n",
- pci_name(dev));
+ pci_err(dev,
+ "can't enable device: resource collisions\n");
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
@@ -274,8 +273,7 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
+ pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 076fe3094856..eaba5920234d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,6 +135,7 @@ config PPC
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if PPC64
+ select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select ARCH_HAS_SG_CHAIN
@@ -219,6 +220,7 @@ config PPC
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_RSEQ
select IOMMU_HELPER if PPC64
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 42fe7c2ff2df..63cee159022b 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -335,9 +335,6 @@ extern unsigned long pci_io_base;
/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP
-/* Advertise support for _PAGE_SPECIAL */
-#define __HAVE_ARCH_PTE_SPECIAL
-
#ifndef __ASSEMBLY__
/*
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 050b0d775324..bef56141a549 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -208,9 +208,6 @@ static inline bool pte_user(pte_t pte)
#define PAGE_AGP (PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP
-/* Advertise support for _PAGE_SPECIAL */
-#define __HAVE_ARCH_PTE_SPECIAL
-
#ifndef _PAGE_READ
/* if not defined, we should not find _PAGE_WRITE too */
#define _PAGE_READ 0
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 79a3b47e4839..cfcf6a874cfa 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -392,3 +392,4 @@ SYSCALL(statx)
SYSCALL(pkey_alloc)
SYSCALL(pkey_free)
SYSCALL(pkey_mprotect)
+SYSCALL(rseq)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index daf1ba97a00c..1e9708632dce 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 387
+#define NR_syscalls 388
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 389c36fd8299..ac5ba55066dd 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -398,5 +398,6 @@
#define __NR_pkey_alloc 384
#define __NR_pkey_free 385
#define __NR_pkey_mprotect 386
+#define __NR_rseq 387
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index eb8d01bae8c6..973577f2141c 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -365,6 +365,13 @@ syscall_dotrace_cont:
blrl /* Call handler */
.globl ret_from_syscall
ret_from_syscall:
+#ifdef CONFIG_DEBUG_RSEQ
+ /* Check whether the syscall is issued inside a restartable sequence */
+ stw r3,GPR3(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl rseq_syscall
+ lwz r3,GPR3(r1)
+#endif
mr r6,r3
CURRENT_THREAD_INFO(r12, r1)
/* disable interrupts so current_thread_info()->flags can't change */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b10e01021214..729e9ef4d3bb 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -194,6 +194,14 @@ system_call: /* label this so stack traces look sane */
.Lsyscall_exit:
std r3,RESULT(r1)
+
+#ifdef CONFIG_DEBUG_RSEQ
+ /* Check whether the syscall is issued inside a restartable sequence */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl rseq_syscall
+ ld r3,RESULT(r1)
+#endif
+
CURRENT_THREAD_INFO(r12, r1)
ld r8,_MSR(r1)
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index fb932f1202c7..17fe4339ba59 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -134,6 +134,8 @@ static void do_signal(struct task_struct *tsk)
/* Re-enable the breakpoints for the signal stack */
thread_change_pc(tsk, tsk->thread.regs);
+ rseq_signal_deliver(tsk->thread.regs);
+
if (is32) {
if (ksig.ka.sa.sa_flags & SA_SIGINFO)
ret = handle_rt_signal32(&ksig, oldset, tsk);
@@ -168,6 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(regs);
}
user_enter();
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 274bc064c41f..17f19e67993b 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -42,6 +42,7 @@ config RISCV
select THREAD_INFO_IN_TASK
select RISCV_TIMER
select GENERIC_IRQ_MULTI_HANDLER
+ select ARCH_HAS_PTE_SPECIAL
config MMU
def_bool y
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index 997ddbb1d370..2fa2942be221 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -42,7 +42,4 @@
_PAGE_WRITE | _PAGE_EXEC | \
_PAGE_USER | _PAGE_GLOBAL))
-/* Advertise support for _PAGE_SPECIAL */
-#define __HAVE_ARCH_PTE_SPECIAL
-
#endif /* _ASM_RISCV_PGTABLE_BITS_H */
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 10ed2749e246..0bc86e5f8f3f 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -20,7 +20,6 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
struct device_node *node,
enum cache_type type, unsigned int level)
{
- this_leaf->of_node = node;
this_leaf->level = level;
this_leaf->type = type;
/* not a sector cache */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b7deee7e738f..baed39772c84 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -65,6 +65,7 @@ config S390
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
+ select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 2d24d33bf188..9809694e1389 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -171,7 +171,6 @@ static inline int is_module_addr(void *addr)
#define _PAGE_WRITE 0x020 /* SW pte write bit */
#define _PAGE_SPECIAL 0x040 /* SW associated with special page */
#define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */
-#define __HAVE_ARCH_PTE_SPECIAL
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 562f72955956..84bd6329a88d 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -190,14 +190,15 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
struct page, lru);
- mask = atomic_read(&page->_mapcount);
+ mask = atomic_read(&page->_refcount) >> 24;
mask = (mask | (mask >> 4)) & 3;
if (mask != 3) {
table = (unsigned long *) page_to_phys(page);
bit = mask & 1; /* =1 -> second 2K */
if (bit)
table += PTRS_PER_PTE;
- atomic_xor_bits(&page->_mapcount, 1U << bit);
+ atomic_xor_bits(&page->_refcount,
+ 1U << (bit + 24));
list_del(&page->lru);
}
}
@@ -218,12 +219,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
table = (unsigned long *) page_to_phys(page);
if (mm_alloc_pgste(mm)) {
/* Return 4K page table with PGSTEs */
- atomic_set(&page->_mapcount, 3);
+ atomic_xor_bits(&page->_refcount, 3 << 24);
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
} else {
/* Return the first 2K fragment of the page */
- atomic_set(&page->_mapcount, 1);
+ atomic_xor_bits(&page->_refcount, 1 << 24);
memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
spin_lock_bh(&mm->context.lock);
list_add(&page->lru, &mm->context.pgtable_list);
@@ -242,7 +243,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
/* Free 2K page table fragment of a 4K page */
bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
spin_lock_bh(&mm->context.lock);
- mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
+ mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
+ mask >>= 24;
if (mask & 3)
list_add(&page->lru, &mm->context.pgtable_list);
else
@@ -253,7 +255,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
}
pgtable_page_dtor(page);
- atomic_set(&page->_mapcount, -1);
__free_page(page);
}
@@ -274,7 +275,8 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
}
bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
spin_lock_bh(&mm->context.lock);
- mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
+ mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
+ mask >>= 24;
if (mask & 3)
list_add_tail(&page->lru, &mm->context.pgtable_list);
else
@@ -296,12 +298,13 @@ static void __tlb_remove_table(void *_table)
break;
case 1: /* lower 2K of a 4K page table */
case 2: /* higher 2K of a 4K page table */
- if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
+ mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
+ mask >>= 24;
+ if (mask != 0)
break;
/* fallthrough */
case 3: /* 4K page table with pgstes */
pgtable_page_dtor(page);
- atomic_set(&page->_mapcount, -1);
__free_page(page);
break;
}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ae619d54018c..4d61a085982b 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config SUPERH
def_bool y
+ select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 89c513a982fc..f6abfe2bca93 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -156,8 +156,6 @@ extern void page_table_range_init(unsigned long start, unsigned long end,
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#define __HAVE_ARCH_PTE_SPECIAL
-
#include <asm-generic/pgtable.h>
#endif /* __ASM_SH_PGTABLE_H */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index b42ba888217d..9a2b8877f174 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -88,6 +88,7 @@ config SPARC64
select ARCH_USE_QUEUED_SPINLOCKS
select GENERIC_TIME_VSYSCALL
select ARCH_CLOCKSOURCE_DATA
+ select ARCH_HAS_PTE_SPECIAL
config ARCH_DEFCONFIG
string
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 44d6ac47e035..1393a8ac596b 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -117,9 +117,6 @@ bool kern_addr_valid(unsigned long addr);
#define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */
#define _PAGE_PUD_HUGE _PAGE_PMD_HUGE
-/* Advertise support for _PAGE_SPECIAL */
-#define __HAVE_ARCH_PTE_SPECIAL
-
/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */
#define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index 15b59169c535..e5e5ff6b9a5c 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -60,50 +60,30 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
pci_bus_add_devices(root_bus);
}
-void pcibios_fixup_bus(struct pci_bus *pbus)
+int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- struct pci_dev *dev;
- int i, has_io, has_mem;
- u16 cmd;
+ u16 cmd, oldcmd;
+ int i;
- list_for_each_entry(dev, &pbus->devices, bus_list) {
- /*
- * We can not rely on that the bootloader has enabled I/O
- * or memory access to PCI devices. Instead we enable it here
- * if the device has BARs of respective type.
- */
- has_io = has_mem = 0;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- unsigned long f = dev->resource[i].flags;
- if (f & IORESOURCE_IO)
- has_io = 1;
- else if (f & IORESOURCE_MEM)
- has_mem = 1;
- }
- /* ROM BARs are mapped into 32-bit memory space */
- if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
- dev->resource[PCI_ROM_RESOURCE].flags |=
- IORESOURCE_ROM_ENABLE;
- has_mem = 1;
- }
- pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
- if (has_io && !(cmd & PCI_COMMAND_IO)) {
-#ifdef CONFIG_PCI_DEBUG
- printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
- pci_name(dev));
-#endif
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = &dev->resource[i];
+
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<i)))
+ continue;
+
+ if (res->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
- pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
- cmd);
- }
- if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
-#ifdef CONFIG_PCI_DEBUG
- printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev"
- "%s\n", pci_name(dev));
-#endif
+ if (res->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
- pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
- cmd);
- }
}
+
+ if (cmd != oldcmd) {
+ pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
}
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 41b20edb427d..17ea16a1337c 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -214,8 +214,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
if (!addrs)
return;
if (ofpci_verbose)
- printk(" parse addresses (%d bytes) @ %p\n",
- proplen, addrs);
+ pci_info(dev, " parse addresses (%d bytes) @ %p\n",
+ proplen, addrs);
op_res = &op->resource[0];
for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
struct resource *res;
@@ -227,8 +227,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
continue;
i = addrs[0] & 0xff;
if (ofpci_verbose)
- printk(" start: %llx, end: %llx, i: %x\n",
- op_res->start, op_res->end, i);
+ pci_info(dev, " start: %llx, end: %llx, i: %x\n",
+ op_res->start, op_res->end, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -236,13 +236,15 @@ static void pci_parse_of_addrs(struct platform_device *op,
res = &dev->resource[PCI_ROM_RESOURCE];
flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
} else {
- printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
+ pci_err(dev, "bad cfg reg num 0x%x\n", i);
continue;
}
res->start = op_res->start;
res->end = op_res->end;
res->flags = flags;
res->name = pci_name(dev);
+
+ pci_info(dev, "reg 0x%x: %pR\n", i, res);
}
}
@@ -289,8 +291,8 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
type = "";
if (ofpci_verbose)
- printk(" create device, devfn: %x, type: %s\n",
- devfn, type);
+ pci_info(bus," create device, devfn: %x, type: %s\n",
+ devfn, type);
dev->sysdata = node;
dev->dev.parent = bus->bridge;
@@ -323,10 +325,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
- if (ofpci_verbose)
- printk(" class: 0x%x device name: %s\n",
- dev->class, pci_name(dev));
-
/* I have seen IDE devices which will not respond to
* the bmdma simplex check reads if bus mastering is
* disabled.
@@ -353,10 +351,13 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->irq = PCI_IRQ_NONE;
}
+ pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
+ dev->vendor, dev->device, dev->hdr_type, dev->class);
+
pci_parse_of_addrs(sd->op, node, dev);
if (ofpci_verbose)
- printk(" adding to system ...\n");
+ pci_info(dev, " adding to system ...\n");
pci_device_add(dev, bus);
@@ -430,19 +431,19 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
u64 size;
if (ofpci_verbose)
- printk("of_scan_pci_bridge(%s)\n", node->full_name);
+ pci_info(dev, "of_scan_pci_bridge(%s)\n", node->full_name);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
- printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
+ pci_info(dev, "Can't get bus-range for PCI-PCI bridge %s\n",
node->full_name);
return;
}
if (ofpci_verbose)
- printk(" Bridge bus range [%u --> %u]\n",
- busrange[0], busrange[1]);
+ pci_info(dev, " Bridge bus range [%u --> %u]\n",
+ busrange[0], busrange[1]);
ranges = of_get_property(node, "ranges", &len);
simba = 0;
@@ -454,8 +455,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
if (!bus) {
- printk(KERN_ERR "Failed to create pci bus for %s\n",
- node->full_name);
+ pci_err(dev, "Failed to create pci bus for %s\n",
+ node->full_name);
return;
}
@@ -464,8 +465,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
bus->bridge_ctl = 0;
if (ofpci_verbose)
- printk(" Bridge ranges[%p] simba[%d]\n",
- ranges, simba);
+ pci_info(dev, " Bridge ranges[%p] simba[%d]\n",
+ ranges, simba);
/* parse ranges property, or cook one up by hand for Simba */
/* PCI #address-cells == 3 and #size-cells == 2 always */
@@ -487,10 +488,10 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
u64 start;
if (ofpci_verbose)
- printk(" RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
- "%08x:%08x]\n",
- ranges[0], ranges[1], ranges[2], ranges[3],
- ranges[4], ranges[5], ranges[6], ranges[7]);
+ pci_info(dev, " RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
+ "%08x:%08x]\n",
+ ranges[0], ranges[1], ranges[2], ranges[3],
+ ranges[4], ranges[5], ranges[6], ranges[7]);
flags = pci_parse_of_flags(ranges[0]);
size = GET_64BIT(ranges, 6);
@@ -510,14 +511,14 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
if (flags & IORESOURCE_IO) {
res = bus->resource[0];
if (res->flags) {
- printk(KERN_ERR "PCI: ignoring extra I/O range"
- " for bridge %s\n", node->full_name);
+ pci_err(dev, "ignoring extra I/O range"
+ " for bridge %s\n", node->full_name);
continue;
}
} else {
if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
- printk(KERN_ERR "PCI: too many memory ranges"
- " for bridge %s\n", node->full_name);
+ pci_err(dev, "too many memory ranges"
+ " for bridge %s\n", node->full_name);
continue;
}
res = bus->resource[i];
@@ -529,8 +530,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
region.end = region.start + size - 1;
if (ofpci_verbose)
- printk(" Using flags[%08x] start[%016llx] size[%016llx]\n",
- flags, start, size);
+ pci_info(dev, " Using flags[%08x] start[%016llx] size[%016llx]\n",
+ flags, start, size);
pcibios_bus_to_resource(dev->bus, res, &region);
}
@@ -538,7 +539,7 @@ after_ranges:
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
if (ofpci_verbose)
- printk(" bus name: %s\n", bus->name);
+ pci_info(dev, " bus name: %s\n", bus->name);
pci_of_scan_bus(pbm, node, bus);
}
@@ -553,14 +554,14 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
struct pci_dev *dev;
if (ofpci_verbose)
- printk("PCI: scan_bus[%s] bus no %d\n",
- node->full_name, bus->number);
+ pci_info(bus, "scan_bus[%s] bus no %d\n",
+ node->full_name, bus->number);
child = NULL;
prev_devfn = -1;
while ((child = of_get_next_child(node, child)) != NULL) {
if (ofpci_verbose)
- printk(" * %s\n", child->full_name);
+ pci_info(bus, " * %s\n", child->full_name);
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
@@ -581,8 +582,7 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
if (!dev)
continue;
if (ofpci_verbose)
- printk("PCI: dev header type: %x\n",
- dev->hdr_type);
+ pci_info(dev, "dev header type: %x\n", dev->hdr_type);
if (pci_is_bridge(dev))
of_scan_pci_bridge(pbm, child, dev);
@@ -624,6 +624,45 @@ static void pci_bus_register_of_sysfs(struct pci_bus *bus)
pci_bus_register_of_sysfs(child_bus);
}
+static void pci_claim_legacy_resources(struct pci_dev *dev)
+{
+ struct pci_bus_region region;
+ struct resource *p, *root, *conflict;
+
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ return;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return;
+
+ p->name = "Video RAM area";
+ p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ region.start = 0xa0000UL;
+ region.end = region.start + 0x1ffffUL;
+ pcibios_bus_to_resource(dev->bus, p, &region);
+
+ root = pci_find_parent_resource(dev, p);
+ if (!root) {
+ pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
+ goto err;
+ }
+
+ conflict = request_resource_conflict(root, p);
+ if (conflict) {
+ pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n",
+ p, conflict->name, conflict);
+ goto err;
+ }
+
+ pci_info(dev, "VGA legacy framebuffer %pR\n", p);
+ return;
+
+err:
+ kfree(p);
+}
+
static void pci_claim_bus_resources(struct pci_bus *bus)
{
struct pci_bus *child_bus;
@@ -639,15 +678,13 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
continue;
if (ofpci_verbose)
- printk("PCI: Claiming %s: "
- "Resource %d: %016llx..%016llx [%x]\n",
- pci_name(dev), i,
- (unsigned long long)r->start,
- (unsigned long long)r->end,
- (unsigned int)r->flags);
+ pci_info(dev, "Claiming Resource %d: %pR\n",
+ i, r);
pci_claim_resource(dev, i);
}
+
+ pci_claim_legacy_resources(dev);
}
list_for_each_entry(child_bus, &bus->children, node)
@@ -687,6 +724,7 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
pci_bus_register_of_sysfs(bus);
pci_claim_bus_resources(bus);
+
pci_bus_add_devices(bus);
return bus;
}
@@ -713,9 +751,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
}
if (cmd != oldcmd) {
- printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
- pci_name(dev), cmd);
- /* Enable the appropriate bits in the PCI command register. */
+ pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
@@ -1075,8 +1111,8 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
sp = prop->names;
if (ofpci_verbose)
- printk("PCI: Making slots for [%s] mask[0x%02x]\n",
- node->full_name, mask);
+ pci_info(bus, "Making slots for [%s] mask[0x%02x]\n",
+ node->full_name, mask);
i = 0;
while (mask) {
@@ -1089,12 +1125,12 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
}
if (ofpci_verbose)
- printk("PCI: Making slot [%s]\n", sp);
+ pci_info(bus, "Making slot [%s]\n", sp);
pci_slot = pci_create_slot(bus, i, sp, NULL);
if (IS_ERR(pci_slot))
- printk(KERN_ERR "PCI: pci_create_slot returned %ld\n",
- PTR_ERR(pci_slot));
+ pci_err(bus, "pci_create_slot returned %ld\n",
+ PTR_ERR(pci_slot));
sp += strlen(sp) + 1;
mask &= ~this_bit;
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 38d46bcc8634..4759ccd542fe 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -329,23 +329,6 @@ void pci_get_pbm_props(struct pci_pbm_info *pbm)
}
}
-static void pci_register_legacy_regions(struct resource *io_res,
- struct resource *mem_res)
-{
- struct resource *p;
-
- /* VGA Video RAM. */
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
-
- p->name = "Video RAM area";
- p->start = mem_res->start + 0xa0000UL;
- p->end = p->start + 0x1ffffUL;
- p->flags = IORESOURCE_BUSY;
- request_resource(mem_res, p);
-}
-
static void pci_register_iommu_region(struct pci_pbm_info *pbm)
{
const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma",
@@ -487,8 +470,6 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
if (pbm->mem64_space.flags)
request_resource(&iomem_resource, &pbm->mem64_space);
- pci_register_legacy_regions(&pbm->io_space,
- &pbm->mem_space);
pci_register_iommu_region(pbm);
}
@@ -508,8 +489,8 @@ void pci_scan_for_target_abort(struct pci_pbm_info *pbm,
PCI_STATUS_REC_TARGET_ABORT));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
- printk("%s: Device %s saw Target Abort [%016x]\n",
- pbm->name, pci_name(pdev), status);
+ pci_info(pdev, "%s: Device saw Target Abort [%016x]\n",
+ pbm->name, status);
}
}
@@ -531,8 +512,8 @@ void pci_scan_for_master_abort(struct pci_pbm_info *pbm,
(status & (PCI_STATUS_REC_MASTER_ABORT));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
- printk("%s: Device %s received Master Abort [%016x]\n",
- pbm->name, pci_name(pdev), status);
+ pci_info(pdev, "%s: Device received Master Abort "
+ "[%016x]\n", pbm->name, status);
}
}
@@ -555,8 +536,8 @@ void pci_scan_for_parity_error(struct pci_pbm_info *pbm,
PCI_STATUS_DETECTED_PARITY));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
- printk("%s: Device %s saw Parity Error [%016x]\n",
- pbm->name, pci_name(pdev), status);
+ pci_info(pdev, "%s: Device saw Parity Error [%016x]\n",
+ pbm->name, status);
}
}
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 1994d7638406..fb5899cbfa51 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -191,8 +191,8 @@ static void sparc64_teardown_msi_irq(unsigned int irq,
break;
}
if (i >= pbm->msi_num) {
- printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
- pbm->name, irq);
+ pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name,
+ irq);
return;
}
@@ -201,9 +201,9 @@ static void sparc64_teardown_msi_irq(unsigned int irq,
err = ops->msi_teardown(pbm, msi_num);
if (err) {
- printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
- "irq %u, gives error %d\n",
- pbm->name, msi_num, irq, err);
+ pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, "
+ "irq %u, gives error %d\n", pbm->name, msi_num, irq,
+ err);
return;
}
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 22f8774977d5..ee4c9a9a171c 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -518,10 +518,10 @@ static void pcic_map_pci_device(struct linux_pcic *pcic,
* board in a PCI slot. We must remap it
* under 64K but it is not done yet. XXX
*/
- printk("PCIC: Skipping I/O space at 0x%lx, "
- "this will Oops if a driver attaches "
- "device '%s' at %02x:%02x)\n", address,
- namebuf, dev->bus->number, dev->devfn);
+ pci_info(dev, "PCIC: Skipping I/O space at "
+ "0x%lx, this will Oops if a driver "
+ "attaches device '%s'\n", address,
+ namebuf);
}
}
}
@@ -551,8 +551,8 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
p++;
}
if (i >= pcic->pcic_imdim) {
- printk("PCIC: device %s devfn %02x:%02x not found in %d\n",
- namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim);
+ pci_info(dev, "PCIC: device %s not found in %d\n", namebuf,
+ pcic->pcic_imdim);
dev->irq = 0;
return;
}
@@ -565,7 +565,7 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
real_irq = ivec >> ((i-4) << 2) & 0xF;
} else { /* Corrupted map */
- printk("PCIC: BAD PIN %d\n", i); for (;;) {}
+ pci_info(dev, "PCIC: BAD PIN %d\n", i); for (;;) {}
}
/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
@@ -574,10 +574,10 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
*/
if (real_irq == 0 || p->force) {
if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
- printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
+ pci_info(dev, "PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
}
- printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n",
- p->irq, p->pin, dev->bus->number, dev->devfn);
+ pci_info(dev, "PCIC: setting irq %d at pin %d\n", p->irq,
+ p->pin);
real_irq = p->irq;
i = p->pin;
@@ -602,15 +602,13 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
void pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
- int i, has_io, has_mem;
- unsigned int cmd = 0;
struct linux_pcic *pcic;
/* struct linux_pbm_info* pbm = &pcic->pbm; */
int node;
struct pcidev_cookie *pcp;
if (!pcic0_up) {
- printk("pcibios_fixup_bus: no PCIC\n");
+ pci_info(bus, "pcibios_fixup_bus: no PCIC\n");
return;
}
pcic = &pcic0;
@@ -619,44 +617,12 @@ void pcibios_fixup_bus(struct pci_bus *bus)
* Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus);
*/
if (bus->number != 0) {
- printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number);
+ pci_info(bus, "pcibios_fixup_bus: nonzero bus 0x%x\n",
+ bus->number);
return;
}
list_for_each_entry(dev, &bus->devices, bus_list) {
-
- /*
- * Comment from i386 branch:
- * There are buggy BIOSes that forget to enable I/O and memory
- * access to PCI devices. We try to fix this, but we need to
- * be sure that the BIOS didn't forget to assign an address
- * to the device. [mj]
- * OBP is a case of such BIOS :-)
- */
- has_io = has_mem = 0;
- for(i=0; i<6; i++) {
- unsigned long f = dev->resource[i].flags;
- if (f & IORESOURCE_IO) {
- has_io = 1;
- } else if (f & IORESOURCE_MEM)
- has_mem = 1;
- }
- pcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
- if (has_io && !(cmd & PCI_COMMAND_IO)) {
- printk("PCIC: Enabling I/O for device %02x:%02x\n",
- dev->bus->number, dev->devfn);
- cmd |= PCI_COMMAND_IO;
- pcic_write_config(dev->bus, dev->devfn,
- PCI_COMMAND, 2, cmd);
- }
- if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
- printk("PCIC: Enabling memory for device %02x:%02x\n",
- dev->bus->number, dev->devfn);
- cmd |= PCI_COMMAND_MEMORY;
- pcic_write_config(dev->bus, dev->devfn,
- PCI_COMMAND, 2, cmd);
- }
-
node = pdev_to_pnode(&pcic->pbm, dev);
if(node == 0)
node = -1;
@@ -675,6 +641,34 @@ void pcibios_fixup_bus(struct pci_bus *bus)
}
}
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, oldcmd;
+ int i;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = &dev->resource[i];
+
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<i)))
+ continue;
+
+ if (res->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (res->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ if (cmd != oldcmd) {
+ pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
/* Makes compiler happy */
static volatile int pcic_timer_dummy;
@@ -747,17 +741,11 @@ static void watchdog_reset() {
}
#endif
-int pcibios_enable_device(struct pci_dev *pdev, int mask)
-{
- return 0;
-}
-
/*
* NMI
*/
void pcic_nmi(unsigned int pend, struct pt_regs *regs)
{
-
pend = swab32(pend);
if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cb6e3a219294..297789aef9fa 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -60,6 +60,7 @@ config X86
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PMEM_API if X86_64
+ select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_REFCOUNT
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_UACCESS_MCSAFE if X86_64
@@ -182,6 +183,7 @@ config X86
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
select HAVE_STACK_VALIDATION if X86_64
+ select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER
@@ -333,6 +335,9 @@ config ARCH_SUPPORTS_UPROBES
config FIX_EARLYCON_MEM
def_bool y
+config DYNAMIC_PHYSICAL_MASK
+ bool
+
config PGTABLE_LEVELS
int
default 5 if X86_5LEVEL
@@ -1485,6 +1490,7 @@ config ARCH_HAS_MEM_ENCRYPT
config AMD_MEM_ENCRYPT
bool "AMD Secure Memory Encryption (SME) support"
depends on X86_64 && CPU_SUP_AMD
+ select DYNAMIC_PHYSICAL_MASK
---help---
Say yes to enable support for the encryption of system memory.
This requires an AMD processor that supports Secure Memory
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 192e4d2f9efc..c6dd1d980081 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -72,6 +72,9 @@ config EARLY_PRINTK_USB_XDBC
You should normally say N here, unless you want to debug early
crashes or need a very simple printk logging facility.
+config MCSAFE_TEST
+ def_bool n
+
config X86_PTDUMP_CORE
def_bool n
diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
index 522d11431433..748456c365f4 100644
--- a/arch/x86/boot/compressed/kaslr_64.c
+++ b/arch/x86/boot/compressed/kaslr_64.c
@@ -69,6 +69,8 @@ static struct alloc_pgt_data pgt_data;
/* The top level page table entry pointer. */
static unsigned long top_level_pgt;
+phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
+
/*
* Mapping information structure passed to kernel_ident_mapping_init().
* Due to relocation, pointers must be assigned at run time not build time.
@@ -81,6 +83,9 @@ void initialize_identity_maps(void)
/* If running as an SEV guest, the encryption mask is required. */
set_sev_encryption_mask();
+ /* Exclude the encryption mask from __PHYSICAL_MASK */
+ physical_mask &= ~sme_me_mask;
+
/* Init mapping_info with run-time function/buffer pointers. */
mapping_info.alloc_pgt_page = alloc_pgt_page;
mapping_info.context = &pgt_data;
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index fbf6a6c3fd2d..92190879b228 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -164,6 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
if (cached_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(regs);
}
if (cached_flags & _TIF_USER_RETURN_NOTIFY)
@@ -254,6 +255,8 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
local_irq_enable();
+ rseq_syscall(regs);
+
/*
* First do one-time work. If these work items are enabled, we
* want to run them exactly once per syscall exit with IRQs on.
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 14a2f996e543..3cf7b533b3d1 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -397,3 +397,4 @@
383 i386 statx sys_statx __ia32_sys_statx
384 i386 arch_prctl sys_arch_prctl __ia32_compat_sys_arch_prctl
385 i386 io_pgetevents sys_io_pgetevents __ia32_compat_sys_io_pgetevents
+386 i386 rseq sys_rseq __ia32_sys_rseq
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index cd36232ab62f..f0b1709a5ffb 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -342,6 +342,7 @@
331 common pkey_free __x64_sys_pkey_free
332 common statx __x64_sys_statx
333 common io_pgetevents __x64_sys_io_pgetevents
+334 common rseq __x64_sys_rseq
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 08acd954f00e..74a9e06b6cfd 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -436,6 +436,8 @@ static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
#endif /* CONFIG_X86_LOCAL_APIC */
+extern void apic_ack_irq(struct irq_data *data);
+
static inline void ack_APIC_irq(void)
{
/*
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index fb00a2fca990..5701f5cecd31 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -282,7 +282,9 @@
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
diff --git a/arch/x86/include/asm/mcsafe_test.h b/arch/x86/include/asm/mcsafe_test.h
new file mode 100644
index 000000000000..eb59804b6201
--- /dev/null
+++ b/arch/x86/include/asm/mcsafe_test.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MCSAFE_TEST_H_
+#define _MCSAFE_TEST_H_
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_MCSAFE_TEST
+extern unsigned long mcsafe_test_src;
+extern unsigned long mcsafe_test_dst;
+
+static inline void mcsafe_inject_src(void *addr)
+{
+ if (addr)
+ mcsafe_test_src = (unsigned long) addr;
+ else
+ mcsafe_test_src = ~0UL;
+}
+
+static inline void mcsafe_inject_dst(void *addr)
+{
+ if (addr)
+ mcsafe_test_dst = (unsigned long) addr;
+ else
+ mcsafe_test_dst = ~0UL;
+}
+#else /* CONFIG_MCSAFE_TEST */
+static inline void mcsafe_inject_src(void *addr)
+{
+}
+
+static inline void mcsafe_inject_dst(void *addr)
+{
+}
+#endif /* CONFIG_MCSAFE_TEST */
+
+#else /* __ASSEMBLY__ */
+#include <asm/export.h>
+
+#ifdef CONFIG_MCSAFE_TEST
+.macro MCSAFE_TEST_CTL
+ .pushsection .data
+ .align 8
+ .globl mcsafe_test_src
+ mcsafe_test_src:
+ .quad 0
+ EXPORT_SYMBOL_GPL(mcsafe_test_src)
+ .globl mcsafe_test_dst
+ mcsafe_test_dst:
+ .quad 0
+ EXPORT_SYMBOL_GPL(mcsafe_test_dst)
+ .popsection
+.endm
+
+.macro MCSAFE_TEST_SRC reg count target
+ leaq \count(\reg), %r9
+ cmp mcsafe_test_src, %r9
+ ja \target
+.endm
+
+.macro MCSAFE_TEST_DST reg count target
+ leaq \count(\reg), %r9
+ cmp mcsafe_test_dst, %r9
+ ja \target
+.endm
+#else
+.macro MCSAFE_TEST_CTL
+.endm
+
+.macro MCSAFE_TEST_SRC reg count target
+.endm
+
+.macro MCSAFE_TEST_DST reg count target
+.endm
+#endif /* CONFIG_MCSAFE_TEST */
+#endif /* __ASSEMBLY__ */
+#endif /* _MCSAFE_TEST_H_ */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 1e53560a84bb..c85e15010f48 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -17,7 +17,6 @@
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
-#define __PHYSICAL_MASK ((phys_addr_t)(__sme_clr((1ULL << __PHYSICAL_MASK_SHIFT) - 1)))
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
@@ -55,6 +54,13 @@
#ifndef __ASSEMBLY__
+#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
+extern phys_addr_t physical_mask;
+#define __PHYSICAL_MASK physical_mask
+#else
+#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
+#endif
+
extern int devmem_is_allowed(unsigned long pagenr);
extern unsigned long max_low_pfn_mapped;
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3c5385f9a88f..0fdcd21dadbd 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
}
#endif
-static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
+static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
pgd_t pgd;
@@ -230,7 +230,7 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
*p4dp = native_make_p4d(native_pgd_val(pgd));
}
-static inline void native_p4d_clear(p4d_t *p4d)
+static __always_inline void native_p4d_clear(p4d_t *p4d)
{
native_set_p4d(p4d, native_make_p4d(0));
}
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 1e5a40673953..99fff853c944 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -65,7 +65,6 @@
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0))
#endif
-#define __HAVE_ARCH_PTE_SPECIAL
#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
_PAGE_PKEY_BIT1 | \
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 22647a642e98..0af81b590a0c 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -236,7 +236,7 @@ TRACE_EVENT(vector_alloc,
TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
int ret),
- TP_ARGS(irq, vector, ret, reserved),
+ TP_ARGS(irq, vector, reserved, ret),
TP_STRUCT__entry(
__field( unsigned int, irq )
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 2d27236c16a3..b85a7c54c6a1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -301,5 +301,6 @@ extern struct x86_apic_ops x86_apic_ops;
extern void x86_early_init_platform_quirks(void);
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
+extern bool x86_pnpbios_disabled(void);
#endif
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7553819c74c3..3982f79d2377 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1851,7 +1851,7 @@ static void ioapic_ir_ack_level(struct irq_data *irq_data)
* intr-remapping table entry. Hence for the io-apic
* EOI we use the pin number.
*/
- ack_APIC_irq();
+ apic_ack_irq(irq_data);
eoi_ioapic_pin(data->entry.vector, data);
}
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index bb6f7a2148d7..35aaee4fc028 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -235,6 +235,15 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
return 0;
+ /*
+ * Careful here. @apicd might either have move_in_progress set or
+ * be enqueued for cleanup. Assigning a new vector would either
+ * leave a stale vector on some CPU around or in case of a pending
+ * cleanup corrupt the hlist.
+ */
+ if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
+ return -EBUSY;
+
vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
if (vector > 0)
apic_update_vector(irqd, vector, cpu);
@@ -579,8 +588,7 @@ error:
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
struct irq_data *irqd, int ind)
{
- unsigned int cpu, vector, prev_cpu, prev_vector;
- struct apic_chip_data *apicd;
+ struct apic_chip_data apicd;
unsigned long flags;
int irq;
@@ -596,24 +604,26 @@ static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
return;
}
- apicd = irqd->chip_data;
- if (!apicd) {
+ if (!irqd->chip_data) {
seq_printf(m, "%*sVector: Not assigned\n", ind, "");
return;
}
raw_spin_lock_irqsave(&vector_lock, flags);
- cpu = apicd->cpu;
- vector = apicd->vector;
- prev_cpu = apicd->prev_cpu;
- prev_vector = apicd->prev_vector;
+ memcpy(&apicd, irqd->chip_data, sizeof(apicd));
raw_spin_unlock_irqrestore(&vector_lock, flags);
- seq_printf(m, "%*sVector: %5u\n", ind, "", vector);
- seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu);
- if (prev_vector) {
- seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector);
- seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu);
+
+ seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
+ seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
+ if (apicd.prev_vector) {
+ seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
+ seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
}
+ seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
+ seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
+ seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
+ seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
+ seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif
@@ -800,13 +810,18 @@ static int apic_retrigger_irq(struct irq_data *irqd)
return 1;
}
-void apic_ack_edge(struct irq_data *irqd)
+void apic_ack_irq(struct irq_data *irqd)
{
- irq_complete_move(irqd_cfg(irqd));
irq_move_irq(irqd);
ack_APIC_irq();
}
+void apic_ack_edge(struct irq_data *irqd)
+{
+ irq_complete_move(irqd_cfg(irqd));
+ apic_ack_irq(irqd);
+}
+
static struct irq_chip lapic_controller = {
.name = "APIC",
.irq_ack = apic_ack_edge,
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 7416fc206b4a..cd0fda1fff6d 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -529,18 +529,15 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
if (mode == SPEC_STORE_BYPASS_DISABLE) {
setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
/*
- * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
- * a completely different MSR and bit dependent on family.
+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
+ * use a completely different MSR and bit dependent on family.
*/
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ x86_amd_ssb_disable();
+ else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
- break;
- case X86_VENDOR_AMD:
- x86_amd_ssb_disable();
- break;
}
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 95c8e507580d..910b47ee8078 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -803,6 +803,12 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_STIBP);
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
}
+
+ if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
+ }
}
void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -992,7 +998,8 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
- !(ia32_cap & ARCH_CAP_SSB_NO))
+ !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
if (x86_match_cpu(cpu_no_meltdown))
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 24bfa63e86cf..ec4754f81cbd 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -845,6 +845,8 @@ static __init void rdt_quirks(void)
case INTEL_FAM6_SKYLAKE_X:
if (boot_cpu_data.x86_stepping <= 4)
set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
+ else
+ set_rdt_options("!l3cat");
}
}
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 77e201301528..08286269fd24 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -70,7 +70,7 @@ static DEFINE_MUTEX(microcode_mutex);
/*
* Serialize late loading so that CPUs get updated one-by-one.
*/
-static DEFINE_SPINLOCK(update_lock);
+static DEFINE_RAW_SPINLOCK(update_lock);
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
@@ -560,9 +560,9 @@ static int __reload_late(void *info)
if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
return -1;
- spin_lock(&update_lock);
+ raw_spin_lock(&update_lock);
apply_microcode_local(&err);
- spin_unlock(&update_lock);
+ raw_spin_unlock(&update_lock);
/* siblings return UCODE_OK because their engine got updated already */
if (err > UCODE_NFOUND) {
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 558444b23923..c610f47373e4 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -106,17 +106,9 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
memset(line, 0, LINE_SIZE);
- length = len;
- length--;
-
- if (length > LINE_SIZE - 1)
- length = LINE_SIZE - 1;
-
+ length = strncpy_from_user(line, buf, LINE_SIZE - 1);
if (length < 0)
- return -EINVAL;
-
- if (copy_from_user(line, buf, length))
- return -EFAULT;
+ return length;
linelen = strlen(line);
ptr = line + linelen - 1;
@@ -149,17 +141,16 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
return -EINVAL;
ptr = skip_spaces(ptr + 5);
- for (i = 0; i < MTRR_NUM_TYPES; ++i) {
- if (strcmp(ptr, mtrr_strings[i]))
- continue;
- base >>= PAGE_SHIFT;
- size >>= PAGE_SHIFT;
- err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true);
- if (err < 0)
- return err;
- return len;
- }
- return -EINVAL;
+ i = match_string(mtrr_strings, MTRR_NUM_TYPES, ptr);
+ if (i < 0)
+ return i;
+
+ base >>= PAGE_SHIFT;
+ size >>= PAGE_SHIFT;
+ err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true);
+ if (err < 0)
+ return err;
+ return len;
}
static long
diff --git a/arch/x86/kernel/i8237.c b/arch/x86/kernel/i8237.c
index 8eeaa81de066..0a3e70fd00d6 100644
--- a/arch/x86/kernel/i8237.c
+++ b/arch/x86/kernel/i8237.c
@@ -9,10 +9,12 @@
* your option) any later version.
*/
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <asm/dma.h>
+#include <asm/x86_init.h>
/*
* This module just handles suspend/resume issues with the
@@ -49,6 +51,29 @@ static struct syscore_ops i8237_syscore_ops = {
static int __init i8237A_init_ops(void)
{
+ /*
+ * From SKL PCH onwards, the legacy DMA device is removed in which the
+ * I/O ports (81h-83h, 87h, 89h-8Bh, 8Fh) related to it are removed
+ * as well. All removed ports must return 0xff for a inb() request.
+ *
+ * Note: DMA_PAGE_2 (port 0x81) should not be checked for detecting
+ * the presence of DMA device since it may be used by BIOS to decode
+ * LPC traffic for POST codes. Original LPC only decodes one byte of
+ * port 0x80 but some BIOS may choose to enhance PCH LPC port 0x8x
+ * decoding.
+ */
+ if (dma_inb(DMA_PAGE_0) == 0xFF)
+ return -ENODEV;
+
+ /*
+ * It is not required to load this driver as newer SoC may not
+ * support 8237 DMA or bus mastering from LPC. Platform firmware
+ * must announce the support for such legacy devices via
+ * ACPI_FADT_LEGACY_DEVICES field in FADT table.
+ */
+ if (x86_pnpbios_disabled() && dmi_get_bios_year() >= 2017)
+ return -ENODEV;
+
register_syscore_ops(&i8237_syscore_ops);
return 0;
}
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 2c3a1b4294eb..74383a3780dc 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -317,15 +317,12 @@ void __init idt_setup_apic_and_irq_gates(void)
set_intr_gate(i, entry);
}
- for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
#ifdef CONFIG_X86_LOCAL_APIC
+ for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
set_bit(i, system_vectors);
set_intr_gate(i, spurious_interrupt);
-#else
- entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
- set_intr_gate(i, entry);
-#endif
}
+#endif
}
/**
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
index 235fe6008ac8..b348a672f71d 100644
--- a/arch/x86/kernel/platform-quirks.c
+++ b/arch/x86/kernel/platform-quirks.c
@@ -33,9 +33,14 @@ void __init x86_early_init_platform_quirks(void)
x86_platform.set_legacy_features();
}
+bool __init x86_pnpbios_disabled(void)
+{
+ return x86_platform.legacy.devices.pnpbios == 0;
+}
+
#if defined(CONFIG_PNPBIOS)
bool __init arch_pnpbios_disabled(void)
{
- return x86_platform.legacy.devices.pnpbios == 0;
+ return x86_pnpbios_disabled();
}
#endif
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index da270b95fe4d..445ca11ff863 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -688,6 +688,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *set = sigmask_to_save();
compat_sigset_t *cset = (compat_sigset_t *) set;
+ /*
+ * Increment event counter and perform fixup for the pre-signal
+ * frame.
+ */
+ rseq_signal_deliver(regs);
+
/* Set up the stack frame */
if (is_ia32_frame(ksig)) {
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 92bf2f2e7cdd..f4f30d0c25c4 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -379,7 +379,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
- F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+ F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
+ F(AMD_SSB_NO);
/* cpuid 0xC0000001.edx */
const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -664,7 +665,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ebx |= F(VIRT_SSBD);
entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ /*
+ * The preference is to use SPEC CTRL MSR instead of the
+ * VIRT_SPEC MSR.
+ */
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+ !boot_cpu_has(X86_FEATURE_AMD_SSBD))
entry->ebx |= F(VIRT_SSBD);
break;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 26110c202b19..950ec50f77c3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4115,7 +4115,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
return 1;
msr_info->data = svm->spec_ctrl;
@@ -4217,11 +4218,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_IA32_SPEC_CTRL:
if (!msr->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
return 1;
/* The STIBP bit doesn't fault even if it's not advertised */
- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
return 1;
svm->spec_ctrl = data;
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index c3b527a9f95d..298ef1479240 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
+#include <asm/mcsafe_test.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
@@ -183,6 +184,9 @@ ENTRY(memcpy_orig)
ENDPROC(memcpy_orig)
#ifndef CONFIG_UML
+
+MCSAFE_TEST_CTL
+
/*
* __memcpy_mcsafe - memory copy with machine check exception handling
* Note that we only catch machine checks when reading the source addresses.
@@ -206,6 +210,8 @@ ENTRY(__memcpy_mcsafe)
subl %ecx, %edx
.L_read_leading_bytes:
movb (%rsi), %al
+ MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
+ MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
.L_write_leading_bytes:
movb %al, (%rdi)
incq %rsi
@@ -221,6 +227,8 @@ ENTRY(__memcpy_mcsafe)
.L_read_words:
movq (%rsi), %r8
+ MCSAFE_TEST_SRC %rsi 8 .E_read_words
+ MCSAFE_TEST_DST %rdi 8 .E_write_words
.L_write_words:
movq %r8, (%rdi)
addq $8, %rsi
@@ -237,6 +245,8 @@ ENTRY(__memcpy_mcsafe)
movl %edx, %ecx
.L_read_trailing_bytes:
movb (%rsi), %al
+ MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
+ MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
.L_write_trailing_bytes:
movb %al, (%rdi)
incq %rsi
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 1b2197d13832..7ae36868aed2 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -527,6 +527,7 @@ void __init sme_enable(struct boot_params *bp)
/* SEV state cannot be controlled by a command line option */
sme_me_mask = me_mask;
sev_enabled = true;
+ physical_mask &= ~sme_me_mask;
return;
}
@@ -561,4 +562,6 @@ void __init sme_enable(struct boot_params *bp)
sme_me_mask = 0;
else
sme_me_mask = active_by_default ? me_mask : 0;
+
+ physical_mask &= ~sme_me_mask;
}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index ffc8c13c50e4..47b5951e592b 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -8,6 +8,11 @@
#include <asm/fixmap.h>
#include <asm/mtrr.h>
+#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
+phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
+EXPORT_SYMBOL(physical_mask);
+#endif
+
#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
#ifdef CONFIG_HIGHPTE
@@ -114,13 +119,12 @@ static inline void pgd_list_del(pgd_t *pgd)
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
- BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
- virt_to_page(pgd)->index = (pgoff_t)mm;
+ virt_to_page(pgd)->pt_mm = mm;
}
struct mm_struct *pgd_page_get_mm(struct page *page)
{
- return (struct mm_struct *)page->index;
+ return page->pt_mm;
}
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index f0114007e915..e5f753cbb1c3 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -59,24 +59,15 @@ int early_pci_allowed(void)
void early_dump_pci_device(u8 bus, u8 slot, u8 func)
{
+ u32 value[256 / 4];
int i;
- int j;
- u32 val;
- printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:",
- bus, slot, func);
+ pr_info("pci 0000:%02x:%02x.%d config space:\n", bus, slot, func);
- for (i = 0; i < 256; i += 4) {
- if (!(i & 0x0f))
- printk("\n %02x:",i);
+ for (i = 0; i < 256; i += 4)
+ value[i / 4] = read_pci_config(bus, slot, func, i);
- val = read_pci_config(bus, slot, func, i);
- for (j = 0; j < 4; j++) {
- printk(" %02x", val & 0xff);
- val >>= 8;
- }
- }
- printk("\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, value, 256, false);
}
void early_dump_pci_devices(void)
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 54ef19e90705..13f4485ca388 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -636,6 +636,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index b36caae0fb2f..b96d38288c60 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -615,7 +615,7 @@ static int uv2_3_wait_completion(struct bau_desc *bau_desc,
/* spin on the status MMR, waiting for it to go idle */
while (descriptor_stat != UV2H_DESC_IDLE) {
- if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
+ if (descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) {
/*
* A h/w bug on the destination side may
* have prevented the message being marked
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index e4cb9f4cde8a..fc13cbbb2dce 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -47,11 +47,6 @@ static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
static void uv_noop(struct irq_data *data) { }
-static void uv_ack_apic(struct irq_data *data)
-{
- ack_APIC_irq();
-}
-
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
@@ -73,7 +68,7 @@ static struct irq_chip uv_irq_chip = {
.name = "UV-CORE",
.irq_mask = uv_noop,
.irq_unmask = uv_noop,
- .irq_eoi = uv_ack_apic,
+ .irq_eoi = apic_ack_irq,
.irq_set_affinity = uv_set_irq_affinity,
};
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2d76106788a3..96fc2f0fdbfe 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -63,37 +63,44 @@ static noinline void xen_flush_tlb_all(void)
#define REMAP_BATCH_SIZE 16
struct remap_data {
- xen_pfn_t *mfn;
+ xen_pfn_t *pfn;
bool contiguous;
+ bool no_translate;
pgprot_t prot;
struct mmu_update *mmu_update;
};
-static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
unsigned long addr, void *data)
{
struct remap_data *rmd = data;
- pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
+ pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
- /* If we have a contiguous range, just update the mfn itself,
- else update pointer to be "next mfn". */
+ /*
+ * If we have a contiguous range, just update the pfn itself,
+ * else update pointer to be "next pfn".
+ */
if (rmd->contiguous)
- (*rmd->mfn)++;
+ (*rmd->pfn)++;
else
- rmd->mfn++;
+ rmd->pfn++;
- rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+ rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+ rmd->mmu_update->ptr |= rmd->no_translate ?
+ MMU_PT_UPDATE_NO_TRANSLATE :
+ MMU_NORMAL_PT_UPDATE;
rmd->mmu_update->val = pte_val_ma(pte);
rmd->mmu_update++;
return 0;
}
-static int do_remap_gfn(struct vm_area_struct *vma,
+static int do_remap_pfn(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t *gfn, int nr,
+ xen_pfn_t *pfn, int nr,
int *err_ptr, pgprot_t prot,
- unsigned domid,
+ unsigned int domid,
+ bool no_translate,
struct page **pages)
{
int err = 0;
@@ -104,11 +111,14 @@ static int do_remap_gfn(struct vm_area_struct *vma,
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
- rmd.mfn = gfn;
+ rmd.pfn = pfn;
rmd.prot = prot;
- /* We use the err_ptr to indicate if there we are doing a contiguous
- * mapping or a discontigious mapping. */
+ /*
+	 * We use the err_ptr to indicate if we are doing a contiguous
+	 * mapping or a discontiguous mapping.
+ */
rmd.contiguous = !err_ptr;
+ rmd.no_translate = no_translate;
while (nr) {
int index = 0;
@@ -119,7 +129,7 @@ static int do_remap_gfn(struct vm_area_struct *vma,
rmd.mmu_update = mmu_update;
err = apply_to_page_range(vma->vm_mm, addr, range,
- remap_area_mfn_pte_fn, &rmd);
+ remap_area_pfn_pte_fn, &rmd);
if (err)
goto out;
@@ -173,7 +183,8 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
- return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
+ return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
+ pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
@@ -192,10 +203,25 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
* cause of "wrong memory was mapped in".
*/
BUG_ON(err_ptr == NULL);
- return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
+ return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+ false, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid, struct page **pages)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -EOPNOTSUPP;
+
+ return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
+ true, pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int nr, struct page **pages)
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index e1a5fbeae08d..ca2d3b2bf2af 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -54,12 +54,19 @@
* charge of setting up it's own stack, GDT and IDT.
*/
+#define PVH_GDT_ENTRY_CS 1
+#define PVH_GDT_ENTRY_DS 2
+#define PVH_GDT_ENTRY_CANARY 3
+#define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8)
+#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
+#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
+
ENTRY(pvh_start_xen)
cld
lgdt (_pa(gdt))
- mov $(__BOOT_DS),%eax
+ mov $PVH_DS_SEL,%eax
mov %eax,%ds
mov %eax,%es
mov %eax,%ss
@@ -93,11 +100,17 @@ ENTRY(pvh_start_xen)
mov %eax, %cr0
/* Jump to 64-bit mode. */
- ljmp $__KERNEL_CS, $_pa(1f)
+ ljmp $PVH_CS_SEL, $_pa(1f)
/* 64-bit entry point. */
.code64
1:
+ /* Set base address in stack canary descriptor. */
+ mov $MSR_GS_BASE,%ecx
+ mov $_pa(canary), %eax
+ xor %edx, %edx
+ wrmsr
+
call xen_prepare_pvh
/* startup_64 expects boot_params in %rsi. */
@@ -107,6 +120,17 @@ ENTRY(pvh_start_xen)
#else /* CONFIG_X86_64 */
+ /* Set base address in stack canary descriptor. */
+ movl $_pa(gdt_start),%eax
+ movl $_pa(canary),%ecx
+ movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
+ movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
+
+ mov $PVH_CANARY_SEL,%eax
+ mov %eax,%gs
+
call mk_early_pgtbl_32
mov $_pa(initial_page_table), %eax
@@ -116,13 +140,13 @@ ENTRY(pvh_start_xen)
or $(X86_CR0_PG | X86_CR0_PE), %eax
mov %eax, %cr0
- ljmp $__BOOT_CS, $1f
+ ljmp $PVH_CS_SEL, $1f
1:
call xen_prepare_pvh
mov $_pa(pvh_bootparams), %esi
/* startup_32 doesn't expect paging and PAE to be on. */
- ljmp $__BOOT_CS, $_pa(2f)
+ ljmp $PVH_CS_SEL, $_pa(2f)
2:
mov %cr0, %eax
and $~X86_CR0_PG, %eax
@@ -131,7 +155,7 @@ ENTRY(pvh_start_xen)
and $~X86_CR4_PAE, %eax
mov %eax, %cr4
- ljmp $__BOOT_CS, $_pa(startup_32)
+ ljmp $PVH_CS_SEL, $_pa(startup_32)
#endif
END(pvh_start_xen)
@@ -143,16 +167,19 @@ gdt:
.word 0
gdt_start:
.quad 0x0000000000000000 /* NULL descriptor */
- .quad 0x0000000000000000 /* reserved */
#ifdef CONFIG_X86_64
- .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* __KERNEL_CS */
+ .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
#else
- .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */
+ .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
#endif
- .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */
+ .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
+ .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
gdt_end:
- .balign 4
+ .balign 16
+canary:
+ .fill 48, 1, 0
+
early_stack:
.fill 256, 1, 0
early_stack_end:
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 6ddf0a30c60d..883024054b05 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -20,8 +20,6 @@
#define pcibios_assign_all_busses() 0
-extern struct pci_controller* pcibios_alloc_controller(void);
-
/* Assume some values. (We should revise them, if necessary) */
#define PCIBIOS_MIN_IO 0x2000
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index b7c7a60c7000..21f13e9aabe1 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -41,8 +41,8 @@
* pci_bus_add_device
*/
-struct pci_controller* pci_ctrl_head;
-struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;
+static struct pci_controller *pci_ctrl_head;
+static struct pci_controller **pci_ctrl_tail = &pci_ctrl_head;
static int pci_bus_count;
@@ -80,50 +80,6 @@ pcibios_align_resource(void *data, const struct resource *res,
return start;
}
-int
-pcibios_enable_resources(struct pci_dev *dev, int mask)
-{
- u16 cmd, old_cmd;
- int idx;
- struct resource *r;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- old_cmd = cmd;
- for(idx=0; idx<6; idx++) {
- r = &dev->resource[idx];
- if (!r->start && r->end) {
- pr_err("PCI: Device %s not available because "
- "of resource collisions\n", pci_name(dev));
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- if (dev->resource[PCI_ROM_RESOURCE].start)
- cmd |= PCI_COMMAND_MEMORY;
- if (cmd != old_cmd) {
- pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
-}
-
-struct pci_controller * __init pcibios_alloc_controller(void)
-{
- struct pci_controller *pci_ctrl;
-
- pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
- memset(pci_ctrl, 0, sizeof(struct pci_controller));
-
- *pci_ctrl_tail = pci_ctrl;
- pci_ctrl_tail = &pci_ctrl->next;
-
- return pci_ctrl;
-}
-
static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
struct list_head *resources)
{
@@ -223,8 +179,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
for (idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
- pr_err("PCI: Device %s not available because "
- "of resource collisions\n", pci_name(dev));
+ pci_err(dev, "can't enable device: resource collisions\n");
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
@@ -233,29 +188,13 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
- pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
+ pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
-#ifdef CONFIG_PROC_FS
-
-/*
- * Return the index of the PCI controller for device pdev.
- */
-
-int
-pci_controller_num(struct pci_dev *dev)
-{
- struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
- return pci_ctrl->index;
-}
-
-#endif /* CONFIG_PROC_FS */
-
/*
* Platform support for /proc/bus/pci/X/Y mmap()s.
* -- paulus.
diff --git a/block/bio.c b/block/bio.c
index 5f7563598b1c..db9a40e9a136 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1997,6 +1997,24 @@ bad:
}
EXPORT_SYMBOL(bioset_init);
+/*
+ * Initialize and setup a new bio_set, based on the settings from
+ * another bio_set.
+ */
+int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
+{
+ int flags;
+
+ flags = 0;
+ if (src->bvec_pool.min_nr)
+ flags |= BIOSET_NEED_BVECS;
+ if (src->rescue_workqueue)
+ flags |= BIOSET_NEED_RESCUER;
+
+ return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
+}
+EXPORT_SYMBOL(bioset_init_from_src);
+
#ifdef CONFIG_BLK_CGROUP
/**
diff --git a/block/blk-core.c b/block/blk-core.c
index 3f56be15f17e..cf0ee764b908 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2220,10 +2220,10 @@ static inline int blk_partition_remap(struct bio *bio)
if (bio_check_eod(bio, part_nr_sects_read(p)))
goto out;
bio->bi_iter.bi_sector += p->start_sect;
- bio->bi_partno = 0;
trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
bio->bi_iter.bi_sector - p->start_sect);
}
+ bio->bi_partno = 0;
ret = 0;
out:
rcu_read_unlock();
diff --git a/block/blk-flush.c b/block/blk-flush.c
index f17170675917..ce41f666de3e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -94,7 +94,7 @@ enum {
};
static bool blk_kick_flush(struct request_queue *q,
- struct blk_flush_queue *fq);
+ struct blk_flush_queue *fq, unsigned int flags);
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
@@ -169,9 +169,11 @@ static bool blk_flush_complete_seq(struct request *rq,
struct request_queue *q = rq->q;
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
bool queued = false, kicked;
+ unsigned int cmd_flags;
BUG_ON(rq->flush.seq & seq);
rq->flush.seq |= seq;
+ cmd_flags = rq->cmd_flags;
if (likely(!error))
seq = blk_flush_cur_seq(rq);
@@ -212,7 +214,7 @@ static bool blk_flush_complete_seq(struct request *rq,
BUG();
}
- kicked = blk_kick_flush(q, fq);
+ kicked = blk_kick_flush(q, fq, cmd_flags);
return kicked | queued;
}
@@ -281,6 +283,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
* @fq: flush queue
+ * @flags: cmd_flags of the original request
*
* Flush related states of @q have changed, consider issuing flush request.
* Please read the comment at the top of this file for more info.
@@ -291,7 +294,8 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
* RETURNS:
* %true if flush was issued, %false otherwise.
*/
-static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
+static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
+ unsigned int flags)
{
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
struct request *first_rq =
@@ -346,6 +350,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
}
flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
+ flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
diff --git a/block/ioprio.c b/block/ioprio.c
index 6f5d0b6625e3..f9821080c92c 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -61,15 +61,10 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
-SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
+int ioprio_check_cap(int ioprio)
{
int class = IOPRIO_PRIO_CLASS(ioprio);
int data = IOPRIO_PRIO_DATA(ioprio);
- struct task_struct *p, *g;
- struct user_struct *user;
- struct pid *pgrp;
- kuid_t uid;
- int ret;
switch (class) {
case IOPRIO_CLASS_RT:
@@ -92,6 +87,21 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
return -EINVAL;
}
+ return 0;
+}
+
+SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
+{
+ struct task_struct *p, *g;
+ struct user_struct *user;
+ struct pid *pgrp;
+ kuid_t uid;
+ int ret;
+
+ ret = ioprio_check_cap(ioprio);
+ if (ret)
+ return ret;
+
ret = -ESRCH;
rcu_read_lock();
switch (which) {
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
index e333583ab38c..60fb3df9897c 100644
--- a/block/partitions/cmdline.c
+++ b/block/partitions/cmdline.c
@@ -58,6 +58,62 @@ static int __init cmdline_parts_setup(char *s)
}
__setup("blkdevparts=", cmdline_parts_setup);
+static bool has_overlaps(sector_t from, sector_t size,
+ sector_t from2, sector_t size2)
+{
+ sector_t end = from + size;
+ sector_t end2 = from2 + size2;
+
+ if (from >= from2 && from < end2)
+ return true;
+
+ if (end > from2 && end <= end2)
+ return true;
+
+ if (from2 >= from && from2 < end)
+ return true;
+
+ if (end2 > from && end2 <= end)
+ return true;
+
+ return false;
+}
+
+static inline void overlaps_warns_header(void)
+{
+ pr_warn("Overlapping partitions are used in command line partitions.");
+ pr_warn("Don't use filesystems on overlapping partitions:");
+}
+
+static void cmdline_parts_verifier(int slot, struct parsed_partitions *state)
+{
+ int i;
+ bool header = true;
+
+ for (; slot < state->limit && state->parts[slot].has_info; slot++) {
+ for (i = slot+1; i < state->limit && state->parts[i].has_info;
+ i++) {
+ if (has_overlaps(state->parts[slot].from,
+ state->parts[slot].size,
+ state->parts[i].from,
+ state->parts[i].size)) {
+ if (header) {
+ header = false;
+ overlaps_warns_header();
+ }
+ pr_warn("%s[%llu,%llu] overlaps with "
+ "%s[%llu,%llu].",
+ state->parts[slot].info.volname,
+ (u64)state->parts[slot].from << 9,
+ (u64)state->parts[slot].size << 9,
+ state->parts[i].info.volname,
+ (u64)state->parts[i].from << 9,
+ (u64)state->parts[i].size << 9);
+ }
+ }
+ }
+}
+
/*
* Purpose: allocate cmdline partitions.
* Returns:
@@ -93,6 +149,7 @@ int cmdline_partition(struct parsed_partitions *state)
disk_size = get_capacity(state->bdev->bd_disk) << 9;
cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+ cmdline_parts_verifier(1, state);
strlcat(state->pp_buf, "\n", PAGE_SIZE);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 516d7b36d6fb..b533eeb6139d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -547,6 +547,9 @@ config ACPI_CONFIGFS
if ARM64
source "drivers/acpi/arm64/Kconfig"
+
+config ACPI_PPTT
+ bool
endif
config TPS68470_PMIC_OPREGION
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 48e202752754..6d59aa109a91 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_ACPI_BGRT) += bgrt.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o
obj-$(CONFIG_ACPI_SPCR_TABLE) += spcr.o
obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o
+obj-$(CONFIG_ACPI_PPTT) += pptt.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index e2235ed3e4be..b87252bf4571 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1978,19 +1978,8 @@ static ssize_t range_index_show(struct device *dev,
}
static DEVICE_ATTR_RO(range_index);
-static ssize_t ecc_unit_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nd_region *nd_region = to_nd_region(dev);
- struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
-
- return sprintf(buf, "%d\n", nfit_spa->clear_err_unit);
-}
-static DEVICE_ATTR_RO(ecc_unit_size);
-
static struct attribute *acpi_nfit_region_attributes[] = {
&dev_attr_range_index.attr,
- &dev_attr_ecc_unit_size.attr,
NULL,
};
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 0da18bde6a16..7433035ded95 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -153,6 +153,7 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = {
{ OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
{ OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
{ OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
+ { OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" },
};
static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
@@ -472,9 +473,17 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
}
control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
- | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
| OSC_PCI_EXPRESS_PME_CONTROL;
+ if (IS_ENABLED(CONFIG_PCIEASPM))
+ control |= OSC_PCI_EXPRESS_LTR_CONTROL;
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
+ control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
+
if (pci_aer_available()) {
if (aer_acpi_firmware_first())
dev_info(&device->dev,
@@ -900,11 +909,15 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
host_bridge = to_pci_host_bridge(bus->bridge);
if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
- host_bridge->native_hotplug = 0;
+ host_bridge->native_pcie_hotplug = 0;
+ if (!(root->osc_control_set & OSC_PCI_SHPC_NATIVE_HP_CONTROL))
+ host_bridge->native_shpc_hotplug = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL))
host_bridge->native_aer = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_PME_CONTROL))
host_bridge->native_pme = 0;
+ if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL))
+ host_bridge->native_ltr = 0;
pci_scan_child_bus(bus);
pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info,
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
new file mode 100644
index 000000000000..e5ea1974d1e3
--- /dev/null
+++ b/drivers/acpi/pptt.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pptt.c - parsing of Processor Properties Topology Table (PPTT)
+ *
+ * Copyright (C) 2018, ARM
+ *
+ * This file implements parsing of the Processor Properties Topology Table
+ * which is optionally used to describe the processor and cache topology.
+ * Due to the relative pointers used throughout the table, this doesn't
+ * leverage the existing subtable parsing in the kernel.
+ *
+ * The PPTT structure is an inverted tree, with each node potentially
+ * holding one or two inverted tree data structures describing
+ * the caches available at that level. Each cache structure optionally
+ * contains properties describing the cache at a given level which can be
+ * used to override hardware probed values.
+ */
+#define pr_fmt(fmt) "ACPI PPTT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/cacheinfo.h>
+#include <acpi/processor.h>
+
+static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr,
+ u32 pptt_ref)
+{
+ struct acpi_subtable_header *entry;
+
+ /* there isn't a subtable at reference 0 */
+ if (pptt_ref < sizeof(struct acpi_subtable_header))
+ return NULL;
+
+ if (pptt_ref + sizeof(struct acpi_subtable_header) > table_hdr->length)
+ return NULL;
+
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, pptt_ref);
+
+ if (entry->length == 0)
+ return NULL;
+
+ if (pptt_ref + entry->length > table_hdr->length)
+ return NULL;
+
+ return entry;
+}
+
+static struct acpi_pptt_processor *fetch_pptt_node(struct acpi_table_header *table_hdr,
+ u32 pptt_ref)
+{
+ return (struct acpi_pptt_processor *)fetch_pptt_subtable(table_hdr, pptt_ref);
+}
+
+static struct acpi_pptt_cache *fetch_pptt_cache(struct acpi_table_header *table_hdr,
+ u32 pptt_ref)
+{
+ return (struct acpi_pptt_cache *)fetch_pptt_subtable(table_hdr, pptt_ref);
+}
+
+static struct acpi_subtable_header *acpi_get_pptt_resource(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *node,
+ int resource)
+{
+ u32 *ref;
+
+ if (resource >= node->number_of_priv_resources)
+ return NULL;
+
+ ref = ACPI_ADD_PTR(u32, node, sizeof(struct acpi_pptt_processor));
+ ref += resource;
+
+ return fetch_pptt_subtable(table_hdr, *ref);
+}
+
+static inline bool acpi_pptt_match_type(int table_type, int type)
+{
+ return ((table_type & ACPI_PPTT_MASK_CACHE_TYPE) == type ||
+ table_type & ACPI_PPTT_CACHE_TYPE_UNIFIED & type);
+}
+
+/**
+ * acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache
+ * @table_hdr: Pointer to the head of the PPTT table
+ * @local_level: passed res reflects this cache level
+ * @res: cache resource in the PPTT we want to walk
+ * @found: returns a pointer to the requested level if found
+ * @level: the requested cache level
+ * @type: the requested cache type
+ *
+ * Attempt to find a given cache level, while counting the max number
+ * of cache levels for the cache node.
+ *
+ * Given a pptt resource, verify that it is a cache node, then walk
+ * down each level of caches, counting how many levels are found
+ * as well as checking the cache type (icache, dcache, unified). If a
+ * level & type match, then we set found, and continue the search.
+ * Once the entire cache branch has been walked return its max
+ * depth.
+ *
+ * Return: The cache structure and the level we terminated with.
+ */
+static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
+ int local_level,
+ struct acpi_subtable_header *res,
+ struct acpi_pptt_cache **found,
+ int level, int type)
+{
+ struct acpi_pptt_cache *cache;
+
+ if (res->type != ACPI_PPTT_TYPE_CACHE)
+ return 0;
+
+ cache = (struct acpi_pptt_cache *) res;
+ while (cache) {
+ local_level++;
+
+ if (local_level == level &&
+ cache->flags & ACPI_PPTT_CACHE_TYPE_VALID &&
+ acpi_pptt_match_type(cache->attributes, type)) {
+ if (*found != NULL && cache != *found)
+ pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
+
+ pr_debug("Found cache @ level %d\n", level);
+ *found = cache;
+ /*
+ * continue looking at this node's resource list
+ * to verify that we don't find a duplicate
+ * cache node.
+ */
+ }
+ cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache);
+ }
+ return local_level;
+}
+
+static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ int *starting_level, int level,
+ int type)
+{
+ struct acpi_subtable_header *res;
+ int number_of_levels = *starting_level;
+ int resource = 0;
+ struct acpi_pptt_cache *ret = NULL;
+ int local_level;
+
+ /* walk down from processor node */
+ while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) {
+ resource++;
+
+ local_level = acpi_pptt_walk_cache(table_hdr, *starting_level,
+ res, &ret, level, type);
+ /*
+	 * We are looking for the max depth. Since it's potentially
+	 * possible for a given node to have resources with differing
+	 * depths, verify that the depth we have found is the largest.
+ */
+ if (number_of_levels < local_level)
+ number_of_levels = local_level;
+ }
+ if (number_of_levels > *starting_level)
+ *starting_level = number_of_levels;
+
+ return ret;
+}
+
+/**
+ * acpi_count_levels() - Given a PPTT table, and a cpu node, count the caches
+ * @table_hdr: Pointer to the head of the PPTT table
+ * @cpu_node: processor node we wish to count caches for
+ *
+ * Given a processor node containing a processing unit, walk into it and count
+ * how many levels exist solely for it, and then walk up each level until we hit
+ * the root node (ignore the package level because it may be possible to have
+ * caches that exist across packages). Count the number of cache levels that
+ * exist at each level on the way up.
+ *
+ * Return: Total number of levels found.
+ */
+static int acpi_count_levels(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node)
+{
+ int total_levels = 0;
+
+ do {
+ acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0);
+ cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
+ } while (cpu_node);
+
+ return total_levels;
+}
+
+/**
+ * acpi_pptt_leaf_node() - Given a processor node, determine if it's a leaf
+ * @table_hdr: Pointer to the head of the PPTT table
+ * @node: passed node is checked to see if it's a leaf
+ *
+ * Determine if the *node parameter is a leaf node by iterating the
+ * PPTT table, looking for nodes which reference it.
+ *
+ * Return: 0 if we find a node referencing the passed node (or table error),
+ * or 1 if we don't.
+ */
+static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *node)
+{
+ struct acpi_subtable_header *entry;
+ unsigned long table_end;
+ u32 node_entry;
+ struct acpi_pptt_processor *cpu_node;
+ u32 proc_sz;
+
+ table_end = (unsigned long)table_hdr + table_hdr->length;
+ node_entry = ACPI_PTR_DIFF(node, table_hdr);
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
+ sizeof(struct acpi_table_pptt));
+ proc_sz = sizeof(struct acpi_pptt_processor *);
+
+ while ((unsigned long)entry + proc_sz < table_end) {
+ cpu_node = (struct acpi_pptt_processor *)entry;
+ if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
+ cpu_node->parent == node_entry)
+ return 0;
+ if (entry->length == 0)
+ return 0;
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
+ entry->length);
+
+ }
+ return 1;
+}
+
+/**
+ * acpi_find_processor_node() - Given a PPTT table find the requested processor
+ * @table_hdr: Pointer to the head of the PPTT table
+ * @acpi_cpu_id: cpu we are searching for
+ *
+ * Find the subtable entry describing the provided processor.
+ * This is done by iterating the PPTT table looking for processor nodes
+ * which have an acpi_processor_id that matches the acpi_cpu_id parameter
+ * passed into the function. If we find a node that matches this criteria
+ * we verify that it's a leaf node in the topology rather than depending
+ * on the valid flag, which doesn't need to be set for leaf nodes.
+ *
+ * Return: NULL, or the processor's acpi_pptt_processor*
+ */
+static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_header *table_hdr,
+ u32 acpi_cpu_id)
+{
+ struct acpi_subtable_header *entry;
+ unsigned long table_end;
+ struct acpi_pptt_processor *cpu_node;
+ u32 proc_sz;
+
+ table_end = (unsigned long)table_hdr + table_hdr->length;
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
+ sizeof(struct acpi_table_pptt));
+ proc_sz = sizeof(struct acpi_pptt_processor *);
+
+ /* find the processor structure associated with this cpuid */
+ while ((unsigned long)entry + proc_sz < table_end) {
+ cpu_node = (struct acpi_pptt_processor *)entry;
+
+ if (entry->length == 0) {
+ pr_warn("Invalid zero length subtable\n");
+ break;
+ }
+ if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
+ acpi_cpu_id == cpu_node->acpi_processor_id &&
+ acpi_pptt_leaf_node(table_hdr, cpu_node)) {
+ return (struct acpi_pptt_processor *)entry;
+ }
+
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
+ entry->length);
+ }
+
+ return NULL;
+}
+
+static int acpi_find_cache_levels(struct acpi_table_header *table_hdr,
+ u32 acpi_cpu_id)
+{
+ int number_of_levels = 0;
+ struct acpi_pptt_processor *cpu;
+
+ cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id);
+ if (cpu)
+ number_of_levels = acpi_count_levels(table_hdr, cpu);
+
+ return number_of_levels;
+}
+
+static u8 acpi_cache_type(enum cache_type type)
+{
+ switch (type) {
+ case CACHE_TYPE_DATA:
+ pr_debug("Looking for data cache\n");
+ return ACPI_PPTT_CACHE_TYPE_DATA;
+ case CACHE_TYPE_INST:
+ pr_debug("Looking for instruction cache\n");
+ return ACPI_PPTT_CACHE_TYPE_INSTR;
+ default:
+ case CACHE_TYPE_UNIFIED:
+ pr_debug("Looking for unified cache\n");
+ /*
+ * It is important that ACPI_PPTT_CACHE_TYPE_UNIFIED
+ * contains the bit pattern that will match both
+ * ACPI unified bit patterns because we use it later
+ * to match both cases.
+ */
+ return ACPI_PPTT_CACHE_TYPE_UNIFIED;
+ }
+}
+
+static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *table_hdr,
+ u32 acpi_cpu_id,
+ enum cache_type type,
+ unsigned int level,
+ struct acpi_pptt_processor **node)
+{
+ int total_levels = 0;
+ struct acpi_pptt_cache *found = NULL;
+ struct acpi_pptt_processor *cpu_node;
+ u8 acpi_type = acpi_cache_type(type);
+
+ pr_debug("Looking for CPU %d's level %d cache type %d\n",
+ acpi_cpu_id, level, acpi_type);
+
+ cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id);
+
+ while (cpu_node && !found) {
+ found = acpi_find_cache_level(table_hdr, cpu_node,
+ &total_levels, level, acpi_type);
+ *node = cpu_node;
+ cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
+ }
+
+ return found;
+}
+
+/* total number of attributes checked by the properties code */
+#define PPTT_CHECKED_ATTRIBUTES 4
+
+/**
+ * update_cache_properties() - Update cacheinfo for the given processor
+ * @this_leaf: Kernel cache info structure being updated
+ * @found_cache: The PPTT node describing this cache instance
+ * @cpu_node: A unique reference to describe this cache instance
+ *
+ * The ACPI spec implies that the fields in the cache structures are used to
+ * extend and correct the information probed from the hardware. Let's only
+ * set fields that we determine are VALID.
+ *
+ * Return: nothing. Side effect of updating the global cacheinfo
+ */
+static void update_cache_properties(struct cacheinfo *this_leaf,
+ struct acpi_pptt_cache *found_cache,
+ struct acpi_pptt_processor *cpu_node)
+{
+ int valid_flags = 0;
+
+ this_leaf->fw_token = cpu_node;
+ if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID) {
+ this_leaf->size = found_cache->size;
+ valid_flags++;
+ }
+ if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID) {
+ this_leaf->coherency_line_size = found_cache->line_size;
+ valid_flags++;
+ }
+ if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID) {
+ this_leaf->number_of_sets = found_cache->number_of_sets;
+ valid_flags++;
+ }
+ if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID) {
+ this_leaf->ways_of_associativity = found_cache->associativity;
+ valid_flags++;
+ }
+ if (found_cache->flags & ACPI_PPTT_WRITE_POLICY_VALID) {
+ switch (found_cache->attributes & ACPI_PPTT_MASK_WRITE_POLICY) {
+ case ACPI_PPTT_CACHE_POLICY_WT:
+ this_leaf->attributes = CACHE_WRITE_THROUGH;
+ break;
+ case ACPI_PPTT_CACHE_POLICY_WB:
+ this_leaf->attributes = CACHE_WRITE_BACK;
+ break;
+ }
+ }
+ if (found_cache->flags & ACPI_PPTT_ALLOCATION_TYPE_VALID) {
+ switch (found_cache->attributes & ACPI_PPTT_MASK_ALLOCATION_TYPE) {
+ case ACPI_PPTT_CACHE_READ_ALLOCATE:
+ this_leaf->attributes |= CACHE_READ_ALLOCATE;
+ break;
+ case ACPI_PPTT_CACHE_WRITE_ALLOCATE:
+ this_leaf->attributes |= CACHE_WRITE_ALLOCATE;
+ break;
+ case ACPI_PPTT_CACHE_RW_ALLOCATE:
+ case ACPI_PPTT_CACHE_RW_ALLOCATE_ALT:
+ this_leaf->attributes |=
+ CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE;
+ break;
+ }
+ }
+ /*
+ * If the above flags are valid, and the cache type is NOCACHE
+ * update the cache type as well.
+ */
+ if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+ valid_flags == PPTT_CHECKED_ATTRIBUTES)
+ this_leaf->type = CACHE_TYPE_UNIFIED;
+}
+
+static void cache_setup_acpi_cpu(struct acpi_table_header *table,
+ unsigned int cpu)
+{
+ struct acpi_pptt_cache *found_cache;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct cacheinfo *this_leaf;
+ unsigned int index = 0;
+ struct acpi_pptt_processor *cpu_node = NULL;
+
+ while (index < get_cpu_cacheinfo(cpu)->num_leaves) {
+ this_leaf = this_cpu_ci->info_list + index;
+ found_cache = acpi_find_cache_node(table, acpi_cpu_id,
+ this_leaf->type,
+ this_leaf->level,
+ &cpu_node);
+ pr_debug("found = %p %p\n", found_cache, cpu_node);
+ if (found_cache)
+ update_cache_properties(this_leaf,
+ found_cache,
+ cpu_node);
+
+ index++;
+ }
+}
+
+/* Passing level values greater than this will result in search termination */
+#define PPTT_ABORT_PACKAGE 0xFF
+
+static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu,
+ int level, int flag)
+{
+ struct acpi_pptt_processor *prev_node;
+
+ while (cpu && level) {
+ if (cpu->flags & flag)
+ break;
+ pr_debug("level %d\n", level);
+ prev_node = fetch_pptt_node(table_hdr, cpu->parent);
+ if (prev_node == NULL)
+ break;
+ cpu = prev_node;
+ level--;
+ }
+ return cpu;
+}
+
+/**
+ * topology_get_acpi_cpu_tag() - Find a unique topology value for a feature
+ * @table: Pointer to the head of the PPTT table
+ * @cpu: Kernel logical cpu number
+ * @level: A level that terminates the search
+ * @flag: A flag which terminates the search
+ *
+ * Get a unique value given a cpu, and a topology level, that can be
+ * matched to determine which cpus share common topological features
+ * at that level.
+ *
+ * Return: Unique value, or -ENOENT if unable to locate cpu
+ */
+static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
+ unsigned int cpu, int level, int flag)
+{
+ struct acpi_pptt_processor *cpu_node;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (cpu_node) {
+ cpu_node = acpi_find_processor_package_id(table, cpu_node,
+ level, flag);
+ /* Only the first level has a guaranteed id */
+ if (level == 0)
+ return cpu_node->acpi_processor_id;
+ return ACPI_PTR_DIFF(cpu_node, table);
+ }
+ pr_warn_once("PPTT table found, but unable to locate core %d (%d)\n",
+ cpu, acpi_cpu_id);
+ return -ENOENT;
+}
+
+static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+ int retval;
+
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ pr_warn_once("No PPTT table found, cpu topology may be inaccurate\n");
+ return -ENOENT;
+ }
+ retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
+ pr_debug("Topology Setup ACPI cpu %d, level %d ret = %d\n",
+ cpu, level, retval);
+ acpi_put_table(table);
+
+ return retval;
+}
+
+/**
+ * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
+ * @cpu: Kernel logical cpu number
+ *
+ * Given a logical cpu number, returns the number of levels of cache represented
+ * in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0
+ * indicating we didn't find any cache levels.
+ *
+ * Return: Cache levels visible to this core.
+ */
+int acpi_find_last_cache_level(unsigned int cpu)
+{
+ u32 acpi_cpu_id;
+ struct acpi_table_header *table;
+ int number_of_levels = 0;
+ acpi_status status;
+
+ pr_debug("Cache Setup find last level cpu=%d\n", cpu);
+
+ acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ pr_warn_once("No PPTT table found, cache topology may be inaccurate\n");
+ } else {
+ number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id);
+ acpi_put_table(table);
+ }
+ pr_debug("Cache Setup find last level level=%d\n", number_of_levels);
+
+ return number_of_levels;
+}
+
+/**
+ * cache_setup_acpi() - Override CPU cache topology with data from the PPTT
+ * @cpu: Kernel logical cpu number
+ *
+ * Updates the global cache info provided by cpu_get_cacheinfo()
+ * when there are valid properties in the acpi_pptt_cache nodes. A
+ * successful parse may not result in any updates if none of the
+ * cache levels have any valid flags set. Further, a unique value is
+ * associated with each known CPU cache entry. This unique value
+ * can be used to determine whether caches are shared between cpus.
+ *
+ * Return: -ENOENT on failure to find table, or 0 on success
+ */
+int cache_setup_acpi(unsigned int cpu)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+
+ pr_debug("Cache Setup ACPI cpu %d\n", cpu);
+
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ pr_warn_once("No PPTT table found, cache topology may be inaccurate\n");
+ return -ENOENT;
+ }
+
+ cache_setup_acpi_cpu(table, cpu);
+ acpi_put_table(table);
+
+ return status;
+}
+
+/**
+ * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu
+ * @cpu: Kernel logical cpu number
+ * @level: The topological level for which we would like a unique ID
+ *
+ * Determine a topology unique ID for each thread/core/cluster/mc_grouping
+ * /socket/etc. This ID can then be used to group peers, which will have
+ * matching ids.
+ *
+ * The search terminates when either the requested level is found or
+ * we reach a root node. Levels beyond the termination point will return the
+ * same unique ID. The unique id for level 0 is the acpi processor id. All
+ * other levels beyond this use a generated value to uniquely identify
+ * a topological feature.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Otherwise returns a value which represents a unique topological feature.
+ */
+int find_acpi_cpu_topology(unsigned int cpu, int level)
+{
+ return find_acpi_cpu_topology_tag(cpu, level, 0);
+}
+
+/**
+ * find_acpi_cpu_cache_topology() - Determine a unique cache topology value
+ * @cpu: Kernel logical cpu number
+ * @level: The cache level for which we would like a unique ID
+ *
+ * Determine a unique ID for each unified cache in the system
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Otherwise returns a value which represents a unique topological feature.
+ */
+int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
+{
+ struct acpi_table_header *table;
+ struct acpi_pptt_cache *found_cache;
+ acpi_status status;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct acpi_pptt_processor *cpu_node = NULL;
+ int ret = -1;
+
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ pr_warn_once("No PPTT table found, topology may be inaccurate\n");
+ return -ENOENT;
+ }
+
+ found_cache = acpi_find_cache_node(table, acpi_cpu_id,
+ CACHE_TYPE_UNIFIED,
+ level,
+ &cpu_node);
+ if (found_cache)
+ ret = ACPI_PTR_DIFF(cpu_node, table);
+
+ acpi_put_table(table);
+
+ return ret;
+}
+
+
+/**
+ * find_acpi_cpu_topology_package() - Determine a unique cpu package value
+ * @cpu: Kernel logical cpu number
+ *
+ * Determine a topology unique package ID for the given cpu.
+ * This ID can then be used to group peers, which will have matching ids.
+ *
+ * The search terminates when either a level is found with the PHYSICAL_PACKAGE
+ * flag set or we reach a root node.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Otherwise returns a value which represents the package for this cpu.
+ */
+int find_acpi_cpu_topology_package(unsigned int cpu)
+{
+ return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
+ ACPI_PPTT_PHYSICAL_PACKAGE);
+}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 4a3410aa6540..a3d012b08fc5 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -462,7 +462,7 @@ static const char * const table_sigs[] = {
ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT,
- ACPI_SIG_NFIT, ACPI_SIG_HMAT, NULL };
+ ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, NULL };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ce5019db50fd..6a91d04351d9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -5054,6 +5054,18 @@ int ata_sas_port_init(struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);
+int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
+{
+ return ata_tport_add(parent, ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_add);
+
+void ata_sas_tport_delete(struct ata_port *ap)
+{
+ ata_tport_delete(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
+
/**
* ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
* @ap: SATA port to destroy
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 2c2ed9cf8796..57410f9c5d44 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -14,9 +14,6 @@ menuconfig AUXDISPLAY
If you say N, all options in this submenu will be skipped and disabled.
-config CHARLCD
- tristate "Character LCD core support" if COMPILE_TEST
-
if AUXDISPLAY
config HD44780
@@ -137,8 +134,8 @@ config CFAG12864B_RATE
config IMG_ASCII_LCD
tristate "Imagination Technologies ASCII LCD Display"
depends on HAS_IOMEM
- default y if MIPS_MALTA || MIPS_SEAD3
- select SYSCON
+ default y if MIPS_MALTA
+ select MFD_SYSCON
help
Enable this to support the simple ASCII LCD displays found on
development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
@@ -157,8 +154,6 @@ config HT16K33
Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
LED controller driver with keyscan.
-endif # AUXDISPLAY
-
config ARM_CHARLCD
bool "ARM Ltd. Character LCD Driver"
depends on PLAT_VERSATILE
@@ -169,7 +164,9 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
-config PANEL
+endif # AUXDISPLAY
+
+menuconfig PANEL
tristate "Parallel port LCD/Keypad Panel support"
depends on PARPORT
select CHARLCD
@@ -448,3 +445,6 @@ config PANEL_BOOT_MESSAGE
printf()-formatted message is valid with newline and escape codes.
endif # PANEL
+
+config CHARLCD
+ tristate "Character LCD core support" if COMPILE_TEST
diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index b3176ee92b90..296fb30dfa00 100644
--- a/drivers/auxdisplay/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the on-board character LCD found on some ARM reference boards
* This is basically an Hitachi HD44780 LCD with a custom IP block to drive it
* http://en.wikipedia.org/wiki/HD44780_Character_LCD
* Currently it will just display the text "ARM Linux" and the linux version
*
- * License terms: GNU General Public License (GPL) version 2
* Author: Linus Walleij <triad@df.lth.se>
*/
#include <linux/init.h>
@@ -54,12 +54,14 @@
#define HD_BUSY_FLAG 0x80U
/**
+ * struct charlcd - Private data structure
* @dev: a pointer back to containing device
* @phybase: the offset to the controller in physical memory
* @physize: the size of the physical page
* @virtbase: the offset to the controller in virtual memory
* @irq: reserved interrupt number
* @complete: completion structure for the last LCD command
+ * @init_work: delayed work structure to initialize the display on boot
*/
struct charlcd {
struct device *dev;
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index 41ce4bd96813..6bd2f65e116a 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -1,26 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Filename: cfag12864b.c
* Version: 0.1.0
* Description: cfag12864b LCD driver
- * License: GPLv2
* Depends: ks0108
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/init.h>
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index a3874034e2ce..40c8a552a478 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -1,26 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Filename: cfag12864bfb.c
* Version: 0.1.0
* Description: cfag12864b LCD framebuffer driver
- * License: GPLv2
* Depends: cfag12864b
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/init.h>
@@ -37,7 +23,7 @@
#define CFAG12864BFB_NAME "cfag12864bfb"
-static struct fb_fix_screeninfo cfag12864bfb_fix = {
+static const struct fb_fix_screeninfo cfag12864bfb_fix = {
.id = "cfag12864b",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
@@ -48,7 +34,7 @@ static struct fb_fix_screeninfo cfag12864bfb_fix = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo cfag12864bfb_var = {
+static const struct fb_var_screeninfo cfag12864bfb_var = {
.xres = CFAG12864B_WIDTH,
.yres = CFAG12864B_HEIGHT,
.xres_virtual = CFAG12864B_WIDTH,
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 642afd88870b..8673fc2b9eb8 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -1,16 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Character LCD driver for Linux
*
* Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
* Copyright (C) 2016-2017 Glider bvba
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/atomic.h>
+#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
@@ -192,10 +189,11 @@ static void charlcd_print(struct charlcd *lcd, char c)
c = lcd->char_conv[(unsigned char)c];
lcd->ops->write_data(lcd, c);
priv->addr.x++;
+
+ /* prevents the cursor from wrapping onto the next line */
+ if (priv->addr.x == lcd->bwidth)
+ charlcd_gotoxy(lcd);
}
- /* prevents the cursor from wrapping onto the next line */
- if (priv->addr.x == lcd->bwidth)
- charlcd_gotoxy(lcd);
}
static void charlcd_clear_fast(struct charlcd *lcd)
@@ -293,6 +291,79 @@ static int charlcd_init_display(struct charlcd *lcd)
}
/*
+ * Parses an unsigned integer from a string, until a non-digit character
+ * is found. The empty string is not accepted. No overflow checks are done.
+ *
+ * Returns whether the parsing was successful. Only in that case
+ * the output parameters are written to.
+ *
+ * TODO: If the kernel adds an inplace version of kstrtoul(), this function
+ * could be easily replaced by that.
+ */
+static bool parse_n(const char *s, unsigned long *res, const char **next_s)
+{
+ if (!isdigit(*s))
+ return false;
+
+ *res = 0;
+ while (isdigit(*s)) {
+ *res = *res * 10 + (*s - '0');
+ ++s;
+ }
+
+ *next_s = s;
+ return true;
+}
+
+/*
+ * Parses a movement command of the form "(.*);", where the group can be
+ * any number of subcommands of the form "(x|y)[0-9]+".
+ *
+ * Returns whether the command is valid. The position arguments are
+ * only written if the parsing was successful.
+ *
+ * For instance:
+ * - ";" returns (<original x>, <original y>).
+ * - "x1;" returns (1, <original y>).
+ * - "y2x1;" returns (1, 2).
+ * - "x12y34x56;" returns (56, 34).
+ * - "" fails.
+ * - "x" fails.
+ * - "x;" fails.
+ * - "x1" fails.
+ * - "xy12;" fails.
+ * - "x12yy12;" fails.
+ * - "xx" fails.
+ */
+static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
+{
+ unsigned long new_x = *x;
+ unsigned long new_y = *y;
+
+ for (;;) {
+ if (!*s)
+ return false;
+
+ if (*s == ';')
+ break;
+
+ if (*s == 'x') {
+ if (!parse_n(s + 1, &new_x, &s))
+ return false;
+ } else if (*s == 'y') {
+ if (!parse_n(s + 1, &new_y, &s))
+ return false;
+ } else {
+ return false;
+ }
+ }
+
+ *x = new_x;
+ *y = new_y;
+ return true;
+}
+
+/*
* These are the file operation function for user access to /dev/lcd
* This function can also be called from inside the kernel, by
* setting file and ppos to NULL.
@@ -362,6 +433,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
break;
case 'N': /* Two Lines */
priv->flags |= LCD_FLAG_N;
+ processed = 1;
break;
case 'l': /* Shift Cursor Left */
if (priv->addr.x > 0) {
@@ -441,9 +513,9 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
shift ^= 4;
if (*esc >= '0' && *esc <= '9') {
value |= (*esc - '0') << shift;
- } else if (*esc >= 'A' && *esc <= 'Z') {
+ } else if (*esc >= 'A' && *esc <= 'F') {
value |= (*esc - 'A' + 10) << shift;
- } else if (*esc >= 'a' && *esc <= 'z') {
+ } else if (*esc >= 'a' && *esc <= 'f') {
value |= (*esc - 'a' + 10) << shift;
} else {
esc++;
@@ -469,24 +541,11 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
}
case 'x': /* gotoxy : LxXXX[yYYY]; */
case 'y': /* gotoxy : LyYYY[xXXX]; */
- if (!strchr(esc, ';'))
- break;
-
- while (*esc) {
- if (*esc == 'x') {
- esc++;
- if (kstrtoul(esc, 10, &priv->addr.x) < 0)
- break;
- } else if (*esc == 'y') {
- esc++;
- if (kstrtoul(esc, 10, &priv->addr.y) < 0)
- break;
- } else {
- break;
- }
- }
+ /* If the command is valid, move to the new address */
+ if (parse_xy(esc, &priv->addr.x, &priv->addr.y))
+ charlcd_gotoxy(lcd);
- charlcd_gotoxy(lcd);
+ /* Regardless of its validity, mark as processed */
processed = 1;
break;
}
@@ -527,7 +586,7 @@ static void charlcd_write_char(struct charlcd *lcd, char c)
if ((c != '\n') && priv->esc_seq.len >= 0) {
/* yes, let's add this char to the buffer */
priv->esc_seq.buf[priv->esc_seq.len++] = c;
- priv->esc_seq.buf[priv->esc_seq.len] = 0;
+ priv->esc_seq.buf[priv->esc_seq.len] = '\0';
} else {
/* aborts any previous escape sequence */
priv->esc_seq.len = -1;
@@ -536,7 +595,7 @@ static void charlcd_write_char(struct charlcd *lcd, char c)
case LCD_ESCAPE_CHAR:
/* start of an escape sequence */
priv->esc_seq.len = 0;
- priv->esc_seq.buf[priv->esc_seq.len] = 0;
+ priv->esc_seq.buf[priv->esc_seq.len] = '\0';
break;
case '\b':
/* go back one char and clear it */
@@ -555,7 +614,7 @@ static void charlcd_write_char(struct charlcd *lcd, char c)
/* back one char again */
lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT);
break;
- case '\014':
+ case '\f':
/* quickly clear the display */
charlcd_clear_fast(lcd);
break;
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 036eec404289..78d8f1986fec 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* HD44780 Character LCD driver for Linux
*
* Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
* Copyright (C) 2016-2017 Glider bvba
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/delay.h>
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index fbfa5b4cc567..a43276c76fc6 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* HT16K33 driver
*
* Author: Robin van der Gracht <robin@protonic.nl>
*
* Copyright: (C) 2016 Protonic Holland.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 816de9eaac26..abfe3fa9e6f4 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -1,26 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Filename: ks0108.c
* Version: 0.1.0
* Description: ks0108 LCD Controller driver
- * License: GPLv2
* Depends: parport
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index ec5e8800f8ad..3b25a643058c 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Front panel driver for Linux
* Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
* Copyright (C) 2016-2017 Glider bvba
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This code drives an LCD module (/dev/lcd), and a keypad (/dev/keypad)
* connected to a parallel printer port.
*
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index edf726267282..2880e2ab01f5 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -32,50 +32,10 @@ struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
}
#ifdef CONFIG_OF
-static int cache_setup_of_node(unsigned int cpu)
-{
- struct device_node *np;
- struct cacheinfo *this_leaf;
- struct device *cpu_dev = get_cpu_device(cpu);
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- unsigned int index = 0;
-
- /* skip if of_node is already populated */
- if (this_cpu_ci->info_list->of_node)
- return 0;
-
- if (!cpu_dev) {
- pr_err("No cpu device for CPU %d\n", cpu);
- return -ENODEV;
- }
- np = cpu_dev->of_node;
- if (!np) {
- pr_err("Failed to find cpu%d device node\n", cpu);
- return -ENOENT;
- }
-
- while (index < cache_leaves(cpu)) {
- this_leaf = this_cpu_ci->info_list + index;
- if (this_leaf->level != 1)
- np = of_find_next_cache_node(np);
- else
- np = of_node_get(np);/* cpu node itself */
- if (!np)
- break;
- this_leaf->of_node = np;
- index++;
- }
-
- if (index != cache_leaves(cpu)) /* not all OF nodes populated */
- return -ENOENT;
-
- return 0;
-}
-
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
- return sib_leaf->of_node == this_leaf->of_node;
+ return sib_leaf->fw_token == this_leaf->fw_token;
}
/* OF properties to query for a given cache type */
@@ -111,7 +71,7 @@ static inline int get_cacheinfo_idx(enum cache_type type)
return type;
}
-static void cache_size(struct cacheinfo *this_leaf)
+static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
const __be32 *cache_size;
@@ -120,13 +80,14 @@ static void cache_size(struct cacheinfo *this_leaf)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
- cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+ cache_size = of_get_property(np, propname, NULL);
if (cache_size)
this_leaf->size = of_read_number(cache_size, 1);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
-static void cache_get_line_size(struct cacheinfo *this_leaf)
+static void cache_get_line_size(struct cacheinfo *this_leaf,
+ struct device_node *np)
{
const __be32 *line_size;
int i, lim, ct_idx;
@@ -138,7 +99,7 @@ static void cache_get_line_size(struct cacheinfo *this_leaf)
const char *propname;
propname = cache_type_info[ct_idx].line_size_props[i];
- line_size = of_get_property(this_leaf->of_node, propname, NULL);
+ line_size = of_get_property(np, propname, NULL);
if (line_size)
break;
}
@@ -147,7 +108,7 @@ static void cache_get_line_size(struct cacheinfo *this_leaf)
this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
-static void cache_nr_sets(struct cacheinfo *this_leaf)
+static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
const __be32 *nr_sets;
@@ -156,7 +117,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
- nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+ nr_sets = of_get_property(np, propname, NULL);
if (nr_sets)
this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}
@@ -175,41 +136,77 @@ static void cache_associativity(struct cacheinfo *this_leaf)
this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
-static bool cache_node_is_unified(struct cacheinfo *this_leaf)
+static bool cache_node_is_unified(struct cacheinfo *this_leaf,
+ struct device_node *np)
{
- return of_property_read_bool(this_leaf->of_node, "cache-unified");
+ return of_property_read_bool(np, "cache-unified");
}
-static void cache_of_override_properties(unsigned int cpu)
+static void cache_of_set_props(struct cacheinfo *this_leaf,
+ struct device_node *np)
{
- int index;
+ /*
+ * init_cache_level must setup the cache level correctly
+ * overriding the architecturally specified levels, so
+ * if type is NONE at this stage, it should be unified
+ */
+ if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+ cache_node_is_unified(this_leaf, np))
+ this_leaf->type = CACHE_TYPE_UNIFIED;
+ cache_size(this_leaf, np);
+ cache_get_line_size(this_leaf, np);
+ cache_nr_sets(this_leaf, np);
+ cache_associativity(this_leaf);
+}
+
+static int cache_setup_of_node(unsigned int cpu)
+{
+ struct device_node *np;
struct cacheinfo *this_leaf;
+ struct device *cpu_dev = get_cpu_device(cpu);
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ unsigned int index = 0;
- for (index = 0; index < cache_leaves(cpu); index++) {
+ /* skip if fw_token is already populated */
+ if (this_cpu_ci->info_list->fw_token) {
+ return 0;
+ }
+
+ if (!cpu_dev) {
+ pr_err("No cpu device for CPU %d\n", cpu);
+ return -ENODEV;
+ }
+ np = cpu_dev->of_node;
+ if (!np) {
+ pr_err("Failed to find cpu%d device node\n", cpu);
+ return -ENOENT;
+ }
+
+ while (index < cache_leaves(cpu)) {
this_leaf = this_cpu_ci->info_list + index;
- /*
- * init_cache_level must setup the cache level correctly
- * overriding the architecturally specified levels, so
- * if type is NONE at this stage, it should be unified
- */
- if (this_leaf->type == CACHE_TYPE_NOCACHE &&
- cache_node_is_unified(this_leaf))
- this_leaf->type = CACHE_TYPE_UNIFIED;
- cache_size(this_leaf);
- cache_get_line_size(this_leaf);
- cache_nr_sets(this_leaf);
- cache_associativity(this_leaf);
+ if (this_leaf->level != 1)
+ np = of_find_next_cache_node(np);
+ else
+ np = of_node_get(np);/* cpu node itself */
+ if (!np)
+ break;
+ cache_of_set_props(this_leaf, np);
+ this_leaf->fw_token = np;
+ index++;
}
+
+ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
+ return -ENOENT;
+
+ return 0;
}
#else
-static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
/*
- * For non-DT systems, assume unique level 1 cache, system-wide
+ * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
* shared caches for all other levels. This will be used only if
* arch specific code has not populated shared_cpu_map
*/
@@ -217,6 +214,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
}
#endif
+int __weak cache_setup_acpi(unsigned int cpu)
+{
+ return -ENOTSUPP;
+}
+
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -230,8 +232,8 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (of_have_populated_dt())
ret = cache_setup_of_node(cpu);
else if (!acpi_disabled)
- /* No cache property/hierarchy support yet in ACPI */
- ret = -ENOTSUPP;
+ ret = cache_setup_acpi(cpu);
+
if (ret)
return ret;
@@ -282,16 +284,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
}
- of_node_put(this_leaf->of_node);
+ if (of_have_populated_dt())
+ of_node_put(this_leaf->fw_token);
}
}
-static void cache_override_properties(unsigned int cpu)
-{
- if (of_have_populated_dt())
- return cache_of_override_properties(cpu);
-}
-
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@@ -325,12 +322,17 @@ static int detect_cache_attributes(unsigned int cpu)
if (per_cpu_cacheinfo(cpu) == NULL)
return -ENOMEM;
+ /*
+ * populate_cache_leaves() may completely setup the cache leaves and
+ * shared_cpu_map or it may leave it partially setup.
+ */
ret = populate_cache_leaves(cpu);
if (ret)
goto free_ci;
/*
- * For systems using DT for cache hierarchy, of_node and shared_cpu_map
- * will be set up here only if they are not populated already
+ * For systems using DT for cache hierarchy, fw_token
+ * and shared_cpu_map will be set up here only if they are
+ * not populated already
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
@@ -338,7 +340,6 @@ static int detect_cache_attributes(unsigned int cpu)
goto free_ci;
}
- cache_override_properties(cpu);
return 0;
free_ci:
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4838b0dbaad3..21e6d1b3b393 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -76,6 +76,8 @@
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
+#include <linux/ioprio.h>
+
#include "loop.h"
#include <linux/uaccess.h>
@@ -559,6 +561,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_filp = file;
cmd->iocb.ki_complete = lo_rw_aio_complete;
cmd->iocb.ki_flags = IOCB_DIRECT;
+ cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
if (cmd->css)
kthread_associate_blkcg(cmd->css);
@@ -644,6 +647,36 @@ static void loop_reread_partitions(struct loop_device *lo,
__func__, lo->lo_number, lo->lo_file_name, rc);
}
+static inline int is_loop_device(struct file *file)
+{
+ struct inode *i = file->f_mapping->host;
+
+ return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
+}
+
+static int loop_validate_file(struct file *file, struct block_device *bdev)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct file *f = file;
+
+ /* Avoid recursion */
+ while (is_loop_device(f)) {
+ struct loop_device *l;
+
+ if (f->f_mapping->host->i_bdev == bdev)
+ return -EBADF;
+
+ l = f->f_mapping->host->i_bdev->bd_disk->private_data;
+ if (l->lo_state == Lo_unbound) {
+ return -EINVAL;
+ }
+ f = l->lo_backing_file;
+ }
+ if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
+ return -EINVAL;
+ return 0;
+}
+
/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -673,14 +706,15 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!file)
goto out;
+ error = loop_validate_file(file, bdev);
+ if (error)
+ goto out_putf;
+
inode = file->f_mapping->host;
old_file = lo->lo_backing_file;
error = -EINVAL;
- if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
- goto out_putf;
-
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_putf;
@@ -706,13 +740,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return error;
}
-static inline int is_loop_device(struct file *file)
-{
- struct inode *i = file->f_mapping->host;
-
- return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
-}
-
/* loop sysfs attributes */
static ssize_t loop_attr_show(struct device *dev, char *page,
@@ -878,7 +905,7 @@ static int loop_prepare_queue(struct loop_device *lo)
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct block_device *bdev, unsigned int arg)
{
- struct file *file, *f;
+ struct file *file;
struct inode *inode;
struct address_space *mapping;
int lo_flags = 0;
@@ -897,29 +924,13 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (lo->lo_state != Lo_unbound)
goto out_putf;
- /* Avoid recursion */
- f = file;
- while (is_loop_device(f)) {
- struct loop_device *l;
-
- if (f->f_mapping->host->i_bdev == bdev)
- goto out_putf;
-
- l = f->f_mapping->host->i_bdev->bd_disk->private_data;
- if (l->lo_state == Lo_unbound) {
- error = -EINVAL;
- goto out_putf;
- }
- f = l->lo_backing_file;
- }
+ error = loop_validate_file(file, bdev);
+ if (error)
+ goto out_putf;
mapping = file->f_mapping;
inode = mapping->host;
- error = -EINVAL;
- if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
- goto out_putf;
-
if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
!file->f_op->write_iter)
lo_flags |= LO_FLAGS_READ_ONLY;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3ed1ef8ee528..3b7083b8ecbb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -246,6 +246,7 @@ static void nbd_size_update(struct nbd_device *nbd)
if (config->flags & NBD_FLAG_SEND_TRIM) {
nbd->disk->queue->limits.discard_granularity = config->blksize;
+ nbd->disk->queue->limits.discard_alignment = config->blksize;
blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
}
blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
@@ -275,7 +276,7 @@ static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
- dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
+ dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
cmd->status ? "failed" : "done");
blk_mq_end_request(req, cmd->status);
@@ -482,7 +483,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
memcpy(request.handle, &tag, sizeof(tag));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
- cmd, nbdcmd_to_ascii(type),
+ req, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
@@ -518,7 +519,7 @@ send_pages:
int flags = is_last ? 0 : MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
- cmd, bvec.bv_len);
+ req, bvec.bv_len);
iov_iter_bvec(&from, ITER_BVEC | WRITE,
&bvec, 1, bvec.bv_len);
if (skip) {
@@ -610,7 +611,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return cmd;
}
- dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
+ dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
if (rq_data_dir(req) != WRITE) {
struct req_iterator iter;
struct bio_vec bvec;
@@ -637,7 +638,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-EIO);
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
- cmd, bvec.bv_len);
+ req, bvec.bv_len);
}
} else {
/* See the comment in nbd_queue_rq. */
@@ -1062,6 +1063,7 @@ static void nbd_config_put(struct nbd_device *nbd)
nbd->tag_set.timeout = 0;
nbd->disk->queue->limits.discard_granularity = 0;
+ nbd->disk->queue->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
@@ -1516,6 +1518,7 @@ static int nbd_dev_add(int index)
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 0;
+ disk->queue->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(disk->queue, 0);
blk_queue_max_segment_size(disk->queue, UINT_MAX);
blk_queue_max_segments(disk->queue, USHRT_MAX);
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index ac3a31d433b2..635235759a0a 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -13,7 +13,7 @@ config ZRAM
It has several use cases, for example: /tmp storage, use as swap
disks and maybe many more.
- See zram.txt for more information.
+ See Documentation/blockdev/zram.txt for more information.
config ZRAM_WRITEBACK
bool "Write back incompressible page to backing device"
@@ -25,4 +25,14 @@ config ZRAM_WRITEBACK
For this feature, admin should set up backing device via
/sys/block/zramX/backing_dev.
- See zram.txt for more infomration.
+ See Documentation/blockdev/zram.txt for more information.
+
+config ZRAM_MEMORY_TRACKING
+ bool "Track zRam block status"
+ depends on ZRAM && DEBUG_FS
+ help
+ With this feature, admin can track the state of allocated blocks
+ of zRAM. Admin could see the information via
+ /sys/kernel/debug/zram/zramX/block_state.
+
+ See Documentation/blockdev/zram.txt for more information.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0f3fadd71230..da51293e7c03 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -31,6 +31,7 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
+#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include "zram_drv.h"
@@ -52,11 +53,28 @@ static size_t huge_class_size;
static void zram_free_page(struct zram *zram, size_t index);
+static void zram_slot_lock(struct zram *zram, u32 index)
+{
+ bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
+}
+
+static void zram_slot_unlock(struct zram *zram, u32 index)
+{
+ bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value);
+}
+
static inline bool init_done(struct zram *zram)
{
return zram->disksize;
}
+static inline bool zram_allocated(struct zram *zram, u32 index)
+{
+
+ return (zram->table[index].value >> (ZRAM_FLAG_SHIFT + 1)) ||
+ zram->table[index].handle;
+}
+
static inline struct zram *dev_to_zram(struct device *dev)
{
return (struct zram *)dev_to_disk(dev)->private_data;
@@ -73,7 +91,7 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
}
/* flag operations require table entry bit_spin_lock() being held */
-static int zram_test_flag(struct zram *zram, u32 index,
+static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
return zram->table[index].value & BIT(flag);
@@ -600,6 +618,114 @@ static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
static void zram_wb_clear(struct zram *zram, u32 index) {}
#endif
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+
+static struct dentry *zram_debugfs_root;
+
+static void zram_debugfs_create(void)
+{
+ zram_debugfs_root = debugfs_create_dir("zram", NULL);
+}
+
+static void zram_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(zram_debugfs_root);
+}
+
+static void zram_accessed(struct zram *zram, u32 index)
+{
+ zram->table[index].ac_time = ktime_get_boottime();
+}
+
+static void zram_reset_access(struct zram *zram, u32 index)
+{
+ zram->table[index].ac_time = 0;
+}
+
+static ssize_t read_block_state(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *kbuf;
+ ssize_t index, written = 0;
+ struct zram *zram = file->private_data;
+ unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+ struct timespec64 ts;
+
+ kbuf = kvmalloc(count, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ kvfree(kbuf);
+ return -EINVAL;
+ }
+
+ for (index = *ppos; index < nr_pages; index++) {
+ int copied;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ ts = ktime_to_timespec64(zram->table[index].ac_time);
+ copied = snprintf(kbuf + written, count,
+ "%12zd %12lld.%06lu %c%c%c\n",
+ index, (s64)ts.tv_sec,
+ ts.tv_nsec / NSEC_PER_USEC,
+ zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
+ zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
+ zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.');
+
+ if (count < copied) {
+ zram_slot_unlock(zram, index);
+ break;
+ }
+ written += copied;
+ count -= copied;
+next:
+ zram_slot_unlock(zram, index);
+ *ppos += 1;
+ }
+
+ up_read(&zram->init_lock);
+ if (copy_to_user(buf, kbuf, written))
+ written = -EFAULT;
+ kvfree(kbuf);
+
+ return written;
+}
+
+static const struct file_operations proc_zram_block_state_op = {
+ .open = simple_open,
+ .read = read_block_state,
+ .llseek = default_llseek,
+};
+
+static void zram_debugfs_register(struct zram *zram)
+{
+ if (!zram_debugfs_root)
+ return;
+
+ zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
+ zram_debugfs_root);
+ debugfs_create_file("block_state", 0400, zram->debugfs_dir,
+ zram, &proc_zram_block_state_op);
+}
+
+static void zram_debugfs_unregister(struct zram *zram)
+{
+ debugfs_remove_recursive(zram->debugfs_dir);
+}
+#else
+static void zram_debugfs_create(void) {};
+static void zram_debugfs_destroy(void) {};
+static void zram_accessed(struct zram *zram, u32 index) {};
+static void zram_reset_access(struct zram *zram, u32 index) {};
+static void zram_debugfs_register(struct zram *zram) {};
+static void zram_debugfs_unregister(struct zram *zram) {};
+#endif
/*
* We switched to per-cpu streams and this attr is not needed anymore.
@@ -719,14 +845,15 @@ static ssize_t mm_stat_show(struct device *dev,
max_used = atomic_long_read(&zram->stats.max_used_pages);
ret = scnprintf(buf, PAGE_SIZE,
- "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
+ "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
mem_used << PAGE_SHIFT,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.same_pages),
- pool_stats.pages_compacted);
+ pool_stats.pages_compacted,
+ (u64)atomic64_read(&zram->stats.huge_pages));
up_read(&zram->init_lock);
return ret;
@@ -753,16 +880,6 @@ static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
-static void zram_slot_lock(struct zram *zram, u32 index)
-{
- bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value);
-}
-
-static void zram_slot_unlock(struct zram *zram, u32 index)
-{
- bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
-}
-
static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
@@ -805,6 +922,13 @@ static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
+ zram_reset_access(zram, index);
+
+ if (zram_test_flag(zram, index, ZRAM_HUGE)) {
+ zram_clear_flag(zram, index, ZRAM_HUGE);
+ atomic64_dec(&zram->stats.huge_pages);
+ }
+
if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
zram_wb_clear(zram, index);
atomic64_dec(&zram->stats.pages_stored);
@@ -973,6 +1097,7 @@ compress_again:
}
if (unlikely(comp_len >= huge_class_size)) {
+ comp_len = PAGE_SIZE;
if (zram_wb_enabled(zram) && allow_wb) {
zcomp_stream_put(zram->comp);
ret = write_to_bdev(zram, bvec, index, bio, &element);
@@ -984,7 +1109,6 @@ compress_again:
allow_wb = false;
goto compress_again;
}
- comp_len = PAGE_SIZE;
}
/*
@@ -1046,6 +1170,11 @@ out:
zram_slot_lock(zram, index);
zram_free_page(zram, index);
+ if (comp_len == PAGE_SIZE) {
+ zram_set_flag(zram, index, ZRAM_HUGE);
+ atomic64_inc(&zram->stats.huge_pages);
+ }
+
if (flags) {
zram_set_flag(zram, index, flags);
zram_set_element(zram, index, element);
@@ -1166,6 +1295,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);
+ zram_slot_lock(zram, index);
+ zram_accessed(zram, index);
+ zram_slot_unlock(zram, index);
+
if (unlikely(ret < 0)) {
if (!is_write)
atomic64_inc(&zram->stats.failed_reads);
@@ -1577,6 +1710,7 @@ static int zram_add(void)
}
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
+ zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
@@ -1610,6 +1744,7 @@ static int zram_remove(struct zram *zram)
zram->claim = true;
mutex_unlock(&bdev->bd_mutex);
+ zram_debugfs_unregister(zram);
/*
* Remove sysfs first, so no one will perform a disksize
* store while we destroy the devices. This also helps during
@@ -1712,6 +1847,7 @@ static void destroy_devices(void)
{
class_unregister(&zram_control_class);
idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
+ zram_debugfs_destroy();
idr_destroy(&zram_index_idr);
unregister_blkdev(zram_major, "zram");
cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
@@ -1733,6 +1869,7 @@ static int __init zram_init(void)
return ret;
}
+ zram_debugfs_create();
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
pr_err("Unable to get major number\n");
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 008861220723..72c8584b6dff 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -43,10 +43,11 @@
/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
- /* Page consists the same element */
- ZRAM_SAME = ZRAM_FLAG_SHIFT,
- ZRAM_ACCESS, /* page is now accessed */
+ /* zram slot is locked */
+ ZRAM_LOCK = ZRAM_FLAG_SHIFT,
+ ZRAM_SAME, /* Page consists the same element */
ZRAM_WB, /* page is stored on backing_device */
+ ZRAM_HUGE, /* Incompressible page */
__NR_ZRAM_PAGEFLAGS,
};
@@ -60,6 +61,9 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long value;
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+ ktime_t ac_time;
+#endif
};
struct zram_stats {
@@ -71,6 +75,7 @@ struct zram_stats {
atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */
atomic64_t same_pages; /* no. of same element filled pages */
+ atomic64_t huge_pages; /* no. of huge pages */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
atomic64_t writestall; /* no. of write slow paths */
@@ -107,5 +112,8 @@ struct zram {
unsigned long nr_pages;
spinlock_t bitmap_lock;
#endif
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+ struct dentry *debugfs_dir;
+#endif
};
#endif
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e538061eadcb..410c30c42120 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -540,5 +540,17 @@ source "drivers/s390/char/Kconfig"
source "drivers/char/xillybus/Kconfig"
+config ADI
+ tristate "SPARC Privileged ADI driver"
+ depends on SPARC64
+ default m
+ help
+ SPARC M7 and newer processors utilize ADI (Application Data
+ Integrity) to version and protect memory. This driver provides
+ read/write access to the ADI versions for privileged processes.
+ This feature is also known as MCD (Memory Corruption Detection)
+ and SSM (Silicon Secured Memory). Intended consumers of this
+ driver include crash and makedumpfile.
+
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index c97c768cd1dd..b8d42b4e979b 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -57,3 +57,4 @@ js-rtc-y = rtc.o
obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
+obj-$(CONFIG_ADI) += adi.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
new file mode 100644
index 000000000000..751d7cc0da1b
--- /dev/null
+++ b/drivers/char/adi.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Privileged ADI driver for sparc64
+ *
+ * Author: Tom Hromatka <tom.hromatka@oracle.com>
+ */
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/asi.h>
+
+#define MAX_BUF_SZ PAGE_SIZE
+
+static int adi_open(struct inode *inode, struct file *file)
+{
+ file->f_mode |= FMODE_UNSIGNED_OFFSET;
+ return 0;
+}
+
+static int read_mcd_tag(unsigned long addr)
+{
+ long err;
+ int ver;
+
+ __asm__ __volatile__(
+ "1: ldxa [%[addr]] %[asi], %[ver]\n"
+ " mov 0, %[err]\n"
+ "2:\n"
+ " .section .fixup,#alloc,#execinstr\n"
+ " .align 4\n"
+ "3: sethi %%hi(2b), %%g1\n"
+ " jmpl %%g1 + %%lo(2b), %%g0\n"
+ " mov %[invalid], %[err]\n"
+ " .previous\n"
+ " .section __ex_table, \"a\"\n"
+ " .align 4\n"
+ " .word 1b, 3b\n"
+ " .previous\n"
+ : [ver] "=r" (ver), [err] "=r" (err)
+ : [addr] "r" (addr), [invalid] "i" (EFAULT),
+ [asi] "i" (ASI_MCD_REAL)
+ : "memory", "g1"
+ );
+
+ if (err)
+ return -EFAULT;
+ else
+ return ver;
+}
+
+static ssize_t adi_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offp)
+{
+ size_t ver_buf_sz, bytes_read = 0;
+ int ver_buf_idx = 0;
+ loff_t offset;
+ u8 *ver_buf;
+ ssize_t ret;
+
+ ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
+ ver_buf = kmalloc(ver_buf_sz, GFP_KERNEL);
+ if (!ver_buf)
+ return -ENOMEM;
+
+ offset = (*offp) * adi_blksize();
+
+ while (bytes_read < count) {
+ ret = read_mcd_tag(offset);
+ if (ret < 0)
+ goto out;
+
+ ver_buf[ver_buf_idx] = (u8)ret;
+ ver_buf_idx++;
+ offset += adi_blksize();
+
+ if (ver_buf_idx >= ver_buf_sz) {
+ if (copy_to_user(buf + bytes_read, ver_buf,
+ ver_buf_sz)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ bytes_read += ver_buf_sz;
+ ver_buf_idx = 0;
+
+ ver_buf_sz = min(count - bytes_read,
+ (size_t)MAX_BUF_SZ);
+ }
+ }
+
+ (*offp) += bytes_read;
+ ret = bytes_read;
+out:
+ kfree(ver_buf);
+ return ret;
+}
+
+static int set_mcd_tag(unsigned long addr, u8 ver)
+{
+ long err;
+
+ __asm__ __volatile__(
+ "1: stxa %[ver], [%[addr]] %[asi]\n"
+ " mov 0, %[err]\n"
+ "2:\n"
+ " .section .fixup,#alloc,#execinstr\n"
+ " .align 4\n"
+ "3: sethi %%hi(2b), %%g1\n"
+ " jmpl %%g1 + %%lo(2b), %%g0\n"
+ " mov %[invalid], %[err]\n"
+ " .previous\n"
+ " .section __ex_table, \"a\"\n"
+ " .align 4\n"
+ " .word 1b, 3b\n"
+ " .previous\n"
+ : [err] "=r" (err)
+ : [ver] "r" (ver), [addr] "r" (addr),
+ [invalid] "i" (EFAULT), [asi] "i" (ASI_MCD_REAL)
+ : "memory", "g1"
+ );
+
+ if (err)
+ return -EFAULT;
+ else
+ return ver;
+}
+
+static ssize_t adi_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ size_t ver_buf_sz, bytes_written = 0;
+ loff_t offset;
+ u8 *ver_buf;
+ ssize_t ret;
+ int i;
+
+ if (count <= 0)
+ return -EINVAL;
+
+ ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
+ ver_buf = kmalloc(ver_buf_sz, GFP_KERNEL);
+ if (!ver_buf)
+ return -ENOMEM;
+
+ offset = (*offp) * adi_blksize();
+
+ do {
+ if (copy_from_user(ver_buf, &buf[bytes_written],
+ ver_buf_sz)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ for (i = 0; i < ver_buf_sz; i++) {
+ ret = set_mcd_tag(offset, ver_buf[i]);
+ if (ret < 0)
+ goto out;
+
+ offset += adi_blksize();
+ }
+
+ bytes_written += ver_buf_sz;
+ ver_buf_sz = min(count - bytes_written, (size_t)MAX_BUF_SZ);
+ } while (bytes_written < count);
+
+ (*offp) += bytes_written;
+ ret = bytes_written;
+out:
+ __asm__ __volatile__("membar #Sync");
+ kfree(ver_buf);
+ return ret;
+}
+
+static loff_t adi_llseek(struct file *file, loff_t offset, int whence)
+{
+ loff_t ret = -EINVAL;
+
+ switch (whence) {
+ case SEEK_END:
+ case SEEK_DATA:
+ case SEEK_HOLE:
+ /* unsupported */
+ return -EINVAL;
+ case SEEK_CUR:
+ if (offset == 0)
+ return file->f_pos;
+
+ offset += file->f_pos;
+ break;
+ case SEEK_SET:
+ break;
+ }
+
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ ret = offset;
+ }
+
+ return ret;
+}
+
+static const struct file_operations adi_fops = {
+ .owner = THIS_MODULE,
+ .llseek = adi_llseek,
+ .open = adi_open,
+ .read = adi_read,
+ .write = adi_write,
+};
+
+static struct miscdevice adi_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = KBUILD_MODNAME,
+ .fops = &adi_fops,
+};
+
+static int __init adi_init(void)
+{
+ if (!adi_capable())
+ return -EPERM;
+
+ return misc_register(&adi_miscdev);
+}
+
+static void __exit adi_exit(void)
+{
+ misc_deregister(&adi_miscdev);
+}
+
+module_init(adi_init);
+module_exit(adi_exit);
+
+MODULE_AUTHOR("Tom Hromatka <tom.hromatka@oracle.com>");
+MODULE_DESCRIPTION("Privileged interface to ADI");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index acd758381c58..4e9c33ca1f8f 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -4,11 +4,11 @@
#
obj-$(CONFIG_TCG_TPM) += tpm.o
tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
- tpm-dev-common.o tpmrm-dev.o tpm1_eventlog.o tpm2_eventlog.o \
- tpm2-space.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_eventlog_acpi.o
-tpm-$(CONFIG_EFI) += tpm_eventlog_efi.o
-tpm-$(CONFIG_OF) += tpm_eventlog_of.o
+ tpm-dev-common.o tpmrm-dev.o eventlog/common.o eventlog/tpm1.o \
+ eventlog/tpm2.o tpm2-space.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
+tpm-$(CONFIG_EFI) += eventlog/efi.o
+tpm-$(CONFIG_OF) += eventlog/of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm_eventlog_acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 66f19e93c216..7c53b1973b62 100644
--- a/drivers/char/tpm/tpm_eventlog_acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -27,7 +27,8 @@
#include <linux/acpi.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
struct acpi_tcpa {
struct acpi_table_header hdr;
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
new file mode 100644
index 000000000000..5a8720df2b51
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2005, 2012 IBM Corporation
+ *
+ * Authors:
+ * Kent Yoder <key@linux.vnet.ibm.com>
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Access to the event log created by a system's firmware / BIOS
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+static int tpm_bios_measurements_open(struct inode *inode,
+ struct file *file)
+{
+ int err;
+ struct seq_file *seq;
+ struct tpm_chip_seqops *chip_seqops;
+ const struct seq_operations *seqops;
+ struct tpm_chip *chip;
+
+ inode_lock(inode);
+ if (!inode->i_private) {
+ inode_unlock(inode);
+ return -ENODEV;
+ }
+ chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
+ seqops = chip_seqops->seqops;
+ chip = chip_seqops->chip;
+ get_device(&chip->dev);
+ inode_unlock(inode);
+
+ /* now register seq file */
+ err = seq_open(file, seqops);
+ if (!err) {
+ seq = file->private_data;
+ seq->private = chip;
+ }
+
+ return err;
+}
+
+static int tpm_bios_measurements_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct tpm_chip *chip = (struct tpm_chip *)seq->private;
+
+ put_device(&chip->dev);
+
+ return seq_release(inode, file);
+}
+
+static const struct file_operations tpm_bios_measurements_ops = {
+ .owner = THIS_MODULE,
+ .open = tpm_bios_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tpm_bios_measurements_release,
+};
+
+static int tpm_read_log(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip->log.bios_event_log != NULL) {
+ dev_dbg(&chip->dev,
+ "%s: ERROR - event log already initialized\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ rc = tpm_read_log_acpi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
+ rc = tpm_read_log_efi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
+ return tpm_read_log_of(chip);
+}
+
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are setup to
+ * export it to userspace, otherwise nothing is done.
+ *
+ * Returns -ENODEV if the firmware has no event log or securityfs is not
+ * supported.
+ */
+int tpm_bios_log_setup(struct tpm_chip *chip)
+{
+ const char *name = dev_name(&chip->dev);
+ unsigned int cnt;
+ int log_version;
+ int rc = 0;
+
+ rc = tpm_read_log(chip);
+ if (rc < 0)
+ return rc;
+ log_version = rc;
+
+ cnt = 0;
+ chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+ * compiled out. The caller should ignore the ENODEV return code.
+ */
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->bin_log_seqops.chip = chip;
+ if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ chip->bin_log_seqops.seqops =
+ &tpm2_binary_b_measurements_seqops;
+ else
+ chip->bin_log_seqops.seqops =
+ &tpm1_binary_b_measurements_seqops;
+
+
+ chip->bios_dir[cnt] =
+ securityfs_create_file("binary_bios_measurements",
+ 0440, chip->bios_dir[0],
+ (void *)&chip->bin_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+
+ chip->ascii_log_seqops.chip = chip;
+ chip->ascii_log_seqops.seqops =
+ &tpm1_ascii_b_measurements_seqops;
+
+ chip->bios_dir[cnt] =
+ securityfs_create_file("ascii_bios_measurements",
+ 0440, chip->bios_dir[0],
+ (void *)&chip->ascii_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+ }
+
+ return 0;
+
+err:
+ rc = PTR_ERR(chip->bios_dir[cnt]);
+ chip->bios_dir[cnt] = NULL;
+ tpm_bios_log_teardown(chip);
+ return rc;
+}
+
+void tpm_bios_log_teardown(struct tpm_chip *chip)
+{
+ int i;
+ struct inode *inode;
+
+ /* securityfs_remove currently doesn't take care of handling sync
+ * between removal and opening of pseudo files. To handle this, a
+ * workaround is added by making i_private = NULL here during removal
+ * and to check it during open(), both within inode_lock()/unlock().
+ * This design ensures that open() either safely gets kref or fails.
+ */
+ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+ if (chip->bios_dir[i]) {
+ inode = d_inode(chip->bios_dir[i]);
+ inode_lock(inode);
+ inode->i_private = NULL;
+ inode_unlock(inode);
+ securityfs_remove(chip->bios_dir[i]);
+ }
+ }
+}
diff --git a/drivers/char/tpm/eventlog/common.h b/drivers/char/tpm/eventlog/common.h
new file mode 100644
index 000000000000..47ff8136ceb5
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.h
@@ -0,0 +1,35 @@
+#ifndef __TPM_EVENTLOG_COMMON_H__
+#define __TPM_EVENTLOG_COMMON_H__
+
+#include "../tpm.h"
+
+extern const struct seq_operations tpm1_ascii_b_measurements_seqops;
+extern const struct seq_operations tpm1_binary_b_measurements_seqops;
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_EFI)
+int tpm_read_log_efi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_efi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_eventlog_efi.c b/drivers/char/tpm/eventlog/efi.c
index e3f9ffd341d2..3e673ab22cb4 100644
--- a/drivers/char/tpm/tpm_eventlog_efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -14,7 +14,8 @@
#include <linux/efi.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
/* read binary bios log from EFI configuration table */
int tpm_read_log_efi(struct tpm_chip *chip)
@@ -50,10 +51,9 @@ int tpm_read_log_efi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = kmalloc(log_size, GFP_KERNEL);
+ log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
if (!log->bios_event_log)
goto err_memunmap;
- memcpy(log->bios_event_log, log_tbl->log, log_size);
log->bios_event_log_end = log->bios_event_log + log_size;
tpm_log_version = log_tbl->version;
diff --git a/drivers/char/tpm/tpm_eventlog_of.c b/drivers/char/tpm/eventlog/of.c
index 96fd5646f866..bba5fba6cb3b 100644
--- a/drivers/char/tpm/tpm_eventlog_of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -19,7 +19,8 @@
#include <linux/of.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
int tpm_read_log_of(struct tpm_chip *chip)
{
@@ -56,8 +57,8 @@ int tpm_read_log_of(struct tpm_chip *chip)
* but physical tpm needs the conversion.
*/
if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0) {
- size = be32_to_cpup(sizep);
- base = be64_to_cpup(basep);
+ size = be32_to_cpup((__force __be32 *)sizep);
+ base = be64_to_cpup((__force __be64 *)basep);
} else {
size = *sizep;
base = *basep;
@@ -68,14 +69,12 @@ int tpm_read_log_of(struct tpm_chip *chip)
return -EIO;
}
- log->bios_event_log = kmalloc(size, GFP_KERNEL);
+ log->bios_event_log = kmemdup(__va(base), size, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
log->bios_event_log_end = log->bios_event_log + size;
- memcpy(log->bios_event_log, __va(base), size);
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
diff --git a/drivers/char/tpm/tpm1_eventlog.c b/drivers/char/tpm/eventlog/tpm1.c
index add798bd69d0..58c84784ba25 100644
--- a/drivers/char/tpm/tpm1_eventlog.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -28,7 +28,8 @@
#include <linux/slab.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
static const char* tcpa_event_type_strings[] = {
@@ -71,7 +72,7 @@ static const char* tcpa_pc_event_id_strings[] = {
};
/* returns pointer to start of pos. entry of tcg log */
-static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
+static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos)
{
loff_t i;
struct tpm_chip *chip = m->private;
@@ -118,7 +119,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
return addr;
}
-static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
+static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
loff_t *pos)
{
struct tcpa_event *event = v;
@@ -149,7 +150,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
return v;
}
-static void tpm_bios_measurements_stop(struct seq_file *m, void *v)
+static void tpm1_bios_measurements_stop(struct seq_file *m, void *v)
{
}
@@ -232,7 +233,7 @@ static int get_event_name(char *dest, struct tcpa_event *event,
}
-static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+static int tpm1_binary_bios_measurements_show(struct seq_file *m, void *v)
{
struct tcpa_event *event = v;
struct tcpa_event temp_event;
@@ -261,18 +262,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
}
-static int tpm_bios_measurements_release(struct inode *inode,
- struct file *file)
-{
- struct seq_file *seq = (struct seq_file *)file->private_data;
- struct tpm_chip *chip = (struct tpm_chip *)seq->private;
-
- put_device(&chip->dev);
-
- return seq_release(inode, file);
-}
-
-static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
+static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
{
int len = 0;
char *eventname;
@@ -305,172 +295,16 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static const struct seq_operations tpm_ascii_b_measurements_seqops = {
- .start = tpm_bios_measurements_start,
- .next = tpm_bios_measurements_next,
- .stop = tpm_bios_measurements_stop,
- .show = tpm_ascii_bios_measurements_show,
+const struct seq_operations tpm1_ascii_b_measurements_seqops = {
+ .start = tpm1_bios_measurements_start,
+ .next = tpm1_bios_measurements_next,
+ .stop = tpm1_bios_measurements_stop,
+ .show = tpm1_ascii_bios_measurements_show,
};
-static const struct seq_operations tpm_binary_b_measurements_seqops = {
- .start = tpm_bios_measurements_start,
- .next = tpm_bios_measurements_next,
- .stop = tpm_bios_measurements_stop,
- .show = tpm_binary_bios_measurements_show,
-};
-
-static int tpm_bios_measurements_open(struct inode *inode,
- struct file *file)
-{
- int err;
- struct seq_file *seq;
- struct tpm_chip_seqops *chip_seqops;
- const struct seq_operations *seqops;
- struct tpm_chip *chip;
-
- inode_lock(inode);
- if (!inode->i_private) {
- inode_unlock(inode);
- return -ENODEV;
- }
- chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
- seqops = chip_seqops->seqops;
- chip = chip_seqops->chip;
- get_device(&chip->dev);
- inode_unlock(inode);
-
- /* now register seq file */
- err = seq_open(file, seqops);
- if (!err) {
- seq = file->private_data;
- seq->private = chip;
- }
-
- return err;
-}
-
-static const struct file_operations tpm_bios_measurements_ops = {
- .owner = THIS_MODULE,
- .open = tpm_bios_measurements_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tpm_bios_measurements_release,
+const struct seq_operations tpm1_binary_b_measurements_seqops = {
+ .start = tpm1_bios_measurements_start,
+ .next = tpm1_bios_measurements_next,
+ .stop = tpm1_bios_measurements_stop,
+ .show = tpm1_binary_bios_measurements_show,
};
-
-static int tpm_read_log(struct tpm_chip *chip)
-{
- int rc;
-
- if (chip->log.bios_event_log != NULL) {
- dev_dbg(&chip->dev,
- "%s: ERROR - event log already initialized\n",
- __func__);
- return -EFAULT;
- }
-
- rc = tpm_read_log_acpi(chip);
- if (rc != -ENODEV)
- return rc;
-
- rc = tpm_read_log_efi(chip);
- if (rc != -ENODEV)
- return rc;
-
- return tpm_read_log_of(chip);
-}
-
-/*
- * tpm_bios_log_setup() - Read the event log from the firmware
- * @chip: TPM chip to use.
- *
- * If an event log is found then the securityfs files are setup to
- * export it to userspace, otherwise nothing is done.
- *
- * Returns -ENODEV if the firmware has no event log or securityfs is not
- * supported.
- */
-int tpm_bios_log_setup(struct tpm_chip *chip)
-{
- const char *name = dev_name(&chip->dev);
- unsigned int cnt;
- int log_version;
- int rc = 0;
-
- rc = tpm_read_log(chip);
- if (rc < 0)
- return rc;
- log_version = rc;
-
- cnt = 0;
- chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
- /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
- * compiled out. The caller should ignore the ENODEV return code.
- */
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
-
- chip->bin_log_seqops.chip = chip;
- if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
- chip->bin_log_seqops.seqops =
- &tpm2_binary_b_measurements_seqops;
- else
- chip->bin_log_seqops.seqops =
- &tpm_binary_b_measurements_seqops;
-
-
- chip->bios_dir[cnt] =
- securityfs_create_file("binary_bios_measurements",
- 0440, chip->bios_dir[0],
- (void *)&chip->bin_log_seqops,
- &tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
-
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
-
- chip->ascii_log_seqops.chip = chip;
- chip->ascii_log_seqops.seqops =
- &tpm_ascii_b_measurements_seqops;
-
- chip->bios_dir[cnt] =
- securityfs_create_file("ascii_bios_measurements",
- 0440, chip->bios_dir[0],
- (void *)&chip->ascii_log_seqops,
- &tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
- }
-
- return 0;
-
-err:
- rc = PTR_ERR(chip->bios_dir[cnt]);
- chip->bios_dir[cnt] = NULL;
- tpm_bios_log_teardown(chip);
- return rc;
-}
-
-void tpm_bios_log_teardown(struct tpm_chip *chip)
-{
- int i;
- struct inode *inode;
-
- /* securityfs_remove currently doesn't take care of handling sync
- * between removal and opening of pseudo files. To handle this, a
- * workaround is added by making i_private = NULL here during removal
- * and to check it during open(), both within inode_lock()/unlock().
- * This design ensures that open() either safely gets kref or fails.
- */
- for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
- if (chip->bios_dir[i]) {
- inode = d_inode(chip->bios_dir[i]);
- inode_lock(inode);
- inode->i_private = NULL;
- inode_unlock(inode);
- securityfs_remove(chip->bios_dir[i]);
- }
- }
-}
diff --git a/drivers/char/tpm/tpm2_eventlog.c b/drivers/char/tpm/eventlog/tpm2.c
index 1ce4411292ba..1b8fa9de2cac 100644
--- a/drivers/char/tpm/tpm2_eventlog.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -23,7 +23,8 @@
#include <linux/slab.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
/*
* calc_tpm2_event_size() - calculate the event size, where event
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index 0fc4f20b5f83..d7909ab287a8 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -40,7 +40,7 @@
#define ST33ZP24_OK 0x5A
#define ST33ZP24_UNDEFINED_ERR 0x80
#define ST33ZP24_BADLOCALITY 0x81
-#define ST33ZP24_TISREGISTER_UKNOWN 0x82
+#define ST33ZP24_TISREGISTER_UNKNOWN 0x82
#define ST33ZP24_LOCALITY_NOT_ACTIVATED 0x83
#define ST33ZP24_HASH_END_BEFORE_HASH_START 0x84
#define ST33ZP24_BAD_COMMAND_ORDER 0x85
@@ -84,7 +84,7 @@ static int st33zp24_status_to_errno(u8 code)
return 0;
case ST33ZP24_UNDEFINED_ERR:
case ST33ZP24_BADLOCALITY:
- case ST33ZP24_TISREGISTER_UKNOWN:
+ case ST33ZP24_TISREGISTER_UNKNOWN:
case ST33ZP24_LOCALITY_NOT_ACTIVATED:
case ST33ZP24_HASH_END_BEFORE_HASH_START:
case ST33ZP24_BAD_COMMAND_ORDER:
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index f95b9c75175b..abd675bec88c 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -373,8 +373,6 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
int ret;
u8 data;
- if (!chip)
- return -EBUSY;
if (len < TPM_HEADER_SIZE)
return -EBUSY;
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 230b99288024..e4a04b2d3c32 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -37,7 +37,7 @@ static void timeout_work(struct work_struct *work)
struct file_priv *priv = container_of(work, struct file_priv, work);
mutex_lock(&priv->buffer_mutex);
- atomic_set(&priv->data_pending, 0);
+ priv->data_pending = 0;
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
mutex_unlock(&priv->buffer_mutex);
}
@@ -46,7 +46,6 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
struct file_priv *priv)
{
priv->chip = chip;
- atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
INIT_WORK(&priv->work, timeout_work);
@@ -58,29 +57,24 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct file_priv *priv = file->private_data;
- ssize_t ret_size;
- ssize_t orig_ret_size;
+ ssize_t ret_size = 0;
int rc;
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
- ret_size = atomic_read(&priv->data_pending);
- if (ret_size > 0) { /* relay data */
- orig_ret_size = ret_size;
- if (size < ret_size)
- ret_size = size;
+ mutex_lock(&priv->buffer_mutex);
- mutex_lock(&priv->buffer_mutex);
+ if (priv->data_pending) {
+ ret_size = min_t(ssize_t, size, priv->data_pending);
rc = copy_to_user(buf, priv->data_buffer, ret_size);
- memset(priv->data_buffer, 0, orig_ret_size);
+ memset(priv->data_buffer, 0, priv->data_pending);
if (rc)
ret_size = -EFAULT;
- mutex_unlock(&priv->buffer_mutex);
+ priv->data_pending = 0;
}
- atomic_set(&priv->data_pending, 0);
-
+ mutex_unlock(&priv->buffer_mutex);
return ret_size;
}
@@ -91,17 +85,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
size_t in_size = size;
ssize_t out_size;
+ if (in_size > TPM_BUFSIZE)
+ return -E2BIG;
+
+ mutex_lock(&priv->buffer_mutex);
+
/* Cannot perform a write until the read has cleared either via
* tpm_read or a user_read_timer timeout. This also prevents split
* buffered writes from blocking here.
*/
- if (atomic_read(&priv->data_pending) != 0)
+ if (priv->data_pending != 0) {
+ mutex_unlock(&priv->buffer_mutex);
return -EBUSY;
-
- if (in_size > TPM_BUFSIZE)
- return -E2BIG;
-
- mutex_lock(&priv->buffer_mutex);
+ }
if (copy_from_user
(priv->data_buffer, (void __user *) buf, in_size)) {
@@ -132,7 +128,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
return out_size;
}
- atomic_set(&priv->data_pending, out_size);
+ priv->data_pending = out_size;
mutex_unlock(&priv->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
@@ -149,5 +145,5 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
file->private_data = NULL;
- atomic_set(&priv->data_pending, 0);
+ priv->data_pending = 0;
}
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
index ba3b6f9dacf7..b24cfb4d3ee1 100644
--- a/drivers/char/tpm/tpm-dev.h
+++ b/drivers/char/tpm/tpm-dev.h
@@ -8,7 +8,7 @@ struct file_priv {
struct tpm_chip *chip;
/* Data passed to and from the tpm via the read/write calls */
- atomic_t data_pending;
+ size_t data_pending;
struct mutex buffer_mutex;
struct timer_list user_read_timer; /* user needs to claim result */
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index c43a9e28995e..e32f6e85dc6d 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -489,7 +489,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
goto out;
}
- tpm_msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT_POLL);
rmb();
} while (time_before(jiffies, stop));
@@ -587,7 +587,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
*/
if (rc == TPM2_RC_TESTING && cc == TPM2_CC_SELF_TEST)
break;
- delay_msec *= 2;
+
if (delay_msec > TPM2_DURATION_LONG) {
if (rc == TPM2_RC_RETRY)
dev_err(&chip->dev, "in retry loop\n");
@@ -597,6 +597,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
break;
}
tpm_msleep(delay_msec);
+ delay_msec *= 2;
memcpy(buf, save, save_size);
}
return ret;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 7f2d0f489e9c..4426649e431c 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -53,7 +53,10 @@ enum tpm_const {
enum tpm_timeout {
TPM_TIMEOUT = 5, /* msecs */
TPM_TIMEOUT_RETRY = 100, /* msecs */
- TPM_TIMEOUT_RANGE_US = 300 /* usecs */
+ TPM_TIMEOUT_RANGE_US = 300, /* usecs */
+ TPM_TIMEOUT_POLL = 1, /* msecs */
+ TPM_TIMEOUT_USECS_MIN = 100, /* usecs */
+ TPM_TIMEOUT_USECS_MAX = 500 /* usecs */
};
/* TPM addresses */
@@ -590,33 +593,6 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
u32 cc, u8 *buf, size_t *bufsiz);
-extern const struct seq_operations tpm2_binary_b_measurements_seqops;
-
-#if defined(CONFIG_ACPI)
-int tpm_read_log_acpi(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_acpi(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-#if defined(CONFIG_OF)
-int tpm_read_log_of(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_of(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-#if defined(CONFIG_EFI)
-int tpm_read_log_efi(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_efi(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-
int tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
#endif
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 4e4014eabdb9..6122d3276f72 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -102,8 +102,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
* TPM_RC_REFERENCE_H0 means the session has been
* flushed outside the space
*/
- rc = -ENOENT;
+ *handle = 0;
tpm_buf_destroy(&tbuf);
+ return -ENOENT;
} else if (rc > 0) {
dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
__func__, rc);
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 7f78482cd157..34fbc6cb097b 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -511,8 +511,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
sizeof(struct crb_regs_tail));
- if (IS_ERR(priv->regs_t))
- return PTR_ERR(priv->regs_t);
+ if (IS_ERR(priv->regs_t)) {
+ ret = PTR_ERR(priv->regs_t);
+ goto out_relinquish_locality;
+ }
/*
* PTT HW bug w/a: wake up the device to access
@@ -520,7 +522,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
ret = crb_cmd_ready(dev, priv);
if (ret)
- return ret;
+ goto out_relinquish_locality;
pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
@@ -565,6 +567,8 @@ out:
crb_go_idle(dev, priv);
+out_relinquish_locality:
+
__crb_relinquish_locality(dev, priv, 0);
return ret;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 5a1f47b43947..8b46aaa9e049 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -31,12 +31,6 @@
#include "tpm.h"
#include "tpm_tis_core.h"
-/* This is a polling delay to check for status and burstcount.
- * As per ddwg input, expectation is that status check and burstcount
- * check should return within few usecs.
- */
-#define TPM_POLL_SLEEP 1 /* msec */
-
static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
@@ -90,7 +84,8 @@ again:
}
} else {
do {
- tpm_msleep(TPM_POLL_SLEEP);
+ usleep_range(TPM_TIMEOUT_USECS_MIN,
+ TPM_TIMEOUT_USECS_MAX);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
@@ -143,13 +138,58 @@ static bool check_locality(struct tpm_chip *chip, int l)
return false;
}
+static bool locality_inactive(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u8 access;
+
+ rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+ if (rc < 0)
+ return false;
+
+ if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
+ == TPM_ACCESS_VALID)
+ return true;
+
+ return false;
+}
+
static int release_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop, timeout;
+ long rc;
tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
- return 0;
+ stop = jiffies + chip->timeout_a;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -1;
+
+ rc = wait_event_interruptible_timeout(priv->int_queue,
+ (locality_inactive(chip, l)),
+ timeout);
+
+ if (rc > 0)
+ return 0;
+
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ if (locality_inactive(chip, l))
+ return 0;
+ tpm_msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ }
+ return -1;
}
static int request_locality(struct tpm_chip *chip, int l)
@@ -234,7 +274,7 @@ static int get_burstcount(struct tpm_chip *chip)
burstcnt = (value >> 8) & 0xFFFF;
if (burstcnt)
return burstcnt;
- tpm_msleep(TPM_POLL_SLEEP);
+ usleep_range(TPM_TIMEOUT_USECS_MIN, TPM_TIMEOUT_USECS_MAX);
} while (time_before(jiffies, stop));
return -EBUSY;
}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 34968a381d0f..721572a8c429 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -277,6 +277,7 @@ config COMMON_CLK_STM32H7
---help---
Support for stm32h7 SoC family clocks
+source "drivers/clk/actions/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
source "drivers/clk/imgtec/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index de6d06ac790b..ae40cbe770f0 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
+obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
obj-$(CONFIG_COMMON_CLK_OXNAS) += clk-oxnas.o
obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
# please keep this section sorted lexicographically by directory path name
+obj-y += actions/
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
obj-$(CONFIG_ARCH_ARTPEC) += axis/
obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig
new file mode 100644
index 000000000000..8854adb37847
--- /dev/null
+++ b/drivers/clk/actions/Kconfig
@@ -0,0 +1,14 @@
+config CLK_ACTIONS
+ bool "Clock driver for Actions Semi SoCs"
+ depends on ARCH_ACTIONS || COMPILE_TEST
+ default ARCH_ACTIONS
+
+if CLK_ACTIONS
+
+# SoC Drivers
+
+config CLK_OWL_S900
+ bool "Support for the Actions Semi OWL S900 clocks"
+ depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
+ default ARM64 && ARCH_ACTIONS
+endif
diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile
new file mode 100644
index 000000000000..76e431434d10
--- /dev/null
+++ b/drivers/clk/actions/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_CLK_ACTIONS) += clk-owl.o
+
+clk-owl-y += owl-common.o
+clk-owl-y += owl-gate.o
+clk-owl-y += owl-mux.o
+clk-owl-y += owl-divider.o
+clk-owl-y += owl-factor.o
+clk-owl-y += owl-composite.o
+clk-owl-y += owl-pll.o
+
+# SoC support
+obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o
diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
new file mode 100644
index 000000000000..61c1071b5180
--- /dev/null
+++ b/drivers/clk/actions/owl-common.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL common clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "owl-common.h"
+
+static const struct regmap_config owl_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x00cc,
+ .fast_io = true,
+};
+
+static void owl_clk_set_regmap(const struct owl_clk_desc *desc,
+ struct regmap *regmap)
+{
+ int i;
+ struct owl_clk_common *clks;
+
+ for (i = 0; i < desc->num_clks; i++) {
+ clks = desc->clks[i];
+ if (!clks)
+ continue;
+
+ clks->regmap = regmap;
+ }
+}
+
+int owl_clk_regmap_init(struct platform_device *pdev,
+ const struct owl_clk_desc *desc)
+{
+ void __iomem *base;
+ struct regmap *regmap;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &owl_regmap_config);
+ if (IS_ERR(regmap)) {
+ pr_err("failed to init regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ owl_clk_set_regmap(desc, regmap);
+
+ return 0;
+}
+
+int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
+{
+ int i, ret;
+ struct clk_hw *hw;
+
+ for (i = 0; i < hw_clks->num; i++) {
+
+ hw = hw_clks->hws[i];
+
+ if (IS_ERR_OR_NULL(hw))
+ continue;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Couldn't register clock %d - %s\n",
+ i, hw->init->name);
+ return ret;
+ }
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_clks);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider\n");
+
+ return ret;
+}
diff --git a/drivers/clk/actions/owl-common.h b/drivers/clk/actions/owl-common.h
new file mode 100644
index 000000000000..4fd726ec54a6
--- /dev/null
+++ b/drivers/clk/actions/owl-common.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL common clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#ifndef _OWL_COMMON_H_
+#define _OWL_COMMON_H_
+
+#include <linux/clk-provider.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+
+struct device_node;
+
+struct owl_clk_common {
+ struct regmap *regmap;
+ struct clk_hw hw;
+};
+
+struct owl_clk_desc {
+ struct owl_clk_common **clks;
+ unsigned long num_clks;
+ struct clk_hw_onecell_data *hw_clks;
+};
+
+static inline struct owl_clk_common *
+ hw_to_owl_clk_common(const struct clk_hw *hw)
+{
+ return container_of(hw, struct owl_clk_common, hw);
+}
+
+int owl_clk_regmap_init(struct platform_device *pdev,
+ const struct owl_clk_desc *desc);
+int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks);
+
+#endif /* _OWL_COMMON_H_ */
diff --git a/drivers/clk/actions/owl-composite.c b/drivers/clk/actions/owl-composite.c
new file mode 100644
index 000000000000..101706e0c66f
--- /dev/null
+++ b/drivers/clk/actions/owl-composite.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL composite clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "owl-composite.h"
+
+static u8 owl_comp_get_parent(struct clk_hw *hw)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+
+ return owl_mux_helper_get_parent(&comp->common, &comp->mux_hw);
+}
+
+static int owl_comp_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+
+ return owl_mux_helper_set_parent(&comp->common, &comp->mux_hw, index);
+}
+
+static void owl_comp_disable(struct clk_hw *hw)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+ struct owl_clk_common *common = &comp->common;
+
+ owl_gate_set(common, &comp->gate_hw, false);
+}
+
+static int owl_comp_enable(struct clk_hw *hw)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+ struct owl_clk_common *common = &comp->common;
+
+ owl_gate_set(common, &comp->gate_hw, true);
+
+ return 0;
+}
+
+static int owl_comp_is_enabled(struct clk_hw *hw)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+ struct owl_clk_common *common = &comp->common;
+
+ return owl_gate_clk_is_enabled(common, &comp->gate_hw);
+}
+
+static long owl_comp_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+
+ return owl_divider_helper_round_rate(&comp->common, &comp->rate.div_hw,
+ rate, parent_rate);
+}
+
+static unsigned long owl_comp_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+
+ return owl_divider_helper_recalc_rate(&comp->common, &comp->rate.div_hw,
+ parent_rate);
+}
+
+static int owl_comp_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+
+ return owl_divider_helper_set_rate(&comp->common, &comp->rate.div_hw,
+ rate, parent_rate);
+}
+
+static long owl_comp_fact_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+
+ return owl_factor_helper_round_rate(&comp->common,
+ &comp->rate.factor_hw,
+ rate, parent_rate);
+}
+
+static unsigned long owl_comp_fact_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+
+ return owl_factor_helper_recalc_rate(&comp->common,
+ &comp->rate.factor_hw,
+ parent_rate);
+}
+
+static int owl_comp_fact_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+
+ return owl_factor_helper_set_rate(&comp->common,
+ &comp->rate.factor_hw,
+ rate, parent_rate);
+}
+
+static long owl_comp_fix_fact_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+ struct clk_fixed_factor *fix_fact_hw = &comp->rate.fix_fact_hw;
+
+ return comp->fix_fact_ops->round_rate(&fix_fact_hw->hw, rate, parent_rate);
+}
+
+static unsigned long owl_comp_fix_fact_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct owl_composite *comp = hw_to_owl_comp(hw);
+ struct clk_fixed_factor *fix_fact_hw = &comp->rate.fix_fact_hw;
+
+ return comp->fix_fact_ops->recalc_rate(&fix_fact_hw->hw, parent_rate);
+
+}
+
+static int owl_comp_fix_fact_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * We must report success but we can do so unconditionally because
+ * owl_comp_fix_fact_round_rate returns values that ensure this call is
+ * a nop.
+ */
+
+ return 0;
+}
+
+const struct clk_ops owl_comp_div_ops = {
+ /* mux_ops */
+ .get_parent = owl_comp_get_parent,
+ .set_parent = owl_comp_set_parent,
+
+ /* gate_ops */
+ .disable = owl_comp_disable,
+ .enable = owl_comp_enable,
+ .is_enabled = owl_comp_is_enabled,
+
+ /* div_ops */
+ .round_rate = owl_comp_div_round_rate,
+ .recalc_rate = owl_comp_div_recalc_rate,
+ .set_rate = owl_comp_div_set_rate,
+};
+
+
+const struct clk_ops owl_comp_fact_ops = {
+ /* mux_ops */
+ .get_parent = owl_comp_get_parent,
+ .set_parent = owl_comp_set_parent,
+
+ /* gate_ops */
+ .disable = owl_comp_disable,
+ .enable = owl_comp_enable,
+ .is_enabled = owl_comp_is_enabled,
+
+ /* fact_ops */
+ .round_rate = owl_comp_fact_round_rate,
+ .recalc_rate = owl_comp_fact_recalc_rate,
+ .set_rate = owl_comp_fact_set_rate,
+};
+
+const struct clk_ops owl_comp_fix_fact_ops = {
+ /* gate_ops */
+ .disable = owl_comp_disable,
+ .enable = owl_comp_enable,
+ .is_enabled = owl_comp_is_enabled,
+
+ /* fix_fact_ops */
+ .round_rate = owl_comp_fix_fact_round_rate,
+ .recalc_rate = owl_comp_fix_fact_recalc_rate,
+ .set_rate = owl_comp_fix_fact_set_rate,
+};
+
+
+const struct clk_ops owl_comp_pass_ops = {
+ /* mux_ops */
+ .get_parent = owl_comp_get_parent,
+ .set_parent = owl_comp_set_parent,
+
+ /* gate_ops */
+ .disable = owl_comp_disable,
+ .enable = owl_comp_enable,
+ .is_enabled = owl_comp_is_enabled,
+};
diff --git a/drivers/clk/actions/owl-composite.h b/drivers/clk/actions/owl-composite.h
new file mode 100644
index 000000000000..b410ed5bf308
--- /dev/null
+++ b/drivers/clk/actions/owl-composite.h
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL composite clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#ifndef _OWL_COMPOSITE_H_
+#define _OWL_COMPOSITE_H_
+
+#include "owl-common.h"
+#include "owl-mux.h"
+#include "owl-gate.h"
+#include "owl-factor.h"
+#include "owl-fixed-factor.h"
+#include "owl-divider.h"
+
+union owl_rate {
+ struct owl_divider_hw div_hw;
+ struct owl_factor_hw factor_hw;
+ struct clk_fixed_factor fix_fact_hw;
+};
+
+struct owl_composite {
+ struct owl_mux_hw mux_hw;
+ struct owl_gate_hw gate_hw;
+ union owl_rate rate;
+
+ const struct clk_ops *fix_fact_ops;
+
+ struct owl_clk_common common;
+};
+
+#define OWL_COMP_DIV(_struct, _name, _parent, \
+ _mux, _gate, _div, _flags) \
+ struct owl_composite _struct = { \
+ .mux_hw = _mux, \
+ .gate_hw = _gate, \
+ .rate.div_hw = _div, \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parent, \
+ &owl_comp_div_ops,\
+ _flags), \
+ }, \
+ }
+
+#define OWL_COMP_DIV_FIXED(_struct, _name, _parent, \
+ _gate, _div, _flags) \
+ struct owl_composite _struct = { \
+ .gate_hw = _gate, \
+ .rate.div_hw = _div, \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &owl_comp_div_ops,\
+ _flags), \
+ }, \
+ }
+
+#define OWL_COMP_FACTOR(_struct, _name, _parent, \
+ _mux, _gate, _factor, _flags) \
+ struct owl_composite _struct = { \
+ .mux_hw = _mux, \
+ .gate_hw = _gate, \
+ .rate.factor_hw = _factor, \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parent, \
+ &owl_comp_fact_ops,\
+ _flags), \
+ }, \
+ }
+
+#define OWL_COMP_FIXED_FACTOR(_struct, _name, _parent, \
+ _gate, _mul, _div, _flags) \
+ struct owl_composite _struct = { \
+ .gate_hw = _gate, \
+ .rate.fix_fact_hw.mult = _mul, \
+ .rate.fix_fact_hw.div = _div, \
+ .fix_fact_ops = &clk_fixed_factor_ops, \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &owl_comp_fix_fact_ops,\
+ _flags), \
+ }, \
+ }
+
+#define OWL_COMP_PASS(_struct, _name, _parent, \
+ _mux, _gate, _flags) \
+ struct owl_composite _struct = { \
+ .mux_hw = _mux, \
+ .gate_hw = _gate, \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parent, \
+ &owl_comp_pass_ops,\
+ _flags), \
+ }, \
+ }
+
+static inline struct owl_composite *hw_to_owl_comp(const struct clk_hw *hw)
+{
+ struct owl_clk_common *common = hw_to_owl_clk_common(hw);
+
+ return container_of(common, struct owl_composite, common);
+}
+
+extern const struct clk_ops owl_comp_div_ops;
+extern const struct clk_ops owl_comp_fact_ops;
+extern const struct clk_ops owl_comp_fix_fact_ops;
+extern const struct clk_ops owl_comp_pass_ops;
+extern const struct clk_ops clk_fixed_factor_ops;
+
+#endif /* _OWL_COMPOSITE_H_ */
diff --git a/drivers/clk/actions/owl-divider.c b/drivers/clk/actions/owl-divider.c
new file mode 100644
index 000000000000..cddac00fe324
--- /dev/null
+++ b/drivers/clk/actions/owl-divider.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL divider clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "owl-divider.h"
+
+long owl_divider_helper_round_rate(struct owl_clk_common *common,
+ const struct owl_divider_hw *div_hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return divider_round_rate(&common->hw, rate, parent_rate,
+ div_hw->table, div_hw->width,
+ div_hw->div_flags);
+}
+
+static long owl_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct owl_divider *div = hw_to_owl_divider(hw);
+
+ return owl_divider_helper_round_rate(&div->common, &div->div_hw,
+ rate, parent_rate);
+}
+
+unsigned long owl_divider_helper_recalc_rate(struct owl_clk_common *common,
+ const struct owl_divider_hw *div_hw,
+ unsigned long parent_rate)
+{
+ unsigned long val;
+ unsigned int reg;
+
+ regmap_read(common->regmap, div_hw->reg, &reg);
+ val = reg >> div_hw->shift;
+ val &= (1 << div_hw->width) - 1;
+
+ return divider_recalc_rate(&common->hw, parent_rate,
+ val, div_hw->table,
+ div_hw->div_flags,
+ div_hw->width);
+}
+
+static unsigned long owl_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct owl_divider *div = hw_to_owl_divider(hw);
+
+ return owl_divider_helper_recalc_rate(&div->common,
+ &div->div_hw, parent_rate);
+}
+
+int owl_divider_helper_set_rate(const struct owl_clk_common *common,
+ const struct owl_divider_hw *div_hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ unsigned long val;
+ unsigned int reg;
+
+ val = divider_get_val(rate, parent_rate, div_hw->table,
+ div_hw->width, 0);
+
+ regmap_read(common->regmap, div_hw->reg, &reg);
+ reg &= ~GENMASK(div_hw->width + div_hw->shift - 1, div_hw->shift);
+
+ regmap_write(common->regmap, div_hw->reg,
+ reg | (val << div_hw->shift));
+
+ return 0;
+}
+
+static int owl_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct owl_divider *div = hw_to_owl_divider(hw);
+
+ return owl_divider_helper_set_rate(&div->common, &div->div_hw,
+ rate, parent_rate);
+}
+
+const struct clk_ops owl_divider_ops = {
+ .recalc_rate = owl_divider_recalc_rate,
+ .round_rate = owl_divider_round_rate,
+ .set_rate = owl_divider_set_rate,
+};
diff --git a/drivers/clk/actions/owl-divider.h b/drivers/clk/actions/owl-divider.h
new file mode 100644
index 000000000000..92d3e3d23967
--- /dev/null
+++ b/drivers/clk/actions/owl-divider.h
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL divider clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#ifndef _OWL_DIVIDER_H_
+#define _OWL_DIVIDER_H_
+
+#include "owl-common.h"
+
+struct owl_divider_hw {
+ u32 reg;
+ u8 shift;
+ u8 width;
+ u8 div_flags;
+ struct clk_div_table *table;
+};
+
+struct owl_divider {
+ struct owl_divider_hw div_hw;
+ struct owl_clk_common common;
+};
+
+#define OWL_DIVIDER_HW(_reg, _shift, _width, _div_flags, _table) \
+ { \
+ .reg = _reg, \
+ .shift = _shift, \
+ .width = _width, \
+ .div_flags = _div_flags, \
+ .table = _table, \
+ }
+
+#define OWL_DIVIDER(_struct, _name, _parent, _reg, \
+ _shift, _width, _table, _div_flags, _flags) \
+ struct owl_divider _struct = { \
+ .div_hw = OWL_DIVIDER_HW(_reg, _shift, _width, \
+ _div_flags, _table), \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &owl_divider_ops, \
+ _flags), \
+ }, \
+ }
+
+static inline struct owl_divider *hw_to_owl_divider(const struct clk_hw *hw)
+{
+ struct owl_clk_common *common = hw_to_owl_clk_common(hw);
+
+ return container_of(common, struct owl_divider, common);
+}
+
+long owl_divider_helper_round_rate(struct owl_clk_common *common,
+ const struct owl_divider_hw *div_hw,
+ unsigned long rate,
+ unsigned long *parent_rate);
+
+unsigned long owl_divider_helper_recalc_rate(struct owl_clk_common *common,
+ const struct owl_divider_hw *div_hw,
+ unsigned long parent_rate);
+
+int owl_divider_helper_set_rate(const struct owl_clk_common *common,
+ const struct owl_divider_hw *div_hw,
+ unsigned long rate,
+ unsigned long parent_rate);
+
+extern const struct clk_ops owl_divider_ops;
+
+#endif /* _OWL_DIVIDER_H_ */
diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c
new file mode 100644
index 000000000000..317d4a9e112e
--- /dev/null
+++ b/drivers/clk/actions/owl-factor.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL factor clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "owl-factor.h"
+
+static unsigned int _get_table_maxval(const struct clk_factor_table *table)
+{
+ unsigned int maxval = 0;
+ const struct clk_factor_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->val > maxval)
+ maxval = clkt->val;
+ return maxval;
+}
+
+static int _get_table_div_mul(const struct clk_factor_table *table,
+ unsigned int val, unsigned int *mul, unsigned int *div)
+{
+ const struct clk_factor_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++) {
+ if (clkt->val == val) {
+ *mul = clkt->mul;
+ *div = clkt->div;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int _get_table_val(const struct clk_factor_table *table,
+ unsigned long rate, unsigned long parent_rate)
+{
+ const struct clk_factor_table *clkt;
+ int val = -1;
+ u64 calc_rate;
+
+ for (clkt = table; clkt->div; clkt++) {
+ calc_rate = parent_rate * clkt->mul;
+ do_div(calc_rate, clkt->div);
+
+ if ((unsigned long)calc_rate <= rate) {
+ val = clkt->val;
+ break;
+ }
+ }
+
+ if (val == -1)
+ val = _get_table_maxval(table);
+
+ return val;
+}
+
+static int clk_val_best(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate)
+{
+ struct owl_factor *factor = hw_to_owl_factor(hw);
+ struct owl_factor_hw *factor_hw = &factor->factor_hw;
+ const struct clk_factor_table *clkt = factor_hw->table;
+ unsigned long parent_rate, try_parent_rate, best = 0, cur_rate;
+ unsigned long parent_rate_saved = *best_parent_rate;
+ int bestval = 0;
+
+ if (!rate)
+ rate = 1;
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
+ bestval = _get_table_val(clkt, rate, parent_rate);
+ return bestval;
+ }
+
+ for (clkt = factor_hw->table; clkt->div; clkt++) {
+ try_parent_rate = rate * clkt->div / clkt->mul;
+
+ if (try_parent_rate == parent_rate_saved) {
+ pr_debug("%s: [%d %d %d] found try_parent_rate %ld\n",
+ __func__, clkt->val, clkt->mul, clkt->div,
+ try_parent_rate);
+ /*
+ * It's the most ideal case if the requested rate can be
+ * divided from parent clock without any need to change
+ * parent rate, so return the divider immediately.
+ */
+ *best_parent_rate = parent_rate_saved;
+ return clkt->val;
+ }
+
+ parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ try_parent_rate);
+ cur_rate = DIV_ROUND_UP(parent_rate, clkt->div) * clkt->mul;
+ if (cur_rate <= rate && cur_rate > best) {
+ bestval = clkt->val;
+ best = cur_rate;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ if (!bestval) {
+ bestval = _get_table_maxval(clkt);
+ *best_parent_rate = clk_hw_round_rate(
+ clk_hw_get_parent(hw), 1);
+ }
+
+ return bestval;
+}
+
+long owl_factor_helper_round_rate(struct owl_clk_common *common,
+ const struct owl_factor_hw *factor_hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct clk_factor_table *clkt = factor_hw->table;
+ unsigned int val, mul = 0, div = 1;
+
+ val = clk_val_best(&common->hw, rate, parent_rate);
+ _get_table_div_mul(clkt, val, &mul, &div);
+
+ return *parent_rate * mul / div;
+}
+
+static long owl_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct owl_factor *factor = hw_to_owl_factor(hw);
+ struct owl_factor_hw *factor_hw = &factor->factor_hw;
+
+ return owl_factor_helper_round_rate(&factor->common, factor_hw,
+ rate, parent_rate);
+}
+
+unsigned long owl_factor_helper_recalc_rate(struct owl_clk_common *common,
+ const struct owl_factor_hw *factor_hw,
+ unsigned long parent_rate)
+{
+ const struct clk_factor_table *clkt = factor_hw->table;
+ unsigned long long int rate;
+ u32 reg, val, mul, div;
+
+ div = 0;
+ mul = 0;
+
+ regmap_read(common->regmap, factor_hw->reg, &reg);
+
+ val = reg >> factor_hw->shift;
+ val &= div_mask(factor_hw);
+
+ _get_table_div_mul(clkt, val, &mul, &div);
+ if (!div) {
+ WARN(!(factor_hw->fct_flags & CLK_DIVIDER_ALLOW_ZERO),
+ "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+ __clk_get_name(common->hw.clk));
+ return parent_rate;
+ }
+
+ rate = (unsigned long long int)parent_rate * mul;
+ do_div(rate, div);
+
+ return rate;
+}
+
+static unsigned long owl_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct owl_factor *factor = hw_to_owl_factor(hw);
+ struct owl_factor_hw *factor_hw = &factor->factor_hw;
+ struct owl_clk_common *common = &factor->common;
+
+ return owl_factor_helper_recalc_rate(common, factor_hw, parent_rate);
+}
+
+int owl_factor_helper_set_rate(const struct owl_clk_common *common,
+ const struct owl_factor_hw *factor_hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 val, reg;
+
+ val = _get_table_val(factor_hw->table, rate, parent_rate);
+
+ if (val > div_mask(factor_hw))
+ val = div_mask(factor_hw);
+
+ regmap_read(common->regmap, factor_hw->reg, &reg);
+
+ reg &= ~(div_mask(factor_hw) << factor_hw->shift);
+ reg |= val << factor_hw->shift;
+
+ regmap_write(common->regmap, factor_hw->reg, reg);
+
+ return 0;
+}
+
+static int owl_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct owl_factor *factor = hw_to_owl_factor(hw);
+ struct owl_factor_hw *factor_hw = &factor->factor_hw;
+ struct owl_clk_common *common = &factor->common;
+
+ return owl_factor_helper_set_rate(common, factor_hw,
+ rate, parent_rate);
+}
+
+const struct clk_ops owl_factor_ops = {
+ .round_rate = owl_factor_round_rate,
+ .recalc_rate = owl_factor_recalc_rate,
+ .set_rate = owl_factor_set_rate,
+};
diff --git a/drivers/clk/actions/owl-factor.h b/drivers/clk/actions/owl-factor.h
new file mode 100644
index 000000000000..f1a7ffe896e1
--- /dev/null
+++ b/drivers/clk/actions/owl-factor.h
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL factor clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#ifndef _OWL_FACTOR_H_
+#define _OWL_FACTOR_H_
+
+#include "owl-common.h"
+
+struct clk_factor_table {
+ unsigned int val;
+ unsigned int mul;
+ unsigned int div;
+};
+
+struct owl_factor_hw {
+ u32 reg;
+ u8 shift;
+ u8 width;
+ u8 fct_flags;
+ struct clk_factor_table *table;
+};
+
+struct owl_factor {
+ struct owl_factor_hw factor_hw;
+ struct owl_clk_common common;
+};
+
+#define OWL_FACTOR_HW(_reg, _shift, _width, _fct_flags, _table) \
+ { \
+ .reg = _reg, \
+ .shift = _shift, \
+ .width = _width, \
+ .fct_flags = _fct_flags, \
+ .table = _table, \
+ }
+
+#define OWL_FACTOR(_struct, _name, _parent, _reg, \
+ _shift, _width, _table, _fct_flags, _flags) \
+ struct owl_factor _struct = { \
+ .factor_hw = OWL_FACTOR_HW(_reg, _shift, \
+ _width, _fct_flags, _table), \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &owl_factor_ops, \
+ _flags), \
+ }, \
+ }
+
+#define div_mask(d) ((1 << ((d)->width)) - 1)
+
+static inline struct owl_factor *hw_to_owl_factor(const struct clk_hw *hw)
+{
+ struct owl_clk_common *common = hw_to_owl_clk_common(hw);
+
+ return container_of(common, struct owl_factor, common);
+}
+
+long owl_factor_helper_round_rate(struct owl_clk_common *common,
+ const struct owl_factor_hw *factor_hw,
+ unsigned long rate,
+ unsigned long *parent_rate);
+
+unsigned long owl_factor_helper_recalc_rate(struct owl_clk_common *common,
+ const struct owl_factor_hw *factor_hw,
+ unsigned long parent_rate);
+
+int owl_factor_helper_set_rate(const struct owl_clk_common *common,
+ const struct owl_factor_hw *factor_hw,
+ unsigned long rate,
+ unsigned long parent_rate);
+
+extern const struct clk_ops owl_factor_ops;
+
+#endif /* _OWL_FACTOR_H_ */
diff --git a/drivers/clk/actions/owl-fixed-factor.h b/drivers/clk/actions/owl-fixed-factor.h
new file mode 100644
index 000000000000..cc9fe36c0964
--- /dev/null
+++ b/drivers/clk/actions/owl-fixed-factor.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL fixed factor clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#ifndef _OWL_FIXED_FACTOR_H_
+#define _OWL_FIXED_FACTOR_H_
+
+#include "owl-common.h"
+
+#define OWL_FIX_FACT(_struct, _name, _parent, _mul, _div, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .mult = _mul, \
+ .div = _div, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+extern const struct clk_ops clk_fixed_factor_ops;
+
+#endif /* _OWL_FIXED_FACTOR_H_ */
diff --git a/drivers/clk/actions/owl-gate.c b/drivers/clk/actions/owl-gate.c
new file mode 100644
index 000000000000..f11500ba46a7
--- /dev/null
+++ b/drivers/clk/actions/owl-gate.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL gate clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "owl-gate.h"
+
+void owl_gate_set(const struct owl_clk_common *common,
+ const struct owl_gate_hw *gate_hw, bool enable)
+{
+ int set = gate_hw->gate_flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
+ u32 reg;
+
+ set ^= enable;
+
+ regmap_read(common->regmap, gate_hw->reg, &reg);
+
+ if (set)
+ reg |= BIT(gate_hw->bit_idx);
+ else
+ reg &= ~BIT(gate_hw->bit_idx);
+
+ regmap_write(common->regmap, gate_hw->reg, reg);
+}
+
+static void owl_gate_disable(struct clk_hw *hw)
+{
+ struct owl_gate *gate = hw_to_owl_gate(hw);
+ struct owl_clk_common *common = &gate->common;
+
+ owl_gate_set(common, &gate->gate_hw, false);
+}
+
+static int owl_gate_enable(struct clk_hw *hw)
+{
+ struct owl_gate *gate = hw_to_owl_gate(hw);
+ struct owl_clk_common *common = &gate->common;
+
+ owl_gate_set(common, &gate->gate_hw, true);
+
+ return 0;
+}
+
+int owl_gate_clk_is_enabled(const struct owl_clk_common *common,
+ const struct owl_gate_hw *gate_hw)
+{
+ u32 reg;
+
+ regmap_read(common->regmap, gate_hw->reg, &reg);
+
+ if (gate_hw->gate_flags & CLK_GATE_SET_TO_DISABLE)
+ reg ^= BIT(gate_hw->bit_idx);
+
+ return !!(reg & BIT(gate_hw->bit_idx));
+}
+
+static int owl_gate_is_enabled(struct clk_hw *hw)
+{
+ struct owl_gate *gate = hw_to_owl_gate(hw);
+ struct owl_clk_common *common = &gate->common;
+
+ return owl_gate_clk_is_enabled(common, &gate->gate_hw);
+}
+
+const struct clk_ops owl_gate_ops = {
+ .disable = owl_gate_disable,
+ .enable = owl_gate_enable,
+ .is_enabled = owl_gate_is_enabled,
+};
diff --git a/drivers/clk/actions/owl-gate.h b/drivers/clk/actions/owl-gate.h
new file mode 100644
index 000000000000..c2d61ceebce2
--- /dev/null
+++ b/drivers/clk/actions/owl-gate.h
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL gate clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#ifndef _OWL_GATE_H_
+#define _OWL_GATE_H_
+
+#include "owl-common.h"
+
+struct owl_gate_hw {
+ u32 reg;
+ u8 bit_idx;
+ u8 gate_flags;
+};
+
+struct owl_gate {
+ struct owl_gate_hw gate_hw;
+ struct owl_clk_common common;
+};
+
+#define OWL_GATE_HW(_reg, _bit_idx, _gate_flags) \
+ { \
+ .reg = _reg, \
+ .bit_idx = _bit_idx, \
+ .gate_flags = _gate_flags, \
+ }
+
+#define OWL_GATE(_struct, _name, _parent, _reg, \
+ _bit_idx, _gate_flags, _flags) \
+ struct owl_gate _struct = { \
+ .gate_hw = OWL_GATE_HW(_reg, _bit_idx, _gate_flags), \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &owl_gate_ops, \
+ _flags), \
+ } \
+ } \
+
+#define OWL_GATE_NO_PARENT(_struct, _name, _reg, \
+ _bit_idx, _gate_flags, _flags) \
+ struct owl_gate _struct = { \
+ .gate_hw = OWL_GATE_HW(_reg, _bit_idx, _gate_flags), \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \
+ &owl_gate_ops, \
+ _flags), \
+ }, \
+ } \
+
+static inline struct owl_gate *hw_to_owl_gate(const struct clk_hw *hw)
+{
+ struct owl_clk_common *common = hw_to_owl_clk_common(hw);
+
+ return container_of(common, struct owl_gate, common);
+}
+
+void owl_gate_set(const struct owl_clk_common *common,
+ const struct owl_gate_hw *gate_hw, bool enable);
+int owl_gate_clk_is_enabled(const struct owl_clk_common *common,
+ const struct owl_gate_hw *gate_hw);
+
+extern const struct clk_ops owl_gate_ops;
+
+#endif /* _OWL_GATE_H_ */
diff --git a/drivers/clk/actions/owl-mux.c b/drivers/clk/actions/owl-mux.c
new file mode 100644
index 000000000000..f9c6cf2540e4
--- /dev/null
+++ b/drivers/clk/actions/owl-mux.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL mux clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "owl-mux.h"
+
+u8 owl_mux_helper_get_parent(const struct owl_clk_common *common,
+ const struct owl_mux_hw *mux_hw)
+{
+ u32 reg;
+ u8 parent;
+
+ regmap_read(common->regmap, mux_hw->reg, &reg);
+ parent = reg >> mux_hw->shift;
+ parent &= BIT(mux_hw->width) - 1;
+
+ return parent;
+}
+
+static u8 owl_mux_get_parent(struct clk_hw *hw)
+{
+ struct owl_mux *mux = hw_to_owl_mux(hw);
+
+ return owl_mux_helper_get_parent(&mux->common, &mux->mux_hw);
+}
+
+int owl_mux_helper_set_parent(const struct owl_clk_common *common,
+ struct owl_mux_hw *mux_hw, u8 index)
+{
+ u32 reg;
+
+ regmap_read(common->regmap, mux_hw->reg, &reg);
+ reg &= ~GENMASK(mux_hw->width + mux_hw->shift - 1, mux_hw->shift);
+ regmap_write(common->regmap, mux_hw->reg,
+ reg | (index << mux_hw->shift));
+
+ return 0;
+}
+
+static int owl_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct owl_mux *mux = hw_to_owl_mux(hw);
+
+ return owl_mux_helper_set_parent(&mux->common, &mux->mux_hw, index);
+}
+
+const struct clk_ops owl_mux_ops = {
+ .get_parent = owl_mux_get_parent,
+ .set_parent = owl_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
diff --git a/drivers/clk/actions/owl-mux.h b/drivers/clk/actions/owl-mux.h
new file mode 100644
index 000000000000..834284c8c3ae
--- /dev/null
+++ b/drivers/clk/actions/owl-mux.h
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL mux clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#ifndef _OWL_MUX_H_
+#define _OWL_MUX_H_
+
+#include "owl-common.h"
+
+struct owl_mux_hw {
+ u32 reg;
+ u8 shift;
+ u8 width;
+};
+
+struct owl_mux {
+ struct owl_mux_hw mux_hw;
+ struct owl_clk_common common;
+};
+
+#define OWL_MUX_HW(_reg, _shift, _width) \
+ { \
+ .reg = _reg, \
+ .shift = _shift, \
+ .width = _width, \
+ }
+
+#define OWL_MUX(_struct, _name, _parents, _reg, \
+ _shift, _width, _flags) \
+ struct owl_mux _struct = { \
+ .mux_hw = OWL_MUX_HW(_reg, _shift, _width), \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parents, \
+ &owl_mux_ops, \
+ _flags), \
+ }, \
+ }
+
+static inline struct owl_mux *hw_to_owl_mux(const struct clk_hw *hw)
+{
+ struct owl_clk_common *common = hw_to_owl_clk_common(hw);
+
+ return container_of(common, struct owl_mux, common);
+}
+
+u8 owl_mux_helper_get_parent(const struct owl_clk_common *common,
+ const struct owl_mux_hw *mux_hw);
+int owl_mux_helper_set_parent(const struct owl_clk_common *common,
+ struct owl_mux_hw *mux_hw, u8 index);
+
+extern const struct clk_ops owl_mux_ops;
+
+#endif /* _OWL_MUX_H_ */
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
new file mode 100644
index 000000000000..058e06d7099f
--- /dev/null
+++ b/drivers/clk/actions/owl-pll.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL pll clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "owl-pll.h"
+
+static u32 owl_pll_calculate_mul(struct owl_pll_hw *pll_hw, unsigned long rate)
+{
+ u32 mul;
+
+ mul = DIV_ROUND_CLOSEST(rate, pll_hw->bfreq);
+ if (mul < pll_hw->min_mul)
+ mul = pll_hw->min_mul;
+ else if (mul > pll_hw->max_mul)
+ mul = pll_hw->max_mul;
+
+ return mul &= mul_mask(pll_hw);
+}
+
+static unsigned long _get_table_rate(const struct clk_pll_table *table,
+ unsigned int val)
+{
+ const struct clk_pll_table *clkt;
+
+ for (clkt = table; clkt->rate; clkt++)
+ if (clkt->val == val)
+ return clkt->rate;
+
+ return 0;
+}
+
+static const struct clk_pll_table *_get_pll_table(
+ const struct clk_pll_table *table, unsigned long rate)
+{
+ const struct clk_pll_table *clkt;
+
+ for (clkt = table; clkt->rate; clkt++) {
+ if (clkt->rate == rate) {
+ table = clkt;
+ break;
+ } else if (clkt->rate < rate)
+ table = clkt;
+ }
+
+ return table;
+}
+
+static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct owl_pll *pll = hw_to_owl_pll(hw);
+ struct owl_pll_hw *pll_hw = &pll->pll_hw;
+ const struct clk_pll_table *clkt;
+ u32 mul;
+
+ if (pll_hw->table) {
+ clkt = _get_pll_table(pll_hw->table, rate);
+ return clkt->rate;
+ }
+
+ /* fixed frequency */
+ if (pll_hw->width == 0)
+ return pll_hw->bfreq;
+
+ mul = owl_pll_calculate_mul(pll_hw, rate);
+
+ return pll_hw->bfreq * mul;
+}
+
+static unsigned long owl_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct owl_pll *pll = hw_to_owl_pll(hw);
+ struct owl_pll_hw *pll_hw = &pll->pll_hw;
+ const struct owl_clk_common *common = &pll->common;
+ u32 val;
+
+ if (pll_hw->table) {
+ regmap_read(common->regmap, pll_hw->reg, &val);
+
+ val = val >> pll_hw->shift;
+ val &= mul_mask(pll_hw);
+
+ return _get_table_rate(pll_hw->table, val);
+ }
+
+ /* fixed frequency */
+ if (pll_hw->width == 0)
+ return pll_hw->bfreq;
+
+ regmap_read(common->regmap, pll_hw->reg, &val);
+
+ val = val >> pll_hw->shift;
+ val &= mul_mask(pll_hw);
+
+ return pll_hw->bfreq * val;
+}
+
+static int owl_pll_is_enabled(struct clk_hw *hw)
+{
+ struct owl_pll *pll = hw_to_owl_pll(hw);
+ struct owl_pll_hw *pll_hw = &pll->pll_hw;
+ const struct owl_clk_common *common = &pll->common;
+ u32 reg;
+
+ regmap_read(common->regmap, pll_hw->reg, &reg);
+
+ return !!(reg & BIT(pll_hw->bit_idx));
+}
+
+static void owl_pll_set(const struct owl_clk_common *common,
+ const struct owl_pll_hw *pll_hw, bool enable)
+{
+ u32 reg;
+
+ regmap_read(common->regmap, pll_hw->reg, &reg);
+
+ if (enable)
+ reg |= BIT(pll_hw->bit_idx);
+ else
+ reg &= ~BIT(pll_hw->bit_idx);
+
+ regmap_write(common->regmap, pll_hw->reg, reg);
+}
+
+static int owl_pll_enable(struct clk_hw *hw)
+{
+ struct owl_pll *pll = hw_to_owl_pll(hw);
+ const struct owl_clk_common *common = &pll->common;
+
+ owl_pll_set(common, &pll->pll_hw, true);
+
+ return 0;
+}
+
+static void owl_pll_disable(struct clk_hw *hw)
+{
+ struct owl_pll *pll = hw_to_owl_pll(hw);
+ const struct owl_clk_common *common = &pll->common;
+
+ owl_pll_set(common, &pll->pll_hw, false);
+}
+
+static int owl_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct owl_pll *pll = hw_to_owl_pll(hw);
+ struct owl_pll_hw *pll_hw = &pll->pll_hw;
+ const struct owl_clk_common *common = &pll->common;
+ const struct clk_pll_table *clkt;
+ u32 val, reg;
+
+ /* fixed frequency */
+ if (pll_hw->width == 0)
+ return 0;
+
+ if (pll_hw->table) {
+ clkt = _get_pll_table(pll_hw->table, rate);
+ val = clkt->val;
+ } else {
+ val = owl_pll_calculate_mul(pll_hw, rate);
+ }
+
+ regmap_read(common->regmap, pll_hw->reg, &reg);
+
+ reg &= ~mul_mask(pll_hw);
+ reg |= val << pll_hw->shift;
+
+ regmap_write(common->regmap, pll_hw->reg, reg);
+
+ udelay(PLL_STABILITY_WAIT_US);
+
+ return 0;
+}
+
+const struct clk_ops owl_pll_ops = {
+ .enable = owl_pll_enable,
+ .disable = owl_pll_disable,
+ .is_enabled = owl_pll_is_enabled,
+ .round_rate = owl_pll_round_rate,
+ .recalc_rate = owl_pll_recalc_rate,
+ .set_rate = owl_pll_set_rate,
+};
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h
new file mode 100644
index 000000000000..0aae30abd5dc
--- /dev/null
+++ b/drivers/clk/actions/owl-pll.h
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL pll clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#ifndef _OWL_PLL_H_
+#define _OWL_PLL_H_
+
+#include "owl-common.h"
+
+/* last entry should have rate = 0 */
+struct clk_pll_table {
+ unsigned int val;
+ unsigned long rate;
+};
+
+struct owl_pll_hw {
+ u32 reg;
+ u32 bfreq;
+ u8 bit_idx;
+ u8 shift;
+ u8 width;
+ u8 min_mul;
+ u8 max_mul;
+ const struct clk_pll_table *table;
+};
+
+struct owl_pll {
+ struct owl_pll_hw pll_hw;
+ struct owl_clk_common common;
+};
+
+#define OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
+ _width, _min_mul, _max_mul, _table) \
+ { \
+ .reg = _reg, \
+ .bfreq = _bfreq, \
+ .bit_idx = _bit_idx, \
+ .shift = _shift, \
+ .width = _width, \
+ .min_mul = _min_mul, \
+ .max_mul = _max_mul, \
+ .table = _table, \
+ }
+
+#define OWL_PLL(_struct, _name, _parent, _reg, _bfreq, _bit_idx, \
+ _shift, _width, _min_mul, _max_mul, _table, _flags) \
+ struct owl_pll _struct = { \
+ .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
+ _width, _min_mul, \
+ _max_mul, _table), \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &owl_pll_ops, \
+ _flags), \
+ }, \
+ }
+
+#define OWL_PLL_NO_PARENT(_struct, _name, _reg, _bfreq, _bit_idx, \
+ _shift, _width, _min_mul, _max_mul, _table, _flags) \
+ struct owl_pll _struct = { \
+ .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
+ _width, _min_mul, \
+ _max_mul, _table), \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \
+ &owl_pll_ops, \
+ _flags), \
+ }, \
+ }
+
+#define mul_mask(m) ((1 << ((m)->width)) - 1)
+#define PLL_STABILITY_WAIT_US (50)
+
+static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw)
+{
+ struct owl_clk_common *common = hw_to_owl_clk_common(hw);
+
+ return container_of(common, struct owl_pll, common);
+}
+
+extern const struct clk_ops owl_pll_ops;
+
+#endif /* _OWL_PLL_H_ */
diff --git a/drivers/clk/actions/owl-s900.c b/drivers/clk/actions/owl-s900.c
new file mode 100644
index 000000000000..7f60ed6afe63
--- /dev/null
+++ b/drivers/clk/actions/owl-s900.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// OWL S900 SoC clock driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "owl-common.h"
+#include "owl-composite.h"
+#include "owl-divider.h"
+#include "owl-factor.h"
+#include "owl-fixed-factor.h"
+#include "owl-gate.h"
+#include "owl-mux.h"
+#include "owl-pll.h"
+
+#include <dt-bindings/clock/actions,s900-cmu.h>
+
+#define CMU_COREPLL (0x0000)
+#define CMU_DEVPLL (0x0004)
+#define CMU_DDRPLL (0x0008)
+#define CMU_NANDPLL (0x000C)
+#define CMU_DISPLAYPLL (0x0010)
+#define CMU_AUDIOPLL (0x0014)
+#define CMU_TVOUTPLL (0x0018)
+#define CMU_BUSCLK (0x001C)
+#define CMU_SENSORCLK (0x0020)
+#define CMU_LCDCLK (0x0024)
+#define CMU_DSICLK (0x0028)
+#define CMU_CSICLK (0x002C)
+#define CMU_DECLK (0x0030)
+#define CMU_BISPCLK (0x0034)
+#define CMU_IMXCLK (0x0038)
+#define CMU_HDECLK (0x003C)
+#define CMU_VDECLK (0x0040)
+#define CMU_VCECLK (0x0044)
+#define CMU_NANDCCLK (0x004C)
+#define CMU_SD0CLK (0x0050)
+#define CMU_SD1CLK (0x0054)
+#define CMU_SD2CLK (0x0058)
+#define CMU_UART0CLK (0x005C)
+#define CMU_UART1CLK (0x0060)
+#define CMU_UART2CLK (0x0064)
+#define CMU_PWM0CLK (0x0070)
+#define CMU_PWM1CLK (0x0074)
+#define CMU_PWM2CLK (0x0078)
+#define CMU_PWM3CLK (0x007C)
+#define CMU_USBPLL (0x0080)
+#define CMU_ASSISTPLL (0x0084)
+#define CMU_EDPCLK (0x0088)
+#define CMU_GPU3DCLK (0x0090)
+#define CMU_CORECTL (0x009C)
+#define CMU_DEVCLKEN0 (0x00A0)
+#define CMU_DEVCLKEN1 (0x00A4)
+#define CMU_DEVRST0 (0x00A8)
+#define CMU_DEVRST1 (0x00AC)
+#define CMU_UART3CLK (0x00B0)
+#define CMU_UART4CLK (0x00B4)
+#define CMU_UART5CLK (0x00B8)
+#define CMU_UART6CLK (0x00BC)
+#define CMU_TLSCLK (0x00C0)
+#define CMU_SD3CLK (0x00C4)
+#define CMU_PWM4CLK (0x00C8)
+#define CMU_PWM5CLK (0x00CC)
+
+static struct clk_pll_table clk_audio_pll_table[] = {
+ { 0, 45158400 }, { 1, 49152000 },
+ { 0, 0 },
+};
+
+static struct clk_pll_table clk_edp_pll_table[] = {
+ { 0, 810000000 }, { 1, 135000000 }, { 2, 270000000 },
+ { 0, 0 },
+};
+
+/* pll clocks */
+static OWL_PLL_NO_PARENT(core_pll_clk, "core_pll_clk", CMU_COREPLL, 24000000, 9, 0, 8, 5, 107, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(dev_pll_clk, "dev_pll_clk", CMU_DEVPLL, 6000000, 8, 0, 8, 20, 180, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(ddr_pll_clk, "ddr_pll_clk", CMU_DDRPLL, 24000000, 8, 0, 8, 5, 45, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(nand_pll_clk, "nand_pll_clk", CMU_NANDPLL, 6000000, 8, 0, 8, 4, 100, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(display_pll_clk, "display_pll_clk", CMU_DISPLAYPLL, 6000000, 8, 0, 8, 20, 180, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(assist_pll_clk, "assist_pll_clk", CMU_ASSISTPLL, 500000000, 0, 0, 0, 0, 0, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(audio_pll_clk, "audio_pll_clk", CMU_AUDIOPLL, 0, 4, 0, 1, 0, 0, clk_audio_pll_table, CLK_IGNORE_UNUSED);
+static OWL_PLL(edp_pll_clk, "edp_pll_clk", "edp24M_clk", CMU_EDPCLK, 0, 9, 0, 2, 0, 0, clk_edp_pll_table, CLK_IGNORE_UNUSED);
+
+static const char *cpu_clk_mux_p[] = { "losc", "hosc", "core_pll_clk", };
+static const char *dev_clk_p[] = { "hosc", "dev_pll_clk", };
+static const char *noc_clk_mux_p[] = { "dev_clk", "assist_pll_clk", };
+static const char *dmm_clk_mux_p[] = { "dev_clk", "nand_pll_clk", "assist_pll_clk", "ddr_clk_src", };
+static const char *bisp_clk_mux_p[] = { "assist_pll_clk", "dev_clk", };
+static const char *csi_clk_mux_p[] = { "display_pll_clk", "dev_clk", };
+static const char *de_clk_mux_p[] = { "assist_pll_clk", "dev_clk", };
+static const char *gpu_clk_mux_p[] = { "dev_clk", "display_pll_clk", "ddr_clk_src", };
+static const char *hde_clk_mux_p[] = { "dev_clk", "display_pll_clk", "ddr_clk_src", };
+static const char *imx_clk_mux_p[] = { "assist_pll_clk", "dev_clk", };
+static const char *lcd_clk_mux_p[] = { "display_pll_clk", "nand_pll_clk", };
+static const char *nand_clk_mux_p[] = { "dev_clk", "nand_pll_clk", };
+static const char *sd_clk_mux_p[] = { "dev_clk", "nand_pll_clk", };
+static const char *sensor_clk_mux_p[] = { "hosc", "bisp_clk", };
+static const char *uart_clk_mux_p[] = { "hosc", "dev_pll_clk", };
+static const char *vce_clk_mux_p[] = { "dev_clk", "display_pll_clk", "assist_pll_clk", "ddr_clk_src", };
+static const char *i2s_clk_mux_p[] = { "audio_pll_clk", };
+static const char *edp_clk_mux_p[] = { "assist_pll_clk", "display_pll_clk", };
+
+/* mux clocks */
+static OWL_MUX(cpu_clk, "cpu_clk", cpu_clk_mux_p, CMU_BUSCLK, 0, 2, CLK_SET_RATE_PARENT);
+static OWL_MUX(dev_clk, "dev_clk", dev_clk_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
+static OWL_MUX(noc_clk_mux, "noc_clk_mux", noc_clk_mux_p, CMU_BUSCLK, 7, 1, CLK_SET_RATE_PARENT);
+
+static struct clk_div_table nand_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 6 },
+ { 4, 8 }, { 5, 10 }, { 6, 12 }, { 7, 14 },
+ { 8, 16 }, { 9, 18 }, { 10, 20 }, { 11, 22 },
+ { 12, 24 }, { 13, 26 }, { 14, 28 }, { 15, 30 },
+ { 0, 0 },
+};
+
+static struct clk_div_table apb_div_table[] = {
+ { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 0, 0 },
+};
+
+static struct clk_div_table eth_mac_div_table[] = {
+ { 0, 2 }, { 1, 4 },
+ { 0, 0 },
+};
+
+static struct clk_div_table rmii_ref_div_table[] = {
+ { 0, 4 }, { 1, 10 },
+ { 0, 0 },
+};
+
+static struct clk_div_table usb3_mac_div_table[] = {
+ { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 0, 8 },
+};
+
+static struct clk_div_table i2s_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
+ { 8, 24 },
+ { 0, 0 },
+};
+
+static struct clk_div_table hdmia_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
+ { 8, 24 },
+ { 0, 0 },
+};
+
+/* divider clocks */
+static OWL_DIVIDER(noc_clk_div, "noc_clk_div", "noc_clk", CMU_BUSCLK, 19, 1, NULL, 0, 0);
+static OWL_DIVIDER(ahb_clk, "ahb_clk", "noc_clk_div", CMU_BUSCLK, 4, 1, NULL, 0, 0);
+static OWL_DIVIDER(apb_clk, "apb_clk", "ahb_clk", CMU_BUSCLK, 8, 2, apb_div_table, 0, 0);
+static OWL_DIVIDER(usb3_mac_clk, "usb3_mac_clk", "assist_pll_clk", CMU_ASSISTPLL, 12, 2, usb3_mac_div_table, 0, 0);
+static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "assist_pll_clk", CMU_ASSISTPLL, 8, 1, rmii_ref_div_table, 0, 0);
+
+static struct clk_factor_table sd_factor_table[] = {
+ /* bit0 ~ 4 */
+ { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
+ { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
+ { 8, 1, 9 }, { 9, 1, 10 }, { 10, 1, 11 }, { 11, 1, 12 },
+ { 12, 1, 13 }, { 13, 1, 14 }, { 14, 1, 15 }, { 15, 1, 16 },
+ { 16, 1, 17 }, { 17, 1, 18 }, { 18, 1, 19 }, { 19, 1, 20 },
+ { 20, 1, 21 }, { 21, 1, 22 }, { 22, 1, 23 }, { 23, 1, 24 },
+ { 24, 1, 25 }, { 25, 1, 26 }, { 26, 1, 27 }, { 27, 1, 28 },
+ { 28, 1, 29 }, { 29, 1, 30 }, { 30, 1, 31 }, { 31, 1, 32 },
+
+ /* bit8: /128 */
+ { 256, 1, 1 * 128 }, { 257, 1, 2 * 128 }, { 258, 1, 3 * 128 }, { 259, 1, 4 * 128 },
+ { 260, 1, 5 * 128 }, { 261, 1, 6 * 128 }, { 262, 1, 7 * 128 }, { 263, 1, 8 * 128 },
+ { 264, 1, 9 * 128 }, { 265, 1, 10 * 128 }, { 266, 1, 11 * 128 }, { 267, 1, 12 * 128 },
+ { 268, 1, 13 * 128 }, { 269, 1, 14 * 128 }, { 270, 1, 15 * 128 }, { 271, 1, 16 * 128 },
+ { 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 },
+ { 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 },
+ { 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 },
+ { 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 },
+
+ { 0, 0 },
+};
+
+static struct clk_factor_table dmm_factor_table[] = {
+ { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 1, 3 },
+ { 4, 1, 4 },
+ { 0, 0, 0 },
+};
+
+static struct clk_factor_table noc_factor_table[] = {
+ { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 1, 3 }, { 4, 1, 4 },
+ { 0, 0, 0 },
+};
+
+static struct clk_factor_table bisp_factor_table[] = {
+ { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
+ { 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
+ { 0, 0, 0 },
+};
+
+/* factor clocks */
+static OWL_FACTOR(noc_clk, "noc_clk", "noc_clk_mux", CMU_BUSCLK, 16, 3, noc_factor_table, 0, 0);
+static OWL_FACTOR(de_clk1, "de_clk1", "de_clk", CMU_DECLK, 0, 3, bisp_factor_table, 0, 0);
+static OWL_FACTOR(de_clk2, "de_clk2", "de_clk", CMU_DECLK, 4, 3, bisp_factor_table, 0, 0);
+static OWL_FACTOR(de_clk3, "de_clk3", "de_clk", CMU_DECLK, 8, 3, bisp_factor_table, 0, 0);
+
+/* gate clocks */
+static OWL_GATE(gpio_clk, "gpio_clk", "apb_clk", CMU_DEVCLKEN0, 18, 0, 0);
+static OWL_GATE_NO_PARENT(gpu_clk, "gpu_clk", CMU_DEVCLKEN0, 30, 0, 0);
+static OWL_GATE(dmac_clk, "dmac_clk", "noc_clk_div", CMU_DEVCLKEN0, 1, 0, 0);
+static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
+static OWL_GATE_NO_PARENT(dsi_clk, "dsi_clk", CMU_DEVCLKEN0, 12, 0, 0);
+static OWL_GATE(ddr0_clk, "ddr0_clk", "ddr_pll_clk", CMU_DEVCLKEN0, 31, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(ddr1_clk, "ddr1_clk", "ddr_pll_clk", CMU_DEVCLKEN0, 29, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE_NO_PARENT(usb3_480mpll0_clk, "usb3_480mpll0_clk", CMU_USBPLL, 3, 0, 0);
+static OWL_GATE_NO_PARENT(usb3_480mphy0_clk, "usb3_480mphy0_clk", CMU_USBPLL, 2, 0, 0);
+static OWL_GATE_NO_PARENT(usb3_5gphy_clk, "usb3_5gphy_clk", CMU_USBPLL, 1, 0, 0);
+static OWL_GATE_NO_PARENT(usb3_cce_clk, "usb3_cce_clk", CMU_USBPLL, 0, 0, 0);
+static OWL_GATE(edp24M_clk, "edp24M_clk", "diff24M", CMU_EDPCLK, 8, 0, 0);
+static OWL_GATE(edp_link_clk, "edp_link_clk", "edp_pll_clk", CMU_DEVCLKEN0, 10, 0, 0);
+static OWL_GATE_NO_PARENT(usbh0_pllen_clk, "usbh0_pllen_clk", CMU_USBPLL, 12, 0, 0);
+static OWL_GATE_NO_PARENT(usbh0_phy_clk, "usbh0_phy_clk", CMU_USBPLL, 10, 0, 0);
+static OWL_GATE_NO_PARENT(usbh0_cce_clk, "usbh0_cce_clk", CMU_USBPLL, 8, 0, 0);
+static OWL_GATE_NO_PARENT(usbh1_pllen_clk, "usbh1_pllen_clk", CMU_USBPLL, 13, 0, 0);
+static OWL_GATE_NO_PARENT(usbh1_phy_clk, "usbh1_phy_clk", CMU_USBPLL, 11, 0, 0);
+static OWL_GATE_NO_PARENT(usbh1_cce_clk, "usbh1_cce_clk", CMU_USBPLL, 9, 0, 0);
+static OWL_GATE(spi0_clk, "spi0_clk", "ahb_clk", CMU_DEVCLKEN1, 10, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(spi1_clk, "spi1_clk", "ahb_clk", CMU_DEVCLKEN1, 11, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(spi2_clk, "spi2_clk", "ahb_clk", CMU_DEVCLKEN1, 12, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(spi3_clk, "spi3_clk", "ahb_clk", CMU_DEVCLKEN1, 13, 0, CLK_IGNORE_UNUSED);
+
+/* composite clocks */
+static OWL_COMP_FACTOR(bisp_clk, "bisp_clk", bisp_clk_mux_p,
+ OWL_MUX_HW(CMU_BISPCLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_FACTOR_HW(CMU_BISPCLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_DIV(csi0_clk, "csi0_clk", csi_clk_mux_p,
+ OWL_MUX_HW(CMU_CSICLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 13, 0),
+ OWL_DIVIDER_HW(CMU_CSICLK, 0, 4, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(csi1_clk, "csi1_clk", csi_clk_mux_p,
+ OWL_MUX_HW(CMU_CSICLK, 20, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 15, 0),
+ OWL_DIVIDER_HW(CMU_CSICLK, 16, 4, 0, NULL),
+ 0);
+
+static OWL_COMP_PASS(de_clk, "de_clk", de_clk_mux_p,
+ OWL_MUX_HW(CMU_DECLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 8, 0),
+ 0);
+
+static OWL_COMP_FACTOR(dmm_clk, "dmm_clk", dmm_clk_mux_p,
+ OWL_MUX_HW(CMU_BUSCLK, 10, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 19, 0),
+ OWL_FACTOR_HW(CMU_BUSCLK, 12, 3, 0, dmm_factor_table),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_FACTOR(edp_clk, "edp_clk", edp_clk_mux_p,
+ OWL_MUX_HW(CMU_EDPCLK, 19, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 10, 0),
+ OWL_FACTOR_HW(CMU_EDPCLK, 16, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_DIV_FIXED(eth_mac_clk, "eth_mac_clk", "assist_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 22, 0),
+ OWL_DIVIDER_HW(CMU_ASSISTPLL, 10, 1, 0, eth_mac_div_table),
+ 0);
+
+static OWL_COMP_FACTOR(gpu_core_clk, "gpu_core_clk", gpu_clk_mux_p,
+ OWL_MUX_HW(CMU_GPU3DCLK, 4, 2),
+ OWL_GATE_HW(CMU_GPU3DCLK, 15, 0),
+ OWL_FACTOR_HW(CMU_GPU3DCLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(gpu_mem_clk, "gpu_mem_clk", gpu_clk_mux_p,
+ OWL_MUX_HW(CMU_GPU3DCLK, 20, 2),
+ OWL_GATE_HW(CMU_GPU3DCLK, 14, 0),
+ OWL_FACTOR_HW(CMU_GPU3DCLK, 16, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(gpu_sys_clk, "gpu_sys_clk", gpu_clk_mux_p,
+ OWL_MUX_HW(CMU_GPU3DCLK, 28, 2),
+ OWL_GATE_HW(CMU_GPU3DCLK, 13, 0),
+ OWL_FACTOR_HW(CMU_GPU3DCLK, 24, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(hde_clk, "hde_clk", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_HDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 27, 0),
+ OWL_FACTOR_HW(CMU_HDECLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_DIV(hdmia_clk, "hdmia_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 22, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 24, 4, 0, hdmia_div_table),
+ 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c0_clk, "i2c0_clk", "assist_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 14, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c1_clk, "i2c1_clk", "assist_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 15, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c2_clk, "i2c2_clk", "assist_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 30, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c3_clk, "i2c3_clk", "assist_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 31, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c4_clk, "i2c4_clk", "assist_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN0, 17, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c5_clk, "i2c5_clk", "assist_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 1, 0),
+ 1, 5, 0);
+
+static OWL_COMP_DIV(i2srx_clk, "i2srx_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 21, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 20, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_DIV(i2stx_clk, "i2stx_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 20, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 16, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_FACTOR(imx_clk, "imx_clk", imx_clk_mux_p,
+ OWL_MUX_HW(CMU_IMXCLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 17, 0),
+ OWL_FACTOR_HW(CMU_IMXCLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_DIV(lcd_clk, "lcd_clk", lcd_clk_mux_p,
+ OWL_MUX_HW(CMU_LCDCLK, 12, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 9, 0),
+ OWL_DIVIDER_HW(CMU_LCDCLK, 0, 5, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(nand0_clk, "nand0_clk", nand_clk_mux_p,
+ OWL_MUX_HW(CMU_NANDCCLK, 8, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 4, 0),
+ OWL_DIVIDER_HW(CMU_NANDCCLK, 0, 4, 0, nand_div_table),
+ CLK_SET_RATE_PARENT);
+
+static OWL_COMP_DIV(nand1_clk, "nand1_clk", nand_clk_mux_p,
+ OWL_MUX_HW(CMU_NANDCCLK, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 11, 0),
+ OWL_DIVIDER_HW(CMU_NANDCCLK, 16, 4, 0, nand_div_table),
+ CLK_SET_RATE_PARENT);
+
+static OWL_COMP_DIV_FIXED(pwm0_clk, "pwm0_clk", "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 23, 0),
+ OWL_DIVIDER_HW(CMU_PWM0CLK, 0, 6, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV_FIXED(pwm1_clk, "pwm1_clk", "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 24, 0),
+ OWL_DIVIDER_HW(CMU_PWM1CLK, 0, 6, 0, NULL),
+ 0);
+/*
+ * pwm2 may be for backlight, do not gate it
+ * even it is "unused", because it may be
+ * enabled at boot stage, and in kernel, driver
+ * has no effective method to know the real status,
+ * so, the best way is keeping it as what it was.
+ */
+static OWL_COMP_DIV_FIXED(pwm2_clk, "pwm2_clk", "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 25, 0),
+ OWL_DIVIDER_HW(CMU_PWM2CLK, 0, 6, 0, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV_FIXED(pwm3_clk, "pwm3_clk", "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 26, 0),
+ OWL_DIVIDER_HW(CMU_PWM3CLK, 0, 6, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV_FIXED(pwm4_clk, "pwm4_clk", "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 4, 0),
+ OWL_DIVIDER_HW(CMU_PWM4CLK, 0, 6, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV_FIXED(pwm5_clk, "pwm5_clk", "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 5, 0),
+ OWL_DIVIDER_HW(CMU_PWM5CLK, 0, 6, 0, NULL),
+ 0);
+
+static OWL_COMP_FACTOR(sd0_clk, "sd0_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD0CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 5, 0),
+ OWL_FACTOR_HW(CMU_SD0CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(sd1_clk, "sd1_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD1CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 6, 0),
+ OWL_FACTOR_HW(CMU_SD1CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(sd2_clk, "sd2_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD2CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 7, 0),
+ OWL_FACTOR_HW(CMU_SD2CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(sd3_clk, "sd3_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD3CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 16, 0),
+ OWL_FACTOR_HW(CMU_SD3CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_DIV(sensor_clk, "sensor_clk", sensor_clk_mux_p,
+ OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_DIVIDER_HW(CMU_SENSORCLK, 0, 4, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV_FIXED(speed_sensor_clk, "speed_sensor_clk",
+ "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 0, 0),
+ OWL_DIVIDER_HW(CMU_TLSCLK, 0, 4, CLK_DIVIDER_POWER_OF_TWO, NULL),
+ 0);
+
+static OWL_COMP_DIV_FIXED(thermal_sensor_clk, "thermal_sensor_clk",
+ "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 2, 0),
+ OWL_DIVIDER_HW(CMU_TLSCLK, 8, 4, CLK_DIVIDER_POWER_OF_TWO, NULL),
+ 0);
+
+static OWL_COMP_DIV(uart0_clk, "uart0_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART0CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 6, 0),
+ OWL_DIVIDER_HW(CMU_UART0CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART1CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 7, 0),
+ OWL_DIVIDER_HW(CMU_UART1CLK, 1, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart2_clk, "uart2_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART2CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
+ OWL_DIVIDER_HW(CMU_UART2CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart3_clk, "uart3_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART3CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
+ OWL_DIVIDER_HW(CMU_UART3CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart4_clk, "uart4_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART4CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
+ OWL_DIVIDER_HW(CMU_UART4CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart5_clk, "uart5_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART5CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
+ OWL_DIVIDER_HW(CMU_UART5CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart6_clk, "uart6_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART6CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
+ OWL_DIVIDER_HW(CMU_UART6CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_FACTOR(vce_clk, "vce_clk", vce_clk_mux_p,
+ OWL_MUX_HW(CMU_VCECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 26, 0),
+ OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(vde_clk, "vde_clk", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 25, 0),
+ OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static struct owl_clk_common *s900_clks[] = {
+ &core_pll_clk.common,
+ &dev_pll_clk.common,
+ &ddr_pll_clk.common,
+ &nand_pll_clk.common,
+ &display_pll_clk.common,
+ &assist_pll_clk.common,
+ &audio_pll_clk.common,
+ &edp_pll_clk.common,
+ &cpu_clk.common,
+ &dev_clk.common,
+ &noc_clk_mux.common,
+ &noc_clk_div.common,
+ &ahb_clk.common,
+ &apb_clk.common,
+ &usb3_mac_clk.common,
+ &rmii_ref_clk.common,
+ &noc_clk.common,
+ &de_clk1.common,
+ &de_clk2.common,
+ &de_clk3.common,
+ &gpio_clk.common,
+ &gpu_clk.common,
+ &dmac_clk.common,
+ &timer_clk.common,
+ &dsi_clk.common,
+ &ddr0_clk.common,
+ &ddr1_clk.common,
+ &usb3_480mpll0_clk.common,
+ &usb3_480mphy0_clk.common,
+ &usb3_5gphy_clk.common,
+ &usb3_cce_clk.common,
+ &edp24M_clk.common,
+ &edp_link_clk.common,
+ &usbh0_pllen_clk.common,
+ &usbh0_phy_clk.common,
+ &usbh0_cce_clk.common,
+ &usbh1_pllen_clk.common,
+ &usbh1_phy_clk.common,
+ &usbh1_cce_clk.common,
+ &i2c0_clk.common,
+ &i2c1_clk.common,
+ &i2c2_clk.common,
+ &i2c3_clk.common,
+ &i2c4_clk.common,
+ &i2c5_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &spi2_clk.common,
+ &spi3_clk.common,
+ &bisp_clk.common,
+ &csi0_clk.common,
+ &csi1_clk.common,
+ &de_clk.common,
+ &dmm_clk.common,
+ &edp_clk.common,
+ &eth_mac_clk.common,
+ &gpu_core_clk.common,
+ &gpu_mem_clk.common,
+ &gpu_sys_clk.common,
+ &hde_clk.common,
+ &hdmia_clk.common,
+ &i2srx_clk.common,
+ &i2stx_clk.common,
+ &imx_clk.common,
+ &lcd_clk.common,
+ &nand0_clk.common,
+ &nand1_clk.common,
+ &pwm0_clk.common,
+ &pwm1_clk.common,
+ &pwm2_clk.common,
+ &pwm3_clk.common,
+ &pwm4_clk.common,
+ &pwm5_clk.common,
+ &sd0_clk.common,
+ &sd1_clk.common,
+ &sd2_clk.common,
+ &sd3_clk.common,
+ &sensor_clk.common,
+ &speed_sensor_clk.common,
+ &thermal_sensor_clk.common,
+ &uart0_clk.common,
+ &uart1_clk.common,
+ &uart2_clk.common,
+ &uart3_clk.common,
+ &uart4_clk.common,
+ &uart5_clk.common,
+ &uart6_clk.common,
+ &vce_clk.common,
+ &vde_clk.common,
+};
+
+static struct clk_hw_onecell_data s900_hw_clks = {
+ .hws = {
+ [CLK_CORE_PLL] = &core_pll_clk.common.hw,
+ [CLK_DEV_PLL] = &dev_pll_clk.common.hw,
+ [CLK_DDR_PLL] = &ddr_pll_clk.common.hw,
+ [CLK_NAND_PLL] = &nand_pll_clk.common.hw,
+ [CLK_DISPLAY_PLL] = &display_pll_clk.common.hw,
+ [CLK_ASSIST_PLL] = &assist_pll_clk.common.hw,
+ [CLK_AUDIO_PLL] = &audio_pll_clk.common.hw,
+ [CLK_EDP_PLL] = &edp_pll_clk.common.hw,
+ [CLK_CPU] = &cpu_clk.common.hw,
+ [CLK_DEV] = &dev_clk.common.hw,
+ [CLK_NOC_MUX] = &noc_clk_mux.common.hw,
+ [CLK_NOC_DIV] = &noc_clk_div.common.hw,
+ [CLK_AHB] = &ahb_clk.common.hw,
+ [CLK_APB] = &apb_clk.common.hw,
+ [CLK_USB3_MAC] = &usb3_mac_clk.common.hw,
+ [CLK_RMII_REF] = &rmii_ref_clk.common.hw,
+ [CLK_NOC] = &noc_clk.common.hw,
+ [CLK_DE1] = &de_clk1.common.hw,
+ [CLK_DE2] = &de_clk2.common.hw,
+ [CLK_DE3] = &de_clk3.common.hw,
+ [CLK_GPIO] = &gpio_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_DMAC] = &dmac_clk.common.hw,
+ [CLK_TIMER] = &timer_clk.common.hw,
+ [CLK_DSI] = &dsi_clk.common.hw,
+ [CLK_DDR0] = &ddr0_clk.common.hw,
+ [CLK_DDR1] = &ddr1_clk.common.hw,
+ [CLK_USB3_480MPLL0] = &usb3_480mpll0_clk.common.hw,
+ [CLK_USB3_480MPHY0] = &usb3_480mphy0_clk.common.hw,
+ [CLK_USB3_5GPHY] = &usb3_5gphy_clk.common.hw,
+ [CLK_USB3_CCE] = &usb3_cce_clk.common.hw,
+ [CLK_24M_EDP] = &edp24M_clk.common.hw,
+ [CLK_EDP_LINK] = &edp_link_clk.common.hw,
+ [CLK_USB2H0_PLLEN] = &usbh0_pllen_clk.common.hw,
+ [CLK_USB2H0_PHY] = &usbh0_phy_clk.common.hw,
+ [CLK_USB2H0_CCE] = &usbh0_cce_clk.common.hw,
+ [CLK_USB2H1_PLLEN] = &usbh1_pllen_clk.common.hw,
+ [CLK_USB2H1_PHY] = &usbh1_phy_clk.common.hw,
+ [CLK_USB2H1_CCE] = &usbh1_cce_clk.common.hw,
+ [CLK_I2C0] = &i2c0_clk.common.hw,
+ [CLK_I2C1] = &i2c1_clk.common.hw,
+ [CLK_I2C2] = &i2c2_clk.common.hw,
+ [CLK_I2C3] = &i2c3_clk.common.hw,
+ [CLK_I2C4] = &i2c4_clk.common.hw,
+ [CLK_I2C5] = &i2c5_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_SPI3] = &spi3_clk.common.hw,
+ [CLK_BISP] = &bisp_clk.common.hw,
+ [CLK_CSI0] = &csi0_clk.common.hw,
+ [CLK_CSI1] = &csi1_clk.common.hw,
+ [CLK_DE0] = &de_clk.common.hw,
+ [CLK_DMM] = &dmm_clk.common.hw,
+ [CLK_EDP] = &edp_clk.common.hw,
+ [CLK_ETH_MAC] = &eth_mac_clk.common.hw,
+ [CLK_GPU_CORE] = &gpu_core_clk.common.hw,
+ [CLK_GPU_MEM] = &gpu_mem_clk.common.hw,
+ [CLK_GPU_SYS] = &gpu_sys_clk.common.hw,
+ [CLK_HDE] = &hde_clk.common.hw,
+ [CLK_HDMI_AUDIO] = &hdmia_clk.common.hw,
+ [CLK_I2SRX] = &i2srx_clk.common.hw,
+ [CLK_I2STX] = &i2stx_clk.common.hw,
+ [CLK_IMX] = &imx_clk.common.hw,
+ [CLK_LCD] = &lcd_clk.common.hw,
+ [CLK_NAND0] = &nand0_clk.common.hw,
+ [CLK_NAND1] = &nand1_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_PWM4] = &pwm4_clk.common.hw,
+ [CLK_PWM5] = &pwm5_clk.common.hw,
+ [CLK_SD0] = &sd0_clk.common.hw,
+ [CLK_SD1] = &sd1_clk.common.hw,
+ [CLK_SD2] = &sd2_clk.common.hw,
+ [CLK_SD3] = &sd3_clk.common.hw,
+ [CLK_SENSOR] = &sensor_clk.common.hw,
+ [CLK_SPEED_SENSOR] = &speed_sensor_clk.common.hw,
+ [CLK_THERMAL_SENSOR] = &thermal_sensor_clk.common.hw,
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART1] = &uart1_clk.common.hw,
+ [CLK_UART2] = &uart2_clk.common.hw,
+ [CLK_UART3] = &uart3_clk.common.hw,
+ [CLK_UART4] = &uart4_clk.common.hw,
+ [CLK_UART5] = &uart5_clk.common.hw,
+ [CLK_UART6] = &uart6_clk.common.hw,
+ [CLK_VCE] = &vce_clk.common.hw,
+ [CLK_VDE] = &vde_clk.common.hw,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static const struct owl_clk_desc s900_clk_desc = {
+ .clks = s900_clks,
+ .num_clks = ARRAY_SIZE(s900_clks),
+
+ .hw_clks = &s900_hw_clks,
+};
+
+static int s900_clk_probe(struct platform_device *pdev)
+{
+ const struct owl_clk_desc *desc;
+
+ desc = &s900_clk_desc;
+ owl_clk_regmap_init(pdev, desc);
+
+ return owl_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static const struct of_device_id s900_clk_of_match[] = {
+ { .compatible = "actions,s900-cmu", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver s900_clk_driver = {
+ .probe = s900_clk_probe,
+ .driver = {
+ .name = "s900-cmu",
+ .of_match_table = s900_clk_of_match,
+ },
+};
+
+static int __init s900_clk_init(void)
+{
+ return platform_driver_register(&s900_clk_driver);
+}
+core_initcall(s900_clk_init);
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 7d3223fc7161..72b6091eb7b9 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -132,19 +132,8 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll *pll = to_clk_pll(hw);
- unsigned int pllr;
- u16 mul;
- u8 div;
-
- regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
-
- div = PLL_DIV(pllr);
- mul = PLL_MUL(pllr, pll->layout);
-
- if (!div || !mul)
- return 0;
- return (parent_rate / div) * (mul + 1);
+ return (parent_rate / pll->div) * (pll->mul + 1);
}
static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 6d4e69edfb36..9e0b2f2b48e7 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -394,25 +394,21 @@ out:
return count * 1000;
}
-static int bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base,
+static void bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base,
struct debugfs_reg32 *regs, size_t nregs,
struct dentry *dentry)
{
- struct dentry *regdump;
struct debugfs_regset32 *regset;
regset = devm_kzalloc(cprman->dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
- return -ENOMEM;
+ return;
regset->regs = regs;
regset->nregs = nregs;
regset->base = cprman->regs + base;
- regdump = debugfs_create_regset32("regdump", S_IRUGO, dentry,
- regset);
-
- return regdump ? 0 : -ENOMEM;
+ debugfs_create_regset32("regdump", S_IRUGO, dentry, regset);
}
struct bcm2835_pll_data {
@@ -730,7 +726,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
return 0;
}
-static int bcm2835_pll_debug_init(struct clk_hw *hw,
+static void bcm2835_pll_debug_init(struct clk_hw *hw,
struct dentry *dentry)
{
struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
@@ -740,7 +736,7 @@ static int bcm2835_pll_debug_init(struct clk_hw *hw,
regs = devm_kzalloc(cprman->dev, 7 * sizeof(*regs), GFP_KERNEL);
if (!regs)
- return -ENOMEM;
+ return;
regs[0].name = "cm_ctrl";
regs[0].offset = data->cm_ctrl_reg;
@@ -757,7 +753,7 @@ static int bcm2835_pll_debug_init(struct clk_hw *hw,
regs[6].name = "ana3";
regs[6].offset = data->ana_reg_base + 3 * 4;
- return bcm2835_debugfs_regset(cprman, 0, regs, 7, dentry);
+ bcm2835_debugfs_regset(cprman, 0, regs, 7, dentry);
}
static const struct clk_ops bcm2835_pll_clk_ops = {
@@ -861,8 +857,8 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
return 0;
}
-static int bcm2835_pll_divider_debug_init(struct clk_hw *hw,
- struct dentry *dentry)
+static void bcm2835_pll_divider_debug_init(struct clk_hw *hw,
+ struct dentry *dentry)
{
struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
struct bcm2835_cprman *cprman = divider->cprman;
@@ -871,14 +867,14 @@ static int bcm2835_pll_divider_debug_init(struct clk_hw *hw,
regs = devm_kzalloc(cprman->dev, 7 * sizeof(*regs), GFP_KERNEL);
if (!regs)
- return -ENOMEM;
+ return;
regs[0].name = "cm";
regs[0].offset = data->cm_reg;
regs[1].name = "a2w";
regs[1].offset = data->a2w_reg;
- return bcm2835_debugfs_regset(cprman, 0, regs, 2, dentry);
+ bcm2835_debugfs_regset(cprman, 0, regs, 2, dentry);
}
static const struct clk_ops bcm2835_pll_divider_clk_ops = {
@@ -1254,15 +1250,14 @@ static struct debugfs_reg32 bcm2835_debugfs_clock_reg32[] = {
},
};
-static int bcm2835_clock_debug_init(struct clk_hw *hw,
+static void bcm2835_clock_debug_init(struct clk_hw *hw,
struct dentry *dentry)
{
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct bcm2835_cprman *cprman = clock->cprman;
const struct bcm2835_clock_data *data = clock->data;
- return bcm2835_debugfs_regset(
- cprman, data->ctl_reg,
+ bcm2835_debugfs_regset(cprman, data->ctl_reg,
bcm2835_debugfs_clock_reg32,
ARRAY_SIZE(bcm2835_debugfs_clock_reg32),
dentry);
@@ -1395,7 +1390,7 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
struct bcm2835_clock *clock;
struct clk_init_data init;
const char *parents[1 << CM_SRC_BITS];
- size_t i, j;
+ size_t i;
int ret;
/*
@@ -1405,12 +1400,11 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
for (i = 0; i < data->num_mux_parents; i++) {
parents[i] = data->parents[i];
- for (j = 0; j < ARRAY_SIZE(cprman_parent_names); j++) {
- if (strcmp(parents[i], cprman_parent_names[j]) == 0) {
- parents[i] = cprman->real_parent_names[j];
- break;
- }
- }
+ ret = match_string(cprman_parent_names,
+ ARRAY_SIZE(cprman_parent_names),
+ parents[i]);
+ if (ret >= 0)
+ parents[i] = cprman->real_parent_names[ret];
}
memset(&init, 0, sizeof(init));
diff --git a/drivers/clk/bcm/clk-sr.c b/drivers/clk/bcm/clk-sr.c
index adc74f4584cf..7b9efc0212a8 100644
--- a/drivers/clk/bcm/clk-sr.c
+++ b/drivers/clk/bcm/clk-sr.c
@@ -56,8 +56,8 @@ static const struct iproc_pll_ctrl sr_genpll0 = {
};
static const struct iproc_clk_ctrl sr_genpll0_clk[] = {
- [BCM_SR_GENPLL0_SATA_CLK] = {
- .channel = BCM_SR_GENPLL0_SATA_CLK,
+ [BCM_SR_GENPLL0_125M_CLK] = {
+ .channel = BCM_SR_GENPLL0_125M_CLK,
.flags = IPROC_CLK_AON,
.enable = ENABLE_VAL(0x4, 6, 0, 12),
.mdiv = REG_VAL(0x18, 0, 9),
@@ -102,6 +102,65 @@ static int sr_genpll0_clk_init(struct platform_device *pdev)
return 0;
}
+static const struct iproc_pll_ctrl sr_genpll2 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 1, 13, 12),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll2_clk[] = {
+ [BCM_SR_GENPLL2_NIC_CLK] = {
+ .channel = BCM_SR_GENPLL2_NIC_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+ [BCM_SR_GENPLL2_TS_500_CLK] = {
+ .channel = BCM_SR_GENPLL2_TS_500_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+ [BCM_SR_GENPLL2_125_NITRO_CLK] = {
+ .channel = BCM_SR_GENPLL2_125_NITRO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x18, 20, 9),
+ },
+ [BCM_SR_GENPLL2_CHIMP_CLK] = {
+ .channel = BCM_SR_GENPLL2_CHIMP_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 9, 3, 15),
+ .mdiv = REG_VAL(0x1c, 0, 9),
+ },
+ [BCM_SR_GENPLL2_NIC_FLASH_CLK] = {
+ .channel = BCM_SR_GENPLL2_NIC_FLASH_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 10, 4, 16),
+ .mdiv = REG_VAL(0x1c, 10, 9),
+ },
+ [BCM_SR_GENPLL2_FS4_CLK] = {
+ .channel = BCM_SR_GENPLL2_FS4_CLK,
+ .enable = ENABLE_VAL(0x4, 11, 5, 17),
+ .mdiv = REG_VAL(0x1c, 20, 9),
+ },
+};
+
+static int sr_genpll2_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_genpll2, NULL, 0, sr_genpll2_clk,
+ ARRAY_SIZE(sr_genpll2_clk));
+ return 0;
+}
+
static const struct iproc_pll_ctrl sr_genpll3 = {
.flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
IPROC_CLK_PLL_NEEDS_SW_CFG,
@@ -157,6 +216,30 @@ static const struct iproc_clk_ctrl sr_genpll4_clk[] = {
.enable = ENABLE_VAL(0x4, 6, 0, 12),
.mdiv = REG_VAL(0x18, 0, 9),
},
+ [BCM_SR_GENPLL4_TPIU_PLL_CLK] = {
+ .channel = BCM_SR_GENPLL4_TPIU_PLL_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+ [BCM_SR_GENPLL4_NOC_CLK] = {
+ .channel = BCM_SR_GENPLL4_NOC_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x18, 20, 9),
+ },
+ [BCM_SR_GENPLL4_CHCLK_FS4_CLK] = {
+ .channel = BCM_SR_GENPLL4_CHCLK_FS4_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 9, 3, 15),
+ .mdiv = REG_VAL(0x1c, 0, 9),
+ },
+ [BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK] = {
+ .channel = BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 10, 4, 16),
+ .mdiv = REG_VAL(0x1c, 10, 9),
+ },
};
static int sr_genpll4_clk_init(struct platform_device *pdev)
@@ -181,18 +264,21 @@ static const struct iproc_pll_ctrl sr_genpll5 = {
};
static const struct iproc_clk_ctrl sr_genpll5_clk[] = {
- [BCM_SR_GENPLL5_FS_CLK] = {
- .channel = BCM_SR_GENPLL5_FS_CLK,
- .flags = IPROC_CLK_AON,
+ [BCM_SR_GENPLL5_FS4_HF_CLK] = {
+ .channel = BCM_SR_GENPLL5_FS4_HF_CLK,
.enable = ENABLE_VAL(0x4, 6, 0, 12),
.mdiv = REG_VAL(0x18, 0, 9),
},
- [BCM_SR_GENPLL5_SPU_CLK] = {
- .channel = BCM_SR_GENPLL5_SPU_CLK,
- .flags = IPROC_CLK_AON,
- .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ [BCM_SR_GENPLL5_CRYPTO_AE_CLK] = {
+ .channel = BCM_SR_GENPLL5_CRYPTO_AE_CLK,
+ .enable = ENABLE_VAL(0x4, 7, 1, 12),
.mdiv = REG_VAL(0x18, 10, 9),
},
+ [BCM_SR_GENPLL5_RAID_AE_CLK] = {
+ .channel = BCM_SR_GENPLL5_RAID_AE_CLK,
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x18, 20, 9),
+ },
};
static int sr_genpll5_clk_init(struct platform_device *pdev)
@@ -214,24 +300,30 @@ static const struct iproc_pll_ctrl sr_lcpll0 = {
};
static const struct iproc_clk_ctrl sr_lcpll0_clk[] = {
- [BCM_SR_LCPLL0_SATA_REF_CLK] = {
- .channel = BCM_SR_LCPLL0_SATA_REF_CLK,
+ [BCM_SR_LCPLL0_SATA_REFP_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_REFP_CLK,
.flags = IPROC_CLK_AON,
.enable = ENABLE_VAL(0x0, 7, 1, 13),
.mdiv = REG_VAL(0x14, 0, 9),
},
- [BCM_SR_LCPLL0_USB_REF_CLK] = {
- .channel = BCM_SR_LCPLL0_USB_REF_CLK,
+ [BCM_SR_LCPLL0_SATA_REFN_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_REFN_CLK,
.flags = IPROC_CLK_AON,
.enable = ENABLE_VAL(0x0, 8, 2, 14),
.mdiv = REG_VAL(0x14, 10, 9),
},
- [BCM_SR_LCPLL0_SATA_REFPN_CLK] = {
- .channel = BCM_SR_LCPLL0_SATA_REFPN_CLK,
+ [BCM_SR_LCPLL0_SATA_350_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_350_CLK,
.flags = IPROC_CLK_AON,
.enable = ENABLE_VAL(0x0, 9, 3, 15),
.mdiv = REG_VAL(0x14, 20, 9),
},
+ [BCM_SR_LCPLL0_SATA_500_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_500_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 10, 4, 16),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
};
static int sr_lcpll0_clk_init(struct platform_device *pdev)
@@ -259,6 +351,18 @@ static const struct iproc_clk_ctrl sr_lcpll1_clk[] = {
.enable = ENABLE_VAL(0x0, 7, 1, 13),
.mdiv = REG_VAL(0x14, 0, 9),
},
+ [BCM_SR_LCPLL1_USB_REF_CLK] = {
+ .channel = BCM_SR_LCPLL1_USB_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 8, 2, 14),
+ .mdiv = REG_VAL(0x14, 10, 9),
+ },
+ [BCM_SR_LCPLL1_CRMU_TS_CLK] = {
+ .channel = BCM_SR_LCPLL1_CRMU_TS_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 9, 3, 15),
+ .mdiv = REG_VAL(0x14, 20, 9),
+ },
};
static int sr_lcpll1_clk_init(struct platform_device *pdev)
@@ -298,6 +402,7 @@ static int sr_lcpll_pcie_clk_init(struct platform_device *pdev)
static const struct of_device_id sr_clk_dt_ids[] = {
{ .compatible = "brcm,sr-genpll0", .data = sr_genpll0_clk_init },
+ { .compatible = "brcm,sr-genpll2", .data = sr_genpll2_clk_init },
{ .compatible = "brcm,sr-genpll4", .data = sr_genpll4_clk_init },
{ .compatible = "brcm,sr-genpll5", .data = sr_genpll5_clk_init },
{ .compatible = "brcm,sr-lcpll0", .data = sr_lcpll0_clk_init },
diff --git a/drivers/clk/berlin/berlin2-avpll.c b/drivers/clk/berlin/berlin2-avpll.c
index cfcae468e989..aa89b4c9464e 100644
--- a/drivers/clk/berlin/berlin2-avpll.c
+++ b/drivers/clk/berlin/berlin2-avpll.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014 Marvell Technology Group Ltd.
*
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Alexandre Belloni <alexandre.belloni@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk-provider.h>
#include <linux/io.h>
diff --git a/drivers/clk/berlin/berlin2-avpll.h b/drivers/clk/berlin/berlin2-avpll.h
index 17e311153b42..f3af34dc2bee 100644
--- a/drivers/clk/berlin/berlin2-avpll.h
+++ b/drivers/clk/berlin/berlin2-avpll.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Marvell Technology Group Ltd.
*
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Alexandre Belloni <alexandre.belloni@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __BERLIN2_AVPLL_H
#define __BERLIN2_AVPLL_H
diff --git a/drivers/clk/berlin/berlin2-div.c b/drivers/clk/berlin/berlin2-div.c
index 41ab2d392c57..4d0be66aa6a8 100644
--- a/drivers/clk/berlin/berlin2-div.c
+++ b/drivers/clk/berlin/berlin2-div.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014 Marvell Technology Group Ltd.
*
* Alexandre Belloni <alexandre.belloni@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bitops.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/berlin/berlin2-div.h b/drivers/clk/berlin/berlin2-div.h
index e835ddf8374a..d4da64325190 100644
--- a/drivers/clk/berlin/berlin2-div.h
+++ b/drivers/clk/berlin/berlin2-div.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Marvell Technology Group Ltd.
*
* Alexandre Belloni <alexandre.belloni@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __BERLIN2_DIV_H
#define __BERLIN2_DIV_H
diff --git a/drivers/clk/berlin/berlin2-pll.c b/drivers/clk/berlin/berlin2-pll.c
index 4ffbe80f6323..9661820717a5 100644
--- a/drivers/clk/berlin/berlin2-pll.c
+++ b/drivers/clk/berlin/berlin2-pll.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014 Marvell Technology Group Ltd.
*
* Alexandre Belloni <alexandre.belloni@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk-provider.h>
#include <linux/io.h>
diff --git a/drivers/clk/berlin/berlin2-pll.h b/drivers/clk/berlin/berlin2-pll.h
index 583e024b9bed..3757fb25c4e8 100644
--- a/drivers/clk/berlin/berlin2-pll.h
+++ b/drivers/clk/berlin/berlin2-pll.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Marvell Technology Group Ltd.
*
* Alexandre Belloni <alexandre.belloni@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __BERLIN2_PLL_H
#define __BERLIN2_PLL_H
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index 45fb888bf0a0..0b4b44a2579e 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014 Marvell Technology Group Ltd.
*
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Alexandre Belloni <alexandre.belloni@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index db7364e15c8b..9b9db743df25 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014 Marvell Technology Group Ltd.
*
* Alexandre Belloni <alexandre.belloni@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/berlin/common.h b/drivers/clk/berlin/common.h
index bc68a14c4550..1afb3c29b796 100644
--- a/drivers/clk/berlin/common.h
+++ b/drivers/clk/berlin/common.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Marvell Technology Group Ltd.
*
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Alexandre Belloni <alexandre.belloni@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __BERLIN2_COMMON_H
#define __BERLIN2_COMMON_H
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 7abe4232d282..38b366b00c57 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -14,7 +14,9 @@
#include <dt-bindings/clock/aspeed-clock.h>
-#define ASPEED_NUM_CLKS 35
+#define ASPEED_NUM_CLKS 36
+
+#define ASPEED_RESET2_OFFSET 32
#define ASPEED_RESET_CTRL 0x04
#define ASPEED_CLK_SELECTION 0x08
@@ -30,6 +32,7 @@
#define CLKIN_25MHZ_EN BIT(23)
#define AST2400_CLK_SOURCE_SEL BIT(18)
#define ASPEED_CLK_SELECTION_2 0xd8
+#define ASPEED_RESET_CTRL2 0xd4
/* Globally visible clocks */
static DEFINE_SPINLOCK(aspeed_clk_lock);
@@ -88,7 +91,7 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
[ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
- [ASPEED_CLK_GATE_BCLK] = { 4, 10, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
+ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
@@ -291,47 +294,72 @@ struct aspeed_reset {
#define to_aspeed_reset(p) container_of((p), struct aspeed_reset, rcdev)
static const u8 aspeed_resets[] = {
+ /* SCU04 resets */
[ASPEED_RESET_XDMA] = 25,
[ASPEED_RESET_MCTP] = 24,
[ASPEED_RESET_ADC] = 23,
[ASPEED_RESET_JTAG_MASTER] = 22,
[ASPEED_RESET_MIC] = 18,
[ASPEED_RESET_PWM] = 9,
- [ASPEED_RESET_PCIVGA] = 8,
+ [ASPEED_RESET_PECI] = 10,
[ASPEED_RESET_I2C] = 2,
[ASPEED_RESET_AHB] = 1,
+
+ /*
+ * SCUD4 resets start at an offset to separate them from
+ * the SCU04 resets.
+ */
+ [ASPEED_RESET_CRT1] = ASPEED_RESET2_OFFSET + 5,
};
static int aspeed_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
- u32 rst = BIT(aspeed_resets[id]);
+ u32 reg = ASPEED_RESET_CTRL;
+ u32 bit = aspeed_resets[id];
- return regmap_update_bits(ar->map, ASPEED_RESET_CTRL, rst, 0);
+ if (bit >= ASPEED_RESET2_OFFSET) {
+ bit -= ASPEED_RESET2_OFFSET;
+ reg = ASPEED_RESET_CTRL2;
+ }
+
+ return regmap_update_bits(ar->map, reg, BIT(bit), 0);
}
static int aspeed_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
- u32 rst = BIT(aspeed_resets[id]);
+ u32 reg = ASPEED_RESET_CTRL;
+ u32 bit = aspeed_resets[id];
+
+ if (bit >= ASPEED_RESET2_OFFSET) {
+ bit -= ASPEED_RESET2_OFFSET;
+ reg = ASPEED_RESET_CTRL2;
+ }
- return regmap_update_bits(ar->map, ASPEED_RESET_CTRL, rst, rst);
+ return regmap_update_bits(ar->map, reg, BIT(bit), BIT(bit));
}
static int aspeed_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
- u32 val, rst = BIT(aspeed_resets[id]);
- int ret;
+ u32 reg = ASPEED_RESET_CTRL;
+ u32 bit = aspeed_resets[id];
+ int ret, val;
+
+ if (bit >= ASPEED_RESET2_OFFSET) {
+ bit -= ASPEED_RESET2_OFFSET;
+ reg = ASPEED_RESET_CTRL2;
+ }
- ret = regmap_read(ar->map, ASPEED_RESET_CTRL, &val);
+ ret = regmap_read(ar->map, reg, &val);
if (ret)
return ret;
- return !!(val & rst);
+ return !!(val & BIT(bit));
}
static const struct reset_control_ops aspeed_reset_ops = {
@@ -474,6 +502,13 @@ static int aspeed_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_BCLK] = hw;
+ /* Fixed 24MHz clock */
+ hw = clk_hw_register_fixed_rate(NULL, "fixed-24m", "clkin",
+ 0, 24000000);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_24M] = hw;
+
/*
* TODO: There are a number of clocks that not included in this driver
* as more information is required:
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
index 4c10456f8a32..6904ed6da504 100644
--- a/drivers/clk/clk-bulk.c
+++ b/drivers/clk/clk-bulk.c
@@ -42,8 +42,9 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
clks[i].clk = clk_get(dev, clks[i].id);
if (IS_ERR(clks[i].clk)) {
ret = PTR_ERR(clks[i].clk);
- dev_err(dev, "Failed to get clk '%s': %d\n",
- clks[i].id, ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clk '%s': %d\n",
+ clks[i].id, ret);
clks[i].clk = NULL;
goto err;
}
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
new file mode 100644
index 000000000000..740af90a9508
--- /dev/null
+++ b/drivers/clk/clk-npcm7xx.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NPCM7xx Clock Generator
+ * All the clocks are initialized by the bootloader, so this driver allows only
+ * reading of current settings directly from the hardware.
+ *
+ * Copyright (C) 2018 Nuvoton Technologies tali.perry@nuvoton.com
+ */
+
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/bitfield.h>
+
+#include <dt-bindings/clock/nuvoton,npcm7xx-clock.h>
+
+struct npcm7xx_clk_pll {
+ struct clk_hw hw;
+ void __iomem *pllcon;
+ u8 flags;
+};
+
+#define to_npcm7xx_clk_pll(_hw) container_of(_hw, struct npcm7xx_clk_pll, hw)
+
+#define PLLCON_LOKI BIT(31)
+#define PLLCON_LOKS BIT(30)
+#define PLLCON_FBDV GENMASK(27, 16)
+#define PLLCON_OTDV2 GENMASK(15, 13)
+#define PLLCON_PWDEN BIT(12)
+#define PLLCON_OTDV1 GENMASK(10, 8)
+#define PLLCON_INDV GENMASK(5, 0)
+
+static unsigned long npcm7xx_clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct npcm7xx_clk_pll *pll = to_npcm7xx_clk_pll(hw);
+ unsigned long fbdv, indv, otdv1, otdv2;
+ unsigned int val;
+ u64 ret;
+
+ if (parent_rate == 0) {
+ pr_err("%s: parent rate is zero", __func__);
+ return 0;
+ }
+
+ val = readl_relaxed(pll->pllcon);
+
+ indv = FIELD_GET(PLLCON_INDV, val);
+ fbdv = FIELD_GET(PLLCON_FBDV, val);
+ otdv1 = FIELD_GET(PLLCON_OTDV1, val);
+ otdv2 = FIELD_GET(PLLCON_OTDV2, val);
+
+ ret = (u64)parent_rate * fbdv;
+ do_div(ret, indv * otdv1 * otdv2);
+
+ return ret;
+}
+
+static const struct clk_ops npcm7xx_clk_pll_ops = {
+ .recalc_rate = npcm7xx_clk_pll_recalc_rate,
+};
+
+static struct clk_hw *
+npcm7xx_clk_register_pll(void __iomem *pllcon, const char *name,
+ const char *parent_name, unsigned long flags)
+{
+ struct npcm7xx_clk_pll *pll;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pr_debug("%s reg, name=%s, p=%s\n", __func__, name, parent_name);
+
+ init.name = name;
+ init.ops = &npcm7xx_clk_pll_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ pll->pllcon = pllcon;
+ pll->hw.init = &init;
+
+ hw = &pll->hw;
+
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(pll);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+#define NPCM7XX_CLKEN1 (0x00)
+#define NPCM7XX_CLKEN2 (0x28)
+#define NPCM7XX_CLKEN3 (0x30)
+#define NPCM7XX_CLKSEL (0x04)
+#define NPCM7XX_CLKDIV1 (0x08)
+#define NPCM7XX_CLKDIV2 (0x2C)
+#define NPCM7XX_CLKDIV3 (0x58)
+#define NPCM7XX_PLLCON0 (0x0C)
+#define NPCM7XX_PLLCON1 (0x10)
+#define NPCM7XX_PLLCON2 (0x54)
+#define NPCM7XX_SWRSTR (0x14)
+#define NPCM7XX_IRQWAKECON (0x18)
+#define NPCM7XX_IRQWAKEFLAG (0x1C)
+#define NPCM7XX_IPSRST1 (0x20)
+#define NPCM7XX_IPSRST2 (0x24)
+#define NPCM7XX_IPSRST3 (0x34)
+#define NPCM7XX_WD0RCR (0x38)
+#define NPCM7XX_WD1RCR (0x3C)
+#define NPCM7XX_WD2RCR (0x40)
+#define NPCM7XX_SWRSTC1 (0x44)
+#define NPCM7XX_SWRSTC2 (0x48)
+#define NPCM7XX_SWRSTC3 (0x4C)
+#define NPCM7XX_SWRSTC4 (0x50)
+#define NPCM7XX_CORSTC (0x5C)
+#define NPCM7XX_PLLCONG (0x60)
+#define NPCM7XX_AHBCKFI (0x64)
+#define NPCM7XX_SECCNT (0x68)
+#define NPCM7XX_CNTR25M (0x6C)
+
+struct npcm7xx_clk_gate_data {
+ u32 reg;
+ u8 bit_idx;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ /*
+ * If this clock is exported via DT, set onecell_idx to constant
+ * defined in include/dt-bindings/clock/nuvoton,npcm7xx-clock.h for
+ * this specific clock. Otherwise, set to -1.
+ */
+ int onecell_idx;
+};
+
+struct npcm7xx_clk_mux_data {
+ u8 shift;
+ u8 mask;
+ u32 *table;
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ /*
+ * If this clock is exported via DT, set onecell_idx to constant
+ * defined in include/dt-bindings/clock/nuvoton,npcm7xx-clock.h for
+ * this specific clock. Otherwise, set to -1.
+ */
+ int onecell_idx;
+
+};
+
+struct npcm7xx_clk_div_fixed_data {
+ u8 mult;
+ u8 div;
+ const char *name;
+ const char *parent_name;
+ u8 clk_divider_flags;
+ /*
+ * If this clock is exported via DT, set onecell_idx to constant
+ * defined in include/dt-bindings/clock/nuvoton,npcm7xx-clock.h for
+ * this specific clock. Otherwise, set to -1.
+ */
+ int onecell_idx;
+};
+
+
+struct npcm7xx_clk_div_data {
+ u32 reg;
+ u8 shift;
+ u8 width;
+ const char *name;
+ const char *parent_name;
+ u8 clk_divider_flags;
+ unsigned long flags;
+ /*
+ * If this clock is exported via DT, set onecell_idx to constant
+ * defined in include/dt-bindings/clock/nuvoton,npcm7xx-clock.h for
+ * this specific clock. Otherwise, set to -1.
+ */
+ int onecell_idx;
+};
+
+struct npcm7xx_clk_pll_data {
+ u32 reg;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ /*
+ * If this clock is exported via DT, set onecell_idx to constant
+ * defined in include/dt-bindings/clock/nuvoton,npcm7xx-clock.h for
+ * this specific clock. Otherwise, set to -1.
+ */
+ int onecell_idx;
+};
+
+/*
+ * Single copy of strings used to refer to clocks within this driver indexed by
+ * above enum.
+ */
+#define NPCM7XX_CLK_S_REFCLK "refclk"
+#define NPCM7XX_CLK_S_SYSBYPCK "sysbypck"
+#define NPCM7XX_CLK_S_MCBYPCK "mcbypck"
+#define NPCM7XX_CLK_S_GFXBYPCK "gfxbypck"
+#define NPCM7XX_CLK_S_PLL0 "pll0"
+#define NPCM7XX_CLK_S_PLL1 "pll1"
+#define NPCM7XX_CLK_S_PLL1_DIV2 "pll1_div2"
+#define NPCM7XX_CLK_S_PLL2 "pll2"
+#define NPCM7XX_CLK_S_PLL_GFX "pll_gfx"
+#define NPCM7XX_CLK_S_PLL2_DIV2 "pll2_div2"
+#define NPCM7XX_CLK_S_PIX_MUX "gfx_pixel"
+#define NPCM7XX_CLK_S_GPRFSEL_MUX "gprfsel_mux"
+#define NPCM7XX_CLK_S_MC_MUX "mc_phy"
+#define NPCM7XX_CLK_S_CPU_MUX "cpu" /*AKA system clock.*/
+#define NPCM7XX_CLK_S_MC "mc"
+#define NPCM7XX_CLK_S_AXI "axi" /*AKA CLK2*/
+#define NPCM7XX_CLK_S_AHB "ahb" /*AKA CLK4*/
+#define NPCM7XX_CLK_S_CLKOUT_MUX "clkout_mux"
+#define NPCM7XX_CLK_S_UART_MUX "uart_mux"
+#define NPCM7XX_CLK_S_TIM_MUX "timer_mux"
+#define NPCM7XX_CLK_S_SD_MUX "sd_mux"
+#define NPCM7XX_CLK_S_GFXM_MUX "gfxm_mux"
+#define NPCM7XX_CLK_S_SU_MUX "serial_usb_mux"
+#define NPCM7XX_CLK_S_DVC_MUX "dvc_mux"
+#define NPCM7XX_CLK_S_GFX_MUX "gfx_mux"
+#define NPCM7XX_CLK_S_GFX_PIXEL "gfx_pixel"
+#define NPCM7XX_CLK_S_SPI0 "spi0"
+#define NPCM7XX_CLK_S_SPI3 "spi3"
+#define NPCM7XX_CLK_S_SPIX "spix"
+#define NPCM7XX_CLK_S_APB1 "apb1"
+#define NPCM7XX_CLK_S_APB2 "apb2"
+#define NPCM7XX_CLK_S_APB3 "apb3"
+#define NPCM7XX_CLK_S_APB4 "apb4"
+#define NPCM7XX_CLK_S_APB5 "apb5"
+#define NPCM7XX_CLK_S_TOCK "tock"
+#define NPCM7XX_CLK_S_CLKOUT "clkout"
+#define NPCM7XX_CLK_S_UART "uart"
+#define NPCM7XX_CLK_S_TIMER "timer"
+#define NPCM7XX_CLK_S_MMC "mmc"
+#define NPCM7XX_CLK_S_SDHC "sdhc"
+#define NPCM7XX_CLK_S_ADC "adc"
+#define NPCM7XX_CLK_S_GFX "gfx0_gfx1_mem"
+#define NPCM7XX_CLK_S_USBIF "serial_usbif"
+#define NPCM7XX_CLK_S_USB_HOST "usb_host"
+#define NPCM7XX_CLK_S_USB_BRIDGE "usb_bridge"
+#define NPCM7XX_CLK_S_PCI "pci"
+
+static u32 pll_mux_table[] = {0, 1, 2, 3};
+static const char * const pll_mux_parents[] __initconst = {
+ NPCM7XX_CLK_S_PLL0,
+ NPCM7XX_CLK_S_PLL1_DIV2,
+ NPCM7XX_CLK_S_REFCLK,
+ NPCM7XX_CLK_S_PLL2_DIV2,
+};
+
+static u32 cpuck_mux_table[] = {0, 1, 2, 3};
+static const char * const cpuck_mux_parents[] __initconst = {
+ NPCM7XX_CLK_S_PLL0,
+ NPCM7XX_CLK_S_PLL1_DIV2,
+ NPCM7XX_CLK_S_REFCLK,
+ NPCM7XX_CLK_S_SYSBYPCK,
+};
+
+static u32 pixcksel_mux_table[] = {0, 2};
+static const char * const pixcksel_mux_parents[] __initconst = {
+ NPCM7XX_CLK_S_PLL_GFX,
+ NPCM7XX_CLK_S_REFCLK,
+};
+
+static u32 sucksel_mux_table[] = {2, 3};
+static const char * const sucksel_mux_parents[] __initconst = {
+ NPCM7XX_CLK_S_REFCLK,
+ NPCM7XX_CLK_S_PLL2_DIV2,
+};
+
+static u32 mccksel_mux_table[] = {0, 2, 3};
+static const char * const mccksel_mux_parents[] __initconst = {
+ NPCM7XX_CLK_S_PLL1_DIV2,
+ NPCM7XX_CLK_S_REFCLK,
+ NPCM7XX_CLK_S_MCBYPCK,
+};
+
+static u32 clkoutsel_mux_table[] = {0, 1, 2, 3, 4};
+static const char * const clkoutsel_mux_parents[] __initconst = {
+ NPCM7XX_CLK_S_PLL0,
+ NPCM7XX_CLK_S_PLL1_DIV2,
+ NPCM7XX_CLK_S_REFCLK,
+ NPCM7XX_CLK_S_PLL_GFX, // divided by 2
+ NPCM7XX_CLK_S_PLL2_DIV2,
+};
+
+static u32 gfxmsel_mux_table[] = {2, 3};
+static const char * const gfxmsel_mux_parents[] __initconst = {
+ NPCM7XX_CLK_S_REFCLK,
+ NPCM7XX_CLK_S_PLL2_DIV2,
+};
+
+static u32 dvcssel_mux_table[] = {2, 3};
+static const char * const dvcssel_mux_parents[] __initconst = {
+ NPCM7XX_CLK_S_REFCLK,
+ NPCM7XX_CLK_S_PLL2,
+};
+
+static const struct npcm7xx_clk_pll_data npcm7xx_plls[] __initconst = {
+ {NPCM7XX_PLLCON0, NPCM7XX_CLK_S_PLL0, NPCM7XX_CLK_S_REFCLK, 0, -1},
+
+ {NPCM7XX_PLLCON1, NPCM7XX_CLK_S_PLL1,
+ NPCM7XX_CLK_S_REFCLK, 0, -1},
+
+ {NPCM7XX_PLLCON2, NPCM7XX_CLK_S_PLL2,
+ NPCM7XX_CLK_S_REFCLK, 0, -1},
+
+ {NPCM7XX_PLLCONG, NPCM7XX_CLK_S_PLL_GFX,
+ NPCM7XX_CLK_S_REFCLK, 0, -1},
+};
+
+static const struct npcm7xx_clk_mux_data npcm7xx_muxes[] __initconst = {
+ {0, GENMASK(1, 0), cpuck_mux_table, NPCM7XX_CLK_S_CPU_MUX,
+ cpuck_mux_parents, ARRAY_SIZE(cpuck_mux_parents), CLK_IS_CRITICAL,
+ NPCM7XX_CLK_CPU},
+
+ {4, GENMASK(1, 0), pixcksel_mux_table, NPCM7XX_CLK_S_PIX_MUX,
+ pixcksel_mux_parents, ARRAY_SIZE(pixcksel_mux_parents), 0,
+ NPCM7XX_CLK_GFX_PIXEL},
+
+ {6, GENMASK(1, 0), pll_mux_table, NPCM7XX_CLK_S_SD_MUX,
+ pll_mux_parents, ARRAY_SIZE(pll_mux_parents), 0, -1},
+
+ {8, GENMASK(1, 0), pll_mux_table, NPCM7XX_CLK_S_UART_MUX,
+ pll_mux_parents, ARRAY_SIZE(pll_mux_parents), 0, -1},
+
+ {10, GENMASK(1, 0), sucksel_mux_table, NPCM7XX_CLK_S_SU_MUX,
+ sucksel_mux_parents, ARRAY_SIZE(sucksel_mux_parents), 0, -1},
+
+ {12, GENMASK(1, 0), mccksel_mux_table, NPCM7XX_CLK_S_MC_MUX,
+ mccksel_mux_parents, ARRAY_SIZE(mccksel_mux_parents), 0, -1},
+
+ {14, GENMASK(1, 0), pll_mux_table, NPCM7XX_CLK_S_TIM_MUX,
+ pll_mux_parents, ARRAY_SIZE(pll_mux_parents), 0, -1},
+
+ {16, GENMASK(1, 0), pll_mux_table, NPCM7XX_CLK_S_GFX_MUX,
+ pll_mux_parents, ARRAY_SIZE(pll_mux_parents), 0, -1},
+
+ {18, GENMASK(2, 0), clkoutsel_mux_table, NPCM7XX_CLK_S_CLKOUT_MUX,
+ clkoutsel_mux_parents, ARRAY_SIZE(clkoutsel_mux_parents), 0, -1},
+
+ {21, GENMASK(1, 0), gfxmsel_mux_table, NPCM7XX_CLK_S_GFXM_MUX,
+ gfxmsel_mux_parents, ARRAY_SIZE(gfxmsel_mux_parents), 0, -1},
+
+ {23, GENMASK(1, 0), dvcssel_mux_table, NPCM7XX_CLK_S_DVC_MUX,
+ dvcssel_mux_parents, ARRAY_SIZE(dvcssel_mux_parents), 0, -1},
+};
+
+/* fixed ratio dividers (no register): */
+static const struct npcm7xx_clk_div_fixed_data npcm7xx_divs_fx[] __initconst = {
+ { 1, 2, NPCM7XX_CLK_S_MC, NPCM7XX_CLK_S_MC_MUX, 0, NPCM7XX_CLK_MC},
+ { 1, 2, NPCM7XX_CLK_S_PLL1_DIV2, NPCM7XX_CLK_S_PLL1, 0, -1},
+ { 1, 2, NPCM7XX_CLK_S_PLL2_DIV2, NPCM7XX_CLK_S_PLL2, 0, -1},
+};
+
+/* configurable dividers: */
+static const struct npcm7xx_clk_div_data npcm7xx_divs[] __initconst = {
+ {NPCM7XX_CLKDIV1, 28, 3, NPCM7XX_CLK_S_ADC,
+ NPCM7XX_CLK_S_TIMER, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_ADC},
+ /*30-28 ADCCKDIV*/
+ {NPCM7XX_CLKDIV1, 26, 2, NPCM7XX_CLK_S_AHB,
+ NPCM7XX_CLK_S_AXI, 0, CLK_IS_CRITICAL, NPCM7XX_CLK_AHB},
+ /*27-26 CLK4DIV*/
+ {NPCM7XX_CLKDIV1, 21, 5, NPCM7XX_CLK_S_TIMER,
+ NPCM7XX_CLK_S_TIM_MUX, 0, 0, NPCM7XX_CLK_TIMER},
+ /*25-21 TIMCKDIV*/
+ {NPCM7XX_CLKDIV1, 16, 5, NPCM7XX_CLK_S_UART,
+ NPCM7XX_CLK_S_UART_MUX, 0, 0, NPCM7XX_CLK_UART},
+ /*20-16 UARTDIV*/
+ {NPCM7XX_CLKDIV1, 11, 5, NPCM7XX_CLK_S_MMC,
+ NPCM7XX_CLK_S_SD_MUX, 0, 0, NPCM7XX_CLK_MMC},
+ /*15-11 MMCCKDIV*/
+ {NPCM7XX_CLKDIV1, 6, 5, NPCM7XX_CLK_S_SPI3,
+ NPCM7XX_CLK_S_AHB, 0, 0, NPCM7XX_CLK_SPI3},
+ /*10-6 AHB3CKDIV*/
+ {NPCM7XX_CLKDIV1, 2, 4, NPCM7XX_CLK_S_PCI,
+ NPCM7XX_CLK_S_GFX_MUX, 0, 0, NPCM7XX_CLK_PCI},
+ /*5-2 PCICKDIV*/
+ {NPCM7XX_CLKDIV1, 0, 1, NPCM7XX_CLK_S_AXI,
+ NPCM7XX_CLK_S_CPU_MUX, CLK_DIVIDER_POWER_OF_TWO, CLK_IS_CRITICAL,
+ NPCM7XX_CLK_AXI},/*0 CLK2DIV*/
+
+ {NPCM7XX_CLKDIV2, 30, 2, NPCM7XX_CLK_S_APB4,
+ NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB4},
+ /*31-30 APB4CKDIV*/
+ {NPCM7XX_CLKDIV2, 28, 2, NPCM7XX_CLK_S_APB3,
+ NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB3},
+ /*29-28 APB3CKDIV*/
+ {NPCM7XX_CLKDIV2, 26, 2, NPCM7XX_CLK_S_APB2,
+ NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB2},
+ /*27-26 APB2CKDIV*/
+ {NPCM7XX_CLKDIV2, 24, 2, NPCM7XX_CLK_S_APB1,
+ NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB1},
+ /*25-24 APB1CKDIV*/
+ {NPCM7XX_CLKDIV2, 22, 2, NPCM7XX_CLK_S_APB5,
+ NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB5},
+ /*23-22 APB5CKDIV*/
+ {NPCM7XX_CLKDIV2, 16, 5, NPCM7XX_CLK_S_CLKOUT,
+ NPCM7XX_CLK_S_CLKOUT_MUX, 0, 0, NPCM7XX_CLK_CLKOUT},
+ /*20-16 CLKOUTDIV*/
+ {NPCM7XX_CLKDIV2, 13, 3, NPCM7XX_CLK_S_GFX,
+ NPCM7XX_CLK_S_GFX_MUX, 0, 0, NPCM7XX_CLK_GFX},
+ /*15-13 GFXCKDIV*/
+ {NPCM7XX_CLKDIV2, 8, 5, NPCM7XX_CLK_S_USB_BRIDGE,
+ NPCM7XX_CLK_S_SU_MUX, 0, 0, NPCM7XX_CLK_SU},
+ /*12-8 SUCKDIV*/
+ {NPCM7XX_CLKDIV2, 4, 4, NPCM7XX_CLK_S_USB_HOST,
+ NPCM7XX_CLK_S_SU_MUX, 0, 0, NPCM7XX_CLK_SU48},
+ /*7-4 SU48CKDIV*/
+ {NPCM7XX_CLKDIV2, 0, 4, NPCM7XX_CLK_S_SDHC,
+ NPCM7XX_CLK_S_SD_MUX, 0, 0, NPCM7XX_CLK_SDHC}
+ ,/*3-0 SD1CKDIV*/
+
+ {NPCM7XX_CLKDIV3, 6, 5, NPCM7XX_CLK_S_SPI0,
+ NPCM7XX_CLK_S_AHB, 0, 0, NPCM7XX_CLK_SPI0},
+ /*10-6 SPI0CKDV*/
+ {NPCM7XX_CLKDIV3, 1, 5, NPCM7XX_CLK_S_SPIX,
+ NPCM7XX_CLK_S_AHB, 0, 0, NPCM7XX_CLK_SPIX},
+ /*5-1 SPIXCKDV*/
+
+};
+
+static const struct npcm7xx_clk_gate_data npcm7xx_gates[] __initconst = {
+ {NPCM7XX_CLKEN1, 31, "smb1-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN1, 30, "smb0-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN1, 29, "smb7-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN1, 28, "smb6-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN1, 27, "adc-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN1, 26, "wdt-gate", NPCM7XX_CLK_S_TIMER, 0},
+ {NPCM7XX_CLKEN1, 25, "usbdev3-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 24, "usbdev6-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 23, "usbdev5-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 22, "usbdev4-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 21, "emc2-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 20, "timer5_9-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN1, 19, "timer0_4-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN1, 18, "pwmm0-gate", NPCM7XX_CLK_S_APB3, 0},
+ {NPCM7XX_CLKEN1, 17, "huart-gate", NPCM7XX_CLK_S_UART, 0},
+ {NPCM7XX_CLKEN1, 16, "smb5-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN1, 15, "smb4-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN1, 14, "smb3-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN1, 13, "smb2-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN1, 12, "mc-gate", NPCM7XX_CLK_S_MC, 0},
+ {NPCM7XX_CLKEN1, 11, "uart01-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN1, 10, "aes-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 9, "peci-gate", NPCM7XX_CLK_S_APB3, 0},
+ {NPCM7XX_CLKEN1, 8, "usbdev2-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 7, "uart23-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN1, 6, "emc1-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 5, "usbdev1-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 4, "shm-gate", NPCM7XX_CLK_S_AHB, 0},
+ /* bit 3 is reserved */
+ {NPCM7XX_CLKEN1, 2, "kcs-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN1, 1, "spi3-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN1, 0, "spi0-gate", NPCM7XX_CLK_S_AHB, 0},
+
+ {NPCM7XX_CLKEN2, 31, "cp-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN2, 30, "tock-gate", NPCM7XX_CLK_S_TOCK, 0},
+ /* bit 29 is reserved */
+ {NPCM7XX_CLKEN2, 28, "gmac1-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN2, 27, "usbif-gate", NPCM7XX_CLK_S_USBIF, 0},
+ {NPCM7XX_CLKEN2, 26, "usbhost-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN2, 25, "gmac2-gate", NPCM7XX_CLK_S_AHB, 0},
+ /* bit 24 is reserved */
+ {NPCM7XX_CLKEN2, 23, "pspi2-gate", NPCM7XX_CLK_S_APB5, 0},
+ {NPCM7XX_CLKEN2, 22, "pspi1-gate", NPCM7XX_CLK_S_APB5, 0},
+ {NPCM7XX_CLKEN2, 21, "3des-gate", NPCM7XX_CLK_S_AHB, 0},
+ /* bit 20 is reserved */
+ {NPCM7XX_CLKEN2, 19, "siox2-gate", NPCM7XX_CLK_S_APB3, 0},
+ {NPCM7XX_CLKEN2, 18, "siox1-gate", NPCM7XX_CLK_S_APB3, 0},
+ /* bit 17 is reserved */
+ {NPCM7XX_CLKEN2, 16, "fuse-gate", NPCM7XX_CLK_S_APB4, 0},
+ /* bit 15 is reserved */
+ {NPCM7XX_CLKEN2, 14, "vcd-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN2, 13, "ece-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN2, 12, "vdma-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN2, 11, "ahbpcibrg-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN2, 10, "gfxsys-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN2, 9, "sdhc-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN2, 8, "mmc-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN2, 7, "mft7-gate", NPCM7XX_CLK_S_APB4, 0},
+ {NPCM7XX_CLKEN2, 6, "mft6-gate", NPCM7XX_CLK_S_APB4, 0},
+ {NPCM7XX_CLKEN2, 5, "mft5-gate", NPCM7XX_CLK_S_APB4, 0},
+ {NPCM7XX_CLKEN2, 4, "mft4-gate", NPCM7XX_CLK_S_APB4, 0},
+ {NPCM7XX_CLKEN2, 3, "mft3-gate", NPCM7XX_CLK_S_APB4, 0},
+ {NPCM7XX_CLKEN2, 2, "mft2-gate", NPCM7XX_CLK_S_APB4, 0},
+ {NPCM7XX_CLKEN2, 1, "mft1-gate", NPCM7XX_CLK_S_APB4, 0},
+ {NPCM7XX_CLKEN2, 0, "mft0-gate", NPCM7XX_CLK_S_APB4, 0},
+
+ {NPCM7XX_CLKEN3, 31, "gpiom7-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 30, "gpiom6-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 29, "gpiom5-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 28, "gpiom4-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 27, "gpiom3-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 26, "gpiom2-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 25, "gpiom1-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 24, "gpiom0-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 23, "espi-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN3, 22, "smb11-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN3, 21, "smb10-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN3, 20, "smb9-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN3, 19, "smb8-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN3, 18, "smb15-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN3, 17, "rng-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 16, "timer10_14-gate", NPCM7XX_CLK_S_APB1, 0},
+ {NPCM7XX_CLKEN3, 15, "pcirc-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN3, 14, "sececc-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN3, 13, "sha-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN3, 12, "smb14-gate", NPCM7XX_CLK_S_APB2, 0},
+ /* bit 11 is reserved */
+ /* bit 10 is reserved */
+ {NPCM7XX_CLKEN3, 9, "pcimbx-gate", NPCM7XX_CLK_S_AHB, 0},
+ /* bit 8 is reserved */
+ {NPCM7XX_CLKEN3, 7, "usbdev9-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN3, 6, "usbdev8-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN3, 5, "usbdev7-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN3, 4, "usbdev0-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN3, 3, "smb13-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN3, 2, "spix-gate", NPCM7XX_CLK_S_AHB, 0},
+ {NPCM7XX_CLKEN3, 1, "smb12-gate", NPCM7XX_CLK_S_APB2, 0},
+ {NPCM7XX_CLKEN3, 0, "pwmm1-gate", NPCM7XX_CLK_S_APB3, 0},
+};
+
+static DEFINE_SPINLOCK(npcm7xx_clk_lock);
+
+static void __init npcm7xx_clk_init(struct device_node *clk_np)
+{
+ struct clk_hw_onecell_data *npcm7xx_clk_data;
+ void __iomem *clk_base;
+ struct resource res;
+ struct clk_hw *hw;
+ int ret;
+ int i;
+
+ ret = of_address_to_resource(clk_np, 0, &res);
+ if (ret) {
+ pr_err("%s: failed to get resource, ret %d\n", clk_np->name,
+ ret);
+ return;
+ }
+
+ clk_base = ioremap(res.start, resource_size(&res));
+ if (!clk_base)
+ goto npcm7xx_init_error;
+
+ npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) *
+ NPCM7XX_NUM_CLOCKS + sizeof(*npcm7xx_clk_data), GFP_KERNEL);
+ if (!npcm7xx_clk_data)
+ goto npcm7xx_init_np_err;
+
+ npcm7xx_clk_data->num = NPCM7XX_NUM_CLOCKS;
+
+ for (i = 0; i < NPCM7XX_NUM_CLOCKS; i++)
+ npcm7xx_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+
+ /* Register plls */
+ for (i = 0; i < ARRAY_SIZE(npcm7xx_plls); i++) {
+ const struct npcm7xx_clk_pll_data *pll_data = &npcm7xx_plls[i];
+
+ hw = npcm7xx_clk_register_pll(clk_base + pll_data->reg,
+ pll_data->name, pll_data->parent_name, pll_data->flags);
+ if (IS_ERR(hw)) {
+ pr_err("npcm7xx_clk: Can't register pll\n");
+ goto npcm7xx_init_fail;
+ }
+
+ if (pll_data->onecell_idx >= 0)
+ npcm7xx_clk_data->hws[pll_data->onecell_idx] = hw;
+ }
+
+ /* Register fixed dividers */
+ hw = clk_hw_register_fixed_factor(NULL, NPCM7XX_CLK_S_PLL1_DIV2,
+ NPCM7XX_CLK_S_PLL1, 0, 1, 2);
+ if (IS_ERR(hw)) {
+ pr_err("npcm7xx_clk: Can't register fixed div\n");
+ goto npcm7xx_init_fail;
+ }
+
+ hw = clk_hw_register_fixed_factor(NULL, NPCM7XX_CLK_S_PLL2_DIV2,
+ NPCM7XX_CLK_S_PLL2, 0, 1, 2);
+ if (IS_ERR(hw)) {
+ pr_err("npcm7xx_clk: Can't register div2\n");
+ goto npcm7xx_init_fail;
+ }
+
+ /* Register muxes */
+ for (i = 0; i < ARRAY_SIZE(npcm7xx_muxes); i++) {
+ const struct npcm7xx_clk_mux_data *mux_data = &npcm7xx_muxes[i];
+
+ hw = clk_hw_register_mux_table(NULL,
+ mux_data->name,
+ mux_data->parent_names, mux_data->num_parents,
+ mux_data->flags, clk_base + NPCM7XX_CLKSEL,
+ mux_data->shift, mux_data->mask, 0,
+ mux_data->table, &npcm7xx_clk_lock);
+
+ if (IS_ERR(hw)) {
+ pr_err("npcm7xx_clk: Can't register mux\n");
+ goto npcm7xx_init_fail;
+ }
+
+ if (mux_data->onecell_idx >= 0)
+ npcm7xx_clk_data->hws[mux_data->onecell_idx] = hw;
+ }
+
+ /* Register clock dividers specified in npcm7xx_divs */
+ for (i = 0; i < ARRAY_SIZE(npcm7xx_divs); i++) {
+ const struct npcm7xx_clk_div_data *div_data = &npcm7xx_divs[i];
+
+ hw = clk_hw_register_divider(NULL, div_data->name,
+ div_data->parent_name,
+ div_data->flags,
+ clk_base + div_data->reg,
+ div_data->shift, div_data->width,
+ div_data->clk_divider_flags, &npcm7xx_clk_lock);
+ if (IS_ERR(hw)) {
+ pr_err("npcm7xx_clk: Can't register div table\n");
+ goto npcm7xx_init_fail;
+ }
+
+ if (div_data->onecell_idx >= 0)
+ npcm7xx_clk_data->hws[div_data->onecell_idx] = hw;
+ }
+
+ ret = of_clk_add_hw_provider(clk_np, of_clk_hw_onecell_get,
+ npcm7xx_clk_data);
+ if (ret)
+ pr_err("failed to add DT provider: %d\n", ret);
+
+ of_node_put(clk_np);
+
+ return;
+
+npcm7xx_init_fail:
+ kfree(npcm7xx_clk_data);
+npcm7xx_init_np_err:
+ iounmap(clk_base);
+npcm7xx_init_error:
+ of_node_put(clk_np);
+}
+CLK_OF_DECLARE(npcm7xx_clk_init, "nuvoton,npcm750-clk", npcm7xx_clk_init);
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index 1c96a9f6c022..1e2a3b8f9454 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -207,6 +207,7 @@ static int si544_calc_muldiv(struct clk_si544_muldiv *settings,
/* And the fractional bits using the remainder */
vco = (u64)tmp << 32;
+ vco += FXO / 2; /* Round to nearest multiple */
do_div(vco, FXO);
settings->fb_div_frac = vco;
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 83e8cd81674f..a907555b2a3d 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -579,14 +579,9 @@ clk_stm32_register_gate_ops(struct device *dev,
spinlock_t *lock)
{
struct clk_init_data init = { NULL };
- struct clk_gate *gate;
struct clk_hw *hw;
int ret;
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate)
- return ERR_PTR(-ENOMEM);
-
init.name = name;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -604,10 +599,8 @@ clk_stm32_register_gate_ops(struct device *dev,
hw->init = &init;
ret = clk_hw_register(dev, hw);
- if (ret) {
- kfree(gate);
+ if (ret)
hw = ERR_PTR(ret);
- }
return hw;
}
@@ -1988,7 +1981,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
_DIV(RCC_MCO2CFGR, 4, 4, 0, NULL)),
/* Debug clocks */
- GATE(CK_DBG, "ck_sys_dbg", "ck_axi", 0, RCC_DBGCFGR, 8, 0),
+ GATE(CK_DBG, "ck_sys_dbg", "ck_axi", CLK_IGNORE_UNUSED,
+ RCC_DBGCFGR, 8, 0),
COMPOSITE(CK_TRACE, "ck_trace", ck_trace_src, CLK_OPS_PARENT_ENABLE,
_GATE(RCC_DBGCFGR, 9, 0),
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 7af555f0e60c..a24a6afb50b6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -549,7 +549,8 @@ static void clk_core_rate_unprotect(struct clk_core *core)
if (!core)
return;
- if (WARN_ON(core->protect_count == 0))
+ if (WARN(core->protect_count == 0,
+ "%s already unprotected\n", core->name))
return;
if (--core->protect_count > 0)
@@ -682,16 +683,18 @@ static void clk_core_unprepare(struct clk_core *core)
if (!core)
return;
- if (WARN_ON(core->prepare_count == 0))
+ if (WARN(core->prepare_count == 0,
+ "%s already unprepared\n", core->name))
return;
- if (WARN_ON(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL))
+ if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
+ "Unpreparing critical %s\n", core->name))
return;
if (--core->prepare_count > 0)
return;
- WARN_ON(core->enable_count > 0);
+ WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
trace_clk_unprepare(core);
@@ -809,10 +812,11 @@ static void clk_core_disable(struct clk_core *core)
if (!core)
return;
- if (WARN_ON(core->enable_count == 0))
+ if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
return;
- if (WARN_ON(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL))
+ if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
+ "Disabling critical %s\n", core->name))
return;
if (--core->enable_count > 0)
@@ -867,7 +871,8 @@ static int clk_core_enable(struct clk_core *core)
if (!core)
return 0;
- if (WARN_ON(core->prepare_count == 0))
+ if (WARN(core->prepare_count == 0,
+ "Enabling unprepared %s\n", core->name))
return -ESHUTDOWN;
if (core->enable_count == 0) {
@@ -2171,7 +2176,6 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
struct clk_core *core, *parent_core;
- unsigned int i;
/* NULL clocks should be nops, so return success if either is NULL. */
if (!clk || !parent)
@@ -2184,11 +2188,8 @@ bool clk_has_parent(struct clk *clk, struct clk *parent)
if (core->parent == parent_core)
return true;
- for (i = 0; i < core->num_parents; i++)
- if (strcmp(core->parent_names[i], parent_core->name) == 0)
- return true;
-
- return false;
+ return match_string(core->parent_names, core->num_parents,
+ parent_core->name) >= 0;
}
EXPORT_SYMBOL_GPL(clk_has_parent);
@@ -2609,81 +2610,31 @@ static int possible_parents_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(possible_parents);
-static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
+static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
- struct dentry *d;
- int ret = -ENOMEM;
-
- if (!core || !pdentry) {
- ret = -EINVAL;
- goto out;
- }
-
- d = debugfs_create_dir(core->name, pdentry);
- if (!d)
- goto out;
+ struct dentry *root;
- core->dentry = d;
-
- d = debugfs_create_ulong("clk_rate", 0444, core->dentry, &core->rate);
- if (!d)
- goto err_out;
-
- d = debugfs_create_ulong("clk_accuracy", 0444, core->dentry,
- &core->accuracy);
- if (!d)
- goto err_out;
-
- d = debugfs_create_u32("clk_phase", 0444, core->dentry, &core->phase);
- if (!d)
- goto err_out;
-
- d = debugfs_create_file("clk_flags", 0444, core->dentry, core,
- &clk_flags_fops);
- if (!d)
- goto err_out;
-
- d = debugfs_create_u32("clk_prepare_count", 0444, core->dentry,
- &core->prepare_count);
- if (!d)
- goto err_out;
-
- d = debugfs_create_u32("clk_enable_count", 0444, core->dentry,
- &core->enable_count);
- if (!d)
- goto err_out;
-
- d = debugfs_create_u32("clk_protect_count", 0444, core->dentry,
- &core->protect_count);
- if (!d)
- goto err_out;
-
- d = debugfs_create_u32("clk_notifier_count", 0444, core->dentry,
- &core->notifier_count);
- if (!d)
- goto err_out;
+ if (!core || !pdentry)
+ return;
- if (core->num_parents > 1) {
- d = debugfs_create_file("clk_possible_parents", 0444,
- core->dentry, core, &possible_parents_fops);
- if (!d)
- goto err_out;
- }
+ root = debugfs_create_dir(core->name, pdentry);
+ core->dentry = root;
- if (core->ops->debug_init) {
- ret = core->ops->debug_init(core->hw, core->dentry);
- if (ret)
- goto err_out;
- }
+ debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
+ debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
+ debugfs_create_u32("clk_phase", 0444, root, &core->phase);
+ debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
+ debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
+ debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
+ debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
+ debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
- ret = 0;
- goto out;
+ if (core->num_parents > 1)
+ debugfs_create_file("clk_possible_parents", 0444, root, core,
+ &possible_parents_fops);
-err_out:
- debugfs_remove_recursive(core->dentry);
- core->dentry = NULL;
-out:
- return ret;
+ if (core->ops->debug_init)
+ core->ops->debug_init(core->hw, core->dentry);
}
/**
@@ -2694,17 +2645,13 @@ out:
* initialized. Otherwise it bails out early since the debugfs clk directory
* will be created lazily by clk_debug_init as part of a late_initcall.
*/
-static int clk_debug_register(struct clk_core *core)
+static void clk_debug_register(struct clk_core *core)
{
- int ret = 0;
-
mutex_lock(&clk_debug_lock);
hlist_add_head(&core->debug_node, &clk_debug_list);
if (inited)
- ret = clk_debug_create_one(core, rootdir);
+ clk_debug_create_one(core, rootdir);
mutex_unlock(&clk_debug_lock);
-
- return ret;
}
/**
@@ -2724,19 +2671,6 @@ static void clk_debug_unregister(struct clk_core *core)
mutex_unlock(&clk_debug_lock);
}
-struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
- void *data, const struct file_operations *fops)
-{
- struct dentry *d = NULL;
-
- if (hw->core->dentry)
- d = debugfs_create_file(name, mode, hw->core->dentry, data,
- fops);
-
- return d;
-}
-EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
-
/**
* clk_debug_init - lazily populate the debugfs clk directory
*
@@ -2749,32 +2683,17 @@ EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
static int __init clk_debug_init(void)
{
struct clk_core *core;
- struct dentry *d;
rootdir = debugfs_create_dir("clk", NULL);
- if (!rootdir)
- return -ENOMEM;
-
- d = debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
- &clk_summary_fops);
- if (!d)
- return -ENOMEM;
-
- d = debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
- &clk_dump_fops);
- if (!d)
- return -ENOMEM;
-
- d = debugfs_create_file("clk_orphan_summary", 0444, rootdir,
- &orphan_list, &clk_summary_fops);
- if (!d)
- return -ENOMEM;
-
- d = debugfs_create_file("clk_orphan_dump", 0444, rootdir,
- &orphan_list, &clk_dump_fops);
- if (!d)
- return -ENOMEM;
+ debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
+ &clk_summary_fops);
+ debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
+ &clk_dump_fops);
+ debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
+ &clk_summary_fops);
+ debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
+ &clk_dump_fops);
mutex_lock(&clk_debug_lock);
hlist_for_each_entry(core, &clk_debug_list, debug_node)
@@ -2787,7 +2706,7 @@ static int __init clk_debug_init(void)
}
late_initcall(clk_debug_init);
#else
-static inline int clk_debug_register(struct clk_core *core) { return 0; }
+static inline void clk_debug_register(struct clk_core *core) { }
static inline void clk_debug_reparent(struct clk_core *core,
struct clk_core *new_parent)
{
@@ -3907,7 +3826,7 @@ int of_clk_parent_fill(struct device_node *np, const char **parents,
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
struct clock_provider {
- of_clk_init_cb_t clk_init_cb;
+ void (*clk_init_cb)(struct device_node *);
struct device_node *np;
struct list_head node;
};
diff --git a/drivers/clk/davinci/pll-da830.c b/drivers/clk/davinci/pll-da830.c
index 929a3d3a9adb..0a0d06fb25fd 100644
--- a/drivers/clk/davinci/pll-da830.c
+++ b/drivers/clk/davinci/pll-da830.c
@@ -6,6 +6,7 @@
*/
#include <linux/clkdev.h>
+#include <linux/clk/davinci.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -36,11 +37,11 @@ SYSCLK(5, pll0_sysclk5, pll0_pllen, 5, 0);
SYSCLK(6, pll0_sysclk6, pll0_pllen, 5, SYSCLK_FIXED_DIV);
SYSCLK(7, pll0_sysclk7, pll0_pllen, 5, 0);
-int da830_pll_init(struct device *dev, void __iomem *base)
+int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
struct clk *clk;
- davinci_pll_clk_register(dev, &da830_pll_info, "ref_clk", base);
+ davinci_pll_clk_register(dev, &da830_pll_info, "ref_clk", base, cfgchip);
clk = davinci_pll_sysclk_register(dev, &pll0_sysclk2, base);
clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc0");
diff --git a/drivers/clk/davinci/pll-da850.c b/drivers/clk/davinci/pll-da850.c
index 2a038b7908cc..0f7198191ea2 100644
--- a/drivers/clk/davinci/pll-da850.c
+++ b/drivers/clk/davinci/pll-da850.c
@@ -7,10 +7,14 @@
#include <linux/bitops.h>
#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
#include <linux/clkdev.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/types.h>
@@ -81,11 +85,11 @@ static const struct davinci_pll_obsclk_info da850_pll0_obsclk_info = {
.ocsrc_mask = GENMASK(4, 0),
};
-int da850_pll0_init(struct device *dev, void __iomem *base)
+int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
struct clk *clk;
- davinci_pll_clk_register(dev, &da850_pll0_info, "ref_clk", base);
+ davinci_pll_clk_register(dev, &da850_pll0_info, "ref_clk", base, cfgchip);
clk = davinci_pll_sysclk_register(dev, &pll0_sysclk1, base);
clk_register_clkdev(clk, "pll0_sysclk1", "da850-psc0");
@@ -134,11 +138,22 @@ static const struct davinci_pll_sysclk_info *da850_pll0_sysclk_info[] = {
NULL
};
-int of_da850_pll0_init(struct device *dev, void __iomem *base)
+void of_da850_pll0_init(struct device_node *node)
{
- return of_davinci_pll_init(dev, &da850_pll0_info,
- &da850_pll0_obsclk_info,
- da850_pll0_sysclk_info, 7, base);
+ void __iomem *base;
+ struct regmap *cfgchip;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s: ioremap failed\n", __func__);
+ return;
+ }
+
+ cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
+
+ of_davinci_pll_init(NULL, node, &da850_pll0_info,
+ &da850_pll0_obsclk_info,
+ da850_pll0_sysclk_info, 7, base, cfgchip);
}
static const struct davinci_pll_clk_info da850_pll1_info = {
@@ -179,11 +194,11 @@ static const struct davinci_pll_obsclk_info da850_pll1_obsclk_info = {
.ocsrc_mask = GENMASK(4, 0),
};
-int da850_pll1_init(struct device *dev, void __iomem *base)
+int da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
struct clk *clk;
- davinci_pll_clk_register(dev, &da850_pll1_info, "oscin", base);
+ davinci_pll_clk_register(dev, &da850_pll1_info, "oscin", base, cfgchip);
davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
@@ -204,9 +219,9 @@ static const struct davinci_pll_sysclk_info *da850_pll1_sysclk_info[] = {
NULL
};
-int of_da850_pll1_init(struct device *dev, void __iomem *base)
+int of_da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
- return of_davinci_pll_init(dev, &da850_pll1_info,
+ return of_davinci_pll_init(dev, dev->of_node, &da850_pll1_info,
&da850_pll1_obsclk_info,
- da850_pll1_sysclk_info, 3, base);
+ da850_pll1_sysclk_info, 3, base, cfgchip);
}
diff --git a/drivers/clk/davinci/pll-dm355.c b/drivers/clk/davinci/pll-dm355.c
index 5345f8286c50..505aed80be9a 100644
--- a/drivers/clk/davinci/pll-dm355.c
+++ b/drivers/clk/davinci/pll-dm355.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk/davinci.h>
#include <linux/clkdev.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -22,16 +23,16 @@ static const struct davinci_pll_clk_info dm355_pll1_info = {
PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
};
-SYSCLK(1, pll1_sysclk1, pll1, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
-SYSCLK(2, pll1_sysclk2, pll1, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
-SYSCLK(3, pll1_sysclk3, pll1, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(4, pll1_sysclk4, pll1, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(1, pll1_sysclk1, pll1_pllen, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
+SYSCLK(2, pll1_sysclk2, pll1_pllen, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
+SYSCLK(3, pll1_sysclk3, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(4, pll1_sysclk4, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-int dm355_pll1_init(struct device *dev, void __iomem *base)
+int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
struct clk *clk;
- davinci_pll_clk_register(dev, &dm355_pll1_info, "ref_clk", base);
+ davinci_pll_clk_register(dev, &dm355_pll1_info, "ref_clk", base, cfgchip);
clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
clk_register_clkdev(clk, "pll1_sysclk1", "dm355-psc");
@@ -62,17 +63,14 @@ static const struct davinci_pll_clk_info dm355_pll2_info = {
PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
};
-SYSCLK(1, pll2_sysclk1, pll2, 5, SYSCLK_FIXED_DIV);
-SYSCLK(2, pll2_sysclk2, pll2, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
+SYSCLK(1, pll2_sysclk1, pll2_pllen, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
-int dm355_pll2_init(struct device *dev, void __iomem *base)
+int dm355_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
- davinci_pll_clk_register(dev, &dm355_pll2_info, "oscin", base);
+ davinci_pll_clk_register(dev, &dm355_pll2_info, "oscin", base, cfgchip);
davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
- davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
-
davinci_pll_sysclkbp_clk_register(dev, "pll2_sysclkbp", base);
return 0;
diff --git a/drivers/clk/davinci/pll-dm365.c b/drivers/clk/davinci/pll-dm365.c
index 5f8d9f42d0f3..2d29712753a3 100644
--- a/drivers/clk/davinci/pll-dm365.c
+++ b/drivers/clk/davinci/pll-dm365.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/clkdev.h>
+#include <linux/clk/davinci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -56,11 +57,11 @@ static const struct davinci_pll_obsclk_info dm365_pll1_obsclk_info = {
.ocsrc_mask = BIT(4),
};
-int dm365_pll1_init(struct device *dev, void __iomem *base)
+int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
struct clk *clk;
- davinci_pll_clk_register(dev, &dm365_pll1_info, "ref_clk", base);
+ davinci_pll_clk_register(dev, &dm365_pll1_info, "ref_clk", base, cfgchip);
clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
clk_register_clkdev(clk, "pll1_sysclk1", "dm365-psc");
@@ -119,11 +120,11 @@ static const struct davinci_pll_obsclk_info dm365_pll2_obsclk_info = {
.ocsrc_mask = BIT(4),
};
-int dm365_pll2_init(struct device *dev, void __iomem *base)
+int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
struct clk *clk;
- davinci_pll_clk_register(dev, &dm365_pll2_info, "oscin", base);
+ davinci_pll_clk_register(dev, &dm365_pll2_info, "oscin", base, cfgchip);
davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
diff --git a/drivers/clk/davinci/pll-dm644x.c b/drivers/clk/davinci/pll-dm644x.c
index 69bf785377cf..7650fadfaac8 100644
--- a/drivers/clk/davinci/pll-dm644x.c
+++ b/drivers/clk/davinci/pll-dm644x.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk/davinci.h>
#include <linux/clkdev.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -27,11 +28,11 @@ SYSCLK(2, pll1_sysclk2, pll1_pllen, 4, SYSCLK_FIXED_DIV);
SYSCLK(3, pll1_sysclk3, pll1_pllen, 4, SYSCLK_FIXED_DIV);
SYSCLK(5, pll1_sysclk5, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-int dm644x_pll1_init(struct device *dev, void __iomem *base)
+int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
struct clk *clk;
- davinci_pll_clk_register(dev, &dm644x_pll1_info, "ref_clk", base);
+ davinci_pll_clk_register(dev, &dm644x_pll1_info, "ref_clk", base, cfgchip);
clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
clk_register_clkdev(clk, "pll1_sysclk1", "dm644x-psc");
@@ -66,9 +67,9 @@ static const struct davinci_pll_clk_info dm644x_pll2_info = {
SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, 0);
SYSCLK(2, pll2_sysclk2, pll2_pllen, 4, 0);
-int dm644x_pll2_init(struct device *dev, void __iomem *base)
+int dm644x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
- davinci_pll_clk_register(dev, &dm644x_pll2_info, "oscin", base);
+ davinci_pll_clk_register(dev, &dm644x_pll2_info, "oscin", base, cfgchip);
davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
diff --git a/drivers/clk/davinci/pll-dm646x.c b/drivers/clk/davinci/pll-dm646x.c
index a61cc3256418..26982970df0e 100644
--- a/drivers/clk/davinci/pll-dm646x.c
+++ b/drivers/clk/davinci/pll-dm646x.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
#include <linux/clkdev.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -29,11 +30,11 @@ SYSCLK(6, pll1_sysclk6, pll1_pllen, 4, 0);
SYSCLK(8, pll1_sysclk8, pll1_pllen, 4, 0);
SYSCLK(9, pll1_sysclk9, pll1_pllen, 4, 0);
-int dm646x_pll1_init(struct device *dev, void __iomem *base)
+int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
struct clk *clk;
- davinci_pll_clk_register(dev, &dm646x_pll1_info, "ref_clk", base);
+ davinci_pll_clk_register(dev, &dm646x_pll1_info, "ref_clk", base, cfgchip);
clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
clk_register_clkdev(clk, "pll1_sysclk1", "dm646x-psc");
@@ -72,11 +73,11 @@ static const struct davinci_pll_clk_info dm646x_pll2_info = {
.flags = 0,
};
-SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, 0);
+SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, SYSCLK_ALWAYS_ENABLED);
-int dm646x_pll2_init(struct device *dev, void __iomem *base)
+int dm646x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
{
- davinci_pll_clk_register(dev, &dm646x_pll2_info, "oscin", base);
+ davinci_pll_clk_register(dev, &dm646x_pll2_info, "oscin", base, cfgchip);
davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 23a24c944f1d..1c99e992d638 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
+#include <linux/clk/davinci.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -190,7 +191,7 @@ static int davinci_pll_set_rate(struct clk_hw *hw, unsigned long rate,
}
#ifdef CONFIG_DEBUG_FS
-static int davinci_pll_debug_init(struct clk_hw *hw, struct dentry *dentry);
+static void davinci_pll_debug_init(struct clk_hw *hw, struct dentry *dentry);
#else
#define davinci_pll_debug_init NULL
#endif
@@ -223,6 +224,7 @@ static const struct clk_ops dm365_pll_ops = {
/**
* davinci_pll_div_register - common *DIV clock implementation
+ * @dev: The PLL platform device or NULL
* @name: the clock name
* @parent_name: the parent clock name
* @reg: the *DIV register
@@ -240,17 +242,21 @@ static struct clk *davinci_pll_div_register(struct device *dev,
const struct clk_ops *divider_ops = &clk_divider_ops;
struct clk_gate *gate;
struct clk_divider *divider;
+ struct clk *clk;
+ int ret;
- gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
return ERR_PTR(-ENOMEM);
gate->reg = reg;
gate->bit_idx = DIV_ENABLE_SHIFT;
- divider = devm_kzalloc(dev, sizeof(*divider), GFP_KERNEL);
- if (!divider)
- return ERR_PTR(-ENOMEM);
+ divider = kzalloc(sizeof(*divider), GFP_KERNEL);
+ if (!divider) {
+ ret = -ENOMEM;
+ goto err_free_gate;
+ }
divider->reg = reg;
divider->shift = DIV_RATIO_SHIFT;
@@ -261,9 +267,22 @@ static struct clk *davinci_pll_div_register(struct device *dev,
divider_ops = &clk_divider_ro_ops;
}
- return clk_register_composite(dev, name, parent_names, num_parents,
- NULL, NULL, &divider->hw, divider_ops,
- &gate->hw, &clk_gate_ops, flags);
+ clk = clk_register_composite(dev, name, parent_names, num_parents,
+ NULL, NULL, &divider->hw, divider_ops,
+ &gate->hw, &clk_gate_ops, flags);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_free_divider;
+ }
+
+ return clk;
+
+err_free_divider:
+ kfree(divider);
+err_free_gate:
+ kfree(gate);
+
+ return ERR_PTR(ret);
}
struct davinci_pllen_clk {
@@ -321,36 +340,17 @@ static int davinci_pllen_rate_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static struct davinci_pll_platform_data *davinci_pll_get_pdata(struct device *dev)
-{
- struct davinci_pll_platform_data *pdata = dev_get_platdata(dev);
-
- /*
- * Platform data is optional, so allocate a new struct if one was not
- * provided. For device tree, this will always be the case.
- */
- if (!pdata)
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- /* for device tree, we need to fill in the struct */
- if (dev->of_node)
- pdata->cfgchip =
- syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
-
- return pdata;
-}
-
static struct notifier_block davinci_pllen_notifier = {
.notifier_call = davinci_pllen_rate_change,
};
/**
* davinci_pll_clk_register - Register a PLL clock
+ * @dev: The PLL platform device or NULL
* @info: The device-specific clock info
* @parent_name: The parent clock name
* @base: The PLL's memory region
+ * @cfgchip: CFGCHIP syscon regmap for info->unlock_reg or NULL
*
* This creates a series of clocks that represent the PLL.
*
@@ -366,9 +366,9 @@ static struct notifier_block davinci_pllen_notifier = {
struct clk *davinci_pll_clk_register(struct device *dev,
const struct davinci_pll_clk_info *info,
const char *parent_name,
- void __iomem *base)
+ void __iomem *base,
+ struct regmap *cfgchip)
{
- struct davinci_pll_platform_data *pdata;
char prediv_name[MAX_NAME_SIZE];
char pllout_name[MAX_NAME_SIZE];
char postdiv_name[MAX_NAME_SIZE];
@@ -376,11 +376,12 @@ struct clk *davinci_pll_clk_register(struct device *dev,
struct clk_init_data init;
struct davinci_pll_clk *pllout;
struct davinci_pllen_clk *pllen;
- struct clk *pllout_clk, *clk;
-
- pdata = davinci_pll_get_pdata(dev);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
+ struct clk *oscin_clk = NULL;
+ struct clk *prediv_clk = NULL;
+ struct clk *pllout_clk;
+ struct clk *postdiv_clk = NULL;
+ struct clk *pllen_clk;
+ int ret;
if (info->flags & PLL_HAS_CLKMODE) {
/*
@@ -392,10 +393,10 @@ struct clk *davinci_pll_clk_register(struct device *dev,
* a number of different things. In this driver we use it to
* mean the signal after the PLLCTL[CLKMODE] switch.
*/
- clk = clk_register_fixed_factor(dev, OSCIN_CLK_NAME,
- parent_name, 0, 1, 1);
- if (IS_ERR(clk))
- return clk;
+ oscin_clk = clk_register_fixed_factor(dev, OSCIN_CLK_NAME,
+ parent_name, 0, 1, 1);
+ if (IS_ERR(oscin_clk))
+ return oscin_clk;
parent_name = OSCIN_CLK_NAME;
}
@@ -411,30 +412,34 @@ struct clk *davinci_pll_clk_register(struct device *dev,
/* Some? DM355 chips don't correctly report the PREDIV value */
if (info->flags & PLL_PREDIV_FIXED8)
- clk = clk_register_fixed_factor(dev, prediv_name,
- parent_name, flags, 1, 8);
+ prediv_clk = clk_register_fixed_factor(dev, prediv_name,
+ parent_name, flags, 1, 8);
else
- clk = davinci_pll_div_register(dev, prediv_name,
+ prediv_clk = davinci_pll_div_register(dev, prediv_name,
parent_name, base + PREDIV, fixed, flags);
- if (IS_ERR(clk))
- return clk;
+ if (IS_ERR(prediv_clk)) {
+ ret = PTR_ERR(prediv_clk);
+ goto err_unregister_oscin;
+ }
parent_name = prediv_name;
}
/* Unlock writing to PLL registers */
if (info->unlock_reg) {
- if (IS_ERR_OR_NULL(pdata->cfgchip))
+ if (IS_ERR_OR_NULL(cfgchip))
dev_warn(dev, "Failed to get CFGCHIP (%ld)\n",
- PTR_ERR(pdata->cfgchip));
+ PTR_ERR(cfgchip));
else
- regmap_write_bits(pdata->cfgchip, info->unlock_reg,
+ regmap_write_bits(cfgchip, info->unlock_reg,
info->unlock_mask, 0);
}
- pllout = devm_kzalloc(dev, sizeof(*pllout), GFP_KERNEL);
- if (!pllout)
- return ERR_PTR(-ENOMEM);
+ pllout = kzalloc(sizeof(*pllout), GFP_KERNEL);
+ if (!pllout) {
+ ret = -ENOMEM;
+ goto err_unregister_prediv;
+ }
snprintf(pllout_name, MAX_NAME_SIZE, "%s_pllout", info->name);
@@ -456,9 +461,11 @@ struct clk *davinci_pll_clk_register(struct device *dev,
pllout->pllm_min = info->pllm_min;
pllout->pllm_max = info->pllm_max;
- pllout_clk = devm_clk_register(dev, &pllout->hw);
- if (IS_ERR(pllout_clk))
- return pllout_clk;
+ pllout_clk = clk_register(dev, &pllout->hw);
+ if (IS_ERR(pllout_clk)) {
+ ret = PTR_ERR(pllout_clk);
+ goto err_free_pllout;
+ }
clk_hw_set_rate_range(&pllout->hw, info->pllout_min_rate,
info->pllout_max_rate);
@@ -474,17 +481,21 @@ struct clk *davinci_pll_clk_register(struct device *dev,
if (info->flags & PLL_POSTDIV_ALWAYS_ENABLED)
flags |= CLK_IS_CRITICAL;
- clk = davinci_pll_div_register(dev, postdiv_name, parent_name,
- base + POSTDIV, fixed, flags);
- if (IS_ERR(clk))
- return clk;
+ postdiv_clk = davinci_pll_div_register(dev, postdiv_name,
+ parent_name, base + POSTDIV, fixed, flags);
+ if (IS_ERR(postdiv_clk)) {
+ ret = PTR_ERR(postdiv_clk);
+ goto err_unregister_pllout;
+ }
parent_name = postdiv_name;
}
- pllen = devm_kzalloc(dev, sizeof(*pllout), GFP_KERNEL);
- if (!pllen)
- return ERR_PTR(-ENOMEM);
+	pllen = kzalloc(sizeof(*pllen), GFP_KERNEL);
+ if (!pllen) {
+ ret = -ENOMEM;
+ goto err_unregister_postdiv;
+ }
snprintf(pllen_name, MAX_NAME_SIZE, "%s_pllen", info->name);
@@ -497,17 +508,35 @@ struct clk *davinci_pll_clk_register(struct device *dev,
pllen->hw.init = &init;
pllen->base = base;
- clk = devm_clk_register(dev, &pllen->hw);
- if (IS_ERR(clk))
- return clk;
+ pllen_clk = clk_register(dev, &pllen->hw);
+ if (IS_ERR(pllen_clk)) {
+ ret = PTR_ERR(pllen_clk);
+ goto err_free_pllen;
+ }
- clk_notifier_register(clk, &davinci_pllen_notifier);
+ clk_notifier_register(pllen_clk, &davinci_pllen_notifier);
return pllout_clk;
+
+err_free_pllen:
+ kfree(pllen);
+err_unregister_postdiv:
+ clk_unregister(postdiv_clk);
+err_unregister_pllout:
+ clk_unregister(pllout_clk);
+err_free_pllout:
+ kfree(pllout);
+err_unregister_prediv:
+ clk_unregister(prediv_clk);
+err_unregister_oscin:
+ clk_unregister(oscin_clk);
+
+ return ERR_PTR(ret);
}
/**
* davinci_pll_auxclk_register - Register bypass clock (AUXCLK)
+ * @dev: The PLL platform device or NULL
* @name: The clock name
* @base: The PLL memory region
*/
@@ -521,6 +550,7 @@ struct clk *davinci_pll_auxclk_register(struct device *dev,
/**
* davinci_pll_sysclkbp_clk_register - Register bypass divider clock (SYSCLKBP)
+ * @dev: The PLL platform device or NULL
* @name: The clock name
* @base: The PLL memory region
*/
@@ -535,6 +565,7 @@ struct clk *davinci_pll_sysclkbp_clk_register(struct device *dev,
/**
* davinci_pll_obsclk_register - Register oscillator divider clock (OBSCLK)
+ * @dev: The PLL platform device or NULL
* @info: The clock info
* @base: The PLL memory region
*/
@@ -546,9 +577,11 @@ davinci_pll_obsclk_register(struct device *dev,
struct clk_mux *mux;
struct clk_gate *gate;
struct clk_divider *divider;
+ struct clk *clk;
u32 oscdiv;
+ int ret;
- mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
return ERR_PTR(-ENOMEM);
@@ -556,16 +589,20 @@ davinci_pll_obsclk_register(struct device *dev,
mux->table = info->table;
mux->mask = info->ocsrc_mask;
- gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
- if (!gate)
- return ERR_PTR(-ENOMEM);
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ ret = -ENOMEM;
+ goto err_free_mux;
+ }
gate->reg = base + CKEN;
gate->bit_idx = CKEN_OBSCLK_SHIFT;
- divider = devm_kzalloc(dev, sizeof(*divider), GFP_KERNEL);
- if (!divider)
- return ERR_PTR(-ENOMEM);
+ divider = kzalloc(sizeof(*divider), GFP_KERNEL);
+ if (!divider) {
+ ret = -ENOMEM;
+ goto err_free_gate;
+ }
divider->reg = base + OSCDIV;
divider->shift = DIV_RATIO_SHIFT;
@@ -576,11 +613,27 @@ davinci_pll_obsclk_register(struct device *dev,
oscdiv |= BIT(DIV_ENABLE_SHIFT);
writel(oscdiv, base + OSCDIV);
- return clk_register_composite(dev, info->name, info->parent_names,
- info->num_parents,
- &mux->hw, &clk_mux_ops,
- &divider->hw, &clk_divider_ops,
- &gate->hw, &clk_gate_ops, 0);
+ clk = clk_register_composite(dev, info->name, info->parent_names,
+ info->num_parents,
+ &mux->hw, &clk_mux_ops,
+ &divider->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops, 0);
+
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_free_divider;
+ }
+
+ return clk;
+
+err_free_divider:
+ kfree(divider);
+err_free_gate:
+ kfree(gate);
+err_free_mux:
+ kfree(mux);
+
+ return ERR_PTR(ret);
}
/* The PLL SYSCLKn clocks have a mechanism for synchronizing rate changes. */
@@ -616,6 +669,7 @@ static struct notifier_block davinci_pll_sysclk_notifier = {
/**
* davinci_pll_sysclk_register - Register divider clocks (SYSCLKn)
+ * @dev: The PLL platform device or NULL
* @info: The clock info
* @base: The PLL memory region
*/
@@ -630,6 +684,7 @@ davinci_pll_sysclk_register(struct device *dev,
struct clk *clk;
u32 reg;
u32 flags = 0;
+ int ret;
/* PLLDIVn registers are not entirely consecutive */
if (info->id < 4)
@@ -637,16 +692,18 @@ davinci_pll_sysclk_register(struct device *dev,
else
reg = PLLDIV4 + 4 * (info->id - 4);
- gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
return ERR_PTR(-ENOMEM);
gate->reg = base + reg;
gate->bit_idx = DIV_ENABLE_SHIFT;
- divider = devm_kzalloc(dev, sizeof(*divider), GFP_KERNEL);
- if (!divider)
- return ERR_PTR(-ENOMEM);
+ divider = kzalloc(sizeof(*divider), GFP_KERNEL);
+ if (!divider) {
+ ret = -ENOMEM;
+ goto err_free_gate;
+ }
divider->reg = base + reg;
divider->shift = DIV_RATIO_SHIFT;
@@ -668,22 +725,31 @@ davinci_pll_sysclk_register(struct device *dev,
clk = clk_register_composite(dev, info->name, &info->parent_name, 1,
NULL, NULL, &divider->hw, divider_ops,
&gate->hw, &clk_gate_ops, flags);
- if (IS_ERR(clk))
- return clk;
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_free_divider;
+ }
clk_notifier_register(clk, &davinci_pll_sysclk_notifier);
return clk;
+
+err_free_divider:
+ kfree(divider);
+err_free_gate:
+ kfree(gate);
+
+ return ERR_PTR(ret);
}
-int of_davinci_pll_init(struct device *dev,
+int of_davinci_pll_init(struct device *dev, struct device_node *node,
const struct davinci_pll_clk_info *info,
const struct davinci_pll_obsclk_info *obsclk_info,
const struct davinci_pll_sysclk_info **div_info,
u8 max_sysclk_id,
- void __iomem *base)
+ void __iomem *base,
+ struct regmap *cfgchip)
{
- struct device_node *node = dev->of_node;
struct device_node *child;
const char *parent_name;
struct clk *clk;
@@ -693,7 +759,7 @@ int of_davinci_pll_init(struct device *dev,
else
parent_name = OSCIN_CLK_NAME;
- clk = davinci_pll_clk_register(dev, info, parent_name, base);
+ clk = davinci_pll_clk_register(dev, info, parent_name, base, cfgchip);
if (IS_ERR(clk)) {
dev_err(dev, "failed to register %s\n", info->name);
return PTR_ERR(clk);
@@ -711,13 +777,15 @@ int of_davinci_pll_init(struct device *dev,
int n_clks = max_sysclk_id + 1;
int i;
- clk_data = devm_kzalloc(dev, sizeof(*clk_data), GFP_KERNEL);
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
- clks = devm_kmalloc_array(dev, n_clks, sizeof(*clks), GFP_KERNEL);
- if (!clks)
+ clks = kmalloc_array(n_clks, sizeof(*clks), GFP_KERNEL);
+ if (!clks) {
+ kfree(clk_data);
return -ENOMEM;
+ }
clk_data->clks = clks;
clk_data->clk_num = n_clks;
@@ -770,32 +838,73 @@ int of_davinci_pll_init(struct device *dev,
return 0;
}
+static struct davinci_pll_platform_data *davinci_pll_get_pdata(struct device *dev)
+{
+ struct davinci_pll_platform_data *pdata = dev_get_platdata(dev);
+
+ /*
+ * Platform data is optional, so allocate a new struct if one was not
+ * provided. For device tree, this will always be the case.
+ */
+ if (!pdata)
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ /* for device tree, we need to fill in the struct */
+ if (dev->of_node)
+ pdata->cfgchip =
+ syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
+
+ return pdata;
+}
+
+/* needed in early boot for clocksource/clockevent */
+#ifdef CONFIG_ARCH_DAVINCI_DA850
+CLK_OF_DECLARE(da850_pll0, "ti,da850-pll0", of_da850_pll0_init);
+#endif
+
static const struct of_device_id davinci_pll_of_match[] = {
- { .compatible = "ti,da850-pll0", .data = of_da850_pll0_init },
+#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .compatible = "ti,da850-pll1", .data = of_da850_pll1_init },
+#endif
{ }
};
static const struct platform_device_id davinci_pll_id_table[] = {
+#ifdef CONFIG_ARCH_DAVINCI_DA830
{ .name = "da830-pll", .driver_data = (kernel_ulong_t)da830_pll_init },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .name = "da850-pll0", .driver_data = (kernel_ulong_t)da850_pll0_init },
{ .name = "da850-pll1", .driver_data = (kernel_ulong_t)da850_pll1_init },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM355
{ .name = "dm355-pll1", .driver_data = (kernel_ulong_t)dm355_pll1_init },
{ .name = "dm355-pll2", .driver_data = (kernel_ulong_t)dm355_pll2_init },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM365
{ .name = "dm365-pll1", .driver_data = (kernel_ulong_t)dm365_pll1_init },
{ .name = "dm365-pll2", .driver_data = (kernel_ulong_t)dm365_pll2_init },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM644x
{ .name = "dm644x-pll1", .driver_data = (kernel_ulong_t)dm644x_pll1_init },
{ .name = "dm644x-pll2", .driver_data = (kernel_ulong_t)dm644x_pll2_init },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM646x
{ .name = "dm646x-pll1", .driver_data = (kernel_ulong_t)dm646x_pll1_init },
{ .name = "dm646x-pll2", .driver_data = (kernel_ulong_t)dm646x_pll2_init },
+#endif
{ }
};
-typedef int (*davinci_pll_init)(struct device *dev, void __iomem *base);
+typedef int (*davinci_pll_init)(struct device *dev, void __iomem *base,
+ struct regmap *cfgchip);
static int davinci_pll_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct davinci_pll_platform_data *pdata;
const struct of_device_id *of_id;
davinci_pll_init pll_init = NULL;
struct resource *res;
@@ -812,12 +921,18 @@ static int davinci_pll_probe(struct platform_device *pdev)
return -EINVAL;
}
+ pdata = davinci_pll_get_pdata(dev);
+ if (!pdata) {
+ dev_err(dev, "missing platform data\n");
+ return -EINVAL;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
- return pll_init(dev, base);
+ return pll_init(dev, base, pdata->cfgchip);
}
static struct platform_driver davinci_pll_driver = {
@@ -874,26 +989,19 @@ static const struct debugfs_reg32 davinci_pll_regs[] = {
DEBUG_REG(PLLDIV9),
};
-static int davinci_pll_debug_init(struct clk_hw *hw, struct dentry *dentry)
+static void davinci_pll_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
struct debugfs_regset32 *regset;
- struct dentry *d;
regset = kzalloc(sizeof(*regset), GFP_KERNEL);
if (!regset)
- return -ENOMEM;
+ return;
regset->regs = davinci_pll_regs;
regset->nregs = ARRAY_SIZE(davinci_pll_regs);
regset->base = pll->base;
- d = debugfs_create_regset32("registers", 0400, dentry, regset);
- if (IS_ERR(d)) {
- kfree(regset);
- return PTR_ERR(d);
- }
-
- return 0;
+ debugfs_create_regset32("registers", 0400, dentry, regset);
}
#endif
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index b1b6fb23f972..7cc354dd29e2 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -11,6 +11,7 @@
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
+#include <linux/regmap.h>
#include <linux/types.h>
#define PLL_HAS_CLKMODE BIT(0) /* PLL has PLLCTL[CLKMODE] */
@@ -94,7 +95,8 @@ struct davinci_pll_obsclk_info {
struct clk *davinci_pll_clk_register(struct device *dev,
const struct davinci_pll_clk_info *info,
const char *parent_name,
- void __iomem *base);
+ void __iomem *base,
+ struct regmap *cfgchip);
struct clk *davinci_pll_auxclk_register(struct device *dev,
const char *name,
void __iomem *base);
@@ -110,32 +112,29 @@ davinci_pll_sysclk_register(struct device *dev,
const struct davinci_pll_sysclk_info *info,
void __iomem *base);
-int of_davinci_pll_init(struct device *dev,
+int of_davinci_pll_init(struct device *dev, struct device_node *node,
const struct davinci_pll_clk_info *info,
const struct davinci_pll_obsclk_info *obsclk_info,
const struct davinci_pll_sysclk_info **div_info,
u8 max_sysclk_id,
- void __iomem *base);
+ void __iomem *base,
+ struct regmap *cfgchip);
/* Platform-specific callbacks */
-int da830_pll_init(struct device *dev, void __iomem *base);
-
-int da850_pll0_init(struct device *dev, void __iomem *base);
-int da850_pll1_init(struct device *dev, void __iomem *base);
-int of_da850_pll0_init(struct device *dev, void __iomem *base);
-int of_da850_pll1_init(struct device *dev, void __iomem *base);
-
-int dm355_pll1_init(struct device *dev, void __iomem *base);
-int dm355_pll2_init(struct device *dev, void __iomem *base);
-
-int dm365_pll1_init(struct device *dev, void __iomem *base);
-int dm365_pll2_init(struct device *dev, void __iomem *base);
-
-int dm644x_pll1_init(struct device *dev, void __iomem *base);
-int dm644x_pll2_init(struct device *dev, void __iomem *base);
-
-int dm646x_pll1_init(struct device *dev, void __iomem *base);
-int dm646x_pll2_init(struct device *dev, void __iomem *base);
+#ifdef CONFIG_ARCH_DAVINCI_DA850
+int da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+void of_da850_pll0_init(struct device_node *node);
+int of_da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM355
+int dm355_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM644x
+int dm644x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM646x
+int dm646x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
#endif /* __CLK_DAVINCI_PLL_H___ */
diff --git a/drivers/clk/davinci/psc-da830.c b/drivers/clk/davinci/psc-da830.c
index f61abf5632ff..081b039fcb02 100644
--- a/drivers/clk/davinci/psc-da830.c
+++ b/drivers/clk/davinci/psc-da830.c
@@ -55,7 +55,8 @@ const struct davinci_psc_init_data da830_psc0_init_data = {
.psc_init = &da830_psc0_init,
};
-LPSC_CLKDEV2(usb0_clkdev, NULL, "musb-da8xx",
+LPSC_CLKDEV3(usb0_clkdev, "fck", "da830-usb-phy-clks",
+ NULL, "musb-da8xx",
NULL, "cppi41-dmaengine");
LPSC_CLKDEV1(usb1_clkdev, NULL, "ohci-da8xx");
/* REVISIT: gpio-davinci.c should be modified to drop con_id */
diff --git a/drivers/clk/davinci/psc-dm355.c b/drivers/clk/davinci/psc-dm355.c
index 6995ecea2677..ddd250107c4e 100644
--- a/drivers/clk/davinci/psc-dm355.c
+++ b/drivers/clk/davinci/psc-dm355.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>
@@ -41,14 +42,14 @@ static const struct davinci_lpsc_clk_info dm355_psc_info[] = {
LPSC(5, 0, timer3, pll1_auxclk, NULL, 0),
LPSC(6, 0, spi1, pll1_sysclk2, spi1_clkdev, 0),
LPSC(7, 0, mmcsd1, pll1_sysclk2, mmcsd1_clkdev, 0),
- LPSC(8, 0, asp1, pll1_sysclk2, NULL, 0),
+ LPSC(8, 0, asp1, pll1_sysclk2, mcbsp1_clkdev, 0),
LPSC(9, 0, usb, pll1_sysclk2, usb_clkdev, 0),
LPSC(10, 0, pwm3, pll1_auxclk, NULL, 0),
LPSC(11, 0, spi2, pll1_sysclk2, spi2_clkdev, 0),
LPSC(12, 0, rto, pll1_auxclk, NULL, 0),
LPSC(14, 0, aemif, pll1_sysclk2, aemif_clkdev, 0),
LPSC(15, 0, mmcsd0, pll1_sysclk2, mmcsd0_clkdev, 0),
- LPSC(17, 0, asp0, pll1_sysclk2, NULL, 0),
+ LPSC(17, 0, asp0, pll1_sysclk2, mcbsp0_clkdev, 0),
LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
LPSC(20, 0, uart1, pll1_auxclk, uart1_clkdev, 0),
@@ -68,7 +69,7 @@ static const struct davinci_lpsc_clk_info dm355_psc_info[] = {
{ }
};
-static int dm355_psc_init(struct device *dev, void __iomem *base)
+int dm355_psc_init(struct device *dev, void __iomem *base)
{
return davinci_psc_register_clocks(dev, dm355_psc_info, 42, base);
}
diff --git a/drivers/clk/davinci/psc-dm365.c b/drivers/clk/davinci/psc-dm365.c
index 3ad915f37376..8c73086cc676 100644
--- a/drivers/clk/davinci/psc-dm365.c
+++ b/drivers/clk/davinci/psc-dm365.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>
@@ -65,15 +66,28 @@ static const struct davinci_lpsc_clk_info dm365_psc_info[] = {
LPSC(31, 0, arm, pll2_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
LPSC(38, 0, spi3, pll1_sysclk4, spi3_clkdev, 0),
LPSC(39, 0, spi4, pll1_auxclk, spi4_clkdev, 0),
- LPSC(40, 0, emac, pll2_sysclk4, emac_clkdev, 0),
- LPSC(44, 1, voice_codec, pll1_sysclk3, voice_codec_clkdev, 0),
- LPSC(46, 1, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
+ LPSC(40, 0, emac, pll1_sysclk4, emac_clkdev, 0),
+ /*
+ * The TRM (ARM Subsystem User's Guide) shows two clocks input into
+ * voice codec module (PLL2 SYSCLK4 with a DIV2 and PLL1 SYSCLK4). Its
+ * not fully clear from documentation which clock should be considered
+ * as parent for PSC. The clock chosen here is to maintain
+ * compatibility with existing code in arch/arm/mach-davinci/dm365.c
+ */
+ LPSC(44, 0, voice_codec, pll2_sysclk4, voice_codec_clkdev, 0),
+ /*
+ * Its not fully clear from TRM (ARM Subsystem User's Guide) as to what
+ * the parent of VPSS DAC LPSC should actually be. PLL1 SYSCLK3 feeds
+ * into HDVICP and MJCP. The clock chosen here is to remain compatible
+ * with code existing in arch/arm/mach-davinci/dm365.c
+ */
+ LPSC(46, 0, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
LPSC(47, 0, vpss_master, pll1_sysclk5, vpss_master_clkdev, 0),
LPSC(50, 0, mjcp, pll1_sysclk3, NULL, 0),
{ }
};
-static int dm365_psc_init(struct device *dev, void __iomem *base)
+int dm365_psc_init(struct device *dev, void __iomem *base)
{
return davinci_psc_register_clocks(dev, dm365_psc_info, 52, base);
}
diff --git a/drivers/clk/davinci/psc-dm644x.c b/drivers/clk/davinci/psc-dm644x.c
index c22367baa46f..fc0230e3a3d6 100644
--- a/drivers/clk/davinci/psc-dm644x.c
+++ b/drivers/clk/davinci/psc-dm644x.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>
@@ -63,7 +64,7 @@ static const struct davinci_lpsc_clk_info dm644x_psc_info[] = {
{ }
};
-static int dm644x_psc_init(struct device *dev, void __iomem *base)
+int dm644x_psc_init(struct device *dev, void __iomem *base)
{
return davinci_psc_register_clocks(dev, dm644x_psc_info, 41, base);
}
diff --git a/drivers/clk/davinci/psc-dm646x.c b/drivers/clk/davinci/psc-dm646x.c
index 468ef86ea40b..c3f82ed70a80 100644
--- a/drivers/clk/davinci/psc-dm646x.c
+++ b/drivers/clk/davinci/psc-dm646x.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>
@@ -58,7 +59,7 @@ static const struct davinci_lpsc_clk_info dm646x_psc_info[] = {
{ }
};
-static int dm646x_psc_init(struct device *dev, void __iomem *base)
+int dm646x_psc_init(struct device *dev, void __iomem *base)
{
return davinci_psc_register_clocks(dev, dm646x_psc_info, 46, base);
}
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index ce170e600f09..fffbed5e263b 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -15,6 +15,7 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
+#include <linux/clk/davinci.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/of_address.h>
@@ -63,7 +64,7 @@ struct davinci_psc_data {
/**
* struct davinci_lpsc_clk - LPSC clock structure
- * @dev: the device that provides this LPSC
+ * @dev: the device that provides this LPSC or NULL
* @hw: clk_hw for the LPSC
* @pm_domain: power domain for the LPSC
* @genpd_clk: clock reference owned by @pm_domain
@@ -221,6 +222,7 @@ static void davinci_psc_genpd_detach_dev(struct generic_pm_domain *pm_domain,
/**
* davinci_lpsc_clk_register - register LPSC clock
+ * @dev: the clocks's device or NULL
* @name: name of this clock
* @parent_name: name of clock's parent
* @regmap: PSC MMIO region
@@ -238,7 +240,7 @@ davinci_lpsc_clk_register(struct device *dev, const char *name,
int ret;
bool is_on;
- lpsc = devm_kzalloc(dev, sizeof(*lpsc), GFP_KERNEL);
+ lpsc = kzalloc(sizeof(*lpsc), GFP_KERNEL);
if (!lpsc)
return ERR_PTR(-ENOMEM);
@@ -261,9 +263,15 @@ davinci_lpsc_clk_register(struct device *dev, const char *name,
lpsc->pd = pd;
lpsc->flags = flags;
- ret = devm_clk_hw_register(dev, &lpsc->hw);
- if (ret < 0)
+ ret = clk_hw_register(dev, &lpsc->hw);
+ if (ret < 0) {
+ kfree(lpsc);
return ERR_PTR(ret);
+ }
+
+ /* for now, genpd is only registered when using device-tree */
+ if (!dev || !dev->of_node)
+ return lpsc;
/* genpd attach needs a way to look up this clock */
ret = clk_hw_register_clkdev(&lpsc->hw, name, best_dev_name(dev));
@@ -378,13 +386,15 @@ __davinci_psc_register_clocks(struct device *dev,
struct regmap *regmap;
int i, ret;
- psc = devm_kzalloc(dev, sizeof(*psc), GFP_KERNEL);
+ psc = kzalloc(sizeof(*psc), GFP_KERNEL);
if (!psc)
return ERR_PTR(-ENOMEM);
- clks = devm_kmalloc_array(dev, num_clks, sizeof(*clks), GFP_KERNEL);
- if (!clks)
- return ERR_PTR(-ENOMEM);
+ clks = kmalloc_array(num_clks, sizeof(*clks), GFP_KERNEL);
+ if (!clks) {
+ ret = -ENOMEM;
+ goto err_free_psc;
+ }
psc->clk_data.clks = clks;
psc->clk_data.clk_num = num_clks;
@@ -396,16 +406,20 @@ __davinci_psc_register_clocks(struct device *dev,
for (i = 0; i < num_clks; i++)
clks[i] = ERR_PTR(-ENOENT);
- pm_domains = devm_kcalloc(dev, num_clks, sizeof(*pm_domains), GFP_KERNEL);
- if (!pm_domains)
- return ERR_PTR(-ENOMEM);
+ pm_domains = kcalloc(num_clks, sizeof(*pm_domains), GFP_KERNEL);
+ if (!pm_domains) {
+ ret = -ENOMEM;
+ goto err_free_clks;
+ }
psc->pm_data.domains = pm_domains;
psc->pm_data.num_domains = num_clks;
- regmap = devm_regmap_init_mmio(dev, base, &davinci_psc_regmap_config);
- if (IS_ERR(regmap))
- return ERR_CAST(regmap);
+ regmap = regmap_init_mmio(dev, base, &davinci_psc_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_free_pm_domains;
+ }
for (; info->name; info++) {
struct davinci_lpsc_clk *lpsc;
@@ -423,6 +437,13 @@ __davinci_psc_register_clocks(struct device *dev,
pm_domains[info->md] = &lpsc->pm_domain;
}
+ /*
+ * for now, a reset controller is only registered when there is a device
+ * to associate it with.
+ */
+ if (!dev)
+ return psc;
+
psc->rcdev.ops = &davinci_psc_reset_ops;
psc->rcdev.owner = THIS_MODULE;
psc->rcdev.dev = dev;
@@ -436,6 +457,15 @@ __davinci_psc_register_clocks(struct device *dev,
dev_warn(dev, "Failed to register reset controller (%d)\n", ret);
return psc;
+
+err_free_pm_domains:
+ kfree(pm_domains);
+err_free_clks:
+ kfree(clks);
+err_free_psc:
+ kfree(psc);
+
+ return ERR_PTR(ret);
}
int davinci_psc_register_clocks(struct device *dev,
@@ -483,20 +513,34 @@ int of_davinci_psc_clk_init(struct device *dev,
}
static const struct of_device_id davinci_psc_of_match[] = {
+#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .compatible = "ti,da850-psc0", .data = &of_da850_psc0_init_data },
{ .compatible = "ti,da850-psc1", .data = &of_da850_psc1_init_data },
+#endif
{ }
};
static const struct platform_device_id davinci_psc_id_table[] = {
+#ifdef CONFIG_ARCH_DAVINCI_DA830
{ .name = "da830-psc0", .driver_data = (kernel_ulong_t)&da830_psc0_init_data },
{ .name = "da830-psc1", .driver_data = (kernel_ulong_t)&da830_psc1_init_data },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .name = "da850-psc0", .driver_data = (kernel_ulong_t)&da850_psc0_init_data },
{ .name = "da850-psc1", .driver_data = (kernel_ulong_t)&da850_psc1_init_data },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM355
{ .name = "dm355-psc", .driver_data = (kernel_ulong_t)&dm355_psc_init_data },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM365
{ .name = "dm365-psc", .driver_data = (kernel_ulong_t)&dm365_psc_init_data },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM644x
{ .name = "dm644x-psc", .driver_data = (kernel_ulong_t)&dm644x_psc_init_data },
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM646x
{ .name = "dm646x-psc", .driver_data = (kernel_ulong_t)&dm646x_psc_init_data },
+#endif
{ }
};
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index c2a7df6413fe..6a42529d31a9 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -94,15 +94,27 @@ struct davinci_psc_init_data {
int (*psc_init)(struct device *dev, void __iomem *base);
};
+#ifdef CONFIG_ARCH_DAVINCI_DA830
extern const struct davinci_psc_init_data da830_psc0_init_data;
extern const struct davinci_psc_init_data da830_psc1_init_data;
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DA850
extern const struct davinci_psc_init_data da850_psc0_init_data;
extern const struct davinci_psc_init_data da850_psc1_init_data;
extern const struct davinci_psc_init_data of_da850_psc0_init_data;
extern const struct davinci_psc_init_data of_da850_psc1_init_data;
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM355
extern const struct davinci_psc_init_data dm355_psc_init_data;
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM365
extern const struct davinci_psc_init_data dm365_psc_init_data;
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM644x
extern const struct davinci_psc_init_data dm644x_psc_init_data;
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM646x
extern const struct davinci_psc_init_data dm646x_psc_init_data;
+#endif
#endif /* __CLK_DAVINCI_PSC_H__ */
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index 1bd43550e4c8..becdb1dd21b5 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -44,14 +44,17 @@ config RESET_HISI
Build reset controller driver for HiSilicon device chipsets.
config STUB_CLK_HI6220
- bool "Hi6220 Stub Clock Driver"
- depends on COMMON_CLK_HI6220 && MAILBOX
- default ARCH_HISI
+ bool "Hi6220 Stub Clock Driver" if EXPERT
+ depends on (COMMON_CLK_HI6220 || COMPILE_TEST)
+ depends on MAILBOX
+ default COMMON_CLK_HI6220
help
Build the Hisilicon Hi6220 stub clock driver.
config STUB_CLK_HI3660
- bool "Hi3660 Stub Clock Driver"
- depends on COMMON_CLK_HI3660 && MAILBOX
+ bool "Hi3660 Stub Clock Driver" if EXPERT
+ depends on (COMMON_CLK_HI3660 || COMPILE_TEST)
+ depends on MAILBOX
+ default COMMON_CLK_HI3660
help
Build the Hisilicon Hi3660 stub clock driver.
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index 743eec131528..4fe0b2a9baf1 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -186,6 +186,23 @@ static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
CLK_SET_RATE_PARENT, 0xbc, 0, 0 },
{ HISTB_USB2_PHY2_REF_CLK, "clk_u2_phy2_ref", "24m",
CLK_SET_RATE_PARENT, 0xbc, 2, 0 },
+ /* USB3 */
+ { HISTB_USB3_BUS_CLK, "clk_u3_bus", NULL,
+ CLK_SET_RATE_PARENT, 0xb0, 0, 0 },
+ { HISTB_USB3_UTMI_CLK, "clk_u3_utmi", NULL,
+ CLK_SET_RATE_PARENT, 0xb0, 4, 0 },
+ { HISTB_USB3_PIPE_CLK, "clk_u3_pipe", NULL,
+ CLK_SET_RATE_PARENT, 0xb0, 3, 0 },
+ { HISTB_USB3_SUSPEND_CLK, "clk_u3_suspend", NULL,
+ CLK_SET_RATE_PARENT, 0xb0, 2, 0 },
+ { HISTB_USB3_BUS_CLK1, "clk_u3_bus1", NULL,
+ CLK_SET_RATE_PARENT, 0xb0, 16, 0 },
+ { HISTB_USB3_UTMI_CLK1, "clk_u3_utmi1", NULL,
+ CLK_SET_RATE_PARENT, 0xb0, 20, 0 },
+ { HISTB_USB3_PIPE_CLK1, "clk_u3_pipe1", NULL,
+ CLK_SET_RATE_PARENT, 0xb0, 19, 0 },
+ { HISTB_USB3_SUSPEND_CLK1, "clk_u3_suspend1", NULL,
+ CLK_SET_RATE_PARENT, 0xb0, 18, 0 },
};
static struct hisi_clock_data *hi3798cv200_clk_register(
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 8d518ad5dc13..b9ea7037e193 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -753,6 +753,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
else
clk[IMX6Q_CLK_ECSPI5] = imx_clk_gate2("ecspi5", "ecspi_root", base + 0x6c, 8);
clk[IMX6QDL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10);
+ clk[IMX6QDL_CLK_EPIT1] = imx_clk_gate2("epit1", "ipg", base + 0x6c, 12);
+ clk[IMX6QDL_CLK_EPIT2] = imx_clk_gate2("epit2", "ipg", base + 0x6c, 14);
clk[IMX6QDL_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai);
clk[IMX6QDL_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai);
clk[IMX6QDL_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 9642cdf0fb88..66b1dd1cfad0 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -330,7 +330,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
clks[IMX6SL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
/* name parent_name reg shift width */
- clks[IMX6SL_CLK_OCRAM_PODF] = imx_clk_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3);
+ clks[IMX6SL_CLK_OCRAM_PODF] = imx_clk_busy_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3, base + 0x48, 0);
clks[IMX6SL_CLK_PERIPH_CLK2_PODF] = imx_clk_divider("periph_clk2_podf", "periph_clk2_sel", base + 0x14, 27, 3);
clks[IMX6SL_CLK_PERIPH2_CLK2_PODF] = imx_clk_divider("periph2_clk2_podf", "periph2_clk2_sel", base + 0x14, 0, 3);
clks[IMX6SL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index bc3f9ebf2d9e..10c771b91ef6 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -80,7 +80,7 @@ static const char *lvds_sels[] = {
"arm", "pll1_sys", "dummy", "dummy", "dummy", "dummy", "dummy", "pll5_video_div",
"dummy", "dummy", "pcie_ref_125m", "dummy", "usbphy1", "usbphy2",
};
-static const char *pll_bypass_src_sels[] = { "osc", "lvds1_in", };
+static const char *pll_bypass_src_sels[] = { "osc", "lvds1_in", "lvds2_in", "dummy", };
static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
@@ -97,12 +97,7 @@ static int const clks_init_on[] __initconst = {
IMX6SX_CLK_IPMUX1, IMX6SX_CLK_IPMUX2, IMX6SX_CLK_IPMUX3,
IMX6SX_CLK_WAKEUP, IMX6SX_CLK_MMDC_P0_FAST, IMX6SX_CLK_MMDC_P0_IPG,
IMX6SX_CLK_ROM, IMX6SX_CLK_ARM, IMX6SX_CLK_IPG, IMX6SX_CLK_OCRAM,
- IMX6SX_CLK_PER2_MAIN, IMX6SX_CLK_PERCLK, IMX6SX_CLK_M4,
- IMX6SX_CLK_QSPI1, IMX6SX_CLK_QSPI2, IMX6SX_CLK_UART_IPG,
- IMX6SX_CLK_UART_SERIAL, IMX6SX_CLK_I2C3, IMX6SX_CLK_ECSPI5,
- IMX6SX_CLK_CAN1_IPG, IMX6SX_CLK_CAN1_SERIAL, IMX6SX_CLK_CAN2_IPG,
- IMX6SX_CLK_CAN2_SERIAL, IMX6SX_CLK_CANFD, IMX6SX_CLK_EPIT1,
- IMX6SX_CLK_EPIT2,
+ IMX6SX_CLK_PER2_MAIN, IMX6SX_CLK_PERCLK, IMX6SX_CLK_TZASC1,
};
static const struct clk_div_table clk_enet_ref_table[] = {
@@ -158,8 +153,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0");
clks[IMX6SX_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1");
- /* Clock source from external clock via CLK1 PAD */
- clks[IMX6SX_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0);
+ /* Clock source from external clock via CLK1/2 PAD */
+ clks[IMX6SX_CLK_ANACLK1] = of_clk_get_by_name(ccm_node, "anaclk1");
+ clks[IMX6SX_CLK_ANACLK2] = of_clk_get_by_name(ccm_node, "anaclk2");
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
base = of_iomap(np, 0);
@@ -228,7 +224,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_PCIE_REF_125M] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19);
clks[IMX6SX_CLK_LVDS1_OUT] = imx_clk_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x160, 10, BIT(12));
+ clks[IMX6SX_CLK_LVDS2_OUT] = imx_clk_gate_exclusive("lvds2_out", "lvds2_sel", base + 0x160, 11, BIT(13));
clks[IMX6SX_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", base + 0x160, 12, BIT(10));
+ clks[IMX6SX_CLK_LVDS2_IN] = imx_clk_gate_exclusive("lvds2_in", "anaclk2", base + 0x160, 13, BIT(11));
clks[IMX6SX_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0,
base + 0xe0, 0, 2, 0, clk_enet_ref_table,
@@ -270,6 +268,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
/* name reg shift width parent_names num_parents */
clks[IMX6SX_CLK_LVDS1_SEL] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
+ clks[IMX6SX_CLK_LVDS2_SEL] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
np = ccm_node;
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 12320118f8de..ba563ba50b40 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -40,7 +40,7 @@ static const char *axi_alt_sels[] = { "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *axi_sels[] = {"periph", "axi_alt_sel", };
static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
static const char *periph2_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll4_audio_div", };
-static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", };
+static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "pll2_bypass_src", };
static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "osc", };
static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
@@ -68,6 +68,13 @@ static const char *sim_sels[] = { "sim_podf", "ipp_di0", "ipp_di1", "ldb_di0", "
static const char *epdc_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", };
static const char *esai_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", };
static const char *epdc_sels[] = { "epdc_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+static const char *cko1_sels[] = { "dummy", "dummy", "dummy", "dummy", "dummy", "axi", "enfc", "dummy", "dummy",
+ "dummy", "lcdif_pix", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio_div", };
+static const char *cko2_sels[] = { "dummy", "dummy", "dummy", "usdhc1", "dummy", "dummy", "ecspi_root", "dummy",
+ "dummy", "dummy", "dummy", "dummy", "dummy", "dummy", "osc", "dummy",
+ "dummy", "usdhc2", "sai1", "sai2", "sai3", "dummy", "dummy", "can_root",
+ "dummy", "dummy", "dummy", "dummy", "uart_serial", "spdif", "dummy", "dummy", };
+static const char *cko_sels[] = { "cko1", "cko2", };
static struct clk *clks[IMX6UL_CLK_END];
static struct clk_onecell_data clk_data;
@@ -273,6 +280,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
clks[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
+ clks[IMX6UL_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
+ clks[IMX6UL_CLK_CKO2_SEL] = imx_clk_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels));
+ clks[IMX6UL_CLK_CKO] = imx_clk_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels));
+
clks[IMX6UL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
clks[IMX6UL_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
clks[IMX6UL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "qspi1_sel", 2, 7);
@@ -316,6 +327,9 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3);
clks[IMX6UL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3);
+ clks[IMX6UL_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3);
+ clks[IMX6UL_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3);
+
clks[IMX6UL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
clks[IMX6UL_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
clks[IMX6UL_CLK_AXI_PODF] = imx_clk_busy_divider("axi_podf", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
@@ -445,6 +459,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_PWM6] = imx_clk_gate2("pwm6", "perclk", base + 0x80, 28);
clks[IMX6UL_CLK_PWM7] = imx_clk_gate2("pwm7", "perclk", base + 0x80, 30);
+ /* CCOSR */
+ clks[IMX6UL_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7);
+ clks[IMX6UL_CLK_CKO2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24);
+
/* mask handshake of mmdc */
writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 975a20d3cc94..27217a7ea17e 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -26,6 +26,8 @@ static u32 share_count_sai1;
static u32 share_count_sai2;
static u32 share_count_sai3;
static u32 share_count_nand;
+static u32 share_count_enet1;
+static u32 share_count_enet2;
static const struct clk_div_table test_div_table[] = {
{ .val = 3, .div = 1, },
@@ -729,7 +731,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_LCDIF_PIXEL_ROOT_DIV] = imx_clk_divider2("lcdif_pixel_post_div", "lcdif_pixel_pre_div", base + 0xa300, 0, 6);
clks[IMX7D_MIPI_DSI_ROOT_DIV] = imx_clk_divider2("mipi_dsi_post_div", "mipi_dsi_pre_div", base + 0xa380, 0, 6);
clks[IMX7D_MIPI_CSI_ROOT_DIV] = imx_clk_divider2("mipi_csi_post_div", "mipi_csi_pre_div", base + 0xa400, 0, 6);
- clks[IMX7D_MIPI_DPHY_ROOT_DIV] = imx_clk_divider2("mipi_dphy_post_div", "mipi_csi_dphy_div", base + 0xa480, 0, 6);
+ clks[IMX7D_MIPI_DPHY_ROOT_DIV] = imx_clk_divider2("mipi_dphy_post_div", "mipi_dphy_pre_div", base + 0xa480, 0, 6);
clks[IMX7D_SAI1_ROOT_DIV] = imx_clk_divider2("sai1_post_div", "sai1_pre_div", base + 0xa500, 0, 6);
clks[IMX7D_SAI2_ROOT_DIV] = imx_clk_divider2("sai2_post_div", "sai2_pre_div", base + 0xa580, 0, 6);
clks[IMX7D_SAI3_ROOT_DIV] = imx_clk_divider2("sai3_post_div", "sai3_pre_div", base + 0xa600, 0, 6);
@@ -738,7 +740,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ENET1_TIME_ROOT_DIV] = imx_clk_divider2("enet1_time_post_div", "enet1_time_pre_div", base + 0xa780, 0, 6);
clks[IMX7D_ENET2_REF_ROOT_DIV] = imx_clk_divider2("enet2_ref_post_div", "enet2_ref_pre_div", base + 0xa800, 0, 6);
clks[IMX7D_ENET2_TIME_ROOT_DIV] = imx_clk_divider2("enet2_time_post_div", "enet2_time_pre_div", base + 0xa880, 0, 6);
- clks[IMX7D_ENET_PHY_REF_ROOT_DIV] = imx_clk_divider2("enet_phy_ref_post_div", "enet_phy_ref_pre_div", base + 0xa900, 0, 6);
+ clks[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_divider2("enet_phy_ref_root_clk", "enet_phy_ref_pre_div", base + 0xa900, 0, 6);
clks[IMX7D_EIM_ROOT_DIV] = imx_clk_divider2("eim_post_div", "eim_pre_div", base + 0xa980, 0, 6);
clks[IMX7D_NAND_ROOT_CLK] = imx_clk_divider2("nand_root_clk", "nand_pre_div", base + 0xaa00, 0, 6);
clks[IMX7D_QSPI_ROOT_DIV] = imx_clk_divider2("qspi_post_div", "qspi_pre_div", base + 0xaa80, 0, 6);
@@ -805,6 +807,10 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0);
clks[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0);
clks[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0);
+ clks[IMX7D_ENET1_IPG_ROOT_CLK] = imx_clk_gate2_shared2("enet1_ipg_root_clk", "enet_axi_post_div", base + 0x4700, 0, &share_count_enet1);
+ clks[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_gate2_shared2("enet1_time_root_clk", "enet1_time_post_div", base + 0x4700, 0, &share_count_enet1);
+ clks[IMX7D_ENET2_IPG_ROOT_CLK] = imx_clk_gate2_shared2("enet2_ipg_root_clk", "enet_axi_post_div", base + 0x4710, 0, &share_count_enet2);
+ clks[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_gate2_shared2("enet2_time_root_clk", "enet2_time_post_div", base + 0x4710, 0, &share_count_enet2);
clks[IMX7D_SAI1_ROOT_CLK] = imx_clk_gate2_shared2("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0, &share_count_sai1);
clks[IMX7D_SAI1_IPG_CLK] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_root_clk", base + 0x48c0, 0, &share_count_sai1);
clks[IMX7D_SAI2_ROOT_CLK] = imx_clk_gate2_shared2("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0, &share_count_sai2);
@@ -812,11 +818,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_SAI3_ROOT_CLK] = imx_clk_gate2_shared2("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0, &share_count_sai3);
clks[IMX7D_SAI3_IPG_CLK] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_root_clk", base + 0x48e0, 0, &share_count_sai3);
clks[IMX7D_SPDIF_ROOT_CLK] = imx_clk_gate4("spdif_root_clk", "spdif_post_div", base + 0x44d0, 0);
- clks[IMX7D_ENET1_REF_ROOT_CLK] = imx_clk_gate4("enet1_ref_root_clk", "enet1_ref_post_div", base + 0x44e0, 0);
- clks[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_gate4("enet1_time_root_clk", "enet1_time_post_div", base + 0x44f0, 0);
- clks[IMX7D_ENET2_REF_ROOT_CLK] = imx_clk_gate4("enet2_ref_root_clk", "enet2_ref_post_div", base + 0x4500, 0);
- clks[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_gate4("enet2_time_root_clk", "enet2_time_post_div", base + 0x4510, 0);
- clks[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_gate4("enet_phy_ref_root_clk", "enet_phy_ref_post_div", base + 0x4520, 0);
clks[IMX7D_EIM_ROOT_CLK] = imx_clk_gate4("eim_root_clk", "eim_post_div", base + 0x4160, 0);
clks[IMX7D_NAND_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_rawnand_clk", "nand_root_clk", base + 0x4140, 0, &share_count_nand);
clks[IMX7D_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_root_clk", base + 0x4140, 0, &share_count_nand);
@@ -891,6 +892,8 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clk_set_parent(clks[IMX7D_PLL_AUDIO_MAIN_BYPASS], clks[IMX7D_PLL_AUDIO_MAIN]);
clk_set_parent(clks[IMX7D_PLL_VIDEO_MAIN_BYPASS], clks[IMX7D_PLL_VIDEO_MAIN]);
+ clk_set_parent(clks[IMX7D_MIPI_CSI_ROOT_SRC], clks[IMX7D_PLL_SYS_PFD3_CLK]);
+
/* use old gpt clk setting, gpt1 root clk must be twice as gpt counter freq */
clk_set_parent(clks[IMX7D_GPT1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 56a712c9075f..5ef7d9ba2195 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -43,7 +43,8 @@ static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
const struct ingenic_cgu_gate_info *info)
{
- return readl(cgu->base + info->reg) & BIT(info->bit);
+ return !!(readl(cgu->base + info->reg) & BIT(info->bit))
+ ^ info->clear_to_gate;
}
/**
@@ -62,7 +63,7 @@ ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
{
u32 clkgr = readl(cgu->base + info->reg);
- if (val)
+ if (val ^ info->clear_to_gate)
clkgr |= BIT(info->bit);
else
clkgr &= ~BIT(info->bit);
@@ -511,6 +512,9 @@ static int ingenic_clk_enable(struct clk_hw *hw)
spin_lock_irqsave(&cgu->lock, flags);
ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
spin_unlock_irqrestore(&cgu->lock, flags);
+
+ if (clk_info->gate.delay_us)
+ udelay(clk_info->gate.delay_us);
}
return 0;
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 9da34910bd80..542192376ebf 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -111,10 +111,14 @@ struct ingenic_cgu_fixdiv_info {
* struct ingenic_cgu_gate_info - information about a clock gate
* @reg: offset of the gate control register within the CGU
* @bit: offset of the bit in the register that controls the gate
+ * @clear_to_gate: if set, the clock is gated when the bit is cleared
+ * @delay_us: delay in microseconds after which the clock is considered stable
*/
struct ingenic_cgu_gate_info {
unsigned reg;
u8 bit;
+ bool clear_to_gate;
+ u16 delay_us;
};
/**
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index c78d369b9403..bf46a0df2004 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -42,7 +42,6 @@
/* bits within the OPCR register */
#define OPCR_SPENDH BIT(5) /* UHC PHY suspend */
-#define OPCR_SPENDN BIT(7) /* OTG PHY suspend */
/* bits within the USBPCR1 register */
#define USBPCR1_UHC_POWER BIT(5) /* UHC PHY power down */
@@ -83,37 +82,6 @@ static const struct clk_ops jz4770_uhc_phy_ops = {
.is_enabled = jz4770_uhc_phy_is_enabled,
};
-static int jz4770_otg_phy_enable(struct clk_hw *hw)
-{
- void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
-
- writel(readl(reg_opcr) | OPCR_SPENDN, reg_opcr);
-
- /* Wait for the clock to be stable */
- udelay(50);
- return 0;
-}
-
-static void jz4770_otg_phy_disable(struct clk_hw *hw)
-{
- void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
-
- writel(readl(reg_opcr) & ~OPCR_SPENDN, reg_opcr);
-}
-
-static int jz4770_otg_phy_is_enabled(struct clk_hw *hw)
-{
- void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
-
- return !!(readl(reg_opcr) & OPCR_SPENDN);
-}
-
-static const struct clk_ops jz4770_otg_phy_ops = {
- .enable = jz4770_otg_phy_enable,
- .disable = jz4770_otg_phy_disable,
- .is_enabled = jz4770_otg_phy_is_enabled,
-};
-
static const s8 pll_od_encoding[8] = {
0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3,
};
@@ -186,7 +154,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
"h1clk", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4770_CLK_PLL0, },
.div = { CGU_REG_CPCCR, 24, 1, 4, 22, -1, -1 },
- .gate = { CGU_REG_LCR, 30 },
+ .gate = { CGU_REG_CLKGR1, 7 },
},
[JZ4770_CLK_H2CLK] = {
"h2clk", CGU_CLK_DIV,
@@ -194,9 +162,10 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.div = { CGU_REG_CPCCR, 16, 1, 4, 22, -1, -1 },
},
[JZ4770_CLK_C1CLK] = {
- "c1clk", CGU_CLK_DIV,
+ "c1clk", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4770_CLK_PLL0, },
.div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1 },
+ .gate = { CGU_REG_OPCR, 31, true }, // disable CCLK stop on idle
},
[JZ4770_CLK_PCLK] = {
"pclk", CGU_CLK_DIV,
@@ -393,7 +362,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
[JZ4770_CLK_VPU] = {
"vpu", CGU_CLK_GATE,
.parents = { JZ4770_CLK_H1CLK, },
- .gate = { CGU_REG_CLKGR1, 7 },
+ .gate = { CGU_REG_LCR, 30, false, 150 },
},
[JZ4770_CLK_MMC0] = {
"mmc0", CGU_CLK_GATE,
@@ -410,6 +379,11 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.parents = { JZ4770_CLK_MMC2_MUX, },
.gate = { CGU_REG_CLKGR0, 12 },
},
+ [JZ4770_CLK_OTG_PHY] = {
+ "usb_phy", CGU_CLK_GATE,
+ .parents = { JZ4770_CLK_OTG },
+ .gate = { CGU_REG_OPCR, 7, true, 50 },
+ },
/* Custom clocks */
@@ -418,11 +392,6 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.parents = { JZ4770_CLK_UHC, -1, -1, -1 },
.custom = { &jz4770_uhc_phy_ops },
},
- [JZ4770_CLK_OTG_PHY] = {
- "usb_phy", CGU_CLK_CUSTOM,
- .parents = { JZ4770_CLK_OTG, -1, -1, -1 },
- .custom = { &jz4770_otg_phy_ops },
- },
[JZ4770_CLK_EXT512] = {
"ext/512", CGU_CLK_FIXDIV,
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 92afe5989e97..3dd1dab92223 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -60,6 +60,12 @@ config COMMON_CLK_MT2701_AUDSYS
---help---
This driver supports Mediatek MT2701 audsys clocks.
+config COMMON_CLK_MT2701_G3DSYS
+ bool "Clock driver for MediaTek MT2701 g3dsys"
+ depends on COMMON_CLK_MT2701
+ ---help---
+ This driver supports MediaTek MT2701 g3dsys clocks.
+
config COMMON_CLK_MT2712
bool "Clock driver for MediaTek MT2712"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index b80eff2abb31..844b55d2770d 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_COMMON_CLK_MT2701) += clk-mt2701.o
obj-$(CONFIG_COMMON_CLK_MT2701_AUDSYS) += clk-mt2701-aud.o
obj-$(CONFIG_COMMON_CLK_MT2701_BDPSYS) += clk-mt2701-bdp.o
obj-$(CONFIG_COMMON_CLK_MT2701_ETHSYS) += clk-mt2701-eth.o
+obj-$(CONFIG_COMMON_CLK_MT2701_G3DSYS) += clk-mt2701-g3d.o
obj-$(CONFIG_COMMON_CLK_MT2701_HIFSYS) += clk-mt2701-hif.o
obj-$(CONFIG_COMMON_CLK_MT2701_IMGSYS) += clk-mt2701-img.o
obj-$(CONFIG_COMMON_CLK_MT2701_MMSYS) += clk-mt2701-mm.o
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
new file mode 100644
index 000000000000..1328c112a38f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+#define GATE_G3D(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &g3d_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate_regs g3d_cg_regs = {
+ .sta_ofs = 0x0,
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+};
+
+static const struct mtk_gate g3d_clks[] = {
+ GATE_G3D(CLK_G3DSYS_CORE, "g3d_core", "mfg_sel", 0),
+};
+
+static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_G3DSYS_NR);
+
+ mtk_clk_register_gates(node, g3d_clks, ARRAY_SIZE(g3d_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ mtk_register_reset_controller(node, 1, 0xc);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2701_g3d[] = {
+ {
+ .compatible = "mediatek,mt2701-g3dsys",
+ .data = clk_mt2701_g3dsys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt2701_g3d_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_g3d_drv = {
+ .probe = clk_mt2701_g3d_probe,
+ .driver = {
+ .name = "clk-mt2701-g3d",
+ .of_match_table = of_match_clk_mt2701_g3d,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_g3d_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index deca7527f92f..4dda8988b2f0 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -46,8 +46,6 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
340 * MHZ),
FIXED_CLK(CLK_TOP_HDMI_0_PLL340M, "hdmi_0_pll340m", "clk26m",
340 * MHZ),
- FIXED_CLK(CLK_TOP_HDMITX_CLKDIG_CTS, "hdmitx_dig_cts", "clk26m",
- 300 * MHZ),
FIXED_CLK(CLK_TOP_HADDS2_FB, "hadds2_fbclk", "clk26m",
27 * MHZ),
FIXED_CLK(CLK_TOP_WBG_DIG_416M, "wbg_dig_ck_416m", "clk26m",
@@ -977,6 +975,10 @@ static const struct mtk_pll_data apmixed_plls[] = {
21, 0x2d0, 4, 0x0, 0x2d4, 0),
};
+static const struct mtk_fixed_factor apmixed_fixed_divs[] = {
+ FACTOR(CLK_APMIXED_HDMI_REF, "hdmi_ref", "tvdpll", 1, 1),
+};
+
static int mtk_apmixedsys_init(struct platform_device *pdev)
{
struct clk_onecell_data *clk_data;
@@ -988,6 +990,8 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
mtk_clk_register_plls(node, apmixed_plls, ARRAY_SIZE(apmixed_plls),
clk_data);
+ mtk_clk_register_factors(apmixed_fixed_divs, ARRAY_SIZE(apmixed_fixed_divs),
+ clk_data);
return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index d5cbec522aec..815659eebea3 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -3,6 +3,12 @@ config COMMON_CLK_AMLOGIC
depends on OF
depends on ARCH_MESON || COMPILE_TEST
+config COMMON_CLK_MESON_AO
+ bool
+ depends on OF
+ depends on ARCH_MESON || COMPILE_TEST
+ select COMMON_CLK_REGMAP_MESON
+
config COMMON_CLK_REGMAP_MESON
bool
select REGMAP
@@ -21,6 +27,7 @@ config COMMON_CLK_GXBB
bool
depends on COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
+ select COMMON_CLK_MESON_AO
select COMMON_CLK_REGMAP_MESON
select MFD_SYSCON
help
@@ -31,6 +38,7 @@ config COMMON_CLK_AXG
bool
depends on COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
+ select COMMON_CLK_MESON_AO
select COMMON_CLK_REGMAP_MESON
select MFD_SYSCON
help
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index ffee82e60b7a..d0d13aeb369a 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -3,7 +3,8 @@
#
obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-audio-divider.o
+obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
-obj-$(CONFIG_COMMON_CLK_AXG) += axg.o
+obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
new file mode 100644
index 000000000000..29e088542387
--- /dev/null
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson-AXG Clock Controller Driver
+ *
+ * Copyright (c) 2016 Baylibre SAS.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ */
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/mfd/syscon.h>
+#include "clk-regmap.h"
+#include "meson-aoclk.h"
+#include "axg-aoclk.h"
+
+#define AXG_AO_GATE(_name, _bit) \
+static struct clk_regmap axg_aoclk_##_name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (AO_RTI_GEN_CNTL_REG0), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_ao_" #_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_names = (const char *[]){ "clk81" }, \
+ .num_parents = 1, \
+ .flags = CLK_IGNORE_UNUSED, \
+ }, \
+}
+
+AXG_AO_GATE(remote, 0);
+AXG_AO_GATE(i2c_master, 1);
+AXG_AO_GATE(i2c_slave, 2);
+AXG_AO_GATE(uart1, 3);
+AXG_AO_GATE(uart2, 5);
+AXG_AO_GATE(ir_blaster, 6);
+AXG_AO_GATE(saradc, 7);
+
+static struct clk_regmap axg_aoclk_clk81 = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x1,
+ .shift = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_clk81",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "clk81", "ao_alt_xtal"},
+ .num_parents = 2,
+ },
+};
+
+static struct clk_regmap axg_aoclk_saradc_mux = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_SAR_CLK,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_saradc_mux",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "xtal", "axg_ao_clk81" },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_regmap axg_aoclk_saradc_div = {
+ .data = &(struct clk_regmap_div_data) {
+ .offset = AO_SAR_CLK,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_saradc_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "axg_ao_saradc_mux" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_aoclk_saradc_gate = {
+ .data = &(struct clk_regmap_gate_data) {
+ .offset = AO_SAR_CLK,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_saradc_gate",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "axg_ao_saradc_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const unsigned int axg_aoclk_reset[] = {
+ [RESET_AO_REMOTE] = 16,
+ [RESET_AO_I2C_MASTER] = 18,
+ [RESET_AO_I2C_SLAVE] = 19,
+ [RESET_AO_UART1] = 17,
+ [RESET_AO_UART2] = 22,
+ [RESET_AO_IR_BLASTER] = 23,
+};
+
+static struct clk_regmap *axg_aoclk_regmap[] = {
+ [CLKID_AO_REMOTE] = &axg_aoclk_remote,
+ [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master,
+ [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave,
+ [CLKID_AO_UART1] = &axg_aoclk_uart1,
+ [CLKID_AO_UART2] = &axg_aoclk_uart2,
+ [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster,
+ [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc,
+ [CLKID_AO_CLK81] = &axg_aoclk_clk81,
+ [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux,
+ [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div,
+ [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate,
+};
+
+static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
+ .hws = {
+ [CLKID_AO_REMOTE] = &axg_aoclk_remote.hw,
+ [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master.hw,
+ [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave.hw,
+ [CLKID_AO_UART1] = &axg_aoclk_uart1.hw,
+ [CLKID_AO_UART2] = &axg_aoclk_uart2.hw,
+ [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster.hw,
+ [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc.hw,
+ [CLKID_AO_CLK81] = &axg_aoclk_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw,
+ },
+ .num = NR_CLKS,
+};
+
+static const struct meson_aoclk_data axg_aoclkc_data = {
+ .reset_reg = AO_RTI_GEN_CNTL_REG0,
+ .num_reset = ARRAY_SIZE(axg_aoclk_reset),
+ .reset = axg_aoclk_reset,
+ .num_clks = ARRAY_SIZE(axg_aoclk_regmap),
+ .clks = axg_aoclk_regmap,
+ .hw_data = &axg_aoclk_onecell_data,
+};
+
+static const struct of_device_id axg_aoclkc_match_table[] = {
+ {
+ .compatible = "amlogic,meson-axg-aoclkc",
+ .data = &axg_aoclkc_data,
+ },
+ { }
+};
+
+static struct platform_driver axg_aoclkc_driver = {
+ .probe = meson_aoclkc_probe,
+ .driver = {
+ .name = "axg-aoclkc",
+ .of_match_table = axg_aoclkc_match_table,
+ },
+};
+
+builtin_platform_driver(axg_aoclkc_driver);
diff --git a/drivers/clk/meson/axg-aoclk.h b/drivers/clk/meson/axg-aoclk.h
new file mode 100644
index 000000000000..91384d8dd844
--- /dev/null
+++ b/drivers/clk/meson/axg-aoclk.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ */
+
+#ifndef __AXG_AOCLKC_H
+#define __AXG_AOCLKC_H
+
+#define NR_CLKS 11
+/* AO Configuration Clock registers offsets
+ * Register offsets from the data sheet must be multiplied by 4.
+ */
+#define AO_RTI_PWR_CNTL_REG1 0x0C
+#define AO_RTI_PWR_CNTL_REG0 0x10
+#define AO_RTI_GEN_CNTL_REG0 0x40
+#define AO_OSCIN_CNTL 0x58
+#define AO_CRT_CLK_CNTL1 0x68
+#define AO_SAR_CLK 0x90
+#define AO_RTC_ALT_CLK_CNTL0 0x94
+#define AO_RTC_ALT_CLK_CNTL1 0x98
+
+#include <dt-bindings/clock/axg-aoclkc.h>
+#include <dt-bindings/reset/axg-aoclkc.h>
+
+#endif /* __AXG_AOCLKC_H */
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 5f5d468c1efe..bd4dbc696b88 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -461,6 +461,7 @@ static struct clk_regmap axg_mpll0_div = {
.width = 1,
},
.lock = &meson_clk_lock,
+ .flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
@@ -507,6 +508,7 @@ static struct clk_regmap axg_mpll1_div = {
.width = 1,
},
.lock = &meson_clk_lock,
+ .flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "mpll1_div",
@@ -553,6 +555,7 @@ static struct clk_regmap axg_mpll2_div = {
.width = 1,
},
.lock = &meson_clk_lock,
+ .flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "mpll2_div",
@@ -599,6 +602,7 @@ static struct clk_regmap axg_mpll3_div = {
.width = 1,
},
.lock = &meson_clk_lock,
+ .flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "mpll3_div",
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
index f7ab5b1db342..58f546e04807 100644
--- a/drivers/clk/meson/clk-audio-divider.c
+++ b/drivers/clk/meson/clk-audio-divider.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 AmLogic, Inc.
* Author: Jerome Brunet <jbrunet@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 0df1227b65b3..650f75cc15a9 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -1,57 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright (c) 2016 AmLogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING
- *
- * BSD LICENSE
- *
* Copyright (c) 2016 AmLogic, Inc.
* Author: Michael Turquette <mturquette@baylibre.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
@@ -89,10 +39,23 @@ static long rate_from_params(unsigned long parent_rate,
static void params_from_rate(unsigned long requested_rate,
unsigned long parent_rate,
unsigned int *sdm,
- unsigned int *n2)
+ unsigned int *n2,
+ u8 flags)
{
uint64_t div = parent_rate;
- unsigned long rem = do_div(div, requested_rate);
+ uint64_t frac = do_div(div, requested_rate);
+
+ frac *= SDM_DEN;
+
+ if (flags & CLK_MESON_MPLL_ROUND_CLOSEST)
+ *sdm = DIV_ROUND_CLOSEST_ULL(frac, requested_rate);
+ else
+ *sdm = DIV_ROUND_UP_ULL(frac, requested_rate);
+
+ if (*sdm == SDM_DEN) {
+ *sdm = 0;
+ div += 1;
+ }
if (div < N2_MIN) {
*n2 = N2_MIN;
@@ -102,7 +65,6 @@ static void params_from_rate(unsigned long requested_rate,
*sdm = SDM_DEN - 1;
} else {
*n2 = div;
- *sdm = DIV_ROUND_UP_ULL((u64)rem * SDM_DEN, requested_rate);
}
}
@@ -125,9 +87,11 @@ static long mpll_round_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *parent_rate)
{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
unsigned int sdm, n2;
- params_from_rate(rate, *parent_rate, &sdm, &n2);
+ params_from_rate(rate, *parent_rate, &sdm, &n2, mpll->flags);
return rate_from_params(*parent_rate, sdm, n2);
}
@@ -140,7 +104,7 @@ static int mpll_set_rate(struct clk_hw *hw,
unsigned int sdm, n2;
unsigned long flags = 0;
- params_from_rate(rate, parent_rate, &sdm, &n2);
+ params_from_rate(rate, parent_rate, &sdm, &n2, mpll->flags);
if (mpll->lock)
spin_lock_irqsave(mpll->lock, flags);
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 65a7bd903551..3e04617ac47f 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
*
* Copyright (c) 2018 Baylibre, SAS.
* Author: Jerome Brunet <jbrunet@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index ab7a3556f5b2..305ee307c003 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 BayLibre, SAS.
-// Author: Jerome Brunet <jbrunet@baylibre.com>
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
#include "clk-regmap.h"
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index 627c888026d7..ed2d4348dbe2 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -1,6 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 BayLibre, SAS.
-// Author: Jerome Brunet <jbrunet@baylibre.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
#ifndef __CLK_REGMAP_H
#define __CLK_REGMAP_H
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index 8fe73c4edca8..2fb084330ee9 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __CLKC_H
@@ -97,8 +86,11 @@ struct meson_clk_mpll_data {
struct parm ssen;
struct parm misc;
spinlock_t *lock;
+ u8 flags;
};
+#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
+
struct meson_clk_audio_div_data {
struct parm div;
u8 flags;
diff --git a/drivers/clk/meson/gxbb-aoclk-32k.c b/drivers/clk/meson/gxbb-aoclk-32k.c
index 491634dbc985..680467141a1d 100644
--- a/drivers/clk/meson/gxbb-aoclk-32k.c
+++ b/drivers/clk/meson/gxbb-aoclk-32k.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2017 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * SPDX-License-Identifier: GPL-2.0+
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 9ec23ae9a219..42ed61d3c3fb 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -1,90 +1,14 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * BSD LICENSE
- *
- * Copyright (c) 2016 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
-#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <dt-bindings/clock/gxbb-aoclkc.h>
-#include <dt-bindings/reset/gxbb-aoclkc.h>
#include "clk-regmap.h"
+#include "meson-aoclk.h"
#include "gxbb-aoclk.h"
-struct gxbb_aoclk_reset_controller {
- struct reset_controller_dev reset;
- unsigned int *data;
- struct regmap *regmap;
-};
-
-static int gxbb_aoclk_do_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct gxbb_aoclk_reset_controller *reset =
- container_of(rcdev, struct gxbb_aoclk_reset_controller, reset);
-
- return regmap_write(reset->regmap, AO_RTI_GEN_CNTL_REG0,
- BIT(reset->data[id]));
-}
-
-static const struct reset_control_ops gxbb_aoclk_reset_ops = {
- .reset = gxbb_aoclk_do_reset,
-};
-
#define GXBB_AO_GATE(_name, _bit) \
static struct clk_regmap _name##_ao = { \
.data = &(struct clk_regmap_gate_data) { \
@@ -96,7 +20,7 @@ static struct clk_regmap _name##_ao = { \
.ops = &clk_regmap_gate_ops, \
.parent_names = (const char *[]){ "clk81" }, \
.num_parents = 1, \
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
+ .flags = CLK_IGNORE_UNUSED, \
}, \
}
@@ -117,7 +41,7 @@ static struct aoclk_cec_32k cec_32k_ao = {
},
};
-static unsigned int gxbb_aoclk_reset[] = {
+static const unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
[RESET_AO_I2C_SLAVE] = 19,
@@ -135,7 +59,7 @@ static struct clk_regmap *gxbb_aoclk_gate[] = {
[CLKID_AO_IR_BLASTER] = &ir_blaster_ao,
};
-static struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
+static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
.hws = {
[CLKID_AO_REMOTE] = &remote_ao.hw,
[CLKID_AO_I2C_MASTER] = &i2c_master_ao.hw,
@@ -145,58 +69,55 @@ static struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
[CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
[CLKID_AO_CEC_32K] = &cec_32k_ao.hw,
},
- .num = 7,
+ .num = NR_CLKS,
};
-static int gxbb_aoclkc_probe(struct platform_device *pdev)
+static int gxbb_register_cec_ao_32k(struct platform_device *pdev)
{
- struct gxbb_aoclk_reset_controller *rstc;
struct device *dev = &pdev->dev;
struct regmap *regmap;
- int ret, clkid;
-
- rstc = devm_kzalloc(dev, sizeof(*rstc), GFP_KERNEL);
- if (!rstc)
- return -ENOMEM;
+ int ret;
regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap\n");
- return -ENODEV;
- }
-
- /* Reset Controller */
- rstc->regmap = regmap;
- rstc->data = gxbb_aoclk_reset;
- rstc->reset.ops = &gxbb_aoclk_reset_ops;
- rstc->reset.nr_resets = ARRAY_SIZE(gxbb_aoclk_reset);
- rstc->reset.of_node = dev->of_node;
- ret = devm_reset_controller_register(dev, &rstc->reset);
-
- /*
- * Populate regmap and register all clks
- */
- for (clkid = 0; clkid < ARRAY_SIZE(gxbb_aoclk_gate); clkid++) {
- gxbb_aoclk_gate[clkid]->map = regmap;
-
- ret = devm_clk_hw_register(dev,
- gxbb_aoclk_onecell_data.hws[clkid]);
- if (ret)
- return ret;
+ return PTR_ERR(regmap);
}
/* Specific clocks */
cec_32k_ao.regmap = regmap;
ret = devm_clk_hw_register(dev, &cec_32k_ao.hw);
+ if (ret) {
+ dev_err(&pdev->dev, "clk cec_32k_ao register failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct meson_aoclk_data gxbb_aoclkc_data = {
+ .reset_reg = AO_RTI_GEN_CNTL_REG0,
+ .num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
+ .reset = gxbb_aoclk_reset,
+ .num_clks = ARRAY_SIZE(gxbb_aoclk_gate),
+ .clks = gxbb_aoclk_gate,
+ .hw_data = &gxbb_aoclk_onecell_data,
+};
+
+static int gxbb_aoclkc_probe(struct platform_device *pdev)
+{
+ int ret = gxbb_register_cec_ao_32k(pdev);
if (ret)
return ret;
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
- &gxbb_aoclk_onecell_data);
+ return meson_aoclkc_probe(pdev);
}
static const struct of_device_id gxbb_aoclkc_match_table[] = {
- { .compatible = "amlogic,meson-gx-aoclkc" },
+ {
+ .compatible = "amlogic,meson-gx-aoclkc",
+ .data = &gxbb_aoclkc_data,
+ },
{ }
};
diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h
index badc4c22b4ee..c514493d989a 100644
--- a/drivers/clk/meson/gxbb-aoclk.h
+++ b/drivers/clk/meson/gxbb-aoclk.h
@@ -1,13 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2017 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * SPDX-License-Identifier: GPL-2.0+
*/
#ifndef __GXBB_AOCLKC_H
#define __GXBB_AOCLKC_H
+#define NR_CLKS 7
+
/* AO Configuration Clock registers offsets */
#define AO_RTI_PWR_CNTL_REG1 0x0c
#define AO_RTI_PWR_CNTL_REG0 0x10
@@ -26,4 +27,7 @@ struct aoclk_cec_32k {
extern const struct clk_ops meson_aoclk_cec_32k_ops;
+#include <dt-bindings/clock/gxbb-aoclkc.h>
+#include <dt-bindings/reset/gxbb-aoclkc.h>
+
#endif /* __GXBB_AOCLKC_H */
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index b1e4d9557610..240658404367 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * AmLogic S905 / GXBB Clock Controller Driver
- *
* Copyright (c) 2016 AmLogic, Inc.
* Michael Turquette <mturquette@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
@@ -1543,6 +1530,102 @@ static struct clk_regmap gxbb_vapb = {
},
};
+/* VDEC clocks */
+
+static const char * const gxbb_vdec_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+};
+
+static struct clk_regmap gxbb_vdec_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = gxbb_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_vdec_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_vdec_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_vdec_hevc_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = gxbb_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_vdec_hevc_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_vdec_hevc = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_hevc",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
@@ -1786,6 +1869,12 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_FCLK_DIV4_DIV] = &gxbb_fclk_div4_div.hw,
[CLKID_FCLK_DIV5_DIV] = &gxbb_fclk_div5_div.hw,
[CLKID_FCLK_DIV7_DIV] = &gxbb_fclk_div7_div.hw,
+ [CLKID_VDEC_1_SEL] = &gxbb_vdec_1_sel.hw,
+ [CLKID_VDEC_1_DIV] = &gxbb_vdec_1_div.hw,
+ [CLKID_VDEC_1] = &gxbb_vdec_1.hw,
+ [CLKID_VDEC_HEVC_SEL] = &gxbb_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &gxbb_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC] = &gxbb_vdec_hevc.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1942,6 +2031,12 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_FCLK_DIV4_DIV] = &gxbb_fclk_div4_div.hw,
[CLKID_FCLK_DIV5_DIV] = &gxbb_fclk_div5_div.hw,
[CLKID_FCLK_DIV7_DIV] = &gxbb_fclk_div7_div.hw,
+ [CLKID_VDEC_1_SEL] = &gxbb_vdec_1_sel.hw,
+ [CLKID_VDEC_1_DIV] = &gxbb_vdec_1_div.hw,
+ [CLKID_VDEC_1] = &gxbb_vdec_1.hw,
+ [CLKID_VDEC_HEVC_SEL] = &gxbb_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &gxbb_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC] = &gxbb_vdec_hevc.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2100,6 +2195,12 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_fclk_div4,
&gxbb_fclk_div5,
&gxbb_fclk_div7,
+ &gxbb_vdec_1_sel,
+ &gxbb_vdec_1_div,
+ &gxbb_vdec_1,
+ &gxbb_vdec_hevc_sel,
+ &gxbb_vdec_hevc_div,
+ &gxbb_vdec_hevc,
};
struct clkc_data {
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index 9febf3f03739..ec1a812bf1fd 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -1,57 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright (c) 2016 AmLogic, Inc.
* Author: Michael Turquette <mturquette@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING
- *
- * BSD LICENSE
- *
- * Copyright (c) 2016 BayLibre, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __GXBB_H
@@ -204,8 +154,12 @@
#define CLKID_FCLK_DIV4_DIV 148
#define CLKID_FCLK_DIV5_DIV 149
#define CLKID_FCLK_DIV7_DIV 150
+#define CLKID_VDEC_1_SEL 151
+#define CLKID_VDEC_1_DIV 152
+#define CLKID_VDEC_HEVC_SEL 154
+#define CLKID_VDEC_HEVC_DIV 155
-#define NR_CLKS 151
+#define NR_CLKS 157
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
new file mode 100644
index 000000000000..f965845917e3
--- /dev/null
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson-AXG Clock Controller Driver
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ * Author: Yixun Lan <yixun.lan@amlogic.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include "clk-regmap.h"
+#include "meson-aoclk.h"
+
+static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct meson_aoclk_reset_controller *rstc =
+ container_of(rcdev, struct meson_aoclk_reset_controller, reset);
+
+ return regmap_write(rstc->regmap, rstc->data->reset_reg,
+ BIT(rstc->data->reset[id]));
+}
+
+static const struct reset_control_ops meson_aoclk_reset_ops = {
+ .reset = meson_aoclk_do_reset,
+};
+
+int meson_aoclkc_probe(struct platform_device *pdev)
+{
+ struct meson_aoclk_reset_controller *rstc;
+ struct meson_aoclk_data *data;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int ret, clkid;
+
+ data = (struct meson_aoclk_data *) of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ rstc = devm_kzalloc(dev, sizeof(*rstc), GFP_KERNEL);
+ if (!rstc)
+ return -ENOMEM;
+
+ regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Reset Controller */
+ rstc->data = data;
+ rstc->regmap = regmap;
+ rstc->reset.ops = &meson_aoclk_reset_ops;
+ rstc->reset.nr_resets = data->num_reset;
+ rstc->reset.of_node = dev->of_node;
+ ret = devm_reset_controller_register(dev, &rstc->reset);
+ if (ret) {
+ dev_err(dev, "failed to register reset controller\n");
+ return ret;
+ }
+
+ /*
+ * Populate regmap and register all clks
+ */
+ for (clkid = 0; clkid < data->num_clks; clkid++) {
+ data->clks[clkid]->map = regmap;
+
+ ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
+ if (ret)
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ (void *) data->hw_data);
+}
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
new file mode 100644
index 000000000000..ab2819e88922
--- /dev/null
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ * Author: Yixun Lan <yixun.lan@amlogic.com>
+ */
+
+#ifndef __MESON_AOCLK_H__
+#define __MESON_AOCLK_H__
+
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include "clk-regmap.h"
+
+struct meson_aoclk_data {
+ const unsigned int reset_reg;
+ const int num_reset;
+ const unsigned int *reset;
+ int num_clks;
+ struct clk_regmap **clks;
+ const struct clk_hw_onecell_data *hw_data;
+};
+
+struct meson_aoclk_reset_controller {
+ struct reset_controller_dev reset;
+ const struct meson_aoclk_data *data;
+ struct regmap *regmap;
+};
+
+int meson_aoclkc_probe(struct platform_device *pdev);
+#endif
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index d0524ec71aad..7447d96a265f 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * AmLogic S802 (Meson8) / S805 (Meson8b) / S812 (Meson8m2) Clock Controller
- * Driver
- *
* Copyright (c) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
*
* Copyright (c) 2016 BayLibre, Inc.
* Michael Turquette <mturquette@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
@@ -246,6 +232,13 @@ static struct clk_regmap meson8b_fclk_div2 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
+ /*
+ * FIXME: Ethernet with a RGMII PHYs is not working if
+ * fclk_div2 is disabled. it is currently unclear why this
+ * is. keep it enabled until the Ethernet driver knows how
+ * to manage this clock.
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
@@ -640,6 +633,54 @@ static struct clk_regmap meson8b_cpu_clk = {
},
};
+static struct clk_regmap meson8b_nand_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "nand_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ /* FIXME all other parents are unknown: */
+ .parent_names = (const char *[]){ "fclk_div4", "fclk_div3",
+ "fclk_div5", "fclk_div7", "xtal" },
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_nand_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "nand_clk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "nand_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_nand_clk_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "nand_clk_gate",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "nand_clk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
@@ -835,6 +876,9 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw,
[CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw,
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
+ [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
+ [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -940,6 +984,9 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_fclk_div4,
&meson8b_fclk_div5,
&meson8b_fclk_div7,
+ &meson8b_nand_clk_sel,
+ &meson8b_nand_clk_div,
+ &meson8b_nand_clk_gate,
};
static const struct meson8b_clk_reset_line {
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 6e414bd36981..5d09412b5084 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -1,21 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
*
* Copyright (c) 2016 BayLibre, Inc.
* Michael Turquette <mturquette@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MESON8B_H
@@ -40,6 +29,7 @@
#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
+#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
#define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
@@ -83,8 +73,10 @@
#define CLKID_FCLK_DIV4_DIV 107
#define CLKID_FCLK_DIV5_DIV 108
#define CLKID_FCLK_DIV7_DIV 109
+#define CLKID_NAND_SEL 110
+#define CLKID_NAND_DIV 111
-#define CLK_NR_CLKS 110
+#define CLK_NR_CLKS 113
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 8491979f4096..68f05c53d40e 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -72,7 +72,7 @@ static const struct clk_corediv_desc mvebu_corediv_desc[] = {
};
static const struct clk_corediv_desc mv98dx3236_corediv_desc[] = {
- { .mask = 0x0f, .offset = 6, .fieldbit = 26 }, /* NAND clock */
+ { .mask = 0x0f, .offset = 6, .fieldbit = 27 }, /* NAND clock */
};
#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index fbf4532f94b8..9c3480dcc38a 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -218,6 +218,33 @@ config MSM_MMCC_8996
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config MSM_GCC_8998
+ tristate "MSM8998 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8998 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_GCC_845
+ tristate "SDM845 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SDM845 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+	  i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config SDM_VIDEOCC_845
+ tristate "SDM845 Video Clock Controller"
+ depends on COMMON_CLK_QCOM
+ select SDM_GCC_845
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SDM845 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 230332cf317e..762c01137c2f 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8998) += gcc-msm8998.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
@@ -37,4 +38,6 @@ obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 6d04cd96482a..3c49a60072f1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -58,6 +58,8 @@
#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
+#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
+#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[CLK_ALPHA_PLL_TYPE_DEFAULT] = {
@@ -90,6 +92,18 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_STATUS] = 0x24,
},
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_USER_CTL_U] = 0x10,
+ [PLL_OFF_CONFIG_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL_U] = 0x18,
+ [PLL_OFF_TEST_CTL] = 0x1c,
+ [PLL_OFF_TEST_CTL_U] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ [PLL_OFF_OPMODE] = 0x2c,
+ [PLL_OFF_FRAC] = 0x38,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -108,6 +122,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define PLL_HUAYRA_N_MASK 0xff
#define PLL_HUAYRA_ALPHA_WIDTH 16
+#define FABIA_OPMODE_STANDBY 0x0
+#define FABIA_OPMODE_RUN 0x1
+
+#define FABIA_PLL_OUT_MASK 0x7
+#define FABIA_PLL_RATE_MARGIN 500
+
#define pll_alpha_width(p) \
((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
@@ -441,16 +461,12 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return alpha_pll_calc_rate(prate, l, a, alpha_width);
}
-static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
- int (*is_enabled)(struct clk_hw *))
+
+static int __clk_alpha_pll_update_latch(struct clk_alpha_pll *pll)
{
int ret;
u32 mode;
- if (!is_enabled(&pll->clkr.hw) ||
- !(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
- return 0;
-
regmap_read(pll->clkr.regmap, PLL_MODE(pll), &mode);
/* Latch the input to the PLL */
@@ -489,6 +505,16 @@ static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
return 0;
}
+static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
+ int (*is_enabled)(struct clk_hw *))
+{
+ if (!is_enabled(&pll->clkr.hw) ||
+ !(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
+ return 0;
+
+ return __clk_alpha_pll_update_latch(pll);
+}
+
static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate,
int (*is_enabled)(struct clk_hw *))
@@ -832,3 +858,265 @@ const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
+
+void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+
+ if (config->l)
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+
+ if (config->alpha)
+ regmap_write(regmap, PLL_FRAC(pll), config->alpha);
+
+ if (config->config_ctl_val)
+ regmap_write(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+
+ if (config->post_div_mask) {
+ mask = config->post_div_mask;
+ val = config->post_div_val;
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+ }
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
+ PLL_UPDATE_BYPASS);
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+EXPORT_SYMBOL_GPL(clk_fabia_pll_configure);
+
+static int alpha_pll_fabia_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, opmode_val;
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_active(pll);
+ }
+
+ ret = regmap_read(regmap, PLL_OPMODE(pll), &opmode_val);
+ if (ret)
+ return ret;
+
+	/* Skip if the PLL is already running */
+ if ((opmode_val & FABIA_OPMODE_RUN) && (val & PLL_OUTCTRL))
+ return 0;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N,
+ PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_RUN);
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
+ FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL,
+ PLL_OUTCTRL);
+}
+
+static void alpha_pll_fabia_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+ struct regmap *regmap = pll->clkr.regmap;
+
+ ret = regmap_read(regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Disable main outputs */
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), FABIA_PLL_OUT_MASK,
+ 0);
+ if (ret)
+ return;
+
+ /* Place the PLL in STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+}
+
+static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, frac, alpha_width = pll_alpha_width(pll);
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
+
+ return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
+}
+
+static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ unsigned long rrate;
+ int ret = 0;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+
+ /*
+ * Due to limited number of bits for fractional rate programming, the
+ * rounded up rate could be marginally higher than the requested rate.
+ */
+ if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("Call set rate on the PLL with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_FRAC(pll), a);
+
+ return __clk_alpha_pll_update_latch(pll);
+}
+
+const struct clk_ops clk_alpha_pll_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .set_rate = alpha_pll_fabia_set_rate,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
+
+const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
+ .enable = alpha_pll_fabia_enable,
+ .disable = alpha_pll_fabia_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
+
+static unsigned long clk_alpha_pll_postdiv_fabia_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 i, div = 1, val;
+ int ret;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ val >>= pll->post_div_shift;
+ val &= BIT(pll->width) - 1;
+
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].val == val) {
+ div = pll->post_div_table[i].div;
+ break;
+ }
+ }
+
+ return (parent_rate / div);
+}
+
+static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ return divider_round_rate(hw, rate, prate, pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int i, val = 0, div, ret;
+
+ /*
+ * If the PLL is in FSM mode, then treat set_rate callback as a
+ * no-operation.
+ */
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ if (val & PLL_VOTE_FSM_ENA)
+ return 0;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].div == div) {
+ val = pll->post_div_table[i].val;
+ break;
+ }
+ }
+
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ (BIT(pll->width) - 1) << pll->post_div_shift,
+ val << pll->post_div_shift);
+}
+
+const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 7593e8a56cf2..f981b486c468 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -22,6 +22,7 @@ enum {
CLK_ALPHA_PLL_TYPE_DEFAULT,
CLK_ALPHA_PLL_TYPE_HUAYRA,
CLK_ALPHA_PLL_TYPE_BRAMMO,
+ CLK_ALPHA_PLL_TYPE_FABIA,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -36,6 +37,8 @@ enum {
PLL_OFF_TEST_CTL,
PLL_OFF_TEST_CTL_U,
PLL_OFF_STATUS,
+ PLL_OFF_OPMODE,
+ PLL_OFF_FRAC,
PLL_OFF_MAX_REGS
};
@@ -73,6 +76,10 @@ struct clk_alpha_pll {
* @offset: base address of registers
* @regs: alpha pll register map (see @clk_alpha_pll_regs)
* @width: width of post-divider
+ * @post_div_shift: shift to differentiate between odd & even post-divider
+ * @post_div_table: table with PLL odd and even post-divider settings
+ * @num_post_div: Number of PLL post-divider settings
+ *
* @clkr: regmap clock handle
*/
struct clk_alpha_pll_postdiv {
@@ -81,6 +88,9 @@ struct clk_alpha_pll_postdiv {
const u8 *regs;
struct clk_regmap clkr;
+ int post_div_shift;
+ const struct clk_div_table *post_div_table;
+ size_t num_post_div;
};
struct alpha_pll_config {
@@ -109,7 +119,13 @@ extern const struct clk_ops clk_alpha_pll_postdiv_ops;
extern const struct clk_ops clk_alpha_pll_huayra_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ro_ops;
+extern const struct clk_ops clk_alpha_pll_fabia_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_fabia_ops;
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 26f7af315066..c58c5538b1b6 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -77,8 +77,11 @@ static int clk_branch_wait(const struct clk_branch *br, bool enabling,
bool voted = br->halt_check & BRANCH_VOTED;
const char *name = clk_hw_get_name(&br->clkr.hw);
- /* Skip checking halt bit if the clock is in hardware gated mode */
- if (clk_branch_in_hwcg_mode(br))
+ /*
+ * Skip checking halt bit if we're explicitly ignoring the bit or the
+ * clock is in hardware gated mode
+ */
+ if (br->halt_check == BRANCH_HALT_SKIP || clk_branch_in_hwcg_mode(br))
return 0;
if (br->halt_check == BRANCH_HALT_DELAY || (!enabling && voted)) {
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 284df3f3c55f..1702efb1c511 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -42,6 +42,7 @@ struct clk_branch {
#define BRANCH_HALT_ENABLE 1 /* pol: 0 = halt */
#define BRANCH_HALT_ENABLE_VOTED (BRANCH_HALT_ENABLE | BRANCH_VOTED)
#define BRANCH_HALT_DELAY 2 /* No bit to check; just delay */
+#define BRANCH_HALT_SKIP 3 /* Don't check halt bit */
struct clk_regmap clkr;
};
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 2a7489a84e69..b209a2fe86b9 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_RCG_H__
#define __QCOM_CLK_RCG_H__
@@ -144,6 +134,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @cmd_rcgr: corresponds to *_CMD_RCGR
* @mnd_width: number of bits in m/n/d values
* @hid_width: number of bits in half integer divider
+ * @safe_src_index: safe src index value
* @parent_map: map from software's parent index to hardware's src_sel field
* @freq_tbl: frequency table
* @clkr: regmap clock handle
@@ -153,6 +144,7 @@ struct clk_rcg2 {
u32 cmd_rcgr;
u8 mnd_width;
u8 hid_width;
+ u8 safe_src_index;
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
struct clk_regmap clkr;
@@ -167,5 +159,6 @@ extern const struct clk_ops clk_byte_ops;
extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
+extern const struct clk_ops clk_rcg2_shared_ops;
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index bbeaf9c09dbb..52208d4165f4 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
@@ -42,6 +34,7 @@
#define CFG_MODE_SHIFT 12
#define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
+#define CFG_HW_CLK_CTRL_MASK BIT(20)
#define M_REG 0x8
#define N_REG 0xc
@@ -211,6 +204,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
clk_flags = clk_hw_get_flags(hw);
p = clk_hw_get_parent_by_index(hw, index);
if (clk_flags & CLK_SET_RATE_PARENT) {
+ rate = f->freq;
if (f->pre_div) {
rate /= 2;
rate *= f->pre_div + 1;
@@ -248,7 +242,7 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
-static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg, mask;
struct clk_hw *hw = &rcg->clkr.hw;
@@ -276,13 +270,21 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
}
mask = BIT(rcg->hid_width) - 1;
- mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
- ret = regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + CFG_REG, mask, cfg);
+
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ mask, cfg);
+}
+
+static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure(rcg, f);
if (ret)
return ret;
@@ -789,3 +791,141 @@ const struct clk_ops clk_gfx3d_ops = {
.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
+
+static int clk_rcg2_set_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = clk_hw_get_name(hw);
+ int ret, count;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ if (clk_rcg2_is_enabled(hw))
+ return 0;
+
+ udelay(1);
+ }
+
+ pr_err("%s: RCG did not turn on\n", name);
+ return -ETIMEDOUT;
+}
+
+static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, 0);
+}
+
+static int
+clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+ ret = clk_rcg2_configure(rcg, f);
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+}
+
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ /*
+ * In case clock is disabled, update the CFG, M, N and D registers
+ * and don't hit the update bit of CMD register.
+ */
+ if (!__clk_is_enabled(hw->clk))
+ return __clk_rcg2_configure(rcg, f);
+
+ return clk_rcg2_shared_force_enable_clear(hw, f);
+}
+
+static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ /*
+ * Set the update bit because required configuration has already
+ * been written in clk_rcg2_shared_set_rate()
+ */
+ ret = clk_rcg2_set_force_enable(hw);
+ if (ret)
+ return ret;
+
+ ret = update_config(rcg);
+ if (ret)
+ return ret;
+
+ return clk_rcg2_clear_force_enable(hw);
+}
+
+static void clk_rcg2_shared_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+
+ /*
+ * Store current configuration as switching to safe source would clear
+ * the SRC and DIV of CFG register
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ /*
+ * Park the RCG at a safe configuration - sourced off of safe source.
+ * Force enable and disable the RCG while configuring it to safeguard
+ * against any update signal coming from the downstream clock.
+ * The current parent is still prepared and enabled at this point, and
+ * the safe source is always on while application processor subsystem
+ * is online. Therefore, the RCG can safely switch its parent.
+ */
+ clk_rcg2_set_force_enable(hw);
+
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ rcg->safe_src_index << CFG_SRC_SEL_SHIFT);
+
+ update_config(rcg);
+
+ clk_rcg2_clear_force_enable(hw);
+
+ /* Write back the stored configuration corresponding to current rate */
+ regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+}
+
+const struct clk_ops clk_rcg2_shared_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index b8064a336d46..39ce64c2783b 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -228,22 +228,6 @@ int qcom_cc_really_probe(struct platform_device *pdev,
if (!cc)
return -ENOMEM;
- cc->rclks = rclks;
- cc->num_rclks = num_clks;
-
- for (i = 0; i < num_clks; i++) {
- if (!rclks[i])
- continue;
-
- ret = devm_clk_register_regmap(dev, rclks[i]);
- if (ret)
- return ret;
- }
-
- ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
- if (ret)
- return ret;
-
reset = &cc->reset;
reset->rcdev.of_node = dev->of_node;
reset->rcdev.ops = &qcom_reset_ops;
@@ -272,6 +256,22 @@ int qcom_cc_really_probe(struct platform_device *pdev,
return ret;
}
+ cc->rclks = rclks;
+ cc->num_rclks = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rclks[i])
+ continue;
+
+ ret = devm_clk_register_regmap(dev, rclks[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
+ if (ret)
+ return ret;
+
return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 3d6452932797..9f35b3fe1d97 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -1418,6 +1418,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
static struct clk_branch gcc_usb3_phy_pipe_clk = {
.halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x50004,
.enable_mask = BIT(0),
@@ -2472,6 +2473,7 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
static struct clk_branch gcc_pcie_0_pipe_clk = {
.halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x6b018,
.enable_mask = BIT(0),
@@ -2547,6 +2549,7 @@ static struct clk_branch gcc_pcie_1_aux_clk = {
static struct clk_branch gcc_pcie_1_pipe_clk = {
.halt_reg = 0x6d018,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x6d018,
.enable_mask = BIT(0),
@@ -2622,6 +2625,7 @@ static struct clk_branch gcc_pcie_2_aux_clk = {
static struct clk_branch gcc_pcie_2_pipe_clk = {
.halt_reg = 0x6e018,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x6e018,
.enable_mask = BIT(0),
@@ -2792,6 +2796,7 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
.halt_reg = 0x7501c,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x7501c,
.enable_mask = BIT(0),
@@ -2807,6 +2812,7 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
.halt_reg = 0x75020,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x75020,
.enable_mask = BIT(0),
@@ -3105,7 +3111,7 @@ static struct gdsc aggre0_noc_gdsc = {
.name = "aggre0_noc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc hlos1_vote_aggre0_noc_gdsc = {
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
new file mode 100644
index 000000000000..78d87f5c7098
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -0,0 +1,2834 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8998.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-alpha-pll.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+enum {
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_PLL0_EARLY_DIV_CLK_SRC,
+ P_SLEEP_CLK,
+ P_XO,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "xo",
+ "gpll0_out_main",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "xo",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "xo",
+ "gpll0_out_main",
+ "core_pi_sleep_clk",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "xo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "xo",
+ "gpll0_out_main",
+ "gpll4_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "xo",
+ "gpll0_out_main",
+ "aud_ref_clk",
+ "core_bi_pll_test_se",
+};
+
+static struct pll_vco fabia_vco[] = {
+ { 250000000, 2000000000, 0 },
+ { 125000000, 1000000000, 1 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_main = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_odd = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_odd",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_test = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_test",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll1_out_even = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_even",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll1_out_main = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_main",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll1_out_odd = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_odd",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll1_out_test = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_test",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_even = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_even",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_main = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_main",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_odd = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_odd",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_test = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_test",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_even = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_even",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_main = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_main",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_odd = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_odd",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_test = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_test",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_even = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_even",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_main = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_main",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_odd = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_odd",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_test = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_test",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x19020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1900c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1b020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1b00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1d020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1d00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1f020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1f00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x21020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2100c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x23020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2300c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_MAIN, 1, 96, 15625),
+ F(7372800, P_GPLL0_OUT_MAIN, 1, 192, 15625),
+ F(14745600, P_GPLL0_OUT_MAIN, 1, 384, 15625),
+ F(16000000, P_GPLL0_OUT_MAIN, 5, 2, 15),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
+ F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
+ F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1a00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1c00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x26020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x28020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2800c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2a020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2a00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2c020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2c00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2e020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x30020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x3000c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2700c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x2900c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x2b00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart3_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_hmss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_ahb_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_rbcpr_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x48044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F(1010526, P_XO, 1, 1, 19),
+ { }
+};
+
+static struct clk_rcg2 pcie_aux_clk_src = {
+ .cmd_rcgr = 0x6c000,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_pcie_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcie_aux_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc4_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x16010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc4_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_tsif_ref_clk_src[] = {
+ F(105495, P_XO, 1, 1, 182),
+ { }
+};
+
+static struct clk_rcg2 tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_clk_src",
+ .parent_names = gcc_parent_names_5,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_axi_clk_src = {
+ .cmd_rcgr = 0x75018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_ufs_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0xf014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+ F(1200000, P_XO, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x5000c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb3_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre1_noc_xo_clk = {
+ .halt_reg = 0x8202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre1_noc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre1_ufs_axi_clk = {
+ .halt_reg = 0x82028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x82028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre1_ufs_axi_clk",
+ .parent_names = (const char *[]){
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre1_usb3_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre1_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* APSS QDSS timestamp counter branches (div-by-2 and div-by-8 taps). */
+static struct clk_branch gcc_apss_qdss_tsctr_div2_clk = {
+ .halt_reg = 0x48090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_qdss_tsctr_div2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_qdss_tsctr_div8_clk = {
+ .halt_reg = 0x48094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_qdss_tsctr_div8_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * Voted branch: enabled through bit 22 of the shared enable register
+ * 0x52004, while halt status is still polled at the branch's own
+ * halt_reg (BRANCH_HALT_VOTED).
+ */
+static struct clk_branch gcc_bimc_hmss_axi_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_hmss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_mss_q6_axi_clk = {
+ .halt_reg = 0x4401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_mss_q6_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * BLSP1 branch clocks. The AHB and sleep clocks are voted (shared
+ * enable register 0x52004, bits 17 and 16); each QUP i2c/spi and UART
+ * branch gates bit 0 of its own register and follows the rate of the
+ * correspondingly named *_clk_src RCG.
+ */
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x19008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x1b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x1b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x21008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x21008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x21004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Voted: shared enable register 0x52004, bit 16. */
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x1a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * BLSP2 branch clocks, mirroring the BLSP1 set. The AHB and sleep
+ * clocks are voted (shared enable register 0x52004, bits 15 and 14);
+ * each QUP i2c/spi and UART branch gates bit 0 of its own register and
+ * follows the rate of the correspondingly named *_clk_src RCG.
+ */
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x25004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x2a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x2a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x2c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x2c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
+ .halt_reg = 0x2e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
+ .halt_reg = 0x2e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
+ .halt_reg = 0x30008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
+ .halt_reg = 0x30004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Voted: shared enable register 0x52004, bit 14. */
+static struct clk_branch gcc_blsp2_sleep_clk = {
+ .halt_reg = 0x25008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x27004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x29004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart3_apps_clk = {
+ .halt_reg = 0x2b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Config-NoC AXI branch toward USB3; follows usb30_master_clk_src. */
+static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
+ .halt_reg = 0x5018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* General-purpose output clocks GP1-GP3, each fed by its own RCG. */
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* GPU-side branch clocks (BIMC graphics, config AHB, SNoC DVM). */
+static struct clk_branch gcc_gpu_bimc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_bimc_gfx_src_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_bimc_gfx_src_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * HMSS branch clocks. The AHB clock is voted (shared enable register
+ * 0x52004, bit 21) and follows hmss_ahb_clk_src; the others gate bit 0
+ * of their own register.
+ */
+static struct clk_branch gcc_hmss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_ahb_clk",
+ .parent_names = (const char *[]){
+ "hmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_at_clk = {
+ .halt_reg = 0x48010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_dvm_bus_clk = {
+ .halt_reg = 0x4808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_dvm_bus_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "hmss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_trig_clk = {
+ .halt_reg = 0x4800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* LPASS, MMSS and MSS branch clocks; all gate bit 0 of their own register. */
+static struct clk_branch gcc_lpass_at_clk = {
+ .halt_reg = 0x47020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_trig_clk = {
+ .halt_reg = 0x4701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_qm_ahb_clk = {
+ .halt_reg = 0x9030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_qm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_qm_core_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_qm_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_sys_noc_axi_clk = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_sys_noc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_at_clk = {
+ .halt_reg = 0x8a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * PCIe 0 branch clocks. The aux and phy-aux branches follow
+ * pcie_aux_clk_src; the AHB/AXI/pipe branches have no listed parent.
+ */
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]){
+ "pcie_aux_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "pcie_aux_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* PDM branch clocks; gcc_pdm2_clk follows pdm2_clk_src. */
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Voted: shared enable register 0x52004, bit 13. */
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* SDCC2/SDCC4 and TSIF branch clocks; apps/ref clocks follow their RCGs. */
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]){
+ "tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* UFS branch clocks; only the AXI branch lists a parent (ufs_axi_clk_src). */
+static struct clk_branch gcc_ufs_ahb_clk = {
+ .halt_reg = 0x7500c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_axi_clk = {
+ .halt_reg = 0x75008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_axi_clk",
+ .parent_names = (const char *[]){
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ice_core_clk = {
+ .halt_reg = 0x7600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ice_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_aux_clk = {
+ .halt_reg = 0x76040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_aux_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
+ .halt_reg = 0x75014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
+ .halt_reg = 0x7605c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7605c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_unipro_core_clk = {
+ .halt_reg = 0x76008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_unipro_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * USB3 controller and PHY branch clocks. The master, mock-UTMI and
+ * phy-aux branches follow their correspondingly named *_clk_src RCGs.
+ */
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x50000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "usb3_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * GDSC power domains for PCIe 0, UFS and USB3.0. All are votable,
+ * support only OFF/ON power states, and have no hardware-control
+ * register (gds_hw_ctrl = 0).
+ */
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc ufs_gdsc = {
+ .gdscr = 0x75004,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "ufs_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc usb_30_gdsc = {
+ .gdscr = 0xf004,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "usb_30_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_msm8998_clocks[] = {
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr,
+ [BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr,
+ [GCC_AGGRE1_NOC_XO_CLK] = &gcc_aggre1_noc_xo_clk.clkr,
+ [GCC_AGGRE1_UFS_AXI_CLK] = &gcc_aggre1_ufs_axi_clk.clkr,
+ [GCC_AGGRE1_USB3_AXI_CLK] = &gcc_aggre1_usb3_axi_clk.clkr,
+ [GCC_APSS_QDSS_TSCTR_DIV2_CLK] = &gcc_apss_qdss_tsctr_div2_clk.clkr,
+ [GCC_APSS_QDSS_TSCTR_DIV8_CLK] = &gcc_apss_qdss_tsctr_div8_clk.clkr,
+ [GCC_BIMC_HMSS_AXI_CLK] = &gcc_bimc_hmss_axi_clk.clkr,
+ [GCC_BIMC_MSS_Q6_AXI_CLK] = &gcc_bimc_mss_q6_axi_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP2_SLEEP_CLK] = &gcc_blsp2_sleep_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr,
+ [GCC_CFG_NOC_USB3_AXI_CLK] = &gcc_cfg_noc_usb3_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GPU_BIMC_GFX_CLK] = &gcc_gpu_bimc_gfx_clk.clkr,
+ [GCC_GPU_BIMC_GFX_SRC_CLK] = &gcc_gpu_bimc_gfx_src_clk.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_HMSS_AHB_CLK] = &gcc_hmss_ahb_clk.clkr,
+ [GCC_HMSS_AT_CLK] = &gcc_hmss_at_clk.clkr,
+ [GCC_HMSS_DVM_BUS_CLK] = &gcc_hmss_dvm_bus_clk.clkr,
+ [GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
+ [GCC_HMSS_TRIG_CLK] = &gcc_hmss_trig_clk.clkr,
+ [GCC_LPASS_AT_CLK] = &gcc_lpass_at_clk.clkr,
+ [GCC_LPASS_TRIG_CLK] = &gcc_lpass_trig_clk.clkr,
+ [GCC_MMSS_NOC_CFG_AHB_CLK] = &gcc_mmss_noc_cfg_ahb_clk.clkr,
+ [GCC_MMSS_QM_AHB_CLK] = &gcc_mmss_qm_ahb_clk.clkr,
+ [GCC_MMSS_QM_CORE_CLK] = &gcc_mmss_qm_core_clk.clkr,
+ [GCC_MMSS_SYS_NOC_AXI_CLK] = &gcc_mmss_sys_noc_axi_clk.clkr,
+ [GCC_MSS_AT_CLK] = &gcc_mss_at_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr,
+ [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
+ [GCC_UFS_ICE_CORE_CLK] = &gcc_ufs_ice_core_clk.clkr,
+ [GCC_UFS_PHY_AUX_CLK] = &gcc_ufs_phy_aux_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr,
+ [GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr,
+ [GCC_UFS_UNIPRO_CORE_CLK] = &gcc_ufs_unipro_core_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL0_OUT_MAIN] = &gpll0_out_main.clkr,
+ [GPLL0_OUT_ODD] = &gpll0_out_odd.clkr,
+ [GPLL0_OUT_TEST] = &gpll0_out_test.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_OUT_EVEN] = &gpll1_out_even.clkr,
+ [GPLL1_OUT_MAIN] = &gpll1_out_main.clkr,
+ [GPLL1_OUT_ODD] = &gpll1_out_odd.clkr,
+ [GPLL1_OUT_TEST] = &gpll1_out_test.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL2_OUT_EVEN] = &gpll2_out_even.clkr,
+ [GPLL2_OUT_MAIN] = &gpll2_out_main.clkr,
+ [GPLL2_OUT_ODD] = &gpll2_out_odd.clkr,
+ [GPLL2_OUT_TEST] = &gpll2_out_test.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_OUT_EVEN] = &gpll3_out_even.clkr,
+ [GPLL3_OUT_MAIN] = &gpll3_out_main.clkr,
+ [GPLL3_OUT_ODD] = &gpll3_out_odd.clkr,
+ [GPLL3_OUT_TEST] = &gpll3_out_test.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_OUT_EVEN] = &gpll4_out_even.clkr,
+ [GPLL4_OUT_MAIN] = &gpll4_out_main.clkr,
+ [GPLL4_OUT_ODD] = &gpll4_out_odd.clkr,
+ [GPLL4_OUT_TEST] = &gpll4_out_test.clkr,
+ [HMSS_AHB_CLK_SRC] = &hmss_ahb_clk_src.clkr,
+ [HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr,
+ [PCIE_AUX_CLK_SRC] = &pcie_aux_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
+ [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
+ [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+};
+
+/*
+ * Globally distributed switch controllers (power domains) exported by
+ * this clock controller, indexed by the qcom,gcc-msm8998 binding IDs.
+ */
+static struct gdsc *gcc_msm8998_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [UFS_GDSC] = &ufs_gdsc,
+ [USB_30_GDSC] = &usb_30_gdsc,
+};
+
+/*
+ * Block Control Register (BCR) offsets for the reset controller, indexed
+ * by the qcom,gcc-msm8998 binding IDs.
+ *
+ * Fix: the table previously carried the *decimal* register offsets with a
+ * spurious "0x" prefix (e.g. 0x102400 where the BCR actually lives at
+ * 0x19000 == 102400 decimal), so every reset targeted a bogus address —
+ * most of them beyond max_register.  Converted all entries to the real
+ * hexadecimal offsets.
+ */
+static const struct qcom_reset_map gcc_msm8998_resets[] = {
+ [GCC_BLSP1_QUP1_BCR] = { 0x19000 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x1b000 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x1d000 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x1f000 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x21000 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x23000 },
+ [GCC_BLSP2_QUP1_BCR] = { 0x26000 },
+ [GCC_BLSP2_QUP2_BCR] = { 0x28000 },
+ [GCC_BLSP2_QUP3_BCR] = { 0x2a000 },
+ [GCC_BLSP2_QUP4_BCR] = { 0x2c000 },
+ [GCC_BLSP2_QUP5_BCR] = { 0x2e000 },
+ [GCC_BLSP2_QUP6_BCR] = { 0x30000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_BCR] = { 0x75000 },
+ [GCC_USB_30_BCR] = { 0xf000 },
+};
+
+/* MMIO register map: 32-bit registers on a 4-byte stride. */
+static const struct regmap_config gcc_msm8998_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8f000,
+ .fast_io = true,
+};
+
+/*
+ * Bundles the clock, reset and GDSC tables for the common qcom
+ * clock-controller probe helpers (qcom_cc_map/qcom_cc_really_probe).
+ */
+static const struct qcom_cc_desc gcc_msm8998_desc = {
+ .config = &gcc_msm8998_regmap_config,
+ .clks = gcc_msm8998_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8998_clocks),
+ .resets = gcc_msm8998_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8998_resets),
+ .gdscs = gcc_msm8998_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs),
+};
+
+/*
+ * Map the GCC register space, apply the one required static register
+ * tweak, then hand registration over to the common qcom probe helper.
+ */
+static int gcc_msm8998_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap = qcom_cc_map(pdev, &gcc_msm8998_desc);
+ int err;
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * HMSS_AHB_CLK_SLEEP_ENA: let hardware gate hmss_ahb_clk on its own
+ * during certain apps low power modes.
+ */
+ err = regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
+ if (err)
+ return err;
+
+ return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap);
+}
+
+/* Device-tree match table; also exported for module autoloading. */
+static const struct of_device_id gcc_msm8998_match_table[] = {
+ { .compatible = "qcom,gcc-msm8998" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8998_match_table);
+
+static struct platform_driver gcc_msm8998_driver = {
+ .probe = gcc_msm8998_probe,
+ .driver = {
+ .name = "gcc-msm8998",
+ .of_match_table = gcc_msm8998_match_table,
+ },
+};
+
+/* Registered at core_initcall so clocks exist before consumers probe. */
+static int __init gcc_msm8998_init(void)
+{
+ return platform_driver_register(&gcc_msm8998_driver);
+}
+core_initcall(gcc_msm8998_init);
+
+static void __exit gcc_msm8998_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8998_driver);
+}
+module_exit(gcc_msm8998_exit);
+
+MODULE_DESCRIPTION("QCOM GCC msm8998 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8998");
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
new file mode 100644
index 000000000000..e78e6f5b99fc
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -0,0 +1,3465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/*
+ * Frequency table entry: rate f, parent source s, pre-divider h, and M/N
+ * counter values.  The pre-divider is stored as 2*h - 1 so half-integer
+ * dividers (e.g. 2.5 -> 4) can be expressed in the register encoding.
+ */
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+/* Logical parent clock identifiers used by the parent_map tables below. */
+enum {
+ P_BI_TCXO,
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+/*
+ * parent_map tables translate the logical P_* identifiers above into the
+ * hardware mux selector values; the matching gcc_parent_names_* arrays
+ * give the clock names, in the same order, for the clock framework.
+ */
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_pi_sleep_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "bi_tcxo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+ "bi_tcxo",
+ "gpll0",
+ "aud_ref_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+/* NOTE: names_7/names_8 have no map of their own; RCGs pair them with
+ * gcc_parent_map_0/gcc_parent_map_3 respectively. */
+static const char * const gcc_parent_names_7[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_8[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+/* Fabia-type alpha PLLs fed by the board XO (bi_tcxo). */
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+/* Post-divider register encodings for the Fabia "even" output. */
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+/* CPU subsystem AHB/RBCPR root clock generators (XO rate only). */
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_names = gcc_parent_names_7,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x4815c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* General-purpose clocks GP1-GP3; all three share one frequency table. */
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* PCIe auxiliary and PHY reference-generator clock sources. */
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d028,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* PDM2 (pulse-density modulation) root clock. */
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * QUPv3 serial-engine clocks: two wrappers (wrap0/wrap1) of eight
+ * engines (s0-s7) each.  All sixteen RCGs share one frequency table and
+ * use clk_rcg2_shared_ops so the RCG parks on XO when disabled.
+ */
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17164,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17294,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173c4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174f4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17754,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17884,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/* SD-card controller and TSIF (transport stream) root clocks. */
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(201500000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_10,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* 105.495 kHz = 19.2 MHz / 2 * 1/91 (MPEG-TS reference). */
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* UFS root clocks: one set for the "card" controller, one for the "phy"
+ * controller; the phy RCGs reuse the card frequency tables. */
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x7501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7505c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x75090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x7701c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 2,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+/* USB3.0 primary mock-UTMI RCG (HID divider only, no MND). */
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/*
+ * USB3.0 secondary controller master RCG; shares the primary's frequency
+ * table.
+ * NOTE(review): uses clk_rcg2_ops while the _prim_ twin above uses
+ * clk_rcg2_shared_ops — confirm the asymmetry is intentional.
+ */
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * USB3.0 secondary mock-UTMI RCG; shares the primary's frequency table.
+ * NOTE(review): plain clk_rcg2_ops here vs shared_ops on the prim twin —
+ * confirm intentional.
+ */
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * USB3 primary PHY aux RCG; reuses the CPUSS RBCPR table (19.2 MHz XO).
+ * NOTE(review): clk_rcg2_ops here while the _sec_ twin below uses
+ * clk_rcg2_shared_ops — confirm the asymmetry is intentional.
+ */
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* USB3 secondary PHY aux RCG; same table/parents as the primary. */
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1005c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/* Voltage-sensor control RCG; XO-rate only via the RBCPR table. */
+static struct clk_rcg2 gcc_vs_ctrl_clk_src = {
+ .cmd_rcgr = 0x7a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* Voltage-sensor frequencies: 19.2 MHz XO, 300/600 MHz from GPLL0 main. */
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+/*
+ * Voltage-sensor RCG.
+ * NOTE(review): pairs parent_map = gcc_parent_map_3 with
+ * parent_names = gcc_parent_names_8, breaking the map_N <-> names_N
+ * pattern used by every other RCG in this file. If names_8 is not the
+ * deliberate (e.g. active-only XO) variant of map_3's parents, this maps
+ * parent indices to the wrong clock names — verify against the two tables.
+ */
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vsensor_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * Aggregation-NoC and APC voltage-sensor branch clocks. Branches with a
+ * parent and CLK_SET_RATE_PARENT forward rate requests to their RCG;
+ * parentless branches are simple gates. hwcg_reg/hwcg_bit, where present,
+ * select the hardware clock-gating enable bit for the branch.
+ */
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x90014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x82028,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_card_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x8201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x82020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x82020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* APC voltage-sensor gate fed by the shared vsensor RCG. */
+static struct clk_branch gcc_apc_vs_clk = {
+ .halt_reg = 0x7a050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apc_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * Boot-ROM, camera subsystem, and crypto-engine (CE1) branches.
+ * BRANCH_HALT_VOTED entries are enabled through the shared vote register
+ * 0x52004 with a per-clock bit, while halt status is still read from
+ * halt_reg. BRANCH_VOTED halts are tolerant of other voters keeping the
+ * clock on.
+ */
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_axi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * Config-NoC USB3 AXI branches and CPUSS branches. gcc_cpuss_ahb_clk is
+ * marked CLK_IS_CRITICAL so the framework never gates the CPU subsystem
+ * AHB path.
+ */
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x502c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x5030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_sec_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ /* Always on: CPU subsystem AHB must never be gated. */
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * DDRSS and display subsystem branches. The gpll0(_div) "clk_src" entries
+ * are vote-enabled feeds into the display block with no halt register, so
+ * they use BRANCH_HALT_DELAY (wait, don't poll).
+ */
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x44038,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x44038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0xb00c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_axi_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* General-purpose clock branches GP1-GP3, each gating its own RCG. */
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * GPU subsystem branches: config AHB, vote-enabled GPLL0 feeds (no halt
+ * register, hence BRANCH_HALT_DELAY), iref, memnoc/snoc fabric clocks and
+ * the GPU voltage-sensor gate.
+ */
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x71004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_even",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_vs_clk = {
+ .halt_reg = 0x7a04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * Modem subsystem (MSS) branches: bus interfaces, a vote-enabled GPLL0
+ * divider feed (BRANCH_HALT_DELAY, no halt register), and the MSS
+ * voltage-sensor gate.
+ */
+static struct clk_branch gcc_mss_axis2_clk = {
+ .halt_reg = 0x8a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_axis2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x8a000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_div_clk_src",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mfab_axis_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x8a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mfab_axis_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
+ .halt_reg = 0x8a154,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8a154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_memnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a150,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_vs_clk = {
+ .halt_reg = 0x7a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * PCIe 0/1 branches. Most are vote-enabled (PCIe0 via 0x5200c, PCIe1 via
+ * 0x52004) with halt polled at halt_reg. The pipe clocks have no halt
+ * register and use BRANCH_HALT_SKIP — presumably because they are sourced
+ * from the external PHY; confirm against the hardware docs.
+ */
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_1_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_clk = {
+ .halt_reg = 0x8c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Shared PCIe PHY aux; note it is fed by the PCIe *0* aux RCG. */
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_phy_refgen_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* PDM, PRNG and QMIP (camera/display/video AHB mux-in-path) branches. */
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "gcc_pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_ahb_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * QUPv3 serial-engine branches, wrapper 0 (s0-s7, vote bits 10-17) and
+ * wrapper 1 (s0-s7, vote bits 22-29), all enabled via the shared vote
+ * register 0x5200c and each forwarding rates to its own RCG.
+ */
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x17290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x173c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17620,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x17750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x17880,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s6_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap1_s7_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* QUPv3 wrapper master/slave AHB gates, vote-enabled via 0x5200c. */
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
/*
 * SDCC2/SDCC4 controller clocks.  The bus (AHB) branches are fixed-rate
 * gates; the apps branches are parented to their RCGs and propagate rate
 * requests upward via CLK_SET_RATE_PARENT.
 */
static struct clk_branch gcc_sdcc2_ahb_clk = {
	.halt_reg = 0x14008,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x14008,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_sdcc2_ahb_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_sdcc2_apps_clk = {
	.halt_reg = 0x14004,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x14004,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_sdcc2_apps_clk",
			.parent_names = (const char *[]){
				"gcc_sdcc2_apps_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_sdcc4_ahb_clk = {
	.halt_reg = 0x16008,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x16008,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_sdcc4_ahb_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_sdcc4_apps_clk = {
	.halt_reg = 0x16004,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x16004,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_sdcc4_apps_clk",
			.parent_names = (const char *[]){
				"gcc_sdcc4_apps_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * System-NoC to CPU subsystem AHB bridge clock.  Vote-enabled through
 * 0x52004 and marked CLK_IS_CRITICAL so the framework never gates it:
 * without it the CPU loses its register access path.
 */
static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
	.halt_reg = 0x414c,
	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0x52004,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_sys_noc_cpuss_ahb_clk",
			.parent_names = (const char *[]){
				"gcc_cpuss_ahb_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/* TSIF (transport stream interface) bus, inactivity-timer and reference clocks. */
static struct clk_branch gcc_tsif_ahb_clk = {
	.halt_reg = 0x36004,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x36004,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_tsif_ahb_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_tsif_inactivity_timers_clk = {
	.halt_reg = 0x3600c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x3600c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_tsif_inactivity_timers_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_tsif_ref_clk = {
	.halt_reg = 0x36008,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x36008,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_tsif_ref_clk",
			.parent_names = (const char *[]){
				"gcc_tsif_ref_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * UFS card (external slot) controller clocks.  The rx/tx symbol clocks
 * use BRANCH_HALT_SKIP: their halt status is not polled — presumably
 * because they are sourced by the UFS PHY rather than the GCC (TODO:
 * confirm against the SDM845 clock plan).
 */
static struct clk_branch gcc_ufs_card_ahb_clk = {
	.halt_reg = 0x75010,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x75010,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x75010,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_card_ahb_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_card_axi_clk = {
	.halt_reg = 0x7500c,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x7500c,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x7500c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_card_axi_clk",
			.parent_names = (const char *[]){
				"gcc_ufs_card_axi_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_card_clkref_clk = {
	.halt_reg = 0x8c004,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8c004,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_card_clkref_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_card_ice_core_clk = {
	.halt_reg = 0x75058,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x75058,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x75058,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_card_ice_core_clk",
			.parent_names = (const char *[]){
				"gcc_ufs_card_ice_core_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_card_phy_aux_clk = {
	.halt_reg = 0x7508c,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x7508c,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x7508c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_card_phy_aux_clk",
			.parent_names = (const char *[]){
				"gcc_ufs_card_phy_aux_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
	.halt_check = BRANCH_HALT_SKIP,
	.clkr = {
		.enable_reg = 0x75018,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_card_rx_symbol_0_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
	.halt_check = BRANCH_HALT_SKIP,
	.clkr = {
		.enable_reg = 0x750a8,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_card_rx_symbol_1_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
	.halt_check = BRANCH_HALT_SKIP,
	.clkr = {
		.enable_reg = 0x75014,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_card_tx_symbol_0_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_card_unipro_core_clk = {
	.halt_reg = 0x75054,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x75054,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x75054,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_card_unipro_core_clk",
			.parent_names = (const char *[]){
				"gcc_ufs_card_unipro_core_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * UFS memory (embedded) controller clocks.  Mirrors the UFS card block
 * above at base 0x77xxx; the symbol clocks again skip the halt poll
 * (BRANCH_HALT_SKIP).
 */
static struct clk_branch gcc_ufs_mem_clkref_clk = {
	.halt_reg = 0x8c000,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8c000,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_mem_clkref_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_phy_ahb_clk = {
	.halt_reg = 0x77010,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x77010,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x77010,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_phy_ahb_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_phy_axi_clk = {
	.halt_reg = 0x7700c,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x7700c,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x7700c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_phy_axi_clk",
			.parent_names = (const char *[]){
				"gcc_ufs_phy_axi_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_phy_ice_core_clk = {
	.halt_reg = 0x77058,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x77058,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x77058,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_phy_ice_core_clk",
			.parent_names = (const char *[]){
				"gcc_ufs_phy_ice_core_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
	.halt_reg = 0x7708c,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x7708c,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x7708c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_phy_phy_aux_clk",
			.parent_names = (const char *[]){
				"gcc_ufs_phy_phy_aux_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
	.halt_check = BRANCH_HALT_SKIP,
	.clkr = {
		.enable_reg = 0x77018,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_phy_rx_symbol_0_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
	.halt_check = BRANCH_HALT_SKIP,
	.clkr = {
		.enable_reg = 0x770a8,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_phy_rx_symbol_1_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
	.halt_check = BRANCH_HALT_SKIP,
	.clkr = {
		.enable_reg = 0x77014,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_phy_tx_symbol_0_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
	.halt_reg = 0x77054,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x77054,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x77054,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_ufs_phy_unipro_core_clk",
			.parent_names = (const char *[]){
				"gcc_ufs_phy_unipro_core_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * USB 3.0 controller clocks, primary (0xf0xx) and secondary (0x100xx)
 * instances: bus master, mock-UTMI and sleep branches.
 */
static struct clk_branch gcc_usb30_prim_master_clk = {
	.halt_reg = 0xf00c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xf00c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb30_prim_master_clk",
			.parent_names = (const char *[]){
				"gcc_usb30_prim_master_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
	.halt_reg = 0xf014,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xf014,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb30_prim_mock_utmi_clk",
			.parent_names = (const char *[]){
				"gcc_usb30_prim_mock_utmi_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb30_prim_sleep_clk = {
	.halt_reg = 0xf010,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xf010,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb30_prim_sleep_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb30_sec_master_clk = {
	.halt_reg = 0x1000c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x1000c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb30_sec_master_clk",
			.parent_names = (const char *[]){
				"gcc_usb30_sec_master_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
	.halt_reg = 0x10014,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x10014,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb30_sec_mock_utmi_clk",
			.parent_names = (const char *[]){
				"gcc_usb30_sec_mock_utmi_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb30_sec_sleep_clk = {
	.halt_reg = 0x10010,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x10010,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb30_sec_sleep_clk",
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * USB3 PHY clocks for both instances.  Note that the com_aux branches
 * share the same parent RCG as the aux branches, and the pipe clocks
 * skip the halt poll (BRANCH_HALT_SKIP) — presumably PHY-sourced;
 * confirm against the clock plan.
 */
static struct clk_branch gcc_usb3_prim_clkref_clk = {
	.halt_reg = 0x8c008,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8c008,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb3_prim_clkref_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
	.halt_reg = 0xf04c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xf04c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb3_prim_phy_aux_clk",
			.parent_names = (const char *[]){
				"gcc_usb3_prim_phy_aux_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
	.halt_reg = 0xf050,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xf050,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb3_prim_phy_com_aux_clk",
			.parent_names = (const char *[]){
				"gcc_usb3_prim_phy_aux_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
	.halt_check = BRANCH_HALT_SKIP,
	.clkr = {
		.enable_reg = 0xf054,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb3_prim_phy_pipe_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb3_sec_clkref_clk = {
	.halt_reg = 0x8c028,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8c028,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb3_sec_clkref_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
	.halt_reg = 0x1004c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x1004c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb3_sec_phy_aux_clk",
			.parent_names = (const char *[]){
				"gcc_usb3_sec_phy_aux_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
	.halt_reg = 0x10050,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x10050,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb3_sec_phy_com_aux_clk",
			.parent_names = (const char *[]){
				"gcc_usb3_sec_phy_aux_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
	.halt_check = BRANCH_HALT_SKIP,
	.clkr = {
		.enable_reg = 0x10054,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb3_sec_phy_pipe_clk",
			.ops = &clk_branch2_ops,
		},
	},
};
+
/* USB PHY configuration bridge clock. */
static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
	.halt_reg = 0x6a004,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x6a004,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x6a004,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_usb_phy_cfg_ahb2phy_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

/*
 * Voltage-sensor clocks for the VDDA/VDDCX/VDDMX rails; all three are
 * children of the shared gcc_vsensor_clk_src RCG.
 */
static struct clk_branch gcc_vdda_vs_clk = {
	.halt_reg = 0x7a00c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x7a00c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_vdda_vs_clk",
			.parent_names = (const char *[]){
				"gcc_vsensor_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_vddcx_vs_clk = {
	.halt_reg = 0x7a004,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x7a004,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_vddcx_vs_clk",
			.parent_names = (const char *[]){
				"gcc_vsensor_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_vddmx_vs_clk = {
	.halt_reg = 0x7a008,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x7a008,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_vddmx_vs_clk",
			.parent_names = (const char *[]){
				"gcc_vsensor_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * Video subsystem and voltage-sensor controller clocks.  Note
 * gcc_video_axi_clk uses BRANCH_VOTED (only this branch in the file
 * does), so its halt check treats the enable as a vote.
 */
static struct clk_branch gcc_video_ahb_clk = {
	.halt_reg = 0xb004,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0xb004,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0xb004,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_video_ahb_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_video_axi_clk = {
	.halt_reg = 0xb01c,
	.halt_check = BRANCH_VOTED,
	.clkr = {
		.enable_reg = 0xb01c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_video_axi_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_video_xo_clk = {
	.halt_reg = 0xb028,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xb028,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_video_xo_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_vs_ctrl_ahb_clk = {
	.halt_reg = 0x7a014,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x7a014,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x7a014,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_vs_ctrl_ahb_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch gcc_vs_ctrl_clk = {
	.halt_reg = 0x7a010,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x7a010,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_vs_ctrl_clk",
			.parent_names = (const char *[]){
				"gcc_vs_ctrl_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * GDSC (globally distributed switch controller) power domains.  The
 * peripheral GDSCs set POLL_CFG_GDSCR so the gdsc core polls power-up/
 * power-down completion through the CFG_GDSCR register (gdscr + 0x4)
 * instead of the PWR_ON bit; the HLOS vote GDSCs use the default poll.
 */
static struct gdsc pcie_0_gdsc = {
	.gdscr = 0x6b004,
	.pd = {
		.name = "pcie_0_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = POLL_CFG_GDSCR,
};

static struct gdsc pcie_1_gdsc = {
	.gdscr = 0x8d004,
	.pd = {
		.name = "pcie_1_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = POLL_CFG_GDSCR,
};

static struct gdsc ufs_card_gdsc = {
	.gdscr = 0x75004,
	.pd = {
		.name = "ufs_card_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = POLL_CFG_GDSCR,
};

static struct gdsc ufs_phy_gdsc = {
	.gdscr = 0x77004,
	.pd = {
		.name = "ufs_phy_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = POLL_CFG_GDSCR,
};

static struct gdsc usb30_prim_gdsc = {
	.gdscr = 0xf004,
	.pd = {
		.name = "usb30_prim_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = POLL_CFG_GDSCR,
};

static struct gdsc usb30_sec_gdsc = {
	.gdscr = 0x10004,
	.pd = {
		.name = "usb30_sec_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = POLL_CFG_GDSCR,
};

static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
	.gdscr = 0x7d030,
	.pd = {
		.name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
	.gdscr = 0x7d03c,
	.pd = {
		.name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
	.gdscr = 0x7d034,
	.pd = {
		.name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
	.gdscr = 0x7d038,
	.pd = {
		.name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
	.gdscr = 0x7d040,
	.pd = {
		.name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
	.gdscr = 0x7d048,
	.pd = {
		.name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
	.gdscr = 0x7d044,
	.pd = {
		.name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
};
+
/*
 * Clock lookup table, indexed by the GCC_* consumer IDs from the
 * dt-bindings header.  Each entry points at the clk_regmap embedded in
 * the corresponding provider struct; qcom_cc_really_probe() registers
 * every non-NULL slot.
 */
static struct clk_regmap *gcc_sdm845_clocks[] = {
	[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
	[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
	[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
	[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
	[GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
	[GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
	[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
	[GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
	[GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
	[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
	[GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
	[GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
	[GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
	[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
	[GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
	[GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
	[GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
	[GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
	[GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
	[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
	[GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
	[GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
	[GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
	[GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
	[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
	[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
	[GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
	[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
	[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
	[GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
	[GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
	[GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
	[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
	[GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
	[GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
	[GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
	[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
	[GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
	[GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
	[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
	[GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr,
	[GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
	[GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
	[GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
	[GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
	[GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
	[GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
	[GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
	[GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
	[GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
	[GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
	[GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
	[GCC_PCIE_1_CLKREF_CLK] = &gcc_pcie_1_clkref_clk.clkr,
	[GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
	[GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
	[GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
	[GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
	[GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
	[GCC_PCIE_PHY_REFGEN_CLK] = &gcc_pcie_phy_refgen_clk.clkr,
	[GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
	[GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
	[GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
	[GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
	[GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
	[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
	[GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
	[GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
	[GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
	[GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
	[GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
	[GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
	[GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
	[GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
	[GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
	[GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
	[GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
	[GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
	[GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
	[GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
	[GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
	[GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
	[GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
	[GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
	[GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
	[GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
	[GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
	[GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
	[GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
	[GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
	[GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
	[GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
	[GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
	[GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
	[GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
	[GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
	[GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
	[GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
	[GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
	[GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
	[GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
	[GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
	[GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
	[GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
	[GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
	[GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
	[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
	[GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
	[GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
	[GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
	[GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
	[GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
	[GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
	[GCC_TSIF_INACTIVITY_TIMERS_CLK] =
					&gcc_tsif_inactivity_timers_clk.clkr,
	[GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
	[GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
	[GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
	[GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
	[GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
	[GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
	[GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
	[GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
	[GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
	[GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
	[GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
	[GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
	[GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
	[GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
	[GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
					&gcc_ufs_card_unipro_core_clk_src.clkr,
	[GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
	[GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
	[GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
	[GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
	[GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
	[GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
	[GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
	[GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
	[GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
	[GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
	[GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
	[GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
	[GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
					&gcc_ufs_phy_unipro_core_clk_src.clkr,
	[GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
	[GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
	[GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
	[GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
					&gcc_usb30_prim_mock_utmi_clk_src.clkr,
	[GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
	[GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
	[GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
	[GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
	[GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] =
					&gcc_usb30_sec_mock_utmi_clk_src.clkr,
	[GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
	[GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
	[GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
	[GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
	[GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
	[GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
	[GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
	[GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
	[GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
	[GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
	[GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
	[GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
	[GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
	[GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
	[GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
	[GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
	[GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
	[GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
	[GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
	[GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
	[GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
	[GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
	[GPLL0] = &gpll0.clkr,
	[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
	[GPLL4] = &gpll4.clkr,
};
+
/*
 * Block Control Register (BCR) reset map, indexed by the GCC_*_BCR IDs
 * from the dt-bindings header; each value is the BCR register offset.
 */
static const struct qcom_reset_map gcc_sdm845_resets[] = {
	[GCC_MMSS_BCR] = { 0xb000 },
	[GCC_PCIE_0_BCR] = { 0x6b000 },
	[GCC_PCIE_1_BCR] = { 0x8d000 },
	[GCC_PCIE_PHY_BCR] = { 0x6f000 },
	[GCC_PDM_BCR] = { 0x33000 },
	[GCC_PRNG_BCR] = { 0x34000 },
	[GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
	[GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
	[GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
	[GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
	[GCC_SDCC2_BCR] = { 0x14000 },
	[GCC_SDCC4_BCR] = { 0x16000 },
	[GCC_TSIF_BCR] = { 0x36000 },
	[GCC_UFS_CARD_BCR] = { 0x75000 },
	[GCC_UFS_PHY_BCR] = { 0x77000 },
	[GCC_USB30_PRIM_BCR] = { 0xf000 },
	[GCC_USB30_SEC_BCR] = { 0x10000 },
	[GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
	[GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
	[GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
	[GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
	[GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
	[GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
	[GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
	[GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
};
+
/* GDSC lookup table, indexed by the *_GDSC IDs from the dt-bindings header. */
static struct gdsc *gcc_sdm845_gdscs[] = {
	[PCIE_0_GDSC] = &pcie_0_gdsc,
	[PCIE_1_GDSC] = &pcie_1_gdsc,
	[UFS_CARD_GDSC] = &ufs_card_gdsc,
	[UFS_PHY_GDSC] = &ufs_phy_gdsc,
	[USB30_PRIM_GDSC] = &usb30_prim_gdsc,
	[USB30_SEC_GDSC] = &usb30_sec_gdsc,
	[HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] =
			&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
	[HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] =
			&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
	[HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] =
			&hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
	[HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] =
			&hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
	[HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
			&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
	[HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
			&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
	[HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
};
+
/* MMIO regmap for the GCC block: 32-bit registers at 4-byte stride. */
static const struct regmap_config gcc_sdm845_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = 0x182090,
	.fast_io = true,
};

/* Aggregate descriptor handed to the common qcom_cc probe helpers. */
static const struct qcom_cc_desc gcc_sdm845_desc = {
	.config = &gcc_sdm845_regmap_config,
	.clks = gcc_sdm845_clocks,
	.num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
	.resets = gcc_sdm845_resets,
	.num_resets = ARRAY_SIZE(gcc_sdm845_resets),
	.gdscs = gcc_sdm845_gdscs,
	.num_gdscs = ARRAY_SIZE(gcc_sdm845_gdscs),
};

static const struct of_device_id gcc_sdm845_match_table[] = {
	{ .compatible = "qcom,gcc-sdm845" },
	{ }
};
MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
+
+static int gcc_sdm845_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Disable the GPLL0 active input to MMSS and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /* Enable CPUSS clocks */
+ regmap_update_bits(regmap, 0x48190, BIT(0), 0x1);
+ regmap_update_bits(regmap, 0x52004, BIT(22), 0x1);
+
+ return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
+}
+
static struct platform_driver gcc_sdm845_driver = {
	.probe = gcc_sdm845_probe,
	.driver = {
		.name = "gcc-sdm845",
		.of_match_table = gcc_sdm845_match_table,
	},
};

/*
 * Register at subsys_initcall time (earlier than module_init) so the
 * clock provider is available before the consumer drivers probe.
 */
static int __init gcc_sdm845_init(void)
{
	return platform_driver_register(&gcc_sdm845_driver);
}
subsys_initcall(gcc_sdm845_init);

static void __exit gcc_sdm845_exit(void)
{
	platform_driver_unregister(&gcc_sdm845_driver);
}
module_exit(gcc_sdm845_exit);

MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:gcc-sdm845");
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index a4f3580587b7..a077133c7ce3 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,12 @@
#define HW_CONTROL_MASK BIT(1)
#define SW_COLLAPSE_MASK BIT(0)
#define GMEM_CLAMP_IO_MASK BIT(0)
+#define GMEM_RESET_MASK BIT(4)
+
+/* CFG_GDSCR */
+#define GDSC_POWER_UP_COMPLETE BIT(16)
+#define GDSC_POWER_DOWN_COMPLETE BIT(15)
+#define CFG_GDSCR_OFFSET 0x4
/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
#define EN_REST_WAIT_VAL (0x2 << 20)
@@ -40,20 +46,50 @@
#define RETAIN_MEM BIT(14)
#define RETAIN_PERIPH BIT(13)
-#define TIMEOUT_US 100
+#define TIMEOUT_US 500
#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
-static int gdsc_is_enabled(struct gdsc *sc, unsigned int reg)
+enum gdsc_status {
+ GDSC_OFF,
+ GDSC_ON
+};
+
+/* Returns 1 if the GDSC's state matches @status, 0 if not, and < 0 on error */
+static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
+ unsigned int reg;
u32 val;
int ret;
+ if (sc->flags & POLL_CFG_GDSCR)
+ reg = sc->gdscr + CFG_GDSCR_OFFSET;
+ else if (sc->gds_hw_ctrl)
+ reg = sc->gds_hw_ctrl;
+ else
+ reg = sc->gdscr;
+
ret = regmap_read(sc->regmap, reg, &val);
if (ret)
return ret;
- return !!(val & PWR_ON_MASK);
+ if (sc->flags & POLL_CFG_GDSCR) {
+ switch (status) {
+ case GDSC_ON:
+ return !!(val & GDSC_POWER_UP_COMPLETE);
+ case GDSC_OFF:
+ return !!(val & GDSC_POWER_DOWN_COMPLETE);
+ }
+ }
+
+ switch (status) {
+ case GDSC_ON:
+ return !!(val & PWR_ON_MASK);
+ case GDSC_OFF:
+ return !(val & PWR_ON_MASK);
+ }
+
+ return -EINVAL;
}
static int gdsc_hwctrl(struct gdsc *sc, bool en)
@@ -63,34 +99,33 @@ static int gdsc_hwctrl(struct gdsc *sc, bool en)
return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
}
-static int gdsc_poll_status(struct gdsc *sc, unsigned int reg, bool en)
+static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
{
ktime_t start;
start = ktime_get();
do {
- if (gdsc_is_enabled(sc, reg) == en)
+ if (gdsc_check_status(sc, status))
return 0;
} while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);
- if (gdsc_is_enabled(sc, reg) == en)
+ if (gdsc_check_status(sc, status))
return 0;
return -ETIMEDOUT;
}
-static int gdsc_toggle_logic(struct gdsc *sc, bool en)
+static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
{
int ret;
- u32 val = en ? 0 : SW_COLLAPSE_MASK;
- unsigned int status_reg = sc->gdscr;
+ u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK;
ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
if (ret)
return ret;
/* If disabling votable gdscs, don't poll on status */
- if ((sc->flags & VOTABLE) && !en) {
+ if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
/*
* Add a short delay here to ensure that an enable
* right after it was disabled does not put it in an
@@ -101,7 +136,6 @@ static int gdsc_toggle_logic(struct gdsc *sc, bool en)
}
if (sc->gds_hw_ctrl) {
- status_reg = sc->gds_hw_ctrl;
/*
* The gds hw controller asserts/de-asserts the status bit soon
* after it receives a power on/off request from a master.
@@ -115,7 +149,7 @@ static int gdsc_toggle_logic(struct gdsc *sc, bool en)
udelay(1);
}
- return gdsc_poll_status(sc, status_reg, en);
+ return gdsc_poll_status(sc, status);
}
static inline int gdsc_deassert_reset(struct gdsc *sc)
@@ -166,6 +200,14 @@ static inline void gdsc_assert_clamp_io(struct gdsc *sc)
GMEM_CLAMP_IO_MASK, 1);
}
+static inline void gdsc_assert_reset_aon(struct gdsc *sc)
+{
+ regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
+ GMEM_RESET_MASK, 1);
+ udelay(1);
+ regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
+ GMEM_RESET_MASK, 0);
+}
static int gdsc_enable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
@@ -174,10 +216,19 @@ static int gdsc_enable(struct generic_pm_domain *domain)
if (sc->pwrsts == PWRSTS_ON)
return gdsc_deassert_reset(sc);
- if (sc->flags & CLAMP_IO)
+ if (sc->flags & SW_RESET) {
+ gdsc_assert_reset(sc);
+ udelay(1);
+ gdsc_deassert_reset(sc);
+ }
+
+ if (sc->flags & CLAMP_IO) {
+ if (sc->flags & AON_RESET)
+ gdsc_assert_reset_aon(sc);
gdsc_deassert_clamp_io(sc);
+ }
- ret = gdsc_toggle_logic(sc, true);
+ ret = gdsc_toggle_logic(sc, GDSC_ON);
if (ret)
return ret;
@@ -222,8 +273,6 @@ static int gdsc_disable(struct generic_pm_domain *domain)
/* Turn off HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
- unsigned int reg;
-
ret = gdsc_hwctrl(sc, false);
if (ret < 0)
return ret;
@@ -235,8 +284,7 @@ static int gdsc_disable(struct generic_pm_domain *domain)
*/
udelay(1);
- reg = sc->gds_hw_ctrl ? sc->gds_hw_ctrl : sc->gdscr;
- ret = gdsc_poll_status(sc, reg, true);
+ ret = gdsc_poll_status(sc, GDSC_ON);
if (ret)
return ret;
}
@@ -244,7 +292,7 @@ static int gdsc_disable(struct generic_pm_domain *domain)
if (sc->pwrsts & PWRSTS_OFF)
gdsc_clear_mem_on(sc);
- ret = gdsc_toggle_logic(sc, false);
+ ret = gdsc_toggle_logic(sc, GDSC_OFF);
if (ret)
return ret;
@@ -258,7 +306,6 @@ static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
int on, ret;
- unsigned int reg;
/*
* Disable HW trigger: collapse/restore occur based on registers writes.
@@ -274,13 +321,12 @@ static int gdsc_init(struct gdsc *sc)
/* Force gdsc ON if only ON state is supported */
if (sc->pwrsts == PWRSTS_ON) {
- ret = gdsc_toggle_logic(sc, true);
+ ret = gdsc_toggle_logic(sc, GDSC_ON);
if (ret)
return ret;
}
- reg = sc->gds_hw_ctrl ? sc->gds_hw_ctrl : sc->gdscr;
- on = gdsc_is_enabled(sc, reg);
+ on = gdsc_check_status(sc, GDSC_ON);
if (on < 0)
return on;
@@ -291,6 +337,14 @@ static int gdsc_init(struct gdsc *sc)
if ((sc->flags & VOTABLE) && on)
gdsc_enable(&sc->pd);
+ /* If ALWAYS_ON GDSCs are not ON, turn them ON */
+ if (sc->flags & ALWAYS_ON) {
+ if (!on)
+ gdsc_enable(&sc->pd);
+ on = true;
+ sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
+ }
+
if (on || (sc->pwrsts & PWRSTS_RET))
gdsc_force_mem_on(sc);
else
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 39648348e5ec..bd1f2c780d0a 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,10 @@ struct gdsc {
#define VOTABLE BIT(0)
#define CLAMP_IO BIT(1)
#define HW_CTRL BIT(2)
+#define SW_RESET BIT(3)
+#define AON_RESET BIT(4)
+#define POLL_CFG_GDSCR BIT(5)
+#define ALWAYS_ON BIT(6)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 66a2fa4ec93c..1a25ee4f3658 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -1245,7 +1245,7 @@ static struct clk_branch mmss_mmagic_ahb_clk = {
.name = "mmss_mmagic_ahb_clk",
.parent_names = (const char *[]){ "ahb_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1260,7 +1260,7 @@ static struct clk_branch mmss_mmagic_cfg_ahb_clk = {
.name = "mmss_mmagic_cfg_ahb_clk",
.parent_names = (const char *[]){ "ahb_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1319,7 +1319,7 @@ static struct clk_branch mmagic_camss_axi_clk = {
.name = "mmagic_camss_axi_clk",
.parent_names = (const char *[]){ "axi_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1334,7 +1334,7 @@ static struct clk_branch mmagic_camss_noc_cfg_ahb_clk = {
.name = "mmagic_camss_noc_cfg_ahb_clk",
.parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1439,7 +1439,7 @@ static struct clk_branch mmagic_mdss_axi_clk = {
.name = "mmagic_mdss_axi_clk",
.parent_names = (const char *[]){ "axi_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1454,7 +1454,7 @@ static struct clk_branch mmagic_mdss_noc_cfg_ahb_clk = {
.name = "mmagic_mdss_noc_cfg_ahb_clk",
.parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1529,7 +1529,7 @@ static struct clk_branch mmagic_video_axi_clk = {
.name = "mmagic_video_axi_clk",
.parent_names = (const char *[]){ "axi_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1544,7 +1544,7 @@ static struct clk_branch mmagic_video_noc_cfg_ahb_clk = {
.name = "mmagic_video_noc_cfg_ahb_clk",
.parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2919,7 +2919,7 @@ static struct gdsc mmagic_video_gdsc = {
.name = "mmagic_video",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc mmagic_mdss_gdsc = {
@@ -2929,7 +2929,7 @@ static struct gdsc mmagic_mdss_gdsc = {
.name = "mmagic_mdss",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc mmagic_camss_gdsc = {
@@ -2939,7 +2939,7 @@ static struct gdsc mmagic_camss_gdsc = {
.name = "mmagic_camss",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc venus_gdsc = {
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
new file mode 100644
index 000000000000..9073b7a710ac
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,videocc-sdm845.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "gdsc.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_VIDEO_PLL0_OUT_EVEN,
+ P_VIDEO_PLL0_OUT_MAIN,
+ P_VIDEO_PLL0_OUT_ODD,
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+ { P_VIDEO_PLL0_OUT_EVEN, 2 },
+ { P_VIDEO_PLL0_OUT_ODD, 3 },
+ { P_CORE_BI_PLL_TEST_SE, 4 },
+};
+
+static const char * const video_cc_parent_names_0[] = {
+ "bi_tcxo",
+ "video_pll0",
+ "video_pll0_out_even",
+ "video_pll0_out_odd",
+ "core_bi_pll_test_se",
+};
+
+static const struct alpha_pll_config video_pll0_config = {
+ .l = 0x10,
+ .alpha = 0xaaab,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x42c,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "video_pll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = {
+ F(100000000, P_VIDEO_PLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(330000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(404000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(444000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(533000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_venus_clk_src = {
+ .cmd_rcgr = 0x7f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_venus_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_cc_venus_clk_src",
+ .parent_names = video_cc_parent_names_0,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch video_cc_apb_clk = {
+ .halt_reg = 0x990,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x990,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_at_clk = {
+ .halt_reg = 0x9f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_qdss_trig_clk = {
+ .halt_reg = 0x970,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x970,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_qdss_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_qdss_tsctr_div8_clk = {
+ .halt_reg = 0x9d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_qdss_tsctr_div8_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_axi_clk = {
+ .halt_reg = 0x930,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x930,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_core_clk = {
+ .halt_reg = 0x890,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x890,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_vcodec0_core_clk",
+ .parent_names = (const char *[]){
+ "video_cc_venus_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec1_axi_clk = {
+ .halt_reg = 0x950,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x950,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_vcodec1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec1_core_clk = {
+ .halt_reg = 0x8d0,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_vcodec1_core_clk",
+ .parent_names = (const char *[]){
+ "video_cc_venus_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+ .halt_reg = 0x9b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_axi_clk = {
+ .halt_reg = 0x910,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x910,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_core_clk = {
+ .halt_reg = 0x850,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x850,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_venus_ctl_core_clk",
+ .parent_names = (const char *[]){
+ "video_cc_venus_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x814,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .cxcs = (unsigned int []){ 0x850, 0x910 },
+ .cxc_count = 2,
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc vcodec0_gdsc = {
+ .gdscr = 0x874,
+ .pd = {
+ .name = "vcodec0_gdsc",
+ },
+ .cxcs = (unsigned int []){ 0x890, 0x930 },
+ .cxc_count = 2,
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vcodec1_gdsc = {
+ .gdscr = 0x8b4,
+ .pd = {
+ .name = "vcodec1_gdsc",
+ },
+ .cxcs = (unsigned int []){ 0x8d0, 0x950 },
+ .cxc_count = 2,
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *video_cc_sdm845_clocks[] = {
+ [VIDEO_CC_APB_CLK] = &video_cc_apb_clk.clkr,
+ [VIDEO_CC_AT_CLK] = &video_cc_at_clk.clkr,
+ [VIDEO_CC_QDSS_TRIG_CLK] = &video_cc_qdss_trig_clk.clkr,
+ [VIDEO_CC_QDSS_TSCTR_DIV8_CLK] = &video_cc_qdss_tsctr_div8_clk.clkr,
+ [VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr,
+ [VIDEO_CC_VCODEC0_CORE_CLK] = &video_cc_vcodec0_core_clk.clkr,
+ [VIDEO_CC_VCODEC1_AXI_CLK] = &video_cc_vcodec1_axi_clk.clkr,
+ [VIDEO_CC_VCODEC1_CORE_CLK] = &video_cc_vcodec1_core_clk.clkr,
+ [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+ [VIDEO_CC_VENUS_CLK_SRC] = &video_cc_venus_clk_src.clkr,
+ [VIDEO_CC_VENUS_CTL_AXI_CLK] = &video_cc_venus_ctl_axi_clk.clkr,
+ [VIDEO_CC_VENUS_CTL_CORE_CLK] = &video_cc_venus_ctl_core_clk.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+};
+
+static struct gdsc *video_cc_sdm845_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [VCODEC0_GDSC] = &vcodec0_gdsc,
+ [VCODEC1_GDSC] = &vcodec1_gdsc,
+};
+
+static const struct regmap_config video_cc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb90,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sdm845_desc = {
+ .config = &video_cc_sdm845_regmap_config,
+ .clks = video_cc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sdm845_clocks),
+ .gdscs = video_cc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sdm845_gdscs),
+};
+
+static const struct of_device_id video_cc_sdm845_match_table[] = {
+ { .compatible = "qcom,sdm845-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sdm845_match_table);
+
+static int video_cc_sdm845_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+ return qcom_cc_really_probe(pdev, &video_cc_sdm845_desc, regmap);
+}
+
+static struct platform_driver video_cc_sdm845_driver = {
+ .probe = video_cc_sdm845_probe,
+ .driver = {
+ .name = "sdm845-videocc",
+ .of_match_table = video_cc_sdm845_match_table,
+ },
+};
+
+static int __init video_cc_sdm845_init(void)
+{
+ return platform_driver_register(&video_cc_sdm845_driver);
+}
+subsys_initcall(video_cc_sdm845_init);
+
+static void __exit video_cc_sdm845_exit(void)
+{
+ platform_driver_unregister(&video_cc_sdm845_driver);
+}
+module_exit(video_cc_sdm845_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index ef76c861ec84..f9ba71311727 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -7,6 +7,7 @@ config CLK_RENESAS
select CLK_R8A7740 if ARCH_R8A7740
select CLK_R8A7743 if ARCH_R8A7743
select CLK_R8A7745 if ARCH_R8A7745
+ select CLK_R8A77470 if ARCH_R8A77470
select CLK_R8A7778 if ARCH_R8A7778
select CLK_R8A7779 if ARCH_R8A7779
select CLK_R8A7790 if ARCH_R8A7790
@@ -18,6 +19,7 @@ config CLK_RENESAS
select CLK_R8A77965 if ARCH_R8A77965
select CLK_R8A77970 if ARCH_R8A77970
select CLK_R8A77980 if ARCH_R8A77980
+ select CLK_R8A77990 if ARCH_R8A77990
select CLK_R8A77995 if ARCH_R8A77995
select CLK_SH73A0 if ARCH_SH73A0
@@ -60,6 +62,10 @@ config CLK_R8A7745
bool "RZ/G1E clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
+config CLK_R8A77470
+ bool "RZ/G1C clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN2_CPG
+
config CLK_R8A7778
bool "R-Car M1A clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
@@ -111,6 +117,10 @@ config CLK_R8A77980
bool "R-Car V3H clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A77990
+ bool "R-Car E3 clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A77995
bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 6c0f19636e3e..fe5bac9215e5 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_CLK_R8A73A4) += clk-r8a73a4.o
obj-$(CONFIG_CLK_R8A7740) += clk-r8a7740.o
obj-$(CONFIG_CLK_R8A7743) += r8a7743-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7745) += r8a7745-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77470) += r8a77470-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7778) += clk-r8a7778.o
obj-$(CONFIG_CLK_R8A7779) += clk-r8a7779.o
obj-$(CONFIG_CLK_R8A7790) += r8a7790-cpg-mssr.o
@@ -17,6 +18,7 @@ obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77965) += r8a77965-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77990) += r8a77990-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/r8a7743-cpg-mssr.c b/drivers/clk/renesas/r8a7743-cpg-mssr.c
index d3c8b1e2969f..011c170ec3f9 100644
--- a/drivers/clk/renesas/r8a7743-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7743-cpg-mssr.c
@@ -52,7 +52,6 @@ static const struct cpg_core_clk r8a7743_core_clks[] __initconst = {
/* Core Clock Outputs */
DEF_BASE("z", R8A7743_CLK_Z, CLK_TYPE_GEN2_Z, CLK_PLL0),
- DEF_BASE("lb", R8A7743_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
DEF_BASE("sdh", R8A7743_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
DEF_BASE("sd0", R8A7743_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
DEF_BASE("qspi", R8A7743_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
@@ -63,6 +62,7 @@ static const struct cpg_core_clk r8a7743_core_clks[] __initconst = {
DEF_FIXED("zs", R8A7743_CLK_ZS, CLK_PLL1, 6, 1),
DEF_FIXED("hp", R8A7743_CLK_HP, CLK_PLL1, 12, 1),
DEF_FIXED("b", R8A7743_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("lb", R8A7743_CLK_LB, CLK_PLL1, 24, 1),
DEF_FIXED("p", R8A7743_CLK_P, CLK_PLL1, 24, 1),
DEF_FIXED("cl", R8A7743_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("m2", R8A7743_CLK_M2, CLK_PLL1, 8, 1),
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
index 87f5a3619e4f..4b0a9243b748 100644
--- a/drivers/clk/renesas/r8a7745-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -51,7 +51,6 @@ static const struct cpg_core_clk r8a7745_core_clks[] __initconst = {
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
/* Core Clock Outputs */
- DEF_BASE("lb", R8A7745_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
DEF_BASE("sdh", R8A7745_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
DEF_BASE("sd0", R8A7745_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
DEF_BASE("qspi", R8A7745_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
@@ -63,6 +62,7 @@ static const struct cpg_core_clk r8a7745_core_clks[] __initconst = {
DEF_FIXED("zs", R8A7745_CLK_ZS, CLK_PLL1, 6, 1),
DEF_FIXED("hp", R8A7745_CLK_HP, CLK_PLL1, 12, 1),
DEF_FIXED("b", R8A7745_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("lb", R8A7745_CLK_LB, CLK_PLL1, 24, 1),
DEF_FIXED("p", R8A7745_CLK_P, CLK_PLL1, 24, 1),
DEF_FIXED("cl", R8A7745_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cp", R8A7745_CLK_CP, CLK_PLL1, 48, 1),
diff --git a/drivers/clk/renesas/r8a77470-cpg-mssr.c b/drivers/clk/renesas/r8a77470-cpg-mssr.c
new file mode 100644
index 000000000000..ab0fb10b6bf0
--- /dev/null
+++ b/drivers/clk/renesas/r8a77470-cpg-mssr.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a77470 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a77470-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A77470_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77470_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("sdh", R8A77470_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A77470_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("sd1", R8A77470_CLK_SD1, CLK_TYPE_GEN2_SD1, CLK_PLL1),
+ DEF_BASE("qspi", R8A77470_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A77470_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("z2", R8A77470_CLK_Z2, CLK_PLL0, 1, 1),
+ DEF_FIXED("zx", R8A77470_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A77470_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A77470_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("b", R8A77470_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("lb", R8A77470_CLK_LB, CLK_PLL1, 24, 1),
+ DEF_FIXED("p", R8A77470_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A77470_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cp", R8A77470_CLK_CP, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A77470_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("zb3", R8A77470_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("mp", R8A77470_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cpex", R8A77470_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A77470_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A77470_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A77470_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+};
+
+static const struct mssr_mod_clk r8a77470_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A77470_CLK_MP),
+ DEF_MOD("vcp0", 101, R8A77470_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A77470_CLK_ZS),
+ DEF_MOD("tmu1", 111, R8A77470_CLK_P),
+ DEF_MOD("3dg", 112, R8A77470_CLK_ZS),
+ DEF_MOD("2d-dmac", 115, R8A77470_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A77470_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A77470_CLK_P),
+ DEF_MOD("tmu2", 122, R8A77470_CLK_P),
+ DEF_MOD("cmt0", 124, R8A77470_CLK_R),
+ DEF_MOD("vsp1du0", 128, R8A77470_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A77470_CLK_ZS),
+ DEF_MOD("msiof2", 205, R8A77470_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A77470_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A77470_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A77470_CLK_ZS),
+ DEF_MOD("sdhi2", 312, R8A77470_CLK_SD2),
+ DEF_MOD("sdhi1", 313, R8A77470_CLK_SD1),
+ DEF_MOD("sdhi0", 314, R8A77470_CLK_SD0),
+ DEF_MOD("usbhs-dmac0-ch1", 326, R8A77470_CLK_HP),
+ DEF_MOD("usbhs-dmac1-ch1", 327, R8A77470_CLK_HP),
+ DEF_MOD("cmt1", 329, R8A77470_CLK_R),
+ DEF_MOD("usbhs-dmac0-ch0", 330, R8A77470_CLK_HP),
+ DEF_MOD("usbhs-dmac1-ch0", 331, R8A77470_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A77470_CLK_R),
+ DEF_MOD("irqc", 407, R8A77470_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A77470_CLK_ZS),
+ DEF_MOD("audio-dmac0", 502, R8A77470_CLK_HP),
+ DEF_MOD("pwm", 523, R8A77470_CLK_P),
+ DEF_MOD("usb-ehci-0", 703, R8A77470_CLK_MP),
+ DEF_MOD("usbhs-0", 704, R8A77470_CLK_HP),
+ DEF_MOD("usb-ehci-1", 705, R8A77470_CLK_MP),
+ DEF_MOD("usbhs-1", 706, R8A77470_CLK_HP),
+ DEF_MOD("hscif2", 713, R8A77470_CLK_ZS),
+ DEF_MOD("scif5", 714, R8A77470_CLK_P),
+ DEF_MOD("scif4", 715, R8A77470_CLK_P),
+ DEF_MOD("hscif1", 716, R8A77470_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A77470_CLK_ZS),
+ DEF_MOD("scif3", 718, R8A77470_CLK_P),
+ DEF_MOD("scif2", 719, R8A77470_CLK_P),
+ DEF_MOD("scif1", 720, R8A77470_CLK_P),
+ DEF_MOD("scif0", 721, R8A77470_CLK_P),
+ DEF_MOD("du1", 723, R8A77470_CLK_ZX),
+ DEF_MOD("du0", 724, R8A77470_CLK_ZX),
+ DEF_MOD("ipmmu-sgx", 800, R8A77470_CLK_ZX),
+ DEF_MOD("etheravb", 812, R8A77470_CLK_HP),
+ DEF_MOD("ether", 813, R8A77470_CLK_P),
+ DEF_MOD("gpio5", 907, R8A77470_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A77470_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A77470_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A77470_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A77470_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A77470_CLK_CP),
+ DEF_MOD("can1", 915, R8A77470_CLK_P),
+ DEF_MOD("can0", 916, R8A77470_CLK_P),
+ DEF_MOD("qspi_mod-1", 917, R8A77470_CLK_QSPI),
+ DEF_MOD("qspi_mod-0", 918, R8A77470_CLK_QSPI),
+ DEF_MOD("i2c4", 927, R8A77470_CLK_HP),
+ DEF_MOD("i2c3", 928, R8A77470_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A77470_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A77470_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A77470_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A77470_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A77470_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a77470_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 (MHz) *1 *2
+ *---------------------------------------------------
+ * 0 0 20 x80 x78 x50
+ * 0 1 26 x60 x60 x56
+ * 1		0		Prohibited setting
+ * 1 1 30 x52 x52 x50
+ *
+ * *1 : Table 7.4 indicates VCO output (PLL0 = VCO)
+ * *2 : Table 7.4 indicates VCO output (PLL1 = VCO)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
+
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult x2 PLL3 mult */
+ { 1, 156, 50, },
+ { 1, 120, 56, },
+	{ /* Invalid */ },
+ { 1, 104, 50, },
+};
+
+static int __init r8a77470_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a77470_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a77470_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a77470_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a77470_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a77470_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a77470_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a77470_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a77470_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/r8a7791-cpg-mssr.c b/drivers/clk/renesas/r8a7791-cpg-mssr.c
index 820b220b09cc..1b91f03b7598 100644
--- a/drivers/clk/renesas/r8a7791-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7791-cpg-mssr.c
@@ -57,7 +57,6 @@ static struct cpg_core_clk r8a7791_core_clks[] __initdata = {
/* Core Clock Outputs */
DEF_BASE("z", R8A7791_CLK_Z, CLK_TYPE_GEN2_Z, CLK_PLL0),
- DEF_BASE("lb", R8A7791_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
DEF_BASE("adsp", R8A7791_CLK_ADSP, CLK_TYPE_GEN2_ADSP, CLK_PLL1),
DEF_BASE("sdh", R8A7791_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
DEF_BASE("sd0", R8A7791_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
@@ -70,6 +69,7 @@ static struct cpg_core_clk r8a7791_core_clks[] __initdata = {
DEF_FIXED("hp", R8A7791_CLK_HP, CLK_PLL1, 12, 1),
DEF_FIXED("i", R8A7791_CLK_I, CLK_PLL1, 2, 1),
DEF_FIXED("b", R8A7791_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("lb", R8A7791_CLK_LB, CLK_PLL1, 24, 1),
DEF_FIXED("p", R8A7791_CLK_P, CLK_PLL1, 24, 1),
DEF_FIXED("cl", R8A7791_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("m2", R8A7791_CLK_M2, CLK_PLL1, 8, 1),
diff --git a/drivers/clk/renesas/r8a7792-cpg-mssr.c b/drivers/clk/renesas/r8a7792-cpg-mssr.c
index 609a54080496..493e07859f5f 100644
--- a/drivers/clk/renesas/r8a7792-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7792-cpg-mssr.c
@@ -53,7 +53,6 @@ static const struct cpg_core_clk r8a7792_core_clks[] __initconst = {
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
/* Core Clock Outputs */
- DEF_BASE("lb", R8A7792_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
DEF_BASE("qspi", R8A7792_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
DEF_FIXED("z", R8A7792_CLK_Z, CLK_PLL0, 1, 1),
@@ -63,6 +62,7 @@ static const struct cpg_core_clk r8a7792_core_clks[] __initconst = {
DEF_FIXED("hp", R8A7792_CLK_HP, CLK_PLL1, 12, 1),
DEF_FIXED("i", R8A7792_CLK_I, CLK_PLL1, 3, 1),
DEF_FIXED("b", R8A7792_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("lb", R8A7792_CLK_LB, CLK_PLL1, 24, 1),
DEF_FIXED("p", R8A7792_CLK_P, CLK_PLL1, 24, 1),
DEF_FIXED("cl", R8A7792_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("m2", R8A7792_CLK_M2, CLK_PLL1, 8, 1),
diff --git a/drivers/clk/renesas/r8a7794-cpg-mssr.c b/drivers/clk/renesas/r8a7794-cpg-mssr.c
index 2a40bbeaeeaf..088f4b79fdfc 100644
--- a/drivers/clk/renesas/r8a7794-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7794-cpg-mssr.c
@@ -55,7 +55,6 @@ static const struct cpg_core_clk r8a7794_core_clks[] __initconst = {
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
/* Core Clock Outputs */
- DEF_BASE("lb", R8A7794_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
DEF_BASE("adsp", R8A7794_CLK_ADSP, CLK_TYPE_GEN2_ADSP, CLK_PLL1),
DEF_BASE("sdh", R8A7794_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
DEF_BASE("sd0", R8A7794_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
@@ -69,6 +68,7 @@ static const struct cpg_core_clk r8a7794_core_clks[] __initconst = {
DEF_FIXED("hp", R8A7794_CLK_HP, CLK_PLL1, 12, 1),
DEF_FIXED("i", R8A7794_CLK_I, CLK_PLL1, 2, 1),
DEF_FIXED("b", R8A7794_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("lb", R8A7794_CLK_LB, CLK_PLL1, 24, 1),
DEF_FIXED("p", R8A7794_CLK_P, CLK_PLL1, 24, 1),
DEF_FIXED("cl", R8A7794_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cp", R8A7794_CLK_CP, CLK_PLL1, 48, 1),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index b1acfb60351c..8fae5e9c4a77 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -116,6 +116,10 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("scif3", 204, R8A77965_CLK_S3D4),
DEF_MOD("scif1", 206, R8A77965_CLK_S3D4),
DEF_MOD("scif0", 207, R8A77965_CLK_S3D4),
+ DEF_MOD("msiof3", 208, R8A77965_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A77965_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A77965_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A77965_CLK_MSO),
DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3),
DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3),
DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index 7aaae73a321a..d7ebd9ec0059 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -116,7 +116,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
DEF_MOD("sys-dmac1", 218, R8A77980_CLK_S0D3),
DEF_MOD("tpu0", 304, R8A77980_CLK_S3D4),
DEF_MOD("sdif", 314, R8A77980_CLK_SD0),
- DEF_MOD("pciec0", 319, R8A77980_CLK_S3D1),
+ DEF_MOD("pciec0", 319, R8A77980_CLK_S2D2),
DEF_MOD("intc-ex", 407, R8A77980_CLK_CP),
DEF_MOD("intc-ap", 408, R8A77980_CLK_S0D3),
DEF_MOD("hscif3", 517, R8A77980_CLK_S3D1),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
new file mode 100644
index 000000000000..9e14f1486fbb
--- /dev/null
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a77990 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a77990-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A77990_CLK_CPEX,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL0D4,
+ CLK_PLL0D6,
+ CLK_PLL0D8,
+ CLK_PLL0D20,
+ CLK_PLL0D24,
+ CLK_PLL1D2,
+ CLK_PE,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100),
+ DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1),
+ DEF_FIXED(".pll0d6", CLK_PLL0D6, CLK_PLL0, 6, 1),
+ DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1),
+ DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1),
+ DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1),
+ DEF_FIXED(".pll1d2", CLK_PLL1D2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pe", CLK_PE, CLK_PLL0D20, 1, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("za2", R8A77990_CLK_ZA2, CLK_PLL0D24, 1, 1),
+ DEF_FIXED("za8", R8A77990_CLK_ZA8, CLK_PLL0D8, 1, 1),
+ DEF_FIXED("ztr", R8A77990_CLK_ZTR, CLK_PLL1, 6, 1),
+ DEF_FIXED("zt", R8A77990_CLK_ZT, CLK_PLL1, 4, 1),
+ DEF_FIXED("zx", R8A77990_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("s0d1", R8A77990_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d3", R8A77990_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d6", R8A77990_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12", R8A77990_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s0d24", R8A77990_CLK_S0D24, CLK_S0, 24, 1),
+ DEF_FIXED("s1d1", R8A77990_CLK_S1D1, CLK_S1, 1, 1),
+ DEF_FIXED("s1d2", R8A77990_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A77990_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A77990_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A77990_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A77990_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A77990_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A77990_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A77990_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_GEN3_SD("sd0", R8A77990_CLK_SD0, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SD("sd1", R8A77990_CLK_SD1, CLK_SDSRC, 0x0078),
+ DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, CLK_SDSRC, 0x026c),
+
+ DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cp", R8A77990_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A77990_CLK_CPEX, CLK_EXTAL, 4, 1),
+ DEF_FIXED("osc", R8A77990_CLK_OSC, CLK_EXTAL, 384, 1),
+ DEF_FIXED("r", R8A77990_CLK_R, CLK_EXTAL, 1536, 1),
+
+ DEF_GEN3_PE("s0d6c", R8A77990_CLK_S0D6C, CLK_S0, 6, CLK_PE, 2),
+ DEF_GEN3_PE("s3d1c", R8A77990_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1),
+ DEF_GEN3_PE("s3d2c", R8A77990_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
+ DEF_GEN3_PE("s3d4c", R8A77990_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
+
+ DEF_DIV6P1("canfd", R8A77990_CLK_CANFD, CLK_PLL0D6, 0x244),
+ DEF_DIV6P1("csi0", R8A77990_CLK_CSI0, CLK_PLL1D2, 0x00c),
+ DEF_DIV6P1("mso", R8A77990_CLK_MSO, CLK_PLL1D2, 0x014),
+};
+
+static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
+ DEF_MOD("scif5", 202, R8A77990_CLK_S3D4C),
+ DEF_MOD("scif4", 203, R8A77990_CLK_S3D4C),
+ DEF_MOD("scif3", 204, R8A77990_CLK_S3D4C),
+ DEF_MOD("scif1", 206, R8A77990_CLK_S3D4C),
+ DEF_MOD("scif0", 207, R8A77990_CLK_S3D4C),
+ DEF_MOD("msiof3", 208, R8A77990_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A77990_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A77990_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A77990_CLK_MSO),
+ DEF_MOD("sys-dmac2", 217, R8A77990_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A77990_CLK_S3D1),
+ DEF_MOD("sys-dmac0", 219, R8A77990_CLK_S3D1),
+
+ DEF_MOD("cmt3", 300, R8A77990_CLK_R),
+ DEF_MOD("cmt2", 301, R8A77990_CLK_R),
+ DEF_MOD("cmt1", 302, R8A77990_CLK_R),
+ DEF_MOD("cmt0", 303, R8A77990_CLK_R),
+ DEF_MOD("scif2", 310, R8A77990_CLK_S3D4C),
+ DEF_MOD("sdif3", 311, R8A77990_CLK_SD3),
+ DEF_MOD("sdif1", 313, R8A77990_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A77990_CLK_SD0),
+ DEF_MOD("pcie0", 319, R8A77990_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A77990_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A77990_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A77990_CLK_S3D1),
+
+ DEF_MOD("rwdt", 402, R8A77990_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A77990_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77990_CLK_S0D3),
+
+ DEF_MOD("audmac0", 502, R8A77990_CLK_S3D4),
+ DEF_MOD("drif7", 508, R8A77990_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A77990_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A77990_CLK_S3D2),
+ DEF_MOD("drif4", 511, R8A77990_CLK_S3D2),
+ DEF_MOD("drif3", 512, R8A77990_CLK_S3D2),
+ DEF_MOD("drif2", 513, R8A77990_CLK_S3D2),
+ DEF_MOD("drif1", 514, R8A77990_CLK_S3D2),
+ DEF_MOD("drif0", 515, R8A77990_CLK_S3D2),
+ DEF_MOD("hscif4", 516, R8A77990_CLK_S3D1C),
+ DEF_MOD("hscif3", 517, R8A77990_CLK_S3D1C),
+ DEF_MOD("hscif2", 518, R8A77990_CLK_S3D1C),
+ DEF_MOD("hscif1", 519, R8A77990_CLK_S3D1C),
+ DEF_MOD("hscif0", 520, R8A77990_CLK_S3D1C),
+ DEF_MOD("thermal", 522, R8A77990_CLK_CP),
+ DEF_MOD("pwm", 523, R8A77990_CLK_S3D4C),
+
+ DEF_MOD("fcpvd1", 602, R8A77990_CLK_S1D2),
+ DEF_MOD("fcpvd0", 603, R8A77990_CLK_S1D2),
+ DEF_MOD("fcpvb0", 607, R8A77990_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A77990_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A77990_CLK_S0D1),
+ DEF_MOD("fcpcs", 619, R8A77990_CLK_S0D1),
+ DEF_MOD("vspd1", 622, R8A77990_CLK_S1D2),
+ DEF_MOD("vspd0", 623, R8A77990_CLK_S1D2),
+ DEF_MOD("vspb", 626, R8A77990_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A77990_CLK_S0D1),
+
+ DEF_MOD("ehci0", 703, R8A77990_CLK_S3D4),
+ DEF_MOD("hsusb", 704, R8A77990_CLK_S3D4),
+ DEF_MOD("csi40", 716, R8A77990_CLK_CSI0),
+ DEF_MOD("du1", 723, R8A77990_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A77990_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A77990_CLK_S2D1),
+
+ DEF_MOD("vin5", 806, R8A77990_CLK_S1D2),
+ DEF_MOD("vin4", 807, R8A77990_CLK_S1D2),
+ DEF_MOD("etheravb", 812, R8A77990_CLK_S3D2),
+
+ DEF_MOD("gpio6", 906, R8A77990_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A77990_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A77990_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A77990_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A77990_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A77990_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A77990_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A77990_CLK_S3D2),
+ DEF_MOD("can-if1", 915, R8A77990_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A77990_CLK_S3D4),
+ DEF_MOD("i2c6", 918, R8A77990_CLK_S3D2),
+ DEF_MOD("i2c5", 919, R8A77990_CLK_S3D2),
+ DEF_MOD("i2c-dvfs", 926, R8A77990_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A77990_CLK_S3D2),
+ DEF_MOD("i2c3", 928, R8A77990_CLK_S3D2),
+ DEF_MOD("i2c2", 929, R8A77990_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A77990_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A77990_CLK_S3D2),
+
+ DEF_MOD("ssi-all", 1005, R8A77990_CLK_S3D4),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A77990_CLK_S3D4),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a77990_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD19 EXTAL (MHz) PLL0 PLL1 PLL3
+ *--------------------------------------------------------------------
+ * 0 48 x 1 x100/4 x100/3 x100/3
+ * 1 48 x 1 x100/4 x100/3 x58/3
+ */
+#define CPG_PLL_CONFIG_INDEX(md) (((md) & BIT(19)) >> 19)
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[2] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 100, 3, 100, 3, },
+ { 1, 100, 3, 58, 3, },
+};
+
+static int __init r8a77990_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen3_cpg_init(cpg_pll_config, 0, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a77990_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a77990_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a77990_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a77990_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a77990_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a77990_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a77990_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a77990_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
index feb14579a71b..daf88bc2cdae 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.c
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include "renesas-cpg-mssr.h"
#include "rcar-gen2-cpg.h"
@@ -260,6 +261,17 @@ static const struct clk_div_table cpg_sd01_div_table[] = {
static const struct rcar_gen2_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_pll0_div __initdata;
static u32 cpg_mode __initdata;
+static u32 cpg_quirks __initdata;
+
+#define SD_SKIP_FIRST BIT(0) /* Skip first clock in SD table */
+
+static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
+ {
+ .soc_id = "r8a77470",
+ .data = (void *)SD_SKIP_FIRST,
+ },
+ { /* sentinel */ }
+};
struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
@@ -327,11 +339,17 @@ struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
case CLK_TYPE_GEN2_SD0:
table = cpg_sd01_div_table;
+ if (cpg_quirks & SD_SKIP_FIRST)
+ table++;
+
shift = 4;
break;
case CLK_TYPE_GEN2_SD1:
table = cpg_sd01_div_table;
+ if (cpg_quirks & SD_SKIP_FIRST)
+ table++;
+
shift = 0;
break;
@@ -360,9 +378,15 @@ struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
int __init rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
unsigned int pll0_div, u32 mode)
{
+ const struct soc_device_attribute *attr;
+
cpg_pll_config = config;
cpg_pll0_div = pll0_div;
cpg_mode = mode;
+ attr = soc_device_match(cpg_quirks_match);
+ if (attr)
+ cpg_quirks = (uintptr_t)attr->data;
+ pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks);
spin_lock_init(&cpg_lock);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 69a7c756658b..f4b013e9352d 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -653,6 +653,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a7745_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77470
+ {
+ .compatible = "renesas,r8a77470-cpg-mssr",
+ .data = &r8a77470_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A7790
{
.compatible = "renesas,r8a7790-cpg-mssr",
@@ -712,6 +718,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a77980_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77990
+ {
+ .compatible = "renesas,r8a77990-cpg-mssr",
+ .data = &r8a77990_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A77995
{
.compatible = "renesas,r8a77995-cpg-mssr",
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 97ccb093c10f..642f720b9b05 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -133,6 +133,7 @@ struct cpg_mssr_info {
extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7745_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77470_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7790_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7791_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
@@ -142,6 +143,7 @@ extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77965_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77970_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77980_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77990_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 59b8d320960a..98e7b9429b83 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -3,7 +3,6 @@
# Rockchip Clock specific Makefile
#
-obj-y += clk-rockchip.o
obj-y += clk.o
obj-y += clk-pll.o
obj-y += clk-cpu.o
diff --git a/drivers/clk/rockchip/clk-rockchip.c b/drivers/clk/rockchip/clk-rockchip.c
deleted file mode 100644
index 2c9bb81144c9..000000000000
--- a/drivers/clk/rockchip/clk-rockchip.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2013 MundoReader S.L.
- * Author: Heiko Stuebner <heiko@sntech.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-static DEFINE_SPINLOCK(clk_lock);
-
-/*
- * Gate clocks
- */
-
-static void __init rk2928_gate_clk_init(struct device_node *node)
-{
- struct clk_onecell_data *clk_data;
- const char *clk_parent;
- const char *clk_name;
- void __iomem *reg;
- void __iomem *reg_idx;
- int flags;
- int qty;
- int reg_bit;
- int clkflags = CLK_SET_RATE_PARENT;
- int i;
-
- qty = of_property_count_strings(node, "clock-output-names");
- if (qty < 0) {
- pr_err("%s: error in clock-output-names %d\n", __func__, qty);
- return;
- }
-
- if (qty == 0) {
- pr_info("%s: nothing to do\n", __func__);
- return;
- }
-
- reg = of_iomap(node, 0);
- if (!reg)
- return;
-
- clk_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
- if (!clk_data) {
- iounmap(reg);
- return;
- }
-
- clk_data->clks = kzalloc(qty * sizeof(struct clk *), GFP_KERNEL);
- if (!clk_data->clks) {
- kfree(clk_data);
- iounmap(reg);
- return;
- }
-
- flags = CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE;
-
- for (i = 0; i < qty; i++) {
- of_property_read_string_index(node, "clock-output-names",
- i, &clk_name);
-
- /* ignore empty slots */
- if (!strcmp("reserved", clk_name))
- continue;
-
- clk_parent = of_clk_get_parent_name(node, i);
-
- /* keep all gates untouched for now */
- clkflags |= CLK_IGNORE_UNUSED;
-
- reg_idx = reg + (4 * (i / 16));
- reg_bit = (i % 16);
-
- clk_data->clks[i] = clk_register_gate(NULL, clk_name,
- clk_parent, clkflags,
- reg_idx, reg_bit,
- flags,
- &clk_lock);
- WARN_ON(IS_ERR(clk_data->clks[i]));
- }
-
- clk_data->clk_num = qty;
-
- of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
-}
-CLK_OF_DECLARE(rk2928_gate, "rockchip,rk2928-gate-clk", rk2928_gate_clk_init);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 3cd8ad59e0b7..326b3fa44f5d 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -274,18 +274,10 @@ static struct clk *rockchip_clk_register_frac_branch(
struct clk_mux *frac_mux = &frac->mux;
struct clk_init_data init;
struct clk *mux_clk;
- int i, ret;
-
- frac->mux_frac_idx = -1;
- for (i = 0; i < child->num_parents; i++) {
- if (!strcmp(name, child->parent_names[i])) {
- pr_debug("%s: found fractional parent in mux at pos %d\n",
- __func__, i);
- frac->mux_frac_idx = i;
- break;
- }
- }
+ int ret;
+ frac->mux_frac_idx = match_string(child->parent_names,
+ child->num_parents, name);
frac->mux_ops = &clk_mux_ops;
frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;
@@ -312,6 +304,8 @@ static struct clk *rockchip_clk_register_frac_branch(
/* notifier on the fraction divider to catch rate changes */
if (frac->mux_frac_idx >= 0) {
+ pr_debug("%s: found fractional parent in mux at pos %d\n",
+ __func__, frac->mux_frac_idx);
ret = clk_notifier_register(clk, &frac->clk_nb);
if (ret)
pr_err("%s: failed to register clock notifier for %s\n",
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index 66a904758761..0d92f3e5e3d9 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -219,8 +219,7 @@ static int s3c24xx_dclk1_div_notify(struct notifier_block *nb,
#ifdef CONFIG_PM_SLEEP
static int s3c24xx_dclk_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev);
+ struct s3c24xx_dclk *s3c24xx_dclk = dev_get_drvdata(dev);
s3c24xx_dclk->reg_save = readl_relaxed(s3c24xx_dclk->base);
return 0;
@@ -228,8 +227,7 @@ static int s3c24xx_dclk_suspend(struct device *dev)
static int s3c24xx_dclk_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev);
+ struct s3c24xx_dclk *s3c24xx_dclk = dev_get_drvdata(dev);
writel_relaxed(s3c24xx_dclk->reg_save, s3c24xx_dclk->base);
return 0;
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 3a11c382a663..72714633e39c 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -260,46 +260,45 @@ static int s10_clk_register_pll(const struct stratix10_pll_clock *clks,
return 0;
}
-static struct stratix10_clock_data *__socfpga_s10_clk_init(struct device_node *np,
+static struct stratix10_clock_data *__socfpga_s10_clk_init(struct platform_device *pdev,
int nr_clks)
{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct stratix10_clock_data *clk_data;
struct clk **clk_table;
+ struct resource *res;
void __iomem *base;
- base = of_iomap(np, 0);
- if (!base) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
pr_err("%s: failed to map clock registers\n", __func__);
- goto err;
+ return ERR_CAST(base);
}
- clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
- goto err;
+ return ERR_PTR(-ENOMEM);
clk_data->base = base;
- clk_table = kcalloc(nr_clks, sizeof(*clk_table), GFP_KERNEL);
+ clk_table = devm_kcalloc(dev, nr_clks, sizeof(*clk_table), GFP_KERNEL);
if (!clk_table)
- goto err_data;
+ return ERR_PTR(-ENOMEM);
clk_data->clk_data.clks = clk_table;
clk_data->clk_data.clk_num = nr_clks;
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data);
return clk_data;
-
-err_data:
- kfree(clk_data);
-err:
- return NULL;
}
-static int s10_clkmgr_init(struct device_node *np)
+static int s10_clkmgr_init(struct platform_device *pdev)
{
struct stratix10_clock_data *clk_data;
- clk_data = __socfpga_s10_clk_init(np, STRATIX10_NUM_CLKS);
- if (!clk_data)
- return -ENOMEM;
+ clk_data = __socfpga_s10_clk_init(pdev, STRATIX10_NUM_CLKS);
+ if (IS_ERR(clk_data))
+ return PTR_ERR(clk_data);
s10_clk_register_pll(s10_pll_clks, ARRAY_SIZE(s10_pll_clks), clk_data);
@@ -317,11 +316,7 @@ static int s10_clkmgr_init(struct device_node *np)
static int s10_clkmgr_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
-
- s10_clkmgr_init(np);
-
- return 0;
+ return s10_clkmgr_init(pdev);
}
static const struct of_device_id stratix10_clkmgr_match_table[] = {
@@ -334,6 +329,7 @@ static struct platform_driver stratix10_clkmgr_driver = {
.probe = s10_clkmgr_probe,
.driver = {
.name = "stratix10-clkmgr",
+ .suppress_bind_attrs = true,
.of_match_table = stratix10_clkmgr_match_table,
},
};
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index f911d9f77763..47810be7f15c 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -147,7 +147,7 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_30m_clk", 0, 1,
1);
- clk_register_clkdev(clk, NULL, "wdt");
+ clk_register_clkdev(clk, NULL, "fc880000.wdt");
/* clock derived from pll1 clk */
clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk",
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 79dfd296c3d1..826674d090fd 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -16,6 +16,11 @@ config SUN50I_H6_CCU
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+config SUN50I_H6_R_CCU
+ bool "Support for the Allwinner H6 PRCM CCU"
+ default ARM64 && ARCH_SUNXI
+ depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+
config SUN4I_A10_CCU
bool "Support for the Allwinner A10/A20 CCU"
default MACH_SUN4I
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 128a40ee5c5e..acaa14cfa25c 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -23,6 +23,7 @@ lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o
# SoC support
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
obj-$(CONFIG_SUN50I_H6_CCU) += ccu-sun50i-h6.o
+obj-$(CONFIG_SUN50I_H6_R_CCU) += ccu-sun50i-h6-r.o
obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o
obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o
obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
new file mode 100644
index 000000000000..27554eaf6929
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun50i-h6-r.h"
+
+/*
+ * Information about AR100 and AHB/APB clocks in R_CCU are gathered from
+ * clock definitions in the BSP source code.
+ */
+
+static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k",
+ "pll-periph0", "iosc" };
+static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = {
+ { .index = 2, .shift = 0, .width = 5 },
+};
+
+static struct ccu_div ar100_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 24,
+ .width = 2,
+
+ .var_predivs = ar100_r_apb2_predivs,
+ .n_var_predivs = ARRAY_SIZE(ar100_r_apb2_predivs),
+ },
+
+ .common = {
+ .reg = 0x000,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ar100",
+ ar100_r_apb2_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static CLK_FIXED_FACTOR(r_ahb_clk, "r-ahb", "ar100", 1, 1, 0);
+
+static struct ccu_div r_apb1_clk = {
+ .div = _SUNXI_CCU_DIV(0, 2),
+
+ .common = {
+ .reg = 0x00c,
+ .hw.init = CLK_HW_INIT("r-apb1",
+ "r-ahb",
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div r_apb2_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 24,
+ .width = 2,
+
+ .var_predivs = ar100_r_apb2_predivs,
+ .n_var_predivs = ARRAY_SIZE(ar100_r_apb2_predivs),
+ },
+
+ .common = {
+ .reg = 0x010,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("r-apb2",
+ ar100_r_apb2_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+/*
+ * Information about the gate/resets are gathered from the clock header file
+ * in the BSP source code, although most of them are unused. The existence
+ * of the hardware block is verified with "3.1 Memory Mapping" chapter in
+ * "Allwinner H6 V200 User Manual V1.1"; and the parent APB buses are verified
+ * with "3.3.2.1 System Bus Tree" chapter in the same document.
+ */
+static SUNXI_CCU_GATE(r_apb1_timer_clk, "r-apb1-timer", "r-apb1",
+ 0x11c, BIT(0), 0);
+static SUNXI_CCU_GATE(r_apb1_twd_clk, "r-apb1-twd", "r-apb1",
+ 0x12c, BIT(0), 0);
+static SUNXI_CCU_GATE(r_apb1_pwm_clk, "r-apb1-pwm", "r-apb1",
+ 0x13c, BIT(0), 0);
+static SUNXI_CCU_GATE(r_apb2_uart_clk, "r-apb2-uart", "r-apb2",
+ 0x18c, BIT(0), 0);
+static SUNXI_CCU_GATE(r_apb2_i2c_clk, "r-apb2-i2c", "r-apb2",
+ 0x19c, BIT(0), 0);
+static SUNXI_CCU_GATE(r_apb1_ir_clk, "r-apb1-ir", "r-apb1",
+ 0x1cc, BIT(0), 0);
+static SUNXI_CCU_GATE(r_apb1_w1_clk, "r-apb1-w1", "r-apb1",
+ 0x1ec, BIT(0), 0);
+
+/* Information of IR(RX) mod clock is gathered from BSP source code */
+static const char * const r_mod0_default_parents[] = { "osc32k", "osc24M" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir_clk, "ir",
+ r_mod0_default_parents, 0x1c0,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/*
+ * BSP didn't use the 1-wire function at all now, and the information about
+ * this mod clock is guessed from the IR mod clock above. The existence of
+ * this mod clock is proven by BSP clock header, and the dividers are verified
+ * by contents in the 1-wire related chapter of the User Manual.
+ */
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(w1_clk, "w1",
+ r_mod0_default_parents, 0x1e0,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static struct ccu_common *sun50i_h6_r_ccu_clks[] = {
+ &ar100_clk.common,
+ &r_apb1_clk.common,
+ &r_apb2_clk.common,
+ &r_apb1_timer_clk.common,
+ &r_apb1_twd_clk.common,
+ &r_apb1_pwm_clk.common,
+ &r_apb2_uart_clk.common,
+ &r_apb2_i2c_clk.common,
+ &r_apb1_ir_clk.common,
+ &r_apb1_w1_clk.common,
+ &ir_clk.common,
+ &w1_clk.common,
+};
+
+static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
+ .hws = {
+ [CLK_AR100] = &ar100_clk.common.hw,
+ [CLK_R_AHB] = &r_ahb_clk.hw,
+ [CLK_R_APB1] = &r_apb1_clk.common.hw,
+ [CLK_R_APB2] = &r_apb2_clk.common.hw,
+ [CLK_R_APB1_TIMER] = &r_apb1_timer_clk.common.hw,
+ [CLK_R_APB1_TWD] = &r_apb1_twd_clk.common.hw,
+ [CLK_R_APB1_PWM] = &r_apb1_pwm_clk.common.hw,
+ [CLK_R_APB2_UART] = &r_apb2_uart_clk.common.hw,
+ [CLK_R_APB2_I2C] = &r_apb2_i2c_clk.common.hw,
+ [CLK_R_APB1_IR] = &r_apb1_ir_clk.common.hw,
+ [CLK_R_APB1_W1] = &r_apb1_w1_clk.common.hw,
+ [CLK_IR] = &ir_clk.common.hw,
+ [CLK_W1] = &w1_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun50i_h6_r_ccu_resets[] = {
+ [RST_R_APB1_TIMER] = { 0x11c, BIT(16) },
+ [RST_R_APB1_TWD] = { 0x12c, BIT(16) },
+ [RST_R_APB1_PWM] = { 0x13c, BIT(16) },
+ [RST_R_APB2_UART] = { 0x18c, BIT(16) },
+ [RST_R_APB2_I2C] = { 0x19c, BIT(16) },
+ [RST_R_APB1_IR] = { 0x1cc, BIT(16) },
+ [RST_R_APB1_W1] = { 0x1ec, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun50i_h6_r_ccu_desc = {
+ .ccu_clks = sun50i_h6_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h6_r_ccu_clks),
+
+ .hw_clks = &sun50i_h6_r_hw_clks,
+
+ .resets = sun50i_h6_r_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h6_r_ccu_resets),
+};
+
+static void __init sunxi_r_ccu_init(struct device_node *node,
+ const struct sunxi_ccu_desc *desc)
+{
+ void __iomem *reg;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%pOF: Could not map the clock registers\n", node);
+ return;
+ }
+
+ sunxi_ccu_probe(node, reg, desc);
+}
+
+static void __init sun50i_h6_r_ccu_setup(struct device_node *node)
+{
+ sunxi_r_ccu_init(node, &sun50i_h6_r_ccu_desc);
+}
+CLK_OF_DECLARE(sun50i_h6_r_ccu, "allwinner,sun50i-h6-r-ccu",
+ sun50i_h6_r_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
new file mode 100644
index 000000000000..782117dc0b28
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Icenowy Zheng <icenowy@aosc.xyz>
+ */
+
+#ifndef _CCU_SUN50I_H6_R_H
+#define _CCU_SUN50I_H6_R_H
+
+#include <dt-bindings/clock/sun50i-h6-r-ccu.h>
+#include <dt-bindings/reset/sun50i-h6-r-ccu.h>
+
+/* AHB/APB bus clocks are not exported except APB1 for R_PIO */
+#define CLK_R_AHB 1
+
+#define CLK_R_APB2 3
+
+#define CLK_NUMBER (CLK_W1 + 1)
+
+#endif /* _CCU_SUN50I_H6_R_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 933f2e68f42a..65ba6455feb7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -12,7 +12,8 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -1250,17 +1251,45 @@ static struct ccu_mux_nb sun8i_r40_cpu_nb = {
.bypass_index = 1, /* index of 24 MHz oscillator */
};
-static void __init sun8i_r40_ccu_setup(struct device_node *node)
+/*
+ * Add a regmap for the GMAC driver (dwmac-sun8i) to access the
+ * GMAC configuration register.
+ * Only this register is allowed to be written, in order to
+ * prevent overriding critical clock configuration.
+ */
+
+#define SUN8I_R40_GMAC_CFG_REG 0x164
+static bool sun8i_r40_ccu_regmap_accessible_reg(struct device *dev,
+ unsigned int reg)
+{
+ if (reg == SUN8I_R40_GMAC_CFG_REG)
+ return true;
+ return false;
+}
+
+static struct regmap_config sun8i_r40_ccu_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x320, /* PLL_LOCK_CTRL_REG */
+
+ /* other devices have no business accessing other registers */
+ .readable_reg = sun8i_r40_ccu_regmap_accessible_reg,
+ .writeable_reg = sun8i_r40_ccu_regmap_accessible_reg,
+};
+
+static int sun8i_r40_ccu_probe(struct platform_device *pdev)
{
+ struct resource *res;
+ struct regmap *regmap;
void __iomem *reg;
u32 val;
+ int ret;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
- return;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 4 */
val = readl(reg + SUN8I_R40_PLL_AUDIO_REG);
@@ -1277,7 +1306,14 @@ static void __init sun8i_r40_ccu_setup(struct device_node *node)
val &= ~GENMASK(25, 20);
writel(val, reg + SUN8I_R40_USB_CLK_REG);
- sunxi_ccu_probe(node, reg, &sun8i_r40_ccu_desc);
+ regmap = devm_regmap_init_mmio(&pdev->dev, reg,
+ &sun8i_r40_ccu_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun8i_r40_ccu_desc);
+ if (ret)
+ return ret;
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun8i_r40_pll_cpu_nb);
@@ -1285,6 +1321,20 @@ static void __init sun8i_r40_ccu_setup(struct device_node *node)
/* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
&sun8i_r40_cpu_nb);
+
+ return 0;
}
-CLK_OF_DECLARE(sun8i_r40_ccu, "allwinner,sun8i-r40-ccu",
- sun8i_r40_ccu_setup);
+
+static const struct of_device_id sun8i_r40_ccu_ids[] = {
+ { .compatible = "allwinner,sun8i-r40-ccu" },
+ { }
+};
+
+static struct platform_driver sun8i_r40_ccu_driver = {
+ .probe = sun8i_r40_ccu_probe,
+ .driver = {
+ .name = "sun8i-r40-ccu",
+ .of_match_table = sun8i_r40_ccu_ids,
+ },
+};
+builtin_platform_driver(sun8i_r40_ccu_driver);
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 0a7deee74eea..48ee43734e05 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1196,42 +1196,24 @@ static const struct file_operations attr_registers_fops = {
.release = single_release,
};
-static int dfll_debug_init(struct tegra_dfll *td)
+static void dfll_debug_init(struct tegra_dfll *td)
{
- int ret;
+ struct dentry *root;
if (!td || (td->mode == DFLL_UNINITIALIZED))
- return 0;
-
- td->debugfs_dir = debugfs_create_dir("tegra_dfll_fcpu", NULL);
- if (!td->debugfs_dir)
- return -ENOMEM;
-
- ret = -ENOMEM;
-
- if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR,
- td->debugfs_dir, td, &enable_fops))
- goto err_out;
-
- if (!debugfs_create_file("lock", S_IRUGO,
- td->debugfs_dir, td, &lock_fops))
- goto err_out;
+ return;
- if (!debugfs_create_file("rate", S_IRUGO,
- td->debugfs_dir, td, &rate_fops))
- goto err_out;
+ root = debugfs_create_dir("tegra_dfll_fcpu", NULL);
+ td->debugfs_dir = root;
- if (!debugfs_create_file("registers", S_IRUGO,
- td->debugfs_dir, td, &attr_registers_fops))
- goto err_out;
-
- return 0;
-
-err_out:
- debugfs_remove_recursive(td->debugfs_dir);
- return ret;
+ debugfs_create_file("enable", S_IRUGO | S_IWUSR, root, td, &enable_fops);
+ debugfs_create_file("lock", S_IRUGO, root, td, &lock_fops);
+ debugfs_create_file("rate", S_IRUGO, root, td, &rate_fops);
+ debugfs_create_file("registers", S_IRUGO, root, td, &attr_registers_fops);
}
+#else
+static void inline dfll_debug_init(struct tegra_dfll *td) { }
#endif /* CONFIG_DEBUG_FS */
/*
@@ -1715,9 +1697,7 @@ int tegra_dfll_register(struct platform_device *pdev,
return ret;
}
-#ifdef CONFIG_DEBUG_FS
dfll_debug_init(td);
-#endif
return 0;
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 5d5a22d529f5..1824f014202b 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1367,7 +1367,7 @@ static void __init tegra114_clock_init(struct device_node *np)
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
&pll_x_params);
- tegra_add_of_provider(np);
+ tegra_add_of_provider(np, of_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra114_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 50088e976611..0c69c7970950 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1479,7 +1479,7 @@ static void __init tegra124_132_clock_init_post(struct device_node *np)
&pll_x_params);
tegra_init_special_resets(1, tegra124_reset_assert,
tegra124_reset_deassert);
- tegra_add_of_provider(np);
+ tegra_add_of_provider(np, of_clk_src_onecell_get);
clks[TEGRA124_CLK_EMC] = tegra_clk_register_emc(clk_base, np,
&emc_lock);
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 0ee56dd04cec..cc857d4d4a86 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -26,6 +26,8 @@
#include "clk.h"
#include "clk-id.h"
+#define MISC_CLK_ENB 0x48
+
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
@@ -831,15 +833,25 @@ static void __init tegra20_periph_clk_init(void)
periph_clk_enb_refcnt);
clks[TEGRA20_CLK_PEX] = clk;
+ /* dev1 OSC divider */
+ clk_register_divider(NULL, "dev1_osc_div", "clk_m",
+ 0, clk_base + MISC_CLK_ENB, 22, 2,
+ CLK_DIVIDER_POWER_OF_TWO | CLK_DIVIDER_READ_ONLY,
+ NULL);
+
+ /* dev2 OSC divider */
+ clk_register_divider(NULL, "dev2_osc_div", "clk_m",
+ 0, clk_base + MISC_CLK_ENB, 20, 2,
+ CLK_DIVIDER_POWER_OF_TWO | CLK_DIVIDER_READ_ONLY,
+ NULL);
+
/* cdev1 */
- clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, 0, 26000000);
- clk = tegra_clk_register_periph_gate("cdev1", "cdev1_fixed", 0,
+ clk = tegra_clk_register_periph_gate("cdev1", "cdev1_mux", 0,
clk_base, 0, 94, periph_clk_enb_refcnt);
clks[TEGRA20_CLK_CDEV1] = clk;
/* cdev2 */
- clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, 0, 26000000);
- clk = tegra_clk_register_periph_gate("cdev2", "cdev2_fixed", 0,
+ clk = tegra_clk_register_periph_gate("cdev2", "cdev2_mux", 0,
clk_base, 0, 93, periph_clk_enb_refcnt);
clks[TEGRA20_CLK_CDEV2] = clk;
@@ -1077,6 +1089,36 @@ static const struct of_device_id pmc_match[] __initconst = {
{ },
};
+static struct clk *tegra20_clk_src_onecell_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_hw *parent_hw;
+ struct clk_hw *hw;
+ struct clk *clk;
+
+ clk = of_clk_src_onecell_get(clkspec, data);
+ if (IS_ERR(clk))
+ return clk;
+
+ /*
+ * Tegra20 CDEV1 and CDEV2 clocks are a bit special case, their parent
+ * clock is created by the pinctrl driver. It is possible for clk user
+ * to request these clocks before pinctrl driver got probed and hence
+ * user will get an orphaned clock. That might be undesirable because
+ * user may expect parent clock to be enabled by the child.
+ */
+ if (clkspec->args[0] == TEGRA20_CLK_CDEV1 ||
+ clkspec->args[0] == TEGRA20_CLK_CDEV2) {
+ hw = __clk_get_hw(clk);
+
+ parent_hw = clk_hw_get_parent(hw);
+ if (!parent_hw)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return clk;
+}
+
static void __init tegra20_clock_init(struct device_node *np)
{
struct device_node *node;
@@ -1115,7 +1157,7 @@ static void __init tegra20_clock_init(struct device_node *np)
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA20_CLK_CLK_MAX);
- tegra_add_of_provider(np);
+ tegra_add_of_provider(np, tegra20_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra20_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 9fb5d51ccce4..5435d01c636a 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3567,7 +3567,7 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_init_special_resets(2, tegra210_reset_assert,
tegra210_reset_deassert);
- tegra_add_of_provider(np);
+ tegra_add_of_provider(np, of_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra210_mbist_clk_init();
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index b316dfb6f6c7..acfe661b2ae7 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1349,7 +1349,7 @@ static void __init tegra30_clock_init(struct device_node *np)
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
- tegra_add_of_provider(np);
+ tegra_add_of_provider(np, of_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra30_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index ba923f0d5953..593d76a114f9 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -298,7 +298,8 @@ static struct reset_controller_dev rst_ctlr = {
.of_reset_n_cells = 1,
};
-void __init tegra_add_of_provider(struct device_node *np)
+void __init tegra_add_of_provider(struct device_node *np,
+ void *clk_src_onecell_get)
{
int i;
@@ -314,7 +315,7 @@ void __init tegra_add_of_provider(struct device_node *np)
clk_data.clks = clks;
clk_data.clk_num = clk_num;
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ of_clk_add_provider(np, clk_src_onecell_get, &clk_data);
rst_ctlr.of_node = np;
rst_ctlr.nr_resets = periph_banks * 32 + num_special_reset;
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index ba7e20e6a82b..e1f88463b600 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -763,7 +763,7 @@ struct clk **tegra_clk_init(void __iomem *clk_base, int num, int periph_banks);
struct clk **tegra_lookup_dt_id(int clk_id, struct tegra_clk *tegra_clk);
-void tegra_add_of_provider(struct device_node *np);
+void tegra_add_of_provider(struct device_node *np, void *clk_src_onecell_get);
void tegra_register_devclks(struct tegra_devclk *dev_clks, int num);
void tegra_audio_clk_init(void __iomem *clk_base,
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index ebc78ab2df05..4f5ff9fa11fd 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -51,6 +51,9 @@
#define UNIPHIER_LD11_SYS_CLK_STDMAC(idx) \
UNIPHIER_CLK_GATE("stdmac", (idx), NULL, 0x210c, 8)
+#define UNIPHIER_LD11_SYS_CLK_HSC(idx) \
+ UNIPHIER_CLK_GATE("hsc", (idx), NULL, 0x210c, 9)
+
#define UNIPHIER_PRO4_SYS_CLK_GIO(idx) \
UNIPHIER_CLK_GATE("gio", (idx), NULL, 0x2104, 6)
@@ -182,6 +185,7 @@ const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = {
/* Index 5 reserved for eMMC PHY */
UNIPHIER_LD11_SYS_CLK_ETHER(6),
UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC, MIO */
+ UNIPHIER_LD11_SYS_CLK_HSC(9),
UNIPHIER_CLK_FACTOR("usb2", -1, "ref", 24, 25),
UNIPHIER_LD11_SYS_CLK_AIO(40),
UNIPHIER_LD11_SYS_CLK_EVEA(41),
@@ -215,6 +219,7 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_LD20_SYS_CLK_SD,
UNIPHIER_LD11_SYS_CLK_ETHER(6),
UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC */
+ UNIPHIER_LD11_SYS_CLK_HSC(9),
/* GIO is always clock-enabled: no function for 0x210c bit5 */
/*
* clock for USB Link is enabled by the logic "OR" of bit 14 and bit 15.
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 3ac9dec9a038..e01222ea888f 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -27,6 +27,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_clk.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
@@ -245,7 +246,7 @@ static int __init sp804_of_init(struct device_node *np)
clk1 = NULL;
/* Get the 2nd clock if the timer has 3 timer clocks */
- if (of_count_phandle_with_args(np, "clocks", "#clock-cells") == 3) {
+ if (of_clk_get_parent_count(np) == 3) {
clk2 = of_clk_get(np, 1);
if (IS_ERR(clk2)) {
pr_err("sp804: %s clock not found: %d\n", np->name,
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 1d7bd96511f0..903d9c473749 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -85,6 +85,7 @@ EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
struct dax_device *dax_dev;
+ bool dax_enabled = false;
pgoff_t pgoff;
int err, id;
void *kaddr;
@@ -134,14 +135,21 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
* on being able to do (page_address(pfn_to_page())).
*/
WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
+ dax_enabled = true;
} else if (pfn_t_devmap(pfn)) {
- /* pass */;
- } else {
+ struct dev_pagemap *pgmap;
+
+ pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
+ if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
+ dax_enabled = true;
+ put_dev_pagemap(pgmap);
+ }
+
+ if (!dax_enabled) {
pr_debug("%s: error: dax support not enabled\n",
bdevname(bdev, buf));
return false;
}
-
return true;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
@@ -182,8 +190,7 @@ static ssize_t write_cache_show(struct device *dev,
if (!dax_dev)
return -ENXIO;
- rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
- &dax_dev->flags));
+ rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
put_dax(dax_dev);
return rc;
}
@@ -201,10 +208,8 @@ static ssize_t write_cache_store(struct device *dev,
if (rc)
len = rc;
- else if (write_cache)
- set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
else
- clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+ dax_write_cache(dax_dev, write_cache);
put_dax(dax_dev);
return len;
@@ -282,11 +287,21 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);
+size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i)
+{
+ if (!dax_alive(dax_dev))
+ return 0;
+
+ return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+}
+EXPORT_SYMBOL_GPL(dax_copy_to_iter);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
- if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
+ if (unlikely(!dax_write_cache_enabled(dax_dev)))
return;
arch_wb_cache_pmem(addr, size);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6d61cd023633..ca1680afa20a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -151,13 +151,6 @@ config DMA_JZ4780
If you have a board based on such a SoC and wish to use DMA for
devices which can use the DMA controller, say Y or M here.
-config DMA_OMAP
- tristate "OMAP DMA support"
- depends on ARCH_OMAP || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
-
config DMA_SA11X0
tristate "SA-11x0 DMA support"
depends on ARCH_SA1100 || COMPILE_TEST
@@ -574,28 +567,6 @@ config TIMB_DMA
help
Enable support for the Timberdale FPGA DMA engine.
-config TI_CPPI41
- tristate "CPPI 4.1 DMA support"
- depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX)
- select DMA_ENGINE
- help
- The Communications Port Programming Interface (CPPI) 4.1 DMA engine
- is currently used by the USB driver on AM335x and DA8xx platforms.
-
-config TI_DMA_CROSSBAR
- bool
-
-config TI_EDMA
- bool "TI EDMA support"
- depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
- default n
- help
- Enable support for the TI EDMA controller. This DMA
- engine is found on TI DaVinci and AM33xx parts.
-
config XGENE_DMA
tristate "APM X-Gene DMA support"
depends on ARCH_XGENE || COMPILE_TEST
@@ -653,6 +624,8 @@ source "drivers/dma/hsu/Kconfig"
source "drivers/dma/sh/Kconfig"
+source "drivers/dma/ti/Kconfig"
+
# clients
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0f62a4d49aab..203a99d68315 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
-obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
@@ -69,13 +68,11 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
-obj-$(CONFIG_TI_CPPI41) += cppi41.o
-obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
-obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-y += mediatek/
obj-y += qcom/
+obj-y += ti/
obj-y += xilinx/
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index a861b5b4d443..75f38d19fcbe 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -2041,8 +2041,7 @@ static void at_dma_shutdown(struct platform_device *pdev)
static int at_dma_prepare(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct at_dma *atdma = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
@@ -2076,8 +2075,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
static int at_dma_suspend_noirq(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct at_dma *atdma = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
/* preserve data */
@@ -2118,8 +2116,7 @@ static void atc_resume_cyclic(struct at_dma_chan *atchan)
static int at_dma_resume_noirq(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct at_dma *atdma = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
/* bring back DMA controller */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 94236ec9d410..4bf72561667c 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1833,8 +1833,7 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan)
#ifdef CONFIG_PM
static int atmel_xdmac_prepare(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct at_xdmac *atxdmac = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
@@ -1853,8 +1852,7 @@ static int atmel_xdmac_prepare(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int atmel_xdmac_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct at_xdmac *atxdmac = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
@@ -1878,8 +1876,7 @@ static int atmel_xdmac_suspend(struct device *dev)
static int atmel_xdmac_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct at_xdmac *atxdmac = dev_get_drvdata(dev);
struct at_xdmac_chan *atchan;
struct dma_chan *chan, *_chan;
int i;
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 2419fe524daa..15b2453d2647 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -687,7 +687,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_device;
- ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
+ ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
dev_name(&pdev->dev), dmac);
if (ret)
goto err_unregister_of;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index b9339524d5bd..aa1712beb0cc 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -468,6 +468,8 @@ static int dmatest_func(void *data)
unsigned long long total_len = 0;
u8 align = 0;
bool is_memset = false;
+ dma_addr_t *srcs;
+ dma_addr_t *dma_pq;
set_freezable();
@@ -551,6 +553,14 @@ static int dmatest_func(void *data)
set_user_nice(current, 10);
+ srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!srcs)
+ goto err_dstbuf;
+
+ dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!dma_pq)
+ goto err_srcs_array;
+
/*
* src and dst buffers are freed by ourselves below
*/
@@ -561,7 +571,6 @@ static int dmatest_func(void *data)
&& !(params->iterations && total_tests >= params->iterations)) {
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *um;
- dma_addr_t srcs[src_cnt];
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
@@ -676,8 +685,6 @@ static int dmatest_func(void *data)
srcs, src_cnt,
len, flags);
else if (thread->type == DMA_PQ) {
- dma_addr_t dma_pq[dst_cnt];
-
for (i = 0; i < dst_cnt; i++)
dma_pq[i] = dsts[i] + dst_off;
tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
@@ -779,6 +786,9 @@ static int dmatest_func(void *data)
runtime = ktime_to_us(ktime);
ret = 0;
+ kfree(dma_pq);
+err_srcs_array:
+ kfree(srcs);
err_dstbuf:
for (i = 0; thread->udsts[i]; i++)
kfree(thread->udsts[i]);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index bc31fe802061..f62dd0944908 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -293,8 +293,7 @@ MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
static int dw_suspend_late(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+ struct dw_dma_chip *chip = dev_get_drvdata(dev);
dw_dma_disable(chip);
clk_disable_unprepare(chip->clk);
@@ -304,8 +303,7 @@ static int dw_suspend_late(struct device *dev)
static int dw_resume_early(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+ struct dw_dma_chip *chip = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(chip->clk);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 3eaece888e75..1117b5123a6f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1328,8 +1328,7 @@ static int fsldma_of_remove(struct platform_device *op)
#ifdef CONFIG_PM
static int fsldma_suspend_late(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_device *fdev = dev_get_drvdata(dev);
struct fsldma_chan *chan;
int i;
@@ -1360,8 +1359,7 @@ out:
static int fsldma_resume_early(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_device *fdev = dev_get_drvdata(dev);
struct fsldma_chan *chan;
u32 mode;
int i;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 1953e57505f4..e5c911200bdb 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -670,8 +670,7 @@ static int idma64_platform_remove(struct platform_device *pdev)
static int idma64_pm_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct idma64_chip *chip = platform_get_drvdata(pdev);
+ struct idma64_chip *chip = dev_get_drvdata(dev);
idma64_off(chip->idma64);
return 0;
@@ -679,8 +678,7 @@ static int idma64_pm_suspend(struct device *dev)
static int idma64_pm_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct idma64_chip *chip = platform_get_drvdata(pdev);
+ struct idma64_chip *chip = dev_get_drvdata(dev);
idma64_on(chip->idma64);
return 0;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 715b39ae5a46..75b6ff0415ee 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1,19 +1,13 @@
-/*
- * drivers/dma/imx-dma.c
- *
- * This file contains a driver for the Freescale i.MX DMA engine
- * found on i.MX1/21/27
- *
- * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
- * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// drivers/dma/imx-dma.c
+//
+// This file contains a driver for the Freescale i.MX DMA engine
+// found on i.MX1/21/27
+//
+// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
+
#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index ccd03c3cedfe..f077992635c2 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1,21 +1,14 @@
-/*
- * drivers/dma/imx-sdma.c
- *
- * This file contains a driver for the Freescale Smart DMA engine
- *
- * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
- *
- * Based on code from Freescale:
- *
- * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// drivers/dma/imx-sdma.c
+//
+// This file contains a driver for the Freescale Smart DMA engine
+//
+// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+//
+// Based on code from Freescale:
+//
+// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/init.h>
#include <linux/iopoll.h>
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 41d167921fab..ae5182ff0128 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -1,12 +1,8 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * Refer to drivers/dma/imx-sdma.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+//
+// Refer to drivers/dma/imx-sdma.c
#include <linux/init.h>
#include <linux/types.h>
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index de1fd59fe136..6237069001c4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -27,6 +27,7 @@
#include <linux/of_dma.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
+#include <linux/bug.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
@@ -1094,51 +1095,96 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
return off;
}
-static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
- u8 buf[], const struct _xfer_spec *pxs,
- int cyc)
+static u32 _emit_load(unsigned int dry_run, u8 buf[],
+ enum pl330_cond cond, enum dma_transfer_direction direction,
+ u8 peri)
{
int off = 0;
- enum pl330_cond cond;
- if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
- cond = BURST;
- else
- cond = SINGLE;
+ switch (direction) {
+ case DMA_MEM_TO_MEM:
+ /* fall through */
+ case DMA_MEM_TO_DEV:
+ off += _emit_LD(dry_run, &buf[off], cond);
+ break;
- while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
- off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
- off += _emit_ST(dry_run, &buf[off], ALWAYS);
+ case DMA_DEV_TO_MEM:
+ if (cond == ALWAYS) {
+ off += _emit_LDP(dry_run, &buf[off], SINGLE,
+ peri);
+ off += _emit_LDP(dry_run, &buf[off], BURST,
+ peri);
+ } else {
+ off += _emit_LDP(dry_run, &buf[off], cond,
+ peri);
+ }
+ break;
- if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
- off += _emit_FLUSHP(dry_run, &buf[off],
- pxs->desc->peri);
+ default:
+ /* this code should be unreachable */
+ WARN_ON(1);
+ break;
}
return off;
}
-static inline int _ldst_memtodev(struct pl330_dmac *pl330,
+static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
+ enum pl330_cond cond, enum dma_transfer_direction direction,
+ u8 peri)
+{
+ int off = 0;
+
+ switch (direction) {
+ case DMA_MEM_TO_MEM:
+ /* fall through */
+ case DMA_DEV_TO_MEM:
+ off += _emit_ST(dry_run, &buf[off], cond);
+ break;
+
+ case DMA_MEM_TO_DEV:
+ if (cond == ALWAYS) {
+ off += _emit_STP(dry_run, &buf[off], SINGLE,
+ peri);
+ off += _emit_STP(dry_run, &buf[off], BURST,
+ peri);
+ } else {
+ off += _emit_STP(dry_run, &buf[off], cond,
+ peri);
+ }
+ break;
+
+ default:
+ /* this code should be unreachable */
+ WARN_ON(1);
+ break;
+ }
+
+ return off;
+}
+
+static inline int _ldst_peripheral(struct pl330_dmac *pl330,
unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
+ const struct _xfer_spec *pxs, int cyc,
+ enum pl330_cond cond)
{
int off = 0;
- enum pl330_cond cond;
if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
cond = BURST;
- else
- cond = SINGLE;
+ /*
+ * do FLUSHP at beginning to clear any stale dma requests before the
+ * first WFP.
+ */
+ if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+ off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
while (cyc--) {
off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
- off += _emit_LD(dry_run, &buf[off], ALWAYS);
- off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);
-
- if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
- off += _emit_FLUSHP(dry_run, &buf[off],
- pxs->desc->peri);
+ off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
+ pxs->desc->peri);
+ off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
+ pxs->desc->peri);
}
return off;
@@ -1148,19 +1194,65 @@ static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs, int cyc)
{
int off = 0;
+ enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE;
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
- break;
+ /* fall through */
case DMA_DEV_TO_MEM:
- off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
+ off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
+ cond);
break;
+
case DMA_MEM_TO_MEM:
off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
break;
+
+ default:
+ /* this code should be unreachable */
+ WARN_ON(1);
+ break;
+ }
+
+ return off;
+}
+
+/*
+ * transfer dregs with single transfers to peripheral, or a reduced size burst
+ * for mem-to-mem.
+ */
+static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int transfer_length)
+{
+ int off = 0;
+ int dregs_ccr;
+
+ if (transfer_length == 0)
+ return off;
+
+ switch (pxs->desc->rqtype) {
+ case DMA_MEM_TO_DEV:
+ /* fall through */
+ case DMA_DEV_TO_MEM:
+ off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs,
+ transfer_length, SINGLE);
+ break;
+
+ case DMA_MEM_TO_MEM:
+ dregs_ccr = pxs->ccr;
+ dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
+ (0xf << CC_DSTBRSTLEN_SHFT));
+ dregs_ccr |= (((transfer_length - 1) & 0xf) <<
+ CC_SRCBRSTLEN_SHFT);
+ dregs_ccr |= (((transfer_length - 1) & 0xf) <<
+ CC_DSTBRSTLEN_SHFT);
+ off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
+ off += _ldst_memtomem(dry_run, &buf[off], pxs, 1);
+ break;
+
default:
- off += 0x40000000; /* Scare off the Client */
+ /* this code should be unreachable */
+ WARN_ON(1);
break;
}
@@ -1256,6 +1348,8 @@ static inline int _setup_loops(struct pl330_dmac *pl330,
struct pl330_xfer *x = &pxs->desc->px;
u32 ccr = pxs->ccr;
unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
+ int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) /
+ BRST_SIZE(ccr);
int off = 0;
while (bursts) {
@@ -1263,6 +1357,7 @@ static inline int _setup_loops(struct pl330_dmac *pl330,
off += _loop(pl330, dry_run, &buf[off], &c, pxs);
bursts -= c;
}
+ off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs);
return off;
}
@@ -1294,7 +1389,6 @@ static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
struct _xfer_spec *pxs)
{
struct _pl330_req *req = &thrd->req[index];
- struct pl330_xfer *x;
u8 *buf = req->mc_cpu;
int off = 0;
@@ -1303,11 +1397,6 @@ static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
/* DMAMOV CCR, ccr */
off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
- x = &pxs->desc->px;
- /* Error if xfer length is not aligned at burst size */
- if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
- return -EINVAL;
-
off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
/* DMASEV peripheral/event */
@@ -1365,6 +1454,20 @@ static int pl330_submit_req(struct pl330_thread *thrd,
u32 ccr;
int ret = 0;
+ switch (desc->rqtype) {
+ case DMA_MEM_TO_DEV:
+ break;
+
+ case DMA_DEV_TO_MEM:
+ break;
+
+ case DMA_MEM_TO_MEM:
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
if (pl330->state == DYING
|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
@@ -2106,6 +2209,18 @@ static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
return true;
}
+static int fixup_burst_len(int max_burst_len, int quirks)
+{
+ if (quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+ return 1;
+ else if (max_burst_len > PL330_MAX_BURST)
+ return PL330_MAX_BURST;
+ else if (max_burst_len < 1)
+ return 1;
+ else
+ return max_burst_len;
+}
+
static int pl330_config(struct dma_chan *chan,
struct dma_slave_config *slave_config)
{
@@ -2117,15 +2232,15 @@ static int pl330_config(struct dma_chan *chan,
pch->fifo_addr = slave_config->dst_addr;
if (slave_config->dst_addr_width)
pch->burst_sz = __ffs(slave_config->dst_addr_width);
- if (slave_config->dst_maxburst)
- pch->burst_len = slave_config->dst_maxburst;
+ pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
+ pch->dmac->quirks);
} else if (slave_config->direction == DMA_DEV_TO_MEM) {
if (slave_config->src_addr)
pch->fifo_addr = slave_config->src_addr;
if (slave_config->src_addr_width)
pch->burst_sz = __ffs(slave_config->src_addr_width);
- if (slave_config->src_maxburst)
- pch->burst_len = slave_config->src_maxburst;
+ pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
+ pch->dmac->quirks);
}
return 0;
@@ -2519,14 +2634,8 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
burst_len >>= desc->rqcfg.brst_size;
/* src/dst_burst_len can't be more than 16 */
- if (burst_len > 16)
- burst_len = 16;
-
- while (burst_len > 1) {
- if (!(len % (burst_len << desc->rqcfg.brst_size)))
- break;
- burst_len--;
- }
+ if (burst_len > PL330_MAX_BURST)
+ burst_len = PL330_MAX_BURST;
return burst_len;
}
@@ -2598,7 +2707,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
desc->rqtype = direction;
desc->rqcfg.brst_size = pch->burst_sz;
- desc->rqcfg.brst_len = 1;
+ desc->rqcfg.brst_len = pch->burst_len;
desc->bytes_requested = period_len;
fill_px(&desc->px, dst, src, period_len);
@@ -2743,7 +2852,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->rqcfg.brst_size = pch->burst_sz;
- desc->rqcfg.brst_len = 1;
+ desc->rqcfg.brst_len = pch->burst_len;
desc->rqtype = direction;
desc->bytes_requested = sg_dma_len(sg);
}
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 4a828c18099a..1617715aa6e0 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -451,6 +451,7 @@ static void bam_reset_channel(struct bam_chan *bchan)
/**
* bam_chan_init_hw - Initialize channel hardware
* @bchan: bam channel
+ * @dir: DMA transfer direction
*
* This function resets and initializes the BAM channel
*/
@@ -673,7 +674,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
remainder = 0;
}
- async_desc->length += desc->size;
+ async_desc->length += le16_to_cpu(desc->size);
desc++;
} while (remainder > 0);
}
@@ -687,7 +688,7 @@ err_out:
/**
* bam_dma_terminate_all - terminate all transactions on a channel
- * @bchan: bam dma channel
+ * @chan: bam dma channel
*
* Dequeues and frees all transactions
* No callbacks are done
@@ -918,7 +919,8 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
continue;
for (i = 0; i < async_desc->num_desc; i++)
- residue += async_desc->curr_desc[i].size;
+ residue += le16_to_cpu(
+ async_desc->curr_desc[i].size);
}
}
@@ -958,7 +960,7 @@ static void bam_apply_new_config(struct bam_chan *bchan,
/**
* bam_start_dma - start next transaction
- * @bchan - bam dma channel
+ * @bchan: bam dma channel
*/
static void bam_start_dma(struct bam_chan *bchan)
{
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 963cc5228d05..43d4b00b8138 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -616,8 +616,7 @@ static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
static ssize_t hidma_show_values(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hidma_dev *mdev = platform_get_drvdata(pdev);
+ struct hidma_dev *mdev = dev_get_drvdata(dev);
buf[0] = 0;
diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c
index d61f1068a34b..cbb89eafd844 100644
--- a/drivers/dma/qcom/hidma_mgmt_sys.c
+++ b/drivers/dma/qcom/hidma_mgmt_sys.c
@@ -107,8 +107,7 @@ static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = {
static ssize_t show_values(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ struct hidma_mgmt_dev *mdev = dev_get_drvdata(dev);
unsigned int i;
buf[0] = 0;
@@ -125,8 +124,7 @@ static ssize_t show_values(struct device *dev, struct device_attribute *attr,
static ssize_t set_values(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ struct hidma_mgmt_dev *mdev = dev_get_drvdata(dev);
unsigned long tmp;
unsigned int i;
int rc;
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index c94ffab0d25c..04a74e0a95b7 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -443,7 +443,6 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
return ret;
}
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
struct sh_dmae_device *shdev = data;
@@ -454,7 +453,6 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
sh_dmae_reset(shdev);
return IRQ_HANDLED;
}
-#endif
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
struct shdma_desc *sdesc)
@@ -686,11 +684,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
const struct sh_dmae_pdata *pdata;
unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int chan_irq[SH_DMAE_MAX_CHANNELS];
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
unsigned long irqflags = 0;
- int errirq;
-#endif
- int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
+ int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
struct sh_dmae_device *shdev;
struct dma_device *dma_dev;
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
@@ -792,33 +787,32 @@ static int sh_dmae_probe(struct platform_device *pdev)
if (err)
goto rst_err;
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
- chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (IS_ENABLED(CONFIG_CPU_SH4) || IS_ENABLED(CONFIG_ARCH_RENESAS)) {
+ chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- if (!chanirq_res)
- chanirq_res = errirq_res;
- else
- irqres++;
+ if (!chanirq_res)
+ chanirq_res = errirq_res;
+ else
+ irqres++;
- if (chanirq_res == errirq_res ||
- (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
- irqflags = IRQF_SHARED;
+ if (chanirq_res == errirq_res ||
+ (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
+ irqflags = IRQF_SHARED;
- errirq = errirq_res->start;
+ errirq = errirq_res->start;
- err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
- "DMAC Address Error", shdev);
- if (err) {
- dev_err(&pdev->dev,
- "DMA failed requesting irq #%d, error %d\n",
- errirq, err);
- goto eirq_err;
+ err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err,
+ irqflags, "DMAC Address Error", shdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA failed requesting irq #%d, error %d\n",
+ errirq, err);
+ goto eirq_err;
+ }
+ } else {
+ chanirq_res = errirq_res;
}
-#else
- chanirq_res = errirq_res;
-#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
-
if (chanirq_res->start == chanirq_res->end &&
!platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
/* Special case - all multiplexed */
@@ -884,9 +878,7 @@ edmadevreg:
chan_probe_err:
sh_dmae_chan_remove(shdev);
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
-#endif
rst_err:
spin_lock_irq(&sh_dmae_lock);
list_del_rcu(&shdev->node);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 52ebccb483be..55df0d41355b 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/sprd-dma.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -116,57 +117,21 @@
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0
#define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0)
+/* define the DMA transfer step type */
+#define SPRD_DMA_NONE_STEP 0
+#define SPRD_DMA_BYTE_STEP 1
+#define SPRD_DMA_SHORT_STEP 2
+#define SPRD_DMA_WORD_STEP 4
+#define SPRD_DMA_DWORD_STEP 8
+
#define SPRD_DMA_SOFTWARE_UID 0
-/*
- * enum sprd_dma_req_mode: define the DMA request mode
- * @SPRD_DMA_FRAG_REQ: fragment request mode
- * @SPRD_DMA_BLK_REQ: block request mode
- * @SPRD_DMA_TRANS_REQ: transaction request mode
- * @SPRD_DMA_LIST_REQ: link-list request mode
- *
- * We have 4 types request mode: fragment mode, block mode, transaction mode
- * and linklist mode. One transaction can contain several blocks, one block can
- * contain several fragments. Link-list mode means we can save several DMA
- * configuration into one reserved memory, then DMA can fetch each DMA
- * configuration automatically to start transfer.
- */
-enum sprd_dma_req_mode {
- SPRD_DMA_FRAG_REQ,
- SPRD_DMA_BLK_REQ,
- SPRD_DMA_TRANS_REQ,
- SPRD_DMA_LIST_REQ,
-};
-
-/*
- * enum sprd_dma_int_type: define the DMA interrupt type
- * @SPRD_DMA_NO_INT: do not need generate DMA interrupts.
- * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
- * is done.
- * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
- * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
- * or one block request is done.
- * @SPRD_DMA_TRANS_INT: tansaction done interrupt when one transaction
- * request is done.
- * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
- * transaction request or fragment request is done.
- * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
- * transaction request or block request is done.
- * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
- * is done.
- * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is
- * incorrect.
- */
-enum sprd_dma_int_type {
- SPRD_DMA_NO_INT,
- SPRD_DMA_FRAG_INT,
- SPRD_DMA_BLK_INT,
- SPRD_DMA_BLK_FRAG_INT,
- SPRD_DMA_TRANS_INT,
- SPRD_DMA_TRANS_FRAG_INT,
- SPRD_DMA_TRANS_BLK_INT,
- SPRD_DMA_LIST_INT,
- SPRD_DMA_CFGERR_INT,
+/* dma data width values */
+enum sprd_dma_datawidth {
+ SPRD_DMA_DATAWIDTH_1_BYTE,
+ SPRD_DMA_DATAWIDTH_2_BYTES,
+ SPRD_DMA_DATAWIDTH_4_BYTES,
+ SPRD_DMA_DATAWIDTH_8_BYTES,
};
/* dma channel hardware configuration */
@@ -199,6 +164,7 @@ struct sprd_dma_desc {
struct sprd_dma_chn {
struct virt_dma_chan vc;
void __iomem *chn_base;
+ struct dma_slave_config slave_cfg;
u32 chn_num;
u32 dev_id;
struct sprd_dma_desc *cur_desc;
@@ -587,52 +553,97 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&schan->vc.lock, flags);
}
-static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
- dma_addr_t dest, dma_addr_t src, size_t len)
+static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
+{
+ switch (buswidth) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ return ffs(buswidth) - 1;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
+{
+ switch (buswidth) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ return buswidth;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sprd_dma_fill_desc(struct dma_chan *chan,
+ struct sprd_dma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, u32 len,
+ enum dma_transfer_direction dir,
+ unsigned long flags,
+ struct dma_slave_config *slave_cfg)
{
struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
- u32 datawidth, src_step, des_step, fragment_len;
- u32 block_len, req_mode, irq_mode, transcation_len;
- u32 fix_mode = 0, fix_en = 0;
-
- if (IS_ALIGNED(len, 4)) {
- datawidth = 2;
- src_step = 4;
- des_step = 4;
- } else if (IS_ALIGNED(len, 2)) {
- datawidth = 1;
- src_step = 2;
- des_step = 2;
+ u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
+ u32 int_mode = flags & SPRD_DMA_INT_MASK;
+ int src_datawidth, dst_datawidth, src_step, dst_step;
+ u32 temp, fix_mode = 0, fix_en = 0;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
+ if (src_step < 0) {
+ dev_err(sdev->dma_dev.dev, "invalid source step\n");
+ return src_step;
+ }
+ dst_step = SPRD_DMA_NONE_STEP;
} else {
- datawidth = 0;
- src_step = 1;
- des_step = 1;
+ dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
+ if (dst_step < 0) {
+ dev_err(sdev->dma_dev.dev, "invalid destination step\n");
+ return dst_step;
+ }
+ src_step = SPRD_DMA_NONE_STEP;
}
- fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
- if (len <= SPRD_DMA_BLK_LEN_MASK) {
- block_len = len;
- transcation_len = 0;
- req_mode = SPRD_DMA_BLK_REQ;
- irq_mode = SPRD_DMA_BLK_INT;
- } else {
- block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
- transcation_len = len;
- req_mode = SPRD_DMA_TRANS_REQ;
- irq_mode = SPRD_DMA_TRANS_INT;
+ src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
+ if (src_datawidth < 0) {
+ dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
+ return src_datawidth;
}
+ dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
+ if (dst_datawidth < 0) {
+ dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
+ return dst_datawidth;
+ }
+
+ if (slave_cfg->slave_id)
+ schan->dev_id = slave_cfg->slave_id;
+
hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
- hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
- SPRD_DMA_HIGH_ADDR_MASK);
- hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
- SPRD_DMA_HIGH_ADDR_MASK);
- hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
- hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
+ /*
+ * wrap_ptr and wrap_to will save the high 4 bits source address and
+ * destination address.
+ */
+ hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
+ hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
+ hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
+ hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;
- if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
+ /*
+ * If the src step and dst step both are 0 or both are not 0, that means
+ * we can not enable the fix mode. If one is 0 and another one is not,
+ * we can enable the fix mode.
+ */
+ if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
fix_en = 0;
} else {
fix_en = 1;
@@ -642,87 +653,119 @@ static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
fix_mode = 0;
}
- hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
- datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
- req_mode << SPRD_DMA_REQ_MODE_OFFSET |
- fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
- fix_en << SPRD_DMA_FIX_EN_OFFSET |
- (fragment_len & SPRD_DMA_FRG_LEN_MASK);
- hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;
-
- hw->intc = SPRD_DMA_CFG_ERR_INT_EN;
-
- switch (irq_mode) {
- case SPRD_DMA_NO_INT:
- break;
-
- case SPRD_DMA_FRAG_INT:
- hw->intc |= SPRD_DMA_FRAG_INT_EN;
- break;
+ hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;
- case SPRD_DMA_BLK_INT:
- hw->intc |= SPRD_DMA_BLK_INT_EN;
- break;
+ temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
+ temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
+ temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
+ temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
+ temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
+ temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
+ hw->frg_len = temp;
- case SPRD_DMA_BLK_FRAG_INT:
- hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
- break;
+ hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+ hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
- case SPRD_DMA_TRANS_INT:
- hw->intc |= SPRD_DMA_TRANS_INT_EN;
- break;
+ temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
+ temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+ hw->trsf_step = temp;
- case SPRD_DMA_TRANS_FRAG_INT:
- hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
- break;
+ hw->frg_step = 0;
+ hw->src_blk_step = 0;
+ hw->des_blk_step = 0;
+ return 0;
+}
- case SPRD_DMA_TRANS_BLK_INT:
- hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
- break;
+static struct dma_async_tx_descriptor *
+sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct sprd_dma_desc *sdesc;
+ struct sprd_dma_chn_hw *hw;
+ enum sprd_dma_datawidth datawidth;
+ u32 step, temp;
- case SPRD_DMA_LIST_INT:
- hw->intc |= SPRD_DMA_LIST_INT_EN;
- break;
+ sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+ if (!sdesc)
+ return NULL;
- case SPRD_DMA_CFGERR_INT:
- hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
- break;
+ hw = &sdesc->chn_hw;
- default:
- dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
- return -EINVAL;
+ hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
+ hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
+ hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
+ hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
+ hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+ SPRD_DMA_HIGH_ADDR_MASK;
+ hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+ SPRD_DMA_HIGH_ADDR_MASK;
+
+ if (IS_ALIGNED(len, 8)) {
+ datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
+ step = SPRD_DMA_DWORD_STEP;
+ } else if (IS_ALIGNED(len, 4)) {
+ datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
+ step = SPRD_DMA_WORD_STEP;
+ } else if (IS_ALIGNED(len, 2)) {
+ datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
+ step = SPRD_DMA_SHORT_STEP;
+ } else {
+ datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
+ step = SPRD_DMA_BYTE_STEP;
}
- if (transcation_len == 0)
- hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
- else
- hw->trsc_len = transcation_len & SPRD_DMA_TRSC_LEN_MASK;
+ temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
+ temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
+ temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
+ temp |= len & SPRD_DMA_FRG_LEN_MASK;
+ hw->frg_len = temp;
- hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
- SPRD_DMA_DEST_TRSF_STEP_OFFSET |
- (src_step & SPRD_DMA_TRSF_STEP_MASK) <<
- SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+ hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+ hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
- hw->frg_step = 0;
- hw->src_blk_step = 0;
- hw->des_blk_step = 0;
- hw->src_blk_step = 0;
- return 0;
+ temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
+ temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+ hw->trsf_step = temp;
+
+ return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}
static struct dma_async_tx_descriptor *
-sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
+sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct dma_slave_config *slave_cfg = &schan->slave_cfg;
+ dma_addr_t src = 0, dst = 0;
struct sprd_dma_desc *sdesc;
- int ret;
+ struct scatterlist *sg;
+ u32 len = 0;
+ int ret, i;
+
+ /* TODO: now we only support one sg for each DMA configuration. */
+ if (!is_slave_direction(dir) || sglen > 1)
+ return NULL;
sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
if (!sdesc)
return NULL;
- ret = sprd_dma_config(chan, sdesc, dest, src, len);
+ for_each_sg(sgl, sg, sglen, i) {
+ len = sg_dma_len(sg);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = sg_dma_address(sg);
+ dst = slave_cfg->dst_addr;
+ } else {
+ src = slave_cfg->src_addr;
+ dst = sg_dma_address(sg);
+ }
+ }
+
+ ret = sprd_dma_fill_desc(chan, sdesc, src, dst, len, dir, flags,
+ slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
@@ -731,6 +774,19 @@ sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}
+static int sprd_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct dma_slave_config *slave_cfg = &schan->slave_cfg;
+
+ if (!is_slave_direction(config->direction))
+ return -EINVAL;
+
+ memcpy(slave_cfg, config, sizeof(*config));
+ return 0;
+}
+
static int sprd_dma_pause(struct dma_chan *chan)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
@@ -842,10 +898,9 @@ static int sprd_dma_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->glb_base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!sdev->glb_base)
- return -ENOMEM;
+ sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sdev->glb_base))
+ return PTR_ERR(sdev->glb_base);
dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
sdev->total_chns = chn_count;
@@ -858,6 +913,8 @@ static int sprd_dma_probe(struct platform_device *pdev)
sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
+ sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
+ sdev->dma_dev.device_config = sprd_dma_slave_config;
sdev->dma_dev.device_pause = sprd_dma_pause;
sdev->dma_dev.device_resume = sprd_dma_resume;
sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c2b089af0420..1bc149af990e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2889,8 +2889,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
#ifdef CONFIG_PM_SLEEP
static int dma40_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
+ struct d40_base *base = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_suspend(dev);
@@ -2904,8 +2903,7 @@ static int dma40_suspend(struct device *dev)
static int dma40_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
+ struct d40_base *base = dev_get_drvdata(dev);
int ret = 0;
if (base->lcpa_regulator) {
@@ -2970,8 +2968,7 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
static int dma40_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
+ struct d40_base *base = dev_get_drvdata(dev);
d40_save_restore_registers(base, true);
@@ -2985,8 +2982,7 @@ static int dma40_runtime_suspend(struct device *dev)
static int dma40_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
+ struct d40_base *base = dev_get_drvdata(dev);
d40_save_restore_registers(base, false);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index daa1602eb9f5..9dc450b7ace6 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -252,13 +252,17 @@ struct stm32_mdma_hwdesc {
u32 cmdr;
} __aligned(64);
+struct stm32_mdma_desc_node {
+ struct stm32_mdma_hwdesc *hwdesc;
+ dma_addr_t hwdesc_phys;
+};
+
struct stm32_mdma_desc {
struct virt_dma_desc vdesc;
u32 ccr;
- struct stm32_mdma_hwdesc *hwdesc;
- dma_addr_t hwdesc_phys;
bool cyclic;
u32 count;
+ struct stm32_mdma_desc_node node[];
};
struct stm32_mdma_chan {
@@ -344,30 +348,42 @@ static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
struct stm32_mdma_chan *chan, u32 count)
{
struct stm32_mdma_desc *desc;
+ int i;
- desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ desc = kzalloc(offsetof(typeof(*desc), node[count]), GFP_NOWAIT);
if (!desc)
return NULL;
- desc->hwdesc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
- &desc->hwdesc_phys);
- if (!desc->hwdesc) {
- dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
- kfree(desc);
- return NULL;
+ for (i = 0; i < count; i++) {
+ desc->node[i].hwdesc =
+ dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
+ &desc->node[i].hwdesc_phys);
+ if (!desc->node[i].hwdesc)
+ goto err;
}
desc->count = count;
return desc;
+
+err:
+ dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
+ while (--i >= 0)
+ dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+ desc->node[i].hwdesc_phys);
+ kfree(desc);
+ return NULL;
}
static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
{
struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
+ int i;
- dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys);
+ for (i = 0; i < desc->count; i++)
+ dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+ desc->node[i].hwdesc_phys);
kfree(desc);
}
@@ -410,13 +426,10 @@ static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
enum dma_slave_buswidth width)
{
- u32 best_burst = max_burst;
- u32 burst_len = best_burst * width;
+ u32 best_burst;
- while ((burst_len > 0) && (tlen % burst_len)) {
- best_burst = best_burst >> 1;
- burst_len = best_burst * width;
- }
+ best_burst = min((u32)1 << __ffs(tlen | buf_len),
+ max_burst * width) / width;
return (best_burst > 0) ? best_burst : 1;
}
@@ -669,18 +682,18 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
}
static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
- struct stm32_mdma_hwdesc *hwdesc)
+ struct stm32_mdma_desc_node *node)
{
- dev_dbg(chan2dev(chan), "hwdesc: 0x%p\n", hwdesc);
- dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", hwdesc->ctcr);
- dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", hwdesc->cbndtr);
- dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", hwdesc->csar);
- dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", hwdesc->cdar);
- dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", hwdesc->cbrur);
- dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", hwdesc->clar);
- dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", hwdesc->ctbr);
- dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", hwdesc->cmar);
- dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", hwdesc->cmdr);
+ dev_dbg(chan2dev(chan), "hwdesc: %pad\n", &node->hwdesc_phys);
+ dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", node->hwdesc->ctcr);
+ dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", node->hwdesc->cbndtr);
+ dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", node->hwdesc->csar);
+ dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", node->hwdesc->cdar);
+ dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", node->hwdesc->cbrur);
+ dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", node->hwdesc->clar);
+ dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", node->hwdesc->ctbr);
+ dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", node->hwdesc->cmar);
+ dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", node->hwdesc->cmdr);
}
static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
@@ -694,7 +707,7 @@ static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
struct stm32_mdma_hwdesc *hwdesc;
u32 next = count + 1;
- hwdesc = &desc->hwdesc[count];
+ hwdesc = desc->node[count].hwdesc;
hwdesc->ctcr = ctcr;
hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
STM32_MDMA_CBNDTR_BRDUM |
@@ -704,19 +717,20 @@ static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
hwdesc->csar = src_addr;
hwdesc->cdar = dst_addr;
hwdesc->cbrur = 0;
- hwdesc->clar = desc->hwdesc_phys + next * sizeof(*hwdesc);
hwdesc->ctbr = ctbr;
hwdesc->cmar = config->mask_addr;
hwdesc->cmdr = config->mask_data;
if (is_last) {
if (is_cyclic)
- hwdesc->clar = desc->hwdesc_phys;
+ hwdesc->clar = desc->node[0].hwdesc_phys;
else
hwdesc->clar = 0;
+ } else {
+ hwdesc->clar = desc->node[next].hwdesc_phys;
}
- stm32_mdma_dump_hwdesc(chan, hwdesc);
+ stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
}
static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
@@ -780,7 +794,7 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
{
struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
struct stm32_mdma_desc *desc;
- int ret;
+ int i, ret;
/*
* Once DMA is in setup cyclic mode the channel we cannot assign this
@@ -806,7 +820,9 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
xfer_setup_err:
- dma_pool_free(chan->desc_pool, &desc->hwdesc, desc->hwdesc_phys);
+ for (i = 0; i < desc->count; i++)
+ dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+ desc->node[i].hwdesc_phys);
kfree(desc);
return NULL;
}
@@ -895,7 +911,9 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
xfer_setup_err:
- dma_pool_free(chan->desc_pool, &desc->hwdesc, desc->hwdesc_phys);
+ for (i = 0; i < desc->count; i++)
+ dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+ desc->node[i].hwdesc_phys);
kfree(desc);
return NULL;
}
@@ -1009,7 +1027,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
ctcr |= STM32_MDMA_CTCR_PKE;
/* Prepare hardware descriptor */
- hwdesc = desc->hwdesc;
+ hwdesc = desc->node[0].hwdesc;
hwdesc->ctcr = ctcr;
hwdesc->cbndtr = cbndtr;
hwdesc->csar = src;
@@ -1020,7 +1038,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
hwdesc->cmar = 0;
hwdesc->cmdr = 0;
- stm32_mdma_dump_hwdesc(chan, hwdesc);
+ stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
} else {
/* Setup a LLI transfer */
ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
@@ -1120,7 +1138,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
}
chan->desc = to_stm32_mdma_desc(vdesc);
- hwdesc = chan->desc->hwdesc;
+ hwdesc = chan->desc->node[0].hwdesc;
chan->curr_hwdesc = 0;
stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
@@ -1198,7 +1216,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
unsigned long flags;
u32 status, reg;
- hwdesc = &chan->desc->hwdesc[chan->curr_hwdesc];
+ hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
spin_lock_irqsave(&chan->vchan.lock, flags);
@@ -1268,13 +1286,13 @@ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
u32 curr_hwdesc)
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc;
u32 cbndtr, residue, modulo, burst_size;
int i;
residue = 0;
for (i = curr_hwdesc + 1; i < desc->count; i++) {
- struct stm32_mdma_hwdesc *hwdesc = &desc->hwdesc[i];
-
+ hwdesc = desc->node[i].hwdesc;
residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
}
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
@@ -1503,7 +1521,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
c = dma_get_any_slave_channel(&dmadev->ddev);
if (!c) {
- dev_err(mdma2dev(dmadev), "No more channel avalaible\n");
+ dev_err(mdma2dev(dmadev), "No more channels available\n");
return NULL;
}
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
new file mode 100644
index 000000000000..e5e74e1361dc
--- /dev/null
+++ b/drivers/dma/ti/Kconfig
@@ -0,0 +1,37 @@
+#
+# Texas Instruments DMA drivers
+#
+
+config TI_CPPI41
+ tristate "Texas Instruments CPPI 4.1 DMA support"
+ depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX)
+ select DMA_ENGINE
+ help
+ The Communications Port Programming Interface (CPPI) 4.1 DMA engine
+ is currently used by the USB driver on AM335x and DA8xx platforms.
+
+config TI_EDMA
+ tristate "Texas Instruments EDMA support"
+ depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
+ default y
+ help
+ Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
+ engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
+ parts.
+
+config DMA_OMAP
+ tristate "Texas Instruments sDMA (omap-dma) support"
+ depends on ARCH_OMAP || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
+ default y
+ help
+ Enable support for the TI sDMA (System DMA or DMA4) controller. This
+ DMA engine is found on OMAP and DRA7xx parts.
+
+config TI_DMA_CROSSBAR
+ bool
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
new file mode 100644
index 000000000000..113e59ec9c32
--- /dev/null
+++ b/drivers/dma/ti/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_TI_EDMA) += edma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/cppi41.c b/drivers/dma/ti/cppi41.c
index d9bee65a18a4..1497da367710 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
-#include "dmaengine.h"
+#include "../dmaengine.h"
#define DESC_TYPE 27
#define DESC_TYPE_HOST 0x10
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index 9272b173c746..9272b173c746 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
diff --git a/drivers/dma/edma.c b/drivers/dma/ti/edma.c
index 9bc722ca8329..ceabdea40ae0 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -33,8 +33,8 @@
#include <linux/platform_data/edma.h>
-#include "dmaengine.h"
-#include "virt-dma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT 0x00
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 9483000fcf79..9b5ca8691f27 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -21,7 +21,7 @@
#include <linux/of_dma.h>
#include <linux/of_device.h>
-#include "virt-dma.h"
+#include "../virt-dma.h"
#define OMAP_SDMA_REQUESTS 127
#define OMAP_SDMA_CHANNELS 32
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 4d8c7b9078fd..eb45af71d3a3 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1244,8 +1244,7 @@ static void txx9dmac_shutdown(struct platform_device *pdev)
static int txx9dmac_suspend_noirq(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
+ struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
txx9dmac_off(ddev);
return 0;
@@ -1253,9 +1252,8 @@ static int txx9dmac_suspend_noirq(struct device *dev)
static int txx9dmac_resume_noirq(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
- struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(dev);
u32 mcr;
mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b960f6f35abd..71c0ab46f216 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -22,6 +22,18 @@ menuconfig GPIOLIB
if GPIOLIB
+config GPIOLIB_FASTPATH_LIMIT
+ int "Maximum number of GPIOs for fast path"
+ range 32 512
+ default 512
+ help
+ This adjusts the point at which certain APIs will switch from
+ using a stack allocated buffer to a dynamically allocated buffer.
+
+ You shouldn't need to change this unless you really need to
+ optimize either stack space or performance. Change this carefully
+ since setting an incorrect value could cause stack corruption.
+
config OF_GPIO
def_bool y
depends on OF
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 31e22c93e844..9c4e07fcb74b 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -188,7 +188,7 @@ static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
size_t i;
- const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
+ static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
const unsigned int gpio_reg_size = 8;
unsigned int bits_offset;
size_t word_index;
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index f35632609379..2c9738adb3a6 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -94,7 +94,7 @@ static int idi_48_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
size_t i;
- const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
+ static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
const unsigned int gpio_reg_size = 8;
unsigned int bits_offset;
size_t word_index;
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 0475e8ec96d0..49616ec815ee 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -105,27 +105,22 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mmio_74xx_gpio_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
struct mmio_74xx_gpio_priv *priv;
struct resource *res;
void __iomem *dat;
int err;
- of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
- if (!of_id)
- return -ENODEV;
-
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dat = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dat))
return PTR_ERR(dat);
- priv->flags = (uintptr_t) of_id->data;
-
err = bgpio_init(&priv->gc, &pdev->dev,
DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
dat, NULL, NULL, NULL, NULL, 0);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 6f693b7d5220..5e89f1c74a33 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -54,6 +54,8 @@ struct aspeed_gpio {
u8 *offset_timer;
unsigned int timer_users[4];
struct clk *clk;
+
+ u32 *dcache;
};
struct aspeed_gpio_bank {
@@ -231,12 +233,13 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
u32 reg;
addr = bank_val_reg(gpio, bank, GPIO_DATA);
- reg = ioread32(addr);
+ reg = gpio->dcache[GPIO_BANK(offset)];
if (val)
reg |= GPIO_BIT(offset);
else
reg &= ~GPIO_BIT(offset);
+ gpio->dcache[GPIO_BANK(offset)] = reg;
iowrite32(reg, addr);
}
@@ -287,11 +290,10 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
spin_lock_irqsave(&gpio->lock, flags);
+ __aspeed_gpio_set(gc, offset, val);
reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
iowrite32(reg | GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
- __aspeed_gpio_set(gc, offset, val);
-
spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
@@ -852,7 +854,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
const struct of_device_id *gpio_id;
struct aspeed_gpio *gpio;
struct resource *res;
- int rc;
+ int rc, i, banks;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -893,6 +895,20 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.base = -1;
gpio->chip.irq.need_valid_mask = true;
+ /* Allocate a cache of the output registers */
+ banks = gpio->config->nr_gpios >> 5;
+ gpio->dcache = devm_kzalloc(&pdev->dev,
+ sizeof(u32) * banks, GFP_KERNEL);
+ if (!gpio->dcache)
+ return -ENOMEM;
+
+ /* Populate it with initial values read from the HW */
+ for (i = 0; i < banks; i++) {
+ const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
+ gpio->dcache[i] = ioread32(gpio->base + bank->val_regs +
+ GPIO_DATA);
+ }
+
rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
if (rc < 0)
return rc;
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 987126c4c6f6..b574ecff7761 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -610,14 +610,12 @@ done:
return 0;
}
-#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id davinci_gpio_ids[] = {
{ .compatible = "ti,keystone-gpio", keystone_gpio_get_irq_chip},
{ .compatible = "ti,dm6441-gpio", davinci_gpio_get_irq_chip},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, davinci_gpio_ids);
-#endif
static struct platform_driver davinci_gpio_driver = {
.probe = davinci_gpio_probe,
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 226977f78482..7a2de3de6571 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -441,14 +441,19 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
irq_gc->chip_types[1].handler = handle_edge_irq;
if (!pp->irq_shared) {
- irq_set_chained_handler_and_data(pp->irq, dwapb_irq_handler,
- gpio);
+ int i;
+
+ for (i = 0; i < pp->ngpio; i++) {
+ if (pp->irq[i] >= 0)
+ irq_set_chained_handler_and_data(pp->irq[i],
+ dwapb_irq_handler, gpio);
+ }
} else {
/*
* Request a shared IRQ since where MFD would have devices
* using the same irq pin
*/
- err = devm_request_irq(gpio->dev, pp->irq,
+ err = devm_request_irq(gpio->dev, pp->irq[0],
dwapb_irq_handler_mfd,
IRQF_SHARED, "gpio-dwapb-mfd", gpio);
if (err) {
@@ -524,7 +529,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
if (pp->idx == 0)
port->gc.set_config = dwapb_gpio_set_config;
- if (pp->irq)
+ if (pp->has_irq)
dwapb_configure_irqs(gpio, port, pp);
err = gpiochip_add_data(&port->gc, port);
@@ -535,7 +540,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
port->is_registered = true;
/* Add GPIO-signaled ACPI event support */
- if (pp->irq)
+ if (pp->has_irq)
acpi_gpiochip_request_interrupts(&port->gc);
return err;
@@ -557,7 +562,7 @@ dwapb_gpio_get_pdata(struct device *dev)
struct dwapb_platform_data *pdata;
struct dwapb_port_property *pp;
int nports;
- int i;
+ int i, j;
nports = device_get_child_node_count(dev);
if (nports == 0)
@@ -575,6 +580,8 @@ dwapb_gpio_get_pdata(struct device *dev)
i = 0;
device_for_each_child_node(dev, fwnode) {
+ struct device_node *np = NULL;
+
pp = &pdata->properties[i++];
pp->fwnode = fwnode;
@@ -594,23 +601,35 @@ dwapb_gpio_get_pdata(struct device *dev)
pp->ngpio = 32;
}
+ pp->irq_shared = false;
+ pp->gpio_base = -1;
+
/*
* Only port A can provide interrupts in all configurations of
* the IP.
*/
- if (dev->of_node && pp->idx == 0 &&
- fwnode_property_read_bool(fwnode,
+ if (pp->idx != 0)
+ continue;
+
+ if (dev->of_node && fwnode_property_read_bool(fwnode,
"interrupt-controller")) {
- pp->irq = irq_of_parse_and_map(to_of_node(fwnode), 0);
- if (!pp->irq)
- dev_warn(dev, "no irq for port%d\n", pp->idx);
+ np = to_of_node(fwnode);
}
- if (has_acpi_companion(dev) && pp->idx == 0)
- pp->irq = platform_get_irq(to_platform_device(dev), 0);
+ for (j = 0; j < pp->ngpio; j++) {
+ pp->irq[j] = -ENXIO;
- pp->irq_shared = false;
- pp->gpio_base = -1;
+ if (np)
+ pp->irq[j] = of_irq_get(np, j);
+ else if (has_acpi_companion(dev))
+ pp->irq[j] = platform_get_irq(to_platform_device(dev), j);
+
+ if (pp->irq[j] >= 0)
+ pp->has_irq = true;
+ }
+
+ if (!pp->has_irq)
+ dev_warn(dev, "no irq for port%d\n", pp->idx);
}
return pdata;
@@ -684,13 +703,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
gpio->flags = 0;
if (dev->of_node) {
- const struct of_device_id *of_devid;
-
- of_devid = of_match_device(dwapb_of_match, dev);
- if (of_devid) {
- if (of_devid->data)
- gpio->flags = (uintptr_t)of_devid->data;
- }
+ gpio->flags = (uintptr_t)of_device_get_match_data(dev);
} else if (has_acpi_companion(dev)) {
const struct acpi_device_id *acpi_id;
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index de7dd939c043..e0d6a0a7bc69 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -300,6 +300,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
u32 offset = irqd_to_hwirq(data);
+ int state;
switch (sprd_eic->type) {
case SPRD_EIC_DEBOUNCE:
@@ -310,6 +311,17 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ state = sprd_eic_get(chip, offset);
+ if (state)
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IEV, 0);
+ else
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IEV, 1);
+ break;
default:
return -ENOTSUPP;
}
@@ -324,6 +336,17 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ state = sprd_eic_get(chip, offset);
+ if (state)
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTPOL, 0);
+ else
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTPOL, 1);
+ break;
default:
return -ENOTSUPP;
}
@@ -405,6 +428,55 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
+static void sprd_eic_toggle_trigger(struct gpio_chip *chip, unsigned int irq,
+ unsigned int offset)
+{
+ struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
+ struct irq_data *data = irq_get_irq_data(irq);
+ u32 trigger = irqd_get_trigger_type(data);
+ int state, post_state;
+
+ /*
+ * The debounce EIC and latch EIC can only support level trigger, so we
+ * can toggle the level trigger to emulate the edge trigger.
+ */
+ if ((sprd_eic->type != SPRD_EIC_DEBOUNCE &&
+ sprd_eic->type != SPRD_EIC_LATCH) ||
+ !(trigger & IRQ_TYPE_EDGE_BOTH))
+ return;
+
+ sprd_eic_irq_mask(data);
+ state = sprd_eic_get(chip, offset);
+
+retry:
+ switch (sprd_eic->type) {
+ case SPRD_EIC_DEBOUNCE:
+ if (state)
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
+ else
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);
+ break;
+ case SPRD_EIC_LATCH:
+ if (state)
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 0);
+ else
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
+ break;
+ default:
+ sprd_eic_irq_unmask(data);
+ return;
+ }
+
+ post_state = sprd_eic_get(chip, offset);
+ if (state != post_state) {
+ dev_warn(chip->parent, "EIC level was changed.\n");
+ state = post_state;
+ goto retry;
+ }
+
+ sprd_eic_irq_unmask(data);
+}
+
static int sprd_eic_match_chip_by_type(struct gpio_chip *chip, void *data)
{
enum sprd_eic_type type = *(enum sprd_eic_type *)data;
@@ -448,6 +520,7 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip)
bank * SPRD_EIC_PER_BANK_NR + n);
generic_handle_irq(girq);
+ sprd_eic_toggle_trigger(chip, girq, n);
}
}
}
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 1fe2d3418f2f..636952769bc8 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -52,8 +52,6 @@ MODULE_DEVICE_TABLE(of, gef_gpio_ids);
static int __init gef_gpio_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(gef_gpio_ids, &pdev->dev);
struct gpio_chip *gc;
void __iomem *regs;
int ret;
@@ -82,7 +80,7 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
}
gc->base = -1;
- gc->ngpio = (u16)(uintptr_t)of_id->data;
+ gc->ngpio = (u16)(uintptr_t)of_device_get_match_data(&pdev->dev);
gc->of_gpio_n_cells = 2;
gc->of_node = pdev->dev.of_node;
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index d496cc56c2a2..b56ff2efbf36 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -177,7 +177,7 @@ static int gpiomm_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
size_t i;
- const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
+ static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
const unsigned int gpio_reg_size = 8;
unsigned int bits_offset;
size_t word_index;
diff --git a/drivers/gpio/gpio-ingenic.c b/drivers/gpio/gpio-ingenic.c
index 15fb2bc796a8..e738e384a5ca 100644
--- a/drivers/gpio/gpio-ingenic.c
+++ b/drivers/gpio/gpio-ingenic.c
@@ -285,8 +285,6 @@ MODULE_DEVICE_TABLE(of, ingenic_gpio_of_match);
static int ingenic_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id = of_match_device(
- ingenic_gpio_of_match, dev);
struct ingenic_gpio_chip *jzgc;
u32 bank;
int err;
@@ -323,7 +321,7 @@ static int ingenic_gpio_probe(struct platform_device *pdev)
jzgc->gc.parent = dev;
jzgc->gc.of_node = dev->of_node;
jzgc->gc.owner = THIS_MODULE;
- jzgc->version = (enum jz_version)of_id->data;
+ jzgc->version = (enum jz_version)of_device_get_match_data(dev);
jzgc->gc.set = ingenic_gpio_set;
jzgc->gc.get = ingenic_gpio_get;
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index 92c4fe7b2677..16cfbe9e72fe 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -17,9 +17,11 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
#include <asm/types.h>
#include <loongson.h>
-#include <linux/gpio.h>
#define STLS2F_N_GPIO 4
#define STLS3A_N_GPIO 16
@@ -30,86 +32,108 @@
#define LOONGSON_N_GPIO STLS2F_N_GPIO
#endif
+/*
+ * Offset into the register where we read lines, we write them from offset 0.
+ * This offset is the only thing that stand between us and using
+ * GPIO_GENERIC.
+ */
#define LOONGSON_GPIO_IN_OFFSET 16
static DEFINE_SPINLOCK(gpio_lock);
-static int loongson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+static int loongson_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
- u32 temp;
- u32 mask;
+ u32 val;
spin_lock(&gpio_lock);
- mask = 1 << gpio;
- temp = LOONGSON_GPIOIE;
- temp |= mask;
- LOONGSON_GPIOIE = temp;
+ val = LOONGSON_GPIODATA;
spin_unlock(&gpio_lock);
- return 0;
+ return !!(val & BIT(gpio + LOONGSON_GPIO_IN_OFFSET));
}
-static int loongson_gpio_direction_output(struct gpio_chip *chip,
- unsigned gpio, int level)
+static void loongson_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ u32 val;
+
+ spin_lock(&gpio_lock);
+ val = LOONGSON_GPIODATA;
+ if (value)
+ val |= BIT(gpio);
+ else
+ val &= ~BIT(gpio);
+ LOONGSON_GPIODATA = val;
+ spin_unlock(&gpio_lock);
+}
+
+static int loongson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
u32 temp;
- u32 mask;
- gpio_set_value(gpio, level);
spin_lock(&gpio_lock);
- mask = 1 << gpio;
temp = LOONGSON_GPIOIE;
- temp &= (~mask);
+ temp |= BIT(gpio);
LOONGSON_GPIOIE = temp;
spin_unlock(&gpio_lock);
return 0;
}
-static int loongson_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+static int loongson_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int level)
{
- u32 val;
- u32 mask;
+ u32 temp;
- mask = 1 << (gpio + LOONGSON_GPIO_IN_OFFSET);
+ loongson_gpio_set_value(chip, gpio, level);
spin_lock(&gpio_lock);
- val = LOONGSON_GPIODATA;
+ temp = LOONGSON_GPIOIE;
+ temp &= ~BIT(gpio);
+ LOONGSON_GPIOIE = temp;
spin_unlock(&gpio_lock);
- return (val & mask) != 0;
+ return 0;
}
-static void loongson_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int loongson_gpio_probe(struct platform_device *pdev)
{
- u32 val;
- u32 mask;
-
- mask = 1 << gpio;
-
- spin_lock(&gpio_lock);
- val = LOONGSON_GPIODATA;
- if (value)
- val |= mask;
- else
- val &= (~mask);
- LOONGSON_GPIODATA = val;
- spin_unlock(&gpio_lock);
+ struct gpio_chip *gc;
+ struct device *dev = &pdev->dev;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ gc->label = "loongson-gpio-chip";
+ gc->base = 0;
+ gc->ngpio = LOONGSON_N_GPIO;
+ gc->get = loongson_gpio_get_value;
+ gc->set = loongson_gpio_set_value;
+ gc->direction_input = loongson_gpio_direction_input;
+ gc->direction_output = loongson_gpio_direction_output;
+
+ return gpiochip_add_data(gc, NULL);
}
-static struct gpio_chip loongson_chip = {
- .label = "Loongson-gpio-chip",
- .direction_input = loongson_gpio_direction_input,
- .get = loongson_gpio_get_value,
- .direction_output = loongson_gpio_direction_output,
- .set = loongson_gpio_set_value,
- .base = 0,
- .ngpio = LOONGSON_N_GPIO,
- .can_sleep = false,
+static struct platform_driver loongson_gpio_driver = {
+ .driver = {
+ .name = "loongson-gpio",
+ },
+ .probe = loongson_gpio_probe,
};
static int __init loongson_gpio_setup(void)
{
- return gpiochip_add_data(&loongson_chip, NULL);
+ struct platform_device *pdev;
+ int ret;
+
+ ret = platform_driver_register(&loongson_gpio_driver);
+ if (ret) {
+ pr_err("error registering loongson GPIO driver\n");
+ return ret;
+ }
+
+ pdev = platform_device_register_simple("loongson-gpio", -1, NULL, 0);
+ return PTR_ERR_OR_ZERO(pdev);
}
postcore_initcall(loongson_gpio_setup);
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 6dc6725403ec..c3a3b9b7b553 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -12,7 +12,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/mfd/lp3943.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index df0ad2cef0d2..801995dd9b26 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -14,7 +14,7 @@
* Based on the TPS65218 driver
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 92b3ae2a6735..aa74cc4d8b14 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -20,9 +20,8 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 1e557b10d73e..b5b5e500e72c 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -25,7 +25,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 946d09195598..198a36b07773 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -35,7 +35,7 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/spi/max7301.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
/*
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 0f0df7956264..18a5a58d634a 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -24,7 +24,7 @@
#include <linux/mutex.h>
#include <linux/spi/spi.h>
#include <linux/spi/mc33880.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-mc9s08dz60.c b/drivers/gpio/gpio-mc9s08dz60.c
index 2fcad5b9cca5..d8d846d2189a 100644
--- a/drivers/gpio/gpio-mc9s08dz60.c
+++ b/drivers/gpio/gpio-mc9s08dz60.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#define GPIO_GROUP_NUM 2
#define GPIO_NUM_PER_GROUP 8
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index b3678bd1c120..e2bee27eb526 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -18,7 +18,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index b1cf76dd84ba..b0754fe69e77 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -11,7 +11,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 76c2fe91a901..d66b7a768ecd 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* GPIO Testing Device Driver
*
* Copyright (C) 2014 Kamlakant Patel <kamlakant.patel@broadcom.com>
* Copyright (C) 2015-2016 Bamvor Jian Zhang <bamv2005@gmail.com>
* Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/init.h>
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 6cb67595d15f..3b34dbecef99 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/mfd/intel_msic.h>
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 45c65f805fd6..6e02148c208b 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -36,7 +36,8 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -51,8 +52,6 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include "gpiolib.h"
-
/*
* GPIO unit register offsets.
*/
@@ -608,19 +607,16 @@ static int mvebu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
if (mvpwm->gpiod) {
ret = -EBUSY;
} else {
- desc = gpio_to_desc(mvchip->chip.base + pwm->hwpwm);
- if (!desc) {
- ret = -ENODEV;
+ desc = gpiochip_request_own_desc(&mvchip->chip,
+ pwm->hwpwm, "mvebu-pwm");
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
goto out;
}
- ret = gpiod_request(desc, "mvebu-pwm");
- if (ret)
- goto out;
-
ret = gpiod_direction_output(desc, 0);
if (ret) {
- gpiod_free(desc);
+ gpiochip_free_own_desc(desc);
goto out;
}
@@ -637,7 +633,7 @@ static void mvebu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
unsigned long flags;
spin_lock_irqsave(&mvpwm->lock, flags);
- gpiod_free(mvpwm->gpiod);
+ gpiochip_free_own_desc(mvpwm->gpiod);
mvpwm->gpiod = NULL;
spin_unlock_irqrestore(&mvpwm->lock, flags);
}
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 5245a2fe62ae..2f2829966d4c 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -1,25 +1,13 @@
-/*
- * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * Based on code from Freescale Semiconductor,
- * Authors: Daniel Mack, Juergen Beisert.
- * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
+// SPDX-License-Identifier: GPL-2.0+
+//
+// MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
+// Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+//
+// Based on code from Freescale Semiconductor,
+// Authors: Daniel Mack, Juergen Beisert.
+// Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -30,8 +18,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio/driver.h>
-/* FIXME: for gpio_get_value() replace this with direct register read */
-#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/bug.h>
@@ -62,6 +48,7 @@ struct mxc_gpio_hwdata {
struct mxc_gpio_port {
struct list_head node;
void __iomem *base;
+ struct clk *clk;
int irq;
int irq_high;
struct irq_domain *domain;
@@ -174,7 +161,6 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
struct mxc_gpio_port *port = gc->private;
u32 bit, val;
u32 gpio_idx = d->hwirq;
- u32 gpio = port->gc.base + gpio_idx;
int edge;
void __iomem *reg = port->base;
@@ -190,13 +176,13 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
if (GPIO_EDGE_SEL >= 0) {
edge = GPIO_INT_BOTH_EDGES;
} else {
- val = gpio_get_value(gpio);
+ val = port->gc.get(&port->gc, gpio_idx);
if (val) {
edge = GPIO_INT_LOW_LEV;
- pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
+ pr_debug("mxc: set GPIO %d to low trigger\n", gpio_idx);
} else {
edge = GPIO_INT_HIGH_LEV;
- pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
+ pr_debug("mxc: set GPIO %d to high trigger\n", gpio_idx);
}
port->both_edges |= 1 << gpio_idx;
}
@@ -437,6 +423,17 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (port->irq < 0)
return port->irq;
+ /* the controller clock is optional */
+ port->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(port->clk))
+ port->clk = NULL;
+
+ err = clk_prepare_enable(port->clk);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return err;
+ }
+
/* disable the interrupt and clear the status */
writel(0, port->base + GPIO_IMR);
writel(~0, port->base + GPIO_ISR);
@@ -505,6 +502,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
out_irqdomain_remove:
irq_domain_remove(port->domain);
out_bgio:
+ clk_disable_unprepare(port->clk);
dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
return err;
}
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 435def22445d..e2831ee70cdc 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -1,24 +1,10 @@
-/*
- * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * Based on code from Freescale,
- * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
+// Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+//
+// Based on code from Freescale,
+// Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/err.h>
#include <linux/init.h>
@@ -290,8 +276,6 @@ MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
static int mxs_gpio_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(mxs_gpio_dt_ids, &pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct device_node *parent;
static void __iomem *base;
@@ -306,7 +290,7 @@ static int mxs_gpio_probe(struct platform_device *pdev)
port->id = of_alias_get_id(np, "gpio");
if (port->id < 0)
return port->id;
- port->devid = (enum mxs_gpio_id) of_id->data;
+ port->devid = (enum mxs_gpio_id)of_device_get_match_data(&pdev->dev);
port->dev = &pdev->dev;
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 96a8a8cb2729..1b19c88ea7bb 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -9,7 +9,7 @@
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <asm/octeon/octeon.h>
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 35971a341c40..d1afedf4dcbf 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -24,7 +24,7 @@
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 3d818195e351..05b0cd5dcf11 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -18,7 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mfd/palmas.h>
@@ -159,13 +159,9 @@ static int palmas_gpio_probe(struct platform_device *pdev)
struct palmas_platform_data *palmas_pdata;
struct palmas_gpio *palmas_gpio;
int ret;
- const struct of_device_id *match;
const struct palmas_device_data *dev_data;
- match = of_match_device(of_palmas_gpio_match, &pdev->dev);
- if (!match)
- return -ENODEV;
- dev_data = match->data;
+ dev_data = of_device_get_match_data(&pdev->dev);
if (!dev_data)
dev_data = &palmas_dev_data;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d2ead4b1cf61..c55ad157e820 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -12,7 +12,7 @@
*/
#include <linux/acpi.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -25,29 +25,44 @@
#include <asm/unaligned.h>
-#define PCA953X_INPUT 0
-#define PCA953X_OUTPUT 1
-#define PCA953X_INVERT 2
-#define PCA953X_DIRECTION 3
+#define PCA953X_INPUT 0x00
+#define PCA953X_OUTPUT 0x01
+#define PCA953X_INVERT 0x02
+#define PCA953X_DIRECTION 0x03
#define REG_ADDR_AI 0x80
-#define PCA957X_IN 0
-#define PCA957X_INVRT 1
-#define PCA957X_BKEN 2
-#define PCA957X_PUPD 3
-#define PCA957X_CFG 4
-#define PCA957X_OUT 5
-#define PCA957X_MSK 6
-#define PCA957X_INTS 7
-
-#define PCAL953X_IN_LATCH 34
-#define PCAL953X_INT_MASK 37
-#define PCAL953X_INT_STAT 38
+#define PCA957X_IN 0x00
+#define PCA957X_INVRT 0x01
+#define PCA957X_BKEN 0x02
+#define PCA957X_PUPD 0x03
+#define PCA957X_CFG 0x04
+#define PCA957X_OUT 0x05
+#define PCA957X_MSK 0x06
+#define PCA957X_INTS 0x07
+
+#define PCAL953X_OUT_STRENGTH 0x20
+#define PCAL953X_IN_LATCH 0x22
+#define PCAL953X_PULL_EN 0x23
+#define PCAL953X_PULL_SEL 0x24
+#define PCAL953X_INT_MASK 0x25
+#define PCAL953X_INT_STAT 0x26
+#define PCAL953X_OUT_CONF 0x27
+
+#define PCAL6524_INT_EDGE 0x28
+#define PCAL6524_INT_CLR 0x2a
+#define PCAL6524_IN_STATUS 0x2b
+#define PCAL6524_OUT_INDCONF 0x2c
+#define PCAL6524_DEBOUNCE 0x2d
#define PCA_GPIO_MASK 0x00FF
+
+#define PCAL_GPIO_MASK 0x1f
+#define PCAL_PINCTRL_MASK 0xe0
+
#define PCA_INT 0x0100
#define PCA_PCAL 0x0200
+#define PCA_LATCH_INT (PCA_PCAL | PCA_INT)
#define PCA953X_TYPE 0x1000
#define PCA957X_TYPE 0x2000
#define PCA_TYPE_MASK 0xF000
@@ -207,9 +222,11 @@ static int pca957x_write_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
static int pca953x_write_regs_24(struct pca953x_chip *chip, int reg, u8 *val)
{
int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+ int addr = (reg & PCAL_GPIO_MASK) << bank_shift;
+ int pinctrl = (reg & PCAL_PINCTRL_MASK) << 1;
return i2c_smbus_write_i2c_block_data(chip->client,
- (reg << bank_shift) | REG_ADDR_AI,
+ pinctrl | addr | REG_ADDR_AI,
NBANK(chip), val);
}
@@ -249,9 +266,11 @@ static int pca953x_read_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
static int pca953x_read_regs_24(struct pca953x_chip *chip, int reg, u8 *val)
{
int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+ int addr = (reg & PCAL_GPIO_MASK) << bank_shift;
+ int pinctrl = (reg & PCAL_PINCTRL_MASK) << 1;
return i2c_smbus_read_i2c_block_data(chip->client,
- (reg << bank_shift) | REG_ADDR_AI,
+ pinctrl | addr | REG_ADDR_AI,
NBANK(chip), val);
}
@@ -522,6 +541,15 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static void pca953x_irq_shutdown(struct irq_data *d)
+{
+ struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+ u8 mask = 1 << (d->hwirq % BANK_SZ);
+
+ chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
+ chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
+}
+
static struct irq_chip pca953x_irq_chip = {
.name = "pca953x",
.irq_mask = pca953x_irq_mask,
@@ -529,6 +557,7 @@ static struct irq_chip pca953x_irq_chip = {
.irq_bus_lock = pca953x_irq_bus_lock,
.irq_bus_sync_unlock = pca953x_irq_bus_sync_unlock,
.irq_set_type = pca953x_irq_set_type,
+ .irq_shutdown = pca953x_irq_shutdown,
};
static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
@@ -810,13 +839,11 @@ static int pca953x_probe(struct i2c_client *client,
chip->driver_data = i2c_id->driver_data;
} else {
const struct acpi_device_id *acpi_id;
- const struct of_device_id *match;
+ struct device *dev = &client->dev;
- match = of_match_device(pca953x_dt_ids, &client->dev);
- if (match) {
- chip->driver_data = (int)(uintptr_t)match->data;
- } else {
- acpi_id = acpi_match_device(pca953x_acpi_ids, &client->dev);
+ chip->driver_data = (uintptr_t)of_device_get_match_data(dev);
+ if (!chip->driver_data) {
+ acpi_id = acpi_match_device(pca953x_acpi_ids, dev);
if (!acpi_id) {
ret = -ENODEV;
goto err_exit;
@@ -936,8 +963,8 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pca9575", .data = OF_957X(16, PCA_INT), },
{ .compatible = "nxp,pca9698", .data = OF_953X(40, 0), },
- { .compatible = "nxp,pcal6524", .data = OF_953X(24, PCA_INT), },
- { .compatible = "nxp,pcal9555a", .data = OF_953X(16, PCA_INT), },
+ { .compatible = "nxp,pcal6524", .data = OF_953X(24, PCA_LATCH_INT), },
+ { .compatible = "nxp,pcal9555a", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "maxim,max7310", .data = OF_953X( 8, 0), },
{ .compatible = "maxim,max7312", .data = OF_953X(16, PCA_INT), },
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 38fbb420c6cd..adf72dda25a2 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/platform_data/pcf857x.h>
#include <linux/interrupt.h>
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 68c6d0c5a6d1..ffce0ab912ed 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -17,7 +17,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index b70974cb9ef1..2afd9de84a0d 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -20,7 +20,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/bitops.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index 66d68d991162..29e044ff4b17 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -178,6 +178,14 @@ static int sprd_pmic_eic_irq_set_type(struct irq_data *data,
case IRQ_TYPE_LEVEL_LOW:
pmic_eic->reg[REG_IEV] = 0;
break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ /*
+ * Will set the trigger level according to current EIC level
+ * in irq_bus_sync_unlock() interface, so here nothing to do.
+ */
+ break;
default:
return -ENOTSUPP;
}
@@ -197,11 +205,22 @@ static void sprd_pmic_eic_bus_sync_unlock(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+ u32 trigger = irqd_get_trigger_type(data);
u32 offset = irqd_to_hwirq(data);
+ int state;
/* Set irq type */
- sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IEV,
- pmic_eic->reg[REG_IEV]);
+ if (trigger & IRQ_TYPE_EDGE_BOTH) {
+ state = sprd_pmic_eic_get(chip, offset);
+ if (state)
+ sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IEV, 0);
+ else
+ sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IEV, 1);
+ } else {
+ sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IEV,
+ pmic_eic->reg[REG_IEV]);
+ }
+
/* Set irq unmask */
sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IE,
pmic_eic->reg[REG_IE]);
@@ -212,6 +231,35 @@ static void sprd_pmic_eic_bus_sync_unlock(struct irq_data *data)
mutex_unlock(&pmic_eic->buslock);
}
+static void sprd_pmic_eic_toggle_trigger(struct gpio_chip *chip,
+ unsigned int irq, unsigned int offset)
+{
+ u32 trigger = irq_get_trigger_type(irq);
+ int state, post_state;
+
+ if (!(trigger & IRQ_TYPE_EDGE_BOTH))
+ return;
+
+ state = sprd_pmic_eic_get(chip, offset);
+retry:
+ if (state)
+ sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IEV, 0);
+ else
+ sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IEV, 1);
+
+ post_state = sprd_pmic_eic_get(chip, offset);
+ if (state != post_state) {
+ dev_warn(chip->parent, "PMIC EIC level was changed.\n");
+ state = post_state;
+ goto retry;
+ }
+
+ /* Set irq unmask */
+ sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IE, 1);
+ /* Generate trigger start pulse for debounce EIC */
+ sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_TRIG, 1);
+}
+
static irqreturn_t sprd_pmic_eic_irq_handler(int irq, void *data)
{
struct sprd_pmic_eic *pmic_eic = data;
@@ -233,6 +281,12 @@ static irqreturn_t sprd_pmic_eic_irq_handler(int irq, void *data)
girq = irq_find_mapping(chip->irq.domain, n);
handle_nested_irq(girq);
+
+ /*
+ * The PMIC EIC can only support level trigger, so we can
+ * toggle the level trigger to emulate the edge trigger.
+ */
+ sprd_pmic_eic_toggle_trigger(chip, girq, n);
}
return IRQ_HANDLED;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index f480fb896963..1e66f808051c 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -579,15 +579,9 @@ static int pxa_gpio_probe_dt(struct platform_device *pdev,
struct pxa_gpio_chip *pchip)
{
int nr_gpios;
- const struct of_device_id *of_id =
- of_match_device(pxa_gpio_dt_ids, &pdev->dev);
const struct pxa_gpio_id *gpio_id;
- if (!of_id || !of_id->data) {
- dev_err(&pdev->dev, "Failed to find gpio controller\n");
- return -EFAULT;
- }
- gpio_id = of_id->data;
+ gpio_id = of_device_get_match_data(&pdev->dev);
gpio_type = gpio_id->type;
nr_gpios = gpio_id->gpio_nums;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index f8d7d1cd8488..8d6a5a7e612d 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -363,13 +363,15 @@ static struct irq_chip stmpe_gpio_irq_chip = {
.irq_set_type = stmpe_gpio_irq_set_type,
};
+#define MAX_GPIOS 24
+
static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
{
struct stmpe_gpio *stmpe_gpio = dev;
struct stmpe *stmpe = stmpe_gpio->stmpe;
u8 statmsbreg;
int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
- u8 status[num_banks];
+ u8 status[DIV_ROUND_UP(MAX_GPIOS, 8)];
int ret;
int i;
@@ -434,6 +436,11 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
struct stmpe_gpio *stmpe_gpio;
int ret, irq;
+ if (stmpe->num_gpios > MAX_GPIOS) {
+ dev_err(&pdev->dev, "Need to increase maximum GPIO number\n");
+ return -EINVAL;
+ }
+
stmpe_gpio = kzalloc(sizeof(*stmpe_gpio), GFP_KERNEL);
if (!stmpe_gpio)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 537cec7583fc..8b0a69c5ba88 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -182,20 +182,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
static int syscon_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id;
struct syscon_gpio_priv *priv;
struct device_node *np = dev->of_node;
int ret;
- of_id = of_match_device(syscon_gpio_ids, dev);
- if (!of_id)
- return -ENODEV;
-
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->data = of_id->data;
+ priv->data = of_device_get_match_data(dev);
if (priv->data->compatible) {
priv->syscon = syscon_regmap_lookup_by_compatible(
@@ -205,6 +200,8 @@ static int syscon_gpio_probe(struct platform_device *pdev)
} else {
priv->syscon =
syscon_regmap_lookup_by_phandle(np, "gpio,syscon-dev");
+ if (IS_ERR(priv->syscon) && np->parent)
+ priv->syscon = syscon_node_to_regmap(np->parent);
if (IS_ERR(priv->syscon))
return PTR_ERR(priv->syscon);
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 5bd21725e604..1da8d0586329 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -128,15 +128,10 @@ MODULE_DEVICE_TABLE(of, ts4900_gpio_of_match_table);
static int ts4900_gpio_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct of_device_id *match;
struct ts4900_gpio_priv *priv;
u32 ngpio;
int ret;
- match = of_match_device(ts4900_gpio_of_match_table, &client->dev);
- if (!match)
- return -EINVAL;
-
if (of_property_read_u32(client->dev.of_node, "ngpios", &ngpio))
ngpio = DEFAULT_PIN_NUMBER;
@@ -148,7 +143,7 @@ static int ts4900_gpio_probe(struct i2c_client *client,
priv->gpio_chip.label = "ts4900-gpio";
priv->gpio_chip.ngpio = ngpio;
priv->gpio_chip.parent = &client->dev;
- priv->input_bit = (uintptr_t)match->data;
+ priv->input_bit = (uintptr_t)of_device_get_match_data(&client->dev);
priv->regmap = devm_regmap_init_i2c(client, &ts4900_regmap_config);
if (IS_ERR(priv->regmap)) {
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 4610cc2938ad..d4ad6d0e02a2 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -254,8 +254,6 @@ static struct irq_chip vf610_gpio_irq_chip = {
static int vf610_gpio_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id = of_match_device(vf610_gpio_dt_ids,
- &pdev->dev);
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct vf610_gpio_port *port;
@@ -267,7 +265,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
- port->sdata = of_id->data;
+ port->sdata = of_device_get_match_data(dev);
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
port->base = devm_ioremap_resource(dev, iores);
if (IS_ERR(port->base))
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index e74bd43a6974..8e4275eaa7d7 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -322,14 +322,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
return irq;
if (pdev->dev.of_node) {
- const struct of_device_id *of_id;
-
- of_id = of_match_device(xlp_gpio_of_ids, &pdev->dev);
- if (!of_id) {
- dev_err(&pdev->dev, "Unable to match OF ID\n");
- return -ENODEV;
- }
- soc_type = (uintptr_t) of_id->data;
+ soc_type = (uintptr_t)of_device_get_match_data(&pdev->dev);
} else {
const struct acpi_device_id *acpi_id;
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index 8d4c8e99b251..8711a7907568 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -39,6 +39,7 @@
#define XRA_REIR 0x10 /* Input Rising Edge Interrupt Enable */
#define XRA_FEIR 0x12 /* Input Falling Edge Interrupt Enable */
#define XRA_IFR 0x14 /* Input Filter Enable/Disable */
+#define XRA_LAST 0x15 /* Bounds */
struct xra1403 {
struct gpio_chip chip;
@@ -50,7 +51,7 @@ static const struct regmap_config xra1403_regmap_cfg = {
.pad_bits = 1,
.val_bits = 8,
- .max_register = XRA_IFR | 0x01,
+ .max_register = XRA_LAST,
};
static unsigned int to_reg(unsigned int reg, unsigned int offset)
@@ -126,21 +127,16 @@ static void xra1403_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
int reg;
struct xra1403 *xra = gpiochip_get_data(chip);
- int *value;
+ int value[XRA_LAST];
int i;
unsigned int gcr;
unsigned int gsr;
- value = kmalloc_array(xra1403_regmap_cfg.max_register, sizeof(*value),
- GFP_KERNEL);
- if (!value)
- return;
-
seq_puts(s, "xra reg:");
- for (reg = 0; reg <= xra1403_regmap_cfg.max_register; reg++)
+ for (reg = 0; reg <= XRA_LAST; reg++)
seq_printf(s, " %2.2x", reg);
seq_puts(s, "\n value:");
- for (reg = 0; reg < xra1403_regmap_cfg.max_register; reg++) {
+ for (reg = 0; reg < XRA_LAST; reg++) {
regmap_read(xra->regmap, reg, &value[reg]);
seq_printf(s, " %2.2x", value[reg]);
}
@@ -159,7 +155,6 @@ static void xra1403_dbg_show(struct seq_file *s, struct gpio_chip *chip)
(gcr & BIT(i)) ? "in" : "out",
(gsr & BIT(i)) ? "hi" : "lo");
}
- kfree(value);
}
#else
#define xra1403_dbg_show NULL
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 75ee877e5cd5..3f5fcdd5a429 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -665,10 +665,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
static int __maybe_unused zynq_gpio_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- int irq = platform_get_irq(pdev, 0);
- struct irq_data *data = irq_get_irq_data(irq);
- struct zynq_gpio *gpio = platform_get_drvdata(pdev);
+ struct zynq_gpio *gpio = dev_get_drvdata(dev);
+ struct irq_data *data = irq_get_irq_data(gpio->irq);
if (!irqd_is_wakeup_set(data)) {
zynq_gpio_save_context(gpio);
@@ -680,10 +678,8 @@ static int __maybe_unused zynq_gpio_suspend(struct device *dev)
static int __maybe_unused zynq_gpio_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- int irq = platform_get_irq(pdev, 0);
- struct irq_data *data = irq_get_irq_data(irq);
- struct zynq_gpio *gpio = platform_get_drvdata(pdev);
+ struct zynq_gpio *gpio = dev_get_drvdata(dev);
+ struct irq_data *data = irq_get_irq_data(gpio->irq);
int ret;
if (!irqd_is_wakeup_set(data)) {
@@ -831,7 +827,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
chip->free = zynq_gpio_free;
chip->direction_input = zynq_gpio_dir_in;
chip->direction_output = zynq_gpio_dir_out;
- chip->base = -1;
+ chip->base = of_alias_get_id(pdev->dev.of_node, "gpio");
chip->ngpio = gpio->p_data->ngpio;
/* Retrieve GPIO clock */
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 586d15137c03..28d968088131 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -210,11 +210,8 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
if (!con_id)
return ERR_PTR(-ENOENT);
- for (i = 0; i < ARRAY_SIZE(whitelist); i++)
- if (!strcmp(con_id, whitelist[i]))
- break;
-
- if (i == ARRAY_SIZE(whitelist))
+ i = match_string(whitelist, ARRAY_SIZE(whitelist), con_id);
+ if (i < 0)
return ERR_PTR(-ENOENT);
desc = of_get_named_gpiod_flags(np, con_id, 0, of_flags);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 55d596f3035e..e11a3bb03820 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -61,6 +61,11 @@ static struct bus_type gpio_bus_type = {
.name = "gpio",
};
+/*
+ * Number of GPIOs to use for the fast path in set array
+ */
+#define FASTPATH_NGPIO CONFIG_GPIOLIB_FASTPATH_LIMIT
+
/* gpio_lock prevents conflicts during gpio_desc[] table updates.
* While any GPIO is requested, its gpio_chip is not removable;
* each GPIO's "requested" flag serves as a lock and refcount.
@@ -71,6 +76,9 @@ static DEFINE_MUTEX(gpio_lookup_lock);
static LIST_HEAD(gpio_lookup_list);
LIST_HEAD(gpio_devices);
+static DEFINE_MUTEX(gpio_machine_hogs_mutex);
+static LIST_HEAD(gpio_machine_hogs);
+
static void gpiochip_free_hogs(struct gpio_chip *chip);
static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key,
@@ -450,12 +458,11 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
vals[i] = !!ghd.values[i];
/* Reuse the array setting function */
- gpiod_set_array_value_complex(false,
+ return gpiod_set_array_value_complex(false,
true,
lh->numdescs,
lh->descs,
vals);
- return 0;
}
return -EINVAL;
}
@@ -1172,6 +1179,41 @@ err_remove_device:
return status;
}
+static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog)
+{
+ struct gpio_desc *desc;
+ int rv;
+
+ desc = gpiochip_get_desc(chip, hog->chip_hwnum);
+ if (IS_ERR(desc)) {
+ pr_err("%s: unable to get GPIO desc: %ld\n",
+ __func__, PTR_ERR(desc));
+ return;
+ }
+
+ if (test_bit(FLAG_IS_HOGGED, &desc->flags))
+ return;
+
+ rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags);
+ if (rv)
+ pr_err("%s: unable to hog GPIO line (%s:%u): %d\n",
+ __func__, chip->label, hog->chip_hwnum, rv);
+}
+
+static void machine_gpiochip_add(struct gpio_chip *chip)
+{
+ struct gpiod_hog *hog;
+
+ mutex_lock(&gpio_machine_hogs_mutex);
+
+ list_for_each_entry(hog, &gpio_machine_hogs, list) {
+ if (!strcmp(chip->label, hog->chip_label))
+ gpiochip_machine_hog(chip, hog);
+ }
+
+ mutex_unlock(&gpio_machine_hogs_mutex);
+}
+
static void gpiochip_setup_devs(void)
{
struct gpio_device *gdev;
@@ -1244,6 +1286,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_descs;
}
+ if (chip->ngpio > FASTPATH_NGPIO)
+ chip_warn(chip, "line cnt %u is greater than fast path cnt %u\n",
+ chip->ngpio, FASTPATH_NGPIO);
+
gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
status = -ENOMEM;
@@ -1327,6 +1373,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
acpi_gpiochip_add(chip);
+ machine_gpiochip_add(chip);
+
/*
* By first adding the chardev, and then adding the device,
* we get a device node entry in sysfs under
@@ -2078,6 +2126,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config);
* @pctldev: the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_group: name of the pin group inside the pin controller
+ *
+ * Calling this function directly from a DeviceTree-supported
+ * pinctrl driver is DEPRECATED. Please see Section 2.1 of
+ * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+ * bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
int gpiochip_add_pingroup_range(struct gpio_chip *chip,
struct pinctrl_dev *pctldev,
@@ -2131,6 +2184,11 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
*
* Returns:
* 0 on success, or a negative error-code on failure.
+ *
+ * Calling this function directly from a DeviceTree-supported
+ * pinctrl driver is DEPRECATED. Please see Section 2.1 of
+ * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+ * bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
@@ -2719,16 +2777,28 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
while (i < array_size) {
struct gpio_chip *chip = desc_array[i]->gdev->chip;
- unsigned long mask[BITS_TO_LONGS(chip->ngpio)];
- unsigned long bits[BITS_TO_LONGS(chip->ngpio)];
+ unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
+ unsigned long *mask, *bits;
int first, j, ret;
+ if (likely(chip->ngpio <= FASTPATH_NGPIO)) {
+ mask = fastpath;
+ } else {
+ mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio),
+ sizeof(*mask),
+ can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ if (!mask)
+ return -ENOMEM;
+ }
+
+ bits = mask + BITS_TO_LONGS(chip->ngpio);
+ bitmap_zero(mask, chip->ngpio);
+
if (!can_sleep)
WARN_ON(chip->can_sleep);
/* collect all inputs belonging to the same chip */
first = i;
- memset(mask, 0, sizeof(mask));
do {
const struct gpio_desc *desc = desc_array[i];
int hwgpio = gpio_chip_hwgpio(desc);
@@ -2739,8 +2809,11 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
(desc_array[i]->gdev->chip == chip));
ret = gpio_chip_get_multiple(chip, mask, bits);
- if (ret)
+ if (ret) {
+ if (mask != fastpath)
+ kfree(mask);
return ret;
+ }
for (j = first; j < i; j++) {
const struct gpio_desc *desc = desc_array[j];
@@ -2752,6 +2825,9 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
value_array[j] = value;
trace_gpio_value(desc_to_gpio(desc), 1, value);
}
+
+ if (mask != fastpath)
+ kfree(mask);
}
return 0;
}
@@ -2935,7 +3011,7 @@ static void gpio_chip_set_multiple(struct gpio_chip *chip,
}
}
-void gpiod_set_array_value_complex(bool raw, bool can_sleep,
+int gpiod_set_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
@@ -2944,14 +3020,26 @@ void gpiod_set_array_value_complex(bool raw, bool can_sleep,
while (i < array_size) {
struct gpio_chip *chip = desc_array[i]->gdev->chip;
- unsigned long mask[BITS_TO_LONGS(chip->ngpio)];
- unsigned long bits[BITS_TO_LONGS(chip->ngpio)];
+ unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
+ unsigned long *mask, *bits;
int count = 0;
+ if (likely(chip->ngpio <= FASTPATH_NGPIO)) {
+ mask = fastpath;
+ } else {
+ mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio),
+ sizeof(*mask),
+ can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ if (!mask)
+ return -ENOMEM;
+ }
+
+ bits = mask + BITS_TO_LONGS(chip->ngpio);
+ bitmap_zero(mask, chip->ngpio);
+
if (!can_sleep)
WARN_ON(chip->can_sleep);
- memset(mask, 0, sizeof(mask));
do {
struct gpio_desc *desc = desc_array[i];
int hwgpio = gpio_chip_hwgpio(desc);
@@ -2982,7 +3070,11 @@ void gpiod_set_array_value_complex(bool raw, bool can_sleep,
/* push collected bits to outputs */
if (count != 0)
gpio_chip_set_multiple(chip, mask, bits);
+
+ if (mask != fastpath)
+ kfree(mask);
}
+ return 0;
}
/**
@@ -3057,13 +3149,13 @@ EXPORT_SYMBOL_GPL(gpiod_set_value);
* This function should be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
*/
-void gpiod_set_raw_array_value(unsigned int array_size,
+int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array, int *value_array)
{
if (!desc_array)
- return;
- gpiod_set_array_value_complex(true, false, array_size, desc_array,
- value_array);
+ return -EINVAL;
+ return gpiod_set_array_value_complex(true, false, array_size,
+ desc_array, value_array);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value);
@@ -3383,14 +3475,14 @@ EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
*
* This function is to be called from contexts that can sleep.
*/
-void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
might_sleep_if(extra_checks);
if (!desc_array)
- return;
- gpiod_set_array_value_complex(true, true, array_size, desc_array,
+ return -EINVAL;
+ return gpiod_set_array_value_complex(true, true, array_size, desc_array,
value_array);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value_cansleep);
@@ -3463,6 +3555,33 @@ void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
}
EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table);
+/**
+ * gpiod_add_hogs() - register a set of GPIO hogs from machine code
+ * @hogs: table of gpio hog entries with a zeroed sentinel at the end
+ */
+void gpiod_add_hogs(struct gpiod_hog *hogs)
+{
+ struct gpio_chip *chip;
+ struct gpiod_hog *hog;
+
+ mutex_lock(&gpio_machine_hogs_mutex);
+
+ for (hog = &hogs[0]; hog->chip_label; hog++) {
+ list_add_tail(&hog->list, &gpio_machine_hogs);
+
+ /*
+ * The chip may have been registered earlier, so check if it
+ * exists and, if so, try to hog the line now.
+ */
+ chip = find_chip_by_name(hog->chip_label);
+ if (chip)
+ gpiochip_machine_hog(chip, hog);
+ }
+
+ mutex_unlock(&gpio_machine_hogs_mutex);
+}
+EXPORT_SYMBOL_GPL(gpiod_add_hogs);
+
static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index ad456b6f9d8b..1a8e20363861 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -188,7 +188,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
-void gpiod_set_array_value_complex(bool raw, bool can_sleep,
+int gpiod_set_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 3e58ed93d5b1..2a3b8d7972b5 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -17,3 +17,10 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
+
+# 'remote-endpoint' is fixed up at run-time
+DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7791 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7793 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7795 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7796 += -Wno-graph_endpoint
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 0000434a1fbd..a49a10437c40 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -575,6 +575,13 @@ config HID_MAYFLASH
Say Y here if you have HJZ Mayflash PS3 game controller adapters
and want to enable force feedback support.
+config HID_REDRAGON
+ tristate "Redragon keyboards"
+ depends on HID
+ default !EXPERT
+ ---help---
+ Support for Redragon keyboards that need fix-ups to work properly.
+
config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
depends on HID
@@ -838,6 +845,15 @@ config HID_SPEEDLINK
---help---
Support for Speedlink Vicious and Divine Cezanne mouse.
+config HID_STEAM
+ tristate "Steam Controller support"
+ depends on HID
+ select POWER_SUPPLY
+ ---help---
+ Say Y here if you have a Steam Controller and want to use it
+ without running the Steam Client. It supports both the wired and
+ the wireless adaptor.
+
config HID_STEELSERIES
tristate "Steelseries SRW-S1 steering wheel support"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 17a8bd97da9d..511e1cbff768 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -86,6 +86,7 @@ hid-picolcd-$(CONFIG_DEBUG_FS) += hid-picolcd_debugfs.o
obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
+obj-$(CONFIG_HID_REDRAGON) += hid-redragon.o
obj-$(CONFIG_HID_RETRODE) += hid-retrode.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
@@ -97,6 +98,7 @@ obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
+obj-$(CONFIG_HID_STEAM) += hid-steam.o
obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index b1eeb4839bfc..aec253b44156 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -171,7 +171,7 @@ static int t4_read_write_register(struct hid_device *hdev, u32 address,
int ret;
u16 check_sum;
u8 *input;
- u8 *readbuf;
+ u8 *readbuf = NULL;
input = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL);
if (!input)
@@ -204,8 +204,8 @@ static int t4_read_write_register(struct hid_device *hdev, u32 address,
goto exit;
}
- readbuf = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL);
if (read_flag) {
+ readbuf = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL);
if (!readbuf) {
ret = -ENOMEM;
goto exit;
@@ -219,22 +219,24 @@ static int t4_read_write_register(struct hid_device *hdev, u32 address,
goto exit_readbuf;
}
+ ret = -EINVAL;
+
if (*(u32 *)&readbuf[6] != address) {
dev_err(&hdev->dev, "read register address error (%x,%x)\n",
- *(u32 *)&readbuf[6], address);
+ *(u32 *)&readbuf[6], address);
goto exit_readbuf;
}
if (*(u16 *)&readbuf[10] != 1) {
dev_err(&hdev->dev, "read register size error (%x)\n",
- *(u16 *)&readbuf[10]);
+ *(u16 *)&readbuf[10]);
goto exit_readbuf;
}
check_sum = t4_calc_check_sum(readbuf, 6, 7);
if (*(u16 *)&readbuf[13] != check_sum) {
dev_err(&hdev->dev, "read register checksum error (%x,%x)\n",
- *(u16 *)&readbuf[13], check_sum);
+ *(u16 *)&readbuf[13], check_sum);
goto exit_readbuf;
}
@@ -458,17 +460,35 @@ static int __maybe_unused alps_post_reset(struct hid_device *hdev)
case T4:
ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_1,
NULL, T4_I2C_ABS, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_1 (%d)\n",
+ ret);
+ goto exit;
+ }
+
ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_4,
NULL, T4_FEEDCFG4_ADVANCED_ABS_ENABLE, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_4 (%d)\n",
+ ret);
+ goto exit;
+ }
break;
case U1:
ret = u1_read_write_register(hdev,
ADDRESS_U1_DEV_CTRL_1, NULL,
U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to change TP mode (%d)\n",
+ ret);
+ goto exit;
+ }
break;
default:
break;
}
+
+exit:
return ret;
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 5d7cc6bbbac6..355dc7e49562 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -57,7 +57,9 @@ MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle
* Register a new report for a device.
*/
-struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id)
+struct hid_report *hid_register_report(struct hid_device *device,
+ unsigned int type, unsigned int id,
+ unsigned int application)
{
struct hid_report_enum *report_enum = device->report_enum + type;
struct hid_report *report;
@@ -78,6 +80,7 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
report->type = type;
report->size = 0;
report->device = device;
+ report->application = application;
report_enum->report_id_hash[id] = report;
list_add_tail(&report->list, &report_enum->report_list);
@@ -221,11 +224,15 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
{
struct hid_report *report;
struct hid_field *field;
- unsigned usages;
- unsigned offset;
- unsigned i;
+ unsigned int usages;
+ unsigned int offset;
+ unsigned int i;
+ unsigned int application;
+
+ application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
- report = hid_register_report(parser->device, report_type, parser->global.report_id);
+ report = hid_register_report(parser->device, report_type,
+ parser->global.report_id, application);
if (!report) {
hid_err(parser->device, "hid_register_report failed\n");
return -1;
@@ -259,7 +266,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
- field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
+ field->application = application;
for (i = 0; i < usages; i++) {
unsigned j = i;
@@ -1798,7 +1805,7 @@ EXPORT_SYMBOL_GPL(hid_hw_stop);
*
* Tell underlying HW to start delivering events from the device.
* This function should be called sometime after successful call
- * to hid_hiw_start().
+ * to hid_hw_start().
*/
int hid_hw_open(struct hid_device *hdev)
{
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index c25b4718de44..3b6eccbc2519 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -56,6 +56,20 @@ static bool hid_generic_match(struct hid_device *hdev,
return true;
}
+static int hid_generic_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
static const struct hid_device_id hid_table[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, HID_ANY_ID, HID_ANY_ID) },
{ }
@@ -66,6 +80,7 @@ static struct hid_driver hid_generic = {
.name = "hid-generic",
.id_table = hid_table,
.match = hid_generic_match,
+ .probe = hid_generic_probe,
};
module_hid_driver(hid_generic);
diff --git a/drivers/hid/hid-gfrm.c b/drivers/hid/hid-gfrm.c
index 075b1c020846..cf477f8c8f4c 100644
--- a/drivers/hid/hid-gfrm.c
+++ b/drivers/hid/hid-gfrm.c
@@ -116,7 +116,7 @@ static int gfrm_probe(struct hid_device *hdev, const struct hid_device_id *id)
* those reports reach gfrm_raw_event() from hid_input_report().
*/
if (!hid_register_report(hdev, HID_INPUT_REPORT,
- GFRM100_SEARCH_KEY_REPORT_ID)) {
+ GFRM100_SEARCH_KEY_REPORT_ID, 0)) {
ret = -ENOMEM;
goto done;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 46f5ecd11bf7..a85634fe033f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -615,6 +615,7 @@
#define USB_VENDOR_ID_JESS 0x0c45
#define USB_DEVICE_ID_JESS_YUREX 0x1010
#define USB_DEVICE_ID_ASUS_MD_5112 0x5112
+#define USB_DEVICE_ID_REDRAGON_ASURA 0x760b
#define USB_VENDOR_ID_JESS2 0x0f30
#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
@@ -1012,6 +1013,10 @@
#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
+#define USB_VENDOR_ID_VALVE 0x28de
+#define USB_DEVICE_ID_STEAM_CONTROLLER 0x1102
+#define USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS 0x1142
+
#define USB_VENDOR_ID_STEELSERIES 0x1038
#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 930652c25120..ab93dd5927c3 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1110,8 +1110,31 @@ mapped:
set_bit(usage->type, input->evbit);
- while (usage->code <= max && test_and_set_bit(usage->code, bit))
- usage->code = find_next_zero_bit(bit, max + 1, usage->code);
+ /*
+ * This part is *really* controversial:
+ * - HID aims at being generic so we should do our best to export
+ * all incoming events
+ * - HID describes what events are, so there is no reason for ABS_X
+ * to be mapped to ABS_Y
+ * - HID is using *_MISC+N as a default value, but nothing prevents
+ * *_MISC+N to overwrite a legitimate event, which confuses userspace
+ * (for instance ABS_MISC + 7 is ABS_MT_SLOT, which has a different
+ * processing)
+ *
+ * If devices still want to use this (at their own risk), they will
+ * have to use the quirk HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE, but
+ * the default should be a reliable mapping.
+ */
+ while (usage->code <= max && test_and_set_bit(usage->code, bit)) {
+ if (device->quirks & HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE) {
+ usage->code = find_next_zero_bit(bit,
+ max + 1,
+ usage->code);
+ } else {
+ device->status |= HID_STAT_DUP_DETECTED;
+ goto ignore;
+ }
+ }
if (usage->code > max)
goto ignore;
@@ -1487,15 +1510,56 @@ static void report_features(struct hid_device *hid)
}
}
-static struct hid_input *hidinput_allocate(struct hid_device *hid)
+static struct hid_input *hidinput_allocate(struct hid_device *hid,
+ unsigned int application)
{
struct hid_input *hidinput = kzalloc(sizeof(*hidinput), GFP_KERNEL);
struct input_dev *input_dev = input_allocate_device();
- if (!hidinput || !input_dev) {
- kfree(hidinput);
- input_free_device(input_dev);
- hid_err(hid, "Out of memory during hid input probe\n");
- return NULL;
+ const char *suffix = NULL;
+
+ if (!hidinput || !input_dev)
+ goto fail;
+
+ if ((hid->quirks & HID_QUIRK_INPUT_PER_APP) &&
+ hid->maxapplication > 1) {
+ switch (application) {
+ case HID_GD_KEYBOARD:
+ suffix = "Keyboard";
+ break;
+ case HID_GD_KEYPAD:
+ suffix = "Keypad";
+ break;
+ case HID_GD_MOUSE:
+ suffix = "Mouse";
+ break;
+ case HID_DG_STYLUS:
+ suffix = "Pen";
+ break;
+ case HID_DG_TOUCHSCREEN:
+ suffix = "Touchscreen";
+ break;
+ case HID_DG_TOUCHPAD:
+ suffix = "Touchpad";
+ break;
+ case HID_GD_SYSTEM_CONTROL:
+ suffix = "System Control";
+ break;
+ case HID_CP_CONSUMER_CONTROL:
+ suffix = "Consumer Control";
+ break;
+ case HID_GD_WIRELESS_RADIO_CTLS:
+ suffix = "Wireless Radio Control";
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (suffix) {
+ hidinput->name = kasprintf(GFP_KERNEL, "%s %s",
+ hid->name, suffix);
+ if (!hidinput->name)
+ goto fail;
}
input_set_drvdata(input_dev, hid);
@@ -1505,7 +1569,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid)
input_dev->setkeycode = hidinput_setkeycode;
input_dev->getkeycode = hidinput_getkeycode;
- input_dev->name = hid->name;
+ input_dev->name = hidinput->name ? hidinput->name : hid->name;
input_dev->phys = hid->phys;
input_dev->uniq = hid->uniq;
input_dev->id.bustype = hid->bus;
@@ -1513,10 +1577,19 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid)
input_dev->id.product = hid->product;
input_dev->id.version = hid->version;
input_dev->dev.parent = &hid->dev;
+
hidinput->input = input_dev;
list_add_tail(&hidinput->list, &hid->inputs);
+ INIT_LIST_HEAD(&hidinput->reports);
+
return hidinput;
+
+fail:
+ kfree(hidinput);
+ input_free_device(input_dev);
+ hid_err(hid, "Out of memory during hid input probe\n");
+ return NULL;
}
static bool hidinput_has_been_populated(struct hid_input *hidinput)
@@ -1562,6 +1635,7 @@ static void hidinput_cleanup_hidinput(struct hid_device *hid,
list_del(&hidinput->list);
input_free_device(hidinput->input);
+ kfree(hidinput->name);
for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) {
if (k == HID_OUTPUT_REPORT &&
@@ -1594,6 +1668,20 @@ static struct hid_input *hidinput_match(struct hid_report *report)
return NULL;
}
+static struct hid_input *hidinput_match_application(struct hid_report *report)
+{
+ struct hid_device *hid = report->device;
+ struct hid_input *hidinput;
+
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ if (hidinput->report &&
+ hidinput->report->application == report->application)
+ return hidinput;
+ }
+
+ return NULL;
+}
+
static inline void hidinput_configure_usages(struct hid_input *hidinput,
struct hid_report *report)
{
@@ -1616,11 +1704,14 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
struct hid_driver *drv = hid->driver;
struct hid_report *report;
struct hid_input *next, *hidinput = NULL;
+ unsigned int application;
int i, k;
INIT_LIST_HEAD(&hid->inputs);
INIT_WORK(&hid->led_work, hidinput_led_worker);
+ hid->status &= ~HID_STAT_DUP_DETECTED;
+
if (!force) {
for (i = 0; i < hid->maxcollection; i++) {
struct hid_collection *col = &hid->collection[i];
@@ -1646,15 +1737,20 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
if (!report->maxfield)
continue;
+ application = report->application;
+
/*
* Find the previous hidinput report attached
* to this report id.
*/
if (hid->quirks & HID_QUIRK_MULTI_INPUT)
hidinput = hidinput_match(report);
+ else if (hid->maxapplication > 1 &&
+ (hid->quirks & HID_QUIRK_INPUT_PER_APP))
+ hidinput = hidinput_match_application(report);
if (!hidinput) {
- hidinput = hidinput_allocate(hid);
+ hidinput = hidinput_allocate(hid, application);
if (!hidinput)
goto out_unwind;
}
@@ -1663,6 +1759,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
if (hid->quirks & HID_QUIRK_MULTI_INPUT)
hidinput->report = report;
+
+ list_add_tail(&report->hidinput_list,
+ &hidinput->reports);
}
}
@@ -1687,6 +1786,10 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
goto out_unwind;
}
+ if (hid->status & HID_STAT_DUP_DETECTED)
+ hid_dbg(hid,
+ "Some usages could not be mapped, please use HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE if this is legitimate.\n");
+
return 0;
out_unwind:
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 42ed887ba0be..b454c4386157 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -531,12 +531,12 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
- MOUSE_REPORT_ID);
+ MOUSE_REPORT_ID, 0);
else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
- TRACKPAD_REPORT_ID);
+ TRACKPAD_REPORT_ID, 0);
report = hid_register_report(hdev, HID_INPUT_REPORT,
- DOUBLE_REPORT_ID);
+ DOUBLE_REPORT_ID, 0);
}
if (!report) {
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index dad2fbb0e3f8..45968f7970f8 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -81,6 +81,11 @@ MODULE_LICENSE("GPL");
#define MT_BUTTONTYPE_CLICKPAD 0
+enum latency_mode {
+ HID_LATENCY_NORMAL = 0,
+ HID_LATENCY_HIGH = 1,
+};
+
#define MT_IO_FLAGS_RUNNING 0
#define MT_IO_FLAGS_ACTIVE_SLOTS 1
#define MT_IO_FLAGS_PENDING_SLOTS 2
@@ -127,11 +132,7 @@ struct mt_device {
int left_button_state; /* left button state */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
- __s16 inputmode; /* InputMode HID feature, -1 if non-existent */
- __s16 inputmode_index; /* InputMode HID feature index in the report */
- __s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
- -1 if non-existent */
- __u8 inputmode_value; /* InputMode HID feature value */
+ __u8 inputmode_value; /* InputMode HID feature value */
__u8 num_received; /* how many contacts we received */
__u8 num_expected; /* expected last contact index */
__u8 maxcontacts;
@@ -415,32 +416,9 @@ static void mt_feature_mapping(struct hid_device *hdev,
struct mt_device *td = hid_get_drvdata(hdev);
switch (usage->hid) {
- case HID_DG_INPUTMODE:
- /* Ignore if value index is out of bounds. */
- if (usage->usage_index >= field->report_count) {
- dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
- break;
- }
-
- if (td->inputmode < 0) {
- td->inputmode = field->report->id;
- td->inputmode_index = usage->usage_index;
- } else {
- /*
- * Some elan panels wrongly declare 2 input mode
- * features, and silently ignore when we set the
- * value in the second field. Skip the second feature
- * and hope for the best.
- */
- dev_info(&hdev->dev,
- "Ignoring the extra HID_DG_INPUTMODE\n");
- }
-
- break;
case HID_DG_CONTACTMAX:
mt_get_feature(hdev, field->report);
- td->maxcontact_report_id = field->report->id;
td->maxcontacts = field->value[0];
if (!td->maxcontacts &&
field->logical_maximum <= MT_MAX_MAXCONTACT)
@@ -620,13 +598,16 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
hid_map_usage(hi, usage, bit, max,
EV_MSC, MSC_TIMESTAMP);
input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
- mt_store_field(usage, td, hi);
/* Ignore if indexes are out of bounds. */
if (field->index >= field->report->maxfield ||
usage->usage_index >= field->report_count)
return 1;
td->scantime_index = field->index;
td->scantime_val_index = usage->usage_index;
+ /*
+ * We don't set td->last_slot_field as scan time is
+ * global to the report.
+ */
return 1;
case HID_DG_CONTACTCOUNT:
/* Ignore if indexes are out of bounds. */
@@ -1181,61 +1162,100 @@ static void mt_report(struct hid_device *hid, struct hid_report *report)
input_sync(field->hidinput->input);
}
-static void mt_set_input_mode(struct hid_device *hdev)
+static bool mt_need_to_apply_feature(struct hid_device *hdev,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ enum latency_mode latency,
+ bool surface_switch,
+ bool button_switch)
{
struct mt_device *td = hid_get_drvdata(hdev);
- struct hid_report *r;
- struct hid_report_enum *re;
struct mt_class *cls = &td->mtclass;
+ struct hid_report *report = field->report;
+ unsigned int index = usage->usage_index;
char *buf;
u32 report_len;
+ int max;
- if (td->inputmode < 0)
- return;
-
- re = &(hdev->report_enum[HID_FEATURE_REPORT]);
- r = re->report_id_hash[td->inputmode];
- if (r) {
+ switch (usage->hid) {
+ case HID_DG_INPUTMODE:
if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
- report_len = hid_report_len(r);
- buf = hid_alloc_report_buf(r, GFP_KERNEL);
+ report_len = hid_report_len(report);
+ buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf) {
- hid_err(hdev, "failed to allocate buffer for report\n");
- return;
+ hid_err(hdev,
+ "failed to allocate buffer for report\n");
+ return false;
}
- hid_hw_raw_request(hdev, r->id, buf, report_len,
+ hid_hw_raw_request(hdev, report->id, buf, report_len,
HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
kfree(buf);
}
- r->field[0]->value[td->inputmode_index] = td->inputmode_value;
- hid_hw_request(hdev, r, HID_REQ_SET_REPORT);
- }
-}
-static void mt_set_maxcontacts(struct hid_device *hdev)
-{
- struct mt_device *td = hid_get_drvdata(hdev);
- struct hid_report *r;
- struct hid_report_enum *re;
- int fieldmax, max;
+ field->value[index] = td->inputmode_value;
+ return true;
- if (td->maxcontact_report_id < 0)
- return;
+ case HID_DG_CONTACTMAX:
+ if (td->mtclass.maxcontacts) {
+ max = min_t(int, field->logical_maximum,
+ td->mtclass.maxcontacts);
+ if (field->value[index] != max) {
+ field->value[index] = max;
+ return true;
+ }
+ }
+ break;
- if (!td->mtclass.maxcontacts)
- return;
+ case HID_DG_LATENCYMODE:
+ field->value[index] = latency;
+ return true;
+
+ case HID_DG_SURFACESWITCH:
+ field->value[index] = surface_switch;
+ return true;
+
+ case HID_DG_BUTTONSWITCH:
+ field->value[index] = button_switch;
+ return true;
+ }
- re = &hdev->report_enum[HID_FEATURE_REPORT];
- r = re->report_id_hash[td->maxcontact_report_id];
- if (r) {
- max = td->mtclass.maxcontacts;
- fieldmax = r->field[0]->logical_maximum;
- max = min(fieldmax, max);
- if (r->field[0]->value[0] != max) {
- r->field[0]->value[0] = max;
- hid_hw_request(hdev, r, HID_REQ_SET_REPORT);
+ return false; /* no need to update the report */
+}
+
+static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
+ bool surface_switch, bool button_switch)
+{
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ struct hid_usage *usage;
+ int i, j;
+ bool update_report;
+
+ rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ update_report = false;
+
+ for (i = 0; i < rep->maxfield; i++) {
+ /* Ignore if report count is out of bounds. */
+ if (rep->field[i]->report_count < 1)
+ continue;
+
+ for (j = 0; j < rep->field[i]->maxusage; j++) {
+ usage = &rep->field[i]->usage[j];
+
+ if (mt_need_to_apply_feature(hdev,
+ rep->field[i],
+ usage,
+ latency,
+ surface_switch,
+ button_switch))
+ update_report = true;
+ }
}
+
+ if (update_report)
+ hid_hw_request(hdev, rep, HID_REQ_SET_REPORT);
}
}
@@ -1274,54 +1294,48 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
struct mt_device *td = hid_get_drvdata(hdev);
char *name;
const char *suffix = NULL;
- struct hid_field *field = hi->report->field[0];
+ unsigned int application = 0;
+ struct hid_report *report;
int ret;
- if (hi->report->id == td->mt_report_id) {
- ret = mt_touch_input_configured(hdev, hi);
- if (ret)
- return ret;
+ list_for_each_entry(report, &hi->reports, hidinput_list) {
+ application = report->application;
+ if (report->id == td->mt_report_id) {
+ ret = mt_touch_input_configured(hdev, hi);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
+ * for the stylus. Check this first, and then rely on
+ * the application field.
+ */
+ if (report->field[0]->physical == HID_DG_STYLUS) {
+ suffix = "Pen";
+ /* force BTN_STYLUS to allow tablet matching in udev */
+ __set_bit(BTN_STYLUS, hi->input->keybit);
+ }
}
- /*
- * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
- * for the stylus. Check this first, and then rely on the application
- * field.
- */
- if (hi->report->field[0]->physical == HID_DG_STYLUS) {
- suffix = "Pen";
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- } else {
- switch (field->application) {
+ if (!suffix) {
+ switch (application) {
case HID_GD_KEYBOARD:
- suffix = "Keyboard";
- break;
case HID_GD_KEYPAD:
- suffix = "Keypad";
- break;
case HID_GD_MOUSE:
- suffix = "Mouse";
- break;
- case HID_DG_STYLUS:
- suffix = "Pen";
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- break;
- case HID_DG_TOUCHSCREEN:
- /* we do not set suffix = "Touchscreen" */
- break;
case HID_DG_TOUCHPAD:
- suffix = "Touchpad";
- break;
case HID_GD_SYSTEM_CONTROL:
- suffix = "System Control";
- break;
case HID_CP_CONSUMER_CONTROL:
- suffix = "Consumer Control";
- break;
case HID_GD_WIRELESS_RADIO_CTLS:
- suffix = "Wireless Radio Control";
+ /* already handled by hid core */
+ break;
+ case HID_DG_TOUCHSCREEN:
+ /* we do not set suffix = "Touchscreen" */
+ hi->input->name = hdev->name;
+ break;
+ case HID_DG_STYLUS:
+ /* force BTN_STYLUS to allow tablet matching in udev */
+ __set_bit(BTN_STYLUS, hi->input->keybit);
break;
case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
suffix = "Custom Media Keys";
@@ -1434,8 +1448,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
td->hdev = hdev;
td->mtclass = *mtclass;
- td->inputmode = -1;
- td->maxcontact_report_id = -1;
td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
td->cc_index = -1;
td->scantime_index = -1;
@@ -1459,10 +1471,10 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
/*
* This allows the driver to handle different input sensors
- * that emits events through different reports on the same HID
+ * that emits events through different applications on the same HID
* device.
*/
- hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
timer_setup(&td->release_timer, mt_expired_timeout, 0);
@@ -1482,8 +1494,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_warn(&hdev->dev, "Cannot allocate sysfs group for %s\n",
hdev->name);
- mt_set_maxcontacts(hdev);
- mt_set_input_mode(hdev);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
/* release .fields memory as it is not used anymore */
devm_kfree(&hdev->dev, td->fields);
@@ -1496,8 +1507,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
static int mt_reset_resume(struct hid_device *hdev)
{
mt_release_contacts(hdev);
- mt_set_maxcontacts(hdev);
- mt_set_input_mode(hdev);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
return 0;
}
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index febb21ee190e..584b10d3fc3d 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -2,7 +2,7 @@
* Plantronics USB HID Driver
*
* Copyright (c) 2014 JD Cole <jd.cole@plantronics.com>
- * Copyright (c) 2015 Terry Junge <terry.junge@plantronics.com>
+ * Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com>
*/
/*
@@ -48,6 +48,10 @@ static int plantronics_input_mapping(struct hid_device *hdev,
unsigned short mapped_key;
unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
+ /* special case for PTT products */
+ if (field->application == HID_GD_JOYSTICK)
+ goto defaulted;
+
/* handle volume up/down mapping */
/* non-standard types or multi-HID interfaces - plt_type is PID */
if (!(plt_type & HID_USAGE_PAGE)) {
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 587e2681a53f..249d49b6b16c 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -416,7 +416,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
#endif
#if IS_ENABLED(CONFIG_HID_LED)
- { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
diff --git a/drivers/hid/hid-redragon.c b/drivers/hid/hid-redragon.c
new file mode 100644
index 000000000000..daf59578bf93
--- /dev/null
+++ b/drivers/hid/hid-redragon.c
@@ -0,0 +1,86 @@
+/*
+ * HID driver for Redragon keyboards
+ *
+ * Copyright (c) 2017 Robert Munteanu
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+
+/*
+ * The Redragon Asura keyboard sends an incorrect HID descriptor.
+ * At byte 100 it contains
+ *
+ * 0x81, 0x00
+ *
+ * which is Input (Data, Arr, Abs), but it should be
+ *
+ * 0x81, 0x02
+ *
+ * which is Input (Data, Var, Abs), which is consistent with the way
+ * key codes are generated.
+ */
+
+static __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize >= 102 && rdesc[100] == 0x81 && rdesc[101] == 0x00) {
+ dev_info(&hdev->dev, "Fixing Redragon ASURA report descriptor.\n");
+ rdesc[101] = 0x02;
+ }
+
+ return rdesc;
+}
+
+static int redragon_probe(struct hid_device *dev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(dev);
+ if (ret) {
+ hid_err(dev, "parse failed\n");
+ return ret;
+ }
+
+ /* do not register unused input device */
+ if (dev->maxapplication == 1)
+ return 0;
+
+ ret = hid_hw_start(dev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(dev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+static const struct hid_device_id redragon_devices[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_REDRAGON_ASURA)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, redragon_devices);
+
+static struct hid_driver redragon_driver = {
+ .name = "redragon",
+ .id_table = redragon_devices,
+ .report_fixup = redragon_report_fixup,
+ .probe = redragon_probe
+};
+
+module_hid_driver(redragon_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 9c9362149641..9e33165250a3 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -413,6 +413,24 @@ static int rmi_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
+static void rmi_report(struct hid_device *hid, struct hid_report *report)
+{
+ struct hid_field *field = report->field[0];
+
+ if (!(hid->claimed & HID_CLAIMED_INPUT))
+ return;
+
+ switch (report->id) {
+ case RMI_READ_DATA_REPORT_ID:
+ /* fall-through */
+ case RMI_ATTN_REPORT_ID:
+ return;
+ }
+
+ if (field && field->hidinput && field->hidinput->input)
+ input_sync(field->hidinput->input);
+}
+
#ifdef CONFIG_PM
static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
{
@@ -637,6 +655,7 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
hid_set_drvdata(hdev, data);
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
ret = hid_parse(hdev);
if (ret) {
@@ -744,6 +763,7 @@ static struct hid_driver rmi_driver = {
.remove = rmi_remove,
.event = rmi_event,
.raw_event = rmi_raw_event,
+ .report = rmi_report,
.input_mapping = rmi_input_mapping,
.input_configured = rmi_input_configured,
#ifdef CONFIG_PM
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
new file mode 100644
index 000000000000..cb86cc834201
--- /dev/null
+++ b/drivers/hid/hid-steam.c
@@ -0,0 +1,1115 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for Valve Steam Controller
+ *
+ * Copyright (c) 2018 Rodrigo Rivas Costa <rodrigorivascosta@gmail.com>
+ *
+ * Supports both the wired and wireless interfaces.
+ *
+ * This controller has a builtin emulation of mouse and keyboard: the right pad
+ * can be used as a mouse, the shoulder buttons are mouse buttons, A and B
+ * buttons are ENTER and ESCAPE, and so on. This is implemented as additional
+ * HID interfaces.
+ *
+ * This is known as the "lizard mode", because apparently lizards like to use
+ * the computer from the couch, without a proper mouse and keyboard.
+ *
+ * This driver will disable the lizard mode when the input device is opened
+ * and re-enable it when the input device is closed, so as not to break user
+ * mode behaviour. The lizard_mode parameter can be used to change that.
+ *
+ * There are a few user space applications (notably Steam Client) that use
+ * the hidraw interface directly to create input devices (XTest, uinput...).
+ * In order to avoid breaking them this driver creates a layered hidraw device,
+ * so it can detect when the client is running and then:
+ * - it will not send any command to the controller.
+ * - this input device will be disabled, to avoid double input of the same
+ * user action.
+ *
+ * For additional functions, such as changing the right-pad margin or switching
+ * the led, you can use the user-space tool at:
+ *
+ * https://github.com/rodrigorc/steamctrl
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/delay.h>
+#include <linux/power_supply.h>
+#include "hid-ids.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rodrigo Rivas Costa <rodrigorivascosta@gmail.com>");
+
+static bool lizard_mode = true;
+
+static DEFINE_MUTEX(steam_devices_lock);
+static LIST_HEAD(steam_devices);
+
+#define STEAM_QUIRK_WIRELESS BIT(0)
+
+/* Touch pads are 40 mm in diameter and 65535 units */
+#define STEAM_PAD_RESOLUTION 1638
+/* Trigger runs are about 5 mm and 256 units */
+#define STEAM_TRIGGER_RESOLUTION 51
+/* Joystick runs are about 5 mm and 256 units */
+#define STEAM_JOYSTICK_RESOLUTION 51
+
+#define STEAM_PAD_FUZZ 256
+
+/*
+ * Commands that can be sent in a feature report.
+ * Thanks to Valve for some valuable hints.
+ */
+#define STEAM_CMD_SET_MAPPINGS 0x80
+#define STEAM_CMD_CLEAR_MAPPINGS 0x81
+#define STEAM_CMD_GET_MAPPINGS 0x82
+#define STEAM_CMD_GET_ATTRIB 0x83
+#define STEAM_CMD_GET_ATTRIB_LABEL 0x84
+#define STEAM_CMD_DEFAULT_MAPPINGS 0x85
+#define STEAM_CMD_FACTORY_RESET 0x86
+#define STEAM_CMD_WRITE_REGISTER 0x87
+#define STEAM_CMD_CLEAR_REGISTER 0x88
+#define STEAM_CMD_READ_REGISTER 0x89
+#define STEAM_CMD_GET_REGISTER_LABEL 0x8a
+#define STEAM_CMD_GET_REGISTER_MAX 0x8b
+#define STEAM_CMD_GET_REGISTER_DEFAULT 0x8c
+#define STEAM_CMD_SET_MODE 0x8d
+#define STEAM_CMD_DEFAULT_MOUSE 0x8e
+#define STEAM_CMD_FORCEFEEDBAK 0x8f
+#define STEAM_CMD_REQUEST_COMM_STATUS 0xb4
+#define STEAM_CMD_GET_SERIAL 0xae
+
+/* Some useful register ids */
+#define STEAM_REG_LPAD_MODE 0x07
+#define STEAM_REG_RPAD_MODE 0x08
+#define STEAM_REG_RPAD_MARGIN 0x18
+#define STEAM_REG_LED 0x2d
+#define STEAM_REG_GYRO_MODE 0x30
+
+/* Raw event identifiers */
+#define STEAM_EV_INPUT_DATA 0x01
+#define STEAM_EV_CONNECT 0x03
+#define STEAM_EV_BATTERY 0x04
+
+/* Values for GYRO_MODE (bitmask) */
+#define STEAM_GYRO_MODE_OFF 0x0000
+#define STEAM_GYRO_MODE_STEERING 0x0001
+#define STEAM_GYRO_MODE_TILT 0x0002
+#define STEAM_GYRO_MODE_SEND_ORIENTATION 0x0004
+#define STEAM_GYRO_MODE_SEND_RAW_ACCEL 0x0008
+#define STEAM_GYRO_MODE_SEND_RAW_GYRO 0x0010
+
+/* Other random constants */
+#define STEAM_SERIAL_LEN 10
+
+struct steam_device {
+ struct list_head list;
+ spinlock_t lock;
+ struct hid_device *hdev, *client_hdev;
+ struct mutex mutex;
+ bool client_opened, input_opened;
+ struct input_dev __rcu *input;
+ unsigned long quirks;
+ struct work_struct work_connect;
+ bool connected;
+ char serial_no[STEAM_SERIAL_LEN + 1];
+ struct power_supply_desc battery_desc;
+ struct power_supply __rcu *battery;
+ u8 battery_charge;
+ u16 voltage;
+};
+
+/*
+ * Read a feature report from the controller into @data.
+ *
+ * At most @size bytes are copied out (the report-ID byte is stripped).
+ * Returns the raw hid_hw_raw_request() result: number of bytes received
+ * (including the report-ID byte) on success, negative errno on failure.
+ */
+static int steam_recv_report(struct steam_device *steam,
+		u8 *data, int size)
+{
+	struct hid_report *r;
+	u8 *buf;
+	int ret;
+
+	/* all commands go through the single (id 0) feature report */
+	r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+	if (hid_report_len(r) < 64)
+		return -EINVAL;
+
+	buf = hid_alloc_report_buf(r, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/*
+	 * The report ID is always 0, so strip the first byte from the output.
+	 * hid_report_len() is not counting the report ID, so +1 to the length
+	 * or else we get an EOVERFLOW. We are safe from a buffer overflow
+	 * because hid_alloc_report_buf() allocates +7 bytes.
+	 */
+	ret = hid_hw_raw_request(steam->hdev, 0x00,
+			buf, hid_report_len(r) + 1,
+			HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+	if (ret > 0)
+		memcpy(data, buf + 1, min(size, ret - 1));
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * Send @size bytes of @cmd to the controller as a feature report
+ * (report ID 0), retrying EPIPE failures up to 50 times.
+ *
+ * Returns the last hid_hw_raw_request() result: bytes written on success,
+ * negative errno on failure (logged).
+ *
+ * NOTE(review): buf[0] (the report-ID byte) is never written here and
+ * hid_alloc_report_buf() is not documented to zero the buffer — confirm
+ * the transport ignores it for unnumbered reports, or set buf[0] = 0.
+ */
+static int steam_send_report(struct steam_device *steam,
+		u8 *cmd, int size)
+{
+	struct hid_report *r;
+	u8 *buf;
+	unsigned int retries = 50;
+	int ret;
+
+	/* all commands go through the single (id 0) feature report */
+	r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+	if (hid_report_len(r) < 64)
+		return -EINVAL;
+
+	buf = hid_alloc_report_buf(r, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* The report ID is always 0 */
+	memcpy(buf + 1, cmd, size);
+
+	/*
+	 * Sometimes the wireless controller fails with EPIPE
+	 * when sending a feature report.
+	 * Doing a HID_REQ_GET_REPORT and waiting for a while
+	 * seems to fix that.
+	 */
+	do {
+		ret = hid_hw_raw_request(steam->hdev, 0,
+				buf, size + 1,
+				HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+		if (ret != -EPIPE)
+			break;
+		msleep(20);
+	} while (--retries);
+
+	kfree(buf);
+	if (ret < 0)
+		hid_err(steam->hdev, "%s: error %d (%*ph)\n", __func__,
+				ret, size, cmd);
+	return ret;
+}
+
+static inline int steam_send_report_byte(struct steam_device *steam, u8 cmd)
+{
+ return steam_send_report(steam, &cmd, 1);
+}
+
+/*
+ * Write one or more controller registers with STEAM_CMD_WRITE_REGISTER.
+ *
+ * The variadic tail is a sequence of (u8 reg, u16 val) pairs, terminated
+ * by a reg of 0; both are fetched as int because of varargs promotion.
+ * cmd[1] accumulates the payload length (3 bytes per pair). The 64-byte
+ * buffer fits at most 20 pairs and there is no bounds check, so keep call
+ * sites small — current callers pass at most two pairs.
+ */
+static int steam_write_registers(struct steam_device *steam,
+		/* u8 reg, u16 val */...)
+{
+	/* Send: 0x87 len (reg valLo valHi)* */
+	u8 reg;
+	u16 val;
+	u8 cmd[64] = {STEAM_CMD_WRITE_REGISTER, 0x00};
+	va_list args;
+
+	va_start(args, steam);
+	for (;;) {
+		reg = va_arg(args, int);
+		if (reg == 0)
+			break;
+		val = va_arg(args, int);
+		cmd[cmd[1] + 2] = reg;
+		cmd[cmd[1] + 3] = val & 0xff;
+		cmd[cmd[1] + 4] = val >> 8;
+		cmd[1] += 3;
+	}
+	va_end(args);
+
+	return steam_send_report(steam, cmd, 2 + cmd[1]);
+}
+
+static int steam_get_serial(struct steam_device *steam)
+{
+ /*
+ * Send: 0xae 0x15 0x01
+ * Recv: 0xae 0x15 0x01 serialnumber (10 chars)
+ */
+ int ret;
+ u8 cmd[] = {STEAM_CMD_GET_SERIAL, 0x15, 0x01};
+ u8 reply[3 + STEAM_SERIAL_LEN + 1];
+
+ ret = steam_send_report(steam, cmd, sizeof(cmd));
+ if (ret < 0)
+ return ret;
+ ret = steam_recv_report(steam, reply, sizeof(reply));
+ if (ret < 0)
+ return ret;
+ if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != 0x01)
+ return -EIO;
+ reply[3 + STEAM_SERIAL_LEN] = 0;
+ strlcpy(steam->serial_no, reply + 3, sizeof(steam->serial_no));
+ return 0;
+}
+
+/*
+ * This command requests the wireless adaptor to post an event
+ * with the connection status. Useful if this driver is loaded when
+ * the controller is already connected.
+ */
+static inline int steam_request_conn_status(struct steam_device *steam)
+{
+ return steam_send_report_byte(steam, STEAM_CMD_REQUEST_COMM_STATUS);
+}
+
+/*
+ * Turn the controller's built-in keyboard/mouse emulation ("lizard mode")
+ * on or off. Errors from the individual commands are ignored (they are
+ * logged inside steam_send_report()).
+ *
+ * All callers hold steam->mutex while calling this, serializing it against
+ * open/close state changes.
+ */
+static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
+{
+	if (enable) {
+		/* enable esc, enter, cursors */
+		steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MAPPINGS);
+		/* enable mouse */
+		steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MOUSE);
+		steam_write_registers(steam,
+			STEAM_REG_RPAD_MARGIN, 0x01, /* enable margin */
+			0);
+	} else {
+		/* disable esc, enter, cursor */
+		steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
+		steam_write_registers(steam,
+			STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
+			STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
+			0);
+	}
+}
+
+/*
+ * Re-apply the lizard-mode policy (e.g. after the lizard_mode module
+ * parameter changed or at probe time): disabled while our input device is
+ * open, otherwise whatever the parameter says. Never touches the device
+ * while a hidraw client (e.g. Steam Client) has it open.
+ */
+static void steam_update_lizard_mode(struct steam_device *steam)
+{
+	mutex_lock(&steam->mutex);
+	if (!steam->client_opened) {
+		if (steam->input_opened)
+			steam_set_lizard_mode(steam, false);
+		else
+			steam_set_lizard_mode(steam, lizard_mode);
+	}
+	mutex_unlock(&steam->mutex);
+}
+
+static int steam_input_open(struct input_dev *dev)
+{
+ struct steam_device *steam = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(steam->hdev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&steam->mutex);
+ steam->input_opened = true;
+ if (!steam->client_opened && lizard_mode)
+ steam_set_lizard_mode(steam, false);
+ mutex_unlock(&steam->mutex);
+ return 0;
+}
+
+static void steam_input_close(struct input_dev *dev)
+{
+ struct steam_device *steam = input_get_drvdata(dev);
+
+ mutex_lock(&steam->mutex);
+ steam->input_opened = false;
+ if (!steam->client_opened && lizard_mode)
+ steam_set_lizard_mode(steam, true);
+ mutex_unlock(&steam->mutex);
+
+ hid_hw_close(steam->hdev);
+}
+
+static enum power_supply_property steam_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int steam_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct steam_device *steam = power_supply_get_drvdata(psy);
+ unsigned long flags;
+ s16 volts;
+ u8 batt;
+ int ret = 0;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ volts = steam->voltage;
+ batt = steam->battery_charge;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = volts * 1000; /* mV -> uV */
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = batt;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int steam_battery_register(struct steam_device *steam)
+{
+ struct power_supply *battery;
+ struct power_supply_config battery_cfg = { .drv_data = steam, };
+ unsigned long flags;
+ int ret;
+
+ steam->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
+ steam->battery_desc.properties = steam_battery_props;
+ steam->battery_desc.num_properties = ARRAY_SIZE(steam_battery_props);
+ steam->battery_desc.get_property = steam_battery_get_property;
+ steam->battery_desc.name = devm_kasprintf(&steam->hdev->dev,
+ GFP_KERNEL, "steam-controller-%s-battery",
+ steam->serial_no);
+ if (!steam->battery_desc.name)
+ return -ENOMEM;
+
+ /* avoid the warning of 0% battery while waiting for the first info */
+ spin_lock_irqsave(&steam->lock, flags);
+ steam->voltage = 3000;
+ steam->battery_charge = 100;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+ battery = power_supply_register(&steam->hdev->dev,
+ &steam->battery_desc, &battery_cfg);
+ if (IS_ERR(battery)) {
+ ret = PTR_ERR(battery);
+ hid_err(steam->hdev,
+ "%s:power_supply_register failed with error %d\n",
+ __func__, ret);
+ return ret;
+ }
+ rcu_assign_pointer(steam->battery, battery);
+ power_supply_powers(battery, &steam->hdev->dev);
+ return 0;
+}
+
+static int steam_register(struct steam_device *steam)
+{
+ struct hid_device *hdev = steam->hdev;
+ struct input_dev *input;
+ int ret;
+
+ rcu_read_lock();
+ input = rcu_dereference(steam->input);
+ rcu_read_unlock();
+ if (input) {
+ dbg_hid("%s: already connected\n", __func__);
+ return 0;
+ }
+
+ /*
+ * Unlikely, but getting the serial could fail, and it is not so
+ * important, so make up a serial number and go on.
+ */
+ if (steam_get_serial(steam) < 0)
+ strlcpy(steam->serial_no, "XXXXXXXXXX",
+ sizeof(steam->serial_no));
+
+ hid_info(hdev, "Steam Controller '%s' connected",
+ steam->serial_no);
+
+ input = input_allocate_device();
+ if (!input)
+ return -ENOMEM;
+
+ input_set_drvdata(input, steam);
+ input->dev.parent = &hdev->dev;
+ input->open = steam_input_open;
+ input->close = steam_input_close;
+
+ input->name = (steam->quirks & STEAM_QUIRK_WIRELESS) ?
+ "Wireless Steam Controller" :
+ "Steam Controller";
+ input->phys = hdev->phys;
+ input->uniq = steam->serial_no;
+ input->id.bustype = hdev->bus;
+ input->id.vendor = hdev->vendor;
+ input->id.product = hdev->product;
+ input->id.version = hdev->version;
+
+ input_set_capability(input, EV_KEY, BTN_TR2);
+ input_set_capability(input, EV_KEY, BTN_TL2);
+ input_set_capability(input, EV_KEY, BTN_TR);
+ input_set_capability(input, EV_KEY, BTN_TL);
+ input_set_capability(input, EV_KEY, BTN_Y);
+ input_set_capability(input, EV_KEY, BTN_B);
+ input_set_capability(input, EV_KEY, BTN_X);
+ input_set_capability(input, EV_KEY, BTN_A);
+ input_set_capability(input, EV_KEY, BTN_DPAD_UP);
+ input_set_capability(input, EV_KEY, BTN_DPAD_RIGHT);
+ input_set_capability(input, EV_KEY, BTN_DPAD_LEFT);
+ input_set_capability(input, EV_KEY, BTN_DPAD_DOWN);
+ input_set_capability(input, EV_KEY, BTN_SELECT);
+ input_set_capability(input, EV_KEY, BTN_MODE);
+ input_set_capability(input, EV_KEY, BTN_START);
+ input_set_capability(input, EV_KEY, BTN_GEAR_DOWN);
+ input_set_capability(input, EV_KEY, BTN_GEAR_UP);
+ input_set_capability(input, EV_KEY, BTN_THUMBR);
+ input_set_capability(input, EV_KEY, BTN_THUMBL);
+ input_set_capability(input, EV_KEY, BTN_THUMB);
+ input_set_capability(input, EV_KEY, BTN_THUMB2);
+
+ input_set_abs_params(input, ABS_HAT2Y, 0, 255, 0, 0);
+ input_set_abs_params(input, ABS_HAT2X, 0, 255, 0, 0);
+ input_set_abs_params(input, ABS_X, -32767, 32767, 0, 0);
+ input_set_abs_params(input, ABS_Y, -32767, 32767, 0, 0);
+ input_set_abs_params(input, ABS_RX, -32767, 32767,
+ STEAM_PAD_FUZZ, 0);
+ input_set_abs_params(input, ABS_RY, -32767, 32767,
+ STEAM_PAD_FUZZ, 0);
+ input_set_abs_params(input, ABS_HAT0X, -32767, 32767,
+ STEAM_PAD_FUZZ, 0);
+ input_set_abs_params(input, ABS_HAT0Y, -32767, 32767,
+ STEAM_PAD_FUZZ, 0);
+ input_abs_set_res(input, ABS_X, STEAM_JOYSTICK_RESOLUTION);
+ input_abs_set_res(input, ABS_Y, STEAM_JOYSTICK_RESOLUTION);
+ input_abs_set_res(input, ABS_RX, STEAM_PAD_RESOLUTION);
+ input_abs_set_res(input, ABS_RY, STEAM_PAD_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT0X, STEAM_PAD_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT0Y, STEAM_PAD_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT2Y, STEAM_TRIGGER_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT2X, STEAM_TRIGGER_RESOLUTION);
+
+ ret = input_register_device(input);
+ if (ret)
+ goto input_register_fail;
+
+ rcu_assign_pointer(steam->input, input);
+
+ /* ignore battery errors, we can live without it */
+ if (steam->quirks & STEAM_QUIRK_WIRELESS)
+ steam_battery_register(steam);
+
+ return 0;
+
+input_register_fail:
+ input_free_device(input);
+ return ret;
+}
+
+static void steam_unregister(struct steam_device *steam)
+{
+ struct input_dev *input;
+ struct power_supply *battery;
+
+ rcu_read_lock();
+ input = rcu_dereference(steam->input);
+ battery = rcu_dereference(steam->battery);
+ rcu_read_unlock();
+
+ if (battery) {
+ RCU_INIT_POINTER(steam->battery, NULL);
+ synchronize_rcu();
+ power_supply_unregister(battery);
+ }
+ if (input) {
+ RCU_INIT_POINTER(steam->input, NULL);
+ synchronize_rcu();
+ hid_info(steam->hdev, "Steam Controller '%s' disconnected",
+ steam->serial_no);
+ input_unregister_device(input);
+ }
+}
+
+static void steam_work_connect_cb(struct work_struct *work)
+{
+ struct steam_device *steam = container_of(work, struct steam_device,
+ work_connect);
+ unsigned long flags;
+ bool connected;
+ int ret;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ connected = steam->connected;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+ if (connected) {
+ ret = steam_register(steam);
+ if (ret) {
+ hid_err(steam->hdev,
+ "%s:steam_register failed with error %d\n",
+ __func__, ret);
+ }
+ } else {
+ steam_unregister(steam);
+ }
+}
+
+static bool steam_is_valve_interface(struct hid_device *hdev)
+{
+ struct hid_report_enum *rep_enum;
+
+ /*
+ * The wired device creates 3 interfaces:
+ * 0: emulated mouse.
+ * 1: emulated keyboard.
+ * 2: the real game pad.
+ * The wireless device creates 5 interfaces:
+ * 0: emulated keyboard.
+ * 1-4: slots where up to 4 real game pads will be connected to.
+ * We know which one is the real gamepad interface because they are the
+ * only ones with a feature report.
+ */
+ rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
+ return !list_empty(&rep_enum->report_list);
+}
+
+static int steam_client_ll_parse(struct hid_device *hdev)
+{
+ struct steam_device *steam = hid_get_drvdata(hdev);
+
+ return hid_parse_report(hdev, steam->hdev->dev_rdesc,
+ steam->hdev->dev_rsize);
+}
+
+static int steam_client_ll_start(struct hid_device *hdev)
+{
+ return 0;
+}
+
+static void steam_client_ll_stop(struct hid_device *hdev)
+{
+}
+
+static int steam_client_ll_open(struct hid_device *hdev)
+{
+ struct steam_device *steam = hid_get_drvdata(hdev);
+ int ret;
+
+ ret = hid_hw_open(steam->hdev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&steam->mutex);
+ steam->client_opened = true;
+ mutex_unlock(&steam->mutex);
+ return ret;
+}
+
+static void steam_client_ll_close(struct hid_device *hdev)
+{
+ struct steam_device *steam = hid_get_drvdata(hdev);
+
+ mutex_lock(&steam->mutex);
+ steam->client_opened = false;
+ if (steam->input_opened)
+ steam_set_lizard_mode(steam, false);
+ else
+ steam_set_lizard_mode(steam, lizard_mode);
+ mutex_unlock(&steam->mutex);
+
+ hid_hw_close(steam->hdev);
+}
+
+static int steam_client_ll_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, u8 *buf,
+ size_t count, unsigned char report_type,
+ int reqtype)
+{
+ struct steam_device *steam = hid_get_drvdata(hdev);
+
+ return hid_hw_raw_request(steam->hdev, reportnum, buf, count,
+ report_type, reqtype);
+}
+
+static struct hid_ll_driver steam_client_ll_driver = {
+ .parse = steam_client_ll_parse,
+ .start = steam_client_ll_start,
+ .stop = steam_client_ll_stop,
+ .open = steam_client_ll_open,
+ .close = steam_client_ll_close,
+ .raw_request = steam_client_ll_raw_request,
+};
+
+static struct hid_device *steam_create_client_hid(struct hid_device *hdev)
+{
+ struct hid_device *client_hdev;
+
+ client_hdev = hid_allocate_device();
+ if (IS_ERR(client_hdev))
+ return client_hdev;
+
+ client_hdev->ll_driver = &steam_client_ll_driver;
+ client_hdev->dev.parent = hdev->dev.parent;
+ client_hdev->bus = hdev->bus;
+ client_hdev->vendor = hdev->vendor;
+ client_hdev->product = hdev->product;
+ client_hdev->version = hdev->version;
+ client_hdev->type = hdev->type;
+ client_hdev->country = hdev->country;
+ strlcpy(client_hdev->name, hdev->name,
+ sizeof(client_hdev->name));
+ strlcpy(client_hdev->phys, hdev->phys,
+ sizeof(client_hdev->phys));
+ /*
+ * Since we use the same device info as the real interface to
+ * trick userspace, we will be calling steam_probe recursively.
+ * We need to recognize the client interface somehow.
+ */
+ client_hdev->group = HID_GROUP_STEAM;
+ return client_hdev;
+}
+
+static int steam_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct steam_device *steam;
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev,
+ "%s:parse of hid interface failed\n", __func__);
+ return ret;
+ }
+
+ /*
+ * The virtual client_dev is only used for hidraw.
+ * Also avoid the recursive probe.
+ */
+ if (hdev->group == HID_GROUP_STEAM)
+ return hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ /*
+ * The non-valve interfaces (mouse and keyboard emulation) are
+ * connected without changes.
+ */
+ if (!steam_is_valve_interface(hdev))
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ steam = devm_kzalloc(&hdev->dev, sizeof(*steam), GFP_KERNEL);
+ if (!steam) {
+ ret = -ENOMEM;
+ goto steam_alloc_fail;
+ }
+ steam->hdev = hdev;
+ hid_set_drvdata(hdev, steam);
+ spin_lock_init(&steam->lock);
+ mutex_init(&steam->mutex);
+ steam->quirks = id->driver_data;
+ INIT_WORK(&steam->work_connect, steam_work_connect_cb);
+
+ steam->client_hdev = steam_create_client_hid(hdev);
+ if (IS_ERR(steam->client_hdev)) {
+ ret = PTR_ERR(steam->client_hdev);
+ goto client_hdev_fail;
+ }
+ hid_set_drvdata(steam->client_hdev, steam);
+
+ /*
+ * With the real steam controller interface, do not connect hidraw.
+ * Instead, create the client_hid and connect that.
+ */
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDRAW);
+ if (ret)
+ goto hid_hw_start_fail;
+
+ ret = hid_add_device(steam->client_hdev);
+ if (ret)
+ goto client_hdev_add_fail;
+
+ if (steam->quirks & STEAM_QUIRK_WIRELESS) {
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev,
+ "%s:hid_hw_open for wireless\n",
+ __func__);
+ goto hid_hw_open_fail;
+ }
+ hid_info(hdev, "Steam wireless receiver connected");
+ steam_request_conn_status(steam);
+ } else {
+ ret = steam_register(steam);
+ if (ret) {
+ hid_err(hdev,
+ "%s:steam_register failed with error %d\n",
+ __func__, ret);
+ goto input_register_fail;
+ }
+ }
+
+ mutex_lock(&steam_devices_lock);
+ steam_update_lizard_mode(steam);
+ list_add(&steam->list, &steam_devices);
+ mutex_unlock(&steam_devices_lock);
+
+ return 0;
+
+hid_hw_open_fail:
+input_register_fail:
+client_hdev_add_fail:
+ hid_hw_stop(hdev);
+hid_hw_start_fail:
+ hid_destroy_device(steam->client_hdev);
+client_hdev_fail:
+ cancel_work_sync(&steam->work_connect);
+steam_alloc_fail:
+ hid_err(hdev, "%s: failed with error %d\n",
+ __func__, ret);
+ return ret;
+}
+
+/*
+ * Undo steam_probe() in reverse order.
+ *
+ * The virtual client device (HID_GROUP_STEAM) and the non-valve interfaces
+ * (no drvdata allocated) only need hid_hw_stop(). For the real gamepad
+ * interface: drop it from the global list first so the lizard_mode param
+ * setter cannot see a dying device, destroy the client hid (which closes
+ * any hidraw user), flush the pending connect work, then stop the HW and
+ * unregister input/battery.
+ */
+static void steam_remove(struct hid_device *hdev)
+{
+	struct steam_device *steam = hid_get_drvdata(hdev);
+
+	if (!steam || hdev->group == HID_GROUP_STEAM) {
+		hid_hw_stop(hdev);
+		return;
+	}
+
+	mutex_lock(&steam_devices_lock);
+	list_del(&steam->list);
+	mutex_unlock(&steam_devices_lock);
+
+	hid_destroy_device(steam->client_hdev);
+	steam->client_opened = false;
+	cancel_work_sync(&steam->work_connect);
+	if (steam->quirks & STEAM_QUIRK_WIRELESS) {
+		hid_info(hdev, "Steam wireless receiver disconnected");
+		hid_hw_close(hdev);
+	}
+	hid_hw_stop(hdev);
+	steam_unregister(steam);
+}
+
+static void steam_do_connect_event(struct steam_device *steam, bool connected)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ steam->connected = connected;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+ if (schedule_work(&steam->work_connect) == 0)
+ dbg_hid("%s: connected=%d event already queued\n",
+ __func__, connected);
+}
+
+/*
+ * Some input data in the protocol has the opposite sign.
+ * Clamp the values to -32767..32767 so that the range is
+ * symmetrical and can be negated safely.
+ */
+static inline s16 steam_le16(u8 *data)
+{
+ s16 x = (s16) le16_to_cpup((__le16 *)data);
+
+ return x == -32768 ? -32767 : x;
+}
+
+/*
+ * The size for this message payload is 60.
+ * The known values are:
+ * (* values are not sent through wireless)
+ * (* accelerator/gyro is disabled by default)
+ * Offset| Type | Mapped to |Meaning
+ * -------+-------+-----------+--------------------------
+ * 4-7 | u32 | -- | sequence number
+ * 8-10 | 24bit | see below | buttons
+ * 11 | u8 | ABS_HAT2Y | left trigger
+ * 12 | u8 | ABS_HAT2X | right trigger
+ * 13-15 | -- | -- | always 0
+ * 16-17 | s16 | ABS_X/ABS_HAT0X | X value
+ * 18-19 | s16 | ABS_Y/ABS_HAT0Y | Y value
+ * 20-21 | s16 | ABS_RX | right-pad X value
+ * 22-23 | s16 | ABS_RY | right-pad Y value
+ * 24-25 | s16 | -- | * left trigger
+ * 26-27 | s16 | -- | * right trigger
+ * 28-29 | s16 | -- | * accelerometer X value
+ * 30-31 | s16 | -- | * accelerometer Y value
+ * 32-33 | s16 | -- | * accelerometer Z value
+ * 34-35 | s16 | -- | gyro X value
+ * 36-37 | s16 | -- | gyro Y value
+ * 38-39 | s16 | -- | gyro Z value
+ * 40-41 | s16 | -- | quaternion W value
+ * 42-43 | s16 | -- | quaternion X value
+ * 44-45 | s16 | -- | quaternion Y value
+ * 46-47 | s16 | -- | quaternion Z value
+ * 48-49 | -- | -- | always 0
+ * 50-51 | s16 | -- | * left trigger (uncalibrated)
+ * 52-53 | s16 | -- | * right trigger (uncalibrated)
+ * 54-55 | s16 | -- | * joystick X value (uncalibrated)
+ * 56-57 | s16 | -- | * joystick Y value (uncalibrated)
+ * 58-59 | s16 | -- | * left-pad X value
+ * 60-61 | s16 | -- | * left-pad Y value
+ * 62-63 | u16 | -- | * battery voltage
+ *
+ * The buttons are:
+ * Bit | Mapped to | Description
+ * ------+------------+--------------------------------
+ * 8.0 | BTN_TR2 | right trigger fully pressed
+ * 8.1 | BTN_TL2 | left trigger fully pressed
+ * 8.2 | BTN_TR | right shoulder
+ * 8.3 | BTN_TL | left shoulder
+ * 8.4 | BTN_Y | button Y
+ * 8.5 | BTN_B | button B
+ * 8.6 | BTN_X | button X
+ * 8.7 | BTN_A | button A
+ * 9.0 | BTN_DPAD_UP | left-pad up
+ * 9.1 | BTN_DPAD_RIGHT | left-pad right
+ * 9.2 | BTN_DPAD_LEFT | left-pad left
+ * 9.3 | BTN_DPAD_DOWN | left-pad down
+ * 9.4 | BTN_SELECT | menu left
+ * 9.5 | BTN_MODE | steam logo
+ * 9.6 | BTN_START | menu right
+ * 9.7 | BTN_GEAR_DOWN | left back lever
+ * 10.0 | BTN_GEAR_UP | right back lever
+ * 10.1 | -- | left-pad clicked
+ * 10.2 | BTN_THUMBR | right-pad clicked
+ * 10.3 | BTN_THUMB | left-pad touched (but see explanation below)
+ * 10.4 | BTN_THUMB2 | right-pad touched
+ * 10.5 | -- | unknown
+ * 10.6 | BTN_THUMBL | joystick clicked
+ * 10.7 | -- | lpad_and_joy
+ */
+
+static void steam_do_input_event(struct steam_device *steam,
+ struct input_dev *input, u8 *data)
+{
+ /* 24 bits of buttons */
+ u8 b8, b9, b10;
+ s16 x, y;
+ bool lpad_touched, lpad_and_joy;
+
+ b8 = data[8];
+ b9 = data[9];
+ b10 = data[10];
+
+ input_report_abs(input, ABS_HAT2Y, data[11]);
+ input_report_abs(input, ABS_HAT2X, data[12]);
+
+ /*
+ * These two bits tells how to interpret the values X and Y.
+ * lpad_and_joy tells that the joystick and the lpad are used at the
+ * same time.
+ * lpad_touched tells whether X/Y are to be read as lpad coord or
+ * joystick values.
+ * (lpad_touched || lpad_and_joy) tells if the lpad is really touched.
+ */
+ lpad_touched = b10 & BIT(3);
+ lpad_and_joy = b10 & BIT(7);
+ x = steam_le16(data + 16);
+ y = -steam_le16(data + 18);
+
+ input_report_abs(input, lpad_touched ? ABS_HAT0X : ABS_X, x);
+ input_report_abs(input, lpad_touched ? ABS_HAT0Y : ABS_Y, y);
+ /* Check if joystick is centered */
+ if (lpad_touched && !lpad_and_joy) {
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ }
+ /* Check if lpad is untouched */
+ if (!(lpad_touched || lpad_and_joy)) {
+ input_report_abs(input, ABS_HAT0X, 0);
+ input_report_abs(input, ABS_HAT0Y, 0);
+ }
+
+ input_report_abs(input, ABS_RX, steam_le16(data + 20));
+ input_report_abs(input, ABS_RY, -steam_le16(data + 22));
+
+ input_event(input, EV_KEY, BTN_TR2, !!(b8 & BIT(0)));
+ input_event(input, EV_KEY, BTN_TL2, !!(b8 & BIT(1)));
+ input_event(input, EV_KEY, BTN_TR, !!(b8 & BIT(2)));
+ input_event(input, EV_KEY, BTN_TL, !!(b8 & BIT(3)));
+ input_event(input, EV_KEY, BTN_Y, !!(b8 & BIT(4)));
+ input_event(input, EV_KEY, BTN_B, !!(b8 & BIT(5)));
+ input_event(input, EV_KEY, BTN_X, !!(b8 & BIT(6)));
+ input_event(input, EV_KEY, BTN_A, !!(b8 & BIT(7)));
+ input_event(input, EV_KEY, BTN_SELECT, !!(b9 & BIT(4)));
+ input_event(input, EV_KEY, BTN_MODE, !!(b9 & BIT(5)));
+ input_event(input, EV_KEY, BTN_START, !!(b9 & BIT(6)));
+ input_event(input, EV_KEY, BTN_GEAR_DOWN, !!(b9 & BIT(7)));
+ input_event(input, EV_KEY, BTN_GEAR_UP, !!(b10 & BIT(0)));
+ input_event(input, EV_KEY, BTN_THUMBR, !!(b10 & BIT(2)));
+ input_event(input, EV_KEY, BTN_THUMBL, !!(b10 & BIT(6)));
+ input_event(input, EV_KEY, BTN_THUMB, lpad_touched || lpad_and_joy);
+ input_event(input, EV_KEY, BTN_THUMB2, !!(b10 & BIT(4)));
+ input_event(input, EV_KEY, BTN_DPAD_UP, !!(b9 & BIT(0)));
+ input_event(input, EV_KEY, BTN_DPAD_RIGHT, !!(b9 & BIT(1)));
+ input_event(input, EV_KEY, BTN_DPAD_LEFT, !!(b9 & BIT(2)));
+ input_event(input, EV_KEY, BTN_DPAD_DOWN, !!(b9 & BIT(3)));
+
+ input_sync(input);
+}
+
+/*
+ * The size for this message payload is 11.
+ * The known values are:
+ * Offset| Type | Meaning
+ * -------+-------+---------------------------
+ * 4-7 | u32 | sequence number
+ * 8-11 | -- | always 0
+ * 12-13 | u16 | voltage (mV)
+ * 14 | u8 | battery percent
+ */
+/*
+ * Cache the voltage/charge from a battery status message and notify the
+ * power_supply core.
+ *
+ * Note: the incoming @battery argument is never used — it is immediately
+ * re-fetched under rcu_read_lock() so we hold a pointer that is valid for
+ * the duration of the power_supply_changed() call.
+ */
+static void steam_do_battery_event(struct steam_device *steam,
+		struct power_supply *battery, u8 *data)
+{
+	unsigned long flags;
+
+	s16 volts = steam_le16(data + 12);
+	u8 batt = data[14];
+
+	/* Creating the battery may have failed */
+	rcu_read_lock();
+	battery = rcu_dereference(steam->battery);
+	if (likely(battery)) {
+		spin_lock_irqsave(&steam->lock, flags);
+		steam->voltage = volts;
+		steam->battery_charge = batt;
+		spin_unlock_irqrestore(&steam->lock, flags);
+		power_supply_changed(battery);
+	}
+	rcu_read_unlock();
+}
+
+static int steam_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data,
+ int size)
+{
+ struct steam_device *steam = hid_get_drvdata(hdev);
+ struct input_dev *input;
+ struct power_supply *battery;
+
+ if (!steam)
+ return 0;
+
+ if (steam->client_opened)
+ hid_input_report(steam->client_hdev, HID_FEATURE_REPORT,
+ data, size, 0);
+ /*
+ * All messages are size=64, all values little-endian.
+ * The format is:
+ * Offset| Meaning
+ * -------+--------------------------------------------
+ * 0-1 | always 0x01, 0x00, maybe protocol version?
+ * 2 | type of message
+ * 3 | length of the real payload (not checked)
+ * 4-n | payload data, depends on the type
+ *
+ * There are these known types of message:
+ * 0x01: input data (60 bytes)
+ * 0x03: wireless connect/disconnect (1 byte)
+ * 0x04: battery status (11 bytes)
+ */
+
+ if (size != 64 || data[0] != 1 || data[1] != 0)
+ return 0;
+
+ switch (data[2]) {
+ case STEAM_EV_INPUT_DATA:
+ if (steam->client_opened)
+ return 0;
+ rcu_read_lock();
+ input = rcu_dereference(steam->input);
+ if (likely(input)) {
+ steam_do_input_event(steam, input, data);
+ } else {
+ dbg_hid("%s: input data without connect event\n",
+ __func__);
+ steam_do_connect_event(steam, true);
+ }
+ rcu_read_unlock();
+ break;
+ case STEAM_EV_CONNECT:
+ /*
+ * The payload of this event is a single byte:
+ * 0x01: disconnected.
+ * 0x02: connected.
+ */
+ switch (data[4]) {
+ case 0x01:
+ steam_do_connect_event(steam, false);
+ break;
+ case 0x02:
+ steam_do_connect_event(steam, true);
+ break;
+ }
+ break;
+ case STEAM_EV_BATTERY:
+ if (steam->quirks & STEAM_QUIRK_WIRELESS) {
+ rcu_read_lock();
+ battery = rcu_dereference(steam->battery);
+ if (likely(battery)) {
+ steam_do_battery_event(steam, battery, data);
+ } else {
+ dbg_hid(
+ "%s: battery data without connect event\n",
+ __func__);
+ steam_do_connect_event(steam, true);
+ }
+ rcu_read_unlock();
+ }
+ break;
+ }
+ return 0;
+}
+
+static int steam_param_set_lizard_mode(const char *val,
+ const struct kernel_param *kp)
+{
+ struct steam_device *steam;
+ int ret;
+
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+
+ mutex_lock(&steam_devices_lock);
+ list_for_each_entry(steam, &steam_devices, list) {
+ steam_update_lizard_mode(steam);
+ }
+ mutex_unlock(&steam_devices_lock);
+ return 0;
+}
+
+static const struct kernel_param_ops steam_lizard_mode_ops = {
+ .set = steam_param_set_lizard_mode,
+ .get = param_get_bool,
+};
+
+module_param_cb(lizard_mode, &steam_lizard_mode_ops, &lizard_mode, 0644);
+MODULE_PARM_DESC(lizard_mode,
+ "Enable mouse and keyboard emulation (lizard mode) when the gamepad is not in use");
+
+static const struct hid_device_id steam_controllers[] = {
+ { /* Wired Steam Controller */
+ HID_USB_DEVICE(USB_VENDOR_ID_VALVE,
+ USB_DEVICE_ID_STEAM_CONTROLLER)
+ },
+ { /* Wireless Steam Controller */
+ HID_USB_DEVICE(USB_VENDOR_ID_VALVE,
+ USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS),
+ .driver_data = STEAM_QUIRK_WIRELESS
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, steam_controllers);
+
+static struct hid_driver steam_controller_driver = {
+ .name = "hid-steam",
+ .id_table = steam_controllers,
+ .probe = steam_probe,
+ .remove = steam_remove,
+ .raw_event = steam_raw_event,
+};
+
+module_hid_driver(steam_controller_driver);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index cc33622253aa..c1652bb7bd15 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -131,8 +131,6 @@ static const struct i2c_hid_cmd hid_no_cmd = { .length = 0 };
* static const struct i2c_hid_cmd hid_set_protocol_cmd = { I2C_HID_CMD(0x07) };
*/
-static DEFINE_MUTEX(i2c_hid_open_mut);
-
/* The main device structure */
struct i2c_hid {
struct i2c_client *client; /* i2c client */
@@ -868,6 +866,15 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
}
#ifdef CONFIG_ACPI
+static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
+ /*
+ * The CHPN0001 ACPI device, which is used to describe the Chipone
+ * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
+ */
+ {"CHPN0001", 0 },
+ { },
+};
+
static int i2c_hid_acpi_pdata(struct i2c_client *client,
struct i2c_hid_platform_data *pdata)
{
@@ -879,13 +886,18 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
acpi_handle handle;
handle = ACPI_HANDLE(&client->dev);
- if (!handle || acpi_bus_get_device(handle, &adev))
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+
+ if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
return -ENODEV;
obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
- dev_err(&client->dev, "device _DSM execution failed\n");
+ dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
return -ENODEV;
}
@@ -1000,11 +1012,8 @@ static int i2c_hid_probe(struct i2c_client *client,
goto err;
} else if (!platform_data) {
ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
- if (ret) {
- dev_err(&client->dev,
- "HID register address not provided\n");
+ if (ret)
goto err;
- }
} else {
ihid->pdata = *platform_data;
}
@@ -1054,6 +1063,14 @@ static int i2c_hid_probe(struct i2c_client *client,
pm_runtime_enable(&client->dev);
device_enable_async_suspend(&client->dev);
+ /* Make sure there is something at this address */
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
+ ret = -ENXIO;
+ goto err_pm;
+ }
+
ret = i2c_hid_fetch_hid_descriptor(ihid);
if (ret < 0)
goto err_pm;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 5f947ec20dcb..0bb44d0088ed 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2894,24 +2894,31 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
struct wacom_features *features = &wacom->features;
struct input_dev *input = wacom->pen_input;
unsigned char *data = wacom->data;
- int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
+ int x = 0, y = 0, p = 0, d = 0;
+ bool pen = false, btn1 = false, btn2 = false;
+ bool range, prox, rdy;
if (data[0] != WACOM_REPORT_PENABLED)
return 0;
- prox = (data[1] & 0x20) == 0x20;
+ range = (data[1] & 0x80) == 0x80;
+ prox = (data[1] & 0x40) == 0x40;
+ rdy = (data[1] & 0x20) == 0x20;
+
+ wacom->shared->stylus_in_proximity = range;
+ if (delay_pen_events(wacom))
+ return 0;
+
+ if (rdy) {
+ p = le16_to_cpup((__le16 *)&data[6]);
+ pen = data[1] & 0x01;
+ btn1 = data[1] & 0x02;
+ btn2 = data[1] & 0x04;
+ }
+ if (prox) {
+ x = le16_to_cpup((__le16 *)&data[2]);
+ y = le16_to_cpup((__le16 *)&data[4]);
- /*
- * All reports shared between PEN and RUBBER tool must be
- * forced to a known starting value (zero) when transitioning to
- * out-of-prox.
- *
- * If not reset then, to userspace, it will look like lost events
- * if new tool comes in-prox with same values as previous tool sent.
- *
- * Hardware does report zero in most out-of-prox cases but not all.
- */
- if (!wacom->shared->stylus_in_proximity) {
if (data[1] & 0x08) {
wacom->tool[0] = BTN_TOOL_RUBBER;
wacom->id[0] = ERASER_DEVICE_ID;
@@ -2919,16 +2926,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
wacom->tool[0] = BTN_TOOL_PEN;
wacom->id[0] = STYLUS_DEVICE_ID;
}
+ wacom->reporting_data = true;
}
-
- wacom->shared->stylus_in_proximity = prox;
- if (delay_pen_events(wacom))
- return 0;
-
- if (prox) {
- x = le16_to_cpup((__le16 *)&data[2]);
- y = le16_to_cpup((__le16 *)&data[4]);
- p = le16_to_cpup((__le16 *)&data[6]);
+ if (range) {
/*
* Convert distance from out prox to distance from tablet.
* distance will be greater than distance_max once
@@ -2937,25 +2937,29 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
*/
if (data[8] <= features->distance_max)
d = features->distance_max - data[8];
-
- pen = data[1] & 0x01;
- btn1 = data[1] & 0x02;
- btn2 = data[1] & 0x04;
} else {
wacom->id[0] = 0;
}
- input_report_key(input, BTN_TOUCH, pen);
- input_report_key(input, BTN_STYLUS, btn1);
- input_report_key(input, BTN_STYLUS2, btn2);
+ if (wacom->reporting_data) {
+ input_report_key(input, BTN_TOUCH, pen);
+ input_report_key(input, BTN_STYLUS, btn1);
+ input_report_key(input, BTN_STYLUS2, btn2);
- input_report_abs(input, ABS_X, x);
- input_report_abs(input, ABS_Y, y);
- input_report_abs(input, ABS_PRESSURE, p);
- input_report_abs(input, ABS_DISTANCE, d);
+ if (prox || !range) {
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ }
+ input_report_abs(input, ABS_PRESSURE, p);
+ input_report_abs(input, ABS_DISTANCE, d);
- input_report_key(input, wacom->tool[0], prox); /* PEN or RUBBER */
- input_report_abs(input, ABS_MISC, wacom->id[0]); /* TOOL ID */
+ input_report_key(input, wacom->tool[0], range); /* PEN or RUBBER */
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* TOOL ID */
+ }
+
+ if (!range) {
+ wacom->reporting_data = false;
+ }
return 1;
}
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 324cb8ec9405..a1d4b9366496 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -1098,7 +1098,7 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi)
kfree(hi);
}
-static int cs_char_vma_fault(struct vm_fault *vmf)
+static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
{
struct cs_char *csdata = vmf->vma->vm_private_data;
struct page *page;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 8699bb969e7e..3c836c099a8f 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -227,6 +227,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->feature_bits.value = 1;
ring_info->ring_size = page_cnt << PAGE_SHIFT;
+ ring_info->ring_size_div10_reciprocal =
+ reciprocal_value(ring_info->ring_size / 10);
ring_info->ring_datasize = ring_info->ring_size -
sizeof(struct hv_ring_buffer);
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 36f76e28a0bf..3265970aee34 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -323,9 +323,9 @@ out:
pci_write_config_byte(dev, 0x53, tmpbyte);
}
+ local_irq_restore(flags);
pci_dev_put(north);
pci_dev_put(isa_dev);
- local_irq_restore(flags);
return 0;
}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 6f25da56a169..a444bad7a2aa 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -659,8 +659,7 @@ void ide_timer_expiry (struct timer_list *t)
spin_unlock(&hwif->lock);
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
- /* local CPU only, as if we were handling an interrupt */
- local_irq_disable();
+
if (hwif->polling) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
@@ -679,6 +678,7 @@ void ide_timer_expiry (struct timer_list *t)
startstop = ide_error(drive, "irq timeout",
hwif->tp_ops->read_status(hwif));
}
+ /* Disable interrupts again, `handler' might have enabled it */
spin_lock_irq(&hwif->lock);
enable_irq(hwif->irq);
if (startstop == ide_stopped && hwif->polling == 0) {
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 210a0887dd29..d55e9ebd5628 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -108,6 +108,7 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
ide_hwif_t *hwif = drive->hwif;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
unsigned long flags;
+ bool irqs_threaded = force_irqthreads;
int i;
u8 stat;
@@ -115,8 +116,10 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
stat = tp_ops->read_status(hwif);
if (stat & ATA_BUSY) {
- local_save_flags(flags);
- local_irq_enable_in_hardirq();
+ if (!irqs_threaded) {
+ local_save_flags(flags);
+ local_irq_enable_in_hardirq();
+ }
timeout += jiffies;
while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
if (time_after(jiffies, timeout)) {
@@ -129,12 +132,14 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
if ((stat & ATA_BUSY) == 0)
break;
- local_irq_restore(flags);
+ if (!irqs_threaded)
+ local_irq_restore(flags);
*rstat = stat;
return -EBUSY;
}
}
- local_irq_restore(flags);
+ if (!irqs_threaded)
+ local_irq_restore(flags);
}
/*
* Allow status to settle, then read it again.
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index c034cd965831..89b29028d315 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -237,7 +237,6 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
while (len) {
unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs);
- int page_is_high;
page = sg_page(cursg);
offset = cursg->offset + cmd->cursg_ofs;
@@ -248,10 +247,6 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
nr_bytes = min_t(unsigned, nr_bytes, (PAGE_SIZE - offset));
- page_is_high = PageHighMem(page);
- if (page_is_high)
- local_irq_save(flags);
-
buf = kmap_atomic(page) + offset;
cmd->nleft -= nr_bytes;
@@ -270,9 +265,6 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
kunmap_atomic(buf);
- if (page_is_high)
- local_irq_restore(flags);
-
len -= nr_bytes;
}
}
@@ -413,7 +405,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
return startstop;
}
- if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
+ if (!force_irqthreads && (drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
local_irq_disable();
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index b3c8c6ef0dff..d08aeb41cd07 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT
source "drivers/iio/accel/Kconfig"
source "drivers/iio/adc/Kconfig"
+source "drivers/iio/afe/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/chemical/Kconfig"
source "drivers/iio/common/Kconfig"
@@ -92,6 +93,7 @@ source "drivers/iio/potentiometer/Kconfig"
source "drivers/iio/potentiostat/Kconfig"
source "drivers/iio/pressure/Kconfig"
source "drivers/iio/proximity/Kconfig"
+source "drivers/iio/resolver/Kconfig"
source "drivers/iio/temperature/Kconfig"
endif # IIO
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index b16b2e9ddc40..cb5993251381 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
obj-y += accel/
obj-y += adc/
+obj-y += afe/
obj-y += amplifiers/
obj-y += buffer/
obj-y += chemical/
@@ -35,5 +36,6 @@ obj-y += potentiometer/
obj-y += potentiostat/
obj-y += pressure/
obj-y += proximity/
+obj-y += resolver/
obj-y += temperature/
obj-y += trigger/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index c6d9517d7611..62ae7e5abcfa 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -5,6 +5,30 @@
menu "Accelerometers"
+config ADIS16201
+ tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say Y here to build support for Analog Devices adis16201 dual-axis
+ digital inclinometer and accelerometer.
+
+ To compile this driver as a module, say M here: the module will
+ be called adis16201.
+
+config ADIS16209
+ tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say Y here to build support for Analog Devices adis16209 dual-axis digital inclinometer
+ and accelerometer.
+
+ To compile this driver as a module, say M here: the module will be
+ called adis16209.
+
config ADXL345
tristate
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 368aedb6377a..636d4d1b2990 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -4,6 +4,8 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ADIS16201) += adis16201.o
+obj-$(CONFIG_ADIS16209) += adis16209.o
obj-$(CONFIG_ADXL345) += adxl345_core.o
obj-$(CONFIG_ADXL345_I2C) += adxl345_i2c.o
obj-$(CONFIG_ADXL345_SPI) += adxl345_spi.o
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
new file mode 100644
index 000000000000..4c1d482ea73a
--- /dev/null
+++ b/drivers/iio/accel/adis16201.c
@@ -0,0 +1,321 @@
+/*
+ * ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/imu/adis.h>
+
+#define ADIS16201_STARTUP_DELAY_MS 220
+#define ADIS16201_FLASH_CNT 0x00
+
+/* Data Output Register Information */
+#define ADIS16201_SUPPLY_OUT_REG 0x02
+#define ADIS16201_XACCL_OUT_REG 0x04
+#define ADIS16201_YACCL_OUT_REG 0x06
+#define ADIS16201_AUX_ADC_REG 0x08
+#define ADIS16201_TEMP_OUT_REG 0x0A
+#define ADIS16201_XINCL_OUT_REG 0x0C
+#define ADIS16201_YINCL_OUT_REG 0x0E
+
+/* Calibration Register Definition */
+#define ADIS16201_XACCL_OFFS_REG 0x10
+#define ADIS16201_YACCL_OFFS_REG 0x12
+#define ADIS16201_XACCL_SCALE_REG 0x14
+#define ADIS16201_YACCL_SCALE_REG 0x16
+#define ADIS16201_XINCL_OFFS_REG 0x18
+#define ADIS16201_YINCL_OFFS_REG 0x1A
+#define ADIS16201_XINCL_SCALE_REG 0x1C
+#define ADIS16201_YINCL_SCALE_REG 0x1E
+
+/* Alarm Register Definition */
+#define ADIS16201_ALM_MAG1_REG 0x20
+#define ADIS16201_ALM_MAG2_REG 0x22
+#define ADIS16201_ALM_SMPL1_REG 0x24
+#define ADIS16201_ALM_SMPL2_REG 0x26
+#define ADIS16201_ALM_CTRL_REG 0x28
+
+#define ADIS16201_AUX_DAC_REG 0x30
+#define ADIS16201_GPIO_CTRL_REG 0x32
+#define ADIS16201_SMPL_PRD_REG 0x36
+/* Operation, filter configuration */
+#define ADIS16201_AVG_CNT_REG 0x38
+#define ADIS16201_SLP_CNT_REG 0x3A
+
+/* Miscellaneous Control Register Definition */
+#define ADIS16201_MSC_CTRL_REG 0x34
+#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8)
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2)
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16201_MSC_CTRL_ACTIVE_DATA_RDY_HIGH BIT(1)
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
+
+/* Diagnostics System Status Register Definition */
+#define ADIS16201_DIAG_STAT_REG 0x3C
+#define ADIS16201_DIAG_STAT_ALARM2 BIT(9)
+#define ADIS16201_DIAG_STAT_ALARM1 BIT(8)
+#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3
+#define ADIS16201_DIAG_STAT_FLASH_UPT_FAIL_BIT 2
+/* Power supply above 3.625 V */
+#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1
+/* Power supply below 3.15 V */
+#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0
+
+/* System Command Register Definition */
+#define ADIS16201_GLOB_CMD_REG 0x3E
+#define ADIS16201_GLOB_CMD_SW_RESET BIT(7)
+#define ADIS16201_GLOB_CMD_FACTORY_RESET BIT(1)
+
+#define ADIS16201_ERROR_ACTIVE BIT(14)
+
+enum adis16201_scan {
+ ADIS16201_SCAN_ACC_X,
+ ADIS16201_SCAN_ACC_Y,
+ ADIS16201_SCAN_INCLI_X,
+ ADIS16201_SCAN_INCLI_Y,
+ ADIS16201_SCAN_SUPPLY,
+ ADIS16201_SCAN_AUX_ADC,
+ ADIS16201_SCAN_TEMP,
+};
+
+static const u8 adis16201_addresses[] = {
+ [ADIS16201_SCAN_ACC_X] = ADIS16201_XACCL_OFFS_REG,
+ [ADIS16201_SCAN_ACC_Y] = ADIS16201_YACCL_OFFS_REG,
+ [ADIS16201_SCAN_INCLI_X] = ADIS16201_XINCL_OFFS_REG,
+ [ADIS16201_SCAN_INCLI_Y] = ADIS16201_YINCL_OFFS_REG,
+};
+
+static int adis16201_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int ret;
+ int bits;
+ u8 addr;
+ s16 val16;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan,
+ ADIS16201_ERROR_ACTIVE, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->channel == 0) {
+ /* Voltage base units are mV hence 1.22 mV */
+ *val = 1;
+ *val2 = 220000;
+ } else {
+ /* Voltage base units are mV hence 0.61 mV */
+ *val = 0;
+ *val2 = 610000;
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = -470;
+ *val2 = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ /*
+ * IIO base unit for sensitivity of accelerometer
+ * is milli g.
+			 * 1 LSB represents 0.4624 mg.
+ */
+ *val = 0;
+ *val2 = IIO_G_TO_M_S_2(462400);
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_INCLI:
+ *val = 0;
+ *val2 = 100000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ /*
+ * The raw ADC value is 1278 when the temperature
+ * is 25 degrees and the scale factor per milli
+		 * degree celsius is -470.
+ */
+ *val = 25000 / -470 - 1278;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ bits = 12;
+ break;
+ case IIO_INCLI:
+ bits = 9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ addr = adis16201_addresses[chan->scan_index];
+ ret = adis_read_reg_16(st, addr, &val16);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(val16, bits - 1);
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int adis16201_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int m;
+
+ if (mask != IIO_CHAN_INFO_CALIBBIAS)
+ return -EINVAL;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ m = GENMASK(11, 0);
+ break;
+ case IIO_INCLI:
+ m = GENMASK(8, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return adis_write_reg_16(st, adis16201_addresses[chan->scan_index],
+ val & m);
+}
+
+static const struct iio_chan_spec adis16201_channels[] = {
+ ADIS_SUPPLY_CHAN(ADIS16201_SUPPLY_OUT_REG, ADIS16201_SCAN_SUPPLY, 0,
+ 12),
+ ADIS_TEMP_CHAN(ADIS16201_TEMP_OUT_REG, ADIS16201_SCAN_TEMP, 0, 12),
+ ADIS_ACCEL_CHAN(X, ADIS16201_XACCL_OUT_REG, ADIS16201_SCAN_ACC_X,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ ADIS_ACCEL_CHAN(Y, ADIS16201_YACCL_OUT_REG, ADIS16201_SCAN_ACC_Y,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
+ ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+	ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ IIO_CHAN_SOFT_TIMESTAMP(7)
+};
+
+static const struct iio_info adis16201_info = {
+ .read_raw = adis16201_read_raw,
+ .write_raw = adis16201_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+};
+
+static const char * const adis16201_status_error_msgs[] = {
+ [ADIS16201_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
+ [ADIS16201_DIAG_STAT_FLASH_UPT_FAIL_BIT] = "Flash update failed",
+ [ADIS16201_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
+ [ADIS16201_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
+};
+
+static const struct adis_data adis16201_data = {
+ .read_delay = 20,
+ .msc_ctrl_reg = ADIS16201_MSC_CTRL_REG,
+ .glob_cmd_reg = ADIS16201_GLOB_CMD_REG,
+ .diag_stat_reg = ADIS16201_DIAG_STAT_REG,
+
+ .self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
+ .startup_delay = ADIS16201_STARTUP_DELAY_MS,
+
+ .status_error_msgs = adis16201_status_error_msgs,
+ .status_error_mask = BIT(ADIS16201_DIAG_STAT_SPI_FAIL_BIT) |
+ BIT(ADIS16201_DIAG_STAT_FLASH_UPT_FAIL_BIT) |
+ BIT(ADIS16201_DIAG_STAT_POWER_HIGH_BIT) |
+ BIT(ADIS16201_DIAG_STAT_POWER_LOW_BIT),
+};
+
+static int adis16201_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adis *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16201_info;
+
+ indio_dev->channels = adis16201_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16201_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(st, indio_dev, spi, &adis16201_data);
+ if (ret)
+ return ret;
+
+ ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ if (ret)
+ return ret;
+
+ ret = adis_initial_startup(st);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto error_cleanup_buffer_trigger;
+
+ return 0;
+
+error_cleanup_buffer_trigger:
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+ return ret;
+}
+
+static int adis16201_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver adis16201_driver = {
+ .driver = {
+ .name = "adis16201",
+ },
+ .probe = adis16201_probe,
+ .remove = adis16201_remove,
+};
+module_spi_driver(adis16201_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16201");
diff --git a/drivers/staging/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c
index 72a18cfe81ee..f2dc3a5f0463 100644
--- a/drivers/staging/iio/accel/adis16209.c
+++ b/drivers/iio/accel/adis16209.c
@@ -6,7 +6,6 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -16,8 +15,6 @@
#include <linux/sysfs.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/imu/adis.h>
#define ADIS16209_STARTUP_DELAY_MS 220
@@ -71,13 +68,13 @@
#define ADIS16209_STAT_REG 0x3C
#define ADIS16209_STAT_ALARM2 BIT(9)
#define ADIS16209_STAT_ALARM1 BIT(8)
-#define ADIS16209_STAT_SELFTEST_FAIL_BIT 5
-#define ADIS16209_STAT_SPI_FAIL_BIT 3
-#define ADIS16209_STAT_FLASH_UPT_FAIL_BIT 2
+#define ADIS16209_STAT_SELFTEST_FAIL_BIT 5
+#define ADIS16209_STAT_SPI_FAIL_BIT 3
+#define ADIS16209_STAT_FLASH_UPT_FAIL_BIT 2
/* Power supply above 3.625 V */
-#define ADIS16209_STAT_POWER_HIGH_BIT 1
+#define ADIS16209_STAT_POWER_HIGH_BIT 1
/* Power supply below 3.15 V */
-#define ADIS16209_STAT_POWER_LOW_BIT 0
+#define ADIS16209_STAT_POWER_LOW_BIT 0
#define ADIS16209_CMD_REG 0x3E
#define ADIS16209_CMD_SW_RESET BIT(7)
@@ -115,25 +112,22 @@ static int adis16209_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
+ int m;
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- case IIO_INCLI:
- bits = 14;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16209_addresses[chan->scan_index][0];
- return adis_write_reg_16(st, addr, val16);
+ if (mask != IIO_CHAN_INFO_CALIBBIAS)
+ return -EINVAL;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ case IIO_INCLI:
+ m = GENMASK(13, 0);
+ break;
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+
+ return adis_write_reg_16(st, adis16209_addresses[chan->scan_index][0],
+ val & m);
}
static int adis16209_read_raw(struct iio_dev *indio_dev,
@@ -195,7 +189,7 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OFFSET:
/*
* The raw ADC value is 0x4FE when the temperature
- * is 25 degrees and the scale factor per milli
+ * is 45 degrees and the scale factor per milli
* degree celcius is -470.
*/
*val = 25000 / -470 - 0x4FE;
@@ -270,13 +264,14 @@ static const struct adis_data adis16209_data = {
static int adis16209_probe(struct spi_device *spi)
{
- int ret;
- struct adis *st;
struct iio_dev *indio_dev;
+ struct adis *st;
+ int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
+
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
@@ -290,6 +285,7 @@ static int adis16209_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16209_data);
if (ret)
return ret;
+
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
return ret;
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 208f2d9f0e8a..383c802eb5b8 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -837,29 +837,12 @@ static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
int sample_length = 3 * 2;
int ret;
int total_length = samples * sample_length;
- int i;
- size_t step = regmap_get_raw_read_max(data->regmap);
-
- if (!step || step > total_length)
- step = total_length;
- else if (step < total_length)
- step = sample_length;
-
- /*
- * Seems we have a bus with size limitation so we have to execute
- * multiple reads
- */
- for (i = 0; i < total_length; i += step) {
- ret = regmap_raw_read(data->regmap, BMC150_ACCEL_REG_FIFO_DATA,
- &buffer[i], step);
- if (ret)
- break;
- }
+ ret = regmap_raw_read(data->regmap, BMC150_ACCEL_REG_FIFO_DATA,
+ buffer, total_length);
if (ret)
dev_err(dev,
- "Error transferring data from fifo in single steps of %zu\n",
- step);
+ "Error transferring data from fifo: %d\n", ret);
return ret;
}
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 7a2da7f9d4dc..7e3d82cff3d5 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -106,6 +106,7 @@ struct mma8452_data {
u8 ctrl_reg1;
u8 data_cfg;
const struct mma_chip_info *chip_info;
+ int sleep_val;
};
/**
@@ -193,7 +194,11 @@ static int mma8452_drdy(struct mma8452_data *data)
if ((ret & MMA8452_STATUS_DRDY) == MMA8452_STATUS_DRDY)
return 0;
- msleep(20);
+ if (data->sleep_val <= 20)
+ usleep_range(data->sleep_val * 250,
+ data->sleep_val * 500);
+ else
+ msleep(20);
}
dev_err(&data->client->dev, "data not ready\n");
@@ -544,6 +549,18 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int mma8452_calculate_sleep(struct mma8452_data *data)
+{
+ int ret, i = mma8452_get_odr_index(data);
+
+ if (mma8452_samp_freq[i][0] > 0)
+ ret = 1000 / mma8452_samp_freq[i][0];
+ else
+ ret = 1000;
+
+ return ret == 0 ? 1 : ret;
+}
+
static int mma8452_standby(struct mma8452_data *data)
{
return i2c_smbus_write_byte_data(data->client, MMA8452_CTRL_REG1,
@@ -700,6 +717,8 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
+ data->sleep_val = mma8452_calculate_sleep(data);
+
ret = mma8452_change_config(data, MMA8452_CTRL_REG1,
data->ctrl_reg1);
break;
@@ -1593,6 +1612,9 @@ static int mma8452_probe(struct i2c_client *client,
data->ctrl_reg1 = MMA8452_CTRL_ACTIVE |
(MMA8452_CTRL_DR_DEFAULT << MMA8452_CTRL_DR_SHIFT);
+
+ data->sleep_val = mma8452_calculate_sleep(data);
+
ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG1,
data->ctrl_reg1);
if (ret < 0)
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index f33dadf7b262..4dceb75e3586 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -1277,7 +1277,7 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
{
struct iio_buffer *buffer;
- buffer = iio_kfifo_allocate();
+ buffer = devm_iio_kfifo_allocate(&indio_dev->dev);
if (!buffer)
return -ENOMEM;
@@ -1287,11 +1287,6 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
return 0;
}
-static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
-{
- iio_kfifo_free(indio_dev->buffer);
-}
-
static inline
int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
{
@@ -1486,7 +1481,9 @@ static int sca3000_probe(struct spi_device *spi)
}
indio_dev->modes = INDIO_DIRECT_MODE;
- sca3000_configure_ring(indio_dev);
+ ret = sca3000_configure_ring(indio_dev);
+ if (ret)
+ return ret;
if (spi->irq) {
ret = request_threaded_irq(spi->irq,
@@ -1546,8 +1543,6 @@ static int sca3000_remove(struct spi_device *spi)
if (spi->irq)
free_irq(spi->irq, indio_dev);
- sca3000_unconfigure_ring(indio_dev);
-
return 0;
}
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 6bdec8c451e0..056dddb27236 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -107,6 +107,7 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id st_accel_acpi_match[] = {
+ {"SMO8840", LNG2DM},
{"SMO8A90", LNG2DM},
{ },
};
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 70fbf92f9827..a9ff0695ddf7 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -153,6 +153,17 @@ struct ad7791_state {
const struct ad7791_chip_info *info;
};
+static const int ad7791_sample_freq_avail[8][2] = {
+ [AD7791_FILTER_RATE_120] = { 120, 0 },
+ [AD7791_FILTER_RATE_100] = { 100, 0 },
+ [AD7791_FILTER_RATE_33_3] = { 33, 300000 },
+ [AD7791_FILTER_RATE_20] = { 20, 0 },
+ [AD7791_FILTER_RATE_16_6] = { 16, 600000 },
+ [AD7791_FILTER_RATE_16_7] = { 16, 700000 },
+ [AD7791_FILTER_RATE_13_3] = { 13, 300000 },
+ [AD7791_FILTER_RATE_9_5] = { 9, 500000 },
+};
+
static struct ad7791_state *ad_sigma_delta_to_ad7791(struct ad_sigma_delta *sd)
{
return container_of(sd, struct ad7791_state, sd);
@@ -202,6 +213,7 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
{
struct ad7791_state *st = iio_priv(indio_dev);
bool unipolar = !!(st->mode & AD7791_MODE_UNIPOLAR);
+ unsigned int rate;
switch (info) {
case IIO_CHAN_INFO_RAW:
@@ -239,63 +251,56 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
*val2 = chan->scan_type.realbits - 1;
return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ rate = st->filter & AD7791_FILTER_RATE_MASK;
+ *val = ad7791_sample_freq_avail[rate][0];
+ *val2 = ad7791_sample_freq_avail[rate][1];
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
-static const char * const ad7791_sample_freq_avail[] = {
- [AD7791_FILTER_RATE_120] = "120",
- [AD7791_FILTER_RATE_100] = "100",
- [AD7791_FILTER_RATE_33_3] = "33.3",
- [AD7791_FILTER_RATE_20] = "20",
- [AD7791_FILTER_RATE_16_6] = "16.6",
- [AD7791_FILTER_RATE_16_7] = "16.7",
- [AD7791_FILTER_RATE_13_3] = "13.3",
- [AD7791_FILTER_RATE_9_5] = "9.5",
-};
-
-static ssize_t ad7791_read_frequency(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int ad7791_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7791_state *st = iio_priv(indio_dev);
- unsigned int rate = st->filter & AD7791_FILTER_RATE_MASK;
-
- return sprintf(buf, "%s\n", ad7791_sample_freq_avail[rate]);
-}
-
-static ssize_t ad7791_write_frequency(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7791_state *st = iio_priv(indio_dev);
- int i, ret;
-
- i = sysfs_match_string(ad7791_sample_freq_avail, buf);
- if (i < 0)
- return i;
+ int ret, i;
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
- st->filter &= ~AD7791_FILTER_RATE_MASK;
- st->filter |= i;
- ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter),
- st->filter);
- iio_device_release_direct_mode(indio_dev);
- return len;
-}
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++) {
+ if (ad7791_sample_freq_avail[i][0] == val &&
+ ad7791_sample_freq_avail[i][1] == val2)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ad7791_sample_freq_avail)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ st->filter &= ~AD7791_FILTER_RATE_MASK;
+ st->filter |= i;
+ ad_sd_write_reg(&st->sd, AD7791_REG_FILTER,
+ sizeof(st->filter),
+ st->filter);
+ break;
+ default:
+ ret = -EINVAL;
+ }
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ad7791_read_frequency,
- ad7791_write_frequency);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+}
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("120 100 33.3 20 16.7 16.6 13.3 9.5");
static struct attribute *ad7791_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL
};
@@ -306,12 +311,14 @@ static const struct attribute_group ad7791_attribute_group = {
static const struct iio_info ad7791_info = {
.read_raw = &ad7791_read_raw,
+ .write_raw = &ad7791_write_raw,
.attrs = &ad7791_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
};
static const struct iio_info ad7791_no_filter_info = {
.read_raw = &ad7791_read_raw,
+ .write_raw = &ad7791_write_raw,
.validate_trigger = ad_sd_validate_trigger,
};
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index cfab31162845..ad6764fb2a23 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -604,5 +604,5 @@ static struct platform_driver imx7d_adc_driver = {
module_platform_driver(imx7d_adc_driver);
MODULE_AUTHOR("Haibo Chen <haibo.chen@freescale.com>");
-MODULE_DESCRIPTION("Freeacale IMX7D ADC driver");
+MODULE_DESCRIPTION("Freescale IMX7D ADC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index ede955d9b2a4..2948909f3ee3 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -219,15 +219,19 @@ enum meson_sar_adc_chan7_mux_sel {
CHAN7_MUX_CH7_INPUT = 0x7,
};
-struct meson_sar_adc_data {
+struct meson_sar_adc_param {
bool has_bl30_integration;
unsigned long clock_rate;
u32 bandgap_reg;
unsigned int resolution;
- const char *name;
const struct regmap_config *regmap_config;
};
+struct meson_sar_adc_data {
+ const struct meson_sar_adc_param *param;
+ const char *name;
+};
+
struct meson_sar_adc_priv {
struct regmap *regmap;
struct regulator *vref;
@@ -276,7 +280,7 @@ static int meson_sar_adc_calib_val(struct iio_dev *indio_dev, int val)
/* use val_calib = scale * val_raw + offset calibration function */
tmp = div_s64((s64)val * priv->calibscale, MILLION) + priv->calibbias;
- return clamp(tmp, 0, (1 << priv->data->resolution) - 1);
+ return clamp(tmp, 0, (1 << priv->data->param->resolution) - 1);
}
static int meson_sar_adc_wait_busy_clear(struct iio_dev *indio_dev)
@@ -328,7 +332,7 @@ static int meson_sar_adc_read_raw_sample(struct iio_dev *indio_dev,
}
fifo_val = FIELD_GET(MESON_SAR_ADC_FIFO_RD_SAMPLE_VALUE_MASK, regval);
- fifo_val &= GENMASK(priv->data->resolution - 1, 0);
+ fifo_val &= GENMASK(priv->data->param->resolution - 1, 0);
*val = meson_sar_adc_calib_val(indio_dev, fifo_val);
return 0;
@@ -447,7 +451,7 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev)
mutex_lock(&indio_dev->mlock);
- if (priv->data->has_bl30_integration) {
+ if (priv->data->param->has_bl30_integration) {
/* prevent BL30 from using the SAR ADC while we are using it */
regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY,
MESON_SAR_ADC_DELAY_KERNEL_BUSY,
@@ -475,7 +479,7 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- if (priv->data->has_bl30_integration)
+ if (priv->data->param->has_bl30_integration)
/* allow BL30 to use the SAR ADC again */
regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY,
MESON_SAR_ADC_DELAY_KERNEL_BUSY, 0);
@@ -559,7 +563,7 @@ static int meson_sar_adc_iio_info_read_raw(struct iio_dev *indio_dev,
}
*val = ret / 1000;
- *val2 = priv->data->resolution;
+ *val2 = priv->data->param->resolution;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_CALIBBIAS:
@@ -632,7 +636,7 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
*/
meson_sar_adc_set_chan7_mux(indio_dev, CHAN7_MUX_CH7_INPUT);
- if (priv->data->has_bl30_integration) {
+ if (priv->data->param->has_bl30_integration) {
/*
* leave sampling delay and the input clocks as configured by
* BL30 to make sure BL30 gets the values it expects when
@@ -712,7 +716,7 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
return ret;
}
- ret = clk_set_rate(priv->adc_clk, priv->data->clock_rate);
+ ret = clk_set_rate(priv->adc_clk, priv->data->param->clock_rate);
if (ret) {
dev_err(indio_dev->dev.parent,
"failed to set adc clock rate\n");
@@ -725,14 +729,15 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ const struct meson_sar_adc_param *param = priv->data->param;
u32 enable_mask;
- if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11)
+ if (param->bandgap_reg == MESON_SAR_ADC_REG11)
enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
else
enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
- regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask,
+ regmap_update_bits(priv->regmap, param->bandgap_reg, enable_mask,
on_off ? enable_mask : 0);
}
@@ -844,8 +849,8 @@ static int meson_sar_adc_calib(struct iio_dev *indio_dev)
int ret, nominal0, nominal1, value0, value1;
/* use points 25% and 75% for calibration */
- nominal0 = (1 << priv->data->resolution) / 4;
- nominal1 = (1 << priv->data->resolution) * 3 / 4;
+ nominal0 = (1 << priv->data->param->resolution) / 4;
+ nominal1 = (1 << priv->data->param->resolution) * 3 / 4;
meson_sar_adc_set_chan7_mux(indio_dev, CHAN7_MUX_VDD_DIV4);
usleep_range(10, 20);
@@ -883,51 +888,60 @@ static const struct iio_info meson_sar_adc_iio_info = {
.read_raw = meson_sar_adc_iio_info_read_raw,
};
-static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
- .has_bl30_integration = false,
- .clock_rate = 1150000,
- .bandgap_reg = MESON_SAR_ADC_DELTA_10,
- .regmap_config = &meson_sar_adc_regmap_config_meson8,
- .resolution = 10,
- .name = "meson-meson8-saradc",
-};
-
-static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
+static const struct meson_sar_adc_param meson_sar_adc_meson8_param = {
.has_bl30_integration = false,
.clock_rate = 1150000,
.bandgap_reg = MESON_SAR_ADC_DELTA_10,
.regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
- .name = "meson-meson8b-saradc",
};
-static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
+static const struct meson_sar_adc_param meson_sar_adc_gxbb_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 10,
- .name = "meson-gxbb-saradc",
};
-static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
+static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
+};
+
+static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
+ .param = &meson_sar_adc_meson8_param,
+ .name = "meson-meson8-saradc",
+};
+
+static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
+ .param = &meson_sar_adc_meson8_param,
+ .name = "meson-meson8b-saradc",
+};
+
+static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
+ .param = &meson_sar_adc_gxbb_param,
+ .name = "meson-gxbb-saradc",
+};
+
+static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
+ .param = &meson_sar_adc_gxl_param,
.name = "meson-gxl-saradc",
};
static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
- .has_bl30_integration = true,
- .clock_rate = 1200000,
- .bandgap_reg = MESON_SAR_ADC_REG11,
- .regmap_config = &meson_sar_adc_regmap_config_gxbb,
- .resolution = 12,
+ .param = &meson_sar_adc_gxl_param,
.name = "meson-gxm-saradc",
};
+static const struct meson_sar_adc_data meson_sar_adc_axg_data = {
+ .param = &meson_sar_adc_gxl_param,
+ .name = "meson-axg-saradc",
+};
+
static const struct of_device_id meson_sar_adc_of_match[] = {
{
.compatible = "amlogic,meson8-saradc",
@@ -946,6 +960,9 @@ static const struct of_device_id meson_sar_adc_of_match[] = {
}, {
.compatible = "amlogic,meson-gxm-saradc",
.data = &meson_sar_adc_gxm_data,
+ }, {
+ .compatible = "amlogic,meson-axg-saradc",
+ .data = &meson_sar_adc_axg_data,
},
{},
};
@@ -1001,7 +1018,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
return ret;
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- priv->data->regmap_config);
+ priv->data->param->regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 40be7d9fadbf..ca432e7b6ff1 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -34,9 +34,6 @@
#define STM32F4_ADC_ADCPRE_SHIFT 16
#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16)
-/* STM32 F4 maximum analog clock rate (from datasheet) */
-#define STM32F4_ADC_MAX_CLK_RATE 36000000
-
/* STM32H7 - common registers for all ADC instances */
#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08)
@@ -51,9 +48,6 @@
#define STM32H7_CKMODE_SHIFT 16
#define STM32H7_CKMODE_MASK GENMASK(17, 16)
-/* STM32 H7 maximum analog clock rate (from datasheet) */
-#define STM32H7_ADC_MAX_CLK_RATE 36000000
-
/**
* stm32_adc_common_regs - stm32 common registers, compatible dependent data
* @csr: common status register offset
@@ -74,15 +68,17 @@ struct stm32_adc_priv;
* stm32_adc_priv_cfg - stm32 core compatible configuration data
* @regs: common registers for all instances
* @clk_sel: clock selection routine
+ * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
*/
struct stm32_adc_priv_cfg {
const struct stm32_adc_common_regs *regs;
int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *);
+ u32 max_clk_rate_hz;
};
/**
* struct stm32_adc_priv - stm32 ADC core private data
- * @irq: irq for ADC block
+ * @irq: irq(s) for ADC block
* @domain: irq domain reference
* @aclk: clock reference for the analog circuitry
* @bclk: bus clock common for all ADCs, depends on part used
@@ -91,7 +87,7 @@ struct stm32_adc_priv_cfg {
* @common: common data for all ADC instances
*/
struct stm32_adc_priv {
- int irq;
+ int irq[STM32_ADC_MAX_ADCS];
struct irq_domain *domain;
struct clk *aclk;
struct clk *bclk;
@@ -133,7 +129,7 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
}
for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
- if ((rate / stm32f4_pclk_div[i]) <= STM32F4_ADC_MAX_CLK_RATE)
+ if ((rate / stm32f4_pclk_div[i]) <= priv->cfg->max_clk_rate_hz)
break;
}
if (i >= ARRAY_SIZE(stm32f4_pclk_div)) {
@@ -222,7 +218,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
if (ckmode)
continue;
- if ((rate / div) <= STM32H7_ADC_MAX_CLK_RATE)
+ if ((rate / div) <= priv->cfg->max_clk_rate_hz)
goto out;
}
}
@@ -242,7 +238,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
if (!ckmode)
continue;
- if ((rate / div) <= STM32H7_ADC_MAX_CLK_RATE)
+ if ((rate / div) <= priv->cfg->max_clk_rate_hz)
goto out;
}
@@ -328,11 +324,24 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
struct stm32_adc_priv *priv)
{
struct device_node *np = pdev->dev.of_node;
+ unsigned int i;
+
+ for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
+ priv->irq[i] = platform_get_irq(pdev, i);
+ if (priv->irq[i] < 0) {
+ /*
+ * At least one interrupt must be provided, make others
+ * optional:
+ * - stm32f4/h7 shares a common interrupt.
+ * - stm32mp1, has one line per ADC (either for ADC1,
+ * ADC2 or both).
+ */
+ if (i && priv->irq[i] == -ENXIO)
+ continue;
+ dev_err(&pdev->dev, "failed to get irq\n");
- priv->irq = platform_get_irq(pdev, 0);
- if (priv->irq < 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return priv->irq;
+ return priv->irq[i];
+ }
}
priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
@@ -343,8 +352,12 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
return -ENOMEM;
}
- irq_set_chained_handler(priv->irq, stm32_adc_irq_handler);
- irq_set_handler_data(priv->irq, priv);
+ for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
+ if (priv->irq[i] < 0)
+ continue;
+ irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler);
+ irq_set_handler_data(priv->irq[i], priv);
+ }
return 0;
}
@@ -353,11 +366,17 @@ static void stm32_adc_irq_remove(struct platform_device *pdev,
struct stm32_adc_priv *priv)
{
int hwirq;
+ unsigned int i;
for (hwirq = 0; hwirq < STM32_ADC_MAX_ADCS; hwirq++)
irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
irq_domain_remove(priv->domain);
- irq_set_chained_handler(priv->irq, NULL);
+
+ for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
+ if (priv->irq[i] < 0)
+ continue;
+ irq_set_chained_handler(priv->irq[i], NULL);
+ }
}
static int stm32_adc_probe(struct platform_device *pdev)
@@ -497,11 +516,19 @@ static int stm32_adc_remove(struct platform_device *pdev)
static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
.regs = &stm32f4_adc_common_regs,
.clk_sel = stm32f4_adc_clk_sel,
+ .max_clk_rate_hz = 36000000,
};
static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
.regs = &stm32h7_adc_common_regs,
.clk_sel = stm32h7_adc_clk_sel,
+ .max_clk_rate_hz = 36000000,
+};
+
+static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
+ .regs = &stm32h7_adc_common_regs,
+ .clk_sel = stm32h7_adc_clk_sel,
+ .max_clk_rate_hz = 40000000,
};
static const struct of_device_id stm32_adc_of_match[] = {
@@ -512,6 +539,9 @@ static const struct of_device_id stm32_adc_of_match[] = {
.compatible = "st,stm32h7-adc-core",
.data = (void *)&stm32h7_adc_priv_cfg
}, {
+ .compatible = "st,stm32mp1-adc-core",
+ .data = (void *)&stm32mp1_adc_priv_cfg
+ }, {
},
};
MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 9a2583caedaa..378411853d75 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -84,6 +84,7 @@
#define STM32H7_ADC_CALFACT2 0xC8
/* STM32H7_ADC_ISR - bit fields */
+#define STM32MP1_VREGREADY BIT(12)
#define STM32H7_EOC BIT(2)
#define STM32H7_ADRDY BIT(0)
@@ -249,6 +250,7 @@ struct stm32_adc;
* @adc_info: per instance input channels definitions
* @trigs: external trigger sources
* @clk_required: clock is required
+ * @has_vregready: vregready status flag presence
* @selfcalib: optional routine for self-calibration
* @prepare: optional prepare routine (power-up, enable)
* @start_conv: routine to start conversions
@@ -261,6 +263,7 @@ struct stm32_adc_cfg {
const struct stm32_adc_info *adc_info;
struct stm32_adc_trig_info *trigs;
bool clk_required;
+ bool has_vregready;
int (*selfcalib)(struct stm32_adc *);
int (*prepare)(struct stm32_adc *);
void (*start_conv)(struct stm32_adc *, bool dma);
@@ -695,8 +698,12 @@ static void stm32h7_adc_stop_conv(struct stm32_adc *adc)
stm32_adc_clr_bits(adc, STM32H7_ADC_CFGR, STM32H7_DMNGT_MASK);
}
-static void stm32h7_adc_exit_pwr_down(struct stm32_adc *adc)
+static int stm32h7_adc_exit_pwr_down(struct stm32_adc *adc)
{
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ int ret;
+ u32 val;
+
/* Exit deep power down, then enable ADC voltage regulator */
stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_DEEPPWD);
stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADVREGEN);
@@ -705,7 +712,20 @@ static void stm32h7_adc_exit_pwr_down(struct stm32_adc *adc)
stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_BOOST);
/* Wait for startup time */
- usleep_range(10, 20);
+ if (!adc->cfg->has_vregready) {
+ usleep_range(10, 20);
+ return 0;
+ }
+
+ ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_ISR, val,
+ val & STM32MP1_VREGREADY, 100,
+ STM32_ADC_TIMEOUT_US);
+ if (ret) {
+ stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_DEEPPWD);
+ dev_err(&indio_dev->dev, "Failed to exit power down\n");
+ }
+
+ return ret;
}
static void stm32h7_adc_enter_pwr_down(struct stm32_adc *adc)
@@ -888,7 +908,9 @@ static int stm32h7_adc_selfcalib(struct stm32_adc *adc)
int ret;
u32 val;
- stm32h7_adc_exit_pwr_down(adc);
+ ret = stm32h7_adc_exit_pwr_down(adc);
+ if (ret)
+ return ret;
/*
* Select calibration mode:
@@ -952,7 +974,10 @@ static int stm32h7_adc_prepare(struct stm32_adc *adc)
{
int ret;
- stm32h7_adc_exit_pwr_down(adc);
+ ret = stm32h7_adc_exit_pwr_down(adc);
+ if (ret)
+ return ret;
+
stm32_adc_writel(adc, STM32H7_ADC_DIFSEL, adc->difsel);
ret = stm32h7_adc_enable(adc);
@@ -1944,9 +1969,23 @@ static const struct stm32_adc_cfg stm32h7_adc_cfg = {
.smp_cycles = stm32h7_adc_smp_cycles,
};
+static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
+ .regs = &stm32h7_adc_regspec,
+ .adc_info = &stm32h7_adc_info,
+ .trigs = stm32h7_adc_trigs,
+ .has_vregready = true,
+ .selfcalib = stm32h7_adc_selfcalib,
+ .start_conv = stm32h7_adc_start_conv,
+ .stop_conv = stm32h7_adc_stop_conv,
+ .prepare = stm32h7_adc_prepare,
+ .unprepare = stm32h7_adc_unprepare,
+ .smp_cycles = stm32h7_adc_smp_cycles,
+};
+
static const struct of_device_id stm32_adc_of_match[] = {
{ .compatible = "st,stm32f4-adc", .data = (void *)&stm32f4_adc_cfg },
{ .compatible = "st,stm32h7-adc", .data = (void *)&stm32h7_adc_cfg },
+ { .compatible = "st,stm32mp1-adc", .data = (void *)&stm32mp1_adc_cfg },
{},
};
MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index b28a716a23b2..fcd4a1c00ca0 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -8,11 +8,11 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
+#include <linux/iio/adc/stm32-dfsdm-adc.h>
#include <linux/iio/buffer.h>
#include <linux/iio/hw-consumer.h>
-#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -254,7 +254,8 @@ static int stm32_dfsdm_start_filter(struct stm32_dfsdm *dfsdm,
DFSDM_CR1_RSWSTART(1));
}
-static void stm32_dfsdm_stop_filter(struct stm32_dfsdm *dfsdm, unsigned int fl_id)
+static void stm32_dfsdm_stop_filter(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id)
{
/* Disable conversion */
regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
@@ -338,7 +339,7 @@ static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
"st,adc-channel-types", chan_idx,
&of_str);
if (!ret) {
- val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
if (val < 0)
return val;
} else {
@@ -350,7 +351,7 @@ static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
"st,adc-channel-clk-src", chan_idx,
&of_str);
if (!ret) {
- val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
if (val < 0)
return val;
} else {
@@ -1104,7 +1105,6 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
char *name;
int ret, irq, val;
-
dev_data = of_device_get_match_data(dev);
iio = devm_iio_device_alloc(dev, sizeof(*adc));
if (!iio) {
@@ -1122,8 +1122,8 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, adc);
ret = of_property_read_u32(dev->of_node, "reg", &adc->fl_id);
- if (ret != 0) {
- dev_err(dev, "Missing reg property\n");
+ if (ret != 0 || adc->fl_id >= adc->dfsdm->num_fls) {
+ dev_err(dev, "Missing or bad reg property\n");
return -EINVAL;
}
@@ -1172,7 +1172,6 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_cleanup;
- dev_err(dev, "of_platform_populate\n");
if (dev_data->type == DFSDM_AUDIO) {
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret < 0) {
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index e50efdcc41ff..bf089f5d6225 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -25,6 +25,8 @@ struct stm32_dfsdm_dev_data {
#define STM32H7_DFSDM_NUM_FILTERS 4
#define STM32H7_DFSDM_NUM_CHANNELS 8
+#define STM32MP1_DFSDM_NUM_FILTERS 6
+#define STM32MP1_DFSDM_NUM_CHANNELS 8
static bool stm32_dfsdm_volatile_reg(struct device *dev, unsigned int reg)
{
@@ -61,6 +63,21 @@ static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_data = {
.regmap_cfg = &stm32h7_dfsdm_regmap_cfg,
};
+static const struct regmap_config stm32mp1_dfsdm_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x7fc,
+ .volatile_reg = stm32_dfsdm_volatile_reg,
+ .fast_io = true,
+};
+
+static const struct stm32_dfsdm_dev_data stm32mp1_dfsdm_data = {
+ .num_filters = STM32MP1_DFSDM_NUM_FILTERS,
+ .num_channels = STM32MP1_DFSDM_NUM_CHANNELS,
+ .regmap_cfg = &stm32mp1_dfsdm_regmap_cfg,
+};
+
struct dfsdm_priv {
struct platform_device *pdev; /* platform device */
@@ -227,6 +244,11 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
}
priv->spi_clk_out_div = div_u64_rem(clk_freq, spi_freq, &rem) - 1;
+ if (!priv->spi_clk_out_div) {
+ /* spi_clk_out_div == 0 means ckout is OFF */
+ dev_err(&pdev->dev, "spi-max-frequency not achievable\n");
+ return -EINVAL;
+ }
priv->dfsdm.spi_master_freq = spi_freq;
if (rem) {
@@ -243,6 +265,10 @@ static const struct of_device_id stm32_dfsdm_of_match[] = {
.compatible = "st,stm32h7-dfsdm",
.data = &stm32h7_dfsdm_data,
},
+ {
+ .compatible = "st,stm32mp1-dfsdm",
+ .data = &stm32mp1_dfsdm_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c
index 17b021f33180..0662ca199eb0 100644
--- a/drivers/iio/adc/stx104.c
+++ b/drivers/iio/adc/stx104.c
@@ -233,6 +233,16 @@ static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(inb(stx104gpio->base) & BIT(offset));
}
+static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+
+ *bits = inb(stx104gpio->base);
+
+ return 0;
+}
+
static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
@@ -342,6 +352,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
stx104gpio->chip.direction_input = stx104_gpio_direction_input;
stx104gpio->chip.direction_output = stx104_gpio_direction_output;
stx104gpio->chip.get = stx104_gpio_get;
+ stx104gpio->chip.get_multiple = stx104_gpio_get_multiple;
stx104gpio->chip.set = stx104_gpio_set;
stx104gpio->chip.set_multiple = stx104_gpio_set_multiple;
stx104gpio->base = base[id] + 3;
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 079f133144b0..184d686ebd99 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -17,6 +17,9 @@
#include <linux/of.h>
#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/iio/sysfs.h>
#define ADS8688_CMD_REG(x) (x << 8)
@@ -155,6 +158,13 @@ static const struct attribute_group ads8688_attribute_group = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
| BIT(IIO_CHAN_INFO_SCALE) \
| BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
}
static const struct iio_chan_spec ads8684_channels[] = {
@@ -371,6 +381,28 @@ static const struct iio_info ads8688_info = {
.attrs = &ads8688_attribute_group,
};
+static irqreturn_t ads8688_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ u16 buffer[8];
+ int i, j = 0;
+
+ for (i = 0; i < indio_dev->masklength; i++) {
+ if (!test_bit(i, indio_dev->active_scan_mask))
+ continue;
+ buffer[j] = ads8688_read(indio_dev, i);
+ j++;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ pf->timestamp);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const struct ads8688_chip_info ads8688_chip_info_tbl[] = {
[ID_ADS8684] = {
.channels = ads8684_channels,
@@ -402,7 +434,7 @@ static int ads8688_probe(struct spi_device *spi)
ret = regulator_get_voltage(st->reg);
if (ret < 0)
- goto error_out;
+ goto err_regulator_disable;
st->vref_mv = ret / 1000;
} else {
@@ -430,13 +462,22 @@ static int ads8688_probe(struct spi_device *spi)
mutex_init(&st->lock);
+ ret = iio_triggered_buffer_setup(indio_dev, NULL, ads8688_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&spi->dev, "iio triggered buffer setup failed\n");
+ goto err_regulator_disable;
+ }
+
ret = iio_device_register(indio_dev);
if (ret)
- goto error_out;
+ goto err_buffer_cleanup;
return 0;
-error_out:
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+err_regulator_disable:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
@@ -449,6 +490,7 @@ static int ads8688_remove(struct spi_device *spi)
struct ads8688_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
diff --git a/drivers/iio/afe/Kconfig b/drivers/iio/afe/Kconfig
new file mode 100644
index 000000000000..c91eef04825a
--- /dev/null
+++ b/drivers/iio/afe/Kconfig
@@ -0,0 +1,19 @@
+#
+# Analog Front End drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Analog Front Ends"
+
+config IIO_RESCALE
+ tristate "IIO rescale"
+ depends on OF || COMPILE_TEST
+ help
+ Say yes here to build support for the IIO rescaling
+ that handles voltage dividers, current sense shunts and
+ current sense amplifiers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iio-rescale.
+
+endmenu
diff --git a/drivers/iio/afe/Makefile b/drivers/iio/afe/Makefile
new file mode 100644
index 000000000000..5fabb7bcac47
--- /dev/null
+++ b/drivers/iio/afe/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O Analog Front Ends (AFE)
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_IIO_RESCALE) += iio-rescale.o
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
new file mode 100644
index 000000000000..e9ceee66d1e7
--- /dev/null
+++ b/drivers/iio/afe/iio-rescale.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IIO rescale driver
+ *
+ * Copyright (C) 2018 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ */
+
+#include <linux/err.h>
+#include <linux/gcd.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+struct rescale;
+
+struct rescale_cfg {
+ enum iio_chan_type type;
+ int (*props)(struct device *dev, struct rescale *rescale);
+};
+
+struct rescale {
+ const struct rescale_cfg *cfg;
+ struct iio_channel *source;
+ struct iio_chan_spec chan;
+ struct iio_chan_spec_ext_info *ext_info;
+ s32 numerator;
+ s32 denominator;
+};
+
+static int rescale_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct rescale *rescale = iio_priv(indio_dev);
+ unsigned long long tmp;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return iio_read_channel_raw(rescale->source, val);
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = iio_read_channel_scale(rescale->source, val, val2);
+ switch (ret) {
+ case IIO_VAL_FRACTIONAL:
+ *val *= rescale->numerator;
+ *val2 *= rescale->denominator;
+ return ret;
+ case IIO_VAL_INT:
+ *val *= rescale->numerator;
+ if (rescale->denominator == 1)
+ return ret;
+ *val2 = rescale->denominator;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_VAL_FRACTIONAL_LOG2:
+ tmp = *val * 1000000000LL;
+ do_div(tmp, rescale->denominator);
+ tmp *= rescale->numerator;
+ do_div(tmp, 1000000000LL);
+ *val = tmp;
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rescale_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct rescale *rescale = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *type = IIO_VAL_INT;
+ return iio_read_avail_channel_raw(rescale->source,
+ vals, length);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info rescale_info = {
+ .read_raw = rescale_read_raw,
+ .read_avail = rescale_read_avail,
+};
+
+static ssize_t rescale_read_ext_info(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *chan,
+ char *buf)
+{
+ struct rescale *rescale = iio_priv(indio_dev);
+
+ return iio_read_channel_ext_info(rescale->source,
+ rescale->ext_info[private].name,
+ buf);
+}
+
+static ssize_t rescale_write_ext_info(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ struct rescale *rescale = iio_priv(indio_dev);
+
+ return iio_write_channel_ext_info(rescale->source,
+ rescale->ext_info[private].name,
+ buf, len);
+}
+
+static int rescale_configure_channel(struct device *dev,
+ struct rescale *rescale)
+{
+ struct iio_chan_spec *chan = &rescale->chan;
+ struct iio_chan_spec const *schan = rescale->source->channel;
+
+ chan->indexed = 1;
+ chan->output = schan->output;
+ chan->ext_info = rescale->ext_info;
+ chan->type = rescale->cfg->type;
+
+ if (!iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) ||
+ !iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
+ dev_err(dev, "source channel does not support raw/scale\n");
+ return -EINVAL;
+ }
+
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE);
+
+ if (iio_channel_has_available(schan, IIO_CHAN_INFO_RAW))
+ chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW);
+
+ return 0;
+}
+
+static int rescale_current_sense_amplifier_props(struct device *dev,
+ struct rescale *rescale)
+{
+ u32 sense;
+ u32 gain_mult = 1;
+ u32 gain_div = 1;
+ u32 factor;
+ int ret;
+
+ ret = device_property_read_u32(dev, "sense-resistor-micro-ohms",
+ &sense);
+ if (ret) {
+ dev_err(dev, "failed to read the sense resistance: %d\n", ret);
+ return ret;
+ }
+
+ device_property_read_u32(dev, "sense-gain-mult", &gain_mult);
+ device_property_read_u32(dev, "sense-gain-div", &gain_div);
+
+ /*
+ * Calculate the scaling factor, 1 / (gain * sense), or
+ * gain_div / (gain_mult * sense), while trying to keep the
+ * numerator/denominator from overflowing.
+ */
+ factor = gcd(sense, 1000000);
+ rescale->numerator = 1000000 / factor;
+ rescale->denominator = sense / factor;
+
+ factor = gcd(rescale->numerator, gain_mult);
+ rescale->numerator /= factor;
+ rescale->denominator *= gain_mult / factor;
+
+ factor = gcd(rescale->denominator, gain_div);
+ rescale->numerator *= gain_div / factor;
+ rescale->denominator /= factor;
+
+ return 0;
+}
+
+static int rescale_current_sense_shunt_props(struct device *dev,
+ struct rescale *rescale)
+{
+ u32 shunt;
+ u32 factor;
+ int ret;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &shunt);
+ if (ret) {
+ dev_err(dev, "failed to read the shunt resistance: %d\n", ret);
+ return ret;
+ }
+
+ factor = gcd(shunt, 1000000);
+ rescale->numerator = 1000000 / factor;
+ rescale->denominator = shunt / factor;
+
+ return 0;
+}
+
+static int rescale_voltage_divider_props(struct device *dev,
+ struct rescale *rescale)
+{
+ int ret;
+ u32 factor;
+
+ ret = device_property_read_u32(dev, "output-ohms",
+ &rescale->denominator);
+ if (ret) {
+ dev_err(dev, "failed to read output-ohms: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "full-ohms",
+ &rescale->numerator);
+ if (ret) {
+ dev_err(dev, "failed to read full-ohms: %d\n", ret);
+ return ret;
+ }
+
+ factor = gcd(rescale->numerator, rescale->denominator);
+ rescale->numerator /= factor;
+ rescale->denominator /= factor;
+
+ return 0;
+}
+
+enum rescale_variant {
+ CURRENT_SENSE_AMPLIFIER,
+ CURRENT_SENSE_SHUNT,
+ VOLTAGE_DIVIDER,
+};
+
+static const struct rescale_cfg rescale_cfg[] = {
+ [CURRENT_SENSE_AMPLIFIER] = {
+ .type = IIO_CURRENT,
+ .props = rescale_current_sense_amplifier_props,
+ },
+ [CURRENT_SENSE_SHUNT] = {
+ .type = IIO_CURRENT,
+ .props = rescale_current_sense_shunt_props,
+ },
+ [VOLTAGE_DIVIDER] = {
+ .type = IIO_VOLTAGE,
+ .props = rescale_voltage_divider_props,
+ },
+};
+
+static const struct of_device_id rescale_match[] = {
+ { .compatible = "current-sense-amplifier",
+ .data = &rescale_cfg[CURRENT_SENSE_AMPLIFIER], },
+ { .compatible = "current-sense-shunt",
+ .data = &rescale_cfg[CURRENT_SENSE_SHUNT], },
+ { .compatible = "voltage-divider",
+ .data = &rescale_cfg[VOLTAGE_DIVIDER], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rescale_match);
+
+static int rescale_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct iio_channel *source;
+ struct rescale *rescale;
+ int sizeof_ext_info;
+ int sizeof_priv;
+ int i;
+ int ret;
+
+ source = devm_iio_channel_get(dev, NULL);
+ if (IS_ERR(source)) {
+ if (PTR_ERR(source) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get source channel\n");
+ return PTR_ERR(source);
+ }
+
+ sizeof_ext_info = iio_get_channel_ext_info_count(source);
+ if (sizeof_ext_info) {
+ sizeof_ext_info += 1; /* one extra entry for the sentinel */
+ sizeof_ext_info *= sizeof(*rescale->ext_info);
+ }
+
+ sizeof_priv = sizeof(*rescale) + sizeof_ext_info;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof_priv);
+ if (!indio_dev)
+ return -ENOMEM;
+
+ rescale = iio_priv(indio_dev);
+
+ rescale->cfg = of_device_get_match_data(dev);
+ rescale->numerator = 1;
+ rescale->denominator = 1;
+
+ ret = rescale->cfg->props(dev, rescale);
+ if (ret)
+ return ret;
+
+ if (!rescale->numerator || !rescale->denominator) {
+ dev_err(dev, "invalid scaling factor.\n");
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ rescale->source = source;
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &rescale_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = &rescale->chan;
+ indio_dev->num_channels = 1;
+ if (sizeof_ext_info) {
+ rescale->ext_info = devm_kmemdup(dev,
+ source->channel->ext_info,
+ sizeof_ext_info, GFP_KERNEL);
+ if (!rescale->ext_info)
+ return -ENOMEM;
+
+ for (i = 0; rescale->ext_info[i].name; ++i) {
+ struct iio_chan_spec_ext_info *ext_info =
+ &rescale->ext_info[i];
+
+ if (source->channel->ext_info[i].read)
+ ext_info->read = rescale_read_ext_info;
+ if (source->channel->ext_info[i].write)
+ ext_info->write = rescale_write_ext_info;
+ ext_info->private = i;
+ }
+ }
+
+ ret = rescale_configure_channel(dev, rescale);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct platform_driver rescale_driver = {
+ .probe = rescale_probe,
+ .driver = {
+ .name = "iio-rescale",
+ .of_match_table = rescale_match,
+ },
+};
+module_platform_driver(rescale_driver);
+
+MODULE_DESCRIPTION("IIO rescale driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index 43667866321e..0138337aedd1 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -161,12 +161,14 @@ static int ad8366_probe(struct spi_device *spi)
indio_dev->channels = ad8366_channels;
indio_dev->num_channels = ARRAY_SIZE(ad8366_channels);
+ ret = ad8366_write(indio_dev, 0 , 0);
+ if (ret < 0)
+ goto error_disable_reg;
+
ret = iio_device_register(indio_dev);
if (ret)
goto error_disable_reg;
- ad8366_write(indio_dev, 0, 0);
-
return 0;
error_disable_reg:
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index abfc4bbc4cfc..a406ad31b096 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -61,9 +61,9 @@
#define ATLAS_REG_ORP_CALIB_STATUS 0x0d
#define ATLAS_REG_ORP_DATA 0x0e
-#define ATLAS_PH_INT_TIME_IN_US 450000
-#define ATLAS_EC_INT_TIME_IN_US 650000
-#define ATLAS_ORP_INT_TIME_IN_US 450000
+#define ATLAS_PH_INT_TIME_IN_MS 450
+#define ATLAS_EC_INT_TIME_IN_MS 650
+#define ATLAS_ORP_INT_TIME_IN_MS 450
enum {
ATLAS_PH_SM,
@@ -270,21 +270,21 @@ static struct atlas_device atlas_devices[] = {
.num_channels = 3,
.data_reg = ATLAS_REG_PH_DATA,
.calibration = &atlas_check_ph_calibration,
- .delay = ATLAS_PH_INT_TIME_IN_US,
+ .delay = ATLAS_PH_INT_TIME_IN_MS,
},
[ATLAS_EC_SM] = {
.channels = atlas_ec_channels,
.num_channels = 5,
.data_reg = ATLAS_REG_EC_DATA,
.calibration = &atlas_check_ec_calibration,
- .delay = ATLAS_EC_INT_TIME_IN_US,
+ .delay = ATLAS_EC_INT_TIME_IN_MS,
},
[ATLAS_ORP_SM] = {
.channels = atlas_orp_channels,
.num_channels = 2,
.data_reg = ATLAS_REG_ORP_DATA,
.calibration = &atlas_check_orp_calibration,
- .delay = ATLAS_ORP_INT_TIME_IN_US,
+ .delay = ATLAS_ORP_INT_TIME_IN_MS,
},
};
@@ -393,7 +393,7 @@ static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
}
if (suspended)
- usleep_range(data->chip->delay, data->chip->delay + 100000);
+ msleep(data->chip->delay);
ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val));
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 705cb3e72663..89cb0066a6e0 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/common/cros_ec_sensors_core.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
@@ -31,8 +32,6 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
-#include "cros_ec_sensors_core.h"
-
#define CROS_EC_SENSORS_MAX_CHANNELS 4
/* State data for ec_sensors iio driver. */
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index a620eb5ce202..414cc43c287e 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/common/cros_ec_sensors_core.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
@@ -27,8 +28,6 @@
#include <linux/sysfs.h>
#include <linux/platform_device.h>
-#include "cros_ec_sensors_core.h"
-
static char *cros_ec_loc[] = {
[MOTIONSENSE_LOC_BASE] = "base",
[MOTIONSENSE_LOC_LID] = "lid",
@@ -448,8 +447,7 @@ EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
static int __maybe_unused cros_ec_sensors_prepare(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
if (st->curr_sampl_freq == 0)
@@ -471,8 +469,7 @@ static int __maybe_unused cros_ec_sensors_prepare(struct device *dev)
static void __maybe_unused cros_ec_sensors_complete(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
if (st->curr_sampl_freq == 0)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 4905a997a7ec..1e10c0af2f2c 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -304,8 +304,7 @@ EXPORT_SYMBOL(hid_sensor_setup_trigger);
static int __maybe_unused hid_sensor_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
return _hid_sensor_power_state(attrb, false);
@@ -313,8 +312,7 @@ static int __maybe_unused hid_sensor_suspend(struct device *dev)
static int __maybe_unused hid_sensor_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
schedule_work(&attrb->work);
return 0;
@@ -322,8 +320,7 @@ static int __maybe_unused hid_sensor_resume(struct device *dev)
static int __maybe_unused hid_sensor_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
return _hid_sensor_power_state(attrb, true);
}
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 76db0768e454..06e90debb9f5 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -131,16 +131,31 @@ config LTC2632
module will be called ltc2632.
config AD5686
- tristate "Analog Devices AD5686R/AD5685R/AD5684R DAC SPI driver"
+ tristate
+
+config AD5686_SPI
+ tristate "Analog Devices AD5686 and similar multi-channel DACs (SPI)"
depends on SPI
+ select AD5686
help
- Say yes here to build support for Analog Devices AD5686R, AD5685R,
- AD5684R, AD5791 Voltage Output Digital to
- Analog Converter.
+ Say yes here to build support for Analog Devices AD5672R, AD5676,
+ AD5676R, AD5684, AD5684R, AD5684R, AD5685R, AD5686, AD5686R.
+ Voltage Output Digital to Analog Converter.
To compile this driver as a module, choose M here: the
module will be called ad5686.
+config AD5696_I2C
+ tristate "Analog Devices AD5696 and similar multi-channel DACs (I2C)"
+ depends on I2C
+ select AD5686
+ help
+ Say yes here to build support for Analog Devices AD5671R, AD5675R,
+ AD5694, AD5694R, AD5695R, AD5696, AD5696R Voltage Output Digital to
+ Analog Converter.
+ To compile this driver as a module, choose M here: the module will be
+ called ad5696.
+
config AD5755
tristate "Analog Devices AD5755/AD5755-1/AD5757/AD5735/AD5737 DAC driver"
depends on SPI_MASTER
@@ -321,6 +336,16 @@ config TI_DAC082S085
If compiled as a module, it will be called ti-dac082s085.
+config TI_DAC5571
+ tristate "Texas Instruments 8/10/12/16-bit 1/2/4-channel DAC driver"
+ depends on I2C
+ help
+ Driver for the Texas Instruments
+ DAC5571, DAC6571, DAC7571, DAC5574, DAC6574, DAC7574, DAC5573,
+ DAC6573, DAC7573, DAC8571, DAC8574.
+
+ If compiled as a module, it will be called ti-dac5571.
+
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
depends on OF
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 81e710ed7491..57aa230d34ab 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
+obj-$(CONFIG_AD5686_SPI) += ad5686-spi.o
+obj-$(CONFIG_AD5696_I2C) += ad5696-i2c.o
obj-$(CONFIG_AD7303) += ad7303.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
@@ -35,4 +37,5 @@ obj-$(CONFIG_MCP4922) += mcp4922.o
obj-$(CONFIG_STM32_DAC_CORE) += stm32-dac-core.o
obj-$(CONFIG_STM32_DAC) += stm32-dac.o
obj-$(CONFIG_TI_DAC082S085) += ti-dac082s085.o
+obj-$(CONFIG_TI_DAC5571) += ti-dac5571.o
obj-$(CONFIG_VF610_DAC) += vf610_dac.o
diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c
new file mode 100644
index 000000000000..1df9143f55e9
--- /dev/null
+++ b/drivers/iio/dac/ad5686-spi.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AD5672R, AD5676, AD5676R, AD5681R, AD5682R, AD5683, AD5683R,
+ * AD5684, AD5684R, AD5685R, AD5686, AD5686R
+ * Digital to analog converters driver
+ *
+ * Copyright 2018 Analog Devices Inc.
+ */
+
+#include "ad5686.h"
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+static int ad5686_spi_write(struct ad5686_state *st,
+ u8 cmd, u8 addr, u16 val)
+{
+ struct spi_device *spi = to_spi_device(st->dev);
+ u8 tx_len, *buf;
+
+ switch (st->chip_info->regmap_type) {
+ case AD5683_REGMAP:
+ st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) |
+ AD5683_DATA(val));
+ buf = &st->data[0].d8[1];
+ tx_len = 3;
+ break;
+ case AD5686_REGMAP:
+ st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) |
+ AD5686_ADDR(addr) |
+ val);
+ buf = &st->data[0].d8[1];
+ tx_len = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return spi_write(spi, buf, tx_len);
+}
+
+static int ad5686_spi_read(struct ad5686_state *st, u8 addr)
+{
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->data[1].d8[1],
+ .rx_buf = &st->data[2].d8[1],
+ .len = 3,
+ },
+ };
+ struct spi_device *spi = to_spi_device(st->dev);
+ u8 cmd = 0;
+ int ret;
+
+ if (st->chip_info->regmap_type == AD5686_REGMAP)
+ cmd = AD5686_CMD_READBACK_ENABLE;
+ else if (st->chip_info->regmap_type == AD5683_REGMAP)
+ cmd = AD5686_CMD_READBACK_ENABLE_V2;
+
+ st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) |
+ AD5686_ADDR(addr));
+ st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP));
+
+ ret = spi_sync_transfer(spi, t, ARRAY_SIZE(t));
+ if (ret < 0)
+ return ret;
+
+ return be32_to_cpu(st->data[2].d32);
+}
+
+static int ad5686_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ return ad5686_probe(&spi->dev, id->driver_data, id->name,
+ ad5686_spi_write, ad5686_spi_read);
+}
+
+static int ad5686_spi_remove(struct spi_device *spi)
+{
+ return ad5686_remove(&spi->dev);
+}
+
+static const struct spi_device_id ad5686_spi_id[] = {
+ {"ad5672r", ID_AD5672R},
+ {"ad5676", ID_AD5676},
+ {"ad5676r", ID_AD5676R},
+ {"ad5681r", ID_AD5681R},
+ {"ad5682r", ID_AD5682R},
+ {"ad5683", ID_AD5683},
+ {"ad5683r", ID_AD5683R},
+ {"ad5684", ID_AD5684},
+ {"ad5684r", ID_AD5684R},
+ {"ad5685", ID_AD5685R}, /* Does not exist */
+ {"ad5685r", ID_AD5685R},
+ {"ad5686", ID_AD5686},
+ {"ad5686r", ID_AD5686R},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5686_spi_id);
+
+static struct spi_driver ad5686_spi_driver = {
+ .driver = {
+ .name = "ad5686",
+ },
+ .probe = ad5686_spi_probe,
+ .remove = ad5686_spi_remove,
+ .id_table = ad5686_spi_id,
+};
+
+module_spi_driver(ad5686_spi_driver);
+
+MODULE_AUTHOR("Stefan Popa <stefan.popa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5686 and similar multi-channel DACs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 20254df7f9c7..e136f0fd38f0 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* AD5686R, AD5685R, AD5684R Digital to analog converters driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#include <linux/interrupt.h>
@@ -11,7 +10,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
@@ -19,116 +17,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#define AD5686_DAC_CHANNELS 4
-
-#define AD5686_ADDR(x) ((x) << 16)
-#define AD5686_CMD(x) ((x) << 20)
-
-#define AD5686_ADDR_DAC(chan) (0x1 << (chan))
-#define AD5686_ADDR_ALL_DAC 0xF
-
-#define AD5686_CMD_NOOP 0x0
-#define AD5686_CMD_WRITE_INPUT_N 0x1
-#define AD5686_CMD_UPDATE_DAC_N 0x2
-#define AD5686_CMD_WRITE_INPUT_N_UPDATE_N 0x3
-#define AD5686_CMD_POWERDOWN_DAC 0x4
-#define AD5686_CMD_LDAC_MASK 0x5
-#define AD5686_CMD_RESET 0x6
-#define AD5686_CMD_INTERNAL_REFER_SETUP 0x7
-#define AD5686_CMD_DAISY_CHAIN_ENABLE 0x8
-#define AD5686_CMD_READBACK_ENABLE 0x9
-
-#define AD5686_LDAC_PWRDN_NONE 0x0
-#define AD5686_LDAC_PWRDN_1K 0x1
-#define AD5686_LDAC_PWRDN_100K 0x2
-#define AD5686_LDAC_PWRDN_3STATE 0x3
-
-/**
- * struct ad5686_chip_info - chip specific information
- * @int_vref_mv: AD5620/40/60: the internal reference voltage
- * @channel: channel specification
-*/
-
-struct ad5686_chip_info {
- u16 int_vref_mv;
- struct iio_chan_spec channel[AD5686_DAC_CHANNELS];
-};
-
-/**
- * struct ad5446_state - driver instance specific data
- * @spi: spi_device
- * @chip_info: chip model specific constants, available modes etc
- * @reg: supply regulator
- * @vref_mv: actual reference voltage used
- * @pwr_down_mask: power down mask
- * @pwr_down_mode: current power down mode
- * @data: spi transfer buffers
- */
-
-struct ad5686_state {
- struct spi_device *spi;
- const struct ad5686_chip_info *chip_info;
- struct regulator *reg;
- unsigned short vref_mv;
- unsigned pwr_down_mask;
- unsigned pwr_down_mode;
- /*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
- */
-
- union {
- __be32 d32;
- u8 d8[4];
- } data[3] ____cacheline_aligned;
-};
-
-/**
- * ad5686_supported_device_ids:
- */
-
-enum ad5686_supported_device_ids {
- ID_AD5684,
- ID_AD5685,
- ID_AD5686,
-};
-static int ad5686_spi_write(struct ad5686_state *st,
- u8 cmd, u8 addr, u16 val, u8 shift)
-{
- val <<= shift;
-
- st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) |
- AD5686_ADDR(addr) |
- val);
-
- return spi_write(st->spi, &st->data[0].d8[1], 3);
-}
-
-static int ad5686_spi_read(struct ad5686_state *st, u8 addr)
-{
- struct spi_transfer t[] = {
- {
- .tx_buf = &st->data[0].d8[1],
- .len = 3,
- .cs_change = 1,
- }, {
- .tx_buf = &st->data[1].d8[1],
- .rx_buf = &st->data[2].d8[1],
- .len = 3,
- },
- };
- int ret;
-
- st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_READBACK_ENABLE) |
- AD5686_ADDR(addr));
- st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP));
-
- ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
- if (ret < 0)
- return ret;
-
- return be32_to_cpu(st->data[2].d32);
-}
+#include "ad5686.h"
static const char * const ad5686_powerdown_modes[] = {
"1kohm_to_gnd",
@@ -137,7 +26,7 @@ static const char * const ad5686_powerdown_modes[] = {
};
static int ad5686_get_powerdown_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
+ const struct iio_chan_spec *chan)
{
struct ad5686_state *st = iio_priv(indio_dev);
@@ -145,7 +34,8 @@ static int ad5686_get_powerdown_mode(struct iio_dev *indio_dev,
}
static int ad5686_set_powerdown_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int mode)
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
{
struct ad5686_state *st = iio_priv(indio_dev);
@@ -163,21 +53,25 @@ static const struct iio_enum ad5686_powerdown_mode_enum = {
};
static ssize_t ad5686_read_dac_powerdown(struct iio_dev *indio_dev,
- uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
{
struct ad5686_state *st = iio_priv(indio_dev);
return sprintf(buf, "%d\n", !!(st->pwr_down_mask &
- (0x3 << (chan->channel * 2))));
+ (0x3 << (chan->channel * 2))));
}
static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
- uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
- size_t len)
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf,
+ size_t len)
{
bool readin;
int ret;
struct ad5686_state *st = iio_priv(indio_dev);
+ unsigned int val, ref_bit_msk;
+ u8 shift;
ret = strtobool(buf, &readin);
if (ret)
@@ -188,8 +82,28 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
else
st->pwr_down_mask &= ~(0x3 << (chan->channel * 2));
- ret = ad5686_spi_write(st, AD5686_CMD_POWERDOWN_DAC, 0,
- st->pwr_down_mask & st->pwr_down_mode, 0);
+ switch (st->chip_info->regmap_type) {
+ case AD5683_REGMAP:
+ shift = 13;
+ ref_bit_msk = AD5683_REF_BIT_MSK;
+ break;
+ case AD5686_REGMAP:
+ shift = 0;
+ ref_bit_msk = 0;
+ break;
+ case AD5693_REGMAP:
+ shift = 13;
+ ref_bit_msk = AD5693_REF_BIT_MSK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = ((st->pwr_down_mask & st->pwr_down_mode) << shift);
+ if (!st->use_internal_vref)
+ val |= ref_bit_msk;
+
+ ret = st->write(st, AD5686_CMD_POWERDOWN_DAC, 0, val);
return ret ? ret : len;
}
@@ -206,7 +120,7 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&indio_dev->mlock);
- ret = ad5686_spi_read(st, chan->address);
+ ret = st->read(st, chan->address);
mutex_unlock(&indio_dev->mlock);
if (ret < 0)
return ret;
@@ -221,10 +135,10 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
}
static int ad5686_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
struct ad5686_state *st = iio_priv(indio_dev);
int ret;
@@ -235,11 +149,10 @@ static int ad5686_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
mutex_lock(&indio_dev->mlock);
- ret = ad5686_spi_write(st,
- AD5686_CMD_WRITE_INPUT_N_UPDATE_N,
- chan->address,
- val,
- chan->scan_type.shift);
+ ret = st->write(st,
+ AD5686_CMD_WRITE_INPUT_N_UPDATE_N,
+ chan->address,
+ val << chan->scan_type.shift);
mutex_unlock(&indio_dev->mlock);
break;
default:
@@ -266,14 +179,14 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
{ },
};
-#define AD5868_CHANNEL(chan, bits, _shift) { \
+#define AD5868_CHANNEL(chan, addr, bits, _shift) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
.channel = chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
- .address = AD5686_ADDR_DAC(chan), \
+ .address = addr, \
.scan_type = { \
.sign = 'u', \
.realbits = (bits), \
@@ -283,45 +196,191 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
.ext_info = ad5686_ext_info, \
}
+#define DECLARE_AD5693_CHANNELS(name, bits, _shift) \
+static struct iio_chan_spec name[] = { \
+ AD5868_CHANNEL(0, 0, bits, _shift), \
+}
+
+#define DECLARE_AD5686_CHANNELS(name, bits, _shift) \
+static struct iio_chan_spec name[] = { \
+ AD5868_CHANNEL(0, 1, bits, _shift), \
+ AD5868_CHANNEL(1, 2, bits, _shift), \
+ AD5868_CHANNEL(2, 4, bits, _shift), \
+ AD5868_CHANNEL(3, 8, bits, _shift), \
+}
+
+#define DECLARE_AD5676_CHANNELS(name, bits, _shift) \
+static struct iio_chan_spec name[] = { \
+ AD5868_CHANNEL(0, 0, bits, _shift), \
+ AD5868_CHANNEL(1, 1, bits, _shift), \
+ AD5868_CHANNEL(2, 2, bits, _shift), \
+ AD5868_CHANNEL(3, 3, bits, _shift), \
+ AD5868_CHANNEL(4, 4, bits, _shift), \
+ AD5868_CHANNEL(5, 5, bits, _shift), \
+ AD5868_CHANNEL(6, 6, bits, _shift), \
+ AD5868_CHANNEL(7, 7, bits, _shift), \
+}
+
+DECLARE_AD5676_CHANNELS(ad5672_channels, 12, 4);
+DECLARE_AD5676_CHANNELS(ad5676_channels, 16, 0);
+DECLARE_AD5686_CHANNELS(ad5684_channels, 12, 4);
+DECLARE_AD5686_CHANNELS(ad5685r_channels, 14, 2);
+DECLARE_AD5686_CHANNELS(ad5686_channels, 16, 0);
+DECLARE_AD5693_CHANNELS(ad5693_channels, 16, 0);
+DECLARE_AD5693_CHANNELS(ad5692r_channels, 14, 2);
+DECLARE_AD5693_CHANNELS(ad5691r_channels, 12, 4);
+
static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
+ [ID_AD5671R] = {
+ .channels = ad5672_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 8,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5672R] = {
+ .channels = ad5672_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 8,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5675R] = {
+ .channels = ad5676_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 8,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5676] = {
+ .channels = ad5676_channels,
+ .num_channels = 8,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5676R] = {
+ .channels = ad5676_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 8,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5681R] = {
+ .channels = ad5691r_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 1,
+ .regmap_type = AD5683_REGMAP,
+ },
+ [ID_AD5682R] = {
+ .channels = ad5692r_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 1,
+ .regmap_type = AD5683_REGMAP,
+ },
+ [ID_AD5683] = {
+ .channels = ad5693_channels,
+ .num_channels = 1,
+ .regmap_type = AD5683_REGMAP,
+ },
+ [ID_AD5683R] = {
+ .channels = ad5693_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 1,
+ .regmap_type = AD5683_REGMAP,
+ },
[ID_AD5684] = {
- .channel[0] = AD5868_CHANNEL(0, 12, 4),
- .channel[1] = AD5868_CHANNEL(1, 12, 4),
- .channel[2] = AD5868_CHANNEL(2, 12, 4),
- .channel[3] = AD5868_CHANNEL(3, 12, 4),
+ .channels = ad5684_channels,
+ .num_channels = 4,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5684R] = {
+ .channels = ad5684_channels,
.int_vref_mv = 2500,
+ .num_channels = 4,
+ .regmap_type = AD5686_REGMAP,
},
- [ID_AD5685] = {
- .channel[0] = AD5868_CHANNEL(0, 14, 2),
- .channel[1] = AD5868_CHANNEL(1, 14, 2),
- .channel[2] = AD5868_CHANNEL(2, 14, 2),
- .channel[3] = AD5868_CHANNEL(3, 14, 2),
+ [ID_AD5685R] = {
+ .channels = ad5685r_channels,
.int_vref_mv = 2500,
+ .num_channels = 4,
+ .regmap_type = AD5686_REGMAP,
},
[ID_AD5686] = {
- .channel[0] = AD5868_CHANNEL(0, 16, 0),
- .channel[1] = AD5868_CHANNEL(1, 16, 0),
- .channel[2] = AD5868_CHANNEL(2, 16, 0),
- .channel[3] = AD5868_CHANNEL(3, 16, 0),
+ .channels = ad5686_channels,
+ .num_channels = 4,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5686R] = {
+ .channels = ad5686_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 4,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5691R] = {
+ .channels = ad5691r_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 1,
+ .regmap_type = AD5693_REGMAP,
+ },
+ [ID_AD5692R] = {
+ .channels = ad5692r_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 1,
+ .regmap_type = AD5693_REGMAP,
+ },
+ [ID_AD5693] = {
+ .channels = ad5693_channels,
+ .num_channels = 1,
+ .regmap_type = AD5693_REGMAP,
+ },
+ [ID_AD5693R] = {
+ .channels = ad5693_channels,
.int_vref_mv = 2500,
+ .num_channels = 1,
+ .regmap_type = AD5693_REGMAP,
+ },
+ [ID_AD5694] = {
+ .channels = ad5684_channels,
+ .num_channels = 4,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5694R] = {
+ .channels = ad5684_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 4,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5696] = {
+ .channels = ad5686_channels,
+ .num_channels = 4,
+ .regmap_type = AD5686_REGMAP,
+ },
+ [ID_AD5696R] = {
+ .channels = ad5686_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 4,
+ .regmap_type = AD5686_REGMAP,
},
};
-
-static int ad5686_probe(struct spi_device *spi)
+int ad5686_probe(struct device *dev,
+ enum ad5686_supported_device_ids chip_type,
+ const char *name, ad5686_write_func write,
+ ad5686_read_func read)
{
struct ad5686_state *st;
struct iio_dev *indio_dev;
- int ret, voltage_uv = 0;
+ unsigned int val, ref_bit_msk;
+ u8 cmd;
+ int ret, i, voltage_uv = 0;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+
+ st->dev = dev;
+ st->write = write;
+ st->read = read;
- st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
+ st->reg = devm_regulator_get_optional(dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
@@ -334,28 +393,47 @@ static int ad5686_probe(struct spi_device *spi)
voltage_uv = ret;
}
- st->chip_info =
- &ad5686_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+ st->chip_info = &ad5686_chip_info_tbl[chip_type];
if (voltage_uv)
st->vref_mv = voltage_uv / 1000;
else
st->vref_mv = st->chip_info->int_vref_mv;
- st->spi = spi;
-
/* Set all the power down mode for all channels to 1K pulldown */
- st->pwr_down_mode = 0x55;
+ for (i = 0; i < st->chip_info->num_channels; i++)
+ st->pwr_down_mode |= (0x01 << (i * 2));
- indio_dev->dev.parent = &spi->dev;
- indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
indio_dev->info = &ad5686_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channel;
- indio_dev->num_channels = AD5686_DAC_CHANNELS;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+
+ switch (st->chip_info->regmap_type) {
+ case AD5683_REGMAP:
+ cmd = AD5686_CMD_CONTROL_REG;
+ ref_bit_msk = AD5683_REF_BIT_MSK;
+ st->use_internal_vref = !voltage_uv;
+ break;
+ case AD5686_REGMAP:
+ cmd = AD5686_CMD_INTERNAL_REFER_SETUP;
+ ref_bit_msk = 0;
+ break;
+ case AD5693_REGMAP:
+ cmd = AD5686_CMD_CONTROL_REG;
+ ref_bit_msk = AD5693_REF_BIT_MSK;
+ st->use_internal_vref = !voltage_uv;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_disable_reg;
+ }
+
+ val = (voltage_uv | ref_bit_msk);
- ret = ad5686_spi_write(st, AD5686_CMD_INTERNAL_REFER_SETUP, 0,
- !!voltage_uv, 0);
+ ret = st->write(st, cmd, 0, !!val);
if (ret)
goto error_disable_reg;
@@ -370,10 +448,11 @@ error_disable_reg:
regulator_disable(st->reg);
return ret;
}
+EXPORT_SYMBOL_GPL(ad5686_probe);
-static int ad5686_remove(struct spi_device *spi)
+int ad5686_remove(struct device *dev)
{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5686_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
@@ -382,24 +461,7 @@ static int ad5686_remove(struct spi_device *spi)
return 0;
}
-
-static const struct spi_device_id ad5686_id[] = {
- {"ad5684", ID_AD5684},
- {"ad5685", ID_AD5685},
- {"ad5686", ID_AD5686},
- {}
-};
-MODULE_DEVICE_TABLE(spi, ad5686_id);
-
-static struct spi_driver ad5686_driver = {
- .driver = {
- .name = "ad5686",
- },
- .probe = ad5686_probe,
- .remove = ad5686_remove,
- .id_table = ad5686_id,
-};
-module_spi_driver(ad5686_driver);
+EXPORT_SYMBOL_GPL(ad5686_remove);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
new file mode 100644
index 000000000000..d05cda9f1edd
--- /dev/null
+++ b/drivers/iio/dac/ad5686.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * This file is part of AD5686 DAC driver
+ *
+ * Copyright 2018 Analog Devices Inc.
+ */
+
+#ifndef __DRIVERS_IIO_DAC_AD5686_H__
+#define __DRIVERS_IIO_DAC_AD5686_H__
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+
+#define AD5683_DATA(x) ((x) << 4)
+#define AD5686_ADDR(x) ((x) << 16)
+#define AD5686_CMD(x) ((x) << 20)
+
+#define AD5686_ADDR_DAC(chan) (0x1 << (chan))
+#define AD5686_ADDR_ALL_DAC 0xF
+
+#define AD5686_CMD_NOOP 0x0
+#define AD5686_CMD_WRITE_INPUT_N 0x1
+#define AD5686_CMD_UPDATE_DAC_N 0x2
+#define AD5686_CMD_WRITE_INPUT_N_UPDATE_N 0x3
+#define AD5686_CMD_POWERDOWN_DAC 0x4
+#define AD5686_CMD_LDAC_MASK 0x5
+#define AD5686_CMD_RESET 0x6
+#define AD5686_CMD_INTERNAL_REFER_SETUP 0x7
+#define AD5686_CMD_DAISY_CHAIN_ENABLE 0x8
+#define AD5686_CMD_READBACK_ENABLE 0x9
+
+#define AD5686_LDAC_PWRDN_NONE 0x0
+#define AD5686_LDAC_PWRDN_1K 0x1
+#define AD5686_LDAC_PWRDN_100K 0x2
+#define AD5686_LDAC_PWRDN_3STATE 0x3
+
+#define AD5686_CMD_CONTROL_REG 0x4
+#define AD5686_CMD_READBACK_ENABLE_V2 0x5
+#define AD5683_REF_BIT_MSK BIT(12)
+#define AD5693_REF_BIT_MSK BIT(12)
+
+/**
+ * ad5686_supported_device_ids:
+ */
+enum ad5686_supported_device_ids {
+ ID_AD5671R,
+ ID_AD5672R,
+ ID_AD5675R,
+ ID_AD5676,
+ ID_AD5676R,
+ ID_AD5681R,
+ ID_AD5682R,
+ ID_AD5683,
+ ID_AD5683R,
+ ID_AD5684,
+ ID_AD5684R,
+ ID_AD5685R,
+ ID_AD5686,
+ ID_AD5686R,
+ ID_AD5691R,
+ ID_AD5692R,
+ ID_AD5693,
+ ID_AD5693R,
+ ID_AD5694,
+ ID_AD5694R,
+ ID_AD5695R,
+ ID_AD5696,
+ ID_AD5696R,
+};
+
+enum ad5686_regmap_type {
+ AD5683_REGMAP,
+ AD5686_REGMAP,
+ AD5693_REGMAP
+};
+
+struct ad5686_state;
+
+typedef int (*ad5686_write_func)(struct ad5686_state *st,
+ u8 cmd, u8 addr, u16 val);
+
+typedef int (*ad5686_read_func)(struct ad5686_state *st, u8 addr);
+
+/**
+ * struct ad5686_chip_info - chip specific information
+ * @int_vref_mv: the internal reference voltage
+ * @num_channels: number of channels
+ * @channels: channel specification
+ * @regmap_type: register map layout variant
+ */
+
+struct ad5686_chip_info {
+ u16 int_vref_mv;
+ unsigned int num_channels;
+ struct iio_chan_spec *channels;
+ enum ad5686_regmap_type regmap_type;
+};
+
+/**
+ * struct ad5686_state - driver instance specific data
+ * @dev: pointer to the struct device
+ * @chip_info: chip model specific constants, available modes etc
+ * @reg: supply regulator
+ * @vref_mv: actual reference voltage used
+ * @pwr_down_mask: power down mask
+ * @pwr_down_mode: current power down mode
+ * @use_internal_vref: set to true if the internal reference voltage is used
+ * @data: transfer buffers, shared by the SPI and I2C transports
+ */
+
+struct ad5686_state {
+ struct device *dev;
+ const struct ad5686_chip_info *chip_info;
+ struct regulator *reg;
+ unsigned short vref_mv;
+ unsigned int pwr_down_mask;
+ unsigned int pwr_down_mode;
+ ad5686_write_func write;
+ ad5686_read_func read;
+ bool use_internal_vref;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+
+ union {
+ __be32 d32;
+ __be16 d16;
+ u8 d8[4];
+ } data[3] ____cacheline_aligned;
+};
+
+
+int ad5686_probe(struct device *dev,
+ enum ad5686_supported_device_ids chip_type,
+ const char *name, ad5686_write_func write,
+ ad5686_read_func read);
+
+int ad5686_remove(struct device *dev);
+
+
+#endif /* __DRIVERS_IIO_DAC_AD5686_H__ */
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
new file mode 100644
index 000000000000..d18735d7d938
--- /dev/null
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AD5671R, AD5675R, AD5691R, AD5692R, AD5693, AD5693R,
+ * AD5694, AD5694R, AD5695R, AD5696, AD5696R
+ * Digital to analog converters driver
+ *
+ * Copyright 2018 Analog Devices Inc.
+ */
+
+#include "ad5686.h"
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+
+static int ad5686_i2c_read(struct ad5686_state *st, u8 addr)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+ struct i2c_msg msg[2] = {
+ {
+ .addr = i2c->addr,
+ .flags = i2c->flags,
+ .len = 3,
+ .buf = &st->data[0].d8[1],
+ },
+ {
+ .addr = i2c->addr,
+ .flags = i2c->flags | I2C_M_RD,
+ .len = 2,
+ .buf = (char *)&st->data[0].d16,
+ },
+ };
+ int ret;
+
+ st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP) |
+ AD5686_ADDR(addr) |
+ 0x00);
+
+ ret = i2c_transfer(i2c->adapter, msg, 2);
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(st->data[0].d16);
+}
+
+static int ad5686_i2c_write(struct ad5686_state *st,
+ u8 cmd, u8 addr, u16 val)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+ int ret;
+
+ st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) | AD5686_ADDR(addr)
+ | val);
+
+ ret = i2c_master_send(i2c, &st->data[0].d8[1], 3);
+ if (ret < 0)
+ return ret;
+
+ return (ret != 3) ? -EIO : 0;
+}
+
+static int ad5686_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ return ad5686_probe(&i2c->dev, id->driver_data, id->name,
+ ad5686_i2c_write, ad5686_i2c_read);
+}
+
+static int ad5686_i2c_remove(struct i2c_client *i2c)
+{
+ return ad5686_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id ad5686_i2c_id[] = {
+ {"ad5671r", ID_AD5671R},
+ {"ad5675r", ID_AD5675R},
+ {"ad5691r", ID_AD5691R},
+ {"ad5692r", ID_AD5692R},
+ {"ad5693", ID_AD5693},
+ {"ad5693r", ID_AD5693R},
+ {"ad5694", ID_AD5694},
+ {"ad5694r", ID_AD5694R},
+ {"ad5695r", ID_AD5695R},
+ {"ad5696", ID_AD5696},
+ {"ad5696r", ID_AD5696R},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ad5686_i2c_id);
+
+static struct i2c_driver ad5686_i2c_driver = {
+ .driver = {
+ .name = "ad5696",
+ },
+ .probe = ad5686_i2c_probe,
+ .remove = ad5686_i2c_remove,
+ .id_table = ad5686_i2c_id,
+};
+
+module_i2c_driver(ad5686_i2c_driver);
+
+MODULE_AUTHOR("Stefan Popa <stefan.popa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5686 and similar multi-channel DACs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index af2ddd0dd341..cca278eaa138 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -2,6 +2,7 @@
* LTC2632 Digital to analog convertors spi driver
*
* Copyright 2017 Maxime Roussin-Bélanger
+ * expanded by Silvan Murer <silvan.murer@gmail.com>
*
* Licensed under the GPL-2.
*/
@@ -10,6 +11,7 @@
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
#define LTC2632_DAC_CHANNELS 2
@@ -28,7 +30,7 @@
/**
* struct ltc2632_chip_info - chip specific information
* @channels: channel spec for the DAC
- * @vref_mv: reference voltage
+ * @vref_mv: internal reference voltage
*/
struct ltc2632_chip_info {
const struct iio_chan_spec *channels;
@@ -39,10 +41,14 @@ struct ltc2632_chip_info {
* struct ltc2632_state - driver instance specific data
* @spi_dev: pointer to the spi_device struct
* @powerdown_cache_mask used to show current channel powerdown state
+ * @vref_mv: used reference voltage (internal or external)
+ * @vref_reg: regulator for the reference voltage
*/
struct ltc2632_state {
struct spi_device *spi_dev;
unsigned int powerdown_cache_mask;
+ int vref_mv;
+ struct regulator *vref_reg;
};
enum ltc2632_supported_device_ids {
@@ -90,7 +96,7 @@ static int ltc2632_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_SCALE:
- *val = chip_info->vref_mv;
+ *val = st->vref_mv;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -246,6 +252,45 @@ static int ltc2632_probe(struct spi_device *spi)
chip_info = (struct ltc2632_chip_info *)
spi_get_device_id(spi)->driver_data;
+ st->vref_reg = devm_regulator_get_optional(&spi->dev, "vref");
+ if (PTR_ERR(st->vref_reg) == -ENODEV) {
+ /* use internal reference voltage */
+ st->vref_reg = NULL;
+ st->vref_mv = chip_info->vref_mv;
+
+ ret = ltc2632_spi_write(spi, LTC2632_CMD_INTERNAL_REFER,
+ 0, 0, 0);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Set internal reference command failed, %d\n",
+ ret);
+ return ret;
+ }
+ } else if (IS_ERR(st->vref_reg)) {
+ dev_err(&spi->dev,
+ "Error getting voltage reference regulator\n");
+ return PTR_ERR(st->vref_reg);
+ } else {
+ /* use external reference voltage */
+ ret = regulator_enable(st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev,
+ "enable reference regulator failed, %d\n",
+ ret);
+ return ret;
+ }
+ st->vref_mv = regulator_get_voltage(st->vref_reg) / 1000;
+
+ ret = ltc2632_spi_write(spi, LTC2632_CMD_EXTERNAL_REFER,
+ 0, 0, 0);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Set external reference command failed, %d\n",
+ ret);
+ return ret;
+ }
+ }
+
indio_dev->dev.parent = &spi->dev;
indio_dev->name = dev_of_node(&spi->dev) ? dev_of_node(&spi->dev)->name
: spi_get_device_id(spi)->name;
@@ -254,14 +299,20 @@ static int ltc2632_probe(struct spi_device *spi)
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = LTC2632_DAC_CHANNELS;
- ret = ltc2632_spi_write(spi, LTC2632_CMD_INTERNAL_REFER, 0, 0, 0);
- if (ret) {
- dev_err(&spi->dev,
- "Set internal reference command failed, %d\n", ret);
- return ret;
- }
+ return iio_device_register(indio_dev);
+}
- return devm_iio_device_register(&spi->dev, indio_dev);
+static int ltc2632_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ltc2632_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (st->vref_reg)
+ regulator_disable(st->vref_reg);
+
+ return 0;
}
static const struct spi_device_id ltc2632_id[] = {
@@ -275,15 +326,6 @@ static const struct spi_device_id ltc2632_id[] = {
};
MODULE_DEVICE_TABLE(spi, ltc2632_id);
-static struct spi_driver ltc2632_driver = {
- .driver = {
- .name = "ltc2632",
- },
- .probe = ltc2632_probe,
- .id_table = ltc2632_id,
-};
-module_spi_driver(ltc2632_driver);
-
static const struct of_device_id ltc2632_of_match[] = {
{
.compatible = "lltc,ltc2632-l12",
@@ -308,6 +350,17 @@ static const struct of_device_id ltc2632_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ltc2632_of_match);
+static struct spi_driver ltc2632_driver = {
+ .driver = {
+ .name = "ltc2632",
+ .of_match_table = of_match_ptr(ltc2632_of_match),
+ },
+ .probe = ltc2632_probe,
+ .remove = ltc2632_remove,
+ .id_table = ltc2632_id,
+};
+module_spi_driver(ltc2632_driver);
+
MODULE_AUTHOR("Maxime Roussin-Belanger <maxime.roussinbelanger@gmail.com>");
MODULE_DESCRIPTION("LTC2632 DAC SPI driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
new file mode 100644
index 000000000000..dd21eebed6a8
--- /dev/null
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -0,0 +1,439 @@
+/*
+ * ti-dac5571.c - Texas Instruments 8/10/12-bit 1/4-channel DAC driver
+ *
+ * Copyright (C) 2018 Prevas A/S
+ *
+ * http://www.ti.com/lit/ds/symlink/dac5571.pdf
+ * http://www.ti.com/lit/ds/symlink/dac6571.pdf
+ * http://www.ti.com/lit/ds/symlink/dac7571.pdf
+ * http://www.ti.com/lit/ds/symlink/dac5574.pdf
+ * http://www.ti.com/lit/ds/symlink/dac6574.pdf
+ * http://www.ti.com/lit/ds/symlink/dac7574.pdf
+ * http://www.ti.com/lit/ds/symlink/dac5573.pdf
+ * http://www.ti.com/lit/ds/symlink/dac6573.pdf
+ * http://www.ti.com/lit/ds/symlink/dac7573.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+enum chip_id {
+ single_8bit, single_10bit, single_12bit,
+ quad_8bit, quad_10bit, quad_12bit
+};
+
+struct dac5571_spec {
+ u8 num_channels;
+ u8 resolution;
+};
+
+static const struct dac5571_spec dac5571_spec[] = {
+ [single_8bit] = {.num_channels = 1, .resolution = 8},
+ [single_10bit] = {.num_channels = 1, .resolution = 10},
+ [single_12bit] = {.num_channels = 1, .resolution = 12},
+ [quad_8bit] = {.num_channels = 4, .resolution = 8},
+ [quad_10bit] = {.num_channels = 4, .resolution = 10},
+ [quad_12bit] = {.num_channels = 4, .resolution = 12},
+};
+
+struct dac5571_data {
+ struct i2c_client *client;
+ int id;
+ struct mutex lock;
+ struct regulator *vref;
+ u16 val[4];
+ bool powerdown;
+ u8 powerdown_mode;
+ struct dac5571_spec const *spec;
+ int (*dac5571_cmd)(struct dac5571_data *data, int channel, u16 val);
+ int (*dac5571_pwrdwn)(struct dac5571_data *data, int channel, u8 pwrdwn);
+ u8 buf[3] ____cacheline_aligned;
+};
+
+#define DAC5571_POWERDOWN(mode) ((mode) + 1)
+#define DAC5571_POWERDOWN_FLAG BIT(0)
+#define DAC5571_CHANNEL_SELECT 1
+#define DAC5571_LOADMODE_DIRECT BIT(4)
+#define DAC5571_SINGLE_PWRDWN_BITS 4
+#define DAC5571_QUAD_PWRDWN_BITS 6
+
+static int dac5571_cmd_single(struct dac5571_data *data, int channel, u16 val)
+{
+ unsigned int shift;
+
+ shift = 12 - data->spec->resolution;
+ data->buf[1] = val << shift;
+ data->buf[0] = val >> (8 - shift);
+
+ if (i2c_master_send(data->client, data->buf, 2) != 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int dac5571_cmd_quad(struct dac5571_data *data, int channel, u16 val)
+{
+ unsigned int shift;
+
+ shift = 16 - data->spec->resolution;
+ data->buf[2] = val << shift;
+ data->buf[1] = (val >> (8 - shift));
+ data->buf[0] = (channel << DAC5571_CHANNEL_SELECT) |
+ DAC5571_LOADMODE_DIRECT;
+
+ if (i2c_master_send(data->client, data->buf, 3) != 3)
+ return -EIO;
+
+ return 0;
+}
+
+static int dac5571_pwrdwn_single(struct dac5571_data *data, int channel, u8 pwrdwn)
+{
+ unsigned int shift;
+
+ shift = 12 - data->spec->resolution;
+ data->buf[1] = 0;
+ data->buf[0] = pwrdwn << DAC5571_SINGLE_PWRDWN_BITS;
+
+ if (i2c_master_send(data->client, data->buf, 2) != 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int dac5571_pwrdwn_quad(struct dac5571_data *data, int channel, u8 pwrdwn)
+{
+ unsigned int shift;
+
+ shift = 16 - data->spec->resolution;
+ data->buf[2] = 0;
+ data->buf[1] = pwrdwn << DAC5571_QUAD_PWRDWN_BITS;
+ data->buf[0] = (channel << DAC5571_CHANNEL_SELECT) |
+ DAC5571_LOADMODE_DIRECT | DAC5571_POWERDOWN_FLAG;
+
+ if (i2c_master_send(data->client, data->buf, 3) != 3)
+ return -EIO;
+
+ return 0;
+}
+
+static const char *const dac5571_powerdown_modes[] = {
+ "1kohm_to_gnd", "100kohm_to_gnd", "three_state",
+};
+
+static int dac5571_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct dac5571_data *data = iio_priv(indio_dev);
+
+ return data->powerdown_mode;
+}
+
+static int dac5571_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct dac5571_data *data = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (data->powerdown_mode == mode)
+ return 0;
+
+ mutex_lock(&data->lock);
+ if (data->powerdown) {
+ ret = data->dac5571_pwrdwn(data, chan->channel,
+ DAC5571_POWERDOWN(mode));
+ if (ret)
+ goto out;
+ }
+ data->powerdown_mode = mode;
+
+ out:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static const struct iio_enum dac5571_powerdown_mode = {
+ .items = dac5571_powerdown_modes,
+ .num_items = ARRAY_SIZE(dac5571_powerdown_modes),
+ .get = dac5571_get_powerdown_mode,
+ .set = dac5571_set_powerdown_mode,
+};
+
+static ssize_t dac5571_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct dac5571_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", data->powerdown);
+}
+
+static ssize_t dac5571_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct dac5571_data *data = iio_priv(indio_dev);
+ bool powerdown;
+ int ret;
+
+ ret = strtobool(buf, &powerdown);
+ if (ret)
+ return ret;
+
+ if (data->powerdown == powerdown)
+ return len;
+
+ mutex_lock(&data->lock);
+ if (powerdown)
+ ret = data->dac5571_pwrdwn(data, chan->channel,
+ DAC5571_POWERDOWN(data->powerdown_mode));
+ else
+ ret = data->dac5571_cmd(data, chan->channel, data->val[0]);
+ if (ret)
+ goto out;
+
+ data->powerdown = powerdown;
+
+ out:
+ mutex_unlock(&data->lock);
+
+ return ret ? ret : len;
+}
+
+
+static const struct iio_chan_spec_ext_info dac5571_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = dac5571_read_powerdown,
+ .write = dac5571_write_powerdown,
+ .shared = IIO_SHARED_BY_TYPE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &dac5571_powerdown_mode),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &dac5571_powerdown_mode),
+ {},
+};
+
+#define dac5571_CHANNEL(chan, name) { \
+ .type = IIO_VOLTAGE, \
+ .channel = (chan), \
+ .address = (chan), \
+ .indexed = true, \
+ .output = true, \
+ .datasheet_name = name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = dac5571_ext_info, \
+}
+
+static const struct iio_chan_spec dac5571_channels[] = {
+ dac5571_CHANNEL(0, "A"),
+ dac5571_CHANNEL(1, "B"),
+ dac5571_CHANNEL(2, "C"),
+ dac5571_CHANNEL(3, "D"),
+};
+
+static int dac5571_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dac5571_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *val = data->val[chan->channel];
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(data->vref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+ *val2 = data->spec->resolution;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dac5571_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct dac5571_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (data->val[chan->channel] == val)
+ return 0;
+
+ if (val >= (1 << data->spec->resolution) || val < 0)
+ return -EINVAL;
+
+ if (data->powerdown)
+ return -EBUSY;
+
+ mutex_lock(&data->lock);
+ ret = data->dac5571_cmd(data, chan->channel, val);
+ if (ret == 0)
+ data->val[chan->channel] = val;
+ mutex_unlock(&data->lock);
+ return ret;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dac5571_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info dac5571_info = {
+ .read_raw = dac5571_read_raw,
+ .write_raw = dac5571_write_raw,
+ .write_raw_get_fmt = dac5571_write_raw_get_fmt,
+};
+
+static int dac5571_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ const struct dac5571_spec *spec;
+ struct dac5571_data *data;
+ struct iio_dev *indio_dev;
+ int ret, i;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = client->dev.of_node;
+ indio_dev->info = &dac5571_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = dac5571_channels;
+
+ spec = &dac5571_spec[id->driver_data];
+ indio_dev->num_channels = spec->num_channels;
+ data->spec = spec;
+
+ data->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(data->vref))
+ return PTR_ERR(data->vref);
+
+ ret = regulator_enable(data->vref);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&data->lock);
+
+ switch (spec->num_channels) {
+ case 1:
+ data->dac5571_cmd = dac5571_cmd_single;
+ data->dac5571_pwrdwn = dac5571_pwrdwn_single;
+ break;
+ case 4:
+ data->dac5571_cmd = dac5571_cmd_quad;
+ data->dac5571_pwrdwn = dac5571_pwrdwn_quad;
+ break;
+ default:
+ goto err;
+ }
+
+ for (i = 0; i < spec->num_channels; i++) {
+ ret = data->dac5571_cmd(data, i, 0);
+ if (ret) {
+ dev_err(dev, "failed to initialize channel %d to 0\n", i);
+ goto err;
+ }
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+ err:
+ regulator_disable(data->vref);
+ return ret;
+}
+
+static int dac5571_remove(struct i2c_client *i2c)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
+ struct dac5571_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(data->vref);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id dac5571_of_id[] = {
+ {.compatible = "ti,dac5571"},
+ {.compatible = "ti,dac6571"},
+ {.compatible = "ti,dac7571"},
+ {.compatible = "ti,dac5574"},
+ {.compatible = "ti,dac6574"},
+ {.compatible = "ti,dac7574"},
+ {.compatible = "ti,dac5573"},
+ {.compatible = "ti,dac6573"},
+ {.compatible = "ti,dac7573"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, dac5571_of_id);
+#endif
+
+static const struct i2c_device_id dac5571_id[] = {
+ {"dac5571", single_8bit},
+ {"dac6571", single_10bit},
+ {"dac7571", single_12bit},
+ {"dac5574", quad_8bit},
+ {"dac6574", quad_10bit},
+ {"dac7574", quad_12bit},
+ {"dac5573", quad_8bit},
+ {"dac6573", quad_10bit},
+ {"dac7573", quad_12bit},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, dac5571_id);
+
+static struct i2c_driver dac5571_driver = {
+ .driver = {
+ .name = "ti-dac5571",
+ },
+ .probe = dac5571_probe,
+ .remove = dac5571_remove,
+ .id_table = dac5571_id,
+};
+module_i2c_driver(dac5571_driver);
+
+MODULE_AUTHOR("Sean Nyekjaer <sean.nyekjaer@prevas.dk>");
+MODULE_DESCRIPTION("Texas Instruments 8/10/12-bit 1/4-channel DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 7d64be353403..f9c0624505a2 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -24,6 +24,7 @@
#include <linux/spinlock.h>
#include <linux/iio/iio.h>
#include <linux/acpi.h>
+#include <linux/platform_device.h>
#include "inv_mpu_iio.h"
/*
@@ -52,6 +53,7 @@ static const struct inv_mpu6050_reg_map reg_set_6500 = {
.raw_accl = INV_MPU6050_REG_RAW_ACCEL,
.temperature = INV_MPU6050_REG_TEMPERATURE,
.int_enable = INV_MPU6050_REG_INT_ENABLE,
+ .int_status = INV_MPU6050_REG_INT_STATUS,
.pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
.pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
.int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG,
@@ -86,6 +88,7 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.gyro_fifo_enable = false,
.accl_fifo_enable = false,
.accl_fs = INV_MPU6050_FS_02G,
+ .user_ctrl = 0,
};
/* Indexed by enum inv_devices */
@@ -121,6 +124,12 @@ static const struct inv_mpu6050_hw hw_info[] = {
.config = &chip_config_6050,
},
{
+ .whoami = INV_MPU9255_WHOAMI_VALUE,
+ .name = "MPU9255",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6050,
+ },
+ {
.whoami = INV_ICM20608_WHOAMI_VALUE,
.name = "ICM20608",
.reg = &reg_set_6500,
@@ -168,7 +177,7 @@ int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
return result;
if (en) {
- /* Wait for output stabilize */
+ /* Wait for output to stabilize */
msleep(INV_MPU6050_TEMP_UP_TIME);
if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) {
/* switch internal clock to PLL */
@@ -185,26 +194,29 @@ int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
{
- int result = 0;
+ int result;
if (power_on) {
- if (!st->powerup_count)
+ if (!st->powerup_count) {
result = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
- if (!result)
- st->powerup_count++;
+ if (result)
+ return result;
+ usleep_range(INV_MPU6050_REG_UP_TIME_MIN,
+ INV_MPU6050_REG_UP_TIME_MAX);
+ }
+ st->powerup_count++;
} else {
- st->powerup_count--;
- if (!st->powerup_count)
+ if (st->powerup_count == 1) {
result = regmap_write(st->map, st->reg->pwr_mgmt_1,
INV_MPU6050_BIT_SLEEP);
+ if (result)
+ return result;
+ }
+ st->powerup_count--;
}
- if (result)
- return result;
-
- if (power_on)
- usleep_range(INV_MPU6050_REG_UP_TIME_MIN,
- INV_MPU6050_REG_UP_TIME_MAX);
+ dev_dbg(regmap_get_device(st->map), "set power %d, count=%u\n",
+ power_on, st->powerup_count);
return 0;
}
@@ -262,26 +274,33 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
result = regmap_write(st->map, st->reg->gyro_config, d);
if (result)
- return result;
+ goto error_power_off;
result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
if (result)
- return result;
+ goto error_power_off;
d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1;
result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
- return result;
+ goto error_power_off;
d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
result = regmap_write(st->map, st->reg->accl_config, d);
if (result)
+ goto error_power_off;
+
+ result = regmap_write(st->map, st->reg->int_pin_cfg, st->irq_mask);
+ if (result)
return result;
memcpy(&st->chip_config, hw_info[st->chip_type].config,
sizeof(struct inv_mpu6050_chip_config));
- result = inv_mpu6050_set_power_itg(st, false);
+ return inv_mpu6050_set_power_itg(st, false);
+
+error_power_off:
+ inv_mpu6050_set_power_itg(st, false);
return result;
}
@@ -314,6 +333,65 @@ static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
return IIO_VAL_INT;
}
+static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int result;
+ int ret;
+
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ return result;
+
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ goto error_power_off;
+ ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
+ chan->channel2, val);
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ goto error_power_off;
+ break;
+ case IIO_ACCEL:
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ goto error_power_off;
+ ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
+ chan->channel2, val);
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ goto error_power_off;
+ break;
+ case IIO_TEMP:
+ /* wait for stabilization */
+ msleep(INV_MPU6050_SENSOR_UP_TIME);
+ ret = inv_mpu6050_sensor_show(st, st->reg->temperature,
+ IIO_MOD_X, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ result = inv_mpu6050_set_power_itg(st, false);
+ if (result)
+ goto error_power_off;
+
+ return ret;
+
+error_power_off:
+ inv_mpu6050_set_power_itg(st, false);
+ return result;
+}
+
static int
inv_mpu6050_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
@@ -324,63 +402,14 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- {
- int result;
-
- ret = IIO_VAL_INT;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
mutex_lock(&st->lock);
- result = iio_device_claim_direct_mode(indio_dev);
- if (result)
- goto error_read_raw_unlock;
- result = inv_mpu6050_set_power_itg(st, true);
- if (result)
- goto error_read_raw_release;
- switch (chan->type) {
- case IIO_ANGL_VEL:
- result = inv_mpu6050_switch_engine(st, true,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
- if (result)
- goto error_read_raw_power_off;
- ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
- chan->channel2, val);
- result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
- if (result)
- goto error_read_raw_power_off;
- break;
- case IIO_ACCEL:
- result = inv_mpu6050_switch_engine(st, true,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
- if (result)
- goto error_read_raw_power_off;
- ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
- chan->channel2, val);
- result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
- if (result)
- goto error_read_raw_power_off;
- break;
- case IIO_TEMP:
- /* wait for stablization */
- msleep(INV_MPU6050_SENSOR_UP_TIME);
- ret = inv_mpu6050_sensor_show(st, st->reg->temperature,
- IIO_MOD_X, val);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-error_read_raw_power_off:
- result |= inv_mpu6050_set_power_itg(st, false);
-error_read_raw_release:
- iio_device_release_direct_mode(indio_dev);
-error_read_raw_unlock:
+ ret = inv_mpu6050_read_channel_data(indio_dev, chan, val);
mutex_unlock(&st->lock);
- if (result)
- return result;
-
+ iio_device_release_direct_mode(indio_dev);
return ret;
- }
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
@@ -502,17 +531,18 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
struct inv_mpu6050_state *st = iio_priv(indio_dev);
int result;
- mutex_lock(&st->lock);
/*
* we should only update scale when the chip is disabled, i.e.
* not running
*/
result = iio_device_claim_direct_mode(indio_dev);
if (result)
- goto error_write_raw_unlock;
+ return result;
+
+ mutex_lock(&st->lock);
result = inv_mpu6050_set_power_itg(st, true);
if (result)
- goto error_write_raw_release;
+ goto error_write_raw_unlock;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -551,10 +581,9 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
}
result |= inv_mpu6050_set_power_itg(st, false);
-error_write_raw_release:
- iio_device_release_direct_mode(indio_dev);
error_write_raw_unlock:
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return result;
}
@@ -613,17 +642,18 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
return -EINVAL;
+ result = iio_device_claim_direct_mode(indio_dev);
+ if (result)
+ return result;
+
mutex_lock(&st->lock);
if (fifo_rate == st->chip_config.fifo_rate) {
result = 0;
goto fifo_rate_fail_unlock;
}
- result = iio_device_claim_direct_mode(indio_dev);
- if (result)
- goto fifo_rate_fail_unlock;
result = inv_mpu6050_set_power_itg(st, true);
if (result)
- goto fifo_rate_fail_release;
+ goto fifo_rate_fail_unlock;
d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
result = regmap_write(st->map, st->reg->sample_rate_div, d);
@@ -637,10 +667,9 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
fifo_rate_fail_power_off:
result |= inv_mpu6050_set_power_itg(st, false);
-fifo_rate_fail_release:
- iio_device_release_direct_mode(indio_dev);
fifo_rate_fail_unlock:
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
if (result)
return result;
@@ -769,7 +798,14 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
};
-/* constant IIO attribute */
+/*
+ * The user can choose any frequency between INV_MPU6050_MIN_FIFO_RATE and
+ * INV_MPU6050_MAX_FIFO_RATE, but only these frequencies are matched by the
+ * low-pass filter. Specifically, each of these sampling rates are about twice
+ * the bandwidth of a corresponding low-pass filter, which should eliminate
+ * aliasing following the Nyquist principle. By picking a frequency different
+ * from these, the user risks aliasing effects.
+ */
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 20 50 100 200 500");
static IIO_CONST_ATTR(in_anglvel_scale_available,
"0.000133090 0.000266181 0.000532362 0.001064724");
@@ -850,14 +886,11 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
msleep(INV_MPU6050_POWER_UP_TIME);
/*
- * toggle power state. After reset, the sleep bit could be on
- * or off depending on the OTP settings. Toggling power would
+ * Turn power on. After reset, the sleep bit could be on
+ * or off depending on the OTP settings. Turning power on
* make it in a definite state as well as making the hardware
* state align with the software state
*/
- result = inv_mpu6050_set_power_itg(st, false);
- if (result)
- return result;
result = inv_mpu6050_set_power_itg(st, true);
if (result)
return result;
@@ -865,13 +898,17 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
result = inv_mpu6050_switch_engine(st, false,
INV_MPU6050_BIT_PWR_ACCL_STBY);
if (result)
- return result;
+ goto error_power_off;
result = inv_mpu6050_switch_engine(st, false,
INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
- return result;
+ goto error_power_off;
- return 0;
+ return inv_mpu6050_set_power_itg(st, false);
+
+error_power_off:
+ inv_mpu6050_set_power_itg(st, false);
+ return result;
}
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
@@ -882,6 +919,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
struct inv_mpu6050_platform_data *pdata;
struct device *dev = regmap_get_device(regmap);
int result;
+ struct irq_data *desc;
+ int irq_type;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
@@ -913,20 +952,43 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
st->plat_data = *pdata;
}
+ desc = irq_get_irq_data(irq);
+ if (!desc) {
+ dev_err(dev, "Could not find IRQ %d\n", irq);
+ return -EINVAL;
+ }
+
+ irq_type = irqd_get_trigger_type(desc);
+ if (irq_type == IRQF_TRIGGER_RISING)
+ st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
+ else if (irq_type == IRQF_TRIGGER_FALLING)
+ st->irq_mask = INV_MPU6050_ACTIVE_LOW;
+ else if (irq_type == IRQF_TRIGGER_HIGH)
+ st->irq_mask = INV_MPU6050_ACTIVE_HIGH |
+ INV_MPU6050_LATCH_INT_EN;
+ else if (irq_type == IRQF_TRIGGER_LOW)
+ st->irq_mask = INV_MPU6050_ACTIVE_LOW |
+ INV_MPU6050_LATCH_INT_EN;
+ else {
+ dev_err(dev, "Invalid interrupt type 0x%x specified\n",
+ irq_type);
+ return -EINVAL;
+ }
+
/* power is turned on inside check chip type*/
result = inv_check_and_setup_chip(st);
if (result)
return result;
- if (inv_mpu_bus_setup)
- inv_mpu_bus_setup(indio_dev);
-
result = inv_mpu6050_init_config(indio_dev);
if (result) {
dev_err(dev, "Could not initialize device.\n");
return result;
}
+ if (inv_mpu_bus_setup)
+ inv_mpu_bus_setup(indio_dev);
+
dev_set_drvdata(dev, indio_dev);
indio_dev->dev.parent = dev;
/* name will be NULL when enumerated via ACPI */
@@ -940,50 +1002,32 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
indio_dev->info = &mpu_info;
indio_dev->modes = INDIO_BUFFER_TRIGGERED;
- result = iio_triggered_buffer_setup(indio_dev,
- inv_mpu6050_irq_handler,
- inv_mpu6050_read_fifo,
- NULL);
+ result = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ inv_mpu6050_irq_handler,
+ inv_mpu6050_read_fifo,
+ NULL);
if (result) {
dev_err(dev, "configure buffer fail %d\n", result);
return result;
}
- result = inv_mpu6050_probe_trigger(indio_dev);
+ result = inv_mpu6050_probe_trigger(indio_dev, irq_type);
if (result) {
dev_err(dev, "trigger probe fail %d\n", result);
- goto out_unreg_ring;
+ return result;
}
INIT_KFIFO(st->timestamps);
spin_lock_init(&st->time_stamp_lock);
- result = iio_device_register(indio_dev);
+ result = devm_iio_device_register(dev, indio_dev);
if (result) {
dev_err(dev, "IIO register fail %d\n", result);
- goto out_remove_trigger;
+ return result;
}
return 0;
-
-out_remove_trigger:
- inv_mpu6050_remove_trigger(st);
-out_unreg_ring:
- iio_triggered_buffer_cleanup(indio_dev);
- return result;
}
EXPORT_SYMBOL_GPL(inv_mpu_core_probe);
-int inv_mpu_core_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- iio_device_unregister(indio_dev);
- inv_mpu6050_remove_trigger(iio_priv(indio_dev));
- iio_triggered_buffer_cleanup(indio_dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(inv_mpu_core_remove);
-
#ifdef CONFIG_PM_SLEEP
static int inv_mpu_resume(struct device *dev)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index fcd7a92b6cf8..495409d56207 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -29,25 +29,18 @@ static int inv_mpu6050_select_bypass(struct i2c_mux_core *muxc, u32 chan_id)
{
struct iio_dev *indio_dev = i2c_mux_priv(muxc);
struct inv_mpu6050_state *st = iio_priv(indio_dev);
- int ret = 0;
+ int ret;
- /* Use the same mutex which was used everywhere to protect power-op */
mutex_lock(&st->lock);
- if (!st->powerup_count) {
- ret = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
- if (ret)
- goto write_error;
- usleep_range(INV_MPU6050_REG_UP_TIME_MIN,
- INV_MPU6050_REG_UP_TIME_MAX);
- }
- if (!ret) {
- st->powerup_count++;
- ret = regmap_write(st->map, st->reg->int_pin_cfg,
- INV_MPU6050_INT_PIN_CFG |
- INV_MPU6050_BIT_BYPASS_EN);
- }
-write_error:
+ ret = inv_mpu6050_set_power_itg(st, true);
+ if (ret)
+ goto error_unlock;
+
+ ret = regmap_write(st->map, st->reg->int_pin_cfg,
+ st->irq_mask | INV_MPU6050_BIT_BYPASS_EN);
+
+error_unlock:
mutex_unlock(&st->lock);
return ret;
@@ -59,12 +52,11 @@ static int inv_mpu6050_deselect_bypass(struct i2c_mux_core *muxc, u32 chan_id)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
mutex_lock(&st->lock);
- /* It doesn't really mattter, if any of the calls fails */
- regmap_write(st->map, st->reg->int_pin_cfg, INV_MPU6050_INT_PIN_CFG);
- st->powerup_count--;
- if (!st->powerup_count)
- regmap_write(st->map, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_SLEEP);
+
+ /* It doesn't really matter if any of the calls fail */
+ regmap_write(st->map, st->reg->int_pin_cfg, st->irq_mask);
+ inv_mpu6050_set_power_itg(st, false);
+
mutex_unlock(&st->lock);
return 0;
@@ -133,29 +125,32 @@ static int inv_mpu_probe(struct i2c_client *client,
return result;
st = iio_priv(dev_get_drvdata(&client->dev));
- st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
- 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
- inv_mpu6050_select_bypass,
- inv_mpu6050_deselect_bypass);
- if (!st->muxc) {
- result = -ENOMEM;
- goto out_unreg_device;
+ switch (st->chip_type) {
+ case INV_ICM20608:
+ /* no i2c auxiliary bus on the chip */
+ break;
+ default:
+ /* declare i2c auxiliary bus */
+ st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
+ 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
+ inv_mpu6050_select_bypass,
+ inv_mpu6050_deselect_bypass);
+ if (!st->muxc)
+ return -ENOMEM;
+ st->muxc->priv = dev_get_drvdata(&client->dev);
+ result = i2c_mux_add_adapter(st->muxc, 0, 0, 0);
+ if (result)
+ return result;
+ result = inv_mpu_acpi_create_mux_client(client);
+ if (result)
+ goto out_del_mux;
+ break;
}
- st->muxc->priv = dev_get_drvdata(&client->dev);
- result = i2c_mux_add_adapter(st->muxc, 0, 0, 0);
- if (result)
- goto out_unreg_device;
-
- result = inv_mpu_acpi_create_mux_client(client);
- if (result)
- goto out_del_mux;
return 0;
out_del_mux:
i2c_mux_del_adapters(st->muxc);
-out_unreg_device:
- inv_mpu_core_remove(&client->dev);
return result;
}
@@ -164,10 +159,12 @@ static int inv_mpu_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct inv_mpu6050_state *st = iio_priv(indio_dev);
- inv_mpu_acpi_delete_mux_client(client);
- i2c_mux_del_adapters(st->muxc);
+ if (st->muxc) {
+ inv_mpu_acpi_delete_mux_client(client);
+ i2c_mux_del_adapters(st->muxc);
+ }
- return inv_mpu_core_remove(&client->dev);
+ return 0;
}
/*
@@ -179,6 +176,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6500", INV_MPU6500},
{"mpu9150", INV_MPU9150},
{"mpu9250", INV_MPU9250},
+ {"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
{}
};
@@ -203,6 +201,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_MPU9250
},
{
+ .compatible = "invensense,mpu9255",
+ .data = (void *)INV_MPU9255
+ },
+ {
.compatible = "invensense,icm20608",
.data = (void *)INV_ICM20608
},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 065794162d65..c54da777945d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -40,6 +40,7 @@
* @raw_accl: Address of first accel register.
* @temperature: temperature register
* @int_enable: Interrupt enable register.
+ * @int_status: Interrupt status register.
* @pwr_mgmt_1: Controls chip's power state and clock source.
* @pwr_mgmt_2: Controls power state of individual sensors.
* @int_pin_cfg; Controls interrupt pin configuration.
@@ -60,6 +61,7 @@ struct inv_mpu6050_reg_map {
u8 raw_accl;
u8 temperature;
u8 int_enable;
+ u8 int_status;
u8 pwr_mgmt_1;
u8 pwr_mgmt_2;
u8 int_pin_cfg;
@@ -74,6 +76,7 @@ enum inv_devices {
INV_MPU6000,
INV_MPU9150,
INV_MPU9250,
+ INV_MPU9255,
INV_ICM20608,
INV_NUM_PARTS
};
@@ -94,6 +97,7 @@ struct inv_mpu6050_chip_config {
unsigned int accl_fifo_enable:1;
unsigned int gyro_fifo_enable:1;
u16 fifo_rate;
+ u8 user_ctrl;
};
/**
@@ -125,6 +129,7 @@ struct inv_mpu6050_hw {
* @timestamps: kfifo queue to store time stamp.
* @map regmap pointer.
* @irq interrupt number.
+ * @irq_mask the int_pin_cfg mask to configure interrupt type.
*/
struct inv_mpu6050_state {
#define TIMESTAMP_FIFO_SIZE 16
@@ -143,6 +148,8 @@ struct inv_mpu6050_state {
DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
struct regmap *map;
int irq;
+ u8 irq_mask;
+ unsigned skip_samples;
};
/*register and associated bit definition*/
@@ -166,6 +173,9 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_TEMPERATURE 0x41
#define INV_MPU6050_REG_RAW_GYRO 0x43
+#define INV_MPU6050_REG_INT_STATUS 0x3A
+#define INV_MPU6050_BIT_RAW_DATA_RDY_INT 0x01
+
#define INV_MPU6050_REG_USER_CTRL 0x6A
#define INV_MPU6050_BIT_FIFO_RST 0x04
#define INV_MPU6050_BIT_DMP_RST 0x08
@@ -215,8 +225,12 @@ struct inv_mpu6050_state {
#define INV_MPU6050_OUTPUT_DATA_SIZE 24
#define INV_MPU6050_REG_INT_PIN_CFG 0x37
+#define INV_MPU6050_ACTIVE_HIGH 0x00
+#define INV_MPU6050_ACTIVE_LOW 0x80
+/* enable level triggering */
+#define INV_MPU6050_LATCH_INT_EN 0x20
#define INV_MPU6050_BIT_BYPASS_EN 0x2
-#define INV_MPU6050_INT_PIN_CFG 0
+
/* init parameters */
#define INV_MPU6050_INIT_FIFO_RATE 50
@@ -232,6 +246,7 @@ struct inv_mpu6050_state {
#define INV_MPU6500_WHOAMI_VALUE 0x70
#define INV_MPU9150_WHOAMI_VALUE 0x68
#define INV_MPU9250_WHOAMI_VALUE 0x71
+#define INV_MPU9255_WHOAMI_VALUE 0x73
#define INV_ICM20608_WHOAMI_VALUE 0xAF
/* scan element definition */
@@ -287,8 +302,7 @@ enum inv_mpu6050_clock_sel_e {
irqreturn_t inv_mpu6050_irq_handler(int irq, void *p);
irqreturn_t inv_mpu6050_read_fifo(int irq, void *p);
-int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev);
-void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st);
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type);
int inv_reset_fifo(struct iio_dev *indio_dev);
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask);
int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
@@ -297,6 +311,4 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client);
void inv_mpu_acpi_delete_mux_client(struct i2c_client *client);
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type);
-int inv_mpu_core_remove(struct device *dev);
-int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
extern const struct dev_pm_ops inv_mpu_pmops;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index ff81c6aa009d..1795418438e4 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -51,13 +51,14 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
if (result)
goto reset_fifo_fail;
/* disable fifo reading */
- result = regmap_write(st->map, st->reg->user_ctrl, 0);
+ result = regmap_write(st->map, st->reg->user_ctrl,
+ st->chip_config.user_ctrl);
if (result)
goto reset_fifo_fail;
/* reset FIFO*/
- result = regmap_write(st->map, st->reg->user_ctrl,
- INV_MPU6050_BIT_FIFO_RST);
+ d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
+ result = regmap_write(st->map, st->reg->user_ctrl, d);
if (result)
goto reset_fifo_fail;
@@ -72,9 +73,9 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
if (result)
return result;
}
- /* enable FIFO reading and I2C master interface*/
- result = regmap_write(st->map, st->reg->user_ctrl,
- INV_MPU6050_BIT_FIFO_EN);
+ /* enable FIFO reading */
+ d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_EN;
+ result = regmap_write(st->map, st->reg->user_ctrl, d);
if (result)
goto reset_fifo_fail;
/* enable sensor output to FIFO */
@@ -127,8 +128,23 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
u16 fifo_count;
s64 timestamp;
+ int int_status;
mutex_lock(&st->lock);
+
+ /* ack interrupt and check status */
+ result = regmap_read(st->map, st->reg->int_status, &int_status);
+ if (result) {
+ dev_err(regmap_get_device(st->map),
+ "failed to ack interrupt\n");
+ goto flush_fifo;
+ }
+ if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
+ dev_warn(regmap_get_device(st->map),
+ "spurious interrupt with status 0x%x\n", int_status);
+ goto end_session;
+ }
+
if (!(st->chip_config.accl_fifo_enable |
st->chip_config.gyro_fifo_enable))
goto end_session;
@@ -140,7 +156,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
/*
- * read fifo_count register to know how many bytes inside FIFO
+ * read fifo_count register to know how many bytes are inside the FIFO
* right now
*/
result = regmap_bulk_read(st->map, st->reg->fifo_count_h, data,
@@ -150,7 +166,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
fifo_count = be16_to_cpup((__be16 *)(&data[0]));
if (fifo_count < bytes_per_datum)
goto end_session;
- /* fifo count can't be odd number, if it is odd, reset fifo*/
+ /* fifo count can't be an odd number. If it is odd, reset the FIFO. */
if (fifo_count & 1)
goto flush_fifo;
if (fifo_count > INV_MPU6050_FIFO_THRESHOLD)
@@ -159,7 +175,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (kfifo_len(&st->timestamps) >
fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
goto flush_fifo;
- while (fifo_count >= bytes_per_datum) {
+ do {
result = regmap_bulk_read(st->map, st->reg->fifo_r_w,
data, bytes_per_datum);
if (result)
@@ -170,12 +186,15 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (result == 0)
timestamp = 0;
- result = iio_push_to_buffers_with_timestamp(indio_dev, data,
- timestamp);
- if (result)
- goto flush_fifo;
+ /* skip first samples if needed */
+ if (st->skip_samples)
+ st->skip_samples--;
+ else
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ timestamp);
+
fifo_count -= bytes_per_datum;
- }
+ } while (fifo_count >= bytes_per_datum);
end_session:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 74506e5ac0db..227f50afff22 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -31,8 +31,9 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
if (ret)
return ret;
- ret = regmap_write(st->map, INV_MPU6050_REG_USER_CTRL,
- INV_MPU6050_BIT_I2C_IF_DIS);
+ st->chip_config.user_ctrl |= INV_MPU6050_BIT_I2C_IF_DIS;
+ ret = regmap_write(st->map, st->reg->user_ctrl,
+ st->chip_config.user_ctrl);
if (ret) {
inv_mpu6050_set_power_itg(st, false);
return ret;
@@ -69,11 +70,6 @@ static int inv_mpu_probe(struct spi_device *spi)
inv_mpu_i2c_disable, chip_type);
}
-static int inv_mpu_remove(struct spi_device *spi)
-{
- return inv_mpu_core_remove(&spi->dev);
-}
-
/*
* device id table is used to identify what device can be
* supported by this driver
@@ -83,6 +79,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"mpu6500", INV_MPU6500},
{"mpu9150", INV_MPU9150},
{"mpu9250", INV_MPU9250},
+ {"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
{}
};
@@ -97,7 +94,6 @@ MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
static struct spi_driver inv_mpu_driver = {
.probe = inv_mpu_probe,
- .remove = inv_mpu_remove,
.id_table = inv_mpu_id,
.driver = {
.acpi_match_table = ACPI_PTR(inv_acpi_match),
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index f963f9fc98c0..6c3e1652a687 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -49,49 +49,66 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
return result;
inv_scan_query(indio_dev);
+ st->skip_samples = 0;
if (st->chip_config.gyro_fifo_enable) {
result = inv_mpu6050_switch_engine(st, true,
INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
- return result;
+ goto error_power_off;
+ /* gyro first sample is out of specs, skip it */
+ st->skip_samples = 1;
}
if (st->chip_config.accl_fifo_enable) {
result = inv_mpu6050_switch_engine(st, true,
INV_MPU6050_BIT_PWR_ACCL_STBY);
if (result)
- return result;
+ goto error_gyro_off;
}
result = inv_reset_fifo(indio_dev);
if (result)
- return result;
+ goto error_accl_off;
} else {
result = regmap_write(st->map, st->reg->fifo_en, 0);
if (result)
- return result;
+ goto error_accl_off;
result = regmap_write(st->map, st->reg->int_enable, 0);
if (result)
- return result;
+ goto error_accl_off;
- result = regmap_write(st->map, st->reg->user_ctrl, 0);
+ result = regmap_write(st->map, st->reg->user_ctrl,
+ st->chip_config.user_ctrl);
if (result)
- return result;
+ goto error_accl_off;
result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
if (result)
- return result;
+ goto error_accl_off;
result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
- return result;
+ goto error_gyro_off;
+
result = inv_mpu6050_set_power_itg(st, false);
if (result)
- return result;
+ goto error_power_off;
}
return 0;
+
+error_accl_off:
+ if (st->chip_config.accl_fifo_enable)
+ inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+error_gyro_off:
+ if (st->chip_config.gyro_fifo_enable)
+ inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+error_power_off:
+ inv_mpu6050_set_power_itg(st, false);
+ return result;
}
/**
@@ -117,7 +134,7 @@ static const struct iio_trigger_ops inv_mpu_trigger_ops = {
.set_trigger_state = &inv_mpu_data_rdy_trigger_set_state,
};
-int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type)
{
int ret;
struct inv_mpu6050_state *st = iio_priv(indio_dev);
@@ -131,7 +148,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
ret = devm_request_irq(&indio_dev->dev, st->irq,
&iio_trigger_generic_data_rdy_poll,
- IRQF_TRIGGER_RISING,
+ irq_type,
"inv_mpu",
st->trig);
if (ret)
@@ -141,7 +158,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
st->trig->ops = &inv_mpu_trigger_ops;
iio_trigger_set_drvdata(st->trig, indio_dev);
- ret = iio_trigger_register(st->trig);
+ ret = devm_iio_trigger_register(&indio_dev->dev, st->trig);
if (ret)
return ret;
@@ -149,8 +166,3 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
return 0;
}
-
-void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st)
-{
- iio_trigger_unregister(st->trig);
-}
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 14f2eb6e9fb7..ccc817e17eb8 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -8,7 +8,8 @@ config IIO_ST_LSM6DSX
select IIO_ST_LSM6DSX_SPI if (SPI_MASTER)
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
- sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm
+ sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
+ ism330dlc
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index a3cc7cd97026..edcd838037cd 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -18,12 +18,14 @@
#define ST_LSM6DS3H_DEV_NAME "lsm6ds3h"
#define ST_LSM6DSL_DEV_NAME "lsm6dsl"
#define ST_LSM6DSM_DEV_NAME "lsm6dsm"
+#define ST_ISM330DLC_DEV_NAME "ism330dlc"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
ST_LSM6DS3H_ID,
ST_LSM6DSL_ID,
ST_LSM6DSM_ID,
+ ST_ISM330DLC_ID,
ST_LSM6DSX_MAX_ID,
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 1045e025e92b..4994f920a836 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -1,10 +1,10 @@
/*
* STMicroelectronics st_lsm6dsx FIFO buffer library driver
*
- * LSM6DS3/LSM6DS3H/LSM6DSL/LSM6DSM: The FIFO buffer can be configured
- * to store data from gyroscope and accelerometer. Samples are queued
- * without any tag according to a specific pattern based on 'FIFO data sets'
- * (6 bytes each):
+ * LSM6DS3/LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC: The FIFO buffer can be
+ * configured to store data from gyroscope and accelerometer. Samples are
+ * queued without any tag according to a specific pattern based on
+ * 'FIFO data sets' (6 bytes each):
* - 1st data set is reserved for gyroscope data
* - 2nd data set is reserved for accelerometer data
* The FIFO pattern changes depending on the ODRs and decimation factors
@@ -276,7 +276,7 @@ static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 *data,
#define ST_LSM6DSX_IIO_BUFF_SIZE (ALIGN(ST_LSM6DSX_SAMPLE_SIZE, \
sizeof(s64)) + sizeof(s64))
/**
- * st_lsm6dsx_read_fifo() - LSM6DS3-LSM6DS3H-LSM6DSL-LSM6DSM read FIFO routine
+ * st_lsm6dsx_read_fifo() - hw FIFO read routine
* @hw: Pointer to instance of struct st_lsm6dsx_hw.
*
* Read samples from the hw FIFO and push them to IIO buffers.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 8656d72ef4ee..aebbe0ddd8d8 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -17,7 +17,7 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 8KB
*
- * - LSM6DS3H/LSM6DSL/LSM6DSM:
+ * - LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC:
* - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
@@ -252,6 +252,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.id = {
[0] = ST_LSM6DSL_ID,
[1] = ST_LSM6DSM_ID,
+ [2] = ST_ISM330DLC_ID,
},
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
@@ -266,11 +267,11 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fifo_ops = {
.fifo_th = {
.addr = 0x06,
- .mask = GENMASK(11, 0),
+ .mask = GENMASK(10, 0),
},
.fifo_diff = {
.addr = 0x3a,
- .mask = GENMASK(11, 0),
+ .mask = GENMASK(10, 0),
},
.th_wl = 3, /* 1LSB = 2B */
},
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 41525dd2aab7..377c4e9997da 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -57,6 +57,10 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,lsm6dsm",
.data = (void *)ST_LSM6DSM_ID,
},
+ {
+ .compatible = "st,ism330dlc",
+ .data = (void *)ST_ISM330DLC_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -66,6 +70,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DS3H_DEV_NAME, ST_LSM6DS3H_ID },
{ ST_LSM6DSL_DEV_NAME, ST_LSM6DSL_ID },
{ ST_LSM6DSM_DEV_NAME, ST_LSM6DSM_ID },
+ { ST_ISM330DLC_DEV_NAME, ST_ISM330DLC_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 2c8135834479..fec5c6ce7eb7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -57,6 +57,10 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,lsm6dsm",
.data = (void *)ST_LSM6DSM_ID,
},
+ {
+ .compatible = "st,ism330dlc",
+ .data = (void *)ST_ISM330DLC_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -66,6 +70,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DS3H_DEV_NAME, ST_LSM6DS3H_ID },
{ ST_LSM6DSL_DEV_NAME, ST_LSM6DSL_ID },
{ ST_LSM6DSM_DEV_NAME, ST_LSM6DSM_ID },
+ { ST_ISM330DLC_DEV_NAME, ST_ISM330DLC_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 074e50657366..c7ef8d1862d6 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -409,6 +409,14 @@ config TSL2583
Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
Access ALS data via iio, sysfs.
+config TSL2772
+ tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
+ depends on I2C
+ help
+ Support for: tsl2571, tsl2671, tmd2671, tsl2771, tmd2771, tsl2572, tsl2672,
+ tmd2672, tsl2772, tmd2772 devices.
+ Provides iio_events and direct access via sysfs.
+
config TSL4531
tristate "TAOS TSL4531 ambient light sensors"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index f1777036d4f8..80943af5d627 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_ST_UVIS25_SPI) += st_uvis25_spi.o
obj-$(CONFIG_TCS3414) += tcs3414.o
obj-$(CONFIG_TCS3472) += tcs3472.o
obj-$(CONFIG_TSL2583) += tsl2583.o
+obj-$(CONFIG_TSL2772) += tsl2772.o
obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index 8e8a0e7f78d1..fd1609e975ab 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/common/cros_ec_sensors_core.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
@@ -29,8 +30,6 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
-#include "../common/cros_ec_sensors/cros_ec_sensors_core.h"
-
/*
* We only represent one entry for light or proximity. EC is merging different
* light sensors to return the what the eye would see. For proximity, we
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index f2e50edaa242..4b5d9988f025 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -600,7 +600,7 @@ done:
static IIO_CONST_ATTR(in_illuminance_calibscale_available, "1 8 16 111");
static IIO_CONST_ATTR(in_illuminance_integration_time_available,
- "0.000050 0.000100 0.000150 0.000200 0.000250 0.000300 0.000350 0.000400 0.000450 0.000500 0.000550 0.000600 0.000650");
+ "0.050 0.100 0.150 0.200 0.250 0.300 0.350 0.400 0.450 0.500 0.550 0.600 0.650");
static IIO_DEVICE_ATTR_RW(in_illuminance_input_target, 0);
static IIO_DEVICE_ATTR_WO(in_illuminance_calibrate, 0);
static IIO_DEVICE_ATTR_RW(in_illuminance_lux_table, 0);
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
new file mode 100644
index 000000000000..34d42a2504c9
--- /dev/null
+++ b/drivers/iio/light/tsl2772.c
@@ -0,0 +1,1800 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Device driver for monitoring ambient light intensity in (lux) and proximity
+ * detection (prox) within the TAOS TSL2571, TSL2671, TMD2671, TSL2771, TMD2771,
+ * TSL2572, TSL2672, TMD2672, TSL2772, and TMD2772 devices.
+ *
+ * Copyright (c) 2012, TAOS Corporation.
+ * Copyright (c) 2017-2018 Brian Masney <masneyb@onstation.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/platform_data/tsl2772.h>
+
+/* Cal defs */
+#define PROX_STAT_CAL 0
+#define PROX_STAT_SAMP 1
+#define MAX_SAMPLES_CAL 200
+
+/* TSL2772 Device ID */
+#define TRITON_ID 0x00
+#define SWORDFISH_ID 0x30
+#define HALIBUT_ID 0x20
+
+/* Lux calculation constants */
+#define TSL2772_LUX_CALC_OVER_FLOW 65535
+
+/*
+ * TAOS Register definitions - Note: depending on device, some of these registers
+ * are not used and the register address is benign.
+ */
+
+/* Register offsets */
+#define TSL2772_MAX_CONFIG_REG 16
+
+/* Device Registers and Masks */
+#define TSL2772_CNTRL 0x00
+#define TSL2772_ALS_TIME 0X01
+#define TSL2772_PRX_TIME 0x02
+#define TSL2772_WAIT_TIME 0x03
+#define TSL2772_ALS_MINTHRESHLO 0X04
+#define TSL2772_ALS_MINTHRESHHI 0X05
+#define TSL2772_ALS_MAXTHRESHLO 0X06
+#define TSL2772_ALS_MAXTHRESHHI 0X07
+#define TSL2772_PRX_MINTHRESHLO 0X08
+#define TSL2772_PRX_MINTHRESHHI 0X09
+#define TSL2772_PRX_MAXTHRESHLO 0X0A
+#define TSL2772_PRX_MAXTHRESHHI 0X0B
+#define TSL2772_PERSISTENCE 0x0C
+#define TSL2772_ALS_PRX_CONFIG 0x0D
+#define TSL2772_PRX_COUNT 0x0E
+#define TSL2772_GAIN 0x0F
+#define TSL2772_NOTUSED 0x10
+#define TSL2772_REVID 0x11
+#define TSL2772_CHIPID 0x12
+#define TSL2772_STATUS 0x13
+#define TSL2772_ALS_CHAN0LO 0x14
+#define TSL2772_ALS_CHAN0HI 0x15
+#define TSL2772_ALS_CHAN1LO 0x16
+#define TSL2772_ALS_CHAN1HI 0x17
+#define TSL2772_PRX_LO 0x18
+#define TSL2772_PRX_HI 0x19
+
+/* tsl2772 cmd reg masks */
+#define TSL2772_CMD_REG 0x80
+#define TSL2772_CMD_SPL_FN 0x60
+#define TSL2772_CMD_REPEAT_PROTO 0x00
+#define TSL2772_CMD_AUTOINC_PROTO 0x20
+
+#define TSL2772_CMD_PROX_INT_CLR 0X05
+#define TSL2772_CMD_ALS_INT_CLR 0x06
+#define TSL2772_CMD_PROXALS_INT_CLR 0X07
+
+/* tsl2772 cntrl reg masks */
+#define TSL2772_CNTL_ADC_ENBL 0x02
+#define TSL2772_CNTL_PWR_ON 0x01
+
+/* tsl2772 status reg masks */
+#define TSL2772_STA_ADC_VALID 0x01
+#define TSL2772_STA_PRX_VALID 0x02
+#define TSL2772_STA_ADC_PRX_VALID (TSL2772_STA_ADC_VALID | \
+ TSL2772_STA_PRX_VALID)
+#define TSL2772_STA_ALS_INTR 0x10
+#define TSL2772_STA_PRX_INTR 0x20
+
+/* tsl2772 cntrl reg masks */
+#define TSL2772_CNTL_REG_CLEAR 0x00
+#define TSL2772_CNTL_PROX_INT_ENBL 0X20
+#define TSL2772_CNTL_ALS_INT_ENBL 0X10
+#define TSL2772_CNTL_WAIT_TMR_ENBL 0X08
+#define TSL2772_CNTL_PROX_DET_ENBL 0X04
+#define TSL2772_CNTL_PWRON 0x01
+#define TSL2772_CNTL_ALSPON_ENBL 0x03
+#define TSL2772_CNTL_INTALSPON_ENBL 0x13
+#define TSL2772_CNTL_PROXPON_ENBL 0x0F
+#define TSL2772_CNTL_INTPROXPON_ENBL 0x2F
+
+#define TSL2772_ALS_GAIN_TRIM_MIN 250
+#define TSL2772_ALS_GAIN_TRIM_MAX 4000
+
+/* Device family members */
+enum {
+ tsl2571,
+ tsl2671,
+ tmd2671,
+ tsl2771,
+ tmd2771,
+ tsl2572,
+ tsl2672,
+ tmd2672,
+ tsl2772,
+ tmd2772
+};
+
+enum {
+ TSL2772_CHIP_UNKNOWN = 0,
+ TSL2772_CHIP_WORKING = 1,
+ TSL2772_CHIP_SUSPENDED = 2
+};
+
+/* Per-device data */
+struct tsl2772_als_info {
+ u16 als_ch0;
+ u16 als_ch1;
+ u16 lux;
+};
+
+struct tsl2772_chip_info {
+ int chan_table_elements;
+ struct iio_chan_spec channel_with_events[4];
+ struct iio_chan_spec channel_without_events[4];
+ const struct iio_info *info;
+};
+
+struct tsl2772_chip {
+ kernel_ulong_t id;
+ struct mutex prox_mutex;
+ struct mutex als_mutex;
+ struct i2c_client *client;
+ u16 prox_data;
+ struct tsl2772_als_info als_cur_info;
+ struct tsl2772_settings settings;
+ struct tsl2772_platform_data *pdata;
+ int als_gain_time_scale;
+ int als_saturation;
+ int tsl2772_chip_status;
+ u8 tsl2772_config[TSL2772_MAX_CONFIG_REG];
+ const struct tsl2772_chip_info *chip_info;
+ const struct iio_info *info;
+ s64 event_timestamp;
+ /*
+ * This structure is intentionally large to accommodate
+ * updates via sysfs.
+ * Sized to 9 = max 8 segments + 1 termination segment
+ */
+ struct tsl2772_lux tsl2772_device_lux[TSL2772_MAX_LUX_TABLE_SIZE];
+};
+
+/*
+ * Different devices require different coefficients, and these numbers were
+ * derived from the 'Lux Equation' section of the various device datasheets.
+ * All of these coefficients assume a Glass Attenuation (GA) factor of 1.
+ * The coefficients are multiplied by 1000 to avoid floating point operations.
+ * The two rows in each table correspond to the Lux1 and Lux2 equations from
+ * the datasheets.
+ */
+static const struct tsl2772_lux tsl2x71_lux_table[TSL2772_DEF_LUX_TABLE_SZ] = {
+ { 53000, 106000 },
+ { 31800, 53000 },
+ { 0, 0 },
+};
+
+static const struct tsl2772_lux tmd2x71_lux_table[TSL2772_DEF_LUX_TABLE_SZ] = {
+ { 24000, 48000 },
+ { 14400, 24000 },
+ { 0, 0 },
+};
+
+static const struct tsl2772_lux tsl2x72_lux_table[TSL2772_DEF_LUX_TABLE_SZ] = {
+ { 60000, 112200 },
+ { 37800, 60000 },
+ { 0, 0 },
+};
+
+static const struct tsl2772_lux tmd2x72_lux_table[TSL2772_DEF_LUX_TABLE_SZ] = {
+ { 20000, 35000 },
+ { 12600, 20000 },
+ { 0, 0 },
+};
+
+static const struct tsl2772_lux *tsl2772_default_lux_table_group[] = {
+ [tsl2571] = tsl2x71_lux_table,
+ [tsl2671] = tsl2x71_lux_table,
+ [tmd2671] = tmd2x71_lux_table,
+ [tsl2771] = tsl2x71_lux_table,
+ [tmd2771] = tmd2x71_lux_table,
+ [tsl2572] = tsl2x72_lux_table,
+ [tsl2672] = tsl2x72_lux_table,
+ [tmd2672] = tmd2x72_lux_table,
+ [tsl2772] = tsl2x72_lux_table,
+ [tmd2772] = tmd2x72_lux_table,
+};
+
+static const struct tsl2772_settings tsl2772_default_settings = {
+ .als_time = 255, /* 2.72 / 2.73 ms */
+ .als_gain = 0,
+ .prox_time = 255, /* 2.72 / 2.73 ms */
+ .prox_gain = 0,
+ .wait_time = 255,
+ .als_prox_config = 0,
+ .als_gain_trim = 1000,
+ .als_cal_target = 150,
+ .als_persistence = 1,
+ .als_interrupt_en = false,
+ .als_thresh_low = 200,
+ .als_thresh_high = 256,
+ .prox_persistence = 1,
+ .prox_interrupt_en = false,
+ .prox_thres_low = 0,
+ .prox_thres_high = 512,
+ .prox_max_samples_cal = 30,
+ .prox_pulse_count = 8,
+ .prox_diode = TSL2772_DIODE1,
+ .prox_power = TSL2772_100_mA
+};
+
+static const s16 tsl2772_als_gain[] = {
+ 1,
+ 8,
+ 16,
+ 120
+};
+
+static const s16 tsl2772_prox_gain[] = {
+ 1,
+ 2,
+ 4,
+ 8
+};
+
+static const int tsl2772_int_time_avail[][6] = {
+ [tsl2571] = { 0, 2720, 0, 2720, 0, 696000 },
+ [tsl2671] = { 0, 2720, 0, 2720, 0, 696000 },
+ [tmd2671] = { 0, 2720, 0, 2720, 0, 696000 },
+ [tsl2771] = { 0, 2720, 0, 2720, 0, 696000 },
+ [tmd2771] = { 0, 2720, 0, 2720, 0, 696000 },
+ [tsl2572] = { 0, 2730, 0, 2730, 0, 699000 },
+ [tsl2672] = { 0, 2730, 0, 2730, 0, 699000 },
+ [tmd2672] = { 0, 2730, 0, 2730, 0, 699000 },
+ [tsl2772] = { 0, 2730, 0, 2730, 0, 699000 },
+ [tmd2772] = { 0, 2730, 0, 2730, 0, 699000 },
+};
+
+static int tsl2772_int_calibscale_avail[] = { 1, 8, 16, 120 };
+
+static int tsl2772_prox_calibscale_avail[] = { 1, 2, 4, 8 };
+
+/* Channel variations */
+enum {
+ ALS,
+ PRX,
+ ALSPRX,
+ PRX2,
+ ALSPRX2,
+};
+
+static const u8 device_channel_config[] = {
+ [tsl2571] = ALS,
+ [tsl2671] = PRX,
+ [tmd2671] = PRX,
+ [tsl2771] = ALSPRX,
+ [tmd2771] = ALSPRX,
+ [tsl2572] = ALS,
+ [tsl2672] = PRX2,
+ [tmd2672] = PRX2,
+ [tsl2772] = ALSPRX2,
+ [tmd2772] = ALSPRX2
+};
+
+static int tsl2772_read_status(struct tsl2772_chip *chip)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client,
+ TSL2772_CMD_REG | TSL2772_STATUS);
+ if (ret < 0)
+ dev_err(&chip->client->dev,
+ "%s: failed to read STATUS register: %d\n", __func__,
+ ret);
+
+ return ret;
+}
+
+static int tsl2772_write_control_reg(struct tsl2772_chip *chip, u8 data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2772_CMD_REG | TSL2772_CNTRL, data);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to write to control register %x: %d\n",
+ __func__, data, ret);
+ }
+
+ return ret;
+}
+
+static int tsl2772_read_autoinc_regs(struct tsl2772_chip *chip, int lower_reg,
+ int upper_reg)
+{
+ u8 buf[2];
+ int ret;
+
+ ret = i2c_smbus_write_byte(chip->client,
+ TSL2772_CMD_REG | TSL2772_CMD_AUTOINC_PROTO |
+ lower_reg);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to enable auto increment protocol: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(chip->client,
+ TSL2772_CMD_REG | lower_reg);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to read from register %x: %d\n", __func__,
+ lower_reg, ret);
+ return ret;
+ }
+ buf[0] = ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client,
+ TSL2772_CMD_REG | upper_reg);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to read from register %x: %d\n", __func__,
+ upper_reg, ret);
+ return ret;
+ }
+ buf[1] = ret;
+
+ ret = i2c_smbus_write_byte(chip->client,
+ TSL2772_CMD_REG | TSL2772_CMD_REPEAT_PROTO |
+ lower_reg);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to enable repeated byte protocol: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return le16_to_cpup((const __le16 *)&buf[0]);
+}
+
+/**
+ * tsl2772_get_lux() - Reads and calculates current lux value.
+ * @indio_dev: pointer to IIO device
+ *
+ * The raw ch0 and ch1 values of the ambient light sensed in the last
+ * integration cycle are read from the device. The raw values are multiplied
+ * by a device-specific scale factor, and divided by the integration time and
+ * device gain. The code supports multiple lux equations through the lux table
+ * coefficients. A lux gain trim is applied to each lux equation, and then the
+ * maximum lux within the interval 0..65535 is selected.
+ *
+ * Return: the freshly computed lux (also cached in chip->als_cur_info.lux),
+ * the previously cached lux when no new ADC data is available yet, or a
+ * negative errno on failure.
+ */
+static int tsl2772_get_lux(struct iio_dev *indio_dev)
+{
+ struct tsl2772_chip *chip = iio_priv(indio_dev);
+ struct tsl2772_lux *p;
+ int max_lux, ret;
+ bool overflow;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->tsl2772_chip_status != TSL2772_CHIP_WORKING) {
+ dev_err(&chip->client->dev, "%s: device is not enabled\n",
+ __func__);
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = tsl2772_read_status(chip);
+ if (ret < 0)
+ goto out_unlock;
+
+ /* No completed integration cycle yet: report the cached value. */
+ if (!(ret & TSL2772_STA_ADC_VALID)) {
+ dev_err(&chip->client->dev,
+ "%s: data not valid yet\n", __func__);
+ ret = chip->als_cur_info.lux; /* return LAST VALUE */
+ goto out_unlock;
+ }
+
+ ret = tsl2772_read_autoinc_regs(chip, TSL2772_ALS_CHAN0LO,
+ TSL2772_ALS_CHAN0HI);
+ if (ret < 0)
+ goto out_unlock;
+ chip->als_cur_info.als_ch0 = ret;
+
+ ret = tsl2772_read_autoinc_regs(chip, TSL2772_ALS_CHAN1LO,
+ TSL2772_ALS_CHAN1HI);
+ if (ret < 0)
+ goto out_unlock;
+ chip->als_cur_info.als_ch1 = ret;
+
+ /* ch0 at/above the saturation level: clamp to the overflow marker. */
+ if (chip->als_cur_info.als_ch0 >= chip->als_saturation) {
+ max_lux = TSL2772_LUX_CALC_OVER_FLOW;
+ goto update_struct_with_max_lux;
+ }
+
+ if (!chip->als_cur_info.als_ch0) {
+ /* have no data, so return LAST VALUE */
+ ret = chip->als_cur_info.lux;
+ goto out_unlock;
+ }
+
+ max_lux = 0;
+ overflow = false;
+ /* The lux table is terminated by an entry whose ch0 is 0. */
+ for (p = (struct tsl2772_lux *)chip->tsl2772_device_lux; p->ch0 != 0;
+ p++) {
+ int lux;
+
+ lux = ((chip->als_cur_info.als_ch0 * p->ch0) -
+ (chip->als_cur_info.als_ch1 * p->ch1)) /
+ chip->als_gain_time_scale;
+
+ /*
+ * The als_gain_trim can have a value within the range 250..4000
+ * and is a multiplier for the lux. A trim of 1000 makes no
+ * changes to the lux, less than 1000 scales it down, and
+ * greater than 1000 scales it up.
+ */
+ lux = (lux * chip->settings.als_gain_trim) / 1000;
+
+ if (lux > TSL2772_LUX_CALC_OVER_FLOW) {
+ overflow = true;
+ continue;
+ }
+
+ max_lux = max(max_lux, lux);
+ }
+
+ /* Every equation overflowed: report the overflow marker itself. */
+ if (overflow && max_lux == 0)
+ max_lux = TSL2772_LUX_CALC_OVER_FLOW;
+
+update_struct_with_max_lux:
+ chip->als_cur_info.lux = max_lux;
+ ret = max_lux;
+
+out_unlock:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+/**
+ * tsl2772_get_prox() - Reads proximity data registers and updates
+ * chip->prox_data.
+ *
+ * @indio_dev: pointer to IIO device
+ *
+ * Return: the raw proximity count on success, -EINVAL when the relevant
+ * status valid bit is not set, or a negative errno from the I2C reads.
+ */
+static int tsl2772_get_prox(struct iio_dev *indio_dev)
+{
+ struct tsl2772_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->prox_mutex);
+
+ ret = tsl2772_read_status(chip);
+ if (ret < 0)
+ goto prox_poll_err;
+
+ /*
+ * Data-valid checks differ by part: the x71 parts only expose the
+ * shared ADC-valid bit, the x72 parts have a dedicated
+ * proximity-valid bit.
+ */
+ switch (chip->id) {
+ case tsl2571:
+ case tsl2671:
+ case tmd2671:
+ case tsl2771:
+ case tmd2771:
+ if (!(ret & TSL2772_STA_ADC_VALID)) {
+ ret = -EINVAL;
+ goto prox_poll_err;
+ }
+ break;
+ case tsl2572:
+ case tsl2672:
+ case tmd2672:
+ case tsl2772:
+ case tmd2772:
+ if (!(ret & TSL2772_STA_PRX_VALID)) {
+ ret = -EINVAL;
+ goto prox_poll_err;
+ }
+ break;
+ }
+
+ ret = tsl2772_read_autoinc_regs(chip, TSL2772_PRX_LO, TSL2772_PRX_HI);
+ if (ret < 0)
+ goto prox_poll_err;
+ chip->prox_data = ret;
+
+prox_poll_err:
+ mutex_unlock(&chip->prox_mutex);
+
+ return ret;
+}
+
+/**
+ * tsl2772_defaults() - Populates the device nominal operating parameters
+ *                      with those provided by a 'platform' data struct or
+ *                      with predefined defaults.
+ *
+ * @chip: pointer to device structure.
+ */
+static void tsl2772_defaults(struct tsl2772_chip *chip)
+{
+	/*
+	 * Start from the platform-supplied settings when present, otherwise
+	 * fall back to the driver's built-in defaults.
+	 */
+	if (!chip->pdata || !chip->pdata->platform_default_settings)
+		memcpy(&chip->settings, &tsl2772_default_settings,
+		       sizeof(tsl2772_default_settings));
+	else
+		memcpy(&chip->settings, chip->pdata->platform_default_settings,
+		       sizeof(tsl2772_default_settings));
+
+	/*
+	 * Same choice for the lux table: a platform table wins when its
+	 * first entry is populated (ch0 != 0 marks a valid entry).
+	 */
+	if (!chip->pdata || chip->pdata->platform_lux_table[0].ch0 == 0)
+		memcpy(chip->tsl2772_device_lux,
+		       tsl2772_default_lux_table_group[chip->id],
+		       TSL2772_DEFAULT_TABLE_BYTES);
+	else
+		memcpy(chip->tsl2772_device_lux,
+		       chip->pdata->platform_lux_table,
+		       sizeof(chip->pdata->platform_lux_table));
+}
+
+/**
+ * tsl2772_als_calibrate() - Obtain single reading and calculate
+ *                           the als_gain_trim.
+ *
+ * @indio_dev: pointer to IIO device
+ *
+ * The trim is derived from als_cal_target / measured lux (scaled by the
+ * current trim). Requires the device to be powered on with the ADC enabled
+ * and a completed integration cycle.
+ *
+ * Return: the new gain trim, -EINVAL/-ENODATA when the device is not ready,
+ * -ERANGE when the computed trim is out of range or the measured lux is 0,
+ * or a negative errno from the I2C read.
+ */
+static int tsl2772_als_calibrate(struct iio_dev *indio_dev)
+{
+	struct tsl2772_chip *chip = iio_priv(indio_dev);
+	int ret, lux_val;
+
+	ret = i2c_smbus_read_byte_data(chip->client,
+				       TSL2772_CMD_REG | TSL2772_CNTRL);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: failed to read from the CNTRL register\n",
+			__func__);
+		return ret;
+	}
+
+	if ((ret & (TSL2772_CNTL_ADC_ENBL | TSL2772_CNTL_PWR_ON))
+			!= (TSL2772_CNTL_ADC_ENBL | TSL2772_CNTL_PWR_ON)) {
+		dev_err(&chip->client->dev,
+			"%s: Device is not powered on and/or ADC is not enabled\n",
+			__func__);
+		return -EINVAL;
+	} else if ((ret & TSL2772_STA_ADC_VALID) != TSL2772_STA_ADC_VALID) {
+		dev_err(&chip->client->dev,
+			"%s: The two ADC channels have not completed an integration cycle\n",
+			__func__);
+		return -ENODATA;
+	}
+
+	lux_val = tsl2772_get_lux(indio_dev);
+	if (lux_val < 0) {
+		dev_err(&chip->client->dev,
+			"%s: failed to get lux\n", __func__);
+		return lux_val;
+	}
+
+	/*
+	 * tsl2772_get_lux() can legitimately return 0 (e.g. total darkness);
+	 * guard the division below instead of dividing by zero.
+	 */
+	if (lux_val == 0)
+		return -ERANGE;
+
+	ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
+			lux_val;
+	if (ret < TSL2772_ALS_GAIN_TRIM_MIN || ret > TSL2772_ALS_GAIN_TRIM_MAX)
+		return -ERANGE;
+
+	chip->settings.als_gain_trim = ret;
+
+	return ret;
+}
+
+/*
+ * tsl2772_chip_on() - Program the shadow register set into the device and
+ * bring it to the WORKING state: power on, write all config registers,
+ * then enable the ADC, proximity detection and any requested interrupts.
+ *
+ * Returns 0 on success, -EINVAL if the chip is already enabled, or a
+ * negative errno from the I2C accesses.
+ */
+static int tsl2772_chip_on(struct iio_dev *indio_dev)
+{
+ struct tsl2772_chip *chip = iio_priv(indio_dev);
+ int ret, i, als_count, als_time_us;
+ u8 *dev_reg, reg_val;
+
+ /* Non calculated parameters */
+ chip->tsl2772_config[TSL2772_ALS_TIME] = chip->settings.als_time;
+ chip->tsl2772_config[TSL2772_PRX_TIME] = chip->settings.prox_time;
+ chip->tsl2772_config[TSL2772_WAIT_TIME] = chip->settings.wait_time;
+ chip->tsl2772_config[TSL2772_ALS_PRX_CONFIG] =
+ chip->settings.als_prox_config;
+
+ /* 16-bit thresholds are split into low/high register bytes. */
+ chip->tsl2772_config[TSL2772_ALS_MINTHRESHLO] =
+ (chip->settings.als_thresh_low) & 0xFF;
+ chip->tsl2772_config[TSL2772_ALS_MINTHRESHHI] =
+ (chip->settings.als_thresh_low >> 8) & 0xFF;
+ chip->tsl2772_config[TSL2772_ALS_MAXTHRESHLO] =
+ (chip->settings.als_thresh_high) & 0xFF;
+ chip->tsl2772_config[TSL2772_ALS_MAXTHRESHHI] =
+ (chip->settings.als_thresh_high >> 8) & 0xFF;
+ /* Persistence register: prox filter in the high nibble, ALS in low. */
+ chip->tsl2772_config[TSL2772_PERSISTENCE] =
+ (chip->settings.prox_persistence & 0xFF) << 4 |
+ (chip->settings.als_persistence & 0xFF);
+
+ chip->tsl2772_config[TSL2772_PRX_COUNT] =
+ chip->settings.prox_pulse_count;
+ chip->tsl2772_config[TSL2772_PRX_MINTHRESHLO] =
+ (chip->settings.prox_thres_low) & 0xFF;
+ chip->tsl2772_config[TSL2772_PRX_MINTHRESHHI] =
+ (chip->settings.prox_thres_low >> 8) & 0xFF;
+ chip->tsl2772_config[TSL2772_PRX_MAXTHRESHLO] =
+ (chip->settings.prox_thres_high) & 0xFF;
+ chip->tsl2772_config[TSL2772_PRX_MAXTHRESHHI] =
+ (chip->settings.prox_thres_high >> 8) & 0xFF;
+
+ /* and make sure we're not already on */
+ if (chip->tsl2772_chip_status == TSL2772_CHIP_WORKING) {
+ /* if forcing a register update - turn off, then on */
+ dev_info(&chip->client->dev, "device is already enabled\n");
+ return -EINVAL;
+ }
+
+ /* Set the gain based on tsl2772_settings struct */
+ chip->tsl2772_config[TSL2772_GAIN] =
+ (chip->settings.als_gain & 0xFF) |
+ ((chip->settings.prox_gain & 0xFF) << 2) |
+ (chip->settings.prox_diode << 4) |
+ (chip->settings.prox_power << 6);
+
+ /* set chip time scaling and saturation */
+ als_count = 256 - chip->settings.als_time;
+ als_time_us = als_count * tsl2772_int_time_avail[chip->id][3];
+ chip->als_saturation = als_count * 768; /* 75% of full scale */
+ chip->als_gain_time_scale = als_time_us *
+ tsl2772_als_gain[chip->settings.als_gain];
+
+ /*
+ * TSL2772 Specific power-on / adc enable sequence
+ * Power on the device 1st.
+ */
+ ret = tsl2772_write_control_reg(chip, TSL2772_CNTL_PWR_ON);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Use the following shadow copy for our delay before enabling ADC.
+ * Write all the registers.
+ */
+ for (i = 0, dev_reg = chip->tsl2772_config;
+ i < TSL2772_MAX_CONFIG_REG; i++) {
+ int reg = TSL2772_CMD_REG + i;
+
+ ret = i2c_smbus_write_byte_data(chip->client, reg,
+ *dev_reg++);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to write to register %x: %d\n",
+ __func__, reg, ret);
+ return ret;
+ }
+ }
+
+ /* Power-on settling time */
+ usleep_range(3000, 3500);
+
+ /* Enable the ADC and proximity engine, plus requested interrupts. */
+ reg_val = TSL2772_CNTL_PWR_ON | TSL2772_CNTL_ADC_ENBL |
+ TSL2772_CNTL_PROX_DET_ENBL;
+ if (chip->settings.als_interrupt_en)
+ reg_val |= TSL2772_CNTL_ALS_INT_ENBL;
+ if (chip->settings.prox_interrupt_en)
+ reg_val |= TSL2772_CNTL_PROX_INT_ENBL;
+
+ ret = tsl2772_write_control_reg(chip, reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Clear any interrupt left pending from before the enable. */
+ ret = i2c_smbus_write_byte(chip->client,
+ TSL2772_CMD_REG | TSL2772_CMD_SPL_FN |
+ TSL2772_CMD_PROXALS_INT_CLR);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to clear interrupt status: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ chip->tsl2772_chip_status = TSL2772_CHIP_WORKING;
+
+ return ret;
+}
+
+static int tsl2772_chip_off(struct iio_dev *indio_dev)
+{
+	struct tsl2772_chip *chip = iio_priv(indio_dev);
+
+	/*
+	 * Mark the chip suspended, then clear the whole control register,
+	 * which powers the device down.
+	 */
+	chip->tsl2772_chip_status = TSL2772_CHIP_SUSPENDED;
+
+	return tsl2772_write_control_reg(chip, 0x00);
+}
+
+/**
+ * tsl2772_invoke_change - power cycle the device to implement the user
+ *                         parameters
+ * @indio_dev: pointer to IIO device
+ *
+ * Obtain and lock both ALS and PROX resources, cycle the device to implement
+ * the updated parameters, and unlock the resources. The working/suspended
+ * state is sampled only after both locks are held, so a concurrent
+ * enable/disable cannot race with the power cycle (the original read
+ * tsl2772_chip_status before taking the mutexes).
+ */
+static int tsl2772_invoke_change(struct iio_dev *indio_dev)
+{
+	struct tsl2772_chip *chip = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&chip->als_mutex);
+	mutex_lock(&chip->prox_mutex);
+
+	if (chip->tsl2772_chip_status == TSL2772_CHIP_WORKING) {
+		ret = tsl2772_chip_off(indio_dev);
+		if (ret < 0)
+			goto unlock;
+	}
+
+	ret = tsl2772_chip_on(indio_dev);
+
+unlock:
+	mutex_unlock(&chip->prox_mutex);
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+/*
+ * tsl2772_prox_cal() - Collect prox_max_samples_cal proximity readings
+ * (one every ~15 ms) and derive a new high threshold as (2 * max - mean),
+ * then re-program the device via tsl2772_invoke_change().
+ *
+ * NOTE(review): (max << 1) - mean can exceed the 16-bit threshold range
+ * split into PRX_MAXTHRESH LO/HI bytes for large readings -- confirm the
+ * result is always within the register width.
+ */
+static int tsl2772_prox_cal(struct iio_dev *indio_dev)
+{
+ struct tsl2772_chip *chip = iio_priv(indio_dev);
+ int prox_history[MAX_SAMPLES_CAL + 1];
+ int i, ret, mean, max, sample_sum;
+
+ /* Sample count is user-settable; bound it to the history buffer. */
+ if (chip->settings.prox_max_samples_cal < 1 ||
+ chip->settings.prox_max_samples_cal > MAX_SAMPLES_CAL)
+ return -EINVAL;
+
+ for (i = 0; i < chip->settings.prox_max_samples_cal; i++) {
+ usleep_range(15000, 17500);
+ ret = tsl2772_get_prox(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ prox_history[i] = chip->prox_data;
+ }
+
+ sample_sum = 0;
+ max = INT_MIN;
+ for (i = 0; i < chip->settings.prox_max_samples_cal; i++) {
+ sample_sum += prox_history[i];
+ max = max(max, prox_history[i]);
+ }
+ mean = sample_sum / chip->settings.prox_max_samples_cal;
+
+ chip->settings.prox_thres_high = (max << 1) - mean;
+
+ return tsl2772_invoke_change(indio_dev);
+}
+
+/* Report the discrete calibscale values or the integration-time range. */
+static int tsl2772_read_avail(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      const int **vals, int *type, int *length,
+			      long mask)
+{
+	struct tsl2772_chip *chip = iio_priv(indio_dev);
+
+	if (mask == IIO_CHAN_INFO_INT_TIME) {
+		/* Integration times form a min/step/max range per chip. */
+		*vals = tsl2772_int_time_avail[chip->id];
+		*length = ARRAY_SIZE(tsl2772_int_time_avail[chip->id]);
+		*type = IIO_VAL_INT_PLUS_MICRO;
+		return IIO_AVAIL_RANGE;
+	}
+
+	if (mask == IIO_CHAN_INFO_CALIBSCALE) {
+		/* ALS and proximity channels have distinct gain tables. */
+		if (chan->type == IIO_INTENSITY) {
+			*vals = tsl2772_int_calibscale_avail;
+			*length = ARRAY_SIZE(tsl2772_int_calibscale_avail);
+		} else {
+			*vals = tsl2772_prox_calibscale_avail;
+			*length = ARRAY_SIZE(tsl2772_prox_calibscale_avail);
+		}
+		*type = IIO_VAL_INT;
+		return IIO_AVAIL_LIST;
+	}
+
+	return -EINVAL;
+}
+
+/* sysfs: report the lux value the ALS calibration aims for. */
+static ssize_t in_illuminance0_target_input_show(struct device *dev,
+						 struct device_attribute *attr,
+						 char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2772_chip *chip = iio_priv(indio_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
+}
+
+/* sysfs: set the ALS calibration target and re-program the device. */
+static ssize_t in_illuminance0_target_input_store(struct device *dev,
+						  struct device_attribute *attr,
+						  const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2772_chip *chip = iio_priv(indio_dev);
+	u16 target;
+	int ret;
+
+	if (kstrtou16(buf, 0, &target))
+		return -EINVAL;
+
+	chip->settings.als_cal_target = target;
+
+	ret = tsl2772_invoke_change(indio_dev);
+	return ret < 0 ? ret : len;
+}
+
+/* sysfs: writing a true value runs an ALS gain-trim calibration. */
+static ssize_t in_illuminance0_calibrate_store(struct device *dev,
+					       struct device_attribute *attr,
+					       const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	bool run_cal;
+	int ret;
+
+	if (kstrtobool(buf, &run_cal) || !run_cal)
+		return -EINVAL;
+
+	ret = tsl2772_als_calibrate(indio_dev);
+	if (ret < 0)
+		return ret;
+
+	/* Push the freshly computed gain trim into the hardware. */
+	ret = tsl2772_invoke_change(indio_dev);
+	return ret < 0 ? ret : len;
+}
+
+/*
+ * sysfs: dump the lux table as "ch0,ch1,...,0,0\n".
+ *
+ * Every write is bounded by the space remaining in the sysfs page; the
+ * original passed PAGE_SIZE unconditionally, which could write past the
+ * end of the buffer once offset > 0. scnprintf() is used because it
+ * returns the number of characters actually stored (snprintf() returns
+ * the would-be length, which would corrupt the offset accounting).
+ */
+static ssize_t in_illuminance0_lux_table_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct tsl2772_chip *chip = iio_priv(dev_to_iio_dev(dev));
+	int i = 0;
+	int offset = 0;
+
+	while (i < TSL2772_MAX_LUX_TABLE_SIZE) {
+		offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%u,%u,",
+				    chip->tsl2772_device_lux[i].ch0,
+				    chip->tsl2772_device_lux[i].ch1);
+		if (chip->tsl2772_device_lux[i].ch0 == 0) {
+			/*
+			 * We just printed the terminating "0" entry.
+			 * Now get rid of the extra "," and break.
+			 */
+			offset--;
+			break;
+		}
+		i++;
+	}
+
+	offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n");
+	return offset;
+}
+
+/*
+ * sysfs: replace the lux table from a comma-separated list of ints.
+ * Input must contain an even number of values ending in a 0,0 terminator
+ * pair; the chip is power-cycled afterwards to apply the change.
+ */
+static ssize_t in_illuminance0_lux_table_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2772_chip *chip = iio_priv(indio_dev);
+ int value[ARRAY_SIZE(chip->tsl2772_device_lux) * 2 + 1];
+ int n, ret;
+
+ get_options(buf, ARRAY_SIZE(value), value);
+
+ /*
+ * We now have an array of ints starting at value[1], and
+ * enumerated by value[0].
+ * We expect each group of two ints to be one table entry,
+ * and the last table entry is all 0.
+ */
+ n = value[0];
+ /* even count, at least one real pair plus the 0,0 terminator */
+ if ((n % 2) || n < 4 ||
+ n > ((ARRAY_SIZE(chip->tsl2772_device_lux) - 1) * 2))
+ return -EINVAL;
+
+ /* the final pair must be the 0,0 terminator */
+ if ((value[(n - 1)] | value[n]) != 0)
+ return -EINVAL;
+
+ if (chip->tsl2772_chip_status == TSL2772_CHIP_WORKING) {
+ ret = tsl2772_chip_off(indio_dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Zero out the table */
+ memset(chip->tsl2772_device_lux, 0, sizeof(chip->tsl2772_device_lux));
+ /* copy n ints; "* 4" assumes 4-byte int/table fields -- TODO confirm */
+ memcpy(chip->tsl2772_device_lux, &value[1], (value[0] * 4));
+
+ ret = tsl2772_invoke_change(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+/* sysfs: writing a true value runs a proximity threshold calibration. */
+static ssize_t in_proximity0_calibrate_store(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	bool run_cal;
+	int ret;
+
+	if (kstrtobool(buf, &run_cal) || !run_cal)
+		return -EINVAL;
+
+	ret = tsl2772_prox_cal(indio_dev);
+	if (ret < 0)
+		return ret;
+
+	/* Apply the newly derived proximity threshold. */
+	ret = tsl2772_invoke_change(indio_dev);
+	return ret < 0 ? ret : len;
+}
+
+/* Report whether the event (interrupt) is enabled for the channel type. */
+static int tsl2772_read_interrupt_config(struct iio_dev *indio_dev,
+					 const struct iio_chan_spec *chan,
+					 enum iio_event_type type,
+					 enum iio_event_direction dir)
+{
+	struct tsl2772_chip *chip = iio_priv(indio_dev);
+
+	return chan->type == IIO_INTENSITY ?
+		chip->settings.als_interrupt_en :
+		chip->settings.prox_interrupt_en;
+}
+
+/* Enable/disable the ALS or proximity interrupt and re-program the chip. */
+static int tsl2772_write_interrupt_config(struct iio_dev *indio_dev,
+					  const struct iio_chan_spec *chan,
+					  enum iio_event_type type,
+					  enum iio_event_direction dir,
+					  int val)
+{
+	struct tsl2772_chip *chip = iio_priv(indio_dev);
+	bool enable = !!val;
+
+	if (chan->type == IIO_INTENSITY)
+		chip->settings.als_interrupt_en = enable;
+	else
+		chip->settings.prox_interrupt_en = enable;
+
+	/* Power-cycle the chip so the new interrupt enables take effect. */
+	return tsl2772_invoke_change(indio_dev);
+}
+
+/*
+ * tsl2772_write_event_value() - Set an event threshold (VALUE) or the
+ * persistence filter period (PERIOD) for the ALS or proximity channel,
+ * then re-program the chip.
+ */
+static int tsl2772_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct tsl2772_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL, count, persistence;
+ u8 time;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (chan->type == IIO_INTENSITY) {
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ chip->settings.als_thresh_high = val;
+ ret = 0;
+ break;
+ case IIO_EV_DIR_FALLING:
+ chip->settings.als_thresh_low = val;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ chip->settings.prox_thres_high = val;
+ ret = 0;
+ break;
+ case IIO_EV_DIR_FALLING:
+ chip->settings.prox_thres_low = val;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case IIO_EV_INFO_PERIOD:
+ if (chan->type == IIO_INTENSITY)
+ time = chip->settings.als_time;
+ else
+ time = chip->settings.prox_time;
+
+ /*
+ * Convert the requested period (val s + val2 us) into a
+ * number of integration cycles of (256 - time) counts each.
+ */
+ count = 256 - time;
+ persistence = ((val * 1000000) + val2) /
+ (count * tsl2772_int_time_avail[chip->id][3]);
+
+ if (chan->type == IIO_INTENSITY) {
+ /* ALS filter values are 1, 2, 3, 5, 10, 15, ..., 60 */
+ if (persistence > 3)
+ persistence = (persistence / 5) + 3;
+
+ chip->settings.als_persistence = persistence;
+ } else {
+ chip->settings.prox_persistence = persistence;
+ }
+
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return tsl2772_invoke_change(indio_dev);
+}
+
+/*
+ * tsl2772_read_event_value() - Read back an event threshold (VALUE) or
+ * the persistence filter period (PERIOD, as seconds + microseconds).
+ */
+static int tsl2772_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct tsl2772_chip *chip = iio_priv(indio_dev);
+ int filter_delay, persistence;
+ u8 time;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (chan->type == IIO_INTENSITY) {
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = chip->settings.als_thresh_high;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ *val = chip->settings.als_thresh_low;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = chip->settings.prox_thres_high;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ *val = chip->settings.prox_thres_low;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ }
+ break;
+ case IIO_EV_INFO_PERIOD:
+ if (chan->type == IIO_INTENSITY) {
+ time = chip->settings.als_time;
+ persistence = chip->settings.als_persistence;
+
+ /* ALS filter values are 1, 2, 3, 5, 10, 15, ..., 60 */
+ if (persistence > 3)
+ persistence = (persistence - 3) * 5;
+ } else {
+ time = chip->settings.prox_time;
+ persistence = chip->settings.prox_persistence;
+ }
+
+ /* period = cycles * (256 - time) * per-cycle time, in us */
+ filter_delay = persistence * (256 - time) *
+ tsl2772_int_time_avail[chip->id][3];
+
+ *val = filter_delay / 1000000;
+ *val2 = filter_delay % 1000000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * tsl2772_read_raw() - IIO read callback for lux, raw ALS channels,
+ * proximity count, gains, gain trim and integration time.
+ *
+ * I2C/status failures from tsl2772_get_lux()/tsl2772_get_prox() are now
+ * propagated to the caller; the original ignored their return values and
+ * could hand userspace a stale cached reading after a failed transfer.
+ */
+static int tsl2772_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val,
+			    int *val2,
+			    long mask)
+{
+	struct tsl2772_chip *chip = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		switch (chan->type) {
+		case IIO_LIGHT:
+			ret = tsl2772_get_lux(indio_dev);
+			if (ret < 0)
+				return ret;
+			*val = chip->als_cur_info.lux;
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->type) {
+		case IIO_INTENSITY:
+			ret = tsl2772_get_lux(indio_dev);
+			if (ret < 0)
+				return ret;
+			if (chan->channel == 0)
+				*val = chip->als_cur_info.als_ch0;
+			else
+				*val = chip->als_cur_info.als_ch1;
+			return IIO_VAL_INT;
+		case IIO_PROXIMITY:
+			ret = tsl2772_get_prox(indio_dev);
+			if (ret < 0)
+				return ret;
+			*val = chip->prox_data;
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_CALIBSCALE:
+		/* Gain tables differ between the ALS and proximity paths. */
+		if (chan->type == IIO_LIGHT)
+			*val = tsl2772_als_gain[chip->settings.als_gain];
+		else
+			*val = tsl2772_prox_gain[chip->settings.prox_gain];
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		*val = chip->settings.als_gain_trim;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_INT_TIME:
+		/* integration time = (256 - als_time) * per-cycle time (us) */
+		*val = 0;
+		*val2 = (256 - chip->settings.als_time) *
+			tsl2772_int_time_avail[chip->id][3];
+		return IIO_VAL_INT_PLUS_MICRO;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * tsl2772_write_raw() - IIO write callback: map a requested gain to its
+ * register code, set the ALS gain trim, or set the integration time, then
+ * power-cycle the chip to apply the new setting.
+ */
+static int tsl2772_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct tsl2772_chip *chip = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ /* Gain value -> 2-bit register encoding (see tsl2772_*_gain). */
+ if (chan->type == IIO_INTENSITY) {
+ switch (val) {
+ case 1:
+ chip->settings.als_gain = 0;
+ break;
+ case 8:
+ chip->settings.als_gain = 1;
+ break;
+ case 16:
+ chip->settings.als_gain = 2;
+ break;
+ case 120:
+ chip->settings.als_gain = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (val) {
+ case 1:
+ chip->settings.prox_gain = 0;
+ break;
+ case 2:
+ chip->settings.prox_gain = 1;
+ break;
+ case 4:
+ chip->settings.prox_gain = 2;
+ break;
+ case 8:
+ chip->settings.prox_gain = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val < TSL2772_ALS_GAIN_TRIM_MIN ||
+ val > TSL2772_ALS_GAIN_TRIM_MAX)
+ return -EINVAL;
+
+ chip->settings.als_gain_trim = val;
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ /* val2 (us) must lie within the chip's advertised range. */
+ if (val != 0 || val2 < tsl2772_int_time_avail[chip->id][1] ||
+ val2 > tsl2772_int_time_avail[chip->id][5])
+ return -EINVAL;
+
+ /* Register counts down: 256 - (time / per-cycle time). */
+ chip->settings.als_time = 256 -
+ (val2 / tsl2772_int_time_avail[chip->id][3]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tsl2772_invoke_change(indio_dev);
+}
+
+/* Custom sysfs attributes for calibration control and the lux table. */
+static DEVICE_ATTR_RW(in_illuminance0_target_input);
+
+static DEVICE_ATTR_WO(in_illuminance0_calibrate);
+
+static DEVICE_ATTR_WO(in_proximity0_calibrate);
+
+static DEVICE_ATTR_RW(in_illuminance0_lux_table);
+
+/* Use the default register values to identify the Taos device */
+static int tsl2772_device_id_verif(int id, int target)
+{
+	int family;
+
+	switch (target) {
+	case tsl2571:
+	case tsl2671:
+	case tsl2771:
+		family = TRITON_ID;
+		break;
+	case tmd2671:
+	case tmd2771:
+		family = HALIBUT_ID;
+		break;
+	case tsl2572:
+	case tsl2672:
+	case tmd2672:
+	case tsl2772:
+	case tmd2772:
+		family = SWORDFISH_ID;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Only the upper nibble of the chip ID encodes the family. */
+	return (id & 0xf0) == family;
+}
+
+/*
+ * tsl2772_event_handler() - Threaded IRQ handler: push an IIO event for
+ * each pending ALS/proximity interrupt and clear the interrupt status.
+ * Always returns IRQ_HANDLED, even if the status read or the clear fails.
+ */
+static irqreturn_t tsl2772_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct tsl2772_chip *chip = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns(indio_dev);
+ int ret;
+
+ ret = tsl2772_read_status(chip);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ /* What type of interrupt do we need to process */
+ if (ret & TSL2772_STA_PRX_INTR) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+ }
+
+ if (ret & TSL2772_STA_ALS_INTR) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+ }
+
+ /* Acknowledge both interrupt sources in the device. */
+ ret = i2c_smbus_write_byte(chip->client,
+ TSL2772_CMD_REG | TSL2772_CMD_SPL_FN |
+ TSL2772_CMD_PROXALS_INT_CLR);
+ if (ret < 0)
+ dev_err(&chip->client->dev,
+ "%s: failed to clear interrupt status: %d\n",
+ __func__, ret);
+
+ return IRQ_HANDLED;
+}
+
+/* Per-capability sysfs attribute sets, indexed by the ALS/PRX enum below. */
+static struct attribute *tsl2772_ALS_device_attrs[] = {
+ &dev_attr_in_illuminance0_target_input.attr,
+ &dev_attr_in_illuminance0_calibrate.attr,
+ &dev_attr_in_illuminance0_lux_table.attr,
+ NULL
+};
+
+static struct attribute *tsl2772_PRX_device_attrs[] = {
+ &dev_attr_in_proximity0_calibrate.attr,
+ NULL
+};
+
+static struct attribute *tsl2772_ALSPRX_device_attrs[] = {
+ &dev_attr_in_illuminance0_target_input.attr,
+ &dev_attr_in_illuminance0_calibrate.attr,
+ &dev_attr_in_illuminance0_lux_table.attr,
+ NULL
+};
+
+static struct attribute *tsl2772_PRX2_device_attrs[] = {
+ &dev_attr_in_proximity0_calibrate.attr,
+ NULL
+};
+
+/* ALSPRX2 is the only combo set that also exposes proximity calibrate. */
+static struct attribute *tsl2772_ALSPRX2_device_attrs[] = {
+ &dev_attr_in_illuminance0_target_input.attr,
+ &dev_attr_in_illuminance0_calibrate.attr,
+ &dev_attr_in_illuminance0_lux_table.attr,
+ &dev_attr_in_proximity0_calibrate.attr,
+ NULL
+};
+
+static const struct attribute_group tsl2772_device_attr_group_tbl[] = {
+ [ALS] = {
+ .attrs = tsl2772_ALS_device_attrs,
+ },
+ [PRX] = {
+ .attrs = tsl2772_PRX_device_attrs,
+ },
+ [ALSPRX] = {
+ .attrs = tsl2772_ALSPRX_device_attrs,
+ },
+ [PRX2] = {
+ .attrs = tsl2772_PRX2_device_attrs,
+ },
+ [ALSPRX2] = {
+ .attrs = tsl2772_ALSPRX2_device_attrs,
+ },
+};
+
+/*
+ * All capability variants share the same IIO callbacks and differ only in
+ * their sysfs attribute group; this macro builds one iio_info per variant.
+ */
+#define TSL2772_DEVICE_INFO(type)[type] = \
+ { \
+ .attrs = &tsl2772_device_attr_group_tbl[type], \
+ .read_raw = &tsl2772_read_raw, \
+ .read_avail = &tsl2772_read_avail, \
+ .write_raw = &tsl2772_write_raw, \
+ .read_event_value = &tsl2772_read_event_value, \
+ .write_event_value = &tsl2772_write_event_value, \
+ .read_event_config = &tsl2772_read_interrupt_config, \
+ .write_event_config = &tsl2772_write_interrupt_config, \
+ }
+
+static const struct iio_info tsl2772_device_info[] = {
+ TSL2772_DEVICE_INFO(ALS),
+ TSL2772_DEVICE_INFO(PRX),
+ TSL2772_DEVICE_INFO(ALSPRX),
+ TSL2772_DEVICE_INFO(PRX2),
+ TSL2772_DEVICE_INFO(ALSPRX2),
+};
+
+/*
+ * Threshold events: separate rising/falling value attributes plus a
+ * shared (direction "either") period and enable control.
+ */
+static const struct iio_event_spec tsl2772_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+/*
+ * Per-capability channel layouts. Each entry provides two channel tables:
+ * one with event specs (used when an IRQ is available) and one without.
+ */
+static const struct tsl2772_chip_info tsl2772_chip_info_tbl[] = {
+ /* ALS-only parts: lux + two raw intensity channels */
+ [ALS] = {
+ .channel_with_events = {
+ {
+ .type = IIO_LIGHT,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .event_spec = tsl2772_events,
+ .num_event_specs = ARRAY_SIZE(tsl2772_events),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 1,
+ },
+ },
+ .channel_without_events = {
+ {
+ .type = IIO_LIGHT,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 1,
+ },
+ },
+ .chan_table_elements = 3,
+ .info = &tsl2772_device_info[ALS],
+ },
+ /* Proximity-only, 1st generation parts */
+ [PRX] = {
+ .channel_with_events = {
+ {
+ .type = IIO_PROXIMITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_spec = tsl2772_events,
+ .num_event_specs = ARRAY_SIZE(tsl2772_events),
+ },
+ },
+ .channel_without_events = {
+ {
+ .type = IIO_PROXIMITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ },
+ .chan_table_elements = 1,
+ .info = &tsl2772_device_info[PRX],
+ },
+ /* Combined ALS + proximity, 1st generation parts */
+ [ALSPRX] = {
+ .channel_with_events = {
+ {
+ .type = IIO_LIGHT,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .event_spec = tsl2772_events,
+ .num_event_specs = ARRAY_SIZE(tsl2772_events),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .type = IIO_PROXIMITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_spec = tsl2772_events,
+ .num_event_specs = ARRAY_SIZE(tsl2772_events),
+ },
+ },
+ .channel_without_events = {
+ {
+ .type = IIO_LIGHT,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .type = IIO_PROXIMITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ },
+ .chan_table_elements = 4,
+ .info = &tsl2772_device_info[ALSPRX],
+ },
+ /* Proximity-only, 2nd generation parts (adds prox gain control) */
+ [PRX2] = {
+ .channel_with_events = {
+ {
+ .type = IIO_PROXIMITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .event_spec = tsl2772_events,
+ .num_event_specs = ARRAY_SIZE(tsl2772_events),
+ },
+ },
+ .channel_without_events = {
+ {
+ .type = IIO_PROXIMITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ },
+ },
+ .chan_table_elements = 1,
+ .info = &tsl2772_device_info[PRX2],
+ },
+ /* Combined ALS + proximity, 2nd generation parts */
+ [ALSPRX2] = {
+ .channel_with_events = {
+ {
+ .type = IIO_LIGHT,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .event_spec = tsl2772_events,
+ .num_event_specs = ARRAY_SIZE(tsl2772_events),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .type = IIO_PROXIMITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .event_spec = tsl2772_events,
+ .num_event_specs = ARRAY_SIZE(tsl2772_events),
+ },
+ },
+ .channel_without_events = {
+ {
+ .type = IIO_LIGHT,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ }, {
+ .type = IIO_INTENSITY,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .type = IIO_PROXIMITY,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ },
+ },
+ .chan_table_elements = 4,
+ .info = &tsl2772_device_info[ALSPRX2],
+ },
+};
+
+/*
+ * tsl2772_probe() - Verify the chip ID, initialize driver state, pick the
+ * channel table (with or without events depending on IRQ availability),
+ * power the chip on and register the IIO device.
+ */
+static int tsl2772_probe(struct i2c_client *clientp,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct tsl2772_chip *chip;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+ chip->client = clientp;
+ i2c_set_clientdata(clientp, indio_dev);
+
+ ret = i2c_smbus_read_byte_data(chip->client,
+ TSL2772_CMD_REG | TSL2772_CHIPID);
+ if (ret < 0)
+ return ret;
+
+ /* Make sure the chip ID matches the part we were bound to. */
+ if (tsl2772_device_id_verif(ret, id->driver_data) <= 0) {
+ dev_info(&chip->client->dev,
+ "%s: i2c device found does not match expected id\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = i2c_smbus_write_byte(clientp, TSL2772_CMD_REG | TSL2772_CNTRL);
+ if (ret < 0) {
+ dev_err(&clientp->dev,
+ "%s: Failed to write to CMD register: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ mutex_init(&chip->als_mutex);
+ mutex_init(&chip->prox_mutex);
+
+ chip->tsl2772_chip_status = TSL2772_CHIP_UNKNOWN;
+ chip->pdata = dev_get_platdata(&clientp->dev);
+ chip->id = id->driver_data;
+ chip->chip_info =
+ &tsl2772_chip_info_tbl[device_channel_config[id->driver_data]];
+
+ indio_dev->info = chip->chip_info->info;
+ indio_dev->dev.parent = &clientp->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = chip->client->name;
+ indio_dev->num_channels = chip->chip_info->chan_table_elements;
+
+ /* Only advertise events when we actually have an interrupt line. */
+ if (clientp->irq) {
+ indio_dev->channels = chip->chip_info->channel_with_events;
+
+ ret = devm_request_threaded_irq(&clientp->dev, clientp->irq,
+ NULL,
+ &tsl2772_event_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "TSL2772_event",
+ indio_dev);
+ if (ret) {
+ dev_err(&clientp->dev,
+ "%s: irq request failed\n", __func__);
+ return ret;
+ }
+ } else {
+ indio_dev->channels = chip->chip_info->channel_without_events;
+ }
+
+ tsl2772_defaults(chip);
+ ret = tsl2772_chip_on(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ /* Undo the power-on; devm handles the rest of the teardown. */
+ tsl2772_chip_off(indio_dev);
+ dev_err(&clientp->dev,
+ "%s: iio registration failed\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* System sleep: power the chip down. */
+static int tsl2772_suspend(struct device *dev)
+{
+	return tsl2772_chip_off(dev_get_drvdata(dev));
+}
+
+/* System resume: re-program and re-enable the chip. */
+static int tsl2772_resume(struct device *dev)
+{
+	return tsl2772_chip_on(dev_get_drvdata(dev));
+}
+
+static int tsl2772_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	/*
+	 * Unregister from IIO first so no consumer can reach the device
+	 * while (or after) it is powered down; the original powered the
+	 * chip off while the device was still registered.
+	 */
+	iio_device_unregister(indio_dev);
+	tsl2772_chip_off(indio_dev);
+
+	return 0;
+}
+
+/* Supported parts; driver_data selects the capability/channel layout. */
+static const struct i2c_device_id tsl2772_idtable[] = {
+ { "tsl2571", tsl2571 },
+ { "tsl2671", tsl2671 },
+ { "tmd2671", tmd2671 },
+ { "tsl2771", tsl2771 },
+ { "tmd2771", tmd2771 },
+ { "tsl2572", tsl2572 },
+ { "tsl2672", tsl2672 },
+ { "tmd2672", tmd2672 },
+ { "tsl2772", tsl2772 },
+ { "tmd2772", tmd2772 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, tsl2772_idtable);
+
+/* Device-tree compatibles mirroring the I2C id table above. */
+static const struct of_device_id tsl2772_of_match[] = {
+ { .compatible = "amstaos,tsl2571" },
+ { .compatible = "amstaos,tsl2671" },
+ { .compatible = "amstaos,tmd2671" },
+ { .compatible = "amstaos,tsl2771" },
+ { .compatible = "amstaos,tmd2771" },
+ { .compatible = "amstaos,tsl2572" },
+ { .compatible = "amstaos,tsl2672" },
+ { .compatible = "amstaos,tmd2672" },
+ { .compatible = "amstaos,tsl2772" },
+ { .compatible = "amstaos,tmd2772" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tsl2772_of_match);
+
+/*
+ * Power management and driver registration.
+ * NOTE(review): suspend/resume are referenced unconditionally; consider
+ * SET_SYSTEM_SLEEP_PM_OPS/__maybe_unused to avoid unused-function
+ * warnings with CONFIG_PM_SLEEP disabled -- confirm build config.
+ */
+static const struct dev_pm_ops tsl2772_pm_ops = {
+ .suspend = tsl2772_suspend,
+ .resume = tsl2772_resume,
+};
+
+static struct i2c_driver tsl2772_driver = {
+ .driver = {
+ .name = "tsl2772",
+ .of_match_table = tsl2772_of_match,
+ .pm = &tsl2772_pm_ops,
+ },
+ .id_table = tsl2772_idtable,
+ .probe = tsl2772_probe,
+ .remove = tsl2772_remove,
+};
+
+module_i2c_driver(tsl2772_driver);
+
+MODULE_AUTHOR("J. August Brenner <Jon.Brenner@ams.com>");
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_DESCRIPTION("TAOS tsl2772 ambient and proximity light sensor driver")<br>
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index b34ace76d31b..f063355480ba 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -26,6 +26,7 @@
#define MAG3110_OUT_Y 0x03
#define MAG3110_OUT_Z 0x05
#define MAG3110_WHO_AM_I 0x07
+#define MAG3110_SYSMOD 0x08
#define MAG3110_OFF_X 0x09 /* MSB first */
#define MAG3110_OFF_Y 0x0b
#define MAG3110_OFF_Z 0x0d
@@ -39,6 +40,8 @@
#define MAG3110_CTRL_DR_SHIFT 5
#define MAG3110_CTRL_DR_DEFAULT 0
+#define MAG3110_SYSMOD_MODE_MASK GENMASK(1, 0)
+
#define MAG3110_CTRL_TM BIT(1) /* trigger single measurement */
#define MAG3110_CTRL_AC BIT(0) /* continuous measurements */
@@ -52,17 +55,20 @@ struct mag3110_data {
struct i2c_client *client;
struct mutex lock;
u8 ctrl_reg1;
+ int sleep_val;
};
static int mag3110_request(struct mag3110_data *data)
{
int ret, tries = 150;
- /* trigger measurement */
- ret = i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
- data->ctrl_reg1 | MAG3110_CTRL_TM);
- if (ret < 0)
- return ret;
+ if ((data->ctrl_reg1 & MAG3110_CTRL_AC) == 0) {
+ /* trigger measurement */
+ ret = i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1 | MAG3110_CTRL_TM);
+ if (ret < 0)
+ return ret;
+ }
while (tries-- > 0) {
ret = i2c_smbus_read_byte_data(data->client, MAG3110_STATUS);
@@ -71,7 +77,11 @@ static int mag3110_request(struct mag3110_data *data)
/* wait for data ready */
if ((ret & MAG3110_STATUS_DRDY) == MAG3110_STATUS_DRDY)
break;
- msleep(20);
+
+ if (data->sleep_val <= 20)
+ usleep_range(data->sleep_val * 250, data->sleep_val * 500);
+ else
+ msleep(20);
}
if (tries < 0) {
@@ -144,6 +154,117 @@ static int mag3110_get_samp_freq_index(struct mag3110_data *data,
val2);
}
+static int mag3110_calculate_sleep(struct mag3110_data *data)
+{
+ int ret, i = data->ctrl_reg1 >> MAG3110_CTRL_DR_SHIFT;
+
+ if (mag3110_samp_freq[i][0] > 0)
+ ret = 1000 / mag3110_samp_freq[i][0];
+ else
+ ret = 1000;
+
+ return ret == 0 ? 1 : ret;
+}
+
+static int mag3110_standby(struct mag3110_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1 & ~MAG3110_CTRL_AC);
+}
+
+static int mag3110_wait_standby(struct mag3110_data *data)
+{
+ int ret, tries = 30;
+
+ /*
+ * Takes up to 1/ODR to come out of active mode into stby
+ * Longest expected period is 12.5 seconds.
+ * We'll sleep for 500ms between checks
+ */
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_byte_data(data->client, MAG3110_SYSMOD);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c error\n");
+ return ret;
+ }
+ /* wait for standby */
+ if ((ret & MAG3110_SYSMOD_MODE_MASK) == 0)
+ break;
+
+ msleep_interruptible(500);
+ }
+
+ if (tries < 0) {
+ dev_err(&data->client->dev, "device not entering standby mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mag3110_active(struct mag3110_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1);
+}
+
+/* returns >0 if active, 0 if in standby and <0 on error */
+static int mag3110_is_active(struct mag3110_data *data)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(data->client, MAG3110_CTRL_REG1);
+ if (reg < 0)
+ return reg;
+
+ return reg & MAG3110_CTRL_AC;
+}
+
+static int mag3110_change_config(struct mag3110_data *data, u8 reg, u8 val)
+{
+ int ret;
+ int is_active;
+
+ mutex_lock(&data->lock);
+
+ is_active = mag3110_is_active(data);
+ if (is_active < 0) {
+ ret = is_active;
+ goto fail;
+ }
+
+ /* config can only be changed when in standby */
+ if (is_active > 0) {
+ ret = mag3110_standby(data);
+ if (ret < 0)
+ goto fail;
+ }
+
+ /*
+ * After coming out of active we must wait for the part
+ * to transition to STBY. This can take up to 1 /ODR to occur
+ */
+ ret = mag3110_wait_standby(data);
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_smbus_write_byte_data(data->client, reg, val);
+ if (ret < 0)
+ goto fail;
+
+ if (is_active > 0) {
+ ret = mag3110_active(data);
+ if (ret < 0)
+ goto fail;
+ }
+
+ ret = 0;
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
static int mag3110_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -235,11 +356,15 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
ret = -EINVAL;
break;
}
-
- data->ctrl_reg1 &= ~MAG3110_CTRL_DR_MASK;
+ data->ctrl_reg1 &= 0xff & ~MAG3110_CTRL_DR_MASK
+ & ~MAG3110_CTRL_AC;
data->ctrl_reg1 |= rate << MAG3110_CTRL_DR_SHIFT;
- ret = i2c_smbus_write_byte_data(data->client,
- MAG3110_CTRL_REG1, data->ctrl_reg1);
+ data->sleep_val = mag3110_calculate_sleep(data);
+ if (data->sleep_val < 40)
+ data->ctrl_reg1 |= MAG3110_CTRL_AC;
+
+ ret = mag3110_change_config(data, MAG3110_CTRL_REG1,
+ data->ctrl_reg1);
break;
case IIO_CHAN_INFO_CALIBBIAS:
if (val < -10000 || val > 10000) {
@@ -337,12 +462,6 @@ static const struct iio_info mag3110_info = {
static const unsigned long mag3110_scan_masks[] = {0x7, 0xf, 0};
-static int mag3110_standby(struct mag3110_data *data)
-{
- return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
- data->ctrl_reg1 & ~MAG3110_CTRL_AC);
-}
-
static int mag3110_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -374,8 +493,11 @@ static int mag3110_probe(struct i2c_client *client,
indio_dev->available_scan_masks = mag3110_scan_masks;
data->ctrl_reg1 = MAG3110_CTRL_DR_DEFAULT << MAG3110_CTRL_DR_SHIFT;
- ret = i2c_smbus_write_byte_data(client, MAG3110_CTRL_REG1,
- data->ctrl_reg1);
+ data->sleep_val = mag3110_calculate_sleep(data);
+ if (data->sleep_val < 40)
+ data->ctrl_reg1 |= MAG3110_CTRL_AC;
+
+ ret = mag3110_change_config(data, MAG3110_CTRL_REG1, data->ctrl_reg1);
if (ret < 0)
return ret;
diff --git a/drivers/iio/potentiometer/mcp4018.c b/drivers/iio/potentiometer/mcp4018.c
index 601b25d1f387..320a7c929777 100644
--- a/drivers/iio/potentiometer/mcp4018.c
+++ b/drivers/iio/potentiometer/mcp4018.c
@@ -99,6 +99,23 @@ static const struct iio_info mcp4018_info = {
.write_raw = mcp4018_write_raw,
};
+static const struct i2c_device_id mcp4018_id[] = {
+ { "mcp4017-502", MCP4018_502 },
+ { "mcp4017-103", MCP4018_103 },
+ { "mcp4017-503", MCP4018_503 },
+ { "mcp4017-104", MCP4018_104 },
+ { "mcp4018-502", MCP4018_502 },
+ { "mcp4018-103", MCP4018_103 },
+ { "mcp4018-503", MCP4018_503 },
+ { "mcp4018-104", MCP4018_104 },
+ { "mcp4019-502", MCP4018_502 },
+ { "mcp4019-103", MCP4018_103 },
+ { "mcp4019-503", MCP4018_503 },
+ { "mcp4019-104", MCP4018_104 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mcp4018_id);
+
#ifdef CONFIG_OF
#define MCP4018_COMPATIBLE(of_compatible, cfg) { \
@@ -125,8 +142,7 @@ MODULE_DEVICE_TABLE(of, mcp4018_of_match);
#endif
-static int mcp4018_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mcp4018_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct mcp4018_data *data;
@@ -150,7 +166,7 @@ static int mcp4018_probe(struct i2c_client *client,
if (match)
data->cfg = of_device_get_match_data(dev);
else
- data->cfg = &mcp4018_cfg[id->driver_data];
+ data->cfg = &mcp4018_cfg[i2c_match_id(mcp4018_id, client)->driver_data];
indio_dev->dev.parent = dev;
indio_dev->info = &mcp4018_info;
@@ -161,29 +177,12 @@ static int mcp4018_probe(struct i2c_client *client,
return devm_iio_device_register(dev, indio_dev);
}
-static const struct i2c_device_id mcp4018_id[] = {
- { "mcp4017-502", MCP4018_502 },
- { "mcp4017-103", MCP4018_103 },
- { "mcp4017-503", MCP4018_503 },
- { "mcp4017-104", MCP4018_104 },
- { "mcp4018-502", MCP4018_502 },
- { "mcp4018-103", MCP4018_103 },
- { "mcp4018-503", MCP4018_503 },
- { "mcp4018-104", MCP4018_104 },
- { "mcp4019-502", MCP4018_502 },
- { "mcp4019-103", MCP4018_103 },
- { "mcp4019-503", MCP4018_503 },
- { "mcp4019-104", MCP4018_104 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, mcp4018_id);
-
static struct i2c_driver mcp4018_driver = {
.driver = {
.name = "mcp4018",
.of_match_table = of_match_ptr(mcp4018_of_match),
},
- .probe = mcp4018_probe,
+ .probe_new = mcp4018_probe,
.id_table = mcp4018_id,
};
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 114ab876fcc6..df894af6cccb 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -209,6 +209,75 @@ static const struct iio_info mcp4531_info = {
.write_raw = mcp4531_write_raw,
};
+static const struct i2c_device_id mcp4531_id[] = {
+ { "mcp4531-502", MCP453x_502 },
+ { "mcp4531-103", MCP453x_103 },
+ { "mcp4531-503", MCP453x_503 },
+ { "mcp4531-104", MCP453x_104 },
+ { "mcp4532-502", MCP453x_502 },
+ { "mcp4532-103", MCP453x_103 },
+ { "mcp4532-503", MCP453x_503 },
+ { "mcp4532-104", MCP453x_104 },
+ { "mcp4541-502", MCP454x_502 },
+ { "mcp4541-103", MCP454x_103 },
+ { "mcp4541-503", MCP454x_503 },
+ { "mcp4541-104", MCP454x_104 },
+ { "mcp4542-502", MCP454x_502 },
+ { "mcp4542-103", MCP454x_103 },
+ { "mcp4542-503", MCP454x_503 },
+ { "mcp4542-104", MCP454x_104 },
+ { "mcp4551-502", MCP455x_502 },
+ { "mcp4551-103", MCP455x_103 },
+ { "mcp4551-503", MCP455x_503 },
+ { "mcp4551-104", MCP455x_104 },
+ { "mcp4552-502", MCP455x_502 },
+ { "mcp4552-103", MCP455x_103 },
+ { "mcp4552-503", MCP455x_503 },
+ { "mcp4552-104", MCP455x_104 },
+ { "mcp4561-502", MCP456x_502 },
+ { "mcp4561-103", MCP456x_103 },
+ { "mcp4561-503", MCP456x_503 },
+ { "mcp4561-104", MCP456x_104 },
+ { "mcp4562-502", MCP456x_502 },
+ { "mcp4562-103", MCP456x_103 },
+ { "mcp4562-503", MCP456x_503 },
+ { "mcp4562-104", MCP456x_104 },
+ { "mcp4631-502", MCP463x_502 },
+ { "mcp4631-103", MCP463x_103 },
+ { "mcp4631-503", MCP463x_503 },
+ { "mcp4631-104", MCP463x_104 },
+ { "mcp4632-502", MCP463x_502 },
+ { "mcp4632-103", MCP463x_103 },
+ { "mcp4632-503", MCP463x_503 },
+ { "mcp4632-104", MCP463x_104 },
+ { "mcp4641-502", MCP464x_502 },
+ { "mcp4641-103", MCP464x_103 },
+ { "mcp4641-503", MCP464x_503 },
+ { "mcp4641-104", MCP464x_104 },
+ { "mcp4642-502", MCP464x_502 },
+ { "mcp4642-103", MCP464x_103 },
+ { "mcp4642-503", MCP464x_503 },
+ { "mcp4642-104", MCP464x_104 },
+ { "mcp4651-502", MCP465x_502 },
+ { "mcp4651-103", MCP465x_103 },
+ { "mcp4651-503", MCP465x_503 },
+ { "mcp4651-104", MCP465x_104 },
+ { "mcp4652-502", MCP465x_502 },
+ { "mcp4652-103", MCP465x_103 },
+ { "mcp4652-503", MCP465x_503 },
+ { "mcp4652-104", MCP465x_104 },
+ { "mcp4661-502", MCP466x_502 },
+ { "mcp4661-103", MCP466x_103 },
+ { "mcp4661-503", MCP466x_503 },
+ { "mcp4661-104", MCP466x_104 },
+ { "mcp4662-502", MCP466x_502 },
+ { "mcp4662-103", MCP466x_103 },
+ { "mcp4662-503", MCP466x_503 },
+ { "mcp4662-104", MCP466x_104 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mcp4531_id);
+
#ifdef CONFIG_OF
#define MCP4531_COMPATIBLE(of_compatible, cfg) { \
@@ -286,8 +355,7 @@ static const struct of_device_id mcp4531_of_match[] = {
MODULE_DEVICE_TABLE(of, mcp4531_of_match);
#endif
-static int mcp4531_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mcp4531_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct mcp4531_data *data;
@@ -311,7 +379,7 @@ static int mcp4531_probe(struct i2c_client *client,
if (match)
data->cfg = of_device_get_match_data(dev);
else
- data->cfg = &mcp4531_cfg[id->driver_data];
+ data->cfg = &mcp4531_cfg[i2c_match_id(mcp4531_id, client)->driver_data];
indio_dev->dev.parent = dev;
indio_dev->info = &mcp4531_info;
@@ -322,81 +390,12 @@ static int mcp4531_probe(struct i2c_client *client,
return devm_iio_device_register(dev, indio_dev);
}
-static const struct i2c_device_id mcp4531_id[] = {
- { "mcp4531-502", MCP453x_502 },
- { "mcp4531-103", MCP453x_103 },
- { "mcp4531-503", MCP453x_503 },
- { "mcp4531-104", MCP453x_104 },
- { "mcp4532-502", MCP453x_502 },
- { "mcp4532-103", MCP453x_103 },
- { "mcp4532-503", MCP453x_503 },
- { "mcp4532-104", MCP453x_104 },
- { "mcp4541-502", MCP454x_502 },
- { "mcp4541-103", MCP454x_103 },
- { "mcp4541-503", MCP454x_503 },
- { "mcp4541-104", MCP454x_104 },
- { "mcp4542-502", MCP454x_502 },
- { "mcp4542-103", MCP454x_103 },
- { "mcp4542-503", MCP454x_503 },
- { "mcp4542-104", MCP454x_104 },
- { "mcp4551-502", MCP455x_502 },
- { "mcp4551-103", MCP455x_103 },
- { "mcp4551-503", MCP455x_503 },
- { "mcp4551-104", MCP455x_104 },
- { "mcp4552-502", MCP455x_502 },
- { "mcp4552-103", MCP455x_103 },
- { "mcp4552-503", MCP455x_503 },
- { "mcp4552-104", MCP455x_104 },
- { "mcp4561-502", MCP456x_502 },
- { "mcp4561-103", MCP456x_103 },
- { "mcp4561-503", MCP456x_503 },
- { "mcp4561-104", MCP456x_104 },
- { "mcp4562-502", MCP456x_502 },
- { "mcp4562-103", MCP456x_103 },
- { "mcp4562-503", MCP456x_503 },
- { "mcp4562-104", MCP456x_104 },
- { "mcp4631-502", MCP463x_502 },
- { "mcp4631-103", MCP463x_103 },
- { "mcp4631-503", MCP463x_503 },
- { "mcp4631-104", MCP463x_104 },
- { "mcp4632-502", MCP463x_502 },
- { "mcp4632-103", MCP463x_103 },
- { "mcp4632-503", MCP463x_503 },
- { "mcp4632-104", MCP463x_104 },
- { "mcp4641-502", MCP464x_502 },
- { "mcp4641-103", MCP464x_103 },
- { "mcp4641-503", MCP464x_503 },
- { "mcp4641-104", MCP464x_104 },
- { "mcp4642-502", MCP464x_502 },
- { "mcp4642-103", MCP464x_103 },
- { "mcp4642-503", MCP464x_503 },
- { "mcp4642-104", MCP464x_104 },
- { "mcp4651-502", MCP465x_502 },
- { "mcp4651-103", MCP465x_103 },
- { "mcp4651-503", MCP465x_503 },
- { "mcp4651-104", MCP465x_104 },
- { "mcp4652-502", MCP465x_502 },
- { "mcp4652-103", MCP465x_103 },
- { "mcp4652-503", MCP465x_503 },
- { "mcp4652-104", MCP465x_104 },
- { "mcp4661-502", MCP466x_502 },
- { "mcp4661-103", MCP466x_103 },
- { "mcp4661-503", MCP466x_503 },
- { "mcp4661-104", MCP466x_104 },
- { "mcp4662-502", MCP466x_502 },
- { "mcp4662-103", MCP466x_103 },
- { "mcp4662-503", MCP466x_503 },
- { "mcp4662-104", MCP466x_104 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, mcp4531_id);
-
static struct i2c_driver mcp4531_driver = {
.driver = {
.name = "mcp4531",
.of_match_table = of_match_ptr(mcp4531_of_match),
},
- .probe = mcp4531_probe,
+ .probe_new = mcp4531_probe,
.id_table = mcp4531_id,
};
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index 85714055cc74..90e895adf997 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -411,12 +411,14 @@ static int lmp91000_remove(struct i2c_client *client)
static const struct of_device_id lmp91000_of_match[] = {
{ .compatible = "ti,lmp91000", },
+ { .compatible = "ti,lmp91002", },
{ },
};
MODULE_DEVICE_TABLE(of, lmp91000_of_match);
static const struct i2c_device_id lmp91000_id[] = {
{ "lmp91000", 0 },
+ { "lmp91002", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, lmp91000_id);
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 4599fde4dd25..87c07af9181f 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/common/cros_ec_sensors_core.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
@@ -28,8 +29,6 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include "../common/cros_ec_sensors/cros_ec_sensors_core.h"
-
/*
* One channel for pressure, the other for timestamp.
*/
diff --git a/drivers/iio/resolver/Kconfig b/drivers/iio/resolver/Kconfig
new file mode 100644
index 000000000000..2ced9f22aa70
--- /dev/null
+++ b/drivers/iio/resolver/Kconfig
@@ -0,0 +1,17 @@
+#
+# Resolver/Synchro drivers
+#
+menu "Resolver to digital converters"
+
+config AD2S1200
+ tristate "Analog Devices ad2s1200/ad2s1205 driver"
+ depends on SPI
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Say yes here to build support for Analog Devices SPI resolver
+ to digital converters, the AD2S1200 and AD2S1205, which provide
+ direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad2s1200.
+endmenu
diff --git a/drivers/iio/resolver/Makefile b/drivers/iio/resolver/Makefile
new file mode 100644
index 000000000000..4e1dccae07e7
--- /dev/null
+++ b/drivers/iio/resolver/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Resolver/Synchro drivers
+#
+
+obj-$(CONFIG_AD2S1200) += ad2s1200.o
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/iio/resolver/ad2s1200.c
index aa62c64e9bc4..28e618af9939 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/iio/resolver/ad2s1200.c
@@ -2,43 +2,49 @@
* ad2s1200.c simple support for the ADI Resolver to Digital Converters:
* AD2S1200/1205
*
+ * Copyright (c) 2018-2018 David Veenstra <davidjulianveenstra@gmail.com>
* Copyright (c) 2010-2010 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
+
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#define DRV_NAME "ad2s1200"
-/* input pin sample and rdvel is controlled by driver */
-#define AD2S1200_PN 2
-
/* input clock on serial interface */
#define AD2S1200_HZ 8192000
/* clock period in nano second */
#define AD2S1200_TSCLK (1000000000 / AD2S1200_HZ)
+/**
+ * struct ad2s1200_state - driver instance specific data.
+ * @lock: protects both the GPIO pins and the rx buffer.
+ * @sdev: spi device.
+ * @sample: GPIO pin SAMPLE.
+ * @rdvel: GPIO pin RDVEL.
+ * @rx: buffer for spi transfers.
+ */
struct ad2s1200_state {
struct mutex lock;
struct spi_device *sdev;
- int sample;
- int rdvel;
- u8 rx[2] ____cacheline_aligned;
+ struct gpio_desc *sample;
+ struct gpio_desc *rdvel;
+ __be16 rx ____cacheline_aligned;
};
static int ad2s1200_read_raw(struct iio_dev *indio_dev,
@@ -47,39 +53,63 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev,
int *val2,
long m)
{
- int ret = 0;
- s16 vel;
struct ad2s1200_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL:
+ /* 2 * Pi / (2^12 - 1) ~= 0.001534355 */
+ *val = 0;
+ *val2 = 1534355;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_ANGL_VEL:
+ /* 2 * Pi ~= 6.283185 */
+ *val = 6;
+ *val2 = 283185;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ gpiod_set_value(st->sample, 0);
+
+ /* delay (6 * AD2S1200_TSCLK + 20) nano seconds */
+ udelay(1);
+ gpiod_set_value(st->sample, 1);
+ gpiod_set_value(st->rdvel, !!(chan->type == IIO_ANGL));
+
+ ret = spi_read(st->sdev, &st->rx, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
- mutex_lock(&st->lock);
- gpio_set_value(st->sample, 0);
- /* delay (6 * AD2S1200_TSCLK + 20) nano seconds */
- udelay(1);
- gpio_set_value(st->sample, 1);
- gpio_set_value(st->rdvel, !!(chan->type == IIO_ANGL));
- ret = spi_read(st->sdev, st->rx, 2);
- if (ret < 0) {
+ switch (chan->type) {
+ case IIO_ANGL:
+ *val = be16_to_cpup(&st->rx) >> 4;
+ break;
+ case IIO_ANGL_VEL:
+ *val = sign_extend32(be16_to_cpup(&st->rx) >> 4, 11);
+ break;
+ default:
+ mutex_unlock(&st->lock);
+ return -EINVAL;
+ }
+
+ /* delay (2 * AD2S1200_TSCLK + 20) ns for sample pulse */
+ udelay(1);
mutex_unlock(&st->lock);
- return ret;
- }
- switch (chan->type) {
- case IIO_ANGL:
- *val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- break;
- case IIO_ANGL_VEL:
- vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- vel = sign_extend32(vel, 11);
- *val = vel;
- break;
+ return IIO_VAL_INT;
default:
- mutex_unlock(&st->lock);
- return -EINVAL;
+ break;
}
- /* delay (2 * AD2S1200_TSCLK + 20) ns for sample pulse */
- udelay(1);
- mutex_unlock(&st->lock);
- return IIO_VAL_INT;
+
+ return -EINVAL;
}
static const struct iio_chan_spec ad2s1200_channels[] = {
@@ -88,11 +118,13 @@ static const struct iio_chan_spec ad2s1200_channels[] = {
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
}, {
.type = IIO_ANGL_VEL,
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
}
};
@@ -104,27 +136,30 @@ static int ad2s1200_probe(struct spi_device *spi)
{
struct ad2s1200_state *st;
struct iio_dev *indio_dev;
- int pn, ret = 0;
- unsigned short *pins = spi->dev.platform_data;
-
- for (pn = 0; pn < AD2S1200_PN; pn++) {
- ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT,
- DRV_NAME);
- if (ret) {
- dev_err(&spi->dev, "request gpio pin %d failed\n",
- pins[pn]);
- return ret;
- }
- }
+ int ret;
+
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
+
spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
mutex_init(&st->lock);
st->sdev = spi;
- st->sample = pins[0];
- st->rdvel = pins[1];
+
+ st->sample = devm_gpiod_get(&spi->dev, "adi,sample", GPIOD_OUT_LOW);
+ if (IS_ERR(st->sample)) {
+ dev_err(&spi->dev, "Failed to claim SAMPLE gpio: err=%ld\n",
+ PTR_ERR(st->sample));
+ return PTR_ERR(st->sample);
+ }
+
+ st->rdvel = devm_gpiod_get(&spi->dev, "adi,rdvel", GPIOD_OUT_LOW);
+ if (IS_ERR(st->rdvel)) {
+ dev_err(&spi->dev, "Failed to claim RDVEL gpio: err=%ld\n",
+ PTR_ERR(st->rdvel));
+ return PTR_ERR(st->rdvel);
+ }
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &ad2s1200_info;
@@ -133,17 +168,25 @@ static int ad2s1200_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(ad2s1200_channels);
indio_dev->name = spi_get_device_id(spi)->name;
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
-
spi->max_speed_hz = AD2S1200_HZ;
spi->mode = SPI_MODE_3;
- spi_setup(spi);
+ ret = spi_setup(spi);
- return 0;
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
+static const struct of_device_id ad2s1200_of_match[] = {
+ { .compatible = "adi,ad2s1200", },
+ { .compatible = "adi,ad2s1205", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad2s1200_of_match);
+
static const struct spi_device_id ad2s1200_id[] = {
{ "ad2s1200" },
{ "ad2s1205" },
@@ -154,12 +197,14 @@ MODULE_DEVICE_TABLE(spi, ad2s1200_id);
static struct spi_driver ad2s1200_driver = {
.driver = {
.name = DRV_NAME,
+ .of_match_table = of_match_ptr(ad2s1200_of_match),
},
.probe = ad2s1200_probe,
.id_table = ad2s1200_id,
};
module_spi_driver(ad2s1200_driver);
+MODULE_AUTHOR("David Veenstra <davidjulianveenstra@gmail.com>");
MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 2a972ed6851b..b03af54367c0 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -35,6 +35,17 @@ config INFINIBAND_USER_ACCESS
libibverbs, libibcm and a hardware driver library from
rdma-core <https://github.com/linux-rdma/rdma-core>.
+config INFINIBAND_USER_ACCESS_UCM
+ bool "Userspace CM (UCM, DEPRECATED)"
+ depends on BROKEN
+ depends on INFINIBAND_USER_ACCESS
+ help
+ The UCM module has known security flaws, which no one is
+ interested in fixing. The user-space part of this code was
+ dropped from the upstream a long time ago.
+
+ This option is DEPRECATED and planned to be removed.
+
config INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI
bool "Allow experimental legacy verbs in new ioctl uAPI (EXPERIMENTAL)"
depends on INFINIBAND_USER_ACCESS
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index dda9e856e3fa..61667705d746 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -5,15 +5,16 @@ user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \
$(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
- $(user_access-y)
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
+obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- security.o nldev.o restrack.o
+ nldev.o restrack.o
+ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
@@ -36,4 +37,4 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
rdma_core.o uverbs_std_types.o uverbs_ioctl.o \
uverbs_ioctl_merge.o uverbs_std_types_cq.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
- uverbs_std_types_mr.o
+ uverbs_std_types_mr.o uverbs_std_types_counters.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 88a7542d8c7b..4f32c4062fb6 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -56,7 +56,6 @@ struct addr_req {
struct sockaddr_storage src_addr;
struct sockaddr_storage dst_addr;
struct rdma_dev_addr *addr;
- struct rdma_addr_client *client;
void *context;
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context);
@@ -68,11 +67,8 @@ struct addr_req {
static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
-static void process_req(struct work_struct *work);
-
-static DEFINE_MUTEX(lock);
+static DEFINE_SPINLOCK(lock);
static LIST_HEAD(req_list);
-static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;
static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
@@ -112,7 +108,7 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
memcpy(&gid, nla_data(curr), nla_len(curr));
}
- mutex_lock(&lock);
+ spin_lock_bh(&lock);
list_for_each_entry(req, &req_list, list) {
if (nlh->nlmsg_seq != req->seq)
continue;
@@ -122,7 +118,7 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
found = 1;
break;
}
- mutex_unlock(&lock);
+ spin_unlock_bh(&lock);
if (!found)
pr_info("Couldn't find request waiting for DGID: %pI6\n",
@@ -223,28 +219,6 @@ int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
}
EXPORT_SYMBOL(rdma_addr_size_kss);
-static struct rdma_addr_client self;
-
-void rdma_addr_register_client(struct rdma_addr_client *client)
-{
- atomic_set(&client->refcount, 1);
- init_completion(&client->comp);
-}
-EXPORT_SYMBOL(rdma_addr_register_client);
-
-static inline void put_client(struct rdma_addr_client *client)
-{
- if (atomic_dec_and_test(&client->refcount))
- complete(&client->comp);
-}
-
-void rdma_addr_unregister_client(struct rdma_addr_client *client)
-{
- put_client(client);
- wait_for_completion(&client->comp);
-}
-EXPORT_SYMBOL(rdma_addr_unregister_client);
-
void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
const struct net_device *dev,
const unsigned char *dst_dev_addr)
@@ -302,7 +276,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
}
EXPORT_SYMBOL(rdma_translate_ip);
-static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
+static void set_timeout(struct addr_req *req, unsigned long time)
{
unsigned long delay;
@@ -310,23 +284,15 @@ static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
if ((long)delay < 0)
delay = 0;
- mod_delayed_work(addr_wq, delayed_work, delay);
+ mod_delayed_work(addr_wq, &req->work, delay);
}
static void queue_req(struct addr_req *req)
{
- struct addr_req *temp_req;
-
- mutex_lock(&lock);
- list_for_each_entry_reverse(temp_req, &req_list, list) {
- if (time_after_eq(req->timeout, temp_req->timeout))
- break;
- }
-
- list_add(&req->list, &temp_req->list);
-
- set_timeout(&req->work, req->timeout);
- mutex_unlock(&lock);
+ spin_lock_bh(&lock);
+ list_add_tail(&req->list, &req_list);
+ set_timeout(req, req->timeout);
+ spin_unlock_bh(&lock);
}
static int ib_nl_fetch_ha(const struct dst_entry *dst,
@@ -584,7 +550,6 @@ static void process_one_req(struct work_struct *_work)
struct addr_req *req;
struct sockaddr *src_in, *dst_in;
- mutex_lock(&lock);
req = container_of(_work, struct addr_req, work.work);
if (req->status == -ENODATA) {
@@ -596,72 +561,33 @@ static void process_one_req(struct work_struct *_work)
req->status = -ETIMEDOUT;
} else if (req->status == -ENODATA) {
/* requeue the work for retrying again */
- set_timeout(&req->work, req->timeout);
- mutex_unlock(&lock);
+ spin_lock_bh(&lock);
+ if (!list_empty(&req->list))
+ set_timeout(req, req->timeout);
+ spin_unlock_bh(&lock);
return;
}
}
- list_del(&req->list);
- mutex_unlock(&lock);
-
- /*
- * Although the work will normally have been canceled by the
- * workqueue, it can still be requeued as long as it is on the
- * req_list, so it could have been requeued before we grabbed &lock.
- * We need to cancel it after it is removed from req_list to really be
- * sure it is safe to free.
- */
- cancel_delayed_work(&req->work);
req->callback(req->status, (struct sockaddr *)&req->src_addr,
req->addr, req->context);
- put_client(req->client);
- kfree(req);
-}
-
-static void process_req(struct work_struct *work)
-{
- struct addr_req *req, *temp_req;
- struct sockaddr *src_in, *dst_in;
- struct list_head done_list;
-
- INIT_LIST_HEAD(&done_list);
-
- mutex_lock(&lock);
- list_for_each_entry_safe(req, temp_req, &req_list, list) {
- if (req->status == -ENODATA) {
- src_in = (struct sockaddr *) &req->src_addr;
- dst_in = (struct sockaddr *) &req->dst_addr;
- req->status = addr_resolve(src_in, dst_in, req->addr,
- true, req->seq);
- if (req->status && time_after_eq(jiffies, req->timeout))
- req->status = -ETIMEDOUT;
- else if (req->status == -ENODATA) {
- set_timeout(&req->work, req->timeout);
- continue;
- }
- }
- list_move_tail(&req->list, &done_list);
- }
-
- mutex_unlock(&lock);
-
- list_for_each_entry_safe(req, temp_req, &done_list, list) {
- list_del(&req->list);
- /* It is safe to cancel other work items from this work item
- * because at a time there can be only one work item running
- * with this single threaded work queue.
+ req->callback = NULL;
+
+ spin_lock_bh(&lock);
+ if (!list_empty(&req->list)) {
+ /*
+ * Although the work will normally have been canceled by the
+ * workqueue, it can still be requeued as long as it is on the
+ * req_list.
*/
cancel_delayed_work(&req->work);
- req->callback(req->status, (struct sockaddr *) &req->src_addr,
- req->addr, req->context);
- put_client(req->client);
+ list_del_init(&req->list);
kfree(req);
}
+ spin_unlock_bh(&lock);
}
-int rdma_resolve_ip(struct rdma_addr_client *client,
- struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
@@ -693,8 +619,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
req->addr = addr;
req->callback = callback;
req->context = context;
- req->client = client;
- atomic_inc(&client->refcount);
INIT_DELAYED_WORK(&req->work, process_one_req);
req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
@@ -710,7 +634,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
break;
default:
ret = req->status;
- atomic_dec(&client->refcount);
goto err;
}
return ret;
@@ -742,18 +665,36 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
struct addr_req *req, *temp_req;
+ struct addr_req *found = NULL;
- mutex_lock(&lock);
+ spin_lock_bh(&lock);
list_for_each_entry_safe(req, temp_req, &req_list, list) {
if (req->addr == addr) {
- req->status = -ECANCELED;
- req->timeout = jiffies;
- list_move(&req->list, &req_list);
- set_timeout(&req->work, req->timeout);
+ /*
+ * Removing from the list means we take ownership of
+ * the req
+ */
+ list_del_init(&req->list);
+ found = req;
break;
}
}
- mutex_unlock(&lock);
+ spin_unlock_bh(&lock);
+
+ if (!found)
+ return;
+
+ /*
+ * sync canceling the work after removing it from the req_list
+	 * guarantees no work is running and none will be started.
+ */
+ cancel_delayed_work_sync(&found->work);
+
+ if (found->callback)
+ found->callback(-ECANCELED, (struct sockaddr *)&found->src_addr,
+ found->addr, found->context);
+
+ kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);
@@ -791,8 +732,8 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
dev_addr.net = &init_net;
init_completion(&ctx.comp);
- ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
- &dev_addr, 1000, resolve_cb, &ctx);
+ ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr,
+ &dev_addr, 1000, resolve_cb, &ctx);
if (ret)
return ret;
@@ -810,11 +751,17 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
{
+ struct addr_req *req;
+
if (event == NETEVENT_NEIGH_UPDATE) {
struct neighbour *neigh = ctx;
- if (neigh->nud_state & NUD_VALID)
- set_timeout(&work, jiffies);
+ if (neigh->nud_state & NUD_VALID) {
+ spin_lock_bh(&lock);
+ list_for_each_entry(req, &req_list, list)
+ set_timeout(req, jiffies);
+ spin_unlock_bh(&lock);
+ }
}
return 0;
}
@@ -830,14 +777,13 @@ int addr_init(void)
return -ENOMEM;
register_netevent_notifier(&nb);
- rdma_addr_register_client(&self);
return 0;
}
void addr_cleanup(void)
{
- rdma_addr_unregister_client(&self);
unregister_netevent_notifier(&nb);
destroy_workqueue(addr_wq);
+ WARN_ON(!list_empty(&req_list));
}
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3330d97faa1e..71a34bee453d 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -125,6 +125,16 @@ const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
}
EXPORT_SYMBOL(ib_cache_gid_type_str);
+/** rdma_is_zero_gid - Check if given GID is zero or not.
+ * @gid: GID to check
+ * Returns true if given GID is zero, returns false otherwise.
+ */
+bool rdma_is_zero_gid(const union ib_gid *gid)
+{
+ return !memcmp(gid, &zgid, sizeof(*gid));
+}
+EXPORT_SYMBOL(rdma_is_zero_gid);
+
int ib_cache_gid_parse_type_str(const char *buf)
{
unsigned int i;
@@ -149,6 +159,11 @@ int ib_cache_gid_parse_type_str(const char *buf)
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
+static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
+{
+ return device->cache.ports[port - rdma_start_port(device)].gid;
+}
+
static void del_roce_gid(struct ib_device *device, u8 port_num,
struct ib_gid_table *table, int ix)
{
@@ -231,7 +246,7 @@ static int add_modify_gid(struct ib_gid_table *table,
* So ignore such behavior for IB link layer and don't
* fail the call, but don't add such entry to GID cache.
*/
- if (!memcmp(gid, &zgid, sizeof(*gid)))
+ if (rdma_is_zero_gid(gid))
return 0;
}
@@ -264,7 +279,7 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
if (rdma_protocol_roce(ib_dev, port))
del_roce_gid(ib_dev, port, table, ix);
- memcpy(&table->data_vec[ix].gid, &zgid, sizeof(zgid));
+ memset(&table->data_vec[ix].gid, 0, sizeof(table->data_vec[ix].gid));
memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr));
table->data_vec[ix].context = NULL;
}
@@ -363,10 +378,10 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
* IB spec version 1.3 section 4.1.1 point (6) and
* section 12.7.10 and section 12.7.20
*/
- if (!memcmp(gid, &zgid, sizeof(*gid)))
+ if (rdma_is_zero_gid(gid))
return -EINVAL;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
mutex_lock(&table->lock);
@@ -433,7 +448,7 @@ _ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
int ret = 0;
int ix;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
mutex_lock(&table->lock);
@@ -472,7 +487,7 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
int ix;
bool deleted = false;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
mutex_lock(&table->lock);
@@ -496,7 +511,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
{
struct ib_gid_table *table;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
if (index < 0 || index >= table->sz)
return -EINVAL;
@@ -589,7 +604,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
if (!rdma_is_port_valid(ib_dev, port))
return -ENOENT;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
if (ndev)
mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -647,7 +662,7 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
!rdma_protocol_roce(ib_dev, port))
return -EPROTONOSUPPORT;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
read_lock_irqsave(&table->rwlock, flags);
for (i = 0; i < table->sz; i++) {
@@ -724,8 +739,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
mutex_lock(&table->lock);
for (i = 0; i < table->sz; ++i) {
- if (memcmp(&table->data_vec[i].gid, &zgid,
- sizeof(table->data_vec[i].gid))) {
+ if (!rdma_is_zero_gid(&table->data_vec[i].gid)) {
del_gid(ib_dev, port, table, i);
deleted = true;
}
@@ -747,7 +761,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
unsigned int gid_type;
unsigned long mask;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
mask = GID_ATTR_FIND_MASK_GID_TYPE |
GID_ATTR_FIND_MASK_DEFAULT |
@@ -772,8 +786,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
}
}
-static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
- struct ib_gid_table *table)
+static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table)
{
unsigned int i;
unsigned long roce_gid_type_mask;
@@ -783,8 +797,7 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
num_default_gids = hweight_long(roce_gid_type_mask);
for (i = 0; i < num_default_gids && i < table->sz; i++) {
- struct ib_gid_table_entry *entry =
- &table->data_vec[i];
+ struct ib_gid_table_entry *entry = &table->data_vec[i];
entry->props |= GID_TABLE_ENTRY_DEFAULT;
current_gid = find_next_bit(&roce_gid_type_mask,
@@ -792,59 +805,42 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
current_gid);
entry->attr.gid_type = current_gid++;
}
+}
- return 0;
+
+static void gid_table_release_one(struct ib_device *ib_dev)
+{
+ struct ib_gid_table *table;
+ u8 port;
+
+ for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+ table = ib_dev->cache.ports[port].gid;
+ release_gid_table(table);
+ ib_dev->cache.ports[port].gid = NULL;
+ }
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
u8 port;
struct ib_gid_table *table;
- int err = 0;
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
u8 rdma_port = port + rdma_start_port(ib_dev);
- table =
- alloc_gid_table(
+ table = alloc_gid_table(
ib_dev->port_immutable[rdma_port].gid_tbl_len);
- if (!table) {
- err = -ENOMEM;
+ if (!table)
goto rollback_table_setup;
- }
- err = gid_table_reserve_default(ib_dev,
- port + rdma_start_port(ib_dev),
- table);
- if (err)
- goto rollback_table_setup;
+ gid_table_reserve_default(ib_dev, rdma_port, table);
ib_dev->cache.ports[port].gid = table;
}
-
return 0;
rollback_table_setup:
- for (port = 0; port < ib_dev->phys_port_cnt; port++) {
- table = ib_dev->cache.ports[port].gid;
-
- cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
- table);
- release_gid_table(table);
- }
-
- return err;
-}
-
-static void gid_table_release_one(struct ib_device *ib_dev)
-{
- struct ib_gid_table *table;
- u8 port;
-
- for (port = 0; port < ib_dev->phys_port_cnt; port++) {
- table = ib_dev->cache.ports[port].gid;
- release_gid_table(table);
- ib_dev->cache.ports[port].gid = NULL;
- }
+ gid_table_release_one(ib_dev);
+ return -ENOMEM;
}
static void gid_table_cleanup_one(struct ib_device *ib_dev)
@@ -886,7 +882,7 @@ int ib_get_cached_gid(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- table = device->cache.ports[port_num - rdma_start_port(device)].gid;
+ table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
read_unlock_irqrestore(&table->rwlock, flags);
@@ -1104,7 +1100,7 @@ static int config_non_roce_gid_cache(struct ib_device *device,
gid_attr.device = device;
gid_attr.port_num = port;
- table = device->cache.ports[port - rdma_start_port(device)].gid;
+ table = rdma_gid_table(device, port);
mutex_lock(&table->lock);
for (i = 0; i < gid_tbl_len; ++i) {
@@ -1137,7 +1133,7 @@ static void ib_cache_update(struct ib_device *device,
if (!rdma_is_port_valid(device, port))
return;
- table = device->cache.ports[port - rdma_start_port(device)].gid;
+ table = rdma_gid_table(device, port);
tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
if (!tprops)
@@ -1300,13 +1296,3 @@ void ib_cache_cleanup_one(struct ib_device *device)
flush_workqueue(ib_wq);
gid_table_cleanup_one(device);
}
-
-void __init ib_cache_setup(void)
-{
- roce_gid_mgmt_init();
-}
-
-void __exit ib_cache_cleanup(void)
-{
- roce_gid_mgmt_cleanup();
-}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 36a4d90a7b47..27a7b0a2e27a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -452,6 +452,32 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
cm_id_priv->private_data_len = private_data_len;
}
+static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
+ struct ib_grh *grh, struct cm_av *av)
+{
+ struct rdma_ah_attr new_ah_attr;
+ int ret;
+
+ av->port = port;
+ av->pkey_index = wc->pkey_index;
+
+ /*
+ * av->ah_attr might be initialized based on past wc during incoming
+ * connect request or while sending out connect request. So initialize
+ * a new ah_attr on stack. If initialization fails, old ah_attr is
+ * used for sending any responses. If initialization is successful,
+ * than new ah_attr is used by overwriting old one.
+ */
+ ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
+ port->port_num, wc,
+ grh, &new_ah_attr);
+ if (ret)
+ return ret;
+
+ memcpy(&av->ah_attr, &new_ah_attr, sizeof(new_ah_attr));
+ return 0;
+}
+
static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
struct ib_grh *grh, struct cm_av *av)
{
@@ -509,6 +535,7 @@ static struct cm_port *get_cm_port_from_path(struct sa_path_rec *path)
static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
struct cm_id_private *cm_id_priv)
{
+ struct rdma_ah_attr new_ah_attr;
struct cm_device *cm_dev;
struct cm_port *port;
int ret;
@@ -524,15 +551,26 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
return ret;
av->port = port;
+
+ /*
+ * av->ah_attr might be initialized based on wc or during
+ * request processing time. So initialize a new ah_attr on stack.
+ * If initialization fails, old ah_attr is used for sending any
+	 * responses. If initialization is successful, then the new ah_attr
+	 * is used by overwriting the old one.
+ */
ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
- &av->ah_attr);
+ &new_ah_attr);
if (ret)
return ret;
av->timeout = path->packet_life_time + 1;
ret = add_cm_id_to_port_list(cm_id_priv, av, port);
- return ret;
+ if (ret)
+ return ret;
+ memcpy(&av->ah_attr, &new_ah_attr, sizeof(new_ah_attr));
+ return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -1669,7 +1707,9 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
spin_lock_irq(&cm_id_priv->lock);
work = cm_dequeue_work(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
- BUG_ON(!work);
+ if (!work)
+ return;
+
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
&work->cm_event);
cm_free_work(work);
@@ -3189,12 +3229,6 @@ static int cm_lap_handler(struct cm_work *work)
if (!cm_id_priv)
return -EINVAL;
- ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
- if (ret)
- goto deref;
-
param = &work->cm_event.param.lap_rcvd;
memset(&work->path[0], 0, sizeof(work->path[1]));
cm_path_set_rec_type(work->port->cm_dev->ib_device,
@@ -3239,10 +3273,16 @@ static int cm_lap_handler(struct cm_work *work)
goto unlock;
}
- cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
- cm_id_priv->tid = lap_msg->hdr.tid;
+ ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto unlock;
+
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
cm_id_priv);
+ cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
+ cm_id_priv->tid = lap_msg->hdr.tid;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a693fcd4c513..6813ee717a38 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -146,6 +146,34 @@ const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
}
EXPORT_SYMBOL(rdma_consumer_reject_data);
+/**
+ * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
+ * @id: Communication Identifier
+ */
+struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
+{
+ struct rdma_id_private *id_priv;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (id->device->node_type == RDMA_NODE_RNIC)
+ return id_priv->cm_id.iw;
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_iw_cm_id);
+
+/**
+ * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
+ * @res: rdma resource tracking entry pointer
+ */
+struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
+{
+ struct rdma_id_private *id_priv =
+ container_of(res, struct rdma_id_private, res);
+
+ return &id_priv->id;
+}
+EXPORT_SYMBOL(rdma_res_to_id);
+
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
@@ -156,7 +184,6 @@ static struct ib_client cma_client = {
};
static struct ib_sa_client sa_client;
-static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
@@ -2103,7 +2130,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
event.param.conn.responder_resources = iw_event->ord;
break;
default:
- BUG_ON(1);
+ goto out;
}
event.status = iw_event->status;
@@ -2936,7 +2963,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
- ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
+ ret = rdma_resolve_ip(cma_src_addr(id_priv),
dst_addr, &id->route.addr.dev_addr,
timeout_ms, addr_handler, id_priv);
}
@@ -4573,7 +4600,6 @@ static int __init cma_init(void)
goto err_wq;
ib_sa_register_client(&sa_client);
- rdma_addr_register_client(&addr_client);
register_netdevice_notifier(&cma_nb);
ret = ib_register_client(&cma_client);
@@ -4587,7 +4613,6 @@ static int __init cma_init(void)
err:
unregister_netdevice_notifier(&cma_nb);
- rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
err_wq:
destroy_workqueue(cma_wq);
@@ -4600,7 +4625,6 @@ static void __exit cma_cleanup(void)
rdma_nl_unregister(RDMA_NL_RDMA_CM);
ib_unregister_client(&cma_client);
unregister_netdevice_notifier(&cma_nb);
- rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
unregister_pernet_subsys(&cma_pernet_operations);
destroy_workqueue(cma_wq);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 54163a6e4067..fae417a391fb 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -88,9 +88,6 @@ int ib_device_register_sysfs(struct ib_device *device,
u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
-void ib_cache_setup(void);
-void ib_cache_cleanup(void);
-
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index ea9fbcfb21bd..84f51386e1e3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1225,7 +1225,7 @@ static int __init ib_core_init(void)
nldev_init();
rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
- ib_cache_setup();
+ roce_gid_mgmt_init();
return 0;
@@ -1248,7 +1248,7 @@ err:
static void __exit ib_core_cleanup(void)
{
- ib_cache_cleanup();
+ roce_gid_mgmt_cleanup();
nldev_exit();
rdma_nl_unregister(RDMA_NL_LS);
unregister_lsm_notifier(&ibdev_lsm_nb);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b28452a55a08..f742ae7a768b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -651,7 +651,6 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
struct ib_mad_queue *mad_queue;
unsigned long flags;
- BUG_ON(!mad_list->mad_queue);
mad_queue = mad_list->mad_queue;
spin_lock_irqsave(&mad_queue->lock, flags);
list_del(&mad_list->list);
@@ -1557,7 +1556,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
mad_reg_req->oui, 3)) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i];
- BUG_ON(!*method);
+ if (!*method)
+ goto error3;
goto check_in_use;
}
}
@@ -1567,10 +1567,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
vclass]->oui[i])) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i];
- BUG_ON(*method);
/* Allocate method table for this OUI */
- if ((ret = allocate_method_table(method)))
- goto error3;
+ if (!*method) {
+ ret = allocate_method_table(method);
+ if (ret)
+ goto error3;
+ }
memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
mad_reg_req->oui, 3);
goto check_in_use;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index eb567765f45c..340c7bea45ab 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -98,8 +98,83 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING,
.len = IFNAMSIZ },
+ [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
+ [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 },
+ [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
+ [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
};
+static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
+ enum rdma_nldev_print_type print_type)
+{
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
+ return -EMSGSIZE;
+ if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
+ nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
+ enum rdma_nldev_print_type print_type,
+ u32 value)
+{
+ if (put_driver_name_print_type(msg, name, print_type))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
+ enum rdma_nldev_print_type print_type,
+ u64 value)
+{
+ if (put_driver_name_print_type(msg, name, print_type))
+ return -EMSGSIZE;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
+ RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
+{
+ return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u32);
+
+int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
+ u32 value)
+{
+ return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
+
+int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
+{
+ return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u64);
+
+int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
+{
+ return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
+
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
@@ -122,7 +197,8 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
- device->attrs.device_cap_flags, 0))
+ device->attrs.device_cap_flags,
+ RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
ib_get_device_fw_str(device, fw);
@@ -131,10 +207,12 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
return -EMSGSIZE;
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
- be64_to_cpu(device->node_guid), 0))
+ be64_to_cpu(device->node_guid),
+ RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
- be64_to_cpu(device->attrs.sys_image_guid), 0))
+ be64_to_cpu(device->attrs.sys_image_guid),
+ RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
return -EMSGSIZE;
@@ -161,11 +239,11 @@ static int fill_port_info(struct sk_buff *msg,
BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
- (u64)attr.port_cap_flags, 0))
+ (u64)attr.port_cap_flags, RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
if (rdma_protocol_ib(device, port) &&
nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
- attr.subnet_prefix, 0))
+ attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
if (rdma_protocol_ib(device, port)) {
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
@@ -209,8 +287,8 @@ static int fill_res_info_entry(struct sk_buff *msg,
if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
goto err;
- if (nla_put_u64_64bit(msg,
- RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 0))
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
+ RDMA_NLDEV_ATTR_PAD))
goto err;
nla_nest_end(msg, entry_attr);
@@ -282,6 +360,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_qp *qp = container_of(res, struct ib_qp, res);
+ struct rdma_restrack_root *resroot = &qp->device->res;
struct ib_qp_init_attr qp_init_attr;
struct nlattr *entry_attr;
struct ib_qp_attr qp_attr;
@@ -331,6 +410,9 @@ static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
@@ -346,6 +428,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg,
{
struct rdma_id_private *id_priv =
container_of(res, struct rdma_id_private, res);
+ struct rdma_restrack_root *resroot = &id_priv->id.device->res;
struct rdma_cm_id *cm_id = &id_priv->id;
struct nlattr *entry_attr;
@@ -387,6 +470,9 @@ static int fill_res_cm_id_entry(struct sk_buff *msg,
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
@@ -400,6 +486,7 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_cq *cq = container_of(res, struct ib_cq, res);
+ struct rdma_restrack_root *resroot = &cq->device->res;
struct nlattr *entry_attr;
entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
@@ -409,7 +496,7 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
goto err;
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
- atomic_read(&cq->usecnt), 0))
+ atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
goto err;
/* Poll context is only valid for kernel CQs */
@@ -420,6 +507,9 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
@@ -433,6 +523,7 @@ static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_mr *mr = container_of(res, struct ib_mr, res);
+ struct rdma_restrack_root *resroot = &mr->pd->device->res;
struct nlattr *entry_attr;
entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
@@ -444,17 +535,18 @@ static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
goto err;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
goto err;
- if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_IOVA,
- mr->iova, 0))
- goto err;
}
- if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, 0))
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
+ RDMA_NLDEV_ATTR_PAD))
goto err;
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
@@ -468,6 +560,7 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_pd *pd = container_of(res, struct ib_pd, res);
+ struct rdma_restrack_root *resroot = &pd->device->res;
struct nlattr *entry_attr;
entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
@@ -484,7 +577,7 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
goto err;
}
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
- atomic_read(&pd->usecnt), 0))
+ atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
goto err;
if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
@@ -494,6 +587,9 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index efddd13e3edb..3b7fa0ccaa08 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
*/
@@ -12,9 +12,16 @@
#include "cma_priv.h"
+static int fill_res_noop(struct sk_buff *msg,
+ struct rdma_restrack_entry *entry)
+{
+ return 0;
+}
+
void rdma_restrack_init(struct rdma_restrack_root *res)
{
init_rwsem(&res->rwsem);
+ res->fill_res_entry = fill_res_noop;
}
static const char *type2str(enum rdma_restrack_type type)
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index c0e4fd55e2cc..a4fbdc5d28fa 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -44,8 +44,6 @@
static struct workqueue_struct *gid_cache_wq;
-static struct workqueue_struct *gid_cache_wq;
-
enum gid_op_type {
GID_DEL = 0,
GID_ADD
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index b61dda6b04fc..9b0bea8303e0 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -30,8 +30,6 @@
* SOFTWARE.
*/
-#ifdef CONFIG_SECURITY_INFINIBAND
-
#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>
@@ -751,5 +749,3 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
pkey_index,
map->agent.security);
}
-
-#endif /* CONFIG_SECURITY_INFINIBAND */
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index eab43b17e9cf..ec8fb289621f 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -235,7 +235,7 @@ static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
return NULL;
mutex_lock(&mut);
- mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+ mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
mutex_unlock(&mut);
if (mc->id < 0)
goto error;
@@ -1421,6 +1421,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
goto err3;
}
+ mutex_lock(&mut);
+ idr_replace(&multicast_idr, mc, mc->id);
+ mutex_unlock(&mut);
+
mutex_unlock(&file->mut);
ucma_put_ctx(ctx);
return 0;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2b6c9b516070..54ab6335c48d 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -64,8 +64,6 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
}
sg_free_table(&umem->sg_head);
- return;
-
}
/**
@@ -119,16 +117,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
- /*
- * We ask for writable memory if any of the following
- * access flags are set. "Local write" and "remote write"
- * obviously require write access. "Remote atomic" can do
- * things like fetch and add, which will modify memory, and
- * "MW bind" can change permissions by binding a window.
- */
- umem->writable = !!(access &
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+ umem->writable = ib_access_writable(access);
if (access & IB_ACCESS_ON_DEMAND) {
ret = ib_umem_odp_get(context, umem, access);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index cfb51618ab7a..c0d40fc3a53a 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -263,6 +263,7 @@ struct ib_uverbs_flow_spec {
struct ib_uverbs_flow_spec_action_tag flow_tag;
struct ib_uverbs_flow_spec_action_drop drop;
struct ib_uverbs_flow_spec_action_handle action;
+ struct ib_uverbs_flow_spec_action_count flow_count;
};
};
@@ -287,6 +288,7 @@ extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL);
extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_XRCD);
extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION);
extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DM);
+extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS);
#define IB_UVERBS_DECLARE_CMD(name) \
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e3662a8ee465..3179a95c6f5e 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2748,43 +2748,82 @@ out_put:
struct ib_uflow_resources {
size_t max;
size_t num;
- struct ib_flow_action *collection[0];
+ size_t collection_num;
+ size_t counters_num;
+ struct ib_counters **counters;
+ struct ib_flow_action **collection;
};
static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
struct ib_uflow_resources *resources;
- resources =
- kmalloc(struct_size(resources, collection, num_specs),
- GFP_KERNEL);
+ resources = kzalloc(sizeof(*resources), GFP_KERNEL);
if (!resources)
- return NULL;
+ goto err_res;
+
+ resources->counters =
+ kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
+
+ if (!resources->counters)
+ goto err_cnt;
+
+ resources->collection =
+ kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
+
+ if (!resources->collection)
+ goto err_collection;
- resources->num = 0;
resources->max = num_specs;
return resources;
+
+err_collection:
+ kfree(resources->counters);
+err_cnt:
+ kfree(resources);
+err_res:
+ return NULL;
}
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
unsigned int i;
- for (i = 0; i < uflow_res->num; i++)
+ for (i = 0; i < uflow_res->collection_num; i++)
atomic_dec(&uflow_res->collection[i]->usecnt);
+ for (i = 0; i < uflow_res->counters_num; i++)
+ atomic_dec(&uflow_res->counters[i]->usecnt);
+
+ kfree(uflow_res->collection);
+ kfree(uflow_res->counters);
kfree(uflow_res);
}
static void flow_resources_add(struct ib_uflow_resources *uflow_res,
- struct ib_flow_action *action)
+ enum ib_flow_spec_type type,
+ void *ibobj)
{
WARN_ON(uflow_res->num >= uflow_res->max);
- atomic_inc(&action->usecnt);
- uflow_res->collection[uflow_res->num++] = action;
+ switch (type) {
+ case IB_FLOW_SPEC_ACTION_HANDLE:
+ atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
+ uflow_res->collection[uflow_res->collection_num++] =
+ (struct ib_flow_action *)ibobj;
+ break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
+ uflow_res->counters[uflow_res->counters_num++] =
+ (struct ib_counters *)ibobj;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ uflow_res->num++;
}
static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
@@ -2821,9 +2860,29 @@ static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
return -EINVAL;
ib_spec->action.size =
sizeof(struct ib_flow_spec_action_handle);
- flow_resources_add(uflow_res, ib_spec->action.act);
+ flow_resources_add(uflow_res,
+ IB_FLOW_SPEC_ACTION_HANDLE,
+ ib_spec->action.act);
uobj_put_obj_read(ib_spec->action.act);
break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ if (kern_spec->flow_count.size !=
+ sizeof(struct ib_uverbs_flow_spec_action_count))
+ return -EINVAL;
+ ib_spec->flow_count.counters =
+ uobj_get_obj_read(counters,
+ UVERBS_OBJECT_COUNTERS,
+ kern_spec->flow_count.handle,
+ ucontext);
+ if (!ib_spec->flow_count.counters)
+ return -EINVAL;
+ ib_spec->flow_count.size =
+ sizeof(struct ib_flow_spec_action_count);
+ flow_resources_add(uflow_res,
+ IB_FLOW_SPEC_ACTION_COUNT,
+ ib_spec->flow_count.counters);
+ uobj_put_obj_read(ib_spec->flow_count.counters);
+ break;
default:
return -EINVAL;
}
@@ -2948,6 +3007,28 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
break;
+ case IB_FLOW_SPEC_GRE:
+ ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
+ memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
+ break;
+ case IB_FLOW_SPEC_MPLS:
+ ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
+ memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
+ break;
default:
return -EINVAL;
}
@@ -3507,6 +3588,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
uflow_res);
if (err)
goto err_free;
+
flow_attr->size +=
((union ib_flow_spec *) ib_spec)->size;
cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
@@ -3519,11 +3601,16 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
err = -EINVAL;
goto err_free;
}
- flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
+
+ flow_id = qp->device->create_flow(qp, flow_attr,
+ IB_FLOW_DOMAIN_USER, uhw);
+
if (IS_ERR(flow_id)) {
err = PTR_ERR(flow_id);
goto err_free;
}
+ atomic_inc(&qp->usecnt);
+ flow_id->qp = qp;
flow_id->uobject = uobj;
uobj->object = flow_id;
uflow = container_of(uobj, typeof(*uflow), uobject);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 4445d8ee9314..3ae2339dd27a 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -41,6 +41,8 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
@@ -1090,6 +1092,44 @@ err:
return;
}
+static void ib_uverbs_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct ib_device *ib_dev = ibcontext->device;
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
+ if (!owning_process)
+ return;
+
+ owning_mm = get_task_mm(owning_process);
+ if (!owning_mm) {
+ pr_info("no mm, disassociate ucontext is pending task termination\n");
+ while (1) {
+ put_task_struct(owning_process);
+ usleep_range(1000, 2000);
+ owning_process = get_pid_task(ibcontext->tgid,
+ PIDTYPE_PID);
+ if (!owning_process ||
+ owning_process->state == TASK_DEAD) {
+ pr_info("disassociate ucontext done, task was terminated\n");
+ /* in case task was dead need to release the
+ * task struct.
+ */
+ if (owning_process)
+ put_task_struct(owning_process);
+ return;
+ }
+ }
+ }
+
+ down_write(&owning_mm->mmap_sem);
+ ib_dev->disassociate_ucontext(ibcontext);
+ up_write(&owning_mm->mmap_sem);
+ mmput(owning_mm);
+ put_task_struct(owning_process);
+}
+
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
struct ib_device *ib_dev)
{
@@ -1130,7 +1170,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
* (e.g mmput).
*/
ib_uverbs_event_handler(&file->event_handler, &event);
- ib_dev->disassociate_ucontext(ucontext);
+ ib_uverbs_disassociate_ucontext(ucontext);
mutex_lock(&file->cleanup_mutex);
ib_uverbs_cleanup_ucontext(file, ucontext, true);
mutex_unlock(&file->cleanup_mutex);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 569f48bd821e..b570acbd94af 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -302,7 +302,8 @@ static DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
&UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL),
&UVERBS_OBJECT(UVERBS_OBJECT_XRCD),
&UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION),
- &UVERBS_OBJECT(UVERBS_OBJECT_DM));
+ &UVERBS_OBJECT(UVERBS_OBJECT_DM),
+ &UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS));
const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
{
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
new file mode 100644
index 000000000000..03b182a684a6
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "uverbs.h"
+#include <rdma/uverbs_std_types.h>
+
+static int uverbs_free_counters(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ struct ib_counters *counters = uobject->object;
+
+ if (why == RDMA_REMOVE_DESTROY &&
+ atomic_read(&counters->usecnt))
+ return -EBUSY;
+
+ return counters->device->destroy_counters(counters);
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(struct ib_device *ib_dev,
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_counters *counters;
+ struct ib_uobject *uobj;
+ int ret;
+
+ /*
+ * This check should be removed once the infrastructure
+ * have the ability to remove methods from parse tree once
+ * such condition is met.
+ */
+ if (!ib_dev->create_counters)
+ return -EOPNOTSUPP;
+
+ uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
+ counters = ib_dev->create_counters(ib_dev, attrs);
+ if (IS_ERR(counters)) {
+ ret = PTR_ERR(counters);
+ goto err_create_counters;
+ }
+
+ counters->device = ib_dev;
+ counters->uobject = uobj;
+ uobj->object = counters;
+ atomic_set(&counters->usecnt, 0);
+
+ return 0;
+
+err_create_counters:
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(struct ib_device *ib_dev,
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_counters_read_attr read_attr = {};
+ const struct uverbs_attr *uattr;
+ struct ib_counters *counters =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE);
+ int ret;
+
+ if (!ib_dev->read_counters)
+ return -EOPNOTSUPP;
+
+ if (!atomic_read(&counters->usecnt))
+ return -EINVAL;
+
+ ret = uverbs_copy_from(&read_attr.flags, attrs,
+ UVERBS_ATTR_READ_COUNTERS_FLAGS);
+ if (ret)
+ return ret;
+
+ uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
+ read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
+ read_attr.counters_buff = kcalloc(read_attr.ncounters,
+ sizeof(u64), GFP_KERNEL);
+ if (!read_attr.counters_buff)
+ return -ENOMEM;
+
+ ret = ib_dev->read_counters(counters,
+ &read_attr,
+ attrs);
+ if (ret)
+ goto err_read;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF,
+ read_attr.counters_buff,
+ read_attr.ncounters * sizeof(u64));
+
+err_read:
+ kfree(read_attr.counters_buff);
+ return ret;
+}
+
+static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_CREATE,
+ &UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_NEW,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+
+static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_COUNTERS_DESTROY,
+ uverbs_destroy_def_handler,
+ &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_DESTROY,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+
+#define MAX_COUNTERS_BUFF_SIZE USHRT_MAX
+static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_READ,
+ &UVERBS_ATTR_IDR(UVERBS_ATTR_READ_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_READ,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_READ_COUNTERS_BUFF,
+ UVERBS_ATTR_SIZE(0, MAX_COUNTERS_BUFF_SIZE),
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_READ_COUNTERS_FLAGS,
+ UVERBS_ATTR_TYPE(__u32),
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COUNTERS,
+ &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_counters),
+ &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_DESTROY),
+ &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_READ));
+
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index b0dbae9dd0d7..3d293d01afea 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -65,7 +65,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
struct ib_cq_init_attr attr = {};
struct ib_cq *cq;
struct ib_uverbs_completion_event_file *ev_file = NULL;
- const struct uverbs_attr *ev_file_attr;
struct ib_uobject *ev_file_uobj;
if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
@@ -87,10 +86,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
UVERBS_ATTR_CREATE_CQ_FLAGS)))
return -EFAULT;
- ev_file_attr = uverbs_attr_get(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
- if (!IS_ERR(ev_file_attr)) {
- ev_file_uobj = ev_file_attr->obj_attr.uobject;
-
+ ev_file_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
+ if (!IS_ERR(ev_file_uobj)) {
ev_file = container_of(ev_file_uobj,
struct ib_uverbs_completion_event_file,
uobj_file.uobj);
@@ -102,8 +99,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
goto err_event_file;
}
- obj = container_of(uverbs_attr_get(attrs,
- UVERBS_ATTR_CREATE_CQ_HANDLE)->obj_attr.uobject,
+ obj = container_of(uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_CQ_HANDLE),
typeof(*obj), uobject);
obj->uverbs_file = ucontext->ufile;
obj->comp_events_reported = 0;
@@ -170,13 +167,17 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(struct ib_device *ib_dev,
struct ib_uverbs_file *file,
struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_destroy_cq_resp resp;
struct ib_uobject *uobj =
- uverbs_attr_get(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE)->obj_attr.uobject;
- struct ib_ucq_object *obj = container_of(uobj, struct ib_ucq_object,
- uobject);
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE);
+ struct ib_uverbs_destroy_cq_resp resp;
+ struct ib_ucq_object *obj;
int ret;
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
+
+ obj = container_of(uobj, struct ib_ucq_object, uobject);
+
if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
return -EOPNOTSUPP;
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index b4f016dfa23d..a7be51cf2e42 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -320,7 +320,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device
return ret;
/* No need to check as this attribute is marked as MANDATORY */
- uobj = uverbs_attr_get(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE)->obj_attr.uobject;
+ uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs);
if (IS_ERR(action))
return PTR_ERR(action);
@@ -350,7 +350,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device
if (ret)
return ret;
- uobj = uverbs_attr_get(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE)->obj_attr.uobject;
+ uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
action = uobj->object;
if (action->type != IB_FLOW_ACTION_ESP)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 6ddfb1fade79..0b56828c1319 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1983,7 +1983,7 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
if (!qp->device->create_flow)
return ERR_PTR(-EOPNOTSUPP);
- flow_id = qp->device->create_flow(qp, flow_attr, domain);
+ flow_id = qp->device->create_flow(qp, flow_attr, domain, NULL);
if (!IS_ERR(flow_id)) {
atomic_inc(&qp->usecnt);
flow_id->qp = qp;
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 837862287a29..c69bc4f52049 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -162,7 +162,6 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
spin_unlock_irq(&rhp->lock);
idr_preload_end();
- BUG_ON(ret == -ENOSPC);
return ret < 0 ? ret : 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index 0a671a61fc92..e0522a5d5a06 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_CXGB4
tristate "Chelsio T4/T5 RDMA Driver"
depends on CHELSIO_T4 && INET
+ depends on INFINIBAND_ADDR_TRANS
select CHELSIO_LIB
select GENERIC_ALLOCATOR
---help---
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index fa40b685831b..9edd92023e18 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -3,4 +3,5 @@ ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
-iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o \
+ restrack.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 4cf17c650c36..0912fa026327 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3210,6 +3210,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
+ cm_id->provider_data = ep;
ep->com.dev = dev;
ep->com.qp = get_qhp(dev, conn_param->qpn);
if (!ep->com.qp) {
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 831027717121..870649ff049c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -55,6 +55,7 @@
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>
+#include <rdma/restrack.h>
#include "cxgb4.h"
#include "cxgb4_uld.h"
@@ -1082,4 +1083,8 @@ extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
+typedef int c4iw_restrack_func(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
+extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
+
#endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 0b9cc73c3ded..1feade8bb4b3 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -551,6 +551,13 @@ static struct net_device *get_netdev(struct ib_device *dev, u8 port)
return ndev;
}
+static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res)
+{
+ return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) &&
+ c4iw_restrack_funcs[res->type]) ?
+ c4iw_restrack_funcs[res->type](msg, res) : 0;
+}
+
void c4iw_register_device(struct work_struct *work)
{
int ret;
@@ -645,6 +652,7 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+ dev->ibdev.res.fill_res_entry = fill_res_entry;
memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
sizeof(dev->ibdev.iwcm->ifname));
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index ae167b686608..4106eed1b8fb 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1297,8 +1297,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
- wqe = __skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof *wqe);
+ wqe = __skb_put_zero(skb, sizeof(*wqe));
wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID_V(qhp->ep->hwtid) |
@@ -1421,8 +1420,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- wqe = __skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof *wqe);
+ wqe = __skb_put_zero(skb, sizeof(*wqe));
wqe->op_compl = cpu_to_be32(
FW_WR_OP_V(FW_RI_INIT_WR) |
FW_WR_COMPL_F);
@@ -1487,8 +1485,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
}
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
- wqe = __skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof *wqe);
+ wqe = __skb_put_zero(skb, sizeof(*wqe));
wqe->op_compl = cpu_to_be32(
FW_WR_OP_V(FW_RI_INIT_WR) |
FW_WR_COMPL_F);
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
new file mode 100644
index 000000000000..9a7520ee41e0
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/rdma_cm.h>
+
+#include "iw_cxgb4.h"
+#include <rdma/restrack.h>
+#include <uapi/rdma/rdma_netlink.h>
+
+static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
+{
+ /* WQ+SQ */
+ if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
+ goto err;
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
+{
+ /* RQ */
+ if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
+ goto err;
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
+ struct t4_swsqe *sqe)
+{
+ if (rdma_nl_put_driver_u32(msg, "idx", idx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
+ goto err;
+ if (sqe->complete &&
+ rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
+ goto err;
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+/*
+ * Dump the first and last pending sqes.
+ */
+static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
+ u16 first_idx, struct t4_swsqe *first_sqe,
+ u16 last_idx, struct t4_swsqe *last_sqe)
+{
+ if (!first_sqe)
+ goto out;
+ if (fill_swsqe(msg, sq, first_idx, first_sqe))
+ goto err;
+ if (!last_sqe)
+ goto out;
+ if (fill_swsqe(msg, sq, last_idx, last_sqe))
+ goto err;
+out:
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_qp_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_qp *ibqp = container_of(res, struct ib_qp, res);
+ struct t4_swsqe *fsp = NULL, *lsp = NULL;
+ struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
+ u16 first_sq_idx = 0, last_sq_idx = 0;
+ struct t4_swsqe first_sqe, last_sqe;
+ struct nlattr *table_attr;
+ struct t4_wq wq;
+
+ /* User qp state is not available, so don't dump user qps */
+ if (qhp->ucontext)
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ /* Get a consistent snapshot */
+ spin_lock_irq(&qhp->lock);
+ wq = qhp->wq;
+
+ /* If there are any pending sqes, copy the first and last */
+ if (wq.sq.cidx != wq.sq.pidx) {
+ first_sq_idx = wq.sq.cidx;
+ first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
+ fsp = &first_sqe;
+ last_sq_idx = wq.sq.pidx;
+ if (last_sq_idx-- == 0)
+ last_sq_idx = wq.sq.size - 1;
+ if (last_sq_idx != first_sq_idx) {
+ last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
+ lsp = &last_sqe;
+ }
+ }
+ spin_unlock_irq(&qhp->lock);
+
+ if (fill_sq(msg, &wq))
+ goto err_cancel_table;
+
+ if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
+ goto err_cancel_table;
+
+ if (fill_rq(msg, &wq))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+union union_ep {
+ struct c4iw_listen_ep lep;
+ struct c4iw_ep ep;
+};
+
+static int fill_res_ep_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct rdma_cm_id *cm_id = rdma_res_to_id(res);
+ struct nlattr *table_attr;
+ struct c4iw_ep_common *epcp;
+ struct c4iw_listen_ep *listen_ep = NULL;
+ struct c4iw_ep *ep = NULL;
+ struct iw_cm_id *iw_cm_id;
+ union union_ep *uep;
+
+ iw_cm_id = rdma_iw_cm_id(cm_id);
+ if (!iw_cm_id)
+ return 0;
+ epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
+ if (!epcp)
+ return 0;
+ uep = kcalloc(1, sizeof(*uep), GFP_KERNEL);
+ if (!uep)
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err_free_uep;
+
+ /* Get a consistent snapshot */
+ mutex_lock(&epcp->mutex);
+ if (epcp->state == LISTEN) {
+ uep->lep = *(struct c4iw_listen_ep *)epcp;
+ mutex_unlock(&epcp->mutex);
+ listen_ep = &uep->lep;
+ epcp = &listen_ep->com;
+ } else {
+ uep->ep = *(struct c4iw_ep *)epcp;
+ mutex_unlock(&epcp->mutex);
+ ep = &uep->ep;
+ epcp = &ep->com;
+ }
+
+ if (rdma_nl_put_driver_u32(msg, "state", epcp->state))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u64_hex(msg, "flags", epcp->flags))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
+ goto err_cancel_table;
+
+ if (epcp->state == LISTEN) {
+ if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
+ goto err_cancel_table;
+ } else {
+ if (rdma_nl_put_driver_u32(msg, "hwtid", ep->hwtid))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "ord", ep->ord))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "ird", ep->ird))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "emss", ep->emss))
+ goto err_cancel_table;
+
+ if (!ep->parent_ep && rdma_nl_put_driver_u32(msg, "atid",
+ ep->atid))
+ goto err_cancel_table;
+ }
+ nla_nest_end(msg, table_attr);
+ kfree(uep);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err_free_uep:
+ kfree(uep);
+ return -EMSGSIZE;
+}
+
+static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)
+{
+ if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "size", cq->size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "vector", cq->vector))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "gen", cq->gen))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "error", cq->error))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
+ be64_to_cpu(cq->bits_type_ts)))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "flags", cq->flags))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx,
+ const char *qstr)
+{
+ if (rdma_nl_put_driver_u32(msg, qstr, idx))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "header",
+ be32_to_cpu(cqe->header)))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len)))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "wrid_hi",
+ be32_to_cpu(cqe->u.gen.wrid_hi)))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "wrid_low",
+ be32_to_cpu(cqe->u.gen.wrid_low)))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
+ be64_to_cpu(cqe->bits_type_ts)))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq,
+ struct t4_cqe *cqes)
+{
+ u16 idx;
+
+ idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1;
+ if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
+ goto err;
+ idx = cq->cidx;
+ if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
+ goto err;
+
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,
+ struct t4_cqe *cqes)
+{
+ u16 idx;
+
+ if (!cq->sw_in_use)
+ return 0;
+
+ idx = cq->sw_cidx;
+ if (fill_cqe(msg, cqes, idx, "swcq_idx"))
+ goto err;
+ if (cq->sw_in_use == 1)
+ goto out;
+ idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1;
+ if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
+ goto err;
+out:
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_cq_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_cq *ibcq = container_of(res, struct ib_cq, res);
+ struct c4iw_cq *chp = to_c4iw_cq(ibcq);
+ struct nlattr *table_attr;
+ struct t4_cqe hwcqes[2];
+ struct t4_cqe swcqes[2];
+ struct t4_cq cq;
+ u16 idx;
+
+ /* User cq state is not available, so don't dump user cqs */
+ if (ibcq->uobject)
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ /* Get a consistent snapshot */
+ spin_lock_irq(&chp->lock);
+
+ /* t4_cq struct */
+ cq = chp->cq;
+
+ /* get 2 hw cqes: cidx-1, and cidx */
+ idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1;
+ hwcqes[0] = chp->cq.queue[idx];
+
+ idx = cq.cidx;
+ hwcqes[1] = chp->cq.queue[idx];
+
+ /* get first and last sw cqes */
+ if (cq.sw_in_use) {
+ swcqes[0] = chp->cq.sw_queue[cq.sw_cidx];
+ if (cq.sw_in_use > 1) {
+ idx = (cq.sw_pidx > 0) ? cq.sw_pidx - 1 : cq.size - 1;
+ swcqes[1] = chp->cq.sw_queue[idx];
+ }
+ }
+
+ spin_unlock_irq(&chp->lock);
+
+ if (fill_cq(msg, &cq))
+ goto err_cancel_table;
+
+ if (fill_swcqes(msg, &cq, swcqes))
+ goto err_cancel_table;
+
+ if (fill_hwcqes(msg, &cq, hwcqes))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
+ struct c4iw_dev *dev = mhp->rhp;
+ u32 stag = mhp->attr.stag;
+ struct nlattr *table_attr;
+ struct fw_ri_tpte tpte;
+ int ret;
+
+ if (!stag)
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte);
+ if (ret) {
+ dev_err(&dev->rdev.lldi.pdev->dev,
+ "%s cxgb4_read_tpte err %d\n", __func__, ret);
+ return 0;
+ }
+
+ if (rdma_nl_put_driver_u32_hex(msg, "idx", stag >> 8))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "valid",
+ FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32_hex(msg, "key", stag & 0xff))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "state",
+ FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "pdid",
+ FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32_hex(msg, "perm",
+ FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "ps",
+ FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u64(msg, "len",
+ ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo)))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32_hex(msg, "pbl_addr",
+ FW_RI_TPTE_PBLADDR_G(ntohl(tpte.nosnoop_pbladdr))))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
+ [RDMA_RESTRACK_QP] = fill_res_qp_entry,
+ [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry,
+ [RDMA_RESTRACK_CQ] = fill_res_cq_entry,
+ [RDMA_RESTRACK_MR] = fill_res_mr_entry,
+};
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index ce4010bad982..f451ba912f47 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -14,7 +14,15 @@ hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \
uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
verbs_txreq.o vnic_main.o vnic_sdma.o
-hfi1-$(CONFIG_DEBUG_FS) += debugfs.o
+
+ifdef CONFIG_DEBUG_FS
+hfi1-y += debugfs.o
+ifdef CONFIG_FAULT_INJECTION
+ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+hfi1-y += fault.o
+endif
+endif
+endif
CFLAGS_trace.o = -I$(src)
ifdef MVERSION
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index b5fab55cc275..fbe7198a715a 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -77,6 +77,58 @@ static inline void init_cpu_mask_set(struct cpu_mask_set *set)
set->gen = 0;
}
+/* Increment generation of CPU set if needed */
+static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
+{
+ if (cpumask_equal(&set->mask, &set->used)) {
+ /*
+ * We've used up all the CPUs, bump up the generation
+ * and reset the 'used' map
+ */
+ set->gen++;
+ cpumask_clear(&set->used);
+ }
+}
+
+static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
+{
+ if (cpumask_empty(&set->used) && set->gen) {
+ set->gen--;
+ cpumask_copy(&set->used, &set->mask);
+ }
+}
+
+/* Get the first CPU from the list of unused CPUs in a CPU set data structure */
+static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
+{
+ int cpu;
+
+ if (!diff || !set)
+ return -EINVAL;
+
+ _cpu_mask_set_gen_inc(set);
+
+ /* Find out CPUs left in CPU mask */
+ cpumask_andnot(diff, &set->mask, &set->used);
+
+ cpu = cpumask_first(diff);
+ if (cpu >= nr_cpu_ids) /* empty */
+ cpu = -EINVAL;
+ else
+ cpumask_set_cpu(cpu, &set->used);
+
+ return cpu;
+}
+
+static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
+{
+ if (!set)
+ return;
+
+ cpumask_clear_cpu(cpu, &set->used);
+ _cpu_mask_set_gen_dec(set);
+}
+
/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
@@ -156,7 +208,13 @@ int node_affinity_init(void)
return 0;
}
-void node_affinity_destroy(void)
+static void node_affinity_destroy(struct hfi1_affinity_node *entry)
+{
+ free_percpu(entry->comp_vect_affinity);
+ kfree(entry);
+}
+
+void node_affinity_destroy_all(void)
{
struct list_head *pos, *q;
struct hfi1_affinity_node *entry;
@@ -166,7 +224,7 @@ void node_affinity_destroy(void)
entry = list_entry(pos, struct hfi1_affinity_node,
list);
list_del(pos);
- kfree(entry);
+ node_affinity_destroy(entry);
}
mutex_unlock(&node_affinity.lock);
kfree(hfi1_per_node_cntr);
@@ -180,6 +238,7 @@ static struct hfi1_affinity_node *node_affinity_allocate(int node)
if (!entry)
return NULL;
entry->node = node;
+ entry->comp_vect_affinity = alloc_percpu(u16);
INIT_LIST_HEAD(&entry->list);
return entry;
@@ -209,6 +268,341 @@ static struct hfi1_affinity_node *node_affinity_lookup(int node)
return NULL;
}
+static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
+ u16 __percpu *comp_vect_affinity)
+{
+ int curr_cpu;
+ u16 cntr;
+ u16 prev_cntr;
+ int ret_cpu;
+
+ if (!possible_cpumask) {
+ ret_cpu = -EINVAL;
+ goto fail;
+ }
+
+ if (!comp_vect_affinity) {
+ ret_cpu = -EINVAL;
+ goto fail;
+ }
+
+ ret_cpu = cpumask_first(possible_cpumask);
+ if (ret_cpu >= nr_cpu_ids) {
+ ret_cpu = -EINVAL;
+ goto fail;
+ }
+
+ prev_cntr = *per_cpu_ptr(comp_vect_affinity, ret_cpu);
+ for_each_cpu(curr_cpu, possible_cpumask) {
+ cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);
+
+ if (cntr < prev_cntr) {
+ ret_cpu = curr_cpu;
+ prev_cntr = cntr;
+ }
+ }
+
+ *per_cpu_ptr(comp_vect_affinity, ret_cpu) += 1;
+
+fail:
+ return ret_cpu;
+}
+
+static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
+ u16 __percpu *comp_vect_affinity)
+{
+ int curr_cpu;
+ int max_cpu;
+ u16 cntr;
+ u16 prev_cntr;
+
+ if (!possible_cpumask)
+ return -EINVAL;
+
+ if (!comp_vect_affinity)
+ return -EINVAL;
+
+ max_cpu = cpumask_first(possible_cpumask);
+ if (max_cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu);
+ for_each_cpu(curr_cpu, possible_cpumask) {
+ cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);
+
+ if (cntr > prev_cntr) {
+ max_cpu = curr_cpu;
+ prev_cntr = cntr;
+ }
+ }
+
+ *per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1;
+
+ return max_cpu;
+}
+
+/*
+ * Non-interrupt CPUs are used first, then interrupt CPUs.
+ * Two already allocated cpu masks must be passed.
+ */
+static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry,
+ cpumask_var_t non_intr_cpus,
+ cpumask_var_t available_cpus)
+ __must_hold(&node_affinity.lock)
+{
+ int cpu;
+ struct cpu_mask_set *set = dd->comp_vect;
+
+ lockdep_assert_held(&node_affinity.lock);
+ if (!non_intr_cpus) {
+ cpu = -1;
+ goto fail;
+ }
+
+ if (!available_cpus) {
+ cpu = -1;
+ goto fail;
+ }
+
+ /* Available CPUs for pinning completion vectors */
+ _cpu_mask_set_gen_inc(set);
+ cpumask_andnot(available_cpus, &set->mask, &set->used);
+
+ /* Available CPUs without SDMA engine interrupts */
+ cpumask_andnot(non_intr_cpus, available_cpus,
+ &entry->def_intr.used);
+
+ /* If there are non-interrupt CPUs available, use them first */
+ if (!cpumask_empty(non_intr_cpus))
+ cpu = cpumask_first(non_intr_cpus);
+ else /* Otherwise, use interrupt CPUs */
+ cpu = cpumask_first(available_cpus);
+
+ if (cpu >= nr_cpu_ids) { /* empty */
+ cpu = -1;
+ goto fail;
+ }
+ cpumask_set_cpu(cpu, &set->used);
+
+fail:
+ return cpu;
+}
+
+static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
+{
+ struct cpu_mask_set *set = dd->comp_vect;
+
+ if (cpu < 0)
+ return;
+
+ cpu_mask_set_put(set, cpu);
+}
+
+/* _dev_comp_vect_mappings_destroy() is reentrant */
+static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
+{
+ int i, cpu;
+
+ if (!dd->comp_vect_mappings)
+ return;
+
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ cpu = dd->comp_vect_mappings[i];
+ _dev_comp_vect_cpu_put(dd, cpu);
+ dd->comp_vect_mappings[i] = -1;
+ hfi1_cdbg(AFFINITY,
+ "[%s] Release CPU %d from completion vector %d",
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
+ }
+
+ kfree(dd->comp_vect_mappings);
+ dd->comp_vect_mappings = NULL;
+}
+
+/*
+ * This function creates the table for looking up CPUs for completion vectors.
+ * num_comp_vectors needs to have been initialized before calling this function.
+ */
+static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry)
+ __must_hold(&node_affinity.lock)
+{
+ int i, cpu, ret;
+ cpumask_var_t non_intr_cpus;
+ cpumask_var_t available_cpus;
+
+ lockdep_assert_held(&node_affinity.lock);
+
+ if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
+ free_cpumask_var(non_intr_cpus);
+ return -ENOMEM;
+ }
+
+ dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus,
+ sizeof(*dd->comp_vect_mappings),
+ GFP_KERNEL);
+ if (!dd->comp_vect_mappings) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++)
+ dd->comp_vect_mappings[i] = -1;
+
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
+ available_cpus);
+ if (cpu < 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dd->comp_vect_mappings[i] = cpu;
+ hfi1_cdbg(AFFINITY,
+ "[%s] Completion Vector %d -> CPU %d",
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
+ }
+
+ return 0;
+
+fail:
+ free_cpumask_var(available_cpus);
+ free_cpumask_var(non_intr_cpus);
+ _dev_comp_vect_mappings_destroy(dd);
+
+ return ret;
+}
+
+int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
+{
+ int ret;
+ struct hfi1_affinity_node *entry;
+
+ mutex_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ if (!entry) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ ret = _dev_comp_vect_mappings_create(dd, entry);
+unlock:
+ mutex_unlock(&node_affinity.lock);
+
+ return ret;
+}
+
+void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
+{
+ _dev_comp_vect_mappings_destroy(dd);
+}
+
+int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
+{
+ struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
+ struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
+
+ if (!dd->comp_vect_mappings)
+ return -EINVAL;
+ if (comp_vect >= dd->comp_vect_possible_cpus)
+ return -EINVAL;
+
+ return dd->comp_vect_mappings[comp_vect];
+}
+
+/*
+ * It assumes dd->comp_vect_possible_cpus is available.
+ */
+static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry,
+ bool first_dev_init)
+ __must_hold(&node_affinity.lock)
+{
+ int i, j, curr_cpu;
+ int possible_cpus_comp_vect = 0;
+ struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask;
+
+ lockdep_assert_held(&node_affinity.lock);
+ /*
+ * If there's only one CPU available for completion vectors, then
+	 * there will only be one completion vector available. Otherwise,
+	 * the number of completion vectors available will be the number of
+	 * available CPUs divided by the number of devices in the
+ * local NUMA node.
+ */
+ if (cpumask_weight(&entry->comp_vect_mask) == 1) {
+ possible_cpus_comp_vect = 1;
+ dd_dev_warn(dd,
+ "Number of kernel receive queues is too large for completion vector affinity to be effective\n");
+ } else {
+ possible_cpus_comp_vect +=
+ cpumask_weight(&entry->comp_vect_mask) /
+ hfi1_per_node_cntr[dd->node];
+
+ /*
+ * If the completion vector CPUs available doesn't divide
+		 * evenly among devices, then the first device to be
+ * initialized gets an extra CPU.
+ */
+ if (first_dev_init &&
+ cpumask_weight(&entry->comp_vect_mask) %
+ hfi1_per_node_cntr[dd->node] != 0)
+ possible_cpus_comp_vect++;
+ }
+
+ dd->comp_vect_possible_cpus = possible_cpus_comp_vect;
+
+ /* Reserving CPUs for device completion vector */
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask,
+ entry->comp_vect_affinity);
+ if (curr_cpu < 0)
+ goto fail;
+
+ cpumask_set_cpu(curr_cpu, dev_comp_vect_mask);
+ }
+
+ hfi1_cdbg(AFFINITY,
+ "[%s] Completion vector affinity CPU set(s) %*pbl",
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi),
+ cpumask_pr_args(dev_comp_vect_mask));
+
+ return 0;
+
+fail:
+ for (j = 0; j < i; j++)
+ per_cpu_affinity_put_max(&entry->comp_vect_mask,
+ entry->comp_vect_affinity);
+
+ return curr_cpu;
+}
+
+/*
+ * It assumes dd->comp_vect_possible_cpus is available.
+ */
+static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry)
+ __must_hold(&node_affinity.lock)
+{
+ int i, cpu;
+
+ lockdep_assert_held(&node_affinity.lock);
+ if (!dd->comp_vect_possible_cpus)
+ return;
+
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
+ entry->comp_vect_affinity);
+ /* Clearing CPU in device completion vector cpu mask */
+ if (cpu >= 0)
+ cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
+ }
+
+ dd->comp_vect_possible_cpus = 0;
+}
+
/*
* Interrupt affinity.
*
@@ -225,7 +619,8 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
int node = pcibus_to_node(dd->pcidev->bus);
struct hfi1_affinity_node *entry;
const struct cpumask *local_mask;
- int curr_cpu, possible, i;
+ int curr_cpu, possible, i, ret;
+ bool new_entry = false;
if (node < 0)
node = numa_node_id();
@@ -247,11 +642,14 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
if (!entry) {
dd_dev_err(dd,
"Unable to allocate global affinity node\n");
- mutex_unlock(&node_affinity.lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
+ new_entry = true;
+
init_cpu_mask_set(&entry->def_intr);
init_cpu_mask_set(&entry->rcv_intr);
+ cpumask_clear(&entry->comp_vect_mask);
cpumask_clear(&entry->general_intr_mask);
/* Use the "real" cpu mask of this node as the default */
cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
@@ -304,10 +702,64 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
&entry->general_intr_mask);
}
- node_affinity_add_tail(entry);
+ /* Determine completion vector CPUs for the entire node */
+ cpumask_and(&entry->comp_vect_mask,
+ &node_affinity.real_cpu_mask, local_mask);
+ cpumask_andnot(&entry->comp_vect_mask,
+ &entry->comp_vect_mask,
+ &entry->rcv_intr.mask);
+ cpumask_andnot(&entry->comp_vect_mask,
+ &entry->comp_vect_mask,
+ &entry->general_intr_mask);
+
+ /*
+ * If there ends up being 0 CPU cores leftover for completion
+ * vectors, use the same CPU core as the general/control
+ * context.
+ */
+ if (cpumask_weight(&entry->comp_vect_mask) == 0)
+ cpumask_copy(&entry->comp_vect_mask,
+ &entry->general_intr_mask);
}
+
+ ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
+ if (ret < 0)
+ goto fail;
+
+ if (new_entry)
+ node_affinity_add_tail(entry);
+
mutex_unlock(&node_affinity.lock);
+
return 0;
+
+fail:
+ if (new_entry)
+ node_affinity_destroy(entry);
+ mutex_unlock(&node_affinity.lock);
+ return ret;
+}
+
+void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
+{
+ struct hfi1_affinity_node *entry;
+
+ if (dd->node < 0)
+ return;
+
+ mutex_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ if (!entry)
+ goto unlock;
+
+ /*
+ * Free device completion vector CPUs to be used by future
+ * completion vectors
+ */
+ _dev_comp_vect_cpu_mask_clean_up(dd, entry);
+unlock:
+ mutex_unlock(&node_affinity.lock);
+ dd->node = -1;
}
/*
@@ -456,17 +908,12 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
return -ENOMEM;
- if (cpumask_equal(&set->mask, &set->used)) {
- /*
- * We've used up all the CPUs, bump up the generation
- * and reset the 'used' map
- */
- set->gen++;
- cpumask_clear(&set->used);
+ cpu = cpu_mask_set_get_first(set, diff);
+ if (cpu < 0) {
+ free_cpumask_var(diff);
+ dd_dev_err(dd, "Failure to obtain CPU for IRQ\n");
+ return cpu;
}
- cpumask_andnot(diff, &set->mask, &set->used);
- cpu = cpumask_first(diff);
- cpumask_set_cpu(cpu, &set->used);
free_cpumask_var(diff);
}
@@ -526,10 +973,7 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
if (set) {
cpumask_andnot(&set->used, &set->used, &msix->mask);
- if (cpumask_empty(&set->used) && set->gen) {
- set->gen--;
- cpumask_copy(&set->used, &set->mask);
- }
+ _cpu_mask_set_gen_dec(set);
}
irq_set_affinity_hint(msix->irq, NULL);
@@ -640,10 +1084,7 @@ int hfi1_get_proc_affinity(int node)
* If we've used all available HW threads, clear the mask and start
* overloading.
*/
- if (cpumask_equal(&set->mask, &set->used)) {
- set->gen++;
- cpumask_clear(&set->used);
- }
+ _cpu_mask_set_gen_inc(set);
/*
* If NUMA node has CPUs used by interrupt handlers, include them in the
@@ -767,11 +1208,7 @@ void hfi1_put_proc_affinity(int cpu)
return;
mutex_lock(&affinity->lock);
- cpumask_clear_cpu(cpu, &set->used);
+ cpu_mask_set_put(set, cpu);
hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
- if (cpumask_empty(&set->used) && set->gen) {
- set->gen--;
- cpumask_copy(&set->used, &set->mask);
- }
mutex_unlock(&affinity->lock);
}
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 2a1e374169c0..6a7e6ea4e426 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -98,9 +98,11 @@ void hfi1_put_proc_affinity(int cpu);
struct hfi1_affinity_node {
int node;
+ u16 __percpu *comp_vect_affinity;
struct cpu_mask_set def_intr;
struct cpu_mask_set rcv_intr;
struct cpumask general_intr_mask;
+ struct cpumask comp_vect_mask;
struct list_head list;
};
@@ -116,7 +118,11 @@ struct hfi1_affinity_node_list {
};
int node_affinity_init(void);
-void node_affinity_destroy(void);
+void node_affinity_destroy_all(void);
extern struct hfi1_affinity_node_list node_affinity;
+void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd);
+int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect);
+int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd);
+void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd);
#endif /* _HFI1_AFFINITY_H */
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e6bdd0c1e80a..6deb101cdd43 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -65,6 +65,7 @@
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"
+#include "fault.h"
#define NUM_IB_PORTS 1
@@ -1032,8 +1033,8 @@ static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
u8 *remote_tx_rate, u16 *link_widths);
-static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
- u8 *flag_bits, u16 *link_widths);
+static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
+ u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
@@ -6355,6 +6356,18 @@ static void handle_8051_request(struct hfi1_pportdata *ppd)
type);
hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
break;
+ case HREQ_LCB_RESET:
+ /* Put the LCB, RX FPE and TX FPE into reset */
+ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
+ /* Make sure the write completed */
+ (void)read_csr(dd, DCC_CFG_RESET);
+ /* Hold the reset long enough to take effect */
+ udelay(1);
+ /* Take the LCB, RX FPE and TX FPE out of reset */
+ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
+ hreq_response(dd, HREQ_SUCCESS, 0);
+
+ break;
case HREQ_CONFIG_DONE:
hreq_response(dd, HREQ_SUCCESS, 0);
break;
@@ -6465,8 +6478,7 @@ static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
reg = read_csr(dd, DCC_CFG_RESET);
write_csr(dd, DCC_CFG_RESET, reg |
- (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
- (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
+ DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
if (!abort) {
udelay(1); /* must hold for the longer of 16cclks or 20ns */
@@ -6531,7 +6543,7 @@ static void _dc_start(struct hfi1_devdata *dd)
__func__);
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
- write_csr(dd, DCC_CFG_RESET, 0x10);
+ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
/* lcb_shutdown() with abort=1 does not restore these */
write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
dd->dc_shutdown = 0;
@@ -6829,7 +6841,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
}
rcvmask = HFI1_RCVCTRL_CTXT_ENB;
/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
- rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
+ rcvmask |= rcd->rcvhdrtail_kvaddr ?
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
hfi1_rcvctrl(dd, rcvmask, rcd);
hfi1_rcd_put(rcd);
@@ -7352,7 +7364,7 @@ static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
u8 misc_bits, local_flags;
u16 active_tx, active_rx;
- read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
+ read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
tx = widths >> 12;
rx = (widths >> 8) & 0xf;
@@ -8355,7 +8367,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
u32 tail;
int present;
- if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
+ if (!rcd->rcvhdrtail_kvaddr)
present = (rcd->seq_cnt ==
rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
else /* is RDMA rtail */
@@ -8824,29 +8836,29 @@ static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
GENERAL_CONFIG, frame);
}
-static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
- u8 *flag_bits, u16 *link_widths)
+static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
+ u8 *flag_bits, u16 *link_widths)
{
u32 frame;
- read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
+ read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
&frame);
*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}
-static int write_vc_local_link_width(struct hfi1_devdata *dd,
- u8 misc_bits,
- u8 flag_bits,
- u16 link_widths)
+static int write_vc_local_link_mode(struct hfi1_devdata *dd,
+ u8 misc_bits,
+ u8 flag_bits,
+ u16 link_widths)
{
u32 frame;
frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
| (u32)link_widths << LINK_WIDTH_SHIFT;
- return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
+ return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
frame);
}
@@ -9316,8 +9328,16 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
if (loopback == LOOPBACK_SERDES)
misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
- ret = write_vc_local_link_width(dd, misc_bits, 0,
- opa_to_vc_link_widths(
+ /*
+ * An external device configuration request is used to reset the LCB
+ * to retry to obtain operational lanes when the first attempt is
+	 * unsuccessful.
+ */
+ if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
+ misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
+
+ ret = write_vc_local_link_mode(dd, misc_bits, 0,
+ opa_to_vc_link_widths(
ppd->link_width_enabled));
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
@@ -10495,9 +10515,9 @@ u32 driver_pstate(struct hfi1_pportdata *ppd)
case HLS_DN_OFFLINE:
return OPA_PORTPHYSSTATE_OFFLINE;
case HLS_VERIFY_CAP:
- return IB_PORTPHYSSTATE_POLLING;
+ return IB_PORTPHYSSTATE_TRAINING;
case HLS_GOING_UP:
- return IB_PORTPHYSSTATE_POLLING;
+ return IB_PORTPHYSSTATE_TRAINING;
case HLS_GOING_OFFLINE:
return OPA_PORTPHYSSTATE_OFFLINE;
case HLS_LINK_COOLDOWN:
@@ -11823,7 +11843,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
/* reset the tail and hdr addresses, and sequence count */
write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
rcd->rcvhdrq_dma);
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
+ if (rcd->rcvhdrtail_kvaddr)
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
rcd->rcvhdrqtailaddr_dma);
rcd->seq_cnt = 1;
@@ -11903,7 +11923,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
+ if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
/* See comment on RcvCtxtCtrl.TailUpd above */
@@ -14620,7 +14640,9 @@ static void init_rxe(struct hfi1_devdata *dd)
/* Have 16 bytes (4DW) of bypass header available in header queue */
val = read_csr(dd, RCV_BYPASS);
- val |= (4ull << 16);
+ val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
+ val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
+ RCV_BYPASS_HDR_SIZE_SHIFT);
write_csr(dd, RCV_BYPASS, val);
}
@@ -15022,13 +15044,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret < 0)
goto bail_cleanup;
- /* verify that reads actually work, save revision for reset check */
- dd->revision = read_csr(dd, CCE_REVISION);
- if (dd->revision == ~(u64)0) {
- dd_dev_err(dd, "cannot read chip CSRs\n");
- ret = -EINVAL;
- goto bail_cleanup;
- }
dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
& CCE_REVISION_CHIP_REV_MAJOR_MASK;
dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
@@ -15224,6 +15239,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_cleanup;
+ ret = hfi1_comp_vectors_set_up(dd);
+ if (ret)
+ goto bail_clear_intr;
+
/* set up LCB access - must be after set_up_interrupts() */
init_lcb_access(dd);
@@ -15266,6 +15285,7 @@ bail_free_rcverr:
bail_free_cntrs:
free_cntrs(dd);
bail_clear_intr:
+ hfi1_comp_vectors_clean_up(dd);
hfi1_clean_up_interrupts(dd);
bail_cleanup:
hfi1_pcie_ddcleanup(dd);
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index c0d70f255050..fdf389e46e19 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -196,6 +196,15 @@
#define LSTATE_ARMED 0x3
#define LSTATE_ACTIVE 0x4
+/* DCC_CFG_RESET reset states */
+#define LCB_RX_FPE_TX_FPE_INTO_RESET (DCC_CFG_RESET_RESET_LCB | \
+ DCC_CFG_RESET_RESET_TX_FPE | \
+ DCC_CFG_RESET_RESET_RX_FPE | \
+ DCC_CFG_RESET_ENABLE_CCLK_BCC)
+ /* 0x17 */
+
+#define LCB_RX_FPE_TX_FPE_OUT_OF_RESET DCC_CFG_RESET_ENABLE_CCLK_BCC /* 0x10 */
+
/* DC8051_STS_CUR_STATE port values (physical link states) */
#define PLS_DISABLED 0x30
#define PLS_OFFLINE 0x90
@@ -283,6 +292,7 @@
#define HREQ_SET_TX_EQ_ABS 0x04
#define HREQ_SET_TX_EQ_REL 0x05
#define HREQ_ENABLE 0x06
+#define HREQ_LCB_RESET 0x07
#define HREQ_CONFIG_DONE 0xfe
#define HREQ_INTERFACE_TEST 0xff
@@ -383,7 +393,7 @@
#define TX_SETTINGS 0x06
#define VERIFY_CAP_LOCAL_PHY 0x07
#define VERIFY_CAP_LOCAL_FABRIC 0x08
-#define VERIFY_CAP_LOCAL_LINK_WIDTH 0x09
+#define VERIFY_CAP_LOCAL_LINK_MODE 0x09
#define LOCAL_DEVICE_ID 0x0a
#define RESERVED_REGISTERS 0x0b
#define LOCAL_LNI_INFO 0x0c
@@ -584,8 +594,9 @@ enum {
#define LOOPBACK_LCB 2
#define LOOPBACK_CABLE 3 /* external cable */
-/* set up serdes bit in MISC_CONFIG_BITS */
+/* set up bits in MISC_CONFIG_BITS */
#define LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT 0
+#define EXT_CFG_LCB_RESET_SUPPORTED_SHIFT 3
/* read and write hardware registers */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 793514f1d15f..ee6dca5e2a2f 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -97,8 +97,11 @@
#define DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT 32
#define DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK 0x700000000ull
#define DCC_CFG_RESET (DCC_CSRS + 0x000000000000)
-#define DCC_CFG_RESET_RESET_LCB_SHIFT 0
-#define DCC_CFG_RESET_RESET_RX_FPE_SHIFT 2
+#define DCC_CFG_RESET_RESET_LCB BIT_ULL(0)
+#define DCC_CFG_RESET_RESET_TX_FPE BIT_ULL(1)
+#define DCC_CFG_RESET_RESET_RX_FPE BIT_ULL(2)
+#define DCC_CFG_RESET_RESET_8051 BIT_ULL(3)
+#define DCC_CFG_RESET_ENABLE_CCLK_BCC BIT_ULL(4)
#define DCC_CFG_SC_VL_TABLE_15_0 (DCC_CSRS + 0x000000000028)
#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY0_SHIFT 0
#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY10_SHIFT 40
@@ -635,6 +638,12 @@
#define RCV_BTH_QP_KDETH_QP_MASK 0xFFull
#define RCV_BTH_QP_KDETH_QP_SHIFT 16
#define RCV_BYPASS (RXE + 0x000000000038)
+#define RCV_BYPASS_HDR_SIZE_SHIFT 16
+#define RCV_BYPASS_HDR_SIZE_MASK 0x1Full
+#define RCV_BYPASS_HDR_SIZE_SMASK 0x1F0000ull
+#define RCV_BYPASS_BYPASS_CONTEXT_SHIFT 0
+#define RCV_BYPASS_BYPASS_CONTEXT_MASK 0xFFull
+#define RCV_BYPASS_BYPASS_CONTEXT_SMASK 0xFFull
#define RCV_CONTEXTS (RXE + 0x000000000010)
#define RCV_COUNTER_ARRAY32 (RXE + 0x000000000400)
#define RCV_COUNTER_ARRAY64 (RXE + 0x000000000500)
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 852173bf05d0..9f992ae36c89 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -60,15 +60,13 @@
#include "device.h"
#include "qp.h"
#include "sdma.h"
+#include "fault.h"
static struct dentry *hfi1_dbg_root;
/* wrappers to enforce srcu in seq file */
-static ssize_t hfi1_seq_read(
- struct file *file,
- char __user *buf,
- size_t size,
- loff_t *ppos)
+ssize_t hfi1_seq_read(struct file *file, char __user *buf, size_t size,
+ loff_t *ppos)
{
struct dentry *d = file->f_path.dentry;
ssize_t r;
@@ -81,10 +79,7 @@ static ssize_t hfi1_seq_read(
return r;
}
-static loff_t hfi1_seq_lseek(
- struct file *file,
- loff_t offset,
- int whence)
+loff_t hfi1_seq_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry *d = file->f_path.dentry;
loff_t r;
@@ -100,48 +95,6 @@ static loff_t hfi1_seq_lseek(
#define private2dd(file) (file_inode(file)->i_private)
#define private2ppd(file) (file_inode(file)->i_private)
-#define DEBUGFS_SEQ_FILE_OPS(name) \
-static const struct seq_operations _##name##_seq_ops = { \
- .start = _##name##_seq_start, \
- .next = _##name##_seq_next, \
- .stop = _##name##_seq_stop, \
- .show = _##name##_seq_show \
-}
-
-#define DEBUGFS_SEQ_FILE_OPEN(name) \
-static int _##name##_open(struct inode *inode, struct file *s) \
-{ \
- struct seq_file *seq; \
- int ret; \
- ret = seq_open(s, &_##name##_seq_ops); \
- if (ret) \
- return ret; \
- seq = s->private_data; \
- seq->private = inode->i_private; \
- return 0; \
-}
-
-#define DEBUGFS_FILE_OPS(name) \
-static const struct file_operations _##name##_file_ops = { \
- .owner = THIS_MODULE, \
- .open = _##name##_open, \
- .read = hfi1_seq_read, \
- .llseek = hfi1_seq_lseek, \
- .release = seq_release \
-}
-
-#define DEBUGFS_FILE_CREATE(name, parent, data, ops, mode) \
-do { \
- struct dentry *ent; \
- ent = debugfs_create_file(name, mode, parent, \
- data, ops); \
- if (!ent) \
- pr_warn("create of %s failed\n", name); \
-} while (0)
-
-#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
- DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
-
static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
{
struct hfi1_opcode_stats_perctx *opstats;
@@ -1160,232 +1113,6 @@ DEBUGFS_SEQ_FILE_OPS(sdma_cpu_list);
DEBUGFS_SEQ_FILE_OPEN(sdma_cpu_list)
DEBUGFS_FILE_OPS(sdma_cpu_list);
-#ifdef CONFIG_FAULT_INJECTION
-static void *_fault_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct hfi1_opcode_stats_perctx *opstats;
-
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-static void *_fault_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct hfi1_opcode_stats_perctx *opstats;
-
- ++*pos;
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-static void _fault_stats_seq_stop(struct seq_file *s, void *v)
-{
-}
-
-static int _fault_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos = v;
- loff_t i = *spos, j;
- u64 n_packets = 0, n_bytes = 0;
- struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
- struct hfi1_devdata *dd = dd_from_dev(ibd);
- struct hfi1_ctxtdata *rcd;
-
- for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
- rcd = hfi1_rcd_get_by_index(dd, j);
- if (rcd) {
- n_packets += rcd->opstats->stats[i].n_packets;
- n_bytes += rcd->opstats->stats[i].n_bytes;
- }
- hfi1_rcd_put(rcd);
- }
- for_each_possible_cpu(j) {
- struct hfi1_opcode_stats_perctx *sp =
- per_cpu_ptr(dd->tx_opstats, j);
-
- n_packets += sp->stats[i].n_packets;
- n_bytes += sp->stats[i].n_bytes;
- }
- if (!n_packets && !n_bytes)
- return SEQ_SKIP;
- if (!ibd->fault_opcode->n_rxfaults[i] &&
- !ibd->fault_opcode->n_txfaults[i])
- return SEQ_SKIP;
- seq_printf(s, "%02llx %llu/%llu (faults rx:%llu faults: tx:%llu)\n", i,
- (unsigned long long)n_packets,
- (unsigned long long)n_bytes,
- (unsigned long long)ibd->fault_opcode->n_rxfaults[i],
- (unsigned long long)ibd->fault_opcode->n_txfaults[i]);
- return 0;
-}
-
-DEBUGFS_SEQ_FILE_OPS(fault_stats);
-DEBUGFS_SEQ_FILE_OPEN(fault_stats);
-DEBUGFS_FILE_OPS(fault_stats);
-
-static void fault_exit_opcode_debugfs(struct hfi1_ibdev *ibd)
-{
- debugfs_remove_recursive(ibd->fault_opcode->dir);
- kfree(ibd->fault_opcode);
- ibd->fault_opcode = NULL;
-}
-
-static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd)
-{
- struct dentry *parent = ibd->hfi1_ibdev_dbg;
-
- ibd->fault_opcode = kzalloc(sizeof(*ibd->fault_opcode), GFP_KERNEL);
- if (!ibd->fault_opcode)
- return -ENOMEM;
-
- ibd->fault_opcode->attr.interval = 1;
- ibd->fault_opcode->attr.require_end = ULONG_MAX;
- ibd->fault_opcode->attr.stacktrace_depth = 32;
- ibd->fault_opcode->attr.dname = NULL;
- ibd->fault_opcode->attr.verbose = 0;
- ibd->fault_opcode->fault_by_opcode = false;
- ibd->fault_opcode->opcode = 0;
- ibd->fault_opcode->mask = 0xff;
-
- ibd->fault_opcode->dir =
- fault_create_debugfs_attr("fault_opcode",
- parent,
- &ibd->fault_opcode->attr);
- if (IS_ERR(ibd->fault_opcode->dir)) {
- kfree(ibd->fault_opcode);
- return -ENOENT;
- }
-
- DEBUGFS_SEQ_FILE_CREATE(fault_stats, ibd->fault_opcode->dir, ibd);
- if (!debugfs_create_bool("fault_by_opcode", 0600,
- ibd->fault_opcode->dir,
- &ibd->fault_opcode->fault_by_opcode))
- goto fail;
- if (!debugfs_create_x8("opcode", 0600, ibd->fault_opcode->dir,
- &ibd->fault_opcode->opcode))
- goto fail;
- if (!debugfs_create_x8("mask", 0600, ibd->fault_opcode->dir,
- &ibd->fault_opcode->mask))
- goto fail;
-
- return 0;
-fail:
- fault_exit_opcode_debugfs(ibd);
- return -ENOMEM;
-}
-
-static void fault_exit_packet_debugfs(struct hfi1_ibdev *ibd)
-{
- debugfs_remove_recursive(ibd->fault_packet->dir);
- kfree(ibd->fault_packet);
- ibd->fault_packet = NULL;
-}
-
-static int fault_init_packet_debugfs(struct hfi1_ibdev *ibd)
-{
- struct dentry *parent = ibd->hfi1_ibdev_dbg;
-
- ibd->fault_packet = kzalloc(sizeof(*ibd->fault_packet), GFP_KERNEL);
- if (!ibd->fault_packet)
- return -ENOMEM;
-
- ibd->fault_packet->attr.interval = 1;
- ibd->fault_packet->attr.require_end = ULONG_MAX;
- ibd->fault_packet->attr.stacktrace_depth = 32;
- ibd->fault_packet->attr.dname = NULL;
- ibd->fault_packet->attr.verbose = 0;
- ibd->fault_packet->fault_by_packet = false;
-
- ibd->fault_packet->dir =
- fault_create_debugfs_attr("fault_packet",
- parent,
- &ibd->fault_opcode->attr);
- if (IS_ERR(ibd->fault_packet->dir)) {
- kfree(ibd->fault_packet);
- return -ENOENT;
- }
-
- if (!debugfs_create_bool("fault_by_packet", 0600,
- ibd->fault_packet->dir,
- &ibd->fault_packet->fault_by_packet))
- goto fail;
- if (!debugfs_create_u64("fault_stats", 0400,
- ibd->fault_packet->dir,
- &ibd->fault_packet->n_faults))
- goto fail;
-
- return 0;
-fail:
- fault_exit_packet_debugfs(ibd);
- return -ENOMEM;
-}
-
-static void fault_exit_debugfs(struct hfi1_ibdev *ibd)
-{
- fault_exit_opcode_debugfs(ibd);
- fault_exit_packet_debugfs(ibd);
-}
-
-static int fault_init_debugfs(struct hfi1_ibdev *ibd)
-{
- int ret = 0;
-
- ret = fault_init_opcode_debugfs(ibd);
- if (ret)
- return ret;
-
- ret = fault_init_packet_debugfs(ibd);
- if (ret)
- fault_exit_opcode_debugfs(ibd);
-
- return ret;
-}
-
-bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
-{
- return ibd->fault_suppress_err;
-}
-
-bool hfi1_dbg_fault_opcode(struct rvt_qp *qp, u32 opcode, bool rx)
-{
- bool ret = false;
- struct hfi1_ibdev *ibd = to_idev(qp->ibqp.device);
-
- if (!ibd->fault_opcode || !ibd->fault_opcode->fault_by_opcode)
- return false;
- if (ibd->fault_opcode->opcode != (opcode & ibd->fault_opcode->mask))
- return false;
- ret = should_fail(&ibd->fault_opcode->attr, 1);
- if (ret) {
- trace_hfi1_fault_opcode(qp, opcode);
- if (rx)
- ibd->fault_opcode->n_rxfaults[opcode]++;
- else
- ibd->fault_opcode->n_txfaults[opcode]++;
- }
- return ret;
-}
-
-bool hfi1_dbg_fault_packet(struct hfi1_packet *packet)
-{
- struct rvt_dev_info *rdi = &packet->rcd->ppd->dd->verbs_dev.rdi;
- struct hfi1_ibdev *ibd = dev_from_rdi(rdi);
- bool ret = false;
-
- if (!ibd->fault_packet || !ibd->fault_packet->fault_by_packet)
- return false;
-
- ret = should_fail(&ibd->fault_packet->attr, 1);
- if (ret) {
- ++ibd->fault_packet->n_faults;
- trace_hfi1_fault_packet(packet);
- }
- return ret;
-}
-#endif
-
void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
{
char name[sizeof("port0counters") + 1];
@@ -1438,21 +1165,14 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
S_IRUGO : S_IRUGO | S_IWUSR);
}
-#ifdef CONFIG_FAULT_INJECTION
- debugfs_create_bool("fault_suppress_err", 0600,
- ibd->hfi1_ibdev_dbg,
- &ibd->fault_suppress_err);
- fault_init_debugfs(ibd);
-#endif
+ hfi1_fault_init_debugfs(ibd);
}
void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
{
if (!hfi1_dbg_root)
goto out;
-#ifdef CONFIG_FAULT_INJECTION
- fault_exit_debugfs(ibd);
-#endif
+ hfi1_fault_exit_debugfs(ibd);
debugfs_remove(ibd->hfi1_ibdev_link);
debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
out:
diff --git a/drivers/infiniband/hw/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
index 38c38a98156d..d5d824459fcc 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.h
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_DEBUGFS_H
#define _HFI1_DEBUGFS_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015, 2016, 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -48,51 +48,59 @@
*/
struct hfi1_ibdev;
-#ifdef CONFIG_DEBUG_FS
-void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd);
-void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd);
-void hfi1_dbg_init(void);
-void hfi1_dbg_exit(void);
-
-#ifdef CONFIG_FAULT_INJECTION
-#include <linux/fault-inject.h>
-struct fault_opcode {
- struct fault_attr attr;
- struct dentry *dir;
- bool fault_by_opcode;
- u64 n_rxfaults[256];
- u64 n_txfaults[256];
- u8 opcode;
- u8 mask;
-};
-struct fault_packet {
- struct fault_attr attr;
- struct dentry *dir;
- bool fault_by_packet;
- u64 n_faults;
-};
+#define DEBUGFS_FILE_CREATE(name, parent, data, ops, mode) \
+do { \
+ struct dentry *ent; \
+ const char *__name = name; \
+ ent = debugfs_create_file(__name, mode, parent, \
+ data, ops); \
+ if (!ent) \
+ pr_warn("create of %s failed\n", __name); \
+} while (0)
-bool hfi1_dbg_fault_opcode(struct rvt_qp *qp, u32 opcode, bool rx);
-bool hfi1_dbg_fault_packet(struct hfi1_packet *packet);
-bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd);
-#else
-static inline bool hfi1_dbg_fault_packet(struct hfi1_packet *packet)
-{
- return false;
+#define DEBUGFS_SEQ_FILE_OPS(name) \
+static const struct seq_operations _##name##_seq_ops = { \
+ .start = _##name##_seq_start, \
+ .next = _##name##_seq_next, \
+ .stop = _##name##_seq_stop, \
+ .show = _##name##_seq_show \
}
-static inline bool hfi1_dbg_fault_opcode(struct rvt_qp *qp,
- u32 opcode, bool rx)
-{
- return false;
+#define DEBUGFS_SEQ_FILE_OPEN(name) \
+static int _##name##_open(struct inode *inode, struct file *s) \
+{ \
+ struct seq_file *seq; \
+ int ret; \
+ ret = seq_open(s, &_##name##_seq_ops); \
+ if (ret) \
+ return ret; \
+ seq = s->private_data; \
+ seq->private = inode->i_private; \
+ return 0; \
}
-static inline bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
-{
- return false;
+#define DEBUGFS_FILE_OPS(name) \
+static const struct file_operations _##name##_file_ops = { \
+ .owner = THIS_MODULE, \
+ .open = _##name##_open, \
+ .read = hfi1_seq_read, \
+ .llseek = hfi1_seq_lseek, \
+ .release = seq_release \
}
-#endif
+
+#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
+ DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, 0444)
+
+ssize_t hfi1_seq_read(struct file *file, char __user *buf, size_t size,
+ loff_t *ppos);
+loff_t hfi1_seq_lseek(struct file *file, loff_t offset, int whence);
+
+#ifdef CONFIG_DEBUG_FS
+void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd);
+void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd);
+void hfi1_dbg_init(void);
+void hfi1_dbg_exit(void);
#else
static inline void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
@@ -110,22 +118,6 @@ static inline void hfi1_dbg_init(void)
static inline void hfi1_dbg_exit(void)
{
}
-
-static inline bool hfi1_dbg_fault_packet(struct hfi1_packet *packet)
-{
- return false;
-}
-
-static inline bool hfi1_dbg_fault_opcode(struct rvt_qp *qp,
- u32 opcode, bool rx)
-{
- return false;
-}
-
-static inline bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
-{
- return false;
-}
#endif
#endif /* _HFI1_DEBUGFS_H */
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index bd837a048bf4..94dca95db04f 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -61,6 +61,7 @@
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
+#include "fault.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -1482,38 +1483,51 @@ static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
struct hfi1_pportdata *ppd = rcd->ppd;
struct hfi1_ibport *ibp = &ppd->ibport_data;
u8 l4;
- u8 grh_len;
packet->hdr = (struct hfi1_16b_header *)
hfi1_get_16B_header(packet->rcd->dd,
packet->rhf_addr);
- packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
-
l4 = hfi1_16B_get_l4(packet->hdr);
if (l4 == OPA_16B_L4_IB_LOCAL) {
- grh_len = 0;
packet->ohdr = packet->ebuf;
packet->grh = NULL;
+ packet->opcode = ib_bth_get_opcode(packet->ohdr);
+ packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
+ /* hdr_len_by_opcode already has an IB LRH factored in */
+ packet->hlen = hdr_len_by_opcode[packet->opcode] +
+ (LRH_16B_BYTES - LRH_9B_BYTES);
+ packet->migrated = opa_bth_is_migration(packet->ohdr);
} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
u32 vtf;
+ u8 grh_len = sizeof(struct ib_grh);
- grh_len = sizeof(struct ib_grh);
packet->ohdr = packet->ebuf + grh_len;
packet->grh = packet->ebuf;
+ packet->opcode = ib_bth_get_opcode(packet->ohdr);
+ packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
+ /* hdr_len_by_opcode already has an IB LRH factored in */
+ packet->hlen = hdr_len_by_opcode[packet->opcode] +
+ (LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
+ packet->migrated = opa_bth_is_migration(packet->ohdr);
+
if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
goto drop;
vtf = be32_to_cpu(packet->grh->version_tclass_flow);
if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
goto drop;
+ } else if (l4 == OPA_16B_L4_FM) {
+ packet->mgmt = packet->ebuf;
+ packet->ohdr = NULL;
+ packet->grh = NULL;
+ packet->opcode = IB_OPCODE_UD_SEND_ONLY;
+ packet->pad = OPA_16B_L4_FM_PAD;
+ packet->hlen = OPA_16B_L4_FM_HLEN;
+ packet->migrated = false;
} else {
goto drop;
}
/* Query commonly used fields from packet header */
- packet->opcode = ib_bth_get_opcode(packet->ohdr);
- /* hdr_len_by_opcode already has an IB LRH factored in */
- packet->hlen = hdr_len_by_opcode[packet->opcode] +
- (LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
packet->slid = hfi1_16B_get_slid(packet->hdr);
packet->dlid = hfi1_16B_get_dlid(packet->hdr);
@@ -1523,10 +1537,8 @@ static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
16B);
packet->sc = hfi1_16B_get_sc(packet->hdr);
packet->sl = ibp->sc_to_sl[packet->sc];
- packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
packet->extra_byte = SIZE_OF_LT;
packet->pkey = hfi1_16B_get_pkey(packet->hdr);
- packet->migrated = opa_bth_is_migration(packet->ohdr);
if (hfi1_bypass_ingress_pkt_check(packet))
goto drop;
@@ -1565,10 +1577,10 @@ void handle_eflags(struct hfi1_packet *packet)
*/
int process_receive_ib(struct hfi1_packet *packet)
{
- if (unlikely(hfi1_dbg_fault_packet(packet)))
+ if (hfi1_setup_9B_packet(packet))
return RHF_RCV_CONTINUE;
- if (hfi1_setup_9B_packet(packet))
+ if (unlikely(hfi1_dbg_should_fault_rx(packet)))
return RHF_RCV_CONTINUE;
trace_hfi1_rcvhdr(packet);
@@ -1642,7 +1654,8 @@ int process_receive_error(struct hfi1_packet *packet)
/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
if (unlikely(
hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
- rhf_rcv_type_err(packet->rhf) == 3))
+ (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
+ packet->rhf & RHF_DC_ERR)))
return RHF_RCV_CONTINUE;
hfi1_setup_ib_header(packet);
@@ -1657,10 +1670,10 @@ int process_receive_error(struct hfi1_packet *packet)
int kdeth_process_expected(struct hfi1_packet *packet)
{
- if (unlikely(hfi1_dbg_fault_packet(packet)))
+ hfi1_setup_9B_packet(packet);
+ if (unlikely(hfi1_dbg_should_fault_rx(packet)))
return RHF_RCV_CONTINUE;
- hfi1_setup_ib_header(packet);
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
@@ -1671,11 +1684,11 @@ int kdeth_process_expected(struct hfi1_packet *packet)
int kdeth_process_eager(struct hfi1_packet *packet)
{
- hfi1_setup_ib_header(packet);
+ hfi1_setup_9B_packet(packet);
+ if (unlikely(hfi1_dbg_should_fault_rx(packet)))
+ return RHF_RCV_CONTINUE;
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
- if (unlikely(hfi1_dbg_fault_packet(packet)))
- return RHF_RCV_CONTINUE;
dd_dev_err(packet->rcd->dd,
"Unhandled eager packet received. Dropping.\n");
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
index 0af91675acc6..1be49a0d9c11 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -52,13 +52,24 @@
* exp_tid_group_init - initialize exp_tid_set
* @set - the set
*/
-void hfi1_exp_tid_group_init(struct exp_tid_set *set)
+static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
{
INIT_LIST_HEAD(&set->list);
set->count = 0;
}
/**
+ * hfi1_exp_tid_group_init - initialize rcd expected receive
+ * @rcd - the rcd
+ */
+void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
+{
+ hfi1_exp_tid_set_init(&rcd->tid_group_list);
+ hfi1_exp_tid_set_init(&rcd->tid_used_list);
+ hfi1_exp_tid_set_init(&rcd->tid_full_list);
+}
+
+/**
* alloc_ctxt_rcv_groups - initialize expected receive groups
* @rcd - the context to add the groupings to
*/
@@ -68,13 +79,17 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
u32 tidbase;
struct tid_group *grp;
int i;
+ u32 ngroups;
+ ngroups = rcd->expected_count / dd->rcv_entries.group_size;
+ rcd->groups =
+ kcalloc_node(ngroups, sizeof(*rcd->groups),
+ GFP_KERNEL, rcd->numa_id);
+ if (!rcd->groups)
+ return -ENOMEM;
tidbase = rcd->expected_base;
- for (i = 0; i < rcd->expected_count /
- dd->rcv_entries.group_size; i++) {
- grp = kzalloc(sizeof(*grp), GFP_KERNEL);
- if (!grp)
- goto bail;
+ for (i = 0; i < ngroups; i++) {
+ grp = &rcd->groups[i];
grp->size = dd->rcv_entries.group_size;
grp->base = tidbase;
tid_group_add_tail(grp, &rcd->tid_group_list);
@@ -82,9 +97,6 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
}
return 0;
-bail:
- hfi1_free_ctxt_rcv_groups(rcd);
- return -ENOMEM;
}
/**
@@ -100,15 +112,12 @@ bail:
*/
void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
{
- struct tid_group *grp, *gptr;
-
WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_full_list));
WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_used_list));
- list_for_each_entry_safe(grp, gptr, &rcd->tid_group_list.list, list) {
- tid_group_remove(grp, &rcd->tid_group_list);
- kfree(grp);
- }
+ kfree(rcd->groups);
+ rcd->groups = NULL;
+ hfi1_exp_tid_group_init(rcd);
hfi1_clear_tids(rcd);
}
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.h b/drivers/infiniband/hw/hfi1/exp_rcv.h
index 08719047628a..f25362015095 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.h
@@ -183,8 +183,30 @@ static inline u32 rcventry2tidinfo(u32 rcventry)
EXP_TID_SET(CTRL, 1 << (rcventry - pair));
}
+/**
+ * hfi1_tid_group_to_idx - convert a group to an index
+ * @rcd - the receive context
+ * @grp - the group pointer
+ */
+static inline u16
+hfi1_tid_group_to_idx(struct hfi1_ctxtdata *rcd, struct tid_group *grp)
+{
+ return grp - &rcd->groups[0];
+}
+
+/**
+ * hfi1_idx_to_tid_group - convert an index to a group
+ * @rcd - the receive context
+ * @idx - the index
+ */
+static inline struct tid_group *
+hfi1_idx_to_tid_group(struct hfi1_ctxtdata *rcd, u16 idx)
+{
+ return &rcd->groups[idx];
+}
+
int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
-void hfi1_exp_tid_group_init(struct exp_tid_set *set);
+void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd);
#endif /* _HFI1_EXP_RCV_H */
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
new file mode 100644
index 000000000000..e2290f32c8d9
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#include "debugfs.h"
+#include "fault.h"
+#include "trace.h"
+
+#define HFI1_FAULT_DIR_TX BIT(0)
+#define HFI1_FAULT_DIR_RX BIT(1)
+#define HFI1_FAULT_DIR_TXRX (HFI1_FAULT_DIR_TX | HFI1_FAULT_DIR_RX)
+
+static void *_fault_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void *_fault_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ ++*pos;
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void _fault_stats_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _fault_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ loff_t i = *spos, j;
+ u64 n_packets = 0, n_bytes = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
+
+ for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
+ rcd = hfi1_rcd_get_by_index(dd, j);
+ if (rcd) {
+ n_packets += rcd->opstats->stats[i].n_packets;
+ n_bytes += rcd->opstats->stats[i].n_bytes;
+ }
+ hfi1_rcd_put(rcd);
+ }
+ for_each_possible_cpu(j) {
+ struct hfi1_opcode_stats_perctx *sp =
+ per_cpu_ptr(dd->tx_opstats, j);
+
+ n_packets += sp->stats[i].n_packets;
+ n_bytes += sp->stats[i].n_bytes;
+ }
+ if (!n_packets && !n_bytes)
+ return SEQ_SKIP;
+ if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i])
+ return SEQ_SKIP;
+ seq_printf(s, "%02llx %llu/%llu (faults rx:%llu faults: tx:%llu)\n", i,
+ (unsigned long long)n_packets,
+ (unsigned long long)n_bytes,
+ (unsigned long long)ibd->fault->n_rxfaults[i],
+ (unsigned long long)ibd->fault->n_txfaults[i]);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(fault_stats);
+DEBUGFS_SEQ_FILE_OPEN(fault_stats);
+DEBUGFS_FILE_OPS(fault_stats);
+
+static int fault_opcodes_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ ssize_t ret = 0;
+	/* 1280 = 256 opcodes * 4 chars/opcode + 255 commas + NUL terminator */
+ size_t copy, datalen = 1280;
+ char *data, *token, *ptr, *end;
+ struct fault *fault = file->private_data;
+
+ data = kcalloc(datalen, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ copy = min(len, datalen - 1);
+ if (copy_from_user(data, buf, copy))
+ return -EFAULT;
+
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret))
+ return ret;
+ ptr = data;
+ token = ptr;
+ for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
+ char *dash;
+ unsigned long range_start, range_end, i;
+ bool remove = false;
+
+ end = strchr(ptr, ',');
+ if (end)
+ *end = '\0';
+ if (token[0] == '-') {
+ remove = true;
+ token++;
+ }
+ dash = strchr(token, '-');
+ if (dash)
+ *dash = '\0';
+ if (kstrtoul(token, 0, &range_start))
+ break;
+ if (dash) {
+ token = dash + 1;
+ if (kstrtoul(token, 0, &range_end))
+ break;
+ } else {
+ range_end = range_start;
+ }
+ if (range_start == range_end && range_start == -1UL) {
+ bitmap_zero(fault->opcodes, sizeof(fault->opcodes) *
+ BITS_PER_BYTE);
+ break;
+ }
+ for (i = range_start; i <= range_end; i++) {
+ if (remove)
+ clear_bit(i, fault->opcodes);
+ else
+ set_bit(i, fault->opcodes);
+ }
+ if (!end)
+ break;
+ }
+ ret = len;
+
+ debugfs_file_put(file->f_path.dentry);
+ kfree(data);
+ return ret;
+}
+
+static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ ssize_t ret = 0;
+ char *data;
+ size_t datalen = 1280, size = 0; /* see fault_opcodes_write() */
+ unsigned long bit = 0, zero = 0;
+ struct fault *fault = file->private_data;
+ size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE;
+
+ data = kcalloc(datalen, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret))
+ return ret;
+ bit = find_first_bit(fault->opcodes, bitsize);
+ while (bit < bitsize) {
+ zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
+ if (zero - 1 != bit)
+ size += snprintf(data + size,
+ datalen - size - 1,
+ "0x%lx-0x%lx,", bit, zero - 1);
+ else
+ size += snprintf(data + size,
+ datalen - size - 1, "0x%lx,",
+ bit);
+ bit = find_next_bit(fault->opcodes, bitsize, zero);
+ }
+ debugfs_file_put(file->f_path.dentry);
+ data[size - 1] = '\n';
+ data[size] = '\0';
+ ret = simple_read_from_buffer(buf, len, pos, data, size);
+ kfree(data);
+ return ret;
+}
+
+static const struct file_operations __fault_opcodes_fops = {
+ .owner = THIS_MODULE,
+ .open = fault_opcodes_open,
+ .read = fault_opcodes_read,
+ .write = fault_opcodes_write,
+ .llseek = no_llseek
+};
+
+void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
+{
+ if (ibd->fault)
+ debugfs_remove_recursive(ibd->fault->dir);
+ kfree(ibd->fault);
+ ibd->fault = NULL;
+}
+
+int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
+{
+ struct dentry *parent = ibd->hfi1_ibdev_dbg;
+
+ ibd->fault = kzalloc(sizeof(*ibd->fault), GFP_KERNEL);
+ if (!ibd->fault)
+ return -ENOMEM;
+
+ ibd->fault->attr.interval = 1;
+ ibd->fault->attr.require_end = ULONG_MAX;
+ ibd->fault->attr.stacktrace_depth = 32;
+ ibd->fault->attr.dname = NULL;
+ ibd->fault->attr.verbose = 0;
+ ibd->fault->enable = false;
+ ibd->fault->opcode = false;
+ ibd->fault->fault_skip = 0;
+ ibd->fault->skip = 0;
+ ibd->fault->direction = HFI1_FAULT_DIR_TXRX;
+ ibd->fault->suppress_err = false;
+ bitmap_zero(ibd->fault->opcodes,
+ sizeof(ibd->fault->opcodes) * BITS_PER_BYTE);
+
+ ibd->fault->dir =
+ fault_create_debugfs_attr("fault", parent,
+ &ibd->fault->attr);
+ if (IS_ERR(ibd->fault->dir)) {
+ kfree(ibd->fault);
+ ibd->fault = NULL;
+ return -ENOENT;
+ }
+
+ DEBUGFS_SEQ_FILE_CREATE(fault_stats, ibd->fault->dir, ibd);
+ if (!debugfs_create_bool("enable", 0600, ibd->fault->dir,
+ &ibd->fault->enable))
+ goto fail;
+ if (!debugfs_create_bool("suppress_err", 0600,
+ ibd->fault->dir,
+ &ibd->fault->suppress_err))
+ goto fail;
+ if (!debugfs_create_bool("opcode_mode", 0600, ibd->fault->dir,
+ &ibd->fault->opcode))
+ goto fail;
+ if (!debugfs_create_file("opcodes", 0600, ibd->fault->dir,
+ ibd->fault, &__fault_opcodes_fops))
+ goto fail;
+ if (!debugfs_create_u64("skip_pkts", 0600,
+ ibd->fault->dir,
+ &ibd->fault->fault_skip))
+ goto fail;
+ if (!debugfs_create_u64("skip_usec", 0600,
+ ibd->fault->dir,
+ &ibd->fault->fault_skip_usec))
+ goto fail;
+ if (!debugfs_create_u8("direction", 0600, ibd->fault->dir,
+ &ibd->fault->direction))
+ goto fail;
+
+ return 0;
+fail:
+ hfi1_fault_exit_debugfs(ibd);
+ return -ENOMEM;
+}
+
+bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
+{
+ if (ibd->fault)
+ return ibd->fault->suppress_err;
+ return false;
+}
+
+static bool __hfi1_should_fault(struct hfi1_ibdev *ibd, u32 opcode,
+ u8 direction)
+{
+ bool ret = false;
+
+ if (!ibd->fault || !ibd->fault->enable)
+ return false;
+ if (!(ibd->fault->direction & direction))
+ return false;
+ if (ibd->fault->opcode) {
+ if (bitmap_empty(ibd->fault->opcodes,
+ (sizeof(ibd->fault->opcodes) *
+ BITS_PER_BYTE)))
+ return false;
+ if (!(test_bit(opcode, ibd->fault->opcodes)))
+ return false;
+ }
+ if (ibd->fault->fault_skip_usec &&
+ time_before(jiffies, ibd->fault->skip_usec))
+ return false;
+ if (ibd->fault->fault_skip && ibd->fault->skip) {
+ ibd->fault->skip--;
+ return false;
+ }
+ ret = should_fail(&ibd->fault->attr, 1);
+ if (ret) {
+ ibd->fault->skip = ibd->fault->fault_skip;
+ ibd->fault->skip_usec = jiffies +
+ usecs_to_jiffies(ibd->fault->fault_skip_usec);
+ }
+ return ret;
+}
+
+bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp, u32 opcode)
+{
+ struct hfi1_ibdev *ibd = to_idev(qp->ibqp.device);
+
+ if (__hfi1_should_fault(ibd, opcode, HFI1_FAULT_DIR_TX)) {
+ trace_hfi1_fault_opcode(qp, opcode);
+ ibd->fault->n_txfaults[opcode]++;
+ return true;
+ }
+ return false;
+}
+
+bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet)
+{
+ struct hfi1_ibdev *ibd = &packet->rcd->dd->verbs_dev;
+
+ if (__hfi1_should_fault(ibd, packet->opcode, HFI1_FAULT_DIR_RX)) {
+ trace_hfi1_fault_packet(packet);
+ ibd->fault->n_rxfaults[packet->opcode]++;
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/infiniband/hw/hfi1/fault.h b/drivers/infiniband/hw/hfi1/fault.h
new file mode 100644
index 000000000000..a83382700a7c
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/fault.h
@@ -0,0 +1,109 @@
+#ifndef _HFI1_FAULT_H
+#define _HFI1_FAULT_H
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/fault-inject.h>
+#include <linux/dcache.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <rdma/rdma_vt.h>
+
+#include "hfi.h"
+
+struct hfi1_ibdev;
+
+#if defined(CONFIG_FAULT_INJECTION) && defined(CONFIG_FAULT_INJECTION_DEBUG_FS)
+struct fault {
+ struct fault_attr attr;
+ struct dentry *dir;
+ u64 n_rxfaults[(1U << BITS_PER_BYTE)];
+ u64 n_txfaults[(1U << BITS_PER_BYTE)];
+ u64 fault_skip;
+ u64 skip;
+ u64 fault_skip_usec;
+ unsigned long skip_usec;
+ unsigned long opcodes[(1U << BITS_PER_BYTE) / BITS_PER_LONG];
+ bool enable;
+ bool suppress_err;
+ bool opcode;
+ u8 direction;
+};
+
+int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd);
+bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp, u32 opcode);
+bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet);
+bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd);
+void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd);
+
+#else
+
+static inline int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
+{
+ return 0;
+}
+
+static inline bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet)
+{
+ return false;
+}
+
+static inline bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp,
+ u32 opcode)
+{
+ return false;
+}
+
+static inline bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
+{
+ return false;
+}
+
+static inline void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
+{
+}
+#endif
+#endif /* _HFI1_FAULT_H */
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index da4aa1a95b11..0fc4aa9455c3 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -110,7 +110,7 @@ static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
unsigned long arg);
-static int vma_fault(struct vm_fault *vmf);
+static vm_fault_t vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg);
@@ -505,7 +505,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EINVAL;
goto done;
}
- if (flags & VM_WRITE) {
+ if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
ret = -EPERM;
goto done;
}
@@ -591,7 +591,7 @@ done:
* Local (non-chip) user memory is not mapped right away but as it is
* accessed by the user-level code.
*/
-static int vma_fault(struct vm_fault *vmf)
+static vm_fault_t vma_fault(struct vm_fault *vmf)
{
struct page *page;
@@ -689,8 +689,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
* checks to default and disable the send context.
*/
if (uctxt->sc) {
- set_pio_integrity(uctxt->sc);
sc_disable(uctxt->sc);
+ set_pio_integrity(uctxt->sc);
}
hfi1_free_ctxt_rcv_groups(uctxt);
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index cac2c62bc42d..4ab8b5bfbed1 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -231,20 +231,22 @@ struct hfi1_ctxtdata {
/* job key */
u16 jkey;
/* number of RcvArray groups for this context. */
- u32 rcv_array_groups;
+ u16 rcv_array_groups;
/* index of first eager TID entry. */
- u32 eager_base;
+ u16 eager_base;
/* number of expected TID entries */
- u32 expected_count;
+ u16 expected_count;
/* index of first expected TID entry. */
- u32 expected_base;
+ u16 expected_base;
+ /* array of tid_groups */
+ struct tid_group *groups;
struct exp_tid_set tid_group_list;
struct exp_tid_set tid_used_list;
struct exp_tid_set tid_full_list;
- /* lock protecting all Expected TID data */
- struct mutex exp_lock;
+ /* lock protecting all Expected TID data of user contexts */
+ struct mutex exp_mutex;
/* per-context configuration flags */
unsigned long flags;
/* per-context event flags for fileops/intr communication */
@@ -282,7 +284,7 @@ struct hfi1_ctxtdata {
/* interrupt handling */
u64 imask; /* clear interrupt mask */
int ireg; /* clear interrupt register */
- unsigned numa_id; /* numa node of this context */
+ int numa_id; /* numa node of this context */
/* verbs rx_stats per rcd */
struct hfi1_opcode_stats_perctx *opstats;
@@ -333,6 +335,7 @@ struct hfi1_packet {
struct rvt_qp *qp;
struct ib_other_headers *ohdr;
struct ib_grh *grh;
+ struct opa_16b_mgmt *mgmt;
u64 rhf;
u32 maxcnt;
u32 rhqoff;
@@ -392,10 +395,17 @@ struct hfi1_packet {
*/
#define OPA_16B_L4_9B 0x00
#define OPA_16B_L2_TYPE 0x02
+#define OPA_16B_L4_FM 0x08
#define OPA_16B_L4_IB_LOCAL 0x09
#define OPA_16B_L4_IB_GLOBAL 0x0A
#define OPA_16B_L4_ETHR OPA_VNIC_L4_ETHR
+/*
+ * OPA 16B Management
+ */
+#define OPA_16B_L4_FM_PAD 3 /* fixed 3B pad */
+#define OPA_16B_L4_FM_HLEN 24 /* 16B(16) + L4_FM(8) */
+
static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
{
return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
@@ -472,6 +482,27 @@ static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
OPA_16B_BTH_PAD_MASK);
}
+/*
+ * 16B Management
+ */
+#define OPA_16B_MGMT_QPN_MASK 0xFFFFFF
+static inline u32 hfi1_16B_get_dest_qpn(struct opa_16b_mgmt *mgmt)
+{
+ return be32_to_cpu(mgmt->dest_qpn) & OPA_16B_MGMT_QPN_MASK;
+}
+
+static inline u32 hfi1_16B_get_src_qpn(struct opa_16b_mgmt *mgmt)
+{
+ return be32_to_cpu(mgmt->src_qpn) & OPA_16B_MGMT_QPN_MASK;
+}
+
+static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
+ u32 dest_qp, u32 src_qp)
+{
+ mgmt->dest_qpn = cpu_to_be32(dest_qp & OPA_16B_MGMT_QPN_MASK);
+ mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
+}
+
struct rvt_sge_state;
/*
@@ -880,9 +911,9 @@ typedef void (*hfi1_make_req)(struct rvt_qp *qp,
#define RHF_RCV_REPROCESS 2 /* stop. retain this packet */
struct rcv_array_data {
- u8 group_size;
u16 ngroups;
u16 nctxt_extra;
+ u8 group_size;
};
struct per_vl_data {
@@ -1263,6 +1294,9 @@ struct hfi1_devdata {
/* Save the enabled LCB error bits */
u64 lcb_err_en;
+ struct cpu_mask_set *comp_vect;
+ int *comp_vect_mappings;
+ u32 comp_vect_possible_cpus;
/*
* Capability to have different send engines simply by changing a
@@ -1856,6 +1890,7 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
#define HFI1_HAS_SDMA_TIMEOUT 0x8
#define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
#define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
+#define HFI1_SHUTDOWN 0x100 /* device is shutting down */
/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
@@ -2048,7 +2083,9 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
+#ifndef CONFIG_FAULT_INJECTION
| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
+#endif
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
@@ -2061,7 +2098,11 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
if (ctxt_type == SC_USER)
- base_sc_integrity |= HFI1_PKT_USER_SC_INTEGRITY;
+ base_sc_integrity |=
+#ifndef CONFIG_FAULT_INJECTION
+ SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK |
+#endif
+ HFI1_PKT_USER_SC_INTEGRITY;
else
base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 6309edf811df..f110842b91f5 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -113,8 +113,8 @@ module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");
static uint hfi1_hdrq_entsize = 32;
-module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
-MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");
+module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
+MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
@@ -361,16 +361,14 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
}
INIT_LIST_HEAD(&rcd->qp_wait_list);
- hfi1_exp_tid_group_init(&rcd->tid_group_list);
- hfi1_exp_tid_group_init(&rcd->tid_used_list);
- hfi1_exp_tid_group_init(&rcd->tid_full_list);
+ hfi1_exp_tid_group_init(rcd);
rcd->ppd = ppd;
rcd->dd = dd;
__set_bit(0, rcd->in_use_ctxts);
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
- mutex_init(&rcd->exp_lock);
+ mutex_init(&rcd->exp_mutex);
hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
@@ -1058,6 +1056,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
unsigned pidx;
int i;
+ if (dd->flags & HFI1_SHUTDOWN)
+ return;
+ dd->flags |= HFI1_SHUTDOWN;
+
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1240,6 +1242,8 @@ static void hfi1_clean_devdata(struct hfi1_devdata *dd)
dd->rcv_limit = NULL;
dd->send_schedule = NULL;
dd->tx_opstats = NULL;
+ kfree(dd->comp_vect);
+ dd->comp_vect = NULL;
sdma_clean(dd, dd->num_sdma);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1296,6 +1300,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
dd->unit = ret;
list_add(&dd->list, &hfi1_dev_list);
}
+ dd->node = -1;
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
idr_preload_end();
@@ -1348,6 +1353,12 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
goto bail;
}
+ dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
+ if (!dd->comp_vect) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
kobject_init(&dd->kobj, &hfi1_devdata_type);
return dd;
@@ -1391,6 +1402,7 @@ void hfi1_disable_after_error(struct hfi1_devdata *dd)
static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
+static void shutdown_one(struct pci_dev *);
#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "
@@ -1407,6 +1419,7 @@ static struct pci_driver hfi1_pci_driver = {
.name = DRIVER_NAME,
.probe = init_one,
.remove = remove_one,
+ .shutdown = shutdown_one,
.id_table = hfi1_pci_tbl,
.err_handler = &hfi1_pci_err_handler,
};
@@ -1515,7 +1528,7 @@ module_init(hfi1_mod_init);
static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
- node_affinity_destroy();
+ node_affinity_destroy_all();
hfi1_wss_exit();
hfi1_dbg_exit();
@@ -1599,6 +1612,8 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
static void postinit_cleanup(struct hfi1_devdata *dd)
{
hfi1_start_cleanup(dd);
+ hfi1_comp_vectors_clean_up(dd);
+ hfi1_dev_affinity_clean_up(dd);
hfi1_pcie_ddcleanup(dd);
hfi1_pcie_cleanup(dd->pcidev);
@@ -1816,6 +1831,13 @@ static void remove_one(struct pci_dev *pdev)
postinit_cleanup(dd);
}
+static void shutdown_one(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ shutdown_device(dd);
+}
+
/**
* hfi1_create_rcvhdrq - create a receive header queue
* @dd: the hfi1_ib device
@@ -1831,7 +1853,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
u64 reg;
if (!rcd->rcvhdrq) {
- dma_addr_t dma_hdrqtail;
gfp_t gfp_flags;
/*
@@ -1856,13 +1877,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
goto bail;
}
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
+ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
+ HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
- gfp_flags);
+ &dd->pcidev->dev, PAGE_SIZE,
+ &rcd->rcvhdrqtailaddr_dma, gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
- rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
}
rcd->rcvhdrq_size = amt;
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index e9962c65c68f..0307405491e0 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1238,7 +1238,7 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
}
static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
- u32 logical_state, u32 phys_state)
+ u32 logical_state, u32 phys_state, int local_mad)
{
struct hfi1_devdata *dd = ppd->dd;
u32 link_state;
@@ -1314,7 +1314,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
* Don't send a reply if the response would be sent
* through the disabled port.
*/
- if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
+ if (link_state == HLS_DN_DISABLE && !local_mad)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
break;
case IB_PORT_ARMED:
@@ -1350,7 +1350,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
*/
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len, u32 max_len)
+ u32 *resp_len, u32 max_len, int local_mad)
{
struct opa_port_info *pi = (struct opa_port_info *)data;
struct ib_event event;
@@ -1634,7 +1634,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
*/
if (!invalid) {
- ret = set_port_states(ppd, smp, ls_new, ps_new);
+ ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
if (ret)
return ret;
}
@@ -2085,7 +2085,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len, u32 max_len)
+ u32 *resp_len, u32 max_len, int local_mad)
{
u32 nports = OPA_AM_NPORT(am);
u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
@@ -2122,7 +2122,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
}
if (!invalid) {
- ret = set_port_states(ppd, smp, ls_new, ps_new);
+ ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
if (ret)
return ret;
}
@@ -3424,6 +3424,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)pmp);
}
+ rsp->port_number = port;
/* PortRcvErrorInfo */
rsp->port_rcv_ei.status_and_code =
@@ -4190,7 +4191,7 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev, u8 port,
- u32 *resp_len, u32 max_len)
+ u32 *resp_len, u32 max_len, int local_mad)
{
int ret;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -4198,7 +4199,7 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
switch (attr_id) {
case IB_SMP_ATTR_PORT_INFO:
ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
- resp_len, max_len);
+ resp_len, max_len, local_mad);
break;
case IB_SMP_ATTR_PKEY_TABLE:
ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
@@ -4222,7 +4223,7 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
break;
case OPA_ATTRIB_ID_PORT_STATE_INFO:
ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
- resp_len, max_len);
+ resp_len, max_len, local_mad);
break;
case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
@@ -4314,7 +4315,7 @@ static int subn_get_opa_aggregate(struct opa_smp *smp,
static int subn_set_opa_aggregate(struct opa_smp *smp,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, int local_mad)
{
int i;
u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
@@ -4344,7 +4345,9 @@ static int subn_set_opa_aggregate(struct opa_smp *smp,
}
(void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
- ibdev, port, NULL, (u32)agg_data_len);
+ ibdev, port, NULL, (u32)agg_data_len,
+ local_mad);
+
if (smp->status & IB_SMP_INVALID_FIELD)
break;
if (smp->status & ~IB_SMP_DIRECTION) {
@@ -4519,7 +4522,7 @@ static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
u8 port, const struct opa_mad *in_mad,
struct opa_mad *out_mad,
- u32 *resp_len)
+ u32 *resp_len, int local_mad)
{
struct opa_smp *smp = (struct opa_smp *)out_mad;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -4588,11 +4591,11 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
default:
ret = subn_set_opa_sma(attr_id, smp, am, data,
ibdev, port, resp_len,
- data_size);
+ data_size, local_mad);
break;
case OPA_ATTRIB_ID_AGGREGATE:
ret = subn_set_opa_aggregate(smp, ibdev, port,
- resp_len);
+ resp_len, local_mad);
break;
}
break;
@@ -4832,6 +4835,7 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
{
int ret;
int pkey_idx;
+ int local_mad = 0;
u32 resp_len = 0;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -4846,13 +4850,14 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- if (is_local_mad(ibp, in_mad, in_wc)) {
+ local_mad = is_local_mad(ibp, in_mad, in_wc);
+ if (local_mad) {
ret = opa_local_smp_check(ibp, in_wc);
if (ret)
return IB_MAD_RESULT_FAILURE;
}
ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
- out_mad, &resp_len);
+ out_mad, &resp_len, local_mad);
goto bail;
case IB_MGMT_CLASS_PERF_MGMT:
ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index c1c982908b4b..4d4371bf2c7c 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -56,11 +56,6 @@
#include "chip_registers.h"
#include "aspm.h"
-/* link speed vector for Gen3 speed - not in Linux headers */
-#define GEN1_SPEED_VECTOR 0x1
-#define GEN2_SPEED_VECTOR 0x2
-#define GEN3_SPEED_VECTOR 0x3
-
/*
* This file contains PCIe utility routines.
*/
@@ -183,6 +178,14 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
return -ENOMEM;
}
dd_dev_info(dd, "UC base1: %p for %x\n", dd->kregbase1, RCV_ARRAY);
+
+ /* verify that reads actually work, save revision for reset check */
+ dd->revision = readq(dd->kregbase1 + CCE_REVISION);
+ if (dd->revision == ~(u64)0) {
+ dd_dev_err(dd, "Cannot read chip CSRs\n");
+ goto nomem;
+ }
+
dd->chip_rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
dd_dev_info(dd, "RcvArray count: %u\n", dd->chip_rcv_array_count);
dd->base2_start = RCV_ARRAY + dd->chip_rcv_array_count * 8;
@@ -262,7 +265,7 @@ static u32 extract_speed(u16 linkstat)
case PCI_EXP_LNKSTA_CLS_5_0GB:
speed = 5000; /* Gen 2, 5GHz */
break;
- case GEN3_SPEED_VECTOR:
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
speed = 8000; /* Gen 3, 8GHz */
break;
}
@@ -317,7 +320,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
return ret;
}
- if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) {
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
dd_dev_info(dd,
"This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
linkcap & PCI_EXP_LNKCAP_SLS);
@@ -694,9 +697,6 @@ const struct pci_error_handlers hfi1_pci_err_handler = {
/* gasket block secondary bus reset delay */
#define SBR_DELAY_US 200000 /* 200ms */
-/* mask for PCIe capability register lnkctl2 target link speed */
-#define LNKCTL2_TARGET_LINK_SPEED_MASK 0xf
-
static uint pcie_target = 3;
module_param(pcie_target, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");
@@ -1045,13 +1045,13 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
return 0;
if (pcie_target == 1) { /* target Gen1 */
- target_vector = GEN1_SPEED_VECTOR;
+ target_vector = PCI_EXP_LNKCTL2_TLS_2_5GT;
target_speed = 2500;
} else if (pcie_target == 2) { /* target Gen2 */
- target_vector = GEN2_SPEED_VECTOR;
+ target_vector = PCI_EXP_LNKCTL2_TLS_5_0GT;
target_speed = 5000;
} else if (pcie_target == 3) { /* target Gen3 */
- target_vector = GEN3_SPEED_VECTOR;
+ target_vector = PCI_EXP_LNKCTL2_TLS_8_0GT;
target_speed = 8000;
} else {
/* off or invalid target - skip */
@@ -1290,8 +1290,8 @@ retry:
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
/* only write to parent if target is not as high as ours */
- if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) {
- lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+ if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
+ lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
@@ -1316,7 +1316,7 @@ retry:
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
- lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+ lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 40dac4d16eb8..9cac15d10c4f 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -50,8 +50,6 @@
#include "qp.h"
#include "trace.h"
-#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
-
#define SC(name) SEND_CTXT_##name
/*
* Send Context functions
@@ -961,15 +959,40 @@ void sc_disable(struct send_context *sc)
}
/* return SendEgressCtxtStatus.PacketOccupancy */
-#define packet_occupancy(r) \
- (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
- >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
+static u64 packet_occupancy(u64 reg)
+{
+ return (reg &
+ SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
+ >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
+}
/* is egress halted on the context? */
-#define egress_halted(r) \
- ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
+static bool egress_halted(u64 reg)
+{
+ return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
+}
-/* wait for packet egress, optionally pause for credit return */
+/* is the send context halted? */
+static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
+{
+ return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
+ SC(STATUS_CTXT_HALTED_SMASK));
+}
+
+/**
+ * sc_wait_for_packet_egress
+ * @sc: valid send context
+ * @pause: wait for credit return
+ *
+ * Wait for packet egress, optionally pause for credit return
+ *
+ * Egress halt and Context halt are not necessarily the same thing, so
+ * check for both.
+ *
+ * NOTE: The context halt bit may not be set immediately. Because of this,
+ * it is necessary to check the SW SFC_HALTED bit (set in the IRQ) and the HW
+ * context bit to determine if the context is halted.
+ */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
struct hfi1_devdata *dd = sc->dd;
@@ -981,8 +1004,9 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
reg_prev = reg;
reg = read_csr(dd, sc->hw_context * 8 +
SEND_EGRESS_CTXT_STATUS);
- /* done if egress is stopped */
- if (egress_halted(reg))
+ /* done if any halt bits, SW or HW are set */
+ if (sc->flags & SCF_HALTED ||
+ is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
break;
reg = packet_occupancy(reg);
if (reg == 0)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index da58046a02ea..1a1a47ac53c6 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2012,7 +2012,7 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
unsigned long nsec = 1024 * ccti_timer;
hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_PINNED);
}
spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
@@ -2123,7 +2123,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
/* OK, process the packet. */
switch (opcode) {
case OP(SEND_FIRST):
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2149,7 +2149,7 @@ send_middle:
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
/* consume RWQE */
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2159,7 +2159,7 @@ send_middle:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
case OP(SEND_ONLY_WITH_INVALIDATE):
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2271,7 +2271,7 @@ send_last:
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
goto no_immediate_data;
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret) {
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index c0071ca4147a..ef4c566e206f 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -53,156 +53,6 @@
#include "verbs_txreq.h"
#include "trace.h"
-/*
- * Validate a RWQE and fill in the SGE state.
- * Return 1 if OK.
- */
-static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
-{
- int i, j, ret;
- struct ib_wc wc;
- struct rvt_lkey_table *rkt;
- struct rvt_pd *pd;
- struct rvt_sge_state *ss;
-
- rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
- pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
- ss = &qp->r_sge;
- ss->sg_list = qp->r_sg_list;
- qp->r_len = 0;
- for (i = j = 0; i < wqe->num_sge; i++) {
- if (wqe->sg_list[i].length == 0)
- continue;
- /* Check LKEY */
- ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- NULL, &wqe->sg_list[i],
- IB_ACCESS_LOCAL_WRITE);
- if (unlikely(ret <= 0))
- goto bad_lkey;
- qp->r_len += wqe->sg_list[i].length;
- j++;
- }
- ss->num_sge = j;
- ss->total_len = qp->r_len;
- ret = 1;
- goto bail;
-
-bad_lkey:
- while (j) {
- struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
-
- rvt_put_mr(sge->mr);
- }
- ss->num_sge = 0;
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr_id;
- wc.status = IB_WC_LOC_PROT_ERR;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- /* Signal solicited completion event. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return -1 if there is a local error, 0 if no RWQE is available,
- * otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
-{
- unsigned long flags;
- struct rvt_rq *rq;
- struct rvt_rwq *wq;
- struct rvt_srq *srq;
- struct rvt_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- u32 tail;
- int ret;
-
- if (qp->ibqp.srq) {
- srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ret = 0;
- goto unlock;
- }
-
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- if (unlikely(tail == wq->head)) {
- ret = 0;
- goto unlock;
- }
- /* Make sure entry is read after head index is read. */
- smp_rmb();
- wqe = rvt_get_rwqe_ptr(rq, tail);
- /*
- * Even though we update the tail index in memory, the verbs
- * consumer is not supposed to post more entries until a
- * completion is generated.
- */
- if (++tail >= rq->size)
- tail = 0;
- wq->tail = tail;
- if (!wr_id_only && !init_sge(qp, wqe)) {
- ret = -1;
- goto unlock;
- }
- qp->r_wr_id = wqe->wr_id;
-
- ret = 1;
- set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
- if (handler) {
- u32 n;
-
- /*
- * Validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- goto bail;
- }
- }
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
-bail:
- return ret;
-}
-
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
return (gid->global.interface_id == id &&
@@ -423,7 +273,7 @@ again:
/* FALLTHROUGH */
case IB_WR_SEND:
send:
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -435,7 +285,7 @@ send:
goto inv_err;
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 1f203309cf24..298e0e3fc0c9 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -923,9 +923,10 @@ ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
cpumask_var_t mask, new_mask;
unsigned long cpu;
int ret, vl, sz;
+ struct sdma_rht_node *rht_node;
vl = sdma_engine_get_vl(sde);
- if (unlikely(vl < 0))
+ if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
return -EINVAL;
ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
@@ -953,19 +954,12 @@ ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
mutex_lock(&process_to_sde_mutex);
for_each_cpu(cpu, mask) {
- struct sdma_rht_node *rht_node;
-
/* Check if we have this already mapped */
if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
cpumask_set_cpu(cpu, new_mask);
continue;
}
- if (vl >= ARRAY_SIZE(rht_node->map)) {
- ret = -EINVAL;
- goto out;
- }
-
rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
sdma_rht_params);
if (!rht_node) {
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 89bd9851065b..7c8aed0ffc07 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -63,13 +63,20 @@ static u8 __get_ib_hdr_len(struct ib_header *hdr)
static u8 __get_16b_hdr_len(struct hfi1_16b_header *hdr)
{
- struct ib_other_headers *ohdr;
+ struct ib_other_headers *ohdr = NULL;
u8 opcode;
+ u8 l4 = hfi1_16B_get_l4(hdr);
+
+ if (l4 == OPA_16B_L4_FM) {
+ opcode = IB_OPCODE_UD_SEND_ONLY;
+ return (8 + 8); /* No BTH */
+ }
- if (hfi1_16B_get_l4(hdr) == OPA_16B_L4_IB_LOCAL)
+ if (l4 == OPA_16B_L4_IB_LOCAL)
ohdr = &hdr->u.oth;
else
ohdr = &hdr->u.l.oth;
+
opcode = ib_bth_get_opcode(ohdr);
return hdr_len_by_opcode[opcode] == 0 ?
0 : hdr_len_by_opcode[opcode] - (12 + 8 + 8);
@@ -234,17 +241,24 @@ const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
#define BTH_16B_PRN \
"op:0x%.2x,%s se:%d m:%d pad:%d tver:%d " \
"qpn:0x%.6x a:%d psn:0x%.8x"
-const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
- u8 ack, bool becn, bool fecn, u8 mig,
- u8 se, u8 pad, u8 opcode, const char *opname,
- u8 tver, u16 pkey, u32 psn, u32 qpn)
+#define L4_FM_16B_PRN \
+ "op:0x%.2x,%s dest_qpn:0x%.6x src_qpn:0x%.6x"
+const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
+ u8 ack, bool becn, bool fecn, u8 mig,
+ u8 se, u8 pad, u8 opcode, const char *opname,
+ u8 tver, u16 pkey, u32 psn, u32 qpn,
+ u32 dest_qpn, u32 src_qpn)
{
const char *ret = trace_seq_buffer_ptr(p);
if (bypass)
- trace_seq_printf(p, BTH_16B_PRN,
- opcode, opname,
- se, mig, pad, tver, qpn, ack, psn);
+ if (l4 == OPA_16B_L4_FM)
+ trace_seq_printf(p, L4_FM_16B_PRN,
+ opcode, opname, dest_qpn, src_qpn);
+ else
+ trace_seq_printf(p, BTH_16B_PRN,
+ opcode, opname,
+ se, mig, pad, tver, qpn, ack, psn);
else
trace_seq_printf(p, BTH_9B_PRN,
@@ -258,12 +272,17 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
const char *parse_everbs_hdrs(
struct trace_seq *p,
- u8 opcode,
+ u8 opcode, u8 l4, u32 dest_qpn, u32 src_qpn,
void *ehdrs)
{
union ib_ehdrs *eh = ehdrs;
const char *ret = trace_seq_buffer_ptr(p);
+ if (l4 == OPA_16B_L4_FM) {
+ trace_seq_printf(p, "mgmt pkt");
+ goto out;
+ }
+
switch (opcode) {
/* imm */
case OP(RC, SEND_LAST_WITH_IMMEDIATE):
@@ -334,6 +353,7 @@ const char *parse_everbs_hdrs(
be32_to_cpu(eh->ieth));
break;
}
+out:
trace_seq_putc(p, 0);
return ret;
}
@@ -374,6 +394,7 @@ const char *print_u32_array(
return ret;
}
+__hfi1_trace_fn(AFFINITY);
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
__hfi1_trace_fn(SDMA);
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
index 0e7d929530c5..e62171fb7379 100644
--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -1,5 +1,5 @@
/*
-* Copyright(c) 2015, 2016 Intel Corporation.
+* Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -113,6 +113,7 @@ void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
* hfi1_cdbg(LVL, fmt, ...); as well as take care of all
* the debugfs stuff.
*/
+__hfi1_trace_def(AFFINITY);
__hfi1_trace_def(PKT);
__hfi1_trace_def(PROC);
__hfi1_trace_def(SDMA);
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
index 2847626d3819..1dc2c28fc96e 100644
--- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -96,7 +96,9 @@ __print_symbolic(opcode, \
ib_opcode_name(CNP))
u8 ibhdr_exhdr_len(struct ib_header *hdr);
-const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
+const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode,
+ u8 l4, u32 dest_qpn, u32 src_qpn,
+ void *ehdrs);
u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opah);
u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet);
const char *hfi1_trace_get_packet_l4_str(u8 l4);
@@ -123,14 +125,16 @@ const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
u8 rc, u8 sc, u8 sl, u16 entropy,
u16 len, u16 pkey, u32 dlid, u32 slid);
-const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
- u8 ack, bool becn, bool fecn, u8 mig,
- u8 se, u8 pad, u8 opcode, const char *opname,
- u8 tver, u16 pkey, u32 psn, u32 qpn);
+const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
+ u8 ack, bool becn, bool fecn, u8 mig,
+ u8 se, u8 pad, u8 opcode, const char *opname,
+ u8 tver, u16 pkey, u32 psn, u32 qpn,
+ u32 dest_qpn, u32 src_qpn);
const char *hfi1_trace_get_packet_l2_str(u8 l2);
-#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
+#define __parse_ib_ehdrs(op, l4, dest_qpn, src_qpn, ehdrs) \
+ parse_everbs_hdrs(p, op, l4, dest_qpn, src_qpn, ehdrs)
#define lrh_name(lrh) { HFI1_##lrh, #lrh }
#define show_lnh(lrh) \
@@ -169,6 +173,8 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
__field(u32, psn)
__field(u32, qpn)
__field(u32, slid)
+ __field(u32, dest_qpn)
+ __field(u32, src_qpn)
/* extended headers */
__dynamic_array(u8, ehdrs,
hfi1_trace_packet_hdr_len(packet))
@@ -178,6 +184,8 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
__entry->etype = packet->etype;
__entry->l2 = hfi1_16B_get_l2(packet->hdr);
+ __entry->dest_qpn = 0;
+ __entry->src_qpn = 0;
if (__entry->etype == RHF_RCV_TYPE_BYPASS) {
hfi1_trace_parse_16b_hdr(packet->hdr,
&__entry->age,
@@ -192,16 +200,23 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
&__entry->dlid,
&__entry->slid);
- hfi1_trace_parse_16b_bth(packet->ohdr,
- &__entry->ack,
- &__entry->mig,
- &__entry->opcode,
- &__entry->pad,
- &__entry->se,
- &__entry->tver,
- &__entry->psn,
- &__entry->qpn);
+ if (__entry->l4 == OPA_16B_L4_FM) {
+ __entry->opcode = IB_OPCODE_UD_SEND_ONLY;
+ __entry->dest_qpn = hfi1_16B_get_dest_qpn(packet->mgmt);
+ __entry->src_qpn = hfi1_16B_get_src_qpn(packet->mgmt);
+ } else {
+ hfi1_trace_parse_16b_bth(packet->ohdr,
+ &__entry->ack,
+ &__entry->mig,
+ &__entry->opcode,
+ &__entry->pad,
+ &__entry->se,
+ &__entry->tver,
+ &__entry->psn,
+ &__entry->qpn);
+ }
} else {
+ __entry->l4 = OPA_16B_L4_9B;
hfi1_trace_parse_9b_hdr(packet->hdr, sc5,
&__entry->lnh,
&__entry->lver,
@@ -223,8 +238,9 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
&__entry->pkey,
&__entry->psn,
&__entry->qpn);
- }
- /* extended headers */
+ }
+ /* extended headers */
+ if (__entry->l4 != OPA_16B_L4_FM)
memcpy(__get_dynamic_array(ehdrs),
&packet->ohdr->u,
__get_dynamic_array_len(ehdrs));
@@ -253,25 +269,31 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
__entry->pkey,
__entry->dlid,
__entry->slid),
- hfi1_trace_fmt_bth(p,
- __entry->etype ==
+ hfi1_trace_fmt_rest(p,
+ __entry->etype ==
RHF_RCV_TYPE_BYPASS,
- __entry->ack,
- __entry->becn,
- __entry->fecn,
- __entry->mig,
- __entry->se,
- __entry->pad,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->tver,
- __entry->pkey,
- __entry->psn,
- __entry->qpn),
+ __entry->l4,
+ __entry->ack,
+ __entry->becn,
+ __entry->fecn,
+ __entry->mig,
+ __entry->se,
+ __entry->pad,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->tver,
+ __entry->pkey,
+ __entry->psn,
+ __entry->qpn,
+ __entry->dest_qpn,
+ __entry->src_qpn),
/* extended headers */
__get_dynamic_array_len(ehdrs),
__parse_ib_ehdrs(
__entry->opcode,
+ __entry->l4,
+ __entry->dest_qpn,
+ __entry->src_qpn,
(void *)__get_dynamic_array(ehdrs))
)
);
@@ -310,6 +332,8 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
__field(u32, psn)
__field(u32, qpn)
__field(u32, slid)
+ __field(u32, dest_qpn)
+ __field(u32, src_qpn)
/* extended headers */
__dynamic_array(u8, ehdrs,
hfi1_trace_opa_hdr_len(opah))
@@ -320,6 +344,8 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
DD_DEV_ASSIGN(dd);
__entry->hdr_type = opah->hdr_type;
+ __entry->dest_qpn = 0;
+ __entry->src_qpn = 0;
if (__entry->hdr_type) {
hfi1_trace_parse_16b_hdr(&opah->opah,
&__entry->age,
@@ -334,19 +360,26 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
&__entry->dlid,
&__entry->slid);
- if (__entry->l4 == OPA_16B_L4_IB_LOCAL)
- ohdr = &opah->opah.u.oth;
- else
- ohdr = &opah->opah.u.l.oth;
- hfi1_trace_parse_16b_bth(ohdr,
- &__entry->ack,
- &__entry->mig,
- &__entry->opcode,
- &__entry->pad,
- &__entry->se,
- &__entry->tver,
- &__entry->psn,
- &__entry->qpn);
+ if (__entry->l4 == OPA_16B_L4_FM) {
+ ohdr = NULL;
+ __entry->opcode = IB_OPCODE_UD_SEND_ONLY;
+ __entry->dest_qpn = hfi1_16B_get_dest_qpn(&opah->opah.u.mgmt);
+ __entry->src_qpn = hfi1_16B_get_src_qpn(&opah->opah.u.mgmt);
+ } else {
+ if (__entry->l4 == OPA_16B_L4_IB_LOCAL)
+ ohdr = &opah->opah.u.oth;
+ else
+ ohdr = &opah->opah.u.l.oth;
+ hfi1_trace_parse_16b_bth(ohdr,
+ &__entry->ack,
+ &__entry->mig,
+ &__entry->opcode,
+ &__entry->pad,
+ &__entry->se,
+ &__entry->tver,
+ &__entry->psn,
+ &__entry->qpn);
+ }
} else {
__entry->l4 = OPA_16B_L4_9B;
hfi1_trace_parse_9b_hdr(&opah->ibh, sc5,
@@ -376,8 +409,9 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
}
/* extended headers */
- memcpy(__get_dynamic_array(ehdrs),
- &ohdr->u, __get_dynamic_array_len(ehdrs));
+ if (__entry->l4 != OPA_16B_L4_FM)
+ memcpy(__get_dynamic_array(ehdrs),
+ &ohdr->u, __get_dynamic_array_len(ehdrs));
),
TP_printk("[%s] (%s) %s %s hlen:%d %s",
__get_str(dev),
@@ -399,24 +433,30 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
__entry->pkey,
__entry->dlid,
__entry->slid),
- hfi1_trace_fmt_bth(p,
- !!__entry->hdr_type,
- __entry->ack,
- __entry->becn,
- __entry->fecn,
- __entry->mig,
- __entry->se,
- __entry->pad,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->tver,
- __entry->pkey,
- __entry->psn,
- __entry->qpn),
+ hfi1_trace_fmt_rest(p,
+ !!__entry->hdr_type,
+ __entry->l4,
+ __entry->ack,
+ __entry->becn,
+ __entry->fecn,
+ __entry->mig,
+ __entry->se,
+ __entry->pad,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->tver,
+ __entry->pkey,
+ __entry->psn,
+ __entry->qpn,
+ __entry->dest_qpn,
+ __entry->src_qpn),
/* extended headers */
__get_dynamic_array_len(ehdrs),
__parse_ib_ehdrs(
__entry->opcode,
+ __entry->l4,
+ __entry->dest_qpn,
+ __entry->src_qpn,
(void *)__get_dynamic_array(ehdrs))
)
);
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 9d7a3110c14c..b7b671017e59 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -397,7 +397,7 @@ send_first:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
qp->r_sge = qp->s_rdma_read_sge;
} else {
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -542,7 +542,7 @@ rdma_last_imm:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
rvt_put_ss(&qp->s_rdma_read_sge);
} else {
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 69c17a5ef038..1ab332f1866e 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -163,7 +163,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
} else {
int ret;
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock;
@@ -399,16 +399,30 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
struct hfi1_pportdata *ppd;
struct hfi1_ibport *ibp;
u32 dlid, slid, nwords, extra_bytes;
+ u32 dest_qp = wqe->ud_wr.remote_qpn;
+ u32 src_qp = qp->ibqp.qp_num;
u16 len, pkey;
u8 l4, sc5;
+ bool is_mgmt = false;
ibp = to_iport(qp->ibqp.device, qp->port_num);
ppd = ppd_from_ibp(ibp);
ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
- /* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
- ps->s_txreq->hdr_dwords = 9;
- if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
- ps->s_txreq->hdr_dwords++;
+
+ /*
+ * Build 16B Management Packet if either the destination
+ * or source queue pair number is 0 or 1.
+ */
+ if (dest_qp == 0 || src_qp == 0 || dest_qp == 1 || src_qp == 1) {
+ /* header size in dwords 16B LRH+L4_FM = (16+8)/4. */
+ ps->s_txreq->hdr_dwords = 6;
+ is_mgmt = true;
+ } else {
+ /* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
+ ps->s_txreq->hdr_dwords = 9;
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ ps->s_txreq->hdr_dwords++;
+ }
/* SW provides space for CRC and LT for bypass packets. */
extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2),
@@ -453,7 +467,14 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
((1 << ppd->lmc) - 1));
- hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
+ if (is_mgmt) {
+ l4 = OPA_16B_L4_FM;
+ pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
+ hfi1_16B_set_qpn(&ps->s_txreq->phdr.hdr.opah.u.mgmt,
+ dest_qp, src_qp);
+ } else {
+ hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
+ }
/* Convert dwords to flits */
len = (ps->s_txreq->hdr_dwords + nwords) >> 1;
@@ -845,10 +866,8 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
*/
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
- struct ib_other_headers *ohdr = packet->ohdr;
u32 hdrsize = packet->hlen;
struct ib_wc wc;
- u32 qkey;
u32 src_qp;
u16 pkey;
int mgmt_pkey_idx = -1;
@@ -864,27 +883,35 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
u32 dlid = packet->dlid;
u32 slid = packet->slid;
u8 extra_bytes;
+ u8 l4 = 0;
bool dlid_is_permissive;
bool slid_is_permissive;
+ bool solicited = false;
extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);
- qkey = ib_get_qkey(ohdr);
- src_qp = ib_get_sqpn(ohdr);
if (packet->etype == RHF_RCV_TYPE_BYPASS) {
u32 permissive_lid =
opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);
+ l4 = hfi1_16B_get_l4(packet->hdr);
pkey = hfi1_16B_get_pkey(packet->hdr);
dlid_is_permissive = (dlid == permissive_lid);
slid_is_permissive = (slid == permissive_lid);
} else {
- pkey = ib_bth_get_pkey(ohdr);
+ pkey = ib_bth_get_pkey(packet->ohdr);
dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
}
sl_from_sc = ibp->sc_to_sl[sc5];
+ if (likely(l4 != OPA_16B_L4_FM)) {
+ src_qp = ib_get_sqpn(packet->ohdr);
+ solicited = ib_bth_is_solicited(packet->ohdr);
+ } else {
+ src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
+ }
+
process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
/*
* Get the number of bytes the message was padded by
@@ -922,8 +949,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (mgmt_pkey_idx < 0)
goto drop;
}
- if (unlikely(qkey != qp->qkey)) /* Silent drop */
- return;
+ if (unlikely(l4 != OPA_16B_L4_FM &&
+ ib_get_qkey(packet->ohdr) != qp->qkey))
+ return; /* Silent drop */
/* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 &&
@@ -950,7 +978,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (qp->ibqp.qp_num > 1 &&
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
- wc.ex.imm_data = ohdr->u.ud.imm_data;
+ wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
@@ -974,7 +1002,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
} else {
int ret;
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
@@ -1047,8 +1075,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- ib_bth_is_solicited(ohdr));
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, solicited);
return;
drop:
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 0d5330b7353d..dbe7d14a5c76 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -375,7 +375,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
* From this point on, we are going to be using shared (between master
* and subcontexts) context resources. We need to take the lock.
*/
- mutex_lock(&uctxt->exp_lock);
+ mutex_lock(&uctxt->exp_mutex);
/*
* The first step is to program the RcvArray entries which are complete
* groups.
@@ -437,7 +437,6 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
hfi1_cdbg(TID,
"Failed to program RcvArray entries %d",
ret);
- ret = -EFAULT;
goto unlock;
} else if (ret > 0) {
if (grp->used == grp->size)
@@ -462,7 +461,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
}
}
unlock:
- mutex_unlock(&uctxt->exp_lock);
+ mutex_unlock(&uctxt->exp_mutex);
nomem:
hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
mapped_pages, ret);
@@ -518,7 +517,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
if (IS_ERR(tidinfo))
return PTR_ERR(tidinfo);
- mutex_lock(&uctxt->exp_lock);
+ mutex_lock(&uctxt->exp_mutex);
for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
if (ret) {
@@ -531,7 +530,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
fd->tid_used -= tididx;
spin_unlock(&fd->tid_lock);
tinfo->tidcnt = tididx;
- mutex_unlock(&uctxt->exp_lock);
+ mutex_unlock(&uctxt->exp_mutex);
kfree(tidinfo);
return ret;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index a3d192424344..d2bc77f75253 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_USER_SDMA_H
#define _HFI1_USER_SDMA_H
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -122,8 +122,6 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
(req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
##__VA_ARGS__)
-extern uint extended_psn;
-
struct hfi1_user_sdma_pkt_q {
u16 ctxt;
u16 subctxt;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index c8cf4d4984d3..08991874c0e2 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -63,6 +63,8 @@
#include "verbs_txreq.h"
#include "debugfs.h"
#include "vnic.h"
+#include "fault.h"
+#include "affinity.h"
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
@@ -615,7 +617,12 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
wake_up(&mcast->wait);
} else {
/* Get the destination QP number. */
- qp_num = ib_bth_get_qpn(packet->ohdr);
+ if (packet->etype == RHF_RCV_TYPE_BYPASS &&
+ hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM)
+ qp_num = hfi1_16B_get_dest_qpn(packet->mgmt);
+ else
+ qp_num = ib_bth_get_qpn(packet->ohdr);
+
rcu_read_lock();
packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
if (!packet->qp)
@@ -624,10 +631,6 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
if (hfi1_do_pkey_check(packet))
goto unlock_drop;
- if (unlikely(hfi1_dbg_fault_opcode(packet->qp, packet->opcode,
- true)))
- goto unlock_drop;
-
spin_lock_irqsave(&packet->qp->r_lock, flags);
packet_handler = qp_ok(packet);
if (likely(packet_handler))
@@ -934,8 +937,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
- if (unlikely(hfi1_dbg_fault_opcode(qp, ps->opcode,
- false)))
+ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd,
pbc,
@@ -1088,7 +1090,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
- if (unlikely(hfi1_dbg_fault_opcode(qp, ps->opcode, false)))
+
+ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
}
@@ -1310,21 +1313,23 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_qp_priv *priv = qp->priv;
- struct ib_other_headers *ohdr;
+ struct ib_other_headers *ohdr = NULL;
send_routine sr;
int ret;
u16 pkey;
u32 slid;
+ u8 l4 = 0;
/* locate the pkey within the headers */
if (ps->s_txreq->phdr.hdr.hdr_type) {
struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;
- u8 l4 = hfi1_16B_get_l4(hdr);
- if (l4 == OPA_16B_L4_IB_GLOBAL)
- ohdr = &hdr->u.l.oth;
- else
+ l4 = hfi1_16B_get_l4(hdr);
+ if (l4 == OPA_16B_L4_IB_LOCAL)
ohdr = &hdr->u.oth;
+ else if (l4 == OPA_16B_L4_IB_GLOBAL)
+ ohdr = &hdr->u.l.oth;
+
slid = hfi1_16B_get_slid(hdr);
pkey = hfi1_16B_get_pkey(hdr);
} else {
@@ -1339,7 +1344,11 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
pkey = ib_bth_get_pkey(ohdr);
}
- ps->opcode = ib_bth_get_opcode(ohdr);
+ if (likely(l4 != OPA_16B_L4_FM))
+ ps->opcode = ib_bth_get_opcode(ohdr);
+ else
+ ps->opcode = IB_OPCODE_UD_SEND_ONLY;
+
sr = get_send_routine(qp, ps);
ret = egress_pkey_check(dd->pport, slid, pkey,
priv->s_sc, qp->s_pkey_index);
@@ -1937,11 +1946,11 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
+ hfi1_comp_vect_mappings_lookup;
/* completeion queue */
- snprintf(dd->verbs_dev.rdi.dparms.cq_name,
- sizeof(dd->verbs_dev.rdi.dparms.cq_name),
- "hfi1_cq%d", dd->unit);
+ dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus;
dd->verbs_dev.rdi.dparms.node = dd->node;
/* misc settings */
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 2d787b8346ca..a4d06502f06d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -110,6 +110,12 @@ enum {
#define LRH_9B_BYTES (FIELD_SIZEOF(struct ib_header, lrh))
#define LRH_9B_DWORDS (LRH_9B_BYTES / sizeof(u32))
+/* 24Bits for qpn, upper 8Bits reserved */
+struct opa_16b_mgmt {
+ __be32 dest_qpn;
+ __be32 src_qpn;
+};
+
struct hfi1_16b_header {
u32 lrh[4];
union {
@@ -118,6 +124,7 @@ struct hfi1_16b_header {
struct ib_other_headers oth;
} l;
struct ib_other_headers oth;
+ struct opa_16b_mgmt mgmt;
} u;
} __packed;
@@ -227,9 +234,7 @@ struct hfi1_ibdev {
/* per HFI symlinks to above */
struct dentry *hfi1_ibdev_link;
#ifdef CONFIG_FAULT_INJECTION
- struct fault_opcode *fault_opcode;
- struct fault_packet *fault_packet;
- bool fault_suppress_err;
+ struct fault *fault;
#endif
#endif
};
@@ -330,8 +335,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet);
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
-int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-
void hfi1_migrate_qp(struct rvt_qp *qp);
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index a40ec939ece5..46f65f9f59d0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -197,7 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
buf->npages = 1 << order;
buf->page_shift = page_shift;
/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
- buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
+ buf->direct.buf = dma_zalloc_coherent(dev,
+ size, &t, GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
@@ -207,8 +208,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
--buf->page_shift;
buf->npages *= 2;
}
-
- memset(buf->direct.buf, 0, size);
} else {
buf->nbufs = (size + page_size - 1) / page_size;
buf->npages = buf->nbufs;
@@ -220,7 +219,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
- buf->page_list[i].buf = dma_alloc_coherent(dev,
+ buf->page_list[i].buf = dma_zalloc_coherent(dev,
page_size, &t,
GFP_KERNEL);
@@ -228,7 +227,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
goto err_free;
buf->page_list[i].map = t;
- memset(buf->page_list[i].buf, 0, page_size);
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 9ebe839d8b24..a0ba19d4a10e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -176,6 +176,9 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout)
{
+ if (hr_dev->is_reset)
+ return 0;
+
if (hr_dev->cmd.use_events)
return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index fb305b7f99a8..31221d506d9a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -100,6 +100,9 @@
#define SERV_TYPE_UC 2
#define SERV_TYPE_UD 3
+/* Configure to HW for PAGE_SIZE larger than 4KB */
+#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
+
#define PAGES_SHIFT_8 8
#define PAGES_SHIFT_16 16
#define PAGES_SHIFT_24 24
@@ -211,6 +214,13 @@ enum {
struct hns_roce_uar {
u64 pfn;
unsigned long index;
+ unsigned long logic_idx;
+};
+
+struct hns_roce_vma_data {
+ struct list_head list;
+ struct vm_area_struct *vma;
+ struct mutex *vma_list_mutex;
};
struct hns_roce_ucontext {
@@ -218,6 +228,8 @@ struct hns_roce_ucontext {
struct hns_roce_uar uar;
struct list_head page_list;
struct mutex page_mutex;
+ struct list_head vma_list;
+ struct mutex vma_list_mutex;
};
struct hns_roce_pd {
@@ -770,6 +782,8 @@ struct hns_roce_dev {
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
spinlock_t bt_cmd_lock;
+ bool active;
+ bool is_reset;
struct hns_roce_ib_iboe iboe;
struct list_head pgdir_list;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 1f0965bb64ee..0e8dad68910a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_umem.h>
@@ -52,6 +53,53 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
+static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
+ unsigned int *sge_ind)
+{
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct ib_sge *sg;
+ int num_in_wqe = 0;
+ int extend_sge_num;
+ int fi_sge_num;
+ int se_sge_num;
+ int shift;
+ int i;
+
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+ num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
+ extend_sge_num = wr->num_sge - num_in_wqe;
+ sg = wr->sg_list + num_in_wqe;
+ shift = qp->hr_buf.page_shift;
+
+ /*
+ * Check whether wr->num_sge sges are in the same page. If not, we
+ * should calculate how many sges in the first page and the second
+ * page.
+ */
+ dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
+ fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
+ (uintptr_t)dseg) /
+ sizeof(struct hns_roce_v2_wqe_data_seg);
+ if (extend_sge_num > fi_sge_num) {
+ se_sge_num = extend_sge_num - fi_sge_num;
+ for (i = 0; i < fi_sge_num; i++) {
+ set_data_seg_v2(dseg++, sg + i);
+ (*sge_ind)++;
+ }
+ dseg = get_send_extend_sge(qp,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+ for (i = 0; i < se_sge_num; i++) {
+ set_data_seg_v2(dseg++, sg + fi_sge_num + i);
+ (*sge_ind)++;
+ }
+ } else {
+ for (i = 0; i < extend_sge_num; i++) {
+ set_data_seg_v2(dseg++, sg + i);
+ (*sge_ind)++;
+ }
+ }
+}
+
static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
@@ -85,7 +133,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
1);
} else {
- if (wr->num_sge <= 2) {
+ if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
for (i = 0; i < wr->num_sge; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
@@ -98,24 +146,14 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
(*sge_ind) & (qp->sge.sge_cnt - 1));
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
}
}
- dseg = get_send_extend_sge(qp,
- (*sge_ind) & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < wr->num_sge - 2; i++) {
- if (likely(wr->sg_list[i + 2].length)) {
- set_data_seg_v2(dseg,
- wr->sg_list + 2 + i);
- dseg++;
- (*sge_ind)++;
- }
- }
+ set_extend_sge(qp, wr, sge_ind);
}
roce_set_field(rc_sq_wqe->byte_16,
@@ -319,13 +357,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
GID_LEN_V2);
- dseg = get_send_extend_sge(qp,
- sge_ind & (qp->sge.sge_cnt - 1));
- for (i = 0; i < wr->num_sge; i++) {
- set_data_seg_v2(dseg + i, wr->sg_list + i);
- sge_ind++;
- }
-
+ set_extend_sge(qp, wr, &sge_ind);
ind++;
} else if (ibqp->qp_type == IB_QPT_RC) {
rc_sq_wqe = wqe;
@@ -481,8 +513,8 @@ out:
V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
- V2_DB_PARAMETER_CONS_IDX_S,
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
+ V2_DB_PARAMETER_IDX_S,
qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
V2_DB_PARAMETER_SL_S, qp->sl);
@@ -775,6 +807,9 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
int ret = 0;
int ntc;
+ if (hr_dev->is_reset)
+ return 0;
+
spin_lock_bh(&csq->lock);
if (num > hns_roce_cmq_space(csq)) {
@@ -1031,40 +1066,40 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
- hr_dev->caps.qpc_ba_pg_sz);
+ hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
- hr_dev->caps.qpc_buf_pg_sz);
+ hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
- hr_dev->caps.srqc_ba_pg_sz);
+ hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
- hr_dev->caps.srqc_buf_pg_sz);
+ hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
- hr_dev->caps.cqc_ba_pg_sz);
+ hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
- hr_dev->caps.cqc_buf_pg_sz);
+ hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
- hr_dev->caps.mpt_ba_pg_sz);
+ hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
- hr_dev->caps.mpt_buf_pg_sz);
+ hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
@@ -1359,7 +1394,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
- V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, mr->pbl_ba_pg_sz);
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+ mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
@@ -1435,7 +1471,8 @@ found:
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, mr->pbl_buf_pg_sz);
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
return 0;
@@ -1616,11 +1653,11 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
- hr_dev->caps.cqe_ba_pg_sz);
+ hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
- hr_dev->caps.cqe_buf_pg_sz);
+ hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
cq_context->cqe_ba = (u32)(dma_handle >> 3);
@@ -2719,7 +2756,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
- hr_dev->caps.mtt_ba_pg_sz);
+ hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
@@ -2727,7 +2764,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
- hr_dev->caps.mtt_buf_pg_sz);
+ hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
@@ -4161,12 +4198,14 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set eqe_ba_pg_sz */
roce_set_field(eqc->byte_8,
HNS_ROCE_EQC_BA_PG_SZ_M,
- HNS_ROCE_EQC_BA_PG_SZ_S, eq->eqe_ba_pg_sz);
+ HNS_ROCE_EQC_BA_PG_SZ_S,
+ eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
/* set eqe_buf_pg_sz */
roce_set_field(eqc->byte_8,
HNS_ROCE_EQC_BUF_PG_SZ_M,
- HNS_ROCE_EQC_BUF_PG_SZ_S, eq->eqe_buf_pg_sz);
+ HNS_ROCE_EQC_BUF_PG_SZ_S,
+ eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
/* set eq_producer_idx */
roce_set_field(eqc->byte_8,
@@ -4800,14 +4839,87 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
{
struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ if (!hr_dev)
+ return;
+
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
}
+static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+{
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct ib_event event;
+
+ if (!hr_dev) {
+ dev_err(&handle->pdev->dev,
+ "Input parameter handle->priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ hr_dev->active = false;
+ hr_dev->is_reset = true;
+
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &hr_dev->ib_dev;
+ event.element.port_num = 1;
+ ib_dispatch_event(&event);
+
+ return 0;
+}
+
+static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
+{
+ int ret;
+
+ ret = hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ /* when reset notify type is HNAE3_INIT_CLIENT In reset notify
+ * callback function, RoCE Engine reinitialize. If RoCE reinit
+ * failed, we should inform NIC driver.
+ */
+ handle->priv = NULL;
+ dev_err(&handle->pdev->dev,
+ "In reset process RoCE reinit failed %d.\n", ret);
+ }
+
+ return ret;
+}
+
+static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
+{
+ msleep(100);
+ hns_roce_hw_v2_uninit_instance(handle, false);
+ return 0;
+}
+
+static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case HNAE3_DOWN_CLIENT:
+ ret = hns_roce_hw_v2_reset_notify_down(handle);
+ break;
+ case HNAE3_INIT_CLIENT:
+ ret = hns_roce_hw_v2_reset_notify_init(handle);
+ break;
+ case HNAE3_UNINIT_CLIENT:
+ ret = hns_roce_hw_v2_reset_notify_uninit(handle);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
.init_instance = hns_roce_hw_v2_init_instance,
.uninit_instance = hns_roce_hw_v2_uninit_instance,
+ .reset_notify = hns_roce_hw_v2_reset_notify,
};
static struct hnae3_client hns_roce_hw_v2_client = {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 182b6726f783..d47675f365c7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -76,7 +76,8 @@
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
-#define HNS_ROCE_CMQ_TX_TIMEOUT 200
+#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
+#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_MTT_HOP_NUM 1
@@ -897,8 +898,8 @@ struct hns_roce_v2_mpt_entry {
#define V2_DB_BYTE_4_CMD_S 24
#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
-#define V2_DB_PARAMETER_CONS_IDX_S 0
-#define V2_DB_PARAMETER_CONS_IDX_M GENMASK(15, 0)
+#define V2_DB_PARAMETER_IDX_S 0
+#define V2_DB_PARAMETER_IDX_M GENMASK(15, 0)
#define V2_DB_PARAMETER_SL_S 16
#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 96fb6a9ed93c..21b901cfa2d6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -99,7 +99,6 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
struct ib_gid_attr zattr = { };
- union ib_gid zgid = { {0} };
u8 port = attr->port_num - 1;
unsigned long flags;
int ret;
@@ -333,6 +332,9 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
struct hns_roce_ib_alloc_ucontext_resp resp = {};
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ if (!hr_dev->active)
+ return ERR_PTR(-EAGAIN);
+
resp.qp_tab_size = hr_dev->caps.num_qps;
context = kmalloc(sizeof(*context), GFP_KERNEL);
@@ -343,6 +345,8 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
if (ret)
goto error_fail_uar_alloc;
+ INIT_LIST_HEAD(&context->vma_list);
+ mutex_init(&context->vma_list_mutex);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
mutex_init(&context->page_mutex);
@@ -373,6 +377,50 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
+static void hns_roce_vma_open(struct vm_area_struct *vma)
+{
+ vma->vm_ops = NULL;
+}
+
+static void hns_roce_vma_close(struct vm_area_struct *vma)
+{
+ struct hns_roce_vma_data *vma_data;
+
+ vma_data = (struct hns_roce_vma_data *)vma->vm_private_data;
+ vma_data->vma = NULL;
+ mutex_lock(vma_data->vma_list_mutex);
+ list_del(&vma_data->list);
+ mutex_unlock(vma_data->vma_list_mutex);
+ kfree(vma_data);
+}
+
+static const struct vm_operations_struct hns_roce_vm_ops = {
+ .open = hns_roce_vma_open,
+ .close = hns_roce_vma_close,
+};
+
+static int hns_roce_set_vma_data(struct vm_area_struct *vma,
+ struct hns_roce_ucontext *context)
+{
+ struct list_head *vma_head = &context->vma_list;
+ struct hns_roce_vma_data *vma_data;
+
+ vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL);
+ if (!vma_data)
+ return -ENOMEM;
+
+ vma_data->vma = vma;
+ vma_data->vma_list_mutex = &context->vma_list_mutex;
+ vma->vm_private_data = vma_data;
+ vma->vm_ops = &hns_roce_vm_ops;
+
+ mutex_lock(&context->vma_list_mutex);
+ list_add(&vma_data->list, vma_head);
+ mutex_unlock(&context->vma_list_mutex);
+
+ return 0;
+}
+
static int hns_roce_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
@@ -398,7 +446,7 @@ static int hns_roce_mmap(struct ib_ucontext *context,
} else
return -EINVAL;
- return 0;
+ return hns_roce_set_vma_data(vma, to_hr_ucontext(context));
}
static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
@@ -422,10 +470,30 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
return 0;
}
+static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+ struct hns_roce_vma_data *vma_data, *n;
+ struct vm_area_struct *vma;
+
+ mutex_lock(&context->vma_list_mutex);
+ list_for_each_entry_safe(vma_data, n, &context->vma_list, list) {
+ vma = vma_data->vma;
+ zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
+
+ vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
+ vma->vm_ops = NULL;
+ list_del(&vma_data->list);
+ kfree(vma_data);
+ }
+ mutex_unlock(&context->vma_list_mutex);
+}
+
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
+ hr_dev->active = false;
unregister_netdevice_notifier(&iboe->nb);
ib_unregister_device(&hr_dev->ib_dev);
}
@@ -516,6 +584,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
+ ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
ib_dev->driver_id = RDMA_DRIVER_HNS;
ret = ib_register_device(ib_dev, NULL);
@@ -537,6 +606,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
goto error_failed_setup_mtu_mac;
}
+ hr_dev->active = true;
return 0;
error_failed_setup_mtu_mac:
@@ -729,6 +799,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
return ret;
}
}
+ hr_dev->is_reset = false;
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
@@ -828,6 +899,7 @@ EXPORT_SYMBOL_GPL(hns_roce_init);
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
hns_roce_unregister_device(hr_dev);
+
if (hr_dev->hw->hw_exit)
hr_dev->hw->hw_exit(hr_dev);
hns_roce_cleanup_bitmap(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index f7256d88d38f..d1fe0e7957e3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -1007,12 +1007,6 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
n = ib_umem_page_count(mr->umem);
- if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
- dev_err(dev, "Just support 4K page size but is 0x%lx now!\n",
- BIT(mr->umem->page_shift));
- ret = -EINVAL;
- goto err_umem;
- }
if (!hr_dev->caps.pbl_hop_num) {
if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 4b41e041799c..b9f2c871ff9a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -107,13 +107,15 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
int ret = 0;
/* Using bitmap to manager UAR index */
- ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->index);
+ ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
if (ret == -1)
return -ENOMEM;
- if (uar->index > 0)
- uar->index = (uar->index - 1) %
+ if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
+ uar->index = (uar->logic_idx - 1) %
(hr_dev->caps.phy_num_uars - 1) + 1;
+ else
+ uar->index = 0;
if (!dev_is_pci(hr_dev->dev)) {
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
@@ -132,7 +134,7 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
- hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index,
+ hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->logic_idx,
BITMAP_NO_RR);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index f7c6fd9ff6e2..7b2655128b9f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1519,18 +1519,13 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
/**
* i40iw_find_port - find port that matches reference port
- * @port: port number
+ * @hte: ptr to accelerated or non-accelerated list
* @accelerated_list: flag for accelerated vs non-accelerated list
*/
-static bool i40iw_find_port(struct i40iw_cm_core *cm_core, u16 port,
- bool accelerated_list)
+static bool i40iw_find_port(struct list_head *hte, u16 port)
{
- struct list_head *hte;
struct i40iw_cm_node *cm_node;
- hte = accelerated_list ?
- &cm_core->accelerated_list : &cm_core->non_accelerated_list;
-
list_for_each_entry(cm_node, hte, list) {
if (cm_node->loc_port == port)
return true;
@@ -1540,35 +1535,32 @@ static bool i40iw_find_port(struct i40iw_cm_core *cm_core, u16 port,
/**
* i40iw_port_in_use - determine if port is in use
+ * @cm_core: cm's core
* @port: port number
- * @active_side: flag for listener side vs active side
*/
-static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side)
+bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
{
struct i40iw_cm_listener *listen_node;
unsigned long flags;
- bool ret = false;
- if (active_side) {
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- ret = i40iw_find_port(cm_core, port, true);
- if (!ret)
- ret = i40iw_find_port(cm_core, port, false);
- if (!ret)
- clear_bit(port, cm_core->active_side_ports);
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (i40iw_find_port(&cm_core->accelerated_list, port) ||
+ i40iw_find_port(&cm_core->non_accelerated_list, port)) {
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- } else {
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
- if (listen_node->loc_port == port) {
- ret = true;
- break;
- }
+ return true;
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ if (listen_node->loc_port == port) {
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return true;
}
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
}
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- return ret;
+ return false;
}
/**
@@ -1788,7 +1780,7 @@ static enum i40iw_status_code i40iw_add_mqh_4(
&ifa->ifa_address,
rdma_vlan_dev_vlan_id(dev),
dev->dev_addr);
- child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
@@ -1917,7 +1909,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
if (listener->iwdev) {
- if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false))
+ if (apbvt_del)
i40iw_manage_apbvt(listener->iwdev,
listener->loc_port,
I40IW_MANAGE_APBVT_DEL);
@@ -2298,7 +2290,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
if (cm_node->listener) {
i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
} else {
- if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) {
+ if (cm_node->apbvt_set) {
i40iw_manage_apbvt(cm_node->iwdev,
cm_node->loc_port,
I40IW_MANAGE_APBVT_DEL);
@@ -2872,7 +2864,7 @@ static struct i40iw_cm_listener *i40iw_make_listen_node(
if (!listener) {
/* create a CM listen node (1/2 node to compare incoming traffic to) */
- listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
if (!listener)
return NULL;
cm_core->stats_listen_nodes_created++;
@@ -3244,6 +3236,7 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
spin_lock_init(&cm_core->ht_lock);
spin_lock_init(&cm_core->listen_list_lock);
+ spin_lock_init(&cm_core->apbvt_lock);
cm_core->event_wq = alloc_ordered_workqueue("iwewq",
WQ_MEM_RECLAIM);
@@ -3811,7 +3804,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in6 *laddr6;
struct sockaddr_in6 *raddr6;
int ret = 0;
- unsigned long flags;
ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
@@ -3882,15 +3874,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->qhash_set = true;
}
- spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
- if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) {
- spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
- if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) {
- ret = -EINVAL;
- goto err;
- }
- } else {
- spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ if (i40iw_manage_apbvt(iwdev, cm_info.loc_port,
+ I40IW_MANAGE_APBVT_ADD)) {
+ ret = -EINVAL;
+ goto err;
}
cm_node->apbvt_set = true;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 78ba36ae2bbe..66dc1ba03389 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -413,8 +413,9 @@ struct i40iw_cm_core {
spinlock_t ht_lock; /* manage hash table */
spinlock_t listen_list_lock; /* listen list */
+ spinlock_t apbvt_lock; /*manage apbvt entries*/
- unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)];
+ unsigned long ports_in_use[BITS_TO_LONGS(MAX_PORTS)];
u64 stats_nodes_created;
u64 stats_nodes_destroyed;
@@ -457,4 +458,5 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
struct i40iw_cm_info *nfo,
bool disconnect_all);
+bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port);
#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index c9f62ca7643c..2836c5420d60 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -443,13 +443,37 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port)
{
struct i40iw_apbvt_info *info;
- enum i40iw_status_code status;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
+ unsigned long flags;
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ enum i40iw_status_code status = 0;
+ bool in_use;
+
+ /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
+ * protect against race where add APBVT CQP can race ahead of the delete
+ * APBVT for same port.
+ */
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+
+ if (!add_port) {
+ in_use = i40iw_port_in_use(cm_core, accel_local_port);
+ if (in_use)
+ goto exit;
+ clear_bit(accel_local_port, cm_core->ports_in_use);
+ } else {
+ in_use = test_and_set_bit(accel_local_port,
+ cm_core->ports_in_use);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ if (in_use)
+ return 0;
+ }
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
- if (!cqp_request)
- return -ENOMEM;
+ if (!cqp_request) {
+ status = -ENOMEM;
+ goto exit;
+ }
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_apbvt_entry.info;
@@ -465,6 +489,10 @@ int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool ad
status = i40iw_handle_cqp_op(iwdev, cqp_request);
if (status)
i40iw_pr_err("CQP-OP Manage APBVT entry fail");
+exit:
+ if (!add_port)
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+
return status;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 05001e6da1f8..68095f00d08f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1757,7 +1757,7 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli
return;
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
return;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0793a21d76f4..d604b3d5aa3e 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1934,7 +1934,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
"buf:%lld\n", wc.wr_id);
break;
default:
- BUG_ON(1);
break;
}
} else {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 5b70744f414a..f839bf3b1497 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -276,7 +276,7 @@ static int mlx4_ib_add_gid(const union ib_gid *gid,
found = i;
break;
}
- if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
+ if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
free = i; /* HW has space */
}
@@ -345,7 +345,8 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
if (!ctx->refcount) {
unsigned int real_index = ctx->real_index;
- memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
+ memset(&port_gid_table->gids[real_index].gid, 0,
+ sizeof(port_gid_table->gids[real_index].gid));
kfree(port_gid_table->gids[real_index].ctx);
port_gid_table->gids[real_index].ctx = NULL;
hw_update = 1;
@@ -1185,65 +1186,25 @@ static const struct vm_operations_struct mlx4_ib_vm_ops = {
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
int i;
- int ret = 0;
struct vm_area_struct *vma;
struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- /* make sure that task is dead before returning, it may
- * prevent a rare case of module down in parallel to a
- * call to mlx4_ib_vma_close.
- */
- put_task_struct(owning_process);
- usleep_range(1000, 2000);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the task struct */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
/* need to protect from a race on closing the vma as part of
* mlx4_ib_vma_close().
*/
- down_write(&owning_mm->mmap_sem);
for (i = 0; i < HW_BAR_COUNT; i++) {
vma = context->hw_bar_info[i].vma;
if (!vma)
continue;
- ret = zap_vma_ptes(context->hw_bar_info[i].vma,
- context->hw_bar_info[i].vma->vm_start,
- PAGE_SIZE);
- if (ret) {
- pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
- BUG_ON(1);
- }
+ zap_vma_ptes(context->hw_bar_info[i].vma,
+ context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
context->hw_bar_info[i].vma->vm_flags &=
~(VM_SHARED | VM_MAYSHARE);
/* context going to be destroyed, should not access ops any more */
context->hw_bar_info[i].vma->vm_ops = NULL;
}
-
- up_write(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
}
static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
@@ -1847,7 +1808,7 @@ static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
- int domain)
+ int domain, struct ib_udata *udata)
{
int err = 0, i = 0, j = 0;
struct mlx4_ib_flow *mflow;
@@ -1865,6 +1826,10 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
(flow_attr->type != IB_FLOW_ATTR_NORMAL))
return ERR_PTR(-EOPNOTSUPP);
+ if (udata &&
+ udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
+ return ERR_PTR(-EOPNOTSUPP);
+
memset(type, 0, sizeof(type));
mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
@@ -3050,7 +3015,10 @@ void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
return;
- BUG_ON(qpn < dev->steer_qpn_base);
+ if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
+ qpn, dev->steer_qpn_base))
+ /* not supposed to be here */
+ return;
bitmap_release_region(dev->ib_uc_qpns_bitmap,
qpn - dev->steer_qpn_base,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 61d8b06375bb..ed1f253faf97 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -367,6 +367,40 @@ end:
return block_shift;
}
+static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
+ u64 length, u64 virt_addr,
+ int access_flags)
+{
+ /*
+ * Force registering the memory as writable if the underlying pages
+ * are writable. This is so rereg can change the access permissions
+ * from readable to writable without having to run through ib_umem_get
+ * again
+ */
+ if (!ib_access_writable(access_flags)) {
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ /*
+ * FIXME: Ideally this would iterate over all the vmas that
+ * cover the memory, but for now it requires a single vma to
+ * entirely cover the MR to support RO mappings.
+ */
+ vma = find_vma(current->mm, start);
+ if (vma && vma->vm_end >= start + length &&
+ vma->vm_start <= start) {
+ if (vma->vm_flags & VM_WRITE)
+ access_flags |= IB_ACCESS_LOCAL_WRITE;
+ } else {
+ access_flags |= IB_ACCESS_LOCAL_WRITE;
+ }
+
+ up_read(&current->mm->mmap_sem);
+ }
+
+ return ib_umem_get(context, start, length, access_flags, 0);
+}
+
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@@ -381,10 +415,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- /* Force registering the memory as writable. */
- /* Used for memory re-registeration. HCA protects the access */
- mr->umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags | IB_ACCESS_LOCAL_WRITE, 0);
+ mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
+ virt_addr, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -454,6 +486,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
}
if (flags & IB_MR_REREG_ACCESS) {
+ if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
+ return -EPERM;
+
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
convert_access(mr_access_flags));
@@ -467,10 +502,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
- mmr->umem = ib_umem_get(mr->uobject->context, start, length,
- mr_access_flags |
- IB_ACCESS_LOCAL_WRITE,
- 0);
+ mmr->umem =
+ mlx4_get_umem_mr(mr->uobject->context, start, length,
+ virt_addr, mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 199648adac74..cd2c08c45334 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -3078,7 +3078,7 @@ static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid));
*gid_type = port_gid_table->gids[index].gid_type;
spin_unlock_irqrestore(&iboe->lock, flags);
- if (!memcmp(gid, &zgid, sizeof(*gid)))
+ if (rdma_is_zero_gid(gid))
return -ENOENT;
return 0;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 6d52ea03574e..ad39d64b8108 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -637,7 +637,7 @@ repoll:
}
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
- struct ib_wc *wc)
+ struct ib_wc *wc, bool is_fatal_err)
{
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
struct mlx5_ib_wc *soft_wc, *next;
@@ -650,6 +650,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
cq->mcq.cqn);
+ if (unlikely(is_fatal_err)) {
+ soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
+ soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ }
wc[npolled++] = soft_wc->wc;
list_del(&soft_wc->list);
kfree(soft_wc);
@@ -670,12 +674,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+ /* make sure no soft wqe's are waiting */
+ if (unlikely(!list_empty(&cq->wc_list)))
+ soft_polled = poll_soft_wc(cq, num_entries, wc, true);
+
+ mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
+ wc + soft_polled, &npolled);
goto out;
}
if (unlikely(!list_empty(&cq->wc_list)))
- soft_polled = poll_soft_wc(cq, num_entries, wc);
+ soft_polled = poll_soft_wc(cq, num_entries, wc, false);
for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
@@ -742,6 +751,28 @@ static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
return 0;
}
+enum {
+ MLX5_CQE_RES_FORMAT_HASH = 0,
+ MLX5_CQE_RES_FORMAT_CSUM = 1,
+ MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
+};
+
+static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
+{
+ switch (format) {
+ case MLX5_IB_CQE_RES_FORMAT_HASH:
+ return MLX5_CQE_RES_FORMAT_HASH;
+ case MLX5_IB_CQE_RES_FORMAT_CSUM:
+ return MLX5_CQE_RES_FORMAT_CSUM;
+ case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
+ if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+ return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, u32 **cqb,
@@ -807,6 +838,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*index = to_mucontext(context)->bfregi.sys_pages[0];
if (ucmd.cqe_comp_en == 1) {
+ int mini_cqe_format;
+
if (!((*cqe_size == 128 &&
MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
(*cqe_size == 64 &&
@@ -817,20 +850,18 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
goto err_cqb;
}
- if (unlikely(!ucmd.cqe_comp_res_format ||
- !(ucmd.cqe_comp_res_format <
- MLX5_IB_CQE_RES_RESERVED) ||
- (ucmd.cqe_comp_res_format &
- (ucmd.cqe_comp_res_format - 1)))) {
- err = -EOPNOTSUPP;
- mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
- ucmd.cqe_comp_res_format);
+ mini_cqe_format =
+ mini_cqe_res_format_to_hw(dev,
+ ucmd.cqe_comp_res_format);
+ if (mini_cqe_format < 0) {
+ err = mini_cqe_format;
+ mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
+ ucmd.cqe_comp_res_format, err);
goto err_cqb;
}
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
- MLX5_SET(cqc, cqc, mini_cqe_res_format,
- ilog2(ucmd.cqe_comp_res_format));
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
}
if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 0e04fdddf670..35a0e04c38f2 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*/
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 046fd942fd46..2ba73636a2fb 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*/
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 69716a7ea993..e52dd21519b4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -982,13 +982,21 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
- resp.cqe_comp_caps.max_num =
- MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
- MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
- resp.cqe_comp_caps.supported_format =
- MLX5_IB_CQE_RES_FORMAT_HASH |
- MLX5_IB_CQE_RES_FORMAT_CSUM;
resp.response_length += sizeof(resp.cqe_comp_caps);
+
+ if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
+ resp.cqe_comp_caps.max_num =
+ MLX5_CAP_GEN(dev->mdev,
+ cqe_compression_max_num);
+
+ resp.cqe_comp_caps.supported_format =
+ MLX5_IB_CQE_RES_FORMAT_HASH |
+ MLX5_IB_CQE_RES_FORMAT_CSUM;
+
+ if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+ resp.cqe_comp_caps.supported_format |=
+ MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
+ }
}
if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
@@ -1084,6 +1092,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_GRE;
+ if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_CW_MPLS_GRE)
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
+ if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_CW_MPLS_UDP)
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
if (uhw->outlen) {
@@ -1953,49 +1969,15 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- int ret;
struct vm_area_struct *vma;
struct mlx5_ib_vma_private_data *vma_private, *n;
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- put_task_struct(owning_process);
- usleep_range(1000, 2000);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the
- * task struct.
- */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
-
- /* need to protect from a race on closing the vma as part of
- * mlx5_ib_vma_close.
- */
- down_write(&owning_mm->mmap_sem);
mutex_lock(&context->vma_private_list_mutex);
list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
list) {
vma = vma_private->vma;
- ret = zap_vma_ptes(vma, vma->vm_start,
- PAGE_SIZE);
- WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
+ zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
/* context going to be destroyed, should
* not access ops any more.
*/
@@ -2005,9 +1987,6 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
kfree(vma_private);
}
mutex_unlock(&context->vma_private_list_mutex);
- up_write(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
}
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
@@ -2051,10 +2030,6 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
if (err)
return err;
- mlx5_ib_dbg(dev, "mapped clock info at 0x%lx, PA 0x%llx\n",
- vma->vm_start,
- (unsigned long long)pfn << PAGE_SHIFT);
-
return mlx5_ib_set_vma_data(vma, context);
}
@@ -2149,15 +2124,14 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
err = io_remap_pfn_range(vma, vma->vm_start, pfn,
PAGE_SIZE, vma->vm_page_prot);
if (err) {
- mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
- err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
+ mlx5_ib_err(dev,
+ "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n",
+ err, mmap_cmd2str(cmd));
err = -EAGAIN;
goto err;
}
pa = pfn << PAGE_SHIFT;
- mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
- vma->vm_start, &pa);
err = mlx5_ib_set_vma_data(vma, context);
if (err)
@@ -2243,10 +2217,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
if (io_remap_pfn_range(vma, vma->vm_start, pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
-
- mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
- vma->vm_start,
- (unsigned long long)pfn << PAGE_SHIFT);
break;
case MLX5_IB_MMAP_CLOCK_INFO:
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
@@ -2386,7 +2356,8 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
enum {
MATCH_CRITERIA_ENABLE_OUTER_BIT,
MATCH_CRITERIA_ENABLE_MISC_BIT,
- MATCH_CRITERIA_ENABLE_INNER_BIT
+ MATCH_CRITERIA_ENABLE_INNER_BIT,
+ MATCH_CRITERIA_ENABLE_MISC2_BIT
};
#define HEADER_IS_ZERO(match_criteria, headers) \
@@ -2406,6 +2377,9 @@ static u8 get_match_criteria_enable(u32 *match_criteria)
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
MATCH_CRITERIA_ENABLE_INNER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
+ MATCH_CRITERIA_ENABLE_MISC2_BIT;
return match_criteria_enable;
}
@@ -2440,6 +2414,27 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}
+static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
+{
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
@@ -2448,6 +2443,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id
#define LAST_DROP_FIELD size
+#define LAST_COUNTERS_FIELD counters
/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
@@ -2479,12 +2475,16 @@ static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
u32 *match_v, const union ib_flow_spec *ib_spec,
const struct ib_flow_attr *flow_attr,
- struct mlx5_flow_act *action)
+ struct mlx5_flow_act *action, u32 prev_type)
{
void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
misc_parameters);
void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
misc_parameters);
+ void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters_2);
+ void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ misc_parameters_2);
void *headers_c;
void *headers_v;
int match_ipv;
@@ -2689,6 +2689,93 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
+ case IB_FLOW_SPEC_GRE:
+ if (ib_spec->gre.mask.c_ks_res0_ver)
+ return -EOPNOTSUPP;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ IPPROTO_GRE);
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
+ ntohs(ib_spec->gre.val.protocol));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
+ gre_key_h),
+ &ib_spec->gre.mask.key,
+ sizeof(ib_spec->gre.mask.key));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
+ gre_key_h),
+ &ib_spec->gre.val.key,
+ sizeof(ib_spec->gre.val.key));
+ break;
+ case IB_FLOW_SPEC_MPLS:
+ switch (prev_type) {
+ case IB_FLOW_SPEC_UDP:
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls_over_udp),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls_over_udp),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls_over_udp),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ break;
+ case IB_FLOW_SPEC_GRE:
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls_over_gre),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls_over_gre),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls_over_gre),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ break;
+ default:
+ if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_first_mpls),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ inner_first_mpls),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ inner_first_mpls),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ } else {
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ }
+ }
+ break;
case IB_FLOW_SPEC_VXLAN_TUNNEL:
if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
LAST_TUNNEL_FIELD))
@@ -2720,6 +2807,18 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
if (ret)
return ret;
break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
+ LAST_COUNTERS_FIELD))
+ return -EOPNOTSUPP;
+
+ /* for now support only one counters spec per flow */
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ return -EINVAL;
+
+ action->counters = ib_spec->flow_count.counters;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ break;
default:
return -EINVAL;
}
@@ -2867,6 +2966,17 @@ static void put_flow_table(struct mlx5_ib_dev *dev,
}
}
+static void counters_clear_description(struct ib_counters *counters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ kfree(mcounters->counters_data);
+ mcounters->counters_data = NULL;
+ mcounters->cntrs_max_index = 0;
+ mutex_unlock(&mcounters->mcntrs_mutex);
+}
+
static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
@@ -2886,8 +2996,11 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
mlx5_del_flow_rules(handler->rule);
put_flow_table(dev, handler->prio, true);
- mutex_unlock(&dev->flow_db->lock);
+ if (handler->ibcounters &&
+ atomic_read(&handler->ibcounters->usecnt) == 1)
+ counters_clear_description(handler->ibcounters);
+ mutex_unlock(&dev->flow_db->lock);
kfree(handler);
return 0;
@@ -3007,21 +3120,143 @@ static void set_underlay_qp(struct mlx5_ib_dev *dev,
}
}
+static int read_flow_counters(struct ib_device *ibdev,
+ struct mlx5_read_counters_attr *read_attr)
+{
+ struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ return mlx5_fc_query(dev->mdev, fc,
+ &read_attr->out[IB_COUNTER_PACKETS],
+ &read_attr->out[IB_COUNTER_BYTES]);
+}
+
+/* flow counters currently expose two counters packets and bytes */
+#define FLOW_COUNTERS_NUM 2
+static int counters_set_description(struct ib_counters *counters,
+ enum mlx5_ib_counters_type counters_type,
+ struct mlx5_ib_flow_counters_desc *desc_data,
+ u32 ncounters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+ u32 cntrs_max_index = 0;
+ int i;
+
+ if (counters_type != MLX5_IB_COUNTERS_FLOW)
+ return -EINVAL;
+
+ /* init the fields for the object */
+ mcounters->type = counters_type;
+ mcounters->read_counters = read_flow_counters;
+ mcounters->counters_num = FLOW_COUNTERS_NUM;
+ mcounters->ncounters = ncounters;
+ /* each counter entry has both a description and an index pair */
+ for (i = 0; i < ncounters; i++) {
+ if (desc_data[i].description > IB_COUNTER_BYTES)
+ return -EINVAL;
+
+ if (cntrs_max_index <= desc_data[i].index)
+ cntrs_max_index = desc_data[i].index + 1;
+ }
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ mcounters->counters_data = desc_data;
+ mcounters->cntrs_max_index = cntrs_max_index;
+ mutex_unlock(&mcounters->mcntrs_mutex);
+
+ return 0;
+}
+
+#define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
+static int flow_counters_set_data(struct ib_counters *ibcounters,
+ struct mlx5_ib_create_flow *ucmd)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
+ struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
+ struct mlx5_ib_flow_counters_desc *desc_data = NULL;
+ bool hw_hndl = false;
+ int ret = 0;
+
+ if (ucmd && ucmd->ncounters_data != 0) {
+ cntrs_data = ucmd->data;
+ if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
+ return -EINVAL;
+
+ desc_data = kcalloc(cntrs_data->ncounters,
+ sizeof(*desc_data),
+ GFP_KERNEL);
+ if (!desc_data)
+ return -ENOMEM;
+
+ if (copy_from_user(desc_data,
+ u64_to_user_ptr(cntrs_data->counters_data),
+ sizeof(*desc_data) * cntrs_data->ncounters)) {
+ ret = -EFAULT;
+ goto free;
+ }
+ }
+
+ if (!mcounters->hw_cntrs_hndl) {
+ mcounters->hw_cntrs_hndl = mlx5_fc_create(
+ to_mdev(ibcounters->device)->mdev, false);
+ if (!mcounters->hw_cntrs_hndl) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ hw_hndl = true;
+ }
+
+ if (desc_data) {
+ /* counters already bound to at least one flow */
+ if (mcounters->cntrs_max_index) {
+ ret = -EINVAL;
+ goto free_hndl;
+ }
+
+ ret = counters_set_description(ibcounters,
+ MLX5_IB_COUNTERS_FLOW,
+ desc_data,
+ cntrs_data->ncounters);
+ if (ret)
+ goto free_hndl;
+
+ } else if (!mcounters->cntrs_max_index) {
+ /* counters not bound yet, must have udata passed */
+ ret = -EINVAL;
+ goto free_hndl;
+ }
+
+ return 0;
+
+free_hndl:
+ if (hw_hndl) {
+ mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
+ mcounters->hw_cntrs_hndl);
+ mcounters->hw_cntrs_hndl = NULL;
+ }
+free:
+ kfree(desc_data);
+ return ret;
+}
+
static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
const struct ib_flow_attr *flow_attr,
struct mlx5_flow_destination *dst,
- u32 underlay_qpn)
+ u32 underlay_qpn,
+ struct mlx5_ib_create_flow *ucmd)
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
struct mlx5_flow_spec *spec;
- struct mlx5_flow_destination *rule_dst = dst;
+ struct mlx5_flow_destination dest_arr[2] = {};
+ struct mlx5_flow_destination *rule_dst = dest_arr;
const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
+ u32 prev_type = 0;
int err = 0;
- int dest_num = 1;
+ int dest_num = 0;
bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
if (!is_valid_attr(dev->mdev, flow_attr))
@@ -3035,14 +3270,20 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
}
INIT_LIST_HEAD(&handler->list);
+ if (dst) {
+ memcpy(&dest_arr[0], dst, sizeof(*dst));
+ dest_num++;
+ }
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
err = parse_flow_attr(dev->mdev, spec->match_criteria,
spec->match_value,
- ib_flow, flow_attr, &flow_act);
+ ib_flow, flow_attr, &flow_act,
+ prev_type);
if (err < 0)
goto free;
+ prev_type = ((union ib_flow_spec *)ib_flow)->type;
ib_flow += ((union ib_flow_spec *)ib_flow)->size;
}
@@ -3069,15 +3310,30 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
goto free;
}
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ err = flow_counters_set_data(flow_act.counters, ucmd);
+ if (err)
+ goto free;
+
+ handler->ibcounters = flow_act.counters;
+ dest_arr[dest_num].type =
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest_arr[dest_num].counter =
+ to_mcounters(flow_act.counters)->hw_cntrs_hndl;
+ dest_num++;
+ }
+
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
- rule_dst = NULL;
- dest_num = 0;
+ if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
+ rule_dst = NULL;
+ dest_num = 0;
+ }
} else {
if (is_egress)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
else
flow_act.action |=
- dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+ dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
}
@@ -3103,8 +3359,12 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
ft_prio->flow_table = ft;
free:
- if (err)
+ if (err && handler) {
+ if (handler->ibcounters &&
+ atomic_read(&handler->ibcounters->usecnt) == 1)
+ counters_clear_description(handler->ibcounters);
kfree(handler);
+ }
kvfree(spec);
return err ? ERR_PTR(err) : handler;
}
@@ -3114,7 +3374,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
const struct ib_flow_attr *flow_attr,
struct mlx5_flow_destination *dst)
{
- return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0);
+ return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}
static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
@@ -3244,7 +3504,8 @@ err:
static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
- int domain)
+ int domain,
+ struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
@@ -3253,9 +3514,44 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
struct mlx5_ib_flow_prio *ft_prio;
bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
+ struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
+ size_t min_ucmd_sz, required_ucmd_sz;
int err;
int underlay_qpn;
+ if (udata && udata->inlen) {
+ min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
+ sizeof(ucmd_hdr.reserved);
+ if (udata->inlen < min_ucmd_sz)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
+ if (err)
+ return ERR_PTR(err);
+
+ /* currently supports only one counters data */
+ if (ucmd_hdr.ncounters_data > 1)
+ return ERR_PTR(-EINVAL);
+
+ required_ucmd_sz = min_ucmd_sz +
+ sizeof(struct mlx5_ib_flow_counters_data) *
+ ucmd_hdr.ncounters_data;
+ if (udata->inlen > required_ucmd_sz &&
+ !ib_is_udata_cleared(udata, required_ucmd_sz,
+ udata->inlen - required_ucmd_sz))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
+ if (!ucmd)
+ return ERR_PTR(-ENOMEM);
+
+ err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
+ if (err) {
+ kfree(ucmd);
+ return ERR_PTR(err);
+ }
+ }
+
if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
return ERR_PTR(-ENOMEM);
@@ -3309,7 +3605,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
mqp->underlay_qpn : 0;
handler = _create_flow_rule(dev, ft_prio, flow_attr,
- dst, underlay_qpn);
+ dst, underlay_qpn, ucmd);
}
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
@@ -3330,6 +3626,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
mutex_unlock(&dev->flow_db->lock);
kfree(dst);
+ kfree(ucmd);
return &handler->ibflow;
@@ -3340,6 +3637,7 @@ destroy_ft:
unlock:
mutex_unlock(&dev->flow_db->lock);
kfree(dst);
+ kfree(ucmd);
kfree(handler);
return ERR_PTR(err);
}
@@ -5000,6 +5298,76 @@ static void depopulate_specs_root(struct mlx5_ib_dev *dev)
uverbs_free_spec_tree(dev->ib_dev.specs_root);
}
+static int mlx5_ib_read_counters(struct ib_counters *counters,
+ struct ib_counters_read_attr *read_attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+ struct mlx5_read_counters_attr mread_attr = {};
+ struct mlx5_ib_flow_counters_desc *desc;
+ int ret, i;
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ if (mcounters->cntrs_max_index > read_attr->ncounters) {
+ ret = -EINVAL;
+ goto err_bound;
+ }
+
+ mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
+ GFP_KERNEL);
+ if (!mread_attr.out) {
+ ret = -ENOMEM;
+ goto err_bound;
+ }
+
+ mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
+ mread_attr.flags = read_attr->flags;
+ ret = mcounters->read_counters(counters->device, &mread_attr);
+ if (ret)
+ goto err_read;
+
+ /* iterate over the counters data array and accumulate each value
+ * according to its description/index pair
+ */
+ desc = mcounters->counters_data;
+ for (i = 0; i < mcounters->ncounters; i++)
+ read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
+
+err_read:
+ kfree(mread_attr.out);
+err_bound:
+ mutex_unlock(&mcounters->mcntrs_mutex);
+ return ret;
+}
+
+static int mlx5_ib_destroy_counters(struct ib_counters *counters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+
+ counters_clear_description(counters);
+ if (mcounters->hw_cntrs_hndl)
+ mlx5_fc_destroy(to_mdev(counters->device)->mdev,
+ mcounters->hw_cntrs_hndl);
+
+ kfree(mcounters);
+
+ return 0;
+}
+
+static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_mcounters *mcounters;
+
+ mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
+ if (!mcounters)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&mcounters->mcntrs_mutex);
+
+ return &mcounters->ibcntrs;
+}
+
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
@@ -5243,6 +5611,9 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action;
dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp;
dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
+ dev->ib_dev.create_counters = mlx5_ib_create_counters;
+ dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters;
+ dev->ib_dev.read_counters = mlx5_ib_read_counters;
err = init_node_data(dev);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 49a1aa0ff429..d89c8fe626f6 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -175,6 +175,7 @@ struct mlx5_ib_flow_handler {
struct ib_flow ibflow;
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_handle *rule;
+ struct ib_counters *ibcounters;
};
struct mlx5_ib_flow_db {
@@ -813,6 +814,41 @@ struct mlx5_memic {
DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};
+struct mlx5_read_counters_attr {
+ struct mlx5_fc *hw_cntrs_hndl;
+ u64 *out;
+ u32 flags;
+};
+
+enum mlx5_ib_counters_type {
+ MLX5_IB_COUNTERS_FLOW,
+};
+
+struct mlx5_ib_mcounters {
+ struct ib_counters ibcntrs;
+ enum mlx5_ib_counters_type type;
+ /* number of counters supported for this counters type */
+ u32 counters_num;
+ struct mlx5_fc *hw_cntrs_hndl;
+ /* read function for this counters type */
+ int (*read_counters)(struct ib_device *ibdev,
+ struct mlx5_read_counters_attr *read_attr);
+ /* max index set as part of create_flow */
+ u32 cntrs_max_index;
+ /* number of counters data entries (<description,index> pair) */
+ u32 ncounters;
+ /* counters data array for descriptions and indexes */
+ struct mlx5_ib_flow_counters_desc *counters_data;
+ /* protects access to mcounters internal data */
+ struct mutex mcntrs_mutex;
+};
+
+static inline struct mlx5_ib_mcounters *
+to_mcounters(struct ib_counters *ibcntrs)
+{
+ return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
+}
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2193dc1765fb..a4f1f638509f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -54,6 +54,7 @@ enum {
enum {
MLX5_IB_SQ_STRIDE = 6,
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};
static const u32 mlx5_ib_opcode[] = {
@@ -302,7 +303,9 @@ static int sq_overhead(struct ib_qp_init_attr *attr)
max(sizeof(struct mlx5_wqe_atomic_seg) +
sizeof(struct mlx5_wqe_raddr_seg),
sizeof(struct mlx5_wqe_umr_ctrl_seg) +
- sizeof(struct mlx5_mkey_seg));
+ sizeof(struct mlx5_mkey_seg) +
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
+ MLX5_IB_UMR_OCTOWORD);
break;
case IB_QPT_XRC_TGT:
@@ -3641,13 +3644,15 @@ static __be64 sig_mkey_mask(void)
}
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr)
+ struct mlx5_ib_mr *mr, bool umr_inline)
{
int size = mr->ndescs * mr->desc_size;
memset(umr, 0, sizeof(*umr));
umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ if (umr_inline)
+ umr->flags |= MLX5_UMR_INLINE;
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
umr->mkey_mask = frwr_mkey_mask();
}
@@ -3831,6 +3836,24 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}
+static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp,
+ struct mlx5_ib_mr *mr, int mr_list_size)
+{
+ void *qend = qp->sq.qend;
+ void *addr = mr->descs;
+ int copy;
+
+ if (unlikely(seg + mr_list_size > qend)) {
+ copy = qend - seg;
+ memcpy(seg, addr, copy);
+ addr += copy;
+ mr_list_size -= copy;
+ seg = mlx5_get_send_wqe(qp, 0);
+ }
+ memcpy(seg, addr, mr_list_size);
+ seg += mr_list_size;
+}
+
static __be32 send_ieth(struct ib_send_wr *wr)
{
switch (wr->opcode) {
@@ -4225,6 +4248,8 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ int mr_list_size = mr->ndescs * mr->desc_size;
+ bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
mlx5_ib_warn(to_mdev(qp->ibqp.device),
@@ -4232,7 +4257,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
return -EINVAL;
}
- set_reg_umr_seg(*seg, mr);
+ set_reg_umr_seg(*seg, mr, umr_inline);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((*seg == qp->sq.qend)))
@@ -4244,10 +4269,14 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
if (unlikely((*seg == qp->sq.qend)))
*seg = mlx5_get_send_wqe(qp, 0);
- set_reg_data_seg(*seg, mr, pd);
- *seg += sizeof(struct mlx5_wqe_data_seg);
- *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
-
+ if (umr_inline) {
+ set_reg_umr_inline_seg(*seg, qp, mr, mr_list_size);
+ *size += get_xlt_octo(mr_list_size);
+ } else {
+ set_reg_data_seg(*seg, mr, pd);
+ *seg += sizeof(struct mlx5_wqe_data_seg);
+ *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+ }
return 0;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index e2caabb8a926..710032f1fad7 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -414,7 +414,7 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
DP_ERR(dev,
- "failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n",
+ "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
(void *)vma->vm_start, (void *)vma->vm_end);
return -EINVAL;
}
@@ -2577,7 +2577,7 @@ static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
u32 pbes_in_page;
if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
- DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages);
+ DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 46072455130c..3461df002f81 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1228,6 +1228,7 @@ static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
#define QIB_BADINTR 0x8000 /* severe interrupt problems */
#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
+#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
/*
* values for ppd->lflags (_ib_port_ related flags)
@@ -1423,8 +1424,7 @@ u64 qib_sps_ints(void);
/*
* dma_addr wrappers - all 0's invalid for hw
*/
-dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
- size_t, int);
+int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
/*
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 6a8800b65047..98e1ce14fa2a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
goto done;
}
for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
+ dma_addr_t daddr;
+
for (; ntids--; tid++) {
if (tid == tidcnt)
tid = 0;
@@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
ret = -ENOMEM;
break;
}
+ ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
+ if (ret)
+ break;
+
tidlist[i] = tid + tidoff;
/* we "know" system pages and TID pages are same size */
dd->pageshadow[ctxttid + tid] = pagep[i];
- dd->physshadow[ctxttid + tid] =
- qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dd->physshadow[ctxttid + tid] = daddr;
/*
* don't need atomic or it's overhead
*/
@@ -868,7 +872,7 @@ bail:
/*
* qib_file_vma_fault - handle a VMA page fault.
*/
-static int qib_file_vma_fault(struct vm_fault *vmf)
+static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf)
{
struct page *page;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 6c68f8a97018..015520289735 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -841,6 +841,10 @@ static void qib_shutdown_device(struct qib_devdata *dd)
struct qib_pportdata *ppd;
unsigned pidx;
+ if (dd->flags & QIB_SHUTDOWN)
+ return;
+ dd->flags |= QIB_SHUTDOWN;
+
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1182,6 +1186,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
+static void qib_shutdown_one(struct pci_dev *);
#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
@@ -1199,6 +1204,7 @@ static struct pci_driver qib_driver = {
.name = QIB_DRV_NAME,
.probe = qib_init_one,
.remove = qib_remove_one,
+ .shutdown = qib_shutdown_one,
.id_table = qib_pci_tbl,
.err_handler = &qib_pci_err_handler,
};
@@ -1549,6 +1555,13 @@ static void qib_remove_one(struct pci_dev *pdev)
qib_postinit_cleanup(dd);
}
+static void qib_shutdown_one(struct pci_dev *pdev)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+
+ qib_shutdown_device(dd);
+}
+
/**
* qib_create_rcvhdrq - create a receive header queue
* @dd: the qlogic_ib device
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index c9955d48c50f..f35fdeb14347 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1828,7 +1828,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
/* OK, process the packet. */
switch (opcode) {
case OP(SEND_FIRST):
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -1849,7 +1849,7 @@ send_middle:
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
/* consume RWQE */
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -1858,7 +1858,7 @@ send_middle:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -1949,7 +1949,7 @@ send_last:
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
goto no_immediate_data;
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret) {
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 4662cc7bde92..f8a7de795beb 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -38,156 +38,6 @@
#include "qib_mad.h"
/*
- * Validate a RWQE and fill in the SGE state.
- * Return 1 if OK.
- */
-static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
-{
- int i, j, ret;
- struct ib_wc wc;
- struct rvt_lkey_table *rkt;
- struct rvt_pd *pd;
- struct rvt_sge_state *ss;
-
- rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
- pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
- ss = &qp->r_sge;
- ss->sg_list = qp->r_sg_list;
- qp->r_len = 0;
- for (i = j = 0; i < wqe->num_sge; i++) {
- if (wqe->sg_list[i].length == 0)
- continue;
- /* Check LKEY */
- ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- NULL, &wqe->sg_list[i],
- IB_ACCESS_LOCAL_WRITE);
- if (unlikely(ret <= 0))
- goto bad_lkey;
- qp->r_len += wqe->sg_list[i].length;
- j++;
- }
- ss->num_sge = j;
- ss->total_len = qp->r_len;
- ret = 1;
- goto bail;
-
-bad_lkey:
- while (j) {
- struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
-
- rvt_put_mr(sge->mr);
- }
- ss->num_sge = 0;
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr_id;
- wc.status = IB_WC_LOC_PROT_ERR;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- /* Signal solicited completion event. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * qib_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return -1 if there is a local error, 0 if no RWQE is available,
- * otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
-{
- unsigned long flags;
- struct rvt_rq *rq;
- struct rvt_rwq *wq;
- struct rvt_srq *srq;
- struct rvt_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- u32 tail;
- int ret;
-
- if (qp->ibqp.srq) {
- srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ret = 0;
- goto unlock;
- }
-
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- if (unlikely(tail == wq->head)) {
- ret = 0;
- goto unlock;
- }
- /* Make sure entry is read after head index is read. */
- smp_rmb();
- wqe = rvt_get_rwqe_ptr(rq, tail);
- /*
- * Even though we update the tail index in memory, the verbs
- * consumer is not supposed to post more entries until a
- * completion is generated.
- */
- if (++tail >= rq->size)
- tail = 0;
- wq->tail = tail;
- if (!wr_id_only && !qib_init_sge(qp, wqe)) {
- ret = -1;
- goto unlock;
- }
- qp->r_wr_id = wqe->wr_id;
-
- ret = 1;
- set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
- if (handler) {
- u32 n;
-
- /*
- * Validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- goto bail;
- }
- }
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
-bail:
- return ret;
-}
-
-/*
* Switch to alternate path.
* The QP s_lock should be held and interrupts disabled.
*/
@@ -419,7 +269,7 @@ again:
wc.ex.imm_data = wqe->wr.ex.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -431,7 +281,7 @@ again:
goto inv_err;
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 840eec6ebc33..3e54bc11e0ae 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -335,7 +335,7 @@ send_first:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
qp->r_sge = qp->s_rdma_read_sge;
else {
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -471,7 +471,7 @@ rdma_last_imm:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
rvt_put_ss(&qp->s_rdma_read_sge);
else {
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 3e4ff77260c2..f8d029a2390f 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -139,7 +139,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
else {
int ret;
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock;
@@ -534,7 +534,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
else {
int ret;
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index ce83ba9a12ef..16543d5e80c3 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -99,23 +99,27 @@ bail:
*
* I'm sure we won't be so lucky with other iommu's, so FIXME.
*/
-dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
+int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
{
dma_addr_t phys;
- phys = pci_map_page(hwdev, page, offset, size, direction);
+ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(hwdev, phys))
+ return -ENOMEM;
- if (phys == 0) {
- pci_unmap_page(hwdev, phys, size, direction);
- phys = pci_map_page(hwdev, page, offset, size, direction);
+ if (!phys) {
+ pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(hwdev, phys))
+ return -ENOMEM;
/*
* FIXME: If we get 0 again, we should keep this page,
* map another, then free the 0 page.
*/
}
-
- return phys;
+ *daddr = phys;
+ return 0;
}
/**
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 3977abbc83ad..14b4057a2b8f 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -1631,10 +1631,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
- snprintf(dd->verbs_dev.rdi.dparms.cq_name,
- sizeof(dd->verbs_dev.rdi.dparms.cq_name),
- "qib_cq%d", dd->unit);
-
qib_fill_device_attr(dd);
ppd = dd->pport;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index f887737ac142..f9a46768a19a 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -321,8 +321,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
void mr_rcu_callback(struct rcu_head *list);
-int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-
void qib_migrate_qp(struct rvt_qp *qp);
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index 2b5513da7e83..98e798007f75 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
- depends on 64BIT
+ depends on 64BIT && ARCH_DMA_ADDR_T_64BIT
depends on PCI
select DMA_VIRT_OPS
---help---
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index fb52b669bfce..4f1544ad4aff 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -47,11 +47,12 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/kthread.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"
+static struct workqueue_struct *comp_vector_wq;
+
/**
* rvt_cq_enter - add a new entry to the completion queue
* @cq: completion queue
@@ -124,20 +125,17 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
* This will cause send_complete() to be called in
* another thread.
*/
- spin_lock(&cq->rdi->n_cqs_lock);
- if (likely(cq->rdi->worker)) {
- cq->notify = RVT_CQ_NONE;
- cq->triggered++;
- kthread_queue_work(cq->rdi->worker, &cq->comptask);
- }
- spin_unlock(&cq->rdi->n_cqs_lock);
+ cq->notify = RVT_CQ_NONE;
+ cq->triggered++;
+ queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
+ &cq->comptask);
}
spin_unlock_irqrestore(&cq->lock, flags);
}
EXPORT_SYMBOL(rvt_cq_enter);
-static void send_complete(struct kthread_work *work)
+static void send_complete(struct work_struct *work)
{
struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
@@ -189,6 +187,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
struct ib_cq *ret;
u32 sz;
unsigned int entries = attr->cqe;
+ int comp_vector = attr->comp_vector;
if (attr->flags)
return ERR_PTR(-EINVAL);
@@ -196,6 +195,11 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
if (entries < 1 || entries > rdi->dparms.props.max_cqe)
return ERR_PTR(-EINVAL);
+ if (comp_vector < 0)
+ comp_vector = 0;
+
+ comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;
+
/* Allocate the completion queue structure. */
cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
if (!cq)
@@ -264,14 +268,22 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
* an error.
*/
cq->rdi = rdi;
+ if (rdi->driver_f.comp_vect_cpu_lookup)
+ cq->comp_vector_cpu =
+ rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
+ else
+ cq->comp_vector_cpu =
+ cpumask_first(cpumask_of_node(rdi->dparms.node));
+
cq->ibcq.cqe = entries;
cq->notify = RVT_CQ_NONE;
spin_lock_init(&cq->lock);
- kthread_init_work(&cq->comptask, send_complete);
+ INIT_WORK(&cq->comptask, send_complete);
cq->queue = wc;
ret = &cq->ibcq;
+ trace_rvt_create_cq(cq, attr);
goto done;
bail_ip:
@@ -297,7 +309,7 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
struct rvt_dev_info *rdi = cq->rdi;
- kthread_flush_work(&cq->comptask);
+ flush_work(&cq->comptask);
spin_lock_irq(&rdi->n_cqs_lock);
rdi->n_cqs_allocated--;
spin_unlock_irq(&rdi->n_cqs_lock);
@@ -507,24 +519,13 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
*
* Return: 0 on success
*/
-int rvt_driver_cq_init(struct rvt_dev_info *rdi)
+int rvt_driver_cq_init(void)
{
- int cpu;
- struct kthread_worker *worker;
-
- if (rdi->worker)
- return 0;
-
- spin_lock_init(&rdi->n_cqs_lock);
-
- cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
- worker = kthread_create_worker_on_cpu(cpu, 0,
- "%s", rdi->dparms.cq_name);
- if (IS_ERR(worker))
- return PTR_ERR(worker);
+ comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+ 0, "rdmavt_cq");
+ if (!comp_vector_wq)
+ return -ENOMEM;
- set_user_nice(worker->task, MIN_NICE);
- rdi->worker = worker;
return 0;
}
@@ -532,19 +533,8 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
* rvt_cq_exit - tear down cq reources
* @rdi: rvt dev structure
*/
-void rvt_cq_exit(struct rvt_dev_info *rdi)
+void rvt_cq_exit(void)
{
- struct kthread_worker *worker;
-
- /* block future queuing from send_complete() */
- spin_lock_irq(&rdi->n_cqs_lock);
- worker = rdi->worker;
- if (!worker) {
- spin_unlock_irq(&rdi->n_cqs_lock);
- return;
- }
- rdi->worker = NULL;
- spin_unlock_irq(&rdi->n_cqs_lock);
-
- kthread_destroy_worker(worker);
+ destroy_workqueue(comp_vector_wq);
+ comp_vector_wq = NULL;
}
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
index 6182c29eff66..72184b1c176b 100644
--- a/drivers/infiniband/sw/rdmavt/cq.h
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -2,7 +2,7 @@
#define DEF_RVTCQ_H
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -59,6 +59,6 @@ int rvt_destroy_cq(struct ib_cq *ibcq);
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-int rvt_driver_cq_init(struct rvt_dev_info *rdi);
-void rvt_cq_exit(struct rvt_dev_info *rdi);
+int rvt_driver_cq_init(void);
+void rvt_cq_exit(void);
#endif /* DEF_RVTCQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index c82e6bb3d77c..40046135c509 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1987,6 +1987,155 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
return 0;
}
+/*
+ * Validate a RWQE and fill in the SGE state.
+ * Return 1 if OK.
+ */
+static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
+{
+ int i, j, ret;
+ struct ib_wc wc;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_sge_state *ss;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ rkt = &rdi->lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ ss = &qp->r_sge;
+ ss->sg_list = qp->r_sg_list;
+ qp->r_len = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ NULL, &wqe->sg_list[i],
+ IB_ACCESS_LOCAL_WRITE);
+ if (unlikely(ret <= 0))
+ goto bad_lkey;
+ qp->r_len += wqe->sg_list[i].length;
+ j++;
+ }
+ ss->num_sge = j;
+ ss->total_len = qp->r_len;
+ return 1;
+
+bad_lkey:
+ while (j) {
+ struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+
+ rvt_put_mr(sge->mr);
+ }
+ ss->num_sge = 0;
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ /* Signal solicited completion event. */
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ return 0;
+}
+
+/**
+ * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
+ * @qp: the QP
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
+ *
+ * Return -1 if there is a local error, 0 if no RWQE is available,
+ * otherwise return 1.
+ *
+ * Can be called from interrupt level.
+ */
+int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
+{
+ unsigned long flags;
+ struct rvt_rq *rq;
+ struct rvt_rwq *wq;
+ struct rvt_srq *srq;
+ struct rvt_rwqe *wqe;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ int ret;
+
+ if (qp->ibqp.srq) {
+ srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ handler = NULL;
+ rq = &qp->r_rq;
+ }
+
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ wq = rq->wq;
+ tail = wq->tail;
+ /* Validate tail before using it since it is user writable. */
+ if (tail >= rq->size)
+ tail = 0;
+ if (unlikely(tail == wq->head)) {
+ ret = 0;
+ goto unlock;
+ }
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
+ wqe = rvt_get_rwqe_ptr(rq, tail);
+ /*
+ * Even though we update the tail index in memory, the verbs
+ * consumer is not supposed to post more entries until a
+ * completion is generated.
+ */
+ if (++tail >= rq->size)
+ tail = 0;
+ wq->tail = tail;
+ if (!wr_id_only && !init_sge(qp, wqe)) {
+ ret = -1;
+ goto unlock;
+ }
+ qp->r_wr_id = wqe->wr_id;
+
+ ret = 1;
+ set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
+ if (handler) {
+ u32 n;
+
+ /*
+ * Validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ n = wq->head;
+ if (n >= rq->size)
+ n = 0;
+ if (n < tail)
+ n += rq->size - tail;
+ else
+ n -= tail;
+ if (n < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ handler(&ev, srq->ibsrq.srq_context);
+ goto bail;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rvt_get_rwqe);
+
/**
* qp_comm_est - handle trap with QP established
* @qp: the QP
@@ -2076,7 +2225,7 @@ void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
to = rvt_aeth_to_usec(aeth);
trace_rvt_rnrnak_add(qp, to);
hrtimer_start(&qp->s_rnr_timer,
- ns_to_ktime(1000 * to), HRTIMER_MODE_REL);
+ ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
diff --git a/drivers/infiniband/sw/rdmavt/trace_cq.h b/drivers/infiniband/sw/rdmavt/trace_cq.h
index a315850aa9bb..df8e1adbef9d 100644
--- a/drivers/infiniband/sw/rdmavt/trace_cq.h
+++ b/drivers/infiniband/sw/rdmavt/trace_cq.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -71,6 +71,39 @@ __print_symbolic(opcode, \
wc_opcode_name(RECV), \
wc_opcode_name(RECV_RDMA_WITH_IMM))
+#define CQ_ATTR_PRINT \
+"[%s] user cq %s cqe %u comp_vector %d comp_vector_cpu %d flags %x"
+
+DECLARE_EVENT_CLASS(rvt_cq_template,
+ TP_PROTO(struct rvt_cq *cq,
+ const struct ib_cq_init_attr *attr),
+ TP_ARGS(cq, attr),
+ TP_STRUCT__entry(RDI_DEV_ENTRY(cq->rdi)
+ __field(struct rvt_mmap_info *, ip)
+ __field(unsigned int, cqe)
+ __field(int, comp_vector)
+ __field(int, comp_vector_cpu)
+ __field(u32, flags)
+ ),
+ TP_fast_assign(RDI_DEV_ASSIGN(cq->rdi)
+ __entry->ip = cq->ip;
+ __entry->cqe = attr->cqe;
+ __entry->comp_vector = attr->comp_vector;
+ __entry->comp_vector_cpu =
+ cq->comp_vector_cpu;
+ __entry->flags = attr->flags;
+ ),
+ TP_printk(CQ_ATTR_PRINT, __get_str(dev),
+ __entry->ip ? "true" : "false", __entry->cqe,
+ __entry->comp_vector, __entry->comp_vector_cpu,
+ __entry->flags
+ )
+);
+
+DEFINE_EVENT(rvt_cq_template, rvt_create_cq,
+ TP_PROTO(struct rvt_cq *cq, const struct ib_cq_init_attr *attr),
+ TP_ARGS(cq, attr));
+
#define CQ_PRN \
"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x"
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 434199d0bc96..17e4abc067af 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -49,6 +49,7 @@
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include "vt.h"
+#include "cq.h"
#include "trace.h"
#define RVT_UVERBS_ABI_VERSION 2
@@ -58,21 +59,18 @@ MODULE_DESCRIPTION("RDMA Verbs Transport Library");
static int rvt_init(void)
{
- /*
- * rdmavt does not need to do anything special when it starts up. All it
- * needs to do is sit and wait until a driver attempts registration.
- */
- return 0;
+ int ret = rvt_driver_cq_init();
+
+ if (ret)
+ pr_err("Error in driver CQ init.\n");
+
+ return ret;
}
module_init(rvt_init);
static void rvt_cleanup(void)
{
- /*
- * Nothing to do at exit time either. The module won't be able to be
- * removed until all drivers are gone which means all the dev structs
- * are gone so there is really nothing to do.
- */
+ rvt_cq_exit();
}
module_exit(rvt_cleanup);
@@ -777,11 +775,7 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
}
/* Completion queues */
- ret = rvt_driver_cq_init(rdi);
- if (ret) {
- pr_err("Error in driver CQ init.\n");
- goto bail_mr;
- }
+ spin_lock_init(&rdi->n_cqs_lock);
/* DMA Operations */
rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? : &dma_virt_ops;
@@ -829,14 +823,15 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
rdi->ibdev.node_type = RDMA_NODE_IB_CA;
- rdi->ibdev.num_comp_vectors = 1;
+ if (!rdi->ibdev.num_comp_vectors)
+ rdi->ibdev.num_comp_vectors = 1;
rdi->ibdev.driver_id = driver_id;
/* We are now good to announce we exist */
ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
if (ret) {
rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
- goto bail_cq;
+ goto bail_mr;
}
rvt_create_mad_agents(rdi);
@@ -844,9 +839,6 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
rvt_pr_info(rdi, "Registration with rdmavt done.\n");
return ret;
-bail_cq:
- rvt_cq_exit(rdi);
-
bail_mr:
rvt_mr_exit(rdi);
@@ -870,7 +862,6 @@ void rvt_unregister_device(struct rvt_dev_info *rdi)
rvt_free_mad_agents(rdi);
ib_unregister_device(&rdi->ibdev);
- rvt_cq_exit(rdi);
rvt_mr_exit(rdi);
rvt_qp_exit(rdi);
}
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index bad4a576d7cf..67ae960ab523 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,6 +1,7 @@
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
+ depends on !64BIT || ARCH_DMA_ADDR_T_64BIT
select NET_UDP_TUNNEL
select CRYPTO_CRC32
select DMA_VIRT_OPS
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index e493fdbd61c6..7121e1b1eb89 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -291,7 +291,7 @@ err1:
return err;
}
-int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
+void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
{
struct rxe_port *port = &rxe->port;
enum ib_mtu mtu;
@@ -303,10 +303,7 @@ int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
port->attr.active_mtu = mtu;
port->mtu_cap = ib_mtu_enum_to_int(mtu);
-
- return 0;
}
-EXPORT_SYMBOL(rxe_set_mtu);
/* called by ifc layer to create new rxe device.
* The caller should allocate memory for rxe by calling ib_alloc_device.
@@ -321,9 +318,7 @@ int rxe_add(struct rxe_dev *rxe, unsigned int mtu)
if (err)
goto err1;
- err = rxe_set_mtu(rxe, mtu);
- if (err)
- goto err1;
+ rxe_set_mtu(rxe, mtu);
err = rxe_register_device(rxe);
if (err)
@@ -335,7 +330,6 @@ err1:
rxe_dev_put(rxe);
return err;
}
-EXPORT_SYMBOL(rxe_add);
/* called by the ifc layer to remove a device */
void rxe_remove(struct rxe_dev *rxe)
@@ -344,7 +338,6 @@ void rxe_remove(struct rxe_dev *rxe)
rxe_dev_put(rxe);
}
-EXPORT_SYMBOL(rxe_remove);
static int __init rxe_module_init(void)
{
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 561ad307c6ec..d9ec2de68738 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -92,13 +92,13 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
return retval;
}
-int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu);
void rxe_remove(struct rxe_dev *rxe);
void rxe_remove_all(void);
-int rxe_rcv(struct sk_buff *skb);
+void rxe_rcv(struct sk_buff *skb);
static inline void rxe_dev_put(struct rxe_dev *rxe)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 6cdc40ed8a9f..98d470d1f3fc 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -355,10 +355,9 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
int ret;
- ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
payload_size(pkt), to_mem_obj, NULL);
if (ret)
@@ -374,12 +373,11 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
int ret;
u64 atomic_orig = atmack_orig(pkt);
- ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
sizeof(u64), to_mem_obj, NULL);
if (ret)
@@ -661,7 +659,6 @@ int rxe_completer(void *arg)
qp->qp_timeout_jiffies)
mod_timer(&qp->retrans_timer,
jiffies + qp->qp_timeout_jiffies);
- WARN_ON_ONCE(skb);
goto exit;
case COMPST_ERROR_RETRY:
@@ -675,7 +672,6 @@ int rxe_completer(void *arg)
/* there is nothing to retry in this case */
if (!wqe || (wqe->state == wqe_state_posted)) {
- WARN_ON_ONCE(skb);
goto exit;
}
@@ -704,7 +700,6 @@ int rxe_completer(void *arg)
skb = NULL;
}
- WARN_ON_ONCE(skb);
goto exit;
} else {
@@ -748,7 +743,6 @@ int rxe_completer(void *arg)
skb = NULL;
}
- WARN_ON_ONCE(skb);
goto exit;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index b71023c1c58b..a51ece596c43 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -106,20 +106,20 @@ enum copy_direction {
from_mem_obj,
};
-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
int access, struct rxe_mem *mem);
-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
u64 length, u64 iova, int access, struct ib_udata *udata,
struct rxe_mem *mr);
-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
int max_pages, struct rxe_mem *mem);
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
int length, enum copy_direction dir, u32 *crcp);
-int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
+int copy_data(struct rxe_pd *pd, int access,
struct rxe_dma_info *dma, void *addr, int length,
enum copy_direction dir, u32 *crcp);
@@ -143,7 +143,7 @@ void rxe_mem_cleanup(struct rxe_pool_entry *arg);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
/* rxe_net.c */
-int rxe_loopback(struct sk_buff *skb);
+void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt);
@@ -268,7 +268,8 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
if (pkt->mask & RXE_LOOPBACK_MASK) {
memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
- err = rxe_loopback(skb);
+ rxe_loopback(skb);
+ err = 0;
} else {
err = rxe_send(pkt, skb);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 5c2684bf430f..dff605fdf60f 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -107,7 +107,7 @@ void rxe_mem_cleanup(struct rxe_pool_entry *arg)
}
}
-static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
+static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
{
int i;
int num_map;
@@ -145,7 +145,7 @@ err1:
return -ENOMEM;
}
-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
int access, struct rxe_mem *mem)
{
rxe_mem_init(access, mem);
@@ -158,7 +158,7 @@ int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
return 0;
}
-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
u64 length, u64 iova, int access, struct ib_udata *udata,
struct rxe_mem *mem)
{
@@ -184,7 +184,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
rxe_mem_init(access, mem);
- err = rxe_mem_alloc(rxe, mem, num_buf);
+ err = rxe_mem_alloc(mem, num_buf);
if (err) {
pr_warn("err %d from rxe_mem_alloc\n", err);
ib_umem_release(umem);
@@ -236,7 +236,7 @@ err1:
return err;
}
-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
int max_pages, struct rxe_mem *mem)
{
int err;
@@ -246,7 +246,7 @@ int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
/* In fastreg, we also set the rkey */
mem->ibmr.rkey = mem->ibmr.lkey;
- err = rxe_mem_alloc(rxe, mem, max_pages);
+ err = rxe_mem_alloc(mem, max_pages);
if (err)
goto err1;
@@ -434,7 +434,6 @@ err1:
* under the control of a dma descriptor
*/
int copy_data(
- struct rxe_dev *rxe,
struct rxe_pd *pd,
int access,
struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 9da6e37fb70c..59ec6d918ed4 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -276,9 +276,12 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
pkt->mask = RXE_GRH_MASK;
pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
- return rxe_rcv(skb);
+ rxe_rcv(skb);
+
+ return 0;
drop:
kfree_skb(skb);
+
return 0;
}
@@ -315,7 +318,7 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
return sock;
}
-void rxe_release_udp_tunnel(struct socket *sk)
+static void rxe_release_udp_tunnel(struct socket *sk)
{
if (sk)
udp_tunnel_sock_release(sk);
@@ -517,9 +520,9 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return 0;
}
-int rxe_loopback(struct sk_buff *skb)
+void rxe_loopback(struct sk_buff *skb)
{
- return rxe_rcv(skb);
+ rxe_rcv(skb);
}
static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
@@ -562,11 +565,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
pkt->rxe = rxe;
pkt->port_num = port_num;
- pkt->hdr = skb_put(skb, paylen);
+ pkt->hdr = skb_put_zero(skb, paylen);
pkt->mask |= RXE_GRH_MASK;
- memset(pkt->hdr, 0, paylen);
-
dev_put(ndev);
return skb;
}
@@ -622,7 +623,6 @@ void rxe_remove_all(void)
}
spin_unlock_bh(&dev_list_lock);
}
-EXPORT_SYMBOL(rxe_remove_all);
static void rxe_port_event(struct rxe_dev *rxe,
enum ib_event_type event)
@@ -707,7 +707,7 @@ out:
return NOTIFY_OK;
}
-struct notifier_block rxe_net_notifier = {
+static struct notifier_block rxe_net_notifier = {
.notifier_call = rxe_notify,
};
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 728d8c71b36a..106c586dbb26 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -43,9 +43,6 @@ struct rxe_recv_sockets {
struct socket *sk6;
};
-extern struct notifier_block rxe_net_notifier;
-void rxe_release_udp_tunnel(struct socket *sk);
-
struct rxe_dev *rxe_net_add(struct net_device *ndev);
int rxe_net_init(void);
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index dd80c7d9074a..dfba44a40f0b 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -311,7 +311,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
* increase the users of the skb then post to the next qp
*/
if (mce->qp_list.next != &mcg->qp_list)
- refcount_inc(&skb->users);
+ skb_get(skb);
pkt->qp = qp;
rxe_add_ref(qp);
@@ -345,7 +345,7 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
}
/* rxe_rcv is called from the interface driver */
-int rxe_rcv(struct sk_buff *skb)
+void rxe_rcv(struct sk_buff *skb)
{
int err;
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
@@ -403,12 +403,11 @@ int rxe_rcv(struct sk_buff *skb)
else
rxe_rcv_pkt(rxe, pkt, skb);
- return 0;
+ return;
drop:
if (pkt->qp)
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
- return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 785199990457..f30eeba3f772 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -490,7 +490,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
wqe->dma.resid -= paylen;
wqe->dma.sge_offset += paylen;
} else {
- err = copy_data(rxe, qp->pd, 0, &wqe->dma,
+ err = copy_data(qp->pd, 0, &wqe->dma,
payload_addr(pkt), paylen,
from_mem_obj,
&crc);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 955ff3b6da9c..5b57de30dee4 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -511,9 +511,8 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
int data_len)
{
int err;
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
+ err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
data_addr, data_len, to_mem_obj, NULL);
if (unlikely(err))
return (err == -ENOSPC) ? RESPST_ERR_LENGTH
@@ -987,7 +986,7 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
sizeof(skb->cb) - sizeof(ack_pkt));
- refcount_inc(&skb->users);
+ skb_get(skb);
res->type = RXE_ATOMIC_MASK;
res->atomic.skb = skb;
res->first_psn = ack_pkt.psn;
@@ -1121,23 +1120,12 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
/* Find the operation in our list of responder resources. */
res = find_resource(qp, pkt->psn);
if (res) {
- struct sk_buff *skb_copy;
-
- skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
- if (skb_copy) {
- rxe_add_ref(qp); /* for the new SKB */
- } else {
- pr_warn("Couldn't clone atomic resp\n");
- rc = RESPST_CLEANUP;
- goto out;
- }
-
+ skb_get(res->atomic.skb);
/* Resend the result. */
rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
- pkt, skb_copy);
+ pkt, res->atomic.skb);
if (rc) {
pr_err("Failed resending result. This flow is not handled - skb ignored\n");
- rxe_drop_ref(qp);
rc = RESPST_CLEANUP;
goto out;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 73a00a1c06f6..9deafc3aa6af 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1003,7 +1003,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_add_ref(pd);
- err = rxe_mem_init_dma(rxe, pd, access, mr);
+ err = rxe_mem_init_dma(pd, access, mr);
if (err)
goto err2;
@@ -1038,7 +1038,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);
- err = rxe_mem_init_user(rxe, pd, start, length, iova,
+ err = rxe_mem_init_user(pd, start, length, iova,
access, udata, mr);
if (err)
goto err3;
@@ -1086,7 +1086,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);
- err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
+ err = rxe_mem_init_fast(pd, max_num_sg, mr);
if (err)
goto err2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 308e0ce49289..a50b062ed13e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -415,6 +415,7 @@ struct ipoib_ah {
struct list_head list;
struct kref ref;
unsigned last_send;
+ int valid;
};
struct ipoib_path {
@@ -431,7 +432,6 @@ struct ipoib_path {
struct rb_node rb_node;
struct list_head list;
- int valid;
};
struct ipoib_neigh {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cf291f90b58f..2ce40a7ff604 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -697,7 +697,8 @@ void ipoib_mark_paths_invalid(struct net_device *dev)
ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
path->pathrec.dgid.raw);
- path->valid = 0;
+ if (path->ah)
+ path->ah->valid = 0;
}
spin_unlock_irq(&priv->lock);
@@ -833,7 +834,7 @@ static void path_rec_completion(int status,
while ((skb = __skb_dequeue(&neigh->queue)))
__skb_queue_tail(&skqueue, skb);
}
- path->valid = 1;
+ path->ah->valid = 1;
}
path->query = NULL;
@@ -926,6 +927,24 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
+static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ path = __path_find(dev, daddr + 4);
+ if (!path)
+ goto out;
+ if (!path->query)
+ path_rec_start(dev, path);
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
struct net_device *dev)
{
@@ -963,7 +982,7 @@ static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
list_add_tail(&neigh->list, &path->neigh_list);
- if (path->ah) {
+ if (path->ah && path->ah->valid) {
kref_get(&path->ah->ref);
neigh->ah = path->ah;
@@ -1034,63 +1053,43 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
goto drop_and_unlock;
path = __path_find(dev, phdr->hwaddr + 4);
- if (!path || !path->valid) {
- int new_path = 0;
-
+ if (!path || !path->ah || !path->ah->valid) {
if (!path) {
path = path_rec_create(dev, phdr->hwaddr + 4);
- new_path = 1;
+ if (!path)
+ goto drop_and_unlock;
+ __path_add(dev, path);
+ } else {
+ /*
+ * make sure there are no changes in the existing
+ * path record
+ */
+ init_path_rec(priv, path, phdr->hwaddr + 4);
+ }
+ if (!path->query && path_rec_start(dev, path)) {
+ goto drop_and_unlock;
}
- if (path) {
- if (!new_path)
- /* make sure there is no changes in the existing path record */
- init_path_rec(priv, path, phdr->hwaddr + 4);
-
- if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- push_pseudo_header(skb, phdr->hwaddr);
- __skb_queue_tail(&path->queue, skb);
- } else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
- }
- if (!path->query && path_rec_start(dev, path)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- if (new_path)
- path_free(dev, path);
- return;
- } else
- __path_add(dev, path);
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, phdr->hwaddr);
+ __skb_queue_tail(&path->queue, skb);
+ goto unlock;
} else {
goto drop_and_unlock;
}
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return;
- }
-
- if (path->ah) {
- ipoib_dbg(priv, "Send unicast ARP to %08x\n",
- be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
-
- spin_unlock_irqrestore(&priv->lock, flags);
- path->ah->last_send = rn->send(dev, skb, path->ah->ah,
- IPOIB_QPN(phdr->hwaddr));
- return;
- } else if ((path->query || !path_rec_start(dev, path)) &&
- skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- push_pseudo_header(skb, phdr->hwaddr);
- __skb_queue_tail(&path->queue, skb);
- } else {
- goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_dbg(priv, "Send unicast ARP to %08x\n",
+ be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
+ path->ah->last_send = rn->send(dev, skb, path->ah->ah,
+ IPOIB_QPN(phdr->hwaddr));
return;
drop_and_unlock:
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
+unlock:
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1161,10 +1160,12 @@ send_using_neigh:
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
goto unref;
}
- } else if (neigh->ah) {
+ } else if (neigh->ah && neigh->ah->valid) {
neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
IPOIB_QPN(phdr->hwaddr));
goto unref;
+ } else if (neigh->ah) {
+ neigh_refresh_path(neigh, phdr->hwaddr, dev);
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 9b3f47ae2016..6709328d90f8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -886,7 +886,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
struct netdev_hw_addr *ha;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
- unsigned long flags;
struct ib_sa_mcmember_rec rec;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
@@ -898,9 +897,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
- local_irq_save(flags);
- netif_addr_lock(dev);
- spin_lock(&priv->lock);
+ netif_addr_lock_bh(dev);
+ spin_lock_irq(&priv->lock);
/*
* Unfortunately, the networking core only gives us a list of all of
@@ -978,9 +976,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
}
}
- spin_unlock(&priv->lock);
- netif_addr_unlock(dev);
- local_irq_restore(flags);
+ spin_unlock_irq(&priv->lock);
+ netif_addr_unlock_bh(dev);
ipoib_mcast_remove_list(&remove_list);
@@ -988,9 +985,9 @@ void ipoib_mcast_restart_task(struct work_struct *work)
* Double check that we are still up
*/
if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
}
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0336643c2ed6..9a6434c31db2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -665,19 +665,17 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
goto free_host;
}
- /*
- * FRs or FMRs can only map up to a (device) page per entry, but if the
- * first entry is misaligned we'll end up using using two entries
- * (head and tail) for a single page worth data, so we have to drop
- * one segment from the calculation.
- */
- max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
+ max_fr_sectors = (shost->sg_tablesize * PAGE_SIZE) >> 9;
shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
iser_conn, shost->sg_tablesize,
shost->max_sectors);
+ if (shost->max_sectors < iser_max_sectors)
+ iser_warn("max_sectors was reduced from %u to %u\n",
+ iser_max_sectors, shost->max_sectors);
+
if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
cmds_max, max_cmds);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index c1ae4aeae2f9..120b40829560 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -383,10 +383,6 @@ struct iser_device {
bool remote_inv_sup;
};
-#define ISER_CHECK_GUARD 0xc0
-#define ISER_CHECK_REFTAG 0x0f
-#define ISER_CHECK_APPTAG 0x30
-
/**
* struct iser_reg_resources - Fast registration recources
*
@@ -498,6 +494,7 @@ struct ib_conn {
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
+ * @pages_per_mr: maximum pages available for registration
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -520,6 +517,7 @@ struct iser_conn {
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
unsigned short scsi_sg_tablesize;
+ unsigned short pages_per_mr;
bool snd_w_inv;
};
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index df49c4eb67f7..ca858d6bd37a 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -251,7 +251,7 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
- iser_conn->scsi_sg_tablesize))
+ iser_conn->pages_per_mr))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 322209d5ff58..ca844a926e6a 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -362,9 +362,9 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
*mask = 0;
if (sc->prot_flags & SCSI_PROT_REF_CHECK)
- *mask |= ISER_CHECK_REFTAG;
+ *mask |= IB_SIG_CHECK_REFTAG;
if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
- *mask |= ISER_CHECK_GUARD;
+ *mask |= IB_SIG_CHECK_GUARD;
}
static inline void
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 56b7240a3fc3..616d978cbf2b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -703,19 +703,34 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
unsigned int max_sectors)
{
struct iser_device *device = iser_conn->ib_conn.device;
+ struct ib_device_attr *attr = &device->ib_device->attrs;
unsigned short sg_tablesize, sup_sg_tablesize;
+ unsigned short reserved_mr_pages;
+
+ /*
+ * FRs without SG_GAPS or FMRs can only map up to a (device) page per
+ * entry, but if the first entry is misaligned we'll end up using two
+ * entries (head and tail) for a single page worth data, so one
+ * additional entry is required.
+ */
+ if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) &&
+ (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ reserved_mr_pages = 0;
+ else
+ reserved_mr_pages = 1;
sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
- if (device->ib_device->attrs.device_cap_flags &
- IB_DEVICE_MEM_MGT_EXTENSIONS)
+ if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
sup_sg_tablesize =
min_t(
uint, ISCSI_ISER_MAX_SG_TABLESIZE,
- device->ib_device->attrs.max_fast_reg_page_list_len);
+ attr->max_fast_reg_page_list_len - reserved_mr_pages);
else
sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
+ iser_conn->pages_per_mr =
+ iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}
/**
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index fff40b097947..f2f9318e1f49 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -886,15 +886,9 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des
}
static void
-isert_create_send_desc(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd,
- struct iser_tx_desc *tx_desc)
+__isert_create_send_desc(struct isert_device *device,
+ struct iser_tx_desc *tx_desc)
{
- struct isert_device *device = isert_conn->device;
- struct ib_device *ib_dev = device->ib_device;
-
- ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
- ISER_HEADERS_LEN, DMA_TO_DEVICE);
memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
tx_desc->iser_header.flags = ISCSI_CTRL;
@@ -907,6 +901,20 @@ isert_create_send_desc(struct isert_conn *isert_conn,
}
}
+static void
+isert_create_send_desc(struct isert_conn *isert_conn,
+ struct isert_cmd *isert_cmd,
+ struct iser_tx_desc *tx_desc)
+{
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+
+ ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+
+ __isert_create_send_desc(device, tx_desc);
+}
+
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
struct iser_tx_desc *tx_desc)
@@ -994,7 +1002,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
int ret;
- isert_create_send_desc(isert_conn, NULL, tx_desc);
+ __isert_create_send_desc(device, tx_desc);
memcpy(&tx_desc->iscsi_header, &login->rsp[0],
sizeof(struct iscsi_hdr));
@@ -2106,10 +2114,13 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
return -EINVAL;
}
- sig_attrs->check_mask =
- (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
- (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
- (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
+ sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
+ sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
+ sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
+
return 0;
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5b714a062fa7..8ea77efb2e29 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,7 +23,7 @@ config IOMMU_IO_PGTABLE
config IOMMU_IO_PGTABLE_LPAE
bool "ARMv7/v8 Long Descriptor Format"
select IOMMU_IO_PGTABLE
- depends on HAS_DMA && (ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64))
+ depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
help
Enable support for the ARM long descriptor pagetable format.
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -42,7 +42,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
config IOMMU_IO_PGTABLE_ARMV7S
bool "ARMv7/v8 Short Descriptor Format"
select IOMMU_IO_PGTABLE
- depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
+ depends on ARM || ARM64 || COMPILE_TEST
help
Enable support for the ARM Short-descriptor pagetable format.
This supports 32-bit virtual and physical addresses mapped using
@@ -377,7 +377,6 @@ config QCOM_IOMMU
# Note: iommu drivers cannot (yet?) be built as modules
bool "Qualcomm IOMMU Support"
depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
- depends on HAS_DMA
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8fb8c737fffe..0cea80be2888 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -354,6 +354,9 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
};
int i, pos;
+ if (pci_ats_disabled())
+ return false;
+
for (i = 0; i < 3; ++i) {
pos = pci_find_ext_capability(pdev, caps[i]);
if (pos == 0)
@@ -544,7 +547,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
struct device *dev = iommu->iommu.dev;
- int type, devid, domid, flags;
+ int type, devid, pasid, flags, tag;
volatile u32 *event = __evt;
int count = 0;
u64 address;
@@ -552,7 +555,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+ pasid = PPR_PASID(*(u64 *)&event[0]);
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2];
@@ -567,7 +570,7 @@ retry:
}
if (type == EVENT_TYPE_IO_FAULT) {
- amd_iommu_report_page_fault(devid, domid, address, flags);
+ amd_iommu_report_page_fault(devid, pasid, address, flags);
return;
} else {
dev_err(dev, "AMD-Vi: Event logged [");
@@ -575,10 +578,9 @@ retry:
switch (type) {
case EVENT_TYPE_ILL_DEV:
- dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
+ dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ pasid, address, flags);
dump_dte_entry(devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
@@ -588,34 +590,38 @@ retry:
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
- "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+ dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- domid, address, flags);
+ pasid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
dump_command(address);
break;
case EVENT_TYPE_CMD_HARD_ERR:
- dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx "
- "flags=0x%04x]\n", address, flags);
+ dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx flags=0x%04x]\n",
+ address, flags);
break;
case EVENT_TYPE_IOTLB_INV_TO:
- dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
- "address=0x%016llx]\n",
+ dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%016llx]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address);
break;
case EVENT_TYPE_INV_DEV_REQ:
- dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
+ dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ pasid, address, flags);
+ break;
+ case EVENT_TYPE_INV_PPR_REQ:
+ pasid = ((event[0] >> 16) & 0xFFFF)
+ | ((event[1] << 6) & 0xF0000);
+ tag = event[1] & 0x03FF;
+ dev_err(dev, "INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pasid, address, flags);
break;
default:
- dev_err(dev, KERN_ERR "UNKNOWN event[0]=0x%08x event[1]=0x%08x "
- "event[2]=0x%08x event[3]=0x%08x\n",
+ dev_err(dev, "UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
event[0], event[1], event[2], event[3]);
}
@@ -1911,15 +1917,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct amd_iommu *iommu;
u16 alias;
- /*
- * First check if the device is still attached. It might already
- * be detached from its domain because the generic
- * iommu_detach_group code detached it and we try again here in
- * our alias handling.
- */
- if (!dev_data->domain)
- return;
-
iommu = amd_iommu_rlookup_table[dev_data->devid];
alias = dev_data->alias;
@@ -1939,8 +1936,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
}
/*
- * If a device is not yet associated with a domain, this function does
- * assigns it visible for the hardware
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain
*/
static int __attach_device(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
@@ -2061,8 +2058,8 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
}
/*
- * If a device is not yet associated with a domain, this function
- * assigns it visible for the hardware
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain
*/
static int attach_device(struct device *dev,
struct protection_domain *domain)
@@ -2124,9 +2121,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
*/
WARN_ON(!irqs_disabled());
- if (WARN_ON(!dev_data->domain))
- return;
-
domain = dev_data->domain;
spin_lock(&domain->lock);
@@ -2148,6 +2142,15 @@ static void detach_device(struct device *dev)
dev_data = get_dev_data(dev);
domain = dev_data->domain;
+ /*
+ * First check if the device is still attached. It might already
+ * be detached from its domain because the generic
+ * iommu_detach_group code detached it and we try again here in
+ * our alias handling.
+ */
+ if (WARN_ON(!dev_data->domain))
+ return;
+
/* lock device table */
spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
__detach_device(dev_data);
@@ -2793,6 +2796,7 @@ static void cleanup_domain(struct protection_domain *domain)
while (!list_empty(&domain->dev_list)) {
entry = list_first_entry(&domain->dev_list,
struct iommu_dev_data, list);
+ BUG_ON(!entry->domain);
__detach_device(entry);
}
@@ -3523,9 +3527,11 @@ int amd_iommu_device_info(struct pci_dev *pdev,
memset(info, 0, sizeof(*info));
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
- if (pos)
- info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+ if (!pci_ats_disabled()) {
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
+ if (pos)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+ }
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (pos)
@@ -4379,7 +4385,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
static struct irq_chip amd_ir_chip = {
.name = "AMD-IR",
- .irq_ack = ir_ack_apic_edge,
+ .irq_ack = apic_ack_irq,
.irq_set_affinity = amd_ir_set_affinity,
.irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
.irq_compose_msi_msg = ir_compose_msi_msg,
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 1c9b080276c9..986cbe0cc189 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -133,6 +133,7 @@
#define EVENT_TYPE_CMD_HARD_ERR 0x6
#define EVENT_TYPE_IOTLB_INV_TO 0x7
#define EVENT_TYPE_INV_DEV_REQ 0x8
+#define EVENT_TYPE_INV_PPR_REQ 0x9
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
#define EVENT_DOMID_MASK 0xffff
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 460bed4fc5b1..4321f7704b23 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1618,17 +1618,13 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
int reg, fault_index;
u32 fault_status;
unsigned long flag;
- bool ratelimited;
static DEFINE_RATELIMIT_STATE(rs,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- /* Disable printing, simply clear the fault when ratelimited */
- ratelimited = !__ratelimit(&rs);
-
raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- if (fault_status && !ratelimited)
+ if (fault_status && __ratelimit(&rs))
pr_err("DRHD: handling fault status reg %x\n", fault_status);
/* TBD: ignore advanced fault log currently */
@@ -1638,6 +1634,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
fault_index = dma_fsts_fault_record_index(fault_status);
reg = cap_fault_reg_offset(iommu->cap);
while (1) {
+ /* Disable printing, simply clear the fault when ratelimited */
+ bool ratelimited = !__ratelimit(&rs);
u8 fault_reason;
u16 source_id;
u64 guest_addr;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 749d8f235346..89e49a429c57 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -485,37 +485,14 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
-static int intel_iommu_pasid28;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-/* Broadwell and Skylake have broken ECS support — normal so-called "second
- * level" translation of DMA requests-without-PASID doesn't actually happen
- * unless you also set the NESTE bit in an extended context-entry. Which of
- * course means that SVM doesn't work because it's trying to do nested
- * translation of the physical addresses it finds in the process page tables,
- * through the IOVA->phys mapping found in the "second level" page tables.
- *
- * The VT-d specification was retroactively changed to change the definition
- * of the capability bits and pretend that Broadwell/Skylake never happened...
- * but unfortunately the wrong bit was changed. It's ECS which is broken, but
- * for some reason it was the PASID capability bit which was redefined (from
- * bit 28 on BDW/SKL to bit 40 in future).
- *
- * So our test for ECS needs to eschew those implementations which set the old
- * PASID capabiity bit 28, since those are the ones on which ECS is broken.
- * Unless we are working around the 'pasid28' limitations, that is, by putting
- * the device into passthrough mode for normal DMA and thus masking the bug.
- */
-#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
- (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
-/* PASID support is thus enabled if ECS is enabled and *either* of the old
- * or new capability bits are set. */
-#define pasid_enabled(iommu) (ecs_enabled(iommu) && \
- (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap))
+#define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -578,11 +555,6 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable extended context table support\n");
intel_iommu_ecs = 0;
- } else if (!strncmp(str, "pasid28", 7)) {
- printk(KERN_INFO
- "Intel-IOMMU: enable pre-production PASID support\n");
- intel_iommu_pasid28 = 1;
- iommu_identity_mapping |= IDENTMAP_GFX;
} else if (!strncmp(str, "tboot_noforce", 13)) {
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -1606,6 +1578,18 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
iommu_flush_dev_iotlb(domain, addr, mask);
}
+/* Notification for newly created mappings */
+static inline void __mapping_notify_one(struct intel_iommu *iommu,
+ struct dmar_domain *domain,
+ unsigned long pfn, unsigned int pages)
+{
+ /* It's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
+ else
+ iommu_flush_write_buffer(iommu);
+}
+
static void iommu_flush_iova(struct iova_domain *iovad)
{
struct dmar_domain *domain;
@@ -2340,18 +2324,47 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return 0;
}
+static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ struct scatterlist *sg, unsigned long phys_pfn,
+ unsigned long nr_pages, int prot)
+{
+ int ret;
+ struct intel_iommu *iommu;
+
+ /* Do the real mapping first */
+ ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+ if (ret)
+ return ret;
+
+ /* Notify about the new mapping */
+ if (domain_type_is_vm(domain)) {
+ /* VM typed domains can have more than one IOMMUs */
+ int iommu_id;
+ for_each_domain_iommu(iommu_id, domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+ } else {
+ /* General domains only have one IOMMU */
+ iommu = domain_get_iommu(domain);
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+
+ return 0;
+}
+
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long nr_pages,
int prot)
{
- return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+ return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}
static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages,
int prot)
{
- return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+ return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
@@ -2459,7 +2472,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
- if (ecap_dev_iotlb_support(iommu->ecap) &&
+ if (!pci_ats_disabled() &&
+ ecap_dev_iotlb_support(iommu->ecap) &&
pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
dmar_find_matched_atsr_unit(pdev))
info->ats_supported = 1;
@@ -2533,7 +2547,7 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
struct device_domain_info *info = NULL;
struct dmar_domain *domain = NULL;
struct intel_iommu *iommu;
- u16 req_id, dma_alias;
+ u16 dma_alias;
unsigned long flags;
u8 bus, devfn;
@@ -2541,8 +2555,6 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
if (!iommu)
return NULL;
- req_id = ((u16)bus << 8) | devfn;
-
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2656,9 +2668,9 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
*/
dma_pte_clear_range(domain, first_vpfn, last_vpfn);
- return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
- last_vpfn - first_vpfn + 1,
- DMA_PTE_READ|DMA_PTE_WRITE);
+ return __domain_mapping(domain, first_vpfn, NULL,
+ first_vpfn, last_vpfn - first_vpfn + 1,
+ DMA_PTE_READ|DMA_PTE_WRITE);
}
static int domain_prepare_identity_map(struct device *dev,
@@ -3625,14 +3637,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
if (ret)
goto error;
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova_pfn),
- size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
-
start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
return start_paddr;
@@ -3819,12 +3823,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
return 0;
}
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
-
return nelems;
}
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index e8cd984cf9c8..45f6e581cd56 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -319,7 +319,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
} else
pasid_max = 1 << 20;
- if ((flags & SVM_FLAG_SUPERVISOR_MODE)) {
+ if (flags & SVM_FLAG_SUPERVISOR_MODE) {
if (!ecap_srs(iommu->ecap))
return -EINVAL;
} else if (pasid) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 3062a154a9fb..967450bd421a 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1223,7 +1223,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
static struct irq_chip intel_ir_chip = {
.name = "INTEL-IR",
- .irq_ack = ir_ack_apic_edge,
+ .irq_ack = apic_ack_irq,
.irq_set_affinity = intel_ir_set_affinity,
.irq_compose_msi_msg = intel_ir_compose_msi_msg,
.irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 10e4a3d11c02..50e3a9fcf43e 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -898,8 +898,7 @@ static int __init arm_v7s_do_selftests(void)
/* Full unmap */
iova = 0;
- i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
- while (i != BITS_PER_LONG) {
+ for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << i;
if (ops->unmap(ops, iova, size) != size)
@@ -916,8 +915,6 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
iova += SZ_16M;
- i++;
- i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
}
free_io_pgtable_ops(ops);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 39c2a056da21..010a254305dd 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -231,12 +231,17 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
struct io_pgtable_cfg *cfg)
{
struct device *dev = cfg->iommu_dev;
+ int order = get_order(size);
+ struct page *p;
dma_addr_t dma;
- void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+ void *pages;
- if (!pages)
+ VM_BUG_ON((gfp & __GFP_HIGHMEM));
+ p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
+ if (!p)
return NULL;
+ pages = page_address(p);
if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
@@ -256,7 +261,7 @@ out_unmap:
dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
- free_pages_exact(pages, size);
+ __free_pages(p, order);
return NULL;
}
@@ -266,7 +271,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
size, DMA_TO_DEVICE);
- free_pages_exact(pages, size);
+ free_pages((unsigned long)pages, get_order(size));
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
@@ -1120,8 +1125,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
/* Full unmap */
iova = 0;
- j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
- while (j != BITS_PER_LONG) {
+ for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << j;
if (ops->unmap(ops, iova, size) != size)
@@ -1138,8 +1142,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
iova += SZ_1G;
- j++;
- j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
}
free_io_pgtable_ops(ops);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d2aa23202bb9..63b37563db7e 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -116,9 +116,11 @@ static void __iommu_detach_group(struct iommu_domain *domain,
static int __init iommu_set_def_domain_type(char *str)
{
bool pt;
+ int ret;
- if (!str || strtobool(str, &pt))
- return -EINVAL;
+ ret = kstrtobool(str, &pt);
+ if (ret)
+ return ret;
iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
return 0;
@@ -322,7 +324,6 @@ static struct kobj_type iommu_group_ktype = {
/**
* iommu_group_alloc - Allocate a new group
- * @name: Optional name to associate with group, visible in sysfs
*
* This function is called by an iommu driver to allocate a new iommu
* group. The iommu group represents the minimum granularity of the iommu.
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 496deee3ae3a..7d0f3074d41d 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -156,11 +156,6 @@ void panic_if_irq_remap(const char *msg)
panic(msg);
}
-void ir_ack_apic_edge(struct irq_data *data)
-{
- ack_APIC_irq();
-}
-
/**
* irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
* device serving request @info
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index 039c7af7b190..0afef6e43be4 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -65,8 +65,6 @@ struct irq_remap_ops {
extern struct irq_remap_ops intel_irq_remap_ops;
extern struct irq_remap_ops amd_iommu_irq_ops;
-extern void ir_ack_apic_edge(struct irq_data *data);
-
#else /* CONFIG_IRQ_REMAP */
#define irq_remapping_enabled 0
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 65b9c99707f8..fe88a4880d3a 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -885,16 +885,14 @@ static int qcom_iommu_device_remove(struct platform_device *pdev)
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+ struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
return qcom_iommu_enable_clocks(qcom_iommu);
}
static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+ struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
qcom_iommu_disable_clocks(qcom_iommu);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index b62f790ad1ba..89ec24c6952c 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -72,6 +72,8 @@ struct gart_domain {
static struct gart_device *gart_handle; /* unique for a system */
+static bool gart_debug;
+
#define GART_PTE(_pfn) \
(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
@@ -271,6 +273,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
struct gart_device *gart = gart_domain->gart;
unsigned long flags;
unsigned long pfn;
+ unsigned long pte;
if (!gart_iova_range_valid(gart, iova, bytes))
return -EINVAL;
@@ -282,6 +285,14 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
spin_unlock_irqrestore(&gart->pte_lock, flags);
return -EINVAL;
}
+ if (gart_debug) {
+ pte = gart_read_pte(gart, iova);
+ if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
+ spin_unlock_irqrestore(&gart->pte_lock, flags);
+ dev_err(gart->dev, "Page entry is in-use\n");
+ return -EBUSY;
+ }
+ }
gart_set_pte(gart, iova, GART_PTE(pfn));
FLUSH_GART_REGS(gart);
spin_unlock_irqrestore(&gart->pte_lock, flags);
@@ -302,7 +313,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
gart_set_pte(gart, iova, 0);
FLUSH_GART_REGS(gart);
spin_unlock_irqrestore(&gart->pte_lock, flags);
- return 0;
+ return bytes;
}
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -515,7 +526,9 @@ static void __exit tegra_gart_exit(void)
subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);
+module_param(gart_debug, bool, 0644);
+MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 57e3d900f19e..1ec3bfe56693 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -21,6 +21,7 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
+#include <linux/dma-iommu.h>
#define MSI_IRQS_PER_MSIR 32
#define MSI_MSIR_OFFSET 4
@@ -94,6 +95,8 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
if (msi_affinity_flag)
msg->data |= cpumask_first(data->common->affinity);
+
+ iommu_dma_map_msi_msg(data->irq, msg);
}
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 5089c1e2838d..3a7e8905a97e 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -552,9 +552,7 @@ static struct irq_chip stm32_exti_h_chip = {
.irq_set_type = stm32_exti_h_set_type,
.irq_set_wake = stm32_exti_h_set_wake,
.flags = IRQCHIP_MASK_ON_SUSPEND,
-#ifdef CONFIG_SMP
- .irq_set_affinity = stm32_exti_h_set_affinity,
-#endif
+ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
};
static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index df88f1bdd921..6a4883e40cc0 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -203,7 +203,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
if (!lba_list) {
pr_err("pblk: could not interpret emeta (line %d)\n",
line->id);
- goto fail_free_ws;
+ goto fail_free_invalid_bitmap;
}
}
@@ -280,6 +280,7 @@ fail_free_gc_rq:
kfree(gc_rq);
fail_free_lba_list:
pblk_mfree(lba_list, l_mg->emeta_alloc_type);
+fail_free_invalid_bitmap:
kfree(invalid_bitmap);
fail_free_ws:
kfree(line_ws);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index ce561f5d48ce..491df0fa0835 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -20,7 +20,7 @@
#include "pblk.h"
-unsigned int write_buffer_size;
+static unsigned int write_buffer_size;
module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index a2bb27446dce..e63d29a95e76 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -109,16 +109,20 @@ config TI_MESSAGE_MANAGER
platform has support for the hardware block.
config HI3660_MBOX
- tristate "Hi3660 Mailbox"
- depends on ARCH_HISI && OF
+ tristate "Hi3660 Mailbox" if EXPERT
+ depends on (ARCH_HISI || COMPILE_TEST)
+ depends on OF
+ default ARCH_HISI
help
An implementation of the hi3660 mailbox. It is used to send message
between application processors and other processors/MCU/DSP. Select
Y here if you want to use Hi3660 mailbox controller.
config HI6220_MBOX
- tristate "Hi6220 Mailbox"
- depends on ARCH_HISI
+ tristate "Hi6220 Mailbox" if EXPERT
+ depends on (ARCH_HISI || COMPILE_TEST)
+ depends on OF
+ default ARCH_HISI
help
An implementation of the hi6220 mailbox. It is used to send message
between application processors and MCU. Say Y here if you want to
@@ -162,7 +166,6 @@ config XGENE_SLIMPRO_MBOX
config BCM_PDC_MBOX
tristate "Broadcom FlexSparx DMA Mailbox"
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on HAS_DMA
help
Mailbox implementation for the Broadcom FlexSparx DMA ring manager,
which provides access to various offload engines on Broadcom
@@ -172,11 +175,18 @@ config BCM_FLEXRM_MBOX
tristate "Broadcom FlexRM Mailbox"
depends on ARM64
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on HAS_DMA
select GENERIC_MSI_IRQ_DOMAIN
default m if ARCH_BCM_IPROC
help
Mailbox implementation of the Broadcom FlexRM ring manager,
which provides access to various offload engines on Broadcom
SoCs. Say Y here if you want to use the Broadcom FlexRM.
+
+config STM32_IPCC
+ tristate "STM32 IPCC Mailbox"
+ depends on MACH_STM32MP157
+ help
+ Mailbox implementation for STMicroelectonics STM32 family chips
+ with hardware for Inter-Processor Communication Controller (IPCC)
+ between processors. Say Y here if you want to have this support.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index cc23c3a43fcd..4d501bea7863 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -38,3 +38,5 @@ obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
+
+obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index cfb4b4496dd9..e92bbc533821 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -134,7 +134,7 @@ static struct mbox_chan *bcm2835_mbox_index_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
if (sp->args_count != 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
return &mbox->chans[0];
}
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 57bde0dfd12f..333ed4a9d4b8 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -125,6 +125,8 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
+ { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
new file mode 100644
index 000000000000..533b0da5235d
--- /dev/null
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
+ * Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+
+#define IPCC_XCR 0x000
+#define XCR_RXOIE BIT(0)
+#define XCR_TXOIE BIT(16)
+
+#define IPCC_XMR 0x004
+#define IPCC_XSCR 0x008
+#define IPCC_XTOYSR 0x00c
+
+#define IPCC_PROC_OFFST 0x010
+
+#define IPCC_HWCFGR 0x3f0
+#define IPCFGR_CHAN_MASK GENMASK(7, 0)
+
+#define IPCC_VER 0x3f4
+#define VER_MINREV_MASK GENMASK(3, 0)
+#define VER_MAJREV_MASK GENMASK(7, 4)
+
+#define RX_BIT_MASK GENMASK(15, 0)
+#define RX_BIT_CHAN(chan) BIT(chan)
+#define TX_BIT_SHIFT 16
+#define TX_BIT_MASK GENMASK(31, 16)
+#define TX_BIT_CHAN(chan) BIT(TX_BIT_SHIFT + (chan))
+
+#define STM32_MAX_PROCS 2
+
+enum {
+ IPCC_IRQ_RX,
+ IPCC_IRQ_TX,
+ IPCC_IRQ_NUM,
+};
+
+struct stm32_ipcc {
+ struct mbox_controller controller;
+ void __iomem *reg_base;
+ void __iomem *reg_proc;
+ struct clk *clk;
+ int irqs[IPCC_IRQ_NUM];
+ int wkp;
+ u32 proc_id;
+ u32 n_chans;
+ u32 xcr;
+ u32 xmr;
+};
+
+static inline void stm32_ipcc_set_bits(void __iomem *reg, u32 mask)
+{
+ writel_relaxed(readl_relaxed(reg) | mask, reg);
+}
+
+static inline void stm32_ipcc_clr_bits(void __iomem *reg, u32 mask)
+{
+ writel_relaxed(readl_relaxed(reg) & ~mask, reg);
+}
+
+static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
+{
+ struct stm32_ipcc *ipcc = data;
+ struct device *dev = ipcc->controller.dev;
+ u32 status, mr, tosr, chan;
+ irqreturn_t ret = IRQ_NONE;
+ int proc_offset;
+
+ /* read 'channel occupied' status from other proc */
+ proc_offset = ipcc->proc_id ? -IPCC_PROC_OFFST : IPCC_PROC_OFFST;
+ tosr = readl_relaxed(ipcc->reg_proc + proc_offset + IPCC_XTOYSR);
+ mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+
+ /* search for unmasked 'channel occupied' */
+ status = tosr & FIELD_GET(RX_BIT_MASK, ~mr);
+
+ for (chan = 0; chan < ipcc->n_chans; chan++) {
+ if (!(status & (1 << chan)))
+ continue;
+
+ dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);
+
+ mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);
+
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR,
+ RX_BIT_CHAN(chan));
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
+{
+ struct stm32_ipcc *ipcc = data;
+ struct device *dev = ipcc->controller.dev;
+ u32 status, mr, tosr, chan;
+ irqreturn_t ret = IRQ_NONE;
+
+ tosr = readl_relaxed(ipcc->reg_proc + IPCC_XTOYSR);
+ mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+
+ /* search for unmasked 'channel free' */
+ status = ~tosr & FIELD_GET(TX_BIT_MASK, ~mr);
+
+ for (chan = 0; chan < ipcc->n_chans ; chan++) {
+ if (!(status & (1 << chan)))
+ continue;
+
+ dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan);
+
+ /* mask 'tx channel free' interrupt */
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
+ TX_BIT_CHAN(chan));
+
+ mbox_chan_txdone(&ipcc->controller.chans[chan], 0);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
+{
+ unsigned int chan = (unsigned int)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+
+ dev_dbg(ipcc->controller.dev, "%s: chan:%d\n", __func__, chan);
+
+ /* set channel n occupied */
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR, TX_BIT_CHAN(chan));
+
+ /* unmask 'tx channel free' interrupt */
+ stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, TX_BIT_CHAN(chan));
+
+ return 0;
+}
+
+static int stm32_ipcc_startup(struct mbox_chan *link)
+{
+ unsigned int chan = (unsigned int)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+ int ret;
+
+ ret = clk_prepare_enable(ipcc->clk);
+ if (ret) {
+ dev_err(ipcc->controller.dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ /* unmask 'rx channel occupied' interrupt */
+ stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, RX_BIT_CHAN(chan));
+
+ return 0;
+}
+
+static void stm32_ipcc_shutdown(struct mbox_chan *link)
+{
+ unsigned int chan = (unsigned int)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+
+ /* mask rx/tx interrupt */
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan));
+
+ clk_disable_unprepare(ipcc->clk);
+}
+
+static const struct mbox_chan_ops stm32_ipcc_ops = {
+ .send_data = stm32_ipcc_send_data,
+ .startup = stm32_ipcc_startup,
+ .shutdown = stm32_ipcc_shutdown,
+};
+
+static int stm32_ipcc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct stm32_ipcc *ipcc;
+ struct resource *res;
+ unsigned int i;
+ int ret;
+ u32 ip_ver;
+ static const char * const irq_name[] = {"rx", "tx"};
+ irq_handler_t irq_thread[] = {stm32_ipcc_rx_irq, stm32_ipcc_tx_irq};
+
+ if (!np) {
+ dev_err(dev, "No DT found\n");
+ return -ENODEV;
+ }
+
+ ipcc = devm_kzalloc(dev, sizeof(*ipcc), GFP_KERNEL);
+ if (!ipcc)
+ return -ENOMEM;
+
+ /* proc_id */
+ if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) {
+ dev_err(dev, "Missing st,proc-id\n");
+ return -ENODEV;
+ }
+
+ if (ipcc->proc_id >= STM32_MAX_PROCS) {
+ dev_err(dev, "Invalid proc_id (%d)\n", ipcc->proc_id);
+ return -EINVAL;
+ }
+
+ /* regs */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ipcc->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ipcc->reg_base))
+ return PTR_ERR(ipcc->reg_base);
+
+ ipcc->reg_proc = ipcc->reg_base + ipcc->proc_id * IPCC_PROC_OFFST;
+
+ /* clock */
+ ipcc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ipcc->clk))
+ return PTR_ERR(ipcc->clk);
+
+ ret = clk_prepare_enable(ipcc->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ /* irq */
+ for (i = 0; i < IPCC_IRQ_NUM; i++) {
+ ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]);
+ if (ipcc->irqs[i] < 0) {
+ dev_err(dev, "no IRQ specified %s\n", irq_name[i]);
+ ret = ipcc->irqs[i];
+ goto err_clk;
+ }
+
+ ret = devm_request_threaded_irq(dev, ipcc->irqs[i], NULL,
+ irq_thread[i], IRQF_ONESHOT,
+ dev_name(dev), ipcc);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d (%d)\n", i, ret);
+ goto err_clk;
+ }
+ }
+
+ /* mask and enable rx/tx irq */
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_MASK | TX_BIT_MASK);
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XCR, XCR_RXOIE | XCR_TXOIE);
+
+ /* wakeup */
+ if (of_property_read_bool(np, "wakeup-source")) {
+ ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup");
+ if (ipcc->wkp < 0) {
+ dev_err(dev, "could not get wakeup IRQ\n");
+ ret = ipcc->wkp;
+ goto err_clk;
+ }
+
+ device_init_wakeup(dev, true);
+ ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp);
+ if (ret) {
+ dev_err(dev, "Failed to set wake up irq\n");
+ goto err_init_wkp;
+ }
+ } else {
+ device_init_wakeup(dev, false);
+ }
+
+ /* mailbox controller */
+ ipcc->n_chans = readl_relaxed(ipcc->reg_base + IPCC_HWCFGR);
+ ipcc->n_chans &= IPCFGR_CHAN_MASK;
+
+ ipcc->controller.dev = dev;
+ ipcc->controller.txdone_irq = true;
+ ipcc->controller.ops = &stm32_ipcc_ops;
+ ipcc->controller.num_chans = ipcc->n_chans;
+ ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
+ sizeof(*ipcc->controller.chans),
+ GFP_KERNEL);
+ if (!ipcc->controller.chans) {
+ ret = -ENOMEM;
+ goto err_irq_wkp;
+ }
+
+ for (i = 0; i < ipcc->controller.num_chans; i++)
+ ipcc->controller.chans[i].con_priv = (void *)i;
+
+ ret = mbox_controller_register(&ipcc->controller);
+ if (ret)
+ goto err_irq_wkp;
+
+ platform_set_drvdata(pdev, ipcc);
+
+ ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);
+
+ dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
+ FIELD_GET(VER_MAJREV_MASK, ip_ver),
+ FIELD_GET(VER_MINREV_MASK, ip_ver),
+ ipcc->controller.num_chans, ipcc->proc_id);
+
+ clk_disable_unprepare(ipcc->clk);
+ return 0;
+
+err_irq_wkp:
+ if (ipcc->wkp)
+ dev_pm_clear_wake_irq(dev);
+err_init_wkp:
+ device_init_wakeup(dev, false);
+err_clk:
+ clk_disable_unprepare(ipcc->clk);
+ return ret;
+}
+
+static int stm32_ipcc_remove(struct platform_device *pdev)
+{
+ struct stm32_ipcc *ipcc = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&ipcc->controller);
+
+ if (ipcc->wkp)
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ device_init_wakeup(&pdev->dev, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void stm32_ipcc_set_irq_wake(struct device *dev, bool enable)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+ unsigned int i;
+
+ if (device_may_wakeup(dev))
+ for (i = 0; i < IPCC_IRQ_NUM; i++)
+ irq_set_irq_wake(ipcc->irqs[i], enable);
+}
+
+static int stm32_ipcc_suspend(struct device *dev)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+
+ ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+ ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);
+
+ stm32_ipcc_set_irq_wake(dev, true);
+
+ return 0;
+}
+
+static int stm32_ipcc_resume(struct device *dev)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+
+ stm32_ipcc_set_irq_wake(dev, false);
+
+ writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
+ writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stm32_ipcc_pm_ops,
+ stm32_ipcc_suspend, stm32_ipcc_resume);
+
+static const struct of_device_id stm32_ipcc_of_match[] = {
+ { .compatible = "st,stm32mp1-ipcc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_ipcc_of_match);
+
+static struct platform_driver stm32_ipcc_driver = {
+ .driver = {
+ .name = "stm32-ipcc",
+ .pm = &stm32_ipcc_pm_ops,
+ .of_match_table = stm32_ipcc_of_match,
+ },
+ .probe = stm32_ipcc_probe,
+ .remove = stm32_ipcc_remove,
+};
+
+module_platform_driver(stm32_ipcc_driver);
+
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STM32 IPCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 775c06d953b7..d10964d41fd7 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -185,9 +185,24 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
+static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct linear_c *lc = ti->private;
+ struct block_device *bdev = lc->dev->bdev;
+ struct dax_device *dax_dev = lc->dev->dax_dev;
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+
+ dev_sector = linear_map_sector(ti, sector);
+ if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
+ return 0;
+ return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+}
+
#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
+#define linear_dax_copy_to_iter NULL
#endif
static struct target_type linear_target = {
@@ -204,6 +219,7 @@ static struct target_type linear_target = {
.iterate_devices = linear_iterate_devices,
.direct_access = linear_dax_direct_access,
.dax_copy_from_iter = linear_dax_copy_from_iter,
+ .dax_copy_to_iter = linear_dax_copy_to_iter,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index c90c7c08a77f..9ea2b0291f20 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -962,9 +962,23 @@ static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
dax_copy:
return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
}
+
+static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
+ pgoff_t pgoff, void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct log_writes_c *lc = ti->private;
+ sector_t sector = pgoff * PAGE_SECTORS;
+
+ if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
+ return 0;
+ return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
+}
+
#else
#define log_writes_dax_direct_access NULL
#define log_writes_dax_copy_from_iter NULL
+#define log_writes_dax_copy_to_iter NULL
#endif
static struct target_type log_writes_target = {
@@ -982,6 +996,7 @@ static struct target_type log_writes_target = {
.io_hints = log_writes_io_hints,
.direct_access = log_writes_dax_direct_access,
.dax_copy_from_iter = log_writes_dax_copy_from_iter,
+ .dax_copy_to_iter = log_writes_dax_copy_to_iter,
};
static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index fe7fb9b1aec3..8547d7594338 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -354,9 +354,29 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
+static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+ struct stripe_c *sc = ti->private;
+ struct dax_device *dax_dev;
+ struct block_device *bdev;
+ uint32_t stripe;
+
+ stripe_map_sector(sc, sector, &stripe, &dev_sector);
+ dev_sector += sc->stripe[stripe].physical_start;
+ dax_dev = sc->stripe[stripe].dev->dax_dev;
+ bdev = sc->stripe[stripe].dev->bdev;
+
+ if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
+ return 0;
+ return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+}
+
#else
#define stripe_dax_direct_access NULL
#define stripe_dax_copy_from_iter NULL
+#define stripe_dax_copy_to_iter NULL
#endif
/*
@@ -478,6 +498,7 @@ static struct target_type stripe_target = {
.io_hints = stripe_io_hints,
.direct_access = stripe_dax_direct_access,
.dax_copy_from_iter = stripe_dax_copy_from_iter,
+ .dax_copy_to_iter = stripe_dax_copy_to_iter,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 98dff36b89a3..e65429a29c06 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1089,6 +1089,30 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
+static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct mapped_device *md = dax_get_private(dax_dev);
+ sector_t sector = pgoff * PAGE_SECTORS;
+ struct dm_target *ti;
+ long ret = 0;
+ int srcu_idx;
+
+ ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+
+ if (!ti)
+ goto out;
+ if (!ti->type->dax_copy_to_iter) {
+ ret = copy_to_iter(addr, bytes, i);
+ goto out;
+ }
+ ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
+ out:
+ dm_put_live_table(md, srcu_idx);
+
+ return ret;
+}
+
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
@@ -1953,9 +1977,10 @@ static void free_dev(struct mapped_device *md)
kvfree(md);
}
-static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
+static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
+ int ret = 0;
if (dm_table_bio_based(t)) {
/*
@@ -1982,13 +2007,16 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
bioset_initialized(&md->bs) ||
bioset_initialized(&md->io_bs));
- md->bs = p->bs;
- memset(&p->bs, 0, sizeof(p->bs));
- md->io_bs = p->io_bs;
- memset(&p->io_bs, 0, sizeof(p->io_bs));
+ ret = bioset_init_from_src(&md->bs, &p->bs);
+ if (ret)
+ goto out;
+ ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
+ if (ret)
+ bioset_exit(&md->bs);
out:
/* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t);
+ return ret;
}
/*
@@ -2033,6 +2061,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct request_queue *q = md->queue;
bool request_based = dm_table_request_based(t);
sector_t size;
+ int ret;
lockdep_assert_held(&md->suspend_lock);
@@ -2068,7 +2097,11 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
md->immutable_target = dm_table_get_immutable_target(t);
}
- __bind_mempools(md, t);
+ ret = __bind_mempools(md, t);
+ if (ret) {
+ old_map = ERR_PTR(ret);
+ goto out;
+ }
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
rcu_assign_pointer(md->map, (void *)t);
@@ -2078,6 +2111,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (old_map)
dm_sync_table(md);
+out:
return old_map;
}
@@ -3127,6 +3161,7 @@ static const struct block_device_operations dm_blk_dops = {
static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access,
.copy_from_iter = dm_dax_copy_from_iter,
+ .copy_to_iter = dm_dax_copy_to_iter,
};
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fc692b7128bb..29b0cd9ec951 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -84,6 +84,8 @@ static void autostart_arrays(int part);
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
+static struct kobj_type md_ktype;
+
struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
@@ -130,6 +132,24 @@ static inline int speed_max(struct mddev *mddev)
mddev->sync_speed_max : sysctl_speed_limit_max;
}
+static void * flush_info_alloc(gfp_t gfp_flags, void *data)
+{
+ return kzalloc(sizeof(struct flush_info), gfp_flags);
+}
+static void flush_info_free(void *flush_info, void *data)
+{
+ kfree(flush_info);
+}
+
+static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
+{
+ return kzalloc(sizeof(struct flush_bio), gfp_flags);
+}
+static void flush_bio_free(void *flush_bio, void *data)
+{
+ kfree(flush_bio);
+}
+
static struct ctl_table_header *raid_table_header;
static struct ctl_table raid_table[] = {
@@ -412,30 +432,53 @@ static int md_congested(void *data, int bits)
/*
* Generic flush handling for md
*/
+static void submit_flushes(struct work_struct *ws)
+{
+ struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
+ struct mddev *mddev = fi->mddev;
+ struct bio *bio = fi->bio;
+
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ md_handle_request(mddev, bio);
+
+ mempool_free(fi, mddev->flush_pool);
+}
-static void md_end_flush(struct bio *bio)
+static void md_end_flush(struct bio *fbio)
{
- struct md_rdev *rdev = bio->bi_private;
- struct mddev *mddev = rdev->mddev;
+ struct flush_bio *fb = fbio->bi_private;
+ struct md_rdev *rdev = fb->rdev;
+ struct flush_info *fi = fb->fi;
+ struct bio *bio = fi->bio;
+ struct mddev *mddev = fi->mddev;
rdev_dec_pending(rdev, mddev);
- if (atomic_dec_and_test(&mddev->flush_pending)) {
- /* The pre-request flush has finished */
- queue_work(md_wq, &mddev->flush_work);
+ if (atomic_dec_and_test(&fi->flush_pending)) {
+ if (bio->bi_iter.bi_size == 0)
+ /* an empty barrier - all done */
+ bio_endio(bio);
+ else {
+ INIT_WORK(&fi->flush_work, submit_flushes);
+ queue_work(md_wq, &fi->flush_work);
+ }
}
- bio_put(bio);
-}
-static void md_submit_flush_data(struct work_struct *ws);
+ mempool_free(fb, mddev->flush_bio_pool);
+ bio_put(fbio);
+}
-static void submit_flushes(struct work_struct *ws)
+void md_flush_request(struct mddev *mddev, struct bio *bio)
{
- struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct md_rdev *rdev;
+ struct flush_info *fi;
+
+ fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
+
+ fi->bio = bio;
+ fi->mddev = mddev;
+ atomic_set(&fi->flush_pending, 1);
- INIT_WORK(&mddev->flush_work, md_submit_flush_data);
- atomic_set(&mddev->flush_pending, 1);
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
@@ -445,59 +488,39 @@ static void submit_flushes(struct work_struct *ws)
* we reclaim rcu_read_lock
*/
struct bio *bi;
+ struct flush_bio *fb;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
+
+ fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
+ fb->fi = fi;
+ fb->rdev = rdev;
+
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
- bi->bi_end_io = md_end_flush;
- bi->bi_private = rdev;
bio_set_dev(bi, rdev->bdev);
+ bi->bi_end_io = md_end_flush;
+ bi->bi_private = fb;
bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- atomic_inc(&mddev->flush_pending);
+
+ atomic_inc(&fi->flush_pending);
submit_bio(bi);
+
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
- if (atomic_dec_and_test(&mddev->flush_pending))
- queue_work(md_wq, &mddev->flush_work);
-}
-
-static void md_submit_flush_data(struct work_struct *ws)
-{
- struct mddev *mddev = container_of(ws, struct mddev, flush_work);
- struct bio *bio = mddev->flush_bio;
- /*
- * must reset flush_bio before calling into md_handle_request to avoid a
- * deadlock, because other bios passed md_handle_request suspend check
- * could wait for this and below md_handle_request could wait for those
- * bios because of suspend check
- */
- mddev->flush_bio = NULL;
- wake_up(&mddev->sb_wait);
-
- if (bio->bi_iter.bi_size == 0)
- /* an empty barrier - all done */
- bio_endio(bio);
- else {
- bio->bi_opf &= ~REQ_PREFLUSH;
- md_handle_request(mddev, bio);
+ if (atomic_dec_and_test(&fi->flush_pending)) {
+ if (bio->bi_iter.bi_size == 0)
+ /* an empty barrier - all done */
+ bio_endio(bio);
+ else {
+ INIT_WORK(&fi->flush_work, submit_flushes);
+ queue_work(md_wq, &fi->flush_work);
+ }
}
}
-
-void md_flush_request(struct mddev *mddev, struct bio *bio)
-{
- spin_lock_irq(&mddev->lock);
- wait_event_lock_irq(mddev->sb_wait,
- !mddev->flush_bio,
- mddev->lock);
- mddev->flush_bio = bio;
- spin_unlock_irq(&mddev->lock);
-
- INIT_WORK(&mddev->flush_work, submit_flushes);
- queue_work(md_wq, &mddev->flush_work);
-}
EXPORT_SYMBOL(md_flush_request);
static inline struct mddev *mddev_get(struct mddev *mddev)
@@ -510,11 +533,6 @@ static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(struct mddev *mddev)
{
- struct bio_set bs, sync_bs;
-
- memset(&bs, 0, sizeof(bs));
- memset(&sync_bs, 0, sizeof(sync_bs));
-
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
@@ -522,30 +540,23 @@ static void mddev_put(struct mddev *mddev)
/* Array is not configured at all, and not held active,
* so destroy it */
list_del_init(&mddev->all_mddevs);
- bs = mddev->bio_set;
- sync_bs = mddev->sync_set;
- memset(&mddev->bio_set, 0, sizeof(mddev->bio_set));
- memset(&mddev->sync_set, 0, sizeof(mddev->sync_set));
- if (mddev->gendisk) {
- /* We did a probe so need to clean up. Call
- * queue_work inside the spinlock so that
- * flush_workqueue() after mddev_find will
- * succeed in waiting for the work to be done.
- */
- INIT_WORK(&mddev->del_work, mddev_delayed_delete);
- queue_work(md_misc_wq, &mddev->del_work);
- } else
- kfree(mddev);
+
+ /*
+ * Call queue_work inside the spinlock so that
+ * flush_workqueue() after mddev_find will succeed in waiting
+ * for the work to be done.
+ */
+ INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+ queue_work(md_misc_wq, &mddev->del_work);
}
spin_unlock(&all_mddevs_lock);
- bioset_exit(&bs);
- bioset_exit(&sync_bs);
}
static void md_safemode_timeout(struct timer_list *t);
void mddev_init(struct mddev *mddev)
{
+ kobject_init(&mddev->kobj, &md_ktype);
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
@@ -556,7 +567,6 @@ void mddev_init(struct mddev *mddev)
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
spin_lock_init(&mddev->lock);
- atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
@@ -2854,7 +2864,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
+ if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+ rdev->saved_raid_disk >= 0) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
@@ -5215,6 +5226,8 @@ static void md_free(struct kobject *ko)
put_disk(mddev->gendisk);
percpu_ref_exit(&mddev->writes_pending);
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
kfree(mddev);
}
@@ -5348,8 +5361,7 @@ static int md_alloc(dev_t dev, char *name)
mutex_lock(&mddev->open_mutex);
add_disk(disk);
- error = kobject_init_and_add(&mddev->kobj, &md_ktype,
- &disk_to_dev(disk)->kobj, "%s", "md");
+ error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
if (error) {
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
@@ -5506,7 +5518,23 @@ int md_run(struct mddev *mddev)
if (!bioset_initialized(&mddev->sync_set)) {
err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
+ return err;
+ }
+ if (mddev->flush_pool == NULL) {
+ mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
+ flush_info_free, mddev);
+ if (!mddev->flush_pool) {
+ err = -ENOMEM;
goto abort;
+ }
+ }
+ if (mddev->flush_bio_pool == NULL) {
+ mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
+ flush_bio_free, mddev);
+ if (!mddev->flush_bio_pool) {
+ err = -ENOMEM;
+ goto abort;
+ }
}
spin_lock(&pers_lock);
@@ -5519,8 +5547,7 @@ int md_run(struct mddev *mddev)
else
pr_warn("md: personality for level %s is not loaded!\n",
mddev->clevel);
- err = -EINVAL;
- goto abort;
+ return -EINVAL;
}
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
@@ -5533,8 +5560,7 @@ int md_run(struct mddev *mddev)
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
module_put(pers->owner);
- err = -EINVAL;
- goto abort;
+ return -EINVAL;
}
if (pers->sync_request) {
@@ -5603,7 +5629,7 @@ int md_run(struct mddev *mddev)
mddev->private = NULL;
module_put(pers->owner);
bitmap_destroy(mddev);
- goto abort;
+ return err;
}
if (mddev->queue) {
bool nonrot = true;
@@ -5667,8 +5693,14 @@ int md_run(struct mddev *mddev)
return 0;
abort:
- bioset_exit(&mddev->bio_set);
- bioset_exit(&mddev->sync_set);
+ if (mddev->flush_bio_pool) {
+ mempool_destroy(mddev->flush_bio_pool);
+ mddev->flush_bio_pool = NULL;
+ }
+ if (mddev->flush_pool){
+ mempool_destroy(mddev->flush_pool);
+ mddev->flush_pool = NULL;
+ }
return err;
}
@@ -5881,6 +5913,14 @@ void md_stop(struct mddev *mddev)
* This is called from dm-raid
*/
__md_stop(mddev);
+ if (mddev->flush_bio_pool) {
+ mempool_destroy(mddev->flush_bio_pool);
+ mddev->flush_bio_pool = NULL;
+ }
+ if (mddev->flush_pool) {
+ mempool_destroy(mddev->flush_pool);
+ mddev->flush_pool = NULL;
+ }
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
}
@@ -6511,6 +6551,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
+ if (!mddev->pers)
+ return -ENODEV;
+
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENXIO;
@@ -8628,6 +8671,7 @@ static int remove_and_add_spares(struct mddev *mddev,
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
+ rdev->saved_raid_disk = rdev->raid_disk;
rdev->raid_disk = -1;
removed++;
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 3507cab22cb6..2d148bdaba74 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -252,6 +252,19 @@ enum mddev_sb_flags {
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
+#define NR_FLUSH_INFOS 8
+#define NR_FLUSH_BIOS 64
+struct flush_info {
+ struct bio *bio;
+ struct mddev *mddev;
+ struct work_struct flush_work;
+ atomic_t flush_pending;
+};
+struct flush_bio {
+ struct flush_info *fi;
+ struct md_rdev *rdev;
+};
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -457,13 +470,8 @@ struct mddev {
* metadata and bitmap writes
*/
- /* Generic flush handling.
- * The last to finish preflush schedules a worker to submit
- * the rest of the request (without the REQ_PREFLUSH flag).
- */
- struct bio *flush_bio;
- atomic_t flush_pending;
- struct work_struct flush_work;
+ mempool_t *flush_pool;
+ mempool_t *flush_bio_pool;
struct work_struct event_work; /* used by dm to report failure event */
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index bad28520719b..0b344d087581 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2449,7 +2449,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
struct mddev *mddev = conf->mddev;
struct bio *bio;
struct md_rdev *rdev;
- sector_t bio_sector;
clear_bit(R1BIO_ReadError, &r1_bio->state);
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -2462,7 +2461,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
*/
bio = r1_bio->bios[r1_bio->read_disk];
- bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
bio_put(bio);
r1_bio->bios[r1_bio->read_disk] = NULL;
@@ -2473,6 +2471,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
unfreeze_array(conf);
+ } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
+ md_error(mddev, rdev);
} else {
r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 37d4b236b81b..1147ae59e3b6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -255,9 +255,11 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
for (j = conf->copies; j--; ) {
struct bio *bio = r10bio->devs[j].bio;
- rp = get_resync_pages(bio);
- resync_free_pages(rp);
- bio_put(bio);
+ if (bio) {
+ rp = get_resync_pages(bio);
+ resync_free_pages(rp);
+ bio_put(bio);
+ }
bio = r10bio->devs[j].repl_bio;
if (bio)
@@ -2362,7 +2364,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
{
int sect = 0; /* Offset from r10_bio->sector */
int sectors = r10_bio->sectors;
- struct md_rdev*rdev;
+ struct md_rdev *rdev;
int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
int d = r10_bio->devs[r10_bio->read_slot].devnum;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a2e64989b01f..73489446bbcb 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1139,6 +1139,9 @@ again:
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_iter.bi_size = STRIPE_SIZE;
+ bi->bi_write_hint = sh->dev[i].write_hint;
+ if (!rrdev)
+ sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -1190,6 +1193,8 @@ again:
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_iter.bi_size = STRIPE_SIZE;
+ rbi->bi_write_hint = sh->dev[i].write_hint;
+ sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -3204,6 +3209,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
(unsigned long long)sh->sector);
spin_lock_irq(&sh->stripe_lock);
+ sh->dev[dd_idx].write_hint = bi->bi_write_hint;
/* Don't allow new IO added to stripes in batch list */
if (sh->batch_head)
goto overlap;
@@ -4614,15 +4620,15 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
sh->check_state = head_sh->check_state;
sh->reconstruct_state = head_sh->reconstruct_state;
+ spin_lock_irq(&sh->stripe_lock);
+ sh->batch_head = NULL;
+ spin_unlock_irq(&sh->stripe_lock);
for (i = 0; i < sh->disks; i++) {
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
do_wakeup = 1;
sh->dev[i].flags = head_sh->dev[i].flags &
(~((1 << R5_WriteError) | (1 << R5_Overlap)));
}
- spin_lock_irq(&sh->stripe_lock);
- sh->batch_head = NULL;
- spin_unlock_irq(&sh->stripe_lock);
if (handle_flags == 0 ||
sh->state & handle_flags)
set_bit(STRIPE_HANDLE, &sh->state);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 72e75ba6abf0..8474c224127b 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -257,6 +257,7 @@ struct stripe_head {
sector_t sector; /* sector of this page */
unsigned long flags;
u32 log_checksum;
+ unsigned short write_hint;
} dev[1]; /* allocated with extra space depending of RAID geometry */
};
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 4e9c0ce94f27..059997f8ebce 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1802,13 +1802,13 @@ typedef struct _CONFIG_PAGE_FC_PORT_0
#define MPI_FCPORTPAGE0_SUPPORT_CLASS_2 (0x00000002)
#define MPI_FCPORTPAGE0_SUPPORT_CLASS_3 (0x00000004)
-#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0 Unknown - transceiver incapable of reporting */
+#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UNKNOWN (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0 Unknown - transceiver incapable of reporting */
#define MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED (0x00000001) /* (SNIA)HBA_PORTSPEED_1GBIT 1 1 GBit/sec */
#define MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED (0x00000002) /* (SNIA)HBA_PORTSPEED_2GBIT 2 2 GBit/sec */
#define MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED (0x00000004) /* (SNIA)HBA_PORTSPEED_10GBIT 4 10 GBit/sec */
#define MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED (0x00000008) /* (SNIA)HBA_PORTSPEED_4GBIT 8 4 GBit/sec */
-#define MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN MPI_FCPORTPAGE0_SUPPORT_SPEED_UNKNOWN
#define MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED
#define MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED
#define MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a746ccdd630a..a625ac4e2872 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7600,7 +7600,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Initiator Device Table Overflow: max initiators=%02d "
- "current initators=%02d",
+ "current initiators=%02d",
max_init, current_init);
break;
}
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 6d461ca97150..06b175420be9 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -693,7 +693,7 @@ mptfc_display_port_link_speed(MPT_ADAPTER *ioc, int portnum, FCPortPage0_t *pp0d
state = pp0dest->PortState;
if (state != MPI_FCPORTPAGE0_PORTSTATE_OFFLINE &&
- new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN) {
+ new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN) {
old = old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" :
old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" :
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 55dd71bbdc2a..4cbed4d06aa7 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -670,7 +670,7 @@ out:
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static int
+static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 19a5aa70ecda..76a66da33996 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -4320,7 +4320,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
hot_plug_info->id) {
printk(MYIOC_s_WARN_FMT "firmware bug: unable "
- "to add hidden disk - target_id matchs "
+ "to add hidden disk - target_id matches "
"volume_id\n", ioc->name);
mptsas_free_fw_event(ioc, fw_event);
return;
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 64e088dfe7b0..503979c81dae 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -29,6 +29,7 @@ static const struct mfd_cell bd9571mwv_cells[] = {
static const struct regmap_range bd9571mwv_readable_yes_ranges[] = {
regmap_reg_range(BD9571MWV_VENDOR_CODE, BD9571MWV_PRODUCT_REVISION),
+ regmap_reg_range(BD9571MWV_BKUP_MODE_CNT, BD9571MWV_BKUP_MODE_CNT),
regmap_reg_range(BD9571MWV_AVS_SET_MONI, BD9571MWV_AVS_DVFS_VID(3)),
regmap_reg_range(BD9571MWV_VD18_VID, BD9571MWV_VD33_VID),
regmap_reg_range(BD9571MWV_DVFS_VINIT, BD9571MWV_DVFS_VINIT),
@@ -44,6 +45,7 @@ static const struct regmap_access_table bd9571mwv_readable_table = {
};
static const struct regmap_range bd9571mwv_writable_yes_ranges[] = {
+ regmap_reg_range(BD9571MWV_BKUP_MODE_CNT, BD9571MWV_BKUP_MODE_CNT),
regmap_reg_range(BD9571MWV_AVS_VD09_VID(0), BD9571MWV_AVS_VD09_VID(3)),
regmap_reg_range(BD9571MWV_DVFS_SETVID, BD9571MWV_DVFS_SETVID),
regmap_reg_range(BD9571MWV_GPIO_DIR, BD9571MWV_GPIO_OUT),
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 90e35dec8648..5bddb84cfc1f 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -233,7 +233,8 @@ static int intel_quark_gpio_setup(struct pci_dev *pdev, struct mfd_cell *cell)
pdata->properties->idx = 0;
pdata->properties->ngpio = INTEL_QUARK_MFD_NGPIO;
pdata->properties->gpio_base = INTEL_QUARK_MFD_GPIO_BASE;
- pdata->properties->irq = pdev->irq;
+ pdata->properties->irq[0] = pdev->irq;
+ pdata->properties->has_irq = true;
pdata->properties->irq_shared = true;
cell->platform_data = pdata;
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index fe8897e64635..7b370466a227 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -203,7 +203,7 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
if (!val)
return false;
- if (test->last_irq - pdev->irq == msi_num - 1)
+ if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
return true;
return false;
@@ -233,7 +233,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
orig_src_addr = dma_alloc_coherent(dev, size + alignment,
&orig_src_phys_addr, GFP_KERNEL);
if (!orig_src_addr) {
- dev_err(dev, "failed to allocate source buffer\n");
+ dev_err(dev, "Failed to allocate source buffer\n");
ret = false;
goto err;
}
@@ -259,7 +259,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
&orig_dst_phys_addr, GFP_KERNEL);
if (!orig_dst_addr) {
- dev_err(dev, "failed to allocate destination address\n");
+ dev_err(dev, "Failed to allocate destination address\n");
ret = false;
goto err_orig_src_addr;
}
@@ -321,7 +321,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
- dev_err(dev, "failed to allocate address\n");
+ dev_err(dev, "Failed to allocate address\n");
ret = false;
goto err;
}
@@ -382,7 +382,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
- dev_err(dev, "failed to allocate destination address\n");
+ dev_err(dev, "Failed to allocate destination address\n");
ret = false;
goto err;
}
@@ -513,31 +513,31 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
if (!no_msi) {
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (irq < 0)
- dev_err(dev, "failed to get MSI interrupts\n");
+ dev_err(dev, "Failed to get MSI interrupts\n");
test->num_irqs = irq;
}
err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, test);
if (err) {
- dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
+ dev_err(dev, "Failed to request IRQ %d\n", pdev->irq);
goto err_disable_msi;
}
for (i = 1; i < irq; i++) {
- err = devm_request_irq(dev, pdev->irq + i,
+ err = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, test);
if (err)
dev_err(dev, "failed to request IRQ %d for MSI %d\n",
- pdev->irq + i, i + 1);
+ pci_irq_vector(pdev, i), i + 1);
}
for (bar = BAR_0; bar <= BAR_5; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
base = pci_ioremap_bar(pdev, bar);
if (!base) {
- dev_err(dev, "failed to read BAR%d\n", bar);
+ dev_err(dev, "Failed to read BAR%d\n", bar);
WARN_ON(bar == test_reg_bar);
}
test->bar[bar] = base;
@@ -557,7 +557,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
err = id;
- dev_err(dev, "unable to get id\n");
+ dev_err(dev, "Unable to get id\n");
goto err_iounmap;
}
@@ -573,7 +573,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
err = misc_register(misc_device);
if (err) {
- dev_err(dev, "failed to register device\n");
+ dev_err(dev, "Failed to register device\n");
goto err_kfree_name;
}
@@ -592,7 +592,7 @@ err_iounmap:
}
for (i = 0; i < irq; i++)
- devm_free_irq(dev, pdev->irq + i, test);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
err_disable_msi:
pci_disable_msi(pdev);
@@ -625,7 +625,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_iounmap(pdev, test->bar[bar]);
}
for (i = 0; i < test->num_irqs; i++)
- devm_free_irq(&pdev->dev, pdev->irq + i, test);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
pci_disable_msi(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -634,6 +634,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index fe2581d9d882..0f93d2239352 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -186,6 +186,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* TRX */
if (buf[0x000 / 4] == TRX_MAGIC) {
struct trx_header *trx;
+ uint32_t last_subpart;
+ uint32_t trx_size;
if (trx_num >= ARRAY_SIZE(trx_parts))
pr_warn("No enough space to store another TRX found at 0x%X\n",
@@ -195,11 +197,23 @@ static int bcm47xxpart_parse(struct mtd_info *master,
bcm47xxpart_add_part(&parts[curr_part++], "firmware",
offset, 0);
- /* Jump to the end of TRX */
+ /*
+ * Try to find TRX size. The "length" field isn't fully
+ * reliable as it could be decreased to make CRC32 cover
+ * only part of TRX data. It's commonly used as checksum
+ * can't cover e.g. ever-changing rootfs partition.
+ * Use offsets as helpers for assuming min TRX size.
+ */
trx = (struct trx_header *)buf;
- offset = roundup(offset + trx->length, blocksize);
- /* Next loop iteration will increase the offset */
- offset -= blocksize;
+ last_subpart = max3(trx->offset[0], trx->offset[1],
+ trx->offset[2]);
+ trx_size = max(trx->length, last_subpart + blocksize);
+
+ /*
+ * Skip the TRX data. Decrease offset by block size as
+ * the next loop iteration will increase it.
+ */
+ offset += roundup(trx_size, blocksize) - blocksize;
continue;
}
@@ -290,9 +304,16 @@ static int bcm47xxpart_parse(struct mtd_info *master,
return curr_part;
};
+static const struct of_device_id bcm47xxpart_of_match_table[] = {
+ { .compatible = "brcm,bcm947xx-cfe-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm47xxpart_of_match_table);
+
static struct mtd_part_parser bcm47xxpart_mtd_parser = {
.parse_fn = bcm47xxpart_parse,
.name = "bcm47xxpart",
+ .of_match_table = bcm47xxpart_of_match_table,
};
module_mtd_part_parser(bcm47xxpart_mtd_parser);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 692902df2598..7c889eca9ab0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -42,10 +42,10 @@
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0
-#define MAX_WORD_RETRIES 3
+#define MAX_RETRIES 3
-#define SST49LF004B 0x0060
-#define SST49LF040B 0x0050
+#define SST49LF004B 0x0060
+#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6
@@ -207,7 +207,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->cfiq->BufWriteTimeoutTyp) {
- pr_debug("Using buffer write method\n" );
+ pr_debug("Using buffer write method\n");
mtd->_write = cfi_amdstd_write_buffers;
}
}
@@ -1563,7 +1563,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
* depending of the conditions. The ' + 1' is to avoid having a
* timeout of 0 jiffies if HZ is smaller than 1000.
*/
- unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
+ unsigned long uWriteTimeout = (HZ / 1000) + 1;
int ret = 0;
map_word oldd;
int retry_cnt = 0;
@@ -1578,7 +1578,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
}
pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
- __func__, adr, datum.x[0] );
+ __func__, adr, datum.x[0]);
if (mode == FL_OTP_WRITE)
otp_enter(map, chip, adr, map_bankwidth(map));
@@ -1644,10 +1644,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
/* Did we succeed? */
if (!chip_good(map, adr, datum)) {
/* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
+ map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_WORD_RETRIES)
+ if (++retry_cnt <= MAX_RETRIES)
goto retry;
ret = -EIO;
@@ -1822,7 +1822,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
datum = map_word_load(map, buf);
pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
- __func__, adr, datum.x[0] );
+ __func__, adr, datum.x[0]);
XIP_INVAL_CACHED_RANGE(map, adr, len);
ENABLE_VPP(map);
@@ -1880,7 +1880,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
if (time_after(jiffies, timeo) && !chip_ready(map, adr))
break;
- if (chip_ready(map, adr)) {
+ if (chip_good(map, adr, datum)) {
xip_enable(map, chip, adr);
goto op_done;
}
@@ -2106,7 +2106,7 @@ retry:
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_WORD_RETRIES)
+ if (++retry_cnt <= MAX_RETRIES)
goto retry;
ret = -EIO;
@@ -2241,6 +2241,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
+ int retry_cnt = 0;
adr = cfi->addr_unlock1;
@@ -2252,12 +2253,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
}
pr_debug("MTD %s(): ERASE 0x%.8lx\n",
- __func__, chip->start );
+ __func__, chip->start);
XIP_INVAL_CACHED_RANGE(map, adr, map->size);
ENABLE_VPP(map);
xip_disable(map, chip, adr);
+ retry:
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -2294,12 +2296,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->erase_suspended = 0;
}
- if (chip_ready(map, adr))
+ if (chip_good(map, adr, map_word_ff(map)))
break;
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
- __func__ );
+ __func__);
+ ret = -EIO;
break;
}
@@ -2307,12 +2310,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
- if (!chip_good(map, adr, map_word_ff(map))) {
+ if (ret) {
/* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
+ map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- ret = -EIO;
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
+ goto retry;
+ }
}
chip->state = FL_READY;
@@ -2331,6 +2337,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
+ int retry_cnt = 0;
adr += chip->start;
@@ -2342,12 +2349,13 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
}
pr_debug("MTD %s(): ERASE 0x%.8lx\n",
- __func__, adr );
+ __func__, adr);
XIP_INVAL_CACHED_RANGE(map, adr, len);
ENABLE_VPP(map);
xip_disable(map, chip, adr);
+ retry:
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -2384,15 +2392,13 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->erase_suspended = 0;
}
- if (chip_ready(map, adr)) {
- xip_enable(map, chip, adr);
+ if (chip_good(map, adr, map_word_ff(map)))
break;
- }
if (time_after(jiffies, timeo)) {
- xip_enable(map, chip, adr);
printk(KERN_WARNING "MTD %s(): software timeout\n",
- __func__ );
+ __func__);
+ ret = -EIO;
break;
}
@@ -2400,15 +2406,19 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
- if (!chip_good(map, adr, map_word_ff(map))) {
+ if (ret) {
/* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
+ map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- ret = -EIO;
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
+ goto retry;
+ }
}
chip->state = FL_READY;
+ xip_enable(map, chip, adr);
DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index e8d0164498b0..cf426956454c 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -63,6 +63,30 @@ do { \
#endif
+/*
+ * This fixup occurs immediately after reading the CFI structure and can affect
+ * the number of chips detected, unlike cfi_fixup, which occurs after an
+ * mtd_info structure has been created for the chip.
+ */
+struct cfi_early_fixup {
+ uint16_t mfr;
+ uint16_t id;
+ void (*fixup)(struct cfi_private *cfi);
+};
+
+static void cfi_early_fixup(struct cfi_private *cfi,
+ const struct cfi_early_fixup *fixups)
+{
+ const struct cfi_early_fixup *f;
+
+ for (f = fixups; f->fixup; f++) {
+ if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
+ ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
+ f->fixup(cfi);
+ }
+ }
+}
+
/* check for QRY.
in: interleave,type,mode
ret: table index, <0 for error
@@ -151,6 +175,22 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
return 1;
}
+static void fixup_s70gl02gs_chips(struct cfi_private *cfi)
+{
+ /*
+ * S70GL02GS flash reports a single 256 MiB chip, but is really made up
+ * of two 128 MiB chips with 1024 sectors each.
+ */
+ cfi->cfiq->DevSize = 27;
+ cfi->cfiq->EraseRegionInfo[0] = 0x20003ff;
+ pr_warn("Bad S70GL02GS CFI data; adjust to detect 2 chips\n");
+}
+
+static const struct cfi_early_fixup cfi_early_fixup_table[] = {
+ { CFI_MFR_AMD, 0x4801, fixup_s70gl02gs_chips },
+ { },
+};
+
static int __xipram cfi_chip_setup(struct map_info *map,
struct cfi_private *cfi)
{
@@ -235,6 +275,8 @@ static int __xipram cfi_chip_setup(struct map_info *map,
cfi_qry_mode_off(base, map, cfi);
xip_allowed(base, map);
+ cfi_early_fixup(cfi, cfi_early_fixup_table);
+
printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank. Manufacturer ID %#08x Chip ID %#08x\n",
map->name, cfi->interleave, cfi->device_type*8, base,
map->bankwidth*8, cfi->mfr, cfi->id);
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index fbd5affc0acf..3ea44cff9b75 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -190,7 +190,10 @@ static struct mtd_partition * newpart(char *s,
extra_mem = (unsigned char *)(parts + *num_parts);
}
- /* enter this partition (offset will be calculated later if it is zero at this point) */
+ /*
+ * enter this partition (offset will be calculated later if it is
+ * OFFSET_CONTINUOUS at this point)
+ */
parts[this_part].size = size;
parts[this_part].offset = offset;
parts[this_part].mask_flags = mask_flags;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index c594fe5eac08..802d8f159e90 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1470,8 +1470,7 @@ static struct docg3 *sysfs_dev2docg3(struct device *dev,
struct device_attribute *attr)
{
int floor;
- struct platform_device *pdev = to_platform_device(dev);
- struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+ struct mtd_info **docg3_floors = dev_get_drvdata(dev);
floor = attr->attr.name[1] - '0';
if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index aaaeaae01e1d..3a6f450d1093 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -140,7 +140,7 @@ static int dataflash_waitready(struct spi_device *spi)
if (status & (1 << 7)) /* RDY/nBSY */
return status;
- msleep(3);
+ usleep_range(3000, 4000);
}
}
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index aab4f68bd36f..2d598412972d 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -334,28 +334,37 @@ static int memcmpb(void *a, int c, int n)
static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
int len, int check_oob)
{
- u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize];
struct mtd_info *mtd = inftl->mbd.mtd;
size_t retlen;
- int i;
+ int i, ret;
+ u8 *buf;
+
+ buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
+ if (!buf)
+ return -1;
+ ret = -1;
for (i = 0; i < len; i += SECTORSIZE) {
if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
- return -1;
+ goto out;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
- return -1;
+ goto out;
if (check_oob) {
if(inftl_read_oob(mtd, address, mtd->oobsize,
&retlen, &buf[SECTORSIZE]) < 0)
- return -1;
+ goto out;
if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
- return -1;
+ goto out;
}
address += SECTORSIZE;
}
- return 0;
+ ret = 0;
+
+out:
+ kfree(buf);
+ return ret;
}
/*
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index dc6df9abea0b..c065d7995c0a 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -265,7 +265,6 @@ MODULE_DEVICE_TABLE(i2c, pismo_id);
static struct i2c_driver pismo_driver = {
.driver = {
.name = "pismo",
- .owner = THIS_MODULE,
},
.probe = pismo_probe,
.remove = pismo_remove,
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 64a1fcaafd9a..42395df06be9 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -210,6 +210,15 @@ static ssize_t mtd_oobsize_show(struct device *dev,
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
+static ssize_t mtd_oobavail_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
+}
+static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);
+
static ssize_t mtd_numeraseregions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -327,6 +336,7 @@ static struct attribute *mtd_attrs[] = {
&dev_attr_writesize.attr,
&dev_attr_subpagesize.attr,
&dev_attr_oobsize.attr,
+ &dev_attr_oobavail.attr,
&dev_attr_numeraseregions.attr,
&dev_attr_name.attr,
&dev_attr_ecc_strength.attr,
@@ -690,7 +700,6 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
- struct mtd_partitions parsed = { };
int ret;
mtd_set_dev_defaults(mtd);
@@ -702,13 +711,10 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
}
/* Prefer parsed partitions over driver-provided fallback */
- ret = parse_mtd_partitions(mtd, types, &parsed, parser_data);
- if (!ret && parsed.nr_parts) {
- parts = parsed.parts;
- nr_parts = parsed.nr_parts;
- }
-
- if (nr_parts)
+ ret = parse_mtd_partitions(mtd, types, parser_data);
+ if (ret > 0)
+ ret = 0;
+ else if (nr_parts)
ret = add_mtd_partitions(mtd, parts, nr_parts);
else if (!device_is_registered(&mtd->dev))
ret = add_mtd_device(mtd);
@@ -734,8 +740,6 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
}
out:
- /* Cleanup any parsed partitions */
- mtd_part_parser_cleanup(&parsed);
if (ret && device_is_registered(&mtd->dev))
del_mtd_device(mtd);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 37accfd0400e..9887bda317cd 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -15,7 +15,6 @@ int del_mtd_partitions(struct mtd_info *);
struct mtd_partitions;
int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
- struct mtd_partitions *pparts,
struct mtd_part_parser_data *data);
void mtd_part_parser_cleanup(struct mtd_partitions *parts);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 023516a63276..f8d3a015cdad 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -335,20 +335,7 @@ static inline void free_partition(struct mtd_part *p)
*/
static int mtd_parse_part(struct mtd_part *slave, const char *const *types)
{
- struct mtd_partitions parsed;
- int err;
-
- err = parse_mtd_partitions(&slave->mtd, types, &parsed, NULL);
- if (err)
- return err;
- else if (!parsed.nr_parts)
- return -ENOENT;
-
- err = add_mtd_partitions(&slave->mtd, parsed.parts, parsed.nr_parts);
-
- mtd_part_parser_cleanup(&parsed);
-
- return err;
+ return parse_mtd_partitions(&slave->mtd, types, NULL);
}
static struct mtd_part *allocate_partition(struct mtd_info *parent,
@@ -933,30 +920,27 @@ static int mtd_part_of_parse(struct mtd_info *master,
}
/**
- * parse_mtd_partitions - parse MTD partitions
+ * parse_mtd_partitions - parse and register MTD partitions
+ *
* @master: the master partition (describes whole MTD device)
* @types: names of partition parsers to try or %NULL
- * @pparts: info about partitions found is returned here
* @data: MTD partition parser-specific data
*
- * This function tries to find partition on MTD device @master. It uses MTD
- * partition parsers, specified in @types. However, if @types is %NULL, then
- * the default list of parsers is used. The default list contains only the
+ * This function tries to find & register partitions on MTD device @master. It
+ * uses MTD partition parsers, specified in @types. However, if @types is %NULL,
+ * then the default list of parsers is used. The default list contains only the
* "cmdlinepart" and "ofpart" parsers ATM.
* Note: If there are more then one parser in @types, the kernel only takes the
* partitions parsed out by the first parser.
*
* This function may return:
* o a negative error code in case of failure
- * o zero otherwise, and @pparts will describe the partitions, number of
- * partitions, and the parser which parsed them. Caller must release
- * resources with mtd_part_parser_cleanup() when finished with the returned
- * data.
+ * o number of found partitions otherwise
*/
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
- struct mtd_partitions *pparts,
struct mtd_part_parser_data *data)
{
+ struct mtd_partitions pparts = { };
struct mtd_part_parser *parser;
int ret, err = 0;
@@ -970,7 +954,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
* handled in a separated function.
*/
if (!strcmp(*types, "ofpart")) {
- ret = mtd_part_of_parse(master, pparts);
+ ret = mtd_part_of_parse(master, &pparts);
} else {
pr_debug("%s: parsing partitions %s\n", master->name,
*types);
@@ -981,13 +965,17 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
parser ? parser->name : NULL);
if (!parser)
continue;
- ret = mtd_part_do_parse(parser, master, pparts, data);
+ ret = mtd_part_do_parse(parser, master, &pparts, data);
if (ret <= 0)
mtd_part_parser_put(parser);
}
/* Found partitions! */
- if (ret > 0)
- return 0;
+ if (ret > 0) {
+ err = add_mtd_partitions(master, pparts.parts,
+ pparts.nr_parts);
+ mtd_part_parser_cleanup(&pparts);
+ return err ? err : pparts.nr_parts;
+ }
/*
* Stash the first error we see; only report it if no parser
* succeeds
diff --git a/drivers/mtd/nand/onenand/samsung.c b/drivers/mtd/nand/onenand/samsung.c
index 2e9d076e445a..4cce4c0311ca 100644
--- a/drivers/mtd/nand/onenand/samsung.c
+++ b/drivers/mtd/nand/onenand/samsung.c
@@ -958,8 +958,7 @@ static int s3c_onenand_remove(struct platform_device *pdev)
static int s3c_pm_ops_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
struct onenand_chip *this = mtd->priv;
this->wait(mtd, FL_PM_SUSPENDED);
@@ -968,8 +967,7 @@ static int s3c_pm_ops_suspend(struct device *dev)
static int s3c_pm_ops_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
struct onenand_chip *this = mtd->priv;
this->unlock_all(mtd);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 19a2b283fbbe..6871ff0fd300 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -46,7 +46,7 @@ config MTD_NAND_DENALI
config MTD_NAND_DENALI_PCI
tristate "Support Denali NAND controller on Intel Moorestown"
select MTD_NAND_DENALI
- depends on HAS_DMA && PCI
+ depends on PCI
help
Enable the driver for NAND flash on Intel Moorestown, using the
Denali NAND controller core.
@@ -152,7 +152,6 @@ config MTD_NAND_S3C2410_CLKSTOP
config MTD_NAND_TANGO
tristate "NAND Flash support for Tango chips"
depends on ARCH_TANGO || COMPILE_TEST
- depends on HAS_DMA
help
Enables the NAND Flash controller on Tango chips.
@@ -285,7 +284,7 @@ config MTD_NAND_MARVELL
tristate "NAND controller support on Marvell boards"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
COMPILE_TEST
- depends on HAS_IOMEM && HAS_DMA
+ depends on HAS_IOMEM
help
This enables the NAND flash controller driver for Marvell boards,
including:
@@ -447,7 +446,6 @@ config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
depends on SUPERH || COMPILE_TEST
depends on HAS_IOMEM
- depends on HAS_DMA
help
Several Renesas SuperH CPU has FLCTL. This option enables support
for NAND Flash using FLCTL.
@@ -515,7 +513,6 @@ config MTD_NAND_SUNXI
config MTD_NAND_HISI504
tristate "Support for NAND controller on Hisilicon SoC Hip04"
depends on ARCH_HISI || COMPILE_TEST
- depends on HAS_DMA
help
Enables support for NAND controller on Hisilicon SoC Hip04.
@@ -529,7 +526,6 @@ config MTD_NAND_QCOM
config MTD_NAND_MTK
tristate "Support for NAND controller on MTK SoCs"
depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on HAS_DMA
help
Enables support for NAND controller on MTK SoCs.
This controller is found on mt27xx, mt81xx, mt65xx SoCs.
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 0f09518d980f..7255a0d94374 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
@@ -55,7 +54,6 @@ struct davinci_nand_info {
struct nand_chip chip;
struct device *dev;
- struct clk *clk;
bool is_readmode;
@@ -703,22 +701,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
/* Use board-specific ECC config */
info->chip.ecc.mode = pdata->ecc_mode;
- ret = -EINVAL;
-
- info->clk = devm_clk_get(&pdev->dev, "aemif");
- if (IS_ERR(info->clk)) {
- ret = PTR_ERR(info->clk);
- dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(info->clk);
- if (ret < 0) {
- dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
- ret);
- goto err_clk_enable;
- }
-
spin_lock_irq(&davinci_nand_lock);
/* put CSxNAND into NAND mode */
@@ -732,7 +714,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
- goto err;
+ return ret;
}
switch (info->chip.ecc.mode) {
@@ -838,9 +820,6 @@ err_cleanup_nand:
nand_cleanup(&info->chip);
err:
- clk_disable_unprepare(info->clk);
-
-err_clk_enable:
spin_lock_irq(&davinci_nand_lock);
if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
@@ -859,8 +838,6 @@ static int nand_davinci_remove(struct platform_device *pdev)
nand_release(nand_to_mtd(&info->chip));
- clk_disable_unprepare(info->clk);
-
return 0;
}
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 2b7b2b982b77..8d10061abb4b 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1480,12 +1480,12 @@ static int __init doc_probe(unsigned long physadr)
WriteDOC(tmp, virtadr, Mplus_DOCControl);
WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
- mdelay(1);
+ usleep_range(1000, 2000);
/* Enable the Millennium Plus ASIC */
tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
WriteDOC(tmp, virtadr, Mplus_DOCControl);
WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
- mdelay(1);
+ usleep_range(1000, 2000);
ChipID = ReadDOC(virtadr, ChipID);
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index d28df991c73c..51f0b340bc0d 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -813,8 +813,6 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
struct mtd_info *mtd = nand_to_mtd(&priv->chip);
- nand_release(mtd);
-
kfree(mtd->name);
if (priv->vbase)
@@ -926,15 +924,20 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
- mtd_device_parse_register(mtd, part_probe_types, NULL,
- NULL, 0);
+ ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+ if (ret)
+ goto cleanup_nand;
pr_info("eLBC NAND device at 0x%llx, bank %d\n",
(unsigned long long)res.start, priv->bank);
+
return 0;
+cleanup_nand:
+ nand_cleanup(&priv->chip);
err:
fsl_elbc_chip_remove(priv);
+
return ret;
}
@@ -942,7 +945,9 @@ static int fsl_elbc_nand_remove(struct platform_device *pdev)
{
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+ struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+ nand_release(mtd);
fsl_elbc_chip_remove(priv);
mutex_lock(&fsl_elbc_nand_mutex);
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 61aae0224078..382b67e97174 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -342,9 +342,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_READID:
case NAND_CMD_PARAM: {
+ /*
+ * For READID, read 8 bytes that are currently used.
+ * For PARAM, read all 3 copies of 256-bytes pages.
+ */
+ int len = 8;
int timing = IFC_FIR_OP_RB;
- if (command == NAND_CMD_PARAM)
+ if (command == NAND_CMD_PARAM) {
timing = IFC_FIR_OP_RBCD;
+ len = 256 * 3;
+ }
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
@@ -354,12 +361,8 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
&ifc->ifc_nand.nand_fcr0);
ifc_out32(column, &ifc->ifc_nand.row3);
- /*
- * although currently it's 8 bytes for READID, we always read
- * the maximum 256 bytes(for PARAM)
- */
- ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
- ifc_nand_ctrl->read_bytes = 256;
+ ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = len;
set_addr(mtd, 0, 0, 0);
fsl_ifc_run_command(mtd);
@@ -924,8 +927,6 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
{
struct mtd_info *mtd = nand_to_mtd(&priv->chip);
- nand_release(mtd);
-
kfree(mtd->name);
if (priv->vbase)
@@ -1059,21 +1060,29 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
- mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+ ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+ if (ret)
+ goto cleanup_nand;
dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
(unsigned long long)res.start, priv->bank);
+
return 0;
+cleanup_nand:
+ nand_cleanup(&priv->chip);
err:
fsl_ifc_chip_remove(priv);
+
return ret;
}
static int fsl_ifc_nand_remove(struct platform_device *dev)
{
struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+ struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+ nand_release(mtd);
fsl_ifc_chip_remove(priv);
mutex_lock(&fsl_ifc_nand_mutex);
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 28c48dcc514e..f4a5a317d4ae 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -1022,12 +1022,12 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->read_dma_chan = dma_request_channel(mask, filter, NULL);
if (!host->read_dma_chan) {
dev_err(&pdev->dev, "Unable to get read dma channel\n");
- goto err_req_read_chnl;
+ goto disable_clk;
}
host->write_dma_chan = dma_request_channel(mask, filter, NULL);
if (!host->write_dma_chan) {
dev_err(&pdev->dev, "Unable to get write dma channel\n");
- goto err_req_write_chnl;
+ goto release_dma_read_chan;
}
}
@@ -1050,7 +1050,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
ret = nand_scan_ident(mtd, 1, NULL);
if (ret) {
dev_err(&pdev->dev, "No NAND Device found!\n");
- goto err_scan_ident;
+ goto release_dma_write_chan;
}
if (AMBA_REV_BITS(host->pid) >= 8) {
@@ -1065,7 +1065,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
mtd->oobsize);
ret = -EINVAL;
- goto err_probe;
+ goto release_dma_write_chan;
}
mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
@@ -1090,7 +1090,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported ECC mode!\n");
- goto err_probe;
+ goto release_dma_write_chan;
}
/*
@@ -1110,7 +1110,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
"No oob scheme defined for oobsize %d\n",
mtd->oobsize);
ret = -EINVAL;
- goto err_probe;
+ goto release_dma_write_chan;
}
}
}
@@ -1118,26 +1118,29 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
/* Second stage of scan to fill MTD data-structures */
ret = nand_scan_tail(mtd);
if (ret)
- goto err_probe;
+ goto release_dma_write_chan;
mtd->name = "nand";
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
- goto err_probe;
+ goto cleanup_nand;
platform_set_drvdata(pdev, host);
dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
+
return 0;
-err_probe:
-err_scan_ident:
+cleanup_nand:
+ nand_cleanup(nand);
+release_dma_write_chan:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->write_dma_chan);
-err_req_write_chnl:
+release_dma_read_chan:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->read_dma_chan);
-err_req_read_chnl:
+disable_clk:
clk_disable_unprepare(host->clk);
+
return ret;
}
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index e94556705dc7..83697b8df871 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -258,8 +258,9 @@ int bch_set_geometry(struct gpmi_nand_data *this)
unsigned int gf_len;
int ret;
- if (common_nfc_set_geometry(this))
- return !0;
+ ret = common_nfc_set_geometry(this);
+ if (ret)
+ return ret;
block_count = bch_geo->ecc_chunk_count - 1;
block_size = bch_geo->ecc_chunk_size;
@@ -544,19 +545,13 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
return reg & mask;
}
-static inline void set_dma_type(struct gpmi_nand_data *this,
- enum dma_ops_type type)
-{
- this->last_dma_type = this->dma_type;
- this->dma_type = type;
-}
-
int gpmi_send_command(struct gpmi_nand_data *this)
{
struct dma_chan *channel = get_dma_chan(this);
struct dma_async_tx_descriptor *desc;
struct scatterlist *sgl;
int chip = this->current_chip;
+ int ret;
u32 pio[3];
/* [1] send out the PIO words */
@@ -586,15 +581,19 @@ int gpmi_send_command(struct gpmi_nand_data *this)
return -EINVAL;
/* [3] submit the DMA */
- set_dma_type(this, DMA_FOR_COMMAND);
- return start_dma_without_bch_irq(this, desc);
+ ret = start_dma_without_bch_irq(this, desc);
+
+ dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
+
+ return ret;
}
-int gpmi_send_data(struct gpmi_nand_data *this)
+int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len)
{
struct dma_async_tx_descriptor *desc;
struct dma_chan *channel = get_dma_chan(this);
int chip = this->current_chip;
+ int ret;
uint32_t command_mode;
uint32_t address;
u32 pio[2];
@@ -608,7 +607,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(address)
- | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ | BF_GPMI_CTRL0_XFER_COUNT(len);
pio[1] = 0;
desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
@@ -616,7 +615,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
return -EINVAL;
/* [2] send DMA request */
- prepare_data_dma(this, DMA_TO_DEVICE);
+ prepare_data_dma(this, buf, len, DMA_TO_DEVICE);
desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -624,16 +623,21 @@ int gpmi_send_data(struct gpmi_nand_data *this)
return -EINVAL;
/* [3] submit the DMA */
- set_dma_type(this, DMA_FOR_WRITE_DATA);
- return start_dma_without_bch_irq(this, desc);
+ ret = start_dma_without_bch_irq(this, desc);
+
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
+
+ return ret;
}
-int gpmi_read_data(struct gpmi_nand_data *this)
+int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len)
{
struct dma_async_tx_descriptor *desc;
struct dma_chan *channel = get_dma_chan(this);
int chip = this->current_chip;
+ int ret;
u32 pio[2];
+ bool direct;
/* [1] : send PIO */
pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
@@ -641,7 +645,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
- | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ | BF_GPMI_CTRL0_XFER_COUNT(len);
pio[1] = 0;
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
@@ -650,7 +654,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
return -EINVAL;
/* [2] : send DMA request */
- prepare_data_dma(this, DMA_FROM_DEVICE);
+ direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE);
desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -658,8 +662,14 @@ int gpmi_read_data(struct gpmi_nand_data *this)
return -EINVAL;
/* [3] : submit the DMA */
- set_dma_type(this, DMA_FOR_READ_DATA);
- return start_dma_without_bch_irq(this, desc);
+
+ ret = start_dma_without_bch_irq(this, desc);
+
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
+ if (!direct)
+ memcpy(buf, this->data_buffer_dma, len);
+
+ return ret;
}
int gpmi_send_page(struct gpmi_nand_data *this,
@@ -703,7 +713,6 @@ int gpmi_send_page(struct gpmi_nand_data *this,
if (!desc)
return -EINVAL;
- set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
return start_dma_with_bch_irq(this, desc);
}
@@ -785,7 +794,6 @@ int gpmi_read_page(struct gpmi_nand_data *this,
return -EINVAL;
/* [4] submit the DMA */
- set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
return start_dma_with_bch_irq(this, desc);
}
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index c2597c8107a0..f6aa358a3452 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -198,17 +198,16 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
*
* We may have available oob space in this case.
*/
-static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
+ unsigned int ecc_strength,
+ unsigned int ecc_step)
{
struct bch_geometry *geo = &this->bch_geometry;
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int block_mark_bit_offset;
- if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
- return -EINVAL;
-
- switch (chip->ecc_step_ds) {
+ switch (ecc_step) {
case SZ_512:
geo->gf_len = 13;
break;
@@ -221,8 +220,8 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
chip->ecc_strength_ds, chip->ecc_step_ds);
return -EINVAL;
}
- geo->ecc_chunk_size = chip->ecc_step_ds;
- geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
+ geo->ecc_chunk_size = ecc_step;
+ geo->ecc_strength = round_up(ecc_strength, 2);
if (!gpmi_check_ecc(this))
return -EINVAL;
@@ -230,7 +229,7 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
if (geo->ecc_chunk_size < mtd->oobsize) {
dev_err(this->dev,
"unsupported nand chip. ecc size: %d, oob size : %d\n",
- chip->ecc_step_ds, mtd->oobsize);
+ ecc_step, mtd->oobsize);
return -EINVAL;
}
@@ -423,9 +422,20 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
+ struct nand_chip *chip = &this->nand;
+
+ if (chip->ecc.strength > 0 && chip->ecc.size > 0)
+ return set_geometry_by_ecc_info(this, chip->ecc.strength,
+ chip->ecc.size);
+
if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
- || legacy_set_geometry(this))
- return set_geometry_by_ecc_info(this);
+ || legacy_set_geometry(this)) {
+ if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+ return -EINVAL;
+
+ return set_geometry_by_ecc_info(this, chip->ecc_strength_ds,
+ chip->ecc_step_ds);
+ }
return 0;
}
@@ -437,33 +447,32 @@ struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
}
/* Can we use the upper's buffer directly for DMA? */
-void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
+bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, int len,
+ enum dma_data_direction dr)
{
struct scatterlist *sgl = &this->data_sgl;
int ret;
/* first try to map the upper buffer directly */
- if (virt_addr_valid(this->upper_buf) &&
- !object_is_on_stack(this->upper_buf)) {
- sg_init_one(sgl, this->upper_buf, this->upper_len);
+ if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
+ sg_init_one(sgl, buf, len);
ret = dma_map_sg(this->dev, sgl, 1, dr);
if (ret == 0)
goto map_fail;
- this->direct_dma_map_ok = true;
- return;
+ return true;
}
map_fail:
/* We have to use our own DMA buffer. */
- sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
+ sg_init_one(sgl, this->data_buffer_dma, len);
if (dr == DMA_TO_DEVICE)
- memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
+ memcpy(this->data_buffer_dma, buf, len);
dma_map_sg(this->dev, sgl, 1, dr);
- this->direct_dma_map_ok = false;
+ return false;
}
/* This will be called after the DMA operation is finished. */
@@ -472,31 +481,6 @@ static void dma_irq_callback(void *param)
struct gpmi_nand_data *this = param;
struct completion *dma_c = &this->dma_done;
- switch (this->dma_type) {
- case DMA_FOR_COMMAND:
- dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
- break;
-
- case DMA_FOR_READ_DATA:
- dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
- if (this->direct_dma_map_ok == false)
- memcpy(this->upper_buf, this->data_buffer_dma,
- this->upper_len);
- break;
-
- case DMA_FOR_WRITE_DATA:
- dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
- break;
-
- case DMA_FOR_READ_ECC_PAGE:
- case DMA_FOR_WRITE_ECC_PAGE:
- /* We have to wait the BCH interrupt to finish. */
- break;
-
- default:
- dev_err(this->dev, "in wrong DMA operation.\n");
- }
-
complete(dma_c);
}
@@ -516,8 +500,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
/* Wait for the interrupt from the DMA block. */
timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
if (!timeout) {
- dev_err(this->dev, "DMA timeout, last DMA :%d\n",
- this->last_dma_type);
+ dev_err(this->dev, "DMA timeout, last DMA\n");
gpmi_dump_info(this);
return -ETIMEDOUT;
}
@@ -546,8 +529,7 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
/* Wait for the interrupt from the BCH block. */
timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
if (!timeout) {
- dev_err(this->dev, "BCH timeout, last DMA :%d\n",
- this->last_dma_type);
+ dev_err(this->dev, "BCH timeout\n");
gpmi_dump_info(this);
return -ETIMEDOUT;
}
@@ -695,56 +677,6 @@ static void release_resources(struct gpmi_nand_data *this)
release_dma_channels(this);
}
-static int read_page_prepare(struct gpmi_nand_data *this,
- void *destination, unsigned length,
- void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
- void **use_virt, dma_addr_t *use_phys)
-{
- struct device *dev = this->dev;
-
- if (virt_addr_valid(destination)) {
- dma_addr_t dest_phys;
-
- dest_phys = dma_map_single(dev, destination,
- length, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dest_phys)) {
- if (alt_size < length) {
- dev_err(dev, "Alternate buffer is too small\n");
- return -ENOMEM;
- }
- goto map_failed;
- }
- *use_virt = destination;
- *use_phys = dest_phys;
- this->direct_dma_map_ok = true;
- return 0;
- }
-
-map_failed:
- *use_virt = alt_virt;
- *use_phys = alt_phys;
- this->direct_dma_map_ok = false;
- return 0;
-}
-
-static inline void read_page_end(struct gpmi_nand_data *this,
- void *destination, unsigned length,
- void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
- void *used_virt, dma_addr_t used_phys)
-{
- if (this->direct_dma_map_ok)
- dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
-}
-
-static inline void read_page_swap_end(struct gpmi_nand_data *this,
- void *destination, unsigned length,
- void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
- void *used_virt, dma_addr_t used_phys)
-{
- if (!this->direct_dma_map_ok)
- memcpy(destination, alt_virt, length);
-}
-
static int send_page_prepare(struct gpmi_nand_data *this,
const void *source, unsigned length,
void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
@@ -946,10 +878,8 @@ static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
struct gpmi_nand_data *this = nand_get_controller_data(chip);
dev_dbg(this->dev, "len is %d\n", len);
- this->upper_buf = buf;
- this->upper_len = len;
- gpmi_read_data(this);
+ gpmi_read_data(this, buf, len);
}
static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
@@ -958,10 +888,8 @@ static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
struct gpmi_nand_data *this = nand_get_controller_data(chip);
dev_dbg(this->dev, "len is %d\n", len);
- this->upper_buf = (uint8_t *)buf;
- this->upper_len = len;
- gpmi_send_data(this);
+ gpmi_send_data(this, buf, len);
}
static uint8_t gpmi_read_byte(struct mtd_info *mtd)
@@ -1031,44 +959,46 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
struct mtd_info *mtd = nand_to_mtd(chip);
void *payload_virt;
dma_addr_t payload_phys;
- void *auxiliary_virt;
- dma_addr_t auxiliary_phys;
unsigned int i;
unsigned char *status;
unsigned int max_bitflips = 0;
int ret;
+ bool direct = false;
dev_dbg(this->dev, "page number is : %d\n", page);
- ret = read_page_prepare(this, buf, nfc_geo->payload_size,
- this->payload_virt, this->payload_phys,
- nfc_geo->payload_size,
- &payload_virt, &payload_phys);
- if (ret) {
- dev_err(this->dev, "Inadequate DMA buffer\n");
- ret = -ENOMEM;
- return ret;
+
+ payload_virt = this->payload_virt;
+ payload_phys = this->payload_phys;
+
+ if (virt_addr_valid(buf)) {
+ dma_addr_t dest_phys;
+
+ dest_phys = dma_map_single(this->dev, buf, nfc_geo->payload_size,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(this->dev, dest_phys)) {
+ payload_virt = buf;
+ payload_phys = dest_phys;
+ direct = true;
+ }
}
- auxiliary_virt = this->auxiliary_virt;
- auxiliary_phys = this->auxiliary_phys;
/* go! */
- ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
- read_page_end(this, buf, nfc_geo->payload_size,
- this->payload_virt, this->payload_phys,
- nfc_geo->payload_size,
- payload_virt, payload_phys);
+ ret = gpmi_read_page(this, payload_phys, this->auxiliary_phys);
+
+ if (direct)
+ dma_unmap_single(this->dev, payload_phys, nfc_geo->payload_size,
+ DMA_FROM_DEVICE);
+
if (ret) {
dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
return ret;
}
/* Loop over status bytes, accumulating ECC status. */
- status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+ status = this->auxiliary_virt + nfc_geo->auxiliary_status_offset;
- read_page_swap_end(this, buf, nfc_geo->payload_size,
- this->payload_virt, this->payload_phys,
- nfc_geo->payload_size,
- payload_virt, payload_phys);
+ if (!direct)
+ memcpy(buf, this->payload_virt, nfc_geo->payload_size);
for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
@@ -1123,7 +1053,7 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
buf + i * nfc_geo->ecc_chunk_size,
nfc_geo->ecc_chunk_size,
eccbuf, eccbytes,
- auxiliary_virt,
+ this->auxiliary_virt,
nfc_geo->metadata_size,
nfc_geo->ecc_strength);
} else {
@@ -1151,7 +1081,7 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
}
/* handle the block mark swapping */
- block_mark_swapping(this, buf, auxiliary_virt);
+ block_mark_swapping(this, buf, this->auxiliary_virt);
if (oob_required) {
/*
@@ -1165,7 +1095,7 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
* the block mark.
*/
memset(chip->oob_poi, ~0, mtd->oobsize);
- chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+ chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
}
return max_bitflips;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index 62fde59b995f..6aa10d6962d6 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -77,15 +77,6 @@ struct boot_rom_geometry {
unsigned int search_area_stride_exponent;
};
-/* DMA operations types */
-enum dma_ops_type {
- DMA_FOR_COMMAND = 1,
- DMA_FOR_READ_DATA,
- DMA_FOR_WRITE_DATA,
- DMA_FOR_READ_ECC_PAGE,
- DMA_FOR_WRITE_ECC_PAGE
-};
-
enum gpmi_type {
IS_MX23,
IS_MX28,
@@ -150,13 +141,6 @@ struct gpmi_nand_data {
int current_chip;
unsigned int command_length;
- /* passed from upper layer */
- uint8_t *upper_buf;
- int upper_len;
-
- /* for DMA operations */
- bool direct_dma_map_ok;
-
struct scatterlist cmd_sgl;
char *cmd_buffer;
@@ -178,8 +162,6 @@ struct gpmi_nand_data {
/* DMA channels */
#define DMA_CHANS 8
struct dma_chan *dma_chans[DMA_CHANS];
- enum dma_ops_type last_dma_type;
- enum dma_ops_type dma_type;
struct completion dma_done;
/* private */
@@ -189,7 +171,7 @@ struct gpmi_nand_data {
/* Common Services */
int common_nfc_set_geometry(struct gpmi_nand_data *);
struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
-void prepare_data_dma(struct gpmi_nand_data *,
+bool prepare_data_dma(struct gpmi_nand_data *, const void *buf, int len,
enum dma_data_direction dr);
int start_dma_without_bch_irq(struct gpmi_nand_data *,
struct dma_async_tx_descriptor *);
@@ -208,8 +190,9 @@ int gpmi_disable_clk(struct gpmi_nand_data *this);
int gpmi_setup_data_interface(struct mtd_info *mtd, int chipnr,
const struct nand_data_interface *conf);
void gpmi_nfc_apply_timings(struct gpmi_nand_data *this);
-int gpmi_read_data(struct gpmi_nand_data *);
-int gpmi_send_data(struct gpmi_nand_data *);
+int gpmi_read_data(struct gpmi_nand_data *, void *buf, int len);
+int gpmi_send_data(struct gpmi_nand_data *, const void *buf, int len);
+
int gpmi_send_page(struct gpmi_nand_data *,
dma_addr_t payload, dma_addr_t auxiliary);
int gpmi_read_page(struct gpmi_nand_data *,
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index 27558a67fa41..a1e009c8e556 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -731,23 +731,19 @@ static int hisi_nfc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "no IRQ resource defined\n");
- ret = -ENXIO;
- goto err_res;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->iobase = devm_ioremap_resource(dev, res);
- if (IS_ERR(host->iobase)) {
- ret = PTR_ERR(host->iobase);
- goto err_res;
- }
+ if (IS_ERR(host->iobase))
+ return PTR_ERR(host->iobase);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
host->mmio = devm_ioremap_resource(dev, res);
if (IS_ERR(host->mmio)) {
- ret = PTR_ERR(host->mmio);
dev_err(dev, "devm_ioremap_resource[1] fail\n");
- goto err_res;
+ return PTR_ERR(host->mmio);
}
mtd->name = "hisi_nand";
@@ -770,19 +766,17 @@ static int hisi_nfc_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
if (ret) {
dev_err(dev, "failed to request IRQ\n");
- goto err_res;
+ return ret;
}
ret = nand_scan_ident(mtd, max_chips, NULL);
if (ret)
- goto err_res;
+ return ret;
host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
&host->dma_buffer, GFP_KERNEL);
- if (!host->buffer) {
- ret = -ENOMEM;
- goto err_res;
- }
+ if (!host->buffer)
+ return -ENOMEM;
host->dma_oob = host->dma_buffer + mtd->writesize;
memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
@@ -798,8 +792,7 @@ static int hisi_nfc_probe(struct platform_device *pdev)
*/
default:
dev_err(dev, "NON-2KB page size nand flash\n");
- ret = -EINVAL;
- goto err_res;
+ return -EINVAL;
}
hinfc_write(host, flag, HINFC504_CON);
@@ -809,21 +802,17 @@ static int hisi_nfc_probe(struct platform_device *pdev)
ret = nand_scan_tail(mtd);
if (ret) {
dev_err(dev, "nand_scan_tail failed: %d\n", ret);
- goto err_res;
+ return ret;
}
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "Err MTD partition=%d\n", ret);
- goto err_mtd;
+ nand_cleanup(chip);
+ return ret;
}
return 0;
-
-err_mtd:
- nand_release(mtd);
-err_res:
- return ret;
}
static int hisi_nfc_remove(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index e357948a7505..052d123a8304 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -673,7 +673,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->io_base = devm_ioremap_resource(&pdev->dev, rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
-
+
host->io_base_phy = rc->start;
nand_chip = &host->nand_chip;
@@ -706,11 +706,11 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "Clock initialization failure\n");
res = -ENOENT;
- goto err_exit1;
+ goto free_gpio;
}
res = clk_prepare_enable(host->clk);
if (res)
- goto err_put_clk;
+ goto put_clk;
nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
nand_chip->dev_ready = lpc32xx_nand_device_ready;
@@ -744,7 +744,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
res = lpc32xx_dma_setup(host);
if (res) {
res = -EIO;
- goto err_exit2;
+ goto unprepare_clk;
}
}
@@ -754,18 +754,18 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
*/
res = nand_scan_ident(mtd, 1, NULL);
if (res)
- goto err_exit3;
+ goto release_dma_chan;
host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
if (!host->dma_buf) {
res = -ENOMEM;
- goto err_exit3;
+ goto release_dma_chan;
}
host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
if (!host->dummy_buf) {
res = -ENOMEM;
- goto err_exit3;
+ goto release_dma_chan;
}
nand_chip->ecc.mode = NAND_ECC_HW;
@@ -783,14 +783,14 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
if (host->irq < 0) {
dev_err(&pdev->dev, "failed to get platform irq\n");
res = -EINVAL;
- goto err_exit3;
+ goto release_dma_chan;
}
if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
res = -ENXIO;
- goto err_exit3;
+ goto release_dma_chan;
}
/*
@@ -799,27 +799,29 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
*/
res = nand_scan_tail(mtd);
if (res)
- goto err_exit4;
+ goto free_irq;
mtd->name = DRV_NAME;
res = mtd_device_register(mtd, host->ncfg->parts,
host->ncfg->num_parts);
- if (!res)
- return res;
+ if (res)
+ goto cleanup_nand;
- nand_release(mtd);
+ return 0;
-err_exit4:
+cleanup_nand:
+ nand_cleanup(nand_chip);
+free_irq:
free_irq(host->irq, host);
-err_exit3:
+release_dma_chan:
if (use_dma)
dma_release_channel(host->dma_chan);
-err_exit2:
+unprepare_clk:
clk_disable_unprepare(host->clk);
-err_put_clk:
+put_clk:
clk_put(host->clk);
-err_exit1:
+free_gpio:
lpc32xx_wp_enable(host);
gpio_free(host->ncfg->wp_gpio);
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 5f7cc6da0a7f..42820aa1abab 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -831,11 +831,11 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "Clock failure\n");
res = -ENOENT;
- goto err_exit1;
+ goto enable_wp;
}
res = clk_prepare_enable(host->clk);
if (res)
- goto err_exit1;
+ goto enable_wp;
/* Set NAND IO addresses and command/ready functions */
chip->IO_ADDR_R = SLC_DATA(host->io_base);
@@ -874,19 +874,19 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
GFP_KERNEL);
if (host->data_buf == NULL) {
res = -ENOMEM;
- goto err_exit2;
+ goto unprepare_clk;
}
res = lpc32xx_nand_dma_setup(host);
if (res) {
res = -EIO;
- goto err_exit2;
+ goto unprepare_clk;
}
/* Find NAND device */
res = nand_scan_ident(mtd, 1, NULL);
if (res)
- goto err_exit3;
+ goto release_dma;
/* OOB and ECC CPU and DMA work areas */
host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
@@ -920,21 +920,23 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
*/
res = nand_scan_tail(mtd);
if (res)
- goto err_exit3;
+ goto release_dma;
mtd->name = "nxp_lpc3220_slc";
res = mtd_device_register(mtd, host->ncfg->parts,
host->ncfg->num_parts);
- if (!res)
- return res;
+ if (res)
+ goto cleanup_nand;
- nand_release(mtd);
+ return 0;
-err_exit3:
+cleanup_nand:
+ nand_cleanup(chip);
+release_dma:
dma_release_channel(host->dma_chan);
-err_exit2:
+unprepare_clk:
clk_disable_unprepare(host->clk);
-err_exit1:
+enable_wp:
lpc32xx_wp_enable(host);
return res;
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 40d86a861a70..6432bd70c3b3 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -500,7 +500,6 @@ static int mtk_ecc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_ecc *ecc;
struct resource *res;
- const struct of_device_id *of_ecc_id = NULL;
u32 max_eccdata_size;
int irq, ret;
@@ -508,11 +507,7 @@ static int mtk_ecc_probe(struct platform_device *pdev)
if (!ecc)
return -ENOMEM;
- of_ecc_id = of_match_device(mtk_ecc_dt_match, &pdev->dev);
- if (!of_ecc_id)
- return -ENODEV;
-
- ecc->caps = of_ecc_id->data;
+ ecc->caps = of_device_get_match_data(dev);
max_eccdata_size = ecc->caps->num_ecc_strength - 1;
max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 6977da3a26aa..75c845adb050 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1434,7 +1434,6 @@ static int mtk_nfc_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct mtk_nfc *nfc;
struct resource *res;
- const struct of_device_id *of_nfc_id = NULL;
int ret, irq;
nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
@@ -1452,6 +1451,7 @@ static int mtk_nfc_probe(struct platform_device *pdev)
else if (!nfc->ecc)
return -ENODEV;
+ nfc->caps = of_device_get_match_data(dev);
nfc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1498,14 +1498,6 @@ static int mtk_nfc_probe(struct platform_device *pdev)
goto clk_disable;
}
- of_nfc_id = of_match_device(mtk_nfc_id_table, &pdev->dev);
- if (!of_nfc_id) {
- ret = -ENODEV;
- goto clk_disable;
- }
-
- nfc->caps = of_nfc_id->data;
-
platform_set_drvdata(pdev, nfc);
ret = mtk_nfc_nand_chips_init(dev, nfc);
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index f28c3a555861..10c4f9919850 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -2174,7 +2174,6 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
struct mtd_info *mtd = nand_to_mtd(chip);
const u8 *params = data;
int i, ret;
- u8 status;
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
@@ -2188,26 +2187,18 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
};
struct nand_operation op = NAND_OPERATION(instrs);
- ret = nand_exec_op(chip, &op);
- if (ret)
- return ret;
-
- ret = nand_status_op(chip, &status);
- if (ret)
- return ret;
- } else {
- chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->write_byte(mtd, params[i]);
+ return nand_exec_op(chip, &op);
+ }
- ret = chip->waitfunc(mtd, chip);
- if (ret < 0)
- return ret;
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, params[i]);
- status = ret;
- }
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
- if (status & NAND_STATUS_FAIL)
+ if (ret & NAND_STATUS_FAIL)
return -EIO;
return 0;
@@ -5092,6 +5083,37 @@ ext_out:
}
/*
+ * Recover data with bit-wise majority
+ */
+static void nand_bit_wise_majority(const void **srcbufs,
+ unsigned int nsrcbufs,
+ void *dstbuf,
+ unsigned int bufsize)
+{
+ int i, j, k;
+
+ for (i = 0; i < bufsize; i++) {
+ u8 val = 0;
+
+ for (j = 0; j < 8; j++) {
+ unsigned int cnt = 0;
+
+ for (k = 0; k < nsrcbufs; k++) {
+ const u8 *srcbuf = srcbufs[k];
+
+ if (srcbuf[i] & BIT(j))
+ cnt++;
+ }
+
+ if (cnt > nsrcbufs / 2)
+ val |= BIT(j);
+ }
+
+ ((u8 *)dstbuf)[i] = val;
+ }
+}
+
+/*
* Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
*/
static int nand_flash_detect_onfi(struct nand_chip *chip)
@@ -5107,7 +5129,7 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
return 0;
/* ONFI chip: allocate a buffer to hold its parameter page */
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
if (!p)
return -ENOMEM;
@@ -5118,21 +5140,32 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
}
for (i = 0; i < 3; i++) {
- ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
if (ret) {
ret = 0;
goto free_onfi_param_page;
}
- if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
+ if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
le16_to_cpu(p->crc)) {
+ if (i)
+ memcpy(p, &p[i], sizeof(*p));
break;
}
}
if (i == 3) {
- pr_err("Could not find valid ONFI parameter page; aborting\n");
- goto free_onfi_param_page;
+ const void *srcbufs[3] = {p, p + 1, p + 2};
+
+ pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
+ nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
+ sizeof(*p));
+
+ if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
+ le16_to_cpu(p->crc)) {
+ pr_err("ONFI parameter recovery failed, aborting\n");
+ goto free_onfi_param_page;
+ }
}
/* Check version */
@@ -6635,24 +6668,26 @@ EXPORT_SYMBOL(nand_scan_tail);
#endif
/**
- * nand_scan - [NAND Interface] Scan for the NAND device
+ * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
* @maxchips: number of chips to scan for
+ * @ids: optional flash IDs table
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
* appropriate values.
*/
-int nand_scan(struct mtd_info *mtd, int maxchips)
+int nand_scan_with_ids(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *ids)
{
int ret;
- ret = nand_scan_ident(mtd, maxchips, NULL);
+ ret = nand_scan_ident(mtd, maxchips, ids);
if (!ret)
ret = nand_scan_tail(mtd);
return ret;
}
-EXPORT_SYMBOL(nand_scan);
+EXPORT_SYMBOL(nand_scan_with_ids);
/**
* nand_cleanup - [NAND Interface] Free resources held by the NAND device
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index aad42812a353..d831a141a196 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -166,48 +166,15 @@
#define NFC_MAX_CS 7
/*
- * Ready/Busy detection type: describes the Ready/Busy detection modes
- *
- * @RB_NONE: no external detection available, rely on STATUS command
- * and software timeouts
- * @RB_NATIVE: use sunxi NAND controller Ready/Busy support. The Ready/Busy
- * pin of the NAND flash chip must be connected to one of the
- * native NAND R/B pins (those which can be muxed to the NAND
- * Controller)
- * @RB_GPIO: use a simple GPIO to handle Ready/Busy status. The Ready/Busy
- * pin of the NAND flash chip must be connected to a GPIO capable
- * pin.
- */
-enum sunxi_nand_rb_type {
- RB_NONE,
- RB_NATIVE,
- RB_GPIO,
-};
-
-/*
- * Ready/Busy structure: stores information related to Ready/Busy detection
- *
- * @type: the Ready/Busy detection mode
- * @info: information related to the R/B detection mode. Either a gpio
- * id or a native R/B id (those supported by the NAND controller).
- */
-struct sunxi_nand_rb {
- enum sunxi_nand_rb_type type;
- union {
- int gpio;
- int nativeid;
- } info;
-};
-
-/*
* Chip Select structure: stores information related to NAND Chip Select
*
* @cs: the NAND CS id used to communicate with a NAND Chip
- * @rb: the Ready/Busy description
+ * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the
+ * NFC
*/
struct sunxi_nand_chip_sel {
u8 cs;
- struct sunxi_nand_rb rb;
+ s8 rb;
};
/*
@@ -440,30 +407,19 @@ static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
- struct sunxi_nand_rb *rb;
- int ret;
+ u32 mask;
if (sunxi_nand->selected < 0)
return 0;
- rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
-
- switch (rb->type) {
- case RB_NATIVE:
- ret = !!(readl(nfc->regs + NFC_REG_ST) &
- NFC_RB_STATE(rb->info.nativeid));
- break;
- case RB_GPIO:
- ret = gpio_get_value(rb->info.gpio);
- break;
- case RB_NONE:
- default:
- ret = 0;
+ if (sunxi_nand->sels[sunxi_nand->selected].rb < 0) {
dev_err(nfc->dev, "cannot check R/B NAND status!\n");
- break;
+ return 0;
}
- return ret;
+ mask = NFC_RB_STATE(sunxi_nand->sels[sunxi_nand->selected].rb);
+
+ return !!(readl(nfc->regs + NFC_REG_ST) & mask);
}
static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -488,12 +444,11 @@ static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
NFC_PAGE_SHIFT(nand->page_shift);
- if (sel->rb.type == RB_NONE) {
+ if (sel->rb < 0) {
nand->dev_ready = NULL;
} else {
nand->dev_ready = sunxi_nfc_dev_ready;
- if (sel->rb.type == RB_NATIVE)
- ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
+ ctl |= NFC_RB_SEL(sel->rb);
}
writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
@@ -1946,26 +1901,10 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
chip->sels[i].cs = tmp;
if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
- tmp < 2) {
- chip->sels[i].rb.type = RB_NATIVE;
- chip->sels[i].rb.info.nativeid = tmp;
- } else {
- ret = of_get_named_gpio(np, "rb-gpios", i);
- if (ret >= 0) {
- tmp = ret;
- chip->sels[i].rb.type = RB_GPIO;
- chip->sels[i].rb.info.gpio = tmp;
- ret = devm_gpio_request(dev, tmp, "nand-rb");
- if (ret)
- return ret;
-
- ret = gpio_direction_input(tmp);
- if (ret)
- return ret;
- } else {
- chip->sels[i].rb.type = RB_NONE;
- }
- }
+ tmp < 2)
+ chip->sels[i].rb = tmp;
+ else
+ chip->sels[i].rb = -1;
}
nand = &chip->nand;
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index a6fbfa4e5799..6281da3dadac 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -272,28 +272,37 @@ static int memcmpb(void *a, int c, int n)
static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
int check_oob)
{
- u8 buf[SECTORSIZE + nftl->mbd.mtd->oobsize];
struct mtd_info *mtd = nftl->mbd.mtd;
size_t retlen;
- int i;
+ int i, ret;
+ u8 *buf;
+
+ buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
+ if (!buf)
+ return -1;
+ ret = -1;
for (i = 0; i < len; i += SECTORSIZE) {
if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
- return -1;
+ goto out;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
- return -1;
+ goto out;
if (check_oob) {
if(nftl_read_oob(mtd, address, mtd->oobsize,
&retlen, &buf[SECTORSIZE]) < 0)
- return -1;
+ goto out;
if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
- return -1;
+ goto out;
}
address += SECTORSIZE;
}
- return 0;
+ ret = 0;
+
+out:
+ kfree(buf);
+ return ret;
}
/* NFTL_format: format a Erase Unit by erasing ALL Erase Zones in the Erase Unit and
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 89da88e59121..6cc9c929ff57 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -71,7 +71,7 @@ config SPI_FSL_QUADSPI
config SPI_HISI_SFC
tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
depends on ARCH_HISI || COMPILE_TEST
- depends on HAS_IOMEM && HAS_DMA
+ depends on HAS_IOMEM
help
This enables support for hisilicon SPI-NOR flash controller.
@@ -90,7 +90,7 @@ config SPI_INTEL_SPI
tristate
config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver"
+ tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
depends on X86 && PCI
select SPI_INTEL_SPI
help
@@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI
will be called intel-spi-pci.
config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver"
+ tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
depends on X86
select SPI_INTEL_SPI
help
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 5872f31eaa60..c3f7aaa5d18f 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -18,6 +18,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -73,6 +75,10 @@ struct cqspi_st {
struct completion transfer_complete;
struct mutex bus_mutex;
+ struct dma_chan *rx_chan;
+ struct completion rx_dma_complete;
+ dma_addr_t mmap_phys_base;
+
int current_cs;
int current_page_size;
int current_erase_size;
@@ -930,11 +936,75 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
return len;
}
+static void cqspi_rx_dma_callback(void *param)
+{
+ struct cqspi_st *cqspi = param;
+
+ complete(&cqspi->rx_dma_complete);
+}
+
+static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
+ loff_t from, size_t len)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
+ int ret = 0;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ dma_addr_t dma_dst;
+
+ if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
+ memcpy_fromio(buf, cqspi->ahb_base + from, len);
+ return 0;
+ }
+
+ dma_dst = dma_map_single(nor->dev, buf, len, DMA_DEV_TO_MEM);
+ if (dma_mapping_error(nor->dev, dma_dst)) {
+ dev_err(nor->dev, "dma mapping failed\n");
+ return -ENOMEM;
+ }
+ tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
+ len, flags);
+ if (!tx) {
+ dev_err(nor->dev, "device_prep_dma_memcpy error\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ tx->callback = cqspi_rx_dma_callback;
+ tx->callback_param = cqspi;
+ cookie = tx->tx_submit(tx);
+ reinit_completion(&cqspi->rx_dma_complete);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(nor->dev, "dma_submit_error %d\n", cookie);
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(cqspi->rx_chan);
+ ret = wait_for_completion_timeout(&cqspi->rx_dma_complete,
+ msecs_to_jiffies(len));
+ if (ret <= 0) {
+ dmaengine_terminate_sync(cqspi->rx_chan);
+ dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
+ goto err_unmap;
+ }
+
+err_unmap:
+ dma_unmap_single(nor->dev, dma_dst, len, DMA_DEV_TO_MEM);
+
+ return 0;
+}
+
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
size_t len, u_char *buf)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
- struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 1);
@@ -946,7 +1016,7 @@ static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
return ret;
if (f_pdata->use_direct_mode)
- memcpy_fromio(buf, cqspi->ahb_base + from, len);
+ ret = cqspi_direct_read_execute(nor, buf, from, len);
else
ret = cqspi_indirect_read_execute(nor, buf, from, len);
if (ret)
@@ -1115,6 +1185,21 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
cqspi_controller_enable(cqspi, 1);
}
+static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ cqspi->rx_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(cqspi->rx_chan)) {
+ dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
+ cqspi->rx_chan = NULL;
+ }
+ init_completion(&cqspi->rx_dma_complete);
+}
+
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
const struct spi_nor_hwcaps hwcaps = {
@@ -1192,6 +1277,9 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
f_pdata->use_direct_mode = true;
dev_dbg(nor->dev, "using direct mode for %s\n",
mtd->name);
+
+ if (!cqspi->rx_chan)
+ cqspi_request_mmap_dma(cqspi);
}
}
@@ -1252,6 +1340,7 @@ static int cqspi_probe(struct platform_device *pdev)
dev_err(dev, "Cannot remap AHB address.\n");
return PTR_ERR(cqspi->ahb_base);
}
+ cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
cqspi->ahb_size = resource_size(res_ahb);
init_completion(&cqspi->transfer_complete);
@@ -1322,6 +1411,9 @@ static int cqspi_remove(struct platform_device *pdev)
cqspi_controller_enable(cqspi, 0);
+ if (cqspi->rx_chan)
+ dma_release_channel(cqspi->rx_chan);
+
clk_disable_unprepare(cqspi->clk);
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 3e3c0bbc45c0..7d9620c7ff6c 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -214,6 +214,7 @@ enum fsl_qspi_devtype {
FSL_QUADSPI_IMX7D,
FSL_QUADSPI_IMX6UL,
FSL_QUADSPI_LS1021A,
+ FSL_QUADSPI_LS2080A,
};
struct fsl_qspi_devtype_data {
@@ -267,6 +268,15 @@ static struct fsl_qspi_devtype_data ls1021a_data = {
.driver_data = 0,
};
+static const struct fsl_qspi_devtype_data ls2080a_data = {
+ .devtype = FSL_QUADSPI_LS2080A,
+ .rxfifo = 128,
+ .txfifo = 64,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890,
+};
+
+
#define FSL_QSPI_MAX_CHIP 4
struct fsl_qspi {
struct spi_nor nor[FSL_QSPI_MAX_CHIP];
@@ -661,7 +671,7 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
* causes the controller to clear the buffer, and use the sequence pointed
* by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
*/
-static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
+static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
{
void __iomem *base = q->iobase;
int seqid;
@@ -795,7 +805,7 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
fsl_qspi_init_lut(q);
/* Init for AHB read */
- fsl_qspi_init_abh_read(q);
+ fsl_qspi_init_ahb_read(q);
return 0;
}
@@ -806,6 +816,7 @@ static const struct of_device_id fsl_qspi_dt_ids[] = {
{ .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
{ .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
{ .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
+ { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index 04f9fb5cd9b6..dea7b0c4b339 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -112,7 +112,7 @@ struct hifmc_host {
u32 num_chip;
};
-static inline int wait_op_finish(struct hifmc_host *host)
+static inline int hisi_spi_nor_wait_op_finish(struct hifmc_host *host)
{
u32 reg;
@@ -120,7 +120,7 @@ static inline int wait_op_finish(struct hifmc_host *host)
(reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
}
-static int get_if_type(enum spi_nor_protocol proto)
+static int hisi_spi_nor_get_if_type(enum spi_nor_protocol proto)
{
enum hifmc_iftype if_type;
@@ -208,7 +208,7 @@ static int hisi_spi_nor_op_reg(struct spi_nor *nor,
reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype;
writel(reg, host->regbase + FMC_OP);
- return wait_op_finish(host);
+ return hisi_spi_nor_wait_op_finish(host);
}
static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
@@ -259,9 +259,9 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
reg = OP_CFG_FM_CS(priv->chipselect);
if (op_type == FMC_OP_READ)
- if_type = get_if_type(nor->read_proto);
+ if_type = hisi_spi_nor_get_if_type(nor->read_proto);
else
- if_type = get_if_type(nor->write_proto);
+ if_type = hisi_spi_nor_get_if_type(nor->write_proto);
reg |= OP_CFG_MEM_IF_TYPE(if_type);
if (op_type == FMC_OP_READ)
reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3);
@@ -274,7 +274,7 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
: OP_CTRL_WR_OPCODE(nor->program_opcode);
writel(reg, host->regbase + FMC_OP_DMA);
- return wait_op_finish(host);
+ return hisi_spi_nor_wait_op_finish(host);
}
static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len,
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 699951523179..d2cbfc27826e 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -136,6 +136,7 @@
* @swseq_reg: Use SW sequencer in register reads/writes
* @swseq_erase: Use SW sequencer in erase operation
* @erase_64k: 64k erase supported
+ * @atomic_preopcode: Holds preopcode when atomic sequence is requested
* @opcodes: Opcodes which are supported. This are programmed by BIOS
* before it locks down the controller.
*/
@@ -153,6 +154,7 @@ struct intel_spi {
bool swseq_reg;
bool swseq_erase;
bool erase_64k;
+ u8 atomic_preopcode;
u8 opcodes[8];
};
@@ -285,7 +287,7 @@ static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
u32 val;
return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
- !(val & HSFSTS_CTL_SCIP), 0,
+ !(val & HSFSTS_CTL_SCIP), 40,
INTEL_SPI_TIMEOUT * 1000);
}
@@ -294,7 +296,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
u32 val;
return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
- !(val & SSFSTS_CTL_SCIP), 0,
+ !(val & SSFSTS_CTL_SCIP), 40,
INTEL_SPI_TIMEOUT * 1000);
}
@@ -474,7 +476,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
int optype)
{
u32 val = 0, status;
- u16 preop;
+ u8 atomic_preopcode;
int ret;
ret = intel_spi_opcode_index(ispi, opcode, optype);
@@ -484,17 +486,42 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
if (len > INTEL_SPI_FIFO_SZ)
return -EINVAL;
+ /*
+ * Always clear it after each SW sequencer operation regardless
+ * of whether it is successful or not.
+ */
+ atomic_preopcode = ispi->atomic_preopcode;
+ ispi->atomic_preopcode = 0;
+
/* Only mark 'Data Cycle' bit when there is data to be transferred */
if (len > 0)
val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
val |= ret << SSFSTS_CTL_COP_SHIFT;
val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
val |= SSFSTS_CTL_SCGO;
- preop = readw(ispi->sregs + PREOP_OPTYPE);
- if (preop) {
- val |= SSFSTS_CTL_ACS;
- if (preop >> 8)
- val |= SSFSTS_CTL_SPOP;
+ if (atomic_preopcode) {
+ u16 preop;
+
+ switch (optype) {
+ case OPTYPE_WRITE_NO_ADDR:
+ case OPTYPE_WRITE_WITH_ADDR:
+ /* Pick matching preopcode for the atomic sequence */
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if ((preop & 0xff) == atomic_preopcode)
+ ; /* Do nothing */
+ else if ((preop >> 8) == atomic_preopcode)
+ val |= SSFSTS_CTL_SPOP;
+ else
+ return -EINVAL;
+
+ /* Enable atomic sequence */
+ val |= SSFSTS_CTL_ACS;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
}
writel(val, ispi->sregs + SSFSTS_CTL);
@@ -538,13 +565,31 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/*
* This is handled with atomic operation and preop code in Intel
- * controller so skip it here now. If the controller is not locked,
- * program the opcode to the PREOP register for later use.
+ * controller so we only verify that it is available. If the
+ * controller is not locked, program the opcode to the PREOP
+ * register for later use.
+ *
+ * When hardware sequencer is used there is no need to program
+ * any opcodes (it handles them automatically as part of a command).
*/
if (opcode == SPINOR_OP_WREN) {
- if (!ispi->locked)
+ u16 preop;
+
+ if (!ispi->swseq_reg)
+ return 0;
+
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
+ if (ispi->locked)
+ return -EINVAL;
writel(opcode, ispi->sregs + PREOP_OPTYPE);
+ }
+ /*
+ * This enables atomic sequence on next SW cycle. Will
+ * be cleared after next operation.
+ */
+ ispi->atomic_preopcode = opcode;
return 0;
}
@@ -569,6 +614,13 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
u32 val, status;
ssize_t ret;
+ /*
+ * Atomic sequence is not expected with HW sequencer reads. Make
+ * sure it is cleared regardless.
+ */
+ if (WARN_ON_ONCE(ispi->atomic_preopcode))
+ ispi->atomic_preopcode = 0;
+
switch (nor->read_opcode) {
case SPINOR_OP_READ:
case SPINOR_OP_READ_FAST:
@@ -627,6 +679,9 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
u32 val, status;
ssize_t ret;
+ /* Not needed with HW sequencer write, make sure it is cleared */
+ ispi->atomic_preopcode = 0;
+
while (len > 0) {
block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
@@ -707,6 +762,9 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
return 0;
}
+ /* Not needed with HW sequencer erase, make sure it is cleared */
+ ispi->atomic_preopcode = 0;
+
while (len > 0) {
writel(offs, ispi->base + FADDR);
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 5bfa36e95f35..d9c368c44194 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -284,6 +284,20 @@ static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
if (need_wren)
write_disable(nor);
+ if (!status && !enable &&
+ JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
+ /*
+ * On Winbond W25Q256FV, leaving 4byte mode causes
+ * the Extended Address Register to be set to 1, so all
+ * 3-byte-address reads come from the second 16M.
+ * We must clear the register to enable normal behavior.
+ */
+ write_enable(nor);
+ nor->cmd_buf[0] = 0;
+ nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
+ write_disable(nor);
+ }
+
return status;
default:
/* Spansion style */
@@ -980,6 +994,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
@@ -1049,6 +1064,14 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
@@ -1087,6 +1110,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
@@ -1198,6 +1222,11 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
+ {
+ "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{
@@ -1230,6 +1259,10 @@ static const struct flash_info spi_nor_ids[] = {
{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+
+ /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ },
};
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
index b3c7f6addba7..72553506a00b 100644
--- a/drivers/mtd/spi-nor/stm32-quadspi.c
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -656,7 +656,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
return ret;
}
- rstc = devm_reset_control_get(dev, NULL);
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
udelay(2);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 753494e042d5..d2a726654ff1 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -526,6 +526,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
for (i = ubi->vtbl_slots;
i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
ubi_eba_replace_table(ubi->volumes[i], NULL);
+ ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
kfree(ubi->volumes[i]);
}
}
@@ -1091,6 +1092,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ cancel_work_sync(&ubi->fm_work);
+#endif
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 250e30fac61b..edb1c8362faa 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -490,6 +490,103 @@ out_unlock:
return err;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * check_mapping - check and fixup a mapping
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @pnum: physical eraseblock number
+ *
+ * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
+ * operations, if such an operation is interrupted the mapping still looks
+ * good, but upon first read an ECC is reported to the upper layer.
+ * Normally during the full-scan at attach time this is fixed, for Fastmap
+ * we have to deal with it while reading.
+ * If the PEB behind a LEB shows this symptom we change the mapping to
+ * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
+ *
+ * Returns 0 on success, negative error code in case of failure.
+ */
+static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ int *pnum)
+{
+ int err;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+
+ if (!ubi->fast_attach)
+ return 0;
+
+ if (!vol->checkmap || test_bit(lnum, vol->checkmap))
+ return 0;
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
+ return -ENOMEM;
+
+ err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
+ if (err > 0 && err != UBI_IO_BITFLIPS) {
+ int torture = 0;
+
+ switch (err) {
+ case UBI_IO_FF:
+ case UBI_IO_FF_BITFLIPS:
+ case UBI_IO_BAD_HDR:
+ case UBI_IO_BAD_HDR_EBADMSG:
+ break;
+ default:
+ ubi_assert(0);
+ }
+
+ if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
+ torture = 1;
+
+ down_read(&ubi->fm_eba_sem);
+ vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
+ up_read(&ubi->fm_eba_sem);
+ ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
+
+ *pnum = UBI_LEB_UNMAPPED;
+ } else if (err < 0) {
+ ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
+ *pnum, err);
+
+ goto out_free;
+ } else {
+ int found_vol_id, found_lnum;
+
+ ubi_assert(err == 0 || err == UBI_IO_BITFLIPS);
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+ found_vol_id = be32_to_cpu(vid_hdr->vol_id);
+ found_lnum = be32_to_cpu(vid_hdr->lnum);
+
+ if (found_lnum != lnum || found_vol_id != vol->vol_id) {
+ ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
+ *pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
+ ubi_ro_mode(ubi);
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ set_bit(lnum, vol->checkmap);
+ err = 0;
+
+out_free:
+ ubi_free_vid_buf(vidb);
+
+ return err;
+}
+#else
+static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ int *pnum)
+{
+ return 0;
+}
+#endif
+
/**
* ubi_eba_read_leb - read data.
* @ubi: UBI device description object
@@ -522,7 +619,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return err;
pnum = vol->eba_tbl->entries[lnum].pnum;
- if (pnum < 0) {
+ if (pnum >= 0) {
+ err = check_mapping(ubi, vol, lnum, &pnum);
+ if (err < 0)
+ goto out_unlock;
+ }
+
+ if (pnum == UBI_LEB_UNMAPPED) {
/*
* The logical eraseblock is not mapped, fill the whole buffer
* with 0xFF bytes. The exception is static volumes for which
@@ -931,6 +1034,12 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum >= 0) {
+ err = check_mapping(ubi, vol, lnum, &pnum);
+ if (err < 0)
+ goto out;
+ }
+
+ if (pnum >= 0) {
dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 91705962ba73..462526a10537 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1100,6 +1100,26 @@ free_fm_sb:
goto out;
}
+int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
+{
+ struct ubi_device *ubi = vol->ubi;
+
+ if (!ubi->fast_attach)
+ return 0;
+
+ vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!vol->checkmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
+{
+ kfree(vol->checkmap);
+}
+
/**
* ubi_write_fastmap - writes a fastmap.
* @ubi: UBI device object
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index bfceae5a890e..195ff8ca8211 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -1,28 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
- * Copyright (c) International Business Machines Corp., 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
+ * Copyright (C) International Business Machines Corp., 2006
 * Authors: Artem Bityutskiy (Битюцкий Артём)
* Thomas Gleixner
* Frank Haverkamp
* Oliver Lohmann
* Andreas Arnez
- */
-
-/*
+ *
* This file defines the layout of UBI headers and all the other UBI on-flash
* data structures.
*/
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5fe62653995e..f5ba97c46160 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -334,6 +334,9 @@ struct ubi_eba_leb_desc {
* @changing_leb: %1 if the atomic LEB change ioctl command is in progress
* @direct_writes: %1 if direct writes are enabled for this volume
*
+ * @checkmap: bitmap to remember which PEB->LEB mappings got checked,
+ * protected by UBI LEB lock tree.
+ *
* The @corrupted field indicates that the volume's contents is corrupted.
* Since UBI protects only static volumes, this field is not relevant to
* dynamic volumes - it is user's responsibility to assure their data
@@ -377,6 +380,10 @@ struct ubi_volume {
unsigned int updating:1;
unsigned int changing_leb:1;
unsigned int direct_writes:1;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ unsigned long *checkmap;
+#endif
};
/**
@@ -965,8 +972,12 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi);
int ubi_update_fastmap(struct ubi_device *ubi);
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_attach_info *scan_ai);
+int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
+void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
+int static inline ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
+static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
#endif
/* block.c */
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 3fd8d7ff7a02..0be516780e92 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -139,6 +139,7 @@ static void vol_release(struct device *dev)
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
ubi_eba_replace_table(vol, NULL);
+ ubi_fastmap_destroy_checkmap(vol);
kfree(vol);
}
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 263743e7b741..94d7a865b135 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -534,7 +534,7 @@ static int init_volumes(struct ubi_device *ubi,
const struct ubi_attach_info *ai,
const struct ubi_vtbl_record *vtbl)
{
- int i, reserved_pebs = 0;
+ int i, err, reserved_pebs = 0;
struct ubi_ainf_volume *av;
struct ubi_volume *vol;
@@ -620,6 +620,16 @@ static int init_volumes(struct ubi_device *ubi,
(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
vol->used_bytes += av->last_data_size;
vol->last_eb_bytes = av->last_data_size;
+
+ /*
+ * We use ubi->peb_count and not vol->reserved_pebs because
+ * we want to keep the code simple. Otherwise we'd have to
+ * resize/check the bitmap upon volume resize too.
+ * Allocating a few bytes more does not hurt.
+ */
+ err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
+ if (err)
+ return err;
}
/* And add the layout volume */
@@ -645,6 +655,9 @@ static int init_volumes(struct ubi_device *ubi,
reserved_pebs += vol->reserved_pebs;
ubi->vol_count += 1;
vol->ubi = ubi;
+ err = ubi_fastmap_init_checkmap(vol, UBI_LAYOUT_VOLUME_EBS);
+ if (err)
+ return err;
if (reserved_pebs > ubi->avail_pebs) {
ubi_err(ubi, "not enough PEBs, required %d, available %d",
@@ -849,6 +862,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
out_free:
vfree(ubi->vtbl);
for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
kfree(ubi->volumes[i]);
ubi->volumes[i] = NULL;
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2052a647220e..f66b3b22f328 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1505,6 +1505,7 @@ int ubi_thread(void *u)
}
dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
+ ubi->thread_enabled = 0;
return 0;
}
@@ -1514,9 +1515,6 @@ int ubi_thread(void *u)
*/
static void shutdown_work(struct ubi_device *ubi)
{
-#ifdef CONFIG_MTD_UBI_FASTMAP
- flush_work(&ubi->fm_work);
-#endif
while (!list_empty(&ubi->works)) {
struct ubi_work *wrk;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a822e70c2af3..f2af87d70594 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3386,32 +3386,6 @@ err_disable_device:
}
/*****************************************************************************/
-static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
-{
- int rc;
-
- if (numvfs > 0) {
- rc = pci_enable_sriov(dev, numvfs);
- if (rc != 0) {
- dev_err(&dev->dev,
- "pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
- numvfs, rc);
- return rc;
- }
-
- return numvfs;
- }
-
- if (numvfs == 0) {
- pci_disable_sriov(dev);
- return 0;
- }
-
- return -EINVAL;
-}
-
-/*****************************************************************************/
-/*****************************************************************************/
/* ena_remove - Device Removal Routine
* @pdev: PCI device information struct
@@ -3526,7 +3500,7 @@ static struct pci_driver ena_pci_driver = {
.suspend = ena_suspend,
.resume = ena_resume,
#endif
- .sriov_configure = ena_sriov_configure,
+ .sriov_configure = pci_sriov_configure_simple,
};
static int __init ena_init(void)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c766ae23bc74..5b1ed240bf18 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13922,8 +13922,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
{
struct net_device *dev = NULL;
struct bnx2x *bp;
- enum pcie_link_width pcie_width;
- enum pci_bus_speed pcie_speed;
int rc, max_non_def_sbs;
int rx_count, tx_count, rss_count, doorbell_size;
int max_cos_est;
@@ -14091,21 +14089,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
- if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
- pcie_speed == PCI_SPEED_UNKNOWN ||
- pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
- BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
- else
- BNX2X_DEV_INFO(
- "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
- board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width,
- pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
- pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
- pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
- "Unknown",
- dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ pcie_print_link_status(bp->pdev);
bnx2x_register_phc(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dfa0839f6656..176fc9f4d7de 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -8685,22 +8685,6 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
-static void bnxt_parse_log_pcie_link(struct bnxt *bp)
-{
- enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
- enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
-
- if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
- speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
- netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
- else
- netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
- speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
- speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
- speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
- "Unknown", width);
-}
-
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -8915,8 +8899,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
(long)pci_resource_start(pdev, 0), dev->dev_addr);
-
- bnxt_parse_log_pcie_link(bp);
+ pcie_print_link_status(pdev);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0efae2030e71..35cb3ae4f7b6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5066,79 +5066,6 @@ static int init_rss(struct adapter *adap)
return 0;
}
-static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
- enum pci_bus_speed *speed,
- enum pcie_link_width *width)
-{
- u32 lnkcap1, lnkcap2;
- int err1, err2;
-
-#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
-
- *speed = PCI_SPEED_UNKNOWN;
- *width = PCIE_LNK_WIDTH_UNKNOWN;
-
- err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
- &lnkcap1);
- err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
- &lnkcap2);
- if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
- *speed = PCIE_SPEED_8_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
- *speed = PCIE_SPEED_5_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
- *speed = PCIE_SPEED_2_5GT;
- }
- if (!err1) {
- *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
- if (!lnkcap2) { /* pre-r3.0 */
- if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
- *speed = PCIE_SPEED_5_0GT;
- else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
- *speed = PCIE_SPEED_2_5GT;
- }
- }
-
- if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
- return err1 ? err1 : err2 ? err2 : -EINVAL;
- return 0;
-}
-
-static void cxgb4_check_pcie_caps(struct adapter *adap)
-{
- enum pcie_link_width width, width_cap;
- enum pci_bus_speed speed, speed_cap;
-
-#define PCIE_SPEED_STR(speed) \
- (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
- speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
- speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
- "Unknown")
-
- if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
- dev_warn(adap->pdev_dev,
- "Unable to determine PCIe device BW capabilities\n");
- return;
- }
-
- if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
- speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
- dev_warn(adap->pdev_dev,
- "Unable to determine PCI Express bandwidth.\n");
- return;
- }
-
- dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
- PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
- dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
- width, width_cap);
- if (speed < speed_cap || width < width_cap)
- dev_info(adap->pdev_dev,
- "A slot with more lanes and/or higher speed is "
- "suggested for optimal performance.\n");
-}
-
/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
@@ -5798,7 +5725,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* check for PCI Express bandwidth capabilities */
- cxgb4_check_pcie_caps(adapter);
+ pcie_print_link_status(pdev);
err = init_rss(adapter);
if (err)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 38b4e4899490..4929f7265598 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -245,9 +245,6 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
int expected_gts)
{
struct ixgbe_hw *hw = &adapter->hw;
- int max_gts = 0;
- enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
- enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
struct pci_dev *pdev;
/* Some devices are not connected over PCIe and thus do not negotiate
@@ -263,49 +260,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
else
pdev = adapter->pdev;
- if (pcie_get_minimum_link(pdev, &speed, &width) ||
- speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
- e_dev_warn("Unable to determine PCI Express bandwidth.\n");
- return;
- }
-
- switch (speed) {
- case PCIE_SPEED_2_5GT:
- /* 8b/10b encoding reduces max throughput by 20% */
- max_gts = 2 * width;
- break;
- case PCIE_SPEED_5_0GT:
- /* 8b/10b encoding reduces max throughput by 20% */
- max_gts = 4 * width;
- break;
- case PCIE_SPEED_8_0GT:
- /* 128b/130b encoding reduces throughput by less than 2% */
- max_gts = 8 * width;
- break;
- default:
- e_dev_warn("Unable to determine PCI Express bandwidth.\n");
- return;
- }
-
- e_dev_info("PCI Express bandwidth of %dGT/s available\n",
- max_gts);
- e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
- (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
- speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
- speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
- "Unknown"),
- width,
- (speed == PCIE_SPEED_2_5GT ? "20%" :
- speed == PCIE_SPEED_5_0GT ? "20%" :
- speed == PCIE_SPEED_8_0GT ? "<2%" :
- "Unknown"));
-
- if (max_gts < expected_gts) {
- e_dev_warn("This is not sufficient for optimal performance of this card.\n");
- e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
- expected_gts);
- e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
- }
+ pcie_print_link_status(pdev);
}
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6cab1dd66d1b..f63dfbcd29fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2104,21 +2104,18 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
struct mlx5_vport *vport = &esw->vports[vport_idx];
u64 rx_discard_vport_down, tx_discard_vport_down;
u64 bytes = 0;
- u16 idx = 0;
int err = 0;
if (!vport->enabled || esw->mode != SRIOV_LEGACY)
return 0;
- if (vport->egress.drop_counter) {
- idx = vport->egress.drop_counter->id;
- mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
- }
+ if (vport->egress.drop_counter)
+ mlx5_fc_query(dev, vport->egress.drop_counter,
+ &stats->rx_dropped, &bytes);
- if (vport->ingress.drop_counter) {
- idx = vport->ingress.drop_counter->id;
- mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
- }
+ if (vport->ingress.drop_counter)
+ mlx5_fc_query(dev, vport->ingress.drop_counter,
+ &stats->tx_dropped, &bytes);
if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
!MLX5_CAP_GEN(dev, transmit_discard_vport_down))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e1b609c61d59..49a75d31185e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -325,7 +325,8 @@ static bool check_valid_mask(u8 match_criteria_enable, const u32 *match_criteria
if (match_criteria_enable & ~(
(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) |
(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) |
- (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS)))
+ (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) |
+ (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2)))
return false;
if (!(match_criteria_enable &
@@ -361,6 +362,17 @@ static bool check_valid_mask(u8 match_criteria_enable, const u32 *match_criteria
return false;
}
+ if (!(match_criteria_enable &
+ 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2)) {
+ char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
+ match_criteria, misc_parameters_2);
+
+ if (fg_type_mask[0] ||
+ memcmp(fg_type_mask, fg_type_mask + 1,
+ MLX5_ST_SZ_BYTES(fte_match_set_misc2) - 1))
+ return false;
+ }
+
return check_last_reserved(match_criteria);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index e26d3e9d5f9f..32070e5d993d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -159,7 +159,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn;
};
-#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_600
+#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_800
/* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last.
*/
@@ -233,8 +233,6 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
-int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
- u64 *packets, u64 *bytes);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index b7ab929d5f8e..58af6be13dfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -243,6 +243,7 @@ err_out:
return ERR_PTR(err);
}
+EXPORT_SYMBOL(mlx5_fc_create);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
@@ -260,6 +261,7 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
mlx5_cmd_fc_free(dev, counter->id);
kfree(counter);
}
+EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
@@ -312,11 +314,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
}
}
-int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes)
{
- return mlx5_cmd_fc_query(dev, id, packets, bytes);
+ return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
+EXPORT_SYMBOL(mlx5_fc_query);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 99d8e7398a5b..23304aca25f9 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -189,7 +189,6 @@ struct netvsc_device;
struct net_device_context;
extern u32 netvsc_ring_bytes;
-extern struct reciprocal_value netvsc_ring_reciprocal;
struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *info);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d2ee66c259a7..5d5bd513847f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -31,7 +31,6 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
-#include <linux/reciprocal_div.h>
#include <asm/sync_bitops.h>
@@ -635,17 +634,6 @@ void netvsc_device_remove(struct hv_device *device)
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10
-/*
- * Get the percentage of available bytes to write in the ring.
- * The return value is in range from 0 to 100.
- */
-static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
-{
- u32 avail_write = hv_get_bytes_to_write(ring_info);
-
- return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
-}
-
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
u32 index)
{
@@ -694,8 +682,8 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
if (netif_tx_queue_stopped(txq) &&
- (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
- queue_sends < 1)) {
+ (hv_get_avail_to_write_percent(&channel->outbound) >
+ RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
netif_tx_wake_queue(txq);
ndev_ctx->eth_stats.wake_queue++;
}
@@ -802,7 +790,7 @@ static inline int netvsc_send_pkt(
struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
u64 req_id;
int ret;
- u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
+ u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (skb)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8eec156418ea..7b18a8c267c2 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -35,7 +35,6 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
-#include <linux/reciprocal_div.h>
#include <net/arp.h>
#include <net/route.h>
@@ -59,7 +58,6 @@ static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
-struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
@@ -2132,7 +2130,6 @@ static int __init netvsc_drv_init(void)
ring_size);
}
netvsc_ring_bytes = ring_size * PAGE_SIZE;
- netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
ret = vmbus_driver_register(&netvsc_drv);
if (ret)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4dd0668003e7..679da1abd73c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -564,7 +564,7 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
-static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index a64023690cad..27902a8799b1 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -100,6 +100,9 @@ static int nvdimm_bus_probe(struct device *dev)
if (!try_module_get(provider))
return -ENXIO;
+ dev_dbg(&nvdimm_bus->dev, "START: %s.probe(%s)\n",
+ dev->driver->name, dev_name(dev));
+
nvdimm_bus_probe_start(nvdimm_bus);
rc = nd_drv->probe(dev);
if (rc == 0)
@@ -108,7 +111,7 @@ static int nvdimm_bus_probe(struct device *dev)
nd_region_disable(nvdimm_bus, dev);
nvdimm_bus_probe_end(nvdimm_bus);
- dev_dbg(&nvdimm_bus->dev, "%s.probe(%s) = %d\n", dev->driver->name,
+ dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
dev_name(dev), rc);
if (rc != 0)
@@ -566,14 +569,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
{
struct device *dev = disk_to_dev(disk)->parent;
struct nd_region *nd_region = to_nd_region(dev->parent);
- const char *pol = nd_region->ro ? "only" : "write";
+ int disk_ro = get_disk_ro(disk);
- if (nd_region->ro == get_disk_ro(disk))
+ /*
+ * Upgrade to read-only if the region is read-only preserve as
+ * read-only if the disk is already read-only.
+ */
+ if (disk_ro || nd_region->ro == disk_ro)
return 0;
- dev_info(dev, "%s read-%s, marking %s read-%s\n",
- dev_name(&nd_region->dev), pol, disk->disk_name, pol);
- set_disk_ro(disk, nd_region->ro);
+ dev_info(dev, "%s read-only, marking %s read-only\n",
+ dev_name(&nd_region->dev), disk->disk_name);
+ set_disk_ro(disk, 1);
return 0;
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 6f9a6ffd7cde..521eaf53a52a 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -38,12 +38,27 @@ static int e820_range_to_nid(resource_size_t addr)
}
#endif
+static int e820_register_one(struct resource *res, void *data)
+{
+ struct nd_region_desc ndr_desc;
+ struct nvdimm_bus *nvdimm_bus = data;
+
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+ ndr_desc.res = res;
+ ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
+ ndr_desc.numa_node = e820_range_to_nid(res->start);
+ set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
+ if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
+ return -ENXIO;
+ return 0;
+}
+
static int e820_pmem_probe(struct platform_device *pdev)
{
static struct nvdimm_bus_descriptor nd_desc;
struct device *dev = &pdev->dev;
struct nvdimm_bus *nvdimm_bus;
- struct resource *p;
+ int rc = -ENXIO;
nd_desc.attr_groups = e820_pmem_attribute_groups;
nd_desc.provider_name = "e820";
@@ -53,27 +68,15 @@ static int e820_pmem_probe(struct platform_device *pdev)
goto err;
platform_set_drvdata(pdev, nvdimm_bus);
- for (p = iomem_resource.child; p ; p = p->sibling) {
- struct nd_region_desc ndr_desc;
-
- if (p->desc != IORES_DESC_PERSISTENT_MEMORY_LEGACY)
- continue;
-
- memset(&ndr_desc, 0, sizeof(ndr_desc));
- ndr_desc.res = p;
- ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
- ndr_desc.numa_node = e820_range_to_nid(p->start);
- set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
- if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
- goto err;
- }
-
+ rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ IORESOURCE_MEM, 0, -1, nvdimm_bus, e820_register_one);
+ if (rc)
+ goto err;
return 0;
-
- err:
+err:
nvdimm_bus_unregister(nvdimm_bus);
dev_err(dev, "failed to register legacy persistent memory ranges\n");
- return -ENXIO;
+ return rc;
}
static struct platform_driver e820_pmem_driver = {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 30b08791597d..3f7ad5bc443e 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -561,8 +561,6 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
res->start += start_pad;
res->end -= end_trunc;
- pgmap->type = MEMORY_DEVICE_HOST;
-
if (nd_pfn->mode == PFN_MODE_RAM) {
if (offset < SZ_8K)
return -EINVAL;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index e023d6aa22b5..68940356cad3 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -164,11 +164,6 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
return rc;
}
-/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
-#ifndef REQ_FLUSH
-#define REQ_FLUSH REQ_PREFLUSH
-#endif
-
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
blk_status_t rc = 0;
@@ -179,7 +174,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
struct pmem_device *pmem = q->queuedata;
struct nd_region *nd_region = to_region(pmem);
- if (bio->bi_opf & REQ_FLUSH)
+ if (bio->bi_opf & REQ_PREFLUSH)
nvdimm_flush(nd_region);
do_acct = nd_iostat_start(bio, &start);
@@ -264,9 +259,16 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
return copy_from_iter_flushcache(addr, bytes, i);
}
+static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_to_iter_mcsafe(addr, bytes, i);
+}
+
static const struct dax_operations pmem_dax_ops = {
.direct_access = pmem_dax_direct_access,
.copy_from_iter = pmem_copy_from_iter,
+ .copy_to_iter = pmem_copy_to_iter,
};
static const struct attribute_group *pmem_attribute_groups[] = {
@@ -294,12 +296,33 @@ static void pmem_release_disk(void *__pmem)
put_disk(pmem->disk);
}
+static void pmem_release_pgmap_ops(void *__pgmap)
+{
+ dev_pagemap_put_ops();
+}
+
+static void fsdax_pagefree(struct page *page, void *data)
+{
+ wake_up_var(&page->_refcount);
+}
+
+static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
+{
+ dev_pagemap_get_ops();
+ if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
+ return -ENOMEM;
+ pgmap->type = MEMORY_DEVICE_FS_DAX;
+ pgmap->page_free = fsdax_pagefree;
+
+ return 0;
+}
+
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
- int nid = dev_to_node(dev), fua, wbc;
+ int nid = dev_to_node(dev), fua;
struct resource *res = &nsio->res;
struct resource bb_res;
struct nd_pfn *nd_pfn = NULL;
@@ -335,7 +358,6 @@ static int pmem_attach_disk(struct device *dev,
dev_warn(dev, "unable to guarantee persistence of writes\n");
fua = 0;
}
- wbc = nvdimm_has_cache(nd_region);
if (!devm_request_mem_region(dev, res->start, resource_size(res),
dev_name(&ndns->dev))) {
@@ -353,6 +375,8 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags = PFN_DEV;
pmem->pgmap.ref = &q->q_usage_counter;
if (is_nd_pfn(dev)) {
+ if (setup_pagemap_fsdax(dev, &pmem->pgmap))
+ return -ENOMEM;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -364,6 +388,8 @@ static int pmem_attach_disk(struct device *dev,
} else if (pmem_should_map_pages(dev)) {
memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
pmem->pgmap.altmap_valid = false;
+ if (setup_pagemap_fsdax(dev, &pmem->pgmap))
+ return -ENOMEM;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
@@ -382,7 +408,7 @@ static int pmem_attach_disk(struct device *dev,
return PTR_ERR(addr);
pmem->virt_addr = addr;
- blk_queue_write_cache(q, wbc, fua);
+ blk_queue_write_cache(q, true, fua);
blk_queue_make_request(q, pmem_make_request);
blk_queue_physical_block_size(q, PAGE_SIZE);
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
@@ -413,7 +439,7 @@ static int pmem_attach_disk(struct device *dev,
put_disk(disk);
return -ENOMEM;
}
- dax_write_cache(dax_dev, wbc);
+ dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
pmem->dax_dev = dax_dev;
gendev = disk_to_dev(disk);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a612be6f019d..ec3543b83330 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1132,7 +1132,8 @@ EXPORT_SYMBOL_GPL(nvdimm_has_flush);
int nvdimm_has_cache(struct nd_region *nd_region)
{
- return is_nd_pmem(&nd_region->dev);
+ return is_nd_pmem(&nd_region->dev) &&
+ !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c8b30067b6ae..effb1309682e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3245,7 +3245,7 @@ static void nvme_scan_work(struct work_struct *work)
WARN_ON_ONCE(!ctrl->tagset);
- if (test_and_clear_bit(EVENT_NS_CHANGED, &ctrl->events)) {
+ if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
if (nvme_scan_changed_ns_log(ctrl))
goto out_sort_namespaces;
dev_info(ctrl->device, "rescanning namespaces.\n");
@@ -3386,7 +3386,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
switch ((result & 0xff00) >> 8) {
case NVME_AER_NOTICE_NS_CHANGED:
- set_bit(EVENT_NS_CHANGED, &ctrl->events);
+ set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5f5f7067c41d..fa32c1216409 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -952,6 +952,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
ret = -EBUSY;
goto out_unlock;
}
+ up_read(&nvmf_transports_rwsem);
ret = nvmf_check_required_opts(opts, ops->required_opts);
if (ret)
@@ -968,11 +969,11 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
}
module_put(ops->module);
- up_read(&nvmf_transports_rwsem);
return ctrl;
out_module_put:
module_put(ops->module);
+ goto out_free_opts;
out_unlock:
up_read(&nvmf_transports_rwsem);
out_free_opts:
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 0cf0460a5c92..7491a0bbf711 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -124,6 +124,9 @@ struct nvmf_ctrl_options {
* 1. At minimum, 'required_opts' and 'allowed_opts' should
* be set to the same enum parsing options defined earlier.
* 2. create_ctrl() must be defined (even if it does nothing)
+ * 3. struct nvmf_transport_ops must be statically allocated in the
+ * module's .bss section so that a pure module_get on @module
+ * prevents the memory from being freed.
*/
struct nvmf_transport_ops {
struct list_head entry;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index de24fe77c80b..34df07d44f80 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -194,7 +194,6 @@ struct nvme_ctrl {
struct delayed_work ka_work;
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
-#define EVENT_NS_CHANGED (1 << 0)
unsigned long events;
/* Power saving configuration */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e526437bacbf..fc33804662e7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -42,7 +42,7 @@ static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
static bool use_cmb_sqes = true;
-module_param(use_cmb_sqes, bool, 0644);
+module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
static unsigned int max_host_mem_size_mb = 128;
@@ -920,11 +920,9 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
u16 head = nvmeq->cq_head;
- if (likely(nvmeq->cq_vector >= 0)) {
- if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
- nvmeq->dbbuf_cq_ei))
- writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
- }
+ if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
+ nvmeq->dbbuf_cq_ei))
+ writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
@@ -1477,11 +1475,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
*/
vector = dev->num_vecs == 1 ? 0 : qid;
result = adapter_alloc_cq(dev, qid, nvmeq, vector);
- if (result < 0)
- goto out;
+ if (result)
+ return result;
result = adapter_alloc_sq(dev, qid, nvmeq);
if (result < 0)
+ return result;
+ else if (result)
goto release_cq;
/*
@@ -1503,7 +1503,6 @@ release_sq:
adapter_delete_sq(dev, qid);
release_cq:
adapter_delete_cq(dev, qid);
-out:
return result;
}
@@ -2012,13 +2011,7 @@ static void nvme_del_cq_end(struct request *req, blk_status_t error)
if (!error) {
unsigned long flags;
- /*
- * We might be called with the AQ cq_lock held
- * and the I/O queue cq_lock should always
- * nest inside the AQ one.
- */
- spin_lock_irqsave_nested(&nvmeq->cq_lock, flags,
- SINGLE_DEPTH_NESTING);
+ spin_lock_irqsave(&nvmeq->cq_lock, flags);
nvme_process_cq(nvmeq, &start, &end, -1);
spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
@@ -2231,14 +2224,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_stop_queues(&dev->ctrl);
if (!dead && dev->ctrl.queue_count > 0) {
- /*
- * If the controller is still alive tell it to stop using the
- * host memory buffer. In theory the shutdown / reset should
- * make sure that it doesn't access the host memoery anymore,
- * but I'd rather be safe than sorry..
- */
- if (dev->host_mem_descs)
- nvme_set_host_mem(dev, 0);
nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
@@ -2614,7 +2599,7 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
- nvme_dev_disable(dev, false);
+ nvme_dev_disable(dev, true);
}
flush_work(&dev->ctrl.reset_work);
@@ -2630,24 +2615,6 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_put_ctrl(&dev->ctrl);
}
-static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
-{
- int ret = 0;
-
- if (numvfs == 0) {
- if (pci_vfs_assigned(pdev)) {
- dev_warn(&pdev->dev,
- "Cannot disable SR-IOV VFs while assigned\n");
- return -EPERM;
- }
- pci_disable_sriov(pdev);
- return 0;
- }
-
- ret = pci_enable_sriov(pdev, numvfs);
- return ret ? ret : numvfs;
-}
-
#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
@@ -2774,7 +2741,7 @@ static struct pci_driver nvme_driver = {
.driver = {
.pm = &nvme_dev_pm_ops,
},
- .sriov_configure = nvme_pci_sriov_configure,
+ .sriov_configure = pci_sriov_configure_simple,
.err_handler = &nvme_err_handler,
};
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 7b3f08410430..2aba03876d84 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1951,8 +1951,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
}
/* sanity check keyed sgls */
- if (!(ctrl->ctrl.sgls & (1 << 20))) {
- dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not support\n");
+ if (!(ctrl->ctrl.sgls & (1 << 2))) {
+ dev_err(ctrl->ctrl.device,
+ "Mandatory keyed sgls are not supported!\n");
ret = -EINVAL;
goto out_remove_admin_queue;
}
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ead8fbe6922e..962532842769 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -270,8 +270,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
struct nvme_id_ns *id;
u16 status = 0;
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
- if (!ns) {
+ if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
goto out;
}
@@ -279,9 +278,14 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
status = NVME_SC_INTERNAL;
- goto out_put_ns;
+ goto out;
}
+ /* return an all zeroed buffer if we can't find an active namespace */
+ ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+ if (!ns)
+ goto done;
+
/*
* nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
@@ -306,11 +310,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
id->lbaf[0].ds = ns->blksize_shift;
+ nvmet_put_namespace(ns);
+done:
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
-
kfree(id);
-out_put_ns:
- nvmet_put_namespace(ns);
out:
nvmet_req_complete(req, status);
}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index ad9ff27234b5..d3f3b3ec4d1a 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -137,8 +137,10 @@ static ssize_t nvmet_addr_traddr_store(struct config_item *item,
pr_err("Disable the address before modifying\n");
return -EACCES;
}
- return snprintf(port->disc_addr.traddr,
- sizeof(port->disc_addr.traddr), "%s", page);
+
+ if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
+ return -EINVAL;
+ return count;
}
CONFIGFS_ATTR(nvmet_, addr_traddr);
@@ -208,8 +210,10 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
pr_err("Disable the address before modifying\n");
return -EACCES;
}
- return snprintf(port->disc_addr.trsvcid,
- sizeof(port->disc_addr.trsvcid), "%s", page);
+
+ if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
+ return -EINVAL;
+ return count;
}
CONFIGFS_ATTR(nvmet_, addr_trsvcid);
@@ -288,7 +292,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
kfree(ns->device_path);
ret = -ENOMEM;
- ns->device_path = kstrdup(page, GFP_KERNEL);
+ ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
if (!ns->device_path)
goto out_unlock;
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index f9d5480a4ae5..27d9b4bba535 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -177,7 +177,6 @@ int of_node_to_nid(struct device_node *device)
return NUMA_NO_NODE;
}
-EXPORT_SYMBOL(of_node_to_nid);
int __init of_numa_init(void)
{
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index c00d81dfac0b..0b49a62b38a3 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -32,6 +32,11 @@ const struct of_device_id of_default_bus_match_table[] = {
{} /* Empty terminated list */
};
+static const struct of_device_id of_skipped_node_table[] = {
+ { .compatible = "operating-points-v2", },
+ {} /* Empty terminated list */
+};
+
static int of_dev_node_match(struct device *dev, void *data)
{
return dev->of_node == data;
@@ -356,6 +361,12 @@ static int of_platform_bus_create(struct device_node *bus,
return 0;
}
+ /* Skip nodes for which we don't want to create devices */
+ if (unlikely(of_match_node(of_skipped_node_table, bus))) {
+ pr_debug("%s() - skipping %pOF node\n", __func__, bus);
+ return 0;
+ }
+
if (of_node_check_flag(bus, OF_POPULATED_BUS)) {
pr_debug("%s() - skipping %pOF, already populated\n",
__func__, bus);
@@ -537,6 +548,9 @@ int of_platform_device_destroy(struct device *dev, void *data)
if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS))
device_for_each_child(dev, NULL, of_platform_device_destroy);
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+ of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
+
if (dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
#ifdef CONFIG_ARM_AMBA
@@ -544,8 +558,6 @@ int of_platform_device_destroy(struct device *dev, void *data)
amba_device_unregister(to_amba_device(dev));
#endif
- of_node_clear_flag(dev->of_node, OF_POPULATED);
- of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0;
}
EXPORT_SYMBOL_GPL(of_platform_device_destroy);
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 65d0b7adfcd4..7edfac6f1914 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -122,6 +122,11 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
goto err_fail;
}
+ if (offset < 0 || offset + sizeof(__be32) > prop->length) {
+ err = -EINVAL;
+ goto err_fail;
+ }
+
*(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
}
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 6bb37c18292a..ecee50d10d14 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -165,20 +165,20 @@ static void __init of_unittest_dynamic(void)
/* Add a new property - should pass*/
prop->name = "new-property";
prop->value = "new-property-data";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
/* Try to add an existing property - should fail */
prop++;
prop->name = "new-property";
prop->value = "new-property-data-should-fail";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) != 0,
"Adding an existing property should have failed\n");
/* Try to modify an existing property - should pass */
prop->value = "modify-property-data-should-pass";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating an existing property should have passed\n");
@@ -186,7 +186,7 @@ static void __init of_unittest_dynamic(void)
prop++;
prop->name = "modify-property";
prop->value = "modify-missing-property-data-should-pass";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating a missing property should have passed\n");
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 29a487f31dae..b2f07635e94d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -67,6 +67,18 @@ config PCI_STUB
When in doubt, say N.
+config PCI_PF_STUB
+ tristate "PCI PF Stub driver"
+ depends on PCI
+ depends on PCI_IOV
+ help
+ Say Y or M here if you want to enable support for devices that
+ require SR-IOV support, while at the same time the PF itself is
+ not providing any actual services on the host itself such as
+ storage or networking.
+
+ When in doubt, say N.
+
config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend"
depends on PCI && X86 && XEN
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 952addc7bacf..84c9eef6b1c3 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PCI_LABEL) += pci-label.o
obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
+obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 89305b569d3d..4923a2a8e14b 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -20,6 +20,9 @@ void pci_ats_init(struct pci_dev *dev)
{
int pos;
+ if (pci_ats_disabled())
+ return;
+
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
if (!pos)
return;
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index 2f3f5c50aa48..16f52c626b4b 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -1,13 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
menu "DesignWare PCI Core Support"
+ depends on PCI
config PCIE_DW
bool
config PCIE_DW_HOST
bool
- depends on PCI
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
@@ -22,7 +22,7 @@ config PCI_DRA7XX
config PCI_DRA7XX_HOST
bool "TI DRA7xx PCIe controller Host Mode"
depends on SOC_DRA7XX || COMPILE_TEST
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI_IRQ_DOMAIN
depends on OF && HAS_IOMEM && TI_PIPE3
select PCIE_DW_HOST
select PCI_DRA7XX
@@ -51,50 +51,62 @@ config PCI_DRA7XX_EP
This uses the DesignWare core.
config PCIE_DW_PLAT
- bool "Platform bus based DesignWare PCIe Controller"
- depends on PCI
- depends on PCI_MSI_IRQ_DOMAIN
- select PCIE_DW_HOST
- ---help---
- This selects the DesignWare PCIe controller support. Select this if
- you have a PCIe controller on Platform bus.
+ bool
- If you have a controller with this interface, say Y or M here.
+config PCIE_DW_PLAT_HOST
+ bool "Platform bus based DesignWare PCIe Controller - Host mode"
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ select PCIE_DW_PLAT
+ default y
+ help
+ Enables support for the PCIe controller in the Designware IP to
+ work in host mode. There are two instances of PCIe controller in
+ Designware IP.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
+ order to enable device-specific features PCI_DW_PLAT_EP must be
+ selected.
- If unsure, say N.
+config PCIE_DW_PLAT_EP
+ bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_DW_PLAT
+ help
+ Enables support for the PCIe controller in the Designware IP to
+ work in endpoint mode. There are two instances of PCIe controller
+ in Designware IP.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
+ order to enable device-specific features PCI_DW_PLAT_EP must be
+ selected.
config PCI_EXYNOS
bool "Samsung Exynos PCIe controller"
- depends on PCI
- depends on SOC_EXYNOS5440
+ depends on SOC_EXYNOS5440 || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
config PCI_IMX6
bool "Freescale i.MX6 PCIe controller"
- depends on PCI
- depends on SOC_IMX6Q
+ depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
- depends on PCI
- depends on ARCH_SPEAR13XX
+ depends on ARCH_SPEAR13XX || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want PCIe support on SPEAr13XX SoCs.
config PCI_KEYSTONE
bool "TI Keystone PCIe controller"
- depends on PCI
- depends on ARCH_KEYSTONE
+ depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want to enable PCI controller support on Keystone
@@ -104,8 +116,7 @@ config PCI_KEYSTONE
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
- depends on PCI
- depends on OF && (ARM || ARCH_LAYERSCAPE)
+ depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select MFD_SYSCON
select PCIE_DW_HOST
@@ -113,11 +124,9 @@ config PCI_LAYERSCAPE
Say Y here if you want PCIe controller support on Layerscape SoCs.
config PCI_HISI
- depends on OF && ARM64
+ depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
- depends on PCI
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
select PCI_HOST_COMMON
help
@@ -126,10 +135,8 @@ config PCI_HISI
config PCIE_QCOM
bool "Qualcomm PCIe controller"
- depends on PCI
- depends on ARCH_QCOM && OF
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
@@ -138,10 +145,8 @@ config PCIE_QCOM
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
- depends on PCI
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want to enable PCIe controller support on
@@ -154,9 +159,8 @@ config PCIE_ARTPEC6
config PCIE_ARTPEC6_HOST
bool "Axis ARTPEC-6 PCIe controller Host Mode"
- depends on MACH_ARTPEC6
- depends on PCI && PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
+ depends on MACH_ARTPEC6 || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCIE_ARTPEC6
help
@@ -165,7 +169,7 @@ config PCIE_ARTPEC6_HOST
config PCIE_ARTPEC6_EP
bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
- depends on MACH_ARTPEC6
+ depends on MACH_ARTPEC6 || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCIE_ARTPEC6
@@ -174,11 +178,9 @@ config PCIE_ARTPEC6_EP
endpoint mode. This uses the DesignWare core.
config PCIE_KIRIN
- depends on OF && ARM64
+ depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Kirin series SoCs PCIe controllers"
depends on PCI_MSI_IRQ_DOMAIN
- depends on PCI
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support
@@ -186,10 +188,8 @@ config PCIE_KIRIN
config PCIE_HISI_STB
bool "HiSilicon STB SoCs PCIe controllers"
- depends on ARCH_HISI
- depends on PCI
+ depends on ARCH_HISI || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support on HiSilicon STB SoCs
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index ed8558d638e5..f688204e50c5 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -27,6 +27,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "../pci.h"
#include "pcie-designware.h"
/* PCIe controller wrapper DRA7XX configuration registers */
@@ -406,14 +407,14 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
ep->ops = &pcie_ep_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
- pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base)
- return -ENOMEM;
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
- pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base2)
- return -ENOMEM;
+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
@@ -459,9 +460,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
- pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base)
- return -ENOMEM;
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
pp->ops = &dra7xx_pcie_host_ops;
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 4818ef875f8a..80f604602783 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -338,7 +338,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
- case IMX6QP: /* FALLTHROUGH */
+ case IMX6QP: /* FALLTHROUGH */
case IMX6Q:
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index d55ae0716adf..3722a5f31e5e 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -89,7 +89,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pci)) {
- dev_err(dev, "Link already up\n");
+ dev_info(dev, "Link already up\n");
return 0;
}
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index b587352f8b9f..072fd7ecc29f 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -28,6 +28,7 @@
struct armada8k_pcie {
struct dw_pcie *pci;
struct clk *clk;
+ struct clk *clk_reg;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -229,26 +230,38 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
+ pcie->clk_reg = devm_clk_get(dev, "reg");
+ if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto fail;
+ }
+ if (!IS_ERR(pcie->clk_reg)) {
+ ret = clk_prepare_enable(pcie->clk_reg);
+ if (ret)
+ goto fail_clkreg;
+ }
+
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
if (IS_ERR(pci->dbi_base)) {
dev_err(dev, "couldn't remap regs base %p\n", base);
ret = PTR_ERR(pci->dbi_base);
- goto fail;
+ goto fail_clkreg;
}
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
- goto fail;
+ goto fail_clkreg;
return 0;
+fail_clkreg:
+ clk_disable_unprepare(pcie->clk_reg);
fail:
- if (!IS_ERR(pcie->clk))
- clk_disable_unprepare(pcie->clk);
+ clk_disable_unprepare(pcie->clk);
return ret;
}
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index e66cede2b5b7..321b56cfd5d0 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -463,9 +463,9 @@ static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
ep->ops = &pcie_ep_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base2)
- return -ENOMEM;
+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
index f07678bf7cfc..1eec4415a77f 100644
--- a/drivers/pci/dwc/pcie-designware-ep.c
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -75,7 +75,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
if (free_win >= ep->num_ib_windows) {
- dev_err(pci->dev, "no free inbound window\n");
+ dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
@@ -100,7 +100,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
if (free_win >= ep->num_ob_windows) {
- dev_err(pci->dev, "no free outbound window\n");
+ dev_err(pci->dev, "No free outbound window\n");
return -EINVAL;
}
@@ -204,7 +204,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
if (ret) {
- dev_err(pci->dev, "failed to enable address\n");
+ dev_err(pci->dev, "Failed to enable address\n");
return ret;
}
@@ -348,21 +348,21 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
if (ret < 0) {
- dev_err(dev, "unable to read *num-ib-windows* property\n");
+ dev_err(dev, "Unable to read *num-ib-windows* property\n");
return ret;
}
if (ep->num_ib_windows > MAX_IATU_IN) {
- dev_err(dev, "invalid *num-ib-windows*\n");
+ dev_err(dev, "Invalid *num-ib-windows*\n");
return -EINVAL;
}
ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
if (ret < 0) {
- dev_err(dev, "unable to read *num-ob-windows* property\n");
+ dev_err(dev, "Unable to read *num-ob-windows* property\n");
return ret;
}
if (ep->num_ob_windows > MAX_IATU_OUT) {
- dev_err(dev, "invalid *num-ob-windows*\n");
+ dev_err(dev, "Invalid *num-ob-windows*\n");
return -EINVAL;
}
@@ -389,7 +389,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
- dev_err(dev, "failed to create epc device\n");
+ dev_err(dev, "Failed to create epc device\n");
return PTR_ERR(epc);
}
@@ -411,6 +411,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return -ENOMEM;
}
+ epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
+ EPC_FEATURE_SET_BAR(epc->features, BAR_0);
+
ep->epc = epc;
epc_set_drvdata(epc, ep);
dw_pcie_setup(pci);
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 6c409079d514..cba1432e395d 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -15,6 +15,7 @@
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
+#include "../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
@@ -83,18 +84,23 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
for (i = 0; i < num_ctrls; i++) {
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
- &val);
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, &val);
if (!val)
continue;
ret = IRQ_HANDLED;
pos = 0;
- while ((pos = find_next_bit((unsigned long *) &val, 32,
- pos)) != 32) {
- irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+ while ((pos = find_next_bit((unsigned long *) &val,
+ MAX_MSI_IRQS_PER_CTRL,
+ pos)) != MAX_MSI_IRQS_PER_CTRL) {
+ irq = irq_find_mapping(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
generic_handle_irq(irq);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE),
4, 1 << pos);
pos++;
}
@@ -157,9 +163,9 @@ static void dw_pci_bottom_mask(struct irq_data *data)
if (pp->ops->msi_clear_irq) {
pp->ops->msi_clear_irq(pp, data->hwirq);
} else {
- ctrl = data->hwirq / 32;
- res = ctrl * 12;
- bit = data->hwirq % 32;
+ ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] &= ~(1 << bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -180,9 +186,9 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
if (pp->ops->msi_set_irq) {
pp->ops->msi_set_irq(pp, data->hwirq);
} else {
- ctrl = data->hwirq / 32;
- res = ctrl * 12;
- bit = data->hwirq % 32;
+ ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] |= 1 << bit;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -248,8 +254,10 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
+
bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
order_base_2(nr_irqs));
+
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -266,7 +274,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
&dw_pcie_msi_domain_ops, pp);
if (!pp->irq_domain) {
- dev_err(pci->dev, "failed to create IRQ domain\n");
+ dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -274,7 +282,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
&dw_pcie_msi_domain_info,
pp->irq_domain);
if (!pp->msi_domain) {
- dev_err(pci->dev, "failed to create MSI domain\n");
+ dev_err(pci->dev, "Failed to create MSI domain\n");
irq_domain_remove(pp->irq_domain);
return -ENOMEM;
}
@@ -301,13 +309,13 @@ void dw_pcie_msi_init(struct pcie_port *pp)
page = alloc_page(GFP_KERNEL);
pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, pp->msi_data)) {
- dev_err(dev, "failed to map MSI data\n");
+ dev_err(dev, "Failed to map MSI data\n");
__free_page(page);
return;
}
msi_target = (u64)pp->msi_data;
- /* program the msi_data */
+ /* Program the msi_data */
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
lower_32_bits(msi_target));
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
@@ -330,19 +338,19 @@ int dw_pcie_host_init(struct pcie_port *pp)
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res) / 2;
- pp->cfg1_size = resource_size(cfg_res) / 2;
+ pp->cfg0_size = resource_size(cfg_res) >> 1;
+ pp->cfg1_size = resource_size(cfg_res) >> 1;
pp->cfg0_base = cfg_res->start;
pp->cfg1_base = cfg_res->start + pp->cfg0_size;
} else if (!pp->va_cfg0_base) {
- dev_err(dev, "missing *config* reg space\n");
+ dev_err(dev, "Missing *config* reg space\n");
}
bridge = pci_alloc_host_bridge(0);
if (!bridge)
return -ENOMEM;
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&bridge->windows, &pp->io_base);
if (ret)
return ret;
@@ -357,7 +365,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
case IORESOURCE_IO:
ret = pci_remap_iospace(win->res, pp->io_base);
if (ret) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
+ dev_warn(dev, "Error %d: failed to map resource %pR\n",
ret, win->res);
resource_list_destroy_entry(win);
} else {
@@ -375,8 +383,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
break;
case 0:
pp->cfg = win->res;
- pp->cfg0_size = resource_size(pp->cfg) / 2;
- pp->cfg1_size = resource_size(pp->cfg) / 2;
+ pp->cfg0_size = resource_size(pp->cfg) >> 1;
+ pp->cfg1_size = resource_size(pp->cfg) >> 1;
pp->cfg0_base = pp->cfg->start;
pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
break;
@@ -391,7 +399,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg->start,
resource_size(pp->cfg));
if (!pci->dbi_base) {
- dev_err(dev, "error with ioremap\n");
+ dev_err(dev, "Error with ioremap\n");
ret = -ENOMEM;
goto error;
}
@@ -403,7 +411,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
pp->cfg0_base, pp->cfg0_size);
if (!pp->va_cfg0_base) {
- dev_err(dev, "error with ioremap in function\n");
+ dev_err(dev, "Error with ioremap in function\n");
ret = -ENOMEM;
goto error;
}
@@ -414,7 +422,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_base,
pp->cfg1_size);
if (!pp->va_cfg1_base) {
- dev_err(dev, "error with ioremap\n");
+ dev_err(dev, "Error with ioremap\n");
ret = -ENOMEM;
goto error;
}
@@ -586,7 +594,7 @@ static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
return 0;
}
- /* access only one slot on each root port */
+ /* Access only one slot on each root port */
if (bus->number == pp->root_bus_nr && dev > 0)
return 0;
@@ -650,13 +658,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++)
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4,
- &pp->irq_status[ctrl]);
- /* setup RC BARs */
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, &pp->irq_status[ctrl]);
+
+ /* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
- /* setup interrupt pins */
+ /* Setup interrupt pins */
dw_pcie_dbi_ro_wr_en(pci);
val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
@@ -664,13 +674,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
dw_pcie_dbi_ro_wr_dis(pci);
- /* setup bus numbers */
+ /* Setup bus numbers */
val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00ff0100;
dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
- /* setup command register */
+ /* Setup command register */
val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@@ -683,7 +693,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
- /* get iATU unroll support */
+ /* Get iATU unroll support */
pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
dev_dbg(pci->dev, "iATU unroll: %s\n",
pci->iatu_unroll_enabled ? "enabled" : "disabled");
@@ -701,7 +711,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* Enable write permission for the DBI read-only register */
dw_pcie_dbi_ro_wr_en(pci);
- /* program correct class for RC */
+ /* Program correct class for RC */
dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
/* Better disable write permission right after the update */
dw_pcie_dbi_ro_wr_dis(pci);
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 5416aa8a07a5..5937fed4c938 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -12,19 +12,29 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
+#include <linux/regmap.h>
#include "pcie-designware.h"
struct dw_plat_pcie {
- struct dw_pcie *pci;
+ struct dw_pcie *pci;
+ struct regmap *regmap;
+ enum dw_pcie_device_mode mode;
};
+struct dw_plat_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[];
+
static int dw_plat_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -38,13 +48,63 @@ static int dw_plat_pcie_host_init(struct pcie_port *pp)
return 0;
}
+static void dw_plat_set_num_vectors(struct pcie_port *pp)
+{
+ pp->num_vectors = MAX_MSI_IRQS;
+}
+
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
.host_init = dw_plat_pcie_host_init,
+ .set_num_vectors = dw_plat_set_num_vectors,
+};
+
+static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = dw_plat_pcie_establish_link,
};
-static int dw_plat_add_pcie_port(struct pcie_port *pp,
+static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u8 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = dw_plat_pcie_ep_init,
+ .raise_irq = dw_plat_pcie_ep_raise_irq,
+};
+
+static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
struct platform_device *pdev)
{
+ struct dw_pcie *pci = dw_plat_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -63,15 +123,44 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(dev, "failed to initialize host\n");
+ dev_err(dev, "Failed to initialize host\n");
return ret;
}
return 0;
}
-static const struct dw_pcie_ops dw_pcie_ops = {
-};
+static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = dw_plat_pcie->pci;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize endpoint\n");
+ return ret;
+ }
+ return 0;
+}
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
@@ -80,6 +169,16 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct resource *res; /* Resource from DT */
int ret;
+ const struct of_device_id *match;
+ const struct dw_plat_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+
+ match = of_match_device(dw_plat_pcie_of_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct dw_plat_pcie_of_data *)match->data;
+ mode = (enum dw_pcie_device_mode)data->mode;
dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
if (!dw_plat_pcie)
@@ -93,23 +192,59 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
+ dw_plat_pcie->mode = mode;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pci->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
platform_set_drvdata(pdev, dw_plat_pcie);
- ret = dw_plat_add_pcie_port(&pci->pp, pdev);
- if (ret < 0)
- return ret;
+ switch (dw_plat_pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
+ return -ENODEV;
+
+ ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
+ if (ret < 0)
+ return ret;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
+ return -ENODEV;
+
+ ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ }
return 0;
}
+static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id dw_plat_pcie_of_match[] = {
- { .compatible = "snps,dw-pcie", },
+ {
+ .compatible = "snps,dw-pcie",
+ .data = &dw_plat_pcie_rc_of_data,
+ },
+ {
+ .compatible = "snps,dw-pcie-ep",
+ .data = &dw_plat_pcie_ep_of_data,
+ },
{},
};
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 1b7282e5b494..778c4f76a884 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -69,7 +69,7 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
ret = dw_pcie_read(base + reg, size, &val);
if (ret)
- dev_err(pci->dev, "read DBI address failed\n");
+ dev_err(pci->dev, "Read DBI address failed\n");
return val;
}
@@ -86,7 +86,7 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
ret = dw_pcie_write(base + reg, size, val);
if (ret)
- dev_err(pci->dev, "write DBI address failed\n");
+ dev_err(pci->dev, "Write DBI address failed\n");
}
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -137,7 +137,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "outbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
@@ -180,7 +180,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "outbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -238,7 +238,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "inbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
return -EBUSY;
}
@@ -284,7 +284,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "inbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
return -EBUSY;
}
@@ -313,16 +313,16 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
int retries;
- /* check if the link is up or not */
+ /* Check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "link up\n");
+ dev_info(pci->dev, "Link up\n");
return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(pci->dev, "phy link never came up\n");
+ dev_err(pci->dev, "Phy link never came up\n");
return -ETIMEDOUT;
}
@@ -351,7 +351,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
if (ret)
lanes = 0;
- /* set the number of lanes */
+ /* Set the number of lanes */
val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
switch (lanes) {
@@ -373,7 +373,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
}
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
- /* set link width speed control register */
+ /* Set link width speed control register */
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
switch (lanes) {
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index fe811dbc12cf..bee4e2535a61 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -110,6 +110,7 @@
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_REG_CTRL_BLOCK_SIZE 12
#define MSI_DEF_NUM_VECTORS 32
/* Maximum number of inbound/outbound iATUs */
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 5897af7d3355..a1d0198081a6 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -10,7 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -19,6 +19,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
@@ -869,7 +870,7 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= !BIT(0);
+ val &= ~BIT(0);
writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
/* change DBI base address */
@@ -1088,6 +1089,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
int ret;
+ pm_runtime_get_sync(pci->dev);
qcom_ep_reset_assert(pcie);
ret = pcie->ops->init(pcie);
@@ -1124,6 +1126,7 @@ err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
pcie->ops->deinit(pcie);
+ pm_runtime_put(pci->dev);
return ret;
}
@@ -1212,6 +1215,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ pm_runtime_enable(dev);
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1257,14 +1261,17 @@ static int qcom_pcie_probe(struct platform_device *pdev)
}
ret = phy_init(pcie->phy);
- if (ret)
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
return ret;
+ }
platform_set_drvdata(pdev, pcie);
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
+ pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 7cef85124325..63ed706445b9 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -87,7 +87,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
if (!src_addr) {
- dev_err(dev, "failed to allocate source address\n");
+ dev_err(dev, "Failed to allocate source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
ret = -ENOMEM;
goto err;
@@ -96,14 +96,14 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map source address\n");
+ dev_err(dev, "Failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
goto err_src_addr;
}
dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
if (!dst_addr) {
- dev_err(dev, "failed to allocate destination address\n");
+ dev_err(dev, "Failed to allocate destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
ret = -ENOMEM;
goto err_src_map_addr;
@@ -112,7 +112,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map destination address\n");
+ dev_err(dev, "Failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
goto err_dst_addr;
}
@@ -149,7 +149,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
if (!src_addr) {
- dev_err(dev, "failed to allocate address\n");
+ dev_err(dev, "Failed to allocate address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
ret = -ENOMEM;
goto err;
@@ -158,7 +158,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map address\n");
+ dev_err(dev, "Failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
goto err_addr;
}
@@ -201,7 +201,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
if (!dst_addr) {
- dev_err(dev, "failed to allocate address\n");
+ dev_err(dev, "Failed to allocate address\n");
reg->status = STATUS_DST_ADDR_INVALID;
ret = -ENOMEM;
goto err;
@@ -210,7 +210,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map address\n");
+ dev_err(dev, "Failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
goto err_addr;
}
@@ -230,7 +230,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
* wait 1ms inorder for the write to complete. Without this delay L3
* error in observed in the host system.
*/
- mdelay(1);
+ usleep_range(1000, 2000);
kfree(buf);
@@ -379,7 +379,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar);
- dev_err(dev, "failed to set BAR%d\n", bar);
+ dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
}
@@ -406,7 +406,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
test_reg_bar);
if (!base) {
- dev_err(dev, "failed to allocated register space\n");
+ dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
}
epf_test->reg[test_reg_bar] = base;
@@ -416,7 +416,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar);
if (!base)
- dev_err(dev, "failed to allocate space for BAR%d\n",
+ dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
epf_test->reg[bar] = base;
}
@@ -435,9 +435,16 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (WARN_ON_ONCE(!epc))
return -EINVAL;
+ if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
+ epf_test->linkup_notifier = false;
+ else
+ epf_test->linkup_notifier = true;
+
+ epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
+
ret = pci_epc_write_header(epc, epf->func_no, header);
if (ret) {
- dev_err(dev, "configuration header write failed\n");
+ dev_err(dev, "Configuration header write failed\n");
return ret;
}
@@ -519,7 +526,7 @@ static int __init pci_epf_test_init(void)
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
ret = pci_epf_register_driver(&test_driver);
if (ret) {
- pr_err("failed to register pci epf test driver --> %d\n", ret);
+ pr_err("Failed to register pci epf test driver --> %d\n", ret);
return ret;
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 465b5f058b6d..523a8cab3bfb 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -15,6 +15,8 @@
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>
+static DEFINE_MUTEX(pci_epf_mutex);
+
static struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
@@ -143,7 +145,13 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
*/
void pci_epf_unregister_driver(struct pci_epf_driver *driver)
{
- pci_ep_cfs_remove_epf_group(driver->group);
+ struct config_group *group;
+
+ mutex_lock(&pci_epf_mutex);
+ list_for_each_entry(group, &driver->epf_group, group_entry)
+ pci_ep_cfs_remove_epf_group(group);
+ list_del(&driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
@@ -159,6 +167,8 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner)
{
int ret;
+ struct config_group *group;
+ const struct pci_epf_device_id *id;
if (!driver->ops)
return -EINVAL;
@@ -173,7 +183,16 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
if (ret)
return ret;
- driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
+ INIT_LIST_HEAD(&driver->epf_group);
+
+ id = driver->id_table;
+ while (id->name[0]) {
+ group = pci_ep_cfs_add_epf_group(id->name);
+ mutex_lock(&pci_epf_mutex);
+ list_add_tail(&group->group_entry, &driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
+ id++;
+ }
return 0;
}
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 0d0177ce436c..a96e23bda664 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -5,13 +5,14 @@ menu "PCI host controller drivers"
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
- depends on ARCH_MVEBU || ARCH_DOVE
+ depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
+ depends on MVEBU_MBUS
depends on ARM
depends on OF
config PCI_AARDVARK
bool "Aardvark PCIe controller"
- depends on ARCH_MVEBU && ARM64
+ depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
help
@@ -21,7 +22,7 @@ config PCI_AARDVARK
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
- depends on ARCH_ZYNQMP
+ depends on ARCH_ZYNQMP || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say 'Y' here if you want kernel support for Xilinx
@@ -32,12 +33,11 @@ config PCIE_XILINX_NWL
config PCI_FTPCI100
bool "Faraday Technology FTPCI100 PCI controller"
depends on OF
- depends on ARM
default ARCH_GEMINI
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want support for the PCIe host controller found
@@ -45,8 +45,8 @@ config PCI_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARM
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on ARM
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
@@ -54,7 +54,7 @@ config PCI_RCAR_GEN2
config PCIE_RCAR
bool "Renesas R-Car PCIe controller"
- depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe controller support on R-Car SoCs.
@@ -65,25 +65,25 @@ config PCI_HOST_COMMON
config PCI_HOST_GENERIC
bool "Generic PCI host controller"
- depends on (ARM || ARM64) && OF
+ depends on OF
select PCI_HOST_COMMON
select IRQ_DOMAIN
+ select PCI_DOMAINS
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
- depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC)
+ depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
config PCI_XGENE
bool "X-Gene PCIe controller"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
- select PCIEPORTBUS
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
There are 5 internal PCIe ports available. Each port is GEN3 capable
@@ -101,7 +101,7 @@ config PCI_XGENE_MSI
config PCI_V3_SEMI
bool "V3 Semiconductor PCI controller"
depends on OF
- depends on ARM
+ depends on ARM || COMPILE_TEST
default ARCH_INTEGRATOR_AP
config PCI_VERSATILE
@@ -147,8 +147,7 @@ config PCIE_IPROC_MSI
config PCIE_ALTERA
bool "Altera PCIe controller"
- depends on ARM || NIOS2
- depends on OF_PCI
+ depends on ARM || NIOS2 || COMPILE_TEST
select PCI_DOMAINS
help
Say Y here if you want to enable PCIe controller support on Altera
@@ -164,7 +163,7 @@ config PCIE_ALTERA_MSI
config PCI_HOST_THUNDER_PEM
bool "Cavium Thunder PCIe controller to off-chip devices"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
@@ -172,29 +171,45 @@ config PCI_HOST_THUNDER_PEM
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
config PCIE_ROCKCHIP
- tristate "Rockchip PCIe controller"
+ bool
+ depends on PCI
+
+config PCIE_ROCKCHIP_HOST
+ tristate "Rockchip PCIe host controller"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
select MFD_SYSCON
+ select PCIE_ROCKCHIP
help
Say Y here if you want internal PCI support on Rockchip SoC.
There is 1 internal PCIe port available to support GEN2 with
4 slots.
+config PCIE_ROCKCHIP_EP
+ bool "Rockchip PCIe endpoint controller"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select MFD_SYSCON
+ select PCIE_ROCKCHIP
+ help
+ Say Y here if you want to support Rockchip PCIe controller in
+ endpoint mode on Rockchip SoC. There is 1 internal PCIe port
+ available to support GEN2 with 4 slots.
+
config PCIE_MEDIATEK
bool "MediaTek PCIe controller"
- depends on (ARM || ARM64) && (ARCH_MEDIATEK || COMPILE_TEST)
+ depends on ARCH_MEDIATEK || COMPILE_TEST
depends on OF
- depends on PCI
- select PCIEPORTBUS
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 3b1059190867..11d21b026d37 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
+obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 9abf549631b4..d3172d5d3d35 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -19,6 +19,8 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include "../pci.h"
+
/* PCIe core registers */
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
@@ -822,14 +824,13 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct resource_entry *win, *tmp;
resource_size_t iobase;
INIT_LIST_HEAD(&pcie->resources);
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
- &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, &iobase);
if (err)
return err;
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 5008fd87956a..a1ebe9ed441f 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -28,6 +28,8 @@
#include <linux/irq.h>
#include <linux/clk.h>
+#include "../pci.h"
+
/*
* Special configuration registers directly in the first few words
* in I/O space.
@@ -476,8 +478,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
- &res, &io_base);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &res, &io_base);
if (ret)
return ret;
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 5d028f53fdcd..d8f10451f273 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -101,5 +101,18 @@ int pci_host_common_probe(struct platform_device *pdev,
return ret;
}
+ platform_set_drvdata(pdev, bridge->bus);
+ return 0;
+}
+
+int pci_host_common_remove(struct platform_device *pdev)
+{
+ struct pci_bus *bus = platform_get_drvdata(pdev);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bus);
+ pci_remove_root_bus(bus);
+ pci_unlock_rescan_remove();
+
return 0;
}
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 45319ee3b484..dea3ec7592a2 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -95,5 +95,6 @@ static struct platform_driver gen_pci_driver = {
.suppress_bind_attrs = true,
},
.probe = gen_pci_probe,
+ .remove = pci_host_common_remove,
};
builtin_platform_driver(gen_pci_driver);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 50cdefe3f6d3..6cc5036ac83c 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -433,7 +433,7 @@ enum hv_pcibus_state {
struct hv_pcibus_device {
struct pci_sysdata sysdata;
enum hv_pcibus_state state;
- atomic_t remove_lock;
+ refcount_t remove_lock;
struct hv_device *hdev;
resource_size_t low_mmio_space;
resource_size_t high_mmio_space;
@@ -488,17 +488,6 @@ enum hv_pcichild_state {
hv_pcichild_maximum
};
-enum hv_pcidev_ref_reason {
- hv_pcidev_ref_invalid = 0,
- hv_pcidev_ref_initial,
- hv_pcidev_ref_by_slot,
- hv_pcidev_ref_packet,
- hv_pcidev_ref_pnp,
- hv_pcidev_ref_childlist,
- hv_pcidev_irqdata,
- hv_pcidev_ref_max
-};
-
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
@@ -548,14 +537,41 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp,
static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
u32 wslot);
-static void get_pcichild(struct hv_pci_dev *hv_pcidev,
- enum hv_pcidev_ref_reason reason);
-static void put_pcichild(struct hv_pci_dev *hv_pcidev,
- enum hv_pcidev_ref_reason reason);
+
+static void get_pcichild(struct hv_pci_dev *hpdev)
+{
+ refcount_inc(&hpdev->refs);
+}
+
+static void put_pcichild(struct hv_pci_dev *hpdev)
+{
+ if (refcount_dec_and_test(&hpdev->refs))
+ kfree(hpdev);
+}
static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+/*
+ * There is no good way to get notified from vmbus_onoffer_rescind(),
+ * so let's use polling here, since this is not a hot path.
+ */
+static int wait_for_response(struct hv_device *hdev,
+ struct completion *comp)
+{
+ while (true) {
+ if (hdev->channel->rescind) {
+ dev_warn_once(&hdev->device, "The device is gone.\n");
+ return -ENODEV;
+ }
+
+ if (wait_for_completion_timeout(comp, HZ / 10))
+ break;
+ }
+
+ return 0;
+}
+
/**
* devfn_to_wslot() - Convert from Linux PCI slot to Windows
* @devfn: The Linux representation of PCI slot
@@ -762,7 +778,7 @@ static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
_hv_pcifront_read_config(hpdev, where, size, val);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return PCIBIOS_SUCCESSFUL;
}
@@ -790,7 +806,7 @@ static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
_hv_pcifront_write_config(hpdev, where, size, val);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return PCIBIOS_SUCCESSFUL;
}
@@ -856,7 +872,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
}
hv_int_desc_free(hpdev, int_desc);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
}
static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
@@ -1186,13 +1202,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_lo = comp.int_desc.address & 0xffffffff;
msg->data = comp.int_desc.data;
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return;
free_int_desc:
kfree(int_desc);
drop_reference:
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return_null_message:
msg->address_hi = 0;
msg->address_lo = 0;
@@ -1283,7 +1299,6 @@ static u64 get_bar_size(u64 bar_val)
*/
static void survey_child_resources(struct hv_pcibus_device *hbus)
{
- struct list_head *iter;
struct hv_pci_dev *hpdev;
resource_size_t bar_size = 0;
unsigned long flags;
@@ -1309,8 +1324,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
* for a child device are a power of 2 in size and aligned in memory,
* so it's sufficient to just add them up without tracking alignment.
*/
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev, list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
for (i = 0; i < 6; i++) {
if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
dev_err(&hbus->hdev->device,
@@ -1363,7 +1377,6 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
resource_size_t low_base = 0;
resource_size_t bar_size;
struct hv_pci_dev *hpdev;
- struct list_head *iter;
unsigned long flags;
u64 bar_val;
u32 command;
@@ -1385,9 +1398,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
/* Pick addresses for the BARs. */
do {
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
for (i = 0; i < 6; i++) {
bar_val = hpdev->probed_bar[i];
if (bar_val == 0)
@@ -1508,19 +1519,6 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
complete(&completion->host_event);
}
-static void get_pcichild(struct hv_pci_dev *hpdev,
- enum hv_pcidev_ref_reason reason)
-{
- refcount_inc(&hpdev->refs);
-}
-
-static void put_pcichild(struct hv_pci_dev *hpdev,
- enum hv_pcidev_ref_reason reason)
-{
- if (refcount_dec_and_test(&hpdev->refs))
- kfree(hpdev);
-}
-
/**
* new_pcichild_device() - Create a new child device
* @hbus: The internal struct tracking this root PCI bus.
@@ -1568,24 +1566,14 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
if (ret)
goto error;
- wait_for_completion(&comp_pkt.host_event);
+ if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
+ goto error;
hpdev->desc = *desc;
refcount_set(&hpdev->refs, 1);
- get_pcichild(hpdev, hv_pcidev_ref_childlist);
+ get_pcichild(hpdev);
spin_lock_irqsave(&hbus->device_list_lock, flags);
- /*
- * When a device is being added to the bus, we set the PCI domain
- * number to be the device serial number, which is non-zero and
- * unique on the same VM. The serial numbers start with 1, and
- * increase by 1 for each device. So device names including this
- * can have shorter names than based on the bus instance UUID.
- * Only the first device serial number is used for domain, so the
- * domain number will not change after the first device is added.
- */
- if (list_empty(&hbus->children))
- hbus->sysdata.domain = desc->ser;
list_add_tail(&hpdev->list_entry, &hbus->children);
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
return hpdev;
@@ -1618,7 +1606,7 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
list_for_each_entry(iter, &hbus->children, list_entry) {
if (iter->desc.win_slot.slot == wslot) {
hpdev = iter;
- get_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ get_pcichild(hpdev);
break;
}
}
@@ -1654,7 +1642,6 @@ static void pci_devices_present_work(struct work_struct *work)
{
u32 child_no;
bool found;
- struct list_head *iter;
struct pci_function_description *new_desc;
struct hv_pci_dev *hpdev;
struct hv_pcibus_device *hbus;
@@ -1691,10 +1678,8 @@ static void pci_devices_present_work(struct work_struct *work)
/* First, mark all existing children as reported missing. */
spin_lock_irqsave(&hbus->device_list_lock, flags);
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
- hpdev->reported_missing = true;
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ hpdev->reported_missing = true;
}
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
@@ -1704,11 +1689,8 @@ static void pci_devices_present_work(struct work_struct *work)
new_desc = &dr->func[child_no];
spin_lock_irqsave(&hbus->device_list_lock, flags);
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
- if ((hpdev->desc.win_slot.slot ==
- new_desc->win_slot.slot) &&
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
(hpdev->desc.v_id == new_desc->v_id) &&
(hpdev->desc.d_id == new_desc->d_id) &&
(hpdev->desc.ser == new_desc->ser)) {
@@ -1730,12 +1712,10 @@ static void pci_devices_present_work(struct work_struct *work)
spin_lock_irqsave(&hbus->device_list_lock, flags);
do {
found = false;
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
if (hpdev->reported_missing) {
found = true;
- put_pcichild(hpdev, hv_pcidev_ref_childlist);
+ put_pcichild(hpdev);
list_move_tail(&hpdev->list_entry, &removed);
break;
}
@@ -1748,7 +1728,7 @@ static void pci_devices_present_work(struct work_struct *work)
hpdev = list_first_entry(&removed, struct hv_pci_dev,
list_entry);
list_del(&hpdev->list_entry);
- put_pcichild(hpdev, hv_pcidev_ref_initial);
+ put_pcichild(hpdev);
}
switch (hbus->state) {
@@ -1883,8 +1863,8 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
- put_pcichild(hpdev, hv_pcidev_ref_childlist);
- put_pcichild(hpdev, hv_pcidev_ref_pnp);
+ put_pcichild(hpdev);
+ put_pcichild(hpdev);
put_hvpcibus(hpdev->hbus);
}
@@ -1899,7 +1879,7 @@ static void hv_eject_device_work(struct work_struct *work)
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
hpdev->state = hv_pcichild_ejecting;
- get_pcichild(hpdev, hv_pcidev_ref_pnp);
+ get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
get_hvpcibus(hpdev->hbus);
queue_work(hpdev->hbus->wq, &hpdev->wrk);
@@ -1999,8 +1979,7 @@ static void hv_pci_onchannelcallback(void *context)
dev_message->wslot.slot);
if (hpdev) {
hv_pci_eject_device(hpdev);
- put_pcichild(hpdev,
- hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
}
break;
@@ -2069,15 +2048,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
sizeof(struct pci_version_request),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
if (ret) {
dev_err(&hdev->device,
- "PCI Pass-through VSP failed sending version reqquest: %#x",
+ "PCI Pass-through VSP failed to request version: %d",
ret);
goto exit;
}
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status >= 0) {
pci_protocol_version = pci_protocol_versions[i];
dev_info(&hdev->device,
@@ -2286,11 +2266,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
if (ret)
goto exit;
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -2330,11 +2311,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
0, VM_PKT_DATA_INBAND, 0);
- if (ret)
- return ret;
+ if (!ret)
+ ret = wait_for_response(hdev, &comp);
- wait_for_completion(&comp);
- return 0;
+ return ret;
}
/**
@@ -2398,17 +2378,17 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
ret = vmbus_sendpacket(hdev->channel, &pkt->message,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
if (ret)
break;
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status < 0) {
ret = -EPROTO;
dev_err(&hdev->device,
@@ -2446,7 +2426,7 @@ static int hv_send_resources_released(struct hv_device *hdev)
pkt.message_type.type = PCI_RESOURCES_RELEASED;
pkt.wslot.slot = hpdev->desc.win_slot.slot;
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
VM_PKT_DATA_INBAND, 0);
@@ -2459,12 +2439,12 @@ static int hv_send_resources_released(struct hv_device *hdev)
static void get_hvpcibus(struct hv_pcibus_device *hbus)
{
- atomic_inc(&hbus->remove_lock);
+ refcount_inc(&hbus->remove_lock);
}
static void put_hvpcibus(struct hv_pcibus_device *hbus)
{
- if (atomic_dec_and_test(&hbus->remove_lock))
+ if (refcount_dec_and_test(&hbus->remove_lock))
complete(&hbus->remove_event);
}
@@ -2508,7 +2488,7 @@ static int hv_pci_probe(struct hv_device *hdev,
hdev->dev_instance.b[8] << 8;
hbus->hdev = hdev;
- atomic_inc(&hbus->remove_lock);
+ refcount_set(&hbus->remove_lock, 1);
INIT_LIST_HEAD(&hbus->children);
INIT_LIST_HEAD(&hbus->dr_list);
INIT_LIST_HEAD(&hbus->resources_for_children);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 5d4dccfc9d81..23e270839e6a 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -21,6 +21,8 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include "../pci.h"
+
/*
* PCIe unit register offsets.
*/
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index dd4f1a6b57c5..326171cb1a97 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -21,6 +21,8 @@
#include <linux/sizes.h>
#include <linux/slab.h>
+#include "../pci.h"
+
/* AHB-PCI Bridge PCI communication registers */
#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 389e74be846c..f4f53d092e00 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -40,6 +40,8 @@
#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>
+#include "../pci.h"
+
#define INT_PCI_MSI_NR (8 * 32)
/* register definitions */
diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c
index 0a4dea796663..68b8bfbdb867 100644
--- a/drivers/pci/host/pci-v3-semi.c
+++ b/drivers/pci/host/pci-v3-semi.c
@@ -33,6 +33,8 @@
#include <linux/regmap.h>
#include <linux/clk.h>
+#include "../pci.h"
+
#define V3_PCI_VENDOR 0x00000000
#define V3_PCI_DEVICE 0x00000002
#define V3_PCI_CMD 0x00000004
@@ -791,7 +793,8 @@ static int v3_pci_probe(struct platform_device *pdev)
if (IS_ERR(v3->config_base))
return PTR_ERR(v3->config_base);
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &io_base);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &io_base);
if (ret)
return ret;
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 5b3876f5312b..994f32061b32 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -15,6 +15,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
static void __iomem *versatile_pci_base;
static void __iomem *versatile_cfg_base[2];
@@ -64,11 +66,10 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *res)
{
int err, mem = 1, res_valid = 0;
- struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win, *tmp;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
if (err)
return err;
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 0a0d7ee6d3c9..d854d67e873c 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -22,6 +22,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define PCIECORE_CTLANDSTATUS 0x50
#define PIM1_1L 0x80
#define IBAR2 0x98
@@ -632,7 +634,8 @@ static int xgene_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (ret)
return ret;
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index a6af62e0256d..7d05e51205b3 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -17,6 +17,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define RP_TX_REG0 0x2000
#define RP_TX_REG1 0x2004
#define RP_TX_CNTRL 0x2008
@@ -488,11 +490,10 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct resource_entry *win;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
- NULL);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, NULL);
if (err)
return err;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index e764a2a2693c..f30f5f3fb5c1 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -16,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
+#include "../pci.h"
#include "pcie-iproc.h"
static const struct of_device_id iproc_pcie_of_match_table[] = {
@@ -99,8 +100,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
pcie->phy = NULL;
}
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
- &iobase);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
+ &iobase);
if (ret) {
dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
index a8b20c5012a9..0baabe30858f 100644
--- a/drivers/pci/host/pcie-mediatek.c
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -11,8 +11,10 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
@@ -22,6 +24,8 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include "../pci.h"
+
/* PCIe shared registers */
#define PCIE_SYS_CFG 0x00
#define PCIE_INT_ENABLE 0x0c
@@ -66,6 +70,10 @@
/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR 0x0c0
+
+#define PCIE_CONF_VEND_ID 0x100
+#define PCIE_CONF_CLASS_ID 0x106
+
#define PCIE_INT_MASK 0x420
#define INTX_MASK GENMASK(19, 16)
#define INTX_SHIFT 16
@@ -125,13 +133,13 @@ struct mtk_pcie_port;
/**
* struct mtk_pcie_soc - differentiate between host generations
- * @has_msi: whether this host supports MSI interrupts or not
+ * @need_fix_class_id: whether this host's class ID needed to be fixed or not
* @ops: pointer to configuration access functions
* @startup: pointer to controller setting functions
* @setup_irq: pointer to initialize IRQ functions
*/
struct mtk_pcie_soc {
- bool has_msi;
+ bool need_fix_class_id;
struct pci_ops *ops;
int (*startup)(struct mtk_pcie_port *port);
int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
@@ -155,7 +163,9 @@ struct mtk_pcie_soc {
* @lane: lane count
* @slot: port slot
* @irq_domain: legacy INTx IRQ domain
+ * @inner_domain: inner IRQ domain
* @msi_domain: MSI IRQ domain
+ * @lock: protect the msi_irq_in_use bitmap
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
struct mtk_pcie_port {
@@ -173,7 +183,9 @@ struct mtk_pcie_port {
u32 lane;
u32 slot;
struct irq_domain *irq_domain;
+ struct irq_domain *inner_domain;
struct irq_domain *msi_domain;
+ struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
@@ -375,6 +387,7 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
struct resource *mem = &pcie->mem;
+ const struct mtk_pcie_soc *soc = port->pcie->soc;
u32 val;
size_t size;
int err;
@@ -403,6 +416,15 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
PCIE_MAC_SRSTB | PCIE_CRSTB;
writel(val, port->base + PCIE_RST_CTRL);
+ /* Set up vendor ID and class code */
+ if (soc->need_fix_class_id) {
+ val = PCI_VENDOR_ID_MEDIATEK;
+ writew(val, port->base + PCIE_CONF_VEND_ID);
+
+ val = PCI_CLASS_BRIDGE_HOST;
+ writew(val, port->base + PCIE_CONF_CLASS_ID);
+ }
+
/* 100ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
!!(val & PCIE_PORT_LINKUP_V2), 20,
@@ -430,103 +452,130 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
return 0;
}
-static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port)
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- int msi;
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr;
- msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
- if (msi < MTK_MSI_IRQS_NUM)
- set_bit(msi, port->msi_irq_in_use);
- else
- return -ENOSPC;
+ /* MT2712/MT7622 only support 32-bit MSI addresses */
+ addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+ msg->address_hi = 0;
+ msg->address_lo = lower_32_bits(addr);
+
+ msg->data = data->hwirq;
- return msi;
+ dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq)
+static int mtk_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- clear_bit(hwirq, port->msi_irq_in_use);
+ return -EINVAL;
}
-static int mtk_pcie_msi_setup_irq(struct msi_controller *chip,
- struct pci_dev *pdev, struct msi_desc *desc)
+static void mtk_msi_ack_irq(struct irq_data *data)
{
- struct mtk_pcie_port *port;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
- phys_addr_t msg_addr;
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ u32 hwirq = data->hwirq;
- port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
- if (!port)
- return -EINVAL;
+ writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
+}
- hwirq = mtk_pcie_msi_alloc(port);
- if (hwirq < 0)
- return hwirq;
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+ .name = "MTK MSI",
+ .irq_compose_msi_msg = mtk_compose_msi_msg,
+ .irq_set_affinity = mtk_msi_set_affinity,
+ .irq_ack = mtk_msi_ack_irq,
+};
- irq = irq_create_mapping(port->msi_domain, hwirq);
- if (!irq) {
- mtk_pcie_msi_free(port, hwirq);
- return -EINVAL;
- }
+static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct mtk_pcie_port *port = domain->host_data;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&port->lock);
- chip->dev = &pdev->dev;
+ bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+ if (bit >= MTK_MSI_IRQS_NUM) {
+ mutex_unlock(&port->lock);
+ return -ENOSPC;
+ }
- irq_set_msi_desc(irq, desc);
+ __set_bit(bit, port->msi_irq_in_use);
- /* MT2712/MT7622 only support 32-bit MSI addresses */
- msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
- msg.address_hi = 0;
- msg.address_lo = lower_32_bits(msg_addr);
- msg.data = hwirq;
+ mutex_unlock(&port->lock);
- pci_write_msi_msg(irq, &msg);
+ irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq,
+ NULL, NULL);
return 0;
}
-static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
- struct pci_dev *pdev = to_pci_dev(chip->dev);
- struct irq_data *d = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct mtk_pcie_port *port;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
- port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
- if (!port)
- return;
+ mutex_lock(&port->lock);
- irq_dispose_mapping(irq);
- mtk_pcie_msi_free(port, hwirq);
+ if (!test_bit(d->hwirq, port->msi_irq_in_use))
+ dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ else
+ __clear_bit(d->hwirq, port->msi_irq_in_use);
+
+ mutex_unlock(&port->lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static struct msi_controller mtk_pcie_msi_chip = {
- .setup_irq = mtk_pcie_msi_setup_irq,
- .teardown_irq = mtk_msi_teardown_irq,
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mtk_pcie_irq_domain_alloc,
+ .free = mtk_pcie_irq_domain_free,
};
static struct irq_chip mtk_msi_irq_chip = {
- .name = "MTK PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
+ .name = "MTK PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
};
-static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static struct msi_domain_info mtk_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &mtk_msi_irq_chip,
+};
+
+static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+
+ mutex_init(&port->lock);
+
+ port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
+ &msi_domain_ops, port);
+ if (!port->inner_domain) {
+ dev_err(port->pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
+ port->inner_domain);
+ if (!port->msi_domain) {
+ dev_err(port->pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(port->inner_domain);
+ return -ENOMEM;
+ }
return 0;
}
-static const struct irq_domain_ops msi_domain_ops = {
- .map = mtk_pcie_msi_map,
-};
-
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
u32 val;
@@ -559,6 +608,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
{
struct device *dev = port->pcie->dev;
struct device_node *pcie_intc_node;
+ int ret;
/* Setup INTx */
pcie_intc_node = of_get_next_child(node, NULL);
@@ -575,27 +625,28 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM,
- &msi_domain_ops,
- &mtk_pcie_msi_chip);
- if (!port->msi_domain) {
- dev_err(dev, "failed to create MSI IRQ domain\n");
- return -ENODEV;
- }
+ ret = mtk_pcie_allocate_msi_domains(port);
+ if (ret)
+ return ret;
+
mtk_pcie_enable_msi(port);
}
return 0;
}
-static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
+static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
- struct mtk_pcie_port *port = (struct mtk_pcie_port *)data;
+ struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
u32 virq;
u32 bit = INTX_SHIFT;
- while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) {
+ chained_irq_enter(irqchip, desc);
+
+ status = readl(port->base + PCIE_INT_STATUS);
+ if (status & INTX_MASK) {
for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
/* Clear the INTx */
writel(1 << bit, port->base + PCIE_INT_STATUS);
@@ -606,14 +657,12 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) {
+ if (status & MSI_STATUS) {
unsigned long imsi_status;
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
- /* Clear the MSI */
- writel(1 << bit, port->base + PCIE_IMSI_STATUS);
- virq = irq_find_mapping(port->msi_domain, bit);
+ virq = irq_find_mapping(port->inner_domain, bit);
generic_handle_irq(virq);
}
}
@@ -622,7 +671,9 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
}
}
- return IRQ_HANDLED;
+ chained_irq_exit(irqchip, desc);
+
+ return;
}
static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
@@ -633,20 +684,15 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
struct platform_device *pdev = to_platform_device(dev);
int err, irq;
- irq = platform_get_irq(pdev, port->slot);
- err = devm_request_irq(dev, irq, mtk_pcie_intr_handler,
- IRQF_SHARED, "mtk-pcie", port);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", irq);
- return err;
- }
-
err = mtk_pcie_init_irq_domain(port, node);
if (err) {
dev_err(dev, "failed to init PCIe IRQ domain\n");
return err;
}
+ irq = platform_get_irq(pdev, port->slot);
+ irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+
return 0;
}
@@ -1080,8 +1126,6 @@ static int mtk_pcie_register_host(struct pci_host_bridge *host)
host->map_irq = of_irq_parse_and_map_pci;
host->swizzle_irq = pci_common_swizzle;
host->sysdata = pcie;
- if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi)
- host->msi = &mtk_pcie_msi_chip;
err = pci_scan_root_bus_bridge(host);
if (err < 0)
@@ -1142,8 +1186,14 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
.startup = mtk_pcie_startup_port,
};
-static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
- .has_msi = true,
+static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_v2,
+ .setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
+ .need_fix_class_id = true,
.ops = &mtk_pcie_ops_v2,
.startup = mtk_pcie_startup_port_v2,
.setup_irq = mtk_pcie_setup_irq,
@@ -1152,8 +1202,8 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
static const struct of_device_id mtk_pcie_ids[] = {
{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
- { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 },
- { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 },
+ { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
+ { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
{},
};
diff --git a/drivers/pci/host/pcie-mobiveil.c b/drivers/pci/host/pcie-mobiveil.c
new file mode 100644
index 000000000000..4d6c20e47bed
--- /dev/null
+++ b/drivers/pci/host/pcie-mobiveil.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* register offsets and bit positions */
+
+/*
+ * translation tables are grouped into windows, each window registers are
+ * grouped into blocks of 4 or 16 registers each
+ */
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
+
+#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
+
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_EN 0xc00
+#define PAGE_SEL_OFFSET_SHIFT 10
+
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
+
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
+
+#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_INTX_MASK 0x01e0
+#define PAB_INTP_MSI_MASK 0x8
+
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+
+#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
+#define AXI_WINDOW_ALIGN_MASK 3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS 0x474
+
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START 5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI 16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+struct mobiveil_msi { /* MSI information */
+ struct mutex lock; /* protect bitmap variable */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ phys_addr_t msi_pages_phys;
+ int num_of_vectors;
+ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_pcie {
+ struct platform_device *pdev;
+ struct list_head resources;
+ void __iomem *config_axi_slave_base; /* endpoint config base */
+ void __iomem *csr_axi_slave_base; /* root port config base */
+ void __iomem *apb_csr_base; /* MSI register base */
+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
+ struct irq_domain *intx_domain;
+ raw_spinlock_t intx_mask_lock;
+ int irq;
+ int apio_wins;
+ int ppio_wins;
+ int ob_wins_configured; /* configured outbound windows */
+ int ib_wins_configured; /* configured inbound windows */
+ struct resource *ob_io_res;
+ char root_bus_nr;
+ struct mobiveil_msi msi;
+};
+
+static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->csr_axi_slave_base + reg);
+}
+
+static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->csr_axi_slave_base + reg);
+}
+
+static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+ return (csr_readl(pcie, LTSSM_STATUS) &
+ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+
+ /* Only one device down on each root port */
+ if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
+ return false;
+
+ /*
+ * Do not read more than one device on the bus directly
+ * attached to RC
+ */
+ if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
+ return false;
+
+ return true;
+}
+
+/*
+ * mobiveil_pcie_map_bus - routine to get the configuration base of either
+ * root port or endpoint
+ */
+static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+
+ if (!mobiveil_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ if (bus->number == pcie->root_bus_nr) {
+ /* RC config access */
+ return pcie->csr_axi_slave_base + where;
+ }
+
+ /*
+ * EP config access (in Config/APIO space)
+ * Program PEX Address base (31..16 bits) with appropriate value
+ * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
+ * Relies on pci_lock serialization
+ */
+ csr_writel(pcie, bus->number << PAB_BUS_SHIFT |
+ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
+ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT,
+ PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ return pcie->config_axi_slave_base + where;
+}
+
+static struct pci_ops mobiveil_pcie_ops = {
+ .map_bus = mobiveil_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void mobiveil_pcie_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct device *dev = &pcie->pdev->dev;
+ struct mobiveil_msi *msi = &pcie->msi;
+ u32 msi_data, msi_addr_lo, msi_addr_hi;
+ u32 intr_status, msi_status;
+ unsigned long shifted_status;
+ u32 bit, virq, val, mask;
+
+ /*
+ * The core provides a single interrupt for both INTx/MSI messages.
+ * So we'll read both INTx and MSI status
+ */
+
+ chained_irq_enter(chip, desc);
+
+ /* read INTx status */
+ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ intr_status = val & mask;
+
+ /* Handle INTx */
+ if (intr_status & PAB_INTP_INTX_MASK) {
+ shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >>
+ PAB_INTX_START;
+ do {
+ for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
+ virq = irq_find_mapping(pcie->intx_domain,
+ bit + 1);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err_ratelimited(dev,
+ "unexpected IRQ, INT%d\n", bit);
+
+ /* clear interrupt */
+ csr_writel(pcie,
+ shifted_status << PAB_INTX_START,
+ PAB_INTP_AMBA_MISC_STAT);
+ }
+ } while ((shifted_status >> PAB_INTX_START) != 0);
+ }
+
+ /* read extra MSI status register */
+ msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
+
+ /* handle MSI interrupts */
+ while (msi_status & 1) {
+ msi_data = readl_relaxed(pcie->apb_csr_base
+ + MSI_DATA_OFFSET);
+
+ /*
+ * MSI_STATUS_OFFSET register gets updated to zero
+ * once we pop not only the MSI data but also address
+ * from MSI hardware FIFO. So keeping these following
+ * two dummy reads.
+ */
+ msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_L_OFFSET);
+ msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_H_OFFSET);
+ dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
+ msi_data, msi_addr_hi, msi_addr_lo);
+
+ virq = irq_find_mapping(msi->dev_domain, msi_data);
+ if (virq)
+ generic_handle_irq(virq);
+
+ msi_status = readl_relaxed(pcie->apb_csr_base +
+ MSI_STATUS_OFFSET);
+ }
+
+ /* Clear the interrupt status */
+ csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+ chained_irq_exit(chip, desc);
+}
+
+static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct platform_device *pdev = pcie->pdev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ const char *type;
+
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ /* map config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "config_axi_slave");
+ pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->config_axi_slave_base))
+ return PTR_ERR(pcie->config_axi_slave_base);
+ pcie->ob_io_res = res;
+
+ /* map csr resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "csr_axi_slave");
+ pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->csr_axi_slave_base))
+ return PTR_ERR(pcie->csr_axi_slave_base);
+ pcie->pcie_reg_base = res->start;
+
+ /* map MSI config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
+ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->apb_csr_base))
+ return PTR_ERR(pcie->apb_csr_base);
+
+ /* read the number of windows requested */
+ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
+ pcie->apio_wins = MAX_PIO_WINDOWS;
+
+ if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
+ pcie->ppio_wins = MAX_PIO_WINDOWS;
+
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq <= 0) {
+ dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
+
+ return 0;
+}
+
+/*
+ * select_paged_register - routine to access paged register of root complex
+ *
+ * registers of RC are paged, for this scheme to work
+ * extracted higher 6 bits of the offset will be written to pg_sel
+ * field of PAB_CTRL register and rest of the lower 10 bits enabled with
+ * PAGE_SEL_EN are used as offset of the register.
+ */
+static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+ int pab_ctrl_dw, pg_sel;
+
+ /* clear pg_sel field */
+ pab_ctrl_dw = csr_readl(pcie, PAB_CTRL);
+ pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT));
+
+ /* set pg_sel field */
+ pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
+ pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT));
+ csr_writel(pcie, pab_ctrl_dw, PAB_CTRL);
+}
+
+static void write_paged_register(struct mobiveil_pcie *pcie,
+ u32 val, u32 offset)
+{
+ u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+ select_paged_register(pcie, offset);
+ csr_writel(pcie, val, off);
+}
+
+static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+ u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+ select_paged_register(pcie, offset);
+ return csr_readl(pcie, off);
+}
+
+static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+ int pci_addr, u32 type, u64 size)
+{
+ int pio_ctrl_val;
+ int amap_ctrl_dw;
+ u64 size64 = ~(size - 1);
+
+ if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max inbound windows reached !\n");
+ return;
+ }
+
+ pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ csr_writel(pcie,
+ pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL);
+ amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT));
+ amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT));
+
+ write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64),
+ PAB_PEX_AMAP_CTRL(win_num));
+
+ write_paged_register(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+ write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
+ write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num));
+}
+
+/*
+ * routine to program the outbound windows
+ */
+static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size)
+{
+
+ u32 value, type;
+ u64 size64 = ~(size - 1);
+
+ if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max outbound windows reached !\n");
+ return;
+ }
+
+ /*
+ * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
+ * to 4 KB in PAB_AXI_AMAP_CTRL register
+ */
+ type = config_io_bit;
+ value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num));
+
+ write_paged_register(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
+
+ /*
+ * program AXI window base with appropriate value in
+ * PAB_AXI_AMAP_AXI_WIN0 register
+ */
+ value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num));
+ csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+
+ value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ob_wins_configured++;
+}
+
+static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (mobiveil_pcie_link_up(pcie))
+ return 0;
+
+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+ }
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+ return -ETIMEDOUT;
+}
+
+static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
+{
+ phys_addr_t msg_addr = pcie->pcie_reg_base;
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ pcie->msi.num_of_vectors = PCI_NUM_MSI;
+ msi->msi_pages_phys = (phys_addr_t)msg_addr;
+
+ writel_relaxed(lower_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
+ writel_relaxed(upper_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
+ writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
+ writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
+}
+
+static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+{
+ u32 value, pab_ctrl, type = 0;
+ int err;
+ struct resource_entry *win, *tmp;
+
+ err = mobiveil_bringup_link(pcie);
+ if (err) {
+ dev_info(&pcie->pdev->dev, "link bring-up failed\n");
+ return err;
+ }
+
+ /*
+ * program Bus Master Enable Bit in Command Register in PAB Config
+ * Space
+ */
+ value = csr_readl(pcie, PCI_COMMAND);
+ csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER, PCI_COMMAND);
+
+ /*
+ * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
+ * register
+ */
+ pab_ctrl = csr_readl(pcie, PAB_CTRL);
+ csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) |
+ (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL);
+
+ csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
+
+ /*
+ * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
+ * PAB_AXI_PIO_CTRL Register
+ */
+ value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL);
+
+ /*
+ * we'll program one outbound window for config reads and
+ * another default inbound window for all the upstream traffic
+ * rest of the outbound windows will be configured according to
+ * the "ranges" field defined in device tree
+ */
+
+ /* config outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE,
+ resource_size(pcie->ob_io_res));
+
+ /* memory inbound translation window */
+ program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
+ type = 0;
+ if (resource_type(win->res) == IORESOURCE_MEM)
+ type = MEM_WINDOW_TYPE;
+ if (resource_type(win->res) == IORESOURCE_IO)
+ type = IO_WINDOW_TYPE;
+ if (type) {
+ /* configure outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ win->res->start, 0, type,
+ resource_size(win->res));
+ }
+ }
+
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
+
+ return err;
+}
+
+static void mobiveil_mask_intx_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct mobiveil_pcie *pcie;
+ unsigned long flags;
+ u32 mask, shifted_val;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static void mobiveil_unmask_intx_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct mobiveil_pcie *pcie;
+ unsigned long flags;
+ u32 shifted_val, mask;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static struct irq_chip intx_irq_chip = {
+ .name = "mobiveil_pcie:intx",
+ .irq_enable = mobiveil_unmask_intx_irq,
+ .irq_disable = mobiveil_mask_intx_irq,
+ .irq_mask = mobiveil_mask_intx_irq,
+ .irq_unmask = mobiveil_unmask_intx_irq,
+};
+
+/* routine to setup the INTx related data */
+static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ return 0;
+}
+
+/* INTx domain operations structure */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mobiveil_pcie_intx_map,
+};
+
+static struct irq_chip mobiveil_msi_irq_chip = {
+ .name = "Mobiveil PCIe MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mobiveil_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .chip = &mobiveil_msi_irq_chip,
+};
+
+static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip mobiveil_msi_bottom_irq_chip = {
+ .name = "Mobiveil MSI",
+ .irq_compose_msi_msg = mobiveil_compose_msi_msg,
+ .irq_set_affinity = mobiveil_msi_set_affinity,
+};
+
+static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs, void *args)
+{
+ struct mobiveil_pcie *pcie = domain->host_data;
+ struct mobiveil_msi *msi = &pcie->msi;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&msi->lock);
+
+ bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
+ if (bit >= msi->num_of_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->msi_irq_in_use);
+
+ mutex_unlock(&msi->lock);
+
+ irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
+ domain->host_data, handle_level_irq,
+ NULL, NULL);
+ return 0;
+}
+
+static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (!test_bit(d->hwirq, msi->msi_irq_in_use)) {
+ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ } else {
+ __clear_bit(d->hwirq, msi->msi_irq_in_use);
+ }
+
+ mutex_unlock(&msi->lock);
+}
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mobiveil_irq_msi_domain_alloc,
+ .free = mobiveil_irq_msi_domain_free,
+};
+
+static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ mutex_init(&pcie->msi.lock);
+ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, pcie);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &mobiveil_msi_domain_info, msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ /* setup INTx */
+ pcie->intx_domain = irq_domain_add_linear(node,
+ PCI_NUM_INTX, &intx_domain_ops, pcie);
+
+ if (!pcie->intx_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ raw_spin_lock_init(&pcie->intx_mask_lock);
+
+ /* setup MSI */
+ ret = mobiveil_allocate_msi_domains(pcie);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+ struct mobiveil_pcie *pcie;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+ resource_size_t iobase;
+ int ret;
+
+ /* allocate the PCIe port */
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENODEV;
+
+ pcie = pci_host_bridge_priv(bridge);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+
+ ret = mobiveil_pcie_parse_dt(pcie);
+ if (ret) {
+ dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&pcie->resources);
+
+ /* parse the host bridge base addresses from the device tree file */
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, &iobase);
+ if (ret) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * configure all inbound and outbound windows and prepare the RC for
+ * config access
+ */
+ ret = mobiveil_host_init(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to initialize host\n");
+ goto error;
+ }
+
+ /* fixup for PCIe class register */
+ csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
+
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ goto error;
+ }
+
+ ret = devm_request_pci_bus_resources(dev, &pcie->resources);
+ if (ret)
+ goto error;
+
+ /* Initialize bridge */
+ list_splice_init(&pcie->resources, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = pcie->root_bus_nr;
+ bridge->ops = &mobiveil_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ /* setup the kernel resources for the newly added PCIe root bus */
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret)
+ goto error;
+
+ bus = bridge->bus;
+
+ pci_assign_unassigned_bus_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bus);
+
+ return 0;
+error:
+ pci_free_resource_list(&pcie->resources);
+ return ret;
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+ {.compatible = "mbvl,gpex40-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+ .probe = mobiveil_pcie_probe,
+ .driver = {
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 6ab28f29ac6a..874d75c9ee4a 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -11,6 +11,7 @@
* Author: Phil Edworthy <phil.edworthy@renesas.com>
*/
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -24,18 +25,23 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
-#define CONFIG_SEND_ENABLE (1 << 31)
+#define CONFIG_SEND_ENABLE BIT(31)
#define TYPE0 (0 << 8)
-#define TYPE1 (1 << 8)
+#define TYPE1 BIT(8)
#define PCIECDR 0x000020
#define PCIEMSR 0x000028
#define PCIEINTXR 0x000400
+#define PCIEPHYSR 0x0007f0
+#define PHYRDY BIT(0)
#define PCIEMSITXR 0x000840
/* Transfer control */
@@ -44,7 +50,7 @@
#define PCIETSTR 0x02004
#define DATA_LINK_ACTIVE 1
#define PCIEERRFR 0x02020
-#define UNSUPPORTED_REQUEST (1 << 4)
+#define UNSUPPORTED_REQUEST BIT(4)
#define PCIEMSIFR 0x02044
#define PCIEMSIALR 0x02048
#define MSIFE 1
@@ -57,17 +63,17 @@
/* local address reg & mask */
#define PCIELAR(x) (0x02200 + ((x) * 0x20))
#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
-#define LAM_PREFETCH (1 << 3)
-#define LAM_64BIT (1 << 2)
-#define LAR_ENABLE (1 << 1)
+#define LAM_PREFETCH BIT(3)
+#define LAM_64BIT BIT(2)
+#define LAR_ENABLE BIT(1)
/* PCIe address reg & mask */
#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
-#define PAR_ENABLE (1 << 31)
-#define IO_SPACE (1 << 8)
+#define PAR_ENABLE BIT(31)
+#define IO_SPACE BIT(8)
/* Configuration */
#define PCICONF(x) (0x010000 + ((x) * 0x4))
@@ -79,47 +85,46 @@
#define IDSETR1 0x011004
#define TLCTLR 0x011048
#define MACSR 0x011054
-#define SPCHGFIN (1 << 4)
-#define SPCHGFAIL (1 << 6)
-#define SPCHGSUC (1 << 7)
+#define SPCHGFIN BIT(4)
+#define SPCHGFAIL BIT(6)
+#define SPCHGSUC BIT(7)
#define LINK_SPEED (0xf << 16)
#define LINK_SPEED_2_5GTS (1 << 16)
#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
-#define SPEED_CHANGE (1 << 24)
-#define SCRAMBLE_DISABLE (1 << 27)
+#define SPEED_CHANGE BIT(24)
+#define SCRAMBLE_DISABLE BIT(27)
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
-#define SPCNGRSN (1 << 31)
+#define SPCNGRSN BIT(31)
/* R-Car H1 PHY */
#define H1_PCIEPHYADRR 0x04000c
-#define WRITE_CMD (1 << 16)
-#define PHY_ACK (1 << 24)
+#define WRITE_CMD BIT(16)
+#define PHY_ACK BIT(24)
#define RATE_POS 12
#define LANE_POS 8
#define ADR_POS 0
#define H1_PCIEPHYDOUTR 0x040014
-#define H1_PCIEPHYSR 0x040018
/* R-Car Gen2 PHY */
#define GEN2_PCIEPHYADDR 0x780
#define GEN2_PCIEPHYDATA 0x784
#define GEN2_PCIEPHYCTRL 0x78c
-#define INT_PCI_MSI_NR 32
+#define INT_PCI_MSI_NR 32
-#define RCONF(x) (PCICONF(0)+(x))
-#define RPMCAP(x) (PMCAP(0)+(x))
-#define REXPCAP(x) (EXPCAP(0)+(x))
-#define RVCCAP(x) (VCCAP(0)+(x))
+#define RCONF(x) (PCICONF(0) + (x))
+#define RPMCAP(x) (PMCAP(0) + (x))
+#define REXPCAP(x) (EXPCAP(0) + (x))
+#define RVCCAP(x) (VCCAP(0) + (x))
-#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
-#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
-#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
-#define RCAR_PCI_MAX_RESOURCES 4
-#define MAX_NR_INBOUND_MAPS 6
+#define RCAR_PCI_MAX_RESOURCES 4
+#define MAX_NR_INBOUND_MAPS 6
struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
@@ -139,10 +144,10 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
/* Structure representing the PCIe interface */
struct rcar_pcie {
struct device *dev;
+ struct phy *phy;
void __iomem *base;
struct list_head resources;
int root_bus_nr;
- struct clk *clk;
struct clk *bus_clk;
struct rcar_msi msi;
};
@@ -527,12 +532,12 @@ static void phy_write_reg(struct rcar_pcie *pcie,
phy_wait_for_ack(pcie);
}
-static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
{
unsigned int timeout = 10;
while (timeout--) {
- if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
return 0;
msleep(5);
@@ -541,6 +546,21 @@ static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
return -ETIMEDOUT;
}
+static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10000;
+
+ while (timeout--) {
+ if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ return 0;
+
+ udelay(5);
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
int err;
@@ -551,6 +571,10 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
/* Set mode */
rcar_pci_write_reg(pcie, 1, PCIEMSR);
+ err = rcar_pcie_wait_for_phyrdy(pcie);
+ if (err)
+ return err;
+
/*
* Initial header for port config space is type 1, set the device
* class to match. Hardware takes care of propagating the IDSETR
@@ -605,10 +629,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
return 0;
}
-static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
+static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
{
- unsigned int timeout = 10;
-
/* Initialize the phy */
phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
@@ -627,17 +649,10 @@ static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
- while (timeout--) {
- if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
- return rcar_pcie_hw_init(pcie);
-
- msleep(5);
- }
-
- return -ETIMEDOUT;
+ return 0;
}
-static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
+static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie)
{
/*
* These settings come from the R-Car Series, 2nd Generation User's
@@ -654,7 +669,18 @@ static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
- return rcar_pcie_hw_init(pcie);
+ return 0;
+}
+
+static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
+{
+ int err;
+
+ err = phy_init(pcie->phy);
+ if (err)
+ return err;
+
+ return phy_power_on(pcie->phy);
}
static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -842,6 +868,20 @@ static const struct irq_domain_ops msi_domain_ops = {
.map = rcar_msi_map,
};
+static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie)
+{
+ struct rcar_msi *msi = &pcie->msi;
+ int i, irq;
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++) {
+ irq = irq_find_mapping(msi->domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(msi->domain);
+}
+
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -896,16 +936,35 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
return 0;
err:
- irq_domain_remove(msi->domain);
+ rcar_pcie_unmap_msi(pcie);
return err;
}
+static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
+{
+ struct rcar_msi *msi = &pcie->msi;
+
+ /* Disable all MSI interrupts */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+ /* Disable address decoding of the MSI interrupt, MSIFE */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
+
+ free_pages(msi->pages, 0);
+
+ rcar_pcie_unmap_msi(pcie);
+}
+
static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct resource res;
int err, i;
+ pcie->phy = devm_phy_optional_get(dev, "pcie");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
return err;
@@ -914,30 +973,17 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- pcie->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(pcie->clk)) {
- dev_err(dev, "cannot get platform clock\n");
- return PTR_ERR(pcie->clk);
- }
- err = clk_prepare_enable(pcie->clk);
- if (err)
- return err;
-
pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(pcie->bus_clk)) {
dev_err(dev, "cannot get pcie bus clock\n");
- err = PTR_ERR(pcie->bus_clk);
- goto fail_clk;
+ return PTR_ERR(pcie->bus_clk);
}
- err = clk_prepare_enable(pcie->bus_clk);
- if (err)
- goto fail_clk;
i = irq_of_parse_and_map(dev->of_node, 0);
if (!i) {
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
- goto err_map_reg;
+ goto err_irq1;
}
pcie->msi.irq1 = i;
@@ -945,17 +991,15 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
if (!i) {
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
- goto err_map_reg;
+ goto err_irq2;
}
pcie->msi.irq2 = i;
return 0;
-err_map_reg:
- clk_disable_unprepare(pcie->bus_clk);
-fail_clk:
- clk_disable_unprepare(pcie->clk);
-
+err_irq2:
+ irq_dispose_mapping(pcie->msi.irq1);
+err_irq1:
return err;
}
@@ -1051,63 +1095,28 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
}
static const struct of_device_id rcar_pcie_of_match[] = {
- { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
+ { .compatible = "renesas,pcie-r8a7779",
+ .data = rcar_pcie_phy_init_h1 },
{ .compatible = "renesas,pcie-r8a7790",
- .data = rcar_pcie_hw_init_gen2 },
+ .data = rcar_pcie_phy_init_gen2 },
{ .compatible = "renesas,pcie-r8a7791",
- .data = rcar_pcie_hw_init_gen2 },
+ .data = rcar_pcie_phy_init_gen2 },
{ .compatible = "renesas,pcie-rcar-gen2",
- .data = rcar_pcie_hw_init_gen2 },
- { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
- { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7795",
+ .data = rcar_pcie_phy_init_gen3 },
+ { .compatible = "renesas,pcie-rcar-gen3",
+ .data = rcar_pcie_phy_init_gen3 },
{},
};
-static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
-{
- int err;
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- resource_size_t iobase;
- struct resource_entry *win, *tmp;
-
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
- &iobase);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, &pci->resources);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
- struct resource *res = win->res;
-
- if (resource_type(res) == IORESOURCE_IO) {
- err = pci_remap_iospace(res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
-
- resource_list_destroy_entry(win);
- }
- }
- }
-
- return 0;
-
-out_release_res:
- pci_free_resource_list(&pci->resources);
- return err;
-}
-
static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_pcie *pcie;
unsigned int data;
int err;
- int (*hw_init_fn)(struct rcar_pcie *);
+ int (*phy_init_fn)(struct rcar_pcie *);
struct pci_host_bridge *bridge;
bridge = pci_alloc_host_bridge(sizeof(*pcie));
@@ -1118,36 +1127,45 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
- INIT_LIST_HEAD(&pcie->resources);
-
- err = rcar_pcie_parse_request_of_pci_ranges(pcie);
+ err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
if (err)
goto err_free_bridge;
+ pm_runtime_enable(pcie->dev);
+ err = pm_runtime_get_sync(pcie->dev);
+ if (err < 0) {
+ dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+ goto err_pm_disable;
+ }
+
err = rcar_pcie_get_resources(pcie);
if (err < 0) {
dev_err(dev, "failed to request resources: %d\n", err);
- goto err_free_resource_list;
+ goto err_pm_put;
+ }
+
+ err = clk_prepare_enable(pcie->bus_clk);
+ if (err) {
+ dev_err(dev, "failed to enable bus clock: %d\n", err);
+ goto err_unmap_msi_irqs;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
- goto err_free_resource_list;
+ goto err_clk_disable;
- pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
- if (err < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
- goto err_pm_disable;
+ phy_init_fn = of_device_get_match_data(dev);
+ err = phy_init_fn(pcie);
+ if (err) {
+ dev_err(dev, "failed to init PCIe PHY\n");
+ goto err_clk_disable;
}
/* Failure to get a link might just be that no cards are inserted */
- hw_init_fn = of_device_get_match_data(dev);
- err = hw_init_fn(pcie);
- if (err) {
+ if (rcar_pcie_hw_init(pcie)) {
dev_info(dev, "PCIe link down\n");
err = -ENODEV;
- goto err_pm_put;
+ goto err_clk_disable;
}
data = rcar_pci_read_reg(pcie, MACSR);
@@ -1159,24 +1177,34 @@ static int rcar_pcie_probe(struct platform_device *pdev)
dev_err(dev,
"failed to enable MSI support: %d\n",
err);
- goto err_pm_put;
+ goto err_clk_disable;
}
}
err = rcar_pcie_enable(pcie);
if (err)
- goto err_pm_put;
+ goto err_msi_teardown;
return 0;
+err_msi_teardown:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pcie_teardown_msi(pcie);
+
+err_clk_disable:
+ clk_disable_unprepare(pcie->bus_clk);
+
+err_unmap_msi_irqs:
+ irq_dispose_mapping(pcie->msi.irq2);
+ irq_dispose_mapping(pcie->msi.irq1);
+
err_pm_put:
pm_runtime_put(dev);
err_pm_disable:
pm_runtime_disable(dev);
-
-err_free_resource_list:
pci_free_resource_list(&pcie->resources);
+
err_free_bridge:
pci_free_host_bridge(bridge);
diff --git a/drivers/pci/host/pcie-rockchip-ep.c b/drivers/pci/host/pcie-rockchip-ep.c
new file mode 100644
index 000000000000..fc267a49a932
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-ep.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe endpoint controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/configfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epf.h>
+#include <linux/sizes.h>
+
+#include "pcie-rockchip.h"
+
+/**
+ * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
+ * @rockchip: Rockchip PCIe controller
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * dedicated outbound regions is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ * the sending of a memory write (MSI) / normal message (legacy
+ * IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ * the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct rockchip_pcie_ep {
+ struct rockchip_pcie rockchip;
+ struct pci_epc *epc;
+ u32 max_regions;
+ unsigned long ob_region_map;
+ phys_addr_t *ob_addr;
+ phys_addr_t irq_phys_addr;
+ void __iomem *irq_cpu_addr;
+ u64 irq_pci_addr;
+ u8 irq_pci_fn;
+ u8 irq_pending;
+};
+
+static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
+ u32 region)
+{
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region));
+}
+
+static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
+ u32 r, u32 type, u64 cpu_addr,
+ u64 pci_addr, size_t size)
+{
+ u64 sz = 1ULL << fls64(size - 1);
+ int num_pass_bits = ilog2(sz);
+ u32 addr0, addr1, desc0, desc1;
+ bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG);
+
+ /* The minimal region size is 1MB */
+ if (num_pass_bits < 8)
+ num_pass_bits = 8;
+
+ cpu_addr -= rockchip->mem_res->start;
+ addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) &
+ PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+ (lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+ addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr);
+ desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type;
+ desc1 = 0;
+
+ if (is_nor_msg) {
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+ rockchip_pcie_write(rockchip, desc0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+ rockchip_pcie_write(rockchip, desc1,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+ } else {
+ /* PCI bus address region */
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+ rockchip_pcie_write(rockchip, desc0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+ rockchip_pcie_write(rockchip, desc1,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+
+ addr0 =
+ ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+ (lower_32_bits(cpu_addr) &
+ PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+ addr1 = upper_32_bits(cpu_addr);
+ }
+
+ /* CPU bus address region */
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
+}
+
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+ struct pci_epf_header *hdr)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ /* All functions share the same vendor ID with function 0 */
+ if (fn == 0) {
+ u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
+ (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
+
+ rockchip_pcie_write(rockchip, vid_regs,
+ PCIE_CORE_CONFIG_VENDOR);
+ }
+
+ rockchip_pcie_write(rockchip, hdr->deviceid << 16,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID);
+
+ rockchip_pcie_write(rockchip,
+ hdr->revid |
+ hdr->progif_code << 8 |
+ hdr->subclass_code << 16 |
+ hdr->baseclass_code << 24,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
+ rockchip_pcie_write(rockchip, hdr->cache_line_size,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_CACHE_LINE_SIZE);
+ rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_SUBSYSTEM_VENDOR_ID);
+ rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_INTERRUPT_LINE);
+
+ return 0;
+}
+
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ dma_addr_t bar_phys = epf_bar->phys_addr;
+ enum pci_barno bar = epf_bar->barno;
+ int flags = epf_bar->flags;
+ u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+ u64 sz;
+
+ /* BAR size is 2^(aperture + 7) */
+ sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);
+
+ /*
+ * roundup_pow_of_two() returns an unsigned long, which is not suited
+ * for 64bit values.
+ */
+ sz = 1ULL << fls64(sz - 1);
+ aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
+
+ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
+ } else {
+ bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+ bool is_64bits = sz > SZ_2G;
+
+ if (is_64bits && (bar & 1))
+ return -EINVAL;
+
+ if (is_64bits && is_prefetch)
+ ctrl =
+ ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+ else if (is_prefetch)
+ ctrl =
+ ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+ else if (is_64bits)
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
+ else
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
+ }
+
+ if (bar < BAR_4) {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+
+ addr0 = lower_32_bits(bar_phys);
+ addr1 = upper_32_bits(bar_phys);
+
+ cfg = rockchip_pcie_read(rockchip, reg);
+ cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+
+ rockchip_pcie_write(rockchip, cfg, reg);
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 reg, cfg, b, ctrl;
+ enum pci_barno bar = epf_bar->barno;
+
+ if (bar < BAR_4) {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
+ cfg = rockchip_pcie_read(rockchip, reg);
+ cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+
+ rockchip_pcie_write(rockchip, cfg, reg);
+ rockchip_pcie_write(rockchip, 0x0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+ rockchip_pcie_write(rockchip, 0x0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+}
+
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr, u64 pci_addr,
+ size_t size)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *pcie = &ep->rockchip;
+ u32 r;
+
+ r = find_first_zero_bit(&ep->ob_region_map,
+ sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ /*
+ * Region 0 is reserved for configuration space and shouldn't
+ * be used elsewhere per TRM, so leave it out.
+ */
+ if (r >= ep->max_regions - 1) {
+ dev_err(&epc->dev, "no free outbound region\n");
+ return -EINVAL;
+ }
+
+ rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr,
+ pci_addr, size);
+
+ set_bit(r, &ep->ob_region_map);
+ ep->ob_addr[r] = addr;
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 r;
+
+ for (r = 0; r < ep->max_regions - 1; r++)
+ if (ep->ob_addr[r] == addr)
+ break;
+
+ /*
+ * Region 0 is reserved for configuration space and shouldn't
+ * be used elsewhere per TRM, so leave it out.
+ */
+ if (r == ep->max_regions - 1)
+ return;
+
+ rockchip_pcie_clear_ep_ob_atu(rockchip, r);
+
+ ep->ob_addr[r] = 0;
+ clear_bit(r, &ep->ob_region_map);
+}
+
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+ u8 multi_msg_cap)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u16 flags;
+
+ flags = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
+ flags |=
+ ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ PCI_MSI_FLAGS_64BIT;
+ flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
+ rockchip_pcie_write(rockchip, flags,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ return 0;
+}
+
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u16 flags;
+
+ flags = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+ return -EINVAL;
+
+ return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+}
+
+static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx, bool is_asserted)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 r = ep->max_regions - 1;
+ u32 offset;
+ u16 status;
+ u8 msg_code;
+
+ if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
+ ep->irq_pci_fn != fn)) {
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
+ AXI_WRAPPER_NOR_MSG,
+ ep->irq_phys_addr, 0, 0);
+ ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR;
+ ep->irq_pci_fn = fn;
+ }
+
+ intx &= 3;
+ if (is_asserted) {
+ ep->irq_pending |= BIT(intx);
+ msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx;
+ } else {
+ ep->irq_pending &= ~BIT(intx);
+ msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx;
+ }
+
+ status = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+ status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+
+ if ((status != 0) ^ (ep->irq_pending != 0)) {
+ status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+ rockchip_pcie_write(rockchip, status,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+ }
+
+ offset =
+ ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) |
+ ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA;
+ writel(0, ep->irq_cpu_addr + offset);
+}
+
+static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx)
+{
+ u16 cmd;
+
+ cmd = rockchip_pcie_read(&ep->rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ return -EINVAL;
+
+ /*
+ * Should add some delay between toggling INTx per TRM vaguely saying
+ * it depends on some cycles of the AHB bus clock to function it. So
+ * add sufficient 1ms here.
+ */
+ rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
+ mdelay(1);
+ rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
+ return 0;
+}
+
+static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 interrupt_num)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u16 flags, mme, data, data_mask;
+ u8 msi_count;
+ u64 pci_addr, pci_addr_mask = 0xff;
+
+ /* Check MSI enable bit */
+ flags = rockchip_pcie_read(&ep->rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+ return -EINVAL;
+
+ /* Get MSI numbers from MME */
+ mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Set MSI private data */
+ data_mask = msi_count - 1;
+ data = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_DATA_64);
+ data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
+
+ /* Get MSI PCI address */
+ pci_addr = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_ADDRESS_LO);
+ pci_addr &= GENMASK_ULL(63, 2);
+
+ /* Set the outbound region if needed. */
+ if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
+ ep->irq_pci_fn != fn)) {
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1,
+ AXI_WRAPPER_MEM_WRITE,
+ ep->irq_phys_addr,
+ pci_addr & ~pci_addr_mask,
+ pci_addr_mask + 1);
+ ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
+ ep->irq_pci_fn = fn;
+ }
+
+ writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+ return 0;
+}
+
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+ enum pci_epc_irq_type type,
+ u8 interrupt_num)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
+ case PCI_EPC_IRQ_MSI:
+ return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rockchip_pcie_ep_start(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct pci_epf *epf;
+ u32 cfg;
+
+ cfg = BIT(0);
+ list_for_each_entry(epf, &epc->pci_epf, list)
+ cfg |= BIT(epf->func_no);
+
+ rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+
+ list_for_each_entry(epf, &epc->pci_epf, list)
+ pci_epf_linkup(epf);
+
+ return 0;
+}
+
+static const struct pci_epc_ops rockchip_pcie_epc_ops = {
+ .write_header = rockchip_pcie_ep_write_header,
+ .set_bar = rockchip_pcie_ep_set_bar,
+ .clear_bar = rockchip_pcie_ep_clear_bar,
+ .map_addr = rockchip_pcie_ep_map_addr,
+ .unmap_addr = rockchip_pcie_ep_unmap_addr,
+ .set_msi = rockchip_pcie_ep_set_msi,
+ .get_msi = rockchip_pcie_ep_get_msi,
+ .raise_irq = rockchip_pcie_ep_raise_irq,
+ .start = rockchip_pcie_ep_start,
+};
+
+static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
+ struct rockchip_pcie_ep *ep)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_get_phys(rockchip);
+ if (err)
+ return err;
+
+ err = of_property_read_u32(dev->of_node,
+ "rockchip,max-outbound-regions",
+ &ep->max_regions);
+ if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
+ ep->max_regions = MAX_REGION_LIMIT;
+
+ err = of_property_read_u8(dev->of_node, "max-functions",
+ &ep->epc->max_functions);
+ if (err < 0)
+ ep->epc->max_functions = 1;
+
+ return 0;
+}
+
+static const struct of_device_id rockchip_pcie_ep_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie-ep"},
+ {},
+};
+
+static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie_ep *ep;
+ struct rockchip_pcie *rockchip;
+ struct pci_epc *epc;
+ size_t max_regions;
+ int err;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ rockchip = &ep->rockchip;
+ rockchip->is_rc = false;
+ rockchip->dev = dev;
+
+ epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ err = rockchip_pcie_parse_ep_dt(rockchip, ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ goto err_disable_clocks;
+
+ /* Establish the link automatically */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ max_regions = ep->max_regions;
+ ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr),
+ GFP_KERNEL);
+
+ if (!ep->ob_addr) {
+ err = -ENOMEM;
+ goto err_uninit_port;
+ }
+
+ /* Only enable function 0 by default */
+ rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+
+ err = pci_epc_mem_init(epc, rockchip->mem_res->start,
+ resource_size(rockchip->mem_res));
+ if (err < 0) {
+ dev_err(dev, "failed to initialize the memory space\n");
+ goto err_uninit_port;
+ }
+
+ ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+ SZ_128K);
+ if (!ep->irq_cpu_addr) {
+ dev_err(dev, "failed to reserve memory space for MSI\n");
+ err = -ENOMEM;
+ goto err_epc_mem_exit;
+ }
+
+ ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+
+ return 0;
+err_epc_mem_exit:
+ pci_epc_mem_exit(epc);
+err_uninit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_disable_clocks:
+ rockchip_pcie_disable_clocks(rockchip);
+ return err;
+}
+
+static struct platform_driver rockchip_pcie_ep_driver = {
+ .driver = {
+ .name = "rockchip-pcie-ep",
+ .of_match_table = rockchip_pcie_ep_of_match,
+ },
+ .probe = rockchip_pcie_ep_probe,
+};
+
+builtin_platform_driver(rockchip_pcie_ep_driver);
diff --git a/drivers/pci/host/pcie-rockchip-host.c b/drivers/pci/host/pcie-rockchip-host.c
new file mode 100644
index 000000000000..1372d270764f
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-host.c
@@ -0,0 +1,1142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#include "../pci.h"
+#include "pcie-rockchip.h"
+
+/*
+ * Enable link bandwidth management / autonomous bandwidth interrupts
+ * in the RC's Link Control register (PCIE_RC_CONFIG_LCS).
+ */
+static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+/*
+ * Clear the bandwidth-change status bits. The Link Status word lives in
+ * the upper half of PCIE_RC_CONFIG_LCS, hence the << 16; the bits are RW1C.
+ */
+static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+/* Set the Tx credit maximum update interval to 24000 ns in CFG1 */
+static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+
+ /* Update Tx credit maximum update interval */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
+ val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
+ val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
+ rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
+}
+
+/*
+ * Return 1 if config accesses to (bus, dev) should be forwarded, 0 to
+ * fake "no device". Only device 0 exists on the root bus and on the bus
+ * directly below the root port.
+ */
+static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, int dev)
+{
+ /* access only one slot on each root port */
+ if (bus->number == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ /*
+ * do not read more than one device on the bus directly attached
+ * to RC's downstream side.
+ */
+ if (bus->primary == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Return a bitmap of the lanes that trained, one bit per lane.
+ * Legacy PHYs cannot report per-lane status, so all lanes are assumed up.
+ */
+static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+ u8 map;
+
+ if (rockchip->legacy_phy)
+ return GENMASK(MAX_LANE_NUM - 1, 0);
+
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
+ map = val & PCIE_CORE_LANE_MAP_MASK;
+
+ /* The link may be using a reverse-indexed mapping. */
+ if (val & PCIE_CORE_LANE_MAP_REVERSE)
+ map = bitrev8(map) >> 4;
+
+ return map;
+}
+
+/*
+ * Read the RC's own config space (mapped at PCIE_RC_CONFIG_NORMAL_BASE in
+ * the APB window). Accesses must be naturally aligned and 1/2/4 bytes.
+ */
+static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(addr);
+ } else if (size == 2) {
+ *val = readw(addr);
+ } else if (size == 1) {
+ *val = readb(addr);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Write the RC's own config space. Sub-dword writes are emulated with a
+ * 32-bit read/modify/write on the containing dword (see caveat below).
+ */
+static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 val)
+{
+ u32 mask, tmp, offset;
+ void __iomem *addr;
+
+ offset = where & ~0x3;
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+
+ if (size == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+
+ /*
+ * N.B. This read/modify/write isn't safe in general because it can
+ * corrupt RW1C bits in adjacent registers. But the hardware
+ * doesn't support smaller writes.
+ */
+ tmp = readl(addr) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Read config space of a downstream device through the AXI window.
+ * Type 0 accesses go to the bus directly below the root port, type 1
+ * accesses to everything further downstream.
+ */
+static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ if (!IS_ALIGNED(busdev, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (bus->parent->number == rockchip->root_bus_nr)
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
+ if (size == 4) {
+ *val = readl(rockchip->reg_base + busdev);
+ } else if (size == 2) {
+ *val = readw(rockchip->reg_base + busdev);
+ } else if (size == 1) {
+ *val = readb(rockchip->reg_base + busdev);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Write config space of a downstream device; mirror of
+ * rockchip_pcie_rd_other_conf() (same type 0/1 selection and alignment rule).
+ */
+static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+ if (!IS_ALIGNED(busdev, size))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (bus->parent->number == rockchip->root_bus_nr)
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
+ if (size == 4)
+ writel(val, rockchip->reg_base + busdev);
+ else if (size == 2)
+ writew(val, rockchip->reg_base + busdev);
+ else if (size == 1)
+ writeb(val, rockchip->reg_base + busdev);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* pci_ops.read: dispatch to own-config or downstream-config read */
+static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size,
+ val);
+}
+
+/* pci_ops.write: dispatch to own-config or downstream-config write */
+static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size,
+ val);
+}
+
+/* Config-space accessors installed on the host bridge */
+static struct pci_ops rockchip_pcie_ops = {
+ .read = rockchip_pcie_rd_conf,
+ .write = rockchip_pcie_wr_conf,
+};
+
+/*
+ * Advertise the RC's captured slot power limit/scale from the vpcie3v3
+ * regulator's current limit. No-op when the regulator is absent or has no
+ * usable current limit.
+ */
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+ int curr;
+ u32 status, scale, power;
+
+ if (IS_ERR(rockchip->vpcie3v3))
+ return;
+
+ /*
+ * Set RC's captured slot power limit and scale if
+ * vpcie3v3 available. The default values are both zero
+ * which means the software should set these two according
+ * to the actual power supply.
+ */
+ curr = regulator_get_current_limit(rockchip->vpcie3v3);
+ if (curr <= 0)
+ return;
+
+ scale = 3; /* 0.001x */
+ curr = curr / 1000; /* convert to mA */
+ power = (curr * 3300) / 1000; /* milliwatt */
+ /* Rescale until the value fits the CSPL field, coarsening the scale */
+ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ if (!scale) {
+ dev_warn(rockchip->dev, "invalid power supply\n");
+ return;
+ }
+ scale--;
+ power = power / 10;
+ }
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+ status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+ (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+}
+
+/**
+ * rockchip_pcie_host_init_port - Initialize hardware
+ * @rockchip: PCIe port information
+ *
+ * Bring up the port, train the link (Gen1, then optionally retrain to
+ * Gen2), power off untrained lanes and program the RC config registers.
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err, i = MAX_LANE_NUM;
+ u32 status;
+
+ /* assumes ep_gpio drives the endpoint's reset (PERST#) — confirm wiring */
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ return err;
+
+ /* Fix the transmitted FTS count desired to exit from L0s. */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
+ status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+ (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+
+ rockchip_pcie_set_power_limit(rockchip);
+
+ /* Set RC's clock architecture as common clock */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKSTA_SLC << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Set RC's RCB to 128 */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RCB;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Enable Gen1 training */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ /* release the endpoint from reset before polling for link-up */
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+
+ /* 500ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+ status, PCIE_LINK_UP(status), 20,
+ 500 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(dev, "PCIe link training gen1 timeout!\n");
+ goto err_power_off_phy;
+ }
+
+ if (rockchip->link_gen == 2) {
+ /*
+ * Enable retrain for gen2. This should be configured only after
+ * gen1 finished.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Gen2 retrain failure is non-fatal; the link stays at gen1 */
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ status, PCIE_LINK_IS_GEN2(status), 20,
+ 500 * USEC_PER_MSEC);
+ if (err)
+ dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
+ }
+
+ /* Check the final link width from negotiated lane counter from MGMT */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+ PCIE_CORE_PL_CONF_LANE_SHIFT);
+ dev_dbg(dev, "current link width is x%d\n", status);
+
+ /* Power off unused lane(s) */
+ rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ if (!(rockchip->lanes_map & BIT(i))) {
+ dev_dbg(dev, "idling lane %d\n", i);
+ phy_power_off(rockchip->phys[i]);
+ }
+ }
+
+ rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ PCIE_CORE_CONFIG_VENDOR);
+ rockchip_pcie_write(rockchip,
+ PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
+ PCIE_RC_CONFIG_RID_CCR);
+
+ /* Clear THP cap's next cap pointer to remove L1 substate cap */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+ status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
+ /* Clear L0s from RC's link cap */
+ if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
+ status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
+ }
+
+ /* Advertise 256-byte max payload size in the Device Control register */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+ status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+ status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
+
+ return 0;
+err_power_off_phy:
+ /* i holds the loop bound here, so both loops walk every lane */
+ while (i--)
+ phy_power_off(rockchip->phys[i]);
+ i = MAX_LANE_NUM;
+ while (i--)
+ phy_exit(rockchip->phys[i]);
+ return err;
+}
+
+/*
+ * "sys" IRQ handler: decode and ack local (core) interrupt causes; on PHY
+ * link-change interrupts, refresh the Tx credit interval and clear the
+ * bandwidth-change status. All causes are merely logged at debug level.
+ */
+static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 sub_reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LOCAL) {
+ dev_dbg(dev, "local interrupt received\n");
+ sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
+ if (sub_reg & PCIE_CORE_INT_PRFPE)
+ dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFPE)
+ dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_RRPE)
+ dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_PRFO)
+ dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFO)
+ dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_RT)
+ dev_dbg(dev, "replay timer timed out\n");
+
+ if (sub_reg & PCIE_CORE_INT_RTR)
+ dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
+
+ if (sub_reg & PCIE_CORE_INT_PE)
+ dev_dbg(dev, "phy error detected on receive side\n");
+
+ if (sub_reg & PCIE_CORE_INT_MTR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ /*
+ * NOTE(review): message below duplicates the MTR text; UCR is
+ * presumably a different (unexpected completion?) cause — confirm
+ * against the controller manual before changing the string.
+ */
+ if (sub_reg & PCIE_CORE_INT_UCR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_FCE)
+ dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
+
+ if (sub_reg & PCIE_CORE_INT_CT)
+ dev_dbg(dev, "a request timed out waiting for completion\n");
+
+ if (sub_reg & PCIE_CORE_INT_UTC)
+ dev_dbg(dev, "unmapped TC error\n");
+
+ if (sub_reg & PCIE_CORE_INT_MMVC)
+ dev_dbg(dev, "MSI mask register changes\n");
+
+ /* ack the core-level causes (write-1-to-clear style register) */
+ rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
+ } else if (reg & PCIE_CLIENT_INT_PHY) {
+ dev_dbg(dev, "phy link changes\n");
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_clr_bw_int(rockchip);
+ }
+
+ /* ack only the LOCAL bit at the client level */
+ rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * "client" IRQ handler: log each pending client-level cause at debug
+ * level, then ack exactly the causes this handler owns.
+ */
+static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
+ dev_dbg(dev, "legacy done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_MSG)
+ dev_dbg(dev, "message done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_HOT_RST)
+ dev_dbg(dev, "hot reset interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_DPA)
+ dev_dbg(dev, "dpa interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_FATAL_ERR)
+ dev_dbg(dev, "fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
+ dev_dbg(dev, "no fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_CORR_ERR)
+ dev_dbg(dev, "correctable error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_PHY)
+ dev_dbg(dev, "phy interrupt received\n");
+
+ rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
+ PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
+ PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
+ PCIE_CLIENT_INT_NFATAL_ERR |
+ PCIE_CLIENT_INT_CORR_ERR |
+ PCIE_CLIENT_INT_PHY),
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Chained handler for the "legacy" IRQ line: demultiplex the INTA-INTD
+ * status bits into virtual IRQs from the INTx domain.
+ */
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 hwirq;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
+
+ /* dispatch each pending INTx bit, lowest first */
+ while (reg) {
+ hwirq = ffs(reg) - 1;
+ reg &= ~BIT(hwirq);
+
+ virq = irq_find_mapping(rockchip->irq_domain, hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+/*
+ * Request the three platform IRQs: "sys" and "client" as shared handlers,
+ * "legacy" as a chained handler for INTx demultiplexing.
+ * Returns 0 on success or a negative errno.
+ */
+static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+{
+ int irq, err;
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0) {
+ dev_err(dev, "missing sys IRQ resource\n");
+ return irq;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
+ IRQF_SHARED, "pcie-sys", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+ return err;
+ }
+
+ irq = platform_get_irq_byname(pdev, "legacy");
+ if (irq < 0) {
+ dev_err(dev, "missing legacy IRQ resource\n");
+ return irq;
+ }
+
+ irq_set_chained_handler_and_data(irq,
+ rockchip_pcie_legacy_int_handler,
+ rockchip);
+
+ irq = platform_get_irq_byname(pdev, "client");
+ if (irq < 0) {
+ dev_err(dev, "missing client IRQ resource\n");
+ return irq;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
+ IRQF_SHARED, "pcie-client", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe client IRQ\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * rockchip_pcie_parse_host_dt - Parse Device Tree
+ * @rockchip: PCIe port information
+ *
+ * Parses the common DT properties, sets up IRQs and looks up the four
+ * optional power supplies.
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_setup_irq(rockchip);
+ if (err)
+ return err;
+
+ /* All four supplies are optional; only -EPROBE_DEFER is fatal */
+ rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+ if (IS_ERR(rockchip->vpcie12v)) {
+ if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie12v regulator found\n");
+ }
+
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie3v3 regulator found\n");
+ }
+
+ rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8)) {
+ if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie1v8 regulator found\n");
+ }
+
+ rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9)) {
+ if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie0v9 regulator found\n");
+ }
+
+ return 0;
+}
+
+/*
+ * Enable whichever of the optional supplies were found, in 12v -> 3v3 ->
+ * 1v8 -> 0v9 order; unwind already-enabled ones on failure.
+ */
+static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ if (!IS_ERR(rockchip->vpcie12v)) {
+ err = regulator_enable(rockchip->vpcie12v);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie12v regulator\n");
+ goto err_out;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie3v3)) {
+ err = regulator_enable(rockchip->vpcie3v3);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie3v3 regulator\n");
+ goto err_disable_12v;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie1v8)) {
+ err = regulator_enable(rockchip->vpcie1v8);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie1v8 regulator\n");
+ goto err_disable_3v3;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie0v9)) {
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ goto err_disable_1v8;
+ }
+ }
+
+ return 0;
+
+err_disable_1v8:
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+err_disable_3v3:
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+err_disable_12v:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+err_out:
+ return err;
+}
+
+/*
+ * Unmask the client and core interrupts this driver handles, then enable
+ * bandwidth-change interrupts. The client mask uses the chip's
+ * write-mask-in-upper-16-bits convention.
+ */
+static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
+ (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
+ rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
+ PCIE_CORE_INT_MASK);
+
+ rockchip_pcie_enable_bw_int(rockchip);
+}
+
+/* irq_domain .map: wire each INTx hwirq to a simple dummy-chip handler */
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+/*
+ * Create the linear INTx IRQ domain from the child interrupt-controller
+ * node. Returns 0 on success, -EINVAL if the node or domain is missing.
+ */
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct device_node *intc = of_get_next_child(dev->of_node, NULL);
+
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get a INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Program one outbound address-translation region.
+ * @num_pass_bits: number of low address bits passed through minus one,
+ * i.e. the region spans 2^(num_pass_bits + 1) bytes.
+ * Validates the region number and that the span fits the AXI region size.
+ */
+static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
+ int region_no, int type, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ob_addr_0;
+ u32 ob_addr_1;
+ u32 ob_desc_0;
+ u32 aw_offset;
+
+ if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < 8)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+ if (region_no == 0) {
+ if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+ if (region_no != 0) {
+ if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+
+ aw_offset = (region_no << OB_REG_SIZE_SHIFT);
+
+ ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
+ ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
+ ob_addr_1 = upper_addr;
+ /* bit 23 presumably marks the descriptor valid/enabled — confirm in TRM */
+ ob_desc_0 = (1 << 23 | type);
+
+ rockchip_pcie_write(rockchip, ob_addr_0,
+ PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_addr_1,
+ PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_desc_0,
+ PCIE_CORE_OB_REGION_DESC0 + aw_offset);
+ rockchip_pcie_write(rockchip, 0,
+ PCIE_CORE_OB_REGION_DESC1 + aw_offset);
+
+ return 0;
+}
+
+/*
+ * Program one inbound (root-port) address-translation region; same
+ * num_pass_bits convention as the outbound variant.
+ */
+static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
+ int region_no, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ib_addr_0;
+ u32 ib_addr_1;
+ u32 aw_offset;
+
+ if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+
+ aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
+
+ ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
+ ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
+ ib_addr_1 = upper_addr;
+
+ rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
+
+ return 0;
+}
+
+/*
+ * Program all ATU regions: one 1MB outbound window per MB of MEM space,
+ * one inbound window, one 1MB outbound window per MB of IO space, and a
+ * final outbound window for normal messages (used for PME_TURN_OFF).
+ * Also records msg_bus_addr for later ioremap of the message region.
+ */
+static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int offset;
+ int err;
+ int reg_no;
+
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+
+ for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+ AXI_WRAPPER_MEM_WRITE,
+ 20 - 1,
+ rockchip->mem_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC mem outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+ if (err) {
+ dev_err(dev, "program RC mem inbound ATU failed\n");
+ return err;
+ }
+
+ offset = rockchip->mem_size >> 20;
+ for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip,
+ reg_no + 1 + offset,
+ AXI_WRAPPER_IO_WRITE,
+ 20 - 1,
+ rockchip->io_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC io outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ /* assign message regions */
+ rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
+ AXI_WRAPPER_NOR_MSG,
+ 20 - 1, 0, 0);
+
+ rockchip->msg_bus_addr = rockchip->mem_bus_addr +
+ ((reg_no + offset) << 20);
+ return err;
+}
+
+/*
+ * Send PME_TURN_OFF via the message region and poll the LTSSM (up to 5s)
+ * until the link reaches L2. Returns 0 on success, -ETIMEDOUT-style
+ * error from readl_poll_timeout() otherwise.
+ */
+static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
+{
+ u32 value;
+ int err;
+
+ /* send PME_TURN_OFF message */
+ writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+
+ /* read LTSSM and wait for falling into L2 link state */
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
+ value, PCIE_LINK_IS_L2(value), 20,
+ jiffies_to_usecs(5 * HZ));
+ if (err) {
+ dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * noirq suspend: mask interrupts, push the link to L2, then power down
+ * PHYs, clocks and the 0v9 supply. If L2 entry times out, interrupts are
+ * re-enabled and suspend is aborted.
+ */
+static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
+{
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+ int ret;
+
+ /* disable core and cli int since we don't need to ack PME_ACK */
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
+ PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
+ rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
+
+ ret = rockchip_pcie_wait_l2(rockchip);
+ if (ret) {
+ rockchip_pcie_enable_interrupts(rockchip);
+ return ret;
+ }
+
+ rockchip_pcie_deinit_phys(rockchip);
+
+ rockchip_pcie_disable_clocks(rockchip);
+
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+
+ return ret;
+}
+
+/*
+ * noirq resume: reverse of suspend — re-enable the 0v9 supply and clocks,
+ * re-initialize the port, reprogram the ATU and unmask interrupts.
+ */
+static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
+{
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+ int err;
+
+ if (!IS_ERR(rockchip->vpcie0v9)) {
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ goto err_disable_0v9;
+
+ err = rockchip_pcie_host_init_port(rockchip);
+ if (err)
+ goto err_pcie_resume;
+
+ err = rockchip_pcie_cfg_atu(rockchip);
+ if (err)
+ goto err_err_deinit_port;
+
+ /* Need this to enter L1 again */
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ return 0;
+
+err_err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_pcie_resume:
+ rockchip_pcie_disable_clocks(rockchip);
+err_disable_0v9:
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+ return err;
+}
+
+/*
+ * Probe: allocate the host bridge, parse DT, power up the port, collect
+ * the DT-provided IO/MEM/BUS windows, program the ATU, map the message
+ * region and scan/enumerate the root bus.
+ * Return: 0 on success or a negative errno, unwinding in reverse order.
+ */
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct rockchip_pcie *rockchip;
+ struct device *dev = &pdev->dev;
+ struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *win;
+ resource_size_t io_base;
+ struct resource *mem;
+ struct resource *io;
+ int err;
+
+ LIST_HEAD(res);
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
+ if (!bridge)
+ return -ENOMEM;
+
+ rockchip = pci_host_bridge_priv(bridge);
+
+ platform_set_drvdata(pdev, rockchip);
+ rockchip->dev = dev;
+ rockchip->is_rc = true;
+
+ err = rockchip_pcie_parse_host_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_set_vpcie(rockchip);
+ if (err) {
+ dev_err(dev, "failed to set vpcie regulator\n");
+ goto err_set_vpcie;
+ }
+
+ err = rockchip_pcie_host_init_port(rockchip);
+ if (err)
+ goto err_vpcie;
+
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ err = rockchip_pcie_init_irq_domain(rockchip);
+ if (err < 0)
+ goto err_deinit_port;
+
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &res, &io_base);
+ if (err)
+ goto err_remove_irq_domain;
+
+ err = devm_request_pci_bus_resources(dev, &res);
+ if (err)
+ goto err_free_res;
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "I/O";
+ rockchip->io_size = resource_size(io);
+ rockchip->io_bus_addr = io->start - win->offset;
+ err = pci_remap_iospace(io, io_base);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, io);
+ continue;
+ }
+ rockchip->io = io;
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ mem->name = "MEM";
+ rockchip->mem_size = resource_size(mem);
+ rockchip->mem_bus_addr = mem->start - win->offset;
+ break;
+ case IORESOURCE_BUS:
+ rockchip->root_bus_nr = win->res->start;
+ break;
+ default:
+ continue;
+ }
+ }
+
+ err = rockchip_pcie_cfg_atu(rockchip);
+ if (err)
+ goto err_unmap_iospace;
+
+ /* map the 1MB message window programmed by rockchip_pcie_cfg_atu() */
+ rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
+ if (!rockchip->msg_region) {
+ err = -ENOMEM;
+ goto err_unmap_iospace;
+ }
+
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = rockchip;
+ bridge->busnr = 0;
+ bridge->ops = &rockchip_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err < 0)
+ goto err_unmap_iospace;
+
+ bus = bridge->bus;
+
+ rockchip->root_bus = bus;
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+ return 0;
+
+/*
+ * NOTE(review): rockchip->io may be unset if no IO window was mapped when
+ * this path is taken; verify pci_unmap_iospace() tolerates that.
+ */
+err_unmap_iospace:
+ pci_unmap_iospace(rockchip->io);
+err_free_res:
+ pci_free_resource_list(&res);
+err_remove_irq_domain:
+ irq_domain_remove(rockchip->irq_domain);
+err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_vpcie:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+err_set_vpcie:
+ rockchip_pcie_disable_clocks(rockchip);
+ return err;
+}
+
+/* Remove: tear down the bus and release everything probe acquired */
+static int rockchip_pcie_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+
+ pci_stop_root_bus(rockchip->root_bus);
+ pci_remove_root_bus(rockchip->root_bus);
+ pci_unmap_iospace(rockchip->io);
+ irq_domain_remove(rockchip->irq_domain);
+
+ rockchip_pcie_deinit_phys(rockchip);
+
+ rockchip_pcie_disable_clocks(rockchip);
+
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+
+ return 0;
+}
+
+/* Host-mode driver registration: noirq PM ops, rk3399 compatible only */
+static const struct dev_pm_ops rockchip_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
+ rockchip_pcie_resume_noirq)
+};
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .pm = &rockchip_pcie_pm_ops,
+ },
+ .probe = rockchip_pcie_probe,
+ .remove = rockchip_pcie_remove,
+};
+module_platform_driver(rockchip_pcie_driver);
+
+MODULE_AUTHOR("Rockchip Inc");
+MODULE_DESCRIPTION("Rockchip AXI PCIe driver")
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index f1e8f97ea1fb..c53d1322a3d6 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -11,535 +11,154 @@
* ARM PCI Host generic driver.
*/
-#include <linux/bitrev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/iopoll.h>
-#include <linux/irq.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_pci.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <linux/regmap.h>
-/*
- * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
- * bits. This allows atomic updates of the register without locking.
- */
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
-
-#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
-#define MAX_LANE_NUM 4
-
-#define PCIE_CLIENT_BASE 0x0
-#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
-#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
-#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
-#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
-#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
-#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
-#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
-#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
-#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
-#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
-#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
-#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
-#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
-#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
-#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
-#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
-#define PCIE_CLIENT_INTR_SHIFT 5
-#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
-#define PCIE_CLIENT_INT_MSG BIT(14)
-#define PCIE_CLIENT_INT_HOT_RST BIT(13)
-#define PCIE_CLIENT_INT_DPA BIT(12)
-#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
-#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
-#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
-#define PCIE_CLIENT_INT_INTD BIT(8)
-#define PCIE_CLIENT_INT_INTC BIT(7)
-#define PCIE_CLIENT_INT_INTB BIT(6)
-#define PCIE_CLIENT_INT_INTA BIT(5)
-#define PCIE_CLIENT_INT_LOCAL BIT(4)
-#define PCIE_CLIENT_INT_UDMA BIT(3)
-#define PCIE_CLIENT_INT_PHY BIT(2)
-#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
-#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
-
-#define PCIE_CLIENT_INT_LEGACY \
- (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
- PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
-
-#define PCIE_CLIENT_INT_CLI \
- (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
- PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
- PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
- PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
- PCIE_CLIENT_INT_PHY)
-
-#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
-#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
-#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
-#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
-#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
-#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
-#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
-#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
-#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
-#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
-#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
-#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
-#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
-#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
- (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
-#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
-#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
-#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
-#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
-#define PCIE_CORE_INT_PRFPE BIT(0)
-#define PCIE_CORE_INT_CRFPE BIT(1)
-#define PCIE_CORE_INT_RRPE BIT(2)
-#define PCIE_CORE_INT_PRFO BIT(3)
-#define PCIE_CORE_INT_CRFO BIT(4)
-#define PCIE_CORE_INT_RT BIT(5)
-#define PCIE_CORE_INT_RTR BIT(6)
-#define PCIE_CORE_INT_PE BIT(7)
-#define PCIE_CORE_INT_MTR BIT(8)
-#define PCIE_CORE_INT_UCR BIT(9)
-#define PCIE_CORE_INT_FCE BIT(10)
-#define PCIE_CORE_INT_CT BIT(11)
-#define PCIE_CORE_INT_UTC BIT(18)
-#define PCIE_CORE_INT_MMVC BIT(19)
-#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44)
-#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
-#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
-
-#define PCIE_CORE_INT \
- (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
- PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
- PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
- PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
- PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
- PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
- PCIE_CORE_INT_MMVC)
-
-#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
-#define PCIE_RC_CONFIG_BASE 0xa00000
-#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
-#define PCIE_RC_CONFIG_SCC_SHIFT 16
-#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
-#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
-#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
-#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
-#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
-#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
-#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
-#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
-#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
-#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
-#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
-#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
-#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
-
-#define PCIE_CORE_AXI_CONF_BASE 0xc00000
-#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
-#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
-#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
-#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
-#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
-#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
-
-#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
-#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
-#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
-#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
-#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
-
-/* Size of one AXI Region (not Region 0) */
-#define AXI_REGION_SIZE BIT(20)
-/* Size of Region 0, equal to sum of sizes of other regions */
-#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
-#define OB_REG_SIZE_SHIFT 5
-#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
-#define AXI_WRAPPER_IO_WRITE 0x6
-#define AXI_WRAPPER_MEM_WRITE 0x2
-#define AXI_WRAPPER_TYPE0_CFG 0xa
-#define AXI_WRAPPER_TYPE1_CFG 0xb
-#define AXI_WRAPPER_NOR_MSG 0xc
-
-#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
-#define MIN_AXI_ADDR_BITS_PASSED 8
-#define PCIE_RC_SEND_PME_OFF 0x11960
-#define ROCKCHIP_VENDOR_ID 0x1d87
-#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
-#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
-#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
-#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
-#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
- (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
- PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
-#define PCIE_LINK_IS_L2(x) \
- (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
-#define PCIE_LINK_UP(x) \
- (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
-#define PCIE_LINK_IS_GEN2(x) \
- (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
-
-#define RC_REGION_0_ADDR_TRANS_H 0x00000000
-#define RC_REGION_0_ADDR_TRANS_L 0x00000000
-#define RC_REGION_0_PASS_BITS (25 - 1)
-#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
-#define MAX_AXI_WRAPPER_REGION_NUM 33
-
-struct rockchip_pcie {
- void __iomem *reg_base; /* DT axi-base */
- void __iomem *apb_base; /* DT apb-base */
- bool legacy_phy;
- struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
- struct regulator *vpcie12v; /* 12V power supply */
- struct regulator *vpcie3v3; /* 3.3V power supply */
- struct regulator *vpcie1v8; /* 1.8V power supply */
- struct regulator *vpcie0v9; /* 0.9V power supply */
- struct gpio_desc *ep_gpio;
- u32 lanes;
- u8 lanes_map;
- u8 root_bus_nr;
- int link_gen;
- struct device *dev;
- struct irq_domain *irq_domain;
- int offset;
- struct pci_bus *root_bus;
- struct resource *io;
- phys_addr_t io_bus_addr;
- u32 io_size;
- void __iomem *msg_region;
- u32 mem_size;
- phys_addr_t msg_bus_addr;
- phys_addr_t mem_bus_addr;
-};
-
-static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
-{
- return readl(rockchip->apb_base + reg);
-}
-
-static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
- u32 reg)
-{
- writel(val, rockchip->apb_base + reg);
-}
-
-static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
-{
- u32 status;
-
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-}
-
-static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
-{
- u32 status;
-
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-}
-
-static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
-{
- u32 val;
-
- /* Update Tx credit maximum update interval */
- val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
- val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
- val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
- rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
-}
+#include "../pci.h"
+#include "pcie-rockchip.h"
-static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
- struct pci_bus *bus, int dev)
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
{
- /* access only one slot on each root port */
- if (bus->number == rockchip->root_bus_nr && dev > 0)
- return 0;
-
- /*
- * do not read more than one device on the bus directly attached
- * to RC's downstream side.
- */
- if (bus->primary == rockchip->root_bus_nr && dev > 0)
- return 0;
-
- return 1;
-}
-
-static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
-{
- u32 val;
- u8 map;
-
- if (rockchip->legacy_phy)
- return GENMASK(MAX_LANE_NUM - 1, 0);
-
- val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
- map = val & PCIE_CORE_LANE_MAP_MASK;
-
- /* The link may be using a reverse-indexed mapping. */
- if (val & PCIE_CORE_LANE_MAP_REVERSE)
- map = bitrev8(map) >> 4;
-
- return map;
-}
-
-static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
- int where, int size, u32 *val)
-{
- void __iomem *addr;
-
- addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
-
- if (!IS_ALIGNED((uintptr_t)addr, size)) {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node;
+ struct resource *regs;
+ int err;
- if (size == 4) {
- *val = readl(addr);
- } else if (size == 2) {
- *val = readw(addr);
- } else if (size == 1) {
- *val = readb(addr);
+ if (rockchip->is_rc) {
+ regs = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "axi-base");
+ rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(rockchip->reg_base))
+ return PTR_ERR(rockchip->reg_base);
} else {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ rockchip->mem_res =
+ platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mem-base");
+ if (!rockchip->mem_res)
+ return -EINVAL;
}
- return PCIBIOS_SUCCESSFUL;
-}
-static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
- int where, int size, u32 val)
-{
- u32 mask, tmp, offset;
- void __iomem *addr;
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "apb-base");
+ rockchip->apb_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
- offset = where & ~0x3;
- addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+ err = rockchip_pcie_get_phys(rockchip);
+ if (err)
+ return err;
- if (size == 4) {
- writel(val, addr);
- return PCIBIOS_SUCCESSFUL;
+ rockchip->lanes = 1;
+ err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
+ if (!err && (rockchip->lanes == 0 ||
+ rockchip->lanes == 3 ||
+ rockchip->lanes > 4)) {
+ dev_warn(dev, "invalid num-lanes, default to use one lane\n");
+ rockchip->lanes = 1;
}
- mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
-
- /*
- * N.B. This read/modify/write isn't safe in general because it can
- * corrupt RW1C bits in adjacent registers. But the hardware
- * doesn't support smaller writes.
- */
- tmp = readl(addr) & mask;
- tmp |= val << ((where & 0x3) * 8);
- writel(tmp, addr);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static void rockchip_pcie_cfg_configuration_accesses(
- struct rockchip_pcie *rockchip, u32 type)
-{
- u32 ob_desc_0;
-
- /* Configuration Accesses for region 0 */
- rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
-
- rockchip_pcie_write(rockchip,
- (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
- PCIE_CORE_OB_REGION_ADDR0);
- rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
- PCIE_CORE_OB_REGION_ADDR1);
- ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
- ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
- ob_desc_0 |= (type | (0x1 << 23));
- rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
- rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
-}
-
-static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
- struct pci_bus *bus, u32 devfn,
- int where, int size, u32 *val)
-{
- u32 busdev;
-
- busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
+ rockchip->link_gen = of_pci_get_max_link_speed(node);
+ if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+ rockchip->link_gen = 2;
- if (!IS_ALIGNED(busdev, size)) {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
+ if (IS_ERR(rockchip->core_rst)) {
+ if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing core reset property in node\n");
+ return PTR_ERR(rockchip->core_rst);
}
- if (bus->parent->number == rockchip->root_bus_nr)
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE0_CFG);
- else
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE1_CFG);
-
- if (size == 4) {
- *val = readl(rockchip->reg_base + busdev);
- } else if (size == 2) {
- *val = readw(rockchip->reg_base + busdev);
- } else if (size == 1) {
- *val = readb(rockchip->reg_base + busdev);
- } else {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
+ if (IS_ERR(rockchip->mgmt_rst)) {
+ if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_rst);
}
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
- struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
-{
- u32 busdev;
-
- busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
- if (!IS_ALIGNED(busdev, size))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (bus->parent->number == rockchip->root_bus_nr)
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE0_CFG);
- else
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE1_CFG);
-
- if (size == 4)
- writel(val, rockchip->reg_base + busdev);
- else if (size == 2)
- writew(val, rockchip->reg_base + busdev);
- else if (size == 1)
- writeb(val, rockchip->reg_base + busdev);
- else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- return PCIBIOS_SUCCESSFUL;
-}
-static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
- int size, u32 *val)
-{
- struct rockchip_pcie *rockchip = bus->sysdata;
-
- if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
+ rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
+ "mgmt-sticky");
+ if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+ if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt-sticky reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_sticky_rst);
}
- if (bus->number == rockchip->root_bus_nr)
- return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
-
- return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val);
-}
+ rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(rockchip->pipe_rst)) {
+ if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pipe reset property in node\n");
+ return PTR_ERR(rockchip->pipe_rst);
+ }
-static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
-{
- struct rockchip_pcie *rockchip = bus->sysdata;
+ rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
+ if (IS_ERR(rockchip->pm_rst)) {
+ if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pm reset property in node\n");
+ return PTR_ERR(rockchip->pm_rst);
+ }
- if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
+ rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
+ if (IS_ERR(rockchip->pclk_rst)) {
+ if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pclk reset property in node\n");
+ return PTR_ERR(rockchip->pclk_rst);
+ }
- if (bus->number == rockchip->root_bus_nr)
- return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+ rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_rst)) {
+ if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing aclk reset property in node\n");
+ return PTR_ERR(rockchip->aclk_rst);
+ }
- return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val);
-}
+ if (rockchip->is_rc) {
+ rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->ep_gpio)) {
+ dev_err(dev, "missing ep-gpios property in node\n");
+ return PTR_ERR(rockchip->ep_gpio);
+ }
+ }
-static struct pci_ops rockchip_pcie_ops = {
- .read = rockchip_pcie_rd_conf,
- .write = rockchip_pcie_wr_conf,
-};
+ rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_pcie)) {
+ dev_err(dev, "aclk clock not found\n");
+ return PTR_ERR(rockchip->aclk_pcie);
+ }
-static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
-{
- int curr;
- u32 status, scale, power;
+ rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
+ if (IS_ERR(rockchip->aclk_perf_pcie)) {
+ dev_err(dev, "aclk_perf clock not found\n");
+ return PTR_ERR(rockchip->aclk_perf_pcie);
+ }
- if (IS_ERR(rockchip->vpcie3v3))
- return;
+ rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
+ if (IS_ERR(rockchip->hclk_pcie)) {
+ dev_err(dev, "hclk clock not found\n");
+ return PTR_ERR(rockchip->hclk_pcie);
+ }
- /*
- * Set RC's captured slot power limit and scale if
- * vpcie3v3 available. The default values are both zero
- * which means the software should set these two according
- * to the actual power supply.
- */
- curr = regulator_get_current_limit(rockchip->vpcie3v3);
- if (curr <= 0)
- return;
-
- scale = 3; /* 0.001x */
- curr = curr / 1000; /* convert to mA */
- power = (curr * 3300) / 1000; /* milliwatt */
- while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
- if (!scale) {
- dev_warn(rockchip->dev, "invalid power supply\n");
- return;
- }
- scale--;
- power = power / 10;
+ rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
+ if (IS_ERR(rockchip->clk_pcie_pm)) {
+ dev_err(dev, "pm clock not found\n");
+ return PTR_ERR(rockchip->clk_pcie_pm);
}
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
- status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
- (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
-/**
- * rockchip_pcie_init_port - Initialize hardware
- * @rockchip: PCIe port information
- */
-static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int err, i;
- u32 status;
-
- gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+ u32 regs;
err = reset_control_assert(rockchip->aclk_rst);
if (err) {
@@ -618,13 +237,15 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
PCIE_CLIENT_CONFIG);
- rockchip_pcie_write(rockchip,
- PCIE_CLIENT_CONF_ENABLE |
- PCIE_CLIENT_LINK_TRAIN_ENABLE |
- PCIE_CLIENT_ARI_ENABLE |
- PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
- PCIE_CLIENT_MODE_RC,
- PCIE_CLIENT_CONFIG);
+ regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+ PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
+
+ if (rockchip->is_rc)
+ regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+ else
+ regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
+
+ rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG);
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_power_on(rockchip->phys[i]);
@@ -662,93 +283,6 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
goto err_power_off_phy;
}
- /* Fix the transmitted FTS count desired to exit from L0s. */
- status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
- status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
- (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
-
- rockchip_pcie_set_power_limit(rockchip);
-
- /* Set RC's clock architecture as common clock */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKSTA_SLC << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
- /* Set RC's RCB to 128 */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKCTL_RCB;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
- /* Enable Gen1 training */
- rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
- PCIE_CLIENT_CONFIG);
-
- gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
-
- /* 500ms timeout value should be enough for Gen1/2 training */
- err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
- status, PCIE_LINK_UP(status), 20,
- 500 * USEC_PER_MSEC);
- if (err) {
- dev_err(dev, "PCIe link training gen1 timeout!\n");
- goto err_power_off_phy;
- }
-
- if (rockchip->link_gen == 2) {
- /*
- * Enable retrain for gen2. This should be configured only after
- * gen1 finished.
- */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKCTL_RL;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
- err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
- status, PCIE_LINK_IS_GEN2(status), 20,
- 500 * USEC_PER_MSEC);
- if (err)
- dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
- }
-
- /* Check the final link width from negotiated lane counter from MGMT */
- status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
- status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
- PCIE_CORE_PL_CONF_LANE_SHIFT);
- dev_dbg(dev, "current link width is x%d\n", status);
-
- /* Power off unused lane(s) */
- rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
- for (i = 0; i < MAX_LANE_NUM; i++) {
- if (!(rockchip->lanes_map & BIT(i))) {
- dev_dbg(dev, "idling lane %d\n", i);
- phy_power_off(rockchip->phys[i]);
- }
- }
-
- rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
- PCIE_CORE_CONFIG_VENDOR);
- rockchip_pcie_write(rockchip,
- PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
- PCIE_RC_CONFIG_RID_CCR);
-
- /* Clear THP cap's next cap pointer to remove L1 substate cap */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
- status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
-
- /* Clear L0s from RC's link cap */
- if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
- status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
- }
-
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
- status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
- status |= PCIE_RC_CONFIG_DCSR_MPS_256;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
-
return 0;
err_power_off_phy:
while (i--)
@@ -759,156 +293,9 @@ err_exit_phy:
phy_exit(rockchip->phys[i]);
return err;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_init_port);
-static void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
-{
- int i;
-
- for (i = 0; i < MAX_LANE_NUM; i++) {
- /* inactive lanes are already powered off */
- if (rockchip->lanes_map & BIT(i))
- phy_power_off(rockchip->phys[i]);
- phy_exit(rockchip->phys[i]);
- }
-}
-
-static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
-{
- struct rockchip_pcie *rockchip = arg;
- struct device *dev = rockchip->dev;
- u32 reg;
- u32 sub_reg;
-
- reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
- if (reg & PCIE_CLIENT_INT_LOCAL) {
- dev_dbg(dev, "local interrupt received\n");
- sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
- if (sub_reg & PCIE_CORE_INT_PRFPE)
- dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
-
- if (sub_reg & PCIE_CORE_INT_CRFPE)
- dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
-
- if (sub_reg & PCIE_CORE_INT_RRPE)
- dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
-
- if (sub_reg & PCIE_CORE_INT_PRFO)
- dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
-
- if (sub_reg & PCIE_CORE_INT_CRFO)
- dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
-
- if (sub_reg & PCIE_CORE_INT_RT)
- dev_dbg(dev, "replay timer timed out\n");
-
- if (sub_reg & PCIE_CORE_INT_RTR)
- dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
-
- if (sub_reg & PCIE_CORE_INT_PE)
- dev_dbg(dev, "phy error detected on receive side\n");
-
- if (sub_reg & PCIE_CORE_INT_MTR)
- dev_dbg(dev, "malformed TLP received from the link\n");
-
- if (sub_reg & PCIE_CORE_INT_UCR)
- dev_dbg(dev, "malformed TLP received from the link\n");
-
- if (sub_reg & PCIE_CORE_INT_FCE)
- dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
-
- if (sub_reg & PCIE_CORE_INT_CT)
- dev_dbg(dev, "a request timed out waiting for completion\n");
-
- if (sub_reg & PCIE_CORE_INT_UTC)
- dev_dbg(dev, "unmapped TC error\n");
-
- if (sub_reg & PCIE_CORE_INT_MMVC)
- dev_dbg(dev, "MSI mask register changes\n");
-
- rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
- } else if (reg & PCIE_CLIENT_INT_PHY) {
- dev_dbg(dev, "phy link changes\n");
- rockchip_pcie_update_txcredit_mui(rockchip);
- rockchip_pcie_clr_bw_int(rockchip);
- }
-
- rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
- PCIE_CLIENT_INT_STATUS);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
-{
- struct rockchip_pcie *rockchip = arg;
- struct device *dev = rockchip->dev;
- u32 reg;
-
- reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
- if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
- dev_dbg(dev, "legacy done interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_MSG)
- dev_dbg(dev, "message done interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_HOT_RST)
- dev_dbg(dev, "hot reset interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_DPA)
- dev_dbg(dev, "dpa interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_FATAL_ERR)
- dev_dbg(dev, "fatal error interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
- dev_dbg(dev, "no fatal error interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_CORR_ERR)
- dev_dbg(dev, "correctable error interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_PHY)
- dev_dbg(dev, "phy interrupt received\n");
-
- rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
- PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
- PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
- PCIE_CLIENT_INT_NFATAL_ERR |
- PCIE_CLIENT_INT_CORR_ERR |
- PCIE_CLIENT_INT_PHY),
- PCIE_CLIENT_INT_STATUS);
-
- return IRQ_HANDLED;
-}
-
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
- struct device *dev = rockchip->dev;
- u32 reg;
- u32 hwirq;
- u32 virq;
-
- chained_irq_enter(chip, desc);
-
- reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
- reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
-
- while (reg) {
- hwirq = ffs(reg) - 1;
- reg &= ~BIT(hwirq);
-
- virq = irq_find_mapping(rockchip->irq_domain, hwirq);
- if (virq)
- generic_handle_irq(virq);
- else
- dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
struct phy *phy;
@@ -948,452 +335,22 @@ static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys);
-static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
{
- int irq, err;
- struct device *dev = rockchip->dev;
- struct platform_device *pdev = to_platform_device(dev);
-
- irq = platform_get_irq_byname(pdev, "sys");
- if (irq < 0) {
- dev_err(dev, "missing sys IRQ resource\n");
- return irq;
- }
-
- err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
- IRQF_SHARED, "pcie-sys", rockchip);
- if (err) {
- dev_err(dev, "failed to request PCIe subsystem IRQ\n");
- return err;
- }
-
- irq = platform_get_irq_byname(pdev, "legacy");
- if (irq < 0) {
- dev_err(dev, "missing legacy IRQ resource\n");
- return irq;
- }
-
- irq_set_chained_handler_and_data(irq,
- rockchip_pcie_legacy_int_handler,
- rockchip);
-
- irq = platform_get_irq_byname(pdev, "client");
- if (irq < 0) {
- dev_err(dev, "missing client IRQ resource\n");
- return irq;
- }
-
- err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
- IRQF_SHARED, "pcie-client", rockchip);
- if (err) {
- dev_err(dev, "failed to request PCIe client IRQ\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * rockchip_pcie_parse_dt - Parse Device Tree
- * @rockchip: PCIe port information
- *
- * Return: '0' on success and error value on failure
- */
-static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct device_node *node = dev->of_node;
- struct resource *regs;
- int err;
-
- regs = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- "axi-base");
- rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
- if (IS_ERR(rockchip->reg_base))
- return PTR_ERR(rockchip->reg_base);
-
- regs = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- "apb-base");
- rockchip->apb_base = devm_ioremap_resource(dev, regs);
- if (IS_ERR(rockchip->apb_base))
- return PTR_ERR(rockchip->apb_base);
-
- err = rockchip_pcie_get_phys(rockchip);
- if (err)
- return err;
-
- rockchip->lanes = 1;
- err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
- if (!err && (rockchip->lanes == 0 ||
- rockchip->lanes == 3 ||
- rockchip->lanes > 4)) {
- dev_warn(dev, "invalid num-lanes, default to use one lane\n");
- rockchip->lanes = 1;
- }
-
- rockchip->link_gen = of_pci_get_max_link_speed(node);
- if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
- rockchip->link_gen = 2;
-
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
-
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
-
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
-
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
-
- rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
- if (IS_ERR(rockchip->ep_gpio)) {
- dev_err(dev, "missing ep-gpios property in node\n");
- return PTR_ERR(rockchip->ep_gpio);
- }
-
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
-
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
-
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
-
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
-
- err = rockchip_pcie_setup_irq(rockchip);
- if (err)
- return err;
-
- rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
- if (IS_ERR(rockchip->vpcie12v)) {
- if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie12v regulator found\n");
- }
-
- rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
- if (IS_ERR(rockchip->vpcie3v3)) {
- if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie3v3 regulator found\n");
- }
-
- rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
- if (IS_ERR(rockchip->vpcie1v8)) {
- if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie1v8 regulator found\n");
- }
-
- rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
- if (IS_ERR(rockchip->vpcie0v9)) {
- if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie0v9 regulator found\n");
- }
-
- return 0;
-}
-
-static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- int err;
-
- if (!IS_ERR(rockchip->vpcie12v)) {
- err = regulator_enable(rockchip->vpcie12v);
- if (err) {
- dev_err(dev, "fail to enable vpcie12v regulator\n");
- goto err_out;
- }
- }
-
- if (!IS_ERR(rockchip->vpcie3v3)) {
- err = regulator_enable(rockchip->vpcie3v3);
- if (err) {
- dev_err(dev, "fail to enable vpcie3v3 regulator\n");
- goto err_disable_12v;
- }
- }
-
- if (!IS_ERR(rockchip->vpcie1v8)) {
- err = regulator_enable(rockchip->vpcie1v8);
- if (err) {
- dev_err(dev, "fail to enable vpcie1v8 regulator\n");
- goto err_disable_3v3;
- }
- }
-
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- goto err_disable_1v8;
- }
- }
-
- return 0;
-
-err_disable_1v8:
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
-err_disable_3v3:
- if (!IS_ERR(rockchip->vpcie3v3))
- regulator_disable(rockchip->vpcie3v3);
-err_disable_12v:
- if (!IS_ERR(rockchip->vpcie12v))
- regulator_disable(rockchip->vpcie12v);
-err_out:
- return err;
-}
-
-static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
-{
- rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
- (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
- rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
- PCIE_CORE_INT_MASK);
-
- rockchip_pcie_enable_bw_int(rockchip);
-}
-
-static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops intx_domain_ops = {
- .map = rockchip_pcie_intx_map,
-};
-
-static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- struct device_node *intc = of_get_next_child(dev->of_node, NULL);
-
- if (!intc) {
- dev_err(dev, "missing child interrupt-controller node\n");
- return -EINVAL;
- }
-
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
- if (!rockchip->irq_domain) {
- dev_err(dev, "failed to get a INTx IRQ domain\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
- int region_no, int type, u8 num_pass_bits,
- u32 lower_addr, u32 upper_addr)
-{
- u32 ob_addr_0;
- u32 ob_addr_1;
- u32 ob_desc_0;
- u32 aw_offset;
-
- if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
- return -EINVAL;
- if (num_pass_bits + 1 < 8)
- return -EINVAL;
- if (num_pass_bits > 63)
- return -EINVAL;
- if (region_no == 0) {
- if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
- return -EINVAL;
- }
- if (region_no != 0) {
- if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
- return -EINVAL;
- }
-
- aw_offset = (region_no << OB_REG_SIZE_SHIFT);
-
- ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
- ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
- ob_addr_1 = upper_addr;
- ob_desc_0 = (1 << 23 | type);
-
- rockchip_pcie_write(rockchip, ob_addr_0,
- PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
- rockchip_pcie_write(rockchip, ob_addr_1,
- PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
- rockchip_pcie_write(rockchip, ob_desc_0,
- PCIE_CORE_OB_REGION_DESC0 + aw_offset);
- rockchip_pcie_write(rockchip, 0,
- PCIE_CORE_OB_REGION_DESC1 + aw_offset);
-
- return 0;
-}
-
-static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
- int region_no, u8 num_pass_bits,
- u32 lower_addr, u32 upper_addr)
-{
- u32 ib_addr_0;
- u32 ib_addr_1;
- u32 aw_offset;
-
- if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
- return -EINVAL;
- if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
- return -EINVAL;
- if (num_pass_bits > 63)
- return -EINVAL;
-
- aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
-
- ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
- ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
- ib_addr_1 = upper_addr;
-
- rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
- rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
-
- return 0;
-}
-
-static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- int offset;
- int err;
- int reg_no;
-
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE0_CFG);
-
- for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
- AXI_WRAPPER_MEM_WRITE,
- 20 - 1,
- rockchip->mem_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC mem outbound ATU failed\n");
- return err;
- }
- }
-
- err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
- if (err) {
- dev_err(dev, "program RC mem inbound ATU failed\n");
- return err;
- }
-
- offset = rockchip->mem_size >> 20;
- for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip,
- reg_no + 1 + offset,
- AXI_WRAPPER_IO_WRITE,
- 20 - 1,
- rockchip->io_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC io outbound ATU failed\n");
- return err;
- }
- }
-
- /* assign message regions */
- rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
- AXI_WRAPPER_NOR_MSG,
- 20 - 1, 0, 0);
-
- rockchip->msg_bus_addr = rockchip->mem_bus_addr +
- ((reg_no + offset) << 20);
- return err;
-}
-
-static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
-{
- u32 value;
- int err;
-
- /* send PME_TURN_OFF message */
- writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+ int i;
- /* read LTSSM and wait for falling into L2 link state */
- err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
- value, PCIE_LINK_IS_L2(value), 20,
- jiffies_to_usecs(5 * HZ));
- if (err) {
- dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
- return err;
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ /* inactive lanes are already powered off */
+ if (rockchip->lanes_map & BIT(i))
+ phy_power_off(rockchip->phys[i]);
+ phy_exit(rockchip->phys[i]);
}
-
- return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys);
-static int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int err;
@@ -1432,8 +389,9 @@ err_aclk_perf_pcie:
clk_disable_unprepare(rockchip->aclk_pcie);
return err;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
-static void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(void *data)
{
struct rockchip_pcie *rockchip = data;
@@ -1442,267 +400,25 @@ static void rockchip_pcie_disable_clocks(void *data)
clk_disable_unprepare(rockchip->aclk_perf_pcie);
clk_disable_unprepare(rockchip->aclk_pcie);
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
-static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
-{
- struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
- int ret;
-
- /* disable core and cli int since we don't need to ack PME_ACK */
- rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
- PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
- rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
-
- ret = rockchip_pcie_wait_l2(rockchip);
- if (ret) {
- rockchip_pcie_enable_interrupts(rockchip);
- return ret;
- }
-
- rockchip_pcie_deinit_phys(rockchip);
-
- rockchip_pcie_disable_clocks(rockchip);
-
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
-
- return ret;
-}
-
-static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
-{
- struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
- int err;
-
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- return err;
- }
- }
-
- err = rockchip_pcie_enable_clocks(rockchip);
- if (err)
- goto err_disable_0v9;
-
- err = rockchip_pcie_init_port(rockchip);
- if (err)
- goto err_pcie_resume;
-
- err = rockchip_pcie_cfg_atu(rockchip);
- if (err)
- goto err_err_deinit_port;
-
- /* Need this to enter L1 again */
- rockchip_pcie_update_txcredit_mui(rockchip);
- rockchip_pcie_enable_interrupts(rockchip);
-
- return 0;
-
-err_err_deinit_port:
- rockchip_pcie_deinit_phys(rockchip);
-err_pcie_resume:
- rockchip_pcie_disable_clocks(rockchip);
-err_disable_0v9:
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
- return err;
-}
-
-static int rockchip_pcie_probe(struct platform_device *pdev)
-{
- struct rockchip_pcie *rockchip;
- struct device *dev = &pdev->dev;
- struct pci_bus *bus, *child;
- struct pci_host_bridge *bridge;
- struct resource_entry *win;
- resource_size_t io_base;
- struct resource *mem;
- struct resource *io;
- int err;
-
- LIST_HEAD(res);
-
- if (!dev->of_node)
- return -ENODEV;
-
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
- if (!bridge)
- return -ENOMEM;
-
- rockchip = pci_host_bridge_priv(bridge);
-
- platform_set_drvdata(pdev, rockchip);
- rockchip->dev = dev;
-
- err = rockchip_pcie_parse_dt(rockchip);
- if (err)
- return err;
-
- err = rockchip_pcie_enable_clocks(rockchip);
- if (err)
- return err;
-
- err = rockchip_pcie_set_vpcie(rockchip);
- if (err) {
- dev_err(dev, "failed to set vpcie regulator\n");
- goto err_set_vpcie;
- }
-
- err = rockchip_pcie_init_port(rockchip);
- if (err)
- goto err_vpcie;
-
- rockchip_pcie_enable_interrupts(rockchip);
-
- err = rockchip_pcie_init_irq_domain(rockchip);
- if (err < 0)
- goto err_deinit_port;
-
- err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
- &res, &io_base);
- if (err)
- goto err_remove_irq_domain;
-
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto err_free_res;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- io = win->res;
- io->name = "I/O";
- rockchip->io_size = resource_size(io);
- rockchip->io_bus_addr = io->start - win->offset;
- err = pci_remap_iospace(io, io_base);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, io);
- continue;
- }
- rockchip->io = io;
- break;
- case IORESOURCE_MEM:
- mem = win->res;
- mem->name = "MEM";
- rockchip->mem_size = resource_size(mem);
- rockchip->mem_bus_addr = mem->start - win->offset;
- break;
- case IORESOURCE_BUS:
- rockchip->root_bus_nr = win->res->start;
- break;
- default:
- continue;
- }
- }
-
- err = rockchip_pcie_cfg_atu(rockchip);
- if (err)
- goto err_unmap_iospace;
-
- rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
- if (!rockchip->msg_region) {
- err = -ENOMEM;
- goto err_unmap_iospace;
- }
-
- list_splice_init(&res, &bridge->windows);
- bridge->dev.parent = dev;
- bridge->sysdata = rockchip;
- bridge->busnr = 0;
- bridge->ops = &rockchip_pcie_ops;
- bridge->map_irq = of_irq_parse_and_map_pci;
- bridge->swizzle_irq = pci_common_swizzle;
-
- err = pci_scan_root_bus_bridge(bridge);
- if (err < 0)
- goto err_unmap_iospace;
-
- bus = bridge->bus;
-
- rockchip->root_bus = bus;
-
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(bus);
- return 0;
-
-err_unmap_iospace:
- pci_unmap_iospace(rockchip->io);
-err_free_res:
- pci_free_resource_list(&res);
-err_remove_irq_domain:
- irq_domain_remove(rockchip->irq_domain);
-err_deinit_port:
- rockchip_pcie_deinit_phys(rockchip);
-err_vpcie:
- if (!IS_ERR(rockchip->vpcie12v))
- regulator_disable(rockchip->vpcie12v);
- if (!IS_ERR(rockchip->vpcie3v3))
- regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
-err_set_vpcie:
- rockchip_pcie_disable_clocks(rockchip);
- return err;
-}
-
-static int rockchip_pcie_remove(struct platform_device *pdev)
+void rockchip_pcie_cfg_configuration_accesses(
+ struct rockchip_pcie *rockchip, u32 type)
{
- struct device *dev = &pdev->dev;
- struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
-
- pci_stop_root_bus(rockchip->root_bus);
- pci_remove_root_bus(rockchip->root_bus);
- pci_unmap_iospace(rockchip->io);
- irq_domain_remove(rockchip->irq_domain);
-
- rockchip_pcie_deinit_phys(rockchip);
-
- rockchip_pcie_disable_clocks(rockchip);
+ u32 ob_desc_0;
- if (!IS_ERR(rockchip->vpcie12v))
- regulator_disable(rockchip->vpcie12v);
- if (!IS_ERR(rockchip->vpcie3v3))
- regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ /* Configuration Accesses for region 0 */
+ rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
- return 0;
+ rockchip_pcie_write(rockchip,
+ (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+ PCIE_CORE_OB_REGION_ADDR0);
+ rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+ PCIE_CORE_OB_REGION_ADDR1);
+ ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
+ ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
+ ob_desc_0 |= (type | (0x1 << 23));
+ rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
+ rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
}
-
-static const struct dev_pm_ops rockchip_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
- rockchip_pcie_resume_noirq)
-};
-
-static const struct of_device_id rockchip_pcie_of_match[] = {
- { .compatible = "rockchip,rk3399-pcie", },
- {}
-};
-MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
-
-static struct platform_driver rockchip_pcie_driver = {
- .driver = {
- .name = "rockchip-pcie",
- .of_match_table = rockchip_pcie_of_match,
- .pm = &rockchip_pcie_pm_ops,
- },
- .probe = rockchip_pcie_probe,
- .remove = rockchip_pcie_remove,
-};
-module_platform_driver(rockchip_pcie_driver);
-
-MODULE_AUTHOR("Rockchip Inc");
-MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
-MODULE_LICENSE("GPL v2");
+EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses);
diff --git a/drivers/pci/host/pcie-rockchip.h b/drivers/pci/host/pcie-rockchip.h
new file mode 100644
index 000000000000..8e87a059ce73
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip.h
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *
+ */
+
+#ifndef _PCIE_ROCKCHIP_H
+#define _PCIE_ROCKCHIP_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
+ * bits. This allows atomic updates of the register without locking.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+
+#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+#define MAX_LANE_NUM 4
+#define MAX_REGION_LIMIT 32
+#define MIN_EP_APERTURE 28
+
+#define PCIE_CLIENT_BASE 0x0
+#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
+#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
+#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
+#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
+#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
+#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
+#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
+#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
+#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
+#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
+#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
+#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
+#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
+#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
+#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
+#define PCIE_CLIENT_INTR_SHIFT 5
+#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
+#define PCIE_CLIENT_INT_MSG BIT(14)
+#define PCIE_CLIENT_INT_HOT_RST BIT(13)
+#define PCIE_CLIENT_INT_DPA BIT(12)
+#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
+#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
+#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
+#define PCIE_CLIENT_INT_INTD BIT(8)
+#define PCIE_CLIENT_INT_INTC BIT(7)
+#define PCIE_CLIENT_INT_INTB BIT(6)
+#define PCIE_CLIENT_INT_INTA BIT(5)
+#define PCIE_CLIENT_INT_LOCAL BIT(4)
+#define PCIE_CLIENT_INT_UDMA BIT(3)
+#define PCIE_CLIENT_INT_PHY BIT(2)
+#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
+#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
+
+#define PCIE_CLIENT_INT_LEGACY \
+ (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
+ PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
+
+#define PCIE_CLIENT_INT_CLI \
+ (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
+ PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
+ PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
+ PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
+ PCIE_CLIENT_INT_PHY)
+
+#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
+#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
+#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
+#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
+#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
+#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
+#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
+#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
+#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
+#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
+ (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
+#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
+#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
+#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
+#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
+#define PCIE_CORE_INT_PRFPE BIT(0)
+#define PCIE_CORE_INT_CRFPE BIT(1)
+#define PCIE_CORE_INT_RRPE BIT(2)
+#define PCIE_CORE_INT_PRFO BIT(3)
+#define PCIE_CORE_INT_CRFO BIT(4)
+#define PCIE_CORE_INT_RT BIT(5)
+#define PCIE_CORE_INT_RTR BIT(6)
+#define PCIE_CORE_INT_PE BIT(7)
+#define PCIE_CORE_INT_MTR BIT(8)
+#define PCIE_CORE_INT_UCR BIT(9)
+#define PCIE_CORE_INT_FCE BIT(10)
+#define PCIE_CORE_INT_CT BIT(11)
+#define PCIE_CORE_INT_UTC BIT(18)
+#define PCIE_CORE_INT_MMVC BIT(19)
+#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44)
+#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
+#define PCIE_CORE_PHY_FUNC_CFG (PCIE_CORE_CTRL_MGMT_BASE + 0x2c0)
+#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED 0x0
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS 0x1
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS 0x4
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS 0x6
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
+
+#define PCIE_CORE_INT \
+ (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
+ PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
+ PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
+ PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
+ PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
+ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
+ PCIE_CORE_INT_MMVC)
+
+#define PCIE_RC_RP_ATS_BASE 0x400000
+#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
+#define PCIE_RC_CONFIG_BASE 0xa00000
+#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
+#define PCIE_RC_CONFIG_SCC_SHIFT 16
+#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
+#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
+#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
+#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
+#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
+#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
+#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
+#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
+#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
+#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
+#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
+
+#define PCIE_CORE_AXI_CONF_BASE 0xc00000
+#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
+#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
+#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
+#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
+
+#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
+#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
+#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
+
+/* Size of one AXI Region (not Region 0) */
+#define AXI_REGION_SIZE BIT(20)
+/* Size of Region 0, equal to sum of sizes of other regions */
+#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
+#define OB_REG_SIZE_SHIFT 5
+#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
+#define AXI_WRAPPER_IO_WRITE 0x6
+#define AXI_WRAPPER_MEM_WRITE 0x2
+#define AXI_WRAPPER_TYPE0_CFG 0xa
+#define AXI_WRAPPER_TYPE1_CFG 0xb
+#define AXI_WRAPPER_NOR_MSG 0xc
+
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
+#define MIN_AXI_ADDR_BITS_PASSED 8
+#define PCIE_RC_SEND_PME_OFF 0x11960
+#define ROCKCHIP_VENDOR_ID 0x1d87
+#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
+#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
+#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
+#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
+#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
+ (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
+ PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
+#define PCIE_LINK_IS_L2(x) \
+ (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_UP(x) \
+ (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
+#define PCIE_LINK_IS_GEN2(x) \
+ (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
+
+#define RC_REGION_0_ADDR_TRANS_H 0x00000000
+#define RC_REGION_0_ADDR_TRANS_L 0x00000000
+#define RC_REGION_0_PASS_BITS (25 - 1)
+#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
+#define MAX_AXI_WRAPPER_REGION_NUM 33
+
+#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2
+#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3
+#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4
+#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27
+#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5)
+#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
+ (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
+#define ROCKCHIP_PCIE_MSG_CODE_MASK GENMASK(15, 8)
+#define ROCKCHIP_PCIE_MSG_CODE(code) \
+ (((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK)
+#define ROCKCHIP_PCIE_MSG_NO_DATA BIT(16)
+
+#define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4
+#define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24)
+#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
+#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
+#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+ (PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ (((devfn) << 12) & \
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ (((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+ (((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
+
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \
+ (PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \
+ (PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+ (GENMASK(4, 0) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << ((b) * 8)) & \
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+ (GENMASK(7, 5) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 8 + 5)) & \
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+struct rockchip_pcie {
+ void __iomem *reg_base; /* DT axi-base */
+ void __iomem *apb_base; /* DT apb-base */
+ bool legacy_phy;
+ struct phy *phys[MAX_LANE_NUM];
+ struct reset_control *core_rst;
+ struct reset_control *mgmt_rst;
+ struct reset_control *mgmt_sticky_rst;
+ struct reset_control *pipe_rst;
+ struct reset_control *pm_rst;
+ struct reset_control *aclk_rst;
+ struct reset_control *pclk_rst;
+ struct clk *aclk_pcie;
+ struct clk *aclk_perf_pcie;
+ struct clk *hclk_pcie;
+ struct clk *clk_pcie_pm;
+ struct regulator *vpcie12v; /* 12V power supply */
+ struct regulator *vpcie3v3; /* 3.3V power supply */
+ struct regulator *vpcie1v8; /* 1.8V power supply */
+ struct regulator *vpcie0v9; /* 0.9V power supply */
+ struct gpio_desc *ep_gpio;
+ u32 lanes;
+ u8 lanes_map;
+ u8 root_bus_nr;
+ int link_gen;
+ struct device *dev;
+ struct irq_domain *irq_domain;
+ int offset;
+ struct pci_bus *root_bus;
+ struct resource *io;
+ phys_addr_t io_bus_addr;
+ u32 io_size;
+ void __iomem *msg_region;
+ u32 mem_size;
+ phys_addr_t msg_bus_addr;
+ phys_addr_t mem_bus_addr;
+ bool is_rc;
+ struct resource *mem_res;
+};
+
+static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
+{
+ return readl(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
+{
+ writel(val, rockchip->apb_base + reg);
+}
+
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip);
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
+void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_cfg_configuration_accesses(
+ struct rockchip_pcie *rockchip, u32 type);
+
+#endif /* _PCIE_ROCKCHIP_H */
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4839ae578711..6a4bbb5b3de0 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
+#include "../pci.h"
+
/* Bridge core config registers */
#define BRCFG_PCIE_RX0 0x00000000
#define BRCFG_INTERRUPT 0x00000010
@@ -825,7 +827,6 @@ static const struct of_device_id nwl_pcie_of_match[] = {
static int nwl_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
struct nwl_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
@@ -855,7 +856,8 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
- err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 0ad188effc09..b110a3a814e3 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -23,6 +23,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
/* Register definitions */
#define XILINX_PCIE_REG_BIR 0x00000130
#define XILINX_PCIE_REG_IDR 0x00000138
@@ -643,8 +645,8 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return err;
}
- err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, &res,
- &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 930a8fa08bd6..942b64fc7f1f 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -24,6 +24,28 @@
#define VMD_MEMBAR1 2
#define VMD_MEMBAR2 4
+#define PCI_REG_VMCAP 0x40
+#define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1)
+#define PCI_REG_VMCONFIG 0x44
+#define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3)
+#define PCI_REG_VMLOCK 0x70
+#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
+
+enum vmd_features {
+ /*
+ * Device may contain registers which hint the physical location of the
+ * membars, in order to allow proper address translation during
+ * resource assignment to enable guest virtualization
+ */
+ VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0),
+
+ /*
+ * Device may provide root port configuration information which limits
+ * bus numbering
+ */
+ VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1),
+};
+
/*
* Lock for manipulating VMD IRQ lists.
*/
@@ -546,7 +568,7 @@ static int vmd_find_free_domain(void)
return domain + 1;
}
-static int vmd_enable_domain(struct vmd_dev *vmd)
+static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
struct pci_sysdata *sd = &vmd->sysdata;
struct fwnode_handle *fn;
@@ -554,12 +576,57 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
u32 upper_bits;
unsigned long flags;
LIST_HEAD(resources);
+ resource_size_t offset[2] = {0};
+ resource_size_t membar2_offset = 0x2000, busn_start = 0;
+
+ /*
+ * Shadow registers may exist in certain VMD device ids which allow
+ * guests to correctly assign host physical addresses to the root ports
+ * and child devices. These registers will either return the host value
+ * or 0, depending on an enable bit in the VMD device.
+ */
+ if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
+ u32 vmlock;
+ int ret;
+
+ membar2_offset = 0x2018;
+ ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
+ if (ret || vmlock == ~0)
+ return -ENODEV;
+
+ if (MB2_SHADOW_EN(vmlock)) {
+ void __iomem *membar2;
+
+ membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
+ if (!membar2)
+ return -ENOMEM;
+ offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
+ readq(membar2 + 0x2008);
+ offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
+ readq(membar2 + 0x2010);
+ pci_iounmap(vmd->dev, membar2);
+ }
+ }
+
+ /*
+ * Certain VMD devices may have a root port configuration option which
+ * limits the bus range to between 0-127 or 128-255
+ */
+ if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
+ u32 vmcap, vmconfig;
+
+ pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
+ pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
+ if (BUS_RESTRICT_CAP(vmcap) &&
+ (BUS_RESTRICT_CFG(vmconfig) == 0x1))
+ busn_start = 128;
+ }
res = &vmd->dev->resource[VMD_CFGBAR];
vmd->resources[0] = (struct resource) {
.name = "VMD CFGBAR",
- .start = 0,
- .end = (resource_size(res) >> 20) - 1,
+ .start = busn_start,
+ .end = busn_start + (resource_size(res) >> 20) - 1,
.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
};
@@ -600,7 +667,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
flags &= ~IORESOURCE_MEM_64;
vmd->resources[2] = (struct resource) {
.name = "VMD MEMBAR2",
- .start = res->start + 0x2000,
+ .start = res->start + membar2_offset,
.end = res->end,
.flags = flags,
.parent = res,
@@ -624,10 +691,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
return -ENODEV;
pci_add_resource(&resources, &vmd->resources[0]);
- pci_add_resource(&resources, &vmd->resources[1]);
- pci_add_resource(&resources, &vmd->resources[2]);
- vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
- &resources);
+ pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
+ pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
+
+ vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
+ sd, &resources);
if (!vmd->bus) {
pci_free_resource_list(&resources);
irq_domain_remove(vmd->irq_domain);
@@ -713,7 +781,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
- err = vmd_enable_domain(vmd);
+ err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
if (err)
return err;
@@ -778,7 +846,10 @@ static int vmd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
static const struct pci_device_id vmd_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index a8f21d051e0c..e9f78eb390d2 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -104,14 +104,11 @@ config HOTPLUG_PCI_CPCI_GENERIC
When in doubt, say N.
config HOTPLUG_PCI_SHPC
- tristate "SHPC PCI Hotplug driver"
+ bool "SHPC PCI Hotplug driver"
help
Say Y here if you have a motherboard with a SHPC PCI Hotplug
controller.
- To compile this driver as a module, choose M here: the
- module will be called shpchp.
-
When in doubt, say N.
config HOTPLUG_PCI_POWERNV
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index c9816166978e..3979f89b250a 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -63,22 +63,17 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
/**
* acpi_get_hp_hw_control_from_firmware
* @dev: the pci_dev of the bridge that has a hotplug controller
- * @flags: requested control bits for _OSC
*
* Attempt to take hotplug control from firmware.
*/
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
{
+ const struct pci_host_bridge *host;
+ const struct acpi_pci_root *root;
acpi_status status;
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
- if (!flags) {
- err("Invalid flags %u specified!\n", flags);
- return -EINVAL;
- }
-
/*
* Per PCI firmware specification, we should run the ACPI _OSC
* method to get control of hotplug hardware before using it. If
@@ -88,25 +83,20 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
* OSHP within the scope of the hotplug controller and its parents,
* up to the host bridge under which this controller exists.
*/
- handle = acpi_find_root_bridge_handle(pdev);
- if (handle) {
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
- dbg("Trying to get hotplug control for %s\n",
- (char *)string.pointer);
- status = acpi_pci_osc_control_set(handle, &flags, flags);
- if (ACPI_SUCCESS(status))
- goto got_one;
- if (status == AE_SUPPORT)
- goto no_control;
- kfree(string.pointer);
- string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
- }
+ if (shpchp_is_native(pdev))
+ return 0;
+
+ /* If _OSC exists, we should not evaluate OSHP */
+ host = pci_find_host_bridge(pdev->bus);
+ root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
+ if (root->osc_support_set)
+ goto no_control;
handle = ACPI_HANDLE(&pdev->dev);
if (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
- * space at all. Try to get acpi handle of parent pci bus.
+ * space at all. Try to get ACPI handle of parent PCI bus.
*/
struct pci_bus *pbus;
for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
@@ -118,8 +108,8 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
while (handle) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
- dbg("Trying to get hotplug control for %s\n",
- (char *)string.pointer);
+ pci_info(pdev, "Requesting control of SHPC hotplug via OSHP (%s)\n",
+ (char *)string.pointer);
status = acpi_run_oshp(handle);
if (ACPI_SUCCESS(status))
goto got_one;
@@ -131,13 +121,12 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
break;
}
no_control:
- dbg("Cannot get control of hotplug hardware for pci %s\n",
- pci_name(pdev));
+ pci_info(pdev, "Cannot get control of SHPC hotplug\n");
kfree(string.pointer);
return -ENODEV;
got_one:
- dbg("Gained control for hotplug HW for pci %s (%s)\n",
- pci_name(pdev), (char *)string.pointer);
+ pci_info(pdev, "Gained control of SHPC hotplug (%s)\n",
+ (char *)string.pointer);
kfree(string.pointer);
return 0;
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index b45b375c0e6c..3a17b290df5d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -287,11 +287,12 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
/*
* Expose slots to user space for functions that have _EJ0 or _RMV or
* are located in dock stations. Do not expose them for devices handled
- * by the native PCIe hotplug (PCIeHP), becuase that code is supposed to
- * expose slots to user space in those cases.
+ * by the native PCIe hotplug (PCIeHP) or standard PCI hotplug
+ * (SHPCHP), because that code is supposed to expose slots to user
+ * space in those cases.
*/
if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
- && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) {
+ && !(pdev && hotplug_is_native(pdev))) {
unsigned long long sun;
int retval;
@@ -430,6 +431,29 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
}
+static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
+{
+ struct pci_bus *bus = bridge->subordinate;
+ struct pci_dev *dev;
+ int max;
+
+ if (!bus)
+ return;
+
+ max = bus->busn_res.start;
+ /* Scan already configured non-hotplug bridges */
+ for_each_pci_bridge(dev, bus) {
+ if (!hotplug_is_native(dev))
+ max = pci_scan_bridge(bus, dev, max, 0);
+ }
+
+ /* Scan non-hotplug bridges that need to be reconfigured */
+ for_each_pci_bridge(dev, bus) {
+ if (!hotplug_is_native(dev))
+ max = pci_scan_bridge(bus, dev, max, 1);
+ }
+}
+
/**
* enable_slot - enable, configure a slot
* @slot: slot to be enabled
@@ -442,25 +466,42 @@ static void enable_slot(struct acpiphp_slot *slot)
struct pci_dev *dev;
struct pci_bus *bus = slot->bus;
struct acpiphp_func *func;
- int max, pass;
- LIST_HEAD(add_list);
- acpiphp_rescan_slot(slot);
- max = acpiphp_max_busnr(bus);
- for (pass = 0; pass < 2; pass++) {
+ if (bus->self && hotplug_is_native(bus->self)) {
+ /*
+ * If native hotplug is used, it will take care of hotplug
+ * slot management and resource allocation for hotplug
+ * bridges. However, ACPI hotplug may still be used for
+ * non-hotplug bridges to bring in additional devices such
+ * as a Thunderbolt host controller.
+ */
for_each_pci_bridge(dev, bus) {
- if (PCI_SLOT(dev->devfn) != slot->device)
- continue;
-
- max = pci_scan_bridge(bus, dev, max, pass);
- if (pass && dev->subordinate) {
- check_hotplug_bridge(slot, dev);
- pcibios_resource_survey_bus(dev->subordinate);
- __pci_bus_size_bridges(dev->subordinate, &add_list);
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ acpiphp_native_scan_bridge(dev);
+ }
+ pci_assign_unassigned_bridge_resources(bus->self);
+ } else {
+ LIST_HEAD(add_list);
+ int max, pass;
+
+ acpiphp_rescan_slot(slot);
+ max = acpiphp_max_busnr(bus);
+ for (pass = 0; pass < 2; pass++) {
+ for_each_pci_bridge(dev, bus) {
+ if (PCI_SLOT(dev->devfn) != slot->device)
+ continue;
+
+ max = pci_scan_bridge(bus, dev, max, pass);
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
+ }
}
}
+ __pci_bus_assign_resources(bus, &add_list, NULL);
}
- __pci_bus_assign_resources(bus, &add_list, NULL);
acpiphp_sanitize_bus(bus);
pcie_bus_configure_settings(bus);
@@ -481,7 +522,7 @@ static void enable_slot(struct acpiphp_slot *slot)
if (!dev) {
/* Do not set SLOT_ENABLED flag if some funcs
are not added. */
- slot->flags &= (~SLOT_ENABLED);
+ slot->flags &= ~SLOT_ENABLED;
continue;
}
}
@@ -510,7 +551,7 @@ static void disable_slot(struct acpiphp_slot *slot)
list_for_each_entry(func, &slot->funcs, sibling)
acpi_bus_trim(func_to_acpi_device(func));
- slot->flags &= (~SLOT_ENABLED);
+ slot->flags &= ~SLOT_ENABLED;
}
static bool slot_no_hotplug(struct acpiphp_slot *slot)
@@ -608,6 +649,11 @@ static void trim_stale_devices(struct pci_dev *dev)
alive = pci_device_is_present(dev);
if (!alive) {
+ pci_dev_set_disconnected(dev, NULL);
+ if (pci_has_subordinate(dev))
+ pci_walk_bus(dev->subordinate, pci_dev_set_disconnected,
+ NULL);
+
pci_stop_and_remove_bus_device(dev);
if (adev)
acpi_bus_trim(adev);
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index b81ca3fa0e84..1869b0411ce0 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -379,7 +379,7 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_max_bus_speed(struct slot *slot)
{
- int rc;
+ int rc = 0;
u8 mode = 0;
enum pci_bus_speed speed;
struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 88e917c9120f..5f892065585e 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -121,7 +121,7 @@ struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
-void pcie_enable_notification(struct controller *ctrl);
+void pcie_reenable_notification(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
void pciehp_power_off_slot(struct slot *slot);
void pciehp_get_power_status(struct slot *slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 332b723ff9e6..44a6a63802d5 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -283,7 +283,7 @@ static int pciehp_resume(struct pcie_device *dev)
ctrl = get_service_data(dev);
/* reinitialize the chipset's event detection logic */
- pcie_enable_notification(ctrl);
+ pcie_reenable_notification(ctrl);
slot = ctrl->slot;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 18a42f8f5dc5..718b6073afad 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -10,7 +10,6 @@
* All rights reserved.
*
* Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
- *
*/
#include <linux/kernel.h>
@@ -147,25 +146,22 @@ static void pcie_wait_cmd(struct controller *ctrl)
else
rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
- /*
- * Controllers with errata like Intel CF118 don't generate
- * completion notifications unless the power/indicator/interlock
- * control bits are changed. On such controllers, we'll emit this
- * timeout message when we wait for completion of commands that
- * don't change those bits, e.g., commands that merely enable
- * interrupts.
- */
if (!rc)
ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
ctrl->slot_ctrl,
jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
+#define CC_ERRATUM_MASK (PCI_EXP_SLTCTL_PCC | \
+ PCI_EXP_SLTCTL_PIC | \
+ PCI_EXP_SLTCTL_AIC | \
+ PCI_EXP_SLTCTL_EIC)
+
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
u16 mask, bool wait)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
- u16 slot_ctrl;
+ u16 slot_ctrl_orig, slot_ctrl;
mutex_lock(&ctrl->ctrl_lock);
@@ -180,6 +176,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
goto out;
}
+ slot_ctrl_orig = slot_ctrl;
slot_ctrl &= ~mask;
slot_ctrl |= (cmd & mask);
ctrl->cmd_busy = 1;
@@ -189,6 +186,17 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
ctrl->slot_ctrl = slot_ctrl;
/*
+ * Controllers with the Intel CF118 and similar errata advertise
+ * Command Completed support, but they only set Command Completed
+ * if we change the "Control" bits for power, power indicator,
+ * attention indicator, or interlock. If we only change the
+ * "Enable" bits, they never set the Command Completed bit.
+ */
+ if (pdev->broken_cmd_compl &&
+ (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
+ ctrl->cmd_busy = 0;
+
+ /*
* Optionally wait for the hardware to be ready for a new command,
* indicating completion of the above issued command.
*/
@@ -231,25 +239,11 @@ bool pciehp_check_link_active(struct controller *ctrl)
return ret;
}
-static void __pcie_wait_link_active(struct controller *ctrl, bool active)
-{
- int timeout = 1000;
-
- if (pciehp_check_link_active(ctrl) == active)
- return;
- while (timeout > 0) {
- msleep(10);
- timeout -= 10;
- if (pciehp_check_link_active(ctrl) == active)
- return;
- }
- ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
- active ? "set" : "cleared");
-}
-
static void pcie_wait_link_active(struct controller *ctrl)
{
- __pcie_wait_link_active(ctrl, true);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+
+ pcie_wait_for_link(pdev, true);
}
static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
@@ -659,7 +653,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return handled;
}
-void pcie_enable_notification(struct controller *ctrl)
+static void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -697,6 +691,17 @@ void pcie_enable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
+void pcie_reenable_notification(struct controller *ctrl)
+{
+ /*
+ * Clear both Presence and Data Link Layer Changed to make sure
+ * those events still fire after we have re-enabled them.
+ */
+ pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_enable_notification(ctrl);
+}
+
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
@@ -861,7 +866,7 @@ struct controller *pcie_init(struct pcie_device *dev)
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
PCI_EXP_SLTSTA_DLLSC);
- ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n",
+ ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
@@ -872,7 +877,8 @@ struct controller *pcie_init(struct pcie_device *dev)
FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
- FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
+ FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
+ pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
if (pcie_init_slot(ctrl))
goto abort_ctrl;
@@ -891,3 +897,21 @@ void pciehp_release_ctrl(struct controller *ctrl)
pcie_cleanup_slot(ctrl);
kfree(ctrl);
}
+
+static void quirk_cmd_compl(struct pci_dev *pdev)
+{
+ u32 slot_cap;
+
+ if (pci_is_pcie(pdev)) {
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
+ if (slot_cap & PCI_EXP_SLTCAP_HPC &&
+ !(slot_cap & PCI_EXP_SLTCAP_NCCS))
+ pdev->broken_cmd_compl = 1;
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index d44100687dfe..6c2e8d7307c6 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -220,12 +220,16 @@ static int pnv_php_populate_changeset(struct of_changeset *ocs,
for_each_child_of_node(dn, child) {
ret = of_changeset_attach_node(ocs, child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
ret = pnv_php_populate_changeset(ocs, child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
return ret;
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index c55730b61c9a..516e4835019c 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -105,7 +105,6 @@ struct controller {
};
/* Define AMD SHPC ID */
-#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
/* AMD PCI-X bridge registers */
@@ -173,17 +172,6 @@ static inline const char *slot_name(struct slot *slot)
return hotplug_slot_name(slot->hotplug_slot);
}
-#ifdef CONFIG_ACPI
-#include <linux/pci-acpi.h>
-static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
- u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
- return acpi_get_hp_hw_control_from_firmware(dev, flags);
-}
-#else
-#define get_hp_hw_control_from_firmware(dev) (0)
-#endif
-
struct ctrl_reg {
volatile u32 base_offset;
volatile u32 slot_avail1;
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 1f0f96908b5a..e91be287f292 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -270,24 +270,12 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static int is_shpc_capable(struct pci_dev *dev)
-{
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
- return 1;
- if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
- return 0;
- if (get_hp_hw_control_from_firmware(dev))
- return 0;
- return 1;
-}
-
static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
struct controller *ctrl;
- if (!is_shpc_capable(pdev))
+ if (acpi_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index bedda5bda910..1047b56e5730 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -585,13 +585,13 @@ static int shpchp_enable_slot (struct slot *p_slot)
ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
- if (((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
- (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458))
+ if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
+ p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
&& p_slot->ctrl->num_slots == 1) {
- /* handle amd pogo errata; this must be done before enable */
+ /* handle AMD POGO errata; this must be done before enable */
amd_pogo_errata_save_misc_reg(p_slot);
retval = board_added(p_slot);
- /* handle amd pogo errata; this must be done after enable */
+ /* handle AMD POGO errata; this must be done after enable */
amd_pogo_errata_restore_misc_reg(p_slot);
} else
retval = board_added(p_slot);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 8adf4a64f291..d0d73dbbd5ca 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -469,6 +469,7 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
+ iov->driver_max_VFs = total;
pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
iov->pgsz = pgsz;
iov->self = dev;
@@ -827,9 +828,42 @@ int pci_sriov_get_totalvfs(struct pci_dev *dev)
if (!dev->is_physfn)
return 0;
- if (dev->sriov->driver_max_VFs)
- return dev->sriov->driver_max_VFs;
-
- return dev->sriov->total_VFs;
+ return dev->sriov->driver_max_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
+
+/**
+ * pci_sriov_configure_simple - helper to configure SR-IOV
+ * @dev: the PCI device
+ * @nr_virtfn: number of virtual functions to enable, 0 to disable
+ *
+ * Enable or disable SR-IOV for devices that don't require any PF setup
+ * before enabling SR-IOV. Return value is negative on error, or number of
+ * VFs allocated on success.
+ */
+int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
+{
+ int rc;
+
+ might_sleep();
+
+ if (!dev->is_physfn)
+ return -ENODEV;
+
+ if (pci_vfs_assigned(dev)) {
+ pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ if (nr_virtfn == 0) {
+ sriov_disable(dev);
+ return 0;
+ }
+
+ rc = sriov_enable(dev, nr_virtfn);
+ if (rc < 0)
+ return rc;
+
+ return nr_virtfn;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index a28355c273ae..d088c9147f10 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -244,8 +244,9 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
#if defined(CONFIG_OF_ADDRESS)
/**
- * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
- * @dev: device node of the host bridge having the range property
+ * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
+ * host bridge resources from DT
+ * @dev: host bridge device
* @busno: bus number associated with the bridge root bus
* @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
@@ -253,8 +254,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* address for the start of the I/O range. Can be NULL if the caller doesn't
* expect I/O ranges to be present in the device tree.
*
- * It is the caller's job to free the @resources list.
- *
* This function will parse the "ranges" property of a PCI host bridge device
* node and setup the resource mapping based on its content. It is expected
* that the property conforms with the Power ePAPR document.
@@ -262,11 +261,11 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* It returns zero if the range parsing has been successful or a standard error
* value if it failed.
*/
-int of_pci_get_host_bridge_resources(struct device_node *dev,
+int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources, resource_size_t *io_base)
{
- struct resource_entry *window;
+ struct device_node *dev_node = dev->of_node;
struct resource *res;
struct resource *bus_range;
struct of_pci_range range;
@@ -277,19 +276,19 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
if (io_base)
*io_base = (resource_size_t)OF_BAD_ADDR;
- bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
+ bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
if (!bus_range)
return -ENOMEM;
- pr_info("host bridge %pOF ranges:\n", dev);
+ dev_info(dev, "host bridge %pOF ranges:\n", dev_node);
- err = of_pci_parse_bus_range(dev, bus_range);
+ err = of_pci_parse_bus_range(dev_node, bus_range);
if (err) {
bus_range->start = busno;
bus_range->end = bus_max;
bus_range->flags = IORESOURCE_BUS;
- pr_info(" No bus range found for %pOF, using %pR\n",
- dev, bus_range);
+ dev_info(dev, " No bus range found for %pOF, using %pR\n",
+ dev_node, bus_range);
} else {
if (bus_range->end > bus_range->start + bus_max)
bus_range->end = bus_range->start + bus_max;
@@ -297,11 +296,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
pci_add_resource(resources, bus_range);
/* Check for ranges property */
- err = of_pci_range_parser_init(&parser, dev);
+ err = of_pci_range_parser_init(&parser, dev_node);
if (err)
- goto parse_failed;
+ goto failed;
- pr_debug("Parsing ranges property...\n");
+ dev_dbg(dev, "Parsing ranges property...\n");
for_each_of_pci_range(&parser, &range) {
/* Read next ranges element */
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
@@ -310,9 +309,9 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
snprintf(range_type, 4, "MEM");
else
snprintf(range_type, 4, "err");
- pr_info(" %s %#010llx..%#010llx -> %#010llx\n", range_type,
- range.cpu_addr, range.cpu_addr + range.size - 1,
- range.pci_addr);
+ dev_info(dev, " %s %#010llx..%#010llx -> %#010llx\n",
+ range_type, range.cpu_addr,
+ range.cpu_addr + range.size - 1, range.pci_addr);
/*
* If we failed translation or got a zero-sized region
@@ -321,28 +320,28 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
- res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
- goto parse_failed;
+ goto failed;
}
- err = of_pci_range_to_resource(&range, dev, res);
+ err = of_pci_range_to_resource(&range, dev_node, res);
if (err) {
- kfree(res);
+ devm_kfree(dev, res);
continue;
}
if (resource_type(res) == IORESOURCE_IO) {
if (!io_base) {
- pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
- dev);
+ dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
+ dev_node);
err = -EINVAL;
- goto conversion_failed;
+ goto failed;
}
if (*io_base != (resource_size_t)OF_BAD_ADDR)
- pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
- dev);
+ dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
+ dev_node);
*io_base = range.cpu_addr;
}
@@ -351,15 +350,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
return 0;
-conversion_failed:
- kfree(res);
-parse_failed:
- resource_list_for_each_entry(window, resources)
- kfree(window->res);
+failed:
pci_free_resource_list(resources);
return err;
}
-EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
+EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
/**
@@ -599,12 +594,12 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
struct resource **bus_range)
{
int err, res_valid = 0;
- struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win, *tmp;
INIT_LIST_HEAD(resources);
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
+ &iobase);
if (err)
return err;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1abdbf267c19..65113b6eed14 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -370,26 +370,57 @@ EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
* pciehp_is_native - Check whether a hotplug port is handled by the OS
- * @pdev: Hotplug port to check
+ * @bridge: Hotplug port to check
*
- * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field
- * and return the value of the "PCI Express Native Hot Plug control" bit.
- * On failure to obtain the _OSC Control Field return %false.
+ * Returns true if the given @bridge is handled by the native PCIe hotplug
+ * driver.
*/
-bool pciehp_is_native(struct pci_dev *pdev)
+bool pciehp_is_native(struct pci_dev *bridge)
{
- struct acpi_pci_root *root;
- acpi_handle handle;
+ const struct pci_host_bridge *host;
+ u32 slot_cap;
- handle = acpi_find_root_bridge_handle(pdev);
- if (!handle)
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
- root = acpi_pci_find_root(handle);
- if (!root)
+ pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
+ if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
+ return false;
+
+ if (pcie_ports_native)
+ return true;
+
+ host = pci_find_host_bridge(bridge->bus);
+ return host->native_pcie_hotplug;
+}
+
+/**
+ * shpchp_is_native - Check whether a hotplug port is handled by the OS
+ * @bridge: Hotplug port to check
+ *
+ * Returns true if the given @bridge is handled by the native SHPC hotplug
+ * driver.
+ */
+bool shpchp_is_native(struct pci_dev *bridge)
+{
+ const struct pci_host_bridge *host;
+
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
+ return false;
+
+ /*
+ * It is assumed that AMD GOLAM chips support SHPC but they do not
+ * have SHPC capability.
+ */
+ if (bridge->vendor == PCI_VENDOR_ID_AMD &&
+ bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
+ return true;
+
+ if (!pci_find_capability(bridge, PCI_CAP_ID_SHPC))
return false;
- return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+ host = pci_find_host_bridge(bridge->bus);
+ return host->native_shpc_hotplug;
}
/**
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 18ba62c76480..c125d53033c6 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1539,7 +1539,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
/**
* pci_uevent_ers - emit a uevent during recovery path of PCI device
* @pdev: PCI device undergoing error recovery
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
new file mode 100644
index 000000000000..9795649fc6f9
--- /dev/null
+++ b/drivers/pci/pci-pf-stub.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* pci-pf-stub - simple stub driver for PCI SR-IOV PF device
+ *
+ * This driver is meant to act as a "whitelist" for devices that provde
+ * SR-IOV functionality while at the same time not actually needing a
+ * driver of their own.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+/**
+ * pci_pf_stub_whitelist - White list of devices to bind pci-pf-stub onto
+ *
+ * This table provides the list of IDs this driver is supposed to bind
+ * onto. You could think of this as a list of "quirked" devices where we
+ * are adding support for SR-IOV here since there are no other drivers
+ * that they would be running under.
+ */
+static const struct pci_device_id pci_pf_stub_whitelist[] = {
+ { PCI_VDEVICE(AMAZON, 0x0053) },
+ /* required last entry */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, pci_pf_stub_whitelist);
+
+static int pci_pf_stub_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ pci_info(dev, "claimed by pci-pf-stub\n");
+ return 0;
+}
+
+static struct pci_driver pf_stub_driver = {
+ .name = "pci-pf-stub",
+ .id_table = pci_pf_stub_whitelist,
+ .probe = pci_pf_stub_probe,
+ .sriov_configure = pci_sriov_configure_simple,
+};
+
+static int __init pci_pf_stub_init(void)
+{
+ return pci_register_driver(&pf_stub_driver);
+}
+
+static void __exit pci_pf_stub_exit(void)
+{
+ pci_unregister_driver(&pf_stub_driver);
+}
+
+module_init(pci_pf_stub_init);
+module_exit(pci_pf_stub_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 366d93af051d..788a200fb2dc 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -288,13 +288,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!val) {
- if (pci_is_enabled(pdev))
- pci_disable_device(pdev);
- else
- result = -EIO;
- } else
+ device_lock(dev);
+ if (dev->driver)
+ result = -EBUSY;
+ else if (val)
result = pci_enable_device(pdev);
+ else if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+ else
+ result = -EIO;
+ device_unlock(dev);
return result < 0 ? result : count;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e90cf5c32e14..97acba712e4e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -112,6 +112,14 @@ unsigned int pcibios_max_latency = 255;
/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;
+/* If set, the PCIe ATS capability will not be used. */
+static bool pcie_ats_disabled;
+
+bool pci_ats_disabled(void)
+{
+ return pcie_ats_disabled;
+}
+
/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
@@ -4153,6 +4161,35 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ int timeout = 1000;
+ bool ret;
+ u16 lnk_status;
+
+ for (;;) {
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+ if (ret == active)
+ return true;
+ if (timeout <= 0)
+ break;
+ msleep(10);
+ timeout -= 10;
+ }
+
+ pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+
+ return false;
+}
void pci_reset_secondary_bus(struct pci_dev *dev)
{
@@ -5085,49 +5122,6 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
EXPORT_SYMBOL(pcie_set_mps);
/**
- * pcie_get_minimum_link - determine minimum link settings of a PCI device
- * @dev: PCI device to query
- * @speed: storage for minimum speed
- * @width: storage for minimum width
- *
- * This function will walk up the PCI device chain and determine the minimum
- * link width and speed of the device.
- */
-int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width)
-{
- int ret;
-
- *speed = PCI_SPEED_UNKNOWN;
- *width = PCIE_LNK_WIDTH_UNKNOWN;
-
- while (dev) {
- u16 lnksta;
- enum pci_bus_speed next_speed;
- enum pcie_link_width next_width;
-
- ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
- if (ret)
- return ret;
-
- next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
- next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
-
- if (next_speed < *speed)
- *speed = next_speed;
-
- if (next_width < *width)
- *width = next_width;
-
- dev = dev->bus->self;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(pcie_get_minimum_link);
-
-/**
* pcie_bandwidth_available - determine minimum link settings of a PCIe
* device and its bandwidth limitation
* @dev: PCI device to query
@@ -5717,15 +5711,14 @@ static void pci_no_domains(void)
#endif
}
-#ifdef CONFIG_PCI_DOMAINS
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);
-int pci_get_new_domain_nr(void)
+static int pci_get_new_domain_nr(void)
{
return atomic_inc_return(&__domain_nr);
}
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
static int of_pci_bus_find_domain_nr(struct device *parent)
{
static int use_dt_domains = -1;
@@ -5780,7 +5773,6 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
acpi_pci_bus_find_domain_nr(bus);
}
#endif
-#endif
/**
* pci_ext_cfg_avail - can we access extended PCI config space?
@@ -5808,6 +5800,9 @@ static int __init pci_setup(char *str)
if (*str && (str = pcibios_setup(str)) && *str) {
if (!strcmp(str, "nomsi")) {
pci_no_msi();
+ } else if (!strncmp(str, "noats", 5)) {
+ pr_info("PCIe: ATS is disabled\n");
+ pcie_ats_disabled = true;
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
} else if (!strncmp(str, "realloc=", 8)) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 023f7cf25bff..c358e7a07f3f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -353,6 +353,11 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
void pci_enable_acs(struct pci_dev *dev);
+/* PCI error reporting and recovery */
+void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
+void pcie_do_nonfatal_recovery(struct pci_dev *dev);
+
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
@@ -407,4 +412,44 @@ static inline u64 pci_rebar_size_to_bytes(int size)
return 1ULL << (size + 20);
}
+struct device_node;
+
+#ifdef CONFIG_OF
+int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+int of_get_pci_domain_nr(struct device_node *node);
+int of_pci_get_max_link_speed(struct device_node *node);
+
+#else
+static inline int
+of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+{
+ return -EINVAL;
+}
+
+static inline int
+of_get_pci_domain_nr(struct device_node *node)
+{
+ return -1;
+}
+
+static inline int
+of_pci_get_max_link_speed(struct device_node *node)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+#if defined(CONFIG_OF_ADDRESS)
+int devm_of_pci_get_host_bridge_resources(struct device *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base);
+#else
+static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base)
+{
+ return -EINVAL;
+}
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 800e1d404a45..03f4e0b3a140 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for PCI Express features and port driver
-pcieportdrv-y := portdrv_core.o portdrv_pci.o
+pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 779b3879b1b5..9735c19bf39c 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -94,7 +94,7 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
*/
static void aer_enable_rootport(struct aer_rpc *rpc)
{
- struct pci_dev *pdev = rpc->rpd->port;
+ struct pci_dev *pdev = rpc->rpd;
int aer_pos;
u16 reg16;
u32 reg32;
@@ -136,7 +136,7 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
*/
static void aer_disable_rootport(struct aer_rpc *rpc)
{
- struct pci_dev *pdev = rpc->rpd->port;
+ struct pci_dev *pdev = rpc->rpd;
u32 reg32;
int pos;
@@ -232,7 +232,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
/* Initialize Root lock access, e_lock, to Root Error Status Reg */
spin_lock_init(&rpc->e_lock);
- rpc->rpd = dev;
+ rpc->rpd = dev->port;
INIT_WORK(&rpc->dpc_handler, aer_isr);
mutex_init(&rpc->rpc_mutex);
@@ -353,10 +353,7 @@ static void aer_error_resume(struct pci_dev *dev)
pos = dev->aer_cap;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
- if (dev->error_state == pci_channel_io_normal)
- status &= ~mask; /* Clear corresponding nonfatal bits */
- else
- status &= mask; /* Clear corresponding fatal bits */
+ status &= ~mask; /* Clear corresponding nonfatal bits */
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 08b4584f62fe..6e0ad9a68fd9 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -58,7 +58,7 @@ struct aer_err_source {
};
struct aer_rpc {
- struct pcie_device *rpd; /* Root Port device */
+ struct pci_dev *rpd; /* Root Port device */
struct work_struct dpc_handler;
struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
struct aer_err_info e_info;
@@ -76,36 +76,6 @@ struct aer_rpc {
*/
};
-struct aer_broadcast_data {
- enum pci_channel_state state;
- enum pci_ers_result result;
-};
-
-static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
- enum pci_ers_result new)
-{
- if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
- return PCI_ERS_RESULT_NO_AER_DRIVER;
-
- if (new == PCI_ERS_RESULT_NONE)
- return orig;
-
- switch (orig) {
- case PCI_ERS_RESULT_CAN_RECOVER:
- case PCI_ERS_RESULT_RECOVERED:
- orig = new;
- break;
- case PCI_ERS_RESULT_DISCONNECT:
- if (new == PCI_ERS_RESULT_NEED_RESET)
- orig = PCI_ERS_RESULT_NEED_RESET;
- break;
- default:
- break;
- }
-
- return orig;
-}
-
extern struct bus_type pcie_port_bus_type;
void aer_isr(struct work_struct *work);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 0ea5acc40323..42d4f3f32282 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/kfifo.h>
#include "aerdrv.h"
+#include "../../pci.h"
#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
@@ -227,329 +228,14 @@ static bool find_source_device(struct pci_dev *parent,
return true;
}
-static int report_error_detected(struct pci_dev *dev, void *data)
-{
- pci_ers_result_t vote;
- const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
- result_data = (struct aer_broadcast_data *) data;
-
- device_lock(&dev->dev);
- dev->error_state = result_data->state;
-
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->error_detected) {
- if (result_data->state == pci_channel_io_frozen &&
- dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
- /*
- * In case of fatal recovery, if one of down-
- * stream device has no driver. We might be
- * unable to recover because a later insmod
- * of a driver for this device is unaware of
- * its hw state.
- */
- pci_printk(KERN_DEBUG, dev, "device has %s\n",
- dev->driver ?
- "no AER-aware driver" : "no driver");
- }
-
- /*
- * If there's any device in the subtree that does not
- * have an error_detected callback, returning
- * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
- * the subsequent mmio_enabled/slot_reset/resume
- * callbacks of "any" device in the subtree. All the
- * devices in the subtree are left in the error state
- * without recovery.
- */
-
- if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
- vote = PCI_ERS_RESULT_NO_AER_DRIVER;
- else
- vote = PCI_ERS_RESULT_NONE;
- } else {
- err_handler = dev->driver->err_handler;
- vote = err_handler->error_detected(dev, result_data->state);
- pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
- }
-
- result_data->result = merge_result(result_data->result, vote);
- device_unlock(&dev->dev);
- return 0;
-}
-
-static int report_mmio_enabled(struct pci_dev *dev, void *data)
-{
- pci_ers_result_t vote;
- const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
- result_data = (struct aer_broadcast_data *) data;
-
- device_lock(&dev->dev);
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->mmio_enabled)
- goto out;
-
- err_handler = dev->driver->err_handler;
- vote = err_handler->mmio_enabled(dev);
- result_data->result = merge_result(result_data->result, vote);
-out:
- device_unlock(&dev->dev);
- return 0;
-}
-
-static int report_slot_reset(struct pci_dev *dev, void *data)
-{
- pci_ers_result_t vote;
- const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
- result_data = (struct aer_broadcast_data *) data;
-
- device_lock(&dev->dev);
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->slot_reset)
- goto out;
-
- err_handler = dev->driver->err_handler;
- vote = err_handler->slot_reset(dev);
- result_data->result = merge_result(result_data->result, vote);
-out:
- device_unlock(&dev->dev);
- return 0;
-}
-
-static int report_resume(struct pci_dev *dev, void *data)
-{
- const struct pci_error_handlers *err_handler;
-
- device_lock(&dev->dev);
- dev->error_state = pci_channel_io_normal;
-
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->resume)
- goto out;
-
- err_handler = dev->driver->err_handler;
- err_handler->resume(dev);
- pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
-out:
- device_unlock(&dev->dev);
- return 0;
-}
-
-/**
- * broadcast_error_message - handle message broadcast to downstream drivers
- * @dev: pointer to from where in a hierarchy message is broadcasted down
- * @state: error state
- * @error_mesg: message to print
- * @cb: callback to be broadcasted
- *
- * Invoked during error recovery process. Once being invoked, the content
- * of error severity will be broadcasted to all downstream drivers in a
- * hierarchy in question.
- */
-static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
- enum pci_channel_state state,
- char *error_mesg,
- int (*cb)(struct pci_dev *, void *))
-{
- struct aer_broadcast_data result_data;
-
- pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
- result_data.state = state;
- if (cb == report_error_detected)
- result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
- else
- result_data.result = PCI_ERS_RESULT_RECOVERED;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /*
- * If the error is reported by a bridge, we think this error
- * is related to the downstream link of the bridge, so we
- * do error recovery on all subordinates of the bridge instead
- * of the bridge and clear the error status of the bridge.
- */
- if (cb == report_error_detected)
- dev->error_state = state;
- pci_walk_bus(dev->subordinate, cb, &result_data);
- if (cb == report_resume) {
- pci_cleanup_aer_uncorrect_error_status(dev);
- dev->error_state = pci_channel_io_normal;
- }
- } else {
- /*
- * If the error is reported by an end point, we think this
- * error is related to the upstream link of the end point.
- */
- if (state == pci_channel_io_normal)
- /*
- * the error is non fatal so the bus is ok, just invoke
- * the callback for the function that logged the error.
- */
- cb(dev, &result_data);
- else
- pci_walk_bus(dev->bus, cb, &result_data);
- }
-
- return result_data.result;
-}
-
-/**
- * default_reset_link - default reset function
- * @dev: pointer to pci_dev data structure
- *
- * Invoked when performing link reset on a Downstream Port or a
- * Root Port with no aer driver.
- */
-static pci_ers_result_t default_reset_link(struct pci_dev *dev)
-{
- pci_reset_bridge_secondary_bus(dev);
- pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static int find_aer_service_iter(struct device *device, void *data)
-{
- struct pcie_port_service_driver *service_driver, **drv;
-
- drv = (struct pcie_port_service_driver **) data;
-
- if (device->bus == &pcie_port_bus_type && device->driver) {
- service_driver = to_service_driver(device->driver);
- if (service_driver->service == PCIE_PORT_SERVICE_AER) {
- *drv = service_driver;
- return 1;
- }
- }
-
- return 0;
-}
-
-static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
-{
- struct pcie_port_service_driver *drv = NULL;
-
- device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
-
- return drv;
-}
-
-static pci_ers_result_t reset_link(struct pci_dev *dev)
-{
- struct pci_dev *udev;
- pci_ers_result_t status;
- struct pcie_port_service_driver *driver;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /* Reset this port for all subordinates */
- udev = dev;
- } else {
- /* Reset the upstream component (likely downstream port) */
- udev = dev->bus->self;
- }
-
- /* Use the aer driver of the component firstly */
- driver = find_aer_service(udev);
-
- if (driver && driver->reset_link) {
- status = driver->reset_link(udev);
- } else if (udev->has_secondary_link) {
- status = default_reset_link(udev);
- } else {
- pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
- pci_name(udev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- if (status != PCI_ERS_RESULT_RECOVERED) {
- pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
- pci_name(udev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return status;
-}
-
-/**
- * do_recovery - handle nonfatal/fatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- * @severity: error severity type
- *
- * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
- * error detected message to all downstream drivers within a hierarchy in
- * question and return the returned code.
- */
-static void do_recovery(struct pci_dev *dev, int severity)
-{
- pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
- enum pci_channel_state state;
-
- if (severity == AER_FATAL)
- state = pci_channel_io_frozen;
- else
- state = pci_channel_io_normal;
-
- status = broadcast_error_message(dev,
- state,
- "error_detected",
- report_error_detected);
-
- if (severity == AER_FATAL) {
- result = reset_link(dev);
- if (result != PCI_ERS_RESULT_RECOVERED)
- goto failed;
- }
-
- if (status == PCI_ERS_RESULT_CAN_RECOVER)
- status = broadcast_error_message(dev,
- state,
- "mmio_enabled",
- report_mmio_enabled);
-
- if (status == PCI_ERS_RESULT_NEED_RESET) {
- /*
- * TODO: Should call platform-specific
- * functions to reset slot before calling
- * drivers' slot_reset callbacks?
- */
- status = broadcast_error_message(dev,
- state,
- "slot_reset",
- report_slot_reset);
- }
-
- if (status != PCI_ERS_RESULT_RECOVERED)
- goto failed;
-
- broadcast_error_message(dev,
- state,
- "resume",
- report_resume);
-
- pci_info(dev, "AER: Device recovery successful\n");
- return;
-
-failed:
- pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
- /* TODO: Should kernel panic here? */
- pci_info(dev, "AER: Device recovery failed\n");
-}
-
/**
* handle_error_source - handle logging error into an event log
- * @aerdev: pointer to pcie_device data structure of the root port
* @dev: pointer to pci_dev data structure of error source device
* @info: comprehensive error information
*
* Invoked when an error being detected by Root Port.
*/
-static void handle_error_source(struct pcie_device *aerdev,
- struct pci_dev *dev,
- struct aer_err_info *info)
+static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
{
int pos;
@@ -562,12 +248,13 @@ static void handle_error_source(struct pcie_device *aerdev,
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
- } else
- do_recovery(dev, info->severity);
+ } else if (info->severity == AER_NONFATAL)
+ pcie_do_nonfatal_recovery(dev);
+ else if (info->severity == AER_FATAL)
+ pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
-static void aer_recover_work_func(struct work_struct *work);
#define AER_RECOVER_RING_ORDER 4
#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
@@ -582,6 +269,30 @@ struct aer_recover_entry {
static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
AER_RECOVER_RING_SIZE);
+
+static void aer_recover_work_func(struct work_struct *work)
+{
+ struct aer_recover_entry entry;
+ struct pci_dev *pdev;
+
+ while (kfifo_get(&aer_recover_ring, &entry)) {
+ pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
+ entry.devfn);
+ if (!pdev) {
+ pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ continue;
+ }
+ cper_print_aer(pdev, entry.severity, entry.regs);
+ if (entry.severity == AER_NONFATAL)
+ pcie_do_nonfatal_recovery(pdev);
+ else if (entry.severity == AER_FATAL)
+ pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
+ pci_dev_put(pdev);
+ }
+}
+
/*
* Mutual exclusion for writers of aer_recover_ring, reader side don't
* need lock, because there is only one reader and lock is not needed
@@ -611,27 +322,6 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
-
-static void aer_recover_work_func(struct work_struct *work)
-{
- struct aer_recover_entry entry;
- struct pci_dev *pdev;
-
- while (kfifo_get(&aer_recover_ring, &entry)) {
- pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
- entry.devfn);
- if (!pdev) {
- pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
- entry.domain, entry.bus,
- PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
- continue;
- }
- cper_print_aer(pdev, entry.severity, entry.regs);
- if (entry.severity != AER_CORRECTABLE)
- do_recovery(pdev, entry.severity);
- pci_dev_put(pdev);
- }
-}
#endif
/**
@@ -695,8 +385,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return 1;
}
-static inline void aer_process_err_devices(struct pcie_device *p_device,
- struct aer_err_info *e_info)
+static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
int i;
@@ -707,19 +396,19 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (get_device_error_info(e_info->dev[i], e_info))
- handle_error_source(p_device, e_info->dev[i], e_info);
+ handle_error_source(e_info->dev[i], e_info);
}
}
/**
* aer_isr_one_error - consume an error detected by root port
- * @p_device: pointer to error root port service device
+ * @rpc: pointer to the root port which holds an error
* @e_src: pointer to an error source
*/
-static void aer_isr_one_error(struct pcie_device *p_device,
+static void aer_isr_one_error(struct aer_rpc *rpc,
struct aer_err_source *e_src)
{
- struct aer_rpc *rpc = get_service_data(p_device);
+ struct pci_dev *pdev = rpc->rpd;
struct aer_err_info *e_info = &rpc->e_info;
/*
@@ -734,11 +423,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
e_info->multi_error_valid = 1;
else
e_info->multi_error_valid = 0;
+ aer_print_port_info(pdev, e_info);
- aer_print_port_info(p_device->port, e_info);
-
- if (find_source_device(p_device->port, e_info))
- aer_process_err_devices(p_device, e_info);
+ if (find_source_device(pdev, e_info))
+ aer_process_err_devices(e_info);
}
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
@@ -754,10 +442,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
else
e_info->multi_error_valid = 0;
- aer_print_port_info(p_device->port, e_info);
+ aer_print_port_info(pdev, e_info);
- if (find_source_device(p_device->port, e_info))
- aer_process_err_devices(p_device, e_info);
+ if (find_source_device(pdev, e_info))
+ aer_process_err_devices(e_info);
}
}
@@ -799,11 +487,10 @@ static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
void aer_isr(struct work_struct *work)
{
struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
- struct pcie_device *p_device = rpc->rpd;
struct aer_err_source uninitialized_var(e_src);
mutex_lock(&rpc->rpc_mutex);
while (get_e_source(rpc, &e_src))
- aer_isr_one_error(p_device, &e_src);
+ aer_isr_one_error(rpc, &e_src);
mutex_unlock(&rpc->rpc_mutex);
}
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index cfc89dd57831..4985bdf64c2e 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -163,17 +163,17 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
int id = ((dev->bus->number << 8) | dev->devfn);
if (!info->status) {
- pci_err(dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n",
- aer_error_severity_string[info->severity], id);
+ pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
+ aer_error_severity_string[info->severity]);
goto out;
}
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
- aer_error_layer[layer], id, aer_agent_string[agent]);
+ aer_error_layer[layer], aer_agent_string[agent]);
pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
dev->vendor, dev->device,
@@ -186,17 +186,21 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
- pci_err(dev, " Error of this Agent(%04x) is reported first\n", id);
+ pci_err(dev, " Error of this Agent is reported first\n");
trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
- info->severity);
+ info->severity, info->tlp_header_valid, &info->tlp);
}
void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
{
- pci_info(dev, "AER: %s%s error received: id=%04x\n",
+ u8 bus = info->id >> 8;
+ u8 devfn = info->id & 0xff;
+
+ pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity], info->id);
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -216,28 +220,30 @@ EXPORT_SYMBOL_GPL(cper_severity_to_aer);
void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer)
{
- int layer, agent, status_strs_size, tlp_header_valid = 0;
+ int layer, agent, tlp_header_valid = 0;
u32 status, mask;
- const char **status_strs;
+ struct aer_err_info info;
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
- status_strs = aer_correctable_error_string;
- status_strs_size = ARRAY_SIZE(aer_correctable_error_string);
} else {
status = aer->uncor_status;
mask = aer->uncor_mask;
- status_strs = aer_uncorrectable_error_string;
- status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
tlp_header_valid = status & AER_LOG_TLP_MASKS;
}
layer = AER_GET_LAYER_ERROR(aer_severity, status);
agent = AER_GET_AGENT(aer_severity, status);
+ memset(&info, 0, sizeof(info));
+ info.severity = aer_severity;
+ info.status = status;
+ info.mask = mask;
+ info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
+
pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
- cper_print_bits("", status, status_strs, status_strs_size);
+ __aer_print_error(dev, &info);
pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
aer_error_layer[layer], aer_agent_string[agent]);
@@ -249,6 +255,6 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
__print_tlp_header(dev, &aer->header_log);
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
- aer_severity);
+ aer_severity, tlp_header_valid, &aer->header_log);
}
#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f76eb7704f64..c687c817b47d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -400,6 +400,15 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
info->l1ss_cap = 0;
return;
}
+
+ /*
+ * If we don't have LTR for the entire path from the Root Complex
+ * to this device, we can't use ASPM L1.2 because it relies on the
+ * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
+ */
+ if (!pdev->ltr_path)
+ info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
&info->l1ss_ctl1);
pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 8c57d607e603..d6436681c535 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -68,44 +68,35 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
static void dpc_wait_link_inactive(struct dpc_dev *dpc)
{
- unsigned long timeout = jiffies + HZ;
struct pci_dev *pdev = dpc->dev->port;
- struct device *dev = &dpc->dev->device;
- u16 lnk_status;
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- while (lnk_status & PCI_EXP_LNKSTA_DLLLA &&
- !time_after(jiffies, timeout)) {
- msleep(10);
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- }
- if (lnk_status & PCI_EXP_LNKSTA_DLLLA)
- dev_warn(dev, "Link state not disabled for DPC event\n");
+ pcie_wait_for_link(pdev, false);
}
-static void dpc_work(struct work_struct *work)
+static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
- struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
- struct pci_dev *dev, *temp, *pdev = dpc->dev->port;
- struct pci_bus *parent = pdev->subordinate;
- u16 cap = dpc->cap_pos, ctl;
-
- pci_lock_rescan_remove();
- list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
- bus_list) {
- pci_dev_get(dev);
- pci_dev_set_disconnected(dev, NULL);
- if (pci_has_subordinate(dev))
- pci_walk_bus(dev->subordinate,
- pci_dev_set_disconnected, NULL);
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
- pci_unlock_rescan_remove();
-
+ struct dpc_dev *dpc;
+ struct pcie_device *pciedev;
+ struct device *devdpc;
+ u16 cap, ctl;
+
+ /*
+ * DPC disables the Link automatically in hardware, so it has
+ * already been reset by the time we get here.
+ */
+ devdpc = pcie_port_find_device(pdev, PCIE_PORT_SERVICE_DPC);
+ pciedev = to_pcie_device(devdpc);
+ dpc = get_service_data(pciedev);
+ cap = dpc->cap_pos;
+
+ /*
+ * Wait until the Link is inactive, then clear DPC Trigger Status
+ * to allow the Port to leave DPC.
+ */
dpc_wait_link_inactive(dpc);
+
if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
- return;
+ return PCI_ERS_RESULT_DISCONNECT;
if (dpc->rp_extensions && dpc->rp_pio_status) {
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS,
dpc->rp_pio_status);
@@ -113,11 +104,22 @@ static void dpc_work(struct work_struct *work)
}
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
- PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT);
+ PCI_EXP_DPC_STATUS_TRIGGER);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
ctl | PCI_EXP_DPC_CTL_INT_EN);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void dpc_work(struct work_struct *work)
+{
+ struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
+ struct pci_dev *pdev = dpc->dev->port;
+
+ /* We configure DPC so it only triggers on ERR_FATAL */
+ pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
}
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
@@ -223,6 +225,9 @@ static irqreturn_t dpc_irq(int irq, void *context)
if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
dpc_process_rp_pio_error(dpc);
+ pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_INTERRUPT);
+
schedule_work(&dpc->work);
return IRQ_HANDLED;
@@ -270,7 +275,7 @@ static int dpc_probe(struct pcie_device *dev)
}
}
- ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
+ ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
@@ -288,7 +293,7 @@ static void dpc_remove(struct pcie_device *dev)
u16 ctl;
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
- ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN);
+ ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
}
@@ -298,6 +303,7 @@ static struct pcie_port_service_driver dpcdriver = {
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
.remove = dpc_remove,
+ .reset_link = dpc_reset_link,
};
static int __init dpc_service_init(void)
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
new file mode 100644
index 000000000000..f7ce0cb0b0b7
--- /dev/null
+++ b/drivers/pci/pcie/err.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file implements the error recovery as a core part of PCIe error
+ * reporting. When a PCIe error is delivered, an error message will be
+ * collected and printed to console, then, an error recovery procedure
+ * will be executed by following the PCI error recovery rules.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/aer.h>
+#include "portdrv.h"
+#include "../pci.h"
+
+struct aer_broadcast_data {
+ enum pci_channel_state state;
+ enum pci_ers_result result;
+};
+
+static pci_ers_result_t merge_result(enum pci_ers_result orig,
+ enum pci_ers_result new)
+{
+ if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
+ return PCI_ERS_RESULT_NO_AER_DRIVER;
+
+ if (new == PCI_ERS_RESULT_NONE)
+ return orig;
+
+ switch (orig) {
+ case PCI_ERS_RESULT_CAN_RECOVER:
+ case PCI_ERS_RESULT_RECOVERED:
+ orig = new;
+ break;
+ case PCI_ERS_RESULT_DISCONNECT:
+ if (new == PCI_ERS_RESULT_NEED_RESET)
+ orig = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ default:
+ break;
+ }
+
+ return orig;
+}
+
+static int report_error_detected(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ dev->error_state = result_data->state;
+
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->error_detected) {
+ if (result_data->state == pci_channel_io_frozen &&
+ dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * In case of fatal recovery, if one of the
+ * downstream devices has no driver, we may
+ * be unable to recover because a later insmod
+ * of a driver for this device would be
+ * unaware of its hardware state.
+ */
+ pci_printk(KERN_DEBUG, dev, "device has %s\n",
+ dev->driver ?
+ "no AER-aware driver" : "no driver");
+ }
+
+ /*
+ * If there's any device in the subtree that does not
+ * have an error_detected callback, returning
+ * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
+ * the subsequent mmio_enabled/slot_reset/resume
+ * callbacks of "any" device in the subtree. All the
+ * devices in the subtree are left in the error state
+ * without recovery.
+ */
+
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+ vote = PCI_ERS_RESULT_NO_AER_DRIVER;
+ else
+ vote = PCI_ERS_RESULT_NONE;
+ } else {
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->error_detected(dev, result_data->state);
+ pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
+ }
+
+ result_data->result = merge_result(result_data->result, vote);
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->mmio_enabled)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->mmio_enabled(dev);
+ result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_slot_reset(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->slot_reset)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->slot_reset(dev);
+ result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_resume(struct pci_dev *dev, void *data)
+{
+ const struct pci_error_handlers *err_handler;
+
+ device_lock(&dev->dev);
+ dev->error_state = pci_channel_io_normal;
+
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->resume)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ err_handler->resume(dev);
+ pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+/**
+ * default_reset_link - default reset function
+ * @dev: pointer to pci_dev data structure
+ *
+ * Invoked when performing link reset on a Downstream Port or a
+ * Root Port with no aer driver.
+ */
+static pci_ers_result_t default_reset_link(struct pci_dev *dev)
+{
+ pci_reset_bridge_secondary_bus(dev);
+ pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
+{
+ struct pci_dev *udev;
+ pci_ers_result_t status;
+ struct pcie_port_service_driver *driver = NULL;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ /* Reset this port for all subordinates */
+ udev = dev;
+ } else {
+ /* Reset the upstream component (likely downstream port) */
+ udev = dev->bus->self;
+ }
+
+ /* Prefer the AER driver of the component itself, if present */
+ driver = pcie_port_find_service(udev, service);
+
+ if (driver && driver->reset_link) {
+ status = driver->reset_link(udev);
+ } else if (udev->has_secondary_link) {
+ status = default_reset_link(udev);
+ } else {
+ pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (status != PCI_ERS_RESULT_RECOVERED) {
+ pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return status;
+}
+
+/**
+ * broadcast_error_message - handle message broadcast to downstream drivers
+ * @dev: device from which the message is broadcast down the hierarchy
+ * @state: error state
+ * @error_mesg: message to print
+ * @cb: callback invoked for each device in the hierarchy
+ *
+ * Invoked during the error recovery process. The error severity is
+ * broadcast to all downstream drivers in the hierarchy below the
+ * device in question.
+ */
+static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
+ enum pci_channel_state state,
+ char *error_mesg,
+ int (*cb)(struct pci_dev *, void *))
+{
+ struct aer_broadcast_data result_data;
+
+ pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
+ result_data.state = state;
+ if (cb == report_error_detected)
+ result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
+ else
+ result_data.result = PCI_ERS_RESULT_RECOVERED;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * If the error is reported by a bridge, we think this error
+ * is related to the downstream link of the bridge, so we
+ * do error recovery on all subordinates of the bridge instead
+ * of the bridge and clear the error status of the bridge.
+ */
+ if (cb == report_error_detected)
+ dev->error_state = state;
+ pci_walk_bus(dev->subordinate, cb, &result_data);
+ if (cb == report_resume) {
+ pci_cleanup_aer_uncorrect_error_status(dev);
+ dev->error_state = pci_channel_io_normal;
+ }
+ } else {
+ /*
+ * If the error is reported by an end point, we think this
+ * error is related to the upstream link of the end point.
+ */
+ if (state == pci_channel_io_normal)
+ /*
+ * the error is non fatal so the bus is ok, just invoke
+ * the callback for the function that logged the error.
+ */
+ cb(dev, &result_data);
+ else
+ pci_walk_bus(dev->bus, cb, &result_data);
+ }
+
+ return result_data.result;
+}
+
+/**
+ * pcie_do_fatal_recovery - handle fatal error recovery process
+ * @dev: pointer to a pci_dev data structure of agent detecting an error
+ *
+ * Invoked when an error is fatal. Removes the devices beneath this
+ * AER agent, resets the link (e.g. via secondary bus reset), and then
+ * re-enumerates the devices.
+ */
+void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
+{
+ struct pci_dev *udev;
+ struct pci_bus *parent;
+ struct pci_dev *pdev, *temp;
+ pci_ers_result_t result;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ udev = dev;
+ else
+ udev = dev->bus->self;
+
+ parent = udev->subordinate;
+ pci_lock_rescan_remove();
+ list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
+ bus_list) {
+ pci_dev_get(pdev);
+ pci_dev_set_disconnected(pdev, NULL);
+ if (pci_has_subordinate(pdev))
+ pci_walk_bus(pdev->subordinate,
+ pci_dev_set_disconnected, NULL);
+ pci_stop_and_remove_bus_device(pdev);
+ pci_dev_put(pdev);
+ }
+
+ result = reset_link(udev, service);
+
+ if ((service == PCIE_PORT_SERVICE_AER) &&
+ (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
+ /*
+ * If the error is reported by a bridge, we think this error
+ * is related to the downstream link of the bridge, so we
+ * do error recovery on all subordinates of the bridge instead
+ * of the bridge and clear the error status of the bridge.
+ */
+ pci_cleanup_aer_uncorrect_error_status(dev);
+ }
+
+ if (result == PCI_ERS_RESULT_RECOVERED) {
+ if (pcie_wait_for_link(udev, true))
+ pci_rescan_bus(udev->bus);
+ pci_info(dev, "Device recovery from fatal error successful\n");
+ } else {
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+ pci_info(dev, "Device recovery from fatal error failed\n");
+ }
+
+ pci_unlock_rescan_remove();
+}
+
+/**
+ * pcie_do_nonfatal_recovery - handle nonfatal error recovery process
+ * @dev: pointer to a pci_dev data structure of agent detecting an error
+ *
+ * Invoked when an error is nonfatal. Broadcasts an error_detected
+ * message to all downstream drivers within the hierarchy in question
+ * and drives the standard recovery sequence based on their responses.
+ */
+void pcie_do_nonfatal_recovery(struct pci_dev *dev)
+{
+ pci_ers_result_t status;
+ enum pci_channel_state state;
+
+ state = pci_channel_io_normal;
+
+ status = broadcast_error_message(dev,
+ state,
+ "error_detected",
+ report_error_detected);
+
+ if (status == PCI_ERS_RESULT_CAN_RECOVER)
+ status = broadcast_error_message(dev,
+ state,
+ "mmio_enabled",
+ report_mmio_enabled);
+
+ if (status == PCI_ERS_RESULT_NEED_RESET) {
+ /*
+ * TODO: Should call platform-specific
+ * functions to reset slot before calling
+ * drivers' slot_reset callbacks?
+ */
+ status = broadcast_error_message(dev,
+ state,
+ "slot_reset",
+ report_slot_reset);
+ }
+
+ if (status != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
+
+ broadcast_error_message(dev,
+ state,
+ "resume",
+ report_resume);
+
+ pci_info(dev, "AER: Device recovery successful\n");
+ return;
+
+failed:
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+
+ /* TODO: Should kernel panic here? */
+ pci_info(dev, "AER: Device recovery failed\n");
+}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d0c6783dbfe3..2bb5db7b53e6 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,8 +11,6 @@
#include <linux/compiler.h>
-extern bool pcie_ports_native;
-
/* Service Type */
#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
@@ -112,4 +110,7 @@ static inline bool pcie_pme_no_msi(void) { return false; }
static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
+struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
+ u32 service);
+struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
deleted file mode 100644
index 8ab5d434b9c6..000000000000
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PCIe Port Native Services Support, ACPI-Related Part
- *
- * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-
-#include "aer/aerdrv.h"
-#include "../pci.h"
-#include "portdrv.h"
-
-/**
- * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
- * @port: PCIe Port service for a root port or event collector.
- * @srv_mask: Bit mask of services that can be enabled for @port.
- *
- * Invoked when @port is identified as a PCIe port device. To avoid conflicts
- * with the BIOS PCIe port native services support requires the BIOS to yield
- * control of these services to the kernel. The mask of services that the BIOS
- * allows to be enabled for @port is written to @srv_mask.
- *
- * NOTE: It turns out that we cannot do that for individual port services
- * separately, because that would make some systems work incorrectly.
- */
-void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
-{
- struct acpi_pci_root *root;
- acpi_handle handle;
- u32 flags;
-
- if (acpi_pci_disabled)
- return;
-
- handle = acpi_find_root_bridge_handle(port);
- if (!handle)
- return;
-
- root = acpi_pci_find_root(handle);
- if (!root)
- return;
-
- flags = root->osc_control_set;
-
- *srv_mask = 0;
- if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_HP;
- if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_PME;
- if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
-}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index c9c0663db282..e0261ad4bcdd 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -19,6 +19,12 @@
#include "../pci.h"
#include "portdrv.h"
+struct portdrv_service_data {
+ struct pcie_port_service_driver *drv;
+ struct device *dev;
+ u32 service;
+};
+
/**
* release_pcie_device - free PCI Express port service device structure
* @dev: Port service device to release
@@ -199,7 +205,7 @@ static int get_port_device_capability(struct pci_dev *dev)
int services = 0;
if (dev->is_hotplug_bridge &&
- (pcie_ports_native || host->native_hotplug)) {
+ (pcie_ports_native || host->native_pcie_hotplug)) {
services |= PCIE_PORT_SERVICE_HP;
/*
@@ -398,6 +404,69 @@ static int remove_iter(struct device *dev, void *data)
return 0;
}
+static int find_service_iter(struct device *device, void *data)
+{
+ struct pcie_port_service_driver *service_driver;
+ struct portdrv_service_data *pdrvs;
+ u32 service;
+
+ pdrvs = (struct portdrv_service_data *) data;
+ service = pdrvs->service;
+
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ service_driver = to_service_driver(device->driver);
+ if (service_driver->service == service) {
+ pdrvs->drv = service_driver;
+ pdrvs->dev = device;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pcie_port_find_service - find the service driver
+ * @dev: PCI Express port the service is associated with
+ * @service: Service to find
+ *
+ * Find PCI Express port service driver associated with given service
+ */
+struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
+ u32 service)
+{
+ struct pcie_port_service_driver *drv;
+ struct portdrv_service_data pdrvs;
+
+ pdrvs.drv = NULL;
+ pdrvs.service = service;
+ device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
+
+ drv = pdrvs.drv;
+ return drv;
+}
+
+/**
+ * pcie_port_find_device - find the struct device
+ * @dev: PCI Express port the service is associated with
+ * @service: For the service to find
+ *
+ * Find the struct device associated with given service on a pci_dev
+ */
+struct device *pcie_port_find_device(struct pci_dev *dev,
+ u32 service)
+{
+ struct device *device;
+ struct portdrv_service_data pdrvs;
+
+ pdrvs.dev = NULL;
+ pdrvs.service = service;
+ device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
+
+ device = pdrvs.dev;
+ return device;
+}
+
/**
* pcie_port_device_remove - unregister PCI Express port service devices
* @dev: PCI Express port the service devices to unregister are associated with
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ac91b6fd0bcd..ac876e32de4b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -526,12 +526,14 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
if (bridge->release_fn)
bridge->release_fn(bridge);
+
+ pci_free_resource_list(&bridge->windows);
}
static void pci_release_host_bridge_dev(struct device *dev)
{
devm_pci_release_host_bridge_dev(dev);
- pci_free_host_bridge(to_pci_host_bridge(dev));
+ kfree(to_pci_host_bridge(dev));
}
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -552,8 +554,10 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
* OS from interfering.
*/
bridge->native_aer = 1;
- bridge->native_hotplug = 1;
+ bridge->native_pcie_hotplug = 1;
+ bridge->native_shpc_hotplug = 1;
bridge->native_pme = 1;
+ bridge->native_ltr = 1;
return bridge;
}
@@ -882,6 +886,45 @@ free:
return err;
}
+static bool pci_bridge_child_ext_cfg_accessible(struct pci_dev *bridge)
+{
+ int pos;
+ u32 status;
+
+ /*
+ * If extended config space isn't accessible on a bridge's primary
+ * bus, we certainly can't access it on the secondary bus.
+ */
+ if (bridge->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
+ return false;
+
+ /*
+ * PCIe Root Ports and switch ports are PCIe on both sides, so if
+ * extended config space is accessible on the primary, it's also
+ * accessible on the secondary.
+ */
+ if (pci_is_pcie(bridge) &&
+ (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM ||
+ pci_pcie_type(bridge) == PCI_EXP_TYPE_DOWNSTREAM))
+ return true;
+
+ /*
+ * For the other bridge types:
+ * - PCI-to-PCI bridges
+ * - PCIe-to-PCI/PCI-X forward bridges
+ * - PCI/PCI-X-to-PCIe reverse bridges
+ * extended config space on the secondary side is only accessible
+ * if the bridge supports PCI-X Mode 2.
+ */
+ pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
+ if (!pos)
+ return false;
+
+ pci_read_config_dword(bridge, pos + PCI_X_STATUS, &status);
+ return status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ);
+}
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -923,6 +966,16 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
pci_set_bus_of_node(child);
pci_set_bus_speed(child);
+ /*
+ * Check whether extended config space is accessible on the child
+ * bus. Note that we currently assume it is always accessible on
+ * the root bus.
+ */
+ if (!pci_bridge_child_ext_cfg_accessible(bridge)) {
+ child->bus_flags |= PCI_BUS_FLAGS_NO_EXTCFG;
+ pci_info(child, "extended config space not accessible\n");
+ }
+
/* Set up default resource pointers and names */
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
@@ -998,6 +1051,8 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* already configured by the BIOS and after we are done with all of
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
+ *
+ * Return: New subordinate number covering all buses behind this bridge.
*/
static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
int max, unsigned int available_buses,
@@ -1188,20 +1243,15 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
pci_domain_nr(bus), child->number);
- /* Has only triggered on CardBus, fixup is in yenta_socket */
+ /* Check that all devices are accessible */
while (bus->parent) {
if ((child->busn_res.end > bus->busn_res.end) ||
(child->number > bus->busn_res.end) ||
(child->number < bus->number) ||
(child->busn_res.end < bus->number)) {
- dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
- &child->busn_res,
- (bus->number > child->busn_res.end &&
- bus->busn_res.end < child->number) ?
- "wholly" : "partially",
- bus->self->transparent ? " transparent" : "",
- dev_name(&bus->dev),
- &bus->busn_res);
+ dev_info(&dev->dev, "devices behind bridge are unusable because %pR cannot be assigned for them\n",
+ &child->busn_res);
+ break;
}
bus = bus->parent;
}
@@ -1230,6 +1280,8 @@ out:
* already configured by the BIOS and after we are done with all of
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
+ *
+ * Return: New subordinate number covering all buses behind this bridge.
*/
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
@@ -1393,6 +1445,9 @@ int pci_cfg_space_size(struct pci_dev *dev)
u32 status;
u16 class;
+ if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
+ return PCI_CFG_SPACE_SIZE;
+
class = dev->class >> 8;
if (class == PCI_CLASS_BRIDGE_HOST)
return pci_cfg_space_size_ext(dev);
@@ -1954,9 +2009,13 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
static void pci_configure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
u32 cap;
struct pci_dev *bridge;
+ if (!host->native_ltr)
+ return;
+
if (!pci_is_pcie(dev))
return;
@@ -2638,7 +2697,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
for_each_pci_bridge(dev, bus) {
cmax = max;
max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
- used_buses += cmax - max;
+
+ /*
+ * Reserve one bus for each bridge now to avoid extending
+ * hotplug bridges too much during the second scan below.
+ */
+ used_buses++;
+ if (cmax - max > 1)
+ used_buses += cmax - max - 1;
}
/* Scan bridges that need to be reconfigured */
@@ -2661,12 +2727,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* bridges if any.
*/
buses = available_buses / hotplug_bridges;
- buses = min(buses, available_buses - used_buses);
+ buses = min(buses, available_buses - used_buses + 1);
}
cmax = max;
max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
- used_buses += max - cmax;
+ /* One bus is already accounted so don't add it again */
+ if (max - cmax > 1)
+ used_buses += max - cmax - 1;
}
/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2990ad1e7c99..f439de848658 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -30,6 +30,162 @@
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
+static ktime_t fixup_debug_start(struct pci_dev *dev,
+ void (*fn)(struct pci_dev *dev))
+{
+ if (initcall_debug)
+ pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current));
+
+ return ktime_get();
+}
+
+static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
+ void (*fn)(struct pci_dev *dev))
+{
+ ktime_t delta, rettime;
+ unsigned long long duration;
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ if (initcall_debug || duration > 10000)
+ pci_info(dev, "%pF took %lld usecs\n", fn, duration);
+}
+
+static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ struct pci_fixup *end)
+{
+ ktime_t calltime;
+
+ for (; f < end; f++)
+ if ((f->class == (u32) (dev->class >> f->class_shift) ||
+ f->class == (u32) PCI_ANY_ID) &&
+ (f->vendor == dev->vendor ||
+ f->vendor == (u16) PCI_ANY_ID) &&
+ (f->device == dev->device ||
+ f->device == (u16) PCI_ANY_ID)) {
+ calltime = fixup_debug_start(dev, f->hook);
+ f->hook(dev);
+ fixup_debug_report(dev, calltime, f->hook);
+ }
+}
+
+extern struct pci_fixup __start_pci_fixups_early[];
+extern struct pci_fixup __end_pci_fixups_early[];
+extern struct pci_fixup __start_pci_fixups_header[];
+extern struct pci_fixup __end_pci_fixups_header[];
+extern struct pci_fixup __start_pci_fixups_final[];
+extern struct pci_fixup __end_pci_fixups_final[];
+extern struct pci_fixup __start_pci_fixups_enable[];
+extern struct pci_fixup __end_pci_fixups_enable[];
+extern struct pci_fixup __start_pci_fixups_resume[];
+extern struct pci_fixup __end_pci_fixups_resume[];
+extern struct pci_fixup __start_pci_fixups_resume_early[];
+extern struct pci_fixup __end_pci_fixups_resume_early[];
+extern struct pci_fixup __start_pci_fixups_suspend[];
+extern struct pci_fixup __end_pci_fixups_suspend[];
+extern struct pci_fixup __start_pci_fixups_suspend_late[];
+extern struct pci_fixup __end_pci_fixups_suspend_late[];
+
+static bool pci_apply_fixup_final_quirks;
+
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+{
+ struct pci_fixup *start, *end;
+
+ switch (pass) {
+ case pci_fixup_early:
+ start = __start_pci_fixups_early;
+ end = __end_pci_fixups_early;
+ break;
+
+ case pci_fixup_header:
+ start = __start_pci_fixups_header;
+ end = __end_pci_fixups_header;
+ break;
+
+ case pci_fixup_final:
+ if (!pci_apply_fixup_final_quirks)
+ return;
+ start = __start_pci_fixups_final;
+ end = __end_pci_fixups_final;
+ break;
+
+ case pci_fixup_enable:
+ start = __start_pci_fixups_enable;
+ end = __end_pci_fixups_enable;
+ break;
+
+ case pci_fixup_resume:
+ start = __start_pci_fixups_resume;
+ end = __end_pci_fixups_resume;
+ break;
+
+ case pci_fixup_resume_early:
+ start = __start_pci_fixups_resume_early;
+ end = __end_pci_fixups_resume_early;
+ break;
+
+ case pci_fixup_suspend:
+ start = __start_pci_fixups_suspend;
+ end = __end_pci_fixups_suspend;
+ break;
+
+ case pci_fixup_suspend_late:
+ start = __start_pci_fixups_suspend_late;
+ end = __end_pci_fixups_suspend_late;
+ break;
+
+ default:
+ /* stupid compiler warning, you would think with an enum... */
+ return;
+ }
+ pci_do_fixups(dev, start, end);
+}
+EXPORT_SYMBOL(pci_fixup_device);
+
+static int __init pci_apply_final_quirks(void)
+{
+ struct pci_dev *dev = NULL;
+ u8 cls = 0;
+ u8 tmp;
+
+ if (pci_cache_line_size)
+ printk(KERN_DEBUG "PCI: CLS %u bytes\n",
+ pci_cache_line_size << 2);
+
+ pci_apply_fixup_final_quirks = true;
+ for_each_pci_dev(dev) {
+ pci_fixup_device(pci_fixup_final, dev);
+ /*
+ * If arch hasn't set it explicitly yet, use the CLS
+ * value shared by all PCI devices. If there's a
+ * mismatch, fall back to the default value.
+ */
+ if (!pci_cache_line_size) {
+ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
+ if (!cls)
+ cls = tmp;
+ if (!tmp || cls == tmp)
+ continue;
+
+ printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
+ cls << 2, tmp << 2,
+ pci_dfl_cache_line_size << 2);
+ pci_cache_line_size = pci_dfl_cache_line_size;
+ }
+ }
+
+ if (!pci_cache_line_size) {
+ printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
+ cls << 2, pci_dfl_cache_line_size << 2);
+ pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
+ }
+
+ return 0;
+}
+fs_initcall_sync(pci_apply_final_quirks);
+
/*
* Decoding should be disabled for a PCI device during BAR sizing to avoid
* conflict. But doing so may cause problems on host bridge and perhaps other
@@ -43,9 +199,10 @@ static void quirk_mmio_always_on(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
-/* The Mellanox Tavor device gives false positive parity errors
- * Mark this device with a broken_parity_status, to allow
- * PCI scanning code to "skip" this now blacklisted device.
+/*
+ * The Mellanox Tavor device gives false positive parity errors. Mark this
+ * device with a broken_parity_status to allow PCI scanning code to "skip"
+ * this now blacklisted device.
*/
static void quirk_mellanox_tavor(struct pci_dev *dev)
{
@@ -54,15 +211,19 @@ static void quirk_mellanox_tavor(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
-/* Deal with broken BIOSes that neglect to enable passive release,
- which can cause problems in combination with the 82441FX/PPro MTRRs */
+/*
+ * Deal with broken BIOSes that neglect to enable passive release,
+ * which can cause problems in combination with the 82441FX/PPro MTRRs
+ */
static void quirk_passive_release(struct pci_dev *dev)
{
struct pci_dev *d = NULL;
unsigned char dlc;
- /* We have to make sure a particular bit is set in the PIIX3
- ISA bridge, so we have to go out and find it. */
+ /*
+ * We have to make sure a particular bit is set in the PIIX3
+ * ISA bridge, so we have to go out and find it.
+ */
while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
pci_read_config_byte(d, 0x82, &dlc);
if (!(dlc & 1<<1)) {
@@ -75,13 +236,14 @@ static void quirk_passive_release(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
-/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
- but VIA don't answer queries. If you happen to have good contacts at VIA
- ask them for me please -- Alan
-
- This appears to be BIOS not version dependent. So presumably there is a
- chipset level fix */
-
+/*
+ * The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a
+ * workaround but VIA don't answer queries. If you happen to have good
+ * contacts at VIA ask them for me please -- Alan
+ *
+ * This appears to be BIOS not version dependent. So presumably there is a
+ * chipset level fix.
+ */
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
if (!isa_dma_bridge_buggy) {
@@ -89,10 +251,10 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev)
pci_info(dev, "Activating ISA DMA hang workarounds\n");
}
}
- /*
- * Its not totally clear which chipsets are the problematic ones
- * We know 82C586 and 82C596 variants are affected.
- */
+/*
+ * It's not totally clear which chipsets are the problematic ones. We know
+ * 82C586 and 82C596 variants are affected.
+ */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
@@ -121,9 +283,7 @@ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
-/*
- * Chipsets where PCI->PCI transfers vanish or hang
- */
+/* Chipsets where PCI->PCI transfers vanish or hang */
static void quirk_nopcipci(struct pci_dev *dev)
{
if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
@@ -146,9 +306,7 @@ static void quirk_nopciamd(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
-/*
- * Triton requires workarounds to be used by the drivers
- */
+/* Triton requires workarounds to be used by the drivers */
static void quirk_triton(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
@@ -162,53 +320,62 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_tr
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
/*
- * VIA Apollo KT133 needs PCI latency patch
- * Made according to a windows driver based patch by George E. Breese
- * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
- * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
- * the info on which Mr Breese based his work.
+ * VIA Apollo KT133 needs PCI latency patch
+ * Made according to a Windows driver-based patch by George E. Breese;
+ * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
+ * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for the info on
+ * which Mr Breese based his work.
*
- * Updated based on further information from the site and also on
- * information provided by VIA
+ * Updated based on further information from the site and also on
+ * information provided by VIA
*/
static void quirk_vialatency(struct pci_dev *dev)
{
struct pci_dev *p;
u8 busarb;
- /* Ok we have a potential problem chipset here. Now see if we have
- a buggy southbridge */
+ /*
+ * Ok, we have a potential problem chipset here. Now see if we have
+ * a buggy southbridge.
+ */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
if (p != NULL) {
- /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
- /* Check for buggy part revisions */
+
+ /*
+ * 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A;
+ * thanks Dan Hollis.
+ * Check for buggy part revisions
+ */
if (p->revision < 0x40 || p->revision > 0x42)
goto exit;
} else {
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
if (p == NULL) /* No problem parts */
goto exit;
+
/* Check for buggy part revisions */
if (p->revision < 0x10 || p->revision > 0x12)
goto exit;
}
/*
- * Ok we have the problem. Now set the PCI master grant to
- * occur every master grant. The apparent bug is that under high
- * PCI load (quite common in Linux of course) you can get data
- * loss when the CPU is held off the bus for 3 bus master requests
- * This happens to include the IDE controllers....
+ * Ok we have the problem. Now set the PCI master grant to occur
+ * every master grant. The apparent bug is that under high PCI load
+ * (quite common in Linux of course) you can get data loss when the
+ * CPU is held off the bus for 3 bus master requests. This happens
+ * to include the IDE controllers....
*
- * VIA only apply this fix when an SB Live! is present but under
- * both Linux and Windows this isn't enough, and we have seen
- * corruption without SB Live! but with things like 3 UDMA IDE
- * controllers. So we ignore that bit of the VIA recommendation..
+ * VIA only apply this fix when an SB Live! is present but under
+ * both Linux and Windows this isn't enough, and we have seen
+ * corruption without SB Live! but with things like 3 UDMA IDE
+ * controllers. So we ignore that bit of the VIA recommendation..
*/
-
pci_read_config_byte(dev, 0x76, &busarb);
- /* Set bit 4 and bi 5 of byte 76 to 0x01
- "Master priority rotation on every PCI master grant */
+
+ /*
+ * Set bit 4 and bit 5 of byte 76 to 0x01
+ * "Master priority rotation on every PCI master grant"
+ */
busarb &= ~(1<<5);
busarb |= (1<<4);
pci_write_config_byte(dev, 0x76, busarb);
@@ -224,9 +391,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vial
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
-/*
- * VIA Apollo VP3 needs ETBF on BT848/878
- */
+/* VIA Apollo VP3 needs ETBF on BT848/878 */
static void quirk_viaetbf(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
@@ -246,10 +411,9 @@ static void quirk_vsfx(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
/*
- * Ali Magik requires workarounds to be used by the drivers
- * that DMA to AGP space. Latency must be set to 0xA and triton
- * workaround applied too
- * [Info kindly provided by ALi]
+ * ALi Magik requires workarounds to be used by the drivers that DMA to AGP
+ * space. Latency must be set to 0xA and Triton workaround applied too.
+ * [Info kindly provided by ALi]
*/
static void quirk_alimagik(struct pci_dev *dev)
{
@@ -261,10 +425,7 @@ static void quirk_alimagik(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
-/*
- * Natoma has some interesting boundary conditions with Zoran stuff
- * at least
- */
+/* Natoma has some interesting boundary conditions with Zoran stuff at least */
static void quirk_natoma(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
@@ -280,8 +441,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quir
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
/*
- * This chip can cause PCI parity errors if config register 0xA0 is read
- * while DMAs are occurring.
+ * This chip can cause PCI parity errors if config register 0xA0 is read
+ * while DMAs are occurring.
*/
static void quirk_citrine(struct pci_dev *dev)
{
@@ -321,8 +482,8 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
/*
- * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
- * If it's needed, re-allocate the region.
+ * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+ * If it's needed, re-allocate the region.
*/
static void quirk_s3_64M(struct pci_dev *dev)
{
@@ -413,8 +574,8 @@ static void quirk_io_region(struct pci_dev *dev, int port,
}
/*
- * ATI Northbridge setups MCE the processor if you even
- * read somewhere between 0x3b0->0x3bb or read 0x3d3
+ * ATI Northbridge setups MCE the processor if you even read somewhere
+ * between 0x3b0->0x3bb or read 0x3d3
*/
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
@@ -429,6 +590,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
* In the AMD NL platform, this device ([1022:7912]) has a class code of
* PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
* claim it.
+ *
* But the dwc3 driver is a more specific driver for this device, and we'd
* prefer to use it instead of xhci. To prevent xhci from claiming the
* device, change the class code to 0x0c03fe, which the PCI r3.0 spec
@@ -448,11 +610,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
quirk_amd_nl_class);
/*
- * Let's make the southbridge information explicit instead
- * of having to worry about people probing the ACPI areas,
- * for example.. (Yes, it happens, and if you read the wrong
- * ACPI register it will put the machine to sleep with no
- * way of waking it up again. Bummer).
+ * Let's make the southbridge information explicit instead of having to
+ * worry about people probing the ACPI areas, for example.. (Yes, it
+ * happens, and if you read the wrong ACPI register it will put the machine
+ * to sleep with no way of waking it up again. Bummer).
*
* ALI M7101: Two IO regions pointed to by words at
* 0xE0 (64 bytes of ACPI registers)
@@ -508,6 +669,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
break;
size = bit;
}
+
/*
* For now we only print it out. Eventually we'll want to
* reserve it, but let's get enough confirmation reports first.
@@ -579,8 +741,7 @@ static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
* priority and can't tell whether the legacy device or the one created
* here is really at that address. This happens on boards with broken
* BIOSes.
- */
-
+ */
pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
if (enable & ICH4_ACPI_EN)
quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
@@ -617,7 +778,8 @@ static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
"ICH6 GPIO");
}
-static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
+static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
+ const char *name, int dynsize)
{
u32 val;
u32 size, base;
@@ -641,7 +803,10 @@ static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
}
base &= ~(size-1);
- /* Just print it out for now. We should reserve it after more debugging */
+ /*
+ * Just print it out for now. We should reserve it after more
+ * debugging.
+ */
pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
@@ -657,7 +822,8 @@ static void quirk_ich6_lpc(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
-static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
+static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
+ const char *name)
{
u32 val;
u32 mask, base;
@@ -668,15 +834,15 @@ static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
if (!(val & 1))
return;
- /*
- * IO base in bits 15:2, mask in bits 23:18, both
- * are dword-based
- */
+ /* IO base in bits 15:2, mask in bits 23:18, both are dword-based */
base = val & 0xfffc;
mask = (val >> 16) & 0xfc;
mask |= 3;
- /* Just print it out for now. We should reserve it after more debugging */
+ /*
+ * Just print it out for now. We should reserve it after more
+ * debugging.
+ */
pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
@@ -748,8 +914,8 @@ static void quirk_vt8235_acpi(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
/*
- * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
- * Disable fast back-to-back on the secondary bus segment
+ * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast
+ * back-to-back: Disable fast back-to-back on the secondary bus segment
*/
static void quirk_xio2000a(struct pci_dev *dev)
{
@@ -774,8 +940,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
* VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
* devices to the external APIC.
*
- * TODO: When we have device-specific interrupt routers,
- * this code will go away from quirks.
+ * TODO: When we have device-specific interrupt routers, this code will go
+ * away from quirks.
*/
static void quirk_via_ioapic(struct pci_dev *dev)
{
@@ -816,13 +982,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
/*
- * The AMD io apic can hang the box when an apic irq is masked.
+ * The AMD IO-APIC can hang the box when an APIC IRQ is masked.
* We check all revs >= B0 (yet not in the pre production!) as the bug
* is currently marked NoFix
*
* We have multiple reports of hangs with this chipset that went away with
* noapic specified. For the moment we assume it's the erratum. We may be wrong
- * of course. However the advice is demonstrably good even if so..
+ * of course. However the advice is demonstrably good even if so.
*/
static void quirk_amd_ioapic(struct pci_dev *dev)
{
@@ -838,7 +1004,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_a
static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
{
- /* Fix for improper SRIOV configuration on Cavium cn88xx RNM device */
+ /* Fix for improper SR-IOV configuration on Cavium cn88xx RNM device */
if (dev->subsystem_device == 0xa118)
dev->sriov->link = dev->devfn;
}
@@ -860,19 +1026,17 @@ static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
/*
- * FIXME: it is questionable that quirk_via_acpi
- * is needed. It shows up as an ISA bridge, and does not
- * support the PCI_INTERRUPT_LINE register at all. Therefore
- * it seems like setting the pci_dev's 'irq' to the
- * value of the ACPI SCI interrupt is only done for convenience.
+ * FIXME: it is questionable that quirk_via_acpi() is needed. It shows up
+ * as an ISA bridge, and does not support the PCI_INTERRUPT_LINE register
+ * at all. Therefore it seems like setting the pci_dev's IRQ to the value
+ * of the ACPI SCI interrupt is only done for convenience.
* -jgarzik
*/
static void quirk_via_acpi(struct pci_dev *d)
{
- /*
- * VIA ACPI device: SCI IRQ line in PCI config byte 0x42
- */
u8 irq;
+
+ /* VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */
pci_read_config_byte(d, 0x42, &irq);
irq &= 0xf;
if (irq && (irq != 2))
@@ -881,11 +1045,7 @@ static void quirk_via_acpi(struct pci_dev *d)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
-
-/*
- * VIA bridges which have VLink
- */
-
+/* VIA bridges which have VLink */
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
static void quirk_via_bridge(struct pci_dev *dev)
@@ -893,9 +1053,11 @@ static void quirk_via_bridge(struct pci_dev *dev)
/* See what bridge we have and find the device ranges */
switch (dev->device) {
case PCI_DEVICE_ID_VIA_82C686:
- /* The VT82C686 is special, it attaches to PCI and can have
- any device number. All its subdevices are functions of
- that single device. */
+ /*
+ * The VT82C686 is special; it attaches to PCI and can have
+ * any device number. All its subdevices are functions of
+ * that single device.
+ */
via_vlink_dev_lo = PCI_SLOT(dev->devfn);
via_vlink_dev_hi = PCI_SLOT(dev->devfn);
break;
@@ -923,19 +1085,17 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_b
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
-/**
- * quirk_via_vlink - VIA VLink IRQ number update
- * @dev: PCI device
+/*
+ * quirk_via_vlink - VIA VLink IRQ number update
+ * @dev: PCI device
*
- * If the device we are dealing with is on a PIC IRQ we need to
- * ensure that the IRQ line register which usually is not relevant
- * for PCI cards, is actually written so that interrupts get sent
- * to the right place.
- * We only do this on systems where a VIA south bridge was detected,
- * and only for VIA devices on the motherboard (see quirk_via_bridge
- * above).
+ * If the device we are dealing with is on a PIC IRQ we need to ensure that
+ * the IRQ line register which usually is not relevant for PCI cards, is
+ * actually written so that interrupts get sent to the right place.
+ *
+ * We only do this on systems where a VIA south bridge was detected, and
+ * only for VIA devices on the motherboard (see quirk_via_bridge above).
*/
-
static void quirk_via_vlink(struct pci_dev *dev)
{
u8 irq, new_irq;
@@ -955,9 +1115,10 @@ static void quirk_via_vlink(struct pci_dev *dev)
PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
return;
- /* This is an internal VLink device on a PIC interrupt. The BIOS
- ought to have set this but may not have, so we redo it */
-
+ /*
+ * This is an internal VLink device on a PIC interrupt. The BIOS
+ * ought to have set this but may not have, so we redo it.
+ */
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
if (new_irq != irq) {
pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
@@ -969,10 +1130,9 @@ static void quirk_via_vlink(struct pci_dev *dev)
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
/*
- * VIA VT82C598 has its device ID settable and many BIOSes
- * set it to the ID of VT82C597 for backward compatibility.
- * We need to switch it off to be able to recognize the real
- * type of the chip.
+ * VIA VT82C598 has its device ID settable and many BIOSes set it to the ID
+ * of VT82C597 for backward compatibility. We need to switch it off to be
+ * able to recognize the real type of the chip.
*/
static void quirk_vt82c598_id(struct pci_dev *dev)
{
@@ -982,10 +1142,10 @@ static void quirk_vt82c598_id(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
/*
- * CardBus controllers have a legacy base address that enables them
- * to respond as i82365 pcmcia controllers. We don't want them to
- * do this even if the Linux CardBus driver is not loaded, because
- * the Linux i82365 driver does not (and should not) handle CardBus.
+ * CardBus controllers have a legacy base address that enables them to
+ * respond as i82365 pcmcia controllers. We don't want them to do this
+ * even if the Linux CardBus driver is not loaded, because the Linux i82365
+ * driver does not (and should not) handle CardBus.
*/
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
@@ -997,11 +1157,11 @@ DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
/*
- * Following the PCI ordering rules is optional on the AMD762. I'm not
- * sure what the designers were smoking but let's not inhale...
+ * Following the PCI ordering rules is optional on the AMD762. I'm not sure
+ * what the designers were smoking but let's not inhale...
*
- * To be fair to AMD, it follows the spec by default, its BIOS people
- * who turn it off!
+ * To be fair to AMD, it follows the spec by default; it's the BIOS people
+ * who turn it off!
*/
static void quirk_amd_ordering(struct pci_dev *dev)
{
@@ -1020,11 +1180,11 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
/*
- * DreamWorks provided workaround for Dunord I-3000 problem
+ * DreamWorks-provided workaround for Dunord I-3000 problem
*
- * This card decodes and responds to addresses not apparently
- * assigned to it. We force a larger allocation to ensure that
- * nothing gets put too close to it.
+ * This card decodes and responds to addresses not apparently assigned to
+ * it. We force a larger allocation to ensure that nothing gets put too
+ * close to it.
*/
static void quirk_dunord(struct pci_dev *dev)
{
@@ -1037,10 +1197,9 @@ static void quirk_dunord(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
/*
- * i82380FB mobile docking controller: its PCI-to-PCI bridge
- * is subtractive decoding (transparent), and does indicate this
- * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
- * instead of 0x01.
+ * i82380FB mobile docking controller: its PCI-to-PCI bridge is subtractive
+ * decoding (transparent), and does indicate this in the ProgIf.
+ * Unfortunately, the ProgIf value is wrong - 0x80 instead of 0x01.
*/
static void quirk_transparent_bridge(struct pci_dev *dev)
{
@@ -1050,10 +1209,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
/*
- * Common misconfiguration of the MediaGX/Geode PCI master that will
- * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1
- * datasheets found at http://www.national.com/analog for info on what
- * these bits do. <christer@weinigel.se>
+ * Common misconfiguration of the MediaGX/Geode PCI master that will reduce
+ * PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 datasheets
+ * found at http://www.national.com/analog for info on what these bits do.
+ * <christer@weinigel.se>
*/
static void quirk_mediagx_master(struct pci_dev *dev)
{
@@ -1071,9 +1230,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qui
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
/*
- * Ensure C0 rev restreaming is off. This is normally done by
- * the BIOS but in the odd case it is not the results are corruption
- * hence the presence of a Linux check
+ * Ensure C0 rev restreaming is off. This is normally done by the BIOS but
+ * in the odd case it is not the results are corruption hence the presence
+ * of a Linux check.
*/
static void quirk_disable_pxb(struct pci_dev *pdev)
{
@@ -1117,9 +1276,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
-/*
- * Serverworks CSB5 IDE does not fully support native mode
- */
+/* Serverworks CSB5 IDE does not fully support native mode */
static void quirk_svwks_csb5ide(struct pci_dev *pdev)
{
u8 prog;
@@ -1133,9 +1290,7 @@ static void quirk_svwks_csb5ide(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
-/*
- * Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
- */
+/* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same */
static void quirk_ide_samemode(struct pci_dev *pdev)
{
u8 prog;
@@ -1151,10 +1306,7 @@ static void quirk_ide_samemode(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
-/*
- * Some ATA devices break if put into D3
- */
-
+/* Some ATA devices break if put into D3 */
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
@@ -1172,7 +1324,8 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
-/* This was originally an Alpha specific thing, but it really fits here.
+/*
+ * This was originally an Alpha-specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
*/
static void quirk_eisa_bridge(struct pci_dev *dev)
@@ -1181,7 +1334,6 @@ static void quirk_eisa_bridge(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
-
/*
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
@@ -1398,15 +1550,19 @@ static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
+
/* read the Function Disable register, dword mode only */
val = readl(asus_rcba_base + 0x3418);
- writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
+
+ /* enable the SMBus device */
+ writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
}
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
+
iounmap(asus_rcba_base);
asus_rcba_base = NULL;
pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
@@ -1423,9 +1579,7 @@ DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
-/*
- * SiS 96x south bridge: BIOS typically hides SMBus device...
- */
+/* SiS 96x south bridge: BIOS typically hides SMBus device... */
static void quirk_sis_96x_smbus(struct pci_dev *dev)
{
u8 val = 0;
@@ -1448,7 +1602,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_si
* ... This is further complicated by the fact that some SiS96x south
* bridges pretend to be 85C503/5513 instead. In that case see if we
* spotted a compatible north bridge to make sure.
- * (pci_find_device doesn't work yet)
+ * (pci_find_device() doesn't work yet)
*
* We can also enable the sis96x bit in the discovery register..
*/
@@ -1468,9 +1622,9 @@ static void quirk_sis_503(struct pci_dev *dev)
}
/*
- * Ok, it now shows up as a 96x.. run the 96x quirk by
- * hand in case it has already been processed.
- * (depends on link order, which is apparently not guaranteed)
+ * Ok, it now shows up as a 96x. Run the 96x quirk by hand in case
+ * it has already been processed. (Depends on link order, which is
+ * apparently not guaranteed)
*/
dev->device = devid;
quirk_sis_96x_smbus(dev);
@@ -1478,7 +1632,6 @@ static void quirk_sis_503(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
-
/*
* On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
* and MC97 modem controller are disabled when a second PCI soundcard is
@@ -1515,9 +1668,8 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_h
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
/*
- * If we are using libata we can drive this chip properly but must
- * do this early on to make the additional device appear during
- * the PCI scanning.
+ * If we are using libata we can drive this chip properly but must do this
+ * early on to make the additional device appear during the PCI scanning.
*/
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
@@ -1613,14 +1765,18 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
if ((pdev->class >> 8) != 0xff00)
return;
- /* the first BAR is the location of the IO APIC...we must
+ /*
+ * The first BAR is the location of the IO-APIC... we must
* not touch this (and it's already covered by the fixmap), so
- * forcibly insert it into the resource tree */
+ * forcibly insert it into the resource tree.
+ */
if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
insert_resource(&iomem_resource, &pdev->resource[0]);
- /* The next five BARs all seem to be rubbish, so just clean
- * them out */
+ /*
+ * The next five BARs all seem to be rubbish, so just clean
+ * them out.
+ */
for (i = 1; i < 6; i++)
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
@@ -1638,8 +1794,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
/*
- * It's possible for the MSI to get corrupted if shpc and acpi
- * are used together on certain PXH-based systems.
+ * It's possible for the MSI to get corrupted if SHPC and ACPI are used
+ * together on certain PXH-based systems.
*/
static void quirk_pcie_pxh(struct pci_dev *dev)
{
@@ -1653,15 +1809,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pc
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
/*
- * Some Intel PCI Express chipsets have trouble with downstream
- * device power management.
+ * Some Intel PCI Express chipsets have trouble with downstream device
+ * power management.
*/
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
pci_pm_d3_delay = 120;
dev->no_d1d2 = 1;
}
-
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
@@ -1723,7 +1878,7 @@ static const struct dmi_system_id boot_interrupt_dmi_table[] = {
/*
* Boot interrupts on some chipsets cannot be turned off. For these chipsets,
- * remap the original interrupt in the linux kernel to the boot interrupt, so
+ * remap the original interrupt in the Linux kernel to the boot interrupt, so
* that a PCI device's interrupt handler is installed on the boot interrupt
* line instead.
*/
@@ -1760,7 +1915,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk
*/
/*
- * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no
+ * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order number
* 300641-004US, section 5.7.3.
*/
#define INTEL_6300_IOAPIC_ABAR 0x40
@@ -1783,9 +1938,7 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
-/*
- * disable boot interrupts on HT-1000
- */
+/* Disable boot interrupts on HT-1000 */
#define BC_HT1000_FEATURE_REG 0x64
#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
#define BC_HT1000_MAP_IDX 0xC00
@@ -1816,9 +1969,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
-/*
- * disable boot interrupts on AMD and ATI chipsets
- */
+/* Disable boot interrupts on AMD and ATI chipsets */
+
/*
* NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
* rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
@@ -1894,7 +2046,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
quirk_tc86c001_ide);
/*
- * PLX PCI 9050 PCI Target bridge controller has an errata that prevents the
+ * PLX PCI 9050 PCI Target bridge controller has an erratum that prevents the
* local configuration registers accessible via BAR0 (memory) or BAR1 (i/o)
* being read correctly if bit 7 of the base address is set.
* The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
@@ -2087,15 +2239,17 @@ static void quirk_p64h2_1k_io(struct pci_dev *dev)
dev->io_window_1k = 1;
}
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
-/* Under some circumstances, AER is not linked with extended capabilities.
+/*
+ * Under some circumstances, AER is not linked with extended capabilities.
* Force it to be linked by setting the corresponding control bit in the
* config space.
*/
static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
uint8_t b;
+
if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
if (!(b & 0x20)) {
pci_write_config_byte(dev, 0xf41, b | 0x20);
@@ -2125,8 +2279,10 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
uint8_t b;
- /* p should contain the first (internal) VT6212L -- see if we have
- an external one by searching again */
+ /*
+ * p should contain the first (internal) VT6212L -- see if we have
+ * an external one by searching again.
+ */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
if (!p)
return;
@@ -2171,7 +2327,6 @@ static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
pcie_set_readrq(dev, 2048);
}
}
-
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5719,
quirk_brcm_5719_limit_mrrs);
@@ -2179,14 +2334,16 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
#ifdef CONFIG_PCIE_IPROC_PLATFORM
static void quirk_paxc_bridge(struct pci_dev *pdev)
{
- /* The PCI config space is shared with the PAXC root port and the first
+ /*
+ * The PCI config space is shared with the PAXC root port and the first
* Ethernet device. So, we need to workaround this by telling the PCI
* code that the bridge is not an Ethernet device.
*/
if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
- /* MPSS is not being set properly (as it is currently 0). This is
+ /*
+ * MPSS is not being set properly (as it is currently 0). This is
* because that area of the PCI config space is hard coded to zero, and
* is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
* so that the MPS can be set to the real max value.
@@ -2197,10 +2354,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
#endif
-/* Originally in EDAC sources for i82875P:
- * Intel tells BIOS developers to hide device 6 which
- * configures the overflow device access containing
- * the DRBs - this is where we expose device 6.
+/*
+ * Originally in EDAC sources for i82875P: Intel tells BIOS developers to
+ * hide device 6 which configures the overflow device access containing the
+ * DRBs - this is where we expose device 6.
* http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
*/
static void quirk_unhide_mch_dev6(struct pci_dev *dev)
@@ -2212,18 +2369,18 @@ static void quirk_unhide_mch_dev6(struct pci_dev *dev)
pci_write_config_byte(dev, 0xF4, reg | 0x02);
}
}
-
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
quirk_unhide_mch_dev6);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
quirk_unhide_mch_dev6);
#ifdef CONFIG_PCI_MSI
-/* Some chipsets do not support MSI. We cannot easily rely on setting
- * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
- * some other buses controlled by the chipset even if Linux is not
- * aware of it. Instead of setting the flag on all buses in the
- * machine, simply disable MSI globally.
+/*
+ * Some chipsets do not support MSI. We cannot easily rely on setting
+ * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually some
+ * other buses controlled by the chipset even if Linux is not aware of it.
+ * Instead of setting the flag on all buses in the machine, simply disable
+ * MSI globally.
*/
static void quirk_disable_all_msi(struct pci_dev *dev)
{
@@ -2271,8 +2428,10 @@ static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
-/* Go through the list of Hypertransport capabilities and
- * return 1 if a HT MSI capability is found and enabled */
+/*
+ * Go through the list of HyperTransport capabilities and return 1 if a HT
+ * MSI capability is found and enabled.
+ */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
int pos, ttl = PCI_FIND_CAP_TTL;
@@ -2295,7 +2454,7 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
return 0;
}
-/* Check the hypertransport MSI mapping to know whether MSI is enabled or not */
+/* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */
static void quirk_msi_ht_cap(struct pci_dev *dev)
{
if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
@@ -2306,8 +2465,9 @@ static void quirk_msi_ht_cap(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
quirk_msi_ht_cap);
-/* The nVidia CK804 chipset may have 2 HT MSI mappings.
- * MSI are supported if the MSI capability set in any of these mappings.
+/*
+ * The nVidia CK804 chipset may have 2 HT MSI mappings. MSI is supported
+ * if the MSI capability is set in any of these mappings.
*/
static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
@@ -2316,8 +2476,9 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
if (!dev->subordinate)
return;
- /* check HT MSI cap on this chipset and the root one.
- * a single one having MSI is enough to be sure that MSI are supported.
+ /*
+ * Check HT MSI cap on this chipset and the root one. A single one
+ * having MSI is enough to be sure that MSI is supported.
*/
pdev = pci_get_slot(dev->bus, 0);
if (!pdev)
@@ -2354,13 +2515,13 @@ static void ht_enable_msi_mapping(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
ht_enable_msi_mapping);
-
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
ht_enable_msi_mapping);
-/* The P5N32-SLI motherboards from Asus have a problem with msi
- * for the MCP55 NIC. It is not yet determined whether the msi problem
- * also affects other devices. As for now, turn off msi for this device.
+/*
+ * The P5N32-SLI motherboards from Asus have a problem with MSI
+ * for the MCP55 NIC. It is not yet determined whether the MSI problem
+ * also affects other devices. As for now, turn off MSI for this device.
*/
static void nvenet_msi_disable(struct pci_dev *dev)
{
@@ -2397,16 +2558,14 @@ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
pci_read_config_dword(dev, 0x74, &cfg);
if (cfg & ((1 << 2) | (1 << 15))) {
- printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
+ printk(KERN_INFO "Rewriting IRQ routing register on MCP55\n");
cfg &= ~((1 << 2) | (1 << 15));
pci_write_config_dword(dev, 0x74, cfg);
}
}
-
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
nvbridge_check_legacy_irq_routing);
-
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
nvbridge_check_legacy_irq_routing);
@@ -2416,7 +2575,7 @@ static int ht_check_msi_mapping(struct pci_dev *dev)
int pos, ttl = PCI_FIND_CAP_TTL;
int found = 0;
- /* check if there is HT MSI cap or enabled on this device */
+ /* Check if there is HT MSI cap or enabled on this device */
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
@@ -2452,7 +2611,7 @@ static int host_bridge_with_leaf(struct pci_dev *host_bridge)
if (!dev)
continue;
- /* found next host bridge ?*/
+ /* found next host bridge? */
pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
if (pos != 0) {
pci_dev_put(dev);
@@ -2611,27 +2770,27 @@ static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 1);
}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 0);
}
-
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
-
static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
+
static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
{
struct pci_dev *p;
- /* SB700 MSI issue will be fixed at HW level from revision A21,
+ /*
+ * SB700 MSI issue will be fixed at HW level from revision A21;
* we need check PCI REVISION ID of SMBus controller to get SB700
* revision.
*/
@@ -2644,6 +2803,7 @@ static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
pci_dev_put(p);
}
+
static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
{
/* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
@@ -2713,55 +2873,56 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
quirk_msi_intx_disable_qca_bug);
#endif /* CONFIG_PCI_MSI */
-/* Allow manual resource allocation for PCI hotplug bridges
- * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
- * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
- * kernel fails to allocate resources when hotplug device is
- * inserted and PCI bus is rescanned.
+/*
+ * Allow manual resource allocation for PCI hotplug bridges via
+ * pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For some PCI-PCI
+ * hotplug bridges, like PLX 6254 (former HINT HB6), kernel fails to
+ * allocate resources when hotplug device is inserted and PCI bus is
+ * rescanned.
*/
static void quirk_hotplug_bridge(struct pci_dev *dev)
{
dev->is_hotplug_bridge = 1;
}
-
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
/*
- * This is a quirk for the Ricoh MMC controller found as a part of
- * some mulifunction chips.
-
+ * This is a quirk for the Ricoh MMC controller found as a part of some
+ * multifunction chips.
+ *
* This is very similar and based on the ricoh_mmc driver written by
* Philip Langdale. Thank you for these magic sequences.
*
- * These chips implement the four main memory card controllers (SD, MMC, MS, xD)
- * and one or both of cardbus or firewire.
+ * These chips implement the four main memory card controllers (SD, MMC,
+ * MS, xD) and one or both of CardBus or FireWire.
*
- * It happens that they implement SD and MMC
- * support as separate controllers (and PCI functions). The linux SDHCI
- * driver supports MMC cards but the chip detects MMC cards in hardware
- * and directs them to the MMC controller - so the SDHCI driver never sees
- * them.
+ * It happens that they implement SD and MMC support as separate
+ * controllers (and PCI functions). The Linux SDHCI driver supports MMC
+ * cards but the chip detects MMC cards in hardware and directs them to the
+ * MMC controller - so the SDHCI driver never sees them.
*
- * To get around this, we must disable the useless MMC controller.
- * At that point, the SDHCI controller will start seeing them
- * It seems to be the case that the relevant PCI registers to deactivate the
- * MMC controller live on PCI function 0, which might be the cardbus controller
- * or the firewire controller, depending on the particular chip in question
+ * To get around this, we must disable the useless MMC controller. At that
+ * point, the SDHCI controller will start seeing them. It seems to be the
+ * case that the relevant PCI registers to deactivate the MMC controller
+ * live on PCI function 0, which might be the CardBus controller or the
+ * FireWire controller, depending on the particular chip in question.
*
* This has to be done early, because as soon as we disable the MMC controller
- * other pci functions shift up one level, e.g. function #2 becomes function
- * #1, and this will confuse the pci core.
+ * other PCI functions shift up one level, e.g. function #2 becomes function
+ * #1, and this will confuse the PCI core.
*/
-
#ifdef CONFIG_MMC_RICOH_MMC
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
- /* disable via cardbus interface */
u8 write_enable;
u8 write_target;
u8 disable;
- /* disable must be done via function #0 */
+ /*
+ * Disable via CardBus interface
+ *
+ * This must be done via function #0
+ */
if (PCI_FUNC(dev->devfn))
return;
@@ -2777,7 +2938,7 @@ static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
pci_write_config_byte(dev, 0x8E, write_enable);
pci_write_config_byte(dev, 0x8D, write_target);
- pci_notice(dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
+ pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
@@ -2785,17 +2946,20 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
- /* disable via firewire interface */
u8 write_enable;
u8 disable;
- /* disable must be done via function #0 */
+ /*
+ * Disable via FireWire interface
+ *
+ * This must be done via function #0
+ */
if (PCI_FUNC(dev->devfn))
return;
/*
* RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
- * certain types of SD/MMC cards. Lowering the SD base
- * clock frequency from 200Mhz to 50Mhz fixes this issue.
+	 * certain types of SD/MMC cards.  Lowering the SD base clock
+	 * frequency from 200MHz to 50MHz fixes this issue.
*
* 0x150 - SD2.0 mode enable for changing base clock
* frequency to 50Mhz
@@ -2826,7 +2990,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
pci_write_config_byte(dev, 0xCB, disable | 0x02);
pci_write_config_byte(dev, 0xCA, write_enable);
- pci_notice(dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
+ pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
}
@@ -2842,13 +3006,13 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823,
#define VTUNCERRMSK_REG 0x1ac
#define VTD_MSK_SPEC_ERRORS (1 << 31)
/*
- * This is a quirk for masking vt-d spec defined errors to platform error
- * handling logic. With out this, platforms using Intel 7500, 5500 chipsets
+ * This is a quirk for masking VT-d spec-defined errors to platform error
+ * handling logic. Without this, platforms using Intel 7500, 5500 chipsets
* (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
- * on the RAS config settings of the platform) when a vt-d fault happens.
+ * on the RAS config settings of the platform) when a VT-d fault happens.
* The resulting SMI caused the system to hang.
*
- * VT-d spec related errors are already handled by the VT-d OS code, so no
+ * VT-d spec-related errors are already handled by the VT-d OS code, so no
* need to report the same error through other channels.
*/
static void vtd_mask_spec_errors(struct pci_dev *dev)
@@ -2874,7 +3038,8 @@ static void fixup_ti816x_class(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
-/* Some PCIe devices do not work reliably with the claimed maximum
+/*
+ * Some PCIe devices do not work reliably with the claimed maximum
* payload size supported.
*/
static void fixup_mpss_256(struct pci_dev *dev)
@@ -2888,9 +3053,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
-/* Intel 5000 and 5100 Memory controllers have an errata with read completion
+/*
+ * Intel 5000 and 5100 Memory controllers have an erratum with read completion
* coalescing (which is enabled by default on some BIOSes) and MPS of 256B.
- * Since there is no way of knowing what the PCIE MPS on each fabric will be
+ * Since there is no way of knowing what the PCIe MPS on each fabric will be
* until all of the devices are discovered and buses walked, read completion
* coalescing must be disabled. Unfortunately, it cannot be re-enabled because
* it is possible to hotplug a device with MPS of 256B.
@@ -2904,9 +3070,10 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
pcie_bus_config == PCIE_BUS_DEFAULT)
return;
- /* Intel errata specifies bits to change but does not say what they are.
- * Keeping them magical until such time as the registers and values can
- * be explained.
+ /*
+ * Intel erratum specifies bits to change but does not say what
+ * they are. Keeping them magical until such time as the registers
+ * and values can be explained.
*/
err = pci_read_config_word(dev, 0x48, &rcc);
if (err) {
@@ -2925,7 +3092,7 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
return;
}
- pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n");
+ pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
}
/* Intel 5000 series memory controllers and ports 2-7 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
@@ -2955,11 +3122,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
-
/*
- * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. To
- * work around this, query the size it should be configured to by the device and
- * modify the resource end to correspond to this new size.
+ * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.
+ * To work around this, query the size it should be configured to by the
+ * device and modify the resource end to correspond to this new size.
*/
static void quirk_intel_ntb(struct pci_dev *dev)
{
@@ -2981,39 +3147,17 @@ static void quirk_intel_ntb(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
-static ktime_t fixup_debug_start(struct pci_dev *dev,
- void (*fn)(struct pci_dev *dev))
-{
- if (initcall_debug)
- pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current));
-
- return ktime_get();
-}
-
-static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
- void (*fn)(struct pci_dev *dev))
-{
- ktime_t delta, rettime;
- unsigned long long duration;
-
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- if (initcall_debug || duration > 10000)
- pci_info(dev, "%pF took %lld usecs\n", fn, duration);
-}
-
/*
- * Some BIOS implementations leave the Intel GPU interrupts enabled,
- * even though no one is handling them (f.e. i915 driver is never loaded).
- * Additionally the interrupt destination is not set up properly
+ * Some BIOS implementations leave the Intel GPU interrupts enabled, even
+ * though no one is handling them (e.g., if the i915 driver is never
+ * loaded). Additionally the interrupt destination is not set up properly
* and the interrupt ends up -somewhere-.
*
- * These spurious interrupts are "sticky" and the kernel disables
- * the (shared) interrupt line after 100.000+ generated interrupts.
+ * These spurious interrupts are "sticky" and the kernel disables the
+ * (shared) interrupt line after 100,000+ generated interrupts.
*
- * Fix it by disabling the still enabled interrupts.
- * This resolves crashes often seen on monitor unplug.
+ * Fix it by disabling the still enabled interrupts. This resolves crashes
+ * often seen on monitor unplug.
*/
#define I915_DEIER_REG 0x4400c
static void disable_igfx_irq(struct pci_dev *dev)
@@ -3101,38 +3245,22 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
* Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
* DisINTx can be set but the interrupt status bit is non-functional.
*/
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
static u16 mellanox_broken_intx_devs[] = {
PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
@@ -3177,7 +3305,8 @@ static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
}
}
- /* Getting here means Connect-IB cards and up. Connect-IB has no INTx
+ /*
+ * Getting here means Connect-IB cards and up. Connect-IB has no INTx
* support so shouldn't be checked further
*/
if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
@@ -3297,8 +3426,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
* shutdown before suspend. Otherwise the native host interface (NHI) will not
* be present after resume if a device was plugged in before suspend.
*
- * The thunderbolt controller consists of a pcie switch with downstream
- * bridges leading to the NHI and to the tunnel pci bridges.
+ * The Thunderbolt controller consists of a PCIe switch with downstream
+ * bridges leading to the NHI and to the tunnel PCI bridges.
*
* This quirk cuts power to the whole chip. Therefore we have to apply it
* during suspend_noirq of the upstream bridge.
@@ -3316,17 +3445,19 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
bridge = ACPI_HANDLE(&dev->dev);
if (!bridge)
return;
+
/*
* SXIO and SXLV are present only on machines requiring this quirk.
- * TB bridges in external devices might have the same device id as those
- * on the host, but they will not have the associated ACPI methods. This
- * implicitly checks that we are at the right bridge.
+ * Thunderbolt bridges in external devices might have the same
+ * device ID as those on the host, but they will not have the
+ * associated ACPI methods. This implicitly checks that we are at
+ * the right bridge.
*/
if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
|| ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
|| ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
return;
- pci_info(dev, "quirk: cutting power to thunderbolt controller...\n");
+ pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");
/* magic sequence */
acpi_execute_simple_method(SXIO, NULL, 1);
@@ -3341,9 +3472,9 @@ DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
quirk_apple_poweroff_thunderbolt);
/*
- * Apple: Wait for the thunderbolt controller to reestablish pci tunnels.
+ * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels
*
- * During suspend the thunderbolt controller is reset and all pci
+ * During suspend the Thunderbolt controller is reset and all PCI
* tunnels are lost. The NHI driver will try to reestablish all tunnels
* during resume. We have to manually wait for the NHI since there is
* no parent child relationship between the NHI and the tunneled
@@ -3358,9 +3489,10 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
return;
if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
return;
+
/*
- * Find the NHI and confirm that we are a bridge on the tb host
- * controller and not on a tb endpoint.
+ * Find the NHI and confirm that we are a bridge on the Thunderbolt
+ * host controller and not on a Thunderbolt endpoint.
*/
sibling = pci_get_slot(dev->bus, 0x0);
if (sibling == dev)
@@ -3377,7 +3509,7 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
|| nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
goto out;
- pci_info(dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
+ pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
device_pm_wait_for_dev(&dev->dev, &nhi->dev);
out:
pci_dev_put(nhi);
@@ -3397,142 +3529,6 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
quirk_apple_wait_for_thunderbolt);
#endif
-static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
- struct pci_fixup *end)
-{
- ktime_t calltime;
-
- for (; f < end; f++)
- if ((f->class == (u32) (dev->class >> f->class_shift) ||
- f->class == (u32) PCI_ANY_ID) &&
- (f->vendor == dev->vendor ||
- f->vendor == (u16) PCI_ANY_ID) &&
- (f->device == dev->device ||
- f->device == (u16) PCI_ANY_ID)) {
- calltime = fixup_debug_start(dev, f->hook);
- f->hook(dev);
- fixup_debug_report(dev, calltime, f->hook);
- }
-}
-
-extern struct pci_fixup __start_pci_fixups_early[];
-extern struct pci_fixup __end_pci_fixups_early[];
-extern struct pci_fixup __start_pci_fixups_header[];
-extern struct pci_fixup __end_pci_fixups_header[];
-extern struct pci_fixup __start_pci_fixups_final[];
-extern struct pci_fixup __end_pci_fixups_final[];
-extern struct pci_fixup __start_pci_fixups_enable[];
-extern struct pci_fixup __end_pci_fixups_enable[];
-extern struct pci_fixup __start_pci_fixups_resume[];
-extern struct pci_fixup __end_pci_fixups_resume[];
-extern struct pci_fixup __start_pci_fixups_resume_early[];
-extern struct pci_fixup __end_pci_fixups_resume_early[];
-extern struct pci_fixup __start_pci_fixups_suspend[];
-extern struct pci_fixup __end_pci_fixups_suspend[];
-extern struct pci_fixup __start_pci_fixups_suspend_late[];
-extern struct pci_fixup __end_pci_fixups_suspend_late[];
-
-static bool pci_apply_fixup_final_quirks;
-
-void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
-{
- struct pci_fixup *start, *end;
-
- switch (pass) {
- case pci_fixup_early:
- start = __start_pci_fixups_early;
- end = __end_pci_fixups_early;
- break;
-
- case pci_fixup_header:
- start = __start_pci_fixups_header;
- end = __end_pci_fixups_header;
- break;
-
- case pci_fixup_final:
- if (!pci_apply_fixup_final_quirks)
- return;
- start = __start_pci_fixups_final;
- end = __end_pci_fixups_final;
- break;
-
- case pci_fixup_enable:
- start = __start_pci_fixups_enable;
- end = __end_pci_fixups_enable;
- break;
-
- case pci_fixup_resume:
- start = __start_pci_fixups_resume;
- end = __end_pci_fixups_resume;
- break;
-
- case pci_fixup_resume_early:
- start = __start_pci_fixups_resume_early;
- end = __end_pci_fixups_resume_early;
- break;
-
- case pci_fixup_suspend:
- start = __start_pci_fixups_suspend;
- end = __end_pci_fixups_suspend;
- break;
-
- case pci_fixup_suspend_late:
- start = __start_pci_fixups_suspend_late;
- end = __end_pci_fixups_suspend_late;
- break;
-
- default:
- /* stupid compiler warning, you would think with an enum... */
- return;
- }
- pci_do_fixups(dev, start, end);
-}
-EXPORT_SYMBOL(pci_fixup_device);
-
-
-static int __init pci_apply_final_quirks(void)
-{
- struct pci_dev *dev = NULL;
- u8 cls = 0;
- u8 tmp;
-
- if (pci_cache_line_size)
- printk(KERN_DEBUG "PCI: CLS %u bytes\n",
- pci_cache_line_size << 2);
-
- pci_apply_fixup_final_quirks = true;
- for_each_pci_dev(dev) {
- pci_fixup_device(pci_fixup_final, dev);
- /*
- * If arch hasn't set it explicitly yet, use the CLS
- * value shared by all PCI devices. If there's a
- * mismatch, fall back to the default value.
- */
- if (!pci_cache_line_size) {
- pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
- if (!cls)
- cls = tmp;
- if (!tmp || cls == tmp)
- continue;
-
- printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
- cls << 2, tmp << 2,
- pci_dfl_cache_line_size << 2);
- pci_cache_line_size = pci_dfl_cache_line_size;
- }
- }
-
- if (!pci_cache_line_size) {
- printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
- cls << 2, pci_dfl_cache_line_size << 2);
- pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
- }
-
- return 0;
-}
-
-fs_initcall_sync(pci_apply_final_quirks);
-
/*
* Following are device-specific reset methods which can be used to
* reset a single function if other methods (e.g. FLR, PM D0->D3) are
@@ -3602,9 +3598,7 @@ reset_complete:
return 0;
}
-/*
- * Device-specific reset method for Chelsio T4-based adapters.
- */
+/* Device-specific reset method for Chelsio T4-based adapters */
static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
{
u16 old_command;
@@ -3887,7 +3881,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
/*
* Some devices have problems with Transaction Layer Packets with the Relaxed
* Ordering Attribute set. Such devices should mark themselves and other
- * Device Drivers should check before sending TLPs with RO set.
+ * device drivers should check before sending TLPs with RO set.
*/
static void quirk_relaxedordering_disable(struct pci_dev *dev)
{
@@ -3897,7 +3891,7 @@ static void quirk_relaxedordering_disable(struct pci_dev *dev)
/*
* Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
- * Complex has a Flow Control Credit issue which can cause performance
+ * Complex have a Flow Control Credit issue which can cause performance
* problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
*/
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
@@ -3958,7 +3952,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED
quirk_relaxedordering_disable);
/*
- * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
+ * The AMD ARM A1100 (aka "SEATTLE") SoC has a bug in its PCIe Root Complex
* where Upstream Transaction Layer Packets with the Relaxed Ordering
* Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
* set. This is a violation of the PCIe 3.0 Transaction Ordering Rules
@@ -4022,7 +4016,7 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
* This mask/compare operation selects for Physical Function 4 on a
* T5. We only need to fix up the Root Port once for any of the
* PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
- * 0x54xx so we use that one,
+ * 0x54xx so we use that one.
*/
if ((pdev->device & 0xff00) == 0x5400)
quirk_disable_root_port_attributes(pdev);
@@ -4113,7 +4107,7 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
- * X-Gene root matching this quirk do not allow peer-to-peer
+ * X-Gene Root Ports matching this quirk do not allow peer-to-peer
* transactions with others, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
@@ -4230,11 +4224,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
* 0xa290-0xa29f PCI Express Root port #{0-16}
* 0xa2e7-0xa2ee PCI Express Root port #{17-24}
*
+ * Mobile chipsets are also affected, 7th & 8th Generation
+ * Specification update confirms ACS errata 22, status no fix: (7th Generation
+ * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel
+ * Processor Family I/O for U Quad Core Platforms Specification Update,
+ * August 2017, Revision 002, Document#: 334660-002)[6]
+ * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O
+ * for U/Y Platforms and 8th Generation Intel ® Processor Family I/O for U
+ * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
+ *
+ * 0x9d10-0x9d1b PCI Express Root port #{1-12}
+ *
+ * The 300 series chipset suffers from the same bug so include those root
+ * ports here as well.
+ *
+ * 0xa32c-0xa343 PCI Express Root port #{0-24}
+ *
* [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
* [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
* [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
* [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
* [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
+ * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
+ * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
*/
static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
{
@@ -4244,6 +4256,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
switch (dev->device) {
case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
+ case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
+ case 0xa32c ... 0xa343: /* 300 series */
return true;
}
@@ -4361,8 +4375,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
/* QCOM QDF2xxx root ports */
- { 0x17cb, 0x400, pci_quirk_qcom_rp_acs },
- { 0x17cb, 0x401, pci_quirk_qcom_rp_acs },
+ { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
+ { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
@@ -4436,7 +4450,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
/*
* Read the RCBA register from the LPC (D31:F0). PCH root ports
* are D28:F* and therefore get probed before LPC, thus we can't
- * use pci_get_slot/pci_read_config_dword here.
+ * use pci_get_slot()/pci_read_config_dword() here.
*/
pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
INTEL_LPC_RCBA_REG, &rcba);
@@ -4569,7 +4583,7 @@ int pci_dev_specific_enable_acs(struct pci_dev *dev)
}
/*
- * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with
+ * The PCI capabilities list for Intel DH895xCC VFs (device ID 0x0443) with
* QuickAssist Technology (QAT) is prematurely terminated in hardware. The
* Next Capability pointer in the MSI Capability Structure should point to
* the PCIe Capability Structure but is incorrectly hardwired as 0 terminating
@@ -4630,9 +4644,7 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
return;
- /*
- * Save PCIE cap
- */
+ /* Save PCIe cap */
state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
if (!state)
return;
@@ -4653,7 +4665,7 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
-/* FLR may cause some 82579 devices to hang. */
+/* FLR may cause some 82579 devices to hang */
static void quirk_intel_no_flr(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 072784f55ea5..79b1824e83b4 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1943,56 +1943,56 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
}
/*
+ * There is only one bridge on the bus so it gets all available
+ * resources which it can then distribute to the possible
+ * hotplug bridges below.
+ */
+ if (hotplug_bridges + normal_bridges == 1) {
+ dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
+ if (dev->subordinate) {
+ pci_bus_distribute_available_resources(dev->subordinate,
+ add_list, available_io, available_mmio,
+ available_mmio_pref);
+ }
+ return;
+ }
+
+ /*
* Go over devices on this bus and distribute the remaining
* resource space between hotplug bridges.
*/
for_each_pci_bridge(dev, bus) {
+ resource_size_t align, io, mmio, mmio_pref;
struct pci_bus *b;
b = dev->subordinate;
- if (!b)
+ if (!b || !dev->is_hotplug_bridge)
continue;
- if (!hotplug_bridges && normal_bridges == 1) {
- /*
- * There is only one bridge on the bus (upstream
- * port) so it gets all available resources
- * which it can then distribute to the possible
- * hotplug bridges below.
- */
- pci_bus_distribute_available_resources(b, add_list,
- available_io, available_mmio,
- available_mmio_pref);
- } else if (dev->is_hotplug_bridge) {
- resource_size_t align, io, mmio, mmio_pref;
-
- /*
- * Distribute available extra resources equally
- * between hotplug-capable downstream ports
- * taking alignment into account.
- *
- * Here hotplug_bridges is always != 0.
- */
- align = pci_resource_alignment(bridge, io_res);
- io = div64_ul(available_io, hotplug_bridges);
- io = min(ALIGN(io, align), remaining_io);
- remaining_io -= io;
-
- align = pci_resource_alignment(bridge, mmio_res);
- mmio = div64_ul(available_mmio, hotplug_bridges);
- mmio = min(ALIGN(mmio, align), remaining_mmio);
- remaining_mmio -= mmio;
-
- align = pci_resource_alignment(bridge, mmio_pref_res);
- mmio_pref = div64_ul(available_mmio_pref,
- hotplug_bridges);
- mmio_pref = min(ALIGN(mmio_pref, align),
- remaining_mmio_pref);
- remaining_mmio_pref -= mmio_pref;
-
- pci_bus_distribute_available_resources(b, add_list, io,
- mmio, mmio_pref);
- }
+ /*
+ * Distribute available extra resources equally between
+ * hotplug-capable downstream ports taking alignment into
+ * account.
+ *
+ * Here hotplug_bridges is always != 0.
+ */
+ align = pci_resource_alignment(bridge, io_res);
+ io = div64_ul(available_io, hotplug_bridges);
+ io = min(ALIGN(io, align), remaining_io);
+ remaining_io -= io;
+
+ align = pci_resource_alignment(bridge, mmio_res);
+ mmio = div64_ul(available_mmio, hotplug_bridges);
+ mmio = min(ALIGN(mmio, align), remaining_mmio);
+ remaining_mmio -= mmio;
+
+ align = pci_resource_alignment(bridge, mmio_pref_res);
+ mmio_pref = div64_ul(available_mmio_pref, hotplug_bridges);
+ mmio_pref = min(ALIGN(mmio_pref, align), remaining_mmio_pref);
+ remaining_mmio_pref -= mmio_pref;
+
+ pci_bus_distribute_available_resources(b, add_list, io, mmio,
+ mmio_pref);
}
}
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 28bb5a029558..08ebaf7cca8b 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -6,30 +6,32 @@ menu "Performance monitor support"
depends on PERF_EVENTS
config ARM_CCI_PMU
- bool
+ tristate "ARM CCI PMU driver"
+ depends on (ARM && CPU_V7) || ARM64
select ARM_CCI
+ help
+ Support for PMU events monitoring on the ARM CCI (Cache Coherent
+ Interconnect) family of products.
+
+ If compiled as a module, it will be called arm-cci.
config ARM_CCI400_PMU
- bool "ARM CCI400 PMU support"
- depends on (ARM && CPU_V7) || ARM64
+ bool "support CCI-400"
+ default y
+ depends on ARM_CCI_PMU
select ARM_CCI400_COMMON
- select ARM_CCI_PMU
help
- Support for PMU events monitoring on the ARM CCI-400 (cache coherent
- interconnect). CCI-400 supports counting events related to the
- connected slave/master interfaces.
+ CCI-400 provides 4 independent event counters counting events related
+ to the connected slave/master interfaces, plus a cycle counter.
config ARM_CCI5xx_PMU
- bool "ARM CCI-500/CCI-550 PMU support"
- depends on (ARM && CPU_V7) || ARM64
- select ARM_CCI_PMU
+ bool "support CCI-500/CCI-550"
+ default y
+ depends on ARM_CCI_PMU
help
- Support for PMU events monitoring on the ARM CCI-500/CCI-550 cache
- coherent interconnects. Both of them provide 8 independent event counters,
- which can count events pertaining to the slave/master interfaces as well
- as the internal events to the CCI.
-
- If unsure, say Y
+ CCI-500/CCI-550 both provide 8 independent event counters, which can
+ count events pertaining to the slave/master interfaces as well as the
+ internal events to the CCI.
config ARM_CCN
tristate "ARM CCN driver support"
@@ -94,7 +96,7 @@ config XGENE_PMU
config ARM_SPE_PMU
tristate "Enable support for the ARMv8.2 Statistical Profiling Extension"
- depends on PERF_EVENTS && ARM64
+ depends on ARM64
help
Enable perf support for the ARMv8.2 Statistical Profiling
Extension, which provides periodic sampling of operations in
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 383b2d3dcbc6..0d09d8e669cd 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -120,9 +120,9 @@ enum cci_models {
static void pmu_write_counters(struct cci_pmu *cci_pmu,
unsigned long *mask);
-static ssize_t cci_pmu_format_show(struct device *dev,
+static ssize_t __maybe_unused cci_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf);
-static ssize_t cci_pmu_event_show(struct device *dev,
+static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
@@ -1184,16 +1184,11 @@ static int cci_pmu_add(struct perf_event *event, int flags)
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
struct hw_perf_event *hwc = &event->hw;
int idx;
- int err = 0;
-
- perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
idx = pmu_get_event_idx(hw_events, event);
- if (idx < 0) {
- err = idx;
- goto out;
- }
+ if (idx < 0)
+ return idx;
event->hw.idx = idx;
hw_events->events[idx] = event;
@@ -1205,9 +1200,7 @@ static int cci_pmu_add(struct perf_event *event, int flags)
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
-out:
- perf_pmu_enable(event->pmu);
- return err;
+ return 0;
}
static void cci_pmu_del(struct perf_event *event, int flags)
@@ -1304,15 +1297,6 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
hwc->config_base |= (unsigned long)mapping;
- /*
- * Limit the sample_period to half of the counter width. That way, the
- * new counter value is far less likely to overtake the previous one
- * unless you have some serious IRQ latency issues.
- */
- hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
- hwc->last_period = hwc->sample_period;
- local64_set(&hwc->period_left, hwc->sample_period);
-
if (event->group_leader != event) {
if (validate_group(event) != 0)
return -EINVAL;
@@ -1423,6 +1407,7 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
pmu_format_attr_group.attrs = model->format_attrs;
cci_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
.name = cci_pmu->model->name,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = cci_pmu_enable,
@@ -1466,7 +1451,7 @@ static int cci_pmu_offline_cpu(unsigned int cpu)
return 0;
}
-static struct cci_pmu_model cci_pmu_models[] = {
+static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
#ifdef CONFIG_ARM_CCI400_PMU
[CCI400_R0] = {
.name = "CCI_400",
@@ -1588,6 +1573,7 @@ static const struct of_device_id arm_cci_pmu_matches[] = {
#endif
{},
};
+MODULE_DEVICE_TABLE(of, arm_cci_pmu_matches);
static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
{
@@ -1709,14 +1695,27 @@ static int cci_pmu_probe(struct platform_device *pdev)
return 0;
}
+static int cci_pmu_remove(struct platform_device *pdev)
+{
+ if (!g_cci_pmu)
+ return 0;
+
+ cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
+ perf_pmu_unregister(&g_cci_pmu->pmu);
+ g_cci_pmu = NULL;
+
+ return 0;
+}
+
static struct platform_driver cci_pmu_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = arm_cci_pmu_matches,
},
.probe = cci_pmu_probe,
+ .remove = cci_pmu_remove,
};
-builtin_platform_driver(cci_pmu_driver);
-MODULE_LICENSE("GPL");
+module_platform_driver(cci_pmu_driver);
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ARM CCI PMU support");
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 65b7e4042ece..b416ee18e6bb 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -736,7 +736,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
ccn = pmu_to_arm_ccn(event->pmu);
if (hw->sample_period) {
- dev_warn(ccn->dev, "Sampling not supported!\n");
+ dev_dbg(ccn->dev, "Sampling not supported!\n");
return -EOPNOTSUPP;
}
@@ -744,12 +744,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
event->attr.exclude_kernel || event->attr.exclude_hv ||
event->attr.exclude_idle || event->attr.exclude_host ||
event->attr.exclude_guest) {
- dev_warn(ccn->dev, "Can't exclude execution levels!\n");
+ dev_dbg(ccn->dev, "Can't exclude execution levels!\n");
return -EINVAL;
}
if (event->cpu < 0) {
- dev_warn(ccn->dev, "Can't provide per-task data!\n");
+ dev_dbg(ccn->dev, "Can't provide per-task data!\n");
return -EOPNOTSUPP;
}
/*
@@ -771,13 +771,13 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
switch (type) {
case CCN_TYPE_MN:
if (node_xp != ccn->mn_id) {
- dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
+ dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp);
return -EINVAL;
}
break;
case CCN_TYPE_XP:
if (node_xp >= ccn->num_xps) {
- dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
+ dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp);
return -EINVAL;
}
break;
@@ -785,11 +785,11 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
break;
default:
if (node_xp >= ccn->num_nodes) {
- dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
+ dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp);
return -EINVAL;
}
if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
- dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
+ dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n",
type, node_xp);
return -EINVAL;
}
@@ -808,19 +808,19 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
if (event_id != e->event)
continue;
if (e->num_ports && port >= e->num_ports) {
- dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
+ dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n",
port, node_xp);
return -EINVAL;
}
if (e->num_vcs && vc >= e->num_vcs) {
- dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
+ dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n",
vc, node_xp);
return -EINVAL;
}
valid = 1;
}
if (!valid) {
- dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
+ dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
event_id, node_xp);
return -EINVAL;
}
@@ -1594,4 +1594,4 @@ module_init(arm_ccn_init);
module_exit(arm_ccn_exit);
MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 1a0d340b65cf..a6347d487635 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -339,7 +339,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
return IRQ_NONE;
start_clock = sched_clock();
- ret = armpmu->handle_irq(irq, armpmu);
+ ret = armpmu->handle_irq(armpmu);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 28bb642af18b..54ec278d2fc4 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -131,8 +131,7 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+ struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
struct dev_ext_attribute *ea =
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
@@ -247,8 +246,7 @@ static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+ struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 01fe8e0455a0..dd50371225bc 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -337,6 +337,7 @@ config PINCTRL_OCELOT
select GENERIC_PINMUX_FUNCTIONS
select REGMAP_MMIO
+source "drivers/pinctrl/actions/Kconfig"
source "drivers/pinctrl/aspeed/Kconfig"
source "drivers/pinctrl/bcm/Kconfig"
source "drivers/pinctrl/berlin/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 657332b121fb..de40863e7297 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
+obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
obj-y += bcm/
obj-$(CONFIG_PINCTRL_BERLIN) += berlin/
diff --git a/drivers/pinctrl/actions/Kconfig b/drivers/pinctrl/actions/Kconfig
new file mode 100644
index 000000000000..490927b4ea76
--- /dev/null
+++ b/drivers/pinctrl/actions/Kconfig
@@ -0,0 +1,15 @@
+config PINCTRL_OWL
+ bool "Actions Semi OWL pinctrl driver"
+ depends on (ARCH_ACTIONS || COMPILE_TEST) && OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ help
+ Say Y here to enable Actions Semi OWL pinctrl driver
+
+config PINCTRL_S900
+ bool "Actions Semi S900 pinctrl driver"
+ depends on PINCTRL_OWL
+ help
+ Say Y here to enable Actions Semi S900 pinctrl driver
diff --git a/drivers/pinctrl/actions/Makefile b/drivers/pinctrl/actions/Makefile
new file mode 100644
index 000000000000..bd232d28400f
--- /dev/null
+++ b/drivers/pinctrl/actions/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PINCTRL_OWL) += pinctrl-owl.o
+obj-$(CONFIG_PINCTRL_S900) += pinctrl-s900.o
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
new file mode 100644
index 000000000000..76243caa08c6
--- /dev/null
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * OWL SoC's Pinctrl driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-owl.h"
+
+/**
+ * struct owl_pinctrl - pinctrl state of the device
+ * @dev: device handle
+ * @pctrldev: pinctrl handle
+ * @chip: gpio chip
+ * @lock: spinlock to protect registers
+ * @soc: reference to soc_data
+ * @base: pinctrl register base address
+ */
+struct owl_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctrldev;
+ struct gpio_chip chip;
+ raw_spinlock_t lock;
+ struct clk *clk;
+ const struct owl_pinctrl_soc_data *soc;
+ void __iomem *base;
+};
+
+static void owl_update_bits(void __iomem *base, u32 mask, u32 val)
+{
+ u32 reg_val;
+
+ reg_val = readl_relaxed(base);
+
+ reg_val = (reg_val & ~mask) | (val & mask);
+
+ writel_relaxed(reg_val, base);
+}
+
+static u32 owl_read_field(struct owl_pinctrl *pctrl, u32 reg,
+ u32 bit, u32 width)
+{
+ u32 tmp, mask;
+
+ tmp = readl_relaxed(pctrl->base + reg);
+ mask = (1 << width) - 1;
+
+ return (tmp >> bit) & mask;
+}
+
+static void owl_write_field(struct owl_pinctrl *pctrl, u32 reg, u32 arg,
+ u32 bit, u32 width)
+{
+ u32 mask;
+
+ mask = (1 << width) - 1;
+ mask = mask << bit;
+
+ owl_update_bits(pctrl->base + reg, mask, (arg << bit));
+}
+
+static int owl_get_groups_count(struct pinctrl_dev *pctrldev)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ return pctrl->soc->ngroups;
+}
+
+static const char *owl_get_group_name(struct pinctrl_dev *pctrldev,
+ unsigned int group)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ return pctrl->soc->groups[group].name;
+}
+
+static int owl_get_group_pins(struct pinctrl_dev *pctrldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ *pins = pctrl->soc->groups[group].pads;
+ *num_pins = pctrl->soc->groups[group].npads;
+
+ return 0;
+}
+
+static void owl_pin_dbg_show(struct pinctrl_dev *pctrldev,
+ struct seq_file *s,
+ unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ seq_printf(s, "%s", dev_name(pctrl->dev));
+}
+
+static struct pinctrl_ops owl_pinctrl_ops = {
+ .get_groups_count = owl_get_groups_count,
+ .get_group_name = owl_get_group_name,
+ .get_group_pins = owl_get_group_pins,
+ .pin_dbg_show = owl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int owl_get_funcs_count(struct pinctrl_dev *pctrldev)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ return pctrl->soc->nfunctions;
+}
+
+static const char *owl_get_func_name(struct pinctrl_dev *pctrldev,
+ unsigned int function)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ return pctrl->soc->functions[function].name;
+}
+
+static int owl_get_func_groups(struct pinctrl_dev *pctrldev,
+ unsigned int function,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ *groups = pctrl->soc->functions[function].groups;
+ *num_groups = pctrl->soc->functions[function].ngroups;
+
+ return 0;
+}
+
+static inline int get_group_mfp_mask_val(const struct owl_pingroup *g,
+ int function,
+ u32 *mask,
+ u32 *val)
+{
+ int id;
+ u32 option_num;
+ u32 option_mask;
+
+ for (id = 0; id < g->nfuncs; id++) {
+ if (g->funcs[id] == function)
+ break;
+ }
+ if (WARN_ON(id == g->nfuncs))
+ return -EINVAL;
+
+ option_num = (1 << g->mfpctl_width);
+ if (id > option_num)
+ id -= option_num;
+
+ option_mask = option_num - 1;
+ *mask = (option_mask << g->mfpctl_shift);
+ *val = (id << g->mfpctl_shift);
+
+ return 0;
+}
+
+static int owl_set_mux(struct pinctrl_dev *pctrldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ const struct owl_pingroup *g;
+ unsigned long flags;
+ u32 val, mask;
+
+ g = &pctrl->soc->groups[group];
+
+ if (get_group_mfp_mask_val(g, function, &mask, &val))
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_update_bits(pctrl->base + g->mfpctl_reg, mask, val);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static struct pinmux_ops owl_pinmux_ops = {
+ .get_functions_count = owl_get_funcs_count,
+ .get_function_name = owl_get_func_name,
+ .get_function_groups = owl_get_func_groups,
+ .set_mux = owl_set_mux,
+};
+
+static int owl_pad_pinconf_reg(const struct owl_padinfo *info,
+ unsigned int param,
+ u32 *reg,
+ u32 *bit,
+ u32 *width)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (!info->pullctl)
+ return -EINVAL;
+ *reg = info->pullctl->reg;
+ *bit = info->pullctl->shift;
+ *width = info->pullctl->width;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (!info->st)
+ return -EINVAL;
+ *reg = info->st->reg;
+ *bit = info->st->shift;
+ *width = info->st->width;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_pad_pinconf_arg2val(const struct owl_padinfo *info,
+ unsigned int param,
+ u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ *arg = OWL_PINCONF_PULL_HOLD;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ *arg = OWL_PINCONF_PULL_HIZ;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *arg = OWL_PINCONF_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ *arg = OWL_PINCONF_PULL_UP;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ *arg = (*arg >= 1 ? 1 : 0);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_pad_pinconf_val2arg(const struct owl_padinfo *padinfo,
+ unsigned int param,
+ u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ *arg = *arg == OWL_PINCONF_PULL_HOLD;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ *arg = *arg == OWL_PINCONF_PULL_HIZ;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *arg = *arg == OWL_PINCONF_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ *arg = *arg == OWL_PINCONF_PULL_UP;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ *arg = *arg == 1;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_pin_config_get(struct pinctrl_dev *pctrldev,
+ unsigned int pin,
+ unsigned long *config)
+{
+ int ret = 0;
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ const struct owl_padinfo *info;
+ unsigned int param = pinconf_to_config_param(*config);
+ u32 reg, bit, width, arg;
+
+ info = &pctrl->soc->padinfo[pin];
+
+ ret = owl_pad_pinconf_reg(info, param, &reg, &bit, &width);
+ if (ret)
+ return ret;
+
+ arg = owl_read_field(pctrl, reg, bit, width);
+
+ ret = owl_pad_pinconf_val2arg(info, param, &arg);
+ if (ret)
+ return ret;
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return ret;
+}
+
+static int owl_pin_config_set(struct pinctrl_dev *pctrldev,
+ unsigned int pin,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ const struct owl_padinfo *info;
+ unsigned long flags;
+ unsigned int param;
+ u32 reg, bit, width, arg;
+ int ret, i;
+
+ info = &pctrl->soc->padinfo[pin];
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ ret = owl_pad_pinconf_reg(info, param, &reg, &bit, &width);
+ if (ret)
+ return ret;
+
+ ret = owl_pad_pinconf_arg2val(info, param, &arg);
+ if (ret)
+ return ret;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_write_field(pctrl, reg, arg, bit, width);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ return ret;
+}
+
+static int owl_group_pinconf_reg(const struct owl_pingroup *g,
+ unsigned int param,
+ u32 *reg,
+ u32 *bit,
+ u32 *width)
+{
+ switch (param) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ if (g->drv_reg < 0)
+ return -EINVAL;
+ *reg = g->drv_reg;
+ *bit = g->drv_shift;
+ *width = g->drv_width;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (g->sr_reg < 0)
+ return -EINVAL;
+ *reg = g->sr_reg;
+ *bit = g->sr_shift;
+ *width = g->sr_width;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_group_pinconf_arg2val(const struct owl_pingroup *g,
+ unsigned int param,
+ u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ switch (*arg) {
+ case 2:
+ *arg = OWL_PINCONF_DRV_2MA;
+ break;
+ case 4:
+ *arg = OWL_PINCONF_DRV_4MA;
+ break;
+ case 8:
+ *arg = OWL_PINCONF_DRV_8MA;
+ break;
+ case 12:
+ *arg = OWL_PINCONF_DRV_12MA;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (*arg)
+ *arg = OWL_PINCONF_SLEW_FAST;
+ else
+ *arg = OWL_PINCONF_SLEW_SLOW;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_group_pinconf_val2arg(const struct owl_pingroup *g,
+ unsigned int param,
+ u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ switch (*arg) {
+ case OWL_PINCONF_DRV_2MA:
+ *arg = 2;
+ break;
+ case OWL_PINCONF_DRV_4MA:
+ *arg = 4;
+ break;
+ case OWL_PINCONF_DRV_8MA:
+ *arg = 8;
+ break;
+ case OWL_PINCONF_DRV_12MA:
+ *arg = 12;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (*arg)
+ *arg = 1;
+ else
+ *arg = 0;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_group_config_get(struct pinctrl_dev *pctrldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const struct owl_pingroup *g;
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ unsigned int param = pinconf_to_config_param(*config);
+ u32 reg, bit, width, arg;
+ int ret;
+
+ g = &pctrl->soc->groups[group];
+
+ ret = owl_group_pinconf_reg(g, param, &reg, &bit, &width);
+ if (ret)
+ return ret;
+
+ arg = owl_read_field(pctrl, reg, bit, width);
+
+ ret = owl_group_pinconf_val2arg(g, param, &arg);
+ if (ret)
+ return ret;
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return ret;
+
+}
+
+static int owl_group_config_set(struct pinctrl_dev *pctrldev,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ const struct owl_pingroup *g;
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ unsigned long flags;
+ unsigned int param;
+ u32 reg, bit, width, arg;
+ int ret, i;
+
+ g = &pctrl->soc->groups[group];
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ ret = owl_group_pinconf_reg(g, param, &reg, &bit, &width);
+ if (ret)
+ return ret;
+
+ ret = owl_group_pinconf_arg2val(g, param, &arg);
+ if (ret)
+ return ret;
+
+ /* Update register */
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_write_field(pctrl, reg, arg, bit, width);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops owl_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = owl_pin_config_get,
+ .pin_config_set = owl_pin_config_set,
+ .pin_config_group_get = owl_group_config_get,
+ .pin_config_group_set = owl_group_config_set,
+};
+
+static struct pinctrl_desc owl_pinctrl_desc = {
+ .pctlops = &owl_pinctrl_ops,
+ .pmxops = &owl_pinmux_ops,
+ .confops = &owl_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct owl_gpio_port *
+owl_gpio_get_port(struct owl_pinctrl *pctrl, unsigned int *pin)
+{
+ unsigned int start = 0, i;
+
+ for (i = 0; i < pctrl->soc->nports; i++) {
+ const struct owl_gpio_port *port = &pctrl->soc->ports[i];
+
+ if (*pin >= start && *pin < start + port->pins) {
+ *pin -= start;
+ return port;
+ }
+
+ start += port->pins;
+ }
+
+ return NULL;
+}
+
+static void owl_gpio_update_reg(void __iomem *base, unsigned int pin, int flag)
+{
+ u32 val;
+
+ val = readl_relaxed(base);
+
+ if (flag)
+ val |= BIT(pin);
+ else
+ val &= ~BIT(pin);
+
+ writel_relaxed(val, base);
+}
+
+static int owl_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return -ENODEV;
+
+ gpio_base = pctrl->base + port->offset;
+
+	/*
+	 * GPIOs have higher priority than other modules, so setting
+	 * them either as OUT or IN is sufficient
+	 */
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ owl_gpio_update_reg(gpio_base + port->outen, offset, true);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static void owl_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ /* disable gpio output */
+ owl_gpio_update_reg(gpio_base + port->outen, offset, false);
+
+ /* disable gpio input */
+ owl_gpio_update_reg(gpio_base + port->inen, offset, false);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int owl_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ u32 val;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return -ENODEV;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ val = readl_relaxed(gpio_base + port->dat);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return !!(val & BIT(offset));
+}
+
+static void owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ owl_gpio_update_reg(gpio_base + port->dat, offset, value);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int owl_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return -ENODEV;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ owl_gpio_update_reg(gpio_base + port->outen, offset, false);
+ owl_gpio_update_reg(gpio_base + port->inen, offset, true);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int owl_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return -ENODEV;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ owl_gpio_update_reg(gpio_base + port->inen, offset, false);
+ owl_gpio_update_reg(gpio_base + port->outen, offset, true);
+ owl_gpio_update_reg(gpio_base + port->dat, offset, value);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int owl_gpio_init(struct owl_pinctrl *pctrl)
+{
+ struct gpio_chip *chip;
+ int ret;
+
+ chip = &pctrl->chip;
+ chip->base = -1;
+ chip->ngpio = pctrl->soc->ngpios;
+ chip->label = dev_name(pctrl->dev);
+ chip->parent = pctrl->dev;
+ chip->owner = THIS_MODULE;
+ chip->of_node = pctrl->dev->of_node;
+
+ ret = gpiochip_add_data(&pctrl->chip, pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to register gpiochip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int owl_pinctrl_probe(struct platform_device *pdev,
+ struct owl_pinctrl_soc_data *soc_data)
+{
+ struct resource *res;
+ struct owl_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->base))
+ return PTR_ERR(pctrl->base);
+
+ /* enable GPIO/MFP clock */
+ pctrl->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pctrl->clk)) {
+ dev_err(&pdev->dev, "no clock defined\n");
+ return PTR_ERR(pctrl->clk);
+ }
+
+ ret = clk_prepare_enable(pctrl->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk enable failed\n");
+ return ret;
+ }
+
+ raw_spin_lock_init(&pctrl->lock);
+
+ owl_pinctrl_desc.name = dev_name(&pdev->dev);
+ owl_pinctrl_desc.pins = soc_data->pins;
+ owl_pinctrl_desc.npins = soc_data->npins;
+
+ pctrl->chip.direction_input = owl_gpio_direction_input;
+ pctrl->chip.direction_output = owl_gpio_direction_output;
+ pctrl->chip.get = owl_gpio_get;
+ pctrl->chip.set = owl_gpio_set;
+ pctrl->chip.request = owl_gpio_request;
+ pctrl->chip.free = owl_gpio_free;
+
+ pctrl->soc = soc_data;
+ pctrl->dev = &pdev->dev;
+
+ pctrl->pctrldev = devm_pinctrl_register(&pdev->dev,
+ &owl_pinctrl_desc, pctrl);
+ if (IS_ERR(pctrl->pctrldev)) {
+ dev_err(&pdev->dev, "could not register Actions OWL pinmux driver\n");
+ return PTR_ERR(pctrl->pctrldev);
+ }
+
+ ret = owl_gpio_init(pctrl);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pctrl);
+
+ return 0;
+}
diff --git a/drivers/pinctrl/actions/pinctrl-owl.h b/drivers/pinctrl/actions/pinctrl-owl.h
new file mode 100644
index 000000000000..74342378937c
--- /dev/null
+++ b/drivers/pinctrl/actions/pinctrl-owl.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * OWL SoC's Pinctrl definitions
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef __PINCTRL_OWL_H__
+#define __PINCTRL_OWL_H__
+
+#define OWL_PINCONF_SLEW_SLOW 0
+#define OWL_PINCONF_SLEW_FAST 1
+
+enum owl_pinconf_pull {
+ OWL_PINCONF_PULL_HIZ,
+ OWL_PINCONF_PULL_DOWN,
+ OWL_PINCONF_PULL_UP,
+ OWL_PINCONF_PULL_HOLD,
+};
+
+enum owl_pinconf_drv {
+ OWL_PINCONF_DRV_2MA,
+ OWL_PINCONF_DRV_4MA,
+ OWL_PINCONF_DRV_8MA,
+ OWL_PINCONF_DRV_12MA,
+};
+
+/**
+ * struct owl_pullctl - Actions pad pull control register
+ * @reg: offset to the pull control register
+ * @shift: shift value of the register
+ * @width: width of the register
+ */
+struct owl_pullctl {
+ int reg;
+ unsigned int shift;
+ unsigned int width;
+};
+
+/**
+ * struct owl_st - Actions pad schmitt trigger enable register
+ * @reg: offset to the schmitt trigger enable register
+ * @shift: shift value of the register
+ * @width: width of the register
+ */
+struct owl_st {
+ int reg;
+ unsigned int shift;
+ unsigned int width;
+};
+
+/**
+ * struct owl_pingroup - Actions pingroup definition
+ * @name: name of the pin group
+ * @pads: list of pins assigned to this pingroup
+ * @npads: size of @pads array
+ * @funcs: list of pinmux functions for this pingroup
+ * @nfuncs: size of @funcs array
+ * @mfpctl_reg: multiplexing control register offset
+ * @mfpctl_shift: multiplexing control register bit mask
+ * @mfpctl_width: multiplexing control register width
+ * @drv_reg: drive control register offset
+ * @drv_shift: drive control register bit mask
+ * @drv_width: driver control register width
+ * @sr_reg: slew rate control register offset
+ * @sr_shift: slew rate control register bit mask
+ * @sr_width: slew rate control register width
+ */
+struct owl_pingroup {
+ const char *name;
+ unsigned int *pads;
+ unsigned int npads;
+ unsigned int *funcs;
+ unsigned int nfuncs;
+
+ int mfpctl_reg;
+ unsigned int mfpctl_shift;
+ unsigned int mfpctl_width;
+
+ int drv_reg;
+ unsigned int drv_shift;
+ unsigned int drv_width;
+
+ int sr_reg;
+ unsigned int sr_shift;
+ unsigned int sr_width;
+};
+
+/**
+ * struct owl_padinfo - Actions pinctrl pad info
+ * @pad: pad name of the SoC
+ * @pullctl: pull control register info
+ * @st: schmitt trigger register info
+ */
+struct owl_padinfo {
+ int pad;
+ struct owl_pullctl *pullctl;
+ struct owl_st *st;
+};
+
+/**
+ * struct owl_pinmux_func - Actions pinctrl mux functions
+ * @name: name of the pinmux function.
+ * @groups: array of pin groups that may select this function.
+ * @ngroups: number of entries in @groups.
+ */
+struct owl_pinmux_func {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct owl_gpio_port - Actions GPIO port info
+ * @offset: offset of the GPIO port.
+ * @pins: number of pins belonging to the GPIO port.
+ * @outen: offset of the output enable register.
+ * @inen: offset of the input enable register.
+ * @dat: offset of the data register.
+ */
+struct owl_gpio_port {
+ unsigned int offset;
+ unsigned int pins;
+ unsigned int outen;
+ unsigned int inen;
+ unsigned int dat;
+};
+
+/**
+ * struct owl_pinctrl_soc_data - Actions pin controller driver configuration
+ * @pins: array describing all pins of the pin controller.
+ * @npins: number of entries in @pins.
+ * @functions: array describing all mux functions of this SoC.
+ * @nfunctions: number of entries in @functions.
+ * @groups: array describing all pin groups of this SoC.
+ * @ngroups: number of entries in @groups.
+ * @padinfo: array describing the pad info of this SoC.
+ * @ngpios: number of pingroups the driver should expose as GPIOs.
+ * @ports: array describing all GPIO ports of this SoC.
+ * @nports: number of GPIO ports in this SoC.
+ */
+struct owl_pinctrl_soc_data {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ const struct owl_pinmux_func *functions;
+ unsigned int nfunctions;
+ const struct owl_pingroup *groups;
+ unsigned int ngroups;
+ const struct owl_padinfo *padinfo;
+ unsigned int ngpios;
+ const struct owl_gpio_port *ports;
+ unsigned int nports;
+};
+
+int owl_pinctrl_probe(struct platform_device *pdev,
+ struct owl_pinctrl_soc_data *soc_data);
+
+#endif /* __PINCTRL_OWL_H__ */
diff --git a/drivers/pinctrl/actions/pinctrl-s900.c b/drivers/pinctrl/actions/pinctrl-s900.c
new file mode 100644
index 000000000000..5503c7945764
--- /dev/null
+++ b/drivers/pinctrl/actions/pinctrl-s900.c
@@ -0,0 +1,1888 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * OWL S900 Pinctrl driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-owl.h"
+
+/* Pinctrl registers offset */
+#define MFCTL0 (0x0040)
+#define MFCTL1 (0x0044)
+#define MFCTL2 (0x0048)
+#define MFCTL3 (0x004C)
+#define PAD_PULLCTL0 (0x0060)
+#define PAD_PULLCTL1 (0x0064)
+#define PAD_PULLCTL2 (0x0068)
+#define PAD_ST0 (0x006C)
+#define PAD_ST1 (0x0070)
+#define PAD_CTL (0x0074)
+#define PAD_DRV0 (0x0080)
+#define PAD_DRV1 (0x0084)
+#define PAD_DRV2 (0x0088)
+#define PAD_SR0 (0x0270)
+#define PAD_SR1 (0x0274)
+#define PAD_SR2 (0x0278)
+
+#define OWL_GPIO_PORT_A 0
+#define OWL_GPIO_PORT_B 1
+#define OWL_GPIO_PORT_C 2
+#define OWL_GPIO_PORT_D 3
+#define OWL_GPIO_PORT_E 4
+#define OWL_GPIO_PORT_F 5
+
+#define _GPIOA(offset) (offset)
+#define _GPIOB(offset) (32 + (offset))
+#define _GPIOC(offset) (64 + (offset))
+#define _GPIOD(offset) (76 + (offset))
+#define _GPIOE(offset) (106 + (offset))
+#define _GPIOF(offset) (138 + (offset))
+
+#define NUM_GPIOS (_GPIOF(7) + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
+
+#define ETH_TXD0 _GPIOA(0)
+#define ETH_TXD1 _GPIOA(1)
+#define ETH_TXEN _GPIOA(2)
+#define ETH_RXER _GPIOA(3)
+#define ETH_CRS_DV _GPIOA(4)
+#define ETH_RXD1 _GPIOA(5)
+#define ETH_RXD0 _GPIOA(6)
+#define ETH_REF_CLK _GPIOA(7)
+#define ETH_MDC _GPIOA(8)
+#define ETH_MDIO _GPIOA(9)
+#define SIRQ0 _GPIOA(10)
+#define SIRQ1 _GPIOA(11)
+#define SIRQ2 _GPIOA(12)
+#define I2S_D0 _GPIOA(13)
+#define I2S_BCLK0 _GPIOA(14)
+#define I2S_LRCLK0 _GPIOA(15)
+#define I2S_MCLK0 _GPIOA(16)
+#define I2S_D1 _GPIOA(17)
+#define I2S_BCLK1 _GPIOA(18)
+#define I2S_LRCLK1 _GPIOA(19)
+#define I2S_MCLK1 _GPIOA(20)
+#define ERAM_A5 _GPIOA(21)
+#define ERAM_A6 _GPIOA(22)
+#define ERAM_A7 _GPIOA(23)
+#define ERAM_A8 _GPIOA(24)
+#define ERAM_A9 _GPIOA(25)
+#define ERAM_A10 _GPIOA(26)
+#define ERAM_A11 _GPIOA(27)
+#define SD0_D0 _GPIOA(28)
+#define SD0_D1 _GPIOA(29)
+#define SD0_D2 _GPIOA(30)
+#define SD0_D3 _GPIOA(31)
+
+#define SD1_D0 _GPIOB(0)
+#define SD1_D1 _GPIOB(1)
+#define SD1_D2 _GPIOB(2)
+#define SD1_D3 _GPIOB(3)
+#define SD0_CMD _GPIOB(4)
+#define SD0_CLK _GPIOB(5)
+#define SD1_CMD _GPIOB(6)
+#define SD1_CLK _GPIOB(7)
+#define SPI0_SCLK _GPIOB(8)
+#define SPI0_SS _GPIOB(9)
+#define SPI0_MISO _GPIOB(10)
+#define SPI0_MOSI _GPIOB(11)
+#define UART0_RX _GPIOB(12)
+#define UART0_TX _GPIOB(13)
+#define UART2_RX _GPIOB(14)
+#define UART2_TX _GPIOB(15)
+#define UART2_RTSB _GPIOB(16)
+#define UART2_CTSB _GPIOB(17)
+#define UART4_RX _GPIOB(18)
+#define UART4_TX _GPIOB(19)
+#define I2C0_SCLK _GPIOB(20)
+#define I2C0_SDATA _GPIOB(21)
+#define I2C1_SCLK _GPIOB(22)
+#define I2C1_SDATA _GPIOB(23)
+#define I2C2_SCLK _GPIOB(24)
+#define I2C2_SDATA _GPIOB(25)
+#define CSI0_DN0 _GPIOB(26)
+#define CSI0_DP0 _GPIOB(27)
+#define CSI0_DN1 _GPIOB(28)
+#define CSI0_DP1 _GPIOB(29)
+#define CSI0_CN _GPIOB(30)
+#define CSI0_CP _GPIOB(31)
+
+#define CSI0_DN2 _GPIOC(0)
+#define CSI0_DP2 _GPIOC(1)
+#define CSI0_DN3 _GPIOC(2)
+#define CSI0_DP3 _GPIOC(3)
+#define SENSOR0_PCLK _GPIOC(4)
+#define CSI1_DN0 _GPIOC(5)
+#define CSI1_DP0 _GPIOC(6)
+#define CSI1_DN1 _GPIOC(7)
+#define CSI1_DP1 _GPIOC(8)
+#define CSI1_CN _GPIOC(9)
+#define CSI1_CP _GPIOC(10)
+#define SENSOR0_CKOUT _GPIOC(11)
+
+#define LVDS_OEP _GPIOD(0)
+#define LVDS_OEN _GPIOD(1)
+#define LVDS_ODP _GPIOD(2)
+#define LVDS_ODN _GPIOD(3)
+#define LVDS_OCP _GPIOD(4)
+#define LVDS_OCN _GPIOD(5)
+#define LVDS_OBP _GPIOD(6)
+#define LVDS_OBN _GPIOD(7)
+#define LVDS_OAP _GPIOD(8)
+#define LVDS_OAN _GPIOD(9)
+#define LVDS_EEP _GPIOD(10)
+#define LVDS_EEN _GPIOD(11)
+#define LVDS_EDP _GPIOD(12)
+#define LVDS_EDN _GPIOD(13)
+#define LVDS_ECP _GPIOD(14)
+#define LVDS_ECN _GPIOD(15)
+#define LVDS_EBP _GPIOD(16)
+#define LVDS_EBN _GPIOD(17)
+#define LVDS_EAP _GPIOD(18)
+#define LVDS_EAN _GPIOD(19)
+#define DSI_DP3 _GPIOD(20)
+#define DSI_DN3 _GPIOD(21)
+#define DSI_DP1 _GPIOD(22)
+#define DSI_DN1 _GPIOD(23)
+#define DSI_CP _GPIOD(24)
+#define DSI_CN _GPIOD(25)
+#define DSI_DP0 _GPIOD(26)
+#define DSI_DN0 _GPIOD(27)
+#define DSI_DP2 _GPIOD(28)
+#define DSI_DN2 _GPIOD(29)
+
+#define NAND0_D0 _GPIOE(0)
+#define NAND0_D1 _GPIOE(1)
+#define NAND0_D2 _GPIOE(2)
+#define NAND0_D3 _GPIOE(3)
+#define NAND0_D4 _GPIOE(4)
+#define NAND0_D5 _GPIOE(5)
+#define NAND0_D6 _GPIOE(6)
+#define NAND0_D7 _GPIOE(7)
+#define NAND0_DQS _GPIOE(8)
+#define NAND0_DQSN _GPIOE(9)
+#define NAND0_ALE _GPIOE(10)
+#define NAND0_CLE _GPIOE(11)
+#define NAND0_CEB0 _GPIOE(12)
+#define NAND0_CEB1 _GPIOE(13)
+#define NAND0_CEB2 _GPIOE(14)
+#define NAND0_CEB3 _GPIOE(15)
+#define NAND1_D0 _GPIOE(16)
+#define NAND1_D1 _GPIOE(17)
+#define NAND1_D2 _GPIOE(18)
+#define NAND1_D3 _GPIOE(19)
+#define NAND1_D4 _GPIOE(20)
+#define NAND1_D5 _GPIOE(21)
+#define NAND1_D6 _GPIOE(22)
+#define NAND1_D7 _GPIOE(23)
+#define NAND1_DQS _GPIOE(24)
+#define NAND1_DQSN _GPIOE(25)
+#define NAND1_ALE _GPIOE(26)
+#define NAND1_CLE _GPIOE(27)
+#define NAND1_CEB0 _GPIOE(28)
+#define NAND1_CEB1 _GPIOE(29)
+#define NAND1_CEB2 _GPIOE(30)
+#define NAND1_CEB3 _GPIOE(31)
+
+#define PCM1_IN _GPIOF(0)
+#define PCM1_CLK _GPIOF(1)
+#define PCM1_SYNC _GPIOF(2)
+#define PCM1_OUT _GPIOF(3)
+#define UART3_RX _GPIOF(4)
+#define UART3_TX _GPIOF(5)
+#define UART3_RTSB _GPIOF(6)
+#define UART3_CTSB _GPIOF(7)
+
+/* System */
+#define SGPIO0 _PIN(0)
+#define SGPIO1 _PIN(1)
+#define SGPIO2 _PIN(2)
+#define SGPIO3 _PIN(3)
+
+#define NUM_PADS (_PIN(3) + 1)
+
+/* Pad names as specified in datasheet */
+static const struct pinctrl_pin_desc s900_pads[] = {
+ PINCTRL_PIN(ETH_TXD0, "eth_txd0"),
+ PINCTRL_PIN(ETH_TXD1, "eth_txd1"),
+ PINCTRL_PIN(ETH_TXEN, "eth_txen"),
+ PINCTRL_PIN(ETH_RXER, "eth_rxer"),
+ PINCTRL_PIN(ETH_CRS_DV, "eth_crs_dv"),
+ PINCTRL_PIN(ETH_RXD1, "eth_rxd1"),
+ PINCTRL_PIN(ETH_RXD0, "eth_rxd0"),
+ PINCTRL_PIN(ETH_REF_CLK, "eth_ref_clk"),
+ PINCTRL_PIN(ETH_MDC, "eth_mdc"),
+ PINCTRL_PIN(ETH_MDIO, "eth_mdio"),
+ PINCTRL_PIN(SIRQ0, "sirq0"),
+ PINCTRL_PIN(SIRQ1, "sirq1"),
+ PINCTRL_PIN(SIRQ2, "sirq2"),
+ PINCTRL_PIN(I2S_D0, "i2s_d0"),
+ PINCTRL_PIN(I2S_BCLK0, "i2s_bclk0"),
+ PINCTRL_PIN(I2S_LRCLK0, "i2s_lrclk0"),
+ PINCTRL_PIN(I2S_MCLK0, "i2s_mclk0"),
+ PINCTRL_PIN(I2S_D1, "i2s_d1"),
+ PINCTRL_PIN(I2S_BCLK1, "i2s_bclk1"),
+ PINCTRL_PIN(I2S_LRCLK1, "i2s_lrclk1"),
+ PINCTRL_PIN(I2S_MCLK1, "i2s_mclk1"),
+ PINCTRL_PIN(PCM1_IN, "pcm1_in"),
+ PINCTRL_PIN(PCM1_CLK, "pcm1_clk"),
+ PINCTRL_PIN(PCM1_SYNC, "pcm1_sync"),
+ PINCTRL_PIN(PCM1_OUT, "pcm1_out"),
+ PINCTRL_PIN(ERAM_A5, "eram_a5"),
+ PINCTRL_PIN(ERAM_A6, "eram_a6"),
+ PINCTRL_PIN(ERAM_A7, "eram_a7"),
+ PINCTRL_PIN(ERAM_A8, "eram_a8"),
+ PINCTRL_PIN(ERAM_A9, "eram_a9"),
+ PINCTRL_PIN(ERAM_A10, "eram_a10"),
+ PINCTRL_PIN(ERAM_A11, "eram_a11"),
+ PINCTRL_PIN(LVDS_OEP, "lvds_oep"),
+ PINCTRL_PIN(LVDS_OEN, "lvds_oen"),
+ PINCTRL_PIN(LVDS_ODP, "lvds_odp"),
+ PINCTRL_PIN(LVDS_ODN, "lvds_odn"),
+ PINCTRL_PIN(LVDS_OCP, "lvds_ocp"),
+ PINCTRL_PIN(LVDS_OCN, "lvds_ocn"),
+ PINCTRL_PIN(LVDS_OBP, "lvds_obp"),
+ PINCTRL_PIN(LVDS_OBN, "lvds_obn"),
+ PINCTRL_PIN(LVDS_OAP, "lvds_oap"),
+ PINCTRL_PIN(LVDS_OAN, "lvds_oan"),
+ PINCTRL_PIN(LVDS_EEP, "lvds_eep"),
+ PINCTRL_PIN(LVDS_EEN, "lvds_een"),
+ PINCTRL_PIN(LVDS_EDP, "lvds_edp"),
+ PINCTRL_PIN(LVDS_EDN, "lvds_edn"),
+ PINCTRL_PIN(LVDS_ECP, "lvds_ecp"),
+ PINCTRL_PIN(LVDS_ECN, "lvds_ecn"),
+ PINCTRL_PIN(LVDS_EBP, "lvds_ebp"),
+ PINCTRL_PIN(LVDS_EBN, "lvds_ebn"),
+ PINCTRL_PIN(LVDS_EAP, "lvds_eap"),
+ PINCTRL_PIN(LVDS_EAN, "lvds_ean"),
+ PINCTRL_PIN(SD0_D0, "sd0_d0"),
+ PINCTRL_PIN(SD0_D1, "sd0_d1"),
+ PINCTRL_PIN(SD0_D2, "sd0_d2"),
+ PINCTRL_PIN(SD0_D3, "sd0_d3"),
+ PINCTRL_PIN(SD1_D0, "sd1_d0"),
+ PINCTRL_PIN(SD1_D1, "sd1_d1"),
+ PINCTRL_PIN(SD1_D2, "sd1_d2"),
+ PINCTRL_PIN(SD1_D3, "sd1_d3"),
+ PINCTRL_PIN(SD0_CMD, "sd0_cmd"),
+ PINCTRL_PIN(SD0_CLK, "sd0_clk"),
+ PINCTRL_PIN(SD1_CMD, "sd1_cmd"),
+ PINCTRL_PIN(SD1_CLK, "sd1_clk"),
+ PINCTRL_PIN(SPI0_SCLK, "spi0_sclk"),
+ PINCTRL_PIN(SPI0_SS, "spi0_ss"),
+ PINCTRL_PIN(SPI0_MISO, "spi0_miso"),
+ PINCTRL_PIN(SPI0_MOSI, "spi0_mosi"),
+ PINCTRL_PIN(UART0_RX, "uart0_rx"),
+ PINCTRL_PIN(UART0_TX, "uart0_tx"),
+ PINCTRL_PIN(UART2_RX, "uart2_rx"),
+ PINCTRL_PIN(UART2_TX, "uart2_tx"),
+ PINCTRL_PIN(UART2_RTSB, "uart2_rtsb"),
+ PINCTRL_PIN(UART2_CTSB, "uart2_ctsb"),
+ PINCTRL_PIN(UART3_RX, "uart3_rx"),
+ PINCTRL_PIN(UART3_TX, "uart3_tx"),
+ PINCTRL_PIN(UART3_RTSB, "uart3_rtsb"),
+ PINCTRL_PIN(UART3_CTSB, "uart3_ctsb"),
+ PINCTRL_PIN(UART4_RX, "uart4_rx"),
+ PINCTRL_PIN(UART4_TX, "uart4_tx"),
+ PINCTRL_PIN(I2C0_SCLK, "i2c0_sclk"),
+ PINCTRL_PIN(I2C0_SDATA, "i2c0_sdata"),
+ PINCTRL_PIN(I2C1_SCLK, "i2c1_sclk"),
+ PINCTRL_PIN(I2C1_SDATA, "i2c1_sdata"),
+ PINCTRL_PIN(I2C2_SCLK, "i2c2_sclk"),
+ PINCTRL_PIN(I2C2_SDATA, "i2c2_sdata"),
+ PINCTRL_PIN(CSI0_DN0, "csi0_dn0"),
+ PINCTRL_PIN(CSI0_DP0, "csi0_dp0"),
+ PINCTRL_PIN(CSI0_DN1, "csi0_dn1"),
+ PINCTRL_PIN(CSI0_DP1, "csi0_dp1"),
+ PINCTRL_PIN(CSI0_CN, "csi0_cn"),
+ PINCTRL_PIN(CSI0_CP, "csi0_cp"),
+ PINCTRL_PIN(CSI0_DN2, "csi0_dn2"),
+ PINCTRL_PIN(CSI0_DP2, "csi0_dp2"),
+ PINCTRL_PIN(CSI0_DN3, "csi0_dn3"),
+ PINCTRL_PIN(CSI0_DP3, "csi0_dp3"),
+ PINCTRL_PIN(DSI_DP3, "dsi_dp3"),
+ PINCTRL_PIN(DSI_DN3, "dsi_dn3"),
+ PINCTRL_PIN(DSI_DP1, "dsi_dp1"),
+ PINCTRL_PIN(DSI_DN1, "dsi_dn1"),
+ PINCTRL_PIN(DSI_CP, "dsi_cp"),
+ PINCTRL_PIN(DSI_CN, "dsi_cn"),
+ PINCTRL_PIN(DSI_DP0, "dsi_dp0"),
+ PINCTRL_PIN(DSI_DN0, "dsi_dn0"),
+ PINCTRL_PIN(DSI_DP2, "dsi_dp2"),
+ PINCTRL_PIN(DSI_DN2, "dsi_dn2"),
+ PINCTRL_PIN(SENSOR0_PCLK, "sensor0_pclk"),
+ PINCTRL_PIN(CSI1_DN0, "csi1_dn0"),
+ PINCTRL_PIN(CSI1_DP0, "csi1_dp0"),
+ PINCTRL_PIN(CSI1_DN1, "csi1_dn1"),
+ PINCTRL_PIN(CSI1_DP1, "csi1_dp1"),
+ PINCTRL_PIN(CSI1_CN, "csi1_cn"),
+ PINCTRL_PIN(CSI1_CP, "csi1_cp"),
+ PINCTRL_PIN(SENSOR0_CKOUT, "sensor0_ckout"),
+ PINCTRL_PIN(NAND0_D0, "nand0_d0"),
+ PINCTRL_PIN(NAND0_D1, "nand0_d1"),
+ PINCTRL_PIN(NAND0_D2, "nand0_d2"),
+ PINCTRL_PIN(NAND0_D3, "nand0_d3"),
+ PINCTRL_PIN(NAND0_D4, "nand0_d4"),
+ PINCTRL_PIN(NAND0_D5, "nand0_d5"),
+ PINCTRL_PIN(NAND0_D6, "nand0_d6"),
+ PINCTRL_PIN(NAND0_D7, "nand0_d7"),
+ PINCTRL_PIN(NAND0_DQS, "nand0_dqs"),
+ PINCTRL_PIN(NAND0_DQSN, "nand0_dqsn"),
+ PINCTRL_PIN(NAND0_ALE, "nand0_ale"),
+ PINCTRL_PIN(NAND0_CLE, "nand0_cle"),
+ PINCTRL_PIN(NAND0_CEB0, "nand0_ceb0"),
+ PINCTRL_PIN(NAND0_CEB1, "nand0_ceb1"),
+ PINCTRL_PIN(NAND0_CEB2, "nand0_ceb2"),
+ PINCTRL_PIN(NAND0_CEB3, "nand0_ceb3"),
+ PINCTRL_PIN(NAND1_D0, "nand1_d0"),
+ PINCTRL_PIN(NAND1_D1, "nand1_d1"),
+ PINCTRL_PIN(NAND1_D2, "nand1_d2"),
+ PINCTRL_PIN(NAND1_D3, "nand1_d3"),
+ PINCTRL_PIN(NAND1_D4, "nand1_d4"),
+ PINCTRL_PIN(NAND1_D5, "nand1_d5"),
+ PINCTRL_PIN(NAND1_D6, "nand1_d6"),
+ PINCTRL_PIN(NAND1_D7, "nand1_d7"),
+ PINCTRL_PIN(NAND1_DQS, "nand1_dqs"),
+ PINCTRL_PIN(NAND1_DQSN, "nand1_dqsn"),
+ PINCTRL_PIN(NAND1_ALE, "nand1_ale"),
+ PINCTRL_PIN(NAND1_CLE, "nand1_cle"),
+ PINCTRL_PIN(NAND1_CEB0, "nand1_ceb0"),
+ PINCTRL_PIN(NAND1_CEB1, "nand1_ceb1"),
+ PINCTRL_PIN(NAND1_CEB2, "nand1_ceb2"),
+ PINCTRL_PIN(NAND1_CEB3, "nand1_ceb3"),
+ PINCTRL_PIN(SGPIO0, "sgpio0"),
+ PINCTRL_PIN(SGPIO1, "sgpio1"),
+ PINCTRL_PIN(SGPIO2, "sgpio2"),
+ PINCTRL_PIN(SGPIO3, "sgpio3")
+};
+
+enum s900_pinmux_functions {
+ S900_MUX_ERAM,
+ S900_MUX_ETH_RMII,
+ S900_MUX_ETH_SMII,
+ S900_MUX_SPI0,
+ S900_MUX_SPI1,
+ S900_MUX_SPI2,
+ S900_MUX_SPI3,
+ S900_MUX_SENS0,
+ S900_MUX_UART0,
+ S900_MUX_UART1,
+ S900_MUX_UART2,
+ S900_MUX_UART3,
+ S900_MUX_UART4,
+ S900_MUX_UART5,
+ S900_MUX_UART6,
+ S900_MUX_I2S0,
+ S900_MUX_I2S1,
+ S900_MUX_PCM0,
+ S900_MUX_PCM1,
+ S900_MUX_JTAG,
+ S900_MUX_PWM0,
+ S900_MUX_PWM1,
+ S900_MUX_PWM2,
+ S900_MUX_PWM3,
+ S900_MUX_PWM4,
+ S900_MUX_PWM5,
+ S900_MUX_SD0,
+ S900_MUX_SD1,
+ S900_MUX_SD2,
+ S900_MUX_SD3,
+ S900_MUX_I2C0,
+ S900_MUX_I2C1,
+ S900_MUX_I2C2,
+ S900_MUX_I2C3,
+ S900_MUX_I2C4,
+ S900_MUX_I2C5,
+ S900_MUX_LVDS,
+ S900_MUX_USB20,
+ S900_MUX_USB30,
+ S900_MUX_GPU,
+ S900_MUX_MIPI_CSI0,
+ S900_MUX_MIPI_CSI1,
+ S900_MUX_MIPI_DSI,
+ S900_MUX_NAND0,
+ S900_MUX_NAND1,
+ S900_MUX_SPDIF,
+ S900_MUX_SIRQ0,
+ S900_MUX_SIRQ1,
+ S900_MUX_SIRQ2,
+ S900_MUX_AUX_START,
+ S900_MUX_MAX,
+ S900_MUX_RESERVED
+};
+
+/* mfp0_22 */
+static unsigned int lvds_oxx_uart4_mfp_pads[] = { LVDS_OAP, LVDS_OAN };
+static unsigned int lvds_oxx_uart4_mfp_funcs[] = { S900_MUX_ERAM,
+ S900_MUX_UART4 };
+/* mfp0_21_20 */
+static unsigned int rmii_mdc_mfp_pads[] = { ETH_MDC };
+static unsigned int rmii_mdc_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_PWM2,
+ S900_MUX_UART2,
+ S900_MUX_RESERVED };
+static unsigned int rmii_mdio_mfp_pads[] = { ETH_MDIO };
+static unsigned int rmii_mdio_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_PWM3,
+ S900_MUX_UART2,
+ S900_MUX_RESERVED };
+/* mfp0_19 */
+static unsigned int sirq0_mfp_pads[] = { SIRQ0 };
+static unsigned int sirq0_mfp_funcs[] = { S900_MUX_SIRQ0,
+ S900_MUX_PWM0 };
+static unsigned int sirq1_mfp_pads[] = { SIRQ1 };
+static unsigned int sirq1_mfp_funcs[] = { S900_MUX_SIRQ1,
+ S900_MUX_PWM1 };
+/* mfp0_18_16 */
+static unsigned int rmii_txd0_mfp_pads[] = { ETH_TXD0 };
+static unsigned int rmii_txd0_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_ETH_SMII,
+ S900_MUX_SPI2,
+ S900_MUX_UART6,
+ S900_MUX_SENS0,
+ S900_MUX_PWM0 };
+static unsigned int rmii_txd1_mfp_pads[] = { ETH_TXD1 };
+static unsigned int rmii_txd1_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_ETH_SMII,
+ S900_MUX_SPI2,
+ S900_MUX_UART6,
+ S900_MUX_SENS0,
+ S900_MUX_PWM1 };
+/* mfp0_15_13 */
+static unsigned int rmii_txen_mfp_pads[] = { ETH_TXEN };
+static unsigned int rmii_txen_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART2,
+ S900_MUX_SPI3,
+ S900_MUX_RESERVED,
+ S900_MUX_RESERVED,
+ S900_MUX_PWM2,
+ S900_MUX_SENS0 };
+
+static unsigned int rmii_rxer_mfp_pads[] = { ETH_RXER };
+static unsigned int rmii_rxer_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART2,
+ S900_MUX_SPI3,
+ S900_MUX_RESERVED,
+ S900_MUX_RESERVED,
+ S900_MUX_PWM3,
+ S900_MUX_SENS0 };
+/* mfp0_12_11 */
+static unsigned int rmii_crs_dv_mfp_pads[] = { ETH_CRS_DV };
+static unsigned int rmii_crs_dv_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_ETH_SMII,
+ S900_MUX_SPI2,
+ S900_MUX_UART4 };
+/* mfp0_10_8 */
+static unsigned int rmii_rxd1_mfp_pads[] = { ETH_RXD1 };
+static unsigned int rmii_rxd1_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART2,
+ S900_MUX_SPI3,
+ S900_MUX_RESERVED,
+ S900_MUX_UART5,
+ S900_MUX_PWM0,
+ S900_MUX_SENS0 };
+static unsigned int rmii_rxd0_mfp_pads[] = { ETH_RXD0 };
+static unsigned int rmii_rxd0_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART2,
+ S900_MUX_SPI3,
+ S900_MUX_RESERVED,
+ S900_MUX_UART5,
+ S900_MUX_PWM1,
+ S900_MUX_SENS0 };
+/* mfp0_7_6 */
+static unsigned int rmii_ref_clk_mfp_pads[] = { ETH_REF_CLK };
+static unsigned int rmii_ref_clk_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART4,
+ S900_MUX_SPI2,
+ S900_MUX_RESERVED };
+/* mfp0_5 */
+static unsigned int i2s_d0_mfp_pads[] = { I2S_D0 };
+static unsigned int i2s_d0_mfp_funcs[] = { S900_MUX_I2S0,
+ S900_MUX_PCM0 };
+static unsigned int i2s_d1_mfp_pads[] = { I2S_D1 };
+static unsigned int i2s_d1_mfp_funcs[] = { S900_MUX_I2S1,
+ S900_MUX_PCM0 };
+
+/* mfp0_4_3 */
+static unsigned int i2s_lr_m_clk0_mfp_pads[] = { I2S_LRCLK0,
+ I2S_MCLK0 };
+static unsigned int i2s_lr_m_clk0_mfp_funcs[] = { S900_MUX_I2S0,
+ S900_MUX_PCM0,
+ S900_MUX_PCM1,
+ S900_MUX_RESERVED };
+/* mfp0_2 */
+static unsigned int i2s_bclk0_mfp_pads[] = { I2S_BCLK0 };
+static unsigned int i2s_bclk0_mfp_funcs[] = { S900_MUX_I2S0,
+ S900_MUX_PCM0 };
+static unsigned int i2s_bclk1_mclk1_mfp_pads[] = { I2S_BCLK1,
+ I2S_LRCLK1,
+ I2S_MCLK1 };
+static unsigned int i2s_bclk1_mclk1_mfp_funcs[] = { S900_MUX_I2S1,
+ S900_MUX_PCM0 };
+/* mfp0_1_0 */
+static unsigned int pcm1_in_out_mfp_pads[] = { PCM1_IN,
+ PCM1_OUT };
+static unsigned int pcm1_in_out_mfp_funcs[] = { S900_MUX_PCM1,
+ S900_MUX_SPI1,
+ S900_MUX_I2C3,
+ S900_MUX_UART4 };
+static unsigned int pcm1_clk_mfp_pads[] = { PCM1_CLK };
+static unsigned int pcm1_clk_mfp_funcs[] = { S900_MUX_PCM1,
+ S900_MUX_SPI1,
+ S900_MUX_PWM4,
+ S900_MUX_UART4 };
+static unsigned int pcm1_sync_mfp_pads[] = { PCM1_SYNC };
+static unsigned int pcm1_sync_mfp_funcs[] = { S900_MUX_PCM1,
+ S900_MUX_SPI1,
+ S900_MUX_PWM5,
+ S900_MUX_UART4 };
+/* mfp1_31_29 */
+static unsigned int eram_a5_mfp_pads[] = { ERAM_A5 };
+static unsigned int eram_a5_mfp_funcs[] = { S900_MUX_UART4,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_PWM0,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0 };
+static unsigned int eram_a6_mfp_pads[] = { ERAM_A6 };
+static unsigned int eram_a6_mfp_funcs[] = { S900_MUX_UART4,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_PWM1,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0,
+};
+static unsigned int eram_a7_mfp_pads[] = { ERAM_A7 };
+static unsigned int eram_a7_mfp_funcs[] = { S900_MUX_RESERVED,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_RESERVED,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0 };
+/* mfp1_28_26 */
+static unsigned int eram_a8_mfp_pads[] = { ERAM_A8 };
+static unsigned int eram_a8_mfp_funcs[] = { S900_MUX_RESERVED,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_PWM1,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0 };
+static unsigned int eram_a9_mfp_pads[] = { ERAM_A9 };
+static unsigned int eram_a9_mfp_funcs[] = { S900_MUX_USB20,
+ S900_MUX_UART5,
+ S900_MUX_ERAM,
+ S900_MUX_PWM2,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0 };
+static unsigned int eram_a10_mfp_pads[] = { ERAM_A10 };
+static unsigned int eram_a10_mfp_funcs[] = { S900_MUX_USB30,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_PWM3,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0,
+ S900_MUX_RESERVED,
+ S900_MUX_RESERVED };
+/* mfp1_25_23 */
+static unsigned int eram_a11_mfp_pads[] = { ERAM_A11 };
+static unsigned int eram_a11_mfp_funcs[] = { S900_MUX_RESERVED,
+ S900_MUX_RESERVED,
+ S900_MUX_ERAM,
+ S900_MUX_PWM2,
+ S900_MUX_UART5,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0,
+ S900_MUX_RESERVED };
+/* mfp1_22 */
+static unsigned int lvds_oep_odn_mfp_pads[] = { LVDS_OEP,
+ LVDS_OEN,
+ LVDS_ODP,
+ LVDS_ODN };
+static unsigned int lvds_oep_odn_mfp_funcs[] = { S900_MUX_LVDS,
+ S900_MUX_UART2 };
+static unsigned int lvds_ocp_obn_mfp_pads[] = { LVDS_OCP,
+ LVDS_OCN,
+ LVDS_OBP,
+ LVDS_OBN };
+static unsigned int lvds_ocp_obn_mfp_funcs[] = { S900_MUX_LVDS,
+ S900_MUX_PCM1 };
+static unsigned int lvds_oap_oan_mfp_pads[] = { LVDS_OAP,
+ LVDS_OAN };
+static unsigned int lvds_oap_oan_mfp_funcs[] = { S900_MUX_LVDS,
+ S900_MUX_ERAM };
+/* mfp1_21 */
+static unsigned int lvds_e_mfp_pads[] = { LVDS_EEP,
+ LVDS_EEN,
+ LVDS_EDP,
+ LVDS_EDN,
+ LVDS_ECP,
+ LVDS_ECN,
+ LVDS_EBP,
+ LVDS_EBN,
+ LVDS_EAP,
+ LVDS_EAN };
+static unsigned int lvds_e_mfp_funcs[] = { S900_MUX_LVDS,
+ S900_MUX_ERAM };
+/* mfp1_5_4 */
+static unsigned int spi0_sclk_mosi_mfp_pads[] = { SPI0_SCLK,
+ SPI0_MOSI };
+static unsigned int spi0_sclk_mosi_mfp_funcs[] = { S900_MUX_SPI0,
+ S900_MUX_ERAM,
+ S900_MUX_I2C3,
+ S900_MUX_PCM0 };
+/* mfp1_3_1 */
+static unsigned int spi0_ss_mfp_pads[] = { SPI0_SS };
+static unsigned int spi0_ss_mfp_funcs[] = { S900_MUX_SPI0,
+ S900_MUX_ERAM,
+ S900_MUX_I2S1,
+ S900_MUX_PCM1,
+ S900_MUX_PCM0,
+ S900_MUX_PWM4 };
+static unsigned int spi0_miso_mfp_pads[] = { SPI0_MISO };
+static unsigned int spi0_miso_mfp_funcs[] = { S900_MUX_SPI0,
+ S900_MUX_ERAM,
+ S900_MUX_I2S1,
+ S900_MUX_PCM1,
+ S900_MUX_PCM0,
+ S900_MUX_PWM5 };
+/* mfp2_23 */
+static unsigned int uart2_rtsb_mfp_pads[] = { UART2_RTSB };
+static unsigned int uart2_rtsb_mfp_funcs[] = { S900_MUX_UART2,
+ S900_MUX_UART0 };
+/* mfp2_22 */
+static unsigned int uart2_ctsb_mfp_pads[] = { UART2_CTSB };
+static unsigned int uart2_ctsb_mfp_funcs[] = { S900_MUX_UART2,
+ S900_MUX_UART0 };
+/* mfp2_21 */
+static unsigned int uart3_rtsb_mfp_pads[] = { UART3_RTSB };
+static unsigned int uart3_rtsb_mfp_funcs[] = { S900_MUX_UART3,
+ S900_MUX_UART5 };
+/* mfp2_20 */
+static unsigned int uart3_ctsb_mfp_pads[] = { UART3_CTSB };
+static unsigned int uart3_ctsb_mfp_funcs[] = { S900_MUX_UART3,
+ S900_MUX_UART5 };
+/* mfp2_19_17 */
+static unsigned int sd0_d0_mfp_pads[] = { SD0_D0 };
+static unsigned int sd0_d0_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_RESERVED,
+ S900_MUX_JTAG,
+ S900_MUX_UART2,
+ S900_MUX_UART5,
+ S900_MUX_GPU };
+/* mfp2_16_14 */
+static unsigned int sd0_d1_mfp_pads[] = { SD0_D1 };
+static unsigned int sd0_d1_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_GPU,
+ S900_MUX_RESERVED,
+ S900_MUX_UART2,
+ S900_MUX_UART5 };
+/* mfp2_13_11 */
+static unsigned int sd0_d2_d3_mfp_pads[] = { SD0_D2,
+ SD0_D3 };
+static unsigned int sd0_d2_d3_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_RESERVED,
+ S900_MUX_JTAG,
+ S900_MUX_UART2,
+ S900_MUX_UART1,
+ S900_MUX_GPU };
+/* mfp2_10_9 */
+static unsigned int sd1_d0_d3_mfp_pads[] = { SD1_D0, SD1_D1,
+ SD1_D2, SD1_D3 };
+static unsigned int sd1_d0_d3_mfp_funcs[] = { S900_MUX_SD1,
+ S900_MUX_ERAM };
+/* mfp2_8_7 */
+static unsigned int sd0_cmd_mfp_pads[] = { SD0_CMD };
+static unsigned int sd0_cmd_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_GPU,
+ S900_MUX_JTAG };
+/* mfp2_6_5 */
+static unsigned int sd0_clk_mfp_pads[] = { SD0_CLK };
+static unsigned int sd0_clk_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_JTAG,
+ S900_MUX_GPU };
+/* mfp2_4_3 */
+static unsigned int sd1_cmd_clk_mfp_pads[] = { SD1_CMD, SD1_CLK };
+static unsigned int sd1_cmd_clk_mfp_funcs[] = { S900_MUX_SD1,
+ S900_MUX_ERAM };
+/* mfp2_2_0 */
+static unsigned int uart0_rx_mfp_pads[] = { UART0_RX };
+static unsigned int uart0_rx_mfp_funcs[] = { S900_MUX_UART0,
+ S900_MUX_UART2,
+ S900_MUX_SPI1,
+ S900_MUX_I2C5,
+ S900_MUX_PCM1,
+ S900_MUX_I2S1 };
+/* mfp3_27 */
+static unsigned int nand0_d0_ceb3_mfp_pads[] = { NAND0_D0, NAND0_D1,
+ NAND0_D2, NAND0_D3,
+ NAND0_D4, NAND0_D5,
+ NAND0_D6, NAND0_D7,
+ NAND0_DQSN, NAND0_CEB3 };
+static unsigned int nand0_d0_ceb3_mfp_funcs[] = { S900_MUX_NAND0,
+ S900_MUX_SD2 };
+/* mfp3_21_19 */
+static unsigned int uart0_tx_mfp_pads[] = { UART0_TX };
+static unsigned int uart0_tx_mfp_funcs[] = { S900_MUX_UART0,
+ S900_MUX_UART2,
+ S900_MUX_SPI1,
+ S900_MUX_I2C5,
+ S900_MUX_SPDIF,
+ S900_MUX_PCM1,
+ S900_MUX_I2S1 };
+/* mfp3_18_16 */
+static unsigned int i2c0_mfp_pads[] = { I2C0_SCLK, I2C0_SDATA };
+static unsigned int i2c0_mfp_funcs[] = { S900_MUX_I2C0,
+ S900_MUX_UART2,
+ S900_MUX_I2C1,
+ S900_MUX_UART1,
+ S900_MUX_SPI1 };
+/* mfp3_15 */
+static unsigned int csi0_cn_cp_mfp_pads[] = { CSI0_CN, CSI0_CP };
+/*
+ * NOTE(review): both selector values of this 1-bit MFP field map to
+ * S900_MUX_SENS0, i.e. the field value does not change the function.
+ * Looks intentional, but confirm against the S900 datasheet.
+ */
+static unsigned int csi0_cn_cp_mfp_funcs[] = { S900_MUX_SENS0,
+ S900_MUX_SENS0 };
+/* mfp3_14 */
+static unsigned int csi0_dn0_dp3_mfp_pads[] = { CSI0_DN0, CSI0_DP0,
+ CSI0_DN1, CSI0_DP1,
+ CSI0_CN, CSI0_CP,
+ CSI0_DP2, CSI0_DN2,
+ CSI0_DN3, CSI0_DP3 };
+static unsigned int csi0_dn0_dp3_mfp_funcs[] = { S900_MUX_MIPI_CSI0,
+ S900_MUX_SENS0 };
+/* mfp3_13 */
+static unsigned int csi1_dn0_cp_mfp_pads[] = { CSI1_DN0, CSI1_DP0,
+ CSI1_DN1, CSI1_DP1,
+ CSI1_CN, CSI1_CP };
+static unsigned int csi1_dn0_cp_mfp_funcs[] = { S900_MUX_MIPI_CSI1,
+ S900_MUX_SENS0 };
+/* mfp3_12_dsi */
+static unsigned int dsi_dp3_dn1_mfp_pads[] = { DSI_DP3, DSI_DN2,
+ DSI_DP1, DSI_DN1 };
+static unsigned int dsi_dp3_dn1_mfp_funcs[] = { S900_MUX_MIPI_DSI,
+ S900_MUX_UART2 };
+static unsigned int dsi_cp_dn0_mfp_pads[] = { DSI_CP, DSI_CN,
+ DSI_DP0, DSI_DN0 };
+static unsigned int dsi_cp_dn0_mfp_funcs[] = { S900_MUX_MIPI_DSI,
+ S900_MUX_PCM1 };
+static unsigned int dsi_dp2_dn2_mfp_pads[] = { DSI_DP2, DSI_DN2 };
+static unsigned int dsi_dp2_dn2_mfp_funcs[] = { S900_MUX_MIPI_DSI,
+ S900_MUX_UART4 };
+/* mfp3_11 */
+static unsigned int nand1_d0_ceb1_mfp_pads[] = { NAND1_D0, NAND1_D1,
+ NAND1_D2, NAND1_D3,
+ NAND1_D4, NAND1_D5,
+ NAND1_D6, NAND1_D7,
+ NAND1_DQSN, NAND1_CEB1 };
+static unsigned int nand1_d0_ceb1_mfp_funcs[] = { S900_MUX_NAND1,
+ S900_MUX_SD3 };
+/* mfp3_10 */
+static unsigned int nand1_ceb3_mfp_pads[] = { NAND1_CEB3 };
+static unsigned int nand1_ceb3_mfp_funcs[] = { S900_MUX_NAND1,
+ S900_MUX_PWM0 };
+static unsigned int nand1_ceb0_mfp_pads[] = { NAND1_CEB0 };
+static unsigned int nand1_ceb0_mfp_funcs[] = { S900_MUX_NAND1,
+ S900_MUX_PWM1 };
+/* mfp3_9 */
+static unsigned int csi1_dn0_dp0_mfp_pads[] = { CSI1_DN0, CSI1_DP0 };
+/*
+ * NOTE(review): both selector values of this 1-bit MFP field map to
+ * S900_MUX_SENS0 (same as csi0_cn_cp_mfp_funcs) — presumably the
+ * function is fixed regardless of the bit; verify against datasheet.
+ */
+static unsigned int csi1_dn0_dp0_mfp_funcs[] = { S900_MUX_SENS0,
+ S900_MUX_SENS0 };
+/* mfp3_8 */
+static unsigned int uart4_rx_tx_mfp_pads[] = { UART4_RX, UART4_TX };
+static unsigned int uart4_rx_tx_mfp_funcs[] = { S900_MUX_UART4,
+ S900_MUX_I2C4 };
+/* PADDRV group data */
+/* drv0 */
+static unsigned int sgpio3_drv_pads[] = { SGPIO3 };
+static unsigned int sgpio2_drv_pads[] = { SGPIO2 };
+static unsigned int sgpio1_drv_pads[] = { SGPIO1 };
+static unsigned int sgpio0_drv_pads[] = { SGPIO0 };
+static unsigned int rmii_tx_d0_d1_drv_pads[] = { ETH_TXD0, ETH_TXD1 };
+static unsigned int rmii_txen_rxer_drv_pads[] = { ETH_TXEN, ETH_RXER };
+static unsigned int rmii_crs_dv_drv_pads[] = { ETH_CRS_DV };
+static unsigned int rmii_rx_d1_d0_drv_pads[] = { ETH_RXD1, ETH_RXD0 };
+static unsigned int rmii_ref_clk_drv_pads[] = { ETH_REF_CLK };
+static unsigned int rmii_mdc_mdio_drv_pads[] = { ETH_MDC, ETH_MDIO };
+static unsigned int sirq_0_1_drv_pads[] = { SIRQ0, SIRQ1 };
+static unsigned int sirq2_drv_pads[] = { SIRQ2 };
+static unsigned int i2s_d0_d1_drv_pads[] = { I2S_D0, I2S_D1 };
+static unsigned int i2s_lr_m_clk0_drv_pads[] = { I2S_LRCLK0, I2S_MCLK0 };
+static unsigned int i2s_blk1_mclk1_drv_pads[] = { I2S_BCLK0, I2S_BCLK1,
+ I2S_LRCLK1, I2S_MCLK1 };
+static unsigned int pcm1_in_out_drv_pads[] = { PCM1_IN, PCM1_CLK,
+ PCM1_SYNC, PCM1_OUT };
+/* drv1 */
+static unsigned int lvds_oap_oan_drv_pads[] = { LVDS_OAP, LVDS_OAN };
+static unsigned int lvds_oep_odn_drv_pads[] = { LVDS_OEP, LVDS_OEN,
+ LVDS_ODP, LVDS_ODN };
+static unsigned int lvds_ocp_obn_drv_pads[] = { LVDS_OCP, LVDS_OCN,
+ LVDS_OBP, LVDS_OBN };
+static unsigned int lvds_e_drv_pads[] = { LVDS_EEP, LVDS_EEN,
+ LVDS_EDP, LVDS_EDN,
+ LVDS_ECP, LVDS_ECN,
+ LVDS_EBP, LVDS_EBN };
+static unsigned int sd0_d3_d0_drv_pads[] = { SD0_D3, SD0_D2,
+ SD0_D1, SD0_D0 };
+static unsigned int sd1_d3_d0_drv_pads[] = { SD1_D3, SD1_D2,
+ SD1_D1, SD1_D0 };
+static unsigned int sd0_sd1_cmd_clk_drv_pads[] = { SD0_CLK, SD0_CMD,
+ SD1_CLK, SD1_CMD };
+static unsigned int spi0_sclk_mosi_drv_pads[] = { SPI0_SCLK, SPI0_MOSI };
+static unsigned int spi0_ss_miso_drv_pads[] = { SPI0_SS, SPI0_MISO };
+static unsigned int uart0_rx_tx_drv_pads[] = { UART0_RX, UART0_TX };
+static unsigned int uart4_rx_tx_drv_pads[] = { UART4_RX, UART4_TX };
+static unsigned int uart2_drv_pads[] = { UART2_RX, UART2_TX,
+ UART2_RTSB, UART2_CTSB };
+static unsigned int uart3_drv_pads[] = { UART3_RX, UART3_TX,
+ UART3_RTSB, UART3_CTSB };
+/* drv2 */
+static unsigned int i2c0_drv_pads[] = { I2C0_SCLK, I2C0_SDATA };
+static unsigned int i2c1_drv_pads[] = { I2C1_SCLK, I2C1_SDATA };
+static unsigned int i2c2_drv_pads[] = { I2C2_SCLK, I2C2_SDATA };
+static unsigned int sensor0_drv_pads[] = { SENSOR0_PCLK,
+ SENSOR0_CKOUT };
+/* SR group data */
+/* sr0 */
+static unsigned int sgpio3_sr_pads[] = { SGPIO3 };
+static unsigned int sgpio2_sr_pads[] = { SGPIO2 };
+static unsigned int sgpio1_sr_pads[] = { SGPIO1 };
+static unsigned int sgpio0_sr_pads[] = { SGPIO0 };
+static unsigned int rmii_tx_d0_d1_sr_pads[] = { ETH_TXD0, ETH_TXD1 };
+static unsigned int rmii_txen_rxer_sr_pads[] = { ETH_TXEN, ETH_RXER };
+static unsigned int rmii_crs_dv_sr_pads[] = { ETH_CRS_DV };
+static unsigned int rmii_rx_d1_d0_sr_pads[] = { ETH_RXD1, ETH_RXD0 };
+static unsigned int rmii_ref_clk_sr_pads[] = { ETH_REF_CLK };
+static unsigned int rmii_mdc_mdio_sr_pads[] = { ETH_MDC, ETH_MDIO };
+static unsigned int sirq_0_1_sr_pads[] = { SIRQ0, SIRQ1 };
+static unsigned int sirq2_sr_pads[] = { SIRQ2 };
+static unsigned int i2s_do_d1_sr_pads[] = { I2S_D0, I2S_D1 };
+static unsigned int i2s_lr_m_clk0_sr_pads[] = { I2S_LRCLK0, I2S_MCLK0 };
+static unsigned int i2s_bclk0_mclk1_sr_pads[] = { I2S_BCLK0, I2S_BCLK1,
+ I2S_LRCLK1, I2S_MCLK1 };
+static unsigned int pcm1_in_out_sr_pads[] = { PCM1_IN, PCM1_CLK,
+ PCM1_SYNC, PCM1_OUT };
+/* sr1 */
+static unsigned int sd1_d3_d0_sr_pads[] = { SD1_D3, SD1_D2,
+ SD1_D1, SD1_D0 };
+static unsigned int sd0_sd1_clk_cmd_sr_pads[] = { SD0_CLK, SD0_CMD,
+ SD1_CLK, SD1_CMD };
+static unsigned int spi0_sclk_mosi_sr_pads[] = { SPI0_SCLK, SPI0_MOSI };
+static unsigned int spi0_ss_miso_sr_pads[] = { SPI0_SS, SPI0_MISO };
+static unsigned int uart0_rx_tx_sr_pads[] = { UART0_RX, UART0_TX };
+static unsigned int uart4_rx_tx_sr_pads[] = { UART4_RX, UART4_TX };
+static unsigned int uart2_sr_pads[] = { UART2_RX, UART2_TX,
+ UART2_RTSB, UART2_CTSB };
+static unsigned int uart3_sr_pads[] = { UART3_RX, UART3_TX,
+ UART3_RTSB, UART3_CTSB };
+/* sr2 */
+static unsigned int i2c0_sr_pads[] = { I2C0_SCLK, I2C0_SDATA };
+static unsigned int i2c1_sr_pads[] = { I2C1_SCLK, I2C1_SDATA };
+static unsigned int i2c2_sr_pads[] = { I2C2_SCLK, I2C2_SDATA };
+static unsigned int sensor0_sr_pads[] = { SENSOR0_PCLK,
+ SENSOR0_CKOUT };
+
+/*
+ * MUX_PG() - define an owl_pingroup whose function is selected through
+ * an MFP_CTL<reg> register field at [shift, shift+width).
+ *
+ * The group's registered name is the stringized group_name; its pad and
+ * function lists come from the matching group_name##_pads / _funcs
+ * arrays. Drive-strength (drv_*) and slew-rate (sr_*) fields are set to
+ * -1 to mark them as not applicable for a mux-only group.
+ */
+#define MUX_PG(group_name, reg, shift, width) \
+ { \
+ .name = #group_name, \
+ .pads = group_name##_pads, \
+ .npads = ARRAY_SIZE(group_name##_pads), \
+ .funcs = group_name##_funcs, \
+ .nfuncs = ARRAY_SIZE(group_name##_funcs), \
+ .mfpctl_reg = MFCTL##reg, \
+ .mfpctl_shift = shift, \
+ .mfpctl_width = width, \
+ .drv_reg = -1, \
+ .drv_shift = -1, \
+ .drv_width = -1, \
+ .sr_reg = -1, \
+ .sr_shift = -1, \
+ .sr_width = -1, \
+ }
+
+/*
+ * DRV_PG() - define an owl_pingroup that only controls drive strength,
+ * via a PAD_DRV<reg> register field at [shift, shift+width).
+ *
+ * Mux (mfpctl_*) and slew-rate (sr_*) fields are -1: such a group has
+ * no function list, only a pad list.
+ */
+#define DRV_PG(group_name, reg, shift, width) \
+ { \
+ .name = #group_name, \
+ .pads = group_name##_pads, \
+ .npads = ARRAY_SIZE(group_name##_pads), \
+ .mfpctl_reg = -1, \
+ .mfpctl_shift = -1, \
+ .mfpctl_width = -1, \
+ .drv_reg = PAD_DRV##reg, \
+ .drv_shift = shift, \
+ .drv_width = width, \
+ .sr_reg = -1, \
+ .sr_shift = -1, \
+ .sr_width = -1, \
+ }
+
+/*
+ * SR_PG() - define an owl_pingroup that only controls slew rate, via a
+ * PAD_SR<reg> register field at [shift, shift+width).
+ *
+ * Mux (mfpctl_*) and drive-strength (drv_*) fields are -1: such a
+ * group has no function list, only a pad list.
+ */
+#define SR_PG(group_name, reg, shift, width) \
+ { \
+ .name = #group_name, \
+ .pads = group_name##_pads, \
+ .npads = ARRAY_SIZE(group_name##_pads), \
+ .mfpctl_reg = -1, \
+ .mfpctl_shift = -1, \
+ .mfpctl_width = -1, \
+ .drv_reg = -1, \
+ .drv_shift = -1, \
+ .drv_width = -1, \
+ .sr_reg = PAD_SR##reg, \
+ .sr_shift = shift, \
+ .sr_width = width, \
+ }
+
+/* Pinctrl groups */
+static const struct owl_pingroup s900_groups[] = {
+ MUX_PG(lvds_oxx_uart4_mfp, 0, 22, 1),
+ MUX_PG(rmii_mdc_mfp, 0, 20, 2),
+ MUX_PG(rmii_mdio_mfp, 0, 20, 2),
+ MUX_PG(sirq0_mfp, 0, 19, 1),
+ MUX_PG(sirq1_mfp, 0, 19, 1),
+ MUX_PG(rmii_txd0_mfp, 0, 16, 3),
+ MUX_PG(rmii_txd1_mfp, 0, 16, 3),
+ MUX_PG(rmii_txen_mfp, 0, 13, 3),
+ MUX_PG(rmii_rxer_mfp, 0, 13, 3),
+ MUX_PG(rmii_crs_dv_mfp, 0, 11, 2),
+ MUX_PG(rmii_rxd1_mfp, 0, 8, 3),
+ MUX_PG(rmii_rxd0_mfp, 0, 8, 3),
+ MUX_PG(rmii_ref_clk_mfp, 0, 6, 2),
+ MUX_PG(i2s_d0_mfp, 0, 5, 1),
+ MUX_PG(i2s_d1_mfp, 0, 5, 1),
+ MUX_PG(i2s_lr_m_clk0_mfp, 0, 3, 2),
+ MUX_PG(i2s_bclk0_mfp, 0, 2, 1),
+ MUX_PG(i2s_bclk1_mclk1_mfp, 0, 2, 1),
+ MUX_PG(pcm1_in_out_mfp, 0, 0, 2),
+ MUX_PG(pcm1_clk_mfp, 0, 0, 2),
+ MUX_PG(pcm1_sync_mfp, 0, 0, 2),
+ MUX_PG(eram_a5_mfp, 1, 29, 3),
+ MUX_PG(eram_a6_mfp, 1, 29, 3),
+ MUX_PG(eram_a7_mfp, 1, 29, 3),
+ MUX_PG(eram_a8_mfp, 1, 26, 3),
+ MUX_PG(eram_a9_mfp, 1, 26, 3),
+ MUX_PG(eram_a10_mfp, 1, 26, 3),
+ MUX_PG(eram_a11_mfp, 1, 23, 3),
+ MUX_PG(lvds_oep_odn_mfp, 1, 22, 1),
+ MUX_PG(lvds_ocp_obn_mfp, 1, 22, 1),
+ MUX_PG(lvds_oap_oan_mfp, 1, 22, 1),
+ MUX_PG(lvds_e_mfp, 1, 21, 1),
+ MUX_PG(spi0_sclk_mosi_mfp, 1, 4, 2),
+ MUX_PG(spi0_ss_mfp, 1, 1, 3),
+ MUX_PG(spi0_miso_mfp, 1, 1, 3),
+ MUX_PG(uart2_rtsb_mfp, 2, 23, 1),
+ MUX_PG(uart2_ctsb_mfp, 2, 22, 1),
+ MUX_PG(uart3_rtsb_mfp, 2, 21, 1),
+ MUX_PG(uart3_ctsb_mfp, 2, 20, 1),
+ MUX_PG(sd0_d0_mfp, 2, 17, 3),
+ MUX_PG(sd0_d1_mfp, 2, 14, 3),
+ MUX_PG(sd0_d2_d3_mfp, 2, 11, 3),
+ MUX_PG(sd1_d0_d3_mfp, 2, 9, 2),
+ MUX_PG(sd0_cmd_mfp, 2, 7, 2),
+ MUX_PG(sd0_clk_mfp, 2, 5, 2),
+ MUX_PG(sd1_cmd_clk_mfp, 2, 3, 2),
+ MUX_PG(uart0_rx_mfp, 2, 0, 3),
+ MUX_PG(nand0_d0_ceb3_mfp, 3, 27, 1),
+ MUX_PG(uart0_tx_mfp, 3, 19, 3),
+ MUX_PG(i2c0_mfp, 3, 16, 3),
+ MUX_PG(csi0_cn_cp_mfp, 3, 15, 1),
+ MUX_PG(csi0_dn0_dp3_mfp, 3, 14, 1),
+ MUX_PG(csi1_dn0_cp_mfp, 3, 13, 1),
+ MUX_PG(dsi_dp3_dn1_mfp, 3, 12, 1),
+ MUX_PG(dsi_cp_dn0_mfp, 3, 12, 1),
+ MUX_PG(dsi_dp2_dn2_mfp, 3, 12, 1),
+ MUX_PG(nand1_d0_ceb1_mfp, 3, 11, 1),
+ MUX_PG(nand1_ceb3_mfp, 3, 10, 1),
+ MUX_PG(nand1_ceb0_mfp, 3, 10, 1),
+ MUX_PG(csi1_dn0_dp0_mfp, 3, 9, 1),
+ MUX_PG(uart4_rx_tx_mfp, 3, 8, 1),
+
+ DRV_PG(sgpio3_drv, 0, 30, 2),
+ DRV_PG(sgpio2_drv, 0, 28, 2),
+ DRV_PG(sgpio1_drv, 0, 26, 2),
+ DRV_PG(sgpio0_drv, 0, 24, 2),
+ DRV_PG(rmii_tx_d0_d1_drv, 0, 22, 2),
+ DRV_PG(rmii_txen_rxer_drv, 0, 20, 2),
+ DRV_PG(rmii_crs_dv_drv, 0, 18, 2),
+ DRV_PG(rmii_rx_d1_d0_drv, 0, 16, 2),
+ DRV_PG(rmii_ref_clk_drv, 0, 14, 2),
+ DRV_PG(rmii_mdc_mdio_drv, 0, 12, 2),
+ DRV_PG(sirq_0_1_drv, 0, 10, 2),
+ DRV_PG(sirq2_drv, 0, 8, 2),
+ DRV_PG(i2s_d0_d1_drv, 0, 6, 2),
+ DRV_PG(i2s_lr_m_clk0_drv, 0, 4, 2),
+ DRV_PG(i2s_blk1_mclk1_drv, 0, 2, 2),
+ DRV_PG(pcm1_in_out_drv, 0, 0, 2),
+ DRV_PG(lvds_oap_oan_drv, 1, 28, 2),
+ DRV_PG(lvds_oep_odn_drv, 1, 26, 2),
+ DRV_PG(lvds_ocp_obn_drv, 1, 24, 2),
+ DRV_PG(lvds_e_drv, 1, 22, 2),
+ DRV_PG(sd0_d3_d0_drv, 1, 20, 2),
+ DRV_PG(sd1_d3_d0_drv, 1, 18, 2),
+ DRV_PG(sd0_sd1_cmd_clk_drv, 1, 16, 2),
+ DRV_PG(spi0_sclk_mosi_drv, 1, 14, 2),
+ DRV_PG(spi0_ss_miso_drv, 1, 12, 2),
+ DRV_PG(uart0_rx_tx_drv, 1, 10, 2),
+ DRV_PG(uart4_rx_tx_drv, 1, 8, 2),
+ DRV_PG(uart2_drv, 1, 6, 2),
+ DRV_PG(uart3_drv, 1, 4, 2),
+ DRV_PG(i2c0_drv, 2, 30, 2),
+ DRV_PG(i2c1_drv, 2, 28, 2),
+ DRV_PG(i2c2_drv, 2, 26, 2),
+ DRV_PG(sensor0_drv, 2, 20, 2),
+
+ SR_PG(sgpio3_sr, 0, 15, 1),
+ SR_PG(sgpio2_sr, 0, 14, 1),
+ SR_PG(sgpio1_sr, 0, 13, 1),
+ SR_PG(sgpio0_sr, 0, 12, 1),
+ SR_PG(rmii_tx_d0_d1_sr, 0, 11, 1),
+ SR_PG(rmii_txen_rxer_sr, 0, 10, 1),
+ SR_PG(rmii_crs_dv_sr, 0, 9, 1),
+ SR_PG(rmii_rx_d1_d0_sr, 0, 8, 1),
+ SR_PG(rmii_ref_clk_sr, 0, 7, 1),
+ SR_PG(rmii_mdc_mdio_sr, 0, 6, 1),
+ SR_PG(sirq_0_1_sr, 0, 5, 1),
+ SR_PG(sirq2_sr, 0, 4, 1),
+ SR_PG(i2s_do_d1_sr, 0, 3, 1),
+ SR_PG(i2s_lr_m_clk0_sr, 0, 2, 1),
+ SR_PG(i2s_bclk0_mclk1_sr, 0, 1, 1),
+ SR_PG(pcm1_in_out_sr, 0, 0, 1),
+ SR_PG(sd1_d3_d0_sr, 1, 25, 1),
+ SR_PG(sd0_sd1_clk_cmd_sr, 1, 24, 1),
+ SR_PG(spi0_sclk_mosi_sr, 1, 23, 1),
+ SR_PG(spi0_ss_miso_sr, 1, 22, 1),
+ SR_PG(uart0_rx_tx_sr, 1, 21, 1),
+ SR_PG(uart4_rx_tx_sr, 1, 20, 1),
+ SR_PG(uart2_sr, 1, 19, 1),
+ SR_PG(uart3_sr, 1, 18, 1),
+ SR_PG(i2c0_sr, 2, 31, 1),
+ SR_PG(i2c1_sr, 2, 30, 1),
+ SR_PG(i2c2_sr, 2, 29, 1),
+ SR_PG(sensor0_sr, 2, 25, 1)
+};
+
+static const char * const eram_groups[] = {
+ "lvds_oxx_uart4_mfp",
+ "eram_a5_mfp",
+ "eram_a6_mfp",
+ "eram_a7_mfp",
+ "eram_a8_mfp",
+ "eram_a9_mfp",
+ "eram_a10_mfp",
+ "eram_a11_mfp",
+ "lvds_oap_oan_mfp",
+ "lvds_e_mfp",
+ "spi0_sclk_mosi_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd1_d0_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+ "sd1_cmd_clk_mfp",
+};
+
+static const char * const eth_rmii_groups[] = {
+ "rmii_mdc_mfp",
+ "rmii_mdio_mfp",
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_txen_mfp",
+ "rmii_rxer_mfp",
+ "rmii_crs_dv_mfp",
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "rmii_ref_clk_mfp",
+ "eth_smi_dummy",
+};
+
+static const char * const eth_smii_groups[] = {
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_crs_dv_mfp",
+ "eth_smi_dummy",
+};
+
+/*
+ * Pin groups selectable for the SPI0 function (S900_MUX_SPI0).
+ * Fix: the original listed all three group names twice; the duplicates
+ * were redundant (each name resolves to the same s900_groups[] entry)
+ * and made pinmux report each group twice for this function.
+ */
+static const char * const spi0_groups[] = {
+ "spi0_sclk_mosi_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+};
+
+static const char * const spi1_groups[] = {
+ "pcm1_in_out_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "i2c0_mfp",
+};
+
+static const char * const spi2_groups[] = {
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_crs_dv_mfp",
+ "rmii_ref_clk_mfp",
+};
+
+static const char * const spi3_groups[] = {
+ "rmii_txen_mfp",
+ "rmii_rxer_mfp",
+};
+
+static const char * const sens0_groups[] = {
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_txen_mfp",
+ "rmii_rxer_mfp",
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "eram_a5_mfp",
+ "eram_a6_mfp",
+ "eram_a7_mfp",
+ "eram_a8_mfp",
+ "eram_a9_mfp",
+ "csi0_cn_cp_mfp",
+ "csi0_dn0_dp3_mfp",
+ "csi1_dn0_cp_mfp",
+ "csi1_dn0_dp0_mfp",
+};
+
+static const char * const uart0_groups[] = {
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+};
+
+static const char * const uart1_groups[] = {
+ "sd0_d2_d3_mfp",
+ "i2c0_mfp",
+};
+
+/*
+ * Pin groups selectable for the UART2 function (S900_MUX_UART2).
+ *
+ * Fix: "uart0_tx_mfp_pads" and "i2c0_mfp_pads" were the C array names,
+ * not the registered group names. MUX_PG() registers groups under the
+ * stringized group_name (e.g. "uart0_tx_mfp"), so the "_pads"-suffixed
+ * strings could never match any group and the mux setting was
+ * unreachable for those pads.
+ */
+static const char * const uart2_groups[] = {
+ "rmii_mdc_mfp",
+ "rmii_mdio_mfp",
+ "rmii_txen_mfp",
+ "rmii_rxer_mfp",
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "lvds_oep_odn_mfp",
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "i2c0_mfp",
+ "dsi_dp3_dn1_mfp",
+ "uart2_dummy"
+};
+
+static const char * const uart3_groups[] = {
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
+ "uart3_dummy"
+};
+
+/*
+ * Pin groups selectable for the UART4 function (S900_MUX_UART4).
+ *
+ * Fix: "uart4_rx_tx_mfp_pads" was the C array name, not the registered
+ * group name; MUX_PG(uart4_rx_tx_mfp, ...) registers "uart4_rx_tx_mfp",
+ * so the "_pads"-suffixed string could never match any group.
+ */
+static const char * const uart4_groups[] = {
+ "lvds_oxx_uart4_mfp",
+ "rmii_crs_dv_mfp",
+ "rmii_ref_clk_mfp",
+ "pcm1_in_out_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "eram_a5_mfp",
+ "eram_a6_mfp",
+ "dsi_dp2_dn2_mfp",
+ "uart4_rx_tx_mfp",
+ "uart4_dummy"
+};
+
+static const char * const uart5_groups[] = {
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "eram_a9_mfp",
+ "eram_a11_mfp",
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+};
+
+static const char * const uart6_groups[] = {
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+};
+
+static const char * const i2s0_groups[] = {
+ "i2s_d0_mfp",
+ "i2s_lr_m_clk0_mfp",
+ "i2s_bclk0_mfp",
+ "i2s0_dummy",
+};
+
+static const char * const i2s1_groups[] = {
+ "i2s_d1_mfp",
+ "i2s_bclk1_mclk1_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "i2s1_dummy",
+};
+
+static const char * const pcm0_groups[] = {
+ "i2s_d0_mfp",
+ "i2s_d1_mfp",
+ "i2s_lr_m_clk0_mfp",
+ "i2s_bclk0_mfp",
+ "i2s_bclk1_mclk1_mfp",
+ "spi0_sclk_mosi_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+};
+
+static const char * const pcm1_groups[] = {
+ "i2s_lr_m_clk0_mfp",
+ "pcm1_in_out_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "lvds_oep_odn_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "dsi_cp_dn0_mfp",
+ "pcm1_dummy",
+};
+
+/*
+ * Pin groups selectable for the JTAG function (S900_MUX_JTAG).
+ *
+ * Fix: "eram_a10_mfp" was listed twice; only one group of that name is
+ * registered in s900_groups[], so the second entry was redundant.
+ * NOTE(review): the duplicate may have been a typo for another
+ * eram_aN_mfp group — its funcs array is outside this hunk; confirm
+ * against the full file / S900 datasheet.
+ */
+static const char * const jtag_groups[] = {
+ "eram_a5_mfp",
+ "eram_a6_mfp",
+ "eram_a7_mfp",
+ "eram_a8_mfp",
+ "eram_a10_mfp",
+ "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+};
+
+static const char * const pwm0_groups[] = {
+ "sirq0_mfp",
+ "rmii_txd0_mfp",
+ "rmii_rxd1_mfp",
+ "eram_a5_mfp",
+ "nand1_ceb3_mfp",
+};
+
+static const char * const pwm1_groups[] = {
+ "sirq1_mfp",
+ "rmii_txd1_mfp",
+ "rmii_rxd0_mfp",
+ "eram_a6_mfp",
+ "eram_a8_mfp",
+ "nand1_ceb0_mfp",
+};
+
+static const char * const pwm2_groups[] = {
+ "rmii_mdc_mfp",
+ "rmii_txen_mfp",
+ "eram_a9_mfp",
+ "eram_a11_mfp",
+};
+
+static const char * const pwm3_groups[] = {
+ "rmii_mdio_mfp",
+ "rmii_rxer_mfp",
+ "eram_a10_mfp",
+};
+
+static const char * const pwm4_groups[] = {
+ "pcm1_clk_mfp",
+ "spi0_ss_mfp",
+};
+
+static const char * const pwm5_groups[] = {
+ "pcm1_sync_mfp",
+ "spi0_miso_mfp",
+};
+
+static const char * const sd0_groups[] = {
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+};
+
+static const char * const sd1_groups[] = {
+ "sd1_d0_d3_mfp",
+ "sd1_cmd_clk_mfp",
+ "sd1_dummy",
+};
+
+static const char * const sd2_groups[] = {
+ "nand0_d0_ceb3_mfp",
+};
+
+static const char * const sd3_groups[] = {
+ "nand1_d0_ceb1_mfp",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0_mfp",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c0_mfp",
+ "i2c1_dummy"
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_dummy"
+};
+
+static const char * const i2c3_groups[] = {
+ "pcm1_in_out_mfp",
+ "spi0_sclk_mosi_mfp",
+};
+
+static const char * const i2c4_groups[] = {
+ "uart4_rx_tx_mfp",
+};
+
+static const char * const i2c5_groups[] = {
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+};
+
+
+static const char * const lvds_groups[] = {
+ "lvds_oep_odn_mfp",
+ "lvds_ocp_obn_mfp",
+ "lvds_oap_oan_mfp",
+ "lvds_e_mfp",
+};
+
+static const char * const usb20_groups[] = {
+ "eram_a9_mfp",
+};
+
+static const char * const usb30_groups[] = {
+ "eram_a10_mfp",
+};
+
+static const char * const gpu_groups[] = {
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+};
+
+static const char * const mipi_csi0_groups[] = {
+ "csi0_dn0_dp3_mfp",
+};
+
+static const char * const mipi_csi1_groups[] = {
+ "csi1_dn0_cp_mfp",
+};
+
+static const char * const mipi_dsi_groups[] = {
+ "dsi_dp3_dn1_mfp",
+ "dsi_cp_dn0_mfp",
+ "dsi_dp2_dn2_mfp",
+ "mipi_dsi_dummy",
+};
+
+static const char * const nand0_groups[] = {
+ "nand0_d0_ceb3_mfp",
+ "nand0_dummy",
+};
+
+static const char * const nand1_groups[] = {
+ "nand1_d0_ceb1_mfp",
+ "nand1_ceb3_mfp",
+ "nand1_ceb0_mfp",
+ "nand1_dummy",
+};
+
+static const char * const spdif_groups[] = {
+ "uart0_tx_mfp",
+};
+
+static const char * const sirq0_groups[] = {
+ "sirq0_mfp",
+ "sirq0_dummy",
+};
+
+static const char * const sirq1_groups[] = {
+ "sirq1_mfp",
+ "sirq1_dummy",
+};
+
+static const char * const sirq2_groups[] = {
+ "sirq2_dummy",
+};
+
+/*
+ * FUNCTION() - define an owl_pinmux_func named after fname, taking its
+ * selectable group-name list from the matching fname##_groups array.
+ * Every string in that array must match a group name registered in
+ * s900_groups[].
+ */
+#define FUNCTION(fname) \
+ { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+static const struct owl_pinmux_func s900_functions[] = {
+ [S900_MUX_ERAM] = FUNCTION(eram),
+ [S900_MUX_ETH_RMII] = FUNCTION(eth_rmii),
+ [S900_MUX_ETH_SMII] = FUNCTION(eth_smii),
+ [S900_MUX_SPI0] = FUNCTION(spi0),
+ [S900_MUX_SPI1] = FUNCTION(spi1),
+ [S900_MUX_SPI2] = FUNCTION(spi2),
+ [S900_MUX_SPI3] = FUNCTION(spi3),
+ [S900_MUX_SENS0] = FUNCTION(sens0),
+ [S900_MUX_UART0] = FUNCTION(uart0),
+ [S900_MUX_UART1] = FUNCTION(uart1),
+ [S900_MUX_UART2] = FUNCTION(uart2),
+ [S900_MUX_UART3] = FUNCTION(uart3),
+ [S900_MUX_UART4] = FUNCTION(uart4),
+ [S900_MUX_UART5] = FUNCTION(uart5),
+ [S900_MUX_UART6] = FUNCTION(uart6),
+ [S900_MUX_I2S0] = FUNCTION(i2s0),
+ [S900_MUX_I2S1] = FUNCTION(i2s1),
+ [S900_MUX_PCM0] = FUNCTION(pcm0),
+ [S900_MUX_PCM1] = FUNCTION(pcm1),
+ [S900_MUX_JTAG] = FUNCTION(jtag),
+ [S900_MUX_PWM0] = FUNCTION(pwm0),
+ [S900_MUX_PWM1] = FUNCTION(pwm1),
+ [S900_MUX_PWM2] = FUNCTION(pwm2),
+ [S900_MUX_PWM3] = FUNCTION(pwm3),
+ [S900_MUX_PWM4] = FUNCTION(pwm4),
+ [S900_MUX_PWM5] = FUNCTION(pwm5),
+ [S900_MUX_SD0] = FUNCTION(sd0),
+ [S900_MUX_SD1] = FUNCTION(sd1),
+ [S900_MUX_SD2] = FUNCTION(sd2),
+ [S900_MUX_SD3] = FUNCTION(sd3),
+ [S900_MUX_I2C0] = FUNCTION(i2c0),
+ [S900_MUX_I2C1] = FUNCTION(i2c1),
+ [S900_MUX_I2C2] = FUNCTION(i2c2),
+ [S900_MUX_I2C3] = FUNCTION(i2c3),
+ [S900_MUX_I2C4] = FUNCTION(i2c4),
+ [S900_MUX_I2C5] = FUNCTION(i2c5),
+ [S900_MUX_LVDS] = FUNCTION(lvds),
+ [S900_MUX_USB30] = FUNCTION(usb30),
+ [S900_MUX_USB20] = FUNCTION(usb20),
+ [S900_MUX_GPU] = FUNCTION(gpu),
+ [S900_MUX_MIPI_CSI0] = FUNCTION(mipi_csi0),
+ [S900_MUX_MIPI_CSI1] = FUNCTION(mipi_csi1),
+ [S900_MUX_MIPI_DSI] = FUNCTION(mipi_dsi),
+ [S900_MUX_NAND0] = FUNCTION(nand0),
+ [S900_MUX_NAND1] = FUNCTION(nand1),
+ [S900_MUX_SPDIF] = FUNCTION(spdif),
+ [S900_MUX_SIRQ0] = FUNCTION(sirq0),
+ [S900_MUX_SIRQ1] = FUNCTION(sirq1),
+ [S900_MUX_SIRQ2] = FUNCTION(sirq2)
+};
+/* PAD PULL UP/DOWN CONFIGURES */
+/*
+ * PULLCTL_CONF() - owl_pullctl initializer locating a pad's pull-up/
+ * pull-down control field: PAD_PULLCTL<pull_reg> register, bit field
+ * at [pull_sft, pull_sft+pull_wdt).
+ */
+#define PULLCTL_CONF(pull_reg, pull_sft, pull_wdt) \
+ { \
+ .reg = PAD_PULLCTL##pull_reg, \
+ .shift = pull_sft, \
+ .width = pull_wdt, \
+ }
+
+/* Define a named struct owl_pullctl (pad_name##_pullctl_conf) for one pad. */
+#define PAD_PULLCTL_CONF(pad_name, pull_reg, pull_sft, pull_wdt) \
+ struct owl_pullctl pad_name##_pullctl_conf \
+ = PULLCTL_CONF(pull_reg, pull_sft, pull_wdt)
+
+/*
+ * ST_CONF() - owl_st initializer locating a pad's schmitt-trigger
+ * control field: PAD_ST<st_reg> register, bit field at
+ * [st_sft, st_sft+st_wdt).
+ */
+#define ST_CONF(st_reg, st_sft, st_wdt) \
+ { \
+ .reg = PAD_ST##st_reg, \
+ .shift = st_sft, \
+ .width = st_wdt, \
+ }
+
+/* Define a named struct owl_st (pad_name##_st_conf) for one pad. */
+#define PAD_ST_CONF(pad_name, st_reg, st_sft, st_wdt) \
+ struct owl_st pad_name##_st_conf \
+ = ST_CONF(st_reg, st_sft, st_wdt)
+
+/* PAD_PULLCTL0 */
+static PAD_PULLCTL_CONF(ETH_RXER, 0, 18, 2);
+static PAD_PULLCTL_CONF(SIRQ0, 0, 16, 2);
+static PAD_PULLCTL_CONF(SIRQ1, 0, 14, 2);
+static PAD_PULLCTL_CONF(SIRQ2, 0, 12, 2);
+static PAD_PULLCTL_CONF(I2C0_SDATA, 0, 10, 2);
+static PAD_PULLCTL_CONF(I2C0_SCLK, 0, 8, 2);
+static PAD_PULLCTL_CONF(ERAM_A5, 0, 6, 2);
+static PAD_PULLCTL_CONF(ERAM_A6, 0, 4, 2);
+static PAD_PULLCTL_CONF(ERAM_A7, 0, 2, 2);
+static PAD_PULLCTL_CONF(ERAM_A10, 0, 0, 2);
+
+/* PAD_PULLCTL1 */
+static PAD_PULLCTL_CONF(PCM1_IN, 1, 30, 2);
+static PAD_PULLCTL_CONF(PCM1_OUT, 1, 28, 2);
+static PAD_PULLCTL_CONF(SD0_D0, 1, 26, 2);
+static PAD_PULLCTL_CONF(SD0_D1, 1, 24, 2);
+static PAD_PULLCTL_CONF(SD0_D2, 1, 22, 2);
+static PAD_PULLCTL_CONF(SD0_D3, 1, 20, 2);
+static PAD_PULLCTL_CONF(SD0_CMD, 1, 18, 2);
+static PAD_PULLCTL_CONF(SD0_CLK, 1, 16, 2);
+static PAD_PULLCTL_CONF(SD1_CMD, 1, 14, 2);
+static PAD_PULLCTL_CONF(SD1_D0, 1, 12, 2);
+static PAD_PULLCTL_CONF(SD1_D1, 1, 10, 2);
+static PAD_PULLCTL_CONF(SD1_D2, 1, 8, 2);
+static PAD_PULLCTL_CONF(SD1_D3, 1, 6, 2);
+static PAD_PULLCTL_CONF(UART0_RX, 1, 4, 2);
+static PAD_PULLCTL_CONF(UART0_TX, 1, 2, 2);
+
+/* PAD_PULLCTL2 */
+static PAD_PULLCTL_CONF(I2C2_SDATA, 2, 26, 2);
+static PAD_PULLCTL_CONF(I2C2_SCLK, 2, 24, 2);
+static PAD_PULLCTL_CONF(SPI0_SCLK, 2, 22, 2);
+static PAD_PULLCTL_CONF(SPI0_MOSI, 2, 20, 2);
+static PAD_PULLCTL_CONF(I2C1_SDATA, 2, 18, 2);
+static PAD_PULLCTL_CONF(I2C1_SCLK, 2, 16, 2);
+static PAD_PULLCTL_CONF(NAND0_D0, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D1, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D2, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D3, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D4, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D5, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D6, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D7, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_DQSN, 2, 14, 1);
+static PAD_PULLCTL_CONF(NAND0_DQS, 2, 13, 1);
+static PAD_PULLCTL_CONF(NAND1_D0, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D1, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D2, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D3, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D4, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D5, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D6, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D7, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_DQSN, 2, 11, 1);
+static PAD_PULLCTL_CONF(NAND1_DQS, 2, 10, 1);
+static PAD_PULLCTL_CONF(SGPIO2, 2, 8, 2);
+static PAD_PULLCTL_CONF(SGPIO3, 2, 6, 2);
+static PAD_PULLCTL_CONF(UART4_RX, 2, 4, 2);
+static PAD_PULLCTL_CONF(UART4_TX, 2, 2, 2);
+
+/* PAD_ST0 */
+static PAD_ST_CONF(I2C0_SDATA, 0, 30, 1);
+static PAD_ST_CONF(UART0_RX, 0, 29, 1);
+static PAD_ST_CONF(ETH_MDC, 0, 28, 1);
+static PAD_ST_CONF(I2S_MCLK1, 0, 23, 1);
+static PAD_ST_CONF(ETH_REF_CLK, 0, 22, 1);
+static PAD_ST_CONF(ETH_TXEN, 0, 21, 1);
+static PAD_ST_CONF(ETH_TXD0, 0, 20, 1);
+static PAD_ST_CONF(I2S_LRCLK1, 0, 19, 1);
+static PAD_ST_CONF(SGPIO2, 0, 18, 1);
+static PAD_ST_CONF(SGPIO3, 0, 17, 1);
+static PAD_ST_CONF(UART4_TX, 0, 16, 1);
+static PAD_ST_CONF(I2S_D1, 0, 15, 1);
+static PAD_ST_CONF(UART0_TX, 0, 14, 1);
+static PAD_ST_CONF(SPI0_SCLK, 0, 13, 1);
+static PAD_ST_CONF(SD0_CLK, 0, 12, 1);
+static PAD_ST_CONF(ERAM_A5, 0, 11, 1);
+static PAD_ST_CONF(I2C0_SCLK, 0, 7, 1);
+static PAD_ST_CONF(ERAM_A9, 0, 6, 1);
+static PAD_ST_CONF(LVDS_OEP, 0, 5, 1);
+static PAD_ST_CONF(LVDS_ODN, 0, 4, 1);
+static PAD_ST_CONF(LVDS_OAP, 0, 3, 1);
+static PAD_ST_CONF(I2S_BCLK1, 0, 2, 1);
+
+/* PAD_ST1 */
+static PAD_ST_CONF(I2S_LRCLK0, 1, 29, 1);
+static PAD_ST_CONF(UART4_RX, 1, 28, 1);
+static PAD_ST_CONF(UART3_CTSB, 1, 27, 1);
+static PAD_ST_CONF(UART3_RTSB, 1, 26, 1);
+static PAD_ST_CONF(UART3_RX, 1, 25, 1);
+static PAD_ST_CONF(UART2_RTSB, 1, 24, 1);
+static PAD_ST_CONF(UART2_CTSB, 1, 23, 1);
+static PAD_ST_CONF(UART2_RX, 1, 22, 1);
+static PAD_ST_CONF(ETH_RXD0, 1, 21, 1);
+static PAD_ST_CONF(ETH_RXD1, 1, 20, 1);
+static PAD_ST_CONF(ETH_CRS_DV, 1, 19, 1);
+static PAD_ST_CONF(ETH_RXER, 1, 18, 1);
+static PAD_ST_CONF(ETH_TXD1, 1, 17, 1);
+static PAD_ST_CONF(LVDS_OCP, 1, 16, 1);
+static PAD_ST_CONF(LVDS_OBP, 1, 15, 1);
+static PAD_ST_CONF(LVDS_OBN, 1, 14, 1);
+static PAD_ST_CONF(PCM1_OUT, 1, 12, 1);
+static PAD_ST_CONF(PCM1_CLK, 1, 11, 1);
+static PAD_ST_CONF(PCM1_IN, 1, 10, 1);
+static PAD_ST_CONF(PCM1_SYNC, 1, 9, 1);
+static PAD_ST_CONF(I2C1_SCLK, 1, 8, 1);
+static PAD_ST_CONF(I2C1_SDATA, 1, 7, 1);
+static PAD_ST_CONF(I2C2_SCLK, 1, 6, 1);
+static PAD_ST_CONF(I2C2_SDATA, 1, 5, 1);
+static PAD_ST_CONF(SPI0_MOSI, 1, 4, 1);
+static PAD_ST_CONF(SPI0_MISO, 1, 3, 1);
+static PAD_ST_CONF(SPI0_SS, 1, 2, 1);
+static PAD_ST_CONF(I2S_BCLK0, 1, 1, 1);
+static PAD_ST_CONF(I2S_MCLK0, 1, 0, 1);
+
+/*
+ * PAD_INFO*() - owl_padinfo initializers for s900_padinfo[]. The four
+ * variants cover the combinations of a pad having (or not having) a
+ * pull-control config (name##_pullctl_conf) and a schmitt-trigger
+ * config (name##_st_conf), defined above via PAD_PULLCTL_CONF() /
+ * PAD_ST_CONF(). A NULL pointer means the pad has no such control.
+ */
+
+/* Pad with neither pull control nor schmitt trigger. */
+#define PAD_INFO(name) \
+ { \
+ .pad = name, \
+ .pullctl = NULL, \
+ .st = NULL, \
+ }
+
+/* Pad with a schmitt-trigger config only. */
+#define PAD_INFO_ST(name) \
+ { \
+ .pad = name, \
+ .pullctl = NULL, \
+ .st = &name##_st_conf, \
+ }
+
+/* Pad with a pull-control config only. */
+#define PAD_INFO_PULLCTL(name) \
+ { \
+ .pad = name, \
+ .pullctl = &name##_pullctl_conf, \
+ .st = NULL, \
+ }
+
+/* Pad with both pull-control and schmitt-trigger configs. */
+#define PAD_INFO_PULLCTL_ST(name) \
+ { \
+ .pad = name, \
+ .pullctl = &name##_pullctl_conf, \
+ .st = &name##_st_conf, \
+ }
+
+/* Pad info table */
+static struct owl_padinfo s900_padinfo[NUM_PADS] = {
+ [ETH_TXD0] = PAD_INFO_ST(ETH_TXD0),
+ [ETH_TXD1] = PAD_INFO_ST(ETH_TXD1),
+ [ETH_TXEN] = PAD_INFO_ST(ETH_TXEN),
+ [ETH_RXER] = PAD_INFO_PULLCTL_ST(ETH_RXER),
+ [ETH_CRS_DV] = PAD_INFO_ST(ETH_CRS_DV),
+ [ETH_RXD1] = PAD_INFO_ST(ETH_RXD1),
+ [ETH_RXD0] = PAD_INFO_ST(ETH_RXD0),
+ [ETH_REF_CLK] = PAD_INFO_ST(ETH_REF_CLK),
+ [ETH_MDC] = PAD_INFO_ST(ETH_MDC),
+ [ETH_MDIO] = PAD_INFO(ETH_MDIO),
+ [SIRQ0] = PAD_INFO_PULLCTL(SIRQ0),
+ [SIRQ1] = PAD_INFO_PULLCTL(SIRQ1),
+ [SIRQ2] = PAD_INFO_PULLCTL(SIRQ2),
+ [I2S_D0] = PAD_INFO(I2S_D0),
+ [I2S_BCLK0] = PAD_INFO_ST(I2S_BCLK0),
+ [I2S_LRCLK0] = PAD_INFO_ST(I2S_LRCLK0),
+ [I2S_MCLK0] = PAD_INFO_ST(I2S_MCLK0),
+ [I2S_D1] = PAD_INFO_ST(I2S_D1),
+ [I2S_BCLK1] = PAD_INFO_ST(I2S_BCLK1),
+ [I2S_LRCLK1] = PAD_INFO_ST(I2S_LRCLK1),
+ [I2S_MCLK1] = PAD_INFO_ST(I2S_MCLK1),
+ [PCM1_IN] = PAD_INFO_PULLCTL_ST(PCM1_IN),
+ [PCM1_CLK] = PAD_INFO_ST(PCM1_CLK),
+ [PCM1_SYNC] = PAD_INFO_ST(PCM1_SYNC),
+ [PCM1_OUT] = PAD_INFO_PULLCTL_ST(PCM1_OUT),
+ [ERAM_A5] = PAD_INFO_PULLCTL_ST(ERAM_A5),
+ [ERAM_A6] = PAD_INFO_PULLCTL(ERAM_A6),
+ [ERAM_A7] = PAD_INFO_PULLCTL(ERAM_A7),
+ [ERAM_A8] = PAD_INFO(ERAM_A8),
+ [ERAM_A9] = PAD_INFO_ST(ERAM_A9),
+ [ERAM_A10] = PAD_INFO_PULLCTL(ERAM_A10),
+ [ERAM_A11] = PAD_INFO(ERAM_A11),
+ [LVDS_OEP] = PAD_INFO_ST(LVDS_OEP),
+ [LVDS_OEN] = PAD_INFO(LVDS_OEN),
+ [LVDS_ODP] = PAD_INFO(LVDS_ODP),
+ [LVDS_ODN] = PAD_INFO_ST(LVDS_ODN),
+ [LVDS_OCP] = PAD_INFO_ST(LVDS_OCP),
+ [LVDS_OCN] = PAD_INFO(LVDS_OCN),
+ [LVDS_OBP] = PAD_INFO_ST(LVDS_OBP),
+ [LVDS_OBN] = PAD_INFO_ST(LVDS_OBN),
+ [LVDS_OAP] = PAD_INFO_ST(LVDS_OAP),
+ [LVDS_OAN] = PAD_INFO(LVDS_OAN),
+ [LVDS_EEP] = PAD_INFO(LVDS_EEP),
+ [LVDS_EEN] = PAD_INFO(LVDS_EEN),
+ [LVDS_EDP] = PAD_INFO(LVDS_EDP),
+ [LVDS_EDN] = PAD_INFO(LVDS_EDN),
+ [LVDS_ECP] = PAD_INFO(LVDS_ECP),
+ [LVDS_ECN] = PAD_INFO(LVDS_ECN),
+ [LVDS_EBP] = PAD_INFO(LVDS_EBP),
+ [LVDS_EBN] = PAD_INFO(LVDS_EBN),
+ [LVDS_EAP] = PAD_INFO(LVDS_EAP),
+ [LVDS_EAN] = PAD_INFO(LVDS_EAN),
+ [SD0_D0] = PAD_INFO_PULLCTL(SD0_D0),
+ [SD0_D1] = PAD_INFO_PULLCTL(SD0_D1),
+ [SD0_D2] = PAD_INFO_PULLCTL(SD0_D2),
+ [SD0_D3] = PAD_INFO_PULLCTL(SD0_D3),
+ [SD1_D0] = PAD_INFO_PULLCTL(SD1_D0),
+ [SD1_D1] = PAD_INFO_PULLCTL(SD1_D1),
+ [SD1_D2] = PAD_INFO_PULLCTL(SD1_D2),
+ [SD1_D3] = PAD_INFO_PULLCTL(SD1_D3),
+ [SD0_CMD] = PAD_INFO_PULLCTL(SD0_CMD),
+ [SD0_CLK] = PAD_INFO_PULLCTL_ST(SD0_CLK),
+ [SD1_CMD] = PAD_INFO_PULLCTL(SD1_CMD),
+ [SD1_CLK] = PAD_INFO(SD1_CLK),
+ [SPI0_SCLK] = PAD_INFO_PULLCTL_ST(SPI0_SCLK),
+ [SPI0_SS] = PAD_INFO_ST(SPI0_SS),
+ [SPI0_MISO] = PAD_INFO_ST(SPI0_MISO),
+ [SPI0_MOSI] = PAD_INFO_PULLCTL_ST(SPI0_MOSI),
+ [UART0_RX] = PAD_INFO_PULLCTL_ST(UART0_RX),
+ [UART0_TX] = PAD_INFO_PULLCTL_ST(UART0_TX),
+ [UART2_RX] = PAD_INFO_ST(UART2_RX),
+ [UART2_TX] = PAD_INFO(UART2_TX),
+ [UART2_RTSB] = PAD_INFO_ST(UART2_RTSB),
+ [UART2_CTSB] = PAD_INFO_ST(UART2_CTSB),
+ [UART3_RX] = PAD_INFO_ST(UART3_RX),
+ [UART3_TX] = PAD_INFO(UART3_TX),
+ [UART3_RTSB] = PAD_INFO_ST(UART3_RTSB),
+ [UART3_CTSB] = PAD_INFO_ST(UART3_CTSB),
+ [UART4_RX] = PAD_INFO_PULLCTL_ST(UART4_RX),
+ [UART4_TX] = PAD_INFO_PULLCTL_ST(UART4_TX),
+ [I2C0_SCLK] = PAD_INFO_PULLCTL_ST(I2C0_SCLK),
+ [I2C0_SDATA] = PAD_INFO_PULLCTL_ST(I2C0_SDATA),
+ [I2C1_SCLK] = PAD_INFO_PULLCTL_ST(I2C1_SCLK),
+ [I2C1_SDATA] = PAD_INFO_PULLCTL_ST(I2C1_SDATA),
+ [I2C2_SCLK] = PAD_INFO_PULLCTL_ST(I2C2_SCLK),
+ [I2C2_SDATA] = PAD_INFO_PULLCTL_ST(I2C2_SDATA),
+ [CSI0_DN0] = PAD_INFO(CSI0_DN0),
+ [CSI0_DP0] = PAD_INFO(CSI0_DP0),
+ [CSI0_DN1] = PAD_INFO(CSI0_DN1),
+ [CSI0_DP1] = PAD_INFO(CSI0_DP1),
+ [CSI0_CN] = PAD_INFO(CSI0_CN),
+ [CSI0_CP] = PAD_INFO(CSI0_CP),
+ [CSI0_DN2] = PAD_INFO(CSI0_DN2),
+ [CSI0_DP2] = PAD_INFO(CSI0_DP2),
+ [CSI0_DN3] = PAD_INFO(CSI0_DN3),
+ [CSI0_DP3] = PAD_INFO(CSI0_DP3),
+ [DSI_DP3] = PAD_INFO(DSI_DP3),
+ [DSI_DN3] = PAD_INFO(DSI_DN3),
+ [DSI_DP1] = PAD_INFO(DSI_DP1),
+ [DSI_DN1] = PAD_INFO(DSI_DN1),
+ [DSI_CP] = PAD_INFO(DSI_CP),
+ [DSI_CN] = PAD_INFO(DSI_CN),
+ [DSI_DP0] = PAD_INFO(DSI_DP0),
+ [DSI_DN0] = PAD_INFO(DSI_DN0),
+ [DSI_DP2] = PAD_INFO(DSI_DP2),
+ [DSI_DN2] = PAD_INFO(DSI_DN2),
+ [SENSOR0_PCLK] = PAD_INFO(SENSOR0_PCLK),
+ [CSI1_DN0] = PAD_INFO(CSI1_DN0),
+ [CSI1_DP0] = PAD_INFO(CSI1_DP0),
+ [CSI1_DN1] = PAD_INFO(CSI1_DN1),
+ [CSI1_DP1] = PAD_INFO(CSI1_DP1),
+ [CSI1_CN] = PAD_INFO(CSI1_CN),
+ [CSI1_CP] = PAD_INFO(CSI1_CP),
+ [SENSOR0_CKOUT] = PAD_INFO(SENSOR0_CKOUT),
+ [NAND0_D0] = PAD_INFO_PULLCTL(NAND0_D0),
+ [NAND0_D1] = PAD_INFO_PULLCTL(NAND0_D1),
+ [NAND0_D2] = PAD_INFO_PULLCTL(NAND0_D2),
+ [NAND0_D3] = PAD_INFO_PULLCTL(NAND0_D3),
+ [NAND0_D4] = PAD_INFO_PULLCTL(NAND0_D4),
+ [NAND0_D5] = PAD_INFO_PULLCTL(NAND0_D5),
+ [NAND0_D6] = PAD_INFO_PULLCTL(NAND0_D6),
+ [NAND0_D7] = PAD_INFO_PULLCTL(NAND0_D7),
+ [NAND0_DQS] = PAD_INFO_PULLCTL(NAND0_DQS),
+ [NAND0_DQSN] = PAD_INFO_PULLCTL(NAND0_DQSN),
+ [NAND0_ALE] = PAD_INFO(NAND0_ALE),
+ [NAND0_CLE] = PAD_INFO(NAND0_CLE),
+ [NAND0_CEB0] = PAD_INFO(NAND0_CEB0),
+ [NAND0_CEB1] = PAD_INFO(NAND0_CEB1),
+ [NAND0_CEB2] = PAD_INFO(NAND0_CEB2),
+ [NAND0_CEB3] = PAD_INFO(NAND0_CEB3),
+ [NAND1_D0] = PAD_INFO_PULLCTL(NAND1_D0),
+ [NAND1_D1] = PAD_INFO_PULLCTL(NAND1_D1),
+ [NAND1_D2] = PAD_INFO_PULLCTL(NAND1_D2),
+ [NAND1_D3] = PAD_INFO_PULLCTL(NAND1_D3),
+ [NAND1_D4] = PAD_INFO_PULLCTL(NAND1_D4),
+ [NAND1_D5] = PAD_INFO_PULLCTL(NAND1_D5),
+ [NAND1_D6] = PAD_INFO_PULLCTL(NAND1_D6),
+ [NAND1_D7] = PAD_INFO_PULLCTL(NAND1_D7),
+ [NAND1_DQS] = PAD_INFO_PULLCTL(NAND1_DQS),
+ [NAND1_DQSN] = PAD_INFO_PULLCTL(NAND1_DQSN),
+ [NAND1_ALE] = PAD_INFO(NAND1_ALE),
+ [NAND1_CLE] = PAD_INFO(NAND1_CLE),
+ [NAND1_CEB0] = PAD_INFO(NAND1_CEB0),
+ [NAND1_CEB1] = PAD_INFO(NAND1_CEB1),
+ [NAND1_CEB2] = PAD_INFO(NAND1_CEB2),
+ [NAND1_CEB3] = PAD_INFO(NAND1_CEB3),
+ [SGPIO0] = PAD_INFO(SGPIO0),
+ [SGPIO1] = PAD_INFO(SGPIO1),
+ [SGPIO2] = PAD_INFO_PULLCTL_ST(SGPIO2),
+ [SGPIO3] = PAD_INFO_PULLCTL_ST(SGPIO3)
+};
+
+#define OWL_GPIO_PORT(port, base, count, _outen, _inen, _dat) \
+ [OWL_GPIO_PORT_##port] = { \
+ .offset = base, \
+ .pins = count, \
+ .outen = _outen, \
+ .inen = _inen, \
+ .dat = _dat, \
+ }
+
+static const struct owl_gpio_port s900_gpio_ports[] = {
+ OWL_GPIO_PORT(A, 0x0000, 32, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(B, 0x000C, 32, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(C, 0x0018, 12, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(D, 0x0024, 30, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(E, 0x0030, 32, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(F, 0x00F0, 8, 0x0, 0x4, 0x8)
+};
+
+static struct owl_pinctrl_soc_data s900_pinctrl_data = {
+ .padinfo = s900_padinfo,
+ .pins = (const struct pinctrl_pin_desc *)s900_pads,
+ .npins = ARRAY_SIZE(s900_pads),
+ .functions = s900_functions,
+ .nfunctions = ARRAY_SIZE(s900_functions),
+ .groups = s900_groups,
+ .ngroups = ARRAY_SIZE(s900_groups),
+ .ngpios = NUM_GPIOS,
+ .ports = s900_gpio_ports,
+ .nports = ARRAY_SIZE(s900_gpio_ports)
+};
+
+static int s900_pinctrl_probe(struct platform_device *pdev)
+{
+ return owl_pinctrl_probe(pdev, &s900_pinctrl_data);
+}
+
+static const struct of_device_id s900_pinctrl_of_match[] = {
+ { .compatible = "actions,s900-pinctrl", },
+ { }
+};
+
+static struct platform_driver s900_pinctrl_driver = {
+ .driver = {
+ .name = "pinctrl-s900",
+ .of_match_table = of_match_ptr(s900_pinctrl_of_match),
+ },
+ .probe = s900_pinctrl_probe,
+};
+
+static int __init s900_pinctrl_init(void)
+{
+ return platform_driver_register(&s900_pinctrl_driver);
+}
+arch_initcall(s900_pinctrl_init);
+
+static void __exit s900_pinctrl_exit(void)
+{
+ platform_driver_unregister(&s900_pinctrl_driver);
+}
+module_exit(s900_pinctrl_exit);
+
+MODULE_AUTHOR("Actions Semi Inc.");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Actions Semi S900 SoC Pinctrl Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index e8c4e4f934a6..0f38d51f47c6 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -20,6 +20,7 @@ config PINCTRL_BCM2835
bool
select PINMUX
select PINCONF
+ select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
config PINCTRL_IPROC_GPIO
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 785c366fd6d6..136ccaf53df8 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -36,11 +36,13 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <dt-bindings/pinctrl/bcm2835.h>
#define MODULE_NAME "pinctrl-bcm2835"
#define BCM2835_NUM_GPIOS 54
@@ -72,13 +74,9 @@
enum bcm2835_pinconf_param {
/* argument: bcm2835_pinconf_pull */
- BCM2835_PINCONF_PARAM_PULL,
+ BCM2835_PINCONF_PARAM_PULL = (PIN_CONFIG_END + 1),
};
-#define BCM2835_PINCONF_PACK(_param_, _arg_) ((_param_) << 16 | (_arg_))
-#define BCM2835_PINCONF_UNPACK_PARAM(_conf_) ((_conf_) >> 16)
-#define BCM2835_PINCONF_UNPACK_ARG(_conf_) ((_conf_) & 0xffff)
-
struct bcm2835_pinctrl {
struct device *dev;
void __iomem *base;
@@ -213,14 +211,6 @@ static const char * const bcm2835_gpio_groups[] = {
};
enum bcm2835_fsel {
- BCM2835_FSEL_GPIO_IN = 0,
- BCM2835_FSEL_GPIO_OUT = 1,
- BCM2835_FSEL_ALT0 = 4,
- BCM2835_FSEL_ALT1 = 5,
- BCM2835_FSEL_ALT2 = 6,
- BCM2835_FSEL_ALT3 = 7,
- BCM2835_FSEL_ALT4 = 3,
- BCM2835_FSEL_ALT5 = 2,
BCM2835_FSEL_COUNT = 8,
BCM2835_FSEL_MASK = 0x7,
};
@@ -714,7 +704,7 @@ static int bcm2835_pctl_dt_node_to_map_pull(struct bcm2835_pinctrl *pc,
configs = kzalloc(sizeof(*configs), GFP_KERNEL);
if (!configs)
return -ENOMEM;
- configs[0] = BCM2835_PINCONF_PACK(BCM2835_PINCONF_PARAM_PULL, pull);
+ configs[0] = pinconf_to_config_packed(BCM2835_PINCONF_PARAM_PULL, pull);
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
map->data.configs.group_or_pin = bcm2835_gpio_pins[pin].name;
@@ -727,7 +717,7 @@ static int bcm2835_pctl_dt_node_to_map_pull(struct bcm2835_pinctrl *pc,
static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np,
- struct pinctrl_map **map, unsigned *num_maps)
+ struct pinctrl_map **map, unsigned int *num_maps)
{
struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
struct property *pins, *funcs, *pulls;
@@ -736,6 +726,12 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
int i, err;
u32 pin, func, pull;
+ /* Check for generic binding in this node */
+ err = pinconf_generic_dt_node_to_map_all(pctldev, np, map, num_maps);
+ if (err || *num_maps)
+ return err;
+
+ /* Generic binding did not find anything continue with legacy parse */
pins = of_find_property(np, "brcm,pins", NULL);
if (!pins) {
dev_err(pc->dev, "%pOF: missing brcm,pins property\n", np);
@@ -917,37 +913,67 @@ static int bcm2835_pinconf_get(struct pinctrl_dev *pctldev,
return -ENOTSUPP;
}
+static void bcm2835_pull_config_set(struct bcm2835_pinctrl *pc,
+ unsigned int pin, unsigned int arg)
+{
+ u32 off, bit;
+
+ off = GPIO_REG_OFFSET(pin);
+ bit = GPIO_REG_SHIFT(pin);
+
+ bcm2835_gpio_wr(pc, GPPUD, arg & 3);
+ /*
+ * BCM2835 datasheet say to wait 150 cycles, but not of what.
+ * But the VideoCore firmware delay for this operation
+ * based nearly on the same amount of VPU cycles and this clock
+ * runs at 250 MHz.
+ */
+ udelay(1);
+ bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), BIT(bit));
+ udelay(1);
+ bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), 0);
+}
+
static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned pin, unsigned long *configs,
- unsigned num_configs)
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
{
struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- enum bcm2835_pinconf_param param;
- u16 arg;
- u32 off, bit;
+ u32 param, arg;
int i;
for (i = 0; i < num_configs; i++) {
- param = BCM2835_PINCONF_UNPACK_PARAM(configs[i]);
- arg = BCM2835_PINCONF_UNPACK_ARG(configs[i]);
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
- if (param != BCM2835_PINCONF_PARAM_PULL)
- return -EINVAL;
+ switch (param) {
+ /* Set legacy brcm,pull */
+ case BCM2835_PINCONF_PARAM_PULL:
+ bcm2835_pull_config_set(pc, pin, arg);
+ break;
- off = GPIO_REG_OFFSET(pin);
- bit = GPIO_REG_SHIFT(pin);
+ /* Set pull generic bindings */
+ case PIN_CONFIG_BIAS_DISABLE:
+ bcm2835_pull_config_set(pc, pin, BCM2835_PUD_OFF);
+ break;
- bcm2835_gpio_wr(pc, GPPUD, arg & 3);
- /*
- * BCM2835 datasheet say to wait 150 cycles, but not of what.
- * But the VideoCore firmware delay for this operation
- * based nearly on the same amount of VPU cycles and this clock
- * runs at 250 MHz.
- */
- udelay(1);
- bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), BIT(bit));
- udelay(1);
- bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), 0);
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ bcm2835_pull_config_set(pc, pin, BCM2835_PUD_DOWN);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ bcm2835_pull_config_set(pc, pin, BCM2835_PUD_UP);
+ break;
+
+ /* Set output-high or output-low */
+ case PIN_CONFIG_OUTPUT:
+ bcm2835_gpio_set_bit(pc, arg ? GPSET0 : GPCLR0, pin);
+ break;
+
+ default:
+ return -EINVAL;
+
+ } /* switch param type */
} /* for each config */
return 0;
diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c
index bf2e17d0d6e4..acbd413340e8 100644
--- a/drivers/pinctrl/berlin/berlin-bg2.c
+++ b/drivers/pinctrl/berlin/berlin-bg2.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Berlin BG2 pinctrl driver.
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c
index 9bee7bd1650f..c0f5d86d5d01 100644
--- a/drivers/pinctrl/berlin/berlin-bg2cd.c
+++ b/drivers/pinctrl/berlin/berlin-bg2cd.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Berlin BG2CD pinctrl driver.
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c
index eee6763f114c..20a3216ede07 100644
--- a/drivers/pinctrl/berlin/berlin-bg2q.c
+++ b/drivers/pinctrl/berlin/berlin-bg2q.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Berlin BG2Q pinctrl driver
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index e6740656ee7c..6a7fe929a68b 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell berlin4ct pinctrl driver
*
* Copyright (C) 2015 Marvell Technology Group Ltd.
*
* Author: Jisheng Zhang <jszhang@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index cc3bd2efafe3..a620a8e8fa78 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Berlin SoC pinctrl core driver
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/io.h>
diff --git a/drivers/pinctrl/berlin/berlin.h b/drivers/pinctrl/berlin/berlin.h
index e9b30f95b03e..d7787754d1ed 100644
--- a/drivers/pinctrl/berlin/berlin.h
+++ b/drivers/pinctrl/berlin/berlin.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Marvell Berlin SoC pinctrl driver.
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __PINCTRL_BERLIN_H
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 24aaddd760a0..e582a21cfe54 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -1,16 +1,11 @@
-/*
- * Core driver for the imx pin controller
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro Ltd.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Core driver for the imx pin controller
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro Ltd.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
@@ -371,7 +366,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
unsigned long config;
if (!pin_reg || pin_reg->conf_reg == -1) {
- seq_printf(s, "N/A");
+ seq_puts(s, "N/A");
return;
}
@@ -390,7 +385,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
if (group > pctldev->num_groups)
return;
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
grp = pinctrl_generic_get_group(pctldev, group);
if (!grp)
return;
@@ -414,11 +409,18 @@ static const struct pinconf_ops imx_pinconf_ops = {
};
/*
- * Each pin represented in fsl,pins consists of 5 u32 PIN_FUNC_ID and
- * 1 u32 CONFIG, so 24 types in total for each pin.
+ * Each pin represented in fsl,pins consists of a number of u32 PIN_FUNC_ID
+ * and 1 u32 CONFIG, the total size is PIN_FUNC_ID + CONFIG for each pin.
+ * For generic_pinconf case, there's no extra u32 CONFIG.
+ *
+ * PIN_FUNC_ID format:
+ * Default:
+ * <mux_reg conf_reg input_reg mux_mode input_val>
+ * SHARE_MUX_CONF_REG:
+ * <mux_conf_reg input_reg mux_mode input_val>
*/
#define FSL_PIN_SIZE 24
-#define SHARE_FSL_PIN_SIZE 20
+#define FSL_PIN_SHARE_SIZE 20
static int imx_pinctrl_parse_groups(struct device_node *np,
struct group_desc *grp,
@@ -434,7 +436,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
dev_dbg(ipctl->dev, "group(%d): %s\n", index, np->name);
if (info->flags & SHARE_MUX_CONF_REG)
- pin_size = SHARE_FSL_PIN_SIZE;
+ pin_size = FSL_PIN_SHARE_SIZE;
else
pin_size = FSL_PIN_SIZE;
@@ -617,7 +619,7 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
nfuncs = 1;
} else {
nfuncs = of_get_child_count(np);
- if (nfuncs <= 0) {
+ if (nfuncs == 0) {
dev_err(&pdev->dev, "no functions defined\n");
return -EINVAL;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 038e8c0e5b96..4b8225ccb03a 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* IMX pinmux core definitions
*
@@ -5,11 +6,6 @@
* Copyright (C) 2012 Linaro Ltd.
*
* Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DRIVERS_PINCTRL_IMX_H
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index a4e9f430d452..5af89de0ff02 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -1,19 +1,14 @@
-/*
- * Core driver for the imx pin controller in imx1/21/27
- *
- * Copyright (C) 2013 Pengutronix
- * Author: Markus Pargmann <mpa@pengutronix.de>
- *
- * Based on pinctrl-imx.c:
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Core driver for the imx pin controller in imx1/21/27
+//
+// Copyright (C) 2013 Pengutronix
+// Author: Markus Pargmann <mpa@pengutronix.de>
+//
+// Based on pinctrl-imx.c:
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro Ltd.
#include <linux/bitops.h>
#include <linux/err.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c
index fc8efc748734..faf770f13bc7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.c
@@ -1,13 +1,8 @@
-/*
- * i.MX1 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// i.MX1 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
#include <linux/init.h>
#include <linux/of.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.h b/drivers/pinctrl/freescale/pinctrl-imx1.h
index 174074308d6c..f1b9dabf7601 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* IMX pinmux core definitions
*
@@ -5,11 +6,6 @@
* Copyright (C) 2012 Linaro Ltd.
*
* Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DRIVERS_PINCTRL_IMX1_H
diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c
index 73e26bc12f09..8a102275a053 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx21.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx21.c
@@ -1,13 +1,8 @@
-/*
- * i.MX21 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// i.MX21 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
#include <linux/init.h>
#include <linux/of.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx23.c b/drivers/pinctrl/freescale/pinctrl-imx23.c
index c9405685971b..144020764a4b 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx23.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx23.c
@@ -1,16 +1,9 @@
-/*
- * Freescale i.MX23 pinctrl driver
- *
- * Author: Shawn Guo <shawn.guo@linaro.org>
- * Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Freescale i.MX23 pinctrl driver
+//
+// Author: Shawn Guo <shawn.guo@linaro.org>
+// Copyright 2012 Freescale Semiconductor, Inc.
#include <linux/init.h>
#include <linux/of_device.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index db6d9d1382f9..a899a398b6bb 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -1,19 +1,15 @@
-/*
- * imx25 pinctrl driver.
- *
- * Copyright 2013 Eukréa Electromatique <denis@eukrea.com>
- *
- * This driver was mostly copied from the imx51 pinctrl driver which has:
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Denis Carikli <denis@eukrea.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// imx25 pinctrl driver.
+//
+// Copyright 2013 Eukréa Electromatique <denis@eukrea.com>
+//
+// This driver was mostly copied from the imx51 pinctrl driver which has:
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Denis Carikli <denis@eukrea.com>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c
index e5992036fc6c..b4dfc1676cbc 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx27.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx27.c
@@ -1,15 +1,10 @@
-/*
- * imx27 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2013 Pengutronix
- *
- * Author: Markus Pargmann <mpa@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx27 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2013 Pengutronix
+//
+// Author: Markus Pargmann <mpa@pengutronix.de>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx28.c b/drivers/pinctrl/freescale/pinctrl-imx28.c
index 87deb9ec938a..13730dd193f1 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx28.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx28.c
@@ -1,16 +1,9 @@
-/*
- * Freescale i.MX28 pinctrl driver
- *
- * Author: Shawn Guo <shawn.guo@linaro.org>
- * Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Freescale i.MX28 pinctrl driver
+//
+// Author: Shawn Guo <shawn.guo@linaro.org>
+// Copyright 2012 Freescale Semiconductor, Inc.
#include <linux/init.h>
#include <linux/of_device.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c
index 6927946ae4b5..871bb419e2f0 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx35.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx35.c
@@ -1,17 +1,13 @@
-/*
- * imx35 pinctrl driver.
- *
- * This driver was mostly copied from the imx51 pinctrl driver which has:
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// imx35 pinctrl driver.
+//
+// This driver was mostly copied from the imx51 pinctrl driver which has:
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index eb349b97290f..cf182c040e0b 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -1,15 +1,10 @@
-/*
- * imx50 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2013 Greg Ungerer <gerg@uclinux.org>
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx50 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2013 Greg Ungerer <gerg@uclinux.org>
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c
index 49acd991b5fb..e5c261e2bf1e 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx51.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx51.c
@@ -1,16 +1,11 @@
-/*
- * imx51 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx51 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index 6dd0c60eaea4..64c97aaf20c7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -1,16 +1,11 @@
-/*
- * imx53 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx53 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index 91b85fc01de8..0858b4d79ed2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx6dl pinctrl driver
- *
- * Author: Shawn Guo <shawn.guo@linaro.org>
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx6dl pinctrl driver
+//
+// Author: Shawn Guo <shawn.guo@linaro.org>
+// Copyright (C) 2013 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 5f653d69d0f5..078ed6a331fd 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -1,16 +1,11 @@
-/*
- * imx6q pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx6q pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index 1167dc273c04..9d2e6f987aa7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx6sl pinctrl driver
- *
- * Author: Shawn Guo <shawn.guo@linaro.org>
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx6sl pinctrl driver
+//
+// Author: Shawn Guo <shawn.guo@linaro.org>
+// Copyright (C) 2013 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sll.c b/drivers/pinctrl/freescale/pinctrl-imx6sll.c
index 0fbea9cf536d..0618f4d887fd 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sll.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sll.c
@@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright 2017-2018 NXP.
- *
- */
+//
+// Copyright (C) 2016 Freescale Semiconductor, Inc.
+// Copyright 2017-2018 NXP.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index 15ea56c75f68..c7e2b1f94f01 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx6sx pinctrl driver
- *
- * Author: Anson Huang <Anson.Huang@freescale.com>
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx6sx pinctrl driver
+//
+// Author: Anson Huang <Anson.Huang@freescale.com>
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
index 4580717ade19..7e37627c63f5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx6ul pinctrl driver
- *
- * Author: Anson Huang <Anson.Huang@freescale.com>
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx6ul pinctrl driver
+//
+// Author: Anson Huang <Anson.Huang@freescale.com>
+// Copyright (C) 2015 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index 0b0a2f33b06a..369d3e59fdd6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx7d pinctrl driver
- *
- * Author: Anson Huang <Anson.Huang@freescale.com>
- * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx7d pinctrl driver
+//
+// Author: Anson Huang <Anson.Huang@freescale.com>
+// Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7ulp.c b/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
index f363e45fd246..f521bdb53f62 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
@@ -1,14 +1,9 @@
-/*
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright (C) 2017 NXP
- *
- * Author: Dong Aisheng <aisheng.dong@nxp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2016 Freescale Semiconductor, Inc.
+// Copyright (C) 2017 NXP
+//
+// Author: Dong Aisheng <aisheng.dong@nxp.com>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 6852010a6d70..594f3e5ce9a9 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -1,13 +1,6 @@
-/*
- * Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2012 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.h b/drivers/pinctrl/freescale/pinctrl-mxs.h
index 34dbf75208dc..ab9f834b03e6 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.h
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.h
@@ -1,12 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef __PINCTRL_MXS_H
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index c078f859ae15..37602b053ed2 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -1,13 +1,8 @@
-/*
- * VF610 pinctrl driver based on imx pinmux and pinconf core
- *
- * Copyright 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// VF610 pinctrl driver based on imx pinmux and pinconf core
+//
+// Copyright 2013 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index fee9225ca559..0f1019ae3993 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1527,6 +1527,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
},
{
@@ -1534,6 +1535,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
},
{
@@ -1541,6 +1543,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
},
{
@@ -1548,6 +1551,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
},
{}
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 862c5dbc6977..9905dc672f6b 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -1,12 +1,18 @@
menu "MediaTek pinctrl drivers"
depends on ARCH_MEDIATEK || COMPILE_TEST
+config EINT_MTK
+ bool "MediaTek External Interrupt Support"
+ depends on PINCTRL_MTK || PINCTRL_MT7622 || COMPILE_TEST
+ select IRQ_DOMAIN
+
config PINCTRL_MTK
bool
depends on OF
select PINMUX
select GENERIC_PINCONF
select GPIOLIB
+ select EINT_MTK
select OF_GPIO
# For ARMv7 SoCs
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 7959e773533f..3de7156df345 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Core
+obj-$(CONFIG_EINT_MTK) += mtk-eint.o
obj-$(CONFIG_PINCTRL_MTK) += pinctrl-mtk-common.o
# SoC Drivers
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
new file mode 100644
index 000000000000..30f3316747e2
--- /dev/null
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2018 MediaTek Inc.
+
+/*
+ * Library for MediaTek External Interrupt Support
+ *
+ * Author: Maoguang Meng <maoguang.meng@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtk-eint.h"
+
+#define MTK_EINT_EDGE_SENSITIVE 0
+#define MTK_EINT_LEVEL_SENSITIVE 1
+#define MTK_EINT_DBNC_SET_DBNC_BITS 4
+#define MTK_EINT_DBNC_RST_BIT (0x1 << 1)
+#define MTK_EINT_DBNC_SET_EN (0x1 << 0)
+
+static const struct mtk_eint_regs mtk_generic_eint_regs = {
+ .stat = 0x000,
+ .ack = 0x040,
+ .mask = 0x080,
+ .mask_set = 0x0c0,
+ .mask_clr = 0x100,
+ .sens = 0x140,
+ .sens_set = 0x180,
+ .sens_clr = 0x1c0,
+ .soft = 0x200,
+ .soft_set = 0x240,
+ .soft_clr = 0x280,
+ .pol = 0x300,
+ .pol_set = 0x340,
+ .pol_clr = 0x380,
+ .dom_en = 0x400,
+ .dbnc_ctrl = 0x500,
+ .dbnc_set = 0x600,
+ .dbnc_clr = 0x700,
+};
+
+static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
+ unsigned int eint_num,
+ unsigned int offset)
+{
+ unsigned int eint_base = 0;
+ void __iomem *reg;
+
+ if (eint_num >= eint->hw->ap_num)
+ eint_base = eint->hw->ap_num;
+
+ reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4;
+
+ return reg;
+}
+
+static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
+ unsigned int eint_num)
+{
+ unsigned int sens;
+ unsigned int bit = BIT(eint_num % 32);
+ void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
+ eint->regs->sens);
+
+ if (readl(reg) & bit)
+ sens = MTK_EINT_LEVEL_SENSITIVE;
+ else
+ sens = MTK_EINT_EDGE_SENSITIVE;
+
+ if (eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE)
+ return 1;
+ else
+ return 0;
+}
+
+static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
+{
+ int start_level, curr_level;
+ unsigned int reg_offset;
+ u32 mask = BIT(hwirq & 0x1f);
+ u32 port = (hwirq >> 5) & eint->hw->port_mask;
+ void __iomem *reg = eint->base + (port << 2);
+
+ curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);
+
+ do {
+ start_level = curr_level;
+ if (start_level)
+ reg_offset = eint->regs->pol_clr;
+ else
+ reg_offset = eint->regs->pol_set;
+ writel(mask, reg + reg_offset);
+
+ curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
+ hwirq);
+ } while (start_level != curr_level);
+
+ return start_level;
+}
+
+static void mtk_eint_mask(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
+ eint->regs->mask_set);
+
+ writel(mask, reg);
+}
+
+static void mtk_eint_unmask(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
+ eint->regs->mask_clr);
+
+ writel(mask, reg);
+
+ if (eint->dual_edge[d->hwirq])
+ mtk_eint_flip_edge(eint, d->hwirq);
+}
+
+static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
+ unsigned int eint_num)
+{
+ unsigned int bit = BIT(eint_num % 32);
+ void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
+ eint->regs->mask);
+
+ return !!(readl(reg) & bit);
+}
+
+static void mtk_eint_ack(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
+ eint->regs->ack);
+
+ writel(mask, reg);
+}
+
+static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg;
+
+ if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
+ ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
+ dev_err(eint->dev,
+ "Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
+ d->irq, d->hwirq, type);
+ return -EINVAL;
+ }
+
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ eint->dual_edge[d->hwirq] = 1;
+ else
+ eint->dual_edge[d->hwirq] = 0;
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
+ reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
+ writel(mask, reg);
+ } else {
+ reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
+ writel(mask, reg);
+ }
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
+ reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
+ writel(mask, reg);
+ } else {
+ reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
+ writel(mask, reg);
+ }
+
+ if (eint->dual_edge[d->hwirq])
+ mtk_eint_flip_edge(eint, d->hwirq);
+
+ return 0;
+}
+
+static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ int shift = d->hwirq & 0x1f;
+ int reg = d->hwirq >> 5;
+
+ if (on)
+ eint->wake_mask[reg] |= BIT(shift);
+ else
+ eint->wake_mask[reg] &= ~BIT(shift);
+
+ return 0;
+}
+
+static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
+ void __iomem *base, u32 *buf)
+{
+ int port;
+ void __iomem *reg;
+
+ for (port = 0; port < eint->hw->ports; port++) {
+ reg = base + (port << 2);
+ writel_relaxed(~buf[port], reg + eint->regs->mask_set);
+ writel_relaxed(buf[port], reg + eint->regs->mask_clr);
+ }
+}
+
+static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
+ void __iomem *base, u32 *buf)
+{
+ int port;
+ void __iomem *reg;
+
+ for (port = 0; port < eint->hw->ports; port++) {
+ reg = base + eint->regs->mask + (port << 2);
+ buf[port] = ~readl_relaxed(reg);
+ /* Mask is 0 when irq is enabled, and 1 when disabled. */
+ }
+}
+
+static int mtk_eint_irq_request_resources(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gpio_c;
+ unsigned int gpio_n;
+ int err;
+
+ err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
+ &gpio_n, &gpio_c);
+ if (err < 0) {
+ dev_err(eint->dev, "Can not find pin\n");
+ return err;
+ }
+
+ err = gpiochip_lock_as_irq(gpio_c, gpio_n);
+ if (err < 0) {
+ dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
+ irqd_to_hwirq(d));
+ return err;
+ }
+
+ err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
+ if (err < 0) {
+ dev_err(eint->dev, "Can not eint mode\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void mtk_eint_irq_release_resources(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gpio_c;
+ unsigned int gpio_n;
+
+ eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
+ &gpio_c);
+
+ gpiochip_unlock_as_irq(gpio_c, gpio_n);
+}
+
+static struct irq_chip mtk_eint_irq_chip = {
+ .name = "mt-eint",
+ .irq_disable = mtk_eint_mask,
+ .irq_mask = mtk_eint_mask,
+ .irq_unmask = mtk_eint_unmask,
+ .irq_ack = mtk_eint_ack,
+ .irq_set_type = mtk_eint_set_type,
+ .irq_set_wake = mtk_eint_irq_set_wake,
+ .irq_request_resources = mtk_eint_irq_request_resources,
+ .irq_release_resources = mtk_eint_irq_release_resources,
+};
+
+static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
+{
+ void __iomem *reg = eint->base + eint->regs->dom_en;
+ unsigned int i;
+
+ for (i = 0; i < eint->hw->ap_num; i += 32) {
+ writel(0xffffffff, reg);
+ reg += 4;
+ }
+
+ return 0;
+}
+
+static inline void
+mtk_eint_debounce_process(struct mtk_eint *eint, int index)
+{
+ unsigned int rst, ctrl_offset;
+ unsigned int bit, dbnc;
+
+ ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl;
+ dbnc = readl(eint->base + ctrl_offset);
+ bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8);
+ if ((bit & dbnc) > 0) {
+ ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set;
+ rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8);
+ writel(rst, eint->base + ctrl_offset);
+ }
+}
+
+static void mtk_eint_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mtk_eint *eint = irq_desc_get_handler_data(desc);
+ unsigned int status, eint_num;
+ int offset, index, virq;
+ void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
+ int dual_edge, start_level, curr_level;
+
+ chained_irq_enter(chip, desc);
+ for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32,
+ reg += 4) {
+ status = readl(reg);
+ while (status) {
+ offset = __ffs(status);
+ index = eint_num + offset;
+ virq = irq_find_mapping(eint->domain, index);
+ status &= ~BIT(offset);
+
+ dual_edge = eint->dual_edge[index];
+ if (dual_edge) {
+ /*
+ * Clear soft-irq in case we raised it last
+ * time.
+ */
+ writel(BIT(offset), reg - eint->regs->stat +
+ eint->regs->soft_clr);
+
+ start_level =
+ eint->gpio_xlate->get_gpio_state(eint->pctl,
+ index);
+ }
+
+ generic_handle_irq(virq);
+
+ if (dual_edge) {
+ curr_level = mtk_eint_flip_edge(eint, index);
+
+ /*
+ * If level changed, we might lost one edge
+ * interrupt, raised it through soft-irq.
+ */
+ if (start_level != curr_level)
+ writel(BIT(offset), reg -
+ eint->regs->stat +
+ eint->regs->soft_set);
+ }
+
+ if (index < eint->hw->db_cnt)
+ mtk_eint_debounce_process(eint, index);
+ }
+ }
+ chained_irq_exit(chip, desc);
+}
+
+int mtk_eint_do_suspend(struct mtk_eint *eint)
+{
+ mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
+ mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);
+
+ return 0;
+}
+
+int mtk_eint_do_resume(struct mtk_eint *eint)
+{
+ mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);
+
+ return 0;
+}
+
+int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
+ unsigned int debounce)
+{
+ int virq, eint_offset;
+ unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
+ dbnc;
+ static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
+ 64000, 128000, 256000};
+ struct irq_data *d;
+
+ virq = irq_find_mapping(eint->domain, eint_num);
+ eint_offset = (eint_num % 4) * 8;
+ d = irq_get_irq_data(virq);
+
+ set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set;
+ clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr;
+
+ if (!mtk_eint_can_en_debounce(eint, eint_num))
+ return -EINVAL;
+
+ dbnc = ARRAY_SIZE(debounce_time);
+ for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+ if (debounce <= debounce_time[i]) {
+ dbnc = i;
+ break;
+ }
+ }
+
+ if (!mtk_eint_get_mask(eint, eint_num)) {
+ mtk_eint_mask(d);
+ unmask = 1;
+ } else {
+ unmask = 0;
+ }
+
+ clr_bit = 0xff << eint_offset;
+ writel(clr_bit, eint->base + clr_offset);
+
+ bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
+ eint_offset;
+ rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
+ writel(rst | bit, eint->base + set_offset);
+
+ /*
+ * Delay a while (more than 2T) to wait for hw debounce counter reset
+ * work correctly.
+ */
+ udelay(1);
+ if (unmask == 1)
+ mtk_eint_unmask(d);
+
+ return 0;
+}
+
+int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
+{
+ int irq;
+
+ irq = irq_find_mapping(eint->domain, eint_n);
+ if (!irq)
+ return -EINVAL;
+
+ return irq;
+}
+
+int mtk_eint_do_init(struct mtk_eint *eint)
+{
+ int i;
+
+ /* If clients don't assign a specific regs, let's use generic one */
+ if (!eint->regs)
+ eint->regs = &mtk_generic_eint_regs;
+
+ eint->wake_mask = devm_kcalloc(eint->dev, eint->hw->ports,
+ sizeof(*eint->wake_mask), GFP_KERNEL);
+ if (!eint->wake_mask)
+ return -ENOMEM;
+
+ eint->cur_mask = devm_kcalloc(eint->dev, eint->hw->ports,
+ sizeof(*eint->cur_mask), GFP_KERNEL);
+ if (!eint->cur_mask)
+ return -ENOMEM;
+
+ eint->dual_edge = devm_kcalloc(eint->dev, eint->hw->ap_num,
+ sizeof(int), GFP_KERNEL);
+ if (!eint->dual_edge)
+ return -ENOMEM;
+
+ eint->domain = irq_domain_add_linear(eint->dev->of_node,
+ eint->hw->ap_num,
+ &irq_domain_simple_ops, NULL);
+ if (!eint->domain)
+ return -ENOMEM;
+
+ mtk_eint_hw_init(eint);
+ for (i = 0; i < eint->hw->ap_num; i++) {
+ int virq = irq_create_mapping(eint->domain, i);
+
+ irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, eint);
+ }
+
+ irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
+ eint);
+
+ return 0;
+}
diff --git a/drivers/pinctrl/mediatek/mtk-eint.h b/drivers/pinctrl/mediatek/mtk-eint.h
new file mode 100644
index 000000000000..c286a9b940f2
--- /dev/null
+++ b/drivers/pinctrl/mediatek/mtk-eint.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2018 MediaTek Inc.
+ *
+ * Author: Maoguang Meng <maoguang.meng@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+#ifndef __MTK_EINT_H
+#define __MTK_EINT_H
+
+#include <linux/irqdomain.h>
+
+struct mtk_eint_regs {
+ unsigned int stat;
+ unsigned int ack;
+ unsigned int mask;
+ unsigned int mask_set;
+ unsigned int mask_clr;
+ unsigned int sens;
+ unsigned int sens_set;
+ unsigned int sens_clr;
+ unsigned int soft;
+ unsigned int soft_set;
+ unsigned int soft_clr;
+ unsigned int pol;
+ unsigned int pol_set;
+ unsigned int pol_clr;
+ unsigned int dom_en;
+ unsigned int dbnc_ctrl;
+ unsigned int dbnc_set;
+ unsigned int dbnc_clr;
+};
+
+struct mtk_eint_hw {
+ u8 port_mask;
+ u8 ports;
+ unsigned int ap_num;
+ unsigned int db_cnt;
+};
+
+struct mtk_eint;
+
+struct mtk_eint_xt {
+ int (*get_gpio_n)(void *data, unsigned long eint_n,
+ unsigned int *gpio_n,
+ struct gpio_chip **gpio_chip);
+ int (*get_gpio_state)(void *data, unsigned long eint_n);
+ int (*set_gpio_as_eint)(void *data, unsigned long eint_n);
+};
+
+struct mtk_eint {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *domain;
+ int irq;
+
+ int *dual_edge;
+ u32 *wake_mask;
+ u32 *cur_mask;
+
+ /* Used to fit into various EINT device */
+ const struct mtk_eint_hw *hw;
+ const struct mtk_eint_regs *regs;
+
+ /* Used to fit into various pinctrl device */
+ void *pctl;
+ const struct mtk_eint_xt *gpio_xlate;
+};
+
+#if IS_ENABLED(CONFIG_EINT_MTK)
+int mtk_eint_do_init(struct mtk_eint *eint);
+int mtk_eint_do_suspend(struct mtk_eint *eint);
+int mtk_eint_do_resume(struct mtk_eint *eint);
+int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n,
+ unsigned int debounce);
+int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n);
+
+#else
+static inline int mtk_eint_do_init(struct mtk_eint *eint)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mtk_eint_do_suspend(struct mtk_eint *eint)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mtk_eint_do_resume(struct mtk_eint *eint)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mtk_eint_set_debounce(struct mtk_eint *eint,
+					unsigned long eint_n, unsigned int debounce)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif /* __MTK_EINT_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2701.c b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
index f86f3b379607..e91c314f3b75 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt2701.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
@@ -531,31 +531,12 @@ static const struct mtk_pinctrl_devdata mt2701_pinctrl_data = {
.port_shf = 4,
.port_mask = 0x1f,
.port_align = 4,
- .eint_offsets = {
- .name = "mt2701_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 6,
.ports = 6,
+ .ap_num = 169,
+ .db_cnt = 16,
},
- .ap_num = 169,
- .db_cnt = 16,
};
static int mt2701_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2712.c b/drivers/pinctrl/mediatek/pinctrl-mt2712.c
index 81e11f9e70f1..8398d55c01cb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt2712.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2712.c
@@ -576,31 +576,12 @@ static const struct mtk_pinctrl_devdata mt2712_pinctrl_data = {
.port_shf = 4,
.port_mask = 0xf,
.port_align = 4,
- .eint_offsets = {
- .name = "mt2712_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 0xf,
.ports = 8,
+ .ap_num = 229,
+ .db_cnt = 40,
},
- .ap_num = 229,
- .db_cnt = 40,
};
static int mt2712_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 06e8406c4440..ad6da1184c9f 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -30,6 +31,7 @@
#include "../core.h"
#include "../pinconf.h"
#include "../pinmux.h"
+#include "mtk-eint.h"
#define PINCTRL_PINCTRL_DEV KBUILD_MODNAME
#define MTK_RANGE(_a) { .range = (_a), .nranges = ARRAY_SIZE(_a), }
@@ -123,6 +125,8 @@ struct mtk_pin_soc {
unsigned int ngrps;
const struct function_desc *funcs;
unsigned int nfuncs;
+ const struct mtk_eint_regs *eint_regs;
+ const struct mtk_eint_hw *eint_hw;
};
struct mtk_pinctrl {
@@ -131,6 +135,7 @@ struct mtk_pinctrl {
struct device *dev;
struct gpio_chip chip;
const struct mtk_pin_soc *soc;
+ struct mtk_eint *eint;
};
static const struct mtk_pin_field_calc mt7622_pin_mode_range[] = {
@@ -913,6 +918,13 @@ static const struct pin_config_item mtk_conf_items[] = {
};
#endif
+static const struct mtk_eint_hw mt7622_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = ARRAY_SIZE(mt7622_pins),
+ .db_cnt = 20,
+};
+
static const struct mtk_pin_soc mt7622_data = {
.reg_cal = mt7622_reg_cals,
.pins = mt7622_pins,
@@ -921,6 +933,7 @@ static const struct mtk_pin_soc mt7622_data = {
.ngrps = ARRAY_SIZE(mt7622_groups),
.funcs = mt7622_functions,
.nfuncs = ARRAY_SIZE(mt7622_functions),
+ .eint_hw = &mt7622_eint_hw,
};
static void mtk_w32(struct mtk_pinctrl *pctl, u32 reg, u32 val)
@@ -1441,6 +1454,32 @@ static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
return pinctrl_gpio_direction_output(chip->base + gpio);
}
+static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ unsigned long eint_n;
+
+ eint_n = offset;
+
+ return mtk_eint_find_irq(hw->eint, eint_n);
+}
+
+static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ unsigned long eint_n;
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ eint_n = offset;
+
+ return mtk_eint_set_debounce(hw->eint, eint_n, debounce);
+}
+
static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
{
struct gpio_chip *chip = &hw->chip;
@@ -1454,6 +1493,8 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
chip->set = mtk_gpio_set;
+	chip->to_irq = mtk_gpio_to_irq;
+	chip->set_config = mtk_gpio_set_config;
chip->base = -1;
chip->ngpio = hw->soc->npins;
chip->of_node = np;
@@ -1514,6 +1555,103 @@ static int mtk_build_functions(struct mtk_pinctrl *hw)
return 0;
}
+static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
+ unsigned int *gpio_n,
+ struct gpio_chip **gpio_chip)
+{
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+
+ *gpio_chip = &hw->chip;
+ *gpio_n = eint_n;
+
+ return 0;
+}
+
+static int mtk_xt_get_gpio_state(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+ struct gpio_chip *gpio_chip;
+ unsigned int gpio_n;
+ int err;
+
+ err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
+ if (err)
+ return err;
+
+ return mtk_gpio_get(gpio_chip, gpio_n);
+}
+
+static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+ struct gpio_chip *gpio_chip;
+ unsigned int gpio_n;
+ int err;
+
+ err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_MODE,
+ MTK_GPIO_MODE);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_DIR, MTK_INPUT);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_SMT, MTK_ENABLE);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct mtk_eint_xt mtk_eint_xt = {
+ .get_gpio_n = mtk_xt_get_gpio_n,
+ .get_gpio_state = mtk_xt_get_gpio_state,
+ .set_gpio_as_eint = mtk_xt_set_gpio_as_eint,
+};
+
+static int
+mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+
+ if (!IS_ENABLED(CONFIG_EINT_MTK))
+ return 0;
+
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ return -ENODEV;
+
+ hw->eint = devm_kzalloc(hw->dev, sizeof(*hw->eint), GFP_KERNEL);
+ if (!hw->eint)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eint");
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get eint resource\n");
+ return -ENODEV;
+ }
+
+ hw->eint->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->eint->base))
+ return PTR_ERR(hw->eint->base);
+
+ hw->eint->irq = irq_of_parse_and_map(np, 0);
+ if (!hw->eint->irq)
+ return -EINVAL;
+
+ hw->eint->dev = &pdev->dev;
+ hw->eint->hw = hw->soc->eint_hw;
+ hw->eint->pctl = hw;
+ hw->eint->gpio_xlate = &mtk_eint_xt;
+
+ return mtk_eint_do_init(hw->eint);
+}
+
static const struct of_device_id mtk_pinctrl_of_match[] = {
{ .compatible = "mediatek,mt7622-pinctrl", .data = &mt7622_data},
{ }
@@ -1577,6 +1715,11 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
return err;
}
+ err = mtk_build_eint(hw, pdev);
+ if (err)
+ dev_warn(&pdev->dev,
+ "Failed to add EINT, but pinctrl still can work\n");
+
platform_set_drvdata(pdev, hw);
return 0;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8127.c b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
index d76491574841..2e4cc9257e00 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8127.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
@@ -300,31 +300,12 @@ static const struct mtk_pinctrl_devdata mt8127_pinctrl_data = {
.port_shf = 4,
.port_mask = 0xf,
.port_align = 4,
- .eint_offsets = {
- .name = "mt8127_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 7,
.ports = 6,
+ .ap_num = 143,
+ .db_cnt = 16,
},
- .ap_num = 143,
- .db_cnt = 16,
};
static int mt8127_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8135.c b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
index d8c645f16f21..7f5edfaffdc5 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8135.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
@@ -313,31 +313,12 @@ static const struct mtk_pinctrl_devdata mt8135_pinctrl_data = {
.port_shf = 4,
.port_mask = 0xf,
.port_align = 4,
- .eint_offsets = {
- .name = "mt8135_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 7,
.ports = 6,
+ .ap_num = 192,
+ .db_cnt = 16,
},
- .ap_num = 192,
- .db_cnt = 16,
};
static int mt8135_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index 8bfd427b9135..c449c9a043da 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -340,31 +340,12 @@ static const struct mtk_pinctrl_devdata mt8173_pinctrl_data = {
.port_shf = 4,
.port_mask = 0xf,
.port_align = 4,
- .eint_offsets = {
- .name = "mt8173_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 7,
.ports = 6,
+ .ap_num = 224,
+ .db_cnt = 16,
},
- .ap_num = 224,
- .db_cnt = 16,
};
static int mt8173_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index c3975a04d1cd..b3799695d8db 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -38,6 +38,7 @@
#include "../core.h"
#include "../pinconf.h"
#include "../pinctrl-utils.h"
+#include "mtk-eint.h"
#include "pinctrl-mtk-common.h"
#define MAX_GPIO_MODE_PER_REG 5
@@ -831,243 +832,38 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- const struct mtk_desc_pin *pin;
struct mtk_pinctrl *pctl = gpiochip_get_data(chip);
- int irq;
-
- pin = pctl->devdata->pins + offset;
- if (pin->eint.eintnum == NO_EINT_SUPPORT)
- return -EINVAL;
-
- irq = irq_find_mapping(pctl->domain, pin->eint.eintnum);
- if (!irq)
- return -EINVAL;
-
- return irq;
-}
-
-static int mtk_pinctrl_irq_request_resources(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_desc_pin *pin;
- int ret;
-
- pin = mtk_find_pin_by_eint_num(pctl, d->hwirq);
-
- if (!pin) {
- dev_err(pctl->dev, "Can not find pin\n");
- return -EINVAL;
- }
-
- ret = gpiochip_lock_as_irq(pctl->chip, pin->pin.number);
- if (ret) {
- dev_err(pctl->dev, "unable to lock HW IRQ %lu for IRQ\n",
- irqd_to_hwirq(d));
- return ret;
- }
-
- /* set mux to INT mode */
- mtk_pmx_set_mode(pctl->pctl_dev, pin->pin.number, pin->eint.eintmux);
- /* set gpio direction to input */
- mtk_pmx_gpio_set_direction(pctl->pctl_dev, NULL, pin->pin.number, true);
- /* set input-enable */
- mtk_pconf_set_ies_smt(pctl, pin->pin.number, 1, PIN_CONFIG_INPUT_ENABLE);
-
- return 0;
-}
-
-static void mtk_pinctrl_irq_release_resources(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_desc_pin *pin;
-
- pin = mtk_find_pin_by_eint_num(pctl, d->hwirq);
-
- if (!pin) {
- dev_err(pctl->dev, "Can not find pin\n");
- return;
- }
-
- gpiochip_unlock_as_irq(pctl->chip, pin->pin.number);
-}
-
-static void __iomem *mtk_eint_get_offset(struct mtk_pinctrl *pctl,
- unsigned int eint_num, unsigned int offset)
-{
- unsigned int eint_base = 0;
- void __iomem *reg;
-
- if (eint_num >= pctl->devdata->ap_num)
- eint_base = pctl->devdata->ap_num;
-
- reg = pctl->eint_reg_base + offset + ((eint_num - eint_base) / 32) * 4;
-
- return reg;
-}
-
-/*
- * mtk_can_en_debounce: Check the EINT number is able to enable debounce or not
- * @eint_num: the EINT number to setmtk_pinctrl
- */
-static unsigned int mtk_eint_can_en_debounce(struct mtk_pinctrl *pctl,
- unsigned int eint_num)
-{
- unsigned int sens;
- unsigned int bit = BIT(eint_num % 32);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- void __iomem *reg = mtk_eint_get_offset(pctl, eint_num,
- eint_offsets->sens);
-
- if (readl(reg) & bit)
- sens = MT_LEVEL_SENSITIVE;
- else
- sens = MT_EDGE_SENSITIVE;
-
- if ((eint_num < pctl->devdata->db_cnt) && (sens != MT_EDGE_SENSITIVE))
- return 1;
- else
- return 0;
-}
-
-/*
- * mtk_eint_get_mask: To get the eint mask
- * @eint_num: the EINT number to get
- */
-static unsigned int mtk_eint_get_mask(struct mtk_pinctrl *pctl,
- unsigned int eint_num)
-{
- unsigned int bit = BIT(eint_num % 32);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- void __iomem *reg = mtk_eint_get_offset(pctl, eint_num,
- eint_offsets->mask);
-
- return !!(readl(reg) & bit);
-}
-
-static int mtk_eint_flip_edge(struct mtk_pinctrl *pctl, int hwirq)
-{
- int start_level, curr_level;
- unsigned int reg_offset;
- const struct mtk_eint_offsets *eint_offsets = &(pctl->devdata->eint_offsets);
- u32 mask = BIT(hwirq & 0x1f);
- u32 port = (hwirq >> 5) & eint_offsets->port_mask;
- void __iomem *reg = pctl->eint_reg_base + (port << 2);
const struct mtk_desc_pin *pin;
-
- pin = mtk_find_pin_by_eint_num(pctl, hwirq);
- curr_level = mtk_gpio_get(pctl->chip, pin->pin.number);
- do {
- start_level = curr_level;
- if (start_level)
- reg_offset = eint_offsets->pol_clr;
- else
- reg_offset = eint_offsets->pol_set;
- writel(mask, reg + reg_offset);
-
- curr_level = mtk_gpio_get(pctl->chip, pin->pin.number);
- } while (start_level != curr_level);
-
- return start_level;
-}
-
-static void mtk_eint_mask(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- u32 mask = BIT(d->hwirq & 0x1f);
- void __iomem *reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->mask_set);
-
- writel(mask, reg);
-}
-
-static void mtk_eint_unmask(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- u32 mask = BIT(d->hwirq & 0x1f);
- void __iomem *reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->mask_clr);
-
- writel(mask, reg);
-
- if (pctl->eint_dual_edges[d->hwirq])
- mtk_eint_flip_edge(pctl, d->hwirq);
-}
-
-static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
- unsigned debounce)
-{
- struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
- int eint_num, virq, eint_offset;
- unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
- static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
- 128000, 256000};
- const struct mtk_desc_pin *pin;
- struct irq_data *d;
+ unsigned long eint_n;
pin = pctl->devdata->pins + offset;
if (pin->eint.eintnum == NO_EINT_SUPPORT)
return -EINVAL;
- eint_num = pin->eint.eintnum;
- virq = irq_find_mapping(pctl->domain, eint_num);
- eint_offset = (eint_num % 4) * 8;
- d = irq_get_irq_data(virq);
+ eint_n = pin->eint.eintnum;
- set_offset = (eint_num / 4) * 4 + pctl->devdata->eint_offsets.dbnc_set;
- clr_offset = (eint_num / 4) * 4 + pctl->devdata->eint_offsets.dbnc_clr;
- if (!mtk_eint_can_en_debounce(pctl, eint_num))
- return -ENOSYS;
-
- dbnc = ARRAY_SIZE(debounce_time);
- for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
- if (debounce <= debounce_time[i]) {
- dbnc = i;
- break;
- }
- }
-
- if (!mtk_eint_get_mask(pctl, eint_num)) {
- mtk_eint_mask(d);
- unmask = 1;
- } else {
- unmask = 0;
- }
-
- clr_bit = 0xff << eint_offset;
- writel(clr_bit, pctl->eint_reg_base + clr_offset);
-
- bit = ((dbnc << EINT_DBNC_SET_DBNC_BITS) | EINT_DBNC_SET_EN) <<
- eint_offset;
- rst = EINT_DBNC_RST_BIT << eint_offset;
- writel(rst | bit, pctl->eint_reg_base + set_offset);
-
- /* Delay a while (more than 2T) to wait for hw debounce counter reset
- work correctly */
- udelay(1);
- if (unmask == 1)
- mtk_eint_unmask(d);
-
- return 0;
+ return mtk_eint_find_irq(pctl->eint, eint_n);
}
static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned offset,
unsigned long config)
{
+ struct mtk_pinctrl *pctl = gpiochip_get_data(chip);
+ const struct mtk_desc_pin *pin;
+ unsigned long eint_n;
u32 debounce;
if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
return -ENOTSUPP;
+ pin = pctl->devdata->pins + offset;
+ if (pin->eint.eintnum == NO_EINT_SUPPORT)
+ return -EINVAL;
+
debounce = pinconf_to_config_argument(config);
- return mtk_gpio_set_debounce(chip, offset, debounce);
+ eint_n = pin->eint.eintnum;
+
+ return mtk_eint_set_debounce(pctl->eint, eint_n, debounce);
}
static const struct gpio_chip mtk_gpio_chip = {
@@ -1084,117 +880,18 @@ static const struct gpio_chip mtk_gpio_chip = {
.of_gpio_n_cells = 2,
};
-static int mtk_eint_set_type(struct irq_data *d,
- unsigned int type)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- u32 mask = BIT(d->hwirq & 0x1f);
- void __iomem *reg;
-
- if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
- ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
- dev_err(pctl->dev, "Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
- d->irq, d->hwirq, type);
- return -EINVAL;
- }
-
- if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
- pctl->eint_dual_edges[d->hwirq] = 1;
- else
- pctl->eint_dual_edges[d->hwirq] = 0;
-
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
- reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->pol_clr);
- writel(mask, reg);
- } else {
- reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->pol_set);
- writel(mask, reg);
- }
-
- if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
- reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->sens_clr);
- writel(mask, reg);
- } else {
- reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->sens_set);
- writel(mask, reg);
- }
-
- if (pctl->eint_dual_edges[d->hwirq])
- mtk_eint_flip_edge(pctl, d->hwirq);
-
- return 0;
-}
-
-static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- int shift = d->hwirq & 0x1f;
- int reg = d->hwirq >> 5;
-
- if (on)
- pctl->wake_mask[reg] |= BIT(shift);
- else
- pctl->wake_mask[reg] &= ~BIT(shift);
-
- return 0;
-}
-
-static void mtk_eint_chip_write_mask(const struct mtk_eint_offsets *chip,
- void __iomem *eint_reg_base, u32 *buf)
-{
- int port;
- void __iomem *reg;
-
- for (port = 0; port < chip->ports; port++) {
- reg = eint_reg_base + (port << 2);
- writel_relaxed(~buf[port], reg + chip->mask_set);
- writel_relaxed(buf[port], reg + chip->mask_clr);
- }
-}
-
-static void mtk_eint_chip_read_mask(const struct mtk_eint_offsets *chip,
- void __iomem *eint_reg_base, u32 *buf)
-{
- int port;
- void __iomem *reg;
-
- for (port = 0; port < chip->ports; port++) {
- reg = eint_reg_base + chip->mask + (port << 2);
- buf[port] = ~readl_relaxed(reg);
- /* Mask is 0 when irq is enabled, and 1 when disabled. */
- }
-}
-
static int mtk_eint_suspend(struct device *device)
{
- void __iomem *reg;
struct mtk_pinctrl *pctl = dev_get_drvdata(device);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- reg = pctl->eint_reg_base;
- mtk_eint_chip_read_mask(eint_offsets, reg, pctl->cur_mask);
- mtk_eint_chip_write_mask(eint_offsets, reg, pctl->wake_mask);
- return 0;
+ return mtk_eint_do_suspend(pctl->eint);
}
static int mtk_eint_resume(struct device *device)
{
struct mtk_pinctrl *pctl = dev_get_drvdata(device);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- mtk_eint_chip_write_mask(eint_offsets,
- pctl->eint_reg_base, pctl->cur_mask);
- return 0;
+ return mtk_eint_do_resume(pctl->eint);
}
const struct dev_pm_ops mtk_eint_pm_ops = {
@@ -1202,117 +899,6 @@ const struct dev_pm_ops mtk_eint_pm_ops = {
.resume_noirq = mtk_eint_resume,
};
-static void mtk_eint_ack(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- u32 mask = BIT(d->hwirq & 0x1f);
- void __iomem *reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->ack);
-
- writel(mask, reg);
-}
-
-static struct irq_chip mtk_pinctrl_irq_chip = {
- .name = "mt-eint",
- .irq_disable = mtk_eint_mask,
- .irq_mask = mtk_eint_mask,
- .irq_unmask = mtk_eint_unmask,
- .irq_ack = mtk_eint_ack,
- .irq_set_type = mtk_eint_set_type,
- .irq_set_wake = mtk_eint_irq_set_wake,
- .irq_request_resources = mtk_pinctrl_irq_request_resources,
- .irq_release_resources = mtk_pinctrl_irq_release_resources,
-};
-
-static unsigned int mtk_eint_init(struct mtk_pinctrl *pctl)
-{
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- void __iomem *reg = pctl->eint_reg_base + eint_offsets->dom_en;
- unsigned int i;
-
- for (i = 0; i < pctl->devdata->ap_num; i += 32) {
- writel(0xffffffff, reg);
- reg += 4;
- }
- return 0;
-}
-
-static inline void
-mtk_eint_debounce_process(struct mtk_pinctrl *pctl, int index)
-{
- unsigned int rst, ctrl_offset;
- unsigned int bit, dbnc;
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- ctrl_offset = (index / 4) * 4 + eint_offsets->dbnc_ctrl;
- dbnc = readl(pctl->eint_reg_base + ctrl_offset);
- bit = EINT_DBNC_SET_EN << ((index % 4) * 8);
- if ((bit & dbnc) > 0) {
- ctrl_offset = (index / 4) * 4 + eint_offsets->dbnc_set;
- rst = EINT_DBNC_RST_BIT << ((index % 4) * 8);
- writel(rst, pctl->eint_reg_base + ctrl_offset);
- }
-}
-
-static void mtk_eint_irq_handler(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc);
- unsigned int status, eint_num;
- int offset, index, virq;
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- void __iomem *reg = mtk_eint_get_offset(pctl, 0, eint_offsets->stat);
- int dual_edges, start_level, curr_level;
- const struct mtk_desc_pin *pin;
-
- chained_irq_enter(chip, desc);
- for (eint_num = 0;
- eint_num < pctl->devdata->ap_num;
- eint_num += 32, reg += 4) {
- status = readl(reg);
- while (status) {
- offset = __ffs(status);
- index = eint_num + offset;
- virq = irq_find_mapping(pctl->domain, index);
- status &= ~BIT(offset);
-
- dual_edges = pctl->eint_dual_edges[index];
- if (dual_edges) {
- /* Clear soft-irq in case we raised it
- last time */
- writel(BIT(offset), reg - eint_offsets->stat +
- eint_offsets->soft_clr);
-
- pin = mtk_find_pin_by_eint_num(pctl, index);
- start_level = mtk_gpio_get(pctl->chip,
- pin->pin.number);
- }
-
- generic_handle_irq(virq);
-
- if (dual_edges) {
- curr_level = mtk_eint_flip_edge(pctl, index);
-
- /* If level changed, we might lost one edge
- interrupt, raised it through soft-irq */
- if (start_level != curr_level)
- writel(BIT(offset), reg -
- eint_offsets->stat +
- eint_offsets->soft_set);
- }
-
- if (index < pctl->devdata->db_cnt)
- mtk_eint_debounce_process(pctl , index);
- }
- }
- chained_irq_exit(chip, desc);
-}
-
static int mtk_pctrl_build_state(struct platform_device *pdev)
{
struct mtk_pinctrl *pctl = platform_get_drvdata(pdev);
@@ -1345,6 +931,101 @@ static int mtk_pctrl_build_state(struct platform_device *pdev)
return 0;
}
+static int
+mtk_xt_get_gpio_n(void *data, unsigned long eint_n, unsigned int *gpio_n,
+ struct gpio_chip **gpio_chip)
+{
+ struct mtk_pinctrl *pctl = (struct mtk_pinctrl *)data;
+ const struct mtk_desc_pin *pin;
+
+ pin = mtk_find_pin_by_eint_num(pctl, eint_n);
+ if (!pin)
+ return -EINVAL;
+
+ *gpio_chip = pctl->chip;
+ *gpio_n = pin->pin.number;
+
+ return 0;
+}
+
+static int mtk_xt_get_gpio_state(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *pctl = (struct mtk_pinctrl *)data;
+ const struct mtk_desc_pin *pin;
+
+ pin = mtk_find_pin_by_eint_num(pctl, eint_n);
+ if (!pin)
+ return -EINVAL;
+
+ return mtk_gpio_get(pctl->chip, pin->pin.number);
+}
+
+static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *pctl = (struct mtk_pinctrl *)data;
+ const struct mtk_desc_pin *pin;
+
+ pin = mtk_find_pin_by_eint_num(pctl, eint_n);
+ if (!pin)
+ return -EINVAL;
+
+ /* set mux to INT mode */
+ mtk_pmx_set_mode(pctl->pctl_dev, pin->pin.number, pin->eint.eintmux);
+ /* set gpio direction to input */
+ mtk_pmx_gpio_set_direction(pctl->pctl_dev, NULL, pin->pin.number,
+ true);
+ /* set input-enable */
+ mtk_pconf_set_ies_smt(pctl, pin->pin.number, 1,
+ PIN_CONFIG_INPUT_ENABLE);
+
+ return 0;
+}
+
+static const struct mtk_eint_xt mtk_eint_xt = {
+ .get_gpio_n = mtk_xt_get_gpio_n,
+ .get_gpio_state = mtk_xt_get_gpio_state,
+ .set_gpio_as_eint = mtk_xt_set_gpio_as_eint,
+};
+
+static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ return -ENODEV;
+
+ pctl->eint = devm_kzalloc(pctl->dev, sizeof(*pctl->eint), GFP_KERNEL);
+ if (!pctl->eint)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get eint resource\n");
+ return -ENODEV;
+ }
+
+ pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctl->eint->base))
+ return PTR_ERR(pctl->eint->base);
+
+ pctl->eint->irq = irq_of_parse_and_map(np, 0);
+ if (!pctl->eint->irq)
+ return -EINVAL;
+
+ pctl->eint->dev = &pdev->dev;
+ /*
+ * If pctl->eint->regs == NULL, it would fall back into using a generic
+ * register map in mtk_eint_do_init calls.
+ */
+ pctl->eint->regs = pctl->devdata->eint_regs;
+ pctl->eint->hw = &pctl->devdata->eint_hw;
+ pctl->eint->pctl = pctl;
+ pctl->eint->gpio_xlate = &mtk_eint_xt;
+
+ return mtk_eint_do_init(pctl->eint);
+}
+
int mtk_pctrl_init(struct platform_device *pdev,
const struct mtk_pinctrl_devdata *data,
struct regmap *regmap)
@@ -1353,8 +1034,7 @@ int mtk_pctrl_init(struct platform_device *pdev,
struct mtk_pinctrl *pctl;
struct device_node *np = pdev->dev.of_node, *node;
struct property *prop;
- struct resource *res;
- int i, ret, irq, ports_buf;
+ int ret, i;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
@@ -1441,70 +1121,10 @@ int mtk_pctrl_init(struct platform_device *pdev,
goto chip_error;
}
- if (!of_property_read_bool(np, "interrupt-controller"))
- return 0;
-
- /* Get EINT register base from dts. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get Pinctrl resource\n");
- ret = -EINVAL;
- goto chip_error;
- }
-
- pctl->eint_reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pctl->eint_reg_base)) {
- ret = -EINVAL;
- goto chip_error;
- }
-
- ports_buf = pctl->devdata->eint_offsets.ports;
- pctl->wake_mask = devm_kcalloc(&pdev->dev, ports_buf,
- sizeof(*pctl->wake_mask), GFP_KERNEL);
- if (!pctl->wake_mask) {
- ret = -ENOMEM;
- goto chip_error;
- }
-
- pctl->cur_mask = devm_kcalloc(&pdev->dev, ports_buf,
- sizeof(*pctl->cur_mask), GFP_KERNEL);
- if (!pctl->cur_mask) {
- ret = -ENOMEM;
- goto chip_error;
- }
-
- pctl->eint_dual_edges = devm_kcalloc(&pdev->dev, pctl->devdata->ap_num,
- sizeof(int), GFP_KERNEL);
- if (!pctl->eint_dual_edges) {
- ret = -ENOMEM;
- goto chip_error;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- dev_err(&pdev->dev, "couldn't parse and map irq\n");
- ret = -EINVAL;
- goto chip_error;
- }
-
- pctl->domain = irq_domain_add_linear(np,
- pctl->devdata->ap_num, &irq_domain_simple_ops, NULL);
- if (!pctl->domain) {
- dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
- ret = -ENOMEM;
+ ret = mtk_eint_init(pctl, pdev);
+ if (ret)
goto chip_error;
- }
-
- mtk_eint_init(pctl);
- for (i = 0; i < pctl->devdata->ap_num; i++) {
- int virq = irq_create_mapping(pctl->domain, i);
-
- irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip,
- handle_level_irq);
- irq_set_chip_data(virq, pctl);
- }
- irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl);
return 0;
chip_error:
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
index 8543bc478a1e..bf13eb0a68d6 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
@@ -19,6 +19,8 @@
#include <linux/regmap.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include "mtk-eint.h"
+
#define NO_EINT_SUPPORT 255
#define MT_EDGE_SENSITIVE 0
#define MT_LEVEL_SENSITIVE 1
@@ -258,9 +260,8 @@ struct mtk_pinctrl_devdata {
unsigned char port_shf;
unsigned char port_mask;
unsigned char port_align;
- struct mtk_eint_offsets eint_offsets;
- unsigned int ap_num;
- unsigned int db_cnt;
+ struct mtk_eint_hw eint_hw;
+ struct mtk_eint_regs *eint_regs;
};
struct mtk_pinctrl {
@@ -274,11 +275,7 @@ struct mtk_pinctrl {
const char **grp_names;
struct pinctrl_dev *pctl_dev;
const struct mtk_pinctrl_devdata *devdata;
- void __iomem *eint_reg_base;
- struct irq_domain *domain;
- int *eint_dual_edges;
- u32 *wake_mask;
- u32 *cur_mask;
+ struct mtk_eint *eint;
};
int mtk_pctrl_init(struct platform_device *pdev,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 99a6ceac8e53..46a0918bd284 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -312,6 +312,47 @@ static const unsigned int pdm_din1_pins[] = {GPIOA_16};
static const unsigned int pdm_din2_pins[] = {GPIOA_17};
static const unsigned int pdm_din3_pins[] = {GPIOA_18};
+/* mclk */
+static const unsigned int mclk_c_pins[] = {GPIOA_0};
+static const unsigned int mclk_b_pins[] = {GPIOA_1};
+
+/* tdm */
+static const unsigned int tdma_sclk_pins[] = {GPIOX_12};
+static const unsigned int tdma_sclk_slv_pins[] = {GPIOX_12};
+static const unsigned int tdma_fs_pins[] = {GPIOX_13};
+static const unsigned int tdma_fs_slv_pins[] = {GPIOX_13};
+static const unsigned int tdma_din0_pins[] = {GPIOX_14};
+static const unsigned int tdma_dout0_x14_pins[] = {GPIOX_14};
+static const unsigned int tdma_dout0_x15_pins[] = {GPIOX_15};
+static const unsigned int tdma_dout1_pins[] = {GPIOX_15};
+static const unsigned int tdma_din1_pins[] = {GPIOX_15};
+
+static const unsigned int tdmc_sclk_pins[] = {GPIOA_2};
+static const unsigned int tdmc_sclk_slv_pins[] = {GPIOA_2};
+static const unsigned int tdmc_fs_pins[] = {GPIOA_3};
+static const unsigned int tdmc_fs_slv_pins[] = {GPIOA_3};
+static const unsigned int tdmc_din0_pins[] = {GPIOA_4};
+static const unsigned int tdmc_dout0_pins[] = {GPIOA_4};
+static const unsigned int tdmc_din1_pins[] = {GPIOA_5};
+static const unsigned int tdmc_dout1_pins[] = {GPIOA_5};
+static const unsigned int tdmc_din2_pins[] = {GPIOA_6};
+static const unsigned int tdmc_dout2_pins[] = {GPIOA_6};
+static const unsigned int tdmc_din3_pins[] = {GPIOA_7};
+static const unsigned int tdmc_dout3_pins[] = {GPIOA_7};
+
+static const unsigned int tdmb_sclk_pins[] = {GPIOA_8};
+static const unsigned int tdmb_sclk_slv_pins[] = {GPIOA_8};
+static const unsigned int tdmb_fs_pins[] = {GPIOA_9};
+static const unsigned int tdmb_fs_slv_pins[] = {GPIOA_9};
+static const unsigned int tdmb_din0_pins[] = {GPIOA_10};
+static const unsigned int tdmb_dout0_pins[] = {GPIOA_10};
+static const unsigned int tdmb_din1_pins[] = {GPIOA_11};
+static const unsigned int tdmb_dout1_pins[] = {GPIOA_11};
+static const unsigned int tdmb_din2_pins[] = {GPIOA_12};
+static const unsigned int tdmb_dout2_pins[] = {GPIOA_12};
+static const unsigned int tdmb_din3_pins[] = {GPIOA_13};
+static const unsigned int tdmb_dout3_pins[] = {GPIOA_13};
+
static struct meson_pmx_group meson_axg_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
@@ -495,6 +536,15 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
GROUP(eth_rx_dv_x, 4),
GROUP(eth_mdio_x, 4),
GROUP(eth_mdc_x, 4),
+ GROUP(tdma_sclk, 1),
+ GROUP(tdma_sclk_slv, 2),
+ GROUP(tdma_fs, 1),
+ GROUP(tdma_fs_slv, 2),
+ GROUP(tdma_din0, 1),
+ GROUP(tdma_dout0_x14, 2),
+ GROUP(tdma_dout0_x15, 1),
+ GROUP(tdma_dout1, 2),
+ GROUP(tdma_din1, 3),
/* bank GPIOY */
GROUP(eth_txd0_y, 1),
@@ -544,6 +594,32 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
GROUP(pdm_din1, 1),
GROUP(pdm_din2, 1),
GROUP(pdm_din3, 1),
+ GROUP(mclk_c, 1),
+ GROUP(mclk_b, 1),
+ GROUP(tdmc_sclk, 1),
+ GROUP(tdmc_sclk_slv, 2),
+ GROUP(tdmc_fs, 1),
+ GROUP(tdmc_fs_slv, 2),
+ GROUP(tdmc_din0, 2),
+ GROUP(tdmc_dout0, 1),
+ GROUP(tdmc_din1, 2),
+ GROUP(tdmc_dout1, 1),
+ GROUP(tdmc_din2, 2),
+ GROUP(tdmc_dout2, 1),
+ GROUP(tdmc_din3, 2),
+ GROUP(tdmc_dout3, 1),
+ GROUP(tdmb_sclk, 1),
+ GROUP(tdmb_sclk_slv, 2),
+ GROUP(tdmb_fs, 1),
+ GROUP(tdmb_fs_slv, 2),
+ GROUP(tdmb_din0, 2),
+ GROUP(tdmb_dout0, 1),
+ GROUP(tdmb_din1, 2),
+ GROUP(tdmb_dout1, 1),
+ GROUP(tdmb_din2, 2),
+ GROUP(tdmb_dout2, 1),
+ GROUP(tdmb_din3, 2),
+ GROUP(tdmb_dout3, 1),
};
/* uart_ao_a */
@@ -845,6 +921,32 @@ static const char * const jtag_ao_groups[] = {
"jtag_ao_tdi", "jtag_ao_tdo", "jtag_ao_clk", "jtag_ao_tms",
};
+static const char * const mclk_c_groups[] = {
+ "mclk_c",
+};
+
+static const char * const mclk_b_groups[] = {
+ "mclk_b",
+};
+
+static const char * const tdma_groups[] = {
+ "tdma_sclk", "tdma_sclk_slv", "tdma_fs", "tdma_fs_slv",
+ "tdma_din0", "tdma_dout0_x14", "tdma_dout0_x15", "tdma_dout1",
+ "tdma_din1",
+};
+
+static const char * const tdmc_groups[] = {
+ "tdmc_sclk", "tdmc_sclk_slv", "tdmc_fs", "tdmc_fs_slv",
+ "tdmc_din0", "tdmc_dout0", "tdmc_din1", "tdmc_dout1",
+ "tdmc_din2", "tdmc_dout2", "tdmc_din3", "tdmc_dout3",
+};
+
+static const char * const tdmb_groups[] = {
+ "tdmb_sclk", "tdmb_sclk_slv", "tdmb_fs", "tdmb_fs_slv",
+ "tdmb_din0", "tdmb_dout0", "tdmb_din1", "tdmb_dout1",
+ "tdmb_din2", "tdmb_dout2", "tdmb_din3", "tdmb_dout3",
+};
+
static struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
@@ -870,6 +972,11 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(spdif_in),
FUNCTION(jtag_ee),
FUNCTION(pdm),
+ FUNCTION(mclk_b),
+ FUNCTION(mclk_c),
+ FUNCTION(tdma),
+ FUNCTION(tdmb),
+ FUNCTION(tdmc),
};
static struct meson_pmx_func meson_axg_aobus_functions[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 9079020259c5..2c97a2e07a5f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -627,8 +627,8 @@ static const char * const sdio_groups[] = {
};
static const char * const nand_groups[] = {
- "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale", "nand_cle",
- "nand_wen_clk", "nand_ren_wr", "nand_dqs",
+ "emmc_nand_d07", "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale",
+ "nand_cle", "nand_wen_clk", "nand_ren_wr", "nand_dqs",
};
static const char * const uart_a_groups[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index b3786cde963d..7dae1d7bf6b0 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -617,8 +617,8 @@ static const char * const sdio_groups[] = {
};
static const char * const nand_groups[] = {
- "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale", "nand_cle",
- "nand_wen_clk", "nand_ren_wr", "nand_dqs",
+ "emmc_nand_d07", "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale",
+ "nand_cle", "nand_wen_clk", "nand_ren_wr", "nand_dqs",
};
static const char * const uart_a_groups[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 49c7ce03547b..c6d79315218f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -1,5 +1,5 @@
/*
- * Pin controller and GPIO driver for Amlogic Meson8.
+ * Pin controller and GPIO driver for Amlogic Meson8 and Meson8m2.
*
* Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
*
@@ -299,6 +299,10 @@ static const unsigned int spi_mosi_1_pins[] = { GPIOZ_12 };
static const unsigned int spi_miso_1_pins[] = { GPIOZ_13 };
static const unsigned int spi_ss2_1_pins[] = { GPIOZ_14 };
+static const unsigned int eth_txd3_pins[] = { GPIOZ_0 };
+static const unsigned int eth_txd2_pins[] = { GPIOZ_1 };
+static const unsigned int eth_rxd3_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rxd2_pins[] = { GPIOZ_3 };
static const unsigned int eth_tx_clk_50m_pins[] = { GPIOZ_4 };
static const unsigned int eth_tx_en_pins[] = { GPIOZ_5 };
static const unsigned int eth_txd1_pins[] = { GPIOZ_6 };
@@ -650,6 +654,12 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
GROUP(eth_mdio, 6, 6),
GROUP(eth_mdc, 6, 5),
+ /* NOTE: the following four groups are only available on Meson8m2: */
+ GROUP(eth_rxd2, 6, 3),
+ GROUP(eth_rxd3, 6, 2),
+ GROUP(eth_txd2, 6, 1),
+ GROUP(eth_txd3, 6, 0),
+
GROUP(i2c_sda_a0, 5, 31),
GROUP(i2c_sck_a0, 5, 30),
@@ -877,7 +887,8 @@ static const char * const spi_groups[] = {
static const char * const ethernet_groups[] = {
"eth_tx_clk_50m", "eth_tx_en", "eth_txd1",
"eth_txd0", "eth_rx_clk_in", "eth_rx_dv",
- "eth_rxd1", "eth_rxd0", "eth_mdio", "eth_mdc"
+ "eth_rxd1", "eth_rxd0", "eth_mdio", "eth_mdc", "eth_rxd2",
+ "eth_rxd3", "eth_txd2", "eth_txd3"
};
static const char * const i2c_a_groups[] = {
@@ -1080,6 +1091,14 @@ static const struct of_device_id meson8_pinctrl_dt_match[] = {
.compatible = "amlogic,meson8-aobus-pinctrl",
.data = &meson8_aobus_pinctrl_data,
},
+ {
+ .compatible = "amlogic,meson8m2-cbus-pinctrl",
+ .data = &meson8_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8m2-aobus-pinctrl",
+ .data = &meson8_aobus_pinctrl_data,
+ },
{ },
};
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 5b63248c8209..674ffdf8103c 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -214,18 +214,6 @@ static inline void armada_37xx_update_reg(unsigned int *reg,
}
}
-static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp,
- const char *func)
-{
- int f;
-
- for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++)
- if (!strcmp(grp->funcs[f], func))
- return f;
-
- return -ENOTSUPP;
-}
-
static struct armada_37xx_pin_group *armada_37xx_find_next_grp_by_pin(
struct armada_37xx_pinctrl *info, int pin, int *grp)
{
@@ -344,10 +332,9 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev,
dev_dbg(info->dev, "enable function %s group %s\n",
name, grp->name);
- func = armada_37xx_get_func_reg(grp, name);
-
+ func = match_string(grp->funcs, NB_FUNCS, name);
if (func < 0)
- return func;
+ return -ENOTSUPP;
val = grp->val[func];
@@ -679,12 +666,13 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
writel(1 << hwirq,
info->base +
IRQ_STATUS + 4 * i);
- continue;
+ goto update_status;
}
}
generic_handle_irq(virq);
+update_status:
/* Update status in case a new IRQ appears */
spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base +
@@ -932,12 +920,12 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
struct armada_37xx_pin_group *gp = &info->groups[g];
int f;
- for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) {
- if (strcmp(gp->funcs[f], name) == 0) {
- *groups = gp->name;
- groups++;
- }
- }
+ f = match_string(gp->funcs, NB_FUNCS, name);
+ if (f < 0)
+ continue;
+
+ *groups = gp->name;
+ groups++;
}
}
return 0;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index b854f1ee5de5..5e828468e43d 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -431,40 +431,40 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = {
MPP_MODE(19,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x4, "dev", "rb", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "nand", "rb", V_98DX3236_PLUS)),
MPP_MODE(20,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
MPP_VAR_FUNCTION(0x4, "dev", "we0", V_98DX3236_PLUS)),
MPP_MODE(21,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad0", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad0", V_98DX3236_PLUS)),
MPP_MODE(22,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad1", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad1", V_98DX3236_PLUS)),
MPP_MODE(23,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad2", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad2", V_98DX3236_PLUS)),
MPP_MODE(24,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad3", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad3", V_98DX3236_PLUS)),
MPP_MODE(25,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad4", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad4", V_98DX3236_PLUS)),
MPP_MODE(26,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad5", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad5", V_98DX3236_PLUS)),
MPP_MODE(27,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad6", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad6", V_98DX3236_PLUS)),
MPP_MODE(28,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad7", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad7", V_98DX3236_PLUS)),
MPP_MODE(29,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "a0", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "a0", V_98DX3236_PLUS)),
MPP_MODE(30,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "a1", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "a1", V_98DX3236_PLUS)),
MPP_MODE(31,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
MPP_VAR_FUNCTION(0x1, "slv_smi", "mdc", V_98DX3236_PLUS),
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 4b57a13758a4..bafb3d40545e 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -576,8 +576,10 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
for_each_child_of_node(np_config, np) {
ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(np);
break;
+ }
}
}
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 3924779f5578..1882713e68f9 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -59,6 +59,7 @@
#define GPIO_LS_SYNC 0x60
enum rockchip_pinctrl_type {
+ PX30,
RV1108,
RK2928,
RK3066B,
@@ -701,6 +702,66 @@ static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin,
*bit = data->bit;
}
+static struct rockchip_mux_route_data px30_mux_route_data[] = {
+ {
+ /* cif-d2m0 */
+ .bank_num = 2,
+ .pin = 0,
+ .func = 1,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 7),
+ }, {
+ /* cif-d2m1 */
+ .bank_num = 3,
+ .pin = 3,
+ .func = 3,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 7) | BIT(7),
+ }, {
+ /* pdm-m0 */
+ .bank_num = 3,
+ .pin = 22,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 8),
+ }, {
+ /* pdm-m1 */
+ .bank_num = 2,
+ .pin = 22,
+ .func = 1,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 8) | BIT(8),
+ }, {
+ /* uart2-rxm0 */
+ .bank_num = 1,
+ .pin = 27,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 10),
+ }, {
+ /* uart2-rxm1 */
+ .bank_num = 2,
+ .pin = 14,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 10) | BIT(10),
+ }, {
+ /* uart3-rxm0 */
+ .bank_num = 0,
+ .pin = 17,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 9),
+ }, {
+ /* uart3-rxm1 */
+ .bank_num = 1,
+ .pin = 15,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 9) | BIT(9),
+ },
+};
+
static struct rockchip_mux_route_data rk3128_mux_route_data[] = {
{
/* spi-0 */
@@ -1202,6 +1263,97 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
return ret;
}
+#define PX30_PULL_PMU_OFFSET 0x10
+#define PX30_PULL_GRF_OFFSET 0x60
+#define PX30_PULL_BITS_PER_PIN 2
+#define PX30_PULL_PINS_PER_REG 8
+#define PX30_PULL_BANK_STRIDE 16
+
+static void px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 32 pins of the first bank are located in PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = PX30_PULL_PMU_OFFSET;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = PX30_PULL_GRF_OFFSET;
+
+ /* correct the offset, as we're starting with the 2nd bank */
+ *reg -= 0x10;
+ *reg += bank->bank_num * PX30_PULL_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / PX30_PULL_PINS_PER_REG) * 4);
+ *bit = (pin_num % PX30_PULL_PINS_PER_REG);
+ *bit *= PX30_PULL_BITS_PER_PIN;
+}
+
+#define PX30_DRV_PMU_OFFSET 0x20
+#define PX30_DRV_GRF_OFFSET 0xf0
+#define PX30_DRV_BITS_PER_PIN 2
+#define PX30_DRV_PINS_PER_REG 8
+#define PX30_DRV_BANK_STRIDE 16
+
+static void px30_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 32 pins of the first bank are located in PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = PX30_DRV_PMU_OFFSET;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = PX30_DRV_GRF_OFFSET;
+
+ /* correct the offset, as we're starting with the 2nd bank */
+ *reg -= 0x10;
+ *reg += bank->bank_num * PX30_DRV_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / PX30_DRV_PINS_PER_REG) * 4);
+ *bit = (pin_num % PX30_DRV_PINS_PER_REG);
+ *bit *= PX30_DRV_BITS_PER_PIN;
+}
+
+#define PX30_SCHMITT_PMU_OFFSET 0x38
+#define PX30_SCHMITT_GRF_OFFSET 0xc0
+#define PX30_SCHMITT_PINS_PER_PMU_REG 16
+#define PX30_SCHMITT_BANK_STRIDE 16
+#define PX30_SCHMITT_PINS_PER_GRF_REG 8
+
+static int px30_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ int pins_per_reg;
+
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = PX30_SCHMITT_PMU_OFFSET;
+ pins_per_reg = PX30_SCHMITT_PINS_PER_PMU_REG;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = PX30_SCHMITT_GRF_OFFSET;
+ pins_per_reg = PX30_SCHMITT_PINS_PER_GRF_REG;
+ *reg += (bank->bank_num - 1) * PX30_SCHMITT_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / pins_per_reg) * 4);
+ *bit = pin_num % pins_per_reg;
+
+ return 0;
+}
+
#define RV1108_PULL_PMU_OFFSET 0x10
#define RV1108_PULL_OFFSET 0x110
#define RV1108_PULL_PINS_PER_REG 8
@@ -1798,6 +1950,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
return !(data & BIT(bit))
? PIN_CONFIG_BIAS_PULL_PIN_DEFAULT
: PIN_CONFIG_BIAS_DISABLE;
+ case PX30:
case RV1108:
case RK3188:
case RK3288:
@@ -1841,6 +1994,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
data |= BIT(bit);
ret = regmap_write(regmap, reg, data);
break;
+ case PX30:
case RV1108:
case RK3188:
case RK3288:
@@ -2103,6 +2257,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
pull == PIN_CONFIG_BIAS_DISABLE);
case RK3066B:
return pull ? false : true;
+ case PX30:
case RV1108:
case RK3188:
case RK3288:
@@ -2555,6 +2710,57 @@ static int rockchip_gpio_direction_output(struct gpio_chip *gc,
return pinctrl_gpio_direction_output(gc->base + offset);
}
+static void rockchip_gpio_set_debounce(struct gpio_chip *gc,
+ unsigned int offset, bool enable)
+{
+ struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+ void __iomem *reg = bank->reg_base + GPIO_DEBOUNCE;
+ unsigned long flags;
+ u32 data;
+
+ clk_enable(bank->clk);
+ raw_spin_lock_irqsave(&bank->slock, flags);
+
+ data = readl(reg);
+ if (enable)
+ data |= BIT(offset);
+ else
+ data &= ~BIT(offset);
+ writel(data, reg);
+
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(bank->clk);
+}
+
+/*
+ * gpiolib set_config callback function. The setting of the pin
+ * mux function as 'gpio output' will be handled by the pinctrl subsystem
+ * interface.
+ */
+static int rockchip_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ enum pin_config_param param = pinconf_to_config_param(config);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ rockchip_gpio_set_debounce(gc, offset, true);
+ /*
+ * Rockchip's gpio could only support up to one period
+ * of the debounce clock(pclk), which is far away from
+ * satisftying the requirement, as pclk is usually near
+ * 100MHz shared by all peripherals. So the fact is it
+ * has crippled debounce capability could only be useful
+ * to prevent any spurious glitches from waking up the system
+ * if the gpio is conguired as wakeup interrupt source. Let's
+ * still return -ENOTSUPP as before, to make sure the caller
+ * of gpiod_set_debounce won't change its behaviour.
+ */
+ default:
+ return -ENOTSUPP;
+ }
+}
+
/*
* gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
* and a virtual IRQ, if not already present.
@@ -2580,6 +2786,7 @@ static const struct gpio_chip rockchip_gpiolib_chip = {
.get_direction = rockchip_gpio_get_direction,
.direction_input = rockchip_gpio_direction_input,
.direction_output = rockchip_gpio_direction_output,
+ .set_config = rockchip_gpio_set_config,
.to_irq = rockchip_gpio_to_irq,
.owner = THIS_MODULE,
};
@@ -3237,6 +3444,43 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static struct rockchip_pin_bank px30_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU
+ ),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT
+ ),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT
+ ),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT
+ ),
+};
+
+static struct rockchip_pin_ctrl px30_pin_ctrl = {
+ .pin_banks = px30_pin_banks,
+ .nr_banks = ARRAY_SIZE(px30_pin_banks),
+ .label = "PX30-GPIO",
+ .type = PX30,
+ .grf_mux_offset = 0x0,
+ .pmu_mux_offset = 0x0,
+ .iomux_routes = px30_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(px30_mux_route_data),
+ .pull_calc_reg = px30_calc_pull_reg_and_bit,
+ .drv_calc_reg = px30_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = px30_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rv1108_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
IOMUX_SOURCE_PMU,
@@ -3545,6 +3789,8 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
};
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
+ { .compatible = "rockchip,px30-pinctrl",
+ .data = &px30_pin_ctrl },
{ .compatible = "rockchip,rv1108-pinctrl",
.data = &rv1108_pin_ctrl },
{ .compatible = "rockchip,rk2928-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index a7c5eb39b1eb..9c3c00515aa0 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -144,6 +144,7 @@ struct pcs_soc_data {
* struct pcs_device - pinctrl device instance
* @res: resources
* @base: virtual address of the controller
+ * @saved_vals: saved values for the controller
* @size: size of the ioremapped area
* @dev: device entry
* @np: device tree node
@@ -172,11 +173,13 @@ struct pcs_soc_data {
struct pcs_device {
struct resource *res;
void __iomem *base;
+ void *saved_vals;
unsigned size;
struct device *dev;
struct device_node *np;
struct pinctrl_dev *pctl;
unsigned flags;
+#define PCS_CONTEXT_LOSS_OFF (1 << 3)
#define PCS_QUIRK_SHARED_IRQ (1 << 2)
#define PCS_FEAT_IRQ (1 << 1)
#define PCS_FEAT_PINCONF (1 << 0)
@@ -1576,6 +1579,67 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
}
#ifdef CONFIG_PM
+static int pcs_save_context(struct pcs_device *pcs)
+{
+ int i, mux_bytes;
+ u64 *regsl;
+ u32 *regsw;
+ u16 *regshw;
+
+ mux_bytes = pcs->width / BITS_PER_BYTE;
+
+ if (!pcs->saved_vals)
+ pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC);
+
+ switch (pcs->width) {
+ case 64:
+ regsl = (u64 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ regsl[i] = pcs->read(pcs->base + i * mux_bytes);
+ break;
+ case 32:
+ regsw = (u32 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ regsw[i] = pcs->read(pcs->base + i * mux_bytes);
+ break;
+ case 16:
+ regshw = (u16 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ regshw[i] = pcs->read(pcs->base + i * mux_bytes);
+ break;
+ }
+
+ return 0;
+}
+
+static void pcs_restore_context(struct pcs_device *pcs)
+{
+ int i, mux_bytes;
+ u64 *regsl;
+ u32 *regsw;
+ u16 *regshw;
+
+ mux_bytes = pcs->width / BITS_PER_BYTE;
+
+ switch (pcs->width) {
+ case 64:
+ regsl = (u64 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ pcs->write(regsl[i], pcs->base + i * mux_bytes);
+ break;
+ case 32:
+ regsw = (u32 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ pcs->write(regsw[i], pcs->base + i * mux_bytes);
+ break;
+ case 16:
+ regshw = (u16 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ pcs->write(regshw[i], pcs->base + i * mux_bytes);
+ break;
+ }
+}
+
static int pinctrl_single_suspend(struct platform_device *pdev,
pm_message_t state)
{
@@ -1585,6 +1649,9 @@ static int pinctrl_single_suspend(struct platform_device *pdev,
if (!pcs)
return -EINVAL;
+ if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
+ pcs_save_context(pcs);
+
return pinctrl_force_sleep(pcs->pctl);
}
@@ -1596,6 +1663,9 @@ static int pinctrl_single_resume(struct platform_device *pdev)
if (!pcs)
return -EINVAL;
+ if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
+ pcs_restore_context(pcs);
+
return pinctrl_force_default(pcs->pctl);
}
#endif
@@ -1824,7 +1894,7 @@ static const struct pcs_soc_data pinctrl_single_dra7 = {
};
static const struct pcs_soc_data pinctrl_single_am437x = {
- .flags = PCS_QUIRK_SHARED_IRQ,
+ .flags = PCS_QUIRK_SHARED_IRQ | PCS_CONTEXT_LOSS_OFF,
.irq_enable_mask = (1 << 29), /* OMAP_WAKEUP_EN */
.irq_status_mask = (1 << 30), /* OMAP_WAKEUP_EVENT */
};
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index ad80a17c9990..0e22f52b2a19 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -58,7 +58,10 @@ struct msm_pinctrl {
struct device *dev;
struct pinctrl_dev *pctrl;
struct gpio_chip chip;
+ struct pinctrl_desc desc;
struct notifier_block restart_nb;
+
+ struct irq_chip irq_chip;
int irq;
raw_spinlock_t lock;
@@ -390,13 +393,6 @@ static const struct pinconf_ops msm_pinconf_ops = {
.pin_config_group_set = msm_config_group_set,
};
-static struct pinctrl_desc msm_pinctrl_desc = {
- .pctlops = &msm_pinctrl_ops,
- .pmxops = &msm_pinmux_ops,
- .confops = &msm_pinconf_ops,
- .owner = THIS_MODULE,
-};
-
static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
const struct msm_pingroup *g;
@@ -506,29 +502,46 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
int is_out;
int drive;
int pull;
- u32 ctl_reg;
+ int val;
+ u32 ctl_reg, io_reg;
- static const char * const pulls[] = {
+ static const char * const pulls_keeper[] = {
"no pull",
"pull down",
"keeper",
"pull up"
};
+ static const char * const pulls_no_keeper[] = {
+ "no pull",
+ "pull down",
+ "pull up",
+ };
+
if (!gpiochip_line_is_valid(chip, offset))
return;
g = &pctrl->soc->groups[offset];
ctl_reg = readl(pctrl->regs + g->ctl_reg);
+ io_reg = readl(pctrl->regs + g->io_reg);
is_out = !!(ctl_reg & BIT(g->oe_bit));
func = (ctl_reg >> g->mux_bit) & 7;
drive = (ctl_reg >> g->drv_bit) & 7;
pull = (ctl_reg >> g->pull_bit) & 3;
- seq_printf(s, " %-8s: %-3s %d", g->name, is_out ? "out" : "in", func);
+ if (is_out)
+ val = !!(io_reg & BIT(g->out_bit));
+ else
+ val = !!(io_reg & BIT(g->in_bit));
+
+ seq_printf(s, " %-8s: %-3s", g->name, is_out ? "out" : "in");
+ seq_printf(s, " %-4s func%d", val ? "high" : "low", func);
seq_printf(s, " %dmA", msm_regval_to_drive(drive));
- seq_printf(s, " %s", pulls[pull]);
+ if (pctrl->soc->pull_no_keeper)
+ seq_printf(s, " %s", pulls_no_keeper[pull]);
+ else
+ seq_printf(s, " %s", pulls_keeper[pull]);
seq_puts(s, "\n");
}
@@ -776,15 +789,6 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-static struct irq_chip msm_gpio_irq_chip = {
- .name = "msmgpio",
- .irq_mask = msm_gpio_irq_mask,
- .irq_unmask = msm_gpio_irq_unmask,
- .irq_ack = msm_gpio_irq_ack,
- .irq_set_type = msm_gpio_irq_set_type,
- .irq_set_wake = msm_gpio_irq_set_wake,
-};
-
static void msm_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -877,6 +881,13 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
chip->of_node = pctrl->dev->of_node;
chip->need_valid_mask = msm_gpio_needs_valid_mask(pctrl);
+ pctrl->irq_chip.name = "msmgpio";
+ pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
+ pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
+ pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
+ pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
+ pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
+
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "Failed register gpiochip\n");
@@ -890,15 +901,28 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return ret;
}
- ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio);
- if (ret) {
- dev_err(pctrl->dev, "Failed to add pin range\n");
- gpiochip_remove(&pctrl->chip);
- return ret;
+ /*
+ * For DeviceTree-supported systems, the gpio core checks the
+ * pinctrl's device node for the "gpio-ranges" property.
+ * If it is present, it takes care of adding the pin ranges
+ * for the driver. In this case the driver can skip ahead.
+ *
+ * In order to remain compatible with older, existing DeviceTree
+ * files which don't set the "gpio-ranges" property or systems that
+ * utilize ACPI the driver has to call gpiochip_add_pin_range().
+ */
+ if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) {
+ ret = gpiochip_add_pin_range(&pctrl->chip,
+ dev_name(pctrl->dev), 0, 0, chip->ngpio);
+ if (ret) {
+ dev_err(pctrl->dev, "Failed to add pin range\n");
+ gpiochip_remove(&pctrl->chip);
+ return ret;
+ }
}
ret = gpiochip_irqchip_add(chip,
- &msm_gpio_irq_chip,
+ &pctrl->irq_chip,
0,
handle_edge_irq,
IRQ_TYPE_NONE);
@@ -908,7 +932,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return -ENOSYS;
}
- gpiochip_set_chained_irqchip(chip, &msm_gpio_irq_chip, pctrl->irq,
+ gpiochip_set_chained_irqchip(chip, &pctrl->irq_chip, pctrl->irq,
msm_gpio_irq_handler);
return 0;
@@ -979,11 +1003,15 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return pctrl->irq;
}
- msm_pinctrl_desc.name = dev_name(&pdev->dev);
- msm_pinctrl_desc.pins = pctrl->soc->pins;
- msm_pinctrl_desc.npins = pctrl->soc->npins;
- pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &msm_pinctrl_desc,
- pctrl);
+ pctrl->desc.owner = THIS_MODULE;
+ pctrl->desc.pctlops = &msm_pinctrl_ops;
+ pctrl->desc.pmxops = &msm_pinmux_ops;
+ pctrl->desc.confops = &msm_pinconf_ops;
+ pctrl->desc.name = dev_name(&pdev->dev);
+ pctrl->desc.pins = pctrl->soc->pins;
+ pctrl->desc.npins = pctrl->soc->npins;
+
+ pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &pctrl->desc, pctrl);
if (IS_ERR(pctrl->pctrl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(pctrl->pctrl);
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
index bb3ce5c3e18b..1dfbe42dd895 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -30,9 +30,7 @@
#include "pinctrl-msm.h"
-static struct msm_pinctrl_soc_data qdf2xxx_pinctrl;
-
-/* A reasonable limit to the number of GPIOS */
+/* A maximum of 256 allows us to use a u8 array to hold the GPIO numbers */
#define MAX_GPIOS 256
/* maximum size of each gpio name (enough room for "gpioXXX" + null) */
@@ -40,77 +38,111 @@ static struct msm_pinctrl_soc_data qdf2xxx_pinctrl;
static int qdf2xxx_pinctrl_probe(struct platform_device *pdev)
{
+ struct msm_pinctrl_soc_data *pinctrl;
struct pinctrl_pin_desc *pins;
struct msm_pingroup *groups;
char (*names)[NAME_SIZE];
unsigned int i;
u32 num_gpios;
+ unsigned int avail_gpios; /* The number of GPIOs we support */
+ u8 gpios[MAX_GPIOS]; /* An array of supported GPIOs */
int ret;
/* Query the number of GPIOs from ACPI */
ret = device_property_read_u32(&pdev->dev, "num-gpios", &num_gpios);
if (ret < 0) {
- dev_warn(&pdev->dev, "missing num-gpios property\n");
+ dev_err(&pdev->dev, "missing 'num-gpios' property\n");
return ret;
}
-
if (!num_gpios || num_gpios > MAX_GPIOS) {
- dev_warn(&pdev->dev, "invalid num-gpios property\n");
+ dev_err(&pdev->dev, "invalid 'num-gpios' property\n");
+ return -ENODEV;
+ }
+
+ /* The number of GPIOs in the approved list */
+ ret = device_property_read_u8_array(&pdev->dev, "gpios", NULL, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "missing 'gpios' property\n");
+ return ret;
+ }
+ /*
+ * The number of available GPIOs should be non-zero, and no
+ * more than the total number of GPIOS.
+ */
+ if (!ret || ret > num_gpios) {
+ dev_err(&pdev->dev, "invalid 'gpios' property\n");
return -ENODEV;
}
+ avail_gpios = ret;
+ ret = device_property_read_u8_array(&pdev->dev, "gpios", gpios,
+ avail_gpios);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not read list of GPIOs\n");
+ return ret;
+ }
+
+ pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
pins = devm_kcalloc(&pdev->dev, num_gpios,
sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
groups = devm_kcalloc(&pdev->dev, num_gpios,
sizeof(struct msm_pingroup), GFP_KERNEL);
- names = devm_kcalloc(&pdev->dev, num_gpios, NAME_SIZE, GFP_KERNEL);
+ names = devm_kcalloc(&pdev->dev, avail_gpios, NAME_SIZE, GFP_KERNEL);
- if (!pins || !groups || !names)
+ if (!pinctrl || !pins || !groups || !names)
return -ENOMEM;
+ /*
+ * Initialize the array. GPIOs not listed in the 'gpios' array
+ * still need a number, but nothing else.
+ */
for (i = 0; i < num_gpios; i++) {
- snprintf(names[i], NAME_SIZE, "gpio%u", i);
-
pins[i].number = i;
- pins[i].name = names[i];
-
- groups[i].npins = 1;
- groups[i].name = names[i];
groups[i].pins = &pins[i].number;
+ }
- groups[i].ctl_reg = 0x10000 * i;
- groups[i].io_reg = 0x04 + 0x10000 * i;
- groups[i].intr_cfg_reg = 0x08 + 0x10000 * i;
- groups[i].intr_status_reg = 0x0c + 0x10000 * i;
- groups[i].intr_target_reg = 0x08 + 0x10000 * i;
-
- groups[i].mux_bit = 2;
- groups[i].pull_bit = 0;
- groups[i].drv_bit = 6;
- groups[i].oe_bit = 9;
- groups[i].in_bit = 0;
- groups[i].out_bit = 1;
- groups[i].intr_enable_bit = 0;
- groups[i].intr_status_bit = 0;
- groups[i].intr_target_bit = 5;
- groups[i].intr_target_kpss_val = 1;
- groups[i].intr_raw_status_bit = 4;
- groups[i].intr_polarity_bit = 1;
- groups[i].intr_detection_bit = 2;
- groups[i].intr_detection_width = 2;
+ /* Populate the entries that are meant to be exposed as GPIOs. */
+ for (i = 0; i < avail_gpios; i++) {
+ unsigned int gpio = gpios[i];
+
+ groups[gpio].npins = 1;
+ snprintf(names[i], NAME_SIZE, "gpio%u", gpio);
+ pins[gpio].name = names[i];
+ groups[gpio].name = names[i];
+
+ groups[gpio].ctl_reg = 0x10000 * gpio;
+ groups[gpio].io_reg = 0x04 + 0x10000 * gpio;
+ groups[gpio].intr_cfg_reg = 0x08 + 0x10000 * gpio;
+ groups[gpio].intr_status_reg = 0x0c + 0x10000 * gpio;
+ groups[gpio].intr_target_reg = 0x08 + 0x10000 * gpio;
+
+ groups[gpio].mux_bit = 2;
+ groups[gpio].pull_bit = 0;
+ groups[gpio].drv_bit = 6;
+ groups[gpio].oe_bit = 9;
+ groups[gpio].in_bit = 0;
+ groups[gpio].out_bit = 1;
+ groups[gpio].intr_enable_bit = 0;
+ groups[gpio].intr_status_bit = 0;
+ groups[gpio].intr_target_bit = 5;
+ groups[gpio].intr_target_kpss_val = 1;
+ groups[gpio].intr_raw_status_bit = 4;
+ groups[gpio].intr_polarity_bit = 1;
+ groups[gpio].intr_detection_bit = 2;
+ groups[gpio].intr_detection_width = 2;
}
- qdf2xxx_pinctrl.pins = pins;
- qdf2xxx_pinctrl.groups = groups;
- qdf2xxx_pinctrl.npins = num_gpios;
- qdf2xxx_pinctrl.ngroups = num_gpios;
- qdf2xxx_pinctrl.ngpios = num_gpios;
+ pinctrl->pins = pins;
+ pinctrl->groups = groups;
+ pinctrl->npins = num_gpios;
+ pinctrl->ngroups = num_gpios;
+ pinctrl->ngpios = num_gpios;
- return msm_pinctrl_probe(pdev, &qdf2xxx_pinctrl);
+ return msm_pinctrl_probe(pdev, pinctrl);
}
static const struct acpi_device_id qdf2xxx_acpi_ids[] = {
- {"QCOM8001"},
+ {"QCOM8002"},
{},
};
MODULE_DEVICE_TABLE(acpi, qdf2xxx_acpi_ids);
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 11b5eeb14c4a..425fadd6c346 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -8,26 +8,20 @@ config PINCTRL_SAMSUNG
select PINCONF
config PINCTRL_EXYNOS
- bool "Pinctrl driver data for Samsung EXYNOS SoCs other than 5440"
+ bool "Pinctrl driver data for Samsung EXYNOS SoCs"
depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_SAMSUNG
select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
config PINCTRL_EXYNOS_ARM
- bool "ARMv7-specific pinctrl driver data for Exynos (except Exynos5440)" if COMPILE_TEST
+ bool "ARMv7-specific pinctrl driver data for Exynos" if COMPILE_TEST
depends on PINCTRL_EXYNOS
config PINCTRL_EXYNOS_ARM64
bool "ARMv8-specific pinctrl driver data for Exynos" if COMPILE_TEST
depends on PINCTRL_EXYNOS
-config PINCTRL_EXYNOS5440
- bool "Samsung EXYNOS5440 SoC pinctrl driver"
- depends on SOC_EXYNOS5440
- select PINMUX
- select PINCONF
-
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
depends on ARCH_S3C24XX && OF
diff --git a/drivers/pinctrl/samsung/Makefile b/drivers/pinctrl/samsung/Makefile
index df426561d067..ed951df6a112 100644
--- a/drivers/pinctrl/samsung/Makefile
+++ b/drivers/pinctrl/samsung/Makefile
@@ -5,6 +5,5 @@ obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
obj-$(CONFIG_PINCTRL_EXYNOS_ARM) += pinctrl-exynos-arm.o
obj-$(CONFIG_PINCTRL_EXYNOS_ARM64) += pinctrl-exynos-arm64.o
-obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o
obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o
obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index 90c274490181..d82820fc349a 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -88,6 +88,7 @@ static const struct samsung_retention_data s5pv210_retention_data __initconst =
/* pin banks of s5pv210 pin-controller */
static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -105,12 +106,12 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40),
- EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44),
EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48),
EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c),
EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50),
EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54),
+ EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"),
EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"),
EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"),
@@ -147,6 +148,7 @@ static atomic_t exynos_shared_retention_refcnt;
/* pin banks of exynos3250 pin-controller 0 */
static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -158,6 +160,7 @@ static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst =
/* pin banks of exynos3250 pin-controller 1 */
static const struct samsung_pin_bank_data exynos3250_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTN(8, 0x120, "gpe0"),
EXYNOS_PIN_BANK_EINTN(8, 0x140, "gpe1"),
EXYNOS_PIN_BANK_EINTN(3, 0x180, "gpe2"),
@@ -232,6 +235,7 @@ const struct samsung_pinctrl_of_match_data exynos3250_of_data __initconst = {
/* pin banks of exynos4210 pin-controller 0 */
static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -252,6 +256,7 @@ static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst =
/* pin banks of exynos4210 pin-controller 1 */
static const struct samsung_pin_bank_data exynos4210_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpj0", 0x00),
EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpj1", 0x04),
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
@@ -276,6 +281,7 @@ static const struct samsung_pin_bank_data exynos4210_pin_banks1[] __initconst =
/* pin banks of exynos4210 pin-controller 2 */
static const struct samsung_pin_bank_data exynos4210_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTN(7, 0x000, "gpz"),
};
@@ -346,6 +352,7 @@ const struct samsung_pinctrl_of_match_data exynos4210_of_data __initconst = {
/* pin banks of exynos4x12 pin-controller 0 */
static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -363,6 +370,7 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst =
/* pin banks of exynos4x12 pin-controller 1 */
static const struct samsung_pin_bank_data exynos4x12_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
@@ -390,11 +398,13 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks1[] __initconst =
/* pin banks of exynos4x12 pin-controller 2 */
static const struct samsung_pin_bank_data exynos4x12_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
/* pin banks of exynos4x12 pin-controller 3 */
static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpv2", 0x08),
@@ -449,6 +459,7 @@ const struct samsung_pinctrl_of_match_data exynos4x12_of_data __initconst = {
/* pin banks of exynos5250 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -478,6 +489,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst =
/* pin banks of exynos5250 pin-controller 1 */
static const struct samsung_pin_bank_data exynos5250_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
EXYNOS_PIN_BANK_EINTG(4, 0x040, "gpf0", 0x08),
@@ -491,6 +503,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks1[] __initconst =
/* pin banks of exynos5250 pin-controller 2 */
static const struct samsung_pin_bank_data exynos5250_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
@@ -500,6 +513,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks2[] __initconst =
/* pin banks of exynos5250 pin-controller 3 */
static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
@@ -550,6 +564,7 @@ const struct samsung_pinctrl_of_match_data exynos5250_of_data __initconst = {
/* pin banks of exynos5260 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -575,6 +590,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst =
/* pin banks of exynos5260 pin-controller 1 */
static const struct samsung_pin_bank_data exynos5260_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04),
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
@@ -584,6 +600,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks1[] __initconst =
/* pin banks of exynos5260 pin-controller 2 */
static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
@@ -619,6 +636,7 @@ const struct samsung_pinctrl_of_match_data exynos5260_of_data __initconst = {
/* pin banks of exynos5410 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -630,7 +648,6 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20),
EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24),
EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28),
- EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c),
EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30),
EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34),
@@ -641,6 +658,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48),
EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c),
EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50),
+ EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"),
EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"),
EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"),
@@ -658,6 +676,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
/* pin banks of exynos5410 pin-controller 1 */
static const struct samsung_pin_bank_data exynos5410_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpj0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpj1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpj2", 0x08),
@@ -671,6 +690,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks1[] __initconst =
/* pin banks of exynos5410 pin-controller 2 */
static const struct samsung_pin_bank_data exynos5410_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
@@ -680,6 +700,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks2[] __initconst =
/* pin banks of exynos5410 pin-controller 3 */
static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
@@ -727,6 +748,7 @@ const struct samsung_pinctrl_of_match_data exynos5410_of_data __initconst = {
/* pin banks of exynos5420 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
@@ -736,6 +758,7 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst =
/* pin banks of exynos5420 pin-controller 1 */
static const struct samsung_pin_bank_data exynos5420_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpc0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc1", 0x04),
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
@@ -753,6 +776,7 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks1[] __initconst =
/* pin banks of exynos5420 pin-controller 2 */
static const struct samsung_pin_bank_data exynos5420_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
EXYNOS_PIN_BANK_EINTG(6, 0x040, "gpf0", 0x08),
@@ -765,6 +789,7 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks2[] __initconst =
/* pin banks of exynos5420 pin-controller 3 */
static const struct samsung_pin_bank_data exynos5420_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -778,6 +803,7 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks3[] __initconst =
/* pin banks of exynos5420 pin-controller 4 */
static const struct samsung_pin_bank_data exynos5420_pin_banks4[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index 71c9d1d9f345..b6e56422a700 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -45,6 +45,7 @@ static atomic_t exynos_shared_retention_refcnt;
/* pin banks of exynos5433 pin-controller - ALIVE */
static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
@@ -58,27 +59,32 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst =
/* pin banks of exynos5433 pin-controller - AUD */
static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
/* pin banks of exynos5433 pin-controller - CPIF */
static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
};
/* pin banks of exynos5433 pin-controller - eSE */
static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
};
/* pin banks of exynos5433 pin-controller - FINGER */
static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
};
/* pin banks of exynos5433 pin-controller - FSYS */
static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
@@ -89,16 +95,19 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst =
/* pin banks of exynos5433 pin-controller - IMEM */
static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
};
/* pin banks of exynos5433 pin-controller - NFC */
static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
};
/* pin banks of exynos5433 pin-controller - PERIC */
static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
@@ -120,6 +129,7 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst =
/* pin banks of exynos5433 pin-controller - TOUCH */
static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
};
@@ -267,6 +277,7 @@ const struct samsung_pinctrl_of_match_data exynos5433_of_data __initconst = {
/* pin banks of exynos7 pin-controller - ALIVE */
static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
@@ -275,6 +286,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
/* pin banks of exynos7 pin-controller - BUS0 */
static const struct samsung_pin_bank_data exynos7_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc0", 0x04),
EXYNOS_PIN_BANK_EINTG(2, 0x040, "gpc1", 0x08),
@@ -294,31 +306,37 @@ static const struct samsung_pin_bank_data exynos7_pin_banks1[] __initconst = {
/* pin banks of exynos7 pin-controller - NFC */
static const struct samsung_pin_bank_data exynos7_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
};
/* pin banks of exynos7 pin-controller - TOUCH */
static const struct samsung_pin_bank_data exynos7_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
};
/* pin banks of exynos7 pin-controller - FF */
static const struct samsung_pin_bank_data exynos7_pin_banks4[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpg4", 0x00),
};
/* pin banks of exynos7 pin-controller - ESE */
static const struct samsung_pin_bank_data exynos7_pin_banks5[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpv7", 0x00),
};
/* pin banks of exynos7 pin-controller - FSYS0 */
static const struct samsung_pin_bank_data exynos7_pin_banks6[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpr4", 0x00),
};
/* pin banks of exynos7 pin-controller - FSYS1 */
static const struct samsung_pin_bank_data exynos7_pin_banks7[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpr0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpr1", 0x04),
EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr2", 0x08),
@@ -327,6 +345,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks7[] __initconst = {
/* pin banks of exynos7 pin-controller - BUS1 */
static const struct samsung_pin_bank_data exynos7_pin_banks8[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpf0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpf1", 0x04),
EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpf2", 0x08),
@@ -340,6 +359,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks8[] __initconst = {
};
static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index abd43aa7eb0d..da1ec13697e7 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -99,7 +99,7 @@
#define EXYNOS5433_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
{ \
- .type = &exynos5433_bank_type_alive, \
+ .type = &exynos5433_bank_type_off, \
.pctl_offset = reg, \
.nr_pins = pins, \
.eint_type = EINT_TYPE_WKUP, \
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
deleted file mode 100644
index 3d8d5e812839..000000000000
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-//
-// pin-controller/pin-mux/pin-config/gpio-driver for Samsung's EXYNOS5440 SoC.
-//
-// Author: Thomas Abraham <thomas.ab@samsung.com>
-//
-// Copyright (c) 2012 Samsung Electronics Co., Ltd.
-// http://www.samsung.com
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/gpio/driver.h>
-#include <linux/device.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/of_irq.h>
-#include "../core.h"
-
-/* EXYNOS5440 GPIO and Pinctrl register offsets */
-#define GPIO_MUX 0x00
-#define GPIO_IE 0x04
-#define GPIO_INT 0x08
-#define GPIO_TYPE 0x0C
-#define GPIO_VAL 0x10
-#define GPIO_OE 0x14
-#define GPIO_IN 0x18
-#define GPIO_PE 0x1C
-#define GPIO_PS 0x20
-#define GPIO_SR 0x24
-#define GPIO_DS0 0x28
-#define GPIO_DS1 0x2C
-
-#define EXYNOS5440_MAX_PINS 23
-#define EXYNOS5440_MAX_GPIO_INT 8
-#define PIN_NAME_LENGTH 10
-
-#define GROUP_SUFFIX "-grp"
-#define FUNCTION_SUFFIX "-mux"
-
-/*
- * pin configuration type and its value are packed together into a 16-bits.
- * The upper 8-bits represent the configuration type and the lower 8-bits
- * hold the value of the configuration type.
- */
-#define PINCFG_TYPE_MASK 0xFF
-#define PINCFG_VALUE_SHIFT 8
-#define PINCFG_VALUE_MASK (0xFF << PINCFG_VALUE_SHIFT)
-#define PINCFG_PACK(type, value) (((value) << PINCFG_VALUE_SHIFT) | type)
-#define PINCFG_UNPACK_TYPE(cfg) ((cfg) & PINCFG_TYPE_MASK)
-#define PINCFG_UNPACK_VALUE(cfg) (((cfg) & PINCFG_VALUE_MASK) >> \
- PINCFG_VALUE_SHIFT)
-
-/**
- * enum pincfg_type - possible pin configuration types supported.
- * @PINCFG_TYPE_PUD: Pull up/down configuration.
- * @PINCFG_TYPE_DRV: Drive strength configuration.
- * @PINCFG_TYPE_SKEW_RATE: Skew rate configuration.
- * @PINCFG_TYPE_INPUT_TYPE: Pin input type configuration.
- */
-enum pincfg_type {
- PINCFG_TYPE_PUD,
- PINCFG_TYPE_DRV,
- PINCFG_TYPE_SKEW_RATE,
- PINCFG_TYPE_INPUT_TYPE
-};
-
-/**
- * struct exynos5440_pin_group: represent group of pins for pincfg setting.
- * @name: name of the pin group, used to lookup the group.
- * @pins: the pins included in this group.
- * @num_pins: number of pins included in this group.
- */
-struct exynos5440_pin_group {
- const char *name;
- const unsigned int *pins;
- u8 num_pins;
-};
-
-/**
- * struct exynos5440_pmx_func: represent a pin function.
- * @name: name of the pin function, used to lookup the function.
- * @groups: one or more names of pin groups that provide this function.
- * @num_groups: number of groups included in @groups.
- * @function: the function number to be programmed when selected.
- */
-struct exynos5440_pmx_func {
- const char *name;
- const char **groups;
- u8 num_groups;
- unsigned long function;
-};
-
-/**
- * struct exynos5440_pinctrl_priv_data: driver's private runtime data.
- * @reg_base: ioremapped based address of the register space.
- * @gc: gpio chip registered with gpiolib.
- * @pin_groups: list of pin groups parsed from device tree.
- * @nr_groups: number of pin groups available.
- * @pmx_functions: list of pin functions parsed from device tree.
- * @nr_functions: number of pin functions available.
- * @range: gpio range to register with pinctrl
- */
-struct exynos5440_pinctrl_priv_data {
- void __iomem *reg_base;
- struct gpio_chip *gc;
- struct irq_domain *irq_domain;
-
- const struct exynos5440_pin_group *pin_groups;
- unsigned int nr_groups;
- const struct exynos5440_pmx_func *pmx_functions;
- unsigned int nr_functions;
- struct pinctrl_gpio_range range;
-};
-
-/**
- * struct exynos5440_gpio_intr_data: private data for gpio interrupts.
- * @priv: driver's private runtime data.
- * @gpio_int: gpio interrupt number.
- */
-struct exynos5440_gpio_intr_data {
- struct exynos5440_pinctrl_priv_data *priv;
- unsigned int gpio_int;
-};
-
-/* list of all possible config options supported */
-static struct pin_config {
- char *prop_cfg;
- unsigned int cfg_type;
-} pcfgs[] = {
- { "samsung,exynos5440-pin-pud", PINCFG_TYPE_PUD },
- { "samsung,exynos5440-pin-drv", PINCFG_TYPE_DRV },
- { "samsung,exynos5440-pin-skew-rate", PINCFG_TYPE_SKEW_RATE },
- { "samsung,exynos5440-pin-input-type", PINCFG_TYPE_INPUT_TYPE },
-};
-
-/* check if the selector is a valid pin group selector */
-static int exynos5440_get_group_count(struct pinctrl_dev *pctldev)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- return priv->nr_groups;
-}
-
-/* return the name of the group selected by the group selector */
-static const char *exynos5440_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- return priv->pin_groups[selector].name;
-}
-
-/* return the pin numbers associated with the specified group */
-static int exynos5440_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned selector, const unsigned **pins, unsigned *num_pins)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- *pins = priv->pin_groups[selector].pins;
- *num_pins = priv->pin_groups[selector].num_pins;
- return 0;
-}
-
-/* create pinctrl_map entries by parsing device tree nodes */
-static int exynos5440_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np, struct pinctrl_map **maps,
- unsigned *nmaps)
-{
- struct device *dev = pctldev->dev;
- struct pinctrl_map *map;
- unsigned long *cfg = NULL;
- char *gname, *fname;
- int cfg_cnt = 0, map_cnt = 0, idx = 0;
-
- /* count the number of config options specfied in the node */
- for (idx = 0; idx < ARRAY_SIZE(pcfgs); idx++)
- if (of_find_property(np, pcfgs[idx].prop_cfg, NULL))
- cfg_cnt++;
-
- /*
- * Find out the number of map entries to create. All the config options
- * can be accomadated into a single config map entry.
- */
- if (cfg_cnt)
- map_cnt = 1;
- if (of_find_property(np, "samsung,exynos5440-pin-function", NULL))
- map_cnt++;
- if (!map_cnt) {
- dev_err(dev, "node %s does not have either config or function "
- "configurations\n", np->name);
- return -EINVAL;
- }
-
- /* Allocate memory for pin-map entries */
- map = kzalloc(sizeof(*map) * map_cnt, GFP_KERNEL);
- if (!map)
- return -ENOMEM;
- *nmaps = 0;
-
- /*
- * Allocate memory for pin group name. The pin group name is derived
- * from the node name from which these map entries are be created.
- */
- gname = kasprintf(GFP_KERNEL, "%s%s", np->name, GROUP_SUFFIX);
- if (!gname)
- goto free_map;
-
- /*
- * don't have config options? then skip over to creating function
- * map entries.
- */
- if (!cfg_cnt)
- goto skip_cfgs;
-
- /* Allocate memory for config entries */
- cfg = kzalloc(sizeof(*cfg) * cfg_cnt, GFP_KERNEL);
- if (!cfg)
- goto free_gname;
-
- /* Prepare a list of config settings */
- for (idx = 0, cfg_cnt = 0; idx < ARRAY_SIZE(pcfgs); idx++) {
- u32 value;
- if (!of_property_read_u32(np, pcfgs[idx].prop_cfg, &value))
- cfg[cfg_cnt++] =
- PINCFG_PACK(pcfgs[idx].cfg_type, value);
- }
-
- /* create the config map entry */
- map[*nmaps].data.configs.group_or_pin = gname;
- map[*nmaps].data.configs.configs = cfg;
- map[*nmaps].data.configs.num_configs = cfg_cnt;
- map[*nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
- *nmaps += 1;
-
-skip_cfgs:
- /* create the function map entry */
- if (of_find_property(np, "samsung,exynos5440-pin-function", NULL)) {
- fname = kasprintf(GFP_KERNEL,
- "%s%s", np->name, FUNCTION_SUFFIX);
- if (!fname)
- goto free_cfg;
-
- map[*nmaps].data.mux.group = gname;
- map[*nmaps].data.mux.function = fname;
- map[*nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
- *nmaps += 1;
- }
-
- *maps = map;
- return 0;
-
-free_cfg:
- kfree(cfg);
-free_gname:
- kfree(gname);
-free_map:
- kfree(map);
- return -ENOMEM;
-}
-
-/* free the memory allocated to hold the pin-map table */
-static void exynos5440_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps)
-{
- int idx;
-
- for (idx = 0; idx < num_maps; idx++) {
- if (map[idx].type == PIN_MAP_TYPE_MUX_GROUP) {
- kfree(map[idx].data.mux.function);
- if (!idx)
- kfree(map[idx].data.mux.group);
- } else if (map->type == PIN_MAP_TYPE_CONFIGS_GROUP) {
- kfree(map[idx].data.configs.configs);
- if (!idx)
- kfree(map[idx].data.configs.group_or_pin);
- }
- }
-
- kfree(map);
-}
-
-/* list of pinctrl callbacks for the pinctrl core */
-static const struct pinctrl_ops exynos5440_pctrl_ops = {
- .get_groups_count = exynos5440_get_group_count,
- .get_group_name = exynos5440_get_group_name,
- .get_group_pins = exynos5440_get_group_pins,
- .dt_node_to_map = exynos5440_dt_node_to_map,
- .dt_free_map = exynos5440_dt_free_map,
-};
-
-/* check if the selector is a valid pin function selector */
-static int exynos5440_get_functions_count(struct pinctrl_dev *pctldev)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- return priv->nr_functions;
-}
-
-/* return the name of the pin function specified */
-static const char *exynos5440_pinmux_get_fname(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- return priv->pmx_functions[selector].name;
-}
-
-/* return the groups associated for the specified function selector */
-static int exynos5440_pinmux_get_groups(struct pinctrl_dev *pctldev,
- unsigned selector, const char * const **groups,
- unsigned * const num_groups)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- *groups = priv->pmx_functions[selector].groups;
- *num_groups = priv->pmx_functions[selector].num_groups;
- return 0;
-}
-
-/* enable or disable a pinmux function */
-static void exynos5440_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group, bool enable)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- void __iomem *base;
- u32 function;
- u32 data;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- base = priv->reg_base;
- function = priv->pmx_functions[selector].function;
-
- data = readl(base + GPIO_MUX);
- if (enable)
- data |= (1 << function);
- else
- data &= ~(1 << function);
- writel(data, base + GPIO_MUX);
-}
-
-/* enable a specified pinmux by writing to registers */
-static int exynos5440_pinmux_set_mux(struct pinctrl_dev *pctldev,
- unsigned selector,
- unsigned group)
-{
- exynos5440_pinmux_setup(pctldev, selector, group, true);
- return 0;
-}
-
-/*
- * The calls to gpio_direction_output() and gpio_direction_input()
- * leads to this function call (via the pinctrl_gpio_direction_{input|output}()
- * function called from the gpiolib interface).
- */
-static int exynos5440_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range, unsigned offset, bool input)
-{
- return 0;
-}
-
-/* list of pinmux callbacks for the pinmux vertical in pinctrl core */
-static const struct pinmux_ops exynos5440_pinmux_ops = {
- .get_functions_count = exynos5440_get_functions_count,
- .get_function_name = exynos5440_pinmux_get_fname,
- .get_function_groups = exynos5440_pinmux_get_groups,
- .set_mux = exynos5440_pinmux_set_mux,
- .gpio_set_direction = exynos5440_pinmux_gpio_set_direction,
-};
-
-/* set the pin config settings for a specified pin */
-static int exynos5440_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *configs,
- unsigned num_configs)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- void __iomem *base;
- enum pincfg_type cfg_type;
- u32 cfg_value;
- u32 data;
- int i;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- base = priv->reg_base;
-
- for (i = 0; i < num_configs; i++) {
- cfg_type = PINCFG_UNPACK_TYPE(configs[i]);
- cfg_value = PINCFG_UNPACK_VALUE(configs[i]);
-
- switch (cfg_type) {
- case PINCFG_TYPE_PUD:
- /* first set pull enable/disable bit */
- data = readl(base + GPIO_PE);
- data &= ~(1 << pin);
- if (cfg_value)
- data |= (1 << pin);
- writel(data, base + GPIO_PE);
-
- /* then set pull up/down bit */
- data = readl(base + GPIO_PS);
- data &= ~(1 << pin);
- if (cfg_value == 2)
- data |= (1 << pin);
- writel(data, base + GPIO_PS);
- break;
-
- case PINCFG_TYPE_DRV:
- /* set the first bit of the drive strength */
- data = readl(base + GPIO_DS0);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_DS0);
- cfg_value >>= 1;
-
- /* set the second bit of the driver strength */
- data = readl(base + GPIO_DS1);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_DS1);
- break;
- case PINCFG_TYPE_SKEW_RATE:
- data = readl(base + GPIO_SR);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_SR);
- break;
- case PINCFG_TYPE_INPUT_TYPE:
- data = readl(base + GPIO_TYPE);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_TYPE);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
- } /* for each config */
-
- return 0;
-}
-
-/* get the pin config settings for a specified pin */
-static int exynos5440_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *config)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- void __iomem *base;
- enum pincfg_type cfg_type = PINCFG_UNPACK_TYPE(*config);
- u32 data;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- base = priv->reg_base;
-
- switch (cfg_type) {
- case PINCFG_TYPE_PUD:
- data = readl(base + GPIO_PE);
- data = (data >> pin) & 1;
- if (!data)
- *config = 0;
- else
- *config = ((readl(base + GPIO_PS) >> pin) & 1) + 1;
- break;
- case PINCFG_TYPE_DRV:
- data = readl(base + GPIO_DS0);
- data = (data >> pin) & 1;
- *config = data;
- data = readl(base + GPIO_DS1);
- data = (data >> pin) & 1;
- *config |= (data << 1);
- break;
- case PINCFG_TYPE_SKEW_RATE:
- data = readl(base + GPIO_SR);
- *config = (data >> pin) & 1;
- break;
- case PINCFG_TYPE_INPUT_TYPE:
- data = readl(base + GPIO_TYPE);
- *config = (data >> pin) & 1;
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* set the pin config settings for a specified pin group */
-static int exynos5440_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long *configs,
- unsigned num_configs)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- const unsigned int *pins;
- unsigned int cnt;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- pins = priv->pin_groups[group].pins;
-
- for (cnt = 0; cnt < priv->pin_groups[group].num_pins; cnt++)
- exynos5440_pinconf_set(pctldev, pins[cnt], configs,
- num_configs);
-
- return 0;
-}
-
-/* get the pin config settings for a specified pin group */
-static int exynos5440_pinconf_group_get(struct pinctrl_dev *pctldev,
- unsigned int group, unsigned long *config)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- const unsigned int *pins;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- pins = priv->pin_groups[group].pins;
- exynos5440_pinconf_get(pctldev, pins[0], config);
- return 0;
-}
-
-/* list of pinconfig callbacks for pinconfig vertical in the pinctrl code */
-static const struct pinconf_ops exynos5440_pinconf_ops = {
- .pin_config_get = exynos5440_pinconf_get,
- .pin_config_set = exynos5440_pinconf_set,
- .pin_config_group_get = exynos5440_pinconf_group_get,
- .pin_config_group_set = exynos5440_pinconf_group_set,
-};
-
-/* gpiolib gpio_set callback function */
-static void exynos5440_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- void __iomem *base = priv->reg_base;
- u32 data;
-
- data = readl(base + GPIO_VAL);
- data &= ~(1 << offset);
- if (value)
- data |= 1 << offset;
- writel(data, base + GPIO_VAL);
-}
-
-/* gpiolib gpio_get callback function */
-static int exynos5440_gpio_get(struct gpio_chip *gc, unsigned offset)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- void __iomem *base = priv->reg_base;
- u32 data;
-
- data = readl(base + GPIO_IN);
- data >>= offset;
- data &= 1;
- return data;
-}
-
-/* gpiolib gpio_direction_input callback function */
-static int exynos5440_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- void __iomem *base = priv->reg_base;
- u32 data;
-
- /* first disable the data output enable on this pin */
- data = readl(base + GPIO_OE);
- data &= ~(1 << offset);
- writel(data, base + GPIO_OE);
-
- /* now enable input on this pin */
- data = readl(base + GPIO_IE);
- data |= 1 << offset;
- writel(data, base + GPIO_IE);
- return 0;
-}
-
-/* gpiolib gpio_direction_output callback function */
-static int exynos5440_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
- int value)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- void __iomem *base = priv->reg_base;
- u32 data;
-
- exynos5440_gpio_set(gc, offset, value);
-
- /* first disable the data input enable on this pin */
- data = readl(base + GPIO_IE);
- data &= ~(1 << offset);
- writel(data, base + GPIO_IE);
-
- /* now enable output on this pin */
- data = readl(base + GPIO_OE);
- data |= 1 << offset;
- writel(data, base + GPIO_OE);
- return 0;
-}
-
-/* gpiolib gpio_to_irq callback function */
-static int exynos5440_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- unsigned int virq;
-
- if (offset < 16 || offset > 23)
- return -ENXIO;
-
- if (!priv->irq_domain)
- return -ENXIO;
-
- virq = irq_create_mapping(priv->irq_domain, offset - 16);
- return virq ? : -ENXIO;
-}
-
-/* parse the pin numbers listed in the 'samsung,exynos5440-pins' property */
-static int exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
- struct device_node *cfg_np, unsigned int **pin_list,
- unsigned int *npins)
-{
- struct device *dev = &pdev->dev;
- struct property *prop;
-
- prop = of_find_property(cfg_np, "samsung,exynos5440-pins", NULL);
- if (!prop)
- return -ENOENT;
-
- *npins = prop->length / sizeof(unsigned long);
- if (!*npins) {
- dev_err(dev, "invalid pin list in %s node", cfg_np->name);
- return -EINVAL;
- }
-
- *pin_list = devm_kzalloc(dev, *npins * sizeof(**pin_list), GFP_KERNEL);
- if (!*pin_list)
- return -ENOMEM;
-
- return of_property_read_u32_array(cfg_np, "samsung,exynos5440-pins",
- *pin_list, *npins);
-}
-
-/*
- * Parse the information about all the available pin groups and pin functions
- * from device node of the pin-controller.
- */
-static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- struct device *dev = &pdev->dev;
- struct device_node *dev_np = dev->of_node;
- struct device_node *cfg_np;
- struct exynos5440_pin_group *groups, *grp;
- struct exynos5440_pmx_func *functions, *func;
- unsigned *pin_list;
- unsigned int npins, grp_cnt, func_idx = 0;
- char *gname, *fname;
- int ret;
-
- grp_cnt = of_get_child_count(dev_np);
- if (!grp_cnt)
- return -EINVAL;
-
- groups = devm_kzalloc(dev, grp_cnt * sizeof(*groups), GFP_KERNEL);
- if (!groups)
- return -EINVAL;
-
- grp = groups;
-
- functions = devm_kzalloc(dev, grp_cnt * sizeof(*functions), GFP_KERNEL);
- if (!functions)
- return -EINVAL;
-
- func = functions;
-
- /*
- * Iterate over all the child nodes of the pin controller node
- * and create pin groups and pin function lists.
- */
- for_each_child_of_node(dev_np, cfg_np) {
- u32 function;
-
- ret = exynos5440_pinctrl_parse_dt_pins(pdev, cfg_np,
- &pin_list, &npins);
- if (ret) {
- gname = NULL;
- goto skip_to_pin_function;
- }
-
- /* derive pin group name from the node name */
- gname = devm_kasprintf(dev, GFP_KERNEL,
- "%s%s", cfg_np->name, GROUP_SUFFIX);
- if (!gname)
- return -ENOMEM;
-
- grp->name = gname;
- grp->pins = pin_list;
- grp->num_pins = npins;
- grp++;
-
-skip_to_pin_function:
- ret = of_property_read_u32(cfg_np, "samsung,exynos5440-pin-function",
- &function);
- if (ret)
- continue;
-
- /* derive function name from the node name */
- fname = devm_kasprintf(dev, GFP_KERNEL,
- "%s%s", cfg_np->name, FUNCTION_SUFFIX);
- if (!fname)
- return -ENOMEM;
-
- func->name = fname;
- func->groups = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
- if (!func->groups)
- return -ENOMEM;
- func->groups[0] = gname;
- func->num_groups = gname ? 1 : 0;
- func->function = function;
- func++;
- func_idx++;
- }
-
- priv->pin_groups = groups;
- priv->nr_groups = grp_cnt;
- priv->pmx_functions = functions;
- priv->nr_functions = func_idx;
- return 0;
-}
-
-/* register the pinctrl interface with the pinctrl subsystem */
-static int exynos5440_pinctrl_register(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- struct device *dev = &pdev->dev;
- struct pinctrl_desc *ctrldesc;
- struct pinctrl_dev *pctl_dev;
- struct pinctrl_pin_desc *pindesc, *pdesc;
- char *pin_names;
- int pin, ret;
-
- ctrldesc = devm_kzalloc(dev, sizeof(*ctrldesc), GFP_KERNEL);
- if (!ctrldesc)
- return -ENOMEM;
-
- ctrldesc->name = "exynos5440-pinctrl";
- ctrldesc->owner = THIS_MODULE;
- ctrldesc->pctlops = &exynos5440_pctrl_ops;
- ctrldesc->pmxops = &exynos5440_pinmux_ops;
- ctrldesc->confops = &exynos5440_pinconf_ops;
-
- pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
- EXYNOS5440_MAX_PINS, GFP_KERNEL);
- if (!pindesc)
- return -ENOMEM;
- ctrldesc->pins = pindesc;
- ctrldesc->npins = EXYNOS5440_MAX_PINS;
-
- /* dynamically populate the pin number and pin name for pindesc */
- for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++)
- pdesc->number = pin;
-
- /*
- * allocate space for storing the dynamically generated names for all
- * the pins which belong to this pin-controller.
- */
- pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
- ctrldesc->npins, GFP_KERNEL);
- if (!pin_names)
- return -ENOMEM;
-
- /* for each pin, set the name of the pin */
- for (pin = 0; pin < ctrldesc->npins; pin++) {
- snprintf(pin_names, 6, "gpio%02d", pin);
- pdesc = pindesc + pin;
- pdesc->name = pin_names;
- pin_names += PIN_NAME_LENGTH;
- }
-
- ret = exynos5440_pinctrl_parse_dt(pdev, priv);
- if (ret)
- return ret;
-
- pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, priv);
- if (IS_ERR(pctl_dev)) {
- dev_err(&pdev->dev, "could not register pinctrl driver\n");
- return PTR_ERR(pctl_dev);
- }
-
- priv->range.name = "exynos5440-pctrl-gpio-range";
- priv->range.id = 0;
- priv->range.base = 0;
- priv->range.npins = EXYNOS5440_MAX_PINS;
- priv->range.gc = priv->gc;
- pinctrl_add_gpio_range(pctl_dev, &priv->range);
- return 0;
-}
-
-/* register the gpiolib interface with the gpiolib subsystem */
-static int exynos5440_gpiolib_register(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- struct gpio_chip *gc;
- int ret;
-
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
- return -ENOMEM;
-
- priv->gc = gc;
- gc->base = 0;
- gc->ngpio = EXYNOS5440_MAX_PINS;
- gc->parent = &pdev->dev;
- gc->set = exynos5440_gpio_set;
- gc->get = exynos5440_gpio_get;
- gc->direction_input = exynos5440_gpio_direction_input;
- gc->direction_output = exynos5440_gpio_direction_output;
- gc->to_irq = exynos5440_gpio_to_irq;
- gc->label = "gpiolib-exynos5440";
- gc->owner = THIS_MODULE;
- ret = gpiochip_add_data(gc, priv);
- if (ret) {
- dev_err(&pdev->dev, "failed to register gpio_chip %s, error "
- "code: %d\n", gc->label, ret);
- return ret;
- }
-
- return 0;
-}
-
-/* unregister the gpiolib interface with the gpiolib subsystem */
-static int exynos5440_gpiolib_unregister(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- gpiochip_remove(priv->gc);
- return 0;
-}
-
-static void exynos5440_gpio_irq_unmask(struct irq_data *irqd)
-{
- struct exynos5440_pinctrl_priv_data *d;
- unsigned long gpio_int;
-
- d = irq_data_get_irq_chip_data(irqd);
- gpio_int = readl(d->reg_base + GPIO_INT);
- gpio_int |= 1 << irqd->hwirq;
- writel(gpio_int, d->reg_base + GPIO_INT);
-}
-
-static void exynos5440_gpio_irq_mask(struct irq_data *irqd)
-{
- struct exynos5440_pinctrl_priv_data *d;
- unsigned long gpio_int;
-
- d = irq_data_get_irq_chip_data(irqd);
- gpio_int = readl(d->reg_base + GPIO_INT);
- gpio_int &= ~(1 << irqd->hwirq);
- writel(gpio_int, d->reg_base + GPIO_INT);
-}
-
-/* irq_chip for gpio interrupts */
-static struct irq_chip exynos5440_gpio_irq_chip = {
- .name = "exynos5440_gpio_irq_chip",
- .irq_unmask = exynos5440_gpio_irq_unmask,
- .irq_mask = exynos5440_gpio_irq_mask,
-};
-
-/* interrupt handler for GPIO interrupts 0..7 */
-static irqreturn_t exynos5440_gpio_irq(int irq, void *data)
-{
- struct exynos5440_gpio_intr_data *intd = data;
- struct exynos5440_pinctrl_priv_data *d = intd->priv;
- int virq;
-
- virq = irq_linear_revmap(d->irq_domain, intd->gpio_int);
- if (!virq)
- return IRQ_NONE;
- generic_handle_irq(virq);
- return IRQ_HANDLED;
-}
-
-static int exynos5440_gpio_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct exynos5440_pinctrl_priv_data *d = h->host_data;
-
- irq_set_chip_data(virq, d);
- irq_set_chip_and_handler(virq, &exynos5440_gpio_irq_chip,
- handle_level_irq);
- return 0;
-}
-
-/* irq domain callbacks for gpio interrupt controller */
-static const struct irq_domain_ops exynos5440_gpio_irqd_ops = {
- .map = exynos5440_gpio_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-/* setup handling of gpio interrupts */
-static int exynos5440_gpio_irq_init(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- struct device *dev = &pdev->dev;
- struct exynos5440_gpio_intr_data *intd;
- int i, irq, ret;
-
- intd = devm_kzalloc(dev, sizeof(*intd) * EXYNOS5440_MAX_GPIO_INT,
- GFP_KERNEL);
- if (!intd)
- return -ENOMEM;
-
- for (i = 0; i < EXYNOS5440_MAX_GPIO_INT; i++) {
- irq = irq_of_parse_and_map(dev->of_node, i);
- if (irq <= 0) {
- dev_err(dev, "irq parsing failed\n");
- return -EINVAL;
- }
-
- intd->gpio_int = i;
- intd->priv = priv;
- ret = devm_request_irq(dev, irq, exynos5440_gpio_irq,
- 0, dev_name(dev), intd++);
- if (ret) {
- dev_err(dev, "irq request failed\n");
- return -ENXIO;
- }
- }
-
- priv->irq_domain = irq_domain_add_linear(dev->of_node,
- EXYNOS5440_MAX_GPIO_INT,
- &exynos5440_gpio_irqd_ops, priv);
- if (!priv->irq_domain) {
- dev_err(dev, "failed to create irq domain\n");
- return -ENXIO;
- }
-
- return 0;
-}
-
-static int exynos5440_pinctrl_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct exynos5440_pinctrl_priv_data *priv;
- struct resource *res;
- int ret;
-
- if (!dev->of_node) {
- dev_err(dev, "device tree node not found\n");
- return -ENODEV;
- }
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->reg_base))
- return PTR_ERR(priv->reg_base);
-
- ret = exynos5440_gpiolib_register(pdev, priv);
- if (ret)
- return ret;
-
- ret = exynos5440_pinctrl_register(pdev, priv);
- if (ret) {
- exynos5440_gpiolib_unregister(pdev, priv);
- return ret;
- }
-
- ret = exynos5440_gpio_irq_init(pdev, priv);
- if (ret) {
- dev_err(dev, "failed to setup gpio interrupts\n");
- return ret;
- }
-
- platform_set_drvdata(pdev, priv);
- dev_info(dev, "EXYNOS5440 pinctrl driver registered\n");
- return 0;
-}
-
-static const struct of_device_id exynos5440_pinctrl_dt_match[] = {
- { .compatible = "samsung,exynos5440-pinctrl" },
- {},
-};
-
-static struct platform_driver exynos5440_pinctrl_driver = {
- .probe = exynos5440_pinctrl_probe,
- .driver = {
- .name = "exynos5440-pinctrl",
- .of_match_table = exynos5440_pinctrl_dt_match,
- .suppress_bind_attrs = true,
- },
-};
-
-static int __init exynos5440_pinctrl_drv_register(void)
-{
- return platform_driver_register(&exynos5440_pinctrl_driver);
-}
-postcore_initcall(exynos5440_pinctrl_drv_register);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 336e88d7bdb9..618945a0fd38 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -279,6 +279,32 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+/* Forward declaration which can be used by samsung_pin_dbg_show */
+static int samsung_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config);
+static const char * const reg_names[] = {"CON", "DAT", "PUD", "DRV", "CON_PDN",
+ "PUD_PDN"};
+
+static void samsung_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int pin)
+{
+ enum pincfg_type cfg_type;
+ unsigned long config;
+ int ret;
+
+ for (cfg_type = 0; cfg_type < PINCFG_TYPE_NUM; cfg_type++) {
+ config = PINCFG_PACK(cfg_type, 0);
+ ret = samsung_pinconf_get(pctldev, pin, &config);
+ if (ret < 0)
+ continue;
+
+ seq_printf(s, " %s(0x%lx)", reg_names[cfg_type],
+ PINCFG_UNPACK_VALUE(config));
+ }
+}
+#endif
+
/* list of pinctrl callbacks for the pinctrl core */
static const struct pinctrl_ops samsung_pctrl_ops = {
.get_groups_count = samsung_get_group_count,
@@ -286,6 +312,9 @@ static const struct pinctrl_ops samsung_pctrl_ops = {
.get_group_pins = samsung_get_group_pins,
.dt_node_to_map = samsung_dt_node_to_map,
.dt_free_map = samsung_dt_free_map,
+#ifdef CONFIG_DEBUG_FS
+ .pin_dbg_show = samsung_pin_dbg_show,
+#endif
};
/* check if the selector is a valid pin function selector */
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index c11b789ec583..43d950c16528 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -44,6 +44,11 @@ config PINCTRL_PFC_R8A7745
depends on ARCH_R8A7745
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A77470
+ def_bool y
+ depends on ARCH_R8A77470
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7778
def_bool y
depends on ARCH_R8A7778
@@ -104,6 +109,11 @@ config PINCTRL_PFC_R8A77980
depends on ARCH_R8A77980
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A77990
+ def_bool y
+ depends on ARCH_R8A77990
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A77995
def_bool y
depends on ARCH_R8A77995
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 463775f28cf1..d0b29c51c159 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
obj-$(CONFIG_PINCTRL_PFC_R8A7743) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77470) += pfc-r8a77470.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
obj-$(CONFIG_PINCTRL_PFC_R8A7790) += pfc-r8a7790.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7796) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77965) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A77970) += pfc-r8a77970.o
obj-$(CONFIG_PINCTRL_PFC_R8A77980) += pfc-r8a77980.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77990) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A77995) += pfc-r8a77995.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 74861b7b5b0d..eb06981538b4 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -503,6 +503,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7745_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77470
+ {
+ .compatible = "renesas,pfc-r8a77470",
+ .data = &r8a77470_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7778
{
.compatible = "renesas,pfc-r8a7778",
@@ -575,6 +581,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a77980_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77990
+ {
+ .compatible = "renesas,pfc-r8a77990",
+ .data = &r8a77990_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77995
{
.compatible = "renesas,pfc-r8a77995",
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77470.c b/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
new file mode 100644
index 000000000000..9d3ed438ec7b
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
@@ -0,0 +1,2343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A77470 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+
+#include <linux/kernel.h>
+
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, sfx) \
+ PORT_GP_23(0, fn, sfx), \
+ PORT_GP_23(1, fn, sfx), \
+ PORT_GP_32(2, fn, sfx), \
+ PORT_GP_17(3, fn, sfx), \
+ PORT_GP_1(3, 27, fn, sfx), \
+ PORT_GP_1(3, 28, fn, sfx), \
+ PORT_GP_1(3, 29, fn, sfx), \
+ PORT_GP_26(4, fn, sfx), \
+ PORT_GP_32(5, fn, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+
+ /* GPSR0 */
+ FN_USB0_PWEN, FN_USB0_OVC, FN_USB1_PWEN, FN_USB1_OVC, FN_CLKOUT,
+ FN_IP0_3_0, FN_IP0_7_4, FN_IP0_11_8, FN_IP0_15_12, FN_IP0_19_16,
+ FN_IP0_23_20, FN_IP0_27_24, FN_IP0_31_28, FN_MMC0_CLK_SDHI1_CLK,
+ FN_MMC0_CMD_SDHI1_CMD, FN_MMC0_D0_SDHI1_D0, FN_MMC0_D1_SDHI1_D1,
+ FN_MMC0_D2_SDHI1_D2, FN_MMC0_D3_SDHI1_D3, FN_IP1_3_0,
+ FN_IP1_7_4, FN_MMC0_D6, FN_MMC0_D7,
+
+ /* GPSR1 */
+ FN_IP1_11_8, FN_IP1_15_12, FN_IP1_19_16, FN_IP1_23_20, FN_IP1_27_24,
+ FN_IP1_31_28, FN_IP2_3_0, FN_IP2_7_4, FN_IP2_11_8, FN_IP2_15_12,
+ FN_IP2_19_16, FN_IP2_23_20, FN_IP2_27_24, FN_IP2_31_28, FN_IP3_3_0,
+ FN_IP3_7_4, FN_IP3_11_8, FN_IP3_15_12, FN_IP3_19_16, FN_IP3_23_20,
+ FN_IP3_27_24, FN_IP3_31_28, FN_IP4_3_0,
+
+ /* GPSR2 */
+ FN_IP4_7_4, FN_IP4_11_8, FN_IP4_15_12, FN_IP4_19_16, FN_IP4_23_20,
+ FN_IP4_27_24, FN_IP4_31_28, FN_IP5_3_0, FN_IP5_7_4, FN_IP5_11_8,
+ FN_IP5_15_12, FN_IP5_19_16, FN_IP5_23_20, FN_IP5_27_24, FN_IP5_31_28,
+ FN_IP6_3_0, FN_IP6_7_4, FN_IP6_11_8, FN_IP6_15_12, FN_IP6_19_16,
+ FN_IP6_23_20, FN_IP6_27_24, FN_IP6_31_28, FN_IP7_3_0, FN_IP7_7_4,
+ FN_IP7_11_8, FN_IP7_15_12, FN_IP7_19_16, FN_IP7_23_20, FN_IP7_27_24,
+ FN_IP7_31_28, FN_IP8_3_0,
+
+ /* GPSR3 */
+ FN_IP8_7_4, FN_IP8_11_8, FN_IP8_15_12, FN_IP8_19_16, FN_IP8_23_20,
+ FN_IP8_27_24, FN_IP8_31_28, FN_IP9_3_0, FN_IP9_7_4, FN_IP9_11_8,
+ FN_IP9_15_12, FN_IP9_19_16, FN_IP9_23_20, FN_IP9_27_24, FN_IP9_31_28,
+ FN_IP10_3_0, FN_IP10_7_4, FN_IP10_11_8, FN_IP10_15_12, FN_IP10_19_16,
+
+ /* GPSR4 */
+ FN_IP10_23_20, FN_IP10_27_24, FN_IP10_31_28, FN_IP11_3_0, FN_IP11_7_4,
+ FN_IP11_11_8, FN_IP11_15_12, FN_IP11_19_16, FN_IP11_23_20,
+ FN_IP11_27_24, FN_IP11_31_28, FN_IP12_3_0, FN_IP12_7_4, FN_IP12_11_8,
+ FN_IP12_15_12, FN_IP12_19_16, FN_IP12_23_20, FN_IP12_27_24,
+ FN_IP12_31_28, FN_IP13_3_0, FN_IP13_7_4, FN_IP13_11_8, FN_IP13_15_12,
+ FN_IP13_19_16, FN_IP13_23_20, FN_IP13_27_24,
+
+ /* GPSR5 */
+ FN_IP13_31_28, FN_IP14_3_0, FN_IP14_7_4, FN_IP14_11_8, FN_IP14_15_12,
+ FN_IP14_19_16, FN_IP14_23_20, FN_IP14_27_24, FN_IP14_31_28,
+ FN_IP15_3_0, FN_IP15_7_4, FN_IP15_11_8, FN_IP15_15_12, FN_IP15_19_16,
+ FN_IP15_23_20, FN_IP15_27_24, FN_IP15_31_28, FN_IP16_3_0, FN_IP16_7_4,
+ FN_IP16_11_8, FN_IP16_15_12, FN_IP16_19_16, FN_IP16_23_20,
+ FN_IP16_27_24, FN_IP16_31_28, FN_IP17_3_0, FN_IP17_7_4, FN_IP17_11_8,
+ FN_IP17_15_12, FN_IP17_19_16, FN_IP17_23_20, FN_IP17_27_24,
+
+ /* IPSR0 */
+ FN_SD0_CLK, FN_SSI_SCK1_C, FN_RX3_C,
+ FN_SD0_CMD, FN_SSI_WS1_C, FN_TX3_C,
+ FN_SD0_DAT0, FN_SSI_SDATA1_C, FN_RX4_E,
+ FN_SD0_DAT1, FN_SSI_SCK0129_B, FN_TX4_E,
+ FN_SD0_DAT2, FN_SSI_WS0129_B, FN_RX5_E,
+ FN_SD0_DAT3, FN_SSI_SDATA0_B, FN_TX5_E,
+ FN_SD0_CD, FN_CAN0_RX_A,
+ FN_SD0_WP, FN_IRQ7, FN_CAN0_TX_A,
+
+ /* IPSR1 */
+ FN_MMC0_D4, FN_SD1_CD,
+ FN_MMC0_D5, FN_SD1_WP,
+ FN_D0, FN_SCL3_B, FN_RX5_B, FN_IRQ4, FN_MSIOF2_RXD_C, FN_SSI_SDATA5_B,
+ FN_D1, FN_SDA3_B, FN_TX5_B, FN_MSIOF2_TXD_C, FN_SSI_WS5_B,
+ FN_D2, FN_RX4_B, FN_SCL0_D, FN_PWM1_C, FN_MSIOF2_SCK_C, FN_SSI_SCK5_B,
+ FN_D3, FN_TX4_B, FN_SDA0_D, FN_PWM0_A, FN_MSIOF2_SYNC_C,
+ FN_D4, FN_IRQ3, FN_TCLK1_A, FN_PWM6_C,
+ FN_D5, FN_HRX2, FN_SCL1_B, FN_PWM2_C, FN_TCLK2_B,
+
+ /* IPSR2 */
+ FN_D6, FN_HTX2, FN_SDA1_B, FN_PWM4_C,
+ FN_D7, FN_HSCK2, FN_SCIF1_SCK_C, FN_IRQ6, FN_PWM5_C,
+ FN_D8, FN_HCTS2_N, FN_RX1_C, FN_SCL1_D, FN_PWM3_C,
+ FN_D9, FN_HRTS2_N, FN_TX1_C, FN_SDA1_D,
+ FN_D10, FN_MSIOF2_RXD_A, FN_HRX0_B,
+ FN_D11, FN_MSIOF2_TXD_A, FN_HTX0_B,
+ FN_D12, FN_MSIOF2_SCK_A, FN_HSCK0, FN_CAN_CLK_C,
+ FN_D13, FN_MSIOF2_SYNC_A, FN_RX4_C,
+
+ /* IPSR3 */
+ FN_D14, FN_MSIOF2_SS1, FN_TX4_C, FN_CAN1_RX_B, FN_AVB_AVTP_CAPTURE_A,
+ FN_D15, FN_MSIOF2_SS2, FN_PWM4_A, FN_CAN1_TX_B, FN_IRQ2, FN_AVB_AVTP_MATCH_A,
+ FN_QSPI0_SPCLK, FN_WE0_N,
+ FN_QSPI0_MOSI_QSPI0_IO0, FN_BS_N,
+ FN_QSPI0_MISO_QSPI0_IO1, FN_RD_WR_N,
+ FN_QSPI0_IO2, FN_CS0_N,
+ FN_QSPI0_IO3, FN_RD_N,
+ FN_QSPI0_SSL, FN_WE1_N,
+
+ /* IPSR4 */
+ FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK_A,
+ FN_DU0_DR0, FN_RX5_C, FN_SCL2_D, FN_A0,
+ FN_DU0_DR1, FN_TX5_C, FN_SDA2_D, FN_A1,
+ FN_DU0_DR2, FN_RX0_D, FN_SCL0_E, FN_A2,
+ FN_DU0_DR3, FN_TX0_D, FN_SDA0_E, FN_PWM0_B, FN_A3,
+ FN_DU0_DR4, FN_RX1_D, FN_A4,
+ FN_DU0_DR5, FN_TX1_D, FN_PWM1_B, FN_A5,
+ FN_DU0_DR6, FN_RX2_C, FN_A6,
+
+ /* IPSR5 */
+ FN_DU0_DR7, FN_TX2_C, FN_PWM2_B, FN_A7,
+ FN_DU0_DG0, FN_RX3_B, FN_SCL3_D, FN_A8,
+ FN_DU0_DG1, FN_TX3_B, FN_SDA3_D, FN_PWM3_B, FN_A9,
+ FN_DU0_DG2, FN_RX4_D, FN_A10,
+ FN_DU0_DG3, FN_TX4_D, FN_PWM4_B, FN_A11,
+ FN_DU0_DG4, FN_HRX0_A, FN_A12,
+ FN_DU0_DG5, FN_HTX0_A, FN_PWM5_B, FN_A13,
+ FN_DU0_DG6, FN_HRX1_C, FN_A14,
+
+ /* IPSR6 */
+ FN_DU0_DG7, FN_HTX1_C, FN_PWM6_B, FN_A15,
+ FN_DU0_DB0, FN_SCL4_D, FN_CAN0_RX_C, FN_A16,
+ FN_DU0_DB1, FN_SDA4_D, FN_CAN0_TX_C, FN_A17,
+ FN_DU0_DB2, FN_HCTS0_N, FN_A18,
+ FN_DU0_DB3, FN_HRTS0_N, FN_A19,
+ FN_DU0_DB4, FN_HCTS1_N_C, FN_A20,
+ FN_DU0_DB5, FN_HRTS1_N_C, FN_A21,
+ FN_DU0_DB6, FN_A22,
+
+ /* IPSR7 */
+ FN_DU0_DB7, FN_A23,
+ FN_DU0_DOTCLKIN, FN_A24,
+ FN_DU0_DOTCLKOUT0, FN_A25,
+ FN_DU0_DOTCLKOUT1, FN_MSIOF2_RXD_B, FN_CS1_N_A26,
+ FN_DU0_EXHSYNC_DU0_HSYNC, FN_MSIOF2_TXD_B, FN_DREQ0_N,
+ FN_DU0_EXVSYNC_DU0_VSYNC, FN_MSIOF2_SYNC_B, FN_DACK0,
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_MSIOF2_SCK_B, FN_DRACK0,
+ FN_DU0_DISP, FN_CAN1_RX_C,
+
+ /* IPSR8 */
+ FN_DU0_CDE, FN_CAN1_TX_C,
+ FN_VI1_CLK, FN_AVB_RX_CLK, FN_ETH_REF_CLK,
+ FN_VI1_DATA0, FN_AVB_RX_DV, FN_ETH_CRS_DV,
+ FN_VI1_DATA1, FN_AVB_RXD0, FN_ETH_RXD0,
+ FN_VI1_DATA2, FN_AVB_RXD1, FN_ETH_RXD1,
+ FN_VI1_DATA3, FN_AVB_RXD2, FN_ETH_MDIO,
+ FN_VI1_DATA4, FN_AVB_RXD3, FN_ETH_RX_ER,
+ FN_VI1_DATA5, FN_AVB_RXD4, FN_ETH_LINK,
+
+ /* IPSR9 */
+ FN_VI1_DATA6, FN_AVB_RXD5, FN_ETH_TXD1,
+ FN_VI1_DATA7, FN_AVB_RXD6, FN_ETH_TX_EN,
+ FN_VI1_CLKENB, FN_SCL3_A, FN_AVB_RXD7, FN_ETH_MAGIC,
+ FN_VI1_FIELD, FN_SDA3_A, FN_AVB_RX_ER, FN_ETH_TXD0,
+ FN_VI1_HSYNC_N, FN_RX0_B, FN_SCL0_C, FN_AVB_GTXREFCLK, FN_ETH_MDC,
+ FN_VI1_VSYNC_N, FN_TX0_B, FN_SDA0_C, FN_AUDIO_CLKOUT_B, FN_AVB_TX_CLK,
+ FN_VI1_DATA8, FN_SCL2_B, FN_AVB_TX_EN,
+ FN_VI1_DATA9, FN_SDA2_B, FN_AVB_TXD0,
+
+ /* IPSR10 */
+ FN_VI1_DATA10, FN_CAN0_RX_B, FN_AVB_TXD1,
+ FN_VI1_DATA11, FN_CAN0_TX_B, FN_AVB_TXD2,
+ FN_AVB_TXD3, FN_AUDIO_CLKA_B, FN_SSI_SCK1_D, FN_RX5_F, FN_MSIOF0_RXD_B,
+ FN_AVB_TXD4, FN_AUDIO_CLKB_B, FN_SSI_WS1_D, FN_TX5_F, FN_MSIOF0_TXD_B,
+ FN_AVB_TXD5, FN_SCIF_CLK_B, FN_AUDIO_CLKC_B, FN_SSI_SDATA1_D, FN_MSIOF0_SCK_B,
+ FN_SCL0_A, FN_RX0_C, FN_PWM5_A, FN_TCLK1_B, FN_AVB_TXD6, FN_CAN1_RX_D, FN_MSIOF0_SYNC_B,
+ FN_SDA0_A, FN_TX0_C, FN_IRQ5, FN_CAN_CLK_A, FN_AVB_GTX_CLK, FN_CAN1_TX_D, FN_DVC_MUTE,
+ FN_SCL1_A, FN_RX4_A, FN_PWM5_D, FN_DU1_DR0, FN_SSI_SCK6_B, FN_VI0_G0,
+
+ /* IPSR11 */
+ FN_SDA1_A, FN_TX4_A, FN_DU1_DR1, FN_SSI_WS6_B, FN_VI0_G1,
+ FN_MSIOF0_RXD_A, FN_RX5_A, FN_SCL2_C, FN_DU1_DR2, FN_QSPI1_MOSI_QSPI1_IO0, FN_SSI_SDATA6_B, FN_VI0_G2,
+ FN_MSIOF0_TXD_A, FN_TX5_A, FN_SDA2_C, FN_DU1_DR3, FN_QSPI1_MISO_QSPI1_IO1, FN_SSI_WS78_B, FN_VI0_G3,
+ FN_MSIOF0_SCK_A, FN_IRQ0, FN_DU1_DR4, FN_QSPI1_SPCLK, FN_SSI_SCK78_B, FN_VI0_G4,
+ FN_MSIOF0_SYNC_A, FN_PWM1_A, FN_DU1_DR5, FN_QSPI1_IO2, FN_SSI_SDATA7_B,
+ FN_MSIOF0_SS1_A, FN_DU1_DR6, FN_QSPI1_IO3, FN_SSI_SDATA8_B,
+ FN_MSIOF0_SS2_A, FN_DU1_DR7, FN_QSPI1_SSL,
+ FN_HRX1_A, FN_SCL4_A, FN_PWM6_A, FN_DU1_DG0, FN_RX0_A,
+
+ /* IPSR12 */
+ FN_HTX1_A, FN_SDA4_A, FN_DU1_DG1, FN_TX0_A,
+ FN_HCTS1_N_A, FN_PWM2_A, FN_DU1_DG2, FN_REMOCON_B,
+ FN_HRTS1_N_A, FN_DU1_DG3, FN_SSI_WS1_B, FN_IRQ1,
+ FN_SD2_CLK, FN_HSCK1, FN_DU1_DG4, FN_SSI_SCK1_B,
+ FN_SD2_CMD, FN_SCIF1_SCK_A, FN_TCLK2_A, FN_DU1_DG5, FN_SSI_SCK2_B, FN_PWM3_A,
+ FN_SD2_DAT0, FN_RX1_A, FN_SCL1_E, FN_DU1_DG6, FN_SSI_SDATA1_B,
+ FN_SD2_DAT1, FN_TX1_A, FN_SDA1_E, FN_DU1_DG7, FN_SSI_WS2_B,
+ FN_SD2_DAT2, FN_RX2_A, FN_DU1_DB0, FN_SSI_SDATA2_B,
+
+ /* IPSR13 */
+ FN_SD2_DAT3, FN_TX2_A, FN_DU1_DB1, FN_SSI_WS9_B,
+ FN_SD2_CD, FN_SCIF2_SCK_A, FN_DU1_DB2, FN_SSI_SCK9_B,
+ FN_SD2_WP, FN_SCIF3_SCK, FN_DU1_DB3, FN_SSI_SDATA9_B,
+ FN_RX3_A, FN_SCL1_C, FN_MSIOF1_RXD_B, FN_DU1_DB4, FN_AUDIO_CLKA_C, FN_SSI_SDATA4_B,
+ FN_TX3_A, FN_SDA1_C, FN_MSIOF1_TXD_B, FN_DU1_DB5, FN_AUDIO_CLKB_C, FN_SSI_WS4_B,
+ FN_SCL2_A, FN_MSIOF1_SCK_B, FN_DU1_DB6, FN_AUDIO_CLKC_C, FN_SSI_SCK4_B,
+ FN_SDA2_A, FN_MSIOF1_SYNC_B, FN_DU1_DB7, FN_AUDIO_CLKOUT_C,
+ FN_SSI_SCK5_A, FN_DU1_DOTCLKOUT1,
+
+ /* IPSR14 */
+ FN_SSI_WS5_A, FN_SCL3_C, FN_DU1_DOTCLKIN,
+ FN_SSI_SDATA5_A, FN_SDA3_C, FN_DU1_DOTCLKOUT0,
+ FN_SSI_SCK6_A, FN_DU1_EXODDF_DU1_ODDF_DISP_CDE,
+ FN_SSI_WS6_A, FN_SCL4_C, FN_DU1_EXHSYNC_DU1_HSYNC,
+ FN_SSI_SDATA6_A, FN_SDA4_C, FN_DU1_EXVSYNC_DU1_VSYNC,
+ FN_SSI_SCK78_A, FN_SDA4_E, FN_DU1_DISP,
+ FN_SSI_WS78_A, FN_SCL4_E, FN_DU1_CDE,
+ FN_SSI_SDATA7_A, FN_IRQ8, FN_AUDIO_CLKA_D, FN_CAN_CLK_D, FN_VI0_G5,
+
+ /* IPSR15 */
+ FN_SSI_SCK0129_A, FN_MSIOF1_RXD_A, FN_RX5_D, FN_VI0_G6,
+ FN_SSI_WS0129_A, FN_MSIOF1_TXD_A, FN_TX5_D, FN_VI0_G7,
+ FN_SSI_SDATA0_A, FN_MSIOF1_SYNC_A, FN_PWM0_C, FN_VI0_R0,
+ FN_SSI_SCK34, FN_MSIOF1_SCK_A, FN_AVB_MDC, FN_DACK1, FN_VI0_R1,
+ FN_SSI_WS34, FN_MSIOF1_SS1_A, FN_AVB_MDIO, FN_CAN1_RX_A, FN_DREQ1_N, FN_VI0_R2,
+ FN_SSI_SDATA3, FN_MSIOF1_SS2_A, FN_AVB_LINK, FN_CAN1_TX_A, FN_DREQ2_N, FN_VI0_R3,
+ FN_SSI_SCK4_A, FN_AVB_MAGIC, FN_VI0_R4,
+ FN_SSI_WS4_A, FN_AVB_PHY_INT, FN_VI0_R5,
+
+ /* IPSR16 */
+ FN_SSI_SDATA4_A, FN_AVB_CRS, FN_VI0_R6,
+ FN_SSI_SCK1_A, FN_SCIF1_SCK_B, FN_PWM1_D, FN_IRQ9, FN_REMOCON_A, FN_DACK2, FN_VI0_CLK, FN_AVB_COL,
+ FN_SSI_SDATA8_A, FN_RX1_B, FN_CAN0_RX_D, FN_AVB_AVTP_CAPTURE_B, FN_VI0_R7,
+ FN_SSI_WS1_A, FN_TX1_B, FN_CAN0_TX_D, FN_AVB_AVTP_MATCH_B, FN_VI0_DATA0_VI0_B0,
+ FN_SSI_SDATA1_A, FN_HRX1_B, FN_VI0_DATA1_VI0_B1,
+ FN_SSI_SCK2_A, FN_HTX1_B, FN_AVB_TXD7, FN_VI0_DATA2_VI0_B2,
+ FN_SSI_WS2_A, FN_HCTS1_N_B, FN_AVB_TX_ER, FN_VI0_DATA3_VI0_B3,
+ FN_SSI_SDATA2_A, FN_HRTS1_N_B, FN_VI0_DATA4_VI0_B4,
+
+ /* IPSR17 */
+ FN_SSI_SCK9_A, FN_RX2_B, FN_SCL3_E, FN_EX_WAIT1, FN_VI0_DATA5_VI0_B5,
+ FN_SSI_WS9_A, FN_TX2_B, FN_SDA3_E, FN_VI0_DATA6_VI0_B6,
+ FN_SSI_SDATA9_A, FN_SCIF2_SCK_B, FN_PWM2_D, FN_VI0_DATA7_VI0_B7,
+ FN_AUDIO_CLKA_A, FN_SCL0_B, FN_VI0_CLKENB,
+ FN_AUDIO_CLKB_A, FN_SDA0_B, FN_VI0_FIELD,
+ FN_AUDIO_CLKC_A, FN_SCL4_B, FN_VI0_HSYNC_N,
+ FN_AUDIO_CLKOUT_A, FN_SDA4_B, FN_VI0_VSYNC_N,
+
+ /* MOD_SEL0 */
+ FN_SEL_ADGA_0, FN_SEL_ADGA_1, FN_SEL_ADGA_2, FN_SEL_ADGA_3,
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2, FN_SEL_CANCLK_3,
+ FN_SEL_CAN1_0, FN_SEL_CAN1_1, FN_SEL_CAN1_2, FN_SEL_CAN1_3,
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1, FN_SEL_CAN0_2, FN_SEL_CAN0_3,
+ FN_SEL_I2C04_0, FN_SEL_I2C04_1, FN_SEL_I2C04_2, FN_SEL_I2C04_3, FN_SEL_I2C04_4,
+ FN_SEL_I2C03_0, FN_SEL_I2C03_1, FN_SEL_I2C03_2, FN_SEL_I2C03_3, FN_SEL_I2C03_4,
+ FN_SEL_I2C02_0, FN_SEL_I2C02_1, FN_SEL_I2C02_2, FN_SEL_I2C02_3,
+ FN_SEL_I2C01_0, FN_SEL_I2C01_1, FN_SEL_I2C01_2, FN_SEL_I2C01_3, FN_SEL_I2C01_4,
+ FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2, FN_SEL_I2C00_3, FN_SEL_I2C00_4,
+ FN_SEL_AVB_0, FN_SEL_AVB_1,
+
+ /* MOD_SEL1 */
+ FN_SEL_SCIFCLK_0, FN_SEL_SCIFCLK_1,
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3, FN_SEL_SCIF5_4, FN_SEL_SCIF5_5,
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3, FN_SEL_SCIF4_4,
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2,
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2,
+ FN_SEL_SCIF2_CLK_0, FN_SEL_SCIF2_CLK_1,
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
+ FN_SEL_MSIOF2_0, FN_SEL_MSIOF2_1, FN_SEL_MSIOF2_2,
+ FN_SEL_MSIOF1_0, FN_SEL_MSIOF1_1,
+ FN_SEL_MSIOF0_0, FN_SEL_MSIOF0_1,
+ FN_SEL_RCN_0, FN_SEL_RCN_1,
+ FN_SEL_TMU2_0, FN_SEL_TMU2_1,
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1,
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_HSCIF1_2,
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,
+
+ /* MOD_SEL2 */
+ FN_SEL_ADGB_0, FN_SEL_ADGB_1, FN_SEL_ADGB_2,
+ FN_SEL_ADGC_0, FN_SEL_ADGC_1, FN_SEL_ADGC_2,
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1,
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1,
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1,
+ FN_SEL_SSI6_0, FN_SEL_SSI6_1,
+ FN_SEL_SSI5_0, FN_SEL_SSI5_1,
+ FN_SEL_SSI4_0, FN_SEL_SSI4_1,
+ FN_SEL_SSI2_0, FN_SEL_SSI2_1,
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1, FN_SEL_SSI1_2, FN_SEL_SSI1_3,
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ USB0_PWEN_MARK, USB0_OVC_MARK, USB1_PWEN_MARK, USB1_OVC_MARK,
+ CLKOUT_MARK, MMC0_CLK_SDHI1_CLK_MARK, MMC0_CMD_SDHI1_CMD_MARK,
+ MMC0_D0_SDHI1_D0_MARK, MMC0_D1_SDHI1_D1_MARK,
+ MMC0_D2_SDHI1_D2_MARK, MMC0_D3_SDHI1_D3_MARK, MMC0_D6_MARK,
+ MMC0_D7_MARK,
+
+ /* IPSR0 */
+ SD0_CLK_MARK, SSI_SCK1_C_MARK, RX3_C_MARK,
+ SD0_CMD_MARK, SSI_WS1_C_MARK, TX3_C_MARK,
+ SD0_DAT0_MARK, SSI_SDATA1_C_MARK, RX4_E_MARK,
+ SD0_DAT1_MARK, SSI_SCK0129_B_MARK, TX4_E_MARK,
+ SD0_DAT2_MARK, SSI_WS0129_B_MARK, RX5_E_MARK,
+ SD0_DAT3_MARK, SSI_SDATA0_B_MARK, TX5_E_MARK,
+ SD0_CD_MARK, CAN0_RX_A_MARK,
+ SD0_WP_MARK, IRQ7_MARK, CAN0_TX_A_MARK,
+
+ /* IPSR1 */
+ MMC0_D4_MARK, SD1_CD_MARK,
+ MMC0_D5_MARK, SD1_WP_MARK,
+ D0_MARK, SCL3_B_MARK, RX5_B_MARK, IRQ4_MARK, MSIOF2_RXD_C_MARK, SSI_SDATA5_B_MARK,
+ D1_MARK, SDA3_B_MARK, TX5_B_MARK, MSIOF2_TXD_C_MARK, SSI_WS5_B_MARK,
+ D2_MARK, RX4_B_MARK, SCL0_D_MARK, PWM1_C_MARK, MSIOF2_SCK_C_MARK, SSI_SCK5_B_MARK,
+ D3_MARK, TX4_B_MARK, SDA0_D_MARK, PWM0_A_MARK, MSIOF2_SYNC_C_MARK,
+ D4_MARK, IRQ3_MARK, TCLK1_A_MARK, PWM6_C_MARK,
+ D5_MARK, HRX2_MARK, SCL1_B_MARK, PWM2_C_MARK, TCLK2_B_MARK,
+
+ /* IPSR2 */
+ D6_MARK, HTX2_MARK, SDA1_B_MARK, PWM4_C_MARK,
+ D7_MARK, HSCK2_MARK, SCIF1_SCK_C_MARK, IRQ6_MARK, PWM5_C_MARK,
+ D8_MARK, HCTS2_N_MARK, RX1_C_MARK, SCL1_D_MARK, PWM3_C_MARK,
+ D9_MARK, HRTS2_N_MARK, TX1_C_MARK, SDA1_D_MARK,
+ D10_MARK, MSIOF2_RXD_A_MARK, HRX0_B_MARK,
+ D11_MARK, MSIOF2_TXD_A_MARK, HTX0_B_MARK,
+ D12_MARK, MSIOF2_SCK_A_MARK, HSCK0_MARK, CAN_CLK_C_MARK,
+ D13_MARK, MSIOF2_SYNC_A_MARK, RX4_C_MARK,
+
+ /* IPSR3 */
+ D14_MARK, MSIOF2_SS1_MARK, TX4_C_MARK, CAN1_RX_B_MARK, AVB_AVTP_CAPTURE_A_MARK,
+ D15_MARK, MSIOF2_SS2_MARK, PWM4_A_MARK, CAN1_TX_B_MARK, IRQ2_MARK, AVB_AVTP_MATCH_A_MARK,
+ QSPI0_SPCLK_MARK, WE0_N_MARK,
+ QSPI0_MOSI_QSPI0_IO0_MARK, BS_N_MARK,
+ QSPI0_MISO_QSPI0_IO1_MARK, RD_WR_N_MARK,
+ QSPI0_IO2_MARK, CS0_N_MARK,
+ QSPI0_IO3_MARK, RD_N_MARK,
+ QSPI0_SSL_MARK, WE1_N_MARK,
+
+ /* IPSR4 */
+ EX_WAIT0_MARK, CAN_CLK_B_MARK, SCIF_CLK_A_MARK,
+ DU0_DR0_MARK, RX5_C_MARK, SCL2_D_MARK, A0_MARK,
+ DU0_DR1_MARK, TX5_C_MARK, SDA2_D_MARK, A1_MARK,
+ DU0_DR2_MARK, RX0_D_MARK, SCL0_E_MARK, A2_MARK,
+ DU0_DR3_MARK, TX0_D_MARK, SDA0_E_MARK, PWM0_B_MARK, A3_MARK,
+ DU0_DR4_MARK, RX1_D_MARK, A4_MARK,
+ DU0_DR5_MARK, TX1_D_MARK, PWM1_B_MARK, A5_MARK,
+ DU0_DR6_MARK, RX2_C_MARK, A6_MARK,
+
+ /* IPSR5 */
+ DU0_DR7_MARK, TX2_C_MARK, PWM2_B_MARK, A7_MARK,
+ DU0_DG0_MARK, RX3_B_MARK, SCL3_D_MARK, A8_MARK,
+ DU0_DG1_MARK, TX3_B_MARK, SDA3_D_MARK, PWM3_B_MARK, A9_MARK,
+ DU0_DG2_MARK, RX4_D_MARK, A10_MARK,
+ DU0_DG3_MARK, TX4_D_MARK, PWM4_B_MARK, A11_MARK,
+ DU0_DG4_MARK, HRX0_A_MARK, A12_MARK,
+ DU0_DG5_MARK, HTX0_A_MARK, PWM5_B_MARK, A13_MARK,
+ DU0_DG6_MARK, HRX1_C_MARK, A14_MARK,
+
+ /* IPSR6 */
+ DU0_DG7_MARK, HTX1_C_MARK, PWM6_B_MARK, A15_MARK,
+ DU0_DB0_MARK, SCL4_D_MARK, CAN0_RX_C_MARK, A16_MARK,
+ DU0_DB1_MARK, SDA4_D_MARK, CAN0_TX_C_MARK, A17_MARK,
+ DU0_DB2_MARK, HCTS0_N_MARK, A18_MARK,
+ DU0_DB3_MARK, HRTS0_N_MARK, A19_MARK,
+ DU0_DB4_MARK, HCTS1_N_C_MARK, A20_MARK,
+ DU0_DB5_MARK, HRTS1_N_C_MARK, A21_MARK,
+ DU0_DB6_MARK, A22_MARK,
+
+ /* IPSR7 */
+ DU0_DB7_MARK, A23_MARK,
+ DU0_DOTCLKIN_MARK, A24_MARK,
+ DU0_DOTCLKOUT0_MARK, A25_MARK,
+ DU0_DOTCLKOUT1_MARK, MSIOF2_RXD_B_MARK, CS1_N_A26_MARK,
+ DU0_EXHSYNC_DU0_HSYNC_MARK, MSIOF2_TXD_B_MARK, DREQ0_N_MARK,
+ DU0_EXVSYNC_DU0_VSYNC_MARK, MSIOF2_SYNC_B_MARK, DACK0_MARK,
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK, MSIOF2_SCK_B_MARK, DRACK0_MARK,
+ DU0_DISP_MARK, CAN1_RX_C_MARK,
+
+ /* IPSR8 */
+ DU0_CDE_MARK, CAN1_TX_C_MARK,
+ VI1_CLK_MARK, AVB_RX_CLK_MARK, ETH_REF_CLK_MARK,
+ VI1_DATA0_MARK, AVB_RX_DV_MARK, ETH_CRS_DV_MARK,
+ VI1_DATA1_MARK, AVB_RXD0_MARK, ETH_RXD0_MARK,
+ VI1_DATA2_MARK, AVB_RXD1_MARK, ETH_RXD1_MARK,
+ VI1_DATA3_MARK, AVB_RXD2_MARK, ETH_MDIO_MARK,
+ VI1_DATA4_MARK, AVB_RXD3_MARK, ETH_RX_ER_MARK,
+ VI1_DATA5_MARK, AVB_RXD4_MARK, ETH_LINK_MARK,
+
+ /* IPSR9 */
+ VI1_DATA6_MARK, AVB_RXD5_MARK, ETH_TXD1_MARK,
+ VI1_DATA7_MARK, AVB_RXD6_MARK, ETH_TX_EN_MARK,
+ VI1_CLKENB_MARK, SCL3_A_MARK, AVB_RXD7_MARK, ETH_MAGIC_MARK,
+ VI1_FIELD_MARK, SDA3_A_MARK, AVB_RX_ER_MARK, ETH_TXD0_MARK,
+ VI1_HSYNC_N_MARK, RX0_B_MARK, SCL0_C_MARK, AVB_GTXREFCLK_MARK, ETH_MDC_MARK,
+ VI1_VSYNC_N_MARK, TX0_B_MARK, SDA0_C_MARK, AUDIO_CLKOUT_B_MARK, AVB_TX_CLK_MARK,
+ VI1_DATA8_MARK, SCL2_B_MARK, AVB_TX_EN_MARK,
+ VI1_DATA9_MARK, SDA2_B_MARK, AVB_TXD0_MARK,
+
+ /* IPSR10 */
+ VI1_DATA10_MARK, CAN0_RX_B_MARK, AVB_TXD1_MARK,
+ VI1_DATA11_MARK, CAN0_TX_B_MARK, AVB_TXD2_MARK,
+ AVB_TXD3_MARK, AUDIO_CLKA_B_MARK, SSI_SCK1_D_MARK, RX5_F_MARK, MSIOF0_RXD_B_MARK,
+ AVB_TXD4_MARK, AUDIO_CLKB_B_MARK, SSI_WS1_D_MARK, TX5_F_MARK, MSIOF0_TXD_B_MARK,
+ AVB_TXD5_MARK, SCIF_CLK_B_MARK, AUDIO_CLKC_B_MARK, SSI_SDATA1_D_MARK, MSIOF0_SCK_B_MARK,
+ SCL0_A_MARK, RX0_C_MARK, PWM5_A_MARK, TCLK1_B_MARK, AVB_TXD6_MARK, CAN1_RX_D_MARK, MSIOF0_SYNC_B_MARK,
+ SDA0_A_MARK, TX0_C_MARK, IRQ5_MARK, CAN_CLK_A_MARK, AVB_GTX_CLK_MARK, CAN1_TX_D_MARK, DVC_MUTE_MARK,
+ SCL1_A_MARK, RX4_A_MARK, PWM5_D_MARK, DU1_DR0_MARK, SSI_SCK6_B_MARK, VI0_G0_MARK,
+
+ /* IPSR11 */
+ SDA1_A_MARK, TX4_A_MARK, DU1_DR1_MARK, SSI_WS6_B_MARK, VI0_G1_MARK,
+ MSIOF0_RXD_A_MARK, RX5_A_MARK, SCL2_C_MARK, DU1_DR2_MARK, QSPI1_MOSI_QSPI1_IO0_MARK, SSI_SDATA6_B_MARK, VI0_G2_MARK,
+ MSIOF0_TXD_A_MARK, TX5_A_MARK, SDA2_C_MARK, DU1_DR3_MARK, QSPI1_MISO_QSPI1_IO1_MARK, SSI_WS78_B_MARK, VI0_G3_MARK,
+ MSIOF0_SCK_A_MARK, IRQ0_MARK, DU1_DR4_MARK, QSPI1_SPCLK_MARK, SSI_SCK78_B_MARK, VI0_G4_MARK,
+ MSIOF0_SYNC_A_MARK, PWM1_A_MARK, DU1_DR5_MARK, QSPI1_IO2_MARK, SSI_SDATA7_B_MARK,
+ MSIOF0_SS1_A_MARK, DU1_DR6_MARK, QSPI1_IO3_MARK, SSI_SDATA8_B_MARK,
+ MSIOF0_SS2_A_MARK, DU1_DR7_MARK, QSPI1_SSL_MARK,
+ HRX1_A_MARK, SCL4_A_MARK, PWM6_A_MARK, DU1_DG0_MARK, RX0_A_MARK,
+
+ /* IPSR12 */
+ HTX1_A_MARK, SDA4_A_MARK, DU1_DG1_MARK, TX0_A_MARK,
+ HCTS1_N_A_MARK, PWM2_A_MARK, DU1_DG2_MARK, REMOCON_B_MARK,
+ HRTS1_N_A_MARK, DU1_DG3_MARK, SSI_WS1_B_MARK, IRQ1_MARK,
+ SD2_CLK_MARK, HSCK1_MARK, DU1_DG4_MARK, SSI_SCK1_B_MARK,
+ SD2_CMD_MARK, SCIF1_SCK_A_MARK, TCLK2_A_MARK, DU1_DG5_MARK, SSI_SCK2_B_MARK, PWM3_A_MARK,
+ SD2_DAT0_MARK, RX1_A_MARK, SCL1_E_MARK, DU1_DG6_MARK, SSI_SDATA1_B_MARK,
+ SD2_DAT1_MARK, TX1_A_MARK, SDA1_E_MARK, DU1_DG7_MARK, SSI_WS2_B_MARK,
+ SD2_DAT2_MARK, RX2_A_MARK, DU1_DB0_MARK, SSI_SDATA2_B_MARK,
+
+ /* IPSR13 */
+ SD2_DAT3_MARK, TX2_A_MARK, DU1_DB1_MARK, SSI_WS9_B_MARK,
+ SD2_CD_MARK, SCIF2_SCK_A_MARK, DU1_DB2_MARK, SSI_SCK9_B_MARK,
+ SD2_WP_MARK, SCIF3_SCK_MARK, DU1_DB3_MARK, SSI_SDATA9_B_MARK,
+ RX3_A_MARK, SCL1_C_MARK, MSIOF1_RXD_B_MARK, DU1_DB4_MARK, AUDIO_CLKA_C_MARK, SSI_SDATA4_B_MARK,
+ TX3_A_MARK, SDA1_C_MARK, MSIOF1_TXD_B_MARK, DU1_DB5_MARK, AUDIO_CLKB_C_MARK, SSI_WS4_B_MARK,
+ SCL2_A_MARK, MSIOF1_SCK_B_MARK, DU1_DB6_MARK, AUDIO_CLKC_C_MARK, SSI_SCK4_B_MARK,
+ SDA2_A_MARK, MSIOF1_SYNC_B_MARK, DU1_DB7_MARK, AUDIO_CLKOUT_C_MARK,
+ SSI_SCK5_A_MARK, DU1_DOTCLKOUT1_MARK,
+
+ /* IPSR14 */
+ SSI_WS5_A_MARK, SCL3_C_MARK, DU1_DOTCLKIN_MARK,
+ SSI_SDATA5_A_MARK, SDA3_C_MARK, DU1_DOTCLKOUT0_MARK,
+ SSI_SCK6_A_MARK, DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK,
+ SSI_WS6_A_MARK, SCL4_C_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK,
+ SSI_SDATA6_A_MARK, SDA4_C_MARK, DU1_EXVSYNC_DU1_VSYNC_MARK,
+ SSI_SCK78_A_MARK, SDA4_E_MARK, DU1_DISP_MARK,
+ SSI_WS78_A_MARK, SCL4_E_MARK, DU1_CDE_MARK,
+ SSI_SDATA7_A_MARK, IRQ8_MARK, AUDIO_CLKA_D_MARK, CAN_CLK_D_MARK, VI0_G5_MARK,
+
+ /* IPSR15 */
+ SSI_SCK0129_A_MARK, MSIOF1_RXD_A_MARK, RX5_D_MARK, VI0_G6_MARK,
+ SSI_WS0129_A_MARK, MSIOF1_TXD_A_MARK, TX5_D_MARK, VI0_G7_MARK,
+ SSI_SDATA0_A_MARK, MSIOF1_SYNC_A_MARK, PWM0_C_MARK, VI0_R0_MARK,
+ SSI_SCK34_MARK, MSIOF1_SCK_A_MARK, AVB_MDC_MARK, DACK1_MARK, VI0_R1_MARK,
+ SSI_WS34_MARK, MSIOF1_SS1_A_MARK, AVB_MDIO_MARK, CAN1_RX_A_MARK, DREQ1_N_MARK, VI0_R2_MARK,
+ SSI_SDATA3_MARK, MSIOF1_SS2_A_MARK, AVB_LINK_MARK, CAN1_TX_A_MARK, DREQ2_N_MARK, VI0_R3_MARK,
+ SSI_SCK4_A_MARK, AVB_MAGIC_MARK, VI0_R4_MARK,
+ SSI_WS4_A_MARK, AVB_PHY_INT_MARK, VI0_R5_MARK,
+
+ /* IPSR16 */
+ SSI_SDATA4_A_MARK, AVB_CRS_MARK, VI0_R6_MARK,
+ SSI_SCK1_A_MARK, SCIF1_SCK_B_MARK, PWM1_D_MARK, IRQ9_MARK, REMOCON_A_MARK, DACK2_MARK, VI0_CLK_MARK, AVB_COL_MARK,
+ SSI_SDATA8_A_MARK, RX1_B_MARK, CAN0_RX_D_MARK, AVB_AVTP_CAPTURE_B_MARK, VI0_R7_MARK,
+ SSI_WS1_A_MARK, TX1_B_MARK, CAN0_TX_D_MARK, AVB_AVTP_MATCH_B_MARK, VI0_DATA0_VI0_B0_MARK,
+ SSI_SDATA1_A_MARK, HRX1_B_MARK, VI0_DATA1_VI0_B1_MARK,
+ SSI_SCK2_A_MARK, HTX1_B_MARK, AVB_TXD7_MARK, VI0_DATA2_VI0_B2_MARK,
+ SSI_WS2_A_MARK, HCTS1_N_B_MARK, AVB_TX_ER_MARK, VI0_DATA3_VI0_B3_MARK,
+ SSI_SDATA2_A_MARK, HRTS1_N_B_MARK, VI0_DATA4_VI0_B4_MARK,
+
+ /* IPSR17 */
+ SSI_SCK9_A_MARK, RX2_B_MARK, SCL3_E_MARK, EX_WAIT1_MARK, VI0_DATA5_VI0_B5_MARK,
+ SSI_WS9_A_MARK, TX2_B_MARK, SDA3_E_MARK, VI0_DATA6_VI0_B6_MARK,
+ SSI_SDATA9_A_MARK, SCIF2_SCK_B_MARK, PWM2_D_MARK, VI0_DATA7_VI0_B7_MARK,
+ AUDIO_CLKA_A_MARK, SCL0_B_MARK, VI0_CLKENB_MARK,
+ AUDIO_CLKB_A_MARK, SDA0_B_MARK, VI0_FIELD_MARK,
+ AUDIO_CLKC_A_MARK, SCL4_B_MARK, VI0_HSYNC_N_MARK,
+ AUDIO_CLKOUT_A_MARK, SDA4_B_MARK, VI0_VSYNC_N_MARK,
+
+ PINMUX_MARK_END,
+};
+
+ /*
+  * Pin function (pinmux) data table.
+  *
+  * Each entry associates a function "mark" with the IPSR (peripheral
+  * function select) register field that activates it:
+  *  - PINMUX_SINGLE():     dedicated function, no IPSR field involved.
+  *  - PINMUX_IPSR_GPSR():  function selected via IPSR field only.
+  *  - PINMUX_IPSR_MSEL():  function additionally gated by a module
+  *                         select (MOD_SEL) field, given as 3rd argument.
+  * The IPx_M_N identifiers name the IPSRx register bits [M:N]; entries
+  * are grouped below in register order (IPSR0..IPSR17) and must stay in
+  * this layout-mirroring order.
+  */
+ static const u16 pinmux_data[] = {
+ 	PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
+
+ 	/* Pins with a single, fixed peripheral function */
+ 	PINMUX_SINGLE(USB0_PWEN),
+ 	PINMUX_SINGLE(USB0_OVC),
+ 	PINMUX_SINGLE(USB1_PWEN),
+ 	PINMUX_SINGLE(USB1_OVC),
+ 	PINMUX_SINGLE(CLKOUT),
+ 	PINMUX_SINGLE(MMC0_CLK_SDHI1_CLK),
+ 	PINMUX_SINGLE(MMC0_CMD_SDHI1_CMD),
+ 	PINMUX_SINGLE(MMC0_D0_SDHI1_D0),
+ 	PINMUX_SINGLE(MMC0_D1_SDHI1_D1),
+ 	PINMUX_SINGLE(MMC0_D2_SDHI1_D2),
+ 	PINMUX_SINGLE(MMC0_D3_SDHI1_D3),
+ 	PINMUX_SINGLE(MMC0_D6),
+ 	PINMUX_SINGLE(MMC0_D7),
+
+ 	/* IPSR0 */
+ 	PINMUX_IPSR_GPSR(IP0_3_0, SD0_CLK),
+ 	PINMUX_IPSR_MSEL(IP0_3_0, SSI_SCK1_C, SEL_SSI1_2),
+ 	PINMUX_IPSR_MSEL(IP0_3_0, RX3_C, SEL_SCIF3_2),
+ 	PINMUX_IPSR_GPSR(IP0_7_4, SD0_CMD),
+ 	PINMUX_IPSR_MSEL(IP0_7_4, SSI_WS1_C, SEL_SSI1_2),
+ 	PINMUX_IPSR_MSEL(IP0_7_4, TX3_C, SEL_SCIF3_2),
+ 	PINMUX_IPSR_GPSR(IP0_11_8, SD0_DAT0),
+ 	PINMUX_IPSR_MSEL(IP0_11_8, SSI_SDATA1_C, SEL_SSI1_2),
+ 	PINMUX_IPSR_MSEL(IP0_11_8, RX4_E, SEL_SCIF4_4),
+ 	PINMUX_IPSR_GPSR(IP0_15_12, SD0_DAT1),
+ 	PINMUX_IPSR_MSEL(IP0_15_12, SSI_SCK0129_B, SEL_SSI0_1),
+ 	PINMUX_IPSR_MSEL(IP0_15_12, TX4_E, SEL_SCIF4_4),
+ 	PINMUX_IPSR_GPSR(IP0_19_16, SD0_DAT2),
+ 	PINMUX_IPSR_MSEL(IP0_19_16, SSI_WS0129_B, SEL_SSI0_1),
+ 	PINMUX_IPSR_MSEL(IP0_19_16, RX5_E, SEL_SCIF5_4),
+ 	PINMUX_IPSR_GPSR(IP0_23_20, SD0_DAT3),
+ 	PINMUX_IPSR_MSEL(IP0_23_20, SSI_SDATA0_B, SEL_SSI0_1),
+ 	PINMUX_IPSR_MSEL(IP0_23_20, TX5_E, SEL_SCIF5_4),
+ 	PINMUX_IPSR_GPSR(IP0_27_24, SD0_CD),
+ 	PINMUX_IPSR_MSEL(IP0_27_24, CAN0_RX_A, SEL_CAN0_0),
+ 	PINMUX_IPSR_GPSR(IP0_31_28, SD0_WP),
+ 	PINMUX_IPSR_GPSR(IP0_31_28, IRQ7),
+ 	PINMUX_IPSR_MSEL(IP0_31_28, CAN0_TX_A, SEL_CAN0_0),
+
+ 	/* IPSR1 */
+ 	PINMUX_IPSR_GPSR(IP1_3_0, MMC0_D4),
+ 	PINMUX_IPSR_GPSR(IP1_3_0, SD1_CD),
+ 	PINMUX_IPSR_GPSR(IP1_7_4, MMC0_D5),
+ 	PINMUX_IPSR_GPSR(IP1_7_4, SD1_WP),
+ 	PINMUX_IPSR_GPSR(IP1_11_8, D0),
+ 	PINMUX_IPSR_MSEL(IP1_11_8, SCL3_B, SEL_I2C03_1),
+ 	PINMUX_IPSR_MSEL(IP1_11_8, RX5_B, SEL_SCIF5_1),
+ 	PINMUX_IPSR_GPSR(IP1_11_8, IRQ4),
+ 	PINMUX_IPSR_MSEL(IP1_11_8, MSIOF2_RXD_C, SEL_MSIOF2_2),
+ 	PINMUX_IPSR_MSEL(IP1_11_8, SSI_SDATA5_B, SEL_SSI5_1),
+ 	PINMUX_IPSR_GPSR(IP1_15_12, D1),
+ 	PINMUX_IPSR_MSEL(IP1_15_12, SDA3_B, SEL_I2C03_1),
+ 	PINMUX_IPSR_MSEL(IP1_15_12, TX5_B, SEL_SCIF5_1),
+ 	PINMUX_IPSR_MSEL(IP1_15_12, MSIOF2_TXD_C, SEL_MSIOF2_2),
+ 	PINMUX_IPSR_MSEL(IP1_15_12, SSI_WS5_B, SEL_SSI5_1),
+ 	PINMUX_IPSR_GPSR(IP1_19_16, D2),
+ 	PINMUX_IPSR_MSEL(IP1_19_16, RX4_B, SEL_SCIF4_1),
+ 	PINMUX_IPSR_MSEL(IP1_19_16, SCL0_D, SEL_I2C00_3),
+ 	PINMUX_IPSR_GPSR(IP1_19_16, PWM1_C),
+ 	PINMUX_IPSR_MSEL(IP1_19_16, MSIOF2_SCK_C, SEL_MSIOF2_2),
+ 	PINMUX_IPSR_MSEL(IP1_19_16, SSI_SCK5_B, SEL_SSI5_1),
+ 	PINMUX_IPSR_GPSR(IP1_23_20, D3),
+ 	PINMUX_IPSR_MSEL(IP1_23_20, TX4_B, SEL_SCIF4_1),
+ 	PINMUX_IPSR_MSEL(IP1_23_20, SDA0_D, SEL_I2C00_3),
+ 	PINMUX_IPSR_GPSR(IP1_23_20, PWM0_A),
+ 	PINMUX_IPSR_MSEL(IP1_23_20, MSIOF2_SYNC_C, SEL_MSIOF2_2),
+ 	PINMUX_IPSR_GPSR(IP1_27_24, D4),
+ 	PINMUX_IPSR_GPSR(IP1_27_24, IRQ3),
+ 	PINMUX_IPSR_MSEL(IP1_27_24, TCLK1_A, SEL_TMU1_0),
+ 	PINMUX_IPSR_GPSR(IP1_27_24, PWM6_C),
+ 	PINMUX_IPSR_GPSR(IP1_31_28, D5),
+ 	PINMUX_IPSR_GPSR(IP1_31_28, HRX2),
+ 	PINMUX_IPSR_MSEL(IP1_31_28, SCL1_B, SEL_I2C01_1),
+ 	PINMUX_IPSR_GPSR(IP1_31_28, PWM2_C),
+ 	PINMUX_IPSR_MSEL(IP1_31_28, TCLK2_B, SEL_TMU2_1),
+
+ 	/* IPSR2 */
+ 	PINMUX_IPSR_GPSR(IP2_3_0, D6),
+ 	PINMUX_IPSR_GPSR(IP2_3_0, HTX2),
+ 	PINMUX_IPSR_MSEL(IP2_3_0, SDA1_B, SEL_I2C01_1),
+ 	PINMUX_IPSR_GPSR(IP2_3_0, PWM4_C),
+ 	PINMUX_IPSR_GPSR(IP2_7_4, D7),
+ 	PINMUX_IPSR_GPSR(IP2_7_4, HSCK2),
+ 	PINMUX_IPSR_MSEL(IP2_7_4, SCIF1_SCK_C, SEL_SCIF1_2),
+ 	PINMUX_IPSR_GPSR(IP2_7_4, IRQ6),
+ 	PINMUX_IPSR_GPSR(IP2_7_4, PWM5_C),
+ 	PINMUX_IPSR_GPSR(IP2_11_8, D8),
+ 	PINMUX_IPSR_GPSR(IP2_11_8, HCTS2_N),
+ 	PINMUX_IPSR_MSEL(IP2_11_8, RX1_C, SEL_SCIF1_2),
+ 	PINMUX_IPSR_MSEL(IP2_11_8, SCL1_D, SEL_I2C01_3),
+ 	PINMUX_IPSR_GPSR(IP2_11_8, PWM3_C),
+ 	PINMUX_IPSR_GPSR(IP2_15_12, D9),
+ 	PINMUX_IPSR_GPSR(IP2_15_12, HRTS2_N),
+ 	PINMUX_IPSR_MSEL(IP2_15_12, TX1_C, SEL_SCIF1_2),
+ 	PINMUX_IPSR_MSEL(IP2_15_12, SDA1_D, SEL_I2C01_3),
+ 	PINMUX_IPSR_GPSR(IP2_19_16, D10),
+ 	PINMUX_IPSR_MSEL(IP2_19_16, MSIOF2_RXD_A, SEL_MSIOF2_0),
+ 	PINMUX_IPSR_MSEL(IP2_19_16, HRX0_B, SEL_HSCIF0_1),
+ 	PINMUX_IPSR_GPSR(IP2_23_20, D11),
+ 	PINMUX_IPSR_MSEL(IP2_23_20, MSIOF2_TXD_A, SEL_MSIOF2_0),
+ 	PINMUX_IPSR_MSEL(IP2_23_20, HTX0_B, SEL_HSCIF0_1),
+ 	PINMUX_IPSR_GPSR(IP2_27_24, D12),
+ 	PINMUX_IPSR_MSEL(IP2_27_24, MSIOF2_SCK_A, SEL_MSIOF2_0),
+ 	PINMUX_IPSR_GPSR(IP2_27_24, HSCK0),
+ 	PINMUX_IPSR_MSEL(IP2_27_24, CAN_CLK_C, SEL_CANCLK_2),
+ 	PINMUX_IPSR_GPSR(IP2_31_28, D13),
+ 	PINMUX_IPSR_MSEL(IP2_31_28, MSIOF2_SYNC_A, SEL_MSIOF2_0),
+ 	PINMUX_IPSR_MSEL(IP2_31_28, RX4_C, SEL_SCIF4_2),
+
+ 	/* IPSR3 */
+ 	PINMUX_IPSR_GPSR(IP3_3_0, D14),
+ 	PINMUX_IPSR_GPSR(IP3_3_0, MSIOF2_SS1),
+ 	PINMUX_IPSR_MSEL(IP3_3_0, TX4_C, SEL_SCIF4_2),
+ 	PINMUX_IPSR_MSEL(IP3_3_0, CAN1_RX_B, SEL_CAN1_1),
+ 	PINMUX_IPSR_MSEL(IP3_3_0, AVB_AVTP_CAPTURE_A, SEL_AVB_0),
+ 	PINMUX_IPSR_GPSR(IP3_7_4, D15),
+ 	PINMUX_IPSR_GPSR(IP3_7_4, MSIOF2_SS2),
+ 	PINMUX_IPSR_GPSR(IP3_7_4, PWM4_A),
+ 	PINMUX_IPSR_MSEL(IP3_7_4, CAN1_TX_B, SEL_CAN1_1),
+ 	PINMUX_IPSR_GPSR(IP3_7_4, IRQ2),
+ 	PINMUX_IPSR_MSEL(IP3_7_4, AVB_AVTP_MATCH_A, SEL_AVB_0),
+ 	PINMUX_IPSR_GPSR(IP3_11_8, QSPI0_SPCLK),
+ 	PINMUX_IPSR_GPSR(IP3_11_8, WE0_N),
+ 	PINMUX_IPSR_GPSR(IP3_15_12, QSPI0_MOSI_QSPI0_IO0),
+ 	PINMUX_IPSR_GPSR(IP3_15_12, BS_N),
+ 	PINMUX_IPSR_GPSR(IP3_19_16, QSPI0_MISO_QSPI0_IO1),
+ 	PINMUX_IPSR_GPSR(IP3_19_16, RD_WR_N),
+ 	PINMUX_IPSR_GPSR(IP3_23_20, QSPI0_IO2),
+ 	PINMUX_IPSR_GPSR(IP3_23_20, CS0_N),
+ 	PINMUX_IPSR_GPSR(IP3_27_24, QSPI0_IO3),
+ 	PINMUX_IPSR_GPSR(IP3_27_24, RD_N),
+ 	PINMUX_IPSR_GPSR(IP3_31_28, QSPI0_SSL),
+ 	PINMUX_IPSR_GPSR(IP3_31_28, WE1_N),
+
+ 	/* IPSR4 */
+ 	PINMUX_IPSR_GPSR(IP4_3_0, EX_WAIT0),
+ 	PINMUX_IPSR_MSEL(IP4_3_0, CAN_CLK_B, SEL_CANCLK_1),
+ 	PINMUX_IPSR_MSEL(IP4_3_0, SCIF_CLK_A, SEL_SCIFCLK_0),
+ 	PINMUX_IPSR_GPSR(IP4_7_4, DU0_DR0),
+ 	PINMUX_IPSR_MSEL(IP4_7_4, RX5_C, SEL_SCIF5_2),
+ 	PINMUX_IPSR_MSEL(IP4_7_4, SCL2_D, SEL_I2C02_3),
+ 	PINMUX_IPSR_GPSR(IP4_7_4, A0),
+ 	PINMUX_IPSR_GPSR(IP4_11_8, DU0_DR1),
+ 	PINMUX_IPSR_MSEL(IP4_11_8, TX5_C, SEL_SCIF5_2),
+ 	PINMUX_IPSR_MSEL(IP4_11_8, SDA2_D, SEL_I2C02_3),
+ 	PINMUX_IPSR_GPSR(IP4_11_8, A1),
+ 	PINMUX_IPSR_GPSR(IP4_15_12, DU0_DR2),
+ 	PINMUX_IPSR_MSEL(IP4_15_12, RX0_D, SEL_SCIF0_3),
+ 	PINMUX_IPSR_MSEL(IP4_15_12, SCL0_E, SEL_I2C00_4),
+ 	PINMUX_IPSR_GPSR(IP4_15_12, A2),
+ 	PINMUX_IPSR_GPSR(IP4_19_16, DU0_DR3),
+ 	PINMUX_IPSR_MSEL(IP4_19_16, TX0_D, SEL_SCIF0_3),
+ 	PINMUX_IPSR_MSEL(IP4_19_16, SDA0_E, SEL_I2C00_4),
+ 	PINMUX_IPSR_GPSR(IP4_19_16, PWM0_B),
+ 	PINMUX_IPSR_GPSR(IP4_19_16, A3),
+ 	PINMUX_IPSR_GPSR(IP4_23_20, DU0_DR4),
+ 	PINMUX_IPSR_MSEL(IP4_23_20, RX1_D, SEL_SCIF1_3),
+ 	PINMUX_IPSR_GPSR(IP4_23_20, A4),
+ 	PINMUX_IPSR_GPSR(IP4_27_24, DU0_DR5),
+ 	PINMUX_IPSR_MSEL(IP4_27_24, TX1_D, SEL_SCIF1_3),
+ 	PINMUX_IPSR_GPSR(IP4_27_24, PWM1_B),
+ 	PINMUX_IPSR_GPSR(IP4_27_24, A5),
+ 	PINMUX_IPSR_GPSR(IP4_31_28, DU0_DR6),
+ 	PINMUX_IPSR_MSEL(IP4_31_28, RX2_C, SEL_SCIF2_2),
+ 	PINMUX_IPSR_GPSR(IP4_31_28, A6),
+
+ 	/* IPSR5 */
+ 	PINMUX_IPSR_GPSR(IP5_3_0, DU0_DR7),
+ 	PINMUX_IPSR_MSEL(IP5_3_0, TX2_C, SEL_SCIF2_2),
+ 	PINMUX_IPSR_GPSR(IP5_3_0, PWM2_B),
+ 	PINMUX_IPSR_GPSR(IP5_3_0, A7),
+ 	PINMUX_IPSR_GPSR(IP5_7_4, DU0_DG0),
+ 	PINMUX_IPSR_MSEL(IP5_7_4, RX3_B, SEL_SCIF3_1),
+ 	PINMUX_IPSR_MSEL(IP5_7_4, SCL3_D, SEL_I2C03_3),
+ 	PINMUX_IPSR_GPSR(IP5_7_4, A8),
+ 	PINMUX_IPSR_GPSR(IP5_11_8, DU0_DG1),
+ 	PINMUX_IPSR_MSEL(IP5_11_8, TX3_B, SEL_SCIF3_1),
+ 	PINMUX_IPSR_MSEL(IP5_11_8, SDA3_D, SEL_I2C03_3),
+ 	PINMUX_IPSR_GPSR(IP5_11_8, PWM3_B),
+ 	PINMUX_IPSR_GPSR(IP5_11_8, A9),
+ 	PINMUX_IPSR_GPSR(IP5_15_12, DU0_DG2),
+ 	PINMUX_IPSR_MSEL(IP5_15_12, RX4_D, SEL_SCIF4_3),
+ 	PINMUX_IPSR_GPSR(IP5_15_12, A10),
+ 	PINMUX_IPSR_GPSR(IP5_19_16, DU0_DG3),
+ 	PINMUX_IPSR_MSEL(IP5_19_16, TX4_D, SEL_SCIF4_3),
+ 	PINMUX_IPSR_GPSR(IP5_19_16, PWM4_B),
+ 	PINMUX_IPSR_GPSR(IP5_19_16, A11),
+ 	PINMUX_IPSR_GPSR(IP5_23_20, DU0_DG4),
+ 	PINMUX_IPSR_MSEL(IP5_23_20, HRX0_A, SEL_HSCIF0_0),
+ 	PINMUX_IPSR_GPSR(IP5_23_20, A12),
+ 	PINMUX_IPSR_GPSR(IP5_27_24, DU0_DG5),
+ 	PINMUX_IPSR_MSEL(IP5_27_24, HTX0_A, SEL_HSCIF0_0),
+ 	PINMUX_IPSR_GPSR(IP5_27_24, PWM5_B),
+ 	PINMUX_IPSR_GPSR(IP5_27_24, A13),
+ 	PINMUX_IPSR_GPSR(IP5_31_28, DU0_DG6),
+ 	PINMUX_IPSR_MSEL(IP5_31_28, HRX1_C, SEL_HSCIF1_2),
+ 	PINMUX_IPSR_GPSR(IP5_31_28, A14),
+
+ 	/* IPSR6 */
+ 	PINMUX_IPSR_GPSR(IP6_3_0, DU0_DG7),
+ 	PINMUX_IPSR_MSEL(IP6_3_0, HTX1_C, SEL_HSCIF1_2),
+ 	PINMUX_IPSR_GPSR(IP6_3_0, PWM6_B),
+ 	PINMUX_IPSR_GPSR(IP6_3_0, A15),
+ 	PINMUX_IPSR_GPSR(IP6_7_4, DU0_DB0),
+ 	PINMUX_IPSR_MSEL(IP6_7_4, SCL4_D, SEL_I2C04_3),
+ 	PINMUX_IPSR_MSEL(IP6_7_4, CAN0_RX_C, SEL_CAN0_2),
+ 	PINMUX_IPSR_GPSR(IP6_7_4, A16),
+ 	PINMUX_IPSR_GPSR(IP6_11_8, DU0_DB1),
+ 	PINMUX_IPSR_MSEL(IP6_11_8, SDA4_D, SEL_I2C04_3),
+ 	PINMUX_IPSR_MSEL(IP6_11_8, CAN0_TX_C, SEL_CAN0_2),
+ 	PINMUX_IPSR_GPSR(IP6_11_8, A17),
+ 	PINMUX_IPSR_GPSR(IP6_15_12, DU0_DB2),
+ 	PINMUX_IPSR_GPSR(IP6_15_12, HCTS0_N),
+ 	PINMUX_IPSR_GPSR(IP6_15_12, A18),
+ 	PINMUX_IPSR_GPSR(IP6_19_16, DU0_DB3),
+ 	PINMUX_IPSR_GPSR(IP6_19_16, HRTS0_N),
+ 	PINMUX_IPSR_GPSR(IP6_19_16, A19),
+ 	PINMUX_IPSR_GPSR(IP6_23_20, DU0_DB4),
+ 	PINMUX_IPSR_MSEL(IP6_23_20, HCTS1_N_C, SEL_HSCIF1_2),
+ 	PINMUX_IPSR_GPSR(IP6_23_20, A20),
+ 	PINMUX_IPSR_GPSR(IP6_27_24, DU0_DB5),
+ 	PINMUX_IPSR_MSEL(IP6_27_24, HRTS1_N_C, SEL_HSCIF1_2),
+ 	PINMUX_IPSR_GPSR(IP6_27_24, A21),
+ 	PINMUX_IPSR_GPSR(IP6_31_28, DU0_DB6),
+ 	PINMUX_IPSR_GPSR(IP6_31_28, A22),
+
+ 	/* IPSR7 */
+ 	PINMUX_IPSR_GPSR(IP7_3_0, DU0_DB7),
+ 	PINMUX_IPSR_GPSR(IP7_3_0, A23),
+ 	PINMUX_IPSR_GPSR(IP7_7_4, DU0_DOTCLKIN),
+ 	PINMUX_IPSR_GPSR(IP7_7_4, A24),
+ 	PINMUX_IPSR_GPSR(IP7_11_8, DU0_DOTCLKOUT0),
+ 	PINMUX_IPSR_GPSR(IP7_11_8, A25),
+ 	PINMUX_IPSR_GPSR(IP7_15_12, DU0_DOTCLKOUT1),
+ 	PINMUX_IPSR_MSEL(IP7_15_12, MSIOF2_RXD_B, SEL_MSIOF2_1),
+ 	PINMUX_IPSR_GPSR(IP7_15_12, CS1_N_A26),
+ 	PINMUX_IPSR_GPSR(IP7_19_16, DU0_EXHSYNC_DU0_HSYNC),
+ 	PINMUX_IPSR_MSEL(IP7_19_16, MSIOF2_TXD_B, SEL_MSIOF2_1),
+ 	PINMUX_IPSR_GPSR(IP7_19_16, DREQ0_N),
+ 	PINMUX_IPSR_GPSR(IP7_23_20, DU0_EXVSYNC_DU0_VSYNC),
+ 	PINMUX_IPSR_MSEL(IP7_23_20, MSIOF2_SYNC_B, SEL_MSIOF2_1),
+ 	PINMUX_IPSR_GPSR(IP7_23_20, DACK0),
+ 	PINMUX_IPSR_GPSR(IP7_27_24, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+ 	PINMUX_IPSR_MSEL(IP7_27_24, MSIOF2_SCK_B, SEL_MSIOF2_1),
+ 	PINMUX_IPSR_GPSR(IP7_27_24, DRACK0),
+ 	PINMUX_IPSR_GPSR(IP7_31_28, DU0_DISP),
+ 	PINMUX_IPSR_MSEL(IP7_31_28, CAN1_RX_C, SEL_CAN1_2),
+
+ 	/* IPSR8 */
+ 	PINMUX_IPSR_GPSR(IP8_3_0, DU0_CDE),
+ 	PINMUX_IPSR_MSEL(IP8_3_0, CAN1_TX_C, SEL_CAN1_2),
+ 	PINMUX_IPSR_GPSR(IP8_7_4, VI1_CLK),
+ 	PINMUX_IPSR_GPSR(IP8_7_4, AVB_RX_CLK),
+ 	PINMUX_IPSR_GPSR(IP8_7_4, ETH_REF_CLK),
+ 	PINMUX_IPSR_GPSR(IP8_11_8, VI1_DATA0),
+ 	PINMUX_IPSR_GPSR(IP8_11_8, AVB_RX_DV),
+ 	PINMUX_IPSR_GPSR(IP8_11_8, ETH_CRS_DV),
+ 	PINMUX_IPSR_GPSR(IP8_15_12, VI1_DATA1),
+ 	PINMUX_IPSR_GPSR(IP8_15_12, AVB_RXD0),
+ 	PINMUX_IPSR_GPSR(IP8_15_12, ETH_RXD0),
+ 	PINMUX_IPSR_GPSR(IP8_19_16, VI1_DATA2),
+ 	PINMUX_IPSR_GPSR(IP8_19_16, AVB_RXD1),
+ 	PINMUX_IPSR_GPSR(IP8_19_16, ETH_RXD1),
+ 	PINMUX_IPSR_GPSR(IP8_23_20, VI1_DATA3),
+ 	PINMUX_IPSR_GPSR(IP8_23_20, AVB_RXD2),
+ 	PINMUX_IPSR_GPSR(IP8_23_20, ETH_MDIO),
+ 	PINMUX_IPSR_GPSR(IP8_27_24, VI1_DATA4),
+ 	PINMUX_IPSR_GPSR(IP8_27_24, AVB_RXD3),
+ 	PINMUX_IPSR_GPSR(IP8_27_24, ETH_RX_ER),
+ 	PINMUX_IPSR_GPSR(IP8_31_28, VI1_DATA5),
+ 	PINMUX_IPSR_GPSR(IP8_31_28, AVB_RXD4),
+ 	PINMUX_IPSR_GPSR(IP8_31_28, ETH_LINK),
+
+ 	/* IPSR9 */
+ 	PINMUX_IPSR_GPSR(IP9_3_0, VI1_DATA6),
+ 	PINMUX_IPSR_GPSR(IP9_3_0, AVB_RXD5),
+ 	PINMUX_IPSR_GPSR(IP9_3_0, ETH_TXD1),
+ 	PINMUX_IPSR_GPSR(IP9_7_4, VI1_DATA7),
+ 	PINMUX_IPSR_GPSR(IP9_7_4, AVB_RXD6),
+ 	PINMUX_IPSR_GPSR(IP9_7_4, ETH_TX_EN),
+ 	PINMUX_IPSR_GPSR(IP9_11_8, VI1_CLKENB),
+ 	PINMUX_IPSR_MSEL(IP9_11_8, SCL3_A, SEL_I2C03_0),
+ 	PINMUX_IPSR_GPSR(IP9_11_8, AVB_RXD7),
+ 	PINMUX_IPSR_GPSR(IP9_11_8, ETH_MAGIC),
+ 	PINMUX_IPSR_GPSR(IP9_15_12, VI1_FIELD),
+ 	PINMUX_IPSR_MSEL(IP9_15_12, SDA3_A, SEL_I2C03_0),
+ 	PINMUX_IPSR_GPSR(IP9_15_12, AVB_RX_ER),
+ 	PINMUX_IPSR_GPSR(IP9_15_12, ETH_TXD0),
+ 	PINMUX_IPSR_GPSR(IP9_19_16, VI1_HSYNC_N),
+ 	PINMUX_IPSR_MSEL(IP9_19_16, RX0_B, SEL_SCIF0_1),
+ 	PINMUX_IPSR_MSEL(IP9_19_16, SCL0_C, SEL_I2C00_2),
+ 	PINMUX_IPSR_GPSR(IP9_19_16, AVB_GTXREFCLK),
+ 	PINMUX_IPSR_GPSR(IP9_19_16, ETH_MDC),
+ 	PINMUX_IPSR_GPSR(IP9_23_20, VI1_VSYNC_N),
+ 	PINMUX_IPSR_MSEL(IP9_23_20, TX0_B, SEL_SCIF0_1),
+ 	PINMUX_IPSR_MSEL(IP9_23_20, SDA0_C, SEL_I2C00_2),
+ 	PINMUX_IPSR_GPSR(IP9_23_20, AUDIO_CLKOUT_B),
+ 	PINMUX_IPSR_GPSR(IP9_23_20, AVB_TX_CLK),
+ 	PINMUX_IPSR_GPSR(IP9_27_24, VI1_DATA8),
+ 	PINMUX_IPSR_MSEL(IP9_27_24, SCL2_B, SEL_I2C02_1),
+ 	PINMUX_IPSR_GPSR(IP9_27_24, AVB_TX_EN),
+ 	PINMUX_IPSR_GPSR(IP9_31_28, VI1_DATA9),
+ 	PINMUX_IPSR_MSEL(IP9_31_28, SDA2_B, SEL_I2C02_1),
+ 	PINMUX_IPSR_GPSR(IP9_31_28, AVB_TXD0),
+
+ 	/* IPSR10 */
+ 	PINMUX_IPSR_GPSR(IP10_3_0, VI1_DATA10),
+ 	PINMUX_IPSR_MSEL(IP10_3_0, CAN0_RX_B, SEL_CAN0_1),
+ 	PINMUX_IPSR_GPSR(IP10_3_0, AVB_TXD1),
+ 	PINMUX_IPSR_GPSR(IP10_7_4, VI1_DATA11),
+ 	PINMUX_IPSR_MSEL(IP10_7_4, CAN0_TX_B, SEL_CAN0_1),
+ 	PINMUX_IPSR_GPSR(IP10_7_4, AVB_TXD2),
+ 	PINMUX_IPSR_GPSR(IP10_11_8, AVB_TXD3),
+ 	PINMUX_IPSR_MSEL(IP10_11_8, AUDIO_CLKA_B, SEL_ADGA_1),
+ 	PINMUX_IPSR_MSEL(IP10_11_8, SSI_SCK1_D, SEL_SSI1_3),
+ 	PINMUX_IPSR_MSEL(IP10_11_8, RX5_F, SEL_SCIF5_5),
+ 	PINMUX_IPSR_MSEL(IP10_11_8, MSIOF0_RXD_B, SEL_MSIOF0_1),
+ 	PINMUX_IPSR_GPSR(IP10_15_12, AVB_TXD4),
+ 	PINMUX_IPSR_MSEL(IP10_15_12, AUDIO_CLKB_B, SEL_ADGB_1),
+ 	PINMUX_IPSR_MSEL(IP10_15_12, SSI_WS1_D, SEL_SSI1_3),
+ 	PINMUX_IPSR_MSEL(IP10_15_12, TX5_F, SEL_SCIF5_5),
+ 	PINMUX_IPSR_MSEL(IP10_15_12, MSIOF0_TXD_B, SEL_MSIOF0_1),
+ 	PINMUX_IPSR_GPSR(IP10_19_16, AVB_TXD5),
+ 	PINMUX_IPSR_MSEL(IP10_19_16, SCIF_CLK_B, SEL_SCIFCLK_1),
+ 	PINMUX_IPSR_MSEL(IP10_19_16, AUDIO_CLKC_B, SEL_ADGC_1),
+ 	PINMUX_IPSR_MSEL(IP10_19_16, SSI_SDATA1_D, SEL_SSI1_3),
+ 	PINMUX_IPSR_MSEL(IP10_19_16, MSIOF0_SCK_B, SEL_MSIOF0_1),
+ 	PINMUX_IPSR_MSEL(IP10_23_20, SCL0_A, SEL_I2C00_0),
+ 	PINMUX_IPSR_MSEL(IP10_23_20, RX0_C, SEL_SCIF0_2),
+ 	PINMUX_IPSR_GPSR(IP10_23_20, PWM5_A),
+ 	PINMUX_IPSR_MSEL(IP10_23_20, TCLK1_B, SEL_TMU1_1),
+ 	PINMUX_IPSR_GPSR(IP10_23_20, AVB_TXD6),
+ 	PINMUX_IPSR_MSEL(IP10_23_20, CAN1_RX_D, SEL_CAN1_3),
+ 	PINMUX_IPSR_MSEL(IP10_23_20, MSIOF0_SYNC_B, SEL_MSIOF0_1),
+ 	PINMUX_IPSR_MSEL(IP10_27_24, SDA0_A, SEL_I2C00_0),
+ 	PINMUX_IPSR_MSEL(IP10_27_24, TX0_C, SEL_SCIF0_2),
+ 	PINMUX_IPSR_GPSR(IP10_27_24, IRQ5),
+ 	PINMUX_IPSR_MSEL(IP10_27_24, CAN_CLK_A, SEL_CANCLK_0),
+ 	PINMUX_IPSR_GPSR(IP10_27_24, AVB_GTX_CLK),
+ 	PINMUX_IPSR_MSEL(IP10_27_24, CAN1_TX_D, SEL_CAN1_3),
+ 	PINMUX_IPSR_GPSR(IP10_27_24, DVC_MUTE),
+ 	PINMUX_IPSR_MSEL(IP10_31_28, SCL1_A, SEL_I2C01_0),
+ 	PINMUX_IPSR_MSEL(IP10_31_28, RX4_A, SEL_SCIF4_0),
+ 	PINMUX_IPSR_GPSR(IP10_31_28, PWM5_D),
+ 	PINMUX_IPSR_GPSR(IP10_31_28, DU1_DR0),
+ 	PINMUX_IPSR_MSEL(IP10_31_28, SSI_SCK6_B, SEL_SSI6_1),
+ 	PINMUX_IPSR_GPSR(IP10_31_28, VI0_G0),
+
+ 	/* IPSR11 */
+ 	PINMUX_IPSR_MSEL(IP11_3_0, SDA1_A, SEL_I2C01_0),
+ 	PINMUX_IPSR_MSEL(IP11_3_0, TX4_A, SEL_SCIF4_0),
+ 	PINMUX_IPSR_GPSR(IP11_3_0, DU1_DR1),
+ 	PINMUX_IPSR_MSEL(IP11_3_0, SSI_WS6_B, SEL_SSI6_1),
+ 	PINMUX_IPSR_GPSR(IP11_3_0, VI0_G1),
+ 	PINMUX_IPSR_MSEL(IP11_7_4, MSIOF0_RXD_A, SEL_MSIOF0_0),
+ 	PINMUX_IPSR_MSEL(IP11_7_4, RX5_A, SEL_SCIF5_0),
+ 	PINMUX_IPSR_MSEL(IP11_7_4, SCL2_C, SEL_I2C02_2),
+ 	PINMUX_IPSR_GPSR(IP11_7_4, DU1_DR2),
+ 	PINMUX_IPSR_GPSR(IP11_7_4, QSPI1_MOSI_QSPI1_IO0),
+ 	PINMUX_IPSR_MSEL(IP11_7_4, SSI_SDATA6_B, SEL_SSI6_1),
+ 	PINMUX_IPSR_GPSR(IP11_7_4, VI0_G2),
+ 	PINMUX_IPSR_MSEL(IP11_11_8, MSIOF0_TXD_A, SEL_MSIOF0_0),
+ 	PINMUX_IPSR_MSEL(IP11_11_8, TX5_A, SEL_SCIF5_0),
+ 	PINMUX_IPSR_MSEL(IP11_11_8, SDA2_C, SEL_I2C02_2),
+ 	PINMUX_IPSR_GPSR(IP11_11_8, DU1_DR3),
+ 	PINMUX_IPSR_GPSR(IP11_11_8, QSPI1_MISO_QSPI1_IO1),
+ 	PINMUX_IPSR_MSEL(IP11_11_8, SSI_WS78_B, SEL_SSI7_1),
+ 	PINMUX_IPSR_GPSR(IP11_11_8, VI0_G3),
+ 	PINMUX_IPSR_MSEL(IP11_15_12, MSIOF0_SCK_A, SEL_MSIOF0_0),
+ 	PINMUX_IPSR_GPSR(IP11_15_12, IRQ0),
+ 	PINMUX_IPSR_GPSR(IP11_15_12, DU1_DR4),
+ 	PINMUX_IPSR_GPSR(IP11_15_12, QSPI1_SPCLK),
+ 	PINMUX_IPSR_MSEL(IP11_15_12, SSI_SCK78_B, SEL_SSI7_1),
+ 	PINMUX_IPSR_GPSR(IP11_15_12, VI0_G4),
+ 	PINMUX_IPSR_MSEL(IP11_19_16, MSIOF0_SYNC_A, SEL_MSIOF0_0),
+ 	PINMUX_IPSR_GPSR(IP11_19_16, PWM1_A),
+ 	PINMUX_IPSR_GPSR(IP11_19_16, DU1_DR5),
+ 	PINMUX_IPSR_GPSR(IP11_19_16, QSPI1_IO2),
+ 	PINMUX_IPSR_MSEL(IP11_19_16, SSI_SDATA7_B, SEL_SSI7_1),
+ 	PINMUX_IPSR_MSEL(IP11_23_20, MSIOF0_SS1_A, SEL_MSIOF0_0),
+ 	PINMUX_IPSR_GPSR(IP11_23_20, DU1_DR6),
+ 	PINMUX_IPSR_GPSR(IP11_23_20, QSPI1_IO3),
+ 	PINMUX_IPSR_MSEL(IP11_23_20, SSI_SDATA8_B, SEL_SSI8_1),
+ 	PINMUX_IPSR_MSEL(IP11_27_24, MSIOF0_SS2_A, SEL_MSIOF0_0),
+ 	PINMUX_IPSR_GPSR(IP11_27_24, DU1_DR7),
+ 	PINMUX_IPSR_GPSR(IP11_27_24, QSPI1_SSL),
+ 	PINMUX_IPSR_MSEL(IP11_31_28, HRX1_A, SEL_HSCIF1_0),
+ 	PINMUX_IPSR_MSEL(IP11_31_28, SCL4_A, SEL_I2C04_0),
+ 	PINMUX_IPSR_GPSR(IP11_31_28, PWM6_A),
+ 	PINMUX_IPSR_GPSR(IP11_31_28, DU1_DG0),
+ 	PINMUX_IPSR_MSEL(IP11_31_28, RX0_A, SEL_SCIF0_0),
+
+ 	/* IPSR12 */
+ 	PINMUX_IPSR_MSEL(IP12_3_0, HTX1_A, SEL_HSCIF1_0),
+ 	PINMUX_IPSR_MSEL(IP12_3_0, SDA4_A, SEL_I2C04_0),
+ 	PINMUX_IPSR_GPSR(IP12_3_0, DU1_DG1),
+ 	PINMUX_IPSR_MSEL(IP12_3_0, TX0_A, SEL_SCIF0_0),
+ 	PINMUX_IPSR_MSEL(IP12_7_4, HCTS1_N_A, SEL_HSCIF1_0),
+ 	PINMUX_IPSR_GPSR(IP12_7_4, PWM2_A),
+ 	PINMUX_IPSR_GPSR(IP12_7_4, DU1_DG2),
+ 	PINMUX_IPSR_MSEL(IP12_7_4, REMOCON_B, SEL_RCN_1),
+ 	PINMUX_IPSR_MSEL(IP12_11_8, HRTS1_N_A, SEL_HSCIF1_0),
+ 	PINMUX_IPSR_GPSR(IP12_11_8, DU1_DG3),
+ 	PINMUX_IPSR_MSEL(IP12_11_8, SSI_WS1_B, SEL_SSI1_1),
+ 	PINMUX_IPSR_GPSR(IP12_11_8, IRQ1),
+ 	PINMUX_IPSR_GPSR(IP12_15_12, SD2_CLK),
+ 	PINMUX_IPSR_GPSR(IP12_15_12, HSCK1),
+ 	PINMUX_IPSR_GPSR(IP12_15_12, DU1_DG4),
+ 	PINMUX_IPSR_MSEL(IP12_15_12, SSI_SCK1_B, SEL_SSI1_1),
+ 	PINMUX_IPSR_GPSR(IP12_19_16, SD2_CMD),
+ 	PINMUX_IPSR_MSEL(IP12_19_16, SCIF1_SCK_A, SEL_SCIF1_0),
+ 	PINMUX_IPSR_MSEL(IP12_19_16, TCLK2_A, SEL_TMU2_0),
+ 	PINMUX_IPSR_GPSR(IP12_19_16, DU1_DG5),
+ 	PINMUX_IPSR_MSEL(IP12_19_16, SSI_SCK2_B, SEL_SSI2_1),
+ 	PINMUX_IPSR_GPSR(IP12_19_16, PWM3_A),
+ 	PINMUX_IPSR_GPSR(IP12_23_20, SD2_DAT0),
+ 	PINMUX_IPSR_MSEL(IP12_23_20, RX1_A, SEL_SCIF1_0),
+ 	PINMUX_IPSR_MSEL(IP12_23_20, SCL1_E, SEL_I2C01_4),
+ 	PINMUX_IPSR_GPSR(IP12_23_20, DU1_DG6),
+ 	PINMUX_IPSR_MSEL(IP12_23_20, SSI_SDATA1_B, SEL_SSI1_1),
+ 	PINMUX_IPSR_GPSR(IP12_27_24, SD2_DAT1),
+ 	PINMUX_IPSR_MSEL(IP12_27_24, TX1_A, SEL_SCIF1_0),
+ 	PINMUX_IPSR_MSEL(IP12_27_24, SDA1_E, SEL_I2C01_4),
+ 	PINMUX_IPSR_GPSR(IP12_27_24, DU1_DG7),
+ 	PINMUX_IPSR_MSEL(IP12_27_24, SSI_WS2_B, SEL_SSI2_1),
+ 	PINMUX_IPSR_GPSR(IP12_31_28, SD2_DAT2),
+ 	PINMUX_IPSR_MSEL(IP12_31_28, RX2_A, SEL_SCIF2_0),
+ 	PINMUX_IPSR_GPSR(IP12_31_28, DU1_DB0),
+ 	PINMUX_IPSR_MSEL(IP12_31_28, SSI_SDATA2_B, SEL_SSI2_1),
+
+ 	/* IPSR13 */
+ 	PINMUX_IPSR_GPSR(IP13_3_0, SD2_DAT3),
+ 	PINMUX_IPSR_MSEL(IP13_3_0, TX2_A, SEL_SCIF2_0),
+ 	PINMUX_IPSR_GPSR(IP13_3_0, DU1_DB1),
+ 	PINMUX_IPSR_MSEL(IP13_3_0, SSI_WS9_B, SEL_SSI9_1),
+ 	PINMUX_IPSR_GPSR(IP13_7_4, SD2_CD),
+ 	PINMUX_IPSR_MSEL(IP13_7_4, SCIF2_SCK_A, SEL_SCIF2_CLK_0),
+ 	PINMUX_IPSR_GPSR(IP13_7_4, DU1_DB2),
+ 	PINMUX_IPSR_MSEL(IP13_7_4, SSI_SCK9_B, SEL_SSI9_1),
+ 	PINMUX_IPSR_GPSR(IP13_11_8, SD2_WP),
+ 	PINMUX_IPSR_GPSR(IP13_11_8, SCIF3_SCK),
+ 	PINMUX_IPSR_GPSR(IP13_11_8, DU1_DB3),
+ 	PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA9_B, SEL_SSI9_1),
+ 	PINMUX_IPSR_MSEL(IP13_15_12, RX3_A, SEL_SCIF3_0),
+ 	PINMUX_IPSR_MSEL(IP13_15_12, SCL1_C, SEL_I2C01_2),
+ 	PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_RXD_B, SEL_MSIOF1_1),
+ 	PINMUX_IPSR_GPSR(IP13_15_12, DU1_DB4),
+ 	PINMUX_IPSR_MSEL(IP13_15_12, AUDIO_CLKA_C, SEL_ADGA_2),
+ 	PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA4_B, SEL_SSI4_1),
+ 	PINMUX_IPSR_MSEL(IP13_19_16, TX3_A, SEL_SCIF3_0),
+ 	PINMUX_IPSR_MSEL(IP13_19_16, SDA1_C, SEL_I2C01_2),
+ 	PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_TXD_B, SEL_MSIOF1_1),
+ 	PINMUX_IPSR_GPSR(IP13_19_16, DU1_DB5),
+ 	PINMUX_IPSR_MSEL(IP13_19_16, AUDIO_CLKB_C, SEL_ADGB_2),
+ 	PINMUX_IPSR_MSEL(IP13_19_16, SSI_WS4_B, SEL_SSI4_1),
+ 	PINMUX_IPSR_MSEL(IP13_23_20, SCL2_A, SEL_I2C02_0),
+ 	PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_SCK_B, SEL_MSIOF1_1),
+ 	PINMUX_IPSR_GPSR(IP13_23_20, DU1_DB6),
+ 	PINMUX_IPSR_MSEL(IP13_23_20, AUDIO_CLKC_C, SEL_ADGC_2),
+ 	PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK4_B, SEL_SSI4_1),
+ 	PINMUX_IPSR_MSEL(IP13_27_24, SDA2_A, SEL_I2C02_0),
+ 	PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SYNC_B, SEL_MSIOF1_1),
+ 	PINMUX_IPSR_GPSR(IP13_27_24, DU1_DB7),
+ 	PINMUX_IPSR_GPSR(IP13_27_24, AUDIO_CLKOUT_C),
+ 	PINMUX_IPSR_MSEL(IP13_31_28, SSI_SCK5_A, SEL_SSI5_0),
+ 	PINMUX_IPSR_GPSR(IP13_31_28, DU1_DOTCLKOUT1),
+
+ 	/* IPSR14 */
+ 	PINMUX_IPSR_MSEL(IP14_3_0, SSI_WS5_A, SEL_SSI5_0),
+ 	PINMUX_IPSR_MSEL(IP14_3_0, SCL3_C, SEL_I2C03_2),
+ 	PINMUX_IPSR_GPSR(IP14_3_0, DU1_DOTCLKIN),
+ 	PINMUX_IPSR_MSEL(IP14_7_4, SSI_SDATA5_A, SEL_SSI5_0),
+ 	PINMUX_IPSR_MSEL(IP14_7_4, SDA3_C, SEL_I2C03_2),
+ 	PINMUX_IPSR_GPSR(IP14_7_4, DU1_DOTCLKOUT0),
+ 	PINMUX_IPSR_MSEL(IP14_11_8, SSI_SCK6_A, SEL_SSI6_0),
+ 	PINMUX_IPSR_GPSR(IP14_11_8, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ 	PINMUX_IPSR_MSEL(IP14_15_12, SSI_WS6_A, SEL_SSI6_0),
+ 	PINMUX_IPSR_MSEL(IP14_15_12, SCL4_C, SEL_I2C04_2),
+ 	PINMUX_IPSR_GPSR(IP14_15_12, DU1_EXHSYNC_DU1_HSYNC),
+ 	PINMUX_IPSR_MSEL(IP14_19_16, SSI_SDATA6_A, SEL_SSI6_0),
+ 	PINMUX_IPSR_MSEL(IP14_19_16, SDA4_C, SEL_I2C04_2),
+ 	PINMUX_IPSR_GPSR(IP14_19_16, DU1_EXVSYNC_DU1_VSYNC),
+ 	PINMUX_IPSR_MSEL(IP14_23_20, SSI_SCK78_A, SEL_SSI7_0),
+ 	PINMUX_IPSR_MSEL(IP14_23_20, SDA4_E, SEL_I2C04_4),
+ 	PINMUX_IPSR_GPSR(IP14_23_20, DU1_DISP),
+ 	PINMUX_IPSR_MSEL(IP14_27_24, SSI_WS78_A, SEL_SSI7_0),
+ 	PINMUX_IPSR_MSEL(IP14_27_24, SCL4_E, SEL_I2C04_4),
+ 	PINMUX_IPSR_GPSR(IP14_27_24, DU1_CDE),
+ 	PINMUX_IPSR_MSEL(IP14_31_28, SSI_SDATA7_A, SEL_SSI7_0),
+ 	PINMUX_IPSR_GPSR(IP14_31_28, IRQ8),
+ 	PINMUX_IPSR_MSEL(IP14_31_28, AUDIO_CLKA_D, SEL_ADGA_3),
+ 	PINMUX_IPSR_MSEL(IP14_31_28, CAN_CLK_D, SEL_CANCLK_3),
+ 	PINMUX_IPSR_GPSR(IP14_31_28, VI0_G5),
+
+ 	/* IPSR15 */
+ 	PINMUX_IPSR_MSEL(IP15_3_0, SSI_SCK0129_A, SEL_SSI0_0),
+ 	PINMUX_IPSR_MSEL(IP15_3_0, MSIOF1_RXD_A, SEL_MSIOF1_0),
+ 	PINMUX_IPSR_MSEL(IP15_3_0, RX5_D, SEL_SCIF5_3),
+ 	PINMUX_IPSR_GPSR(IP15_3_0, VI0_G6),
+ 	PINMUX_IPSR_MSEL(IP15_7_4, SSI_WS0129_A, SEL_SSI0_0),
+ 	PINMUX_IPSR_MSEL(IP15_7_4, MSIOF1_TXD_A, SEL_MSIOF1_0),
+ 	PINMUX_IPSR_MSEL(IP15_7_4, TX5_D, SEL_SCIF5_3),
+ 	PINMUX_IPSR_GPSR(IP15_7_4, VI0_G7),
+ 	PINMUX_IPSR_MSEL(IP15_11_8, SSI_SDATA0_A, SEL_SSI0_0),
+ 	PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SYNC_A, SEL_MSIOF1_0),
+ 	PINMUX_IPSR_GPSR(IP15_11_8, PWM0_C),
+ 	PINMUX_IPSR_GPSR(IP15_11_8, VI0_R0),
+ 	PINMUX_IPSR_GPSR(IP15_15_12, SSI_SCK34),
+ 	PINMUX_IPSR_MSEL(IP15_15_12, MSIOF1_SCK_A, SEL_MSIOF1_0),
+ 	PINMUX_IPSR_GPSR(IP15_15_12, AVB_MDC),
+ 	PINMUX_IPSR_GPSR(IP15_15_12, DACK1),
+ 	PINMUX_IPSR_GPSR(IP15_15_12, VI0_R1),
+ 	PINMUX_IPSR_GPSR(IP15_19_16, SSI_WS34),
+ 	PINMUX_IPSR_MSEL(IP15_19_16, MSIOF1_SS1_A, SEL_MSIOF1_0),
+ 	PINMUX_IPSR_GPSR(IP15_19_16, AVB_MDIO),
+ 	PINMUX_IPSR_MSEL(IP15_19_16, CAN1_RX_A, SEL_CAN1_0),
+ 	PINMUX_IPSR_GPSR(IP15_19_16, DREQ1_N),
+ 	PINMUX_IPSR_GPSR(IP15_19_16, VI0_R2),
+ 	PINMUX_IPSR_GPSR(IP15_23_20, SSI_SDATA3),
+ 	PINMUX_IPSR_MSEL(IP15_23_20, MSIOF1_SS2_A, SEL_MSIOF1_0),
+ 	PINMUX_IPSR_GPSR(IP15_23_20, AVB_LINK),
+ 	PINMUX_IPSR_MSEL(IP15_23_20, CAN1_TX_A, SEL_CAN1_0),
+ 	PINMUX_IPSR_GPSR(IP15_23_20, DREQ2_N),
+ 	PINMUX_IPSR_GPSR(IP15_23_20, VI0_R3),
+ 	PINMUX_IPSR_MSEL(IP15_27_24, SSI_SCK4_A, SEL_SSI4_0),
+ 	PINMUX_IPSR_GPSR(IP15_27_24, AVB_MAGIC),
+ 	PINMUX_IPSR_GPSR(IP15_27_24, VI0_R4),
+ 	PINMUX_IPSR_MSEL(IP15_31_28, SSI_WS4_A, SEL_SSI4_0),
+ 	PINMUX_IPSR_GPSR(IP15_31_28, AVB_PHY_INT),
+ 	PINMUX_IPSR_GPSR(IP15_31_28, VI0_R5),
+
+ 	/* IPSR16 */
+ 	PINMUX_IPSR_MSEL(IP16_3_0, SSI_SDATA4_A, SEL_SSI4_0),
+ 	PINMUX_IPSR_GPSR(IP16_3_0, AVB_CRS),
+ 	PINMUX_IPSR_GPSR(IP16_3_0, VI0_R6),
+ 	PINMUX_IPSR_MSEL(IP16_7_4, SSI_SCK1_A, SEL_SSI1_0),
+ 	PINMUX_IPSR_MSEL(IP16_7_4, SCIF1_SCK_B, SEL_SCIF1_1),
+ 	PINMUX_IPSR_GPSR(IP16_7_4, PWM1_D),
+ 	PINMUX_IPSR_GPSR(IP16_7_4, IRQ9),
+ 	PINMUX_IPSR_MSEL(IP16_7_4, REMOCON_A, SEL_RCN_0),
+ 	PINMUX_IPSR_GPSR(IP16_7_4, DACK2),
+ 	PINMUX_IPSR_GPSR(IP16_7_4, VI0_CLK),
+ 	PINMUX_IPSR_GPSR(IP16_7_4, AVB_COL),
+ 	PINMUX_IPSR_MSEL(IP16_11_8, SSI_SDATA8_A, SEL_SSI8_0),
+ 	PINMUX_IPSR_MSEL(IP16_11_8, RX1_B, SEL_SCIF1_1),
+ 	PINMUX_IPSR_MSEL(IP16_11_8, CAN0_RX_D, SEL_CAN0_3),
+ 	PINMUX_IPSR_MSEL(IP16_11_8, AVB_AVTP_CAPTURE_B, SEL_AVB_1),
+ 	PINMUX_IPSR_GPSR(IP16_11_8, VI0_R7),
+ 	PINMUX_IPSR_MSEL(IP16_15_12, SSI_WS1_A, SEL_SSI1_0),
+ 	PINMUX_IPSR_MSEL(IP16_15_12, TX1_B, SEL_SCIF1_1),
+ 	PINMUX_IPSR_MSEL(IP16_15_12, CAN0_TX_D, SEL_CAN0_3),
+ 	PINMUX_IPSR_MSEL(IP16_15_12, AVB_AVTP_MATCH_B, SEL_AVB_1),
+ 	PINMUX_IPSR_GPSR(IP16_15_12, VI0_DATA0_VI0_B0),
+ 	PINMUX_IPSR_MSEL(IP16_19_16, SSI_SDATA1_A, SEL_SSI1_0),
+ 	PINMUX_IPSR_MSEL(IP16_19_16, HRX1_B, SEL_HSCIF1_1),
+ 	PINMUX_IPSR_GPSR(IP16_19_16, VI0_DATA1_VI0_B1),
+ 	PINMUX_IPSR_MSEL(IP16_23_20, SSI_SCK2_A, SEL_SSI2_0),
+ 	PINMUX_IPSR_MSEL(IP16_23_20, HTX1_B, SEL_HSCIF1_1),
+ 	PINMUX_IPSR_GPSR(IP16_23_20, AVB_TXD7),
+ 	PINMUX_IPSR_GPSR(IP16_23_20, VI0_DATA2_VI0_B2),
+ 	PINMUX_IPSR_MSEL(IP16_27_24, SSI_WS2_A, SEL_SSI2_0),
+ 	PINMUX_IPSR_MSEL(IP16_27_24, HCTS1_N_B, SEL_HSCIF1_1),
+ 	PINMUX_IPSR_GPSR(IP16_27_24, AVB_TX_ER),
+ 	PINMUX_IPSR_GPSR(IP16_27_24, VI0_DATA3_VI0_B3),
+ 	PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA2_A, SEL_SSI2_0),
+ 	PINMUX_IPSR_MSEL(IP16_31_28, HRTS1_N_B, SEL_HSCIF1_1),
+ 	PINMUX_IPSR_GPSR(IP16_31_28, VI0_DATA4_VI0_B4),
+
+ 	/* IPSR17 */
+ 	PINMUX_IPSR_MSEL(IP17_3_0, SSI_SCK9_A, SEL_SSI9_0),
+ 	PINMUX_IPSR_MSEL(IP17_3_0, RX2_B, SEL_SCIF2_1),
+ 	PINMUX_IPSR_MSEL(IP17_3_0, SCL3_E, SEL_I2C03_4),
+ 	PINMUX_IPSR_GPSR(IP17_3_0, EX_WAIT1),
+ 	PINMUX_IPSR_GPSR(IP17_3_0, VI0_DATA5_VI0_B5),
+ 	PINMUX_IPSR_MSEL(IP17_7_4, SSI_WS9_A, SEL_SSI9_0),
+ 	PINMUX_IPSR_MSEL(IP17_7_4, TX2_B, SEL_SCIF2_1),
+ 	PINMUX_IPSR_MSEL(IP17_7_4, SDA3_E, SEL_I2C03_4),
+ 	PINMUX_IPSR_GPSR(IP17_7_4, VI0_DATA6_VI0_B6),
+ 	PINMUX_IPSR_MSEL(IP17_11_8, SSI_SDATA9_A, SEL_SSI9_0),
+ 	PINMUX_IPSR_GPSR(IP17_11_8, SCIF2_SCK_B),
+ 	PINMUX_IPSR_GPSR(IP17_11_8, PWM2_D),
+ 	PINMUX_IPSR_GPSR(IP17_11_8, VI0_DATA7_VI0_B7),
+ 	PINMUX_IPSR_MSEL(IP17_15_12, AUDIO_CLKA_A, SEL_ADGA_0),
+ 	PINMUX_IPSR_MSEL(IP17_15_12, SCL0_B, SEL_I2C00_1),
+ 	PINMUX_IPSR_GPSR(IP17_15_12, VI0_CLKENB),
+ 	PINMUX_IPSR_MSEL(IP17_19_16, AUDIO_CLKB_A, SEL_ADGB_0),
+ 	PINMUX_IPSR_MSEL(IP17_19_16, SDA0_B, SEL_I2C00_1),
+ 	PINMUX_IPSR_GPSR(IP17_19_16, VI0_FIELD),
+ 	PINMUX_IPSR_MSEL(IP17_23_20, AUDIO_CLKC_A, SEL_ADGC_0),
+ 	PINMUX_IPSR_MSEL(IP17_23_20, SCL4_B, SEL_I2C04_1),
+ 	PINMUX_IPSR_GPSR(IP17_23_20, VI0_HSYNC_N),
+ 	PINMUX_IPSR_GPSR(IP17_27_24, AUDIO_CLKOUT_A),
+ 	PINMUX_IPSR_MSEL(IP17_27_24, SDA4_B, SEL_I2C04_1),
+ 	PINMUX_IPSR_GPSR(IP17_27_24, VI0_VSYNC_N),
+ };
+
+ /*
+  * Pin descriptor table consumed by the sh-pfc core.
+  * PINMUX_GPIO_GP_ALL() expands to one entry per GP pin declared above;
+  * this SoC variant defines no additional non-GPIO pins here.
+  */
+ static const struct sh_pfc_pin pinmux_pins[] = {
+ 	PINMUX_GPIO_GP_ALL(),
+ };
+
+ /* - MMC -------------------------------------------------------------------- */
+ /*
+  * MMC0 pin groups (1-, 4- and 8-bit bus widths plus CLK/CMD control).
+  * The MMC0_*_SDHI1_* mark names show these pads are shared with SDHI1.
+  */
+ static const unsigned int mmc_data1_pins[] = {
+ 	/* D0 */
+ 	RCAR_GP_PIN(0, 15),
+ };
+ static const unsigned int mmc_data1_mux[] = {
+ 	MMC0_D0_SDHI1_D0_MARK,
+ };
+ static const unsigned int mmc_data4_pins[] = {
+ 	/* D[0:3] */
+ 	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 16),
+ 	RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 18),
+ };
+ static const unsigned int mmc_data4_mux[] = {
+ 	MMC0_D0_SDHI1_D0_MARK, MMC0_D1_SDHI1_D1_MARK,
+ 	MMC0_D2_SDHI1_D2_MARK, MMC0_D3_SDHI1_D3_MARK,
+ };
+ static const unsigned int mmc_data8_pins[] = {
+ 	/* D[0:7] */
+ 	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 16),
+ 	RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 18),
+ 	RCAR_GP_PIN(0, 19), RCAR_GP_PIN(0, 20),
+ 	RCAR_GP_PIN(0, 21), RCAR_GP_PIN(0, 22),
+ };
+ static const unsigned int mmc_data8_mux[] = {
+ 	MMC0_D0_SDHI1_D0_MARK, MMC0_D1_SDHI1_D1_MARK,
+ 	MMC0_D2_SDHI1_D2_MARK, MMC0_D3_SDHI1_D3_MARK,
+ 	MMC0_D4_MARK, MMC0_D5_MARK,
+ 	MMC0_D6_MARK, MMC0_D7_MARK,
+ };
+ static const unsigned int mmc_ctrl_pins[] = {
+ 	/* CLK, CMD */
+ 	RCAR_GP_PIN(0, 13), RCAR_GP_PIN(0, 14),
+ };
+ static const unsigned int mmc_ctrl_mux[] = {
+ 	MMC0_CLK_SDHI1_CLK_MARK, MMC0_CMD_SDHI1_CMD_MARK,
+ };
+/* - SCIF0 ------------------------------------------------------------------ */
+/* The _a/_b/_c/_d suffixes are alternative pin assignments of the same signals. */
+static const unsigned int scif0_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 11),
+};
+static const unsigned int scif0_data_a_mux[] = {
+	RX0_A_MARK, TX0_A_MARK,
+};
+static const unsigned int scif0_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(3, 11), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int scif0_data_b_mux[] = {
+	RX0_B_MARK, TX0_B_MARK,
+};
+static const unsigned int scif0_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int scif0_data_c_mux[] = {
+	RX0_C_MARK, TX0_C_MARK,
+};
+static const unsigned int scif0_data_d_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int scif0_data_d_mux[] = {
+	RX0_D_MARK, TX0_D_MARK,
+};
+/* - SCIF1 ------------------------------------------------------------------ */
+/* The _a/_b/_c/_d suffixes are alternative pin assignments of the same signals. */
+static const unsigned int scif1_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(4, 16), RCAR_GP_PIN(4, 17),
+};
+static const unsigned int scif1_data_a_mux[] = {
+	RX1_A_MARK, TX1_A_MARK,
+};
+static const unsigned int scif1_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(4, 15),
+};
+static const unsigned int scif1_clk_a_mux[] = {
+	SCIF1_SCK_A_MARK,
+};
+static const unsigned int scif1_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 20),
+};
+static const unsigned int scif1_data_b_mux[] = {
+	RX1_B_MARK, TX1_B_MARK,
+};
+static const unsigned int scif1_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 18),
+};
+static const unsigned int scif1_clk_b_mux[] = {
+	SCIF1_SCK_B_MARK,
+};
+static const unsigned int scif1_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int scif1_data_c_mux[] = {
+	RX1_C_MARK, TX1_C_MARK,
+};
+static const unsigned int scif1_clk_c_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(1, 7),
+};
+static const unsigned int scif1_clk_c_mux[] = {
+	SCIF1_SCK_C_MARK,
+};
+static const unsigned int scif1_data_d_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+};
+static const unsigned int scif1_data_d_mux[] = {
+	RX1_D_MARK, TX1_D_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+/* The _a/_b/_c suffixes are alternative pin assignments of the same signals. */
+static const unsigned int scif2_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(4, 18), RCAR_GP_PIN(4, 19),
+};
+static const unsigned int scif2_data_a_mux[] = {
+	RX2_A_MARK, TX2_A_MARK,
+};
+static const unsigned int scif2_clk_a_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(4, 20),
+};
+static const unsigned int scif2_clk_a_mux[] = {
+	SCIF2_SCK_A_MARK,
+};
+static const unsigned int scif2_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 25), RCAR_GP_PIN(5, 26),
+};
+static const unsigned int scif2_data_b_mux[] = {
+	RX2_B_MARK, TX2_B_MARK,
+};
+static const unsigned int scif2_clk_b_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(5, 27),
+};
+static const unsigned int scif2_clk_b_mux[] = {
+	SCIF2_SCK_B_MARK,
+};
+static const unsigned int scif2_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
+};
+static const unsigned int scif2_data_c_mux[] = {
+	RX2_C_MARK, TX2_C_MARK,
+};
+/* - SCIF3 ------------------------------------------------------------------ */
+/* The _a/_b/_c suffixes are alternative pin assignments of the same signals. */
+static const unsigned int scif3_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(4, 22), RCAR_GP_PIN(4, 23),
+};
+static const unsigned int scif3_data_a_mux[] = {
+	RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+	/* SCK */
+	RCAR_GP_PIN(4, 21),
+};
+static const unsigned int scif3_clk_mux[] = {
+	SCIF3_SCK_MARK,
+};
+static const unsigned int scif3_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
+};
+static const unsigned int scif3_data_b_mux[] = {
+	RX3_B_MARK, TX3_B_MARK,
+};
+static const unsigned int scif3_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6),
+};
+static const unsigned int scif3_data_c_mux[] = {
+	RX3_C_MARK, TX3_C_MARK,
+};
+/* - SCIF4 ------------------------------------------------------------------ */
+/* The _a.._e suffixes are alternative pin assignments of the same signals. */
+static const unsigned int scif4_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+};
+static const unsigned int scif4_data_a_mux[] = {
+	RX4_A_MARK, TX4_A_MARK,
+};
+static const unsigned int scif4_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif4_data_b_mux[] = {
+	RX4_B_MARK, TX4_B_MARK,
+};
+static const unsigned int scif4_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 13), RCAR_GP_PIN(1, 14),
+};
+static const unsigned int scif4_data_c_mux[] = {
+	RX4_C_MARK, TX4_C_MARK,
+};
+static const unsigned int scif4_data_d_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int scif4_data_d_mux[] = {
+	RX4_D_MARK, TX4_D_MARK,
+};
+static const unsigned int scif4_data_e_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 8),
+};
+static const unsigned int scif4_data_e_mux[] = {
+	RX4_E_MARK, TX4_E_MARK,
+};
+/* - SCIF5 ------------------------------------------------------------------ */
+/* The _a.._f suffixes are alternative pin assignments of the same signals. */
+static const unsigned int scif5_data_a_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+static const unsigned int scif5_data_a_mux[] = {
+	RX5_A_MARK, TX5_A_MARK,
+};
+static const unsigned int scif5_data_b_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int scif5_data_b_mux[] = {
+	RX5_B_MARK, TX5_B_MARK,
+};
+static const unsigned int scif5_data_c_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int scif5_data_c_mux[] = {
+	RX5_C_MARK, TX5_C_MARK,
+};
+static const unsigned int scif5_data_d_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int scif5_data_d_mux[] = {
+	RX5_D_MARK, TX5_D_MARK,
+};
+static const unsigned int scif5_data_e_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 10),
+};
+static const unsigned int scif5_data_e_mux[] = {
+	RX5_E_MARK, TX5_E_MARK,
+};
+static const unsigned int scif5_data_f_pins[] = {
+	/* RX, TX */
+	RCAR_GP_PIN(3, 27), RCAR_GP_PIN(3, 28),
+};
+static const unsigned int scif5_data_f_mux[] = {
+	RX5_F_MARK, TX5_F_MARK,
+};
+/* - SCIF Clock ------------------------------------------------------------- */
+/* External SCIF clock input; _a/_b are alternative pin assignments. */
+static const unsigned int scif_clk_a_pins[] = {
+	/* SCIF_CLK */
+	RCAR_GP_PIN(1, 22),
+};
+static const unsigned int scif_clk_a_mux[] = {
+	SCIF_CLK_A_MARK,
+};
+static const unsigned int scif_clk_b_pins[] = {
+	/* SCIF_CLK */
+	RCAR_GP_PIN(3, 29),
+};
+static const unsigned int scif_clk_b_mux[] = {
+	SCIF_CLK_B_MARK,
+};
+
+/*
+ * Pin groups exported to the pinctrl core.  SH_PFC_PIN_GROUP(n) is expected
+ * to tie together the n_pins[] and n_mux[] arrays defined above — TODO(review):
+ * confirm against the macro definition in the shared sh-pfc header.
+ */
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+	SH_PFC_PIN_GROUP(mmc_data1),
+	SH_PFC_PIN_GROUP(mmc_data4),
+	SH_PFC_PIN_GROUP(mmc_data8),
+	SH_PFC_PIN_GROUP(mmc_ctrl),
+	SH_PFC_PIN_GROUP(scif0_data_a),
+	SH_PFC_PIN_GROUP(scif0_data_b),
+	SH_PFC_PIN_GROUP(scif0_data_c),
+	SH_PFC_PIN_GROUP(scif0_data_d),
+	SH_PFC_PIN_GROUP(scif1_data_a),
+	SH_PFC_PIN_GROUP(scif1_clk_a),
+	SH_PFC_PIN_GROUP(scif1_data_b),
+	SH_PFC_PIN_GROUP(scif1_clk_b),
+	SH_PFC_PIN_GROUP(scif1_data_c),
+	SH_PFC_PIN_GROUP(scif1_clk_c),
+	SH_PFC_PIN_GROUP(scif1_data_d),
+	SH_PFC_PIN_GROUP(scif2_data_a),
+	SH_PFC_PIN_GROUP(scif2_clk_a),
+	SH_PFC_PIN_GROUP(scif2_data_b),
+	SH_PFC_PIN_GROUP(scif2_clk_b),
+	SH_PFC_PIN_GROUP(scif2_data_c),
+	SH_PFC_PIN_GROUP(scif3_data_a),
+	SH_PFC_PIN_GROUP(scif3_clk),
+	SH_PFC_PIN_GROUP(scif3_data_b),
+	SH_PFC_PIN_GROUP(scif3_data_c),
+	SH_PFC_PIN_GROUP(scif4_data_a),
+	SH_PFC_PIN_GROUP(scif4_data_b),
+	SH_PFC_PIN_GROUP(scif4_data_c),
+	SH_PFC_PIN_GROUP(scif4_data_d),
+	SH_PFC_PIN_GROUP(scif4_data_e),
+	SH_PFC_PIN_GROUP(scif5_data_a),
+	SH_PFC_PIN_GROUP(scif5_data_b),
+	SH_PFC_PIN_GROUP(scif5_data_c),
+	SH_PFC_PIN_GROUP(scif5_data_d),
+	SH_PFC_PIN_GROUP(scif5_data_e),
+	SH_PFC_PIN_GROUP(scif5_data_f),
+	SH_PFC_PIN_GROUP(scif_clk_a),
+	SH_PFC_PIN_GROUP(scif_clk_b),
+};
+
+/*
+ * Per-function lists of selectable group names.  Each string must match a
+ * group registered in pinmux_groups[] above.
+ */
+static const char * const mmc_groups[] = {
+	"mmc_data1",
+	"mmc_data4",
+	"mmc_data8",
+	"mmc_ctrl",
+};
+
+static const char * const scif0_groups[] = {
+	"scif0_data_a",
+	"scif0_data_b",
+	"scif0_data_c",
+	"scif0_data_d",
+};
+
+static const char * const scif1_groups[] = {
+	"scif1_data_a",
+	"scif1_clk_a",
+	"scif1_data_b",
+	"scif1_clk_b",
+	"scif1_data_c",
+	"scif1_clk_c",
+	"scif1_data_d",
+};
+
+static const char * const scif2_groups[] = {
+	"scif2_data_a",
+	"scif2_clk_a",
+	"scif2_data_b",
+	"scif2_clk_b",
+	"scif2_data_c",
+};
+
+static const char * const scif3_groups[] = {
+	"scif3_data_a",
+	"scif3_clk",
+	"scif3_data_b",
+	"scif3_data_c",
+};
+
+static const char * const scif4_groups[] = {
+	"scif4_data_a",
+	"scif4_data_b",
+	"scif4_data_c",
+	"scif4_data_d",
+	"scif4_data_e",
+};
+
+static const char * const scif5_groups[] = {
+	"scif5_data_a",
+	"scif5_data_b",
+	"scif5_data_c",
+	"scif5_data_d",
+	"scif5_data_e",
+	"scif5_data_f",
+};
+
+static const char * const scif_clk_groups[] = {
+	"scif_clk_a",
+	"scif_clk_b",
+};
+
+/*
+ * Pinmux functions exported to the pinctrl core.  SH_PFC_FUNCTION(n) is
+ * expected to reference the n_groups[] name list defined above — TODO(review):
+ * confirm against the macro definition in the shared sh-pfc header.
+ */
+static const struct sh_pfc_function pinmux_functions[] = {
+	SH_PFC_FUNCTION(mmc),
+	SH_PFC_FUNCTION(scif0),
+	SH_PFC_FUNCTION(scif1),
+	SH_PFC_FUNCTION(scif2),
+	SH_PFC_FUNCTION(scif3),
+	SH_PFC_FUNCTION(scif4),
+	SH_PFC_FUNCTION(scif5),
+	SH_PFC_FUNCTION(scif_clk),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_0_22_FN, FN_MMC0_D7,
+ GP_0_21_FN, FN_MMC0_D6,
+ GP_0_20_FN, FN_IP1_7_4,
+ GP_0_19_FN, FN_IP1_3_0,
+ GP_0_18_FN, FN_MMC0_D3_SDHI1_D3,
+ GP_0_17_FN, FN_MMC0_D2_SDHI1_D2,
+ GP_0_16_FN, FN_MMC0_D1_SDHI1_D1,
+ GP_0_15_FN, FN_MMC0_D0_SDHI1_D0,
+ GP_0_14_FN, FN_MMC0_CMD_SDHI1_CMD,
+ GP_0_13_FN, FN_MMC0_CLK_SDHI1_CLK,
+ GP_0_12_FN, FN_IP0_31_28,
+ GP_0_11_FN, FN_IP0_27_24,
+ GP_0_10_FN, FN_IP0_23_20,
+ GP_0_9_FN, FN_IP0_19_16,
+ GP_0_8_FN, FN_IP0_15_12,
+ GP_0_7_FN, FN_IP0_11_8,
+ GP_0_6_FN, FN_IP0_7_4,
+ GP_0_5_FN, FN_IP0_3_0,
+ GP_0_4_FN, FN_CLKOUT,
+ GP_0_3_FN, FN_USB1_OVC,
+ GP_0_2_FN, FN_USB1_PWEN,
+ GP_0_1_FN, FN_USB0_OVC,
+ GP_0_0_FN, FN_USB0_PWEN, }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_22_FN, FN_IP4_3_0,
+ GP_1_21_FN, FN_IP3_31_28,
+ GP_1_20_FN, FN_IP3_27_24,
+ GP_1_19_FN, FN_IP3_23_20,
+ GP_1_18_FN, FN_IP3_19_16,
+ GP_1_17_FN, FN_IP3_15_12,
+ GP_1_16_FN, FN_IP3_11_8,
+ GP_1_15_FN, FN_IP3_7_4,
+ GP_1_14_FN, FN_IP3_3_0,
+ GP_1_13_FN, FN_IP2_31_28,
+ GP_1_12_FN, FN_IP2_27_24,
+ GP_1_11_FN, FN_IP2_23_20,
+ GP_1_10_FN, FN_IP2_19_16,
+ GP_1_9_FN, FN_IP2_15_12,
+ GP_1_8_FN, FN_IP2_11_8,
+ GP_1_7_FN, FN_IP2_7_4,
+ GP_1_6_FN, FN_IP2_3_0,
+ GP_1_5_FN, FN_IP1_31_28,
+ GP_1_4_FN, FN_IP1_27_24,
+ GP_1_3_FN, FN_IP1_23_20,
+ GP_1_2_FN, FN_IP1_19_16,
+ GP_1_1_FN, FN_IP1_15_12,
+ GP_1_0_FN, FN_IP1_11_8, }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) {
+ GP_2_31_FN, FN_IP8_3_0,
+ GP_2_30_FN, FN_IP7_31_28,
+ GP_2_29_FN, FN_IP7_27_24,
+ GP_2_28_FN, FN_IP7_23_20,
+ GP_2_27_FN, FN_IP7_19_16,
+ GP_2_26_FN, FN_IP7_15_12,
+ GP_2_25_FN, FN_IP7_11_8,
+ GP_2_24_FN, FN_IP7_7_4,
+ GP_2_23_FN, FN_IP7_3_0,
+ GP_2_22_FN, FN_IP6_31_28,
+ GP_2_21_FN, FN_IP6_27_24,
+ GP_2_20_FN, FN_IP6_23_20,
+ GP_2_19_FN, FN_IP6_19_16,
+ GP_2_18_FN, FN_IP6_15_12,
+ GP_2_17_FN, FN_IP6_11_8,
+ GP_2_16_FN, FN_IP6_7_4,
+ GP_2_15_FN, FN_IP6_3_0,
+ GP_2_14_FN, FN_IP5_31_28,
+ GP_2_13_FN, FN_IP5_27_24,
+ GP_2_12_FN, FN_IP5_23_20,
+ GP_2_11_FN, FN_IP5_19_16,
+ GP_2_10_FN, FN_IP5_15_12,
+ GP_2_9_FN, FN_IP5_11_8,
+ GP_2_8_FN, FN_IP5_7_4,
+ GP_2_7_FN, FN_IP5_3_0,
+ GP_2_6_FN, FN_IP4_31_28,
+ GP_2_5_FN, FN_IP4_27_24,
+ GP_2_4_FN, FN_IP4_23_20,
+ GP_2_3_FN, FN_IP4_19_16,
+ GP_2_2_FN, FN_IP4_15_12,
+ GP_2_1_FN, FN_IP4_11_8,
+ GP_2_0_FN, FN_IP4_7_4, }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) {
+ 0, 0,
+ 0, 0,
+ GP_3_29_FN, FN_IP10_19_16,
+ GP_3_28_FN, FN_IP10_15_12,
+ GP_3_27_FN, FN_IP10_11_8,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_3_16_FN, FN_IP10_7_4,
+ GP_3_15_FN, FN_IP10_3_0,
+ GP_3_14_FN, FN_IP9_31_28,
+ GP_3_13_FN, FN_IP9_27_24,
+ GP_3_12_FN, FN_IP9_23_20,
+ GP_3_11_FN, FN_IP9_19_16,
+ GP_3_10_FN, FN_IP9_15_12,
+ GP_3_9_FN, FN_IP9_11_8,
+ GP_3_8_FN, FN_IP9_7_4,
+ GP_3_7_FN, FN_IP9_3_0,
+ GP_3_6_FN, FN_IP8_31_28,
+ GP_3_5_FN, FN_IP8_27_24,
+ GP_3_4_FN, FN_IP8_23_20,
+ GP_3_3_FN, FN_IP8_19_16,
+ GP_3_2_FN, FN_IP8_15_12,
+ GP_3_1_FN, FN_IP8_11_8,
+ GP_3_0_FN, FN_IP8_7_4, }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_25_FN, FN_IP13_27_24,
+ GP_4_24_FN, FN_IP13_23_20,
+ GP_4_23_FN, FN_IP13_19_16,
+ GP_4_22_FN, FN_IP13_15_12,
+ GP_4_21_FN, FN_IP13_11_8,
+ GP_4_20_FN, FN_IP13_7_4,
+ GP_4_19_FN, FN_IP13_3_0,
+ GP_4_18_FN, FN_IP12_31_28,
+ GP_4_17_FN, FN_IP12_27_24,
+ GP_4_16_FN, FN_IP12_23_20,
+ GP_4_15_FN, FN_IP12_19_16,
+ GP_4_14_FN, FN_IP12_15_12,
+ GP_4_13_FN, FN_IP12_11_8,
+ GP_4_12_FN, FN_IP12_7_4,
+ GP_4_11_FN, FN_IP12_3_0,
+ GP_4_10_FN, FN_IP11_31_28,
+ GP_4_9_FN, FN_IP11_27_24,
+ GP_4_8_FN, FN_IP11_23_20,
+ GP_4_7_FN, FN_IP11_19_16,
+ GP_4_6_FN, FN_IP11_15_12,
+ GP_4_5_FN, FN_IP11_11_8,
+ GP_4_4_FN, FN_IP11_7_4,
+ GP_4_3_FN, FN_IP11_3_0,
+ GP_4_2_FN, FN_IP10_31_28,
+ GP_4_1_FN, FN_IP10_27_24,
+ GP_4_0_FN, FN_IP10_23_20, }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) {
+ GP_5_31_FN, FN_IP17_27_24,
+ GP_5_30_FN, FN_IP17_23_20,
+ GP_5_29_FN, FN_IP17_19_16,
+ GP_5_28_FN, FN_IP17_15_12,
+ GP_5_27_FN, FN_IP17_11_8,
+ GP_5_26_FN, FN_IP17_7_4,
+ GP_5_25_FN, FN_IP17_3_0,
+ GP_5_24_FN, FN_IP16_31_28,
+ GP_5_23_FN, FN_IP16_27_24,
+ GP_5_22_FN, FN_IP16_23_20,
+ GP_5_21_FN, FN_IP16_19_16,
+ GP_5_20_FN, FN_IP16_15_12,
+ GP_5_19_FN, FN_IP16_11_8,
+ GP_5_18_FN, FN_IP16_7_4,
+ GP_5_17_FN, FN_IP16_3_0,
+ GP_5_16_FN, FN_IP15_31_28,
+ GP_5_15_FN, FN_IP15_27_24,
+ GP_5_14_FN, FN_IP15_23_20,
+ GP_5_13_FN, FN_IP15_19_16,
+ GP_5_12_FN, FN_IP15_15_12,
+ GP_5_11_FN, FN_IP15_11_8,
+ GP_5_10_FN, FN_IP15_7_4,
+ GP_5_9_FN, FN_IP15_3_0,
+ GP_5_8_FN, FN_IP14_31_28,
+ GP_5_7_FN, FN_IP14_27_24,
+ GP_5_6_FN, FN_IP14_23_20,
+ GP_5_5_FN, FN_IP14_19_16,
+ GP_5_4_FN, FN_IP14_15_12,
+ GP_5_3_FN, FN_IP14_11_8,
+ GP_5_2_FN, FN_IP14_7_4,
+ GP_5_1_FN, FN_IP14_3_0,
+ GP_5_0_FN, FN_IP13_31_28, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP0_31_28 [4] */
+ FN_SD0_WP, FN_IRQ7, FN_CAN0_TX_A, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_27_24 [4] */
+ FN_SD0_CD, 0, FN_CAN0_RX_A, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_23_20 [4] */
+ FN_SD0_DAT3, 0, 0, FN_SSI_SDATA0_B, FN_TX5_E, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_19_16 [4] */
+ FN_SD0_DAT2, 0, 0, FN_SSI_WS0129_B, FN_RX5_E, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_15_12 [4] */
+ FN_SD0_DAT1, 0, 0, FN_SSI_SCK0129_B, FN_TX4_E, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_11_8 [4] */
+ FN_SD0_DAT0, 0, 0, FN_SSI_SDATA1_C, FN_RX4_E, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_7_4 [4] */
+ FN_SD0_CMD, 0, 0, FN_SSI_WS1_C, FN_TX3_C, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_3_0 [4] */
+ FN_SD0_CLK, 0, 0, FN_SSI_SCK1_C, FN_RX3_C, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP1_31_28 [4] */
+ FN_D5, FN_HRX2, FN_SCL1_B, FN_PWM2_C, FN_TCLK2_B, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_27_24 [4] */
+ FN_D4, 0, FN_IRQ3, FN_TCLK1_A, FN_PWM6_C, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_23_20 [4] */
+ FN_D3, 0, FN_TX4_B, FN_SDA0_D, FN_PWM0_A,
+ FN_MSIOF2_SYNC_C, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_19_16 [4] */
+ FN_D2, 0, FN_RX4_B, FN_SCL0_D, FN_PWM1_C,
+ FN_MSIOF2_SCK_C, FN_SSI_SCK5_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_15_12 [4] */
+ FN_D1, 0, FN_SDA3_B, FN_TX5_B, 0, FN_MSIOF2_TXD_C,
+ FN_SSI_WS5_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_11_8 [4] */
+ FN_D0, 0, FN_SCL3_B, FN_RX5_B, FN_IRQ4,
+ FN_MSIOF2_RXD_C, FN_SSI_SDATA5_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_7_4 [4] */
+ FN_MMC0_D5, FN_SD1_WP, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_3_0 [4] */
+ FN_MMC0_D4, FN_SD1_CD, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP2_31_28 [4] */
+ FN_D13, FN_MSIOF2_SYNC_A, 0, FN_RX4_C, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_27_24 [4] */
+ FN_D12, FN_MSIOF2_SCK_A, FN_HSCK0, 0, FN_CAN_CLK_C,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_23_20 [4] */
+ FN_D11, FN_MSIOF2_TXD_A, FN_HTX0_B, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_19_16 [4] */
+ FN_D10, FN_MSIOF2_RXD_A, FN_HRX0_B, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_15_12 [4] */
+ FN_D9, FN_HRTS2_N, FN_TX1_C, FN_SDA1_D, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_11_8 [4] */
+ FN_D8, FN_HCTS2_N, FN_RX1_C, FN_SCL1_D, FN_PWM3_C, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_7_4 [4] */
+ FN_D7, FN_HSCK2, FN_SCIF1_SCK_C, FN_IRQ6, FN_PWM5_C,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_3_0 [4] */
+ FN_D6, FN_HTX2, FN_SDA1_B, FN_PWM4_C, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP3_31_28 [4] */
+ FN_QSPI0_SSL, FN_WE1_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP3_27_24 [4] */
+ FN_QSPI0_IO3, FN_RD_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP3_23_20 [4] */
+ FN_QSPI0_IO2, FN_CS0_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP3_19_16 [4] */
+ FN_QSPI0_MISO_QSPI0_IO1, FN_RD_WR_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP3_15_12 [4] */
+ FN_QSPI0_MOSI_QSPI0_IO0, FN_BS_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0,
+ /* IP3_11_8 [4] */
+ FN_QSPI0_SPCLK, FN_WE0_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP3_7_4 [4] */
+ FN_D15, FN_MSIOF2_SS2, FN_PWM4_A, 0, FN_CAN1_TX_B, FN_IRQ2,
+ FN_AVB_AVTP_MATCH_A, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP3_3_0 [4] */
+ FN_D14, FN_MSIOF2_SS1, 0, FN_TX4_C, FN_CAN1_RX_B,
+ 0, FN_AVB_AVTP_CAPTURE_A,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP4_31_28 [4] */
+ FN_DU0_DR6, 0, FN_RX2_C, 0, 0, 0, FN_A6, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_27_24 [4] */
+ FN_DU0_DR5, 0, FN_TX1_D, 0, FN_PWM1_B, 0, FN_A5, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_23_20 [4] */
+ FN_DU0_DR4, 0, FN_RX1_D, 0, 0, 0, FN_A4, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP4_19_16 [4] */
+ FN_DU0_DR3, 0, FN_TX0_D, FN_SDA0_E, FN_PWM0_B, 0,
+ FN_A3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_15_12 [4] */
+ FN_DU0_DR2, 0, FN_RX0_D, FN_SCL0_E, 0, 0, FN_A2, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_11_8 [4] */
+ FN_DU0_DR1, 0, FN_TX5_C, FN_SDA2_D, 0, 0, FN_A1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_7_4 [4] */
+ FN_DU0_DR0, 0, FN_RX5_C, FN_SCL2_D, 0, 0, FN_A0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_3_0 [4] */
+ FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK_A, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP5_31_28 [4] */
+ FN_DU0_DG6, 0, FN_HRX1_C, 0, 0, 0, FN_A14, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP5_27_24 [4] */
+ FN_DU0_DG5, 0, FN_HTX0_A, 0, FN_PWM5_B, 0, FN_A13,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_23_20 [4] */
+ FN_DU0_DG4, 0, FN_HRX0_A, 0, 0, 0, FN_A12, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP5_19_16 [4] */
+ FN_DU0_DG3, 0, FN_TX4_D, 0, FN_PWM4_B, 0, FN_A11, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_15_12 [4] */
+ FN_DU0_DG2, 0, FN_RX4_D, 0, 0, 0, FN_A10, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP5_11_8 [4] */
+ FN_DU0_DG1, 0, FN_TX3_B, FN_SDA3_D, FN_PWM3_B, 0,
+ FN_A9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_7_4 [4] */
+ FN_DU0_DG0, 0, FN_RX3_B, FN_SCL3_D, 0, 0, FN_A8, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_3_0 [4] */
+ FN_DU0_DR7, 0, FN_TX2_C, 0, FN_PWM2_B, 0, FN_A7, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP6_31_28 [4] */
+ FN_DU0_DB6, 0, 0, 0, 0, 0, FN_A22, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_27_24 [4] */
+ FN_DU0_DB5, 0, FN_HRTS1_N_C, 0, 0, 0,
+ FN_A21, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_23_20 [4] */
+ FN_DU0_DB4, 0, FN_HCTS1_N_C, 0, 0, 0,
+ FN_A20, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_19_16 [4] */
+ FN_DU0_DB3, 0, FN_HRTS0_N, 0, 0, 0, FN_A19, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP6_15_12 [4] */
+ FN_DU0_DB2, 0, FN_HCTS0_N, 0, 0, 0, FN_A18, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP6_11_8 [4] */
+ FN_DU0_DB1, 0, 0, FN_SDA4_D, FN_CAN0_TX_C, 0, FN_A17,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_7_4 [4] */
+ FN_DU0_DB0, 0, 0, FN_SCL4_D, FN_CAN0_RX_C, 0, FN_A16,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_3_0 [4] */
+ FN_DU0_DG7, 0, FN_HTX1_C, 0, FN_PWM6_B, 0, FN_A15,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP7_31_28 [4] */
+ FN_DU0_DISP, 0, 0, 0, FN_CAN1_RX_C, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP7_27_24 [4] */
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, 0, FN_MSIOF2_SCK_B,
+ 0, 0, 0, FN_DRACK0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_23_20 [4] */
+ FN_DU0_EXVSYNC_DU0_VSYNC, 0, FN_MSIOF2_SYNC_B, 0,
+ 0, 0, FN_DACK0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_19_16 [4] */
+ FN_DU0_EXHSYNC_DU0_HSYNC, 0, FN_MSIOF2_TXD_B, 0,
+ 0, 0, FN_DREQ0_N, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_15_12 [4] */
+ FN_DU0_DOTCLKOUT1, 0, FN_MSIOF2_RXD_B, 0, 0, 0,
+ FN_CS1_N_A26, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_11_8 [4] */
+ FN_DU0_DOTCLKOUT0, 0, 0, 0, 0, 0, FN_A25, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP7_7_4 [4] */
+ FN_DU0_DOTCLKIN, 0, 0, 0, 0, 0, FN_A24, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP7_3_0 [4] */
+ FN_DU0_DB7, 0, 0, 0, 0, 0, FN_A23, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR8", 0xE6060060, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP8_31_28 [4] */
+ FN_VI1_DATA5, 0, 0, 0, FN_AVB_RXD4, FN_ETH_LINK, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_27_24 [4] */
+ FN_VI1_DATA4, 0, 0, 0, FN_AVB_RXD3, FN_ETH_RX_ER, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_23_20 [4] */
+ FN_VI1_DATA3, 0, 0, 0, FN_AVB_RXD2, FN_ETH_MDIO, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_19_16 [4] */
+ FN_VI1_DATA2, 0, 0, 0, FN_AVB_RXD1, FN_ETH_RXD1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_15_12 [4] */
+ FN_VI1_DATA1, 0, 0, 0, FN_AVB_RXD0, FN_ETH_RXD0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_11_8 [4] */
+ FN_VI1_DATA0, 0, 0, 0, FN_AVB_RX_DV, FN_ETH_CRS_DV, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP8_7_4 [4] */
+ FN_VI1_CLK, 0, 0, 0, FN_AVB_RX_CLK, FN_ETH_REF_CLK, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP8_3_0 [4] */
+ FN_DU0_CDE, 0, 0, 0, FN_CAN1_TX_C, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR9", 0xE6060064, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP9_31_28 [4] */
+ FN_VI1_DATA9, 0, 0, FN_SDA2_B, FN_AVB_TXD0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP9_27_24 [4] */
+ FN_VI1_DATA8, 0, 0, FN_SCL2_B, FN_AVB_TX_EN, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP9_23_20 [4] */
+ FN_VI1_VSYNC_N, FN_TX0_B, FN_SDA0_C, FN_AUDIO_CLKOUT_B,
+ FN_AVB_TX_CLK, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP9_19_16 [4] */
+ FN_VI1_HSYNC_N, FN_RX0_B, FN_SCL0_C, 0, FN_AVB_GTXREFCLK,
+ FN_ETH_MDC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP9_15_12 [4] */
+ FN_VI1_FIELD, FN_SDA3_A, 0, 0, FN_AVB_RX_ER, FN_ETH_TXD0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP9_11_8 [4] */
+ FN_VI1_CLKENB, FN_SCL3_A, 0, 0, FN_AVB_RXD7, FN_ETH_MAGIC, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP9_7_4 [4] */
+ FN_VI1_DATA7, 0, 0, 0, FN_AVB_RXD6, FN_ETH_TX_EN, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP9_3_0 [4] */
+ FN_VI1_DATA6, 0, 0, 0, FN_AVB_RXD5, FN_ETH_TXD1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR10", 0xE6060068, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP10_31_28 [4] */
+ FN_SCL1_A, FN_RX4_A, FN_PWM5_D, FN_DU1_DR0, 0, 0,
+ FN_SSI_SCK6_B, FN_VI0_G0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_27_24 [4] */
+ FN_SDA0_A, FN_TX0_C, FN_IRQ5, FN_CAN_CLK_A, FN_AVB_GTX_CLK,
+ FN_CAN1_TX_D, FN_DVC_MUTE, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_23_20 [4] */
+ FN_SCL0_A, FN_RX0_C, FN_PWM5_A, FN_TCLK1_B, FN_AVB_TXD6,
+ FN_CAN1_RX_D, FN_MSIOF0_SYNC_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_19_16 [4] */
+ FN_AVB_TXD5, FN_SCIF_CLK_B, FN_AUDIO_CLKC_B, 0,
+ FN_SSI_SDATA1_D, 0, FN_MSIOF0_SCK_B, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP10_15_12 [4] */
+ FN_AVB_TXD4, 0, FN_AUDIO_CLKB_B, 0, FN_SSI_WS1_D, FN_TX5_F,
+ FN_MSIOF0_TXD_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_11_8 [4] */
+ FN_AVB_TXD3, 0, FN_AUDIO_CLKA_B, 0, FN_SSI_SCK1_D, FN_RX5_F,
+ FN_MSIOF0_RXD_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_7_4 [4] */
+ FN_VI1_DATA11, 0, 0, FN_CAN0_TX_B, FN_AVB_TXD2, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_3_0 [4] */
+ FN_VI1_DATA10, 0, 0, FN_CAN0_RX_B, FN_AVB_TXD1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR11", 0xE606006C, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP11_31_28 [4] */
+ FN_HRX1_A, FN_SCL4_A, FN_PWM6_A, FN_DU1_DG0, FN_RX0_A, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_27_24 [4] */
+ FN_MSIOF0_SS2_A, 0, 0, FN_DU1_DR7, 0,
+ FN_QSPI1_SSL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_23_20 [4] */
+ FN_MSIOF0_SS1_A, 0, 0, FN_DU1_DR6, 0,
+ FN_QSPI1_IO3, FN_SSI_SDATA8_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_19_16 [4] */
+ FN_MSIOF0_SYNC_A, FN_PWM1_A, 0, FN_DU1_DR5,
+ 0, FN_QSPI1_IO2, FN_SSI_SDATA7_B, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP11_15_12 [4] */
+ FN_MSIOF0_SCK_A, FN_IRQ0, 0, FN_DU1_DR4,
+ 0, FN_QSPI1_SPCLK, FN_SSI_SCK78_B, FN_VI0_G4,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_11_8 [4] */
+ FN_MSIOF0_TXD_A, FN_TX5_A, FN_SDA2_C, FN_DU1_DR3, 0,
+ FN_QSPI1_MISO_QSPI1_IO1, FN_SSI_WS78_B, FN_VI0_G3,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_7_4 [4] */
+ FN_MSIOF0_RXD_A, FN_RX5_A, FN_SCL2_C, FN_DU1_DR2, 0,
+ FN_QSPI1_MOSI_QSPI1_IO0, FN_SSI_SDATA6_B, FN_VI0_G2,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_3_0 [4] */
+ FN_SDA1_A, FN_TX4_A, 0, FN_DU1_DR1, 0, 0, FN_SSI_WS6_B,
+ FN_VI0_G1, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR12", 0xE6060070, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP12_31_28 [4] */
+ FN_SD2_DAT2, FN_RX2_A, 0, FN_DU1_DB0, FN_SSI_SDATA2_B, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_27_24 [4] */
+ FN_SD2_DAT1, FN_TX1_A, FN_SDA1_E, FN_DU1_DG7, FN_SSI_WS2_B,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_23_20 [4] */
+ FN_SD2_DAT0, FN_RX1_A, FN_SCL1_E, FN_DU1_DG6,
+ FN_SSI_SDATA1_B, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_19_16 [4] */
+ FN_SD2_CMD, FN_SCIF1_SCK_A, FN_TCLK2_A, FN_DU1_DG5,
+ FN_SSI_SCK2_B, FN_PWM3_A, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_15_12 [4] */
+ FN_SD2_CLK, FN_HSCK1, 0, FN_DU1_DG4, FN_SSI_SCK1_B, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_11_8 [4] */
+ FN_HRTS1_N_A, 0, 0, FN_DU1_DG3, FN_SSI_WS1_B, FN_IRQ1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_7_4 [4] */
+ FN_HCTS1_N_A, FN_PWM2_A, 0, FN_DU1_DG2, FN_REMOCON_B,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_3_0 [4] */
+ FN_HTX1_A, FN_SDA4_A, 0, FN_DU1_DG1, FN_TX0_A, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR13", 0xE6060074, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP13_31_28 [4] */
+ FN_SSI_SCK5_A, 0, 0, FN_DU1_DOTCLKOUT1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP13_27_24 [4] */
+ FN_SDA2_A, 0, FN_MSIOF1_SYNC_B, FN_DU1_DB7, FN_AUDIO_CLKOUT_C,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_23_20 [4] */
+ FN_SCL2_A, 0, FN_MSIOF1_SCK_B, FN_DU1_DB6, FN_AUDIO_CLKC_C,
+ FN_SSI_SCK4_B, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_19_16 [4] */
+ FN_TX3_A, FN_SDA1_C, FN_MSIOF1_TXD_B, FN_DU1_DB5,
+ FN_AUDIO_CLKB_C, FN_SSI_WS4_B, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_15_12 [4] */
+ FN_RX3_A, FN_SCL1_C, FN_MSIOF1_RXD_B, FN_DU1_DB4,
+ FN_AUDIO_CLKA_C, FN_SSI_SDATA4_B, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP13_11_8 [4] */
+ FN_SD2_WP, FN_SCIF3_SCK, 0, FN_DU1_DB3, FN_SSI_SDATA9_B, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_7_4 [4] */
+ FN_SD2_CD, FN_SCIF2_SCK_A, 0, FN_DU1_DB2, FN_SSI_SCK9_B, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_3_0 [4] */
+ FN_SD2_DAT3, FN_TX2_A, 0, FN_DU1_DB1, FN_SSI_WS9_B, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR14", 0xE6060078, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP14_31_28 [4] */
+ FN_SSI_SDATA7_A, 0, 0, FN_IRQ8, FN_AUDIO_CLKA_D, FN_CAN_CLK_D,
+ FN_VI0_G5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_27_24 [4] */
+ FN_SSI_WS78_A, 0, FN_SCL4_E, FN_DU1_CDE, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP14_23_20 [4] */
+ FN_SSI_SCK78_A, 0, FN_SDA4_E, FN_DU1_DISP, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP14_19_16 [4] */
+ FN_SSI_SDATA6_A, 0, FN_SDA4_C, FN_DU1_EXVSYNC_DU1_VSYNC, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_15_12 [4] */
+ FN_SSI_WS6_A, 0, FN_SCL4_C, FN_DU1_EXHSYNC_DU1_HSYNC, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_11_8 [4] */
+ FN_SSI_SCK6_A, 0, 0, FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_7_4 [4] */
+ FN_SSI_SDATA5_A, 0, FN_SDA3_C, FN_DU1_DOTCLKOUT0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_3_0 [4] */
+ FN_SSI_WS5_A, 0, FN_SCL3_C, FN_DU1_DOTCLKIN, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR15", 0xE606007C, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP15_31_28 [4] */
+ FN_SSI_WS4_A, 0, FN_AVB_PHY_INT, 0, 0, 0, FN_VI0_R5, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP15_27_24 [4] */
+ FN_SSI_SCK4_A, 0, FN_AVB_MAGIC, 0, 0, 0, FN_VI0_R4, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP15_23_20 [4] */
+ FN_SSI_SDATA3, FN_MSIOF1_SS2_A, FN_AVB_LINK, 0, FN_CAN1_TX_A,
+ FN_DREQ2_N, FN_VI0_R3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_19_16 [4] */
+ FN_SSI_WS34, FN_MSIOF1_SS1_A, FN_AVB_MDIO, 0, FN_CAN1_RX_A,
+ FN_DREQ1_N, FN_VI0_R2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_15_12 [4] */
+ FN_SSI_SCK34, FN_MSIOF1_SCK_A, FN_AVB_MDC, 0, 0, FN_DACK1,
+ FN_VI0_R1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_11_8 [4] */
+ FN_SSI_SDATA0_A, FN_MSIOF1_SYNC_A, FN_PWM0_C, 0, 0, 0,
+ FN_VI0_R0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_7_4 [4] */
+ FN_SSI_WS0129_A, FN_MSIOF1_TXD_A, FN_TX5_D, 0, 0, 0,
+ FN_VI0_G7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_3_0 [4] */
+ FN_SSI_SCK0129_A, FN_MSIOF1_RXD_A, FN_RX5_D, 0, 0, 0,
+ FN_VI0_G6, 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR16", 0xE6060080, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP16_31_28 [4] */
+ FN_SSI_SDATA2_A, FN_HRTS1_N_B, 0, 0, 0, 0,
+ FN_VI0_DATA4_VI0_B4, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_27_24 [4] */
+ FN_SSI_WS2_A, FN_HCTS1_N_B, 0, 0, 0, FN_AVB_TX_ER,
+ FN_VI0_DATA3_VI0_B3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_23_20 [4] */
+ FN_SSI_SCK2_A, FN_HTX1_B, 0, 0, 0, FN_AVB_TXD7,
+ FN_VI0_DATA2_VI0_B2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_19_16 [4] */
+ FN_SSI_SDATA1_A, FN_HRX1_B, 0, 0, 0, 0, FN_VI0_DATA1_VI0_B1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_15_12 [4] */
+ FN_SSI_WS1_A, FN_TX1_B, 0, 0, FN_CAN0_TX_D,
+ FN_AVB_AVTP_MATCH_B, FN_VI0_DATA0_VI0_B0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0,
+ /* IP16_11_8 [4] */
+ FN_SSI_SDATA8_A, FN_RX1_B, 0, 0, FN_CAN0_RX_D,
+ FN_AVB_AVTP_CAPTURE_B, FN_VI0_R7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_7_4 [4] */
+ FN_SSI_SCK1_A, FN_SCIF1_SCK_B, FN_PWM1_D, FN_IRQ9, FN_REMOCON_A,
+ FN_DACK2, FN_VI0_CLK, FN_AVB_COL, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_3_0 [4] */
+ FN_SSI_SDATA4_A, 0, FN_AVB_CRS, 0, 0, 0, FN_VI0_R6, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR17", 0xE6060084, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP17_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_27_24 [4] */
+ FN_AUDIO_CLKOUT_A, FN_SDA4_B, 0, 0, 0, 0,
+ FN_VI0_VSYNC_N, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_23_20 [4] */
+ FN_AUDIO_CLKC_A, FN_SCL4_B, 0, 0, 0, 0,
+ FN_VI0_HSYNC_N, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_19_16 [4] */
+ FN_AUDIO_CLKB_A, FN_SDA0_B, 0, 0, 0, 0,
+ FN_VI0_FIELD, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_15_12 [4] */
+ FN_AUDIO_CLKA_A, FN_SCL0_B, 0, 0, 0, 0,
+ FN_VI0_CLKENB, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_11_8 [4] */
+ FN_SSI_SDATA9_A, FN_SCIF2_SCK_B, FN_PWM2_D, 0, 0, 0,
+ FN_VI0_DATA7_VI0_B7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_7_4 [4] */
+ FN_SSI_WS9_A, FN_TX2_B, FN_SDA3_E, 0, 0, 0,
+ FN_VI0_DATA6_VI0_B6, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_3_0 [4] */
+ FN_SSI_SCK9_A, FN_RX2_B, FN_SCL3_E, 0, 0, FN_EX_WAIT1,
+ FN_VI0_DATA5_VI0_B5, 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xE60600C0, 32,
+ 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1, 3, 3,
+ 1, 2, 3, 3, 1) {
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_ADGA [2] */
+ FN_SEL_ADGA_0, FN_SEL_ADGA_1, FN_SEL_ADGA_2, FN_SEL_ADGA_3,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_CANCLK [2] */
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2,
+ FN_SEL_CANCLK_3,
+ /* SEL_CAN1 [2] */
+ FN_SEL_CAN1_0, FN_SEL_CAN1_1, FN_SEL_CAN1_2, FN_SEL_CAN1_3,
+ /* SEL_CAN0 [2] */
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1, FN_SEL_CAN0_2, FN_SEL_CAN0_3,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_I2C04 [3] */
+ FN_SEL_I2C04_0, FN_SEL_I2C04_1, FN_SEL_I2C04_2, FN_SEL_I2C04_3,
+ FN_SEL_I2C04_4, 0, 0, 0,
+ /* SEL_I2C03 [3] */
+ FN_SEL_I2C03_0, FN_SEL_I2C03_1, FN_SEL_I2C03_2, FN_SEL_I2C03_3,
+ FN_SEL_I2C03_4, 0, 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_I2C02 [2] */
+ FN_SEL_I2C02_0, FN_SEL_I2C02_1, FN_SEL_I2C02_2, FN_SEL_I2C02_3,
+ /* SEL_I2C01 [3] */
+ FN_SEL_I2C01_0, FN_SEL_I2C01_1, FN_SEL_I2C01_2, FN_SEL_I2C01_3,
+ FN_SEL_I2C01_4, 0, 0, 0,
+ /* SEL_I2C00 [3] */
+ FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2, FN_SEL_I2C00_3,
+ FN_SEL_I2C00_4, 0, 0, 0,
+ /* SEL_AVB [1] */
+ FN_SEL_AVB_0, FN_SEL_AVB_1, }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xE60600C4, 32,
+ 1, 3, 3, 2, 2, 1, 2, 2,
+ 2, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 1) {
+ /* SEL_SCIFCLK [1] */
+ FN_SEL_SCIFCLK_0, FN_SEL_SCIFCLK_1,
+ /* SEL_SCIF5 [3] */
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ FN_SEL_SCIF5_4, FN_SEL_SCIF5_5, 0, 0,
+ /* SEL_SCIF4 [3] */
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ FN_SEL_SCIF4_4, 0, 0, 0,
+ /* SEL_SCIF3 [2] */
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, 0,
+ /* SEL_SCIF2 [2] */
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2, 0,
+ /* SEL_SCIF2_CLK [1] */
+ FN_SEL_SCIF2_CLK_0, FN_SEL_SCIF2_CLK_1,
+ /* SEL_SCIF1 [2] */
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
+ /* SEL_SCIF0 [2] */
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
+ /* SEL_MSIOF2 [2] */
+ FN_SEL_MSIOF2_0, FN_SEL_MSIOF2_1, FN_SEL_MSIOF2_2, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_MSIOF1 [1] */
+ FN_SEL_MSIOF1_0, FN_SEL_MSIOF1_1,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_MSIOF0 [1] */
+ FN_SEL_MSIOF0_0, FN_SEL_MSIOF0_1,
+ /* SEL_RCN [1] */
+ FN_SEL_RCN_0, FN_SEL_RCN_1,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* SEL_TMU2 [1] */
+ FN_SEL_TMU2_0, FN_SEL_TMU2_1,
+ /* SEL_TMU1 [1] */
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* SEL_HSCIF1 [2] */
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_HSCIF1_2, 0,
+ /* SEL_HSCIF0 [1] */
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,}
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE60600C8, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_ADGB [2] */
+ FN_SEL_ADGB_0, FN_SEL_ADGB_1, FN_SEL_ADGB_2, 0,
+ /* SEL_ADGC [2] */
+ FN_SEL_ADGC_0, FN_SEL_ADGC_1, FN_SEL_ADGC_2, 0,
+ /* SEL_SSI9 [2] */
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1, 0, 0,
+ /* SEL_SSI8 [2] */
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1, 0, 0,
+ /* SEL_SSI7 [2] */
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1, 0, 0,
+ /* SEL_SSI6 [2] */
+ FN_SEL_SSI6_0, FN_SEL_SSI6_1, 0, 0,
+ /* SEL_SSI5 [2] */
+ FN_SEL_SSI5_0, FN_SEL_SSI5_1, 0, 0,
+ /* SEL_SSI4 [2] */
+ FN_SEL_SSI4_0, FN_SEL_SSI4_1, 0, 0,
+ /* SEL_SSI2 [2] */
+ FN_SEL_SSI2_0, FN_SEL_SSI2_1, 0, 0,
+ /* SEL_SSI1 [2] */
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1, FN_SEL_SSI1_2, FN_SEL_SSI1_3,
+ /* SEL_SSI0 [2] */
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1, 0, 0, }
+ },
+ { },
+};
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77470
+const struct sh_pfc_soc_info r8a77470_pinmux_info = {
+ .name = "r8a77470_pfc",
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 82a1c411c952..a6c5d50557e6 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -1432,10 +1432,10 @@ static const u16 pinmux_data[] = {
/*
* Static pins can not be muxed between different functions but
- * still needs a mark entry in the pinmux list. Add each static
+ * still need mark entries in the pinmux list. Add each static
* pin to the list without an associated function. The sh-pfc
- * core will do the right thing and skip trying to mux then pin
- * while still applying configuration to it
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
*/
#define FM(x) PINMUX_DATA(x##_MARK, 0),
PINMUX_STATIC
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 7100a2dd65f8..4f55b1562ad4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -1493,10 +1493,10 @@ static const u16 pinmux_data[] = {
/*
* Static pins can not be muxed between different functions but
- * still needs a mark entry in the pinmux list. Add each static
+ * still need mark entries in the pinmux list. Add each static
* pin to the list without an associated function. The sh-pfc
- * core will do the right thing and skip trying to mux then pin
- * while still applying configuration to it
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
*/
#define FM(x) PINMUX_DATA(x##_MARK, 0),
PINMUX_STATIC
@@ -3122,7 +3122,7 @@ static const unsigned int msiof3_ss1_e_mux[] = {
MSIOF3_SS1_E_MARK,
};
static const unsigned int msiof3_ss2_e_pins[] = {
- /* SS1 */
+ /* SS2 */
RCAR_GP_PIN(2, 0),
};
static const unsigned int msiof3_ss2_e_mux[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 4bc5b1f820c1..3ea133cfb241 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -1499,10 +1499,10 @@ static const u16 pinmux_data[] = {
/*
* Static pins can not be muxed between different functions but
- * still needs a mark entry in the pinmux list. Add each static
+ * still need mark entries in the pinmux list. Add each static
* pin to the list without an associated function. The sh-pfc
- * core will do the right thing and skip trying to mux then pin
- * while still applying configuration to it
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
*/
#define FM(x) PINMUX_DATA(x##_MARK, 0),
PINMUX_STATIC
@@ -3122,7 +3122,7 @@ static const unsigned int msiof3_ss1_e_mux[] = {
MSIOF3_SS1_E_MARK,
};
static const unsigned int msiof3_ss2_e_pins[] = {
- /* SS1 */
+ /* SS2 */
RCAR_GP_PIN(2, 0),
};
static const unsigned int msiof3_ss2_e_mux[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index cea9d0599c12..d2bbee656381 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A77965 processor support - PFC hardware block.
*
@@ -1501,10 +1501,10 @@ static const u16 pinmux_data[] = {
/*
* Static pins can not be muxed between different functions but
- * still needs a mark entry in the pinmux list. Add each static
+ * still need mark entries in the pinmux list. Add each static
* pin to the list without an associated function. The sh-pfc
- * core will do the right thing and skip trying to mux then pin
- * while still applying configuration to it
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
*/
#define FM(x) PINMUX_DATA(x##_MARK, 0),
PINMUX_STATIC
@@ -1662,6 +1662,153 @@ static const unsigned int avb_avtp_capture_b_mux[] = {
AVB_AVTP_CAPTURE_B_MARK,
};
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+};
+
+static const unsigned int du_rgb666_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK,
+};
+
+static const unsigned int du_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 8),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+
+static const unsigned int du_rgb888_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
+};
+
+static const unsigned int du_clk_out_0_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(1, 27),
+};
+
+static const unsigned int du_clk_out_0_mux[] = {
+ DU_DOTCLKOUT0_MARK
+};
+
+static const unsigned int du_clk_out_1_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(2, 3),
+};
+
+static const unsigned int du_clk_out_1_mux[] = {
+ DU_DOTCLKOUT1_MARK
+};
+
+static const unsigned int du_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
+};
+
+static const unsigned int du_sync_mux[] = {
+ DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK
+};
+
+static const unsigned int du_oddf_pins[] = {
+ /* EXDISP/EXODDF/EXCDE */
+ RCAR_GP_PIN(2, 2),
+};
+
+static const unsigned int du_oddf_mux[] = {
+ DU_EXODDF_DU_ODDF_DISP_CDE_MARK,
+};
+
+static const unsigned int du_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(2, 0),
+};
+
+static const unsigned int du_cde_mux[] = {
+ DU_CDE_MARK,
+};
+
+static const unsigned int du_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(2, 1),
+};
+
+static const unsigned int du_disp_mux[] = {
+ DU_DISP_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c1_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int i2c1_a_mux[] = {
+ SDA1_A_MARK, SCL1_A_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int i2c1_b_mux[] = {
+ SDA1_B_MARK, SCL1_B_MARK,
+};
+static const unsigned int i2c2_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int i2c2_a_mux[] = {
+ SDA2_A_MARK, SCL2_A_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int i2c2_b_mux[] = {
+ SDA2_B_MARK, SCL2_B_MARK,
+};
+static const unsigned int i2c6_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c6_a_mux[] = {
+ SDA6_A_MARK, SCL6_A_MARK,
+};
+static const unsigned int i2c6_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c6_b_mux[] = {
+ SDA6_B_MARK, SCL6_B_MARK,
+};
+static const unsigned int i2c6_c_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int i2c6_c_mux[] = {
+ SDA6_C_MARK, SCL6_C_MARK,
+};
+
/* - INTC-EX ---------------------------------------------------------------- */
static const unsigned int intc_ex_irq0_pins[] = {
/* IRQ0 */
@@ -1706,6 +1853,803 @@ static const unsigned int intc_ex_irq5_mux[] = {
IRQ5_MARK,
};
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 17),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 22),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 8),
+};
+static const unsigned int msiof1_clk_a_mux[] = {
+ MSIOF1_SCK_A_MARK,
+};
+static const unsigned int msiof1_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(6, 9),
+};
+static const unsigned int msiof1_sync_a_mux[] = {
+ MSIOF1_SYNC_A_MARK,
+};
+static const unsigned int msiof1_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 5),
+};
+static const unsigned int msiof1_ss1_a_mux[] = {
+ MSIOF1_SS1_A_MARK,
+};
+static const unsigned int msiof1_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 6),
+};
+static const unsigned int msiof1_ss2_a_mux[] = {
+ MSIOF1_SS2_A_MARK,
+};
+static const unsigned int msiof1_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int msiof1_txd_a_mux[] = {
+ MSIOF1_TXD_A_MARK,
+};
+static const unsigned int msiof1_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int msiof1_rxd_a_mux[] = {
+ MSIOF1_RXD_A_MARK,
+};
+static const unsigned int msiof1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 9),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+ MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int msiof1_sync_b_mux[] = {
+ MSIOF1_SYNC_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+ MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+ MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 8),
+};
+static const unsigned int msiof1_txd_b_mux[] = {
+ MSIOF1_TXD_B_MARK,
+};
+static const unsigned int msiof1_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int msiof1_rxd_b_mux[] = {
+ MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 17),
+};
+static const unsigned int msiof1_clk_c_mux[] = {
+ MSIOF1_SCK_C_MARK,
+};
+static const unsigned int msiof1_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(6, 18),
+};
+static const unsigned int msiof1_sync_c_mux[] = {
+ MSIOF1_SYNC_C_MARK,
+};
+static const unsigned int msiof1_ss1_c_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int msiof1_ss1_c_mux[] = {
+ MSIOF1_SS1_C_MARK,
+};
+static const unsigned int msiof1_ss2_c_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 27),
+};
+static const unsigned int msiof1_ss2_c_mux[] = {
+ MSIOF1_SS2_C_MARK,
+};
+static const unsigned int msiof1_txd_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int msiof1_txd_c_mux[] = {
+ MSIOF1_TXD_C_MARK,
+};
+static const unsigned int msiof1_rxd_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int msiof1_rxd_c_mux[] = {
+ MSIOF1_RXD_C_MARK,
+};
+static const unsigned int msiof1_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int msiof1_clk_d_mux[] = {
+ MSIOF1_SCK_D_MARK,
+};
+static const unsigned int msiof1_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int msiof1_sync_d_mux[] = {
+ MSIOF1_SYNC_D_MARK,
+};
+static const unsigned int msiof1_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int msiof1_ss1_d_mux[] = {
+ MSIOF1_SS1_D_MARK,
+};
+static const unsigned int msiof1_ss2_d_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof1_ss2_d_mux[] = {
+ MSIOF1_SS2_D_MARK,
+};
+static const unsigned int msiof1_txd_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int msiof1_txd_d_mux[] = {
+ MSIOF1_TXD_D_MARK,
+};
+static const unsigned int msiof1_rxd_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int msiof1_rxd_d_mux[] = {
+ MSIOF1_RXD_D_MARK,
+};
+static const unsigned int msiof1_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 0),
+};
+static const unsigned int msiof1_clk_e_mux[] = {
+ MSIOF1_SCK_E_MARK,
+};
+static const unsigned int msiof1_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(3, 1),
+};
+static const unsigned int msiof1_sync_e_mux[] = {
+ MSIOF1_SYNC_E_MARK,
+};
+static const unsigned int msiof1_ss1_e_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(3, 4),
+};
+static const unsigned int msiof1_ss1_e_mux[] = {
+ MSIOF1_SS1_E_MARK,
+};
+static const unsigned int msiof1_ss2_e_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(3, 5),
+};
+static const unsigned int msiof1_ss2_e_mux[] = {
+ MSIOF1_SS2_E_MARK,
+};
+static const unsigned int msiof1_txd_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(3, 3),
+};
+static const unsigned int msiof1_txd_e_mux[] = {
+ MSIOF1_TXD_E_MARK,
+};
+static const unsigned int msiof1_rxd_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(3, 2),
+};
+static const unsigned int msiof1_rxd_e_mux[] = {
+ MSIOF1_RXD_E_MARK,
+};
+static const unsigned int msiof1_clk_f_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 23),
+};
+static const unsigned int msiof1_clk_f_mux[] = {
+ MSIOF1_SCK_F_MARK,
+};
+static const unsigned int msiof1_sync_f_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 24),
+};
+static const unsigned int msiof1_sync_f_mux[] = {
+ MSIOF1_SYNC_F_MARK,
+};
+static const unsigned int msiof1_ss1_f_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int msiof1_ss1_f_mux[] = {
+ MSIOF1_SS1_F_MARK,
+};
+static const unsigned int msiof1_ss2_f_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 2),
+};
+static const unsigned int msiof1_ss2_f_mux[] = {
+ MSIOF1_SS2_F_MARK,
+};
+static const unsigned int msiof1_txd_f_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 0),
+};
+static const unsigned int msiof1_txd_f_mux[] = {
+ MSIOF1_TXD_F_MARK,
+};
+static const unsigned int msiof1_rxd_f_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 25),
+};
+static const unsigned int msiof1_rxd_f_mux[] = {
+ MSIOF1_RXD_F_MARK,
+};
+static const unsigned int msiof1_clk_g_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 6),
+};
+static const unsigned int msiof1_clk_g_mux[] = {
+ MSIOF1_SCK_G_MARK,
+};
+static const unsigned int msiof1_sync_g_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(3, 7),
+};
+static const unsigned int msiof1_sync_g_mux[] = {
+ MSIOF1_SYNC_G_MARK,
+};
+static const unsigned int msiof1_ss1_g_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(3, 10),
+};
+static const unsigned int msiof1_ss1_g_mux[] = {
+ MSIOF1_SS1_G_MARK,
+};
+static const unsigned int msiof1_ss2_g_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int msiof1_ss2_g_mux[] = {
+ MSIOF1_SS2_G_MARK,
+};
+static const unsigned int msiof1_txd_g_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(3, 9),
+};
+static const unsigned int msiof1_txd_g_mux[] = {
+ MSIOF1_TXD_G_MARK,
+};
+static const unsigned int msiof1_rxd_g_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int msiof1_rxd_g_mux[] = {
+ MSIOF1_RXD_G_MARK,
+};
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof2_clk_a_mux[] = {
+ MSIOF2_SCK_A_MARK,
+};
+static const unsigned int msiof2_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof2_sync_a_mux[] = {
+ MSIOF2_SYNC_A_MARK,
+};
+static const unsigned int msiof2_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof2_ss1_a_mux[] = {
+ MSIOF2_SS1_A_MARK,
+};
+static const unsigned int msiof2_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof2_ss2_a_mux[] = {
+ MSIOF2_SS2_A_MARK,
+};
+static const unsigned int msiof2_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof2_txd_a_mux[] = {
+ MSIOF2_TXD_A_MARK,
+};
+static const unsigned int msiof2_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof2_rxd_a_mux[] = {
+ MSIOF2_RXD_A_MARK,
+};
+static const unsigned int msiof2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof2_clk_b_mux[] = {
+ MSIOF2_SCK_B_MARK,
+};
+static const unsigned int msiof2_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof2_sync_b_mux[] = {
+ MSIOF2_SYNC_B_MARK,
+};
+static const unsigned int msiof2_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof2_ss1_b_mux[] = {
+ MSIOF2_SS1_B_MARK,
+};
+static const unsigned int msiof2_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof2_ss2_b_mux[] = {
+ MSIOF2_SS2_B_MARK,
+};
+static const unsigned int msiof2_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof2_txd_b_mux[] = {
+ MSIOF2_TXD_B_MARK,
+};
+static const unsigned int msiof2_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof2_rxd_b_mux[] = {
+ MSIOF2_RXD_B_MARK,
+};
+static const unsigned int msiof2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int msiof2_clk_c_mux[] = {
+ MSIOF2_SCK_C_MARK,
+};
+static const unsigned int msiof2_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 11),
+};
+static const unsigned int msiof2_sync_c_mux[] = {
+ MSIOF2_SYNC_C_MARK,
+};
+static const unsigned int msiof2_ss1_c_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 10),
+};
+static const unsigned int msiof2_ss1_c_mux[] = {
+ MSIOF2_SS1_C_MARK,
+};
+static const unsigned int msiof2_ss2_c_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int msiof2_ss2_c_mux[] = {
+ MSIOF2_SS2_C_MARK,
+};
+static const unsigned int msiof2_txd_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int msiof2_txd_c_mux[] = {
+ MSIOF2_TXD_C_MARK,
+};
+static const unsigned int msiof2_rxd_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int msiof2_rxd_c_mux[] = {
+ MSIOF2_RXD_C_MARK,
+};
+static const unsigned int msiof2_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof2_clk_d_mux[] = {
+ MSIOF2_SCK_D_MARK,
+};
+static const unsigned int msiof2_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof2_sync_d_mux[] = {
+ MSIOF2_SYNC_D_MARK,
+};
+static const unsigned int msiof2_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof2_ss1_d_mux[] = {
+ MSIOF2_SS1_D_MARK,
+};
+static const unsigned int msiof2_ss2_d_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_d_mux[] = {
+ MSIOF2_SS2_D_MARK,
+};
+static const unsigned int msiof2_txd_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof2_txd_d_mux[] = {
+ MSIOF2_TXD_D_MARK,
+};
+static const unsigned int msiof2_rxd_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof2_rxd_d_mux[] = {
+ MSIOF2_RXD_D_MARK,
+};
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof3_clk_a_mux[] = {
+ MSIOF3_SCK_A_MARK,
+};
+static const unsigned int msiof3_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_sync_a_mux[] = {
+ MSIOF3_SYNC_A_MARK,
+};
+static const unsigned int msiof3_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof3_ss1_a_mux[] = {
+ MSIOF3_SS1_A_MARK,
+};
+static const unsigned int msiof3_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof3_ss2_a_mux[] = {
+ MSIOF3_SS2_A_MARK,
+};
+static const unsigned int msiof3_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_txd_a_mux[] = {
+ MSIOF3_TXD_A_MARK,
+};
+static const unsigned int msiof3_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_rxd_a_mux[] = {
+ MSIOF3_RXD_A_MARK,
+};
+static const unsigned int msiof3_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof3_clk_b_mux[] = {
+ MSIOF3_SCK_B_MARK,
+};
+static const unsigned int msiof3_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof3_sync_b_mux[] = {
+ MSIOF3_SYNC_B_MARK,
+};
+static const unsigned int msiof3_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof3_ss1_b_mux[] = {
+ MSIOF3_SS1_B_MARK,
+};
+static const unsigned int msiof3_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof3_ss2_b_mux[] = {
+ MSIOF3_SS2_B_MARK,
+};
+static const unsigned int msiof3_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof3_txd_b_mux[] = {
+ MSIOF3_TXD_B_MARK,
+};
+static const unsigned int msiof3_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof3_rxd_b_mux[] = {
+ MSIOF3_RXD_B_MARK,
+};
+static const unsigned int msiof3_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 12),
+};
+static const unsigned int msiof3_clk_c_mux[] = {
+ MSIOF3_SCK_C_MARK,
+};
+static const unsigned int msiof3_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int msiof3_sync_c_mux[] = {
+ MSIOF3_SYNC_C_MARK,
+};
+static const unsigned int msiof3_txd_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int msiof3_txd_c_mux[] = {
+ MSIOF3_TXD_C_MARK,
+};
+static const unsigned int msiof3_rxd_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int msiof3_rxd_c_mux[] = {
+ MSIOF3_RXD_C_MARK,
+};
+static const unsigned int msiof3_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int msiof3_clk_d_mux[] = {
+ MSIOF3_SCK_D_MARK,
+};
+static const unsigned int msiof3_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof3_sync_d_mux[] = {
+ MSIOF3_SYNC_D_MARK,
+};
+static const unsigned int msiof3_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof3_ss1_d_mux[] = {
+ MSIOF3_SS1_D_MARK,
+};
+static const unsigned int msiof3_txd_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof3_txd_d_mux[] = {
+ MSIOF3_TXD_D_MARK,
+};
+static const unsigned int msiof3_rxd_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof3_rxd_d_mux[] = {
+ MSIOF3_RXD_D_MARK,
+};
+static const unsigned int msiof3_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int msiof3_clk_e_mux[] = {
+ MSIOF3_SCK_E_MARK,
+};
+static const unsigned int msiof3_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int msiof3_sync_e_mux[] = {
+ MSIOF3_SYNC_E_MARK,
+};
+static const unsigned int msiof3_ss1_e_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int msiof3_ss1_e_mux[] = {
+ MSIOF3_SS1_E_MARK,
+};
+static const unsigned int msiof3_ss2_e_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int msiof3_ss2_e_mux[] = {
+ MSIOF3_SS2_E_MARK,
+};
+static const unsigned int msiof3_txd_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int msiof3_txd_e_mux[] = {
+ MSIOF3_TXD_E_MARK,
+};
+static const unsigned int msiof3_rxd_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int msiof3_rxd_e_mux[] = {
+ MSIOF3_RXD_E_MARK,
+};
+
+/* - PWM0 --------------------------------------------------------------------*/
+static const unsigned int pwm0_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 6),
+};
+static const unsigned int pwm0_mux[] = {
+ PWM0_MARK,
+};
+/* - PWM1 --------------------------------------------------------------------*/
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+/* - PWM2 --------------------------------------------------------------------*/
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+/* - PWM3 --------------------------------------------------------------------*/
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+/* - PWM4 --------------------------------------------------------------------*/
+static const unsigned int pwm4_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int pwm4_a_mux[] = {
+ PWM4_A_MARK,
+};
+static const unsigned int pwm4_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int pwm4_b_mux[] = {
+ PWM4_B_MARK,
+};
+/* - PWM5 --------------------------------------------------------------------*/
+static const unsigned int pwm5_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int pwm5_a_mux[] = {
+ PWM5_A_MARK,
+};
+static const unsigned int pwm5_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int pwm5_b_mux[] = {
+ PWM5_B_MARK,
+};
+/* - PWM6 --------------------------------------------------------------------*/
+static const unsigned int pwm6_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int pwm6_a_mux[] = {
+ PWM6_A_MARK,
+};
+static const unsigned int pwm6_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int pwm6_b_mux[] = {
+ PWM6_B_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -1917,6 +2861,264 @@ static const unsigned int scif_clk_b_mux[] = {
SCIF_CLK_B_MARK,
};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 2),
+};
+
+static const unsigned int sdhi0_data1_mux[] = {
+ SD0_DAT0_MARK,
+};
+
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+};
+
+static const unsigned int sdhi0_data4_mux[] = {
+ SD0_DAT0_MARK, SD0_DAT1_MARK,
+ SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+};
+
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SD0_CLK_MARK, SD0_CMD_MARK,
+};
+
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 12),
+};
+
+static const unsigned int sdhi0_cd_mux[] = {
+ SD0_CD_MARK,
+};
+
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 13),
+};
+
+static const unsigned int sdhi0_wp_mux[] = {
+ SD0_WP_MARK,
+};
+
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 8),
+};
+
+static const unsigned int sdhi1_data1_mux[] = {
+ SD1_DAT0_MARK,
+};
+
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+
+static const unsigned int sdhi1_data4_mux[] = {
+ SD1_DAT0_MARK, SD1_DAT1_MARK,
+ SD1_DAT2_MARK, SD1_DAT3_MARK,
+};
+
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SD1_CLK_MARK, SD1_CMD_MARK,
+};
+
+static const unsigned int sdhi1_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 14),
+};
+
+static const unsigned int sdhi1_cd_mux[] = {
+ SD1_CD_MARK,
+};
+
+static const unsigned int sdhi1_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 15),
+};
+
+static const unsigned int sdhi1_wp_mux[] = {
+ SD1_WP_MARK,
+};
+
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 2),
+};
+
+static const unsigned int sdhi2_data1_mux[] = {
+ SD2_DAT0_MARK,
+};
+
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+
+static const unsigned int sdhi2_data4_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+
+static const unsigned int sdhi2_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+
+static const unsigned int sdhi2_data8_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+ SD2_DAT4_MARK, SD2_DAT5_MARK,
+ SD2_DAT6_MARK, SD2_DAT7_MARK,
+};
+
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SD2_CLK_MARK, SD2_CMD_MARK,
+};
+
+static const unsigned int sdhi2_cd_a_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 13),
+};
+
+static const unsigned int sdhi2_cd_a_mux[] = {
+ SD2_CD_A_MARK,
+};
+
+static const unsigned int sdhi2_cd_b_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(5, 10),
+};
+
+static const unsigned int sdhi2_cd_b_mux[] = {
+ SD2_CD_B_MARK,
+};
+
+static const unsigned int sdhi2_wp_a_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 14),
+};
+
+static const unsigned int sdhi2_wp_a_mux[] = {
+ SD2_WP_A_MARK,
+};
+
+static const unsigned int sdhi2_wp_b_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(5, 11),
+};
+
+static const unsigned int sdhi2_wp_b_mux[] = {
+ SD2_WP_B_MARK,
+};
+
+static const unsigned int sdhi2_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 6),
+};
+
+static const unsigned int sdhi2_ds_mux[] = {
+ SD2_DS_MARK,
+};
+
+/* - SDHI3 ------------------------------------------------------------------ */
+static const unsigned int sdhi3_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 9),
+};
+
+static const unsigned int sdhi3_data1_mux[] = {
+ SD3_DAT0_MARK,
+};
+
+static const unsigned int sdhi3_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+
+static const unsigned int sdhi3_data4_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+};
+
+static const unsigned int sdhi3_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+};
+
+static const unsigned int sdhi3_data8_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+ SD3_DAT4_MARK, SD3_DAT5_MARK,
+ SD3_DAT6_MARK, SD3_DAT7_MARK,
+};
+
+static const unsigned int sdhi3_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+};
+
+static const unsigned int sdhi3_ctrl_mux[] = {
+ SD3_CLK_MARK, SD3_CMD_MARK,
+};
+
+static const unsigned int sdhi3_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 15),
+};
+
+static const unsigned int sdhi3_cd_mux[] = {
+ SD3_CD_MARK,
+};
+
+static const unsigned int sdhi3_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 16),
+};
+
+static const unsigned int sdhi3_wp_mux[] = {
+ SD3_WP_MARK,
+};
+
+static const unsigned int sdhi3_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 17),
+};
+
+static const unsigned int sdhi3_ds_mux[] = {
+ SD3_DS_MARK,
+};
+
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
/* PWEN, OVC */
@@ -1959,12 +3161,139 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_avtp_capture_a),
SH_PFC_PIN_GROUP(avb_avtp_match_b),
SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
SH_PFC_PIN_GROUP(intc_ex_irq0),
SH_PFC_PIN_GROUP(intc_ex_irq1),
SH_PFC_PIN_GROUP(intc_ex_irq2),
SH_PFC_PIN_GROUP(intc_ex_irq3),
SH_PFC_PIN_GROUP(intc_ex_irq4),
SH_PFC_PIN_GROUP(intc_ex_irq5),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk_a),
+ SH_PFC_PIN_GROUP(msiof1_sync_a),
+ SH_PFC_PIN_GROUP(msiof1_ss1_a),
+ SH_PFC_PIN_GROUP(msiof1_ss2_a),
+ SH_PFC_PIN_GROUP(msiof1_txd_a),
+ SH_PFC_PIN_GROUP(msiof1_rxd_a),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_txd_b),
+ SH_PFC_PIN_GROUP(msiof1_rxd_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_ss1_c),
+ SH_PFC_PIN_GROUP(msiof1_ss2_c),
+ SH_PFC_PIN_GROUP(msiof1_txd_c),
+ SH_PFC_PIN_GROUP(msiof1_rxd_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_ss2_d),
+ SH_PFC_PIN_GROUP(msiof1_txd_d),
+ SH_PFC_PIN_GROUP(msiof1_rxd_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_ss1_e),
+ SH_PFC_PIN_GROUP(msiof1_ss2_e),
+ SH_PFC_PIN_GROUP(msiof1_txd_e),
+ SH_PFC_PIN_GROUP(msiof1_rxd_e),
+ SH_PFC_PIN_GROUP(msiof1_clk_f),
+ SH_PFC_PIN_GROUP(msiof1_sync_f),
+ SH_PFC_PIN_GROUP(msiof1_ss1_f),
+ SH_PFC_PIN_GROUP(msiof1_ss2_f),
+ SH_PFC_PIN_GROUP(msiof1_txd_f),
+ SH_PFC_PIN_GROUP(msiof1_rxd_f),
+ SH_PFC_PIN_GROUP(msiof1_clk_g),
+ SH_PFC_PIN_GROUP(msiof1_sync_g),
+ SH_PFC_PIN_GROUP(msiof1_ss1_g),
+ SH_PFC_PIN_GROUP(msiof1_ss2_g),
+ SH_PFC_PIN_GROUP(msiof1_txd_g),
+ SH_PFC_PIN_GROUP(msiof1_rxd_g),
+ SH_PFC_PIN_GROUP(msiof2_clk_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_ss1_a),
+ SH_PFC_PIN_GROUP(msiof2_ss2_a),
+ SH_PFC_PIN_GROUP(msiof2_txd_a),
+ SH_PFC_PIN_GROUP(msiof2_rxd_a),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_txd_b),
+ SH_PFC_PIN_GROUP(msiof2_rxd_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_ss1_c),
+ SH_PFC_PIN_GROUP(msiof2_ss2_c),
+ SH_PFC_PIN_GROUP(msiof2_txd_c),
+ SH_PFC_PIN_GROUP(msiof2_rxd_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_txd_d),
+ SH_PFC_PIN_GROUP(msiof2_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_ss2_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
+ SH_PFC_PIN_GROUP(msiof3_clk_c),
+ SH_PFC_PIN_GROUP(msiof3_sync_c),
+ SH_PFC_PIN_GROUP(msiof3_txd_c),
+ SH_PFC_PIN_GROUP(msiof3_rxd_c),
+ SH_PFC_PIN_GROUP(msiof3_clk_d),
+ SH_PFC_PIN_GROUP(msiof3_sync_d),
+ SH_PFC_PIN_GROUP(msiof3_ss1_d),
+ SH_PFC_PIN_GROUP(msiof3_txd_d),
+ SH_PFC_PIN_GROUP(msiof3_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_e),
+ SH_PFC_PIN_GROUP(msiof3_sync_e),
+ SH_PFC_PIN_GROUP(msiof3_ss1_e),
+ SH_PFC_PIN_GROUP(msiof3_ss2_e),
+ SH_PFC_PIN_GROUP(msiof3_txd_e),
+ SH_PFC_PIN_GROUP(msiof3_rxd_e),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -1994,6 +3323,32 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_clk_b),
SH_PFC_PIN_GROUP(scif_clk_a),
SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb30),
@@ -2013,6 +3368,33 @@ static const char * const avb_groups[] = {
"avb_avtp_capture_b",
};
+static const char * const du_groups[] = {
+ "du_rgb666",
+ "du_rgb888",
+ "du_clk_out_0",
+ "du_clk_out_1",
+ "du_sync",
+ "du_oddf",
+ "du_cde",
+ "du_disp",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_a",
+ "i2c1_b",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_a",
+ "i2c2_b",
+};
+
+static const char * const i2c6_groups[] = {
+ "i2c6_a",
+ "i2c6_b",
+ "i2c6_c",
+};
+
static const char * const intc_ex_groups[] = {
"intc_ex_irq0",
"intc_ex_irq1",
@@ -2022,6 +3404,151 @@ static const char * const intc_ex_groups[] = {
"intc_ex_irq5",
};
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk_a",
+ "msiof1_sync_a",
+ "msiof1_ss1_a",
+ "msiof1_ss2_a",
+ "msiof1_txd_a",
+ "msiof1_rxd_a",
+ "msiof1_clk_b",
+ "msiof1_sync_b",
+ "msiof1_ss1_b",
+ "msiof1_ss2_b",
+ "msiof1_txd_b",
+ "msiof1_rxd_b",
+ "msiof1_clk_c",
+ "msiof1_sync_c",
+ "msiof1_ss1_c",
+ "msiof1_ss2_c",
+ "msiof1_txd_c",
+ "msiof1_rxd_c",
+ "msiof1_clk_d",
+ "msiof1_sync_d",
+ "msiof1_ss1_d",
+ "msiof1_ss2_d",
+ "msiof1_txd_d",
+ "msiof1_rxd_d",
+ "msiof1_clk_e",
+ "msiof1_sync_e",
+ "msiof1_ss1_e",
+ "msiof1_ss2_e",
+ "msiof1_txd_e",
+ "msiof1_rxd_e",
+ "msiof1_clk_f",
+ "msiof1_sync_f",
+ "msiof1_ss1_f",
+ "msiof1_ss2_f",
+ "msiof1_txd_f",
+ "msiof1_rxd_f",
+ "msiof1_clk_g",
+ "msiof1_sync_g",
+ "msiof1_ss1_g",
+ "msiof1_ss2_g",
+ "msiof1_txd_g",
+ "msiof1_rxd_g",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk_a",
+ "msiof2_sync_a",
+ "msiof2_ss1_a",
+ "msiof2_ss2_a",
+ "msiof2_txd_a",
+ "msiof2_rxd_a",
+ "msiof2_clk_b",
+ "msiof2_sync_b",
+ "msiof2_ss1_b",
+ "msiof2_ss2_b",
+ "msiof2_txd_b",
+ "msiof2_rxd_b",
+ "msiof2_clk_c",
+ "msiof2_sync_c",
+ "msiof2_ss1_c",
+ "msiof2_ss2_c",
+ "msiof2_txd_c",
+ "msiof2_rxd_c",
+ "msiof2_clk_d",
+ "msiof2_sync_d",
+ "msiof2_ss1_d",
+ "msiof2_ss2_d",
+ "msiof2_txd_d",
+ "msiof2_rxd_d",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk_a",
+ "msiof3_sync_a",
+ "msiof3_ss1_a",
+ "msiof3_ss2_a",
+ "msiof3_txd_a",
+ "msiof3_rxd_a",
+ "msiof3_clk_b",
+ "msiof3_sync_b",
+ "msiof3_ss1_b",
+ "msiof3_ss2_b",
+ "msiof3_txd_b",
+ "msiof3_rxd_b",
+ "msiof3_clk_c",
+ "msiof3_sync_c",
+ "msiof3_txd_c",
+ "msiof3_rxd_c",
+ "msiof3_clk_d",
+ "msiof3_sync_d",
+ "msiof3_ss1_d",
+ "msiof3_txd_d",
+ "msiof3_rxd_d",
+ "msiof3_clk_e",
+ "msiof3_sync_e",
+ "msiof3_ss1_e",
+ "msiof3_ss2_e",
+ "msiof3_txd_e",
+ "msiof3_rxd_e",
+};
+
+static const char * const pwm0_groups[] = {
+ "pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4_a",
+ "pwm4_b",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5_a",
+ "pwm5_b",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6_a",
+ "pwm6_b",
+};
+
static const char * const scif0_groups[] = {
"scif0_data",
"scif0_clk",
@@ -2071,6 +3598,44 @@ static const char * const scif_clk_groups[] = {
"scif_clk_b",
};
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+ "sdhi1_cd",
+ "sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_data8",
+ "sdhi2_ctrl",
+ "sdhi2_cd_a",
+ "sdhi2_wp_a",
+ "sdhi2_cd_b",
+ "sdhi2_wp_b",
+ "sdhi2_ds",
+};
+
+static const char * const sdhi3_groups[] = {
+ "sdhi3_data1",
+ "sdhi3_data4",
+ "sdhi3_data8",
+ "sdhi3_ctrl",
+ "sdhi3_cd",
+ "sdhi3_wp",
+ "sdhi3_ds",
+};
+
static const char * const usb0_groups[] = {
"usb0",
};
@@ -2085,7 +3650,22 @@ static const char * const usb30_groups[] = {
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c6),
SH_PFC_FUNCTION(intc_ex),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -2093,6 +3673,10 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb30),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
index b1bb7263532b..b02caf316711 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
@@ -21,13 +21,15 @@
#include "core.h"
#include "sh_pfc.h"
+#define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH
+
#define CPU_ALL_PORT(fn, sfx) \
- PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_28(1, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_6(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_15(5, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH)
+ PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_6(4, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS)
/*
* F_() : just information
* FM() : macro for FN_xxx / xxx_MARK
@@ -2382,18 +2384,31 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
+enum ioctrl_regs {
+ IOCTRL30,
+ IOCTRL31,
+ IOCTRL32,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [IOCTRL30] = { 0xe6060380 },
+ [IOCTRL31] = { 0xe6060384 },
+ [IOCTRL32] = { 0xe6060388 },
+ { /* sentinel */ },
+};
+
static int r8a77970_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
u32 *pocctrl)
{
int bit = pin & 0x1f;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL30].reg;
if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 21))
return bit;
if (pin >= RCAR_GP_PIN(2, 0) && pin <= RCAR_GP_PIN(2, 9))
return bit + 22;
- *pocctrl += 4;
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL31].reg;
if (pin >= RCAR_GP_PIN(2, 10) && pin <= RCAR_GP_PIN(2, 16))
return bit - 10;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 16))
@@ -2421,6 +2436,7 @@ const struct sh_pfc_soc_info r8a77970_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
index 84c8f1c2f1d1..3f6967331f64 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
@@ -19,10 +19,10 @@
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, sfx) \
- PORT_GP_22(0, fn, sfx), \
+ PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
PORT_GP_28(1, fn, sfx), \
- PORT_GP_30(2, fn, sfx), \
- PORT_GP_17(3, fn, sfx), \
+ PORT_GP_CFG_30(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
PORT_GP_25(4, fn, sfx), \
PORT_GP_15(5, fn, sfx)
@@ -2779,8 +2779,53 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
+enum ioctrl_regs {
+ IOCTRL30,
+ IOCTRL31,
+ IOCTRL32,
+ IOCTRL33,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [IOCTRL30] = { 0xe6060380, },
+ [IOCTRL31] = { 0xe6060384, },
+ [IOCTRL32] = { 0xe6060388, },
+ [IOCTRL33] = { 0xe606038c, },
+ { /* sentinel */ },
+};
+
+static int r8a77980_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
+ u32 *pocctrl)
+{
+ int bit = pin & 0x1f;
+
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL30].reg;
+ if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 21))
+ return bit;
+ else if (pin >= RCAR_GP_PIN(2, 0) && pin <= RCAR_GP_PIN(2, 9))
+ return bit + 22;
+
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL31].reg;
+ if (pin >= RCAR_GP_PIN(2, 10) && pin <= RCAR_GP_PIN(2, 16))
+ return bit - 10;
+ if ((pin >= RCAR_GP_PIN(2, 17) && pin <= RCAR_GP_PIN(2, 24)) ||
+ (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 16)))
+ return bit + 7;
+
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL32].reg;
+ if (pin >= RCAR_GP_PIN(2, 25) && pin <= RCAR_GP_PIN(2, 29))
+ return pin - 25;
+
+ return -EINVAL;
+}
+
+static const struct sh_pfc_soc_operations pinmux_ops = {
+ .pin_to_pocctrl = r8a77980_pin_to_pocctrl,
+};
+
const struct sh_pfc_soc_info r8a77980_pinmux_info = {
.name = "r8a77980_pfc",
+ .ops = &pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
@@ -2793,6 +2838,7 @@ const struct sh_pfc_soc_info r8a77980_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
new file mode 100644
index 000000000000..a68fd658aada
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -0,0 +1,2695 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A77990 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ *
+ * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+ *
+ * R8A7796 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2016-2017 Renesas Electronics Corp.
+ */
+
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_PULL_UP | \
+ SH_PFC_PIN_CFG_PULL_DOWN)
+
+#define CPU_ALL_PORT(fn, sfx) \
+ PORT_GP_CFG_18(0, fn, sfx, CFG_FLAGS), /* GP0: 18 pins */ \
+ PORT_GP_CFG_23(1, fn, sfx, CFG_FLAGS), /* GP1: 23 pins */ \
+ PORT_GP_CFG_26(2, fn, sfx, CFG_FLAGS), /* GP2: 26 pins */ \
+ PORT_GP_CFG_16(3, fn, sfx, CFG_FLAGS), /* GP3: 16 pins */ \
+ PORT_GP_CFG_11(4, fn, sfx, CFG_FLAGS), /* GP4: 11 pins */ \
+ PORT_GP_CFG_20(5, fn, sfx, CFG_FLAGS), /* GP5: 20 pins */ \
+ PORT_GP_CFG_18(6, fn, sfx, CFG_FLAGS) /* GP6: 18 pins; all banks get pull-up/down caps (CFG_FLAGS) */
+/*
+ * F_() : just information
+ * FM() : macro for FN_xxx / xxx_MARK
+ */
+
+/* GPSR0 */
+#define GPSR0_17 F_(SDA4, IP7_27_24)
+#define GPSR0_16 F_(SCL4, IP7_23_20)
+#define GPSR0_15 F_(D15, IP7_19_16)
+#define GPSR0_14 F_(D14, IP7_15_12)
+#define GPSR0_13 F_(D13, IP7_11_8)
+#define GPSR0_12 F_(D12, IP7_7_4)
+#define GPSR0_11 F_(D11, IP7_3_0)
+#define GPSR0_10 F_(D10, IP6_31_28)
+#define GPSR0_9 F_(D9, IP6_27_24)
+#define GPSR0_8 F_(D8, IP6_23_20)
+#define GPSR0_7 F_(D7, IP6_19_16)
+#define GPSR0_6 F_(D6, IP6_15_12)
+#define GPSR0_5 F_(D5, IP6_11_8)
+#define GPSR0_4 F_(D4, IP6_7_4)
+#define GPSR0_3 F_(D3, IP6_3_0)
+#define GPSR0_2 F_(D2, IP5_31_28)
+#define GPSR0_1 F_(D1, IP5_27_24)
+#define GPSR0_0 F_(D0, IP5_23_20)
+
+/* GPSR1 */
+#define GPSR1_22 F_(WE0_N, IP5_19_16)
+#define GPSR1_21 F_(CS0_N, IP5_15_12)
+#define GPSR1_20 FM(CLKOUT)
+#define GPSR1_19 F_(A19, IP5_11_8)
+#define GPSR1_18 F_(A18, IP5_7_4)
+#define GPSR1_17 F_(A17, IP5_3_0)
+#define GPSR1_16 F_(A16, IP4_31_28)
+#define GPSR1_15 F_(A15, IP4_27_24)
+#define GPSR1_14 F_(A14, IP4_23_20)
+#define GPSR1_13 F_(A13, IP4_19_16)
+#define GPSR1_12 F_(A12, IP4_15_12)
+#define GPSR1_11 F_(A11, IP4_11_8)
+#define GPSR1_10 F_(A10, IP4_7_4)
+#define GPSR1_9 F_(A9, IP4_3_0)
+#define GPSR1_8 F_(A8, IP3_31_28)
+#define GPSR1_7 F_(A7, IP3_27_24)
+#define GPSR1_6 F_(A6, IP3_23_20)
+#define GPSR1_5 F_(A5, IP3_19_16)
+#define GPSR1_4 F_(A4, IP3_15_12)
+#define GPSR1_3 F_(A3, IP3_11_8)
+#define GPSR1_2 F_(A2, IP3_7_4)
+#define GPSR1_1 F_(A1, IP3_3_0)
+#define GPSR1_0 F_(A0, IP2_31_28)
+
+/* GPSR2 */
+#define GPSR2_25 F_(EX_WAIT0, IP2_27_24)
+#define GPSR2_24 F_(RD_WR_N, IP2_23_20)
+#define GPSR2_23 F_(RD_N, IP2_19_16)
+#define GPSR2_22 F_(BS_N, IP2_15_12)
+#define GPSR2_21 FM(AVB_PHY_INT)
+#define GPSR2_20 F_(AVB_TXCREFCLK, IP2_3_0)
+#define GPSR2_19 FM(AVB_RD3)
+#define GPSR2_18 F_(AVB_RD2, IP1_31_28)
+#define GPSR2_17 F_(AVB_RD1, IP1_27_24)
+#define GPSR2_16 F_(AVB_RD0, IP1_23_20)
+#define GPSR2_15 FM(AVB_RXC)
+#define GPSR2_14 FM(AVB_RX_CTL)
+#define GPSR2_13 F_(RPC_RESET_N, IP1_19_16)
+#define GPSR2_12 F_(RPC_INT_N, IP1_15_12)
+#define GPSR2_11 F_(QSPI1_SSL, IP1_11_8)
+#define GPSR2_10 F_(QSPI1_IO3, IP1_7_4)
+#define GPSR2_9 F_(QSPI1_IO2, IP1_3_0)
+#define GPSR2_8 F_(QSPI1_MISO_IO1, IP0_31_28)
+#define GPSR2_7 F_(QSPI1_MOSI_IO0, IP0_27_24)
+#define GPSR2_6 F_(QSPI1_SPCLK, IP0_23_20)
+#define GPSR2_5 FM(QSPI0_SSL)
+#define GPSR2_4 F_(QSPI0_IO3, IP0_19_16)
+#define GPSR2_3 F_(QSPI0_IO2, IP0_15_12)
+#define GPSR2_2 F_(QSPI0_MISO_IO1, IP0_11_8)
+#define GPSR2_1 F_(QSPI0_MOSI_IO0, IP0_7_4)
+#define GPSR2_0 F_(QSPI0_SPCLK, IP0_3_0)
+
+/* GPSR3 */
+#define GPSR3_15 F_(SD1_WP, IP11_7_4)
+#define GPSR3_14 F_(SD1_CD, IP11_3_0)
+#define GPSR3_13 F_(SD0_WP, IP10_31_28)
+#define GPSR3_12 F_(SD0_CD, IP10_27_24)
+#define GPSR3_11 F_(SD1_DAT3, IP9_11_8)
+#define GPSR3_10 F_(SD1_DAT2, IP9_7_4)
+#define GPSR3_9 F_(SD1_DAT1, IP9_3_0)
+#define GPSR3_8 F_(SD1_DAT0, IP8_31_28)
+#define GPSR3_7 F_(SD1_CMD, IP8_27_24)
+#define GPSR3_6 F_(SD1_CLK, IP8_23_20)
+#define GPSR3_5 F_(SD0_DAT3, IP8_19_16)
+#define GPSR3_4 F_(SD0_DAT2, IP8_15_12)
+#define GPSR3_3 F_(SD0_DAT1, IP8_11_8)
+#define GPSR3_2 F_(SD0_DAT0, IP8_7_4)
+#define GPSR3_1 F_(SD0_CMD, IP8_3_0)
+#define GPSR3_0 F_(SD0_CLK, IP7_31_28)
+
+/* GPSR4 */
+#define GPSR4_10 F_(SD3_DS, IP10_23_20)
+#define GPSR4_9 F_(SD3_DAT7, IP10_19_16)
+#define GPSR4_8 F_(SD3_DAT6, IP10_15_12)
+#define GPSR4_7 F_(SD3_DAT5, IP10_11_8)
+#define GPSR4_6 F_(SD3_DAT4, IP10_7_4)
+#define GPSR4_5 F_(SD3_DAT3, IP10_3_0)
+#define GPSR4_4 F_(SD3_DAT2, IP9_31_28)
+#define GPSR4_3 F_(SD3_DAT1, IP9_27_24)
+#define GPSR4_2 F_(SD3_DAT0, IP9_23_20)
+#define GPSR4_1 F_(SD3_CMD, IP9_19_16)
+#define GPSR4_0 F_(SD3_CLK, IP9_15_12)
+
+/* GPSR5 */
+#define GPSR5_19 F_(MLB_DAT, IP13_23_20)
+#define GPSR5_18 F_(MLB_SIG, IP13_19_16)
+#define GPSR5_17 F_(MLB_CLK, IP13_15_12)
+#define GPSR5_16 F_(SSI_SDATA9, IP13_11_8)
+#define GPSR5_15 F_(MSIOF0_SS2, IP13_7_4)
+#define GPSR5_14 F_(MSIOF0_SS1, IP13_3_0)
+#define GPSR5_13 F_(MSIOF0_SYNC, IP12_31_28)
+#define GPSR5_12 F_(MSIOF0_TXD, IP12_27_24)
+#define GPSR5_11 F_(MSIOF0_RXD, IP12_23_20)
+#define GPSR5_10 F_(MSIOF0_SCK, IP12_19_16)
+#define GPSR5_9 F_(RX2_A, IP12_15_12)
+#define GPSR5_8 F_(TX2_A, IP12_11_8)
+#define GPSR5_7 F_(SCK2_A, IP12_7_4)
+#define GPSR5_6 F_(TX1, IP12_3_0)
+#define GPSR5_5 F_(RX1, IP11_31_28)
+#define GPSR5_4 F_(RTS0_N_TANS_A, IP11_23_20)
+#define GPSR5_3 F_(CTS0_N_A, IP11_19_16)
+#define GPSR5_2 F_(TX0_A, IP11_15_12)
+#define GPSR5_1 F_(RX0_A, IP11_11_8)
+#define GPSR5_0 F_(SCK0_A, IP11_27_24)
+
+/* GPSR6 */
+#define GPSR6_17 F_(USB30_PWEN, IP15_27_24)
+#define GPSR6_16 F_(SSI_SDATA6, IP15_19_16)
+#define GPSR6_15 F_(SSI_WS6, IP15_15_12)
+#define GPSR6_14 F_(SSI_SCK6, IP15_11_8)
+#define GPSR6_13 F_(SSI_SDATA5, IP15_7_4)
+#define GPSR6_12 F_(SSI_WS5, IP15_3_0)
+#define GPSR6_11 F_(SSI_SCK5, IP14_31_28)
+#define GPSR6_10 F_(SSI_SDATA4, IP14_27_24)
+#define GPSR6_9 F_(USB30_OVC, IP15_31_28)
+#define GPSR6_8 F_(AUDIO_CLKA, IP15_23_20)
+#define GPSR6_7 F_(SSI_SDATA3, IP14_23_20)
+#define GPSR6_6 F_(SSI_WS349, IP14_19_16)
+#define GPSR6_5 F_(SSI_SCK349, IP14_15_12)
+#define GPSR6_4 F_(SSI_SDATA2, IP14_11_8)
+#define GPSR6_3 F_(SSI_SDATA1, IP14_7_4)
+#define GPSR6_2 F_(SSI_SDATA0, IP14_3_0)
+#define GPSR6_1 F_(SSI_WS01239, IP13_31_28)
+#define GPSR6_0 F_(SSI_SCK01239, IP13_27_24)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
+#define IP0_3_0 FM(QSPI0_SPCLK) FM(HSCK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_7_4 FM(QSPI0_MOSI_IO0) FM(HCTS4_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_11_8 FM(QSPI0_MISO_IO1) FM(HRTS4_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_15_12 FM(QSPI0_IO2) FM(HTX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_19_16 FM(QSPI0_IO3) FM(HRX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_23_20 FM(QSPI1_SPCLK) FM(RIF2_CLK_A) FM(HSCK4_B) FM(VI4_DATA0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_27_24 FM(QSPI1_MOSI_IO0) FM(RIF2_SYNC_A) FM(HTX4_B) FM(VI4_DATA1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_31_28 FM(QSPI1_MISO_IO1) FM(RIF2_D0_A) FM(HRX4_B) FM(VI4_DATA2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_3_0 FM(QSPI1_IO2) FM(RIF2_D1_A) FM(HTX3_C) FM(VI4_DATA3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_7_4 FM(QSPI1_IO3) FM(RIF3_CLK_A) FM(HRX3_C) FM(VI4_DATA4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_11_8 FM(QSPI1_SSL) FM(RIF3_SYNC_A) FM(HSCK3_C) FM(VI4_DATA5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_15_12 FM(RPC_INT_N) FM(RIF3_D0_A) FM(HCTS3_N_C) FM(VI4_DATA6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_19_16 FM(RPC_RESET_N) FM(RIF3_D1_A) FM(HRTS3_N_C) FM(VI4_DATA7_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_23_20 FM(AVB_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_27_24 FM(AVB_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_31_28 FM(AVB_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_3_0 FM(AVB_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_7_4 FM(AVB_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_11_8 FM(AVB_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_15_12 FM(BS_N) FM(PWM0_A) FM(AVB_MAGIC) FM(VI4_CLK) F_(0, 0) FM(TX3_C) F_(0, 0) FM(VI5_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_19_16 FM(RD_N) FM(PWM1_A) FM(AVB_LINK) FM(VI4_FIELD) F_(0, 0) FM(RX3_C) FM(FSCLKST2_N_A) FM(VI5_DATA0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH_A) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE_A) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_31_28 FM(A0) FM(IRQ0) FM(PWM2_A) FM(MSIOF3_SS1_B) FM(VI5_CLK_A) FM(DU_CDE) FM(HRX3_D) FM(IERX) FM(QSTB_QHE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_3_0 FM(A1) FM(IRQ1) FM(PWM3_A) FM(DU_DOTCLKIN1) FM(VI5_DATA0_A) FM(DU_DISP_CDE) FM(SDA6_B) FM(IETX) FM(QCPV_QDE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_7_4 FM(A2) FM(IRQ2) FM(AVB_AVTP_PPS) FM(VI4_CLKENB) FM(VI5_DATA1_A) FM(DU_DISP) FM(SCL6_B) F_(0, 0) FM(QSTVB_QVE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_11_8 FM(A3) FM(CTS4_N_A) FM(PWM4_A) FM(VI4_DATA12) F_(0, 0) FM(DU_DOTCLKOUT0) FM(HTX3_D) FM(IECLK) FM(LCDOUT12) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_15_12 FM(A4) FM(RTS4_N_TANS_A) FM(MSIOF3_SYNC_B) FM(VI4_DATA8) FM(PWM2_B) FM(DU_DG4) FM(RIF2_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_19_16 FM(A5) FM(SCK4_A) FM(MSIOF3_SCK_B) FM(VI4_DATA9) FM(PWM3_B) F_(0, 0) FM(RIF2_SYNC_B) F_(0, 0) FM(QPOLA) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_23_20 FM(A6) FM(RX4_A) FM(MSIOF3_RXD_B) FM(VI4_DATA10) F_(0, 0) F_(0, 0) FM(RIF2_D0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_27_24 FM(A7) FM(TX4_A) FM(MSIOF3_TXD_B) FM(VI4_DATA11) F_(0, 0) F_(0, 0) FM(RIF2_D1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_31_28 FM(A8) FM(SDA6_A) FM(RX3_B) FM(HRX4_C) FM(VI5_HSYNC_N_A) FM(DU_HSYNC) FM(VI4_DATA0_B) F_(0, 0) FM(QSTH_QHS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
+#define IP4_3_0 FM(A9) FM(TX5_A) FM(IRQ3) FM(VI4_DATA16) FM(VI5_VSYNC_N_A) FM(DU_DG7) F_(0, 0) F_(0, 0) FM(LCDOUT15) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_7_4 FM(A10) FM(IRQ4) FM(MSIOF2_SYNC_B) FM(VI4_DATA13) FM(VI5_FIELD_A) FM(DU_DG5) FM(FSCLKST2_N_B) F_(0, 0) FM(LCDOUT13) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_11_8 FM(A11) FM(SCL6_A) FM(TX3_B) FM(HTX4_C) F_(0, 0) FM(DU_VSYNC) FM(VI4_DATA1_B) F_(0, 0) FM(QSTVA_QVS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_15_12 FM(A12) FM(RX5_A) FM(MSIOF2_SS2_B) FM(VI4_DATA17) FM(VI5_DATA3_A) FM(DU_DG6) F_(0, 0) F_(0, 0) FM(LCDOUT14) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_19_16 FM(A13) FM(SCK5_A) FM(MSIOF2_SCK_B) FM(VI4_DATA14) FM(HRX4_D) FM(DU_DB2) F_(0, 0) F_(0, 0) FM(LCDOUT2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_23_20 FM(A14) FM(MSIOF1_SS1) FM(MSIOF2_RXD_B) FM(VI4_DATA15) FM(HTX4_D) FM(DU_DB3) F_(0, 0) F_(0, 0) FM(LCDOUT3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_27_24 FM(A15) FM(MSIOF1_SS2) FM(MSIOF2_TXD_B) FM(VI4_DATA18) FM(VI5_DATA4_A) FM(DU_DB4) F_(0, 0) F_(0, 0) FM(LCDOUT4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_31_28 FM(A16) FM(MSIOF1_SYNC) FM(MSIOF2_SS1_B) FM(VI4_DATA19) FM(VI5_DATA5_A) FM(DU_DB5) F_(0, 0) F_(0, 0) FM(LCDOUT5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_3_0 FM(A17) FM(MSIOF1_RXD) F_(0, 0) FM(VI4_DATA20) FM(VI5_DATA6_A) FM(DU_DB6) F_(0, 0) F_(0, 0) FM(LCDOUT6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_7_4 FM(A18) FM(MSIOF1_TXD) F_(0, 0) FM(VI4_DATA21) FM(VI5_DATA7_A) FM(DU_DB0) F_(0, 0) FM(HRX4_E) FM(LCDOUT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_11_8 FM(A19) FM(MSIOF1_SCK) F_(0, 0) FM(VI4_DATA22) FM(VI5_DATA2_A) FM(DU_DB1) F_(0, 0) FM(HTX4_E) FM(LCDOUT1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_15_12 FM(CS0_N) FM(SCL5) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR0) FM(VI4_DATA2_B) F_(0, 0) FM(LCDOUT16) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_19_16 FM(WE0_N) FM(SDA5) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR1) FM(VI4_DATA3_B) F_(0, 0) FM(LCDOUT17) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_23_20 FM(D0) FM(MSIOF3_SCK_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR2) FM(CTS4_N_C) F_(0, 0) FM(LCDOUT18) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_27_24 FM(D1) FM(MSIOF3_SYNC_A) FM(SCK3_A) FM(VI4_DATA23) FM(VI5_CLKENB_A) FM(DU_DB7) FM(RTS4_N_TANS_C) F_(0, 0) FM(LCDOUT7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_31_28 FM(D2) FM(MSIOF3_RXD_A) FM(RX5_C) F_(0, 0) FM(VI5_DATA14_A) FM(DU_DR3) FM(RX4_C) F_(0, 0) FM(LCDOUT19) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_3_0 FM(D3) FM(MSIOF3_TXD_A) FM(TX5_C) F_(0, 0) FM(VI5_DATA15_A) FM(DU_DR4) FM(TX4_C) F_(0, 0) FM(LCDOUT20) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_7_4 FM(D4) FM(CANFD1_TX) FM(HSCK3_B) FM(CAN1_TX) FM(RTS3_N_TANS_A) FM(MSIOF3_SS2_A) F_(0, 0) FM(VI5_DATA1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_11_8 FM(D5) FM(RX3_A) FM(HRX3_B) F_(0, 0) F_(0, 0) FM(DU_DR5) FM(VI4_DATA4_B) F_(0, 0) FM(LCDOUT21) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_15_12 FM(D6) FM(TX3_A) FM(HTX3_B) F_(0, 0) F_(0, 0) FM(DU_DR6) FM(VI4_DATA5_B) F_(0, 0) FM(LCDOUT22) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_19_16 FM(D7) FM(CANFD1_RX) FM(IRQ5) FM(CAN1_RX) FM(CTS3_N_A) F_(0, 0) F_(0, 0) FM(VI5_DATA2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_23_20 FM(D8) FM(MSIOF2_SCK_A) FM(SCK4_B) F_(0, 0) FM(VI5_DATA12_A) FM(DU_DR7) FM(RIF3_CLK_B) FM(HCTS3_N_E) FM(LCDOUT23) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_27_24 FM(D9) FM(MSIOF2_SYNC_A) F_(0, 0) F_(0, 0) FM(VI5_DATA10_A) FM(DU_DG0) FM(RIF3_SYNC_B) FM(HRX3_E) FM(LCDOUT8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_31_28 FM(D10) FM(MSIOF2_RXD_A) F_(0, 0) F_(0, 0) FM(VI5_DATA13_A) FM(DU_DG1) FM(RIF3_D0_B) FM(HTX3_E) FM(LCDOUT9) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_3_0 FM(D11) FM(MSIOF2_TXD_A) F_(0, 0) F_(0, 0) FM(VI5_DATA11_A) FM(DU_DG2) FM(RIF3_D1_B) FM(HRTS3_N_E) FM(LCDOUT10) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_7_4 FM(D12) FM(CANFD0_TX) FM(TX4_B) FM(CAN0_TX) FM(VI5_DATA8_A) F_(0, 0) F_(0, 0) FM(VI5_DATA3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_11_8 FM(D13) FM(CANFD0_RX) FM(RX4_B) FM(CAN0_RX) FM(VI5_DATA9_A) FM(SCL7_B) F_(0, 0) FM(VI5_DATA4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_15_12 FM(D14) FM(CAN_CLK) FM(HRX3_A) FM(MSIOF2_SS2_A) F_(0, 0) FM(SDA7_B) F_(0, 0) FM(VI5_DATA5_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_19_16 FM(D15) FM(MSIOF2_SS1_A) FM(HTX3_A) FM(MSIOF3_SS1_A) F_(0, 0) FM(DU_DG3) F_(0, 0) F_(0, 0) FM(LCDOUT11) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_23_20 FM(SCL4) FM(CS1_N_A26) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DOTCLKIN0) FM(VI4_DATA6_B) FM(VI5_DATA6_B) FM(QCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_27_24 FM(SDA4) FM(WE1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(VI4_DATA7_B) FM(VI5_DATA7_B) FM(QPOLB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_31_28 FM(SD0_CLK) FM(NFDATA8) FM(SCL1_C) FM(HSCK1_B) FM(SDA2_E) FM(FMCLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
+#define IP8_3_0 FM(SD0_CMD) FM(NFDATA9) F_(0, 0) FM(HRX1_B) F_(0, 0) FM(SPEEDIN_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_7_4 FM(SD0_DAT0) FM(NFDATA10) F_(0, 0) FM(HTX1_B) F_(0, 0) FM(REMOCON_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_11_8 FM(SD0_DAT1) FM(NFDATA11) FM(SDA2_C) FM(HCTS1_N_B) F_(0, 0) FM(FMIN_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_15_12 FM(SD0_DAT2) FM(NFDATA12) FM(SCL2_C) FM(HRTS1_N_B) F_(0, 0) FM(BPFCLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_19_16 FM(SD0_DAT3) FM(NFDATA13) FM(SDA1_C) FM(SCL2_E) FM(SPEEDIN_C) FM(REMOCON_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_23_20 FM(SD1_CLK) FM(NFDATA14_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_27_24 FM(SD1_CMD) FM(NFDATA15_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_31_28 FM(SD1_DAT0) FM(NFWP_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_3_0 FM(SD1_DAT1) FM(NFCE_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_7_4 FM(SD1_DAT2) FM(NFALE_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_11_8 FM(SD1_DAT3) FM(NFRB_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_15_12 FM(SD3_CLK) FM(NFWE_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_19_16 FM(SD3_CMD) FM(NFRE_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_23_20 FM(SD3_DAT0) FM(NFDATA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_27_24 FM(SD3_DAT1) FM(NFDATA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_31_28 FM(SD3_DAT2) FM(NFDATA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_3_0 FM(SD3_DAT3) FM(NFDATA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_7_4 FM(SD3_DAT4) FM(NFDATA4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_11_8 FM(SD3_DAT5) FM(NFDATA5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_15_12 FM(SD3_DAT6) FM(NFDATA6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_19_16 FM(SD3_DAT7) FM(NFDATA7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_23_20 FM(SD3_DS) FM(NFCLE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_27_24 FM(SD0_CD) FM(NFALE_A) FM(SD3_CD) FM(RIF0_CLK_B) FM(SCL2_B) FM(TCLK1_A) FM(SSI_SCK2_B) FM(TS_SCK0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_31_28 FM(SD0_WP) FM(NFRB_N_A) FM(SD3_WP) FM(RIF0_D0_B) FM(SDA2_B) FM(TCLK2_A) FM(SSI_WS2_B) FM(TS_SDAT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_3_0 FM(SD1_CD) FM(NFCE_N_A) FM(SSI_SCK1) FM(RIF0_D1_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDEN0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_7_4 FM(SD1_WP) FM(NFWP_N_A) FM(SSI_WS1) FM(RIF0_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SPSYNC0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_11_8 FM(RX0_A) FM(HRX1_A) FM(SSI_SCK2_A) FM(RIF1_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_15_12 FM(TX0_A) FM(HTX1_A) FM(SSI_WS2_A) FM(RIF1_D0) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDAT1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_19_16 FM(CTS0_N_A) FM(NFDATA14_A) FM(AUDIO_CLKOUT_A) FM(RIF1_D1) FM(SCIF_CLK_A) FM(FMCLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_23_20 FM(RTS0_N_TANS_A) FM(NFDATA15_A) FM(AUDIO_CLKOUT1_A) FM(RIF1_CLK) FM(SCL2_A) FM(FMIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24 FM(SCK0_A) FM(HSCK1_A) FM(USB3HS0_ID) FM(RTS1_N_TANS) FM(SDA2_A) FM(FMCLK_C) F_(0, 0) F_(0, 0) FM(USB1_ID) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_31_28 FM(RX1) FM(HRX2_B) FM(SSI_SCK9_B) FM(AUDIO_CLKOUT1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
+#define IP12_3_0 FM(TX1) FM(HTX2_B) FM(SSI_WS9_B) FM(AUDIO_CLKOUT3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_7_4 FM(SCK2_A) FM(HSCK0_A) FM(AUDIO_CLKB_A) FM(CTS1_N) FM(RIF0_CLK_A) FM(REMOCON_A) FM(SCIF_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_11_8 FM(TX2_A) FM(HRX0_A) FM(AUDIO_CLKOUT2_A) F_(0, 0) FM(SCL1_A) F_(0, 0) FM(FSO_CFE_0_N_A) FM(TS_SDEN1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_15_12 FM(RX2_A) FM(HTX0_A) FM(AUDIO_CLKOUT3_A) F_(0, 0) FM(SDA1_A) F_(0, 0) FM(FSO_CFE_1_N_A) FM(TS_SPSYNC1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_19_16 FM(MSIOF0_SCK) F_(0, 0) FM(SSI_SCK78) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_23_20 FM(MSIOF0_RXD) F_(0, 0) FM(SSI_WS78) F_(0, 0) F_(0, 0) FM(TX2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_27_24 FM(MSIOF0_TXD) F_(0, 0) FM(SSI_SDATA7) F_(0, 0) F_(0, 0) FM(RX2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_31_28 FM(MSIOF0_SYNC) FM(AUDIO_CLKOUT_B) FM(SSI_SDATA8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_3_0 FM(MSIOF0_SS1) FM(HRX2_A) FM(SSI_SCK4) FM(HCTS0_N_A) FM(BPFCLK_C) FM(SPEEDIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_7_4 FM(MSIOF0_SS2) FM(HTX2_A) FM(SSI_WS4) FM(HRTS0_N_A) FM(FMIN_C) FM(BPFCLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_11_8 FM(SSI_SDATA9) F_(0, 0) FM(AUDIO_CLKC_A) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_15_12 FM(MLB_CLK) FM(RX0_B) F_(0, 0) FM(RIF0_D0_A) FM(SCL1_B) FM(TCLK1_B) F_(0, 0) F_(0, 0) FM(SIM0_RST_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_19_16 FM(MLB_SIG) FM(SCK0_B) F_(0, 0) FM(RIF0_D1_A) FM(SDA1_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) FM(SIM0_D_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_23_20 FM(MLB_DAT) FM(TX0_B) F_(0, 0) FM(RIF0_SYNC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(SIM0_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_27_24 FM(SSI_SCK01239) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_31_28 FM(SSI_WS01239) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_3_0 FM(SSI_SDATA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_7_4 FM(SSI_SDATA1) FM(AUDIO_CLKC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_11_8 FM(SSI_SDATA2) FM(AUDIO_CLKOUT2_B) FM(SSI_SCK9_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_15_12 FM(SSI_SCK349) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_19_16 FM(SSI_WS349) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM3_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_23_20 FM(SSI_SDATA3) FM(AUDIO_CLKOUT1_C) FM(AUDIO_CLKB_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_27_24 FM(SSI_SDATA4) F_(0, 0) FM(SSI_WS9_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM5_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_31_28 FM(SSI_SCK5) FM(HRX0_B) F_(0, 0) FM(USB0_PWEN_B) FM(SCL2_D) F_(0, 0) FM(PWM6_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_3_0 FM(SSI_WS5) FM(HTX0_B) F_(0, 0) FM(USB0_OVC_B) FM(SDA2_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_7_4 FM(SSI_SDATA5) FM(HSCK0_B) FM(AUDIO_CLKB_C) FM(TPU0TO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_11_8 FM(SSI_SCK6) FM(HSCK2_A) FM(AUDIO_CLKC_C) FM(TPU0TO1) F_(0, 0) F_(0, 0) FM(FSO_CFE_0_N_B) F_(0, 0) FM(SIM0_RST_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_15_12 FM(SSI_WS6) FM(HCTS2_N_A) FM(AUDIO_CLKOUT2_C) FM(TPU0TO2) FM(SDA1_D) F_(0, 0) FM(FSO_CFE_1_N_B) F_(0, 0) FM(SIM0_D_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_19_16 FM(SSI_SDATA6) FM(HRTS2_N_A) FM(AUDIO_CLKOUT3_C) FM(TPU0TO3) FM(SCL1_D) F_(0, 0) FM(FSO_TOE_N_B) F_(0, 0) FM(SIM0_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_23_20 FM(AUDIO_CLKA) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_27_24 FM(USB30_PWEN) FM(USB0_PWEN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_31_28 FM(USB30_OVC) FM(USB0_OVC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(FSO_TOE_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+#define PINMUX_GPSR \
+\
+ \
+ \
+ \
+ \
+ \
+ \
+ GPSR2_25 \
+ GPSR2_24 \
+ GPSR2_23 \
+ GPSR1_22 GPSR2_22 \
+ GPSR1_21 GPSR2_21 \
+ GPSR1_20 GPSR2_20 \
+ GPSR1_19 GPSR2_19 GPSR5_19 \
+ GPSR1_18 GPSR2_18 GPSR5_18 \
+GPSR0_17 GPSR1_17 GPSR2_17 GPSR5_17 GPSR6_17 \
+GPSR0_16 GPSR1_16 GPSR2_16 GPSR5_16 GPSR6_16 \
+GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR5_15 GPSR6_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR5_14 GPSR6_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR5_13 GPSR6_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR5_12 GPSR6_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR5_11 GPSR6_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0
+
+/*
+ * Peripheral Function Select Registers (IPSR0..IPSR15): each IPn_b_a
+ * token names the 4-bit field covering bits b..a of register IPSRn.
+ * FM()/F_() are X-macros redefined before each expansion of this list
+ * (first pass generates FN_* function numbers, second pass *_MARK
+ * values -- see the enum further down in this file).
+ */
+#define PINMUX_IPSR \
+\
+FM(IP0_3_0) IP0_3_0 FM(IP1_3_0) IP1_3_0 FM(IP2_3_0) IP2_3_0 FM(IP3_3_0) IP3_3_0 \
+FM(IP0_7_4) IP0_7_4 FM(IP1_7_4) IP1_7_4 FM(IP2_7_4) IP2_7_4 FM(IP3_7_4) IP3_7_4 \
+FM(IP0_11_8) IP0_11_8 FM(IP1_11_8) IP1_11_8 FM(IP2_11_8) IP2_11_8 FM(IP3_11_8) IP3_11_8 \
+FM(IP0_15_12) IP0_15_12 FM(IP1_15_12) IP1_15_12 FM(IP2_15_12) IP2_15_12 FM(IP3_15_12) IP3_15_12 \
+FM(IP0_19_16) IP0_19_16 FM(IP1_19_16) IP1_19_16 FM(IP2_19_16) IP2_19_16 FM(IP3_19_16) IP3_19_16 \
+FM(IP0_23_20) IP0_23_20 FM(IP1_23_20) IP1_23_20 FM(IP2_23_20) IP2_23_20 FM(IP3_23_20) IP3_23_20 \
+FM(IP0_27_24) IP0_27_24 FM(IP1_27_24) IP1_27_24 FM(IP2_27_24) IP2_27_24 FM(IP3_27_24) IP3_27_24 \
+FM(IP0_31_28) IP0_31_28 FM(IP1_31_28) IP1_31_28 FM(IP2_31_28) IP2_31_28 FM(IP3_31_28) IP3_31_28 \
+\
+FM(IP4_3_0) IP4_3_0 FM(IP5_3_0) IP5_3_0 FM(IP6_3_0) IP6_3_0 FM(IP7_3_0) IP7_3_0 \
+FM(IP4_7_4) IP4_7_4 FM(IP5_7_4) IP5_7_4 FM(IP6_7_4) IP6_7_4 FM(IP7_7_4) IP7_7_4 \
+FM(IP4_11_8) IP4_11_8 FM(IP5_11_8) IP5_11_8 FM(IP6_11_8) IP6_11_8 FM(IP7_11_8) IP7_11_8 \
+FM(IP4_15_12) IP4_15_12 FM(IP5_15_12) IP5_15_12 FM(IP6_15_12) IP6_15_12 FM(IP7_15_12) IP7_15_12 \
+FM(IP4_19_16) IP4_19_16 FM(IP5_19_16) IP5_19_16 FM(IP6_19_16) IP6_19_16 FM(IP7_19_16) IP7_19_16 \
+FM(IP4_23_20) IP4_23_20 FM(IP5_23_20) IP5_23_20 FM(IP6_23_20) IP6_23_20 FM(IP7_23_20) IP7_23_20 \
+FM(IP4_27_24) IP4_27_24 FM(IP5_27_24) IP5_27_24 FM(IP6_27_24) IP6_27_24 FM(IP7_27_24) IP7_27_24 \
+FM(IP4_31_28) IP4_31_28 FM(IP5_31_28) IP5_31_28 FM(IP6_31_28) IP6_31_28 FM(IP7_31_28) IP7_31_28 \
+\
+FM(IP8_3_0) IP8_3_0 FM(IP9_3_0) IP9_3_0 FM(IP10_3_0) IP10_3_0 FM(IP11_3_0) IP11_3_0 \
+FM(IP8_7_4) IP8_7_4 FM(IP9_7_4) IP9_7_4 FM(IP10_7_4) IP10_7_4 FM(IP11_7_4) IP11_7_4 \
+FM(IP8_11_8) IP8_11_8 FM(IP9_11_8) IP9_11_8 FM(IP10_11_8) IP10_11_8 FM(IP11_11_8) IP11_11_8 \
+FM(IP8_15_12) IP8_15_12 FM(IP9_15_12) IP9_15_12 FM(IP10_15_12) IP10_15_12 FM(IP11_15_12) IP11_15_12 \
+FM(IP8_19_16) IP8_19_16 FM(IP9_19_16) IP9_19_16 FM(IP10_19_16) IP10_19_16 FM(IP11_19_16) IP11_19_16 \
+FM(IP8_23_20) IP8_23_20 FM(IP9_23_20) IP9_23_20 FM(IP10_23_20) IP10_23_20 FM(IP11_23_20) IP11_23_20 \
+FM(IP8_27_24) IP8_27_24 FM(IP9_27_24) IP9_27_24 FM(IP10_27_24) IP10_27_24 FM(IP11_27_24) IP11_27_24 \
+FM(IP8_31_28) IP8_31_28 FM(IP9_31_28) IP9_31_28 FM(IP10_31_28) IP10_31_28 FM(IP11_31_28) IP11_31_28 \
+\
+FM(IP12_3_0) IP12_3_0 FM(IP13_3_0) IP13_3_0 FM(IP14_3_0) IP14_3_0 FM(IP15_3_0) IP15_3_0 \
+FM(IP12_7_4) IP12_7_4 FM(IP13_7_4) IP13_7_4 FM(IP14_7_4) IP14_7_4 FM(IP15_7_4) IP15_7_4 \
+FM(IP12_11_8) IP12_11_8 FM(IP13_11_8) IP13_11_8 FM(IP14_11_8) IP14_11_8 FM(IP15_11_8) IP15_11_8 \
+FM(IP12_15_12) IP12_15_12 FM(IP13_15_12) IP13_15_12 FM(IP14_15_12) IP14_15_12 FM(IP15_15_12) IP15_15_12 \
+FM(IP12_19_16) IP12_19_16 FM(IP13_19_16) IP13_19_16 FM(IP14_19_16) IP14_19_16 FM(IP15_19_16) IP15_19_16 \
+FM(IP12_23_20) IP12_23_20 FM(IP13_23_20) IP13_23_20 FM(IP14_23_20) IP14_23_20 FM(IP15_23_20) IP15_23_20 \
+FM(IP12_27_24) IP12_27_24 FM(IP13_27_24) IP13_27_24 FM(IP14_27_24) IP14_27_24 FM(IP15_27_24) IP15_27_24 \
+FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM(IP15_31_28) IP15_31_28
+
+/*
+ * Module Select Register 0 (MOD_SEL0) bit fields: each macro lists the
+ * selectable functions for its bit range; F_(0, 0) marks a reserved
+ * encoding.
+ * NOTE(review): MOD_SEL0_21_20 and MOD_SEL0_19_18_17 both list 8
+ * entries (a 3-bit field's worth) while the former's name spans only
+ * bits 21-20 -- verify field widths/names against the PFC register map.
+ */
+/* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL0_30_29 FM(SEL_ADGB_0) FM(SEL_ADGB_1) FM(SEL_ADGB_2) F_(0, 0)
+#define MOD_SEL0_28 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1)
+#define MOD_SEL0_27_26 FM(SEL_FM_0) FM(SEL_FM_1) FM(SEL_FM_2) F_(0, 0)
+#define MOD_SEL0_25 FM(SEL_FSO_0) FM(SEL_FSO_1)
+#define MOD_SEL0_24 FM(SEL_HSCIF0_0) FM(SEL_HSCIF0_1)
+#define MOD_SEL0_23 FM(SEL_HSCIF1_0) FM(SEL_HSCIF1_1)
+#define MOD_SEL0_22 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1)
+#define MOD_SEL0_21_20 FM(SEL_I2C1_0) FM(SEL_I2C1_1) FM(SEL_I2C1_2) FM(SEL_I2C1_3) FM(SEL_I2C1_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL0_19_18_17 FM(SEL_I2C2_0) FM(SEL_I2C2_1) FM(SEL_I2C2_2) FM(SEL_I2C2_3) FM(SEL_I2C2_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL0_16 FM(SEL_NDFC_0) FM(SEL_NDFC_1)
+#define MOD_SEL0_15 FM(SEL_PWM0_0) FM(SEL_PWM0_1)
+#define MOD_SEL0_14 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
+#define MOD_SEL0_13_12 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0)
+#define MOD_SEL0_11_10 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0)
+#define MOD_SEL0_9 FM(SEL_PWM4_0) FM(SEL_PWM4_1)
+#define MOD_SEL0_8 FM(SEL_PWM5_0) FM(SEL_PWM5_1)
+#define MOD_SEL0_7 FM(SEL_PWM6_0) FM(SEL_PWM6_1)
+#define MOD_SEL0_6_5 FM(SEL_REMOCON_0) FM(SEL_REMOCON_1) FM(SEL_REMOCON_2) F_(0, 0)
+#define MOD_SEL0_4 FM(SEL_SCIF_0) FM(SEL_SCIF_1)
+#define MOD_SEL0_3 FM(SEL_SCIF0_0) FM(SEL_SCIF0_1)
+#define MOD_SEL0_2 FM(SEL_SCIF2_0) FM(SEL_SCIF2_1)
+#define MOD_SEL0_1_0 FM(SEL_SPEED_PULSE_IF_0) FM(SEL_SPEED_PULSE_IF_1) FM(SEL_SPEED_PULSE_IF_2) F_(0, 0)
+
+/*
+ * Module Select Register 1 (MOD_SEL1) bit fields; same layout rules as
+ * MOD_SEL0 above.
+ * NOTE(review): there is no MOD_SEL1_27 entry (the list jumps from bit
+ * 28 to bit 26) -- presumably bit 27 is reserved on this SoC; confirm
+ * against the datasheet.
+ */
+/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
+#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
+#define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
+#define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
+#define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
+#define MOD_SEL1_25 FM(SEL_DRIF3_0) FM(SEL_DRIF3_1)
+#define MOD_SEL1_24_23_22 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1) FM(SEL_HSCIF3_2) FM(SEL_HSCIF3_3) FM(SEL_HSCIF3_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL1_21_20_19 FM(SEL_HSCIF4_0) FM(SEL_HSCIF4_1) FM(SEL_HSCIF4_2) FM(SEL_HSCIF4_3) FM(SEL_HSCIF4_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL1_18 FM(SEL_I2C6_0) FM(SEL_I2C6_1)
+#define MOD_SEL1_17 FM(SEL_I2C7_0) FM(SEL_I2C7_1)
+#define MOD_SEL1_16 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1)
+#define MOD_SEL1_15 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1)
+#define MOD_SEL1_14_13 FM(SEL_SCIF3_0) FM(SEL_SCIF3_1) FM(SEL_SCIF3_2) F_(0, 0)
+#define MOD_SEL1_12_11 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1) FM(SEL_SCIF4_2) F_(0, 0)
+#define MOD_SEL1_10_9 FM(SEL_SCIF5_0) FM(SEL_SCIF5_1) FM(SEL_SCIF5_2) F_(0, 0)
+#define MOD_SEL1_8 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
+#define MOD_SEL1_7 FM(SEL_VIN5_0) FM(SEL_VIN5_1)
+#define MOD_SEL1_6_5 FM(SEL_ADGC_0) FM(SEL_ADGC_1) FM(SEL_ADGC_2) F_(0, 0)
+#define MOD_SEL1_4 FM(SEL_SSI9_0) FM(SEL_SSI9_1)
+
+/*
+ * Combined expansion of all MOD_SEL0/MOD_SEL1 fields.  The two visual
+ * columns (one per register, walking down from the high bits) are
+ * cosmetic -- whitespace is insignificant here; only the token
+ * expansion order matters to the FM()/F_() passes in the enum below.
+ */
+#define PINMUX_MOD_SELS \
+\
+ MOD_SEL1_31 \
+MOD_SEL0_30_29 MOD_SEL1_30 \
+ MOD_SEL1_29 \
+MOD_SEL0_28 MOD_SEL1_28 \
+MOD_SEL0_27_26 \
+ MOD_SEL1_26 \
+MOD_SEL0_25 MOD_SEL1_25 \
+MOD_SEL0_24 MOD_SEL1_24_23_22 \
+MOD_SEL0_23 \
+MOD_SEL0_22 \
+MOD_SEL0_21_20 MOD_SEL1_21_20_19 \
+MOD_SEL0_19_18_17 MOD_SEL1_18 \
+ MOD_SEL1_17 \
+MOD_SEL0_16 MOD_SEL1_16 \
+MOD_SEL0_15 MOD_SEL1_15 \
+MOD_SEL0_14 MOD_SEL1_14_13 \
+MOD_SEL0_13_12 \
+ MOD_SEL1_12_11 \
+MOD_SEL0_11_10 \
+ MOD_SEL1_10_9 \
+MOD_SEL0_9 \
+MOD_SEL0_8 MOD_SEL1_8 \
+MOD_SEL0_7 MOD_SEL1_7 \
+MOD_SEL0_6_5 MOD_SEL1_6_5 \
+MOD_SEL0_4 MOD_SEL1_4 \
+MOD_SEL0_3 \
+MOD_SEL0_2 \
+MOD_SEL0_1_0
+
+/*
+ * These pins are not able to be muxed but have other properties
+ * that can be set, such as pull-up/pull-down enable.
+ *
+ * Note: PINMUX_STATIC is expanded only in the *_MARK pass of the enum
+ * below (between PINMUX_MARK_BEGIN/END), not in the FN_* pass, since
+ * static pins have no function-select number.
+ */
+#define PINMUX_STATIC \
+	FM(AVB_TX_CTL) FM(AVB_TXC) FM(AVB_TD0) FM(AVB_TD1) FM(AVB_TD2) \
+	FM(AVB_TD3) \
+	FM(PRESETOUT_N) FM(FSCLKST_N) FM(TRST_N) FM(TCK) FM(TMS) FM(TDI) \
+	FM(ASEBRK) \
+	FM(MLB_REF)
+
+/*
+ * Build the pinmux enums with two X-macro passes over the same lists:
+ * pass 1 defines FM(x) as FN_##x to emit function-number enumerators,
+ * pass 2 redefines FM(x) as x##_MARK to emit mux-mark enumerators.
+ * F_(x, y) expands to nothing in both passes (it covers reserved
+ * encodings).  The #define/#undef ordering here is load-bearing.
+ */
+enum {
+	PINMUX_RESERVED = 0,
+
+	/* Per-GPIO data enumerators via GP_ALL(DATA). */
+	PINMUX_DATA_BEGIN,
+	GP_ALL(DATA),
+	PINMUX_DATA_END,
+
+	/* Pass 1: FN_* function numbers. */
+#define F_(x, y)
+#define FM(x) FN_##x,
+	PINMUX_FUNCTION_BEGIN,
+	GP_ALL(FN),
+	PINMUX_GPSR
+	PINMUX_IPSR
+	PINMUX_MOD_SELS
+	PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+	/* Pass 2: *_MARK values (also covers the non-muxable static pins). */
+#define F_(x, y)
+#define FM(x) x##_MARK,
+	PINMUX_MARK_BEGIN,
+	PINMUX_GPSR
+	PINMUX_IPSR
+	PINMUX_MOD_SELS
+	PINMUX_STATIC
+	PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
+ PINMUX_SINGLE(CLKOUT),
+ PINMUX_SINGLE(AVB_PHY_INT),
+ PINMUX_SINGLE(AVB_RD3),
+ PINMUX_SINGLE(AVB_RXC),
+ PINMUX_SINGLE(AVB_RX_CTL),
+ PINMUX_SINGLE(QSPI0_SSL),
+
+ /* IPSR0 */
+ PINMUX_IPSR_GPSR(IP0_3_0, QSPI0_SPCLK),
+ PINMUX_IPSR_MSEL(IP0_3_0, HSCK4_A, SEL_HSCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_7_4, QSPI0_MOSI_IO0),
+ PINMUX_IPSR_MSEL(IP0_7_4, HCTS4_N_A, SEL_HSCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_11_8, QSPI0_MISO_IO1),
+ PINMUX_IPSR_MSEL(IP0_11_8, HRTS4_N_A, SEL_HSCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_15_12, QSPI0_IO2),
+ PINMUX_IPSR_GPSR(IP0_15_12, HTX4_A),
+
+ PINMUX_IPSR_GPSR(IP0_19_16, QSPI0_IO3),
+ PINMUX_IPSR_MSEL(IP0_19_16, HRX4_A, SEL_HSCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_23_20, QSPI1_SPCLK),
+ PINMUX_IPSR_MSEL(IP0_23_20, RIF2_CLK_A, SEL_DRIF2_0),
+ PINMUX_IPSR_MSEL(IP0_23_20, HSCK4_B, SEL_HSCIF4_1),
+ PINMUX_IPSR_MSEL(IP0_23_20, VI4_DATA0_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP0_27_24, QSPI1_MOSI_IO0),
+ PINMUX_IPSR_MSEL(IP0_27_24, RIF2_SYNC_A, SEL_DRIF2_0),
+ PINMUX_IPSR_GPSR(IP0_27_24, HTX4_B),
+ PINMUX_IPSR_MSEL(IP0_27_24, VI4_DATA1_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP0_31_28, QSPI1_MISO_IO1),
+ PINMUX_IPSR_MSEL(IP0_31_28, RIF2_D0_A, SEL_DRIF2_0),
+ PINMUX_IPSR_MSEL(IP0_31_28, HRX4_B, SEL_HSCIF4_1),
+ PINMUX_IPSR_MSEL(IP0_31_28, VI4_DATA2_A, SEL_VIN4_0),
+
+ /* IPSR1 */
+ PINMUX_IPSR_GPSR(IP1_3_0, QSPI1_IO2),
+ PINMUX_IPSR_MSEL(IP1_3_0, RIF2_D1_A, SEL_DRIF2_0),
+ PINMUX_IPSR_GPSR(IP1_3_0, HTX3_C),
+ PINMUX_IPSR_MSEL(IP1_3_0, VI4_DATA3_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_7_4, QSPI1_IO3),
+ PINMUX_IPSR_MSEL(IP1_7_4, RIF3_CLK_A, SEL_DRIF3_0),
+ PINMUX_IPSR_MSEL(IP1_7_4, HRX3_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP1_7_4, VI4_DATA4_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_11_8, QSPI1_SSL),
+ PINMUX_IPSR_MSEL(IP1_11_8, RIF3_SYNC_A, SEL_DRIF3_0),
+ PINMUX_IPSR_MSEL(IP1_11_8, HSCK3_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP1_11_8, VI4_DATA5_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_15_12, RPC_INT_N),
+ PINMUX_IPSR_MSEL(IP1_15_12, RIF3_D0_A, SEL_DRIF3_0),
+ PINMUX_IPSR_MSEL(IP1_15_12, HCTS3_N_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP1_15_12, VI4_DATA6_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_19_16, RPC_RESET_N),
+ PINMUX_IPSR_MSEL(IP1_19_16, RIF3_D1_A, SEL_DRIF3_0),
+ PINMUX_IPSR_MSEL(IP1_19_16, HRTS3_N_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP1_19_16, VI4_DATA7_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_23_20, AVB_RD0),
+
+ PINMUX_IPSR_GPSR(IP1_27_24, AVB_RD1),
+
+ PINMUX_IPSR_GPSR(IP1_31_28, AVB_RD2),
+
+ /* IPSR2 */
+ PINMUX_IPSR_GPSR(IP2_3_0, AVB_TXCREFCLK),
+
+ PINMUX_IPSR_GPSR(IP2_7_4, AVB_MDIO),
+
+ PINMUX_IPSR_GPSR(IP2_11_8, AVB_MDC),
+
+ PINMUX_IPSR_GPSR(IP2_15_12, BS_N),
+ PINMUX_IPSR_MSEL(IP2_15_12, PWM0_A, SEL_PWM0_0),
+ PINMUX_IPSR_GPSR(IP2_15_12, AVB_MAGIC),
+ PINMUX_IPSR_GPSR(IP2_15_12, VI4_CLK),
+ PINMUX_IPSR_GPSR(IP2_15_12, TX3_C),
+ PINMUX_IPSR_MSEL(IP2_15_12, VI5_CLK_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP2_19_16, RD_N),
+ PINMUX_IPSR_MSEL(IP2_19_16, PWM1_A, SEL_PWM1_0),
+ PINMUX_IPSR_GPSR(IP2_19_16, AVB_LINK),
+ PINMUX_IPSR_GPSR(IP2_19_16, VI4_FIELD),
+ PINMUX_IPSR_MSEL(IP2_19_16, RX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_GPSR(IP2_19_16, FSCLKST2_N_A),
+ PINMUX_IPSR_MSEL(IP2_19_16, VI5_DATA0_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP2_23_20, RD_WR_N),
+ PINMUX_IPSR_MSEL(IP2_23_20, SCL7_A, SEL_I2C7_0),
+ PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH_A),
+ PINMUX_IPSR_GPSR(IP2_23_20, VI4_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP2_23_20, TX5_B),
+ PINMUX_IPSR_MSEL(IP2_23_20, SCK3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_MSEL(IP2_23_20, PWM5_A, SEL_PWM5_0),
+
+ PINMUX_IPSR_GPSR(IP2_27_24, EX_WAIT0),
+ PINMUX_IPSR_MSEL(IP2_27_24, SDA7_A, SEL_I2C7_0),
+ PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE_A),
+ PINMUX_IPSR_GPSR(IP2_27_24, VI4_HSYNC_N),
+ PINMUX_IPSR_MSEL(IP2_27_24, RX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MSEL(IP2_27_24, PWM6_A, SEL_PWM6_0),
+
+ PINMUX_IPSR_GPSR(IP2_31_28, A0),
+ PINMUX_IPSR_GPSR(IP2_31_28, IRQ0),
+ PINMUX_IPSR_MSEL(IP2_31_28, PWM2_A, SEL_PWM2_0),
+ PINMUX_IPSR_MSEL(IP2_31_28, MSIOF3_SS1_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_MSEL(IP2_31_28, VI5_CLK_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP2_31_28, DU_CDE),
+ PINMUX_IPSR_MSEL(IP2_31_28, HRX3_D, SEL_HSCIF3_3),
+ PINMUX_IPSR_GPSR(IP2_31_28, IERX),
+ PINMUX_IPSR_GPSR(IP2_31_28, QSTB_QHE),
+
+ /* IPSR3 */
+ PINMUX_IPSR_GPSR(IP3_3_0, A1),
+ PINMUX_IPSR_GPSR(IP3_3_0, IRQ1),
+ PINMUX_IPSR_MSEL(IP3_3_0, PWM3_A, SEL_PWM3_0),
+ PINMUX_IPSR_GPSR(IP3_3_0, DU_DOTCLKIN1),
+ PINMUX_IPSR_MSEL(IP3_3_0, VI5_DATA0_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP3_3_0, DU_DISP_CDE),
+ PINMUX_IPSR_MSEL(IP3_3_0, SDA6_B, SEL_I2C6_1),
+ PINMUX_IPSR_GPSR(IP3_3_0, IETX),
+ PINMUX_IPSR_GPSR(IP3_3_0, QCPV_QDE),
+
+ PINMUX_IPSR_GPSR(IP3_7_4, A2),
+ PINMUX_IPSR_GPSR(IP3_7_4, IRQ2),
+ PINMUX_IPSR_GPSR(IP3_7_4, AVB_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP3_7_4, VI4_CLKENB),
+ PINMUX_IPSR_MSEL(IP3_7_4, VI5_DATA1_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP3_7_4, DU_DISP),
+ PINMUX_IPSR_MSEL(IP3_7_4, SCL6_B, SEL_I2C6_1),
+ PINMUX_IPSR_GPSR(IP3_7_4, QSTVB_QVE),
+
+ PINMUX_IPSR_GPSR(IP3_11_8, A3),
+ PINMUX_IPSR_MSEL(IP3_11_8, CTS4_N_A, SEL_SCIF4_0),
+ PINMUX_IPSR_MSEL(IP3_11_8, PWM4_A, SEL_PWM4_0),
+ PINMUX_IPSR_GPSR(IP3_11_8, VI4_DATA12),
+ PINMUX_IPSR_GPSR(IP3_11_8, DU_DOTCLKOUT0),
+ PINMUX_IPSR_GPSR(IP3_11_8, HTX3_D),
+ PINMUX_IPSR_GPSR(IP3_11_8, IECLK),
+ PINMUX_IPSR_GPSR(IP3_11_8, LCDOUT12),
+
+ PINMUX_IPSR_GPSR(IP3_15_12, A4),
+ PINMUX_IPSR_MSEL(IP3_15_12, RTS4_N_TANS_A, SEL_SCIF4_0),
+ PINMUX_IPSR_MSEL(IP3_15_12, MSIOF3_SYNC_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP3_15_12, VI4_DATA8),
+ PINMUX_IPSR_MSEL(IP3_15_12, PWM2_B, SEL_PWM2_1),
+ PINMUX_IPSR_GPSR(IP3_15_12, DU_DG4),
+ PINMUX_IPSR_MSEL(IP3_15_12, RIF2_CLK_B, SEL_DRIF2_1),
+
+ PINMUX_IPSR_GPSR(IP3_19_16, A5),
+ PINMUX_IPSR_MSEL(IP3_19_16, SCK4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_MSEL(IP3_19_16, MSIOF3_SCK_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP3_19_16, VI4_DATA9),
+ PINMUX_IPSR_MSEL(IP3_19_16, PWM3_B, SEL_PWM3_1),
+ PINMUX_IPSR_MSEL(IP3_19_16, RIF2_SYNC_B, SEL_DRIF2_1),
+ PINMUX_IPSR_GPSR(IP3_19_16, QPOLA),
+
+ PINMUX_IPSR_GPSR(IP3_23_20, A6),
+ PINMUX_IPSR_MSEL(IP3_23_20, RX4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_MSEL(IP3_23_20, MSIOF3_RXD_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP3_23_20, VI4_DATA10),
+ PINMUX_IPSR_MSEL(IP3_23_20, RIF2_D0_B, SEL_DRIF2_1),
+
+ PINMUX_IPSR_GPSR(IP3_27_24, A7),
+ PINMUX_IPSR_GPSR(IP3_27_24, TX4_A),
+ PINMUX_IPSR_GPSR(IP3_27_24, MSIOF3_TXD_B),
+ PINMUX_IPSR_GPSR(IP3_27_24, VI4_DATA11),
+ PINMUX_IPSR_MSEL(IP3_27_24, RIF2_D1_B, SEL_DRIF2_1),
+
+ PINMUX_IPSR_GPSR(IP3_31_28, A8),
+ PINMUX_IPSR_MSEL(IP3_31_28, SDA6_A, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP3_31_28, RX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_MSEL(IP3_31_28, HRX4_C, SEL_HSCIF4_2),
+ PINMUX_IPSR_MSEL(IP3_31_28, VI5_HSYNC_N_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP3_31_28, DU_HSYNC),
+ PINMUX_IPSR_MSEL(IP3_31_28, VI4_DATA0_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP3_31_28, QSTH_QHS),
+
+ /* IPSR4 */
+ PINMUX_IPSR_GPSR(IP4_3_0, A9),
+ PINMUX_IPSR_GPSR(IP4_3_0, TX5_A),
+ PINMUX_IPSR_GPSR(IP4_3_0, IRQ3),
+ PINMUX_IPSR_GPSR(IP4_3_0, VI4_DATA16),
+ PINMUX_IPSR_MSEL(IP4_3_0, VI5_VSYNC_N_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_3_0, DU_DG7),
+ PINMUX_IPSR_GPSR(IP4_3_0, LCDOUT15),
+
+ PINMUX_IPSR_GPSR(IP4_7_4, A10),
+ PINMUX_IPSR_GPSR(IP4_7_4, IRQ4),
+ PINMUX_IPSR_MSEL(IP4_7_4, MSIOF2_SYNC_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP4_7_4, VI4_DATA13),
+ PINMUX_IPSR_MSEL(IP4_7_4, VI5_FIELD_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_7_4, DU_DG5),
+ PINMUX_IPSR_GPSR(IP4_7_4, FSCLKST2_N_B),
+ PINMUX_IPSR_GPSR(IP4_7_4, LCDOUT13),
+
+ PINMUX_IPSR_GPSR(IP4_11_8, A11),
+ PINMUX_IPSR_MSEL(IP4_11_8, SCL6_A, SEL_I2C6_0),
+ PINMUX_IPSR_GPSR(IP4_11_8, TX3_B),
+ PINMUX_IPSR_GPSR(IP4_11_8, HTX4_C),
+ PINMUX_IPSR_GPSR(IP4_11_8, DU_VSYNC),
+ PINMUX_IPSR_MSEL(IP4_11_8, VI4_DATA1_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP4_11_8, QSTVA_QVS),
+
+ PINMUX_IPSR_GPSR(IP4_15_12, A12),
+ PINMUX_IPSR_MSEL(IP4_15_12, RX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_GPSR(IP4_15_12, MSIOF2_SS2_B),
+ PINMUX_IPSR_GPSR(IP4_15_12, VI4_DATA17),
+ PINMUX_IPSR_MSEL(IP4_15_12, VI5_DATA3_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_15_12, DU_DG6),
+ PINMUX_IPSR_GPSR(IP4_15_12, LCDOUT14),
+
+ PINMUX_IPSR_GPSR(IP4_19_16, A13),
+ PINMUX_IPSR_MSEL(IP4_19_16, SCK5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_MSEL(IP4_19_16, MSIOF2_SCK_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP4_19_16, VI4_DATA14),
+ PINMUX_IPSR_MSEL(IP4_19_16, HRX4_D, SEL_HSCIF4_3),
+ PINMUX_IPSR_GPSR(IP4_19_16, DU_DB2),
+ PINMUX_IPSR_GPSR(IP4_19_16, LCDOUT2),
+
+ PINMUX_IPSR_GPSR(IP4_23_20, A14),
+ PINMUX_IPSR_GPSR(IP4_23_20, MSIOF1_SS1),
+ PINMUX_IPSR_MSEL(IP4_23_20, MSIOF2_RXD_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP4_23_20, VI4_DATA15),
+ PINMUX_IPSR_GPSR(IP4_23_20, HTX4_D),
+ PINMUX_IPSR_GPSR(IP4_23_20, DU_DB3),
+ PINMUX_IPSR_GPSR(IP4_23_20, LCDOUT3),
+
+ PINMUX_IPSR_GPSR(IP4_27_24, A15),
+ PINMUX_IPSR_GPSR(IP4_27_24, MSIOF1_SS2),
+ PINMUX_IPSR_GPSR(IP4_27_24, MSIOF2_TXD_B),
+ PINMUX_IPSR_GPSR(IP4_27_24, VI4_DATA18),
+ PINMUX_IPSR_MSEL(IP4_27_24, VI5_DATA4_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_27_24, DU_DB4),
+ PINMUX_IPSR_GPSR(IP4_27_24, LCDOUT4),
+
+ PINMUX_IPSR_GPSR(IP4_31_28, A16),
+ PINMUX_IPSR_GPSR(IP4_31_28, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP4_31_28, MSIOF2_SS1_B),
+ PINMUX_IPSR_GPSR(IP4_31_28, VI4_DATA19),
+ PINMUX_IPSR_MSEL(IP4_31_28, VI5_DATA5_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_31_28, DU_DB5),
+ PINMUX_IPSR_GPSR(IP4_31_28, LCDOUT5),
+
+ /* IPSR5 */
+ PINMUX_IPSR_GPSR(IP5_3_0, A17),
+ PINMUX_IPSR_GPSR(IP5_3_0, MSIOF1_RXD),
+ PINMUX_IPSR_GPSR(IP5_3_0, VI4_DATA20),
+ PINMUX_IPSR_MSEL(IP5_3_0, VI5_DATA6_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_3_0, DU_DB6),
+ PINMUX_IPSR_GPSR(IP5_3_0, LCDOUT6),
+
+ PINMUX_IPSR_GPSR(IP5_7_4, A18),
+ PINMUX_IPSR_GPSR(IP5_7_4, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP5_7_4, VI4_DATA21),
+ PINMUX_IPSR_MSEL(IP5_7_4, VI5_DATA7_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_7_4, DU_DB0),
+ PINMUX_IPSR_MSEL(IP5_7_4, HRX4_E, SEL_HSCIF4_4),
+ PINMUX_IPSR_GPSR(IP5_7_4, LCDOUT0),
+
+ PINMUX_IPSR_GPSR(IP5_11_8, A19),
+ PINMUX_IPSR_GPSR(IP5_11_8, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP5_11_8, VI4_DATA22),
+ PINMUX_IPSR_MSEL(IP5_11_8, VI5_DATA2_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_11_8, DU_DB1),
+ PINMUX_IPSR_GPSR(IP5_11_8, HTX4_E),
+ PINMUX_IPSR_GPSR(IP5_11_8, LCDOUT1),
+
+ PINMUX_IPSR_GPSR(IP5_15_12, CS0_N),
+ PINMUX_IPSR_GPSR(IP5_15_12, SCL5),
+ PINMUX_IPSR_GPSR(IP5_15_12, DU_DR0),
+ PINMUX_IPSR_MSEL(IP5_15_12, VI4_DATA2_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP5_15_12, LCDOUT16),
+
+ PINMUX_IPSR_GPSR(IP5_19_16, WE0_N),
+ PINMUX_IPSR_GPSR(IP5_19_16, SDA5),
+ PINMUX_IPSR_GPSR(IP5_19_16, DU_DR1),
+ PINMUX_IPSR_MSEL(IP5_19_16, VI4_DATA3_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP5_19_16, LCDOUT17),
+
+ PINMUX_IPSR_GPSR(IP5_23_20, D0),
+ PINMUX_IPSR_MSEL(IP5_23_20, MSIOF3_SCK_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_GPSR(IP5_23_20, DU_DR2),
+ PINMUX_IPSR_MSEL(IP5_23_20, CTS4_N_C, SEL_SCIF4_2),
+ PINMUX_IPSR_GPSR(IP5_23_20, LCDOUT18),
+
+ PINMUX_IPSR_GPSR(IP5_27_24, D1),
+ PINMUX_IPSR_MSEL(IP5_27_24, MSIOF3_SYNC_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_MSEL(IP5_27_24, SCK3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_GPSR(IP5_27_24, VI4_DATA23),
+ PINMUX_IPSR_MSEL(IP5_27_24, VI5_CLKENB_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_27_24, DU_DB7),
+ PINMUX_IPSR_MSEL(IP5_27_24, RTS4_N_TANS_C, SEL_SCIF4_2),
+ PINMUX_IPSR_GPSR(IP5_27_24, LCDOUT7),
+
+ PINMUX_IPSR_GPSR(IP5_31_28, D2),
+ PINMUX_IPSR_MSEL(IP5_31_28, MSIOF3_RXD_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_MSEL(IP5_31_28, RX5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_MSEL(IP5_31_28, VI5_DATA14_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_31_28, DU_DR3),
+ PINMUX_IPSR_MSEL(IP5_31_28, RX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_GPSR(IP5_31_28, LCDOUT19),
+
+ /* IPSR6 */
+ PINMUX_IPSR_GPSR(IP6_3_0, D3),
+ PINMUX_IPSR_GPSR(IP6_3_0, MSIOF3_TXD_A),
+ PINMUX_IPSR_GPSR(IP6_3_0, TX5_C),
+ PINMUX_IPSR_MSEL(IP6_3_0, VI5_DATA15_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP6_3_0, DU_DR4),
+ PINMUX_IPSR_GPSR(IP6_3_0, TX4_C),
+ PINMUX_IPSR_GPSR(IP6_3_0, LCDOUT20),
+
+ PINMUX_IPSR_GPSR(IP6_7_4, D4),
+ PINMUX_IPSR_GPSR(IP6_7_4, CANFD1_TX),
+ PINMUX_IPSR_MSEL(IP6_7_4, HSCK3_B, SEL_HSCIF3_1),
+ PINMUX_IPSR_GPSR(IP6_7_4, CAN1_TX),
+ PINMUX_IPSR_MSEL(IP6_7_4, RTS3_N_TANS_A, SEL_SCIF3_0),
+ PINMUX_IPSR_GPSR(IP6_7_4, MSIOF3_SS2_A),
+ PINMUX_IPSR_MSEL(IP6_7_4, VI5_DATA1_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP6_11_8, D5),
+ PINMUX_IPSR_MSEL(IP6_11_8, RX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_MSEL(IP6_11_8, HRX3_B, SEL_HSCIF3_1),
+ PINMUX_IPSR_GPSR(IP6_11_8, DU_DR5),
+ PINMUX_IPSR_MSEL(IP6_11_8, VI4_DATA4_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP6_11_8, LCDOUT21),
+
+ PINMUX_IPSR_GPSR(IP6_15_12, D6),
+ PINMUX_IPSR_GPSR(IP6_15_12, TX3_A),
+ PINMUX_IPSR_GPSR(IP6_15_12, HTX3_B),
+ PINMUX_IPSR_GPSR(IP6_15_12, DU_DR6),
+ PINMUX_IPSR_MSEL(IP6_15_12, VI4_DATA5_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP6_15_12, LCDOUT22),
+
+ PINMUX_IPSR_GPSR(IP6_19_16, D7),
+ PINMUX_IPSR_GPSR(IP6_19_16, CANFD1_RX),
+ PINMUX_IPSR_GPSR(IP6_19_16, IRQ5),
+ PINMUX_IPSR_GPSR(IP6_19_16, CAN1_RX),
+ PINMUX_IPSR_MSEL(IP6_19_16, CTS3_N_A, SEL_SCIF3_0),
+ PINMUX_IPSR_MSEL(IP6_19_16, VI5_DATA2_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP6_23_20, D8),
+ PINMUX_IPSR_MSEL(IP6_23_20, MSIOF2_SCK_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP6_23_20, SCK4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MSEL(IP6_23_20, VI5_DATA12_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP6_23_20, DU_DR7),
+ PINMUX_IPSR_MSEL(IP6_23_20, RIF3_CLK_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP6_23_20, HCTS3_N_E, SEL_HSCIF3_4),
+ PINMUX_IPSR_GPSR(IP6_23_20, LCDOUT23),
+
+ PINMUX_IPSR_GPSR(IP6_27_24, D9),
+ PINMUX_IPSR_MSEL(IP6_27_24, MSIOF2_SYNC_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP6_27_24, VI5_DATA10_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP6_27_24, DU_DG0),
+ PINMUX_IPSR_MSEL(IP6_27_24, RIF3_SYNC_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP6_27_24, HRX3_E, SEL_HSCIF3_4),
+ PINMUX_IPSR_GPSR(IP6_27_24, LCDOUT8),
+
+ PINMUX_IPSR_GPSR(IP6_31_28, D10),
+ PINMUX_IPSR_MSEL(IP6_31_28, MSIOF2_RXD_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP6_31_28, VI5_DATA13_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP6_31_28, DU_DG1),
+ PINMUX_IPSR_MSEL(IP6_31_28, RIF3_D0_B, SEL_DRIF3_1),
+ PINMUX_IPSR_GPSR(IP6_31_28, HTX3_E),
+ PINMUX_IPSR_GPSR(IP6_31_28, LCDOUT9),
+
+ /* IPSR7 */
+ PINMUX_IPSR_GPSR(IP7_3_0, D11),
+ PINMUX_IPSR_GPSR(IP7_3_0, MSIOF2_TXD_A),
+ PINMUX_IPSR_MSEL(IP7_3_0, VI5_DATA11_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP7_3_0, DU_DG2),
+ PINMUX_IPSR_MSEL(IP7_3_0, RIF3_D1_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP7_3_0, HRTS3_N_E, SEL_HSCIF3_4),
+ PINMUX_IPSR_GPSR(IP7_3_0, LCDOUT10),
+
+ PINMUX_IPSR_GPSR(IP7_7_4, D12),
+ PINMUX_IPSR_GPSR(IP7_7_4, CANFD0_TX),
+ PINMUX_IPSR_GPSR(IP7_7_4, TX4_B),
+ PINMUX_IPSR_GPSR(IP7_7_4, CAN0_TX),
+ PINMUX_IPSR_MSEL(IP7_7_4, VI5_DATA8_A, SEL_VIN5_0),
+ PINMUX_IPSR_MSEL(IP7_7_4, VI5_DATA3_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP7_11_8, D13),
+ PINMUX_IPSR_GPSR(IP7_11_8, CANFD0_RX),
+ PINMUX_IPSR_MSEL(IP7_11_8, RX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_GPSR(IP7_11_8, CAN0_RX),
+ PINMUX_IPSR_MSEL(IP7_11_8, VI5_DATA9_A, SEL_VIN5_0),
+ PINMUX_IPSR_MSEL(IP7_11_8, SCL7_B, SEL_I2C7_1),
+ PINMUX_IPSR_MSEL(IP7_11_8, VI5_DATA4_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP7_15_12, D14),
+ PINMUX_IPSR_GPSR(IP7_15_12, CAN_CLK),
+ PINMUX_IPSR_MSEL(IP7_15_12, HRX3_A, SEL_HSCIF3_0),
+ PINMUX_IPSR_GPSR(IP7_15_12, MSIOF2_SS2_A),
+ PINMUX_IPSR_MSEL(IP7_15_12, SDA7_B, SEL_I2C7_1),
+ PINMUX_IPSR_MSEL(IP7_15_12, VI5_DATA5_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP7_19_16, D15),
+ PINMUX_IPSR_GPSR(IP7_19_16, MSIOF2_SS1_A),
+ PINMUX_IPSR_GPSR(IP7_19_16, HTX3_A),
+ PINMUX_IPSR_GPSR(IP7_19_16, MSIOF3_SS1_A),
+ PINMUX_IPSR_GPSR(IP7_19_16, DU_DG3),
+ PINMUX_IPSR_GPSR(IP7_19_16, LCDOUT11),
+
+ PINMUX_IPSR_GPSR(IP7_23_20, SCL4),
+ PINMUX_IPSR_GPSR(IP7_23_20, CS1_N_A26),
+ PINMUX_IPSR_GPSR(IP7_23_20, DU_DOTCLKIN0),
+ PINMUX_IPSR_MSEL(IP7_23_20, VI4_DATA6_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP7_23_20, VI5_DATA6_B, SEL_VIN5_1),
+ PINMUX_IPSR_GPSR(IP7_23_20, QCLK),
+
+ PINMUX_IPSR_GPSR(IP7_27_24, SDA4),
+ PINMUX_IPSR_GPSR(IP7_27_24, WE1_N),
+ PINMUX_IPSR_MSEL(IP7_27_24, VI4_DATA7_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP7_27_24, VI5_DATA7_B, SEL_VIN5_1),
+ PINMUX_IPSR_GPSR(IP7_27_24, QPOLB),
+
+ PINMUX_IPSR_GPSR(IP7_31_28, SD0_CLK),
+ PINMUX_IPSR_GPSR(IP7_31_28, NFDATA8),
+ PINMUX_IPSR_MSEL(IP7_31_28, SCL1_C, SEL_I2C1_2),
+ PINMUX_IPSR_MSEL(IP7_31_28, HSCK1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP7_31_28, SDA2_E, SEL_I2C2_4),
+ PINMUX_IPSR_MSEL(IP7_31_28, FMCLK_B, SEL_FM_1),
+
+ /* IPSR8 */
+ PINMUX_IPSR_GPSR(IP8_3_0, SD0_CMD),
+ PINMUX_IPSR_GPSR(IP8_3_0, NFDATA9),
+ PINMUX_IPSR_MSEL(IP8_3_0, HRX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP8_3_0, SPEEDIN_B, SEL_SPEED_PULSE_IF_1),
+
+ PINMUX_IPSR_GPSR(IP8_7_4, SD0_DAT0),
+ PINMUX_IPSR_GPSR(IP8_7_4, NFDATA10),
+ PINMUX_IPSR_GPSR(IP8_7_4, HTX1_B),
+ PINMUX_IPSR_MSEL(IP8_7_4, REMOCON_B, SEL_REMOCON_1),
+
+ PINMUX_IPSR_GPSR(IP8_11_8, SD0_DAT1),
+ PINMUX_IPSR_GPSR(IP8_11_8, NFDATA11),
+ PINMUX_IPSR_MSEL(IP8_11_8, SDA2_C, SEL_I2C2_2),
+ PINMUX_IPSR_MSEL(IP8_11_8, HCTS1_N_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP8_11_8, FMIN_B, SEL_FM_1),
+
+ PINMUX_IPSR_GPSR(IP8_15_12, SD0_DAT2),
+ PINMUX_IPSR_GPSR(IP8_15_12, NFDATA12),
+ PINMUX_IPSR_MSEL(IP8_15_12, SCL2_C, SEL_I2C2_2),
+ PINMUX_IPSR_MSEL(IP8_15_12, HRTS1_N_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_GPSR(IP8_15_12, BPFCLK_B),
+
+ PINMUX_IPSR_GPSR(IP8_19_16, SD0_DAT3),
+ PINMUX_IPSR_GPSR(IP8_19_16, NFDATA13),
+ PINMUX_IPSR_MSEL(IP8_19_16, SDA1_C, SEL_I2C1_2),
+ PINMUX_IPSR_MSEL(IP8_19_16, SCL2_E, SEL_I2C2_4),
+ PINMUX_IPSR_MSEL(IP8_19_16, SPEEDIN_C, SEL_SPEED_PULSE_IF_2),
+ PINMUX_IPSR_MSEL(IP8_19_16, REMOCON_C, SEL_REMOCON_2),
+
+ PINMUX_IPSR_GPSR(IP8_23_20, SD1_CLK),
+ PINMUX_IPSR_MSEL(IP8_23_20, NFDATA14_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP8_27_24, SD1_CMD),
+ PINMUX_IPSR_MSEL(IP8_27_24, NFDATA15_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP8_31_28, SD1_DAT0),
+ PINMUX_IPSR_MSEL(IP8_31_28, NFWP_N_B, SEL_NDFC_1),
+
+ /* IPSR9 */
+ PINMUX_IPSR_GPSR(IP9_3_0, SD1_DAT1),
+ PINMUX_IPSR_MSEL(IP9_3_0, NFCE_N_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP9_7_4, SD1_DAT2),
+ PINMUX_IPSR_MSEL(IP9_7_4, NFALE_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP9_11_8, SD1_DAT3),
+ PINMUX_IPSR_MSEL(IP9_11_8, NFRB_N_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP9_15_12, SD3_CLK),
+ PINMUX_IPSR_GPSR(IP9_15_12, NFWE_N),
+
+ PINMUX_IPSR_GPSR(IP9_19_16, SD3_CMD),
+ PINMUX_IPSR_GPSR(IP9_19_16, NFRE_N),
+
+ PINMUX_IPSR_GPSR(IP9_23_20, SD3_DAT0),
+ PINMUX_IPSR_GPSR(IP9_23_20, NFDATA0),
+
+ PINMUX_IPSR_GPSR(IP9_27_24, SD3_DAT1),
+ PINMUX_IPSR_GPSR(IP9_27_24, NFDATA1),
+
+ PINMUX_IPSR_GPSR(IP9_31_28, SD3_DAT2),
+ PINMUX_IPSR_GPSR(IP9_31_28, NFDATA2),
+
+ /* IPSR10 */
+ PINMUX_IPSR_GPSR(IP10_3_0, SD3_DAT3),
+ PINMUX_IPSR_GPSR(IP10_3_0, NFDATA3),
+
+ PINMUX_IPSR_GPSR(IP10_7_4, SD3_DAT4),
+ PINMUX_IPSR_GPSR(IP10_7_4, NFDATA4),
+
+ PINMUX_IPSR_GPSR(IP10_11_8, SD3_DAT5),
+ PINMUX_IPSR_GPSR(IP10_11_8, NFDATA5),
+
+ PINMUX_IPSR_GPSR(IP10_15_12, SD3_DAT6),
+ PINMUX_IPSR_GPSR(IP10_15_12, NFDATA6),
+
+ PINMUX_IPSR_GPSR(IP10_19_16, SD3_DAT7),
+ PINMUX_IPSR_GPSR(IP10_19_16, NFDATA7),
+
+ PINMUX_IPSR_GPSR(IP10_23_20, SD3_DS),
+ PINMUX_IPSR_GPSR(IP10_23_20, NFCLE),
+
+ PINMUX_IPSR_GPSR(IP10_27_24, SD0_CD),
+ PINMUX_IPSR_GPSR(IP10_27_24, NFALE_A),
+ PINMUX_IPSR_GPSR(IP10_27_24, SD3_CD),
+ PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
+ PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1),
+ PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0),
+ PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0),
+
+ PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP),
+ PINMUX_IPSR_GPSR(IP10_31_28, NFRB_N_A),
+ PINMUX_IPSR_GPSR(IP10_31_28, SD3_WP),
+ PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
+ PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1),
+ PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, SEL_TIMER_TMU_0),
+ PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0),
+
+ /* IPSR11 */
+ PINMUX_IPSR_GPSR(IP11_3_0, SD1_CD),
+ PINMUX_IPSR_MSEL(IP11_3_0, NFCE_N_A, SEL_NDFC_0),
+ PINMUX_IPSR_GPSR(IP11_3_0, SSI_SCK1),
+ PINMUX_IPSR_MSEL(IP11_3_0, RIF0_D1_B, SEL_DRIF0_1),
+ PINMUX_IPSR_GPSR(IP11_3_0, TS_SDEN0),
+
+ PINMUX_IPSR_GPSR(IP11_7_4, SD1_WP),
+ PINMUX_IPSR_MSEL(IP11_7_4, NFWP_N_A, SEL_NDFC_0),
+ PINMUX_IPSR_GPSR(IP11_7_4, SSI_WS1),
+ PINMUX_IPSR_MSEL(IP11_7_4, RIF0_SYNC_B, SEL_DRIF0_1),
+ PINMUX_IPSR_GPSR(IP11_7_4, TS_SPSYNC0),
+
+ PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0),
+ PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC),
+ PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1),
+
+ PINMUX_IPSR_GPSR(IP11_15_12, TX0_A),
+ PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A),
+ PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0),
+ PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0),
+ PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1),
+
+ PINMUX_IPSR_MSEL(IP11_19_16, CTS0_N_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_19_16, NFDATA14_A, SEL_NDFC_0),
+ PINMUX_IPSR_GPSR(IP11_19_16, AUDIO_CLKOUT_A),
+ PINMUX_IPSR_GPSR(IP11_19_16, RIF1_D1),
+ PINMUX_IPSR_MSEL(IP11_19_16, SCIF_CLK_A, SEL_SCIF_0),
+ PINMUX_IPSR_MSEL(IP11_19_16, FMCLK_A, SEL_FM_0),
+
+ PINMUX_IPSR_MSEL(IP11_23_20, RTS0_N_TANS_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_23_20, NFDATA15_A, SEL_NDFC_0),
+ PINMUX_IPSR_GPSR(IP11_23_20, AUDIO_CLKOUT1_A),
+ PINMUX_IPSR_GPSR(IP11_23_20, RIF1_CLK),
+ PINMUX_IPSR_MSEL(IP11_23_20, SCL2_A, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP11_23_20, FMIN_A, SEL_FM_0),
+
+ PINMUX_IPSR_MSEL(IP11_27_24, SCK0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_27_24, HSCK1_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_GPSR(IP11_27_24, USB3HS0_ID),
+ PINMUX_IPSR_GPSR(IP11_27_24, RTS1_N_TANS),
+ PINMUX_IPSR_MSEL(IP11_27_24, SDA2_A, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP11_27_24, FMCLK_C, SEL_FM_2),
+ PINMUX_IPSR_GPSR(IP11_27_24, USB1_ID),
+
+ PINMUX_IPSR_GPSR(IP11_31_28, RX1),
+ PINMUX_IPSR_MSEL(IP11_31_28, HRX2_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MSEL(IP11_31_28, SSI_SCK9_B, SEL_SSI9_1),
+ PINMUX_IPSR_GPSR(IP11_31_28, AUDIO_CLKOUT1_B),
+
+ /* IPSR12 */
+ PINMUX_IPSR_GPSR(IP12_3_0, TX1),
+ PINMUX_IPSR_GPSR(IP12_3_0, HTX2_B),
+ PINMUX_IPSR_MSEL(IP12_3_0, SSI_WS9_B, SEL_SSI9_1),
+ PINMUX_IPSR_GPSR(IP12_3_0, AUDIO_CLKOUT3_B),
+
+ PINMUX_IPSR_GPSR(IP12_7_4, SCK2_A),
+ PINMUX_IPSR_MSEL(IP12_7_4, HSCK0_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_MSEL(IP12_7_4, AUDIO_CLKB_A, SEL_ADGB_0),
+ PINMUX_IPSR_GPSR(IP12_7_4, CTS1_N),
+ PINMUX_IPSR_MSEL(IP12_7_4, RIF0_CLK_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP12_7_4, REMOCON_A, SEL_REMOCON_0),
+ PINMUX_IPSR_MSEL(IP12_7_4, SCIF_CLK_B, SEL_SCIF_1),
+
+ PINMUX_IPSR_GPSR(IP12_11_8, TX2_A),
+ PINMUX_IPSR_MSEL(IP12_11_8, HRX0_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_GPSR(IP12_11_8, AUDIO_CLKOUT2_A),
+ PINMUX_IPSR_MSEL(IP12_11_8, SCL1_A, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP12_11_8, FSO_CFE_0_N_A, SEL_FSO_0),
+ PINMUX_IPSR_GPSR(IP12_11_8, TS_SDEN1),
+
+ PINMUX_IPSR_GPSR(IP12_15_12, RX2_A),
+ PINMUX_IPSR_GPSR(IP12_15_12, HTX0_A),
+ PINMUX_IPSR_GPSR(IP12_15_12, AUDIO_CLKOUT3_A),
+ PINMUX_IPSR_MSEL(IP12_15_12, SDA1_A, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP12_15_12, FSO_CFE_1_N_A, SEL_FSO_0),
+ PINMUX_IPSR_GPSR(IP12_15_12, TS_SPSYNC1),
+
+ PINMUX_IPSR_GPSR(IP12_19_16, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP12_19_16, SSI_SCK78),
+
+ PINMUX_IPSR_GPSR(IP12_23_20, MSIOF0_RXD),
+ PINMUX_IPSR_GPSR(IP12_23_20, SSI_WS78),
+ PINMUX_IPSR_GPSR(IP12_23_20, TX2_B),
+
+ PINMUX_IPSR_GPSR(IP12_27_24, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP12_27_24, SSI_SDATA7),
+ PINMUX_IPSR_GPSR(IP12_27_24, RX2_B),
+
+ PINMUX_IPSR_GPSR(IP12_31_28, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP12_31_28, AUDIO_CLKOUT_B),
+ PINMUX_IPSR_GPSR(IP12_31_28, SSI_SDATA8),
+
+ /* IPSR13 */
+ PINMUX_IPSR_GPSR(IP13_3_0, MSIOF0_SS1),
+ PINMUX_IPSR_MSEL(IP13_3_0, HRX2_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_GPSR(IP13_3_0, SSI_SCK4),
+ PINMUX_IPSR_MSEL(IP13_3_0, HCTS0_N_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_GPSR(IP13_3_0, BPFCLK_C),
+ PINMUX_IPSR_MSEL(IP13_3_0, SPEEDIN_A, SEL_SPEED_PULSE_IF_0),
+
+ PINMUX_IPSR_GPSR(IP13_7_4, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP13_7_4, HTX2_A),
+ PINMUX_IPSR_GPSR(IP13_7_4, SSI_WS4),
+ PINMUX_IPSR_MSEL(IP13_7_4, HRTS0_N_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_MSEL(IP13_7_4, FMIN_C, SEL_FM_2),
+ PINMUX_IPSR_GPSR(IP13_7_4, BPFCLK_A),
+
+ PINMUX_IPSR_GPSR(IP13_11_8, SSI_SDATA9),
+ PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKC_A, SEL_ADGC_0),
+ PINMUX_IPSR_GPSR(IP13_11_8, SCK1),
+
+ PINMUX_IPSR_GPSR(IP13_15_12, MLB_CLK),
+ PINMUX_IPSR_MSEL(IP13_15_12, RX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MSEL(IP13_15_12, RIF0_D0_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP13_15_12, SCL1_B, SEL_I2C1_1),
+ PINMUX_IPSR_MSEL(IP13_15_12, TCLK1_B, SEL_TIMER_TMU_1),
+ PINMUX_IPSR_GPSR(IP13_15_12, SIM0_RST_A),
+
+ PINMUX_IPSR_GPSR(IP13_19_16, MLB_SIG),
+ PINMUX_IPSR_MSEL(IP13_19_16, SCK0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1),
+ PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1),
+ PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0),
+
+ PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT),
+ PINMUX_IPSR_GPSR(IP13_23_20, TX0_B),
+ PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_A, SEL_DRIF0_0),
+ PINMUX_IPSR_GPSR(IP13_23_20, SIM0_CLK_A),
+
+ PINMUX_IPSR_GPSR(IP13_27_24, SSI_SCK01239),
+
+ PINMUX_IPSR_GPSR(IP13_31_28, SSI_WS01239),
+
+ /* IPSR14 */
+ PINMUX_IPSR_GPSR(IP14_3_0, SSI_SDATA0),
+
+ PINMUX_IPSR_GPSR(IP14_7_4, SSI_SDATA1),
+ PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_B, SEL_ADGC_1),
+ PINMUX_IPSR_MSEL(IP14_7_4, PWM0_B, SEL_PWM0_1),
+
+ PINMUX_IPSR_GPSR(IP14_11_8, SSI_SDATA2),
+ PINMUX_IPSR_GPSR(IP14_11_8, AUDIO_CLKOUT2_B),
+ PINMUX_IPSR_MSEL(IP14_11_8, SSI_SCK9_A, SEL_SSI9_0),
+ PINMUX_IPSR_MSEL(IP14_11_8, PWM1_B, SEL_PWM1_1),
+
+ PINMUX_IPSR_GPSR(IP14_15_12, SSI_SCK349),
+ PINMUX_IPSR_MSEL(IP14_15_12, PWM2_C, SEL_PWM2_2),
+
+ PINMUX_IPSR_GPSR(IP14_19_16, SSI_WS349),
+ PINMUX_IPSR_MSEL(IP14_19_16, PWM3_C, SEL_PWM3_2),
+
+ PINMUX_IPSR_GPSR(IP14_23_20, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP14_23_20, AUDIO_CLKOUT1_C),
+ PINMUX_IPSR_MSEL(IP14_23_20, AUDIO_CLKB_B, SEL_ADGB_1),
+ PINMUX_IPSR_MSEL(IP14_23_20, PWM4_B, SEL_PWM4_1),
+
+ PINMUX_IPSR_GPSR(IP14_27_24, SSI_SDATA4),
+ PINMUX_IPSR_MSEL(IP14_27_24, SSI_WS9_A, SEL_SSI9_0),
+ PINMUX_IPSR_MSEL(IP14_27_24, PWM5_B, SEL_PWM5_1),
+
+ PINMUX_IPSR_GPSR(IP14_31_28, SSI_SCK5),
+ PINMUX_IPSR_MSEL(IP14_31_28, HRX0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_GPSR(IP14_31_28, USB0_PWEN_B),
+ PINMUX_IPSR_MSEL(IP14_31_28, SCL2_D, SEL_I2C2_3),
+ PINMUX_IPSR_MSEL(IP14_31_28, PWM6_B, SEL_PWM6_1),
+
+ /* IPSR15 */
+ PINMUX_IPSR_GPSR(IP15_3_0, SSI_WS5),
+ PINMUX_IPSR_GPSR(IP15_3_0, HTX0_B),
+ PINMUX_IPSR_MSEL(IP15_3_0, USB0_OVC_B, SEL_USB_20_CH0_1),
+ PINMUX_IPSR_MSEL(IP15_3_0, SDA2_D, SEL_I2C2_3),
+
+ PINMUX_IPSR_GPSR(IP15_7_4, SSI_SDATA5),
+ PINMUX_IPSR_MSEL(IP15_7_4, HSCK0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_MSEL(IP15_7_4, AUDIO_CLKB_C, SEL_ADGB_2),
+ PINMUX_IPSR_GPSR(IP15_7_4, TPU0TO0),
+
+ PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK6),
+ PINMUX_IPSR_MSEL(IP15_11_8, HSCK2_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_MSEL(IP15_11_8, AUDIO_CLKC_C, SEL_ADGC_2),
+ PINMUX_IPSR_GPSR(IP15_11_8, TPU0TO1),
+ PINMUX_IPSR_MSEL(IP15_11_8, FSO_CFE_0_N_B, SEL_FSO_1),
+ PINMUX_IPSR_GPSR(IP15_11_8, SIM0_RST_B),
+
+ PINMUX_IPSR_GPSR(IP15_15_12, SSI_WS6),
+ PINMUX_IPSR_MSEL(IP15_15_12, HCTS2_N_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_GPSR(IP15_15_12, AUDIO_CLKOUT2_C),
+ PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2),
+ PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3),
+ PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1),
+ PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1),
+
+ PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6),
+ PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_GPSR(IP15_19_16, AUDIO_CLKOUT3_C),
+ PINMUX_IPSR_GPSR(IP15_19_16, TPU0TO3),
+ PINMUX_IPSR_MSEL(IP15_19_16, SCL1_D, SEL_I2C1_3),
+ PINMUX_IPSR_MSEL(IP15_19_16, FSO_TOE_N_B, SEL_FSO_1),
+ PINMUX_IPSR_GPSR(IP15_19_16, SIM0_CLK_B),
+
+ PINMUX_IPSR_GPSR(IP15_23_20, AUDIO_CLKA),
+
+ PINMUX_IPSR_GPSR(IP15_27_24, USB30_PWEN),
+ PINMUX_IPSR_GPSR(IP15_27_24, USB0_PWEN_A),
+
+ PINMUX_IPSR_GPSR(IP15_31_28, USB30_OVC),
+ PINMUX_IPSR_MSEL(IP15_31_28, USB0_OVC_A, SEL_USB_20_CH0_0),
+
+/*
+ * Static pins can not be muxed between different functions but
+ * still need mark entries in the pinmux list. Add each static
+ * pin to the list without an associated function. The sh-pfc
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
+ */
+#define FM(x) PINMUX_DATA(x##_MARK, 0),
+ PINMUX_STATIC
+#undef FM
+};
+
+/*
+ * R8A77990 has 7 banks with 32 GPIOs in each => 224 GPIOs.
+ * Physical layout rows: A - AE, cols: 1 - 25.
+ */
+/*
+ * Fold a two-letter row ("Ax", i.e. AA..AE) into the numbering space just
+ * past 'Z', so PIN_NUMBER() stays monotonic across one- and two-letter rows.
+ */
+#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
+/*
+ * Synthesize a unique pin number from (row, column). The +300 offset keeps
+ * these numbers above the 224 GPIO pin numbers so the two ranges never clash.
+ */
+#define PIN_NUMBER(r, c) (((r) - 'A') * 25 + (c) + 300)
+#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+/* Sentinel for bias-register bit slots that have no pin connected. */
+#define PIN_NONE U16_MAX
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+
+ /*
+ * Pins not associated with a GPIO port.
+ *
+ * The pin positions are different between different R8A77990
+ * packages, all that is needed for the pfc driver is a unique
+ * number for each pin. To this end use the pin layout from
+ * R8A77990 to calculate a unique number for each pin.
+ */
+ SH_PFC_PIN_NAMED_CFG('F', 1, TRST_N, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('F', 3, TMS, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('F', 4, TCK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('G', 2, TDI, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('G', 3, FSCLKST_N, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('H', 1, ASEBRK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 1, AVB_TXC, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 2, AVB_TD0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 3, AVB_TD1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 5, AVB_TD2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 6, AVB_TD3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('P', 3, AVB_TX_CTL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('P', 4, AVB_MDIO, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('P', 5, AVB_MDC, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('T', 21, MLB_REF, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('D'), 3, PRESETOUT_N, CFG_FLAGS),
+};
+
+/* - EtherAVB --------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
+ /* AVB_LINK */
+ RCAR_GP_PIN(2, 23),
+};
+
+static const unsigned int avb_link_mux[] = {
+ AVB_LINK_MARK,
+};
+
+static const unsigned int avb_magic_pins[] = {
+ /* AVB_MAGIC */
+ RCAR_GP_PIN(2, 22),
+};
+
+static const unsigned int avb_magic_mux[] = {
+ AVB_MAGIC_MARK,
+};
+
+static const unsigned int avb_phy_int_pins[] = {
+ /* AVB_PHY_INT */
+ RCAR_GP_PIN(2, 21),
+};
+
+static const unsigned int avb_phy_int_mux[] = {
+ AVB_PHY_INT_MARK,
+};
+
+static const unsigned int avb_mii_pins[] = {
+ /*
+ * AVB_RX_CTL, AVB_RXC, AVB_RD0,
+ * AVB_RD1, AVB_RD2, AVB_RD3,
+ * AVB_TXCREFCLK
+ */
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 16),
+ RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 18), RCAR_GP_PIN(2, 19),
+ RCAR_GP_PIN(2, 20),
+};
+
+static const unsigned int avb_mii_mux[] = {
+ AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK,
+ AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK,
+ AVB_TXCREFCLK_MARK,
+};
+
+static const unsigned int avb_avtp_pps_pins[] = {
+ /* AVB_AVTP_PPS */
+ RCAR_GP_PIN(1, 2),
+};
+
+static const unsigned int avb_avtp_pps_mux[] = {
+ AVB_AVTP_PPS_MARK,
+};
+
+static const unsigned int avb_avtp_match_a_pins[] = {
+ /* AVB_AVTP_MATCH_A */
+ RCAR_GP_PIN(2, 24),
+};
+
+static const unsigned int avb_avtp_match_a_mux[] = {
+ AVB_AVTP_MATCH_A_MARK,
+};
+
+static const unsigned int avb_avtp_capture_a_pins[] = {
+ /* AVB_AVTP_CAPTURE_A */
+ RCAR_GP_PIN(2, 25),
+};
+
+static const unsigned int avb_avtp_capture_a_mux[] = {
+ AVB_AVTP_CAPTURE_A_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c1_a_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9),
+};
+
+static const unsigned int i2c1_a_mux[] = {
+ SCL1_A_MARK, SDA1_A_MARK,
+};
+
+static const unsigned int i2c1_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 18),
+};
+
+static const unsigned int i2c1_b_mux[] = {
+ SCL1_B_MARK, SDA1_B_MARK,
+};
+
+static const unsigned int i2c1_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 5),
+};
+
+static const unsigned int i2c1_c_mux[] = {
+ SCL1_C_MARK, SDA1_C_MARK,
+};
+
+static const unsigned int i2c1_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 15),
+};
+
+static const unsigned int i2c1_d_mux[] = {
+ SCL1_D_MARK, SDA1_D_MARK,
+};
+
+static const unsigned int i2c2_a_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 0),
+};
+
+static const unsigned int i2c2_a_mux[] = {
+ SCL2_A_MARK, SDA2_A_MARK,
+};
+
+static const unsigned int i2c2_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
+};
+
+static const unsigned int i2c2_b_mux[] = {
+ SCL2_B_MARK, SDA2_B_MARK,
+};
+
+static const unsigned int i2c2_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 3),
+};
+
+static const unsigned int i2c2_c_mux[] = {
+ SCL2_C_MARK, SDA2_C_MARK,
+};
+
+static const unsigned int i2c2_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+
+static const unsigned int i2c2_d_mux[] = {
+ SCL2_D_MARK, SDA2_D_MARK,
+};
+
+static const unsigned int i2c2_e_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 0),
+};
+
+static const unsigned int i2c2_e_mux[] = {
+ SCL2_E_MARK, SDA2_E_MARK,
+};
+
+static const unsigned int i2c4_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(0, 16), RCAR_GP_PIN(0, 17),
+};
+
+static const unsigned int i2c4_mux[] = {
+ SCL4_MARK, SDA4_MARK,
+};
+
+static const unsigned int i2c5_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(1, 21), RCAR_GP_PIN(1, 22),
+};
+
+static const unsigned int i2c5_mux[] = {
+ SCL5_MARK, SDA5_MARK,
+};
+
+static const unsigned int i2c6_a_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(1, 11), RCAR_GP_PIN(1, 8),
+};
+
+static const unsigned int i2c6_a_mux[] = {
+ SCL6_A_MARK, SDA6_A_MARK,
+};
+
+static const unsigned int i2c6_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+};
+
+static const unsigned int i2c6_b_mux[] = {
+ SCL6_B_MARK, SDA6_B_MARK,
+};
+
+static const unsigned int i2c7_a_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 24), RCAR_GP_PIN(2, 25),
+};
+
+static const unsigned int i2c7_a_mux[] = {
+ SCL7_A_MARK, SDA7_A_MARK,
+};
+
+static const unsigned int i2c7_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(0, 13), RCAR_GP_PIN(0, 14),
+};
+
+static const unsigned int i2c7_b_mux[] = {
+ SCL7_B_MARK, SDA7_B_MARK,
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+
+static const unsigned int scif0_data_a_mux[] = {
+ RX0_A_MARK, TX0_A_MARK,
+};
+
+static const unsigned int scif0_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+
+static const unsigned int scif0_clk_a_mux[] = {
+ SCK0_A_MARK,
+};
+
+static const unsigned int scif0_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+
+static const unsigned int scif0_ctrl_a_mux[] = {
+ RTS0_N_TANS_A_MARK, CTS0_N_A_MARK,
+};
+
+static const unsigned int scif0_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 19),
+};
+
+static const unsigned int scif0_data_b_mux[] = {
+ RX0_B_MARK, TX0_B_MARK,
+};
+
+static const unsigned int scif0_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 18),
+};
+
+static const unsigned int scif0_clk_b_mux[] = {
+ SCK0_B_MARK,
+};
+
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 16),
+};
+
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 7),
+};
+
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_TANS_MARK, CTS1_N_MARK,
+};
+
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 8),
+};
+
+static const unsigned int scif2_data_a_mux[] = {
+ RX2_A_MARK, TX2_A_MARK,
+};
+
+static const unsigned int scif2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 7),
+};
+
+static const unsigned int scif2_clk_a_mux[] = {
+ SCK2_A_MARK,
+};
+
+static const unsigned int scif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 11),
+};
+
+static const unsigned int scif2_data_b_mux[] = {
+ RX2_B_MARK, TX2_B_MARK,
+};
+
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6),
+};
+
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+
+static const unsigned int scif3_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 1),
+};
+
+static const unsigned int scif3_clk_a_mux[] = {
+ SCK3_A_MARK,
+};
+
+static const unsigned int scif3_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 7),
+};
+
+static const unsigned int scif3_ctrl_a_mux[] = {
+ RTS3_N_TANS_A_MARK, CTS3_N_A_MARK,
+};
+
+static const unsigned int scif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+
+static const unsigned int scif3_data_b_mux[] = {
+ RX3_B_MARK, TX3_B_MARK,
+};
+
+static const unsigned int scif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 22),
+};
+
+static const unsigned int scif3_data_c_mux[] = {
+ RX3_C_MARK, TX3_C_MARK,
+};
+
+static const unsigned int scif3_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 24),
+};
+
+static const unsigned int scif3_clk_c_mux[] = {
+ SCK3_C_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+
+static const unsigned int scif4_data_a_mux[] = {
+ RX4_A_MARK, TX4_A_MARK,
+};
+
+static const unsigned int scif4_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 5),
+};
+
+static const unsigned int scif4_clk_a_mux[] = {
+ SCK4_A_MARK,
+};
+
+static const unsigned int scif4_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3),
+};
+
+static const unsigned int scif4_ctrl_a_mux[] = {
+ RTS4_N_TANS_A_MARK, CTS4_N_A_MARK,
+};
+
+static const unsigned int scif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 13), RCAR_GP_PIN(0, 12),
+};
+
+static const unsigned int scif4_data_b_mux[] = {
+ RX4_B_MARK, TX4_B_MARK,
+};
+
+static const unsigned int scif4_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 8),
+};
+
+static const unsigned int scif4_clk_b_mux[] = {
+ SCK4_B_MARK,
+};
+
+static const unsigned int scif4_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+};
+
+static const unsigned int scif4_data_c_mux[] = {
+ RX4_C_MARK, TX4_C_MARK,
+};
+
+static const unsigned int scif4_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(0, 1), RCAR_GP_PIN(0, 0),
+};
+
+static const unsigned int scif4_ctrl_c_mux[] = {
+ RTS4_N_TANS_C_MARK, CTS4_N_C_MARK,
+};
+
+/* - SCIF5 ------------------------------------------------------------------ */
+static const unsigned int scif5_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 9),
+};
+
+static const unsigned int scif5_data_a_mux[] = {
+ RX5_A_MARK, TX5_A_MARK,
+};
+
+static const unsigned int scif5_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 13),
+};
+
+static const unsigned int scif5_clk_a_mux[] = {
+ SCK5_A_MARK,
+};
+
+static const unsigned int scif5_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 25), RCAR_GP_PIN(2, 24),
+};
+
+static const unsigned int scif5_data_b_mux[] = {
+ RX5_B_MARK, TX5_B_MARK,
+};
+
+static const unsigned int scif5_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+};
+
+static const unsigned int scif5_data_c_mux[] = {
+ RX5_C_MARK, TX5_C_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_a_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(5, 3),
+};
+
+static const unsigned int scif_clk_a_mux[] = {
+ SCIF_CLK_A_MARK,
+};
+
+static const unsigned int scif_clk_b_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(5, 7),
+};
+
+static const unsigned int scif_clk_b_mux[] = {
+ SCIF_CLK_B_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c1_c),
+ SH_PFC_PIN_GROUP(i2c1_d),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c2_c),
+ SH_PFC_PIN_GROUP(i2c2_d),
+ SH_PFC_PIN_GROUP(i2c2_e),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c5),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c7_a),
+ SH_PFC_PIN_GROUP(i2c7_b),
+ SH_PFC_PIN_GROUP(scif0_data_a),
+ SH_PFC_PIN_GROUP(scif0_clk_a),
+ SH_PFC_PIN_GROUP(scif0_ctrl_a),
+ SH_PFC_PIN_GROUP(scif0_data_b),
+ SH_PFC_PIN_GROUP(scif0_clk_b),
+ SH_PFC_PIN_GROUP(scif1_data),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk_a),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk_a),
+ SH_PFC_PIN_GROUP(scif3_ctrl_a),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_c),
+ SH_PFC_PIN_GROUP(scif3_clk_c),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_clk_a),
+ SH_PFC_PIN_GROUP(scif4_ctrl_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_ctrl_c),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_clk_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_data_c),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+};
+
+static const char * const avb_groups[] = {
+ "avb_link",
+ "avb_magic",
+ "avb_phy_int",
+ "avb_mii",
+ "avb_avtp_pps",
+ "avb_avtp_match_a",
+ "avb_avtp_capture_a",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_a",
+ "i2c1_b",
+ "i2c1_c",
+ "i2c1_d",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_a",
+ "i2c2_b",
+ "i2c2_c",
+ "i2c2_d",
+ "i2c2_e",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4",
+};
+
+static const char * const i2c5_groups[] = {
+ "i2c5",
+};
+
+static const char * const i2c6_groups[] = {
+ "i2c6_a",
+ "i2c6_b",
+};
+
+static const char * const i2c7_groups[] = {
+ "i2c7_a",
+ "i2c7_b",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data_a",
+ "scif0_clk_a",
+ "scif0_ctrl_a",
+ "scif0_data_b",
+ "scif0_clk_b",
+};
+
+static const char * const scif1_groups[] = {
+ "scif1_data",
+ "scif1_clk",
+ "scif1_ctrl",
+};
+
+static const char * const scif2_groups[] = {
+ "scif2_data_a",
+ "scif2_clk_a",
+ "scif2_data_b",
+};
+
+static const char * const scif3_groups[] = {
+ "scif3_data_a",
+ "scif3_clk_a",
+ "scif3_ctrl_a",
+ "scif3_data_b",
+ "scif3_data_c",
+ "scif3_clk_c",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data_a",
+ "scif4_clk_a",
+ "scif4_ctrl_a",
+ "scif4_data_b",
+ "scif4_clk_b",
+ "scif4_data_c",
+ "scif4_ctrl_c",
+};
+
+static const char * const scif5_groups[] = {
+ "scif5_data_a",
+ "scif5_clk_a",
+ "scif5_data_b",
+ "scif5_data_c",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk_a",
+ "scif_clk_b",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c5),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(i2c7),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_0_17_FN, GPSR0_17,
+ GP_0_16_FN, GPSR0_16,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_2_25_FN, GPSR2_25,
+ GP_2_24_FN, GPSR2_24,
+ GP_2_23_FN, GPSR2_23,
+ GP_2_22_FN, GPSR2_22,
+ GP_2_21_FN, GPSR2_21,
+ GP_2_20_FN, GPSR2_20,
+ GP_2_19_FN, GPSR2_19,
+ GP_2_18_FN, GPSR2_18,
+ GP_2_17_FN, GPSR2_17,
+ GP_2_16_FN, GPSR2_16,
+ GP_2_15_FN, GPSR2_15,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, }
+ },
+ { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, }
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ IP0_31_28
+ IP0_27_24
+ IP0_23_20
+ IP0_19_16
+ IP0_15_12
+ IP0_11_8
+ IP0_7_4
+ IP0_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ IP1_31_28
+ IP1_27_24
+ IP1_23_20
+ IP1_19_16
+ IP1_15_12
+ IP1_11_8
+ IP1_7_4
+ IP1_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ IP2_31_28
+ IP2_27_24
+ IP2_23_20
+ IP2_19_16
+ IP2_15_12
+ IP2_11_8
+ IP2_7_4
+ IP2_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ IP3_31_28
+ IP3_27_24
+ IP3_23_20
+ IP3_19_16
+ IP3_15_12
+ IP3_11_8
+ IP3_7_4
+ IP3_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ IP4_31_28
+ IP4_27_24
+ IP4_23_20
+ IP4_19_16
+ IP4_15_12
+ IP4_11_8
+ IP4_7_4
+ IP4_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ IP5_31_28
+ IP5_27_24
+ IP5_23_20
+ IP5_19_16
+ IP5_15_12
+ IP5_11_8
+ IP5_7_4
+ IP5_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ IP6_31_28
+ IP6_27_24
+ IP6_23_20
+ IP6_19_16
+ IP6_15_12
+ IP6_11_8
+ IP6_7_4
+ IP6_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ IP7_31_28
+ IP7_27_24
+ IP7_23_20
+ IP7_19_16
+ IP7_15_12
+ IP7_11_8
+ IP7_7_4
+ IP7_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ IP8_31_28
+ IP8_27_24
+ IP8_23_20
+ IP8_19_16
+ IP8_15_12
+ IP8_11_8
+ IP8_7_4
+ IP8_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ IP9_31_28
+ IP9_27_24
+ IP9_23_20
+ IP9_19_16
+ IP9_15_12
+ IP9_11_8
+ IP9_7_4
+ IP9_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ IP10_31_28
+ IP10_27_24
+ IP10_23_20
+ IP10_19_16
+ IP10_15_12
+ IP10_11_8
+ IP10_7_4
+ IP10_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+ IP11_31_28
+ IP11_27_24
+ IP11_23_20
+ IP11_19_16
+ IP11_15_12
+ IP11_11_8
+ IP11_7_4
+ IP11_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+ IP12_31_28
+ IP12_27_24
+ IP12_23_20
+ IP12_19_16
+ IP12_15_12
+ IP12_11_8
+ IP12_7_4
+ IP12_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+ IP13_31_28
+ IP13_27_24
+ IP13_23_20
+ IP13_19_16
+ IP13_15_12
+ IP13_11_8
+ IP13_7_4
+ IP13_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
+ IP14_31_28
+ IP14_27_24
+ IP14_23_20
+ IP14_19_16
+ IP14_15_12
+ IP14_11_8
+ IP14_7_4
+ IP14_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
+ IP15_31_28
+ IP15_27_24
+ IP15_23_20
+ IP15_19_16
+ IP15_15_12
+ IP15_11_8
+ IP15_7_4
+ IP15_3_0 }
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
+ 1, 2, 1, 2, 1, 1, 1, 1, 2, 3, 1,
+ 1, 1, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2) {
+ /* RESERVED 31 */
+ 0, 0,
+ MOD_SEL0_30_29
+ MOD_SEL0_28
+ MOD_SEL0_27_26
+ MOD_SEL0_25
+ MOD_SEL0_24
+ MOD_SEL0_23
+ MOD_SEL0_22
+ MOD_SEL0_21_20
+ MOD_SEL0_19_18_17
+ MOD_SEL0_16
+ MOD_SEL0_15
+ MOD_SEL0_14
+ MOD_SEL0_13_12
+ MOD_SEL0_11_10
+ MOD_SEL0_9
+ MOD_SEL0_8
+ MOD_SEL0_7
+ MOD_SEL0_6_5
+ MOD_SEL0_4
+ MOD_SEL0_3
+ MOD_SEL0_2
+ MOD_SEL0_1_0 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
+ 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
+ 1, 2, 2, 2, 1, 1, 2, 1, 4) {
+ MOD_SEL1_31
+ MOD_SEL1_30
+ MOD_SEL1_29
+ MOD_SEL1_28
+ /* RESERVED 27 */
+ 0, 0,
+ MOD_SEL1_26
+ MOD_SEL1_25
+ MOD_SEL1_24_23_22
+ MOD_SEL1_21_20_19
+ MOD_SEL1_18
+ MOD_SEL1_17
+ MOD_SEL1_16
+ MOD_SEL1_15
+ MOD_SEL1_14_13
+ MOD_SEL1_12_11
+ MOD_SEL1_10_9
+ MOD_SEL1_8
+ MOD_SEL1_7
+ MOD_SEL1_6_5
+ MOD_SEL1_4
+ /* RESERVED 3, 2, 1, 0 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { },
+};
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [0] = RCAR_GP_PIN(2, 23), /* RD# */
+ [1] = RCAR_GP_PIN(2, 22), /* BS# */
+ [2] = RCAR_GP_PIN(2, 21), /* AVB_PHY_INT */
+ [3] = PIN_NUMBER('P', 5), /* AVB_MDC */
+ [4] = PIN_NUMBER('P', 4), /* AVB_MDIO */
+ [5] = RCAR_GP_PIN(2, 20), /* AVB_TXCREFCLK */
+ [6] = PIN_NUMBER('N', 6), /* AVB_TD3 */
+ [7] = PIN_NUMBER('N', 5), /* AVB_TD2 */
+ [8] = PIN_NUMBER('N', 3), /* AVB_TD1 */
+ [9] = PIN_NUMBER('N', 2), /* AVB_TD0 */
+ [10] = PIN_NUMBER('N', 1), /* AVB_TXC */
+ [11] = PIN_NUMBER('P', 3), /* AVB_TX_CTL */
+ [12] = RCAR_GP_PIN(2, 19), /* AVB_RD3 */
+ [13] = RCAR_GP_PIN(2, 18), /* AVB_RD2 */
+ [14] = RCAR_GP_PIN(2, 17), /* AVB_RD1 */
+ [15] = RCAR_GP_PIN(2, 16), /* AVB_RD0 */
+ [16] = RCAR_GP_PIN(2, 15), /* AVB_RXC */
+ [17] = RCAR_GP_PIN(2, 14), /* AVB_RX_CTL */
+ [18] = RCAR_GP_PIN(2, 13), /* RPC_RESET# */
+ [19] = RCAR_GP_PIN(2, 12), /* RPC_INT# */
+ [20] = RCAR_GP_PIN(2, 11), /* QSPI1_SSL */
+ [21] = RCAR_GP_PIN(2, 10), /* QSPI1_IO3 */
+ [22] = RCAR_GP_PIN(2, 9), /* QSPI1_IO2 */
+ [23] = RCAR_GP_PIN(2, 8), /* QSPI1_MISO/IO1 */
+ [24] = RCAR_GP_PIN(2, 7), /* QSPI1_MOSI/IO0 */
+ [25] = RCAR_GP_PIN(2, 6), /* QSPI1_SPCLK */
+ [26] = RCAR_GP_PIN(2, 5), /* QSPI0_SSL */
+ [27] = RCAR_GP_PIN(2, 4), /* QSPI0_IO3 */
+ [28] = RCAR_GP_PIN(2, 3), /* QSPI0_IO2 */
+ [29] = RCAR_GP_PIN(2, 2), /* QSPI0_MISO/IO1 */
+ [30] = RCAR_GP_PIN(2, 1), /* QSPI0_MOSI/IO0 */
+ [31] = RCAR_GP_PIN(2, 0), /* QSPI0_SPCLK */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [0] = RCAR_GP_PIN(0, 4), /* D4 */
+ [1] = RCAR_GP_PIN(0, 3), /* D3 */
+ [2] = RCAR_GP_PIN(0, 2), /* D2 */
+ [3] = RCAR_GP_PIN(0, 1), /* D1 */
+ [4] = RCAR_GP_PIN(0, 0), /* D0 */
+ [5] = RCAR_GP_PIN(1, 22), /* WE0# */
+ [6] = RCAR_GP_PIN(1, 21), /* CS0# */
+ [7] = RCAR_GP_PIN(1, 20), /* CLKOUT */
+ [8] = RCAR_GP_PIN(1, 19), /* A19 */
+ [9] = RCAR_GP_PIN(1, 18), /* A18 */
+ [10] = RCAR_GP_PIN(1, 17), /* A17 */
+ [11] = RCAR_GP_PIN(1, 16), /* A16 */
+ [12] = RCAR_GP_PIN(1, 15), /* A15 */
+ [13] = RCAR_GP_PIN(1, 14), /* A14 */
+ [14] = RCAR_GP_PIN(1, 13), /* A13 */
+ [15] = RCAR_GP_PIN(1, 12), /* A12 */
+ [16] = RCAR_GP_PIN(1, 11), /* A11 */
+ [17] = RCAR_GP_PIN(1, 10), /* A10 */
+ [18] = RCAR_GP_PIN(1, 9), /* A9 */
+ [19] = RCAR_GP_PIN(1, 8), /* A8 */
+ [20] = RCAR_GP_PIN(1, 7), /* A7 */
+ [21] = RCAR_GP_PIN(1, 6), /* A6 */
+ [22] = RCAR_GP_PIN(1, 5), /* A5 */
+ [23] = RCAR_GP_PIN(1, 4), /* A4 */
+ [24] = RCAR_GP_PIN(1, 3), /* A3 */
+ [25] = RCAR_GP_PIN(1, 2), /* A2 */
+ [26] = RCAR_GP_PIN(1, 1), /* A1 */
+ [27] = RCAR_GP_PIN(1, 0), /* A0 */
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = RCAR_GP_PIN(2, 25), /* PUEN_EX_WAIT0 */
+ [31] = RCAR_GP_PIN(2, 24), /* PUEN_RD/WR# */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [0] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [1] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [2] = PIN_NUMBER('H', 1), /* ASEBRK */
+ [3] = PIN_NONE,
+ [4] = PIN_NUMBER('G', 2), /* TDI */
+ [5] = PIN_NUMBER('F', 3), /* TMS */
+ [6] = PIN_NUMBER('F', 4), /* TCK */
+ [7] = PIN_NUMBER('F', 1), /* TRST# */
+ [8] = PIN_NONE,
+ [9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NUMBER('G', 3), /* FSCLKST# */
+ [16] = RCAR_GP_PIN(0, 17), /* SDA4 */
+ [17] = RCAR_GP_PIN(0, 16), /* SCL4 */
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_A_NUMBER('D', 3), /* PRESETOUT# */
+ [21] = RCAR_GP_PIN(0, 15), /* D15 */
+ [22] = RCAR_GP_PIN(0, 14), /* D14 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 12), /* D12 */
+ [25] = RCAR_GP_PIN(0, 11), /* D11 */
+ [26] = RCAR_GP_PIN(0, 10), /* D10 */
+ [27] = RCAR_GP_PIN(0, 9), /* D9 */
+ [28] = RCAR_GP_PIN(0, 8), /* D8 */
+ [29] = RCAR_GP_PIN(0, 7), /* D7 */
+ [30] = RCAR_GP_PIN(0, 6), /* D6 */
+ [31] = RCAR_GP_PIN(0, 5), /* D5 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [0] = RCAR_GP_PIN(5, 0), /* SCK0_A */
+ [1] = RCAR_GP_PIN(5, 4), /* RTS0#/TANS_A */
+ [2] = RCAR_GP_PIN(5, 3), /* CTS0#_A */
+ [3] = RCAR_GP_PIN(5, 2), /* TX0_A */
+ [4] = RCAR_GP_PIN(5, 1), /* RX0_A */
+ [5] = PIN_NONE,
+ [6] = PIN_NONE,
+ [7] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [8] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [11] = RCAR_GP_PIN(4, 10), /* SD3_DS */
+ [12] = RCAR_GP_PIN(4, 9), /* SD3_DAT7 */
+ [13] = RCAR_GP_PIN(4, 8), /* SD3_DAT6 */
+ [14] = RCAR_GP_PIN(4, 7), /* SD3_DAT5 */
+ [15] = RCAR_GP_PIN(4, 6), /* SD3_DAT4 */
+ [16] = RCAR_GP_PIN(4, 5), /* SD3_DAT3 */
+ [17] = RCAR_GP_PIN(4, 4), /* SD3_DAT2 */
+ [18] = RCAR_GP_PIN(4, 3), /* SD3_DAT1 */
+ [19] = RCAR_GP_PIN(4, 2), /* SD3_DAT0 */
+ [20] = RCAR_GP_PIN(4, 1), /* SD3_CMD */
+ [21] = RCAR_GP_PIN(4, 0), /* SD3_CLK */
+ [22] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [23] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [24] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [25] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [26] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [27] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [28] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [29] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [30] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [31] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [0] = RCAR_GP_PIN(6, 8), /* AUDIO_CLKA */
+ [1] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [2] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [3] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [4] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [5] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [6] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [7] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [8] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [9] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [10] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2 */
+ [12] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1 */
+ [13] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [14] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [15] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [16] = PIN_NUMBER('T', 21), /* MLB_REF */
+ [17] = RCAR_GP_PIN(5, 19), /* MLB_DAT */
+ [18] = RCAR_GP_PIN(5, 18), /* MLB_SIG */
+ [19] = RCAR_GP_PIN(5, 17), /* MLB_CLK */
+ [20] = RCAR_GP_PIN(5, 16), /* SSI_SDATA9 */
+ [21] = RCAR_GP_PIN(5, 15), /* MSIOF0_SS2 */
+ [22] = RCAR_GP_PIN(5, 14), /* MSIOF0_SS1 */
+ [23] = RCAR_GP_PIN(5, 13), /* MSIOF0_SYNC */
+ [24] = RCAR_GP_PIN(5, 12), /* MSIOF0_TXD */
+ [25] = RCAR_GP_PIN(5, 11), /* MSIOF0_RXD */
+ [26] = RCAR_GP_PIN(5, 10), /* MSIOF0_SCK */
+ [27] = RCAR_GP_PIN(5, 9), /* RX2_A */
+ [28] = RCAR_GP_PIN(5, 8), /* TX2_A */
+ [29] = RCAR_GP_PIN(5, 7), /* SCK2_A */
+ [30] = RCAR_GP_PIN(5, 6), /* TX1 */
+ [31] = RCAR_GP_PIN(5, 5), /* RX1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [0] = PIN_NONE,
+ [1] = PIN_NONE,
+ [2] = PIN_NONE,
+ [3] = PIN_NONE,
+ [4] = PIN_NONE,
+ [5] = PIN_NONE,
+ [6] = PIN_NONE,
+ [7] = PIN_NONE,
+ [8] = PIN_NONE,
+ [9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = RCAR_GP_PIN(6, 9), /* PUEN_USB30_OVC */
+ [31] = RCAR_GP_PIN(6, 17), /* PUEN_USB30_PWEN */
+ } },
+ { /* sentinel */ },
+};
+
+/*
+ * Read back the bias (pull-up/pull-down/disabled) currently configured in
+ * hardware for @pin, as a pinconf-generic PIN_CONFIG_BIAS_* value.
+ */
+static unsigned int r8a77990_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
+{
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
+
+ /* Locate the PUENn/PUDn register pair and bit position for this pin. */
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
+ /* Pin has no bias control at all: report it as disabled. */
+ return PIN_CONFIG_BIAS_DISABLE;
+
+ /* PUEN bit clear => no pull; otherwise PUD selects up (1) or down (0). */
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
+ return PIN_CONFIG_BIAS_DISABLE;
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
+ return PIN_CONFIG_BIAS_PULL_UP;
+ else
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+}
+
+/*
+ * Program the bias for @pin. @bias is a pinconf-generic PIN_CONFIG_BIAS_*
+ * value; anything other than PULL_UP/PULL_DOWN/DISABLE maps to pull-down
+ * with the pull enabled, so callers are expected to pass only those three.
+ * Pins without a bias register are silently ignored.
+ */
+static void r8a77990_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ const struct pinmux_bias_reg *reg;
+ u32 enable, updown;
+ unsigned int bit;
+
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
+ return;
+
+ /* Read-modify-write: PUEN bit enables the pull... */
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
+ if (bias != PIN_CONFIG_BIAS_DISABLE)
+ enable |= BIT(bit);
+
+ /* ...and the PUD bit selects its direction (1 = up, 0 = down). */
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= BIT(bit);
+
+ /*
+ * NOTE(review): PUD is written before PUEN, presumably so the pull
+ * direction is settled before the pull gets enabled — this matches
+ * the other R-Car Gen3 PFC drivers; confirm against the HW manual.
+ */
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
+}
+
+/* SoC-specific hooks for the sh-pfc core: only custom bias get/set needed. */
+static const struct sh_pfc_soc_operations r8a77990_pinmux_ops = {
+ .get_bias = r8a77990_pinmux_get_bias,
+ .set_bias = r8a77990_pinmux_set_bias,
+};
+
+/*
+ * Top-level SoC description consumed by the sh-pfc core; ties together the
+ * pin list, groups, functions, config/bias registers and the mux data tables
+ * defined above for the R8A77990 (E3).
+ */
+const struct sh_pfc_soc_info r8a77990_pinmux_info = {
+ .name = "r8a77990_pfc",
+ .ops = &r8a77990_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 7fad897cd9f5..3d0b31636d6d 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -274,6 +274,7 @@ extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
extern const struct sh_pfc_soc_info r8a7743_pinmux_info;
extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77470_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
@@ -287,6 +288,7 @@ extern const struct sh_pfc_soc_info r8a7796_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
extern const struct sh_pfc_soc_info r8a77970_pinmux_info;
extern const struct sh_pfc_soc_info r8a77980_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77990_pinmux_info;
extern const struct sh_pfc_soc_info r8a77995_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
@@ -415,9 +417,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT_GP_CFG_1(bank, 9, fn, sfx, cfg)
#define PORT_GP_10(bank, fn, sfx) PORT_GP_CFG_10(bank, fn, sfx, 0)
-#define PORT_GP_CFG_12(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_11(bank, fn, sfx, cfg) \
PORT_GP_CFG_10(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 10, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 10, fn, sfx, cfg)
+#define PORT_GP_11(bank, fn, sfx) PORT_GP_CFG_11(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_12(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_11(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
#define PORT_GP_12(bank, fn, sfx) PORT_GP_CFG_12(bank, fn, sfx, 0)
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 5de1f63b07bb..95282cda6cee 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -81,4 +81,8 @@ config PINCTRL_SUN50I_H6
def_bool ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI
+config PINCTRL_SUN50I_H6_R
+ def_bool ARM64 && ARCH_SUNXI
+ select PINCTRL_SUNXI
+
endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index 3c4aec6611e9..adb8443aa55c 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -19,5 +19,6 @@ obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
obj-$(CONFIG_PINCTRL_SUN50I_H5) += pinctrl-sun50i-h5.o
obj-$(CONFIG_PINCTRL_SUN50I_H6) += pinctrl-sun50i-h6.o
+obj-$(CONFIG_PINCTRL_SUN50I_H6_R) += pinctrl-sun50i-h6-r.o
obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
obj-$(CONFIG_PINCTRL_SUN9I_A80_R) += pinctrl-sun9i-a80-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
new file mode 100644
index 000000000000..4557e18d5989
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner H6 R_PIO pin controller driver
+ *
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on pinctrl-sun6i-a31-r.c, which is:
+ * Copyright (C) 2014 Boris Brezillon
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/reset.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun50i_h6_r_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "s_i2c"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PL_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "s_i2c"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PL_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PL_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PL_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PL_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PL_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PL_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PL_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_pwm"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PL_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_cir_rx"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PL_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_w1"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PL_EINT10 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PM_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PM_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2), /* PM_EINT2 */
+ SUNXI_FUNCTION(0x3, "1wire")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PM_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PM_EINT4 */
+};
+
+static const struct sunxi_pinctrl_desc sun50i_h6_r_pinctrl_data = {
+ .pins = sun50i_h6_r_pins,
+ .npins = ARRAY_SIZE(sun50i_h6_r_pins),
+ .pin_base = PL_BASE,
+ .irq_banks = 2,
+};
+
+static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun50i_h6_r_pinctrl_data);
+}
+
+static const struct of_device_id sun50i_h6_r_pinctrl_match[] = {
+ { .compatible = "allwinner,sun50i-h6-r-pinctrl", },
+ {}
+};
+
+static struct platform_driver sun50i_h6_r_pinctrl_driver = {
+ .probe = sun50i_h6_r_pinctrl_probe,
+ .driver = {
+ .name = "sun50i-h6-r-pinctrl",
+ .of_match_table = sun50i_h6_r_pinctrl_match,
+ },
+};
+builtin_platform_driver(sun50i_h6_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 020d6d84639c..25e80a5370ca 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -12,12 +12,12 @@
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/clk-provider.h>
#include <linux/gpio/driver.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/export.h>
#include <linux/of.h>
+#include <linux/of_clk.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -1361,7 +1361,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
goto gpiochip_error;
}
- ret = of_count_phandle_with_args(node, "clocks", "#clock-cells");
+ ret = of_clk_get_parent_count(node);
clk = devm_clk_get(&pdev->dev, ret == 1 ? NULL : "apb");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 72c718e66ebb..49c7c1499bc3 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -33,17 +33,6 @@
#include "../pinctrl-utils.h"
#include "pinctrl-tegra.h"
-struct tegra_pmx {
- struct device *dev;
- struct pinctrl_dev *pctl;
-
- const struct tegra_pinctrl_soc_data *soc;
- const char **group_pins;
-
- int nbanks;
- void __iomem **regs;
-};
-
static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
{
return readl(pmx->regs[bank] + reg);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index 33b17cb1471e..aa33c20766c4 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -16,6 +16,17 @@
#ifndef __PINMUX_TEGRA_H__
#define __PINMUX_TEGRA_H__
+struct tegra_pmx {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+
+ const struct tegra_pinctrl_soc_data *soc;
+ const char **group_pins;
+
+ int nbanks;
+ void __iomem **regs;
+};
+
enum tegra_pinconf_param {
/* argument: tegra_pinconf_pull */
TEGRA_PINCONF_PARAM_PULL,
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
index 7e38ee9bae78..b6dd939d32cc 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra20.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
@@ -19,6 +19,7 @@
* more details.
*/
+#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -2231,9 +2232,36 @@ static const struct tegra_pinctrl_soc_data tegra20_pinctrl = {
.drvtype_in_mux = false,
};
+static const char *cdev1_parents[] = {
+ "dev1_osc_div", "pll_a_out0", "pll_m_out1", "audio",
+};
+
+static const char *cdev2_parents[] = {
+ "dev2_osc_div", "hclk", "pclk", "pll_p_out4",
+};
+
+static void tegra20_pinctrl_register_clock_muxes(struct platform_device *pdev)
+{
+ struct tegra_pmx *pmx = platform_get_drvdata(pdev);
+
+ clk_register_mux(NULL, "cdev1_mux", cdev1_parents, 4, 0,
+ pmx->regs[1] + 0x8, 2, 2, CLK_MUX_READ_ONLY, NULL);
+
+ clk_register_mux(NULL, "cdev2_mux", cdev2_parents, 4, 0,
+ pmx->regs[1] + 0x8, 4, 2, CLK_MUX_READ_ONLY, NULL);
+}
+
static int tegra20_pinctrl_probe(struct platform_device *pdev)
{
- return tegra_pinctrl_probe(pdev, &tegra20_pinctrl);
+ int err;
+
+ err = tegra_pinctrl_probe(pdev, &tegra20_pinctrl);
+ if (err)
+ return err;
+
+ tegra20_pinctrl_register_clock_muxes(pdev);
+
+ return 0;
}
static const struct of_device_id tegra20_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 0976fbfecd50..58825f68b58b 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -481,6 +481,31 @@ static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned ether_rmii_pins[] = {6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17};
static const int ether_rmii_muxvals[] = {4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4};
+static const unsigned hscin0_ci_pins[] = {102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112};
+static const int hscin0_ci_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscin0_p_pins[] = {102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112};
+static const int hscin0_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscin0_s_pins[] = {116, 117, 118, 119};
+static const int hscin0_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin1_p_pins[] = {124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134};
+static const int hscin1_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscin1_s_pins[] = {120, 121, 122, 123};
+static const int hscin1_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin2_s_pins[] = {124, 125, 126, 127};
+static const int hscin2_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscout0_ci_pins[] = {113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123};
+static const int hscout0_ci_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscout0_p_pins[] = {113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123};
+static const int hscout0_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscout0_s_pins[] = {116, 117, 118, 119};
+static const int hscout0_s_muxvals[] = {4, 4, 4, 4};
+static const unsigned hscout1_s_pins[] = {120, 121, 122, 123};
+static const int hscout1_s_muxvals[] = {4, 4, 4, 4};
static const unsigned i2c0_pins[] = {63, 64};
static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {65, 66};
@@ -556,6 +581,16 @@ static const struct uniphier_pinctrl_group uniphier_ld11_groups[] = {
UNIPHIER_PINCTRL_GROUP(emmc),
UNIPHIER_PINCTRL_GROUP(emmc_dat8),
UNIPHIER_PINCTRL_GROUP(ether_rmii),
+ UNIPHIER_PINCTRL_GROUP(hscin0_ci),
+ UNIPHIER_PINCTRL_GROUP(hscin0_p),
+ UNIPHIER_PINCTRL_GROUP(hscin0_s),
+ UNIPHIER_PINCTRL_GROUP(hscin1_p),
+ UNIPHIER_PINCTRL_GROUP(hscin1_s),
+ UNIPHIER_PINCTRL_GROUP(hscin2_s),
+ UNIPHIER_PINCTRL_GROUP(hscout0_ci),
+ UNIPHIER_PINCTRL_GROUP(hscout0_p),
+ UNIPHIER_PINCTRL_GROUP(hscout0_s),
+ UNIPHIER_PINCTRL_GROUP(hscout1_s),
UNIPHIER_PINCTRL_GROUP(i2c0),
UNIPHIER_PINCTRL_GROUP(i2c1),
UNIPHIER_PINCTRL_GROUP(i2c3),
@@ -583,6 +618,15 @@ static const char * const aout1_groups[] = {"aout1"};
static const char * const aoutiec1_groups[] = {"aoutiec1"};
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
static const char * const ether_rmii_groups[] = {"ether_rmii"};
+static const char * const hscin0_groups[] = {"hscin0_ci",
+ "hscin0_p",
+ "hscin0_s"};
+static const char * const hscin1_groups[] = {"hscin1_p", "hscin1_s"};
+static const char * const hscin2_groups[] = {"hscin2_s"};
+static const char * const hscout0_groups[] = {"hscout0_ci",
+ "hscout0_p",
+ "hscout0_s"};
+static const char * const hscout1_groups[] = {"hscout1_s"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c3_groups[] = {"i2c3"};
@@ -603,6 +647,11 @@ static const struct uniphier_pinmux_function uniphier_ld11_functions[] = {
UNIPHIER_PINMUX_FUNCTION(aoutiec1),
UNIPHIER_PINMUX_FUNCTION(emmc),
UNIPHIER_PINMUX_FUNCTION(ether_rmii),
+ UNIPHIER_PINMUX_FUNCTION(hscin0),
+ UNIPHIER_PINMUX_FUNCTION(hscin1),
+ UNIPHIER_PINMUX_FUNCTION(hscin2),
+ UNIPHIER_PINMUX_FUNCTION(hscout0),
+ UNIPHIER_PINMUX_FUNCTION(hscout1),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
UNIPHIER_PINMUX_FUNCTION(i2c3),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index bf8f0c3bea5e..9f449b35e300 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -566,6 +566,33 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
41, 42, 45};
static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscin0_ci_pins[] = {102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112};
+static const int hscin0_ci_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscin0_p_pins[] = {102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112};
+static const int hscin0_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscin0_s_pins[] = {116, 117, 118, 119};
+static const int hscin0_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin1_p_pins[] = {124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134};
+static const int hscin1_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscin1_s_pins[] = {120, 121, 122, 123};
+static const int hscin1_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin2_s_pins[] = {124, 125, 126, 127};
+static const int hscin2_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin3_s_pins[] = {129, 130, 131, 132};
+static const int hscin3_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscout0_ci_pins[] = {113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123};
+static const int hscout0_ci_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscout0_p_pins[] = {113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123};
+static const int hscout0_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscout0_s_pins[] = {116, 117, 118, 119};
+static const int hscout0_s_muxvals[] = {4, 4, 4, 4};
+static const unsigned hscout1_s_pins[] = {120, 121, 122, 123};
+static const int hscout1_s_muxvals[] = {4, 4, 4, 4};
static const unsigned i2c0_pins[] = {63, 64};
static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {65, 66};
@@ -641,6 +668,17 @@ static const struct uniphier_pinctrl_group uniphier_ld20_groups[] = {
UNIPHIER_PINCTRL_GROUP(emmc_dat8),
UNIPHIER_PINCTRL_GROUP(ether_rgmii),
UNIPHIER_PINCTRL_GROUP(ether_rmii),
+ UNIPHIER_PINCTRL_GROUP(hscin0_ci),
+ UNIPHIER_PINCTRL_GROUP(hscin0_p),
+ UNIPHIER_PINCTRL_GROUP(hscin0_s),
+ UNIPHIER_PINCTRL_GROUP(hscin1_p),
+ UNIPHIER_PINCTRL_GROUP(hscin1_s),
+ UNIPHIER_PINCTRL_GROUP(hscin2_s),
+ UNIPHIER_PINCTRL_GROUP(hscin3_s),
+ UNIPHIER_PINCTRL_GROUP(hscout0_ci),
+ UNIPHIER_PINCTRL_GROUP(hscout0_p),
+ UNIPHIER_PINCTRL_GROUP(hscout0_s),
+ UNIPHIER_PINCTRL_GROUP(hscout1_s),
UNIPHIER_PINCTRL_GROUP(i2c0),
UNIPHIER_PINCTRL_GROUP(i2c1),
UNIPHIER_PINCTRL_GROUP(i2c3),
@@ -668,6 +706,16 @@ static const char * const aoutiec1_groups[] = {"aoutiec1"};
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
static const char * const ether_rgmii_groups[] = {"ether_rgmii"};
static const char * const ether_rmii_groups[] = {"ether_rmii"};
+static const char * const hscin0_groups[] = {"hscin0_ci",
+ "hscin0_p",
+ "hscin0_s"};
+static const char * const hscin1_groups[] = {"hscin1_p", "hscin1_s"};
+static const char * const hscin2_groups[] = {"hscin2_s"};
+static const char * const hscin3_groups[] = {"hscin3_s"};
+static const char * const hscout0_groups[] = {"hscout0_ci",
+ "hscout0_p",
+ "hscout0_s"};
+static const char * const hscout1_groups[] = {"hscout1_s"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c3_groups[] = {"i2c3"};
@@ -691,6 +739,12 @@ static const struct uniphier_pinmux_function uniphier_ld20_functions[] = {
UNIPHIER_PINMUX_FUNCTION(emmc),
UNIPHIER_PINMUX_FUNCTION(ether_rgmii),
UNIPHIER_PINMUX_FUNCTION(ether_rmii),
+ UNIPHIER_PINMUX_FUNCTION(hscin0),
+ UNIPHIER_PINMUX_FUNCTION(hscin1),
+ UNIPHIER_PINMUX_FUNCTION(hscin2),
+ UNIPHIER_PINMUX_FUNCTION(hscin3),
+ UNIPHIER_PINMUX_FUNCTION(hscout0),
+ UNIPHIER_PINMUX_FUNCTION(hscout1),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
UNIPHIER_PINMUX_FUNCTION(i2c3),
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index 6273ad3b411d..38206c39b3bf 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -35,11 +35,11 @@ static void gpio_poweroff_do_poweroff(void)
gpiod_direction_output(reset_gpio, 1);
mdelay(100);
/* drive inactive, also active->inactive edge */
- gpiod_set_value(reset_gpio, 0);
+ gpiod_set_value_cansleep(reset_gpio, 0);
mdelay(100);
/* drive it active, also inactive->active edge */
- gpiod_set_value(reset_gpio, 1);
+ gpiod_set_value_cansleep(reset_gpio, 1);
/* give it some time */
mdelay(timeout);
diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c
index 4a7ed50d1dc5..7b2b69916f48 100644
--- a/drivers/power/supply/ab8500_bmdata.c
+++ b/drivers/power/supply/ab8500_bmdata.c
@@ -430,13 +430,6 @@ static const struct abx500_maxim_parameters ab8500_maxi_params = {
.charger_curr_step = 100,
};
-static const struct abx500_maxim_parameters abx540_maxi_params = {
- .ena_maxi = true,
- .chg_curr = 3000,
- .wait_cycles = 10,
- .charger_curr_step = 200,
-};
-
static const struct abx500_bm_charger_parameters chg = {
.usb_volt_max = 5500,
.usb_curr_max = 1500,
@@ -453,17 +446,6 @@ static int ab8500_charge_output_curr_map[] = {
900, 1000, 1100, 1200, 1300, 1400, 1500, 1500,
};
-static int ab8540_charge_output_curr_map[] = {
- 0, 0, 0, 75, 100, 125, 150, 175,
- 200, 225, 250, 275, 300, 325, 350, 375,
- 400, 425, 450, 475, 500, 525, 550, 575,
- 600, 625, 650, 675, 700, 725, 750, 775,
- 800, 825, 850, 875, 900, 925, 950, 975,
- 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1175,
- 1200, 1225, 1250, 1275, 1300, 1325, 1350, 1375,
- 1400, 1425, 1450, 1500, 1600, 1700, 1900, 2000,
-};
-
/*
* This array maps the raw hex value to charger input current used by the
* AB8500 values
@@ -473,17 +455,6 @@ static int ab8500_charge_input_curr_map[] = {
700, 800, 900, 1000, 1100, 1300, 1400, 1500,
};
-static int ab8540_charge_input_curr_map[] = {
- 25, 50, 75, 100, 125, 150, 175, 200,
- 225, 250, 275, 300, 325, 350, 375, 400,
- 425, 450, 475, 500, 525, 550, 575, 600,
- 625, 650, 675, 700, 725, 750, 775, 800,
- 825, 850, 875, 900, 925, 950, 975, 1000,
- 1025, 1050, 1075, 1100, 1125, 1150, 1175, 1200,
- 1225, 1250, 1275, 1300, 1325, 1350, 1375, 1400,
- 1425, 1450, 1475, 1500, 1500, 1500, 1500, 1500,
-};
-
struct abx500_bm_data ab8500_bm_data = {
.temp_under = 3,
.temp_low = 8,
@@ -518,40 +489,6 @@ struct abx500_bm_data ab8500_bm_data = {
.n_chg_in_curr = ARRAY_SIZE(ab8500_charge_input_curr_map),
};
-struct abx500_bm_data ab8540_bm_data = {
- .temp_under = 3,
- .temp_low = 8,
- .temp_high = 43,
- .temp_over = 48,
- .main_safety_tmr_h = 4,
- .temp_interval_chg = 20,
- .temp_interval_nochg = 120,
- .usb_safety_tmr_h = 4,
- .bkup_bat_v = BUP_VCH_SEL_2P6V,
- .bkup_bat_i = BUP_ICH_SEL_150UA,
- .no_maintenance = false,
- .capacity_scaling = false,
- .adc_therm = ABx500_ADC_THERM_BATCTRL,
- .chg_unknown_bat = false,
- .enable_overshoot = false,
- .fg_res = 100,
- .cap_levels = &cap_levels,
- .bat_type = bat_type_thermistor,
- .n_btypes = ARRAY_SIZE(bat_type_thermistor),
- .batt_id = 0,
- .interval_charging = 5,
- .interval_not_charging = 120,
- .temp_hysteresis = 3,
- .gnd_lift_resistance = 0,
- .maxi = &abx540_maxi_params,
- .chg_params = &chg,
- .fg_params = &fg,
- .chg_output_curr = ab8540_charge_output_curr_map,
- .n_chg_out_curr = ARRAY_SIZE(ab8540_charge_output_curr_map),
- .chg_input_curr = ab8540_charge_input_curr_map,
- .n_chg_in_curr = ARRAY_SIZE(ab8540_charge_input_curr_map),
-};
-
int ab8500_bm_of_probe(struct device *dev,
struct device_node *np,
struct abx500_bm_data *bm)
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index f7a35ebfbab2..708fd58cd62b 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -214,22 +214,10 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
/* Only do this for batteries with internal NTC */
if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
- if (is_ab8540(di->parent)) {
- if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_60UA)
- curr = BAT_CTRL_60U_ENA;
- else
- curr = BAT_CTRL_120U_ENA;
- } else if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
- if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_16UA)
- curr = BAT_CTRL_16U_ENA;
- else
- curr = BAT_CTRL_18U_ENA;
- } else {
- if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
- curr = BAT_CTRL_7U_ENA;
- else
- curr = BAT_CTRL_20U_ENA;
- }
+ if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
+ curr = BAT_CTRL_7U_ENA;
+ else
+ curr = BAT_CTRL_20U_ENA;
dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
@@ -260,28 +248,12 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
} else if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
dev_dbg(di->dev, "Disable BATCTRL curr source\n");
- if (is_ab8540(di->parent)) {
- /* Write 0 to the curr bits */
- ret = abx500_mask_and_set_register_interruptible(
- di->dev,
- AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
- BAT_CTRL_60U_ENA | BAT_CTRL_120U_ENA,
- ~(BAT_CTRL_60U_ENA | BAT_CTRL_120U_ENA));
- } else if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
- /* Write 0 to the curr bits */
- ret = abx500_mask_and_set_register_interruptible(
- di->dev,
- AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
- BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA,
- ~(BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA));
- } else {
- /* Write 0 to the curr bits */
- ret = abx500_mask_and_set_register_interruptible(
- di->dev,
- AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
- BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
- ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
- }
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(
+ di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+ ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
if (ret) {
dev_err(di->dev, "%s failed disabling current source\n",
@@ -324,25 +296,11 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
* if we got an error above
*/
disable_curr_source:
- if (is_ab8540(di->parent)) {
- /* Write 0 to the curr bits */
- ret = abx500_mask_and_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
- BAT_CTRL_60U_ENA | BAT_CTRL_120U_ENA,
- ~(BAT_CTRL_60U_ENA | BAT_CTRL_120U_ENA));
- } else if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
- /* Write 0 to the curr bits */
- ret = abx500_mask_and_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
- BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA,
- ~(BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA));
- } else {
- /* Write 0 to the curr bits */
- ret = abx500_mask_and_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
- BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
- ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
- }
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+ ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
if (ret) {
dev_err(di->dev, "%s failed disabling current source\n",
@@ -556,13 +514,8 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
{
int res;
u8 i;
- if (is_ab8540(di->parent))
- di->curr_source = BTEMP_BATCTRL_CURR_SRC_60UA;
- else if (is_ab9540(di->parent) || is_ab8505(di->parent))
- di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
- else
- di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
di->bm->batt_id = BATTERY_UNKNOWN;
res = ab8500_btemp_get_batctrl_res(di);
@@ -600,18 +553,8 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
*/
if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
di->bm->batt_id == 1) {
- if (is_ab8540(di->parent)) {
- dev_dbg(di->dev,
- "Set BATCTRL current source to 60uA\n");
- di->curr_source = BTEMP_BATCTRL_CURR_SRC_60UA;
- } else if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
- dev_dbg(di->dev,
- "Set BATCTRL current source to 16uA\n");
- di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
- } else {
- dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
- di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
- }
+ dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
}
return di->bm->batt_id;
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 5a76c6d343de..98b335042ba6 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -58,9 +58,7 @@
#define MAIN_CH_INPUT_CURR_SHIFT 4
#define VBUS_IN_CURR_LIM_SHIFT 4
-#define AB8540_VBUS_IN_CURR_LIM_SHIFT 2
#define AUTO_VBUS_IN_CURR_LIM_SHIFT 4
-#define AB8540_AUTO_VBUS_IN_CURR_MASK 0x3F
#define VBUS_IN_CURR_LIM_RETRY_SET_TIME 30 /* seconds */
#define LED_INDICATOR_PWM_ENA 0x01
@@ -1138,10 +1136,7 @@ static int ab8500_charger_set_current(struct ab8500_charger *di,
no_stepping = true;
break;
case AB8500_USBCH_IPT_CRNTLVL_REG:
- if (is_ab8540(di->parent))
- shift_value = AB8540_VBUS_IN_CURR_LIM_SHIFT;
- else
- shift_value = VBUS_IN_CURR_LIM_SHIFT;
+ shift_value = VBUS_IN_CURR_LIM_SHIFT;
prev_curr_index = (reg_value >> shift_value);
curr_index = ab8500_vbus_in_curr_to_regval(di, ich);
step_udelay = STEP_UDELAY * 100;
@@ -1865,67 +1860,6 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
return ret;
}
-/**
- * ab8540_charger_power_path_enable() - enable usb power path mode
- * @charger: pointer to the ux500_charger structure
- * @enable: enable/disable flag
- *
- * Enable or disable the power path for usb mode
- * Returns error code in case of failure else 0(on success)
- */
-static int ab8540_charger_power_path_enable(struct ux500_charger *charger,
- bool enable)
-{
- int ret;
- struct ab8500_charger *di;
-
- if (charger->psy->desc->type == POWER_SUPPLY_TYPE_USB)
- di = to_ab8500_charger_usb_device_info(charger);
- else
- return -ENXIO;
-
- ret = abx500_mask_and_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8540_USB_PP_MODE_REG,
- BUS_POWER_PATH_MODE_ENA, enable);
- if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
- return ret;
- }
-
- return ret;
-}
-
-
-/**
- * ab8540_charger_usb_pre_chg_enable() - enable usb pre change
- * @charger: pointer to the ux500_charger structure
- * @enable: enable/disable flag
- *
- * Enable or disable the pre-chage for usb mode
- * Returns error code in case of failure else 0(on success)
- */
-static int ab8540_charger_usb_pre_chg_enable(struct ux500_charger *charger,
- bool enable)
-{
- int ret;
- struct ab8500_charger *di;
-
- if (charger->psy->desc->type == POWER_SUPPLY_TYPE_USB)
- di = to_ab8500_charger_usb_device_info(charger);
- else
- return -ENXIO;
-
- ret = abx500_mask_and_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8540_USB_PP_CHR_REG,
- BUS_POWER_PATH_PRECHG_ENA, enable);
- if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
- return ret;
- }
-
- return ret;
-}
-
static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
{
struct power_supply *psy;
@@ -2704,23 +2638,15 @@ static void ab8500_charger_vbus_drop_end_work(struct work_struct *work)
abx500_set_register_interruptible(di->dev,
AB8500_CHARGER, AB8500_CHARGER_CTRL, 0x01);
- if (is_ab8540(di->parent))
- ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
- AB8540_CH_USBCH_STAT3_REG, &reg_value);
- else
- ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_CH_USBCH_STAT2_REG, &reg_value);
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_USBCH_STAT2_REG, &reg_value);
if (ret < 0) {
dev_err(di->dev, "%s read failed\n", __func__);
return;
}
- if (is_ab8540(di->parent))
- curr = di->bm->chg_input_curr[
- reg_value & AB8540_AUTO_VBUS_IN_CURR_MASK];
- else
- curr = di->bm->chg_input_curr[
- reg_value >> AUTO_VBUS_IN_CURR_LIM_SHIFT];
+ curr = di->bm->chg_input_curr[
+ reg_value >> AUTO_VBUS_IN_CURR_LIM_SHIFT];
if (di->max_usb_in_curr.calculated_max != curr) {
/* USB source is collapsing */
@@ -3097,14 +3023,9 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
goto out;
}
- if (is_ab8540(di->parent))
- ret = abx500_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8500_CH_OPT_CRNTLVL_MAX_REG,
- CH_OP_CUR_LVL_2P);
- else
- ret = abx500_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8500_CH_OPT_CRNTLVL_MAX_REG,
- CH_OP_CUR_LVL_1P6);
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_OPT_CRNTLVL_MAX_REG,
+ CH_OP_CUR_LVL_1P6);
if (ret) {
dev_err(di->dev,
"failed to set CH_OPT_CRNTLVL_MAX_REG\n");
@@ -3112,8 +3033,7 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
}
}
- if (is_ab9540_2p0(di->parent) || is_ab9540_3p0(di->parent)
- || is_ab8505_2p0(di->parent) || is_ab8540(di->parent))
+ if (is_ab8505_2p0(di->parent))
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_CHARGER,
AB8500_USBCH_CTRL2_REG,
@@ -3146,7 +3066,7 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
AB8500_SYS_CTRL2_BLOCK,
AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_ENA);
if (ret) {
- dev_err(di->dev, "faile to enable main watchdog\n");
+ dev_err(di->dev, "failed to enable main watchdog\n");
goto out;
}
@@ -3205,17 +3125,6 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
dev_err(di->dev, "failed to setup backup battery charging\n");
goto out;
}
- if (is_ab8540(di->parent)) {
- ret = abx500_set_register_interruptible(di->dev,
- AB8500_RTC,
- AB8500_RTC_CTRL1_REG,
- bup_vch_range | vbup33_vrtcn);
- if (ret) {
- dev_err(di->dev,
- "failed to setup backup battery charging\n");
- goto out;
- }
- }
/* Enable backup battery charging */
ret = abx500_mask_and_set_register_interruptible(di->dev,
@@ -3226,25 +3135,6 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
goto out;
}
- if (is_ab8540(di->parent)) {
- ret = abx500_mask_and_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8540_USB_PP_MODE_REG,
- BUS_VSYS_VOL_SELECT_MASK, BUS_VSYS_VOL_SELECT_3P6V);
- if (ret) {
- dev_err(di->dev,
- "failed to setup usb power path vsys voltage\n");
- goto out;
- }
- ret = abx500_mask_and_set_register_interruptible(di->dev,
- AB8500_CHARGER, AB8540_USB_PP_CHR_REG,
- BUS_PP_PRECHG_CURRENT_MASK, 0);
- if (ret) {
- dev_err(di->dev,
- "failed to setup usb power path precharge current\n");
- goto out;
- }
- }
-
out:
return ret;
}
@@ -3529,8 +3419,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.ops.check_enable = &ab8500_charger_usb_check_enable;
di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
di->usb_chg.ops.update_curr = &ab8500_charger_update_charger_current;
- di->usb_chg.ops.pp_enable = &ab8540_charger_power_path_enable;
- di->usb_chg.ops.pre_chg_enable = &ab8540_charger_usb_pre_chg_enable;
di->usb_chg.max_out_volt = ab8500_charger_voltage_map[
ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
di->usb_chg.max_out_curr =
@@ -3538,7 +3426,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
di->usb_chg.enabled = di->bm->usb_enabled;
di->usb_chg.external = false;
- di->usb_chg.power_path = di->bm->usb_power_path;
di->usb_state.usb_current = -1;
/* Create a work queue for the charger */
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index c569f82a0071..d9c6c7bedd85 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -1408,7 +1408,7 @@ static void ab8500_fg_charge_state_to(struct ab8500_fg *di,
static void ab8500_fg_discharge_state_to(struct ab8500_fg *di,
enum ab8500_fg_discharge_state new_state)
{
- dev_dbg(di->dev, "Disharge state from %d [%s] to %d [%s]\n",
+ dev_dbg(di->dev, "Discharge state from %d [%s] to %d [%s]\n",
di->discharge_state,
discharge_state[di->discharge_state],
new_state,
@@ -2326,9 +2326,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
goto out;
}
- if (((is_ab8505(di->parent) || is_ab9540(di->parent)) &&
- abx500_get_chip_id(di->dev) >= AB8500_CUT2P0)
- || is_ab8540(di->parent)) {
+ if (is_ab8505(di->parent)) {
ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
AB8505_RTC_PCUT_MAX_TIME_REG, di->bm->fg_params->pcut_max_time);
@@ -2915,9 +2913,7 @@ static int ab8500_fg_sysfs_psy_create_attrs(struct ab8500_fg *di)
{
unsigned int i;
- if (((is_ab8505(di->parent) || is_ab9540(di->parent)) &&
- abx500_get_chip_id(di->dev) >= AB8500_CUT2P0)
- || is_ab8540(di->parent)) {
+ if (is_ab8505(di->parent)) {
for (i = 0; i < ARRAY_SIZE(ab8505_fg_sysfs_psy_attrs); i++)
if (device_create_file(&di->fg_psy->dev,
&ab8505_fg_sysfs_psy_attrs[i]))
@@ -2937,9 +2933,7 @@ static void ab8500_fg_sysfs_psy_remove_attrs(struct ab8500_fg *di)
{
unsigned int i;
- if (((is_ab8505(di->parent) || is_ab9540(di->parent)) &&
- abx500_get_chip_id(di->dev) >= AB8500_CUT2P0)
- || is_ab8540(di->parent)) {
+ if (is_ab8505(di->parent)) {
for (i = 0; i < ARRAY_SIZE(ab8505_fg_sysfs_psy_attrs); i++)
(void)device_remove_file(&di->fg_psy->dev,
&ab8505_fg_sysfs_psy_attrs[i]);
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index a4411d6bbc96..947709cdd14e 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -44,9 +44,6 @@
/* Five minutes expressed in seconds */
#define FIVE_MINUTES_IN_SECONDS 300
-/* Plus margin for the low battery threshold */
-#define BAT_PLUS_MARGIN (100)
-
#define CHARGALG_CURR_STEP_LOW 0
#define CHARGALG_CURR_STEP_HIGH 100
@@ -101,7 +98,6 @@ enum abx500_chargalg_states {
STATE_HW_TEMP_PROTECT_INIT,
STATE_HW_TEMP_PROTECT,
STATE_NORMAL_INIT,
- STATE_USB_PP_PRE_CHARGE,
STATE_NORMAL,
STATE_WAIT_FOR_RECHARGE_INIT,
STATE_WAIT_FOR_RECHARGE,
@@ -133,7 +129,6 @@ static const char *states[] = {
"HW_TEMP_PROTECT_INIT",
"HW_TEMP_PROTECT",
"NORMAL_INIT",
- "USB_PP_PRE_CHARGE",
"NORMAL",
"WAIT_FOR_RECHARGE_INIT",
"WAIT_FOR_RECHARGE",
@@ -603,37 +598,6 @@ static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
}
- /**
- * ab8540_chargalg_usb_pp_en() - Enable/ disable USB power path
- * @di: pointer to the abx500_chargalg structure
- * @enable: power path enable/disable
- *
- * The USB power path will be enable/ disable
- */
-static int ab8540_chargalg_usb_pp_en(struct abx500_chargalg *di, bool enable)
-{
- if (!di->usb_chg || !di->usb_chg->ops.pp_enable)
- return -ENXIO;
-
- return di->usb_chg->ops.pp_enable(di->usb_chg, enable);
-}
-
-/**
- * ab8540_chargalg_usb_pre_chg_en() - Enable/ disable USB pre-charge
- * @di: pointer to the abx500_chargalg structure
- * @enable: USB pre-charge enable/disable
- *
- * The USB USB pre-charge will be enable/ disable
- */
-static int ab8540_chargalg_usb_pre_chg_en(struct abx500_chargalg *di,
- bool enable)
-{
- if (!di->usb_chg || !di->usb_chg->ops.pre_chg_enable)
- return -ENXIO;
-
- return di->usb_chg->ops.pre_chg_enable(di->usb_chg, enable);
-}
-
/**
* abx500_chargalg_update_chg_curr() - Update charger current
* @di: pointer to the abx500_chargalg structure
@@ -833,9 +797,6 @@ static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
di->batt_data.avg_curr > 0) {
if (++di->eoc_cnt >= EOC_COND_CNT) {
di->eoc_cnt = 0;
- if ((di->chg_info.charger_type & USB_CHG) &&
- (di->usb_chg->power_path))
- ab8540_chargalg_usb_pp_en(di, true);
di->charge_status = POWER_SUPPLY_STATUS_FULL;
di->maintenance_chg = true;
dev_dbg(di->dev, "EOC reached!\n");
@@ -1536,22 +1497,6 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
break;
case STATE_NORMAL_INIT:
- if ((di->chg_info.charger_type & USB_CHG) &&
- di->usb_chg->power_path) {
- if (di->batt_data.volt >
- (di->bm->fg_params->lowbat_threshold +
- BAT_PLUS_MARGIN)) {
- ab8540_chargalg_usb_pre_chg_en(di, false);
- ab8540_chargalg_usb_pp_en(di, false);
- } else {
- ab8540_chargalg_usb_pp_en(di, true);
- ab8540_chargalg_usb_pre_chg_en(di, true);
- abx500_chargalg_state_to(di,
- STATE_USB_PP_PRE_CHARGE);
- break;
- }
- }
-
if (di->curr_status.curr_step == CHARGALG_CURR_STEP_LOW)
abx500_chargalg_stop_charging(di);
else {
@@ -1575,13 +1520,6 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
break;
- case STATE_USB_PP_PRE_CHARGE:
- if (di->batt_data.volt >
- (di->bm->fg_params->lowbat_threshold +
- BAT_PLUS_MARGIN))
- abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
- break;
-
case STATE_NORMAL:
handle_maxim_chg_curr(di);
if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 9bfbde15b07d..6e1bc14c3304 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -88,6 +88,8 @@
#define CHRG_VBUS_ILIM_2000MA 0x4 /* 2000mA */
#define CHRG_VBUS_ILIM_2500MA 0x5 /* 2500mA */
#define CHRG_VBUS_ILIM_3000MA 0x6 /* 3000mA */
+#define CHRG_VBUS_ILIM_3500MA 0x7 /* 3500mA */
+#define CHRG_VBUS_ILIM_4000MA 0x8 /* 4000mA */
#define CHRG_VLTFC_0C 0xA5 /* 0 DegC */
#define CHRG_VHTFC_45C 0x1F /* 45 DegC */
@@ -223,9 +225,11 @@ static int axp288_charger_get_vbus_inlmt(struct axp288_chrg_info *info)
return 2500000;
case CHRG_VBUS_ILIM_3000MA:
return 3000000;
+ case CHRG_VBUS_ILIM_3500MA:
+ return 3500000;
default:
- dev_warn(&info->pdev->dev, "Unknown ilim reg val: %d\n", val);
- return 0;
+ /* All b1xxx values map to 4000 mA */
+ return 4000000;
}
}
@@ -235,7 +239,11 @@ static inline int axp288_charger_set_vbus_inlmt(struct axp288_chrg_info *info,
int ret;
u8 reg_val;
- if (inlmt >= 3000000)
+ if (inlmt >= 4000000)
+ reg_val = CHRG_VBUS_ILIM_4000MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 3500000)
+ reg_val = CHRG_VBUS_ILIM_3500MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 3000000)
reg_val = CHRG_VBUS_ILIM_3000MA << CHRG_VBUS_ILIM_BIT_POS;
else if (inlmt >= 2500000)
reg_val = CHRG_VBUS_ILIM_2500MA << CHRG_VBUS_ILIM_BIT_POS;
@@ -739,6 +747,18 @@ static int axp288_charger_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config charger_cfg = {};
+ unsigned int val;
+
+ /*
+ * On some devices the fuelgauge and charger parts of the axp288 are
+ * not used, check that the fuelgauge is enabled (CC_CTRL != 0).
+ */
+ ret = regmap_read(axp20x->regmap, AXP20X_CC_CTRL, &val);
+ if (ret < 0)
+ return ret;
+ if (val == 0)
+ return -ENODEV;
+
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index fd8f0b2210bc..084c8ba9749d 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -24,7 +24,6 @@
#include <linux/regmap.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
-#include <linux/workqueue.h>
#include <linux/mfd/axp20x.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
@@ -88,7 +87,6 @@
#define FG_LOW_CAP_CRIT_THR 4 /* 4 perc */
#define FG_LOW_CAP_SHDN_THR 0 /* 0 perc */
-#define STATUS_MON_DELAY_JIFFIES (HZ * 60) /*60 sec */
#define NR_RETRY_CNT 3
#define DEV_NAME "axp288_fuel_gauge"
@@ -128,7 +126,6 @@ struct axp288_fg_info {
struct mutex lock;
int status;
int max_volt;
- struct delayed_work status_monitor;
struct dentry *debug_file;
};
@@ -592,16 +589,6 @@ static int fuel_gauge_property_is_writeable(struct power_supply *psy,
return ret;
}
-static void fuel_gauge_status_monitor(struct work_struct *work)
-{
- struct axp288_fg_info *info = container_of(work,
- struct axp288_fg_info, status_monitor.work);
-
- fuel_gauge_get_status(info);
- power_supply_changed(info->bat);
- schedule_delayed_work(&info->status_monitor, STATUS_MON_DELAY_JIFFIES);
-}
-
static irqreturn_t fuel_gauge_thread_handler(int irq, void *dev)
{
struct axp288_fg_info *info = dev;
@@ -754,10 +741,21 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
[BAT_D_CURR] = "axp288-chrg-d-curr",
[BAT_VOLT] = "axp288-batt-volt",
};
+ unsigned int val;
if (dmi_check_system(axp288_fuel_gauge_blacklist))
return -ENODEV;
+ /*
+ * On some devices the fuelgauge and charger parts of the axp288 are
+ * not used, check that the fuelgauge is enabled (CC_CTRL != 0).
+ */
+ ret = regmap_read(axp20x->regmap, AXP20X_CC_CTRL, &val);
+ if (ret < 0)
+ return ret;
+ if (val == 0)
+ return -ENODEV;
+
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -770,7 +768,6 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
mutex_init(&info->lock);
- INIT_DELAYED_WORK(&info->status_monitor, fuel_gauge_status_monitor);
for (i = 0; i < IIO_CHANNEL_NUM; i++) {
/*
@@ -830,7 +827,6 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
fuel_gauge_create_debugfs(info);
fuel_gauge_init_irq(info);
- schedule_delayed_work(&info->status_monitor, STATUS_MON_DELAY_JIFFIES);
return 0;
@@ -853,7 +849,6 @@ static int axp288_fuel_gauge_remove(struct platform_device *pdev)
struct axp288_fg_info *info = platform_get_drvdata(pdev);
int i;
- cancel_delayed_work_sync(&info->status_monitor);
power_supply_unregister(info->bat);
fuel_gauge_remove_debugfs(info);
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 7ce60519b1bc..d44ed8e17c47 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -432,6 +432,7 @@ static u8
BQ27XXX_DM_REG_ROWS,
};
#define bq27425_regs bq27421_regs
+#define bq27426_regs bq27421_regs
#define bq27441_regs bq27421_regs
#define bq27621_regs bq27421_regs
@@ -664,6 +665,7 @@ static enum power_supply_property bq27421_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
#define bq27425_props bq27421_props
+#define bq27426_props bq27421_props
#define bq27441_props bq27421_props
#define bq27621_props bq27421_props
@@ -734,6 +736,12 @@ static struct bq27xxx_dm_reg bq27425_dm_regs[] = {
[BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 18, 2, 2800, 3700 },
};
+static struct bq27xxx_dm_reg bq27426_dm_regs[] = {
+ [BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 6, 2, 0, 8000 },
+ [BQ27XXX_DM_DESIGN_ENERGY] = { 82, 8, 2, 0, 32767 },
+ [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 10, 2, 2500, 3700 },
+};
+
#if 0 /* not yet tested */
#define bq27441_dm_regs bq27421_dm_regs
#else
@@ -795,6 +803,7 @@ static struct {
[BQ27545] = BQ27XXX_DATA(bq27545, 0x04143672, BQ27XXX_O_OTDC),
[BQ27421] = BQ27XXX_DATA(bq27421, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27425] = BQ27XXX_DATA(bq27425, 0x04143672, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP),
+ [BQ27426] = BQ27XXX_DATA(bq27426, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27441] = BQ27XXX_DATA(bq27441, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27621] = BQ27XXX_DATA(bq27621, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
};
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 6b25e5f2337e..40069128ad44 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -249,6 +249,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27545", BQ27545 },
{ "bq27421", BQ27421 },
{ "bq27425", BQ27425 },
+ { "bq27426", BQ27426 },
{ "bq27441", BQ27441 },
{ "bq27621", BQ27621 },
{},
@@ -280,6 +281,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27545" },
{ .compatible = "ti,bq27421" },
{ .compatible = "ti,bq27425" },
+ { .compatible = "ti,bq27426" },
{ .compatible = "ti,bq27441" },
{ .compatible = "ti,bq27621" },
{},
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 1de4b4493824..2a50b4654793 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -1700,8 +1700,9 @@ static int charger_manager_probe(struct platform_device *pdev)
power_supply_put(psy);
}
- if (desc->polling_interval_ms == 0 ||
- msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) {
+ if (cm->desc->polling_mode != CM_POLL_DISABLE &&
+ (desc->polling_interval_ms == 0 ||
+ msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL)) {
dev_err(&pdev->dev, "polling_interval_ms is too small\n");
return -EINVAL;
}
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index bd2468ca6b63..c3f2a9479468 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -212,8 +212,7 @@ static int gpio_charger_suspend(struct device *dev)
static int gpio_charger_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+ struct gpio_charger *gpio_charger = dev_get_drvdata(dev);
if (device_may_wakeup(dev) && gpio_charger->wakeup_enabled)
disable_irq_wake(gpio_charger->irq);
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 1350068c401a..6170ed8b6854 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -116,15 +116,15 @@ static ssize_t power_supply_show_usb_type(struct device *dev,
static ssize_t power_supply_show_property(struct device *dev,
struct device_attribute *attr,
char *buf) {
- ssize_t ret = 0;
+ ssize_t ret;
struct power_supply *psy = dev_get_drvdata(dev);
- const ptrdiff_t off = attr - power_supply_attrs;
+ enum power_supply_property psp = attr - power_supply_attrs;
union power_supply_propval value;
- if (off == POWER_SUPPLY_PROP_TYPE) {
+ if (psp == POWER_SUPPLY_PROP_TYPE) {
value.intval = psy->desc->type;
} else {
- ret = power_supply_get_property(psy, off, &value);
+ ret = power_supply_get_property(psy, psp, &value);
if (ret < 0) {
if (ret == -ENODATA)
@@ -137,35 +137,48 @@ static ssize_t power_supply_show_property(struct device *dev,
}
}
- if (off == POWER_SUPPLY_PROP_STATUS)
- return sprintf(buf, "%s\n",
- power_supply_status_text[value.intval]);
- else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE)
- return sprintf(buf, "%s\n",
- power_supply_charge_type_text[value.intval]);
- else if (off == POWER_SUPPLY_PROP_HEALTH)
- return sprintf(buf, "%s\n",
- power_supply_health_text[value.intval]);
- else if (off == POWER_SUPPLY_PROP_TECHNOLOGY)
- return sprintf(buf, "%s\n",
- power_supply_technology_text[value.intval]);
- else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
- return sprintf(buf, "%s\n",
- power_supply_capacity_level_text[value.intval]);
- else if (off == POWER_SUPPLY_PROP_TYPE)
- return sprintf(buf, "%s\n",
- power_supply_type_text[value.intval]);
- else if (off == POWER_SUPPLY_PROP_USB_TYPE)
- return power_supply_show_usb_type(dev, psy->desc->usb_types,
- psy->desc->num_usb_types,
- &value, buf);
- else if (off == POWER_SUPPLY_PROP_SCOPE)
- return sprintf(buf, "%s\n",
- power_supply_scope_text[value.intval]);
- else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
- return sprintf(buf, "%s\n", value.strval);
-
- return sprintf(buf, "%d\n", value.intval);
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = sprintf(buf, "%s\n",
+ power_supply_status_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = sprintf(buf, "%s\n",
+ power_supply_charge_type_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = sprintf(buf, "%s\n",
+ power_supply_health_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ ret = sprintf(buf, "%s\n",
+ power_supply_technology_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ ret = sprintf(buf, "%s\n",
+ power_supply_capacity_level_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_TYPE:
+ ret = sprintf(buf, "%s\n",
+ power_supply_type_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ ret = power_supply_show_usb_type(dev, psy->desc->usb_types,
+ psy->desc->num_usb_types,
+ &value, buf);
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ ret = sprintf(buf, "%s\n",
+ power_supply_scope_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ ret = sprintf(buf, "%s\n", value.strval);
+ break;
+ default:
+ ret = sprintf(buf, "%d\n", value.intval);
+ }
+
+ return ret;
}
static ssize_t power_supply_store_property(struct device *dev,
@@ -173,11 +186,10 @@ static ssize_t power_supply_store_property(struct device *dev,
const char *buf, size_t count) {
ssize_t ret;
struct power_supply *psy = dev_get_drvdata(dev);
- const ptrdiff_t off = attr - power_supply_attrs;
+ enum power_supply_property psp = attr - power_supply_attrs;
union power_supply_propval value;
- /* maybe it is a enum property? */
- switch (off) {
+ switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
ret = sysfs_match_string(power_supply_status_text, buf);
break;
@@ -216,7 +228,7 @@ static ssize_t power_supply_store_property(struct device *dev,
value.intval = ret;
- ret = power_supply_set_property(psy, off, &value);
+ ret = power_supply_set_property(psy, psp, &value);
if (ret < 0)
return ret;
diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
index 0ffe5cd3abf6..3d00b35cafc9 100644
--- a/drivers/power/supply/s3c_adc_battery.c
+++ b/drivers/power/supply/s3c_adc_battery.c
@@ -293,6 +293,7 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
{
struct s3c_adc_client *client;
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
+ struct power_supply_config psy_cfg = {};
int ret;
client = s3c_adc_register(pdev, NULL, NULL, 0);
@@ -309,14 +310,15 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
main_bat.cur_value = -1;
main_bat.cable_plugged = 0;
main_bat.status = POWER_SUPPLY_STATUS_DISCHARGING;
+ psy_cfg.drv_data = &main_bat;
- main_bat.psy = power_supply_register(&pdev->dev, &main_bat_desc, NULL);
+ main_bat.psy = power_supply_register(&pdev->dev, &main_bat_desc, &psy_cfg);
if (IS_ERR(main_bat.psy)) {
ret = PTR_ERR(main_bat.psy);
goto err_reg_main;
}
if (pdata->backup_volt_mult) {
- const struct power_supply_config psy_cfg
+ const struct power_supply_config backup_psy_cfg
= { .drv_data = &backup_bat, };
backup_bat.client = client;
@@ -324,7 +326,7 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
backup_bat.volt_value = -1;
backup_bat.psy = power_supply_register(&pdev->dev,
&backup_bat_desc,
- &psy_cfg);
+ &backup_psy_cfg);
if (IS_ERR(backup_bat.psy)) {
ret = PTR_ERR(backup_bat.psy);
goto err_reg_backup;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 097f61784a7d..5dbccf5f3037 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -180,6 +180,17 @@ config REGULATOR_BCM590XX
BCM590xx PMUs. This will enable support for the software
controllable LDO/Switching regulators.
+config REGULATOR_BD71837
+ tristate "ROHM BD71837 Power Regulator"
+ depends on MFD_BD71837
+ help
+ This driver supports voltage regulators on ROHM BD71837 PMIC.
+ This will enable support for the software controllable buck
+ and LDO regulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd71837-regulator.
+
config REGULATOR_BD9571MWV
tristate "ROHM BD9571MWV Regulators"
depends on MFD_BD9571MWV
@@ -801,6 +812,13 @@ config REGULATOR_STW481X_VMMC
This driver supports the internal VMMC regulator in the STw481x
PMIC chips.
+config REGULATOR_SY8106A
+ tristate "Silergy SY8106A regulator"
+ depends on I2C && (OF || COMPILE_TEST)
+ select REGMAP_I2C
+ help
+ This driver supports SY8106A single output regulator.
+
config REGULATOR_TPS51632
tristate "TI TPS51632 Power Regulator"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 590674fbecd7..bd818ceb7c72 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
+obj-$(CONFIG_REGULATOR_BD71837) += bd71837-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
@@ -100,6 +101,7 @@ obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
+obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
@@ -125,5 +127,4 @@ obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
-
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 0f97514e3474..83dba3fbfe0c 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -132,33 +132,6 @@ static const unsigned int ldo_vaux56_voltages[] = {
2790000,
};
-static const unsigned int ldo_vaux3_ab8540_voltages[] = {
- 1200000,
- 1500000,
- 1800000,
- 2100000,
- 2500000,
- 2750000,
- 2790000,
- 2910000,
- 3050000,
-};
-
-static const unsigned int ldo_vaux56_ab8540_voltages[] = {
- 750000, 760000, 770000, 780000, 790000, 800000,
- 810000, 820000, 830000, 840000, 850000, 860000,
- 870000, 880000, 890000, 900000, 910000, 920000,
- 930000, 940000, 950000, 960000, 970000, 980000,
- 990000, 1000000, 1010000, 1020000, 1030000,
- 1040000, 1050000, 1060000, 1070000, 1080000,
- 1090000, 1100000, 1110000, 1120000, 1130000,
- 1140000, 1150000, 1160000, 1170000, 1180000,
- 1190000, 1200000, 1210000, 1220000, 1230000,
- 1240000, 1250000, 1260000, 1270000, 1280000,
- 1290000, 1300000, 1310000, 1320000, 1330000,
- 1340000, 1350000, 1360000, 1800000, 2790000,
-};
-
static const unsigned int ldo_vintcore_voltages[] = {
1200000,
1225000,
@@ -232,8 +205,6 @@ static const unsigned int ldo_vdmic_voltages[] = {
static DEFINE_MUTEX(shared_mode_mutex);
static struct ab8500_shared_mode ldo_anamic1_shared;
static struct ab8500_shared_mode ldo_anamic2_shared;
-static struct ab8500_shared_mode ab8540_ldo_anamic1_shared;
-static struct ab8500_shared_mode ab8540_ldo_anamic2_shared;
static int ab8500_regulator_enable(struct regulator_dev *rdev)
{
@@ -507,53 +478,6 @@ static int ab8500_regulator_get_voltage_sel(struct regulator_dev *rdev)
return (regval & info->voltage_mask) >> voltage_shift;
}
-static int ab8540_aux3_regulator_get_voltage_sel(struct regulator_dev *rdev)
-{
- int ret, voltage_shift;
- struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- u8 regval, regval_expand;
-
- if (info == NULL) {
- dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
- return -EINVAL;
- }
-
- ret = abx500_get_register_interruptible(info->dev,
- info->expand_register.voltage_bank,
- info->expand_register.voltage_reg, &regval_expand);
- if (ret < 0) {
- dev_err(rdev_get_dev(rdev),
- "couldn't read voltage expand reg for regulator\n");
- return ret;
- }
-
- dev_vdbg(rdev_get_dev(rdev),
- "%s-get_voltage expand (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
- info->desc.name, info->expand_register.voltage_bank,
- info->expand_register.voltage_reg,
- info->expand_register.voltage_mask, regval_expand);
-
- if (regval_expand & info->expand_register.voltage_mask)
- return info->expand_register.voltage_limit;
-
- ret = abx500_get_register_interruptible(info->dev,
- info->voltage_bank, info->voltage_reg, &regval);
- if (ret < 0) {
- dev_err(rdev_get_dev(rdev),
- "couldn't read voltage reg for regulator\n");
- return ret;
- }
-
- dev_vdbg(rdev_get_dev(rdev),
- "%s-get_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
- info->desc.name, info->voltage_bank, info->voltage_reg,
- info->voltage_mask, regval);
-
- voltage_shift = ffs(info->voltage_mask) - 1;
-
- return (regval & info->voltage_mask) >> voltage_shift;
-}
-
static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
@@ -586,61 +510,6 @@ static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
-static int ab8540_aux3_regulator_set_voltage_sel(struct regulator_dev *rdev,
- unsigned selector)
-{
- int ret;
- struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- u8 regval, regval_expand;
-
- if (info == NULL) {
- dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
- return -EINVAL;
- }
-
- if (selector < info->expand_register.voltage_limit) {
- int voltage_shift = ffs(info->voltage_mask) - 1;
-
- regval = (u8)selector << voltage_shift;
- ret = abx500_mask_and_set_register_interruptible(info->dev,
- info->voltage_bank, info->voltage_reg,
- info->voltage_mask, regval);
- if (ret < 0) {
- dev_err(rdev_get_dev(rdev),
- "couldn't set voltage reg for regulator\n");
- return ret;
- }
-
- dev_vdbg(rdev_get_dev(rdev),
- "%s-set_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
- info->desc.name, info->voltage_bank, info->voltage_reg,
- info->voltage_mask, regval);
-
- regval_expand = 0;
- } else {
- regval_expand = info->expand_register.voltage_mask;
- }
-
- ret = abx500_mask_and_set_register_interruptible(info->dev,
- info->expand_register.voltage_bank,
- info->expand_register.voltage_reg,
- info->expand_register.voltage_mask,
- regval_expand);
- if (ret < 0) {
- dev_err(rdev_get_dev(rdev),
- "couldn't set expand voltage reg for regulator\n");
- return ret;
- }
-
- dev_vdbg(rdev_get_dev(rdev),
- "%s-set_voltage expand (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
- info->desc.name, info->expand_register.voltage_bank,
- info->expand_register.voltage_reg,
- info->expand_register.voltage_mask, regval_expand);
-
- return 0;
-}
-
static struct regulator_ops ab8500_regulator_volt_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
@@ -653,18 +522,6 @@ static struct regulator_ops ab8500_regulator_volt_mode_ops = {
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops ab8540_aux3_regulator_volt_mode_ops = {
- .enable = ab8500_regulator_enable,
- .disable = ab8500_regulator_disable,
- .get_optimum_mode = ab8500_regulator_get_optimum_mode,
- .set_mode = ab8500_regulator_set_mode,
- .get_mode = ab8500_regulator_get_mode,
- .is_enabled = ab8500_regulator_is_enabled,
- .get_voltage_sel = ab8540_aux3_regulator_get_voltage_sel,
- .set_voltage_sel = ab8540_aux3_regulator_set_voltage_sel,
- .list_voltage = regulator_list_voltage_table,
-};
-
static struct regulator_ops ab8500_regulator_volt_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
@@ -1217,562 +1074,6 @@ static struct ab8500_regulator_info
},
};
-/* AB9540 regulator information */
-static struct ab8500_regulator_info
- ab9540_regulator_info[AB9540_NUM_REGULATORS] = {
- /*
- * Variable Voltage Regulators
- * name, min mV, max mV,
- * update bank, reg, mask, enable val
- * volt bank, reg, mask
- */
- [AB9540_LDO_AUX1] = {
- .desc = {
- .name = "LDO-AUX1",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_AUX1,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
- .volt_table = ldo_vauxn_voltages,
- },
- .load_lp_uA = 5000,
- .update_bank = 0x04,
- .update_reg = 0x09,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- .voltage_bank = 0x04,
- .voltage_reg = 0x1f,
- .voltage_mask = 0x0f,
- },
- [AB9540_LDO_AUX2] = {
- .desc = {
- .name = "LDO-AUX2",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_AUX2,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
- .volt_table = ldo_vauxn_voltages,
- },
- .load_lp_uA = 5000,
- .update_bank = 0x04,
- .update_reg = 0x09,
- .update_mask = 0x0c,
- .update_val = 0x04,
- .update_val_idle = 0x0c,
- .update_val_normal = 0x04,
- .voltage_bank = 0x04,
- .voltage_reg = 0x20,
- .voltage_mask = 0x0f,
- },
- [AB9540_LDO_AUX3] = {
- .desc = {
- .name = "LDO-AUX3",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_AUX3,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
- .volt_table = ldo_vaux3_voltages,
- },
- .load_lp_uA = 5000,
- .update_bank = 0x04,
- .update_reg = 0x0a,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- .voltage_bank = 0x04,
- .voltage_reg = 0x21,
- .voltage_mask = 0x07,
- },
- [AB9540_LDO_AUX4] = {
- .desc = {
- .name = "LDO-AUX4",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_AUX4,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
- .volt_table = ldo_vauxn_voltages,
- },
- .load_lp_uA = 5000,
- /* values for Vaux4Regu register */
- .update_bank = 0x04,
- .update_reg = 0x2e,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- /* values for Vaux4SEL register */
- .voltage_bank = 0x04,
- .voltage_reg = 0x2f,
- .voltage_mask = 0x0f,
- },
- [AB9540_LDO_INTCORE] = {
- .desc = {
- .name = "LDO-INTCORE",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_INTCORE,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
- .volt_table = ldo_vintcore_voltages,
- },
- .load_lp_uA = 5000,
- .update_bank = 0x03,
- .update_reg = 0x80,
- .update_mask = 0x44,
- .update_val = 0x44,
- .update_val_idle = 0x44,
- .update_val_normal = 0x04,
- .voltage_bank = 0x03,
- .voltage_reg = 0x80,
- .voltage_mask = 0x38,
- },
-
- /*
- * Fixed Voltage Regulators
- * name, fixed mV,
- * update bank, reg, mask, enable val
- */
- [AB9540_LDO_TVOUT] = {
- .desc = {
- .name = "LDO-TVOUT",
- .ops = &ab8500_regulator_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_TVOUT,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_2000000_voltage,
- .enable_time = 10000,
- },
- .load_lp_uA = 1000,
- .update_bank = 0x03,
- .update_reg = 0x80,
- .update_mask = 0x82,
- .update_val = 0x02,
- .update_val_idle = 0x82,
- .update_val_normal = 0x02,
- },
- [AB9540_LDO_USB] = {
- .desc = {
- .name = "LDO-USB",
- .ops = &ab8500_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_USB,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_3300000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x82,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- },
- [AB9540_LDO_AUDIO] = {
- .desc = {
- .name = "LDO-AUDIO",
- .ops = &ab8500_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_AUDIO,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_2000000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x83,
- .update_mask = 0x02,
- .update_val = 0x02,
- },
- [AB9540_LDO_ANAMIC1] = {
- .desc = {
- .name = "LDO-ANAMIC1",
- .ops = &ab8500_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_ANAMIC1,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_2050000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x83,
- .update_mask = 0x08,
- .update_val = 0x08,
- },
- [AB9540_LDO_ANAMIC2] = {
- .desc = {
- .name = "LDO-ANAMIC2",
- .ops = &ab8500_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_ANAMIC2,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_2050000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x83,
- .update_mask = 0x10,
- .update_val = 0x10,
- },
- [AB9540_LDO_DMIC] = {
- .desc = {
- .name = "LDO-DMIC",
- .ops = &ab8500_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_DMIC,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_1800000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x83,
- .update_mask = 0x04,
- .update_val = 0x04,
- },
-
- /*
- * Regulators with fixed voltage and normal/idle modes
- */
- [AB9540_LDO_ANA] = {
- .desc = {
- .name = "LDO-ANA",
- .ops = &ab8500_regulator_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB9540_LDO_ANA,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_1200000_voltage,
- },
- .load_lp_uA = 1000,
- .update_bank = 0x04,
- .update_reg = 0x06,
- .update_mask = 0x0c,
- .update_val = 0x08,
- .update_val_idle = 0x0c,
- .update_val_normal = 0x08,
- },
-};
-
-/* AB8540 regulator information */
-static struct ab8500_regulator_info
- ab8540_regulator_info[AB8540_NUM_REGULATORS] = {
- /*
- * Variable Voltage Regulators
- * name, min mV, max mV,
- * update bank, reg, mask, enable val
- * volt bank, reg, mask
- */
- [AB8540_LDO_AUX1] = {
- .desc = {
- .name = "LDO-AUX1",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_AUX1,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
- .volt_table = ldo_vauxn_voltages,
- },
- .load_lp_uA = 5000,
- .update_bank = 0x04,
- .update_reg = 0x09,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- .voltage_bank = 0x04,
- .voltage_reg = 0x1f,
- .voltage_mask = 0x0f,
- },
- [AB8540_LDO_AUX2] = {
- .desc = {
- .name = "LDO-AUX2",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_AUX2,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
- .volt_table = ldo_vauxn_voltages,
- },
- .load_lp_uA = 5000,
- .update_bank = 0x04,
- .update_reg = 0x09,
- .update_mask = 0x0c,
- .update_val = 0x04,
- .update_val_idle = 0x0c,
- .update_val_normal = 0x04,
- .voltage_bank = 0x04,
- .voltage_reg = 0x20,
- .voltage_mask = 0x0f,
- },
- [AB8540_LDO_AUX3] = {
- .desc = {
- .name = "LDO-AUX3",
- .ops = &ab8540_aux3_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_AUX3,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vaux3_ab8540_voltages),
- .volt_table = ldo_vaux3_ab8540_voltages,
- },
- .load_lp_uA = 5000,
- .update_bank = 0x04,
- .update_reg = 0x0a,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- .voltage_bank = 0x04,
- .voltage_reg = 0x21,
- .voltage_mask = 0x07,
- .expand_register = {
- .voltage_limit = 8,
- .voltage_bank = 0x04,
- .voltage_reg = 0x01,
- .voltage_mask = 0x10,
- }
- },
- [AB8540_LDO_AUX4] = {
- .desc = {
- .name = "LDO-AUX4",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_AUX4,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
- .volt_table = ldo_vauxn_voltages,
- },
- .load_lp_uA = 5000,
- /* values for Vaux4Regu register */
- .update_bank = 0x04,
- .update_reg = 0x2e,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- /* values for Vaux4SEL register */
- .voltage_bank = 0x04,
- .voltage_reg = 0x2f,
- .voltage_mask = 0x0f,
- },
- [AB8540_LDO_AUX5] = {
- .desc = {
- .name = "LDO-AUX5",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_AUX5,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vaux56_ab8540_voltages),
- .volt_table = ldo_vaux56_ab8540_voltages,
- },
- .load_lp_uA = 20000,
- /* values for Vaux5Regu register */
- .update_bank = 0x04,
- .update_reg = 0x32,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- /* values for Vaux5SEL register */
- .voltage_bank = 0x04,
- .voltage_reg = 0x33,
- .voltage_mask = 0x3f,
- },
- [AB8540_LDO_AUX6] = {
- .desc = {
- .name = "LDO-AUX6",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_AUX6,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vaux56_ab8540_voltages),
- .volt_table = ldo_vaux56_ab8540_voltages,
- },
- .load_lp_uA = 20000,
- /* values for Vaux6Regu register */
- .update_bank = 0x04,
- .update_reg = 0x35,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- /* values for Vaux6SEL register */
- .voltage_bank = 0x04,
- .voltage_reg = 0x36,
- .voltage_mask = 0x3f,
- },
- [AB8540_LDO_INTCORE] = {
- .desc = {
- .name = "LDO-INTCORE",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_INTCORE,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
- .volt_table = ldo_vintcore_voltages,
- },
- .load_lp_uA = 5000,
- .update_bank = 0x03,
- .update_reg = 0x80,
- .update_mask = 0x44,
- .update_val = 0x44,
- .update_val_idle = 0x44,
- .update_val_normal = 0x04,
- .voltage_bank = 0x03,
- .voltage_reg = 0x80,
- .voltage_mask = 0x38,
- },
-
- /*
- * Fixed Voltage Regulators
- * name, fixed mV,
- * update bank, reg, mask, enable val
- */
- [AB8540_LDO_TVOUT] = {
- .desc = {
- .name = "LDO-TVOUT",
- .ops = &ab8500_regulator_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_TVOUT,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_2000000_voltage,
- .enable_time = 10000,
- },
- .load_lp_uA = 1000,
- .update_bank = 0x03,
- .update_reg = 0x80,
- .update_mask = 0x82,
- .update_val = 0x02,
- .update_val_idle = 0x82,
- .update_val_normal = 0x02,
- },
- [AB8540_LDO_AUDIO] = {
- .desc = {
- .name = "LDO-AUDIO",
- .ops = &ab8500_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_AUDIO,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_2000000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x83,
- .update_mask = 0x02,
- .update_val = 0x02,
- },
- [AB8540_LDO_ANAMIC1] = {
- .desc = {
- .name = "LDO-ANAMIC1",
- .ops = &ab8500_regulator_anamic_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_ANAMIC1,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_2050000_voltage,
- },
- .shared_mode = &ab8540_ldo_anamic1_shared,
- .update_bank = 0x03,
- .update_reg = 0x83,
- .update_mask = 0x08,
- .update_val = 0x08,
- .mode_bank = 0x03,
- .mode_reg = 0x83,
- .mode_mask = 0x20,
- .mode_val_idle = 0x20,
- .mode_val_normal = 0x00,
- },
- [AB8540_LDO_ANAMIC2] = {
- .desc = {
- .name = "LDO-ANAMIC2",
- .ops = &ab8500_regulator_anamic_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_ANAMIC2,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_2050000_voltage,
- },
- .shared_mode = &ab8540_ldo_anamic2_shared,
- .update_bank = 0x03,
- .update_reg = 0x83,
- .update_mask = 0x10,
- .update_val = 0x10,
- .mode_bank = 0x03,
- .mode_reg = 0x83,
- .mode_mask = 0x20,
- .mode_val_idle = 0x20,
- .mode_val_normal = 0x00,
- },
- [AB8540_LDO_DMIC] = {
- .desc = {
- .name = "LDO-DMIC",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_DMIC,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_vdmic_voltages),
- .volt_table = ldo_vdmic_voltages,
- },
- .load_lp_uA = 1000,
- .update_bank = 0x03,
- .update_reg = 0x83,
- .update_mask = 0x04,
- .update_val = 0x04,
- .voltage_bank = 0x03,
- .voltage_reg = 0x83,
- .voltage_mask = 0xc0,
- },
-
- /*
- * Regulators with fixed voltage and normal/idle modes
- */
- [AB8540_LDO_ANA] = {
- .desc = {
- .name = "LDO-ANA",
- .ops = &ab8500_regulator_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_ANA,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_1200000_voltage,
- },
- .load_lp_uA = 1000,
- .update_bank = 0x04,
- .update_reg = 0x06,
- .update_mask = 0x0c,
- .update_val = 0x04,
- .update_val_idle = 0x0c,
- .update_val_normal = 0x04,
- },
- [AB8540_LDO_SDIO] = {
- .desc = {
- .name = "LDO-SDIO",
- .ops = &ab8500_regulator_volt_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8540_LDO_SDIO,
- .owner = THIS_MODULE,
- .n_voltages = ARRAY_SIZE(ldo_sdio_voltages),
- .volt_table = ldo_sdio_voltages,
- },
- .load_lp_uA = 5000,
- .update_bank = 0x03,
- .update_reg = 0x88,
- .update_mask = 0x30,
- .update_val = 0x10,
- .update_val_idle = 0x30,
- .update_val_normal = 0x10,
- .voltage_bank = 0x03,
- .voltage_reg = 0x88,
- .voltage_mask = 0x07,
- },
-};
-
static struct ab8500_shared_mode ldo_anamic1_shared = {
.shared_regulator = &ab8505_regulator_info[AB8505_LDO_ANAMIC2],
};
@@ -1781,14 +1082,6 @@ static struct ab8500_shared_mode ldo_anamic2_shared = {
.shared_regulator = &ab8505_regulator_info[AB8505_LDO_ANAMIC1],
};
-static struct ab8500_shared_mode ab8540_ldo_anamic1_shared = {
- .shared_regulator = &ab8540_regulator_info[AB8540_LDO_ANAMIC2],
-};
-
-static struct ab8500_shared_mode ab8540_ldo_anamic2_shared = {
- .shared_regulator = &ab8540_regulator_info[AB8540_LDO_ANAMIC1],
-};
-
struct ab8500_reg_init {
u8 bank;
u8 addr;
@@ -2243,659 +1536,6 @@ static struct ab8500_reg_init ab8505_reg_init[] = {
REG_INIT(AB8505_CTRLVAUX6, 0x01, 0x56, 0x9f),
};
-/* AB9540 register init */
-static struct ab8500_reg_init ab9540_reg_init[] = {
- /*
- * 0x03, VarmRequestCtrl
- * 0x0c, VapeRequestCtrl
- * 0x30, Vsmps1RequestCtrl
- * 0xc0, Vsmps2RequestCtrl
- */
- REG_INIT(AB9540_REGUREQUESTCTRL1, 0x03, 0x03, 0xff),
- /*
- * 0x03, Vsmps3RequestCtrl
- * 0x0c, VpllRequestCtrl
- * 0x30, VanaRequestCtrl
- * 0xc0, VextSupply1RequestCtrl
- */
- REG_INIT(AB9540_REGUREQUESTCTRL2, 0x03, 0x04, 0xff),
- /*
- * 0x03, VextSupply2RequestCtrl
- * 0x0c, VextSupply3RequestCtrl
- * 0x30, Vaux1RequestCtrl
- * 0xc0, Vaux2RequestCtrl
- */
- REG_INIT(AB9540_REGUREQUESTCTRL3, 0x03, 0x05, 0xff),
- /*
- * 0x03, Vaux3RequestCtrl
- * 0x04, SwHPReq
- */
- REG_INIT(AB9540_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
- /*
- * 0x01, Vsmps1SysClkReq1HPValid
- * 0x02, Vsmps2SysClkReq1HPValid
- * 0x04, Vsmps3SysClkReq1HPValid
- * 0x08, VanaSysClkReq1HPValid
- * 0x10, VpllSysClkReq1HPValid
- * 0x20, Vaux1SysClkReq1HPValid
- * 0x40, Vaux2SysClkReq1HPValid
- * 0x80, Vaux3SysClkReq1HPValid
- */
- REG_INIT(AB9540_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xff),
- /*
- * 0x01, VapeSysClkReq1HPValid
- * 0x02, VarmSysClkReq1HPValid
- * 0x04, VbbSysClkReq1HPValid
- * 0x08, VmodSysClkReq1HPValid
- * 0x10, VextSupply1SysClkReq1HPValid
- * 0x20, VextSupply2SysClkReq1HPValid
- * 0x40, VextSupply3SysClkReq1HPValid
- */
- REG_INIT(AB9540_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x7f),
- /*
- * 0x01, Vsmps1HwHPReq1Valid
- * 0x02, Vsmps2HwHPReq1Valid
- * 0x04, Vsmps3HwHPReq1Valid
- * 0x08, VanaHwHPReq1Valid
- * 0x10, VpllHwHPReq1Valid
- * 0x20, Vaux1HwHPReq1Valid
- * 0x40, Vaux2HwHPReq1Valid
- * 0x80, Vaux3HwHPReq1Valid
- */
- REG_INIT(AB9540_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xff),
- /*
- * 0x01, VextSupply1HwHPReq1Valid
- * 0x02, VextSupply2HwHPReq1Valid
- * 0x04, VextSupply3HwHPReq1Valid
- * 0x08, VmodHwHPReq1Valid
- */
- REG_INIT(AB9540_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x0f),
- /*
- * 0x01, Vsmps1HwHPReq2Valid
- * 0x02, Vsmps2HwHPReq2Valid
- * 0x03, Vsmps3HwHPReq2Valid
- * 0x08, VanaHwHPReq2Valid
- * 0x10, VpllHwHPReq2Valid
- * 0x20, Vaux1HwHPReq2Valid
- * 0x40, Vaux2HwHPReq2Valid
- * 0x80, Vaux3HwHPReq2Valid
- */
- REG_INIT(AB9540_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xff),
- /*
- * 0x01, VextSupply1HwHPReq2Valid
- * 0x02, VextSupply2HwHPReq2Valid
- * 0x04, VextSupply3HwHPReq2Valid
- * 0x08, VmodHwHPReq2Valid
- */
- REG_INIT(AB9540_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x0f),
- /*
- * 0x01, VapeSwHPReqValid
- * 0x02, VarmSwHPReqValid
- * 0x04, Vsmps1SwHPReqValid
- * 0x08, Vsmps2SwHPReqValid
- * 0x10, Vsmps3SwHPReqValid
- * 0x20, VanaSwHPReqValid
- * 0x40, VpllSwHPReqValid
- * 0x80, Vaux1SwHPReqValid
- */
- REG_INIT(AB9540_REGUSWHPREQVALID1, 0x03, 0x0d, 0xff),
- /*
- * 0x01, Vaux2SwHPReqValid
- * 0x02, Vaux3SwHPReqValid
- * 0x04, VextSupply1SwHPReqValid
- * 0x08, VextSupply2SwHPReqValid
- * 0x10, VextSupply3SwHPReqValid
- * 0x20, VmodSwHPReqValid
- */
- REG_INIT(AB9540_REGUSWHPREQVALID2, 0x03, 0x0e, 0x3f),
- /*
- * 0x02, SysClkReq2Valid1
- * ...
- * 0x80, SysClkReq8Valid1
- */
- REG_INIT(AB9540_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe),
- /*
- * 0x02, SysClkReq2Valid2
- * ...
- * 0x80, SysClkReq8Valid2
- */
- REG_INIT(AB9540_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe),
- /*
- * 0x01, Vaux4SwHPReqValid
- * 0x02, Vaux4HwHPReq2Valid
- * 0x04, Vaux4HwHPReq1Valid
- * 0x08, Vaux4SysClkReq1HPValid
- */
- REG_INIT(AB9540_REGUVAUX4REQVALID, 0x03, 0x11, 0x0f),
- /*
- * 0x02, VTVoutEna
- * 0x04, Vintcore12Ena
- * 0x38, Vintcore12Sel
- * 0x40, Vintcore12LP
- * 0x80, VTVoutLP
- */
- REG_INIT(AB9540_REGUMISC1, 0x03, 0x80, 0xfe),
- /*
- * 0x02, VaudioEna
- * 0x04, VdmicEna
- * 0x08, Vamic1Ena
- * 0x10, Vamic2Ena
- */
- REG_INIT(AB9540_VAUDIOSUPPLY, 0x03, 0x83, 0x1e),
- /*
- * 0x01, Vamic1_dzout
- * 0x02, Vamic2_dzout
- */
- REG_INIT(AB9540_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
- /*
- * 0x03, Vsmps1Regu
- * 0x0c, Vsmps1SelCtrl
- * 0x10, Vsmps1AutoMode
- * 0x20, Vsmps1PWMMode
- */
- REG_INIT(AB9540_VSMPS1REGU, 0x04, 0x03, 0x3f),
- /*
- * 0x03, Vsmps2Regu
- * 0x0c, Vsmps2SelCtrl
- * 0x10, Vsmps2AutoMode
- * 0x20, Vsmps2PWMMode
- */
- REG_INIT(AB9540_VSMPS2REGU, 0x04, 0x04, 0x3f),
- /*
- * 0x03, Vsmps3Regu
- * 0x0c, Vsmps3SelCtrl
- * NOTE! PRCMU register
- */
- REG_INIT(AB9540_VSMPS3REGU, 0x04, 0x05, 0x0f),
- /*
- * 0x03, VpllRegu
- * 0x0c, VanaRegu
- */
- REG_INIT(AB9540_VPLLVANAREGU, 0x04, 0x06, 0x0f),
- /*
- * 0x03, VextSupply1Regu
- * 0x0c, VextSupply2Regu
- * 0x30, VextSupply3Regu
- * 0x40, ExtSupply2Bypass
- * 0x80, ExtSupply3Bypass
- */
- REG_INIT(AB9540_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
- /*
- * 0x03, Vaux1Regu
- * 0x0c, Vaux2Regu
- */
- REG_INIT(AB9540_VAUX12REGU, 0x04, 0x09, 0x0f),
- /*
- * 0x0c, Vrf1Regu
- * 0x03, Vaux3Regu
- */
- REG_INIT(AB9540_VRF1VAUX3REGU, 0x04, 0x0a, 0x0f),
- /*
- * 0x3f, Vsmps1Sel1
- */
- REG_INIT(AB9540_VSMPS1SEL1, 0x04, 0x13, 0x3f),
- /*
- * 0x3f, Vsmps1Sel2
- */
- REG_INIT(AB9540_VSMPS1SEL2, 0x04, 0x14, 0x3f),
- /*
- * 0x3f, Vsmps1Sel3
- */
- REG_INIT(AB9540_VSMPS1SEL3, 0x04, 0x15, 0x3f),
- /*
- * 0x3f, Vsmps2Sel1
- */
- REG_INIT(AB9540_VSMPS2SEL1, 0x04, 0x17, 0x3f),
- /*
- * 0x3f, Vsmps2Sel2
- */
- REG_INIT(AB9540_VSMPS2SEL2, 0x04, 0x18, 0x3f),
- /*
- * 0x3f, Vsmps2Sel3
- */
- REG_INIT(AB9540_VSMPS2SEL3, 0x04, 0x19, 0x3f),
- /*
- * 0x7f, Vsmps3Sel1
- * NOTE! PRCMU register
- */
- REG_INIT(AB9540_VSMPS3SEL1, 0x04, 0x1b, 0x7f),
- /*
- * 0x7f, Vsmps3Sel2
- * NOTE! PRCMU register
- */
- REG_INIT(AB9540_VSMPS3SEL2, 0x04, 0x1c, 0x7f),
- /*
- * 0x0f, Vaux1Sel
- */
- REG_INIT(AB9540_VAUX1SEL, 0x04, 0x1f, 0x0f),
- /*
- * 0x0f, Vaux2Sel
- */
- REG_INIT(AB9540_VAUX2SEL, 0x04, 0x20, 0x0f),
- /*
- * 0x07, Vaux3Sel
- * 0x30, Vrf1Sel
- */
- REG_INIT(AB9540_VRF1VAUX3SEL, 0x04, 0x21, 0x37),
- /*
- * 0x01, VextSupply12LP
- */
- REG_INIT(AB9540_REGUCTRL2SPARE, 0x04, 0x22, 0x01),
- /*
- * 0x03, Vaux4RequestCtrl
- */
- REG_INIT(AB9540_VAUX4REQCTRL, 0x04, 0x2d, 0x03),
- /*
- * 0x03, Vaux4Regu
- */
- REG_INIT(AB9540_VAUX4REGU, 0x04, 0x2e, 0x03),
- /*
- * 0x08, Vaux4Sel
- */
- REG_INIT(AB9540_VAUX4SEL, 0x04, 0x2f, 0x0f),
- /*
- * 0x01, VpllDisch
- * 0x02, Vrf1Disch
- * 0x04, Vaux1Disch
- * 0x08, Vaux2Disch
- * 0x10, Vaux3Disch
- * 0x20, Vintcore12Disch
- * 0x40, VTVoutDisch
- * 0x80, VaudioDisch
- */
- REG_INIT(AB9540_REGUCTRLDISCH, 0x04, 0x43, 0xff),
- /*
- * 0x01, VsimDisch
- * 0x02, VanaDisch
- * 0x04, VdmicPullDownEna
- * 0x08, VpllPullDownEna
- * 0x10, VdmicDisch
- */
- REG_INIT(AB9540_REGUCTRLDISCH2, 0x04, 0x44, 0x1f),
- /*
- * 0x01, Vaux4Disch
- */
- REG_INIT(AB9540_REGUCTRLDISCH3, 0x04, 0x48, 0x01),
-};
-
-/* AB8540 register init */
-static struct ab8500_reg_init ab8540_reg_init[] = {
- /*
- * 0x01, VSimSycClkReq1Valid
- * 0x02, VSimSycClkReq2Valid
- * 0x04, VSimSycClkReq3Valid
- * 0x08, VSimSycClkReq4Valid
- * 0x10, VSimSycClkReq5Valid
- * 0x20, VSimSycClkReq6Valid
- * 0x40, VSimSycClkReq7Valid
- * 0x80, VSimSycClkReq8Valid
- */
- REG_INIT(AB8540_VSIMSYSCLKCTRL, 0x02, 0x33, 0xff),
- /*
- * 0x03, VarmRequestCtrl
- * 0x0c, VapeRequestCtrl
- * 0x30, Vsmps1RequestCtrl
- * 0xc0, Vsmps2RequestCtrl
- */
- REG_INIT(AB8540_REGUREQUESTCTRL1, 0x03, 0x03, 0xff),
- /*
- * 0x03, Vsmps3RequestCtrl
- * 0x0c, VpllRequestCtrl
- * 0x30, VanaRequestCtrl
- * 0xc0, VextSupply1RequestCtrl
- */
- REG_INIT(AB8540_REGUREQUESTCTRL2, 0x03, 0x04, 0xff),
- /*
- * 0x03, VextSupply2RequestCtrl
- * 0x0c, VextSupply3RequestCtrl
- * 0x30, Vaux1RequestCtrl
- * 0xc0, Vaux2RequestCtrl
- */
- REG_INIT(AB8540_REGUREQUESTCTRL3, 0x03, 0x05, 0xff),
- /*
- * 0x03, Vaux3RequestCtrl
- * 0x04, SwHPReq
- */
- REG_INIT(AB8540_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
- /*
- * 0x01, Vsmps1SysClkReq1HPValid
- * 0x02, Vsmps2SysClkReq1HPValid
- * 0x04, Vsmps3SysClkReq1HPValid
- * 0x08, VanaSysClkReq1HPValid
- * 0x10, VpllSysClkReq1HPValid
- * 0x20, Vaux1SysClkReq1HPValid
- * 0x40, Vaux2SysClkReq1HPValid
- * 0x80, Vaux3SysClkReq1HPValid
- */
- REG_INIT(AB8540_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xff),
- /*
- * 0x01, VapeSysClkReq1HPValid
- * 0x02, VarmSysClkReq1HPValid
- * 0x04, VbbSysClkReq1HPValid
- * 0x10, VextSupply1SysClkReq1HPValid
- * 0x20, VextSupply2SysClkReq1HPValid
- * 0x40, VextSupply3SysClkReq1HPValid
- */
- REG_INIT(AB8540_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x77),
- /*
- * 0x01, Vsmps1HwHPReq1Valid
- * 0x02, Vsmps2HwHPReq1Valid
- * 0x04, Vsmps3HwHPReq1Valid
- * 0x08, VanaHwHPReq1Valid
- * 0x10, VpllHwHPReq1Valid
- * 0x20, Vaux1HwHPReq1Valid
- * 0x40, Vaux2HwHPReq1Valid
- * 0x80, Vaux3HwHPReq1Valid
- */
- REG_INIT(AB8540_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xff),
- /*
- * 0x01, VextSupply1HwHPReq1Valid
- * 0x02, VextSupply2HwHPReq1Valid
- * 0x04, VextSupply3HwHPReq1Valid
- */
- REG_INIT(AB8540_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x07),
- /*
- * 0x01, Vsmps1HwHPReq2Valid
- * 0x02, Vsmps2HwHPReq2Valid
- * 0x03, Vsmps3HwHPReq2Valid
- * 0x08, VanaHwHPReq2Valid
- * 0x10, VpllHwHPReq2Valid
- * 0x20, Vaux1HwHPReq2Valid
- * 0x40, Vaux2HwHPReq2Valid
- * 0x80, Vaux3HwHPReq2Valid
- */
- REG_INIT(AB8540_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xff),
- /*
- * 0x01, VextSupply1HwHPReq2Valid
- * 0x02, VextSupply2HwHPReq2Valid
- * 0x04, VextSupply3HwHPReq2Valid
- */
- REG_INIT(AB8540_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x07),
- /*
- * 0x01, VapeSwHPReqValid
- * 0x02, VarmSwHPReqValid
- * 0x04, Vsmps1SwHPReqValid
- * 0x08, Vsmps2SwHPReqValid
- * 0x10, Vsmps3SwHPReqValid
- * 0x20, VanaSwHPReqValid
- * 0x40, VpllSwHPReqValid
- * 0x80, Vaux1SwHPReqValid
- */
- REG_INIT(AB8540_REGUSWHPREQVALID1, 0x03, 0x0d, 0xff),
- /*
- * 0x01, Vaux2SwHPReqValid
- * 0x02, Vaux3SwHPReqValid
- * 0x04, VextSupply1SwHPReqValid
- * 0x08, VextSupply2SwHPReqValid
- * 0x10, VextSupply3SwHPReqValid
- */
- REG_INIT(AB8540_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f),
- /*
- * 0x02, SysClkReq2Valid1
- * ...
- * 0x80, SysClkReq8Valid1
- */
- REG_INIT(AB8540_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xff),
- /*
- * 0x02, SysClkReq2Valid2
- * ...
- * 0x80, SysClkReq8Valid2
- */
- REG_INIT(AB8540_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xff),
- /*
- * 0x01, Vaux4SwHPReqValid
- * 0x02, Vaux4HwHPReq2Valid
- * 0x04, Vaux4HwHPReq1Valid
- * 0x08, Vaux4SysClkReq1HPValid
- */
- REG_INIT(AB8540_REGUVAUX4REQVALID, 0x03, 0x11, 0x0f),
- /*
- * 0x01, Vaux5SwHPReqValid
- * 0x02, Vaux5HwHPReq2Valid
- * 0x04, Vaux5HwHPReq1Valid
- * 0x08, Vaux5SysClkReq1HPValid
- */
- REG_INIT(AB8540_REGUVAUX5REQVALID, 0x03, 0x12, 0x0f),
- /*
- * 0x01, Vaux6SwHPReqValid
- * 0x02, Vaux6HwHPReq2Valid
- * 0x04, Vaux6HwHPReq1Valid
- * 0x08, Vaux6SysClkReq1HPValid
- */
- REG_INIT(AB8540_REGUVAUX6REQVALID, 0x03, 0x13, 0x0f),
- /*
- * 0x01, VclkbSwHPReqValid
- * 0x02, VclkbHwHPReq2Valid
- * 0x04, VclkbHwHPReq1Valid
- * 0x08, VclkbSysClkReq1HPValid
- */
- REG_INIT(AB8540_REGUVCLKBREQVALID, 0x03, 0x14, 0x0f),
- /*
- * 0x01, Vrf1SwHPReqValid
- * 0x02, Vrf1HwHPReq2Valid
- * 0x04, Vrf1HwHPReq1Valid
- * 0x08, Vrf1SysClkReq1HPValid
- */
- REG_INIT(AB8540_REGUVRF1REQVALID, 0x03, 0x15, 0x0f),
- /*
- * 0x02, VTVoutEna
- * 0x04, Vintcore12Ena
- * 0x38, Vintcore12Sel
- * 0x40, Vintcore12LP
- * 0x80, VTVoutLP
- */
- REG_INIT(AB8540_REGUMISC1, 0x03, 0x80, 0xfe),
- /*
- * 0x02, VaudioEna
- * 0x04, VdmicEna
- * 0x08, Vamic1Ena
- * 0x10, Vamic2Ena
- * 0x20, Vamic12LP
- * 0xC0, VdmicSel
- */
- REG_INIT(AB8540_VAUDIOSUPPLY, 0x03, 0x83, 0xfe),
- /*
- * 0x01, Vamic1_dzout
- * 0x02, Vamic2_dzout
- */
- REG_INIT(AB8540_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
- /*
- * 0x07, VHSICSel
- * 0x08, VHSICOffState
- * 0x10, VHSIEna
- * 0x20, VHSICLP
- */
- REG_INIT(AB8540_VHSIC, 0x03, 0x87, 0x3f),
- /*
- * 0x07, VSDIOSel
- * 0x08, VSDIOOffState
- * 0x10, VSDIOEna
- * 0x20, VSDIOLP
- */
- REG_INIT(AB8540_VSDIO, 0x03, 0x88, 0x3f),
- /*
- * 0x03, Vsmps1Regu
- * 0x0c, Vsmps1SelCtrl
- * 0x10, Vsmps1AutoMode
- * 0x20, Vsmps1PWMMode
- */
- REG_INIT(AB8540_VSMPS1REGU, 0x04, 0x03, 0x3f),
- /*
- * 0x03, Vsmps2Regu
- * 0x0c, Vsmps2SelCtrl
- * 0x10, Vsmps2AutoMode
- * 0x20, Vsmps2PWMMode
- */
- REG_INIT(AB8540_VSMPS2REGU, 0x04, 0x04, 0x3f),
- /*
- * 0x03, Vsmps3Regu
- * 0x0c, Vsmps3SelCtrl
- * 0x10, Vsmps3AutoMode
- * 0x20, Vsmps3PWMMode
- * NOTE! PRCMU register
- */
- REG_INIT(AB8540_VSMPS3REGU, 0x04, 0x05, 0x0f),
- /*
- * 0x03, VpllRegu
- * 0x0c, VanaRegu
- */
- REG_INIT(AB8540_VPLLVANAREGU, 0x04, 0x06, 0x0f),
- /*
- * 0x03, VextSupply1Regu
- * 0x0c, VextSupply2Regu
- * 0x30, VextSupply3Regu
- * 0x40, ExtSupply2Bypass
- * 0x80, ExtSupply3Bypass
- */
- REG_INIT(AB8540_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
- /*
- * 0x03, Vaux1Regu
- * 0x0c, Vaux2Regu
- */
- REG_INIT(AB8540_VAUX12REGU, 0x04, 0x09, 0x0f),
- /*
- * 0x0c, VRF1Regu
- * 0x03, Vaux3Regu
- */
- REG_INIT(AB8540_VRF1VAUX3REGU, 0x04, 0x0a, 0x0f),
- /*
- * 0x3f, Vsmps1Sel1
- */
- REG_INIT(AB8540_VSMPS1SEL1, 0x04, 0x13, 0x3f),
- /*
- * 0x3f, Vsmps1Sel2
- */
- REG_INIT(AB8540_VSMPS1SEL2, 0x04, 0x14, 0x3f),
- /*
- * 0x3f, Vsmps1Sel3
- */
- REG_INIT(AB8540_VSMPS1SEL3, 0x04, 0x15, 0x3f),
- /*
- * 0x3f, Vsmps2Sel1
- */
- REG_INIT(AB8540_VSMPS2SEL1, 0x04, 0x17, 0x3f),
- /*
- * 0x3f, Vsmps2Sel2
- */
- REG_INIT(AB8540_VSMPS2SEL2, 0x04, 0x18, 0x3f),
- /*
- * 0x3f, Vsmps2Sel3
- */
- REG_INIT(AB8540_VSMPS2SEL3, 0x04, 0x19, 0x3f),
- /*
- * 0x7f, Vsmps3Sel1
- * NOTE! PRCMU register
- */
- REG_INIT(AB8540_VSMPS3SEL1, 0x04, 0x1b, 0x7f),
- /*
- * 0x7f, Vsmps3Sel2
- * NOTE! PRCMU register
- */
- REG_INIT(AB8540_VSMPS3SEL2, 0x04, 0x1c, 0x7f),
- /*
- * 0x0f, Vaux1Sel
- */
- REG_INIT(AB8540_VAUX1SEL, 0x04, 0x1f, 0x0f),
- /*
- * 0x0f, Vaux2Sel
- */
- REG_INIT(AB8540_VAUX2SEL, 0x04, 0x20, 0x0f),
- /*
- * 0x07, Vaux3Sel
- * 0x70, Vrf1Sel
- */
- REG_INIT(AB8540_VRF1VAUX3SEL, 0x04, 0x21, 0x77),
- /*
- * 0x01, VextSupply12LP
- */
- REG_INIT(AB8540_REGUCTRL2SPARE, 0x04, 0x22, 0x01),
- /*
- * 0x07, Vanasel
- * 0x30, Vpllsel
- */
- REG_INIT(AB8540_VANAVPLLSEL, 0x04, 0x29, 0x37),
- /*
- * 0x03, Vaux4RequestCtrl
- */
- REG_INIT(AB8540_VAUX4REQCTRL, 0x04, 0x2d, 0x03),
- /*
- * 0x03, Vaux4Regu
- */
- REG_INIT(AB8540_VAUX4REGU, 0x04, 0x2e, 0x03),
- /*
- * 0x0f, Vaux4Sel
- */
- REG_INIT(AB8540_VAUX4SEL, 0x04, 0x2f, 0x0f),
- /*
- * 0x03, Vaux5RequestCtrl
- */
- REG_INIT(AB8540_VAUX5REQCTRL, 0x04, 0x31, 0x03),
- /*
- * 0x03, Vaux5Regu
- */
- REG_INIT(AB8540_VAUX5REGU, 0x04, 0x32, 0x03),
- /*
- * 0x3f, Vaux5Sel
- */
- REG_INIT(AB8540_VAUX5SEL, 0x04, 0x33, 0x3f),
- /*
- * 0x03, Vaux6RequestCtrl
- */
- REG_INIT(AB8540_VAUX6REQCTRL, 0x04, 0x34, 0x03),
- /*
- * 0x03, Vaux6Regu
- */
- REG_INIT(AB8540_VAUX6REGU, 0x04, 0x35, 0x03),
- /*
- * 0x3f, Vaux6Sel
- */
- REG_INIT(AB8540_VAUX6SEL, 0x04, 0x36, 0x3f),
- /*
- * 0x03, VCLKBRequestCtrl
- */
- REG_INIT(AB8540_VCLKBREQCTRL, 0x04, 0x37, 0x03),
- /*
- * 0x03, VCLKBRegu
- */
- REG_INIT(AB8540_VCLKBREGU, 0x04, 0x38, 0x03),
- /*
- * 0x07, VCLKBSel
- */
- REG_INIT(AB8540_VCLKBSEL, 0x04, 0x39, 0x07),
- /*
- * 0x03, Vrf1RequestCtrl
- */
- REG_INIT(AB8540_VRF1REQCTRL, 0x04, 0x3a, 0x03),
- /*
- * 0x01, VpllDisch
- * 0x02, Vrf1Disch
- * 0x04, Vaux1Disch
- * 0x08, Vaux2Disch
- * 0x10, Vaux3Disch
- * 0x20, Vintcore12Disch
- * 0x40, VTVoutDisch
- * 0x80, VaudioDisch
- */
- REG_INIT(AB8540_REGUCTRLDISCH, 0x04, 0x43, 0xff),
- /*
- * 0x02, VanaDisch
- * 0x04, VdmicPullDownEna
- * 0x08, VpllPullDownEna
- * 0x10, VdmicDisch
- */
- REG_INIT(AB8540_REGUCTRLDISCH2, 0x04, 0x44, 0x1e),
- /*
- * 0x01, Vaux4Disch
- */
- REG_INIT(AB8540_REGUCTRLDISCH3, 0x04, 0x48, 0x01),
- /*
- * 0x01, Vaux5Disch
- * 0x02, Vaux6Disch
- * 0x04, VCLKBDisch
- */
- REG_INIT(AB8540_REGUCTRLDISCH4, 0x04, 0x49, 0x07),
-};
-
static struct of_regulator_match ab8500_regulator_match[] = {
{ .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, },
{ .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, },
@@ -2925,37 +1565,6 @@ static struct of_regulator_match ab8505_regulator_match[] = {
{ .name = "ab8500_ldo_ana", .driver_data = (void *) AB8505_LDO_ANA, },
};
-static struct of_regulator_match ab8540_regulator_match[] = {
- { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8540_LDO_AUX1, },
- { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8540_LDO_AUX2, },
- { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8540_LDO_AUX3, },
- { .name = "ab8500_ldo_aux4", .driver_data = (void *) AB8540_LDO_AUX4, },
- { .name = "ab8500_ldo_aux5", .driver_data = (void *) AB8540_LDO_AUX5, },
- { .name = "ab8500_ldo_aux6", .driver_data = (void *) AB8540_LDO_AUX6, },
- { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8540_LDO_INTCORE, },
- { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8540_LDO_TVOUT, },
- { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8540_LDO_AUDIO, },
- { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8540_LDO_ANAMIC1, },
- { .name = "ab8500_ldo_anamic2", .driver_data = (void *) AB8540_LDO_ANAMIC2, },
- { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8540_LDO_DMIC, },
- { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8540_LDO_ANA, },
- { .name = "ab8500_ldo_sdio", .driver_data = (void *) AB8540_LDO_SDIO, },
-};
-
-static struct of_regulator_match ab9540_regulator_match[] = {
- { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB9540_LDO_AUX1, },
- { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB9540_LDO_AUX2, },
- { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB9540_LDO_AUX3, },
- { .name = "ab8500_ldo_aux4", .driver_data = (void *) AB9540_LDO_AUX4, },
- { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB9540_LDO_INTCORE, },
- { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB9540_LDO_TVOUT, },
- { .name = "ab8500_ldo_audio", .driver_data = (void *) AB9540_LDO_AUDIO, },
- { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB9540_LDO_ANAMIC1, },
- { .name = "ab8500_ldo_anamic2", .driver_data = (void *) AB9540_LDO_ANAMIC2, },
- { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB9540_LDO_DMIC, },
- { .name = "ab8500_ldo_ana", .driver_data = (void *) AB9540_LDO_ANA, },
-};
-
static struct {
struct ab8500_regulator_info *info;
int info_size;
@@ -2967,27 +1576,13 @@ static struct {
static void abx500_get_regulator_info(struct ab8500 *ab8500)
{
- if (is_ab9540(ab8500)) {
- abx500_regulator.info = ab9540_regulator_info;
- abx500_regulator.info_size = ARRAY_SIZE(ab9540_regulator_info);
- abx500_regulator.init = ab9540_reg_init;
- abx500_regulator.init_size = AB9540_NUM_REGULATOR_REGISTERS;
- abx500_regulator.match = ab9540_regulator_match;
- abx500_regulator.match_size = ARRAY_SIZE(ab9540_regulator_match);
- } else if (is_ab8505(ab8500)) {
+ if (is_ab8505(ab8500)) {
abx500_regulator.info = ab8505_regulator_info;
abx500_regulator.info_size = ARRAY_SIZE(ab8505_regulator_info);
abx500_regulator.init = ab8505_reg_init;
abx500_regulator.init_size = AB8505_NUM_REGULATOR_REGISTERS;
abx500_regulator.match = ab8505_regulator_match;
abx500_regulator.match_size = ARRAY_SIZE(ab8505_regulator_match);
- } else if (is_ab8540(ab8500)) {
- abx500_regulator.info = ab8540_regulator_info;
- abx500_regulator.info_size = ARRAY_SIZE(ab8540_regulator_info);
- abx500_regulator.init = ab8540_reg_init;
- abx500_regulator.init_size = AB8540_NUM_REGULATOR_REGISTERS;
- abx500_regulator.match = ab8540_regulator_match;
- abx500_regulator.match_size = ARRAY_SIZE(ab8540_regulator_match);
} else {
abx500_regulator.info = ab8500_regulator_info;
abx500_regulator.info_size = ARRAY_SIZE(ab8500_regulator_info);
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 7d6478e6a503..d9d8155ed8cb 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -1,22 +1,6 @@
-/*
- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
-
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/slab.h>
#include <linux/device.h>
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 96fddfff5dc4..f6d6a4ad9e8a 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -17,12 +17,11 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/regulator/arizona-ldo1.h>
@@ -198,16 +197,6 @@ static int arizona_ldo1_of_get_pdata(struct arizona_ldo1_pdata *pdata,
struct device_node *init_node, *dcvdd_node;
struct regulator_init_data *init_data;
- pdata->ldoena = of_get_named_gpio(np, "wlf,ldoena", 0);
- if (pdata->ldoena < 0) {
- dev_warn(config->dev,
- "LDOENA GPIO property missing/malformed: %d\n",
- pdata->ldoena);
- pdata->ldoena = 0;
- } else {
- config->ena_gpio_initialized = true;
- }
-
init_node = of_get_child_by_name(np, "ldo1");
dcvdd_node = of_parse_phandle(np, "DCVDD-supply", 0);
@@ -264,7 +253,11 @@ static int arizona_ldo1_common_init(struct platform_device *pdev,
}
}
- config.ena_gpio = pdata->ldoena;
+ /* We assume that high output = regulator off */
+ config.ena_gpiod = devm_gpiod_get_optional(&pdev->dev, "wlf,ldoena",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(config.ena_gpiod))
+ return PTR_ERR(config.ena_gpiod);
if (pdata->init_data)
config.init_data = pdata->init_data;
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 181622b2813d..91b8ff8bac15 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -721,6 +721,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
case AXP803_ID:
regulators = axp803_regulators;
nregulators = AXP803_REG_ID_MAX;
+ drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
+ "x-powers,drive-vbus-en");
break;
case AXP806_ID:
regulators = axp806_regulators;
diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c
new file mode 100644
index 000000000000..6eae4d0432a2
--- /dev/null
+++ b/drivers/regulator/bd71837-regulator.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 ROHM Semiconductors
+// bd71837-regulator.c ROHM BD71837MWV regulator driver
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/bd71837.h>
+#include <linux/regulator/of_regulator.h>
+
+struct bd71837_pmic {
+ struct regulator_desc descs[BD71837_REGULATOR_CNT];
+ struct bd71837 *mfd;
+ struct platform_device *pdev;
+ struct regulator_dev *rdev[BD71837_REGULATOR_CNT];
+};
+
+/*
+ * BUCK1/2/3/4
+ * BUCK1RAMPRATE[1:0] BUCK1 DVS ramp rate setting
+ * 00: 10.00mV/usec 10mV 1uS
+ * 01: 5.00mV/usec 10mV 2uS
+ * 10: 2.50mV/usec 10mV 4uS
+ * 11: 1.25mV/usec 10mV 8uS
+ */
+static int bd71837_buck1234_set_ramp_delay(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ struct bd71837_pmic *pmic = rdev_get_drvdata(rdev);
+ struct bd71837 *mfd = pmic->mfd;
+ int id = rdev->desc->id;
+ unsigned int ramp_value = BUCK_RAMPRATE_10P00MV;
+
+ dev_dbg(&(pmic->pdev->dev), "Buck[%d] Set Ramp = %d\n", id + 1,
+ ramp_delay);
+ switch (ramp_delay) {
+ case 1 ... 1250:
+ ramp_value = BUCK_RAMPRATE_1P25MV;
+ break;
+ case 1251 ... 2500:
+ ramp_value = BUCK_RAMPRATE_2P50MV;
+ break;
+ case 2501 ... 5000:
+ ramp_value = BUCK_RAMPRATE_5P00MV;
+ break;
+ case 5001 ... 10000:
+ ramp_value = BUCK_RAMPRATE_10P00MV;
+ break;
+ default:
+ ramp_value = BUCK_RAMPRATE_10P00MV;
+ dev_err(&pmic->pdev->dev,
+ "%s: ramp_delay: %d not supported, setting 10000mV//us\n",
+ rdev->desc->name, ramp_delay);
+ }
+
+ return regmap_update_bits(mfd->regmap, BD71837_REG_BUCK1_CTRL + id,
+ BUCK_RAMPRATE_MASK, ramp_value << 6);
+}
+
+/* Bucks 1 to 4 support DVS. PWM mode is used when voltage is changed.
+ * Bucks 5 to 8 and LDOs can use PFM and must be disabled when voltage
+ * is changed. Hence we return -EBUSY for these if voltage is changed
+ * when BUCK/LDO is enabled.
+ */
+static int bd71837_set_voltage_sel_restricted(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ int ret;
+
+ ret = regulator_is_enabled_regmap(rdev);
+ if (!ret)
+ ret = regulator_set_voltage_sel_regmap(rdev, sel);
+ else if (ret == 1)
+ ret = -EBUSY;
+ return ret;
+}
+
+static struct regulator_ops bd71837_ldo_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = bd71837_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static struct regulator_ops bd71837_ldo_regulator_nolinear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = bd71837_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static struct regulator_ops bd71837_buck_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = bd71837_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops bd71837_buck_regulator_nolinear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = bd71837_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops bd71837_buck1234_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd71837_buck1234_set_ramp_delay,
+};
+
+/*
+ * BUCK1/2/3/4
+ * 0.70 to 1.30V (10mV step)
+ */
+static const struct regulator_linear_range bd71837_buck1234_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0x00, 0x3C, 10000),
+ REGULATOR_LINEAR_RANGE(1300000, 0x3D, 0x3F, 0),
+};
+
+/*
+ * BUCK5
+ * 0.7V to 1.35V (non-uniform steps, see ranges below)
+ */
+static const struct regulator_linear_range bd71837_buck5_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0x00, 0x03, 100000),
+ REGULATOR_LINEAR_RANGE(1050000, 0x04, 0x05, 50000),
+ REGULATOR_LINEAR_RANGE(1200000, 0x06, 0x07, 150000),
+};
+
+/*
+ * BUCK6
+ * 3.0V to 3.3V (step 100mV)
+ */
+static const struct regulator_linear_range bd71837_buck6_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
+};
+
+/*
+ * BUCK7
+ * 000 = 1.605V
+ * 001 = 1.695V
+ * 010 = 1.755V
+ * 011 = 1.8V (Initial)
+ * 100 = 1.845V
+ * 101 = 1.905V
+ * 110 = 1.95V
+ * 111 = 1.995V
+ */
+static const unsigned int buck_7_volts[] = {
+ 1605000, 1695000, 1755000, 1800000, 1845000, 1905000, 1950000, 1995000
+};
+
+/*
+ * BUCK8
+ * 0.8V to 1.40V (step 10mV)
+ */
+static const struct regulator_linear_range bd71837_buck8_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x3C, 10000),
+ REGULATOR_LINEAR_RANGE(1400000, 0x3D, 0x3F, 0),
+};
+
+/*
+ * LDO1
+ * 3.0 to 3.3V (100mV step)
+ */
+static const struct regulator_linear_range bd71837_ldo1_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
+};
+
+/*
+ * LDO2
+ * 0.8 or 0.9V
+ */
+static const unsigned int ldo_2_volts[] = {
+ 900000, 800000
+};
+
+/*
+ * LDO3
+ * 1.8 to 3.3V (100mV step)
+ */
+static const struct regulator_linear_range bd71837_ldo3_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
+};
+
+/*
+ * LDO4
+ * 0.9 to 1.8V (100mV step)
+ */
+static const struct regulator_linear_range bd71837_ldo4_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0x00, 0x09, 100000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x0A, 0x0F, 0),
+};
+
+/*
+ * LDO5
+ * 1.8 to 3.3V (100mV step)
+ */
+static const struct regulator_linear_range bd71837_ldo5_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
+};
+
+/*
+ * LDO6
+ * 0.9 to 1.8V (100mV step)
+ */
+static const struct regulator_linear_range bd71837_ldo6_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0x00, 0x09, 100000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x0A, 0x0F, 0),
+};
+
+/*
+ * LDO7
+ * 1.8 to 3.3V (100mV step)
+ */
+static const struct regulator_linear_range bd71837_ldo7_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
+};
+
+static const struct regulator_desc bd71837_regulators[] = {
+ {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_BUCK1,
+ .ops = &bd71837_buck1234_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_BUCK1_VOLTAGE_NUM,
+ .linear_ranges = bd71837_buck1234_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_buck1234_voltage_ranges),
+ .vsel_reg = BD71837_REG_BUCK1_VOLT_RUN,
+ .vsel_mask = BUCK1_RUN_MASK,
+ .enable_reg = BD71837_REG_BUCK1_CTRL,
+ .enable_mask = BD71837_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_BUCK2,
+ .ops = &bd71837_buck1234_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_BUCK2_VOLTAGE_NUM,
+ .linear_ranges = bd71837_buck1234_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_buck1234_voltage_ranges),
+ .vsel_reg = BD71837_REG_BUCK2_VOLT_RUN,
+ .vsel_mask = BUCK2_RUN_MASK,
+ .enable_reg = BD71837_REG_BUCK2_CTRL,
+ .enable_mask = BD71837_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_BUCK3,
+ .ops = &bd71837_buck1234_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_BUCK3_VOLTAGE_NUM,
+ .linear_ranges = bd71837_buck1234_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_buck1234_voltage_ranges),
+ .vsel_reg = BD71837_REG_BUCK3_VOLT_RUN,
+ .vsel_mask = BUCK3_RUN_MASK,
+ .enable_reg = BD71837_REG_BUCK3_CTRL,
+ .enable_mask = BD71837_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_BUCK4,
+ .ops = &bd71837_buck1234_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_BUCK4_VOLTAGE_NUM,
+ .linear_ranges = bd71837_buck1234_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_buck1234_voltage_ranges),
+ .vsel_reg = BD71837_REG_BUCK4_VOLT_RUN,
+ .vsel_mask = BUCK4_RUN_MASK,
+ .enable_reg = BD71837_REG_BUCK4_CTRL,
+ .enable_mask = BD71837_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck5",
+ .of_match = of_match_ptr("BUCK5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_BUCK5,
+ .ops = &bd71837_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_BUCK5_VOLTAGE_NUM,
+ .linear_ranges = bd71837_buck5_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_buck5_voltage_ranges),
+ .vsel_reg = BD71837_REG_BUCK5_VOLT,
+ .vsel_mask = BUCK5_MASK,
+ .enable_reg = BD71837_REG_BUCK5_CTRL,
+ .enable_mask = BD71837_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck6",
+ .of_match = of_match_ptr("BUCK6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_BUCK6,
+ .ops = &bd71837_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_BUCK6_VOLTAGE_NUM,
+ .linear_ranges = bd71837_buck6_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_buck6_voltage_ranges),
+ .vsel_reg = BD71837_REG_BUCK6_VOLT,
+ .vsel_mask = BUCK6_MASK,
+ .enable_reg = BD71837_REG_BUCK6_CTRL,
+ .enable_mask = BD71837_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck7",
+ .of_match = of_match_ptr("BUCK7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_BUCK7,
+ .ops = &bd71837_buck_regulator_nolinear_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &buck_7_volts[0],
+ .n_voltages = ARRAY_SIZE(buck_7_volts),
+ .vsel_reg = BD71837_REG_BUCK7_VOLT,
+ .vsel_mask = BUCK7_MASK,
+ .enable_reg = BD71837_REG_BUCK7_CTRL,
+ .enable_mask = BD71837_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck8",
+ .of_match = of_match_ptr("BUCK8"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_BUCK8,
+ .ops = &bd71837_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_BUCK8_VOLTAGE_NUM,
+ .linear_ranges = bd71837_buck8_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_buck8_voltage_ranges),
+ .vsel_reg = BD71837_REG_BUCK8_VOLT,
+ .vsel_mask = BUCK8_MASK,
+ .enable_reg = BD71837_REG_BUCK8_CTRL,
+ .enable_mask = BD71837_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_LDO1,
+ .ops = &bd71837_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_LDO1_VOLTAGE_NUM,
+ .linear_ranges = bd71837_ldo1_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_ldo1_voltage_ranges),
+ .vsel_reg = BD71837_REG_LDO1_VOLT,
+ .vsel_mask = LDO1_MASK,
+ .enable_reg = BD71837_REG_LDO1_VOLT,
+ .enable_mask = BD71837_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_LDO2,
+ .ops = &bd71837_ldo_regulator_nolinear_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &ldo_2_volts[0],
+ .vsel_reg = BD71837_REG_LDO2_VOLT,
+ .vsel_mask = LDO2_MASK,
+		.n_voltages = ARRAY_SIZE(ldo_2_volts),
+ .enable_reg = BD71837_REG_LDO2_VOLT,
+ .enable_mask = BD71837_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_LDO3,
+ .ops = &bd71837_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_LDO3_VOLTAGE_NUM,
+ .linear_ranges = bd71837_ldo3_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_ldo3_voltage_ranges),
+ .vsel_reg = BD71837_REG_LDO3_VOLT,
+ .vsel_mask = LDO3_MASK,
+ .enable_reg = BD71837_REG_LDO3_VOLT,
+ .enable_mask = BD71837_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_LDO4,
+ .ops = &bd71837_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_LDO4_VOLTAGE_NUM,
+ .linear_ranges = bd71837_ldo4_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_ldo4_voltage_ranges),
+ .vsel_reg = BD71837_REG_LDO4_VOLT,
+ .vsel_mask = LDO4_MASK,
+ .enable_reg = BD71837_REG_LDO4_VOLT,
+ .enable_mask = BD71837_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_LDO5,
+ .ops = &bd71837_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_LDO5_VOLTAGE_NUM,
+ .linear_ranges = bd71837_ldo5_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_ldo5_voltage_ranges),
+ /* LDO5 is supplied by buck6 */
+ .supply_name = "buck6",
+ .vsel_reg = BD71837_REG_LDO5_VOLT,
+ .vsel_mask = LDO5_MASK,
+ .enable_reg = BD71837_REG_LDO5_VOLT,
+ .enable_mask = BD71837_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo6",
+ .of_match = of_match_ptr("LDO6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_LDO6,
+ .ops = &bd71837_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_LDO6_VOLTAGE_NUM,
+ .linear_ranges = bd71837_ldo6_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_ldo6_voltage_ranges),
+ /* LDO6 is supplied by buck7 */
+ .supply_name = "buck7",
+ .vsel_reg = BD71837_REG_LDO6_VOLT,
+ .vsel_mask = LDO6_MASK,
+ .enable_reg = BD71837_REG_LDO6_VOLT,
+ .enable_mask = BD71837_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo7",
+ .of_match = of_match_ptr("LDO7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71837_LDO7,
+ .ops = &bd71837_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_LDO7_VOLTAGE_NUM,
+ .linear_ranges = bd71837_ldo7_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_ldo7_voltage_ranges),
+ .vsel_reg = BD71837_REG_LDO7_VOLT,
+ .vsel_mask = LDO7_MASK,
+ .enable_reg = BD71837_REG_LDO7_VOLT,
+ .enable_mask = BD71837_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+};
+
+struct reg_init {
+ unsigned int reg;
+ unsigned int mask;
+};
+
+static int bd71837_probe(struct platform_device *pdev)
+{
+ struct bd71837_pmic *pmic;
+ struct bd71837_board *pdata;
+ struct regulator_config config = { 0 };
+ struct reg_init pmic_regulator_inits[] = {
+ {
+ .reg = BD71837_REG_BUCK1_CTRL,
+ .mask = BD71837_BUCK_SEL,
+ }, {
+ .reg = BD71837_REG_BUCK2_CTRL,
+ .mask = BD71837_BUCK_SEL,
+ }, {
+ .reg = BD71837_REG_BUCK3_CTRL,
+ .mask = BD71837_BUCK_SEL,
+ }, {
+ .reg = BD71837_REG_BUCK4_CTRL,
+ .mask = BD71837_BUCK_SEL,
+ }, {
+ .reg = BD71837_REG_BUCK5_CTRL,
+ .mask = BD71837_BUCK_SEL,
+ }, {
+ .reg = BD71837_REG_BUCK6_CTRL,
+ .mask = BD71837_BUCK_SEL,
+ }, {
+ .reg = BD71837_REG_BUCK7_CTRL,
+ .mask = BD71837_BUCK_SEL,
+ }, {
+ .reg = BD71837_REG_BUCK8_CTRL,
+ .mask = BD71837_BUCK_SEL,
+ }, {
+ .reg = BD71837_REG_LDO1_VOLT,
+ .mask = BD71837_LDO_SEL,
+ }, {
+ .reg = BD71837_REG_LDO2_VOLT,
+ .mask = BD71837_LDO_SEL,
+ }, {
+ .reg = BD71837_REG_LDO3_VOLT,
+ .mask = BD71837_LDO_SEL,
+ }, {
+ .reg = BD71837_REG_LDO4_VOLT,
+ .mask = BD71837_LDO_SEL,
+ }, {
+ .reg = BD71837_REG_LDO5_VOLT,
+ .mask = BD71837_LDO_SEL,
+ }, {
+ .reg = BD71837_REG_LDO6_VOLT,
+ .mask = BD71837_LDO_SEL,
+ }, {
+ .reg = BD71837_REG_LDO7_VOLT,
+ .mask = BD71837_LDO_SEL,
+ }
+ };
+
+ int i, err;
+
+ pmic = devm_kzalloc(&pdev->dev, sizeof(struct bd71837_pmic),
+ GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ memcpy(pmic->descs, bd71837_regulators, sizeof(pmic->descs));
+
+ pmic->pdev = pdev;
+ pmic->mfd = dev_get_drvdata(pdev->dev.parent);
+
+ if (!pmic->mfd) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ err = -EINVAL;
+ goto err;
+ }
+ platform_set_drvdata(pdev, pmic);
+ pdata = dev_get_platdata(pmic->mfd->dev);
+
+ /* Register LOCK release */
+ err = regmap_update_bits(pmic->mfd->regmap, BD71837_REG_REGLOCK,
+ (REGLOCK_PWRSEQ | REGLOCK_VREG), 0);
+ if (err) {
+ dev_err(&pmic->pdev->dev, "Failed to unlock PMIC (%d)\n", err);
+ goto err;
+ } else {
+ dev_dbg(&pmic->pdev->dev, "%s: Unlocked lock register 0x%x\n",
+ __func__, BD71837_REG_REGLOCK);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
+
+ struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+
+ desc = &pmic->descs[i];
+
+ if (pdata)
+ config.init_data = pdata->init_data[i];
+
+ config.dev = pdev->dev.parent;
+ config.driver_data = pmic;
+ config.regmap = pmic->mfd->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(pmic->mfd->dev,
+ "failed to register %s regulator\n",
+ desc->name);
+ err = PTR_ERR(rdev);
+ goto err;
+ }
+ /* Regulator register gets the regulator constraints and
+ * applies them (set_machine_constraints). This should have
+ * turned the control register(s) to correct values and we
+ * can now switch the control from PMIC state machine to the
+ * register interface
+ */
+ err = regmap_update_bits(pmic->mfd->regmap,
+ pmic_regulator_inits[i].reg,
+ pmic_regulator_inits[i].mask,
+ 0xFFFFFFFF);
+ if (err) {
+ dev_err(&pmic->pdev->dev,
+ "Failed to write BUCK/LDO SEL bit for (%s)\n",
+ desc->name);
+ goto err;
+ }
+
+ pmic->rdev[i] = rdev;
+ }
+
+ return 0;
+
+err:
+ return err;
+}
+
+static struct platform_driver bd71837_regulator = {
+ .driver = {
+ .name = "bd71837-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = bd71837_probe,
+};
+
+module_platform_driver(bd71837_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71837 voltage regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index c67a83d53c4c..be574eb444eb 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -24,6 +24,18 @@
#include <linux/mfd/bd9571mwv.h>
+struct bd9571mwv_reg {
+ struct bd9571mwv *bd;
+
+ /* DDR Backup Power */
+ u8 bkup_mode_cnt_keepon; /* from "rohm,ddr-backup-power" */
+ u8 bkup_mode_cnt_saved;
+
+ /* Power switch type */
+ bool rstbmode_level;
+ bool rstbmode_pulse;
+};
+
enum bd9571mwv_regulators { VD09, VD18, VD25, VD33, DVFS };
#define BD9571MWV_REG(_name, _of, _id, _ops, _vr, _vm, _nv, _min, _step, _lmin)\
@@ -131,14 +143,99 @@ static struct regulator_desc regulators[] = {
0x80, 600000, 10000, 0x3c),
};
+#ifdef CONFIG_PM_SLEEP
+static int bd9571mwv_bkup_mode_read(struct bd9571mwv *bd, unsigned int *mode)
+{
+ int ret;
+
+ ret = regmap_read(bd->regmap, BD9571MWV_BKUP_MODE_CNT, mode);
+ if (ret) {
+ dev_err(bd->dev, "failed to read backup mode (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bd9571mwv_bkup_mode_write(struct bd9571mwv *bd, unsigned int mode)
+{
+ int ret;
+
+ ret = regmap_write(bd->regmap, BD9571MWV_BKUP_MODE_CNT, mode);
+ if (ret) {
+ dev_err(bd->dev, "failed to configure backup mode 0x%x (%d)\n",
+ mode, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bd9571mwv_suspend(struct device *dev)
+{
+ struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
+ unsigned int mode;
+ int ret;
+
+ if (!device_may_wakeup(dev))
+ return 0;
+
+ /* Save DDR Backup Mode */
+ ret = bd9571mwv_bkup_mode_read(bdreg->bd, &mode);
+ if (ret)
+ return ret;
+
+ bdreg->bkup_mode_cnt_saved = mode;
+
+ if (!bdreg->rstbmode_pulse)
+ return 0;
+
+ /* Enable DDR Backup Mode */
+ mode &= ~BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK;
+ mode |= bdreg->bkup_mode_cnt_keepon;
+
+ if (mode != bdreg->bkup_mode_cnt_saved)
+ return bd9571mwv_bkup_mode_write(bdreg->bd, mode);
+
+ return 0;
+}
+
+static int bd9571mwv_resume(struct device *dev)
+{
+ struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
+
+ if (!device_may_wakeup(dev))
+ return 0;
+
+ /* Restore DDR Backup Mode */
+ return bd9571mwv_bkup_mode_write(bdreg->bd, bdreg->bkup_mode_cnt_saved);
+}
+
+static const struct dev_pm_ops bd9571mwv_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(bd9571mwv_suspend, bd9571mwv_resume)
+};
+
+#define DEV_PM_OPS &bd9571mwv_pm
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static int bd9571mwv_regulator_probe(struct platform_device *pdev)
{
struct bd9571mwv *bd = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
+ struct bd9571mwv_reg *bdreg;
struct regulator_dev *rdev;
+ unsigned int val;
int i;
- platform_set_drvdata(pdev, bd);
+ bdreg = devm_kzalloc(&pdev->dev, sizeof(*bdreg), GFP_KERNEL);
+ if (!bdreg)
+ return -ENOMEM;
+
+ bdreg->bd = bd;
+
+ platform_set_drvdata(pdev, bdreg);
config.dev = &pdev->dev;
config.dev->of_node = bd->dev->of_node;
@@ -155,6 +252,33 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev)
}
}
+ val = 0;
+ of_property_read_u32(bd->dev->of_node, "rohm,ddr-backup-power", &val);
+ if (val & ~BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK) {
+ dev_err(bd->dev, "invalid %s mode %u\n",
+ "rohm,ddr-backup-power", val);
+ return -EINVAL;
+ }
+ bdreg->bkup_mode_cnt_keepon = val;
+
+ bdreg->rstbmode_level = of_property_read_bool(bd->dev->of_node,
+ "rohm,rstbmode-level");
+ bdreg->rstbmode_pulse = of_property_read_bool(bd->dev->of_node,
+ "rohm,rstbmode-pulse");
+ if (bdreg->rstbmode_level && bdreg->rstbmode_pulse) {
+ dev_err(bd->dev, "only one rohm,rstbmode-* may be specified");
+ return -EINVAL;
+ }
+
+ if (bdreg->bkup_mode_cnt_keepon) {
+ device_set_wakeup_capable(&pdev->dev, true);
+ /*
+ * Wakeup is enabled by default in pulse mode, but needs
+ * explicit user setup in level mode.
+ */
+ device_set_wakeup_enable(&pdev->dev, bdreg->rstbmode_pulse);
+ }
+
return 0;
}
@@ -167,6 +291,7 @@ MODULE_DEVICE_TABLE(platform, bd9571mwv_regulator_id_table);
static struct platform_driver bd9571mwv_regulator_driver = {
.driver = {
.name = "bd9571mwv-regulator",
+ .pm = DEV_PM_OPS,
},
.probe = bd9571mwv_regulator_probe,
.id_table = bd9571mwv_regulator_id_table,
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d4803460a557..6ed568b96c0e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -147,6 +147,56 @@ static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
}
/**
+ * regulator_lock_nested - lock a single regulator
+ * @rdev: regulator source
+ * @subclass: mutex subclass used for lockdep
+ *
+ * This function can be called many times by one task on
+ * a single regulator and its mutex will be locked only
+ * once. If a task, which is calling this function is other
+ * than the one, which initially locked the mutex, it will
+ * wait on mutex.
+ */
+static void regulator_lock_nested(struct regulator_dev *rdev,
+ unsigned int subclass)
+{
+ if (!mutex_trylock(&rdev->mutex)) {
+ if (rdev->mutex_owner == current) {
+ rdev->ref_cnt++;
+ return;
+ }
+ mutex_lock_nested(&rdev->mutex, subclass);
+ }
+
+ rdev->ref_cnt = 1;
+ rdev->mutex_owner = current;
+}
+
+static inline void regulator_lock(struct regulator_dev *rdev)
+{
+ regulator_lock_nested(rdev, 0);
+}
+
+/**
+ * regulator_unlock - unlock a single regulator
+ * @rdev: regulator_source
+ *
+ * This function unlocks the mutex when the
+ * reference counter reaches 0.
+ */
+static void regulator_unlock(struct regulator_dev *rdev)
+{
+ if (rdev->ref_cnt != 0) {
+ rdev->ref_cnt--;
+
+ if (!rdev->ref_cnt) {
+ rdev->mutex_owner = NULL;
+ mutex_unlock(&rdev->mutex);
+ }
+ }
+}
+
+/**
* regulator_lock_supply - lock a regulator and its supplies
* @rdev: regulator source
*/
@@ -155,7 +205,7 @@ static void regulator_lock_supply(struct regulator_dev *rdev)
int i;
for (i = 0; rdev; rdev = rdev_get_supply(rdev), i++)
- mutex_lock_nested(&rdev->mutex, i);
+ regulator_lock_nested(rdev, i);
}
/**
@@ -167,7 +217,7 @@ static void regulator_unlock_supply(struct regulator_dev *rdev)
struct regulator *supply;
while (1) {
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
supply = rdev->supply;
if (!rdev->supply)
@@ -350,9 +400,9 @@ static ssize_t regulator_uV_show(struct device *dev,
struct regulator_dev *rdev = dev_get_drvdata(dev);
ssize_t ret;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
ret = sprintf(buf, "%d\n", _regulator_get_voltage(rdev));
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
@@ -416,9 +466,9 @@ static ssize_t regulator_state_show(struct device *dev,
struct regulator_dev *rdev = dev_get_drvdata(dev);
ssize_t ret;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
ret = regulator_print_state(buf, _regulator_is_enabled(rdev));
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
@@ -526,10 +576,10 @@ static ssize_t regulator_total_uA_show(struct device *dev,
struct regulator *regulator;
int uA = 0;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
list_for_each_entry(regulator, &rdev->consumer_list, list)
uA += regulator->uA_load;
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return sprintf(buf, "%d\n", uA);
}
static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);
@@ -886,6 +936,18 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
rdev->constraints->min_uV && rdev->constraints->max_uV) {
int target_min, target_max;
int current_uV = _regulator_get_voltage(rdev);
+
+ if (current_uV == -ENOTRECOVERABLE) {
+ /* This regulator can't be read and must be initted */
+ rdev_info(rdev, "Setting %d-%duV\n",
+ rdev->constraints->min_uV,
+ rdev->constraints->max_uV);
+ _regulator_do_set_voltage(rdev,
+ rdev->constraints->min_uV,
+ rdev->constraints->max_uV);
+ current_uV = _regulator_get_voltage(rdev);
+ }
+
if (current_uV < 0) {
rdev_err(rdev,
"failed to get the current voltage(%d)\n",
@@ -1321,7 +1383,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
if (regulator == NULL)
return NULL;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
regulator->rdev = rdev;
list_add(&regulator->list, &rdev->consumer_list);
@@ -1376,12 +1438,12 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
_regulator_is_enabled(rdev))
regulator->always_on = true;
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return regulator;
overflow_err:
list_del(&regulator->list);
kfree(regulator);
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return NULL;
}
@@ -1770,13 +1832,13 @@ static void _regulator_put(struct regulator *regulator)
/* remove any sysfs entries */
if (regulator->dev)
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
list_del(&regulator->list);
rdev->open_count--;
rdev->exclusive = 0;
put_device(&rdev->dev);
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
kfree_const(regulator->supply_name);
kfree(regulator);
@@ -2384,7 +2446,7 @@ static void regulator_disable_work(struct work_struct *work)
disable_work.work);
int count, i, ret;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
BUG_ON(!rdev->deferred_disables);
@@ -2405,7 +2467,7 @@ static void regulator_disable_work(struct work_struct *work)
rdev_err(rdev, "Deferred disable failed: %d\n", ret);
}
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
if (rdev->supply) {
for (i = 0; i < count; i++) {
@@ -2440,11 +2502,11 @@ int regulator_disable_deferred(struct regulator *regulator, int ms)
if (!ms)
return regulator_disable(regulator);
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
rdev->deferred_disables++;
mod_delayed_work(system_power_efficient_wq, &rdev->disable_work,
msecs_to_jiffies(ms));
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return 0;
}
@@ -2476,10 +2538,10 @@ static int _regulator_list_voltage(struct regulator_dev *rdev,
if (selector >= rdev->desc->n_voltages)
return -EINVAL;
if (lock)
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
ret = ops->list_voltage(rdev, selector);
if (lock)
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
} else if (rdev->is_switch && rdev->supply) {
ret = _regulator_list_voltage(rdev->supply->rdev,
selector, lock);
@@ -3252,7 +3314,7 @@ int regulator_sync_voltage(struct regulator *regulator)
struct regulator_voltage *voltage = &regulator->voltage[PM_SUSPEND_ON];
int ret, min_uV, max_uV;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
if (!rdev->desc->ops->set_voltage &&
!rdev->desc->ops->set_voltage_sel) {
@@ -3281,7 +3343,7 @@ int regulator_sync_voltage(struct regulator *regulator)
ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
out:
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
EXPORT_SYMBOL_GPL(regulator_sync_voltage);
@@ -3374,7 +3436,7 @@ int regulator_set_current_limit(struct regulator *regulator,
struct regulator_dev *rdev = regulator->rdev;
int ret;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
/* sanity check */
if (!rdev->desc->ops->set_current_limit) {
@@ -3389,7 +3451,7 @@ int regulator_set_current_limit(struct regulator *regulator,
ret = rdev->desc->ops->set_current_limit(rdev, min_uA, max_uA);
out:
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_current_limit);
@@ -3398,7 +3460,7 @@ static int _regulator_get_current_limit(struct regulator_dev *rdev)
{
int ret;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
/* sanity check */
if (!rdev->desc->ops->get_current_limit) {
@@ -3408,7 +3470,7 @@ static int _regulator_get_current_limit(struct regulator_dev *rdev)
ret = rdev->desc->ops->get_current_limit(rdev);
out:
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
@@ -3444,7 +3506,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
int ret;
int regulator_curr_mode;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
/* sanity check */
if (!rdev->desc->ops->set_mode) {
@@ -3468,7 +3530,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
ret = rdev->desc->ops->set_mode(rdev, mode);
out:
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_mode);
@@ -3477,7 +3539,7 @@ static unsigned int _regulator_get_mode(struct regulator_dev *rdev)
{
int ret;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
/* sanity check */
if (!rdev->desc->ops->get_mode) {
@@ -3487,7 +3549,7 @@ static unsigned int _regulator_get_mode(struct regulator_dev *rdev)
ret = rdev->desc->ops->get_mode(rdev);
out:
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
@@ -3508,7 +3570,7 @@ static int _regulator_get_error_flags(struct regulator_dev *rdev,
{
int ret;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
/* sanity check */
if (!rdev->desc->ops->get_error_flags) {
@@ -3518,7 +3580,7 @@ static int _regulator_get_error_flags(struct regulator_dev *rdev,
ret = rdev->desc->ops->get_error_flags(rdev, flags);
out:
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
@@ -3567,10 +3629,10 @@ int regulator_set_load(struct regulator *regulator, int uA_load)
struct regulator_dev *rdev = regulator->rdev;
int ret;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
regulator->uA_load = uA_load;
ret = drms_uA_update(rdev);
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
@@ -3598,7 +3660,7 @@ int regulator_allow_bypass(struct regulator *regulator, bool enable)
if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_BYPASS))
return 0;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
if (enable && !regulator->bypass) {
rdev->bypass_count++;
@@ -3622,7 +3684,7 @@ int regulator_allow_bypass(struct regulator *regulator, bool enable)
if (ret == 0)
regulator->bypass = enable;
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
@@ -4067,6 +4129,96 @@ static int regulator_register_resolve_supply(struct device *dev, void *data)
return 0;
}
+static int regulator_fill_coupling_array(struct regulator_dev *rdev)
+{
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ int n_coupled = c_desc->n_coupled;
+ struct regulator_dev *c_rdev;
+ int i;
+
+ for (i = 1; i < n_coupled; i++) {
+ /* already resolved */
+ if (c_desc->coupled_rdevs[i])
+ continue;
+
+ c_rdev = of_parse_coupled_regulator(rdev, i - 1);
+
+ if (c_rdev) {
+ c_desc->coupled_rdevs[i] = c_rdev;
+ c_desc->n_resolved++;
+ }
+ }
+
+ if (rdev->coupling_desc.n_resolved < n_coupled)
+ return -1;
+ else
+ return 0;
+}
+
+static int regulator_register_fill_coupling_array(struct device *dev,
+ void *data)
+{
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+
+ if (!IS_ENABLED(CONFIG_OF))
+ return 0;
+
+ if (regulator_fill_coupling_array(rdev))
+ rdev_dbg(rdev, "unable to resolve coupling\n");
+
+ return 0;
+}
+
+static int regulator_resolve_coupling(struct regulator_dev *rdev)
+{
+ int n_phandles;
+
+ if (!IS_ENABLED(CONFIG_OF))
+ n_phandles = 0;
+ else
+ n_phandles = of_get_n_coupled(rdev);
+
+ if (n_phandles + 1 > MAX_COUPLED) {
+ rdev_err(rdev, "too many regulators coupled\n");
+ return -EPERM;
+ }
+
+ /*
+ * Every regulator should always have coupling descriptor filled with
+ * at least pointer to itself.
+ */
+ rdev->coupling_desc.coupled_rdevs[0] = rdev;
+ rdev->coupling_desc.n_coupled = n_phandles + 1;
+ rdev->coupling_desc.n_resolved++;
+
+ /* regulator isn't coupled */
+ if (n_phandles == 0)
+ return 0;
+
+ /* regulator, which can't change its voltage, can't be coupled */
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
+ rdev_err(rdev, "voltage operation not allowed\n");
+ return -EPERM;
+ }
+
+ if (rdev->constraints->max_spread <= 0) {
+ rdev_err(rdev, "wrong max_spread value\n");
+ return -EPERM;
+ }
+
+ if (!of_check_coupling_data(rdev))
+ return -EPERM;
+
+ /*
+ * After everything has been checked, try to fill rdevs array
+ * with pointers to regulators parsed from device tree. If some
+ * regulators are not registered yet, retry in late init call
+ */
+ regulator_fill_coupling_array(rdev);
+
+ return 0;
+}
+
/**
* regulator_register - register regulator
* @regulator_desc: regulator to register
@@ -4200,6 +4352,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (ret < 0)
goto wash;
+ mutex_lock(&regulator_list_mutex);
+ ret = regulator_resolve_coupling(rdev);
+ mutex_unlock(&regulator_list_mutex);
+
+ if (ret != 0)
+ goto wash;
+
/* add consumers devices */
if (init_data) {
mutex_lock(&regulator_list_mutex);
@@ -4288,9 +4447,9 @@ static int _regulator_suspend_late(struct device *dev, void *data)
suspend_state_t *state = data;
int ret;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
ret = suspend_set_state(rdev, *state);
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
@@ -4320,14 +4479,14 @@ static int _regulator_resume_early(struct device *dev, void *data)
if (rstate == NULL)
return 0;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
if (rdev->desc->ops->resume_early &&
(rstate->enabled == ENABLE_IN_SUSPEND ||
rstate->enabled == DISABLE_IN_SUSPEND))
ret = rdev->desc->ops->resume_early(rdev);
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return ret;
}
@@ -4629,7 +4788,7 @@ static int __init regulator_late_cleanup(struct device *dev, void *data)
if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS))
return 0;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
if (rdev->use_count)
goto unlock;
@@ -4660,7 +4819,7 @@ static int __init regulator_late_cleanup(struct device *dev, void *data)
}
unlock:
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return 0;
}
@@ -4694,6 +4853,9 @@ static int __init regulator_init_complete(void)
class_for_each_device(&regulator_class, NULL, NULL,
regulator_late_cleanup);
+ class_for_each_device(&regulator_class, NULL, NULL,
+ regulator_register_fill_coupling_array);
+
return 0;
}
late_initcall_sync(regulator_init_complete);
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index f541b80f1b54..bd910fe123d9 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -222,7 +222,7 @@ static unsigned int cpcap_map_mode(unsigned int mode)
case CPCAP_BIT_AUDIO_LOW_PWR:
return REGULATOR_MODE_STANDBY;
default:
- return -EINVAL;
+ return REGULATOR_MODE_INVALID;
}
}
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index abfd56e8c78a..943926a156f2 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -56,14 +56,27 @@ static inline struct regulator_dev *dev_to_rdev(struct device *dev)
return container_of(dev, struct regulator_dev, dev);
}
-struct regulator_dev *of_find_regulator_by_node(struct device_node *np);
-
#ifdef CONFIG_OF
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np);
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
struct regulator_config *config,
struct device_node **node);
+
+struct regulator_dev *of_parse_coupled_regulator(struct regulator_dev *rdev,
+ int index);
+
+int of_get_n_coupled(struct regulator_dev *rdev);
+
+bool of_check_coupling_data(struct regulator_dev *rdev);
+
#else
+static inline struct regulator_dev *
+of_find_regulator_by_node(struct device_node *np)
+{
+ return NULL;
+}
+
static inline struct regulator_init_data *
regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
@@ -72,8 +85,25 @@ regulator_of_get_init_data(struct device *dev,
{
return NULL;
}
-#endif
+static inline struct regulator_dev *
+of_parse_coupled_regulator(struct regulator_dev *rdev,
+ int index)
+{
+ return NULL;
+}
+
+static inline int of_get_n_coupled(struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+static inline bool of_check_coupling_data(struct regulator_dev *rdev)
+{
+ return false;
+}
+
+#endif
enum regulator_get_type {
NORMAL_GET,
EXCLUSIVE_GET,
@@ -83,5 +113,4 @@ enum regulator_get_type {
struct regulator *_regulator_get(struct device *dev, const char *id,
enum regulator_get_type get_type);
-
#endif
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index ce5f7d9ad475..b615a413ca9f 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -16,7 +16,7 @@
#include <linux/mfd/ti-lmu-register.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
@@ -219,7 +219,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
},
};
-static int lm363x_regulator_of_get_enable_gpio(struct device_node *np, int id)
+static struct gpio_desc *lm363x_regulator_of_get_enable_gpio(struct device *dev, int id)
{
/*
* Check LCM_EN1/2_GPIO is configured.
@@ -227,11 +227,11 @@ static int lm363x_regulator_of_get_enable_gpio(struct device_node *np, int id)
*/
switch (id) {
case LM3632_LDO_POS:
- return of_get_named_gpio(np, "enable-gpios", 0);
+ return devm_gpiod_get_index_optional(dev, "enable", 0, GPIOD_OUT_LOW);
case LM3632_LDO_NEG:
- return of_get_named_gpio(np, "enable-gpios", 1);
+ return devm_gpiod_get_index_optional(dev, "enable", 1, GPIOD_OUT_LOW);
default:
- return -EINVAL;
+ return NULL;
}
}
@@ -243,7 +243,8 @@ static int lm363x_regulator_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
struct device *dev = &pdev->dev;
int id = pdev->id;
- int ret, ena_gpio;
+ struct gpio_desc *gpiod;
+ int ret;
cfg.dev = dev;
cfg.regmap = regmap;
@@ -252,10 +253,9 @@ static int lm363x_regulator_probe(struct platform_device *pdev)
* LM3632 LDOs can be controlled by external pin.
* Register update is required if the pin is used.
*/
- ena_gpio = lm363x_regulator_of_get_enable_gpio(dev->of_node, id);
- if (gpio_is_valid(ena_gpio)) {
- cfg.ena_gpio = ena_gpio;
- cfg.ena_gpio_flags = GPIOF_OUT_INIT_LOW;
+ gpiod = lm363x_regulator_of_get_enable_gpio(dev, id);
+ if (gpiod) {
+ cfg.ena_gpiod = gpiod;
ret = regmap_update_bits(regmap, LM3632_REG_BIAS_CONFIG,
LM3632_EXT_EN_MASK,
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index cfdbe294fb6a..c192357d1dea 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -95,6 +95,10 @@ static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
rdev->constraints->ramp_delay = lp87565_buck_ramp_delay[reg];
+ /* Conservatively give a 15% margin */
+ rdev->constraints->ramp_delay =
+ rdev->constraints->ramp_delay * 85 / 100;
+
return 0;
}
@@ -154,32 +158,33 @@ static const struct lp87565_regulator regulators[] = {
LP87565_REGULATOR("BUCK0", LP87565_BUCK_0, "buck0", lp87565_buck_ops,
256, LP87565_REG_BUCK0_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK0_CTRL_1,
- LP87565_BUCK_CTRL_1_EN, 3800,
+ LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
LP87565_REGULATOR("BUCK1", LP87565_BUCK_1, "buck1", lp87565_buck_ops,
256, LP87565_REG_BUCK1_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK1_CTRL_1,
- LP87565_BUCK_CTRL_1_EN, 3800,
+ LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK1_CTRL_2),
LP87565_REGULATOR("BUCK2", LP87565_BUCK_2, "buck2", lp87565_buck_ops,
256, LP87565_REG_BUCK2_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK2_CTRL_1,
- LP87565_BUCK_CTRL_1_EN, 3800,
+ LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK2_CTRL_2),
LP87565_REGULATOR("BUCK3", LP87565_BUCK_3, "buck3", lp87565_buck_ops,
256, LP87565_REG_BUCK3_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK3_CTRL_1,
- LP87565_BUCK_CTRL_1_EN, 3800,
+ LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK3_CTRL_2),
LP87565_REGULATOR("BUCK10", LP87565_BUCK_10, "buck10", lp87565_buck_ops,
256, LP87565_REG_BUCK0_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK0_CTRL_1,
- LP87565_BUCK_CTRL_1_EN, 3800,
+ LP87565_BUCK_CTRL_1_EN |
+ LP87565_BUCK_CTRL_1_FPWM_MP_0_2, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
LP87565_REGULATOR("BUCK23", LP87565_BUCK_23, "buck23", lp87565_buck_ops,
256, LP87565_REG_BUCK2_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK2_CTRL_1,
- LP87565_BUCK_CTRL_1_EN, 3800,
+ LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK2_CTRL_2),
};
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index cbfd35873575..f2347474a106 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -16,7 +16,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/mfd/lp8788.h>
/* register address */
@@ -85,8 +85,6 @@
#define LP8788_STARTUP_TIME_S 3
#define ENABLE_TIME_USEC 32
-#define ENABLE GPIOF_OUT_INIT_HIGH
-#define DISABLE GPIOF_OUT_INIT_LOW
enum lp8788_ldo_id {
DLDO1,
@@ -117,7 +115,7 @@ struct lp8788_ldo {
struct lp8788 *lp;
struct regulator_desc *desc;
struct regulator_dev *regulator;
- struct lp8788_ldo_enable_pin *en_pin;
+ struct gpio_desc *ena_gpiod;
};
/* DLDO 1, 2, 3, 9 voltage table */
@@ -469,7 +467,6 @@ static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
enum lp8788_ldo_id id)
{
struct lp8788 *lp = ldo->lp;
- struct lp8788_platform_data *pdata = lp->pdata;
enum lp8788_ext_ldo_en_id enable_id;
u8 en_mask[] = {
[EN_ALDO1] = LP8788_EN_SEL_ALDO1_M,
@@ -504,11 +501,18 @@ static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
return 0;
}
- /* if no platform data for ldo pin, then set default enable mode */
- if (!pdata || !pdata->ldo_pin || !pdata->ldo_pin[enable_id])
+ /* FIXME: check default mode for GPIO here: high or low? */
+ ldo->ena_gpiod = devm_gpiod_get_index_optional(&pdev->dev,
+ "enable",
+ enable_id,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ldo->ena_gpiod))
+ return PTR_ERR(ldo->ena_gpiod);
+
+ /* if no GPIO for ldo pin, then set default enable mode */
+ if (!ldo->ena_gpiod)
goto set_default_ldo_enable_mode;
- ldo->en_pin = pdata->ldo_pin[enable_id];
return 0;
set_default_ldo_enable_mode:
@@ -533,10 +537,8 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (ldo->en_pin) {
- cfg.ena_gpio = ldo->en_pin->gpio;
- cfg.ena_gpio_flags = ldo->en_pin->init_state;
- }
+ if (ldo->ena_gpiod)
+ cfg.ena_gpiod = ldo->ena_gpiod;
cfg.dev = pdev->dev.parent;
cfg.init_data = lp->pdata ? lp->pdata->dldo_data[id] : NULL;
@@ -582,10 +584,8 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (ldo->en_pin) {
- cfg.ena_gpio = ldo->en_pin->gpio;
- cfg.ena_gpio_flags = ldo->en_pin->init_state;
- }
+ if (ldo->ena_gpiod)
+ cfg.ena_gpiod = ldo->ena_gpiod;
cfg.dev = pdev->dev.parent;
cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL;
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index 662ee05ea44d..9dec1609ff66 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -52,6 +52,7 @@
#define LTC3676_CLIRQ 0x1F
#define LTC3676_DVBxA_REF_SELECT BIT(5)
+#define LTC3676_DVBxB_PGOOD_MASK BIT(5)
#define LTC3676_IRQSTAT_PGOOD_TIMEOUT BIT(3)
#define LTC3676_IRQSTAT_UNDERVOLT_WARN BIT(4)
@@ -123,6 +124,23 @@ static int ltc3676_set_suspend_mode(struct regulator_dev *rdev,
mask, val);
}
+static int ltc3676_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
+{
+ struct ltc3676 *ltc3676 = rdev_get_drvdata(rdev);
+ struct device *dev = ltc3676->dev;
+ int ret, dcdc = rdev_get_id(rdev);
+
+ dev_dbg(dev, "%s id=%d selector=%d\n", __func__, dcdc, selector);
+
+ ret = regmap_update_bits(ltc3676->regmap, rdev->desc->vsel_reg + 1,
+ LTC3676_DVBxB_PGOOD_MASK,
+ LTC3676_DVBxB_PGOOD_MASK);
+ if (ret)
+ return ret;
+
+ return regulator_set_voltage_sel_regmap(rdev, selector);
+}
+
static inline unsigned int ltc3676_scale(unsigned int uV, u32 r1, u32 r2)
{
uint64_t tmp;
@@ -166,7 +184,7 @@ static const struct regulator_ops ltc3676_linear_regulator_ops = {
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_sel = ltc3676_set_voltage_sel,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_suspend_voltage = ltc3676_set_suspend_voltage,
.set_suspend_mode = ltc3676_set_suspend_mode,
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 1096546c05e9..f1e77ed5dfec 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -27,6 +27,7 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/max8952.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -148,7 +149,6 @@ static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
pd->gpio_vid0 = of_get_named_gpio(np, "max8952,vid-gpios", 0);
pd->gpio_vid1 = of_get_named_gpio(np, "max8952,vid-gpios", 1);
- pd->gpio_en = of_get_named_gpio(np, "max8952,en-gpio", 0);
if (of_property_read_u32(np, "max8952,default-mode", &pd->default_mode))
dev_warn(dev, "Default mode not specified, assuming 0\n");
@@ -197,6 +197,8 @@ static int max8952_pmic_probe(struct i2c_client *client,
struct regulator_config config = { };
struct max8952_data *max8952;
struct regulator_dev *rdev;
+ struct gpio_desc *gpiod;
+ enum gpiod_flags gflags;
int ret = 0, err = 0;
@@ -224,11 +226,17 @@ static int max8952_pmic_probe(struct i2c_client *client,
config.driver_data = max8952;
config.of_node = client->dev.of_node;
- config.ena_gpio = pdata->gpio_en;
- if (client->dev.of_node)
- config.ena_gpio_initialized = true;
if (pdata->reg_data->constraints.boot_on)
- config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ gflags = GPIOD_OUT_HIGH;
+ else
+ gflags = GPIOD_OUT_LOW;
+ gpiod = devm_gpiod_get_optional(&client->dev,
+ "max8952,en",
+ gflags);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ if (gpiod)
+ config.ena_gpiod = gpiod;
rdev = devm_regulator_register(&client->dev, &regulator, &config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index e0c747aa9f85..7cd493ec6315 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -34,6 +34,7 @@
#include <linux/regulator/max8973-regulator.h>
#include <linux/regulator/of_regulator.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -114,7 +115,6 @@ struct max8973_chip {
struct regulator_desc desc;
struct regmap *regmap;
bool enable_external_control;
- int enable_gpio;
int dvs_gpio;
int lru_index[MAX8973_MAX_VOUT_REG];
int curr_vout_val[MAX8973_MAX_VOUT_REG];
@@ -567,7 +567,6 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
pdata->enable_ext_control = of_property_read_bool(np,
"maxim,externally-enable");
- pdata->enable_gpio = of_get_named_gpio(np, "maxim,enable-gpio", 0);
pdata->dvs_gpio = of_get_named_gpio(np, "maxim,dvs-gpio", 0);
ret = of_property_read_u32(np, "maxim,dvs-default-state", &pval);
@@ -633,6 +632,8 @@ static int max8973_probe(struct i2c_client *client,
struct max8973_chip *max;
bool pdata_from_dt = false;
unsigned int chip_id;
+ struct gpio_desc *gpiod;
+ enum gpiod_flags gflags;
int ret;
pdata = dev_get_platdata(&client->dev);
@@ -647,8 +648,7 @@ static int max8973_probe(struct i2c_client *client,
return -EIO;
}
- if ((pdata->dvs_gpio == -EPROBE_DEFER) ||
- (pdata->enable_gpio == -EPROBE_DEFER))
+ if (pdata->dvs_gpio == -EPROBE_DEFER)
return -EPROBE_DEFER;
max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
@@ -696,15 +696,11 @@ static int max8973_probe(struct i2c_client *client,
max->desc.n_voltages = MAX8973_BUCK_N_VOLTAGE;
max->dvs_gpio = (pdata->dvs_gpio) ? pdata->dvs_gpio : -EINVAL;
- max->enable_gpio = (pdata->enable_gpio) ? pdata->enable_gpio : -EINVAL;
max->enable_external_control = pdata->enable_ext_control;
max->curr_gpio_val = pdata->dvs_def_state;
max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state;
max->junction_temp_warning = pdata->junction_temp_warning;
- if (gpio_is_valid(max->enable_gpio))
- max->enable_external_control = true;
-
max->lru_index[0] = max->curr_vout_reg;
if (gpio_is_valid(max->dvs_gpio)) {
@@ -757,27 +753,35 @@ static int max8973_probe(struct i2c_client *client,
break;
}
- if (gpio_is_valid(max->enable_gpio)) {
- config.ena_gpio_flags = GPIOF_OUT_INIT_LOW;
- if (ridata && (ridata->constraints.always_on ||
- ridata->constraints.boot_on))
- config.ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
- config.ena_gpio = max->enable_gpio;
+ if (ridata && (ridata->constraints.always_on ||
+ ridata->constraints.boot_on))
+ gflags = GPIOD_OUT_HIGH;
+ else
+ gflags = GPIOD_OUT_LOW;
+ gpiod = devm_gpiod_get_optional(&client->dev,
+ "maxim,enable",
+ gflags);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ if (gpiod) {
+ config.ena_gpiod = gpiod;
+ max->enable_external_control = true;
}
+
break;
case MAX77621:
- if (gpio_is_valid(max->enable_gpio)) {
- ret = devm_gpio_request_one(&client->dev,
- max->enable_gpio, GPIOF_OUT_INIT_HIGH,
- "max8973-en-gpio");
- if (ret) {
- dev_err(&client->dev,
- "gpio_request for gpio %d failed: %d\n",
- max->enable_gpio, ret);
- return ret;
- }
- }
+ /*
+ * We do not let the core switch this regulator on/off,
+ * we just leave it on.
+ */
+ gpiod = devm_gpiod_get_optional(&client->dev,
+ "maxim,enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ if (gpiod)
+ max->enable_external_control = true;
max->desc.enable_reg = MAX8973_VOUT;
max->desc.enable_mask = MAX8973_VOUT_ENABLE;
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 3027e7ce100b..6a2b61c012b5 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -309,8 +309,7 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
- struct max8998_platform_data *pdata =
- dev_get_platdata(max8998->iodev->dev);
+ struct max8998_platform_data *pdata = max8998->iodev->pdata;
struct i2c_client *i2c = max8998->iodev->i2c;
int buck = rdev_get_id(rdev);
int reg, shift = 0, mask, ret, j;
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 0e0277bd91a8..8fd1adc9c9a9 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -1,14 +1,10 @@
-/*
- * Regulator Driver for Freescale MC13783 PMIC
- *
- * Copyright 2010 Yong Shen <yong.shen@linaro.org>
- * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
- * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Regulator Driver for Freescale MC13783 PMIC
+//
+// Copyright 2010 Yong Shen <yong.shen@linaro.org>
+// Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+// Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
#include <linux/mfd/mc13783.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 15dd7bc7b529..f3fba1cc1379 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -1,14 +1,10 @@
-/*
- * Regulator Driver for Freescale MC13892 PMIC
- *
- * Copyright 2010 Yong Shen <yong.shen@linaro.org>
- *
- * Based on draft driver from Arnaud Patard <arnaud.patard@rtp-net.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Regulator Driver for Freescale MC13892 PMIC
+//
+// Copyright 2010 Yong Shen <yong.shen@linaro.org>
+//
+// Based on draft driver from Arnaud Patard <arnaud.patard@rtp-net.org>
#include <linux/mfd/mc13892.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 0281c31ae2ed..41271aeea63e 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -1,19 +1,15 @@
-/*
- * Regulator Driver for Freescale MC13xxx PMIC
- *
- * Copyright 2010 Yong Shen <yong.shen@linaro.org>
- *
- * Based on mc13783 regulator driver :
- * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
- * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Regs infos taken from mc13xxx drivers from freescale and mc13xxx.pdf file
- * from freescale
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Regulator Driver for Freescale MC13xxx PMIC
+//
+// Copyright 2010 Yong Shen <yong.shen@linaro.org>
+//
+// Based on mc13783 regulator driver :
+// Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+// Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
+//
+// Regs infos taken from mc13xxx drivers from freescale and mc13xxx.pdf file
+// from freescale
#include <linux/mfd/mc13xxx.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index f47264fa1940..638f17d4c848 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -31,7 +31,8 @@ static void of_get_regulation_constraints(struct device_node *np,
struct regulation_constraints *constraints = &(*init_data)->constraints;
struct regulator_state *suspend_state;
struct device_node *suspend_np;
- int ret, i;
+ unsigned int mode;
+ int ret, i, len;
u32 pval;
constraints->name = of_get_property(np, "regulator-name", NULL);
@@ -124,20 +125,51 @@ static void of_get_regulation_constraints(struct device_node *np,
if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) {
if (desc && desc->of_map_mode) {
- ret = desc->of_map_mode(pval);
- if (ret == -EINVAL)
+ mode = desc->of_map_mode(pval);
+ if (mode == REGULATOR_MODE_INVALID)
pr_err("%s: invalid mode %u\n", np->name, pval);
else
- constraints->initial_mode = ret;
+ constraints->initial_mode = mode;
} else {
pr_warn("%s: mapping for mode %d not defined\n",
np->name, pval);
}
}
+ len = of_property_count_elems_of_size(np, "regulator-allowed-modes",
+ sizeof(u32));
+ if (len > 0) {
+ if (desc && desc->of_map_mode) {
+ for (i = 0; i < len; i++) {
+ ret = of_property_read_u32_index(np,
+ "regulator-allowed-modes", i, &pval);
+ if (ret) {
+ pr_err("%s: couldn't read allowed modes index %d, ret=%d\n",
+ np->name, i, ret);
+ break;
+ }
+ mode = desc->of_map_mode(pval);
+ if (mode == REGULATOR_MODE_INVALID)
+ pr_err("%s: invalid regulator-allowed-modes element %u\n",
+ np->name, pval);
+ else
+ constraints->valid_modes_mask |= mode;
+ }
+ if (constraints->valid_modes_mask)
+ constraints->valid_ops_mask
+ |= REGULATOR_CHANGE_MODE;
+ } else {
+ pr_warn("%s: mode mapping not defined\n", np->name);
+ }
+ }
+
if (!of_property_read_u32(np, "regulator-system-load", &pval))
constraints->system_load = pval;
+ if (!of_property_read_u32(np, "regulator-coupled-max-spread",
+ &pval))
+ constraints->max_spread = pval;
+
constraints->over_current_protection = of_property_read_bool(np,
"regulator-over-current-protection");
@@ -163,12 +195,12 @@ static void of_get_regulation_constraints(struct device_node *np,
if (!of_property_read_u32(suspend_np, "regulator-mode",
&pval)) {
if (desc && desc->of_map_mode) {
- ret = desc->of_map_mode(pval);
- if (ret == -EINVAL)
+ mode = desc->of_map_mode(pval);
+ if (mode == REGULATOR_MODE_INVALID)
pr_err("%s: invalid mode %u\n",
np->name, pval);
else
- suspend_state->mode = ret;
+ suspend_state->mode = mode;
} else {
pr_warn("%s: mapping for mode %d not defined\n",
np->name, pval);
@@ -407,3 +439,150 @@ struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
return dev ? dev_to_rdev(dev) : NULL;
}
+
+/*
+ * Returns number of regulators coupled with rdev.
+ */
+int of_get_n_coupled(struct regulator_dev *rdev)
+{
+ struct device_node *node = rdev->dev.of_node;
+ int n_phandles;
+
+ n_phandles = of_count_phandle_with_args(node,
+ "regulator-coupled-with",
+ NULL);
+
+ return (n_phandles > 0) ? n_phandles : 0;
+}
+
+/* Looks for "to_find" device_node in src's "regulator-coupled-with" property */
+static bool of_coupling_find_node(struct device_node *src,
+ struct device_node *to_find)
+{
+ int n_phandles, i;
+ bool found = false;
+
+ n_phandles = of_count_phandle_with_args(src,
+ "regulator-coupled-with",
+ NULL);
+
+ for (i = 0; i < n_phandles; i++) {
+ struct device_node *tmp = of_parse_phandle(src,
+ "regulator-coupled-with", i);
+
+ if (!tmp)
+ break;
+
+ /* found */
+ if (tmp == to_find)
+ found = true;
+
+ of_node_put(tmp);
+
+ if (found)
+ break;
+ }
+
+ return found;
+}
+
+/**
+ * of_check_coupling_data - Parse rdev's coupling properties and check data
+ * consistency
+ * @rdev: Pointer to regulator_dev whose data is checked
+ *
+ * Function checks if all the following conditions are met:
+ * - rdev's max_spread is greater than 0
+ * - all coupled regulators have the same max_spread
+ * - all coupled regulators have the same number of regulator_dev phandles
+ * - all regulators are linked to each other
+ *
+ * Returns true if all conditions are met.
+ */
+bool of_check_coupling_data(struct regulator_dev *rdev)
+{
+ int max_spread = rdev->constraints->max_spread;
+ struct device_node *node = rdev->dev.of_node;
+ int n_phandles = of_get_n_coupled(rdev);
+ struct device_node *c_node;
+ int i;
+ bool ret = true;
+
+ if (max_spread <= 0) {
+ dev_err(&rdev->dev, "max_spread value invalid\n");
+ return false;
+ }
+
+ /* iterate over rdev's phandles */
+ for (i = 0; i < n_phandles; i++) {
+ int c_max_spread, c_n_phandles;
+
+ c_node = of_parse_phandle(node,
+ "regulator-coupled-with", i);
+
+ if (!c_node)
+ ret = false;
+
+ c_n_phandles = of_count_phandle_with_args(c_node,
+ "regulator-coupled-with",
+ NULL);
+
+ if (c_n_phandles != n_phandles) {
+ dev_err(&rdev->dev, "number of coupled reg phandles mismatch\n");
+ ret = false;
+ goto clean;
+ }
+
+ if (of_property_read_u32(c_node, "regulator-coupled-max-spread",
+ &c_max_spread)) {
+ ret = false;
+ goto clean;
+ }
+
+ if (c_max_spread != max_spread) {
+ dev_err(&rdev->dev,
+ "coupled regulators max_spread mismatch\n");
+ ret = false;
+ goto clean;
+ }
+
+ if (!of_coupling_find_node(c_node, node)) {
+ dev_err(&rdev->dev, "missing 2-way linking for coupled regulators\n");
+ ret = false;
+ }
+
+clean:
+ of_node_put(c_node);
+ if (!ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * of_parse_coupled_regulator - Get regulator_dev pointer from rdev's property
+ * @rdev: Pointer to regulator_dev, whose DTS is used as a source to parse
+ * "regulator-coupled-with" property
+ * @index: Index in phandles array
+ *
+ * Returns the regulator_dev pointer parsed from DTS. If it has not been yet
+ * registered, returns NULL
+ */
+struct regulator_dev *of_parse_coupled_regulator(struct regulator_dev *rdev,
+ int index)
+{
+ struct device_node *node = rdev->dev.of_node;
+ struct device_node *c_node;
+ struct regulator_dev *c_rdev;
+
+ c_node = of_parse_phandle(node, "regulator-coupled-with", index);
+ if (!c_node)
+ return NULL;
+
+ c_rdev = of_find_regulator_by_node(c_node);
+
+ of_node_put(c_node);
+
+ return c_rdev;
+}
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 63922a2167e5..8d9dbcc775ea 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -1,20 +1,7 @@
-/*
- * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -86,6 +73,13 @@ static const int pfuze100_coin[] = {
2500000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000,
};
+static const int pfuze3000_sw1a[] = {
+ 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000,
+ 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000,
+ 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000,
+ 1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1800000, 3300000,
+};
+
static const int pfuze3000_sw2lo[] = {
1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000,
};
@@ -148,6 +142,9 @@ static const struct regulator_ops pfuze100_fixed_regulator_ops = {
};
static const struct regulator_ops pfuze100_sw_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -158,6 +155,7 @@ static const struct regulator_ops pfuze100_sw_regulator_ops = {
static const struct regulator_ops pfuze100_swb_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -193,6 +191,11 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
.uV_step = (step), \
.vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
.vsel_mask = 0x3f, \
+ .enable_reg = (base) + PFUZE100_MODE_OFFSET, \
+ .enable_val = 0xc, \
+ .disable_val = 0x0, \
+ .enable_mask = 0xf, \
+ .enable_time = 500, \
}, \
.stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
.stby_mask = 0x3f, \
@@ -343,7 +346,7 @@ static struct pfuze_regulator pfuze200_regulators[] = {
};
static struct pfuze_regulator pfuze3000_regulators[] = {
- PFUZE100_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 700000, 1475000, 25000),
+ PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
@@ -648,7 +651,6 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
config.init_data = init_data;
config.driver_data = pfuze_chip;
config.of_node = match_of_node(i);
- config.ena_gpio = -EINVAL;
pfuze_chip->regulators[i] =
devm_regulator_register(&client->dev, desc, &config);
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 63c7a0c17777..9817f1a75342 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -25,6 +25,8 @@
#include <linux/regulator/driver.h>
#include <linux/regmap.h>
#include <linux/list.h>
+#include <linux/mfd/syscon.h>
+#include <linux/io.h>
/* Pin control enable input pins. */
#define SPMI_REGULATOR_PIN_CTRL_ENABLE_NONE 0x00
@@ -181,6 +183,23 @@ enum spmi_boost_byp_registers {
SPMI_BOOST_BYP_REG_CURRENT_LIMIT = 0x4b,
};
+enum spmi_saw3_registers {
+ SAW3_SECURE = 0x00,
+ SAW3_ID = 0x04,
+ SAW3_SPM_STS = 0x0C,
+ SAW3_AVS_STS = 0x10,
+ SAW3_PMIC_STS = 0x14,
+ SAW3_RST = 0x18,
+ SAW3_VCTL = 0x1C,
+ SAW3_AVS_CTL = 0x20,
+ SAW3_AVS_LIMIT = 0x24,
+ SAW3_AVS_DLY = 0x28,
+ SAW3_AVS_HYSTERESIS = 0x2C,
+ SAW3_SPM_STS2 = 0x38,
+ SAW3_SPM_PMIC_DATA_3 = 0x4C,
+ SAW3_VERSION = 0xFD0,
+};
+
/* Used for indexing into ctrl_reg. These are offets from 0x40 */
enum spmi_common_control_register_index {
SPMI_COMMON_IDX_VOLTAGE_RANGE = 0,
@@ -1035,6 +1054,89 @@ static irqreturn_t spmi_regulator_vs_ocp_isr(int irq, void *data)
return IRQ_HANDLED;
}
+#define SAW3_VCTL_DATA_MASK 0xFF
+#define SAW3_VCTL_CLEAR_MASK 0x700FF
+#define SAW3_AVS_CTL_EN_MASK 0x1
+#define SAW3_AVS_CTL_TGGL_MASK 0x8000000
+#define SAW3_AVS_CTL_CLEAR_MASK 0x7efc00
+
+static struct regmap *saw_regmap;
+
+static void spmi_saw_set_vdd(void *data)
+{
+ u32 vctl, data3, avs_ctl, pmic_sts;
+ bool avs_enabled = false;
+ unsigned long timeout;
+ u8 voltage_sel = *(u8 *)data;
+
+ regmap_read(saw_regmap, SAW3_AVS_CTL, &avs_ctl);
+ regmap_read(saw_regmap, SAW3_VCTL, &vctl);
+ regmap_read(saw_regmap, SAW3_SPM_PMIC_DATA_3, &data3);
+
+ /* select the band */
+ vctl &= ~SAW3_VCTL_CLEAR_MASK;
+ vctl |= (u32)voltage_sel;
+
+ data3 &= ~SAW3_VCTL_CLEAR_MASK;
+ data3 |= (u32)voltage_sel;
+
+ /* If AVS is enabled, switch it off during the voltage change */
+ avs_enabled = SAW3_AVS_CTL_EN_MASK & avs_ctl;
+ if (avs_enabled) {
+ avs_ctl &= ~SAW3_AVS_CTL_TGGL_MASK;
+ regmap_write(saw_regmap, SAW3_AVS_CTL, avs_ctl);
+ }
+
+ regmap_write(saw_regmap, SAW3_RST, 1);
+ regmap_write(saw_regmap, SAW3_VCTL, vctl);
+ regmap_write(saw_regmap, SAW3_SPM_PMIC_DATA_3, data3);
+
+ timeout = jiffies + usecs_to_jiffies(100);
+ do {
+ regmap_read(saw_regmap, SAW3_PMIC_STS, &pmic_sts);
+ pmic_sts &= SAW3_VCTL_DATA_MASK;
+ if (pmic_sts == (u32)voltage_sel)
+ break;
+
+ cpu_relax();
+
+ } while (time_before(jiffies, timeout));
+
+ /* After successful voltage change, switch the AVS back on */
+ if (avs_enabled) {
+ pmic_sts &= 0x3f;
+ avs_ctl &= ~SAW3_AVS_CTL_CLEAR_MASK;
+ avs_ctl |= ((pmic_sts - 4) << 10);
+ avs_ctl |= (pmic_sts << 17);
+ avs_ctl |= SAW3_AVS_CTL_TGGL_MASK;
+ regmap_write(saw_regmap, SAW3_AVS_CTL, avs_ctl);
+ }
+}
+
+static int
+spmi_regulator_saw_set_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ int ret;
+ u8 range_sel, voltage_sel;
+
+ ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
+ if (ret)
+ return ret;
+
+ if (range_sel) {
+ dev_dbg(&rdev->dev, "range_sel = %02X voltage_sel = %02X", \
+ range_sel, voltage_sel);
+ return -EINVAL;
+ }
+
+ /* Always do the SAW register writes on the first CPU */
+ return smp_call_function_single(0, spmi_saw_set_vdd, \
+ &voltage_sel, true);
+}
+
+static struct regulator_ops spmi_saw_ops = {};
+
static struct regulator_ops spmi_smps_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -1250,6 +1352,7 @@ static int spmi_regulator_match(struct spmi_regulator *vreg, u16 force_type)
}
dig_major_rev = version[SPMI_COMMON_REG_DIG_MAJOR_REV
- SPMI_COMMON_REG_DIG_MAJOR_REV];
+
if (!force_type) {
type = version[SPMI_COMMON_REG_TYPE -
SPMI_COMMON_REG_DIG_MAJOR_REV];
@@ -1648,7 +1751,9 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
struct regmap *regmap;
const char *name;
struct device *dev = &pdev->dev;
- int ret;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *syscon;
+ int ret, lenp;
struct list_head *vreg_list;
vreg_list = devm_kzalloc(dev, sizeof(*vreg_list), GFP_KERNEL);
@@ -1665,7 +1770,22 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
if (!match)
return -ENODEV;
+ if (of_find_property(node, "qcom,saw-reg", &lenp)) {
+ syscon = of_parse_phandle(node, "qcom,saw-reg", 0);
+ saw_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(saw_regmap))
+ dev_err(dev, "ERROR reading SAW regmap\n");
+ }
+
for (reg = match->data; reg->name; reg++) {
+
+ if (saw_regmap && \
+ of_find_property(of_find_node_by_name(node, reg->name), \
+ "qcom,saw-slave", &lenp)) {
+ continue;
+ }
+
vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
if (!vreg)
return -ENOMEM;
@@ -1673,7 +1793,6 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
vreg->dev = dev;
vreg->base = reg->base;
vreg->regmap = regmap;
-
if (reg->ocp) {
vreg->ocp_irq = platform_get_irq_byname(pdev, reg->ocp);
if (vreg->ocp_irq < 0) {
@@ -1681,7 +1800,6 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
goto err;
}
}
-
vreg->desc.id = -1;
vreg->desc.owner = THIS_MODULE;
vreg->desc.type = REGULATOR_VOLTAGE;
@@ -1698,6 +1816,15 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
if (ret)
continue;
+ if (saw_regmap && \
+ of_find_property(of_find_node_by_name(node, reg->name), \
+ "qcom,saw-leader", &lenp)) {
+ spmi_saw_ops = *(vreg->desc.ops);
+ spmi_saw_ops.set_voltage_sel = \
+ spmi_regulator_saw_set_voltage;
+ vreg->desc.ops = &spmi_saw_ops;
+ }
+
config.dev = dev;
config.driver_data = vreg;
config.regmap = regmap;
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 4836947e1521..b8443a360646 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
@@ -459,15 +460,14 @@ static void s5m8767_regulator_config_ext_control(struct s5m8767_info *s5m8767,
return;
}
- if (!gpio_is_valid(rdata->ext_control_gpio)) {
+ if (!rdata->ext_control_gpiod) {
dev_warn(s5m8767->dev,
"ext-control for %s: GPIO not valid, ignoring\n",
- rdata->reg_node->name);
+ rdata->reg_node->name);
return;
}
- config->ena_gpio = rdata->ext_control_gpio;
- config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
+ config->ena_gpiod = rdata->ext_control_gpiod;
}
/*
@@ -577,8 +577,14 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
continue;
}
- rdata->ext_control_gpio = of_get_named_gpio(reg_np,
- "s5m8767,pmic-ext-control-gpios", 0);
+ rdata->ext_control_gpiod = devm_gpiod_get_from_of_node(&pdev->dev,
+ reg_np,
+ "s5m8767,pmic-ext-control-gpios",
+ 0,
+ GPIOD_OUT_HIGH,
+ "s5m8767");
+ if (IS_ERR(rdata->ext_control_gpiod))
+ return PTR_ERR(rdata->ext_control_gpiod);
rdata->id = i;
rdata->initdata = of_get_regulator_init_data(
@@ -954,10 +960,8 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
config.driver_data = s5m8767;
config.regmap = iodev->regmap_pmic;
config.of_node = pdata->regulators[i].reg_node;
- config.ena_gpio = -EINVAL;
- config.ena_gpio_flags = 0;
- config.ena_gpio_initialized = true;
- if (gpio_is_valid(pdata->regulators[i].ext_control_gpio))
+ config.ena_gpiod = NULL;
+ if (pdata->regulators[i].ext_control_gpiod)
s5m8767_regulator_config_ext_control(s5m8767,
&pdata->regulators[i], &config);
@@ -970,7 +974,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
return ret;
}
- if (gpio_is_valid(pdata->regulators[i].ext_control_gpio)) {
+ if (pdata->regulators[i].ext_control_gpiod) {
ret = s5m8767_enable_ext_control(s5m8767, rdev);
if (ret < 0) {
dev_err(s5m8767->dev,
diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c
new file mode 100644
index 000000000000..65fbd1f0b612
--- /dev/null
+++ b/drivers/regulator/sy8106a-regulator.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// sy8106a-regulator.c - Regulator device driver for SY8106A
+//
+// Copyright (C) 2016 Ondřej Jirman <megous@megous.com>
+// Copyright (c) 2017-2018 Icenowy Zheng <icenowy@aosc.io>
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define SY8106A_REG_VOUT1_SEL 0x01
+#define SY8106A_REG_VOUT_COM 0x02
+#define SY8106A_REG_VOUT1_SEL_MASK 0x7f
+#define SY8106A_DISABLE_REG BIT(0)
+/*
+ * The I2C controlled voltage will only work when this bit is set; otherwise
+ * it will behave like a fixed regulator.
+ */
+#define SY8106A_GO_BIT BIT(7)
+
+struct sy8106a {
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ u32 fixed_voltage;
+};
+
+static const struct regmap_config sy8106a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct regulator_ops sy8106a_ops = {
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ /* Enabling/disabling the regulator is not yet implemented */
+};
+
+/* Default limits measured in millivolts */
+#define SY8106A_MIN_MV 680
+#define SY8106A_MAX_MV 1950
+#define SY8106A_STEP_MV 10
+
+static const struct regulator_desc sy8106a_reg = {
+ .name = "SY8106A",
+ .id = 0,
+ .ops = &sy8106a_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ((SY8106A_MAX_MV - SY8106A_MIN_MV) / SY8106A_STEP_MV) + 1,
+ .min_uV = (SY8106A_MIN_MV * 1000),
+ .uV_step = (SY8106A_STEP_MV * 1000),
+ .vsel_reg = SY8106A_REG_VOUT1_SEL,
+ .vsel_mask = SY8106A_REG_VOUT1_SEL_MASK,
+ /*
+ * This ramp_delay is a conservative default value which works on
+ * H3/H5 boards VDD-CPUX situations.
+ */
+ .ramp_delay = 200,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * I2C driver interface functions
+ */
+static int sy8106a_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct sy8106a *chip;
+ struct device *dev = &i2c->dev;
+ struct regulator_dev *rdev = NULL;
+ struct regulator_config config = { };
+ unsigned int reg, vsel;
+ int error;
+
+ chip = devm_kzalloc(&i2c->dev, sizeof(struct sy8106a), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ error = of_property_read_u32(dev->of_node, "silergy,fixed-microvolt",
+ &chip->fixed_voltage);
+ if (error)
+ return error;
+
+ if (chip->fixed_voltage < SY8106A_MIN_MV * 1000 ||
+ chip->fixed_voltage > SY8106A_MAX_MV * 1000)
+ return -EINVAL;
+
+ chip->regmap = devm_regmap_init_i2c(i2c, &sy8106a_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ error = PTR_ERR(chip->regmap);
+ dev_err(dev, "Failed to allocate register map: %d\n", error);
+ return error;
+ }
+
+ config.dev = &i2c->dev;
+ config.regmap = chip->regmap;
+ config.driver_data = chip;
+
+ config.of_node = dev->of_node;
+ config.init_data = of_get_regulator_init_data(dev, dev->of_node,
+ &sy8106a_reg);
+
+ if (!config.init_data)
+ return -ENOMEM;
+
+ /* Ensure GO_BIT is enabled when probing */
+ error = regmap_read(chip->regmap, SY8106A_REG_VOUT1_SEL, &reg);
+ if (error)
+ return error;
+
+ if (!(reg & SY8106A_GO_BIT)) {
+ vsel = (chip->fixed_voltage / 1000 - SY8106A_MIN_MV) /
+ SY8106A_STEP_MV;
+
+ error = regmap_write(chip->regmap, SY8106A_REG_VOUT1_SEL,
+ vsel | SY8106A_GO_BIT);
+ if (error)
+ return error;
+ }
+
+ /* Probe regulator */
+ rdev = devm_regulator_register(&i2c->dev, &sy8106a_reg, &config);
+ if (IS_ERR(rdev)) {
+ error = PTR_ERR(rdev);
+ dev_err(&i2c->dev, "Failed to register SY8106A regulator: %d\n", error);
+ return error;
+ }
+
+ chip->rdev = rdev;
+
+ i2c_set_clientdata(i2c, chip);
+
+ return 0;
+}
+
+static const struct of_device_id sy8106a_i2c_of_match[] = {
+ { .compatible = "silergy,sy8106a" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sy8106a_i2c_of_match);
+
+static const struct i2c_device_id sy8106a_i2c_id[] = {
+ { "sy8106a", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sy8106a_i2c_id);
+
+static struct i2c_driver sy8106a_regulator_driver = {
+ .driver = {
+ .name = "sy8106a",
+ .of_match_table = of_match_ptr(sy8106a_i2c_of_match),
+ },
+ .probe = sy8106a_i2c_probe,
+ .id_table = sy8106a_i2c_id,
+};
+
+module_i2c_driver(sy8106a_regulator_driver);
+
+MODULE_AUTHOR("Ondřej Jirman <megous@megous.com>");
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_DESCRIPTION("Regulator device driver for Silergy SY8106A");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index 395f35dc8cdb..2d398fa3b720 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -19,8 +19,8 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -300,26 +300,6 @@ static int tps65090_regulator_disable_ext_control(
return tps65090_config_ext_control(ri, false);
}
-static void tps65090_configure_regulator_config(
- struct tps65090_regulator_plat_data *tps_pdata,
- struct regulator_config *config)
-{
- if (gpio_is_valid(tps_pdata->gpio)) {
- int gpio_flag = GPIOF_OUT_INIT_LOW;
-
- if (tps_pdata->reg_init_data->constraints.always_on ||
- tps_pdata->reg_init_data->constraints.boot_on)
- gpio_flag = GPIOF_OUT_INIT_HIGH;
-
- config->ena_gpio = tps_pdata->gpio;
- config->ena_gpio_initialized = true;
- config->ena_gpio_flags = gpio_flag;
- } else {
- config->ena_gpio = -EINVAL;
- config->ena_gpio_initialized = false;
- }
-}
-
#ifdef CONFIG_OF
static struct of_regulator_match tps65090_matches[] = {
{ .name = "dcdc1", },
@@ -385,9 +365,26 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
rpdata->enable_ext_control = of_property_read_bool(
tps65090_matches[idx].of_node,
"ti,enable-ext-control");
- if (rpdata->enable_ext_control)
- rpdata->gpio = of_get_named_gpio(np,
- "dcdc-ext-control-gpios", 0);
+ if (rpdata->enable_ext_control) {
+ enum gpiod_flags gflags;
+
+ if (ri_data->constraints.always_on ||
+ ri_data->constraints.boot_on)
+ gflags = GPIOD_OUT_HIGH;
+ else
+ gflags = GPIOD_OUT_LOW;
+
+ rpdata->gpiod = devm_gpiod_get_from_of_node(&pdev->dev,
+ tps65090_matches[idx].of_node,
+ "dcdc-ext-control-gpios", 0,
+ gflags,
+ "tps65090");
+ if (IS_ERR(rpdata->gpiod))
+ return ERR_CAST(rpdata->gpiod);
+ if (!rpdata->gpiod)
+ dev_err(&pdev->dev,
+ "could not find DCDC external control GPIO\n");
+ }
if (of_property_read_u32(tps65090_matches[idx].of_node,
"ti,overcurrent-wait",
@@ -455,8 +452,7 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
*/
if (tps_pdata && is_dcdc(num) && tps_pdata->reg_init_data) {
if (tps_pdata->enable_ext_control) {
- tps65090_configure_regulator_config(
- tps_pdata, &config);
+ config.ena_gpiod = tps_pdata->gpiod;
ri->desc->ops = &tps65090_ext_control_ops;
} else {
ret = tps65090_regulator_disable_ext_control(
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 9e9d22038017..ba3dae7b2d2d 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -342,6 +342,7 @@ static struct tps6586x_regulator *find_regulator_info(int id, int version)
switch (version) {
case TPS658623:
+ case TPS658624:
table = tps658623_regulator;
num = ARRAY_SIZE(tps658623_regulator);
break;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index a4456db5849d..884c7505ed91 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -274,7 +274,7 @@ static inline unsigned int twl4030reg_map_mode(unsigned int mode)
case RES_STATE_SLEEP:
return REGULATOR_MODE_STANDBY;
default:
- return -EINVAL;
+ return REGULATOR_MODE_INVALID;
}
}
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index da9106bd2109..8ad11b074b49 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -911,6 +911,7 @@ static unsigned int get_mode(int uA, const struct wm8350_dcdc_efficiency *eff)
while (eff[i].uA_load_min != -1) {
if (uA >= eff[i].uA_load_min && uA <= eff[i].uA_load_max)
return eff[i].mode;
+ i++;
}
return REGULATOR_MODE_NORMAL;
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 59e6dede3db3..a2ba5db36145 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1613,7 +1613,7 @@ config RTC_DRV_JZ4740
If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
controllers.
- This driver can also be buillt as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called rtc-jz4740.
config RTC_DRV_LPC24XX
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 7cbdc9228dd5..6d4012dd6922 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -441,6 +441,11 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
int err;
+ if (!rtc->ops)
+ return -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ return -EINVAL;
+
err = rtc_valid_tm(&alarm->time);
if (err != 0)
return err;
diff --git a/drivers/rtc/nvmem.c b/drivers/rtc/nvmem.c
index 17ec4c8d0fad..36ab183c42f1 100644
--- a/drivers/rtc/nvmem.c
+++ b/drivers/rtc/nvmem.c
@@ -94,7 +94,7 @@ int rtc_nvmem_register(struct rtc_device *rtc,
nvmem_config->dev = rtc->dev.parent;
nvmem_config->owner = rtc->owner;
rtc->nvmem = nvmem_register(nvmem_config);
- if (IS_ERR_OR_NULL(rtc->nvmem))
+ if (IS_ERR(rtc->nvmem))
return PTR_ERR(rtc->nvmem);
/* Register the old ABI */
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 6cbafefa80a2..cab293cb2bf0 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -52,10 +52,8 @@ struct pm80x_rtc_info {
struct regmap *map;
struct rtc_device *rtc_dev;
struct device *dev;
- struct delayed_work calib_work;
int irq;
- int vrtc;
};
static irqreturn_t rtc_update_handler(int irq, void *data)
@@ -100,13 +98,13 @@ static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
next->tm_min = alrm->tm_min;
next->tm_sec = alrm->tm_sec;
- rtc_tm_to_time(now, &now_time);
- rtc_tm_to_time(next, &next_time);
+ now_time = rtc_tm_to_time64(now);
+ next_time = rtc_tm_to_time64(next);
if (next_time < now_time) {
/* Advance one day */
next_time += 60 * 60 * 24;
- rtc_time_to_tm(next_time, next);
+ rtc_time64_to_tm(next_time, next);
}
}
@@ -125,7 +123,7 @@ static int pm80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
- rtc_time_to_tm(ticks, tm);
+ rtc_time64_to_tm(ticks, tm);
return 0;
}
@@ -134,13 +132,8 @@ static int pm80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
struct pm80x_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[4];
unsigned long ticks, base, data;
- if (tm->tm_year > 206) {
- dev_dbg(info->dev,
- "Set time %d out of range. Please set time between 1970 to 2106.\n",
- 1900 + tm->tm_year);
- return -EINVAL;
- }
- rtc_tm_to_time(tm, &ticks);
+
+ ticks = rtc_tm_to_time64(tm);
/* load 32-bit read-only counter */
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
@@ -174,7 +167,7 @@ static int pm80x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
- rtc_time_to_tm(ticks, &alrm->time);
+ rtc_time64_to_tm(ticks, &alrm->time);
regmap_read(info->map, PM800_RTC_CONTROL, &ret);
alrm->enabled = (ret & PM800_ALARM1_EN) ? 1 : 0;
alrm->pending = (ret & (PM800_ALARM | PM800_ALARM_WAKEUP)) ? 1 : 0;
@@ -202,11 +195,11 @@ static int pm80x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
- rtc_time_to_tm(ticks, &now_tm);
+ rtc_time64_to_tm(ticks, &now_tm);
dev_dbg(info->dev, "%s, now time : %lu\n", __func__, ticks);
rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
/* get new ticks for alarm in 24 hours */
- rtc_tm_to_time(&alarm_tm, &ticks);
+ ticks = rtc_tm_to_time64(&alarm_tm);
dev_dbg(info->dev, "%s, alarm time: %lu\n", __func__, ticks);
data = ticks - base;
@@ -254,8 +247,6 @@ static int pm80x_rtc_probe(struct platform_device *pdev)
struct pm80x_rtc_pdata *pdata = dev_get_platdata(&pdev->dev);
struct pm80x_rtc_info *info;
struct device_node *node = pdev->dev.of_node;
- struct rtc_time tm;
- unsigned long ticks = 0;
int ret;
if (!pdata && !node) {
@@ -294,6 +285,10 @@ static int pm80x_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, info);
+ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(info->rtc_dev))
+ return PTR_ERR(info->rtc_dev);
+
ret = pm80x_request_irq(chip, info->irq, rtc_update_handler,
IRQF_ONESHOT, "rtc", info);
if (ret < 0) {
@@ -302,30 +297,11 @@ static int pm80x_rtc_probe(struct platform_device *pdev)
goto out;
}
- ret = pm80x_rtc_read_time(&pdev->dev, &tm);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to read initial time.\n");
- goto out_rtc;
- }
- if ((tm.tm_year < 70) || (tm.tm_year > 138)) {
- tm.tm_year = 70;
- tm.tm_mon = 0;
- tm.tm_mday = 1;
- tm.tm_hour = 0;
- tm.tm_min = 0;
- tm.tm_sec = 0;
- ret = pm80x_rtc_set_time(&pdev->dev, &tm);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to set initial time.\n");
- goto out_rtc;
- }
- }
- rtc_tm_to_time(&tm, &ticks);
+ info->rtc_dev->ops = &pm80x_rtc_ops;
+ info->rtc_dev->range_max = U32_MAX;
- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm80x-rtc",
- &pm80x_rtc_ops, THIS_MODULE);
- if (IS_ERR(info->rtc_dev)) {
- ret = PTR_ERR(info->rtc_dev);
+ ret = rtc_register_device(info->rtc_dev);
+ if (ret) {
dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
goto out_rtc;
}
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 8dc451932446..2233601761ac 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -265,15 +265,6 @@ static int abb5zes3_rtc_set_time(struct device *dev, struct rtc_time *tm)
u8 regs[ABB5ZES3_REG_RTC_SC + ABB5ZES3_RTC_SEC_LEN];
int ret;
- /*
- * Year register is 8-bit wide and bcd-coded, i.e records values
- * between 0 and 99. tm_year is an offset from 1900 and we are
- * interested in the 2000-2099 range, so any value less than 100
- * is invalid.
- */
- if (tm->tm_year < 100)
- return -EINVAL;
-
regs[ABB5ZES3_REG_RTC_SC] = bin2bcd(tm->tm_sec); /* MSB=0 clears OSC */
regs[ABB5ZES3_REG_RTC_MN] = bin2bcd(tm->tm_min);
regs[ABB5ZES3_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */
@@ -925,6 +916,14 @@ static int abb5zes3_probe(struct i2c_client *client,
if (ret)
goto err;
+ data->rtc = devm_rtc_allocate_device(dev);
+ ret = PTR_ERR_OR_ZERO(data->rtc);
+ if (ret) {
+ dev_err(dev, "%s: unable to allocate RTC device (%d)\n",
+ __func__, ret);
+ goto err;
+ }
+
if (client->irq > 0) {
ret = devm_request_threaded_irq(dev, client->irq, NULL,
_abb5zes3_rtc_interrupt,
@@ -942,14 +941,9 @@ static int abb5zes3_probe(struct i2c_client *client,
}
}
- data->rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops,
- THIS_MODULE);
- ret = PTR_ERR_OR_ZERO(data->rtc);
- if (ret) {
- dev_err(dev, "%s: unable to register RTC device (%d)\n",
- __func__, ret);
- goto err;
- }
+ data->rtc->ops = &rtc_ops;
+ data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ data->rtc->range_max = RTC_TIMESTAMP_END_2099;
/* Enable battery low detection interrupt if battery not already low */
if (!data->battery_low && data->irq) {
@@ -961,6 +955,8 @@ static int abb5zes3_probe(struct i2c_client *client,
}
}
+ ret = rtc_register_device(data->rtc);
+
err:
if (ret && data && data->irq)
device_init_wakeup(dev, false);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index de81ecedd571..caa71d04e989 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -440,6 +440,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
rtc->ops = &at91_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
ret = rtc_register_device(rtc);
if (ret)
goto err_clk;
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index bd170cb3361c..d768f6747961 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -48,8 +48,7 @@ static void bq4802_write_mem(struct bq4802 *p, int off, u8 val)
static int bq4802_read_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct bq4802 *p = platform_get_drvdata(pdev);
+ struct bq4802 *p = dev_get_drvdata(dev);
unsigned long flags;
unsigned int century;
u8 val;
@@ -91,8 +90,7 @@ static int bq4802_read_time(struct device *dev, struct rtc_time *tm)
static int bq4802_set_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct bq4802 *p = platform_get_drvdata(pdev);
+ struct bq4802 *p = dev_get_drvdata(dev);
u8 sec, min, hrs, day, mon, yrs, century, val;
unsigned long flags;
unsigned int year;
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index bdd6674a1054..f4010a75f2be 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -145,9 +145,6 @@ static int brcmstb_waketmr_settime(struct device *dev,
sec = rtc_tm_to_time64(tm);
- if (sec > U32_MAX || sec < 0)
- return -EINVAL;
-
writel_relaxed(sec, timer->base + BRCMSTB_WKTMR_COUNTER);
return 0;
@@ -184,9 +181,6 @@ static int brcmstb_waketmr_setalarm(struct device *dev,
else
sec = 0;
- if (sec > U32_MAX || sec < 0)
- return -EINVAL;
-
brcmstb_waketmr_set_alarm(timer, sec);
return 0;
@@ -229,6 +223,10 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
if (IS_ERR(timer->base))
return PTR_ERR(timer->base);
+ timer->rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(timer->rtc))
+ return PTR_ERR(timer->rtc);
+
/*
* Set wakeup capability before requesting wakeup interrupt, so we can
* process boot-time "wakeups" (e.g., from S5 soft-off)
@@ -261,11 +259,12 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
timer->reboot_notifier.notifier_call = brcmstb_waketmr_reboot;
register_reboot_notifier(&timer->reboot_notifier);
- timer->rtc = rtc_device_register("brcmstb-waketmr", dev,
- &brcmstb_waketmr_ops, THIS_MODULE);
- if (IS_ERR(timer->rtc)) {
+ timer->rtc->ops = &brcmstb_waketmr_ops;
+ timer->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(timer->rtc);
+ if (ret) {
dev_err(dev, "unable to register device\n");
- ret = PTR_ERR(timer->rtc);
goto err_notifier;
}
@@ -288,7 +287,6 @@ static int brcmstb_waketmr_remove(struct platform_device *pdev)
struct brcmstb_waketmr *timer = dev_get_drvdata(&pdev->dev);
unregister_reboot_notifier(&timer->reboot_notifier);
- rtc_device_unregister(timer->rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 1b3738a11702..cd3a2411bc2f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -43,11 +43,24 @@
#include <linux/of_platform.h>
#ifdef CONFIG_X86
#include <asm/i8259.h>
+#include <asm/processor.h>
+#include <linux/dmi.h>
#endif
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <linux/mc146818rtc.h>
+/*
+ * Use ACPI SCI to replace HPET interrupt for RTC Alarm event
+ *
+ * If cleared, ACPI SCI is only used to wake up the system from suspend
+ *
+ * If set, ACPI SCI is used to handle UIE/AIE and system wakeup
+ */
+
+static bool use_acpi_alarm;
+module_param(use_acpi_alarm, bool, 0444);
+
struct cmos_rtc {
struct rtc_device *rtc;
struct device *dev;
@@ -153,6 +166,12 @@ static inline int hpet_unregister_irq_handler(irq_handler_t handler)
#endif
+/* Don't use HPET for RTC Alarm event if ACPI Fixed event is used */
+static int use_hpet_alarm(void)
+{
+ return is_hpet_enabled() && !use_acpi_alarm;
+}
+
/*----------------------------------------------------------------*/
#ifdef RTC_PORT
@@ -298,7 +317,7 @@ static void cmos_checkintr(struct cmos_rtc *cmos, unsigned char rtc_control)
*/
rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
- if (is_hpet_enabled())
+ if (use_hpet_alarm())
return;
rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
@@ -318,7 +337,13 @@ static void cmos_irq_enable(struct cmos_rtc *cmos, unsigned char mask)
rtc_control |= mask;
CMOS_WRITE(rtc_control, RTC_CONTROL);
- hpet_set_rtc_irq_bit(mask);
+ if (use_hpet_alarm())
+ hpet_set_rtc_irq_bit(mask);
+
+ if ((mask & RTC_AIE) && use_acpi_alarm) {
+ if (cmos->wake_on)
+ cmos->wake_on(cmos->dev);
+ }
cmos_checkintr(cmos, rtc_control);
}
@@ -330,7 +355,13 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask)
rtc_control = CMOS_READ(RTC_CONTROL);
rtc_control &= ~mask;
CMOS_WRITE(rtc_control, RTC_CONTROL);
- hpet_mask_rtc_irq_bit(mask);
+ if (use_hpet_alarm())
+ hpet_mask_rtc_irq_bit(mask);
+
+ if ((mask & RTC_AIE) && use_acpi_alarm) {
+ if (cmos->wake_off)
+ cmos->wake_off(cmos->dev);
+ }
cmos_checkintr(cmos, rtc_control);
}
@@ -448,10 +479,14 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
CMOS_WRITE(mon, cmos->mon_alrm);
}
- /* FIXME the HPET alarm glue currently ignores day_alrm
- * and mon_alrm ...
- */
- hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, t->time.tm_sec);
+ if (use_hpet_alarm()) {
+ /*
+ * FIXME the HPET alarm glue currently ignores day_alrm
+ * and mon_alrm ...
+ */
+ hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min,
+ t->time.tm_sec);
+ }
if (t->enabled)
cmos_irq_enable(cmos, RTC_AIE);
@@ -508,7 +543,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
"batt_status\t: %s\n",
(rtc_control & RTC_PIE) ? "yes" : "no",
(rtc_control & RTC_UIE) ? "yes" : "no",
- is_hpet_enabled() ? "yes" : "no",
+ use_hpet_alarm() ? "yes" : "no",
// (rtc_control & RTC_SQWE) ? "yes" : "no",
(rtc_control & RTC_DM_BINARY) ? "no" : "yes",
(rtc_control & RTC_DST_EN) ? "yes" : "no",
@@ -614,7 +649,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
*/
irqstat = CMOS_READ(RTC_INTR_FLAGS);
rtc_control = CMOS_READ(RTC_CONTROL);
- if (is_hpet_enabled())
+ if (use_hpet_alarm())
irqstat = (unsigned long)irq & 0xF0;
/* If we were suspended, RTC_CONTROL may not be accurate since the
@@ -633,7 +668,8 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
cmos_rtc.suspend_ctrl &= ~RTC_AIE;
rtc_control &= ~RTC_AIE;
CMOS_WRITE(rtc_control, RTC_CONTROL);
- hpet_mask_rtc_irq_bit(RTC_AIE);
+ if (use_hpet_alarm())
+ hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
}
spin_unlock(&rtc_lock);
@@ -762,7 +798,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
* need to do something about other clock frequencies.
*/
cmos_rtc.rtc->irq_freq = 1024;
- hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq);
+ if (use_hpet_alarm())
+ hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq);
CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
}
@@ -780,12 +817,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
- hpet_rtc_timer_init();
+ if (use_hpet_alarm())
+ hpet_rtc_timer_init();
if (is_valid_irq(rtc_irq)) {
irq_handler_t rtc_cmos_int_handler;
- if (is_hpet_enabled()) {
+ if (use_hpet_alarm()) {
rtc_cmos_int_handler = hpet_rtc_interrupt;
retval = hpet_register_irq_handler(cmos_interrupt);
if (retval) {
@@ -824,7 +862,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
"alarms up to one day",
cmos_rtc.century ? ", y3k" : "",
nvmem_cfg.size,
- is_hpet_enabled() ? ", hpet irqs" : "");
+ use_hpet_alarm() ? ", hpet irqs" : "");
return 0;
@@ -858,7 +896,8 @@ static void cmos_do_remove(struct device *dev)
if (is_valid_irq(cmos->irq)) {
free_irq(cmos->irq, cmos->rtc);
- hpet_unregister_irq_handler(cmos_interrupt);
+ if (use_hpet_alarm())
+ hpet_unregister_irq_handler(cmos_interrupt);
}
cmos->rtc = NULL;
@@ -935,13 +974,13 @@ static int cmos_suspend(struct device *dev)
mask = RTC_IRQMASK;
tmp &= ~mask;
CMOS_WRITE(tmp, RTC_CONTROL);
- hpet_mask_rtc_irq_bit(mask);
-
+ if (use_hpet_alarm())
+ hpet_mask_rtc_irq_bit(mask);
cmos_checkintr(cmos, tmp);
}
spin_unlock_irq(&rtc_lock);
- if (tmp & RTC_AIE) {
+ if ((tmp & RTC_AIE) && !use_acpi_alarm) {
cmos->enabled_wake = 1;
if (cmos->wake_on)
cmos->wake_on(dev);
@@ -976,8 +1015,26 @@ static void cmos_check_wkalrm(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
struct rtc_wkalrm current_alarm;
+ time64_t t_now;
time64_t t_current_expires;
time64_t t_saved_expires;
+ struct rtc_time now;
+
+ /* Check if we have RTC Alarm armed */
+ if (!(cmos->suspend_ctrl & RTC_AIE))
+ return;
+
+ cmos_read_time(dev, &now);
+ t_now = rtc_tm_to_time64(&now);
+
+ /*
+ * ACPI RTC wake event is cleared after resume from STR,
+ * ACK the rtc irq here
+ */
+ if (t_now >= cmos->alarm_expires && use_acpi_alarm) {
+ cmos_interrupt(0, (void *)cmos->rtc);
+ return;
+ }
cmos_read_alarm(dev, &current_alarm);
t_current_expires = rtc_tm_to_time64(&current_alarm.time);
@@ -996,7 +1053,7 @@ static int __maybe_unused cmos_resume(struct device *dev)
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char tmp;
- if (cmos->enabled_wake) {
+ if (cmos->enabled_wake && !use_acpi_alarm) {
if (cmos->wake_off)
cmos->wake_off(dev);
else
@@ -1014,16 +1071,17 @@ static int __maybe_unused cmos_resume(struct device *dev)
if (tmp & RTC_IRQMASK) {
unsigned char mask;
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev) && use_hpet_alarm())
hpet_rtc_timer_init();
do {
CMOS_WRITE(tmp, RTC_CONTROL);
- hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
+ if (use_hpet_alarm())
+ hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
mask = CMOS_READ(RTC_INTR_FLAGS);
mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
- if (!is_hpet_enabled() || !is_intr(mask))
+ if (!use_hpet_alarm() || !is_intr(mask))
break;
/* force one-shot behavior if HPET blocked
@@ -1068,16 +1126,27 @@ static u32 rtc_handler(void *context)
unsigned char rtc_intr;
unsigned long flags;
- spin_lock_irqsave(&rtc_lock, flags);
- if (cmos_rtc.suspend_ctrl)
- rtc_control = CMOS_READ(RTC_CONTROL);
- if (rtc_control & RTC_AIE) {
- cmos_rtc.suspend_ctrl &= ~RTC_AIE;
- CMOS_WRITE(rtc_control, RTC_CONTROL);
- rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
- rtc_update_irq(cmos->rtc, 1, rtc_intr);
+
+ /*
+ * Always update rtc irq when ACPI is used as RTC Alarm.
+ * Or else, ACPI SCI is enabled during suspend/resume only,
+ * update rtc irq in that case.
+ */
+ if (use_acpi_alarm)
+ cmos_interrupt(0, (void *)cmos->rtc);
+ else {
+ /* Fix me: can we use cmos_interrupt() here as well? */
+ spin_lock_irqsave(&rtc_lock, flags);
+ if (cmos_rtc.suspend_ctrl)
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ if (rtc_control & RTC_AIE) {
+ cmos_rtc.suspend_ctrl &= ~RTC_AIE;
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+ rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_update_irq(cmos->rtc, 1, rtc_intr);
+ }
+ spin_unlock_irqrestore(&rtc_lock, flags);
}
- spin_unlock_irqrestore(&rtc_lock, flags);
pm_wakeup_hard_event(dev);
acpi_clear_event(ACPI_EVENT_RTC);
@@ -1107,6 +1176,28 @@ static void rtc_wake_off(struct device *dev)
acpi_disable_event(ACPI_EVENT_RTC, 0);
}
+#ifdef CONFIG_X86
+/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
+static void use_acpi_alarm_quirks(void)
+{
+ int year;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return;
+
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+ return;
+
+ if (!is_hpet_enabled())
+ return;
+
+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2015)
+ use_acpi_alarm = true;
+}
+#else
+static inline void use_acpi_alarm_quirks(void) { }
+#endif
+
/* Every ACPI platform has a mc146818 compatible "cmos rtc". Here we find
* its device node and pass extra config data. This helps its driver use
* capabilities that the now-obsolete mc146818 didn't have, and informs it
@@ -1119,6 +1210,8 @@ static void cmos_wake_setup(struct device *dev)
if (acpi_disabled)
return;
+ use_acpi_alarm_quirks();
+
rtc_wake_setup(dev);
acpi_rtc_info.wake_on = rtc_wake_on;
acpi_rtc_info.wake_off = rtc_wake_off;
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index bf7ced095c94..e5444296075e 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -1,19 +1,8 @@
-/*
- * RTC driver for Chrome OS Embedded Controller
- *
- * Copyright (c) 2017, Google, Inc
- *
- * Author: Stephen Barber <smbarber@chromium.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// RTC driver for ChromeOS Embedded Controller.
+//
+// Copyright (C) 2017 Google, Inc.
+// Author: Stephen Barber <smbarber@chromium.org>
#include <linux/kernel.h>
#include <linux/mfd/cros_ec.h>
@@ -409,5 +398,5 @@ module_platform_driver(cros_ec_rtc_driver);
MODULE_DESCRIPTION("RTC driver for Chrome OS ECs");
MODULE_AUTHOR("Stephen Barber <smbarber@chromium.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
index 5f158715fb4c..50fabe1cd286 100644
--- a/drivers/rtc/rtc-ds1216.c
+++ b/drivers/rtc/rtc-ds1216.c
@@ -76,8 +76,7 @@ static void ds1216_switch_ds_to_clock(u8 __iomem *ioaddr)
static int ds1216_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1216_priv *priv = platform_get_drvdata(pdev);
+ struct ds1216_priv *priv = dev_get_drvdata(dev);
struct ds1216_regs regs;
ds1216_switch_ds_to_clock(priv->ioaddr);
@@ -104,8 +103,7 @@ static int ds1216_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int ds1216_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1216_priv *priv = platform_get_drvdata(pdev);
+ struct ds1216_priv *priv = dev_get_drvdata(dev);
struct ds1216_regs regs;
ds1216_switch_ds_to_clock(priv->ioaddr);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index a13e59edff53..e9ec4160d7f6 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -201,6 +201,7 @@ static const struct chip_desc chips[last_ds_type] = {
.century_reg = DS1307_REG_HOUR,
.century_enable_bit = DS1340_BIT_CENTURY_EN,
.century_bit = DS1340_BIT_CENTURY,
+ .do_trickle_setup = &do_trickle_setup_ds1339,
.trickle_charger_reg = 0x08,
},
[ds_1341] = {
@@ -1371,6 +1372,7 @@ static void ds1307_clks_register(struct ds1307 *ds1307)
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0x9,
};
static int ds1307_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index a7d5ca428d68..b8b6e51c0461 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -314,8 +314,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
static int
ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
if (pdata->irq <= 0)
return -EINVAL;
@@ -334,8 +333,7 @@ ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int
ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
if (pdata->irq <= 0)
return -EINVAL;
@@ -373,8 +371,7 @@ ds1511_interrupt(int irq, void *dev_id)
static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
if (pdata->irq <= 0)
return -EINVAL;
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 2441b9a2b366..34af7a802f43 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -73,8 +73,7 @@ struct rtc_plat_data {
static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u8 century;
@@ -98,8 +97,7 @@ static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int ds1553_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
unsigned int year, month, day, hour, minute, second, week;
unsigned int century;
@@ -155,8 +153,7 @@ static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
if (pdata->irq <= 0)
return -EINVAL;
@@ -172,8 +169,7 @@ static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int ds1553_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
if (pdata->irq <= 0)
return -EINVAL;
@@ -208,8 +204,7 @@ static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id)
static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
if (pdata->irq <= 0)
return -EINVAL;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 1a39829d2b40..5c0db6c8134c 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -267,8 +267,7 @@ ds1685_rtc_get_ssn(struct ds1685_priv *rtc, u8 *ssn)
static int
ds1685_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 ctrlb, century;
u8 seconds, minutes, hours, wday, mday, month, years;
@@ -317,8 +316,7 @@ ds1685_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int
ds1685_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 ctrlb, seconds, minutes, hours, wday, mday, month, years, century;
/* Fetch the time info from rtc_time. */
@@ -394,8 +392,7 @@ ds1685_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int
ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 seconds, minutes, hours, mday, ctrlb, ctrlc;
int ret;
@@ -453,8 +450,7 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int
ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 ctrlb, seconds, minutes, hours, mday;
int ret;
@@ -1119,8 +1115,7 @@ static ssize_t
ds1685_rtc_sysfs_battery_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 ctrld;
ctrld = rtc->read(rtc, RTC_CTRL_D);
@@ -1140,8 +1135,7 @@ static ssize_t
ds1685_rtc_sysfs_auxbatt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 ctrl4a;
ds1685_rtc_switch_to_bank1(rtc);
@@ -1163,8 +1157,7 @@ static ssize_t
ds1685_rtc_sysfs_serial_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ds1685_priv *rtc = platform_get_drvdata(pdev);
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 ssn[8];
ds1685_rtc_switch_to_bank1(rtc);
@@ -2044,6 +2037,26 @@ ds1685_rtc_probe(struct platform_device *pdev)
rtc->write(rtc, RTC_EXT_CTRL_4B,
(rtc->read(rtc, RTC_EXT_CTRL_4B) | RTC_CTRL_4B_KSE));
+ rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc_dev))
+ return PTR_ERR(rtc_dev);
+
+ rtc_dev->ops = &ds1685_rtc_ops;
+
+ /* Century bit is useless because leap year fails in 1900 and 2100 */
+ rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc_dev->range_max = RTC_TIMESTAMP_END_2099;
+
+ /* Maximum periodic rate is 8192Hz (0.122070ms). */
+ rtc_dev->max_user_freq = RTC_MAX_USER_FREQ;
+
+ /* See if the platform doesn't support UIE. */
+ if (pdata->uie_unsupported)
+ rtc_dev->uie_unsupported = 1;
+ rtc->uie_unsupported = pdata->uie_unsupported;
+
+ rtc->dev = rtc_dev;
+
/*
* Fetch the IRQ and setup the interrupt handler.
*
@@ -2076,32 +2089,13 @@ ds1685_rtc_probe(struct platform_device *pdev)
/* Setup complete. */
ds1685_rtc_switch_to_bank0(rtc);
- /* Register the device as an RTC. */
- rtc_dev = rtc_device_register(pdev->name, &pdev->dev,
- &ds1685_rtc_ops, THIS_MODULE);
-
- /* Success? */
- if (IS_ERR(rtc_dev))
- return PTR_ERR(rtc_dev);
-
- /* Maximum periodic rate is 8192Hz (0.122070ms). */
- rtc_dev->max_user_freq = RTC_MAX_USER_FREQ;
-
- /* See if the platform doesn't support UIE. */
- if (pdata->uie_unsupported)
- rtc_dev->uie_unsupported = 1;
- rtc->uie_unsupported = pdata->uie_unsupported;
-
- rtc->dev = rtc_dev;
-
#ifdef CONFIG_SYSFS
ret = ds1685_rtc_sysfs_register(&pdev->dev);
if (ret)
- rtc_device_unregister(rtc->dev);
+ return ret;
#endif
- /* Done! */
- return ret;
+ return rtc_register_device(rtc_dev);
}
/**
@@ -2117,8 +2111,6 @@ ds1685_rtc_remove(struct platform_device *pdev)
ds1685_rtc_sysfs_unregister(&pdev->dev);
#endif
- rtc_device_unregister(rtc->dev);
-
/* Read Ctrl B and clear PIE/AIE/UIE. */
rtc->write(rtc, RTC_CTRL_B,
(rtc->read(rtc, RTC_CTRL_B) &
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 2d781180e968..5a4c2c5e86fe 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -58,8 +58,7 @@ struct rtc_plat_data {
static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr_rtc;
u8 century;
@@ -83,8 +82,7 @@ static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr_rtc;
unsigned int year, month, day, hour, minute, second, week;
unsigned int century;
@@ -154,8 +152,6 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
int ret = 0;
struct nvmem_config nvmem_cfg = {
.name = "ds1742_nvram",
- .word_size = 1,
- .stride = 1,
.reg_read = ds1742_nvram_read,
.reg_write = ds1742_nvram_write,
};
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index af8d6beae20c..61f798c6101f 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -73,8 +73,8 @@ static int ftrtc010_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct ftrtc010_rtc *rtc = dev_get_drvdata(dev);
- unsigned int days, hour, min, sec;
- unsigned long offset, time;
+ u32 days, hour, min, sec, offset;
+ timeu64_t time;
sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
@@ -84,7 +84,7 @@ static int ftrtc010_rtc_read_time(struct device *dev, struct rtc_time *tm)
time = offset + days * 86400 + hour * 3600 + min * 60 + sec;
- rtc_time_to_tm(time, tm);
+ rtc_time64_to_tm(time, tm);
return 0;
}
@@ -92,13 +92,10 @@ static int ftrtc010_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int ftrtc010_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct ftrtc010_rtc *rtc = dev_get_drvdata(dev);
- unsigned int sec, min, hour, day;
- unsigned long offset, time;
+ u32 sec, min, hour, day, offset;
+ timeu64_t time;
- if (tm->tm_year >= 2148) /* EPOCH Year + 179 */
- return -EINVAL;
-
- rtc_tm_to_time(tm, &time);
+ time = rtc_tm_to_time64(tm);
sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
@@ -120,6 +117,7 @@ static const struct rtc_class_ops ftrtc010_rtc_ops = {
static int ftrtc010_rtc_probe(struct platform_device *pdev)
{
+ u32 days, hour, min, sec;
struct ftrtc010_rtc *rtc;
struct device *dev = &pdev->dev;
struct resource *res;
@@ -166,14 +164,27 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
if (!rtc->rtc_base)
return -ENOMEM;
+ rtc->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
+ rtc->rtc_dev->ops = &ftrtc010_rtc_ops;
+
+ sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
+ min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
+ hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR);
+ days = readl(rtc->rtc_base + FTRTC010_RTC_DAYS);
+
+ rtc->rtc_dev->range_min = (u64)days * 86400 + hour * 3600 +
+ min * 60 + sec;
+ rtc->rtc_dev->range_max = U32_MAX + rtc->rtc_dev->range_min;
+
ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt,
IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
return ret;
- rtc->rtc_dev = rtc_device_register(pdev->name, dev,
- &ftrtc010_rtc_ops, THIS_MODULE);
- return PTR_ERR_OR_ZERO(rtc->rtc_dev);
+ return rtc_register_device(rtc->rtc_dev);
}
static int ftrtc010_rtc_remove(struct platform_device *pdev)
@@ -184,7 +195,6 @@ static int ftrtc010_rtc_remove(struct platform_device *pdev)
clk_disable_unprepare(rtc->extclk);
if (!IS_ERR(rtc->pclk))
clk_disable_unprepare(rtc->pclk);
- rtc_device_unregister(rtc->rtc_dev);
return 0;
}
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 3ba87239aacc..910e600275b9 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -294,11 +294,10 @@ static int lpc32xx_rtc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int lpc32xx_rtc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
if (rtc->irq >= 0) {
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(dev))
enable_irq_wake(rtc->irq);
else
disable_irq_wake(rtc->irq);
@@ -309,10 +308,9 @@ static int lpc32xx_rtc_suspend(struct device *dev)
static int lpc32xx_rtc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
- if (rtc->irq >= 0 && device_may_wakeup(&pdev->dev))
+ if (rtc->irq >= 0 && device_may_wakeup(dev))
disable_irq_wake(rtc->irq);
return 0;
@@ -321,8 +319,7 @@ static int lpc32xx_rtc_resume(struct device *dev)
/* Unconditionally disable the alarm */
static int lpc32xx_rtc_freeze(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
spin_lock_irq(&rtc->lock);
@@ -337,8 +334,7 @@ static int lpc32xx_rtc_freeze(struct device *dev)
static int lpc32xx_rtc_thaw(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
if (rtc->alarm_enabled) {
spin_lock_irq(&rtc->lock);
diff --git a/drivers/rtc/rtc-ls1x.c b/drivers/rtc/rtc-ls1x.c
index 045af1135e48..f4c248655edd 100644
--- a/drivers/rtc/rtc-ls1x.c
+++ b/drivers/rtc/rtc-ls1x.c
@@ -87,16 +87,17 @@
static int ls1x_rtc_read_time(struct device *dev, struct rtc_time *rtm)
{
- unsigned long v, t;
+ unsigned long v;
+ time64_t t;
v = readl(SYS_TOYREAD0);
t = readl(SYS_TOYREAD1);
memset(rtm, 0, sizeof(struct rtc_time));
- t = mktime((t & LS1X_YEAR_MASK), ls1x_get_month(v),
+ t = mktime64((t & LS1X_YEAR_MASK), ls1x_get_month(v),
ls1x_get_day(v), ls1x_get_hour(v),
ls1x_get_min(v), ls1x_get_sec(v));
- rtc_time_to_tm(t, rtm);
+ rtc_time64_to_tm(t, rtm);
return 0;
}
@@ -147,15 +148,13 @@ static int ls1x_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtcdev;
unsigned long v;
- int ret;
v = readl(SYS_COUNTER_CNTRL);
if (!(v & RTC_CNTR_OK)) {
dev_err(&pdev->dev, "rtc counters not working\n");
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
- ret = -ETIMEDOUT;
+
/* set to 1 HZ if needed */
if (readl(SYS_TOYTRIM) != 32767) {
v = 0x100000;
@@ -164,7 +163,7 @@ static int ls1x_rtc_probe(struct platform_device *pdev)
if (!v) {
dev_err(&pdev->dev, "time out\n");
- goto err;
+ return -ETIMEDOUT;
}
writel(32767, SYS_TOYTRIM);
}
@@ -172,17 +171,16 @@ static int ls1x_rtc_probe(struct platform_device *pdev)
while (readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TTS)
usleep_range(1000, 3000);
- rtcdev = devm_rtc_device_register(&pdev->dev, "ls1x-rtc",
- &ls1x_rtc_ops , THIS_MODULE);
- if (IS_ERR(rtcdev)) {
- ret = PTR_ERR(rtcdev);
- goto err;
- }
+ rtcdev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtcdev))
+ return PTR_ERR(rtcdev);
platform_set_drvdata(pdev, rtcdev);
- return 0;
-err:
- return ret;
+ rtcdev->ops = &ls1x_rtc_ops;
+ rtcdev->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rtcdev->range_max = RTC_TIMESTAMP_END_2099;
+
+ return rtc_register_device(rtcdev);
}
static struct platform_driver ls1x_rtc_driver = {
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 216fac62c888..1053a406b3aa 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -47,8 +47,7 @@ struct m48t59_private {
static void
m48t59_mem_writeb(struct device *dev, u32 ofs, u8 val)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct m48t59_private *m48t59 = dev_get_drvdata(dev);
writeb(val, m48t59->ioaddr+ofs);
}
@@ -56,8 +55,7 @@ m48t59_mem_writeb(struct device *dev, u32 ofs, u8 val)
static u8
m48t59_mem_readb(struct device *dev, u32 ofs)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct m48t59_private *m48t59 = dev_get_drvdata(dev);
return readb(m48t59->ioaddr+ofs);
}
@@ -67,9 +65,8 @@ m48t59_mem_readb(struct device *dev, u32 ofs)
*/
static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
- struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct m48t59_plat_data *pdata = dev_get_platdata(dev);
+ struct m48t59_private *m48t59 = dev_get_drvdata(dev);
unsigned long flags;
u8 val;
@@ -110,9 +107,8 @@ static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
- struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct m48t59_plat_data *pdata = dev_get_platdata(dev);
+ struct m48t59_private *m48t59 = dev_get_drvdata(dev);
unsigned long flags;
u8 val = 0;
int year = tm->tm_year;
@@ -157,9 +153,8 @@ static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
*/
static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
- struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct m48t59_plat_data *pdata = dev_get_platdata(dev);
+ struct m48t59_private *m48t59 = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
unsigned long flags;
u8 val;
@@ -204,9 +199,8 @@ static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
*/
static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
- struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct m48t59_plat_data *pdata = dev_get_platdata(dev);
+ struct m48t59_private *m48t59 = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
u8 mday, hour, min, sec;
unsigned long flags;
@@ -265,9 +259,8 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
*/
static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
- struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct m48t59_plat_data *pdata = dev_get_platdata(dev);
+ struct m48t59_private *m48t59 = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&m48t59->lock, flags);
@@ -282,9 +275,8 @@ static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
- struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct m48t59_plat_data *pdata = dev_get_platdata(dev);
+ struct m48t59_private *m48t59 = dev_get_drvdata(dev);
unsigned long flags;
u8 val;
@@ -303,9 +295,8 @@ static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
{
struct device *dev = (struct device *)dev_id;
- struct platform_device *pdev = to_platform_device(dev);
- struct m48t59_plat_data *pdata = dev_get_platdata(&pdev->dev);
- struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct m48t59_plat_data *pdata = dev_get_platdata(dev);
+ struct m48t59_private *m48t59 = dev_get_drvdata(dev);
u8 event;
spin_lock(&m48t59->lock);
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index fcb9de5218b2..097a4d4e2aba 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -45,7 +45,6 @@ struct mrst_rtc {
struct rtc_device *rtc;
struct device *dev;
int irq;
- struct resource *iomem;
u8 enabled_wake;
u8 suspend_ctrl;
@@ -329,24 +328,22 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
if (!iomem)
return -ENODEV;
- iomem = request_mem_region(iomem->start, resource_size(iomem),
- driver_name);
+ iomem = devm_request_mem_region(dev, iomem->start, resource_size(iomem),
+ driver_name);
if (!iomem) {
dev_dbg(dev, "i/o mem already in use.\n");
return -EBUSY;
}
mrst_rtc.irq = rtc_irq;
- mrst_rtc.iomem = iomem;
mrst_rtc.dev = dev;
dev_set_drvdata(dev, &mrst_rtc);
- mrst_rtc.rtc = rtc_device_register(driver_name, dev,
- &mrst_rtc_ops, THIS_MODULE);
- if (IS_ERR(mrst_rtc.rtc)) {
- retval = PTR_ERR(mrst_rtc.rtc);
- goto cleanup0;
- }
+ mrst_rtc.rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(mrst_rtc.rtc))
+ return PTR_ERR(mrst_rtc.rtc);
+
+ mrst_rtc.rtc->ops = &mrst_rtc_ops;
rename_region(iomem, dev_name(&mrst_rtc.rtc->dev));
@@ -359,23 +356,27 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
dev_dbg(dev, "TODO: support more than 24-hr BCD mode\n");
if (rtc_irq) {
- retval = request_irq(rtc_irq, mrst_rtc_irq,
- 0, dev_name(&mrst_rtc.rtc->dev),
- mrst_rtc.rtc);
+ retval = devm_request_irq(dev, rtc_irq, mrst_rtc_irq,
+ 0, dev_name(&mrst_rtc.rtc->dev),
+ mrst_rtc.rtc);
if (retval < 0) {
dev_dbg(dev, "IRQ %d is already in use, err %d\n",
rtc_irq, retval);
- goto cleanup1;
+ goto cleanup0;
}
}
+
+ retval = rtc_register_device(mrst_rtc.rtc);
+	if (retval) {
+		/* rtc_register_device() returns an errno; do not overwrite it
+		 * with PTR_ERR() of a valid pointer */
+		goto cleanup0;
+ }
+
dev_dbg(dev, "initialised\n");
return 0;
-cleanup1:
- rtc_device_unregister(mrst_rtc.rtc);
cleanup0:
mrst_rtc.dev = NULL;
- release_mem_region(iomem->start, resource_size(iomem));
dev_err(dev, "rtc-mrst: unable to initialise\n");
return retval;
}
@@ -390,20 +391,10 @@ static void rtc_mrst_do_shutdown(void)
static void rtc_mrst_do_remove(struct device *dev)
{
struct mrst_rtc *mrst = dev_get_drvdata(dev);
- struct resource *iomem;
rtc_mrst_do_shutdown();
- if (mrst->irq)
- free_irq(mrst->irq, mrst->rtc);
-
- rtc_device_unregister(mrst->rtc);
mrst->rtc = NULL;
-
- iomem = mrst->iomem;
- release_mem_region(iomem->start, resource_size(iomem));
- mrst->iomem = NULL;
-
mrst->dev = NULL;
}
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index bc52dbb0c0e2..4b198b3778d3 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -176,8 +176,7 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
if (pdata->irq < 0)
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index bce427d202ee..878c6ee82901 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -1,13 +1,6 @@
-/*
- * Copyright 2004-2008 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2004-2008 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/io.h>
#include <linux/rtc.h>
@@ -109,8 +102,7 @@ static inline int is_imx1_rtc(struct rtc_plat_data *data)
*/
static time64_t get_alarm_or_time(struct device *dev, int time_alarm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 day = 0, hr = 0, min = 0, sec = 0, hr_min = 0;
@@ -139,8 +131,7 @@ static time64_t get_alarm_or_time(struct device *dev, int time_alarm)
static void set_alarm_or_time(struct device *dev, int time_alarm, time64_t time)
{
u32 tod, day, hr, min, sec, temp;
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
day = div_s64_rem(time, 86400, &tod);
@@ -176,8 +167,7 @@ static void set_alarm_or_time(struct device *dev, int time_alarm, time64_t time)
static void rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
{
time64_t time;
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
time = rtc_tm_to_time64(alrm);
@@ -190,8 +180,7 @@ static void rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
unsigned int enabled)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 reg;
@@ -266,8 +255,7 @@ static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
*/
static int mxc_rtc_set_mmss(struct device *dev, time64_t time)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
/*
* TTC_DAYR register is 9-bit in MX1 SoC, save time and day of year only
@@ -295,8 +283,7 @@ static int mxc_rtc_set_mmss(struct device *dev, time64_t time)
*/
static int mxc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
rtc_time64_to_tm(get_alarm_or_time(dev, MXC_RTC_ALARM), &alrm->time);
@@ -310,8 +297,7 @@ static int mxc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
*/
static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
rtc_update_alarm(dev, &alrm->time);
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index 9e14efb990b2..c75f26dc8fcc 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -165,11 +165,6 @@ static int mxc_rtc_set_time(struct device *dev, struct rtc_time *tm)
time64_t time = rtc_tm_to_time64(tm);
int ret;
- if (time > U32_MAX) {
- dev_err(dev, "RTC exceeded by %llus\n", time - U32_MAX);
- return -EINVAL;
- }
-
ret = mxc_rtc_lock(pdata);
if (ret)
return ret;
@@ -198,7 +193,7 @@ static int mxc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret)
return ret;
- rtc_time_to_tm(readl(ioaddr + SRTC_LPSAR), &alrm->time);
+ rtc_time64_to_tm(readl(ioaddr + SRTC_LPSAR), &alrm->time);
alrm->pending = !!(readl(ioaddr + SRTC_LPSR) & SRTC_LPSR_ALP);
return mxc_rtc_unlock(pdata);
}
@@ -248,11 +243,6 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret)
return ret;
- if (time > U32_MAX) {
- dev_err(dev, "Hopefully I am out of service by then :-(\n");
- return -EINVAL;
- }
-
writel((u32)time, pdata->ioaddr + SRTC_LPSAR);
/* clear alarm interrupt status bit */
@@ -343,6 +333,13 @@ static int mxc_rtc_probe(struct platform_device *pdev)
return ret;
}
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(pdata->rtc))
+ return PTR_ERR(pdata->rtc);
+
+ pdata->rtc->ops = &mxc_rtc_ops;
+ pdata->rtc->range_max = U32_MAX;
+
clk_disable(pdata->clk);
platform_set_drvdata(pdev, pdata);
ret =
@@ -354,15 +351,11 @@ static int mxc_rtc_probe(struct platform_device *pdev)
return ret;
}
- pdata->rtc =
- devm_rtc_device_register(&pdev->dev, pdev->name, &mxc_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(pdata->rtc)) {
+ ret = rtc_register_device(pdata->rtc);
+ if (ret < 0)
clk_unprepare(pdata->clk);
- return PTR_ERR(pdata->rtc);
- }
- return 0;
+ return ret;
}
static int mxc_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index c05f524ba9af..f176cb9d0dbc 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -43,8 +43,7 @@ static irqreturn_t pcap_rtc_irq(int irq, void *_pcap_rtc)
static int pcap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ struct pcap_rtc *pcap_rtc = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
unsigned long secs;
u32 tod; /* time of day, seconds since midnight */
@@ -63,8 +62,7 @@ static int pcap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int pcap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ struct pcap_rtc *pcap_rtc = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
unsigned long secs;
u32 tod, days;
@@ -82,8 +80,7 @@ static int pcap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int pcap_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ struct pcap_rtc *pcap_rtc = dev_get_drvdata(dev);
unsigned long secs;
u32 tod, days;
@@ -100,8 +97,7 @@ static int pcap_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int pcap_rtc_set_mmss(struct device *dev, unsigned long secs)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ struct pcap_rtc *pcap_rtc = dev_get_drvdata(dev);
u32 tod, days;
tod = secs % SEC_PER_DAY;
@@ -115,8 +111,7 @@ static int pcap_rtc_set_mmss(struct device *dev, unsigned long secs)
static int pcap_rtc_irq_enable(struct device *dev, int pirq, unsigned int en)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ struct pcap_rtc *pcap_rtc = dev_get_drvdata(dev);
if (en)
enable_irq(pcap_to_irq(pcap_rtc->pcap, pirq));
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 47304f5664d8..e1887b86fdc7 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -363,7 +363,7 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
sa1100_rtc->rtar = pxa_rtc->base + 0x4;
sa1100_rtc->rttr = pxa_rtc->base + 0xc;
ret = sa1100_rtc_init(pdev, sa1100_rtc);
- if (!ret) {
+ if (ret) {
dev_err(dev, "Unable to init SA1100 RTC sub-device\n");
return ret;
}
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 32caadf912ca..eac882169744 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
+#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/log2.h>
@@ -51,52 +52,20 @@
#define RX8581_CTRL_RESET 0x01 /* RESET bit */
struct rx8581 {
- struct i2c_client *client;
+ struct regmap *regmap;
struct rtc_device *rtc;
- s32 (*read_block_data)(const struct i2c_client *client, u8 command,
- u8 length, u8 *values);
- s32 (*write_block_data)(const struct i2c_client *client, u8 command,
- u8 length, const u8 *values);
};
-static struct i2c_driver rx8581_driver;
-
-static int rx8581_read_block_data(const struct i2c_client *client, u8 command,
- u8 length, u8 *values)
-{
- s32 i, data;
-
- for (i = 0; i < length; i++) {
- data = i2c_smbus_read_byte_data(client, command + i);
- if (data < 0)
- return data;
- values[i] = data;
- }
- return i;
-}
-
-static int rx8581_write_block_data(const struct i2c_client *client, u8 command,
- u8 length, const u8 *values)
-{
- s32 i, ret;
-
- for (i = 0; i < length; i++) {
- ret = i2c_smbus_write_byte_data(client, command + i,
- values[i]);
- if (ret < 0)
- return ret;
- }
- return length;
-}
-
/*
* In the routines that deal directly with the rx8581 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
*/
-static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+static int rx8581_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
+ struct i2c_client *client = to_i2c_client(dev);
unsigned char date[7];
- int data, err;
+ unsigned int data;
+ int err;
struct rx8581 *rx8581 = i2c_get_clientdata(client);
/* First we ensure that the "update flag" is not set, we read the
@@ -104,45 +73,38 @@ static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm)
* has been set, we know that the time has changed during the read so
* we repeat the whole process again.
*/
- data = i2c_smbus_read_byte_data(client, RX8581_REG_FLAG);
- if (data < 0) {
- dev_err(&client->dev, "Unable to read device flags\n");
- return -EIO;
+ err = regmap_read(rx8581->regmap, RX8581_REG_FLAG, &data);
+ if (err < 0)
+ return err;
+
+ if (data & RX8581_FLAG_VLF) {
+ dev_warn(dev,
+ "low voltage detected, date/time is not reliable.\n");
+ return -EINVAL;
}
do {
/* If update flag set, clear it */
if (data & RX8581_FLAG_UF) {
- err = i2c_smbus_write_byte_data(client,
- RX8581_REG_FLAG, (data & ~RX8581_FLAG_UF));
- if (err != 0) {
- dev_err(&client->dev, "Unable to write device flags\n");
- return -EIO;
- }
+ err = regmap_write(rx8581->regmap, RX8581_REG_FLAG,
+ data & ~RX8581_FLAG_UF);
+ if (err < 0)
+ return err;
}
/* Now read time and date */
- err = rx8581->read_block_data(client, RX8581_REG_SC,
- 7, date);
- if (err < 0) {
- dev_err(&client->dev, "Unable to read date\n");
- return -EIO;
- }
+ err = regmap_bulk_read(rx8581->regmap, RX8581_REG_SC, date,
+ sizeof(date));
+ if (err < 0)
+ return err;
/* Check flag register */
- data = i2c_smbus_read_byte_data(client, RX8581_REG_FLAG);
- if (data < 0) {
- dev_err(&client->dev, "Unable to read device flags\n");
- return -EIO;
- }
+ err = regmap_read(rx8581->regmap, RX8581_REG_FLAG, &data);
+ if (err < 0)
+ return err;
} while (data & RX8581_FLAG_UF);
- if (data & RX8581_FLAG_VLF)
- dev_info(&client->dev,
- "low voltage detected, date/time is not reliable.\n");
-
- dev_dbg(&client->dev,
- "%s: raw data is sec=%02x, min=%02x, hr=%02x, "
+ dev_dbg(dev, "%s: raw data is sec=%02x, min=%02x, hr=%02x, "
"wday=%02x, mday=%02x, mon=%02x, year=%02x\n",
__func__,
date[0], date[1], date[2], date[3], date[4], date[5], date[6]);
@@ -153,12 +115,9 @@ static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm)
tm->tm_wday = ilog2(date[RX8581_REG_DW] & 0x7F);
tm->tm_mday = bcd2bin(date[RX8581_REG_DM] & 0x3F);
tm->tm_mon = bcd2bin(date[RX8581_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
- tm->tm_year = bcd2bin(date[RX8581_REG_YR]);
- if (tm->tm_year < 70)
- tm->tm_year += 100; /* assume we are in 1970...2069 */
-
+ tm->tm_year = bcd2bin(date[RX8581_REG_YR]) + 100;
- dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
@@ -167,13 +126,14 @@ static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm)
return 0;
}
-static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
+static int rx8581_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- int data, err;
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
unsigned char buf[7];
struct rx8581 *rx8581 = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
+ dev_dbg(dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
@@ -190,69 +150,30 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[RX8581_REG_MO] = bin2bcd(tm->tm_mon + 1);
/* year and century */
- buf[RX8581_REG_YR] = bin2bcd(tm->tm_year % 100);
+ buf[RX8581_REG_YR] = bin2bcd(tm->tm_year - 100);
buf[RX8581_REG_DW] = (0x1 << tm->tm_wday);
/* Stop the clock */
- data = i2c_smbus_read_byte_data(client, RX8581_REG_CTRL);
- if (data < 0) {
- dev_err(&client->dev, "Unable to read control register\n");
- return -EIO;
- }
-
- err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL,
- (data | RX8581_CTRL_STOP));
- if (err < 0) {
- dev_err(&client->dev, "Unable to write control register\n");
- return -EIO;
- }
+ err = regmap_update_bits(rx8581->regmap, RX8581_REG_CTRL,
+ RX8581_CTRL_STOP, RX8581_CTRL_STOP);
+ if (err < 0)
+ return err;
/* write register's data */
- err = rx8581->write_block_data(client, RX8581_REG_SC, 7, buf);
- if (err < 0) {
- dev_err(&client->dev, "Unable to write to date registers\n");
- return -EIO;
- }
+ err = regmap_bulk_write(rx8581->regmap, RX8581_REG_SC,
+ buf, sizeof(buf));
+ if (err < 0)
+ return err;
/* get VLF and clear it */
- data = i2c_smbus_read_byte_data(client, RX8581_REG_FLAG);
- if (data < 0) {
- dev_err(&client->dev, "Unable to read flag register\n");
- return -EIO;
- }
-
- err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG,
- (data & ~(RX8581_FLAG_VLF)));
- if (err != 0) {
- dev_err(&client->dev, "Unable to write flag register\n");
- return -EIO;
- }
+ err = regmap_update_bits(rx8581->regmap, RX8581_REG_FLAG,
+ RX8581_FLAG_VLF, 0);
+ if (err < 0)
+ return err;
/* Restart the clock */
- data = i2c_smbus_read_byte_data(client, RX8581_REG_CTRL);
- if (data < 0) {
- dev_err(&client->dev, "Unable to read control register\n");
- return -EIO;
- }
-
- err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL,
- (data & ~(RX8581_CTRL_STOP)));
- if (err != 0) {
- dev_err(&client->dev, "Unable to write control register\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static int rx8581_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- return rx8581_get_datetime(to_i2c_client(dev), tm);
-}
-
-static int rx8581_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- return rx8581_set_datetime(to_i2c_client(dev), tm);
+ return regmap_update_bits(rx8581->regmap, RX8581_REG_CTRL,
+ RX8581_CTRL_STOP, 0);
}
static const struct rtc_class_ops rx8581_rtc_ops = {
@@ -264,38 +185,35 @@ static int rx8581_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct rx8581 *rx8581;
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xf,
+ };
dev_dbg(&client->dev, "%s\n", __func__);
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)
- && !i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
- return -EIO;
-
rx8581 = devm_kzalloc(&client->dev, sizeof(struct rx8581), GFP_KERNEL);
if (!rx8581)
return -ENOMEM;
i2c_set_clientdata(client, rx8581);
- rx8581->client = client;
-
- if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
- rx8581->read_block_data = i2c_smbus_read_i2c_block_data;
- rx8581->write_block_data = i2c_smbus_write_i2c_block_data;
- } else {
- rx8581->read_block_data = rx8581_read_block_data;
- rx8581->write_block_data = rx8581_write_block_data;
- }
- rx8581->rtc = devm_rtc_device_register(&client->dev,
- rx8581_driver.driver.name, &rx8581_rtc_ops, THIS_MODULE);
+ rx8581->regmap = devm_regmap_init_i2c(client, &config);
+ if (IS_ERR(rx8581->regmap))
+ return PTR_ERR(rx8581->regmap);
- if (IS_ERR(rx8581->rtc)) {
- dev_err(&client->dev,
- "unable to register the class device\n");
+ rx8581->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rx8581->rtc))
return PTR_ERR(rx8581->rtc);
- }
- return 0;
+ rx8581->rtc->ops = &rx8581_rtc_ops;
+ rx8581->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rx8581->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rx8581->rtc->start_secs = 0;
+ rx8581->rtc->set_start_time = true;
+
+ return rtc_register_device(rx8581->rtc);
}
static const struct i2c_device_id rx8581_id[] = {
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
index 00d87d138984..deea5c3726ad 100644
--- a/drivers/rtc/rtc-sc27xx.c
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -35,6 +35,8 @@
#define SPRD_RTC_DAY_ALM_VALUE 0x4c
#define SPRD_RTC_SPG_VALUE 0x50
#define SPRD_RTC_SPG_UPD 0x54
+#define SPRD_RTC_PWR_CTRL 0x58
+#define SPRD_RTC_PWR_STS 0x5c
#define SPRD_RTC_SEC_AUXALM_UPD 0x60
#define SPRD_RTC_MIN_AUXALM_UPD 0x64
#define SPRD_RTC_HOUR_AUXALM_UPD 0x68
@@ -86,7 +88,13 @@
/* SPG values definition for SPRD_RTC_SPG_UPD register */
#define SPRD_RTC_POWEROFF_ALM_FLAG BIT(8)
-#define SPRD_RTC_POWER_RESET_FLAG BIT(9)
+
+/* power control/status definition */
+#define SPRD_RTC_POWER_RESET_VALUE 0x96
+#define SPRD_RTC_POWER_STS_CLEAR GENMASK(7, 0)
+#define SPRD_RTC_POWER_STS_SHIFT 8
+#define SPRD_RTC_POWER_STS_VALID \
+ (~SPRD_RTC_POWER_RESET_VALUE << SPRD_RTC_POWER_STS_SHIFT)
/* timeout of synchronizing time and alarm registers (us) */
#define SPRD_RTC_POLL_TIMEOUT 200000
@@ -383,7 +391,6 @@ static int sprd_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct sprd_rtc *rtc = dev_get_drvdata(dev);
time64_t secs = rtc_tm_to_time64(tm);
- u32 val;
int ret;
ret = sprd_rtc_set_secs(rtc, SPRD_RTC_TIME, secs);
@@ -391,27 +398,20 @@ static int sprd_rtc_set_time(struct device *dev, struct rtc_time *tm)
return ret;
if (!rtc->valid) {
- /*
- * Set SPRD_RTC_POWER_RESET_FLAG to indicate now RTC has valid
- * time values.
- */
- ret = regmap_update_bits(rtc->regmap,
- rtc->base + SPRD_RTC_SPG_UPD,
- SPRD_RTC_POWER_RESET_FLAG,
- SPRD_RTC_POWER_RESET_FLAG);
+ /* Clear RTC power status firstly */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_PWR_CTRL,
+ SPRD_RTC_POWER_STS_CLEAR);
if (ret)
return ret;
- ret = regmap_read_poll_timeout(rtc->regmap,
- rtc->base + SPRD_RTC_INT_RAW_STS,
- val, (val & SPRD_RTC_SPG_UPD_EN),
- SPRD_RTC_POLL_DELAY_US,
- SPRD_RTC_POLL_TIMEOUT);
- if (ret) {
- dev_err(rtc->dev, "failed to update SPG value:%d\n",
- ret);
+ /*
+ * Set RTC power status to indicate now RTC has valid time
+ * values.
+ */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_PWR_CTRL,
+ SPRD_RTC_POWER_STS_VALID);
+ if (ret)
return ret;
- }
rtc->valid = true;
}
@@ -562,15 +562,16 @@ static int sprd_rtc_check_power_down(struct sprd_rtc *rtc)
u32 val;
int ret;
- ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_PWR_STS, &val);
if (ret)
return ret;
/*
- * If the SPRD_RTC_POWER_RESET_FLAG was not set, which means the RTC has
- * been powered down, so the RTC time values are invalid.
+ * If the RTC power status value is SPRD_RTC_POWER_RESET_VALUE, which
+ * means the RTC has been powered down, so the RTC time values are
+ * invalid.
*/
- rtc->valid = (val & SPRD_RTC_POWER_RESET_FLAG) ? true : false;
+ rtc->valid = val == SPRD_RTC_POWER_RESET_VALUE ? false : true;
return 0;
}
@@ -600,6 +601,10 @@ static int sprd_rtc_probe(struct platform_device *pdev)
return rtc->irq;
}
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc))
+ return PTR_ERR(rtc->rtc);
+
rtc->dev = &pdev->dev;
platform_set_drvdata(pdev, rtc);
@@ -626,10 +631,14 @@ static int sprd_rtc_probe(struct platform_device *pdev)
return ret;
}
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &sprd_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtc))
- return PTR_ERR(rtc->rtc);
+ rtc->rtc->ops = &sprd_rtc_ops;
+ rtc->rtc->range_min = 0;
+ rtc->rtc->range_max = 5662310399LL;
+ ret = rtc_register_device(rtc->rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register rtc device\n");
+ return ret;
+ }
device_init_wakeup(&pdev->dev, 1);
return 0;
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 4e8ab370ce63..4f98543d1ea5 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -359,8 +359,7 @@ static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_rtc *rtc = platform_get_drvdata(pdev);
+ struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int sec128, sec2, yr, yr100, cf_bit;
do {
@@ -419,8 +418,7 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_rtc *rtc = platform_get_drvdata(pdev);
+ struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int tmp;
int year;
@@ -475,8 +473,7 @@ static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_rtc *rtc = platform_get_drvdata(pdev);
+ struct sh_rtc *rtc = dev_get_drvdata(dev);
struct rtc_time *tm = &wkalrm->time;
spin_lock_irq(&rtc->lock);
@@ -509,8 +506,7 @@ static inline void sh_rtc_write_alarm_value(struct sh_rtc *rtc,
static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_rtc *rtc = platform_get_drvdata(pdev);
+ struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int rcr1;
struct rtc_time *tm = &wkalrm->time;
int mon;
@@ -723,8 +719,7 @@ static int __exit sh_rtc_remove(struct platform_device *pdev)
static void sh_rtc_set_irq_wake(struct device *dev, int enabled)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_rtc *rtc = platform_get_drvdata(pdev);
+ struct sh_rtc *rtc = dev_get_drvdata(dev);
irq_set_irq_wake(rtc->periodic_irq, enabled);
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 9af591d5223c..8a75cc3af6e7 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -1,13 +1,6 @@
-/*
- * Copyright (C) 2011-2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2011-2012 Freescale Semiconductor, Inc.
#include <linux/init.h>
#include <linux/io.h>
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index d5222667f892..bee75ca7ff79 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -212,6 +212,10 @@ static int st_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
spin_lock_init(&rtc->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -253,26 +257,19 @@ static int st_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev = rtc_device_register("st-lpc-rtc", &pdev->dev,
- &st_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtc_dev)) {
+ rtc->rtc_dev->ops = &st_rtc_ops;
+ rtc->rtc_dev->range_max = U64_MAX;
+ do_div(rtc->rtc_dev->range_max, rtc->clkrate);
+
+ ret = rtc_register_device(rtc->rtc_dev);
+ if (ret) {
clk_disable_unprepare(rtc->clk);
- return PTR_ERR(rtc->rtc_dev);
+ return ret;
}
return 0;
}
-static int st_rtc_remove(struct platform_device *pdev)
-{
- struct st_rtc *rtc = platform_get_drvdata(pdev);
-
- if (likely(rtc->rtc_dev))
- rtc_device_unregister(rtc->rtc_dev);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int st_rtc_suspend(struct device *dev)
{
@@ -325,7 +322,6 @@ static struct platform_driver st_rtc_platform_driver = {
.of_match_table = st_rtc_match,
},
.probe = st_rtc_probe,
- .remove = st_rtc_remove,
};
module_platform_driver(st_rtc_platform_driver);
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index e70b78d17a98..fccbecbb2c98 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -74,8 +74,7 @@ struct rtc_plat_data {
static int stk17ta8_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u8 flags;
@@ -97,8 +96,7 @@ static int stk17ta8_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int stk17ta8_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
unsigned int year, month, day, hour, minute, second, week;
unsigned int century;
@@ -163,8 +161,7 @@ static void stk17ta8_rtc_update_alarm(struct rtc_plat_data *pdata)
static int stk17ta8_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
if (pdata->irq <= 0)
return -EINVAL;
@@ -180,8 +177,7 @@ static int stk17ta8_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int stk17ta8_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
if (pdata->irq <= 0)
return -EINVAL;
@@ -217,8 +213,7 @@ static irqreturn_t stk17ta8_rtc_interrupt(int irq, void *dev_id)
static int stk17ta8_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
if (pdata->irq <= 0)
return -EINVAL;
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index f25dabe8fd02..c5908cfea234 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) STMicroelectronics SA 2017
- * Author: Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
+ * Copyright (C) STMicroelectronics 2017
+ * Author: Amelie Delaunay <amelie.delaunay@st.com>
*/
#include <linux/bcd.h>
@@ -11,20 +11,12 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#define DRIVER_NAME "stm32_rtc"
-/* STM32 RTC registers */
-#define STM32_RTC_TR 0x00
-#define STM32_RTC_DR 0x04
-#define STM32_RTC_CR 0x08
-#define STM32_RTC_ISR 0x0C
-#define STM32_RTC_PRER 0x10
-#define STM32_RTC_ALRMAR 0x1C
-#define STM32_RTC_WPR 0x24
-
/* STM32_RTC_TR bit fields */
#define STM32_RTC_TR_SEC_SHIFT 0
#define STM32_RTC_TR_SEC GENMASK(6, 0)
@@ -48,7 +40,7 @@
#define STM32_RTC_CR_ALRAE BIT(8)
#define STM32_RTC_CR_ALRAIE BIT(12)
-/* STM32_RTC_ISR bit fields */
+/* STM32_RTC_ISR/STM32_RTC_ICSR bit fields */
#define STM32_RTC_ISR_ALRAWF BIT(0)
#define STM32_RTC_ISR_INITS BIT(4)
#define STM32_RTC_ISR_RSF BIT(5)
@@ -80,52 +72,87 @@
#define STM32_RTC_ALRMXR_WDAY GENMASK(27, 24)
#define STM32_RTC_ALRMXR_DATE_MASK BIT(31)
+/* STM32_RTC_SR/_SCR bit fields */
+#define STM32_RTC_SR_ALRA BIT(0)
+
+/* STM32_RTC_VERR bit fields */
+#define STM32_RTC_VERR_MINREV_SHIFT 0
+#define STM32_RTC_VERR_MINREV GENMASK(3, 0)
+#define STM32_RTC_VERR_MAJREV_SHIFT 4
+#define STM32_RTC_VERR_MAJREV GENMASK(7, 4)
+
/* STM32_RTC_WPR key constants */
#define RTC_WPR_1ST_KEY 0xCA
#define RTC_WPR_2ND_KEY 0x53
#define RTC_WPR_WRONG_KEY 0xFF
-/*
- * RTC registers are protected against parasitic write access.
- * PWR_CR_DBP bit must be set to enable write access to RTC registers.
- */
-/* STM32_PWR_CR */
-#define PWR_CR 0x00
-/* STM32_PWR_CR bit field */
-#define PWR_CR_DBP BIT(8)
+/* Max STM32 RTC register offset is 0x3FC */
+#define UNDEF_REG 0xFFFF
+
+struct stm32_rtc;
+
+struct stm32_rtc_registers {
+ u16 tr;
+ u16 dr;
+ u16 cr;
+ u16 isr;
+ u16 prer;
+ u16 alrmar;
+ u16 wpr;
+ u16 sr;
+ u16 scr;
+ u16 verr;
+};
+
+struct stm32_rtc_events {
+ u32 alra;
+};
struct stm32_rtc_data {
+ const struct stm32_rtc_registers regs;
+ const struct stm32_rtc_events events;
+ void (*clear_events)(struct stm32_rtc *rtc, unsigned int flags);
bool has_pclk;
+ bool need_dbp;
+ bool has_wakeirq;
};
struct stm32_rtc {
struct rtc_device *rtc_dev;
void __iomem *base;
struct regmap *dbp;
- struct stm32_rtc_data *data;
+ unsigned int dbp_reg;
+ unsigned int dbp_mask;
struct clk *pclk;
struct clk *rtc_ck;
+ const struct stm32_rtc_data *data;
int irq_alarm;
+ int wakeirq_alarm;
};
static void stm32_rtc_wpr_unlock(struct stm32_rtc *rtc)
{
- writel_relaxed(RTC_WPR_1ST_KEY, rtc->base + STM32_RTC_WPR);
- writel_relaxed(RTC_WPR_2ND_KEY, rtc->base + STM32_RTC_WPR);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
+
+ writel_relaxed(RTC_WPR_1ST_KEY, rtc->base + regs->wpr);
+ writel_relaxed(RTC_WPR_2ND_KEY, rtc->base + regs->wpr);
}
static void stm32_rtc_wpr_lock(struct stm32_rtc *rtc)
{
- writel_relaxed(RTC_WPR_WRONG_KEY, rtc->base + STM32_RTC_WPR);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
+
+ writel_relaxed(RTC_WPR_WRONG_KEY, rtc->base + regs->wpr);
}
static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
{
- unsigned int isr = readl_relaxed(rtc->base + STM32_RTC_ISR);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
+ unsigned int isr = readl_relaxed(rtc->base + regs->isr);
if (!(isr & STM32_RTC_ISR_INITF)) {
isr |= STM32_RTC_ISR_INIT;
- writel_relaxed(isr, rtc->base + STM32_RTC_ISR);
+ writel_relaxed(isr, rtc->base + regs->isr);
/*
* It takes around 2 rtc_ck clock cycles to enter in
@@ -134,7 +161,7 @@ static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
* 1MHz, we poll every 10 us with a timeout of 100ms.
*/
return readl_relaxed_poll_timeout_atomic(
- rtc->base + STM32_RTC_ISR,
+ rtc->base + regs->isr,
isr, (isr & STM32_RTC_ISR_INITF),
10, 100000);
}
@@ -144,40 +171,50 @@ static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
static void stm32_rtc_exit_init_mode(struct stm32_rtc *rtc)
{
- unsigned int isr = readl_relaxed(rtc->base + STM32_RTC_ISR);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
+ unsigned int isr = readl_relaxed(rtc->base + regs->isr);
isr &= ~STM32_RTC_ISR_INIT;
- writel_relaxed(isr, rtc->base + STM32_RTC_ISR);
+ writel_relaxed(isr, rtc->base + regs->isr);
}
static int stm32_rtc_wait_sync(struct stm32_rtc *rtc)
{
- unsigned int isr = readl_relaxed(rtc->base + STM32_RTC_ISR);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
+ unsigned int isr = readl_relaxed(rtc->base + regs->isr);
isr &= ~STM32_RTC_ISR_RSF;
- writel_relaxed(isr, rtc->base + STM32_RTC_ISR);
+ writel_relaxed(isr, rtc->base + regs->isr);
/*
* Wait for RSF to be set to ensure the calendar registers are
* synchronised, it takes around 2 rtc_ck clock cycles
*/
- return readl_relaxed_poll_timeout_atomic(rtc->base + STM32_RTC_ISR,
+ return readl_relaxed_poll_timeout_atomic(rtc->base + regs->isr,
isr,
(isr & STM32_RTC_ISR_RSF),
10, 100000);
}
+static void stm32_rtc_clear_event_flags(struct stm32_rtc *rtc,
+ unsigned int flags)
+{
+ rtc->data->clear_events(rtc, flags);
+}
+
static irqreturn_t stm32_rtc_alarm_irq(int irq, void *dev_id)
{
struct stm32_rtc *rtc = (struct stm32_rtc *)dev_id;
- unsigned int isr, cr;
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
+ const struct stm32_rtc_events *evts = &rtc->data->events;
+ unsigned int status, cr;
mutex_lock(&rtc->rtc_dev->ops_lock);
- isr = readl_relaxed(rtc->base + STM32_RTC_ISR);
- cr = readl_relaxed(rtc->base + STM32_RTC_CR);
+ status = readl_relaxed(rtc->base + regs->sr);
+ cr = readl_relaxed(rtc->base + regs->cr);
- if ((isr & STM32_RTC_ISR_ALRAF) &&
+ if ((status & evts->alra) &&
(cr & STM32_RTC_CR_ALRAIE)) {
/* Alarm A flag - Alarm interrupt */
dev_dbg(&rtc->rtc_dev->dev, "Alarm occurred\n");
@@ -185,9 +222,8 @@ static irqreturn_t stm32_rtc_alarm_irq(int irq, void *dev_id)
/* Pass event to the kernel */
rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
- /* Clear event flag, otherwise new events won't be received */
- writel_relaxed(isr & ~STM32_RTC_ISR_ALRAF,
- rtc->base + STM32_RTC_ISR);
+ /* Clear event flags, otherwise new events won't be received */
+ stm32_rtc_clear_event_flags(rtc, evts->alra);
}
mutex_unlock(&rtc->rtc_dev->ops_lock);
@@ -234,11 +270,12 @@ static void bcd2tm(struct rtc_time *tm)
static int stm32_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct stm32_rtc *rtc = dev_get_drvdata(dev);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
unsigned int tr, dr;
/* Time and Date in BCD format */
- tr = readl_relaxed(rtc->base + STM32_RTC_TR);
- dr = readl_relaxed(rtc->base + STM32_RTC_DR);
+ tr = readl_relaxed(rtc->base + regs->tr);
+ dr = readl_relaxed(rtc->base + regs->dr);
tm->tm_sec = (tr & STM32_RTC_TR_SEC) >> STM32_RTC_TR_SEC_SHIFT;
tm->tm_min = (tr & STM32_RTC_TR_MIN) >> STM32_RTC_TR_MIN_SHIFT;
@@ -259,6 +296,7 @@ static int stm32_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int stm32_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct stm32_rtc *rtc = dev_get_drvdata(dev);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
unsigned int tr, dr;
int ret = 0;
@@ -283,8 +321,8 @@ static int stm32_rtc_set_time(struct device *dev, struct rtc_time *tm)
goto end;
}
- writel_relaxed(tr, rtc->base + STM32_RTC_TR);
- writel_relaxed(dr, rtc->base + STM32_RTC_DR);
+ writel_relaxed(tr, rtc->base + regs->tr);
+ writel_relaxed(dr, rtc->base + regs->dr);
stm32_rtc_exit_init_mode(rtc);
@@ -298,12 +336,14 @@ end:
static int stm32_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct stm32_rtc *rtc = dev_get_drvdata(dev);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
+ const struct stm32_rtc_events *evts = &rtc->data->events;
struct rtc_time *tm = &alrm->time;
- unsigned int alrmar, cr, isr;
+ unsigned int alrmar, cr, status;
- alrmar = readl_relaxed(rtc->base + STM32_RTC_ALRMAR);
- cr = readl_relaxed(rtc->base + STM32_RTC_CR);
- isr = readl_relaxed(rtc->base + STM32_RTC_ISR);
+ alrmar = readl_relaxed(rtc->base + regs->alrmar);
+ cr = readl_relaxed(rtc->base + regs->cr);
+ status = readl_relaxed(rtc->base + regs->sr);
if (alrmar & STM32_RTC_ALRMXR_DATE_MASK) {
/*
@@ -356,7 +396,7 @@ static int stm32_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
bcd2tm(tm);
alrm->enabled = (cr & STM32_RTC_CR_ALRAE) ? 1 : 0;
- alrm->pending = (isr & STM32_RTC_ISR_ALRAF) ? 1 : 0;
+ alrm->pending = (status & evts->alra) ? 1 : 0;
return 0;
}
@@ -364,9 +404,11 @@ static int stm32_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int stm32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct stm32_rtc *rtc = dev_get_drvdata(dev);
- unsigned int isr, cr;
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
+ const struct stm32_rtc_events *evts = &rtc->data->events;
+ unsigned int cr;
- cr = readl_relaxed(rtc->base + STM32_RTC_CR);
+ cr = readl_relaxed(rtc->base + regs->cr);
stm32_rtc_wpr_unlock(rtc);
@@ -375,12 +417,10 @@ static int stm32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
cr |= (STM32_RTC_CR_ALRAIE | STM32_RTC_CR_ALRAE);
else
cr &= ~(STM32_RTC_CR_ALRAIE | STM32_RTC_CR_ALRAE);
- writel_relaxed(cr, rtc->base + STM32_RTC_CR);
+ writel_relaxed(cr, rtc->base + regs->cr);
- /* Clear event flag, otherwise new events won't be received */
- isr = readl_relaxed(rtc->base + STM32_RTC_ISR);
- isr &= ~STM32_RTC_ISR_ALRAF;
- writel_relaxed(isr, rtc->base + STM32_RTC_ISR);
+ /* Clear event flags, otherwise new events won't be received */
+ stm32_rtc_clear_event_flags(rtc, evts->alra);
stm32_rtc_wpr_lock(rtc);
@@ -389,9 +429,10 @@ static int stm32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
static int stm32_rtc_valid_alrm(struct stm32_rtc *rtc, struct rtc_time *tm)
{
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
int cur_day, cur_mon, cur_year, cur_hour, cur_min, cur_sec;
- unsigned int dr = readl_relaxed(rtc->base + STM32_RTC_DR);
- unsigned int tr = readl_relaxed(rtc->base + STM32_RTC_TR);
+ unsigned int dr = readl_relaxed(rtc->base + regs->dr);
+ unsigned int tr = readl_relaxed(rtc->base + regs->tr);
cur_day = (dr & STM32_RTC_DR_DATE) >> STM32_RTC_DR_DATE_SHIFT;
cur_mon = (dr & STM32_RTC_DR_MONTH) >> STM32_RTC_DR_MONTH_SHIFT;
@@ -425,6 +466,7 @@ static int stm32_rtc_valid_alrm(struct stm32_rtc *rtc, struct rtc_time *tm)
static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct stm32_rtc *rtc = dev_get_drvdata(dev);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
struct rtc_time *tm = &alrm->time;
unsigned int cr, isr, alrmar;
int ret = 0;
@@ -456,15 +498,15 @@ static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
stm32_rtc_wpr_unlock(rtc);
/* Disable Alarm */
- cr = readl_relaxed(rtc->base + STM32_RTC_CR);
+ cr = readl_relaxed(rtc->base + regs->cr);
cr &= ~STM32_RTC_CR_ALRAE;
- writel_relaxed(cr, rtc->base + STM32_RTC_CR);
+ writel_relaxed(cr, rtc->base + regs->cr);
/*
* Poll Alarm write flag to be sure that Alarm update is allowed: it
* takes around 2 rtc_ck clock cycles
*/
- ret = readl_relaxed_poll_timeout_atomic(rtc->base + STM32_RTC_ISR,
+ ret = readl_relaxed_poll_timeout_atomic(rtc->base + regs->isr,
isr,
(isr & STM32_RTC_ISR_ALRAWF),
10, 100000);
@@ -475,7 +517,7 @@ static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
/* Write to Alarm register */
- writel_relaxed(alrmar, rtc->base + STM32_RTC_ALRMAR);
+ writel_relaxed(alrmar, rtc->base + regs->alrmar);
if (alrm->enabled)
stm32_rtc_alarm_irq_enable(dev, 1);
@@ -496,17 +538,95 @@ static const struct rtc_class_ops stm32_rtc_ops = {
.alarm_irq_enable = stm32_rtc_alarm_irq_enable,
};
+static void stm32_rtc_clear_events(struct stm32_rtc *rtc,
+ unsigned int flags)
+{
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
+
+ /* Flags are cleared by writing 0 in RTC_ISR */
+ writel_relaxed(readl_relaxed(rtc->base + regs->isr) & ~flags,
+ rtc->base + regs->isr);
+}
+
static const struct stm32_rtc_data stm32_rtc_data = {
.has_pclk = false,
+ .need_dbp = true,
+ .has_wakeirq = false,
+ .regs = {
+ .tr = 0x00,
+ .dr = 0x04,
+ .cr = 0x08,
+ .isr = 0x0C,
+ .prer = 0x10,
+ .alrmar = 0x1C,
+ .wpr = 0x24,
+ .sr = 0x0C, /* set to ISR offset to ease alarm management */
+ .scr = UNDEF_REG,
+ .verr = UNDEF_REG,
+ },
+ .events = {
+ .alra = STM32_RTC_ISR_ALRAF,
+ },
+ .clear_events = stm32_rtc_clear_events,
};
static const struct stm32_rtc_data stm32h7_rtc_data = {
.has_pclk = true,
+ .need_dbp = true,
+ .has_wakeirq = false,
+ .regs = {
+ .tr = 0x00,
+ .dr = 0x04,
+ .cr = 0x08,
+ .isr = 0x0C,
+ .prer = 0x10,
+ .alrmar = 0x1C,
+ .wpr = 0x24,
+ .sr = 0x0C, /* set to ISR offset to ease alarm management */
+ .scr = UNDEF_REG,
+ .verr = UNDEF_REG,
+ },
+ .events = {
+ .alra = STM32_RTC_ISR_ALRAF,
+ },
+ .clear_events = stm32_rtc_clear_events,
+};
+
+static void stm32mp1_rtc_clear_events(struct stm32_rtc *rtc,
+ unsigned int flags)
+{
+ struct stm32_rtc_registers regs = rtc->data->regs;
+
+ /* Flags are cleared by writing 1 in RTC_SCR */
+ writel_relaxed(flags, rtc->base + regs.scr);
+}
+
+static const struct stm32_rtc_data stm32mp1_data = {
+ .has_pclk = true,
+ .need_dbp = false,
+ .has_wakeirq = true,
+ .regs = {
+ .tr = 0x00,
+ .dr = 0x04,
+ .cr = 0x18,
+ .isr = 0x0C, /* named RTC_ICSR on stm32mp1 */
+ .prer = 0x10,
+ .alrmar = 0x40,
+ .wpr = 0x24,
+ .sr = 0x50,
+ .scr = 0x5C,
+ .verr = 0x3F4,
+ },
+ .events = {
+ .alra = STM32_RTC_SR_ALRA,
+ },
+ .clear_events = stm32mp1_rtc_clear_events,
};
static const struct of_device_id stm32_rtc_of_match[] = {
{ .compatible = "st,stm32-rtc", .data = &stm32_rtc_data },
{ .compatible = "st,stm32h7-rtc", .data = &stm32h7_rtc_data },
+ { .compatible = "st,stm32mp1-rtc", .data = &stm32mp1_data },
{}
};
MODULE_DEVICE_TABLE(of, stm32_rtc_of_match);
@@ -514,6 +634,7 @@ MODULE_DEVICE_TABLE(of, stm32_rtc_of_match);
static int stm32_rtc_init(struct platform_device *pdev,
struct stm32_rtc *rtc)
{
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
unsigned int prer, pred_a, pred_s, pred_a_max, pred_s_max, cr;
unsigned int rate;
int ret = 0;
@@ -554,14 +675,14 @@ static int stm32_rtc_init(struct platform_device *pdev,
}
prer = (pred_s << STM32_RTC_PRER_PRED_S_SHIFT) & STM32_RTC_PRER_PRED_S;
- writel_relaxed(prer, rtc->base + STM32_RTC_PRER);
+ writel_relaxed(prer, rtc->base + regs->prer);
prer |= (pred_a << STM32_RTC_PRER_PRED_A_SHIFT) & STM32_RTC_PRER_PRED_A;
- writel_relaxed(prer, rtc->base + STM32_RTC_PRER);
+ writel_relaxed(prer, rtc->base + regs->prer);
/* Force 24h time format */
- cr = readl_relaxed(rtc->base + STM32_RTC_CR);
+ cr = readl_relaxed(rtc->base + regs->cr);
cr &= ~STM32_RTC_CR_FMT;
- writel_relaxed(cr, rtc->base + STM32_RTC_CR);
+ writel_relaxed(cr, rtc->base + regs->cr);
stm32_rtc_exit_init_mode(rtc);
@@ -575,8 +696,8 @@ end:
static int stm32_rtc_probe(struct platform_device *pdev)
{
struct stm32_rtc *rtc;
+ const struct stm32_rtc_registers *regs;
struct resource *res;
- const struct of_device_id *match;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -588,15 +709,32 @@ static int stm32_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
- rtc->dbp = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "st,syscfg");
- if (IS_ERR(rtc->dbp)) {
- dev_err(&pdev->dev, "no st,syscfg\n");
- return PTR_ERR(rtc->dbp);
- }
+ rtc->data = (struct stm32_rtc_data *)
+ of_device_get_match_data(&pdev->dev);
+ regs = &rtc->data->regs;
+
+ if (rtc->data->need_dbp) {
+ rtc->dbp = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "st,syscfg");
+ if (IS_ERR(rtc->dbp)) {
+ dev_err(&pdev->dev, "no st,syscfg\n");
+ return PTR_ERR(rtc->dbp);
+ }
+
+ ret = of_property_read_u32_index(pdev->dev.of_node, "st,syscfg",
+ 1, &rtc->dbp_reg);
+ if (ret) {
+ dev_err(&pdev->dev, "can't read DBP register offset\n");
+ return ret;
+ }
- match = of_match_device(stm32_rtc_of_match, &pdev->dev);
- rtc->data = (struct stm32_rtc_data *)match->data;
+ ret = of_property_read_u32_index(pdev->dev.of_node, "st,syscfg",
+ 2, &rtc->dbp_mask);
+ if (ret) {
+ dev_err(&pdev->dev, "can't read DBP register mask\n");
+ return ret;
+ }
+ }
if (!rtc->data->has_pclk) {
rtc->pclk = NULL;
@@ -624,11 +762,13 @@ static int stm32_rtc_probe(struct platform_device *pdev)
if (ret)
goto err;
- regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);
+ if (rtc->data->need_dbp)
+ regmap_update_bits(rtc->dbp, rtc->dbp_reg,
+ rtc->dbp_mask, rtc->dbp_mask);
/*
* After a system reset, RTC_ISR.INITS flag can be read to check if
- * the calendar has been initalized or not. INITS flag is reset by a
+ * the calendar has been initialized or not. INITS flag is reset by a
* power-on reset (no vbat, no power-supply). It is not reset if
* rtc_ck parent clock has changed (so RTC prescalers need to be
* changed). That's why we cannot rely on this flag to know if RTC
@@ -645,15 +785,22 @@ static int stm32_rtc_probe(struct platform_device *pdev)
goto err;
}
- platform_set_drvdata(pdev, rtc);
-
ret = device_init_wakeup(&pdev->dev, true);
+ if (rtc->data->has_wakeirq) {
+ rtc->wakeirq_alarm = platform_get_irq(pdev, 1);
+ if (rtc->wakeirq_alarm <= 0)
+ ret = rtc->wakeirq_alarm;
+ else
+ ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
+ rtc->wakeirq_alarm);
+ }
if (ret)
- dev_warn(&pdev->dev,
- "alarm won't be able to wake up the system");
+ dev_warn(&pdev->dev, "alarm can't wake up the system: %d", ret);
+
+ platform_set_drvdata(pdev, rtc);
rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, pdev->name,
- &stm32_rtc_ops, THIS_MODULE);
+ &stm32_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc_dev)) {
ret = PTR_ERR(rtc->rtc_dev);
dev_err(&pdev->dev, "rtc device registration failed, err=%d\n",
@@ -663,8 +810,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
/* Handle RTC alarm interrupts */
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq_alarm, NULL,
- stm32_rtc_alarm_irq,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ stm32_rtc_alarm_irq, IRQF_ONESHOT,
pdev->name, rtc);
if (ret) {
dev_err(&pdev->dev, "IRQ%d (alarm interrupt) already claimed\n",
@@ -676,17 +822,27 @@ static int stm32_rtc_probe(struct platform_device *pdev)
* If INITS flag is reset (calendar year field set to 0x00), calendar
* must be initialized
*/
- if (!(readl_relaxed(rtc->base + STM32_RTC_ISR) & STM32_RTC_ISR_INITS))
+ if (!(readl_relaxed(rtc->base + regs->isr) & STM32_RTC_ISR_INITS))
dev_warn(&pdev->dev, "Date/Time must be initialized\n");
+ if (regs->verr != UNDEF_REG) {
+ u32 ver = readl_relaxed(rtc->base + regs->verr);
+
+ dev_info(&pdev->dev, "registered rev:%d.%d\n",
+ (ver >> STM32_RTC_VERR_MAJREV_SHIFT) & 0xF,
+ (ver >> STM32_RTC_VERR_MINREV_SHIFT) & 0xF);
+ }
+
return 0;
err:
if (rtc->data->has_pclk)
clk_disable_unprepare(rtc->pclk);
clk_disable_unprepare(rtc->rtc_ck);
- regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, 0);
+ if (rtc->data->need_dbp)
+ regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
+ dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
return ret;
@@ -695,22 +851,25 @@ err:
static int stm32_rtc_remove(struct platform_device *pdev)
{
struct stm32_rtc *rtc = platform_get_drvdata(pdev);
+ const struct stm32_rtc_registers *regs = &rtc->data->regs;
unsigned int cr;
/* Disable interrupts */
stm32_rtc_wpr_unlock(rtc);
- cr = readl_relaxed(rtc->base + STM32_RTC_CR);
+ cr = readl_relaxed(rtc->base + regs->cr);
cr &= ~STM32_RTC_CR_ALRAIE;
- writel_relaxed(cr, rtc->base + STM32_RTC_CR);
+ writel_relaxed(cr, rtc->base + regs->cr);
stm32_rtc_wpr_lock(rtc);
clk_disable_unprepare(rtc->rtc_ck);
if (rtc->data->has_pclk)
clk_disable_unprepare(rtc->pclk);
- /* Enable backup domain write protection */
- regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, 0);
+ /* Enable backup domain write protection if needed */
+ if (rtc->data->need_dbp)
+ regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
+ dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
return 0;
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 2e6fb275acc8..2cd5a7b1a2e3 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -74,7 +74,7 @@
#define SUN6I_ALARM_CONFIG_WAKEUP BIT(0)
#define SUN6I_LOSC_OUT_GATING 0x0060
-#define SUN6I_LOSC_OUT_GATING_EN BIT(0)
+#define SUN6I_LOSC_OUT_GATING_EN_OFFSET 0
/*
* Get date values
@@ -255,7 +255,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
&clkout_name);
rtc->ext_losc = clk_register_gate(NULL, clkout_name, rtc->hw.init->name,
0, rtc->base + SUN6I_LOSC_OUT_GATING,
- SUN6I_LOSC_OUT_GATING_EN, 0,
+ SUN6I_LOSC_OUT_GATING_EN_OFFSET, 0,
&rtc->lock);
if (IS_ERR(rtc->ext_losc)) {
pr_crit("Couldn't register the LOSC external gate\n");
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index dadbf8b324ad..21865d3d8fe8 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -445,6 +445,10 @@ static int sunxi_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
chip->dev = &pdev->dev;
+ chip->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(chip->rtc))
+ return PTR_ERR(chip->rtc);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
chip->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(chip->base))
@@ -481,11 +485,12 @@ static int sunxi_rtc_probe(struct platform_device *pdev)
writel(SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND, chip->base +
SUNXI_ALRM_IRQ_STA);
- chip->rtc = rtc_device_register("rtc-sunxi", &pdev->dev,
- &sunxi_rtc_ops, THIS_MODULE);
- if (IS_ERR(chip->rtc)) {
+ chip->rtc->ops = &sunxi_rtc_ops;
+
+ ret = rtc_register_device(chip->rtc);
+ if (ret) {
dev_err(&pdev->dev, "unable to register device\n");
- return PTR_ERR(chip->rtc);
+ return ret;
}
dev_info(&pdev->dev, "RTC enabled\n");
@@ -493,18 +498,8 @@ static int sunxi_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int sunxi_rtc_remove(struct platform_device *pdev)
-{
- struct sunxi_rtc_dev *chip = platform_get_drvdata(pdev);
-
- rtc_device_unregister(chip->rtc);
-
- return 0;
-}
-
static struct platform_driver sunxi_rtc_driver = {
.probe = sunxi_rtc_probe,
- .remove = sunxi_rtc_remove,
.driver = {
.name = "sunxi-rtc",
.of_match_table = sunxi_rtc_dt_ids,
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 3a2da4c892d6..8469256edc2a 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -13,135 +13,139 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
-static int test_mmss64;
-module_param(test_mmss64, int, 0644);
-MODULE_PARM_DESC(test_mmss64, "Test struct rtc_class_ops.set_mmss64().");
+#define MAX_RTC_TEST 3
-static struct platform_device *test0 = NULL, *test1 = NULL;
+struct rtc_test_data {
+ struct rtc_device *rtc;
+ time64_t offset;
+ struct timer_list alarm;
+ bool alarm_en;
+};
-static int test_rtc_read_alarm(struct device *dev,
- struct rtc_wkalrm *alrm)
-{
- return 0;
-}
+struct platform_device *pdev[MAX_RTC_TEST];
-static int test_rtc_set_alarm(struct device *dev,
- struct rtc_wkalrm *alrm)
+static int test_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- return 0;
-}
+ struct rtc_test_data *rtd = dev_get_drvdata(dev);
+ time64_t alarm;
+
+ alarm = (rtd->alarm.expires - jiffies) / HZ;
+ alarm += ktime_get_real_seconds() + rtd->offset;
+
+ rtc_time64_to_tm(alarm, &alrm->time);
+ alrm->enabled = rtd->alarm_en;
-static int test_rtc_read_time(struct device *dev,
- struct rtc_time *tm)
-{
- rtc_time64_to_tm(ktime_get_real_seconds(), tm);
return 0;
}
-static int test_rtc_set_mmss64(struct device *dev, time64_t secs)
+static int test_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- dev_info(dev, "%s, secs = %lld\n", __func__, (long long)secs);
+ struct rtc_test_data *rtd = dev_get_drvdata(dev);
+ ktime_t timeout;
+ u64 expires;
+
+ timeout = rtc_tm_to_time64(&alrm->time) - ktime_get_real_seconds();
+ timeout -= rtd->offset;
+
+ del_timer(&rtd->alarm);
+
+ expires = jiffies + timeout * HZ;
+ if (expires > U32_MAX)
+ expires = U32_MAX;
+
+ pr_err("ABE: %s +%d %s\n", __FILE__, __LINE__, __func__);
+ rtd->alarm.expires = expires;
+
+ if (alrm->enabled)
+ add_timer(&rtd->alarm);
+
+ rtd->alarm_en = alrm->enabled;
+
return 0;
}
-static int test_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int test_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- dev_info(dev, "%s, secs = %lu\n", __func__, secs);
+ struct rtc_test_data *rtd = dev_get_drvdata(dev);
+
+ rtc_time64_to_tm(ktime_get_real_seconds() + rtd->offset, tm);
+
return 0;
}
-static int test_rtc_proc(struct device *dev, struct seq_file *seq)
+static int test_rtc_set_mmss64(struct device *dev, time64_t secs)
{
- struct platform_device *plat_dev = to_platform_device(dev);
+ struct rtc_test_data *rtd = dev_get_drvdata(dev);
- seq_printf(seq, "test\t\t: yes\n");
- seq_printf(seq, "id\t\t: %d\n", plat_dev->id);
+ rtd->offset = secs - ktime_get_real_seconds();
return 0;
}
static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
+ struct rtc_test_data *rtd = dev_get_drvdata(dev);
+
+ rtd->alarm_en = enable;
+ if (enable)
+ add_timer(&rtd->alarm);
+ else
+ del_timer(&rtd->alarm);
+
return 0;
}
-static struct rtc_class_ops test_rtc_ops = {
- .proc = test_rtc_proc,
+static const struct rtc_class_ops test_rtc_ops_noalm = {
+ .read_time = test_rtc_read_time,
+ .set_mmss64 = test_rtc_set_mmss64,
+ .alarm_irq_enable = test_rtc_alarm_irq_enable,
+};
+
+static const struct rtc_class_ops test_rtc_ops = {
.read_time = test_rtc_read_time,
.read_alarm = test_rtc_read_alarm,
.set_alarm = test_rtc_set_alarm,
- .set_mmss = test_rtc_set_mmss,
+ .set_mmss64 = test_rtc_set_mmss64,
.alarm_irq_enable = test_rtc_alarm_irq_enable,
};
-static ssize_t test_irq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static void test_rtc_alarm_handler(struct timer_list *t)
{
- return sprintf(buf, "%d\n", 42);
-}
-static ssize_t test_irq_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int retval;
- struct platform_device *plat_dev = to_platform_device(dev);
- struct rtc_device *rtc = platform_get_drvdata(plat_dev);
-
- retval = count;
- if (strncmp(buf, "tick", 4) == 0 && rtc->pie_enabled)
- rtc_update_irq(rtc, 1, RTC_PF | RTC_IRQF);
- else if (strncmp(buf, "alarm", 5) == 0) {
- struct rtc_wkalrm alrm;
- int err = rtc_read_alarm(rtc, &alrm);
-
- if (!err && alrm.enabled)
- rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF);
-
- } else if (strncmp(buf, "update", 6) == 0 && rtc->uie_rtctimer.enabled)
- rtc_update_irq(rtc, 1, RTC_UF | RTC_IRQF);
- else
- retval = -EINVAL;
+ struct rtc_test_data *rtd = from_timer(rtd, t, alarm);
- return retval;
+ rtc_update_irq(rtd->rtc, 1, RTC_AF | RTC_IRQF);
}
-static DEVICE_ATTR(irq, S_IRUGO | S_IWUSR, test_irq_show, test_irq_store);
static int test_probe(struct platform_device *plat_dev)
{
- int err;
- struct rtc_device *rtc;
-
- if (test_mmss64) {
- test_rtc_ops.set_mmss64 = test_rtc_set_mmss64;
- test_rtc_ops.set_mmss = NULL;
- }
+ struct rtc_test_data *rtd;
- rtc = devm_rtc_device_register(&plat_dev->dev, "test",
- &test_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- return PTR_ERR(rtc);
- }
+ rtd = devm_kzalloc(&plat_dev->dev, sizeof(*rtd), GFP_KERNEL);
+ if (!rtd)
+ return -ENOMEM;
- err = device_create_file(&plat_dev->dev, &dev_attr_irq);
- if (err)
- dev_err(&plat_dev->dev, "Unable to create sysfs entry: %s\n",
- dev_attr_irq.attr.name);
+ platform_set_drvdata(plat_dev, rtd);
- platform_set_drvdata(plat_dev, rtc);
+ rtd->rtc = devm_rtc_allocate_device(&plat_dev->dev);
+ if (IS_ERR(rtd->rtc))
+ return PTR_ERR(rtd->rtc);
- return 0;
-}
+ switch (plat_dev->id) {
+ case 0:
+ rtd->rtc->ops = &test_rtc_ops_noalm;
+ break;
+ default:
+ rtd->rtc->ops = &test_rtc_ops;
+ }
-static int test_remove(struct platform_device *plat_dev)
-{
- device_remove_file(&plat_dev->dev, &dev_attr_irq);
+ timer_setup(&rtd->alarm, test_rtc_alarm_handler, 0);
+ rtd->alarm.expires = 0;
- return 0;
+ return rtc_register_device(rtd->rtc);
}
static struct platform_driver test_driver = {
.probe = test_probe,
- .remove = test_remove,
.driver = {
.name = "rtc-test",
},
@@ -149,47 +153,45 @@ static struct platform_driver test_driver = {
static int __init test_init(void)
{
- int err;
+ int i, err;
if ((err = platform_driver_register(&test_driver)))
return err;
- if ((test0 = platform_device_alloc("rtc-test", 0)) == NULL) {
- err = -ENOMEM;
- goto exit_driver_unregister;
+ err = -ENOMEM;
+ for (i = 0; i < MAX_RTC_TEST; i++) {
+ pdev[i] = platform_device_alloc("rtc-test", i);
+ if (!pdev[i])
+ goto exit_free_mem;
}
- if ((test1 = platform_device_alloc("rtc-test", 1)) == NULL) {
- err = -ENOMEM;
- goto exit_put_test0;
+ for (i = 0; i < MAX_RTC_TEST; i++) {
+ err = platform_device_add(pdev[i]);
+ if (err)
+ goto exit_device_del;
}
- if ((err = platform_device_add(test0)))
- goto exit_put_test1;
-
- if ((err = platform_device_add(test1)))
- goto exit_del_test0;
-
return 0;
-exit_del_test0:
- platform_device_del(test0);
-
-exit_put_test1:
- platform_device_put(test1);
+exit_device_del:
+ for (; i > 0; i--)
+ platform_device_del(pdev[i - 1]);
-exit_put_test0:
- platform_device_put(test0);
+exit_free_mem:
+ for (i = 0; i < MAX_RTC_TEST; i++)
+ platform_device_put(pdev[i]);
-exit_driver_unregister:
platform_driver_unregister(&test_driver);
return err;
}
static void __exit test_exit(void)
{
- platform_device_unregister(test0);
- platform_device_unregister(test1);
+ int i;
+
+ for (i = 0; i < MAX_RTC_TEST; i++)
+ platform_device_unregister(pdev[i]);
+
platform_driver_unregister(&test_driver);
}
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index d7785ae0a2b4..d6434e514a52 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -58,7 +58,6 @@ struct tps6586x_rtc {
struct rtc_device *rtc;
int irq;
bool irq_en;
- unsigned long long epoch_start;
};
static inline struct device *to_tps6586x_dev(struct device *dev)
@@ -68,10 +67,9 @@ static inline struct device *to_tps6586x_dev(struct device *dev)
static int tps6586x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
struct device *tps_dev = to_tps6586x_dev(dev);
unsigned long long ticks = 0;
- unsigned long seconds;
+ time64_t seconds;
u8 buff[6];
int ret;
int i;
@@ -88,26 +86,20 @@ static int tps6586x_rtc_read_time(struct device *dev, struct rtc_time *tm)
}
seconds = ticks >> 10;
- seconds += rtc->epoch_start;
- rtc_time_to_tm(seconds, tm);
+ rtc_time64_to_tm(seconds, tm);
+
return 0;
}
static int tps6586x_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
struct device *tps_dev = to_tps6586x_dev(dev);
unsigned long long ticks;
- unsigned long seconds;
+ time64_t seconds;
u8 buff[5];
int ret;
- rtc_tm_to_time(tm, &seconds);
- if (seconds < rtc->epoch_start) {
- dev_err(dev, "requested time unsupported\n");
- return -EINVAL;
- }
- seconds -= rtc->epoch_start;
+ seconds = rtc_tm_to_time64(tm);
ticks = (unsigned long long)seconds << 10;
buff[0] = (ticks >> 32) & 0xff;
@@ -155,9 +147,8 @@ static int tps6586x_rtc_alarm_irq_enable(struct device *dev,
static int tps6586x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
struct device *tps_dev = to_tps6586x_dev(dev);
- unsigned long seconds;
+ time64_t seconds;
unsigned long ticks;
unsigned long rtc_current_time;
unsigned long long rticks = 0;
@@ -166,12 +157,7 @@ static int tps6586x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
int ret;
int i;
- rtc_tm_to_time(&alrm->time, &seconds);
-
- if (alrm->enabled && (seconds < rtc->epoch_start)) {
- dev_err(dev, "can't set alarm to requested time\n");
- return -EINVAL;
- }
+ seconds = rtc_tm_to_time64(&alrm->time);
ret = tps6586x_rtc_alarm_irq_enable(dev, alrm->enabled);
if (ret < 0) {
@@ -179,7 +165,6 @@ static int tps6586x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
- seconds -= rtc->epoch_start;
ret = tps6586x_reads(tps_dev, RTC_COUNT4_DUMMYREAD,
sizeof(rbuff), rbuff);
if (ret < 0) {
@@ -210,10 +195,9 @@ static int tps6586x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int tps6586x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
struct device *tps_dev = to_tps6586x_dev(dev);
unsigned long ticks;
- unsigned long seconds;
+ time64_t seconds;
u8 buff[3];
int ret;
@@ -225,9 +209,8 @@ static int tps6586x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ticks = (buff[0] << 16) | (buff[1] << 8) | buff[2];
seconds = ticks >> 10;
- seconds += rtc->epoch_start;
- rtc_time_to_tm(seconds, &alrm->time);
+ rtc_time64_to_tm(seconds, &alrm->time);
return 0;
}
@@ -260,9 +243,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
rtc->dev = &pdev->dev;
rtc->irq = platform_get_irq(pdev, 0);
- /* Set epoch start as 00:00:00:01:01:2009 */
- rtc->epoch_start = mktime(2009, 1, 1, 0, 0, 0);
-
/* 1 kHz tick mode, enable tick counting */
ret = tps6586x_update(tps_dev, RTC_CTRL,
RTC_ENABLE | OSC_SRC_SEL |
@@ -276,14 +256,18 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, rtc);
- rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev),
- &tps6586x_rtc_ops, THIS_MODULE);
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
- dev_err(&pdev->dev, "RTC device register: ret %d\n", ret);
+ dev_err(&pdev->dev, "RTC allocate device: ret %d\n", ret);
goto fail_rtc_register;
}
+ rtc->rtc->ops = &tps6586x_rtc_ops;
+ rtc->rtc->range_max = (1ULL << 30) - 1; /* 30-bit seconds */
+ rtc->rtc->start_secs = mktime64(2009, 1, 1, 0, 0, 0);
+ rtc->rtc->set_start_time = true;
+
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
tps6586x_rtc_irq,
IRQF_ONESHOT,
@@ -294,6 +278,13 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
goto fail_rtc_register;
}
disable_irq(rtc->irq);
+
+ ret = rtc_register_device(rtc->rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "RTC device register: ret %d\n", ret);
+ goto fail_rtc_register;
+ }
+
return 0;
fail_rtc_register:
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index d0244d7979fc..a9bbd022aeef 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -380,6 +380,10 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
if (!tps_rtc)
return -ENOMEM;
+ tps_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(tps_rtc->rtc))
+ return PTR_ERR(tps_rtc->rtc);
+
/* Clear pending interrupts */
ret = regmap_read(tps65910->regmap, TPS65910_RTC_STATUS, &rtc_reg);
if (ret < 0)
@@ -421,10 +425,12 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
tps_rtc->irq = irq;
device_set_wakeup_capable(&pdev->dev, 1);
- tps_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &tps65910_rtc_ops, THIS_MODULE);
- if (IS_ERR(tps_rtc->rtc)) {
- ret = PTR_ERR(tps_rtc->rtc);
+ tps_rtc->rtc->ops = &tps65910_rtc_ops;
+ tps_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ tps_rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ ret = rtc_register_device(tps_rtc->rtc);
+ if (ret) {
dev_err(&pdev->dev, "RTC device register: err %d\n", ret);
return ret;
}
@@ -432,17 +438,6 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
return 0;
}
-/*
- * Disable tps65910 RTC interrupts.
- * Sets status flag to free.
- */
-static int tps65910_rtc_remove(struct platform_device *pdev)
-{
- tps65910_rtc_alarm_irq_enable(&pdev->dev, 0);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int tps65910_rtc_suspend(struct device *dev)
{
@@ -468,7 +463,6 @@ static SIMPLE_DEV_PM_OPS(tps65910_rtc_pm_ops, tps65910_rtc_suspend,
static struct platform_driver tps65910_rtc_driver = {
.probe = tps65910_rtc_probe,
- .remove = tps65910_rtc_remove,
.driver = {
.name = "tps65910-rtc",
.pm = &tps65910_rtc_pm_ops,
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 7ce22967fd16..70f013e692b0 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -88,7 +88,7 @@ static unsigned int alarm_enabled;
static int aie_irq;
static int pie_irq;
-static inline unsigned long read_elapsed_second(void)
+static inline time64_t read_elapsed_second(void)
{
unsigned long first_low, first_mid, first_high;
@@ -105,10 +105,10 @@ static inline unsigned long read_elapsed_second(void)
} while (first_low != second_low || first_mid != second_mid ||
first_high != second_high);
- return (first_high << 17) | (first_mid << 1) | (first_low >> 15);
+ return ((u64)first_high << 17) | (first_mid << 1) | (first_low >> 15);
}
-static inline void write_elapsed_second(unsigned long sec)
+static inline void write_elapsed_second(time64_t sec)
{
spin_lock_irq(&rtc_lock);
@@ -121,22 +121,22 @@ static inline void write_elapsed_second(unsigned long sec)
static int vr41xx_rtc_read_time(struct device *dev, struct rtc_time *time)
{
- unsigned long epoch_sec, elapsed_sec;
+ time64_t epoch_sec, elapsed_sec;
- epoch_sec = mktime(epoch, 1, 1, 0, 0, 0);
+ epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
elapsed_sec = read_elapsed_second();
- rtc_time_to_tm(epoch_sec + elapsed_sec, time);
+ rtc_time64_to_tm(epoch_sec + elapsed_sec, time);
return 0;
}
static int vr41xx_rtc_set_time(struct device *dev, struct rtc_time *time)
{
- unsigned long epoch_sec, current_sec;
+ time64_t epoch_sec, current_sec;
- epoch_sec = mktime(epoch, 1, 1, 0, 0, 0);
- current_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday,
+ epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
+ current_sec = mktime64(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday,
time->tm_hour, time->tm_min, time->tm_sec);
write_elapsed_second(current_sec - epoch_sec);
@@ -165,11 +165,11 @@ static int vr41xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
- unsigned long alarm_sec;
+ time64_t alarm_sec;
struct rtc_time *time = &wkalrm->time;
- alarm_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday,
- time->tm_hour, time->tm_min, time->tm_sec);
+ alarm_sec = mktime64(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday,
+ time->tm_hour, time->tm_min, time->tm_sec);
spin_lock_irq(&rtc_lock);
@@ -292,13 +292,16 @@ static int rtc_probe(struct platform_device *pdev)
goto err_rtc1_iounmap;
}
- rtc = devm_rtc_device_register(&pdev->dev, rtc_name, &vr41xx_rtc_ops,
- THIS_MODULE);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc)) {
retval = PTR_ERR(rtc);
goto err_iounmap_all;
}
+ rtc->ops = &vr41xx_rtc_ops;
+
+ /* 48-bit counter at 32.768 kHz */
+ rtc->range_max = (1ULL << 33) - 1;
rtc->max_user_freq = MAX_PERIODIC_RATE;
spin_lock_irq(&rtc_lock);
@@ -340,6 +343,10 @@ static int rtc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
+ retval = rtc_register_device(rtc);
+ if (retval)
+ goto err_iounmap_all;
+
return 0;
err_iounmap_all:
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index fba994dc31eb..c532bd13fbe5 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -278,10 +278,9 @@ static int xlnx_rtc_remove(struct platform_device *pdev)
static int __maybe_unused xlnx_rtc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct xlnx_rtc_dev *xrtcdev = platform_get_drvdata(pdev);
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(dev))
enable_irq_wake(xrtcdev->alarm_irq);
else
xlnx_rtc_alarm_irq_enable(dev, 0);
@@ -291,10 +290,9 @@ static int __maybe_unused xlnx_rtc_suspend(struct device *dev)
static int __maybe_unused xlnx_rtc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct xlnx_rtc_dev *xrtcdev = platform_get_drvdata(pdev);
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(dev))
disable_irq_wake(xrtcdev->alarm_irq);
else
xlnx_rtc_alarm_irq_enable(dev, 1);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 0a312e450207..29024492b8ed 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -51,9 +51,16 @@ static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
return copy_from_iter(addr, bytes, i);
}
+static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_to_iter(addr, bytes, i);
+}
+
static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access,
.copy_from_iter = dcssblk_dax_copy_from_iter,
+ .copy_to_iter = dcssblk_dax_copy_to_iter,
};
struct dcssblk_dev_info {
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 18c4f933e8b9..3b368fcf13f4 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -285,6 +285,8 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
struct list_head *entry;
unsigned long flags;
+ lockdep_assert_held(&adapter->erp_lock);
+
if (unlikely(!debug_level_enabled(dbf->rec, level)))
return;
@@ -599,16 +601,18 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
}
/**
- * zfcp_dbf_scsi - trace event for scsi commands
- * @tag: identifier for event
- * @sc: pointer to struct scsi_cmnd
- * @fsf: pointer to struct zfcp_fsf_req
+ * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
+ * @tag: Identifier for event.
+ * @level: Trace level of event.
+ * @sdev: Pointer to SCSI device as context for this event.
+ * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
+ * @fsf: Pointer to FSF request, or NULL.
*/
-void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
- struct zfcp_fsf_req *fsf)
+void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
+ struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
struct zfcp_adapter *adapter =
- (struct zfcp_adapter *) sc->device->host->hostdata[0];
+ (struct zfcp_adapter *) sdev->host->hostdata[0];
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
struct fcp_resp_with_ext *fcp_rsp;
@@ -620,16 +624,28 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_SCSI_CMND;
- rec->scsi_result = sc->result;
- rec->scsi_retries = sc->retries;
- rec->scsi_allowed = sc->allowed;
- rec->scsi_id = sc->device->id;
- rec->scsi_lun = (u32)sc->device->lun;
- rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
- rec->host_scribble = (unsigned long)sc->host_scribble;
-
- memcpy(rec->scsi_opcode, sc->cmnd,
- min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
+ if (sc) {
+ rec->scsi_result = sc->result;
+ rec->scsi_retries = sc->retries;
+ rec->scsi_allowed = sc->allowed;
+ rec->scsi_id = sc->device->id;
+ rec->scsi_lun = (u32)sc->device->lun;
+ rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
+ rec->host_scribble = (unsigned long)sc->host_scribble;
+
+ memcpy(rec->scsi_opcode, sc->cmnd,
+ min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
+ } else {
+ rec->scsi_result = ~0;
+ rec->scsi_retries = ~0;
+ rec->scsi_allowed = ~0;
+ rec->scsi_id = sdev->id;
+ rec->scsi_lun = (u32)sdev->lun;
+ rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
+ rec->host_scribble = ~0;
+
+ memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
+ }
if (fsf) {
rec->fsf_req_id = fsf->req_id;
@@ -664,6 +680,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
+/**
+ * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
+ * @tag: Identifier for event.
+ * @adapter: Pointer to zfcp adapter as context for this event.
+ * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
+ * @ret: Return value of calling function.
+ *
+ * This SCSI trace variant does not depend on any of:
+ * scsi_cmnd, zfcp_fsf_req, scsi_device.
+ */
+void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
+ unsigned int scsi_id, int ret)
+{
+ struct zfcp_dbf *dbf = adapter->dbf;
+ struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
+ unsigned long flags;
+ static int const level = 1;
+
+ if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
+ return;
+
+ spin_lock_irqsave(&dbf->scsi_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_SCSI_CMND;
+ rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
+ rec->scsi_retries = ~0;
+ rec->scsi_allowed = ~0;
+ rec->fcp_rsp_info = ~0;
+ rec->scsi_id = scsi_id;
+ rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
+ rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
+ rec->host_scribble = ~0;
+ memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
+
+ debug_event(dbf->scsi, level, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->scsi_lock, flags);
+}
+
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
struct debug_info *d;
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index e2a973cd2573..d116c07ed77a 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -359,7 +359,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
scmd->device->host->hostdata[0];
if (debug_level_enabled(adapter->dbf->scsi, level))
- zfcp_dbf_scsi(tag, level, scmd, req);
+ zfcp_dbf_scsi_common(tag, level, scmd->device, scmd, req);
}
/**
@@ -402,16 +402,23 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
}
/**
- * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
- * @tag: tag indicating success or failure of reset operation
- * @scmnd: SCSI command which caused this error recovery
- * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
+ * zfcp_dbf_scsi_devreset() - Trace event for Logical Unit or Target Reset.
+ * @tag: Tag indicating success or failure of reset operation.
+ * @sdev: Pointer to SCSI device as context for this event.
+ * @flag: Indicates type of reset (Target Reset, Logical Unit Reset).
+ * @fsf_req: Pointer to FSF request representing the TMF, or NULL.
*/
static inline
-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_device *sdev, u8 flag,
struct zfcp_fsf_req *fsf_req)
{
+ struct zfcp_adapter *adapter = (struct zfcp_adapter *)
+ sdev->host->hostdata[0];
char tmp_tag[ZFCP_DBF_TAG_LEN];
+ static int const level = 1;
+
+ if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
+ return;
if (flag == FCP_TMF_TGT_RESET)
memcpy(tmp_tag, "tr_", 3);
@@ -419,7 +426,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
+ zfcp_dbf_scsi_common(tmp_tag, level, sdev, NULL, fsf_req);
}
/**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 1d91a32db08e..e7e6b63905e2 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -19,7 +19,6 @@
enum zfcp_erp_act_flags {
ZFCP_STATUS_ERP_TIMEDOUT = 0x10000000,
ZFCP_STATUS_ERP_CLOSE_ONLY = 0x01000000,
- ZFCP_STATUS_ERP_DISMISSING = 0x00100000,
ZFCP_STATUS_ERP_DISMISSED = 0x00200000,
ZFCP_STATUS_ERP_LOWMEM = 0x00400000,
ZFCP_STATUS_ERP_NO_REF = 0x00800000,
@@ -27,7 +26,6 @@ enum zfcp_erp_act_flags {
enum zfcp_erp_steps {
ZFCP_ERP_STEP_UNINITIALIZED = 0x0000,
- ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
@@ -35,16 +33,28 @@ enum zfcp_erp_steps {
ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
};
+/**
+ * enum zfcp_erp_act_type - Type of ERP action object.
+ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
+ * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
+ * either of the first four enum values.
+ * Used to indicate that an ERP action could not be
+ * set up despite a detected need for some recovery.
+ * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
+ * either of the first four enum values.
+ *				Used to indicate that ERP is not needed because
+ * the object has ZFCP_STATUS_COMMON_ERP_FAILED.
+ */
enum zfcp_erp_act_type {
ZFCP_ERP_ACTION_REOPEN_LUN = 1,
ZFCP_ERP_ACTION_REOPEN_PORT = 2,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
-};
-
-enum zfcp_erp_act_state {
- ZFCP_ERP_ACTION_RUNNING = 1,
- ZFCP_ERP_ACTION_READY = 2,
+ ZFCP_ERP_ACTION_NONE = 0xc0,
+ ZFCP_ERP_ACTION_FAILED = 0xe0,
};
enum zfcp_erp_act_result {
@@ -62,14 +72,14 @@ static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
ZFCP_STATUS_COMMON_UNBLOCKED | mask);
}
-static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
+static bool zfcp_erp_action_is_running(struct zfcp_erp_action *act)
{
struct zfcp_erp_action *curr_act;
list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
if (act == curr_act)
- return ZFCP_ERP_ACTION_RUNNING;
- return 0;
+ return true;
+ return false;
}
static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
@@ -85,7 +95,7 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
{
act->status |= ZFCP_STATUS_ERP_DISMISSED;
- if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING)
+ if (zfcp_erp_action_is_running(act))
zfcp_erp_action_ready(act);
}
@@ -126,6 +136,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
}
}
+static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+ struct scsi_device *sdev)
+{
+ int need = want;
+ struct zfcp_scsi_dev *zsdev;
+
+ switch (want) {
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ zsdev = sdev_to_zfcp(sdev);
+ if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ need = 0;
+ break;
+ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ need = 0;
+ break;
+ case ZFCP_ERP_ACTION_REOPEN_PORT:
+ if (atomic_read(&port->status) &
+ ZFCP_STATUS_COMMON_ERP_FAILED) {
+ need = 0;
+ /* ensure propagation of failed status to new devices */
+ zfcp_erp_set_port_status(
+ port, ZFCP_STATUS_COMMON_ERP_FAILED);
+ }
+ break;
+ case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+ if (atomic_read(&adapter->status) &
+ ZFCP_STATUS_COMMON_ERP_FAILED) {
+ need = 0;
+ /* ensure propagation of failed status to new devices */
+ zfcp_erp_set_adapter_status(
+ adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+ }
+ break;
+ default:
+ need = 0;
+ break;
+ }
+
+ return need;
+}
+
static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
@@ -241,48 +294,70 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
return erp_action;
}
-static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
- struct zfcp_port *port,
- struct scsi_device *sdev,
- char *id, u32 act_status)
+static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+ struct scsi_device *sdev,
+ char *id, u32 act_status)
{
- int retval = 1, need;
+ int need;
struct zfcp_erp_action *act;
- if (!adapter->erp_thread)
- return -EIO;
+ need = zfcp_erp_handle_failed(want, adapter, port, sdev);
+ if (!need) {
+ need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
+ goto out;
+ }
+
+ if (!adapter->erp_thread) {
+ need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
+ goto out;
+ }
need = zfcp_erp_required_act(want, adapter, port, sdev);
if (!need)
goto out;
act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
- if (!act)
+ if (!act) {
+ need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
goto out;
+ }
atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head);
wake_up(&adapter->erp_ready_wq);
- retval = 0;
out:
zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
- return retval;
}
-static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
+void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
+ u64 port_name, u32 port_id)
+{
+ unsigned long flags;
+ static /* don't waste stack */ struct zfcp_port tmpport;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ /* Stand-in zfcp port with fields just good enough for
+ * zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
+ * Under lock because tmpport is static.
+ */
+ atomic_set(&tmpport.status, -1); /* unknown */
+ tmpport.wwpn = port_name;
+ tmpport.d_id = port_id;
+ zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
+ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
+ ZFCP_ERP_ACTION_NONE);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
int clear_mask, char *id)
{
zfcp_erp_adapter_block(adapter, clear_mask);
zfcp_scsi_schedule_rports_block(adapter);
- /* ensure propagation of failed status to new devices */
- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
- zfcp_erp_set_adapter_status(adapter,
- ZFCP_STATUS_COMMON_ERP_FAILED);
- return -EIO;
- }
- return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
- adapter, NULL, NULL, id, 0);
+ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
+ adapter, NULL, NULL, id, 0);
}
/**
@@ -299,12 +374,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
zfcp_scsi_schedule_rports_block(adapter);
write_lock_irqsave(&adapter->erp_lock, flags);
- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
- zfcp_erp_set_adapter_status(adapter,
- ZFCP_STATUS_COMMON_ERP_FAILED);
- else
- zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
- NULL, NULL, id, 0);
+ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+ NULL, NULL, id, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@@ -345,9 +416,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
- return;
-
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
port->adapter, port, NULL, id, 0);
}
@@ -368,19 +436,13 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
-static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
- /* ensure propagation of failed status to new devices */
- zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
- return -EIO;
- }
-
- return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
- port->adapter, port, NULL, id, 0);
+ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
+ port->adapter, port, NULL, id, 0);
}
/**
@@ -388,20 +450,15 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
* @port: port to recover
* @clear_mask: flags in port status to be cleared
* @id: Id for debug trace event.
- *
- * Returns 0 if recovery has been triggered, < 0 if not.
*/
-int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
{
- int retval;
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
- retval = _zfcp_erp_port_reopen(port, clear, id);
+ _zfcp_erp_port_reopen(port, clear, id);
write_unlock_irqrestore(&adapter->erp_lock, flags);
-
- return retval;
}
static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
@@ -418,9 +475,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
zfcp_erp_lun_block(sdev, clear);
- if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
- return;
-
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
zfcp_sdev->port, sdev, id, act_status);
}
@@ -482,21 +536,23 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
zfcp_erp_wait(adapter);
}
-static int status_change_set(unsigned long mask, atomic_t *status)
+static int zfcp_erp_status_change_set(unsigned long mask, atomic_t *status)
{
return (atomic_read(status) ^ mask) & mask;
}
static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
- if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
+ if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
+ &adapter->status))
zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
}
static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
- if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
+ if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
+ &port->status))
zfcp_dbf_rec_run("erpubl1", &port->erp_action);
atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}
@@ -505,7 +561,8 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
- if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
+ if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
+ &zfcp_sdev->status))
zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}
@@ -553,7 +610,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
unsigned long flags;
write_lock_irqsave(&adapter->erp_lock, flags);
- if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
+ if (zfcp_erp_action_is_running(erp_action)) {
erp_action->status |= set_mask;
zfcp_erp_action_ready(erp_action);
}
@@ -1634,3 +1691,14 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
atomic_set(&zfcp_sdev->erp_counter, 0);
}
+/**
+ * zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
+ * @adapter: Pointer to zfcp_adapter to reopen.
+ * @id: Trace tag string of length %ZFCP_DBF_TAG_LEN.
+ */
+void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id)
+{
+ zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id);
+ zfcp_erp_wait(adapter);
+}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index e5eed8aac0ce..bd0c5a9f04cb 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -50,17 +50,23 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
- struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
+ struct scsi_cmnd *sc,
+ struct zfcp_fsf_req *fsf);
+extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
+ unsigned int scsi_id, int ret);
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_port_forced_no_port_dbf(char *id,
+ struct zfcp_adapter *adapter,
+ u64 port_name, u32 port_id);
extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
-extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
@@ -73,6 +79,7 @@ extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(struct timer_list *t);
+extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id);
/* zfcp_fc.c */
extern struct kmem_cache *zfcp_fc_req_cache;
@@ -120,7 +127,8 @@ extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
-extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
+extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
+ u8 tm_flags);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 6162cf57a20a..f6c415d6ef48 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -111,11 +111,10 @@ void zfcp_fc_post_event(struct work_struct *work)
list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
- event->code, event->data);
+ event->code, event->data);
list_del(&event->list);
kfree(event);
}
-
}
/**
@@ -126,7 +125,7 @@ void zfcp_fc_post_event(struct work_struct *work)
* @event_data: The event data (e.g. n_port page in case of els)
*/
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
- enum fc_host_event_code event_code, u32 event_data)
+ enum fc_host_event_code event_code, u32 event_data)
{
struct zfcp_fc_event *event;
@@ -425,6 +424,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
+ set_worker_desc("zgidpn%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
ret = zfcp_fc_ns_gid_pn(port);
if (ret) {
/* could not issue gid_pn for some reason */
@@ -559,6 +559,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
container_of(work, struct zfcp_port, test_link_work);
int retval;
+ set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
get_device(&port->dev);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
@@ -596,7 +597,7 @@ void zfcp_fc_test_link(struct zfcp_port *port)
put_device(&port->dev);
}
-static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
+static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
{
struct zfcp_fc_req *fc_req;
@@ -748,7 +749,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
if (zfcp_fc_wka_port_get(&adapter->gs->ds))
return;
- fc_req = zfcp_alloc_sg_env(buf_num);
+ fc_req = zfcp_fc_alloc_sg_env(buf_num);
if (!fc_req)
goto out;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 6a397ddaadf0..3cd74729cfb9 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -207,21 +207,14 @@ struct zfcp_fc_wka_ports {
* zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
* @fcp: fcp_cmnd to setup
* @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
- * @tm: task management flags to setup task management command
*/
static inline
-void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
- u8 tm_flags)
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
{
u32 datalen;
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
- if (unlikely(tm_flags)) {
- fcp->fc_tm_flags = tm_flags;
- return;
- }
-
fcp->fc_pri_ta = FCP_PTA_SIMPLE;
if (scsi->sc_data_direction == DMA_FROM_DEVICE)
@@ -241,6 +234,19 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
}
/**
+ * zfcp_fc_fcp_tm() - Setup FCP command as task management command.
+ * @fcp: Pointer to FCP_CMND IU to set up.
+ * @dev: Pointer to SCSI_device where to send the task management command.
+ * @tm_flags: Task management flags to setup tm command.
+ */
+static inline
+void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
+{
+ int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
+ fcp->fc_tm_flags = tm_flags;
+}
+
+/**
* zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
* @fcp_rsp: FCP RSP IU to evaluate
* @scsi: SCSI command where to update status and sense buffer
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b12cb81ad8a2..3c86e27f094d 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4,7 +4,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@@ -437,6 +437,9 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
+#define ZFCP_FSF_PORTSPEED_32GBIT (1 << 6)
+#define ZFCP_FSF_PORTSPEED_64GBIT (1 << 7)
+#define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
@@ -454,6 +457,12 @@ static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
fdmi_speed |= FC_PORTSPEED_8GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
fdmi_speed |= FC_PORTSPEED_16GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
+ fdmi_speed |= FC_PORTSPEED_32GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
+ fdmi_speed |= FC_PORTSPEED_64GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
+ fdmi_speed |= FC_PORTSPEED_128GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
return fdmi_speed;
@@ -662,7 +671,7 @@ static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
return req;
}
-static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
+static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
{
struct fsf_qtcb *qtcb;
@@ -701,9 +710,10 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
if (likely(pool))
- req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
+ req->qtcb = zfcp_fsf_qtcb_alloc(
+ adapter->pool.qtcb_pool);
else
- req->qtcb = zfcp_qtcb_alloc(NULL);
+ req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
if (unlikely(!req->qtcb)) {
zfcp_fsf_req_free(req);
@@ -2036,10 +2046,14 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
sizeof(blktrc));
}
-static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
+/**
+ * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
+ * @req: Pointer to FSF request.
+ * @sdev: Pointer to SCSI device as request context.
+ */
+static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
+ struct scsi_device *sdev)
{
- struct scsi_cmnd *scmnd = req->data;
- struct scsi_device *sdev = scmnd->device;
struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
@@ -2051,7 +2065,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_HANDLE_MISMATCH:
case FSF_PORT_HANDLE_NOT_VALID:
- zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
+ zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_FCPLUN_NOT_VALID:
@@ -2069,8 +2083,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
req->qtcb->bottom.io.data_direction,
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
- zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
- "fssfch3");
+ zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_CMND_LENGTH_NOT_VALID:
@@ -2080,8 +2093,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
req->qtcb->bottom.io.fcp_cmnd_length,
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
- zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
- "fssfch4");
+ zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_BOXED:
@@ -2120,7 +2132,7 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
return;
}
- zfcp_fsf_fcp_handler_common(req);
+ zfcp_fsf_fcp_handler_common(req, scpnt->device);
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
@@ -2258,7 +2270,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
- zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
scsi_prot_sg_count(scsi_cmnd)) {
@@ -2297,10 +2309,11 @@ out:
static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
{
+ struct scsi_device *sdev = req->data;
struct fcp_resp_with_ext *fcp_rsp;
struct fcp_resp_rsp_info *rsp_info;
- zfcp_fsf_fcp_handler_common(req);
+ zfcp_fsf_fcp_handler_common(req, sdev);
fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
@@ -2311,17 +2324,18 @@ static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
}
/**
- * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
- * @scmnd: SCSI command to send the task management command for
- * @tm_flags: unsigned byte for task management flags
- * Returns: on success pointer to struct fsf_req, NULL otherwise
+ * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
+ * @sdev: Pointer to SCSI device to send the task management command to.
+ * @tm_flags: Unsigned byte for task management flags.
+ *
+ * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
*/
-struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
+struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags)
{
struct zfcp_fsf_req *req = NULL;
struct fcp_cmnd *fcp_cmnd;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
if (unlikely(!(atomic_read(&zfcp_sdev->status) &
@@ -2341,7 +2355,8 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
goto out;
}
- req->data = scmnd;
+ req->data = sdev;
+
req->handler = zfcp_fsf_fcp_task_mgmt_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req->qtcb->header.port_handle = zfcp_sdev->port->handle;
@@ -2352,7 +2367,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
- zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
+ zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 4baca67aba6d..535628b92f0a 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -4,7 +4,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#ifndef FSF_H
@@ -356,7 +356,7 @@ struct fsf_qtcb_bottom_config {
u32 adapter_features;
u32 connection_features;
u32 fc_topology;
- u32 fc_link_speed;
+ u32 fc_link_speed; /* one of ZFCP_FSF_PORTSPEED_* */
u32 adapter_type;
u8 res0;
u8 peer_d_id[3];
@@ -382,7 +382,7 @@ struct fsf_qtcb_bottom_port {
u32 class_of_service; /* should be 0x00000006 for class 2 and 3 */
u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */
u8 active_fc4_types[32];
- u32 supported_speed; /* 0x0001 for 1 GBit/s or 0x0002 for 2 GBit/s */
+ u32 supported_speed; /* any combination of ZFCP_FSF_PORTSPEED_* */
u32 maximum_frame_size; /* fixed value of 2112 */
u64 seconds_since_last_reset;
u64 tx_frames;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 22f9562f415c..a8efcb330bc1 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -181,6 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
if (abrt_req)
break;
+ zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret) {
@@ -264,44 +265,52 @@ static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
write_unlock_irqrestore(&adapter->abort_lock, flags);
}
-static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+/**
+ * zfcp_scsi_task_mgmt_function() - Send a task management function (sync).
+ * @sdev: Pointer to SCSI device to send the task management command to.
+ * @tm_flags: Task management flags,
+ * here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET.
+ */
+static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags)
{
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct zfcp_fsf_req *fsf_req = NULL;
int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
- fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
+ fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
if (fsf_req)
break;
+ zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL);
zfcp_erp_wait(adapter);
- ret = fc_block_scsi_eh(scpnt);
+ ret = fc_block_rport(rport);
if (ret) {
- zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
+ zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL);
return ret;
}
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
- zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
+ zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL);
return SUCCESS;
}
}
if (!fsf_req) {
- zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
+ zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL);
return FAILED;
}
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
+ zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req);
retval = FAILED;
} else {
- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
+ zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}
@@ -311,27 +320,81 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
- return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
+ struct scsi_device *sdev = scpnt->device;
+
+ return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET);
}
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
- return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
+ struct scsi_target *starget = scsi_target(scpnt->device);
+ struct fc_rport *rport = starget_to_rport(starget);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct scsi_device *sdev = NULL, *tmp_sdev;
+ struct zfcp_adapter *adapter =
+ (struct zfcp_adapter *)shost->hostdata[0];
+ int ret;
+
+ shost_for_each_device(tmp_sdev, shost) {
+ if (tmp_sdev->id == starget->id) {
+ sdev = tmp_sdev;
+ break;
+ }
+ }
+ if (!sdev) {
+ ret = FAILED;
+ zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret);
+ return ret;
+ }
+
+ ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET);
+
+ /* release reference from above shost_for_each_device */
+ if (sdev)
+ scsi_device_put(tmp_sdev);
+
+ return ret;
}
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
- int ret;
+ int ret = SUCCESS, fc_ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
- ret = fc_block_scsi_eh(scpnt);
- if (ret)
+ fc_ret = fc_block_scsi_eh(scpnt);
+ if (fc_ret)
+ ret = fc_ret;
+
+ zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
+ return ret;
+}
+
+/**
+ * zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset.
+ * @shost: Pointer to Scsi_Host to perform action on.
+ * @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET.
+ *
+ * Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise.
+ *
+ * This is similar to zfcp_sysfs_adapter_failed_store().
+ */
+static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ struct zfcp_adapter *adapter =
+ (struct zfcp_adapter *)shost->hostdata[0];
+ int ret = 0;
+
+ if (reset_type != SCSI_ADAPTER_RESET) {
+ ret = -EOPNOTSUPP;
+ zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret);
return ret;
+ }
- return SUCCESS;
+ zfcp_erp_adapter_reset_sync(adapter, "scshr_y");
+ return ret;
}
struct scsi_transport_template *zfcp_scsi_transport_template;
@@ -349,6 +412,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.slave_configure = zfcp_scsi_slave_configure,
.slave_destroy = zfcp_scsi_slave_destroy,
.change_queue_depth = scsi_change_queue_depth,
+ .host_reset = zfcp_scsi_sysfs_host_reset,
.proc_name = "zfcp",
.can_queue = 4096,
.this_id = -1,
@@ -363,6 +427,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
.track_queue_depth = 1,
+ .supported_mode = MODE_INITIATOR,
};
/**
@@ -430,7 +495,7 @@ void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
}
static struct fc_host_statistics*
-zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
+zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter)
{
struct fc_host_statistics *fc_stats;
@@ -444,9 +509,9 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
return adapter->fc_stats;
}
-static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
- struct fsf_qtcb_bottom_port *data,
- struct fsf_qtcb_bottom_port *old)
+static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
+ struct fsf_qtcb_bottom_port *data,
+ struct fsf_qtcb_bottom_port *old)
{
fc_stats->seconds_since_last_reset =
data->seconds_since_last_reset - old->seconds_since_last_reset;
@@ -477,8 +542,8 @@ static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}
-static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
- struct fsf_qtcb_bottom_port *data)
+static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats,
+ struct fsf_qtcb_bottom_port *data)
{
fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
fc_stats->tx_frames = data->tx_frames;
@@ -502,7 +567,8 @@ static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
fc_stats->fcp_output_megabytes = data->output_mb;
}
-static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
+static struct fc_host_statistics *
+zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
{
struct zfcp_adapter *adapter;
struct fc_host_statistics *fc_stats;
@@ -510,7 +576,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
int ret;
adapter = (struct zfcp_adapter *)host->hostdata[0];
- fc_stats = zfcp_init_fc_host_stats(adapter);
+ fc_stats = zfcp_scsi_init_fc_host_stats(adapter);
if (!fc_stats)
return NULL;
@@ -527,16 +593,16 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
if (adapter->stats_reset &&
((jiffies/HZ - adapter->stats_reset) <
data->seconds_since_last_reset))
- zfcp_adjust_fc_host_stats(fc_stats, data,
- adapter->stats_reset_data);
+ zfcp_scsi_adjust_fc_host_stats(fc_stats, data,
+ adapter->stats_reset_data);
else
- zfcp_set_fc_host_stats(fc_stats, data);
+ zfcp_scsi_set_fc_host_stats(fc_stats, data);
kfree(data);
return fc_stats;
}
-static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
+static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter;
struct fsf_qtcb_bottom_port *data;
@@ -558,7 +624,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
}
}
-static void zfcp_get_host_port_state(struct Scsi_Host *shost)
+static void zfcp_scsi_get_host_port_state(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
@@ -575,7 +641,8 @@ static void zfcp_get_host_port_state(struct Scsi_Host *shost)
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
}
-static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
+static void zfcp_scsi_set_rport_dev_loss_tmo(struct fc_rport *rport,
+ u32 timeout)
{
rport->dev_loss_tmo = timeout;
}
@@ -602,6 +669,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
if (port) {
zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
put_device(&port->dev);
+ } else {
+ zfcp_erp_port_forced_no_port_dbf(
+ "sctrpin", adapter,
+ rport->port_name /* zfcp_scsi_rport_register */,
+ rport->port_id /* zfcp_scsi_rport_register */);
}
}
@@ -687,6 +759,9 @@ void zfcp_scsi_rport_work(struct work_struct *work)
struct zfcp_port *port = container_of(work, struct zfcp_port,
rport_work);
+ set_worker_desc("zrp%c-%16llx",
+ (port->rport_task == RPORT_ADD) ? 'a' : 'd',
+ port->wwpn); /* < WORKER_DESC_LEN=24 */
while (port->rport_task) {
if (port->rport_task == RPORT_ADD) {
port->rport_task = RPORT_NONE;
@@ -761,10 +836,10 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
- .get_fc_host_stats = zfcp_get_fc_host_stats,
- .reset_fc_host_stats = zfcp_reset_fc_host_stats,
- .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
- .get_host_port_state = zfcp_get_host_port_state,
+ .get_fc_host_stats = zfcp_scsi_get_fc_host_stats,
+ .reset_fc_host_stats = zfcp_scsi_reset_fc_host_stats,
+ .set_rport_dev_loss_tmo = zfcp_scsi_set_rport_dev_loss_tmo,
+ .get_host_port_state = zfcp_scsi_get_host_port_state,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
.show_host_active_fc4s = 1,
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3ac823f2540f..b277be6f7611 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -200,10 +200,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
goto out;
}
- zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
- zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
- "syafai2");
- zfcp_erp_wait(adapter);
+ zfcp_erp_adapter_reset_sync(adapter, "syafai2");
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b42c9c479d4b..99ba4a770406 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -882,6 +882,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file)
unsigned int minor_number;
int retval = TW_IOCTL_ERROR_OS_ENODEV;
+ if (!capable(CAP_SYS_ADMIN)) {
+ retval = -EACCES;
+ goto out;
+ }
+
minor_number = iminor(inode);
if (minor_number >= twa_device_extension_count)
goto out;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 33261b690774..f6179e3d6953 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1033,6 +1033,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
minor_number = iminor(inode);
if (minor_number >= tw_device_extension_count)
return -ENODEV;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 11e89e56b865..35c909bbf8ba 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1351,6 +1351,20 @@ config SCSI_ZORRO7XX
accelerator card for the Amiga 1200,
- the SCSI controller on the GVP Turbo 040/060 accelerator.
+config SCSI_ZORRO_ESP
+ tristate "Zorro ESP SCSI support"
+ depends on ZORRO && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
+ expansion boards for the Amiga.
+ This includes:
+ - the Phase5 Blizzard 1230 II and IV SCSI controllers,
+ - the Phase5 Blizzard 2060 SCSI controller,
+ - the Phase5 Blizzard Cyberstorm and Cyberstorm II SCSI
+ controllers,
+ - the Fastlane Zorro III SCSI controller.
+
config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 56c940394729..80aca2456353 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
+obj-$(CONFIG_SCSI_ZORRO_ESP) += esp_scsi.o zorro_esp.o
obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
@@ -189,7 +190,7 @@ $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
$(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
quiet_cmd_bflags = GEN $@
- cmd_bflags = sed -n 's/.*BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
+ cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
$(call if_changed,bflags)
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 8086bd0ac9fd..b2942ec3d455 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1222,19 +1222,8 @@ static struct pci_driver inia100_pci_driver = {
.remove = inia100_remove_one,
};
-static int __init inia100_init(void)
-{
- return pci_register_driver(&inia100_pci_driver);
-}
-
-static void __exit inia100_exit(void)
-{
- pci_unregister_driver(&inia100_pci_driver);
-}
+module_pci_driver(inia100_pci_driver);
MODULE_DESCRIPTION("Initio A100U2W SCSI driver");
MODULE_AUTHOR("Initio Corporation");
MODULE_LICENSE("Dual BSD/GPL");
-
-module_init(inia100_init);
-module_exit(inia100_exit);
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index beea30e5a34a..d81ca66e24d6 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -556,15 +556,7 @@ static struct pci_driver am53c974_driver = {
.remove = pci_esp_remove_one,
};
-static int __init am53c974_module_init(void)
-{
- return pci_register_driver(&am53c974_driver);
-}
-
-static void __exit am53c974_module_exit(void)
-{
- pci_unregister_driver(&am53c974_driver);
-}
+module_pci_driver(am53c974_driver);
MODULE_DESCRIPTION("AM53C974 SCSI driver");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
@@ -577,6 +569,3 @@ MODULE_PARM_DESC(am53c974_debug, "Enable debugging");
module_param(am53c974_fenab, bool, 0444);
MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");
-
-module_init(am53c974_module_init);
-module_exit(am53c974_module_exit);
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
index a011c5dbf214..f1b17e3efb3f 100644
--- a/drivers/scsi/cxlflash/Kconfig
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -4,7 +4,7 @@
config CXLFLASH
tristate "Support for IBM CAPI Flash"
- depends on PCI && SCSI && CXL && EEH
+ depends on PCI && SCSI && (CXL || OCXL) && EEH
select IRQ_POLL
default m
help
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 7ec3f6b55dde..283377d8f6fb 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
+cxlflash-$(CONFIG_CXL) += cxl_hw.o
+cxlflash-$(CONFIG_OCXL) += ocxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
index 339e42b03c49..55638d19c2fd 100644
--- a/drivers/scsi/cxlflash/backend.h
+++ b/drivers/scsi/cxlflash/backend.h
@@ -12,30 +12,41 @@
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _CXLFLASH_BACKEND_H
+#define _CXLFLASH_BACKEND_H
+
extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
+extern const struct cxlflash_backend_ops cxlflash_ocxl_ops;
struct cxlflash_backend_ops {
struct module *module;
- void __iomem * (*psa_map)(void *);
- void (*psa_unmap)(void __iomem *);
- int (*process_element)(void *);
- int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
- void (*unmap_afu_irq)(void *, int, void *);
- int (*start_context)(void *);
- int (*stop_context)(void *);
- int (*afu_reset)(void *);
- void (*set_master)(void *);
- void * (*get_context)(struct pci_dev *, void *);
- void * (*dev_context_init)(struct pci_dev *, void *);
- int (*release_context)(void *);
- void (*perst_reloads_same_image)(void *, bool);
- ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
- int (*allocate_afu_irqs)(void *, int);
- void (*free_afu_irqs)(void *);
- void * (*create_afu)(struct pci_dev *);
- struct file * (*get_fd)(void *, struct file_operations *, int *);
- void * (*fops_get_context)(struct file *);
- int (*start_work)(void *, u64);
- int (*fd_mmap)(struct file *, struct vm_area_struct *);
- int (*fd_release)(struct inode *, struct file *);
+ void __iomem * (*psa_map)(void *ctx_cookie);
+ void (*psa_unmap)(void __iomem *addr);
+ int (*process_element)(void *ctx_cookie);
+ int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler,
+ void *cookie, char *name);
+ void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie);
+ u64 (*get_irq_objhndl)(void *ctx_cookie, int irq);
+ int (*start_context)(void *ctx_cookie);
+ int (*stop_context)(void *ctx_cookie);
+ int (*afu_reset)(void *ctx_cookie);
+ void (*set_master)(void *ctx_cookie);
+ void * (*get_context)(struct pci_dev *dev, void *afu_cookie);
+ void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
+ int (*release_context)(void *ctx_cookie);
+ void (*perst_reloads_same_image)(void *afu_cookie, bool image);
+ ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf,
+ size_t count);
+ int (*allocate_afu_irqs)(void *ctx_cookie, int num);
+ void (*free_afu_irqs)(void *ctx_cookie);
+ void * (*create_afu)(struct pci_dev *dev);
+ void (*destroy_afu)(void *afu_cookie);
+ struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops,
+ int *fd);
+ void * (*fops_get_context)(struct file *file);
+ int (*start_work)(void *ctx_cookie, u64 irqs);
+ int (*fd_mmap)(struct file *file, struct vm_area_struct *vm);
+ int (*fd_release)(struct inode *inode, struct file *file);
};
+
+#endif /* _CXLFLASH_BACKEND_H */
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 102fd26ca886..8908a20065c8 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -211,6 +211,7 @@ struct hwq {
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
u32 index; /* Index of this hwq */
+ int num_irqs; /* Number of interrupts requested for context */
struct list_head pending_cmds; /* Commands pending completion */
atomic_t hsq_credits;
@@ -223,6 +224,7 @@ struct hwq {
u64 *hrrq_end;
u64 *hrrq_curr;
bool toggle;
+ bool hrrq_online;
s64 room;
@@ -231,13 +233,14 @@ struct hwq {
struct afu {
struct hwq hwqs[CXLFLASH_MAX_HWQS];
- int (*send_cmd)(struct afu *, struct afu_cmd *);
- int (*context_reset)(struct hwq *);
+ int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd);
+ int (*context_reset)(struct hwq *hwq);
/* AFU HW */
struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
atomic_t cmds_active; /* Number of currently active AFU commands */
+ struct mutex sync_active; /* Mutex to serialize AFU commands */
u64 hb;
u32 internal_lun; /* User-desired LUN mode for this AFU */
@@ -272,6 +275,11 @@ static inline bool afu_has_cap(struct afu *afu, u64 cap)
return afu_cap & cap;
}
+static inline bool afu_is_ocxl_lisn(struct afu *afu)
+{
+ return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN);
+}
+
static inline bool afu_is_afu_debug(struct afu *afu)
{
return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
index db1cadad5c5d..b42da88386bd 100644
--- a/drivers/scsi/cxlflash/cxl_hw.c
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -49,6 +49,12 @@ static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
cxl_unmap_afu_irq(ctx_cookie, num, cookie);
}
+static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
+{
+ /* Dummy fop for cxl */
+ return 0;
+}
+
static int cxlflash_start_context(void *ctx_cookie)
{
return cxl_start_context(ctx_cookie, 0, NULL);
@@ -110,6 +116,11 @@ static void *cxlflash_create_afu(struct pci_dev *dev)
return cxl_pci_to_afu(dev);
}
+static void cxlflash_destroy_afu(void *afu)
+{
+ /* Dummy fop for cxl */
+}
+
static struct file *cxlflash_get_fd(void *ctx_cookie,
struct file_operations *fops, int *fd)
{
@@ -148,6 +159,7 @@ const struct cxlflash_backend_ops cxlflash_cxl_ops = {
.process_element = cxlflash_process_element,
.map_afu_irq = cxlflash_map_afu_irq,
.unmap_afu_irq = cxlflash_unmap_afu_irq,
+ .get_irq_objhndl = cxlflash_get_irq_objhndl,
.start_context = cxlflash_start_context,
.stop_context = cxlflash_stop_context,
.afu_reset = cxlflash_afu_reset,
@@ -160,6 +172,7 @@ const struct cxlflash_backend_ops cxlflash_cxl_ops = {
.allocate_afu_irqs = cxlflash_allocate_afu_irqs,
.free_afu_irqs = cxlflash_free_afu_irqs,
.create_afu = cxlflash_create_afu,
+ .destroy_afu = cxlflash_destroy_afu,
.get_fd = cxlflash_get_fd,
.fops_get_context = cxlflash_fops_get_context,
.start_work = cxlflash_start_work,
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index 4d232e271af6..edea1255fdab 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -12,9 +12,11 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <misc/cxl.h>
#include <asm/unaligned.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index d8fe7ab870b8..6637116529aa 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -19,8 +19,6 @@
#include <asm/unaligned.h>
-#include <misc/cxl.h>
-
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>
@@ -339,8 +337,8 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
- dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
- cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
+ dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
+ __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
return rc;
}
@@ -473,6 +471,7 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
struct afu_cmd *cmd = NULL;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
+ bool needs_deletion = false;
char *buf = NULL;
ulong lock_flags;
int rc = 0;
@@ -527,6 +526,7 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
if (!to) {
dev_err(dev, "%s: TMF timed out\n", __func__);
rc = -ETIMEDOUT;
+ needs_deletion = true;
} else if (cmd->cmd_aborted) {
dev_err(dev, "%s: TMF aborted\n", __func__);
rc = -EAGAIN;
@@ -537,6 +537,12 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
}
cfg->tmf_active = false;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
+
+ if (needs_deletion) {
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+ list_del(&cmd->list);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
+ }
out:
kfree(buf);
return rc;
@@ -608,6 +614,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
rc = 0;
goto out;
default:
+ atomic_inc(&afu->cmds_active);
break;
}
@@ -633,6 +640,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
rc = afu->send_cmd(afu, cmd);
+ atomic_dec(&afu->cmds_active);
out:
return rc;
}
@@ -793,6 +801,10 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
hwq->ctx_cookie = NULL;
+ spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
+ hwq->hrrq_online = false;
+ spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
+
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
flush_pending_cmds(hwq);
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
@@ -946,9 +958,9 @@ static void cxlflash_remove(struct pci_dev *pdev)
return;
}
- /* If a Task Management Function is active, wait for it to complete
- * before continuing with remove.
- */
+ /* Yield to running recovery threads before continuing with remove */
+ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
+ cfg->state != STATE_PROBING);
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
wait_event_interruptible_lock_irq(cfg->tmf_waitq,
@@ -971,6 +983,7 @@ static void cxlflash_remove(struct pci_dev *pdev)
case INIT_STATE_AFU:
term_afu(cfg);
case INIT_STATE_PCI:
+ cfg->ops->destroy_afu(cfg->afu_cookie);
pci_disable_device(pdev);
case INIT_STATE_NONE:
free_mem(cfg);
@@ -1303,7 +1316,10 @@ static void afu_err_intr_init(struct afu *afu)
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
- writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
+ reg = readq_be(&hwq->host_map->ctx_ctrl);
+ WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
+ reg |= SISL_MSI_SYNC_ERROR;
+ writeq_be(reg, &hwq->host_map->ctx_ctrl);
writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
}
}
@@ -1463,6 +1479,12 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
+ /* Silently drop spurious interrupts when queue is not online */
+ if (!hwq->hrrq_online) {
+ spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
+ return IRQ_HANDLED;
+ }
+
if (afu_is_irqpoll_enabled(afu)) {
irq_poll_sched(&hwq->irqpoll);
spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
@@ -1752,6 +1774,8 @@ static int init_global(struct cxlflash_cfg *cfg)
u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
int i = 0, num_ports = 0;
int rc = 0;
+ int j;
+ void *ctx;
u64 reg;
rc = read_vpd(cfg, &wwpn[0]);
@@ -1767,6 +1791,7 @@ static int init_global(struct cxlflash_cfg *cfg)
writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
+ hwq->hrrq_online = true;
if (afu_is_sq_cmd_mode(afu)) {
writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
@@ -1812,6 +1837,25 @@ static int init_global(struct cxlflash_cfg *cfg)
msleep(100);
}
+ if (afu_is_ocxl_lisn(afu)) {
+ /* Set up the LISN effective address for each master */
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
+ ctx = hwq->ctx_cookie;
+
+ for (j = 0; j < hwq->num_irqs; j++) {
+ reg = cfg->ops->get_irq_objhndl(ctx, j);
+ writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
+ }
+
+ reg = hwq->ctx_hndl;
+ writeq_be(SISL_LISN_PASID(reg, reg),
+ &hwq->ctrl_map->lisn_pasid[0]);
+ writeq_be(SISL_LISN_PASID(0UL, reg),
+ &hwq->ctrl_map->lisn_pasid[1]);
+ }
+ }
+
/* Set up master's own CTX_CAP to allow real mode, host translation */
/* tables, afu cmds and read/write GSCSI cmds. */
/* First, unlock ctx_cap write by reading mbox */
@@ -1911,7 +1955,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
int rc = 0;
enum undo_level level = UNDO_NOOP;
bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
- int num_irqs = is_primary_hwq ? 3 : 2;
+ int num_irqs = hwq->num_irqs;
rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) {
@@ -1965,16 +2009,20 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
+ int num_irqs;
enum undo_level level;
hwq->afu = cfg->afu;
hwq->index = index;
INIT_LIST_HEAD(&hwq->pending_cmds);
- if (index == PRIMARY_HWQ)
+ if (index == PRIMARY_HWQ) {
ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
- else
+ num_irqs = 3;
+ } else {
ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
+ num_irqs = 2;
+ }
if (IS_ERR_OR_NULL(ctx)) {
rc = -ENOMEM;
goto err1;
@@ -1982,6 +2030,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
WARN_ON(hwq->ctx_cookie);
hwq->ctx_cookie = ctx;
+ hwq->num_irqs = num_irqs;
/* Set it up as a master with the CXL */
cfg->ops->set_master(ctx);
@@ -2075,6 +2124,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
+ mutex_init(&afu->sync_active);
afu->num_hwqs = afu->desired_hwqs;
for (i = 0; i < afu->num_hwqs; i++) {
rc = init_mc(cfg, i);
@@ -2254,10 +2304,10 @@ static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = NULL;
struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
+ ulong lock_flags;
char *buf = NULL;
int rc = 0;
int nretry = 0;
- static DEFINE_MUTEX(sync_active);
if (cfg->state != STATE_NORMAL) {
dev_dbg(dev, "%s: Sync not required state=%u\n",
@@ -2265,7 +2315,7 @@ static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
return 0;
}
- mutex_lock(&sync_active);
+ mutex_lock(&afu->sync_active);
atomic_inc(&afu->cmds_active);
buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
if (unlikely(!buf)) {
@@ -2299,6 +2349,11 @@ retry:
case -ETIMEDOUT:
rc = afu->context_reset(hwq);
if (rc) {
+ /* Delete the command from pending_cmds list */
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+ list_del(&cmd->list);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
+
cxlflash_schedule_async_reset(cfg);
break;
}
@@ -2315,7 +2370,7 @@ retry:
*rcb->ioasa = cmd->sa;
out:
atomic_dec(&afu->cmds_active);
- mutex_unlock(&sync_active);
+ mutex_unlock(&afu->sync_active);
kfree(buf);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
@@ -3138,7 +3193,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
- CXLFLASH_NOTIFY_SHUTDOWN };
+ (CXLFLASH_NOTIFY_SHUTDOWN |
+ CXLFLASH_OCXL_DEV) };
/*
* PCI device binding table
@@ -3649,8 +3705,9 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
- cfg->ops = &cxlflash_cxl_ops;
cfg->cxl_fops = cxlflash_cxl_fops;
+ cfg->ops = cxlflash_assign_ops(ddv);
+ WARN_ON_ONCE(!cfg->ops);
/*
* Promoted LUNs move to the top of the LUN table. The rest stay on
@@ -3681,8 +3738,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg);
- cfg->afu_cookie = cfg->ops->create_afu(pdev);
-
rc = init_pci(cfg);
if (rc) {
dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
@@ -3690,6 +3745,12 @@ static int cxlflash_probe(struct pci_dev *pdev,
}
cfg->init_state = INIT_STATE_PCI;
+ cfg->afu_cookie = cfg->ops->create_afu(pdev);
+ if (unlikely(!cfg->afu_cookie)) {
+ dev_err(dev, "%s: create_afu failed\n", __func__);
+ goto out_remove;
+ }
+
rc = init_afu(cfg);
if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index ba0108a7a9c2..2a3977823812 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -20,6 +20,8 @@
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
+#include "backend.h"
+
#define CXLFLASH_NAME "cxlflash"
#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
#define CXLFLASH_MAX_ADAPTERS 32
@@ -97,8 +99,27 @@ struct dev_dependent_vals {
u64 flags;
#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL
+#define CXLFLASH_OCXL_DEV 0x0000000000000004ULL
};
+/**
+ * cxlflash_assign_ops() - select the transport backend for a device
+ * @ddv: Device dependent values carrying the CXLFLASH_OCXL_DEV flag.
+ *
+ * Picks the OCXL backend when the device table flags the card as an
+ * OCXL device (and CONFIG_OCXL is built), otherwise the cxl backend
+ * (when CONFIG_CXL is built).
+ *
+ * NOTE(review): returns NULL when the matching transport support is
+ * not compiled in; the probe path only WARNs on NULL before using the
+ * ops - confirm Kconfig guarantees at least one backend is enabled.
+ *
+ * Return: backend ops on success, NULL when no backend is available
+ */
+static inline const struct cxlflash_backend_ops *
+cxlflash_assign_ops(struct dev_dependent_vals *ddv)
+{
+ const struct cxlflash_backend_ops *ops = NULL;
+
+#ifdef CONFIG_OCXL
+ if (ddv->flags & CXLFLASH_OCXL_DEV)
+ ops = &cxlflash_ocxl_ops;
+#endif
+
+#ifdef CONFIG_CXL
+ if (!(ddv->flags & CXLFLASH_OCXL_DEV))
+ ops = &cxlflash_cxl_ops;
+#endif
+
+ return ops;
+}
+
struct asyc_intr_info {
u64 status;
char *desc;
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
new file mode 100644
index 000000000000..0a95b5f25380
--- /dev/null
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -0,0 +1,1436 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/file.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/poll.h>
+#include <linux/sched/signal.h>
+
+#include <misc/ocxl.h>
+
+#include <uapi/misc/cxl.h>
+
+#include "backend.h"
+#include "ocxl_hw.h"
+
+/*
+ * Pseudo-filesystem to allocate inodes.
+ */
+
+#define OCXLFLASH_FS_MAGIC 0x1697698f
+
+static int ocxlflash_fs_cnt;
+static struct vfsmount *ocxlflash_vfs_mount;
+
+static const struct dentry_operations ocxlflash_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+/*
+ * ocxlflash_fs_mount() - mount the pseudo-filesystem
+ * @fs_type: File system type.
+ * @flags: Flags for the filesystem.
+ * @dev_name: Device name associated with the filesystem.
+ * @data: Data pointer.
+ *
+ * Backs the anonymous inodes/files handed out by ocxlflash_getfile();
+ * mount_pseudo() creates an internal-only superblock.
+ *
+ * Return: pointer to the directory entry structure
+ */
+static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data)
+{
+ return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
+ OCXLFLASH_FS_MAGIC);
+}
+
+static struct file_system_type ocxlflash_fs_type = {
+ .name = "ocxlflash",
+ .owner = THIS_MODULE,
+ .mount = ocxlflash_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+/*
+ * ocxlflash_release_mapping() - release the memory mapping
+ * @ctx: Context whose mapping is to be released.
+ *
+ * Drops one pseudo-filesystem pin; presumably paired with the
+ * simple_pin_fs() taken in ocxlflash_getfile() when the context
+ * mapping was established - confirm at the site that sets ctx->mapping.
+ * Safe to call when no mapping exists.
+ */
+static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
+{
+ if (ctx->mapping)
+ simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
+ ctx->mapping = NULL;
+}
+
+/*
+ * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
+ * @dev: Generic device of the host.
+ * @name: Name of the pseudo filesystem.
+ * @fops: File operations.
+ * @priv: Private data stored in file->private_data.
+ *
+ * @flags: Flags for the file (O_ACCMODE/O_NONBLOCK are honored).
+ *
+ * Pins the ocxlflash pseudo-filesystem, creates an anonymous inode and
+ * a pseudo dentry, and wraps them in a struct file. On any failure all
+ * partially acquired resources are unwound in reverse order.
+ *
+ * Return: pointer to the file on success, ERR_PTR on failure
+ */
+static struct file *ocxlflash_getfile(struct device *dev, const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags)
+{
+ struct qstr this;
+ struct path path;
+ struct file *file;
+ struct inode *inode = NULL;
+ int rc;
+
+ if (fops->owner && !try_module_get(fops->owner)) {
+ dev_err(dev, "%s: Owner does not exist\n", __func__);
+ rc = -ENOENT;
+ goto err1;
+ }
+
+ rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
+ &ocxlflash_fs_cnt);
+ if (unlikely(rc < 0)) {
+ dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
+ __func__, rc);
+ goto err2;
+ }
+
+ inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+ dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
+ __func__, rc);
+ goto err3;
+ }
+
+ this.name = name;
+ this.len = strlen(name);
+ this.hash = 0;
+ path.dentry = d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this);
+ if (!path.dentry) {
+ dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__);
+ rc = -ENOMEM;
+ goto err4;
+ }
+
+ path.mnt = mntget(ocxlflash_vfs_mount);
+ d_instantiate(path.dentry, inode);
+
+ file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ if (IS_ERR(file)) {
+ rc = PTR_ERR(file);
+ dev_err(dev, "%s: alloc_file failed rc=%d\n",
+ __func__, rc);
+ /*
+ * The inode reference now belongs to the instantiated
+ * dentry; path_put() releases both the dentry (which
+ * iputs the inode) and the mount, so jump past the bare
+ * iput() to avoid a double release of the inode.
+ */
+ path_put(&path);
+ goto err3;
+ }
+
+ file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+ file->private_data = priv;
+out:
+ return file;
+err4:
+ iput(inode);
+err3:
+ simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
+err2:
+ module_put(fops->owner);
+err1:
+ file = ERR_PTR(rc);
+ goto out;
+}
+
+/**
+ * ocxlflash_psa_map() - map the process specific MMIO space
+ * @ctx_cookie: Adapter context for which the mapping needs to be done.
+ *
+ * The physical base and size (psn_phys/psn_size) are established by
+ * start_context(), hence the STARTED state requirement.
+ *
+ * Return: MMIO pointer of the mapped region, NULL if not started or
+ * the mapping fails
+ */
+static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+ struct device *dev = ctx->hw_afu->dev;
+
+ mutex_lock(&ctx->state_mutex);
+ if (ctx->state != STARTED) {
+ dev_err(dev, "%s: Context not started, state=%d\n", __func__,
+ ctx->state);
+ mutex_unlock(&ctx->state_mutex);
+ return NULL;
+ }
+ mutex_unlock(&ctx->state_mutex);
+
+ return ioremap(ctx->psn_phys, ctx->psn_size);
+}
+
+/**
+ * ocxlflash_psa_unmap() - unmap the process specific MMIO space
+ * @addr: MMIO pointer to unmap.
+ *
+ * Counterpart of ocxlflash_psa_map().
+ */
+static void ocxlflash_psa_unmap(void __iomem *addr)
+{
+ iounmap(addr);
+}
+
+/**
+ * ocxlflash_process_element() - get process element of the adapter context
+ * @ctx_cookie: Adapter context associated with the process element.
+ *
+ * The process element is the context's IDR-allocated index (see
+ * ocxlflash_dev_context_init()).
+ *
+ * Return: process element of the adapter context
+ */
+static int ocxlflash_process_element(void *ctx_cookie)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+
+ return ctx->pe;
+}
+
+/**
+ * afu_map_irq() - map the interrupt of the adapter context
+ * @flags: Flags. Currently unused by this implementation.
+ * @ctx: Adapter context.
+ * @num: Per-context AFU interrupt number.
+ * @handler: Interrupt handler to register.
+ * @cookie: Interrupt handler private data.
+ * @name: Name of the interrupt.
+ *
+ * Creates a virtual IRQ mapping for the hardware interrupt allocated by
+ * alloc_afu_irqs(), registers @handler on it and maps the per-interrupt
+ * trigger page. Resources are unwound in reverse order on failure.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
+ irq_handler_t handler, void *cookie, char *name)
+{
+ struct ocxl_hw_afu *afu = ctx->hw_afu;
+ struct device *dev = afu->dev;
+ struct ocxlflash_irqs *irq;
+ void __iomem *vtrig;
+ u32 virq;
+ int rc = 0;
+
+ if (num < 0 || num >= ctx->num_irqs) {
+ dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
+ rc = -ENOENT;
+ goto out;
+ }
+
+ irq = &ctx->irqs[num];
+ /* NULL domain selects the default host interrupt domain */
+ virq = irq_create_mapping(NULL, irq->hwirq);
+ if (unlikely(!virq)) {
+ dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = request_irq(virq, handler, 0, name, cookie);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
+ goto err1;
+ }
+
+ /* Map the trigger page so the AFU interrupt can be fired via MMIO */
+ vtrig = ioremap(irq->ptrig, PAGE_SIZE);
+ if (unlikely(!vtrig)) {
+ dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ irq->virq = virq;
+ irq->vtrig = vtrig;
+out:
+ return rc;
+err2:
+ free_irq(virq, cookie);
+err1:
+ irq_dispose_mapping(virq);
+ goto out;
+}
+
+/**
+ * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
+ * @ctx_cookie: Adapter context.
+ * @num: Per-context AFU interrupt number.
+ * @handler: Interrupt handler to register.
+ * @cookie: Interrupt handler private data.
+ * @name: Name of the interrupt.
+ *
+ * Backend-ops wrapper around afu_map_irq() with no flags.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
+ irq_handler_t handler, void *cookie,
+ char *name)
+{
+ return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
+}
+
+/**
+ * afu_unmap_irq() - unmap the interrupt
+ * @flags: Flags. Currently unused by this implementation.
+ * @ctx: Adapter context.
+ * @num: Per-context AFU interrupt number.
+ * @cookie: Interrupt handler private data.
+ *
+ * Reverses afu_map_irq(): unmaps the trigger page, frees the handler
+ * and disposes of the virtual IRQ mapping, then clears the bookkeeping
+ * entry so the slot reads as unmapped.
+ */
+static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
+ void *cookie)
+{
+ struct ocxl_hw_afu *afu = ctx->hw_afu;
+ struct device *dev = afu->dev;
+ struct ocxlflash_irqs *irq;
+
+ if (num < 0 || num >= ctx->num_irqs) {
+ dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
+ return;
+ }
+
+ irq = &ctx->irqs[num];
+ if (irq->vtrig)
+ iounmap(irq->vtrig);
+
+ /* Only tear down the virq if the mapping still exists (idempotent) */
+ if (irq_find_mapping(NULL, irq->hwirq)) {
+ free_irq(irq->virq, cookie);
+ irq_dispose_mapping(irq->virq);
+ }
+
+ memset(irq, 0, sizeof(*irq));
+}
+
+/**
+ * ocxlflash_unmap_afu_irq() - unmap the interrupt
+ * @ctx_cookie: Adapter context.
+ * @num: Per-context AFU interrupt number.
+ * @cookie: Interrupt handler private data.
+ *
+ * Backend-ops wrapper around afu_unmap_irq() with no flags.
+ */
+static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
+{
+ return afu_unmap_irq(0, ctx_cookie, num, cookie);
+}
+
+/**
+ * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
+ * @ctx_cookie: Context associated with the interrupt.
+ * @irq: Interrupt number.
+ *
+ * The handle is the kernel virtual address of the trigger page mapped
+ * by afu_map_irq(); 0 is returned for an out-of-range or unmapped irq.
+ *
+ * Return: effective address of the mapped region
+ */
+static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+
+ if (irq < 0 || irq >= ctx->num_irqs)
+ return 0;
+
+ return (__force u64)ctx->irqs[irq].vtrig;
+}
+
+/**
+ * ocxlflash_xsl_fault() - callback when translation error is triggered
+ * @data: Private data provided at callback registration, the context.
+ * @addr: Address that triggered the error.
+ * @dsisr: Value of dsisr register.
+ *
+ * Registered with ocxl_link_add_pe() in start_context(). Records the
+ * fault in the context and wakes any waiters so the fault can be
+ * surfaced to the consumer of ctx->wq.
+ */
+static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
+{
+ struct ocxlflash_context *ctx = data;
+
+ spin_lock(&ctx->slock);
+ ctx->fault_addr = addr;
+ ctx->fault_dsisr = dsisr;
+ ctx->pending_fault = true;
+ spin_unlock(&ctx->slock);
+
+ wake_up_all(&ctx->wq);
+}
+
+/**
+ * start_context() - local routine to start a context
+ * @ctx: Adapter context to be started.
+ *
+ * Assign the context specific MMIO space, add and enable the PE.
+ *
+ * Master contexts get the global MMIO region; normal contexts get a
+ * per-process slice of the PP MMIO region indexed by the PE.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int start_context(struct ocxlflash_context *ctx)
+{
+ struct ocxl_hw_afu *afu = ctx->hw_afu;
+ struct ocxl_afu_config *acfg = &afu->acfg;
+ void *link_token = afu->link_token;
+ struct device *dev = afu->dev;
+ bool master = ctx->master;
+ struct mm_struct *mm;
+ int rc = 0;
+ u32 pid;
+
+ mutex_lock(&ctx->state_mutex);
+ if (ctx->state != OPENED) {
+ dev_err(dev, "%s: Context state invalid, state=%d\n",
+ __func__, ctx->state);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (master) {
+ ctx->psn_size = acfg->global_mmio_size;
+ ctx->psn_phys = afu->gmmio_phys;
+ } else {
+ ctx->psn_size = acfg->pp_mmio_stride;
+ ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
+ }
+
+ /* pid and mm not set for master contexts */
+ if (master) {
+ pid = 0;
+ mm = NULL;
+ } else {
+ /* NOTE(review): assumes caller runs in a user task with a
+ * valid current->mm; no reference is taken on the mm here -
+ * confirm lifetime is covered by the PE removal path.
+ */
+ pid = current->mm->context.id;
+ mm = current->mm;
+ }
+
+ rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
+ ocxlflash_xsl_fault, ctx);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ ctx->state = STARTED;
+out:
+ mutex_unlock(&ctx->state_mutex);
+ return rc;
+}
+
+/**
+ * ocxlflash_start_context() - start a kernel context
+ * @ctx_cookie: Adapter context to be started.
+ *
+ * Backend-ops wrapper around start_context().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_start_context(void *ctx_cookie)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+
+ return start_context(ctx);
+}
+
+/**
+ * ocxlflash_stop_context() - stop a context
+ * @ctx_cookie: Adapter context to be stopped.
+ *
+ * Marks the context CLOSED up front (under the state mutex) so no new
+ * users start it, then terminates the PASID and removes the PE. A
+ * context that never reached STARTED has nothing to tear down.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_stop_context(void *ctx_cookie)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+ struct ocxl_hw_afu *afu = ctx->hw_afu;
+ struct ocxl_afu_config *acfg = &afu->acfg;
+ struct pci_dev *pdev = afu->pdev;
+ struct device *dev = afu->dev;
+ enum ocxlflash_ctx_state state;
+ int rc = 0;
+
+ mutex_lock(&ctx->state_mutex);
+ state = ctx->state;
+ ctx->state = CLOSED;
+ mutex_unlock(&ctx->state_mutex);
+ if (state != STARTED)
+ goto out;
+
+ rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
+ ctx->pe);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
+ __func__, rc);
+ /* If EBUSY, PE could be referenced in future by the AFU */
+ if (rc == -EBUSY)
+ goto out;
+ /* Other errors fall through: still attempt PE removal */
+ }
+
+ rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/**
+ * ocxlflash_afu_reset() - reset the AFU
+ * @ctx_cookie: Adapter context.
+ *
+ * Return: 0 always (not yet implemented by the OCXL transport)
+ */
+static int ocxlflash_afu_reset(void *ctx_cookie)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+ struct device *dev = ctx->hw_afu->dev;
+
+ /* Pending implementation from OCXL transport services */
+ dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
+
+ /* Silently return success until it is implemented */
+ return 0;
+}
+
+/**
+ * ocxlflash_set_master() - sets the context as master
+ * @ctx_cookie: Adapter context to set as master.
+ *
+ * A master context maps the global MMIO space instead of a per-process
+ * slice (see start_context()).
+ */
+static void ocxlflash_set_master(void *ctx_cookie)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+
+ ctx->master = true;
+}
+
+/**
+ * ocxlflash_get_context() - obtains the context associated with the host
+ * @pdev: PCI device associated with the host. Unused here.
+ * @afu_cookie: Hardware AFU associated with the host.
+ *
+ * Return: returns the pointer to host adapter context
+ */
+static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
+{
+ struct ocxl_hw_afu *afu = afu_cookie;
+
+ return afu->ocxl_ctx;
+}
+
+/**
+ * ocxlflash_dev_context_init() - allocate and initialize an adapter context
+ * @pdev: PCI device associated with the host. Unused here.
+ * @afu_cookie: Hardware AFU associated with the host.
+ *
+ * The IDR-allocated index (bounded by the AFU's max_pasid) becomes the
+ * context's process element. The context starts in the OPENED state.
+ *
+ * Return: returns the adapter context on success, ERR_PTR on failure
+ */
+static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
+{
+ struct ocxl_hw_afu *afu = afu_cookie;
+ struct device *dev = afu->dev;
+ struct ocxlflash_context *ctx;
+ int rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (unlikely(!ctx)) {
+ dev_err(dev, "%s: Context allocation failed\n", __func__);
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ idr_preload(GFP_KERNEL);
+ rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
+ idr_preload_end();
+ if (unlikely(rc < 0)) {
+ dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
+ goto err2;
+ }
+
+ spin_lock_init(&ctx->slock);
+ init_waitqueue_head(&ctx->wq);
+ mutex_init(&ctx->state_mutex);
+
+ ctx->state = OPENED;
+ ctx->pe = rc;
+ ctx->master = false;
+ ctx->mapping = NULL;
+ ctx->hw_afu = afu;
+ ctx->irq_bitmap = 0;
+ ctx->pending_irq = false;
+ ctx->pending_fault = false;
+out:
+ return ctx;
+err2:
+ kfree(ctx);
+err1:
+ ctx = ERR_PTR(rc);
+ goto out;
+}
+
+/**
+ * ocxlflash_release_context() - releases an adapter context
+ * @ctx_cookie: Adapter context to be released. May be NULL (no-op).
+ *
+ * Refuses to release a context that is still in use (state >= STARTED,
+ * relying on the state enum ordering); the caller must stop it first.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_release_context(void *ctx_cookie)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+ struct device *dev;
+ int rc = 0;
+
+ if (!ctx)
+ goto out;
+
+ dev = ctx->hw_afu->dev;
+ mutex_lock(&ctx->state_mutex);
+ if (ctx->state >= STARTED) {
+ dev_err(dev, "%s: Context in use, state=%d\n", __func__,
+ ctx->state);
+ mutex_unlock(&ctx->state_mutex);
+ rc = -EBUSY;
+ goto out;
+ }
+ mutex_unlock(&ctx->state_mutex);
+
+ idr_remove(&ctx->hw_afu->idr, ctx->pe);
+ ocxlflash_release_mapping(ctx);
+ kfree(ctx);
+out:
+ return rc;
+}
+
+/**
+ * ocxlflash_perst_reloads_same_image() - sets the image reload policy
+ * @afu_cookie: Hardware AFU associated with the host.
+ * @image: Whether to load the same image on PERST.
+ *
+ * Only records the policy; consumers of perst_same_image act on it.
+ */
+static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
+{
+ struct ocxl_hw_afu *afu = afu_cookie;
+
+ afu->perst_same_image = image;
+}
+
+/**
+ * ocxlflash_read_adapter_vpd() - reads the adapter VPD
+ * @pdev: PCI device associated with the host.
+ * @buf: Buffer to get the VPD data.
+ * @count: Size of buffer (maximum bytes that can be read).
+ *
+ * Thin wrapper over the PCI core VPD accessor, reading from offset 0.
+ *
+ * Return: size of VPD on success, -errno on failure
+ */
+static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
+ size_t count)
+{
+ return pci_read_vpd(pdev, 0, count, buf);
+}
+
+/**
+ * free_afu_irqs() - internal service to free interrupts
+ * @ctx: Adapter context.
+ *
+ * Releases every hardware interrupt previously obtained by
+ * alloc_afu_irqs() and frees the per-context bookkeeping array.
+ * Logs and returns when no interrupts were allocated.
+ */
+static void free_afu_irqs(struct ocxlflash_context *ctx)
+{
+ struct ocxl_hw_afu *afu = ctx->hw_afu;
+ struct device *dev = afu->dev;
+ int i;
+
+ if (!ctx->irqs) {
+ dev_err(dev, "%s: Interrupts not allocated\n", __func__);
+ return;
+ }
+
+ /*
+ * Valid entries are irqs[0..num_irqs-1]; starting the walk at
+ * num_irqs would read one element past the kcalloc'd array and
+ * free a garbage hwirq.
+ */
+ for (i = ctx->num_irqs - 1; i >= 0; i--)
+ ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
+
+ kfree(ctx->irqs);
+ ctx->irqs = NULL;
+ ctx->num_irqs = 0;
+}
+
+/**
+ * alloc_afu_irqs() - internal service to allocate interrupts
+ * @ctx: Context associated with the request.
+ * @num: Number of interrupts requested (capped at OCXL_MAX_IRQS).
+ *
+ * Allocates @num hardware interrupts from the OCXL link and records
+ * each hwirq and its trigger-page address; already-obtained interrupts
+ * are released if a later allocation fails.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
+{
+ struct ocxl_hw_afu *afu = ctx->hw_afu;
+ struct device *dev = afu->dev;
+ struct ocxlflash_irqs *irqs;
+ u64 addr;
+ int rc = 0;
+ int hwirq;
+ int i;
+
+ if (ctx->irqs) {
+ dev_err(dev, "%s: Interrupts already allocated\n", __func__);
+ rc = -EEXIST;
+ goto out;
+ }
+
+ if (num > OCXL_MAX_IRQS) {
+ dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
+ if (unlikely(!irqs)) {
+ dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num; i++) {
+ rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
+ __func__, rc);
+ goto err;
+ }
+
+ irqs[i].hwirq = hwirq;
+ irqs[i].ptrig = addr;
+ }
+
+ ctx->irqs = irqs;
+ ctx->num_irqs = num;
+out:
+ return rc;
+err:
+ /* Unwind only the interrupts allocated before the failure */
+ for (i = i-1; i >= 0; i--)
+ ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
+ kfree(irqs);
+ goto out;
+}
+
+/**
+ * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
+ * @ctx_cookie: Context associated with the request.
+ * @num: Number of interrupts requested.
+ *
+ * Backend-ops wrapper around alloc_afu_irqs().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
+{
+ return alloc_afu_irqs(ctx_cookie, num);
+}
+
+/**
+ * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
+ * @ctx_cookie: Adapter context.
+ *
+ * Backend-ops wrapper around free_afu_irqs().
+ */
+static void ocxlflash_free_afu_irqs(void *ctx_cookie)
+{
+ free_afu_irqs(ctx_cookie);
+}
+
+/**
+ * ocxlflash_unconfig_afu() - unconfigure the AFU
+ * @afu: AFU associated with the host.
+ *
+ * Unmaps the global MMIO region mapped by ocxlflash_map_mmio().
+ * NOTE(review): the PCI regions requested in ocxlflash_map_mmio() are
+ * not released here - verify they are released on another path.
+ */
+static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
+{
+ if (afu->gmmio_virt) {
+ iounmap(afu->gmmio_virt);
+ afu->gmmio_virt = NULL;
+ }
+}
+
+/**
+ * ocxlflash_destroy_afu() - destroy the AFU structure
+ * @afu_cookie: AFU to be freed. May be NULL (no-op).
+ *
+ * Releases the default host context, tears down the context IDR,
+ * disables the AFU via its DVSEC control, unmaps MMIO and frees the
+ * structure. NOTE(review): the return of ocxlflash_release_context()
+ * is ignored - a still-started context would leak; confirm callers
+ * stop the context first.
+ */
+static void ocxlflash_destroy_afu(void *afu_cookie)
+{
+ struct ocxl_hw_afu *afu = afu_cookie;
+ int pos;
+
+ if (!afu)
+ return;
+
+ ocxlflash_release_context(afu->ocxl_ctx);
+ idr_destroy(&afu->idr);
+
+ /* Disable the AFU */
+ pos = afu->acfg.dvsec_afu_control_pos;
+ ocxl_config_set_afu_state(afu->pdev, pos, 0);
+
+ ocxlflash_unconfig_afu(afu);
+ kfree(afu);
+}
+
+/**
+ * ocxlflash_config_fn() - configure the host function
+ * @pdev: PCI device associated with the host.
+ * @afu: AFU associated with the host.
+ *
+ * Reads the function DVSEC configuration, programs the acTag range,
+ * sets up the OCXL link and configures the transaction layer. The link
+ * is released if TL configuration fails.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
+{
+ struct ocxl_fn_config *fcfg = &afu->fcfg;
+ struct device *dev = &pdev->dev;
+ u16 base, enabled, supported;
+ int rc = 0;
+
+ /* Read DVSEC config of the function */
+ rc = ocxl_config_read_function(pdev, fcfg);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Check if function has AFUs defined, only 1 per function supported */
+ if (fcfg->max_afu_index >= 0) {
+ afu->is_present = true;
+ if (fcfg->max_afu_index != 0)
+ dev_warn(dev, "%s: Unexpected AFU index value %d\n",
+ __func__, fcfg->max_afu_index);
+ }
+
+ rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ afu->fn_actag_base = base;
+ afu->fn_actag_enabled = enabled;
+
+ ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
+ dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
+ __func__, base, enabled);
+
+ rc = ocxl_link_setup(pdev, 0, &afu->link_token);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
+ __func__, rc);
+ goto err;
+ }
+out:
+ return rc;
+err:
+ ocxl_link_release(pdev, afu->link_token);
+ goto out;
+}
+
+/**
+ * ocxlflash_unconfig_fn() - unconfigure the host function
+ * @pdev: PCI device associated with the host.
+ * @afu: AFU associated with the host.
+ *
+ * Counterpart of ocxlflash_config_fn(): releases the OCXL link.
+ */
+static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
+{
+ ocxl_link_release(pdev, afu->link_token);
+}
+
+/**
+ * ocxlflash_map_mmio() - map the AFU MMIO space
+ * @afu: AFU associated with the host.
+ *
+ * Requests the global and per-process MMIO BARs, maps the global
+ * region and records the physical bases for later per-context mapping
+ * in start_context(). Regions are released in reverse order on error.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
+{
+ struct ocxl_afu_config *acfg = &afu->acfg;
+ struct pci_dev *pdev = afu->pdev;
+ struct device *dev = afu->dev;
+ phys_addr_t gmmio, ppmmio;
+ int rc = 0;
+
+ rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
+ gmmio += acfg->global_mmio_offset;
+
+ rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
+ __func__, rc);
+ goto err1;
+ }
+ ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
+ ppmmio += acfg->pp_mmio_offset;
+
+ /* Only the global region is mapped here; pp slices map per context */
+ afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
+ if (unlikely(!afu->gmmio_virt)) {
+ dev_err(dev, "%s: MMIO mapping failed\n", __func__);
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ afu->gmmio_phys = gmmio;
+ afu->ppmmio_phys = ppmmio;
+out:
+ return rc;
+err2:
+ pci_release_region(pdev, acfg->pp_mmio_bar);
+err1:
+ pci_release_region(pdev, acfg->global_mmio_bar);
+ goto out;
+}
+
+/**
+ * ocxlflash_config_afu() - configure the host AFU
+ * @pdev: PCI device associated with the host.
+ * @afu: AFU associated with the host.
+ *
+ * Must be called _after_ host function configuration.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
+{
+ struct ocxl_afu_config *acfg = &afu->acfg;
+ struct ocxl_fn_config *fcfg = &afu->fcfg;
+ struct device *dev = &pdev->dev;
+ int count;
+ int base;
+ int pos;
+ int rc = 0;
+
+ /* This HW AFU function does not have any AFUs defined */
+ if (!afu->is_present)
+ goto out;
+
+ /* Read AFU config at index 0 */
+ rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Only one AFU per function is supported, so actag_base is same */
+ base = afu->fn_actag_base;
+ count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
+ pos = acfg->dvsec_afu_control_pos;
+
+ ocxl_config_set_afu_actag(pdev, pos, base, count);
+ dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
+ afu->afu_actag_base = base;
+ afu->afu_actag_enabled = count;
+ afu->max_pasid = 1 << acfg->pasid_supported_log;
+
+ ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);
+
+ rc = ocxlflash_map_mmio(afu);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Enable the AFU */
+ ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
+out:
+ return rc;
+}
+
+/**
+ * ocxlflash_create_afu() - create the AFU for OCXL
+ * @pdev: PCI device associated with the host.
+ *
+ * Return: AFU on success, NULL on failure
+ */
+static void *ocxlflash_create_afu(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocxlflash_context *ctx;
+ struct ocxl_hw_afu *afu;
+ int rc;
+
+ afu = kzalloc(sizeof(*afu), GFP_KERNEL);
+ if (unlikely(!afu)) {
+ dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
+ goto out;
+ }
+
+ afu->pdev = pdev;
+ afu->dev = dev;
+ idr_init(&afu->idr);
+
+ rc = ocxlflash_config_fn(pdev, afu);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: Function configuration failed rc=%d\n",
+ __func__, rc);
+ goto err1;
+ }
+
+ rc = ocxlflash_config_afu(pdev, afu);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: AFU configuration failed rc=%d\n",
+ __func__, rc);
+ goto err2;
+ }
+
+ ctx = ocxlflash_dev_context_init(pdev, afu);
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
+ __func__, rc);
+ goto err3;
+ }
+
+ afu->ocxl_ctx = ctx;
+out:
+ return afu;
+err3:
+ ocxlflash_unconfig_afu(afu);
+err2:
+ ocxlflash_unconfig_fn(pdev, afu);
+err1:
+ idr_destroy(&afu->idr);
+ kfree(afu);
+ afu = NULL;
+ goto out;
+}
+
+/**
+ * ctx_event_pending() - check for any event pending on the context
+ * @ctx: Context to be checked.
+ *
+ * Return: true if there is an event pending, false if none pending
+ */
+static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
+{
+ if (ctx->pending_irq || ctx->pending_fault)
+ return true;
+
+ return false;
+}
+
+/**
+ * afu_poll() - poll the AFU for events on the context
+ * @file: File associated with the adapter context.
+ * @poll: Poll structure from the user.
+ *
+ * Return: poll mask
+ */
+static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
+{
+ struct ocxlflash_context *ctx = file->private_data;
+ struct device *dev = ctx->hw_afu->dev;
+ ulong lock_flags;
+ int mask = 0;
+
+ poll_wait(file, &ctx->wq, poll);
+
+ spin_lock_irqsave(&ctx->slock, lock_flags);
+ if (ctx_event_pending(ctx))
+ mask |= POLLIN | POLLRDNORM;
+ else if (ctx->state == CLOSED)
+ mask |= POLLERR;
+ spin_unlock_irqrestore(&ctx->slock, lock_flags);
+
+ dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
+ __func__, ctx->pe, mask);
+
+ return mask;
+}
+
+/**
+ * afu_read() - perform a read on the context for any event
+ * @file: File associated with the adapter context.
+ * @buf: Buffer to receive the data.
+ * @count: Size of buffer (maximum bytes that can be read).
+ * @off: Offset.
+ *
+ * Return: size of the data read on success, -errno on failure
+ */
+static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
+ loff_t *off)
+{
+ struct ocxlflash_context *ctx = file->private_data;
+ struct device *dev = ctx->hw_afu->dev;
+ struct cxl_event event;
+ ulong lock_flags;
+ ssize_t esize;
+ ssize_t rc;
+ int bit;
+ DEFINE_WAIT(event_wait);
+
+ if (*off != 0) {
+ dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
+ __func__, *off);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ spin_lock_irqsave(&ctx->slock, lock_flags);
+
+ for (;;) {
+ prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
+
+ if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ dev_err(dev, "%s: File cannot be blocked on I/O\n",
+ __func__);
+ rc = -EAGAIN;
+ goto err;
+ }
+
+ if (signal_pending(current)) {
+ dev_err(dev, "%s: Signal pending on the process\n",
+ __func__);
+ rc = -ERESTARTSYS;
+ goto err;
+ }
+
+ spin_unlock_irqrestore(&ctx->slock, lock_flags);
+ schedule();
+ spin_lock_irqsave(&ctx->slock, lock_flags);
+ }
+
+ finish_wait(&ctx->wq, &event_wait);
+
+ memset(&event, 0, sizeof(event));
+ event.header.process_element = ctx->pe;
+ event.header.size = sizeof(struct cxl_event_header);
+ if (ctx->pending_irq) {
+ esize = sizeof(struct cxl_event_afu_interrupt);
+ event.header.size += esize;
+ event.header.type = CXL_EVENT_AFU_INTERRUPT;
+
+ bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
+ clear_bit(bit, &ctx->irq_bitmap);
+ event.irq.irq = bit + 1;
+ if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
+ ctx->pending_irq = false;
+ } else if (ctx->pending_fault) {
+ event.header.size += sizeof(struct cxl_event_data_storage);
+ event.header.type = CXL_EVENT_DATA_STORAGE;
+ event.fault.addr = ctx->fault_addr;
+ event.fault.dsisr = ctx->fault_dsisr;
+ ctx->pending_fault = false;
+ }
+
+ spin_unlock_irqrestore(&ctx->slock, lock_flags);
+
+ if (copy_to_user(buf, &event, event.header.size)) {
+ dev_err(dev, "%s: copy_to_user failed\n", __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = event.header.size;
+out:
+ return rc;
+err:
+ finish_wait(&ctx->wq, &event_wait);
+ spin_unlock_irqrestore(&ctx->slock, lock_flags);
+ goto out;
+}
+
+/**
+ * afu_release() - release and free the context
+ * @inode: File inode pointer.
+ * @file: File associated with the context.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_release(struct inode *inode, struct file *file)
+{
+ struct ocxlflash_context *ctx = file->private_data;
+ int i;
+
+ /* Unmap and free the interrupts associated with the context */
+ for (i = ctx->num_irqs; i >= 0; i--)
+ afu_unmap_irq(0, ctx, i, ctx);
+ free_afu_irqs(ctx);
+
+ return ocxlflash_release_context(ctx);
+}
+
+/**
+ * ocxlflash_mmap_fault() - mmap fault handler
+ * @vmf: VM fault associated with current fault.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_mmap_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ocxlflash_context *ctx = vma->vm_file->private_data;
+ struct device *dev = ctx->hw_afu->dev;
+ u64 mmio_area, offset;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+ if (offset >= ctx->psn_size)
+ return VM_FAULT_SIGBUS;
+
+ mutex_lock(&ctx->state_mutex);
+ if (ctx->state != STARTED) {
+ dev_err(dev, "%s: Context not started, state=%d\n",
+ __func__, ctx->state);
+ mutex_unlock(&ctx->state_mutex);
+ return VM_FAULT_SIGBUS;
+ }
+ mutex_unlock(&ctx->state_mutex);
+
+ mmio_area = ctx->psn_phys;
+ mmio_area += offset;
+
+ vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct ocxlflash_vmops = {
+ .fault = ocxlflash_mmap_fault,
+};
+
+/**
+ * afu_mmap() - map the fault handler operations
+ * @file: File associated with the context.
+ * @vma: VM area associated with mapping.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct ocxlflash_context *ctx = file->private_data;
+
+ if ((vma_pages(vma) + vma->vm_pgoff) >
+ (ctx->psn_size >> PAGE_SHIFT))
+ return -EINVAL;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_ops = &ocxlflash_vmops;
+ return 0;
+}
+
+static const struct file_operations ocxl_afu_fops = {
+ .owner = THIS_MODULE,
+ .poll = afu_poll,
+ .read = afu_read,
+ .release = afu_release,
+ .mmap = afu_mmap,
+};
+
+#define PATCH_FOPS(NAME) \
+ do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
+
+/**
+ * ocxlflash_get_fd() - get file descriptor for an adapter context
+ * @ctx_cookie: Adapter context.
+ * @fops: File operations to be associated.
+ * @fd: File descriptor to be returned back.
+ *
+ * Return: pointer to the file on success, ERR_PTR on failure
+ */
+static struct file *ocxlflash_get_fd(void *ctx_cookie,
+ struct file_operations *fops, int *fd)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+ struct device *dev = ctx->hw_afu->dev;
+ struct file *file;
+ int flags, fdtmp;
+ int rc = 0;
+ char *name = NULL;
+
+ /* Only allow one fd per context */
+ if (ctx->mapping) {
+ dev_err(dev, "%s: Context is already mapped to an fd\n",
+ __func__);
+ rc = -EEXIST;
+ goto err1;
+ }
+
+ flags = O_RDWR | O_CLOEXEC;
+
+ /* This code is similar to anon_inode_getfd() */
+ rc = get_unused_fd_flags(flags);
+ if (unlikely(rc < 0)) {
+ dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
+ __func__, rc);
+ goto err1;
+ }
+ fdtmp = rc;
+
+ /* Patch the file ops that are not defined */
+ if (fops) {
+ PATCH_FOPS(poll);
+ PATCH_FOPS(read);
+ PATCH_FOPS(release);
+ PATCH_FOPS(mmap);
+ } else /* Use default ops */
+ fops = (struct file_operations *)&ocxl_afu_fops;
+
+ name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
+ file = ocxlflash_getfile(dev, name, fops, ctx, flags);
+ kfree(name);
+ if (IS_ERR(file)) {
+ rc = PTR_ERR(file);
+ dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
+ __func__, rc);
+ goto err2;
+ }
+
+ ctx->mapping = file->f_mapping;
+ *fd = fdtmp;
+out:
+ return file;
+err2:
+ put_unused_fd(fdtmp);
+err1:
+ file = ERR_PTR(rc);
+ goto out;
+}
+
+/**
+ * ocxlflash_fops_get_context() - get the context associated with the file
+ * @file: File associated with the adapter context.
+ *
+ * Return: pointer to the context
+ */
+static void *ocxlflash_fops_get_context(struct file *file)
+{
+ return file->private_data;
+}
+
+/**
+ * ocxlflash_afu_irq() - interrupt handler for user contexts
+ * @irq: Interrupt number.
+ * @data: Private data provided at interrupt registration, the context.
+ *
+ * Return: Always return IRQ_HANDLED.
+ */
+static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
+{
+ struct ocxlflash_context *ctx = data;
+ struct device *dev = ctx->hw_afu->dev;
+ int i;
+
+ dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
+ __func__, ctx->pe, irq);
+
+ for (i = 0; i < ctx->num_irqs; i++) {
+ if (ctx->irqs[i].virq == irq)
+ break;
+ }
+ if (unlikely(i >= ctx->num_irqs)) {
+ dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
+ goto out;
+ }
+
+ spin_lock(&ctx->slock);
+ set_bit(i - 1, &ctx->irq_bitmap);
+ ctx->pending_irq = true;
+ spin_unlock(&ctx->slock);
+
+ wake_up_all(&ctx->wq);
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * ocxlflash_start_work() - start a user context
+ * @ctx_cookie: Context to be started.
+ * @num_irqs: Number of interrupts requested.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
+{
+ struct ocxlflash_context *ctx = ctx_cookie;
+ struct ocxl_hw_afu *afu = ctx->hw_afu;
+ struct device *dev = afu->dev;
+ char *name;
+ int rc = 0;
+ int i;
+
+ rc = alloc_afu_irqs(ctx, num_irqs);
+ if (unlikely(rc < 0)) {
+ dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
+ goto out;
+ }
+
+ for (i = 0; i < num_irqs; i++) {
+ name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
+ dev_name(dev), ctx->pe, i);
+ rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
+ kfree(name);
+ if (unlikely(rc < 0)) {
+ dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
+ __func__, rc);
+ goto err;
+ }
+ }
+
+ rc = start_context(ctx);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
+ goto err;
+ }
+out:
+ return rc;
+err:
+ for (i = i-1; i >= 0; i--)
+ afu_unmap_irq(0, ctx, i, ctx);
+ free_afu_irqs(ctx);
+ goto out;
+};
+
+/**
+ * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
+ * @file: File installed with adapter file descriptor.
+ * @vma: VM area associated with mapping.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return afu_mmap(file, vma);
+}
+
+/**
+ * ocxlflash_fd_release() - release the context associated with the file
+ * @inode: File inode pointer.
+ * @file: File associated with the adapter context.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_fd_release(struct inode *inode, struct file *file)
+{
+ return afu_release(inode, file);
+}
+
+/* Backend ops to ocxlflash services */
+const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
+ .module = THIS_MODULE,
+ .psa_map = ocxlflash_psa_map,
+ .psa_unmap = ocxlflash_psa_unmap,
+ .process_element = ocxlflash_process_element,
+ .map_afu_irq = ocxlflash_map_afu_irq,
+ .unmap_afu_irq = ocxlflash_unmap_afu_irq,
+ .get_irq_objhndl = ocxlflash_get_irq_objhndl,
+ .start_context = ocxlflash_start_context,
+ .stop_context = ocxlflash_stop_context,
+ .afu_reset = ocxlflash_afu_reset,
+ .set_master = ocxlflash_set_master,
+ .get_context = ocxlflash_get_context,
+ .dev_context_init = ocxlflash_dev_context_init,
+ .release_context = ocxlflash_release_context,
+ .perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
+ .read_adapter_vpd = ocxlflash_read_adapter_vpd,
+ .allocate_afu_irqs = ocxlflash_allocate_afu_irqs,
+ .free_afu_irqs = ocxlflash_free_afu_irqs,
+ .create_afu = ocxlflash_create_afu,
+ .destroy_afu = ocxlflash_destroy_afu,
+ .get_fd = ocxlflash_get_fd,
+ .fops_get_context = ocxlflash_fops_get_context,
+ .start_work = ocxlflash_start_work,
+ .fd_mmap = ocxlflash_fd_mmap,
+ .fd_release = ocxlflash_fd_release,
+};
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
new file mode 100644
index 000000000000..9270d35c4620
--- /dev/null
+++ b/drivers/scsi/cxlflash/ocxl_hw.h
@@ -0,0 +1,77 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define OCXL_MAX_IRQS 4 /* Max interrupts per process */
+
+struct ocxlflash_irqs {
+ int hwirq;
+ u32 virq;
+ u64 ptrig;
+ void __iomem *vtrig;
+};
+
+/* OCXL hardware AFU associated with the host */
+struct ocxl_hw_afu {
+ struct ocxlflash_context *ocxl_ctx; /* Host context */
+ struct pci_dev *pdev; /* PCI device */
+ struct device *dev; /* Generic device */
+ bool perst_same_image; /* Same image loaded on perst */
+
+ struct ocxl_fn_config fcfg; /* DVSEC config of the function */
+ struct ocxl_afu_config acfg; /* AFU configuration data */
+
+ int fn_actag_base; /* Function acTag base */
+ int fn_actag_enabled; /* Function acTag number enabled */
+ int afu_actag_base; /* AFU acTag base */
+ int afu_actag_enabled; /* AFU acTag number enabled */
+
+ phys_addr_t ppmmio_phys; /* Per process MMIO space */
+ phys_addr_t gmmio_phys; /* Global AFU MMIO space */
+ void __iomem *gmmio_virt; /* Global MMIO map */
+
+ void *link_token; /* Link token for the SPA */
+ struct idr idr; /* IDR to manage contexts */
+ int max_pasid; /* Maximum number of contexts */
+ bool is_present; /* Function has AFUs defined */
+};
+
+enum ocxlflash_ctx_state {
+ CLOSED,
+ OPENED,
+ STARTED
+};
+
+struct ocxlflash_context {
+ struct ocxl_hw_afu *hw_afu; /* HW AFU back pointer */
+ struct address_space *mapping; /* Mapping for pseudo filesystem */
+ bool master; /* Whether this is a master context */
+ int pe; /* Process element */
+
+ phys_addr_t psn_phys; /* Process mapping */
+ u64 psn_size; /* Process mapping size */
+
+ spinlock_t slock; /* Protects irq/fault/event updates */
+ wait_queue_head_t wq; /* Wait queue for poll and interrupts */
+ struct mutex state_mutex; /* Mutex to update context state */
+ enum ocxlflash_ctx_state state; /* Context state */
+
+ struct ocxlflash_irqs *irqs; /* Pointer to array of structures */
+ int num_irqs; /* Number of interrupts */
+ bool pending_irq; /* Pending interrupt on the context */
+ ulong irq_bitmap; /* Bits indicating pending irq num */
+
+ u64 fault_addr; /* Address that triggered the fault */
+ u64 fault_dsisr; /* Value of dsisr register at fault */
+ bool pending_fault; /* Pending translation fault */
+};
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index bedf1ce2f33c..874abce35ab4 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -258,23 +258,30 @@ struct sisl_host_map {
* exit since there is no way to tell which
* command caused the error.
*/
-#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
-#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
-#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
-#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_3_EA 0x0400ULL /* b53, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_2_EA 0x0200ULL /* b54, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_1_EA 0x0100ULL /* b55, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID 0x0080ULL /* b56, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID 0x0040ULL /* b57, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID 0x0020ULL /* b58, user error */
+#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
+#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
+#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
+#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
/* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
* Same error in data/LXT/RHT access is reported via IOASA.
*/
-#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can be generated
- * only when AFU auto
- * retry is disabled.
- * If user can determine
- * the command that
- * caused the error, it
- * can be retried.
- */
-#define SISL_ISTATUS_UNMASK (0x001FULL) /* 1 means unmasked */
-#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
+#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can only be
+ * generated when AFU
+ * auto retry is
+ * disabled. If user
+ * can determine the
+ * command that caused
+ * the error, it can
+ * be retried.
+ */
+#define SISL_ISTATUS_UNMASK (0x07FFULL) /* 1 means unmasked */
+#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
__be64 intr_clear;
__be64 intr_mask;
@@ -284,6 +291,7 @@ struct sisl_host_map {
__be64 cmd_room;
__be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
+#define SISL_CTX_CTRL_LISN_MASK (0xFFULL)
__be64 mbox_w; /* restricted use */
__be64 sq_start; /* Submission Queue (R/W): write sequence and */
__be64 sq_end; /* inclusion semantics are the same as RRQ */
@@ -309,6 +317,10 @@ struct sisl_ctrl_map {
#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */
#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */
__be64 mbox_r;
+ __be64 lisn_pasid[2];
+ /* pasid _a arg must be ULL */
+#define SISL_LISN_PASID(_a, _b) (((_a) << 32) | (_b))
+ __be64 lisn_ea[3];
};
/* single copy global regs */
@@ -415,6 +427,7 @@ struct sisl_global_regs {
#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL
#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL
+#define SISL_INTVER_CAP_OCXL_LISN 0x020000000000ULL
};
#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 2fe79df5c73c..e489d89cbb45 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -14,8 +14,9 @@
#include <linux/delay.h>
#include <linux/file.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
#include <linux/syscalls.h>
-#include <misc/cxl.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -269,6 +270,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
int rc = 0;
struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
u64 val;
+ int i;
/* Unlock cap and restrict user to read/write cmds in translated mode */
readq_be(&ctrl_map->mbox_r);
@@ -282,6 +284,19 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
goto out;
}
+ if (afu_is_ocxl_lisn(afu)) {
+ /* Set up the LISN effective address for each interrupt */
+ for (i = 0; i < ctxi->irqs; i++) {
+ val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
+ writeq_be(val, &ctrl_map->lisn_ea[i]);
+ }
+
+ /* Use primary HWQ PASID as identifier for all interrupts */
+ val = hwq->ctx_hndl;
+ writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
+ writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
+ }
+
/* Set up MMIO registers pointing to the RHT */
writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
@@ -974,6 +989,10 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
* theoretically never occur), every call into this routine results
* in a complete freeing of a context.
*
+ * Detaching the LUN is typically an ioctl() operation and the underlying
+ * code assumes that ioctl_rwsem has been acquired as a reader. To support
+ * that design point, the semaphore is acquired and released around detach.
+ *
* Return: 0 on success
*/
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
@@ -1012,9 +1031,11 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
+ down_read(&cfg->ioctl_rwsem);
detach.context_id = ctxi->ctxid;
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
+ up_read(&cfg->ioctl_rwsem);
out_release:
cfg->ops->fd_release(inode, file);
out:
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 5deef57a7834..66e445a17d6c 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -12,8 +12,9 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/interrupt.h>
+#include <linux/pci.h>
#include <linux/syscalls.h>
-#include <misc/cxl.h>
#include <asm/unaligned.h>
#include <asm/bitsperlong.h>
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 5ceea8da7bb6..37de8fb186d7 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1706,7 +1706,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
u32 reply_size = 0;
u32 __user *user_msg = arg;
u32 __user * user_reply = NULL;
- void *sg_list[pHba->sg_tablesize];
+ void **sg_list = NULL;
u32 sg_offset = 0;
u32 sg_count = 0;
int sg_index = 0;
@@ -1748,19 +1748,23 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
msg[2] = 0x40000000; // IOCTL context
msg[3] = adpt_ioctl_to_context(pHba, reply);
if (msg[3] == (u32)-1) {
- kfree(reply);
- return -EBUSY;
+ rcode = -EBUSY;
+ goto free;
}
- memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
+ sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
+ if (!sg_list) {
+ rcode = -ENOMEM;
+ goto free;
+ }
if(sg_offset) {
// TODO add 64 bit API
struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
if (sg_count > pHba->sg_tablesize){
printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
- kfree (reply);
- return -EINVAL;
+ rcode = -EINVAL;
+ goto free;
}
for(i = 0; i < sg_count; i++) {
@@ -1879,7 +1883,6 @@ cleanup:
if (rcode != -ETIME && rcode != -EINTR) {
struct sg_simple_element *sg =
(struct sg_simple_element*) (msg +sg_offset);
- kfree (reply);
while(sg_index) {
if(sg_list[--sg_index]) {
dma_free_coherent(&pHba->pDev->dev,
@@ -1889,6 +1892,10 @@ cleanup:
}
}
}
+
+free:
+ kfree(sg_list);
+ kfree(reply);
return rcode;
}
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 9dffcb28c9b7..9db645dde35e 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -1202,8 +1202,6 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
case ESAS2R_INIT_MSG_START:
case ESAS2R_INIT_MSG_REINIT:
{
- struct timeval now;
- do_gettimeofday(&now);
esas2r_hdebug("CFG init");
esas2r_build_cfg_req(a,
rq,
@@ -1212,7 +1210,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
NULL);
ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
ci->sgl_page_size = cpu_to_le32(sgl_page_size);
- ci->epoch_time = cpu_to_le32(now.tv_sec);
+ /* firmware interface overflows in y2106 */
+ ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
rq->flags |= RF_FAILURE_OK;
a->init_msg = ESAS2R_INIT_MSG_INIT;
break;
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 97623002908f..34bcc8c04ff4 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1849,7 +1849,7 @@ int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
/* allocate a request */
rq = esas2r_alloc_request(a);
if (rq == NULL) {
- esas2r_debug("esas2r_read_vda: out of requestss");
+ esas2r_debug("esas2r_read_vda: out of requests");
return -EBUSY;
}
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index e07eac5be087..c07118617d89 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -283,7 +283,7 @@ MODULE_PARM_DESC(num_requests,
int num_ae_requests = 4;
module_param(num_ae_requests, int, 0);
MODULE_PARM_DESC(num_ae_requests,
- "Number of VDA asynchromous event requests. Default 4.");
+ "Number of VDA asynchronous event requests. Default 4.");
int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
module_param(cmd_per_lun, int, 0);
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index d1153e8e846b..7052a5d45f7f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -136,12 +136,14 @@ struct hisi_sas_phy {
struct hisi_sas_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
+ struct completion *reset_completion;
+ spinlock_t lock;
u64 port_id; /* from hw */
- u64 dev_sas_addr;
u64 frame_rcvd_size;
u8 frame_rcvd[32];
u8 phy_attached;
- u8 reserved[3];
+ u8 in_reset;
+ u8 reserved[2];
u32 phy_type;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
@@ -162,7 +164,7 @@ struct hisi_sas_cq {
struct hisi_sas_dq {
struct hisi_hba *hisi_hba;
- struct hisi_sas_slot *slot_prep;
+ struct list_head list;
spinlock_t lock;
int wr_point;
int id;
@@ -174,15 +176,22 @@ struct hisi_sas_device {
struct completion *completion;
struct hisi_sas_dq *dq;
struct list_head list;
- u64 attached_phy;
enum sas_device_type dev_type;
int device_id;
int sata_idx;
u8 dev_status;
};
+struct hisi_sas_tmf_task {
+ int force_phy;
+ int phy_id;
+ u8 tmf;
+ u16 tag_of_task_to_be_managed;
+};
+
struct hisi_sas_slot {
struct list_head entry;
+ struct list_head delivery;
struct sas_task *task;
struct hisi_sas_port *port;
u64 n_elem;
@@ -192,17 +201,15 @@ struct hisi_sas_slot {
int cmplt_queue_slot;
int idx;
int abort;
+ int ready;
void *buf;
dma_addr_t buf_dma;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
struct work_struct abort_slot;
struct timer_list internal_abort_timer;
-};
-
-struct hisi_sas_tmf_task {
- u8 tmf;
- u16 tag_of_task_to_be_managed;
+ bool is_internal;
+ struct hisi_sas_tmf_task *tmf;
};
struct hisi_sas_hw {
@@ -215,14 +222,13 @@ struct hisi_sas_hw {
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
void (*start_delivery)(struct hisi_sas_dq *dq);
- int (*prep_ssp)(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot, int is_tmf,
- struct hisi_sas_tmf_task *tmf);
- int (*prep_smp)(struct hisi_hba *hisi_hba,
+ void (*prep_ssp)(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot);
+ void (*prep_smp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
- int (*prep_stp)(struct hisi_hba *hisi_hba,
+ void (*prep_stp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
- int (*prep_abort)(struct hisi_hba *hisi_hba,
+ void (*prep_abort)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort);
int (*slot_complete)(struct hisi_hba *hisi_hba,
@@ -245,8 +251,11 @@ struct hisi_sas_hw {
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
u8 reg_index, u8 reg_count, u8 *write_data);
+ void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms);
int max_command_entries;
int complete_hdr_size;
+ struct scsi_host_template *sht;
};
struct hisi_hba {
@@ -273,6 +282,8 @@ struct hisi_hba {
struct workqueue_struct *wq;
int slot_index_count;
+ int last_slot_index;
+ int last_dev_id;
unsigned long *slot_index_tags;
unsigned long reject_stp_links_msk;
@@ -411,7 +422,7 @@ struct hisi_sas_command_table_ssp {
union {
struct {
struct ssp_command_iu task;
- u32 prot[6];
+ u32 prot[7];
};
struct ssp_tmf_iu ssp_task;
struct xfer_rdy_iu xfer_rdy;
@@ -437,10 +448,7 @@ struct hisi_sas_slot_buf_table {
};
extern struct scsi_transport_template *hisi_sas_stt;
-extern struct scsi_host_template *hisi_sas_sht;
-
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
-extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
@@ -454,6 +462,11 @@ extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern int hisi_sas_remove(struct platform_device *pdev);
+extern int hisi_sas_slave_configure(struct scsi_device *sdev);
+extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
+extern void hisi_sas_scan_start(struct Scsi_Host *shost);
+extern struct device_attribute *host_attrs[];
+extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
@@ -465,4 +478,5 @@ extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event);
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
+extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 49c1fa643803..6f562974f8f6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -24,6 +24,9 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
+static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
+ struct domain_device *device);
+static void hisi_sas_dev_gone(struct domain_device *device);
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
@@ -78,22 +81,23 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
case ATA_CMD_STANDBYNOW1:
case ATA_CMD_ZAC_MGMT_OUT:
return HISI_SAS_SATA_PROTOCOL_NONDATA;
+
+ case ATA_CMD_SET_MAX:
+ switch (fis->features) {
+ case ATA_SET_MAX_PASSWD:
+ case ATA_SET_MAX_LOCK:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ case ATA_SET_MAX_PASSWD_DMA:
+ case ATA_SET_MAX_UNLOCK_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+
+ default:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ }
+
default:
{
- if (fis->command == ATA_CMD_SET_MAX) {
- switch (fis->features) {
- case ATA_SET_MAX_PASSWD:
- case ATA_SET_MAX_LOCK:
- return HISI_SAS_SATA_PROTOCOL_PIO;
-
- case ATA_SET_MAX_PASSWD_DMA:
- case ATA_SET_MAX_UNLOCK_DMA:
- return HISI_SAS_SATA_PROTOCOL_DMA;
-
- default:
- return HISI_SAS_SATA_PROTOCOL_NONDATA;
- }
- }
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
return HISI_SAS_SATA_PROTOCOL_PIO;
@@ -134,6 +138,22 @@ int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
+/*
+ * This function assumes linkrate mask fits in 8 bits, which it
+ * does for all HW versions supported.
+ */
+u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
+{
+ u16 rate = 0;
+ int i;
+
+ max -= SAS_LINK_RATE_1_5_GBPS;
+ for (i = 0; i <= max; i++)
+ rate |= 1 << (i * 2);
+ return rate;
+}
+EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
+
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
return device->port->ha->lldd_ha;
@@ -178,11 +198,18 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
unsigned int index;
void *bitmap = hisi_hba->slot_index_tags;
- index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
- if (index >= hisi_hba->slot_index_count)
- return -SAS_QUEUE_FULL;
+ index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
+ hisi_hba->last_slot_index + 1);
+ if (index >= hisi_hba->slot_index_count) {
+ index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
+ 0);
+ if (index >= hisi_hba->slot_index_count)
+ return -SAS_QUEUE_FULL;
+ }
hisi_sas_slot_index_set(hisi_hba, index);
*slot_idx = index;
+ hisi_hba->last_slot_index = index;
+
return 0;
}
@@ -197,6 +224,8 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
+ struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
+ unsigned long flags;
if (task) {
struct device *dev = hisi_hba->dev;
@@ -216,40 +245,43 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
if (slot->buf)
dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
+ spin_lock_irqsave(&dq->lock, flags);
list_del_init(&slot->entry);
+ spin_unlock_irqrestore(&dq->lock, flags);
slot->buf = NULL;
slot->task = NULL;
slot->port = NULL;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot->idx);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
-static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
+static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
- return hisi_hba->hw->prep_smp(hisi_hba, slot);
+ hisi_hba->hw->prep_smp(hisi_hba, slot);
}
-static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot, int is_tmf,
- struct hisi_sas_tmf_task *tmf)
+static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
- return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
+ hisi_hba->hw->prep_ssp(hisi_hba, slot);
}
-static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
+static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
- return hisi_hba->hw->prep_stp(hisi_hba, slot);
+ hisi_hba->hw->prep_stp(hisi_hba, slot);
}
-static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
+static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort)
{
- return hisi_hba->hw->prep_abort(hisi_hba, slot,
+ hisi_hba->hw->prep_abort(hisi_hba, slot,
device_id, abort_flag, tag_to_abort);
}
@@ -269,7 +301,6 @@ static void hisi_sas_slot_abort(struct work_struct *work)
struct scsi_lun lun;
struct device *dev = hisi_hba->dev;
int tag = abort_slot->idx;
- unsigned long flags;
if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
dev_err(dev, "cannot abort slot for non-ssp task\n");
@@ -283,27 +314,29 @@ static void hisi_sas_slot_abort(struct work_struct *work)
hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
/* Do cleanup for this task */
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
if (task->task_done)
task->task_done(task);
}
-static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
- *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
- int *pass)
+static int hisi_sas_task_prep(struct sas_task *task,
+ struct hisi_sas_dq **dq_pointer,
+ bool is_tmf, struct hisi_sas_tmf_task *tmf,
+ int *pass)
{
- struct hisi_hba *hisi_hba = dq->hisi_hba;
struct domain_device *device = task->dev;
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct asd_sas_port *sas_port = device->port;
struct device *dev = hisi_hba->dev;
- int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
- unsigned long flags;
+ int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
+ int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
+ unsigned long flags, flags_dq;
+ struct hisi_sas_dq *dq;
+ int wr_q_index;
if (!sas_port) {
struct task_status_struct *ts = &task->task_status;
@@ -330,6 +363,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
return -ECOMM;
}
+ *dq_pointer = dq = sas_dev->dq;
+
port = to_hisi_sas_port(sas_port);
if (port && !port->port_attached) {
dev_info(dev, "task prep: %s port%d not attach device\n",
@@ -341,6 +376,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
}
if (!sas_protocol_ata(task->task_proto)) {
+ unsigned int req_len, resp_len;
+
if (task->num_scatter) {
n_elem = dma_map_sg(dev, task->scatter,
task->num_scatter, task->data_dir);
@@ -348,31 +385,74 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
rc = -ENOMEM;
goto prep_out;
}
+ } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+ n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ if (!n_elem_req) {
+ rc = -ENOMEM;
+ goto prep_out;
+ }
+ req_len = sg_dma_len(&task->smp_task.smp_req);
+ if (req_len & 0x3) {
+ rc = -EINVAL;
+ goto err_out_dma_unmap;
+ }
+ n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
+ 1, DMA_FROM_DEVICE);
+ if (!n_elem_resp) {
+ rc = -ENOMEM;
+ goto err_out_dma_unmap;
+ }
+ resp_len = sg_dma_len(&task->smp_task.smp_resp);
+ if (resp_len & 0x3) {
+ rc = -EINVAL;
+ goto err_out_dma_unmap;
+ }
}
} else
n_elem = task->num_scatter;
+ if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
+ dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+ n_elem);
+ rc = -EINVAL;
+ goto err_out_dma_unmap;
+ }
+
spin_lock_irqsave(&hisi_hba->lock, flags);
if (hisi_hba->hw->slot_index_alloc)
rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
device);
else
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- if (rc) {
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- goto err_out;
- }
spin_unlock_irqrestore(&hisi_hba->lock, flags);
-
- rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (rc)
- goto err_out_tag;
+ goto err_out_dma_unmap;
- dlvry_queue = dq->id;
- dlvry_queue_slot = dq->wr_point;
slot = &hisi_hba->slot_info[slot_idx];
memset(slot, 0, sizeof(struct hisi_sas_slot));
+ slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
+ GFP_ATOMIC, &slot->buf_dma);
+ if (!slot->buf) {
+ rc = -ENOMEM;
+ goto err_out_tag;
+ }
+
+ spin_lock_irqsave(&dq->lock, flags_dq);
+ wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
+ if (wr_q_index < 0) {
+ spin_unlock_irqrestore(&dq->lock, flags_dq);
+ rc = -EAGAIN;
+ goto err_out_buf;
+ }
+
+ list_add_tail(&slot->delivery, &dq->list);
+ spin_unlock_irqrestore(&dq->lock, flags_dq);
+
+ dlvry_queue = dq->id;
+ dlvry_queue_slot = wr_q_index;
+
slot->idx = slot_idx;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
@@ -381,99 +461,94 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
slot->task = task;
slot->port = port;
+ slot->tmf = tmf;
+ slot->is_internal = is_tmf;
task->lldd_task = slot;
INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
- slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
- GFP_ATOMIC, &slot->buf_dma);
- if (!slot->buf) {
- rc = -ENOMEM;
- goto err_out_slot_buf;
- }
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
- rc = hisi_sas_task_prep_smp(hisi_hba, slot);
+ hisi_sas_task_prep_smp(hisi_hba, slot);
break;
case SAS_PROTOCOL_SSP:
- rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
+ hisi_sas_task_prep_ssp(hisi_hba, slot);
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- rc = hisi_sas_task_prep_ata(hisi_hba, slot);
+ hisi_sas_task_prep_ata(hisi_hba, slot);
break;
default:
dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
task->task_proto);
- rc = -EINVAL;
break;
}
- if (rc) {
- dev_err(dev, "task prep: rc = 0x%x\n", rc);
- goto err_out_buf;
- }
-
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock_irqsave(&dq->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock_irqrestore(&dq->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- dq->slot_prep = slot;
++(*pass);
+ slot->ready = 1;
return 0;
err_out_buf:
dma_pool_free(hisi_hba->buffer_pool, slot->buf,
- slot->buf_dma);
-err_out_slot_buf:
- /* Nothing to be done */
+ slot->buf_dma);
err_out_tag:
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
-err_out:
- dev_err(dev, "task prep: failed[%d]!\n", rc);
- if (!sas_protocol_ata(task->task_proto))
- if (n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+err_out_dma_unmap:
+ if (!sas_protocol_ata(task->task_proto)) {
+ if (task->num_scatter) {
+ dma_unmap_sg(dev, task->scatter, task->num_scatter,
+ task->data_dir);
+ } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+ if (n_elem_req)
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ if (n_elem_resp)
+ dma_unmap_sg(dev, &task->smp_task.smp_resp,
+ 1, DMA_FROM_DEVICE);
+ }
+ }
prep_out:
+ dev_err(dev, "task prep: failed[%d]!\n", rc);
return rc;
}
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
- int is_tmf, struct hisi_sas_tmf_task *tmf)
+ bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
u32 rc;
u32 pass = 0;
unsigned long flags;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
struct device *dev = hisi_hba->dev;
- struct domain_device *device = task->dev;
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_sas_dq *dq = sas_dev->dq;
+ struct hisi_sas_dq *dq = NULL;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
return -EINVAL;
/* protect task_prep and start_delivery sequence */
- spin_lock_irqsave(&dq->lock, flags);
- rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
+ rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
if (rc)
dev_err(dev, "task exec: failed[%d]!\n", rc);
- if (likely(pass))
+ if (likely(pass)) {
+ spin_lock_irqsave(&dq->lock, flags);
hisi_hba->hw->start_delivery(dq);
- spin_unlock_irqrestore(&dq->lock, flags);
+ spin_unlock_irqrestore(&dq->lock, flags);
+ }
return rc;
}
@@ -524,10 +599,12 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = NULL;
unsigned long flags;
+ int last = hisi_hba->last_dev_id;
+ int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
int i;
spin_lock_irqsave(&hisi_hba->lock, flags);
- for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+ for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
int queue = i % hisi_hba->queue_count;
struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
@@ -542,18 +619,57 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
INIT_LIST_HEAD(&hisi_hba->devices[i].list);
break;
}
+ i++;
}
+ hisi_hba->last_dev_id = i;
spin_unlock_irqrestore(&hisi_hba->lock, flags);
return sas_dev;
}
+#define HISI_SAS_SRST_ATA_DISK_CNT 3
+static int hisi_sas_init_device(struct domain_device *device)
+{
+ int rc = TMF_RESP_FUNC_COMPLETE;
+ struct scsi_lun lun;
+ struct hisi_sas_tmf_task tmf_task;
+ int retry = HISI_SAS_SRST_ATA_DISK_CNT;
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+
+ switch (device->dev_type) {
+ case SAS_END_DEVICE:
+ int_to_scsilun(0, &lun);
+
+ tmf_task.tmf = TMF_CLEAR_TASK_SET;
+ rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
+ &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE)
+ hisi_sas_release_task(hisi_hba, device);
+ break;
+ case SAS_SATA_DEV:
+ case SAS_SATA_PM:
+ case SAS_SATA_PM_PORT:
+ case SAS_SATA_PENDING:
+ while (retry-- > 0) {
+ rc = hisi_sas_softreset_ata_disk(device);
+ if (!rc)
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
static int hisi_sas_dev_found(struct domain_device *device)
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev;
struct device *dev = hisi_hba->dev;
+ int rc;
if (hisi_hba->hw->alloc_dev)
sas_dev = hisi_hba->hw->alloc_dev(device);
@@ -576,10 +692,8 @@ static int hisi_sas_dev_found(struct domain_device *device)
for (phy_no = 0; phy_no < phy_num; phy_no++) {
phy = &parent_dev->ex_dev.ex_phy[phy_no];
if (SAS_ADDR(phy->attached_sas_addr) ==
- SAS_ADDR(device->sas_addr)) {
- sas_dev->attached_phy = phy_no;
+ SAS_ADDR(device->sas_addr))
break;
- }
}
if (phy_no == phy_num) {
@@ -587,17 +701,25 @@ static int hisi_sas_dev_found(struct domain_device *device)
"dev:%016llx at ex:%016llx\n",
SAS_ADDR(device->sas_addr),
SAS_ADDR(parent_dev->sas_addr));
- return -EINVAL;
+ rc = -EINVAL;
+ goto err_out;
}
}
dev_info(dev, "dev[%d:%x] found\n",
sas_dev->device_id, sas_dev->dev_type);
+ rc = hisi_sas_init_device(device);
+ if (rc)
+ goto err_out;
return 0;
+
+err_out:
+ hisi_sas_dev_gone(device);
+ return rc;
}
-static int hisi_sas_slave_configure(struct scsi_device *sdev)
+int hisi_sas_slave_configure(struct scsi_device *sdev)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
int ret = sas_slave_configure(sdev);
@@ -609,15 +731,17 @@ static int hisi_sas_slave_configure(struct scsi_device *sdev)
return 0;
}
+EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
-static void hisi_sas_scan_start(struct Scsi_Host *shost)
+void hisi_sas_scan_start(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
hisi_hba->hw->phys_init(hisi_hba);
}
+EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
-static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
+int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
struct sas_ha_struct *sha = &hisi_hba->sha;
@@ -629,6 +753,7 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
sas_drain_work(sha);
return 1;
}
+EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
static void hisi_sas_phyup_work(struct work_struct *work)
{
@@ -803,6 +928,33 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
+static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *r)
+{
+ struct sas_phy_linkrates _r;
+
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ enum sas_linkrate min, max;
+
+ if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = sas_phy->phy->maximum_linkrate;
+ min = r->minimum_linkrate;
+ } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = r->maximum_linkrate;
+ min = sas_phy->phy->minimum_linkrate;
+ } else
+ return;
+
+ _r.maximum_linkrate = max;
+ _r.minimum_linkrate = min;
+
+ hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+ msleep(100);
+ hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
+ hisi_hba->hw->phy_start(hisi_hba, phy_no);
+}
+
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
@@ -826,7 +978,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_SET_LINK_RATE:
- hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
+ hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
break;
case PHY_FUNC_GET_EVENTS:
if (hisi_hba->hw->get_events) {
@@ -990,7 +1142,6 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
int s = sizeof(struct host_to_dev_fis);
- unsigned long flags;
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
@@ -1015,11 +1166,8 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
dev_err(dev, "ata disk reset failed\n");
}
- if (rc == TMF_RESP_FUNC_COMPLETE) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- }
return rc;
}
@@ -1111,12 +1259,103 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
}
}
+static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
+{
+ struct hisi_sas_device *sas_dev;
+ struct domain_device *device;
+ int i;
+
+ for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+ sas_dev = &hisi_hba->devices[i];
+ device = sas_dev->sas_device;
+
+ if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
+ continue;
+
+ hisi_sas_init_device(device);
+ }
+}
+
+static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
+ struct asd_sas_port *sas_port,
+ struct domain_device *device)
+{
+ struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
+ struct ata_port *ap = device->sata_dev.ap;
+ struct device *dev = hisi_hba->dev;
+ int s = sizeof(struct host_to_dev_fis);
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct asd_sas_phy *sas_phy;
+ struct ata_link *link;
+ u8 fis[20] = {0};
+ u32 state;
+
+ state = hisi_hba->hw->get_phys_state(hisi_hba);
+ list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
+ if (!(state & BIT(sas_phy->id)))
+ continue;
+
+ ata_for_each_link(link, ap, EDGE) {
+ int pmp = sata_srst_pmp(link);
+
+ tmf_task.phy_id = sas_phy->id;
+ hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
+ rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
+ &tmf_task);
+ if (rc != TMF_RESP_FUNC_COMPLETE) {
+ dev_err(dev, "phy%d ata reset failed rc=%d\n",
+ sas_phy->id, rc);
+ break;
+ }
+ }
+ }
+}
+
+static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ int port_no, rc, i;
+
+ for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
+ struct domain_device *device = sas_dev->sas_device;
+
+ if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
+ continue;
+
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0)
+ dev_err(dev, "STP reject: abort dev failed %d\n", rc);
+ }
+
+ for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
+ struct hisi_sas_port *port = &hisi_hba->port[port_no];
+ struct asd_sas_port *sas_port = &port->sas_port;
+ struct domain_device *port_dev = sas_port->port_dev;
+ struct domain_device *device;
+
+ if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
+ continue;
+
+ /* Try to find a SATA device */
+ list_for_each_entry(device, &sas_port->dev_list,
+ dev_list_node) {
+ if (dev_is_sata(device)) {
+ hisi_sas_send_ata_reset_each_phy(hisi_hba,
+ sas_port,
+ device);
+ break;
+ }
+ }
+ }
+}
+
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
u32 old_state, state;
- unsigned long flags;
int rc;
if (!hisi_hba->hw->soft_reset)
@@ -1129,6 +1368,11 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
old_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
+ hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
+
+ if (timer_pending(&hisi_hba->timer))
+ del_timer_sync(&hisi_hba->timer);
+
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
rc = hisi_hba->hw->soft_reset(hisi_hba);
if (rc) {
@@ -1137,9 +1381,6 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
scsi_unblock_requests(shost);
goto out;
}
- spin_lock_irqsave(&hisi_hba->lock, flags);
- hisi_sas_release_tasks(hisi_hba);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
@@ -1147,6 +1388,10 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
hisi_sas_refresh_port_id(hisi_hba);
+
+ if (hisi_hba->reject_stp_links_msk)
+ hisi_sas_terminate_stp_reject(hisi_hba);
+ hisi_sas_reset_init_all_devices(hisi_hba);
scsi_unblock_requests(shost);
state = hisi_hba->hw->get_phys_state(hisi_hba);
@@ -1165,20 +1410,25 @@ static int hisi_sas_abort_task(struct sas_task *task)
struct hisi_sas_tmf_task tmf_task;
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
- struct device *dev = hisi_hba->dev;
+ struct hisi_hba *hisi_hba;
+ struct device *dev;
int rc = TMF_RESP_FUNC_FAILED;
unsigned long flags;
- if (!sas_dev) {
- dev_warn(dev, "Device has been removed\n");
+ if (!sas_dev)
return TMF_RESP_FUNC_FAILED;
- }
+ hisi_hba = dev_to_hisi_hba(task->dev);
+ dev = hisi_hba->dev;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
rc = TMF_RESP_FUNC_COMPLETE;
goto out;
}
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
sas_dev->dev_status = HISI_SAS_DEV_EH;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
@@ -1209,11 +1459,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
* will have already been completed
*/
if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
- if (task->lldd_task) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ if (task->lldd_task)
hisi_sas_do_release_task(hisi_hba, task, slot);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- }
}
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
@@ -1235,11 +1482,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
- task->lldd_task) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ task->lldd_task)
hisi_sas_do_release_task(hisi_hba, task, slot);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- }
}
out:
@@ -1254,7 +1498,6 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
struct device *dev = hisi_hba->dev;
struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
- unsigned long flags;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
@@ -1267,11 +1510,8 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
tmf_task.tmf = TMF_ABORT_TASK_SET;
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
- if (rc == TMF_RESP_FUNC_COMPLETE) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- }
return rc;
}
@@ -1289,12 +1529,39 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
- struct sas_phy *phy = sas_get_local_phy(device);
+ struct sas_phy *local_phy = sas_get_local_phy(device);
int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
- rc = sas_phy_reset(phy, reset_type);
- sas_put_local_phy(phy);
- msleep(2000);
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
+ struct hisi_sas_phy *phy = container_of(sas_phy,
+ struct hisi_sas_phy, sas_phy);
+ DECLARE_COMPLETION_ONSTACK(phyreset);
+
+ if (scsi_is_sas_phy_local(local_phy)) {
+ phy->in_reset = 1;
+ phy->reset_completion = &phyreset;
+ }
+
+ rc = sas_phy_reset(local_phy, reset_type);
+ sas_put_local_phy(local_phy);
+
+ if (scsi_is_sas_phy_local(local_phy)) {
+ int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy->lock, flags);
+ phy->reset_completion = NULL;
+ phy->in_reset = 0;
+ spin_unlock_irqrestore(&phy->lock, flags);
+
+ /* report PHY down if timed out */
+ if (!ret)
+ hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
+ } else
+ msleep(2000);
+
return rc;
}
@@ -1304,7 +1571,6 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
- unsigned long flags;
if (sas_dev->dev_status != HISI_SAS_DEV_EH)
return TMF_RESP_FUNC_FAILED;
@@ -1320,11 +1586,9 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
rc = hisi_sas_debug_I_T_nexus_reset(device);
- if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
hisi_sas_release_task(hisi_hba, device);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- }
+
return rc;
}
@@ -1333,7 +1597,6 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- unsigned long flags;
int rc = TMF_RESP_FUNC_FAILED;
sas_dev->dev_status = HISI_SAS_DEV_EH;
@@ -1353,11 +1616,8 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
rc = sas_phy_reset(phy, 1);
- if (rc == 0) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ if (rc == 0)
hisi_sas_release_task(hisi_hba, device);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- }
sas_put_local_phy(phy);
} else {
struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
@@ -1371,11 +1631,8 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
- if (rc == TMF_RESP_FUNC_COMPLETE) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- }
}
out:
if (rc != TMF_RESP_FUNC_COMPLETE)
@@ -1445,7 +1702,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct hisi_sas_dq *dq = sas_dev->dq;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
- unsigned long flags, flags_dq;
+ unsigned long flags, flags_dq = 0;
+ int wr_q_index;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
return -EINVAL;
@@ -1464,16 +1722,28 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
}
spin_unlock_irqrestore(&hisi_hba->lock, flags);
- spin_lock_irqsave(&dq->lock, flags_dq);
- rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
- if (rc)
+ slot = &hisi_hba->slot_info[slot_idx];
+ memset(slot, 0, sizeof(struct hisi_sas_slot));
+
+ slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
+ GFP_ATOMIC, &slot->buf_dma);
+ if (!slot->buf) {
+ rc = -ENOMEM;
goto err_out_tag;
+ }
- dlvry_queue = dq->id;
- dlvry_queue_slot = dq->wr_point;
+ spin_lock_irqsave(&dq->lock, flags_dq);
+ wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
+ if (wr_q_index < 0) {
+ spin_unlock_irqrestore(&dq->lock, flags_dq);
+ rc = -EAGAIN;
+ goto err_out_buf;
+ }
+ list_add_tail(&slot->delivery, &dq->list);
+ spin_unlock_irqrestore(&dq->lock, flags_dq);
- slot = &hisi_hba->slot_info[slot_idx];
- memset(slot, 0, sizeof(struct hisi_sas_slot));
+ dlvry_queue = dq->id;
+ dlvry_queue_slot = wr_q_index;
slot->idx = slot_idx;
slot->n_elem = n_elem;
@@ -1483,47 +1753,36 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
slot->task = task;
slot->port = port;
+ slot->is_internal = true;
task->lldd_task = slot;
- slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
- GFP_ATOMIC, &slot->buf_dma);
- if (!slot->buf) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
-
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
- rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
+ hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
abort_flag, task_tag);
- if (rc)
- goto err_out_buf;
- spin_lock_irqsave(&hisi_hba->lock, flags);
- list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- dq->slot_prep = slot;
-
+ slot->ready = 1;
/* send abort command to the chip */
+ spin_lock_irqsave(&dq->lock, flags);
+ list_add_tail(&slot->entry, &sas_dev->list);
hisi_hba->hw->start_delivery(dq);
- spin_unlock_irqrestore(&dq->lock, flags_dq);
+ spin_unlock_irqrestore(&dq->lock, flags);
return 0;
err_out_buf:
dma_pool_free(hisi_hba->buffer_pool, slot->buf,
- slot->buf_dma);
+ slot->buf_dma);
err_out_tag:
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
- spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
@@ -1651,6 +1910,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ struct device *dev = hisi_hba->dev;
if (rdy) {
/* Phy down but ready */
@@ -1659,6 +1919,10 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
} else {
struct hisi_sas_port *port = phy->port;
+ if (phy->in_reset) {
+ dev_info(dev, "ignore flutter phy%d down\n", phy_no);
+ return;
+ }
/* Phy down and not ready */
sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
sas_phy_disconnected(sas_phy);
@@ -1693,34 +1957,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
-static struct device_attribute *host_attrs[] = {
+struct device_attribute *host_attrs[] = {
&dev_attr_phy_event_threshold,
NULL,
};
-
-static struct scsi_host_template _hisi_sas_sht = {
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .queuecommand = sas_queuecommand,
- .target_alloc = sas_target_alloc,
- .slave_configure = hisi_sas_slave_configure,
- .scan_finished = hisi_sas_scan_finished,
- .scan_start = hisi_sas_scan_start,
- .change_queue_depth = sas_change_queue_depth,
- .bios_param = sas_bios_param,
- .can_queue = 1,
- .this_id = -1,
- .sg_tablesize = SG_ALL,
- .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_target_reset_handler = sas_eh_target_reset_handler,
- .target_destroy = sas_target_destroy,
- .ioctl = sas_ioctl,
- .shost_attrs = host_attrs,
-};
-struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
-EXPORT_SYMBOL_GPL(hisi_sas_sht);
+EXPORT_SYMBOL_GPL(host_attrs);
static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_dev_found = hisi_sas_dev_found,
@@ -1798,6 +2039,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
/* Delivery queue structure */
spin_lock_init(&dq->lock);
+ INIT_LIST_HEAD(&dq->list);
dq->id = i;
dq->hisi_hba = hisi_hba;
@@ -1822,13 +2064,11 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
goto err_out;
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
+ hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
GFP_KERNEL);
if (!hisi_hba->itct)
goto err_out;
- memset(hisi_hba->itct, 0, s);
-
hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
sizeof(struct hisi_sas_slot),
GFP_KERNEL);
@@ -2031,7 +2271,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
- shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
+ shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
if (!shost) {
dev_err(dev, "scsi host alloc failed\n");
return NULL;
@@ -2080,19 +2320,8 @@ err_out:
return NULL;
}
-void hisi_sas_init_add(struct hisi_hba *hisi_hba)
-{
- int i;
-
- for (i = 0; i < hisi_hba->n_phy; i++)
- memcpy(&hisi_hba->phy[i].dev_sas_addr,
- hisi_hba->sas_addr,
- SAS_ADDR_SIZE);
-}
-EXPORT_SYMBOL_GPL(hisi_sas_init_add);
-
int hisi_sas_probe(struct platform_device *pdev,
- const struct hisi_sas_hw *hw)
+ const struct hisi_sas_hw *hw)
{
struct Scsi_Host *shost;
struct hisi_hba *hisi_hba;
@@ -2144,8 +2373,6 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
- hisi_sas_init_add(hisi_hba);
-
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha;
@@ -2177,6 +2404,9 @@ int hisi_sas_remove(struct platform_device *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
+ if (timer_pending(&hisi_hba->timer))
+ del_timer(&hisi_hba->timer);
+
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 84a0ccc4daf5..89ab18c1959c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -855,39 +855,12 @@ static enum sas_linkrate phy_get_max_linkrate_v1_hw(void)
static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
- u32 prog_phy_link_rate =
- hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
- struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
- int i;
- enum sas_linkrate min, max;
- u32 rate_mask = 0;
-
- if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
- max = sas_phy->phy->maximum_linkrate;
- min = r->minimum_linkrate;
- } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
- max = r->maximum_linkrate;
- min = sas_phy->phy->minimum_linkrate;
- } else
- return;
-
- sas_phy->phy->maximum_linkrate = max;
- sas_phy->phy->minimum_linkrate = min;
-
- max -= SAS_LINK_RATE_1_5_GBPS;
+ enum sas_linkrate max = r->maximum_linkrate;
+ u32 prog_phy_link_rate = 0x800;
- for (i = 0; i <= max; i++)
- rate_mask |= 1 << (i * 2);
-
- prog_phy_link_rate &= ~0xff;
- prog_phy_link_rate |= rate_mask;
-
- disable_phy_v1_hw(hisi_hba, phy_no);
- msleep(100);
+ prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
- prog_phy_link_rate);
- start_phy_v1_hw(hisi_hba, phy_no);
+ prog_phy_link_rate);
}
static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
@@ -921,37 +894,45 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
return -EAGAIN;
}
- return 0;
+ dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
+
+ return w;
}
+/* DQ lock must be taken here */
static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- int dlvry_queue = dq->slot_prep->dlvry_queue;
- int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
+ struct hisi_sas_slot *s, *s1;
+ struct list_head *dq_list;
+ int dlvry_queue = dq->id;
+ int wp, count = 0;
+
+ dq_list = &dq->list;
+ list_for_each_entry_safe(s, s1, &dq->list, delivery) {
+ if (!s->ready)
+ break;
+ count++;
+ wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ list_del(&s->delivery);
+ }
- dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
- hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
- dq->wr_point);
+ if (!count)
+ return;
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
-static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
+static void prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_cmd_hdr *hdr,
struct scatterlist *scatter,
int n_elem)
{
struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
- struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
- if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
- dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
- n_elem);
- return -EINVAL;
- }
-
for_each_sg(scatter, sg, n_elem, i) {
struct hisi_sas_sge *entry = &sge_page->sge[i];
@@ -964,48 +945,25 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
-
- return 0;
}
-static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
+static void prep_smp_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
- struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
- struct scatterlist *sg_req, *sg_resp;
+ struct scatterlist *sg_req;
struct hisi_sas_device *sas_dev = device->lldd_dev;
dma_addr_t req_dma_addr;
- unsigned int req_len, resp_len;
- int elem, rc;
+ unsigned int req_len;
- /*
- * DMA-map SMP request, response buffers
- */
/* req */
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
- if (!elem)
- return -ENOMEM;
req_len = sg_dma_len(sg_req);
req_dma_addr = sg_dma_address(sg_req);
- /* resp */
- sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
- if (!elem) {
- rc = -ENOMEM;
- goto err_out_req;
- }
- resp_len = sg_dma_len(sg_resp);
- if ((req_len & 0x3) || (resp_len & 0x3)) {
- rc = -EINVAL;
- goto err_out_resp;
- }
-
/* create header */
/* dw0 */
hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
@@ -1025,21 +983,10 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
-
- return 0;
-
-err_out_resp:
- dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
- DMA_FROM_DEVICE);
-err_out_req:
- dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
- return rc;
}
-static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot, int is_tmf,
- struct hisi_sas_tmf_task *tmf)
+static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
@@ -1048,7 +995,8 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- int has_data = 0, rc, priority = is_tmf;
+ struct hisi_sas_tmf_task *tmf = slot->tmf;
+ int has_data = 0, priority = !!tmf;
u8 *buf_cmd, fburst = 0;
u32 dw1, dw2;
@@ -1062,7 +1010,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
dw1 = 1 << CMD_HDR_VERIFY_DTL_OFF;
- if (is_tmf) {
+ if (tmf) {
dw1 |= 3 << CMD_HDR_SSP_FRAME_TYPE_OFF;
} else {
switch (scsi_cmnd->sc_data_direction) {
@@ -1083,7 +1031,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
dw1 |= sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
- if (is_tmf) {
+ if (tmf) {
dw2 = ((sizeof(struct ssp_tmf_iu) +
sizeof(struct ssp_frame_hdr)+3)/4) <<
CMD_HDR_CFL_OFF;
@@ -1097,12 +1045,9 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
- if (has_data) {
- rc = prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter,
+ if (has_data)
+ prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
- if (rc)
- return rc;
- }
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -1117,7 +1062,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
hdr->dw2 = cpu_to_le32(dw2);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
- if (!is_tmf) {
+ if (!tmf) {
buf_cmd[9] = fburst | task->ssp_task.task_attr |
(task->ssp_task.task_prio << 3);
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
@@ -1136,8 +1081,6 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
break;
}
}
-
- return 0;
}
/* by default, task resp is complete */
@@ -1430,6 +1373,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
irqreturn_t res = IRQ_HANDLED;
+ unsigned long flags;
irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) {
@@ -1483,6 +1427,13 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
SAS_PROTOCOL_SMP;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->reset_completion) {
+ phy->in_reset = 0;
+ complete(phy->reset_completion);
+ }
+ spin_unlock_irqrestore(&phy->lock, flags);
+
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
CHL_INT2_SL_PHY_ENA_MSK);
@@ -1845,6 +1796,28 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
return 0;
}
+static struct scsi_host_template sht_v1_hw = {
+ .name = DRV_NAME,
+ .module = THIS_MODULE,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = hisi_sas_slave_configure,
+ .scan_finished = hisi_sas_scan_finished,
+ .scan_start = hisi_sas_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .bios_param = sas_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_target_reset_handler = sas_eh_target_reset_handler,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+ .shost_attrs = host_attrs,
+};
+
static const struct hisi_sas_hw hisi_sas_v1_hw = {
.hw_init = hisi_sas_v1_init,
.setup_itct = setup_itct_v1_hw,
@@ -1864,6 +1837,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
+ .sht = &sht_v1_hw,
};
static int hisi_sas_v1_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index f89fb9a49ea9..213c530e63f2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -144,6 +144,7 @@
#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
+#define CQE_SEND_CNT 0x248
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
#define DLVRY_Q_0_BASE_ADDR_HI 0x264
#define DLVRY_Q_0_DEPTH 0x268
@@ -295,6 +296,10 @@
#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF 6
#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PHY_ID_OFF 8
+#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF)
+#define CMD_HDR_FORCE_PHY_OFF 17
+#define CMD_HDR_FORCE_PHY_MSK (0x1 << CMD_HDR_FORCE_PHY_OFF)
#define CMD_HDR_PORT_OFF 18
#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF 27
@@ -1216,7 +1221,22 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
}
for (i = 0; i < hisi_hba->n_phy; i++) {
- hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ u32 prog_phy_link_rate = 0x800;
+
+ if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
+ SAS_LINK_RATE_1_5_GBPS)) {
+ prog_phy_link_rate = 0x855;
+ } else {
+ enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
+
+ prog_phy_link_rate =
+ hisi_sas_get_prog_phy_linkrate_mask(max) |
+ 0x800;
+ }
+ hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, sas_phy_ctrl);
hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0);
@@ -1585,39 +1605,12 @@ static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
- u32 prog_phy_link_rate =
- hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
- struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
- int i;
- enum sas_linkrate min, max;
- u32 rate_mask = 0;
-
- if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
- max = sas_phy->phy->maximum_linkrate;
- min = r->minimum_linkrate;
- } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
- max = r->maximum_linkrate;
- min = sas_phy->phy->minimum_linkrate;
- } else
- return;
-
- sas_phy->phy->maximum_linkrate = max;
- sas_phy->phy->minimum_linkrate = min;
-
- max -= SAS_LINK_RATE_1_5_GBPS;
-
- for (i = 0; i <= max; i++)
- rate_mask |= 1 << (i * 2);
-
- prog_phy_link_rate &= ~0xff;
- prog_phy_link_rate |= rate_mask;
+ enum sas_linkrate max = r->maximum_linkrate;
+ u32 prog_phy_link_rate = 0x800;
- disable_phy_v2_hw(hisi_hba, phy_no);
- msleep(100);
+ prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
- prog_phy_link_rate);
- start_phy_v2_hw(hisi_hba, phy_no);
+ prog_phy_link_rate);
}
static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
@@ -1658,42 +1651,50 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
+ dev_warn(dev, "full queue=%d r=%d w=%d\n",
queue, r, w);
return -EAGAIN;
}
- return 0;
+ dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
+
+ return w;
}
+/* DQ lock must be taken here */
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- int dlvry_queue = dq->slot_prep->dlvry_queue;
- int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
+ struct hisi_sas_slot *s, *s1;
+ struct list_head *dq_list;
+ int dlvry_queue = dq->id;
+ int wp, count = 0;
+
+ dq_list = &dq->list;
+ list_for_each_entry_safe(s, s1, &dq->list, delivery) {
+ if (!s->ready)
+ break;
+ count++;
+ wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ list_del(&s->delivery);
+ }
+
+ if (!count)
+ return;
- dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
- hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
- dq->wr_point);
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
-static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
+static void prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_cmd_hdr *hdr,
struct scatterlist *scatter,
int n_elem)
{
struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
- struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
- if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
- dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
- n_elem);
- return -EINVAL;
- }
-
for_each_sg(scatter, sg, n_elem, i) {
struct hisi_sas_sge *entry = &sge_page->sge[i];
@@ -1706,47 +1707,24 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
-
- return 0;
}
-static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
+static void prep_smp_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
- struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
- struct scatterlist *sg_req, *sg_resp;
+ struct scatterlist *sg_req;
struct hisi_sas_device *sas_dev = device->lldd_dev;
dma_addr_t req_dma_addr;
- unsigned int req_len, resp_len;
- int elem, rc;
+ unsigned int req_len;
- /*
- * DMA-map SMP request, response buffers
- */
/* req */
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
- if (!elem)
- return -ENOMEM;
- req_len = sg_dma_len(sg_req);
req_dma_addr = sg_dma_address(sg_req);
-
- /* resp */
- sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
- if (!elem) {
- rc = -ENOMEM;
- goto err_out_req;
- }
- resp_len = sg_dma_len(sg_resp);
- if ((req_len & 0x3) || (resp_len & 0x3)) {
- rc = -EINVAL;
- goto err_out_resp;
- }
+ req_len = sg_dma_len(&task->smp_task.smp_req);
/* create header */
/* dw0 */
@@ -1768,21 +1746,10 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
-
- return 0;
-
-err_out_resp:
- dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
- DMA_FROM_DEVICE);
-err_out_req:
- dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
- return rc;
}
-static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot, int is_tmf,
- struct hisi_sas_tmf_task *tmf)
+static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
@@ -1791,7 +1758,8 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- int has_data = 0, rc, priority = is_tmf;
+ struct hisi_sas_tmf_task *tmf = slot->tmf;
+ int has_data = 0, priority = !!tmf;
u8 *buf_cmd;
u32 dw1 = 0, dw2 = 0;
@@ -1802,7 +1770,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
(1 << CMD_HDR_CMD_OFF)); /* ssp */
dw1 = 1 << CMD_HDR_VDTL_OFF;
- if (is_tmf) {
+ if (tmf) {
dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
} else {
@@ -1833,12 +1801,9 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
hdr->transfer_tags = cpu_to_le32(slot->idx);
- if (has_data) {
- rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+ if (has_data)
+ prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
- if (rc)
- return rc;
- }
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -1848,7 +1813,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
sizeof(struct ssp_frame_hdr);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
- if (!is_tmf) {
+ if (!tmf) {
buf_cmd[9] = task->ssp_task.task_attr |
(task->ssp_task.task_prio << 3);
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
@@ -1867,8 +1832,6 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
break;
}
}
-
- return 0;
}
#define TRANS_TX_ERR 0
@@ -2380,23 +2343,24 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
+ struct sas_ha_struct *ha;
enum exec_status sts;
struct hisi_sas_complete_v2_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v2_hdr *complete_hdr =
&complete_queue[slot->cmplt_queue_slot];
unsigned long flags;
- int aborted;
+ bool is_internal = slot->is_internal;
if (unlikely(!task || !task->lldd_task || !task->dev))
return -EINVAL;
ts = &task->task_status;
device = task->dev;
+ ha = device->port->ha;
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
task->task_state_flags &=
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2404,15 +2368,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
- if (unlikely(aborted)) {
- dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
- ts->stat = SAS_ABORTED_TASK;
- spin_lock_irqsave(&hisi_hba->lock, flags);
- hisi_sas_slot_task_free(hisi_hba, task, slot);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- return ts->stat;
- }
-
if (unlikely(!sas_dev)) {
dev_dbg(dev, "slot complete: port has no device\n");
ts->stat = SAS_PHY_DOWN;
@@ -2459,10 +2414,10 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
slot_err_v2_hw(hisi_hba, task, slot, 2);
if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%p "
+ dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
"Error info: 0x%x 0x%x 0x%x 0x%x\n",
- slot->idx, task,
+ slot->idx, task, sas_dev->device_id,
complete_hdr->dw0, complete_hdr->dw1,
complete_hdr->act, complete_hdr->dw3,
error_info[0], error_info[1],
@@ -2523,13 +2478,27 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ dev_info(dev, "slot complete: task(%p) aborted\n", task);
+ return SAS_ABORTED_TASK;
+ }
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- spin_lock_irqsave(&hisi_hba->lock, flags);
- hisi_sas_slot_task_free(hisi_hba, task, slot);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- sts = ts->stat;
+
+ if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
+ spin_lock_irqsave(&device->done_lock, flags);
+ if (test_bit(SAS_HA_FROZEN, &ha->state)) {
+ spin_unlock_irqrestore(&device->done_lock, flags);
+ dev_info(dev, "slot complete: task(%p) ignored\n ",
+ task);
+ return sts;
+ }
+ spin_unlock_irqrestore(&device->done_lock, flags);
+ }
if (task->task_done)
task->task_done(task);
@@ -2537,7 +2506,7 @@ out:
return sts;
}
-static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
+static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
@@ -2547,8 +2516,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ struct hisi_sas_tmf_task *tmf = slot->tmf;
u8 *buf_cmd;
- int has_data = 0, rc = 0, hdr_tag = 0;
+ int has_data = 0, hdr_tag = 0;
u32 dw1 = 0, dw2 = 0;
/* create header */
@@ -2559,6 +2529,12 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
else
hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+ if (tmf && tmf->force_phy) {
+ hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
+ hdr->dw0 |= cpu_to_le32((1 << tmf->phy_id)
+ << CMD_HDR_PHY_ID_OFF);
+ }
+
/* dw1 */
switch (task->data_dir) {
case DMA_TO_DEVICE:
@@ -2596,12 +2572,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
/* dw3 */
hdr->transfer_tags = cpu_to_le32(slot->idx);
- if (has_data) {
- rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+ if (has_data)
+ prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
- if (rc)
- return rc;
- }
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -2613,8 +2586,6 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
/* fill in command FIS */
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
-
- return 0;
}
static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
@@ -2651,7 +2622,7 @@ static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
}
}
-static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
+static void prep_abort_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort)
{
@@ -2679,8 +2650,6 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
/* dw7 */
hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
-
- return 0;
}
static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -2692,6 +2661,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct device *dev = hisi_hba->dev;
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
+ unsigned long flags;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
@@ -2744,6 +2714,12 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
set_link_timer_quirk(hisi_hba);
}
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->reset_completion) {
+ phy->in_reset = 0;
+ complete(phy->reset_completion);
+ }
+ spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -3151,14 +3127,12 @@ static void cq_tasklet_v2_hw(unsigned long val)
struct hisi_sas_complete_v2_hdr *complete_queue;
u32 rd_point = cq->rd_point, wr_point, dev_id;
int queue = cq->id;
- struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
if (unlikely(hisi_hba->reject_stp_links_msk))
phys_try_accept_stp_links_v2_hw(hisi_hba);
complete_queue = hisi_hba->complete_hdr[queue];
- spin_lock(&dq->lock);
wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
(0x14 * queue));
@@ -3208,7 +3182,6 @@ static void cq_tasklet_v2_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
- spin_unlock(&dq->lock);
}
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
@@ -3235,6 +3208,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
irqreturn_t res = IRQ_HANDLED;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+ unsigned long flags;
int phy_no, offset;
phy_no = sas_phy->id;
@@ -3295,6 +3269,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
sas_phy->oob_mode = SATA_OOB_MODE;
/* Make up some unique SAS address */
attached_sas_addr[0] = 0x50;
+ attached_sas_addr[6] = hisi_hba->shost->host_no;
attached_sas_addr[7] = phy_no;
memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
@@ -3308,6 +3283,12 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->reset_completion) {
+ phy->in_reset = 0;
+ complete(phy->reset_completion);
+ }
+ spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);
@@ -3546,6 +3527,46 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
return 0;
}
+static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms)
+{
+ struct device *dev = hisi_hba->dev;
+ int entries, entries_old = 0, time;
+
+ for (time = 0; time < timeout_ms; time += delay_ms) {
+ entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
+ if (entries == entries_old)
+ break;
+
+ entries_old = entries;
+ msleep(delay_ms);
+ }
+
+ dev_dbg(dev, "wait commands complete %dms\n", time);
+}
+
+static struct scsi_host_template sht_v2_hw = {
+ .name = DRV_NAME,
+ .module = THIS_MODULE,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = hisi_sas_slave_configure,
+ .scan_finished = hisi_sas_scan_finished,
+ .scan_start = hisi_sas_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .bios_param = sas_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_target_reset_handler = sas_eh_target_reset_handler,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+ .shost_attrs = host_attrs,
+};
+
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
.setup_itct = setup_itct_v2_hw,
@@ -3574,6 +3595,8 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.soft_reset = soft_reset_v2_hw,
.get_phys_state = get_phys_state_v2_hw,
.write_gpio = write_gpio_v2_hw,
+ .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v2_hw,
+ .sht = &sht_v2_hw,
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
@@ -3598,9 +3621,6 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- if (timer_pending(&hisi_hba->timer))
- del_timer(&hisi_hba->timer);
-
hisi_sas_kill_tasklets(hisi_hba);
return hisi_sas_remove(pdev);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 6f3e5ba6b472..9f1e2d03f914 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -92,6 +92,7 @@
#define SAS_ECC_INTR 0x1e8
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
+#define CQE_SEND_CNT 0x248
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
#define DLVRY_Q_0_BASE_ADDR_HI 0x264
#define DLVRY_Q_0_DEPTH 0x268
@@ -106,6 +107,11 @@
#define COMPL_Q_0_RD_PTR 0x4f0
#define AWQOS_AWCACHE_CFG 0xc84
#define ARQOS_ARCACHE_CFG 0xc88
+#define HILINK_ERR_DFX 0xe04
+#define SAS_GPIO_CFG_0 0x1000
+#define SAS_GPIO_CFG_1 0x1004
+#define SAS_GPIO_TX_0_1 0x1040
+#define SAS_CFG_DRIVE_VLD 0x1070
/* phy registers requiring init */
#define PORT_BASE (0x2000)
@@ -167,6 +173,7 @@
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
+#define CHL_INT2_RX_INVLD_DW_OFF 30
#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
@@ -216,6 +223,9 @@
#define SAS_RAS_INTR1 (RAS_BASE + 0x04)
#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
+#define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c)
+#define SAS_RAS_INTR2 (RAS_BASE + 0x20)
+#define SAS_RAS_INTR2_MASK (RAS_BASE + 0x24)
/* HW dma structures */
/* Delivery queue header */
@@ -348,10 +358,11 @@ struct hisi_sas_err_record_v3 {
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3
-#define CMD_IS_UNCONSTRAINT(cmd) \
- ((cmd == ATA_CMD_READ_LOG_EXT) || \
- (cmd == ATA_CMD_READ_LOG_DMA_EXT) || \
- (cmd == ATA_CMD_DEV_RESET))
+#define FIS_CMD_IS_UNCONSTRAINED(fis) \
+ ((fis.command == ATA_CMD_READ_LOG_EXT) || \
+ (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
+ ((fis.command == ATA_CMD_DEV_RESET) && \
+ ((fis.control & ATA_SRST) != 0)))
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
@@ -390,8 +401,23 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
return readl(regs);
}
+#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us, \
+ timeout_us) \
+({ \
+ void __iomem *regs = hisi_hba->regs + off; \
+ readl_poll_timeout(regs, val, cond, delay_us, timeout_us); \
+})
+
+#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us, \
+ timeout_us) \
+({ \
+ void __iomem *regs = hisi_hba->regs + off; \
+ readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
+})
+
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
+ struct pci_dev *pdev = hisi_hba->pci_dev;
int i;
/* Global registers init */
@@ -409,7 +435,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
+ if (pdev->revision >= 0x21)
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
+ else
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
@@ -422,13 +451,33 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
for (i = 0; i < hisi_hba->n_phy; i++) {
- hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ u32 prog_phy_link_rate = 0x800;
+
+ if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
+ SAS_LINK_RATE_1_5_GBPS)) {
+ prog_phy_link_rate = 0x855;
+ } else {
+ enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
+
+ prog_phy_link_rate =
+ hisi_sas_get_prog_phy_linkrate_mask(max) |
+ 0x800;
+ }
+ hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
+ if (pdev->revision >= 0x21)
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
+ 0xffffffff);
+ else
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
+ 0xff87ffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -503,6 +552,16 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* RAS registers init */
hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
+ hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);
+
+ /* LED registers init */
+ hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
+ hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
+ hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
+ /* Configure blink generator rate A to 1Hz and B to 4Hz */
+ hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
+ hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
}
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -654,8 +713,8 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
udelay(50);
/* Ensure axi bus idle */
- ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val,
- 20000, 1000000);
+ ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
+ 20000, 1000000);
if (ret) {
dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
return -EIO;
@@ -794,42 +853,49 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
+ dev_warn(dev, "full queue=%d r=%d w=%d\n",
queue, r, w);
return -EAGAIN;
}
- return 0;
+ dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
+
+ return w;
}
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- int dlvry_queue = dq->slot_prep->dlvry_queue;
- int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
+ struct hisi_sas_slot *s, *s1;
+ struct list_head *dq_list;
+ int dlvry_queue = dq->id;
+ int wp, count = 0;
+
+ dq_list = &dq->list;
+ list_for_each_entry_safe(s, s1, &dq->list, delivery) {
+ if (!s->ready)
+ break;
+ count++;
+ wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ list_del(&s->delivery);
+ }
+
+ if (!count)
+ return;
- dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
- hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
- dq->wr_point);
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
-static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
+static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_cmd_hdr *hdr,
struct scatterlist *scatter,
int n_elem)
{
struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
- struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
- if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
- dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
- n_elem);
- return -EINVAL;
- }
-
for_each_sg(scatter, sg, n_elem, i) {
struct hisi_sas_sge *entry = &sge_page->sge[i];
@@ -842,13 +908,10 @@ static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
-
- return 0;
}
-static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot, int is_tmf,
- struct hisi_sas_tmf_task *tmf)
+static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
@@ -857,7 +920,8 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- int has_data = 0, rc, priority = is_tmf;
+ struct hisi_sas_tmf_task *tmf = slot->tmf;
+ int has_data = 0, priority = !!tmf;
u8 *buf_cmd;
u32 dw1 = 0, dw2 = 0;
@@ -868,7 +932,7 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
(1 << CMD_HDR_CMD_OFF)); /* ssp */
dw1 = 1 << CMD_HDR_VDTL_OFF;
- if (is_tmf) {
+ if (tmf) {
dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
} else {
@@ -898,12 +962,9 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
hdr->dw2 = cpu_to_le32(dw2);
hdr->transfer_tags = cpu_to_le32(slot->idx);
- if (has_data) {
- rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
+ if (has_data)
+ prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
- if (rc)
- return rc;
- }
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -913,7 +974,7 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
sizeof(struct ssp_frame_hdr);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
- if (!is_tmf) {
+ if (!tmf) {
buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
} else {
@@ -930,48 +991,25 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
break;
}
}
-
- return 0;
}
-static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
+static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
- struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
- struct scatterlist *sg_req, *sg_resp;
+ struct scatterlist *sg_req;
struct hisi_sas_device *sas_dev = device->lldd_dev;
dma_addr_t req_dma_addr;
- unsigned int req_len, resp_len;
- int elem, rc;
+ unsigned int req_len;
- /*
- * DMA-map SMP request, response buffers
- */
/* req */
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
- if (!elem)
- return -ENOMEM;
req_len = sg_dma_len(sg_req);
req_dma_addr = sg_dma_address(sg_req);
- /* resp */
- sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
- if (!elem) {
- rc = -ENOMEM;
- goto err_out_req;
- }
- resp_len = sg_dma_len(sg_resp);
- if ((req_len & 0x3) || (resp_len & 0x3)) {
- rc = -EINVAL;
- goto err_out_resp;
- }
-
/* create header */
/* dw0 */
hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
@@ -993,18 +1031,9 @@ static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
- return 0;
-
-err_out_resp:
- dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
- DMA_FROM_DEVICE);
-err_out_req:
- dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
- return rc;
}
-static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
+static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
@@ -1015,7 +1044,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u8 *buf_cmd;
- int has_data = 0, rc = 0, hdr_tag = 0;
+ int has_data = 0, hdr_tag = 0;
u32 dw1 = 0, dw2 = 0;
hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
@@ -1046,7 +1075,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
- if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command))
+ if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -1064,12 +1093,9 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
/* dw3 */
hdr->transfer_tags = cpu_to_le32(slot->idx);
- if (has_data) {
- rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
+ if (has_data)
+ prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
- if (rc)
- return rc;
- }
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -1081,11 +1107,9 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
/* fill in command FIS */
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
-
- return 0;
}
-static int prep_abort_v3_hw(struct hisi_hba *hisi_hba,
+static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort)
{
@@ -1110,7 +1134,6 @@ static int prep_abort_v3_hw(struct hisi_hba *hisi_hba,
hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
- return 0;
}
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -1120,6 +1143,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
+ unsigned long flags;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
@@ -1188,6 +1212,12 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->phy_attached = 1;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
res = IRQ_HANDLED;
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->reset_completion) {
+ phy->in_reset = 0;
+ complete(phy->reset_completion);
+ }
+ spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_PHY_ENABLE_MSK);
@@ -1301,14 +1331,10 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
struct device *dev = hisi_hba->dev;
- u32 ent_msk, ent_tmp, irq_msk;
+ struct pci_dev *pci_dev = hisi_hba->pci_dev;
+ u32 irq_msk;
int phy_no = 0;
- ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
- ent_tmp = ent_msk;
- ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
-
irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
& 0xeeeeeeee;
@@ -1319,6 +1345,13 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
CHL_INT1);
u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT2);
+ u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1_MSK);
+ u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2_MSK);
+
+ irq_value1 &= ~irq_msk1;
+ irq_value2 &= ~irq_msk2;
if ((irq_msk & (4 << (phy_no * 4))) &&
irq_value1) {
@@ -1364,8 +1397,28 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT2, irq_value2);
- }
+ if ((irq_value2 & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
+ (pci_dev->revision == 0x20)) {
+ u32 reg_value;
+ int rc;
+
+ rc = hisi_sas_read32_poll_timeout_atomic(
+ HILINK_ERR_DFX, reg_value,
+ !((reg_value >> 8) & BIT(phy_no)),
+ 1000, 10000);
+ if (rc) {
+ disable_phy_v3_hw(hisi_hba, phy_no);
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2,
+ BIT(CHL_INT2_RX_INVLD_DW_OFF));
+ hisi_sas_phy_read32(hisi_hba, phy_no,
+ ERR_CNT_INVLD_DW);
+ mdelay(1);
+ enable_phy_v3_hw(hisi_hba, phy_no);
+ }
+ }
+ }
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
hisi_sas_phy_write32(hisi_hba, phy_no,
@@ -1378,8 +1431,6 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
phy_no++;
}
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
-
return IRQ_HANDLED;
}
@@ -1448,6 +1499,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ irq_value &= ~irq_msk;
for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
@@ -1549,37 +1601,30 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
+ struct sas_ha_struct *ha;
enum exec_status sts;
struct hisi_sas_complete_v3_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v3_hdr *complete_hdr =
&complete_queue[slot->cmplt_queue_slot];
- int aborted;
unsigned long flags;
+ bool is_internal = slot->is_internal;
if (unlikely(!task || !task->lldd_task || !task->dev))
return -EINVAL;
ts = &task->task_status;
device = task->dev;
+ ha = device->port->ha;
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
task->task_state_flags &=
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
- if (unlikely(aborted)) {
- dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
- ts->stat = SAS_ABORTED_TASK;
- spin_lock_irqsave(&hisi_hba->lock, flags);
- hisi_sas_slot_task_free(hisi_hba, task, slot);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- return ts->stat;
- }
if (unlikely(!sas_dev)) {
dev_dbg(dev, "slot complete: port has not device\n");
@@ -1619,10 +1664,10 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
slot_err_v3_hw(hisi_hba, task, slot);
if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%p "
+ dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
"Error info: 0x%x 0x%x 0x%x 0x%x\n",
- slot->idx, task,
+ slot->idx, task, sas_dev->device_id,
complete_hdr->dw0, complete_hdr->dw1,
complete_hdr->act, complete_hdr->dw3,
error_info[0], error_info[1],
@@ -1677,13 +1722,27 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ dev_info(dev, "slot complete: task(%p) aborted\n", task);
+ return SAS_ABORTED_TASK;
+ }
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- spin_lock_irqsave(&hisi_hba->lock, flags);
- hisi_sas_slot_task_free(hisi_hba, task, slot);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- sts = ts->stat;
+
+ if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
+ spin_lock_irqsave(&device->done_lock, flags);
+ if (test_bit(SAS_HA_FROZEN, &ha->state)) {
+ spin_unlock_irqrestore(&device->done_lock, flags);
+ dev_info(dev, "slot complete: task(%p) ignored\n ",
+ task);
+ return sts;
+ }
+ spin_unlock_irqrestore(&device->done_lock, flags);
+ }
if (task->task_done)
task->task_done(task);
@@ -1699,25 +1758,27 @@ static void cq_tasklet_v3_hw(unsigned long val)
struct hisi_sas_complete_v3_hdr *complete_queue;
u32 rd_point = cq->rd_point, wr_point;
int queue = cq->id;
- struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
complete_queue = hisi_hba->complete_hdr[queue];
- spin_lock(&dq->lock);
wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
(0x14 * queue));
while (rd_point != wr_point) {
struct hisi_sas_complete_v3_hdr *complete_hdr;
+ struct device *dev = hisi_hba->dev;
int iptt;
complete_hdr = &complete_queue[rd_point];
iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
+ if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
+ } else
+ dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);
if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
rd_point = 0;
@@ -1726,7 +1787,6 @@ static void cq_tasklet_v3_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
- spin_unlock(&dq->lock);
}
static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
@@ -1839,39 +1899,12 @@ static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
- u32 prog_phy_link_rate =
- hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
- struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
- int i;
- enum sas_linkrate min, max;
- u32 rate_mask = 0;
-
- if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
- max = sas_phy->phy->maximum_linkrate;
- min = r->minimum_linkrate;
- } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
- max = r->maximum_linkrate;
- min = sas_phy->phy->minimum_linkrate;
- } else
- return;
-
- sas_phy->phy->maximum_linkrate = max;
- sas_phy->phy->minimum_linkrate = min;
-
- max -= SAS_LINK_RATE_1_5_GBPS;
-
- for (i = 0; i <= max; i++)
- rate_mask |= 1 << (i * 2);
-
- prog_phy_link_rate &= ~0xff;
- prog_phy_link_rate |= rate_mask;
+ enum sas_linkrate max = r->maximum_linkrate;
+ u32 prog_phy_link_rate = 0x800;
- disable_phy_v3_hw(hisi_hba, phy_no);
- msleep(100);
+ prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
- prog_phy_link_rate);
- start_phy_v3_hw(hisi_hba, phy_no);
+ prog_phy_link_rate);
}
static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
@@ -1948,8 +1981,9 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
/* wait until bus idle */
- rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
- AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
+ rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
+ AM_CURR_TRANS_RETURN, status,
+ status == 0x3, 10, 100);
if (rc) {
dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
return rc;
@@ -1960,6 +1994,75 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
return hw_init_v3_hw(hisi_hba);
}
+static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct device *dev = hisi_hba->dev;
+ u32 *data = (u32 *)write_data;
+ int i;
+
+ switch (reg_type) {
+ case SAS_GPIO_REG_TX:
+ if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
+ dev_err(dev, "write gpio: invalid reg range[%d, %d]\n",
+ reg_index, reg_index + reg_count - 1);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reg_count; i++)
+ hisi_sas_write32(hisi_hba,
+ SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
+ data[i]);
+ break;
+ default:
+ dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
+ reg_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms)
+{
+ struct device *dev = hisi_hba->dev;
+ int entries, entries_old = 0, time;
+
+ for (time = 0; time < timeout_ms; time += delay_ms) {
+ entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
+ if (entries == entries_old)
+ break;
+
+ entries_old = entries;
+ msleep(delay_ms);
+ }
+
+ dev_dbg(dev, "wait commands complete %dms\n", time);
+}
+
+static struct scsi_host_template sht_v3_hw = {
+ .name = DRV_NAME,
+ .module = THIS_MODULE,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = hisi_sas_slave_configure,
+ .scan_finished = hisi_sas_scan_finished,
+ .scan_start = hisi_sas_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .bios_param = sas_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_target_reset_handler = sas_eh_target_reset_handler,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+ .shost_attrs = host_attrs,
+};
+
static const struct hisi_sas_hw hisi_sas_v3_hw = {
.hw_init = hisi_sas_v3_init,
.setup_itct = setup_itct_v3_hw,
@@ -1985,6 +2088,8 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.soft_reset = soft_reset_v3_hw,
.get_phys_state = get_phys_state_v3_hw,
.get_events = phy_get_events_v3_hw,
+ .write_gpio = write_gpio_v3_hw,
+ .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
};
static struct Scsi_Host *
@@ -1994,7 +2099,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
- shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
+ shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba));
if (!shost) {
dev_err(dev, "shost alloc failed\n");
return NULL;
@@ -2108,8 +2213,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
- hisi_sas_init_add(hisi_hba);
-
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_ha;
@@ -2161,6 +2264,9 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
+ if (timer_pending(&hisi_hba->timer))
+ del_timer(&hisi_hba->timer);
+
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
@@ -2222,6 +2328,29 @@ static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
{ .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
};
+static const struct hisi_sas_hw_error sas_ras_intr2_nfe[] = {
+ { .irq_msk = BIT(0), .msg = "DMAC0_AXI_BUS_ERR" },
+ { .irq_msk = BIT(1), .msg = "DMAC1_AXI_BUS_ERR" },
+ { .irq_msk = BIT(2), .msg = "DMAC2_AXI_BUS_ERR" },
+ { .irq_msk = BIT(3), .msg = "DMAC3_AXI_BUS_ERR" },
+ { .irq_msk = BIT(4), .msg = "DMAC4_AXI_BUS_ERR" },
+ { .irq_msk = BIT(5), .msg = "DMAC5_AXI_BUS_ERR" },
+ { .irq_msk = BIT(6), .msg = "DMAC6_AXI_BUS_ERR" },
+ { .irq_msk = BIT(7), .msg = "DMAC7_AXI_BUS_ERR" },
+ { .irq_msk = BIT(8), .msg = "DMAC0_FIFO_OMIT_ERR" },
+ { .irq_msk = BIT(9), .msg = "DMAC1_FIFO_OMIT_ERR" },
+ { .irq_msk = BIT(10), .msg = "DMAC2_FIFO_OMIT_ERR" },
+ { .irq_msk = BIT(11), .msg = "DMAC3_FIFO_OMIT_ERR" },
+ { .irq_msk = BIT(12), .msg = "DMAC4_FIFO_OMIT_ERR" },
+ { .irq_msk = BIT(13), .msg = "DMAC5_FIFO_OMIT_ERR" },
+ { .irq_msk = BIT(14), .msg = "DMAC6_FIFO_OMIT_ERR" },
+ { .irq_msk = BIT(15), .msg = "DMAC7_FIFO_OMIT_ERR" },
+ { .irq_msk = BIT(16), .msg = "HGC_RLSE_SLOT_UNMATCH" },
+ { .irq_msk = BIT(17), .msg = "HGC_LM_ADD_FCH_LIST_ERR" },
+ { .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" },
+ { .irq_msk = BIT(19), .msg = "HGC_FIFO_OMIT_ERR" },
+};
+
static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@@ -2252,6 +2381,17 @@ static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
}
hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR2);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr2_nfe); i++) {
+ ras_error = &sas_ras_intr2_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR2: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR2, irq_value);
+
return need_reset;
}
@@ -2307,7 +2447,6 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
u32 device_state, status;
int rc;
u32 reg_val;
- unsigned long flags;
if (!pdev->pm_cap) {
dev_err(dev, "PCI PM not supported\n");
@@ -2332,8 +2471,9 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
AM_CTRL_GLOBAL, reg_val);
/* wait until bus idle */
- rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
- AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
+ rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
+ AM_CURR_TRANS_RETURN, status,
+ status == 0x3, 10, 100);
if (rc) {
dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
@@ -2351,9 +2491,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
pci_disable_device(pdev);
pci_set_power_state(pdev, device_state);
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_release_tasks(hisi_hba);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
sas_suspend_ha(sha);
return 0;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dda1a64ab89c..6615ad8754b8 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -435,6 +435,8 @@ struct ipr_error_table_t ipr_error_table[] = {
"4080: IOA exceeded maximum operating temperature"},
{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
"4085: Service required"},
+ {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4086: SAS Adapter Hardware Configuration Error"},
{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
"3140: Device bus not ready to ready transition"},
{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index e3c8857741a1..bd6ac6b5980a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -291,7 +291,7 @@ static void ips_freescb(ips_ha_t *, ips_scb_t *);
static void ips_setup_funclist(ips_ha_t *);
static void ips_statinit(ips_ha_t *);
static void ips_statinit_memio(ips_ha_t *);
-static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time_t);
+static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t);
static void ips_ffdc_reset(ips_ha_t *, int);
static void ips_ffdc_time(ips_ha_t *);
static uint32_t ips_statupd_copperhead(ips_ha_t *);
@@ -985,10 +985,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
/* FFDC */
if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
- struct timeval tv;
-
- do_gettimeofday(&tv);
- ha->last_ffdc = tv.tv_sec;
+ ha->last_ffdc = ktime_get_real_seconds();
ha->reset_count++;
ips_ffdc_reset(ha, IPS_INTR_IORL);
}
@@ -2392,7 +2389,6 @@ static int
ips_hainit(ips_ha_t * ha)
{
int i;
- struct timeval tv;
METHOD_TRACE("ips_hainit", 1);
@@ -2407,8 +2403,7 @@ ips_hainit(ips_ha_t * ha)
/* Send FFDC */
ha->reset_count = 1;
- do_gettimeofday(&tv);
- ha->last_ffdc = tv.tv_sec;
+ ha->last_ffdc = ktime_get_real_seconds();
ips_ffdc_reset(ha, IPS_INTR_IORL);
if (!ips_read_config(ha, IPS_INTR_IORL)) {
@@ -2548,12 +2543,9 @@ ips_next(ips_ha_t * ha, int intr)
if ((ha->subsys->param[3] & 0x300000)
&& (ha->scb_activelist.count == 0)) {
- struct timeval tv;
-
- do_gettimeofday(&tv);
-
- if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) {
- ha->last_ffdc = tv.tv_sec;
+ time64_t now = ktime_get_real_seconds();
+ if (now - ha->last_ffdc > IPS_SECS_8HOURS) {
+ ha->last_ffdc = now;
ips_ffdc_time(ha);
}
}
@@ -5988,59 +5980,21 @@ ips_ffdc_time(ips_ha_t * ha)
/* */
/****************************************************************************/
static void
-ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time_t current_time)
+ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time)
{
- long days;
- long rem;
- int i;
- int year;
- int yleap;
- int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR };
- int month_lengths[12][2] = { {31, 31},
- {28, 29},
- {31, 31},
- {30, 30},
- {31, 31},
- {30, 30},
- {31, 31},
- {31, 31},
- {30, 30},
- {31, 31},
- {30, 30},
- {31, 31}
- };
+ struct tm tm;
METHOD_TRACE("ips_fix_ffdc_time", 1);
- days = current_time / IPS_SECS_DAY;
- rem = current_time % IPS_SECS_DAY;
-
- scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR);
- rem = rem % IPS_SECS_HOUR;
- scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN);
- scb->cmd.ffdc.second = (rem % IPS_SECS_MIN);
-
- year = IPS_EPOCH_YEAR;
- while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) {
- int newy;
-
- newy = year + (days / IPS_DAYS_NORMAL_YEAR);
- if (days < 0)
- --newy;
- days -= (newy - year) * IPS_DAYS_NORMAL_YEAR +
- IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) -
- IPS_NUM_LEAP_YEARS_THROUGH(year - 1);
- year = newy;
- }
-
- scb->cmd.ffdc.yearH = year / 100;
- scb->cmd.ffdc.yearL = year % 100;
-
- for (i = 0; days >= month_lengths[i][yleap]; ++i)
- days -= month_lengths[i][yleap];
+ time64_to_tm(current_time, 0, &tm);
- scb->cmd.ffdc.month = i + 1;
- scb->cmd.ffdc.day = days + 1;
+ scb->cmd.ffdc.hour = tm.tm_hour;
+ scb->cmd.ffdc.minute = tm.tm_min;
+ scb->cmd.ffdc.second = tm.tm_sec;
+ scb->cmd.ffdc.yearH = (tm.tm_year + 1900) / 100;
+ scb->cmd.ffdc.yearL = tm.tm_year % 100;
+ scb->cmd.ffdc.month = tm.tm_mon + 1;
+ scb->cmd.ffdc.day = tm.tm_mday;
}
/****************************************************************************
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 366be3b2f9b4..db546171e97f 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -402,16 +402,7 @@
#define IPS_BIOS_HEADER 0xC0
/* time oriented stuff */
- #define IPS_IS_LEAP_YEAR(y) (((y % 4 == 0) && ((y % 100 != 0) || (y % 400 == 0))) ? 1 : 0)
- #define IPS_NUM_LEAP_YEARS_THROUGH(y) ((y) / 4 - (y) / 100 + (y) / 400)
-
- #define IPS_SECS_MIN 60
- #define IPS_SECS_HOUR 3600
#define IPS_SECS_8HOURS 28800
- #define IPS_SECS_DAY 86400
- #define IPS_DAYS_NORMAL_YEAR 365
- #define IPS_DAYS_LEAP_YEAR 366
- #define IPS_EPOCH_YEAR 1970
/*
* Scsi_Host Template
@@ -1054,7 +1045,7 @@ typedef struct ips_ha {
uint8_t active;
int ioctl_reset; /* IOCTL Requested Reset Flag */
uint16_t reset_count; /* number of resets */
- time_t last_ffdc; /* last time we sent ffdc info*/
+ time64_t last_ffdc; /* last time we sent ffdc info*/
uint8_t slot_num; /* PCI Slot Number */
int ioctl_len; /* size of ioctl buffer */
dma_addr_t ioctl_busaddr; /* dma address of ioctl buffer*/
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 922e3e56c90d..05cf4daf8788 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -433,9 +433,6 @@ static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
(u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
- if (u->in_connection_align_insertion_frequency < 3)
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
if ((u->in_connection_align_insertion_frequency < 3) ||
(u->align_insertion_frequency == 0) ||
(u->notify_enable_spin_up_insertion_frequency == 0))
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 2ba4b68fdb73..b025a0b74341 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -962,7 +962,6 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
if (conn->datadgst_en)
sdev->request_queue->backing_dev_info->capabilities
|= BDI_CAP_STABLE_WRITES;
- blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
blk_queue_dma_alignment(sdev->request_queue, 0);
return 0;
}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 0cc1567eacc1..ff1d612f6fb9 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -577,6 +577,11 @@ int sas_ata_init(struct domain_device *found_dev)
ata_sas_port_destroy(ap);
return rc;
}
+ rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap);
+ if (rc) {
+ ata_sas_port_destroy(ap);
+ return rc;
+ }
found_dev->sata_dev.ap = ap;
return 0;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index a0fa7ef3a071..1ffca28fe6a8 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -314,6 +314,7 @@ void sas_free_device(struct kref *kref)
kfree(dev->ex_dev.ex_phy);
if (dev_is_sata(dev) && dev->sata_dev.ap) {
+ ata_sas_tport_delete(dev->sata_dev.ap);
ata_sas_port_destroy(dev->sata_dev.ap);
dev->sata_dev.ap = NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 6c0d351c0d0d..20b249a649dd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -64,8 +64,6 @@ struct lpfc_sli2_slim;
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
-#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
- queue depth change in millisecs */
#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
#define LPFC_MIN_TGT_QDEPTH 10
#define LPFC_MAX_TGT_QDEPTH 0xFFFF
@@ -784,6 +782,7 @@ struct lpfc_hba {
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
uint32_t cfg_nvme_io_channel;
+ uint32_t cfg_nvmet_mrq_post;
uint32_t cfg_nvmet_mrq;
uint32_t cfg_enable_nvmet;
uint32_t cfg_nvme_enable_fb;
@@ -922,12 +921,6 @@ struct lpfc_hba {
atomic_t fc4ScsiOutputRequests;
atomic_t fc4ScsiControlRequests;
atomic_t fc4ScsiIoCmpls;
- atomic_t fc4NvmeInputRequests;
- atomic_t fc4NvmeOutputRequests;
- atomic_t fc4NvmeControlRequests;
- atomic_t fc4NvmeIoCmpls;
- atomic_t fc4NvmeLsRequests;
- atomic_t fc4NvmeLsCmpls;
uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2ac1d21c553f..729d343861f4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -149,10 +149,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
- uint64_t data1, data2, data3, tot;
+ struct lpfc_nvme_ctrl_stat *cstat;
+ uint64_t data1, data2, data3;
+ uint64_t totin, totout, tot;
char *statep;
+ int i;
int len = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
@@ -293,6 +297,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+ phba->brd_no,
+ phba->sli4_hba.max_cfg_param.max_xri,
+ phba->sli4_hba.nvme_xri_max,
+ phba->sli4_hba.scsi_xri_max,
+ lpfc_sli4_get_els_iocb_cnt(phba));
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -309,11 +320,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
localport->port_id, statep);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!ndlp->nrport)
+ rport = lpfc_ndlp_get_nrport(ndlp);
+ if (!rport)
continue;
/* local short-hand pointer. */
- nrport = ndlp->nrport->remoteport;
+ nrport = rport->remoteport;
+ if (!nrport)
+ continue;
/* Port state is only one of two values for now. */
switch (nrport->port_state) {
@@ -364,11 +378,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
spin_unlock_irq(shost->host_lock);
+ if (!lport)
+ return len;
+
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %010x Cmpl %010x Abort %08x\n",
- atomic_read(&phba->fc4NvmeLsRequests),
- atomic_read(&phba->fc4NvmeLsCmpls),
+ atomic_read(&lport->fc4NvmeLsRequests),
+ atomic_read(&lport->fc4NvmeLsCmpls),
atomic_read(&lport->xmt_ls_abort));
len += snprintf(buf + len, PAGE_SIZE - len,
@@ -377,28 +394,33 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_ls_xb),
atomic_read(&lport->cmpl_ls_err));
- tot = atomic_read(&phba->fc4NvmeIoCmpls);
- data1 = atomic_read(&phba->fc4NvmeInputRequests);
- data2 = atomic_read(&phba->fc4NvmeOutputRequests);
- data3 = atomic_read(&phba->fc4NvmeControlRequests);
+ totin = 0;
+ totout = 0;
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ cstat = &lport->cstat[i];
+ tot = atomic_read(&cstat->fc4NvmeIoCmpls);
+ totin += tot;
+ data1 = atomic_read(&cstat->fc4NvmeInputRequests);
+ data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
+ data3 = atomic_read(&cstat->fc4NvmeControlRequests);
+ totout += (data1 + data2 + data3);
+ }
len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP: Rd %016llx Wr %016llx IO %016llx\n",
- data1, data2, data3);
+ "Total FCP Cmpl %016llx Issue %016llx "
+ "OutIO %016llx\n",
+ totin, totout, totout - totin);
len += snprintf(buf+len, PAGE_SIZE-len,
- " noxri %08x nondlp %08x qdepth %08x "
- "wqerr %08x\n",
+ " abort %08x noxri %08x nondlp %08x qdepth %08x "
+ "wqerr %08x err %08x\n",
+ atomic_read(&lport->xmt_fcp_abort),
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_err),
atomic_read(&lport->xmt_fcp_wqerr));
len += snprintf(buf + len, PAGE_SIZE - len,
- " Cmpl %016llx Outstanding %016llx Abort %08x\n",
- tot, ((data1 + data2 + data3) - tot),
- atomic_read(&lport->xmt_fcp_abort));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
"FCP CMPL: xb %08x Err %08x\n",
atomic_read(&lport->cmpl_fcp_xb),
atomic_read(&lport->cmpl_fcp_err));
@@ -3280,6 +3302,9 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_nvme_rport *rport;
+#endif
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
@@ -3289,8 +3314,9 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
#if (IS_ENABLED(CONFIG_NVME_FC))
- if (ndlp->nrport)
- nvme_fc_set_remoteport_devloss(ndlp->nrport->remoteport,
+ rport = lpfc_ndlp_get_nrport(ndlp);
+ if (rport)
+ nvme_fc_set_remoteport_devloss(rport->remoteport,
vport->cfg_devloss_tmo);
#endif
}
@@ -3414,6 +3440,15 @@ LPFC_ATTR_R(nvmet_mrq,
"Specify number of RQ pairs for processing NVMET cmds");
/*
+ * lpfc_nvmet_mrq_post: Specify number of RQ buffers to initially post
+ * to each NVMET RQ. Range 64 to 2048, default is 512.
+ */
+LPFC_ATTR_R(nvmet_mrq_post,
+ LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
+ LPFC_NVMET_RQE_DEF_COUNT,
+ "Specify number of RQ buffers to initially post");
+
+/*
* lpfc_enable_fc4_type: Defines what FC4 types are supported.
* Supported Values: 1 - register just FCP
* 3 - register both FCP and NVME
@@ -3469,8 +3504,49 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
# tgt_queue_depth: This parameter is used to limit the number of outstanding
# commands per target port. Value range is [10,65535]. Default value is 65535.
*/
-LPFC_VPORT_ATTR_RW(tgt_queue_depth, 65535, 10, 65535,
- "Max number of FCP commands we can queue to a specific target port");
+static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
+module_param(lpfc_tgt_queue_depth, uint, 0444);
+MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
+lpfc_vport_param_show(tgt_queue_depth);
+lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
+ LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
+
+/**
+ * lpfc_tgt_queue_depth_set: Sets an attribute value.
+ * @vport: pointer to the vport structure.
+ * @val: integer attribute value.
+ *
+ * Description: Sets the parameter to the new value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ */
+static int
+lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+
+ if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
+ return -EINVAL;
+
+ if (val == vport->cfg_tgt_queue_depth)
+ return 0;
+
+ spin_lock_irq(shost->host_lock);
+ vport->cfg_tgt_queue_depth = val;
+
+ /* Next loop thru nodelist and change cmd_qdepth */
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
+ ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+}
+
+lpfc_vport_param_store(tgt_queue_depth);
+static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
/*
# hba_queue_depth: This parameter is used to limit the number of outstanding
@@ -5302,6 +5378,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_suppress_rsp,
&dev_attr_lpfc_nvme_io_channel,
&dev_attr_lpfc_nvmet_mrq,
+ &dev_attr_lpfc_nvmet_mrq_post,
&dev_attr_lpfc_nvme_enable_fb,
&dev_attr_lpfc_nvmet_fb_size,
&dev_attr_lpfc_enable_bg,
@@ -6352,6 +6429,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
+ lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 0f174ca80f67..edb1a18a6414 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3621,7 +3621,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
bsg_reply->result = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2937 SLI_CONFIG ext-buffer maibox command "
+ "2937 SLI_CONFIG ext-buffer mailbox command "
"(x%x/x%x) complete bsg job done, bsize:%d\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, size);
@@ -3632,7 +3632,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
- "2938 SLI_CONFIG ext-buffer maibox "
+ "2938 SLI_CONFIG ext-buffer mailbox "
"command (x%x/x%x) failure, rc:x%x\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, rc);
@@ -3666,7 +3666,7 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2939 SLI_CONFIG ext-buffer rd maibox command "
+ "2939 SLI_CONFIG ext-buffer rd mailbox command "
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
@@ -3706,7 +3706,7 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2940 SLI_CONFIG ext-buffer wr maibox command "
+ "2940 SLI_CONFIG ext-buffer wr mailbox command "
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
@@ -3988,12 +3988,12 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2947 Issued SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2948 Failed to issue SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
rc = -EPIPE;
job_error:
@@ -4147,12 +4147,12 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2955 Issued SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2956 Failed to issue SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
rc = -EPIPE;
goto job_error;
}
@@ -4492,12 +4492,12 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2969 Issued SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2970 Failed to issue SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
rc = -EPIPE;
goto job_error;
}
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0617c8ea88c6..d4a200ae5a6f 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -471,6 +471,11 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
+ /* Don't assume the rport is always the previous
+ * FC4 type.
+ */
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
+
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -691,6 +696,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_flag &= ~FC_RSCN_DEFERRED;
spin_unlock_irq(shost->host_lock);
+ /* This is a GID_FT completing so the gidft_inp counter was
+ * incremented before the GID_FT was issued to the wire.
+ */
+ vport->gidft_inp--;
+
/*
* Skip processing the NS response
* Re-issue the NS cmd
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index fb0dc2aeed91..9df0c051349f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -544,7 +544,7 @@ static int
lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
{
int len = 0;
- int cnt;
+ int i, iocnt, outio, cnt;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
@@ -552,12 +552,15 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
struct nvme_fc_local_port *localport;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_remote_port *nrport;
+ struct lpfc_nvme_rport *rport;
cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
+ outio = 0;
len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ iocnt = 0;
if (!cnt) {
len += snprintf(buf+len, size-len,
"Missing Nodelist Entries\n");
@@ -585,9 +588,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
break;
case NLP_STE_UNMAPPED_NODE:
statep = "UNMAP ";
+ iocnt = 1;
break;
case NLP_STE_MAPPED_NODE:
statep = "MAPPED";
+ iocnt = 1;
break;
case NLP_STE_NPR_NODE:
statep = "NPR ";
@@ -614,8 +619,10 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
if (ndlp->nlp_type & NLP_FC_NODE)
len += snprintf(buf+len, size-len, "FC_NODE ");
- if (ndlp->nlp_type & NLP_FABRIC)
+ if (ndlp->nlp_type & NLP_FABRIC) {
len += snprintf(buf+len, size-len, "FABRIC ");
+ iocnt = 0;
+ }
if (ndlp->nlp_type & NLP_FCP_TARGET)
len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
ndlp->nlp_sid);
@@ -632,10 +639,20 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
ndlp->nlp_usg_map);
len += snprintf(buf+len, size-len, "refcnt:%x",
kref_read(&ndlp->kref));
+ if (iocnt) {
+ i = atomic_read(&ndlp->cmd_pending);
+ len += snprintf(buf + len, size - len,
+ " OutIO:x%x Qdepth x%x",
+ i, ndlp->cmd_qdepth);
+ outio += i;
+ }
len += snprintf(buf+len, size-len, "\n");
}
spin_unlock_irq(shost->host_lock);
+ len += snprintf(buf + len, size - len,
+ "\nOutstanding IO x%x\n", outio);
+
if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
len += snprintf(buf + len, size - len,
@@ -679,10 +696,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf + len, size - len, "\tRport List:\n");
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
- if (!ndlp->nrport)
+ rport = lpfc_ndlp_get_nrport(ndlp);
+ if (!rport)
continue;
- nrport = ndlp->nrport->remoteport;
+ nrport = rport->remoteport;
+ if (!nrport)
+ continue;
/* Port state is only one of two values for now. */
switch (nrport->port_state) {
@@ -751,10 +771,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_ctrl_stat *cstat;
struct lpfc_nvme_lport *lport;
- uint64_t tot, data1, data2, data3;
+ uint64_t data1, data2, data3;
+ uint64_t tot, totin, totout;
+ int cnt, i, maxch;
int len = 0;
- int cnt;
if (phba->nvmet_support) {
if (!phba->targetport)
@@ -880,33 +902,52 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return len;
+ localport = vport->localport;
+ if (!localport)
+ return len;
+ lport = (struct lpfc_nvme_lport *)localport->private;
+ if (!lport)
+ return len;
+
len += snprintf(buf + len, size - len,
"\nNVME Lport Statistics\n");
len += snprintf(buf + len, size - len,
"LS: Xmt %016x Cmpl %016x\n",
- atomic_read(&phba->fc4NvmeLsRequests),
- atomic_read(&phba->fc4NvmeLsCmpls));
-
- tot = atomic_read(&phba->fc4NvmeIoCmpls);
- data1 = atomic_read(&phba->fc4NvmeInputRequests);
- data2 = atomic_read(&phba->fc4NvmeOutputRequests);
- data3 = atomic_read(&phba->fc4NvmeControlRequests);
+ atomic_read(&lport->fc4NvmeLsRequests),
+ atomic_read(&lport->fc4NvmeLsCmpls));
- len += snprintf(buf + len, size - len,
- "FCP: Rd %016llx Wr %016llx IO %016llx\n",
- data1, data2, data3);
-
- len += snprintf(buf + len, size - len,
- " Cmpl %016llx Outstanding %016llx\n",
- tot, (data1 + data2 + data3) - tot);
+ if (phba->cfg_nvme_io_channel < 32)
+ maxch = phba->cfg_nvme_io_channel;
+ else
+ maxch = 32;
+ totin = 0;
+ totout = 0;
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ cstat = &lport->cstat[i];
+ tot = atomic_read(&cstat->fc4NvmeIoCmpls);
+ totin += tot;
+ data1 = atomic_read(&cstat->fc4NvmeInputRequests);
+ data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
+ data3 = atomic_read(&cstat->fc4NvmeControlRequests);
+ totout += (data1 + data2 + data3);
+
+ /* Limit to 32, debugfs display buffer limitation */
+ if (i >= 32)
+ continue;
- localport = vport->localport;
- if (!localport)
- return len;
- lport = (struct lpfc_nvme_lport *)localport->private;
- if (!lport)
- return len;
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "FCP (%d): Rd %016llx Wr %016llx "
+ "IO %016llx ",
+ i, data1, data2, data3);
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Cmpl %016llx OutIO %016llx\n",
+ tot, ((data1 + data2 + data3) - tot));
+ }
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Total FCP Cmpl %016llx Issue %016llx "
+ "OutIO %016llx\n",
+ totin, totout, totout - totin);
len += snprintf(buf + len, size - len,
"LS Xmt Err: Abrt %08x Err %08x "
@@ -918,11 +959,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf + len, size - len,
"FCP Xmt Err: noxri %06x nondlp %06x "
- "qdepth %06x wqerr %06x Abrt %06x\n",
+ "qdepth %06x wqerr %06x err %06x Abrt %06x\n",
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
atomic_read(&lport->xmt_fcp_wqerr),
+ atomic_read(&lport->xmt_fcp_err),
atomic_read(&lport->xmt_fcp_abort));
len += snprintf(buf + len, size - len,
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 74895e62aaea..6d84a10fef07 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -6268,7 +6268,6 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
* flush the RSCN. Otherwise, the outstanding requests
* need to complete.
*/
- vport->gidft_inp = 0;
if (lpfc_issue_gidft(vport) > 0)
return 1;
} else {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3e7712cd6c9a..2fef54fab86d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -708,8 +708,7 @@ lpfc_work_done(struct lpfc_hba *phba)
HA_RXMASK));
}
}
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (!list_empty(&pring->txq)))
+ if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_drain_txq(phba);
/*
* Turn on Ring interrupts
@@ -3876,10 +3875,6 @@ int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_nodelist *ndlp;
-
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
- ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
/* Good status, issue CT Request to NameServer */
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 98b80559c215..f43f0bacb77a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -566,6 +566,7 @@ struct lpfc_register {
/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
#define LPFC_SLI_INTF 0x0058
+#define LPFC_SLI_ASIC_VER 0x009C
#define LPFC_CTL_PORT_SEM_OFFSET 0x400
#define lpfc_port_smphr_perr_SHIFT 31
@@ -3912,6 +3913,7 @@ struct lpfc_acqe_link {
#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
+#define LPFC_ASYNC_LINK_FAULT_LR_LRR 0x3
#define lpfc_acqe_logical_link_speed_SHIFT 16
#define lpfc_acqe_logical_link_speed_MASK 0x0000FFFF
#define lpfc_acqe_logical_link_speed_WORD word1
@@ -4616,6 +4618,9 @@ union lpfc_wqe128 {
struct send_frame_wqe send_frame;
};
+#define MAGIC_NUMER_G6 0xFEAA0003
+#define MAGIC_NUMER_G7 0xFEAA0005
+
struct lpfc_grp_hdr {
uint32_t size;
uint32_t magic_number;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7887468c71b4..7ae343b14630 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1266,6 +1266,9 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
uint64_t tot, data1, data2, data3;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_register reg_data;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_ctrl_stat *cstat;
void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
vports = lpfc_create_vport_work_array(phba);
@@ -1299,14 +1302,25 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
tot += atomic_read(&tgtp->xmt_fcp_release);
tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
} else {
- tot = atomic_read(&phba->fc4NvmeIoCmpls);
- data1 = atomic_read(
- &phba->fc4NvmeInputRequests);
- data2 = atomic_read(
- &phba->fc4NvmeOutputRequests);
- data3 = atomic_read(
- &phba->fc4NvmeControlRequests);
- tot = (data1 + data2 + data3) - tot;
+ localport = phba->pport->localport;
+ if (!localport || !localport->private)
+ goto skip_eqdelay;
+ lport = (struct lpfc_nvme_lport *)
+ localport->private;
+ tot = 0;
+ for (i = 0;
+ i < phba->cfg_nvme_io_channel; i++) {
+ cstat = &lport->cstat[i];
+ data1 = atomic_read(
+ &cstat->fc4NvmeInputRequests);
+ data2 = atomic_read(
+ &cstat->fc4NvmeOutputRequests);
+ data3 = atomic_read(
+ &cstat->fc4NvmeControlRequests);
+ tot += (data1 + data2 + data3);
+ tot -= atomic_read(
+ &cstat->fc4NvmeIoCmpls);
+ }
}
}
@@ -4265,32 +4279,24 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
* @phba: pointer to lpfc hba data structure.
* @acqe_link: pointer to the async link completion queue entry.
*
- * This routine is to parse the SLI4 link-attention link fault code and
- * translate it into the base driver's read link attention mailbox command
- * status.
- *
- * Return: Link-attention status in terms of base driver's coding.
+ * This routine is to parse the SLI4 link-attention link fault code.
**/
-static uint16_t
+static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
struct lpfc_acqe_link *acqe_link)
{
- uint16_t latt_fault;
-
switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
case LPFC_ASYNC_LINK_FAULT_NONE:
case LPFC_ASYNC_LINK_FAULT_LOCAL:
case LPFC_ASYNC_LINK_FAULT_REMOTE:
- latt_fault = 0;
+ case LPFC_ASYNC_LINK_FAULT_LR_LRR:
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0398 Invalid link fault code: x%x\n",
+ "0398 Unknown link fault code: x%x\n",
bf_get(lpfc_acqe_link_fault, acqe_link));
- latt_fault = MBXERR_ERROR;
break;
}
- return latt_fault;
}
/**
@@ -4565,9 +4571,12 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
* the READ_TOPOLOGY completion routine to continue without actually
* sending the READ_TOPOLOGY mailbox command to the port.
*/
- /* Parse and translate status field */
+ /* Initialize completion status */
mb = &pmb->u.mb;
- mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
+ mb->mbxStatus = MBX_SUCCESS;
+
+ /* Parse port fault information field */
+ lpfc_sli4_parse_latt_fault(phba, acqe_link);
/* Parse and translate link attention fields */
la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
@@ -4695,10 +4704,12 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
break;
}
- /* Parse and translate status field */
+ /* Initialize completion status */
mb = &pmb->u.mb;
- mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
- (void *)acqe_fc);
+ mb->mbxStatus = MBX_SUCCESS;
+
+ /* Parse port fault information field */
+ lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
/* Parse and translate link attention fields */
la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
@@ -5103,7 +5114,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
LOG_DISCOVERY,
- "2772 Issue FCF rediscover mabilbox "
+ "2772 Issue FCF rediscover mailbox "
"command failed, fail through to FCF "
"dead event\n");
spin_lock_irq(&phba->hbalock);
@@ -5195,7 +5206,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
LOG_DISCOVERY,
"2774 Issue FCF rediscover "
- "mabilbox command failed, "
+ "mailbox command failed, "
"through to CVL event\n");
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
@@ -5839,6 +5850,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
int fof_vectors = 0;
int extra;
uint64_t wwn;
+ u32 if_type;
+ u32 if_fam;
phba->sli4_hba.num_online_cpu = num_online_cpus();
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
@@ -6160,15 +6173,28 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
rc = lpfc_get_sli4_parameters(phba, mboxq);
if (rc) {
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if_fam = bf_get(lpfc_sli_intf_sli_family,
+ &phba->sli4_hba.sli_intf);
if (phba->sli4_hba.extents_in_use &&
phba->sli4_hba.rpi_hdrs_in_use) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2999 Unsupported SLI4 Parameters "
"Extents and RPI headers enabled.\n");
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
+ if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto out_free_bsmbx;
+ }
+ }
+ if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
+ if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto out_free_bsmbx;
}
- mempool_free(mboxq, phba->mbox_mem_pool);
- rc = -EIO;
- goto out_free_bsmbx;
}
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -6406,8 +6432,11 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
return error;
}
- /* workqueue for deferred irq use */
- phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ else
+ phba->wq = NULL;
return 0;
}
@@ -6430,7 +6459,8 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
}
/* Stop kernel worker thread */
- kthread_stop(phba->worker_thread);
+ if (phba->worker_thread)
+ kthread_stop(phba->worker_thread);
}
/**
@@ -6895,12 +6925,6 @@ lpfc_create_shost(struct lpfc_hba *phba)
atomic_set(&phba->fc4ScsiOutputRequests, 0);
atomic_set(&phba->fc4ScsiControlRequests, 0);
atomic_set(&phba->fc4ScsiIoCmpls, 0);
- atomic_set(&phba->fc4NvmeInputRequests, 0);
- atomic_set(&phba->fc4NvmeOutputRequests, 0);
- atomic_set(&phba->fc4NvmeControlRequests, 0);
- atomic_set(&phba->fc4NvmeIoCmpls, 0);
- atomic_set(&phba->fc4NvmeLsRequests, 0);
- atomic_set(&phba->fc4NvmeLsCmpls, 0);
vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
if (!vport)
return -ENODEV;
@@ -7781,6 +7805,40 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_wq,
phba->sli4_hba.max_cfg_param.max_rq);
+ /*
+ * Calculate NVME queue resources based on how
+ * many WQ/CQs are available.
+ */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ length = phba->sli4_hba.max_cfg_param.max_wq;
+ if (phba->sli4_hba.max_cfg_param.max_cq <
+ phba->sli4_hba.max_cfg_param.max_wq)
+ length = phba->sli4_hba.max_cfg_param.max_cq;
+
+ /*
+ * Whats left after this can go toward NVME.
+ * The minus 6 accounts for ELS, NVME LS, MBOX
+ * fof plus a couple extra. When configured for
+ * NVMET, FCP io channel WQs are not created.
+ */
+ length -= 6;
+ if (!phba->nvmet_support)
+ length -= phba->cfg_fcp_io_channel;
+
+ if (phba->cfg_nvme_io_channel > length) {
+ lpfc_printf_log(
+ phba, KERN_ERR, LOG_SLI,
+ "2005 Reducing NVME IO channel to %d: "
+ "WQ %d CQ %d NVMEIO %d FCPIO %d\n",
+ length,
+ phba->sli4_hba.max_cfg_param.max_wq,
+ phba->sli4_hba.max_cfg_param.max_cq,
+ phba->cfg_nvme_io_channel,
+ phba->cfg_fcp_io_channel);
+
+ phba->cfg_nvme_io_channel = length;
+ }
+ }
}
if (rc)
@@ -10533,6 +10591,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_pc_sli4_params *sli4_params;
uint32_t mbox_tmo;
int length;
+ bool exp_wqcq_pages = true;
struct lpfc_sli4_parameters *mbx_sli4_parameters;
/*
@@ -10659,8 +10718,15 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->nvme_support, phba->nvme_embed_pbde,
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2) &&
+ (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_LNCR_A0))
+ exp_wqcq_pages = false;
+
if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
(bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
+ exp_wqcq_pages &&
(sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
phba->enab_exp_wqcq_pages = 1;
else
@@ -11322,7 +11388,11 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
const struct firmware *fw)
{
- if (offset == ADD_STATUS_FW_NOT_SUPPORTED)
+ if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
+ (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+ magic_number != MAGIC_NUMER_G6) ||
+ (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
+ magic_number != MAGIC_NUMER_G7))
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3030 This firmware version is not supported on "
"this HBA model. Device:%x Magic:%x Type:%x "
@@ -11719,6 +11789,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
lpfc_nvme_free(phba);
lpfc_free_iocb_list(phba);
+ lpfc_unset_driver_resource_phase2(phba);
lpfc_sli4_driver_resource_unset(phba);
/* Unmap adapter Control and Doorbell registers */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 022060636ae1..1a803975bcbc 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1936,31 +1936,14 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto out;
}
- /* When the rport rejected the FCP PRLI as unsupported.
- * This should only happen in Pt2Pt so an NVME PRLI
- * should be outstanding still.
- */
- if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) {
+ /* Adjust the nlp_type accordingly if the PRLI failed */
+ if (npr)
ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
- goto out_err;
- }
-
- /* The LS Req had some error. Don't let this be a
- * target.
- */
- if ((ndlp->fc4_prli_sent == 1) &&
- (ndlp->nlp_state == NLP_STE_PRLI_ISSUE) &&
- (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR)))
- /* The FCP PRLI completed successfully but
- * the NVME PRLI failed. Since they are sent in
- * succession, allow the FCP to complete.
- */
- goto out_err;
+ if (nvpr)
+ ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;
- ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- ndlp->nlp_type |= NLP_FCP_INITIATOR;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- return ndlp->nlp_state;
+ /* We can't set the DSM state till BOTH PRLIs complete */
+ goto out_err;
}
if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
@@ -1999,6 +1982,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_disc, nvpr))
ndlp->nlp_type |= NLP_NVME_DISCOVERY;
+ /* This node is an NVME target. Adjust the command
+ * queue depth on this node to not exceed the available
+ * xris.
+ */
+ ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;
+
/*
* If prli_fba is set, the Target supports FirstBurst.
* If prli_fb_sz is 0, the FirstBurst size is unlimited,
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 378dca40ca20..76a5a99605aa 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -334,7 +334,14 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
"6146 remoteport delete of remoteport %p\n",
remoteport);
spin_lock_irq(&vport->phba->hbalock);
- ndlp->nrport = NULL;
+
+ /* The register rebind might have occurred before the delete
+ * downcall. Guard against this race.
+ */
+ if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
+ ndlp->nrport = NULL;
+ ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
+ }
spin_unlock_irq(&vport->phba->hbalock);
/* Remove original register reference. The host transport
@@ -357,15 +364,19 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_dmabuf *buf_ptr;
struct lpfc_nodelist *ndlp;
- atomic_inc(&vport->phba->fc4NvmeLsCmpls);
-
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
- if (status) {
+
+ if (vport->localport) {
lport = (struct lpfc_nvme_lport *)vport->localport->private;
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
- atomic_inc(&lport->cmpl_ls_xb);
- atomic_inc(&lport->cmpl_ls_err);
+ if (lport) {
+ atomic_inc(&lport->fc4NvmeLsCmpls);
+ if (status) {
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_ls_xb);
+ atomic_inc(&lport->cmpl_ls_err);
+ }
+ }
}
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
@@ -570,6 +581,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ if (unlikely(!lport) || unlikely(!rport))
+ return -EINVAL;
+
vport = lport->vport;
if (vport->load_flag & FC_UNLOADING)
@@ -639,7 +653,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
- atomic_inc(&vport->phba->fc4NvmeLsRequests);
+ atomic_inc(&lport->fc4NvmeLsRequests);
/* Hardcode the wait to 30 seconds. Connections are failing otherwise.
* This code allows it all to work.
@@ -690,6 +704,8 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_iocbq *wqe, *next_wqe;
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ if (unlikely(!lport))
+ return;
vport = lport->vport;
phba = vport->phba;
@@ -949,28 +965,48 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_ctrl_stat *cstat;
unsigned long flags;
- uint32_t code, status;
+ uint32_t code, status, idx;
uint16_t cid, sqhd, data;
uint32_t *ptr;
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
+ if (!lpfc_ncmd) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NODE | LOG_NVME_IOERR,
+ "6071 Null lpfc_ncmd pointer. No "
+ "release, skip completion\n");
+ return;
+ }
+
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6071 Completion pointers bad on wqe %p.\n",
- wcqe);
+ "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
+ "nvmeCmd %p nrport %p\n",
+ lpfc_ncmd, lpfc_ncmd->nvmeCmd,
+ lpfc_ncmd->nrport);
+
+ /* Release the lpfc_ncmd regardless of the missing elements. */
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
- atomic_inc(&phba->fc4NvmeIoCmpls);
-
nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport;
status = bf_get(lpfc_wcqe_c_status, wcqe);
- if (status) {
+
+ if (vport->localport) {
lport = (struct lpfc_nvme_lport *)vport->localport->private;
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
- atomic_inc(&lport->cmpl_fcp_xb);
- atomic_inc(&lport->cmpl_fcp_err);
+ if (lport) {
+ idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+ cstat = &lport->cstat[idx];
+ atomic_inc(&cstat->fc4NvmeIoCmpls);
+ if (status) {
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_fcp_xb);
+ atomic_inc(&lport->cmpl_fcp_err);
+ }
+ }
}
lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
@@ -1163,7 +1199,8 @@ out_err:
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
struct lpfc_nvme_buf *lpfc_ncmd,
- struct lpfc_nodelist *pnode)
+ struct lpfc_nodelist *pnode,
+ struct lpfc_nvme_ctrl_stat *cstat)
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
@@ -1201,7 +1238,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
} else {
wqe->fcp_iwrite.initial_xfer_len = 0;
}
- atomic_inc(&phba->fc4NvmeOutputRequests);
+ atomic_inc(&cstat->fc4NvmeOutputRequests);
} else {
/* From the iread template, initialize words 7 - 11 */
memcpy(&wqe->words[7],
@@ -1214,13 +1251,13 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 5 */
wqe->fcp_iread.rsrvd5 = 0;
- atomic_inc(&phba->fc4NvmeInputRequests);
+ atomic_inc(&cstat->fc4NvmeInputRequests);
}
} else {
/* From the icmnd template, initialize words 4 - 11 */
memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
sizeof(uint32_t) * 8);
- atomic_inc(&phba->fc4NvmeControlRequests);
+ atomic_inc(&cstat->fc4NvmeControlRequests);
}
/*
* Finish initializing those WQE fields that are independent
@@ -1400,7 +1437,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
{
int ret = 0;
int expedite = 0;
+ int idx;
struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_ctrl_stat *cstat;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
struct lpfc_nodelist *ndlp;
@@ -1425,9 +1464,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
if (unlikely(!hw_queue_handle)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
- "6129 Fail Abort, NULL hw_queue_handle\n");
- ret = -EINVAL;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6117 Fail IO, NULL hw_queue_handle\n");
+ atomic_inc(&lport->xmt_fcp_err);
+ ret = -EBUSY;
goto out_fail;
}
@@ -1439,12 +1479,18 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
}
if (vport->load_flag & FC_UNLOADING) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6124 Fail IO, Driver unload\n");
+ atomic_inc(&lport->xmt_fcp_err);
ret = -ENODEV;
goto out_fail;
}
freqpriv = pnvme_fcreq->private;
if (unlikely(!freqpriv)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6158 Fail IO, NULL request data\n");
+ atomic_inc(&lport->xmt_fcp_err);
ret = -EINVAL;
goto out_fail;
}
@@ -1462,32 +1508,26 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
*/
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6053 rport %p, ndlp %p, DID x%06x "
- "ndlp not ready.\n",
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
+ "6053 Fail IO, ndlp not ready: rport %p "
+ "ndlp %p, DID x%06x\n",
rport, ndlp, pnvme_rport->port_id);
-
- ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
- "6066 Missing node for DID %x\n",
- pnvme_rport->port_id);
- atomic_inc(&lport->xmt_fcp_bad_ndlp);
- ret = -ENODEV;
- goto out_fail;
- }
+ atomic_inc(&lport->xmt_fcp_err);
+ ret = -EBUSY;
+ goto out_fail;
}
/* The remote node has to be a mapped target or it's an error. */
if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6036 rport %p, DID x%06x not ready for "
- "IO. State x%x, Type x%x\n",
- rport, pnvme_rport->port_id,
- ndlp->nlp_state, ndlp->nlp_type);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
+ "6036 Fail IO, DID x%06x not ready for "
+ "IO. State x%x, Type x%x Flg x%x\n",
+ pnvme_rport->port_id,
+ ndlp->nlp_state, ndlp->nlp_type,
+ ndlp->upcall_flags);
atomic_inc(&lport->xmt_fcp_bad_ndlp);
- ret = -ENODEV;
+ ret = -EBUSY;
goto out_fail;
}
@@ -1508,6 +1548,12 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
*/
if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
!expedite) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6174 Fail IO, ndlp qdepth exceeded: "
+ "idx %d DID %x pend %d qdepth %d\n",
+ lpfc_queue_info->index, ndlp->nlp_DID,
+ atomic_read(&ndlp->cmd_pending),
+ ndlp->cmd_qdepth);
atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
goto out_fail;
@@ -1517,8 +1563,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_ncmd == NULL) {
atomic_inc(&lport->xmt_fcp_noxri);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
- "6065 driver's buffer pool is empty, "
- "IO failed\n");
+ "6065 Fail IO, driver buffer pool is empty: "
+ "idx %d DID %x\n",
+ lpfc_queue_info->index, ndlp->nlp_DID);
ret = -EBUSY;
goto out_fail;
}
@@ -1543,15 +1590,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->start_time = jiffies;
- lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
- ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
- if (ret) {
- ret = -ENOMEM;
- goto out_free_nvme_buf;
- }
-
- atomic_inc(&ndlp->cmd_pending);
-
/*
* Issue the IO on the WQ indicated by index in the hw_queue_handle.
* This identfier was create in our hardware queue create callback
@@ -1560,7 +1598,23 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* index to use and that they have affinitized a CPU to this hardware
* queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
*/
- lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index;
+ idx = lpfc_queue_info->index;
+ lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
+ cstat = &lport->cstat[idx];
+
+ lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
+ ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
+ if (ret) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6175 Fail IO, Prep DMA: "
+ "idx %d DID %x\n",
+ lpfc_queue_info->index, ndlp->nlp_DID);
+ atomic_inc(&lport->xmt_fcp_err);
+ ret = -ENOMEM;
+ goto out_free_nvme_buf;
+ }
+
+ atomic_inc(&ndlp->cmd_pending);
lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -1571,7 +1625,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
atomic_inc(&lport->xmt_fcp_wqerr);
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
- "6113 FCP could not issue WQE err %x "
+ "6113 Fail IO, Could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
lpfc_ncmd->cur_iocbq.sli4_xritag);
@@ -1605,11 +1659,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
out_free_nvme_buf:
if (lpfc_ncmd->nvmeCmd->sg_cnt) {
if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
- atomic_dec(&phba->fc4NvmeOutputRequests);
+ atomic_dec(&cstat->fc4NvmeOutputRequests);
else
- atomic_dec(&phba->fc4NvmeInputRequests);
+ atomic_dec(&cstat->fc4NvmeInputRequests);
} else
- atomic_dec(&phba->fc4NvmeControlRequests);
+ atomic_dec(&cstat->fc4NvmeControlRequests);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
out_fail:
return ret;
@@ -2390,7 +2444,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
struct nvme_fc_port_info nfcp_info;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
- int len;
+ struct lpfc_nvme_ctrl_stat *cstat;
+ int len, i;
/* Initialize this localport instance. The vport wwn usage ensures
* that NPIV is accounted for.
@@ -2414,6 +2469,11 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
+ cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
+ phba->cfg_nvme_io_channel), GFP_KERNEL);
+ if (!cstat)
+ return -ENOMEM;
+
/* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area.
*/
@@ -2436,11 +2496,13 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
vport->localport = localport;
lport->vport = vport;
+ lport->cstat = cstat;
vport->nvmei_support = 1;
atomic_set(&lport->xmt_fcp_noxri, 0);
atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
atomic_set(&lport->xmt_fcp_qdepth, 0);
+ atomic_set(&lport->xmt_fcp_err, 0);
atomic_set(&lport->xmt_fcp_wqerr, 0);
atomic_set(&lport->xmt_fcp_abort, 0);
atomic_set(&lport->xmt_ls_abort, 0);
@@ -2449,6 +2511,16 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
atomic_set(&lport->cmpl_fcp_err, 0);
atomic_set(&lport->cmpl_ls_xb, 0);
atomic_set(&lport->cmpl_ls_err, 0);
+ atomic_set(&lport->fc4NvmeLsRequests, 0);
+ atomic_set(&lport->fc4NvmeLsCmpls, 0);
+
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ cstat = &lport->cstat[i];
+ atomic_set(&cstat->fc4NvmeInputRequests, 0);
+ atomic_set(&cstat->fc4NvmeOutputRequests, 0);
+ atomic_set(&cstat->fc4NvmeControlRequests, 0);
+ atomic_set(&cstat->fc4NvmeIoCmpls, 0);
+ }
/* Don't post more new bufs if repost already recovered
* the nvme sgls.
@@ -2458,6 +2530,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
phba->sli4_hba.nvme_xri_max);
vport->phba->total_nvme_bufs += len;
}
+ } else {
+ kfree(cstat);
}
return ret;
@@ -2520,6 +2594,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_ctrl_stat *cstat;
int ret;
if (vport->nvmei_support == 0)
@@ -2528,6 +2603,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
localport = vport->localport;
vport->localport = NULL;
lport = (struct lpfc_nvme_lport *)localport->private;
+ cstat = lport->cstat;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
@@ -2543,6 +2619,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
* indefinitely or succeeds
*/
lpfc_nvme_lport_unreg_wait(vport, lport);
+ kfree(cstat);
/* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the
@@ -2607,6 +2684,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
+ struct lpfc_nvme_rport *oldrport;
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
struct lpfc_nodelist *prev_ndlp;
@@ -2639,7 +2717,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
- if (!ndlp->nrport)
+
+ oldrport = lpfc_ndlp_get_nrport(ndlp);
+ if (!oldrport)
lpfc_nlp_get(ndlp);
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
@@ -2648,9 +2728,15 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* a resume of the existing rport. Else this is a
* new rport.
*/
+ /* Guard against an unregister/reregister
+ * race that leaves the WAIT flag set.
+ */
+ spin_lock_irq(&vport->phba->hbalock);
+ ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
+ spin_unlock_irq(&vport->phba->hbalock);
rport = remote_port->private;
- if (ndlp->nrport) {
- if (ndlp->nrport == remote_port->private) {
+ if (oldrport) {
+ if (oldrport == remote_port->private) {
/* Same remoteport. Just reuse. */
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
@@ -2674,11 +2760,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
spin_unlock_irq(&vport->phba->hbalock);
rport->ndlp = NULL;
rport->remoteport = NULL;
- if (prev_ndlp)
- lpfc_nlp_put(ndlp);
+
+ /* Reference only removed if previous NDLP is no longer
+ * active. It might be just a swap and removing the
+ * reference would cause a premature cleanup.
+ */
+ if (prev_ndlp && prev_ndlp != ndlp) {
+ if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
+ (!prev_ndlp->nrport))
+ lpfc_nlp_put(prev_ndlp);
+ }
}
/* Clean bind the rport to the ndlp. */
@@ -2746,7 +2841,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!lport)
goto input_err;
- rport = ndlp->nrport;
+ rport = lpfc_ndlp_get_nrport(ndlp);
if (!rport)
goto input_err;
@@ -2767,6 +2862,15 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* The transport will update it.
*/
ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
+
+ /* Don't let the host nvme transport keep sending keep-alives
+ * on this remoteport. Vport is unloading, no recovery. The
+ * return values is ignored. The upcall is a courtesy to the
+ * transport.
+ */
+ if (vport->load_flag & FC_UNLOADING)
+ (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
+
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
lpfc_nlp_put(ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 9216653e0441..04bd463dd043 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -30,21 +30,36 @@
#define LPFC_NVME_FB_SHIFT 9
#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
+#define lpfc_ndlp_get_nrport(ndlp) \
+ ((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \
+ ? NULL : ndlp->nrport)
+
struct lpfc_nvme_qhandle {
uint32_t index; /* WQ index to use */
uint32_t qidx; /* queue index passed to create */
uint32_t cpu_id; /* current cpu id at time of create */
};
+struct lpfc_nvme_ctrl_stat {
+ atomic_t fc4NvmeInputRequests;
+ atomic_t fc4NvmeOutputRequests;
+ atomic_t fc4NvmeControlRequests;
+ atomic_t fc4NvmeIoCmpls;
+};
+
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
struct completion lport_unreg_done;
/* Add stats counters here */
+ struct lpfc_nvme_ctrl_stat *cstat;
+ atomic_t fc4NvmeLsRequests;
+ atomic_t fc4NvmeLsCmpls;
atomic_t xmt_fcp_noxri;
atomic_t xmt_fcp_bad_ndlp;
atomic_t xmt_fcp_qdepth;
atomic_t xmt_fcp_wqerr;
+ atomic_t xmt_fcp_err;
atomic_t xmt_fcp_abort;
atomic_t xmt_ls_abort;
atomic_t xmt_ls_err;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index c1bcef3f103c..81f520abfd64 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -22,8 +22,10 @@
********************************************************************/
#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
-#define LPFC_NVMET_RQE_DEF_COUNT 512
-#define LPFC_NVMET_SUCCESS_LEN 12
+#define LPFC_NVMET_RQE_MIN_POST 128
+#define LPFC_NVMET_RQE_DEF_POST 512
+#define LPFC_NVMET_RQE_DEF_COUNT 2048
+#define LPFC_NVMET_SUCCESS_LEN 12
#define LPFC_NVMET_MRQ_OFF 0xffff
#define LPFC_NVMET_MRQ_AUTO 0
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 050f04418f5f..a94fb9f8bb44 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1021,7 +1021,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_test_rrq_active(phba, ndlp,
lpfc_cmd->cur_iocbq.sli4_lxritag))
continue;
- list_del(&lpfc_cmd->list);
+ list_del_init(&lpfc_cmd->list);
found = 1;
break;
}
@@ -1036,7 +1036,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_test_rrq_active(
phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
continue;
- list_del(&lpfc_cmd->list);
+ list_del_init(&lpfc_cmd->list);
found = 1;
break;
}
@@ -3983,9 +3983,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
#endif
- if (pnode && NLP_CHK_NODE_ACT(pnode))
- atomic_dec(&pnode->cmd_pending);
-
if (lpfc_cmd->status) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
(lpfc_cmd->result & IOERR_DRVR_MASK))
@@ -4125,6 +4122,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ atomic_dec(&pnode->cmd_pending);
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
@@ -4138,16 +4136,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
spin_unlock_irqrestore(shost->host_lock, flags);
} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
- if ((pnode->cmd_qdepth != vport->cfg_tgt_queue_depth) &&
- time_after(jiffies, pnode->last_change_time +
- msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
- spin_lock_irqsave(shost->host_lock, flags);
- pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
- pnode->last_change_time = jiffies;
- spin_unlock_irqrestore(shost->host_lock, flags);
- }
+ atomic_dec(&pnode->cmd_pending);
}
-
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
spin_lock_irqsave(&phba->hbalock, flags);
@@ -4591,6 +4581,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
ndlp->nlp_portname.u.wwn[7]);
goto out_tgt_busy;
}
+ atomic_inc(&ndlp->cmd_pending);
+
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
lpfc_rampdown_queue_depth(phba);
@@ -4643,11 +4635,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
- atomic_inc(&ndlp->cmd_pending);
err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err) {
- atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"3376 FCP could not issue IOCB err %x"
"FCP cmd x%x <%d/%llu> "
@@ -4691,6 +4681,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
+ atomic_dec(&ndlp->cmd_pending);
return SCSI_MLQUEUE_HOST_BUSY;
out_tgt_busy:
@@ -4725,7 +4716,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
int ret = SUCCESS, status = 0;
struct lpfc_sli_ring *pring_s4;
int ret_val;
- unsigned long flags, iflags;
+ unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
status = fc_block_scsi_eh(cmnd);
@@ -4825,16 +4816,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = lpfc_sli4_calc_ring(phba, iocb);
+ pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
if (pring_s4 == NULL) {
ret = FAILED;
goto out_unlock;
}
/* Note: both hbalock and ring_lock must be set here */
- spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+ spin_lock(&pring_s4->ring_lock);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocb, 0);
- spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+ spin_unlock(&pring_s4->ring_lock);
} else {
ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
abtsiocb, 0);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 8e38e0204c47..c38e4da71f5f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index cb17e2b2be81..4b70d53acb72 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -96,6 +96,34 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
return &iocbq->iocb;
}
+#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
+/**
+ * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of words required to be copied.
+ * Must be a multiple of sizeof(uint64_t)
+ *
+ * This function is used for copying data between driver memory
+ * and the SLI WQ. This function also changes the endianness
+ * of each word if native endianness is different from SLI
+ * endianness. This function can be called with or without
+ * lock.
+ **/
+void
+lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+ uint64_t *src = srcp;
+ uint64_t *dest = destp;
+ int i;
+
+ for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
+ *dest++ = *src++;
+}
+#else
+#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
+#endif
+
/**
* lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
* @q: The Work Queue to operate on.
@@ -137,7 +165,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
- lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+ lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
if (q->dpp_enable && q->phba->cfg_enable_dpp) {
/* write to DPP aperture taking advatage of Combined Writes */
tmp = (uint8_t *)temp_wqe;
@@ -240,7 +268,7 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
- lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
+ lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
/* Save off the mailbox pointer for completion */
q->phba->mbox = (MAILBOX_t *)temp_mqe;
@@ -663,8 +691,8 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
/* If the host has not yet processed the next entry then we are done */
if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
- lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
- lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
+ lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
+ lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
hq->host_index = ((hq_put_index + 1) % hq->entry_count);
@@ -7199,7 +7227,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
lpfc_post_rq_buffer(
phba, phba->sli4_hba.nvmet_mrq_hdr[i],
phba->sli4_hba.nvmet_mrq_data[i],
- LPFC_NVMET_RQE_DEF_COUNT, i);
+ phba->cfg_nvmet_mrq_post, i);
}
}
@@ -8185,8 +8213,8 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
mbx_cmnd = bf_get(lpfc_mqe_command, mb);
memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
- lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
- sizeof(struct lpfc_mqe));
+ lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
+ sizeof(struct lpfc_mqe));
/* Post the high mailbox dma address to the port and wait for ready. */
dma_address = &phba->sli4_hba.bmbx.dma_address;
@@ -8210,11 +8238,11 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* If so, update the mailbox status so that the upper layers
* can complete the request normally.
*/
- lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
- sizeof(struct lpfc_mqe));
+ lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
+ sizeof(struct lpfc_mqe));
mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
- lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
- sizeof(struct lpfc_mcqe));
+ lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
+ sizeof(struct lpfc_mcqe));
mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
/*
* When the CQE status indicates a failure and the mailbox status
@@ -11300,11 +11328,11 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
unsigned long iflags;
struct lpfc_sli_ring *pring_s4;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
/* all I/Os are in process of being flushed */
if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return 0;
}
sum = 0;
@@ -11366,14 +11394,14 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
- if (pring_s4 == NULL)
+ pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
+ if (!pring_s4)
continue;
/* Note: both hbalock and ring_lock must be set here */
- spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+ spin_lock(&pring_s4->ring_lock);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocbq, 0);
- spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+ spin_unlock(&pring_s4->ring_lock);
} else {
ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
abtsiocbq, 0);
@@ -11385,7 +11413,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
else
sum++;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return sum;
}
@@ -12830,7 +12858,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
/* Move mbox data to caller's mailbox region, do endian swapping */
if (pmb->mbox_cmpl && mbox)
- lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
+ lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
/*
* For mcqe errors, conditionally move a modified error code to
@@ -12913,7 +12941,7 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
bool workposted;
/* Copy the mailbox MCQE and convert endian order as needed */
- lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
+ lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
/* Invoke the proper event handling routine */
if (!bf_get(lpfc_trailer_async, &mcqe))
@@ -12944,6 +12972,17 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
int txcmplq_cnt = 0;
int fcp_txcmplq_cnt = 0;
+ /* Check for response status */
+ if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
+ /* Log the error status */
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0357 ELS CQE error: status=x%x: "
+ "CQE: %08x %08x %08x %08x\n",
+ bf_get(lpfc_wcqe_c_status, wcqe),
+ wcqe->word0, wcqe->total_data_placed,
+ wcqe->parameter, wcqe->word3);
+ }
+
/* Get an irspiocbq for later ELS response processing use */
irspiocbq = lpfc_sli_get_iocbq(phba);
if (!irspiocbq) {
@@ -13173,7 +13212,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
bool workposted = false;
/* Copy the work queue CQE and convert endian order if needed */
- lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
+ lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
/* Check and process for different type of WCQE and dispatch */
switch (bf_get(lpfc_cqe_code, &cqevt)) {
@@ -13364,14 +13403,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
phba->lpfc_rampdown_queue_depth(phba);
/* Log the error status */
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0373 FCP complete error: status=x%x, "
- "hw_status=x%x, total_data_specified=%d, "
- "parameter=x%x, word3=x%x\n",
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0373 FCP CQE error: status=x%x: "
+ "CQE: %08x %08x %08x %08x\n",
bf_get(lpfc_wcqe_c_status, wcqe),
- bf_get(lpfc_wcqe_c_hw_status, wcqe),
- wcqe->total_data_placed, wcqe->parameter,
- wcqe->word3);
+ wcqe->word0, wcqe->total_data_placed,
+ wcqe->parameter, wcqe->word3);
}
/* Look up the FCP command IOCB and create pseudo response IOCB */
@@ -13581,7 +13618,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
bool workposted = false;
/* Copy the work queue CQE and convert endian order if needed */
- lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+ lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
/* Check and process for different type of WCQE and dispatch */
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
@@ -19032,9 +19069,22 @@ lpfc_drain_txq(struct lpfc_hba *phba)
struct lpfc_sglq *sglq;
union lpfc_wqe128 wqe;
uint32_t txq_cnt = 0;
+ struct lpfc_queue *wq;
- pring = lpfc_phba_elsring(phba);
- if (unlikely(!pring))
+ if (phba->link_flag & LS_MDS_LOOPBACK) {
+ /* MDS WQE are posted only to first WQ*/
+ wq = phba->sli4_hba.fcp_wq[0];
+ if (unlikely(!wq))
+ return 0;
+ pring = wq->pring;
+ } else {
+ wq = phba->sli4_hba.els_wq;
+ if (unlikely(!wq))
+ return 0;
+ pring = lpfc_phba_elsring(phba);
+ }
+
+ if (unlikely(!pring) || list_empty(&pring->txq))
return 0;
spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -19075,7 +19125,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
fail_msg = "to convert bpl to sgl";
else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
fail_msg = "to convert iocb to wqe";
- else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+ else if (lpfc_sli4_wq_put(wq, &wqe))
fail_msg = " - Wq is full";
else
lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e8b089abbfb3..18c23afcf46b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom†refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.0.0.1"
+#define LPFC_DRIVER_VERSION "12.0.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 91f5e2c68dbc..3b3767e240d8 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4166,6 +4166,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
int irq, i, j;
int error = -ENODEV;
+ if (hba_count >= MAX_CONTROLLERS)
+ goto out;
+
if (pci_enable_device(pdev))
goto out;
pci_set_master(pdev);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 27fab8235ea5..75dc25f78336 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.704.04.00-rc1"
-#define MEGASAS_RELDATE "December 7, 2017"
+#define MEGASAS_VERSION "07.705.02.00-rc1"
+#define MEGASAS_RELDATE "April 4, 2018"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index ce656c466ca9..c5d0c4bd71d2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -92,7 +92,7 @@ MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
-MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)");
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
int rdpq_enable = 1;
module_param(rdpq_enable, int, S_IRUGO);
@@ -2224,9 +2224,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION_111));
else {
new_affiliation_111 =
- pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h);
+ pci_zalloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h);
if (!new_affiliation_111) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2234,8 +2234,6 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
- memset(new_affiliation_111, 0,
- sizeof(struct MR_LD_VF_AFFILIATION_111));
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -2333,10 +2331,10 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION));
else {
new_affiliation =
- pci_alloc_consistent(instance->pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h);
+ pci_zalloc_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h);
if (!new_affiliation) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2344,8 +2342,6 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
- memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION));
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -5636,16 +5632,15 @@ megasas_get_seq_num(struct megasas_instance *instance,
}
dcmd = &cmd->frame->dcmd;
- el_info = pci_alloc_consistent(instance->pdev,
- sizeof(struct megasas_evt_log_info),
- &el_info_h);
+ el_info = pci_zalloc_consistent(instance->pdev,
+ sizeof(struct megasas_evt_log_info),
+ &el_info_h);
if (!el_info) {
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
- memset(el_info, 0, sizeof(*el_info));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f4d988dd1e9d..98a7a090b75e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -684,15 +684,14 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
+ &fusion->rdpq_phys);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- memset(fusion->rdpq_virt, 0, array_size);
msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
@@ -2981,6 +2980,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
} else {
+ if (os_timeout_value)
+ os_timeout_value++;
+
/* system pd Fast Path */
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
timeout_limit = (scmd->device->type == TYPE_DISK) ?
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index b015c30d2c32..1e45268a78fc 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -9,7 +9,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.48
+ * mpi2.h Version: 02.00.50
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -114,6 +114,8 @@
* 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT.
* 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT.
* 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -152,8 +154,9 @@
MPI26_VERSION_MINOR)
#define MPI2_VERSION_02_06 (0x0206)
-/*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x30)
+
+/* Unit and Dev versioning for this MPI header set */
+#define MPI2_HEADER_VERSION_UNIT (0x32)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 0ad88deb3176..5122920a961a 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -7,7 +7,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.40
+ * mpi2_cnfg.h Version: 02.00.42
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -219,6 +219,18 @@
* Added ChassisSlot field to SAS Enclosure Page 0.
* Added ChassisSlot Valid bit (bit 5) to the Flags field
* in SAS Enclosure Page 0.
+ * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and
+ * MPI26_MFGPAGE_DEVID_SAS3916 defines.
+ * Removed MPI26_MFGPAGE_DEVID_SAS4008 define.
+ * Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define.
+ * Renamed PI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to
+ * PI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN.
+ * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to
+ * MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK.
+ * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2.
+ * Added NOIOB field to PCIe Device Page 2.
+ * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
+ * the Capabilities field of PCIe Device Page 2.
* --------------------------------------------------------------------------
*/
@@ -556,7 +568,8 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1)
#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2)
-#define MPI26_MFGPAGE_DEVID_SAS4008 (0x00A1)
+#define MPI26_MFGPAGE_DEVID_SAS3816 (0x00A1)
+#define MPI26_MFGPAGE_DEVID_SAS3916 (0x00A0)
/*Manufacturing Page 0 */
@@ -3864,20 +3877,25 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 {
typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
U16 DevHandle; /*0x08 */
- U16 Reserved1; /*0x0A */
- U32 MaximumDataTransferSize;/*0x0C */
+ U8 ControllerResetTO; /* 0x0A */
+ U8 Reserved1; /* 0x0B */
+ U32 MaximumDataTransferSize; /*0x0C */
U32 Capabilities; /*0x10 */
- U32 Reserved2; /*0x14 */
+ U16 NOIOB; /* 0x14 */
+ U16 Reserved2; /* 0x16 */
} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
-#define MPI26_PCIEDEVICE2_PAGEVERSION (0x00)
+#define MPI26_PCIEDEVICE2_PAGEVERSION (0x01)
/*defines for PCIe Device Page 2 Capabilities field */
+#define MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN (0x00000008)
#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004)
#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002)
#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001)
+/* Defines for the NOIOB field */
+#define MPI26_PCIEDEV2_NOIOB_UNSUPPORTED (0x0000)
/****************************************************************************
* PCIe Link Config Pages (MPI v2.6 and later)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 948a3ba682d7..6213ce6791ac 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -75,7 +75,7 @@
typedef struct _MPI2_SCSI_IO_CDB_EEDP32 {
U8 CDB[20]; /*0x00 */
- U32 PrimaryReferenceTag; /*0x14 */
+ __be32 PrimaryReferenceTag; /*0x14 */
U16 PrimaryApplicationTag; /*0x18 */
U16 PrimaryApplicationTagMask; /*0x1A */
U32 TransferLength; /*0x1C */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index cc2aff7aa67b..1faec3a93e69 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -7,7 +7,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.32
+ * mpi2_ioc.h Version: 02.00.34
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -167,6 +167,10 @@
* 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
* Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
* defines for the ReasonCode field.
+ * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD.
+ * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
+ * to the ReasonCode field in PCIe Device Status Change
+ * Event Data.
* --------------------------------------------------------------------------
*/
@@ -1182,6 +1186,7 @@ typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE {
#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10)
+#define MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x11)
/*PCIe Enumeration Event data (MPI v2.6 and later) */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 61f93a134956..bf04fa90f433 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
-MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)");
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
@@ -297,12 +297,15 @@ static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
dma_addr_t chain_buffer_dma)
{
- u16 index;
-
- for (index = 0; index < ioc->chain_depth; index++) {
- if (ioc->chain_lookup[index].chain_buffer_dma ==
- chain_buffer_dma)
- return ioc->chain_lookup[index].chain_buffer;
+ u16 index, j;
+ struct chain_tracker *ct;
+
+ for (index = 0; index < ioc->scsiio_depth; index++) {
+ for (j = 0; j < ioc->chains_needed_per_io; j++) {
+ ct = &ioc->chain_lookup[index].chains_per_smid[j];
+ if (ct && ct->chain_buffer_dma == chain_buffer_dma)
+ return ct->chain_buffer;
+ }
}
pr_info(MPT3SAS_FMT
"Provided chain_buffer_dma address is not in the lookup list\n",
@@ -394,13 +397,14 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
buff_ptr_phys = buffer_iomem_phys;
WARN_ON(buff_ptr_phys > U32_MAX);
- if (sgel->FlagsLength &
+ if (le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
is_write = 1;
for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
- sgl_flags = (sgel->FlagsLength >> MPI2_SGE_FLAGS_SHIFT);
+ sgl_flags =
+ (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
@@ -411,7 +415,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
*/
sgel_next =
_base_get_chain_buffer_dma_to_chain_buffer(ioc,
- sgel->Address);
+ le32_to_cpu(sgel->Address));
if (sgel_next == NULL)
return;
/*
@@ -426,7 +430,8 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
dst_addr_phys = _base_get_chain_phys(ioc,
smid, sge_chain_count);
WARN_ON(dst_addr_phys > U32_MAX);
- sgel->Address = (u32)dst_addr_phys;
+ sgel->Address =
+ cpu_to_le32(lower_32_bits(dst_addr_phys));
sgel = sgel_next;
sge_chain_count++;
break;
@@ -435,22 +440,28 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
if (is_scsiio_req) {
_base_clone_to_sys_mem(buff_ptr,
sg_virt(sg_scmd),
- (sgel->FlagsLength & 0x00ffffff));
+ (le32_to_cpu(sgel->FlagsLength) &
+ 0x00ffffff));
/*
* FIXME: this relies on a a zero
* PCI mem_offset.
*/
- sgel->Address = (u32)buff_ptr_phys;
+ sgel->Address =
+ cpu_to_le32((u32)buff_ptr_phys);
} else {
_base_clone_to_sys_mem(buff_ptr,
ioc->config_vaddr,
- (sgel->FlagsLength & 0x00ffffff));
- sgel->Address = (u32)buff_ptr_phys;
+ (le32_to_cpu(sgel->FlagsLength) &
+ 0x00ffffff));
+ sgel->Address =
+ cpu_to_le32((u32)buff_ptr_phys);
}
}
- buff_ptr += (sgel->FlagsLength & 0x00ffffff);
- buff_ptr_phys += (sgel->FlagsLength & 0x00ffffff);
- if ((sgel->FlagsLength &
+ buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
+ 0x00ffffff);
+ buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
+ 0x00ffffff);
+ if ((le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_END_OF_BUFFER
<< MPI2_SGE_FLAGS_SHIFT)))
goto eob_clone_chain;
@@ -1019,6 +1030,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
desc = "Cable Event";
break;
+ case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ desc = "SAS Device Discovery Error";
+ break;
case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
desc = "PCIE Device Status Change";
break;
@@ -1433,7 +1447,7 @@ _base_interrupt(int irq, void *bus_id)
cpu_to_le32(reply);
if (ioc->is_mcpu_endpoint)
_base_clone_reply_to_sys_mem(ioc,
- cpu_to_le32(reply),
+ reply,
ioc->reply_free_host_index);
writel(ioc->reply_free_host_index,
&ioc->chip->ReplyFreeHostIndex);
@@ -1671,7 +1685,8 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
* @ioc: per adapter object
* @scmd: SCSI commands of the IO request
*
- * Returns chain tracker(from ioc->free_chain_list)
+ * Returns chain tracker from chain_lookup table using key as
+ * smid and smid's chain_offset.
*/
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
@@ -1679,20 +1694,15 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
{
struct chain_tracker *chain_req;
struct scsiio_tracker *st = scsi_cmd_priv(scmd);
- unsigned long flags;
+ u16 smid = st->smid;
+ u8 chain_offset =
+ atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (list_empty(&ioc->free_chain_list)) {
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "chain buffers not available\n", ioc->name));
+ if (chain_offset == ioc->chains_needed_per_io)
return NULL;
- }
- chain_req = list_entry(ioc->free_chain_list.next,
- struct chain_tracker, tracker_list);
- list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list, &st->chain_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
+ atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
return chain_req;
}
@@ -3044,7 +3054,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->combined_reply_index_count; i++) {
ioc->replyPostRegisterIndex[i] = (resource_size_t *)
- ((u8 *)&ioc->chip->Doorbell +
+ ((u8 __force *)&ioc->chip->Doorbell +
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
@@ -3273,13 +3283,7 @@ void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
return;
st->cb_idx = 0xFF;
st->direct_io = 0;
- if (!list_empty(&st->chain_list)) {
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- list_splice_init(&st->chain_list, &ioc->free_chain_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- }
+ atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
}
/**
@@ -3339,7 +3343,7 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
spinlock_t *writeq_lock)
{
unsigned long flags;
- __u64 data_out = cpu_to_le64(b);
+ __u64 data_out = b;
spin_lock_irqsave(writeq_lock, flags);
writel((u32)(data_out), addr);
@@ -3362,7 +3366,7 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
- writeq(cpu_to_le64(b), addr);
+ writeq(b, addr);
}
#else
static inline void
@@ -3389,7 +3393,7 @@ _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
_clone_sg_entries(ioc, (void *) mfp, smid);
- mpi_req_iomem = (void *)ioc->chip +
+ mpi_req_iomem = (void __force *)ioc->chip +
MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
ioc->request_sz);
@@ -3473,7 +3477,8 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
request_hdr = (MPI2RequestHeader_t *)mfp;
/* TBD 256 is offset within sys register. */
- mpi_req_iomem = (void *)ioc->chip + MPI_FRAME_START_OFFSET
+ mpi_req_iomem = (void __force *)ioc->chip
+ + MPI_FRAME_START_OFFSET
+ (smid * ioc->request_sz);
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
ioc->request_sz);
@@ -3542,7 +3547,7 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
_clone_sg_entries(ioc, (void *) mfp, smid);
/* TBD 256 is offset within sys register */
- mpi_req_iomem = (void *)ioc->chip +
+ mpi_req_iomem = (void __force *)ioc->chip +
MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
ioc->request_sz);
@@ -3823,6 +3828,105 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
+ * version from FW Image Header.
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+ static int
+_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2FWImageHeader_t *FWImgHdr;
+ Mpi25FWUploadRequest_t *mpi_request;
+ Mpi2FWUploadReply_t mpi_reply;
+ int r = 0;
+ void *fwpkg_data = NULL;
+ dma_addr_t fwpkg_data_dma;
+ u16 smid, ioc_status;
+ size_t data_length;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ data_length = sizeof(Mpi2FWImageHeader_t);
+ fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
+ &fwpkg_data_dma);
+ if (!fwpkg_data) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENOMEM;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ r = -EAGAIN;
+ goto out;
+ }
+
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
+ mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
+ mpi_request->ImageSize = cpu_to_le32(data_length);
+ ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
+ data_length);
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ /* Wait for 15 seconds */
+ wait_for_completion_timeout(&ioc->base_cmds.done,
+ FW_IMG_HDR_READ_TIMEOUT*HZ);
+ pr_info(MPT3SAS_FMT "%s: complete\n",
+ ioc->name, __func__);
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi25FWUploadRequest_t)/4);
+ r = -ETIME;
+ } else {
+ memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
+ if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
+ memcpy(&mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2FWUploadReply_t));
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
+ if (FWImgHdr->PackageVersion.Word) {
+ pr_info(MPT3SAS_FMT "FW Package Version"
+ "(%02d.%02d.%02d.%02d)\n",
+ ioc->name,
+ FWImgHdr->PackageVersion.Struct.Major,
+ FWImgHdr->PackageVersion.Struct.Minor,
+ FWImgHdr->PackageVersion.Struct.Unit,
+ FWImgHdr->PackageVersion.Struct.Dev);
+ }
+ } else {
+ _debug_dump_mf(&mpi_reply,
+ sizeof(Mpi2FWUploadReply_t)/4);
+ }
+ }
+ }
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+out:
+ if (fwpkg_data)
+ pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
+ fwpkg_data_dma);
+ return r;
+}
+
+/**
* _base_display_ioc_capabilities - Disply IOC's capabilities.
* @ioc: per adapter object
*
@@ -4038,6 +4142,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
Mpi2ConfigReply_t mpi_reply;
u32 iounit_pg1_flags;
+ ioc->nvme_abort_timeout = 30;
mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
if (ioc->ir_firmware)
mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
@@ -4056,6 +4161,18 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
&ioc->manu_pg11);
}
+ if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
+ ioc->tm_custom_handling = 1;
+ else {
+ ioc->tm_custom_handling = 0;
+ if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
+ ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
+ else if (ioc->manu_pg11.NVMeAbortTO >
+ NVME_TASK_ABORT_MAX_TIMEOUT)
+ ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
+ else
+ ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
+ }
mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
@@ -4085,6 +4202,27 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * mpt3sas_free_enclosure_list - release memory
+ * @ioc: per adapter object
+ *
+ * Free memory allocated during encloure add.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
+
+ /* Free enclosure list */
+ list_for_each_entry_safe(enclosure_dev,
+ enclosure_dev_next, &ioc->enclosure_list, list) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ }
+}
+
+/**
* _base_release_memory_pools - release memory
* @ioc: per adapter object
*
@@ -4096,6 +4234,8 @@ static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
int i = 0;
+ int j = 0;
+ struct chain_tracker *ct;
struct reply_post_struct *rps;
dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
@@ -4153,7 +4293,14 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
} while (ioc->rdpq_array_enable &&
(++i < ioc->reply_queue_count));
-
+ if (ioc->reply_post_free_array &&
+ ioc->rdpq_array_enable) {
+ dma_pool_free(ioc->reply_post_free_array_dma_pool,
+ ioc->reply_post_free_array,
+ ioc->reply_post_free_array_dma);
+ ioc->reply_post_free_array = NULL;
+ }
+ dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
dma_pool_destroy(ioc->reply_post_free_dma_pool);
kfree(ioc->reply_post);
}
@@ -4179,19 +4326,49 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
if (ioc->chain_lookup) {
- for (i = 0; i < ioc->chain_depth; i++) {
- if (ioc->chain_lookup[i].chain_buffer)
- dma_pool_free(ioc->chain_dma_pool,
- ioc->chain_lookup[i].chain_buffer,
- ioc->chain_lookup[i].chain_buffer_dma);
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ for (j = ioc->chains_per_prp_buffer;
+ j < ioc->chains_needed_per_io; j++) {
+ ct = &ioc->chain_lookup[i].chains_per_smid[j];
+ if (ct && ct->chain_buffer)
+ dma_pool_free(ioc->chain_dma_pool,
+ ct->chain_buffer,
+ ct->chain_buffer_dma);
+ }
+ kfree(ioc->chain_lookup[i].chains_per_smid);
}
dma_pool_destroy(ioc->chain_dma_pool);
- free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+ kfree(ioc->chain_lookup);
ioc->chain_lookup = NULL;
}
}
/**
+ * is_MSB_are_same - checks whether all reply queues in a set are
+ * having same upper 32bits in their base memory address.
+ * @reply_pool_start_address: Base address of a reply queue set
+ * @pool_sz: Size of single Reply Descriptor Post Queues pool size
+ *
+ * Returns 1 if reply queues in a set have a same upper 32bits
+ * in their base memory address,
+ * else 0
+ */
+
+static int
+is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
+{
+ long reply_pool_end_address;
+
+ reply_pool_end_address = reply_pool_start_address + pool_sz;
+
+ if (upper_32_bits(reply_pool_start_address) ==
+ upper_32_bits(reply_pool_end_address))
+ return 1;
+ else
+ return 0;
+}
+
+/**
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
*
@@ -4203,12 +4380,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
struct mpt3sas_facts *facts;
u16 max_sge_elements;
u16 chains_needed_per_io;
- u32 sz, total_sz, reply_post_free_sz;
+ u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
u32 retry_sz;
u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize;
u16 sge_size;
- int i;
+ int i, j;
+ struct chain_tracker *ct;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -4489,37 +4667,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, ioc->request, ioc->scsiio_depth));
ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
- sz = ioc->chain_depth * sizeof(struct chain_tracker);
- ioc->chain_pages = get_order(sz);
- ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->chain_pages);
+ sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
+ ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup) {
- pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
- ioc->name);
+ pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
+ "failed\n", ioc->name);
goto out;
}
- ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
- ioc->chain_segment_sz, 16, 0);
- if (!ioc->chain_dma_pool) {
- pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
- ioc->name);
- goto out;
- }
- for (i = 0; i < ioc->chain_depth; i++) {
- ioc->chain_lookup[i].chain_buffer = dma_pool_alloc(
- ioc->chain_dma_pool , GFP_KERNEL,
- &ioc->chain_lookup[i].chain_buffer_dma);
- if (!ioc->chain_lookup[i].chain_buffer) {
- ioc->chain_depth = i;
- goto chain_done;
+
+ sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
+ if (!ioc->chain_lookup[i].chains_per_smid) {
+ pr_err(MPT3SAS_FMT "chain_lookup: "
+ " kzalloc failed\n", ioc->name);
+ goto out;
}
- total_sz += ioc->chain_segment_sz;
}
- chain_done:
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
- ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
/* initialize hi-priority queue smid's */
ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
@@ -4561,6 +4725,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* be required for NVMe PRP's, only each set of NVMe blocks will be
* contiguous, so a new set is allocated for each possible I/O.
*/
+ ioc->chains_per_prp_buffer = 0;
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
nvme_blocks_needed =
(ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
@@ -4583,6 +4748,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name);
goto out;
}
+
+ ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
+ ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
+ ioc->chains_needed_per_io);
+
for (i = 0; i < ioc->scsiio_depth; i++) {
ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
ioc->pcie_sgl_dma_pool, GFP_KERNEL,
@@ -4593,13 +4763,55 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name);
goto out;
}
+ for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
+ ct = &ioc->chain_lookup[i].chains_per_smid[j];
+ ct->chain_buffer =
+ ioc->pcie_sg_lookup[i].pcie_sgl +
+ (j * ioc->chain_segment_sz);
+ ct->chain_buffer_dma =
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma +
+ (j * ioc->chain_segment_sz);
+ }
}
dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
"element_size(%d), pool_size(%d kB)\n", ioc->name,
ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
+ "fit in a PRP page(%d)\n", ioc->name,
+ ioc->chains_per_prp_buffer));
total_sz += sz * ioc->scsiio_depth;
}
+
+ ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
+ ioc->chain_segment_sz, 16, 0);
+ if (!ioc->chain_dma_pool) {
+ pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ for (j = ioc->chains_per_prp_buffer;
+ j < ioc->chains_needed_per_io; j++) {
+ ct = &ioc->chain_lookup[i].chains_per_smid[j];
+ ct->chain_buffer = dma_pool_alloc(
+ ioc->chain_dma_pool, GFP_KERNEL,
+ &ct->chain_buffer_dma);
+ if (!ct->chain_buffer) {
+ pr_err(MPT3SAS_FMT "chain_lookup: "
+ " pci_pool_alloc failed\n", ioc->name);
+ _base_release_memory_pools(ioc);
+ goto out;
+ }
+ }
+ total_sz += ioc->chain_segment_sz;
+ }
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
+ ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
+
/* sense buffers, 4 byte align */
sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
@@ -4616,6 +4828,37 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name);
goto out;
}
+ /* sense buffer requires to be in same 4 gb region.
+ * Below function will check the same.
+ * In case of failure, new pci pool will be created with updated
+ * alignment. Older allocation and pool will be destroyed.
+ * Alignment will be used such a way that next allocation if
+ * success, will always meet same 4gb region requirement.
+ * Actual requirement is not alignment, but we need start and end of
+ * DMA address must have same upper 32 bit address.
+ */
+ if (!is_MSB_are_same((long)ioc->sense, sz)) {
+ //Release Sense pool & Reallocate
+ dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+ dma_pool_destroy(ioc->sense_dma_pool);
+ ioc->sense = NULL;
+
+ ioc->sense_dma_pool =
+ dma_pool_create("sense pool", &ioc->pdev->dev, sz,
+ roundup_pow_of_two(sz), 0);
+ if (!ioc->sense_dma_pool) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
+ &ioc->sense_dma);
+ if (!ioc->sense) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ }
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"sense pool(0x%p): depth(%d), element_size(%d), pool_size"
"(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
@@ -4675,6 +4918,28 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, (unsigned long long)ioc->reply_free_dma));
total_sz += sz;
+ if (ioc->rdpq_array_enable) {
+ reply_post_free_array_sz = ioc->reply_queue_count *
+ sizeof(Mpi2IOCInitRDPQArrayEntry);
+ ioc->reply_post_free_array_dma_pool =
+ dma_pool_create("reply_post_free_array pool",
+ &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
+ if (!ioc->reply_post_free_array_dma_pool) {
+ dinitprintk(ioc,
+ pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
+ "dma_pool_create failed\n", ioc->name));
+ goto out;
+ }
+ ioc->reply_post_free_array =
+ dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
+ GFP_KERNEL, &ioc->reply_post_free_array_dma);
+ if (!ioc->reply_post_free_array) {
+ dinitprintk(ioc,
+ pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
+ "dma_pool_alloc failed\n", ioc->name));
+ goto out;
+ }
+ }
ioc->config_page_sz = 512;
ioc->config_page = pci_alloc_consistent(ioc->pdev,
ioc->config_page_sz, &ioc->config_page_dma);
@@ -5002,7 +5267,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* send message 32-bits at a time */
for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
- writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
+ writel((u32)(request[i]), &ioc->chip->Doorbell);
if ((_base_wait_for_doorbell_ack(ioc, 5)))
failed = 1;
}
@@ -5023,7 +5288,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
- reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ reply[0] = (u16)(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5032,7 +5297,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
ioc->name, __LINE__);
return -EFAULT;
}
- reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ reply[1] = (u16)(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
@@ -5046,7 +5311,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
if (i >= reply_bytes/2) /* overflow case */
readl(&ioc->chip->Doorbell);
else
- reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ reply[i] = (u16)(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
@@ -5481,8 +5746,6 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
ktime_t current_time;
u16 ioc_status;
u32 reply_post_free_array_sz = 0;
- Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
- dma_addr_t reply_post_free_array_dma;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -5516,23 +5779,14 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
if (ioc->rdpq_array_enable) {
reply_post_free_array_sz = ioc->reply_queue_count *
sizeof(Mpi2IOCInitRDPQArrayEntry);
- reply_post_free_array = pci_alloc_consistent(ioc->pdev,
- reply_post_free_array_sz, &reply_post_free_array_dma);
- if (!reply_post_free_array) {
- pr_err(MPT3SAS_FMT
- "reply_post_free_array: pci_alloc_consistent failed\n",
- ioc->name);
- r = -ENOMEM;
- goto out;
- }
- memset(reply_post_free_array, 0, reply_post_free_array_sz);
+ memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
for (i = 0; i < ioc->reply_queue_count; i++)
- reply_post_free_array[i].RDPQBaseAddress =
+ ioc->reply_post_free_array[i].RDPQBaseAddress =
cpu_to_le64(
(u64)ioc->reply_post[i].reply_post_free_dma);
mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
mpi_request.ReplyDescriptorPostQueueAddress =
- cpu_to_le64((u64)reply_post_free_array_dma);
+ cpu_to_le64((u64)ioc->reply_post_free_array_dma);
} else {
mpi_request.ReplyDescriptorPostQueueAddress =
cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
@@ -5562,7 +5816,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
if (r != 0) {
pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
ioc->name, __func__, r);
- goto out;
+ return r;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
@@ -5572,11 +5826,6 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
r = -EIO;
}
-out:
- if (reply_post_free_array)
- pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
- reply_post_free_array,
- reply_post_free_array_dma);
return r;
}
@@ -6157,12 +6406,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
&ioc->internal_free_list);
}
- /* chain pool */
- INIT_LIST_HEAD(&ioc->free_chain_list);
- for (i = 0; i < ioc->chain_depth; i++)
- list_add_tail(&ioc->chain_lookup[i].tracker_list,
- &ioc->free_chain_list);
-
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
/* initialize Reply Free Queue */
@@ -6172,7 +6415,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free[i] = cpu_to_le32(reply_address);
if (ioc->is_mcpu_endpoint)
_base_clone_reply_to_sys_mem(ioc,
- (__le32)reply_address, i);
+ reply_address, i);
}
/* initialize reply queues */
@@ -6230,12 +6473,18 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
skip_init_reply_post_host_index:
_base_unmask_interrupts(ioc);
+
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ r = _base_display_fwpkg_version(ioc);
+ if (r)
+ return r;
+ }
+
+ _base_static_config_pages(ioc);
r = _base_event_notification(ioc);
if (r)
return r;
- _base_static_config_pages(ioc);
-
if (ioc->is_driver_loading) {
if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
@@ -6492,6 +6741,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
if (ioc->is_gen35_ioc) {
_base_unmask_events(ioc,
@@ -6558,6 +6808,7 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_stop_watchdog(ioc);
mpt3sas_base_free_resources(ioc);
_base_release_memory_pools(ioc);
+ mpt3sas_free_enclosure_list(ioc);
pci_set_drvdata(ioc->pdev, NULL);
kfree(ioc->cpu_msix_table);
if (ioc->is_warpdrive)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index ae36d8fb2f2b..f02974c0be4a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -74,8 +74,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "17.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 17
+#define MPT3SAS_DRIVER_VERSION "25.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 25
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -138,6 +138,7 @@
#define MAX_CHAIN_ELEMT_SZ 16
#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
+#define FW_IMG_HDR_READ_TIMEOUT 15
/*
* NVMe defines
*/
@@ -145,8 +146,12 @@
#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
+#define NVME_TASK_ABORT_MIN_TIMEOUT 6
+#define NVME_TASK_ABORT_MAX_TIMEOUT 60
+#define NVME_TASK_MNGT_CUSTOM_MASK (0x0010)
#define NVME_PRP_PAGE_SIZE 4096 /* Page size */
+
/*
* reset phases
*/
@@ -362,7 +367,15 @@ struct Mpi2ManufacturingPage11_t {
u8 EEDPTagMode; /* 09h */
u8 Reserved3; /* 0Ah */
u8 Reserved4; /* 0Bh */
- __le32 Reserved5[23]; /* 0Ch-60h*/
+ __le32 Reserved5[8]; /* 0Ch-2Ch */
+ u16 AddlFlags2; /* 2Ch */
+ u8 AddlFlags3; /* 2Eh */
+ u8 Reserved6; /* 2Fh */
+ __le32 Reserved7[7]; /* 30h - 4Bh */
+ u8 NVMeAbortTO; /* 4Ch */
+ u8 Reserved8; /* 4Dh */
+ u16 Reserved9; /* 4Eh */
+ __le32 Reserved10[4]; /* 50h - 60h */
};
/**
@@ -572,6 +585,7 @@ struct _pcie_device {
u8 enclosure_level;
u8 connector_name[4];
u8 *serial_number;
+ u8 reset_timeout;
struct kref refcount;
};
/**
@@ -741,6 +755,17 @@ struct _sas_node {
struct list_head sas_port_list;
};
+
+/**
+ * struct _enclosure_node - enclosure information
+ * @list: list of enclosures
+ * @pg0: enclosure pg0;
+ */
+struct _enclosure_node {
+ struct list_head list;
+ Mpi2SasEnclosurePage0_t pg0;
+};
+
/**
* enum reset_type - reset state
* @FORCE_BIG_HAMMER: issue diagnostic reset
@@ -770,7 +795,11 @@ struct pcie_sg_list {
struct chain_tracker {
void *chain_buffer;
dma_addr_t chain_buffer_dma;
- struct list_head tracker_list;
+};
+
+struct chain_lookup {
+ struct chain_tracker *chains_per_smid;
+ atomic_t chain_offset;
};
/**
@@ -829,8 +858,8 @@ struct _sc_list {
*/
struct _event_ack_list {
struct list_head list;
- u16 Event;
- u32 EventContext;
+ U16 Event;
+ U32 EventContext;
};
/**
@@ -1009,6 +1038,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @iounit_pg8: static iounit page 8
* @sas_hba: sas host object
* @sas_expander_list: expander object list
+ * @enclosure_list: enclosure object list
* @sas_node_lock:
* @sas_device_list: sas device object list
* @sas_device_init_list: sas device object list (used only at init time)
@@ -1194,6 +1224,10 @@ struct MPT3SAS_ADAPTER {
void *event_log;
u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+ u8 tm_custom_handling;
+ u8 nvme_abort_timeout;
+
+
/* static config pages */
struct mpt3sas_facts facts;
struct mpt3sas_port_facts *pfacts;
@@ -1214,6 +1248,7 @@ struct MPT3SAS_ADAPTER {
/* sas hba, expander, and device list */
struct _sas_node sas_hba;
struct list_head sas_expander_list;
+ struct list_head enclosure_list;
spinlock_t sas_node_lock;
struct list_head sas_device_list;
struct list_head sas_device_init_list;
@@ -1261,7 +1296,7 @@ struct MPT3SAS_ADAPTER {
u32 page_size;
/* chain */
- struct chain_tracker *chain_lookup;
+ struct chain_lookup *chain_lookup;
struct list_head free_chain_list;
struct dma_pool *chain_dma_pool;
ulong chain_pages;
@@ -1315,6 +1350,9 @@ struct MPT3SAS_ADAPTER {
u8 rdpq_array_enable;
u8 rdpq_array_enable_assigned;
struct dma_pool *reply_post_free_dma_pool;
+ struct dma_pool *reply_post_free_array_dma_pool;
+ Mpi2IOCInitRDPQArrayEntry *reply_post_free_array;
+ dma_addr_t reply_post_free_array_dma;
u8 reply_queue_count;
struct list_head reply_queue_list;
@@ -1384,6 +1422,7 @@ int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
enum reset_type type);
@@ -1451,10 +1490,11 @@ u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
-int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
+int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
+ u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task,
+ u8 timeout, u8 tr_method);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d3cb387ba9f4..3269ef43f07e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -297,7 +297,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
nvme_error_reply =
(Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
- le32_to_cpu(nvme_error_reply->ErrorResponseCount));
+ le16_to_cpu(nvme_error_reply->ErrorResponseCount));
sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
memcpy(ioc->ctl_cmds.sense, sense_data, sz);
}
@@ -644,9 +644,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
MPI2RequestHeader_t *mpi_request = NULL, *request;
MPI2DefaultReply_t *mpi_reply;
Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
+ struct _pcie_device *pcie_device = NULL;
u32 ioc_state;
u16 smid;
- unsigned long timeout;
+ u8 timeout;
u8 issue_reset;
u32 sz, sz_arg;
void *psge;
@@ -659,6 +660,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
long ret;
u16 wait_state_count;
u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
issue_reset = 0;
@@ -803,12 +805,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
* Build the PRPs and set direction bits.
* Send the request.
*/
- nvme_encap_request->ErrorResponseBaseAddress = ioc->sense_dma &
- 0xFFFFFFFF00000000;
+ nvme_encap_request->ErrorResponseBaseAddress =
+ cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
nvme_encap_request->ErrorResponseBaseAddress |=
- (U64)mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ cpu_to_le64(le32_to_cpu(
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
nvme_encap_request->ErrorResponseAllocationLength =
- NVME_ERROR_RESPONSE_SIZE;
+ cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
data_out_dma, data_out_sz, data_in_dma, data_in_sz);
@@ -1073,14 +1076,26 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->name,
le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
- mpt3sas_scsih_issue_locked_tm(ioc,
- le16_to_cpu(mpi_request->FunctionDependent1), 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ if (pcie_device && (!ioc->tm_custom_handling))
+ mpt3sas_scsih_issue_locked_tm(ioc,
+ le16_to_cpu(mpi_request->FunctionDependent1),
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 0, pcie_device->reset_timeout,
+ tr_method);
+ else
+ mpt3sas_scsih_issue_locked_tm(ioc,
+ le16_to_cpu(mpi_request->FunctionDependent1),
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
out:
+ if (pcie_device)
+ pcie_device_put(pcie_device);
/* free memory associated with sg buffers */
if (data_in)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index a44046cff0f3..18b46faef6f1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -184,7 +184,7 @@ struct mpt3_ioctl_iocinfo {
/* number of event log entries */
-#define MPT3SAS_CTL_EVENT_LOG_SIZE (50)
+#define MPT3SAS_CTL_EVENT_LOG_SIZE (200)
/**
* struct mpt3_ioctl_eventquery - query event count and type
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8cd3782fab49..b8d131a455d0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -157,8 +157,8 @@ MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
/* raid transport support */
-struct raid_template *mpt3sas_raid_template;
-struct raid_template *mpt2sas_raid_template;
+static struct raid_template *mpt3sas_raid_template;
+static struct raid_template *mpt2sas_raid_template;
/**
@@ -1088,7 +1088,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
pcie_device->slot);
if (pcie_device->connector_name[0] != '\0')
pr_info(MPT3SAS_FMT
- "removing enclosure level(0x%04x), connector name( %s)\n",
+ "removing enclosure level(0x%04x), connector name( %s)\n",
ioc->name, pcie_device->enclosure_level,
pcie_device->connector_name);
@@ -1362,6 +1362,30 @@ mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
+ * @ioc: per adapter object
+ * @handle: enclosure handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for enclosure device based on handle, then returns the
+ * enclosure object.
+ */
+static struct _enclosure_node *
+mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _enclosure_node *enclosure_dev, *r;
+
+ r = NULL;
+ list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
+ if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
+ continue;
+ r = enclosure_dev;
+ goto out;
+ }
+out:
+ return r;
+}
+/**
* mpt3sas_scsih_expander_find_by_sas_address - expander device search
* @ioc: per adapter object
* @sas_address: sas address
@@ -2608,6 +2632,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @smid_task: smid assigned to the task
* @msix_task: MSIX table index supplied by the OS
* @timeout: timeout in seconds
+ * @tr_method: Target Reset Method
* Context: user
*
* A generic API for sending task management requests to firmware.
@@ -2618,8 +2643,8 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* Return SUCCESS or FAILED.
*/
int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
+ u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
@@ -2665,8 +2690,8 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
}
dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
- ioc->name, handle, type, smid_task));
+ "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
+ ioc->name, handle, type, smid_task, timeout, tr_method));
ioc->tm_cmds.status = MPT3_CMD_PENDING;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->tm_cmds.smid = smid;
@@ -2675,6 +2700,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
+ mpi_request->MsgFlags = tr_method;
mpi_request->TaskMID = cpu_to_le16(smid_task);
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
@@ -2721,13 +2747,14 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
+ u64 lun, u8 type, u16 smid_task, u16 msix_task,
+ u8 timeout, u8 tr_method)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
- msix_task, timeout);
+ msix_task, timeout, tr_method);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
@@ -2830,6 +2857,8 @@ scsih_abort(struct scsi_cmnd *scmd)
u16 handle;
int r;
+ u8 timeout = 30;
+ struct _pcie_device *pcie_device = NULL;
sdev_printk(KERN_INFO, scmd->device,
"attempting task abort! scmd(%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
@@ -2864,15 +2893,20 @@ scsih_abort(struct scsi_cmnd *scmd)
mpt3sas_halt_firmware(ioc);
handle = sas_device_priv_data->sas_target->handle;
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device && (!ioc->tm_custom_handling))
+ timeout = ioc->nvme_abort_timeout;
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
- st->smid, st->msix_io, 30);
+ st->smid, st->msix_io, timeout, 0);
/* Command must be cleared after abort */
if (r == SUCCESS && st->cb_idx != 0xFF)
r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ if (pcie_device)
+ pcie_device_put(pcie_device);
return r;
}
@@ -2888,7 +2922,10 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
u16 handle;
+ u8 tr_method = 0;
+ u8 tr_timeout = 30;
int r;
struct scsi_target *starget = scmd->device->sdev_target;
@@ -2926,8 +2963,16 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
+
+ if (pcie_device && (!ioc->tm_custom_handling)) {
+ tr_timeout = pcie_device->reset_timeout;
+ tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+ } else
+ tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30);
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
+ tr_timeout, tr_method);
/* Check for busy commands after reset */
if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
r = FAILED;
@@ -2937,6 +2982,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
if (sas_device)
sas_device_put(sas_device);
+ if (pcie_device)
+ pcie_device_put(pcie_device);
return r;
}
@@ -2953,7 +3000,10 @@ scsih_target_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
u16 handle;
+ u8 tr_method = 0;
+ u8 tr_timeout = 30;
int r;
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
@@ -2990,8 +3040,16 @@ scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
+
+ if (pcie_device && (!ioc->tm_custom_handling)) {
+ tr_timeout = pcie_device->reset_timeout;
+ tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+ } else
+ tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
+ tr_timeout, tr_method);
/* Check for busy commands after reset */
if (r == SUCCESS && atomic_read(&starget->target_busy))
r = FAILED;
@@ -3001,7 +3059,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
if (sas_device)
sas_device_put(sas_device);
-
+ if (pcie_device)
+ pcie_device_put(pcie_device);
return r;
}
@@ -3535,6 +3594,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
unsigned long flags;
struct _tr_list *delayed_tr;
u32 ioc_state;
+ u8 tr_method = 0;
if (ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3577,6 +3637,11 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_address = pcie_device->wwid;
}
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (pcie_device && (!ioc->tm_custom_handling))
+ tr_method =
+ MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+ else
+ tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
}
if (sas_target_priv_data) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3640,6 +3705,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpi_request->MsgFlags = tr_method;
set_bit(handle, ioc->device_remove_in_progress);
mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
@@ -3680,11 +3746,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 ioc_state;
struct _sc_list *delayed_sc;
- if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host has been removed\n", __func__, ioc->name));
- return 1;
- } else if (ioc->pci_error_recovery) {
+ if (ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host in pci error recovery\n", __func__,
ioc->name));
@@ -3725,7 +3787,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if (!delayed_sc)
return _scsih_check_for_pending_tm(ioc, smid);
INIT_LIST_HEAD(&delayed_sc->list);
- delayed_sc->handle = mpi_request_tm->DevHandle;
+ delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"DELAYED:sc:handle(0x%04x), (open)\n",
@@ -3806,8 +3868,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
u16 smid;
struct _tr_list *delayed_tr;
- if (ioc->shost_recovery || ioc->remove_host ||
- ioc->pci_error_recovery) {
+ if (ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host reset in progress!\n",
__func__, ioc->name));
@@ -3860,8 +3921,7 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
Mpi2SCSITaskManagementReply_t *mpi_reply =
mpt3sas_base_get_reply_virt_addr(ioc, reply);
- if (ioc->shost_recovery || ioc->remove_host ||
- ioc->pci_error_recovery) {
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host reset in progress!\n",
__func__, ioc->name));
@@ -3903,8 +3963,8 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* Context - processed in interrupt context.
*/
static void
-_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
- u32 event_context)
+_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
+ U32 event_context)
{
Mpi2EventAckRequest_t *ack_request;
int i = smid - ioc->internal_smid;
@@ -3979,13 +4039,13 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, le16_to_cpu(handle), smid,
+ ioc->name, handle, smid,
ioc->tm_sas_control_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
- mpi_request->DevHandle = handle;
+ mpi_request->DevHandle = cpu_to_le16(handle);
mpt3sas_base_put_smid_default(ioc, smid);
}
@@ -5618,10 +5678,10 @@ static int
_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct _sas_node *sas_expander;
+ struct _enclosure_node *enclosure_dev;
Mpi2ConfigReply_t mpi_reply;
Mpi2ExpanderPage0_t expander_pg0;
Mpi2ExpanderPage1_t expander_pg1;
- Mpi2SasEnclosurePage0_t enclosure_pg0;
u32 ioc_status;
u16 parent_handle;
u64 sas_address, sas_address_parent = 0;
@@ -5743,11 +5803,12 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
if (sas_expander->enclosure_handle) {
- if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
- &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- sas_expander->enclosure_handle)))
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ sas_expander->enclosure_handle);
+ if (enclosure_dev)
sas_expander->enclosure_logical_id =
- le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
}
_scsih_expander_node_add(ioc, sas_expander);
@@ -5891,52 +5952,6 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
}
/**
- * _scsih_get_enclosure_logicalid_chassis_slot - get device's
- * EnclosureLogicalID and ChassisSlot information.
- * @ioc: per adapter object
- * @sas_device_pg0: SAS device page0
- * @sas_device: per sas device object
- *
- * Returns nothing.
- */
-static void
-_scsih_get_enclosure_logicalid_chassis_slot(struct MPT3SAS_ADAPTER *ioc,
- Mpi2SasDevicePage0_t *sas_device_pg0, struct _sas_device *sas_device)
-{
- Mpi2ConfigReply_t mpi_reply;
- Mpi2SasEnclosurePage0_t enclosure_pg0;
-
- if (!sas_device_pg0 || !sas_device)
- return;
-
- sas_device->enclosure_handle =
- le16_to_cpu(sas_device_pg0->EnclosureHandle);
- sas_device->is_chassis_slot_valid = 0;
-
- if (!le16_to_cpu(sas_device_pg0->EnclosureHandle))
- return;
-
- if (mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
- &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- le16_to_cpu(sas_device_pg0->EnclosureHandle))) {
- pr_err(MPT3SAS_FMT
- "Enclosure Pg0 read failed for handle(0x%04x)\n",
- ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
- return;
- }
-
- sas_device->enclosure_logical_id =
- le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
-
- if (le16_to_cpu(enclosure_pg0.Flags) &
- MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
- sas_device->is_chassis_slot_valid = 1;
- sas_device->chassis_slot = enclosure_pg0.ChassisSlot;
- }
-}
-
-
-/**
* _scsih_check_device - checking device responsiveness
* @ioc: per adapter object
* @parent_sas_address: sas address of parent expander or sas host
@@ -5953,6 +5968,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
struct _sas_device *sas_device;
+ struct _enclosure_node *enclosure_dev = NULL;
u32 ioc_status;
unsigned long flags;
u64 sas_address;
@@ -6007,8 +6023,21 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->connector_name[0] = '\0';
}
- _scsih_get_enclosure_logicalid_chassis_slot(ioc,
- &sas_device_pg0, sas_device);
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ sas_device->is_chassis_slot_valid = 0;
+ enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ sas_device->enclosure_handle);
+ if (enclosure_dev) {
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_dev->pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot =
+ enclosure_dev->pg0.ChassisSlot;
+ }
+ }
}
/* check if device is present */
@@ -6055,12 +6084,11 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
{
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
- Mpi2SasEnclosurePage0_t enclosure_pg0;
struct _sas_device *sas_device;
+ struct _enclosure_node *enclosure_dev = NULL;
u32 ioc_status;
u64 sas_address;
u32 device_info;
- int encl_pg0_rc = -1;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -6106,12 +6134,12 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
}
if (sas_device_pg0.EnclosureHandle) {
- encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
- &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- sas_device_pg0.EnclosureHandle);
- if (encl_pg0_rc)
- pr_info(MPT3SAS_FMT
- "Enclosure Pg0 read failed for handle(0x%04x)\n",
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ le16_to_cpu(sas_device_pg0.EnclosureHandle));
+ if (enclosure_dev == NULL)
+ pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
+ " doesn't match with enclosure device!\n",
+ ioc->name, le16_to_cpu(sas_device_pg0.EnclosureHandle));
}
@@ -6152,18 +6180,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->enclosure_level = 0;
sas_device->connector_name[0] = '\0';
}
-
- /* get enclosure_logical_id & chassis_slot */
+ /* get enclosure_logical_id & chassis_slot*/
sas_device->is_chassis_slot_valid = 0;
- if (encl_pg0_rc == 0) {
+ if (enclosure_dev) {
sas_device->enclosure_logical_id =
- le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
-
- if (le16_to_cpu(enclosure_pg0.Flags) &
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_dev->pg0.Flags) &
MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
sas_device->is_chassis_slot_valid = 1;
sas_device->chassis_slot =
- enclosure_pg0.ChassisSlot;
+ enclosure_dev->pg0.ChassisSlot;
}
}
@@ -6845,8 +6871,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
Mpi26PCIeDevicePage0_t pcie_device_pg0;
Mpi26PCIeDevicePage2_t pcie_device_pg2;
Mpi2ConfigReply_t mpi_reply;
- Mpi2SasEnclosurePage0_t enclosure_pg0;
struct _pcie_device *pcie_device;
+ struct _enclosure_node *enclosure_dev;
u32 pcie_device_type;
u32 ioc_status;
u64 wwid;
@@ -6917,7 +6943,7 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (pcie_device->enclosure_handle != 0)
pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
- if (le16_to_cpu(pcie_device_pg0.Flags) &
+ if (le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
memcpy(&pcie_device->connector_name[0],
@@ -6928,13 +6954,14 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/* get enclosure_logical_id */
- if (pcie_device->enclosure_handle &&
- !(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
- &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- pcie_device->enclosure_handle)))
- pcie_device->enclosure_logical_id =
- le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
-
+ if (pcie_device->enclosure_handle) {
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ pcie_device->enclosure_handle);
+ if (enclosure_dev)
+ pcie_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+ }
/* TODO -- Add device name once FW supports it */
if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
&pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
@@ -6953,6 +6980,11 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
pcie_device->nvme_mdts =
le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+ if (pcie_device_pg2.ControllerResetTO)
+ pcie_device->reset_timeout =
+ pcie_device_pg2.ControllerResetTO;
+ else
+ pcie_device->reset_timeout = 30;
if (ioc->wait_for_discovery_to_complete)
_scsih_pcie_device_init_add(ioc, pcie_device);
@@ -7205,6 +7237,9 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
reason_str = "internal async notification";
break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
+ reason_str = "pcie hot reset failed";
+ break;
default:
reason_str = "unknown reason";
break;
@@ -7320,10 +7355,60 @@ static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
+ Mpi2ConfigReply_t mpi_reply;
+ struct _enclosure_node *enclosure_dev = NULL;
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data =
+ (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
+ int rc;
+ u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
+
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
(Mpi2EventDataSasEnclDevStatusChange_t *)
fw_event->event_data);
+ if (ioc->shost_recovery)
+ return;
+
+ if (enclosure_handle)
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ enclosure_handle);
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_ENCL_RC_ADDED:
+ if (!enclosure_dev) {
+ enclosure_dev =
+ kzalloc(sizeof(struct _enclosure_node),
+ GFP_KERNEL);
+ if (!enclosure_dev) {
+ pr_info(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_dev->pg0,
+ MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ enclosure_handle);
+
+ if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK)) {
+ kfree(enclosure_dev);
+ return;
+ }
+
+ list_add_tail(&enclosure_dev->list,
+ &ioc->enclosure_list);
+ }
+ break;
+ case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
+ if (enclosure_dev) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ }
+ break;
+ default:
+ break;
+ }
}
/**
@@ -7409,7 +7494,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
- st->msix_io, 30);
+ st->msix_io, 30, 0);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -7450,7 +7535,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
- st->msix_io, 30);
+ st->msix_io, 30, 0);
if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -7527,6 +7612,44 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_sas_device_discovery_error_event - display SAS device discovery error
+ * events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
+ (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
+
+ switch (event_data->ReasonCode) {
+ case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
+ pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
+ "(handle:0x%04x, sas_address:0x%016llx,"
+ " physical_port:0x%02x) has failed\n",
+ ioc->name, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
+ break;
+ case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
+ pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
+ "(handle:0x%04x, sas_address:0x%016llx,"
+ " physical_port:0x%02x) has timed out\n",
+ ioc->name, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
* _scsih_pcie_enumeration_event - handle enumeration events
* @ioc: per adapter object
* @fw_event: The fw_event_work object
@@ -8360,12 +8483,23 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
struct scsi_target *starget;
struct _sas_device *sas_device = NULL;
+ struct _enclosure_node *enclosure_dev = NULL;
unsigned long flags;
+ if (sas_device_pg0->EnclosureHandle) {
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ le16_to_cpu(sas_device_pg0->EnclosureHandle));
+ if (enclosure_dev == NULL)
+ pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
+ " doesn't match with enclosure device!\n",
+ ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
+ }
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if ((sas_device->sas_address == sas_device_pg0->SASAddress) &&
- (sas_device->slot == sas_device_pg0->Slot)) {
+ if ((sas_device->sas_address == le64_to_cpu(
+ sas_device_pg0->SASAddress)) && (sas_device->slot ==
+ le16_to_cpu(sas_device_pg0->Slot))) {
sas_device->responding = 1;
starget = sas_device->starget;
if (starget && starget->hostdata) {
@@ -8377,7 +8511,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
if (starget) {
starget_printk(KERN_INFO, starget,
"handle(0x%04x), sas_addr(0x%016llx)\n",
- sas_device_pg0->DevHandle,
+ le16_to_cpu(sas_device_pg0->DevHandle),
(unsigned long long)
sas_device->sas_address);
@@ -8389,7 +8523,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
sas_device->enclosure_logical_id,
sas_device->slot);
}
- if (sas_device_pg0->Flags &
+ if (le16_to_cpu(sas_device_pg0->Flags) &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
sas_device_pg0->EnclosureLevel;
@@ -8400,17 +8534,30 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
sas_device->connector_name[0] = '\0';
}
- _scsih_get_enclosure_logicalid_chassis_slot(ioc,
- sas_device_pg0, sas_device);
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0->EnclosureHandle);
+ sas_device->is_chassis_slot_valid = 0;
+ if (enclosure_dev) {
+ sas_device->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_dev->pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot =
+ enclosure_dev->pg0.ChassisSlot;
+ }
+ }
- if (sas_device->handle == sas_device_pg0->DevHandle)
+ if (sas_device->handle == le16_to_cpu(
+ sas_device_pg0->DevHandle))
goto out;
pr_info("\thandle changed from(0x%04x)!!!\n",
sas_device->handle);
- sas_device->handle = sas_device_pg0->DevHandle;
+ sas_device->handle = le16_to_cpu(
+ sas_device_pg0->DevHandle);
if (sas_target_priv_data)
sas_target_priv_data->handle =
- sas_device_pg0->DevHandle;
+ le16_to_cpu(sas_device_pg0->DevHandle);
goto out;
}
}
@@ -8419,6 +8566,52 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
}
/**
+ * _scsih_create_enclosure_list_after_reset - Free Existing list,
+ * And create enclosure list by scanning all Enclosure Page(0)s
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _enclosure_node *enclosure_dev;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 enclosure_handle;
+ int rc;
+
+ /* Free existing enclosure list */
+ mpt3sas_free_enclosure_list(ioc);
+
+ /* Re constructing enclosure list after reset*/
+ enclosure_handle = 0xFFFF;
+ do {
+ enclosure_dev =
+ kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
+ if (!enclosure_dev) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_dev->pg0,
+ MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
+ enclosure_handle);
+
+ if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK)) {
+ kfree(enclosure_dev);
+ return;
+ }
+ list_add_tail(&enclosure_dev->list,
+ &ioc->enclosure_list);
+ enclosure_handle =
+ le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
+ } while (1);
+}
+
+/**
* _scsih_search_responding_sas_devices -
* @ioc: per adapter object
*
@@ -8449,15 +8642,10 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
- handle = sas_device_pg0.DevHandle =
- le16_to_cpu(sas_device_pg0.DevHandle);
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
if (!(_scsih_is_end_device(device_info)))
continue;
- sas_device_pg0.SASAddress =
- le64_to_cpu(sas_device_pg0.SASAddress);
- sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
- sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
}
@@ -8487,8 +8675,9 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
- if ((pcie_device->wwid == pcie_device_pg0->WWID) &&
- (pcie_device->slot == pcie_device_pg0->Slot)) {
+ if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
+ && (pcie_device->slot == le16_to_cpu(
+ pcie_device_pg0->Slot))) {
pcie_device->responding = 1;
starget = pcie_device->starget;
if (starget && starget->hostdata) {
@@ -8523,14 +8712,16 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
pcie_device->connector_name[0] = '\0';
}
- if (pcie_device->handle == pcie_device_pg0->DevHandle)
+ if (pcie_device->handle == le16_to_cpu(
+ pcie_device_pg0->DevHandle))
goto out;
pr_info("\thandle changed from(0x%04x)!!!\n",
pcie_device->handle);
- pcie_device->handle = pcie_device_pg0->DevHandle;
+ pcie_device->handle = le16_to_cpu(
+ pcie_device_pg0->DevHandle);
if (sas_target_priv_data)
sas_target_priv_data->handle =
- pcie_device_pg0->DevHandle;
+ le16_to_cpu(pcie_device_pg0->DevHandle);
goto out;
}
}
@@ -8579,10 +8770,6 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
if (!(_scsih_is_nvme_device(device_info)))
continue;
- pcie_device_pg0.WWID = le64_to_cpu(pcie_device_pg0.WWID),
- pcie_device_pg0.Slot = le16_to_cpu(pcie_device_pg0.Slot);
- pcie_device_pg0.Flags = le32_to_cpu(pcie_device_pg0.Flags);
- pcie_device_pg0.DevHandle = handle;
_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
}
out:
@@ -8736,22 +8923,16 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
{
struct _sas_node *sas_expander = NULL;
unsigned long flags;
- int i, encl_pg0_rc = -1;
- Mpi2ConfigReply_t mpi_reply;
- Mpi2SasEnclosurePage0_t enclosure_pg0;
+ int i;
+ struct _enclosure_node *enclosure_dev = NULL;
u16 handle = le16_to_cpu(expander_pg0->DevHandle);
+ u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
- if (le16_to_cpu(expander_pg0->EnclosureHandle)) {
- encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
- &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- le16_to_cpu(expander_pg0->EnclosureHandle));
- if (encl_pg0_rc)
- pr_info(MPT3SAS_FMT
- "Enclosure Pg0 read failed for handle(0x%04x)\n",
- ioc->name,
- le16_to_cpu(expander_pg0->EnclosureHandle));
- }
+ if (enclosure_handle)
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ enclosure_handle);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
@@ -8759,12 +8940,12 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
continue;
sas_expander->responding = 1;
- if (!encl_pg0_rc)
+ if (enclosure_dev) {
sas_expander->enclosure_logical_id =
- le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
-
- sas_expander->enclosure_handle =
- le16_to_cpu(expander_pg0->EnclosureHandle);
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+ sas_expander->enclosure_handle =
+ le16_to_cpu(expander_pg0->EnclosureHandle);
+ }
if (sas_expander->handle == handle)
goto out;
@@ -9286,6 +9467,7 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
!ioc->sas_hba.num_phys)) {
_scsih_prep_device_scan(ioc);
+ _scsih_create_enclosure_list_after_reset(ioc);
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_pcie_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
@@ -9356,6 +9538,9 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
case MPI2_EVENT_SAS_DISCOVERY:
_scsih_sas_discovery_event(ioc, fw_event);
break;
+ case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ _scsih_sas_device_discovery_error_event(ioc, fw_event);
+ break;
case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
_scsih_sas_broadcast_primitive_event(ioc, fw_event);
break;
@@ -9433,8 +9618,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u16 sz;
Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
- /* events turned off due to host reset or driver unloading */
- if (ioc->remove_host || ioc->pci_error_recovery)
+ /* events turned off due to host reset */
+ if (ioc->pci_error_recovery)
return 1;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
@@ -9540,6 +9725,7 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
case MPI2_EVENT_IR_OPERATION_STATUS:
case MPI2_EVENT_SAS_DISCOVERY:
+ case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
case MPI2_EVENT_IR_PHYSICAL_DISK:
case MPI2_EVENT_PCIE_ENUMERATION:
@@ -10513,6 +10699,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->sas_device_list);
INIT_LIST_HEAD(&ioc->sas_device_init_list);
INIT_LIST_HEAD(&ioc->sas_expander_list);
+ INIT_LIST_HEAD(&ioc->enclosure_list);
INIT_LIST_HEAD(&ioc->pcie_device_list);
INIT_LIST_HEAD(&ioc->pcie_device_init_list);
INIT_LIST_HEAD(&ioc->fw_event_list);
@@ -11100,10 +11287,10 @@ _mpt3sas_exit(void)
pr_info("mpt3sas version %s unloading\n",
MPT3SAS_DRIVER_VERSION);
- pci_unregister_driver(&mpt3sas_driver);
-
mpt3sas_ctl_exit(hbas_to_enumerate);
+ pci_unregister_driver(&mpt3sas_driver);
+
scsih_exit();
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index 6bfcee4757e0..45aa94915cbf 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -177,7 +177,8 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
if (mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
&pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
vol_pg0->PhysDisk[count].PhysDiskNum) ||
- pd_pg0.DevHandle == MPT3SAS_INVALID_DEVICE_HANDLE) {
+ le16_to_cpu(pd_pg0.DevHandle) ==
+ MPT3SAS_INVALID_DEVICE_HANDLE) {
pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
"disabled for the drive with handle(0x%04x) member"
"handle retrieval failed for member number=%d\n",
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index afd27165cd93..b3cd9a6b1d30 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2693,22 +2693,4 @@ static struct pci_driver mvumi_pci_driver = {
#endif
};
-/**
- * mvumi_init - Driver load entry point
- */
-static int __init mvumi_init(void)
-{
- return pci_register_driver(&mvumi_pci_driver);
-}
-
-/**
- * mvumi_exit - Driver unload entry point
- */
-static void __exit mvumi_exit(void)
-{
-
- pci_unregister_driver(&mvumi_pci_driver);
-}
-
-module_init(mvumi_init);
-module_exit(mvumi_exit);
+module_pci_driver(mvumi_pci_driver);
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 5a33e1ad9881..67b14576fff2 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1840,14 +1840,14 @@ int osd_req_decode_sense_full(struct osd_request *or,
case osd_sense_response_integrity_check:
{
struct osd_sense_response_integrity_check_descriptor
- *osricd = cur_descriptor;
- const unsigned len =
- sizeof(osricd->integrity_check_value);
- char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */
-
- hex_dump_to_buffer(osricd->integrity_check_value, len,
- 32, 1, key_dump, sizeof(key_dump), true);
- OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
+ *d = cur_descriptor;
+ /* 2nibbles+space+ASCII */
+ char dump[sizeof(d->integrity_check_value) * 4 + 2];
+
+ hex_dump_to_buffer(d->integrity_check_value,
+ sizeof(d->integrity_check_value),
+ 32, 1, dump, sizeof(dump), true);
+ OSD_SENSE_PRINT2("response_integrity [%s]\n", dump);
}
case osd_sense_attribute_identification:
{
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index db88a8e7ee0e..4dd6cad330e8 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3607,7 +3607,7 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_UNSORPORTED\n"));
+ pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n"));
break;
}
complete(pm8001_dev->dcompletion);
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index a980ef756a67..5bd10b534c99 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index b5c236efd465..42fde55ac735 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
index 5d5095e3d96d..29a55257224f 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
index 8fbe6e4d0b4f..bf102204fe56 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index cabb6af60fb8..2c78d8fb9122 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -180,6 +180,7 @@ struct qedf_rport {
spinlock_t rport_lock;
#define QEDF_RPORT_SESSION_READY 1
#define QEDF_RPORT_UPLOADING_CONNECTION 2
+#define QEDF_RPORT_IN_RESET 3
unsigned long flags;
unsigned long retry_delay_timestamp;
struct fc_rport *rport;
@@ -300,6 +301,7 @@ struct qedf_ctx {
#define QEDF_FALLBACK_VLAN 1002
#define QEDF_DEFAULT_PRIO 3
int vlan_id;
+ u8 prio;
struct qed_dev *cdev;
struct qed_dev_fcoe_info dev_info;
struct qed_int_info int_info;
@@ -365,6 +367,7 @@ struct qedf_ctx {
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
+ struct delayed_work grcdump_work;
u32 slow_sge_ios;
u32 fast_sge_ios;
@@ -504,6 +507,7 @@ extern int qedf_send_flogi(struct qedf_ctx *qedf);
extern void qedf_get_protocol_tlv_data(void *dev, void *data);
extern void qedf_fp_io_handler(struct work_struct *work);
extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
+extern void qedf_wq_grcdump(struct work_struct *work);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index fa6727685627..0487b7237104 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c
index bd1cef25a900..f2397ee9ba69 100644
--- a/drivers/scsi/qedf/qedf_dbg.c
+++ b/drivers/scsi/qedf/qedf_dbg.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -147,7 +147,7 @@ qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common,
if (!*buf)
return -EINVAL;
- return common->dbg_grc(cdev, *buf, grcsize);
+ return common->dbg_all_data(cdev, *buf);
}
void
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 77c27e888969..dd0109653aa3 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index 5789ce185923..c29c162a494f 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 QLogic Corporation
+ * Copyright (c) 2016-2018 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index aa22b11436ba..04f0c4d2e256 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -14,8 +14,8 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
- struct qedf_ctx *qedf = fcport->qedf;
- struct fc_lport *lport = qedf->lport;
+ struct qedf_ctx *qedf;
+ struct fc_lport *lport;
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
@@ -29,6 +29,15 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
unsigned long flags;
u16 sqe_idx;
+ if (!fcport) {
+ QEDF_ERR(NULL, "fcport is NULL");
+ rc = -EINVAL;
+ goto els_err;
+ }
+
+ qedf = fcport->qedf;
+ lport = qedf->lport;
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
rc = fc_remote_port_chkready(fcport->rport);
@@ -201,6 +210,14 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
+ /*
+ * Release a reference to the rrq request if we timed out as the
+ * rrq completion handler is called directly from the timeout handler
+ * and not from els_compl where the reference would have normally been
+ * released.
+ */
+ if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
+ kref_put(&rrq_req->refcount, qedf_release_cmd);
kfree(cb_arg);
}
@@ -322,6 +339,17 @@ void qedf_restart_rport(struct qedf_rport *fcport)
if (!fcport)
return;
+ if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
+ !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+ test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+ QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
+ fcport);
+ return;
+ }
+
+ /* Set that we are now in reset */
+ set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
+
rdata = fcport->rdata;
if (rdata) {
lport = fcport->qedf->lport;
@@ -334,6 +362,7 @@ void qedf_restart_rport(struct qedf_rport *fcport)
if (rdata)
fc_rport_login(rdata);
}
+ clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 16d1a21cdff9..3fd3af799b3d 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -137,7 +137,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: "
"dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub,
- ntohs(vlan_tci));
+ vlan_tci);
if (qedf_dump_frames)
print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, false);
@@ -184,6 +184,7 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"Dropping CVL since FCF has not been selected "
"yet.");
+ kfree_skb(skb);
return;
}
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index 503c1ae3ccd0..f6f634e48d69 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 3fe579d0f1a8..6bbc38b1b465 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -23,12 +23,31 @@ static void qedf_cmd_timeout(struct work_struct *work)
struct qedf_ioreq *io_req =
container_of(work, struct qedf_ioreq, timeout_work.work);
- struct qedf_ctx *qedf = io_req->fcport->qedf;
- struct qedf_rport *fcport = io_req->fcport;
+ struct qedf_ctx *qedf;
+ struct qedf_rport *fcport;
u8 op = 0;
+ if (io_req == NULL) {
+ QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
+ return;
+ }
+
+ fcport = io_req->fcport;
+ if (io_req->fcport == NULL) {
+ QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
+ return;
+ }
+
+ qedf = fcport->qedf;
+
switch (io_req->cmd_type) {
case QEDF_ABTS:
+ if (qedf == NULL) {
+ QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
+ io_req->xid);
+ return;
+ }
+
QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
io_req->xid);
/* Cleanup timed out ABTS */
@@ -931,6 +950,15 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
return 0;
}
+ if (!qedf->pdev->msix_enabled) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
+ sc_cmd);
+ sc_cmd->result = DID_NO_CONNECT << 16;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
+
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
@@ -1420,6 +1448,12 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
if (!fcport)
return;
+ /* Check that fcport is still offloaded */
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
+ return;
+ }
+
qedf = fcport->qedf;
cmd_mgr = qedf->cmd_mgr;
@@ -1436,8 +1470,8 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
rc = kref_get_unless_zero(&io_req->refcount);
if (!rc) {
QEDF_ERR(&(qedf->dbg_ctx),
- "Could not get kref for io_req=0x%p.\n",
- io_req);
+ "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
+ io_req, io_req->xid);
continue;
}
qedf_flush_els_req(qedf, io_req);
@@ -1448,6 +1482,31 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
goto free_cmd;
}
+ if (io_req->cmd_type == QEDF_ABTS) {
+ rc = kref_get_unless_zero(&io_req->refcount);
+ if (!rc) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
+ io_req, io_req->xid);
+ continue;
+ }
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Flushing abort xid=0x%x.\n", io_req->xid);
+
+ clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+
+ if (io_req->sc_cmd) {
+ if (io_req->return_scsi_cmd_on_abts)
+ qedf_scsi_done(qedf, io_req, DID_ERROR);
+ }
+
+ /* Notify eh_abort handler that ABTS is complete */
+ complete(&io_req->abts_done);
+ kref_put(&io_req->refcount, qedf_release_cmd);
+
+ goto free_cmd;
+ }
+
if (!io_req->sc_cmd)
continue;
if (lun > 0) {
@@ -1463,7 +1522,7 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
rc = kref_get_unless_zero(&io_req->refcount);
if (!rc) {
QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
- "io_req=0x%p\n", io_req);
+ "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
continue;
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
@@ -1525,6 +1584,21 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
goto abts_err;
}
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
+ rc = 1;
+ goto out;
+ }
+
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+ test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
+ test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
+ "cleanup or abort processing or already "
+ "completed.\n", io_req->xid);
+ rc = 1;
+ goto out;
+ }
kref_get(&io_req->refcount);
@@ -1564,6 +1638,7 @@ abts_err:
* task at the firmware.
*/
qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
+out:
return rc;
}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d3f73d8d7738..90394cef0f41 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -44,20 +44,20 @@ module_param_named(debug, qedf_debug, uint, S_IRUGO);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
" mask");
-static uint qedf_fipvlan_retries = 30;
+static uint qedf_fipvlan_retries = 60;
module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
- "before giving up (default 30)");
+ "before giving up (default 60)");
static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
"(default 1002).");
-static uint qedf_default_prio = QEDF_DEFAULT_PRIO;
+static int qedf_default_prio = -1;
module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
-MODULE_PARM_DESC(default_prio, " Default 802.1q priority for FIP and FCoE"
- " traffic (default 3).");
+MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
+ " traffic (value between 0 and 7, default 3).");
uint qedf_dump_frames;
module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
@@ -89,6 +89,11 @@ module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry "
"delay handling (default off).");
+static bool qedf_dcbx_no_wait;
+module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
+ "sending FIP VLAN requests on link up (Default: off).");
+
static uint qedf_dp_module;
module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed "
@@ -109,9 +114,9 @@ static struct kmem_cache *qedf_io_work_cache;
void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
{
qedf->vlan_id = vlan_id;
- qedf->vlan_id |= qedf_default_prio << VLAN_PRIO_SHIFT;
+ qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
- "prio=%d.\n", vlan_id, qedf_default_prio);
+ "prio=%d.\n", vlan_id, qedf->prio);
}
/* Returns true if we have a valid vlan, false otherwise */
@@ -480,6 +485,11 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
if (link->link_up) {
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
+ QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Ignoring link up event as link is already up.\n");
+ return;
+ }
QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
link->speed / 1000);
@@ -489,7 +499,8 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
atomic_set(&qedf->link_state, QEDF_LINK_UP);
qedf_update_link_speed(qedf, link);
- if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
+ if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
+ qedf_dcbx_no_wait) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"DCBx done.\n");
if (atomic_read(&qedf->link_down_tmo_valid) > 0)
@@ -515,7 +526,7 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
"Starting link down tmo.\n");
atomic_set(&qedf->link_down_tmo_valid, 1);
}
- qedf->vlan_id = 0;
+ qedf->vlan_id = 0;
qedf_update_link_speed(qedf, link);
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
qedf_link_down_tmo * HZ);
@@ -526,6 +537,7 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
{
struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+ u8 tmp_prio;
QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
"prio=%d.\n", get->operational.valid, get->operational.enabled,
@@ -541,7 +553,26 @@ static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
- if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
+ /*
+ * Set the 8021q priority in the following manner:
+ *
+ * 1. If a modparam is set use that
+ * 2. If the value is not between 0..7 use the default
+ * 3. Use the priority we get from the DCBX app tag
+ */
+ tmp_prio = get->operational.app_prio.fcoe;
+ if (qedf_default_prio > -1)
+ qedf->prio = qedf_default_prio;
+ else if (tmp_prio < 0 || tmp_prio > 7) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "FIP/FCoE prio %d out of range, setting to %d.\n",
+ tmp_prio, QEDF_DEFAULT_PRIO);
+ qedf->prio = QEDF_DEFAULT_PRIO;
+ } else
+ qedf->prio = tmp_prio;
+
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
+ !qedf_dcbx_no_wait) {
if (atomic_read(&qedf->link_down_tmo_valid) > 0)
queue_delayed_work(qedf->link_update_wq,
&qedf->link_recovery, 0);
@@ -614,16 +645,6 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
goto out;
}
- if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
- test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
- test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
- QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
- "cleanup or abort processing or already "
- "completed.\n", io_req->xid);
- rc = SUCCESS;
- goto out;
- }
-
QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
"fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
@@ -705,7 +726,6 @@ static void qedf_ctx_soft_reset(struct fc_lport *lport)
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
- atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
qedf_wait_for_upload(qedf);
@@ -720,6 +740,22 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
struct fc_lport *lport;
struct qedf_ctx *qedf;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+
+ if (rval) {
+ QEDF_ERR(NULL, "device_reset rport not ready\n");
+ return FAILED;
+ }
+
+ if (fcport == NULL) {
+ QEDF_ERR(NULL, "device_reset: rport is NULL\n");
+ return FAILED;
+ }
lport = shost_priv(sc_cmd->device->host);
qedf = lport_priv(lport);
@@ -1109,7 +1145,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
conn_info.vlan_tag = qedf->vlan_id <<
FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
conn_info.vlan_tag |=
- qedf_default_prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
+ qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
@@ -1649,6 +1685,15 @@ static int qedf_vport_destroy(struct fc_vport *vport)
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fc_lport *vn_port = vport->dd_data;
+ struct qedf_ctx *qedf = lport_priv(vn_port);
+
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is NULL.\n");
+ goto out;
+ }
+
+ /* Set unloading bit on vport qedf_ctx to prevent more I/O */
+ set_bit(QEDF_UNLOADING, &qedf->flags);
mutex_lock(&n_port->lp_mutex);
list_del(&vn_port->list);
@@ -1675,6 +1720,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
if (vn_port->host)
scsi_host_put(vn_port->host);
+out:
return 0;
}
@@ -2109,7 +2155,8 @@ static int qedf_setup_int(struct qedf_ctx *qedf)
QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
qedf->int_info.used_cnt = 1;
- return 0;
+ QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n");
+ return -EINVAL;
}
/* Main function for libfc frame reception */
@@ -2195,6 +2242,7 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"FC frame d_id mismatch with MAC %pM.\n", dest_mac);
+ kfree_skb(skb);
return;
}
@@ -2983,8 +3031,17 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->link_update_wq = create_workqueue(host_buf);
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
-
+ INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
qedf->fipvlan_retries = qedf_fipvlan_retries;
+ /* Set a default prio in case DCBX doesn't converge */
+ if (qedf_default_prio > -1) {
+ /*
+ * This is the case where we pass a modparam in so we want to
+ * honor it even if dcbx doesn't converge.
+ */
+ qedf->prio = qedf_default_prio;
+ } else
+ qedf->prio = QEDF_DEFAULT_PRIO;
/*
* Common probe. Takes care of basic hardware init and pci_*
@@ -3214,7 +3271,8 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
* unload process.
*/
if (mode != QEDF_MODE_RECOVERY) {
- qedf->grcdump_size = qed_ops->common->dbg_grc_size(qedf->cdev);
+ qedf->grcdump_size =
+ qed_ops->common->dbg_all_data_size(qedf->cdev);
if (qedf->grcdump_size) {
rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
qedf->grcdump_size);
@@ -3398,6 +3456,15 @@ static void qedf_remove(struct pci_dev *pdev)
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}
+void qedf_wq_grcdump(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, grcdump_work.work);
+
+ QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
+ qedf_capture_grc_dump(qedf);
+}
+
/*
* Protocol TLV handler
*/
@@ -3508,6 +3575,17 @@ static int __init qedf_init(void)
if (qedf_debug == QEDF_LOG_DEFAULT)
qedf_debug = QEDF_DEFAULT_LOG_MASK;
+ /*
+ * Check that default prio for FIP/FCoE traffic is between 0..7 if a
+ * value has been set
+ */
+ if (qedf_default_prio > -1)
+ if (qedf_default_prio > 7) {
+ qedf_default_prio = QEDF_DEFAULT_PRIO;
+ QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n",
+ QEDF_DEFAULT_PRIO);
+ }
+
/* Print driver banner */
QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
QEDF_VERSION);
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index c2478056356a..9455faacd5de 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -1,15 +1,15 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
-#define QEDF_VERSION "8.33.0.20"
+#define QEDF_VERSION "8.33.16.20"
#define QEDF_DRIVER_MAJOR_VER 8
#define QEDF_DRIVER_MINOR_VER 33
-#define QEDF_DRIVER_REV_VER 0
+#define QEDF_DRIVER_REV_VER 16
#define QEDF_DRIVER_ENG_VER 20
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index eb2ec1fb07cb..9442e18aef6f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2279,8 +2279,6 @@ enum discovery_state {
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
DSC_GPDB,
- DSC_GFPN_ID,
- DSC_GPSC,
DSC_UPD_FCPORT,
DSC_LOGIN_COMPLETE,
DSC_ADISC,
@@ -2346,6 +2344,7 @@ typedef struct fc_port {
unsigned int login_succ:1;
unsigned int query:1;
unsigned int id_changed:1;
+ unsigned int rscn_rcvd:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -3226,6 +3225,7 @@ enum qla_work_type {
QLA_EVT_GNNID,
QLA_EVT_GFPNID,
QLA_EVT_SP_RETRY,
+ QLA_EVT_IIDMA,
};
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3c4c84ed0f0f..f68eb6096559 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -116,7 +116,8 @@ extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
-
+int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
+void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
/*
* Global Data in qla_os.c source file.
*/
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 9e914f9c3ffb..4bc2b66b299f 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3175,7 +3175,6 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
@@ -3239,7 +3238,7 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
return;
}
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ qla_post_iidma_work(vha, fcport);
}
static void qla24xx_async_gpsc_sp_done(void *s, int res)
@@ -3257,8 +3256,6 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
if (res == (DID_ERROR << 16)) {
/* entry status error */
goto done;
@@ -3327,7 +3324,6 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!sp)
goto done;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpsc";
sp->gen1 = fcport->rscn_gen;
@@ -3862,6 +3858,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
bool found;
struct fab_scan_rp *rp;
unsigned long flags;
+ u8 recheck = 0;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s enter\n", __func__);
@@ -3914,8 +3911,8 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
continue;
+ fcport->rscn_rcvd = 0;
fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->d_id.b24 = rp->id.b24;
found = true;
/*
* If device was not a fabric device before.
@@ -3923,7 +3920,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
qla2x00_clear_loop_id(fcport);
fcport->flags |= FCF_FABRIC_DEVICE;
+ } else if (fcport->d_id.b24 != rp->id.b24) {
+ qlt_schedule_sess_for_deletion(fcport);
}
+ fcport->d_id.b24 = rp->id.b24;
break;
}
@@ -3940,10 +3940,13 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
* Logout all previous fabric dev marked lost, except FCP2 devices.
*/
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+ fcport->rscn_rcvd = 0;
continue;
+ }
if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ fcport->rscn_rcvd = 0;
if ((qla_dual_mode_enabled(vha) ||
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) {
@@ -3961,15 +3964,31 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
continue;
}
}
- } else
- qla24xx_fcport_handle_login(vha, fcport);
+ } else {
+ if (fcport->rscn_rcvd ||
+ fcport->disc_state != DSC_LOGIN_COMPLETE) {
+ fcport->rscn_rcvd = 0;
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+ }
}
+ recheck = 1;
out:
qla24xx_sp_unmap(vha, sp);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (recheck) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->rscn_rcvd) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ break;
+ }
+ }
+ }
}
static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
@@ -4532,7 +4551,6 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
@@ -4594,7 +4612,6 @@ static void qla2x00_async_gfpnid_sp_done(void *s, int res)
struct event_arg ea;
u64 wwn;
- fcport->flags &= ~FCF_ASYNC_SENT;
wwn = wwn_to_u64(fpn);
if (wwn)
memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
@@ -4623,12 +4640,10 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
- fcport->disc_state = DSC_GFPN_ID;
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gfpnid";
sp->gen1 = fcport->rscn_gen;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8f55dd44adae..1aa3720ea2ed 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1021,30 +1021,11 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
vha->fcport_count++;
ea->fcport->login_succ = 1;
- if (!IS_IIDMA_CAPABLE(vha->hw) ||
- !vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0x20d6,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, ea->fcport->port_name,
- vha->fcport_count);
-
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
- } else {
- if (ea->fcport->id_changed) {
- ea->fcport->id_changed = 0;
- ql_dbg(ql_dbg_disc, vha, 0x20d7,
- "%s %d %8phC post gfpnid fcp_cnt %d\n",
- __func__, __LINE__, ea->fcport->port_name,
- vha->fcport_count);
- qla24xx_post_gfpnid_work(vha, ea->fcport);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20d7,
- "%s %d %8phC post gpsc fcp_cnt %d\n",
- __func__, __LINE__, ea->fcport->port_name,
- vha->fcport_count);
- qla24xx_post_gpsc_work(vha, ea->fcport);
- }
- }
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
} else if (ea->fcport->login_succ) {
/*
* We have an existing session. A late RSCN delivery
@@ -1167,9 +1148,6 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
fcport->login_gen, fcport->login_retry,
fcport->loop_id, fcport->scan_state);
- if (fcport->login_retry == 0)
- return 0;
-
if (fcport->scan_state != QLA_FCPORT_FOUND)
return 0;
@@ -1194,7 +1172,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
- fcport->login_retry--;
+ if (fcport->login_retry > 0)
+ fcport->login_retry--;
switch (fcport->disc_state) {
case DSC_DELETED:
@@ -1350,20 +1329,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
fc_port_t *f, *tf;
uint32_t id = 0, mask, rid;
unsigned long flags;
-
- switch (ea->event) {
- case FCME_RSCN:
- case FCME_GIDPN_DONE:
- case FCME_GPSC_DONE:
- case FCME_GPNID_DONE:
- case FCME_GNNID_DONE:
- if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
- test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
- return;
- break;
- default:
- break;
- }
+ fc_port_t *fcport;
switch (ea->event) {
case FCME_RELOGIN:
@@ -1377,6 +1343,11 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
+ fcport = qla2x00_find_fcport_by_nportid
+ (vha, &ea->id, 1);
+ if (fcport)
+ fcport->rscn_rcvd = 1;
+
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags == 0) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -4532,7 +4503,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
fcport->deleted = QLA_SESS_DELETED;
fcport->login_retry = vha->hw->login_retry_count;
- fcport->login_retry = 5;
fcport->logout_on_delete = 1;
if (!fcport->ct_desc.ct_sns) {
@@ -5054,6 +5024,24 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
}
}
+void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ qla2x00_iidma_fcport(vha, fcport);
+ qla24xx_update_fcport_fcp_prio(vha, fcport);
+}
+
+int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
@@ -5122,13 +5110,14 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- goto reg_port;
+ } else {
+ fcport->login_retry = 0;
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ fcport->deleted = 0;
+ fcport->logout_on_delete = 1;
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
}
- fcport->login_retry = 0;
- fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- fcport->deleted = 0;
- fcport->logout_on_delete = 1;
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
@@ -5140,7 +5129,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla24xx_update_fcport_fcp_prio(vha, fcport);
-reg_port:
switch (vha->host->active_mode) {
case MODE_INITIATOR:
qla2x00_reg_remote_port(vha, fcport);
@@ -5159,6 +5147,23 @@ reg_port:
default:
break;
}
+
+ if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
+ if (fcport->id_changed) {
+ fcport->id_changed = 0;
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gfpnid fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gfpnid_work(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gpsc_work(vha, fcport);
+ }
+ }
}
/*
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 15eaa6dded04..817c18a8e84d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -5063,6 +5063,10 @@ qla2x00_do_work(struct scsi_qla_host *vha)
break;
case QLA_EVT_SP_RETRY:
qla_sp_retry(vha, e);
+ break;
+ case QLA_EVT_IIDMA:
+ qla_do_iidma_work(vha, e->u.fcport.fcport);
+ break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 025dc2d3f3de..b85c833099ff 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -601,24 +601,18 @@ void qla2x00_async_nack_sp_done(void *s, int res)
vha->fcport_count++;
- if (!IS_IIDMA_CAPABLE(vha->hw) ||
- !vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0x20f3,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
- sp->fcport->disc_state = DSC_UPD_FCPORT;
- qla24xx_post_upd_fcport_work(vha, sp->fcport);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20f5,
- "%s %d %8phC post gpsc fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
-
- qla24xx_post_gpsc_work(vha, sp->fcport);
- }
+ ql_dbg(ql_dbg_disc, vha, 0x20f3,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__,
+ sp->fcport->port_name,
+ vha->fcport_count);
+ sp->fcport->disc_state = DSC_UPD_FCPORT;
+ qla24xx_post_upd_fcport_work(vha, sp->fcport);
+ } else {
+ sp->fcport->login_retry = 0;
+ sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
+ sp->fcport->deleted = 0;
+ sp->fcport->logout_on_delete = 1;
}
break;
@@ -1930,13 +1924,84 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
+static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
+ uint64_t unpacked_lun)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_qpair_hint *h = NULL;
+
+ if (vha->flags.qpairs_available) {
+ h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
+ if (!h)
+ h = &tgt->qphints[0];
+ } else {
+ h = &tgt->qphints[0];
+ }
+
+ return h;
+}
+
+static void qlt_do_tmr_work(struct work_struct *work)
+{
+ struct qla_tgt_mgmt_cmd *mcmd =
+ container_of(work, struct qla_tgt_mgmt_cmd, work);
+ struct qla_hw_data *ha = mcmd->vha->hw;
+ int rc = EIO;
+ uint32_t tag;
+ unsigned long flags;
+
+ switch (mcmd->tmr_func) {
+ case QLA_TGT_ABTS:
+ tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
+ break;
+ default:
+ tag = 0;
+ break;
+ }
+
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
+ mcmd->tmr_func, tag);
+
+ if (rc != 0) {
+ spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
+ switch (mcmd->tmr_func) {
+ case QLA_TGT_ABTS:
+ qlt_24xx_send_abts_resp(mcmd->qpair,
+ &mcmd->orig_iocb.abts,
+ FCP_TMF_REJECTED, false);
+ break;
+ case QLA_TGT_LUN_RESET:
+ case QLA_TGT_CLEAR_TS:
+ case QLA_TGT_ABORT_TS:
+ case QLA_TGT_CLEAR_ACA:
+ case QLA_TGT_TARGET_RESET:
+ qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
+ qla_sam_status);
+ break;
+
+ case QLA_TGT_ABORT_ALL:
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ case QLA_TGT_NEXUS_LOSS:
+ qlt_send_notify_ack(mcmd->qpair,
+ &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
+ break;
+ }
+ spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
+ "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+ mcmd->vha->vp_idx, rc);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ }
+}
+
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd;
- int rc;
+ struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
/* send TASK_ABORT response immediately */
@@ -1961,23 +2026,29 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
mcmd->reset_count = ha->base_qpair->chip_reset;
mcmd->tmr_func = QLA_TGT_ABTS;
- mcmd->qpair = ha->base_qpair;
+ mcmd->qpair = h->qpair;
mcmd->vha = vha;
/*
* LUN is looked up by target-core internally based on the passed
* abts->exchange_addr_to_abort tag.
*/
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func,
- abts->exchange_addr_to_abort);
- if (rc != 0) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
- "qla_target(%d): tgt_ops->handle_tmr()"
- " failed: %d", vha->vp_idx, rc);
- mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
- return -EFAULT;
+ mcmd->se_cmd.cpuid = h->cpuid;
+
+ if (ha->tgt.tgt_ops->find_cmd_by_tag) {
+ struct qla_tgt_cmd *abort_cmd;
+
+ abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
+ abts->exchange_addr_to_abort);
+ if (abort_cmd && abort_cmd->qpair) {
+ mcmd->qpair = abort_cmd->qpair;
+ mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
+ }
}
+ INIT_WORK(&mcmd->work, qlt_do_tmr_work);
+ queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
+
return 0;
}
@@ -3556,13 +3627,6 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio24->u.status1.ox_id = cpu_to_le16(temp);
- /* Most likely, it isn't needed */
- ctio24->u.status1.residual = get_unaligned((uint32_t *)
- &atio->u.isp24.fcp_cmnd.add_cdb[
- atio->u.isp24.fcp_cmnd.add_cdb_len]);
- if (ctio24->u.status1.residual != 0)
- ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
-
/* Memory Barrier */
wmb();
if (qpair->reqq_start_iocbs)
@@ -4057,9 +4121,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
fcp_task_attr = qlt_get_fcp_task_attr(vha,
atio->u.isp24.fcp_cmnd.task_attr);
- data_length = be32_to_cpu(get_unaligned((uint32_t *)
- &atio->u.isp24.fcp_cmnd.add_cdb[
- atio->u.isp24.fcp_cmnd.add_cdb_len]));
+ data_length = get_datalen_for_atio(atio);
ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
fcp_task_attr, data_dir, bidi);
@@ -4335,7 +4397,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd;
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
- int res;
+ struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
if (!mcmd) {
@@ -4355,24 +4417,36 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
mcmd->tmr_func = fn;
mcmd->flags = flags;
mcmd->reset_count = ha->base_qpair->chip_reset;
- mcmd->qpair = ha->base_qpair;
+ mcmd->qpair = h->qpair;
mcmd->vha = vha;
+ mcmd->se_cmd.cpuid = h->cpuid;
+ mcmd->unpacked_lun = lun;
switch (fn) {
case QLA_TGT_LUN_RESET:
- abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
- break;
- }
+ case QLA_TGT_CLEAR_TS:
+ case QLA_TGT_ABORT_TS:
+ abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
+ /* drop through */
+ case QLA_TGT_CLEAR_ACA:
+ h = qlt_find_qphint(vha, mcmd->unpacked_lun);
+ mcmd->qpair = h->qpair;
+ mcmd->se_cmd.cpuid = h->cpuid;
+ break;
- res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
- if (res != 0) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
- "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
- sess->vha->vp_idx, res);
- mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
- return -EFAULT;
+ case QLA_TGT_TARGET_RESET:
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ case QLA_TGT_NEXUS_LOSS:
+ case QLA_TGT_ABORT_ALL:
+ default:
+ /* no-op */
+ break;
}
+ INIT_WORK(&mcmd->work, qlt_do_tmr_work);
+ queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
+ &mcmd->work);
+
return 0;
}
@@ -4841,7 +4915,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
switch (sess->disc_state) {
case DSC_LOGIN_PEND:
case DSC_GPDB:
- case DSC_GPSC:
case DSC_UPD_FCPORT:
case DSC_LOGIN_COMPLETE:
case DSC_ADISC:
@@ -5113,8 +5186,6 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
"qla_target(%d): Immediate notify task %x\n",
vha->vp_idx, iocb->u.isp2x.task_flags);
- if (qlt_handle_task_mgmt(vha, iocb) == 0)
- send_notify_ack = 0;
break;
case IMM_NTFY_ELS:
@@ -5147,10 +5218,15 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
struct fc_port *sess = NULL;
unsigned long flags;
u16 temp;
+ port_id_t id;
+
+ id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
+ id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
+ id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
+ id.b.rsvd_1 = 0;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
- atio->u.isp24.fcp_hdr.s_id);
+ sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
@@ -5189,6 +5265,12 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
*/
ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
ctio24->u.status1.scsi_status = cpu_to_le16(status);
+
+ ctio24->u.status1.residual = get_datalen_for_atio(atio);
+
+ if (ctio24->u.status1.residual != 0)
+ ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
/* Memory Barrier */
wmb();
if (qpair->reqq_start_iocbs)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 728ce74358e7..fecf96f0225c 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -682,7 +682,7 @@ struct qla_tgt_cmd;
* target module (tcm_qla2xxx).
*/
struct qla_tgt_func_tmpl {
-
+ struct qla_tgt_cmd *(*find_cmd_by_tag)(struct fc_port *, uint64_t);
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
@@ -966,6 +966,8 @@ struct qla_tgt_mgmt_cmd {
unsigned int flags;
uint32_t reset_count;
#define QLA24XX_MGMT_SEND_NACK 1
+ struct work_struct work;
+ uint64_t unpacked_lun;
union {
struct atio_from_isp atio;
struct imm_ntfy_from_isp imm_ntfy;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 0c55d7057280..1ad7582220c3 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.06-k"
+#define QLA2XXX_VERSION "10.00.00.07-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index aadfeaac3898..0c2e82af9c0a 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -48,7 +48,6 @@
#include "tcm_qla2xxx.h"
static struct workqueue_struct *tcm_qla2xxx_free_wq;
-static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
/*
* Parse WWN.
@@ -630,6 +629,32 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
transl_tmr_func, GFP_ATOMIC, tag, flags);
}
+static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess,
+ uint64_t tag)
+{
+ struct qla_tgt_cmd *cmd = NULL;
+ struct se_cmd *secmd;
+ unsigned long flags;
+
+ if (!sess->se_sess)
+ return NULL;
+
+ spin_lock_irqsave(&sess->se_sess->sess_cmd_lock, flags);
+ list_for_each_entry(secmd, &sess->se_sess->sess_cmd_list, se_cmd_list) {
+ /* skip task management functions, including tmr->task_cmd */
+ if (secmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ continue;
+
+ if (secmd->tag == tag) {
+ cmd = container_of(secmd, struct qla_tgt_cmd, se_cmd);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&sess->se_sess->sess_cmd_lock, flags);
+
+ return cmd;
+}
+
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
@@ -1608,6 +1633,7 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
* Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
*/
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+ .find_cmd_by_tag = tcm_qla2xxx_find_cmd_by_tag,
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
.handle_tmr = tcm_qla2xxx_handle_tmr,
@@ -1976,16 +2002,8 @@ static int tcm_qla2xxx_register_configfs(void)
goto out_fabric_npiv;
}
- tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
- if (!tcm_qla2xxx_cmd_wq) {
- ret = -ENOMEM;
- goto out_free_wq;
- }
-
return 0;
-out_free_wq:
- destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
target_unregister_template(&tcm_qla2xxx_npiv_ops);
out_fabric:
@@ -1995,7 +2013,6 @@ out_fabric:
static void tcm_qla2xxx_deregister_configfs(void)
{
- destroy_workqueue(tcm_qla2xxx_cmd_wq);
destroy_workqueue(tcm_qla2xxx_free_wq);
target_unregister_template(&tcm_qla2xxx_ops);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index cec9a14982e6..8578e566ab41 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1385,6 +1385,9 @@ fail_unmap_queues:
qpti->req_cpu, qpti->req_dvma);
#undef QSIZE
+fail_free_irq:
+ free_irq(qpti->irq, qpti);
+
fail_unmap_regs:
of_iounmap(&op->resource[0], qpti->qregs,
resource_size(&op->resource[0]));
@@ -1392,9 +1395,6 @@ fail_unmap_regs:
of_iounmap(&op->resource[0], qpti->sreg,
sizeof(unsigned char));
-fail_free_irq:
- free_irq(qpti->irq, qpti);
-
fail_unlink:
scsi_host_put(host);
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index b784002ef0bd..c5a8756384bc 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -4,7 +4,7 @@
#include <scsi/scsi_dbg.h>
#include "scsi_debugfs.h"
-#define SCSI_CMD_FLAG_NAME(name) [ilog2(SCMD_##name)] = #name
+#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name
static const char *const scsi_cmd_flags[] = {
SCSI_CMD_FLAG_NAME(TAGGED),
SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA),
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index dd107dc4db0e..c4cbfd07b916 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -161,15 +161,16 @@ static struct {
{"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */
{"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
+ {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN |
+ BLIST_REPORTLUN2 | BLIST_RETRY_ITF},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
{"easyRAID", "F8", NULL, BLIST_NOREPORTLUN},
{"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1},
{"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
- {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36},
- {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */
{"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
@@ -361,8 +362,22 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
scsi_strcpy_devinfo("model", devinfo->model, sizeof(devinfo->model),
model, compatible);
- if (strflags)
- flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+ if (strflags) {
+ unsigned long long val;
+ int ret = kstrtoull(strflags, 0, &val);
+
+ if (ret != 0) {
+ kfree(devinfo);
+ return ret;
+ }
+ flags = (__force blist_flags_t)val;
+ }
+ if (flags & __BLIST_UNUSED_MASK) {
+ pr_err("scsi_devinfo (%s:%s): unsupported flags 0x%llx",
+ vendor, model, flags & __BLIST_UNUSED_MASK);
+ kfree(devinfo);
+ return -EINVAL;
+ }
devinfo->flags = flags;
devinfo->compatible = compatible;
@@ -615,7 +630,7 @@ static int devinfo_seq_show(struct seq_file *m, void *v)
devinfo_table->name)
seq_printf(m, "[%s]:\n", devinfo_table->name);
- seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
+ seq_printf(m, "'%.8s' '%.16s' 0x%llx\n",
devinfo->vendor, devinfo->model, devinfo->flags);
return 0;
}
@@ -734,9 +749,9 @@ MODULE_PARM_DESC(dev_flags,
" list entries for vendor and model with an integer value of flags"
" to the scsi device info list");
-module_param_named(default_dev_flags, scsi_default_dev_flags, int, S_IRUGO|S_IWUSR);
+module_param_named(default_dev_flags, scsi_default_dev_flags, ullong, 0644);
MODULE_PARM_DESC(default_dev_flags,
- "scsi default device flag integer value");
+ "scsi default device flag uint64_t value");
/**
* scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 188f30572aa1..5a58cbf3a75d 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -58,7 +58,10 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "3526", "rdac", },
{"IBM", "3542", "rdac", },
{"IBM", "3552", "rdac", },
- {"SGI", "TP9", "rdac", },
+ {"SGI", "TP9300", "rdac", },
+ {"SGI", "TP9400", "rdac", },
+ {"SGI", "TP9500", "rdac", },
+ {"SGI", "TP9700", "rdac", },
{"SGI", "IS", "rdac", },
{"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9c02ba2e7ef3..8932ae81a15a 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -38,6 +38,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_dh.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/sg.h>
#include "scsi_priv.h"
@@ -525,6 +526,12 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
if (sshdr.asc == 0x10) /* DIF */
return SUCCESS;
+ if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF)
+ return ADD_TO_MLQUEUE;
+ if (sshdr.asc == 0xc1 && sshdr.ascq == 0x01 &&
+ sdev->sdev_bflags & BLIST_RETRY_ASC_C1)
+ return ADD_TO_MLQUEUE;
+
return NEEDS_RETRY;
case NOT_READY:
case UNIT_ATTENTION:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fb38aeff9dbd..41e9ac9fc138 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -985,6 +985,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
case 0x08: /* Long write in progress */
case 0x09: /* self test in progress */
case 0x14: /* space allocation in progress */
+ case 0x1a: /* start stop unit in progress */
+ case 0x1b: /* sanitize in progress */
+ case 0x1d: /* configuration in progress */
+ case 0x24: /* depopulation in progress */
action = ACTION_DELAYED_RETRY;
break;
default:
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1e36c9a9ad17..7943b762c12d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -968,7 +968,7 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
#define BLIST_FLAG_NAME(name) \
- [ilog2((__force unsigned int)BLIST_##name)] = #name
+ [const_ilog2((__force __u64)BLIST_##name)] = #name
static const char *const sdev_bflags_name[] = {
#include "scsi_devinfo_tbl.c"
};
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index e2953b416746..0cd16e80b019 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -213,10 +213,6 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
to_sas_host_attrs(shost)->q = q;
}
- /*
- * by default assume old behaviour and bounce for any highmem page
- */
- blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
return 0;
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 0d663b5e45bb..392c7d078ae3 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -74,12 +74,12 @@ struct scsi_disk {
struct gendisk *disk;
struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
- unsigned int nr_zones;
- unsigned int zone_blocks;
- unsigned int zone_shift;
- unsigned int zones_optimal_open;
- unsigned int zones_optimal_nonseq;
- unsigned int zones_max_open;
+ u32 nr_zones;
+ u32 zone_blocks;
+ u32 zone_shift;
+ u32 zones_optimal_open;
+ u32 zones_optimal_nonseq;
+ u32 zones_max_open;
#endif
atomic_t openers;
sector_t capacity; /* size in logical blocks */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 210407cd2341..323e3dc4bc59 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -299,16 +299,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
case REQ_OP_WRITE:
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
-
- if (result &&
- sshdr->sense_key == ILLEGAL_REQUEST &&
- sshdr->asc == 0x21)
- /*
- * INVALID ADDRESS FOR WRITE error: It is unlikely that
- * retrying write requests failed with any kind of
- * alignement error will result in success. So don't.
- */
- cmd->allowed = 0;
break;
case REQ_OP_ZONE_REPORT:
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 6fc58e2c99d3..573763908562 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1191,7 +1191,7 @@ sg_fasync(int fd, struct file *filp, int mode)
return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
-static int
+static vm_fault_t
sg_vma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index d8a376b7882d..d9b2e46424aa 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -47,10 +47,10 @@ static const char * const snic_req_state_str[] = {
[SNIC_IOREQ_NOT_INITED] = "SNIC_IOREQ_NOT_INITED",
[SNIC_IOREQ_PENDING] = "SNIC_IOREQ_PENDING",
[SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
- [SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPELTE",
+ [SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
[SNIC_IOREQ_LR_PENDING] = "SNIC_IOREQ_LR_PENDING",
- [SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPELTE",
- [SNIC_IOREQ_COMPLETE] = "SNIC_IOREQ_CMD_COMPELTE",
+ [SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
+ [SNIC_IOREQ_COMPLETE] = "SNIC_IOREQ_CMD_COMPLETE",
};
/* snic cmd status strings */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a427ce9497be..c9e27e752c25 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3878,7 +3878,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
{
struct st_buffer *tb;
- tb = kzalloc(sizeof(struct st_buffer), GFP_ATOMIC);
+ tb = kzalloc(sizeof(struct st_buffer), GFP_KERNEL);
if (!tb) {
printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
return NULL;
@@ -3889,7 +3889,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
tb->buffer_size = 0;
tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!tb->reserved_pages) {
kfree(tb);
return NULL;
@@ -4290,7 +4290,7 @@ static int st_probe(struct device *dev)
goto out_buffer_free;
}
- tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC);
+ tpnt = kzalloc(sizeof(struct scsi_tape), GFP_KERNEL);
if (tpnt == NULL) {
sdev_printk(KERN_ERR, SDp,
"st: Can't allocate device descriptor.\n");
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a2ec0bc9e9fa..33a4a4dad324 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -395,6 +395,12 @@ MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
+
+static int ring_avail_percent_lowater = 10;
+module_param(ring_avail_percent_lowater, int, S_IRUGO);
+MODULE_PARM_DESC(ring_avail_percent_lowater,
+ "Select a channel if available ring size > this in percent");
+
/*
* Timeout in seconds for all devices managed by this driver.
*/
@@ -1241,7 +1247,7 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
{
u16 slot = 0;
u16 hash_qnum;
- struct cpumask alloced_mask;
+ const struct cpumask *node_mask;
int num_channels, tgt_cpu;
if (stor_device->num_sc == 0)
@@ -1257,10 +1263,13 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
* III. Mapping is persistent.
*/
- cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
- cpumask_of_node(cpu_to_node(q_num)));
+ node_mask = cpumask_of_node(cpu_to_node(q_num));
- num_channels = cpumask_weight(&alloced_mask);
+ num_channels = 0;
+ for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
+ if (cpumask_test_cpu(tgt_cpu, node_mask))
+ num_channels++;
+ }
if (num_channels == 0)
return stor_device->device->channel;
@@ -1268,7 +1277,9 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
while (hash_qnum >= num_channels)
hash_qnum -= num_channels;
- for_each_cpu(tgt_cpu, &alloced_mask) {
+ for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
+ if (!cpumask_test_cpu(tgt_cpu, node_mask))
+ continue;
if (slot == hash_qnum)
break;
slot++;
@@ -1285,9 +1296,9 @@ static int storvsc_do_io(struct hv_device *device,
{
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
- struct vmbus_channel *outgoing_channel;
+ struct vmbus_channel *outgoing_channel, *channel;
int ret = 0;
- struct cpumask alloced_mask;
+ const struct cpumask *node_mask;
int tgt_cpu;
vstor_packet = &request->vstor_packet;
@@ -1301,22 +1312,52 @@ static int storvsc_do_io(struct hv_device *device,
/*
* Select an an appropriate channel to send the request out.
*/
-
if (stor_device->stor_chns[q_num] != NULL) {
outgoing_channel = stor_device->stor_chns[q_num];
- if (outgoing_channel->target_cpu == smp_processor_id()) {
+ if (outgoing_channel->target_cpu == q_num) {
/*
* Ideally, we want to pick a different channel if
* available on the same NUMA node.
*/
- cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
- cpumask_of_node(cpu_to_node(q_num)));
- for_each_cpu_wrap(tgt_cpu, &alloced_mask,
- outgoing_channel->target_cpu + 1) {
- if (tgt_cpu != outgoing_channel->target_cpu) {
- outgoing_channel =
- stor_device->stor_chns[tgt_cpu];
- break;
+ node_mask = cpumask_of_node(cpu_to_node(q_num));
+ for_each_cpu_wrap(tgt_cpu,
+ &stor_device->alloced_cpus, q_num + 1) {
+ if (!cpumask_test_cpu(tgt_cpu, node_mask))
+ continue;
+ if (tgt_cpu == q_num)
+ continue;
+ channel = stor_device->stor_chns[tgt_cpu];
+ if (hv_get_avail_to_write_percent(
+ &channel->outbound)
+ > ring_avail_percent_lowater) {
+ outgoing_channel = channel;
+ goto found_channel;
+ }
+ }
+
+ /*
+ * All the other channels on the same NUMA node are
+ * busy. Try to use the channel on the current CPU
+ */
+ if (hv_get_avail_to_write_percent(
+ &outgoing_channel->outbound)
+ > ring_avail_percent_lowater)
+ goto found_channel;
+
+ /*
+ * If we reach here, all the channels on the current
+ * NUMA node are busy. Try to find a channel in
+ * other NUMA nodes
+ */
+ for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
+ if (cpumask_test_cpu(tgt_cpu, node_mask))
+ continue;
+ channel = stor_device->stor_chns[tgt_cpu];
+ if (hv_get_avail_to_write_percent(
+ &channel->outbound)
+ > ring_avail_percent_lowater) {
+ outgoing_channel = channel;
+ goto found_channel;
}
}
}
@@ -1324,7 +1365,7 @@ static int storvsc_do_io(struct hv_device *device,
outgoing_channel = get_og_chn(stor_device, q_num);
}
-
+found_channel:
vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
@@ -1382,9 +1423,6 @@ static int storvsc_device_alloc(struct scsi_device *sdevice)
static int storvsc_device_configure(struct scsi_device *sdevice)
{
-
- blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
-
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
/* Ensure there are no gaps in presented sgls */
@@ -1732,8 +1770,9 @@ static int storvsc_probe(struct hv_device *device,
(num_cpus - 1) / storvsc_vcpus_per_sub_channel;
}
- scsi_driver.can_queue = (max_outstanding_req_per_channel *
- (max_sub_channels + 1));
+ scsi_driver.can_queue = max_outstanding_req_per_channel *
+ (max_sub_channels + 1) *
+ (100 - ring_avail_percent_lowater) / 100;
host = scsi_host_alloc(&scsi_driver,
sizeof(struct hv_host_device));
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2b38db2eeafa..221820a7c78b 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1098,7 +1098,7 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
}
- if (host->hw_ver.major >= 0x2) {
+ if (host->hw_ver.major == 0x2) {
hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
if (!ufs_qcom_cap_qunipro(host))
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d0a1674915a1..3a811c5f70ba 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -233,8 +233,6 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
@@ -266,6 +264,18 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
+static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+ if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
+ scsi_unblock_requests(hba->host);
+}
+
+static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+ if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
+ scsi_block_requests(hba->host);
+}
+
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
@@ -675,7 +685,24 @@ static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
*/
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos),
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+ ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+ else
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
@@ -1091,12 +1118,12 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
down_write(&hba->clk_scaling_lock);
if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
}
return ret;
@@ -1105,7 +1132,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
up_write(&hba->clk_scaling_lock);
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
}
/**
@@ -1200,16 +1227,13 @@ static int ufshcd_devfreq_target(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start;
bool scale_up, sched_clk_scaling_suspend_work = false;
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
- if ((*freq > 0) && (*freq < UINT_MAX)) {
- dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
- return -EINVAL;
- }
-
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
@@ -1219,7 +1243,13 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!hba->clk_scaling.active_reqs)
sched_clk_scaling_suspend_work = true;
- scale_up = (*freq == UINT_MAX) ? true : false;
+ if (list_empty(clk_list)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ goto out;
+ }
+
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ scale_up = (*freq == clki->max_freq) ? true : false;
if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
ret = 0;
@@ -1287,6 +1317,55 @@ static struct devfreq_dev_profile ufs_devfreq_profile = {
.get_dev_status = ufshcd_devfreq_get_dev_status,
};
+static int ufshcd_devfreq_init(struct ufs_hba *hba)
+{
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ struct devfreq *devfreq;
+ int ret;
+
+ /* Skip devfreq if we don't have any clocks in the list */
+ if (list_empty(clk_list))
+ return 0;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_add(hba->dev, clki->min_freq, 0);
+ dev_pm_opp_add(hba->dev, clki->max_freq, 0);
+
+ devfreq = devfreq_add_device(hba->dev,
+ &ufs_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ NULL);
+ if (IS_ERR(devfreq)) {
+ ret = PTR_ERR(devfreq);
+ dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
+
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+ return ret;
+ }
+
+ hba->devfreq = devfreq;
+
+ return 0;
+}
+
+static void ufshcd_devfreq_remove(struct ufs_hba *hba)
+{
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+
+ if (!hba->devfreq)
+ return;
+
+ devfreq_remove_device(hba->devfreq);
+ hba->devfreq = NULL;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+}
+
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
unsigned long flags;
@@ -1425,7 +1504,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
}
/**
@@ -1481,11 +1560,12 @@ start:
* work and to enable clocks.
*/
case CLKS_OFF:
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- schedule_work(&hba->clk_gating.ungate_work);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
@@ -1671,6 +1751,8 @@ out:
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
+ char wq_name[sizeof("ufs_clk_gating_00")];
+
if (!ufshcd_is_clkgating_allowed(hba))
return;
@@ -1678,6 +1760,11 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+ hba->host->host_no);
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
+ WQ_MEM_RECLAIM);
+
hba->clk_gating.is_enabled = true;
hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
@@ -1705,6 +1792,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
cancel_work_sync(&hba->clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
/* Must be called with host lock acquired */
@@ -3383,6 +3471,52 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
"dme-link-startup: error code %d\n", ret);
return ret;
}
+/**
+ * ufshcd_dme_reset - UIC command for DME_RESET
+ * @hba: per adapter instance
+ *
+ * DME_RESET command is issued in order to reset UniPro stack.
+ * This function now deals with cold reset.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_reset(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_RESET;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-reset: error code %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * ufshcd_dme_enable - UIC command for DME_ENABLE
+ * @hba: per adapter instance
+ *
+ * DME_ENABLE command is issued in order to enable UniPro stack.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_enable(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_ENABLE;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-enable: error code %d\n", ret);
+
+ return ret;
+}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -3906,7 +4040,7 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
* @hba: per-adapter instance
* @desired_pwr_mode: desired power configuration
*/
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode)
{
struct ufs_pa_layer_attr final_params = { 0 };
@@ -3924,6 +4058,7 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
/**
* ufshcd_complete_dev_init() - checks device readiness
@@ -4041,7 +4176,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
}
/**
- * ufshcd_hba_enable - initialize the controller
+ * ufshcd_hba_execute_hce - initialize the controller
* @hba: per adapter instance
*
* The controller resets itself and controller firmware initialization
@@ -4050,7 +4185,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_hba_enable(struct ufs_hba *hba)
+static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
int retry;
@@ -4105,6 +4240,31 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
return 0;
}
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
+ ufshcd_set_link_off(hba);
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+ ret = ufshcd_dme_reset(hba);
+ if (!ret) {
+ ret = ufshcd_dme_enable(hba);
+ if (!ret)
+ ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
+ if (ret)
+ dev_err(hba->dev,
+ "Host controller enable failed with non-hce\n");
+ }
+ } else {
+ ret = ufshcd_hba_execute_hce(hba);
+ }
+
+ return ret;
+}
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
int tx_lanes, i, err = 0;
@@ -4678,7 +4838,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
* false interrupt if device completes another request after resetting
* aggregation and before reading the DB.
*/
- if (ufshcd_is_intr_aggr_allowed(hba))
+ if (ufshcd_is_intr_aggr_allowed(hba) &&
+ !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -4969,6 +5130,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eeh_work);
pm_runtime_get_sync(hba->dev);
+ scsi_block_requests(hba->host);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -4982,6 +5144,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
out:
+ scsi_unblock_requests(hba->host);
pm_runtime_put_sync(hba->dev);
return;
}
@@ -5192,7 +5355,7 @@ skip_err_handling:
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
}
@@ -5294,7 +5457,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
/* handle fatal errors only when link is functional */
if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
/* block commands from scsi mid-layer */
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
@@ -5371,19 +5534,30 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- enabled_intr_status =
- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ /*
+ * There could be max of hba->nutrs reqs in flight and in worst case
+ * if the reqs get finished 1 by 1 after the interrupt status is
+ * read, make sure we handle them by checking the interrupt status
+ * again in a loop until we process all of the reqs before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status) {
+ ufshcd_sl_intr(hba, enabled_intr_status);
+ retval = IRQ_HANDLED;
+ }
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
- }
spin_unlock(hba->host->host_lock);
return retval;
}
@@ -5398,7 +5572,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+ ufshcd_utmrl_clear(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* poll for max. 1 sec to clear door bell register by h/w */
@@ -5958,14 +6132,18 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
int buff_len = hba->desc_size.pwr_desc;
- u8 desc_buf[hba->desc_size.pwr_desc];
+ u8 *desc_buf;
+
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return;
ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
if (ret) {
dev_err(hba->dev,
"%s: Failed reading power descriptor.len = %d ret = %d",
__func__, buff_len, ret);
- return;
+ goto out;
}
hba->init_prefetch_data.icc_level =
@@ -5983,6 +6161,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
"%s: Failed configuring bActiveICCLevel = %d ret = %d",
__func__, hba->init_prefetch_data.icc_level , ret);
+out:
+ kfree(desc_buf);
}
/**
@@ -6052,9 +6232,17 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
struct ufs_dev_desc *dev_desc)
{
int err;
+ size_t buff_len;
u8 model_index;
- u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
- u8 desc_buf[hba->desc_size.dev_desc];
+ u8 *desc_buf;
+
+ buff_len = max_t(size_t, hba->desc_size.dev_desc,
+ QUERY_DESC_MAX_SIZE + 1);
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
if (err) {
@@ -6072,7 +6260,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
- err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
+ /* Zero-pad entire buffer for string termination. */
+ memset(desc_buf, 0, buff_len);
+
+ err = ufshcd_read_string_desc(hba, model_index, desc_buf,
QUERY_DESC_MAX_SIZE, true/*ASCII*/);
if (err) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
@@ -6080,15 +6271,16 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
goto out;
}
- str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
- strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
- min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
+ strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
/* Null terminate the model string */
dev_desc->model[MAX_MODEL_LEN] = '\0';
out:
+ kfree(desc_buf);
return err;
}
@@ -6439,16 +6631,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
sizeof(struct ufs_pa_layer_attr));
hba->clk_scaling.saved_pwr_info.is_valid = true;
if (!hba->devfreq) {
- hba->devfreq = devm_devfreq_add_device(hba->dev,
- &ufs_devfreq_profile,
- "simple_ondemand",
- NULL);
- if (IS_ERR(hba->devfreq)) {
- ret = PTR_ERR(hba->devfreq);
- dev_err(hba->dev, "Unable to register with devfreq %d\n",
- ret);
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
goto out;
- }
}
hba->clk_scaling.is_allowed = true;
}
@@ -6799,9 +6984,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (list_empty(head))
goto out;
- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
- if (ret)
- return ret;
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * before disabling the clocks managed here.
+ */
+ if (!on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+ }
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
@@ -6825,9 +7017,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
- if (ret)
- return ret;
+ /*
+ * vendor specific setup_clocks ops may depend on clocks managed by
+ * this standard driver hence call the vendor specific setup_clocks
+ * after enabling the clocks managed here.
+ */
+ if (on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+ }
out:
if (ret) {
@@ -6992,6 +7191,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->devfreq)
ufshcd_suspend_clkscaling(hba);
destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
}
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
@@ -7904,7 +8104,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
-
+ atomic_set(&hba->scsi_block_reqs_cnt, 0);
/*
* We are assuming that device wasn't put in sleep/power-down
* state exclusively during the boot stage before kernel.
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 8110dcd04d22..f51758f1e5cc 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -362,6 +362,7 @@ struct ufs_clk_gating {
struct device_attribute enable_attr;
bool is_enabled;
int active_reqs;
+ struct workqueue_struct *clk_gating_workq;
};
struct ufs_saved_pwr_info {
@@ -499,6 +500,7 @@ struct ufs_stats {
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -595,6 +597,22 @@ struct ufs_hba {
*/
#define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80
+ /*
+ * Clear handling for transfer/task request list is just opposite.
+ */
+ #define UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR 0x100
+
+ /*
+ * This quirk needs to be enabled if host controller doesn't allow
+ * that the interrupt aggregation timer and counter are reset by s/w.
+ */
+ #define UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR 0x200
+
+ /*
+ * This quirk needs to be enabled if host controller cannot be
+ * enabled via HCE register.
+ */
+ #define UFSHCI_QUIRK_BROKEN_HCE 0x400
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
@@ -683,6 +701,7 @@ struct ufs_hba {
struct rw_semaphore clk_scaling_lock;
struct ufs_desc_size desc_size;
+ atomic_t scsi_block_reqs_cnt;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -789,6 +808,8 @@ extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
u32 *mib_val, u8 peer);
+extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode);
/* UIC command interfaces for DME primitives */
#define DME_LOCAL 0
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 2ba2b7b47f41..974bfb3f30f4 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -978,18 +978,7 @@ static struct pci_driver wd719x_pci_driver = {
.remove = wd719x_pci_remove,
};
-static int __init wd719x_init(void)
-{
- return pci_register_driver(&wd719x_pci_driver);
-}
-
-static void __exit wd719x_exit(void)
-{
- pci_unregister_driver(&wd719x_pci_driver);
-}
-
-module_init(wd719x_init);
-module_exit(wd719x_exit);
+module_pci_driver(wd719x_pci_driver);
MODULE_DESCRIPTION("Western Digital WD7193/7197/7296 SCSI driver");
MODULE_AUTHOR("Ondrej Zary, Aaron Dewell, Juergen Gaertner");
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
new file mode 100644
index 000000000000..bb70882e6b56
--- /dev/null
+++ b/drivers/scsi/zorro_esp.c
@@ -0,0 +1,1172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ESP front-end for Amiga ZORRO SCSI systems.
+ *
+ * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
+ *
+ * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
+ * migration to ESP SCSI core
+ *
+ * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
+ * Blizzard 1230 DMA and probe function fixes
+ *
+ * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
+ */
+/*
+ * ZORRO bus code from:
+ */
+/*
+ * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
+ * Amiga MacroSystemUS WarpEngine SCSI controller.
+ * Amiga Technologies/DKB A4091 SCSI controller.
+ *
+ * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * plus modifications of the 53c7xx.c driver to support the Amiga.
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/zorro.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+
+#include "esp_scsi.h"
+
+MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
+MODULE_DESCRIPTION("Amiga Zorro NCR5C9x (ESP) driver");
+MODULE_LICENSE("GPL");
+
+/* per-board register layout definitions */
+
+/* Blizzard 1230 DMA interface */
+
+struct blz1230_dma_registers {
+ unsigned char dma_addr; /* DMA address [0x0000] */
+ unsigned char dmapad2[0x7fff];
+ unsigned char dma_latch; /* DMA latch [0x8000] */
+};
+
+/* Blizzard 1230II DMA interface */
+
+struct blz1230II_dma_registers {
+ unsigned char dma_addr; /* DMA address [0x0000] */
+ unsigned char dmapad2[0xf];
+ unsigned char dma_latch; /* DMA latch [0x0010] */
+};
+
+/* Blizzard 2060 DMA interface */
+
+struct blz2060_dma_registers {
+ unsigned char dma_led_ctrl; /* DMA led control [0x000] */
+ unsigned char dmapad1[0x0f];
+ unsigned char dma_addr0; /* DMA address (MSB) [0x010] */
+ unsigned char dmapad2[0x03];
+ unsigned char dma_addr1; /* DMA address [0x014] */
+ unsigned char dmapad3[0x03];
+ unsigned char dma_addr2; /* DMA address [0x018] */
+ unsigned char dmapad4[0x03];
+ unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */
+};
+
+/* DMA control bits */
+#define DMA_WRITE 0x80000000
+
+/* Cyberstorm DMA interface */
+
+struct cyber_dma_registers {
+ unsigned char dma_addr0; /* DMA address (MSB) [0x000] */
+ unsigned char dmapad1[1];
+ unsigned char dma_addr1; /* DMA address [0x002] */
+ unsigned char dmapad2[1];
+ unsigned char dma_addr2; /* DMA address [0x004] */
+ unsigned char dmapad3[1];
+ unsigned char dma_addr3; /* DMA address (LSB) [0x006] */
+ unsigned char dmapad4[0x3fb];
+ unsigned char cond_reg; /* DMA cond (ro) [0x402] */
+#define ctrl_reg cond_reg /* DMA control (wo) [0x402] */
+};
+
+/* DMA control bits */
+#define CYBER_DMA_WRITE 0x40 /* DMA direction. 1 = write */
+#define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */
+
+/* DMA status bits */
+#define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? */
+
+/* The CyberStorm II DMA interface */
+struct cyberII_dma_registers {
+ unsigned char cond_reg; /* DMA cond (ro) [0x000] */
+#define ctrl_reg cond_reg /* DMA control (wo) [0x000] */
+ unsigned char dmapad4[0x3f];
+ unsigned char dma_addr0; /* DMA address (MSB) [0x040] */
+ unsigned char dmapad1[3];
+ unsigned char dma_addr1; /* DMA address [0x044] */
+ unsigned char dmapad2[3];
+ unsigned char dma_addr2; /* DMA address [0x048] */
+ unsigned char dmapad3[3];
+ unsigned char dma_addr3; /* DMA address (LSB) [0x04c] */
+};
+
+/* Fastlane DMA interface */
+
+struct fastlane_dma_registers {
+ unsigned char cond_reg; /* DMA status (ro) [0x0000] */
+#define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */
+ char dmapad1[0x3f];
+ unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */
+};
+
+/*
+ * The controller registers can be found in the Z2 config area at these
+ * offsets:
+ */
+#define FASTLANE_ESP_ADDR 0x1000001
+
+/* DMA status bits */
+#define FASTLANE_DMA_MINT 0x80
+#define FASTLANE_DMA_IACT 0x40
+#define FASTLANE_DMA_CREQ 0x20
+
+/* DMA control bits */
+#define FASTLANE_DMA_FCODE 0xa0
+#define FASTLANE_DMA_MASK 0xf3
+#define FASTLANE_DMA_WRITE 0x08 /* 1 = write */
+#define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */
+#define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? */
+#define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */
+
+/*
+ * private data used for driver
+ */
+struct zorro_esp_priv {
+ struct esp *esp; /* our ESP instance - for Scsi_host* */
+ void __iomem *board_base; /* virtual address (Zorro III board) */
+ int error; /* PIO error flag */
+ int zorro3; /* board is Zorro III */
+ unsigned char ctrl_data; /* shadow copy of ctrl_reg */
+};
+
+/*
+ * On all implementations except for the Oktagon, padding between ESP
+ * registers is three bytes.
+ * On Oktagon, it is one byte - use a different accessor there.
+ *
+ * Oktagon needs PDMA - currently unsupported!
+ */
+
+static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+ writeb(val, esp->regs + (reg * 4UL));
+}
+
+static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
+{
+ return readb(esp->regs + (reg * 4UL));
+}
+
+static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
+{
+ return dma_map_single(esp->dev, buf, sz, dir);
+}
+
+static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ return dma_map_sg(esp->dev, sg, num_sg, dir);
+}
+
+static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
+{
+ dma_unmap_single(esp->dev, addr, sz, dir);
+}
+
+static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ dma_unmap_sg(esp->dev, sg, num_sg, dir);
+}
+
+static int zorro_esp_irq_pending(struct esp *esp)
+{
+ /* check ESP status register; DMA has no status reg. */
+ if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+
+ return 0;
+}
+
+static int cyber_esp_irq_pending(struct esp *esp)
+{
+ struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
+ unsigned char dma_status = readb(&dregs->cond_reg);
+
+ /* It's important to check the DMA IRQ bit in the correct way! */
+ return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
+ (dma_status & CYBER_DMA_HNDL_INTR));
+}
+
+static int fastlane_esp_irq_pending(struct esp *esp)
+{
+ struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
+ unsigned char dma_status;
+
+ dma_status = readb(&dregs->cond_reg);
+
+ if (dma_status & FASTLANE_DMA_IACT)
+ return 0; /* not our IRQ */
+
+ /* Return non-zero if ESP requested IRQ */
+ return (
+ (dma_status & FASTLANE_DMA_CREQ) &&
+ (!(dma_status & FASTLANE_DMA_MINT)) &&
+ (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
+}
+
+static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
+ u32 dma_len)
+{
+ return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
+}
+
+static void zorro_esp_reset_dma(struct esp *esp)
+{
+ /* nothing to do here */
+}
+
+static void zorro_esp_dma_drain(struct esp *esp)
+{
+ /* nothing to do here */
+}
+
+static void zorro_esp_dma_invalidate(struct esp *esp)
+{
+ /* nothing to do here */
+}
+
+static void fastlane_esp_dma_invalidate(struct esp *esp)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+ struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
+ unsigned char *ctrl_data = &zep->ctrl_data;
+
+ *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
+ writeb(0, &dregs->clear_strobe);
+ z_writel(0, zep->board_base);
+}
+
+/*
+ * Programmed IO routines follow.
+ */
+
+static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
+ & ESP_FF_FBYTES;
+
+ if (fbytes)
+ return fbytes;
+
+ udelay(2);
+ } while (--i);
+
+ pr_err("FIFO is empty (sreg %02x)\n",
+ zorro_esp_read8(esp, ESP_STATUS));
+ return 0;
+}
+
+static inline int zorro_esp_wait_for_intr(struct esp *esp)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+ int i = 500000;
+
+ do {
+ esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
+ if (esp->sreg & ESP_STAT_INTR)
+ return 0;
+
+ udelay(2);
+ } while (--i);
+
+ pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
+ zep->error = 1;
+ return 1;
+}
+
+/*
+ * PIO macros as used in mac_esp.c.
+ * Note that addr and fifo arguments are local-scope variables declared
+ * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
+ * and addr and fifo are referenced in each use of the macros so there
+ * is no need to pass them as macro parameters.
+ */
+#define ZORRO_ESP_PIO_LOOP(operands, reg1) \
+ asm volatile ( \
+ "1: moveb " operands "\n" \
+ " subqw #1,%1 \n" \
+ " jbne 1b \n" \
+ : "+a" (addr), "+r" (reg1) \
+ : "a" (fifo));
+
+#define ZORRO_ESP_PIO_FILL(operands, reg1) \
+ asm volatile ( \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " moveb " operands "\n" \
+ " subqw #8,%1 \n" \
+ " subqw #8,%1 \n" \
+ : "+a" (addr), "+r" (reg1) \
+ : "a" (fifo));
+
+#define ZORRO_ESP_FIFO_SIZE 16
+
+static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+ u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+ cmd &= ~ESP_CMD_DMA;
+
+ if (write) {
+ u8 *dst = (u8 *)addr;
+ u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
+ scsi_esp_cmd(esp, cmd);
+
+ while (1) {
+ if (!zorro_esp_wait_for_fifo(esp))
+ break;
+
+ *dst++ = zorro_esp_read8(esp, ESP_FDATA);
+ --esp_count;
+
+ if (!esp_count)
+ break;
+
+ if (zorro_esp_wait_for_intr(esp))
+ break;
+
+ if ((esp->sreg & ESP_STAT_PMASK) != phase)
+ break;
+
+ esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
+ if (esp->ireg & mask) {
+ zep->error = 1;
+ break;
+ }
+
+ if (phase == ESP_MIP)
+ scsi_esp_cmd(esp, ESP_CMD_MOK);
+
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ }
+ } else { /* unused, as long as we only handle MIP here */
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ if (esp_count >= ZORRO_ESP_FIFO_SIZE)
+ ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
+ else
+ ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
+
+ scsi_esp_cmd(esp, cmd);
+
+ while (esp_count) {
+ unsigned int n;
+
+ if (zorro_esp_wait_for_intr(esp))
+ break;
+
+ if ((esp->sreg & ESP_STAT_PMASK) != phase)
+ break;
+
+ esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
+ if (esp->ireg & ~ESP_INTR_BSERV) {
+ zep->error = 1;
+ break;
+ }
+
+ n = ZORRO_ESP_FIFO_SIZE -
+ (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
+ if (n > esp_count)
+ n = esp_count;
+
+ if (n == ZORRO_ESP_FIFO_SIZE)
+ ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
+ else {
+ esp_count -= n;
+ ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
+ }
+
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ }
+ }
+}
+
+/* Blizzard 1230/60 SCSI-IV DMA */
+
+static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
+ u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+ struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+ zep->error = 0;
+ /*
+ * Use PIO if transferring message bytes to esp->command_block_dma.
+ * PIO requires a virtual address, so substitute esp->command_block
+ * for addr.
+ */
+ if (phase == ESP_MIP && addr == esp->command_block_dma) {
+ zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+ esp_count, dma_count, write, cmd);
+ return;
+ }
+
+ if (write)
+ /* DMA receive */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_FROM_DEVICE);
+ else
+ /* DMA send */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_TO_DEVICE);
+
+ addr >>= 1;
+ if (write)
+ addr &= ~(DMA_WRITE);
+ else
+ addr |= DMA_WRITE;
+
+ writeb((addr >> 24) & 0xff, &dregs->dma_latch);
+ writeb((addr >> 24) & 0xff, &dregs->dma_addr);
+ writeb((addr >> 16) & 0xff, &dregs->dma_addr);
+ writeb((addr >> 8) & 0xff, &dregs->dma_addr);
+ writeb(addr & 0xff, &dregs->dma_addr);
+
+ scsi_esp_cmd(esp, ESP_CMD_DMA);
+ zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+ scsi_esp_cmd(esp, cmd);
+}
+
+/* Blizzard 1230-II DMA */
+
+static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
+ u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+ struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+ zep->error = 0;
+ /* Use PIO if transferring message bytes to esp->command_block_dma */
+ if (phase == ESP_MIP && addr == esp->command_block_dma) {
+ zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+ esp_count, dma_count, write, cmd);
+ return;
+ }
+
+ if (write)
+ /* DMA receive */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_FROM_DEVICE);
+ else
+ /* DMA send */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_TO_DEVICE);
+
+ addr >>= 1;
+ if (write)
+ addr &= ~(DMA_WRITE);
+ else
+ addr |= DMA_WRITE;
+
+ writeb((addr >> 24) & 0xff, &dregs->dma_latch);
+ writeb((addr >> 16) & 0xff, &dregs->dma_addr);
+ writeb((addr >> 8) & 0xff, &dregs->dma_addr);
+ writeb(addr & 0xff, &dregs->dma_addr);
+
+ scsi_esp_cmd(esp, ESP_CMD_DMA);
+ zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+ scsi_esp_cmd(esp, cmd);
+}
+
+/* Blizzard 2060 DMA */
+
+static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
+ u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+ struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+ zep->error = 0;
+ /* Use PIO if transferring message bytes to esp->command_block_dma */
+ if (phase == ESP_MIP && addr == esp->command_block_dma) {
+ zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+ esp_count, dma_count, write, cmd);
+ return;
+ }
+
+ if (write)
+ /* DMA receive */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_FROM_DEVICE);
+ else
+ /* DMA send */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_TO_DEVICE);
+
+ addr >>= 1;
+ if (write)
+ addr &= ~(DMA_WRITE);
+ else
+ addr |= DMA_WRITE;
+
+ writeb(addr & 0xff, &dregs->dma_addr3);
+ writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
+ writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
+ writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
+
+ scsi_esp_cmd(esp, ESP_CMD_DMA);
+ zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+ scsi_esp_cmd(esp, cmd);
+}
+
+/* Cyberstorm I DMA */
+
+static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
+ u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+ struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+ unsigned char *ctrl_data = &zep->ctrl_data;
+
+ zep->error = 0;
+ /* Use PIO if transferring message bytes to esp->command_block_dma */
+ if (phase == ESP_MIP && addr == esp->command_block_dma) {
+ zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+ esp_count, dma_count, write, cmd);
+ return;
+ }
+
+ zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+ if (write) {
+ /* DMA receive */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_FROM_DEVICE);
+ addr &= ~(1);
+ } else {
+ /* DMA send */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_TO_DEVICE);
+ addr |= 1;
+ }
+
+ writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
+ writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
+ writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
+ writeb(addr & 0xff, &dregs->dma_addr3);
+
+ if (write)
+ *ctrl_data &= ~(CYBER_DMA_WRITE);
+ else
+ *ctrl_data |= CYBER_DMA_WRITE;
+
+ *ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
+
+ writeb(*ctrl_data, &dregs->ctrl_reg);
+
+ scsi_esp_cmd(esp, cmd);
+}
+
+/* Cyberstorm II DMA */
+
+static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
+ u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+ struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+ zep->error = 0;
+ /* Use PIO if transferring message bytes to esp->command_block_dma */
+ if (phase == ESP_MIP && addr == esp->command_block_dma) {
+ zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+ esp_count, dma_count, write, cmd);
+ return;
+ }
+
+ zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+ if (write) {
+ /* DMA receive */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_FROM_DEVICE);
+ addr &= ~(1);
+ } else {
+ /* DMA send */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_TO_DEVICE);
+ addr |= 1;
+ }
+
+ writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
+ writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
+ writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
+ writeb(addr & 0xff, &dregs->dma_addr3);
+
+ scsi_esp_cmd(esp, cmd);
+}
+
+/* Fastlane DMA */
+
+static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
+ u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+ struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+ unsigned char *ctrl_data = &zep->ctrl_data;
+
+ zep->error = 0;
+ /* Use PIO if transferring message bytes to esp->command_block_dma */
+ if (phase == ESP_MIP && addr == esp->command_block_dma) {
+ zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+ esp_count, dma_count, write, cmd);
+ return;
+ }
+
+ zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+ if (write) {
+ /* DMA receive */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_FROM_DEVICE);
+ addr &= ~(1);
+ } else {
+ /* DMA send */
+ dma_sync_single_for_device(esp->dev, addr, esp_count,
+ DMA_TO_DEVICE);
+ addr |= 1;
+ }
+
+ writeb(0, &dregs->clear_strobe);
+ z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));
+
+ if (write) {
+ *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
+ FASTLANE_DMA_ENABLE;
+ } else {
+ *ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
+ FASTLANE_DMA_ENABLE |
+ FASTLANE_DMA_WRITE);
+ }
+
+ writeb(*ctrl_data, &dregs->ctrl_reg);
+
+ scsi_esp_cmd(esp, cmd);
+}
+
+static int zorro_esp_dma_error(struct esp *esp)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+
+ /* check for error in case we've been doing PIO */
+ if (zep->error == 1)
+ return 1;
+
+ /* do nothing - there seems to be no way to check for DMA errors */
+ return 0;
+}
+
+/* per-board ESP driver ops */
+
+static const struct esp_driver_ops blz1230_esp_ops = {
+ .esp_write8 = zorro_esp_write8,
+ .esp_read8 = zorro_esp_read8,
+ .map_single = zorro_esp_map_single,
+ .map_sg = zorro_esp_map_sg,
+ .unmap_single = zorro_esp_unmap_single,
+ .unmap_sg = zorro_esp_unmap_sg,
+ .irq_pending = zorro_esp_irq_pending,
+ .dma_length_limit = zorro_esp_dma_length_limit,
+ .reset_dma = zorro_esp_reset_dma,
+ .dma_drain = zorro_esp_dma_drain,
+ .dma_invalidate = zorro_esp_dma_invalidate,
+ .send_dma_cmd = zorro_esp_send_blz1230_dma_cmd,
+ .dma_error = zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops blz1230II_esp_ops = {
+ .esp_write8 = zorro_esp_write8,
+ .esp_read8 = zorro_esp_read8,
+ .map_single = zorro_esp_map_single,
+ .map_sg = zorro_esp_map_sg,
+ .unmap_single = zorro_esp_unmap_single,
+ .unmap_sg = zorro_esp_unmap_sg,
+ .irq_pending = zorro_esp_irq_pending,
+ .dma_length_limit = zorro_esp_dma_length_limit,
+ .reset_dma = zorro_esp_reset_dma,
+ .dma_drain = zorro_esp_dma_drain,
+ .dma_invalidate = zorro_esp_dma_invalidate,
+ .send_dma_cmd = zorro_esp_send_blz1230II_dma_cmd,
+ .dma_error = zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops blz2060_esp_ops = {
+ .esp_write8 = zorro_esp_write8,
+ .esp_read8 = zorro_esp_read8,
+ .map_single = zorro_esp_map_single,
+ .map_sg = zorro_esp_map_sg,
+ .unmap_single = zorro_esp_unmap_single,
+ .unmap_sg = zorro_esp_unmap_sg,
+ .irq_pending = zorro_esp_irq_pending,
+ .dma_length_limit = zorro_esp_dma_length_limit,
+ .reset_dma = zorro_esp_reset_dma,
+ .dma_drain = zorro_esp_dma_drain,
+ .dma_invalidate = zorro_esp_dma_invalidate,
+ .send_dma_cmd = zorro_esp_send_blz2060_dma_cmd,
+ .dma_error = zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops cyber_esp_ops = {
+ .esp_write8 = zorro_esp_write8,
+ .esp_read8 = zorro_esp_read8,
+ .map_single = zorro_esp_map_single,
+ .map_sg = zorro_esp_map_sg,
+ .unmap_single = zorro_esp_unmap_single,
+ .unmap_sg = zorro_esp_unmap_sg,
+ .irq_pending = cyber_esp_irq_pending,
+ .dma_length_limit = zorro_esp_dma_length_limit,
+ .reset_dma = zorro_esp_reset_dma,
+ .dma_drain = zorro_esp_dma_drain,
+ .dma_invalidate = zorro_esp_dma_invalidate,
+ .send_dma_cmd = zorro_esp_send_cyber_dma_cmd,
+ .dma_error = zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops cyberII_esp_ops = {
+ .esp_write8 = zorro_esp_write8,
+ .esp_read8 = zorro_esp_read8,
+ .map_single = zorro_esp_map_single,
+ .map_sg = zorro_esp_map_sg,
+ .unmap_single = zorro_esp_unmap_single,
+ .unmap_sg = zorro_esp_unmap_sg,
+ .irq_pending = zorro_esp_irq_pending,
+ .dma_length_limit = zorro_esp_dma_length_limit,
+ .reset_dma = zorro_esp_reset_dma,
+ .dma_drain = zorro_esp_dma_drain,
+ .dma_invalidate = zorro_esp_dma_invalidate,
+ .send_dma_cmd = zorro_esp_send_cyberII_dma_cmd,
+ .dma_error = zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops fastlane_esp_ops = {
+ .esp_write8 = zorro_esp_write8,
+ .esp_read8 = zorro_esp_read8,
+ .map_single = zorro_esp_map_single,
+ .map_sg = zorro_esp_map_sg,
+ .unmap_single = zorro_esp_unmap_single,
+ .unmap_sg = zorro_esp_unmap_sg,
+ .irq_pending = fastlane_esp_irq_pending,
+ .dma_length_limit = zorro_esp_dma_length_limit,
+ .reset_dma = zorro_esp_reset_dma,
+ .dma_drain = zorro_esp_dma_drain,
+ .dma_invalidate = fastlane_esp_dma_invalidate,
+ .send_dma_cmd = zorro_esp_send_fastlane_dma_cmd,
+ .dma_error = zorro_esp_dma_error,
+};
+
+/* Zorro driver config data */
+
+struct zorro_driver_data {
+ const char *name;
+ unsigned long offset;
+ unsigned long dma_offset;
+ int absolute; /* offset is absolute address */
+ int scsi_option;
+ const struct esp_driver_ops *esp_ops;
+};
+
+/* board types */
+
+enum {
+ ZORRO_BLZ1230,
+ ZORRO_BLZ1230II,
+ ZORRO_BLZ2060,
+ ZORRO_CYBER,
+ ZORRO_CYBERII,
+ ZORRO_FASTLANE,
+};
+
+/* per-board config data */
+
+static const struct zorro_driver_data zorro_esp_boards[] = {
+ [ZORRO_BLZ1230] = {
+ .name = "Blizzard 1230",
+ .offset = 0x8000,
+ .dma_offset = 0x10000,
+ .scsi_option = 1,
+ .esp_ops = &blz1230_esp_ops,
+ },
+ [ZORRO_BLZ1230II] = {
+ .name = "Blizzard 1230II",
+ .offset = 0x10000,
+ .dma_offset = 0x10021,
+ .scsi_option = 1,
+ .esp_ops = &blz1230II_esp_ops,
+ },
+ [ZORRO_BLZ2060] = {
+ .name = "Blizzard 2060",
+ .offset = 0x1ff00,
+ .dma_offset = 0x1ffe0,
+ .esp_ops = &blz2060_esp_ops,
+ },
+ [ZORRO_CYBER] = {
+ .name = "CyberStormI",
+ .offset = 0xf400,
+ .dma_offset = 0xf800,
+ .esp_ops = &cyber_esp_ops,
+ },
+ [ZORRO_CYBERII] = {
+ .name = "CyberStormII",
+ .offset = 0x1ff03,
+ .dma_offset = 0x1ff43,
+ .scsi_option = 1,
+ .esp_ops = &cyberII_esp_ops,
+ },
+ [ZORRO_FASTLANE] = {
+ .name = "Fastlane",
+ .offset = 0x1000001,
+ .dma_offset = 0x1000041,
+ .esp_ops = &fastlane_esp_ops,
+ },
+};
+
+static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
+ { /* Blizzard 1230 IV */
+ .id = ZORRO_ID(PHASE5, 0x11, 0),
+ .driver_data = ZORRO_BLZ1230,
+ },
+ { /* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
+ .id = ZORRO_ID(PHASE5, 0x0B, 0),
+ .driver_data = ZORRO_BLZ1230II,
+ },
+ { /* Blizzard 2060 */
+ .id = ZORRO_ID(PHASE5, 0x18, 0),
+ .driver_data = ZORRO_BLZ2060,
+ },
+ { /* Cyberstorm */
+ .id = ZORRO_ID(PHASE5, 0x0C, 0),
+ .driver_data = ZORRO_CYBER,
+ },
+ { /* Cyberstorm II */
+ .id = ZORRO_ID(PHASE5, 0x19, 0),
+ .driver_data = ZORRO_CYBERII,
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);
+
+static int zorro_esp_probe(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct scsi_host_template *tpnt = &scsi_esp_template;
+ struct Scsi_Host *host;
+ struct esp *esp;
+ const struct zorro_driver_data *zdd;
+ struct zorro_esp_priv *zep;
+ unsigned long board, ioaddr, dmaaddr;
+ int err;
+
+ board = zorro_resource_start(z);
+ zdd = &zorro_esp_boards[ent->driver_data];
+
+ pr_info("%s found at address 0x%lx.\n", zdd->name, board);
+
+ zep = kzalloc(sizeof(*zep), GFP_KERNEL);
+ if (!zep) {
+ pr_err("Can't allocate device private data!\n");
+ return -ENOMEM;
+ }
+
+ /* let's figure out whether we have a Zorro II or Zorro III board */
+ if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
+ if (board > 0xffffff)
+ zep->zorro3 = 1;
+ } else {
+ /*
+ * Even though most of these boards identify as Zorro II,
+ * they are in fact CPU expansion slot boards and have full
+ * access to all of memory. Fix up DMA bitmask here.
+ */
+ z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ }
+
+ /*
+ * If Zorro III and ID matches Fastlane, our device table entry
+ * contains data for the Blizzard 1230 II board which does share the
+ * same ID. Fix up device table entry here.
+ * TODO: Some Cyberstorm060 boards also share this ID but would need
+ * to use the Cyberstorm I driver data ... we catch this by checking
+ * for presence of ESP chip later, but don't try to fix up yet.
+ */
+ if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
+ pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
+ zdd->name, board);
+ zdd = &zorro_esp_boards[ZORRO_FASTLANE];
+ }
+
+ if (zdd->absolute) {
+ ioaddr = zdd->offset;
+ dmaaddr = zdd->dma_offset;
+ } else {
+ ioaddr = board + zdd->offset;
+ dmaaddr = board + zdd->dma_offset;
+ }
+
+ if (!zorro_request_device(z, zdd->name)) {
+ pr_err("cannot reserve region 0x%lx, abort\n",
+ board);
+ err = -EBUSY;
+ goto fail_free_zep;
+ }
+
+ host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+ if (!host) {
+ pr_err("No host detected; board configuration problem?\n");
+ err = -ENOMEM;
+ goto fail_release_device;
+ }
+
+ host->base = ioaddr;
+ host->this_id = 7;
+
+ esp = shost_priv(host);
+ esp->host = host;
+ esp->dev = &z->dev;
+
+ esp->scsi_id = host->this_id;
+ esp->scsi_id_mask = (1 << esp->scsi_id);
+
+ esp->cfreq = 40000000;
+
+ zep->esp = esp;
+
+ dev_set_drvdata(esp->dev, zep);
+
+ /* additional setup required for Fastlane */
+ if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
+ /* map full address space up to ESP base for DMA */
+ zep->board_base = ioremap_nocache(board,
+ FASTLANE_ESP_ADDR-1);
+ if (!zep->board_base) {
+ pr_err("Cannot allocate board address space\n");
+ err = -ENOMEM;
+ goto fail_free_host;
+ }
+ /* initialize DMA control shadow register */
+ zep->ctrl_data = (FASTLANE_DMA_FCODE |
+ FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
+ }
+
+ esp->ops = zdd->esp_ops;
+
+ if (ioaddr > 0xffffff)
+ esp->regs = ioremap_nocache(ioaddr, 0x20);
+ else
+ /* ZorroII address space remapped nocache by early startup */
+ esp->regs = ZTWO_VADDR(ioaddr);
+
+ if (!esp->regs) {
+ err = -ENOMEM;
+ goto fail_unmap_fastlane;
+ }
+
+ /* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
+ if (zdd->scsi_option) {
+ zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
+ if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
+ err = -ENODEV;
+ goto fail_unmap_regs;
+ }
+ }
+
+ if (zep->zorro3) {
+ /*
+ * Only Fastlane Z3 for now - add switch for correct struct
+ * dma_registers size if adding any more
+ */
+ esp->dma_regs = ioremap_nocache(dmaaddr,
+ sizeof(struct fastlane_dma_registers));
+ } else
+ /* ZorroII address space remapped nocache by early startup */
+ esp->dma_regs = ZTWO_VADDR(dmaaddr);
+
+ if (!esp->dma_regs) {
+ err = -ENOMEM;
+ goto fail_unmap_regs;
+ }
+
+ esp->command_block = dma_alloc_coherent(esp->dev, 16,
+ &esp->command_block_dma,
+ GFP_KERNEL);
+
+ if (!esp->command_block) {
+ err = -ENOMEM;
+ goto fail_unmap_dma_regs;
+ }
+
+ host->irq = IRQ_AMIGA_PORTS;
+ err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
+ "Amiga Zorro ESP", esp);
+ if (err < 0) {
+ err = -ENODEV;
+ goto fail_free_command_block;
+ }
+
+ /* register the chip */
+ err = scsi_esp_register(esp, &z->dev);
+
+ if (err) {
+ err = -ENOMEM;
+ goto fail_free_irq;
+ }
+
+ return 0;
+
+fail_free_irq:
+ free_irq(host->irq, esp);
+
+fail_free_command_block:
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+
+fail_unmap_dma_regs:
+ if (zep->zorro3)
+ iounmap(esp->dma_regs);
+
+fail_unmap_regs:
+ if (ioaddr > 0xffffff)
+ iounmap(esp->regs);
+
+fail_unmap_fastlane:
+ if (zep->zorro3)
+ iounmap(zep->board_base);
+
+fail_free_host:
+ scsi_host_put(host);
+
+fail_release_device:
+ zorro_release_device(z);
+
+fail_free_zep:
+ kfree(zep);
+
+ return err;
+}
+
+static void zorro_esp_remove(struct zorro_dev *z)
+{
+ struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
+ struct esp *esp = zep->esp;
+ struct Scsi_Host *host = esp->host;
+
+ scsi_esp_unregister(esp);
+
+ free_irq(host->irq, esp);
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+
+ if (zep->zorro3) {
+ iounmap(zep->board_base);
+ iounmap(esp->dma_regs);
+ }
+
+ if (host->base > 0xffffff)
+ iounmap(esp->regs);
+
+ scsi_host_put(host);
+
+ zorro_release_device(z);
+
+ kfree(zep);
+}
+
+static struct zorro_driver zorro_esp_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = zorro_esp_zorro_tbl,
+ .probe = zorro_esp_probe,
+ .remove = zorro_esp_remove,
+};
+
+static int __init zorro_esp_scsi_init(void)
+{
+ return zorro_register_driver(&zorro_esp_driver);
+}
+
+static void __exit zorro_esp_scsi_exit(void)
+{
+ zorro_unregister_driver(&zorro_esp_driver);
+}
+
+module_init(zorro_esp_scsi_init);
+module_exit(zorro_esp_scsi_exit);
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 111c44fc1c12..f874baaf934c 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -14,6 +14,7 @@
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/of_address.h>
+#include <linux/of_clk.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/regmap.h>
@@ -400,8 +401,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->info = pd_info;
pd->pmu = pmu;
- pd->num_clks = of_count_phandle_with_args(node, "clocks",
- "#clock-cells");
+ pd->num_clks = of_clk_get_parent_count(node);
if (pd->num_clks > 0) {
pd->clks = devm_kcalloc(pmu->dev, pd->num_clks,
sizeof(*pd->clks), GFP_KERNEL);
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 3e3d12ce4587..2d6f3fcf3211 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -31,6 +31,7 @@
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_clk.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -731,7 +732,7 @@ static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
unsigned int i, count;
int err;
- count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ count = of_clk_get_parent_count(np);
if (count == 0)
return -ENODEV;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index d5926f0d3f6c..75a480497d22 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,10 +24,6 @@ menuconfig STAGING
if STAGING
-source "drivers/staging/ipx/Kconfig"
-
-source "drivers/staging/ncpfs/Kconfig"
-
source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/comedi/Kconfig"
@@ -84,8 +80,6 @@ source "drivers/staging/netlogic/Kconfig"
source "drivers/staging/mt29f_spinand/Kconfig"
-source "drivers/staging/lustre/Kconfig"
-
source "drivers/staging/dgnc/Kconfig"
source "drivers/staging/gs_fpgaboot/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 919753c3d3f6..e84959a8a684 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -3,8 +3,6 @@
obj-y += media/
obj-y += typec/
-obj-$(CONFIG_IPX) += ipx/
-obj-$(CONFIG_NCP_FS) += ncpfs/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
@@ -32,7 +30,6 @@ obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
-obj-$(CONFIG_LNET) += lustre/
obj-$(CONFIG_DGNC) += dgnc/
obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 71a50b99caff..17c5587805f5 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -6,7 +6,7 @@ config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
depends on SHMEM
- ---help---
+ help
The ashmem subsystem is a new shared memory allocator, similar to
POSIX SHM but with different behavior and sporting a simpler
file-based API.
@@ -14,6 +14,15 @@ config ASHMEM
It is, in theory, a good memory allocator for low-memory devices,
because it can discard shared memory units when under memory pressure.
+config ANDROID_VSOC
+ tristate "Android Virtual SoC support"
+ default n
+ depends on PCI_MSI
+ help
+ This option adds support for the Virtual SoC driver needed to boot
+ a 'cuttlefish' Android image inside QEMU. The driver interacts with
+ a QEMU ivshmem device. If built as a module, it will be called vsoc.
+
source "drivers/staging/android/ion/Kconfig"
endif # if ANDROID
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 7cf1564a49a5..90e6154f11a4 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -3,3 +3,4 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
+obj-$(CONFIG_ANDROID_VSOC) += vsoc.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 687e0eac85bf..fbf015cc6d62 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -11,5 +11,14 @@ ion/
- Split /dev/ion up into multiple nodes (e.g. /dev/ion/heap0)
- Better test framework (integration with VGEM was suggested)
+vsoc.c, uapi/vsoc_shm.h
+ - The current driver uses the same wait queue for all of the futexes in a
+ region. This will cause false wakeups in regions with a large number of
+ waiting threads. We should eventually use multiple queues and select the
+ queue based on the region.
+ - Add debugfs support for examining the permissions of regions.
+ - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been
+ superseded by the futex and is there for legacy reasons.
+
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
index 60d7208f110a..1a478173cd21 100644
--- a/drivers/staging/android/ashmem.h
+++ b/drivers/staging/android/ashmem.h
@@ -1,13 +1,9 @@
-// SPDX-License-Identifier: (GPL-2.0 OR Apache-2.0)
+/* SPDX-License-Identifier: GPL-2.0 OR Apache-2.0 */
/*
* include/linux/ashmem.h
*
* Copyright 2008 Google Inc.
* Author: Robert Love
- *
- * This file is dual licensed. It may be redistributed and/or modified
- * under the terms of the Apache 2.0 License OR version 2 of the GNU
- * General Public License.
*/
#ifndef _LINUX_ASHMEM_H
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index 898e9a834ccc..c16dd16afe6a 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -3,7 +3,7 @@ menuconfig ION
depends on HAVE_MEMBLOCK && HAS_DMA && MMU
select GENERIC_ALLOCATOR
select DMA_SHARED_BUFFER
- ---help---
+ help
Choose this option to enable the ION Memory Manager,
used by Android to efficiently allocate buffers
from userspace that can be shared between drivers.
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index e74db7902549..9d1109e43ed4 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -74,6 +74,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->heap = heap;
buffer->flags = flags;
+ buffer->dev = dev;
+ buffer->size = len;
ret = heap->ops->allocate(heap, buffer, len, flags);
@@ -93,11 +95,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
goto err1;
}
- buffer->dev = dev;
- buffer->size = len;
-
- buffer->dev = dev;
- buffer->size = len;
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
mutex_lock(&dev->buffer_lock);
@@ -114,8 +111,11 @@ err2:
void ion_buffer_destroy(struct ion_buffer *buffer)
{
- if (WARN_ON(buffer->kmap_cnt > 0))
+ if (buffer->kmap_cnt > 0) {
+ pr_warn_once("%s: buffer still mapped in the kernel\n",
+ __func__);
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ }
buffer->heap->ops->free(buffer);
kfree(buffer);
}
@@ -318,6 +318,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;
struct ion_dma_buf_attachment *a;
+ int ret = 0;
/*
* TODO: Move this elsewhere because we don't always need a vaddr
@@ -325,6 +326,10 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (buffer->heap->ops->map_kernel) {
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto unlock;
+ }
mutex_unlock(&buffer->lock);
}
@@ -333,9 +338,10 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
direction);
}
- mutex_unlock(&buffer->lock);
- return 0;
+unlock:
+ mutex_unlock(&buffer->lock);
+ return ret;
}
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index ea0897812780..16cbd38a7160 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* drivers/staging/android/ion/ion.h
*
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index db8f61446917..9bc56eb48d2a 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -32,6 +32,9 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
list_add_tail(&page->lru, &pool->low_items);
pool->low_count++;
}
+
+ mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+ (1 << (PAGE_SHIFT + pool->order)));
mutex_unlock(&pool->mutex);
}
@@ -50,6 +53,8 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
}
list_del(&page->lru);
+ mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+ -(1 << (PAGE_SHIFT + pool->order)));
return page;
}
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
index 5b531af6820e..5442e0019dcd 100644
--- a/drivers/staging/android/uapi/ashmem.h
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -1,13 +1,9 @@
-// SPDX-License-Identifier: (GPL-2.0 OR Apache-2.0)
+/* SPDX-License-Identifier: GPL-2.0 OR Apache-2.0 */
/*
* drivers/staging/android/uapi/ashmem.h
*
* Copyright 2008 Google Inc.
* Author: Robert Love
- *
- * This file is dual licensed. It may be redistributed and/or modified
- * under the terms of the Apache 2.0 License OR version 2 of the GNU
- * General Public License.
*/
#ifndef _UAPI_LINUX_ASHMEM_H
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 825d3e95ccd3..5d7009884c13 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* drivers/staging/android/uapi/ion.h
*
diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h
new file mode 100644
index 000000000000..6291fb24efb2
--- /dev/null
+++ b/drivers/staging/android/uapi/vsoc_shm.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ */
+
+#ifndef _UAPI_LINUX_VSOC_SHM_H
+#define _UAPI_LINUX_VSOC_SHM_H
+
+#include <linux/types.h>
+
+/**
+ * A permission is a token that permits a receiver to read and/or write an area
+ * of memory within a Vsoc region.
+ *
+ * An fd_scoped permission grants both read and write access, and can be
+ * attached to a file description (see open(2)).
+ * Ownership of the area can then be shared by passing a file descriptor
+ * among processes.
+ *
+ * begin_offset and end_offset define the area of memory that is controlled by
+ * the permission. owner_offset points to a word, also in shared memory, that
+ * controls ownership of the area.
+ *
+ * ownership of the region expires when the associated file description is
+ * released.
+ *
+ * At most one permission can be attached to each file description.
+ *
+ * This is useful when implementing HALs like gralloc that scope and pass
+ * ownership of shared resources via file descriptors.
+ *
+ * The caller is responsible for doing any fencing.
+ *
+ * The calling process will normally identify a currently free area of
+ * memory. It will construct a proposed fd_scoped_permission_arg structure:
+ *
+ * begin_offset and end_offset describe the area being claimed
+ *
+ * owner_offset points to the location in shared memory that indicates the
+ * owner of the area.
+ *
+ * owned_value is the value that will be stored in owner_offset iff the
+ * permission can be granted. It must be different than VSOC_REGION_FREE.
+ *
+ * Two fd_scoped_permission structures are compatible if they vary only by
+ * their owned_value fields.
+ *
+ * The driver ensures that, for any group of simultaneous callers proposing
+ * compatible fd_scoped_permissions, it will accept exactly one of the
+ * proposals. The other callers will get a failure with errno of EAGAIN.
+ *
+ * A process receiving a file descriptor can identify the region being
+ * granted using the VSOC_GET_FD_SCOPED_PERMISSION ioctl.
+ */
+struct fd_scoped_permission {
+ __u32 begin_offset;
+ __u32 end_offset;
+ __u32 owner_offset;
+ __u32 owned_value;
+};
+
+/*
+ * This value represents a free area of memory. The driver expects to see this
+ * value at owner_offset when creating a permission otherwise it will not do it,
+ * and will write this value back once the permission is no longer needed.
+ */
+#define VSOC_REGION_FREE ((__u32)0)
+
+/**
+ * ioctl argument for VSOC_CREATE_FD_SCOPE_PERMISSION
+ */
+struct fd_scoped_permission_arg {
+ struct fd_scoped_permission perm;
+ __s32 managed_region_fd;
+};
+
+#define VSOC_NODE_FREE ((__u32)0)
+
+/*
+ * Describes a signal table in shared memory. Each non-zero entry in the
+ * table indicates that the receiver should signal the futex at the given
+ * offset. Offsets are relative to the region, not the shared memory window.
+ *
+ * interrupt_signalled_offset is used to reliably signal interrupts across the
+ * vmm boundary. There are two roles: transmitter and receiver. For example,
+ * in the host_to_guest_signal_table the host is the transmitter and the
+ * guest is the receiver. The protocol is as follows:
+ *
+ * 1. The transmitter should convert the offset of the futex to an offset
+ * in the signal table [0, (1 << num_nodes_lg2))
+ * The transmitter can choose any appropriate hashing algorithm, including
+ * hash = futex_offset & ((1 << num_nodes_lg2) - 1)
+ *
+ * 2. The transmitter should atomically compare and swap futex_offset with 0
+ * at hash. There are 3 possible outcomes
+ * a. The swap fails because the futex_offset is already in the table.
+ * The transmitter should stop.
+ * b. Some other offset is in the table. This is a hash collision. The
+ * transmitter should move to another table slot and try again. One
+ * possible algorithm:
+ * hash = (hash + 1) & ((1 << num_nodes_lg2) - 1)
+ * c. The swap worked. Continue below.
+ *
+ * 3. The transmitter atomically swaps 1 with the value at the
+ * interrupt_signalled_offset. There are two outcomes:
+ * a. The prior value was 1. In this case an interrupt has already been
+ * posted. The transmitter is done.
+ * b. The prior value was 0, indicating that the receiver may be sleeping.
+ * The transmitter will issue an interrupt.
+ *
+ * 4. On waking the receiver immediately exchanges a 0 with the
+ * interrupt_signalled_offset. If it receives a 0 then this is a spurious
+ * interrupt. That may occasionally happen in the current protocol, but
+ * should be rare.
+ *
+ * 5. The receiver scans the signal table by atomically exchanging 0 at each
+ * location. If a non-zero offset is returned from the exchange the
+ * receiver wakes all sleepers at the given offset:
+ * futex((int*)(region_base + old_value), FUTEX_WAKE, MAX_INT);
+ *
+ * 6. The receiver thread then does a conditional wait, waking immediately
+ * if the value at interrupt_signalled_offset is non-zero. This catches cases
+ * where additional signals were posted while the table was being scanned.
+ * On the guest the wait is handled via the VSOC_WAIT_FOR_INCOMING_INTERRUPT
+ * ioctl.
+ */
+struct vsoc_signal_table_layout {
+ /* log_2(Number of signal table entries) */
+ __u32 num_nodes_lg2;
+ /*
+ * Offset to the first signal table entry relative to the start of the
+ * region
+ */
+ __u32 futex_uaddr_table_offset;
+ /*
+ * Offset to an atomic_t / atomic uint32_t. A non-zero value indicates
+ * that one or more offsets are currently posted in the table.
+ * semi-unique access to an entry in the table
+ */
+ __u32 interrupt_signalled_offset;
+};
+
+#define VSOC_REGION_WHOLE ((__s32)0)
+#define VSOC_DEVICE_NAME_SZ 16
+
+/**
+ * Each HAL would (usually) talk to a single device region
+ * Multiple entities care about these regions:
+ * - The ivshmem_server will populate the regions in shared memory
+ * - The guest kernel will read the region, create minor device nodes, and
+ * allow interested parties to register for FUTEX_WAKE events in the region
+ * - HALs will access via the minor device nodes published by the guest kernel
+ * - Host side processes will access the region via the ivshmem_server:
+ * 1. Pass name to ivshmem_server at a UNIX socket
+ * 2. ivshmem_server will reply with the region's fds and metadata:
+ * - host->guest doorbell fd
+ * - guest->host doorbell fd
+ * - fd for the shared memory region
+ * - region offset
+ * 3. Start a futex receiver thread on the doorbell fd pointed at the
+ * signal_nodes
+ */
struct vsoc_device_region {
	/* Region layout version, interpreted by HALs/host processes only. */
	__u16 current_version;
	/* Oldest region layout version still accepted by its users. */
	__u16 min_compatible_version;
	/* Bounds of this region within the shared memory window. */
	__u32 region_begin_offset;
	__u32 region_end_offset;
	/* Offset, within the region, where the client-usable data starts. */
	__u32 offset_of_region_data;
	struct vsoc_signal_table_layout guest_to_host_signal_table;
	struct vsoc_signal_table_layout host_to_guest_signal_table;
	/* Name of the device. Must always be terminated with a '\0', so
	 * the longest supported device name is 15 characters.
	 */
	char device_name[VSOC_DEVICE_NAME_SZ];
	/* There are two ways that permissions to access regions are handled:
	 * - When managed_by is VSOC_REGION_WHOLE, any process that can
	 *   open the device node for the region gains complete access to it.
	 * - When managed_by names another region, processes that open this
	 *   region cannot access it directly. Access to a sub-region must be
	 *   established by invoking the VSOC_CREATE_FD_SCOPED_PERMISSION
	 *   ioctl on the managing region, providing a file instance
	 *   (represented by a fd) opened on this region.
	 */
	__u32 managed_by;
};
+
+/*
+ * The vsoc layout descriptor.
+ * The first 4K should be reserved for the shm header and region descriptors.
+ * The regions should be page aligned.
+ */
+
/* Header at offset 0 of the shared memory window. */
struct vsoc_shm_layout_descriptor {
	/* Layout version; see CURRENT_VSOC_LAYOUT_MAJOR_VERSION below. */
	__u16 major_version;
	__u16 minor_version;

	/* size of the shm. This may be redundant but nice to have */
	__u32 size;

	/* number of shared memory regions */
	__u32 region_count;

	/* The offset to the start of region descriptors */
	__u32 vsoc_region_desc_offset;
};
+
+/*
+ * This specifies the current version that should be stored in
+ * vsoc_shm_layout_descriptor.major_version and
+ * vsoc_shm_layout_descriptor.minor_version.
+ * It should be updated only if the vsoc_device_region and
+ * vsoc_shm_layout_descriptor structures have changed.
+ * Versioning within each region is transferred
+ * via the min_compatible_version and current_version fields in
+ * vsoc_device_region. The driver does not consult these fields: they are left
+ * for the HALs and host processes and will change independently of the layout
+ * version.
+ */
+#define CURRENT_VSOC_LAYOUT_MAJOR_VERSION 2
+#define CURRENT_VSOC_LAYOUT_MINOR_VERSION 0
+
+#define VSOC_CREATE_FD_SCOPED_PERMISSION \
+ _IOW(0xF5, 0, struct fd_scoped_permission)
+#define VSOC_GET_FD_SCOPED_PERMISSION _IOR(0xF5, 1, struct fd_scoped_permission)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt if one is not already
+ * in flight.
+ */
+#define VSOC_MAYBE_SEND_INTERRUPT_TO_HOST _IO(0xF5, 2)
+
+/*
+ * When this returns the guest will scan host_to_guest_signal_table to
+ * check for new futexes to wake.
+ */
+/* TODO(ghartman): Consider moving this to the bottom half */
+#define VSOC_WAIT_FOR_INCOMING_INTERRUPT _IO(0xF5, 3)
+
+/*
+ * Guest HALs will use this to retrieve the region description after
+ * opening their device node.
+ */
+#define VSOC_DESCRIBE_REGION _IOR(0xF5, 4, struct vsoc_device_region)
+
+/*
+ * Wake any threads that may be waiting for a host interrupt on this region.
+ * This is mostly used during shutdown.
+ */
+#define VSOC_SELF_INTERRUPT _IO(0xF5, 5)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt unconditionally.
+ */
+#define VSOC_SEND_INTERRUPT_TO_HOST _IO(0xF5, 6)
+
/* Wait styles accepted in vsoc_cond_wait.wait_type. */
enum wait_types {
	VSOC_WAIT_UNDEFINED = 0,
	/* Sleep while the 32-bit word at `offset` equals `value`. */
	VSOC_WAIT_IF_EQUAL = 1,
	/* As above, but bounded by an absolute CLOCK_MONOTONIC wake time. */
	VSOC_WAIT_IF_EQUAL_TIMEOUT = 2
};
+
+/*
+ * Wait for a condition to be true
+ *
+ * Note, this is sized and aligned so the 32 bit and 64 bit layouts are
+ * identical.
+ */
+struct vsoc_cond_wait {
+ /* Input: Offset of the 32 bit word to check */
+ __u32 offset;
+ /* Input: Value that will be compared with the offset */
+ __u32 value;
+ /* Monotonic time to wake at in seconds */
+ __u64 wake_time_sec;
+ /* Input: Monotonic time to wait in nanoseconds */
+ __u32 wake_time_nsec;
+ /* Input: Type of wait */
+ __u32 wait_type;
+ /* Output: Number of times the thread woke before returning. */
+ __u32 wakes;
+ /* Ensure that we're 8-byte aligned and 8 byte length for 32/64 bit
+ * compatibility.
+ */
+ __u32 reserved_1;
+};
+
+#define VSOC_COND_WAIT _IOWR(0xF5, 7, struct vsoc_cond_wait)
+
+/* Wake any local threads waiting at the offset given in arg */
+#define VSOC_COND_WAKE _IO(0xF5, 8)
+
+#endif /* _UAPI_LINUX_VSOC_SHM_H */
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
new file mode 100644
index 000000000000..806beda1040b
--- /dev/null
+++ b/drivers/staging/android/vsoc.c
@@ -0,0 +1,1152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/android/staging/vsoc.c
+ *
+ * Android Virtual System on a Chip (VSoC) driver
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Author: ghartman@google.com
+ *
+ * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory
+ * Copyright 2009 Cam Macdonell <cam@cs.ualberta.ca>
+ *
+ * Based on cirrusfb.c and 8139cp.c:
+ * Copyright 1999-2001 Jeff Garzik
+ * Copyright 2001-2004 Jeff Garzik
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/futex.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include "uapi/vsoc_shm.h"
+
+#define VSOC_DEV_NAME "vsoc"
+
+/*
+ * Description of the ivshmem-doorbell PCI device used by QEmu. These
+ * constants follow docs/specs/ivshmem-spec.txt, which can be found in
+ * the QEmu repository. This was last reconciled with the version that
+ * came out with 2.8
+ */
+
+/*
+ * These constants are determined KVM Inter-VM shared memory device
+ * register offsets
+ */
+enum {
+ INTR_MASK = 0x00, /* Interrupt Mask */
+ INTR_STATUS = 0x04, /* Interrupt Status */
+ IV_POSITION = 0x08, /* VM ID */
+ DOORBELL = 0x0c, /* Doorbell */
+};
+
+static const int REGISTER_BAR; /* Equal to 0 */
+static const int MAX_REGISTER_BAR_LEN = 0x100;
+/*
+ * The MSI-x BAR is not used directly.
+ *
+ * static const int MSI_X_BAR = 1;
+ */
+static const int SHARED_MEMORY_BAR = 2;
+
/* Per-region (and per-MSI-X vector) driver state. */
struct vsoc_region_data {
	/* NUL-terminated copy of vsoc_device_region.device_name. */
	char name[VSOC_DEVICE_NAME_SZ + 1];
	/* Waiters blocked in VSOC_WAIT_FOR_INCOMING_INTERRUPT. */
	wait_queue_head_t interrupt_wait_queue;
	/* TODO(b/73664181): Use multiple futex wait queues */
	wait_queue_head_t futex_wait_queue;
	/* Flag indicating that an interrupt has been signalled by the host. */
	atomic_t *incoming_signalled;
	/* Flag indicating the guest has signalled the host. */
	atomic_t *outgoing_signalled;
	/* Cleanup bookkeeping: true once the matching resource exists. */
	bool irq_requested;
	bool device_created;
};
+
/* Driver-wide singleton state; there is at most one ivshmem PCI function. */
struct vsoc_device {
	/* Kernel virtual address of REGISTER_BAR. */
	void __iomem *regs;
	/* Physical address of SHARED_MEMORY_BAR. */
	phys_addr_t shm_phys_start;
	/* Kernel virtual address of SHARED_MEMORY_BAR. */
	void __iomem *kernel_mapped_shm;
	/* Size of the entire shared memory window in bytes. */
	size_t shm_size;
	/*
	 * Pointer to the virtual address of the shared memory layout structure.
	 * This is probably identical to kernel_mapped_shm, but saving this
	 * here saves a lot of annoying casts.
	 */
	struct vsoc_shm_layout_descriptor *layout;
	/*
	 * Points to a table of region descriptors in the kernel's virtual
	 * address space. Calculated from
	 * vsoc_shm_layout_descriptor.vsoc_region_desc_offset
	 */
	struct vsoc_device_region *regions;
	/* Head of a list of permissions that have been granted. */
	struct list_head permissions;
	struct pci_dev *dev;
	/* Per-region (and therefore per-interrupt) information. */
	struct vsoc_region_data *regions_data;
	/*
	 * Table of msi-x entries. This has to be separated from struct
	 * vsoc_region_data because the kernel deals with them as an array.
	 */
	struct msix_entry *msix_entries;
	/* Mutex that protects the permission list */
	struct mutex mtx;
	/* Major number assigned by the kernel */
	int major;
	/* Character device assigned by the kernel */
	struct cdev cdev;
	/* Device class assigned by the kernel */
	struct class *class;
	/*
	 * Flags that indicate what we've initialized. These are used to do an
	 * orderly cleanup of the device.
	 */
	bool enabled_device;
	bool requested_regions;
	bool cdev_added;
	bool class_added;
	bool msix_enabled;
};
+
+static struct vsoc_device vsoc_dev;
+
+/*
+ * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions.
+ */
+
/* A granted fd-scoped permission, linked on vsoc_dev.permissions. */
struct fd_scoped_permission_node {
	struct fd_scoped_permission permission;
	struct list_head list;
};

/* Per-open-file state, stored in file->private_data by open(). */
struct vsoc_private_data {
	struct fd_scoped_permission_node *fd_scoped_permission_node;
};
+
+static long vsoc_ioctl(struct file *, unsigned int, unsigned long);
+static int vsoc_mmap(struct file *, struct vm_area_struct *);
+static int vsoc_open(struct inode *, struct file *);
+static int vsoc_release(struct inode *, struct file *);
+static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
+static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
+static int
+do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
+ struct fd_scoped_permission_node *np,
+ struct fd_scoped_permission_arg __user *arg);
+static void
+do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p,
+ struct fd_scoped_permission *perm);
+static long do_vsoc_describe_region(struct file *,
+ struct vsoc_device_region __user *);
+static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off);
+
+/**
+ * Validate arguments on entry points to the driver.
+ */
+inline int vsoc_validate_inode(struct inode *inode)
+{
+ if (iminor(inode) >= vsoc_dev.layout->region_count) {
+ dev_err(&vsoc_dev.dev->dev,
+ "describe_region: invalid region %d\n", iminor(inode));
+ return -ENODEV;
+ }
+ return 0;
+}
+
+inline int vsoc_validate_filep(struct file *filp)
+{
+ int ret = vsoc_validate_inode(file_inode(filp));
+
+ if (ret)
+ return ret;
+ if (!filp->private_data) {
+ dev_err(&vsoc_dev.dev->dev,
+ "No private data on fd, region %d\n",
+ iminor(file_inode(filp)));
+ return -EBADFD;
+ }
+ return 0;
+}
+
/* Converts from shared memory offset to virtual address */
static inline void *shm_off_to_virtual_addr(__u32 offset)
{
	return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
}

/* Converts from shared memory offset to physical address */
static inline phys_addr_t shm_off_to_phys_addr(__u32 offset)
{
	return vsoc_dev.shm_phys_start + offset;
}

/**
 * Convenience functions to obtain the region from the inode or file.
 * Dangerous to call before validating the inode/file.
 */
static
inline struct vsoc_device_region *vsoc_region_from_inode(struct inode *inode)
{
	return &vsoc_dev.regions[iminor(inode)];
}

/* NOTE(review): the parameter is a file, not an inode; the name is
 * misleading but callers are unaffected.
 */
static
inline struct vsoc_device_region *vsoc_region_from_filep(struct file *inode)
{
	return vsoc_region_from_inode(file_inode(inode));
}

/* Size in bytes of a region's window within shared memory. */
static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r)
{
	return r->region_end_offset - r->region_begin_offset;
}
+
/*
 * File operations for the per-region character devices. compat_ioctl
 * reuses the native handler — presumably safe because the UAPI structures
 * use fixed-width fields (see the sizing note on vsoc_cond_wait); confirm
 * for fd_scoped_permission_arg, which is declared elsewhere.
 */
static const struct file_operations vsoc_ops = {
	.owner = THIS_MODULE,
	.open = vsoc_open,
	.mmap = vsoc_mmap,
	.read = vsoc_read,
	.unlocked_ioctl = vsoc_ioctl,
	.compat_ioctl = vsoc_ioctl,
	.write = vsoc_write,
	.llseek = vsoc_lseek,
	.release = vsoc_release,
};
+
+static struct pci_device_id vsoc_id_table[] = {
+ {0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0},
+};
+
+MODULE_DEVICE_TABLE(pci, vsoc_id_table);
+
/* probe/remove are defined below the handler table. */
static void vsoc_remove_device(struct pci_dev *pdev);
static int vsoc_probe_device(struct pci_dev *pdev,
			     const struct pci_device_id *ent);

static struct pci_driver vsoc_pci_driver = {
	.name = "vsoc",
	.id_table = vsoc_id_table,
	.probe = vsoc_probe_device,
	.remove = vsoc_remove_device,
};
+
+static int
+do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
+ struct fd_scoped_permission_node *np,
+ struct fd_scoped_permission_arg __user *arg)
+{
+ struct file *managed_filp;
+ s32 managed_fd;
+ atomic_t *owner_ptr = NULL;
+ struct vsoc_device_region *managed_region_p;
+
+ if (copy_from_user(&np->permission, &arg->perm, sizeof(*np)) ||
+ copy_from_user(&managed_fd,
+ &arg->managed_region_fd, sizeof(managed_fd))) {
+ return -EFAULT;
+ }
+ managed_filp = fdget(managed_fd).file;
+ /* Check that it's a valid fd, */
+ if (!managed_filp || vsoc_validate_filep(managed_filp))
+ return -EPERM;
+ /* EEXIST if the given fd already has a permission. */
+ if (((struct vsoc_private_data *)managed_filp->private_data)->
+ fd_scoped_permission_node)
+ return -EEXIST;
+ managed_region_p = vsoc_region_from_filep(managed_filp);
+ /* Check that the provided region is managed by this one */
+ if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p)
+ return -EPERM;
+ /* The area must be well formed and have non-zero size */
+ if (np->permission.begin_offset >= np->permission.end_offset)
+ return -EINVAL;
+ /* The area must fit in the memory window */
+ if (np->permission.end_offset >
+ vsoc_device_region_size(managed_region_p))
+ return -ERANGE;
+ /* The area must be in the region data section */
+ if (np->permission.begin_offset <
+ managed_region_p->offset_of_region_data)
+ return -ERANGE;
+ /* The area must be page aligned */
+ if (!PAGE_ALIGNED(np->permission.begin_offset) ||
+ !PAGE_ALIGNED(np->permission.end_offset))
+ return -EINVAL;
+ /* Owner offset must be naturally aligned in the window */
+ if (np->permission.owner_offset &
+ (sizeof(np->permission.owner_offset) - 1))
+ return -EINVAL;
+ /* The owner flag must reside in the owner memory */
+ if (np->permission.owner_offset + sizeof(np->permission.owner_offset) >
+ vsoc_device_region_size(region_p))
+ return -ERANGE;
+ /* The owner flag must reside in the data section */
+ if (np->permission.owner_offset < region_p->offset_of_region_data)
+ return -EINVAL;
+ /* The owner value must change to claim the memory */
+ if (np->permission.owned_value == VSOC_REGION_FREE)
+ return -EINVAL;
+ owner_ptr =
+ (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset +
+ np->permission.owner_offset);
+ /* We've already verified that this is in the shared memory window, so
+ * it should be safe to write to this address.
+ */
+ if (atomic_cmpxchg(owner_ptr,
+ VSOC_REGION_FREE,
+ np->permission.owned_value) != VSOC_REGION_FREE) {
+ return -EBUSY;
+ }
+ ((struct vsoc_private_data *)managed_filp->private_data)->
+ fd_scoped_permission_node = np;
+ /* The file offset needs to be adjusted if the calling
+ * process did any read/write operations on the fd
+ * before creating the permission.
+ */
+ if (managed_filp->f_pos) {
+ if (managed_filp->f_pos > np->permission.end_offset) {
+ /* If the offset is beyond the permission end, set it
+ * to the end.
+ */
+ managed_filp->f_pos = np->permission.end_offset;
+ } else {
+ /* If the offset is within the permission interval
+ * keep it there otherwise reset it to zero.
+ */
+ if (managed_filp->f_pos < np->permission.begin_offset) {
+ managed_filp->f_pos = 0;
+ } else {
+ managed_filp->f_pos -=
+ np->permission.begin_offset;
+ }
+ }
+ }
+ return 0;
+}
+
+static void
+do_destroy_fd_scoped_permission_node(struct vsoc_device_region *owner_region_p,
+ struct fd_scoped_permission_node *node)
+{
+ if (node) {
+ do_destroy_fd_scoped_permission(owner_region_p,
+ &node->permission);
+ mutex_lock(&vsoc_dev.mtx);
+ list_del(&node->list);
+ mutex_unlock(&vsoc_dev.mtx);
+ kfree(node);
+ }
+}
+
+static void
+do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p,
+ struct fd_scoped_permission *perm)
+{
+ atomic_t *owner_ptr = NULL;
+ int prev = 0;
+
+ if (!perm)
+ return;
+ owner_ptr = (atomic_t *)shm_off_to_virtual_addr
+ (owner_region_p->region_begin_offset + perm->owner_offset);
+ prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE);
+ if (prev != perm->owned_value)
+ dev_err(&vsoc_dev.dev->dev,
+ "%x-%x: owner (%s) %x: expected to be %x was %x",
+ perm->begin_offset, perm->end_offset,
+ owner_region_p->device_name, perm->owner_offset,
+ perm->owned_value, prev);
+}
+
+static long do_vsoc_describe_region(struct file *filp,
+ struct vsoc_device_region __user *dest)
+{
+ struct vsoc_device_region *region_p;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ region_p = vsoc_region_from_filep(filp);
+ if (copy_to_user(dest, region_p, sizeof(*region_p)))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Implements the inner logic of cond_wait. Copies to and from userspace are
+ * done in the helper function below.
+ */
+static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
+{
+ DEFINE_WAIT(wait);
+ u32 region_number = iminor(file_inode(filp));
+ struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
+ struct hrtimer_sleeper timeout, *to = NULL;
+ int ret = 0;
+ struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
+ atomic_t *address = NULL;
+ struct timespec ts;
+
+ /* Ensure that the offset is aligned */
+ if (arg->offset & (sizeof(uint32_t) - 1))
+ return -EADDRNOTAVAIL;
+ /* Ensure that the offset is within shared memory */
+ if (((uint64_t)arg->offset) + region_p->region_begin_offset +
+ sizeof(uint32_t) > region_p->region_end_offset)
+ return -E2BIG;
+ address = shm_off_to_virtual_addr(region_p->region_begin_offset +
+ arg->offset);
+
+ /* Ensure that the type of wait is valid */
+ switch (arg->wait_type) {
+ case VSOC_WAIT_IF_EQUAL:
+ break;
+ case VSOC_WAIT_IF_EQUAL_TIMEOUT:
+ to = &timeout;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (to) {
+ /* Copy the user-supplied timesec into the kernel structure.
+ * We do things this way to flatten differences between 32 bit
+ * and 64 bit timespecs.
+ */
+ ts.tv_sec = arg->wake_time_sec;
+ ts.tv_nsec = arg->wake_time_nsec;
+
+ if (!timespec_valid(&ts))
+ return -EINVAL;
+ hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
+ hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts),
+ current->timer_slack_ns);
+
+ hrtimer_init_sleeper(to, current);
+ }
+
+ while (1) {
+ prepare_to_wait(&data->futex_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+ /*
+ * Check the sentinel value after prepare_to_wait. If the value
+ * changes after this check the writer will call signal,
+ * changing the task state from INTERRUPTIBLE to RUNNING. That
+ * will ensure that schedule() will eventually schedule this
+ * task.
+ */
+ if (atomic_read(address) != arg->value) {
+ ret = 0;
+ break;
+ }
+ if (to) {
+ hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+ if (likely(to->task))
+ freezable_schedule();
+ hrtimer_cancel(&to->timer);
+ if (!to->task) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ } else {
+ freezable_schedule();
+ }
+ /* Count the number of times that we woke up. This is useful
+ * for unit testing.
+ */
+ ++arg->wakes;
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ }
+ finish_wait(&data->futex_wait_queue, &wait);
+ if (to)
+ destroy_hrtimer_on_stack(&to->timer);
+ return ret;
+}
+
+/**
+ * Handles the details of copying from/to userspace to ensure that the copies
+ * happen on all of the return paths of cond_wait.
+ */
+static int do_vsoc_cond_wait(struct file *filp,
+ struct vsoc_cond_wait __user *untrusted_in)
+{
+ struct vsoc_cond_wait arg;
+ int rval = 0;
+
+ if (copy_from_user(&arg, untrusted_in, sizeof(arg)))
+ return -EFAULT;
+ /* wakes is an out parameter. Initialize it to something sensible. */
+ arg.wakes = 0;
+ rval = handle_vsoc_cond_wait(filp, &arg);
+ if (copy_to_user(untrusted_in, &arg, sizeof(arg)))
+ return -EFAULT;
+ return rval;
+}
+
+static int do_vsoc_cond_wake(struct file *filp, uint32_t offset)
+{
+ struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
+ u32 region_number = iminor(file_inode(filp));
+ struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
+ /* Ensure that the offset is aligned */
+ if (offset & (sizeof(uint32_t) - 1))
+ return -EADDRNOTAVAIL;
+ /* Ensure that the offset is within shared memory */
+ if (((uint64_t)offset) + region_p->region_begin_offset +
+ sizeof(uint32_t) > region_p->region_end_offset)
+ return -E2BIG;
+ /*
+ * TODO(b/73664181): Use multiple futex wait queues.
+ * We need to wake every sleeper when the condition changes. Typically
+ * only a single thread will be waiting on the condition, but there
+ * are exceptions. The worst case is about 10 threads.
+ */
+ wake_up_interruptible_all(&data->futex_wait_queue);
+ return 0;
+}
+
/*
 * Unlocked (and compat) ioctl entry point for every region minor.
 * Returns 0 on success or a negative errno.
 */
static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int rv = 0;
	struct vsoc_device_region *region_p;
	u32 reg_num;
	struct vsoc_region_data *reg_data;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	region_p = vsoc_region_from_filep(filp);
	reg_num = iminor(file_inode(filp));
	reg_data = vsoc_dev.regions_data + reg_num;
	switch (cmd) {
	case VSOC_CREATE_FD_SCOPED_PERMISSION:
		{
			struct fd_scoped_permission_node *node = NULL;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			/* We can't allocate memory for the permission */
			if (!node)
				return -ENOMEM;
			INIT_LIST_HEAD(&node->list);
			rv = do_create_fd_scoped_permission
				(region_p,
				 node,
				 (struct fd_scoped_permission_arg __user *)arg);
			/* On success the node joins the device-wide list;
			 * on failure it is ours to free.
			 */
			if (!rv) {
				mutex_lock(&vsoc_dev.mtx);
				list_add(&node->list, &vsoc_dev.permissions);
				mutex_unlock(&vsoc_dev.mtx);
			} else {
				kfree(node);
				return rv;
			}
		}
		break;

	case VSOC_GET_FD_SCOPED_PERMISSION:
		{
			/* Copy out the permission attached to this fd,
			 * if any.
			 */
			struct fd_scoped_permission_node *node =
			    ((struct vsoc_private_data *)filp->private_data)->
			    fd_scoped_permission_node;
			if (!node)
				return -ENOENT;
			if (copy_to_user
			    ((struct fd_scoped_permission __user *)arg,
			     &node->permission, sizeof(node->permission)))
				return -EFAULT;
		}
		break;

	case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST:
		/* Ring the doorbell only if no interrupt is already in
		 * flight (step 3 of the signalling protocol).
		 */
		if (!atomic_xchg(reg_data->outgoing_signalled, 1)) {
			writel(reg_num, vsoc_dev.regs + DOORBELL);
			return 0;
		} else {
			return -EBUSY;
		}
		break;

	case VSOC_SEND_INTERRUPT_TO_HOST:
		/* Unconditional doorbell. */
		writel(reg_num, vsoc_dev.regs + DOORBELL);
		return 0;
	case VSOC_WAIT_FOR_INCOMING_INTERRUPT:
		wait_event_interruptible
			(reg_data->interrupt_wait_queue,
			 (atomic_read(reg_data->incoming_signalled) != 0));
		break;

	case VSOC_DESCRIBE_REGION:
		return do_vsoc_describe_region
			(filp,
			 (struct vsoc_device_region __user *)arg);

	case VSOC_SELF_INTERRUPT:
		/* Fake an incoming interrupt; used mostly at shutdown. */
		atomic_set(reg_data->incoming_signalled, 1);
		wake_up_interruptible(&reg_data->interrupt_wait_queue);
		break;

	case VSOC_COND_WAIT:
		return do_vsoc_cond_wait(filp,
					 (struct vsoc_cond_wait __user *)arg);
	case VSOC_COND_WAKE:
		return do_vsoc_cond_wake(filp, arg);

	default:
		return -EINVAL;
	}
	return 0;
}
+
/*
 * read() from a region: copies out of the window this fd may see —
 * presumably the whole region, or the fd-scoped permission area if one is
 * attached (see vsoc_get_area(), defined elsewhere in this file).
 */
static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
			 loff_t *poffset)
{
	__u32 area_off;
	const void *area_p;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	area_p = shm_off_to_virtual_addr(area_off);
	area_p += *poffset;
	area_len -= *poffset;
	/* At or past the end of the window: EOF. */
	if (area_len <= 0)
		return 0;
	if (area_len < len)
		len = area_len;
	if (copy_to_user(buffer, area_p, len))
		return -EFAULT;
	*poffset += len;
	return len;
}
+
/*
 * llseek within this fd's visible window. The window is fully mapped, so
 * SEEK_DATA/SEEK_HOLE treat the whole area as data with a single hole at
 * the end.
 */
static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
{
	ssize_t area_len = 0;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, NULL);
	switch (origin) {
	case SEEK_SET:
		break;

	case SEEK_CUR:
		/* Guard against signed overflow before adding. */
		if (offset > 0 && offset + filp->f_pos < 0)
			return -EOVERFLOW;
		offset += filp->f_pos;
		break;

	case SEEK_END:
		if (offset > 0 && offset + area_len < 0)
			return -EOVERFLOW;
		offset += area_len;
		break;

	case SEEK_DATA:
		if (offset >= area_len)
			return -EINVAL;
		if (offset < 0)
			offset = 0;
		break;

	case SEEK_HOLE:
		/* Next hole is always the end of the region, unless offset is
		 * beyond that
		 */
		if (offset < area_len)
			offset = area_len;
		break;

	default:
		return -EINVAL;
	}

	/* The final position must stay inside the window. */
	if (offset < 0 || offset > area_len)
		return -EINVAL;
	filp->f_pos = offset;

	return offset;
}
+
/*
 * write() into a region: mirror image of vsoc_read(), bounded by the same
 * fd-visible window from vsoc_get_area().
 */
static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
			  size_t len, loff_t *poffset)
{
	__u32 area_off;
	void *area_p;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	area_p = shm_off_to_virtual_addr(area_off);
	area_p += *poffset;
	area_len -= *poffset;
	/* At or past the end of the window: nothing written. */
	if (area_len <= 0)
		return 0;
	if (area_len < len)
		len = area_len;
	if (copy_from_user(area_p, buffer, len))
		return -EFAULT;
	*poffset += len;
	return len;
}
+
+static irqreturn_t vsoc_interrupt(int irq, void *region_data_v)
+{
+ struct vsoc_region_data *region_data =
+ (struct vsoc_region_data *)region_data_v;
+ int reg_num = region_data - vsoc_dev.regions_data;
+
+ if (unlikely(!region_data))
+ return IRQ_NONE;
+
+ if (unlikely(reg_num < 0 ||
+ reg_num >= vsoc_dev.layout->region_count)) {
+ dev_err(&vsoc_dev.dev->dev,
+ "invalid irq @%p reg_num=0x%04x\n",
+ region_data, reg_num);
+ return IRQ_NONE;
+ }
+ if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) {
+ dev_err(&vsoc_dev.dev->dev,
+ "irq not aligned @%p reg_num=0x%04x\n",
+ region_data, reg_num);
+ return IRQ_NONE;
+ }
+ wake_up_interruptible(&region_data->interrupt_wait_queue);
+ return IRQ_HANDLED;
+}
+
+static int vsoc_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int result;
+ int i;
+ resource_size_t reg_size;
+ dev_t devt;
+
+ vsoc_dev.dev = pdev;
+ result = pci_enable_device(pdev);
+ if (result) {
+ dev_err(&pdev->dev,
+ "pci_enable_device failed %s: error %d\n",
+ pci_name(pdev), result);
+ return result;
+ }
+ vsoc_dev.enabled_device = true;
+ result = pci_request_regions(pdev, "vsoc");
+ if (result < 0) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.requested_regions = true;
+ /* Set up the control registers in BAR 0 */
+ reg_size = pci_resource_len(pdev, REGISTER_BAR);
+ if (reg_size > MAX_REGISTER_BAR_LEN)
+ vsoc_dev.regs =
+ pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN);
+ else
+ vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size);
+
+ if (!vsoc_dev.regs) {
+ dev_err(&pdev->dev,
+ "cannot map registers of size %zu\n",
+ (size_t)reg_size);
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+
+ /* Map the shared memory in BAR 2 */
+ vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
+ vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);
+
+ dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
+ &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+ vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
+ if (!vsoc_dev.kernel_mapped_shm) {
+ dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+
+ vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
+ vsoc_dev.kernel_mapped_shm;
+ dev_info(&pdev->dev, "major_version: %d\n",
+ vsoc_dev.layout->major_version);
+ dev_info(&pdev->dev, "minor_version: %d\n",
+ vsoc_dev.layout->minor_version);
+ dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size);
+ dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count);
+ if (vsoc_dev.layout->major_version !=
+ CURRENT_VSOC_LAYOUT_MAJOR_VERSION) {
+ dev_err(&vsoc_dev.dev->dev,
+ "driver supports only major_version %d\n",
+ CURRENT_VSOC_LAYOUT_MAJOR_VERSION);
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count,
+ VSOC_DEV_NAME);
+ if (result) {
+ dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.major = MAJOR(devt);
+ cdev_init(&vsoc_dev.cdev, &vsoc_ops);
+ vsoc_dev.cdev.owner = THIS_MODULE;
+ result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count);
+ if (result) {
+ dev_err(&vsoc_dev.dev->dev, "cdev_add error\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.cdev_added = true;
+ vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
+ if (IS_ERR(vsoc_dev.class)) {
+ dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
+ vsoc_remove_device(pdev);
+ return PTR_ERR(vsoc_dev.class);
+ }
+ vsoc_dev.class_added = true;
+ vsoc_dev.regions = (struct vsoc_device_region __force *)
+ ((void *)vsoc_dev.layout +
+ vsoc_dev.layout->vsoc_region_desc_offset);
+ vsoc_dev.msix_entries =
+ kcalloc(vsoc_dev.layout->region_count,
+ sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL);
+ if (!vsoc_dev.msix_entries) {
+ dev_err(&vsoc_dev.dev->dev,
+ "unable to allocate msix_entries\n");
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ vsoc_dev.regions_data =
+ kcalloc(vsoc_dev.layout->region_count,
+ sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL);
+ if (!vsoc_dev.regions_data) {
+ dev_err(&vsoc_dev.dev->dev,
+ "unable to allocate regions' data\n");
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i)
+ vsoc_dev.msix_entries[i].entry = i;
+
+ result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries,
+ vsoc_dev.layout->region_count);
+ if (result) {
+ dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result);
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ /* Check that all regions are well formed */
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+ const struct vsoc_device_region *region = vsoc_dev.regions + i;
+
+ if (!PAGE_ALIGNED(region->region_begin_offset) ||
+ !PAGE_ALIGNED(region->region_end_offset)) {
+ dev_err(&vsoc_dev.dev->dev,
+ "region %d not aligned (%x:%x)", i,
+ region->region_begin_offset,
+ region->region_end_offset);
+ vsoc_remove_device(pdev);
+ return -EFAULT;
+ }
+ if (region->region_begin_offset >= region->region_end_offset ||
+ region->region_end_offset > vsoc_dev.shm_size) {
+ dev_err(&vsoc_dev.dev->dev,
+ "region %d offsets are wrong: %x %x %zx",
+ i, region->region_begin_offset,
+ region->region_end_offset, vsoc_dev.shm_size);
+ vsoc_remove_device(pdev);
+ return -EFAULT;
+ }
+ if (region->managed_by >= vsoc_dev.layout->region_count) {
+ dev_err(&vsoc_dev.dev->dev,
+ "region %d has invalid owner: %u",
+ i, region->managed_by);
+ vsoc_remove_device(pdev);
+ return -EFAULT;
+ }
+ }
+ vsoc_dev.msix_enabled = true;
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+ const struct vsoc_device_region *region = vsoc_dev.regions + i;
+ size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
+ const struct vsoc_signal_table_layout *h_to_g_signal_table =
+ &region->host_to_guest_signal_table;
+ const struct vsoc_signal_table_layout *g_to_h_signal_table =
+ &region->guest_to_host_signal_table;
+
+ vsoc_dev.regions_data[i].name[name_sz] = '\0';
+ memcpy(vsoc_dev.regions_data[i].name, region->device_name,
+ name_sz);
+ dev_info(&pdev->dev, "region %d name=%s\n",
+ i, vsoc_dev.regions_data[i].name);
+ init_waitqueue_head
+ (&vsoc_dev.regions_data[i].interrupt_wait_queue);
+ init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
+ vsoc_dev.regions_data[i].incoming_signalled =
+ shm_off_to_virtual_addr(region->region_begin_offset) +
+ h_to_g_signal_table->interrupt_signalled_offset;
+ vsoc_dev.regions_data[i].outgoing_signalled =
+ shm_off_to_virtual_addr(region->region_begin_offset) +
+ g_to_h_signal_table->interrupt_signalled_offset;
+ result = request_irq(vsoc_dev.msix_entries[i].vector,
+ vsoc_interrupt, 0,
+ vsoc_dev.regions_data[i].name,
+ vsoc_dev.regions_data + i);
+ if (result) {
+ dev_info(&pdev->dev,
+ "request_irq failed irq=%d vector=%d\n",
+ i, vsoc_dev.msix_entries[i].vector);
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ vsoc_dev.regions_data[i].irq_requested = true;
+ if (!device_create(vsoc_dev.class, NULL,
+ MKDEV(vsoc_dev.major, i),
+ NULL, vsoc_dev.regions_data[i].name)) {
+ dev_err(&vsoc_dev.dev->dev, "device_create failed\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.regions_data[i].device_created = true;
+ }
+ return 0;
+}
+
+/*
+ * This should undo all of the allocations in the probe function in reverse
+ * order.
+ *
+ * Notes:
+ *
+ * The device may have been partially initialized, so double check
+ * that the allocations happened.
+ *
+ * This function may be called multiple times, so mark resources as freed
+ * as they are deallocated.
+ */
+static void vsoc_remove_device(struct pci_dev *pdev)
+{
+ int i;
+ /*
+ * pdev is the first thing to be set on probe and the last thing
+ * to be cleared here. If it's NULL then there is no cleanup.
+ */
+ if (!pdev || !vsoc_dev.dev)
+ return;
+ dev_info(&pdev->dev, "remove_device\n");
+ if (vsoc_dev.regions_data) {
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+ if (vsoc_dev.regions_data[i].device_created) {
+ device_destroy(vsoc_dev.class,
+ MKDEV(vsoc_dev.major, i));
+ vsoc_dev.regions_data[i].device_created = false;
+ }
+ if (vsoc_dev.regions_data[i].irq_requested) {
+ /*
+ * free_irq() matches handlers by dev_id and
+ * warns without freeing on a mismatch, so we
+ * must pass the same cookie that was given to
+ * request_irq() (vsoc_dev.regions_data + i),
+ * not NULL.
+ */
+ free_irq(vsoc_dev.msix_entries[i].vector,
+ vsoc_dev.regions_data + i);
+ vsoc_dev.regions_data[i].irq_requested = false;
+ }
+ }
+ kfree(vsoc_dev.regions_data);
+ vsoc_dev.regions_data = NULL;
+ }
+ if (vsoc_dev.msix_enabled) {
+ pci_disable_msix(pdev);
+ vsoc_dev.msix_enabled = false;
+ }
+ kfree(vsoc_dev.msix_entries);
+ vsoc_dev.msix_entries = NULL;
+ vsoc_dev.regions = NULL;
+ if (vsoc_dev.class_added) {
+ class_destroy(vsoc_dev.class);
+ vsoc_dev.class_added = false;
+ }
+ if (vsoc_dev.cdev_added) {
+ cdev_del(&vsoc_dev.cdev);
+ vsoc_dev.cdev_added = false;
+ }
+ if (vsoc_dev.major && vsoc_dev.layout) {
+ unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
+ vsoc_dev.layout->region_count);
+ vsoc_dev.major = 0;
+ }
+ vsoc_dev.layout = NULL;
+ if (vsoc_dev.kernel_mapped_shm) {
+ pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
+ vsoc_dev.kernel_mapped_shm = NULL;
+ }
+ if (vsoc_dev.regs) {
+ pci_iounmap(pdev, vsoc_dev.regs);
+ vsoc_dev.regs = NULL;
+ }
+ if (vsoc_dev.requested_regions) {
+ pci_release_regions(pdev);
+ vsoc_dev.requested_regions = false;
+ }
+ if (vsoc_dev.enabled_device) {
+ pci_disable_device(pdev);
+ vsoc_dev.enabled_device = false;
+ }
+ /* Do this last: it indicates that the device is not initialized. */
+ vsoc_dev.dev = NULL;
+}
+
+/*
+ * Module exit: tear down the (single) probed device, then unregister
+ * the PCI driver. vsoc_remove_device() is a no-op when vsoc_dev.dev is
+ * NULL, so this is safe even if probe never succeeded.
+ */
+static void __exit vsoc_cleanup_module(void)
+{
+ vsoc_remove_device(vsoc_dev.dev);
+ pci_unregister_driver(&vsoc_pci_driver);
+}
+
+/*
+ * Module init: set up state shared by all regions, then hand control
+ * to the PCI core. All per-device setup happens in the probe callback.
+ */
+static int __init vsoc_init_module(void)
+{
+ INIT_LIST_HEAD(&vsoc_dev.permissions);
+ mutex_init(&vsoc_dev.mtx);
+
+ /* Returns 0 on success or a negative errno, which we propagate. */
+ return pci_register_driver(&vsoc_pci_driver);
+}
+
+/*
+ * Open a region device node: validate the inode and attach a zeroed
+ * per-file private data blob (freed again in vsoc_release()).
+ */
+static int vsoc_open(struct inode *inode, struct file *filp)
+{
+ struct vsoc_private_data *pd;
+ /* Can't use vsoc_validate_filep because filp is still incomplete */
+ int ret = vsoc_validate_inode(inode);
+
+ if (ret)
+ return ret;
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+ filp->private_data = pd;
+ return 0;
+}
+
+/*
+ * Release a region device node: tear down any fd-scoped permission that
+ * was granted through this file, then free the per-file private data
+ * allocated in vsoc_open().
+ */
+static int vsoc_release(struct inode *inode, struct file *filp)
+{
+ struct vsoc_private_data *private_data = NULL;
+ struct fd_scoped_permission_node *node = NULL;
+ struct vsoc_device_region *owner_region_p = NULL;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ private_data = (struct vsoc_private_data *)filp->private_data;
+ if (!private_data)
+ return 0;
+
+ node = private_data->fd_scoped_permission_node;
+ if (node) {
+ owner_region_p = vsoc_region_from_inode(inode);
+ /*
+ * Permissions live in the managing region, so if this region
+ * is owned by another one, destroy the node there.
+ */
+ if (owner_region_p->managed_by != VSOC_REGION_WHOLE) {
+ owner_region_p =
+ &vsoc_dev.regions[owner_region_p->managed_by];
+ }
+ do_destroy_fd_scoped_permission_node(owner_region_p, node);
+ private_data->fd_scoped_permission_node = NULL;
+ }
+ kfree(private_data);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * Returns the device relative offset and length of the area specified by the
+ * fd scoped permission. If there is no fd scoped permission set, a default
+ * permission covering the entire region is assumed, unless the region is owned
+ * by another one, in which case the default is a permission with zero size.
+ */
+static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset)
+{
+ __u32 off = 0;
+ ssize_t length = 0;
+ struct vsoc_device_region *region_p;
+ struct fd_scoped_permission_node *node;
+
+ region_p = vsoc_region_from_filep(filp);
+ off = region_p->region_begin_offset;
+ /*
+ * Test the permission *node* pointer, not &node->permission: the
+ * latter dereferences a possibly-NULL node (undefined behavior) and
+ * can never itself be NULL, which made the fallback branches below
+ * unreachable.
+ */
+ node = ((struct vsoc_private_data *)filp->private_data)->
+ fd_scoped_permission_node;
+ if (node) {
+ off += node->permission.begin_offset;
+ length = node->permission.end_offset -
+ node->permission.begin_offset;
+ } else if (region_p->managed_by == VSOC_REGION_WHOLE) {
+ /* No permission set and the region is not owned by another,
+ * default to full region access.
+ */
+ length = vsoc_device_region_size(region_p);
+ } else {
+ /* return zero length, access is denied. */
+ length = 0;
+ }
+ if (area_offset)
+ *area_offset = off;
+ return length;
+}
+
+/*
+ * Map the area this fd is permitted to see into userspace, uncached.
+ * The requested vm_pgoff is an offset within the permitted area.
+ */
+static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long len = vma->vm_end - vma->vm_start;
+ __u32 area_off;
+ phys_addr_t mem_off;
+ ssize_t area_len;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ area_len = vsoc_get_area(filp, &area_off);
+ /* Add the requested offset */
+ area_off += (vma->vm_pgoff << PAGE_SHIFT);
+ area_len -= (vma->vm_pgoff << PAGE_SHIFT);
+ /*
+ * area_len goes negative when vm_pgoff points past the permitted
+ * area. Check that explicitly: comparing a negative ssize_t against
+ * an unsigned long converts it to a huge unsigned value, which would
+ * bypass the bounds check and map memory outside the area.
+ */
+ if (area_len < 0 || (unsigned long)area_len < len)
+ return -EINVAL;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ mem_off = shm_off_to_phys_addr(area_off);
+ if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT,
+ len, vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+
+module_init(vsoc_init_module);
+module_exit(vsoc_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Hartman <ghartman@google.com>");
+MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device");
+MODULE_VERSION("1.0");
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 4de4fd06eebc..962cc0c79988 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Staging board support for Armadillo 800 eva.
* Enable not-yet-DT-capable devices here.
@@ -6,15 +7,6 @@
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/dma-mapping.h>
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index 86dc41101610..cb6feb34dd40 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Magnus Damm
* Copyright (C) 2015 Glider bvba
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#define pr_fmt(fmt) "board_staging: " fmt
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 97fb9388bc22..fa9d239474ee 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -34,7 +34,7 @@
struct comedi32_chaninfo_struct {
unsigned int subdev;
compat_uptr_t maxdata_list; /* 32-bit 'unsigned int *' */
- compat_uptr_t flaglist; /* 32-bit 'unsigned int *' */
+ compat_uptr_t flaglist; /* 32-bit 'unsigned int *' */
compat_uptr_t rangelist; /* 32-bit 'unsigned int *' */
unsigned int unused[4];
};
@@ -57,16 +57,16 @@ struct comedi32_cmd_struct {
unsigned int scan_end_arg;
unsigned int stop_src;
unsigned int stop_arg;
- compat_uptr_t chanlist; /* 32-bit 'unsigned int *' */
+ compat_uptr_t chanlist; /* 32-bit 'unsigned int *' */
unsigned int chanlist_len;
- compat_uptr_t data; /* 32-bit 'short *' */
+ compat_uptr_t data; /* 32-bit 'short *' */
unsigned int data_len;
};
struct comedi32_insn_struct {
unsigned int insn;
unsigned int n;
- compat_uptr_t data; /* 32-bit 'unsigned int *' */
+ compat_uptr_t data; /* 32-bit 'unsigned int *' */
unsigned int subdev;
unsigned int chanspec;
unsigned int unused[3];
@@ -74,7 +74,7 @@ struct comedi32_insn_struct {
struct comedi32_insnlist_struct {
unsigned int n_insns;
- compat_uptr_t insns; /* 32-bit 'struct comedi_insn *' */
+ compat_uptr_t insns; /* 32-bit 'struct comedi_insn *' */
};
/* Handle translated ioctl. */
@@ -194,7 +194,7 @@ static int get_compat_cmd(struct comedi_cmd __user *cmd,
err |= __put_user(temp.uint, &cmd->stop_arg);
err |= __get_user(temp.uptr, &cmd32->chanlist);
err |= __put_user((unsigned int __force *)compat_ptr(temp.uptr),
- &cmd->chanlist);
+ &cmd->chanlist);
err |= __get_user(temp.uint, &cmd32->chanlist_len);
err |= __put_user(temp.uint, &cmd->chanlist_len);
err |= __get_user(temp.uptr, &cmd32->data);
diff --git a/drivers/staging/comedi/comedi_usb.h b/drivers/staging/comedi/comedi_usb.h
index 50287de7a239..601e29d3891c 100644
--- a/drivers/staging/comedi/comedi_usb.h
+++ b/drivers/staging/comedi/comedi_usb.h
@@ -1,6 +1,5 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * comedi_usb.h
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* comedi_usb.h
* header file for USB Comedi drivers
*
* COMEDI - Linux Control and Measurement Device Interface
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index f3474a4ba69e..c54ac94d89d2 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* comedidev.h
* header file for kernel-only structures, variables, and constants
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index fdd81c3beb51..631a703b345d 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -2268,14 +2268,14 @@ static inline unsigned int dma_transfer_size(struct comedi_device *dev)
}
static u32 ai_convert_counter_6xxx(const struct comedi_device *dev,
- const struct comedi_cmd *cmd)
+ const struct comedi_cmd *cmd)
{
/* supposed to load counter with desired divisor minus 3 */
return cmd->convert_arg / TIMER_BASE - 3;
}
static u32 ai_scan_counter_6xxx(struct comedi_device *dev,
- struct comedi_cmd *cmd)
+ struct comedi_cmd *cmd)
{
u32 count;
@@ -2296,7 +2296,7 @@ static u32 ai_scan_counter_6xxx(struct comedi_device *dev,
}
static u32 ai_convert_counter_4020(struct comedi_device *dev,
- struct comedi_cmd *cmd)
+ struct comedi_cmd *cmd)
{
struct pcidas64_private *devpriv = dev->private;
unsigned int divisor;
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
index d7577096fb25..e50e72218364 100644
--- a/drivers/staging/emxx_udc/Kconfig
+++ b/drivers/staging/emxx_udc/Kconfig
@@ -1,6 +1,6 @@
config USB_EMXX
tristate "EMXX USB Function Device Controller"
- depends on USB_GADGET && (ARCH_SHMOBILE || (ARM && COMPILE_TEST))
+ depends on USB_GADGET && (ARCH_RENESAS || (ARM && COMPILE_TEST))
help
The Emma Mobile series of SoCs from Renesas Electronics and
former NEC Electronics include USB Function hardware.
diff --git a/drivers/staging/emxx_udc/TODO b/drivers/staging/emxx_udc/TODO
index 1319379beb7e..471529a470c7 100644
--- a/drivers/staging/emxx_udc/TODO
+++ b/drivers/staging/emxx_udc/TODO
@@ -1,4 +1,6 @@
* add clock framework support (platform device with CCF needs special care)
* break out board-specific VBUS GPIO to work with multiplatform
+* convert VBUS GPIO to use GPIO descriptors from <linux/gpio/consumer.h>
+ and stop using the old GPIO API
* DT bindings
* move driver into drivers/usb/gadget/
diff --git a/drivers/staging/fbtft/TODO b/drivers/staging/fbtft/TODO
new file mode 100644
index 000000000000..7e64c7e438f0
--- /dev/null
+++ b/drivers/staging/fbtft/TODO
@@ -0,0 +1,4 @@
+* convert all uses of the old GPIO API from <linux/gpio.h> to the
+ GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
+ lines from device tree, ACPI or board files, board files should
+ use <linux/gpio/machine.h>
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index a263bce260c9..871b307d83cb 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -22,10 +22,13 @@ void func(struct fbtft_par *par, int len, ...) \
if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) { \
va_start(args, len); \
for (i = 0; i < len; i++) { \
- buf[i] = modifier((data_type)va_arg(args, unsigned int)); \
+ buf[i] = modifier((data_type)va_arg(args, \
+ unsigned int)); \
} \
va_end(args); \
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, buffer_type, buf, len, "%s: ", __func__); \
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, \
+ par->info->device, buffer_type, buf, len, \
+ "%s: ", __func__); \
} \
\
va_start(args, len); \
@@ -37,7 +40,8 @@ void func(struct fbtft_par *par, int len, ...) \
} \
\
*buf = modifier((data_type)va_arg(args, unsigned int)); \
- ret = fbtft_write_buf_dc(par, par->buf, sizeof(data_type) + offset, 0); \
+ ret = fbtft_write_buf_dc(par, par->buf, sizeof(data_type) + offset, \
+ 0); \
if (ret < 0) \
goto out; \
len--; \
@@ -48,7 +52,8 @@ void func(struct fbtft_par *par, int len, ...) \
if (len) { \
i = len; \
while (i--) \
- *buf++ = modifier((data_type)va_arg(args, unsigned int)); \
+ *buf++ = modifier((data_type)va_arg(args, \
+ unsigned int)); \
fbtft_write_buf_dc(par, par->buf, \
len * (sizeof(data_type) + offset), 1); \
} \
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
index bbb7af551696..a4c4b83ddc9c 100644
--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -24,3 +24,11 @@ config FSL_DPAA2_ETHSW
---help---
Driver for Freescale DPAA2 Ethernet Switch. Select
BRIDGE to have support for bridge tools.
+
+config FSL_DPAA2_PTP_CLOCK
+ tristate "Freescale DPAA2 PTP Clock"
+ depends on FSL_DPAA2_ETH && POSIX_TIMERS
+ select PTP_1588_CLOCK
+ help
+ This driver adds support for using the DPAA2 1588 timer module
+ as a PTP clock.
diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile
index 6cfd76b29970..9c7062945758 100644
--- a/drivers/staging/fsl-dpaa2/Makefile
+++ b/drivers/staging/fsl-dpaa2/Makefile
@@ -2,5 +2,6 @@
# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
#
-obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
-obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
+obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
+obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += rtc/
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
index 553678d8b2eb..396371728aa1 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -38,8 +38,11 @@
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
-
+#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
+
+#include <net/sock.h>
+
#include "dpaa2-eth.h"
/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
@@ -275,6 +278,18 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
prefetch(skb->data);
+ /* Get the timestamp value */
+ if (priv->rx_tstamp) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ __le64 *ts = dpaa2_get_ts(vaddr, false);
+ u64 ns;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
/* Check if we need to validate the L4 csum */
if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
status = le32_to_cpu(fas->status);
@@ -334,6 +349,28 @@ static int consume_frames(struct dpaa2_eth_channel *ch)
return cleaned;
}
+/* Configure the egress frame annotation for timestamp update */
+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+{
+ struct dpaa2_faead *faead;
+ u32 ctrl, frc;
+
+ /* Mark the egress frame annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+
+ /* Set hardware annotation size */
+ ctrl = dpaa2_fd_get_ctrl(fd);
+ dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
+
+ /* enable UPD (update prepended data) bit in FAEAD field of
+ * hardware frame annotation area
+ */
+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
+ faead = dpaa2_get_faead(buf_start, true);
+ faead->ctrl = cpu_to_le32(ctrl);
+}
+
/* Create a frame descriptor based on a fragmented skb */
static int build_sg_fd(struct dpaa2_eth_priv *priv,
struct sk_buff *skb,
@@ -420,6 +457,9 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_PTA | DPAA2_FD_CTRL_PTV1);
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ enable_tx_tstamp(fd, sgt_buf);
+
return 0;
dma_map_single_failed:
@@ -470,6 +510,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_format(fd, dpaa2_fd_single);
dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_PTA | DPAA2_FD_CTRL_PTV1);
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ enable_tx_tstamp(fd, buffer_start);
+
return 0;
}
@@ -520,6 +563,19 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
return;
}
+ /* Get the timestamp value */
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ __le64 *ts = dpaa2_get_ts(skbh, true);
+ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
/* Free SGT buffer allocated on tx */
if (fd_format != dpaa2_fd_single)
skb_free_frag(skbh);
@@ -552,6 +608,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
goto err_alloc_headroom;
}
percpu_extras->tx_reallocs++;
+
+ if (skb->sk)
+ skb_set_owner_w(ns, skb->sk);
+
dev_kfree_skb(skb);
skb = ns;
}
@@ -1365,6 +1425,45 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
return 0;
}
+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->tx_tstamp = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->tx_tstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ priv->rx_tstamp = false;
+ } else {
+ priv->rx_tstamp = true;
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ if (cmd == SIOCSHWTSTAMP)
+ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+
+ return -EINVAL;
+}
+
static const struct net_device_ops dpaa2_eth_ops = {
.ndo_open = dpaa2_eth_open,
.ndo_start_xmit = dpaa2_eth_tx,
@@ -1375,6 +1474,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_change_mtu = dpaa2_eth_change_mtu,
.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
.ndo_set_features = dpaa2_eth_set_features,
+ .ndo_do_ioctl = dpaa2_eth_ioctl,
};
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -1770,7 +1870,9 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
- buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+ buf_layout.pass_timestamp = true;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &buf_layout);
if (err) {
@@ -1779,7 +1881,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
}
/* tx-confirm buffer */
- buf_layout.options = 0;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX_CONFIRM, &buf_layout);
if (err) {
@@ -1810,7 +1912,8 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
- DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_RX, &buf_layout);
if (err) {
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
index 54cea2fc6e58..905a4e6be8fa 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -88,8 +88,12 @@
#define DPAA2_ETH_SKB_SIZE \
(DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-/* Hardware annotation area in RX buffers */
+/* Hardware annotation area in RX/TX buffers */
#define DPAA2_ETH_RX_HWA_SIZE 64
+#define DPAA2_ETH_TX_HWA_SIZE 128
+
+/* PTP nominal frequency 1GHz */
+#define DPAA2_PTP_CLK_PERIOD_NS 1
/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
* to 256B. For newer revisions, the requirement is only for 64B alignment
@@ -135,6 +139,7 @@ struct dpaa2_eth_swa {
/* Annotation bits in FD CTRL */
#define DPAA2_FD_CTRL_PTA 0x00800000
#define DPAA2_FD_CTRL_PTV1 0x00400000
+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128B */
/* Frame annotation status */
struct dpaa2_fas {
@@ -150,6 +155,23 @@ struct dpaa2_fas {
#define DPAA2_FAS_OFFSET 0
#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
+/* Timestamp is located in the next 8 bytes of the buffer's
+ * hardware annotation area
+ */
+#define DPAA2_TS_OFFSET 0x8
+
+/* Frame annotation egress action descriptor */
+#define DPAA2_FAEAD_OFFSET 0x58
+
+struct dpaa2_faead {
+ __le32 conf_fqid;
+ __le32 ctrl;
+};
+
+#define DPAA2_FAEAD_A2V 0x20000000
+#define DPAA2_FAEAD_UPDV 0x00001000
+#define DPAA2_FAEAD_UPD 0x00000010
+
/* Accessors for the hardware annotation fields that we use */
static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
{
@@ -161,6 +183,16 @@ static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
}
+static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
+}
+
+static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
+}
+
/* Error and status bits in the frame annotation status word */
/* Debug frame, otherwise supposed to be discarded */
#define DPAA2_FAS_DISC 0x80000000
@@ -319,6 +351,9 @@ struct dpaa2_eth_priv {
u16 bpid;
struct iommu_domain *iommu_domain;
+ bool tx_tstamp; /* Tx timestamping enabled */
+ bool rx_tstamp; /* Rx timestamping enabled */
+
u16 tx_qdid;
u16 rx_buf_align;
struct fsl_mc_io *mc_io;
@@ -355,6 +390,7 @@ struct dpaa2_eth_priv {
extern const struct ethtool_ops dpaa2_ethtool_ops;
extern const char dpaa2_eth_drv_version[];
+extern int dpaa2_phc_index;
static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
u16 ver_major, u16 ver_minor)
@@ -377,10 +413,19 @@ static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
{
+ unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+
+ /* For non-linear skbs we have no headroom requirement, as we build a
+ * SG frame with a newly allocated SGT buffer
+ */
if (skb_is_nonlinear(skb))
return 0;
- return DPAA2_ETH_SWA_SIZE;
+ /* If we have Tx timestamping, need 128B hardware annotation */
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ headroom += DPAA2_ETH_TX_HWA_SIZE;
+
+ return headroom;
}
/* Extra headroom space requested to hardware, in order to make sure there's
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
index bfc8b64169ca..1ae779ae8c99 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -30,6 +30,8 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/net_tstamp.h>
+
#include "dpni.h" /* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"
@@ -274,6 +276,26 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
return 0;
}
+int dpaa2_phc_index = -1;
+EXPORT_SYMBOL(dpaa2_phc_index);
+
+static int dpaa2_eth_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = dpaa2_phc_index;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -283,4 +305,5 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
.get_strings = dpaa2_eth_get_strings,
.get_rxnfc = dpaa2_eth_get_rxnfc,
+ .get_ts_info = dpaa2_eth_get_ts_info,
};
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
index 1c203e6e8035..da744f2b0ee6 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
@@ -49,6 +49,8 @@
#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
+#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
+
#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
@@ -206,6 +208,17 @@ struct dpsw_cmd_if_set_tci {
__le16 conf;
};
+struct dpsw_cmd_if_get_tci {
+ __le16 if_id;
+};
+
+struct dpsw_rsp_if_get_tci {
+ __le16 pad;
+ __le16 vlan_id;
+ u8 dei;
+ u8 pcp;
+};
+
#define DPSW_STATE_SHIFT 0
#define DPSW_STATE_SIZE 4
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
index 9b9bc604b461..cabed77b445d 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
@@ -529,6 +529,48 @@ int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
}
/**
+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpsw_tci_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_tci *cmd_params;
+ struct dpsw_rsp_if_get_tci *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
+ cfg->pcp = rsp_params->pcp;
+ cfg->dei = rsp_params->dei;
+ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
+
+ return 0;
+}
+
+/**
* dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
index 3335adde0193..82f80c409ec3 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
@@ -306,6 +306,12 @@ int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
u16 if_id,
const struct dpsw_tci_cfg *cfg);
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpsw_tci_cfg *cfg);
+
/**
* enum dpsw_stp_state - Spanning Tree Protocol (STP) states
* @DPSW_STP_STATE_BLOCKING: Blocking state
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index a17dd2972ccd..0d54564e4f38 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -50,14 +50,23 @@ static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
return 0;
}
-static int ethsw_port_set_tci(struct ethsw_port_priv *port_priv,
- struct dpsw_tci_cfg *tci_cfg)
+static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
+ struct dpsw_tci_cfg tci_cfg = { 0 };
bool is_oper;
int err, ret;
+ err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
+ return err;
+ }
+
+ tci_cfg.vlan_id = pvid;
+
/* Interface needs to be down to change PVID */
is_oper = netif_oper_up(netdev);
if (is_oper) {
@@ -71,17 +80,16 @@ static int ethsw_port_set_tci(struct ethsw_port_priv *port_priv,
}
err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
- port_priv->idx, tci_cfg);
+ port_priv->idx, &tci_cfg);
if (err) {
netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
goto set_tci_error;
}
/* Delete previous PVID info and mark the new one */
- if (port_priv->pvid)
- port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
- port_priv->vlans[tci_cfg->vlan_id] |= ETHSW_VLAN_PVID;
- port_priv->pvid = tci_cfg->vlan_id;
+ port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
+ port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
+ port_priv->pvid = pvid;
set_tci_error:
if (is_oper) {
@@ -133,13 +141,7 @@ static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
}
if (flags & BRIDGE_VLAN_INFO_PVID) {
- struct dpsw_tci_cfg tci_cfg = {
- .pcp = 0,
- .dei = 0,
- .vlan_id = vid,
- };
-
- err = ethsw_port_set_tci(port_priv, &tci_cfg);
+ err = ethsw_port_set_pvid(port_priv, vid);
if (err)
return err;
}
@@ -616,10 +618,8 @@ static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
- struct fsl_mc_device_irq *irq;
int err;
- irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, 0);
if (err)
@@ -822,9 +822,7 @@ static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
return -ENOENT;
if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
- struct dpsw_tci_cfg tci_cfg = { 0 };
-
- err = ethsw_port_set_tci(port_priv, &tci_cfg);
+ err = ethsw_port_set_pvid(port_priv, 0);
if (err)
return err;
}
@@ -1260,7 +1258,6 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
struct net_device *netdev = port_priv->netdev;
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpsw_tci_cfg tci_cfg = {0};
struct dpsw_vlan_if_cfg vcfg;
int err;
@@ -1278,7 +1275,7 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
- err = ethsw_port_set_tci(port_priv, &tci_cfg);
+ err = ethsw_port_set_pvid(port_priv, 0);
if (err)
return err;
diff --git a/drivers/staging/fsl-dpaa2/rtc/Makefile b/drivers/staging/fsl-dpaa2/rtc/Makefile
new file mode 100644
index 000000000000..5468da071163
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Freescale DPAA2 PTP clock
+#
+
+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += dpaa2-rtc.o
+
+dpaa2-rtc-objs := rtc.o dprtc.o
diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
new file mode 100644
index 000000000000..db6a473430cc
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef _FSL_DPRTC_CMD_H
+#define _FSL_DPRTC_CMD_H
+
+/* DPRTC Version */
+#define DPRTC_VER_MAJOR 2
+#define DPRTC_VER_MINOR 0
+
+/* Command versioning */
+#define DPRTC_CMD_BASE_VERSION 1
+#define DPRTC_CMD_ID_OFFSET 4
+
+#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
+#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
+#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910)
+#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990)
+#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10)
+
+#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002)
+#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003)
+#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004)
+#define DPRTC_CMDID_RESET DPRTC_CMD(0x005)
+#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006)
+
+#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
+#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
+#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
+#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
+#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
+#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
+
+#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0)
+#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
+#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
+#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
+#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
+#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5)
+#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6)
+#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7)
+#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8)
+#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9)
+#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPRTC_MASK(field) \
+ GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
+ DPRTC_##field##_SHIFT)
+#define dprtc_get_field(var, field) \
+ (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dprtc_cmd_open {
+ __le32 dprtc_id;
+};
+
+struct dprtc_cmd_destroy {
+ __le32 object_id;
+};
+
+#define DPRTC_ENABLE_SHIFT 0
+#define DPRTC_ENABLE_SIZE 1
+
+struct dprtc_rsp_is_enabled {
+ u8 en;
+};
+
+struct dprtc_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dprtc_cmd_set_irq_enable {
+ u8 en;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_enable {
+ u8 en;
+};
+
+struct dprtc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dprtc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprtc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_attributes {
+ __le32 pad;
+ __le32 id;
+};
+
+struct dprtc_cmd_set_clock_offset {
+ __le64 offset;
+};
+
+struct dprtc_get_freq_compensation {
+ __le32 freq_compensation;
+};
+
+struct dprtc_time {
+ __le64 time;
+};
+
+struct dprtc_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+#pragma pack(pop)
+
+#endif /* _FSL_DPRTC_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.c b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
new file mode 100644
index 000000000000..68ae6ffefbf5
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
@@ -0,0 +1,701 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+/**
+ * dprtc_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dprtc_id: DPRTC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dprtc_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token)
+{
+ struct dprtc_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dprtc_cmd_open *)cmd.params;
+ cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprtc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_create() - Create the DPRTC object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPRTC object, allocate required resources and
+ * perform required initialization.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dprtc_cfg *cfg,
+ u32 *obj_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dprtc_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ u32 object_id)
+{
+ struct dprtc_cmd_destroy *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(object_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_enable() - Enable the DPRTC.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_disable() - Disable the DPRTC.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_is_enabled() - Check if the DPRTC is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct dprtc_rsp_is_enabled *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
+ *en = dprtc_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dprtc_reset() - Reset the DPRTC, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled no causes will cause
+ * an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct dprtc_cmd_set_irq_enable *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->en = en;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en)
+{
+ struct dprtc_rsp_get_irq_enable *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->en;
+
+ return 0;
+}
+
+/**
+ * dprtc_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct dprtc_cmd_set_irq_mask *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask)
+{
+ struct dprtc_rsp_get_irq_mask *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dprtc_get_irq_status() - Get the current status of any pending interrupts.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct dprtc_cmd_get_irq_status *cmd_params;
+ struct dprtc_rsp_get_irq_status *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
+ /*
+ * NOTE(review): the current value of *status is marshalled INTO the
+ * command before the query is sent, so callers must initialize
+ * *status before calling this "get" function — otherwise an
+ * uninitialized value is sent to the MC firmware. Presumably this
+ * acts as a cause-mask filter; confirm against the MC firmware API.
+ */
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* Overwrite *status with the pending-cause bits returned by the MC. */
+ rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dprtc_clear_irq_status() - Clear a pending interrupt's status
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @status: Bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct dprtc_cmd_clear_irq_status *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->status = cpu_to_le32(status);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_attributes() - Retrieve DPRTC attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprtc_attr *attr)
+{
+ struct dprtc_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+
+ return 0;
+}
+
+/**
+ * dprtc_set_clock_offset() - Sets the clock's offset
+ * (usually relative to another clock).
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @offset: New clock offset (in nanoseconds).
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int64_t offset)
+{
+ struct dprtc_cmd_set_clock_offset *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
+ cmd_params->offset = cpu_to_le64(offset);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @freq_compensation: The new frequency compensation value to set.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 freq_compensation)
+{
+ struct dprtc_get_freq_compensation *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
+ cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @freq_compensation: Frequency compensation value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 *freq_compensation)
+{
+ struct dprtc_get_freq_compensation *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
+ *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
+
+ return 0;
+}
+
+/**
+ * dprtc_get_time() - Returns the current RTC time.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @time: Current RTC time.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t *time)
+{
+ struct dprtc_time *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_time *)cmd.params;
+ *time = le64_to_cpu(rsp_params->time);
+
+ return 0;
+}
+
+/**
+ * dprtc_set_time() - Updates current RTC time.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @time: New RTC time.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t time)
+{
+ struct dprtc_time *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_time *)cmd.params;
+ cmd_params->time = cpu_to_le64(time);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_alarm() - Defines and sets alarm.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @time: In nanoseconds, the time when the alarm
+ * should go off - must be a multiple of
+ * 1 microsecond
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_alarm(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token, uint64_t time)
+{
+ struct dprtc_time *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_time *)cmd.params;
+ cmd_params->time = cpu_to_le64(time);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_api_version() - Get Data Path Real Time Counter API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path real time counter API
+ * @minor_ver: Minor version of data path real time counter API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct dprtc_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.h b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
new file mode 100644
index 000000000000..08f7c7bebbca
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef __FSL_DPRTC_H
+#define __FSL_DPRTC_H
+
+/* Data Path Real Time Counter API
+ * Contains initialization APIs and runtime control APIs for RTC
+ */
+
+struct fsl_mc_io;
+
+/**
+ * Number of irq's
+ */
+#define DPRTC_MAX_IRQ_NUM 1
+#define DPRTC_IRQ_INDEX 0
+
+/**
+ * Interrupt event masks:
+ */
+
+/**
+ * Interrupt event mask indicating alarm event had occurred
+ */
+#define DPRTC_EVENT_ALARM 0x40000000
+/**
+ * Interrupt event mask indicating periodic pulse event had occurred
+ */
+#define DPRTC_EVENT_PPS 0x08000000
+
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token);
+
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dprtc_cfg - Structure representing DPRTC configuration
+ * @options: place holder
+ */
+struct dprtc_cfg {
+ u32 options;
+};
+
+int dprtc_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dprtc_cfg *cfg,
+ u32 *obj_id);
+
+int dprtc_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ u32 object_id);
+
+int dprtc_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dprtc_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dprtc_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dprtc_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int64_t offset);
+
+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 freq_compensation);
+
+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 *freq_compensation);
+
+int dprtc_get_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t *time);
+
+int dprtc_set_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t time);
+
+int dprtc_set_alarm(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t time);
+
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dprtc_attr - Structure representing DPRTC attributes
+ * @id: DPRTC object ID
+ */
+struct dprtc_attr {
+ int id;
+};
+
+int dprtc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprtc_attr *attr);
+
+int dprtc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+#endif /* __FSL_DPRTC_H */
diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/staging/fsl-dpaa2/rtc/rtc.c
new file mode 100644
index 000000000000..0d52cb85441f
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "rtc.h"
+
+struct ptp_dpaa2_priv {
+ struct fsl_mc_device *rtc_mc_dev;
+ struct ptp_clock *clock;
+ struct ptp_clock_info caps;
+ u32 freq_comp;
+};
+
+/* PTP clock operations */
+static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2 =
+ container_of(ptp, struct ptp_dpaa2_priv, caps);
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
+ struct device *dev = &mc_dev->dev;
+ u64 adj;
+ u32 diff, tmr_add;
+ int neg_adj = 0;
+ int err = 0;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ tmr_add = ptp_dpaa2->freq_comp;
+ adj = tmr_add;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+
+ err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
+ mc_dev->mc_handle, tmr_add);
+ if (err)
+ dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
+ return 0;
+}
+
+/*
+ * Shift the DPRTC hardware clock by 'delta' nanoseconds: read the current
+ * time from the MC firmware, add the offset, and write it back.
+ */
+static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2 =
+ container_of(ptp, struct ptp_dpaa2_priv, caps);
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
+ struct device *dev = &mc_dev->dev;
+ /*
+ * NOTE(review): dprtc_get_time() takes a uint64_t *, so passing &now
+ * (an s64 *) below is a pointer type mismatch that will trigger a
+ * compiler warning — 'now' should be u64 (delta can still be added
+ * as a signed offset).
+ */
+ s64 now;
+ int err = 0;
+
+ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
+ if (err) {
+ /* Errors are logged but swallowed (returns 0), as in the
+ * other clock ops in this driver.
+ */
+ dev_err(dev, "dprtc_get_time err %d\n", err);
+ return 0;
+ }
+
+ now += delta;
+
+ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
+ if (err) {
+ dev_err(dev, "dprtc_set_time err %d\n", err);
+ return 0;
+ }
+ return 0;
+}
+
+static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2 =
+ container_of(ptp, struct ptp_dpaa2_priv, caps);
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
+ struct device *dev = &mc_dev->dev;
+ u64 ns;
+ u32 remainder;
+ int err = 0;
+
+ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
+ if (err) {
+ dev_err(dev, "dprtc_get_time err %d\n", err);
+ return 0;
+ }
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+ return 0;
+}
+
+static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2 =
+ container_of(ptp, struct ptp_dpaa2_priv, caps);
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
+ struct device *dev = &mc_dev->dev;
+ u64 ns;
+ int err = 0;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
+ if (err)
+ dev_err(dev, "dprtc_set_time err %d\n", err);
+ return 0;
+}
+
+static struct ptp_clock_info ptp_dpaa2_caps = {
+ .owner = THIS_MODULE,
+ .name = "DPAA2 PTP Clock",
+ .max_adj = 512000,
+ .n_alarm = 2,
+ .n_ext_ts = 2,
+ .n_per_out = 3,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfreq = ptp_dpaa2_adjfreq,
+ .adjtime = ptp_dpaa2_adjtime,
+ .gettime64 = ptp_dpaa2_gettime,
+ .settime64 = ptp_dpaa2_settime,
+};
+
+static int rtc_probe(struct fsl_mc_device *mc_dev)
+{
+ struct device *dev = &mc_dev->dev;
+ struct ptp_dpaa2_priv *ptp_dpaa2;
+ u32 tmr_add = 0;
+ int err;
+
+ ptp_dpaa2 = kzalloc(sizeof(*ptp_dpaa2), GFP_KERNEL);
+ if (!ptp_dpaa2)
+ return -ENOMEM;
+
+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+ if (err) {
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_exit;
+ }
+
+ err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ &mc_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dprtc_open err %d\n", err);
+ goto err_free_mcp;
+ }
+
+ ptp_dpaa2->rtc_mc_dev = mc_dev;
+
+ err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
+ mc_dev->mc_handle, &tmr_add);
+ if (err) {
+ dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
+ goto err_close;
+ }
+
+ ptp_dpaa2->freq_comp = tmr_add;
+ ptp_dpaa2->caps = ptp_dpaa2_caps;
+
+ ptp_dpaa2->clock = ptp_clock_register(&ptp_dpaa2->caps, dev);
+ if (IS_ERR(ptp_dpaa2->clock)) {
+ err = PTR_ERR(ptp_dpaa2->clock);
+ goto err_close;
+ }
+
+ dpaa2_phc_index = ptp_clock_index(ptp_dpaa2->clock);
+
+ dev_set_drvdata(dev, ptp_dpaa2);
+
+ return 0;
+
+err_close:
+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+err_free_mcp:
+ fsl_mc_portal_free(mc_dev->mc_io);
+err_exit:
+ kfree(ptp_dpaa2);
+ dev_set_drvdata(dev, NULL);
+ return err;
+}
+
+static int rtc_remove(struct fsl_mc_device *mc_dev)
+{
+ struct ptp_dpaa2_priv *ptp_dpaa2;
+ struct device *dev = &mc_dev->dev;
+
+ ptp_dpaa2 = dev_get_drvdata(dev);
+ ptp_clock_unregister(ptp_dpaa2->clock);
+
+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ fsl_mc_portal_free(mc_dev->mc_io);
+
+ kfree(ptp_dpaa2);
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id rtc_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dprtc",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(fslmc, rtc_match_id_table);
+
+static struct fsl_mc_driver rtc_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = rtc_probe,
+ .remove = rtc_remove,
+ .match_id_table = rtc_match_id_table,
+};
+
+module_fsl_mc_driver(rtc_drv);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 PTP Clock Driver");
diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.h b/drivers/staging/fsl-dpaa2/rtc/rtc.h
new file mode 100644
index 000000000000..ff2e177395d4
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTC_H
+#define __RTC_H
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+extern int dpaa2_phc_index;
+
+#endif
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
index 182b38412a82..11a90a90d827 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
@@ -13,6 +13,7 @@
#include <linux/msi.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/fsl/mc.h>
#include "../../include/dpaa2-io.h"
@@ -100,7 +101,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
if (err) {
dev_dbg(dev, "MC portal allocation failed\n");
err = -EPROBE_DEFER;
- goto err_mcportal;
+ goto err_priv_alloc;
}
err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
@@ -146,10 +147,22 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
* Set the CENA regs to be the cache inhibited area of the portal to
* avoid coherency issues if a user migrates to another core.
*/
- desc.regs_cena = ioremap_wc(dpio_dev->regions[1].start,
- resource_size(&dpio_dev->regions[1]));
- desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
- resource_size(&dpio_dev->regions[1]));
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]),
+ MEMREMAP_WC);
+ if (IS_ERR(desc.regs_cena)) {
+ dev_err(dev, "devm_memremap failed\n");
+ err = PTR_ERR(desc.regs_cena);
+ goto err_allocate_irqs;
+ }
+
+ desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]));
+ if (!desc.regs_cinh) {
+ err = -ENOMEM;
+ dev_err(dev, "devm_ioremap failed\n");
+ goto err_allocate_irqs;
+ }
err = fsl_mc_allocate_irqs(dpio_dev);
if (err) {
@@ -164,6 +177,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
priv->io = dpaa2_io_create(&desc);
if (!priv->io) {
dev_err(dev, "dpaa2_io_create failed\n");
+ err = -ENOMEM;
goto err_dpaa2_io_create;
}
@@ -185,8 +199,6 @@ err_get_attr:
dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
err_open:
fsl_mc_portal_free(dpio_dev->mc_io);
-err_mcportal:
- dev_set_drvdata(dev, NULL);
err_priv_alloc:
return err;
}
@@ -230,8 +242,6 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
fsl_mc_portal_free(dpio_dev->mc_io);
- dev_set_drvdata(dev, NULL);
-
return 0;
err_open:
diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
index 4488a445b709..69db3c818742 100644
--- a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
@@ -15,7 +15,7 @@ struct qbman_swp;
/* qbman software portal descriptor structure */
struct qbman_swp_desc {
void *cena_bar; /* Cache-enabled portal base address */
- void *cinh_bar; /* Cache-inhibited portal base address */
+ void __iomem *cinh_bar; /* Cache-inhibited portal base address */
u32 qman_version;
};
@@ -102,7 +102,7 @@ struct qbman_release_desc {
/* portal data structure */
struct qbman_swp {
const struct qbman_swp_desc *desc;
- void __iomem *addr_cena;
+ void *addr_cena;
void __iomem *addr_cinh;
/* Management commands */
diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h
index f71227d3df8d..ab51e40d11db 100644
--- a/drivers/staging/fsl-mc/include/dpaa2-io.h
+++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
@@ -52,7 +52,7 @@ struct dpaa2_io_desc {
int has_8prio;
int cpu;
void *regs_cena;
- void *regs_cinh;
+ void __iomem *regs_cinh;
int dpio_id;
u32 qman_version;
};
diff --git a/drivers/staging/gdm724x/gdm_endian.c b/drivers/staging/gdm724x/gdm_endian.c
index 4200391b1a97..ae39e59daf70 100644
--- a/drivers/staging/gdm724x/gdm_endian.c
+++ b/drivers/staging/gdm724x/gdm_endian.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#include <linux/kernel.h>
#include "gdm_endian.h"
diff --git a/drivers/staging/gdm724x/gdm_endian.h b/drivers/staging/gdm724x/gdm_endian.h
index e58d29f868ba..f373dc3a19bf 100644
--- a/drivers/staging/gdm724x/gdm_endian.h
+++ b/drivers/staging/gdm724x/gdm_endian.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#ifndef __GDM_ENDIAN_H__
#define __GDM_ENDIAN_H__
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 4f3c518304f2..3c2aab7a921e 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -408,7 +398,7 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
return nic_type;
}
-static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
{
struct nic *nic = netdev_priv(dev);
u32 nic_type;
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
index bad0855e4721..f2143a6e0e99 100644
--- a/drivers/staging/gdm724x/gdm_lte.h
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#ifndef _GDM_LTE_H_
#define _GDM_LTE_H_
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 63921bad519e..e2a050ba6fbb 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/gdm724x/gdm_mux.h b/drivers/staging/gdm724x/gdm_mux.h
index 0871b8feec55..51c22e3d8aeb 100644
--- a/drivers/staging/gdm724x/gdm_mux.h
+++ b/drivers/staging/gdm724x/gdm_mux.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#ifndef _GDM_MUX_H_
#define _GDM_MUX_H_
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index 3cdebb81ba63..bf554f7c56ca 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/gdm724x/gdm_tty.h b/drivers/staging/gdm724x/gdm_tty.h
index 195c5902989f..afec97ced476 100644
--- a/drivers/staging/gdm724x/gdm_tty.h
+++ b/drivers/staging/gdm724x/gdm_tty.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#ifndef _GDM_TTY_H_
#define _GDM_TTY_H_
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index c95bad4a8615..0218782d1a08 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/gdm724x/gdm_usb.h b/drivers/staging/gdm724x/gdm_usb.h
index 701038685e23..db689b091c4f 100644
--- a/drivers/staging/gdm724x/gdm_usb.h
+++ b/drivers/staging/gdm724x/gdm_usb.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#ifndef _GDM_USB_H_
#define _GDM_USB_H_
diff --git a/drivers/staging/gdm724x/hci.h b/drivers/staging/gdm724x/hci.h
index 9a591b0db516..b30945daf3a5 100644
--- a/drivers/staging/gdm724x/hci.h
+++ b/drivers/staging/gdm724x/hci.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#ifndef _HCI_H_
#define _HCI_H_
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
index 22ce8b9477b6..83fbd2515467 100644
--- a/drivers/staging/gdm724x/hci_packet.h
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#ifndef _HCI_PACKET_H_
#define _HCI_PACKET_H_
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
index abe242505882..92440c3f055b 100644
--- a/drivers/staging/gdm724x/netlink_k.c
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h
index 5ebd73157f5a..c9e1d3b2d54f 100644
--- a/drivers/staging/gdm724x/netlink_k.h
+++ b/drivers/staging/gdm724x/netlink_k.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
#ifndef _NETLINK_K_H
#define _NETLINK_K_H
diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig
index 4e094602437c..9165385df9de 100644
--- a/drivers/staging/goldfish/Kconfig
+++ b/drivers/staging/goldfish/Kconfig
@@ -1,13 +1,6 @@
config GOLDFISH_AUDIO
tristate "Goldfish AVD Audio Device"
depends on GOLDFISH
- ---help---
+ help
Emulated audio channel for the Goldfish Android Virtual Device
-config MTD_GOLDFISH_NAND
- tristate "Goldfish NAND device"
- depends on GOLDFISH
- depends on MTD
- help
- Drives the emulated NAND flash device on the Google Goldfish
- Android virtual device.
diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile
index dec34ad58162..054eeb82151e 100644
--- a/drivers/staging/goldfish/Makefile
+++ b/drivers/staging/goldfish/Makefile
@@ -3,4 +3,3 @@
#
obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o
-obj-$(CONFIG_MTD_GOLDFISH_NAND) += goldfish_nand.o
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
deleted file mode 100644
index f5e002ecba75..000000000000
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * drivers/mtd/devices/goldfish_nand.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (C) 2012 Intel, Inc.
- * Copyright (C) 2013 Intel, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/io.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/vmalloc.h>
-#include <linux/mtd/mtd.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/goldfish.h>
-#include <asm/div64.h>
-#include <linux/dma-mapping.h>
-
-#include "goldfish_nand_reg.h"
-
-struct goldfish_nand {
- /* lock protects access to the device registers */
- struct mutex lock;
- unsigned char __iomem *base;
- struct cmd_params *cmd_params;
- size_t mtd_count;
- struct mtd_info mtd[0];
-};
-
-static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
- enum nand_cmd cmd, u64 addr, u32 len,
- void *ptr, u32 *rv)
-{
- u32 cmdp;
- struct goldfish_nand *nand = mtd->priv;
- struct cmd_params *cps = nand->cmd_params;
- unsigned char __iomem *base = nand->base;
-
- if (!cps)
- return -1;
-
- switch (cmd) {
- case NAND_CMD_ERASE:
- cmdp = NAND_CMD_ERASE_WITH_PARAMS;
- break;
- case NAND_CMD_READ:
- cmdp = NAND_CMD_READ_WITH_PARAMS;
- break;
- case NAND_CMD_WRITE:
- cmdp = NAND_CMD_WRITE_WITH_PARAMS;
- break;
- default:
- return -1;
- }
- cps->dev = mtd - nand->mtd;
- cps->addr_high = (u32)(addr >> 32);
- cps->addr_low = (u32)addr;
- cps->transfer_size = len;
- cps->data = (unsigned long)ptr;
- writel(cmdp, base + NAND_COMMAND);
- *rv = cps->result;
- return 0;
-}
-
-static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
- u64 addr, u32 len, void *ptr)
-{
- struct goldfish_nand *nand = mtd->priv;
- u32 rv;
- unsigned char __iomem *base = nand->base;
-
- mutex_lock(&nand->lock);
- if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
- writel(mtd - nand->mtd, base + NAND_DEV);
- writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
- writel((u32)addr, base + NAND_ADDR_LOW);
- writel(len, base + NAND_TRANSFER_SIZE);
- gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH);
- writel(cmd, base + NAND_COMMAND);
- rv = readl(base + NAND_RESULT);
- }
- mutex_unlock(&nand->lock);
- return rv;
-}
-
-static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- loff_t ofs = instr->addr;
- u32 len = instr->len;
- s32 rem;
-
- if (ofs + len > mtd->size)
- goto invalid_arg;
- ofs = div_s64_rem(ofs, mtd->writesize, &rem);
- if (rem)
- goto invalid_arg;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (len % mtd->writesize)
- goto invalid_arg;
- len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);
-
- if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
- pr_err("%s: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
- __func__, ofs, len, mtd->size, mtd->erasesize);
- return -EIO;
- }
-
- return 0;
-
-invalid_arg:
- pr_err("%s: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
- __func__, ofs, len, mtd->size, mtd->erasesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- s32 rem;
-
- if (ofs + ops->len > mtd->size)
- goto invalid_arg;
- if (ops->datbuf && ops->len && ops->len != mtd->writesize)
- goto invalid_arg;
- if (ops->ooblen + ops->ooboffs > mtd->oobsize)
- goto invalid_arg;
-
- ofs = div_s64_rem(ofs, mtd->writesize, &rem);
- if (rem)
- goto invalid_arg;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (ops->datbuf)
- ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
- ops->len, ops->datbuf);
- ofs += mtd->writesize + ops->ooboffs;
- if (ops->oobbuf)
- ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
- ops->ooblen, ops->oobbuf);
- return 0;
-
-invalid_arg:
- pr_err("%s: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
- __func__, ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- s32 rem;
-
- if (ofs + ops->len > mtd->size)
- goto invalid_arg;
- if (ops->len && ops->len != mtd->writesize)
- goto invalid_arg;
- if (ops->ooblen + ops->ooboffs > mtd->oobsize)
- goto invalid_arg;
-
- ofs = div_s64_rem(ofs, mtd->writesize, &rem);
- if (rem)
- goto invalid_arg;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (ops->datbuf)
- ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
- ops->len, ops->datbuf);
- ofs += mtd->writesize + ops->ooboffs;
- if (ops->oobbuf)
- ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
- ops->ooblen, ops->oobbuf);
- return 0;
-
-invalid_arg:
- pr_err("%s: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
- __func__, ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- s32 rem;
-
- if (from + len > mtd->size)
- goto invalid_arg;
-
- from = div_s64_rem(from, mtd->writesize, &rem);
- if (rem)
- goto invalid_arg;
- from *= (mtd->writesize + mtd->oobsize);
-
- *retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf);
- return 0;
-
-invalid_arg:
- pr_err("%s: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n",
- __func__, from, len, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- s32 rem;
-
- if (to + len > mtd->size)
- goto invalid_arg;
-
- to = div_s64_rem(to, mtd->writesize, &rem);
- if (rem)
- goto invalid_arg;
- to *= (mtd->writesize + mtd->oobsize);
-
- *retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf);
- return 0;
-
-invalid_arg:
- pr_err("%s: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n",
- __func__, to, len, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
-{
- s32 rem;
-
- if (ofs >= mtd->size)
- goto invalid_arg;
-
- ofs = div_s64_rem(ofs, mtd->writesize, &rem);
- if (rem)
- goto invalid_arg;
- ofs *= mtd->erasesize / mtd->writesize;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);
-
-invalid_arg:
- pr_err("%s: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
- __func__, ofs, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- s32 rem;
-
- if (ofs >= mtd->size)
- goto invalid_arg;
-
- ofs = div_s64_rem(ofs, mtd->writesize, &rem);
- if (rem)
- goto invalid_arg;
- ofs *= mtd->erasesize / mtd->writesize;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1)
- return -EIO;
- return 0;
-
-invalid_arg:
- pr_err("%s: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
- __func__, ofs, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int nand_setup_cmd_params(struct platform_device *pdev,
- struct goldfish_nand *nand)
-{
- dma_addr_t dma_handle;
- unsigned char __iomem *base = nand->base;
-
- nand->cmd_params = dmam_alloc_coherent(&pdev->dev,
- sizeof(struct cmd_params),
- &dma_handle, GFP_KERNEL);
- if (!nand->cmd_params) {
- dev_err(&pdev->dev, "allocate buffer failed\n");
- return -ENOMEM;
- }
- writel((u32)((u64)dma_handle >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
- writel((u32)dma_handle, base + NAND_CMD_PARAMS_ADDR_LOW);
- return 0;
-}
-
-static int goldfish_nand_init_device(struct platform_device *pdev,
- struct goldfish_nand *nand, int id)
-{
- u32 name_len;
- u32 result;
- u32 flags;
- unsigned char __iomem *base = nand->base;
- struct mtd_info *mtd = &nand->mtd[id];
- char *name;
-
- mutex_lock(&nand->lock);
- writel(id, base + NAND_DEV);
- flags = readl(base + NAND_DEV_FLAGS);
- name_len = readl(base + NAND_DEV_NAME_LEN);
- mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
- mtd->size = readl(base + NAND_DEV_SIZE_LOW);
- mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
- mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
- mtd->oobavail = mtd->oobsize;
- mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
- (mtd->writesize + mtd->oobsize) * mtd->writesize;
- mtd->size = div_s64(mtd->size, mtd->writesize + mtd->oobsize);
- mtd->size *= mtd->writesize;
- dev_dbg(&pdev->dev,
- "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
- id, mtd->size, mtd->writesize,
- mtd->oobsize, mtd->erasesize);
- mutex_unlock(&nand->lock);
-
- mtd->priv = nand;
-
- name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
- mtd->name = name;
-
- result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
- name);
- if (result != name_len) {
- dev_err(&pdev->dev,
- "%s: failed to get dev name %d != %d\n",
- __func__, result, name_len);
- return -ENODEV;
- }
- ((char *)mtd->name)[name_len] = '\0';
-
- /* Setup the MTD structure */
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- if (flags & NAND_DEV_FLAG_READ_ONLY)
- mtd->flags &= ~MTD_WRITEABLE;
- if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
- nand_setup_cmd_params(pdev, nand);
-
- mtd->owner = THIS_MODULE;
- mtd->_erase = goldfish_nand_erase;
- mtd->_read = goldfish_nand_read;
- mtd->_write = goldfish_nand_write;
- mtd->_read_oob = goldfish_nand_read_oob;
- mtd->_write_oob = goldfish_nand_write_oob;
- mtd->_block_isbad = goldfish_nand_block_isbad;
- mtd->_block_markbad = goldfish_nand_block_markbad;
-
- if (mtd_device_register(mtd, NULL, 0))
- return -EIO;
-
- return 0;
-}
-
-static int goldfish_nand_probe(struct platform_device *pdev)
-{
- u32 num_dev;
- int i;
- int err;
- u32 num_dev_working;
- u32 version;
- struct resource *r;
- struct goldfish_nand *nand;
- unsigned char __iomem *base;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
- if (!base)
- return -ENOMEM;
-
- version = readl(base + NAND_VERSION);
- if (version != NAND_VERSION_CURRENT) {
- dev_err(&pdev->dev,
- "goldfish_nand_init: version mismatch, got %d, expected %d\n",
- version, NAND_VERSION_CURRENT);
- return -ENODEV;
- }
- num_dev = readl(base + NAND_NUM_DEV);
- if (num_dev == 0)
- return -ENODEV;
-
- nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
- sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
- if (!nand)
- return -ENOMEM;
-
- mutex_init(&nand->lock);
- nand->base = base;
- nand->mtd_count = num_dev;
- platform_set_drvdata(pdev, nand);
-
- num_dev_working = 0;
- for (i = 0; i < num_dev; i++) {
- err = goldfish_nand_init_device(pdev, nand, i);
- if (err == 0)
- num_dev_working++;
- }
- if (num_dev_working == 0)
- return -ENODEV;
- return 0;
-}
-
-static int goldfish_nand_remove(struct platform_device *pdev)
-{
- struct goldfish_nand *nand = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < nand->mtd_count; i++) {
- if (nand->mtd[i].name)
- mtd_device_unregister(&nand->mtd[i]);
- }
- return 0;
-}
-
-static struct platform_driver goldfish_nand_driver = {
- .probe = goldfish_nand_probe,
- .remove = goldfish_nand_remove,
- .driver = {
- .name = "goldfish_nand"
- }
-};
-
-module_platform_driver(goldfish_nand_driver);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/goldfish/goldfish_nand_reg.h b/drivers/staging/goldfish/goldfish_nand_reg.h
deleted file mode 100644
index 43aeba3a4c8f..000000000000
--- a/drivers/staging/goldfish/goldfish_nand_reg.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * drivers/mtd/devices/goldfish_nand_reg.h
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef GOLDFISH_NAND_REG_H
-#define GOLDFISH_NAND_REG_H
-
-enum nand_cmd {
- /* Write device name for NAND_DEV to NAND_DATA (vaddr) */
- NAND_CMD_GET_DEV_NAME,
- NAND_CMD_READ,
- NAND_CMD_WRITE,
- NAND_CMD_ERASE,
- /* NAND_RESULT is 1 if block is bad, 0 if it is not */
- NAND_CMD_BLOCK_BAD_GET,
- NAND_CMD_BLOCK_BAD_SET,
- NAND_CMD_READ_WITH_PARAMS,
- NAND_CMD_WRITE_WITH_PARAMS,
- NAND_CMD_ERASE_WITH_PARAMS
-};
-
-enum nand_dev_flags {
- NAND_DEV_FLAG_READ_ONLY = 0x00000001,
- NAND_DEV_FLAG_CMD_PARAMS_CAP = 0x00000002,
-};
-
-#define NAND_VERSION_CURRENT (1)
-
-enum nand_reg {
- /* Global */
- NAND_VERSION = 0x000,
- NAND_NUM_DEV = 0x004,
- NAND_DEV = 0x008,
-
- /* Dev info */
- NAND_DEV_FLAGS = 0x010,
- NAND_DEV_NAME_LEN = 0x014,
- NAND_DEV_PAGE_SIZE = 0x018,
- NAND_DEV_EXTRA_SIZE = 0x01c,
- NAND_DEV_ERASE_SIZE = 0x020,
- NAND_DEV_SIZE_LOW = 0x028,
- NAND_DEV_SIZE_HIGH = 0x02c,
-
- /* Command */
- NAND_RESULT = 0x040,
- NAND_COMMAND = 0x044,
- NAND_DATA = 0x048,
- NAND_DATA_HIGH = 0x100,
- NAND_TRANSFER_SIZE = 0x04c,
- NAND_ADDR_LOW = 0x050,
- NAND_ADDR_HIGH = 0x054,
- NAND_CMD_PARAMS_ADDR_LOW = 0x058,
- NAND_CMD_PARAMS_ADDR_HIGH = 0x05c,
-};
-
-struct cmd_params {
- u32 dev;
- u32 addr_low;
- u32 addr_high;
- u32 transfer_size;
- unsigned long data;
- u32 result;
-};
-#endif
diff --git a/drivers/staging/greybus/TODO b/drivers/staging/greybus/TODO
new file mode 100644
index 000000000000..3b90a5711998
--- /dev/null
+++ b/drivers/staging/greybus/TODO
@@ -0,0 +1,5 @@
+* Convert all uses of the old GPIO API from <linux/gpio.h> to the
+ GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
+ lines from device tree or ACPI.
+* Convert the GPIO driver to use the GPIO irqchip library
+ GPIOLIB_IRQCHIP instead of reimplementing the same.
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 83254a72a7bb..4c36e88766e7 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -315,8 +315,7 @@ static ssize_t state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
+ struct arche_platform_drvdata *arche_pdata = dev_get_drvdata(dev);
int ret = 0;
mutex_lock(&arche_pdata->platform_state_mutex);
@@ -448,7 +447,7 @@ static int arche_platform_probe(struct platform_device *pdev)
arche_pdata->svc_reset_gpio = of_get_named_gpio(np,
"svc,reset-gpio",
0);
- if (arche_pdata->svc_reset_gpio < 0) {
+ if (!gpio_is_valid(arche_pdata->svc_reset_gpio)) {
dev_err(dev, "failed to get reset-gpio\n");
return arche_pdata->svc_reset_gpio;
}
@@ -468,7 +467,7 @@ static int arche_platform_probe(struct platform_device *pdev)
arche_pdata->svc_sysboot_gpio = of_get_named_gpio(np,
"svc,sysboot-gpio",
0);
- if (arche_pdata->svc_sysboot_gpio < 0) {
+ if (!gpio_is_valid(arche_pdata->svc_sysboot_gpio)) {
dev_err(dev, "failed to get sysboot gpio\n");
return arche_pdata->svc_sysboot_gpio;
}
@@ -487,7 +486,7 @@ static int arche_platform_probe(struct platform_device *pdev)
arche_pdata->svc_refclk_req = of_get_named_gpio(np,
"svc,refclk-req-gpio",
0);
- if (arche_pdata->svc_refclk_req < 0) {
+ if (!gpio_is_valid(arche_pdata->svc_refclk_req)) {
dev_err(dev, "failed to get svc clock-req gpio\n");
return arche_pdata->svc_refclk_req;
}
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index a1d5440552d4..4efd8b3ebe07 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -23,7 +23,10 @@ enum {
NUM_CODEC_DAIS,
};
-/* device_type should be same as defined in audio.h (Android media layer) */
+/*
+ * device_type should be same as defined in audio.h
+ * (Android media layer)
+ */
enum {
GBAUDIO_DEVICE_NONE = 0x0,
/* reserved bits */
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index de4b1b2b12f3..15e57f701630 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -996,7 +996,7 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
ret = gbaudio_validate_kcontrol_count(w);
if (ret) {
- dev_err(module->dev, "Inavlid kcontrol count=%d for %s\n",
+ dev_err(module->dev, "Invalid kcontrol count=%d for %s\n",
w->ncontrols, w->name);
return ret;
}
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index 07ebfb88db9b..341f729a9779 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -1174,11 +1174,6 @@ static int gb_camera_debugfs_init(struct gb_camera *gcam)
gcam->bundle->id);
gcam->debugfs.root = debugfs_create_dir(dirname, gb_debugfs_get());
- if (IS_ERR(gcam->debugfs.root)) {
- gcam_err(gcam, "debugfs root create failed (%ld)\n",
- PTR_ERR(gcam->debugfs.root));
- return PTR_ERR(gcam->debugfs.root);
- }
gcam->debugfs.buffers = vmalloc(sizeof(*gcam->debugfs.buffers) *
GB_CAMERA_DEBUGFS_BUFFER_MAX);
@@ -1188,18 +1183,12 @@ static int gb_camera_debugfs_init(struct gb_camera *gcam)
for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
const struct gb_camera_debugfs_entry *entry =
&gb_camera_debugfs_entries[i];
- struct dentry *dentry;
gcam->debugfs.buffers[i].length = 0;
- dentry = debugfs_create_file(entry->name, entry->mask,
- gcam->debugfs.root, gcam,
- &gb_camera_debugfs_ops);
- if (IS_ERR(dentry)) {
- gcam_err(gcam,
- "debugfs operation %s create failed (%ld)\n",
- entry->name, PTR_ERR(dentry));
- return PTR_ERR(dentry);
+ debugfs_create_file(entry->name, entry->mask,
+ gcam->debugfs.root, gcam,
+ &gb_camera_debugfs_ops);
}
}
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
index a874fed761a1..a2bb7e1a3db3 100644
--- a/drivers/staging/greybus/svc.c
+++ b/drivers/staging/greybus/svc.c
@@ -1137,7 +1137,6 @@ static int gb_svc_intf_reset_recv(struct gb_operation *op)
struct gb_svc *svc = gb_connection_get_data(op->connection);
struct gb_message *request = op->request;
struct gb_svc_intf_reset_request *reset;
- u8 intf_id;
if (request->payload_size < sizeof(*reset)) {
dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
@@ -1146,8 +1145,6 @@ static int gb_svc_intf_reset_recv(struct gb_operation *op)
}
reset = request->payload;
- intf_id = reset->intf_id;
-
/* FIXME Reset the interface here */
return 0;
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index bd9445956511..aee2335a25a1 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -11,7 +11,6 @@ source "drivers/staging/iio/cdc/Kconfig"
source "drivers/staging/iio/frequency/Kconfig"
source "drivers/staging/iio/gyro/Kconfig"
source "drivers/staging/iio/impedance-analyzer/Kconfig"
-source "drivers/staging/iio/light/Kconfig"
source "drivers/staging/iio/meter/Kconfig"
source "drivers/staging/iio/resolver/Kconfig"
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index e99a375c07b9..c28d657497de 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -10,6 +10,5 @@ obj-y += cdc/
obj-y += frequency/
obj-y += gyro/
obj-y += impedance-analyzer/
-obj-y += light/
obj-y += meter/
obj-y += resolver/
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 4922402e2e98..1b8ebf2c1b69 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -1,4 +1,11 @@
-2016 10/09
+2018-04-15
+
+All affected drivers:
+Convert all uses of the old GPIO API from <linux/gpio.h> to the
+GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
+lines from device tree, ACPI or board files, board files should
+use <linux/gpio/machine.h>.
+
ADI Drivers:
CC the device-drivers-devel@blackfin.uclinux.org mailing list when
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index c6b0f5eae7ab..befbbfe911c2 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -3,18 +3,6 @@
#
menu "Accelerometers"
-config ADIS16201
- tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16201 dual-axis
- digital inclinometer and accelerometer.
-
- To compile this driver as a module, say M here: the module will
- be called adis16201.
-
config ADIS16203
tristate "Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"
depends on SPI
@@ -27,18 +15,6 @@ config ADIS16203
To compile this driver as a module, say M here: the module will be
called adis16203.
-config ADIS16209
- tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16209 dual-axis digital inclinometer
- and accelerometer.
-
- To compile this driver as a module, say M here: the module will be
- called adis16209.
-
config ADIS16240
tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 5d8ad21a0dae..773212e0c859 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -2,7 +2,5 @@
# Makefile for industrial I/O accelerometer drivers
#
-obj-$(CONFIG_ADIS16201) += adis16201.o
obj-$(CONFIG_ADIS16203) += adis16203.o
-obj-$(CONFIG_ADIS16209) += adis16209.o
obj-$(CONFIG_ADIS16240) += adis16240.o
diff --git a/drivers/staging/iio/accel/adis16201.c b/drivers/staging/iio/accel/adis16201.c
deleted file mode 100644
index 0fae8aaf1cf4..000000000000
--- a/drivers/staging/iio/accel/adis16201.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16201_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16201_FLASH_CNT 0x00
-
-/* Output, power supply */
-#define ADIS16201_SUPPLY_OUT 0x02
-
-/* Output, x-axis accelerometer */
-#define ADIS16201_XACCL_OUT 0x04
-
-/* Output, y-axis accelerometer */
-#define ADIS16201_YACCL_OUT 0x06
-
-/* Output, auxiliary ADC input */
-#define ADIS16201_AUX_ADC 0x08
-
-/* Output, temperature */
-#define ADIS16201_TEMP_OUT 0x0A
-
-/* Output, x-axis inclination */
-#define ADIS16201_XINCL_OUT 0x0C
-
-/* Output, y-axis inclination */
-#define ADIS16201_YINCL_OUT 0x0E
-
-/* Calibration, x-axis acceleration offset */
-#define ADIS16201_XACCL_OFFS 0x10
-
-/* Calibration, y-axis acceleration offset */
-#define ADIS16201_YACCL_OFFS 0x12
-
-/* x-axis acceleration scale factor */
-#define ADIS16201_XACCL_SCALE 0x14
-
-/* y-axis acceleration scale factor */
-#define ADIS16201_YACCL_SCALE 0x16
-
-/* Calibration, x-axis inclination offset */
-#define ADIS16201_XINCL_OFFS 0x18
-
-/* Calibration, y-axis inclination offset */
-#define ADIS16201_YINCL_OFFS 0x1A
-
-/* x-axis inclination scale factor */
-#define ADIS16201_XINCL_SCALE 0x1C
-
-/* y-axis inclination scale factor */
-#define ADIS16201_YINCL_SCALE 0x1E
-
-/* Alarm 1 amplitude threshold */
-#define ADIS16201_ALM_MAG1 0x20
-
-/* Alarm 2 amplitude threshold */
-#define ADIS16201_ALM_MAG2 0x22
-
-/* Alarm 1, sample period */
-#define ADIS16201_ALM_SMPL1 0x24
-
-/* Alarm 2, sample period */
-#define ADIS16201_ALM_SMPL2 0x26
-
-/* Alarm control */
-#define ADIS16201_ALM_CTRL 0x28
-
-/* Auxiliary DAC data */
-#define ADIS16201_AUX_DAC 0x30
-
-/* General-purpose digital input/output control */
-#define ADIS16201_GPIO_CTRL 0x32
-
-/* Miscellaneous control */
-#define ADIS16201_MSC_CTRL 0x34
-
-/* Internal sample period (rate) control */
-#define ADIS16201_SMPL_PRD 0x36
-
-/* Operation, filter configuration */
-#define ADIS16201_AVG_CNT 0x38
-
-/* Operation, sleep mode control */
-#define ADIS16201_SLP_CNT 0x3A
-
-/* Diagnostics, system status register */
-#define ADIS16201_DIAG_STAT 0x3C
-
-/* Operation, system command register */
-#define ADIS16201_GLOB_CMD 0x3E
-
-/* MSC_CTRL */
-
-/* Self-test enable */
-#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8)
-
-/* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2)
-
-/* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1)
-
-/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
-#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
-
-/* DIAG_STAT */
-
-/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_ALARM2 BIT(9)
-
-/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_ALARM1 BIT(8)
-
-/* SPI communications failure */
-#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3
-
-/* Flash update failure */
-#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2
-
-/* Power supply above 3.625 V */
-#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1
-
-/* Power supply below 3.15 V */
-#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-
-#define ADIS16201_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16201_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16201_ERROR_ACTIVE BIT(14)
-
-enum adis16201_scan {
- ADIS16201_SCAN_ACC_X,
- ADIS16201_SCAN_ACC_Y,
- ADIS16201_SCAN_INCLI_X,
- ADIS16201_SCAN_INCLI_Y,
- ADIS16201_SCAN_SUPPLY,
- ADIS16201_SCAN_AUX_ADC,
- ADIS16201_SCAN_TEMP,
-};
-
-static const u8 adis16201_addresses[] = {
- [ADIS16201_SCAN_ACC_X] = ADIS16201_XACCL_OFFS,
- [ADIS16201_SCAN_ACC_Y] = ADIS16201_YACCL_OFFS,
- [ADIS16201_SCAN_INCLI_X] = ADIS16201_XINCL_OFFS,
- [ADIS16201_SCAN_INCLI_Y] = ADIS16201_YINCL_OFFS,
-};
-
-static int adis16201_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16201_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220000; /* 1.22 mV */
- } else {
- *val = 0;
- *val2 = 610000; /* 0.610 mV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = -470; /* 0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(462400); /* 0.4624 mg */
- return IIO_VAL_INT_PLUS_NANO;
- case IIO_INCLI:
- *val = 0;
- *val2 = 100000; /* 0.1 degree */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- case IIO_INCLI:
- bits = 9;
- break;
- default:
- return -EINVAL;
- }
- addr = adis16201_addresses[chan->scan_index];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret)
- return ret;
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- return IIO_VAL_INT;
- }
-
- return -EINVAL;
-}
-
-static int adis16201_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- case IIO_INCLI:
- bits = 9;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16201_addresses[chan->scan_index];
- return adis_write_reg_16(st, addr, val16);
- }
-
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16201_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16201_SUPPLY_OUT, ADIS16201_SCAN_SUPPLY, 0, 12),
- ADIS_TEMP_CHAN(ADIS16201_TEMP_OUT, ADIS16201_SCAN_TEMP, 0, 12),
- ADIS_ACCEL_CHAN(X, ADIS16201_XACCL_OUT, ADIS16201_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_ACCEL_CHAN(Y, ADIS16201_YACCL_OUT, ADIS16201_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC, ADIS16201_SCAN_AUX_ADC, 0, 12),
- ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT, ADIS16201_SCAN_INCLI_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT, ADIS16201_SCAN_INCLI_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- IIO_CHAN_SOFT_TIMESTAMP(7)
-};
-
-static const struct iio_info adis16201_info = {
- .read_raw = adis16201_read_raw,
- .write_raw = adis16201_write_raw,
- .update_scan_mode = adis_update_scan_mode,
-};
-
-static const char * const adis16201_status_error_msgs[] = {
- [ADIS16201_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16201_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16201_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16201_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16201_data = {
- .read_delay = 20,
- .msc_ctrl_reg = ADIS16201_MSC_CTRL,
- .glob_cmd_reg = ADIS16201_GLOB_CMD,
- .diag_stat_reg = ADIS16201_DIAG_STAT,
-
- .self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
- .self_test_no_autoclear = true,
- .startup_delay = ADIS16201_STARTUP_DELAY,
-
- .status_error_msgs = adis16201_status_error_msgs,
- .status_error_mask = BIT(ADIS16201_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16201_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16201_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16201_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16201_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16201_info;
-
- indio_dev->channels = adis16201_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16201_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16201_data);
- if (ret)
- return ret;
-
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16201_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16201_driver = {
- .driver = {
- .name = "adis16201",
- },
- .probe = adis16201_probe,
- .remove = adis16201_remove,
-};
-module_spi_driver(adis16201_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16201");
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 3eb6f8f312dd..a34c2a1d5373 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -18,8 +18,7 @@
static int ad7606_par16_read_block(struct device *dev,
int count, void *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
insw((unsigned long)st->base_address, buf, count);
@@ -34,8 +33,7 @@ static const struct ad7606_bus_ops ad7606_par16_bops = {
static int ad7606_par8_read_block(struct device *dev,
int count, void *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
insb((unsigned long)st->base_address, buf, count * 2);
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index a7797af579b9..16d72072c076 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -128,7 +128,7 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
};
#define AD7780_CHANNEL(bits, wordsize) \
- AD_SD_CHANNEL(1, 0, 0, bits, 32, wordsize - bits)
+ AD_SD_CHANNEL_NO_SAMP_FREQ(1, 0, 0, bits, 32, wordsize - bits)
static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
[ID_AD7170] = {
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 4882dbc81c53..f53612a6461d 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -6,15 +6,15 @@
* Licensed under the GPL-2.
*/
-#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/i2c.h>
-#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/sysfs.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -220,8 +220,8 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, delay, idx;
u8 vt_setup, cap_setup;
+ int ret, delay, idx;
switch (chan->type) {
case IIO_CAPACITANCE:
@@ -289,8 +289,8 @@ static inline ssize_t ad7746_start_calib(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7746_chip_info *chip = iio_priv(indio_dev);
- bool doit;
int ret, timeout = 10;
+ bool doit;
ret = strtobool(buf, &doit);
if (ret < 0)
@@ -410,8 +410,7 @@ static struct attribute *ad7746_attributes[] = {
&iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_voltage0_calibscale_calibration.dev_attr.attr,
&iio_const_attr_in_voltage_sampling_frequency_available.dev_attr.attr,
- &iio_const_attr_in_capacitance_sampling_frequency_available.
- dev_attr.attr,
+ &iio_const_attr_in_capacitance_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -451,26 +450,26 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
goto out;
}
- ret = i2c_smbus_write_word_data(chip->client, reg, swab16(val));
+ ret = i2c_smbus_write_word_swapped(chip->client, reg, val);
if (ret < 0)
goto out;
ret = 0;
break;
case IIO_CHAN_INFO_CALIBBIAS:
- if ((val < 0) | (val > 0xFFFF)) {
+ if (val < 0 || val > 0xFFFF) {
ret = -EINVAL;
goto out;
}
- ret = i2c_smbus_write_word_data(chip->client,
- AD7746_REG_CAP_OFFH, swab16(val));
+ ret = i2c_smbus_write_word_swapped(chip->client,
+ AD7746_REG_CAP_OFFH, val);
if (ret < 0)
goto out;
ret = 0;
break;
case IIO_CHAN_INFO_OFFSET:
- if ((val < 0) | (val > 43008000)) { /* 21pF */
+ if (val < 0 || val > 43008000) { /* 21pF */
ret = -EINVAL;
goto out;
}
@@ -556,7 +555,8 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
/* Now read the actual register */
ret = i2c_smbus_read_i2c_block_data(chip->client,
- chan->address >> 8, 3, &chip->data.d8[1]);
+ chan->address >> 8, 3,
+ &chip->data.d8[1]);
if (ret < 0)
goto out;
@@ -594,27 +594,27 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
goto out;
}
- ret = i2c_smbus_read_word_data(chip->client, reg);
+ ret = i2c_smbus_read_word_swapped(chip->client, reg);
if (ret < 0)
goto out;
/* 1 + gain_val / 2^16 */
*val = 1;
- *val2 = (15625 * swab16(ret)) / 1024;
+ *val2 = (15625 * ret) / 1024;
ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_CALIBBIAS:
- ret = i2c_smbus_read_word_data(chip->client,
- AD7746_REG_CAP_OFFH);
+ ret = i2c_smbus_read_word_swapped(chip->client,
+ AD7746_REG_CAP_OFFH);
if (ret < 0)
goto out;
- *val = swab16(ret);
+ *val = ret;
ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_OFFSET:
*val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel]
- [chan->differential]) * 338646;
+ [chan->differential]) * 338646;
ret = IIO_VAL_INT;
break;
@@ -680,8 +680,8 @@ static int ad7746_probe(struct i2c_client *client,
struct ad7746_platform_data *pdata = client->dev.platform_data;
struct ad7746_chip_info *chip;
struct iio_dev *indio_dev;
- int ret = 0;
unsigned char regval = 0;
+ int ret = 0;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
deleted file mode 100644
index aacb0ae58c0e..000000000000
--- a/drivers/staging/iio/light/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Light sensors
-#
-menu "Light sensors"
-
-config TSL2x7x
- tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
- depends on I2C
- help
- Support for: tsl2571, tsl2671, tmd2671, tsl2771, tmd2771, tsl2572, tsl2672,
- tmd2672, tsl2772, tmd2772 devices.
- Provides iio_events and direct access via sysfs.
-
-endmenu
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
deleted file mode 100644
index ab8dc3a3d10b..000000000000
--- a/drivers/staging/iio/light/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for industrial I/O Light sensors
-#
-
-obj-$(CONFIG_TSL2x7x) += tsl2x7x.o
diff --git a/drivers/staging/iio/light/tsl2x7x.c b/drivers/staging/iio/light/tsl2x7x.c
deleted file mode 100644
index 82681300e106..000000000000
--- a/drivers/staging/iio/light/tsl2x7x.c
+++ /dev/null
@@ -1,1889 +0,0 @@
-/*
- * Device driver for monitoring ambient light intensity in (lux)
- * and proximity detection (prox) within the TAOS TSL2X7X family of devices.
- *
- * Copyright (c) 2012, TAOS Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/iio/events.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include "tsl2x7x.h"
-
-/* Cal defs*/
-#define PROX_STAT_CAL 0
-#define PROX_STAT_SAMP 1
-#define MAX_SAMPLES_CAL 200
-
-/* TSL2X7X Device ID */
-#define TRITON_ID 0x00
-#define SWORDFISH_ID 0x30
-#define HALIBUT_ID 0x20
-
-/* Lux calculation constants */
-#define TSL2X7X_LUX_CALC_OVER_FLOW 65535
-
-/* TAOS Register definitions - note:
- * depending on device, some of these register are not used and the
- * register address is benign.
- */
-/* 2X7X register offsets */
-#define TSL2X7X_MAX_CONFIG_REG 16
-
-/* Device Registers and Masks */
-#define TSL2X7X_CNTRL 0x00
-#define TSL2X7X_ALS_TIME 0X01
-#define TSL2X7X_PRX_TIME 0x02
-#define TSL2X7X_WAIT_TIME 0x03
-#define TSL2X7X_ALS_MINTHRESHLO 0X04
-#define TSL2X7X_ALS_MINTHRESHHI 0X05
-#define TSL2X7X_ALS_MAXTHRESHLO 0X06
-#define TSL2X7X_ALS_MAXTHRESHHI 0X07
-#define TSL2X7X_PRX_MINTHRESHLO 0X08
-#define TSL2X7X_PRX_MINTHRESHHI 0X09
-#define TSL2X7X_PRX_MAXTHRESHLO 0X0A
-#define TSL2X7X_PRX_MAXTHRESHHI 0X0B
-#define TSL2X7X_PERSISTENCE 0x0C
-#define TSL2X7X_PRX_CONFIG 0x0D
-#define TSL2X7X_PRX_COUNT 0x0E
-#define TSL2X7X_GAIN 0x0F
-#define TSL2X7X_NOTUSED 0x10
-#define TSL2X7X_REVID 0x11
-#define TSL2X7X_CHIPID 0x12
-#define TSL2X7X_STATUS 0x13
-#define TSL2X7X_ALS_CHAN0LO 0x14
-#define TSL2X7X_ALS_CHAN0HI 0x15
-#define TSL2X7X_ALS_CHAN1LO 0x16
-#define TSL2X7X_ALS_CHAN1HI 0x17
-#define TSL2X7X_PRX_LO 0x18
-#define TSL2X7X_PRX_HI 0x19
-
-/* tsl2X7X cmd reg masks */
-#define TSL2X7X_CMD_REG 0x80
-#define TSL2X7X_CMD_SPL_FN 0x60
-
-#define TSL2X7X_CMD_PROX_INT_CLR 0X05
-#define TSL2X7X_CMD_ALS_INT_CLR 0x06
-#define TSL2X7X_CMD_PROXALS_INT_CLR 0X07
-
-/* tsl2X7X cntrl reg masks */
-#define TSL2X7X_CNTL_ADC_ENBL 0x02
-#define TSL2X7X_CNTL_PWR_ON 0x01
-
-/* tsl2X7X status reg masks */
-#define TSL2X7X_STA_ADC_VALID 0x01
-#define TSL2X7X_STA_PRX_VALID 0x02
-#define TSL2X7X_STA_ADC_PRX_VALID (TSL2X7X_STA_ADC_VALID | \
- TSL2X7X_STA_PRX_VALID)
-#define TSL2X7X_STA_ALS_INTR 0x10
-#define TSL2X7X_STA_PRX_INTR 0x20
-
-/* tsl2X7X cntrl reg masks */
-#define TSL2X7X_CNTL_REG_CLEAR 0x00
-#define TSL2X7X_CNTL_PROX_INT_ENBL 0X20
-#define TSL2X7X_CNTL_ALS_INT_ENBL 0X10
-#define TSL2X7X_CNTL_WAIT_TMR_ENBL 0X08
-#define TSL2X7X_CNTL_PROX_DET_ENBL 0X04
-#define TSL2X7X_CNTL_PWRON 0x01
-#define TSL2X7X_CNTL_ALSPON_ENBL 0x03
-#define TSL2X7X_CNTL_INTALSPON_ENBL 0x13
-#define TSL2X7X_CNTL_PROXPON_ENBL 0x0F
-#define TSL2X7X_CNTL_INTPROXPON_ENBL 0x2F
-
-/*Prox diode to use */
-#define TSL2X7X_DIODE0 0x01
-#define TSL2X7X_DIODE1 0x02
-#define TSL2X7X_DIODE_BOTH 0x03
-
-/* LED Power */
-#define TSL2X7X_100_mA 0x00
-#define TSL2X7X_50_mA 0x01
-#define TSL2X7X_25_mA 0x02
-#define TSL2X7X_13_mA 0x03
-#define TSL2X7X_MAX_TIMER_CNT 0xFF
-
-#define TSL2X7X_MIN_ITIME 3
-
-/* TAOS txx2x7x Device family members */
-enum {
- tsl2571,
- tsl2671,
- tmd2671,
- tsl2771,
- tmd2771,
- tsl2572,
- tsl2672,
- tmd2672,
- tsl2772,
- tmd2772
-};
-
-enum {
- TSL2X7X_CHIP_UNKNOWN = 0,
- TSL2X7X_CHIP_WORKING = 1,
- TSL2X7X_CHIP_SUSPENDED = 2
-};
-
-/* Per-device data */
-struct tsl2x7x_als_info {
- u16 als_ch0;
- u16 als_ch1;
- u16 lux;
-};
-
-struct tsl2x7x_prox_stat {
- int min;
- int max;
- int mean;
- unsigned long stddev;
-};
-
-struct tsl2x7x_chip_info {
- int chan_table_elements;
- struct iio_chan_spec channel[4];
- const struct iio_info *info;
-};
-
-struct tsl2X7X_chip {
- kernel_ulong_t id;
- struct mutex prox_mutex;
- struct mutex als_mutex;
- struct i2c_client *client;
- u16 prox_data;
- struct tsl2x7x_als_info als_cur_info;
- struct tsl2x7x_settings settings;
- struct tsl2X7X_platform_data *pdata;
- int als_time_scale;
- int als_saturation;
- int tsl2x7x_chip_status;
- u8 tsl2x7x_config[TSL2X7X_MAX_CONFIG_REG];
- const struct tsl2x7x_chip_info *chip_info;
- const struct iio_info *info;
- s64 event_timestamp;
- /*
- * This structure is intentionally large to accommodate
- * updates via sysfs.
- * Sized to 9 = max 8 segments + 1 termination segment
- */
- struct tsl2x7x_lux tsl2x7x_device_lux[TSL2X7X_MAX_LUX_TABLE_SIZE];
-};
-
-/* Different devices require different coefficents */
-static const struct tsl2x7x_lux tsl2x71_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
- { 14461, 611, 1211 },
- { 18540, 352, 623 },
- { 0, 0, 0 },
-};
-
-static const struct tsl2x7x_lux tmd2x71_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
- { 11635, 115, 256 },
- { 15536, 87, 179 },
- { 0, 0, 0 },
-};
-
-static const struct tsl2x7x_lux tsl2x72_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
- { 14013, 466, 917 },
- { 18222, 310, 552 },
- { 0, 0, 0 },
-};
-
-static const struct tsl2x7x_lux tmd2x72_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
- { 13218, 130, 262 },
- { 17592, 92, 169 },
- { 0, 0, 0 },
-};
-
-static const struct tsl2x7x_lux *tsl2x7x_default_lux_table_group[] = {
- [tsl2571] = tsl2x71_lux_table,
- [tsl2671] = tsl2x71_lux_table,
- [tmd2671] = tmd2x71_lux_table,
- [tsl2771] = tsl2x71_lux_table,
- [tmd2771] = tmd2x71_lux_table,
- [tsl2572] = tsl2x72_lux_table,
- [tsl2672] = tsl2x72_lux_table,
- [tmd2672] = tmd2x72_lux_table,
- [tsl2772] = tsl2x72_lux_table,
- [tmd2772] = tmd2x72_lux_table,
-};
-
-static const struct tsl2x7x_settings tsl2x7x_default_settings = {
- .als_time = 219, /* 101 ms */
- .als_gain = 0,
- .prx_time = 254, /* 5.4 ms */
- .prox_gain = 0,
- .wait_time = 245,
- .prox_config = 0,
- .als_gain_trim = 1000,
- .als_cal_target = 150,
- .als_thresh_low = 200,
- .als_thresh_high = 256,
- .persistence = 255,
- .interrupts_en = 0,
- .prox_thres_low = 0,
- .prox_thres_high = 512,
- .prox_max_samples_cal = 30,
- .prox_pulse_count = 8,
- .prox_diode = TSL2X7X_DIODE1,
- .prox_power = TSL2X7X_100_mA
-};
-
-static const s16 tsl2x7x_als_gain[] = {
- 1,
- 8,
- 16,
- 120
-};
-
-static const s16 tsl2x7x_prx_gain[] = {
- 1,
- 2,
- 4,
- 8
-};
-
-/* Channel variations */
-enum {
- ALS,
- PRX,
- ALSPRX,
- PRX2,
- ALSPRX2,
-};
-
-static const u8 device_channel_config[] = {
- ALS,
- PRX,
- PRX,
- ALSPRX,
- ALSPRX,
- ALS,
- PRX2,
- PRX2,
- ALSPRX2,
- ALSPRX2
-};
-
-static int tsl2x7x_clear_interrupts(struct tsl2X7X_chip *chip, int reg)
-{
- int ret;
-
- ret = i2c_smbus_write_byte(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN | reg);
- if (ret < 0)
- dev_err(&chip->client->dev,
- "%s: failed to clear interrupt status %x: %d\n",
- __func__, reg, ret);
-
- return ret;
-}
-
-static int tsl2x7x_read_status(struct tsl2X7X_chip *chip)
-{
- int ret;
-
- ret = i2c_smbus_read_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_STATUS);
- if (ret < 0)
- dev_err(&chip->client->dev,
- "%s: failed to read STATUS register: %d\n", __func__,
- ret);
-
- return ret;
-}
-
-static int tsl2x7x_write_control_reg(struct tsl2X7X_chip *chip, u8 data)
-{
- int ret;
-
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, data);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "%s: failed to write to control register %x: %d\n",
- __func__, data, ret);
- }
-
- return ret;
-}
-
-/**
- * tsl2x7x_get_lux() - Reads and calculates current lux value.
- * @indio_dev: pointer to IIO device
- *
- * The raw ch0 and ch1 values of the ambient light sensed in the last
- * integration cycle are read from the device.
- * Time scale factor array values are adjusted based on the integration time.
- * The raw values are multiplied by a scale factor, and device gain is obtained
- * using gain index. Limit checks are done next, then the ratio of a multiple
- * of ch1 value, to the ch0 value, is calculated. Array tsl2x7x_device_lux[]
- * is then scanned to find the first ratio value that is just above the ratio
- * we just calculated. The ch0 and ch1 multiplier constants in the array are
- * then used along with the time scale factor array values, to calculate the
- * lux.
- */
-static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
-{
- u16 ch0, ch1; /* separated ch0/ch1 data from device */
- u32 lux; /* raw lux calculated from device data */
- u64 lux64;
- u32 ratio;
- u8 buf[4];
- struct tsl2x7x_lux *p;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int i, ret;
- u32 ch0lux = 0;
- u32 ch1lux = 0;
-
- mutex_lock(&chip->als_mutex);
-
- if (chip->tsl2x7x_chip_status != TSL2X7X_CHIP_WORKING) {
- /* device is not enabled */
- dev_err(&chip->client->dev, "%s: device is not enabled\n",
- __func__);
- ret = -EBUSY;
- goto out_unlock;
- }
-
- ret = tsl2x7x_read_status(chip);
- if (ret < 0)
- goto out_unlock;
-
- /* is data new & valid */
- if (!(ret & TSL2X7X_STA_ADC_VALID)) {
- dev_err(&chip->client->dev,
- "%s: data not valid yet\n", __func__);
- ret = chip->als_cur_info.lux; /* return LAST VALUE */
- goto out_unlock;
- }
-
- for (i = 0; i < 4; i++) {
- int reg = TSL2X7X_CMD_REG | (TSL2X7X_ALS_CHAN0LO + i);
-
- ret = i2c_smbus_read_byte_data(chip->client, reg);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "failed to read. err=%x\n", ret);
- goto out_unlock;
- }
-
- buf[i] = ret;
- }
-
- ret = tsl2x7x_clear_interrupts(chip, TSL2X7X_CMD_ALS_INT_CLR);
- if (ret < 0)
- goto out_unlock;
-
- /* extract ALS/lux data */
- ch0 = le16_to_cpup((const __le16 *)&buf[0]);
- ch1 = le16_to_cpup((const __le16 *)&buf[2]);
-
- chip->als_cur_info.als_ch0 = ch0;
- chip->als_cur_info.als_ch1 = ch1;
-
- if (ch0 >= chip->als_saturation || ch1 >= chip->als_saturation) {
- lux = TSL2X7X_LUX_CALC_OVER_FLOW;
- goto return_max;
- }
-
- if (!ch0) {
- /* have no data, so return LAST VALUE */
- ret = chip->als_cur_info.lux;
- goto out_unlock;
- }
- /* calculate ratio */
- ratio = (ch1 << 15) / ch0;
- /* convert to unscaled lux using the pointer to the table */
- p = (struct tsl2x7x_lux *)chip->tsl2x7x_device_lux;
- while (p->ratio != 0 && p->ratio < ratio)
- p++;
-
- if (p->ratio == 0) {
- lux = 0;
- } else {
- lux = DIV_ROUND_UP(ch0 * p->ch0,
- tsl2x7x_als_gain[chip->settings.als_gain]) -
- DIV_ROUND_UP(ch1 * p->ch1,
- tsl2x7x_als_gain[chip->settings.als_gain]);
- }
-
- /* note: lux is 31 bit max at this point */
- if (ch1lux > ch0lux) {
- dev_dbg(&chip->client->dev, "ch1lux > ch0lux-return last value\n");
- ret = chip->als_cur_info.lux;
- goto out_unlock;
- }
-
- /* adjust for active time scale */
- if (chip->als_time_scale == 0)
- lux = 0;
- else
- lux = (lux + (chip->als_time_scale >> 1)) /
- chip->als_time_scale;
-
- /* adjust for active gain scale
- * The tsl2x7x_device_lux tables have a factor of 256 built-in.
- * User-specified gain provides a multiplier.
- * Apply user-specified gain before shifting right to retain precision.
- * Use 64 bits to avoid overflow on multiplication.
- * Then go back to 32 bits before division to avoid using div_u64().
- */
-
- lux64 = lux;
- lux64 = lux64 * chip->settings.als_gain_trim;
- lux64 >>= 8;
- lux = lux64;
- lux = (lux + 500) / 1000;
-
- if (lux > TSL2X7X_LUX_CALC_OVER_FLOW) /* check for overflow */
- lux = TSL2X7X_LUX_CALC_OVER_FLOW;
-
- /* Update the structure with the latest lux. */
-return_max:
- chip->als_cur_info.lux = lux;
- ret = lux;
-
-out_unlock:
- mutex_unlock(&chip->als_mutex);
-
- return ret;
-}
-
-/**
- * tsl2x7x_get_prox() - Reads proximity data registers and updates
- * chip->prox_data.
- *
- * @indio_dev: pointer to IIO device
- */
-static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
-{
- int i;
- int ret;
- u8 chdata[2];
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- mutex_lock(&chip->prox_mutex);
-
- ret = tsl2x7x_read_status(chip);
- if (ret < 0)
- goto prox_poll_err;
-
- switch (chip->id) {
- case tsl2571:
- case tsl2671:
- case tmd2671:
- case tsl2771:
- case tmd2771:
- if (!(ret & TSL2X7X_STA_ADC_VALID)) {
- ret = -EINVAL;
- goto prox_poll_err;
- }
- break;
- case tsl2572:
- case tsl2672:
- case tmd2672:
- case tsl2772:
- case tmd2772:
- if (!(ret & TSL2X7X_STA_PRX_VALID)) {
- ret = -EINVAL;
- goto prox_poll_err;
- }
- break;
- }
-
- for (i = 0; i < 2; i++) {
- int reg = TSL2X7X_CMD_REG | (TSL2X7X_PRX_LO + i);
-
- ret = i2c_smbus_read_byte_data(chip->client, reg);
- if (ret < 0)
- goto prox_poll_err;
-
- chdata[i] = ret;
- }
-
- chip->prox_data = le16_to_cpup((const __le16 *)&chdata[0]);
- ret = chip->prox_data;
-
-prox_poll_err:
- mutex_unlock(&chip->prox_mutex);
-
- return ret;
-}
-
-/**
- * tsl2x7x_defaults() - Populates the device nominal operating parameters
- * with those provided by a 'platform' data struct or
- * with prefined defaults.
- *
- * @chip: pointer to device structure.
- */
-static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
-{
- /* If Operational settings defined elsewhere.. */
- if (chip->pdata && chip->pdata->platform_default_settings)
- memcpy(&chip->settings, chip->pdata->platform_default_settings,
- sizeof(tsl2x7x_default_settings));
- else
- memcpy(&chip->settings, &tsl2x7x_default_settings,
- sizeof(tsl2x7x_default_settings));
-
- /* Load up the proper lux table. */
- if (chip->pdata && chip->pdata->platform_lux_table[0].ratio != 0)
- memcpy(chip->tsl2x7x_device_lux,
- chip->pdata->platform_lux_table,
- sizeof(chip->pdata->platform_lux_table));
- else
- memcpy(chip->tsl2x7x_device_lux,
- tsl2x7x_default_lux_table_group[chip->id],
- TSL2X7X_DEFAULT_TABLE_BYTES);
-}
-
-/**
- * tsl2x7x_als_calibrate() - Obtain single reading and calculate
- * the als_gain_trim.
- *
- * @indio_dev: pointer to IIO device
- */
-static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret, lux_val;
-
- ret = i2c_smbus_read_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "%s: failed to read from the CNTRL register\n",
- __func__);
- return ret;
- }
-
- if ((ret & (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON))
- != (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON)) {
- dev_err(&chip->client->dev,
- "%s: Device is not powered on and/or ADC is not enabled\n",
- __func__);
- return -EINVAL;
- } else if ((ret & TSL2X7X_STA_ADC_VALID) != TSL2X7X_STA_ADC_VALID) {
- dev_err(&chip->client->dev,
- "%s: The two ADC channels have not completed an integration cycle\n",
- __func__);
- return -ENODATA;
- }
-
- lux_val = tsl2x7x_get_lux(indio_dev);
- if (lux_val < 0) {
- dev_err(&chip->client->dev,
- "%s: failed to get lux\n", __func__);
- return lux_val;
- }
-
- ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
- lux_val;
- if (ret < 250 || ret > 4000)
- return -ERANGE;
-
- chip->settings.als_gain_trim = ret;
- dev_info(&chip->client->dev,
- "%s als_calibrate completed\n", chip->client->name);
-
- return ret;
-}
-
-static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
-{
- int i;
- int ret = 0;
- u8 *dev_reg;
- int als_count;
- int als_time;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- u8 reg_val = 0;
-
- /* Non calculated parameters */
- chip->tsl2x7x_config[TSL2X7X_PRX_TIME] = chip->settings.prx_time;
- chip->tsl2x7x_config[TSL2X7X_WAIT_TIME] = chip->settings.wait_time;
- chip->tsl2x7x_config[TSL2X7X_PRX_CONFIG] = chip->settings.prox_config;
-
- chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHLO] =
- (chip->settings.als_thresh_low) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHHI] =
- (chip->settings.als_thresh_low >> 8) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHLO] =
- (chip->settings.als_thresh_high) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHHI] =
- (chip->settings.als_thresh_high >> 8) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PERSISTENCE] = chip->settings.persistence;
-
- chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
- chip->settings.prox_pulse_count;
- chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
- (chip->settings.prox_thres_low) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
- (chip->settings.prox_thres_low >> 8) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
- (chip->settings.prox_thres_high) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
- (chip->settings.prox_thres_high >> 8) & 0xFF;
-
- /* and make sure we're not already on */
- if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
- /* if forcing a register update - turn off, then on */
- dev_info(&chip->client->dev, "device is already enabled\n");
- return -EINVAL;
- }
-
- /* determine als integration register */
- als_count = (chip->settings.als_time * 100 + 135) / 270;
- if (!als_count)
- als_count = 1; /* ensure at least one cycle */
-
- /* convert back to time (encompasses overrides) */
- als_time = (als_count * 27 + 5) / 10;
- chip->tsl2x7x_config[TSL2X7X_ALS_TIME] = 256 - als_count;
-
- /* Set the gain based on tsl2x7x_settings struct */
- chip->tsl2x7x_config[TSL2X7X_GAIN] =
- (chip->settings.als_gain & 0xFF) |
- ((chip->settings.prox_gain & 0xFF) << 2) |
- (chip->settings.prox_diode << 4) |
- (chip->settings.prox_power << 6);
-
- /* set chip struct re scaling and saturation */
- chip->als_saturation = als_count * 922; /* 90% of full scale */
- chip->als_time_scale = (als_time + 25) / 50;
-
- /*
- * TSL2X7X Specific power-on / adc enable sequence
- * Power on the device 1st.
- */
- ret = tsl2x7x_write_control_reg(chip, TSL2X7X_CNTL_PWR_ON);
- if (ret < 0)
- return ret;
-
- /*
- * Use the following shadow copy for our delay before enabling ADC.
- * Write all the registers.
- */
- for (i = 0, dev_reg = chip->tsl2x7x_config;
- i < TSL2X7X_MAX_CONFIG_REG; i++) {
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG + i,
- *dev_reg++);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "failed on write to reg %d.\n", i);
- return ret;
- }
- }
-
- /* Power-on settling time */
- usleep_range(3000, 3500);
-
- /*
- * NOW enable the ADC
- * initialize the desired mode of operation
- */
- ret = tsl2x7x_write_control_reg(chip,
- TSL2X7X_CNTL_PWR_ON |
- TSL2X7X_CNTL_ADC_ENBL |
- TSL2X7X_CNTL_PROX_DET_ENBL);
- if (ret < 0)
- return ret;
-
- chip->tsl2x7x_chip_status = TSL2X7X_CHIP_WORKING;
-
- if (chip->settings.interrupts_en != 0) {
- dev_info(&chip->client->dev, "Setting Up Interrupt(s)\n");
-
- reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
- if (chip->settings.interrupts_en == 0x20 ||
- chip->settings.interrupts_en == 0x30)
- reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;
-
- reg_val |= chip->settings.interrupts_en;
- ret = tsl2x7x_write_control_reg(chip, reg_val);
- if (ret < 0)
- return ret;
-
- ret = tsl2x7x_clear_interrupts(chip,
- TSL2X7X_CMD_PROXALS_INT_CLR);
- if (ret < 0)
- return ret;
- }
-
- return ret;
-}
-
-static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- /* turn device off */
- chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
- return tsl2x7x_write_control_reg(chip, 0x00);
-}
-
-/**
- * tsl2x7x_invoke_change
- * @indio_dev: pointer to IIO device
- *
- * Obtain and lock both ALS and PROX resources,
- * determine and save device state (On/Off),
- * cycle device to implement updated parameter,
- * put device back into proper state, and unlock
- * resource.
- */
-static int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int device_status = chip->tsl2x7x_chip_status;
- int ret;
-
- mutex_lock(&chip->als_mutex);
- mutex_lock(&chip->prox_mutex);
-
- if (device_status == TSL2X7X_CHIP_WORKING) {
- ret = tsl2x7x_chip_off(indio_dev);
- if (ret < 0)
- goto unlock;
- }
-
- ret = tsl2x7x_chip_on(indio_dev);
-
-unlock:
- mutex_unlock(&chip->prox_mutex);
- mutex_unlock(&chip->als_mutex);
-
- return ret;
-}
-
-static void tsl2x7x_prox_calculate(int *data, int length,
- struct tsl2x7x_prox_stat *stat)
-{
- int i;
- int sample_sum;
- int tmp;
-
- if (!length)
- length = 1;
-
- sample_sum = 0;
- stat->min = INT_MAX;
- stat->max = INT_MIN;
- for (i = 0; i < length; i++) {
- sample_sum += data[i];
- stat->min = min(stat->min, data[i]);
- stat->max = max(stat->max, data[i]);
- }
-
- stat->mean = sample_sum / length;
- sample_sum = 0;
- for (i = 0; i < length; i++) {
- tmp = data[i] - stat->mean;
- sample_sum += tmp * tmp;
- }
- stat->stddev = int_sqrt((long)sample_sum / length);
-}
-
-/**
- * tsl2x7x_prox_cal() - Calculates std. and sets thresholds.
- * @indio_dev: pointer to IIO device
- *
- * Calculates a standard deviation based on the samples,
- * and sets the threshold accordingly.
- */
-static int tsl2x7x_prox_cal(struct iio_dev *indio_dev)
-{
- int prox_history[MAX_SAMPLES_CAL + 1];
- int i, ret;
- struct tsl2x7x_prox_stat prox_stat_data[2];
- struct tsl2x7x_prox_stat *cal;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- u8 tmp_irq_settings;
- u8 current_state = chip->tsl2x7x_chip_status;
-
- if (chip->settings.prox_max_samples_cal > MAX_SAMPLES_CAL) {
- dev_err(&chip->client->dev,
- "max prox samples cal is too big: %d\n",
- chip->settings.prox_max_samples_cal);
- chip->settings.prox_max_samples_cal = MAX_SAMPLES_CAL;
- }
-
- /* have to stop to change settings */
- ret = tsl2x7x_chip_off(indio_dev);
- if (ret < 0)
- return ret;
-
- /* Enable proximity detection save just in case prox not wanted yet*/
- tmp_irq_settings = chip->settings.interrupts_en;
- chip->settings.interrupts_en |= TSL2X7X_CNTL_PROX_INT_ENBL;
-
- /*turn on device if not already on*/
- ret = tsl2x7x_chip_on(indio_dev);
- if (ret < 0)
- return ret;
-
- /*gather the samples*/
- for (i = 0; i < chip->settings.prox_max_samples_cal; i++) {
- usleep_range(15000, 17500);
- ret = tsl2x7x_get_prox(indio_dev);
- if (ret < 0)
- return ret;
- prox_history[i] = chip->prox_data;
- dev_info(&chip->client->dev, "2 i=%d prox data= %d\n",
- i, chip->prox_data);
- }
-
- ret = tsl2x7x_chip_off(indio_dev);
- if (ret < 0)
- return ret;
- cal = &prox_stat_data[PROX_STAT_CAL];
- tsl2x7x_prox_calculate(prox_history,
- chip->settings.prox_max_samples_cal, cal);
- chip->settings.prox_thres_high = (cal->max << 1) - cal->mean;
-
- dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
- cal->min, cal->mean, cal->max);
- dev_info(&chip->client->dev,
- "%s proximity threshold set to %d\n",
- chip->client->name, chip->settings.prox_thres_high);
-
- /* back to the way they were */
- chip->settings.interrupts_en = tmp_irq_settings;
- if (current_state == TSL2X7X_CHIP_WORKING) {
- ret = tsl2x7x_chip_on(indio_dev);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static ssize_t
-in_illuminance0_calibscale_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
-
- switch (chip->id) {
- case tsl2571:
- case tsl2671:
- case tmd2671:
- case tsl2771:
- case tmd2771:
- return snprintf(buf, PAGE_SIZE, "%s\n", "1 8 16 128");
- }
-
- return snprintf(buf, PAGE_SIZE, "%s\n", "1 8 16 120");
-}
-
-static IIO_CONST_ATTR(in_proximity0_calibscale_available, "1 2 4 8");
-
-static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
- ".00272 - .696");
-
-static ssize_t in_illuminance0_target_input_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
-
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
-}
-
-static ssize_t in_illuminance0_target_input_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- unsigned long value;
- int ret;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- if (value)
- chip->settings.als_cal_target = value;
-
- ret = tsl2x7x_invoke_change(indio_dev);
- if (ret < 0)
- return ret;
-
- return len;
-}
-
-static ssize_t in_illuminance0_calibrate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- bool value;
- int ret;
-
- if (strtobool(buf, &value))
- return -EINVAL;
-
- if (value) {
- ret = tsl2x7x_als_calibrate(indio_dev);
- if (ret < 0)
- return ret;
- }
-
- ret = tsl2x7x_invoke_change(indio_dev);
- if (ret < 0)
- return ret;
-
- return len;
-}
-
-static ssize_t in_illuminance0_lux_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int i = 0;
- int offset = 0;
-
- while (i < TSL2X7X_MAX_LUX_TABLE_SIZE) {
- offset += snprintf(buf + offset, PAGE_SIZE, "%u,%u,%u,",
- chip->tsl2x7x_device_lux[i].ratio,
- chip->tsl2x7x_device_lux[i].ch0,
- chip->tsl2x7x_device_lux[i].ch1);
- if (chip->tsl2x7x_device_lux[i].ratio == 0) {
- /*
- * We just printed the first "0" entry.
- * Now get rid of the extra "," and break.
- */
- offset--;
- break;
- }
- i++;
- }
-
- offset += snprintf(buf + offset, PAGE_SIZE, "\n");
- return offset;
-}
-
-static ssize_t in_illuminance0_lux_table_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int value[ARRAY_SIZE(chip->tsl2x7x_device_lux) * 3 + 1];
- int n, ret;
-
- get_options(buf, ARRAY_SIZE(value), value);
-
- /* We now have an array of ints starting at value[1], and
- * enumerated by value[0].
- * We expect each group of three ints is one table entry,
- * and the last table entry is all 0.
- */
- n = value[0];
- if ((n % 3) || n < 6 ||
- n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
- dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
- return -EINVAL;
- }
-
- if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) {
- dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n);
- return -EINVAL;
- }
-
- if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
- ret = tsl2x7x_chip_off(indio_dev);
- if (ret < 0)
- return ret;
- }
-
- /* Zero out the table */
- memset(chip->tsl2x7x_device_lux, 0, sizeof(chip->tsl2x7x_device_lux));
- memcpy(chip->tsl2x7x_device_lux, &value[1], (value[0] * 4));
-
- ret = tsl2x7x_invoke_change(indio_dev);
- if (ret < 0)
- return ret;
-
- return len;
-}
-
-static ssize_t in_proximity0_calibrate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- bool value;
- int ret;
-
- if (strtobool(buf, &value))
- return -EINVAL;
-
- if (value) {
- ret = tsl2x7x_prox_cal(indio_dev);
- if (ret < 0)
- return ret;
- }
-
- ret = tsl2x7x_invoke_change(indio_dev);
- if (ret < 0)
- return ret;
-
- return len;
-}
-
-static int tsl2x7x_read_interrupt_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret;
-
- if (chan->type == IIO_INTENSITY)
- ret = !!(chip->settings.interrupts_en & 0x10);
- else
- ret = !!(chip->settings.interrupts_en & 0x20);
-
- return ret;
-}
-
-static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- int val)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret;
-
- if (chan->type == IIO_INTENSITY) {
- if (val)
- chip->settings.interrupts_en |= 0x10;
- else
- chip->settings.interrupts_en &= 0x20;
- } else {
- if (val)
- chip->settings.interrupts_en |= 0x20;
- else
- chip->settings.interrupts_en &= 0x10;
- }
-
- ret = tsl2x7x_invoke_change(indio_dev);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int tsl2x7x_write_event_value(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = -EINVAL, y, z, filter_delay;
- u8 time;
-
- switch (info) {
- case IIO_EV_INFO_VALUE:
- if (chan->type == IIO_INTENSITY) {
- switch (dir) {
- case IIO_EV_DIR_RISING:
- chip->settings.als_thresh_high = val;
- ret = 0;
- break;
- case IIO_EV_DIR_FALLING:
- chip->settings.als_thresh_low = val;
- ret = 0;
- break;
- default:
- break;
- }
- } else {
- switch (dir) {
- case IIO_EV_DIR_RISING:
- chip->settings.prox_thres_high = val;
- ret = 0;
- break;
- case IIO_EV_DIR_FALLING:
- chip->settings.prox_thres_low = val;
- ret = 0;
- break;
- default:
- break;
- }
- }
- break;
- case IIO_EV_INFO_PERIOD:
- if (chan->type == IIO_INTENSITY)
- time = chip->settings.als_time;
- else
- time = chip->settings.prx_time;
-
- y = (TSL2X7X_MAX_TIMER_CNT - time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
-
- filter_delay = DIV_ROUND_UP((val * 1000) + val2, z);
-
- if (chan->type == IIO_INTENSITY) {
- chip->settings.persistence &= 0xF0;
- chip->settings.persistence |=
- (filter_delay & 0x0F);
- dev_info(&chip->client->dev, "%s: ALS persistence = %d",
- __func__, filter_delay);
- } else {
- chip->settings.persistence &= 0x0F;
- chip->settings.persistence |=
- ((filter_delay << 4) & 0xF0);
- dev_info(&chip->client->dev,
- "%s: Proximity persistence = %d",
- __func__, filter_delay);
- }
- ret = 0;
- break;
- default:
- break;
- }
-
- if (ret < 0)
- return ret;
-
- return tsl2x7x_invoke_change(indio_dev);
-}
-
-static int tsl2x7x_read_event_value(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = -EINVAL, filter_delay, mult;
- u8 time;
-
- switch (info) {
- case IIO_EV_INFO_VALUE:
- if (chan->type == IIO_INTENSITY) {
- switch (dir) {
- case IIO_EV_DIR_RISING:
- *val = chip->settings.als_thresh_high;
- ret = IIO_VAL_INT;
- break;
- case IIO_EV_DIR_FALLING:
- *val = chip->settings.als_thresh_low;
- ret = IIO_VAL_INT;
- break;
- default:
- break;
- }
- } else {
- switch (dir) {
- case IIO_EV_DIR_RISING:
- *val = chip->settings.prox_thres_high;
- ret = IIO_VAL_INT;
- break;
- case IIO_EV_DIR_FALLING:
- *val = chip->settings.prox_thres_low;
- ret = IIO_VAL_INT;
- break;
- default:
- break;
- }
- }
- break;
- case IIO_EV_INFO_PERIOD:
- if (chan->type == IIO_INTENSITY) {
- time = chip->settings.als_time;
- mult = chip->settings.persistence & 0x0F;
- } else {
- time = chip->settings.prx_time;
- mult = (chip->settings.persistence & 0xF0) >> 4;
- }
-
- /* Determine integration time */
- *val = (TSL2X7X_MAX_TIMER_CNT - time) + 1;
- *val2 = *val * TSL2X7X_MIN_ITIME;
- filter_delay = *val2 * mult;
- *val = filter_delay / 1000;
- *val2 = filter_delay % 1000;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- int ret = -EINVAL;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- switch (mask) {
- case IIO_CHAN_INFO_PROCESSED:
- switch (chan->type) {
- case IIO_LIGHT:
- tsl2x7x_get_lux(indio_dev);
- *val = chip->als_cur_info.lux;
- ret = IIO_VAL_INT;
- break;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_RAW:
- switch (chan->type) {
- case IIO_INTENSITY:
- tsl2x7x_get_lux(indio_dev);
- if (chan->channel == 0)
- *val = chip->als_cur_info.als_ch0;
- else
- *val = chip->als_cur_info.als_ch1;
- ret = IIO_VAL_INT;
- break;
- case IIO_PROXIMITY:
- tsl2x7x_get_prox(indio_dev);
- *val = chip->prox_data;
- ret = IIO_VAL_INT;
- break;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->type == IIO_LIGHT)
- *val = tsl2x7x_als_gain[chip->settings.als_gain];
- else
- *val = tsl2x7x_prx_gain[chip->settings.prox_gain];
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
- *val = chip->settings.als_gain_trim;
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_INT_TIME:
- *val = (TSL2X7X_MAX_TIMER_CNT - chip->settings.als_time) + 1;
- *val2 = ((*val * TSL2X7X_MIN_ITIME) % 1000) / 1000;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->type == IIO_INTENSITY) {
- switch (val) {
- case 1:
- chip->settings.als_gain = 0;
- break;
- case 8:
- chip->settings.als_gain = 1;
- break;
- case 16:
- chip->settings.als_gain = 2;
- break;
- case 120:
- switch (chip->id) {
- case tsl2572:
- case tsl2672:
- case tmd2672:
- case tsl2772:
- case tmd2772:
- return -EINVAL;
- }
- chip->settings.als_gain = 3;
- break;
- case 128:
- switch (chip->id) {
- case tsl2571:
- case tsl2671:
- case tmd2671:
- case tsl2771:
- case tmd2771:
- return -EINVAL;
- }
- chip->settings.als_gain = 3;
- break;
- default:
- return -EINVAL;
- }
- } else {
- switch (val) {
- case 1:
- chip->settings.prox_gain = 0;
- break;
- case 2:
- chip->settings.prox_gain = 1;
- break;
- case 4:
- chip->settings.prox_gain = 2;
- break;
- case 8:
- chip->settings.prox_gain = 3;
- break;
- default:
- return -EINVAL;
- }
- }
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
- chip->settings.als_gain_trim = val;
- break;
- case IIO_CHAN_INFO_INT_TIME:
- chip->settings.als_time =
- TSL2X7X_MAX_TIMER_CNT - (val2 / TSL2X7X_MIN_ITIME);
-
- dev_info(&chip->client->dev, "%s: als time = %d",
- __func__, chip->settings.als_time);
- break;
- default:
- return -EINVAL;
- }
-
- return tsl2x7x_invoke_change(indio_dev);
-}
-
-static DEVICE_ATTR_RO(in_illuminance0_calibscale_available);
-
-static DEVICE_ATTR_RW(in_illuminance0_target_input);
-
-static DEVICE_ATTR_WO(in_illuminance0_calibrate);
-
-static DEVICE_ATTR_WO(in_proximity0_calibrate);
-
-static DEVICE_ATTR_RW(in_illuminance0_lux_table);
-
-/* Use the default register values to identify the Taos device */
-static int tsl2x7x_device_id(int *id, int target)
-{
- switch (target) {
- case tsl2571:
- case tsl2671:
- case tsl2771:
- return (*id & 0xf0) == TRITON_ID;
- case tmd2671:
- case tmd2771:
- return (*id & 0xf0) == HALIBUT_ID;
- case tsl2572:
- case tsl2672:
- case tmd2672:
- case tsl2772:
- case tmd2772:
- return (*id & 0xf0) == SWORDFISH_ID;
- }
-
- return -EINVAL;
-}
-
-static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- s64 timestamp = iio_get_time_ns(indio_dev);
- int ret;
-
- ret = tsl2x7x_read_status(chip);
- if (ret < 0)
- return ret;
-
- /* What type of interrupt do we need to process */
- if (ret & TSL2X7X_STA_PRX_INTR) {
- tsl2x7x_get_prox(indio_dev); /* freshen data for ABI */
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_EITHER),
- timestamp);
- }
-
- if (ret & TSL2X7X_STA_ALS_INTR) {
- tsl2x7x_get_lux(indio_dev); /* freshen data for ABI */
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_EITHER),
- timestamp);
- }
-
- ret = tsl2x7x_clear_interrupts(chip, TSL2X7X_CMD_PROXALS_INT_CLR);
- if (ret < 0)
- return ret;
-
- return IRQ_HANDLED;
-}
-
-static struct attribute *tsl2x7x_ALS_device_attrs[] = {
- &dev_attr_in_illuminance0_calibscale_available.attr,
- &iio_const_attr_in_illuminance0_integration_time_available
- .dev_attr.attr,
- &dev_attr_in_illuminance0_target_input.attr,
- &dev_attr_in_illuminance0_calibrate.attr,
- &dev_attr_in_illuminance0_lux_table.attr,
- NULL
-};
-
-static struct attribute *tsl2x7x_PRX_device_attrs[] = {
- &dev_attr_in_proximity0_calibrate.attr,
- NULL
-};
-
-static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = {
- &dev_attr_in_illuminance0_calibscale_available.attr,
- &iio_const_attr_in_illuminance0_integration_time_available
- .dev_attr.attr,
- &dev_attr_in_illuminance0_target_input.attr,
- &dev_attr_in_illuminance0_calibrate.attr,
- &dev_attr_in_illuminance0_lux_table.attr,
- NULL
-};
-
-static struct attribute *tsl2x7x_PRX2_device_attrs[] = {
- &dev_attr_in_proximity0_calibrate.attr,
- &iio_const_attr_in_proximity0_calibscale_available.dev_attr.attr,
- NULL
-};
-
-static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = {
- &dev_attr_in_illuminance0_calibscale_available.attr,
- &iio_const_attr_in_illuminance0_integration_time_available
- .dev_attr.attr,
- &dev_attr_in_illuminance0_target_input.attr,
- &dev_attr_in_illuminance0_calibrate.attr,
- &dev_attr_in_illuminance0_lux_table.attr,
- &dev_attr_in_proximity0_calibrate.attr,
- &iio_const_attr_in_proximity0_calibscale_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group tsl2X7X_device_attr_group_tbl[] = {
- [ALS] = {
- .attrs = tsl2x7x_ALS_device_attrs,
- },
- [PRX] = {
- .attrs = tsl2x7x_PRX_device_attrs,
- },
- [ALSPRX] = {
- .attrs = tsl2x7x_ALSPRX_device_attrs,
- },
- [PRX2] = {
- .attrs = tsl2x7x_PRX2_device_attrs,
- },
- [ALSPRX2] = {
- .attrs = tsl2x7x_ALSPRX2_device_attrs,
- },
-};
-
-static const struct iio_info tsl2X7X_device_info[] = {
- [ALS] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[ALS],
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_event_value,
- .write_event_value = &tsl2x7x_write_event_value,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
- [PRX] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[PRX],
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_event_value,
- .write_event_value = &tsl2x7x_write_event_value,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
- [ALSPRX] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX],
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_event_value,
- .write_event_value = &tsl2x7x_write_event_value,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
- [PRX2] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[PRX2],
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_event_value,
- .write_event_value = &tsl2x7x_write_event_value,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
- [ALSPRX2] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2],
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_event_value,
- .write_event_value = &tsl2x7x_write_event_value,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
-};
-
-static const struct iio_event_spec tsl2x7x_events[] = {
- {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- }, {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- }, {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_EITHER,
- .mask_separate = BIT(IIO_EV_INFO_PERIOD),
- },
-};
-
-static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
- [ALS] = {
- .channel = {
- {
- .type = IIO_LIGHT,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_INT_TIME),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 1,
- },
- },
- .chan_table_elements = 3,
- .info = &tsl2X7X_device_info[ALS],
- },
- [PRX] = {
- .channel = {
- {
- .type = IIO_PROXIMITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- },
- },
- .chan_table_elements = 1,
- .info = &tsl2X7X_device_info[PRX],
- },
- [ALSPRX] = {
- .channel = {
- {
- .type = IIO_LIGHT,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_INT_TIME),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .type = IIO_PROXIMITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- },
- },
- .chan_table_elements = 4,
- .info = &tsl2X7X_device_info[ALSPRX],
- },
- [PRX2] = {
- .channel = {
- {
- .type = IIO_PROXIMITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- },
- },
- .chan_table_elements = 1,
- .info = &tsl2X7X_device_info[PRX2],
- },
- [ALSPRX2] = {
- .channel = {
- {
- .type = IIO_LIGHT,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_INT_TIME),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .type = IIO_PROXIMITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- },
- },
- .chan_table_elements = 4,
- .info = &tsl2X7X_device_info[ALSPRX2],
- },
-};
-
-static int tsl2x7x_probe(struct i2c_client *clientp,
- const struct i2c_device_id *id)
-{
- int ret;
- struct iio_dev *indio_dev;
- struct tsl2X7X_chip *chip;
-
- indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
- if (!indio_dev)
- return -ENOMEM;
-
- chip = iio_priv(indio_dev);
- chip->client = clientp;
- i2c_set_clientdata(clientp, indio_dev);
-
- ret = i2c_smbus_read_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CHIPID);
- if (ret < 0)
- return ret;
-
- if ((!tsl2x7x_device_id(&ret, id->driver_data)) ||
- (tsl2x7x_device_id(&ret, id->driver_data) == -EINVAL)) {
- dev_info(&chip->client->dev,
- "%s: i2c device found does not match expected id\n",
- __func__);
- return -EINVAL;
- }
-
- ret = i2c_smbus_write_byte(clientp, TSL2X7X_CMD_REG | TSL2X7X_CNTRL);
- if (ret < 0) {
- dev_err(&clientp->dev, "write to cmd reg failed. err = %d\n",
- ret);
- return ret;
- }
-
- /*
- * ALS and PROX functions can be invoked via user space poll
- * or H/W interrupt. If busy return last sample.
- */
- mutex_init(&chip->als_mutex);
- mutex_init(&chip->prox_mutex);
-
- chip->tsl2x7x_chip_status = TSL2X7X_CHIP_UNKNOWN;
- chip->pdata = dev_get_platdata(&clientp->dev);
- chip->id = id->driver_data;
- chip->chip_info =
- &tsl2x7x_chip_info_tbl[device_channel_config[id->driver_data]];
-
- indio_dev->info = chip->chip_info->info;
- indio_dev->dev.parent = &clientp->dev;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = chip->client->name;
- indio_dev->channels = chip->chip_info->channel;
- indio_dev->num_channels = chip->chip_info->chan_table_elements;
-
- if (clientp->irq) {
- ret = devm_request_threaded_irq(&clientp->dev, clientp->irq,
- NULL,
- &tsl2x7x_event_handler,
- IRQF_TRIGGER_RISING |
- IRQF_ONESHOT,
- "TSL2X7X_event",
- indio_dev);
- if (ret) {
- dev_err(&clientp->dev,
- "%s: irq request failed", __func__);
- return ret;
- }
- }
-
- /* Load up the defaults */
- tsl2x7x_defaults(chip);
- /* Make sure the chip is on */
- tsl2x7x_chip_on(indio_dev);
-
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&clientp->dev,
- "%s: iio registration failed\n", __func__);
- return ret;
- }
-
- dev_info(&clientp->dev, "%s Light sensor found.\n", id->name);
-
- return 0;
-}
-
-static int tsl2x7x_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
- ret = tsl2x7x_chip_off(indio_dev);
- chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
- }
-
- return ret;
-}
-
-static int tsl2x7x_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_SUSPENDED)
- ret = tsl2x7x_chip_on(indio_dev);
-
- return ret;
-}
-
-static int tsl2x7x_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- tsl2x7x_chip_off(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
-static const struct i2c_device_id tsl2x7x_idtable[] = {
- { "tsl2571", tsl2571 },
- { "tsl2671", tsl2671 },
- { "tmd2671", tmd2671 },
- { "tsl2771", tsl2771 },
- { "tmd2771", tmd2771 },
- { "tsl2572", tsl2572 },
- { "tsl2672", tsl2672 },
- { "tmd2672", tmd2672 },
- { "tsl2772", tsl2772 },
- { "tmd2772", tmd2772 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, tsl2x7x_idtable);
-
-static const struct of_device_id tsl2x7x_of_match[] = {
- { .compatible = "amstaos,tsl2571" },
- { .compatible = "amstaos,tsl2671" },
- { .compatible = "amstaos,tmd2671" },
- { .compatible = "amstaos,tsl2771" },
- { .compatible = "amstaos,tmd2771" },
- { .compatible = "amstaos,tsl2572" },
- { .compatible = "amstaos,tsl2672" },
- { .compatible = "amstaos,tmd2672" },
- { .compatible = "amstaos,tsl2772" },
- { .compatible = "amstaos,tmd2772" },
- {}
-};
-MODULE_DEVICE_TABLE(of, tsl2x7x_of_match);
-
-static const struct dev_pm_ops tsl2x7x_pm_ops = {
- .suspend = tsl2x7x_suspend,
- .resume = tsl2x7x_resume,
-};
-
-/* Driver definition */
-static struct i2c_driver tsl2x7x_driver = {
- .driver = {
- .name = "tsl2x7x",
- .of_match_table = tsl2x7x_of_match,
- .pm = &tsl2x7x_pm_ops,
- },
- .id_table = tsl2x7x_idtable,
- .probe = tsl2x7x_probe,
- .remove = tsl2x7x_remove,
-};
-
-module_i2c_driver(tsl2x7x_driver);
-
-MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
-MODULE_DESCRIPTION("TAOS tsl2x7x ambient and proximity light sensor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/light/tsl2x7x.h b/drivers/staging/iio/light/tsl2x7x.h
deleted file mode 100644
index 28b0e7fdc9b8..000000000000
--- a/drivers/staging/iio/light/tsl2x7x.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Device driver for monitoring ambient light intensity (lux)
- * and proximity (prox) within the TAOS TSL2X7X family of devices.
- *
- * Copyright (c) 2012, TAOS Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __TSL2X7X_H
-#define __TSL2X7X_H
-
-struct tsl2x7x_lux {
- unsigned int ratio;
- unsigned int ch0;
- unsigned int ch1;
-};
-
-/* Max number of segments allowable in LUX table */
-#define TSL2X7X_MAX_LUX_TABLE_SIZE 9
-/* The default LUX tables all have 3 elements. */
-#define TSL2X7X_DEF_LUX_TABLE_SZ 3
-#define TSL2X7X_DEFAULT_TABLE_BYTES (sizeof(struct tsl2x7x_lux) * \
- TSL2X7X_DEF_LUX_TABLE_SZ)
-
-/**
- * struct tsl2x7x_default_settings - power on defaults unless
- * overridden by platform data.
- * @als_time: ALS Integration time - multiple of 50mS
- * @als_gain: Index into the ALS gain table.
- * @als_gain_trim: default gain trim to account for
- * aperture effects.
- * @wait_time: Time between PRX and ALS cycles
- * in 2.7 periods
- * @prx_time: 5.2ms prox integration time -
- * decrease in 2.7ms periods
- * @prx_gain: Proximity gain index
- * @prox_config: Prox configuration filters.
- * @als_cal_target: Known external ALS reading for
- * calibration.
- * @interrupts_en: Enable/Disable - 0x00 = none, 0x10 = als,
- * 0x20 = prx, 0x30 = bth
- * @persistence: H/W Filters, Number of 'out of limits'
- * ADC readings PRX/ALS.
- * @als_thresh_low: CH0 'low' count to trigger interrupt.
- * @als_thresh_high: CH0 'high' count to trigger interrupt.
- * @prox_thres_low: Low threshold proximity detection.
- * @prox_thres_high: High threshold proximity detection
- * @prox_pulse_count: Number if proximity emitter pulses
- * @prox_max_samples_cal: Used for prox cal.
- */
-struct tsl2x7x_settings {
- int als_time;
- int als_gain;
- int als_gain_trim;
- int wait_time;
- int prx_time;
- int prox_gain;
- int prox_config;
- int als_cal_target;
- u8 interrupts_en;
- u8 persistence;
- int als_thresh_low;
- int als_thresh_high;
- int prox_thres_low;
- int prox_thres_high;
- int prox_pulse_count;
- int prox_max_samples_cal;
- int prox_diode;
- int prox_power;
-};
-
-/**
- * struct tsl2X7X_platform_data - Platform callback, glass and defaults
- * @platform_power: Suspend/resume platform callback
- * @power_on: Power on callback
- * @power_off: Power off callback
- * @platform_lux_table: Device specific glass coefficents
- * @platform_default_settings: Device specific power on defaults
- *
- */
-struct tsl2X7X_platform_data {
- struct tsl2x7x_lux platform_lux_table[TSL2X7X_MAX_LUX_TABLE_SIZE];
- struct tsl2x7x_settings *platform_default_settings;
-};
-
-#endif /* __TSL2X7X_H */
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
index 64cd3704ec6e..e01eb8abcdce 100644
--- a/drivers/staging/iio/meter/Kconfig
+++ b/drivers/staging/iio/meter/Kconfig
@@ -3,48 +3,6 @@
#
menu "Active energy metering IC"
-config ADE7753
- tristate "Analog Devices ADE7753/6 Single-Phase Multifunction Metering IC Driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices ADE7753 Single-Phase Multifunction
- Metering IC with di/dt Sensor Interface.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7753.
-
-config ADE7754
- tristate "Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices ADE7754 Polyphase
- Multifunction Energy Metering IC Driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7754.
-
-config ADE7758
- tristate "Analog Devices ADE7758 Poly Phase Multifunction Energy Metering IC Driver"
- depends on SPI
- select IIO_TRIGGER if IIO_BUFFER
- select IIO_KFIFO_BUF if IIO_BUFFER
- help
- Say yes here to build support for Analog Devices ADE7758 Polyphase
- Multifunction Energy Metering IC with Per Phase Information Driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7758.
-
-config ADE7759
- tristate "Analog Devices ADE7759 Active Energy Metering IC Driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices ADE7758 Active Energy
- Metering IC with di/dt Sensor Interface.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7759.
-
config ADE7854
tristate "Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver"
depends on SPI || I2C
diff --git a/drivers/staging/iio/meter/Makefile b/drivers/staging/iio/meter/Makefile
index 19e7982f5563..ed4547e38f3a 100644
--- a/drivers/staging/iio/meter/Makefile
+++ b/drivers/staging/iio/meter/Makefile
@@ -3,14 +3,6 @@
# Makefile for metering ic drivers
#
-obj-$(CONFIG_ADE7753) += ade7753.o
-obj-$(CONFIG_ADE7754) += ade7754.o
-
-ade7758-y := ade7758_core.o
-ade7758-$(CONFIG_IIO_BUFFER) += ade7758_ring.o ade7758_trigger.o
-obj-$(CONFIG_ADE7758) += ade7758.o
-
-obj-$(CONFIG_ADE7759) += ade7759.o
obj-$(CONFIG_ADE7854) += ade7854.o
obj-$(CONFIG_ADE7854_I2C) += ade7854-i2c.o
obj-$(CONFIG_ADE7854_SPI) += ade7854-spi.o
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
deleted file mode 100644
index 275e8dfff836..000000000000
--- a/drivers/staging/iio/meter/ade7753.c
+++ /dev/null
@@ -1,630 +0,0 @@
-/*
- * ADE7753 Single-Phase Multifunction Metering IC with di/dt Sensor Interface
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/spi/spi.h>
-#include "meter.h"
-
-#define ADE7753_WAVEFORM 0x01
-#define ADE7753_AENERGY 0x02
-#define ADE7753_RAENERGY 0x03
-#define ADE7753_LAENERGY 0x04
-#define ADE7753_VAENERGY 0x05
-#define ADE7753_RVAENERGY 0x06
-#define ADE7753_LVAENERGY 0x07
-#define ADE7753_LVARENERGY 0x08
-#define ADE7753_MODE 0x09
-#define ADE7753_IRQEN 0x0A
-#define ADE7753_STATUS 0x0B
-#define ADE7753_RSTSTATUS 0x0C
-#define ADE7753_CH1OS 0x0D
-#define ADE7753_CH2OS 0x0E
-#define ADE7753_GAIN 0x0F
-#define ADE7753_PHCAL 0x10
-#define ADE7753_APOS 0x11
-#define ADE7753_WGAIN 0x12
-#define ADE7753_WDIV 0x13
-#define ADE7753_CFNUM 0x14
-#define ADE7753_CFDEN 0x15
-#define ADE7753_IRMS 0x16
-#define ADE7753_VRMS 0x17
-#define ADE7753_IRMSOS 0x18
-#define ADE7753_VRMSOS 0x19
-#define ADE7753_VAGAIN 0x1A
-#define ADE7753_VADIV 0x1B
-#define ADE7753_LINECYC 0x1C
-#define ADE7753_ZXTOUT 0x1D
-#define ADE7753_SAGCYC 0x1E
-#define ADE7753_SAGLVL 0x1F
-#define ADE7753_IPKLVL 0x20
-#define ADE7753_VPKLVL 0x21
-#define ADE7753_IPEAK 0x22
-#define ADE7753_RSTIPEAK 0x23
-#define ADE7753_VPEAK 0x24
-#define ADE7753_RSTVPEAK 0x25
-#define ADE7753_TEMP 0x26
-#define ADE7753_PERIOD 0x27
-#define ADE7753_TMODE 0x3D
-#define ADE7753_CHKSUM 0x3E
-#define ADE7753_DIEREV 0x3F
-
-#define ADE7753_READ_REG(a) a
-#define ADE7753_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7753_MAX_TX 4
-#define ADE7753_MAX_RX 4
-#define ADE7753_STARTUP_DELAY 1000
-
-#define ADE7753_SPI_SLOW (u32)(300 * 1000)
-#define ADE7753_SPI_BURST (u32)(1000 * 1000)
-#define ADE7753_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7753_state - device instance specific data
- * @us: actual spi_device
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx, rx and write frequency
- **/
-struct ade7753_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADE7753_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7753_MAX_RX];
-};
-
-static int ade7753_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7753_WRITE_REG(reg_address);
- st->tx[1] = val;
-
- ret = spi_write(st->us, st->tx, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int __ade7753_spi_write_reg_16(struct device *dev, u8 reg_address,
- u16 value)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
-
- st->tx[0] = ADE7753_WRITE_REG(reg_address);
- st->tx[1] = (value >> 8) & 0xFF;
- st->tx[2] = value & 0xFF;
-
- return spi_write(st->us, st->tx, 3);
-}
-
-static int ade7753_spi_write_reg_16(struct device *dev, u8 reg_address,
- u16 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- ret = __ade7753_spi_write_reg_16(dev, reg_address, value);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7753_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
- ssize_t ret;
-
- ret = spi_w8r8(st->us, ADE7753_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
- return ret;
- }
- *val = ret;
-
- return 0;
-}
-
-static int ade7753_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
- ssize_t ret;
-
- ret = spi_w8r16be(st->us, ADE7753_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- return ret;
- }
-
- *val = ret;
-
- return 0;
-}
-
-static int ade7753_spi_read_reg_24(struct device *dev,
- u8 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 1,
- }, {
- .rx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7753_READ_REG(reg_address);
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static ssize_t ade7753_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 val;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7753_spi_read_reg_8(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7753_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 val;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7753_spi_read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7753_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u32 val;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7753_spi_read_reg_24(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7753_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u8 val;
-
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7753_spi_write_reg_8(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7753_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7753_spi_write_reg_16(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int ade7753_reset(struct device *dev)
-{
- u16 val;
- int ret;
-
- ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
- if (ret)
- return ret;
-
- val |= BIT(6); /* Software Chip Reset */
-
- return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
-}
-
-static IIO_DEV_ATTR_AENERGY(ade7753_read_24bit, ADE7753_AENERGY);
-static IIO_DEV_ATTR_LAENERGY(ade7753_read_24bit, ADE7753_LAENERGY);
-static IIO_DEV_ATTR_VAENERGY(ade7753_read_24bit, ADE7753_VAENERGY);
-static IIO_DEV_ATTR_LVAENERGY(ade7753_read_24bit, ADE7753_LVAENERGY);
-static IIO_DEV_ATTR_CFDEN(0644,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_CFDEN);
-static IIO_DEV_ATTR_CFNUM(0644,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_CFNUM);
-static IIO_DEV_ATTR_CHKSUM(ade7753_read_8bit, ADE7753_CHKSUM);
-static IIO_DEV_ATTR_PHCAL(0644,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_PHCAL);
-static IIO_DEV_ATTR_APOS(0644,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_APOS);
-static IIO_DEV_ATTR_SAGCYC(0644,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_SAGCYC);
-static IIO_DEV_ATTR_SAGLVL(0644,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_SAGLVL);
-static IIO_DEV_ATTR_LINECYC(0644,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_LINECYC);
-static IIO_DEV_ATTR_WDIV(0644,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_WDIV);
-static IIO_DEV_ATTR_IRMS(0644,
- ade7753_read_24bit,
- NULL,
- ADE7753_IRMS);
-static IIO_DEV_ATTR_VRMS(0444,
- ade7753_read_24bit,
- NULL,
- ADE7753_VRMS);
-static IIO_DEV_ATTR_IRMSOS(0644,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_IRMSOS);
-static IIO_DEV_ATTR_VRMSOS(0644,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_VRMSOS);
-static IIO_DEV_ATTR_WGAIN(0644,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_WGAIN);
-static IIO_DEV_ATTR_VAGAIN(0644,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_VAGAIN);
-static IIO_DEV_ATTR_PGA_GAIN(0644,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_GAIN);
-static IIO_DEV_ATTR_IPKLVL(0644,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_IPKLVL);
-static IIO_DEV_ATTR_VPKLVL(0644,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_VPKLVL);
-static IIO_DEV_ATTR_IPEAK(0444,
- ade7753_read_24bit,
- NULL,
- ADE7753_IPEAK);
-static IIO_DEV_ATTR_VPEAK(0444,
- ade7753_read_24bit,
- NULL,
- ADE7753_VPEAK);
-static IIO_DEV_ATTR_VPERIOD(0444,
- ade7753_read_16bit,
- NULL,
- ADE7753_PERIOD);
-
-static IIO_DEVICE_ATTR(choff_1, 0644,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_CH1OS);
-
-static IIO_DEVICE_ATTR(choff_2, 0644,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_CH2OS);
-
-static int ade7753_set_irq(struct device *dev, bool enable)
-{
- int ret;
- u8 irqen;
-
- ret = ade7753_spi_read_reg_8(dev, ADE7753_IRQEN, &irqen);
- if (ret)
- goto error_ret;
-
- if (enable)
- irqen |= BIT(3); /* Enables an interrupt when a data is
- * present in the waveform register
- */
- else
- irqen &= ~BIT(3);
-
- ret = ade7753_spi_write_reg_8(dev, ADE7753_IRQEN, irqen);
-
-error_ret:
- return ret;
-}
-
-/* Power down the device */
-static int ade7753_stop_device(struct device *dev)
-{
- u16 val;
- int ret;
-
- ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
- if (ret)
- return ret;
-
- val |= BIT(4); /* AD converters can be turned off */
-
- return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
-}
-
-static int ade7753_initial_setup(struct iio_dev *indio_dev)
-{
- int ret;
- struct device *dev = &indio_dev->dev;
- struct ade7753_state *st = iio_priv(indio_dev);
-
- /* use low spi speed for init */
- st->us->mode = SPI_MODE_3;
- spi_setup(st->us);
-
- /* Disable IRQ */
- ret = ade7753_set_irq(dev, false);
- if (ret) {
- dev_err(dev, "disable irq failed");
- goto err_ret;
- }
-
- ade7753_reset(dev);
- usleep_range(ADE7753_STARTUP_DELAY, ADE7753_STARTUP_DELAY + 100);
-
-err_ret:
- return ret;
-}
-
-static ssize_t ade7753_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 t;
- int sps;
-
- ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &t);
- if (ret)
- return ret;
-
- t = (t >> 11) & 0x3;
- sps = 27900 / (1 + t);
-
- return sprintf(buf, "%d\n", sps);
-}
-
-static ssize_t ade7753_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
- u16 val;
- int ret;
- u16 reg, t;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- return ret;
- if (!val)
- return -EINVAL;
-
- mutex_lock(&st->buf_lock);
-
- t = 27900 / val;
- if (t > 0)
- t--;
-
- if (t > 1)
- st->us->max_speed_hz = ADE7753_SPI_SLOW;
- else
- st->us->max_speed_hz = ADE7753_SPI_FAST;
-
- ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &reg);
- if (ret)
- goto out;
-
- reg &= ~(3 << 11);
- reg |= t << 11;
-
- ret = __ade7753_spi_write_reg_16(dev, ADE7753_MODE, reg);
-
-out:
- mutex_unlock(&st->buf_lock);
-
- return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_TEMP_RAW(ade7753_read_8bit);
-static IIO_CONST_ATTR(in_temp_offset, "-25 C");
-static IIO_CONST_ATTR(in_temp_scale, "0.67 C");
-
-static IIO_DEV_ATTR_SAMP_FREQ(0644,
- ade7753_read_frequency,
- ade7753_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
-
-static struct attribute *ade7753_attributes[] = {
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_phcal.dev_attr.attr,
- &iio_dev_attr_cfden.dev_attr.attr,
- &iio_dev_attr_aenergy.dev_attr.attr,
- &iio_dev_attr_laenergy.dev_attr.attr,
- &iio_dev_attr_vaenergy.dev_attr.attr,
- &iio_dev_attr_lvaenergy.dev_attr.attr,
- &iio_dev_attr_cfnum.dev_attr.attr,
- &iio_dev_attr_apos.dev_attr.attr,
- &iio_dev_attr_sagcyc.dev_attr.attr,
- &iio_dev_attr_saglvl.dev_attr.attr,
- &iio_dev_attr_linecyc.dev_attr.attr,
- &iio_dev_attr_chksum.dev_attr.attr,
- &iio_dev_attr_pga_gain.dev_attr.attr,
- &iio_dev_attr_wgain.dev_attr.attr,
- &iio_dev_attr_choff_1.dev_attr.attr,
- &iio_dev_attr_choff_2.dev_attr.attr,
- &iio_dev_attr_wdiv.dev_attr.attr,
- &iio_dev_attr_irms.dev_attr.attr,
- &iio_dev_attr_vrms.dev_attr.attr,
- &iio_dev_attr_irmsos.dev_attr.attr,
- &iio_dev_attr_vrmsos.dev_attr.attr,
- &iio_dev_attr_vagain.dev_attr.attr,
- &iio_dev_attr_ipklvl.dev_attr.attr,
- &iio_dev_attr_vpklvl.dev_attr.attr,
- &iio_dev_attr_ipeak.dev_attr.attr,
- &iio_dev_attr_vpeak.dev_attr.attr,
- &iio_dev_attr_vperiod.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ade7753_attribute_group = {
- .attrs = ade7753_attributes,
-};
-
-static const struct iio_info ade7753_info = {
- .attrs = &ade7753_attribute_group,
-};
-
-static int ade7753_probe(struct spi_device *spi)
-{
- int ret;
- struct ade7753_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- st = iio_priv(indio_dev);
- st->us = spi;
- mutex_init(&st->buf_lock);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ade7753_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- /* Get the device into a sane initial state */
- ret = ade7753_initial_setup(indio_dev);
- if (ret)
- return ret;
-
- return iio_device_register(indio_dev);
-}
-
-static int ade7753_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
- ade7753_stop_device(&indio_dev->dev);
-
- return 0;
-}
-
-static struct spi_driver ade7753_driver = {
- .driver = {
- .name = "ade7753",
- },
- .probe = ade7753_probe,
- .remove = ade7753_remove,
-};
-module_spi_driver(ade7753_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7753/6 Single-Phase Multifunction Meter");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ade7753");
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
deleted file mode 100644
index 9aa067736715..000000000000
--- a/drivers/staging/iio/meter/ade7754.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * ADE7754 Polyphase Multifunction Energy Metering IC Driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include "meter.h"
-
-#define ADE7754_AENERGY 0x01
-#define ADE7754_RAENERGY 0x02
-#define ADE7754_LAENERGY 0x03
-#define ADE7754_VAENERGY 0x04
-#define ADE7754_RVAENERGY 0x05
-#define ADE7754_LVAENERGY 0x06
-#define ADE7754_PERIOD 0x07
-#define ADE7754_TEMP 0x08
-#define ADE7754_WFORM 0x09
-#define ADE7754_OPMODE 0x0A
-#define ADE7754_MMODE 0x0B
-#define ADE7754_WAVMODE 0x0C
-#define ADE7754_WATMODE 0x0D
-#define ADE7754_VAMODE 0x0E
-#define ADE7754_IRQEN 0x0F
-#define ADE7754_STATUS 0x10
-#define ADE7754_RSTATUS 0x11
-#define ADE7754_ZXTOUT 0x12
-#define ADE7754_LINCYC 0x13
-#define ADE7754_SAGCYC 0x14
-#define ADE7754_SAGLVL 0x15
-#define ADE7754_VPEAK 0x16
-#define ADE7754_IPEAK 0x17
-#define ADE7754_GAIN 0x18
-#define ADE7754_AWG 0x19
-#define ADE7754_BWG 0x1A
-#define ADE7754_CWG 0x1B
-#define ADE7754_AVAG 0x1C
-#define ADE7754_BVAG 0x1D
-#define ADE7754_CVAG 0x1E
-#define ADE7754_APHCAL 0x1F
-#define ADE7754_BPHCAL 0x20
-#define ADE7754_CPHCAL 0x21
-#define ADE7754_AAPOS 0x22
-#define ADE7754_BAPOS 0x23
-#define ADE7754_CAPOS 0x24
-#define ADE7754_CFNUM 0x25
-#define ADE7754_CFDEN 0x26
-#define ADE7754_WDIV 0x27
-#define ADE7754_VADIV 0x28
-#define ADE7754_AIRMS 0x29
-#define ADE7754_BIRMS 0x2A
-#define ADE7754_CIRMS 0x2B
-#define ADE7754_AVRMS 0x2C
-#define ADE7754_BVRMS 0x2D
-#define ADE7754_CVRMS 0x2E
-#define ADE7754_AIRMSOS 0x2F
-#define ADE7754_BIRMSOS 0x30
-#define ADE7754_CIRMSOS 0x31
-#define ADE7754_AVRMSOS 0x32
-#define ADE7754_BVRMSOS 0x33
-#define ADE7754_CVRMSOS 0x34
-#define ADE7754_AAPGAIN 0x35
-#define ADE7754_BAPGAIN 0x36
-#define ADE7754_CAPGAIN 0x37
-#define ADE7754_AVGAIN 0x38
-#define ADE7754_BVGAIN 0x39
-#define ADE7754_CVGAIN 0x3A
-#define ADE7754_CHKSUM 0x3E
-#define ADE7754_VERSION 0x3F
-
-#define ADE7754_READ_REG(a) a
-#define ADE7754_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7754_MAX_TX 4
-#define ADE7754_MAX_RX 4
-#define ADE7754_STARTUP_DELAY 1000
-
-#define ADE7754_SPI_SLOW (u32)(300 * 1000)
-#define ADE7754_SPI_BURST (u32)(1000 * 1000)
-#define ADE7754_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7754_state - device instance specific data
- * @us: actual spi_device
- * @buf_lock: mutex to protect tx, rx and write frequency
- * @tx: transmit buffer
- * @rx: receive buffer
- **/
-struct ade7754_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADE7754_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7754_MAX_RX];
-};
-
-/* Unlocked version of ade7754_spi_write_reg_8 function */
-static int __ade7754_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
-
- st->tx[0] = ADE7754_WRITE_REG(reg_address);
- st->tx[1] = val;
- return spi_write(st->us, st->tx, 2);
-}
-
-static int ade7754_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- ret = __ade7754_spi_write_reg_8(dev, reg_address, val);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7754_spi_write_reg_16(struct device *dev,
- u8 reg_address, u16 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7754_WRITE_REG(reg_address);
- st->tx[1] = (val >> 8) & 0xFF;
- st->tx[2] = val & 0xFF;
- ret = spi_write(st->us, st->tx, 3);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7754_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_w8r8(st->us, ADE7754_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
- return ret;
- }
- *val = ret;
-
- return 0;
-}
-
-static int ade7754_spi_read_reg_16(struct device *dev,
- u8 reg_address, u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_w8r16be(st->us, ADE7754_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- return ret;
- }
-
- *val = ret;
-
- return 0;
-}
-
-static int ade7754_spi_read_reg_24(struct device *dev,
- u8 reg_address, u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 4,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7754_READ_REG(reg_address);
- st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static ssize_t ade7754_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7754_spi_read_reg_8(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7754_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7754_spi_read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7754_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u32 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7754_spi_read_reg_24(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val & 0xFFFFFF);
-}
-
-static ssize_t ade7754_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u8 val;
-
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7754_spi_write_reg_8(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7754_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7754_spi_write_reg_16(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int ade7754_reset(struct device *dev)
-{
- int ret;
- u8 val;
-
- ret = ade7754_spi_read_reg_8(dev, ADE7754_OPMODE, &val);
- if (ret < 0)
- return ret;
-
- val |= BIT(6); /* Software Chip Reset */
- return ade7754_spi_write_reg_8(dev, ADE7754_OPMODE, val);
-}
-
-static IIO_DEV_ATTR_AENERGY(ade7754_read_24bit, ADE7754_AENERGY);
-static IIO_DEV_ATTR_LAENERGY(ade7754_read_24bit, ADE7754_LAENERGY);
-static IIO_DEV_ATTR_VAENERGY(ade7754_read_24bit, ADE7754_VAENERGY);
-static IIO_DEV_ATTR_LVAENERGY(ade7754_read_24bit, ADE7754_LVAENERGY);
-static IIO_DEV_ATTR_VPEAK(0644,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_VPEAK);
-static IIO_DEV_ATTR_IPEAK(0644,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_VPEAK);
-static IIO_DEV_ATTR_APHCAL(0644,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_APHCAL);
-static IIO_DEV_ATTR_BPHCAL(0644,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_BPHCAL);
-static IIO_DEV_ATTR_CPHCAL(0644,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_CPHCAL);
-static IIO_DEV_ATTR_AAPOS(0644,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_AAPOS);
-static IIO_DEV_ATTR_BAPOS(0644,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_BAPOS);
-static IIO_DEV_ATTR_CAPOS(0644,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CAPOS);
-static IIO_DEV_ATTR_WDIV(0644,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_WDIV);
-static IIO_DEV_ATTR_VADIV(0644,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_VADIV);
-static IIO_DEV_ATTR_CFNUM(0644,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CFNUM);
-static IIO_DEV_ATTR_CFDEN(0644,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CFDEN);
-static IIO_DEV_ATTR_ACTIVE_POWER_A_GAIN(0644,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_AAPGAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_B_GAIN(0644,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_BAPGAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_C_GAIN(0644,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CAPGAIN);
-static IIO_DEV_ATTR_AIRMS(0444,
- ade7754_read_24bit,
- NULL,
- ADE7754_AIRMS);
-static IIO_DEV_ATTR_BIRMS(0444,
- ade7754_read_24bit,
- NULL,
- ADE7754_BIRMS);
-static IIO_DEV_ATTR_CIRMS(0444,
- ade7754_read_24bit,
- NULL,
- ADE7754_CIRMS);
-static IIO_DEV_ATTR_AVRMS(0444,
- ade7754_read_24bit,
- NULL,
- ADE7754_AVRMS);
-static IIO_DEV_ATTR_BVRMS(0444,
- ade7754_read_24bit,
- NULL,
- ADE7754_BVRMS);
-static IIO_DEV_ATTR_CVRMS(0444,
- ade7754_read_24bit,
- NULL,
- ADE7754_CVRMS);
-static IIO_DEV_ATTR_AIRMSOS(0444,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_AIRMSOS);
-static IIO_DEV_ATTR_BIRMSOS(0444,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_BIRMSOS);
-static IIO_DEV_ATTR_CIRMSOS(0444,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CIRMSOS);
-static IIO_DEV_ATTR_AVRMSOS(0444,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_AVRMSOS);
-static IIO_DEV_ATTR_BVRMSOS(0444,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_BVRMSOS);
-static IIO_DEV_ATTR_CVRMSOS(0444,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CVRMSOS);
-
-static int ade7754_set_irq(struct device *dev, bool enable)
-{
- int ret;
- u16 irqen;
-
- ret = ade7754_spi_read_reg_16(dev, ADE7754_IRQEN, &irqen);
- if (ret)
- return ret;
-
- if (enable)
- irqen |= BIT(14); /* Enables an interrupt when a data is
- * present in the waveform register
- */
- else
- irqen &= ~BIT(14);
-
- return ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen);
-}
-
-/* Power down the device */
-static int ade7754_stop_device(struct device *dev)
-{
- int ret;
- u8 val;
-
- ret = ade7754_spi_read_reg_8(dev, ADE7754_OPMODE, &val);
- if (ret < 0) {
- dev_err(dev, "unable to power down the device, error: %d",
- ret);
- return ret;
- }
-
- val |= 7 << 3; /* ADE7754 powered down */
- return ade7754_spi_write_reg_8(dev, ADE7754_OPMODE, val);
-}
-
-static int ade7754_initial_setup(struct iio_dev *indio_dev)
-{
- int ret;
- struct ade7754_state *st = iio_priv(indio_dev);
- struct device *dev = &indio_dev->dev;
-
- /* use low spi speed for init */
- st->us->mode = SPI_MODE_3;
- spi_setup(st->us);
-
- /* Disable IRQ */
- ret = ade7754_set_irq(dev, false);
- if (ret) {
- dev_err(dev, "disable irq failed");
- goto err_ret;
- }
-
- ade7754_reset(dev);
- usleep_range(ADE7754_STARTUP_DELAY, ADE7754_STARTUP_DELAY + 100);
-
-err_ret:
- return ret;
-}
-
-static ssize_t ade7754_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 t;
- int sps;
-
- ret = ade7754_spi_read_reg_8(dev, ADE7754_WAVMODE, &t);
- if (ret)
- return ret;
-
- t = (t >> 3) & 0x3;
- sps = 26000 / (1 + t);
-
- return sprintf(buf, "%d\n", sps);
-}
-
-static ssize_t ade7754_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
- u16 val;
- int ret;
- u8 reg, t;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- return ret;
- if (!val)
- return -EINVAL;
-
- mutex_lock(&st->buf_lock);
-
- t = 26000 / val;
- if (t > 0)
- t--;
-
- if (t > 1)
- st->us->max_speed_hz = ADE7754_SPI_SLOW;
- else
- st->us->max_speed_hz = ADE7754_SPI_FAST;
-
- ret = ade7754_spi_read_reg_8(dev, ADE7754_WAVMODE, &reg);
- if (ret)
- goto out;
-
- reg &= ~(3 << 3);
- reg |= t << 3;
-
- ret = __ade7754_spi_write_reg_8(dev, ADE7754_WAVMODE, reg);
-
-out:
- mutex_unlock(&st->buf_lock);
-
- return ret ? ret : len;
-}
-static IIO_DEV_ATTR_TEMP_RAW(ade7754_read_8bit);
-static IIO_CONST_ATTR(in_temp_offset, "129 C");
-static IIO_CONST_ATTR(in_temp_scale, "4 C");
-
-static IIO_DEV_ATTR_SAMP_FREQ(0644,
- ade7754_read_frequency,
- ade7754_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26000 13000 65000 33000");
-
-static struct attribute *ade7754_attributes[] = {
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_aenergy.dev_attr.attr,
- &iio_dev_attr_laenergy.dev_attr.attr,
- &iio_dev_attr_vaenergy.dev_attr.attr,
- &iio_dev_attr_lvaenergy.dev_attr.attr,
- &iio_dev_attr_vpeak.dev_attr.attr,
- &iio_dev_attr_ipeak.dev_attr.attr,
- &iio_dev_attr_aphcal.dev_attr.attr,
- &iio_dev_attr_bphcal.dev_attr.attr,
- &iio_dev_attr_cphcal.dev_attr.attr,
- &iio_dev_attr_aapos.dev_attr.attr,
- &iio_dev_attr_bapos.dev_attr.attr,
- &iio_dev_attr_capos.dev_attr.attr,
- &iio_dev_attr_wdiv.dev_attr.attr,
- &iio_dev_attr_vadiv.dev_attr.attr,
- &iio_dev_attr_cfnum.dev_attr.attr,
- &iio_dev_attr_cfden.dev_attr.attr,
- &iio_dev_attr_active_power_a_gain.dev_attr.attr,
- &iio_dev_attr_active_power_b_gain.dev_attr.attr,
- &iio_dev_attr_active_power_c_gain.dev_attr.attr,
- &iio_dev_attr_airms.dev_attr.attr,
- &iio_dev_attr_birms.dev_attr.attr,
- &iio_dev_attr_cirms.dev_attr.attr,
- &iio_dev_attr_avrms.dev_attr.attr,
- &iio_dev_attr_bvrms.dev_attr.attr,
- &iio_dev_attr_cvrms.dev_attr.attr,
- &iio_dev_attr_airmsos.dev_attr.attr,
- &iio_dev_attr_birmsos.dev_attr.attr,
- &iio_dev_attr_cirmsos.dev_attr.attr,
- &iio_dev_attr_avrmsos.dev_attr.attr,
- &iio_dev_attr_bvrmsos.dev_attr.attr,
- &iio_dev_attr_cvrmsos.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ade7754_attribute_group = {
- .attrs = ade7754_attributes,
-};
-
-static const struct iio_info ade7754_info = {
- .attrs = &ade7754_attribute_group,
-};
-
-static int ade7754_probe(struct spi_device *spi)
-{
- int ret;
- struct ade7754_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- st = iio_priv(indio_dev);
- st->us = spi;
- mutex_init(&st->buf_lock);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ade7754_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- /* Get the device into a sane initial state */
- ret = ade7754_initial_setup(indio_dev);
- if (ret)
- goto powerdown_on_error;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto powerdown_on_error;
- return ret;
-
-powerdown_on_error:
- ade7754_stop_device(&indio_dev->dev);
- return ret;
-}
-
-static int ade7754_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
- ade7754_stop_device(&indio_dev->dev);
-
- return 0;
-}
-
-static struct spi_driver ade7754_driver = {
- .driver = {
- .name = "ade7754",
- },
- .probe = ade7754_probe,
- .remove = ade7754_remove,
-};
-module_spi_driver(ade7754_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7754");
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
deleted file mode 100644
index 2de81b53e786..000000000000
--- a/drivers/staging/iio/meter/ade7758.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * ADE7758 Poly Phase Multifunction Energy Metering IC driver
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef _ADE7758_H
-#define _ADE7758_H
-
-#define ADE7758_AWATTHR 0x01
-#define ADE7758_BWATTHR 0x02
-#define ADE7758_CWATTHR 0x03
-#define ADE7758_AVARHR 0x04
-#define ADE7758_BVARHR 0x05
-#define ADE7758_CVARHR 0x06
-#define ADE7758_AVAHR 0x07
-#define ADE7758_BVAHR 0x08
-#define ADE7758_CVAHR 0x09
-#define ADE7758_AIRMS 0x0A
-#define ADE7758_BIRMS 0x0B
-#define ADE7758_CIRMS 0x0C
-#define ADE7758_AVRMS 0x0D
-#define ADE7758_BVRMS 0x0E
-#define ADE7758_CVRMS 0x0F
-#define ADE7758_FREQ 0x10
-#define ADE7758_TEMP 0x11
-#define ADE7758_WFORM 0x12
-#define ADE7758_OPMODE 0x13
-#define ADE7758_MMODE 0x14
-#define ADE7758_WAVMODE 0x15
-#define ADE7758_COMPMODE 0x16
-#define ADE7758_LCYCMODE 0x17
-#define ADE7758_MASK 0x18
-#define ADE7758_STATUS 0x19
-#define ADE7758_RSTATUS 0x1A
-#define ADE7758_ZXTOUT 0x1B
-#define ADE7758_LINECYC 0x1C
-#define ADE7758_SAGCYC 0x1D
-#define ADE7758_SAGLVL 0x1E
-#define ADE7758_VPINTLVL 0x1F
-#define ADE7758_IPINTLVL 0x20
-#define ADE7758_VPEAK 0x21
-#define ADE7758_IPEAK 0x22
-#define ADE7758_GAIN 0x23
-#define ADE7758_AVRMSGAIN 0x24
-#define ADE7758_BVRMSGAIN 0x25
-#define ADE7758_CVRMSGAIN 0x26
-#define ADE7758_AIGAIN 0x27
-#define ADE7758_BIGAIN 0x28
-#define ADE7758_CIGAIN 0x29
-#define ADE7758_AWG 0x2A
-#define ADE7758_BWG 0x2B
-#define ADE7758_CWG 0x2C
-#define ADE7758_AVARG 0x2D
-#define ADE7758_BVARG 0x2E
-#define ADE7758_CVARG 0x2F
-#define ADE7758_AVAG 0x30
-#define ADE7758_BVAG 0x31
-#define ADE7758_CVAG 0x32
-#define ADE7758_AVRMSOS 0x33
-#define ADE7758_BVRMSOS 0x34
-#define ADE7758_CVRMSOS 0x35
-#define ADE7758_AIRMSOS 0x36
-#define ADE7758_BIRMSOS 0x37
-#define ADE7758_CIRMSOS 0x38
-#define ADE7758_AWAITOS 0x39
-#define ADE7758_BWAITOS 0x3A
-#define ADE7758_CWAITOS 0x3B
-#define ADE7758_AVAROS 0x3C
-#define ADE7758_BVAROS 0x3D
-#define ADE7758_CVAROS 0x3E
-#define ADE7758_APHCAL 0x3F
-#define ADE7758_BPHCAL 0x40
-#define ADE7758_CPHCAL 0x41
-#define ADE7758_WDIV 0x42
-#define ADE7758_VADIV 0x44
-#define ADE7758_VARDIV 0x43
-#define ADE7758_APCFNUM 0x45
-#define ADE7758_APCFDEN 0x46
-#define ADE7758_VARCFNUM 0x47
-#define ADE7758_VARCFDEN 0x48
-#define ADE7758_CHKSUM 0x7E
-#define ADE7758_VERSION 0x7F
-
-#define ADE7758_READ_REG(a) a
-#define ADE7758_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7758_MAX_TX 8
-#define ADE7758_MAX_RX 4
-#define ADE7758_STARTUP_DELAY 1000
-
-#define AD7758_NUM_WAVSEL 5
-#define AD7758_NUM_PHSEL 3
-#define AD7758_NUM_WAVESRC (AD7758_NUM_WAVSEL * AD7758_NUM_PHSEL)
-
-#define AD7758_PHASE_A 0
-#define AD7758_PHASE_B 1
-#define AD7758_PHASE_C 2
-#define AD7758_CURRENT 0
-#define AD7758_VOLTAGE 1
-#define AD7758_ACT_PWR 2
-#define AD7758_REACT_PWR 3
-#define AD7758_APP_PWR 4
-#define AD7758_WT(p, w) (((w) << 2) | (p))
-
-/**
- * struct ade7758_state - device instance specific data
- * @us: actual spi_device
- * @trig: data ready trigger registered with iio
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx, rx, read and write frequency
- **/
-struct ade7758_state {
- struct spi_device *us;
- struct iio_trigger *trig;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
- struct spi_transfer ring_xfer[4];
- struct spi_message ring_msg;
- /*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
- */
- unsigned char rx_buf[8] ____cacheline_aligned;
- unsigned char tx_buf[8];
-
-};
-
-#ifdef CONFIG_IIO_BUFFER
-/* At the moment triggers are only used for ring buffer
- * filling. This may change!
- */
-
-void ade7758_remove_trigger(struct iio_dev *indio_dev);
-int ade7758_probe_trigger(struct iio_dev *indio_dev);
-
-ssize_t ade7758_read_data_from_ring(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-int ade7758_configure_ring(struct iio_dev *indio_dev);
-void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
-
-int ade7758_set_irq(struct device *dev, bool enable);
-
-int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val);
-int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val);
-
-#else /* CONFIG_IIO_BUFFER */
-
-static inline void ade7758_remove_trigger(struct iio_dev *indio_dev)
-{
-}
-
-static inline int ade7758_probe_trigger(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static int ade7758_configure_ring(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
-{
-}
-
-static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring)
-{
- return 0;
-}
-
-static inline void ade7758_uninitialize_ring(struct iio_dev *indio_dev)
-{
-}
-
-#endif /* CONFIG_IIO_BUFFER */
-
-#endif
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
deleted file mode 100644
index 4e0dbf5c5705..000000000000
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ /dev/null
@@ -1,955 +0,0 @@
-/*
- * ADE7758 Poly Phase Multifunction Energy Metering IC driver
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include "meter.h"
-#include "ade7758.h"
-
-static int __ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
-
- st->tx[0] = ADE7758_WRITE_REG(reg_address);
- st->tx[1] = val;
-
- return spi_write(st->us, st->tx, 2);
-}
-
-int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- ret = __ade7758_spi_write_reg_8(dev, reg_address, val);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7758_spi_write_reg_16(struct device *dev, u8 reg_address,
- u16 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_WRITE_REG(reg_address);
- st->tx[1] = (value >> 8) & 0xFF;
- st->tx[2] = value & 0xFF;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7758_spi_write_reg_24(struct device *dev, u8 reg_address,
- u32 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 4,
- }
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_WRITE_REG(reg_address);
- st->tx[1] = (value >> 16) & 0xFF;
- st->tx[2] = (value >> 8) & 0xFF;
- st->tx[3] = value & 0xFF;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int __ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 1,
- .delay_usecs = 4,
- },
- {
- .tx_buf = &st->tx[1],
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 1,
- },
- };
-
- st->tx[0] = ADE7758_READ_REG(reg_address);
- st->tx[1] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = st->rx[0];
-
-error_ret:
- return ret;
-}
-
-int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->buf_lock);
- ret = __ade7758_spi_read_reg_8(dev, reg_address, val);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7758_spi_read_reg_16(struct device *dev, u8 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 1,
- .delay_usecs = 4,
- },
- {
- .tx_buf = &st->tx[1],
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 2,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_READ_REG(reg_address);
- st->tx[1] = 0;
- st->tx[2] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
-
- *val = (st->rx[0] << 8) | st->rx[1];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7758_spi_read_reg_24(struct device *dev, u8 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 1,
- .delay_usecs = 4,
- },
- {
- .tx_buf = &st->tx[1],
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 3,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_READ_REG(reg_address);
- st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static ssize_t ade7758_read_8bit(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int ret;
- u8 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7758_spi_read_reg_8(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7758_read_16bit(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int ret;
- u16 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7758_spi_read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7758_read_24bit(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int ret;
- u32 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7758_spi_read_reg_24(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val & 0xFFFFFF);
-}
-
-static ssize_t ade7758_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u8 val;
-
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7758_spi_write_reg_8(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7758_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7758_spi_write_reg_16(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int ade7758_reset(struct device *dev)
-{
- int ret;
- u8 val;
-
- ret = ade7758_spi_read_reg_8(dev, ADE7758_OPMODE, &val);
- if (ret < 0) {
- dev_err(dev, "Failed to read opmode reg\n");
- return ret;
- }
- val |= BIT(6); /* Software Chip Reset */
- ret = ade7758_spi_write_reg_8(dev, ADE7758_OPMODE, val);
- if (ret < 0)
- dev_err(dev, "Failed to write opmode reg\n");
- return ret;
-}
-
-static IIO_DEV_ATTR_VPEAK(0644,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_VPEAK);
-static IIO_DEV_ATTR_IPEAK(0644,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_VPEAK);
-static IIO_DEV_ATTR_APHCAL(0644,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_APHCAL);
-static IIO_DEV_ATTR_BPHCAL(0644,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_BPHCAL);
-static IIO_DEV_ATTR_CPHCAL(0644,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_CPHCAL);
-static IIO_DEV_ATTR_WDIV(0644,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_WDIV);
-static IIO_DEV_ATTR_VADIV(0644,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_VADIV);
-static IIO_DEV_ATTR_AIRMS(0444,
- ade7758_read_24bit,
- NULL,
- ADE7758_AIRMS);
-static IIO_DEV_ATTR_BIRMS(0444,
- ade7758_read_24bit,
- NULL,
- ADE7758_BIRMS);
-static IIO_DEV_ATTR_CIRMS(0444,
- ade7758_read_24bit,
- NULL,
- ADE7758_CIRMS);
-static IIO_DEV_ATTR_AVRMS(0444,
- ade7758_read_24bit,
- NULL,
- ADE7758_AVRMS);
-static IIO_DEV_ATTR_BVRMS(0444,
- ade7758_read_24bit,
- NULL,
- ADE7758_BVRMS);
-static IIO_DEV_ATTR_CVRMS(0444,
- ade7758_read_24bit,
- NULL,
- ADE7758_CVRMS);
-static IIO_DEV_ATTR_AIRMSOS(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_AIRMSOS);
-static IIO_DEV_ATTR_BIRMSOS(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_BIRMSOS);
-static IIO_DEV_ATTR_CIRMSOS(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_CIRMSOS);
-static IIO_DEV_ATTR_AVRMSOS(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_AVRMSOS);
-static IIO_DEV_ATTR_BVRMSOS(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_BVRMSOS);
-static IIO_DEV_ATTR_CVRMSOS(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_CVRMSOS);
-static IIO_DEV_ATTR_AIGAIN(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_AIGAIN);
-static IIO_DEV_ATTR_BIGAIN(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_BIGAIN);
-static IIO_DEV_ATTR_CIGAIN(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_CIGAIN);
-static IIO_DEV_ATTR_AVRMSGAIN(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_AVRMSGAIN);
-static IIO_DEV_ATTR_BVRMSGAIN(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_BVRMSGAIN);
-static IIO_DEV_ATTR_CVRMSGAIN(0644,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_CVRMSGAIN);
-
-int ade7758_set_irq(struct device *dev, bool enable)
-{
- int ret;
- u32 irqen;
-
- ret = ade7758_spi_read_reg_24(dev, ADE7758_MASK, &irqen);
- if (ret)
- return ret;
-
- if (enable)
- irqen |= BIT(16); /* Enables an interrupt when a data is
- * present in the waveform register
- */
- else
- irqen &= ~BIT(16);
-
- ret = ade7758_spi_write_reg_24(dev, ADE7758_MASK, irqen);
-
- return ret;
-}
-
-/* Power down the device */
-static int ade7758_stop_device(struct device *dev)
-{
- int ret;
- u8 val;
-
- ret = ade7758_spi_read_reg_8(dev, ADE7758_OPMODE, &val);
- if (ret < 0) {
- dev_err(dev, "Failed to read opmode reg\n");
- return ret;
- }
- val |= 7 << 3; /* ADE7758 powered down */
- ret = ade7758_spi_write_reg_8(dev, ADE7758_OPMODE, val);
- if (ret < 0)
- dev_err(dev, "Failed to write opmode reg\n");
- return ret;
-}
-
-static int ade7758_initial_setup(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
- struct device *dev = &indio_dev->dev;
- int ret;
-
- /* use low spi speed for init */
- st->us->mode = SPI_MODE_1;
- spi_setup(st->us);
-
- /* Disable IRQ */
- ret = ade7758_set_irq(dev, false);
- if (ret) {
- dev_err(dev, "disable irq failed");
- goto err_ret;
- }
-
- ade7758_reset(dev);
- usleep_range(ADE7758_STARTUP_DELAY, ADE7758_STARTUP_DELAY + 100);
-
-err_ret:
- return ret;
-}
-
-static int ade7758_read_samp_freq(struct device *dev, int *val)
-{
- int ret;
- u8 t;
-
- ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t);
- if (ret)
- return ret;
-
- t = (t >> 5) & 0x3;
- *val = 26040 / (1 << t);
-
- return 0;
-}
-
-static int ade7758_write_samp_freq(struct device *dev, int val)
-{
- int ret;
- u8 reg, t;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
-
- switch (val) {
- case 26040:
- t = 0;
- break;
- case 13020:
- t = 1;
- break;
- case 6510:
- t = 2;
- break;
- case 3255:
- t = 3;
- break;
- default:
- return -EINVAL;
- }
-
- mutex_lock(&st->buf_lock);
-
- ret = __ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &reg);
- if (ret)
- goto out;
-
- reg &= ~(5 << 3);
- reg |= t << 5;
-
- ret = __ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
-
-out:
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7758_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- int ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
-
- ret = ade7758_read_samp_freq(&indio_dev->dev, val);
-
- return ret;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int ade7758_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- int ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (val2)
- return -EINVAL;
-
- ret = ade7758_write_samp_freq(&indio_dev->dev, val);
-
- return ret;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit);
-static IIO_CONST_ATTR(in_temp_offset, "129 C");
-static IIO_CONST_ATTR(in_temp_scale, "4 C");
-
-static IIO_DEV_ATTR_AWATTHR(ade7758_read_16bit,
- ADE7758_AWATTHR);
-static IIO_DEV_ATTR_BWATTHR(ade7758_read_16bit,
- ADE7758_BWATTHR);
-static IIO_DEV_ATTR_CWATTHR(ade7758_read_16bit,
- ADE7758_CWATTHR);
-static IIO_DEV_ATTR_AVARHR(ade7758_read_16bit,
- ADE7758_AVARHR);
-static IIO_DEV_ATTR_BVARHR(ade7758_read_16bit,
- ADE7758_BVARHR);
-static IIO_DEV_ATTR_CVARHR(ade7758_read_16bit,
- ADE7758_CVARHR);
-static IIO_DEV_ATTR_AVAHR(ade7758_read_16bit,
- ADE7758_AVAHR);
-static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit,
- ADE7758_BVAHR);
-static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit,
- ADE7758_CVAHR);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
-
-static struct attribute *ade7758_attributes[] = {
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_awatthr.dev_attr.attr,
- &iio_dev_attr_bwatthr.dev_attr.attr,
- &iio_dev_attr_cwatthr.dev_attr.attr,
- &iio_dev_attr_avarhr.dev_attr.attr,
- &iio_dev_attr_bvarhr.dev_attr.attr,
- &iio_dev_attr_cvarhr.dev_attr.attr,
- &iio_dev_attr_avahr.dev_attr.attr,
- &iio_dev_attr_bvahr.dev_attr.attr,
- &iio_dev_attr_cvahr.dev_attr.attr,
- &iio_dev_attr_vpeak.dev_attr.attr,
- &iio_dev_attr_ipeak.dev_attr.attr,
- &iio_dev_attr_aphcal.dev_attr.attr,
- &iio_dev_attr_bphcal.dev_attr.attr,
- &iio_dev_attr_cphcal.dev_attr.attr,
- &iio_dev_attr_wdiv.dev_attr.attr,
- &iio_dev_attr_vadiv.dev_attr.attr,
- &iio_dev_attr_airms.dev_attr.attr,
- &iio_dev_attr_birms.dev_attr.attr,
- &iio_dev_attr_cirms.dev_attr.attr,
- &iio_dev_attr_avrms.dev_attr.attr,
- &iio_dev_attr_bvrms.dev_attr.attr,
- &iio_dev_attr_cvrms.dev_attr.attr,
- &iio_dev_attr_aigain.dev_attr.attr,
- &iio_dev_attr_bigain.dev_attr.attr,
- &iio_dev_attr_cigain.dev_attr.attr,
- &iio_dev_attr_avrmsgain.dev_attr.attr,
- &iio_dev_attr_bvrmsgain.dev_attr.attr,
- &iio_dev_attr_cvrmsgain.dev_attr.attr,
- &iio_dev_attr_airmsos.dev_attr.attr,
- &iio_dev_attr_birmsos.dev_attr.attr,
- &iio_dev_attr_cirmsos.dev_attr.attr,
- &iio_dev_attr_avrmsos.dev_attr.attr,
- &iio_dev_attr_bvrmsos.dev_attr.attr,
- &iio_dev_attr_cvrmsos.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ade7758_attribute_group = {
- .attrs = ade7758_attributes,
-};
-
-static const struct iio_chan_spec ade7758_channels[] = {
- {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
- .scan_index = 0,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_CURRENT,
- .indexed = 1,
- .channel = 0,
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
- .scan_index = 1,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 0,
- .extend_name = "apparent",
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
- .scan_index = 2,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 0,
- .extend_name = "active",
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
- .scan_index = 3,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 0,
- .extend_name = "reactive",
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
- .scan_index = 4,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
- .scan_index = 5,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_CURRENT,
- .indexed = 1,
- .channel = 1,
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
- .scan_index = 6,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 1,
- .extend_name = "apparent",
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
- .scan_index = 7,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 1,
- .extend_name = "active",
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
- .scan_index = 8,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 1,
- .extend_name = "reactive",
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
- .scan_index = 9,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 2,
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
- .scan_index = 10,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_CURRENT,
- .indexed = 1,
- .channel = 2,
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
- .scan_index = 11,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 2,
- .extend_name = "apparent",
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
- .scan_index = 12,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 2,
- .extend_name = "active",
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
- .scan_index = 13,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 2,
- .extend_name = "reactive",
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
- .scan_index = 14,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- },
- IIO_CHAN_SOFT_TIMESTAMP(15),
-};
-
-static const struct iio_info ade7758_info = {
- .attrs = &ade7758_attribute_group,
- .read_raw = &ade7758_read_raw,
- .write_raw = &ade7758_write_raw,
-};
-
-static int ade7758_probe(struct spi_device *spi)
-{
- int ret;
- struct ade7758_state *st;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- /* Allocate the comms buffers */
- st->rx = kcalloc(ADE7758_MAX_RX, sizeof(*st->rx), GFP_KERNEL);
- if (!st->rx)
- return -ENOMEM;
- st->tx = kcalloc(ADE7758_MAX_TX, sizeof(*st->tx), GFP_KERNEL);
- if (!st->tx) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
- st->us = spi;
- mutex_init(&st->buf_lock);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ade7758_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = ade7758_channels;
- indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
-
- ret = ade7758_configure_ring(indio_dev);
- if (ret)
- goto error_free_tx;
-
- /* Get the device into a sane initial state */
- ret = ade7758_initial_setup(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
-
- if (spi->irq) {
- ret = ade7758_probe_trigger(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- }
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_remove_trigger;
-
- return 0;
-
-error_remove_trigger:
- if (spi->irq)
- ade7758_remove_trigger(indio_dev);
-error_unreg_ring_funcs:
- ade7758_unconfigure_ring(indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
- return ret;
-}
-
-static int ade7758_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ade7758_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- ade7758_stop_device(&indio_dev->dev);
- ade7758_remove_trigger(indio_dev);
- ade7758_unconfigure_ring(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
-
- return 0;
-}
-
-static const struct spi_device_id ade7758_id[] = {
- {"ade7758", 0},
- {}
-};
-MODULE_DEVICE_TABLE(spi, ade7758_id);
-
-static struct spi_driver ade7758_driver = {
- .driver = {
- .name = "ade7758",
- },
- .probe = ade7758_probe,
- .remove = ade7758_remove,
- .id_table = ade7758_id,
-};
-module_spi_driver(ade7758_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7758 Polyphase Multifunction Energy Metering IC Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
deleted file mode 100644
index 6d7444d6e880..000000000000
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * ADE7758 Poly Phase Multifunction Energy Metering IC driver
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <asm/unaligned.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/kfifo_buf.h>
-#include <linux/iio/trigger_consumer.h>
-#include "ade7758.h"
-
-/**
- * ade7758_spi_read_burst() - read data registers
- * @indio_dev: the IIO device
- **/
-static int ade7758_spi_read_burst(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_sync(st->us, &st->ring_msg);
- if (ret)
- dev_err(&st->us->dev, "problem when reading WFORM value\n");
-
- return ret;
-}
-
-static int ade7758_write_waveform_type(struct device *dev, unsigned int type)
-{
- int ret;
- u8 reg;
-
- ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &reg);
- if (ret)
- goto out;
-
- reg &= ~0x1F;
- reg |= type & 0x1F;
-
- ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
-out:
- return ret;
-}
-
-/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
- * specific to be rolled into the core.
- */
-static irqreturn_t ade7758_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct ade7758_state *st = iio_priv(indio_dev);
- s64 dat64[2];
- u32 *dat32 = (u32 *)dat64;
-
- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
- if (ade7758_spi_read_burst(indio_dev) >= 0)
- *dat32 = get_unaligned_be32(&st->rx_buf[5]) & 0xFFFFFF;
-
- iio_push_to_buffers_with_timestamp(indio_dev, dat64, pf->timestamp);
-
- iio_trigger_notify_done(indio_dev->trig);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ade7758_ring_preenable() setup the parameters of the ring before enabling
- *
- * The complex nature of the setting of the number of bytes per datum is due
- * to this driver currently ensuring that the timestamp is stored at an 8
- * byte boundary.
- **/
-static int ade7758_ring_preenable(struct iio_dev *indio_dev)
-{
- unsigned int channel;
-
- if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
- return -EINVAL;
-
- channel = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
-
- ade7758_write_waveform_type(&indio_dev->dev,
- indio_dev->channels[channel].address);
-
- return 0;
-}
-
-static const struct iio_buffer_setup_ops ade7758_ring_setup_ops = {
- .preenable = &ade7758_ring_preenable,
- .postenable = &iio_triggered_buffer_postenable,
- .predisable = &iio_triggered_buffer_predisable,
- .validate_scan_mask = &iio_validate_scan_mask_onehot,
-};
-
-void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
-{
- iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_kfifo_free(indio_dev->buffer);
-}
-
-int ade7758_configure_ring(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
- struct iio_buffer *buffer;
- int ret = 0;
-
- buffer = iio_kfifo_allocate();
- if (!buffer)
- return -ENOMEM;
-
- iio_device_attach_buffer(indio_dev, buffer);
-
- indio_dev->setup_ops = &ade7758_ring_setup_ops;
-
- indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
- &ade7758_trigger_handler,
- 0,
- indio_dev,
- "ade7759_consumer%d",
- indio_dev->id);
- if (!indio_dev->pollfunc) {
- ret = -ENOMEM;
- goto error_iio_kfifo_free;
- }
-
- indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
-
- st->tx_buf[0] = ADE7758_READ_REG(ADE7758_RSTATUS);
- st->tx_buf[1] = 0;
- st->tx_buf[2] = 0;
- st->tx_buf[3] = 0;
- st->tx_buf[4] = ADE7758_READ_REG(ADE7758_WFORM);
- st->tx_buf[5] = 0;
- st->tx_buf[6] = 0;
- st->tx_buf[7] = 0;
-
- /* build spi ring message */
- st->ring_xfer[0].tx_buf = &st->tx_buf[0];
- st->ring_xfer[0].len = 1;
- st->ring_xfer[0].bits_per_word = 8;
- st->ring_xfer[0].delay_usecs = 4;
- st->ring_xfer[1].rx_buf = &st->rx_buf[1];
- st->ring_xfer[1].len = 3;
- st->ring_xfer[1].bits_per_word = 8;
- st->ring_xfer[1].cs_change = 1;
-
- st->ring_xfer[2].tx_buf = &st->tx_buf[4];
- st->ring_xfer[2].len = 1;
- st->ring_xfer[2].bits_per_word = 8;
- st->ring_xfer[2].delay_usecs = 1;
- st->ring_xfer[3].rx_buf = &st->rx_buf[5];
- st->ring_xfer[3].len = 3;
- st->ring_xfer[3].bits_per_word = 8;
-
- spi_message_init(&st->ring_msg);
- spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
- spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);
- spi_message_add_tail(&st->ring_xfer[2], &st->ring_msg);
- spi_message_add_tail(&st->ring_xfer[3], &st->ring_msg);
-
- return 0;
-
-error_iio_kfifo_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
-}
diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
deleted file mode 100644
index 4f6b338cffeb..000000000000
--- a/drivers/staging/iio/meter/ade7758_trigger.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * ADE7758 Poly Phase Multifunction Energy Metering IC driver
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/export.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger.h>
-#include "ade7758.h"
-
-/**
- * ade7758_data_rdy_trig_poll() the event handler for the data rdy trig
- **/
-static irqreturn_t ade7758_data_rdy_trig_poll(int irq, void *private)
-{
- disable_irq_nosync(irq);
- iio_trigger_poll(private);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ade7758_data_rdy_trigger_set_state() set datardy interrupt state
- **/
-static int ade7758_data_rdy_trigger_set_state(struct iio_trigger *trig,
- bool state)
-{
- struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
-
- dev_dbg(&indio_dev->dev, "(%d)\n", state);
- return ade7758_set_irq(&indio_dev->dev, state);
-}
-
-/**
- * ade7758_trig_try_reen() try renabling irq for data rdy trigger
- * @trig: the datardy trigger
- **/
-static int ade7758_trig_try_reen(struct iio_trigger *trig)
-{
- struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
- struct ade7758_state *st = iio_priv(indio_dev);
-
- enable_irq(st->us->irq);
- /* irq reenabled so success! */
- return 0;
-}
-
-static const struct iio_trigger_ops ade7758_trigger_ops = {
- .set_trigger_state = &ade7758_data_rdy_trigger_set_state,
- .try_reenable = &ade7758_trig_try_reen,
-};
-
-int ade7758_probe_trigger(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
-
- st->trig = iio_trigger_alloc("%s-dev%d",
- spi_get_device_id(st->us)->name,
- indio_dev->id);
- if (!st->trig) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- ret = request_irq(st->us->irq,
- ade7758_data_rdy_trig_poll,
- IRQF_TRIGGER_LOW,
- spi_get_device_id(st->us)->name,
- st->trig);
- if (ret)
- goto error_free_trig;
-
- st->trig->dev.parent = &st->us->dev;
- st->trig->ops = &ade7758_trigger_ops;
- iio_trigger_set_drvdata(st->trig, indio_dev);
- ret = iio_trigger_register(st->trig);
-
- /* select default trigger */
- indio_dev->trig = iio_trigger_get(st->trig);
- if (ret)
- goto error_free_irq;
-
- return 0;
-
-error_free_irq:
- free_irq(st->us->irq, st->trig);
-error_free_trig:
- iio_trigger_free(st->trig);
-error_ret:
- return ret;
-}
-
-void ade7758_remove_trigger(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
-
- iio_trigger_unregister(st->trig);
- free_irq(st->us->irq, st->trig);
- iio_trigger_free(st->trig);
-}
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
deleted file mode 100644
index c078b770fa53..000000000000
--- a/drivers/staging/iio/meter/ade7759.c
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- * ADE7759 Active Energy Metering IC with di/dt Sensor Interface Driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include "meter.h"
-
-#define ADE7759_WAVEFORM 0x01
-#define ADE7759_AENERGY 0x02
-#define ADE7759_RSTENERGY 0x03
-#define ADE7759_STATUS 0x04
-#define ADE7759_RSTSTATUS 0x05
-#define ADE7759_MODE 0x06
-#define ADE7759_CFDEN 0x07
-#define ADE7759_CH1OS 0x08
-#define ADE7759_CH2OS 0x09
-#define ADE7759_GAIN 0x0A
-#define ADE7759_APGAIN 0x0B
-#define ADE7759_PHCAL 0x0C
-#define ADE7759_APOS 0x0D
-#define ADE7759_ZXTOUT 0x0E
-#define ADE7759_SAGCYC 0x0F
-#define ADE7759_IRQEN 0x10
-#define ADE7759_SAGLVL 0x11
-#define ADE7759_TEMP 0x12
-#define ADE7759_LINECYC 0x13
-#define ADE7759_LENERGY 0x14
-#define ADE7759_CFNUM 0x15
-#define ADE7759_CHKSUM 0x1E
-#define ADE7759_DIEREV 0x1F
-
-#define ADE7759_READ_REG(a) a
-#define ADE7759_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7759_MAX_TX 6
-#define ADE7759_MAX_RX 6
-#define ADE7759_STARTUP_DELAY 1000
-
-#define ADE7759_SPI_SLOW (u32)(300 * 1000)
-#define ADE7759_SPI_BURST (u32)(1000 * 1000)
-#define ADE7759_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7759_state - device instance specific data
- * @us: actual spi_device
- * @buf_lock: mutex to protect tx and rx and write frequency
- * @tx: transmit buffer
- * @rx: receive buffer
- **/
-struct ade7759_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADE7759_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7759_MAX_RX];
-};
-
-static int ade7759_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7759_WRITE_REG(reg_address);
- st->tx[1] = val;
-
- ret = spi_write(st->us, st->tx, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-/*Unlocked version of ade7759_spi_write_reg_16 function */
-static int __ade7759_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
-
- st->tx[0] = ADE7759_WRITE_REG(reg_address);
- st->tx[1] = (value >> 8) & 0xFF;
- st->tx[2] = value & 0xFF;
- return spi_write(st->us, st->tx, 3);
-}
-
-static int ade7759_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- ret = __ade7759_spi_write_reg_16(dev, reg_address, value);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7759_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_w8r8(st->us, ADE7759_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev,
- "problem when reading 8 bit register 0x%02X",
- reg_address);
- return ret;
- }
- *val = ret;
-
- return 0;
-}
-
-static int ade7759_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_w8r16be(st->us, ADE7759_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- return ret;
- }
-
- *val = ret;
-
- return 0;
-}
-
-static int ade7759_spi_read_reg_40(struct device *dev,
- u8 reg_address,
- u64 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 6,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7759_READ_REG(reg_address);
- memset(&st->tx[1], 0, 5);
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev,
- "problem when reading 40 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = ((u64)st->rx[1] << 32) | ((u64)st->rx[2] << 24) |
- (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static ssize_t ade7759_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7759_spi_read_reg_8(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7759_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7759_spi_read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7759_read_40bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u64 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7759_spi_read_reg_40(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%llu\n", val);
-}
-
-static ssize_t ade7759_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u8 val;
-
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7759_spi_write_reg_8(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7759_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7759_spi_write_reg_16(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int ade7759_reset(struct device *dev)
-{
- int ret;
- u16 val;
-
- ret = ade7759_spi_read_reg_16(dev, ADE7759_MODE, &val);
- if (ret < 0)
- return ret;
-
- val |= BIT(6); /* Software Chip Reset */
- return ade7759_spi_write_reg_16(dev,
- ADE7759_MODE,
- val);
-}
-
-static IIO_DEV_ATTR_AENERGY(ade7759_read_40bit, ADE7759_AENERGY);
-static IIO_DEV_ATTR_CFDEN(0644,
- ade7759_read_16bit,
- ade7759_write_16bit,
- ADE7759_CFDEN);
-static IIO_DEV_ATTR_CFNUM(0644,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_CFNUM);
-static IIO_DEV_ATTR_CHKSUM(ade7759_read_8bit, ADE7759_CHKSUM);
-static IIO_DEV_ATTR_PHCAL(0644,
- ade7759_read_16bit,
- ade7759_write_16bit,
- ADE7759_PHCAL);
-static IIO_DEV_ATTR_APOS(0644,
- ade7759_read_16bit,
- ade7759_write_16bit,
- ADE7759_APOS);
-static IIO_DEV_ATTR_SAGCYC(0644,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_SAGCYC);
-static IIO_DEV_ATTR_SAGLVL(0644,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_SAGLVL);
-static IIO_DEV_ATTR_LINECYC(0644,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_LINECYC);
-static IIO_DEV_ATTR_LENERGY(ade7759_read_40bit, ADE7759_LENERGY);
-static IIO_DEV_ATTR_PGA_GAIN(0644,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_GAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_GAIN(0644,
- ade7759_read_16bit,
- ade7759_write_16bit,
- ADE7759_APGAIN);
-
-static IIO_DEVICE_ATTR(choff_1, 0644,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_CH1OS);
-
-static IIO_DEVICE_ATTR(choff_2, 0644,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_CH2OS);
-
-static int ade7759_set_irq(struct device *dev, bool enable)
-{
- int ret;
- u8 irqen;
-
- ret = ade7759_spi_read_reg_8(dev, ADE7759_IRQEN, &irqen);
- if (ret)
- goto error_ret;
-
- if (enable)
- irqen |= BIT(3); /* Enables an interrupt when a data is
- * present in the waveform register
- */
- else
- irqen &= ~BIT(3);
-
- ret = ade7759_spi_write_reg_8(dev, ADE7759_IRQEN, irqen);
-
-error_ret:
- return ret;
-}
-
-/* Power down the device */
-static int ade7759_stop_device(struct device *dev)
-{
- int ret;
- u16 val;
-
- ret = ade7759_spi_read_reg_16(dev, ADE7759_MODE, &val);
- if (ret < 0) {
- dev_err(dev, "unable to power down the device, error: %d\n",
- ret);
- return ret;
- }
-
- val |= BIT(4); /* AD converters can be turned off */
-
- return ade7759_spi_write_reg_16(dev, ADE7759_MODE, val);
-}
-
-static int ade7759_initial_setup(struct iio_dev *indio_dev)
-{
- int ret;
- struct ade7759_state *st = iio_priv(indio_dev);
- struct device *dev = &indio_dev->dev;
-
- /* use low spi speed for init */
- st->us->mode = SPI_MODE_3;
- spi_setup(st->us);
-
- /* Disable IRQ */
- ret = ade7759_set_irq(dev, false);
- if (ret) {
- dev_err(dev, "disable irq failed");
- goto err_ret;
- }
-
- ade7759_reset(dev);
- usleep_range(ADE7759_STARTUP_DELAY, ADE7759_STARTUP_DELAY + 100);
-
-err_ret:
- return ret;
-}
-
-static ssize_t ade7759_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 t;
- int sps;
-
- ret = ade7759_spi_read_reg_16(dev, ADE7759_MODE, &t);
- if (ret)
- return ret;
-
- t = (t >> 3) & 0x3;
- sps = 27900 / (1 + t);
-
- return sprintf(buf, "%d\n", sps);
-}
-
-static ssize_t ade7759_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
- u16 val;
- int ret;
- u16 reg, t;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- return ret;
- if (!val)
- return -EINVAL;
-
- mutex_lock(&st->buf_lock);
-
- t = 27900 / val;
- if (t > 0)
- t--;
-
- if (t > 1)
- st->us->max_speed_hz = ADE7759_SPI_SLOW;
- else
- st->us->max_speed_hz = ADE7759_SPI_FAST;
-
- ret = ade7759_spi_read_reg_16(dev, ADE7759_MODE, &reg);
- if (ret)
- goto out;
-
- reg &= ~(3 << 13);
- reg |= t << 13;
-
- ret = __ade7759_spi_write_reg_16(dev, ADE7759_MODE, reg);
-
-out:
- mutex_unlock(&st->buf_lock);
-
- return ret ? ret : len;
-}
-static IIO_DEV_ATTR_TEMP_RAW(ade7759_read_8bit);
-static IIO_CONST_ATTR(in_temp_offset, "70 C");
-static IIO_CONST_ATTR(in_temp_scale, "1 C");
-
-static IIO_DEV_ATTR_SAMP_FREQ(0644,
- ade7759_read_frequency,
- ade7759_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
-
-static struct attribute *ade7759_attributes[] = {
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_phcal.dev_attr.attr,
- &iio_dev_attr_cfden.dev_attr.attr,
- &iio_dev_attr_aenergy.dev_attr.attr,
- &iio_dev_attr_cfnum.dev_attr.attr,
- &iio_dev_attr_apos.dev_attr.attr,
- &iio_dev_attr_sagcyc.dev_attr.attr,
- &iio_dev_attr_saglvl.dev_attr.attr,
- &iio_dev_attr_linecyc.dev_attr.attr,
- &iio_dev_attr_lenergy.dev_attr.attr,
- &iio_dev_attr_chksum.dev_attr.attr,
- &iio_dev_attr_pga_gain.dev_attr.attr,
- &iio_dev_attr_active_power_gain.dev_attr.attr,
- &iio_dev_attr_choff_1.dev_attr.attr,
- &iio_dev_attr_choff_2.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ade7759_attribute_group = {
- .attrs = ade7759_attributes,
-};
-
-static const struct iio_info ade7759_info = {
- .attrs = &ade7759_attribute_group,
-};
-
-static int ade7759_probe(struct spi_device *spi)
-{
- int ret;
- struct ade7759_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- st = iio_priv(indio_dev);
- st->us = spi;
- mutex_init(&st->buf_lock);
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ade7759_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- /* Get the device into a sane initial state */
- ret = ade7759_initial_setup(indio_dev);
- if (ret)
- return ret;
-
- return iio_device_register(indio_dev);
-}
-
-static int ade7759_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
- ade7759_stop_device(&indio_dev->dev);
-
- return 0;
-}
-
-static struct spi_driver ade7759_driver = {
- .driver = {
- .name = "ade7759",
- },
- .probe = ade7759_probe,
- .remove = ade7759_remove,
-};
-module_spi_driver(ade7759_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7759 Active Energy Metering IC Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7759");
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
index 317e4f0d8176..c3aa6ea9d036 100644
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -15,91 +15,60 @@
#include <linux/iio/iio.h>
#include "ade7854.h"
-static int ade7854_i2c_write_reg_8(struct device *dev,
- u16 reg_address,
- u8 val)
+static int ade7854_i2c_write_reg(struct device *dev,
+ u16 reg_address,
+ u32 val,
+ int bits)
{
int ret;
+ int count;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = (reg_address >> 8) & 0xFF;
st->tx[1] = reg_address & 0xFF;
- st->tx[2] = val;
- ret = i2c_master_send(st->i2c, st->tx, 3);
+ switch (bits) {
+ case 8:
+ st->tx[2] = val & 0xFF;
+ count = 3;
+ break;
+ case 16:
+ st->tx[2] = (val >> 8) & 0xFF;
+ st->tx[3] = val & 0xFF;
+ count = 4;
+ break;
+ case 24:
+ st->tx[2] = (val >> 16) & 0xFF;
+ st->tx[3] = (val >> 8) & 0xFF;
+ st->tx[4] = val & 0xFF;
+ count = 5;
+ break;
+ case 32:
+ st->tx[2] = (val >> 24) & 0xFF;
+ st->tx[3] = (val >> 16) & 0xFF;
+ st->tx[4] = (val >> 8) & 0xFF;
+ st->tx[5] = val & 0xFF;
+ count = 6;
+ break;
+ default:
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = i2c_master_send(st->i2c, st->tx, count);
+
+unlock:
mutex_unlock(&st->buf_lock);
- return ret;
+ return ret < 0 ? ret : 0;
}
-static int ade7854_i2c_write_reg_16(struct device *dev,
- u16 reg_address,
- u16 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
- st->tx[2] = (val >> 8) & 0xFF;
- st->tx[3] = val & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 4);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_i2c_write_reg_24(struct device *dev,
- u16 reg_address,
- u32 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
- st->tx[2] = (val >> 16) & 0xFF;
- st->tx[3] = (val >> 8) & 0xFF;
- st->tx[4] = val & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 5);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_i2c_write_reg_32(struct device *dev,
- u16 reg_address,
- u32 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
- st->tx[2] = (val >> 24) & 0xFF;
- st->tx[3] = (val >> 16) & 0xFF;
- st->tx[4] = (val >> 8) & 0xFF;
- st->tx[5] = val & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 6);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_i2c_read_reg_8(struct device *dev,
- u16 reg_address,
- u8 *val)
+static int ade7854_i2c_read_reg(struct device *dev,
+ u16 reg_address,
+ u32 *val,
+ int bits)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
@@ -110,94 +79,33 @@ static int ade7854_i2c_read_reg_8(struct device *dev,
st->tx[1] = reg_address & 0xFF;
ret = i2c_master_send(st->i2c, st->tx, 2);
- if (ret)
- goto out;
-
- ret = i2c_master_recv(st->i2c, st->rx, 1);
- if (ret)
- goto out;
-
- *val = st->rx[0];
-out:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_i2c_read_reg_16(struct device *dev,
- u16 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 2);
- if (ret)
- goto out;
-
- ret = i2c_master_recv(st->i2c, st->rx, 2);
- if (ret)
- goto out;
-
- *val = (st->rx[0] << 8) | st->rx[1];
-out:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_i2c_read_reg_24(struct device *dev,
- u16 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 2);
- if (ret)
- goto out;
-
- ret = i2c_master_recv(st->i2c, st->rx, 3);
- if (ret)
- goto out;
-
- *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
-out:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_i2c_read_reg_32(struct device *dev,
- u16 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 2);
- if (ret)
- goto out;
-
- ret = i2c_master_recv(st->i2c, st->rx, 3);
- if (ret)
- goto out;
-
- *val = (st->rx[0] << 24) | (st->rx[1] << 16) |
- (st->rx[2] << 8) | st->rx[3];
-out:
+ if (ret < 0)
+ goto unlock;
+
+ ret = i2c_master_recv(st->i2c, st->rx, bits / 8);
+ if (ret < 0)
+ goto unlock;
+
+ switch (bits) {
+ case 8:
+ *val = st->rx[0];
+ break;
+ case 16:
+ *val = (st->rx[0] << 8) | st->rx[1];
+ break;
+ case 24:
+ *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
+ break;
+ case 32:
+ *val = (st->rx[0] << 24) | (st->rx[1] << 16) |
+ (st->rx[2] << 8) | st->rx[3];
+ break;
+ default:
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+unlock:
mutex_unlock(&st->buf_lock);
return ret;
}
@@ -213,14 +121,8 @@ static int ade7854_i2c_probe(struct i2c_client *client,
return -ENOMEM;
st = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
- st->read_reg_8 = ade7854_i2c_read_reg_8;
- st->read_reg_16 = ade7854_i2c_read_reg_16;
- st->read_reg_24 = ade7854_i2c_read_reg_24;
- st->read_reg_32 = ade7854_i2c_read_reg_32;
- st->write_reg_8 = ade7854_i2c_write_reg_8;
- st->write_reg_16 = ade7854_i2c_write_reg_16;
- st->write_reg_24 = ade7854_i2c_write_reg_24;
- st->write_reg_32 = ade7854_i2c_write_reg_32;
+ st->read_reg = ade7854_i2c_read_reg;
+ st->write_reg = ade7854_i2c_write_reg;
st->i2c = client;
st->irq = client->irq;
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index 4419b8f06197..fc9146757283 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -15,9 +15,10 @@
#include <linux/iio/iio.h>
#include "ade7854.h"
-static int ade7854_spi_write_reg_8(struct device *dev,
- u16 reg_address,
- u8 val)
+static int ade7854_spi_write_reg(struct device *dev,
+ u16 reg_address,
+ u32 val,
+ int bits)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -32,173 +33,44 @@ static int ade7854_spi_write_reg_8(struct device *dev,
st->tx[0] = ADE7854_WRITE_REG;
st->tx[1] = (reg_address >> 8) & 0xFF;
st->tx[2] = reg_address & 0xFF;
- st->tx[3] = val & 0xFF;
-
- ret = spi_sync_transfer(st->spi, &xfer, 1);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_spi_write_reg_16(struct device *dev,
- u16 reg_address,
- u16 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct spi_transfer xfer = {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 5,
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7854_WRITE_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
- st->tx[3] = (val >> 8) & 0xFF;
- st->tx[4] = val & 0xFF;
-
- ret = spi_sync_transfer(st->spi, &xfer, 1);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_spi_write_reg_24(struct device *dev,
- u16 reg_address,
- u32 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct spi_transfer xfer = {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 6,
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7854_WRITE_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
- st->tx[3] = (val >> 16) & 0xFF;
- st->tx[4] = (val >> 8) & 0xFF;
- st->tx[5] = val & 0xFF;
-
- ret = spi_sync_transfer(st->spi, &xfer, 1);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_spi_write_reg_32(struct device *dev,
- u16 reg_address,
- u32 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct spi_transfer xfer = {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 7,
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7854_WRITE_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
- st->tx[3] = (val >> 24) & 0xFF;
- st->tx[4] = (val >> 16) & 0xFF;
- st->tx[5] = (val >> 8) & 0xFF;
- st->tx[6] = val & 0xFF;
-
- ret = spi_sync_transfer(st->spi, &xfer, 1);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_spi_read_reg_8(struct device *dev,
- u16 reg_address,
- u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 1,
- }
- };
-
- mutex_lock(&st->buf_lock);
-
- st->tx[0] = ADE7854_READ_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
-
- ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->spi->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
- goto error_ret;
+ switch (bits) {
+ case 8:
+ st->tx[3] = val & 0xFF;
+ break;
+ case 16:
+ xfer.len = 5;
+ st->tx[3] = (val >> 8) & 0xFF;
+ st->tx[4] = val & 0xFF;
+ break;
+ case 24:
+ xfer.len = 6;
+ st->tx[3] = (val >> 16) & 0xFF;
+ st->tx[4] = (val >> 8) & 0xFF;
+ st->tx[5] = val & 0xFF;
+ break;
+ case 32:
+ xfer.len = 7;
+ st->tx[3] = (val >> 24) & 0xFF;
+ st->tx[4] = (val >> 16) & 0xFF;
+ st->tx[5] = (val >> 8) & 0xFF;
+ st->tx[6] = val & 0xFF;
+ break;
+ default:
+ ret = -EINVAL;
+ goto unlock;
}
- *val = st->rx[0];
-error_ret:
+ ret = spi_sync_transfer(st->spi, &xfer, 1);
+unlock:
mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_spi_read_reg_16(struct device *dev,
- u16 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 2,
- }
- };
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7854_READ_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
-
- ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->spi->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = be16_to_cpup((const __be16 *)st->rx);
-
-error_ret:
- mutex_unlock(&st->buf_lock);
return ret;
}
-static int ade7854_spi_read_reg_24(struct device *dev,
- u16 reg_address,
- u32 *val)
+static int ade7854_spi_read_reg(struct device *dev,
+ u16 reg_address,
+ u32 *val,
+ int bits)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
@@ -211,7 +83,7 @@ static int ade7854_spi_read_reg_24(struct device *dev,
}, {
.rx_buf = st->rx,
.bits_per_word = 8,
- .len = 3,
+ .len = bits / 8,
}
};
@@ -222,52 +94,28 @@ static int ade7854_spi_read_reg_24(struct device *dev,
st->tx[2] = reg_address & 0xFF;
ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->spi->dev, "problem when reading 24 bit register 0x%02X",
+ if (ret < 0) {
+ dev_err(&st->spi->dev, "problem when reading register 0x%04X",
reg_address);
- goto error_ret;
+ goto unlock;
}
- *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_spi_read_reg_32(struct device *dev,
- u16 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 4,
- }
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7854_READ_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
-
- ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->spi->dev, "problem when reading 32 bit register 0x%02X",
- reg_address);
- goto error_ret;
+ switch (bits) {
+ case 8:
+ *val = st->rx[0];
+ break;
+ case 16:
+ *val = be16_to_cpup((const __be16 *)st->rx);
+ break;
+ case 24:
+ *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
+ break;
+ case 32:
+ *val = be32_to_cpup((const __be32 *)st->rx);
+ break;
}
- *val = be32_to_cpup((const __be32 *)st->rx);
-error_ret:
+unlock:
mutex_unlock(&st->buf_lock);
return ret;
}
@@ -282,14 +130,8 @@ static int ade7854_spi_probe(struct spi_device *spi)
return -ENOMEM;
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
- st->read_reg_8 = ade7854_spi_read_reg_8;
- st->read_reg_16 = ade7854_spi_read_reg_16;
- st->read_reg_24 = ade7854_spi_read_reg_24;
- st->read_reg_32 = ade7854_spi_read_reg_32;
- st->write_reg_8 = ade7854_spi_write_reg_8;
- st->write_reg_16 = ade7854_spi_write_reg_16;
- st->write_reg_24 = ade7854_spi_write_reg_24;
- st->write_reg_32 = ade7854_spi_write_reg_32;
+ st->read_reg = ade7854_spi_read_reg;
+ st->write_reg = ade7854_spi_write_reg;
st->irq = spi->irq;
st->spi = spi;
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 90d07cdca4b8..029c3bf42d4d 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -27,13 +27,13 @@ static ssize_t ade7854_read_8bit(struct device *dev,
char *buf)
{
int ret;
- u8 val = 0;
+ u32 val = 0;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- ret = st->read_reg_8(dev, this_attr->address, &val);
- if (ret)
+ ret = st->read_reg(dev, this_attr->address, &val, 8);
+ if (ret < 0)
return ret;
return sprintf(buf, "%u\n", val);
@@ -44,13 +44,13 @@ static ssize_t ade7854_read_16bit(struct device *dev,
char *buf)
{
int ret;
- u16 val = 0;
+ u32 val = 0;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- ret = st->read_reg_16(dev, this_attr->address, &val);
- if (ret)
+ ret = st->read_reg(dev, this_attr->address, &val, 16);
+ if (ret < 0)
return ret;
return sprintf(buf, "%u\n", val);
@@ -66,8 +66,8 @@ static ssize_t ade7854_read_24bit(struct device *dev,
struct ade7854_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- ret = st->read_reg_24(dev, this_attr->address, &val);
- if (ret)
+ ret = st->read_reg(dev, this_attr->address, &val, 24);
+ if (ret < 0)
return ret;
return sprintf(buf, "%u\n", val);
@@ -83,8 +83,8 @@ static ssize_t ade7854_read_32bit(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
- ret = st->read_reg_32(dev, this_attr->address, &val);
- if (ret)
+ ret = st->read_reg(dev, this_attr->address, &val, 32);
+ if (ret < 0)
return ret;
return sprintf(buf, "%u\n", val);
@@ -105,7 +105,7 @@ static ssize_t ade7854_write_8bit(struct device *dev,
ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
- ret = st->write_reg_8(dev, this_attr->address, val);
+ ret = st->write_reg(dev, this_attr->address, val, 8);
error_ret:
return ret ? ret : len;
@@ -126,7 +126,7 @@ static ssize_t ade7854_write_16bit(struct device *dev,
ret = kstrtou16(buf, 10, &val);
if (ret)
goto error_ret;
- ret = st->write_reg_16(dev, this_attr->address, val);
+ ret = st->write_reg(dev, this_attr->address, val, 16);
error_ret:
return ret ? ret : len;
@@ -147,7 +147,7 @@ static ssize_t ade7854_write_24bit(struct device *dev,
ret = kstrtou32(buf, 10, &val);
if (ret)
goto error_ret;
- ret = st->write_reg_24(dev, this_attr->address, val);
+ ret = st->write_reg(dev, this_attr->address, val, 24);
error_ret:
return ret ? ret : len;
@@ -168,7 +168,7 @@ static ssize_t ade7854_write_32bit(struct device *dev,
ret = kstrtou32(buf, 10, &val);
if (ret)
goto error_ret;
- ret = st->write_reg_32(dev, this_attr->address, val);
+ ret = st->write_reg(dev, this_attr->address, val, 32);
error_ret:
return ret ? ret : len;
@@ -178,12 +178,12 @@ static int ade7854_reset(struct device *dev)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7854_state *st = iio_priv(indio_dev);
- u16 val;
+ u32 val;
- st->read_reg_16(dev, ADE7854_CONFIG, &val);
+ st->read_reg(dev, ADE7854_CONFIG, &val, 16);
val |= BIT(7); /* Software Chip Reset */
- return st->write_reg_16(dev, ADE7854_CONFIG, val);
+ return st->write_reg(dev, ADE7854_CONFIG, val, 16);
}
static IIO_DEV_ATTR_AIGAIN(0644,
@@ -415,8 +415,8 @@ static int ade7854_set_irq(struct device *dev, bool enable)
int ret;
u32 irqen;
- ret = st->read_reg_32(dev, ADE7854_MASK0, &irqen);
- if (ret)
+ ret = st->read_reg(dev, ADE7854_MASK0, &irqen, 32);
+ if (ret < 0)
return ret;
if (enable)
@@ -426,7 +426,7 @@ static int ade7854_set_irq(struct device *dev, bool enable)
else
irqen &= ~BIT(17);
- return st->write_reg_32(dev, ADE7854_MASK0, irqen);
+ return st->write_reg(dev, ADE7854_MASK0, irqen, 32);
}
static int ade7854_initial_setup(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
index a82d38224cbd..a51e6e3183d3 100644
--- a/drivers/staging/iio/meter/ade7854.h
+++ b/drivers/staging/iio/meter/ade7854.h
@@ -145,7 +145,9 @@
/**
* struct ade7854_state - device instance specific data
- * @spi: actual spi_device
+ * @spi: actual spi_device
+ * @read_reg: Wrapper function for I2C and SPI read
+ * @write_reg: Wrapper function for I2C and SPI write
* @indio_dev: industrial I/O device structure
* @buf_lock: mutex to protect tx and rx
* @tx: transmit buffer
@@ -154,14 +156,10 @@
struct ade7854_state {
struct spi_device *spi;
struct i2c_client *i2c;
- int (*read_reg_8)(struct device *dev, u16 reg_address, u8 *val);
- int (*read_reg_16)(struct device *dev, u16 reg_address, u16 *val);
- int (*read_reg_24)(struct device *dev, u16 reg_address, u32 *val);
- int (*read_reg_32)(struct device *dev, u16 reg_address, u32 *val);
- int (*write_reg_8)(struct device *dev, u16 reg_address, u8 val);
- int (*write_reg_16)(struct device *dev, u16 reg_address, u16 val);
- int (*write_reg_24)(struct device *dev, u16 reg_address, u32 val);
- int (*write_reg_32)(struct device *dev, u16 reg_address, u32 val);
+ int (*read_reg)(struct device *dev, u16 reg_address, u32 *val,
+ int bits);
+ int (*write_reg)(struct device *dev, u16 reg_address, u32 val,
+ int bits);
int irq;
struct mutex buf_lock;
u8 tx[ADE7854_MAX_TX] ____cacheline_aligned;
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
index 1c7e2860d6b7..6a469ee6101f 100644
--- a/drivers/staging/iio/resolver/Kconfig
+++ b/drivers/staging/iio/resolver/Kconfig
@@ -13,18 +13,6 @@ config AD2S90
To compile this driver as a module, choose M here: the
module will be called ad2s90.
-config AD2S1200
- tristate "Analog Devices ad2s1200/ad2s1205 driver"
- depends on SPI
- depends on GPIOLIB || COMPILE_TEST
- help
- Say yes here to build support for Analog Devices spi resolver
- to digital converters, ad2s1200 and ad2s1205, provides direct access
- via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad2s1200.
-
config AD2S1210
tristate "Analog Devices ad2s1210 driver"
depends on SPI
diff --git a/drivers/staging/iio/resolver/Makefile b/drivers/staging/iio/resolver/Makefile
index 14375e444ebf..8d901dc7500b 100644
--- a/drivers/staging/iio/resolver/Makefile
+++ b/drivers/staging/iio/resolver/Makefile
@@ -3,5 +3,4 @@
#
obj-$(CONFIG_AD2S90) += ad2s90.o
-obj-$(CONFIG_AD2S1200) += ad2s1200.o
obj-$(CONFIG_AD2S1210) += ad2s1210.o
diff --git a/drivers/staging/ipx/Kconfig b/drivers/staging/ipx/Kconfig
deleted file mode 100644
index cdff083d0ee6..000000000000
--- a/drivers/staging/ipx/Kconfig
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-# IPX configuration
-#
-config IPX
- tristate "The IPX protocol"
- depends on NET
- select LLC
- ---help---
- This is support for the Novell networking protocol, IPX, commonly
- used for local networks of Windows machines. You need it if you
- want to access Novell NetWare file or print servers using the Linux
- Novell client ncpfs (available from
- <ftp://platan.vc.cvut.cz/pub/linux/ncpfs/>) or from
- within the Linux DOS emulator DOSEMU (read the DOSEMU-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>). In order
- to do the former, you'll also have to say Y to "NCP file system
- support", below.
-
- IPX is similar in scope to IP, while SPX, which runs on top of IPX,
- is similar to TCP.
-
- To turn your Linux box into a fully featured NetWare file server and
- IPX router, say Y here and fetch either lwared from
- <ftp://ibiblio.org/pub/Linux/system/network/daemons/> or
- mars_nwe from <ftp://www.compu-art.de/mars_nwe/>. For more
- information, read the IPX-HOWTO available from
- <http://www.tldp.org/docs.html#howto>.
-
- The IPX driver would enlarge your kernel by about 16 KB. To compile
- this driver as a module, choose M here: the module will be called ipx.
- Unless you want to integrate your Linux box with a local Novell
- network, say N.
-
-config IPX_INTERN
- bool "IPX: Full internal IPX network"
- depends on IPX
- ---help---
- Every IPX network has an address that identifies it. Sometimes it is
- useful to give an IPX "network" address to your Linux box as well
- (for example if your box is acting as a file server for different
- IPX networks: it will then be accessible from everywhere using the
- same address). The way this is done is to create a virtual internal
- "network" inside your box and to assign an IPX address to this
- network. Say Y here if you want to do this; read the IPX-HOWTO at
- <http://www.tldp.org/docs.html#howto> for details.
-
- The full internal IPX network enables you to allocate sockets on
- different virtual nodes of the internal network. This is done by
- evaluating the field sipx_node of the socket address given to the
- bind call. So applications should always initialize the node field
- to 0 when binding a socket on the primary network. In this case the
- socket is assigned the default node that has been given to the
- kernel when the internal network was created. By enabling the full
- internal IPX network the cross-forwarding of packets targeted at
- 'special' sockets to sockets listening on the primary network is
- disabled. This might break existing applications, especially RIP/SAP
- daemons. A RIP/SAP daemon that works well with the full internal net
- can be found on <ftp://ftp.gwdg.de/pub/linux/misc/ncpfs/>.
-
- If you don't know what you are doing, say N.
-
diff --git a/drivers/staging/ipx/Makefile b/drivers/staging/ipx/Makefile
deleted file mode 100644
index 440fafa9fd07..000000000000
--- a/drivers/staging/ipx/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for the Linux IPX layer.
-#
-
-obj-$(CONFIG_IPX) += ipx.o
-
-ipx-y := af_ipx.o ipx_route.o ipx_proc.o pe2.o
-ipx-$(CONFIG_SYSCTL) += sysctl_net_ipx.o
diff --git a/drivers/staging/ipx/TODO b/drivers/staging/ipx/TODO
deleted file mode 100644
index 80db5d968264..000000000000
--- a/drivers/staging/ipx/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-The ipx code will be removed soon from the kernel tree as it is old and
-obsolete and broken.
-
-Don't worry about fixing up anything here, it's not needed.
diff --git a/drivers/staging/ipx/af_ipx.c b/drivers/staging/ipx/af_ipx.c
deleted file mode 100644
index 208b5c161631..000000000000
--- a/drivers/staging/ipx/af_ipx.c
+++ /dev/null
@@ -1,2082 +0,0 @@
-/*
- * Implements an IPX socket layer.
- *
- * This code is derived from work by
- * Ross Biro : Writing the original IP stack
- * Fred Van Kempen : Tidying up the TCP/IP
- *
- * Many thanks go to Keith Baker, Institute For Industrial Information
- * Technology Ltd, Swansea University for allowing me to work on this
- * in my own time even though it was in some ways related to commercial
- * work I am currently employed to do there.
- *
- * All the material in this file is subject to the Gnu license version 2.
- * Neither Alan Cox nor the Swansea University Computer Society admit
- * liability nor provide warranty for any of this software. This material
- * is provided as is and at no charge.
- *
- * Portions Copyright (c) 2000-2003 Conectiva, Inc. <acme@conectiva.com.br>
- * Neither Arnaldo Carvalho de Melo nor Conectiva, Inc. admit liability nor
- * provide warranty for any of this software. This material is provided
- * "AS-IS" and at no charge.
- *
- * Portions Copyright (c) 1995 Caldera, Inc. <greg@caldera.com>
- * Neither Greg Page nor Caldera, Inc. admit liability nor provide
- * warranty for any of this software. This material is provided
- * "AS-IS" and at no charge.
- *
- * See net/ipx/ChangeLog.
- */
-
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/init.h>
-#include <linux/ipx.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/net.h>
-#include <linux/netdevice.h>
-#include <linux/uio.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/termios.h>
-
-#include <net/ipx.h>
-#include <net/p8022.h>
-#include <net/psnap.h>
-#include <net/sock.h>
-#include <net/datalink.h>
-#include <net/tcp_states.h>
-#include <net/net_namespace.h>
-
-#include <linux/uaccess.h>
-
-/* Configuration Variables */
-static unsigned char ipxcfg_max_hops = 16;
-static char ipxcfg_auto_select_primary;
-static char ipxcfg_auto_create_interfaces;
-int sysctl_ipx_pprop_broadcasting = 1;
-
-/* Global Variables */
-static struct datalink_proto *p8022_datalink;
-static struct datalink_proto *pEII_datalink;
-static struct datalink_proto *p8023_datalink;
-static struct datalink_proto *pSNAP_datalink;
-
-static const struct proto_ops ipx_dgram_ops;
-
-LIST_HEAD(ipx_interfaces);
-DEFINE_SPINLOCK(ipx_interfaces_lock);
-
-struct ipx_interface *ipx_primary_net;
-struct ipx_interface *ipx_internal_net;
-
-struct ipx_interface *ipx_interfaces_head(void)
-{
- struct ipx_interface *rc = NULL;
-
- if (!list_empty(&ipx_interfaces))
- rc = list_entry(ipx_interfaces.next,
- struct ipx_interface, node);
- return rc;
-}
-
-static void ipxcfg_set_auto_select(char val)
-{
- ipxcfg_auto_select_primary = val;
- if (val && !ipx_primary_net)
- ipx_primary_net = ipx_interfaces_head();
-}
-
-static int ipxcfg_get_config_data(struct ipx_config_data __user *arg)
-{
- struct ipx_config_data vals;
-
- vals.ipxcfg_auto_create_interfaces = ipxcfg_auto_create_interfaces;
- vals.ipxcfg_auto_select_primary = ipxcfg_auto_select_primary;
-
- return copy_to_user(arg, &vals, sizeof(vals)) ? -EFAULT : 0;
-}
-
-/*
- * Note: Sockets may not be removed _during_ an interrupt or inet_bh
- * handler using this technique. They can be added although we do not
- * use this facility.
- */
-
-static void ipx_remove_socket(struct sock *sk)
-{
- /* Determine interface with which socket is associated */
- struct ipx_interface *intrfc = ipx_sk(sk)->intrfc;
-
- if (!intrfc)
- goto out;
-
- ipxitf_hold(intrfc);
- spin_lock_bh(&intrfc->if_sklist_lock);
- sk_del_node_init(sk);
- spin_unlock_bh(&intrfc->if_sklist_lock);
- ipxitf_put(intrfc);
-out:
- return;
-}
-
-static void ipx_destroy_socket(struct sock *sk)
-{
- ipx_remove_socket(sk);
- skb_queue_purge(&sk->sk_receive_queue);
- sk_refcnt_debug_dec(sk);
-}
-
-/*
- * The following code is used to support IPX Interfaces (IPXITF). An
- * IPX interface is defined by a physical device and a frame type.
- */
-
-/* ipxitf_clear_primary_net has to be called with ipx_interfaces_lock held */
-
-static void ipxitf_clear_primary_net(void)
-{
- ipx_primary_net = NULL;
- if (ipxcfg_auto_select_primary)
- ipx_primary_net = ipx_interfaces_head();
-}
-
-static struct ipx_interface *__ipxitf_find_using_phys(struct net_device *dev,
- __be16 datalink)
-{
- struct ipx_interface *i;
-
- list_for_each_entry(i, &ipx_interfaces, node)
- if (i->if_dev == dev && i->if_dlink_type == datalink)
- goto out;
- i = NULL;
-out:
- return i;
-}
-
-static struct ipx_interface *ipxitf_find_using_phys(struct net_device *dev,
- __be16 datalink)
-{
- struct ipx_interface *i;
-
- spin_lock_bh(&ipx_interfaces_lock);
- i = __ipxitf_find_using_phys(dev, datalink);
- if (i)
- ipxitf_hold(i);
- spin_unlock_bh(&ipx_interfaces_lock);
- return i;
-}
-
-struct ipx_interface *ipxitf_find_using_net(__be32 net)
-{
- struct ipx_interface *i;
-
- spin_lock_bh(&ipx_interfaces_lock);
- if (net) {
- list_for_each_entry(i, &ipx_interfaces, node)
- if (i->if_netnum == net)
- goto hold;
- i = NULL;
- goto unlock;
- }
-
- i = ipx_primary_net;
- if (i)
-hold:
- ipxitf_hold(i);
-unlock:
- spin_unlock_bh(&ipx_interfaces_lock);
- return i;
-}
-
-/* Sockets are bound to a particular IPX interface. */
-static void ipxitf_insert_socket(struct ipx_interface *intrfc, struct sock *sk)
-{
- ipxitf_hold(intrfc);
- spin_lock_bh(&intrfc->if_sklist_lock);
- ipx_sk(sk)->intrfc = intrfc;
- sk_add_node(sk, &intrfc->if_sklist);
- spin_unlock_bh(&intrfc->if_sklist_lock);
- ipxitf_put(intrfc);
-}
-
-/* caller must hold intrfc->if_sklist_lock */
-static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc,
- __be16 port)
-{
- struct sock *s;
-
- sk_for_each(s, &intrfc->if_sklist)
- if (ipx_sk(s)->port == port)
- goto found;
- s = NULL;
-found:
- return s;
-}
-
-/* caller must hold a reference to intrfc */
-static struct sock *ipxitf_find_socket(struct ipx_interface *intrfc,
- __be16 port)
-{
- struct sock *s;
-
- spin_lock_bh(&intrfc->if_sklist_lock);
- s = __ipxitf_find_socket(intrfc, port);
- if (s)
- sock_hold(s);
- spin_unlock_bh(&intrfc->if_sklist_lock);
-
- return s;
-}
-
-#ifdef CONFIG_IPX_INTERN
-static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc,
- unsigned char *ipx_node,
- __be16 port)
-{
- struct sock *s;
-
- ipxitf_hold(intrfc);
- spin_lock_bh(&intrfc->if_sklist_lock);
-
- sk_for_each(s, &intrfc->if_sklist) {
- struct ipx_sock *ipxs = ipx_sk(s);
-
- if (ipxs->port == port &&
- !memcmp(ipx_node, ipxs->node, IPX_NODE_LEN))
- goto found;
- }
- s = NULL;
-found:
- spin_unlock_bh(&intrfc->if_sklist_lock);
- ipxitf_put(intrfc);
- return s;
-}
-#endif
-
-static void __ipxitf_down(struct ipx_interface *intrfc)
-{
- struct sock *s;
- struct hlist_node *t;
-
- /* Delete all routes associated with this interface */
- ipxrtr_del_routes(intrfc);
-
- spin_lock_bh(&intrfc->if_sklist_lock);
- /* error sockets */
- sk_for_each_safe(s, t, &intrfc->if_sklist) {
- struct ipx_sock *ipxs = ipx_sk(s);
-
- s->sk_err = ENOLINK;
- s->sk_error_report(s);
- ipxs->intrfc = NULL;
- ipxs->port = 0;
- sock_set_flag(s, SOCK_ZAPPED); /* Indicates it is no longer bound */
- sk_del_node_init(s);
- }
- INIT_HLIST_HEAD(&intrfc->if_sklist);
- spin_unlock_bh(&intrfc->if_sklist_lock);
-
- /* remove this interface from list */
- list_del(&intrfc->node);
-
- /* remove this interface from *special* networks */
- if (intrfc == ipx_primary_net)
- ipxitf_clear_primary_net();
- if (intrfc == ipx_internal_net)
- ipx_internal_net = NULL;
-
- if (intrfc->if_dev)
- dev_put(intrfc->if_dev);
- kfree(intrfc);
-}
-
-void ipxitf_down(struct ipx_interface *intrfc)
-{
- spin_lock_bh(&ipx_interfaces_lock);
- __ipxitf_down(intrfc);
- spin_unlock_bh(&ipx_interfaces_lock);
-}
-
-static void __ipxitf_put(struct ipx_interface *intrfc)
-{
- if (refcount_dec_and_test(&intrfc->refcnt))
- __ipxitf_down(intrfc);
-}
-
-static int ipxitf_device_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct ipx_interface *i, *tmp;
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- if (event != NETDEV_DOWN && event != NETDEV_UP)
- goto out;
-
- spin_lock_bh(&ipx_interfaces_lock);
- list_for_each_entry_safe(i, tmp, &ipx_interfaces, node)
- if (i->if_dev == dev) {
- if (event == NETDEV_UP)
- ipxitf_hold(i);
- else
- __ipxitf_put(i);
- }
- spin_unlock_bh(&ipx_interfaces_lock);
-out:
- return NOTIFY_DONE;
-}
-
-
-static __exit void ipxitf_cleanup(void)
-{
- struct ipx_interface *i, *tmp;
-
- spin_lock_bh(&ipx_interfaces_lock);
- list_for_each_entry_safe(i, tmp, &ipx_interfaces, node)
- __ipxitf_put(i);
- spin_unlock_bh(&ipx_interfaces_lock);
-}
-
-static void ipxitf_def_skb_handler(struct sock *sock, struct sk_buff *skb)
-{
- if (sock_queue_rcv_skb(sock, skb) < 0)
- kfree_skb(skb);
-}
-
-/*
- * On input skb->sk is NULL. Nobody is charged for the memory.
- */
-
-/* caller must hold a reference to intrfc */
-
-#ifdef CONFIG_IPX_INTERN
-static int ipxitf_demux_socket(struct ipx_interface *intrfc,
- struct sk_buff *skb, int copy)
-{
- struct ipxhdr *ipx = ipx_hdr(skb);
- int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node,
- IPX_NODE_LEN);
- struct sock *s;
- int rc;
-
- spin_lock_bh(&intrfc->if_sklist_lock);
-
- sk_for_each(s, &intrfc->if_sklist) {
- struct ipx_sock *ipxs = ipx_sk(s);
-
- if (ipxs->port == ipx->ipx_dest.sock &&
- (is_broadcast || !memcmp(ipx->ipx_dest.node,
- ipxs->node, IPX_NODE_LEN))) {
- /* We found a socket to which to send */
- struct sk_buff *skb1;
-
- if (copy) {
- skb1 = skb_clone(skb, GFP_ATOMIC);
- rc = -ENOMEM;
- if (!skb1)
- goto out;
- } else {
- skb1 = skb;
- copy = 1; /* skb may only be used once */
- }
- ipxitf_def_skb_handler(s, skb1);
-
- /* On an external interface, one socket can listen */
- if (intrfc != ipx_internal_net)
- break;
- }
- }
-
- /* skb was solely for us, and we did not make a copy, so free it. */
- if (!copy)
- kfree_skb(skb);
-
- rc = 0;
-out:
- spin_unlock_bh(&intrfc->if_sklist_lock);
- return rc;
-}
-#else
-static struct sock *ncp_connection_hack(struct ipx_interface *intrfc,
- struct ipxhdr *ipx)
-{
- /* The packet's target is a NCP connection handler. We want to hand it
- * to the correct socket directly within the kernel, so that the
- * mars_nwe packet distribution process does not have to do it. Here we
- * only care about NCP and BURST packets.
- *
- * You might call this a hack, but believe me, you do not want a
- * complete NCP layer in the kernel, and this is VERY fast as well. */
- struct sock *sk = NULL;
- int connection = 0;
- u8 *ncphdr = (u8 *)(ipx + 1);
-
- if (*ncphdr == 0x22 && *(ncphdr + 1) == 0x22) /* NCP request */
- connection = (((int) *(ncphdr + 5)) << 8) | (int) *(ncphdr + 3);
- else if (*ncphdr == 0x77 && *(ncphdr + 1) == 0x77) /* BURST packet */
- connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8);
-
- if (connection) {
- /* Now we have to look for a special NCP connection handling
- * socket. Only these sockets have ipx_ncp_conn != 0, set by
- * SIOCIPXNCPCONN. */
- spin_lock_bh(&intrfc->if_sklist_lock);
- sk_for_each(sk, &intrfc->if_sklist)
- if (ipx_sk(sk)->ipx_ncp_conn == connection) {
- sock_hold(sk);
- goto found;
- }
- sk = NULL;
- found:
- spin_unlock_bh(&intrfc->if_sklist_lock);
- }
- return sk;
-}
-
-static int ipxitf_demux_socket(struct ipx_interface *intrfc,
- struct sk_buff *skb, int copy)
-{
- struct ipxhdr *ipx = ipx_hdr(skb);
- struct sock *sock1 = NULL, *sock2 = NULL;
- struct sk_buff *skb1 = NULL, *skb2 = NULL;
- int rc;
-
- if (intrfc == ipx_primary_net && ntohs(ipx->ipx_dest.sock) == 0x451)
- sock1 = ncp_connection_hack(intrfc, ipx);
- if (!sock1)
- /* No special socket found, forward the packet the normal way */
- sock1 = ipxitf_find_socket(intrfc, ipx->ipx_dest.sock);
-
- /*
- * We need to check if there is a primary net and if
- * this is addressed to one of the *SPECIAL* sockets because
- * these need to be propagated to the primary net.
- * The *SPECIAL* socket list contains: 0x452(SAP), 0x453(RIP) and
- * 0x456(Diagnostic).
- */
-
- if (ipx_primary_net && intrfc != ipx_primary_net) {
- const int dsock = ntohs(ipx->ipx_dest.sock);
-
- if (dsock == 0x452 || dsock == 0x453 || dsock == 0x456)
- /* The appropriate thing to do here is to dup the
- * packet and route to the primary net interface via
- * ipxitf_send; however, we'll cheat and just demux it
- * here. */
- sock2 = ipxitf_find_socket(ipx_primary_net,
- ipx->ipx_dest.sock);
- }
-
- /*
- * If there is nothing to do return. The kfree will cancel any charging.
- */
- rc = 0;
- if (!sock1 && !sock2) {
- if (!copy)
- kfree_skb(skb);
- goto out;
- }
-
- /*
- * This next segment of code is a little awkward, but it sets it up
- * so that the appropriate number of copies of the SKB are made and
- * that skb1 and skb2 point to it (them) so that it (they) can be
- * demuxed to sock1 and/or sock2. If we are unable to make enough
- * copies, we do as much as is possible.
- */
-
- if (copy)
- skb1 = skb_clone(skb, GFP_ATOMIC);
- else
- skb1 = skb;
-
- rc = -ENOMEM;
- if (!skb1)
- goto out_put;
-
- /* Do we need 2 SKBs? */
- if (sock1 && sock2)
- skb2 = skb_clone(skb1, GFP_ATOMIC);
- else
- skb2 = skb1;
-
- if (sock1)
- ipxitf_def_skb_handler(sock1, skb1);
-
- if (!skb2)
- goto out_put;
-
- if (sock2)
- ipxitf_def_skb_handler(sock2, skb2);
-
- rc = 0;
-out_put:
- if (sock1)
- sock_put(sock1);
- if (sock2)
- sock_put(sock2);
-out:
- return rc;
-}
-#endif /* CONFIG_IPX_INTERN */
-
-static struct sk_buff *ipxitf_adjust_skbuff(struct ipx_interface *intrfc,
- struct sk_buff *skb)
-{
- struct sk_buff *skb2;
- int in_offset = (unsigned char *)ipx_hdr(skb) - skb->head;
- int out_offset = intrfc->if_ipx_offset;
- int len;
-
- /* Hopefully, most cases */
- if (in_offset >= out_offset)
- return skb;
-
- /* Need new SKB */
- len = skb->len + out_offset;
- skb2 = alloc_skb(len, GFP_ATOMIC);
- if (skb2) {
- skb_reserve(skb2, out_offset);
- skb_reset_network_header(skb2);
- skb_reset_transport_header(skb2);
- skb_put(skb2, skb->len);
- memcpy(ipx_hdr(skb2), ipx_hdr(skb), skb->len);
- memcpy(skb2->cb, skb->cb, sizeof(skb->cb));
- }
- kfree_skb(skb);
- return skb2;
-}
-
-/* caller must hold a reference to intrfc and the skb has to be unshared */
-int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node)
-{
- struct ipxhdr *ipx = ipx_hdr(skb);
- struct net_device *dev = intrfc->if_dev;
- struct datalink_proto *dl = intrfc->if_dlink;
- char dest_node[IPX_NODE_LEN];
- int send_to_wire = 1;
- int addr_len;
-
- ipx->ipx_tctrl = IPX_SKB_CB(skb)->ipx_tctrl;
- ipx->ipx_dest.net = IPX_SKB_CB(skb)->ipx_dest_net;
- ipx->ipx_source.net = IPX_SKB_CB(skb)->ipx_source_net;
-
- /* see if we need to include the netnum in the route list */
- if (IPX_SKB_CB(skb)->last_hop.index >= 0) {
- __be32 *last_hop = (__be32 *)(((u8 *) skb->data) +
- sizeof(struct ipxhdr) +
- IPX_SKB_CB(skb)->last_hop.index *
- sizeof(__be32));
- *last_hop = IPX_SKB_CB(skb)->last_hop.netnum;
- IPX_SKB_CB(skb)->last_hop.index = -1;
- }
-
- /*
- * We need to know how many skbuffs it will take to send out this
- * packet to avoid unnecessary copies.
- */
-
- if (!dl || !dev || dev->flags & IFF_LOOPBACK)
- send_to_wire = 0; /* No non looped */
-
- /*
- * See if this should be demuxed to sockets on this interface
- *
- * We want to ensure the original was eaten or that we only use
- * up clones.
- */
-
- if (ipx->ipx_dest.net == intrfc->if_netnum) {
- /*
- * To our own node, loop and free the original.
- * The internal net will receive on all node address.
- */
- if (intrfc == ipx_internal_net ||
- !memcmp(intrfc->if_node, node, IPX_NODE_LEN)) {
- /* Don't charge sender */
- skb_orphan(skb);
-
- /* Will charge receiver */
- return ipxitf_demux_socket(intrfc, skb, 0);
- }
-
- /* Broadcast, loop and possibly keep to send on. */
- if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN)) {
- if (!send_to_wire)
- skb_orphan(skb);
- ipxitf_demux_socket(intrfc, skb, send_to_wire);
- if (!send_to_wire)
- goto out;
- }
- }
-
- /*
- * If the originating net is not equal to our net; this is routed
- * We are still charging the sender. Which is right - the driver
- * free will handle this fairly.
- */
- if (ipx->ipx_source.net != intrfc->if_netnum) {
- /*
- * Unshare the buffer before modifying the count in
- * case it's a flood or tcpdump
- */
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb)
- goto out;
- if (++ipx->ipx_tctrl > ipxcfg_max_hops)
- send_to_wire = 0;
- }
-
- if (!send_to_wire) {
- kfree_skb(skb);
- goto out;
- }
-
- /* Determine the appropriate hardware address */
- addr_len = dev->addr_len;
- if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN))
- memcpy(dest_node, dev->broadcast, addr_len);
- else
- memcpy(dest_node, &(node[IPX_NODE_LEN-addr_len]), addr_len);
-
- /* Make any compensation for differing physical/data link size */
- skb = ipxitf_adjust_skbuff(intrfc, skb);
- if (!skb)
- goto out;
-
- /* set up data link and physical headers */
- skb->dev = dev;
- skb->protocol = htons(ETH_P_IPX);
-
- /* Send it out */
- dl->request(dl, skb, dest_node);
-out:
- return 0;
-}
-
-static int ipxitf_add_local_route(struct ipx_interface *intrfc)
-{
- return ipxrtr_add_route(intrfc->if_netnum, intrfc, NULL);
-}
-
-static void ipxitf_discover_netnum(struct ipx_interface *intrfc,
- struct sk_buff *skb);
-static int ipxitf_pprop(struct ipx_interface *intrfc, struct sk_buff *skb);
-
-static int ipxitf_rcv(struct ipx_interface *intrfc, struct sk_buff *skb)
-{
- struct ipxhdr *ipx = ipx_hdr(skb);
- int rc = 0;
-
- ipxitf_hold(intrfc);
-
- /* See if we should update our network number */
- if (!intrfc->if_netnum) /* net number of intrfc not known yet */
- ipxitf_discover_netnum(intrfc, skb);
-
- IPX_SKB_CB(skb)->last_hop.index = -1;
- if (ipx->ipx_type == IPX_TYPE_PPROP) {
- rc = ipxitf_pprop(intrfc, skb);
- if (rc)
- goto out_free_skb;
- }
-
- /* local processing follows */
- if (!IPX_SKB_CB(skb)->ipx_dest_net)
- IPX_SKB_CB(skb)->ipx_dest_net = intrfc->if_netnum;
- if (!IPX_SKB_CB(skb)->ipx_source_net)
- IPX_SKB_CB(skb)->ipx_source_net = intrfc->if_netnum;
-
- /* it doesn't make sense to route a pprop packet, there's no meaning
- * in the ipx_dest_net for such packets */
- if (ipx->ipx_type != IPX_TYPE_PPROP &&
- intrfc->if_netnum != IPX_SKB_CB(skb)->ipx_dest_net) {
- /* We only route point-to-point packets. */
- if (skb->pkt_type == PACKET_HOST) {
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (skb)
- rc = ipxrtr_route_skb(skb);
- goto out_intrfc;
- }
-
- goto out_free_skb;
- }
-
- /* see if we should keep it */
- if (!memcmp(ipx_broadcast_node, ipx->ipx_dest.node, IPX_NODE_LEN) ||
- !memcmp(intrfc->if_node, ipx->ipx_dest.node, IPX_NODE_LEN)) {
- rc = ipxitf_demux_socket(intrfc, skb, 0);
- goto out_intrfc;
- }
-
- /* we couldn't pawn it off so unload it */
-out_free_skb:
- kfree_skb(skb);
-out_intrfc:
- ipxitf_put(intrfc);
- return rc;
-}
-
-static void ipxitf_discover_netnum(struct ipx_interface *intrfc,
- struct sk_buff *skb)
-{
- const struct ipx_cb *cb = IPX_SKB_CB(skb);
-
- /* see if this is an intra packet: source_net == dest_net */
- if (cb->ipx_source_net == cb->ipx_dest_net && cb->ipx_source_net) {
- struct ipx_interface *i =
- ipxitf_find_using_net(cb->ipx_source_net);
- /* NB: NetWare servers lie about their hop count so we
- * dropped the test based on it. This is the best way
- * to determine this is a 0 hop count packet. */
- if (!i) {
- intrfc->if_netnum = cb->ipx_source_net;
- ipxitf_add_local_route(intrfc);
- } else {
- printk(KERN_WARNING "IPX: Network number collision "
- "%lx\n %s %s and %s %s\n",
- (unsigned long) ntohl(cb->ipx_source_net),
- ipx_device_name(i),
- ipx_frame_name(i->if_dlink_type),
- ipx_device_name(intrfc),
- ipx_frame_name(intrfc->if_dlink_type));
- ipxitf_put(i);
- }
- }
-}
-
-/**
- * ipxitf_pprop - Process packet propagation IPX packet type 0x14, used for
- * NetBIOS broadcasts
- * @intrfc: IPX interface receiving this packet
- * @skb: Received packet
- *
- * Checks if packet is valid: if its more than %IPX_MAX_PPROP_HOPS hops or if it
- * is smaller than a IPX header + the room for %IPX_MAX_PPROP_HOPS hops we drop
- * it, not even processing it locally, if it has exact %IPX_MAX_PPROP_HOPS we
- * don't broadcast it, but process it locally. See chapter 5 of Novell's "IPX
- * RIP and SAP Router Specification", Part Number 107-000029-001.
- *
- * If it is valid, check if we have pprop broadcasting enabled by the user,
- * if not, just return zero for local processing.
- *
- * If it is enabled check the packet and don't broadcast it if we have already
- * seen this packet.
- *
- * Broadcast: send it to the interfaces that aren't on the packet visited nets
- * array, just after the IPX header.
- *
- * Returns -EINVAL for invalid packets, so that the calling function drops
- * the packet without local processing. 0 if packet is to be locally processed.
- */
-static int ipxitf_pprop(struct ipx_interface *intrfc, struct sk_buff *skb)
-{
- struct ipxhdr *ipx = ipx_hdr(skb);
- int i, rc = -EINVAL;
- struct ipx_interface *ifcs;
- char *c;
- __be32 *l;
-
- /* Illegal packet - too many hops or too short */
- /* We decide to throw it away: no broadcasting, no local processing.
- * NetBIOS unaware implementations route them as normal packets -
- * tctrl <= 15, any data payload... */
- if (IPX_SKB_CB(skb)->ipx_tctrl > IPX_MAX_PPROP_HOPS ||
- ntohs(ipx->ipx_pktsize) < sizeof(struct ipxhdr) +
- IPX_MAX_PPROP_HOPS * sizeof(u32))
- goto out;
- /* are we broadcasting this damn thing? */
- rc = 0;
- if (!sysctl_ipx_pprop_broadcasting)
- goto out;
- /* We do broadcast packet on the IPX_MAX_PPROP_HOPS hop, but we
- * process it locally. All previous hops broadcasted it, and process it
- * locally. */
- if (IPX_SKB_CB(skb)->ipx_tctrl == IPX_MAX_PPROP_HOPS)
- goto out;
-
- c = ((u8 *) ipx) + sizeof(struct ipxhdr);
- l = (__be32 *) c;
-
- /* Don't broadcast packet if already seen this net */
- for (i = 0; i < IPX_SKB_CB(skb)->ipx_tctrl; i++)
- if (*l++ == intrfc->if_netnum)
- goto out;
-
- /* < IPX_MAX_PPROP_HOPS hops && input interface not in list. Save the
- * position where we will insert recvd netnum into list, later on,
- * in ipxitf_send */
- IPX_SKB_CB(skb)->last_hop.index = i;
- IPX_SKB_CB(skb)->last_hop.netnum = intrfc->if_netnum;
- /* xmit on all other interfaces... */
- spin_lock_bh(&ipx_interfaces_lock);
- list_for_each_entry(ifcs, &ipx_interfaces, node) {
- /* Except unconfigured interfaces */
- if (!ifcs->if_netnum)
- continue;
-
- /* That aren't in the list */
- if (ifcs == intrfc)
- continue;
- l = (__be32 *) c;
- /* don't consider the last entry in the packet list,
- * it is our netnum, and it is not there yet */
- for (i = 0; i < IPX_SKB_CB(skb)->ipx_tctrl; i++)
- if (ifcs->if_netnum == *l++)
- break;
- if (i == IPX_SKB_CB(skb)->ipx_tctrl) {
- struct sk_buff *s = skb_copy(skb, GFP_ATOMIC);
-
- if (s) {
- IPX_SKB_CB(s)->ipx_dest_net = ifcs->if_netnum;
- ipxrtr_route_skb(s);
- }
- }
- }
- spin_unlock_bh(&ipx_interfaces_lock);
-out:
- return rc;
-}
-
-static void ipxitf_insert(struct ipx_interface *intrfc)
-{
- spin_lock_bh(&ipx_interfaces_lock);
- list_add_tail(&intrfc->node, &ipx_interfaces);
- spin_unlock_bh(&ipx_interfaces_lock);
-
- if (ipxcfg_auto_select_primary && !ipx_primary_net)
- ipx_primary_net = intrfc;
-}
-
-static struct ipx_interface *ipxitf_alloc(struct net_device *dev, __be32 netnum,
- __be16 dlink_type,
- struct datalink_proto *dlink,
- unsigned char internal,
- int ipx_offset)
-{
- struct ipx_interface *intrfc = kmalloc(sizeof(*intrfc), GFP_ATOMIC);
-
- if (intrfc) {
- intrfc->if_dev = dev;
- intrfc->if_netnum = netnum;
- intrfc->if_dlink_type = dlink_type;
- intrfc->if_dlink = dlink;
- intrfc->if_internal = internal;
- intrfc->if_ipx_offset = ipx_offset;
- intrfc->if_sknum = IPX_MIN_EPHEMERAL_SOCKET;
- INIT_HLIST_HEAD(&intrfc->if_sklist);
- refcount_set(&intrfc->refcnt, 1);
- spin_lock_init(&intrfc->if_sklist_lock);
- }
-
- return intrfc;
-}
-
-static int ipxitf_create_internal(struct ipx_interface_definition *idef)
-{
- struct ipx_interface *intrfc;
- int rc = -EEXIST;
-
- /* Only one primary network allowed */
- if (ipx_primary_net)
- goto out;
-
- /* Must have a valid network number */
- rc = -EADDRNOTAVAIL;
- if (!idef->ipx_network)
- goto out;
- intrfc = ipxitf_find_using_net(idef->ipx_network);
- rc = -EADDRINUSE;
- if (intrfc) {
- ipxitf_put(intrfc);
- goto out;
- }
- intrfc = ipxitf_alloc(NULL, idef->ipx_network, 0, NULL, 1, 0);
- rc = -EAGAIN;
- if (!intrfc)
- goto out;
- memcpy((char *)&(intrfc->if_node), idef->ipx_node, IPX_NODE_LEN);
- ipx_internal_net = ipx_primary_net = intrfc;
- ipxitf_hold(intrfc);
- ipxitf_insert(intrfc);
-
- rc = ipxitf_add_local_route(intrfc);
- ipxitf_put(intrfc);
-out:
- return rc;
-}
-
-static __be16 ipx_map_frame_type(unsigned char type)
-{
- __be16 rc = 0;
-
- switch (type) {
- case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break;
- case IPX_FRAME_8022: rc = htons(ETH_P_802_2); break;
- case IPX_FRAME_SNAP: rc = htons(ETH_P_SNAP); break;
- case IPX_FRAME_8023: rc = htons(ETH_P_802_3); break;
- }
-
- return rc;
-}
-
-static int ipxitf_create(struct ipx_interface_definition *idef)
-{
- struct net_device *dev;
- __be16 dlink_type = 0;
- struct datalink_proto *datalink = NULL;
- struct ipx_interface *intrfc;
- int rc;
-
- if (idef->ipx_special == IPX_INTERNAL) {
- rc = ipxitf_create_internal(idef);
- goto out;
- }
-
- rc = -EEXIST;
- if (idef->ipx_special == IPX_PRIMARY && ipx_primary_net)
- goto out;
-
- intrfc = ipxitf_find_using_net(idef->ipx_network);
- rc = -EADDRINUSE;
- if (idef->ipx_network && intrfc) {
- ipxitf_put(intrfc);
- goto out;
- }
-
- if (intrfc)
- ipxitf_put(intrfc);
-
- dev = dev_get_by_name(&init_net, idef->ipx_device);
- rc = -ENODEV;
- if (!dev)
- goto out;
-
- switch (idef->ipx_dlink_type) {
- case IPX_FRAME_8022:
- dlink_type = htons(ETH_P_802_2);
- datalink = p8022_datalink;
- break;
- case IPX_FRAME_ETHERII:
- if (dev->type != ARPHRD_IEEE802) {
- dlink_type = htons(ETH_P_IPX);
- datalink = pEII_datalink;
- break;
- }
- /* fall through */
- case IPX_FRAME_SNAP:
- dlink_type = htons(ETH_P_SNAP);
- datalink = pSNAP_datalink;
- break;
- case IPX_FRAME_8023:
- dlink_type = htons(ETH_P_802_3);
- datalink = p8023_datalink;
- break;
- case IPX_FRAME_NONE:
- default:
- rc = -EPROTONOSUPPORT;
- goto out_dev;
- }
-
- rc = -ENETDOWN;
- if (!(dev->flags & IFF_UP))
- goto out_dev;
-
- /* Check addresses are suitable */
- rc = -EINVAL;
- if (dev->addr_len > IPX_NODE_LEN)
- goto out_dev;
-
- intrfc = ipxitf_find_using_phys(dev, dlink_type);
- if (!intrfc) {
- /* Ok now create */
- intrfc = ipxitf_alloc(dev, idef->ipx_network, dlink_type,
- datalink, 0, dev->hard_header_len +
- datalink->header_length);
- rc = -EAGAIN;
- if (!intrfc)
- goto out_dev;
- /* Setup primary if necessary */
- if (idef->ipx_special == IPX_PRIMARY)
- ipx_primary_net = intrfc;
- if (!memcmp(idef->ipx_node, "\000\000\000\000\000\000",
- IPX_NODE_LEN)) {
- memset(intrfc->if_node, 0, IPX_NODE_LEN);
- memcpy(intrfc->if_node + IPX_NODE_LEN - dev->addr_len,
- dev->dev_addr, dev->addr_len);
- } else
- memcpy(intrfc->if_node, idef->ipx_node, IPX_NODE_LEN);
- ipxitf_hold(intrfc);
- ipxitf_insert(intrfc);
- }
-
-
- /* If the network number is known, add a route */
- rc = 0;
- if (!intrfc->if_netnum)
- goto out_intrfc;
-
- rc = ipxitf_add_local_route(intrfc);
-out_intrfc:
- ipxitf_put(intrfc);
- goto out;
-out_dev:
- dev_put(dev);
-out:
- return rc;
-}
-
-static int ipxitf_delete(struct ipx_interface_definition *idef)
-{
- struct net_device *dev = NULL;
- __be16 dlink_type = 0;
- struct ipx_interface *intrfc;
- int rc = 0;
-
- spin_lock_bh(&ipx_interfaces_lock);
- if (idef->ipx_special == IPX_INTERNAL) {
- if (ipx_internal_net) {
- __ipxitf_put(ipx_internal_net);
- goto out;
- }
- rc = -ENOENT;
- goto out;
- }
-
- dlink_type = ipx_map_frame_type(idef->ipx_dlink_type);
- rc = -EPROTONOSUPPORT;
- if (!dlink_type)
- goto out;
-
- dev = __dev_get_by_name(&init_net, idef->ipx_device);
- rc = -ENODEV;
- if (!dev)
- goto out;
-
- intrfc = __ipxitf_find_using_phys(dev, dlink_type);
- rc = -EINVAL;
- if (!intrfc)
- goto out;
- __ipxitf_put(intrfc);
-
- rc = 0;
-out:
- spin_unlock_bh(&ipx_interfaces_lock);
- return rc;
-}
-
-static struct ipx_interface *ipxitf_auto_create(struct net_device *dev,
- __be16 dlink_type)
-{
- struct ipx_interface *intrfc = NULL;
- struct datalink_proto *datalink;
-
- if (!dev)
- goto out;
-
- /* Check addresses are suitable */
- if (dev->addr_len > IPX_NODE_LEN)
- goto out;
-
- switch (ntohs(dlink_type)) {
- case ETH_P_IPX: datalink = pEII_datalink; break;
- case ETH_P_802_2: datalink = p8022_datalink; break;
- case ETH_P_SNAP: datalink = pSNAP_datalink; break;
- case ETH_P_802_3: datalink = p8023_datalink; break;
- default: goto out;
- }
-
- intrfc = ipxitf_alloc(dev, 0, dlink_type, datalink, 0,
- dev->hard_header_len + datalink->header_length);
-
- if (intrfc) {
- memset(intrfc->if_node, 0, IPX_NODE_LEN);
- memcpy((char *)&(intrfc->if_node[IPX_NODE_LEN-dev->addr_len]),
- dev->dev_addr, dev->addr_len);
- spin_lock_init(&intrfc->if_sklist_lock);
- refcount_set(&intrfc->refcnt, 1);
- ipxitf_insert(intrfc);
- dev_hold(dev);
- }
-
-out:
- return intrfc;
-}
-
-static int ipxitf_ioctl(unsigned int cmd, void __user *arg)
-{
- int rc = -EINVAL;
- struct ifreq ifr;
- int val;
-
- switch (cmd) {
- case SIOCSIFADDR: {
- struct sockaddr_ipx *sipx;
- struct ipx_interface_definition f;
-
- rc = -EFAULT;
- if (copy_from_user(&ifr, arg, sizeof(ifr)))
- break;
- sipx = (struct sockaddr_ipx *)&ifr.ifr_addr;
- rc = -EINVAL;
- if (sipx->sipx_family != AF_IPX)
- break;
- f.ipx_network = sipx->sipx_network;
- memcpy(f.ipx_device, ifr.ifr_name,
- sizeof(f.ipx_device));
- memcpy(f.ipx_node, sipx->sipx_node, IPX_NODE_LEN);
- f.ipx_dlink_type = sipx->sipx_type;
- f.ipx_special = sipx->sipx_special;
-
- if (sipx->sipx_action == IPX_DLTITF)
- rc = ipxitf_delete(&f);
- else
- rc = ipxitf_create(&f);
- break;
- }
- case SIOCGIFADDR: {
- struct sockaddr_ipx *sipx;
- struct ipx_interface *ipxif;
- struct net_device *dev;
-
- rc = -EFAULT;
- if (copy_from_user(&ifr, arg, sizeof(ifr)))
- break;
- sipx = (struct sockaddr_ipx *)&ifr.ifr_addr;
- dev = __dev_get_by_name(&init_net, ifr.ifr_name);
- rc = -ENODEV;
- if (!dev)
- break;
- ipxif = ipxitf_find_using_phys(dev,
- ipx_map_frame_type(sipx->sipx_type));
- rc = -EADDRNOTAVAIL;
- if (!ipxif)
- break;
-
- sipx->sipx_family = AF_IPX;
- sipx->sipx_network = ipxif->if_netnum;
- memcpy(sipx->sipx_node, ipxif->if_node,
- sizeof(sipx->sipx_node));
- rc = 0;
- if (copy_to_user(arg, &ifr, sizeof(ifr)))
- rc = -EFAULT;
- ipxitf_put(ipxif);
- break;
- }
- case SIOCAIPXITFCRT:
- rc = -EFAULT;
- if (get_user(val, (unsigned char __user *) arg))
- break;
- rc = 0;
- ipxcfg_auto_create_interfaces = val;
- break;
- case SIOCAIPXPRISLT:
- rc = -EFAULT;
- if (get_user(val, (unsigned char __user *) arg))
- break;
- rc = 0;
- ipxcfg_set_auto_select(val);
- break;
- }
-
- return rc;
-}
-
-/*
- * Checksum routine for IPX
- */
-
-/* Note: We assume ipx_tctrl==0 and htons(length)==ipx_pktsize */
-/* This functions should *not* mess with packet contents */
-
-__be16 ipx_cksum(struct ipxhdr *packet, int length)
-{
- /*
- * NOTE: sum is a net byte order quantity, which optimizes the
- * loop. This only works on big and little endian machines. (I
- * don't know of a machine that isn't.)
- */
- /* handle the first 3 words separately; checksum should be skipped
- * and ipx_tctrl masked out */
- __u16 *p = (__u16 *)packet;
- __u32 sum = p[1] + (p[2] & (__force u16)htons(0x00ff));
- __u32 i = (length >> 1) - 3; /* Number of remaining complete words */
-
- /* Loop through them */
- p += 3;
- while (i--)
- sum += *p++;
-
- /* Add on the last part word if it exists */
- if (packet->ipx_pktsize & htons(1))
- sum += (__force u16)htons(0xff00) & *p;
-
- /* Do final fixup */
- sum = (sum & 0xffff) + (sum >> 16);
-
- /* It's a pity there's no concept of carry in C */
- if (sum >= 0x10000)
- sum++;
-
- /*
- * Leave 0 alone; we don't want 0xffff here. Note that we can't get
- * here with 0x10000, so this check is the same as ((__u16)sum)
- */
- if (sum)
- sum = ~sum;
-
- return (__force __be16)sum;
-}
-
-const char *ipx_frame_name(__be16 frame)
-{
- char* rc = "None";
-
- switch (ntohs(frame)) {
- case ETH_P_IPX: rc = "EtherII"; break;
- case ETH_P_802_2: rc = "802.2"; break;
- case ETH_P_SNAP: rc = "SNAP"; break;
- case ETH_P_802_3: rc = "802.3"; break;
- }
-
- return rc;
-}
-
-const char *ipx_device_name(struct ipx_interface *intrfc)
-{
- return intrfc->if_internal ? "Internal" :
- intrfc->if_dev ? intrfc->if_dev->name : "Unknown";
-}
-
-/* Handling for system calls applied via the various interfaces to an IPX
- * socket object. */
-
-static int ipx_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- int opt;
- int rc = -EINVAL;
-
- lock_sock(sk);
- if (optlen != sizeof(int))
- goto out;
-
- rc = -EFAULT;
- if (get_user(opt, (unsigned int __user *)optval))
- goto out;
-
- rc = -ENOPROTOOPT;
- if (!(level == SOL_IPX && optname == IPX_TYPE))
- goto out;
-
- ipx_sk(sk)->type = opt;
- rc = 0;
-out:
- release_sock(sk);
- return rc;
-}
-
-static int ipx_getsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- int val = 0;
- int len;
- int rc = -ENOPROTOOPT;
-
- lock_sock(sk);
- if (!(level == SOL_IPX && optname == IPX_TYPE))
- goto out;
-
- val = ipx_sk(sk)->type;
-
- rc = -EFAULT;
- if (get_user(len, optlen))
- goto out;
-
- len = min_t(unsigned int, len, sizeof(int));
- rc = -EINVAL;
- if(len < 0)
- goto out;
-
- rc = -EFAULT;
- if (put_user(len, optlen) || copy_to_user(optval, &val, len))
- goto out;
-
- rc = 0;
-out:
- release_sock(sk);
- return rc;
-}
-
-static struct proto ipx_proto = {
- .name = "IPX",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct ipx_sock),
-};
-
-static int ipx_create(struct net *net, struct socket *sock, int protocol,
- int kern)
-{
- int rc = -ESOCKTNOSUPPORT;
- struct sock *sk;
-
- if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
-
- /*
- * SPX support is not anymore in the kernel sources. If you want to
- * ressurrect it, completing it and making it understand shared skbs,
- * be fully multithreaded, etc, grab the sources in an early 2.5 kernel
- * tree.
- */
- if (sock->type != SOCK_DGRAM)
- goto out;
-
- rc = -ENOMEM;
- sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, kern);
- if (!sk)
- goto out;
-
- sk_refcnt_debug_inc(sk);
- sock_init_data(sock, sk);
- sk->sk_no_check_tx = 1; /* Checksum off by default */
- sock->ops = &ipx_dgram_ops;
- rc = 0;
-out:
- return rc;
-}
-
-static int ipx_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
-
- if (!sk)
- goto out;
-
- lock_sock(sk);
- sk->sk_shutdown = SHUTDOWN_MASK;
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_state_change(sk);
-
- sock_set_flag(sk, SOCK_DEAD);
- sock->sk = NULL;
- sk_refcnt_debug_release(sk);
- ipx_destroy_socket(sk);
- release_sock(sk);
- sock_put(sk);
-out:
- return 0;
-}
-
-/* caller must hold a reference to intrfc */
-
-static __be16 ipx_first_free_socketnum(struct ipx_interface *intrfc)
-{
- unsigned short socketNum = intrfc->if_sknum;
-
- spin_lock_bh(&intrfc->if_sklist_lock);
-
- if (socketNum < IPX_MIN_EPHEMERAL_SOCKET)
- socketNum = IPX_MIN_EPHEMERAL_SOCKET;
-
- while (__ipxitf_find_socket(intrfc, htons(socketNum)))
- if (socketNum > IPX_MAX_EPHEMERAL_SOCKET)
- socketNum = IPX_MIN_EPHEMERAL_SOCKET;
- else
- socketNum++;
-
- spin_unlock_bh(&intrfc->if_sklist_lock);
- intrfc->if_sknum = socketNum;
-
- return htons(socketNum);
-}
-
-static int __ipx_bind(struct socket *sock,
- struct sockaddr *uaddr, int addr_len)
-{
- struct sock *sk = sock->sk;
- struct ipx_sock *ipxs = ipx_sk(sk);
- struct ipx_interface *intrfc;
- struct sockaddr_ipx *addr = (struct sockaddr_ipx *)uaddr;
- int rc = -EINVAL;
-
- if (!sock_flag(sk, SOCK_ZAPPED) || addr_len != sizeof(struct sockaddr_ipx))
- goto out;
-
- intrfc = ipxitf_find_using_net(addr->sipx_network);
- rc = -EADDRNOTAVAIL;
- if (!intrfc)
- goto out;
-
- if (!addr->sipx_port) {
- addr->sipx_port = ipx_first_free_socketnum(intrfc);
- rc = -EINVAL;
- if (!addr->sipx_port)
- goto out_put;
- }
-
- /* protect IPX system stuff like routing/sap */
- rc = -EACCES;
- if (ntohs(addr->sipx_port) < IPX_MIN_EPHEMERAL_SOCKET &&
- !capable(CAP_NET_ADMIN))
- goto out_put;
-
- ipxs->port = addr->sipx_port;
-
-#ifdef CONFIG_IPX_INTERN
- if (intrfc == ipx_internal_net) {
- /* The source address is to be set explicitly if the
- * socket is to be bound on the internal network. If a
- * node number 0 was specified, the default is used.
- */
-
- rc = -EINVAL;
- if (!memcmp(addr->sipx_node, ipx_broadcast_node, IPX_NODE_LEN))
- goto out_put;
- if (!memcmp(addr->sipx_node, ipx_this_node, IPX_NODE_LEN))
- memcpy(ipxs->node, intrfc->if_node, IPX_NODE_LEN);
- else
- memcpy(ipxs->node, addr->sipx_node, IPX_NODE_LEN);
-
- rc = -EADDRINUSE;
- if (ipxitf_find_internal_socket(intrfc, ipxs->node,
- ipxs->port)) {
- SOCK_DEBUG(sk,
- "IPX: bind failed because port %X in use.\n",
- ntohs(addr->sipx_port));
- goto out_put;
- }
- } else {
- /* Source addresses are easy. It must be our
- * network:node pair for an interface routed to IPX
- * with the ipx routing ioctl()
- */
-
- memcpy(ipxs->node, intrfc->if_node, IPX_NODE_LEN);
-
- rc = -EADDRINUSE;
- if (ipxitf_find_socket(intrfc, addr->sipx_port)) {
- SOCK_DEBUG(sk,
- "IPX: bind failed because port %X in use.\n",
- ntohs(addr->sipx_port));
- goto out_put;
- }
- }
-
-#else /* !def CONFIG_IPX_INTERN */
-
- /* Source addresses are easy. It must be our network:node pair for
- an interface routed to IPX with the ipx routing ioctl() */
-
- rc = -EADDRINUSE;
- if (ipxitf_find_socket(intrfc, addr->sipx_port)) {
- SOCK_DEBUG(sk, "IPX: bind failed because port %X in use.\n",
- ntohs((int)addr->sipx_port));
- goto out_put;
- }
-
-#endif /* CONFIG_IPX_INTERN */
-
- ipxitf_insert_socket(intrfc, sk);
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- rc = 0;
-out_put:
- ipxitf_put(intrfc);
-out:
- return rc;
-}
-
-static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
-{
- struct sock *sk = sock->sk;
- int rc;
-
- lock_sock(sk);
- rc = __ipx_bind(sock, uaddr, addr_len);
- release_sock(sk);
-
- return rc;
-}
-
-static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags)
-{
- struct sock *sk = sock->sk;
- struct ipx_sock *ipxs = ipx_sk(sk);
- struct sockaddr_ipx *addr;
- int rc = -EINVAL;
- struct ipx_route *rt;
-
- sk->sk_state = TCP_CLOSE;
- sock->state = SS_UNCONNECTED;
-
- lock_sock(sk);
- if (addr_len != sizeof(*addr))
- goto out;
- addr = (struct sockaddr_ipx *)uaddr;
-
- /* put the autobinding in */
- if (!ipxs->port) {
- struct sockaddr_ipx uaddr;
-
- uaddr.sipx_port = 0;
- uaddr.sipx_network = 0;
-
-#ifdef CONFIG_IPX_INTERN
- rc = -ENETDOWN;
- if (!ipxs->intrfc)
- goto out; /* Someone zonked the iface */
- memcpy(uaddr.sipx_node, ipxs->intrfc->if_node,
- IPX_NODE_LEN);
-#endif /* CONFIG_IPX_INTERN */
-
- rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
- sizeof(struct sockaddr_ipx));
- if (rc)
- goto out;
- }
-
- /* We can either connect to primary network or somewhere
- * we can route to */
- rt = ipxrtr_lookup(addr->sipx_network);
- rc = -ENETUNREACH;
- if (!rt && !(!addr->sipx_network && ipx_primary_net))
- goto out;
-
- ipxs->dest_addr.net = addr->sipx_network;
- ipxs->dest_addr.sock = addr->sipx_port;
- memcpy(ipxs->dest_addr.node, addr->sipx_node, IPX_NODE_LEN);
- ipxs->type = addr->sipx_type;
-
- if (sock->type == SOCK_DGRAM) {
- sock->state = SS_CONNECTED;
- sk->sk_state = TCP_ESTABLISHED;
- }
-
- if (rt)
- ipxrtr_put(rt);
- rc = 0;
-out:
- release_sock(sk);
- return rc;
-}
-
-
-static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
- int peer)
-{
- struct ipx_address *addr;
- struct sockaddr_ipx sipx;
- struct sock *sk = sock->sk;
- struct ipx_sock *ipxs = ipx_sk(sk);
- int rc;
-
- lock_sock(sk);
- if (peer) {
- rc = -ENOTCONN;
- if (sk->sk_state != TCP_ESTABLISHED)
- goto out;
-
- addr = &ipxs->dest_addr;
- sipx.sipx_network = addr->net;
- sipx.sipx_port = addr->sock;
- memcpy(sipx.sipx_node, addr->node, IPX_NODE_LEN);
- } else {
- if (ipxs->intrfc) {
- sipx.sipx_network = ipxs->intrfc->if_netnum;
-#ifdef CONFIG_IPX_INTERN
- memcpy(sipx.sipx_node, ipxs->node, IPX_NODE_LEN);
-#else
- memcpy(sipx.sipx_node, ipxs->intrfc->if_node,
- IPX_NODE_LEN);
-#endif /* CONFIG_IPX_INTERN */
-
- } else {
- sipx.sipx_network = 0;
- memset(sipx.sipx_node, '\0', IPX_NODE_LEN);
- }
-
- sipx.sipx_port = ipxs->port;
- }
-
- sipx.sipx_family = AF_IPX;
- sipx.sipx_type = ipxs->type;
- sipx.sipx_zero = 0;
- memcpy(uaddr, &sipx, sizeof(sipx));
-
- rc = sizeof(struct sockaddr_ipx);
-out:
- release_sock(sk);
- return rc;
-}
-
-static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
-{
- /* NULL here for pt means the packet was looped back */
- struct ipx_interface *intrfc;
- struct ipxhdr *ipx;
- u16 ipx_pktsize;
- int rc = 0;
-
- if (!net_eq(dev_net(dev), &init_net))
- goto drop;
-
- /* Not ours */
- if (skb->pkt_type == PACKET_OTHERHOST)
- goto drop;
-
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
- goto out;
-
- if (!pskb_may_pull(skb, sizeof(struct ipxhdr)))
- goto drop;
-
- ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize);
-
- /* Too small or invalid header? */
- if (ipx_pktsize < sizeof(struct ipxhdr) ||
- !pskb_may_pull(skb, ipx_pktsize))
- goto drop;
-
- ipx = ipx_hdr(skb);
- if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
- ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
- goto drop;
-
- IPX_SKB_CB(skb)->ipx_tctrl = ipx->ipx_tctrl;
- IPX_SKB_CB(skb)->ipx_dest_net = ipx->ipx_dest.net;
- IPX_SKB_CB(skb)->ipx_source_net = ipx->ipx_source.net;
-
- /* Determine what local ipx endpoint this is */
- intrfc = ipxitf_find_using_phys(dev, pt->type);
- if (!intrfc) {
- if (ipxcfg_auto_create_interfaces &&
- IPX_SKB_CB(skb)->ipx_dest_net) {
- intrfc = ipxitf_auto_create(dev, pt->type);
- if (intrfc)
- ipxitf_hold(intrfc);
- }
-
- if (!intrfc) /* Not one of ours */
- /* or invalid packet for auto creation */
- goto drop;
- }
-
- rc = ipxitf_rcv(intrfc, skb);
- ipxitf_put(intrfc);
- goto out;
-drop:
- kfree_skb(skb);
-out:
- return rc;
-}
-
-static int ipx_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
-{
- struct sock *sk = sock->sk;
- struct ipx_sock *ipxs = ipx_sk(sk);
- DECLARE_SOCKADDR(struct sockaddr_ipx *, usipx, msg->msg_name);
- struct sockaddr_ipx local_sipx;
- int rc = -EINVAL;
- int flags = msg->msg_flags;
-
- lock_sock(sk);
- /* Socket gets bound below anyway */
-/* if (sk->sk_zapped)
- return -EIO; */ /* Socket not bound */
- if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
- goto out;
-
- /* Max possible packet size limited by 16 bit pktsize in header */
- if (len >= 65535 - sizeof(struct ipxhdr))
- goto out;
-
- if (usipx) {
- if (!ipxs->port) {
- struct sockaddr_ipx uaddr;
-
- uaddr.sipx_port = 0;
- uaddr.sipx_network = 0;
-#ifdef CONFIG_IPX_INTERN
- rc = -ENETDOWN;
- if (!ipxs->intrfc)
- goto out; /* Someone zonked the iface */
- memcpy(uaddr.sipx_node, ipxs->intrfc->if_node,
- IPX_NODE_LEN);
-#endif
- rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
- sizeof(struct sockaddr_ipx));
- if (rc)
- goto out;
- }
-
- rc = -EINVAL;
- if (msg->msg_namelen < sizeof(*usipx) ||
- usipx->sipx_family != AF_IPX)
- goto out;
- } else {
- rc = -ENOTCONN;
- if (sk->sk_state != TCP_ESTABLISHED)
- goto out;
-
- usipx = &local_sipx;
- usipx->sipx_family = AF_IPX;
- usipx->sipx_type = ipxs->type;
- usipx->sipx_port = ipxs->dest_addr.sock;
- usipx->sipx_network = ipxs->dest_addr.net;
- memcpy(usipx->sipx_node, ipxs->dest_addr.node, IPX_NODE_LEN);
- }
-
- rc = ipxrtr_route_packet(sk, usipx, msg, len, flags & MSG_DONTWAIT);
- if (rc >= 0)
- rc = len;
-out:
- release_sock(sk);
- return rc;
-}
-
-
-static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
- int flags)
-{
- struct sock *sk = sock->sk;
- struct ipx_sock *ipxs = ipx_sk(sk);
- DECLARE_SOCKADDR(struct sockaddr_ipx *, sipx, msg->msg_name);
- struct ipxhdr *ipx = NULL;
- struct sk_buff *skb;
- int copied, rc;
- bool locked = true;
-
- lock_sock(sk);
- /* put the autobinding in */
- if (!ipxs->port) {
- struct sockaddr_ipx uaddr;
-
- uaddr.sipx_port = 0;
- uaddr.sipx_network = 0;
-
-#ifdef CONFIG_IPX_INTERN
- rc = -ENETDOWN;
- if (!ipxs->intrfc)
- goto out; /* Someone zonked the iface */
- memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN);
-#endif /* CONFIG_IPX_INTERN */
-
- rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
- sizeof(struct sockaddr_ipx));
- if (rc)
- goto out;
- }
-
- rc = -ENOTCONN;
- if (sock_flag(sk, SOCK_ZAPPED))
- goto out;
-
- release_sock(sk);
- locked = false;
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &rc);
- if (!skb) {
- if (rc == -EAGAIN && (sk->sk_shutdown & RCV_SHUTDOWN))
- rc = 0;
- goto out;
- }
-
- ipx = ipx_hdr(skb);
- copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
- if (copied > size) {
- copied = size;
- msg->msg_flags |= MSG_TRUNC;
- }
-
- rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied);
- if (rc)
- goto out_free;
- if (skb->tstamp)
- sk->sk_stamp = skb->tstamp;
-
- if (sipx) {
- sipx->sipx_family = AF_IPX;
- sipx->sipx_port = ipx->ipx_source.sock;
- memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN);
- sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
- sipx->sipx_type = ipx->ipx_type;
- sipx->sipx_zero = 0;
- msg->msg_namelen = sizeof(*sipx);
- }
- rc = copied;
-
-out_free:
- skb_free_datagram(sk, skb);
-out:
- if (locked)
- release_sock(sk);
- return rc;
-}
-
-
-static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
-{
- int rc = 0;
- long amount = 0;
- struct sock *sk = sock->sk;
- void __user *argp = (void __user *)arg;
-
- lock_sock(sk);
- switch (cmd) {
- case TIOCOUTQ:
- amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
- if (amount < 0)
- amount = 0;
- rc = put_user(amount, (int __user *)argp);
- break;
- case TIOCINQ: {
- struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
- /* These two are safe on a single CPU system as only
- * user tasks fiddle here */
- if (skb)
- amount = skb->len - sizeof(struct ipxhdr);
- rc = put_user(amount, (int __user *)argp);
- break;
- }
- case SIOCADDRT:
- case SIOCDELRT:
- rc = -EPERM;
- if (capable(CAP_NET_ADMIN))
- rc = ipxrtr_ioctl(cmd, argp);
- break;
- case SIOCSIFADDR:
- case SIOCAIPXITFCRT:
- case SIOCAIPXPRISLT:
- rc = -EPERM;
- if (!capable(CAP_NET_ADMIN))
- break;
- /* fall through */
- case SIOCGIFADDR:
- rc = ipxitf_ioctl(cmd, argp);
- break;
- case SIOCIPXCFGDATA:
- rc = ipxcfg_get_config_data(argp);
- break;
- case SIOCIPXNCPCONN:
- /*
- * This socket wants to take care of the NCP connection
- * handed to us in arg.
- */
- rc = -EPERM;
- if (!capable(CAP_NET_ADMIN))
- break;
- rc = get_user(ipx_sk(sk)->ipx_ncp_conn,
- (const unsigned short __user *)argp);
- break;
- case SIOCGSTAMP:
- rc = sock_get_timestamp(sk, argp);
- break;
- case SIOCGIFDSTADDR:
- case SIOCSIFDSTADDR:
- case SIOCGIFBRDADDR:
- case SIOCSIFBRDADDR:
- case SIOCGIFNETMASK:
- case SIOCSIFNETMASK:
- rc = -EINVAL;
- break;
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
- release_sock(sk);
-
- return rc;
-}
-
-
-#ifdef CONFIG_COMPAT
-static int ipx_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
-{
- /*
- * These 4 commands use same structure on 32bit and 64bit. Rest of IPX
- * commands is handled by generic ioctl code. As these commands are
- * SIOCPROTOPRIVATE..SIOCPROTOPRIVATE+3, they cannot be handled by generic
- * code.
- */
- switch (cmd) {
- case SIOCAIPXITFCRT:
- case SIOCAIPXPRISLT:
- case SIOCIPXCFGDATA:
- case SIOCIPXNCPCONN:
- return ipx_ioctl(sock, cmd, arg);
- default:
- return -ENOIOCTLCMD;
- }
-}
-#endif
-
-static int ipx_shutdown(struct socket *sock, int mode)
-{
- struct sock *sk = sock->sk;
-
- if (mode < SHUT_RD || mode > SHUT_RDWR)
- return -EINVAL;
- /* This maps:
- * SHUT_RD (0) -> RCV_SHUTDOWN (1)
- * SHUT_WR (1) -> SEND_SHUTDOWN (2)
- * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
- */
- ++mode;
-
- lock_sock(sk);
- sk->sk_shutdown |= mode;
- release_sock(sk);
- sk->sk_state_change(sk);
-
- return 0;
-}
-
-/*
- * Socket family declarations
- */
-
-static const struct net_proto_family ipx_family_ops = {
- .family = PF_IPX,
- .create = ipx_create,
- .owner = THIS_MODULE,
-};
-
-static const struct proto_ops ipx_dgram_ops = {
- .family = PF_IPX,
- .owner = THIS_MODULE,
- .release = ipx_release,
- .bind = ipx_bind,
- .connect = ipx_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = ipx_getname,
- .poll_mask = datagram_poll_mask,
- .ioctl = ipx_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ipx_compat_ioctl,
-#endif
- .listen = sock_no_listen,
- .shutdown = ipx_shutdown,
- .setsockopt = ipx_setsockopt,
- .getsockopt = ipx_getsockopt,
- .sendmsg = ipx_sendmsg,
- .recvmsg = ipx_recvmsg,
- .mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
-};
-
-static struct packet_type ipx_8023_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_802_3),
- .func = ipx_rcv,
-};
-
-static struct packet_type ipx_dix_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_IPX),
- .func = ipx_rcv,
-};
-
-static struct notifier_block ipx_dev_notifier = {
- .notifier_call = ipxitf_device_event,
-};
-
-static const unsigned char ipx_8022_type = 0xE0;
-static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 };
-static const char ipx_EII_err_msg[] __initconst =
- KERN_CRIT "IPX: Unable to register with Ethernet II\n";
-static const char ipx_8023_err_msg[] __initconst =
- KERN_CRIT "IPX: Unable to register with 802.3\n";
-static const char ipx_llc_err_msg[] __initconst =
- KERN_CRIT "IPX: Unable to register with 802.2\n";
-static const char ipx_snap_err_msg[] __initconst =
- KERN_CRIT "IPX: Unable to register with SNAP\n";
-
-static int __init ipx_init(void)
-{
- int rc = proto_register(&ipx_proto, 1);
-
- if (rc != 0)
- goto out;
-
- sock_register(&ipx_family_ops);
-
- pEII_datalink = make_EII_client();
- if (pEII_datalink)
- dev_add_pack(&ipx_dix_packet_type);
- else
- printk(ipx_EII_err_msg);
-
- p8023_datalink = make_8023_client();
- if (p8023_datalink)
- dev_add_pack(&ipx_8023_packet_type);
- else
- printk(ipx_8023_err_msg);
-
- p8022_datalink = register_8022_client(ipx_8022_type, ipx_rcv);
- if (!p8022_datalink)
- printk(ipx_llc_err_msg);
-
- pSNAP_datalink = register_snap_client(ipx_snap_id, ipx_rcv);
- if (!pSNAP_datalink)
- printk(ipx_snap_err_msg);
-
- register_netdevice_notifier(&ipx_dev_notifier);
- ipx_register_sysctl();
- ipx_proc_init();
-out:
- return rc;
-}
-
-static void __exit ipx_proto_finito(void)
-{
- ipx_proc_exit();
- ipx_unregister_sysctl();
-
- unregister_netdevice_notifier(&ipx_dev_notifier);
-
- ipxitf_cleanup();
-
- if (pSNAP_datalink) {
- unregister_snap_client(pSNAP_datalink);
- pSNAP_datalink = NULL;
- }
-
- if (p8022_datalink) {
- unregister_8022_client(p8022_datalink);
- p8022_datalink = NULL;
- }
-
- dev_remove_pack(&ipx_8023_packet_type);
- if (p8023_datalink) {
- destroy_8023_client(p8023_datalink);
- p8023_datalink = NULL;
- }
-
- dev_remove_pack(&ipx_dix_packet_type);
- if (pEII_datalink) {
- destroy_EII_client(pEII_datalink);
- pEII_datalink = NULL;
- }
-
- proto_unregister(&ipx_proto);
- sock_unregister(ipx_family_ops.family);
-}
-
-module_init(ipx_init);
-module_exit(ipx_proto_finito);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETPROTO(PF_IPX);
diff --git a/drivers/staging/ipx/ipx_proc.c b/drivers/staging/ipx/ipx_proc.c
deleted file mode 100644
index 360f0ad970de..000000000000
--- a/drivers/staging/ipx/ipx_proc.c
+++ /dev/null
@@ -1,303 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * IPX proc routines
- *
- * Copyright(C) Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2002
- */
-
-#include <linux/init.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#include <linux/spinlock.h>
-#include <linux/seq_file.h>
-#include <linux/export.h>
-#include <net/net_namespace.h>
-#include <net/tcp_states.h>
-#include <net/ipx.h>
-
-static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos)
-{
- spin_lock_bh(&ipx_interfaces_lock);
- return seq_list_start_head(&ipx_interfaces, *pos);
-}
-
-static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return seq_list_next(v, &ipx_interfaces, pos);
-}
-
-static void ipx_seq_interface_stop(struct seq_file *seq, void *v)
-{
- spin_unlock_bh(&ipx_interfaces_lock);
-}
-
-static int ipx_seq_interface_show(struct seq_file *seq, void *v)
-{
- struct ipx_interface *i;
-
- if (v == &ipx_interfaces) {
- seq_puts(seq, "Network Node_Address Primary Device "
- "Frame_Type");
-#ifdef IPX_REFCNT_DEBUG
- seq_puts(seq, " refcnt");
-#endif
- seq_puts(seq, "\n");
- goto out;
- }
-
- i = list_entry(v, struct ipx_interface, node);
- seq_printf(seq, "%08X ", ntohl(i->if_netnum));
- seq_printf(seq, "%02X%02X%02X%02X%02X%02X ",
- i->if_node[0], i->if_node[1], i->if_node[2],
- i->if_node[3], i->if_node[4], i->if_node[5]);
- seq_printf(seq, "%-9s", i == ipx_primary_net ? "Yes" : "No");
- seq_printf(seq, "%-11s", ipx_device_name(i));
- seq_printf(seq, "%-9s", ipx_frame_name(i->if_dlink_type));
-#ifdef IPX_REFCNT_DEBUG
- seq_printf(seq, "%6d", refcount_read(&i->refcnt));
-#endif
- seq_puts(seq, "\n");
-out:
- return 0;
-}
-
-static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos)
-{
- read_lock_bh(&ipx_routes_lock);
- return seq_list_start_head(&ipx_routes, *pos);
-}
-
-static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return seq_list_next(v, &ipx_routes, pos);
-}
-
-static void ipx_seq_route_stop(struct seq_file *seq, void *v)
-{
- read_unlock_bh(&ipx_routes_lock);
-}
-
-static int ipx_seq_route_show(struct seq_file *seq, void *v)
-{
- struct ipx_route *rt;
-
- if (v == &ipx_routes) {
- seq_puts(seq, "Network Router_Net Router_Node\n");
- goto out;
- }
-
- rt = list_entry(v, struct ipx_route, node);
-
- seq_printf(seq, "%08X ", ntohl(rt->ir_net));
- if (rt->ir_routed)
- seq_printf(seq, "%08X %02X%02X%02X%02X%02X%02X\n",
- ntohl(rt->ir_intrfc->if_netnum),
- rt->ir_router_node[0], rt->ir_router_node[1],
- rt->ir_router_node[2], rt->ir_router_node[3],
- rt->ir_router_node[4], rt->ir_router_node[5]);
- else
- seq_puts(seq, "Directly Connected\n");
-out:
- return 0;
-}
-
-static __inline__ struct sock *ipx_get_socket_idx(loff_t pos)
-{
- struct sock *s = NULL;
- struct ipx_interface *i;
-
- list_for_each_entry(i, &ipx_interfaces, node) {
- spin_lock_bh(&i->if_sklist_lock);
- sk_for_each(s, &i->if_sklist) {
- if (!pos)
- break;
- --pos;
- }
- spin_unlock_bh(&i->if_sklist_lock);
- if (!pos) {
- if (s)
- goto found;
- break;
- }
- }
- s = NULL;
-found:
- return s;
-}
-
-static void *ipx_seq_socket_start(struct seq_file *seq, loff_t *pos)
-{
- loff_t l = *pos;
-
- spin_lock_bh(&ipx_interfaces_lock);
- return l ? ipx_get_socket_idx(--l) : SEQ_START_TOKEN;
-}
-
-static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- struct sock* sk, *next;
- struct ipx_interface *i;
- struct ipx_sock *ipxs;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- sk = NULL;
- i = ipx_interfaces_head();
- if (!i)
- goto out;
- sk = sk_head(&i->if_sklist);
- if (sk)
- spin_lock_bh(&i->if_sklist_lock);
- goto out;
- }
- sk = v;
- next = sk_next(sk);
- if (next) {
- sk = next;
- goto out;
- }
- ipxs = ipx_sk(sk);
- i = ipxs->intrfc;
- spin_unlock_bh(&i->if_sklist_lock);
- sk = NULL;
- for (;;) {
- if (i->node.next == &ipx_interfaces)
- break;
- i = list_entry(i->node.next, struct ipx_interface, node);
- spin_lock_bh(&i->if_sklist_lock);
- if (!hlist_empty(&i->if_sklist)) {
- sk = sk_head(&i->if_sklist);
- break;
- }
- spin_unlock_bh(&i->if_sklist_lock);
- }
-out:
- return sk;
-}
-
-static int ipx_seq_socket_show(struct seq_file *seq, void *v)
-{
- struct sock *s;
- struct ipx_sock *ipxs;
-
- if (v == SEQ_START_TOKEN) {
-#ifdef CONFIG_IPX_INTERN
- seq_puts(seq, "Local_Address "
- "Remote_Address Tx_Queue "
- "Rx_Queue State Uid\n");
-#else
- seq_puts(seq, "Local_Address Remote_Address "
- "Tx_Queue Rx_Queue State Uid\n");
-#endif
- goto out;
- }
-
- s = v;
- ipxs = ipx_sk(s);
-#ifdef CONFIG_IPX_INTERN
- seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ",
- ntohl(ipxs->intrfc->if_netnum),
- ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3],
- ipxs->node[4], ipxs->node[5], ntohs(ipxs->port));
-#else
- seq_printf(seq, "%08X:%04X ", ntohl(ipxs->intrfc->if_netnum),
- ntohs(ipxs->port));
-#endif /* CONFIG_IPX_INTERN */
- if (s->sk_state != TCP_ESTABLISHED)
- seq_printf(seq, "%-28s", "Not_Connected");
- else {
- seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ",
- ntohl(ipxs->dest_addr.net),
- ipxs->dest_addr.node[0], ipxs->dest_addr.node[1],
- ipxs->dest_addr.node[2], ipxs->dest_addr.node[3],
- ipxs->dest_addr.node[4], ipxs->dest_addr.node[5],
- ntohs(ipxs->dest_addr.sock));
- }
-
- seq_printf(seq, "%08X %08X %02X %03u\n",
- sk_wmem_alloc_get(s),
- sk_rmem_alloc_get(s),
- s->sk_state,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
-out:
- return 0;
-}
-
-static const struct seq_operations ipx_seq_interface_ops = {
- .start = ipx_seq_interface_start,
- .next = ipx_seq_interface_next,
- .stop = ipx_seq_interface_stop,
- .show = ipx_seq_interface_show,
-};
-
-static const struct seq_operations ipx_seq_route_ops = {
- .start = ipx_seq_route_start,
- .next = ipx_seq_route_next,
- .stop = ipx_seq_route_stop,
- .show = ipx_seq_route_show,
-};
-
-static const struct seq_operations ipx_seq_socket_ops = {
- .start = ipx_seq_socket_start,
- .next = ipx_seq_socket_next,
- .stop = ipx_seq_interface_stop,
- .show = ipx_seq_socket_show,
-};
-
-static struct proc_dir_entry *ipx_proc_dir;
-
-int __init ipx_proc_init(void)
-{
- struct proc_dir_entry *p;
- int rc = -ENOMEM;
-
- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
-
- if (!ipx_proc_dir)
- goto out;
- p = proc_create_seq("interface", S_IRUGO, ipx_proc_dir,
- &ipx_seq_interface_ops);
- if (!p)
- goto out_interface;
-
- p = proc_create_seq("route", S_IRUGO, ipx_proc_dir, &ipx_seq_route_ops);
- if (!p)
- goto out_route;
-
- p = proc_create_seq("socket", S_IRUGO, ipx_proc_dir,
- &ipx_seq_socket_ops);
- if (!p)
- goto out_socket;
-
- rc = 0;
-out:
- return rc;
-out_socket:
- remove_proc_entry("route", ipx_proc_dir);
-out_route:
- remove_proc_entry("interface", ipx_proc_dir);
-out_interface:
- remove_proc_entry("ipx", init_net.proc_net);
- goto out;
-}
-
-void __exit ipx_proc_exit(void)
-{
- remove_proc_entry("interface", ipx_proc_dir);
- remove_proc_entry("route", ipx_proc_dir);
- remove_proc_entry("socket", ipx_proc_dir);
- remove_proc_entry("ipx", init_net.proc_net);
-}
-
-#else /* CONFIG_PROC_FS */
-
-int __init ipx_proc_init(void)
-{
- return 0;
-}
-
-void __exit ipx_proc_exit(void)
-{
-}
-
-#endif /* CONFIG_PROC_FS */
diff --git a/drivers/staging/ipx/ipx_route.c b/drivers/staging/ipx/ipx_route.c
deleted file mode 100644
index 3cf93aa9f284..000000000000
--- a/drivers/staging/ipx/ipx_route.c
+++ /dev/null
@@ -1,293 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Implements the IPX routing routines.
- * Code moved from af_ipx.c.
- *
- * Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2003
- *
- * See net/ipx/ChangeLog.
- */
-
-#include <linux/list.h>
-#include <linux/route.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#include <net/ipx.h>
-#include <net/sock.h>
-
-LIST_HEAD(ipx_routes);
-DEFINE_RWLOCK(ipx_routes_lock);
-
-extern struct ipx_interface *ipx_internal_net;
-
-extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
-extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
- struct sk_buff *skb, int copy);
-extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
- struct sk_buff *skb, int copy);
-
-struct ipx_route *ipxrtr_lookup(__be32 net)
-{
- struct ipx_route *r;
-
- read_lock_bh(&ipx_routes_lock);
- list_for_each_entry(r, &ipx_routes, node)
- if (r->ir_net == net) {
- ipxrtr_hold(r);
- goto unlock;
- }
- r = NULL;
-unlock:
- read_unlock_bh(&ipx_routes_lock);
- return r;
-}
-
-/*
- * Caller must hold a reference to intrfc
- */
-int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
- unsigned char *node)
-{
- struct ipx_route *rt;
- int rc;
-
- /* Get a route structure; either existing or create */
- rt = ipxrtr_lookup(network);
- if (!rt) {
- rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
- rc = -EAGAIN;
- if (!rt)
- goto out;
-
- refcount_set(&rt->refcnt, 1);
- ipxrtr_hold(rt);
- write_lock_bh(&ipx_routes_lock);
- list_add(&rt->node, &ipx_routes);
- write_unlock_bh(&ipx_routes_lock);
- } else {
- rc = -EEXIST;
- if (intrfc == ipx_internal_net)
- goto out_put;
- }
-
- rt->ir_net = network;
- rt->ir_intrfc = intrfc;
- if (!node) {
- memset(rt->ir_router_node, '\0', IPX_NODE_LEN);
- rt->ir_routed = 0;
- } else {
- memcpy(rt->ir_router_node, node, IPX_NODE_LEN);
- rt->ir_routed = 1;
- }
-
- rc = 0;
-out_put:
- ipxrtr_put(rt);
-out:
- return rc;
-}
-
-void ipxrtr_del_routes(struct ipx_interface *intrfc)
-{
- struct ipx_route *r, *tmp;
-
- write_lock_bh(&ipx_routes_lock);
- list_for_each_entry_safe(r, tmp, &ipx_routes, node)
- if (r->ir_intrfc == intrfc) {
- list_del(&r->node);
- ipxrtr_put(r);
- }
- write_unlock_bh(&ipx_routes_lock);
-}
-
-static int ipxrtr_create(struct ipx_route_definition *rd)
-{
- struct ipx_interface *intrfc;
- int rc = -ENETUNREACH;
-
- /* Find the appropriate interface */
- intrfc = ipxitf_find_using_net(rd->ipx_router_network);
- if (!intrfc)
- goto out;
- rc = ipxrtr_add_route(rd->ipx_network, intrfc, rd->ipx_router_node);
- ipxitf_put(intrfc);
-out:
- return rc;
-}
-
-static int ipxrtr_delete(__be32 net)
-{
- struct ipx_route *r, *tmp;
- int rc;
-
- write_lock_bh(&ipx_routes_lock);
- list_for_each_entry_safe(r, tmp, &ipx_routes, node)
- if (r->ir_net == net) {
- /* Directly connected; can't lose route */
- rc = -EPERM;
- if (!r->ir_routed)
- goto out;
- list_del(&r->node);
- ipxrtr_put(r);
- rc = 0;
- goto out;
- }
- rc = -ENOENT;
-out:
- write_unlock_bh(&ipx_routes_lock);
- return rc;
-}
-
-/*
- * The skb has to be unshared, we'll end up calling ipxitf_send, that'll
- * modify the packet
- */
-int ipxrtr_route_skb(struct sk_buff *skb)
-{
- struct ipxhdr *ipx = ipx_hdr(skb);
- struct ipx_route *r = ipxrtr_lookup(IPX_SKB_CB(skb)->ipx_dest_net);
-
- if (!r) { /* no known route */
- kfree_skb(skb);
- return 0;
- }
-
- ipxitf_hold(r->ir_intrfc);
- ipxitf_send(r->ir_intrfc, skb, r->ir_routed ?
- r->ir_router_node : ipx->ipx_dest.node);
- ipxitf_put(r->ir_intrfc);
- ipxrtr_put(r);
-
- return 0;
-}
-
-/*
- * Route an outgoing frame from a socket.
- */
-int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
- struct msghdr *msg, size_t len, int noblock)
-{
- struct sk_buff *skb;
- struct ipx_sock *ipxs = ipx_sk(sk);
- struct ipx_interface *intrfc;
- struct ipxhdr *ipx;
- size_t size;
- int ipx_offset;
- struct ipx_route *rt = NULL;
- int rc;
-
- /* Find the appropriate interface on which to send packet */
- if (!usipx->sipx_network && ipx_primary_net) {
- usipx->sipx_network = ipx_primary_net->if_netnum;
- intrfc = ipx_primary_net;
- } else {
- rt = ipxrtr_lookup(usipx->sipx_network);
- rc = -ENETUNREACH;
- if (!rt)
- goto out;
- intrfc = rt->ir_intrfc;
- }
-
- ipxitf_hold(intrfc);
- ipx_offset = intrfc->if_ipx_offset;
- size = sizeof(struct ipxhdr) + len + ipx_offset;
-
- skb = sock_alloc_send_skb(sk, size, noblock, &rc);
- if (!skb)
- goto out_put;
-
- skb_reserve(skb, ipx_offset);
- skb->sk = sk;
-
- /* Fill in IPX header */
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
- skb_put(skb, sizeof(struct ipxhdr));
- ipx = ipx_hdr(skb);
- ipx->ipx_pktsize = htons(len + sizeof(struct ipxhdr));
- IPX_SKB_CB(skb)->ipx_tctrl = 0;
- ipx->ipx_type = usipx->sipx_type;
-
- IPX_SKB_CB(skb)->last_hop.index = -1;
-#ifdef CONFIG_IPX_INTERN
- IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum;
- memcpy(ipx->ipx_source.node, ipxs->node, IPX_NODE_LEN);
-#else
- rc = ntohs(ipxs->port);
- if (rc == 0x453 || rc == 0x452) {
- /* RIP/SAP special handling for mars_nwe */
- IPX_SKB_CB(skb)->ipx_source_net = intrfc->if_netnum;
- memcpy(ipx->ipx_source.node, intrfc->if_node, IPX_NODE_LEN);
- } else {
- IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum;
- memcpy(ipx->ipx_source.node, ipxs->intrfc->if_node,
- IPX_NODE_LEN);
- }
-#endif /* CONFIG_IPX_INTERN */
- ipx->ipx_source.sock = ipxs->port;
- IPX_SKB_CB(skb)->ipx_dest_net = usipx->sipx_network;
- memcpy(ipx->ipx_dest.node, usipx->sipx_node, IPX_NODE_LEN);
- ipx->ipx_dest.sock = usipx->sipx_port;
-
- rc = memcpy_from_msg(skb_put(skb, len), msg, len);
- if (rc) {
- kfree_skb(skb);
- goto out_put;
- }
-
- /* Apply checksum. Not allowed on 802.3 links. */
- if (sk->sk_no_check_tx ||
- intrfc->if_dlink_type == htons(IPX_FRAME_8023))
- ipx->ipx_checksum = htons(0xFFFF);
- else
- ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));
-
- rc = ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ?
- rt->ir_router_node : ipx->ipx_dest.node);
-out_put:
- ipxitf_put(intrfc);
- if (rt)
- ipxrtr_put(rt);
-out:
- return rc;
-}
-
-/*
- * We use a normal struct rtentry for route handling
- */
-int ipxrtr_ioctl(unsigned int cmd, void __user *arg)
-{
- struct rtentry rt; /* Use these to behave like 'other' stacks */
- struct sockaddr_ipx *sg, *st;
- int rc = -EFAULT;
-
- if (copy_from_user(&rt, arg, sizeof(rt)))
- goto out;
-
- sg = (struct sockaddr_ipx *)&rt.rt_gateway;
- st = (struct sockaddr_ipx *)&rt.rt_dst;
-
- rc = -EINVAL;
- if (!(rt.rt_flags & RTF_GATEWAY) || /* Direct routes are fixed */
- sg->sipx_family != AF_IPX ||
- st->sipx_family != AF_IPX)
- goto out;
-
- switch (cmd) {
- case SIOCDELRT:
- rc = ipxrtr_delete(st->sipx_network);
- break;
- case SIOCADDRT: {
- struct ipx_route_definition f;
- f.ipx_network = st->sipx_network;
- f.ipx_router_network = sg->sipx_network;
- memcpy(f.ipx_router_node, sg->sipx_node, IPX_NODE_LEN);
- rc = ipxrtr_create(&f);
- break;
- }
- }
-
-out:
- return rc;
-}
diff --git a/drivers/staging/ipx/pe2.c b/drivers/staging/ipx/pe2.c
deleted file mode 100644
index ba7d4214bbff..000000000000
--- a/drivers/staging/ipx/pe2.c
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/in.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-
-#include <net/datalink.h>
-
-static int pEII_request(struct datalink_proto *dl,
- struct sk_buff *skb, unsigned char *dest_node)
-{
- struct net_device *dev = skb->dev;
-
- skb->protocol = htons(ETH_P_IPX);
- dev_hard_header(skb, dev, ETH_P_IPX, dest_node, NULL, skb->len);
- return dev_queue_xmit(skb);
-}
-
-struct datalink_proto *make_EII_client(void)
-{
- struct datalink_proto *proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
-
- if (proto) {
- proto->header_length = 0;
- proto->request = pEII_request;
- }
-
- return proto;
-}
-
-void destroy_EII_client(struct datalink_proto *dl)
-{
- kfree(dl);
-}
diff --git a/drivers/staging/ipx/sysctl_net_ipx.c b/drivers/staging/ipx/sysctl_net_ipx.c
deleted file mode 100644
index c3eef457db88..000000000000
--- a/drivers/staging/ipx/sysctl_net_ipx.c
+++ /dev/null
@@ -1,40 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* -*- linux-c -*-
- * sysctl_net_ipx.c: sysctl interface to net IPX subsystem.
- *
- * Begun April 1, 1996, Mike Shaver.
- * Added /proc/sys/net/ipx directory entry (empty =) ). [MS]
- * Added /proc/sys/net/ipx/ipx_pprop_broadcasting - acme March 4, 2001
- */
-
-#include <linux/mm.h>
-#include <linux/sysctl.h>
-#include <net/net_namespace.h>
-#include <net/ipx.h>
-
-#ifndef CONFIG_SYSCTL
-#error This file should not be compiled without CONFIG_SYSCTL defined
-#endif
-
-static struct ctl_table ipx_table[] = {
- {
- .procname = "ipx_pprop_broadcasting",
- .data = &sysctl_ipx_pprop_broadcasting,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { },
-};
-
-static struct ctl_table_header *ipx_table_header;
-
-void ipx_register_sysctl(void)
-{
- ipx_table_header = register_net_sysctl(&init_net, "net/ipx", ipx_table);
-}
-
-void ipx_unregister_sysctl(void)
-{
- unregister_net_sysctl_table(ipx_table_header);
-}
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
index 58c2a3dafca2..1eee774319ad 100644
--- a/drivers/staging/ks7010/eap_packet.h
+++ b/drivers/staging/ks7010/eap_packet.h
@@ -6,8 +6,6 @@
#include <linux/bitops.h>
#include <uapi/linux/if_ether.h>
-#define ETHER_HDR_SIZE 20
-
struct ether_hdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
@@ -19,6 +17,8 @@ struct ether_hdr {
/* followed by length octets of data */
} __packed;
+#define ETHER_HDR_SIZE sizeof(struct ether_hdr)
+
struct ieee802_1x_hdr {
unsigned char version;
unsigned char type;
@@ -26,60 +26,14 @@ struct ieee802_1x_hdr {
/* followed by length octets of data */
} __packed;
-#define EAPOL_VERSION 2
-
-enum { IEEE802_1X_TYPE_EAP_PACKET = 0,
+enum {
+ IEEE802_1X_TYPE_EAP_PACKET = 0,
IEEE802_1X_TYPE_EAPOL_START = 1,
IEEE802_1X_TYPE_EAPOL_LOGOFF = 2,
IEEE802_1X_TYPE_EAPOL_KEY = 3,
IEEE802_1X_TYPE_EAPOL_ENCAPSULATED_ASF_ALERT = 4
};
-enum { EAPOL_KEY_TYPE_RC4 = 1, EAPOL_KEY_TYPE_RSN = 2,
- EAPOL_KEY_TYPE_WPA = 254
-};
-
-#define IEEE8021X_REPLAY_COUNTER_LEN 8
-#define IEEE8021X_KEY_SIGN_LEN 16
-#define IEEE8021X_KEY_IV_LEN 16
-
-#define IEEE8021X_KEY_INDEX_FLAG 0x80
-#define IEEE8021X_KEY_INDEX_MASK 0x03
-
-struct ieee802_1x_eapol_key {
- unsigned char type;
- unsigned short key_length;
- /*
- * does not repeat within the life of the keying material used to
- * encrypt the Key field; 64-bit NTP timestamp MAY be used here
- */
- unsigned char replay_counter[IEEE8021X_REPLAY_COUNTER_LEN];
- unsigned char key_iv[IEEE8021X_KEY_IV_LEN]; /* cryptographically random
- * number
- */
- unsigned char key_index; /*
- * key flag in the most significant bit:
- * 0 = broadcast (default key),
- * 1 = unicast (key mapping key);
- * key index is in the 7 least
- * significant bits
- */
- /*
- * HMAC-MD5 message integrity check computed with MS-MPPE-Send-Key as
- * the key
- */
- unsigned char key_signature[IEEE8021X_KEY_SIGN_LEN];
-
- /*
- * followed by key: if packet body length = 44 + key length, then the
- * key field (of key_length bytes) contains the key in encrypted form;
- * if packet body length = 44, key field is absent and key_length
- * represents the number of least significant octets from
- * MS-MPPE-Send-Key attribute to be used as the keying material;
- * RC4 key used in encryption = Key-IV + MS-MPPE-Recv-Key
- */
-} __packed;
-
#define WPA_NONCE_LEN 32
#define WPA_REPLAY_COUNTER_LEN 8
@@ -113,26 +67,4 @@ struct wpa_eapol_key {
#define WPA_KEY_INFO_REQUEST BIT(11)
#define WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */
-#define WPA_CAPABILITY_PREAUTH BIT(0)
-
-#define GENERIC_INFO_ELEM 0xdd
-#define RSN_INFO_ELEM 0x30
-
-enum {
- REASON_UNSPECIFIED = 1,
- REASON_DEAUTH_LEAVING = 3,
- REASON_INVALID_IE = 13,
- REASON_MICHAEL_MIC_FAILURE = 14,
- REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
- REASON_GROUP_KEY_UPDATE_TIMEOUT = 16,
- REASON_IE_IN_4WAY_DIFFERS = 17,
- REASON_GROUP_CIPHER_NOT_VALID = 18,
- REASON_PAIRWISE_CIPHER_NOT_VALID = 19,
- REASON_AKMP_NOT_VALID = 20,
- REASON_UNSUPPORTED_RSN_IE_VERSION = 21,
- REASON_INVALID_RSN_IE_CAPAB = 22,
- REASON_IEEE_802_1X_AUTH_FAILED = 23,
- REASON_CIPHER_SUITE_REJECTED = 24
-};
-
#endif /* EAP_PACKET_H */
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index b8f55a11ee1c..74551eb717fc 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -1,76 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for KeyStream, KS7010 based SDIO cards.
*
* Copyright (C) 2006-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
* Copyright (C) 2016 Sang Engineering, Wolfram Sang
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#include <linux/atomic.h>
#include <linux/firmware.h>
+#include <linux/jiffies.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/module.h>
#include <linux/workqueue.h>
-#include <linux/atomic.h>
-
#include "ks_wlan.h"
-#include "ks_wlan_ioctl.h"
#include "ks_hostif.h"
-#include "ks7010_sdio.h"
-#define KS7010_FUNC_NUM 1
-#define KS7010_IO_BLOCK_SIZE 512
-#define KS7010_MAX_CLOCK 25000000
+#define ROM_FILE "ks7010sd.rom"
-static const struct sdio_device_id ks7010_sdio_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_A, SDIO_DEVICE_ID_KS_7010)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_B, SDIO_DEVICE_ID_KS_7010)},
- { /* all zero */ }
+/* SDIO KeyStream vendor and device */
+#define SDIO_VENDOR_ID_KS_CODE_A 0x005b
+#define SDIO_VENDOR_ID_KS_CODE_B 0x0023
+
+/* Older sources suggest earlier versions were named 7910 or 79xx */
+#define SDIO_DEVICE_ID_KS_7010 0x7910
+
+/* Read/Write Status Register */
+#define READ_STATUS_REG 0x000000
+#define WRITE_STATUS_REG 0x00000C
+enum reg_status_type {
+ REG_STATUS_BUSY,
+ REG_STATUS_IDLE
};
-MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
-static inline void inc_txqhead(struct ks_wlan_private *priv)
-{
- priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE;
-}
+/* Read Index Register */
+#define READ_INDEX_REG 0x000004
-static inline void inc_txqtail(struct ks_wlan_private *priv)
-{
- priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE;
-}
+/* Read Data Size Register */
+#define READ_DATA_SIZE_REG 0x000008
-static inline unsigned int cnt_txqbody(struct ks_wlan_private *priv)
-{
- unsigned int tx_cnt = priv->tx_dev.qtail - priv->tx_dev.qhead;
+/* Write Index Register */
+#define WRITE_INDEX_REG 0x000010
- return (tx_cnt + TX_DEVICE_BUFF_SIZE) % TX_DEVICE_BUFF_SIZE;
-}
+/*
+ * Write Status/Read Data Size Register
+ * for network packet (less than 2048 bytes data)
+ */
+#define WSTATUS_RSIZE_REG 0x000014
+
+/* Write Status Register value */
+#define WSTATUS_MASK 0x80
+
+/* Read Data Size Register value [10:4] */
+#define RSIZE_MASK 0x7F
+
+/* ARM to SD interrupt Enable */
+#define INT_ENABLE_REG 0x000020
+/* ARM to SD interrupt Pending */
+#define INT_PENDING_REG 0x000024
+
+#define INT_GCR_B BIT(7)
+#define INT_GCR_A BIT(6)
+#define INT_WRITE_STATUS BIT(5)
+#define INT_WRITE_INDEX BIT(4)
+#define INT_WRITE_SIZE BIT(3)
+#define INT_READ_STATUS BIT(2)
+#define INT_READ_INDEX BIT(1)
+#define INT_READ_SIZE BIT(0)
+
+/* General Communication Register A */
+#define GCR_A_REG 0x000028
+enum gen_com_reg_a {
+ GCR_A_INIT,
+ GCR_A_REMAP,
+ GCR_A_RUN
+};
-static inline void inc_rxqhead(struct ks_wlan_private *priv)
-{
- priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE;
-}
+/* General Communication Register B */
+#define GCR_B_REG 0x00002C
+enum gen_com_reg_b {
+ GCR_B_ACTIVE,
+ GCR_B_DOZE
+};
-static inline void inc_rxqtail(struct ks_wlan_private *priv)
-{
- priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE;
-}
+/* Wakeup Register */
+#define WAKEUP_REG 0x008018
+#define WAKEUP_REQ 0x5a
+
+/* AHB Data Window 0x010000-0x01FFFF */
+#define DATA_WINDOW 0x010000
+#define WINDOW_SIZE (64 * 1024)
-static inline unsigned int cnt_rxqbody(struct ks_wlan_private *priv)
+#define KS7010_IRAM_ADDRESS 0x06000000
+
+#define KS7010_IO_BLOCK_SIZE 512
+
+/**
+ * struct ks_sdio_card - SDIO device data.
+ *
+ * Structure is used as the &struct sdio_func private data.
+ *
+ * @func: Pointer to the SDIO function device.
+ * @priv: Pointer to the &struct net_device private data.
+ */
+struct ks_sdio_card {
+ struct sdio_func *func;
+ struct ks_wlan_private *priv;
+};
+
+static struct sdio_func *ks7010_to_func(struct ks_wlan_private *priv)
{
- unsigned int rx_cnt = priv->rx_dev.qtail - priv->rx_dev.qhead;
+ struct ks_sdio_card *ks_sdio = priv->if_hw;
- return (rx_cnt + RX_DEVICE_BUFF_SIZE) % RX_DEVICE_BUFF_SIZE;
+ return ks_sdio->func;
}
/* Read single byte from device address into byte (CMD52) */
-static int ks7010_sdio_readb(struct ks_wlan_private *priv, unsigned int address,
- unsigned char *byte)
+static int ks7010_sdio_readb(struct ks_wlan_private *priv,
+ u32 address, u8 *byte)
{
- struct sdio_func *func = priv->ks_sdio_card->func;
+ struct sdio_func *func = ks7010_to_func(priv);
int ret;
*byte = sdio_readb(func, address, &ret);
@@ -79,19 +129,19 @@ static int ks7010_sdio_readb(struct ks_wlan_private *priv, unsigned int address,
}
/* Read length bytes from device address into buffer (CMD53) */
-static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
- unsigned char *buffer, int length)
+static int ks7010_sdio_read(struct ks_wlan_private *priv, u32 address,
+ u8 *buffer, unsigned int length)
{
- struct sdio_func *func = priv->ks_sdio_card->func;
+ struct sdio_func *func = ks7010_to_func(priv);
return sdio_memcpy_fromio(func, buffer, address, length);
}
/* Write single byte to device address (CMD52) */
static int ks7010_sdio_writeb(struct ks_wlan_private *priv,
- unsigned int address, unsigned char byte)
+ u32 address, u8 byte)
{
- struct sdio_func *func = priv->ks_sdio_card->func;
+ struct sdio_func *func = ks7010_to_func(priv);
int ret;
sdio_writeb(func, byte, address, &ret);
@@ -100,10 +150,10 @@ static int ks7010_sdio_writeb(struct ks_wlan_private *priv,
}
/* Write length bytes to device address from buffer (CMD53) */
-static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
- unsigned char *buffer, int length)
+static int ks7010_sdio_write(struct ks_wlan_private *priv, u32 address,
+ u8 *buffer, unsigned int length)
{
- struct sdio_func *func = priv->ks_sdio_card->func;
+ struct sdio_func *func = ks7010_to_func(priv);
return sdio_memcpy_toio(func, address, buffer, length);
}
@@ -116,9 +166,9 @@ static void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
atomic_set(&priv->sleepstatus.doze_request, 0);
if (atomic_read(&priv->sleepstatus.status) == 0) {
- ret = ks7010_sdio_writeb(priv, GCR_B, GCR_B_DOZE);
+ ret = ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE);
if (ret) {
- netdev_err(priv->net_dev, " error : GCR_B\n");
+ netdev_err(priv->net_dev, "write GCR_B_REG\n");
goto set_sleep_mode;
}
atomic_set(&priv->sleepstatus.status, 1);
@@ -137,9 +187,9 @@ static void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
atomic_set(&priv->sleepstatus.wakeup_request, 0);
if (atomic_read(&priv->sleepstatus.status) == 1) {
- ret = ks7010_sdio_writeb(priv, WAKEUP, WAKEUP_REQ);
+ ret = ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ);
if (ret) {
- netdev_err(priv->net_dev, " error : WAKEUP\n");
+ netdev_err(priv->net_dev, "write WAKEUP_REG\n");
goto set_sleep_mode;
}
atomic_set(&priv->sleepstatus.status, 0);
@@ -156,9 +206,9 @@ void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
int ret;
if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- ret = ks7010_sdio_writeb(priv, WAKEUP, WAKEUP_REQ);
+ ret = ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ);
if (ret)
- netdev_err(priv->net_dev, " error : WAKEUP\n");
+ netdev_err(priv->net_dev, "write WAKEUP_REG\n");
priv->last_wakeup = jiffies;
++priv->wakeup_count;
@@ -167,7 +217,7 @@ void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
static void _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
{
- unsigned char byte;
+ u8 byte;
int ret;
if (priv->reg.power_mgmt == POWER_MGMT_ACTIVE)
@@ -185,30 +235,35 @@ static void _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
if (atomic_read(&priv->psstatus.status) == PS_SNOOZE)
return;
- netdev_dbg(priv->net_dev, "\npsstatus.status=%d\npsstatus.confirm_wait=%d\npsstatus.snooze_guard=%d\ncnt_txqbody=%d\n",
+ netdev_dbg(priv->net_dev,
+ "STATUS:\n"
+ "- psstatus.status = %d\n"
+ "- psstatus.confirm_wait = %d\n"
+ "- psstatus.snooze_guard = %d\n"
+ "- txq_count = %d\n",
atomic_read(&priv->psstatus.status),
atomic_read(&priv->psstatus.confirm_wait),
atomic_read(&priv->psstatus.snooze_guard),
- cnt_txqbody(priv));
+ txq_count(priv));
if (atomic_read(&priv->psstatus.confirm_wait) ||
atomic_read(&priv->psstatus.snooze_guard) ||
- cnt_txqbody(priv)) {
+ txq_has_space(priv)) {
queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
return;
}
- ret = ks7010_sdio_readb(priv, INT_PENDING, &byte);
+ ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &byte);
if (ret) {
- netdev_err(priv->net_dev, " error : INT_PENDING\n");
+ netdev_err(priv->net_dev, "read INT_PENDING_REG\n");
goto queue_delayed_work;
}
if (byte)
goto queue_delayed_work;
- ret = ks7010_sdio_writeb(priv, GCR_B, GCR_B_DOZE);
+ ret = ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE);
if (ret) {
- netdev_err(priv->net_dev, " error : GCR_B\n");
+ netdev_err(priv->net_dev, "write GCR_B_REG\n");
goto queue_delayed_work;
}
atomic_set(&priv->psstatus.status, PS_SNOOZE);
@@ -239,7 +294,7 @@ static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
goto err_complete;
}
- if ((TX_DEVICE_BUFF_SIZE - 1) <= cnt_txqbody(priv)) {
+ if ((TX_DEVICE_BUFF_SIZE - 1) <= txq_count(priv)) {
netdev_err(priv->net_dev, "tx buffer overflow\n");
ret = -EOVERFLOW;
goto err_complete;
@@ -263,7 +318,7 @@ err_complete:
}
/* write data */
-static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
+static int write_to_device(struct ks_wlan_private *priv, u8 *buffer,
unsigned long size)
{
struct hostif_hdr *hdr;
@@ -279,13 +334,13 @@ static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
ret = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size);
if (ret) {
- netdev_err(priv->net_dev, " write error : retval=%d\n", ret);
+ netdev_err(priv->net_dev, "write DATA_WINDOW\n");
return ret;
}
- ret = ks7010_sdio_writeb(priv, WRITE_STATUS, REG_STATUS_BUSY);
+ ret = ks7010_sdio_writeb(priv, WRITE_STATUS_REG, REG_STATUS_BUSY);
if (ret) {
- netdev_err(priv->net_dev, " error : WRITE_STATUS\n");
+ netdev_err(priv->net_dev, "write WRITE_STATUS_REG\n");
return ret;
}
@@ -297,7 +352,7 @@ static void tx_device_task(struct ks_wlan_private *priv)
struct tx_device_buffer *sp;
int ret;
- if (cnt_txqbody(priv) <= 0 ||
+ if (!txq_has_space(priv) ||
atomic_read(&priv->psstatus.status) == PS_SNOOZE)
return;
@@ -305,7 +360,8 @@ static void tx_device_task(struct ks_wlan_private *priv)
if (priv->dev_state >= DEVICE_STATE_BOOT) {
ret = write_to_device(priv, sp->sendp, sp->size);
if (ret) {
- netdev_err(priv->net_dev, "write_to_device error !!(%d)\n", ret);
+ netdev_err(priv->net_dev,
+ "write_to_device error !!(%d)\n", ret);
queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
return;
}
@@ -315,7 +371,7 @@ static void tx_device_task(struct ks_wlan_private *priv)
(*sp->complete_handler)(priv, sp->skb);
inc_txqhead(priv);
- if (cnt_txqbody(priv) > 0)
+ if (txq_has_space(priv))
queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
}
@@ -343,7 +399,7 @@ int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
result = enqueue_txdev(priv, p, size, complete_handler, skb);
spin_unlock(&priv->tx_dev.tx_dev_lock);
- if (cnt_txqbody(priv) > 0)
+ if (txq_has_space(priv))
queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
return result;
@@ -354,25 +410,25 @@ static void rx_event_task(unsigned long dev)
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
struct rx_device_buffer *rp;
- if (cnt_rxqbody(priv) > 0 && priv->dev_state >= DEVICE_STATE_BOOT) {
+ if (rxq_has_space(priv) && priv->dev_state >= DEVICE_STATE_BOOT) {
rp = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qhead];
hostif_receive(priv, rp->data, rp->size);
inc_rxqhead(priv);
- if (cnt_rxqbody(priv) > 0)
+ if (rxq_has_space(priv))
tasklet_schedule(&priv->rx_bh_task);
}
}
-static void ks_wlan_hw_rx(struct ks_wlan_private *priv, uint16_t size)
+static void ks_wlan_hw_rx(struct ks_wlan_private *priv, size_t size)
{
int ret;
struct rx_device_buffer *rx_buffer;
struct hostif_hdr *hdr;
- unsigned short event = 0;
+ u16 event = 0;
/* receive data */
- if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
+ if (rxq_count(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
netdev_err(priv->net_dev, "rx buffer overflow\n");
return;
}
@@ -390,9 +446,9 @@ static void ks_wlan_hw_rx(struct ks_wlan_private *priv, uint16_t size)
DUMP_PREFIX_OFFSET,
rx_buffer->data, 32);
#endif
- ret = ks7010_sdio_writeb(priv, READ_STATUS, REG_STATUS_IDLE);
+ ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, REG_STATUS_IDLE);
if (ret)
- netdev_err(priv->net_dev, " error : READ_STATUS\n");
+ netdev_err(priv->net_dev, "write READ_STATUS_REG\n");
/* length check fail */
return;
@@ -403,15 +459,13 @@ static void ks_wlan_hw_rx(struct ks_wlan_private *priv, uint16_t size)
event = le16_to_cpu(hdr->event);
inc_rxqtail(priv);
- ret = ks7010_sdio_writeb(priv, READ_STATUS, REG_STATUS_IDLE);
+ ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, REG_STATUS_IDLE);
if (ret)
- netdev_err(priv->net_dev, " error : READ_STATUS\n");
+ netdev_err(priv->net_dev, "write READ_STATUS_REG\n");
- if (atomic_read(&priv->psstatus.confirm_wait)) {
- if (IS_HIF_CONF(event)) {
- netdev_dbg(priv->net_dev, "IS_HIF_CONF true !!\n");
- atomic_dec(&priv->psstatus.confirm_wait);
- }
+ if (atomic_read(&priv->psstatus.confirm_wait) && is_hif_conf(event)) {
+ netdev_dbg(priv->net_dev, "IS_HIF_CONF true !!\n");
+ atomic_dec(&priv->psstatus.confirm_wait);
}
tasklet_schedule(&priv->rx_bh_task);
@@ -419,34 +473,33 @@ static void ks_wlan_hw_rx(struct ks_wlan_private *priv, uint16_t size)
static void ks7010_rw_function(struct work_struct *work)
{
- struct ks_wlan_private *priv;
- unsigned char byte;
+ struct ks_wlan_private *priv = container_of(work,
+ struct ks_wlan_private,
+ rw_dwork.work);
+ struct sdio_func *func = ks7010_to_func(priv);
+ u8 byte;
int ret;
- priv = container_of(work, struct ks_wlan_private, rw_dwork.work);
-
/* wait after DOZE */
- if (time_after(priv->last_doze + ((30 * HZ) / 1000), jiffies)) {
+ if (time_after(priv->last_doze + msecs_to_jiffies(30), jiffies)) {
netdev_dbg(priv->net_dev, "wait after DOZE\n");
queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
return;
}
/* wait after WAKEUP */
- while (time_after(priv->last_wakeup + ((30 * HZ) / 1000), jiffies)) {
+ while (time_after(priv->last_wakeup + msecs_to_jiffies(30), jiffies)) {
netdev_dbg(priv->net_dev, "wait after WAKEUP\n");
- dev_info(&priv->ks_sdio_card->func->dev,
- "wake: %lu %lu\n",
- priv->last_wakeup + (30 * HZ) / 1000,
- jiffies);
+ dev_info(&func->dev, "wake: %lu %lu\n",
+ priv->last_wakeup + msecs_to_jiffies(30), jiffies);
msleep(30);
}
- sdio_claim_host(priv->ks_sdio_card->func);
+ sdio_claim_host(func);
/* power save wakeup */
if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- if (cnt_txqbody(priv) > 0) {
+ if (txq_has_space(priv)) {
ks_wlan_hw_wakeup_request(priv);
queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
}
@@ -465,15 +518,15 @@ static void ks7010_rw_function(struct work_struct *work)
}
/* read (WriteStatus/ReadDataSize FN1:00_0014) */
- ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE, &byte);
+ ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte);
if (ret) {
- netdev_err(priv->net_dev, " error : WSTATUS_RSIZE psstatus=%d\n",
+ netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG psstatus=%d\n",
atomic_read(&priv->psstatus.status));
goto release_host;
}
if (byte & RSIZE_MASK) { /* Read schedule */
- ks_wlan_hw_rx(priv, (uint16_t)((byte & RSIZE_MASK) << 4));
+ ks_wlan_hw_rx(priv, (size_t)((byte & RSIZE_MASK) << 4));
}
if ((byte & WSTATUS_MASK))
tx_device_task(priv);
@@ -481,7 +534,7 @@ static void ks7010_rw_function(struct work_struct *work)
_ks_wlan_hw_power_save(priv);
release_host:
- sdio_release_host(priv->ks_sdio_card->func);
+ sdio_release_host(func);
}
static void ks_sdio_interrupt(struct sdio_func *func)
@@ -489,7 +542,7 @@ static void ks_sdio_interrupt(struct sdio_func *func)
int ret;
struct ks_sdio_card *card;
struct ks_wlan_private *priv;
- unsigned char status, rsize, byte;
+ u8 status, rsize, byte;
card = sdio_get_drvdata(func);
priv = card->priv;
@@ -497,9 +550,9 @@ static void ks_sdio_interrupt(struct sdio_func *func)
if (priv->dev_state < DEVICE_STATE_BOOT)
goto queue_delayed_work;
- ret = ks7010_sdio_readb(priv, INT_PENDING, &status);
+ ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &status);
if (ret) {
- netdev_err(priv->net_dev, "error : INT_PENDING\n");
+ netdev_err(priv->net_dev, "read INT_PENDING_REG\n");
goto queue_delayed_work;
}
@@ -510,9 +563,9 @@ static void ks_sdio_interrupt(struct sdio_func *func)
/* bit2 -> Read Status Busy */
if (status & INT_GCR_B ||
atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- ret = ks7010_sdio_readb(priv, GCR_B, &byte);
+ ret = ks7010_sdio_readb(priv, GCR_B_REG, &byte);
if (ret) {
- netdev_err(priv->net_dev, " error : GCR_B\n");
+ netdev_err(priv->net_dev, "read GCR_B_REG\n");
goto queue_delayed_work;
}
if (byte == GCR_B_ACTIVE) {
@@ -526,20 +579,21 @@ static void ks_sdio_interrupt(struct sdio_func *func)
do {
/* read (WriteStatus/ReadDataSize FN1:00_0014) */
- ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE, &byte);
+ ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte);
if (ret) {
- netdev_err(priv->net_dev, " error : WSTATUS_RSIZE\n");
+ netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG\n");
goto queue_delayed_work;
}
rsize = byte & RSIZE_MASK;
if (rsize != 0) /* Read schedule */
- ks_wlan_hw_rx(priv, (uint16_t)(rsize << 4));
+ ks_wlan_hw_rx(priv, (size_t)(rsize << 4));
if (byte & WSTATUS_MASK) {
if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- if (cnt_txqbody(priv)) {
+ if (txq_has_space(priv)) {
ks_wlan_hw_wakeup_request(priv);
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
+ queue_delayed_work(priv->wq,
+ &priv->rw_dwork, 1);
return;
}
} else {
@@ -573,7 +627,7 @@ static void trx_device_exit(struct ks_wlan_private *priv)
struct tx_device_buffer *sp;
/* tx buffer clear */
- while (cnt_txqbody(priv) > 0) {
+ while (txq_has_space(priv)) {
sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
kfree(sp->sendp);
if (sp->complete_handler) /* TX Complete */
@@ -589,16 +643,15 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
int ret;
unsigned char *data_buf;
- data_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ data_buf = kmemdup(&index, sizeof(u32), GFP_KERNEL);
if (!data_buf)
return -ENOMEM;
- memcpy(data_buf, &index, sizeof(index));
- ret = ks7010_sdio_write(priv, WRITE_INDEX, data_buf, sizeof(index));
+ ret = ks7010_sdio_write(priv, WRITE_INDEX_REG, data_buf, sizeof(index));
if (ret)
goto err_free_data_buf;
- ret = ks7010_sdio_write(priv, READ_INDEX, data_buf, sizeof(index));
+ ret = ks7010_sdio_write(priv, READ_INDEX_REG, data_buf, sizeof(index));
if (ret)
goto err_free_data_buf;
@@ -612,10 +665,10 @@ err_free_data_buf:
#define ROM_BUFF_SIZE (64 * 1024)
static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
- unsigned char *data, unsigned int size)
+ u8 *data, unsigned int size)
{
int ret;
- unsigned char *read_buf;
+ u8 *read_buf;
read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
if (!read_buf)
@@ -639,37 +692,22 @@ err_free_read_buf:
return ret;
}
-static int ks7010_upload_firmware(struct ks_sdio_card *card)
+static int ks7010_copy_firmware(struct ks_wlan_private *priv,
+ const struct firmware *fw_entry)
{
- struct ks_wlan_private *priv = card->priv;
- unsigned int size, offset, n = 0;
- unsigned char *rom_buf;
- unsigned char byte = 0;
- int ret;
unsigned int length;
- const struct firmware *fw_entry = NULL;
+ unsigned int size;
+ unsigned int offset;
+ unsigned int n = 0;
+ u8 *rom_buf;
+ int ret;
rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
if (!rom_buf)
return -ENOMEM;
- sdio_claim_host(card->func);
-
- /* Firmware running ? */
- ret = ks7010_sdio_readb(priv, GCR_A, &byte);
- if (byte == GCR_A_RUN) {
- netdev_dbg(priv->net_dev, "MAC firmware running ...\n");
- goto release_host_and_free;
- }
-
- ret = request_firmware(&fw_entry, ROM_FILE,
- &priv->ks_sdio_card->func->dev);
- if (ret)
- goto release_host_and_free;
-
length = fw_entry->size;
- n = 0;
do {
if (length >= ROM_BUFF_SIZE) {
size = ROM_BUFF_SIZE;
@@ -680,33 +718,69 @@ static int ks7010_upload_firmware(struct ks_sdio_card *card)
}
if (size == 0)
break;
+
memcpy(rom_buf, fw_entry->data + n, size);
offset = n;
- ret = ks7010_sdio_update_index(priv, KS7010_IRAM_ADDRESS + offset);
+ ret = ks7010_sdio_update_index(priv,
+ KS7010_IRAM_ADDRESS + offset);
if (ret)
- goto release_firmware;
+ goto free_rom_buf;
ret = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size);
if (ret)
- goto release_firmware;
+ goto free_rom_buf;
- ret = ks7010_sdio_data_compare(priv, DATA_WINDOW, rom_buf, size);
+ ret = ks7010_sdio_data_compare(priv,
+ DATA_WINDOW, rom_buf, size);
if (ret)
- goto release_firmware;
+ goto free_rom_buf;
n += size;
} while (size);
- ret = ks7010_sdio_writeb(priv, GCR_A, GCR_A_REMAP);
+ ret = ks7010_sdio_writeb(priv, GCR_A_REG, GCR_A_REMAP);
+
+free_rom_buf:
+ kfree(rom_buf);
+ return ret;
+}
+
+static int ks7010_upload_firmware(struct ks_sdio_card *card)
+{
+ struct ks_wlan_private *priv = card->priv;
+ struct sdio_func *func = ks7010_to_func(priv);
+ unsigned int n;
+ u8 byte = 0;
+ int ret;
+ const struct firmware *fw_entry = NULL;
+
+ sdio_claim_host(func);
+
+ /* Firmware running ? */
+ ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte);
+ if (ret)
+ goto release_host;
+ if (byte == GCR_A_RUN) {
+ netdev_dbg(priv->net_dev, "MAC firmware running ...\n");
+ ret = -EBUSY;
+ goto release_host;
+ }
+
+ ret = request_firmware(&fw_entry, ROM_FILE,
+ &func->dev);
+ if (ret)
+ goto release_host;
+
+ ret = ks7010_copy_firmware(priv, fw_entry);
if (ret)
goto release_firmware;
/* Firmware running check */
for (n = 0; n < 50; ++n) {
- mdelay(10); /* wait_ms(10); */
- ret = ks7010_sdio_readb(priv, GCR_A, &byte);
+ usleep_range(10000, 11000); /* wait_ms(10); */
+ ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte);
if (ret)
goto release_firmware;
@@ -723,13 +797,29 @@ static int ks7010_upload_firmware(struct ks_sdio_card *card)
release_firmware:
release_firmware(fw_entry);
- release_host_and_free:
- sdio_release_host(card->func);
- kfree(rom_buf);
+ release_host:
+ sdio_release_host(func);
return ret;
}
+static void ks7010_sme_enqueue_events(struct ks_wlan_private *priv)
+{
+ static const u16 init_events[] = {
+ SME_GET_EEPROM_CKSUM, SME_STOP_REQUEST,
+ SME_RTS_THRESHOLD_REQUEST, SME_FRAGMENTATION_THRESHOLD_REQUEST,
+ SME_WEP_INDEX_REQUEST, SME_WEP_KEY1_REQUEST,
+ SME_WEP_KEY2_REQUEST, SME_WEP_KEY3_REQUEST,
+ SME_WEP_KEY4_REQUEST, SME_WEP_FLAG_REQUEST,
+ SME_RSN_ENABLED_REQUEST, SME_MODE_SET_REQUEST,
+ SME_START_REQUEST
+ };
+ int ev;
+
+ for (ev = 0; ev < ARRAY_SIZE(init_events); ev++)
+ hostif_sme_enqueue(priv, init_events[ev]);
+}
+
static void ks7010_card_init(struct ks_wlan_private *priv)
{
init_completion(&priv->confirm_wait);
@@ -745,24 +835,7 @@ static void ks7010_card_init(struct ks_wlan_private *priv)
if (priv->mac_address_valid && priv->version_size != 0)
priv->dev_state = DEVICE_STATE_PREINIT;
- hostif_sme_enqueue(priv, SME_GET_EEPROM_CKSUM);
-
- /* load initial wireless parameter */
- hostif_sme_enqueue(priv, SME_STOP_REQUEST);
-
- hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
- hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
-
- hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
- hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
- hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
- hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
- hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
-
- hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
- hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
- hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
- hostif_sme_enqueue(priv, SME_START_REQUEST);
+ ks7010_sme_enqueue_events(priv);
if (!wait_for_completion_interruptible_timeout
(&priv->confirm_wait, 5 * HZ)) {
@@ -802,18 +875,87 @@ static void ks7010_init_defaults(struct ks_wlan_private *priv)
priv->reg.rate_set.size = 12;
}
+static int ks7010_sdio_setup_irqs(struct sdio_func *func)
+{
+ int ret;
+
+ /* interrupt disable */
+ sdio_writeb(func, 0, INT_ENABLE_REG, &ret);
+ if (ret)
+ goto irq_error;
+
+ sdio_writeb(func, 0xff, INT_PENDING_REG, &ret);
+ if (ret)
+ goto irq_error;
+
+ /* setup interrupt handler */
+ ret = sdio_claim_irq(func, ks_sdio_interrupt);
+
+irq_error:
+ return ret;
+}
+
+static void ks7010_sdio_init_irqs(struct sdio_func *func,
+ struct ks_wlan_private *priv)
+{
+ u8 byte;
+ int ret;
+
+ /*
+ * interrupt setting
+ * clear Interrupt status write
+ * (ARMtoSD_InterruptPending FN1:00_0024)
+ */
+ sdio_claim_host(func);
+ ret = ks7010_sdio_writeb(priv, INT_PENDING_REG, 0xff);
+ sdio_release_host(func);
+ if (ret)
+ netdev_err(priv->net_dev, "write INT_PENDING_REG\n");
+
+ /* enable ks7010sdio interrupt */
+ byte = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS);
+ sdio_claim_host(func);
+ ret = ks7010_sdio_writeb(priv, INT_ENABLE_REG, byte);
+ sdio_release_host(func);
+ if (ret)
+ netdev_err(priv->net_dev, "write INT_ENABLE_REG\n");
+}
+
+static void ks7010_private_init(struct ks_wlan_private *priv,
+ struct ks_sdio_card *card,
+ struct net_device *netdev)
+{
+ /* private memory initialize */
+ priv->if_hw = card;
+
+ priv->dev_state = DEVICE_STATE_PREBOOT;
+ priv->net_dev = netdev;
+ priv->firmware_version[0] = '\0';
+ priv->version_size = 0;
+ priv->last_doze = jiffies;
+ priv->last_wakeup = jiffies;
+ memset(&priv->nstats, 0, sizeof(priv->nstats));
+ memset(&priv->wstats, 0, sizeof(priv->wstats));
+
+ /* sleep mode */
+ atomic_set(&priv->sleepstatus.doze_request, 0);
+ atomic_set(&priv->sleepstatus.wakeup_request, 0);
+ atomic_set(&priv->sleepstatus.wakeup_request, 0);
+
+ trx_device_init(priv);
+ hostif_init(priv);
+ ks_wlan_net_start(netdev);
+ ks7010_init_defaults(priv);
+}
+
static int ks7010_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *device)
{
- struct ks_wlan_private *priv;
+ struct ks_wlan_private *priv = NULL;
+ struct net_device *netdev = NULL;
struct ks_sdio_card *card;
- struct net_device *netdev;
- unsigned char byte;
int ret;
- priv = NULL;
- netdev = NULL;
-
card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
@@ -823,6 +965,9 @@ static int ks7010_sdio_probe(struct sdio_func *func,
sdio_claim_host(func);
ret = sdio_set_block_size(func, KS7010_IO_BLOCK_SIZE);
+ if (ret)
+ goto err_free_card;
+
dev_dbg(&card->func->dev, "multi_block=%d sdio_set_block_size()=%d %d\n",
func->card->cccr.multi_block, func->cur_blksize, ret);
@@ -830,16 +975,7 @@ static int ks7010_sdio_probe(struct sdio_func *func,
if (ret)
goto err_free_card;
- /* interrupt disable */
- sdio_writeb(func, 0, INT_ENABLE, &ret);
- if (ret)
- goto err_free_card;
- sdio_writeb(func, 0xff, INT_PENDING, &ret);
- if (ret)
- goto err_disable_func;
-
- /* setup interrupt handler */
- ret = sdio_claim_irq(func, ks_sdio_interrupt);
+ ret = ks7010_sdio_setup_irqs(func);
if (ret)
goto err_disable_func;
@@ -853,70 +989,35 @@ static int ks7010_sdio_probe(struct sdio_func *func,
/* private memory allocate */
netdev = alloc_etherdev(sizeof(*priv));
if (!netdev) {
- dev_err(&card->func->dev, "ks7010 : Unable to alloc new net device\n");
+ dev_err(&card->func->dev, "Unable to alloc new net device\n");
goto err_release_irq;
}
- if (dev_alloc_name(netdev, "wlan%d") < 0) {
- dev_err(&card->func->dev,
- "ks7010 : Couldn't get name!\n");
+
+ ret = dev_alloc_name(netdev, "wlan%d");
+ if (ret < 0) {
+ dev_err(&card->func->dev, "Couldn't get name!\n");
goto err_free_netdev;
}
priv = netdev_priv(netdev);
card->priv = priv;
- SET_NETDEV_DEV(netdev, &card->func->dev); /* for create sysfs symlinks */
-
- /* private memory initialize */
- priv->ks_sdio_card = card;
-
- priv->dev_state = DEVICE_STATE_PREBOOT;
- priv->net_dev = netdev;
- priv->firmware_version[0] = '\0';
- priv->version_size = 0;
- priv->last_doze = jiffies;
- priv->last_wakeup = jiffies;
- memset(&priv->nstats, 0, sizeof(priv->nstats));
- memset(&priv->wstats, 0, sizeof(priv->wstats));
-
- /* sleep mode */
- atomic_set(&priv->sleepstatus.doze_request, 0);
- atomic_set(&priv->sleepstatus.wakeup_request, 0);
- atomic_set(&priv->sleepstatus.wakeup_request, 0);
+ SET_NETDEV_DEV(netdev, &card->func->dev);
- trx_device_init(priv);
- hostif_init(priv);
- ks_wlan_net_start(netdev);
-
- ks7010_init_defaults(priv);
+ ks7010_private_init(priv, card, netdev);
ret = ks7010_upload_firmware(card);
if (ret) {
netdev_err(priv->net_dev,
- "ks7010: firmware load failed !! return code = %d\n",
- ret);
+ "firmware load failed !! ret = %d\n", ret);
goto err_free_netdev;
}
- /* interrupt setting */
- /* clear Interrupt status write (ARMtoSD_InterruptPending FN1:00_0024) */
- sdio_claim_host(func);
- ret = ks7010_sdio_writeb(priv, INT_PENDING, 0xff);
- sdio_release_host(func);
- if (ret)
- netdev_err(priv->net_dev, " error : INT_PENDING\n");
-
- /* enable ks7010sdio interrupt */
- byte = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS);
- sdio_claim_host(func);
- ret = ks7010_sdio_writeb(priv, INT_ENABLE, byte);
- sdio_release_host(func);
- if (ret)
- netdev_err(priv->net_dev, " err : INT_ENABLE\n");
+ ks7010_sdio_init_irqs(func, priv);
priv->dev_state = DEVICE_STATE_BOOT;
- priv->wq = create_workqueue("wq");
+ priv->wq = alloc_workqueue("wq", WQ_MEM_RECLAIM, 1);
if (!priv->wq) {
netdev_err(priv->net_dev, "create_workqueue failed !!\n");
goto err_free_netdev;
@@ -932,8 +1033,7 @@ static int ks7010_sdio_probe(struct sdio_func *func,
return 0;
err_free_netdev:
- free_netdev(priv->net_dev);
- card->priv = NULL;
+ free_netdev(netdev);
err_release_irq:
sdio_claim_host(func);
sdio_release_irq(func);
@@ -950,25 +1050,22 @@ static int ks7010_sdio_probe(struct sdio_func *func,
/* send stop request to MAC */
static int send_stop_request(struct sdio_func *func)
{
- struct hostif_stop_request_t *pp;
+ struct hostif_stop_request *pp;
struct ks_sdio_card *card;
size_t size;
card = sdio_get_drvdata(func);
pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
- if (!pp) {
- netdev_err(card->priv->net_dev, "allocate memory failed..\n");
+ if (!pp)
return -ENOMEM;
- }
size = sizeof(*pp) - sizeof(pp->header.size);
- pp->header.size = cpu_to_le16((uint16_t)size);
- pp->header.event = cpu_to_le16((uint16_t)HIF_STOP_REQ);
+ pp->header.size = cpu_to_le16(size);
+ pp->header.event = cpu_to_le16(HIF_STOP_REQ);
sdio_claim_host(func);
- write_to_device(card->priv, (unsigned char *)pp,
- hif_align_size(sizeof(*pp)));
+ write_to_device(card->priv, (u8 *)pp, hif_align_size(sizeof(*pp)));
sdio_release_host(func);
kfree(pp);
@@ -987,43 +1084,50 @@ static void ks7010_sdio_remove(struct sdio_func *func)
return;
priv = card->priv;
- if (priv) {
- struct net_device *netdev = priv->net_dev;
+ if (!priv)
+ goto err_free_card;
- ks_wlan_net_stop(netdev);
+ ks_wlan_net_stop(priv->net_dev);
- /* interrupt disable */
- sdio_claim_host(func);
- sdio_writeb(func, 0, INT_ENABLE, &ret);
- sdio_writeb(func, 0xff, INT_PENDING, &ret);
- sdio_release_host(func);
+ /* interrupt disable */
+ sdio_claim_host(func);
+ sdio_writeb(func, 0, INT_ENABLE_REG, &ret);
+ sdio_writeb(func, 0xff, INT_PENDING_REG, &ret);
+ sdio_release_host(func);
- ret = send_stop_request(func);
- if (ret) /* memory allocation failure */
- return;
+ ret = send_stop_request(func);
+ if (ret) /* memory allocation failure */
+ goto err_free_card;
- if (priv->wq) {
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
- }
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ }
- hostif_exit(priv);
+ hostif_exit(priv);
- unregister_netdev(netdev);
+ unregister_netdev(priv->net_dev);
- trx_device_exit(priv);
- free_netdev(priv->net_dev);
- card->priv = NULL;
- }
+ trx_device_exit(priv);
+ free_netdev(priv->net_dev);
+ card->priv = NULL;
sdio_claim_host(func);
sdio_release_irq(func);
sdio_disable_func(func);
sdio_release_host(func);
+err_free_card:
sdio_set_drvdata(func, NULL);
kfree(card);
}
+static const struct sdio_device_id ks7010_sdio_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_A, SDIO_DEVICE_ID_KS_7010)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_B, SDIO_DEVICE_ID_KS_7010)},
+ { /* all zero */ }
+};
+MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
+
static struct sdio_driver ks7010_sdio_driver = {
.name = "ks7010_sdio",
.id_table = ks7010_sdio_ids,
diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h
deleted file mode 100644
index e4f56a11c888..000000000000
--- a/drivers/staging/ks7010/ks7010_sdio.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Driver for KeyStream, KS7010 based SDIO cards.
- *
- * Copyright (C) 2006-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _KS7010_SDIO_H
-#define _KS7010_SDIO_H
-
-#ifdef DEVICE_ALIGNMENT
-#undef DEVICE_ALIGNMENT
-#endif
-#define DEVICE_ALIGNMENT 32
-
-/* SDIO KeyStream vendor and device */
-#define SDIO_VENDOR_ID_KS_CODE_A 0x005b
-#define SDIO_VENDOR_ID_KS_CODE_B 0x0023
-/* Older sources suggest earlier versions were named 7910 or 79xx */
-#define SDIO_DEVICE_ID_KS_7010 0x7910
-
-/* Read/Write Status Register */
-#define READ_STATUS 0x000000
-#define WRITE_STATUS 0x00000C
-enum reg_status_type {
- REG_STATUS_BUSY,
- REG_STATUS_IDLE
-};
-
-/* Read Index Register */
-#define READ_INDEX 0x000004
-
-/* Read Data Size Register */
-#define READ_DATA_SIZE 0x000008
-
-/* Write Index Register */
-#define WRITE_INDEX 0x000010
-
-/* Write Status/Read Data Size Register
- * for network packet (less than 2048 bytes data)
- */
-#define WSTATUS_RSIZE 0x000014
-#define WSTATUS_MASK 0x80 /* Write Status Register value */
-#define RSIZE_MASK 0x7F /* Read Data Size Register value [10:4] */
-
-/* ARM to SD interrupt Enable */
-#define INT_ENABLE 0x000020
-/* ARM to SD interrupt Pending */
-#define INT_PENDING 0x000024
-
-#define INT_GCR_B BIT(7)
-#define INT_GCR_A BIT(6)
-#define INT_WRITE_STATUS BIT(5)
-#define INT_WRITE_INDEX BIT(4)
-#define INT_WRITE_SIZE BIT(3)
-#define INT_READ_STATUS BIT(2)
-#define INT_READ_INDEX BIT(1)
-#define INT_READ_SIZE BIT(0)
-
-/* General Communication Register A */
-#define GCR_A 0x000028
-enum gen_com_reg_a {
- GCR_A_INIT,
- GCR_A_REMAP,
- GCR_A_RUN
-};
-
-/* General Communication Register B */
-#define GCR_B 0x00002C
-enum gen_com_reg_b {
- GCR_B_ACTIVE,
- GCR_B_DOZE
-};
-
-/* Wakeup Register */
-#define WAKEUP 0x008018
-#define WAKEUP_REQ 0x5a
-
-/* AHB Data Window 0x010000-0x01FFFF */
-#define DATA_WINDOW 0x010000
-#define WINDOW_SIZE (64 * 1024)
-
-#define KS7010_IRAM_ADDRESS 0x06000000
-
-/**
- * struct ks_sdio_card - SDIO device data.
- *
- * Structure is used as the &struct sdio_func private data.
- *
- * @func: Pointer to the SDIO function device.
- * @priv: Pointer to the &struct net_device private data.
- */
-struct ks_sdio_card {
- struct sdio_func *func;
- struct ks_wlan_private *priv;
-};
-
-/* Tx Device struct */
-#define TX_DEVICE_BUFF_SIZE 1024
-
-/**
- * struct tx_device_buffer - Queue item for the tx queue.
- * @sendp: Pointer to the send request data.
- * @size: Size of @sendp data.
- * @complete_handler: Function called once data write to device is complete.
- * @arg1: First argument to @complete_handler.
- * @arg2: Second argument to @complete_handler.
- */
-struct tx_device_buffer {
- unsigned char *sendp;
- unsigned int size;
- void (*complete_handler)(struct ks_wlan_private *priv,
- struct sk_buff *skb);
- struct sk_buff *skb;
-};
-
-/**
- * struct tx_device - Tx buffer queue.
- * @tx_device_buffer: Queue buffer.
- * @qhead: Head of tx queue.
- * @qtail: Tail of tx queue.
- * @tx_dev_lock: Queue lock.
- */
-struct tx_device {
- struct tx_device_buffer tx_dev_buff[TX_DEVICE_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
- spinlock_t tx_dev_lock; /* protect access to the queue */
-};
-
-/* Rx Device struct */
-#define RX_DATA_SIZE (2 + 2 + 2347 + 1)
-#define RX_DEVICE_BUFF_SIZE 32
-
-/**
- * struct rx_device_buffer - Queue item for the rx queue.
- * @data: rx data.
- * @size: Size of @data.
- */
-struct rx_device_buffer {
- unsigned char data[RX_DATA_SIZE];
- unsigned int size;
-};
-
-/**
- * struct rx_device - Rx buffer queue.
- * @rx_device_buffer: Queue buffer.
- * @qhead: Head of rx queue.
- * @qtail: Tail of rx queue.
- * @rx_dev_lock: Queue lock.
- */
-struct rx_device {
- struct rx_device_buffer rx_dev_buff[RX_DEVICE_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
- spinlock_t rx_dev_lock; /* protect access to the queue */
-};
-
-#define ROM_FILE "ks7010sd.rom"
-
-#endif /* _KS7010_SDIO_H */
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 676961cf4103..0ecffab52ec2 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -1,25 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for KeyStream wireless LAN cards.
*
* Copyright (C) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include "ks_wlan.h"
-#include "ks_hostif.h"
+#include <linux/circ_buf.h>
+#include <linux/if_arp.h>
+#include <net/iw_handler.h>
+#include <uapi/linux/llc.h>
#include "eap_packet.h"
+#include "ks_wlan.h"
#include "michael_mic.h"
-
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_arp.h>
-
-/* Include Wireless Extension definition and check version */
-#include <net/iw_handler.h> /* New driver API */
+#include "ks_hostif.h"
static inline void inc_smeqhead(struct ks_wlan_private *priv)
{
@@ -33,13 +27,10 @@ static inline void inc_smeqtail(struct ks_wlan_private *priv)
static inline unsigned int cnt_smeqbody(struct ks_wlan_private *priv)
{
- unsigned int sme_cnt = priv->sme_i.qtail - priv->sme_i.qhead;
-
- return (sme_cnt + SME_EVENT_BUFF_SIZE) % SME_EVENT_BUFF_SIZE;
+ return CIRC_CNT_TO_END(priv->sme_i.qhead, priv->sme_i.qtail,
+ SME_EVENT_BUFF_SIZE);
}
-#define KS_WLAN_MEM_FLAG (GFP_ATOMIC)
-
static inline u8 get_byte(struct ks_wlan_private *priv)
{
u8 data;
@@ -96,82 +87,59 @@ static void ks_wlan_hw_wakeup_task(struct work_struct *work)
tasklet_enable(&priv->sme_task);
}
-static
-int ks_wlan_do_power_save(struct ks_wlan_private *priv)
+static void ks_wlan_do_power_save(struct ks_wlan_private *priv)
{
if (is_connect_status(priv->connect_status))
hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
else
priv->dev_state = DEVICE_STATE_READY;
- return 0;
}
static
-int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
+int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info *ap_info)
{
- struct local_ap_t *ap;
+ struct local_ap *ap;
union iwreq_data wrqu;
struct net_device *netdev = priv->net_dev;
+ u8 size;
ap = &priv->current_ap;
if (is_disconnect_status(priv->connect_status)) {
- memset(ap, 0, sizeof(struct local_ap_t));
+ memset(ap, 0, sizeof(struct local_ap));
return -EPERM;
}
- /* bssid */
- memcpy(ap->bssid, ap_info->bssid, ETH_ALEN);
- /* essid */
+ ether_addr_copy(ap->bssid, ap_info->bssid);
memcpy(ap->ssid.body, priv->reg.ssid.body,
priv->reg.ssid.size);
ap->ssid.size = priv->reg.ssid.size;
- /* rate_set */
memcpy(ap->rate_set.body, ap_info->rate_set.body,
ap_info->rate_set.size);
ap->rate_set.size = ap_info->rate_set.size;
if (ap_info->ext_rate_set.size != 0) {
- /* rate_set */
memcpy(&ap->rate_set.body[ap->rate_set.size],
ap_info->ext_rate_set.body,
ap_info->ext_rate_set.size);
ap->rate_set.size += ap_info->ext_rate_set.size;
}
- /* channel */
ap->channel = ap_info->ds_parameter.channel;
- /* rssi */
ap->rssi = ap_info->rssi;
- /* sq */
ap->sq = ap_info->sq;
- /* noise */
ap->noise = ap_info->noise;
- /* capability */
ap->capability = le16_to_cpu(ap_info->capability);
- /* rsn */
+ size = (ap_info->rsn.size <= RSN_IE_BODY_MAX) ?
+ ap_info->rsn.size : RSN_IE_BODY_MAX;
if ((ap_info->rsn_mode & RSN_MODE_WPA2) &&
(priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
- ap->rsn_ie.id = 0x30;
- if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
- ap->rsn_ie.size = ap_info->rsn.size;
- memcpy(ap->rsn_ie.body, ap_info->rsn.body,
- ap_info->rsn.size);
- } else {
- ap->rsn_ie.size = RSN_IE_BODY_MAX;
- memcpy(ap->rsn_ie.body, ap_info->rsn.body,
- RSN_IE_BODY_MAX);
- }
+ ap->rsn_ie.id = RSN_INFO_ELEM_ID;
+ ap->rsn_ie.size = size;
+ memcpy(ap->rsn_ie.body, ap_info->rsn.body, size);
} else if ((ap_info->rsn_mode & RSN_MODE_WPA) &&
(priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) {
- ap->wpa_ie.id = 0xdd;
- if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
- ap->wpa_ie.size = ap_info->rsn.size;
- memcpy(ap->wpa_ie.body, ap_info->rsn.body,
- ap_info->rsn.size);
- } else {
- ap->wpa_ie.size = RSN_IE_BODY_MAX;
- memcpy(ap->wpa_ie.body, ap_info->rsn.body,
- RSN_IE_BODY_MAX);
- }
+ ap->wpa_ie.id = WPA_INFO_ELEM_ID;
+ ap->wpa_ie.size = size;
+ memcpy(ap->wpa_ie.body, ap_info->rsn.body, size);
} else {
ap->rsn_ie.id = 0;
ap->rsn_ie.size = 0;
@@ -183,20 +151,24 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
if (is_connect_status(priv->connect_status)) {
- memcpy(wrqu.ap_addr.sa_data,
- priv->current_ap.bssid, ETH_ALEN);
+ ether_addr_copy(wrqu.ap_addr.sa_data, priv->current_ap.bssid);
netdev_dbg(priv->net_dev,
- "IWEVENT: connect bssid=%pM\n", wrqu.ap_addr.sa_data);
+ "IWEVENT: connect bssid=%pM\n",
+ wrqu.ap_addr.sa_data);
wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
}
- netdev_dbg(priv->net_dev, " Link AP\n");
- netdev_dbg(priv->net_dev, " bssid=%02X:%02X:%02X:%02X:%02X:%02X\n"
- " essid=%s\n"
- " rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n"
- " channel=%d\n"
- " rssi=%d\n"
- " sq=%d\n"
- " capability=%04X\n",
+ netdev_dbg(priv->net_dev, "Link AP\n"
+ "- bssid=%02X:%02X:%02X:%02X:%02X:%02X\n"
+ "- essid=%s\n"
+ "- rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n"
+ "- channel=%d\n"
+ "- rssi=%d\n"
+ "- sq=%d\n"
+ "- capability=%04X\n"
+ "- rsn.mode=%d\n"
+ "- rsn.size=%d\n"
+ "- ext_rate_set_size=%d\n"
+ "- rate_set_size=%d\n",
ap->bssid[0], ap->bssid[1], ap->bssid[2],
ap->bssid[3], ap->bssid[4], ap->bssid[5],
&(ap->ssid.body[0]),
@@ -204,10 +176,8 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
ap->rate_set.body[2], ap->rate_set.body[3],
ap->rate_set.body[4], ap->rate_set.body[5],
ap->rate_set.body[6], ap->rate_set.body[7],
- ap->channel, ap->rssi, ap->sq, ap->capability);
- netdev_dbg(priv->net_dev, " Link AP\n rsn.mode=%d\n rsn.size=%d\n",
- ap_info->rsn_mode, ap_info->rsn.size);
- netdev_dbg(priv->net_dev, " ext_rate_set_size=%d\n rate_set_size=%d\n",
+ ap->channel, ap->rssi, ap->sq, ap->capability,
+ ap_info->rsn_mode, ap_info->rsn.size,
ap_info->ext_rate_set.size, ap_info->rate_set.size);
return 0;
@@ -223,25 +193,19 @@ static u8 read_ie(unsigned char *bp, u8 max, u8 *body)
static
-int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
- struct local_ap_t *ap)
+int get_ap_information(struct ks_wlan_private *priv, struct ap_info *ap_info,
+ struct local_ap *ap)
{
unsigned char *bp;
int bsize, offset;
- memset(ap, 0, sizeof(struct local_ap_t));
+ memset(ap, 0, sizeof(struct local_ap));
- /* bssid */
- memcpy(ap->bssid, ap_info->bssid, ETH_ALEN);
- /* rssi */
+ ether_addr_copy(ap->bssid, ap_info->bssid);
ap->rssi = ap_info->rssi;
- /* sq */
ap->sq = ap_info->sq;
- /* noise */
ap->noise = ap_info->noise;
- /* capability */
ap->capability = le16_to_cpu(ap_info->capability);
- /* channel */
ap->channel = ap_info->ch_info;
bp = ap_info->body;
@@ -269,8 +233,6 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
(RATE_SET_MAX_SIZE - ap->rate_set.size);
}
break;
- case WLAN_EID_DS_PARAMS:
- break;
case WLAN_EID_RSN:
ap->rsn_ie.id = *bp;
ap->rsn_ie.size = read_ie(bp, RSN_IE_BODY_MAX,
@@ -284,7 +246,7 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
ap->wpa_ie.body);
}
break;
-
+ case WLAN_EID_DS_PARAMS:
case WLAN_EID_FH_PARAMS:
case WLAN_EID_CF_PARAMS:
case WLAN_EID_TIM:
@@ -293,7 +255,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
case WLAN_EID_ERP_INFO:
break;
default:
- netdev_err(priv->net_dev, "unknown Element ID=%d\n", *bp);
+ netdev_err(priv->net_dev,
+ "unknown Element ID=%d\n", *bp);
break;
}
@@ -314,11 +277,11 @@ int hostif_data_indication_wpa(struct ks_wlan_private *priv,
unsigned char recv_mic[8];
char buf[128];
unsigned long now;
- struct mic_failure_t *mic_failure;
- struct michael_mic_t michael_mic;
+ struct mic_failure *mic_failure;
+ struct michael_mic michael_mic;
union iwreq_data wrqu;
unsigned int key_index = auth_type - 1;
- struct wpa_key_t *key = &priv->wpa.key[key_index];
+ struct wpa_key *key = &priv->wpa.key[key_index];
eth_hdr = (struct ether_hdr *)(priv->rxp);
eth_proto = ntohs(eth_hdr->h_proto);
@@ -341,12 +304,9 @@ int hostif_data_indication_wpa(struct ks_wlan_private *priv,
memcpy(&recv_mic[0], (priv->rxp) + ((priv->rx_size) - 8), 8);
priv->rx_size = priv->rx_size - 8;
if (auth_type > 0 && auth_type < 4) { /* auth_type check */
- michael_mic_function(&michael_mic,
- (uint8_t *)key->rx_mic_key,
- (uint8_t *)priv->rxp,
- (int)priv->rx_size,
- (uint8_t)0, /* priority */
- (uint8_t *)michael_mic.result);
+ michael_mic_function(&michael_mic, key->rx_mic_key,
+ priv->rxp, priv->rx_size,
+ 0, michael_mic.result);
}
if (memcmp(michael_mic.result, recv_mic, 8) != 0) {
now = jiffies;
@@ -363,8 +323,9 @@ int hostif_data_indication_wpa(struct ks_wlan_private *priv,
} else if (mic_failure->failure == 1) {
mic_failure->failure = 2;
mic_failure->counter =
- (uint16_t)((now - mic_failure->last_failure_time) / HZ);
- if (!mic_failure->counter) /* range 1-60 */
+ (u16)((now - mic_failure->last_failure_time) / HZ);
+ /* range 1-60 */
+ if (!mic_failure->counter)
mic_failure->counter = 1;
}
priv->wpa.mic_failure.last_failure_time = now;
@@ -390,7 +351,7 @@ void hostif_data_indication(struct ks_wlan_private *priv)
{
unsigned int rx_ind_size; /* indicate data size */
struct sk_buff *skb;
- unsigned short auth_type;
+ u16 auth_type;
unsigned char temp[256];
struct ether_hdr *eth_hdr;
unsigned short eth_proto;
@@ -411,7 +372,7 @@ void hostif_data_indication(struct ks_wlan_private *priv)
eth_proto = ntohs(eth_hdr->h_proto);
/* source address check */
- if (memcmp(&priv->eth_addr[0], eth_hdr->h_source, ETH_ALEN) == 0) {
+ if (ether_addr_equal(&priv->eth_addr[0], eth_hdr->h_source)) {
netdev_err(priv->net_dev, "invalid : source is own mac address !!\n");
netdev_err(priv->net_dev,
"eth_hdrernet->h_dest=%02X:%02X:%02X:%02X:%02X:%02X\n",
@@ -436,7 +397,7 @@ void hostif_data_indication(struct ks_wlan_private *priv)
/* check 13th byte at rx data */
switch (*(priv->rxp + 12)) {
- case 0xAA: /* SNAP */
+ case LLC_SAP_SNAP:
rx_ind_size = priv->rx_size - 6;
skb = dev_alloc_skb(rx_ind_size);
if (!skb) {
@@ -456,7 +417,7 @@ void hostif_data_indication(struct ks_wlan_private *priv)
aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + ETHER_HDR_SIZE);
break;
- case 0xF0: /* NETBEUI/NetBIOS */
+ case LLC_SAP_NETBEUI:
rx_ind_size = (priv->rx_size + 2);
skb = dev_alloc_skb(rx_ind_size);
if (!skb) {
@@ -506,13 +467,12 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
u16 mib_val_size;
u16 mib_val_type;
- mib_status = get_dword(priv); /* MIB status */
- mib_attribute = get_dword(priv); /* MIB atttibute */
- mib_val_size = get_word(priv); /* MIB value size */
- mib_val_type = get_word(priv); /* MIB value type */
+ mib_status = get_dword(priv);
+ mib_attribute = get_dword(priv);
+ mib_val_size = get_word(priv);
+ mib_val_type = get_word(priv);
if (mib_status) {
- /* in case of error */
netdev_err(priv->net_dev, "attribute=%08X, status=%08X\n",
mib_attribute, mib_status);
return;
@@ -520,22 +480,13 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
switch (mib_attribute) {
case DOT11_MAC_ADDRESS:
- /* MAC address */
hostif_sme_enqueue(priv, SME_GET_MAC_ADDRESS);
- memcpy(priv->eth_addr, priv->rxp, ETH_ALEN);
+ ether_addr_copy(priv->eth_addr, priv->rxp);
priv->mac_address_valid = true;
- dev->dev_addr[0] = priv->eth_addr[0];
- dev->dev_addr[1] = priv->eth_addr[1];
- dev->dev_addr[2] = priv->eth_addr[2];
- dev->dev_addr[3] = priv->eth_addr[3];
- dev->dev_addr[4] = priv->eth_addr[4];
- dev->dev_addr[5] = priv->eth_addr[5];
- dev->dev_addr[6] = 0x00;
- dev->dev_addr[7] = 0x00;
+ ether_addr_copy(dev->dev_addr, priv->eth_addr);
netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
break;
case DOT11_PRODUCT_VERSION:
- /* firmware version */
priv->version_size = priv->rx_size;
memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
priv->firmware_version[priv->rx_size] = '\0';
@@ -553,18 +504,15 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
break;
case LOCAL_EEPROM_SUM:
memcpy(&priv->eeprom_sum, priv->rxp, sizeof(priv->eeprom_sum));
- if (priv->eeprom_sum.type == 0) {
- priv->eeprom_checksum = EEPROM_CHECKSUM_NONE;
- } else if (priv->eeprom_sum.type == 1) {
- if (priv->eeprom_sum.result == 0) {
- priv->eeprom_checksum = EEPROM_NG;
- netdev_info(dev, "LOCAL_EEPROM_SUM NG\n");
- } else if (priv->eeprom_sum.result == 1) {
- priv->eeprom_checksum = EEPROM_OK;
- }
- } else {
+ if (priv->eeprom_sum.type != 0 &&
+ priv->eeprom_sum.type != 1) {
netdev_err(dev, "LOCAL_EEPROM_SUM error!\n");
+ return;
}
+ priv->eeprom_checksum = (priv->eeprom_sum.type == 0) ?
+ EEPROM_CHECKSUM_NONE :
+ (priv->eeprom_sum.result == 0) ?
+ EEPROM_NG : EEPROM_OK;
break;
default:
netdev_err(priv->net_dev, "mib_attribute=%08x\n",
@@ -576,11 +524,11 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
static
void hostif_mib_set_confirm(struct ks_wlan_private *priv)
{
- u32 mib_status; /* +04 MIB Status */
- u32 mib_attribute; /* +08 MIB attribute */
+ u32 mib_status;
+ u32 mib_attribute;
- mib_status = get_dword(priv); /* MIB Status */
- mib_attribute = get_dword(priv); /* MIB attribute */
+ mib_status = get_dword(priv);
+ mib_attribute = get_dword(priv);
if (mib_status) {
/* in case of error */
@@ -649,8 +597,6 @@ void hostif_mib_set_confirm(struct ks_wlan_private *priv)
case DOT11_RSN_CONFIG_AUTH_SUITE:
hostif_sme_enqueue(priv, SME_RSN_AUTH_CONFIRM);
break;
- case DOT11_PMK_TSC:
- break;
case DOT11_GMK1_TSC:
if (atomic_read(&priv->psstatus.snooze_guard))
atomic_set(&priv->psstatus.snooze_guard, 0);
@@ -659,16 +605,11 @@ void hostif_mib_set_confirm(struct ks_wlan_private *priv)
if (atomic_read(&priv->psstatus.snooze_guard))
atomic_set(&priv->psstatus.snooze_guard, 0);
break;
+ case DOT11_PMK_TSC:
case LOCAL_PMK:
- break;
case LOCAL_GAIN:
- break;
-#ifdef WPS
case LOCAL_WPS_ENABLE:
- break;
case LOCAL_WPS_PROBE_REQ:
- break;
-#endif /* WPS */
case LOCAL_REGION:
default:
break;
@@ -698,7 +639,6 @@ void hostif_sleep_confirm(struct ks_wlan_private *priv)
static
void hostif_start_confirm(struct ks_wlan_private *priv)
{
-#ifdef WPS
union iwreq_data wrqu;
wrqu.data.length = 0;
@@ -708,7 +648,6 @@ void hostif_start_confirm(struct ks_wlan_private *priv)
eth_zero_addr(wrqu.ap_addr.sa_data);
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
-#endif
netdev_dbg(priv->net_dev, " scan_ind_count=%d\n", priv->scan_ind_count);
hostif_sme_enqueue(priv, SME_START_CONFIRM);
}
@@ -716,7 +655,7 @@ void hostif_start_confirm(struct ks_wlan_private *priv)
static
void hostif_connect_indication(struct ks_wlan_private *priv)
{
- unsigned short connect_code;
+ u16 connect_code;
unsigned int tmp = 0;
unsigned int old_status = priv->connect_status;
struct net_device *netdev = priv->net_dev;
@@ -725,13 +664,13 @@ void hostif_connect_indication(struct ks_wlan_private *priv)
connect_code = get_word(priv);
switch (connect_code) {
- case RESULT_CONNECT: /* connect */
+ case RESULT_CONNECT:
if (!(priv->connect_status & FORCE_DISCONNECT))
netif_carrier_on(netdev);
tmp = FORCE_DISCONNECT & priv->connect_status;
priv->connect_status = tmp + CONNECT_STATUS;
break;
- case RESULT_DISCONNECT: /* disconnect */
+ case RESULT_DISCONNECT:
netif_carrier_off(netdev);
tmp = FORCE_DISCONNECT & priv->connect_status;
priv->connect_status = tmp + DISCONNECT_STATUS;
@@ -745,7 +684,7 @@ void hostif_connect_indication(struct ks_wlan_private *priv)
break;
}
- get_current_ap(priv, (struct link_ap_info_t *)priv->rxp);
+ get_current_ap(priv, (struct link_ap_info *)priv->rxp);
if (is_connect_status(priv->connect_status) &&
is_disconnect_status(old_status)) {
/* for power save */
@@ -771,10 +710,11 @@ static
void hostif_scan_indication(struct ks_wlan_private *priv)
{
int i;
- struct ap_info_t *ap_info;
+ struct ap_info *ap_info;
- netdev_dbg(priv->net_dev, "scan_ind_count = %d\n", priv->scan_ind_count);
- ap_info = (struct ap_info_t *)(priv->rxp);
+ netdev_dbg(priv->net_dev,
+ "scan_ind_count = %d\n", priv->scan_ind_count);
+ ap_info = (struct ap_info *)(priv->rxp);
if (priv->scan_ind_count) {
/* bssid check */
@@ -794,7 +734,7 @@ void hostif_scan_indication(struct ks_wlan_private *priv)
if (priv->scan_ind_count < LOCAL_APLIST_MAX + 1) {
netdev_dbg(priv->net_dev, " scan_ind_count=%d :: aplist.size=%d\n",
priv->scan_ind_count, priv->aplist.size);
- get_ap_information(priv, (struct ap_info_t *)(priv->rxp),
+ get_ap_information(priv, (struct ap_info *)(priv->rxp),
&(priv->aplist.ap[priv->scan_ind_count - 1]));
priv->aplist.size = priv->scan_ind_count;
} else {
@@ -863,8 +803,8 @@ void hostif_adhoc_set_confirm(struct ks_wlan_private *priv)
static
void hostif_associate_indication(struct ks_wlan_private *priv)
{
- struct association_request_t *assoc_req;
- struct association_response_t *assoc_resp;
+ struct association_request *assoc_req;
+ struct association_response *assoc_resp;
unsigned char *pb;
union iwreq_data wrqu;
char buf[IW_CUSTOM_MAX];
@@ -874,8 +814,8 @@ void hostif_associate_indication(struct ks_wlan_private *priv)
static const char associnfo_leader0[] = "ASSOCINFO(ReqIEs=";
static const char associnfo_leader1[] = " RespIEs=";
- assoc_req = (struct association_request_t *)(priv->rxp);
- assoc_resp = (struct association_response_t *)(assoc_req + 1);
+ assoc_req = (struct association_request *)(priv->rxp);
+ assoc_resp = (struct association_response *)(assoc_req + 1);
pb = (unsigned char *)(assoc_resp + 1);
memset(&wrqu, 0, sizeof(wrqu));
@@ -905,13 +845,13 @@ void hostif_associate_indication(struct ks_wlan_private *priv)
static
void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
{
- unsigned int result_code;
+ u32 result_code;
struct net_device *dev = priv->net_dev;
union iwreq_data wrqu;
result_code = get_dword(priv);
- netdev_dbg(priv->net_dev, "result=%d :: scan_ind_count=%d\n", result_code,
- priv->scan_ind_count);
+ netdev_dbg(priv->net_dev, "result=%d :: scan_ind_count=%d\n",
+ result_code, priv->scan_ind_count);
priv->sme_i.sme_flag &= ~SME_AP_SCAN;
hostif_sme_enqueue(priv, SME_BSS_SCAN_CONFIRM);
@@ -926,10 +866,10 @@ static
void hostif_phy_information_confirm(struct ks_wlan_private *priv)
{
struct iw_statistics *wstats = &priv->wstats;
- unsigned char rssi, signal, noise;
- unsigned char link_speed;
- unsigned int transmitted_frame_count, received_fragment_count;
- unsigned int failed_count, fcs_error_count;
+ u8 rssi, signal, noise;
+ u8 link_speed;
+ u32 transmitted_frame_count, received_fragment_count;
+ u32 failed_count, fcs_error_count;
rssi = get_byte(priv);
signal = get_byte(priv);
@@ -964,16 +904,17 @@ void hostif_phy_information_confirm(struct ks_wlan_private *priv)
static
void hostif_mic_failure_confirm(struct ks_wlan_private *priv)
{
- netdev_dbg(priv->net_dev, "mic_failure=%u\n", priv->wpa.mic_failure.failure);
+ netdev_dbg(priv->net_dev, "mic_failure=%u\n",
+ priv->wpa.mic_failure.failure);
hostif_sme_enqueue(priv, SME_MIC_FAILURE_CONFIRM);
}
static
void hostif_event_check(struct ks_wlan_private *priv)
{
- unsigned short event;
+ u16 event;
- event = get_word(priv); /* get event */
+ event = get_word(priv);
switch (event) {
case HIF_DATA_IND:
hostif_data_indication(priv);
@@ -1044,11 +985,11 @@ static void *hostif_generic_request(size_t size, int event)
{
struct hostif_hdr *p;
- p = kzalloc(hif_align_size(size), KS_WLAN_MEM_FLAG);
+ p = kzalloc(hif_align_size(size), GFP_ATOMIC);
if (!p)
return NULL;
- p->size = cpu_to_le16((u16)(size - sizeof(p->size)));
+ p->size = cpu_to_le16(size - sizeof(p->size));
p->event = cpu_to_le16(event);
return p;
@@ -1057,15 +998,14 @@ static void *hostif_generic_request(size_t size, int event)
int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
{
unsigned int skb_len = 0;
-
unsigned char *buffer = NULL;
unsigned int length = 0;
- struct hostif_data_request_t *pp;
+ struct hostif_data_request *pp;
unsigned char *p;
int result = 0;
unsigned short eth_proto;
struct ether_hdr *eth_hdr;
- struct michael_mic_t michael_mic;
+ struct michael_mic michael_mic;
unsigned short keyinfo = 0;
struct ieee802_1x_hdr *aa1x_hdr;
struct wpa_eapol_key *eap_key;
@@ -1098,7 +1038,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
}
size = sizeof(*pp) + 6 + skb_len + 8;
- pp = kmalloc(hif_align_size(size), KS_WLAN_MEM_FLAG);
+ pp = kmalloc(hif_align_size(size), GFP_ATOMIC);
if (!pp) {
ret = -ENOMEM;
goto err_kfree_skb;
@@ -1111,9 +1051,10 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
/* skb check */
eth = (struct ethhdr *)skb->data;
- if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN) != 0) {
- netdev_err(priv->net_dev, "invalid mac address !!\n");
- netdev_err(priv->net_dev, "ethernet->h_source=%pM\n", eth->h_source);
+ if (!ether_addr_equal(&priv->eth_addr[0], eth->h_source)) {
+ netdev_err(priv->net_dev,
+ "Invalid mac address: ethernet->h_source=%pM\n",
+ eth->h_source);
ret = -ENXIO;
goto err_kfree;
}
@@ -1128,7 +1069,6 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
/* EtherType/Length check */
if (*(buffer + 1) + (*buffer << 8) > 1500) {
/* ProtocolEAP = *(buffer+1) + (*buffer << 8); */
- /* netdev_dbg(priv->net_dev, "Send [SNAP]Type %x\n",ProtocolEAP); */
/* SAP/CTL/OUI(6 byte) add */
*p++ = 0xAA; /* DSAP */
*p++ = 0xAA; /* SSAP */
@@ -1169,40 +1109,37 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
priv->wpa.key[1].key_len == 0 &&
priv->wpa.key[2].key_len == 0 &&
priv->wpa.key[3].key_len == 0) {
- pp->auth_type = cpu_to_le16((uint16_t)TYPE_AUTH);
+ pp->auth_type = cpu_to_le16(TYPE_AUTH);
} else {
if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
michael_mic_function(&michael_mic,
- (uint8_t *)priv->wpa.key[0].tx_mic_key,
- (uint8_t *)&pp->data[0],
- (int)skb_len,
- (uint8_t)0, /* priority */
- (uint8_t *)michael_mic.result);
+ priv->wpa.key[0].tx_mic_key,
+ &pp->data[0], skb_len,
+ 0, michael_mic.result);
memcpy(p, michael_mic.result, 8);
length += 8;
skb_len += 8;
p += 8;
pp->auth_type =
- cpu_to_le16((uint16_t)TYPE_DATA);
+ cpu_to_le16(TYPE_DATA);
} else if (priv->wpa.pairwise_suite ==
IW_AUTH_CIPHER_CCMP) {
pp->auth_type =
- cpu_to_le16((uint16_t)TYPE_DATA);
+ cpu_to_le16(TYPE_DATA);
}
}
} else {
if (eth_proto == ETH_P_PAE)
- pp->auth_type = cpu_to_le16((uint16_t)TYPE_AUTH);
+ pp->auth_type = cpu_to_le16(TYPE_AUTH);
else
- pp->auth_type = cpu_to_le16((uint16_t)TYPE_DATA);
+ pp->auth_type = cpu_to_le16(TYPE_DATA);
}
/* header value set */
pp->header.size =
- cpu_to_le16((uint16_t)
- (sizeof(*pp) - sizeof(pp->header.size) + skb_len));
- pp->header.event = cpu_to_le16((uint16_t)HIF_DATA_REQ);
+ cpu_to_le16((sizeof(*pp) - sizeof(pp->header.size) + skb_len));
+ pp->header.event = cpu_to_le16(HIF_DATA_REQ);
/* tx request */
result = ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + skb_len),
@@ -1213,7 +1150,8 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
priv->wpa.mic_failure.failure > 0) {
if (keyinfo & WPA_KEY_INFO_ERROR &&
keyinfo & WPA_KEY_INFO_REQUEST) {
- netdev_err(priv->net_dev, " MIC ERROR Report SET : %04X\n", keyinfo);
+ netdev_err(priv->net_dev,
+ "MIC ERROR Report SET : %04X\n", keyinfo);
hostif_sme_enqueue(priv, SME_MIC_FAILURE_REQUEST);
}
if (priv->wpa.mic_failure.failure == 2)
@@ -1236,27 +1174,31 @@ static inline void ps_confirm_wait_inc(struct ks_wlan_private *priv)
atomic_inc(&priv->psstatus.confirm_wait);
}
-static
-void hostif_mib_get_request(struct ks_wlan_private *priv,
- unsigned long mib_attribute)
+static inline void send_request_to_device(struct ks_wlan_private *priv,
+ void *data, size_t size)
+{
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, data, size, NULL, NULL);
+}
+
+static void hostif_mib_get_request(struct ks_wlan_private *priv,
+ u32 mib_attribute)
{
- struct hostif_mib_get_request_t *pp;
+ struct hostif_mib_get_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_MIB_GET_REQ);
if (!pp)
return;
- pp->mib_attribute = cpu_to_le32((uint32_t)mib_attribute);
+ pp->mib_attribute = cpu_to_le32(mib_attribute);
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
-static
-void hostif_mib_set_request(struct ks_wlan_private *priv,
- unsigned long mib_attribute, unsigned short size,
- unsigned short type, void *vp)
+static void hostif_mib_set_request(struct ks_wlan_private *priv,
+ enum mib_attribute attr,
+ enum mib_data_type type,
+ void *data, size_t size)
{
struct hostif_mib_set_request_t *pp;
@@ -1267,30 +1209,52 @@ void hostif_mib_set_request(struct ks_wlan_private *priv,
if (!pp)
return;
- pp->mib_attribute = cpu_to_le32((uint32_t)mib_attribute);
- pp->mib_value.size = cpu_to_le16((uint16_t)size);
- pp->mib_value.type = cpu_to_le16((uint16_t)type);
- memcpy(&pp->mib_value.body, vp, size);
+ pp->mib_attribute = cpu_to_le32(attr);
+ pp->mib_value.size = cpu_to_le16(size);
+ pp->mib_value.type = cpu_to_le16(type);
+ memcpy(&pp->mib_value.body, data, size);
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + size), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp) + size));
+}
+
+static inline void hostif_mib_set_request_int(struct ks_wlan_private *priv,
+ enum mib_attribute attr, int val)
+{
+ __le32 v = cpu_to_le32(val);
+ size_t size = sizeof(v);
+
+ hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_INT, &v, size);
+}
+
+static inline void hostif_mib_set_request_bool(struct ks_wlan_private *priv,
+ enum mib_attribute attr,
+ bool val)
+{
+ __le32 v = cpu_to_le32(val);
+ size_t size = sizeof(v);
+
+ hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_BOOL, &v, size);
+}
+
+static inline void hostif_mib_set_request_ostring(struct ks_wlan_private *priv,
+ enum mib_attribute attr,
+ void *data, size_t size)
+{
+ hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_OSTRING, data, size);
}
static
void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode)
{
- struct hostif_start_request_t *pp;
+ struct hostif_start_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_START_REQ);
if (!pp)
return;
- pp->mode = cpu_to_le16((uint16_t)mode);
+ pp->mode = cpu_to_le16(mode);
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
priv->aplist.size = 0;
priv->scan_ind_count = 0;
@@ -1310,14 +1274,15 @@ static __le16 ks_wlan_cap(struct ks_wlan_private *priv)
capability &= ~(WLAN_CAPABILITY_DSSS_OFDM);
}
- return cpu_to_le16((uint16_t)capability);
+ return cpu_to_le16(capability);
}
-static void init_request(struct ks_wlan_private *priv, struct hostif_request_t *req)
+static void init_request(struct ks_wlan_private *priv,
+ struct hostif_request *req)
{
- req->phy_type = cpu_to_le16((uint16_t)(priv->reg.phy_type));
- req->cts_mode = cpu_to_le16((uint16_t)(priv->reg.cts_mode));
- req->scan_type = cpu_to_le16((uint16_t)(priv->reg.scan_type));
+ req->phy_type = cpu_to_le16(priv->reg.phy_type);
+ req->cts_mode = cpu_to_le16(priv->reg.cts_mode);
+ req->scan_type = cpu_to_le16(priv->reg.scan_type);
req->rate_set.size = priv->reg.rate_set.size;
req->capability = ks_wlan_cap(priv);
memcpy(&req->rate_set.body[0], &priv->reg.rate_set.body[0],
@@ -1327,24 +1292,22 @@ static void init_request(struct ks_wlan_private *priv, struct hostif_request_t *
static
void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
{
- struct hostif_ps_adhoc_set_request_t *pp;
+ struct hostif_ps_adhoc_set_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_PS_ADH_SET_REQ);
if (!pp)
return;
init_request(priv, &pp->request);
- pp->channel = cpu_to_le16((uint16_t)(priv->reg.channel));
+ pp->channel = cpu_to_le16(priv->reg.channel);
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
static
void hostif_infrastructure_set_request(struct ks_wlan_private *priv, int event)
{
- struct hostif_infrastructure_set_request_t *pp;
+ struct hostif_infrastructure_set_request *pp;
pp = hostif_generic_request(sizeof(*pp), event);
if (!pp)
@@ -1354,8 +1317,8 @@ void hostif_infrastructure_set_request(struct ks_wlan_private *priv, int event)
pp->ssid.size = priv->reg.ssid.size;
memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
pp->beacon_lost_count =
- cpu_to_le16((uint16_t)(priv->reg.beacon_lost_count));
- pp->auth_type = cpu_to_le16((uint16_t)(priv->reg.authenticate_type));
+ cpu_to_le16(priv->reg.beacon_lost_count);
+ pp->auth_type = cpu_to_le16(priv->reg.authenticate_type);
pp->channel_list.body[0] = 1;
pp->channel_list.body[1] = 8;
@@ -1377,34 +1340,30 @@ void hostif_infrastructure_set_request(struct ks_wlan_private *priv, int event)
pp->channel_list.size = 14;
}
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
static
void hostif_adhoc_set_request(struct ks_wlan_private *priv)
{
- struct hostif_adhoc_set_request_t *pp;
+ struct hostif_adhoc_set_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
if (!pp)
return;
init_request(priv, &pp->request);
- pp->channel = cpu_to_le16((uint16_t)(priv->reg.channel));
+ pp->channel = cpu_to_le16(priv->reg.channel);
pp->ssid.size = priv->reg.ssid.size;
memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
static
void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
{
- struct hostif_adhoc_set2_request_t *pp;
+ struct hostif_adhoc_set2_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
if (!pp)
@@ -1418,82 +1377,70 @@ void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
pp->channel_list.size = 1;
memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
static
void hostif_stop_request(struct ks_wlan_private *priv)
{
- struct hostif_stop_request_t *pp;
+ struct hostif_stop_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_STOP_REQ);
if (!pp)
return;
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
static
void hostif_phy_information_request(struct ks_wlan_private *priv)
{
- struct hostif_phy_information_request_t *pp;
+ struct hostif_phy_information_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_PHY_INFO_REQ);
if (!pp)
return;
if (priv->reg.phy_info_timer) {
- pp->type = cpu_to_le16((uint16_t)TIME_TYPE);
- pp->time = cpu_to_le16((uint16_t)(priv->reg.phy_info_timer));
+ pp->type = cpu_to_le16(TIME_TYPE);
+ pp->time = cpu_to_le16(priv->reg.phy_info_timer);
} else {
- pp->type = cpu_to_le16((uint16_t)NORMAL_TYPE);
- pp->time = cpu_to_le16((uint16_t)0);
+ pp->type = cpu_to_le16(NORMAL_TYPE);
+ pp->time = cpu_to_le16(0);
}
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
static
void hostif_power_mgmt_request(struct ks_wlan_private *priv,
- unsigned long mode, unsigned long wake_up,
- unsigned long receive_dtims)
+ u32 mode, u32 wake_up, u32 receive_dtims)
{
- struct hostif_power_mgmt_request_t *pp;
+ struct hostif_power_mgmt_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_POWER_MGMT_REQ);
if (!pp)
return;
- pp->mode = cpu_to_le32((uint32_t)mode);
- pp->wake_up = cpu_to_le32((uint32_t)wake_up);
- pp->receive_dtims = cpu_to_le32((uint32_t)receive_dtims);
+ pp->mode = cpu_to_le32(mode);
+ pp->wake_up = cpu_to_le32(wake_up);
+ pp->receive_dtims = cpu_to_le32(receive_dtims);
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
static
void hostif_sleep_request(struct ks_wlan_private *priv,
enum sleep_mode_type mode)
{
- struct hostif_sleep_request_t *pp;
+ struct hostif_sleep_request *pp;
if (mode == SLP_SLEEP) {
pp = hostif_generic_request(sizeof(*pp), HIF_SLEEP_REQ);
if (!pp)
return;
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL,
- NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
} else if (mode == SLP_ACTIVE) {
atomic_set(&priv->sleepstatus.wakeup_request, 1);
queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
@@ -1505,10 +1452,10 @@ void hostif_sleep_request(struct ks_wlan_private *priv,
static
void hostif_bss_scan_request(struct ks_wlan_private *priv,
- unsigned long scan_type, uint8_t *scan_ssid,
- uint8_t scan_ssid_len)
+ unsigned long scan_type, u8 *scan_ssid,
+ u8 scan_ssid_len)
{
- struct hostif_bss_scan_request_t *pp;
+ struct hostif_bss_scan_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_SCAN_REQ);
if (!pp)
@@ -1516,8 +1463,8 @@ void hostif_bss_scan_request(struct ks_wlan_private *priv,
pp->scan_type = scan_type;
- pp->ch_time_min = cpu_to_le32((uint32_t)110); /* default value */
- pp->ch_time_max = cpu_to_le32((uint32_t)130); /* default value */
+ pp->ch_time_min = cpu_to_le32(110); /* default value */
+ pp->ch_time_max = cpu_to_le32(130); /* default value */
pp->channel_list.body[0] = 1;
pp->channel_list.body[1] = 8;
pp->channel_list.body[2] = 2;
@@ -1545,9 +1492,7 @@ void hostif_bss_scan_request(struct ks_wlan_private *priv,
memcpy(&pp->ssid.body[0], scan_ssid, scan_ssid_len);
}
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
priv->aplist.size = 0;
priv->scan_ind_count = 0;
@@ -1555,45 +1500,42 @@ void hostif_bss_scan_request(struct ks_wlan_private *priv,
static
void hostif_mic_failure_request(struct ks_wlan_private *priv,
- unsigned short failure_count,
- unsigned short timer)
+ u16 failure_count, u16 timer)
{
- struct hostif_mic_failure_request_t *pp;
+ struct hostif_mic_failure_request *pp;
pp = hostif_generic_request(sizeof(*pp), HIF_MIC_FAILURE_REQ);
if (!pp)
return;
- pp->failure_count = cpu_to_le16((uint16_t)failure_count);
- pp->timer = cpu_to_le16((uint16_t)timer);
+ pp->failure_count = cpu_to_le16(failure_count);
+ pp->timer = cpu_to_le16(timer);
- /* send to device request */
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
+ send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
}
/* Device I/O Receive indicate */
static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p,
unsigned int size)
{
- if (priv->device_open_status) {
- spin_lock(&priv->dev_read_lock); /* request spin lock */
- priv->dev_data[atomic_read(&priv->rec_count)] = p;
- priv->dev_size[atomic_read(&priv->rec_count)] = size;
-
- if (atomic_read(&priv->event_count) != DEVICE_STOCK_COUNT) {
- /* rx event count inc */
- atomic_inc(&priv->event_count);
- }
- atomic_inc(&priv->rec_count);
- if (atomic_read(&priv->rec_count) == DEVICE_STOCK_COUNT)
- atomic_set(&priv->rec_count, 0);
+ if (!priv->is_device_open)
+ return;
- wake_up_interruptible_all(&priv->devread_wait);
+ spin_lock(&priv->dev_read_lock);
+ priv->dev_data[atomic_read(&priv->rec_count)] = p;
+ priv->dev_size[atomic_read(&priv->rec_count)] = size;
- /* release spin lock */
- spin_unlock(&priv->dev_read_lock);
+ if (atomic_read(&priv->event_count) != DEVICE_STOCK_COUNT) {
+ /* rx event count inc */
+ atomic_inc(&priv->event_count);
}
+ atomic_inc(&priv->rec_count);
+ if (atomic_read(&priv->rec_count) == DEVICE_STOCK_COUNT)
+ atomic_set(&priv->rec_count, 0);
+
+ wake_up_interruptible_all(&priv->devread_wait);
+
+ spin_unlock(&priv->dev_read_lock);
}
void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
@@ -1604,247 +1546,183 @@ void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
priv->rxp = p;
priv->rx_size = size;
- if (get_word(priv) == priv->rx_size) { /* length check !! */
- hostif_event_check(priv); /* event check */
- }
+ if (get_word(priv) == priv->rx_size)
+ hostif_event_check(priv);
}
-static
-void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
+static void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
{
- __le32 val;
-
switch (type) {
case SME_WEP_INDEX_REQUEST:
- val = cpu_to_le32((uint32_t)(priv->reg.wep_index));
- hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
- sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID,
+ priv->reg.wep_index);
break;
case SME_WEP_KEY1_REQUEST:
- if (!priv->wpa.wpa_enabled)
- hostif_mib_set_request(priv,
+ if (priv->wpa.wpa_enabled)
+ return;
+ hostif_mib_set_request_ostring(priv,
DOT11_WEP_DEFAULT_KEY_VALUE1,
- priv->reg.wep_key[0].size,
- MIB_VALUE_TYPE_OSTRING,
- &priv->reg.wep_key[0].val[0]);
+ &priv->reg.wep_key[0].val[0],
+ priv->reg.wep_key[0].size);
break;
case SME_WEP_KEY2_REQUEST:
- if (!priv->wpa.wpa_enabled)
- hostif_mib_set_request(priv,
+ if (priv->wpa.wpa_enabled)
+ return;
+ hostif_mib_set_request_ostring(priv,
DOT11_WEP_DEFAULT_KEY_VALUE2,
- priv->reg.wep_key[1].size,
- MIB_VALUE_TYPE_OSTRING,
- &priv->reg.wep_key[1].val[0]);
+ &priv->reg.wep_key[1].val[0],
+ priv->reg.wep_key[1].size);
break;
case SME_WEP_KEY3_REQUEST:
- if (!priv->wpa.wpa_enabled)
- hostif_mib_set_request(priv,
+ if (priv->wpa.wpa_enabled)
+ return;
+ hostif_mib_set_request_ostring(priv,
DOT11_WEP_DEFAULT_KEY_VALUE3,
- priv->reg.wep_key[2].size,
- MIB_VALUE_TYPE_OSTRING,
- &priv->reg.wep_key[2].val[0]);
+ &priv->reg.wep_key[2].val[0],
+ priv->reg.wep_key[2].size);
break;
case SME_WEP_KEY4_REQUEST:
- if (!priv->wpa.wpa_enabled)
- hostif_mib_set_request(priv,
+ if (priv->wpa.wpa_enabled)
+ return;
+ hostif_mib_set_request_ostring(priv,
DOT11_WEP_DEFAULT_KEY_VALUE4,
- priv->reg.wep_key[3].size,
- MIB_VALUE_TYPE_OSTRING,
- &priv->reg.wep_key[3].val[0]);
+ &priv->reg.wep_key[3].val[0],
+ priv->reg.wep_key[3].size);
break;
case SME_WEP_FLAG_REQUEST:
- val = cpu_to_le32((uint32_t)(priv->reg.privacy_invoked));
- hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
- sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+ hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED,
+ priv->reg.privacy_invoked);
break;
}
}
-struct wpa_suite_t {
+struct wpa_suite {
__le16 size;
unsigned char suite[4][CIPHER_ID_LEN];
} __packed;
-struct rsn_mode_t {
+struct rsn_mode {
__le32 rsn_mode;
__le16 rsn_capability;
} __packed;
-static
-void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
+static void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
{
- struct wpa_suite_t wpa_suite;
- struct rsn_mode_t rsn_mode;
- __le32 val;
+ struct wpa_suite wpa_suite;
+ struct rsn_mode rsn_mode;
+ size_t size;
+ u32 mode;
+ const u8 *buf = NULL;
memset(&wpa_suite, 0, sizeof(wpa_suite));
switch (type) {
case SME_RSN_UCAST_REQUEST:
- wpa_suite.size = cpu_to_le16((uint16_t)1);
+ wpa_suite.size = cpu_to_le16(1);
switch (priv->wpa.pairwise_suite) {
case IW_AUTH_CIPHER_NONE:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_NONE, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_NONE, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE;
break;
case IW_AUTH_CIPHER_WEP40:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_WEP40, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_WEP40, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40;
break;
case IW_AUTH_CIPHER_TKIP:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_TKIP, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_TKIP, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP;
break;
case IW_AUTH_CIPHER_CCMP:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_CCMP, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_CCMP, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP;
break;
case IW_AUTH_CIPHER_WEP104:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_WEP104, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_WEP104, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104;
break;
}
- hostif_mib_set_request(priv, DOT11_RSN_CONFIG_UNICAST_CIPHER,
- sizeof(wpa_suite.size) +
- CIPHER_ID_LEN *
- le16_to_cpu(wpa_suite.size),
- MIB_VALUE_TYPE_OSTRING, &wpa_suite);
+ if (buf)
+ memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN);
+ size = sizeof(wpa_suite.size) +
+ (CIPHER_ID_LEN * le16_to_cpu(wpa_suite.size));
+ hostif_mib_set_request_ostring(priv,
+ DOT11_RSN_CONFIG_UNICAST_CIPHER,
+ &wpa_suite, size);
break;
case SME_RSN_MCAST_REQUEST:
switch (priv->wpa.group_suite) {
case IW_AUTH_CIPHER_NONE:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_NONE, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_NONE, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE;
break;
case IW_AUTH_CIPHER_WEP40:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_WEP40, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_WEP40, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40;
break;
case IW_AUTH_CIPHER_TKIP:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_TKIP, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_TKIP, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP;
break;
case IW_AUTH_CIPHER_CCMP:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_CCMP, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_CCMP, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP;
break;
case IW_AUTH_CIPHER_WEP104:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA2_WEP104, CIPHER_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- CIPHER_ID_WPA_WEP104, CIPHER_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104;
break;
}
-
- hostif_mib_set_request(priv, DOT11_RSN_CONFIG_MULTICAST_CIPHER,
- CIPHER_ID_LEN, MIB_VALUE_TYPE_OSTRING,
- &wpa_suite.suite[0][0]);
+ if (buf)
+ memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN);
+ hostif_mib_set_request_ostring(priv,
+ DOT11_RSN_CONFIG_MULTICAST_CIPHER,
+ &wpa_suite.suite[0][0],
+ CIPHER_ID_LEN);
break;
case SME_RSN_AUTH_REQUEST:
- wpa_suite.size = cpu_to_le16((uint16_t)1);
+ wpa_suite.size = cpu_to_le16(1);
switch (priv->wpa.key_mgmt_suite) {
case IW_AUTH_KEY_MGMT_802_1X:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- KEY_MGMT_ID_WPA2_1X, KEY_MGMT_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- KEY_MGMT_ID_WPA_1X, KEY_MGMT_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ KEY_MGMT_ID_WPA2_1X : KEY_MGMT_ID_WPA_1X;
break;
case IW_AUTH_KEY_MGMT_PSK:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- KEY_MGMT_ID_WPA2_PSK, KEY_MGMT_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- KEY_MGMT_ID_WPA_PSK, KEY_MGMT_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ KEY_MGMT_ID_WPA2_PSK : KEY_MGMT_ID_WPA_PSK;
break;
case 0:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- KEY_MGMT_ID_WPA2_NONE, KEY_MGMT_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- KEY_MGMT_ID_WPA_NONE, KEY_MGMT_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ KEY_MGMT_ID_WPA2_NONE : KEY_MGMT_ID_WPA_NONE;
break;
case 4:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
- memcpy(&wpa_suite.suite[0][0],
- KEY_MGMT_ID_WPA2_WPANONE,
- KEY_MGMT_ID_LEN);
- else
- memcpy(&wpa_suite.suite[0][0],
- KEY_MGMT_ID_WPA_WPANONE,
- KEY_MGMT_ID_LEN);
+ buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ KEY_MGMT_ID_WPA2_WPANONE :
+ KEY_MGMT_ID_WPA_WPANONE;
break;
}
- hostif_mib_set_request(priv, DOT11_RSN_CONFIG_AUTH_SUITE,
- sizeof(wpa_suite.size) +
- KEY_MGMT_ID_LEN *
- le16_to_cpu(wpa_suite.size),
- MIB_VALUE_TYPE_OSTRING, &wpa_suite);
+ if (buf)
+ memcpy(&wpa_suite.suite[0][0], buf, KEY_MGMT_ID_LEN);
+ size = sizeof(wpa_suite.size) +
+ (KEY_MGMT_ID_LEN * le16_to_cpu(wpa_suite.size));
+ hostif_mib_set_request_ostring(priv,
+ DOT11_RSN_CONFIG_AUTH_SUITE,
+ &wpa_suite, size);
break;
case SME_RSN_ENABLED_REQUEST:
- val = cpu_to_le32((uint32_t)(priv->wpa.rsn_enabled));
- hostif_mib_set_request(priv, DOT11_RSN_ENABLED,
- sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+ hostif_mib_set_request_bool(priv, DOT11_RSN_ENABLED,
+ priv->wpa.rsn_enabled);
break;
case SME_RSN_MODE_REQUEST:
- if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) {
- rsn_mode.rsn_mode =
- cpu_to_le32((uint32_t)RSN_MODE_WPA2);
- rsn_mode.rsn_capability = cpu_to_le16((uint16_t)0);
- } else if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) {
- rsn_mode.rsn_mode =
- cpu_to_le32((uint32_t)RSN_MODE_WPA);
- rsn_mode.rsn_capability = cpu_to_le16((uint16_t)0);
- } else {
- rsn_mode.rsn_mode =
- cpu_to_le32((uint32_t)RSN_MODE_NONE);
- rsn_mode.rsn_capability = cpu_to_le16((uint16_t)0);
- }
- hostif_mib_set_request(priv, LOCAL_RSN_MODE, sizeof(rsn_mode),
- MIB_VALUE_TYPE_OSTRING, &rsn_mode);
+ mode = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
+ RSN_MODE_WPA2 :
+ (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) ?
+ RSN_MODE_WPA : RSN_MODE_NONE;
+ rsn_mode.rsn_mode = cpu_to_le32(mode);
+ rsn_mode.rsn_capability = cpu_to_le16(0);
+ hostif_mib_set_request_ostring(priv, LOCAL_RSN_MODE,
+ &rsn_mode, sizeof(rsn_mode));
break;
}
}
@@ -1884,7 +1762,7 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
/* rate mask by phy setting */
if (priv->reg.phy_type == D_11B_ONLY_MODE) {
for (i = 0; i < priv->reg.rate_set.size; i++) {
- if (!IS_11B_RATE(priv->reg.rate_set.body[i]))
+ if (!is_11b_rate(priv->reg.rate_set.body[i]))
break;
if ((priv->reg.rate_set.body[i] & RATE_MASK) >= TX_RATE_5M) {
@@ -1897,10 +1775,10 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
} else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
for (i = 0; i < priv->reg.rate_set.size; i++) {
- if (!IS_11BG_RATE(priv->reg.rate_set.body[i]))
+ if (!is_11bg_rate(priv->reg.rate_set.body[i]))
break;
- if (IS_OFDM_EXT_RATE(priv->reg.rate_set.body[i])) {
+ if (is_ofdm_ext_rate(priv->reg.rate_set.body[i])) {
rate_octet[i] = priv->reg.rate_set.body[i] &
RATE_MASK;
} else {
@@ -1923,21 +1801,20 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
switch (priv->reg.operation_mode) {
case MODE_PSEUDO_ADHOC:
- /* Pseudo Ad-Hoc mode */
hostif_ps_adhoc_set_request(priv);
break;
case MODE_INFRASTRUCTURE:
- /* Infrastructure mode */
if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
- hostif_infrastructure_set_request(priv, HIF_INFRA_SET_REQ);
+ hostif_infrastructure_set_request(priv,
+ HIF_INFRA_SET_REQ);
} else {
- hostif_infrastructure_set_request(priv, HIF_INFRA_SET2_REQ);
+ hostif_infrastructure_set_request(priv,
+ HIF_INFRA_SET2_REQ);
netdev_dbg(priv->net_dev,
"Infra bssid = %pM\n", priv->reg.bssid);
}
break;
case MODE_ADHOC:
- /* IEEE802.11 Ad-Hoc mode */
if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
hostif_adhoc_set_request(priv);
} else {
@@ -1958,7 +1835,6 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
int mc_count;
struct netdev_hw_addr *ha;
char set_address[NIC_MAX_MCAST_LIST * ETH_ALEN];
- __le32 filter_type;
int i = 0;
spin_lock(&priv->multicast_spin);
@@ -1966,156 +1842,117 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
if (dev->flags & IFF_PROMISC) {
- filter_type = cpu_to_le32((uint32_t)MCAST_FILTER_PROMISC);
- hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
- sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
- &filter_type);
+ hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
+ MCAST_FILTER_PROMISC);
goto spin_unlock;
}
if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
(dev->flags & IFF_ALLMULTI)) {
- filter_type = cpu_to_le32((uint32_t)MCAST_FILTER_MCASTALL);
- hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
- sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
- &filter_type);
+ hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
+ MCAST_FILTER_MCASTALL);
goto spin_unlock;
}
if (priv->sme_i.sme_flag & SME_MULTICAST) {
mc_count = netdev_mc_count(dev);
netdev_for_each_mc_addr(ha, dev) {
- memcpy(&set_address[i * ETH_ALEN], ha->addr, ETH_ALEN);
+ ether_addr_copy(&set_address[i * ETH_ALEN], ha->addr);
i++;
}
priv->sme_i.sme_flag &= ~SME_MULTICAST;
- hostif_mib_set_request(priv, LOCAL_MULTICAST_ADDRESS,
- ETH_ALEN * mc_count,
- MIB_VALUE_TYPE_OSTRING,
- &set_address[0]);
+ hostif_mib_set_request_ostring(priv, LOCAL_MULTICAST_ADDRESS,
+ &set_address[0],
+ ETH_ALEN * mc_count);
} else {
- filter_type = cpu_to_le32((uint32_t)MCAST_FILTER_MCAST);
priv->sme_i.sme_flag |= SME_MULTICAST;
- hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
- sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
- &filter_type);
+ hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
+ MCAST_FILTER_MCAST);
}
spin_unlock:
spin_unlock(&priv->multicast_spin);
}
-static
-void hostif_sme_power_mgmt_set(struct ks_wlan_private *priv)
+static void hostif_sme_power_mgmt_set(struct ks_wlan_private *priv)
{
- unsigned long mode, wake_up, receive_dtims;
+ u32 mode, wake_up, receive_dtims;
- switch (priv->reg.power_mgmt) {
- case POWER_MGMT_ACTIVE:
+ if (priv->reg.power_mgmt != POWER_MGMT_SAVE1 &&
+ priv->reg.power_mgmt != POWER_MGMT_SAVE2) {
mode = POWER_ACTIVE;
wake_up = 0;
receive_dtims = 0;
- break;
- case POWER_MGMT_SAVE1:
- if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
- mode = POWER_SAVE;
- wake_up = 0;
- receive_dtims = 0;
- } else {
- mode = POWER_ACTIVE;
- wake_up = 0;
- receive_dtims = 0;
- }
- break;
- case POWER_MGMT_SAVE2:
- if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
- mode = POWER_SAVE;
- wake_up = 0;
- receive_dtims = 1;
- } else {
- mode = POWER_ACTIVE;
- wake_up = 0;
- receive_dtims = 0;
- }
- break;
- default:
- mode = POWER_ACTIVE;
+ } else {
+ mode = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ?
+ POWER_SAVE : POWER_ACTIVE;
wake_up = 0;
- receive_dtims = 0;
- break;
+ receive_dtims = (priv->reg.operation_mode == MODE_INFRASTRUCTURE &&
+ priv->reg.power_mgmt == POWER_MGMT_SAVE2);
}
+
hostif_power_mgmt_request(priv, mode, wake_up, receive_dtims);
}
-static
-void hostif_sme_sleep_set(struct ks_wlan_private *priv)
+static void hostif_sme_sleep_set(struct ks_wlan_private *priv)
{
- switch (priv->sleep_mode) {
- case SLP_SLEEP:
- hostif_sleep_request(priv, priv->sleep_mode);
- break;
- case SLP_ACTIVE:
- hostif_sleep_request(priv, priv->sleep_mode);
- break;
- default:
- break;
- }
+ if (priv->sleep_mode != SLP_SLEEP &&
+ priv->sleep_mode != SLP_ACTIVE)
+ return;
+
+ hostif_sleep_request(priv, priv->sleep_mode);
}
static
void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
{
- __le32 val;
-
switch (type) {
case SME_SET_FLAG:
- val = cpu_to_le32((uint32_t)(priv->reg.privacy_invoked));
- hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
- sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+ hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED,
+ priv->reg.privacy_invoked);
break;
case SME_SET_TXKEY:
- val = cpu_to_le32((uint32_t)(priv->wpa.txkey));
- hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
- sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID,
+ priv->wpa.txkey);
break;
case SME_SET_KEY1:
- hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE1,
- priv->wpa.key[0].key_len,
- MIB_VALUE_TYPE_OSTRING,
- &priv->wpa.key[0].key_val[0]);
+ hostif_mib_set_request_ostring(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE1,
+ &priv->wpa.key[0].key_val[0],
+ priv->wpa.key[0].key_len);
break;
case SME_SET_KEY2:
- hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE2,
- priv->wpa.key[1].key_len,
- MIB_VALUE_TYPE_OSTRING,
- &priv->wpa.key[1].key_val[0]);
+ hostif_mib_set_request_ostring(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE2,
+ &priv->wpa.key[1].key_val[0],
+ priv->wpa.key[1].key_len);
break;
case SME_SET_KEY3:
- hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE3,
- priv->wpa.key[2].key_len,
- MIB_VALUE_TYPE_OSTRING,
- &priv->wpa.key[2].key_val[0]);
+ hostif_mib_set_request_ostring(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE3,
+ &priv->wpa.key[2].key_val[0],
+ priv->wpa.key[2].key_len);
break;
case SME_SET_KEY4:
- hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE4,
- priv->wpa.key[3].key_len,
- MIB_VALUE_TYPE_OSTRING,
- &priv->wpa.key[3].key_val[0]);
+ hostif_mib_set_request_ostring(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE4,
+ &priv->wpa.key[3].key_val[0],
+ priv->wpa.key[3].key_len);
break;
case SME_SET_PMK_TSC:
- hostif_mib_set_request(priv, DOT11_PMK_TSC,
- WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
- &priv->wpa.key[0].rx_seq[0]);
+ hostif_mib_set_request_ostring(priv, DOT11_PMK_TSC,
+ &priv->wpa.key[0].rx_seq[0],
+ WPA_RX_SEQ_LEN);
break;
case SME_SET_GMK1_TSC:
- hostif_mib_set_request(priv, DOT11_GMK1_TSC,
- WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
- &priv->wpa.key[1].rx_seq[0]);
+ hostif_mib_set_request_ostring(priv, DOT11_GMK1_TSC,
+ &priv->wpa.key[1].rx_seq[0],
+ WPA_RX_SEQ_LEN);
break;
case SME_SET_GMK2_TSC:
- hostif_mib_set_request(priv, DOT11_GMK2_TSC,
- WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
- &priv->wpa.key[2].rx_seq[0]);
+ hostif_mib_set_request_ostring(priv, DOT11_GMK2_TSC,
+ &priv->wpa.key[2].rx_seq[0],
+ WPA_RX_SEQ_LEN);
break;
}
}
@@ -2123,38 +1960,34 @@ void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
static
void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
{
- struct pmk_cache_t {
+ struct pmk_cache {
__le16 size;
struct {
u8 bssid[ETH_ALEN];
u8 pmkid[IW_PMKID_LEN];
} __packed list[PMK_LIST_MAX];
} __packed pmkcache;
- struct pmk_t *pmk;
- int i;
+ struct pmk *pmk;
+ size_t size;
+ int i = 0;
- i = 0;
list_for_each_entry(pmk, &priv->pmklist.head, list) {
- if (i < PMK_LIST_MAX) {
- memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN);
- memcpy(pmkcache.list[i].pmkid, pmk->pmkid,
- IW_PMKID_LEN);
- i++;
- }
+ if (i >= PMK_LIST_MAX)
+ break;
+ ether_addr_copy(pmkcache.list[i].bssid, pmk->bssid);
+ memcpy(pmkcache.list[i].pmkid, pmk->pmkid, IW_PMKID_LEN);
+ i++;
}
- pmkcache.size = cpu_to_le16((uint16_t)(priv->pmklist.size));
- hostif_mib_set_request(priv, LOCAL_PMK,
- sizeof(priv->pmklist.size) + (ETH_ALEN +
- IW_PMKID_LEN) *
- (priv->pmklist.size), MIB_VALUE_TYPE_OSTRING,
- &pmkcache);
+ pmkcache.size = cpu_to_le16(priv->pmklist.size);
+ size = sizeof(priv->pmklist.size) +
+ ((ETH_ALEN + IW_PMKID_LEN) * priv->pmklist.size);
+ hostif_mib_set_request_ostring(priv, LOCAL_PMK, &pmkcache, size);
}
/* execute sme */
-static
-void hostif_sme_execute(struct ks_wlan_private *priv, int event)
+static void hostif_sme_execute(struct ks_wlan_private *priv, int event)
{
- __le32 val;
+ u16 failure;
switch (event) {
case SME_START:
@@ -2165,9 +1998,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
hostif_sme_multicast_set(priv);
break;
case SME_MACADDRESS_SET_REQUEST:
- hostif_mib_set_request(priv, LOCAL_CURRENTADDRESS, ETH_ALEN,
- MIB_VALUE_TYPE_OSTRING,
- &priv->eth_addr[0]);
+ hostif_mib_set_request_ostring(priv, LOCAL_CURRENTADDRESS,
+ &priv->eth_addr[0], ETH_ALEN);
break;
case SME_BSS_SCAN_REQUEST:
hostif_bss_scan_request(priv, priv->reg.scan_type,
@@ -2180,18 +2012,15 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
hostif_phy_information_request(priv);
break;
case SME_MIC_FAILURE_REQUEST:
- if (priv->wpa.mic_failure.failure == 1) {
- hostif_mic_failure_request(priv,
- priv->wpa.mic_failure.failure - 1,
- 0);
- } else if (priv->wpa.mic_failure.failure == 2) {
- hostif_mic_failure_request(priv,
- priv->wpa.mic_failure.failure - 1,
- priv->wpa.mic_failure.counter);
- } else {
- netdev_err(priv->net_dev, "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
- priv->wpa.mic_failure.failure);
+ failure = priv->wpa.mic_failure.failure;
+ if (failure != 1 && failure != 2) {
+ netdev_err(priv->net_dev,
+ "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
+ failure);
+ return;
}
+ hostif_mic_failure_request(priv, failure - 1, (failure == 1) ?
+ 0 : priv->wpa.mic_failure.counter);
break;
case SME_MIC_FAILURE_CONFIRM:
if (priv->wpa.mic_failure.failure == 2) {
@@ -2213,14 +2042,12 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
hostif_stop_request(priv);
break;
case SME_RTS_THRESHOLD_REQUEST:
- val = cpu_to_le32((uint32_t)(priv->reg.rts));
- hostif_mib_set_request(priv, DOT11_RTS_THRESHOLD,
- sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ hostif_mib_set_request_int(priv, DOT11_RTS_THRESHOLD,
+ priv->reg.rts);
break;
case SME_FRAGMENTATION_THRESHOLD_REQUEST:
- val = cpu_to_le32((uint32_t)(priv->reg.fragment));
- hostif_mib_set_request(priv, DOT11_FRAGMENTATION_THRESHOLD,
- sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ hostif_mib_set_request_int(priv, DOT11_FRAGMENTATION_THRESHOLD,
+ priv->reg.fragment);
break;
case SME_WEP_INDEX_REQUEST:
case SME_WEP_KEY1_REQUEST:
@@ -2251,26 +2078,20 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
case SME_SET_PMKSA:
hostif_sme_set_pmksa(priv);
break;
-#ifdef WPS
case SME_WPS_ENABLE_REQUEST:
- hostif_mib_set_request(priv, LOCAL_WPS_ENABLE,
- sizeof(priv->wps.wps_enabled),
- MIB_VALUE_TYPE_INT,
- &priv->wps.wps_enabled);
+ hostif_mib_set_request_int(priv, LOCAL_WPS_ENABLE,
+ priv->wps.wps_enabled);
break;
case SME_WPS_PROBE_REQUEST:
- hostif_mib_set_request(priv, LOCAL_WPS_PROBE_REQ,
- priv->wps.ielen,
- MIB_VALUE_TYPE_OSTRING, priv->wps.ie);
+ hostif_mib_set_request_ostring(priv, LOCAL_WPS_PROBE_REQ,
+ priv->wps.ie, priv->wps.ielen);
break;
-#endif /* WPS */
case SME_MODE_SET_REQUEST:
hostif_sme_mode_setup(priv);
break;
case SME_SET_GAIN:
- hostif_mib_set_request(priv, LOCAL_GAIN,
- sizeof(priv->gain),
- MIB_VALUE_TYPE_OSTRING, &priv->gain);
+ hostif_mib_set_request_ostring(priv, LOCAL_GAIN,
+ &priv->gain, sizeof(priv->gain));
break;
case SME_GET_GAIN:
hostif_mib_get_request(priv, LOCAL_GAIN);
@@ -2295,9 +2116,7 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
hostif_sme_sleep_set(priv);
break;
case SME_SET_REGION:
- val = cpu_to_le32((uint32_t)(priv->region));
- hostif_mib_set_request(priv, LOCAL_REGION,
- sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ hostif_mib_set_request_int(priv, LOCAL_REGION, priv->region);
break;
case SME_MULTICAST_CONFIRM:
case SME_BSS_SCAN_CONFIRM:
@@ -2318,7 +2137,6 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
case SME_RSN_ENABLED_CONFIRM:
case SME_RSN_MODE_CONFIRM:
case SME_MODE_SET_CONFIRM:
- break;
case SME_TERMINATE:
default:
break;
@@ -2343,7 +2161,7 @@ void hostif_sme_task(unsigned long dev)
}
/* send to Station Management Entity module */
-void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
+void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event)
{
/* enqueue sme event */
if (cnt_smeqbody(priv) < (SME_EVENT_BUFF_SIZE - 1)) {
@@ -2359,7 +2177,8 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
static inline void hostif_aplist_init(struct ks_wlan_private *priv)
{
- size_t size = LOCAL_APLIST_MAX * sizeof(struct local_ap_t);
+ size_t size = LOCAL_APLIST_MAX * sizeof(struct local_ap);
+
priv->aplist.size = 0;
memset(&priv->aplist.ap[0], 0, size);
}
@@ -2384,7 +2203,7 @@ static inline void hostif_sme_init(struct ks_wlan_private *priv)
static inline void hostif_wpa_init(struct ks_wlan_private *priv)
{
memset(&priv->wpa, 0, sizeof(priv->wpa));
- priv->wpa.rsn_enabled = 0;
+ priv->wpa.rsn_enabled = false;
priv->wpa.mic_failure.failure = 0;
priv->wpa.mic_failure.last_failure_time = 0;
priv->wpa.mic_failure.stop = 0;
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
index 2f918b11b337..ca7dc8f5166c 100644
--- a/drivers/staging/ks7010/ks_hostif.h
+++ b/drivers/staging/ks7010/ks_hostif.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for KeyStream wireless LAN
*
* Copyright (c) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _KS_HOSTIF_H_
@@ -67,7 +64,7 @@ struct hostif_hdr {
__le16 event;
} __packed;
-struct hostif_data_request_t {
+struct hostif_data_request {
struct hostif_hdr header;
__le16 auth_type;
#define TYPE_DATA 0x0000
@@ -76,85 +73,127 @@ struct hostif_data_request_t {
u8 data[0];
} __packed;
-struct hostif_data_indication_t {
- struct hostif_hdr header;
- __le16 auth_type;
-/* #define TYPE_DATA 0x0000 */
#define TYPE_PMK1 0x0001
#define TYPE_GMK1 0x0002
#define TYPE_GMK2 0x0003
- __le16 reserved;
- u8 data[0];
-} __packed;
#define CHANNEL_LIST_MAX_SIZE 14
-struct channel_list_t {
+struct channel_list {
u8 size;
u8 body[CHANNEL_LIST_MAX_SIZE];
u8 pad;
} __packed;
-/* MIB Attribute */
-#define DOT11_MAC_ADDRESS 0x21010100 /* MAC Address (R) */
-#define DOT11_PRODUCT_VERSION 0x31024100 /* FirmWare Version (R) */
-#define DOT11_RTS_THRESHOLD 0x21020100 /* RTS Threshold (R/W) */
-#define DOT11_FRAGMENTATION_THRESHOLD 0x21050100 /* Fragment Threshold (R/W) */
-#define DOT11_PRIVACY_INVOKED 0x15010100 /* WEP ON/OFF (W) */
-#define DOT11_WEP_DEFAULT_KEY_ID 0x15020100 /* WEP Index (W) */
-#define DOT11_WEP_DEFAULT_KEY_VALUE1 0x13020101 /* WEP Key#1(TKIP AES: PairwiseTemporalKey) (W) */
-#define DOT11_WEP_DEFAULT_KEY_VALUE2 0x13020102 /* WEP Key#2(TKIP AES: GroupKey1) (W) */
-#define DOT11_WEP_DEFAULT_KEY_VALUE3 0x13020103 /* WEP Key#3(TKIP AES: GroupKey2) (W) */
-#define DOT11_WEP_DEFAULT_KEY_VALUE4 0x13020104 /* WEP Key#4 (W) */
-#define DOT11_WEP_LIST 0x13020100 /* WEP LIST */
-#define DOT11_DESIRED_SSID 0x11090100 /* SSID */
-#define DOT11_CURRENT_CHANNEL 0x45010100 /* channel set */
-#define DOT11_OPERATION_RATE_SET 0x11110100 /* rate set */
-
-#define LOCAL_AP_SEARCH_INTEAVAL 0xF1010100 /* AP search interval (R/W) */
-#define LOCAL_CURRENTADDRESS 0xF1050100 /* MAC Address change (W) */
-#define LOCAL_MULTICAST_ADDRESS 0xF1060100 /* Multicast Address (W) */
-#define LOCAL_MULTICAST_FILTER 0xF1060200 /* Multicast Address Filter enable/disable (W) */
-#define LOCAL_SEARCHED_AP_LIST 0xF1030100 /* AP list (R) */
-#define LOCAL_LINK_AP_STATUS 0xF1040100 /* Link AP status (R) */
-#define LOCAL_PACKET_STATISTICS 0xF1020100 /* tx,rx packets statistics */
-#define LOCAL_AP_SCAN_LIST_TYPE_SET 0xF1030200 /* AP_SCAN_LIST_TYPE */
-
-#define DOT11_RSN_ENABLED 0x15070100 /* WPA enable/disable (W) */
-#define LOCAL_RSN_MODE 0x56010100 /* RSN mode WPA/WPA2 (W) */
-#define DOT11_RSN_CONFIG_MULTICAST_CIPHER 0x51040100 /* GroupKeyCipherSuite (W) */
-#define DOT11_RSN_CONFIG_UNICAST_CIPHER 0x52020100 /* PairwiseKeyCipherSuite (W) */
-#define DOT11_RSN_CONFIG_AUTH_SUITE 0x53020100 /* AuthenticationKeyManagementSuite (W) */
-#define DOT11_RSN_CONFIG_VERSION 0x51020100 /* RSN version (W) */
-#define LOCAL_RSN_CONFIG_ALL 0x5F010100 /* RSN CONFIG ALL (W) */
-#define DOT11_PMK_TSC 0x55010100 /* PMK_TSC (W) */
-#define DOT11_GMK1_TSC 0x55010101 /* GMK1_TSC (W) */
-#define DOT11_GMK2_TSC 0x55010102 /* GMK2_TSC (W) */
-#define DOT11_GMK3_TSC 0x55010103 /* GMK3_TSC */
-#define LOCAL_PMK 0x58010100 /* Pairwise Master Key cache (W) */
-
-#define LOCAL_REGION 0xF10A0100 /* Region setting */
-
-#ifdef WPS
-#define LOCAL_WPS_ENABLE 0xF10B0100 /* WiFi Protected Setup */
-#define LOCAL_WPS_PROBE_REQ 0xF10C0100 /* WPS Probe Request */
-#endif /* WPS */
-
-#define LOCAL_GAIN 0xF10D0100 /* Carrer sense threshold for demo ato show */
-#define LOCAL_EEPROM_SUM 0xF10E0100 /* EEPROM checksum information */
-
-struct hostif_mib_get_request_t {
+/**
+ * enum mib_attribute - Management Information Base attribute
+ * Attribute value used for accessing and updating MIB
+ *
+ * @DOT11_MAC_ADDRESS: MAC Address (R)
+ * @DOT11_PRODUCT_VERSION: FirmWare Version (R)
+ * @DOT11_RTS_THRESHOLD: RTS Threshold (R/W)
+ * @DOT11_FRAGMENTATION_THRESHOLD: Fragment Threshold (R/W)
+ * @DOT11_PRIVACY_INVOKED: WEP ON/OFF (W)
+ * @DOT11_WEP_DEFAULT_KEY_ID: WEP Index (W)
+ * @DOT11_WEP_DEFAULT_KEY_VALUE1: WEP Key#1(TKIP AES: PairwiseTemporalKey) (W)
+ * @DOT11_WEP_DEFAULT_KEY_VALUE2: WEP Key#2(TKIP AES: GroupKey1) (W)
+ * @DOT11_WEP_DEFAULT_KEY_VALUE3: WEP Key#3(TKIP AES: GroupKey2) (W)
+ * @DOT11_WEP_DEFAULT_KEY_VALUE4: WEP Key#4 (W)
+ * @DOT11_WEP_LIST: WEP LIST
+ * @DOT11_DESIRED_SSID: SSID
+ * @DOT11_CURRENT_CHANNEL: channel set
+ * @DOT11_OPERATION_RATE_SET: rate set
+ * @LOCAL_AP_SEARCH_INTERVAL: AP search interval (R/W)
+ * @LOCAL_CURRENTADDRESS: MAC Address change (W)
+ * @LOCAL_MULTICAST_ADDRESS: Multicast Address (W)
+ * @LOCAL_MULTICAST_FILTER: Multicast Address Filter enable/disable (W)
+ * @LOCAL_SEARCHED_AP_LIST: AP list (R)
+ * @LOCAL_LINK_AP_STATUS: Link AP status (R)
+ * @LOCAL_PACKET_STATISTICS: tx,rx packets statistics
+ * @LOCAL_AP_SCAN_LIST_TYPE_SET: AP_SCAN_LIST_TYPE
+ * @DOT11_RSN_ENABLED: WPA enable/disable (W)
+ * @LOCAL_RSN_MODE: RSN mode WPA/WPA2 (W)
+ * @DOT11_RSN_CONFIG_MULTICAST_CIPHER: GroupKeyCipherSuite (W)
+ * @DOT11_RSN_CONFIG_UNICAST_CIPHER: PairwiseKeyCipherSuite (W)
+ * @DOT11_RSN_CONFIG_AUTH_SUITE: AuthenticationKeyManagementSuite (W)
+ * @DOT11_RSN_CONFIG_VERSION: RSN version (W)
+ * @LOCAL_RSN_CONFIG_ALL: RSN CONFIG ALL (W)
+ * @DOT11_PMK_TSC: PMK_TSC (W)
+ * @DOT11_GMK1_TSC: GMK1_TSC (W)
+ * @DOT11_GMK2_TSC: GMK2_TSC (W)
+ * @DOT11_GMK3_TSC: GMK3_TSC
+ * @LOCAL_PMK: Pairwise Master Key cache (W)
+ * @LOCAL_REGION: Region setting
+ * @LOCAL_WPS_ENABLE: WiFi Protected Setup
+ * @LOCAL_WPS_PROBE_REQ: WPS Probe Request
+ * @LOCAL_GAIN: Carrer sense threshold for demo ato show
+ * @LOCAL_EEPROM_SUM: EEPROM checksum information
+ */
+enum mib_attribute {
+ DOT11_MAC_ADDRESS = 0x21010100,
+ DOT11_PRODUCT_VERSION = 0x31024100,
+ DOT11_RTS_THRESHOLD = 0x21020100,
+ DOT11_FRAGMENTATION_THRESHOLD = 0x21050100,
+ DOT11_PRIVACY_INVOKED = 0x15010100,
+ DOT11_WEP_DEFAULT_KEY_ID = 0x15020100,
+ DOT11_WEP_DEFAULT_KEY_VALUE1 = 0x13020101,
+ DOT11_WEP_DEFAULT_KEY_VALUE2 = 0x13020102,
+ DOT11_WEP_DEFAULT_KEY_VALUE3 = 0x13020103,
+ DOT11_WEP_DEFAULT_KEY_VALUE4 = 0x13020104,
+ DOT11_WEP_LIST = 0x13020100,
+ DOT11_DESIRED_SSID = 0x11090100,
+ DOT11_CURRENT_CHANNEL = 0x45010100,
+ DOT11_OPERATION_RATE_SET = 0x11110100,
+ LOCAL_AP_SEARCH_INTERVAL = 0xF1010100,
+ LOCAL_CURRENTADDRESS = 0xF1050100,
+ LOCAL_MULTICAST_ADDRESS = 0xF1060100,
+ LOCAL_MULTICAST_FILTER = 0xF1060200,
+ LOCAL_SEARCHED_AP_LIST = 0xF1030100,
+ LOCAL_LINK_AP_STATUS = 0xF1040100,
+ LOCAL_PACKET_STATISTICS = 0xF1020100,
+ LOCAL_AP_SCAN_LIST_TYPE_SET = 0xF1030200,
+ DOT11_RSN_ENABLED = 0x15070100,
+ LOCAL_RSN_MODE = 0x56010100,
+ DOT11_RSN_CONFIG_MULTICAST_CIPHER = 0x51040100,
+ DOT11_RSN_CONFIG_UNICAST_CIPHER = 0x52020100,
+ DOT11_RSN_CONFIG_AUTH_SUITE = 0x53020100,
+ DOT11_RSN_CONFIG_VERSION = 0x51020100,
+ LOCAL_RSN_CONFIG_ALL = 0x5F010100,
+ DOT11_PMK_TSC = 0x55010100,
+ DOT11_GMK1_TSC = 0x55010101,
+ DOT11_GMK2_TSC = 0x55010102,
+ DOT11_GMK3_TSC = 0x55010103,
+ LOCAL_PMK = 0x58010100,
+ LOCAL_REGION = 0xF10A0100,
+ LOCAL_WPS_ENABLE = 0xF10B0100,
+ LOCAL_WPS_PROBE_REQ = 0xF10C0100,
+ LOCAL_GAIN = 0xF10D0100,
+ LOCAL_EEPROM_SUM = 0xF10E0100
+};
+
+struct hostif_mib_get_request {
struct hostif_hdr header;
__le32 mib_attribute;
} __packed;
-struct hostif_mib_value_t {
+/**
+ * enum mib_data_type - Message Information Base data type.
+ * @MIB_VALUE_TYPE_NULL: NULL type
+ * @MIB_VALUE_TYPE_INT: INTEGER type
+ * @MIB_VALUE_TYPE_BOOL: BOOL type
+ * @MIB_VALUE_TYPE_COUNT32: unused
+ * @MIB_VALUE_TYPE_OSTRING: Chunk of memory
+ */
+enum mib_data_type {
+ MIB_VALUE_TYPE_NULL = 0,
+ MIB_VALUE_TYPE_INT,
+ MIB_VALUE_TYPE_BOOL,
+ MIB_VALUE_TYPE_COUNT32,
+ MIB_VALUE_TYPE_OSTRING
+};
+
+struct hostif_mib_value {
__le16 size;
__le16 type;
-#define MIB_VALUE_TYPE_NULL 0
-#define MIB_VALUE_TYPE_INT 1
-#define MIB_VALUE_TYPE_BOOL 2
-#define MIB_VALUE_TYPE_COUNT32 3
-#define MIB_VALUE_TYPE_OSTRING 4
u8 body[0];
} __packed;
@@ -166,22 +205,16 @@ struct hostif_mib_get_confirm_t {
#define MIB_READ_ONLY 2
#define MIB_WRITE_ONLY 3
__le32 mib_attribute;
- struct hostif_mib_value_t mib_value;
+ struct hostif_mib_value mib_value;
} __packed;
struct hostif_mib_set_request_t {
struct hostif_hdr header;
__le32 mib_attribute;
- struct hostif_mib_value_t mib_value;
-} __packed;
-
-struct hostif_mib_set_confirm_t {
- struct hostif_hdr header;
- __le32 mib_status;
- __le32 mib_attribute;
+ struct hostif_mib_value mib_value;
} __packed;
-struct hostif_power_mgmt_request_t {
+struct hostif_power_mgmt_request {
struct hostif_hdr header;
__le32 mode;
#define POWER_ACTIVE 1
@@ -206,12 +239,7 @@ enum power_mgmt_mode_type {
/* #define RESULT_ALREADY_RUNNING 3 */
#define RESULT_ALREADY_RUNNING 7
-struct hostif_power_mgmt_confirm_t {
- struct hostif_hdr header;
- __le16 result_code;
-} __packed;
-
-struct hostif_start_request_t {
+struct hostif_start_request {
struct hostif_hdr header;
__le16 mode;
#define MODE_PSEUDO_ADHOC 0
@@ -220,43 +248,38 @@ struct hostif_start_request_t {
#define MODE_ADHOC 3
} __packed;
-struct hostif_start_confirm_t {
- struct hostif_hdr header;
- __le16 result_code;
-} __packed;
-
-struct ssid_t {
+struct ssid {
u8 size;
u8 body[IEEE80211_MAX_SSID_LEN];
u8 ssid_pad;
} __packed;
#define RATE_SET_MAX_SIZE 16
-struct rate_set8_t {
+struct rate_set8 {
u8 size;
u8 body[8];
u8 rate_pad;
} __packed;
-struct fh_parms_t {
+struct fh_parms {
__le16 dwell_time;
u8 hop_set;
u8 hop_pattern;
u8 hop_index;
} __packed;
-struct ds_parms_t {
+struct ds_parms {
u8 channel;
} __packed;
-struct cf_parms_t {
+struct cf_parms {
u8 count;
u8 period;
__le16 max_duration;
__le16 dur_remaining;
} __packed;
-struct ibss_parms_t {
+struct ibss_parms {
__le16 atim_window;
} __packed;
@@ -270,13 +293,13 @@ struct erp_params_t {
u8 erp_info;
} __packed;
-struct rate_set16_t {
+struct rate_set16 {
u8 size;
u8 body[16];
u8 rate_pad;
} __packed;
-struct ap_info_t {
+struct ap_info {
u8 bssid[6]; /* +00 */
u8 rssi; /* +06 */
u8 sq; /* +07 */
@@ -291,7 +314,7 @@ struct ap_info_t {
/* +1032 */
} __packed;
-struct link_ap_info_t {
+struct link_ap_info {
u8 bssid[6]; /* +00 */
u8 rssi; /* +06 */
u8 sq; /* +07 */
@@ -299,14 +322,14 @@ struct link_ap_info_t {
u8 pad0; /* +09 */
__le16 beacon_period; /* +10 */
__le16 capability; /* +12 */
- struct rate_set8_t rate_set; /* +14 */
- struct fh_parms_t fh_parameter; /* +24 */
- struct ds_parms_t ds_parameter; /* +29 */
- struct cf_parms_t cf_parameter; /* +30 */
- struct ibss_parms_t ibss_parameter; /* +36 */
+ struct rate_set8 rate_set; /* +14 */
+ struct fh_parms fh_parameter; /* +24 */
+ struct ds_parms ds_parameter; /* +29 */
+ struct cf_parms cf_parameter; /* +30 */
+ struct ibss_parms ibss_parameter; /* +36 */
struct erp_params_t erp_parameter; /* +38 */
u8 pad1; /* +39 */
- struct rate_set8_t ext_rate_set; /* +40 */
+ struct rate_set8 ext_rate_set; /* +40 */
u8 DTIM_period; /* +50 */
u8 rsn_mode; /* +51 */
#define RSN_MODE_NONE 0
@@ -318,23 +341,13 @@ struct link_ap_info_t {
} __packed rsn;
} __packed;
-struct hostif_connect_indication_t {
- struct hostif_hdr header;
- __le16 connect_code;
#define RESULT_CONNECT 0
#define RESULT_DISCONNECT 1
- struct link_ap_info_t link_ap_info;
-} __packed;
-struct hostif_stop_request_t {
+struct hostif_stop_request {
struct hostif_hdr header;
} __packed;
-struct hostif_stop_confirm_t {
- struct hostif_hdr header;
- __le16 result_code;
-} __packed;
-
#define D_11B_ONLY_MODE 0
#define D_11G_ONLY_MODE 1
#define D_11BG_COMPATIBLE_MODE 2
@@ -343,98 +356,78 @@ struct hostif_stop_confirm_t {
#define CTS_MODE_FALSE 0
#define CTS_MODE_TRUE 1
-struct hostif_request_t {
+struct hostif_request {
__le16 phy_type;
__le16 cts_mode;
__le16 scan_type;
__le16 capability;
- struct rate_set16_t rate_set;
+ struct rate_set16 rate_set;
} __packed;
/**
- * struct hostif_ps_adhoc_set_request_t - pseudo adhoc mode
+ * struct hostif_ps_adhoc_set_request - pseudo adhoc mode
* @capability: bit5 : preamble
* bit6 : pbcc - Not supported always 0
* bit10 : ShortSlotTime
* bit13 : DSSS-OFDM - Not supported always 0
*/
-struct hostif_ps_adhoc_set_request_t {
+struct hostif_ps_adhoc_set_request {
struct hostif_hdr header;
- struct hostif_request_t request;
+ struct hostif_request request;
__le16 channel;
} __packed;
-struct hostif_ps_adhoc_set_confirm_t {
- struct hostif_hdr header;
- __le16 result_code;
-} __packed;
-
#define AUTH_TYPE_OPEN_SYSTEM 0
#define AUTH_TYPE_SHARED_KEY 1
/**
- * struct hostif_infrastructure_set_request_t
+ * struct hostif_infrastructure_set_request
* @capability: bit5 : preamble
* bit6 : pbcc - Not supported always 0
* bit10 : ShortSlotTime
* bit13 : DSSS-OFDM - Not supported always 0
*/
-struct hostif_infrastructure_set_request_t {
+struct hostif_infrastructure_set_request {
struct hostif_hdr header;
- struct hostif_request_t request;
- struct ssid_t ssid;
+ struct hostif_request request;
+ struct ssid ssid;
__le16 beacon_lost_count;
__le16 auth_type;
- struct channel_list_t channel_list;
+ struct channel_list channel_list;
u8 bssid[ETH_ALEN];
} __packed;
-struct hostif_infrastructure_set_confirm_t {
- struct hostif_hdr header;
- __le16 result_code;
-} __packed;
-
/**
- * struct hostif_adhoc_set_request_t
+ * struct hostif_adhoc_set_request
* @capability: bit5 : preamble
* bit6 : pbcc - Not supported always 0
* bit10 : ShortSlotTime
* bit13 : DSSS-OFDM - Not supported always 0
*/
-struct hostif_adhoc_set_request_t {
+struct hostif_adhoc_set_request {
struct hostif_hdr header;
- struct hostif_request_t request;
- struct ssid_t ssid;
+ struct hostif_request request;
+ struct ssid ssid;
__le16 channel;
} __packed;
/**
- * struct hostif_adhoc_set2_request_t
+ * struct hostif_adhoc_set2_request
* @capability: bit5 : preamble
* bit6 : pbcc - Not supported always 0
* bit10 : ShortSlotTime
* bit13 : DSSS-OFDM - Not supported always 0
*/
-struct hostif_adhoc_set2_request_t {
+struct hostif_adhoc_set2_request {
struct hostif_hdr header;
- struct hostif_request_t request;
+ struct hostif_request request;
__le16 reserved;
- struct ssid_t ssid;
- struct channel_list_t channel_list;
+ struct ssid ssid;
+ struct channel_list channel_list;
u8 bssid[ETH_ALEN];
} __packed;
-struct hostif_adhoc_set_confirm_t {
- struct hostif_hdr header;
- __le16 result_code;
-} __packed;
-
-struct last_associate_t {
- u8 type;
- u8 status;
-} __packed;
-
-struct association_request_t {
+struct association_request {
u8 type;
u8 pad;
__le16 capability;
@@ -443,7 +436,7 @@ struct association_request_t {
__le16 req_ies_size;
} __packed;
-struct association_response_t {
+struct association_response {
u8 type;
u8 pad;
__le16 capability;
@@ -452,15 +445,7 @@ struct association_response_t {
__le16 resp_ies_size;
} __packed;
-struct hostif_associate_indication_t {
- struct hostif_hdr header;
- struct association_request_t assoc_req;
- struct association_response_t assoc_resp;
- /* followed by (req_ies_size + resp_ies_size) octets of data */
- /* reqIEs data *//* respIEs data */
-} __packed;
-
-struct hostif_bss_scan_request_t {
+struct hostif_bss_scan_request {
struct hostif_hdr header;
u8 scan_type;
#define ACTIVE_SCAN 0
@@ -468,17 +453,11 @@ struct hostif_bss_scan_request_t {
u8 pad[3];
__le32 ch_time_min;
__le32 ch_time_max;
- struct channel_list_t channel_list;
- struct ssid_t ssid;
+ struct channel_list channel_list;
+ struct ssid ssid;
} __packed;
-struct hostif_bss_scan_confirm_t {
- struct hostif_hdr header;
- __le16 result_code;
- __le16 reserved;
-} __packed;
-
-struct hostif_phy_information_request_t {
+struct hostif_phy_information_request {
struct hostif_hdr header;
__le16 type;
#define NORMAL_TYPE 0
@@ -486,43 +465,21 @@ struct hostif_phy_information_request_t {
__le16 time; /* unit 100ms */
} __packed;
-struct hostif_phy_information_confirm_t {
- struct hostif_hdr header;
- u8 rssi;
- u8 sq;
- u8 noise;
- u8 link_speed;
- __le32 tx_frame;
- __le32 rx_frame;
- __le32 tx_error;
- __le32 rx_error;
-} __packed;
-
enum sleep_mode_type {
SLP_ACTIVE,
SLP_SLEEP
};
-struct hostif_sleep_request_t {
+struct hostif_sleep_request {
struct hostif_hdr header;
} __packed;
-struct hostif_sleep_confirm_t {
- struct hostif_hdr header;
- __le16 result_code;
-} __packed;
-
-struct hostif_mic_failure_request_t {
+struct hostif_mic_failure_request {
struct hostif_hdr header;
__le16 failure_count;
__le16 timer;
} __packed;
-struct hostif_mic_failure_confirm_t {
- struct hostif_hdr header;
- __le16 result_code;
-} __packed;
-
#define BASIC_RATE 0x80
#define RATE_MASK 0x7F
@@ -541,34 +498,54 @@ struct hostif_mic_failure_confirm_t {
#define TX_RATE_FIXED 5
/* 11b rate */
-#define TX_RATE_1M (uint8_t)(10 / 5) /* 11b 11g basic rate */
-#define TX_RATE_2M (uint8_t)(20 / 5) /* 11b 11g basic rate */
-#define TX_RATE_5M (uint8_t)(55 / 5) /* 11g basic rate */
-#define TX_RATE_11M (uint8_t)(110 / 5) /* 11g basic rate */
+#define TX_RATE_1M (u8)(10 / 5) /* 11b 11g basic rate */
+#define TX_RATE_2M (u8)(20 / 5) /* 11b 11g basic rate */
+#define TX_RATE_5M (u8)(55 / 5) /* 11g basic rate */
+#define TX_RATE_11M (u8)(110 / 5) /* 11g basic rate */
/* 11g rate */
-#define TX_RATE_6M (uint8_t)(60 / 5) /* 11g basic rate */
-#define TX_RATE_12M (uint8_t)(120 / 5) /* 11g basic rate */
-#define TX_RATE_24M (uint8_t)(240 / 5) /* 11g basic rate */
-#define TX_RATE_9M (uint8_t)(90 / 5)
-#define TX_RATE_18M (uint8_t)(180 / 5)
-#define TX_RATE_36M (uint8_t)(360 / 5)
-#define TX_RATE_48M (uint8_t)(480 / 5)
-#define TX_RATE_54M (uint8_t)(540 / 5)
-
-#define IS_11B_RATE(A) (((A & RATE_MASK) == TX_RATE_1M) || ((A & RATE_MASK) == TX_RATE_2M) || \
- ((A & RATE_MASK) == TX_RATE_5M) || ((A & RATE_MASK) == TX_RATE_11M))
+#define TX_RATE_6M (u8)(60 / 5) /* 11g basic rate */
+#define TX_RATE_12M (u8)(120 / 5) /* 11g basic rate */
+#define TX_RATE_24M (u8)(240 / 5) /* 11g basic rate */
+#define TX_RATE_9M (u8)(90 / 5)
+#define TX_RATE_18M (u8)(180 / 5)
+#define TX_RATE_36M (u8)(360 / 5)
+#define TX_RATE_48M (u8)(480 / 5)
+#define TX_RATE_54M (u8)(540 / 5)
+
+static inline bool is_11b_rate(u8 rate)
+{
+ return (((rate & RATE_MASK) == TX_RATE_1M) ||
+ ((rate & RATE_MASK) == TX_RATE_2M) ||
+ ((rate & RATE_MASK) == TX_RATE_5M) ||
+ ((rate & RATE_MASK) == TX_RATE_11M));
+}
-#define IS_OFDM_RATE(A) (((A & RATE_MASK) == TX_RATE_6M) || ((A & RATE_MASK) == TX_RATE_12M) || \
- ((A & RATE_MASK) == TX_RATE_24M) || ((A & RATE_MASK) == TX_RATE_9M) || \
- ((A & RATE_MASK) == TX_RATE_18M) || ((A & RATE_MASK) == TX_RATE_36M) || \
- ((A & RATE_MASK) == TX_RATE_48M) || ((A & RATE_MASK) == TX_RATE_54M))
+static inline bool is_ofdm_rate(u8 rate)
+{
+ return (((rate & RATE_MASK) == TX_RATE_6M) ||
+ ((rate & RATE_MASK) == TX_RATE_12M) ||
+ ((rate & RATE_MASK) == TX_RATE_24M) ||
+ ((rate & RATE_MASK) == TX_RATE_9M) ||
+ ((rate & RATE_MASK) == TX_RATE_18M) ||
+ ((rate & RATE_MASK) == TX_RATE_36M) ||
+ ((rate & RATE_MASK) == TX_RATE_48M) ||
+ ((rate & RATE_MASK) == TX_RATE_54M));
+}
-#define IS_11BG_RATE(A) (IS_11B_RATE(A) || IS_OFDM_RATE(A))
+static inline bool is_11bg_rate(u8 rate)
+{
+ return (is_11b_rate(rate) || is_ofdm_rate(rate));
+}
-#define IS_OFDM_EXT_RATE(A) (((A & RATE_MASK) == TX_RATE_9M) || ((A & RATE_MASK) == TX_RATE_18M) || \
- ((A & RATE_MASK) == TX_RATE_36M) || ((A & RATE_MASK) == TX_RATE_48M) || \
- ((A & RATE_MASK) == TX_RATE_54M))
+static inline bool is_ofdm_ext_rate(u8 rate)
+{
+ return (((rate & RATE_MASK) == TX_RATE_9M) ||
+ ((rate & RATE_MASK) == TX_RATE_18M) ||
+ ((rate & RATE_MASK) == TX_RATE_36M) ||
+ ((rate & RATE_MASK) == TX_RATE_48M) ||
+ ((rate & RATE_MASK) == TX_RATE_54M));
+}
enum connect_status_type {
CONNECT_STATUS,
@@ -588,19 +565,25 @@ enum multicast_filter_type {
#define NIC_MAX_MCAST_LIST 32
-/* macro function */
#define HIF_EVENT_MASK 0xE800
-#define IS_HIF_IND(_EVENT) ((_EVENT & HIF_EVENT_MASK) == 0xE800 && \
- ((_EVENT & ~HIF_EVENT_MASK) == 0x0001 || \
- (_EVENT & ~HIF_EVENT_MASK) == 0x0006 || \
- (_EVENT & ~HIF_EVENT_MASK) == 0x000C || \
- (_EVENT & ~HIF_EVENT_MASK) == 0x0011 || \
- (_EVENT & ~HIF_EVENT_MASK) == 0x0012))
-
-#define IS_HIF_CONF(_EVENT) ((_EVENT & HIF_EVENT_MASK) == 0xE800 && \
- (_EVENT & ~HIF_EVENT_MASK) > 0x0000 && \
- (_EVENT & ~HIF_EVENT_MASK) < 0x0012 && \
- !IS_HIF_IND(_EVENT))
+
+static inline bool is_hif_ind(unsigned short event)
+{
+ return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) &&
+ (((event & ~HIF_EVENT_MASK) == 0x0001) ||
+ ((event & ~HIF_EVENT_MASK) == 0x0006) ||
+ ((event & ~HIF_EVENT_MASK) == 0x000C) ||
+ ((event & ~HIF_EVENT_MASK) == 0x0011) ||
+ ((event & ~HIF_EVENT_MASK) == 0x0012)));
+}
+
+static inline bool is_hif_conf(unsigned short event)
+{
+ return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) &&
+ ((event & ~HIF_EVENT_MASK) > 0x0000) &&
+ ((event & ~HIF_EVENT_MASK) < 0x0012) &&
+ !is_hif_ind(event));
+}
#ifdef __KERNEL__
@@ -610,7 +593,7 @@ enum multicast_filter_type {
int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb);
void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
unsigned int size);
-void hostif_sme_enqueue(struct ks_wlan_private *priv, uint16_t event);
+void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event);
int hostif_init(struct ks_wlan_private *priv);
void hostif_exit(struct ks_wlan_private *priv);
int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
@@ -622,19 +605,11 @@ void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb);
void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv);
int ks_wlan_hw_power_save(struct ks_wlan_private *priv);
-static
-inline int hif_align_size(int size)
+#define KS7010_SIZE_ALIGNMENT 32
+
+static inline size_t hif_align_size(size_t size)
{
-#ifdef KS_ATOM
- if (size < 1024)
- size = 1024;
-#endif
-#ifdef DEVICE_ALIGNMENT
- return (size % DEVICE_ALIGNMENT) ? size + DEVICE_ALIGNMENT -
- (size % DEVICE_ALIGNMENT) : size;
-#else
- return size;
-#endif
+ return ALIGN(size, KS7010_SIZE_ALIGNMENT);
}
#endif /* __KERNEL__ */
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index 1b7036c32d1c..7aaf8d780939 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -1,60 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for KeyStream IEEE802.11 b/g wireless LAN cards.
*
* Copyright (C) 2006-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _KS_WLAN_H
#define _KS_WLAN_H
-#define WPS
-
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <linux/spinlock.h> /* spinlock_t */
-#include <linux/sched.h> /* wait_queue_head_t */
-#include <linux/types.h> /* pid_t */
-#include <linux/netdevice.h> /* struct net_device_stats, struct sk_buff */
-#include <linux/etherdevice.h>
+#include <linux/atomic.h>
+#include <linux/circ_buf.h>
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <linux/wireless.h>
-#include <linux/atomic.h> /* struct atomic_t */
-#include <linux/timer.h> /* struct timer_list */
-#include <linux/string.h>
-#include <linux/completion.h> /* struct completion */
-#include <linux/workqueue.h>
-
-#include <linux/io.h>
-
-#include "ks7010_sdio.h"
struct ks_wlan_parameter {
- u8 operation_mode; /* Operation Mode */
- u8 channel; /* Channel */
- u8 tx_rate; /* Transmit Rate */
+ u8 operation_mode;
+ u8 channel;
+ u8 tx_rate;
struct {
u8 size;
u8 body[16];
} rate_set;
- u8 bssid[ETH_ALEN]; /* BSSID */
+ u8 bssid[ETH_ALEN];
struct {
u8 size;
u8 body[32 + 1];
- } ssid; /* SSID */
- u8 preamble; /* Preamble */
+ } ssid;
+ u8 preamble;
u8 power_mgmt;
- u32 scan_type; /* AP List Scan Type */
-#define BEACON_LOST_COUNT_MIN 0
+ u32 scan_type;
#define BEACON_LOST_COUNT_MAX 65535
- u32 beacon_lost_count; /* Beacon Lost Count */
- u32 rts; /* RTS Threashold */
- u32 fragment; /* Fragmentation Threshold */
+ u32 beacon_lost_count;
+ u32 rts;
+ u32 fragment;
u32 privacy_invoked;
u32 wep_index;
struct {
@@ -62,9 +44,9 @@ struct ks_wlan_parameter {
u8 val[13 * 2 + 1];
} wep_key[4];
u16 authenticate_type;
- u16 phy_type; /* 11b/11g/11bg mode type */
- u16 cts_mode; /* for 11g/11bg mode cts mode */
- u16 phy_info_timer; /* phy information timer */
+ u16 phy_type;
+ u16 cts_mode;
+ u16 phy_info_timer;
};
enum {
@@ -133,10 +115,8 @@ enum {
SME_RSN_AUTH_REQUEST,
SME_RSN_ENABLED_REQUEST,
SME_RSN_MODE_REQUEST,
-#ifdef WPS
SME_WPS_ENABLE_REQUEST,
SME_WPS_PROBE_REQUEST,
-#endif
SME_SET_GAIN,
SME_GET_GAIN,
SME_SLEEP_REQUEST,
@@ -174,7 +154,7 @@ enum {
SME_WEP_SET_CONFIRM,
SME_TERMINATE,
- SME_EVENT_SIZE /* end */
+ SME_EVENT_SIZE
};
/* SME Status */
@@ -196,29 +176,30 @@ struct sme_info {
unsigned long sme_flag;
};
-struct hostt_t {
+struct hostt {
int buff[SME_EVENT_BUFF_SIZE];
unsigned int qhead;
unsigned int qtail;
};
#define RSN_IE_BODY_MAX 64
-struct rsn_ie_t {
+struct rsn_ie {
u8 id; /* 0xdd = WPA or 0x30 = RSN */
u8 size; /* max ? 255 ? */
u8 body[RSN_IE_BODY_MAX];
} __packed;
-#ifdef WPS
+#define WPA_INFO_ELEM_ID 0xdd
+#define RSN_INFO_ELEM_ID 0x30
+
#define WPS_IE_BODY_MAX 255
-struct wps_ie_t {
+struct wps_ie {
u8 id; /* 221 'dd <len> 00 50 F2 04' */
u8 size; /* max ? 255 ? */
u8 body[WPS_IE_BODY_MAX];
} __packed;
-#endif /* WPS */
-struct local_ap_t {
+struct local_ap {
u8 bssid[6];
u8 rssi;
u8 sq;
@@ -235,28 +216,26 @@ struct local_ap_t {
u16 capability;
u8 channel;
u8 noise;
- struct rsn_ie_t wpa_ie;
- struct rsn_ie_t rsn_ie;
-#ifdef WPS
- struct wps_ie_t wps_ie;
-#endif /* WPS */
+ struct rsn_ie wpa_ie;
+ struct rsn_ie rsn_ie;
+ struct wps_ie wps_ie;
};
#define LOCAL_APLIST_MAX 31
#define LOCAL_CURRENT_AP LOCAL_APLIST_MAX
-struct local_aplist_t {
+struct local_aplist {
int size;
- struct local_ap_t ap[LOCAL_APLIST_MAX + 1];
+ struct local_ap ap[LOCAL_APLIST_MAX + 1];
};
-struct local_gain_t {
+struct local_gain {
u8 tx_mode;
u8 rx_mode;
u8 tx_gain;
u8 rx_gain;
};
-struct local_eeprom_sum_t {
+struct local_eeprom_sum {
u8 type;
u8 result;
};
@@ -278,33 +257,25 @@ enum {
PS_WAKEUP
};
-struct power_save_status_t {
+struct power_save_status {
atomic_t status; /* initialvalue 0 */
struct completion wakeup_wait;
atomic_t confirm_wait;
atomic_t snooze_guard;
};
-struct sleep_status_t {
+struct sleep_status {
atomic_t status; /* initialvalue 0 */
atomic_t doze_request;
atomic_t wakeup_request;
};
/* WPA */
-struct scan_ext_t {
+struct scan_ext {
unsigned int flag;
char ssid[IW_ESSID_MAX_SIZE + 1];
};
-enum {
- CIPHER_NONE,
- CIPHER_WEP40,
- CIPHER_TKIP,
- CIPHER_CCMP,
- CIPHER_WEP104
-};
-
#define CIPHER_ID_WPA_NONE "\x00\x50\xf2\x00"
#define CIPHER_ID_WPA_WEP40 "\x00\x50\xf2\x01"
#define CIPHER_ID_WPA_TKIP "\x00\x50\xf2\x02"
@@ -339,7 +310,7 @@ enum {
#define MIC_KEY_SIZE 8
-struct wpa_key_t {
+struct wpa_key {
u32 ext_flags; /* IW_ENCODE_EXT_xxx */
u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
@@ -357,50 +328,111 @@ struct wpa_key_t {
#define WPA_KEY_INDEX_MAX 4
#define WPA_RX_SEQ_LEN 6
-struct mic_failure_t {
+struct mic_failure {
u16 failure; /* MIC Failure counter 0 or 1 or 2 */
u16 counter; /* 1sec counter 0-60 */
u32 last_failure_time;
- int stop; /* stop flag */
+ int stop;
};
-struct wpa_status_t {
+struct wpa_status {
int wpa_enabled;
- unsigned int rsn_enabled;
+ bool rsn_enabled;
int version;
int pairwise_suite; /* unicast cipher */
int group_suite; /* multicast cipher */
- int key_mgmt_suite; /* authentication key management suite */
+ int key_mgmt_suite;
int auth_alg;
int txkey;
- struct wpa_key_t key[WPA_KEY_INDEX_MAX];
- struct scan_ext_t scan_ext;
- struct mic_failure_t mic_failure;
+ struct wpa_key key[WPA_KEY_INDEX_MAX];
+ struct scan_ext scan_ext;
+ struct mic_failure mic_failure;
};
#include <linux/list.h>
#define PMK_LIST_MAX 8
-struct pmk_list_t {
+struct pmk_list {
u16 size;
struct list_head head;
- struct pmk_t {
+ struct pmk {
struct list_head list;
u8 bssid[ETH_ALEN];
u8 pmkid[IW_PMKID_LEN];
} pmk[PMK_LIST_MAX];
};
-#ifdef WPS
-struct wps_status_t {
+struct wps_status {
int wps_enabled;
int ielen;
u8 ie[255];
};
-#endif /* WPS */
+
+/* Tx Device struct */
+#define TX_DEVICE_BUFF_SIZE 1024
+
+struct ks_wlan_private;
+
+/**
+ * struct tx_device_buffer - Queue item for the tx queue.
+ * @sendp: Pointer to the send request data.
+ * @size: Size of @sendp data.
+ * @complete_handler: Function called once data write to device is complete.
+ * @arg1: First argument to @complete_handler.
+ * @arg2: Second argument to @complete_handler.
+ */
+struct tx_device_buffer {
+ unsigned char *sendp;
+ unsigned int size;
+ void (*complete_handler)(struct ks_wlan_private *priv,
+ struct sk_buff *skb);
+ struct sk_buff *skb;
+};
+
+/**
+ * struct tx_device - Tx buffer queue.
+ * @tx_device_buffer: Queue buffer.
+ * @qhead: Head of tx queue.
+ * @qtail: Tail of tx queue.
+ * @tx_dev_lock: Queue lock.
+ */
+struct tx_device {
+ struct tx_device_buffer tx_dev_buff[TX_DEVICE_BUFF_SIZE];
+ unsigned int qhead;
+ unsigned int qtail;
+ spinlock_t tx_dev_lock; /* protect access to the queue */
+};
+
+/* Rx Device struct */
+#define RX_DATA_SIZE (2 + 2 + 2347 + 1)
+#define RX_DEVICE_BUFF_SIZE 32
+
+/**
+ * struct rx_device_buffer - Queue item for the rx queue.
+ * @data: rx data.
+ * @size: Size of @data.
+ */
+struct rx_device_buffer {
+ unsigned char data[RX_DATA_SIZE];
+ unsigned int size;
+};
+
+/**
+ * struct rx_device - Rx buffer queue.
+ * @rx_device_buffer: Queue buffer.
+ * @qhead: Head of rx queue.
+ * @qtail: Tail of rx queue.
+ * @rx_dev_lock: Queue lock.
+ */
+struct rx_device {
+ struct rx_device_buffer rx_dev_buff[RX_DEVICE_BUFF_SIZE];
+ unsigned int qhead;
+ unsigned int qtail;
+ spinlock_t rx_dev_lock; /* protect access to the queue */
+};
struct ks_wlan_private {
/* hardware information */
- struct ks_sdio_card *ks_sdio_card;
+ void *if_hw;
struct workqueue_struct *wq;
struct delayed_work rw_dwork;
struct tasklet_struct rx_bh_task;
@@ -423,12 +455,12 @@ struct ks_wlan_private {
unsigned char eth_addr[ETH_ALEN];
- struct local_aplist_t aplist;
- struct local_ap_t current_ap;
- struct power_save_status_t psstatus;
- struct sleep_status_t sleepstatus;
- struct wpa_status_t wpa;
- struct pmk_list_t pmklist;
+ struct local_aplist aplist;
+ struct local_ap current_ap;
+ struct power_save_status psstatus;
+ struct sleep_status sleepstatus;
+ struct wpa_status wpa;
+ struct pmk_list pmklist;
/* wireless parameter */
struct ks_wlan_parameter reg;
u8 current_rate;
@@ -443,7 +475,7 @@ struct ks_wlan_private {
unsigned int need_commit; /* for ioctl */
/* DeviceIoControl */
- int device_open_status;
+ bool is_device_open;
atomic_t event_count;
atomic_t rec_count;
int dev_count;
@@ -455,42 +487,78 @@ struct ks_wlan_private {
unsigned char firmware_version[128 + 1];
int version_size;
- bool mac_address_valid; /* Mac Address Status */
+ bool mac_address_valid;
int dev_state;
struct sk_buff *skb;
unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
- /* spinlock_t lock; */
#define FORCE_DISCONNECT 0x80000000
#define CONNECT_STATUS_MASK 0x7FFFFFFF
- u32 connect_status; /* connect status */
- int infra_status; /* Infractructure status */
-
- u8 data_buff[0x1000];
-
+ u32 connect_status;
+ int infra_status;
u8 scan_ssid_len;
u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
- struct local_gain_t gain;
-#ifdef WPS
- struct net_device *l2_dev;
- int l2_fd;
- struct wps_status_t wps;
-#endif /* WPS */
+ struct local_gain gain;
+ struct wps_status wps;
u8 sleep_mode;
u8 region;
- struct local_eeprom_sum_t eeprom_sum;
+ struct local_eeprom_sum eeprom_sum;
u8 eeprom_checksum;
- struct hostt_t hostt;
+ struct hostt hostt;
unsigned long last_doze;
unsigned long last_wakeup;
- uint wakeup_count; /* for detect wakeup loop */
+ unsigned int wakeup_count; /* for detect wakeup loop */
};
+static inline void inc_txqhead(struct ks_wlan_private *priv)
+{
+ priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE;
+}
+
+static inline void inc_txqtail(struct ks_wlan_private *priv)
+{
+ priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE;
+}
+
+static inline bool txq_has_space(struct ks_wlan_private *priv)
+{
+ return (CIRC_SPACE(priv->tx_dev.qhead, priv->tx_dev.qtail,
+ TX_DEVICE_BUFF_SIZE) > 0);
+}
+
+static inline void inc_rxqhead(struct ks_wlan_private *priv)
+{
+ priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE;
+}
+
+static inline void inc_rxqtail(struct ks_wlan_private *priv)
+{
+ priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE;
+}
+
+static inline bool rxq_has_space(struct ks_wlan_private *priv)
+{
+ return (CIRC_SPACE(priv->rx_dev.qhead, priv->rx_dev.qtail,
+ RX_DEVICE_BUFF_SIZE) > 0);
+}
+
+static inline unsigned int txq_count(struct ks_wlan_private *priv)
+{
+ return CIRC_CNT_TO_END(priv->tx_dev.qhead, priv->tx_dev.qtail,
+ TX_DEVICE_BUFF_SIZE);
+}
+
+static inline unsigned int rxq_count(struct ks_wlan_private *priv)
+{
+ return CIRC_CNT_TO_END(priv->rx_dev.qhead, priv->rx_dev.qtail,
+ RX_DEVICE_BUFF_SIZE);
+}
+
int ks_wlan_net_start(struct net_device *dev);
int ks_wlan_net_stop(struct net_device *dev);
bool is_connect_status(u32 status);
diff --git a/drivers/staging/ks7010/ks_wlan_ioctl.h b/drivers/staging/ks7010/ks_wlan_ioctl.h
index 121e7cb808a2..97c7d95de411 100644
--- a/drivers/staging/ks7010/ks_wlan_ioctl.h
+++ b/drivers/staging/ks7010/ks_wlan_ioctl.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for KeyStream 11b/g wireless LAN
*
* Copyright (c) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _KS_WLAN_IOCTL_H
@@ -19,11 +16,9 @@
/* former KS_WLAN_GET_DRIVER_VERSION (SIOCIWFIRSTPRIV + 1) */
/* (SIOCIWFIRSTPRIV + 2) */
#define KS_WLAN_GET_FIRM_VERSION (SIOCIWFIRSTPRIV + 3)
-#ifdef WPS
#define KS_WLAN_SET_WPS_ENABLE (SIOCIWFIRSTPRIV + 4)
#define KS_WLAN_GET_WPS_ENABLE (SIOCIWFIRSTPRIV + 5)
#define KS_WLAN_SET_WPS_PROBE_REQ (SIOCIWFIRSTPRIV + 6)
-#endif
#define KS_WLAN_GET_EEPROM_CKSUM (SIOCIWFIRSTPRIV + 7)
#define KS_WLAN_SET_PREAMBLE (SIOCIWFIRSTPRIV + 8)
#define KS_WLAN_GET_PREAMBLE (SIOCIWFIRSTPRIV + 9)
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 9078e13b0d4a..dc5459ae0b51 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -1,31 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for KeyStream 11b/g wireless LAN
*
* Copyright (C) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/compiler.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <linux/rtnetlink.h>
-#include <linux/delay.h>
+#include <linux/atomic.h>
#include <linux/completion.h>
-#include <linux/mii.h>
-#include <linux/pci.h>
-#include <linux/ctype.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
#include <linux/timer.h>
-#include <linux/atomic.h>
-#include <linux/io.h>
#include <linux/uaccess.h>
static int wep_on_off;
@@ -43,7 +28,8 @@ static int wep_on_off;
#include <net/iw_handler.h> /* New driver API */
/* Frequency list (map channels to frequencies) */
-static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+static const long frequency_list[] = {
+ 2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484
};
@@ -55,17 +41,6 @@ struct wep_key {
u8 key[16]; /* 40-bit and 104-bit keys */
};
-/* Backward compatibility */
-#ifndef IW_ENCODE_NOKEY
-#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
-#define IW_ENCODE_MODE (IW_ENCODE_DISABLED | IW_ENCODE_RESTRICTED | IW_ENCODE_OPEN)
-#endif /* IW_ENCODE_NOKEY */
-
-/* List of Wireless Handlers (new API) */
-static const struct iw_handler_def ks_wlan_handler_def;
-
-#define KSC_OPNOTSUPP /* Operation Not Support */
-
/*
* function prototypes
*/
@@ -73,7 +48,7 @@ static int ks_wlan_open(struct net_device *dev);
static void ks_wlan_tx_timeout(struct net_device *dev);
static int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ks_wlan_close(struct net_device *dev);
-static void ks_wlan_set_multicast_list(struct net_device *dev);
+static void ks_wlan_set_rx_mode(struct net_device *dev);
static struct net_device_stats *ks_wlan_get_stats(struct net_device *dev);
static int ks_wlan_set_mac_address(struct net_device *dev, void *addr);
static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
@@ -172,7 +147,8 @@ int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
*/
static int ks_wlan_get_name(struct net_device *dev,
- struct iw_request_info *info, char *cwrq,
+ struct iw_request_info *info,
+ union iwreq_data *cwrq,
char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -182,20 +158,20 @@ static int ks_wlan_get_name(struct net_device *dev,
/* for SLEEP MODE */
if (priv->dev_state < DEVICE_STATE_READY)
- strcpy(cwrq, "NOT READY!");
+ strcpy(cwrq->name, "NOT READY!");
else if (priv->reg.phy_type == D_11B_ONLY_MODE)
- strcpy(cwrq, "IEEE 802.11b");
+ strcpy(cwrq->name, "IEEE 802.11b");
else if (priv->reg.phy_type == D_11G_ONLY_MODE)
- strcpy(cwrq, "IEEE 802.11g");
+ strcpy(cwrq->name, "IEEE 802.11g");
else
- strcpy(cwrq, "IEEE 802.11b/g");
+ strcpy(cwrq->name, "IEEE 802.11b/g");
return 0;
}
static int ks_wlan_set_freq(struct net_device *dev,
- struct iw_request_info *info, struct iw_freq *fwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *fwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
int channel;
@@ -205,28 +181,28 @@ static int ks_wlan_set_freq(struct net_device *dev,
/* for SLEEP MODE */
/* If setting by frequency, convert to a channel */
- if ((fwrq->e == 1) &&
- (fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) {
- int f = fwrq->m / 100000;
+ if ((fwrq->freq.e == 1) &&
+ (fwrq->freq.m >= (int)2.412e8) && (fwrq->freq.m <= (int)2.487e8)) {
+ int f = fwrq->freq.m / 100000;
int c = 0;
while ((c < 14) && (f != frequency_list[c]))
c++;
/* Hack to fall through... */
- fwrq->e = 0;
- fwrq->m = c + 1;
+ fwrq->freq.e = 0;
+ fwrq->freq.m = c + 1;
}
/* Setting by channel number */
- if ((fwrq->m > 1000) || (fwrq->e > 0))
+ if ((fwrq->freq.m > 1000) || (fwrq->freq.e > 0))
return -EOPNOTSUPP;
- channel = fwrq->m;
+ channel = fwrq->freq.m;
/* We should do a better check than that,
* based on the card capability !!!
*/
if ((channel < 1) || (channel > 14)) {
netdev_dbg(dev, "%s: New channel value of %d is invalid!\n",
- dev->name, fwrq->m);
+ dev->name, fwrq->freq.m);
return -EINVAL;
}
@@ -238,8 +214,8 @@ static int ks_wlan_set_freq(struct net_device *dev,
}
static int ks_wlan_get_freq(struct net_device *dev,
- struct iw_request_info *info, struct iw_freq *fwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *fwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
int f;
@@ -253,15 +229,15 @@ static int ks_wlan_get_freq(struct net_device *dev,
else
f = (int)priv->reg.channel;
- fwrq->m = frequency_list[f - 1] * 100000;
- fwrq->e = 1;
+ fwrq->freq.m = frequency_list[f - 1] * 100000;
+ fwrq->freq.e = 1;
return 0;
}
static int ks_wlan_set_essid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
size_t len;
@@ -271,12 +247,12 @@ static int ks_wlan_set_essid(struct net_device *dev,
/* for SLEEP MODE */
/* Check if we asked for `any' */
- if (!dwrq->flags) {
+ if (!dwrq->essid.flags) {
/* Just send an empty SSID list */
memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
priv->reg.ssid.size = 0;
} else {
- len = dwrq->length;
+ len = dwrq->essid.length;
/* iwconfig uses nul termination in SSID.. */
if (len > 0 && extra[len - 1] == '\0')
len--;
@@ -300,7 +276,7 @@ static int ks_wlan_set_essid(struct net_device *dev,
static int ks_wlan_get_essid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -318,18 +294,18 @@ static int ks_wlan_get_essid(struct net_device *dev,
/* If none, we may want to get the one that was set */
/* Push it out ! */
- dwrq->length = priv->reg.ssid.size;
- dwrq->flags = 1; /* active */
+ dwrq->essid.length = priv->reg.ssid.size;
+ dwrq->essid.flags = 1; /* active */
} else {
- dwrq->length = 0;
- dwrq->flags = 0; /* ANY */
+ dwrq->essid.length = 0;
+ dwrq->essid.flags = 0; /* ANY */
}
return 0;
}
static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
+ union iwreq_data *awrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -337,18 +313,16 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
return -EPERM;
/* for SLEEP MODE */
- if (priv->reg.operation_mode == MODE_ADHOC ||
- priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
- memcpy(priv->reg.bssid, &ap_addr->sa_data, ETH_ALEN);
-
- if (is_valid_ether_addr((u8 *)priv->reg.bssid))
- priv->need_commit |= SME_MODE_SET;
-
- } else {
+ if (priv->reg.operation_mode != MODE_ADHOC &&
+ priv->reg.operation_mode != MODE_INFRASTRUCTURE) {
eth_zero_addr(priv->reg.bssid);
return -EOPNOTSUPP;
}
+ ether_addr_copy(priv->reg.bssid, awrq->ap_addr.sa_data);
+ if (is_valid_ether_addr((u8 *)priv->reg.bssid))
+ priv->need_commit |= SME_MODE_SET;
+
netdev_dbg(dev, "bssid = %pM\n", priv->reg.bssid);
/* Write it to the card */
@@ -360,7 +334,7 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
}
static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
- struct sockaddr *awrq, char *extra)
+ union iwreq_data *awrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -369,18 +343,18 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
/* for SLEEP MODE */
if (is_connect_status(priv->connect_status))
- memcpy(awrq->sa_data, priv->current_ap.bssid, ETH_ALEN);
+ ether_addr_copy(awrq->ap_addr.sa_data, priv->current_ap.bssid);
else
- eth_zero_addr(awrq->sa_data);
+ eth_zero_addr(awrq->ap_addr.sa_data);
- awrq->sa_family = ARPHRD_ETHER;
+ awrq->ap_addr.sa_family = ARPHRD_ETHER;
return 0;
}
static int ks_wlan_set_nick(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *dwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -389,18 +363,18 @@ static int ks_wlan_set_nick(struct net_device *dev,
/* for SLEEP MODE */
/* Check the size of the string */
- if (dwrq->length > 16 + 1)
+ if (dwrq->data.length > 16 + 1)
return -E2BIG;
memset(priv->nick, 0, sizeof(priv->nick));
- memcpy(priv->nick, extra, dwrq->length);
+ memcpy(priv->nick, extra, dwrq->data.length);
return -EINPROGRESS; /* Call commit handler */
}
static int ks_wlan_get_nick(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *dwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -410,14 +384,14 @@ static int ks_wlan_get_nick(struct net_device *dev,
/* for SLEEP MODE */
strncpy(extra, priv->nick, 16);
extra[16] = '\0';
- dwrq->length = strlen(extra) + 1;
+ dwrq->data.length = strlen(extra) + 1;
return 0;
}
static int ks_wlan_set_rate(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
int i = 0;
@@ -427,17 +401,17 @@ static int ks_wlan_set_rate(struct net_device *dev,
/* for SLEEP MODE */
if (priv->reg.phy_type == D_11B_ONLY_MODE) {
- if (vwrq->fixed == 1) {
- switch (vwrq->value) {
+ if (vwrq->bitrate.fixed == 1) {
+ switch (vwrq->bitrate.value) {
case 11000000:
case 5500000:
priv->reg.rate_set.body[0] =
- (uint8_t)(vwrq->value / 500000);
+ (u8)(vwrq->bitrate.value / 500000);
break;
case 2000000:
case 1000000:
priv->reg.rate_set.body[0] =
- ((uint8_t)(vwrq->value / 500000)) |
+ ((u8)(vwrq->bitrate.value / 500000)) |
BASIC_RATE;
break;
default:
@@ -446,8 +420,8 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.tx_rate = TX_RATE_FIXED;
priv->reg.rate_set.size = 1;
} else { /* vwrq->fixed == 0 */
- if (vwrq->value > 0) {
- switch (vwrq->value) {
+ if (vwrq->bitrate.value > 0) {
+ switch (vwrq->bitrate.value) {
case 11000000:
priv->reg.rate_set.body[3] =
TX_RATE_11M;
@@ -484,15 +458,15 @@ static int ks_wlan_set_rate(struct net_device *dev,
}
}
} else { /* D_11B_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
- if (vwrq->fixed == 1) {
- switch (vwrq->value) {
+ if (vwrq->bitrate.fixed == 1) {
+ switch (vwrq->bitrate.value) {
case 54000000:
case 48000000:
case 36000000:
case 18000000:
case 9000000:
priv->reg.rate_set.body[0] =
- (uint8_t)(vwrq->value / 500000);
+ (u8)(vwrq->bitrate.value / 500000);
break;
case 24000000:
case 12000000:
@@ -502,7 +476,7 @@ static int ks_wlan_set_rate(struct net_device *dev,
case 2000000:
case 1000000:
priv->reg.rate_set.body[0] =
- ((uint8_t)(vwrq->value / 500000)) |
+ ((u8)(vwrq->bitrate.value / 500000)) |
BASIC_RATE;
break;
default:
@@ -511,8 +485,8 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.tx_rate = TX_RATE_FIXED;
priv->reg.rate_set.size = 1;
} else { /* vwrq->fixed == 0 */
- if (vwrq->value > 0) {
- switch (vwrq->value) {
+ if (vwrq->bitrate.value > 0) {
+ switch (vwrq->bitrate.value) {
case 54000000:
priv->reg.rate_set.body[11] =
TX_RATE_54M;
@@ -534,7 +508,7 @@ static int ks_wlan_set_rate(struct net_device *dev,
case 11000000:
case 9000000:
case 6000000:
- if (vwrq->value == 24000000) {
+ if (vwrq->bitrate.value == 24000000) {
priv->reg.rate_set.body[8] =
TX_RATE_18M;
i++;
@@ -553,7 +527,7 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.rate_set.body[3] =
TX_RATE_11M | BASIC_RATE;
i++;
- } else if (vwrq->value == 18000000) {
+ } else if (vwrq->bitrate.value == 18000000) {
priv->reg.rate_set.body[7] =
TX_RATE_18M;
i++;
@@ -569,7 +543,7 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.rate_set.body[3] =
TX_RATE_11M | BASIC_RATE;
i++;
- } else if (vwrq->value == 12000000) {
+ } else if (vwrq->bitrate.value == 12000000) {
priv->reg.rate_set.body[6] =
TX_RATE_9M;
i++;
@@ -582,7 +556,7 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.rate_set.body[3] =
TX_RATE_11M | BASIC_RATE;
i++;
- } else if (vwrq->value == 11000000) {
+ } else if (vwrq->bitrate.value == 11000000) {
priv->reg.rate_set.body[5] =
TX_RATE_9M;
i++;
@@ -592,7 +566,7 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.rate_set.body[3] =
TX_RATE_11M | BASIC_RATE;
i++;
- } else if (vwrq->value == 9000000) {
+ } else if (vwrq->bitrate.value == 9000000) {
priv->reg.rate_set.body[4] =
TX_RATE_9M;
i++;
@@ -657,8 +631,8 @@ static int ks_wlan_set_rate(struct net_device *dev,
}
static int ks_wlan_get_rate(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -672,26 +646,23 @@ static int ks_wlan_get_rate(struct net_device *dev,
if (!atomic_read(&update_phyinfo))
ks_wlan_update_phy_information(priv);
- vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000;
- if (priv->reg.tx_rate == TX_RATE_FIXED)
- vwrq->fixed = 1;
- else
- vwrq->fixed = 0;
+ vwrq->bitrate.value = ((priv->current_rate) & RATE_MASK) * 500000;
+ vwrq->bitrate.fixed = (priv->reg.tx_rate == TX_RATE_FIXED) ? 1 : 0;
return 0;
}
static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- int rthr = vwrq->value;
+ int rthr = vwrq->rts.value;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (vwrq->disabled)
+ if (vwrq->rts.disabled)
rthr = 2347;
if ((rthr < 0) || (rthr > 2347))
return -EINVAL;
@@ -703,7 +674,7 @@ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
}
static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -711,25 +682,25 @@ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
return -EPERM;
/* for SLEEP MODE */
- vwrq->value = priv->reg.rts;
- vwrq->disabled = (vwrq->value >= 2347);
- vwrq->fixed = 1;
+ vwrq->rts.value = priv->reg.rts;
+ vwrq->rts.disabled = (vwrq->rts.value >= 2347);
+ vwrq->rts.fixed = 1;
return 0;
}
static int ks_wlan_set_frag(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- int fthr = vwrq->value;
+ int fthr = vwrq->frag.value;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (vwrq->disabled)
+ if (vwrq->frag.disabled)
fthr = 2346;
if ((fthr < 256) || (fthr > 2346))
return -EINVAL;
@@ -742,8 +713,8 @@ static int ks_wlan_set_frag(struct net_device *dev,
}
static int ks_wlan_get_frag(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -751,134 +722,99 @@ static int ks_wlan_get_frag(struct net_device *dev,
return -EPERM;
/* for SLEEP MODE */
- vwrq->value = priv->reg.fragment;
- vwrq->disabled = (vwrq->value >= 2346);
- vwrq->fixed = 1;
+ vwrq->frag.value = priv->reg.fragment;
+ vwrq->frag.disabled = (vwrq->frag.value >= 2346);
+ vwrq->frag.fixed = 1;
return 0;
}
static int ks_wlan_set_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *uwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- /* for SLEEP MODE */
- switch (*uwrq) {
- case IW_MODE_ADHOC:
- priv->reg.operation_mode = MODE_ADHOC;
- priv->need_commit |= SME_MODE_SET;
- break;
- case IW_MODE_INFRA:
- priv->reg.operation_mode = MODE_INFRASTRUCTURE;
- priv->need_commit |= SME_MODE_SET;
- break;
- case IW_MODE_AUTO:
- case IW_MODE_MASTER:
- case IW_MODE_REPEAT:
- case IW_MODE_SECOND:
- case IW_MODE_MONITOR:
- default:
+ if (uwrq->mode != IW_MODE_ADHOC &&
+ uwrq->mode != IW_MODE_INFRA)
return -EINVAL;
- }
+
+ priv->reg.operation_mode = (uwrq->mode == IW_MODE_ADHOC) ?
+ MODE_ADHOC : MODE_INFRASTRUCTURE;
+ priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
}
static int ks_wlan_get_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *uwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- /* for SLEEP MODE */
/* If not managed, assume it's ad-hoc */
- switch (priv->reg.operation_mode) {
- case MODE_INFRASTRUCTURE:
- *uwrq = IW_MODE_INFRA;
- break;
- case MODE_ADHOC:
- *uwrq = IW_MODE_ADHOC;
- break;
- default:
- *uwrq = IW_MODE_ADHOC;
- }
+ uwrq->mode = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ?
+ IW_MODE_INFRA : IW_MODE_ADHOC;
return 0;
}
static int ks_wlan_set_encode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
-
+ struct iw_point *enc = &dwrq->encoding;
struct wep_key key;
- int index = (dwrq->flags & IW_ENCODE_INDEX);
- int current_index = priv->reg.wep_index;
- int i;
+ int index = (enc->flags & IW_ENCODE_INDEX);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
+ if (enc->length > MAX_KEY_SIZE)
+ return -EINVAL;
+
/* for SLEEP MODE */
- /* index check */
if ((index < 0) || (index > 4))
return -EINVAL;
- else if (index == 0)
- index = current_index;
- else
- index--;
+
+ index = (index == 0) ? priv->reg.wep_index : (index - 1);
/* Is WEP supported ? */
/* Basic checking: do we have a key to set ? */
- if (dwrq->length > 0) {
- if (dwrq->length > MAX_KEY_SIZE) { /* Check the size of the key */
- return -EINVAL;
- }
- if (dwrq->length > MIN_KEY_SIZE) { /* Set the length */
- key.len = MAX_KEY_SIZE;
- priv->reg.privacy_invoked = 0x01;
- priv->need_commit |= SME_WEP_FLAG;
- wep_on_off = WEP_ON_128BIT;
- } else {
- if (dwrq->length > 0) {
- key.len = MIN_KEY_SIZE;
- priv->reg.privacy_invoked = 0x01;
- priv->need_commit |= SME_WEP_FLAG;
- wep_on_off = WEP_ON_64BIT;
- } else { /* Disable the key */
- key.len = 0;
- }
- }
+ if (enc->length > 0) {
+ key.len = (enc->length > MIN_KEY_SIZE) ?
+ MAX_KEY_SIZE : MIN_KEY_SIZE;
+ priv->reg.privacy_invoked = 0x01;
+ priv->need_commit |= SME_WEP_FLAG;
+ wep_on_off = (enc->length > MIN_KEY_SIZE) ?
+ WEP_ON_128BIT : WEP_ON_64BIT;
/* Check if the key is not marked as invalid */
- if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
- /* Cleanup */
- memset(key.key, 0, MAX_KEY_SIZE);
- /* Copy the key in the driver */
- if (copy_from_user
- (key.key, dwrq->pointer, dwrq->length)) {
- key.len = 0;
- return -EFAULT;
- }
- /* Send the key to the card */
- priv->reg.wep_key[index].size = key.len;
- for (i = 0; i < (priv->reg.wep_key[index].size); i++)
- priv->reg.wep_key[index].val[i] = key.key[i];
+ if (enc->flags & IW_ENCODE_NOKEY)
+ return 0;
- priv->need_commit |= (SME_WEP_VAL1 << index);
- priv->reg.wep_index = index;
- priv->need_commit |= SME_WEP_INDEX;
+ /* Cleanup */
+ memset(key.key, 0, MAX_KEY_SIZE);
+ /* Copy the key in the driver */
+ if (copy_from_user(key.key, enc->pointer, enc->length)) {
+ key.len = 0;
+ return -EFAULT;
}
+ /* Send the key to the card */
+ priv->reg.wep_key[index].size = key.len;
+ memcpy(&priv->reg.wep_key[index].val[0], &key.key[0],
+ priv->reg.wep_key[index].size);
+ priv->need_commit |= (SME_WEP_VAL1 << index);
+ priv->reg.wep_index = index;
+ priv->need_commit |= SME_WEP_INDEX;
} else {
- if (dwrq->flags & IW_ENCODE_DISABLED) {
+ if (enc->flags & IW_ENCODE_DISABLED) {
priv->reg.wep_key[0].size = 0;
priv->reg.wep_key[1].size = 0;
priv->reg.wep_key[2].size = 0;
@@ -891,35 +827,29 @@ static int ks_wlan_set_encode(struct net_device *dev,
wep_on_off = WEP_OFF;
priv->need_commit |= SME_WEP_FLAG;
} else {
- /* Do we want to just set the transmit key index ? */
- if ((index >= 0) && (index < 4)) {
- /* set_wep_key(priv, index, 0, 0, 1); xxx */
- if (priv->reg.wep_key[index].size != 0) {
- priv->reg.wep_index = index;
- priv->need_commit |= SME_WEP_INDEX;
- } else {
- return -EINVAL;
- }
- }
+ /* set_wep_key(priv, index, 0, 0, 1); xxx */
+ if (priv->reg.wep_key[index].size == 0)
+ return -EINVAL;
+ priv->reg.wep_index = index;
+ priv->need_commit |= SME_WEP_INDEX;
}
}
/* Commit the changes if needed */
- if (dwrq->flags & IW_ENCODE_MODE)
+ if (enc->flags & IW_ENCODE_MODE)
priv->need_commit |= SME_WEP_FLAG;
- if (dwrq->flags & IW_ENCODE_OPEN) {
+ if (enc->flags & IW_ENCODE_OPEN) {
if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
priv->need_commit |= SME_MODE_SET;
priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
- } else if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+ } else if (enc->flags & IW_ENCODE_RESTRICTED) {
if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM)
priv->need_commit |= SME_MODE_SET;
priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
}
-// return -EINPROGRESS; /* Call commit handler */
if (priv->need_commit) {
ks_wlan_setup_parameter(priv, priv->need_commit);
priv->need_commit = 0;
@@ -929,104 +859,49 @@ static int ks_wlan_set_encode(struct net_device *dev,
static int ks_wlan_get_encode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- char zeros[16];
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ struct iw_point *enc = &dwrq->encoding;
+ int index = (enc->flags & IW_ENCODE_INDEX) - 1;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- dwrq->flags = IW_ENCODE_DISABLED;
+ enc->flags = IW_ENCODE_DISABLED;
/* Check encryption mode */
switch (priv->reg.authenticate_type) {
case AUTH_TYPE_OPEN_SYSTEM:
- dwrq->flags = IW_ENCODE_OPEN;
+ enc->flags = IW_ENCODE_OPEN;
break;
case AUTH_TYPE_SHARED_KEY:
- dwrq->flags = IW_ENCODE_RESTRICTED;
+ enc->flags = IW_ENCODE_RESTRICTED;
break;
}
- memset(zeros, 0, sizeof(zeros));
-
/* Which key do we want ? -1 -> tx index */
if ((index < 0) || (index >= 4))
index = priv->reg.wep_index;
if (priv->reg.privacy_invoked) {
- dwrq->flags &= ~IW_ENCODE_DISABLED;
+ enc->flags &= ~IW_ENCODE_DISABLED;
/* dwrq->flags |= IW_ENCODE_NOKEY; */
}
- dwrq->flags |= index + 1;
+ enc->flags |= index + 1;
/* Copy the key to the user buffer */
- if ((index >= 0) && (index < 4))
- dwrq->length = priv->reg.wep_key[index].size;
- if (dwrq->length > 16)
- dwrq->length = 0;
-#if 1 /* IW_ENCODE_NOKEY; */
- if (dwrq->length) {
- if ((index >= 0) && (index < 4))
- memcpy(extra, priv->reg.wep_key[index].val,
- dwrq->length);
- } else {
- memcpy(extra, zeros, dwrq->length);
+ if (index >= 0 && index < 4) {
+ enc->length = (priv->reg.wep_key[index].size <= 16) ?
+ priv->reg.wep_key[index].size : 0;
+ memcpy(extra, priv->reg.wep_key[index].val, enc->length);
}
-#endif
- return 0;
-}
-
-#ifndef KSC_OPNOTSUPP
-static int ks_wlan_set_txpow(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- return -EOPNOTSUPP; /* Not Support */
-}
-
-static int ks_wlan_get_txpow(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- /* Not Support */
- vwrq->value = 0;
- vwrq->disabled = (vwrq->value == 0);
- vwrq->fixed = 1;
return 0;
}
-static int ks_wlan_set_retry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- return -EOPNOTSUPP; /* Not Support */
-}
-
-static int ks_wlan_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* Not Support */
- vwrq->value = 0;
- vwrq->disabled = (vwrq->value == 0);
- vwrq->fixed = 1;
- return 0;
-}
-#endif /* KSC_OPNOTSUPP */
-
static int ks_wlan_get_range(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_range *range = (struct iw_range *)extra;
@@ -1036,7 +911,7 @@ static int ks_wlan_get_range(struct net_device *dev,
return -EPERM;
/* for SLEEP MODE */
- dwrq->length = sizeof(struct iw_range);
+ dwrq->data.length = sizeof(struct iw_range);
memset(range, 0, sizeof(*range));
range->min_nwid = 0x0000;
range->max_nwid = 0x0000;
@@ -1051,7 +926,8 @@ static int ks_wlan_get_range(struct net_device *dev,
range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
}
range->num_frequency = k;
- if (priv->reg.phy_type == D_11B_ONLY_MODE || priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) { /* channel 14 */
+ if (priv->reg.phy_type == D_11B_ONLY_MODE ||
+ priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) { /* channel 14 */
range->freq[13].i = 14; /* List index */
range->freq[13].m = frequency_list[13] * 100000;
range->freq[13].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
@@ -1156,20 +1032,19 @@ static int ks_wlan_get_range(struct net_device *dev,
static int ks_wlan_set_power(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- if (vwrq->disabled) {
+ if (vwrq->power.disabled) {
priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
} else {
- if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
- priv->reg.power_mgmt = POWER_MGMT_SAVE1;
- else
+ if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
return -EINVAL;
+ priv->reg.power_mgmt = POWER_MGMT_SAVE1;
}
hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
@@ -1179,76 +1054,50 @@ static int ks_wlan_set_power(struct net_device *dev,
static int ks_wlan_get_power(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (priv->reg.power_mgmt > 0)
- vwrq->disabled = 0;
- else
- vwrq->disabled = 1;
+ vwrq->power.disabled = (priv->reg.power_mgmt <= 0);
return 0;
}
static int ks_wlan_get_iwstats(struct net_device *dev,
struct iw_request_info *info,
- struct iw_quality *vwrq, char *extra)
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- vwrq->qual = 0; /* not supported */
- vwrq->level = priv->wstats.qual.level;
- vwrq->noise = 0; /* not supported */
- vwrq->updated = 0;
+ vwrq->qual.qual = 0; /* not supported */
+ vwrq->qual.level = priv->wstats.qual.level;
+ vwrq->qual.noise = 0; /* not supported */
+ vwrq->qual.updated = 0;
return 0;
}
-#ifndef KSC_OPNOTSUPP
-
-static int ks_wlan_set_sens(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq,
- char *extra)
-{
- return -EOPNOTSUPP; /* Not Support */
-}
-
-static int ks_wlan_get_sens(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq,
- char *extra)
-{
- /* Not Support */
- vwrq->value = 0;
- vwrq->disabled = (vwrq->value == 0);
- vwrq->fixed = 1;
- return 0;
-}
-#endif /* KSC_OPNOTSUPP */
-
/* Note : this is deprecated in favor of IWSCAN */
static int ks_wlan_get_aplist(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
struct sockaddr *address = (struct sockaddr *)extra;
struct iw_quality qual[LOCAL_APLIST_MAX];
-
int i;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
for (i = 0; i < priv->aplist.size; i++) {
- memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]),
- ETH_ALEN);
+ ether_addr_copy(address[i].sa_data, priv->aplist.ap[i].bssid);
address[i].sa_family = ARPHRD_ETHER;
qual[i].level = 256 - priv->aplist.ap[i].rssi;
qual[i].qual = priv->aplist.ap[i].sq;
@@ -1256,11 +1105,11 @@ static int ks_wlan_get_aplist(struct net_device *dev,
qual[i].updated = 7;
}
if (i) {
- dwrq->flags = 1; /* Should be define'd */
+ dwrq->data.flags = 1; /* Should be define'd */
memcpy(extra + sizeof(struct sockaddr) * i,
&qual, sizeof(struct iw_quality) * i);
}
- dwrq->length = i;
+ dwrq->data.length = i;
return 0;
}
@@ -1294,6 +1143,33 @@ static int ks_wlan_set_scan(struct net_device *dev,
return 0;
}
+static char *ks_wlan_add_leader_event(const char *rsn_leader, char *end_buf,
+ char *current_ev, struct rsn_ie *rsn,
+ struct iw_event *iwe,
+ struct iw_request_info *info)
+{
+ char buffer[RSN_IE_BODY_MAX * 2 + 30];
+ char *pbuf;
+ int i;
+
+ pbuf = &buffer[0];
+ memset(iwe, 0, sizeof(*iwe));
+ iwe->cmd = IWEVCUSTOM;
+ memcpy(buffer, rsn_leader, sizeof(rsn_leader) - 1);
+ iwe->u.data.length += sizeof(rsn_leader) - 1;
+ pbuf += sizeof(rsn_leader) - 1;
+ pbuf += sprintf(pbuf, "%02x", rsn->id);
+ pbuf += sprintf(pbuf, "%02x", rsn->size);
+ iwe->u.data.length += 4;
+
+ for (i = 0; i < rsn->size; i++)
+ pbuf += sprintf(pbuf, "%02x", rsn->body[i]);
+
+ iwe->u.data.length += rsn->size * 2;
+
+ return iwe_stream_add_point(info, current_ev, end_buf, iwe, &buffer[0]);
+}
+
/*
* Translate scan data returned from the card to a card independent
* format that the Wireless Tools will understand - Jean II
@@ -1301,25 +1177,22 @@ static int ks_wlan_set_scan(struct net_device *dev,
static inline char *ks_wlan_translate_scan(struct net_device *dev,
struct iw_request_info *info,
char *current_ev, char *end_buf,
- struct local_ap_t *ap)
+ struct local_ap *ap)
{
/* struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; */
+ static const char rsn_leader[] = "rsn_ie=";
+ static const char wpa_leader[] = "wpa_ie=";
struct iw_event iwe; /* Temporary buffer */
u16 capabilities;
char *current_val; /* For rates */
int i;
- static const char rsn_leader[] = "rsn_ie=";
- static const char wpa_leader[] = "wpa_ie=";
- char buf0[RSN_IE_BODY_MAX * 2 + 30];
- char buf1[RSN_IE_BODY_MAX * 2 + 30];
- char *pbuf;
+
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, ap->bssid, ETH_ALEN);
- current_ev =
- iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_ADDR_LEN);
+ ether_addr_copy(iwe.u.ap_addr.sa_data, ap->bssid);
+ current_ev = iwe_stream_add_event(info, current_ev,
+ end_buf, &iwe, IW_EV_ADDR_LEN);
/* Other entries will be displayed in the order we give them */
@@ -1329,21 +1202,17 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev,
iwe.u.data.length = 32;
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
- current_ev =
- iwe_stream_add_point(info, current_ev, end_buf, &iwe,
- ap->ssid.body);
+ current_ev = iwe_stream_add_point(info, current_ev,
+ end_buf, &iwe, ap->ssid.body);
/* Add mode */
iwe.cmd = SIOCGIWMODE;
capabilities = ap->capability;
if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
- if (capabilities & WLAN_CAPABILITY_ESS)
- iwe.u.mode = IW_MODE_INFRA;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- current_ev =
- iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_UINT_LEN);
+ iwe.u.mode = (capabilities & WLAN_CAPABILITY_ESS) ?
+ IW_MODE_INFRA : IW_MODE_ADHOC;
+ current_ev = iwe_stream_add_event(info, current_ev,
+ end_buf, &iwe, IW_EV_UINT_LEN);
}
/* Add frequency */
@@ -1351,32 +1220,29 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev,
iwe.u.freq.m = ap->channel;
iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
iwe.u.freq.e = 1;
- current_ev =
- iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_FREQ_LEN);
+ current_ev = iwe_stream_add_event(info, current_ev,
+ end_buf, &iwe, IW_EV_FREQ_LEN);
/* Add quality statistics */
iwe.cmd = IWEVQUAL;
iwe.u.qual.level = 256 - ap->rssi;
iwe.u.qual.qual = ap->sq;
iwe.u.qual.noise = 0; /* invalid noise value */
- current_ev =
- iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_QUAL_LEN);
+ current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+ &iwe, IW_EV_QUAL_LEN);
/* Add encryption capability */
iwe.cmd = SIOCGIWENCODE;
- if (capabilities & WLAN_CAPABILITY_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.flags = (capabilities & WLAN_CAPABILITY_PRIVACY) ?
+ (IW_ENCODE_ENABLED | IW_ENCODE_NOKEY) :
+ IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
- current_ev =
- iwe_stream_add_point(info, current_ev, end_buf, &iwe,
- ap->ssid.body);
+ current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+ &iwe, ap->ssid.body);
- /* Rate : stuffing multiple values in a single event require a bit
- * more of magic - Jean II
+ /*
+ * Rate : stuffing multiple values in a single event
+ * require a bit more of magic - Jean II
*/
current_val = current_ev + IW_EV_LCP_LEN;
@@ -1394,71 +1260,34 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev,
/* Bit rate given in 500 kb/s units (+ 0x80) */
iwe.u.bitrate.value = ((ap->rate_set.body[i] & 0x7f) * 500000);
/* Add new value to event */
- current_val =
- iwe_stream_add_value(info, current_ev, current_val, end_buf,
- &iwe, IW_EV_PARAM_LEN);
+ current_val = iwe_stream_add_value(info, current_ev,
+ current_val, end_buf, &iwe,
+ IW_EV_PARAM_LEN);
}
/* Check if we added any event */
if ((current_val - current_ev) > IW_EV_LCP_LEN)
current_ev = current_val;
-#define GENERIC_INFO_ELEM_ID 0xdd
-#define RSN_INFO_ELEM_ID 0x30
- if (ap->rsn_ie.id == RSN_INFO_ELEM_ID && ap->rsn_ie.size != 0) {
- pbuf = &buf0[0];
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- memcpy(buf0, rsn_leader, sizeof(rsn_leader) - 1);
- iwe.u.data.length += sizeof(rsn_leader) - 1;
- pbuf += sizeof(rsn_leader) - 1;
-
- pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.id);
- pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.size);
- iwe.u.data.length += 4;
-
- for (i = 0; i < ap->rsn_ie.size; i++)
- pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.body[i]);
- iwe.u.data.length += (ap->rsn_ie.size) * 2;
-
- netdev_dbg(dev, "ap->rsn.size=%d\n", ap->rsn_ie.size);
-
- current_ev =
- iwe_stream_add_point(info, current_ev, end_buf, &iwe,
- &buf0[0]);
- }
- if (ap->wpa_ie.id == GENERIC_INFO_ELEM_ID && ap->wpa_ie.size != 0) {
- pbuf = &buf1[0];
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- memcpy(buf1, wpa_leader, sizeof(wpa_leader) - 1);
- iwe.u.data.length += sizeof(wpa_leader) - 1;
- pbuf += sizeof(wpa_leader) - 1;
-
- pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.id);
- pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.size);
- iwe.u.data.length += 4;
-
- for (i = 0; i < ap->wpa_ie.size; i++)
- pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.body[i]);
- iwe.u.data.length += (ap->wpa_ie.size) * 2;
-
- netdev_dbg(dev, "ap->rsn.size=%d\n", ap->wpa_ie.size);
- netdev_dbg(dev, "iwe.u.data.length=%d\n", iwe.u.data.length);
-
- current_ev =
- iwe_stream_add_point(info, current_ev, end_buf, &iwe,
- &buf1[0]);
- }
+ if (ap->rsn_ie.id == RSN_INFO_ELEM_ID && ap->rsn_ie.size != 0)
+ current_ev = ks_wlan_add_leader_event(rsn_leader, end_buf,
+ current_ev, &ap->rsn_ie,
+ &iwe, info);
+
+ if (ap->wpa_ie.id == WPA_INFO_ELEM_ID && ap->wpa_ie.size != 0)
+ current_ev = ks_wlan_add_leader_event(wpa_leader, end_buf,
+ current_ev, &ap->wpa_ie,
+ &iwe, info);
- /* The other data in the scan result are not really
+ /*
+ * The other data in the scan result are not really
* interesting, so for now drop it - Jean II
*/
return current_ev;
}
static int ks_wlan_get_scan(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *dwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
int i;
@@ -1479,25 +1308,26 @@ static int ks_wlan_get_scan(struct net_device *dev,
/* Read and parse all entries */
for (i = 0; i < priv->aplist.size; i++) {
- if ((extra + dwrq->length) - current_ev <= IW_EV_ADDR_LEN) {
- dwrq->length = 0;
+ if ((extra + dwrq->data.length) - current_ev <= IW_EV_ADDR_LEN) {
+ dwrq->data.length = 0;
return -E2BIG;
}
/* Translate to WE format this entry */
current_ev = ks_wlan_translate_scan(dev, info, current_ev,
- extra + dwrq->length,
+ extra + dwrq->data.length,
&priv->aplist.ap[i]);
}
/* Length of data */
- dwrq->length = (current_ev - extra);
- dwrq->flags = 0;
+ dwrq->data.length = (current_ev - extra);
+ dwrq->data.flags = 0;
return 0;
}
/* called after a bunch of SET operations */
static int ks_wlan_config_commit(struct net_device *dev,
- struct iw_request_info *info, void *zwrq,
+ struct iw_request_info *info,
+ union iwreq_data *zwrq,
char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -1513,7 +1343,7 @@ static int ks_wlan_config_commit(struct net_device *dev,
/* set association ie params */
static int ks_wlan_set_genie(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -1526,11 +1356,12 @@ static int ks_wlan_set_genie(struct net_device *dev,
static int ks_wlan_set_auth_mode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- int index = (vwrq->flags & IW_AUTH_INDEX);
- int value = vwrq->value;
+ struct iw_param *param = &vwrq->param;
+ int index = (param->flags & IW_AUTH_INDEX);
+ int value = param->value;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -1541,14 +1372,14 @@ static int ks_wlan_set_auth_mode(struct net_device *dev,
case IW_AUTH_WPA_VERSION_DISABLED:
priv->wpa.version = value;
if (priv->wpa.rsn_enabled)
- priv->wpa.rsn_enabled = 0;
+ priv->wpa.rsn_enabled = false;
priv->need_commit |= SME_RSN;
break;
case IW_AUTH_WPA_VERSION_WPA:
case IW_AUTH_WPA_VERSION_WPA2:
priv->wpa.version = value;
if (!(priv->wpa.rsn_enabled))
- priv->wpa.rsn_enabled = 1;
+ priv->wpa.rsn_enabled = true;
priv->need_commit |= SME_RSN;
break;
default:
@@ -1658,10 +1489,11 @@ static int ks_wlan_set_auth_mode(struct net_device *dev,
static int ks_wlan_get_auth_mode(struct net_device *dev,
struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+ union iwreq_data *vwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- int index = (vwrq->flags & IW_AUTH_INDEX);
+ struct iw_param *param = &vwrq->param;
+ int index = (param->flags & IW_AUTH_INDEX);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -1670,22 +1502,22 @@ static int ks_wlan_get_auth_mode(struct net_device *dev,
/* WPA (not used ?? wpa_supplicant) */
switch (index) {
case IW_AUTH_WPA_VERSION:
- vwrq->value = priv->wpa.version;
+ param->value = priv->wpa.version;
break;
case IW_AUTH_CIPHER_PAIRWISE:
- vwrq->value = priv->wpa.pairwise_suite;
+ param->value = priv->wpa.pairwise_suite;
break;
case IW_AUTH_CIPHER_GROUP:
- vwrq->value = priv->wpa.group_suite;
+ param->value = priv->wpa.group_suite;
break;
case IW_AUTH_KEY_MGMT:
- vwrq->value = priv->wpa.key_mgmt_suite;
+ param->value = priv->wpa.key_mgmt_suite;
break;
case IW_AUTH_80211_AUTH_ALG:
- vwrq->value = priv->wpa.auth_alg;
+ param->value = priv->wpa.auth_alg;
break;
case IW_AUTH_WPA_ENABLED:
- vwrq->value = priv->wpa.rsn_enabled;
+ param->value = priv->wpa.rsn_enabled;
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* OK??? */
case IW_AUTH_TKIP_COUNTERMEASURES:
@@ -1700,13 +1532,13 @@ static int ks_wlan_get_auth_mode(struct net_device *dev,
/* set encoding token & mode (WPA)*/
static int ks_wlan_set_encode_ext(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_encode_ext *enc;
- int index = dwrq->flags & IW_ENCODE_INDEX;
+ int index = dwrq->encoding.flags & IW_ENCODE_INDEX;
unsigned int commit = 0;
- struct wpa_key_t *key;
+ struct wpa_key *key;
enc = (struct iw_encode_ext *)extra;
if (!enc)
@@ -1721,7 +1553,7 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
index--;
key = &priv->wpa.key[index];
- if (dwrq->flags & IW_ENCODE_DISABLED)
+ if (dwrq->encoding.flags & IW_ENCODE_DISABLED)
key->key_len = 0;
key->ext_flags = enc->ext_flags;
@@ -1732,7 +1564,7 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
memcpy(&key->rx_seq[0], &enc->rx_seq[0], IW_ENCODE_SEQ_MAX_SIZE);
}
- memcpy(&key->addr.sa_data[0], &enc->addr.sa_data[0], ETH_ALEN);
+ ether_addr_copy(&key->addr.sa_data[0], &enc->addr.sa_data[0]);
switch (enc->alg) {
case IW_ENCODE_ALG_NONE:
@@ -1793,7 +1625,7 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
/* get encoding token & mode (WPA)*/
static int ks_wlan_get_encode_ext(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -1813,12 +1645,12 @@ static int ks_wlan_get_encode_ext(struct net_device *dev,
static int ks_wlan_set_pmksa(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_pmksa *pmksa;
int i;
- struct pmk_t *pmk;
+ struct pmk *pmk;
struct list_head *ptr;
if (priv->sleep_mode == SLP_SLEEP)
@@ -1832,68 +1664,65 @@ static int ks_wlan_set_pmksa(struct net_device *dev,
switch (pmksa->cmd) {
case IW_PMKSA_ADD:
- if (list_empty(&priv->pmklist.head)) { /* new list */
+ if (list_empty(&priv->pmklist.head)) {
for (i = 0; i < PMK_LIST_MAX; i++) {
pmk = &priv->pmklist.pmk[i];
- if (memcmp("\x00\x00\x00\x00\x00\x00",
- pmk->bssid, ETH_ALEN) == 0)
- break; /* loop */
+ if (is_zero_ether_addr(pmk->bssid))
+ break;
}
- memcpy(pmk->bssid, pmksa->bssid.sa_data, ETH_ALEN);
+ ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
list_add(&pmk->list, &priv->pmklist.head);
priv->pmklist.size++;
- break; /* case */
+ break;
}
/* search cache data */
list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk_t, list);
- if (memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN) == 0) {
+ pmk = list_entry(ptr, struct pmk, list);
+ if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) {
memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
list_move(&pmk->list, &priv->pmklist.head);
- break; /* list_for_each */
+ break;
}
}
- if (ptr != &priv->pmklist.head) /* not find address. */
- break; /* case */
-
- if (priv->pmklist.size < PMK_LIST_MAX) { /* new cache data */
+ /* not find address. */
+ if (ptr != &priv->pmklist.head)
+ break;
+ /* new cache data */
+ if (priv->pmklist.size < PMK_LIST_MAX) {
for (i = 0; i < PMK_LIST_MAX; i++) {
pmk = &priv->pmklist.pmk[i];
- if (memcmp("\x00\x00\x00\x00\x00\x00",
- pmk->bssid, ETH_ALEN) == 0)
- break; /* loop */
+ if (is_zero_ether_addr(pmk->bssid))
+ break;
}
- memcpy(pmk->bssid, pmksa->bssid.sa_data, ETH_ALEN);
+ ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
list_add(&pmk->list, &priv->pmklist.head);
priv->pmklist.size++;
- } else { /* overwrite old cache data */
- pmk = list_entry(priv->pmklist.head.prev, struct pmk_t,
+ } else { /* overwrite old cache data */
+ pmk = list_entry(priv->pmklist.head.prev, struct pmk,
list);
- memcpy(pmk->bssid, pmksa->bssid.sa_data, ETH_ALEN);
+ ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
list_move(&pmk->list, &priv->pmklist.head);
}
break;
case IW_PMKSA_REMOVE:
- if (list_empty(&priv->pmklist.head)) { /* list empty */
+ if (list_empty(&priv->pmklist.head))
return -EINVAL;
- }
/* search cache data */
list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk_t, list);
- if (memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN) == 0) {
+ pmk = list_entry(ptr, struct pmk, list);
+ if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) {
eth_zero_addr(pmk->bssid);
memset(pmk->pmkid, 0, IW_PMKID_LEN);
list_del_init(&pmk->list);
break;
}
}
- if (ptr == &priv->pmklist.head) { /* not find address. */
+ /* not find address. */
+ if (ptr == &priv->pmklist.head)
return 0;
- }
-
break;
case IW_PMKSA_FLUSH:
memset(&priv->pmklist, 0, sizeof(priv->pmklist));
@@ -1914,14 +1743,11 @@ static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_statistics *wstats = &priv->wstats;
- if (!atomic_read(&update_phyinfo)) {
- if (priv->dev_state < DEVICE_STATE_READY)
- return NULL; /* not finished initialize */
- else
- return wstats;
- }
+ if (!atomic_read(&update_phyinfo))
+ return (priv->dev_state < DEVICE_STATE_READY) ? NULL : wstats;
- /* Packets discarded in the wireless adapter due to wireless
+ /*
+ * Packets discarded in the wireless adapter due to wireless
* specific problems
*/
wstats->discard.nwid = 0; /* Rx invalid nwid */
@@ -1953,28 +1779,25 @@ static int ks_wlan_set_stop_request(struct net_device *dev,
#include <linux/ieee80211.h>
static int ks_wlan_set_mlme(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *dwrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *dwrq, char *extra)
{
struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- __u32 mode;
+ __u32 mode = 1;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- /* for SLEEP MODE */
- switch (mlme->cmd) {
- case IW_MLME_DEAUTH:
- if (mlme->reason_code == WLAN_REASON_MIC_FAILURE)
- return 0;
- /* fall through */
- case IW_MLME_DISASSOC:
- mode = 1;
- return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
- default:
- return -EOPNOTSUPP; /* Not Support */
- }
+ if (mlme->cmd != IW_MLME_DEAUTH &&
+ mlme->cmd != IW_MLME_DISASSOC)
+ return -EOPNOTSUPP;
+
+ if (mlme->cmd == IW_MLME_DEAUTH &&
+ mlme->reason_code == WLAN_REASON_MIC_FAILURE)
+ return 0;
+
+ return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
}
static int ks_wlan_get_firmware_version(struct net_device *dev,
@@ -1998,14 +1821,10 @@ static int ks_wlan_set_preamble(struct net_device *dev,
return -EPERM;
/* for SLEEP MODE */
- if (*uwrq == LONG_PREAMBLE) { /* 0 */
- priv->reg.preamble = LONG_PREAMBLE;
- } else if (*uwrq == SHORT_PREAMBLE) { /* 1 */
- priv->reg.preamble = SHORT_PREAMBLE;
- } else {
+ if (*uwrq != LONG_PREAMBLE && *uwrq != SHORT_PREAMBLE)
return -EINVAL;
- }
+ priv->reg.preamble = *uwrq;
priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
}
@@ -2033,23 +1852,16 @@ static int ks_wlan_set_power_mgmt(struct net_device *dev,
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- /* for SLEEP MODE */
- if (*uwrq == POWER_MGMT_ACTIVE) { /* 0 */
- priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
- } else if (*uwrq == POWER_MGMT_SAVE1) { /* 1 */
- if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
- priv->reg.power_mgmt = POWER_MGMT_SAVE1;
- else
- return -EINVAL;
- } else if (*uwrq == POWER_MGMT_SAVE2) { /* 2 */
- if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
- priv->reg.power_mgmt = POWER_MGMT_SAVE2;
- else
- return -EINVAL;
- } else {
+ if (*uwrq != POWER_MGMT_ACTIVE &&
+ *uwrq != POWER_MGMT_SAVE1 &&
+ *uwrq != POWER_MGMT_SAVE2)
+ return -EINVAL;
+
+ if ((*uwrq == POWER_MGMT_SAVE1 || *uwrq == POWER_MGMT_SAVE2) &&
+ (priv->reg.operation_mode != MODE_INFRASTRUCTURE))
return -EINVAL;
- }
+ priv->reg.power_mgmt = *uwrq;
hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
return 0;
@@ -2078,14 +1890,11 @@ static int ks_wlan_set_scan_type(struct net_device *dev,
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (*uwrq == ACTIVE_SCAN) { /* 0 */
- priv->reg.scan_type = ACTIVE_SCAN;
- } else if (*uwrq == PASSIVE_SCAN) { /* 1 */
- priv->reg.scan_type = PASSIVE_SCAN;
- } else {
+
+ if (*uwrq != ACTIVE_SCAN && *uwrq != PASSIVE_SCAN)
return -EINVAL;
- }
+ priv->reg.scan_type = *uwrq;
return 0;
}
@@ -2111,17 +1920,17 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev,
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX)
- priv->reg.beacon_lost_count = *uwrq;
- else
+ if (*uwrq > BEACON_LOST_COUNT_MAX)
return -EINVAL;
+ priv->reg.beacon_lost_count = *uwrq;
+
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
- } else {
- return 0;
}
+
+ return 0;
}
static int ks_wlan_get_beacon_lost(struct net_device *dev,
@@ -2145,17 +1954,14 @@ static int ks_wlan_set_phy_type(struct net_device *dev,
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- /* for SLEEP MODE */
- if (*uwrq == D_11B_ONLY_MODE) { /* 0 */
- priv->reg.phy_type = D_11B_ONLY_MODE;
- } else if (*uwrq == D_11G_ONLY_MODE) { /* 1 */
- priv->reg.phy_type = D_11G_ONLY_MODE;
- } else if (*uwrq == D_11BG_COMPATIBLE_MODE) { /* 2 */
- priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
- } else {
+
+ if (*uwrq != D_11B_ONLY_MODE &&
+ *uwrq != D_11G_ONLY_MODE &&
+ *uwrq != D_11BG_COMPATIBLE_MODE)
return -EINVAL;
- }
+ /* for SLEEP MODE */
+ priv->reg.phy_type = *uwrq;
priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
}
@@ -2182,18 +1988,13 @@ static int ks_wlan_set_cts_mode(struct net_device *dev,
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (*uwrq == CTS_MODE_FALSE) { /* 0 */
- priv->reg.cts_mode = CTS_MODE_FALSE;
- } else if (*uwrq == CTS_MODE_TRUE) { /* 1 */
- if (priv->reg.phy_type == D_11G_ONLY_MODE ||
- priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) {
- priv->reg.cts_mode = CTS_MODE_TRUE;
- } else {
- priv->reg.cts_mode = CTS_MODE_FALSE;
- }
- } else {
+ if (*uwrq != CTS_MODE_FALSE && *uwrq != CTS_MODE_TRUE)
return -EINVAL;
- }
+
+ priv->reg.cts_mode = (*uwrq == CTS_MODE_FALSE) ? *uwrq :
+ (priv->reg.phy_type == D_11G_ONLY_MODE ||
+ priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) ?
+ *uwrq : !*uwrq;
priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
@@ -2218,22 +2019,20 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev,
{
struct ks_wlan_private *priv = netdev_priv(dev);
- if (*uwrq == SLP_SLEEP) {
- priv->sleep_mode = *uwrq;
- netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode);
-
- hostif_sme_enqueue(priv, SME_STOP_REQUEST);
- hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
-
- } else if (*uwrq == SLP_ACTIVE) {
- priv->sleep_mode = *uwrq;
- netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode);
- hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
- } else {
+ if (*uwrq != SLP_SLEEP &&
+ *uwrq != SLP_ACTIVE) {
netdev_err(dev, "SET_SLEEP_MODE %d error\n", *uwrq);
return -EINVAL;
}
+ priv->sleep_mode = *uwrq;
+ netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode);
+
+ if (*uwrq == SLP_SLEEP)
+ hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
+
return 0;
}
@@ -2248,8 +2047,6 @@ static int ks_wlan_get_sleep_mode(struct net_device *dev,
return 0;
}
-#ifdef WPS
-
static int ks_wlan_set_wps_enable(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
@@ -2259,11 +2056,10 @@ static int ks_wlan_set_wps_enable(struct net_device *dev,
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (*uwrq == 0 || *uwrq == 1)
- priv->wps.wps_enabled = *uwrq;
- else
+ if (*uwrq != 0 && *uwrq != 1)
return -EINVAL;
+ priv->wps.wps_enabled = *uwrq;
hostif_sme_enqueue(priv, SME_WPS_ENABLE_REQUEST);
return 0;
@@ -2314,7 +2110,6 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
return 0;
}
-#endif /* WPS */
static int ks_wlan_set_tx_gain(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
@@ -2325,16 +2120,11 @@ static int ks_wlan_set_tx_gain(struct net_device *dev,
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
- priv->gain.tx_gain = (uint8_t)*uwrq;
- else
+ if (*uwrq > 0xFF)
return -EINVAL;
- if (priv->gain.tx_gain < 0xFF)
- priv->gain.tx_mode = 1;
- else
- priv->gain.tx_mode = 0;
-
+ priv->gain.tx_gain = (u8)*uwrq;
+ priv->gain.tx_mode = (priv->gain.tx_gain < 0xFF) ? 1 : 0;
hostif_sme_enqueue(priv, SME_SET_GAIN);
return 0;
}
@@ -2362,16 +2152,11 @@ static int ks_wlan_set_rx_gain(struct net_device *dev,
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
- priv->gain.rx_gain = (uint8_t)*uwrq;
- else
+ if (*uwrq > 0xFF)
return -EINVAL;
- if (priv->gain.rx_gain < 0xFF)
- priv->gain.rx_mode = 1;
- else
- priv->gain.rx_mode = 0;
-
+ priv->gain.rx_gain = (u8)*uwrq;
+ priv->gain.rx_mode = (priv->gain.rx_gain < 0xFF) ? 1 : 0;
hostif_sme_enqueue(priv, SME_SET_GAIN);
return 0;
}
@@ -2535,14 +2320,12 @@ static const struct iw_priv_args ks_wlan_private_args[] = {
/*{ cmd, set_args, get_args, name[16] } */
{KS_WLAN_GET_FIRM_VERSION, IW_PRIV_TYPE_NONE,
IW_PRIV_TYPE_CHAR | (128 + 1), "GetFirmwareVer"},
-#ifdef WPS
{KS_WLAN_SET_WPS_ENABLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
IW_PRIV_TYPE_NONE, "SetWPSEnable"},
{KS_WLAN_GET_WPS_ENABLE, IW_PRIV_TYPE_NONE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetW"},
{KS_WLAN_SET_WPS_PROBE_REQ, IW_PRIV_TYPE_BYTE | 2047, IW_PRIV_TYPE_NONE,
"SetWPSProbeReq"},
-#endif /* WPS */
{KS_WLAN_SET_PREAMBLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
IW_PRIV_TYPE_NONE, "SetPreamble"},
{KS_WLAN_GET_PREAMBLE, IW_PRIV_TYPE_NONE,
@@ -2586,105 +2369,62 @@ static const struct iw_priv_args ks_wlan_private_args[] = {
};
static const iw_handler ks_wlan_handler[] = {
- (iw_handler)ks_wlan_config_commit, /* SIOCSIWCOMMIT */
- (iw_handler)ks_wlan_get_name, /* SIOCGIWNAME */
- (iw_handler)NULL, /* SIOCSIWNWID */
- (iw_handler)NULL, /* SIOCGIWNWID */
- (iw_handler)ks_wlan_set_freq, /* SIOCSIWFREQ */
- (iw_handler)ks_wlan_get_freq, /* SIOCGIWFREQ */
- (iw_handler)ks_wlan_set_mode, /* SIOCSIWMODE */
- (iw_handler)ks_wlan_get_mode, /* SIOCGIWMODE */
-#ifndef KSC_OPNOTSUPP
- (iw_handler)ks_wlan_set_sens, /* SIOCSIWSENS */
- (iw_handler)ks_wlan_get_sens, /* SIOCGIWSENS */
-#else /* KSC_OPNOTSUPP */
- (iw_handler)NULL, /* SIOCSIWSENS */
- (iw_handler)NULL, /* SIOCGIWSENS */
-#endif /* KSC_OPNOTSUPP */
- (iw_handler)NULL, /* SIOCSIWRANGE */
- (iw_handler)ks_wlan_get_range, /* SIOCGIWRANGE */
- (iw_handler)NULL, /* SIOCSIWPRIV */
- (iw_handler)NULL, /* SIOCGIWPRIV */
- (iw_handler)NULL, /* SIOCSIWSTATS */
- (iw_handler)ks_wlan_get_iwstats, /* SIOCGIWSTATS */
- (iw_handler)NULL, /* SIOCSIWSPY */
- (iw_handler)NULL, /* SIOCGIWSPY */
- (iw_handler)NULL, /* SIOCSIWTHRSPY */
- (iw_handler)NULL, /* SIOCGIWTHRSPY */
- (iw_handler)ks_wlan_set_wap, /* SIOCSIWAP */
- (iw_handler)ks_wlan_get_wap, /* SIOCGIWAP */
-// (iw_handler)NULL, /* SIOCSIWMLME */
- (iw_handler)ks_wlan_set_mlme, /* SIOCSIWMLME */
- (iw_handler)ks_wlan_get_aplist, /* SIOCGIWAPLIST */
- (iw_handler)ks_wlan_set_scan, /* SIOCSIWSCAN */
- (iw_handler)ks_wlan_get_scan, /* SIOCGIWSCAN */
- (iw_handler)ks_wlan_set_essid, /* SIOCSIWESSID */
- (iw_handler)ks_wlan_get_essid, /* SIOCGIWESSID */
- (iw_handler)ks_wlan_set_nick, /* SIOCSIWNICKN */
- (iw_handler)ks_wlan_get_nick, /* SIOCGIWNICKN */
- (iw_handler)NULL, /* -- hole -- */
- (iw_handler)NULL, /* -- hole -- */
- (iw_handler)ks_wlan_set_rate, /* SIOCSIWRATE */
- (iw_handler)ks_wlan_get_rate, /* SIOCGIWRATE */
- (iw_handler)ks_wlan_set_rts, /* SIOCSIWRTS */
- (iw_handler)ks_wlan_get_rts, /* SIOCGIWRTS */
- (iw_handler)ks_wlan_set_frag, /* SIOCSIWFRAG */
- (iw_handler)ks_wlan_get_frag, /* SIOCGIWFRAG */
-#ifndef KSC_OPNOTSUPP
- (iw_handler)ks_wlan_set_txpow, /* SIOCSIWTXPOW */
- (iw_handler)ks_wlan_get_txpow, /* SIOCGIWTXPOW */
- (iw_handler)ks_wlan_set_retry, /* SIOCSIWRETRY */
- (iw_handler)ks_wlan_get_retry, /* SIOCGIWRETRY */
-#else /* KSC_OPNOTSUPP */
- (iw_handler)NULL, /* SIOCSIWTXPOW */
- (iw_handler)NULL, /* SIOCGIWTXPOW */
- (iw_handler)NULL, /* SIOCSIWRETRY */
- (iw_handler)NULL, /* SIOCGIWRETRY */
-#endif /* KSC_OPNOTSUPP */
- (iw_handler)ks_wlan_set_encode, /* SIOCSIWENCODE */
- (iw_handler)ks_wlan_get_encode, /* SIOCGIWENCODE */
- (iw_handler)ks_wlan_set_power, /* SIOCSIWPOWER */
- (iw_handler)ks_wlan_get_power, /* SIOCGIWPOWER */
- (iw_handler)NULL, /* -- hole -- */
- (iw_handler)NULL, /* -- hole -- */
-// (iw_handler)NULL, /* SIOCSIWGENIE */
- (iw_handler)ks_wlan_set_genie, /* SIOCSIWGENIE */
- (iw_handler)NULL, /* SIOCGIWGENIE */
- (iw_handler)ks_wlan_set_auth_mode, /* SIOCSIWAUTH */
- (iw_handler)ks_wlan_get_auth_mode, /* SIOCGIWAUTH */
- (iw_handler)ks_wlan_set_encode_ext, /* SIOCSIWENCODEEXT */
- (iw_handler)ks_wlan_get_encode_ext, /* SIOCGIWENCODEEXT */
- (iw_handler)ks_wlan_set_pmksa, /* SIOCSIWPMKSA */
- (iw_handler)NULL, /* -- hole -- */
+ IW_HANDLER(SIOCSIWCOMMIT, ks_wlan_config_commit),
+ IW_HANDLER(SIOCGIWNAME, ks_wlan_get_name),
+ IW_HANDLER(SIOCSIWFREQ, ks_wlan_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, ks_wlan_get_freq),
+ IW_HANDLER(SIOCSIWMODE, ks_wlan_set_mode),
+ IW_HANDLER(SIOCGIWMODE, ks_wlan_get_mode),
+ IW_HANDLER(SIOCGIWRANGE, ks_wlan_get_range),
+ IW_HANDLER(SIOCGIWSTATS, ks_wlan_get_iwstats),
+ IW_HANDLER(SIOCSIWAP, ks_wlan_set_wap),
+ IW_HANDLER(SIOCGIWAP, ks_wlan_get_wap),
+ IW_HANDLER(SIOCSIWMLME, ks_wlan_set_mlme),
+ IW_HANDLER(SIOCGIWAPLIST, ks_wlan_get_aplist),
+ IW_HANDLER(SIOCSIWSCAN, ks_wlan_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, ks_wlan_get_scan),
+ IW_HANDLER(SIOCSIWESSID, ks_wlan_set_essid),
+ IW_HANDLER(SIOCGIWESSID, ks_wlan_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, ks_wlan_set_nick),
+ IW_HANDLER(SIOCGIWNICKN, ks_wlan_get_nick),
+ IW_HANDLER(SIOCSIWRATE, ks_wlan_set_rate),
+ IW_HANDLER(SIOCGIWRATE, ks_wlan_get_rate),
+ IW_HANDLER(SIOCSIWRTS, ks_wlan_set_rts),
+ IW_HANDLER(SIOCGIWRTS, ks_wlan_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, ks_wlan_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, ks_wlan_get_frag),
+ IW_HANDLER(SIOCSIWENCODE, ks_wlan_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, ks_wlan_get_encode),
+ IW_HANDLER(SIOCSIWPOWER, ks_wlan_set_power),
+ IW_HANDLER(SIOCGIWPOWER, ks_wlan_get_power),
+ IW_HANDLER(SIOCSIWGENIE, ks_wlan_set_genie),
+ IW_HANDLER(SIOCSIWAUTH, ks_wlan_set_auth_mode),
+ IW_HANDLER(SIOCGIWAUTH, ks_wlan_get_auth_mode),
+ IW_HANDLER(SIOCSIWENCODEEXT, ks_wlan_set_encode_ext),
+ IW_HANDLER(SIOCGIWENCODEEXT, ks_wlan_get_encode_ext),
+ IW_HANDLER(SIOCSIWPMKSA, ks_wlan_set_pmksa),
};
/* private_handler */
static const iw_handler ks_wlan_private_handler[] = {
- (iw_handler)NULL, /* 0 */
- (iw_handler)NULL, /* 1, used to be: KS_WLAN_GET_DRIVER_VERSION */
- (iw_handler)NULL, /* 2 */
- (iw_handler)ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */
-#ifdef WPS
- (iw_handler)ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */
- (iw_handler)ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */
- (iw_handler)ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */
-#else
- (iw_handler)NULL, /* 4 */
- (iw_handler)NULL, /* 5 */
- (iw_handler)NULL, /* 6 */
-#endif /* WPS */
-
- (iw_handler)ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */
- (iw_handler)ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */
- (iw_handler)ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */
+ (iw_handler)NULL, /* 0 */
+ (iw_handler)NULL, /* 1, KS_WLAN_GET_DRIVER_VERSION */
+ (iw_handler)NULL, /* 2 */
+ (iw_handler)ks_wlan_get_firmware_version,/* 3 KS_WLAN_GET_FIRM_VERSION */
+ (iw_handler)ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */
+ (iw_handler)ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */
+ (iw_handler)ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */
+ (iw_handler)ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */
+ (iw_handler)ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */
+ (iw_handler)ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */
(iw_handler)ks_wlan_set_power_mgmt, /* 10 KS_WLAN_SET_POWER_SAVE */
(iw_handler)ks_wlan_get_power_mgmt, /* 11 KS_WLAN_GET_POWER_SAVE */
(iw_handler)ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */
(iw_handler)ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */
(iw_handler)ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */
(iw_handler)ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */
- (iw_handler)ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */
- (iw_handler)NULL, /* 17 */
+ (iw_handler)ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */
+ (iw_handler)NULL, /* 17 */
(iw_handler)ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BECAN_LOST */
(iw_handler)ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BECAN_LOST */
(iw_handler)ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */
@@ -2693,22 +2433,21 @@ static const iw_handler ks_wlan_private_handler[] = {
(iw_handler)ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */
(iw_handler)ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */
(iw_handler)ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */
- (iw_handler)NULL, /* 26 */
- (iw_handler)NULL, /* 27 */
+ (iw_handler)NULL, /* 26 */
+ (iw_handler)NULL, /* 27 */
(iw_handler)ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */
(iw_handler)ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */
- (iw_handler)NULL, /* 30 */
- (iw_handler)NULL, /* 31 */
+ (iw_handler)NULL, /* 30 */
+ (iw_handler)NULL, /* 31 */
};
static const struct iw_handler_def ks_wlan_handler_def = {
- .num_standard = sizeof(ks_wlan_handler) / sizeof(iw_handler),
- .num_private = sizeof(ks_wlan_private_handler) / sizeof(iw_handler),
- .num_private_args =
- sizeof(ks_wlan_private_args) / sizeof(struct iw_priv_args),
- .standard = (iw_handler *)ks_wlan_handler,
- .private = (iw_handler *)ks_wlan_private_handler,
- .private_args = (struct iw_priv_args *)ks_wlan_private_args,
+ .num_standard = ARRAY_SIZE(ks_wlan_handler),
+ .num_private = ARRAY_SIZE(ks_wlan_private_handler),
+ .num_private_args = ARRAY_SIZE(ks_wlan_private_args),
+ .standard = ks_wlan_handler,
+ .private = ks_wlan_private_handler,
+ .private_args = ks_wlan_private_args,
.get_wireless_stats = ks_get_wireless_stats,
};
@@ -2750,7 +2489,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
- memcpy(priv->eth_addr, mac_addr->sa_data, ETH_ALEN);
+ ether_addr_copy(priv->eth_addr, mac_addr->sa_data);
priv->mac_address_valid = false;
hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
@@ -2818,7 +2557,7 @@ void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb)
* This routine is not state sensitive and need not be SMP locked.
*/
static
-void ks_wlan_set_multicast_list(struct net_device *dev)
+void ks_wlan_set_rx_mode(struct net_device *dev)
{
struct ks_wlan_private *priv = netdev_priv(dev);
@@ -2866,7 +2605,7 @@ static const struct net_device_ops ks_wlan_netdev_ops = {
.ndo_set_mac_address = ks_wlan_set_mac_address,
.ndo_get_stats = ks_wlan_get_stats,
.ndo_tx_timeout = ks_wlan_tx_timeout,
- .ndo_set_rx_mode = ks_wlan_set_multicast_list,
+ .ndo_set_rx_mode = ks_wlan_set_rx_mode,
};
int ks_wlan_net_start(struct net_device *dev)
@@ -2876,24 +2615,15 @@ int ks_wlan_net_start(struct net_device *dev)
priv = netdev_priv(dev);
priv->mac_address_valid = false;
+ priv->is_device_open = true;
priv->need_commit = 0;
-
- priv->device_open_status = 1;
-
/* phy information update timer */
atomic_set(&update_phyinfo, 0);
timer_setup(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout, 0);
/* dummy address set */
- memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
- dev->dev_addr[0] = priv->eth_addr[0];
- dev->dev_addr[1] = priv->eth_addr[1];
- dev->dev_addr[2] = priv->eth_addr[2];
- dev->dev_addr[3] = priv->eth_addr[3];
- dev->dev_addr[4] = priv->eth_addr[4];
- dev->dev_addr[5] = priv->eth_addr[5];
- dev->dev_addr[6] = 0x00;
- dev->dev_addr[7] = 0x00;
+ ether_addr_copy(priv->eth_addr, dummy_addr);
+ ether_addr_copy(dev->dev_addr, priv->eth_addr);
/* The ks_wlan-specific entries in the device structure. */
dev->netdev_ops = &ks_wlan_netdev_ops;
@@ -2909,7 +2639,7 @@ int ks_wlan_net_stop(struct net_device *dev)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- priv->device_open_status = 0;
+ priv->is_device_open = false;
del_timer_sync(&update_phyinfo_timer);
if (netif_running(dev))
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
index 292eae29c552..e6bd70846e98 100644
--- a/drivers/staging/ks7010/michael_mic.c
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -1,30 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for KeyStream wireless LAN
*
* Copyright (C) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/bitops.h>
#include <asm/unaligned.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
#include "michael_mic.h"
// Reset the state to the empty message.
-static inline void michael_clear(struct michael_mic_t *mic)
+static inline void michael_clear(struct michael_mic *mic)
{
mic->l = mic->k0;
mic->r = mic->k1;
mic->m_bytes = 0;
}
-static void michael_init(struct michael_mic_t *mic, u8 *key)
+static void michael_init(struct michael_mic *mic, u8 *key)
{
// Set the key
mic->k0 = get_unaligned_le32(key);
@@ -34,20 +30,20 @@ static void michael_init(struct michael_mic_t *mic, u8 *key)
michael_clear(mic);
}
-static inline void michael_block(struct michael_mic_t *mic)
+static inline void michael_block(struct michael_mic *mic)
{
mic->r ^= rol32(mic->l, 17);
mic->l += mic->r;
mic->r ^= ((mic->l & 0xff00ff00) >> 8) |
((mic->l & 0x00ff00ff) << 8);
mic->l += mic->r;
- mic->r ^= rol32(mic->l, 3); \
+ mic->r ^= rol32(mic->l, 3);
mic->l += mic->r;
- mic->r ^= ror32(mic->l, 2); \
+ mic->r ^= ror32(mic->l, 2);
mic->l += mic->r;
}
-static void michael_append(struct michael_mic_t *mic, u8 *src, int bytes)
+static void michael_append(struct michael_mic *mic, u8 *src, int bytes)
{
int addlen;
@@ -81,7 +77,7 @@ static void michael_append(struct michael_mic_t *mic, u8 *src, int bytes)
}
}
-static void michael_get_mic(struct michael_mic_t *mic, u8 *dst)
+static void michael_get_mic(struct michael_mic *mic, u8 *dst)
{
u8 *data = mic->m;
@@ -110,8 +106,8 @@ static void michael_get_mic(struct michael_mic_t *mic, u8 *dst)
michael_clear(mic);
}
-void michael_mic_function(struct michael_mic_t *mic, u8 *key,
- u8 *data, int len, u8 priority, u8 *result)
+void michael_mic_function(struct michael_mic *mic, u8 *key,
+ u8 *data, unsigned int len, u8 priority, u8 *result)
{
u8 pad_data[4] = { priority, 0, 0, 0 };
// Compute the MIC value
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
index 894a8d4121a4..f0ac164b999b 100644
--- a/drivers/staging/ks7010/michael_mic.h
+++ b/drivers/staging/ks7010/michael_mic.h
@@ -1,16 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for KeyStream wireless LAN
*
* Copyright (C) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* MichaelMIC routine define */
-struct michael_mic_t {
+struct michael_mic {
u32 k0; // Key
u32 k1; // Key
u32 l; // Current state
@@ -20,5 +17,5 @@ struct michael_mic_t {
u8 result[8];
};
-void michael_mic_function(struct michael_mic_t *mic, u8 *key,
- u8 *data, int len, u8 priority, u8 *result);
+void michael_mic_function(struct michael_mic *mic, u8 *key,
+ u8 *data, unsigned int len, u8 priority, u8 *result);
diff --git a/drivers/staging/lustre/Kconfig b/drivers/staging/lustre/Kconfig
deleted file mode 100644
index b7d81096eee9..000000000000
--- a/drivers/staging/lustre/Kconfig
+++ /dev/null
@@ -1,3 +0,0 @@
-source "drivers/staging/lustre/lnet/Kconfig"
-
-source "drivers/staging/lustre/lustre/Kconfig"
diff --git a/drivers/staging/lustre/Makefile b/drivers/staging/lustre/Makefile
deleted file mode 100644
index 95ffe337a80a..000000000000
--- a/drivers/staging/lustre/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LNET) += lnet/
-obj-$(CONFIG_LUSTRE_FS) += lustre/
diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt
deleted file mode 100644
index 0676243eea9e..000000000000
--- a/drivers/staging/lustre/README.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-Lustre Parallel Filesystem Client
-=================================
-
-The Lustre file system is an open-source, parallel file system
-that supports many requirements of leadership class HPC simulation
-environments.
-Born from from a research project at Carnegie Mellon University,
-the Lustre file system is a widely-used option in HPC.
-The Lustre file system provides a POSIX compliant file system interface,
-can scale to thousands of clients, petabytes of storage and
-hundreds of gigabytes per second of I/O bandwidth.
-
-Unlike shared disk storage cluster filesystems (e.g. OCFS2, GFS, GPFS),
-Lustre has independent Metadata and Data servers that clients can access
-in parallel to maximize performance.
-
-In order to use Lustre client you will need to download the "lustre-client"
-package that contains the userspace tools from http://lustre.org/download/
-
-You will need to install and configure your Lustre servers separately.
-
-Mount Syntax
-============
-After you installed the lustre-client tools including mount.lustre binary
-you can mount your Lustre filesystem with:
-
-mount -t lustre mgs:/fsname mnt
-
-where mgs is the host name or ip address of your Lustre MGS(management service)
-fsname is the name of the filesystem you would like to mount.
-
-
-Mount Options
-=============
-
- noflock
- Disable posix file locking (Applications trying to use
- the functionality will get ENOSYS)
-
- localflock
- Enable local flock support, using only client-local flock
- (faster, for applications that require flock but do not run
- on multiple nodes).
-
- flock
- Enable cluster-global posix file locking coherent across all
- client nodes.
-
- user_xattr, nouser_xattr
- Support "user." extended attributes (or not)
-
- user_fid2path, nouser_fid2path
- Enable FID to path translation by regular users (or not)
-
- checksum, nochecksum
- Verify data consistency on the wire and in memory as it passes
- between the layers (or not).
-
- lruresize, nolruresize
- Allow lock LRU to be controlled by memory pressure on the server
- (or only 100 (default, controlled by lru_size proc parameter) locks
- per CPU per server on this client).
-
- lazystatfs, nolazystatfs
- Do not block in statfs() if some of the servers are down.
-
- 32bitapi
- Shrink inode numbers to fit into 32 bits. This is necessary
- if you plan to reexport Lustre filesystem from this client via
- NFSv4.
-
- verbose, noverbose
- Enable mount/umount console messages (or not)
-
-More Information
-================
-You can get more information at the Lustre website: http://wiki.lustre.org/
-
-Source for the userspace tools and out-of-tree client and server code
-is available at: http://git.hpdd.intel.com/fs/lustre-release.git
-
-Latest binary packages:
-http://lustre.org/download/
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
deleted file mode 100644
index 94446487748a..000000000000
--- a/drivers/staging/lustre/TODO
+++ /dev/null
@@ -1,302 +0,0 @@
-Currently all the work directed toward the lustre upstream client is tracked
-at the following link:
-
-https://jira.hpdd.intel.com/browse/LU-9679
-
-Under this ticket you will see the following work items that need to be
-addressed:
-
-******************************************************************************
-* libcfs cleanup
-*
-* https://jira.hpdd.intel.com/browse/LU-9859
-*
-* Track all the cleanups and simplification of the libcfs module. Remove
-* functions the kernel provides. Possible intergrate some of the functionality
-* into the kernel proper.
-*
-******************************************************************************
-
-https://jira.hpdd.intel.com/browse/LU-100086
-
-LNET_MINOR conflicts with USERIO_MINOR
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-8130
-
-Fix and simplify libcfs hash handling
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-8703
-
-The current way we handle SMP is wrong. Platforms like ARM and KNL can have
-core and NUMA setups with things like NUMA nodes with no cores. We need to
-handle such cases. This work also greatly simplified the lustre SMP code.
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9019
-
-Replace libcfs time API with standard kernel APIs. Also migrate away from
-jiffies. We found jiffies can vary on nodes which can lead to corner cases
-that can break the file system due to nodes having inconsistent behavior.
-So move to time64_t and ktime_t as much as possible.
-
-******************************************************************************
-* Proper IB support for ko2iblnd
-******************************************************************************
-https://jira.hpdd.intel.com/browse/LU-9179
-
-Poor performance for the ko2iblnd driver. This is related to many of the
-patches below that are missing from the linux client.
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9886
-
-Crash in upstream kiblnd_handle_early_rxs()
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-10394 / LU-10526 / LU-10089
-
-Default to default to using MEM_REG
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-10459
-
-throttle tx based on queue depth
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9943
-
-correct WR fast reg accounting
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-10291
-
-remove concurrent_sends tunable
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-10213
-
-calculate qp max_send_wrs properly
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9810
-
-use less CQ entries for each connection
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-10129 / LU-9180
-
-rework map_on_demand behavior
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-10129
-
-query device capabilities
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-10015
-
-fix race at kiblnd_connect_peer
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9983
-
-allow for discontiguous fragments
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9500
-
-Don't Page Align remote_addr with FastReg
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9448
-
-handle empty CPTs
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9507
-
-Don't Assert On Reconnect with MultiQP
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9472
-
-Fix FastReg map/unmap for MLX5
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9425
-
-Turn on 2 sges by default
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-8943
-
-Enable Multiple OPA Endpoints between Nodes
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-5718
-
-multiple sges for work request
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9094
-
-kill timedout txs from ibp_tx_queue
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9094
-
-reconnect peer for REJ_INVALID_SERVICE_ID
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-8752
-
-Stop MLX5 triggering a dump_cqe
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-8874
-
-Move ko2iblnd to latest RDMA changes
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-8875 / LU-8874
-
-Change to new RDMA done callback mechanism
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9164 / LU-8874
-
-Incorporate RDMA map/unamp API's into ko2iblnd
-
-******************************************************************************
-* sysfs/debugfs fixes
-*
-* https://jira.hpdd.intel.com/browse/LU-8066
-*
-* The original migration to sysfs was done in haste without properly working
-* utilities to test the changes. This covers the work to restore the proper
-* behavior. Huge project to make this right.
-*
-******************************************************************************
-
-https://jira.hpdd.intel.com/browse/LU-9431
-
-The function class_process_proc_param was used for our mass updates of proc
-tunables. It didn't work with sysfs and it was just ugly so it was removed.
-In the process the ability to mass update thousands of clients was lost. This
-work restores this in a sane way.
-
-------------------------------------------------------------------------------
-https://jira.hpdd.intel.com/browse/LU-9091
-
-One the major request of users is the ability to pass in parameters into a
-sysfs file in various different units. For example we can set max_pages_per_rpc
-but this can vary on platforms due to different platform sizes. So you can
-set this like max_pages_per_rpc=16MiB. The original code to handle this written
-before the string helpers were created so the code doesn't follow that format
-but it would be easy to move to. Currently the string helpers does the reverse
-of what we need, changing bytes to string. We need to change a string to bytes.
-
-******************************************************************************
-* Proper user land to kernel space interface for Lustre
-*
-* https://jira.hpdd.intel.com/browse/LU-9680
-*
-******************************************************************************
-
-https://jira.hpdd.intel.com/browse/LU-8915
-
-Don't use linux list structure as user land arguments for lnet selftest.
-This code is pretty poor quality and really needs to be reworked.
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-8834
-
-The lustre ioctl LL_IOC_FUTIMES_3 is very generic. Need to either work with
-other file systems with similar functionality and make a common syscall
-interface or rework our server code to automagically do it for us.
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-6202
-
-Cleanup up ioctl handling. We have many obsolete ioctls. Also the way we do
-ioctls can be changed over to netlink. This also has the benefit of working
-better with HPC systems that do IO forwarding. Such systems don't like ioctls
-very well.
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9667
-
-More cleanups by making our utilities use sysfs instead of ioctls for LNet.
-Also it has been requested to move the remaining ioctls to the netlink API.
-
-******************************************************************************
-* Misc
-******************************************************************************
-
-------------------------------------------------------------------------------
-https://jira.hpdd.intel.com/browse/LU-9855
-
-Clean up obdclass preprocessor code. One of the major eye sores is the various
-pointer redirections and macros used by the obdclass. This makes the code very
-difficult to understand. It was requested by the Al Viro to clean this up before
-we leave staging.
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9633
-
-Migrate to sphinx kernel-doc style comments. Add documents in Documentation.
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-6142
-
-Possible remaining coding style fix. Remove deadcode. Enforce kernel code
-style. Other minor misc cleanups...
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-8837
-
-Separate client/server functionality. Functions only used by server can be
-removed from client. Most of this has been done but we need a inspect of the
-code to make sure.
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-8964
-
-Lustre client readahead/writeback control needs to better suit kernel providings.
-Currently its being explored. We could end up replacing the CLIO read ahead
-abstract with the kernel proper version.
-
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9862
-
-Patch that landed for LU-7890 leads to static checker errors
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-9868
-
-dcache/namei fixes for lustre
-------------------------------------------------------------------------------
-
-https://jira.hpdd.intel.com/browse/LU-10467
-
-use standard linux wait_events macros work by Neil Brown
-
-------------------------------------------------------------------------------
-
-Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
-<andreas.dilger@intel.com>, James Simmons <jsimmons@infradead.org> and
-Oleg Drokin <oleg.drokin@intel.com>.
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
deleted file mode 100644
index 4702956805a6..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/curproc.h
- *
- * Lustre curproc API declaration
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-
-#ifndef __LIBCFS_CURPROC_H__
-#define __LIBCFS_CURPROC_H__
-
-/*
- * Plus, platform-specific constant
- *
- * CFS_CURPROC_COMM_MAX,
- *
- * and opaque scalar type
- *
- * kernel_cap_t
- */
-
-/* check if task is running in compat mode.*/
-#define current_pid() (current->pid)
-#define current_comm() (current->comm)
-
-typedef u32 cfs_cap_t;
-
-#define CFS_CAP_FS_MASK (BIT(CAP_CHOWN) | \
- BIT(CAP_DAC_OVERRIDE) | \
- BIT(CAP_DAC_READ_SEARCH) | \
- BIT(CAP_FOWNER) | \
- BIT(CAP_FSETID) | \
- BIT(CAP_LINUX_IMMUTABLE) | \
- BIT(CAP_SYS_ADMIN) | \
- BIT(CAP_SYS_BOOT) | \
- BIT(CAP_SYS_RESOURCE))
-
-static inline cfs_cap_t cfs_curproc_cap_pack(void)
-{
- /* cfs_cap_t is only the first word of kernel_cap_t */
- return (cfs_cap_t)(current_cap().cap[0]);
-}
-
-/* __LIBCFS_CURPROC_H__ */
-#endif
-/*
- * Local variables:
- * c-indentation-style: "K&R"
- * c-basic-offset: 8
- * tab-width: 8
- * fill-column: 80
- * scroll-step: 1
- * End:
- */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
deleted file mode 100644
index 392793582956..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LIBCFS_LIBCFS_H__
-#define __LIBCFS_LIBCFS_H__
-
-#include <linux/gfp.h>
-#include <linux/list.h>
-
-#include <uapi/linux/lnet/libcfs_ioctl.h>
-#include <linux/libcfs/linux/libcfs.h>
-#include <linux/libcfs/libcfs_debug.h>
-#include <linux/libcfs/libcfs_private.h>
-#include <linux/libcfs/libcfs_cpu.h>
-#include <linux/libcfs/libcfs_prim.h>
-#include <linux/libcfs/libcfs_time.h>
-#include <linux/libcfs/libcfs_string.h>
-#include <linux/libcfs/libcfs_hash.h>
-#include <linux/libcfs/libcfs_fail.h>
-#include <linux/libcfs/curproc.h>
-
-#define LIBCFS_VERSION "0.7.0"
-
-#define LOWEST_BIT_SET(x) ((x) & ~((x) - 1))
-
-/*
- * Lustre Error Checksum: calculates checksum
- * of Hex number by XORing each bit.
- */
-#define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \
- ((hexnum) >> 8 & 0xf))
-
-/* need both kernel and user-land acceptor */
-#define LNET_ACCEPTOR_MIN_RESERVED_PORT 512
-#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
-
-/* Block all signals except for the @sigs */
-static inline void cfs_block_sigsinv(unsigned long sigs, sigset_t *old)
-{
- sigset_t new;
-
- siginitsetinv(&new, sigs);
- sigorsets(&new, &current->blocked, &new);
- sigprocmask(SIG_BLOCK, &new, old);
-}
-
-static inline void
-cfs_restore_sigs(sigset_t *old)
-{
- sigprocmask(SIG_SETMASK, old, NULL);
-}
-
-struct libcfs_ioctl_handler {
- struct list_head item;
- int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
-};
-
-#define DECLARE_IOCTL_HANDLER(ident, func) \
- struct libcfs_ioctl_handler ident = { \
- .item = LIST_HEAD_INIT(ident.item), \
- .handle_ioctl = func \
- }
-
-int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-
-int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
- const struct libcfs_ioctl_hdr __user *uparam);
-int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
-int libcfs_ioctl(unsigned long cmd, void __user *arg);
-
-/* container_of depends on "likely" which is defined in libcfs_private.h */
-static inline void *__container_of(void *ptr, unsigned long shift)
-{
- if (IS_ERR_OR_NULL(ptr))
- return ptr;
- return (char *)ptr - shift;
-}
-
-#define container_of0(ptr, type, member) \
- ((type *)__container_of((void *)(ptr), offsetof(type, member)))
-
-#define _LIBCFS_H
-
-extern struct miscdevice libcfs_dev;
-/**
- * The path of debug log dump upcall script.
- */
-extern char lnet_debug_log_upcall[1024];
-
-extern struct workqueue_struct *cfs_rehash_wq;
-
-struct lnet_debugfs_symlink_def {
- char *name;
- char *target;
-};
-
-void lustre_insert_debugfs(struct ctl_table *table,
- const struct lnet_debugfs_symlink_def *symlinks);
-int lprocfs_call_handler(void *data, int write, loff_t *ppos,
- void __user *buffer, size_t *lenp,
- int (*handler)(void *data, int write, loff_t pos,
- void __user *buffer, int len));
-
-#endif /* _LIBCFS_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
deleted file mode 100644
index 61bce77fddd6..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ /dev/null
@@ -1,295 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_cpu.h
- *
- * CPU partition
- * . CPU partition is virtual processing unit
- *
- * . CPU partition can present 1-N cores, or 1-N NUMA nodes,
- * in other words, CPU partition is a processors pool.
- *
- * CPU Partition Table (CPT)
- * . a set of CPU partitions
- *
- * . There are two modes for CPT: CFS_CPU_MODE_NUMA and CFS_CPU_MODE_SMP
- *
- * . User can specify total number of CPU partitions while creating a
- * CPT, ID of CPU partition is always start from 0.
- *
- * Example: if there are 8 cores on the system, while creating a CPT
- * with cpu_npartitions=4:
- * core[0, 1] = partition[0], core[2, 3] = partition[1]
- * core[4, 5] = partition[2], core[6, 7] = partition[3]
- *
- * cpu_npartitions=1:
- * core[0, 1, ... 7] = partition[0]
- *
- * . User can also specify CPU partitions by string pattern
- *
- * Examples: cpu_partitions="0[0,1], 1[2,3]"
- * cpu_partitions="N 0[0-3], 1[4-8]"
- *
- * The first character "N" means following numbers are numa ID
- *
- * . NUMA allocators, CPU affinity threads are built over CPU partitions,
- * instead of HW CPUs or HW nodes.
- *
- * . By default, Lustre modules should refer to the global cfs_cpt_table,
- * instead of accessing HW CPUs directly, so concurrency of Lustre can be
- * configured by cpu_npartitions of the global cfs_cpt_table
- *
- * . If cpu_npartitions=1(all CPUs in one pool), lustre should work the
- * same way as 2.2 or earlier versions
- *
- * Author: liang@whamcloud.com
- */
-
-#ifndef __LIBCFS_CPU_H__
-#define __LIBCFS_CPU_H__
-
-/* any CPU partition */
-#define CFS_CPT_ANY (-1)
-
-#ifdef CONFIG_SMP
-/**
- * return cpumask of CPU partition \a cpt
- */
-cpumask_var_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
-/**
- * print string information of cpt-table
- */
-int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);
-#else /* !CONFIG_SMP */
-struct cfs_cpt_table {
- /* # of CPU partitions */
- int ctb_nparts;
- /* cpu mask */
- cpumask_t ctb_mask;
- /* node mask */
- nodemask_t ctb_nodemask;
- /* version */
- u64 ctb_version;
-};
-
-static inline cpumask_var_t *
-cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
-{
- return NULL;
-}
-
-static inline int
-cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
-{
- return 0;
-}
-#endif /* CONFIG_SMP */
-
-extern struct cfs_cpt_table *cfs_cpt_table;
-
-/**
- * destroy a CPU partition table
- */
-void cfs_cpt_table_free(struct cfs_cpt_table *cptab);
-/**
- * create a cfs_cpt_table with \a ncpt number of partitions
- */
-struct cfs_cpt_table *cfs_cpt_table_alloc(unsigned int ncpt);
-/**
- * return total number of CPU partitions in \a cptab
- */
-int
-cfs_cpt_number(struct cfs_cpt_table *cptab);
-/**
- * return number of HW cores or hyper-threadings in a CPU partition \a cpt
- */
-int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);
-/**
- * is there any online CPU in CPU partition \a cpt
- */
-int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt);
-/**
- * return nodemask of CPU partition \a cpt
- */
-nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt);
-/**
- * shadow current HW processor ID to CPU-partition ID of \a cptab
- */
-int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap);
-/**
- * shadow HW processor ID \a CPU to CPU-partition ID by \a cptab
- */
-int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu);
-/**
- * bind current thread on a CPU-partition \a cpt of \a cptab
- */
-int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt);
-/**
- * add \a cpu to CPU partition @cpt of \a cptab, return 1 for success,
- * otherwise 0 is returned
- */
-int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
-/**
- * remove \a cpu from CPU partition \a cpt of \a cptab
- */
-void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
-/**
- * add all cpus in \a mask to CPU partition \a cpt
- * return 1 if successfully set all CPUs, otherwise return 0
- */
-int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab,
- int cpt, cpumask_t *mask);
-/**
- * remove all cpus in \a mask from CPU partition \a cpt
- */
-void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab,
- int cpt, cpumask_t *mask);
-/**
- * add all cpus in NUMA node \a node to CPU partition \a cpt
- * return 1 if successfully set all CPUs, otherwise return 0
- */
-int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node);
-/**
- * remove all cpus in NUMA node \a node from CPU partition \a cpt
- */
-void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node);
-
-/**
- * add all cpus in node mask \a mask to CPU partition \a cpt
- * return 1 if successfully set all CPUs, otherwise return 0
- */
-int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab,
- int cpt, nodemask_t *mask);
-/**
- * remove all cpus in node mask \a mask from CPU partition \a cpt
- */
-void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab,
- int cpt, nodemask_t *mask);
-/**
- * unset all cpus for CPU partition \a cpt
- */
-void cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt);
-/**
- * convert partition id \a cpt to numa node id, if there are more than one
- * nodes in this partition, it might return a different node id each time.
- */
-int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
-
-/**
- * return number of HTs in the same core of \a cpu
- */
-int cfs_cpu_ht_nsiblings(int cpu);
-
-/*
- * allocate per-cpu-partition data, returned value is an array of pointers,
- * variable can be indexed by CPU ID.
- * cptab != NULL: size of array is number of CPU partitions
- * cptab == NULL: size of array is number of HW cores
- */
-void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
-/*
- * destroy per-cpu-partition variable
- */
-void cfs_percpt_free(void *vars);
-int cfs_percpt_number(void *vars);
-
-#define cfs_percpt_for_each(var, i, vars) \
- for (i = 0; i < cfs_percpt_number(vars) && \
- ((var) = (vars)[i]) != NULL; i++)
-
-/*
- * percpu partition lock
- *
- * There are some use-cases like this in Lustre:
- * . each CPU partition has it's own private data which is frequently changed,
- * and mostly by the local CPU partition.
- * . all CPU partitions share some global data, these data are rarely changed.
- *
- * LNet is typical example.
- * CPU partition lock is designed for this kind of use-cases:
- * . each CPU partition has it's own private lock
- * . change on private data just needs to take the private lock
- * . read on shared data just needs to take _any_ of private locks
- * . change on shared data needs to take _all_ private locks,
- * which is slow and should be really rare.
- */
-enum {
- CFS_PERCPT_LOCK_EX = -1, /* negative */
-};
-
-struct cfs_percpt_lock {
- /* cpu-partition-table for this lock */
- struct cfs_cpt_table *pcl_cptab;
- /* exclusively locked */
- unsigned int pcl_locked;
- /* private lock table */
- spinlock_t **pcl_locks;
-};
-
-/* return number of private locks */
-#define cfs_percpt_lock_num(pcl) cfs_cpt_number(pcl->pcl_cptab)
-
-/*
- * create a cpu-partition lock based on CPU partition table \a cptab,
- * each private lock has extra \a psize bytes padding data
- */
-struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
- struct lock_class_key *keys);
-/* destroy a cpu-partition lock */
-void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
-
-/* lock private lock \a index of \a pcl */
-void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
-
-/* unlock private lock \a index of \a pcl */
-void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
-
-#define CFS_PERCPT_LOCK_KEYS 256
-
-/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
-#define cfs_percpt_lock_alloc(cptab) \
-({ \
- static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS]; \
- struct cfs_percpt_lock *___lk; \
- \
- if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS) \
- ___lk = cfs_percpt_lock_create(cptab, NULL); \
- else \
- ___lk = cfs_percpt_lock_create(cptab, ___keys); \
- ___lk; \
-})
-
-/**
- * iterate over all CPU partitions in \a cptab
- */
-#define cfs_cpt_for_each(i, cptab) \
- for (i = 0; i < cfs_cpt_number(cptab); i++)
-
-int cfs_cpu_init(void);
-void cfs_cpu_fini(void);
-
-#endif /* __LIBCFS_CPU_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
deleted file mode 100644
index 3a72117140ed..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ /dev/null
@@ -1,205 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- */
-
-#ifndef _LIBCFS_CRYPTO_H
-#define _LIBCFS_CRYPTO_H
-
-struct cfs_crypto_hash_type {
- char *cht_name; /*< hash algorithm name, equal to
- * format name for crypto api
- */
- unsigned int cht_key; /*< init key by default (valid for
- * 4 bytes context like crc32, adler
- */
- unsigned int cht_size; /**< hash digest size */
-};
-
-enum cfs_crypto_hash_alg {
- CFS_HASH_ALG_NULL = 0,
- CFS_HASH_ALG_ADLER32,
- CFS_HASH_ALG_CRC32,
- CFS_HASH_ALG_MD5,
- CFS_HASH_ALG_SHA1,
- CFS_HASH_ALG_SHA256,
- CFS_HASH_ALG_SHA384,
- CFS_HASH_ALG_SHA512,
- CFS_HASH_ALG_CRC32C,
- CFS_HASH_ALG_MAX,
- CFS_HASH_ALG_UNKNOWN = 0xff
-};
-
-static struct cfs_crypto_hash_type hash_types[] = {
- [CFS_HASH_ALG_NULL] = {
- .cht_name = "null",
- .cht_key = 0,
- .cht_size = 0
- },
- [CFS_HASH_ALG_ADLER32] = {
- .cht_name = "adler32",
- .cht_key = 1,
- .cht_size = 4
- },
- [CFS_HASH_ALG_CRC32] = {
- .cht_name = "crc32",
- .cht_key = ~0,
- .cht_size = 4
- },
- [CFS_HASH_ALG_CRC32C] = {
- .cht_name = "crc32c",
- .cht_key = ~0,
- .cht_size = 4
- },
- [CFS_HASH_ALG_MD5] = {
- .cht_name = "md5",
- .cht_key = 0,
- .cht_size = 16
- },
- [CFS_HASH_ALG_SHA1] = {
- .cht_name = "sha1",
- .cht_key = 0,
- .cht_size = 20
- },
- [CFS_HASH_ALG_SHA256] = {
- .cht_name = "sha256",
- .cht_key = 0,
- .cht_size = 32
- },
- [CFS_HASH_ALG_SHA384] = {
- .cht_name = "sha384",
- .cht_key = 0,
- .cht_size = 48
- },
- [CFS_HASH_ALG_SHA512] = {
- .cht_name = "sha512",
- .cht_key = 0,
- .cht_size = 64
- },
- [CFS_HASH_ALG_MAX] = {
- .cht_name = NULL,
- .cht_key = 0,
- .cht_size = 64
- },
-};
-
-/* Maximum size of hash_types[].cht_size */
-#define CFS_CRYPTO_HASH_DIGESTSIZE_MAX 64
-
-/**
- * Return hash algorithm information for the specified algorithm identifier
- *
- * Hash information includes algorithm name, initial seed, hash size.
- *
- * \retval cfs_crypto_hash_type for valid ID (CFS_HASH_ALG_*)
- * \retval NULL for unknown algorithm identifier
- */
-static inline const struct cfs_crypto_hash_type *
-cfs_crypto_hash_type(enum cfs_crypto_hash_alg hash_alg)
-{
- struct cfs_crypto_hash_type *ht;
-
- if (hash_alg < CFS_HASH_ALG_MAX) {
- ht = &hash_types[hash_alg];
- if (ht->cht_name)
- return ht;
- }
- return NULL;
-}
-
-/**
- * Return hash name for hash algorithm identifier
- *
- * \param[in] hash_alg hash alrgorithm id (CFS_HASH_ALG_*)
- *
- * \retval string name of known hash algorithm
- * \retval "unknown" if hash algorithm is unknown
- */
-static inline const char *
-cfs_crypto_hash_name(enum cfs_crypto_hash_alg hash_alg)
-{
- const struct cfs_crypto_hash_type *ht;
-
- ht = cfs_crypto_hash_type(hash_alg);
- if (ht)
- return ht->cht_name;
- return "unknown";
-}
-
-/**
- * Return digest size for hash algorithm type
- *
- * \param[in] hash_alg hash alrgorithm id (CFS_HASH_ALG_*)
- *
- * \retval hash algorithm digest size in bytes
- * \retval 0 if hash algorithm type is unknown
- */
-static inline int cfs_crypto_hash_digestsize(enum cfs_crypto_hash_alg hash_alg)
-{
- const struct cfs_crypto_hash_type *ht;
-
- ht = cfs_crypto_hash_type(hash_alg);
- if (ht)
- return ht->cht_size;
- return 0;
-}
-
-/**
- * Find hash algorithm ID for the specified algorithm name
- *
- * \retval hash algorithm ID for valid ID (CFS_HASH_ALG_*)
- * \retval CFS_HASH_ALG_UNKNOWN for unknown algorithm name
- */
-static inline unsigned char cfs_crypto_hash_alg(const char *algname)
-{
- enum cfs_crypto_hash_alg hash_alg;
-
- for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
- if (!strcmp(hash_types[hash_alg].cht_name, algname))
- return hash_alg;
-
- return CFS_HASH_ALG_UNKNOWN;
-}
-
-int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
- const void *buf, unsigned int buf_len,
- unsigned char *key, unsigned int key_len,
- unsigned char *hash, unsigned int *hash_len);
-
-struct ahash_request *
-cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
- unsigned char *key, unsigned int key_len);
-int cfs_crypto_hash_update_page(struct ahash_request *desc,
- struct page *page, unsigned int offset,
- unsigned int len);
-int cfs_crypto_hash_update(struct ahash_request *desc, const void *buf,
- unsigned int buf_len);
-int cfs_crypto_hash_final(struct ahash_request *desc,
- unsigned char *hash, unsigned int *hash_len);
-int cfs_crypto_register(void);
-void cfs_crypto_unregister(void);
-int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg);
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
deleted file mode 100644
index 9290a19429e7..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ /dev/null
@@ -1,167 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_debug.h
- *
- * Debug messages and assertions
- *
- */
-
-#ifndef __LIBCFS_DEBUG_H__
-#define __LIBCFS_DEBUG_H__
-
-#include <uapi/linux/lnet/libcfs_debug.h>
-
-/*
- * Debugging
- */
-extern unsigned int libcfs_subsystem_debug;
-extern unsigned int libcfs_stack;
-extern unsigned int libcfs_debug;
-extern unsigned int libcfs_printk;
-extern unsigned int libcfs_console_ratelimit;
-extern unsigned int libcfs_console_max_delay;
-extern unsigned int libcfs_console_min_delay;
-extern unsigned int libcfs_console_backoff;
-extern unsigned int libcfs_debug_binary;
-extern char libcfs_debug_file_path_arr[PATH_MAX];
-
-int libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys);
-int libcfs_debug_str2mask(int *mask, const char *str, int is_subsys);
-
-/* Has there been an LBUG? */
-extern unsigned int libcfs_catastrophe;
-extern unsigned int libcfs_panic_on_lbug;
-
-#ifndef DEBUG_SUBSYSTEM
-# define DEBUG_SUBSYSTEM S_UNDEFINED
-#endif
-
-#define CDEBUG_DEFAULT_MAX_DELAY (600 * HZ) /* jiffies */
-#define CDEBUG_DEFAULT_MIN_DELAY ((HZ + 1) / 2) /* jiffies */
-#define CDEBUG_DEFAULT_BACKOFF 2
-struct cfs_debug_limit_state {
- unsigned long cdls_next;
- unsigned int cdls_delay;
- int cdls_count;
-};
-
-struct libcfs_debug_msg_data {
- const char *msg_file;
- const char *msg_fn;
- int msg_subsys;
- int msg_line;
- int msg_mask;
- struct cfs_debug_limit_state *msg_cdls;
-};
-
-#define LIBCFS_DEBUG_MSG_DATA_INIT(data, mask, cdls) \
-do { \
- (data)->msg_subsys = DEBUG_SUBSYSTEM; \
- (data)->msg_file = __FILE__; \
- (data)->msg_fn = __func__; \
- (data)->msg_line = __LINE__; \
- (data)->msg_cdls = (cdls); \
- (data)->msg_mask = (mask); \
-} while (0)
-
-#define LIBCFS_DEBUG_MSG_DATA_DECL(dataname, mask, cdls) \
- static struct libcfs_debug_msg_data dataname = { \
- .msg_subsys = DEBUG_SUBSYSTEM, \
- .msg_file = __FILE__, \
- .msg_fn = __func__, \
- .msg_line = __LINE__, \
- .msg_cdls = (cdls) }; \
- dataname.msg_mask = (mask)
-
-/**
- * Filters out logging messages based on mask and subsystem.
- */
-static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem)
-{
- return mask & D_CANTMASK ||
- ((libcfs_debug & mask) && (libcfs_subsystem_debug & subsystem));
-}
-
-#define __CDEBUG(cdls, mask, format, ...) \
-do { \
- static struct libcfs_debug_msg_data msgdata; \
- \
- CFS_CHECK_STACK(&msgdata, mask, cdls); \
- \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_INIT(&msgdata, mask, cdls); \
- libcfs_debug_msg(&msgdata, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-#define CDEBUG(mask, format, ...) __CDEBUG(NULL, mask, format, ## __VA_ARGS__)
-
-#define CDEBUG_LIMIT(mask, format, ...) \
-do { \
- static struct cfs_debug_limit_state cdls; \
- \
- __CDEBUG(&cdls, mask, format, ## __VA_ARGS__); \
-} while (0)
-
-#define CWARN(format, ...) CDEBUG_LIMIT(D_WARNING, format, ## __VA_ARGS__)
-#define CERROR(format, ...) CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
-#define CNETERR(format, a...) CDEBUG_LIMIT(D_NETERROR, format, ## a)
-#define CEMERG(format, ...) CDEBUG_LIMIT(D_EMERG, format, ## __VA_ARGS__)
-
-#define LCONSOLE(mask, format, ...) CDEBUG(D_CONSOLE | (mask), format, ## __VA_ARGS__)
-#define LCONSOLE_INFO(format, ...) CDEBUG_LIMIT(D_CONSOLE, format, ## __VA_ARGS__)
-#define LCONSOLE_WARN(format, ...) CDEBUG_LIMIT(D_CONSOLE | D_WARNING, format, ## __VA_ARGS__)
-#define LCONSOLE_ERROR_MSG(errnum, format, ...) CDEBUG_LIMIT(D_CONSOLE | D_ERROR, \
- "%x-%x: " format, errnum, LERRCHKSUM(errnum), ## __VA_ARGS__)
-#define LCONSOLE_ERROR(format, ...) LCONSOLE_ERROR_MSG(0x00, format, ## __VA_ARGS__)
-
-#define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__)
-
-int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
- const char *format1, ...)
- __printf(2, 3);
-
-int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
- const char *format1,
- va_list args, const char *format2, ...)
- __printf(4, 5);
-
-/* other external symbols that tracefile provides: */
-int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
- const char __user *usr_buffer, int usr_buffer_nob);
-int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
- const char *knl_buffer, char *append);
-
-#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
-
-#endif /* __LIBCFS_DEBUG_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
deleted file mode 100644
index d6fc3164e7e7..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ /dev/null
@@ -1,191 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Oracle Corporation, Inc.
- */
-
-#ifndef _LIBCFS_FAIL_H
-#define _LIBCFS_FAIL_H
-
-extern unsigned long cfs_fail_loc;
-extern unsigned int cfs_fail_val;
-extern int cfs_fail_err;
-
-extern wait_queue_head_t cfs_race_waitq;
-extern int cfs_race_state;
-
-int __cfs_fail_check_set(u32 id, u32 value, int set);
-int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set);
-
-enum {
- CFS_FAIL_LOC_NOSET = 0,
- CFS_FAIL_LOC_ORSET = 1,
- CFS_FAIL_LOC_RESET = 2,
- CFS_FAIL_LOC_VALUE = 3
-};
-
-/* Failure injection control */
-#define CFS_FAIL_MASK_SYS 0x0000FF00
-#define CFS_FAIL_MASK_LOC (0x000000FF | CFS_FAIL_MASK_SYS)
-
-#define CFS_FAILED_BIT 30
-/* CFS_FAILED is 0x40000000 */
-#define CFS_FAILED BIT(CFS_FAILED_BIT)
-
-#define CFS_FAIL_ONCE_BIT 31
-/* CFS_FAIL_ONCE is 0x80000000 */
-#define CFS_FAIL_ONCE BIT(CFS_FAIL_ONCE_BIT)
-
-/* The following flags aren't made to be combined */
-#define CFS_FAIL_SKIP 0x20000000 /* skip N times then fail */
-#define CFS_FAIL_SOME 0x10000000 /* only fail N times */
-#define CFS_FAIL_RAND 0x08000000 /* fail 1/N of the times */
-#define CFS_FAIL_USR1 0x04000000 /* user flag */
-
-#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */
-
-static inline bool CFS_FAIL_PRECHECK(u32 id)
-{
- return cfs_fail_loc &&
- ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) ||
- (cfs_fail_loc & id & CFS_FAULT));
-}
-
-static inline int cfs_fail_check_set(u32 id, u32 value,
- int set, int quiet)
-{
- int ret = 0;
-
- if (unlikely(CFS_FAIL_PRECHECK(id))) {
- ret = __cfs_fail_check_set(id, value, set);
- if (ret) {
- if (quiet) {
- CDEBUG(D_INFO, "*** cfs_fail_loc=%x, val=%u***\n",
- id, value);
- } else {
- LCONSOLE_INFO("*** cfs_fail_loc=%x, val=%u***\n",
- id, value);
- }
- }
- }
-
- return ret;
-}
-
-/* If id hit cfs_fail_loc, return 1, otherwise return 0 */
-#define CFS_FAIL_CHECK(id) \
- cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 0)
-#define CFS_FAIL_CHECK_QUIET(id) \
- cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1)
-
-/*
- * If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1,
- * otherwise return 0
- */
-#define CFS_FAIL_CHECK_VALUE(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0)
-#define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1)
-
-/*
- * If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1,
- * otherwise return 0
- */
-#define CFS_FAIL_CHECK_ORSET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0)
-#define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1)
-
-/*
- * If id hit cfs_fail_loc, cfs_fail_loc = value and return 1,
- * otherwise return 0
- */
-#define CFS_FAIL_CHECK_RESET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0)
-#define CFS_FAIL_CHECK_RESET_QUIET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1)
-
-static inline int cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
-{
- if (unlikely(CFS_FAIL_PRECHECK(id)))
- return __cfs_fail_timeout_set(id, value, ms, set);
- return 0;
-}
-
-/* If id hit cfs_fail_loc, sleep for seconds or milliseconds */
-#define CFS_FAIL_TIMEOUT(id, secs) \
- cfs_fail_timeout_set(id, 0, secs * 1000, CFS_FAIL_LOC_NOSET)
-
-#define CFS_FAIL_TIMEOUT_MS(id, ms) \
- cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET)
-
-/*
- * If id hit cfs_fail_loc, cfs_fail_loc |= value and
- * sleep seconds or milliseconds
- */
-#define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \
- cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET)
-
-#define CFS_FAIL_TIMEOUT_RESET(id, value, secs) \
- cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_RESET)
-
-#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \
- cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET)
-
-#define CFS_FAULT_CHECK(id) \
- CFS_FAIL_CHECK(CFS_FAULT | (id))
-
-/*
- * The idea here is to synchronise two threads to force a race. The
- * first thread that calls this with a matching fail_loc is put to
- * sleep. The next thread that calls with the same fail_loc wakes up
- * the first and continues.
- */
-static inline void cfs_race(u32 id)
-{
- if (CFS_FAIL_PRECHECK(id)) {
- if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
- int rc;
-
- cfs_race_state = 0;
- CERROR("cfs_race id %x sleeping\n", id);
- rc = wait_event_interruptible(cfs_race_waitq,
- !!cfs_race_state);
- CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
- } else {
- CERROR("cfs_fail_race id %x waking\n", id);
- cfs_race_state = 1;
- wake_up(&cfs_race_waitq);
- }
- }
-}
-
-#define CFS_RACE(id) cfs_race(id)
-
-#endif /* _LIBCFS_FAIL_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
deleted file mode 100644
index 0506f1d45757..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ /dev/null
@@ -1,866 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_hash.h
- *
- * Hashing routines
- *
- */
-
-#ifndef __LIBCFS_HASH_H__
-#define __LIBCFS_HASH_H__
-
-#include <linux/hash.h>
-
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
-
-/** disable debug */
-#define CFS_HASH_DEBUG_NONE 0
-/*
- * record hash depth and output to console when it's too deep,
- * computing overhead is low but consume more memory
- */
-#define CFS_HASH_DEBUG_1 1
-/** expensive, check key validation */
-#define CFS_HASH_DEBUG_2 2
-
-#define CFS_HASH_DEBUG_LEVEL CFS_HASH_DEBUG_NONE
-
-struct cfs_hash_ops;
-struct cfs_hash_lock_ops;
-struct cfs_hash_hlist_ops;
-
-union cfs_hash_lock {
- rwlock_t rw; /**< rwlock */
- spinlock_t spin; /**< spinlock */
-};
-
-/**
- * cfs_hash_bucket is a container of:
- * - lock, counter ...
- * - array of hash-head starting from hsb_head[0], hash-head can be one of
- * . struct cfs_hash_head
- * . struct cfs_hash_head_dep
- * . struct cfs_hash_dhead
- * . struct cfs_hash_dhead_dep
- * which depends on requirement of user
- * - some extra bytes (caller can require it while creating hash)
- */
-struct cfs_hash_bucket {
- union cfs_hash_lock hsb_lock; /**< bucket lock */
- u32 hsb_count; /**< current entries */
- u32 hsb_version; /**< change version */
- unsigned int hsb_index; /**< index of bucket */
- int hsb_depmax; /**< max depth on bucket */
- long hsb_head[0]; /**< hash-head array */
-};
-
-/**
- * cfs_hash bucket descriptor, it's normally in stack of caller
- */
-struct cfs_hash_bd {
- /* address of bucket */
- struct cfs_hash_bucket *bd_bucket;
- /* offset in bucket */
- unsigned int bd_offset;
-};
-
-#define CFS_HASH_NAME_LEN 16 /**< default name length */
-#define CFS_HASH_BIGNAME_LEN 64 /**< bigname for param tree */
-
-#define CFS_HASH_BKT_BITS 3 /**< default bits of bucket */
-#define CFS_HASH_BITS_MAX 30 /**< max bits of bucket */
-#define CFS_HASH_BITS_MIN CFS_HASH_BKT_BITS
-
-/**
- * common hash attributes.
- */
-enum cfs_hash_tag {
- /**
- * don't need any lock, caller will protect operations with it's
- * own lock. With this flag:
- * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
- * will be ignored.
- * . Some functions will be disabled with this flag, i.e:
- * cfs_hash_for_each_empty, cfs_hash_rehash
- */
- CFS_HASH_NO_LOCK = BIT(0),
- /** no bucket lock, use one spinlock to protect the whole hash */
- CFS_HASH_NO_BKTLOCK = BIT(1),
- /** rwlock to protect bucket */
- CFS_HASH_RW_BKTLOCK = BIT(2),
- /** spinlock to protect bucket */
- CFS_HASH_SPIN_BKTLOCK = BIT(3),
- /** always add new item to tail */
- CFS_HASH_ADD_TAIL = BIT(4),
- /** hash-table doesn't have refcount on item */
- CFS_HASH_NO_ITEMREF = BIT(5),
- /** big name for param-tree */
- CFS_HASH_BIGNAME = BIT(6),
- /** track global count */
- CFS_HASH_COUNTER = BIT(7),
- /** rehash item by new key */
- CFS_HASH_REHASH_KEY = BIT(8),
- /** Enable dynamic hash resizing */
- CFS_HASH_REHASH = BIT(9),
- /** can shrink hash-size */
- CFS_HASH_SHRINK = BIT(10),
- /** assert hash is empty on exit */
- CFS_HASH_ASSERT_EMPTY = BIT(11),
- /** record hlist depth */
- CFS_HASH_DEPTH = BIT(12),
- /**
- * rehash is always scheduled in a different thread, so current
- * change on hash table is non-blocking
- */
- CFS_HASH_NBLK_CHANGE = BIT(13),
- /**
- * NB, we typed hs_flags as u16, please change it
- * if you need to extend >=16 flags
- */
-};
-
-/** most used attributes */
-#define CFS_HASH_DEFAULT (CFS_HASH_RW_BKTLOCK | \
- CFS_HASH_COUNTER | CFS_HASH_REHASH)
-
-/**
- * cfs_hash is a hash-table implementation for general purpose, it can support:
- * . two refcount modes
- * hash-table with & without refcount
- * . four lock modes
- * nolock, one-spinlock, rw-bucket-lock, spin-bucket-lock
- * . general operations
- * lookup, add(add_tail or add_head), delete
- * . rehash
- * grows or shrink
- * . iteration
- * locked iteration and unlocked iteration
- * . bigname
- * support long name hash
- * . debug
- * trace max searching depth
- *
- * Rehash:
- * When the htable grows or shrinks, a separate task (cfs_hash_rehash_worker)
- * is spawned to handle the rehash in the background, it's possible that other
- * processes can concurrently perform additions, deletions, and lookups
- * without being blocked on rehash completion, because rehash will release
- * the global wrlock for each bucket.
- *
- * rehash and iteration can't run at the same time because it's too tricky
- * to keep both of them safe and correct.
- * As they are relatively rare operations, so:
- * . if iteration is in progress while we try to launch rehash, then
- * it just giveup, iterator will launch rehash at the end.
- * . if rehash is in progress while we try to iterate the hash table,
- * then we just wait (shouldn't be very long time), anyway, nobody
- * should expect iteration of whole hash-table to be non-blocking.
- *
- * During rehashing, a (key,object) pair may be in one of two buckets,
- * depending on whether the worker task has yet to transfer the object
- * to its new location in the table. Lookups and deletions need to search both
- * locations; additions must take care to only insert into the new bucket.
- */
-
-struct cfs_hash {
- /**
- * serialize with rehash, or serialize all operations if
- * the hash-table has CFS_HASH_NO_BKTLOCK
- */
- union cfs_hash_lock hs_lock;
- /** hash operations */
- struct cfs_hash_ops *hs_ops;
- /** hash lock operations */
- struct cfs_hash_lock_ops *hs_lops;
- /** hash list operations */
- struct cfs_hash_hlist_ops *hs_hops;
- /** hash buckets-table */
- struct cfs_hash_bucket **hs_buckets;
- /** total number of items on this hash-table */
- atomic_t hs_count;
- /** hash flags, see cfs_hash_tag for detail */
- u16 hs_flags;
- /** # of extra-bytes for bucket, for user saving extended attributes */
- u16 hs_extra_bytes;
- /** wants to iterate */
- u8 hs_iterating;
- /** hash-table is dying */
- u8 hs_exiting;
- /** current hash bits */
- u8 hs_cur_bits;
- /** min hash bits */
- u8 hs_min_bits;
- /** max hash bits */
- u8 hs_max_bits;
- /** bits for rehash */
- u8 hs_rehash_bits;
- /** bits for each bucket */
- u8 hs_bkt_bits;
- /** resize min threshold */
- u16 hs_min_theta;
- /** resize max threshold */
- u16 hs_max_theta;
- /** resize count */
- u32 hs_rehash_count;
- /** # of iterators (caller of cfs_hash_for_each_*) */
- u32 hs_iterators;
- /** rehash workitem */
- struct work_struct hs_rehash_work;
- /** refcount on this hash table */
- atomic_t hs_refcount;
- /** rehash buckets-table */
- struct cfs_hash_bucket **hs_rehash_buckets;
-#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
- /** serialize debug members */
- spinlock_t hs_dep_lock;
- /** max depth */
- unsigned int hs_dep_max;
- /** id of the deepest bucket */
- unsigned int hs_dep_bkt;
- /** offset in the deepest bucket */
- unsigned int hs_dep_off;
- /** bits when we found the max depth */
- unsigned int hs_dep_bits;
- /** workitem to output max depth */
- struct work_struct hs_dep_work;
-#endif
- /** name of htable */
- char hs_name[0];
-};
-
-struct cfs_hash_lock_ops {
- /** lock the hash table */
- void (*hs_lock)(union cfs_hash_lock *lock, int exclusive);
- /** unlock the hash table */
- void (*hs_unlock)(union cfs_hash_lock *lock, int exclusive);
- /** lock the hash bucket */
- void (*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive);
- /** unlock the hash bucket */
- void (*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive);
-};
-
-struct cfs_hash_hlist_ops {
- /** return hlist_head of hash-head of @bd */
- struct hlist_head *(*hop_hhead)(struct cfs_hash *hs,
- struct cfs_hash_bd *bd);
- /** return hash-head size */
- int (*hop_hhead_size)(struct cfs_hash *hs);
- /** add @hnode to hash-head of @bd */
- int (*hop_hnode_add)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode);
- /** remove @hnode from hash-head of @bd */
- int (*hop_hnode_del)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode);
-};
-
-struct cfs_hash_ops {
- /** return hashed value from @key */
- unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key,
- unsigned int mask);
- /** return key address of @hnode */
- void * (*hs_key)(struct hlist_node *hnode);
- /** copy key from @hnode to @key */
- void (*hs_keycpy)(struct hlist_node *hnode, void *key);
- /**
- * compare @key with key of @hnode
- * returns 1 on a match
- */
- int (*hs_keycmp)(const void *key, struct hlist_node *hnode);
- /** return object address of @hnode, i.e: container_of(...hnode) */
- void * (*hs_object)(struct hlist_node *hnode);
- /** get refcount of item, always called with holding bucket-lock */
- void (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode);
- /** release refcount of item */
- void (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode);
- /** release refcount of item, always called with holding bucket-lock */
- void (*hs_put_locked)(struct cfs_hash *hs,
- struct hlist_node *hnode);
- /** it's called before removing of @hnode */
- void (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode);
-};
-
-/** total number of buckets in @hs */
-#define CFS_HASH_NBKT(hs) \
- BIT((hs)->hs_cur_bits - (hs)->hs_bkt_bits)
-
-/** total number of buckets in @hs while rehashing */
-#define CFS_HASH_RH_NBKT(hs) \
- BIT((hs)->hs_rehash_bits - (hs)->hs_bkt_bits)
-
-/** number of hlist for in bucket */
-#define CFS_HASH_BKT_NHLIST(hs) BIT((hs)->hs_bkt_bits)
-
-/** total number of hlist in @hs */
-#define CFS_HASH_NHLIST(hs) BIT((hs)->hs_cur_bits)
-
-/** total number of hlist in @hs while rehashing */
-#define CFS_HASH_RH_NHLIST(hs) BIT((hs)->hs_rehash_bits)
-
-static inline int
-cfs_hash_with_no_lock(struct cfs_hash *hs)
-{
- /* caller will serialize all operations for this hash-table */
- return hs->hs_flags & CFS_HASH_NO_LOCK;
-}
-
-static inline int
-cfs_hash_with_no_bktlock(struct cfs_hash *hs)
-{
- /* no bucket lock, one single lock to protect the hash-table */
- return hs->hs_flags & CFS_HASH_NO_BKTLOCK;
-}
-
-static inline int
-cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
-{
- /* rwlock to protect hash bucket */
- return hs->hs_flags & CFS_HASH_RW_BKTLOCK;
-}
-
-static inline int
-cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
-{
- /* spinlock to protect hash bucket */
- return hs->hs_flags & CFS_HASH_SPIN_BKTLOCK;
-}
-
-static inline int
-cfs_hash_with_add_tail(struct cfs_hash *hs)
-{
- return hs->hs_flags & CFS_HASH_ADD_TAIL;
-}
-
-static inline int
-cfs_hash_with_no_itemref(struct cfs_hash *hs)
-{
- /*
- * hash-table doesn't keep refcount on item,
- * item can't be removed from hash unless it's
- * ZERO refcount
- */
- return hs->hs_flags & CFS_HASH_NO_ITEMREF;
-}
-
-static inline int
-cfs_hash_with_bigname(struct cfs_hash *hs)
-{
- return hs->hs_flags & CFS_HASH_BIGNAME;
-}
-
-static inline int
-cfs_hash_with_counter(struct cfs_hash *hs)
-{
- return hs->hs_flags & CFS_HASH_COUNTER;
-}
-
-static inline int
-cfs_hash_with_rehash(struct cfs_hash *hs)
-{
- return hs->hs_flags & CFS_HASH_REHASH;
-}
-
-static inline int
-cfs_hash_with_rehash_key(struct cfs_hash *hs)
-{
- return hs->hs_flags & CFS_HASH_REHASH_KEY;
-}
-
-static inline int
-cfs_hash_with_shrink(struct cfs_hash *hs)
-{
- return hs->hs_flags & CFS_HASH_SHRINK;
-}
-
-static inline int
-cfs_hash_with_assert_empty(struct cfs_hash *hs)
-{
- return hs->hs_flags & CFS_HASH_ASSERT_EMPTY;
-}
-
-static inline int
-cfs_hash_with_depth(struct cfs_hash *hs)
-{
- return hs->hs_flags & CFS_HASH_DEPTH;
-}
-
-static inline int
-cfs_hash_with_nblk_change(struct cfs_hash *hs)
-{
- return hs->hs_flags & CFS_HASH_NBLK_CHANGE;
-}
-
-static inline int
-cfs_hash_is_exiting(struct cfs_hash *hs)
-{
- /* cfs_hash_destroy is called */
- return hs->hs_exiting;
-}
-
-static inline int
-cfs_hash_is_rehashing(struct cfs_hash *hs)
-{
- /* rehash is launched */
- return !!hs->hs_rehash_bits;
-}
-
-static inline int
-cfs_hash_is_iterating(struct cfs_hash *hs)
-{
- /* someone is calling cfs_hash_for_each_* */
- return hs->hs_iterating || hs->hs_iterators;
-}
-
-static inline int
-cfs_hash_bkt_size(struct cfs_hash *hs)
-{
- return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
- hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
- hs->hs_extra_bytes;
-}
-
-static inline unsigned
-cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned int mask)
-{
- return hs->hs_ops->hs_hash(hs, key, mask);
-}
-
-static inline void *
-cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_key(hnode);
-}
-
-static inline void
-cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
-{
- if (hs->hs_ops->hs_keycpy)
- hs->hs_ops->hs_keycpy(hnode, key);
-}
-
-/**
- * Returns 1 on a match,
- */
-static inline int
-cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_keycmp(key, hnode);
-}
-
-static inline void *
-cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_object(hnode);
-}
-
-static inline void
-cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_get(hs, hnode);
-}
-
-static inline void
-cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_put_locked(hs, hnode);
-}
-
-static inline void
-cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_put(hs, hnode);
-}
-
-static inline void
-cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- if (hs->hs_ops->hs_exit)
- hs->hs_ops->hs_exit(hs, hnode);
-}
-
-static inline void cfs_hash_lock(struct cfs_hash *hs, int excl)
-{
- hs->hs_lops->hs_lock(&hs->hs_lock, excl);
-}
-
-static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl)
-{
- hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
-}
-
-static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
- atomic_t *condition)
-{
- LASSERT(cfs_hash_with_no_bktlock(hs));
- return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
-}
-
-static inline void cfs_hash_bd_lock(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, int excl)
-{
- hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
-}
-
-static inline void cfs_hash_bd_unlock(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, int excl)
-{
- hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
-}
-
-/**
- * operations on cfs_hash bucket (bd: bucket descriptor),
- * they are normally for hash-table without rehash
- */
-void cfs_hash_bd_get(struct cfs_hash *hs, const void *key,
- struct cfs_hash_bd *bd);
-
-static inline void
-cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key,
- struct cfs_hash_bd *bd, int excl)
-{
- cfs_hash_bd_get(hs, key, bd);
- cfs_hash_bd_lock(hs, bd, excl);
-}
-
-static inline unsigned
-cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
-}
-
-static inline void
-cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned int index,
- struct cfs_hash_bd *bd)
-{
- bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
- bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
-}
-
-static inline void *
-cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- return (void *)bd->bd_bucket +
- cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
-}
-
-static inline u32
-cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
-{
- /* need hold cfs_hash_bd_lock */
- return bd->bd_bucket->hsb_version;
-}
-
-static inline u32
-cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
-{
- /* need hold cfs_hash_bd_lock */
- return bd->bd_bucket->hsb_count;
-}
-
-static inline int
-cfs_hash_bd_depmax_get(struct cfs_hash_bd *bd)
-{
- return bd->bd_bucket->hsb_depmax;
-}
-
-static inline int
-cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
-{
- if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
- return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;
-
- if (bd1->bd_offset != bd2->bd_offset)
- return bd1->bd_offset - bd2->bd_offset;
-
- return 0;
-}
-
-void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode);
-void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode);
-void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
- struct cfs_hash_bd *bd_new,
- struct hlist_node *hnode);
-
-static inline int
-cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- atomic_t *condition)
-{
- LASSERT(cfs_hash_with_spin_bktlock(hs));
- return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
-}
-
-static inline struct hlist_head *
-cfs_hash_bd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- return hs->hs_hops->hop_hhead(hs, bd);
-}
-
-struct hlist_node *
-cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- const void *key);
-struct hlist_node *
-cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- const void *key);
-
-/**
- * operations on cfs_hash bucket (bd: bucket descriptor),
- * they are safe for hash-table with rehash
- */
-void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
- struct cfs_hash_bd *bds);
-void cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- int excl);
-void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- int excl);
-
-static inline void
-cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key,
- struct cfs_hash_bd *bds, int excl)
-{
- cfs_hash_dual_bd_get(hs, key, bds);
- cfs_hash_dual_bd_lock(hs, bds, excl);
-}
-
-struct hlist_node *
-cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- const void *key);
-struct hlist_node *
-cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- const void *key, struct hlist_node *hnode,
- int insist_add);
-struct hlist_node *
-cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- const void *key, struct hlist_node *hnode);
-
-/* Hash init/cleanup functions */
-struct cfs_hash *
-cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
- unsigned int bkt_bits, unsigned int extra_bytes,
- unsigned int min_theta, unsigned int max_theta,
- struct cfs_hash_ops *ops, unsigned int flags);
-
-struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
-void cfs_hash_putref(struct cfs_hash *hs);
-
-/* Hash addition functions */
-void cfs_hash_add(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode);
-int cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode);
-void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode);
-
-/* Hash deletion functions */
-void *cfs_hash_del(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode);
-void *cfs_hash_del_key(struct cfs_hash *hs, const void *key);
-
-/* Hash lookup/for_each functions */
-#define CFS_HASH_LOOP_HOG 1024
-
-typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs,
- struct cfs_hash_bd *bd,
- struct hlist_node *node,
- void *data);
-void *
-cfs_hash_lookup(struct cfs_hash *hs, const void *key);
-void
-cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, void *data);
-void
-cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
- void *data);
-int
-cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
- void *data, int start);
-int
-cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
- void *data);
-void
-cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
- cfs_hash_for_each_cb_t cb, void *data);
-typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
-void
-cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t cb, void *data);
-
-void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
- cfs_hash_for_each_cb_t cb, void *data);
-int cfs_hash_is_empty(struct cfs_hash *hs);
-u64 cfs_hash_size_get(struct cfs_hash *hs);
-
-/*
- * Rehash - Theta is calculated to be the average chained
- * hash depth assuming a perfectly uniform hash function.
- */
-void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
-void cfs_hash_rehash_cancel(struct cfs_hash *hs);
-void cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
-void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
- void *new_key, struct hlist_node *hnode);
-
-#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
-/* Validate hnode references the correct key */
-static inline void
-cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode)
-{
- LASSERT(cfs_hash_keycmp(hs, key, hnode));
-}
-
-/* Validate hnode is in the correct bucket */
-static inline void
-cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- struct cfs_hash_bd bds[2];
-
- cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
- LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
- bds[1].bd_bucket == bd->bd_bucket);
-}
-
-#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */
-
-static inline void
-cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode) {}
-
-static inline void
-cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode) {}
-
-#endif /* CFS_HASH_DEBUG_LEVEL */
-
-#define CFS_HASH_THETA_BITS 10
-#define CFS_HASH_MIN_THETA BIT(CFS_HASH_THETA_BITS - 1)
-#define CFS_HASH_MAX_THETA BIT(CFS_HASH_THETA_BITS + 1)
-
-/* Return integer component of theta */
-static inline int __cfs_hash_theta_int(int theta)
-{
- return (theta >> CFS_HASH_THETA_BITS);
-}
-
-/* Return a fractional value between 0 and 999 */
-static inline int __cfs_hash_theta_frac(int theta)
-{
- return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
- (__cfs_hash_theta_int(theta) * 1000);
-}
-
-static inline int __cfs_hash_theta(struct cfs_hash *hs)
-{
- return (atomic_read(&hs->hs_count) <<
- CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
-}
-
-static inline void
-__cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
-{
- LASSERT(min < max);
- hs->hs_min_theta = (u16)min;
- hs->hs_max_theta = (u16)max;
-}
-
-/* Generic debug formatting routines mainly for proc handler */
-struct seq_file;
-void cfs_hash_debug_header(struct seq_file *m);
-void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
-
-/*
- * Generic djb2 hash algorithm for character arrays.
- */
-static inline unsigned
-cfs_hash_djb2_hash(const void *key, size_t size, unsigned int mask)
-{
- unsigned int i, hash = 5381;
-
- LASSERT(key);
-
- for (i = 0; i < size; i++)
- hash = hash * 33 + ((char *)key)[i];
-
- return (hash & mask);
-}
-
-/*
- * Generic u32 hash algorithm.
- */
-static inline unsigned
-cfs_hash_u32_hash(const u32 key, unsigned int mask)
-{
- return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
-}
-
-/*
- * Generic u64 hash algorithm.
- */
-static inline unsigned
-cfs_hash_u64_hash(const u64 key, unsigned int mask)
-{
- return ((unsigned int)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
-}
-
-/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
-#define cfs_hash_for_each_bd(bds, n, i) \
- for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)
-
-/** iterate over all buckets of @hs */
-#define cfs_hash_for_each_bucket(hs, bd, pos) \
- for (pos = 0; \
- pos < CFS_HASH_NBKT(hs) && \
- ((bd)->bd_bucket = (hs)->hs_buckets[pos]) != NULL; pos++)
-
-/** iterate over all hlist of bucket @bd */
-#define cfs_hash_bd_for_each_hlist(hs, bd, hlist) \
- for ((bd)->bd_offset = 0; \
- (bd)->bd_offset < CFS_HASH_BKT_NHLIST(hs) && \
- (hlist = cfs_hash_bd_hhead(hs, bd)) != NULL; \
- (bd)->bd_offset++)
-
-/* !__LIBCFS__HASH_H__ */
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
deleted file mode 100644
index d4c5965c43b1..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_prim.h
- *
- * General primitives.
- *
- */
-
-#ifndef __LIBCFS_PRIM_H__
-#define __LIBCFS_PRIM_H__
-
-/*
- * Memory
- */
-#if BITS_PER_LONG == 32
-/* limit to lowmem on 32-bit systems */
-#define NUM_CACHEPAGES \
- min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
-#else
-#define NUM_CACHEPAGES totalram_pages
-#endif
-
-static inline unsigned int memory_pressure_get(void)
-{
- return current->flags & PF_MEMALLOC;
-}
-
-static inline void memory_pressure_set(void)
-{
- current->flags |= PF_MEMALLOC;
-}
-
-static inline void memory_pressure_clr(void)
-{
- current->flags &= ~PF_MEMALLOC;
-}
-
-static inline int cfs_memory_pressure_get_and_set(void)
-{
- int old = memory_pressure_get();
-
- if (!old)
- memory_pressure_set();
- return old;
-}
-
-static inline void cfs_memory_pressure_restore(int old)
-{
- if (old)
- memory_pressure_set();
- else
- memory_pressure_clr();
-}
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
deleted file mode 100644
index 491d5971d199..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ /dev/null
@@ -1,200 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_private.h
- *
- * Various defines for libcfs.
- *
- */
-
-#ifndef __LIBCFS_PRIVATE_H__
-#define __LIBCFS_PRIVATE_H__
-
-#ifndef DEBUG_SUBSYSTEM
-# define DEBUG_SUBSYSTEM S_UNDEFINED
-#endif
-
-#define LASSERTF(cond, fmt, ...) \
-do { \
- if (unlikely(!(cond))) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL); \
- libcfs_debug_msg(&__msg_data, \
- "ASSERTION( %s ) failed: " fmt, #cond, \
- ## __VA_ARGS__); \
- lbug_with_loc(&__msg_data); \
- } \
-} while (0)
-
-#define LASSERT(cond) LASSERTF(cond, "\n")
-
-#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
-/**
- * This is for more expensive checks that one doesn't want to be enabled all
- * the time. LINVRNT() has to be explicitly enabled by
- * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
- */
-# define LINVRNT(exp) LASSERT(exp)
-#else
-# define LINVRNT(exp) ((void)sizeof !!(exp))
-#endif
-
-void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msg);
-
-#define LBUG() \
-do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
- lbug_with_loc(&msgdata); \
-} while (0)
-
-/*
- * Use #define rather than inline, as lnet_cpt_table() might
- * not be defined yet
- */
-#define kmalloc_cpt(size, flags, cpt) \
- kmalloc_node(size, flags, cfs_cpt_spread_node(lnet_cpt_table(), cpt))
-
-#define kzalloc_cpt(size, flags, cpt) \
- kmalloc_node(size, flags | __GFP_ZERO, \
- cfs_cpt_spread_node(lnet_cpt_table(), cpt))
-
-#define kvmalloc_cpt(size, flags, cpt) \
- kvmalloc_node(size, flags, \
- cfs_cpt_spread_node(lnet_cpt_table(), cpt))
-
-#define kvzalloc_cpt(size, flags, cpt) \
- kvmalloc_node(size, flags | __GFP_ZERO, \
- cfs_cpt_spread_node(lnet_cpt_table(), cpt))
-
-/******************************************************************************/
-
-void libcfs_debug_dumplog(void);
-int libcfs_debug_init(unsigned long bufsize);
-int libcfs_debug_cleanup(void);
-int libcfs_debug_clear_buffer(void);
-int libcfs_debug_mark_buffer(const char *text);
-
-/*
- * allocate a variable array, returned value is an array of pointers.
- * Caller can specify length of array by count.
- */
-void *cfs_array_alloc(int count, unsigned int size);
-void cfs_array_free(void *vars);
-
-#define LASSERT_ATOMIC_ENABLED (1)
-
-#if LASSERT_ATOMIC_ENABLED
-
-/** assert value of @a is equal to @v */
-#define LASSERT_ATOMIC_EQ(a, v) \
- LASSERTF(atomic_read(a) == v, "value: %d\n", atomic_read((a)))
-
-/** assert value of @a is unequal to @v */
-#define LASSERT_ATOMIC_NE(a, v) \
- LASSERTF(atomic_read(a) != v, "value: %d\n", atomic_read((a)))
-
-/** assert value of @a is little than @v */
-#define LASSERT_ATOMIC_LT(a, v) \
- LASSERTF(atomic_read(a) < v, "value: %d\n", atomic_read((a)))
-
-/** assert value of @a is little/equal to @v */
-#define LASSERT_ATOMIC_LE(a, v) \
- LASSERTF(atomic_read(a) <= v, "value: %d\n", atomic_read((a)))
-
-/** assert value of @a is great than @v */
-#define LASSERT_ATOMIC_GT(a, v) \
- LASSERTF(atomic_read(a) > v, "value: %d\n", atomic_read((a)))
-
-/** assert value of @a is great/equal to @v */
-#define LASSERT_ATOMIC_GE(a, v) \
- LASSERTF(atomic_read(a) >= v, "value: %d\n", atomic_read((a)))
-
-/** assert value of @a is great than @v1 and little than @v2 */
-#define LASSERT_ATOMIC_GT_LT(a, v1, v2) \
-do { \
- int __v = atomic_read(a); \
- LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v); \
-} while (0)
-
-/** assert value of @a is great than @v1 and little/equal to @v2 */
-#define LASSERT_ATOMIC_GT_LE(a, v1, v2) \
-do { \
- int __v = atomic_read(a); \
- LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v); \
-} while (0)
-
-/** assert value of @a is great/equal to @v1 and little than @v2 */
-#define LASSERT_ATOMIC_GE_LT(a, v1, v2) \
-do { \
- int __v = atomic_read(a); \
- LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v); \
-} while (0)
-
-/** assert value of @a is great/equal to @v1 and little/equal to @v2 */
-#define LASSERT_ATOMIC_GE_LE(a, v1, v2) \
-do { \
- int __v = atomic_read(a); \
- LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v); \
-} while (0)
-
-#else /* !LASSERT_ATOMIC_ENABLED */
-
-#define LASSERT_ATOMIC_EQ(a, v) do {} while (0)
-#define LASSERT_ATOMIC_NE(a, v) do {} while (0)
-#define LASSERT_ATOMIC_LT(a, v) do {} while (0)
-#define LASSERT_ATOMIC_LE(a, v) do {} while (0)
-#define LASSERT_ATOMIC_GT(a, v) do {} while (0)
-#define LASSERT_ATOMIC_GE(a, v) do {} while (0)
-#define LASSERT_ATOMIC_GT_LT(a, v1, v2) do {} while (0)
-#define LASSERT_ATOMIC_GT_LE(a, v1, v2) do {} while (0)
-#define LASSERT_ATOMIC_GE_LT(a, v1, v2) do {} while (0)
-#define LASSERT_ATOMIC_GE_LE(a, v1, v2) do {} while (0)
-
-#endif /* LASSERT_ATOMIC_ENABLED */
-
-#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0)
-#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0)
-
-/* implication */
-#define ergo(a, b) (!(a) || (b))
-/* logical equivalence */
-#define equi(a, b) (!!(a) == !!(b))
-
-#ifndef HAVE_CFS_SIZE_ROUND
-static inline size_t cfs_size_round(int val)
-{
- return round_up(val, 8);
-}
-
-#define HAVE_CFS_SIZE_ROUND
-#endif
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
deleted file mode 100644
index 66463477074a..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_string.h
- *
- * Generic string manipulation functions.
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- */
-
-#ifndef __LIBCFS_STRING_H__
-#define __LIBCFS_STRING_H__
-
-/* libcfs_string.c */
-/* Convert a text string to a bitmask */
-int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
- int *oldmask, int minmask, int allmask);
-/* trim leading and trailing space characters */
-char *cfs_firststr(char *str, size_t size);
-
-/**
- * Structure to represent NULL-less strings.
- */
-struct cfs_lstr {
- char *ls_str;
- int ls_len;
-};
-
-/*
- * Structure to represent \<range_expr\> token of the syntax.
- */
-struct cfs_range_expr {
- /*
- * Link to cfs_expr_list::el_exprs.
- */
- struct list_head re_link;
- u32 re_lo;
- u32 re_hi;
- u32 re_stride;
-};
-
-struct cfs_expr_list {
- struct list_head el_link;
- struct list_head el_exprs;
-};
-
-int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
-int cfs_str2num_check(char *str, int nob, unsigned int *num,
- unsigned int min, unsigned int max);
-int cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list);
-int cfs_expr_list_print(char *buffer, int count,
- struct cfs_expr_list *expr_list);
-int cfs_expr_list_values(struct cfs_expr_list *expr_list,
- int max, u32 **values);
-static inline void
-cfs_expr_list_values_free(u32 *values, int num)
-{
- /*
- * This array is allocated by kvalloc(), so it shouldn't be freed
- * by OBD_FREE() if it's called by module other than libcfs & LNet,
- * otherwise we will see fake memory leak
- */
- kvfree(values);
-}
-
-void cfs_expr_list_free(struct cfs_expr_list *expr_list);
-int cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
- struct cfs_expr_list **elpp);
-void cfs_expr_list_free_list(struct list_head *list);
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
deleted file mode 100644
index c4f25be78268..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_time.h
- *
- * Time functions.
- *
- */
-
-#ifndef __LIBCFS_TIME_H__
-#define __LIBCFS_TIME_H__
-/*
- * generic time manipulation functions.
- */
-
-static inline unsigned long cfs_time_add(unsigned long t, long d)
-{
- return (unsigned long)(t + d);
-}
-
-static inline unsigned long cfs_time_sub(unsigned long t1, unsigned long t2)
-{
- return (unsigned long)(t1 - t2);
-}
-
-static inline int cfs_time_after(unsigned long t1, unsigned long t2)
-{
- return time_before(t2, t1);
-}
-
-static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2)
-{
- return time_before_eq(t2, t1);
-}
-
-static inline unsigned long cfs_time_shift(int seconds)
-{
- return cfs_time_add(cfs_time_current(), seconds * HZ);
-}
-
-/*
- * return valid time-out based on user supplied one. Currently we only check
- * that time-out is not shorted than allowed.
- */
-static inline long cfs_timeout_cap(long timeout)
-{
- if (timeout < CFS_TICK)
- timeout = CFS_TICK;
- return timeout;
-}
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
deleted file mode 100644
index 30e333af8d0d..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LIBCFS_LINUX_LIBCFS_H__
-#define __LIBCFS_LINUX_LIBCFS_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-#include <linux/bitops.h>
-#include <linux/compiler.h>
-#include <linux/ctype.h>
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/kallsyms.h>
-#include <linux/kernel.h>
-#include <linux/kmod.h>
-#include <linux/kthread.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/mm_inline.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/pagemap.h>
-#include <linux/random.h>
-#include <linux/rbtree.h>
-#include <linux/rwsem.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/time.h>
-#include <linux/timer.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/vmalloc.h>
-#include <net/sock.h>
-#include <linux/atomic.h>
-#include <asm/div64.h>
-#include <linux/timex.h>
-#include <linux/uaccess.h>
-#include <stdarg.h>
-#include "linux-cpu.h"
-#include "linux-time.h"
-
-#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
-
-#if !defined(__x86_64__)
-# ifdef __ia64__
-# define CDEBUG_STACK() (THREAD_SIZE - \
- ((unsigned long)__builtin_dwarf_cfa() & \
- (THREAD_SIZE - 1)))
-# else
-# define CDEBUG_STACK() (THREAD_SIZE - \
- ((unsigned long)__builtin_frame_address(0) & \
- (THREAD_SIZE - 1)))
-# endif /* __ia64__ */
-
-#define __CHECK_STACK(msgdata, mask, cdls) \
-do { \
- if (unlikely(CDEBUG_STACK() > libcfs_stack)) { \
- LIBCFS_DEBUG_MSG_DATA_INIT(msgdata, D_WARNING, NULL); \
- libcfs_stack = CDEBUG_STACK(); \
- libcfs_debug_msg(msgdata, \
- "maximum lustre stack %lu\n", \
- CDEBUG_STACK()); \
- (msgdata)->msg_mask = mask; \
- (msgdata)->msg_cdls = cdls; \
- dump_stack(); \
- /*panic("LBUG");*/ \
- } \
-} while (0)
-#define CFS_CHECK_STACK(msgdata, mask, cdls) __CHECK_STACK(msgdata, mask, cdls)
-#else /* __x86_64__ */
-#define CFS_CHECK_STACK(msgdata, mask, cdls) do {} while (0)
-#define CDEBUG_STACK() (0L)
-#endif /* __x86_64__ */
-
-#define __current_nesting_level() (0)
-
-/**
- * Platform specific declarations for cfs_curproc API (libcfs/curproc.h)
- *
- * Implementation is in linux-curproc.c
- */
-#define CFS_CURPROC_COMM_MAX (sizeof((struct task_struct *)0)->comm)
-
-#include <linux/capability.h>
-
-#ifndef WITH_WATCHDOG
-#define WITH_WATCHDOG
-#endif
-
-#endif /* _LINUX_LIBCFS_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
deleted file mode 100644
index 6035376f2830..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-cpu.h
- *
- * Basic library routines.
- *
- * Author: liang@whamcloud.com
- */
-
-#ifndef __LIBCFS_LINUX_CPU_H__
-#define __LIBCFS_LINUX_CPU_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-#include <linux/cpu.h>
-#include <linux/cpuset.h>
-#include <linux/topology.h>
-
-#ifdef CONFIG_SMP
-
-#define HAVE_LIBCFS_CPT
-
-/** virtual processing unit */
-struct cfs_cpu_partition {
- /* CPUs mask for this partition */
- cpumask_var_t cpt_cpumask;
- /* nodes mask for this partition */
- nodemask_t *cpt_nodemask;
- /* spread rotor for NUMA allocator */
- unsigned int cpt_spread_rotor;
-};
-
-/** descriptor for CPU partitions */
-struct cfs_cpt_table {
- /* version, reserved for hotplug */
- unsigned int ctb_version;
- /* spread rotor for NUMA allocator */
- unsigned int ctb_spread_rotor;
- /* # of CPU partitions */
- unsigned int ctb_nparts;
- /* partitions tables */
- struct cfs_cpu_partition *ctb_parts;
- /* shadow HW CPU to CPU partition ID */
- int *ctb_cpu2cpt;
- /* all cpus in this partition table */
- cpumask_var_t ctb_cpumask;
- /* all nodes in this partition table */
- nodemask_t *ctb_nodemask;
-};
-
-#endif /* CONFIG_SMP */
-#endif /* __LIBCFS_LINUX_CPU_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
deleted file mode 100644
index 805cb326af86..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-time.h
- *
- * Implementation of portable time API for Linux (kernel and user-level).
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-
-#ifndef __LIBCFS_LINUX_LINUX_TIME_H__
-#define __LIBCFS_LINUX_LINUX_TIME_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-#define ONE_BILLION ((u_int64_t)1000000000)
-#define ONE_MILLION 1000000
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/div64.h>
-
-/*
- * post 2.5 kernels.
- */
-
-#include <linux/jiffies.h>
-
-/*
- * Generic kernel stuff
- */
-
-static inline unsigned long cfs_time_current(void)
-{
- return jiffies;
-}
-
-static inline long cfs_duration_sec(long d)
-{
- return d / msecs_to_jiffies(MSEC_PER_SEC);
-}
-
-#define cfs_time_current_64 get_jiffies_64
-
-static inline u64 cfs_time_add_64(u64 t, u64 d)
-{
- return t + d;
-}
-
-static inline u64 cfs_time_shift_64(int seconds)
-{
- return cfs_time_add_64(cfs_time_current_64(),
- seconds * HZ);
-}
-
-static inline int cfs_time_before_64(u64 t1, u64 t2)
-{
- return (__s64)t2 - (__s64)t1 > 0;
-}
-
-static inline int cfs_time_beforeq_64(u64 t1, u64 t2)
-{
- return (__s64)t2 - (__s64)t1 >= 0;
-}
-
-/*
- * One jiffy
- */
-#define CFS_TICK (1)
-
-#define CFS_DURATION_T "%ld"
-
-#endif /* __LIBCFS_LINUX_LINUX_TIME_H__ */
diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
deleted file mode 100644
index dae2e4f0056c..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ /dev/null
@@ -1,212 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- */
-
-#ifndef __LNET_API_H__
-#define __LNET_API_H__
-
-/** \defgroup lnet LNet
- *
- * The Lustre Networking subsystem.
- *
- * LNet is an asynchronous message-passing API, which provides an unreliable
- * connectionless service that can't guarantee any order. It supports OFA IB,
- * TCP/IP, and Cray Interconnects, and routes between heterogeneous networks.
- *
- * @{
- */
-
-#include <uapi/linux/lnet/lnet-types.h>
-
-/** \defgroup lnet_init_fini Initialization and cleanup
- * The LNet must be properly initialized before any LNet calls can be made.
- * @{
- */
-int LNetNIInit(lnet_pid_t requested_pid);
-int LNetNIFini(void);
-/** @} lnet_init_fini */
-
-/** \defgroup lnet_addr LNet addressing and basic types
- *
- * Addressing scheme and basic data types of LNet.
- *
- * The LNet API is memory-oriented, so LNet must be able to address not only
- * end-points but also memory region within a process address space.
- * An ::lnet_nid_t addresses an end-point. An ::lnet_pid_t identifies a process
- * in a node. A portal represents an opening in the address space of a
- * process. Match bits is criteria to identify a region of memory inside a
- * portal, and offset specifies an offset within the memory region.
- *
- * LNet creates a table of portals for each process during initialization.
- * This table has MAX_PORTALS entries and its size can't be dynamically
- * changed. A portal stays empty until the owning process starts to add
- * memory regions to it. A portal is sometimes called an index because
- * it's an entry in the portals table of a process.
- *
- * \see LNetMEAttach
- * @{
- */
-int LNetGetId(unsigned int index, struct lnet_process_id *id);
-int LNetDist(lnet_nid_t nid, lnet_nid_t *srcnid, __u32 *order);
-
-/** @} lnet_addr */
-
-/** \defgroup lnet_me Match entries
- *
- * A match entry (abbreviated as ME) describes a set of criteria to accept
- * incoming requests.
- *
- * A portal is essentially a match list plus a set of attributes. A match
- * list is a chain of MEs. Each ME includes a pointer to a memory descriptor
- * and a set of match criteria. The match criteria can be used to reject
- * incoming requests based on process ID or the match bits provided in the
- * request. MEs can be dynamically inserted into a match list by LNetMEAttach()
- * and LNetMEInsert(), and removed from its list by LNetMEUnlink().
- * @{
- */
-int LNetMEAttach(unsigned int portal,
- struct lnet_process_id match_id_in,
- __u64 match_bits_in,
- __u64 ignore_bits_in,
- enum lnet_unlink unlink_in,
- enum lnet_ins_pos pos_in,
- struct lnet_handle_me *handle_out);
-
-int LNetMEInsert(struct lnet_handle_me current_in,
- struct lnet_process_id match_id_in,
- __u64 match_bits_in,
- __u64 ignore_bits_in,
- enum lnet_unlink unlink_in,
- enum lnet_ins_pos position_in,
- struct lnet_handle_me *handle_out);
-
-int LNetMEUnlink(struct lnet_handle_me current_in);
-/** @} lnet_me */
-
-/** \defgroup lnet_md Memory descriptors
- *
- * A memory descriptor contains information about a region of a user's
- * memory (either in kernel or user space) and optionally points to an
- * event queue where information about the operations performed on the
- * memory descriptor are recorded. Memory descriptor is abbreviated as
- * MD and can be used interchangeably with the memory region it describes.
- *
- * The LNet API provides two operations to create MDs: LNetMDAttach()
- * and LNetMDBind(); one operation to unlink and release the resources
- * associated with a MD: LNetMDUnlink().
- * @{
- */
-int LNetMDAttach(struct lnet_handle_me current_in,
- struct lnet_md md_in,
- enum lnet_unlink unlink_in,
- struct lnet_handle_md *md_handle_out);
-
-int LNetMDBind(struct lnet_md md_in,
- enum lnet_unlink unlink_in,
- struct lnet_handle_md *md_handle_out);
-
-int LNetMDUnlink(struct lnet_handle_md md_in);
-/** @} lnet_md */
-
-/** \defgroup lnet_eq Events and event queues
- *
- * Event queues (abbreviated as EQ) are used to log operations performed on
- * local MDs. In particular, they signal the completion of a data transmission
- * into or out of a MD. They can also be used to hold acknowledgments for
- * completed PUT operations and indicate when a MD has been unlinked. Multiple
- * MDs can share a single EQ. An EQ may have an optional event handler
- * associated with it. If an event handler exists, it will be run for each
- * event that is deposited into the EQ.
- *
- * In addition to the lnet_handle_eq, the LNet API defines two types
- * associated with events: The ::lnet_event_kind defines the kinds of events
- * that can be stored in an EQ. The lnet_event defines a structure that
- * holds the information about with an event.
- *
- * There are five functions for dealing with EQs: LNetEQAlloc() is used to
- * create an EQ and allocate the resources needed, while LNetEQFree()
- * releases these resources and free the EQ. LNetEQGet() retrieves the next
- * event from an EQ, and LNetEQWait() can be used to block a process until
- * an EQ has at least one event. LNetEQPoll() can be used to test or wait
- * on multiple EQs.
- * @{
- */
-int LNetEQAlloc(unsigned int count_in,
- lnet_eq_handler_t handler,
- struct lnet_handle_eq *handle_out);
-
-int LNetEQFree(struct lnet_handle_eq eventq_in);
-
-int LNetEQPoll(struct lnet_handle_eq *eventqs_in,
- int neq_in,
- int timeout_ms,
- int interruptible,
- struct lnet_event *event_out,
- int *which_eq_out);
-/** @} lnet_eq */
-
-/** \defgroup lnet_data Data movement operations
- *
- * The LNet API provides two data movement operations: LNetPut()
- * and LNetGet().
- * @{
- */
-int LNetPut(lnet_nid_t self,
- struct lnet_handle_md md_in,
- enum lnet_ack_req ack_req_in,
- struct lnet_process_id target_in,
- unsigned int portal_in,
- __u64 match_bits_in,
- unsigned int offset_in,
- __u64 hdr_data_in);
-
-int LNetGet(lnet_nid_t self,
- struct lnet_handle_md md_in,
- struct lnet_process_id target_in,
- unsigned int portal_in,
- __u64 match_bits_in,
- unsigned int offset_in);
-/** @} lnet_data */
-
-/** \defgroup lnet_misc Miscellaneous operations.
- * Miscellaneous operations.
- * @{
- */
-int LNetSetLazyPortal(int portal);
-int LNetClearLazyPortal(int portal);
-int LNetCtl(unsigned int cmd, void *arg);
-void LNetDebugPeer(struct lnet_process_id id);
-
-/** @} lnet_misc */
-
-/** @} lnet */
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
deleted file mode 100644
index df4c72507a15..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ /dev/null
@@ -1,644 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- *
- * lnet/include/lnet/lib-lnet.h
- */
-
-#ifndef __LNET_LIB_LNET_H__
-#define __LNET_LIB_LNET_H__
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/api.h>
-#include <linux/lnet/lib-types.h>
-#include <uapi/linux/lnet/lnet-dlc.h>
-#include <uapi/linux/lnet/lnet-types.h>
-#include <uapi/linux/lnet/lnetctl.h>
-#include <uapi/linux/lnet/nidstr.h>
-
-extern struct lnet the_lnet; /* THE network */
-
-#if (BITS_PER_LONG == 32)
-/* 2 CPTs, allowing more CPTs might make us under memory pressure */
-#define LNET_CPT_MAX_BITS 1
-
-#else /* 64-bit system */
-/*
- * 256 CPTs for thousands of CPUs, allowing more CPTs might make us
- * under risk of consuming all lh_cookie.
- */
-#define LNET_CPT_MAX_BITS 8
-#endif /* BITS_PER_LONG == 32 */
-
-/* max allowed CPT number */
-#define LNET_CPT_MAX (1 << LNET_CPT_MAX_BITS)
-
-#define LNET_CPT_NUMBER (the_lnet.ln_cpt_number)
-#define LNET_CPT_BITS (the_lnet.ln_cpt_bits)
-#define LNET_CPT_MASK ((1ULL << LNET_CPT_BITS) - 1)
-
-/** exclusive lock */
-#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
-
-static inline int lnet_is_route_alive(struct lnet_route *route)
-{
- /* gateway is down */
- if (!route->lr_gateway->lp_alive)
- return 0;
- /* no NI status, assume it's alive */
- if ((route->lr_gateway->lp_ping_feats &
- LNET_PING_FEAT_NI_STATUS) == 0)
- return 1;
- /* has NI status, check # down NIs */
- return route->lr_downis == 0;
-}
-
-static inline int lnet_is_wire_handle_none(struct lnet_handle_wire *wh)
-{
- return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE &&
- wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE);
-}
-
-static inline int lnet_md_exhausted(struct lnet_libmd *md)
-{
- return (!md->md_threshold ||
- ((md->md_options & LNET_MD_MAX_SIZE) &&
- md->md_offset + md->md_max_size > md->md_length));
-}
-
-static inline int lnet_md_unlinkable(struct lnet_libmd *md)
-{
- /*
- * Should unlink md when its refcount is 0 and either:
- * - md has been flagged for deletion (by auto unlink or
- * LNetM[DE]Unlink, in the latter case md may not be exhausted).
- * - auto unlink is on and md is exhausted.
- */
- if (md->md_refcount)
- return 0;
-
- if (md->md_flags & LNET_MD_FLAG_ZOMBIE)
- return 1;
-
- return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) &&
- lnet_md_exhausted(md));
-}
-
-#define lnet_cpt_table() (the_lnet.ln_cpt_table)
-#define lnet_cpt_current() cfs_cpt_current(the_lnet.ln_cpt_table, 1)
-
-static inline int
-lnet_cpt_of_cookie(__u64 cookie)
-{
- unsigned int cpt = (cookie >> LNET_COOKIE_TYPE_BITS) & LNET_CPT_MASK;
-
- /*
- * LNET_CPT_NUMBER doesn't have to be power2, which means we can
- * get illegal cpt from it's invalid cookie
- */
- return cpt < LNET_CPT_NUMBER ? cpt : cpt % LNET_CPT_NUMBER;
-}
-
-static inline void
-lnet_res_lock(int cpt)
-{
- cfs_percpt_lock(the_lnet.ln_res_lock, cpt);
-}
-
-static inline void
-lnet_res_unlock(int cpt)
-{
- cfs_percpt_unlock(the_lnet.ln_res_lock, cpt);
-}
-
-static inline int
-lnet_res_lock_current(void)
-{
- int cpt = lnet_cpt_current();
-
- lnet_res_lock(cpt);
- return cpt;
-}
-
-static inline void
-lnet_net_lock(int cpt)
-{
- cfs_percpt_lock(the_lnet.ln_net_lock, cpt);
-}
-
-static inline void
-lnet_net_unlock(int cpt)
-{
- cfs_percpt_unlock(the_lnet.ln_net_lock, cpt);
-}
-
-static inline int
-lnet_net_lock_current(void)
-{
- int cpt = lnet_cpt_current();
-
- lnet_net_lock(cpt);
- return cpt;
-}
-
-#define LNET_LOCK() lnet_net_lock(LNET_LOCK_EX)
-#define LNET_UNLOCK() lnet_net_unlock(LNET_LOCK_EX)
-
-#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock)
-#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock)
-#define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock)
-#define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock)
-
-#define MAX_PORTALS 64
-
-static inline struct lnet_libmd *
-lnet_md_alloc(struct lnet_md *umd)
-{
- struct lnet_libmd *md;
- unsigned int size;
- unsigned int niov;
-
- if (umd->options & LNET_MD_KIOV) {
- niov = umd->length;
- size = offsetof(struct lnet_libmd, md_iov.kiov[niov]);
- } else {
- niov = umd->options & LNET_MD_IOVEC ? umd->length : 1;
- size = offsetof(struct lnet_libmd, md_iov.iov[niov]);
- }
-
- md = kzalloc(size, GFP_NOFS);
-
- if (md) {
- /* Set here in case of early free */
- md->md_options = umd->options;
- md->md_niov = niov;
- INIT_LIST_HEAD(&md->md_list);
- }
-
- return md;
-}
-
-struct lnet_libhandle *lnet_res_lh_lookup(struct lnet_res_container *rec,
- __u64 cookie);
-void lnet_res_lh_initialize(struct lnet_res_container *rec,
- struct lnet_libhandle *lh);
-static inline void
-lnet_res_lh_invalidate(struct lnet_libhandle *lh)
-{
- /* NB: cookie is still useful, don't reset it */
- list_del(&lh->lh_hash_chain);
-}
-
-static inline void
-lnet_eq2handle(struct lnet_handle_eq *handle, struct lnet_eq *eq)
-{
- if (!eq) {
- LNetInvalidateEQHandle(handle);
- return;
- }
-
- handle->cookie = eq->eq_lh.lh_cookie;
-}
-
-static inline struct lnet_eq *
-lnet_handle2eq(struct lnet_handle_eq *handle)
-{
- struct lnet_libhandle *lh;
-
- lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie);
- if (!lh)
- return NULL;
-
- return lh_entry(lh, struct lnet_eq, eq_lh);
-}
-
-static inline void
-lnet_md2handle(struct lnet_handle_md *handle, struct lnet_libmd *md)
-{
- handle->cookie = md->md_lh.lh_cookie;
-}
-
-static inline struct lnet_libmd *
-lnet_handle2md(struct lnet_handle_md *handle)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_libhandle *lh;
- int cpt;
-
- cpt = lnet_cpt_of_cookie(handle->cookie);
- lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
- handle->cookie);
- if (!lh)
- return NULL;
-
- return lh_entry(lh, struct lnet_libmd, md_lh);
-}
-
-static inline struct lnet_libmd *
-lnet_wire_handle2md(struct lnet_handle_wire *wh)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_libhandle *lh;
- int cpt;
-
- if (wh->wh_interface_cookie != the_lnet.ln_interface_cookie)
- return NULL;
-
- cpt = lnet_cpt_of_cookie(wh->wh_object_cookie);
- lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
- wh->wh_object_cookie);
- if (!lh)
- return NULL;
-
- return lh_entry(lh, struct lnet_libmd, md_lh);
-}
-
-static inline void
-lnet_me2handle(struct lnet_handle_me *handle, struct lnet_me *me)
-{
- handle->cookie = me->me_lh.lh_cookie;
-}
-
-static inline struct lnet_me *
-lnet_handle2me(struct lnet_handle_me *handle)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_libhandle *lh;
- int cpt;
-
- cpt = lnet_cpt_of_cookie(handle->cookie);
- lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt],
- handle->cookie);
- if (!lh)
- return NULL;
-
- return lh_entry(lh, struct lnet_me, me_lh);
-}
-
-static inline void
-lnet_peer_addref_locked(struct lnet_peer *lp)
-{
- LASSERT(lp->lp_refcount > 0);
- lp->lp_refcount++;
-}
-
-void lnet_destroy_peer_locked(struct lnet_peer *lp);
-
-static inline void
-lnet_peer_decref_locked(struct lnet_peer *lp)
-{
- LASSERT(lp->lp_refcount > 0);
- lp->lp_refcount--;
- if (!lp->lp_refcount)
- lnet_destroy_peer_locked(lp);
-}
-
-static inline int
-lnet_isrouter(struct lnet_peer *lp)
-{
- return lp->lp_rtr_refcount ? 1 : 0;
-}
-
-static inline void
-lnet_ni_addref_locked(struct lnet_ni *ni, int cpt)
-{
- LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER);
- LASSERT(*ni->ni_refs[cpt] >= 0);
-
- (*ni->ni_refs[cpt])++;
-}
-
-static inline void
-lnet_ni_addref(struct lnet_ni *ni)
-{
- lnet_net_lock(0);
- lnet_ni_addref_locked(ni, 0);
- lnet_net_unlock(0);
-}
-
-static inline void
-lnet_ni_decref_locked(struct lnet_ni *ni, int cpt)
-{
- LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER);
- LASSERT(*ni->ni_refs[cpt] > 0);
-
- (*ni->ni_refs[cpt])--;
-}
-
-static inline void
-lnet_ni_decref(struct lnet_ni *ni)
-{
- lnet_net_lock(0);
- lnet_ni_decref_locked(ni, 0);
- lnet_net_unlock(0);
-}
-
-void lnet_ni_free(struct lnet_ni *ni);
-struct lnet_ni *
-lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist);
-
-static inline int
-lnet_nid2peerhash(lnet_nid_t nid)
-{
- return hash_long(nid, LNET_PEER_HASH_BITS);
-}
-
-static inline struct list_head *
-lnet_net2rnethash(__u32 net)
-{
- return &the_lnet.ln_remote_nets_hash[(LNET_NETNUM(net) +
- LNET_NETTYP(net)) &
- ((1U << the_lnet.ln_remote_nets_hbits) - 1)];
-}
-
-extern struct lnet_lnd the_lolnd;
-extern int avoid_asym_router_failure;
-
-int lnet_cpt_of_nid_locked(lnet_nid_t nid);
-int lnet_cpt_of_nid(lnet_nid_t nid);
-struct lnet_ni *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
-struct lnet_ni *lnet_net2ni_locked(__u32 net, int cpt);
-struct lnet_ni *lnet_net2ni(__u32 net);
-
-extern int portal_rotor;
-
-int lnet_lib_init(void);
-void lnet_lib_exit(void);
-
-int lnet_notify(struct lnet_ni *ni, lnet_nid_t peer, int alive,
- unsigned long when);
-void lnet_notify_locked(struct lnet_peer *lp, int notifylnd, int alive,
- unsigned long when);
-int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid,
- unsigned int priority);
-int lnet_check_routes(void);
-int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
-void lnet_destroy_routes(void);
-int lnet_get_route(int idx, __u32 *net, __u32 *hops,
- lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
-int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
-
-void lnet_router_debugfs_init(void);
-void lnet_router_debugfs_fini(void);
-int lnet_rtrpools_alloc(int im_a_router);
-void lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages);
-int lnet_rtrpools_adjust(int tiny, int small, int large);
-int lnet_rtrpools_enable(void);
-void lnet_rtrpools_disable(void);
-void lnet_rtrpools_free(int keep_pools);
-struct lnet_remotenet *lnet_find_net_locked(__u32 net);
-int lnet_dyn_add_ni(lnet_pid_t requested_pid,
- struct lnet_ioctl_config_data *conf);
-int lnet_dyn_del_ni(__u32 net);
-int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
-
-int lnet_islocalnid(lnet_nid_t nid);
-int lnet_islocalnet(__u32 net);
-
-void lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
- unsigned int offset, unsigned int mlen);
-void lnet_msg_detach_md(struct lnet_msg *msg, int status);
-void lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev);
-void lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type);
-void lnet_msg_commit(struct lnet_msg *msg, int cpt);
-void lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status);
-
-void lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev);
-void lnet_prep_send(struct lnet_msg *msg, int type,
- struct lnet_process_id target, unsigned int offset,
- unsigned int len);
-int lnet_send(lnet_nid_t nid, struct lnet_msg *msg, lnet_nid_t rtr_nid);
-void lnet_return_tx_credits_locked(struct lnet_msg *msg);
-void lnet_return_rx_credits_locked(struct lnet_msg *msg);
-void lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp);
-void lnet_drop_routed_msgs_locked(struct list_head *list, int cpt);
-
-/* portals functions */
-/* portals attributes */
-static inline int
-lnet_ptl_is_lazy(struct lnet_portal *ptl)
-{
- return !!(ptl->ptl_options & LNET_PTL_LAZY);
-}
-
-static inline int
-lnet_ptl_is_unique(struct lnet_portal *ptl)
-{
- return !!(ptl->ptl_options & LNET_PTL_MATCH_UNIQUE);
-}
-
-static inline int
-lnet_ptl_is_wildcard(struct lnet_portal *ptl)
-{
- return !!(ptl->ptl_options & LNET_PTL_MATCH_WILDCARD);
-}
-
-static inline void
-lnet_ptl_setopt(struct lnet_portal *ptl, int opt)
-{
- ptl->ptl_options |= opt;
-}
-
-static inline void
-lnet_ptl_unsetopt(struct lnet_portal *ptl, int opt)
-{
- ptl->ptl_options &= ~opt;
-}
-
-/* match-table functions */
-struct list_head *lnet_mt_match_head(struct lnet_match_table *mtable,
- struct lnet_process_id id, __u64 mbits);
-struct lnet_match_table *lnet_mt_of_attach(unsigned int index,
- struct lnet_process_id id,
- __u64 mbits, __u64 ignore_bits,
- enum lnet_ins_pos pos);
-int lnet_mt_match_md(struct lnet_match_table *mtable,
- struct lnet_match_info *info, struct lnet_msg *msg);
-
-/* portals match/attach functions */
-void lnet_ptl_attach_md(struct lnet_me *me, struct lnet_libmd *md,
- struct list_head *matches, struct list_head *drops);
-void lnet_ptl_detach_md(struct lnet_me *me, struct lnet_libmd *md);
-int lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg);
-
-/* initialized and finalize portals */
-int lnet_portals_create(void);
-void lnet_portals_destroy(void);
-
-/* message functions */
-int lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
- lnet_nid_t fromnid, void *private, int rdma_req);
-int lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg);
-int lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg);
-
-void lnet_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
- int delayed, unsigned int offset, unsigned int mlen,
- unsigned int rlen);
-void lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
- int delayed, unsigned int offset,
- unsigned int mlen, unsigned int rlen);
-
-struct lnet_msg *lnet_create_reply_msg(struct lnet_ni *ni,
- struct lnet_msg *get_msg);
-void lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *msg,
- unsigned int len);
-
-void lnet_finalize(struct lnet_ni *ni, struct lnet_msg *msg, int rc);
-
-void lnet_drop_message(struct lnet_ni *ni, int cpt, void *private,
- unsigned int nob);
-void lnet_drop_delayed_msg_list(struct list_head *head, char *reason);
-void lnet_recv_delayed_msg_list(struct list_head *head);
-
-int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt);
-void lnet_msg_container_cleanup(struct lnet_msg_container *container);
-void lnet_msg_containers_destroy(void);
-int lnet_msg_containers_create(void);
-
-char *lnet_msgtyp2str(int type);
-void lnet_print_hdr(struct lnet_hdr *hdr);
-int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold);
-
-/** \addtogroup lnet_fault_simulation @{ */
-
-int lnet_fault_ctl(int cmd, struct libcfs_ioctl_data *data);
-int lnet_fault_init(void);
-void lnet_fault_fini(void);
-
-bool lnet_drop_rule_match(struct lnet_hdr *hdr);
-
-int lnet_delay_rule_add(struct lnet_fault_attr *attr);
-int lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown);
-int lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
- struct lnet_fault_stat *stat);
-void lnet_delay_rule_reset(void);
-void lnet_delay_rule_check(void);
-bool lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg);
-
-/** @} lnet_fault_simulation */
-
-void lnet_counters_get(struct lnet_counters *counters);
-void lnet_counters_reset(void);
-
-unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov);
-int lnet_extract_iov(int dst_niov, struct kvec *dst,
- int src_niov, const struct kvec *src,
- unsigned int offset, unsigned int len);
-
-unsigned int lnet_kiov_nob(unsigned int niov, struct bio_vec *iov);
-int lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
- int src_niov, const struct bio_vec *src,
- unsigned int offset, unsigned int len);
-
-void lnet_copy_iov2iter(struct iov_iter *to,
- unsigned int nsiov, const struct kvec *siov,
- unsigned int soffset, unsigned int nob);
-void lnet_copy_kiov2iter(struct iov_iter *to,
- unsigned int nkiov, const struct bio_vec *kiov,
- unsigned int kiovoffset, unsigned int nob);
-
-void lnet_me_unlink(struct lnet_me *me);
-
-void lnet_md_unlink(struct lnet_libmd *md);
-void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd);
-
-void lnet_register_lnd(struct lnet_lnd *lnd);
-void lnet_unregister_lnd(struct lnet_lnd *lnd);
-
-int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
- __u32 local_ip, __u32 peer_ip, int peer_port);
-void lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
- __u32 peer_ip, int port);
-int lnet_count_acceptor_nis(void);
-int lnet_acceptor_timeout(void);
-int lnet_acceptor_port(void);
-
-int lnet_count_acceptor_nis(void);
-int lnet_acceptor_port(void);
-
-int lnet_acceptor_start(void);
-void lnet_acceptor_stop(void);
-
-int lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask);
-int lnet_ipif_enumerate(char ***names);
-void lnet_ipif_free_enumeration(char **names, int n);
-int lnet_sock_setbuf(struct socket *socket, int txbufsize, int rxbufsize);
-int lnet_sock_getbuf(struct socket *socket, int *txbufsize, int *rxbufsize);
-int lnet_sock_getaddr(struct socket *socket, bool remote, __u32 *ip, int *port);
-int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout);
-int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout);
-
-int lnet_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog);
-int lnet_sock_accept(struct socket **newsockp, struct socket *sock);
-int lnet_sock_connect(struct socket **sockp, int *fatal,
- __u32 local_ip, int local_port,
- __u32 peer_ip, int peer_port);
-void libcfs_sock_release(struct socket *sock);
-
-int lnet_peers_start_down(void);
-int lnet_peer_buffer_credits(struct lnet_ni *ni);
-
-int lnet_router_checker_start(void);
-void lnet_router_checker_stop(void);
-void lnet_router_ni_update_locked(struct lnet_peer *gw, __u32 net);
-void lnet_swap_pinginfo(struct lnet_ping_info *info);
-
-int lnet_parse_ip2nets(char **networksp, char *ip2nets);
-int lnet_parse_routes(char *route_str, int *im_a_router);
-int lnet_parse_networks(struct list_head *nilist, char *networks);
-int lnet_net_unique(__u32 net, struct list_head *nilist);
-
-int lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt);
-struct lnet_peer *lnet_find_peer_locked(struct lnet_peer_table *ptable,
- lnet_nid_t nid);
-void lnet_peer_tables_cleanup(struct lnet_ni *ni);
-void lnet_peer_tables_destroy(void);
-int lnet_peer_tables_create(void);
-void lnet_debug_peer(lnet_nid_t nid);
-int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
- char alivness[LNET_MAX_STR_LEN],
- __u32 *cpt_iter, __u32 *refcount,
- __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
- __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credtis,
- __u32 *peer_tx_qnob);
-
-static inline void
-lnet_peer_set_alive(struct lnet_peer *lp)
-{
- lp->lp_last_query = jiffies;
- lp->lp_last_alive = jiffies;
- if (!lp->lp_alive)
- lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
-}
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
deleted file mode 100644
index cfe8ee424e94..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ /dev/null
@@ -1,666 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- *
- * lnet/include/lnet/lib-types.h
- */
-
-#ifndef __LNET_LIB_TYPES_H__
-#define __LNET_LIB_TYPES_H__
-
-#include <linux/kthread.h>
-#include <linux/uio.h>
-#include <linux/types.h>
-#include <linux/completion.h>
-
-#include <uapi/linux/lnet/lnet-types.h>
-#include <uapi/linux/lnet/lnetctl.h>
-
-/* Max payload size */
-#define LNET_MAX_PAYLOAD CONFIG_LNET_MAX_PAYLOAD
-#if (LNET_MAX_PAYLOAD < LNET_MTU)
-# error "LNET_MAX_PAYLOAD too small - error in configure --with-max-payload-mb"
-#elif (LNET_MAX_PAYLOAD > (PAGE_SIZE * LNET_MAX_IOV))
-# error "LNET_MAX_PAYLOAD too large - error in configure --with-max-payload-mb"
-#endif
-
-/* forward refs */
-struct lnet_libmd;
-
-struct lnet_msg {
- struct list_head msg_activelist;
- struct list_head msg_list; /* Q for credits/MD */
-
- struct lnet_process_id msg_target;
- /* where is it from, it's only for building event */
- lnet_nid_t msg_from;
- __u32 msg_type;
-
- /* committed for sending */
- unsigned int msg_tx_committed:1;
- /* CPT # this message committed for sending */
- unsigned int msg_tx_cpt:15;
- /* committed for receiving */
- unsigned int msg_rx_committed:1;
- /* CPT # this message committed for receiving */
- unsigned int msg_rx_cpt:15;
- /* queued for tx credit */
- unsigned int msg_tx_delayed:1;
- /* queued for RX buffer */
- unsigned int msg_rx_delayed:1;
- /* ready for pending on RX delay list */
- unsigned int msg_rx_ready_delay:1;
-
- unsigned int msg_vmflush:1; /* VM trying to free memory */
- unsigned int msg_target_is_router:1; /* sending to a router */
- unsigned int msg_routing:1; /* being forwarded */
- unsigned int msg_ack:1; /* ack on finalize (PUT) */
- unsigned int msg_sending:1; /* outgoing message */
- unsigned int msg_receiving:1; /* being received */
- unsigned int msg_txcredit:1; /* taken an NI send credit */
- unsigned int msg_peertxcredit:1; /* taken a peer send credit */
- unsigned int msg_rtrcredit:1; /* taken a global router credit */
- unsigned int msg_peerrtrcredit:1; /* taken a peer router credit */
- unsigned int msg_onactivelist:1; /* on the activelist */
- unsigned int msg_rdma_get:1;
-
- struct lnet_peer *msg_txpeer; /* peer I'm sending to */
- struct lnet_peer *msg_rxpeer; /* peer I received from */
-
- void *msg_private;
- struct lnet_libmd *msg_md;
-
- unsigned int msg_len;
- unsigned int msg_wanted;
- unsigned int msg_offset;
- unsigned int msg_niov;
- struct kvec *msg_iov;
- struct bio_vec *msg_kiov;
-
- struct lnet_event msg_ev;
- struct lnet_hdr msg_hdr;
-};
-
-struct lnet_libhandle {
- struct list_head lh_hash_chain;
- __u64 lh_cookie;
-};
-
-#define lh_entry(ptr, type, member) \
- ((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
-
-struct lnet_eq {
- struct list_head eq_list;
- struct lnet_libhandle eq_lh;
- unsigned long eq_enq_seq;
- unsigned long eq_deq_seq;
- unsigned int eq_size;
- lnet_eq_handler_t eq_callback;
- struct lnet_event *eq_events;
- int **eq_refs; /* percpt refcount for EQ */
-};
-
-struct lnet_me {
- struct list_head me_list;
- struct lnet_libhandle me_lh;
- struct lnet_process_id me_match_id;
- unsigned int me_portal;
- unsigned int me_pos; /* hash offset in mt_hash */
- __u64 me_match_bits;
- __u64 me_ignore_bits;
- enum lnet_unlink me_unlink;
- struct lnet_libmd *me_md;
-};
-
-struct lnet_libmd {
- struct list_head md_list;
- struct lnet_libhandle md_lh;
- struct lnet_me *md_me;
- char *md_start;
- unsigned int md_offset;
- unsigned int md_length;
- unsigned int md_max_size;
- int md_threshold;
- int md_refcount;
- unsigned int md_options;
- unsigned int md_flags;
- void *md_user_ptr;
- struct lnet_eq *md_eq;
- unsigned int md_niov; /* # frags */
- union {
- struct kvec iov[LNET_MAX_IOV];
- struct bio_vec kiov[LNET_MAX_IOV];
- } md_iov;
-};
-
-#define LNET_MD_FLAG_ZOMBIE BIT(0)
-#define LNET_MD_FLAG_AUTO_UNLINK BIT(1)
-#define LNET_MD_FLAG_ABORTED BIT(2)
-
-struct lnet_test_peer {
- /* info about peers we are trying to fail */
- struct list_head tp_list; /* ln_test_peers */
- lnet_nid_t tp_nid; /* matching nid */
- unsigned int tp_threshold; /* # failures to simulate */
-};
-
-#define LNET_COOKIE_TYPE_MD 1
-#define LNET_COOKIE_TYPE_ME 2
-#define LNET_COOKIE_TYPE_EQ 3
-#define LNET_COOKIE_TYPE_BITS 2
-#define LNET_COOKIE_MASK ((1ULL << LNET_COOKIE_TYPE_BITS) - 1ULL)
-
-struct lnet_ni; /* forward ref */
-
-struct lnet_lnd {
- /* fields managed by portals */
- struct list_head lnd_list; /* stash in the LND table */
- int lnd_refcount; /* # active instances */
-
- /* fields initialised by the LND */
- __u32 lnd_type;
-
- int (*lnd_startup)(struct lnet_ni *ni);
- void (*lnd_shutdown)(struct lnet_ni *ni);
- int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
-
- /*
- * In data movement APIs below, payload buffers are described as a set
- * of 'niov' fragments which are...
- * EITHER
- * in virtual memory (struct iovec *iov != NULL)
- * OR
- * in pages (kernel only: plt_kiov_t *kiov != NULL).
- * The LND may NOT overwrite these fragment descriptors.
- * An 'offset' and may specify a byte offset within the set of
- * fragments to start from
- */
-
- /*
- * Start sending a preformatted message. 'private' is NULL for PUT and
- * GET messages; otherwise this is a response to an incoming message
- * and 'private' is the 'private' passed to lnet_parse(). Return
- * non-zero for immediate failure, otherwise complete later with
- * lnet_finalize()
- */
- int (*lnd_send)(struct lnet_ni *ni, void *private,
- struct lnet_msg *msg);
-
- /*
- * Start receiving 'mlen' bytes of payload data, skipping the following
- * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to
- * lnet_parse(). Return non-zero for immediate failure, otherwise
- * complete later with lnet_finalize(). This also gives back a receive
- * credit if the LND does flow control.
- */
- int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
- int delayed, struct iov_iter *to, unsigned int rlen);
-
- /*
- * lnet_parse() has had to delay processing of this message
- * (e.g. waiting for a forwarding buffer or send credits). Give the
- * LND a chance to free urgently needed resources. If called, return 0
- * for success and do NOT give back a receive credit; that has to wait
- * until lnd_recv() gets called. On failure return < 0 and
- * release resources; lnd_recv() will not be called.
- */
- int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
- struct lnet_msg *msg, void **new_privatep);
-
- /* notification of peer health */
- void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive);
-
- /* query of peer aliveness */
- void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer,
- unsigned long *when);
-
- /* accept a new connection */
- int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
-};
-
-struct lnet_tx_queue {
- int tq_credits; /* # tx credits free */
- int tq_credits_min; /* lowest it's been */
- int tq_credits_max; /* total # tx credits */
- struct list_head tq_delayed; /* delayed TXs */
-};
-
-struct lnet_ni {
- spinlock_t ni_lock;
- struct list_head ni_list; /* chain on ln_nis */
- struct list_head ni_cptlist; /* chain on ln_nis_cpt */
- int ni_maxtxcredits; /* # tx credits */
- /* # per-peer send credits */
- int ni_peertxcredits;
- /* # per-peer router buffer credits */
- int ni_peerrtrcredits;
- /* seconds to consider peer dead */
- int ni_peertimeout;
- int ni_ncpts; /* number of CPTs */
- __u32 *ni_cpts; /* bond NI on some CPTs */
- lnet_nid_t ni_nid; /* interface's NID */
- void *ni_data; /* instance-specific data */
- struct lnet_lnd *ni_lnd; /* procedural interface */
- struct lnet_tx_queue **ni_tx_queues; /* percpt TX queues */
- int **ni_refs; /* percpt reference count */
- time64_t ni_last_alive;/* when I was last alive */
- struct lnet_ni_status *ni_status; /* my health status */
- /* per NI LND tunables */
- struct lnet_ioctl_config_lnd_tunables *ni_lnd_tunables;
- /* equivalent interfaces to use */
- char *ni_interfaces[LNET_MAX_INTERFACES];
- /* original net namespace */
- struct net *ni_net_ns;
-};
-
-#define LNET_PROTO_PING_MATCHBITS 0x8000000000000000LL
-
-/*
- * NB: value of these features equal to LNET_PROTO_PING_VERSION_x
- * of old LNet, so there shouldn't be any compatibility issue
- */
-#define LNET_PING_FEAT_INVAL (0) /* no feature */
-#define LNET_PING_FEAT_BASE BIT(0) /* just a ping */
-#define LNET_PING_FEAT_NI_STATUS BIT(1) /* return NI status */
-#define LNET_PING_FEAT_RTE_DISABLED BIT(2) /* Routing enabled */
-
-#define LNET_PING_FEAT_MASK (LNET_PING_FEAT_BASE | \
- LNET_PING_FEAT_NI_STATUS)
-
-/* router checker data, per router */
-#define LNET_MAX_RTR_NIS 16
-#define LNET_PINGINFO_SIZE offsetof(struct lnet_ping_info, pi_ni[LNET_MAX_RTR_NIS])
-struct lnet_rc_data {
- /* chain on the_lnet.ln_zombie_rcd or ln_deathrow_rcd */
- struct list_head rcd_list;
- struct lnet_handle_md rcd_mdh; /* ping buffer MD */
- struct lnet_peer *rcd_gateway; /* reference to gateway */
- struct lnet_ping_info *rcd_pinginfo; /* ping buffer */
-};
-
-struct lnet_peer {
- struct list_head lp_hashlist; /* chain on peer hash */
- struct list_head lp_txq; /* messages blocking for
- * tx credits
- */
- struct list_head lp_rtrq; /* messages blocking for
- * router credits
- */
- struct list_head lp_rtr_list; /* chain on router list */
- int lp_txcredits; /* # tx credits available */
- int lp_mintxcredits; /* low water mark */
- int lp_rtrcredits; /* # router credits */
- int lp_minrtrcredits; /* low water mark */
- unsigned int lp_alive:1; /* alive/dead? */
- unsigned int lp_notify:1; /* notification outstanding? */
- unsigned int lp_notifylnd:1;/* outstanding notification
- * for LND?
- */
- unsigned int lp_notifying:1; /* some thread is handling
- * notification
- */
- unsigned int lp_ping_notsent;/* SEND event outstanding
- * from ping
- */
- int lp_alive_count; /* # times router went
- * dead<->alive
- */
- long lp_txqnob; /* ytes queued for sending */
- unsigned long lp_timestamp; /* time of last aliveness
- * news
- */
- unsigned long lp_ping_timestamp;/* time of last ping
- * attempt
- */
- unsigned long lp_ping_deadline; /* != 0 if ping reply
- * expected
- */
- unsigned long lp_last_alive; /* when I was last alive */
- unsigned long lp_last_query; /* when lp_ni was queried
- * last time
- */
- struct lnet_ni *lp_ni; /* interface peer is on */
- lnet_nid_t lp_nid; /* peer's NID */
- int lp_refcount; /* # refs */
- int lp_cpt; /* CPT this peer attached on */
- /* # refs from lnet_route::lr_gateway */
- int lp_rtr_refcount;
- /* returned RC ping features */
- unsigned int lp_ping_feats;
- struct list_head lp_routes; /* routers on this peer */
- struct lnet_rc_data *lp_rcd; /* router checker state */
-};
-
-/* peer hash size */
-#define LNET_PEER_HASH_BITS 9
-#define LNET_PEER_HASH_SIZE (1 << LNET_PEER_HASH_BITS)
-
-/* peer hash table */
-struct lnet_peer_table {
- int pt_version; /* /proc validity stamp */
- int pt_number; /* # peers extant */
- /* # zombies to go to deathrow (and not there yet) */
- int pt_zombies;
- struct list_head pt_deathrow; /* zombie peers */
- struct list_head *pt_hash; /* NID->peer hash */
-};
-
-/*
- * peer aliveness is enabled only on routers for peers in a network where the
- * lnet_ni::ni_peertimeout has been set to a positive value
- */
-#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \
- (lp)->lp_ni->ni_peertimeout > 0)
-
-struct lnet_route {
- struct list_head lr_list; /* chain on net */
- struct list_head lr_gwlist; /* chain on gateway */
- struct lnet_peer *lr_gateway; /* router node */
- __u32 lr_net; /* remote network number */
- int lr_seq; /* sequence for round-robin */
- unsigned int lr_downis; /* number of down NIs */
- __u32 lr_hops; /* how far I am */
- unsigned int lr_priority; /* route priority */
-};
-
-#define LNET_REMOTE_NETS_HASH_DEFAULT (1U << 7)
-#define LNET_REMOTE_NETS_HASH_MAX (1U << 16)
-#define LNET_REMOTE_NETS_HASH_SIZE (1 << the_lnet.ln_remote_nets_hbits)
-
-struct lnet_remotenet {
- struct list_head lrn_list; /* chain on
- * ln_remote_nets_hash
- */
- struct list_head lrn_routes; /* routes to me */
- __u32 lrn_net; /* my net number */
-};
-
-/** lnet message has credit and can be submitted to lnd for send/receive */
-#define LNET_CREDIT_OK 0
-/** lnet message is waiting for credit */
-#define LNET_CREDIT_WAIT 1
-
-struct lnet_rtrbufpool {
- struct list_head rbp_bufs; /* my free buffer pool */
- struct list_head rbp_msgs; /* messages blocking
- * for a buffer
- */
- int rbp_npages; /* # pages in each buffer */
- /* requested number of buffers */
- int rbp_req_nbuffers;
- /* # buffers actually allocated */
- int rbp_nbuffers;
- int rbp_credits; /* # free buffers
- * blocked messages
- */
- int rbp_mincredits; /* low water mark */
-};
-
-struct lnet_rtrbuf {
- struct list_head rb_list; /* chain on rbp_bufs */
- struct lnet_rtrbufpool *rb_pool; /* owning pool */
- struct bio_vec rb_kiov[0]; /* the buffer space */
-};
-
-#define LNET_PEER_HASHSIZE 503 /* prime! */
-
-#define LNET_TINY_BUF_IDX 0
-#define LNET_SMALL_BUF_IDX 1
-#define LNET_LARGE_BUF_IDX 2
-
-/* # different router buffer pools */
-#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1)
-
-enum lnet_match_flags {
- /* Didn't match anything */
- LNET_MATCHMD_NONE = BIT(0),
- /* Matched OK */
- LNET_MATCHMD_OK = BIT(1),
- /* Must be discarded */
- LNET_MATCHMD_DROP = BIT(2),
- /* match and buffer is exhausted */
- LNET_MATCHMD_EXHAUSTED = BIT(3),
- /* match or drop */
- LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
-};
-
-/* Options for lnet_portal::ptl_options */
-#define LNET_PTL_LAZY BIT(0)
-#define LNET_PTL_MATCH_UNIQUE BIT(1) /* unique match, for RDMA */
-#define LNET_PTL_MATCH_WILDCARD BIT(2) /* wildcard match, request portal */
-
-/* parameter for matching operations (GET, PUT) */
-struct lnet_match_info {
- __u64 mi_mbits;
- struct lnet_process_id mi_id;
- unsigned int mi_opc;
- unsigned int mi_portal;
- unsigned int mi_rlength;
- unsigned int mi_roffset;
-};
-
-/* ME hash of RDMA portal */
-#define LNET_MT_HASH_BITS 8
-#define LNET_MT_HASH_SIZE (1 << LNET_MT_HASH_BITS)
-#define LNET_MT_HASH_MASK (LNET_MT_HASH_SIZE - 1)
-/*
- * we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash,
- * the last entry is reserved for MEs with ignore-bits
- */
-#define LNET_MT_HASH_IGNORE LNET_MT_HASH_SIZE
-/*
- * __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which
- * is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the
- * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE]
- */
-#define LNET_MT_BITS_U64 6 /* 2^6 bits */
-#define LNET_MT_EXHAUSTED_BITS (LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
-#define LNET_MT_EXHAUSTED_BMAP ((1 << LNET_MT_EXHAUSTED_BITS) + 1)
-
-/* portal match table */
-struct lnet_match_table {
- /* reserved for upcoming patches, CPU partition ID */
- unsigned int mt_cpt;
- unsigned int mt_portal; /* portal index */
- /*
- * match table is set as "enabled" if there's non-exhausted MD
- * attached on mt_mhash, it's only valid for wildcard portal
- */
- unsigned int mt_enabled;
- /* bitmap to flag whether MEs on mt_hash are exhausted or not */
- __u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
- struct list_head *mt_mhash; /* matching hash */
-};
-
-/* these are only useful for wildcard portal */
-/* Turn off message rotor for wildcard portals */
-#define LNET_PTL_ROTOR_OFF 0
-/* round-robin dispatch all PUT messages for wildcard portals */
-#define LNET_PTL_ROTOR_ON 1
-/* round-robin dispatch routed PUT message for wildcard portals */
-#define LNET_PTL_ROTOR_RR_RT 2
-/* dispatch routed PUT message by hashing source NID for wildcard portals */
-#define LNET_PTL_ROTOR_HASH_RT 3
-
-struct lnet_portal {
- spinlock_t ptl_lock;
- unsigned int ptl_index; /* portal ID, reserved */
- /* flags on this portal: lazy, unique... */
- unsigned int ptl_options;
- /* list of messages which are stealing buffer */
- struct list_head ptl_msg_stealing;
- /* messages blocking for MD */
- struct list_head ptl_msg_delayed;
- /* Match table for each CPT */
- struct lnet_match_table **ptl_mtables;
- /* spread rotor of incoming "PUT" */
- unsigned int ptl_rotor;
- /* # active entries for this portal */
- int ptl_mt_nmaps;
- /* array of active entries' cpu-partition-id */
- int ptl_mt_maps[0];
-};
-
-#define LNET_LH_HASH_BITS 12
-#define LNET_LH_HASH_SIZE (1ULL << LNET_LH_HASH_BITS)
-#define LNET_LH_HASH_MASK (LNET_LH_HASH_SIZE - 1)
-
-/* resource container (ME, MD, EQ) */
-struct lnet_res_container {
- unsigned int rec_type; /* container type */
- __u64 rec_lh_cookie; /* cookie generator */
- struct list_head rec_active; /* active resource list */
- struct list_head *rec_lh_hash; /* handle hash */
-};
-
-/* message container */
-struct lnet_msg_container {
- int msc_init; /* initialized or not */
- /* max # threads finalizing */
- int msc_nfinalizers;
- /* msgs waiting to complete finalizing */
- struct list_head msc_finalizing;
- struct list_head msc_active; /* active message list */
- /* threads doing finalization */
- void **msc_finalizers;
-};
-
-/* Router Checker states */
-#define LNET_RC_STATE_SHUTDOWN 0 /* not started */
-#define LNET_RC_STATE_RUNNING 1 /* started up OK */
-#define LNET_RC_STATE_STOPPING 2 /* telling thread to stop */
-
-struct lnet {
- /* CPU partition table of LNet */
- struct cfs_cpt_table *ln_cpt_table;
- /* number of CPTs in ln_cpt_table */
- unsigned int ln_cpt_number;
- unsigned int ln_cpt_bits;
-
- /* protect LNet resources (ME/MD/EQ) */
- struct cfs_percpt_lock *ln_res_lock;
- /* # portals */
- int ln_nportals;
- /* the vector of portals */
- struct lnet_portal **ln_portals;
- /* percpt ME containers */
- struct lnet_res_container **ln_me_containers;
- /* percpt MD container */
- struct lnet_res_container **ln_md_containers;
-
- /* Event Queue container */
- struct lnet_res_container ln_eq_container;
- wait_queue_head_t ln_eq_waitq;
- spinlock_t ln_eq_wait_lock;
- unsigned int ln_remote_nets_hbits;
-
- /* protect NI, peer table, credits, routers, rtrbuf... */
- struct cfs_percpt_lock *ln_net_lock;
- /* percpt message containers for active/finalizing/freed message */
- struct lnet_msg_container **ln_msg_containers;
- struct lnet_counters **ln_counters;
- struct lnet_peer_table **ln_peer_tables;
- /* failure simulation */
- struct list_head ln_test_peers;
- struct list_head ln_drop_rules;
- struct list_head ln_delay_rules;
-
- struct list_head ln_nis; /* LND instances */
- /* NIs bond on specific CPT(s) */
- struct list_head ln_nis_cpt;
- /* dying LND instances */
- struct list_head ln_nis_zombie;
- struct lnet_ni *ln_loni; /* the loopback NI */
-
- /* remote networks with routes to them */
- struct list_head *ln_remote_nets_hash;
- /* validity stamp */
- __u64 ln_remote_nets_version;
- /* list of all known routers */
- struct list_head ln_routers;
- /* validity stamp */
- __u64 ln_routers_version;
- /* percpt router buffer pools */
- struct lnet_rtrbufpool **ln_rtrpools;
-
- struct lnet_handle_md ln_ping_target_md;
- struct lnet_handle_eq ln_ping_target_eq;
- struct lnet_ping_info *ln_ping_info;
-
- /* router checker startup/shutdown state */
- int ln_rc_state;
- /* router checker's event queue */
- struct lnet_handle_eq ln_rc_eqh;
- /* rcd still pending on net */
- struct list_head ln_rcd_deathrow;
- /* rcd ready for free */
- struct list_head ln_rcd_zombie;
- /* serialise startup/shutdown */
- struct completion ln_rc_signal;
-
- struct mutex ln_api_mutex;
- struct mutex ln_lnd_mutex;
- struct mutex ln_delay_mutex;
- /* Have I called LNetNIInit myself? */
- int ln_niinit_self;
- /* LNetNIInit/LNetNIFini counter */
- int ln_refcount;
- /* shutdown in progress */
- int ln_shutdown;
-
- int ln_routing; /* am I a router? */
- lnet_pid_t ln_pid; /* requested pid */
- /* uniquely identifies this ni in this epoch */
- __u64 ln_interface_cookie;
- /* registered LNDs */
- struct list_head ln_lnds;
-
- /* test protocol compatibility flags */
- int ln_testprotocompat;
-
- /*
- * 0 - load the NIs from the mod params
- * 1 - do not load the NIs from the mod params
- * Reverse logic to ensure that other calls to LNetNIInit
- * need no change
- */
- bool ln_nis_from_mod_params;
-
- /*
- * waitq for router checker. As long as there are no routes in
- * the list, the router checker will sleep on this queue. when
- * routes are added the thread will wake up
- */
- wait_queue_head_t ln_rc_waitq;
-
-};
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h
deleted file mode 100644
index 6bd1bca190a3..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/socklnd.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- *
- * lnet/include/lnet/socklnd.h
- */
-#ifndef __LNET_LNET_SOCKLND_H__
-#define __LNET_LNET_SOCKLND_H__
-
-#include <uapi/linux/lnet/lnet-types.h>
-#include <uapi/linux/lnet/socklnd.h>
-
-struct ksock_hello_msg {
- __u32 kshm_magic; /* magic number of socklnd message */
- __u32 kshm_version; /* version of socklnd message */
- lnet_nid_t kshm_src_nid; /* sender's nid */
- lnet_nid_t kshm_dst_nid; /* destination nid */
- lnet_pid_t kshm_src_pid; /* sender's pid */
- lnet_pid_t kshm_dst_pid; /* destination pid */
- __u64 kshm_src_incarnation; /* sender's incarnation */
- __u64 kshm_dst_incarnation; /* destination's incarnation */
- __u32 kshm_ctype; /* connection type */
- __u32 kshm_nips; /* # IP addrs */
- __u32 kshm_ips[0]; /* IP addrs */
-} WIRE_ATTR;
-
-struct ksock_lnet_msg {
- struct lnet_hdr ksnm_hdr; /* lnet hdr */
-
- /*
- * ksnm_payload is removed because of winnt compiler's limitation:
- * zero-sized array can only be placed at the tail of [nested]
- * structure definitions. lnet payload will be stored just after
- * the body of structure ksock_lnet_msg_t
- */
-} WIRE_ATTR;
-
-struct ksock_msg {
- __u32 ksm_type; /* type of socklnd message */
- __u32 ksm_csum; /* checksum if != 0 */
- __u64 ksm_zc_cookies[2]; /* Zero-Copy request/ACK cookie */
- union {
- struct ksock_lnet_msg lnetmsg; /* lnet message, it's empty if
- * it's NOOP
- */
- } WIRE_ATTR ksm_u;
-} WIRE_ATTR;
-
-#define KSOCK_MSG_NOOP 0xC0 /* ksm_u empty */
-#define KSOCK_MSG_LNET 0xC1 /* lnet msg */
-
-/*
- * We need to know this number to parse hello msg from ksocklnd in
- * other LND (usocklnd, for example)
- */
-#define KSOCK_PROTO_V2 2
-#define KSOCK_PROTO_V3 3
-
-#endif
diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h b/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h
deleted file mode 100644
index c4d9472b374f..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2014, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_debug.h
- *
- * Debug messages and assertions
- *
- */
-
-#ifndef __UAPI_LIBCFS_DEBUG_H__
-#define __UAPI_LIBCFS_DEBUG_H__
-
-/**
- * Format for debug message headers
- */
-struct ptldebug_header {
- __u32 ph_len;
- __u32 ph_flags;
- __u32 ph_subsys;
- __u32 ph_mask;
- __u16 ph_cpu_id;
- __u16 ph_type;
- /* time_t overflow in 2106 */
- __u32 ph_sec;
- __u64 ph_usec;
- __u32 ph_stack;
- __u32 ph_pid;
- __u32 ph_extern_pid;
- __u32 ph_line_num;
-} __attribute__((packed));
-
-#define PH_FLAG_FIRST_RECORD 1
-
-/* Debugging subsystems (32 bits, non-overlapping) */
-#define S_UNDEFINED 0x00000001
-#define S_MDC 0x00000002
-#define S_MDS 0x00000004
-#define S_OSC 0x00000008
-#define S_OST 0x00000010
-#define S_CLASS 0x00000020
-#define S_LOG 0x00000040
-#define S_LLITE 0x00000080
-#define S_RPC 0x00000100
-#define S_MGMT 0x00000200
-#define S_LNET 0x00000400
-#define S_LND 0x00000800 /* ALL LNDs */
-#define S_PINGER 0x00001000
-#define S_FILTER 0x00002000
-#define S_LIBCFS 0x00004000
-#define S_ECHO 0x00008000
-#define S_LDLM 0x00010000
-#define S_LOV 0x00020000
-#define S_LQUOTA 0x00040000
-#define S_OSD 0x00080000
-#define S_LFSCK 0x00100000
-#define S_SNAPSHOT 0x00200000
-/* unused */
-#define S_LMV 0x00800000 /* b_new_cmd */
-/* unused */
-#define S_SEC 0x02000000 /* upcall cache */
-#define S_GSS 0x04000000 /* b_new_cmd */
-/* unused */
-#define S_MGC 0x10000000
-#define S_MGS 0x20000000
-#define S_FID 0x40000000 /* b_new_cmd */
-#define S_FLD 0x80000000 /* b_new_cmd */
-
-#define LIBCFS_DEBUG_SUBSYS_NAMES { \
- "undefined", "mdc", "mds", "osc", "ost", "class", "log", \
- "llite", "rpc", "mgmt", "lnet", "lnd", "pinger", "filter", \
- "libcfs", "echo", "ldlm", "lov", "lquota", "osd", "lfsck", \
- "snapshot", "", "lmv", "", "sec", "gss", "", "mgc", "mgs", \
- "fid", "fld", NULL }
-
-/* Debugging masks (32 bits, non-overlapping) */
-#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
-#define D_INODE 0x00000002
-#define D_SUPER 0x00000004
-#define D_EXT2 0x00000008 /* anything from ext2_debug */
-#define D_MALLOC 0x00000010 /* print malloc, free information */
-#define D_CACHE 0x00000020 /* cache-related items */
-#define D_INFO 0x00000040 /* general information */
-#define D_IOCTL 0x00000080 /* ioctl related information */
-#define D_NETERROR 0x00000100 /* network errors */
-#define D_NET 0x00000200 /* network communications */
-#define D_WARNING 0x00000400 /* CWARN(...) == CDEBUG (D_WARNING, ...) */
-#define D_BUFFS 0x00000800
-#define D_OTHER 0x00001000
-#define D_DENTRY 0x00002000
-#define D_NETTRACE 0x00004000
-#define D_PAGE 0x00008000 /* bulk page handling */
-#define D_DLMTRACE 0x00010000
-#define D_ERROR 0x00020000 /* CERROR(...) == CDEBUG (D_ERROR, ...) */
-#define D_EMERG 0x00040000 /* CEMERG(...) == CDEBUG (D_EMERG, ...) */
-#define D_HA 0x00080000 /* recovery and failover */
-#define D_RPCTRACE 0x00100000 /* for distributed debugging */
-#define D_VFSTRACE 0x00200000
-#define D_READA 0x00400000 /* read-ahead */
-#define D_MMAP 0x00800000
-#define D_CONFIG 0x01000000
-#define D_CONSOLE 0x02000000
-#define D_QUOTA 0x04000000
-#define D_SEC 0x08000000
-#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
-#define D_HSM 0x20000000
-#define D_SNAPSHOT 0x40000000 /* snapshot */
-#define D_LAYOUT 0x80000000
-
-#define LIBCFS_DEBUG_MASKS_NAMES { \
- "trace", "inode", "super", "ext2", "malloc", "cache", "info", \
- "ioctl", "neterror", "net", "warning", "buffs", "other", \
- "dentry", "nettrace", "page", "dlmtrace", "error", "emerg", \
- "ha", "rpctrace", "vfstrace", "reada", "mmap", "config", \
- "console", "quota", "sec", "lfsck", "hsm", "snapshot", "layout",\
- NULL }
-
-#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE)
-
-#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
-
-#endif /* __UAPI_LIBCFS_DEBUG_H__ */
diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h b/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h
deleted file mode 100644
index cce6b58e3682..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_ioctl.h
- *
- * Low-level ioctl data structures. Kernel ioctl functions declared here,
- * and user space functions are in libcfs/util/ioctl.h.
- *
- */
-
-#ifndef __LIBCFS_IOCTL_H__
-#define __LIBCFS_IOCTL_H__
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define LIBCFS_IOCTL_VERSION 0x0001000a
-#define LIBCFS_IOCTL_VERSION2 0x0001000b
-
-struct libcfs_ioctl_hdr {
- __u32 ioc_len;
- __u32 ioc_version;
-};
-
-/** max size to copy from userspace */
-#define LIBCFS_IOC_DATA_MAX (128 * 1024)
-
-struct libcfs_ioctl_data {
- struct libcfs_ioctl_hdr ioc_hdr;
-
- __u64 ioc_nid;
- __u64 ioc_u64[1];
-
- __u32 ioc_flags;
- __u32 ioc_count;
- __u32 ioc_net;
- __u32 ioc_u32[7];
-
- __u32 ioc_inllen1;
- char *ioc_inlbuf1;
- __u32 ioc_inllen2;
- char *ioc_inlbuf2;
-
- __u32 ioc_plen1; /* buffers in userspace */
- void __user *ioc_pbuf1;
- __u32 ioc_plen2; /* buffers in userspace */
- void __user *ioc_pbuf2;
-
- char ioc_bulk[0];
-};
-
-struct libcfs_debug_ioctl_data {
- struct libcfs_ioctl_hdr hdr;
- unsigned int subs;
- unsigned int debug;
-};
-
-/* 'f' ioctls are defined in lustre_ioctl.h and lustre_user.h except for: */
-#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
-#define IOCTL_LIBCFS_TYPE long
-
-#define IOC_LIBCFS_TYPE ('e')
-#define IOC_LIBCFS_MIN_NR 30
-/* libcfs ioctls */
-/* IOC_LIBCFS_PANIC obsolete in 2.8.0, was _IOWR('e', 30, IOCTL_LIBCFS_TYPE) */
-#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, IOCTL_LIBCFS_TYPE)
-/* IOC_LIBCFS_MEMHOG obsolete in 2.8.0, was _IOWR('e', 36, IOCTL_LIBCFS_TYPE) */
-/* lnet ioctls */
-#define IOC_LIBCFS_GET_NI _IOWR('e', 50, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, IOCTL_LIBCFS_TYPE)
-/* IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, IOCTL_LIBCFS_TYPE) */
-#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PING _IOWR('e', 61, IOCTL_LIBCFS_TYPE)
-/* IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, IOCTL_LIBCFS_TYPE) */
-#define IOC_LIBCFS_LNETST _IOWR('e', 63, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, IOCTL_LIBCFS_TYPE)
-/* lnd ioctls */
-#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, IOCTL_LIBCFS_TYPE)
-/* ioctl 77 is free for use */
-#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, IOCTL_LIBCFS_TYPE)
-
-/*
- * DLC Specific IOCTL numbers.
- * In order to maintain backward compatibility with any possible external
- * tools which might be accessing the IOCTL numbers, a new group of IOCTL
- * number have been allocated.
- */
-#define IOCTL_CONFIG_SIZE struct lnet_ioctl_config_data
-#define IOC_LIBCFS_ADD_ROUTE _IOWR(IOC_LIBCFS_TYPE, 81, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_DEL_ROUTE _IOWR(IOC_LIBCFS_TYPE, 82, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_GET_ROUTE _IOWR(IOC_LIBCFS_TYPE, 83, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_ADD_NET _IOWR(IOC_LIBCFS_TYPE, 84, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_DEL_NET _IOWR(IOC_LIBCFS_TYPE, 85, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_GET_NET _IOWR(IOC_LIBCFS_TYPE, 86, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_CONFIG_RTR _IOWR(IOC_LIBCFS_TYPE, 87, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_ADD_BUF _IOWR(IOC_LIBCFS_TYPE, 88, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_GET_BUF _IOWR(IOC_LIBCFS_TYPE, 89, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_GET_PEER_INFO _IOWR(IOC_LIBCFS_TYPE, 90, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_GET_LNET_STATS _IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_MAX_NR 91
-
-#endif /* __LIBCFS_IOCTL_H__ */
diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h b/drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h
deleted file mode 100644
index e45d828bfd1b..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * LGPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library.
- *
- * LGPL HEADER END
- *
- */
-/*
- * Copyright (c) 2014, Intel Corporation.
- */
-/*
- * Author: Amir Shehata <amir.shehata@intel.com>
- */
-
-#ifndef LNET_DLC_H
-#define LNET_DLC_H
-
-#include <uapi/linux/lnet/libcfs_ioctl.h>
-#include <uapi/linux/lnet/lnet-types.h>
-
-#define MAX_NUM_SHOW_ENTRIES 32
-#define LNET_MAX_STR_LEN 128
-#define LNET_MAX_SHOW_NUM_CPT 128
-#define LNET_UNDEFINED_HOPS ((__u32)(-1))
-
-struct lnet_ioctl_config_lnd_cmn_tunables {
- __u32 lct_version;
- __u32 lct_peer_timeout;
- __u32 lct_peer_tx_credits;
- __u32 lct_peer_rtr_credits;
- __u32 lct_max_tx_credits;
-};
-
-struct lnet_ioctl_config_o2iblnd_tunables {
- __u32 lnd_version;
- __u32 lnd_peercredits_hiw;
- __u32 lnd_map_on_demand;
- __u32 lnd_concurrent_sends;
- __u32 lnd_fmr_pool_size;
- __u32 lnd_fmr_flush_trigger;
- __u32 lnd_fmr_cache;
- __u32 pad;
-};
-
-struct lnet_ioctl_config_lnd_tunables {
- struct lnet_ioctl_config_lnd_cmn_tunables lt_cmn;
- union {
- struct lnet_ioctl_config_o2iblnd_tunables lt_o2ib;
- } lt_tun_u;
-};
-
-struct lnet_ioctl_net_config {
- char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN];
- __u32 ni_status;
- __u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT];
- char cfg_bulk[0];
-};
-
-#define LNET_TINY_BUF_IDX 0
-#define LNET_SMALL_BUF_IDX 1
-#define LNET_LARGE_BUF_IDX 2
-
-/* # different router buffer pools */
-#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1)
-
-struct lnet_ioctl_pool_cfg {
- struct {
- __u32 pl_npages;
- __u32 pl_nbuffers;
- __u32 pl_credits;
- __u32 pl_mincredits;
- } pl_pools[LNET_NRBPOOLS];
- __u32 pl_routing;
-};
-
-struct lnet_ioctl_config_data {
- struct libcfs_ioctl_hdr cfg_hdr;
-
- __u32 cfg_net;
- __u32 cfg_count;
- __u64 cfg_nid;
- __u32 cfg_ncpts;
-
- union {
- struct {
- __u32 rtr_hop;
- __u32 rtr_priority;
- __u32 rtr_flags;
- } cfg_route;
- struct {
- char net_intf[LNET_MAX_STR_LEN];
- __s32 net_peer_timeout;
- __s32 net_peer_tx_credits;
- __s32 net_peer_rtr_credits;
- __s32 net_max_tx_credits;
- __u32 net_cksum_algo;
- __u32 net_interface_count;
- } cfg_net;
- struct {
- __u32 buf_enable;
- __s32 buf_tiny;
- __s32 buf_small;
- __s32 buf_large;
- } cfg_buffers;
- } cfg_config_u;
-
- char cfg_bulk[0];
-};
-
-struct lnet_ioctl_peer {
- struct libcfs_ioctl_hdr pr_hdr;
- __u32 pr_count;
- __u32 pr_pad;
- __u64 pr_nid;
-
- union {
- struct {
- char cr_aliveness[LNET_MAX_STR_LEN];
- __u32 cr_refcount;
- __u32 cr_ni_peer_tx_credits;
- __u32 cr_peer_tx_credits;
- __u32 cr_peer_rtr_credits;
- __u32 cr_peer_min_rtr_credits;
- __u32 cr_peer_tx_qnob;
- __u32 cr_ncpt;
- } pr_peer_credits;
- } pr_lnd_u;
-};
-
-struct lnet_ioctl_lnet_stats {
- struct libcfs_ioctl_hdr st_hdr;
- struct lnet_counters st_cntrs;
-};
-
-#endif /* LNET_DLC_H */
diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h b/drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h
deleted file mode 100644
index 1be9b7aa7326..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h
+++ /dev/null
@@ -1,669 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- */
-
-#ifndef __LNET_TYPES_H__
-#define __LNET_TYPES_H__
-
-#include <linux/types.h>
-#include <linux/bvec.h>
-
-/** \addtogroup lnet
- * @{
- */
-
-#define LNET_VERSION "0.6.0"
-
-/** \addtogroup lnet_addr
- * @{
- */
-
-/** Portal reserved for LNet's own use.
- * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments.
- */
-#define LNET_RESERVED_PORTAL 0
-
-/**
- * Address of an end-point in an LNet network.
- *
- * A node can have multiple end-points and hence multiple addresses.
- * An LNet network can be a simple network (e.g. tcp0) or a network of
- * LNet networks connected by LNet routers. Therefore an end-point address
- * has two parts: network ID, and address within a network.
- *
- * \see LNET_NIDNET, LNET_NIDADDR, and LNET_MKNID.
- */
-typedef __u64 lnet_nid_t;
-/**
- * ID of a process in a node. Shortened as PID to distinguish from
- * lnet_process_id, the global process ID.
- */
-typedef __u32 lnet_pid_t;
-
-/** wildcard NID that matches any end-point address */
-#define LNET_NID_ANY ((lnet_nid_t)(-1))
-/** wildcard PID that matches any lnet_pid_t */
-#define LNET_PID_ANY ((lnet_pid_t)(-1))
-
-#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
-#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */
-#define LNET_PID_LUSTRE 12345
-
-#define LNET_TIME_FOREVER (-1)
-
-/* how an LNET NID encodes net:address */
-/** extract the address part of an lnet_nid_t */
-
-static inline __u32 LNET_NIDADDR(lnet_nid_t nid)
-{
- return nid & 0xffffffff;
-}
-
-static inline __u32 LNET_NIDNET(lnet_nid_t nid)
-{
- return (nid >> 32) & 0xffffffff;
-}
-
-static inline lnet_nid_t LNET_MKNID(__u32 net, __u32 addr)
-{
- return (((__u64)net) << 32) | addr;
-}
-
-static inline __u32 LNET_NETNUM(__u32 net)
-{
- return net & 0xffff;
-}
-
-static inline __u32 LNET_NETTYP(__u32 net)
-{
- return (net >> 16) & 0xffff;
-}
-
-static inline __u32 LNET_MKNET(__u32 type, __u32 num)
-{
- return (type << 16) | num;
-}
-
-#define WIRE_ATTR __packed
-
-/* Packed version of lnet_process_id to transfer via network */
-struct lnet_process_id_packed {
- /* node id / process id */
- lnet_nid_t nid;
- lnet_pid_t pid;
-} WIRE_ATTR;
-
-/*
- * The wire handle's interface cookie only matches one network interface in
- * one epoch (i.e. new cookie when the interface restarts or the node
- * reboots). The object cookie only matches one object on that interface
- * during that object's lifetime (i.e. no cookie re-use).
- */
-struct lnet_handle_wire {
- __u64 wh_interface_cookie;
- __u64 wh_object_cookie;
-} WIRE_ATTR;
-
-enum lnet_msg_type {
- LNET_MSG_ACK = 0,
- LNET_MSG_PUT,
- LNET_MSG_GET,
- LNET_MSG_REPLY,
- LNET_MSG_HELLO,
-};
-
-/*
- * The variant fields of the portals message header are aligned on an 8
- * byte boundary in the message header. Note that all types used in these
- * wire structs MUST be fixed size and the smaller types are placed at the
- * end.
- */
-struct lnet_ack {
- struct lnet_handle_wire dst_wmd;
- __u64 match_bits;
- __u32 mlength;
-} WIRE_ATTR;
-
-struct lnet_put {
- struct lnet_handle_wire ack_wmd;
- __u64 match_bits;
- __u64 hdr_data;
- __u32 ptl_index;
- __u32 offset;
-} WIRE_ATTR;
-
-struct lnet_get {
- struct lnet_handle_wire return_wmd;
- __u64 match_bits;
- __u32 ptl_index;
- __u32 src_offset;
- __u32 sink_length;
-} WIRE_ATTR;
-
-struct lnet_reply {
- struct lnet_handle_wire dst_wmd;
-} WIRE_ATTR;
-
-struct lnet_hello {
- __u64 incarnation;
- __u32 type;
-} WIRE_ATTR;
-
-struct lnet_hdr {
- lnet_nid_t dest_nid;
- lnet_nid_t src_nid;
- lnet_pid_t dest_pid;
- lnet_pid_t src_pid;
- __u32 type; /* enum lnet_msg_type */
- __u32 payload_length; /* payload data to follow */
- /*<------__u64 aligned------->*/
- union {
- struct lnet_ack ack;
- struct lnet_put put;
- struct lnet_get get;
- struct lnet_reply reply;
- struct lnet_hello hello;
- } msg;
-} WIRE_ATTR;
-
-/*
- * A HELLO message contains a magic number and protocol version
- * code in the header's dest_nid, the peer's NID in the src_nid, and
- * LNET_MSG_HELLO in the type field. All other common fields are zero
- * (including payload_size; i.e. no payload).
- * This is for use by byte-stream LNDs (e.g. TCP/IP) to check the peer is
- * running the same protocol and to find out its NID. These LNDs should
- * exchange HELLO messages when a connection is first established. Individual
- * LNDs can put whatever else they fancy in struct lnet_hdr::msg.
- */
-struct lnet_magicversion {
- __u32 magic; /* LNET_PROTO_TCP_MAGIC */
- __u16 version_major; /* increment on incompatible change */
- __u16 version_minor; /* increment on compatible change */
-} WIRE_ATTR;
-
-/* PROTO MAGIC for LNDs */
-#define LNET_PROTO_IB_MAGIC 0x0be91b91
-#define LNET_PROTO_GNI_MAGIC 0xb00fbabe /* ask Kim */
-#define LNET_PROTO_TCP_MAGIC 0xeebc0ded
-#define LNET_PROTO_ACCEPTOR_MAGIC 0xacce7100
-#define LNET_PROTO_PING_MAGIC 0x70696E67 /* 'ping' */
-
-/* Placeholder for a future "unified" protocol across all LNDs */
-/*
- * Current LNDs that receive a request with this magic will respond with a
- * "stub" reply using their current protocol
- */
-#define LNET_PROTO_MAGIC 0x45726963 /* ! */
-
-#define LNET_PROTO_TCP_VERSION_MAJOR 1
-#define LNET_PROTO_TCP_VERSION_MINOR 0
-
-/* Acceptor connection request */
-struct lnet_acceptor_connreq {
- __u32 acr_magic; /* PTL_ACCEPTOR_PROTO_MAGIC */
- __u32 acr_version; /* protocol version */
- __u64 acr_nid; /* target NID */
-} WIRE_ATTR;
-
-#define LNET_PROTO_ACCEPTOR_VERSION 1
-
-struct lnet_ni_status {
- lnet_nid_t ns_nid;
- __u32 ns_status;
- __u32 ns_unused;
-} WIRE_ATTR;
-
-struct lnet_ping_info {
- __u32 pi_magic;
- __u32 pi_features;
- lnet_pid_t pi_pid;
- __u32 pi_nnis;
- struct lnet_ni_status pi_ni[0];
-} WIRE_ATTR;
-
-struct lnet_counters {
- __u32 msgs_alloc;
- __u32 msgs_max;
- __u32 errors;
- __u32 send_count;
- __u32 recv_count;
- __u32 route_count;
- __u32 drop_count;
- __u64 send_length;
- __u64 recv_length;
- __u64 route_length;
- __u64 drop_length;
-} WIRE_ATTR;
-
-#define LNET_NI_STATUS_UP 0x15aac0de
-#define LNET_NI_STATUS_DOWN 0xdeadface
-#define LNET_NI_STATUS_INVALID 0x00000000
-
-#define LNET_MAX_INTERFACES 16
-
-/**
- * Objects maintained by the LNet are accessed through handles. Handle types
- * have names of the form lnet_handle_xx, where xx is one of the two letter
- * object type codes ('eq' for event queue, 'md' for memory descriptor, and
- * 'me' for match entry). Each type of object is given a unique handle type
- * to enhance type checking.
- */
-#define LNET_WIRE_HANDLE_COOKIE_NONE (-1)
-
-struct lnet_handle_eq {
- u64 cookie;
-};
-
-/**
- * Invalidate eq handle @h.
- */
-static inline void LNetInvalidateEQHandle(struct lnet_handle_eq *h)
-{
- h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
-}
-
-/**
- * Check whether eq handle @h is invalid.
- *
- * @return 1 if handle is invalid, 0 if valid.
- */
-static inline int LNetEQHandleIsInvalid(struct lnet_handle_eq h)
-{
- return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie);
-}
-
-struct lnet_handle_md {
- u64 cookie;
-};
-
-/**
- * Invalidate md handle @h.
- */
-static inline void LNetInvalidateMDHandle(struct lnet_handle_md *h)
-{
- h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
-}
-
-/**
- * Check whether eq handle @h is invalid.
- *
- * @return 1 if handle is invalid, 0 if valid.
- */
-static inline int LNetMDHandleIsInvalid(struct lnet_handle_md h)
-{
- return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie);
-}
-
-struct lnet_handle_me {
- u64 cookie;
-};
-
-/**
- * Global process ID.
- */
-struct lnet_process_id {
- /** node id */
- lnet_nid_t nid;
- /** process id */
- lnet_pid_t pid;
-};
-/** @} lnet_addr */
-
-/** \addtogroup lnet_me
- * @{
- */
-
-/**
- * Specifies whether the match entry or memory descriptor should be unlinked
- * automatically (LNET_UNLINK) or not (LNET_RETAIN).
- */
-enum lnet_unlink {
- LNET_RETAIN = 0,
- LNET_UNLINK
-};
-
-/**
- * Values of the type lnet_ins_pos are used to control where a new match
- * entry is inserted. The value LNET_INS_BEFORE is used to insert the new
- * entry before the current entry or before the head of the list. The value
- * LNET_INS_AFTER is used to insert the new entry after the current entry
- * or after the last item in the list.
- */
-enum lnet_ins_pos {
- /** insert ME before current position or head of the list */
- LNET_INS_BEFORE,
- /** insert ME after current position or tail of the list */
- LNET_INS_AFTER,
- /** attach ME at tail of local CPU partition ME list */
- LNET_INS_LOCAL
-};
-
-/** @} lnet_me */
-
-/** \addtogroup lnet_md
- * @{
- */
-
-/**
- * Defines the visible parts of a memory descriptor. Values of this type
- * are used to initialize memory descriptors.
- */
-struct lnet_md {
- /**
- * Specify the memory region associated with the memory descriptor.
- * If the options field has:
- * - LNET_MD_KIOV bit set: The start field points to the starting
- * address of an array of struct bio_vec and the length field specifies
- * the number of entries in the array. The length can't be bigger
- * than LNET_MAX_IOV. The struct bio_vec is used to describe page-based
- * fragments that are not necessarily mapped in virtual memory.
- * - LNET_MD_IOVEC bit set: The start field points to the starting
- * address of an array of struct iovec and the length field specifies
- * the number of entries in the array. The length can't be bigger
- * than LNET_MAX_IOV. The struct iovec is used to describe fragments
- * that have virtual addresses.
- * - Otherwise: The memory region is contiguous. The start field
- * specifies the starting address for the memory region and the
- * length field specifies its length.
- *
- * When the memory region is fragmented, all fragments but the first
- * one must start on page boundary, and all but the last must end on
- * page boundary.
- */
- void *start;
- unsigned int length;
- /**
- * Specifies the maximum number of operations that can be performed
- * on the memory descriptor. An operation is any action that could
- * possibly generate an event. In the usual case, the threshold value
- * is decremented for each operation on the MD. When the threshold
- * drops to zero, the MD becomes inactive and does not respond to
- * operations. A threshold value of LNET_MD_THRESH_INF indicates that
- * there is no bound on the number of operations that may be applied
- * to a MD.
- */
- int threshold;
- /**
- * Specifies the largest incoming request that the memory descriptor
- * should respond to. When the unused portion of a MD (length -
- * local offset) falls below this value, the MD becomes inactive and
- * does not respond to further operations. This value is only used
- * if the LNET_MD_MAX_SIZE option is set.
- */
- int max_size;
- /**
- * Specifies the behavior of the memory descriptor. A bitwise OR
- * of the following values can be used:
- * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD.
- * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD.
- * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory
- * region is provided by the incoming request. By default, the
- * offset is maintained locally. When maintained locally, the
- * offset is incremented by the length of the request so that
- * the next operation (PUT or GET) will access the next part of
- * the memory region. Note that only one offset variable exists
- * per memory descriptor. If both PUT and GET operations are
- * performed on a memory descriptor, the offset is updated each time.
- * - LNET_MD_TRUNCATE: The length provided in the incoming request can
- * be reduced to match the memory available in the region (determined
- * by subtracting the offset from the length of the memory region).
- * By default, if the length in the incoming operation is greater
- * than the amount of memory available, the operation is rejected.
- * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for
- * incoming PUT operations, even if requested. By default,
- * acknowledgments are sent for PUT operations that request an
- * acknowledgment. Acknowledgments are never sent for GET operations.
- * The data sent in the REPLY serves as an implicit acknowledgment.
- * - LNET_MD_KIOV: The start and length fields specify an array of
- * struct bio_vec.
- * - LNET_MD_IOVEC: The start and length fields specify an array of
- * struct iovec.
- * - LNET_MD_MAX_SIZE: The max_size field is valid.
- *
- * Note:
- * - LNET_MD_KIOV or LNET_MD_IOVEC allows for a scatter/gather
- * capability for memory descriptors. They can't be both set.
- * - When LNET_MD_MAX_SIZE is set, the total length of the memory
- * region (i.e. sum of all fragment lengths) must not be less than
- * \a max_size.
- */
- unsigned int options;
- /**
- * A user-specified value that is associated with the memory
- * descriptor. The value does not need to be a pointer, but must fit
- * in the space used by a pointer. This value is recorded in events
- * associated with operations on this MD.
- */
- void *user_ptr;
- /**
- * A handle for the event queue used to log the operations performed on
- * the memory region. If this argument is a NULL handle (i.e. nullified
- * by LNetInvalidateHandle()), operations performed on this memory
- * descriptor are not logged.
- */
- struct lnet_handle_eq eq_handle;
-};
-
-/*
- * Max Transfer Unit (minimum supported everywhere).
- * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
- * these limits are system wide and not interface-local.
- */
-#define LNET_MTU_BITS 20
-#define LNET_MTU (1 << LNET_MTU_BITS)
-
-/** limit on the number of fragments in discontiguous MDs */
-#define LNET_MAX_IOV 256
-
-/**
- * Options for the MD structure. See lnet_md::options.
- */
-#define LNET_MD_OP_PUT (1 << 0)
-/** See lnet_md::options. */
-#define LNET_MD_OP_GET (1 << 1)
-/** See lnet_md::options. */
-#define LNET_MD_MANAGE_REMOTE (1 << 2)
-/* unused (1 << 3) */
-/** See lnet_md::options. */
-#define LNET_MD_TRUNCATE (1 << 4)
-/** See lnet_md::options. */
-#define LNET_MD_ACK_DISABLE (1 << 5)
-/** See lnet_md::options. */
-#define LNET_MD_IOVEC (1 << 6)
-/** See lnet_md::options. */
-#define LNET_MD_MAX_SIZE (1 << 7)
-/** See lnet_md::options. */
-#define LNET_MD_KIOV (1 << 8)
-
-/* For compatibility with Cray Portals */
-#define LNET_MD_PHYS 0
-
-/** Infinite threshold on MD operations. See lnet_md::threshold */
-#define LNET_MD_THRESH_INF (-1)
-
-/** @} lnet_md */
-
-/** \addtogroup lnet_eq
- * @{
- */
-
-/**
- * Six types of events can be logged in an event queue.
- */
-enum lnet_event_kind {
- /** An incoming GET operation has completed on the MD. */
- LNET_EVENT_GET = 1,
- /**
- * An incoming PUT operation has completed on the MD. The
- * underlying layers will not alter the memory (on behalf of this
- * operation) once this event has been logged.
- */
- LNET_EVENT_PUT,
- /**
- * A REPLY operation has completed. This event is logged after the
- * data (if any) from the REPLY has been written into the MD.
- */
- LNET_EVENT_REPLY,
- /** An acknowledgment has been received. */
- LNET_EVENT_ACK,
- /**
- * An outgoing send (PUT or GET) operation has completed. This event
- * is logged after the entire buffer has been sent and it is safe for
- * the caller to reuse the buffer.
- *
- * Note:
- * - The LNET_EVENT_SEND doesn't guarantee message delivery. It can
- * happen even when the message has not yet been put out on wire.
- * - It's unsafe to assume that in an outgoing GET operation
- * the LNET_EVENT_SEND event would happen before the
- * LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and
- * LNET_EVENT_ACK events in an outgoing PUT operation.
- */
- LNET_EVENT_SEND,
- /**
- * A MD has been unlinked. Note that LNetMDUnlink() does not
- * necessarily trigger an LNET_EVENT_UNLINK event.
- * \see LNetMDUnlink
- */
- LNET_EVENT_UNLINK,
-};
-
-#define LNET_SEQ_GT(a, b) (((signed long)((a) - (b))) > 0)
-
-/**
- * Information about an event on a MD.
- */
-struct lnet_event {
- /** The identifier (nid, pid) of the target. */
- struct lnet_process_id target;
- /** The identifier (nid, pid) of the initiator. */
- struct lnet_process_id initiator;
- /**
- * The NID of the immediate sender. If the request has been forwarded
- * by routers, this is the NID of the last hop; otherwise it's the
- * same as the initiator.
- */
- lnet_nid_t sender;
- /** Indicates the type of the event. */
- enum lnet_event_kind type;
- /** The portal table index specified in the request */
- unsigned int pt_index;
- /** A copy of the match bits specified in the request. */
- __u64 match_bits;
- /** The length (in bytes) specified in the request. */
- unsigned int rlength;
- /**
- * The length (in bytes) of the data that was manipulated by the
- * operation. For truncated operations, the manipulated length will be
- * the number of bytes specified by the MD (possibly with an offset,
- * see lnet_md). For all other operations, the manipulated length
- * will be the length of the requested operation, i.e. rlength.
- */
- unsigned int mlength;
- /**
- * The handle to the MD associated with the event. The handle may be
- * invalid if the MD has been unlinked.
- */
- struct lnet_handle_md md_handle;
- /**
- * A snapshot of the state of the MD immediately after the event has
- * been processed. In particular, the threshold field in md will
- * reflect the value of the threshold after the operation occurred.
- */
- struct lnet_md md;
- /**
- * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT.
- * \see LNetPut
- */
- __u64 hdr_data;
- /**
- * Indicates the completion status of the operation. It's 0 for
- * successful operations, otherwise it's an error code.
- */
- int status;
- /**
- * Indicates whether the MD has been unlinked. Note that:
- * - An event with unlinked set is the last event on the MD.
- * - This field is also set for an explicit LNET_EVENT_UNLINK event.
- * \see LNetMDUnlink
- */
- int unlinked;
- /**
- * The displacement (in bytes) into the memory region that the
- * operation used. The offset can be determined by the operation for
- * a remote managed MD or by the local MD.
- * \see lnet_md::options
- */
- unsigned int offset;
- /**
- * The sequence number for this event. Sequence numbers are unique
- * to each event.
- */
- volatile unsigned long sequence;
-};
-
-/**
- * Event queue handler function type.
- *
- * The EQ handler runs for each event that is deposited into the EQ. The
- * handler is supplied with a pointer to the event that triggered the
- * handler invocation.
- *
- * The handler must not block, must be reentrant, and must not call any LNet
- * API functions. It should return as quickly as possible.
- */
-typedef void (*lnet_eq_handler_t)(struct lnet_event *event);
-#define LNET_EQ_HANDLER_NONE NULL
-/** @} lnet_eq */
-
-/** \addtogroup lnet_data
- * @{
- */
-
-/**
- * Specify whether an acknowledgment should be sent by target when the PUT
- * operation completes (i.e., when the data has been written to a MD of the
- * target process).
- *
- * \see lnet_md::options for the discussion on LNET_MD_ACK_DISABLE by which
- * acknowledgments can be disabled for a MD.
- */
-enum lnet_ack_req {
- /** Request an acknowledgment */
- LNET_ACK_REQ,
- /** Request that no acknowledgment should be generated. */
- LNET_NOACK_REQ
-};
-/** @} lnet_data */
-
-/** @} lnet */
-#endif
diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h b/drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h
deleted file mode 100644
index d9da625d70de..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * This file is part of Portals, http://www.sf.net/projects/lustre/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * header for lnet ioctl
- */
-#ifndef _LNETCTL_H_
-#define _LNETCTL_H_
-
-#include <uapi/linux/lnet/lnet-types.h>
-
-/** \addtogroup lnet_fault_simulation
- * @{
- */
-
-enum {
- LNET_CTL_DROP_ADD,
- LNET_CTL_DROP_DEL,
- LNET_CTL_DROP_RESET,
- LNET_CTL_DROP_LIST,
- LNET_CTL_DELAY_ADD,
- LNET_CTL_DELAY_DEL,
- LNET_CTL_DELAY_RESET,
- LNET_CTL_DELAY_LIST,
-};
-
-#define LNET_ACK_BIT (1 << 0)
-#define LNET_PUT_BIT (1 << 1)
-#define LNET_GET_BIT (1 << 2)
-#define LNET_REPLY_BIT (1 << 3)
-
-/** ioctl parameter for LNet fault simulation */
-struct lnet_fault_attr {
- /**
- * source NID of drop rule
- * LNET_NID_ANY is wildcard for all sources
- * 255.255.255.255@net is wildcard for all addresses from @net
- */
- lnet_nid_t fa_src;
- /** destination NID of drop rule, see \a dr_src for details */
- lnet_nid_t fa_dst;
- /**
- * Portal mask to drop, -1 means all portals, for example:
- * fa_ptl_mask = (1 << _LDLM_CB_REQUEST_PORTAL ) |
- * (1 << LDLM_CANCEL_REQUEST_PORTAL)
- *
- * If it is non-zero then only PUT and GET will be filtered, otherwise
- * there is no portal filter, all matched messages will be checked.
- */
- __u64 fa_ptl_mask;
- /**
- * message types to drop, for example:
- * dra_type = LNET_DROP_ACK_BIT | LNET_DROP_PUT_BIT
- *
- * If it is non-zero then only specified message types are filtered,
- * otherwise all message types will be checked.
- */
- __u32 fa_msg_mask;
- union {
- /** message drop simulation */
- struct {
- /** drop rate of this rule */
- __u32 da_rate;
- /**
- * time interval of message drop, it is exclusive
- * with da_rate
- */
- __u32 da_interval;
- } drop;
- /** message latency simulation */
- struct {
- __u32 la_rate;
- /**
- * time interval of message delay, it is exclusive
- * with la_rate
- */
- __u32 la_interval;
- /** latency to delay */
- __u32 la_latency;
- } delay;
- __u64 space[8];
- } u;
-};
-
-/** fault simluation stats */
-struct lnet_fault_stat {
- /** total # matched messages */
- __u64 fs_count;
- /** # dropped LNET_MSG_PUT by this rule */
- __u64 fs_put;
- /** # dropped LNET_MSG_ACK by this rule */
- __u64 fs_ack;
- /** # dropped LNET_MSG_GET by this rule */
- __u64 fs_get;
- /** # dropped LNET_MSG_REPLY by this rule */
- __u64 fs_reply;
- union {
- struct {
- /** total # dropped messages */
- __u64 ds_dropped;
- } drop;
- struct {
- /** total # delayed messages */
- __u64 ls_delayed;
- } delay;
- __u64 space[8];
- } u;
-};
-
-/** @} lnet_fault_simulation */
-
-#define LNET_DEV_ID 0
-#define LNET_DEV_PATH "/dev/lnet"
-#define LNET_DEV_MAJOR 10
-#define LNET_DEV_MINOR 240
-#define OBD_DEV_ID 1
-#define OBD_DEV_NAME "obd"
-#define OBD_DEV_PATH "/dev/" OBD_DEV_NAME
-#define OBD_DEV_MAJOR 10
-#define OBD_DEV_MINOR 241
-#define SMFS_DEV_ID 2
-#define SMFS_DEV_PATH "/dev/snapdev"
-#define SMFS_DEV_MAJOR 10
-#define SMFS_DEV_MINOR 242
-
-#endif
diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h b/drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h
deleted file mode 100644
index a4f9ff01d458..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- *
- * lnet/include/lnet/lnetst.h
- *
- * Author: Liang Zhen <liang.zhen@intel.com>
- */
-
-#ifndef __LNET_ST_H__
-#define __LNET_ST_H__
-
-#include <linux/types.h>
-
-#define LST_FEAT_NONE (0)
-#define LST_FEAT_BULK_LEN (1 << 0) /* enable variable page size */
-
-#define LST_FEATS_EMPTY (LST_FEAT_NONE)
-#define LST_FEATS_MASK (LST_FEAT_NONE | LST_FEAT_BULK_LEN)
-
-#define LST_NAME_SIZE 32 /* max name buffer length */
-
-#define LSTIO_DEBUG 0xC00 /* debug */
-#define LSTIO_SESSION_NEW 0xC01 /* create session */
-#define LSTIO_SESSION_END 0xC02 /* end session */
-#define LSTIO_SESSION_INFO 0xC03 /* query session */
-#define LSTIO_GROUP_ADD 0xC10 /* add group */
-#define LSTIO_GROUP_LIST 0xC11 /* list all groups in session */
-#define LSTIO_GROUP_INFO 0xC12 /* query default information of
- * specified group
- */
-#define LSTIO_GROUP_DEL 0xC13 /* delete group */
-#define LSTIO_NODES_ADD 0xC14 /* add nodes to specified group */
-#define LSTIO_GROUP_UPDATE 0xC15 /* update group */
-#define LSTIO_BATCH_ADD 0xC20 /* add batch */
-#define LSTIO_BATCH_START 0xC21 /* start batch */
-#define LSTIO_BATCH_STOP 0xC22 /* stop batch */
-#define LSTIO_BATCH_DEL 0xC23 /* delete batch */
-#define LSTIO_BATCH_LIST 0xC24 /* show all batches in the session */
-#define LSTIO_BATCH_INFO 0xC25 /* show defail of specified batch */
-#define LSTIO_TEST_ADD 0xC26 /* add test (to batch) */
-#define LSTIO_BATCH_QUERY 0xC27 /* query batch status */
-#define LSTIO_STAT_QUERY 0xC30 /* get stats */
-
-struct lst_sid {
- lnet_nid_t ses_nid; /* nid of console node */
- __u64 ses_stamp; /* time stamp */
-}; /*** session id */
-
-extern struct lst_sid LST_INVALID_SID;
-
-struct lst_bid {
- __u64 bat_id; /* unique id in session */
-}; /*** batch id (group of tests) */
-
-/* Status of test node */
-#define LST_NODE_ACTIVE 0x1 /* node in this session */
-#define LST_NODE_BUSY 0x2 /* node is taken by other session */
-#define LST_NODE_DOWN 0x4 /* node is down */
-#define LST_NODE_UNKNOWN 0x8 /* node not in session */
-
-struct lstcon_node_ent {
- struct lnet_process_id nde_id; /* id of node */
- int nde_state; /* state of node */
-}; /*** node entry, for list_group command */
-
-struct lstcon_ndlist_ent {
- int nle_nnode; /* # of nodes */
- int nle_nactive; /* # of active nodes */
- int nle_nbusy; /* # of busy nodes */
- int nle_ndown; /* # of down nodes */
- int nle_nunknown; /* # of unknown nodes */
-}; /*** node_list entry, for list_batch command */
-
-struct lstcon_test_ent {
- int tse_type; /* test type */
- int tse_loop; /* loop count */
- int tse_concur; /* concurrency of test */
-}; /* test summary entry, for
- * list_batch command
- */
-
-struct lstcon_batch_ent {
- int bae_state; /* batch status */
- int bae_timeout; /* batch timeout */
- int bae_ntest; /* # of tests in the batch */
-}; /* batch summary entry, for
- * list_batch command
- */
-
-struct lstcon_test_batch_ent {
- struct lstcon_ndlist_ent tbe_cli_nle; /* client (group) node_list
- * entry
- */
- struct lstcon_ndlist_ent tbe_srv_nle; /* server (group) node_list
- * entry
- */
- union {
- struct lstcon_test_ent tbe_test; /* test entry */
- struct lstcon_batch_ent tbe_batch;/* batch entry */
- } u;
-}; /* test/batch verbose information entry,
- * for list_batch command
- */
-
-struct lstcon_rpc_ent {
- struct list_head rpe_link; /* link chain */
- struct lnet_process_id rpe_peer; /* peer's id */
- struct timeval rpe_stamp; /* time stamp of RPC */
- int rpe_state; /* peer's state */
- int rpe_rpc_errno; /* RPC errno */
-
- struct lst_sid rpe_sid; /* peer's session id */
- int rpe_fwk_errno; /* framework errno */
- int rpe_priv[4]; /* private data */
- char rpe_payload[0]; /* private reply payload */
-};
-
-struct lstcon_trans_stat {
- int trs_rpc_stat[4]; /* RPCs stat (0: total 1: failed
- * 2: finished
- * 4: reserved
- */
- int trs_rpc_errno; /* RPC errno */
- int trs_fwk_stat[8]; /* framework stat */
- int trs_fwk_errno; /* errno of the first remote error */
- void *trs_fwk_private; /* private framework stat */
-};
-
-static inline int
-lstcon_rpc_stat_total(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_rpc_stat[0] : stat->trs_rpc_stat[0];
-}
-
-static inline int
-lstcon_rpc_stat_success(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_rpc_stat[1] : stat->trs_rpc_stat[1];
-}
-
-static inline int
-lstcon_rpc_stat_failure(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_rpc_stat[2] : stat->trs_rpc_stat[2];
-}
-
-static inline int
-lstcon_sesop_stat_success(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_sesop_stat_failure(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-static inline int
-lstcon_sesqry_stat_active(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_sesqry_stat_busy(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-static inline int
-lstcon_sesqry_stat_unknown(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[2] : stat->trs_fwk_stat[2];
-}
-
-static inline int
-lstcon_tsbop_stat_success(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_tsbop_stat_failure(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-static inline int
-lstcon_tsbqry_stat_idle(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_tsbqry_stat_run(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-static inline int
-lstcon_tsbqry_stat_failure(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[2] : stat->trs_fwk_stat[2];
-}
-
-static inline int
-lstcon_statqry_stat_success(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_statqry_stat_failure(struct lstcon_trans_stat *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-/* create a session */
-struct lstio_session_new_args {
- int lstio_ses_key; /* IN: local key */
- int lstio_ses_timeout; /* IN: session timeout */
- int lstio_ses_force; /* IN: force create ? */
- /** IN: session features */
- unsigned int lstio_ses_feats;
- struct lst_sid __user *lstio_ses_idp; /* OUT: session id */
- int lstio_ses_nmlen; /* IN: name length */
- char __user *lstio_ses_namep; /* IN: session name */
-};
-
-/* query current session */
-struct lstio_session_info_args {
- struct lst_sid __user *lstio_ses_idp; /* OUT: session id */
- int __user *lstio_ses_keyp; /* OUT: local key */
- /** OUT: session features */
- unsigned int __user *lstio_ses_featp;
- struct lstcon_ndlist_ent __user *lstio_ses_ndinfo;/* OUT: */
- int lstio_ses_nmlen; /* IN: name length */
- char __user *lstio_ses_namep; /* OUT: session name */
-};
-
-/* delete a session */
-struct lstio_session_end_args {
- int lstio_ses_key; /* IN: session key */
-};
-
-#define LST_OPC_SESSION 1
-#define LST_OPC_GROUP 2
-#define LST_OPC_NODES 3
-#define LST_OPC_BATCHCLI 4
-#define LST_OPC_BATCHSRV 5
-
-struct lstio_debug_args {
- int lstio_dbg_key; /* IN: session key */
- int lstio_dbg_type; /* IN: debug
- * session|batch|
- * group|nodes list
- */
- int lstio_dbg_flags; /* IN: reserved debug
- * flags
- */
- int lstio_dbg_timeout; /* IN: timeout of
- * debug
- */
- int lstio_dbg_nmlen; /* IN: len of name */
- char __user *lstio_dbg_namep; /* IN: name of
- * group|batch
- */
- int lstio_dbg_count; /* IN: # of test nodes
- * to debug
- */
- struct lnet_process_id __user *lstio_dbg_idsp; /* IN: id of test
- * nodes
- */
- struct list_head __user *lstio_dbg_resultp; /* OUT: list head of
- * result buffer
- */
-};
-
-struct lstio_group_add_args {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name length */
- char __user *lstio_grp_namep; /* IN: group name */
-};
-
-struct lstio_group_del_args {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name length */
- char __user *lstio_grp_namep; /* IN: group name */
-};
-
-#define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */
-#define LST_GROUP_REFRESH 2 /* refresh inactive nodes
- * in the group
- */
-#define LST_GROUP_RMND 3 /* delete nodes from the group */
-
-struct lstio_group_update_args {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_opc; /* IN: OPC */
- int lstio_grp_args; /* IN: arguments */
- int lstio_grp_nmlen; /* IN: name length */
- char __user *lstio_grp_namep; /* IN: group name */
- int lstio_grp_count; /* IN: # of nodes id */
- struct lnet_process_id __user *lstio_grp_idsp; /* IN: array of nodes */
- struct list_head __user *lstio_grp_resultp; /* OUT: list head of
- * result buffer
- */
-};
-
-struct lstio_group_nodes_args {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name length */
- char __user *lstio_grp_namep; /* IN: group name */
- int lstio_grp_count; /* IN: # of nodes */
- /** OUT: session features */
- unsigned int __user *lstio_grp_featp;
- struct lnet_process_id __user *lstio_grp_idsp; /* IN: nodes */
- struct list_head __user *lstio_grp_resultp; /* OUT: list head of
- * result buffer
- */
-};
-
-struct lstio_group_list_args {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_idx; /* IN: group idx */
- int lstio_grp_nmlen; /* IN: name len */
- char __user *lstio_grp_namep; /* OUT: name */
-};
-
-struct lstio_group_info_args {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name len */
- char __user *lstio_grp_namep; /* IN: name */
- struct lstcon_ndlist_ent __user *lstio_grp_entp;/* OUT: description
- * of group
- */
- int __user *lstio_grp_idxp; /* IN/OUT: node index */
- int __user *lstio_grp_ndentp; /* IN/OUT: # of nodent */
- struct lstcon_node_ent __user *lstio_grp_dentsp;/* OUT: nodent array */
-};
-
-#define LST_DEFAULT_BATCH "batch" /* default batch name */
-
-struct lstio_batch_add_args {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_nmlen; /* IN: name length */
- char __user *lstio_bat_namep; /* IN: batch name */
-};
-
-struct lstio_batch_del_args {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_nmlen; /* IN: name length */
- char __user *lstio_bat_namep; /* IN: batch name */
-};
-
-struct lstio_batch_run_args {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_timeout; /* IN: timeout for
- * the batch
- */
- int lstio_bat_nmlen; /* IN: name length */
- char __user *lstio_bat_namep; /* IN: batch name */
- struct list_head __user *lstio_bat_resultp; /* OUT: list head of
- * result buffer
- */
-};
-
-struct lstio_batch_stop_args {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_force; /* IN: abort unfinished
- * test RPC
- */
- int lstio_bat_nmlen; /* IN: name length */
- char __user *lstio_bat_namep; /* IN: batch name */
- struct list_head __user *lstio_bat_resultp; /* OUT: list head of
- * result buffer
- */
-};
-
-struct lstio_batch_query_args {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_testidx; /* IN: test index */
- int lstio_bat_client; /* IN: we testing
- * client?
- */
- int lstio_bat_timeout; /* IN: timeout for
- * waiting
- */
- int lstio_bat_nmlen; /* IN: name length */
- char __user *lstio_bat_namep; /* IN: batch name */
- struct list_head __user *lstio_bat_resultp; /* OUT: list head of
- * result buffer
- */
-};
-
-struct lstio_batch_list_args {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_idx; /* IN: index */
- int lstio_bat_nmlen; /* IN: name length */
- char __user *lstio_bat_namep; /* IN: batch name */
-};
-
-struct lstio_batch_info_args {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_nmlen; /* IN: name length */
- char __user *lstio_bat_namep; /* IN: name */
- int lstio_bat_server; /* IN: query server
- * or not
- */
- int lstio_bat_testidx; /* IN: test index */
- struct lstcon_test_batch_ent __user *lstio_bat_entp;/* OUT: batch ent */
-
- int __user *lstio_bat_idxp; /* IN/OUT: index of node */
- int __user *lstio_bat_ndentp; /* IN/OUT: # of nodent */
- struct lstcon_node_ent __user *lstio_bat_dentsp;/* array of nodent */
-};
-
-/* add stat in session */
-struct lstio_stat_args {
- int lstio_sta_key; /* IN: session key */
- int lstio_sta_timeout; /* IN: timeout for
- * stat request
- */
- int lstio_sta_nmlen; /* IN: group name
- * length
- */
- char __user *lstio_sta_namep; /* IN: group name */
- int lstio_sta_count; /* IN: # of pid */
- struct lnet_process_id __user *lstio_sta_idsp; /* IN: pid */
- struct list_head __user *lstio_sta_resultp; /* OUT: list head of
- * result buffer
- */
-};
-
-enum lst_test_type {
- LST_TEST_BULK = 1,
- LST_TEST_PING = 2
-};
-
-/* create a test in a batch */
-#define LST_MAX_CONCUR 1024 /* Max concurrency of test */
-
-struct lstio_test_args {
- int lstio_tes_key; /* IN: session key */
- int lstio_tes_bat_nmlen; /* IN: batch name len */
- char __user *lstio_tes_bat_name; /* IN: batch name */
- int lstio_tes_type; /* IN: test type */
- int lstio_tes_oneside; /* IN: one sided test */
- int lstio_tes_loop; /* IN: loop count */
- int lstio_tes_concur; /* IN: concurrency */
-
- int lstio_tes_dist; /* IN: node distribution in
- * destination groups
- */
- int lstio_tes_span; /* IN: node span in
- * destination groups
- */
- int lstio_tes_sgrp_nmlen; /* IN: source group
- * name length
- */
- char __user *lstio_tes_sgrp_name; /* IN: group name */
- int lstio_tes_dgrp_nmlen; /* IN: destination group
- * name length
- */
- char __user *lstio_tes_dgrp_name; /* IN: group name */
-
- int lstio_tes_param_len; /* IN: param buffer len */
- void __user *lstio_tes_param; /* IN: parameter for specified
- * test: lstio_bulk_param_t,
- * lstio_ping_param_t,
- * ... more
- */
- int __user *lstio_tes_retp; /* OUT: private returned
- * value
- */
- struct list_head __user *lstio_tes_resultp;/* OUT: list head of
- * result buffer
- */
-};
-
-enum lst_brw_type {
- LST_BRW_READ = 1,
- LST_BRW_WRITE = 2
-};
-
-enum lst_brw_flags {
- LST_BRW_CHECK_NONE = 1,
- LST_BRW_CHECK_SIMPLE = 2,
- LST_BRW_CHECK_FULL = 3
-};
-
-struct lst_test_bulk_param {
- int blk_opc; /* bulk operation code */
- int blk_size; /* size (bytes) */
- int blk_time; /* time of running the test*/
- int blk_flags; /* reserved flags */
- int blk_cli_off; /* bulk offset on client */
- int blk_srv_off; /* reserved: bulk offset on server */
-};
-
-struct lst_test_ping_param {
- int png_size; /* size of ping message */
- int png_time; /* time */
- int png_loop; /* loop */
- int png_flags; /* reserved flags */
-};
-
-struct srpc_counters {
- __u32 errors;
- __u32 rpcs_sent;
- __u32 rpcs_rcvd;
- __u32 rpcs_dropped;
- __u32 rpcs_expired;
- __u64 bulk_get;
- __u64 bulk_put;
-} WIRE_ATTR;
-
-struct sfw_counters {
- /** milliseconds since current session started */
- __u32 running_ms;
- __u32 active_batches;
- __u32 zombie_sessions;
- __u32 brw_errors;
- __u32 ping_errors;
-} WIRE_ATTR;
-
-#endif
diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h b/drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h
deleted file mode 100644
index 882074ed6021..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-#ifndef _LNET_NIDSTRINGS_H
-#define _LNET_NIDSTRINGS_H
-
-#include <uapi/linux/lnet/lnet-types.h>
-
-/**
- * Lustre Network Driver types.
- */
-enum {
- /*
- * Only add to these values (i.e. don't ever change or redefine them):
- * network addresses depend on them...
- */
- QSWLND = 1,
- SOCKLND = 2,
- GMLND = 3,
- PTLLND = 4,
- O2IBLND = 5,
- CIBLND = 6,
- OPENIBLND = 7,
- IIBLND = 8,
- LOLND = 9,
- RALND = 10,
- VIBLND = 11,
- MXLND = 12,
- GNILND = 13,
- GNIIPLND = 14,
-};
-
-struct list_head;
-
-#define LNET_NIDSTR_COUNT 1024 /* # of nidstrings */
-#define LNET_NIDSTR_SIZE 32 /* size of each one (see below for usage) */
-
-/* support decl needed by both kernel and user space */
-char *libcfs_next_nidstring(void);
-int libcfs_isknown_lnd(__u32 lnd);
-char *libcfs_lnd2modname(__u32 lnd);
-char *libcfs_lnd2str_r(__u32 lnd, char *buf, size_t buf_size);
-static inline char *libcfs_lnd2str(__u32 lnd)
-{
- return libcfs_lnd2str_r(lnd, libcfs_next_nidstring(),
- LNET_NIDSTR_SIZE);
-}
-
-int libcfs_str2lnd(const char *str);
-char *libcfs_net2str_r(__u32 net, char *buf, size_t buf_size);
-static inline char *libcfs_net2str(__u32 net)
-{
- return libcfs_net2str_r(net, libcfs_next_nidstring(),
- LNET_NIDSTR_SIZE);
-}
-
-char *libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size);
-static inline char *libcfs_nid2str(lnet_nid_t nid)
-{
- return libcfs_nid2str_r(nid, libcfs_next_nidstring(),
- LNET_NIDSTR_SIZE);
-}
-
-__u32 libcfs_str2net(const char *str);
-lnet_nid_t libcfs_str2nid(const char *str);
-int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
-char *libcfs_id2str(struct lnet_process_id id);
-void cfs_free_nidlist(struct list_head *list);
-int cfs_parse_nidlist(char *str, int len, struct list_head *list);
-int cfs_print_nidlist(char *buffer, int count, struct list_head *list);
-int cfs_match_nid(lnet_nid_t nid, struct list_head *list);
-
-int cfs_ip_addr_parse(char *str, int len, struct list_head *list);
-int cfs_ip_addr_match(__u32 addr, struct list_head *list);
-bool cfs_nidrange_is_contiguous(struct list_head *nidlist);
-void cfs_nidrange_find_min_max(struct list_head *nidlist, char *min_nid,
- char *max_nid, size_t nidstr_length);
-
-struct netstrfns {
- __u32 nf_type;
- char *nf_name;
- char *nf_modname;
- void (*nf_addr2str)(__u32 addr, char *str, size_t size);
- int (*nf_str2addr)(const char *str, int nob, __u32 *addr);
- int (*nf_parse_addrlist)(char *str, int len,
- struct list_head *list);
- int (*nf_print_addrlist)(char *buffer, int count,
- struct list_head *list);
- int (*nf_match_addr)(__u32 addr, struct list_head *list);
- bool (*nf_is_contiguous)(struct list_head *nidlist);
- void (*nf_min_max)(struct list_head *nidlist, __u32 *min_nid,
- __u32 *max_nid);
-};
-
-#endif /* _LNET_NIDSTRINGS_H */
diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h b/drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h
deleted file mode 100644
index 6453e053fa99..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * #defines shared between socknal implementation and utilities
- */
-#ifndef __UAPI_LNET_SOCKLND_H__
-#define __UAPI_LNET_SOCKLND_H__
-
-#define SOCKLND_CONN_NONE (-1)
-#define SOCKLND_CONN_ANY 0
-#define SOCKLND_CONN_CONTROL 1
-#define SOCKLND_CONN_BULK_IN 2
-#define SOCKLND_CONN_BULK_OUT 3
-#define SOCKLND_CONN_NTYPES 4
-
-#define SOCKLND_CONN_ACK SOCKLND_CONN_BULK_IN
-
-#endif
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h
deleted file mode 100644
index 11b51d93f64c..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _UAPI_LUSTRE_CFG_H_
-#define _UAPI_LUSTRE_CFG_H_
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <uapi/linux/lustre/lustre_user.h>
-
-/** \defgroup cfg cfg
- *
- * @{
- */
-
-/*
- * 1cf6
- * lcfG
- */
-#define LUSTRE_CFG_VERSION 0x1cf60001
-#define LUSTRE_CFG_MAX_BUFCOUNT 8
-
-#define LCFG_HDR_SIZE(count) \
- __ALIGN_KERNEL(offsetof(struct lustre_cfg, lcfg_buflens[(count)]), 8)
-
-/** If the LCFG_REQUIRED bit is set in a configuration command,
- * then the client is required to understand this parameter
- * in order to mount the filesystem. If it does not understand
- * a REQUIRED command the client mount will fail.
- */
-#define LCFG_REQUIRED 0x0001000
-
-enum lcfg_command_type {
- LCFG_ATTACH = 0x00cf001, /**< create a new obd instance */
- LCFG_DETACH = 0x00cf002, /**< destroy obd instance */
- LCFG_SETUP = 0x00cf003, /**< call type-specific setup */
- LCFG_CLEANUP = 0x00cf004, /**< call type-specific cleanup
- */
- LCFG_ADD_UUID = 0x00cf005, /**< add a nid to a niduuid */
- LCFG_DEL_UUID = 0x00cf006, /**< remove a nid from
- * a niduuid
- */
- LCFG_MOUNTOPT = 0x00cf007, /**< create a profile
- * (mdc, osc)
- */
- LCFG_DEL_MOUNTOPT = 0x00cf008, /**< destroy a profile */
- LCFG_SET_TIMEOUT = 0x00cf009, /**< set obd_timeout */
- LCFG_SET_UPCALL = 0x00cf00a, /**< deprecated */
- LCFG_ADD_CONN = 0x00cf00b, /**< add a failover niduuid to
- * an obd
- */
- LCFG_DEL_CONN = 0x00cf00c, /**< remove a failover niduuid */
- LCFG_LOV_ADD_OBD = 0x00cf00d, /**< add an osc to a lov */
- LCFG_LOV_DEL_OBD = 0x00cf00e, /**< remove an osc from a lov */
- LCFG_PARAM = 0x00cf00f, /**< set a proc parameter */
- LCFG_MARKER = 0x00cf010, /**< metadata about next
- * cfg rec
- */
- LCFG_LOG_START = 0x00ce011, /**< mgc only, process a
- * cfg log
- */
- LCFG_LOG_END = 0x00ce012, /**< stop processing updates */
- LCFG_LOV_ADD_INA = 0x00ce013, /**< like LOV_ADD_OBD,
- * inactive
- */
- LCFG_ADD_MDC = 0x00cf014, /**< add an mdc to a lmv */
- LCFG_DEL_MDC = 0x00cf015, /**< remove an mdc from a lmv */
- LCFG_SPTLRPC_CONF = 0x00ce016, /**< security */
- LCFG_POOL_NEW = 0x00ce020, /**< create an ost pool name */
- LCFG_POOL_ADD = 0x00ce021, /**< add an ost to a pool */
- LCFG_POOL_REM = 0x00ce022, /**< remove an ost from a pool */
- LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */
- LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */
- LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific pre
- * cleanup cleanup
- */
- LCFG_SET_PARAM = 0x00ce032, /**< use set_param syntax to set
- * a proc parameters
- */
-};
-
-struct lustre_cfg_bufs {
- void *lcfg_buf[LUSTRE_CFG_MAX_BUFCOUNT];
- __u32 lcfg_buflen[LUSTRE_CFG_MAX_BUFCOUNT];
- __u32 lcfg_bufcount;
-};
-
-struct lustre_cfg {
- __u32 lcfg_version;
- __u32 lcfg_command;
-
- __u32 lcfg_num;
- __u32 lcfg_flags;
- __u64 lcfg_nid;
- __u32 lcfg_nal; /* not used any more */
-
- __u32 lcfg_bufcount;
- __u32 lcfg_buflens[0];
-};
-
-enum cfg_record_type {
- PORTALS_CFG_TYPE = 1,
- LUSTRE_CFG_TYPE = 123,
-};
-
-#define LUSTRE_CFG_BUFLEN(lcfg, idx) \
- ((lcfg)->lcfg_bufcount <= (idx) ? 0 : (lcfg)->lcfg_buflens[(idx)])
-
-static inline void lustre_cfg_bufs_set(struct lustre_cfg_bufs *bufs,
- __u32 index, void *buf, __u32 buflen)
-{
- if (index >= LUSTRE_CFG_MAX_BUFCOUNT)
- return;
-
- if (!bufs)
- return;
-
- if (bufs->lcfg_bufcount <= index)
- bufs->lcfg_bufcount = index + 1;
-
- bufs->lcfg_buf[index] = buf;
- bufs->lcfg_buflen[index] = buflen;
-}
-
-static inline void lustre_cfg_bufs_set_string(struct lustre_cfg_bufs *bufs,
- __u32 index, char *str)
-{
- lustre_cfg_bufs_set(bufs, index, str, str ? strlen(str) + 1 : 0);
-}
-
-static inline void lustre_cfg_bufs_reset(struct lustre_cfg_bufs *bufs,
- char *name)
-{
- memset((bufs), 0, sizeof(*bufs));
- if (name)
- lustre_cfg_bufs_set_string(bufs, 0, name);
-}
-
-static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, __u32 index)
-{
- __u32 i;
- size_t offset;
- __u32 bufcount;
-
- if (!lcfg)
- return NULL;
-
- bufcount = lcfg->lcfg_bufcount;
- if (index >= bufcount)
- return NULL;
-
- offset = LCFG_HDR_SIZE(lcfg->lcfg_bufcount);
- for (i = 0; i < index; i++)
- offset += __ALIGN_KERNEL(lcfg->lcfg_buflens[i], 8);
- return (char *)lcfg + offset;
-}
-
-static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs,
- struct lustre_cfg *lcfg)
-{
- __u32 i;
-
- bufs->lcfg_bufcount = lcfg->lcfg_bufcount;
- for (i = 0; i < bufs->lcfg_bufcount; i++) {
- bufs->lcfg_buflen[i] = lcfg->lcfg_buflens[i];
- bufs->lcfg_buf[i] = lustre_cfg_buf(lcfg, i);
- }
-}
-
-static inline __u32 lustre_cfg_len(__u32 bufcount, __u32 *buflens)
-{
- __u32 i;
- __u32 len;
-
- len = LCFG_HDR_SIZE(bufcount);
- for (i = 0; i < bufcount; i++)
- len += __ALIGN_KERNEL(buflens[i], 8);
-
- return __ALIGN_KERNEL(len, 8);
-}
-
-static inline void lustre_cfg_init(struct lustre_cfg *lcfg, int cmd,
- struct lustre_cfg_bufs *bufs)
-{
- char *ptr;
- __u32 i;
-
- lcfg->lcfg_version = LUSTRE_CFG_VERSION;
- lcfg->lcfg_command = cmd;
- lcfg->lcfg_bufcount = bufs->lcfg_bufcount;
-
- ptr = (char *)lcfg + LCFG_HDR_SIZE(lcfg->lcfg_bufcount);
- for (i = 0; i < lcfg->lcfg_bufcount; i++) {
- lcfg->lcfg_buflens[i] = bufs->lcfg_buflen[i];
- if (bufs->lcfg_buf[i]) {
- memcpy(ptr, bufs->lcfg_buf[i], bufs->lcfg_buflen[i]);
- ptr += __ALIGN_KERNEL(bufs->lcfg_buflen[i], 8);
- }
- }
-}
-
-static inline int lustre_cfg_sanity_check(void *buf, size_t len)
-{
- struct lustre_cfg *lcfg = (struct lustre_cfg *)buf;
-
- if (!lcfg)
- return -EINVAL;
-
- /* check that the first bits of the struct are valid */
- if (len < LCFG_HDR_SIZE(0))
- return -EINVAL;
-
- if (lcfg->lcfg_version != LUSTRE_CFG_VERSION)
- return -EINVAL;
-
- if (lcfg->lcfg_bufcount >= LUSTRE_CFG_MAX_BUFCOUNT)
- return -EINVAL;
-
- /* check that the buflens are valid */
- if (len < LCFG_HDR_SIZE(lcfg->lcfg_bufcount))
- return -EINVAL;
-
- /* make sure all the pointers point inside the data */
- if (len < lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens))
- return -EINVAL;
-
- return 0;
-}
-
-/** @} cfg */
-
-#endif /* _UAPI_LUSTRE_CFG_H_ */
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h
deleted file mode 100644
index 2e7a8d103777..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2014, Intel Corporation.
- *
- * Copyright 2016 Cray Inc, all rights reserved.
- * Author: Ben Evans.
- *
- * all fid manipulation functions go here
- *
- * FIDS are globally unique within a Lustre filessytem, and are made up
- * of three parts: sequence, Object ID, and version.
- *
- */
-#ifndef _UAPI_LUSTRE_FID_H_
-#define _UAPI_LUSTRE_FID_H_
-
-#include <uapi/linux/lustre/lustre_idl.h>
-
-/** returns fid object sequence */
-static inline __u64 fid_seq(const struct lu_fid *fid)
-{
- return fid->f_seq;
-}
-
-/** returns fid object id */
-static inline __u32 fid_oid(const struct lu_fid *fid)
-{
- return fid->f_oid;
-}
-
-/** returns fid object version */
-static inline __u32 fid_ver(const struct lu_fid *fid)
-{
- return fid->f_ver;
-}
-
-static inline void fid_zero(struct lu_fid *fid)
-{
- memset(fid, 0, sizeof(*fid));
-}
-
-static inline __u64 fid_ver_oid(const struct lu_fid *fid)
-{
- return (__u64)fid_ver(fid) << 32 | fid_oid(fid);
-}
-
-static inline bool fid_seq_is_mdt0(__u64 seq)
-{
- return seq == FID_SEQ_OST_MDT0;
-}
-
-static inline bool fid_seq_is_mdt(__u64 seq)
-{
- return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
-};
-
-static inline bool fid_seq_is_echo(__u64 seq)
-{
- return seq == FID_SEQ_ECHO;
-}
-
-static inline bool fid_is_echo(const struct lu_fid *fid)
-{
- return fid_seq_is_echo(fid_seq(fid));
-}
-
-static inline bool fid_seq_is_llog(__u64 seq)
-{
- return seq == FID_SEQ_LLOG;
-}
-
-static inline bool fid_is_llog(const struct lu_fid *fid)
-{
- /* file with OID == 0 is not llog but contains last oid */
- return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
-}
-
-static inline bool fid_seq_is_rsvd(__u64 seq)
-{
- return seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD;
-};
-
-static inline bool fid_seq_is_special(__u64 seq)
-{
- return seq == FID_SEQ_SPECIAL;
-};
-
-static inline bool fid_seq_is_local_file(__u64 seq)
-{
- return seq == FID_SEQ_LOCAL_FILE ||
- seq == FID_SEQ_LOCAL_NAME;
-};
-
-static inline bool fid_seq_is_root(__u64 seq)
-{
- return seq == FID_SEQ_ROOT;
-}
-
-static inline bool fid_seq_is_dot(__u64 seq)
-{
- return seq == FID_SEQ_DOT_LUSTRE;
-}
-
-static inline bool fid_seq_is_default(__u64 seq)
-{
- return seq == FID_SEQ_LOV_DEFAULT;
-}
-
-static inline bool fid_is_mdt0(const struct lu_fid *fid)
-{
- return fid_seq_is_mdt0(fid_seq(fid));
-}
-
-/**
- * Check if a fid is igif or not.
- * \param fid the fid to be tested.
- * \return true if the fid is an igif; otherwise false.
- */
-static inline bool fid_seq_is_igif(__u64 seq)
-{
- return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
-}
-
-static inline bool fid_is_igif(const struct lu_fid *fid)
-{
- return fid_seq_is_igif(fid_seq(fid));
-}
-
-/**
- * Check if a fid is idif or not.
- * \param fid the fid to be tested.
- * \return true if the fid is an idif; otherwise false.
- */
-static inline bool fid_seq_is_idif(__u64 seq)
-{
- return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
-}
-
-static inline bool fid_is_idif(const struct lu_fid *fid)
-{
- return fid_seq_is_idif(fid_seq(fid));
-}
-
-static inline bool fid_is_local_file(const struct lu_fid *fid)
-{
- return fid_seq_is_local_file(fid_seq(fid));
-}
-
-static inline bool fid_seq_is_norm(__u64 seq)
-{
- return (seq >= FID_SEQ_NORMAL);
-}
-
-static inline bool fid_is_norm(const struct lu_fid *fid)
-{
- return fid_seq_is_norm(fid_seq(fid));
-}
-
-/* convert an OST objid into an IDIF FID SEQ number */
-static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
-{
- return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
-}
-
-/* convert a packed IDIF FID into an OST objid */
-static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
-{
- return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
-}
-
-static inline __u32 idif_ost_idx(__u64 seq)
-{
- return (seq >> 16) & 0xffff;
-}
-
-/* extract ost index from IDIF FID */
-static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
-{
- return idif_ost_idx(fid_seq(fid));
-}
-
-/**
- * Get inode number from an igif.
- * \param fid an igif to get inode number from.
- * \return inode number for the igif.
- */
-static inline ino_t lu_igif_ino(const struct lu_fid *fid)
-{
- return fid_seq(fid);
-}
-
-/**
- * Get inode generation from an igif.
- * \param fid an igif to get inode generation from.
- * \return inode generation for the igif.
- */
-static inline __u32 lu_igif_gen(const struct lu_fid *fid)
-{
- return fid_oid(fid);
-}
-
-/**
- * Build igif from the inode number/generation.
- */
-static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
-{
- fid->f_seq = ino;
- fid->f_oid = gen;
- fid->f_ver = 0;
-}
-
-/*
- * Fids are transmitted across network (in the sender byte-ordering),
- * and stored on disk in big-endian order.
- */
-static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
-{
- dst->f_seq = __cpu_to_le64(fid_seq(src));
- dst->f_oid = __cpu_to_le32(fid_oid(src));
- dst->f_ver = __cpu_to_le32(fid_ver(src));
-}
-
-static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
-{
- dst->f_seq = __le64_to_cpu(fid_seq(src));
- dst->f_oid = __le32_to_cpu(fid_oid(src));
- dst->f_ver = __le32_to_cpu(fid_ver(src));
-}
-
-static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
-{
- dst->f_seq = __cpu_to_be64(fid_seq(src));
- dst->f_oid = __cpu_to_be32(fid_oid(src));
- dst->f_ver = __cpu_to_be32(fid_ver(src));
-}
-
-static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
-{
- dst->f_seq = __be64_to_cpu(fid_seq(src));
- dst->f_oid = __be32_to_cpu(fid_oid(src));
- dst->f_ver = __be32_to_cpu(fid_ver(src));
-}
-
-static inline bool fid_is_sane(const struct lu_fid *fid)
-{
- return fid && ((fid_seq(fid) >= FID_SEQ_START && !fid_ver(fid)) ||
- fid_is_igif(fid) || fid_is_idif(fid) ||
- fid_seq_is_rsvd(fid_seq(fid)));
-}
-
-static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
-{
- return !memcmp(f0, f1, sizeof(*f0));
-}
-
-static inline int lu_fid_cmp(const struct lu_fid *f0,
- const struct lu_fid *f1)
-{
- if (fid_seq(f0) != fid_seq(f1))
- return fid_seq(f0) > fid_seq(f1) ? 1 : -1;
-
- if (fid_oid(f0) != fid_oid(f1))
- return fid_oid(f0) > fid_oid(f1) ? 1 : -1;
-
- if (fid_ver(f0) != fid_ver(f1))
- return fid_ver(f0) > fid_ver(f1) ? 1 : -1;
-
- return 0;
-}
-#endif
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h
deleted file mode 100644
index f5214dc33c60..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2014, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * FIEMAP data structures and flags. This header file will be used until
- * fiemap.h is available in the upstream kernel.
- *
- * Author: Kalpak Shah <kalpak.shah@sun.com>
- * Author: Andreas Dilger <adilger@sun.com>
- */
-
-#ifndef _LUSTRE_FIEMAP_H
-#define _LUSTRE_FIEMAP_H
-
-#include <stddef.h>
-#include <linux/fiemap.h>
-
-/* XXX: We use fiemap_extent::fe_reserved[0] */
-#define fe_device fe_reserved[0]
-
-static inline size_t fiemap_count_to_size(size_t extent_count)
-{
- return sizeof(struct fiemap) + extent_count *
- sizeof(struct fiemap_extent);
-}
-
-static inline unsigned fiemap_size_to_count(size_t array_size)
-{
- return (array_size - sizeof(struct fiemap)) /
- sizeof(struct fiemap_extent);
-}
-
-#define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */
-
-#ifdef FIEMAP_FLAGS_COMPAT
-#undef FIEMAP_FLAGS_COMPAT
-#endif
-
-/* Lustre specific flags - use a high bit, don't conflict with upstream flag */
-#define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */
-#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely.
- * Sets NO_DIRECT flag
- */
-
-#endif /* _LUSTRE_FIEMAP_H */
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h
deleted file mode 100644
index aac98dbcf6e3..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h
+++ /dev/null
@@ -1,2688 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Lustre wire protocol definitions.
- */
-
-/** \defgroup lustreidl lustreidl
- *
- * Lustre wire protocol definitions.
- *
- * ALL structs passing over the wire should be declared here. Structs
- * that are used in interfaces with userspace should go in lustre_user.h.
- *
- * All structs being declared here should be built from simple fixed-size
- * types (__u8, __u16, __u32, __u64) or be built from other types or
- * structs also declared in this file. Similarly, all flags and magic
- * values in those structs should also be declared here. This ensures
- * that the Lustre wire protocol is not influenced by external dependencies.
- *
- * The only other acceptable items in this file are VERY SIMPLE accessor
- * functions to avoid callers grubbing inside the structures. Nothing that
- * depends on external functions or definitions should be in here.
- *
- * Structs must be properly aligned to put 64-bit values on an 8-byte
- * boundary. Any structs being added here must also be added to
- * utils/wirecheck.c and "make newwiretest" run to regenerate the
- * utils/wiretest.c sources. This allows us to verify that wire structs
- * have the proper alignment/size on all architectures.
- *
- * DO NOT CHANGE any of the structs, flags, values declared here and used
- * in released Lustre versions. Some structs may have padding fields that
- * can be used. Some structs might allow addition at the end (verify this
- * in the code to ensure that new/old clients that see this larger struct
- * do not fail, otherwise you need to implement protocol compatibility).
- *
- * @{
- */
-
-#ifndef _LUSTRE_IDL_H_
-#define _LUSTRE_IDL_H_
-
-#include <asm/byteorder.h>
-#include <linux/types.h>
-
-#include <uapi/linux/lnet/lnet-types.h>
-/* Defn's shared with user-space. */
-#include <uapi/linux/lustre/lustre_user.h>
-#include <uapi/linux/lustre/lustre_ver.h>
-
-/*
- * GENERAL STUFF
- */
-/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
- * FOO_REPLY_PORTAL is for incoming replies on the FOO
- * FOO_BULK_PORTAL is for incoming bulk on the FOO
- */
-
-/* Lustre service names are following the format
- * service name + MDT + seq name
- */
-#define LUSTRE_MDT_MAXNAMELEN 80
-
-#define CONNMGR_REQUEST_PORTAL 1
-#define CONNMGR_REPLY_PORTAL 2
-/*#define OSC_REQUEST_PORTAL 3 */
-#define OSC_REPLY_PORTAL 4
-/*#define OSC_BULK_PORTAL 5 */
-#define OST_IO_PORTAL 6
-#define OST_CREATE_PORTAL 7
-#define OST_BULK_PORTAL 8
-/*#define MDC_REQUEST_PORTAL 9 */
-#define MDC_REPLY_PORTAL 10
-/*#define MDC_BULK_PORTAL 11 */
-#define MDS_REQUEST_PORTAL 12
-/*#define MDS_REPLY_PORTAL 13 */
-#define MDS_BULK_PORTAL 14
-#define LDLM_CB_REQUEST_PORTAL 15
-#define LDLM_CB_REPLY_PORTAL 16
-#define LDLM_CANCEL_REQUEST_PORTAL 17
-#define LDLM_CANCEL_REPLY_PORTAL 18
-/*#define PTLBD_REQUEST_PORTAL 19 */
-/*#define PTLBD_REPLY_PORTAL 20 */
-/*#define PTLBD_BULK_PORTAL 21 */
-#define MDS_SETATTR_PORTAL 22
-#define MDS_READPAGE_PORTAL 23
-#define OUT_PORTAL 24
-
-#define MGC_REPLY_PORTAL 25
-#define MGS_REQUEST_PORTAL 26
-#define MGS_REPLY_PORTAL 27
-#define OST_REQUEST_PORTAL 28
-#define FLD_REQUEST_PORTAL 29
-#define SEQ_METADATA_PORTAL 30
-#define SEQ_DATA_PORTAL 31
-#define SEQ_CONTROLLER_PORTAL 32
-#define MGS_BULK_PORTAL 33
-
-/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
- * n8851@cray.com
- */
-
-/* packet types */
-#define PTL_RPC_MSG_REQUEST 4711
-#define PTL_RPC_MSG_ERR 4712
-#define PTL_RPC_MSG_REPLY 4713
-
-/* DON'T use swabbed values of MAGIC as magic! */
-#define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3
-#define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B
-
-#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2
-
-#define PTLRPC_MSG_VERSION 0x00000003
-#define LUSTRE_VERSION_MASK 0xffff0000
-#define LUSTRE_OBD_VERSION 0x00010000
-#define LUSTRE_MDS_VERSION 0x00020000
-#define LUSTRE_OST_VERSION 0x00030000
-#define LUSTRE_DLM_VERSION 0x00040000
-#define LUSTRE_LOG_VERSION 0x00050000
-#define LUSTRE_MGS_VERSION 0x00060000
-
-/**
- * Describes a range of sequence, lsr_start is included but lsr_end is
- * not in the range.
- * Same structure is used in fld module where lsr_index field holds mdt id
- * of the home mdt.
- */
-struct lu_seq_range {
- __u64 lsr_start;
- __u64 lsr_end;
- __u32 lsr_index;
- __u32 lsr_flags;
-};
-
-struct lu_seq_range_array {
- __u32 lsra_count;
- __u32 lsra_padding;
- struct lu_seq_range lsra_lsr[0];
-};
-
-#define LU_SEQ_RANGE_MDT 0x0
-#define LU_SEQ_RANGE_OST 0x1
-#define LU_SEQ_RANGE_ANY 0x3
-
-#define LU_SEQ_RANGE_MASK 0x3
-
-/** \defgroup lu_fid lu_fid
- * @{
- */
-
-/**
- * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
- * Deprecated since HSM and SOM attributes are now stored in separate on-disk
- * xattr.
- */
-enum lma_compat {
- LMAC_HSM = 0x00000001,
-/* LMAC_SOM = 0x00000002, obsolete since 2.8.0 */
- LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
- LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
- * under /O/<seq>/d<x>.
- */
-};
-
-/**
- * Masks for all features that should be supported by a Lustre version to
- * access a specific file.
- * This information is stored in lustre_mdt_attrs::lma_incompat.
- */
-enum lma_incompat {
- LMAI_RELEASED = 0x00000001, /* file is released */
- LMAI_AGENT = 0x00000002, /* agent inode */
- LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
- * is on the remote MDT
- */
-};
-
-#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)
-
-/**
- * fid constants
- */
-enum {
- /** LASTID file has zero OID */
- LUSTRE_FID_LASTID_OID = 0UL,
- /** initial fid id value */
- LUSTRE_FID_INIT_OID = 1UL
-};
-
-/* copytool uses a 32b bitmask field to encode archive-Ids during register
- * with MDT thru kuc.
- * archive num = 0 => all
- * archive num from 1 to 32
- */
-#define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)
-
-/**
- * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
- * inodes in the IGIF namespace, so these reserved SEQ numbers can be
- * used for other purposes and not risk collisions with existing inodes.
- *
- * Different FID Format
- * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
- */
-enum fid_seq {
- FID_SEQ_OST_MDT0 = 0,
- FID_SEQ_LLOG = 1, /* unnamed llogs */
- FID_SEQ_ECHO = 2,
- FID_SEQ_OST_MDT1 = 3,
- FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
- FID_SEQ_LLOG_NAME = 10, /* named llogs */
- FID_SEQ_RSVD = 11,
- FID_SEQ_IGIF = 12,
- FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
- FID_SEQ_IDIF = 0x100000000ULL,
- FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
- /* Normal FID sequence starts from this value, i.e. 1<<33 */
- FID_SEQ_START = 0x200000000ULL,
- /* sequence for local pre-defined FIDs listed in local_oid */
- FID_SEQ_LOCAL_FILE = 0x200000001ULL,
- FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
- /* sequence is used for local named objects FIDs generated
- * by local_object_storage library
- */
- FID_SEQ_LOCAL_NAME = 0x200000003ULL,
- /* Because current FLD will only cache the fid sequence, instead
- * of oid on the client side, if the FID needs to be exposed to
- * clients sides, it needs to make sure all of fids under one
- * sequence will be located in one MDT.
- */
- FID_SEQ_SPECIAL = 0x200000004ULL,
- FID_SEQ_QUOTA = 0x200000005ULL,
- FID_SEQ_QUOTA_GLB = 0x200000006ULL,
- FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */
- FID_SEQ_NORMAL = 0x200000400ULL,
- FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
-};
-
-#define OBIF_OID_MAX_BITS 32
-#define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS)
-#define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1)
-#define IDIF_OID_MAX_BITS 48
-#define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS)
-#define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1)
-
-/** OID for FID_SEQ_SPECIAL */
-enum special_oid {
- /* Big Filesystem Lock to serialize rename operations */
- FID_OID_SPECIAL_BFL = 1UL,
-};
-
-/** OID for FID_SEQ_DOT_LUSTRE */
-enum dot_lustre_oid {
- FID_OID_DOT_LUSTRE = 1UL,
- FID_OID_DOT_LUSTRE_OBF = 2UL,
-};
-
-/** OID for FID_SEQ_ROOT */
-enum root_oid {
- FID_OID_ROOT = 1UL,
- FID_OID_ECHO_ROOT = 2UL,
-};
-
-/** @} lu_fid */
-
-/** \defgroup lu_dir lu_dir
- * @{
- */
-
-/**
- * Enumeration of possible directory entry attributes.
- *
- * Attributes follow directory entry header in the order they appear in this
- * enumeration.
- */
-enum lu_dirent_attrs {
- LUDA_FID = 0x0001,
- LUDA_TYPE = 0x0002,
- LUDA_64BITHASH = 0x0004,
-};
-
-/**
- * Layout of readdir pages, as transmitted on wire.
- */
-struct lu_dirent {
- /** valid if LUDA_FID is set. */
- struct lu_fid lde_fid;
- /** a unique entry identifier: a hash or an offset. */
- __u64 lde_hash;
- /** total record length, including all attributes. */
- __u16 lde_reclen;
- /** name length */
- __u16 lde_namelen;
- /** optional variable size attributes following this entry.
- * taken from enum lu_dirent_attrs.
- */
- __u32 lde_attrs;
- /** name is followed by the attributes indicated in ->ldp_attrs, in
- * their natural order. After the last attribute, padding bytes are
- * added to make ->lde_reclen a multiple of 8.
- */
- char lde_name[0];
-};
-
-/*
- * Definitions of optional directory entry attributes formats.
- *
- * Individual attributes do not have their length encoded in a generic way. It
- * is assumed that consumer of an attribute knows its format. This means that
- * it is impossible to skip over an unknown attribute, except by skipping over all
- * remaining attributes (by using ->lde_reclen), which is not too
- * constraining, because new server versions will append new attributes at
- * the end of an entry.
- */
-
-/**
- * Fid directory attribute: a fid of an object referenced by the entry. This
- * will be almost always requested by the client and supplied by the server.
- *
- * Aligned to 8 bytes.
- */
-/* To have compatibility with 1.8, lets have fid in lu_dirent struct. */
-
-/**
- * File type.
- *
- * Aligned to 2 bytes.
- */
-struct luda_type {
- __u16 lt_type;
-};
-
-#ifndef IFSHIFT
-#define IFSHIFT 12
-#endif
-
-#ifndef IFTODT
-#define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
-#endif
-#ifndef DTTOIF
-#define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
-#endif
-
-struct lu_dirpage {
- __le64 ldp_hash_start;
- __le64 ldp_hash_end;
- __le32 ldp_flags;
- __le32 ldp_pad0;
- struct lu_dirent ldp_entries[0];
-};
-
-enum lu_dirpage_flags {
- /**
- * dirpage contains no entry.
- */
- LDF_EMPTY = 1 << 0,
- /**
- * last entry's lde_hash equals ldp_hash_end.
- */
- LDF_COLLIDE = 1 << 1
-};
-
-static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
-{
- if (__le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
- return NULL;
- else
- return dp->ldp_entries;
-}
-
-static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
-{
- struct lu_dirent *next;
-
- if (__le16_to_cpu(ent->lde_reclen) != 0)
- next = ((void *)ent) + __le16_to_cpu(ent->lde_reclen);
- else
- next = NULL;
-
- return next;
-}
-
-static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr)
-{
- size_t size;
-
- if (attr & LUDA_TYPE) {
- const size_t align = sizeof(struct luda_type) - 1;
-
- size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
- size += sizeof(struct luda_type);
- } else {
- size = sizeof(struct lu_dirent) + namelen;
- }
-
- return (size + 7) & ~7;
-}
-
-#define MDS_DIR_END_OFF 0xfffffffffffffffeULL
-
-/**
- * MDS_READPAGE page size
- *
- * This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_SIZE because the client needs to
- * access the struct lu_dirpage header packed at the beginning of
- * the "page" and without this there isn't any way to know find the
- * lu_dirpage header is if client and server PAGE_SIZE differ.
- */
-#define LU_PAGE_SHIFT 12
-#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
-#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
-
-#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
-
-/** @} lu_dir */
-
-struct lustre_handle {
- __u64 cookie;
-};
-
-#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
-
-static inline bool lustre_handle_is_used(const struct lustre_handle *lh)
-{
- return lh->cookie != 0ull;
-}
-
-static inline bool lustre_handle_equal(const struct lustre_handle *lh1,
- const struct lustre_handle *lh2)
-{
- return lh1->cookie == lh2->cookie;
-}
-
-static inline void lustre_handle_copy(struct lustre_handle *tgt,
- const struct lustre_handle *src)
-{
- tgt->cookie = src->cookie;
-}
-
-/* flags for lm_flags */
-#define MSGHDR_AT_SUPPORT 0x1
-#define MSGHDR_CKSUM_INCOMPAT18 0x2
-
-#define lustre_msg lustre_msg_v2
-/* we depend on this structure to be 8-byte aligned */
-/* this type is only endian-adjusted in lustre_unpack_msg() */
-struct lustre_msg_v2 {
- __u32 lm_bufcount;
- __u32 lm_secflvr;
- __u32 lm_magic;
- __u32 lm_repsize;
- __u32 lm_cksum;
- __u32 lm_flags;
- __u32 lm_padding_2;
- __u32 lm_padding_3;
- __u32 lm_buflens[0];
-};
-
-/* without gss, ptlrpc_body is put at the first buffer. */
-#define PTLRPC_NUM_VERSIONS 4
-
-struct ptlrpc_body_v3 {
- struct lustre_handle pb_handle;
- __u32 pb_type;
- __u32 pb_version;
- __u32 pb_opc;
- __u32 pb_status;
- __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
- __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
- __u16 pb_padding0;
- __u32 pb_padding1;
- __u64 pb_last_committed;
- __u64 pb_transno;
- __u32 pb_flags;
- __u32 pb_op_flags;
- __u32 pb_conn_cnt;
- __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
- __u32 pb_service_time; /* for rep, actual service time */
- __u32 pb_limit;
- __u64 pb_slv;
- /* VBR: pre-versions */
- __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
- __u64 pb_mbits; /**< match bits for bulk request */
- /* padding for future needs */
- __u64 pb_padding64_0;
- __u64 pb_padding64_1;
- __u64 pb_padding64_2;
- char pb_jobid[LUSTRE_JOBID_SIZE];
-};
-
-#define ptlrpc_body ptlrpc_body_v3
-
-struct ptlrpc_body_v2 {
- struct lustre_handle pb_handle;
- __u32 pb_type;
- __u32 pb_version;
- __u32 pb_opc;
- __u32 pb_status;
- __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
- __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
- __u16 pb_padding0;
- __u32 pb_padding1;
- __u64 pb_last_committed;
- __u64 pb_transno;
- __u32 pb_flags;
- __u32 pb_op_flags;
- __u32 pb_conn_cnt;
- __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
- __u32 pb_service_time; /* for rep, actual service time, also used for
- * net_latency of req
- */
- __u32 pb_limit;
- __u64 pb_slv;
- /* VBR: pre-versions */
- __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
- __u64 pb_mbits; /**< unused in V2 */
- /* padding for future needs */
- __u64 pb_padding64_0;
- __u64 pb_padding64_1;
- __u64 pb_padding64_2;
-};
-
-/* message body offset for lustre_msg_v2 */
-/* ptlrpc body offset in all request/reply messages */
-#define MSG_PTLRPC_BODY_OFF 0
-
-/* normal request/reply message record offset */
-#define REQ_REC_OFF 1
-#define REPLY_REC_OFF 1
-
-/* ldlm request message body offset */
-#define DLM_LOCKREQ_OFF 1 /* lockreq offset */
-#define DLM_REQ_REC_OFF 2 /* normal dlm request record offset */
-
-/* ldlm intent lock message body offset */
-#define DLM_INTENT_IT_OFF 2 /* intent lock it offset */
-#define DLM_INTENT_REC_OFF 3 /* intent lock record offset */
-
-/* ldlm reply message body offset */
-#define DLM_LOCKREPLY_OFF 1 /* lockrep offset */
-#define DLM_REPLY_REC_OFF 2 /* reply record offset */
-
-/** only use in req->rq_{req,rep}_swab_mask */
-#define MSG_PTLRPC_HEADER_OFF 31
-
-/* Flags that are operation-specific go in the top 16 bits. */
-#define MSG_OP_FLAG_MASK 0xffff0000
-#define MSG_OP_FLAG_SHIFT 16
-
-/* Flags that apply to all requests are in the bottom 16 bits */
-#define MSG_GEN_FLAG_MASK 0x0000ffff
-#define MSG_LAST_REPLAY 0x0001
-#define MSG_RESENT 0x0002
-#define MSG_REPLAY 0x0004
-/* #define MSG_AT_SUPPORT 0x0008
- * This was used in early prototypes of adaptive timeouts, and while there
- * shouldn't be any users of that code there also isn't a need for using this
- * bits. Defer usage until at least 1.10 to avoid potential conflict.
- */
-#define MSG_DELAY_REPLAY 0x0010
-#define MSG_VERSION_REPLAY 0x0020
-#define MSG_REQ_REPLAY_DONE 0x0040
-#define MSG_LOCK_REPLAY_DONE 0x0080
-
-/*
- * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
- */
-
-#define MSG_CONNECT_RECOVERING 0x00000001
-#define MSG_CONNECT_RECONNECT 0x00000002
-#define MSG_CONNECT_REPLAYABLE 0x00000004
-/*#define MSG_CONNECT_PEER 0x8 */
-#define MSG_CONNECT_LIBCLIENT 0x00000010
-#define MSG_CONNECT_INITIAL 0x00000020
-#define MSG_CONNECT_ASYNC 0x00000040
-#define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */
-#define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */
-
-/* Connect flags */
-#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/
-#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */
-#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */
-#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */
-#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */
-#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */
-#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */
-#define OBD_CONNECT_ACL 0x80ULL /*access control lists */
-#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */
-#define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/
-#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */
-#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */
-#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/
-#define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated.
- *We do not support JOIN FILE
- *anymore, reserve this flags
- *just for preventing such bit
- *to be reused.
- */
-#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
-#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
-#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /* Remote client, never used
- * in production. Removed in
- * 2.9. Keep this flag to
- * avoid reuse.
- */
-#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /* Remote client by force,
- * never used in production.
- * Removed in 2.9. Keep this
- * flag to avoid reuse
- */
-#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
-#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
-#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
-#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */
-#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */
-#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */
-#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */
-#define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */
-#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */
-#define OBD_CONNECT_REAL 0x8000000ULL /* obsolete since 2.8 */
-#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */
-#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/
-#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */
-#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */
-#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
-#define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */
-#define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */
-#define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */
-#define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */
-#define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */
-#define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits
- * directory hash
- */
-#define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */
-#define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */
-#define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */
-#define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */
-#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
- * RPC error properly
- */
-#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
- * finer space reservation
- */
-#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
- * policy and 2.x server
- */
-#define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */
-#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
-#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
-#define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */
-#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
-#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
-#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
-#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack
- * name in request
- */
-#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */
-#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
-#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify
- * RPCs in parallel
- */
-#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL/* striped DNE dir */
-#define OBD_CONNECT_SUBTREE 0x800000000000000ULL /* fileset mount */
-#define OBD_CONNECT_LOCK_AHEAD 0x1000000000000000ULL /* lock ahead */
-/** bulk matchbits is sent within ptlrpc_body */
-#define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL
-#define OBD_CONNECT_OBDOPACK 0x4000000000000000ULL /* compact OUT obdo */
-#define OBD_CONNECT_FLAGS2 0x8000000000000000ULL /* second flags word */
-
-/* XXX README XXX:
- * Please DO NOT add flag values here before first ensuring that this same
- * flag value is not in use on some other branch. Please clear any such
- * changes with senior engineers before starting to use a new flag. Then,
- * submit a small patch against EVERY branch that ONLY adds the new flag,
- * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
- * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
- * can be approved and landed easily to reserve the flag for future use.
- */
-
-/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
- * connection. It is a temporary bug fix for Imperative Recovery interop
- * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
- * 2.2 clients/servers is no longer needed. LU-1252/LU-1644.
- */
-#define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS
-
-#define OCD_HAS_FLAG(ocd, flg) \
- (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
-
-/* Features required for this version of the client to work with server */
-#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
- OBD_CONNECT_FULL20)
-
-/* This structure is used for both request and reply.
- *
- * If we eventually have separate connect data for different types, which we
- * almost certainly will, then perhaps we stick a union in here.
- */
-struct obd_connect_data {
- __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
- __u32 ocd_version; /* lustre release version number */
- __u32 ocd_grant; /* initial cache grant amount (bytes) */
- __u32 ocd_index; /* LOV index to connect to */
- __u32 ocd_brw_size; /* Maximum BRW size in bytes */
- __u64 ocd_ibits_known; /* inode bits this client understands */
- __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
- __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
- __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
- __u32 ocd_unused; /* also fix lustre_swab_connect */
- __u64 ocd_transno; /* first transno from client to be replayed */
- __u32 ocd_group; /* MDS group on OST */
- __u32 ocd_cksum_types; /* supported checksum algorithms */
- __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
- __u32 ocd_instance; /* instance # of this target */
- __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
- /* Fields after ocd_maxbytes are only accessible by the receiver
- * if the corresponding flag in ocd_connect_flags is set. Accessing
- * any field after ocd_maxbytes on the receiver without a valid flag
- * may result in out-of-bound memory access and kernel oops.
- */
- __u16 ocd_maxmodrpcs; /* Maximum modify RPCs in parallel */
- __u16 padding0; /* added 2.1.0. also fix lustre_swab_connect */
- __u32 padding1; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 ocd_connect_flags2;
- __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding6; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding7; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding8; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding9; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingA; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingB; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingC; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingD; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingE; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingF; /* added 2.1.0. also fix lustre_swab_connect */
-};
-
-/* XXX README XXX:
- * Please DO NOT use any fields here before first ensuring that this same
- * field is not in use on some other branch. Please clear any such changes
- * with senior engineers before starting to use a new field. Then, submit
- * a small patch against EVERY branch that ONLY adds the new field along with
- * the matching OBD_CONNECT flag, so that can be approved and landed easily to
- * reserve the flag for future use.
- */
-
-/*
- * Supported checksum algorithms. Up to 32 checksum types are supported.
- * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
- * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
- * algorithm and also the OBD_FL_CKSUM* flags.
- */
-enum cksum_type {
- OBD_CKSUM_CRC32 = 0x00000001,
- OBD_CKSUM_ADLER = 0x00000002,
- OBD_CKSUM_CRC32C = 0x00000004,
-};
-
-/*
- * OST requests: OBDO & OBD request records
- */
-
-/* opcodes */
-enum ost_cmd {
- OST_REPLY = 0, /* reply ? */
- OST_GETATTR = 1,
- OST_SETATTR = 2,
- OST_READ = 3,
- OST_WRITE = 4,
- OST_CREATE = 5,
- OST_DESTROY = 6,
- OST_GET_INFO = 7,
- OST_CONNECT = 8,
- OST_DISCONNECT = 9,
- OST_PUNCH = 10,
- OST_OPEN = 11,
- OST_CLOSE = 12,
- OST_STATFS = 13,
- OST_SYNC = 16,
- OST_SET_INFO = 17,
- OST_QUOTACHECK = 18, /* not used since 2.4 */
- OST_QUOTACTL = 19,
- OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
- OST_LAST_OPC
-};
-#define OST_FIRST_OPC OST_REPLY
-
-enum obdo_flags {
- OBD_FL_INLINEDATA = 0x00000001,
- OBD_FL_OBDMDEXISTS = 0x00000002,
- OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
- OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
- OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/
- OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
- OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
- OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
- OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
- OBD_FL_CREATE_CROW = 0x00000400, /* object should be create on write */
- OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
- OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */
- OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */
- OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
- OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */
- OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
- OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
- OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
- * XXX: obsoleted - reserved for old
- * clients prior than 2.2
- */
- OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
- OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
- OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
- OBD_FL_SHORT_IO = 0x00400000, /* short io request */
-
- /* Note that while these checksum values are currently separate bits,
- * in 2.x we can actually allow all values from 1-31 if we wanted.
- */
- OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
- OBD_FL_CKSUM_CRC32C,
-
- /* mask for local-only flag, which won't be sent over network */
- OBD_FL_LOCAL_MASK = 0xF0000000,
-};
-
-/*
- * All LOV EA magics should have the same postfix, if some new version
- * Lustre instroduces new LOV EA magic, then when down-grade to an old
- * Lustre, even though the old version system does not recognizes such
- * new magic, it still can distinguish the corrupted cases by checking
- * the magic's postfix.
- */
-#define LOV_MAGIC_MAGIC 0x0BD0
-#define LOV_MAGIC_MASK 0xFFFF
-
-#define LOV_MAGIC_V1 (0x0BD10000 | LOV_MAGIC_MAGIC)
-#define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC)
-#define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC)
-#define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC)
-/* reserved for specifying OSTs */
-#define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC)
-#define LOV_MAGIC LOV_MAGIC_V1
-
-/*
- * magic for fully defined striping
- * the idea is that we should have different magics for striping "hints"
- * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
- * lov_mds_md_v[13]). at the moment the magics are used in wire protocol,
- * we can't just change it w/o long way preparation, but we still need a
- * mechanism to allow LOD to differentiate hint versus ready striping.
- * so, at the moment we do a trick: MDT knows what to expect from request
- * depending on the case (replay uses ready striping, non-replay req uses
- * hints), so MDT replaces magic with appropriate one and now LOD can
- * easily understand what's inside -bzzz
- */
-#define LOV_MAGIC_V1_DEF 0x0CD10BD0
-#define LOV_MAGIC_V3_DEF 0x0CD30BD0
-
-#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK)
-#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK)
-
-#define lov_ost_data lov_ost_data_v1
-struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/
- struct ost_id l_ost_oi; /* OST object ID */
- __u32 l_ost_gen; /* generation of this l_ost_idx */
- __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
-};
-
-#define lov_mds_md lov_mds_md_v1
-struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */
- __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- struct ost_id lmm_oi; /* LOV object ID */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- /* lmm_stripe_count used to be __u32 */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- __u16 lmm_layout_gen; /* layout generation number */
- struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
-};
-
-#define MAX_MD_SIZE \
- (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
-#define MIN_MD_SIZE \
- (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
-
-#define XATTR_NAME_ACL_ACCESS "system.posix_acl_access"
-#define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default"
-#define XATTR_USER_PREFIX "user."
-#define XATTR_TRUSTED_PREFIX "trusted."
-#define XATTR_SECURITY_PREFIX "security."
-#define XATTR_LUSTRE_PREFIX "lustre."
-
-#define XATTR_NAME_LOV "trusted.lov"
-#define XATTR_NAME_LMA "trusted.lma"
-#define XATTR_NAME_LMV "trusted.lmv"
-#define XATTR_NAME_DEFAULT_LMV "trusted.dmv"
-#define XATTR_NAME_LINK "trusted.link"
-#define XATTR_NAME_FID "trusted.fid"
-#define XATTR_NAME_VERSION "trusted.version"
-#define XATTR_NAME_SOM "trusted.som"
-#define XATTR_NAME_HSM "trusted.hsm"
-#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
-
-struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
- __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- struct ost_id lmm_oi; /* LOV object ID */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- /* lmm_stripe_count used to be __u32 */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- __u16 lmm_layout_gen; /* layout generation number */
- char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */
- struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
-};
-
-static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
-{
- if (lmm_magic == LOV_MAGIC_V3)
- return sizeof(struct lov_mds_md_v3) +
- stripes * sizeof(struct lov_ost_data_v1);
- else
- return sizeof(struct lov_mds_md_v1) +
- stripes * sizeof(struct lov_ost_data_v1);
-}
-
-static inline __u32
-lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
-{
- switch (lmm_magic) {
- case LOV_MAGIC_V1: {
- struct lov_mds_md_v1 lmm;
-
- if (buf_size < sizeof(lmm))
- return 0;
-
- return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
- }
- case LOV_MAGIC_V3: {
- struct lov_mds_md_v3 lmm;
-
- if (buf_size < sizeof(lmm))
- return 0;
-
- return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
- }
- default:
- return 0;
- }
-}
-
-#define OBD_MD_FLID (0x00000001ULL) /* object ID */
-#define OBD_MD_FLATIME (0x00000002ULL) /* access time */
-#define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
-#define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
-#define OBD_MD_FLSIZE (0x00000010ULL) /* size */
-#define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */
-#define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */
-#define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
-#define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */
-#define OBD_MD_FLUID (0x00000200ULL) /* user ID */
-#define OBD_MD_FLGID (0x00000400ULL) /* group ID */
-#define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */
-#define OBD_MD_FLNLINK (0x00002000ULL) /* link count */
-#define OBD_MD_FLGENER (0x00004000ULL) /* generation number */
-/*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */
-#define OBD_MD_FLRDEV (0x00010000ULL) /* device number */
-#define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */
-#define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */
-#define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */
-#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
-#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
-/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
-/* OBD_MD_FLCOOKIE (0x00800000ULL) obsolete in 2.8 */
-#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
-#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
-#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
- /* ->mds if epoch opens or closes
- */
-#define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
-#define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
-#define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
-#define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
-#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
-
-#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
-#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
-#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
-#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
-
-#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
-#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
-#define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
-#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
-/* OBD_MD_FLRMTPERM (0x0000010000000000ULL) remote perm, obsolete */
-#define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
-#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
-#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
-#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
-#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
- * under lock; for xattr
- * requests means the
- * client holds the lock
- */
-#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
-
-/* OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) lfs lsetfacl, obsolete */
-/* OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) lfs lgetfacl, obsolete */
-/* OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) lfs rsetfacl, obsolete */
-/* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
-
-#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
-#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent
- * executed
- */
-
-#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */
-
-#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
- OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
- OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
- OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
- OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
-
-#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
-
-/* don't forget obdo_fid which is way down at the bottom so it can
- * come after the definition of llog_cookie
- */
-
-enum hss_valid {
- HSS_SETMASK = 0x01,
- HSS_CLEARMASK = 0x02,
- HSS_ARCHIVE_ID = 0x04,
-};
-
-struct hsm_state_set {
- __u32 hss_valid;
- __u32 hss_archive_id;
- __u64 hss_setmask;
- __u64 hss_clearmask;
-};
-
-/* ost_body.data values for OST_BRW */
-
-#define OBD_BRW_READ 0x01
-#define OBD_BRW_WRITE 0x02
-#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
-#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
- * transfer and is not accounted in
- * the grant.
- */
-#define OBD_BRW_CHECK 0x10
-#define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
-#define OBD_BRW_GRANTED 0x40 /* the ost manages this */
-#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
-#define OBD_BRW_NOQUOTA 0x100
-#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
-#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
-#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
-#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
-#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
-#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
- * that the client is running low on
- * space for unstable pages; asking
- * it to sync quickly
- */
-
-#define OBD_OBJECT_EOF LUSTRE_EOF
-
-#define OST_MIN_PRECREATE 32
-#define OST_MAX_PRECREATE 20000
-
-struct obd_ioobj {
- struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
- __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
- * now (PTLRPC_BULK_OPS_COUNT - 1) in
- * high 16 bits in 2.4 and later
- */
- __u32 ioo_bufcnt; /* number of niobufs for this object */
-};
-
-/*
- * NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in
- * ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS.
- * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits.
- */
-#define IOOBJ_MAX_BRW_BITS 16
-#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
-#define ioobj_max_brw_set(ioo, num) \
-do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
-
-/* multiple of 8 bytes => can array */
-struct niobuf_remote {
- __u64 rnb_offset;
- __u32 rnb_len;
- __u32 rnb_flags;
-};
-
-/* lock value block communicated between the filter and llite */
-
-/* OST_LVB_ERR_INIT is needed because the return code in rc is
- * negative, i.e. because ((MASK + rc) & MASK) != MASK.
- */
-#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
-#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
-#define OST_LVB_IS_ERR(blocks) \
- ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
-#define OST_LVB_SET_ERR(blocks, rc) \
- do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
-#define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
-
-struct ost_lvb_v1 {
- __u64 lvb_size;
- __s64 lvb_mtime;
- __s64 lvb_atime;
- __s64 lvb_ctime;
- __u64 lvb_blocks;
-};
-
-struct ost_lvb {
- __u64 lvb_size;
- __s64 lvb_mtime;
- __s64 lvb_atime;
- __s64 lvb_ctime;
- __u64 lvb_blocks;
- __u32 lvb_mtime_ns;
- __u32 lvb_atime_ns;
- __u32 lvb_ctime_ns;
- __u32 lvb_padding;
-};
-
-/*
- * lquota data structures
- */
-
-/* The lquota_id structure is a union of all the possible identifier types that
- * can be used with quota, this includes:
- * - 64-bit user ID
- * - 64-bit group ID
- * - a FID which can be used for per-directory quota in the future
- */
-union lquota_id {
- struct lu_fid qid_fid; /* FID for per-directory quota */
- __u64 qid_uid; /* user identifier */
- __u64 qid_gid; /* group identifier */
-};
-
-/* quotactl management */
-struct obd_quotactl {
- __u32 qc_cmd;
- __u32 qc_type; /* see Q_* flag below */
- __u32 qc_id;
- __u32 qc_stat;
- struct obd_dqinfo qc_dqinfo;
- struct obd_dqblk qc_dqblk;
-};
-
-#define Q_COPY(out, in, member) (out)->member = (in)->member
-
-#define QCTL_COPY(out, in) \
-do { \
- Q_COPY(out, in, qc_cmd); \
- Q_COPY(out, in, qc_type); \
- Q_COPY(out, in, qc_id); \
- Q_COPY(out, in, qc_stat); \
- Q_COPY(out, in, qc_dqinfo); \
- Q_COPY(out, in, qc_dqblk); \
-} while (0)
-
-/* Data structures associated with the quota locks */
-
-/* Glimpse descriptor used for the index & per-ID quota locks */
-struct ldlm_gl_lquota_desc {
- union lquota_id gl_id; /* quota ID subject to the glimpse */
- __u64 gl_flags; /* see LQUOTA_FL* below */
- __u64 gl_ver; /* new index version */
- __u64 gl_hardlimit; /* new hardlimit or qunit value */
- __u64 gl_softlimit; /* new softlimit */
- __u64 gl_time;
- __u64 gl_pad2;
-};
-
-/* quota glimpse flags */
-#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
-
-/* LVB used with quota (global and per-ID) locks */
-struct lquota_lvb {
- __u64 lvb_flags; /* see LQUOTA_FL* above */
- __u64 lvb_id_may_rel; /* space that might be released later */
- __u64 lvb_id_rel; /* space released by the slave for this ID */
- __u64 lvb_id_qunit; /* current qunit value */
- __u64 lvb_pad1;
-};
-
-/* op codes */
-enum quota_cmd {
- QUOTA_DQACQ = 601,
- QUOTA_DQREL = 602,
- QUOTA_LAST_OPC
-};
-#define QUOTA_FIRST_OPC QUOTA_DQACQ
-
-/*
- * MDS REQ RECORDS
- */
-
-/* opcodes */
-enum mds_cmd {
- MDS_GETATTR = 33,
- MDS_GETATTR_NAME = 34,
- MDS_CLOSE = 35,
- MDS_REINT = 36,
- MDS_READPAGE = 37,
- MDS_CONNECT = 38,
- MDS_DISCONNECT = 39,
- MDS_GETSTATUS = 40,
- MDS_STATFS = 41,
- MDS_PIN = 42, /* obsolete, never used in a release */
- MDS_UNPIN = 43, /* obsolete, never used in a release */
- MDS_SYNC = 44,
- MDS_DONE_WRITING = 45, /* obsolete since 2.8.0 */
- MDS_SET_INFO = 46,
- MDS_QUOTACHECK = 47, /* not used since 2.4 */
- MDS_QUOTACTL = 48,
- MDS_GETXATTR = 49,
- MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
- MDS_WRITEPAGE = 51,
- MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */
- MDS_GET_INFO = 53,
- MDS_HSM_STATE_GET = 54,
- MDS_HSM_STATE_SET = 55,
- MDS_HSM_ACTION = 56,
- MDS_HSM_PROGRESS = 57,
- MDS_HSM_REQUEST = 58,
- MDS_HSM_CT_REGISTER = 59,
- MDS_HSM_CT_UNREGISTER = 60,
- MDS_SWAP_LAYOUTS = 61,
- MDS_LAST_OPC
-};
-
-#define MDS_FIRST_OPC MDS_GETATTR
-
-/*
- * Do not exceed 63
- */
-
-enum mdt_reint_cmd {
- REINT_SETATTR = 1,
- REINT_CREATE = 2,
- REINT_LINK = 3,
- REINT_UNLINK = 4,
- REINT_RENAME = 5,
- REINT_OPEN = 6,
- REINT_SETXATTR = 7,
- REINT_RMENTRY = 8,
- REINT_MIGRATE = 9,
- REINT_MAX
-};
-
-/* the disposition of the intent outlines what was executed */
-#define DISP_IT_EXECD 0x00000001
-#define DISP_LOOKUP_EXECD 0x00000002
-#define DISP_LOOKUP_NEG 0x00000004
-#define DISP_LOOKUP_POS 0x00000008
-#define DISP_OPEN_CREATE 0x00000010
-#define DISP_OPEN_OPEN 0x00000020
-#define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
-#define DISP_ENQ_OPEN_REF 0x00800000
-#define DISP_ENQ_CREATE_REF 0x01000000
-#define DISP_OPEN_LOCK 0x02000000
-#define DISP_OPEN_LEASE 0x04000000
-#define DISP_OPEN_STRIPE 0x08000000
-#define DISP_OPEN_DENY 0x10000000
-
-/* INODE LOCK PARTS */
-#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
- * was used to protect permission (mode,
- * owner, group etc) before 2.4.
- */
-#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
-#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
-#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
-
-/* The PERM bit is added int 2.4, and it is used to protect permission(mode,
- * owner, group, acl etc), so to separate the permission from LOOKUP lock.
- * Because for remote directories(in DNE), these locks will be granted by
- * different MDTs(different ldlm namespace).
- *
- * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together.
- * For Remote directory, the master MDT, where the remote directory is, will
- * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
- * will grant LOOKUP_LOCK.
- */
-#define MDS_INODELOCK_PERM 0x000010
-#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
-
-#define MDS_INODELOCK_MAXSHIFT 5
-/* This FULL lock is useful to take on unlink sort of operations */
-#define MDS_INODELOCK_FULL ((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1)
-
-/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
- * but was moved into name[1] along with the OID to avoid consuming the
- * name[2,3] fields that need to be used for the quota id (also a FID).
- */
-enum {
- LUSTRE_RES_ID_SEQ_OFF = 0,
- LUSTRE_RES_ID_VER_OID_OFF = 1,
- LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
- LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
- LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
- LUSTRE_RES_ID_HSH_OFF = 3
-};
-
-#define MDS_STATUS_CONN 1
-#define MDS_STATUS_LOV 2
-
-/* these should be identical to their EXT4_*_FL counterparts, they are
- * redefined here only to avoid dragging in fs/ext4/ext4.h
- */
-#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
-#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
-#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
-#define LUSTRE_NODUMP_FL 0x00000040 /* do not dump file */
-#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
-#define LUSTRE_INDEX_FL 0x00001000 /* hash-indexed directory */
-#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
-#define LUSTRE_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
-#define LUSTRE_DIRECTIO_FL 0x00100000 /* Use direct i/o */
-#define LUSTRE_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
-
-/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
- * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
- * protocol equivalents of LDISKFS_*_FL values stored on disk, while
- * the S_* flags are kernel-internal values that change between kernel
- * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
- * See b=16526 for a full history.
- */
-static inline int ll_ext_to_inode_flags(int flags)
-{
- return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
- ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
- ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
- ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
- ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
-}
-
-static inline int ll_inode_to_ext_flags(int iflags)
-{
- return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
- ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
- ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
- ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
- ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
-}
-
-/* 64 possible states */
-enum md_transient_state {
- MS_RESTORE = (1 << 0), /* restore is running */
-};
-
-struct mdt_body {
- struct lu_fid mbo_fid1;
- struct lu_fid mbo_fid2;
- struct lustre_handle mbo_handle;
- __u64 mbo_valid;
- __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */
- __s64 mbo_mtime;
- __s64 mbo_atime;
- __s64 mbo_ctime;
- __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */
- __u64 mbo_ioepoch;
- __u64 mbo_t_state; /* transient file state defined in
- * enum md_transient_state
- * was "ino" until 2.4.0
- */
- __u32 mbo_fsuid;
- __u32 mbo_fsgid;
- __u32 mbo_capability;
- __u32 mbo_mode;
- __u32 mbo_uid;
- __u32 mbo_gid;
- __u32 mbo_flags; /* LUSTRE_*_FL file attributes */
- __u32 mbo_rdev;
- __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */
- __u32 mbo_unused2; /* was "generation" until 2.4.0 */
- __u32 mbo_suppgid;
- __u32 mbo_eadatasize;
- __u32 mbo_aclsize;
- __u32 mbo_max_mdsize;
- __u32 mbo_unused3; /* was max_cookiesize until 2.8 */
- __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */
- __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */
- __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */
- __u64 mbo_padding_6;
- __u64 mbo_padding_7;
- __u64 mbo_padding_8;
- __u64 mbo_padding_9;
- __u64 mbo_padding_10;
-}; /* 216 */
-
-struct mdt_ioepoch {
- struct lustre_handle mio_handle;
- __u64 mio_unused1; /* was ioepoch */
- __u32 mio_unused2; /* was flags */
- __u32 mio_padding;
-};
-
-/* permissions for md_perm.mp_perm */
-enum {
- CFS_SETUID_PERM = 0x01,
- CFS_SETGID_PERM = 0x02,
- CFS_SETGRP_PERM = 0x04,
-};
-
-struct mdt_rec_setattr {
- __u32 sa_opcode;
- __u32 sa_cap;
- __u32 sa_fsuid;
- __u32 sa_fsuid_h;
- __u32 sa_fsgid;
- __u32 sa_fsgid_h;
- __u32 sa_suppgid;
- __u32 sa_suppgid_h;
- __u32 sa_padding_1;
- __u32 sa_padding_1_h;
- struct lu_fid sa_fid;
- __u64 sa_valid;
- __u32 sa_uid;
- __u32 sa_gid;
- __u64 sa_size;
- __u64 sa_blocks;
- __s64 sa_mtime;
- __s64 sa_atime;
- __s64 sa_ctime;
- __u32 sa_attr_flags;
- __u32 sa_mode;
- __u32 sa_bias; /* some operation flags */
- __u32 sa_padding_3;
- __u32 sa_padding_4;
- __u32 sa_padding_5;
-};
-
-/*
- * Attribute flags used in mdt_rec_setattr::sa_valid.
- * The kernel's #defines for ATTR_* should not be used over the network
- * since the client and MDS may run different kernels (see bug 13828)
- * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
- */
-#define MDS_ATTR_MODE 0x1ULL /* = 1 */
-#define MDS_ATTR_UID 0x2ULL /* = 2 */
-#define MDS_ATTR_GID 0x4ULL /* = 4 */
-#define MDS_ATTR_SIZE 0x8ULL /* = 8 */
-#define MDS_ATTR_ATIME 0x10ULL /* = 16 */
-#define MDS_ATTR_MTIME 0x20ULL /* = 32 */
-#define MDS_ATTR_CTIME 0x40ULL /* = 64 */
-#define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
-#define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
-#define MDS_ATTR_FORCE 0x200ULL /* = 512, Not a change, but a change it */
-#define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
-#define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
-#define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
-#define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
-#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path,
- * ie O_TRUNC
- */
-#define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
-
-#define MDS_FMODE_CLOSED 00000000
-#define MDS_FMODE_EXEC 00000004
-/* MDS_FMODE_EPOCH 01000000 obsolete since 2.8.0 */
-/* MDS_FMODE_TRUNC 02000000 obsolete since 2.8.0 */
-/* MDS_FMODE_SOM 04000000 obsolete since 2.8.0 */
-
-#define MDS_OPEN_CREATED 00000010
-#define MDS_OPEN_CROSS 00000020
-
-#define MDS_OPEN_CREAT 00000100
-#define MDS_OPEN_EXCL 00000200
-#define MDS_OPEN_TRUNC 00001000
-#define MDS_OPEN_APPEND 00002000
-#define MDS_OPEN_SYNC 00010000
-#define MDS_OPEN_DIRECTORY 00200000
-
-#define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
-#define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
-#define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
-#define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
- * We do not support JOIN FILE
- * anymore, reserve this flags
- * just for preventing such bit
- * to be reused.
- */
-
-#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
-#define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
-#define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */
-#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
-#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
- * hsm restore) */
-#define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
- unlinked */
-#define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
- * delegation, succeed if it's not
- * being opened with conflict mode.
- */
-#define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
-
-#define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \
- MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \
- MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \
- MDS_OPEN_RELEASE)
-
-enum mds_op_bias {
- MDS_CHECK_SPLIT = 1 << 0,
- MDS_CROSS_REF = 1 << 1,
- MDS_VTX_BYPASS = 1 << 2,
- MDS_PERM_BYPASS = 1 << 3,
-/* MDS_SOM = 1 << 4, obsolete since 2.8.0 */
- MDS_QUOTA_IGNORE = 1 << 5,
- MDS_CLOSE_CLEANUP = 1 << 6,
- MDS_KEEP_ORPHAN = 1 << 7,
- MDS_RECOV_OPEN = 1 << 8,
- MDS_DATA_MODIFIED = 1 << 9,
- MDS_CREATE_VOLATILE = 1 << 10,
- MDS_OWNEROVERRIDE = 1 << 11,
- MDS_HSM_RELEASE = 1 << 12,
- MDS_RENAME_MIGRATE = 1 << 13,
- MDS_CLOSE_LAYOUT_SWAP = 1 << 14,
-};
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_create {
- __u32 cr_opcode;
- __u32 cr_cap;
- __u32 cr_fsuid;
- __u32 cr_fsuid_h;
- __u32 cr_fsgid;
- __u32 cr_fsgid_h;
- __u32 cr_suppgid1;
- __u32 cr_suppgid1_h;
- __u32 cr_suppgid2;
- __u32 cr_suppgid2_h;
- struct lu_fid cr_fid1;
- struct lu_fid cr_fid2;
- struct lustre_handle cr_old_handle; /* handle in case of open replay */
- __s64 cr_time;
- __u64 cr_rdev;
- __u64 cr_ioepoch;
- __u64 cr_padding_1; /* rr_blocks */
- __u32 cr_mode;
- __u32 cr_bias;
- /* use of helpers set/get_mrc_cr_flags() is needed to access
- * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to
- * extend cr_flags size without breaking 1.8 compat
- */
- __u32 cr_flags_l; /* for use with open, low 32 bits */
- __u32 cr_flags_h; /* for use with open, high 32 bits */
- __u32 cr_umask; /* umask for create */
- __u32 cr_padding_4; /* rr_padding_4 */
-};
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_link {
- __u32 lk_opcode;
- __u32 lk_cap;
- __u32 lk_fsuid;
- __u32 lk_fsuid_h;
- __u32 lk_fsgid;
- __u32 lk_fsgid_h;
- __u32 lk_suppgid1;
- __u32 lk_suppgid1_h;
- __u32 lk_suppgid2;
- __u32 lk_suppgid2_h;
- struct lu_fid lk_fid1;
- struct lu_fid lk_fid2;
- __s64 lk_time;
- __u64 lk_padding_1; /* rr_atime */
- __u64 lk_padding_2; /* rr_ctime */
- __u64 lk_padding_3; /* rr_size */
- __u64 lk_padding_4; /* rr_blocks */
- __u32 lk_bias;
- __u32 lk_padding_5; /* rr_mode */
- __u32 lk_padding_6; /* rr_flags */
- __u32 lk_padding_7; /* rr_padding_2 */
- __u32 lk_padding_8; /* rr_padding_3 */
- __u32 lk_padding_9; /* rr_padding_4 */
-};
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_unlink {
- __u32 ul_opcode;
- __u32 ul_cap;
- __u32 ul_fsuid;
- __u32 ul_fsuid_h;
- __u32 ul_fsgid;
- __u32 ul_fsgid_h;
- __u32 ul_suppgid1;
- __u32 ul_suppgid1_h;
- __u32 ul_suppgid2;
- __u32 ul_suppgid2_h;
- struct lu_fid ul_fid1;
- struct lu_fid ul_fid2;
- __s64 ul_time;
- __u64 ul_padding_2; /* rr_atime */
- __u64 ul_padding_3; /* rr_ctime */
- __u64 ul_padding_4; /* rr_size */
- __u64 ul_padding_5; /* rr_blocks */
- __u32 ul_bias;
- __u32 ul_mode;
- __u32 ul_padding_6; /* rr_flags */
- __u32 ul_padding_7; /* rr_padding_2 */
- __u32 ul_padding_8; /* rr_padding_3 */
- __u32 ul_padding_9; /* rr_padding_4 */
-};
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_rename {
- __u32 rn_opcode;
- __u32 rn_cap;
- __u32 rn_fsuid;
- __u32 rn_fsuid_h;
- __u32 rn_fsgid;
- __u32 rn_fsgid_h;
- __u32 rn_suppgid1;
- __u32 rn_suppgid1_h;
- __u32 rn_suppgid2;
- __u32 rn_suppgid2_h;
- struct lu_fid rn_fid1;
- struct lu_fid rn_fid2;
- __s64 rn_time;
- __u64 rn_padding_1; /* rr_atime */
- __u64 rn_padding_2; /* rr_ctime */
- __u64 rn_padding_3; /* rr_size */
- __u64 rn_padding_4; /* rr_blocks */
- __u32 rn_bias; /* some operation flags */
- __u32 rn_mode; /* cross-ref rename has mode */
- __u32 rn_padding_5; /* rr_flags */
- __u32 rn_padding_6; /* rr_padding_2 */
- __u32 rn_padding_7; /* rr_padding_3 */
- __u32 rn_padding_8; /* rr_padding_4 */
-};
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_setxattr {
- __u32 sx_opcode;
- __u32 sx_cap;
- __u32 sx_fsuid;
- __u32 sx_fsuid_h;
- __u32 sx_fsgid;
- __u32 sx_fsgid_h;
- __u32 sx_suppgid1;
- __u32 sx_suppgid1_h;
- __u32 sx_suppgid2;
- __u32 sx_suppgid2_h;
- struct lu_fid sx_fid;
- __u64 sx_padding_1; /* These three are rr_fid2 */
- __u32 sx_padding_2;
- __u32 sx_padding_3;
- __u64 sx_valid;
- __s64 sx_time;
- __u64 sx_padding_5; /* rr_ctime */
- __u64 sx_padding_6; /* rr_size */
- __u64 sx_padding_7; /* rr_blocks */
- __u32 sx_size;
- __u32 sx_flags;
- __u32 sx_padding_8; /* rr_flags */
- __u32 sx_padding_9; /* rr_padding_2 */
- __u32 sx_padding_10; /* rr_padding_3 */
- __u32 sx_padding_11; /* rr_padding_4 */
-};
-
-/*
- * mdt_rec_reint is the template for all mdt_reint_xxx structures.
- * Do NOT change the size of various members, otherwise the value
- * will be broken in lustre_swab_mdt_rec_reint().
- *
- * If you add new members in other mdt_reint_xxx structures and need to use the
- * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
- */
-struct mdt_rec_reint {
- __u32 rr_opcode;
- __u32 rr_cap;
- __u32 rr_fsuid;
- __u32 rr_fsuid_h;
- __u32 rr_fsgid;
- __u32 rr_fsgid_h;
- __u32 rr_suppgid1;
- __u32 rr_suppgid1_h;
- __u32 rr_suppgid2;
- __u32 rr_suppgid2_h;
- struct lu_fid rr_fid1;
- struct lu_fid rr_fid2;
- __s64 rr_mtime;
- __s64 rr_atime;
- __s64 rr_ctime;
- __u64 rr_size;
- __u64 rr_blocks;
- __u32 rr_bias;
- __u32 rr_mode;
- __u32 rr_flags;
- __u32 rr_flags_h;
- __u32 rr_umask;
- __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
-};
-
-/* lmv structures */
-struct lmv_desc {
- __u32 ld_tgt_count; /* how many MDS's */
- __u32 ld_active_tgt_count; /* how many active */
- __u32 ld_default_stripe_count; /* how many objects are used */
- __u32 ld_pattern; /* default hash pattern */
- __u64 ld_default_hash_size;
- __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
- __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
- __u32 ld_qos_maxage; /* in second */
- __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
- __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
- struct obd_uuid ld_uuid;
-};
-
-/* LMV layout EA, and it will be stored both in master and slave object */
-struct lmv_mds_md_v1 {
- __u32 lmv_magic;
- __u32 lmv_stripe_count;
- __u32 lmv_master_mdt_index; /* On master object, it is master
- * MDT index, on slave object, it
- * is stripe index of the slave obj
- */
- __u32 lmv_hash_type; /* dir stripe policy, i.e. indicate
- * which hash function to be used,
- * Note: only lower 16 bits is being
- * used for now. Higher 16 bits will
- * be used to mark the object status,
- * for example migrating or dead.
- */
- __u32 lmv_layout_version; /* Used for directory restriping */
- __u32 lmv_padding1;
- __u64 lmv_padding2;
- __u64 lmv_padding3;
- char lmv_pool_name[LOV_MAXPOOLNAME + 1];/* pool name */
- struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */
-};
-
-#define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */
-#define LMV_MAGIC LMV_MAGIC_V1
-
-/* #define LMV_USER_MAGIC 0x0CD30CD0 */
-#define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */
-
-/*
- *Right now only the lower part(0-16bits) of lmv_hash_type is being used,
- * and the higher part will be the flag to indicate the status of object,
- * for example the object is being migrated. And the hash function
- * might be interpreted differently with different flags.
- */
-#define LMV_HASH_TYPE_MASK 0x0000ffff
-
-#define LMV_HASH_FLAG_MIGRATION 0x80000000
-#define LMV_HASH_FLAG_DEAD 0x40000000
-
-/**
- * The FNV-1a hash algorithm is as follows:
- * hash = FNV_offset_basis
- * for each octet_of_data to be hashed
- * hash = hash XOR octet_of_data
- * hash = hash × FNV_prime
- * return hash
- * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
- *
- * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source
- * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL
- **/
-#define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL
-#define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL
-static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size)
-{
- __u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS;
- const unsigned char *p = buf;
- size_t i;
-
- for (i = 0; i < size; i++) {
- hash ^= p[i];
- hash *= LUSTRE_FNV_1A_64_PRIME;
- }
-
- return hash;
-}
-
-union lmv_mds_md {
- __u32 lmv_magic;
- struct lmv_mds_md_v1 lmv_md_v1;
- struct lmv_user_md lmv_user_md;
-};
-
-static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
-{
- ssize_t len = -EINVAL;
-
- switch (lmm_magic) {
- case LMV_MAGIC_V1: {
- struct lmv_mds_md_v1 *lmm1;
-
- len = sizeof(*lmm1);
- len += stripe_count * sizeof(lmm1->lmv_stripe_fids[0]);
- break; }
- default:
- break;
- }
- return len;
-}
-
-static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm)
-{
- switch (__le32_to_cpu(lmm->lmv_magic)) {
- case LMV_MAGIC_V1:
- return __le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count);
- case LMV_USER_MAGIC:
- return __le32_to_cpu(lmm->lmv_user_md.lum_stripe_count);
- default:
- return -EINVAL;
- }
-}
-
-enum fld_rpc_opc {
- FLD_QUERY = 900,
- FLD_READ = 901,
- FLD_LAST_OPC,
- FLD_FIRST_OPC = FLD_QUERY
-};
-
-enum seq_rpc_opc {
- SEQ_QUERY = 700,
- SEQ_LAST_OPC,
- SEQ_FIRST_OPC = SEQ_QUERY
-};
-
-enum seq_op {
- SEQ_ALLOC_SUPER = 0,
- SEQ_ALLOC_META = 1
-};
-
-enum fld_op {
- FLD_CREATE = 0,
- FLD_DELETE = 1,
- FLD_LOOKUP = 2,
-};
-
-/*
- * LOV data structures
- */
-
-#define LOV_MAX_UUID_BUFFER_SIZE 8192
-/* The size of the buffer the lov/mdc reserves for the
- * array of UUIDs returned by the MDS. With the current
- * protocol, this will limit the max number of OSTs per LOV
- */
-
-#define LOV_DESC_MAGIC 0xB0CCDE5C
-#define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
-#define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
-
-/* LOV settings descriptor (should only contain static info) */
-struct lov_desc {
- __u32 ld_tgt_count; /* how many OBD's */
- __u32 ld_active_tgt_count; /* how many active */
- __u32 ld_default_stripe_count; /* how many objects are used */
- __u32 ld_pattern; /* default PATTERN_RAID0 */
- __u64 ld_default_stripe_size; /* in bytes */
- __u64 ld_default_stripe_offset; /* in bytes */
- __u32 ld_padding_0; /* unused */
- __u32 ld_qos_maxage; /* in second */
- __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
- __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
- struct obd_uuid ld_uuid;
-};
-
-#define ld_magic ld_active_tgt_count /* for swabbing from llogs */
-
-/*
- * LDLM requests:
- */
-/* opcodes -- MUST be distinct from OST/MDS opcodes */
-enum ldlm_cmd {
- LDLM_ENQUEUE = 101,
- LDLM_CONVERT = 102,
- LDLM_CANCEL = 103,
- LDLM_BL_CALLBACK = 104,
- LDLM_CP_CALLBACK = 105,
- LDLM_GL_CALLBACK = 106,
- LDLM_SET_INFO = 107,
- LDLM_LAST_OPC
-};
-#define LDLM_FIRST_OPC LDLM_ENQUEUE
-
-#define RES_NAME_SIZE 4
-struct ldlm_res_id {
- __u64 name[RES_NAME_SIZE];
-};
-
-#define DLDLMRES "[%#llx:%#llx:%#llx].%llx"
-#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
- (res)->lr_name.name[2], (res)->lr_name.name[3]
-
-/* lock types */
-enum ldlm_mode {
- LCK_MINMODE = 0,
- LCK_EX = 1,
- LCK_PW = 2,
- LCK_PR = 4,
- LCK_CW = 8,
- LCK_CR = 16,
- LCK_NL = 32,
- LCK_GROUP = 64,
- LCK_COS = 128,
- LCK_MAXMODE
-};
-
-#define LCK_MODE_NUM 8
-
-enum ldlm_type {
- LDLM_PLAIN = 10,
- LDLM_EXTENT = 11,
- LDLM_FLOCK = 12,
- LDLM_IBITS = 13,
- LDLM_MAX_TYPE
-};
-
-#define LDLM_MIN_TYPE LDLM_PLAIN
-
-struct ldlm_extent {
- __u64 start;
- __u64 end;
- __u64 gid;
-};
-
-struct ldlm_inodebits {
- __u64 bits;
-};
-
-struct ldlm_flock_wire {
- __u64 lfw_start;
- __u64 lfw_end;
- __u64 lfw_owner;
- __u32 lfw_padding;
- __u32 lfw_pid;
-};
-
-/* it's important that the fields of the ldlm_extent structure match
- * the first fields of the ldlm_flock structure because there is only
- * one ldlm_swab routine to process the ldlm_policy_data_t union. if
- * this ever changes we will need to swab the union differently based
- * on the resource type.
- */
-
-union ldlm_wire_policy_data {
- struct ldlm_extent l_extent;
- struct ldlm_flock_wire l_flock;
- struct ldlm_inodebits l_inodebits;
-};
-
-union ldlm_gl_desc {
- struct ldlm_gl_lquota_desc lquota_desc;
-};
-
-enum ldlm_intent_flags {
- IT_OPEN = 0x00000001,
- IT_CREAT = 0x00000002,
- IT_OPEN_CREAT = 0x00000003,
- IT_READDIR = 0x00000004,
- IT_GETATTR = 0x00000008,
- IT_LOOKUP = 0x00000010,
- IT_UNLINK = 0x00000020,
- IT_TRUNC = 0x00000040,
- IT_GETXATTR = 0x00000080,
- IT_EXEC = 0x00000100,
- IT_PIN = 0x00000200,
- IT_LAYOUT = 0x00000400,
- IT_QUOTA_DQACQ = 0x00000800,
- IT_QUOTA_CONN = 0x00001000,
- IT_SETXATTR = 0x00002000,
-};
-
-struct ldlm_intent {
- __u64 opc;
-};
-
-struct ldlm_resource_desc {
- enum ldlm_type lr_type;
- __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
- struct ldlm_res_id lr_name;
-};
-
-struct ldlm_lock_desc {
- struct ldlm_resource_desc l_resource;
- enum ldlm_mode l_req_mode;
- enum ldlm_mode l_granted_mode;
- union ldlm_wire_policy_data l_policy_data;
-};
-
-#define LDLM_LOCKREQ_HANDLES 2
-#define LDLM_ENQUEUE_CANCEL_OFF 1
-
-struct ldlm_request {
- __u32 lock_flags;
- __u32 lock_count;
- struct ldlm_lock_desc lock_desc;
- struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
-};
-
-struct ldlm_reply {
- __u32 lock_flags;
- __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
- struct ldlm_lock_desc lock_desc;
- struct lustre_handle lock_handle;
- __u64 lock_policy_res1;
- __u64 lock_policy_res2;
-};
-
-#define ldlm_flags_to_wire(flags) ((__u32)(flags))
-#define ldlm_flags_from_wire(flags) ((__u64)(flags))
-
-/*
- * Opcodes for mountconf (mgs and mgc)
- */
-enum mgs_cmd {
- MGS_CONNECT = 250,
- MGS_DISCONNECT,
- MGS_EXCEPTION, /* node died, etc. */
- MGS_TARGET_REG, /* whenever target starts up */
- MGS_TARGET_DEL,
- MGS_SET_INFO,
- MGS_CONFIG_READ,
- MGS_LAST_OPC
-};
-#define MGS_FIRST_OPC MGS_CONNECT
-
-#define MGS_PARAM_MAXLEN 1024
-#define KEY_SET_INFO "set_info"
-
-struct mgs_send_param {
- char mgs_param[MGS_PARAM_MAXLEN];
-};
-
-/* We pass this info to the MGS so it can write config logs */
-#define MTI_NAME_MAXLEN 64
-#define MTI_PARAM_MAXLEN 4096
-#define MTI_NIDS_MAX 32
-struct mgs_target_info {
- __u32 mti_lustre_ver;
- __u32 mti_stripe_index;
- __u32 mti_config_ver;
- __u32 mti_flags;
- __u32 mti_nid_count;
- __u32 mti_instance; /* Running instance of target */
- char mti_fsname[MTI_NAME_MAXLEN];
- char mti_svname[MTI_NAME_MAXLEN];
- char mti_uuid[sizeof(struct obd_uuid)];
- __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
- char mti_params[MTI_PARAM_MAXLEN];
-};
-
-struct mgs_nidtbl_entry {
- __u64 mne_version; /* table version of this entry */
- __u32 mne_instance; /* target instance # */
- __u32 mne_index; /* target index */
- __u32 mne_length; /* length of this entry - by bytes */
- __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
- __u8 mne_nid_type; /* type of nid(mbz). for ipv6. */
- __u8 mne_nid_size; /* size of each NID, by bytes */
- __u8 mne_nid_count; /* # of NIDs in buffer */
- union {
- lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
- } u;
-};
-
-struct mgs_config_body {
- char mcb_name[MTI_NAME_MAXLEN]; /* logname */
- __u64 mcb_offset; /* next index of config log to request */
- __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
- __u8 mcb_reserved;
- __u8 mcb_bits; /* bits unit size of config log */
- __u32 mcb_units; /* # of units for bulk transfer */
-};
-
-struct mgs_config_res {
- __u64 mcr_offset; /* index of last config log */
- __u64 mcr_size; /* size of the log */
-};
-
-/* Config marker flags (in config log) */
-#define CM_START 0x01
-#define CM_END 0x02
-#define CM_SKIP 0x04
-#define CM_UPGRADE146 0x08
-#define CM_EXCLUDE 0x10
-#define CM_START_SKIP (CM_START | CM_SKIP)
-
-struct cfg_marker {
- __u32 cm_step; /* aka config version */
- __u32 cm_flags;
- __u32 cm_vers; /* lustre release version number */
- __u32 cm_padding; /* 64 bit align */
- __s64 cm_createtime; /*when this record was first created */
- __s64 cm_canceltime; /*when this record is no longer valid*/
- char cm_tgtname[MTI_NAME_MAXLEN];
- char cm_comment[MTI_NAME_MAXLEN];
-};
-
-/*
- * Opcodes for multiple servers.
- */
-
-enum obd_cmd {
- OBD_PING = 400,
- OBD_LOG_CANCEL,
- OBD_QC_CALLBACK, /* not used since 2.4 */
- OBD_IDX_READ,
- OBD_LAST_OPC
-};
-#define OBD_FIRST_OPC OBD_PING
-
-/**
- * llog contexts indices.
- *
- * There is compatibility problem with indexes below, they are not
- * continuous and must keep their numbers for compatibility needs.
- * See LU-5218 for details.
- */
-enum llog_ctxt_id {
- LLOG_CONFIG_ORIG_CTXT = 0,
- LLOG_CONFIG_REPL_CTXT = 1,
- LLOG_MDS_OST_ORIG_CTXT = 2,
- LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */
- LLOG_SIZE_ORIG_CTXT = 4,
- LLOG_SIZE_REPL_CTXT = 5,
- LLOG_TEST_ORIG_CTXT = 8,
- LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */
- LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */
- LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */
- /* for multiple changelog consumers */
- LLOG_CHANGELOG_USER_ORIG_CTXT = 14,
- LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */
- LLOG_MAX_CTXTS
-};
-
-/** Identifier for a single log object */
-struct llog_logid {
- struct ost_id lgl_oi;
- __u32 lgl_ogen;
-} __packed;
-
-/** Records written to the CATALOGS list */
-#define CATLIST "CATALOGS"
-struct llog_catid {
- struct llog_logid lci_logid;
- __u32 lci_padding1;
- __u32 lci_padding2;
- __u32 lci_padding3;
-} __packed;
-
-/* Log data record types - there is no specific reason that these need to
- * be related to the RPC opcodes, but no reason not to (may be handy later?)
- */
-#define LLOG_OP_MAGIC 0x10600000
-#define LLOG_OP_MASK 0xfff00000
-
-enum llog_op_type {
- LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
- OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
- /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
- MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
- REINT_UNLINK, /* obsolete after 2.5.0 */
- MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
- REINT_UNLINK,
- /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
- MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
- REINT_SETATTR,
- OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
- /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
- LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
- /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
- CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
- CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
- HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
- LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
- LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
-};
-
-#define LLOG_REC_HDR_NEEDS_SWABBING(r) \
- (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
-
-/** Log record header - stored in little endian order.
- * Each record must start with this struct, end with a llog_rec_tail,
- * and be a multiple of 256 bits in size.
- */
-struct llog_rec_hdr {
- __u32 lrh_len;
- __u32 lrh_index;
- __u32 lrh_type;
- __u32 lrh_id;
-};
-
-struct llog_rec_tail {
- __u32 lrt_len;
- __u32 lrt_index;
-};
-
-/* Where data follow just after header */
-#define REC_DATA(ptr) \
- ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
-
-#define REC_DATA_LEN(rec) \
- (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
- sizeof(struct llog_rec_tail))
-
-struct llog_logid_rec {
- struct llog_rec_hdr lid_hdr;
- struct llog_logid lid_id;
- __u32 lid_padding1;
- __u64 lid_padding2;
- __u64 lid_padding3;
- struct llog_rec_tail lid_tail;
-} __packed;
-
-struct llog_unlink_rec {
- struct llog_rec_hdr lur_hdr;
- __u64 lur_oid;
- __u32 lur_oseq;
- __u32 lur_count;
- struct llog_rec_tail lur_tail;
-} __packed;
-
-struct llog_unlink64_rec {
- struct llog_rec_hdr lur_hdr;
- struct lu_fid lur_fid;
- __u32 lur_count; /* to destroy the lost precreated */
- __u32 lur_padding1;
- __u64 lur_padding2;
- __u64 lur_padding3;
- struct llog_rec_tail lur_tail;
-} __packed;
-
-struct llog_setattr64_rec {
- struct llog_rec_hdr lsr_hdr;
- struct ost_id lsr_oi;
- __u32 lsr_uid;
- __u32 lsr_uid_h;
- __u32 lsr_gid;
- __u32 lsr_gid_h;
- __u64 lsr_valid;
- struct llog_rec_tail lsr_tail;
-} __packed;
-
-struct llog_size_change_rec {
- struct llog_rec_hdr lsc_hdr;
- struct ll_fid lsc_fid;
- __u32 lsc_ioepoch;
- __u32 lsc_padding1;
- __u64 lsc_padding2;
- __u64 lsc_padding3;
- struct llog_rec_tail lsc_tail;
-} __packed;
-
-/* changelog llog name, needed by client replicators */
-#define CHANGELOG_CATALOG "changelog_catalog"
-
-struct changelog_setinfo {
- __u64 cs_recno;
- __u32 cs_id;
-} __packed;
-
-/** changelog record */
-struct llog_changelog_rec {
- struct llog_rec_hdr cr_hdr;
- struct changelog_rec cr; /**< Variable length field */
- struct llog_rec_tail cr_do_not_use; /**< for_sizezof_only */
-} __packed;
-
-struct llog_changelog_user_rec {
- struct llog_rec_hdr cur_hdr;
- __u32 cur_id;
- __u32 cur_padding;
- __u64 cur_endrec;
- struct llog_rec_tail cur_tail;
-} __packed;
-
-enum agent_req_status {
- ARS_WAITING,
- ARS_STARTED,
- ARS_FAILED,
- ARS_CANCELED,
- ARS_SUCCEED,
-};
-
-static inline const char *agent_req_status2name(const enum agent_req_status ars)
-{
- switch (ars) {
- case ARS_WAITING:
- return "WAITING";
- case ARS_STARTED:
- return "STARTED";
- case ARS_FAILED:
- return "FAILED";
- case ARS_CANCELED:
- return "CANCELED";
- case ARS_SUCCEED:
- return "SUCCEED";
- default:
- return "UNKNOWN";
- }
-}
-
-struct llog_agent_req_rec {
- struct llog_rec_hdr arr_hdr; /**< record header */
- __u32 arr_status; /**< status of the request */
- /* must match enum
- * agent_req_status
- */
- __u32 arr_archive_id; /**< backend archive number */
- __u64 arr_flags; /**< req flags */
- __u64 arr_compound_id;/**< compound cookie */
- __u64 arr_req_create; /**< req. creation time */
- __u64 arr_req_change; /**< req. status change time */
- struct hsm_action_item arr_hai; /**< req. to the agent */
- struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */
-} __packed;
-
-/* Old llog gen for compatibility */
-struct llog_gen {
- __u64 mnt_cnt;
- __u64 conn_cnt;
-} __packed;
-
-struct llog_gen_rec {
- struct llog_rec_hdr lgr_hdr;
- struct llog_gen lgr_gen;
- __u64 padding1;
- __u64 padding2;
- __u64 padding3;
- struct llog_rec_tail lgr_tail;
-};
-
-/* flags for the logs */
-enum llog_flag {
- LLOG_F_ZAP_WHEN_EMPTY = 0x1,
- LLOG_F_IS_CAT = 0x2,
- LLOG_F_IS_PLAIN = 0x4,
- LLOG_F_EXT_JOBID = 0x8,
- LLOG_F_IS_FIXSIZE = 0x10,
-
- /*
- * Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
- * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here,
- * because the catlog record is usually fixed size, but its plain
- * log record can be variable
- */
- LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
-};
-
-/* On-disk header structure of each log object, stored in little endian order */
-#define LLOG_MIN_CHUNK_SIZE 8192
-#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) +
- * sizeof(llh_tail) - sizeof(llh_bitmap)
- */
-#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
-#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
-
-/* flags for the logs */
-struct llog_log_hdr {
- struct llog_rec_hdr llh_hdr;
- __s64 llh_timestamp;
- __u32 llh_count;
- __u32 llh_bitmap_offset;
- __u32 llh_size;
- __u32 llh_flags;
- __u32 llh_cat_idx;
- /* for a catalog the first plain slot is next to it */
- struct obd_uuid llh_tgtuuid;
- __u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
- /* These fields must always be at the end of the llog_log_hdr.
- * Note: llh_bitmap size is variable because llog chunk size could be
- * bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192
- * bytes, and the real size is stored in llh_hdr.lrh_len, which means
- * llh_tail should only be referred by LLOG_HDR_TAIL().
- * But this structure is also used by client/server llog interface
- * (see llog_client.c), it will be kept in its original way to avoid
- * compatibility issue.
- */
- __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
- struct llog_rec_tail llh_tail;
-} __packed;
-
-#undef LLOG_HEADER_SIZE
-#undef LLOG_BITMAP_BYTES
-
-#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
- llh->llh_bitmap_offset - \
- sizeof(llh->llh_tail)) * 8)
-#define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \
- (llh)->llh_bitmap_offset)
-#define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \
- llh->llh_hdr.lrh_len - \
- sizeof(llh->llh_tail)))
-
-/** log cookies are used to reference a specific log file and a record
- * therein
- */
-struct llog_cookie {
- struct llog_logid lgc_lgl;
- __u32 lgc_subsys;
- __u32 lgc_index;
- __u32 lgc_padding;
-} __packed;
-
-/** llog protocol */
-enum llogd_rpc_ops {
- LLOG_ORIGIN_HANDLE_CREATE = 501,
- LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
- LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
- LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
- LLOG_ORIGIN_HANDLE_CLOSE = 505,
- LLOG_ORIGIN_CONNECT = 506,
- LLOG_CATINFO = 507, /* deprecated */
- LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
- LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
- LLOG_LAST_OPC,
- LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
-};
-
-struct llogd_body {
- struct llog_logid lgd_logid;
- __u32 lgd_ctxt_idx;
- __u32 lgd_llh_flags;
- __u32 lgd_index;
- __u32 lgd_saved_index;
- __u32 lgd_len;
- __u64 lgd_cur_offset;
-} __packed;
-
-struct llogd_conn_body {
- struct llog_gen lgdc_gen;
- struct llog_logid lgdc_logid;
- __u32 lgdc_ctxt_idx;
-} __packed;
-
-/* Note: 64-bit types are 64-bit aligned in structure */
-struct obdo {
- __u64 o_valid; /* hot fields in this obdo */
- struct ost_id o_oi;
- __u64 o_parent_seq;
- __u64 o_size; /* o_size-o_blocks == ost_lvb */
- __s64 o_mtime;
- __s64 o_atime;
- __s64 o_ctime;
- __u64 o_blocks; /* brw: cli sent cached bytes */
- __u64 o_grant;
-
- /* 32-bit fields start here: keep an even number of them via padding */
- __u32 o_blksize; /* optimal IO blocksize */
- __u32 o_mode; /* brw: cli sent cache remain */
- __u32 o_uid;
- __u32 o_gid;
- __u32 o_flags;
- __u32 o_nlink; /* brw: checksum */
- __u32 o_parent_oid;
- __u32 o_misc; /* brw: o_dropped */
-
- __u64 o_ioepoch; /* epoch in ost writes */
- __u32 o_stripe_idx; /* holds stripe idx */
- __u32 o_parent_ver;
- struct lustre_handle o_handle; /* brw: lock handle to prolong locks
- */
- struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS,
- * obsolete in 2.8, reused in OSP
- */
- __u32 o_uid_h;
- __u32 o_gid_h;
-
- __u64 o_data_version; /* getattr: sum of iversion for
- * each stripe.
- * brw: grant space consumed on
- * the client for the write
- */
- __u64 o_padding_4;
- __u64 o_padding_5;
- __u64 o_padding_6;
-};
-
-#define o_dirty o_blocks
-#define o_undirty o_mode
-#define o_dropped o_misc
-#define o_cksum o_nlink
-#define o_grant_used o_data_version
-
-/* request structure for OST's */
-struct ost_body {
- struct obdo oa;
-};
-
-/* Key for FIEMAP to be used in get_info calls */
-struct ll_fiemap_info_key {
- char lfik_name[8];
- struct obdo lfik_oa;
- struct fiemap lfik_fiemap;
-};
-
-/* security opcodes */
-enum sec_cmd {
- SEC_CTX_INIT = 801,
- SEC_CTX_INIT_CONT = 802,
- SEC_CTX_FINI = 803,
- SEC_LAST_OPC,
- SEC_FIRST_OPC = SEC_CTX_INIT
-};
-
-/*
- * capa related definitions
- */
-#define CAPA_HMAC_MAX_LEN 64
-#define CAPA_HMAC_KEY_MAX_LEN 56
-
-/* NB take care when changing the sequence of elements this struct,
- * because the offset info is used in find_capa()
- */
-struct lustre_capa {
- struct lu_fid lc_fid; /** fid */
- __u64 lc_opc; /** operations allowed */
- __u64 lc_uid; /** file owner */
- __u64 lc_gid; /** file group */
- __u32 lc_flags; /** HMAC algorithm & flags */
- __u32 lc_keyid; /** key# used for the capability */
- __u32 lc_timeout; /** capa timeout value (sec) */
-/* FIXME: y2038 time_t overflow: */
- __u32 lc_expiry; /** expiry time (sec) */
- __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
-} __packed;
-
-/** lustre_capa::lc_opc */
-enum {
- CAPA_OPC_BODY_WRITE = 1 << 0, /**< write object data */
- CAPA_OPC_BODY_READ = 1 << 1, /**< read object data */
- CAPA_OPC_INDEX_LOOKUP = 1 << 2, /**< lookup object fid */
- CAPA_OPC_INDEX_INSERT = 1 << 3, /**< insert object fid */
- CAPA_OPC_INDEX_DELETE = 1 << 4, /**< delete object fid */
- CAPA_OPC_OSS_WRITE = 1 << 5, /**< write oss object data */
- CAPA_OPC_OSS_READ = 1 << 6, /**< read oss object data */
- CAPA_OPC_OSS_TRUNC = 1 << 7, /**< truncate oss object */
- CAPA_OPC_OSS_DESTROY = 1 << 8, /**< destroy oss object */
- CAPA_OPC_META_WRITE = 1 << 9, /**< write object meta data */
- CAPA_OPC_META_READ = 1 << 10, /**< read object meta data */
-};
-
-#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
-#define CAPA_OPC_MDS_ONLY \
- (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
- CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
-#define CAPA_OPC_OSS_ONLY \
- (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
- CAPA_OPC_OSS_DESTROY)
-#define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
-#define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
-
-struct lustre_capa_key {
- __u64 lk_seq; /**< mds# */
- __u32 lk_keyid; /**< key# */
- __u32 lk_padding;
- __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
-} __packed;
-
-/** The link ea holds 1 \a link_ea_entry for each hardlink */
-#define LINK_EA_MAGIC 0x11EAF1DFUL
-struct link_ea_header {
- __u32 leh_magic;
- __u32 leh_reccount;
- __u64 leh_len; /* total size */
- __u32 leh_overflow_time;
- __u32 leh_padding;
-};
-
-/** Hardlink data is name and parent fid.
- * Stored in this crazy struct for maximum packing and endian-neutrality
- */
-struct link_ea_entry {
- /** __u16 stored big-endian, unaligned */
- unsigned char lee_reclen[2];
- unsigned char lee_parent_fid[sizeof(struct lu_fid)];
- char lee_name[0];
-} __packed;
-
-/** fid2path request/reply structure */
-struct getinfo_fid2path {
- struct lu_fid gf_fid;
- __u64 gf_recno;
- __u32 gf_linkno;
- __u32 gf_pathlen;
- char gf_path[0];
-} __packed;
-
-/** path2parent request/reply structures */
-struct getparent {
- struct lu_fid gp_fid; /**< parent FID */
- __u32 gp_linkno; /**< hardlink number */
- __u32 gp_name_size; /**< size of the name field */
- char gp_name[0]; /**< zero-terminated link name */
-} __packed;
-
-enum {
- LAYOUT_INTENT_ACCESS = 0,
- LAYOUT_INTENT_READ = 1,
- LAYOUT_INTENT_WRITE = 2,
- LAYOUT_INTENT_GLIMPSE = 3,
- LAYOUT_INTENT_TRUNC = 4,
- LAYOUT_INTENT_RELEASE = 5,
- LAYOUT_INTENT_RESTORE = 6
-};
-
-/* enqueue layout lock with intent */
-struct layout_intent {
- __u32 li_opc; /* intent operation for enqueue, read, write etc */
- __u32 li_flags;
- __u64 li_start;
- __u64 li_end;
-};
-
-/**
- * On the wire version of hsm_progress structure.
- *
- * Contains the userspace hsm_progress and some internal fields.
- */
-struct hsm_progress_kernel {
- /* Field taken from struct hsm_progress */
- struct lu_fid hpk_fid;
- __u64 hpk_cookie;
- struct hsm_extent hpk_extent;
- __u16 hpk_flags;
- __u16 hpk_errval; /* positive val */
- __u32 hpk_padding1;
- /* Additional fields */
- __u64 hpk_data_version;
- __u64 hpk_padding2;
-} __packed;
-
-/** layout swap request structure
- * fid1 and fid2 are in mdt_body
- */
-struct mdc_swap_layouts {
- __u64 msl_flags;
-} __packed;
-
-struct close_data {
- struct lustre_handle cd_handle;
- struct lu_fid cd_fid;
- __u64 cd_data_version;
- __u64 cd_reserved[8];
-};
-
-#endif
-/** @} lustreidl */
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h
deleted file mode 100644
index 9590864e0b50..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-#ifndef _UAPI_LUSTRE_IOCTL_H_
-#define _UAPI_LUSTRE_IOCTL_H_
-
-#include <linux/ioctl.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-
-#if !defined(__KERNEL__) && !defined(LUSTRE_UTILS)
-# error This file is for Lustre internal use only.
-#endif
-
-enum md_echo_cmd {
- ECHO_MD_CREATE = 1, /* Open/Create file on MDT */
- ECHO_MD_MKDIR = 2, /* Mkdir on MDT */
- ECHO_MD_DESTROY = 3, /* Unlink file on MDT */
- ECHO_MD_RMDIR = 4, /* Rmdir on MDT */
- ECHO_MD_LOOKUP = 5, /* Lookup on MDT */
- ECHO_MD_GETATTR = 6, /* Getattr on MDT */
- ECHO_MD_SETATTR = 7, /* Setattr on MDT */
- ECHO_MD_ALLOC_FID = 8, /* Get FIDs from MDT */
-};
-
-#define OBD_DEV_ID 1
-#define OBD_DEV_NAME "obd"
-#define OBD_DEV_PATH "/dev/" OBD_DEV_NAME
-#define OBD_DEV_MAJOR 10
-#define OBD_DEV_MINOR 241
-
-#define OBD_IOCTL_VERSION 0x00010004
-#define OBD_DEV_BY_DEVNAME 0xffffd0de
-
-struct obd_ioctl_data {
- __u32 ioc_len;
- __u32 ioc_version;
-
- union {
- __u64 ioc_cookie;
- __u64 ioc_u64_1;
- };
- union {
- __u32 ioc_conn1;
- __u32 ioc_u32_1;
- };
- union {
- __u32 ioc_conn2;
- __u32 ioc_u32_2;
- };
-
- struct obdo ioc_obdo1;
- struct obdo ioc_obdo2;
-
- __u64 ioc_count;
- __u64 ioc_offset;
- __u32 ioc_dev;
- __u32 ioc_command;
-
- __u64 ioc_nid;
- __u32 ioc_nal;
- __u32 ioc_type;
-
- /* buffers the kernel will treat as user pointers */
- __u32 ioc_plen1;
- char __user *ioc_pbuf1;
- __u32 ioc_plen2;
- char __user *ioc_pbuf2;
-
- /* inline buffers for various arguments */
- __u32 ioc_inllen1;
- char *ioc_inlbuf1;
- __u32 ioc_inllen2;
- char *ioc_inlbuf2;
- __u32 ioc_inllen3;
- char *ioc_inlbuf3;
- __u32 ioc_inllen4;
- char *ioc_inlbuf4;
-
- char ioc_bulk[0];
-};
-
-struct obd_ioctl_hdr {
- __u32 ioc_len;
- __u32 ioc_version;
-};
-
-static inline __u32 obd_ioctl_packlen(struct obd_ioctl_data *data)
-{
- __u32 len = __ALIGN_KERNEL(sizeof(*data), 8);
-
- len += __ALIGN_KERNEL(data->ioc_inllen1, 8);
- len += __ALIGN_KERNEL(data->ioc_inllen2, 8);
- len += __ALIGN_KERNEL(data->ioc_inllen3, 8);
- len += __ALIGN_KERNEL(data->ioc_inllen4, 8);
-
- return len;
-}
-
-/*
- * OBD_IOC_DATA_TYPE is only for compatibility reasons with older
- * Linux Lustre user tools. New ioctls should NOT use this macro as
- * the ioctl "size". Instead the ioctl should get a "size" argument
- * which is the actual data type used by the ioctl, to ensure the
- * ioctl interface is versioned correctly.
- */
-#define OBD_IOC_DATA_TYPE long
-
-/* IOC_LDLM_TEST _IOWR('f', 40, long) */
-/* IOC_LDLM_DUMP _IOWR('f', 41, long) */
-/* IOC_LDLM_REGRESS_START _IOWR('f', 42, long) */
-/* IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long) */
-
-#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DESTROY _IOW('f', 104, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE) */
-
-#define OBD_IOC_SETATTR _IOW('f', 107, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETATTR _IOWR('f', 108, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SYNC _IOW('f', 114, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_COPY _IOWR('f', 120, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_MIGR _IOWR('f', 121, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_PUNCH _IOWR('f', 122, OBD_IOC_DATA_TYPE) */
-
-/* OBD_IOC_MODULE_DEBUG _IOWR('f', 124, OBD_IOC_DATA_TYPE) */
-#define OBD_IOC_BRW_READ _IOWR('f', 125, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_BRW_WRITE _IOWR('f', 126, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_NAME2DEV _IOWR('f', 127, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_UUID2DEV _IOWR('f', 130, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETNAME _IOWR('f', 131, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETMDNAME _IOR('f', 131, char[MAX_OBD_NAME])
-#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME
-#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CLIENT_RECOVER _IOW('f', 133, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PING_TARGET _IOW('f', 136, OBD_IOC_DATA_TYPE)
-
-/* OBD_IOC_DEC_FS_USE_COUNT _IO('f', 139) */
-#define OBD_IOC_NO_TRANSNO _IOW('f', 140, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SET_READONLY _IOW('f', 141, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_ABORT_RECOVERY _IOR('f', 142, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE) */
-#define OBD_GET_VERSION _IOWR('f', 144, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_GSS_SUPPORT _IOWR('f', 145, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_CLOSE_UUID _IOWR('f', 147, OBD_IOC_DATA_TYPE) */
-#define OBD_IOC_CHANGELOG_SEND _IOW('f', 148, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETDEVICE _IOWR('f', 149, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_FID2PATH _IOWR('f', 150, OBD_IOC_DATA_TYPE)
-/* lustre/lustre_user.h 151-153 */
-/* OBD_IOC_LOV_SETSTRIPE 154 LL_IOC_LOV_SETSTRIPE */
-/* OBD_IOC_LOV_GETSTRIPE 155 LL_IOC_LOV_GETSTRIPE */
-/* OBD_IOC_LOV_SETEA 156 LL_IOC_LOV_SETEA */
-/* lustre/lustre_user.h 157-159 */
-/* OBD_IOC_QUOTACHECK _IOW('f', 160, int) */
-/* OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *) */
-#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
-/* lustre/lustre_user.h 163-176 */
-#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_DEREG _IOW('f', 178, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_CLEAR _IOW('f', 179, struct obd_ioctl_data)
-/* OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_DORECORD _IOWR('f', 183, OBD_IOC_DATA_TYPE) */
-#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE) */
-/* OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE) */
-#define OBD_IOC_PARAM _IOW('f', 187, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_CATLOGLIST _IOWR('f', 190, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_INFO _IOWR('f', 191, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_PRINT _IOWR('f', 192, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_CANCEL _IOWR('f', 193, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_REMOVE _IOWR('f', 194, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_CHECK _IOWR('f', 195, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE) */
-#define OBD_IOC_NODEMAP _IOWR('f', 197, OBD_IOC_DATA_TYPE)
-
-/* ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) */
-/* ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) */
-/* ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) */
-/* ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) */
-
-#define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE)
-
-/* lustre/lustre_user.h 212-217 */
-#define OBD_IOC_GET_MNTOPT _IOW('f', 220, mntopt_t)
-#define OBD_IOC_ECHO_MD _IOR('f', 221, struct obd_ioctl_data)
-#define OBD_IOC_ECHO_ALLOC_SEQ _IOWR('f', 222, struct obd_ioctl_data)
-#define OBD_IOC_START_LFSCK _IOWR('f', 230, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_STOP_LFSCK _IOW('f', 231, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_QUERY_LFSCK _IOR('f', 232, struct obd_ioctl_data)
-/* lustre/lustre_user.h 240-249 */
-/* LIBCFS_IOC_DEBUG_MASK 250 */
-
-#define IOC_OSC_SET_ACTIVE _IOWR('h', 21, void *)
-
-#endif /* _UAPI_LUSTRE_IOCTL_H_ */
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h
deleted file mode 100644
index 94dadbe8e069..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- *
- * Kernel <-> userspace communication routines.
- * The definitions below are used in the kernel and userspace.
- */
-
-#ifndef __UAPI_LUSTRE_KERNELCOMM_H__
-#define __UAPI_LUSTRE_KERNELCOMM_H__
-
-#include <linux/types.h>
-
-/* KUC message header.
- * All current and future KUC messages should use this header.
- * To avoid having to include Lustre headers from libcfs, define this here.
- */
-struct kuc_hdr {
- __u16 kuc_magic;
- /* Each new Lustre feature should use a different transport */
- __u8 kuc_transport;
- __u8 kuc_flags;
- /* Message type or opcode, transport-specific */
- __u16 kuc_msgtype;
- /* Including header */
- __u16 kuc_msglen;
-} __aligned(sizeof(__u64));
-
-#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr) + CR_MAXSIZE)
-
-#define KUC_MAGIC 0x191C /*Lustre9etLinC */
-
-/* kuc_msgtype values are defined in each transport */
-enum kuc_transport_type {
- KUC_TRANSPORT_GENERIC = 1,
- KUC_TRANSPORT_HSM = 2,
- KUC_TRANSPORT_CHANGELOG = 3,
-};
-
-enum kuc_generic_message_type {
- KUC_MSG_SHUTDOWN = 1,
-};
-
-/* KUC Broadcast Groups. This determines which userspace process hears which
- * messages. Mutliple transports may be used within a group, or multiple
- * groups may use the same transport. Broadcast
- * groups need not be used if e.g. a UID is specified instead;
- * use group 0 to signify unicast.
- */
-#define KUC_GRP_HSM 0x02
-#define KUC_GRP_MAX KUC_GRP_HSM
-
-#define LK_FLG_STOP 0x01
-#define LK_NOFD -1U
-
-/* kernelcomm control structure, passed from userspace to kernel */
-struct lustre_kernelcomm {
- __u32 lk_wfd;
- __u32 lk_rfd;
- __u32 lk_uid;
- __u32 lk_group;
- __u32 lk_data;
- __u32 lk_flags;
-} __packed;
-
-#endif /* __UAPI_LUSTRE_KERNELCOMM_H__ */
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h
deleted file mode 100644
index 3343b602219b..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2014, Intel Corporation.
- *
- * Copyright 2015 Cray Inc, all rights reserved.
- * Author: Ben Evans.
- *
- * Define ost_id associated functions
- */
-
-#ifndef _UAPI_LUSTRE_OSTID_H_
-#define _UAPI_LUSTRE_OSTID_H_
-
-#include <linux/errno.h>
-#include <uapi/linux/lustre/lustre_fid.h>
-
-static inline __u64 lmm_oi_id(const struct ost_id *oi)
-{
- return oi->oi.oi_id;
-}
-
-static inline __u64 lmm_oi_seq(const struct ost_id *oi)
-{
- return oi->oi.oi_seq;
-}
-
-static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
-{
- oi->oi.oi_seq = seq;
-}
-
-static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
-{
- oi->oi.oi_id = oid;
-}
-
-static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
- const struct ost_id *src_oi)
-{
- dst_oi->oi.oi_id = __le64_to_cpu(src_oi->oi.oi_id);
- dst_oi->oi.oi_seq = __le64_to_cpu(src_oi->oi.oi_seq);
-}
-
-static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
- const struct ost_id *src_oi)
-{
- dst_oi->oi.oi_id = __cpu_to_le64(src_oi->oi.oi_id);
- dst_oi->oi.oi_seq = __cpu_to_le64(src_oi->oi.oi_seq);
-}
-
-/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
-static inline __u64 ostid_seq(const struct ost_id *ostid)
-{
- if (fid_seq_is_mdt0(ostid->oi.oi_seq))
- return FID_SEQ_OST_MDT0;
-
- if (fid_seq_is_default(ostid->oi.oi_seq))
- return FID_SEQ_LOV_DEFAULT;
-
- if (fid_is_idif(&ostid->oi_fid))
- return FID_SEQ_OST_MDT0;
-
- return fid_seq(&ostid->oi_fid);
-}
-
-/* extract OST objid from a wire ost_id (id/seq) pair */
-static inline __u64 ostid_id(const struct ost_id *ostid)
-{
- if (fid_seq_is_mdt0(ostid->oi.oi_seq))
- return ostid->oi.oi_id & IDIF_OID_MASK;
-
- if (fid_seq_is_default(ostid->oi.oi_seq))
- return ostid->oi.oi_id;
-
- if (fid_is_idif(&ostid->oi_fid))
- return fid_idif_id(fid_seq(&ostid->oi_fid),
- fid_oid(&ostid->oi_fid), 0);
-
- return fid_oid(&ostid->oi_fid);
-}
-
-static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
-{
- if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
- oi->oi.oi_seq = seq;
- } else {
- oi->oi_fid.f_seq = seq;
- /*
- * Note: if f_oid + f_ver is zero, we need init it
- * to be 1, otherwise, ostid_seq will treat this
- * as old ostid (oi_seq == 0)
- */
- if (!oi->oi_fid.f_oid && !oi->oi_fid.f_ver)
- oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
- }
-}
-
-static inline void ostid_set_seq_mdt0(struct ost_id *oi)
-{
- ostid_set_seq(oi, FID_SEQ_OST_MDT0);
-}
-
-static inline void ostid_set_seq_echo(struct ost_id *oi)
-{
- ostid_set_seq(oi, FID_SEQ_ECHO);
-}
-
-static inline void ostid_set_seq_llog(struct ost_id *oi)
-{
- ostid_set_seq(oi, FID_SEQ_LLOG);
-}
-
-static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
- struct ost_id *dst_oi)
-{
- if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
- dst_oi->oi.oi_id = __cpu_to_le64(src_oi->oi.oi_id);
- dst_oi->oi.oi_seq = __cpu_to_le64(src_oi->oi.oi_seq);
- } else {
- fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
- }
-}
-
-static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
- struct ost_id *dst_oi)
-{
- if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
- dst_oi->oi.oi_id = __le64_to_cpu(src_oi->oi.oi_id);
- dst_oi->oi.oi_seq = __le64_to_cpu(src_oi->oi.oi_seq);
- } else {
- fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
- }
-}
-
-/**
- * Sigh, because pre-2.4 uses
- * struct lov_mds_md_v1 {
- * ........
- * __u64 lmm_object_id;
- * __u64 lmm_object_seq;
- * ......
- * }
- * to identify the LOV(MDT) object, and lmm_object_seq will
- * be normal_fid, which make it hard to combine these conversion
- * to ostid_to FID. so we will do lmm_oi/fid conversion separately
- *
- * We can tell the lmm_oi by this way,
- * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
- * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
- * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
- * lmm_oi.f_ver = 0
- *
- * But currently lmm_oi/lsm_oi does not have any "real" usages,
- * except for printing some information, and the user can always
- * get the real FID from LMA, besides this multiple case check might
- * make swab more complicate. So we will keep using id/seq for lmm_oi.
- */
-
-static inline void fid_to_lmm_oi(const struct lu_fid *fid,
- struct ost_id *oi)
-{
- oi->oi.oi_id = fid_oid(fid);
- oi->oi.oi_seq = fid_seq(fid);
-}
-
-/**
- * Unpack an OST object id/seq (group) into a FID. This is needed for
- * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
- * FIDs. Note that if an id/seq is already in FID/IDIF format it will
- * be passed through unchanged. Only legacy OST objects in "group 0"
- * will be mapped into the IDIF namespace so that they can fit into the
- * struct lu_fid fields without loss.
- */
-static inline int ostid_to_fid(struct lu_fid *fid, const struct ost_id *ostid,
- __u32 ost_idx)
-{
- __u64 seq = ostid_seq(ostid);
-
- if (ost_idx > 0xffff)
- return -EBADF;
-
- if (fid_seq_is_mdt0(seq)) {
- __u64 oid = ostid_id(ostid);
-
- /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
- * that we map into the IDIF namespace. It allows up to 2^48
- * objects per OST, as this is the object namespace that has
- * been in production for years. This can handle create rates
- * of 1M objects/s/OST for 9 years, or combinations thereof.
- */
- if (oid >= IDIF_MAX_OID)
- return -EBADF;
-
- fid->f_seq = fid_idif_seq(oid, ost_idx);
- /* truncate to 32 bits by assignment */
- fid->f_oid = oid;
- /* in theory, not currently used */
- fid->f_ver = oid >> 48;
- } else if (!fid_seq_is_default(seq)) {
- /* This is either an IDIF object, which identifies objects
- * across all OSTs, or a regular FID. The IDIF namespace
- * maps legacy OST objects into the FID namespace. In both
- * cases, we just pass the FID through, no conversion needed.
- */
- if (ostid->oi_fid.f_ver)
- return -EBADF;
-
- *fid = ostid->oi_fid;
- }
-
- return 0;
-}
-#endif /* _UAPI_LUSTRE_OSTID_H_ */
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h
deleted file mode 100644
index 1eab2ceca338..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * User-settable parameter keys
- *
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-#ifndef _UAPI_LUSTRE_PARAM_H_
-#define _UAPI_LUSTRE_PARAM_H_
-
-/** \defgroup param param
- *
- * @{
- */
-
-/****************** User-settable parameter keys *********************/
-/* e.g.
- * tunefs.lustre --param="failover.node=192.168.0.13@tcp0" /dev/sda
- * lctl conf_param testfs-OST0000 failover.node=3@elan,192.168.0.3@tcp0
- * ... testfs-MDT0000.lov.stripesize=4M
- * ... testfs-OST0000.ost.client_cache_seconds=15
- * ... testfs.sys.timeout=<secs>
- * ... testfs.llite.max_read_ahead_mb=16
- */
-
-/* System global or special params not handled in obd's proc
- * See mgs_write_log_sys()
- */
-#define PARAM_TIMEOUT "timeout=" /* global */
-#define PARAM_LDLM_TIMEOUT "ldlm_timeout=" /* global */
-#define PARAM_AT_MIN "at_min=" /* global */
-#define PARAM_AT_MAX "at_max=" /* global */
-#define PARAM_AT_EXTRA "at_extra=" /* global */
-#define PARAM_AT_EARLY_MARGIN "at_early_margin=" /* global */
-#define PARAM_AT_HISTORY "at_history=" /* global */
-#define PARAM_JOBID_VAR "jobid_var=" /* global */
-#define PARAM_MGSNODE "mgsnode=" /* only at mounttime */
-#define PARAM_FAILNODE "failover.node=" /* add failover nid */
-#define PARAM_FAILMODE "failover.mode=" /* initial mount only */
-#define PARAM_ACTIVE "active=" /* activate/deactivate */
-#define PARAM_NETWORK "network=" /* bind on nid */
-#define PARAM_ID_UPCALL "identity_upcall=" /* identity upcall */
-
-/* Prefixes for parameters handled by obd's proc methods (XXX_process_config) */
-#define PARAM_OST "ost."
-#define PARAM_OSD "osd."
-#define PARAM_OSC "osc."
-#define PARAM_MDT "mdt."
-#define PARAM_HSM "mdt.hsm."
-#define PARAM_MDD "mdd."
-#define PARAM_MDC "mdc."
-#define PARAM_LLITE "llite."
-#define PARAM_LOV "lov."
-#define PARAM_LOD "lod."
-#define PARAM_OSP "osp."
-#define PARAM_SYS "sys." /* global */
-#define PARAM_SRPC "srpc."
-#define PARAM_SRPC_FLVR "srpc.flavor."
-#define PARAM_SRPC_UDESC "srpc.udesc.cli2mdt"
-#define PARAM_SEC "security."
-#define PARAM_QUOTA "quota." /* global */
-
-/** @} param */
-
-#endif /* _UAPI_LUSTRE_PARAM_H_ */
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h
deleted file mode 100644
index 5e332e3af68a..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h
+++ /dev/null
@@ -1,1325 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre/lustre_user.h
- *
- * Lustre public user-space interface definitions.
- */
-
-#ifndef _LUSTRE_USER_H
-#define _LUSTRE_USER_H
-
-/** \defgroup lustreuser lustreuser
- *
- * @{
- */
-
-#ifdef __KERNEL__
-# include <linux/fs.h>
-# include <linux/quota.h>
-# include <linux/sched/signal.h>
-# include <linux/string.h> /* snprintf() */
-# include <linux/version.h>
-#else /* !__KERNEL__ */
-# define NEED_QUOTA_DEFS
-# include <stdio.h> /* snprintf() */
-# include <string.h>
-# include <sys/quota.h>
-# include <sys/stat.h>
-#endif /* __KERNEL__ */
-#include <uapi/linux/lustre/lustre_fiemap.h>
-
-/*
- * We need to always use 64bit version because the structure
- * is shared across entire cluster where 32bit and 64bit machines
- * are co-existing.
- */
-#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
-typedef struct stat64 lstat_t;
-#define lstat_f lstat64
-#define fstat_f fstat64
-#define fstatat_f fstatat64
-#else
-typedef struct stat lstat_t;
-#define lstat_f lstat
-#define fstat_f fstat
-#define fstatat_f fstatat
-#endif
-
-#define HAVE_LOV_USER_MDS_DATA
-
-#define LUSTRE_EOF 0xffffffffffffffffULL
-
-/* for statfs() */
-#define LL_SUPER_MAGIC 0x0BD00BD0
-
-#ifndef FSFILT_IOC_GETFLAGS
-#define FSFILT_IOC_GETFLAGS _IOR('f', 1, long)
-#define FSFILT_IOC_SETFLAGS _IOW('f', 2, long)
-#define FSFILT_IOC_GETVERSION _IOR('f', 3, long)
-#define FSFILT_IOC_SETVERSION _IOW('f', 4, long)
-#define FSFILT_IOC_GETVERSION_OLD _IOR('v', 1, long)
-#define FSFILT_IOC_SETVERSION_OLD _IOW('v', 2, long)
-#endif
-
-/* FIEMAP flags supported by Lustre */
-#define LUSTRE_FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_DEVICE_ORDER)
-
-enum obd_statfs_state {
- OS_STATE_DEGRADED = 0x00000001, /**< RAID degraded/rebuilding */
- OS_STATE_READONLY = 0x00000002, /**< filesystem is read-only */
- OS_STATE_RDONLY_1 = 0x00000004, /**< obsolete 1.6, was EROFS=30 */
- OS_STATE_RDONLY_2 = 0x00000008, /**< obsolete 1.6, was EROFS=30 */
- OS_STATE_RDONLY_3 = 0x00000010, /**< obsolete 1.6, was EROFS=30 */
-};
-
-struct obd_statfs {
- __u64 os_type;
- __u64 os_blocks;
- __u64 os_bfree;
- __u64 os_bavail;
- __u64 os_files;
- __u64 os_ffree;
- __u8 os_fsid[40];
- __u32 os_bsize;
- __u32 os_namelen;
- __u64 os_maxbytes;
- __u32 os_state; /**< obd_statfs_state OS_STATE_* flag */
- __u32 os_fprecreated; /* objs available now to the caller */
- /* used in QoS code to find preferred OSTs */
- __u32 os_spare2;
- __u32 os_spare3;
- __u32 os_spare4;
- __u32 os_spare5;
- __u32 os_spare6;
- __u32 os_spare7;
- __u32 os_spare8;
- __u32 os_spare9;
-};
-
-/**
- * File IDentifier.
- *
- * FID is a cluster-wide unique identifier of a file or an object (stripe).
- * FIDs are never reused.
- **/
-struct lu_fid {
- /**
- * FID sequence. Sequence is a unit of migration: all files (objects)
- * with FIDs from a given sequence are stored on the same server.
- * Lustre should support 2^64 objects, so even if each sequence
- * has only a single object we can still enumerate 2^64 objects.
- **/
- __u64 f_seq;
- /* FID number within sequence. */
- __u32 f_oid;
- /**
- * FID version, used to distinguish different versions (in the sense
- * of snapshots, etc.) of the same file system object. Not currently
- * used.
- **/
- __u32 f_ver;
-};
-
-static inline bool fid_is_zero(const struct lu_fid *fid)
-{
- return !fid->f_seq && !fid->f_oid;
-}
-
-struct filter_fid {
- struct lu_fid ff_parent; /* ff_parent.f_ver == file stripe number */
-};
-
-/* keep this one for compatibility */
-struct filter_fid_old {
- struct lu_fid ff_parent;
- __u64 ff_objid;
- __u64 ff_seq;
-};
-
-/* Userspace should treat lu_fid as opaque, and only use the following methods
- * to print or parse them. Other functions (e.g. compare, swab) could be moved
- * here from lustre_idl.h if needed.
- */
-struct lu_fid;
-
-/**
- * Following struct for object attributes, that will be kept inode's EA.
- * Introduced in 2.0 release (please see b15993, for details)
- * Added to all objects since Lustre 2.4 as contains self FID
- */
-struct lustre_mdt_attrs {
- /**
- * Bitfield for supported data in this structure. From enum lma_compat.
- * lma_self_fid and lma_flags are always available.
- */
- __u32 lma_compat;
- /**
- * Per-file incompat feature list. Lustre version should support all
- * flags set in this field. The supported feature mask is available in
- * LMA_INCOMPAT_SUPP.
- */
- __u32 lma_incompat;
- /** FID of this inode */
- struct lu_fid lma_self_fid;
-};
-
-/**
- * Prior to 2.4, the LMA structure also included SOM attributes which has since
- * been moved to a dedicated xattr
- * lma_flags was also removed because of lma_compat/incompat fields.
- */
-#define LMA_OLD_SIZE (sizeof(struct lustre_mdt_attrs) + 5 * sizeof(__u64))
-
-/**
- * OST object IDentifier.
- */
-struct ost_id {
- union {
- struct {
- __u64 oi_id;
- __u64 oi_seq;
- } oi;
- struct lu_fid oi_fid;
- };
-};
-
-#define DOSTID "%#llx:%llu"
-#define POSTID(oi) ostid_seq(oi), ostid_id(oi)
-
-/*
- * The ioctl naming rules:
- * LL_* - works on the currently opened filehandle instead of parent dir
- * *_OBD_* - gets data for both OSC or MDC (LOV, LMV indirectly)
- * *_MDC_* - gets/sets data related to MDC
- * *_LOV_* - gets/sets data related to OSC/LOV
- * *FILE* - called on parent dir and passes in a filename
- * *STRIPE* - set/get lov_user_md
- * *INFO - set/get lov_user_mds_data
- */
-/* lustre_ioctl.h 101-150 */
-#define LL_IOC_GETFLAGS _IOR('f', 151, long)
-#define LL_IOC_SETFLAGS _IOW('f', 152, long)
-#define LL_IOC_CLRFLAGS _IOW('f', 153, long)
-#define LL_IOC_LOV_SETSTRIPE _IOW('f', 154, long)
-#define LL_IOC_LOV_GETSTRIPE _IOW('f', 155, long)
-#define LL_IOC_LOV_SETEA _IOW('f', 156, long)
-/* LL_IOC_RECREATE_OBJ 157 obsolete */
-/* LL_IOC_RECREATE_FID 158 obsolete */
-#define LL_IOC_GROUP_LOCK _IOW('f', 158, long)
-#define LL_IOC_GROUP_UNLOCK _IOW('f', 159, long)
-/* #define LL_IOC_QUOTACHECK 160 OBD_IOC_QUOTACHECK */
-/* #define LL_IOC_POLL_QUOTACHECK 161 OBD_IOC_POLL_QUOTACHECK */
-/* #define LL_IOC_QUOTACTL 162 OBD_IOC_QUOTACTL */
-#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
-/* IOC_LOV_GETINFO 165 obsolete */
-#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
-/* LL_IOC_RMTACL 167 obsolete */
-#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
-#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long)
-#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long)
-#define LL_IOC_LLOOP_INFO _IOWR('f', 171, struct lu_fid)
-#define LL_IOC_LLOOP_DETACH_BYDEV _IOWR('f', 172, long)
-#define LL_IOC_PATH2FID _IOR('f', 173, long)
-#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *)
-#define LL_IOC_GET_MDTIDX _IOR('f', 175, int)
-
-/* lustre_ioctl.h 177-210 */
-#define LL_IOC_HSM_STATE_GET _IOR('f', 211, struct hsm_user_state)
-#define LL_IOC_HSM_STATE_SET _IOW('f', 212, struct hsm_state_set)
-#define LL_IOC_HSM_CT_START _IOW('f', 213, struct lustre_kernelcomm)
-#define LL_IOC_HSM_COPY_START _IOW('f', 214, struct hsm_copy *)
-#define LL_IOC_HSM_COPY_END _IOW('f', 215, struct hsm_copy *)
-#define LL_IOC_HSM_PROGRESS _IOW('f', 216, struct hsm_user_request)
-#define LL_IOC_HSM_REQUEST _IOW('f', 217, struct hsm_user_request)
-#define LL_IOC_DATA_VERSION _IOR('f', 218, struct ioc_data_version)
-#define LL_IOC_LOV_SWAP_LAYOUTS _IOW('f', 219, \
- struct lustre_swap_layouts)
-#define LL_IOC_HSM_ACTION _IOR('f', 220, \
- struct hsm_current_action)
-/* see <lustre_lib.h> for ioctl numbers 221-232 */
-
-#define LL_IOC_LMV_SETSTRIPE _IOWR('f', 240, struct lmv_user_md)
-#define LL_IOC_LMV_GETSTRIPE _IOWR('f', 241, struct lmv_user_md)
-#define LL_IOC_SET_LEASE _IOWR('f', 243, long)
-#define LL_IOC_GET_LEASE _IO('f', 244)
-#define LL_IOC_HSM_IMPORT _IOWR('f', 245, struct hsm_user_import)
-#define LL_IOC_LMV_SET_DEFAULT_STRIPE _IOWR('f', 246, struct lmv_user_md)
-#define LL_IOC_MIGRATE _IOR('f', 247, int)
-#define LL_IOC_FID2MDTIDX _IOWR('f', 248, struct lu_fid)
-#define LL_IOC_GETPARENT _IOWR('f', 249, struct getparent)
-
-/* Lease types for use as arg and return of LL_IOC_{GET,SET}_LEASE ioctl. */
-enum ll_lease_type {
- LL_LEASE_RDLCK = 0x1,
- LL_LEASE_WRLCK = 0x2,
- LL_LEASE_UNLCK = 0x4,
-};
-
-#define LL_STATFS_LMV 1
-#define LL_STATFS_LOV 2
-#define LL_STATFS_NODELAY 4
-
-#define IOC_MDC_TYPE 'i'
-#define IOC_MDC_LOOKUP _IOWR(IOC_MDC_TYPE, 20, struct obd_device *)
-#define IOC_MDC_GETFILESTRIPE _IOWR(IOC_MDC_TYPE, 21, struct lov_user_md *)
-#define IOC_MDC_GETFILEINFO _IOWR(IOC_MDC_TYPE, 22, struct lov_user_mds_data *)
-#define LL_IOC_MDC_GETINFO _IOWR(IOC_MDC_TYPE, 23, struct lov_user_mds_data *)
-
-#define MAX_OBD_NAME 128 /* If this changes, a NEW ioctl must be added */
-
-/* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular
- * files, but are unlikely to be used in practice and are not harmful if
- * used incorrectly. O_NOCTTY and FASYNC are only meaningful for character
- * devices and are safe for use on new files (See LU-812, LU-4209).
- */
-#define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC)
-
-#define LL_FILE_IGNORE_LOCK 0x00000001
-#define LL_FILE_GROUP_LOCKED 0x00000002
-#define LL_FILE_READAHEA 0x00000004
-#define LL_FILE_LOCKED_DIRECTIO 0x00000008 /* client-side locks with dio */
-#define LL_FILE_LOCKLESS_IO 0x00000010 /* server-side locks with cio */
-#define LL_FILE_RMTACL 0x00000020
-
-#define LOV_USER_MAGIC_V1 0x0BD10BD0
-#define LOV_USER_MAGIC LOV_USER_MAGIC_V1
-#define LOV_USER_MAGIC_JOIN_V1 0x0BD20BD0
-#define LOV_USER_MAGIC_V3 0x0BD30BD0
-/* 0x0BD40BD0 is occupied by LOV_MAGIC_MIGRATE */
-#define LOV_USER_MAGIC_SPECIFIC 0x0BD50BD0 /* for specific OSTs */
-
-#define LMV_USER_MAGIC 0x0CD30CD0 /*default lmv magic*/
-
-#define LOV_PATTERN_RAID0 0x001
-#define LOV_PATTERN_RAID1 0x002
-#define LOV_PATTERN_FIRST 0x100
-#define LOV_PATTERN_CMOBD 0x200
-
-#define LOV_PATTERN_F_MASK 0xffff0000
-#define LOV_PATTERN_F_HOLE 0x40000000 /* there is hole in LOV EA */
-#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */
-
-#define LOV_MAXPOOLNAME 15
-#define LOV_POOLNAMEF "%.15s"
-
-#define LOV_MIN_STRIPE_BITS 16 /* maximum PAGE_SIZE (ia64), power of 2 */
-#define LOV_MIN_STRIPE_SIZE (1 << LOV_MIN_STRIPE_BITS)
-#define LOV_MAX_STRIPE_COUNT_OLD 160
-/* This calculation is crafted so that input of 4096 will result in 160
- * which in turn is equal to old maximal stripe count.
- * XXX: In fact this is too simplified for now, what it also need is to get
- * ea_type argument to clearly know how much space each stripe consumes.
- *
- * The limit of 12 pages is somewhat arbitrary, but is a reasonably large
- * allocation that is sufficient for the current generation of systems.
- *
- * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1)
- */
-#define LOV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */
-#define LOV_ALL_STRIPES 0xffff /* only valid for directories */
-#define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */
-
-#define XATTR_LUSTRE_PREFIX "lustre."
-#define XATTR_LUSTRE_LOV "lustre.lov"
-
-#define lov_user_ost_data lov_user_ost_data_v1
-struct lov_user_ost_data_v1 { /* per-stripe data structure */
- struct ost_id l_ost_oi; /* OST object ID */
- __u32 l_ost_gen; /* generation of this OST index */
- __u32 l_ost_idx; /* OST index in LOV */
-} __packed;
-
-#define lov_user_md lov_user_md_v1
-struct lov_user_md_v1 { /* LOV EA user data (host-endian) */
- __u32 lmm_magic; /* magic number = LOV_USER_MAGIC_V1 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- struct ost_id lmm_oi; /* LOV object ID */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- union {
- __u16 lmm_stripe_offset; /* starting stripe offset in
- * lmm_objects, use when writing
- */
- __u16 lmm_layout_gen; /* layout generation number
- * used when reading
- */
- };
- struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
-} __attribute__((packed, __may_alias__));
-
-struct lov_user_md_v3 { /* LOV EA user data (host-endian) */
- __u32 lmm_magic; /* magic number = LOV_USER_MAGIC_V3 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- struct ost_id lmm_oi; /* LOV object ID */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- union {
- __u16 lmm_stripe_offset; /* starting stripe offset in
- * lmm_objects, use when writing
- */
- __u16 lmm_layout_gen; /* layout generation number
- * used when reading
- */
- };
- char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */
- struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
-} __packed;
-
-static inline __u32 lov_user_md_size(__u16 stripes, __u32 lmm_magic)
-{
- if (lmm_magic == LOV_USER_MAGIC_V1)
- return sizeof(struct lov_user_md_v1) +
- stripes * sizeof(struct lov_user_ost_data_v1);
- return sizeof(struct lov_user_md_v3) +
- stripes * sizeof(struct lov_user_ost_data_v1);
-}
-
-/* Compile with -D_LARGEFILE64_SOURCE or -D_GNU_SOURCE (or #define) to
- * use this. It is unsafe to #define those values in this header as it
- * is possible the application has already #included <sys/stat.h>. */
-#ifdef HAVE_LOV_USER_MDS_DATA
-#define lov_user_mds_data lov_user_mds_data_v1
-struct lov_user_mds_data_v1 {
- lstat_t lmd_st; /* MDS stat struct */
- struct lov_user_md_v1 lmd_lmm; /* LOV EA V1 user data */
-} __packed;
-
-struct lov_user_mds_data_v3 {
- lstat_t lmd_st; /* MDS stat struct */
- struct lov_user_md_v3 lmd_lmm; /* LOV EA V3 user data */
-} __packed;
-#endif
-
-struct lmv_user_mds_data {
- struct lu_fid lum_fid;
- __u32 lum_padding;
- __u32 lum_mds;
-};
-
-enum lmv_hash_type {
- LMV_HASH_TYPE_UNKNOWN = 0, /* 0 is reserved for testing purpose */
- LMV_HASH_TYPE_ALL_CHARS = 1,
- LMV_HASH_TYPE_FNV_1A_64 = 2,
-};
-
-#define LMV_HASH_NAME_ALL_CHARS "all_char"
-#define LMV_HASH_NAME_FNV_1A_64 "fnv_1a_64"
-
-/*
- * Got this according to how get LOV_MAX_STRIPE_COUNT, see above,
- * (max buffer size - lmv+rpc header) / sizeof(struct lmv_user_mds_data)
- */
-#define LMV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */
-#define lmv_user_md lmv_user_md_v1
-struct lmv_user_md_v1 {
- __u32 lum_magic; /* must be the first field */
- __u32 lum_stripe_count; /* dirstripe count */
- __u32 lum_stripe_offset; /* MDT idx for default dirstripe */
- __u32 lum_hash_type; /* Dir stripe policy */
- __u32 lum_type; /* LMV type: default or normal */
- __u32 lum_padding1;
- __u32 lum_padding2;
- __u32 lum_padding3;
- char lum_pool_name[LOV_MAXPOOLNAME + 1];
- struct lmv_user_mds_data lum_objects[0];
-} __packed;
-
-static inline int lmv_user_md_size(int stripes, int lmm_magic)
-{
- return sizeof(struct lmv_user_md) +
- stripes * sizeof(struct lmv_user_mds_data);
-}
-
-struct ll_recreate_obj {
- __u64 lrc_id;
- __u32 lrc_ost_idx;
-};
-
-struct ll_fid {
- __u64 id; /* holds object id */
- __u32 generation; /* holds object generation */
- __u32 f_type; /* holds object type or stripe idx when passing it to
- * OST for saving into EA. */
-};
-
-#define UUID_MAX 40
-struct obd_uuid {
- char uuid[UUID_MAX];
-};
-
-static inline bool obd_uuid_equals(const struct obd_uuid *u1,
- const struct obd_uuid *u2)
-{
- return strcmp((char *)u1->uuid, (char *)u2->uuid) == 0;
-}
-
-static inline int obd_uuid_empty(struct obd_uuid *uuid)
-{
- return uuid->uuid[0] == '\0';
-}
-
-static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp)
-{
- strncpy((char *)uuid->uuid, tmp, sizeof(*uuid));
- uuid->uuid[sizeof(*uuid) - 1] = '\0';
-}
-
-/* For printf's only, make sure uuid is terminated */
-static inline char *obd_uuid2str(const struct obd_uuid *uuid)
-{
- if (!uuid)
- return NULL;
-
- if (uuid->uuid[sizeof(*uuid) - 1] != '\0') {
- /* Obviously not safe, but for printfs, no real harm done...
- * we're always null-terminated, even in a race.
- */
- static char temp[sizeof(*uuid)];
-
- memcpy(temp, uuid->uuid, sizeof(*uuid) - 1);
- temp[sizeof(*uuid) - 1] = '\0';
- return temp;
- }
- return (char *)(uuid->uuid);
-}
-
-/* Extract fsname from uuid (or target name) of a target
- * e.g. (myfs-OST0007_UUID -> myfs)
- * see also deuuidify.
- */
-static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
-{
- char *p;
-
- strncpy(buf, uuid, buflen - 1);
- buf[buflen - 1] = '\0';
- p = strrchr(buf, '-');
- if (p)
- *p = '\0';
-}
-
-/* printf display format
- * * usage: printf("file FID is "DFID"\n", PFID(fid));
- */
-#define FID_NOBRACE_LEN 40
-#define FID_LEN (FID_NOBRACE_LEN + 2)
-#define DFID_NOBRACE "%#llx:0x%x:0x%x"
-#define DFID "[" DFID_NOBRACE "]"
-#define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver
-
-/* scanf input parse format for fids in DFID_NOBRACE format
- * Need to strip '[' from DFID format first or use "["SFID"]" at caller.
- * usage: sscanf(fidstr, SFID, RFID(&fid));
- */
-#define SFID "0x%llx:0x%x:0x%x"
-#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver)
-
-/********* Quotas **********/
-
-#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
-#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
-#define Q_GETOINFO 0x800102 /* get obd quota info */
-#define Q_GETOQUOTA 0x800103 /* get obd quotas */
-#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
-
-/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */
-#define LUSTRE_Q_QUOTAON 0x800002 /* deprecated as of 2.4 */
-#define LUSTRE_Q_QUOTAOFF 0x800003 /* deprecated as of 2.4 */
-#define LUSTRE_Q_GETINFO 0x800005 /* get information about quota files */
-#define LUSTRE_Q_SETINFO 0x800006 /* set information about quota files */
-#define LUSTRE_Q_GETQUOTA 0x800007 /* get user quota structure */
-#define LUSTRE_Q_SETQUOTA 0x800008 /* set user quota structure */
-/* lustre-specific control commands */
-#define LUSTRE_Q_INVALIDATE 0x80000b /* deprecated as of 2.4 */
-#define LUSTRE_Q_FINVALIDATE 0x80000c /* deprecated as of 2.4 */
-
-#define UGQUOTA 2 /* set both USRQUOTA and GRPQUOTA */
-
-#define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629
-
-/* permission */
-#define N_PERMS_MAX 64
-
-struct perm_downcall_data {
- __u64 pdd_nid;
- __u32 pdd_perm;
- __u32 pdd_padding;
-};
-
-struct identity_downcall_data {
- __u32 idd_magic;
- __u32 idd_err;
- __u32 idd_uid;
- __u32 idd_gid;
- __u32 idd_nperms;
- __u32 idd_ngroups;
- struct perm_downcall_data idd_perms[N_PERMS_MAX];
- __u32 idd_groups[0];
-};
-
-/* lustre volatile file support
- * file name header: .^L^S^T^R:volatile"
- */
-#define LUSTRE_VOLATILE_HDR ".\x0c\x13\x14\x12:VOLATILE"
-#define LUSTRE_VOLATILE_HDR_LEN 14
-/* hdr + MDT index */
-#define LUSTRE_VOLATILE_IDX LUSTRE_VOLATILE_HDR":%.4X:"
-
-enum lustre_quota_version {
- LUSTRE_QUOTA_V2 = 1
-};
-
-/* XXX: same as if_dqinfo struct in kernel */
-struct obd_dqinfo {
- __u64 dqi_bgrace;
- __u64 dqi_igrace;
- __u32 dqi_flags;
- __u32 dqi_valid;
-};
-
-/* XXX: same as if_dqblk struct in kernel, plus one padding */
-struct obd_dqblk {
- __u64 dqb_bhardlimit;
- __u64 dqb_bsoftlimit;
- __u64 dqb_curspace;
- __u64 dqb_ihardlimit;
- __u64 dqb_isoftlimit;
- __u64 dqb_curinodes;
- __u64 dqb_btime;
- __u64 dqb_itime;
- __u32 dqb_valid;
- __u32 dqb_padding;
-};
-
-enum {
- QC_GENERAL = 0,
- QC_MDTIDX = 1,
- QC_OSTIDX = 2,
- QC_UUID = 3
-};
-
-struct if_quotactl {
- __u32 qc_cmd;
- __u32 qc_type;
- __u32 qc_id;
- __u32 qc_stat;
- __u32 qc_valid;
- __u32 qc_idx;
- struct obd_dqinfo qc_dqinfo;
- struct obd_dqblk qc_dqblk;
- char obd_type[16];
- struct obd_uuid obd_uuid;
-};
-
-/* swap layout flags */
-#define SWAP_LAYOUTS_CHECK_DV1 (1 << 0)
-#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
-#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
-#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
-#define SWAP_LAYOUTS_CLOSE (1 << 4)
-
-/* Swap XATTR_NAME_HSM as well, only on the MDT so far */
-#define SWAP_LAYOUTS_MDS_HSM (1 << 31)
-struct lustre_swap_layouts {
- __u64 sl_flags;
- __u32 sl_fd;
- __u32 sl_gid;
- __u64 sl_dv1;
- __u64 sl_dv2;
-};
-
-/********* Changelogs **********/
-/** Changelog record types */
-enum changelog_rec_type {
- CL_MARK = 0,
- CL_CREATE = 1, /* namespace */
- CL_MKDIR = 2, /* namespace */
- CL_HARDLINK = 3, /* namespace */
- CL_SOFTLINK = 4, /* namespace */
- CL_MKNOD = 5, /* namespace */
- CL_UNLINK = 6, /* namespace */
- CL_RMDIR = 7, /* namespace */
- CL_RENAME = 8, /* namespace */
- CL_EXT = 9, /* namespace extended record (2nd half of rename) */
- CL_OPEN = 10, /* not currently used */
- CL_CLOSE = 11, /* may be written to log only with mtime change */
- CL_LAYOUT = 12, /* file layout/striping modified */
- CL_TRUNC = 13,
- CL_SETATTR = 14,
- CL_XATTR = 15,
- CL_HSM = 16, /* HSM specific events, see flags */
- CL_MTIME = 17, /* Precedence: setattr > mtime > ctime > atime */
- CL_CTIME = 18,
- CL_ATIME = 19,
- CL_LAST
-};
-
-static inline const char *changelog_type2str(int type)
-{
- static const char *changelog_str[] = {
- "MARK", "CREAT", "MKDIR", "HLINK", "SLINK", "MKNOD", "UNLNK",
- "RMDIR", "RENME", "RNMTO", "OPEN", "CLOSE", "LYOUT", "TRUNC",
- "SATTR", "XATTR", "HSM", "MTIME", "CTIME", "ATIME",
- };
-
- if (type >= 0 && type < CL_LAST)
- return changelog_str[type];
- return NULL;
-}
-
-/* per-record flags */
-#define CLF_FLAGSHIFT 12
-#define CLF_FLAGMASK ((1U << CLF_FLAGSHIFT) - 1)
-#define CLF_VERMASK (~CLF_FLAGMASK)
-enum changelog_rec_flags {
- CLF_VERSION = 0x1000,
- CLF_RENAME = 0x2000,
- CLF_JOBID = 0x4000,
- CLF_SUPPORTED = CLF_VERSION | CLF_RENAME | CLF_JOBID
-};
-
-/* Anything under the flagmask may be per-type (if desired) */
-/* Flags for unlink */
-#define CLF_UNLINK_LAST 0x0001 /* Unlink of last hardlink */
-#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */
- /* HSM cleaning needed */
-/* Flags for rename */
-#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of
- * target
- */
-#define CLF_RENAME_LAST_EXISTS 0x0002 /* rename unlink last hardlink of target
- * has an archive in backend
- */
-
-/* Flags for HSM */
-/* 12b used (from high weight to low weight):
- * 2b for flags
- * 3b for event
- * 7b for error code
- */
-#define CLF_HSM_ERR_L 0 /* HSM return code, 7 bits */
-#define CLF_HSM_ERR_H 6
-#define CLF_HSM_EVENT_L 7 /* HSM event, 3 bits, see enum hsm_event */
-#define CLF_HSM_EVENT_H 9
-#define CLF_HSM_FLAG_L 10 /* HSM flags, 2 bits, 1 used, 1 spare */
-#define CLF_HSM_FLAG_H 11
-#define CLF_HSM_SPARE_L 12 /* 4 spare bits */
-#define CLF_HSM_SPARE_H 15
-#define CLF_HSM_LAST 15
-
-/* Remove bits higher than _h, then extract the value
- * between _h and _l by shifting lower weigth to bit 0.
- */
-#define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \
- >> (CLF_HSM_LAST - _h + _l))
-
-#define CLF_HSM_SUCCESS 0x00
-#define CLF_HSM_MAXERROR 0x7E
-#define CLF_HSM_ERROVERFLOW 0x7F
-
-#define CLF_HSM_DIRTY 1 /* file is dirty after HSM request end */
-
-/* 3 bits field => 8 values allowed */
-enum hsm_event {
- HE_ARCHIVE = 0,
- HE_RESTORE = 1,
- HE_CANCEL = 2,
- HE_RELEASE = 3,
- HE_REMOVE = 4,
- HE_STATE = 5,
- HE_SPARE1 = 6,
- HE_SPARE2 = 7,
-};
-
-static inline enum hsm_event hsm_get_cl_event(__u16 flags)
-{
- return CLF_GET_BITS(flags, CLF_HSM_EVENT_H, CLF_HSM_EVENT_L);
-}
-
-static inline void hsm_set_cl_event(int *flags, enum hsm_event he)
-{
- *flags |= (he << CLF_HSM_EVENT_L);
-}
-
-static inline __u16 hsm_get_cl_flags(int flags)
-{
- return CLF_GET_BITS(flags, CLF_HSM_FLAG_H, CLF_HSM_FLAG_L);
-}
-
-static inline void hsm_set_cl_flags(int *flags, int bits)
-{
- *flags |= (bits << CLF_HSM_FLAG_L);
-}
-
-static inline int hsm_get_cl_error(int flags)
-{
- return CLF_GET_BITS(flags, CLF_HSM_ERR_H, CLF_HSM_ERR_L);
-}
-
-static inline void hsm_set_cl_error(int *flags, int error)
-{
- *flags |= (error << CLF_HSM_ERR_L);
-}
-
-enum changelog_send_flag {
- /* Not yet implemented */
- CHANGELOG_FLAG_FOLLOW = 0x01,
- /*
- * Blocking IO makes sense in case of slow user parsing of the records,
- * but it also prevents us from cleaning up if the records are not
- * consumed.
- */
- CHANGELOG_FLAG_BLOCK = 0x02,
- /* Pack jobid into the changelog records if available. */
- CHANGELOG_FLAG_JOBID = 0x04,
-};
-
-#define CR_MAXSIZE cfs_size_round(2 * NAME_MAX + 2 + \
- changelog_rec_offset(CLF_SUPPORTED))
-
-/* 31 usable bytes string + null terminator. */
-#define LUSTRE_JOBID_SIZE 32
-
-/*
- * This is the minimal changelog record. It can contain extensions
- * such as rename fields or process jobid. Its exact content is described
- * by the cr_flags.
- *
- * Extensions are packed in the same order as their corresponding flags.
- */
-struct changelog_rec {
- __u16 cr_namelen;
- __u16 cr_flags; /**< \a changelog_rec_flags */
- __u32 cr_type; /**< \a changelog_rec_type */
- __u64 cr_index; /**< changelog record number */
- __u64 cr_prev; /**< last index for this target fid */
- __u64 cr_time;
- union {
- struct lu_fid cr_tfid; /**< target fid */
- __u32 cr_markerflags; /**< CL_MARK flags */
- };
- struct lu_fid cr_pfid; /**< parent fid */
-} __packed;
-
-/* Changelog extension for RENAME. */
-struct changelog_ext_rename {
- struct lu_fid cr_sfid; /**< source fid, or zero */
- struct lu_fid cr_spfid; /**< source parent fid, or zero */
-};
-
-/* Changelog extension to include JOBID. */
-struct changelog_ext_jobid {
- char cr_jobid[LUSTRE_JOBID_SIZE]; /**< zero-terminated string. */
-};
-
-static inline size_t changelog_rec_offset(enum changelog_rec_flags crf)
-{
- size_t size = sizeof(struct changelog_rec);
-
- if (crf & CLF_RENAME)
- size += sizeof(struct changelog_ext_rename);
-
- if (crf & CLF_JOBID)
- size += sizeof(struct changelog_ext_jobid);
-
- return size;
-}
-
-static inline size_t changelog_rec_size(struct changelog_rec *rec)
-{
- return changelog_rec_offset(rec->cr_flags);
-}
-
-static inline size_t changelog_rec_varsize(struct changelog_rec *rec)
-{
- return changelog_rec_size(rec) - sizeof(*rec) + rec->cr_namelen;
-}
-
-static inline
-struct changelog_ext_rename *changelog_rec_rename(struct changelog_rec *rec)
-{
- enum changelog_rec_flags crf = rec->cr_flags & CLF_VERSION;
-
- return (struct changelog_ext_rename *)((char *)rec +
- changelog_rec_offset(crf));
-}
-
-/* The jobid follows the rename extension, if present */
-static inline
-struct changelog_ext_jobid *changelog_rec_jobid(struct changelog_rec *rec)
-{
- enum changelog_rec_flags crf = rec->cr_flags &
- (CLF_VERSION | CLF_RENAME);
-
- return (struct changelog_ext_jobid *)((char *)rec +
- changelog_rec_offset(crf));
-}
-
-/* The name follows the rename and jobid extensions, if present */
-static inline char *changelog_rec_name(struct changelog_rec *rec)
-{
- return (char *)rec + changelog_rec_offset(rec->cr_flags &
- CLF_SUPPORTED);
-}
-
-static inline size_t changelog_rec_snamelen(struct changelog_rec *rec)
-{
- return rec->cr_namelen - strlen(changelog_rec_name(rec)) - 1;
-}
-
-static inline char *changelog_rec_sname(struct changelog_rec *rec)
-{
- char *cr_name = changelog_rec_name(rec);
-
- return cr_name + strlen(cr_name) + 1;
-}
-
-/**
- * Remap a record to the desired format as specified by the crf flags.
- * The record must be big enough to contain the final remapped version.
- * Superfluous extension fields are removed and missing ones are added
- * and zeroed. The flags of the record are updated accordingly.
- *
- * The jobid and rename extensions can be added to a record, to match the
- * format an application expects, typically. In this case, the newly added
- * fields will be zeroed.
- * The Jobid field can be removed, to guarantee compatibility with older
- * clients that don't expect this field in the records they process.
- *
- * The following assumptions are being made:
- * - CLF_RENAME will not be removed
- * - CLF_JOBID will not be added without CLF_RENAME being added too
- *
- * @param[in,out] rec The record to remap.
- * @param[in] crf_wanted Flags describing the desired extensions.
- */
-static inline void changelog_remap_rec(struct changelog_rec *rec,
- enum changelog_rec_flags crf_wanted)
-{
- char *jid_mov, *rnm_mov;
-
- crf_wanted &= CLF_SUPPORTED;
-
- if ((rec->cr_flags & CLF_SUPPORTED) == crf_wanted)
- return;
-
- /* First move the variable-length name field */
- memmove((char *)rec + changelog_rec_offset(crf_wanted),
- changelog_rec_name(rec), rec->cr_namelen);
-
- /* Locations of jobid and rename extensions in the remapped record */
- jid_mov = (char *)rec +
- changelog_rec_offset(crf_wanted & ~CLF_JOBID);
- rnm_mov = (char *)rec +
- changelog_rec_offset(crf_wanted & ~(CLF_JOBID | CLF_RENAME));
-
- /* Move the extension fields to the desired positions */
- if ((crf_wanted & CLF_JOBID) && (rec->cr_flags & CLF_JOBID))
- memmove(jid_mov, changelog_rec_jobid(rec),
- sizeof(struct changelog_ext_jobid));
-
- if ((crf_wanted & CLF_RENAME) && (rec->cr_flags & CLF_RENAME))
- memmove(rnm_mov, changelog_rec_rename(rec),
- sizeof(struct changelog_ext_rename));
-
- /* Clear newly added fields */
- if ((crf_wanted & CLF_JOBID) && !(rec->cr_flags & CLF_JOBID))
- memset(jid_mov, 0, sizeof(struct changelog_ext_jobid));
-
- if ((crf_wanted & CLF_RENAME) && !(rec->cr_flags & CLF_RENAME))
- memset(rnm_mov, 0, sizeof(struct changelog_ext_rename));
-
- /* Update the record's flags accordingly */
- rec->cr_flags = (rec->cr_flags & CLF_FLAGMASK) | crf_wanted;
-}
-
-struct ioc_changelog {
- __u64 icc_recno;
- __u32 icc_mdtindex;
- __u32 icc_id;
- __u32 icc_flags;
-};
-
-enum changelog_message_type {
- CL_RECORD = 10, /* message is a changelog_rec */
- CL_EOF = 11, /* at end of current changelog */
-};
-
-/********* Misc **********/
-
-struct ioc_data_version {
- __u64 idv_version;
- __u64 idv_flags; /* See LL_DV_xxx */
-};
-
-#define LL_DV_RD_FLUSH (1 << 0) /* Flush dirty pages from clients */
-#define LL_DV_WR_FLUSH (1 << 1) /* Flush all caching pages from clients */
-
-#ifndef offsetof
-# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb)))
-#endif
-
-#define dot_lustre_name ".lustre"
-
-/********* HSM **********/
-
-/** HSM per-file state
- * See HSM_FLAGS below.
- */
-enum hsm_states {
- HS_NONE = 0x00000000,
- HS_EXISTS = 0x00000001,
- HS_DIRTY = 0x00000002,
- HS_RELEASED = 0x00000004,
- HS_ARCHIVED = 0x00000008,
- HS_NORELEASE = 0x00000010,
- HS_NOARCHIVE = 0x00000020,
- HS_LOST = 0x00000040,
-};
-
-/* HSM user-setable flags. */
-#define HSM_USER_MASK (HS_NORELEASE | HS_NOARCHIVE | HS_DIRTY)
-
-/* Other HSM flags. */
-#define HSM_STATUS_MASK (HS_EXISTS | HS_LOST | HS_RELEASED | HS_ARCHIVED)
-
-/*
- * All HSM-related possible flags that could be applied to a file.
- * This should be kept in sync with hsm_states.
- */
-#define HSM_FLAGS_MASK (HSM_USER_MASK | HSM_STATUS_MASK)
-
-/**
- * HSM request progress state
- */
-enum hsm_progress_states {
- HPS_WAITING = 1,
- HPS_RUNNING = 2,
- HPS_DONE = 3,
-};
-
-#define HPS_NONE 0
-
-static inline char *hsm_progress_state2name(enum hsm_progress_states s)
-{
- switch (s) {
- case HPS_WAITING: return "waiting";
- case HPS_RUNNING: return "running";
- case HPS_DONE: return "done";
- default: return "unknown";
- }
-}
-
-struct hsm_extent {
- __u64 offset;
- __u64 length;
-} __packed;
-
-/**
- * Current HSM states of a Lustre file.
- *
- * This structure purpose is to be sent to user-space mainly. It describes the
- * current HSM flags and in-progress action.
- */
-struct hsm_user_state {
- /** Current HSM states, from enum hsm_states. */
- __u32 hus_states;
- __u32 hus_archive_id;
- /** The current undergoing action, if there is one */
- __u32 hus_in_progress_state;
- __u32 hus_in_progress_action;
- struct hsm_extent hus_in_progress_location;
- char hus_extended_info[];
-};
-
-struct hsm_state_set_ioc {
- struct lu_fid hssi_fid;
- __u64 hssi_setmask;
- __u64 hssi_clearmask;
-};
-
-/*
- * This structure describes the current in-progress action for a file.
- * it is returned to user space and send over the wire
- */
-struct hsm_current_action {
- /** The current undergoing action, if there is one */
- /* state is one of hsm_progress_states */
- __u32 hca_state;
- /* action is one of hsm_user_action */
- __u32 hca_action;
- struct hsm_extent hca_location;
-};
-
-/***** HSM user requests ******/
-/* User-generated (lfs/ioctl) request types */
-enum hsm_user_action {
- HUA_NONE = 1, /* no action (noop) */
- HUA_ARCHIVE = 10, /* copy to hsm */
- HUA_RESTORE = 11, /* prestage */
- HUA_RELEASE = 12, /* drop ost objects */
- HUA_REMOVE = 13, /* remove from archive */
- HUA_CANCEL = 14 /* cancel a request */
-};
-
-static inline char *hsm_user_action2name(enum hsm_user_action a)
-{
- switch (a) {
- case HUA_NONE: return "NOOP";
- case HUA_ARCHIVE: return "ARCHIVE";
- case HUA_RESTORE: return "RESTORE";
- case HUA_RELEASE: return "RELEASE";
- case HUA_REMOVE: return "REMOVE";
- case HUA_CANCEL: return "CANCEL";
- default: return "UNKNOWN";
- }
-}
-
-/*
- * List of hr_flags (bit field)
- */
-#define HSM_FORCE_ACTION 0x0001
-/* used by CT, connot be set by user */
-#define HSM_GHOST_COPY 0x0002
-
-/**
- * Contains all the fixed part of struct hsm_user_request.
- *
- */
-struct hsm_request {
- __u32 hr_action; /* enum hsm_user_action */
- __u32 hr_archive_id; /* archive id, used only with HUA_ARCHIVE */
- __u64 hr_flags; /* request flags */
- __u32 hr_itemcount; /* item count in hur_user_item vector */
- __u32 hr_data_len;
-};
-
-struct hsm_user_item {
- struct lu_fid hui_fid;
- struct hsm_extent hui_extent;
-} __packed;
-
-struct hsm_user_request {
- struct hsm_request hur_request;
- struct hsm_user_item hur_user_item[0];
- /* extra data blob at end of struct (after all
- * hur_user_items), only use helpers to access it
- */
-} __packed;
-
-/** Return pointer to data field in a hsm user request */
-static inline void *hur_data(struct hsm_user_request *hur)
-{
- return &hur->hur_user_item[hur->hur_request.hr_itemcount];
-}
-
-/**
- * Compute the current length of the provided hsm_user_request. This returns -1
- * instead of an errno because ssize_t is defined to be only [ -1, SSIZE_MAX ]
- *
- * return -1 on bounds check error.
- */
-static inline ssize_t hur_len(struct hsm_user_request *hur)
-{
- __u64 size;
-
- /* can't overflow a __u64 since hr_itemcount is only __u32 */
- size = offsetof(struct hsm_user_request, hur_user_item[0]) +
- (__u64)hur->hur_request.hr_itemcount *
- sizeof(hur->hur_user_item[0]) + hur->hur_request.hr_data_len;
-
- if (size != (ssize_t)size)
- return -1;
-
- return size;
-}
-
-/****** HSM RPCs to copytool *****/
-/* Message types the copytool may receive */
-enum hsm_message_type {
- HMT_ACTION_LIST = 100, /* message is a hsm_action_list */
-};
-
-/* Actions the copytool may be instructed to take for a given action_item */
-enum hsm_copytool_action {
- HSMA_NONE = 10, /* no action */
- HSMA_ARCHIVE = 20, /* arbitrary offset */
- HSMA_RESTORE = 21,
- HSMA_REMOVE = 22,
- HSMA_CANCEL = 23
-};
-
-static inline char *hsm_copytool_action2name(enum hsm_copytool_action a)
-{
- switch (a) {
- case HSMA_NONE: return "NOOP";
- case HSMA_ARCHIVE: return "ARCHIVE";
- case HSMA_RESTORE: return "RESTORE";
- case HSMA_REMOVE: return "REMOVE";
- case HSMA_CANCEL: return "CANCEL";
- default: return "UNKNOWN";
- }
-}
-
-/* Copytool item action description */
-struct hsm_action_item {
- __u32 hai_len; /* valid size of this struct */
- __u32 hai_action; /* hsm_copytool_action, but use known size */
- struct lu_fid hai_fid; /* Lustre FID to operated on */
- struct lu_fid hai_dfid; /* fid used for data access */
- struct hsm_extent hai_extent; /* byte range to operate on */
- __u64 hai_cookie; /* action cookie from coordinator */
- __u64 hai_gid; /* grouplock id */
- char hai_data[0]; /* variable length */
-} __packed;
-
-/*
- * helper function which print in hexa the first bytes of
- * hai opaque field
- * \param hai [IN] record to print
- * \param buffer [OUT] output buffer
- * \param len [IN] max buffer len
- * \retval buffer
- */
-static inline char *hai_dump_data_field(struct hsm_action_item *hai,
- char *buffer, size_t len)
-{
- int i, data_len;
- char *ptr;
-
- ptr = buffer;
- data_len = hai->hai_len - sizeof(*hai);
- for (i = 0; (i < data_len) && (len > 2); i++) {
- snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]);
- ptr += 2;
- len -= 2;
- }
-
- *ptr = '\0';
-
- return buffer;
-}
-
-/* Copytool action list */
-#define HAL_VERSION 1
-#define HAL_MAXSIZE LNET_MTU /* bytes, used in userspace only */
-struct hsm_action_list {
- __u32 hal_version;
- __u32 hal_count; /* number of hai's to follow */
- __u64 hal_compound_id; /* returned by coordinator */
- __u64 hal_flags;
- __u32 hal_archive_id; /* which archive backend */
- __u32 padding1;
- char hal_fsname[0]; /* null-terminated */
- /* struct hsm_action_item[hal_count] follows, aligned on 8-byte
- * boundaries. See hai_first
- */
-} __packed;
-
-#ifndef HAVE_CFS_SIZE_ROUND
-static inline int cfs_size_round(int val)
-{
- return (val + 7) & (~0x7);
-}
-
-#define HAVE_CFS_SIZE_ROUND
-#endif
-
-/* Return pointer to first hai in action list */
-static inline struct hsm_action_item *hai_first(struct hsm_action_list *hal)
-{
- return (struct hsm_action_item *)(hal->hal_fsname +
- cfs_size_round(strlen(hal-> \
- hal_fsname)
- + 1));
-}
-
-/* Return pointer to next hai */
-static inline struct hsm_action_item *hai_next(struct hsm_action_item *hai)
-{
- return (struct hsm_action_item *)((char *)hai +
- cfs_size_round(hai->hai_len));
-}
-
-/* Return size of an hsm_action_list */
-static inline int hal_size(struct hsm_action_list *hal)
-{
- int i, sz;
- struct hsm_action_item *hai;
-
- sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1);
- hai = hai_first(hal);
- for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
- sz += cfs_size_round(hai->hai_len);
-
- return sz;
-}
-
-/* HSM file import
- * describe the attributes to be set on imported file
- */
-struct hsm_user_import {
- __u64 hui_size;
- __u64 hui_atime;
- __u64 hui_mtime;
- __u32 hui_atime_ns;
- __u32 hui_mtime_ns;
- __u32 hui_uid;
- __u32 hui_gid;
- __u32 hui_mode;
- __u32 hui_archive_id;
-};
-
-/* Copytool progress reporting */
-#define HP_FLAG_COMPLETED 0x01
-#define HP_FLAG_RETRY 0x02
-
-struct hsm_progress {
- struct lu_fid hp_fid;
- __u64 hp_cookie;
- struct hsm_extent hp_extent;
- __u16 hp_flags;
- __u16 hp_errval; /* positive val */
- __u32 padding;
-};
-
-struct hsm_copy {
- __u64 hc_data_version;
- __u16 hc_flags;
- __u16 hc_errval; /* positive val */
- __u32 padding;
- struct hsm_action_item hc_hai;
-};
-
-/** @} lustreuser */
-
-#endif /* _LUSTRE_USER_H */
diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h
deleted file mode 100644
index 19c9135e2273..000000000000
--- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _LUSTRE_VER_H_
-#define _LUSTRE_VER_H_
-
-#define LUSTRE_MAJOR 2
-#define LUSTRE_MINOR 6
-#define LUSTRE_PATCH 99
-#define LUSTRE_FIX 0
-#define LUSTRE_VERSION_STRING "2.6.99"
-
-#define OBD_OCD_VERSION(major, minor, patch, fix) \
- (((major) << 24) + ((minor) << 16) + ((patch) << 8) + (fix))
-
-#define OBD_OCD_VERSION_MAJOR(version) ((int)((version) >> 24) & 255)
-#define OBD_OCD_VERSION_MINOR(version) ((int)((version) >> 16) & 255)
-#define OBD_OCD_VERSION_PATCH(version) ((int)((version) >> 8) & 255)
-#define OBD_OCD_VERSION_FIX(version) ((int)((version) >> 0) & 255)
-
-#define LUSTRE_VERSION_CODE \
- OBD_OCD_VERSION(LUSTRE_MAJOR, LUSTRE_MINOR, LUSTRE_PATCH, LUSTRE_FIX)
-
-/*
- * If lustre version of client and servers it connects to differs by more
- * than this amount, client would issue a warning.
- */
-#define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0)
-
-#endif
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
deleted file mode 100644
index ad049e6f24e4..000000000000
--- a/drivers/staging/lustre/lnet/Kconfig
+++ /dev/null
@@ -1,46 +0,0 @@
-config LNET
- tristate "Lustre networking subsystem (LNet)"
- depends on INET
- help
- The Lustre network layer, also known as LNet, is a networking abstaction
- level API that was initially created to allow Lustre Filesystem to utilize
- very different networks like tcp and ib verbs in a uniform way. In the
- case of Lustre routers only the LNet layer is required. Lately other
- projects are also looking into using LNet as their networking API as well.
-
-config LNET_MAX_PAYLOAD
- int "Lustre lnet max transfer payload (default 1MB)"
- depends on LNET
- default "1048576"
- help
- This option defines the maximum size of payload in bytes that lnet
- can put into its transport.
-
- If unsure, use default.
-
-config LNET_SELFTEST
- tristate "Lustre networking self testing"
- depends on LNET
- help
- Choose Y here if you want to do lnet self testing. To compile this
- as a module, choose M here: the module will be called lnet_selftest.
-
- To compile this as a kernel modules, choose M here and it will be
- called lnet_selftest.
-
- If unsure, say N.
-
- See also http://wiki.lustre.org/
-
-config LNET_XPRT_IB
- tristate "LNET infiniband support"
- depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
- default LNET && INFINIBAND
- help
- This option allows the LNET users to use infiniband as an
- RDMA-enabled transport.
-
- To compile this as a kernel module, choose M here and it will be
- called ko2iblnd.
-
- If unsure, say N.
diff --git a/drivers/staging/lustre/lnet/Makefile b/drivers/staging/lustre/lnet/Makefile
deleted file mode 100644
index 0a380fe88ce8..000000000000
--- a/drivers/staging/lustre/lnet/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_LNET) += libcfs/ lnet/ klnds/ selftest/
diff --git a/drivers/staging/lustre/lnet/klnds/Makefile b/drivers/staging/lustre/lnet/klnds/Makefile
deleted file mode 100644
index c23e4f67f837..000000000000
--- a/drivers/staging/lustre/lnet/klnds/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_LNET) += o2iblnd/ socklnd/
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile b/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile
deleted file mode 100644
index 4affe1d79948..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LNET_XPRT_IB) += ko2iblnd.o
-ko2iblnd-y := o2iblnd.o o2iblnd_cb.o o2iblnd_modparams.o
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
deleted file mode 100644
index 7ae2955c4db6..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ /dev/null
@@ -1,2952 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/o2iblnd/o2iblnd.c
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include <asm/div64.h>
-#include <asm/page.h>
-#include "o2iblnd.h"
-
-static struct lnet_lnd the_o2iblnd;
-
-struct kib_data kiblnd_data;
-
-static __u32 kiblnd_cksum(void *ptr, int nob)
-{
- char *c = ptr;
- __u32 sum = 0;
-
- while (nob-- > 0)
- sum = ((sum << 1) | (sum >> 31)) + *c++;
-
- /* ensure I don't return 0 (== no checksum) */
- return !sum ? 1 : sum;
-}
-
-static char *kiblnd_msgtype2str(int type)
-{
- switch (type) {
- case IBLND_MSG_CONNREQ:
- return "CONNREQ";
-
- case IBLND_MSG_CONNACK:
- return "CONNACK";
-
- case IBLND_MSG_NOOP:
- return "NOOP";
-
- case IBLND_MSG_IMMEDIATE:
- return "IMMEDIATE";
-
- case IBLND_MSG_PUT_REQ:
- return "PUT_REQ";
-
- case IBLND_MSG_PUT_NAK:
- return "PUT_NAK";
-
- case IBLND_MSG_PUT_ACK:
- return "PUT_ACK";
-
- case IBLND_MSG_PUT_DONE:
- return "PUT_DONE";
-
- case IBLND_MSG_GET_REQ:
- return "GET_REQ";
-
- case IBLND_MSG_GET_DONE:
- return "GET_DONE";
-
- default:
- return "???";
- }
-}
-
-static int kiblnd_msgtype2size(int type)
-{
- const int hdr_size = offsetof(struct kib_msg, ibm_u);
-
- switch (type) {
- case IBLND_MSG_CONNREQ:
- case IBLND_MSG_CONNACK:
- return hdr_size + sizeof(struct kib_connparams);
-
- case IBLND_MSG_NOOP:
- return hdr_size;
-
- case IBLND_MSG_IMMEDIATE:
- return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);
-
- case IBLND_MSG_PUT_REQ:
- return hdr_size + sizeof(struct kib_putreq_msg);
-
- case IBLND_MSG_PUT_ACK:
- return hdr_size + sizeof(struct kib_putack_msg);
-
- case IBLND_MSG_GET_REQ:
- return hdr_size + sizeof(struct kib_get_msg);
-
- case IBLND_MSG_PUT_NAK:
- case IBLND_MSG_PUT_DONE:
- case IBLND_MSG_GET_DONE:
- return hdr_size + sizeof(struct kib_completion_msg);
- default:
- return -1;
- }
-}
-
-static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
-{
- struct kib_rdma_desc *rd;
- int msg_size;
- int nob;
- int n;
- int i;
-
- LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
- msg->ibm_type == IBLND_MSG_PUT_ACK);
-
- rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
- &msg->ibm_u.get.ibgm_rd :
- &msg->ibm_u.putack.ibpam_rd;
-
- if (flip) {
- __swab32s(&rd->rd_key);
- __swab32s(&rd->rd_nfrags);
- }
-
- n = rd->rd_nfrags;
-
- nob = offsetof(struct kib_msg, ibm_u) +
- kiblnd_rd_msg_size(rd, msg->ibm_type, n);
-
- if (msg->ibm_nob < nob) {
- CERROR("Short %s: %d(%d)\n",
- kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
- return 1;
- }
-
- msg_size = kiblnd_rd_size(rd);
- if (msg_size <= 0 || msg_size > LNET_MAX_PAYLOAD) {
- CERROR("Bad msg_size: %d, should be 0 < n <= %d\n",
- msg_size, LNET_MAX_PAYLOAD);
- return 1;
- }
-
- if (!flip)
- return 0;
-
- for (i = 0; i < n; i++) {
- __swab32s(&rd->rd_frags[i].rf_nob);
- __swab64s(&rd->rd_frags[i].rf_addr);
- }
-
- return 0;
-}
-
-void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
- int credits, lnet_nid_t dstnid, __u64 dststamp)
-{
- struct kib_net *net = ni->ni_data;
-
- /*
- * CAVEAT EMPTOR! all message fields not set here should have been
- * initialised previously.
- */
- msg->ibm_magic = IBLND_MSG_MAGIC;
- msg->ibm_version = version;
- /* ibm_type */
- msg->ibm_credits = credits;
- /* ibm_nob */
- msg->ibm_cksum = 0;
- msg->ibm_srcnid = ni->ni_nid;
- msg->ibm_srcstamp = net->ibn_incarnation;
- msg->ibm_dstnid = dstnid;
- msg->ibm_dststamp = dststamp;
-
- if (*kiblnd_tunables.kib_cksum) {
- /* NB ibm_cksum zero while computing cksum */
- msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
- }
-}
-
-int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
-{
- const int hdr_size = offsetof(struct kib_msg, ibm_u);
- __u32 msg_cksum;
- __u16 version;
- int msg_nob;
- int flip;
-
- /* 6 bytes are enough to have received magic + version */
- if (nob < 6) {
- CERROR("Short message: %d\n", nob);
- return -EPROTO;
- }
-
- if (msg->ibm_magic == IBLND_MSG_MAGIC) {
- flip = 0;
- } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
- flip = 1;
- } else {
- CERROR("Bad magic: %08x\n", msg->ibm_magic);
- return -EPROTO;
- }
-
- version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
- if (version != IBLND_MSG_VERSION &&
- version != IBLND_MSG_VERSION_1) {
- CERROR("Bad version: %x\n", version);
- return -EPROTO;
- }
-
- if (nob < hdr_size) {
- CERROR("Short message: %d\n", nob);
- return -EPROTO;
- }
-
- msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
- if (msg_nob > nob) {
- CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
- return -EPROTO;
- }
-
- /*
- * checksum must be computed with ibm_cksum zero and BEFORE anything
- * gets flipped
- */
- msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
- msg->ibm_cksum = 0;
- if (msg_cksum &&
- msg_cksum != kiblnd_cksum(msg, msg_nob)) {
- CERROR("Bad checksum\n");
- return -EPROTO;
- }
-
- msg->ibm_cksum = msg_cksum;
-
- if (flip) {
- /* leave magic unflipped as a clue to peer endianness */
- msg->ibm_version = version;
- BUILD_BUG_ON(sizeof(msg->ibm_type) != 1);
- BUILD_BUG_ON(sizeof(msg->ibm_credits) != 1);
- msg->ibm_nob = msg_nob;
- __swab64s(&msg->ibm_srcnid);
- __swab64s(&msg->ibm_srcstamp);
- __swab64s(&msg->ibm_dstnid);
- __swab64s(&msg->ibm_dststamp);
- }
-
- if (msg->ibm_srcnid == LNET_NID_ANY) {
- CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
- return -EPROTO;
- }
-
- if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
- CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
- msg_nob, kiblnd_msgtype2size(msg->ibm_type));
- return -EPROTO;
- }
-
- switch (msg->ibm_type) {
- default:
- CERROR("Unknown message type %x\n", msg->ibm_type);
- return -EPROTO;
-
- case IBLND_MSG_NOOP:
- case IBLND_MSG_IMMEDIATE:
- case IBLND_MSG_PUT_REQ:
- break;
-
- case IBLND_MSG_PUT_ACK:
- case IBLND_MSG_GET_REQ:
- if (kiblnd_unpack_rd(msg, flip))
- return -EPROTO;
- break;
-
- case IBLND_MSG_PUT_NAK:
- case IBLND_MSG_PUT_DONE:
- case IBLND_MSG_GET_DONE:
- if (flip)
- __swab32s(&msg->ibm_u.completion.ibcm_status);
- break;
-
- case IBLND_MSG_CONNREQ:
- case IBLND_MSG_CONNACK:
- if (flip) {
- __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
- __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
- __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
- }
- break;
- }
- return 0;
-}
-
-int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp,
- lnet_nid_t nid)
-{
- struct kib_peer *peer;
- struct kib_net *net = ni->ni_data;
- int cpt = lnet_cpt_of_nid(nid);
- unsigned long flags;
-
- LASSERT(net);
- LASSERT(nid != LNET_NID_ANY);
-
- peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
- if (!peer) {
- CERROR("Cannot allocate peer\n");
- return -ENOMEM;
- }
-
- peer->ibp_ni = ni;
- peer->ibp_nid = nid;
- peer->ibp_error = 0;
- peer->ibp_last_alive = 0;
- peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni);
- peer->ibp_queue_depth = ni->ni_peertxcredits;
- atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
-
- INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
- INIT_LIST_HEAD(&peer->ibp_conns);
- INIT_LIST_HEAD(&peer->ibp_tx_queue);
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- /* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(!net->ibn_shutdown);
-
- /* npeers only grows with the global lock held */
- atomic_inc(&net->ibn_npeers);
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- *peerp = peer;
- return 0;
-}
-
-void kiblnd_destroy_peer(struct kib_peer *peer)
-{
- struct kib_net *net = peer->ibp_ni->ni_data;
-
- LASSERT(net);
- LASSERT(!atomic_read(&peer->ibp_refcount));
- LASSERT(!kiblnd_peer_active(peer));
- LASSERT(kiblnd_peer_idle(peer));
- LASSERT(list_empty(&peer->ibp_tx_queue));
-
- kfree(peer);
-
- /*
- * NB a peer's connections keep a reference on their peer until
- * they are destroyed, so we can be assured that _all_ state to do
- * with this peer has been cleaned up when its refcount drops to
- * zero.
- */
- atomic_dec(&net->ibn_npeers);
-}
-
-struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid)
-{
- /*
- * the caller is responsible for accounting the additional reference
- * that this creates
- */
- struct list_head *peer_list = kiblnd_nid2peerlist(nid);
- struct list_head *tmp;
- struct kib_peer *peer;
-
- list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, struct kib_peer, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
-
- if (peer->ibp_nid != nid)
- continue;
-
- CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
- peer, libcfs_nid2str(nid),
- atomic_read(&peer->ibp_refcount),
- peer->ibp_version);
- return peer;
- }
- return NULL;
-}
-
-void kiblnd_unlink_peer_locked(struct kib_peer *peer)
-{
- LASSERT(list_empty(&peer->ibp_conns));
-
- LASSERT(kiblnd_peer_active(peer));
- list_del_init(&peer->ibp_list);
- /* lose peerlist's ref */
- kiblnd_peer_decref(peer);
-}
-
-static int kiblnd_get_peer_info(struct lnet_ni *ni, int index,
- lnet_nid_t *nidp, int *count)
-{
- struct kib_peer *peer;
- struct list_head *ptmp;
- int i;
- unsigned long flags;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, struct kib_peer, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
-
- if (peer->ibp_ni != ni)
- continue;
-
- if (index-- > 0)
- continue;
-
- *nidp = peer->ibp_nid;
- *count = atomic_read(&peer->ibp_refcount);
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
- return 0;
- }
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- return -ENOENT;
-}
-
-static void kiblnd_del_peer_locked(struct kib_peer *peer)
-{
- struct list_head *ctmp;
- struct list_head *cnxt;
- struct kib_conn *conn;
-
- if (list_empty(&peer->ibp_conns)) {
- kiblnd_unlink_peer_locked(peer);
- } else {
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
- kiblnd_close_conn_locked(conn, 0);
- }
- /* NB closing peer's last conn unlinked it. */
- }
- /*
- * NB peer now unlinked; might even be freed if the peer table had the
- * last ref on it.
- */
-}
-
-static int kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid)
-{
- LIST_HEAD(zombies);
- struct list_head *ptmp;
- struct list_head *pnxt;
- struct kib_peer *peer;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int rc = -ENOENT;
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- if (nid != LNET_NID_ANY) {
- lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- } else {
- lo = 0;
- hi = kiblnd_data.kib_peer_hash_size - 1;
- }
-
- for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, struct kib_peer, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
-
- if (peer->ibp_ni != ni)
- continue;
-
- if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
- continue;
-
- if (!list_empty(&peer->ibp_tx_queue)) {
- LASSERT(list_empty(&peer->ibp_conns));
-
- list_splice_init(&peer->ibp_tx_queue,
- &zombies);
- }
-
- kiblnd_del_peer_locked(peer);
- rc = 0; /* matched something */
- }
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_txlist_done(ni, &zombies, -EIO);
-
- return rc;
-}
-
-static struct kib_conn *kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
-{
- struct kib_peer *peer;
- struct list_head *ptmp;
- struct kib_conn *conn;
- struct list_head *ctmp;
- int i;
- unsigned long flags;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, struct kib_peer, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
-
- if (peer->ibp_ni != ni)
- continue;
-
- list_for_each(ctmp, &peer->ibp_conns) {
- if (index-- > 0)
- continue;
-
- conn = list_entry(ctmp, struct kib_conn,
- ibc_list);
- kiblnd_conn_addref(conn);
- read_unlock_irqrestore(
- &kiblnd_data.kib_global_lock,
- flags);
- return conn;
- }
- }
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- return NULL;
-}
-
-int kiblnd_translate_mtu(int value)
-{
- switch (value) {
- default:
- return -1;
- case 0:
- return 0;
- case 256:
- return IB_MTU_256;
- case 512:
- return IB_MTU_512;
- case 1024:
- return IB_MTU_1024;
- case 2048:
- return IB_MTU_2048;
- case 4096:
- return IB_MTU_4096;
- }
-}
-
-static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
-{
- int mtu;
-
- /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
- if (!cmid->route.path_rec)
- return;
-
- mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
- LASSERT(mtu >= 0);
- if (mtu)
- cmid->route.path_rec->mtu = mtu;
-}
-
-static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
-{
- cpumask_var_t *mask;
- int vectors;
- int off;
- int i;
- lnet_nid_t nid = conn->ibc_peer->ibp_nid;
-
- vectors = conn->ibc_cmid->device->num_comp_vectors;
- if (vectors <= 1)
- return 0;
-
- mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
- if (!mask)
- return 0;
-
- /* hash NID to CPU id in this partition... */
- off = do_div(nid, cpumask_weight(*mask));
- for_each_cpu(i, *mask) {
- if (!off--)
- return i % vectors;
- }
-
- LBUG();
- return 1;
-}
-
-struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid,
- int state, int version)
-{
- /*
- * CAVEAT EMPTOR:
- * If the new conn is created successfully it takes over the caller's
- * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
- * is destroyed. On failure, the caller's ref on 'peer' remains and
- * she must dispose of 'cmid'. (Actually I'd block forever if I tried
- * to destroy 'cmid' here since I'm called from the CM which still has
- * its ref on 'cmid').
- */
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- struct kib_net *net = peer->ibp_ni->ni_data;
- struct kib_dev *dev;
- struct ib_qp_init_attr *init_qp_attr;
- struct kib_sched_info *sched;
- struct ib_cq_init_attr cq_attr = {};
- struct kib_conn *conn;
- struct ib_cq *cq;
- unsigned long flags;
- int cpt;
- int rc;
- int i;
-
- LASSERT(net);
- LASSERT(!in_interrupt());
-
- dev = net->ibn_dev;
-
- cpt = lnet_cpt_of_nid(peer->ibp_nid);
- sched = kiblnd_data.kib_scheds[cpt];
-
- LASSERT(sched->ibs_nthreads > 0);
-
- init_qp_attr = kzalloc_cpt(sizeof(*init_qp_attr), GFP_NOFS, cpt);
- if (!init_qp_attr) {
- CERROR("Can't allocate qp_attr for %s\n",
- libcfs_nid2str(peer->ibp_nid));
- goto failed_0;
- }
-
- conn = kzalloc_cpt(sizeof(*conn), GFP_NOFS, cpt);
- if (!conn) {
- CERROR("Can't allocate connection for %s\n",
- libcfs_nid2str(peer->ibp_nid));
- goto failed_1;
- }
-
- conn->ibc_state = IBLND_CONN_INIT;
- conn->ibc_version = version;
- conn->ibc_peer = peer; /* I take the caller's ref */
- cmid->context = conn; /* for future CM callbacks */
- conn->ibc_cmid = cmid;
- conn->ibc_max_frags = peer->ibp_max_frags;
- conn->ibc_queue_depth = peer->ibp_queue_depth;
-
- INIT_LIST_HEAD(&conn->ibc_early_rxs);
- INIT_LIST_HEAD(&conn->ibc_tx_noops);
- INIT_LIST_HEAD(&conn->ibc_tx_queue);
- INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
- INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
- INIT_LIST_HEAD(&conn->ibc_active_txs);
- spin_lock_init(&conn->ibc_lock);
-
- conn->ibc_connvars = kzalloc_cpt(sizeof(*conn->ibc_connvars), GFP_NOFS, cpt);
- if (!conn->ibc_connvars) {
- CERROR("Can't allocate in-progress connection state\n");
- goto failed_2;
- }
-
- write_lock_irqsave(glock, flags);
- if (dev->ibd_failover) {
- write_unlock_irqrestore(glock, flags);
- CERROR("%s: failover in progress\n", dev->ibd_ifname);
- goto failed_2;
- }
-
- if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
- /* wakeup failover thread and teardown connection */
- if (kiblnd_dev_can_failover(dev)) {
- list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- wake_up(&kiblnd_data.kib_failover_waitq);
- }
-
- write_unlock_irqrestore(glock, flags);
- CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
- cmid->device->name, dev->ibd_ifname);
- goto failed_2;
- }
-
- kiblnd_hdev_addref_locked(dev->ibd_hdev);
- conn->ibc_hdev = dev->ibd_hdev;
-
- kiblnd_setup_mtu_locked(cmid);
-
- write_unlock_irqrestore(glock, flags);
-
- conn->ibc_rxs = kzalloc_cpt(IBLND_RX_MSGS(conn) * sizeof(struct kib_rx),
- GFP_NOFS, cpt);
- if (!conn->ibc_rxs) {
- CERROR("Cannot allocate RX buffers\n");
- goto failed_2;
- }
-
- rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
- IBLND_RX_MSG_PAGES(conn));
- if (rc)
- goto failed_2;
-
- kiblnd_map_rx_descs(conn);
-
- cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
- cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
- cq = ib_create_cq(cmid->device,
- kiblnd_cq_completion, kiblnd_cq_event, conn,
- &cq_attr);
- if (IS_ERR(cq)) {
- CERROR("Failed to create CQ with %d CQEs: %ld\n",
- IBLND_CQ_ENTRIES(conn), PTR_ERR(cq));
- goto failed_2;
- }
-
- conn->ibc_cq = cq;
-
- rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- if (rc) {
- CERROR("Can't request completion notification: %d\n", rc);
- goto failed_2;
- }
-
- init_qp_attr->event_handler = kiblnd_qp_event;
- init_qp_attr->qp_context = conn;
- init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn);
- init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
- init_qp_attr->cap.max_send_sge = 1;
- init_qp_attr->cap.max_recv_sge = 1;
- init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- init_qp_attr->qp_type = IB_QPT_RC;
- init_qp_attr->send_cq = cq;
- init_qp_attr->recv_cq = cq;
-
- conn->ibc_sched = sched;
-
- rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
- if (rc) {
- CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
- rc, init_qp_attr->cap.max_send_wr,
- init_qp_attr->cap.max_recv_wr);
- goto failed_2;
- }
-
- kfree(init_qp_attr);
-
- /* 1 ref for caller and each rxmsg */
- atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
- conn->ibc_nrx = IBLND_RX_MSGS(conn);
-
- /* post receives */
- for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
- rc = kiblnd_post_rx(&conn->ibc_rxs[i],
- IBLND_POSTRX_NO_CREDIT);
- if (rc) {
- CERROR("Can't post rxmsg: %d\n", rc);
-
- /* Make posted receives complete */
- kiblnd_abort_receives(conn);
-
- /*
- * correct # of posted buffers
- * NB locking needed now I'm racing with completion
- */
- spin_lock_irqsave(&sched->ibs_lock, flags);
- conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i;
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- /*
- * cmid will be destroyed by CM(ofed) after cm_callback
- * returned, so we can't refer it anymore
- * (by kiblnd_connd()->kiblnd_destroy_conn)
- */
- rdma_destroy_qp(conn->ibc_cmid);
- conn->ibc_cmid = NULL;
-
- /* Drop my own and unused rxbuffer refcounts */
- while (i++ <= IBLND_RX_MSGS(conn))
- kiblnd_conn_decref(conn);
-
- return NULL;
- }
- }
-
- /* Init successful! */
- LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
- state == IBLND_CONN_PASSIVE_WAIT);
- conn->ibc_state = state;
-
- /* 1 more conn */
- atomic_inc(&net->ibn_nconns);
- return conn;
-
- failed_2:
- kiblnd_destroy_conn(conn);
- kfree(conn);
- failed_1:
- kfree(init_qp_attr);
- failed_0:
- return NULL;
-}
-
-void kiblnd_destroy_conn(struct kib_conn *conn)
-{
- struct rdma_cm_id *cmid = conn->ibc_cmid;
- struct kib_peer *peer = conn->ibc_peer;
- int rc;
-
- LASSERT(!in_interrupt());
- LASSERT(!atomic_read(&conn->ibc_refcount));
- LASSERT(list_empty(&conn->ibc_early_rxs));
- LASSERT(list_empty(&conn->ibc_tx_noops));
- LASSERT(list_empty(&conn->ibc_tx_queue));
- LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
- LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
- LASSERT(list_empty(&conn->ibc_active_txs));
- LASSERT(!conn->ibc_noops_posted);
- LASSERT(!conn->ibc_nsends_posted);
-
- switch (conn->ibc_state) {
- default:
- /* conn must be completely disengaged from the network */
- LBUG();
-
- case IBLND_CONN_DISCONNECTED:
- /* connvars should have been freed already */
- LASSERT(!conn->ibc_connvars);
- break;
-
- case IBLND_CONN_INIT:
- break;
- }
-
- /* conn->ibc_cmid might be destroyed by CM already */
- if (cmid && cmid->qp)
- rdma_destroy_qp(cmid);
-
- if (conn->ibc_cq) {
- rc = ib_destroy_cq(conn->ibc_cq);
- if (rc)
- CWARN("Error destroying CQ: %d\n", rc);
- }
-
- if (conn->ibc_rx_pages)
- kiblnd_unmap_rx_descs(conn);
-
- kfree(conn->ibc_rxs);
- kfree(conn->ibc_connvars);
-
- if (conn->ibc_hdev)
- kiblnd_hdev_decref(conn->ibc_hdev);
-
- /* See CAVEAT EMPTOR above in kiblnd_create_conn */
- if (conn->ibc_state != IBLND_CONN_INIT) {
- struct kib_net *net = peer->ibp_ni->ni_data;
-
- kiblnd_peer_decref(peer);
- rdma_destroy_id(cmid);
- atomic_dec(&net->ibn_nconns);
- }
-}
-
-int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
-{
- struct kib_conn *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
-
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
- CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_version, why);
-
- kiblnd_close_conn_locked(conn, why);
- count++;
- }
-
- return count;
-}
-
-int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
- int version, __u64 incarnation)
-{
- struct kib_conn *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
-
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
- if (conn->ibc_version == version &&
- conn->ibc_incarnation == incarnation)
- continue;
-
- CDEBUG(D_NET,
- "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_version, conn->ibc_incarnation,
- version, incarnation);
-
- kiblnd_close_conn_locked(conn, -ESTALE);
- count++;
- }
-
- return count;
-}
-
-static int kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid)
-{
- struct kib_peer *peer;
- struct list_head *ptmp;
- struct list_head *pnxt;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int count = 0;
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- if (nid != LNET_NID_ANY) {
- lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- } else {
- lo = 0;
- hi = kiblnd_data.kib_peer_hash_size - 1;
- }
-
- for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, struct kib_peer, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
-
- if (peer->ibp_ni != ni)
- continue;
-
- if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
- continue;
-
- count += kiblnd_close_peer_conns_locked(peer, 0);
- }
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- /* wildcards always succeed */
- if (nid == LNET_NID_ANY)
- return 0;
-
- return !count ? -ENOENT : 0;
-}
-
-static int kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
-{
- struct libcfs_ioctl_data *data = arg;
- int rc = -EINVAL;
-
- switch (cmd) {
- case IOC_LIBCFS_GET_PEER: {
- lnet_nid_t nid = 0;
- int count = 0;
-
- rc = kiblnd_get_peer_info(ni, data->ioc_count,
- &nid, &count);
- data->ioc_nid = nid;
- data->ioc_count = count;
- break;
- }
-
- case IOC_LIBCFS_DEL_PEER: {
- rc = kiblnd_del_peer(ni, data->ioc_nid);
- break;
- }
- case IOC_LIBCFS_GET_CONN: {
- struct kib_conn *conn;
-
- rc = 0;
- conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
- if (!conn) {
- rc = -ENOENT;
- break;
- }
-
- LASSERT(conn->ibc_cmid);
- data->ioc_nid = conn->ibc_peer->ibp_nid;
- if (!conn->ibc_cmid->route.path_rec)
- data->ioc_u32[0] = 0; /* iWarp has no path MTU */
- else
- data->ioc_u32[0] =
- ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
- kiblnd_conn_decref(conn);
- break;
- }
- case IOC_LIBCFS_CLOSE_CONNECTION: {
- rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
- break;
- }
-
- default:
- break;
- }
-
- return rc;
-}
-
-static void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid,
- unsigned long *when)
-{
- unsigned long last_alive = 0;
- unsigned long now = cfs_time_current();
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- struct kib_peer *peer;
- unsigned long flags;
-
- read_lock_irqsave(glock, flags);
-
- peer = kiblnd_find_peer_locked(nid);
- if (peer)
- last_alive = peer->ibp_last_alive;
-
- read_unlock_irqrestore(glock, flags);
-
- if (last_alive)
- *when = last_alive;
-
- /*
- * peer is not persistent in hash, trigger peer creation
- * and connection establishment with a NULL tx
- */
- if (!peer)
- kiblnd_launch_tx(ni, NULL, nid);
-
- CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
- libcfs_nid2str(nid), peer,
- last_alive ? cfs_duration_sec(now - last_alive) : -1);
-}
-
-static void kiblnd_free_pages(struct kib_pages *p)
-{
- int npages = p->ibp_npages;
- int i;
-
- for (i = 0; i < npages; i++) {
- if (p->ibp_pages[i])
- __free_page(p->ibp_pages[i]);
- }
-
- kfree(p);
-}
-
-int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
-{
- struct kib_pages *p;
- int i;
-
- p = kzalloc_cpt(offsetof(struct kib_pages, ibp_pages[npages]),
- GFP_NOFS, cpt);
- if (!p) {
- CERROR("Can't allocate descriptor for %d pages\n", npages);
- return -ENOMEM;
- }
-
- p->ibp_npages = npages;
-
- for (i = 0; i < npages; i++) {
- p->ibp_pages[i] = alloc_pages_node(
- cfs_cpt_spread_node(lnet_cpt_table(), cpt),
- GFP_NOFS, 0);
- if (!p->ibp_pages[i]) {
- CERROR("Can't allocate page %d of %d\n", i, npages);
- kiblnd_free_pages(p);
- return -ENOMEM;
- }
- }
-
- *pp = p;
- return 0;
-}
-
-void kiblnd_unmap_rx_descs(struct kib_conn *conn)
-{
- struct kib_rx *rx;
- int i;
-
- LASSERT(conn->ibc_rxs);
- LASSERT(conn->ibc_hdev);
-
- for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
- rx = &conn->ibc_rxs[i];
-
- LASSERT(rx->rx_nob >= 0); /* not posted */
-
- kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
- KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
- rx->rx_msgaddr),
- IBLND_MSG_SIZE, DMA_FROM_DEVICE);
- }
-
- kiblnd_free_pages(conn->ibc_rx_pages);
-
- conn->ibc_rx_pages = NULL;
-}
-
-void kiblnd_map_rx_descs(struct kib_conn *conn)
-{
- struct kib_rx *rx;
- struct page *pg;
- int pg_off;
- int ipg;
- int i;
-
- for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) {
- pg = conn->ibc_rx_pages->ibp_pages[ipg];
- rx = &conn->ibc_rxs[i];
-
- rx->rx_conn = conn;
- rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off);
-
- rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
- rx->rx_msg,
- IBLND_MSG_SIZE,
- DMA_FROM_DEVICE);
- LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
- rx->rx_msgaddr));
- KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
-
- CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
- i, rx->rx_msg, rx->rx_msgaddr,
- (__u64)(page_to_phys(pg) + pg_off));
-
- pg_off += IBLND_MSG_SIZE;
- LASSERT(pg_off <= PAGE_SIZE);
-
- if (pg_off == PAGE_SIZE) {
- pg_off = 0;
- ipg++;
- LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn));
- }
- }
-}
-
-static void kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo)
-{
- struct kib_hca_dev *hdev = tpo->tpo_hdev;
- struct kib_tx *tx;
- int i;
-
- LASSERT(!tpo->tpo_pool.po_allocated);
-
- if (!hdev)
- return;
-
- for (i = 0; i < tpo->tpo_pool.po_size; i++) {
- tx = &tpo->tpo_tx_descs[i];
- kiblnd_dma_unmap_single(hdev->ibh_ibdev,
- KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
- tx->tx_msgaddr),
- IBLND_MSG_SIZE, DMA_TO_DEVICE);
- }
-
- kiblnd_hdev_decref(hdev);
- tpo->tpo_hdev = NULL;
-}
-
-static struct kib_hca_dev *kiblnd_current_hdev(struct kib_dev *dev)
-{
- struct kib_hca_dev *hdev;
- unsigned long flags;
- int i = 0;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (dev->ibd_failover) {
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (!(i++ % 50))
- CDEBUG(D_NET, "%s: Wait for failover\n",
- dev->ibd_ifname);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 100);
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- }
-
- kiblnd_hdev_addref_locked(dev->ibd_hdev);
- hdev = dev->ibd_hdev;
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- return hdev;
-}
-
-static void kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
-{
- struct kib_pages *txpgs = tpo->tpo_tx_pages;
- struct kib_pool *pool = &tpo->tpo_pool;
- struct kib_net *net = pool->po_owner->ps_net;
- struct kib_dev *dev;
- struct page *page;
- struct kib_tx *tx;
- int page_offset;
- int ipage;
- int i;
-
- LASSERT(net);
-
- dev = net->ibn_dev;
-
- /* pre-mapped messages are not bigger than 1 page */
- BUILD_BUG_ON(IBLND_MSG_SIZE > PAGE_SIZE);
-
- /* No fancy arithmetic when we do the buffer calculations */
- BUILD_BUG_ON(PAGE_SIZE % IBLND_MSG_SIZE);
-
- tpo->tpo_hdev = kiblnd_current_hdev(dev);
-
- for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
- page = txpgs->ibp_pages[ipage];
- tx = &tpo->tpo_tx_descs[i];
-
- tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) +
- page_offset);
-
- tx->tx_msgaddr = kiblnd_dma_map_single(
- tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
- IBLND_MSG_SIZE, DMA_TO_DEVICE);
- LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
- tx->tx_msgaddr));
- KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
-
- list_add(&tx->tx_list, &pool->po_free_list);
-
- page_offset += IBLND_MSG_SIZE;
- LASSERT(page_offset <= PAGE_SIZE);
-
- if (page_offset == PAGE_SIZE) {
- page_offset = 0;
- ipage++;
- LASSERT(ipage <= txpgs->ibp_npages);
- }
- }
-}
-
-static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
-{
- LASSERT(!fpo->fpo_map_count);
-
- if (fpo->fpo_is_fmr) {
- if (fpo->fmr.fpo_fmr_pool)
- ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
- } else {
- struct kib_fast_reg_descriptor *frd, *tmp;
- int i = 0;
-
- list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
- frd_list) {
- list_del(&frd->frd_list);
- ib_dereg_mr(frd->frd_mr);
- kfree(frd);
- i++;
- }
- if (i < fpo->fast_reg.fpo_pool_size)
- CERROR("FastReg pool still has %d regions registered\n",
- fpo->fast_reg.fpo_pool_size - i);
- }
-
- if (fpo->fpo_hdev)
- kiblnd_hdev_decref(fpo->fpo_hdev);
-
- kfree(fpo);
-}
-
-static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
-{
- struct kib_fmr_pool *fpo, *tmp;
-
- list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
- list_del(&fpo->fpo_list);
- kiblnd_destroy_fmr_pool(fpo);
- }
-}
-
-static int
-kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
- int ncpts)
-{
- int size = tunables->lnd_fmr_pool_size / ncpts;
-
- return max(IBLND_FMR_POOL, size);
-}
-
-static int
-kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
- int ncpts)
-{
- int size = tunables->lnd_fmr_flush_trigger / ncpts;
-
- return max(IBLND_FMR_POOL_FLUSH, size);
-}
-
-static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
-{
- struct ib_fmr_pool_param param = {
- .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
- .page_shift = PAGE_SHIFT,
- .access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE),
- .pool_size = fps->fps_pool_size,
- .dirty_watermark = fps->fps_flush_trigger,
- .flush_function = NULL,
- .flush_arg = NULL,
- .cache = !!fps->fps_cache };
- int rc = 0;
-
- fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
- &param);
- if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
- rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
- if (rc != -ENOSYS)
- CERROR("Failed to create FMR pool: %d\n", rc);
- else
- CERROR("FMRs are not supported\n");
- }
-
- return rc;
-}
-
-static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
-{
- struct kib_fast_reg_descriptor *frd, *tmp;
- int i, rc;
-
- INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
- fpo->fast_reg.fpo_pool_size = 0;
- for (i = 0; i < fps->fps_pool_size; i++) {
- frd = kzalloc_cpt(sizeof(*frd), GFP_NOFS, fps->fps_cpt);
- if (!frd) {
- CERROR("Failed to allocate a new fast_reg descriptor\n");
- rc = -ENOMEM;
- goto out;
- }
-
- frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
- IB_MR_TYPE_MEM_REG,
- LNET_MAX_PAYLOAD / PAGE_SIZE);
- if (IS_ERR(frd->frd_mr)) {
- rc = PTR_ERR(frd->frd_mr);
- CERROR("Failed to allocate ib_alloc_mr: %d\n", rc);
- frd->frd_mr = NULL;
- goto out_middle;
- }
-
- frd->frd_valid = true;
-
- list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
- fpo->fast_reg.fpo_pool_size++;
- }
-
- return 0;
-
-out_middle:
- if (frd->frd_mr)
- ib_dereg_mr(frd->frd_mr);
- kfree(frd);
-
-out:
- list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
- frd_list) {
- list_del(&frd->frd_list);
- ib_dereg_mr(frd->frd_mr);
- kfree(frd);
- }
-
- return rc;
-}
-
-static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
- struct kib_fmr_pool **pp_fpo)
-{
- struct kib_dev *dev = fps->fps_net->ibn_dev;
- struct ib_device_attr *dev_attr;
- struct kib_fmr_pool *fpo;
- int rc;
-
- fpo = kzalloc_cpt(sizeof(*fpo), GFP_NOFS, fps->fps_cpt);
- if (!fpo)
- return -ENOMEM;
-
- fpo->fpo_hdev = kiblnd_current_hdev(dev);
- dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs;
-
- /* Check for FMR or FastReg support */
- fpo->fpo_is_fmr = 0;
- if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr &&
- fpo->fpo_hdev->ibh_ibdev->dealloc_fmr &&
- fpo->fpo_hdev->ibh_ibdev->map_phys_fmr &&
- fpo->fpo_hdev->ibh_ibdev->unmap_fmr) {
- LCONSOLE_INFO("Using FMR for registration\n");
- fpo->fpo_is_fmr = 1;
- } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- LCONSOLE_INFO("Using FastReg for registration\n");
- } else {
- rc = -ENOSYS;
- LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs nor FastRegs, can't register memory\n");
- goto out_fpo;
- }
-
- if (fpo->fpo_is_fmr)
- rc = kiblnd_alloc_fmr_pool(fps, fpo);
- else
- rc = kiblnd_alloc_freg_pool(fps, fpo);
- if (rc)
- goto out_fpo;
-
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_owner = fps;
- *pp_fpo = fpo;
-
- return 0;
-
-out_fpo:
- kiblnd_hdev_decref(fpo->fpo_hdev);
- kfree(fpo);
- return rc;
-}
-
-static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps,
- struct list_head *zombies)
-{
- if (!fps->fps_net) /* initialized? */
- return;
-
- spin_lock(&fps->fps_lock);
-
- while (!list_empty(&fps->fps_pool_list)) {
- struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
- struct kib_fmr_pool, fpo_list);
- fpo->fpo_failed = 1;
- list_del(&fpo->fpo_list);
- if (!fpo->fpo_map_count)
- list_add(&fpo->fpo_list, zombies);
- else
- list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
- }
-
- spin_unlock(&fps->fps_lock);
-}
-
-static void kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps)
-{
- if (fps->fps_net) { /* initialized? */
- kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
- kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
- }
-}
-
-static int
-kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts,
- struct kib_net *net,
- struct lnet_ioctl_config_o2iblnd_tunables *tunables)
-{
- struct kib_fmr_pool *fpo;
- int rc;
-
- memset(fps, 0, sizeof(*fps));
-
- fps->fps_net = net;
- fps->fps_cpt = cpt;
-
- fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
- fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
- fps->fps_cache = tunables->lnd_fmr_cache;
-
- spin_lock_init(&fps->fps_lock);
- INIT_LIST_HEAD(&fps->fps_pool_list);
- INIT_LIST_HEAD(&fps->fps_failed_pool_list);
-
- rc = kiblnd_create_fmr_pool(fps, &fpo);
- if (!rc)
- list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
-
- return rc;
-}
-
-static int kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, unsigned long now)
-{
- if (fpo->fpo_map_count) /* still in use */
- return 0;
- if (fpo->fpo_failed)
- return 1;
- return cfs_time_aftereq(now, fpo->fpo_deadline);
-}
-
-static int
-kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
-{
- __u64 *pages = tx->tx_pages;
- struct kib_hca_dev *hdev;
- int npages;
- int size;
- int i;
-
- hdev = tx->tx_pool->tpo_hdev;
-
- for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
- for (size = 0; size < rd->rd_frags[i].rf_nob;
- size += hdev->ibh_page_size) {
- pages[npages++] = (rd->rd_frags[i].rf_addr &
- hdev->ibh_page_mask) + size;
- }
- }
-
- return npages;
-}
-
-void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
-{
- LIST_HEAD(zombies);
- struct kib_fmr_pool *fpo = fmr->fmr_pool;
- struct kib_fmr_poolset *fps;
- unsigned long now = cfs_time_current();
- struct kib_fmr_pool *tmp;
- int rc;
-
- if (!fpo)
- return;
-
- fps = fpo->fpo_owner;
- if (fpo->fpo_is_fmr) {
- if (fmr->fmr_pfmr) {
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(!rc);
- fmr->fmr_pfmr = NULL;
- }
-
- if (status) {
- rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
- LASSERT(!rc);
- }
- } else {
- struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
-
- if (frd) {
- frd->frd_valid = false;
- spin_lock(&fps->fps_lock);
- list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
- spin_unlock(&fps->fps_lock);
- fmr->fmr_frd = NULL;
- }
- }
- fmr->fmr_pool = NULL;
-
- spin_lock(&fps->fps_lock);
- fpo->fpo_map_count--; /* decref the pool */
-
- list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
- /* the first pool is persistent */
- if (fps->fps_pool_list.next == &fpo->fpo_list)
- continue;
-
- if (kiblnd_fmr_pool_is_idle(fpo, now)) {
- list_move(&fpo->fpo_list, &zombies);
- fps->fps_version++;
- }
- }
- spin_unlock(&fps->fps_lock);
-
- if (!list_empty(&zombies))
- kiblnd_destroy_fmr_pool_list(&zombies);
-}
-
-int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
- struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
- struct kib_fmr *fmr)
-{
- __u64 *pages = tx->tx_pages;
- bool is_rx = (rd != tx->tx_rd);
- bool tx_pages_mapped = false;
- struct kib_fmr_pool *fpo;
- int npages = 0;
- __u64 version;
- int rc;
-
- again:
- spin_lock(&fps->fps_lock);
- version = fps->fps_version;
- list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_map_count++;
-
- if (fpo->fpo_is_fmr) {
- struct ib_pool_fmr *pfmr;
-
- spin_unlock(&fps->fps_lock);
-
- if (!tx_pages_mapped) {
- npages = kiblnd_map_tx_pages(tx, rd);
- tx_pages_mapped = 1;
- }
-
- pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
- pages, npages, iov);
- if (likely(!IS_ERR(pfmr))) {
- fmr->fmr_key = is_rx ? pfmr->fmr->rkey :
- pfmr->fmr->lkey;
- fmr->fmr_frd = NULL;
- fmr->fmr_pfmr = pfmr;
- fmr->fmr_pool = fpo;
- return 0;
- }
- rc = PTR_ERR(pfmr);
- } else {
- if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
- struct kib_fast_reg_descriptor *frd;
- struct ib_reg_wr *wr;
- struct ib_mr *mr;
- int n;
-
- frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
- struct kib_fast_reg_descriptor,
- frd_list);
- list_del(&frd->frd_list);
- spin_unlock(&fps->fps_lock);
-
- mr = frd->frd_mr;
-
- if (!frd->frd_valid) {
- __u32 key = is_rx ? mr->rkey : mr->lkey;
- struct ib_send_wr *inv_wr;
-
- inv_wr = &frd->frd_inv_wr;
- memset(inv_wr, 0, sizeof(*inv_wr));
- inv_wr->opcode = IB_WR_LOCAL_INV;
- inv_wr->wr_id = IBLND_WID_MR;
- inv_wr->ex.invalidate_rkey = key;
-
- /* Bump the key */
- key = ib_inc_rkey(key);
- ib_update_fast_reg_key(mr, key);
- }
-
- n = ib_map_mr_sg(mr, tx->tx_frags,
- tx->tx_nfrags, NULL, PAGE_SIZE);
- if (unlikely(n != tx->tx_nfrags)) {
- CERROR("Failed to map mr %d/%d elements\n",
- n, tx->tx_nfrags);
- return n < 0 ? n : -EINVAL;
- }
-
- mr->iova = iov;
-
- /* Prepare FastReg WR */
- wr = &frd->frd_fastreg_wr;
- memset(wr, 0, sizeof(*wr));
- wr->wr.opcode = IB_WR_REG_MR;
- wr->wr.wr_id = IBLND_WID_MR;
- wr->wr.num_sge = 0;
- wr->wr.send_flags = 0;
- wr->mr = mr;
- wr->key = is_rx ? mr->rkey : mr->lkey;
- wr->access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE);
-
- fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
- fmr->fmr_frd = frd;
- fmr->fmr_pfmr = NULL;
- fmr->fmr_pool = fpo;
- return 0;
- }
- spin_unlock(&fps->fps_lock);
- rc = -EBUSY;
- }
-
- spin_lock(&fps->fps_lock);
- fpo->fpo_map_count--;
- if (rc != -EAGAIN) {
- spin_unlock(&fps->fps_lock);
- return rc;
- }
-
- /* EAGAIN and ... */
- if (version != fps->fps_version) {
- spin_unlock(&fps->fps_lock);
- goto again;
- }
- }
-
- if (fps->fps_increasing) {
- spin_unlock(&fps->fps_lock);
- CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n");
- schedule();
- goto again;
- }
-
- if (time_before(cfs_time_current(), fps->fps_next_retry)) {
- /* someone failed recently */
- spin_unlock(&fps->fps_lock);
- return -EAGAIN;
- }
-
- fps->fps_increasing = 1;
- spin_unlock(&fps->fps_lock);
-
- CDEBUG(D_NET, "Allocate new FMR pool\n");
- rc = kiblnd_create_fmr_pool(fps, &fpo);
- spin_lock(&fps->fps_lock);
- fps->fps_increasing = 0;
- if (!rc) {
- fps->fps_version++;
- list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
- } else {
- fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
- }
- spin_unlock(&fps->fps_lock);
-
- goto again;
-}
-
-static void kiblnd_fini_pool(struct kib_pool *pool)
-{
- LASSERT(list_empty(&pool->po_free_list));
- LASSERT(!pool->po_allocated);
-
- CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
-}
-
-static void kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size)
-{
- CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
-
- memset(pool, 0, sizeof(*pool));
- INIT_LIST_HEAD(&pool->po_free_list);
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- pool->po_owner = ps;
- pool->po_size = size;
-}
-
-static void kiblnd_destroy_pool_list(struct list_head *head)
-{
- struct kib_pool *pool;
-
- while (!list_empty(head)) {
- pool = list_entry(head->next, struct kib_pool, po_list);
- list_del(&pool->po_list);
-
- LASSERT(pool->po_owner);
- pool->po_owner->ps_pool_destroy(pool);
- }
-}
-
-static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
-{
- if (!ps->ps_net) /* initialized? */
- return;
-
- spin_lock(&ps->ps_lock);
- while (!list_empty(&ps->ps_pool_list)) {
- struct kib_pool *po = list_entry(ps->ps_pool_list.next,
- struct kib_pool, po_list);
- po->po_failed = 1;
- list_del(&po->po_list);
- if (!po->po_allocated)
- list_add(&po->po_list, zombies);
- else
- list_add(&po->po_list, &ps->ps_failed_pool_list);
- }
- spin_unlock(&ps->ps_lock);
-}
-
-static void kiblnd_fini_poolset(struct kib_poolset *ps)
-{
- if (ps->ps_net) { /* initialized? */
- kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
- kiblnd_destroy_pool_list(&ps->ps_pool_list);
- }
-}
-
-static int kiblnd_init_poolset(struct kib_poolset *ps, int cpt,
- struct kib_net *net, char *name, int size,
- kib_ps_pool_create_t po_create,
- kib_ps_pool_destroy_t po_destroy,
- kib_ps_node_init_t nd_init,
- kib_ps_node_fini_t nd_fini)
-{
- struct kib_pool *pool;
- int rc;
-
- memset(ps, 0, sizeof(*ps));
-
- ps->ps_cpt = cpt;
- ps->ps_net = net;
- ps->ps_pool_create = po_create;
- ps->ps_pool_destroy = po_destroy;
- ps->ps_node_init = nd_init;
- ps->ps_node_fini = nd_fini;
- ps->ps_pool_size = size;
- if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
- >= sizeof(ps->ps_name))
- return -E2BIG;
- spin_lock_init(&ps->ps_lock);
- INIT_LIST_HEAD(&ps->ps_pool_list);
- INIT_LIST_HEAD(&ps->ps_failed_pool_list);
-
- rc = ps->ps_pool_create(ps, size, &pool);
- if (!rc)
- list_add(&pool->po_list, &ps->ps_pool_list);
- else
- CERROR("Failed to create the first pool for %s\n", ps->ps_name);
-
- return rc;
-}
-
-static int kiblnd_pool_is_idle(struct kib_pool *pool, unsigned long now)
-{
- if (pool->po_allocated) /* still in use */
- return 0;
- if (pool->po_failed)
- return 1;
- return cfs_time_aftereq(now, pool->po_deadline);
-}
-
-void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
-{
- LIST_HEAD(zombies);
- struct kib_poolset *ps = pool->po_owner;
- struct kib_pool *tmp;
- unsigned long now = cfs_time_current();
-
- spin_lock(&ps->ps_lock);
-
- if (ps->ps_node_fini)
- ps->ps_node_fini(pool, node);
-
- LASSERT(pool->po_allocated > 0);
- list_add(node, &pool->po_free_list);
- pool->po_allocated--;
-
- list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
- /* the first pool is persistent */
- if (ps->ps_pool_list.next == &pool->po_list)
- continue;
-
- if (kiblnd_pool_is_idle(pool, now))
- list_move(&pool->po_list, &zombies);
- }
- spin_unlock(&ps->ps_lock);
-
- if (!list_empty(&zombies))
- kiblnd_destroy_pool_list(&zombies);
-}
-
-struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
-{
- struct list_head *node;
- struct kib_pool *pool;
- unsigned int interval = 1;
- unsigned long time_before;
- unsigned int trips = 0;
- int rc;
-
- again:
- spin_lock(&ps->ps_lock);
- list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
- if (list_empty(&pool->po_free_list))
- continue;
-
- pool->po_allocated++;
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- node = pool->po_free_list.next;
- list_del(node);
-
- if (ps->ps_node_init) {
- /* still hold the lock */
- ps->ps_node_init(pool, node);
- }
- spin_unlock(&ps->ps_lock);
- return node;
- }
-
- /* no available tx pool and ... */
- if (ps->ps_increasing) {
- /* another thread is allocating a new pool */
- spin_unlock(&ps->ps_lock);
- trips++;
- CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting %d HZs for her to complete. trips = %d\n",
- ps->ps_name, interval, trips);
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(interval);
- if (interval < HZ)
- interval *= 2;
-
- goto again;
- }
-
- if (time_before(cfs_time_current(), ps->ps_next_retry)) {
- /* someone failed recently */
- spin_unlock(&ps->ps_lock);
- return NULL;
- }
-
- ps->ps_increasing = 1;
- spin_unlock(&ps->ps_lock);
-
- CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
- time_before = cfs_time_current();
- rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
- CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete",
- cfs_time_current() - time_before);
-
- spin_lock(&ps->ps_lock);
- ps->ps_increasing = 0;
- if (!rc) {
- list_add_tail(&pool->po_list, &ps->ps_pool_list);
- } else {
- ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
- CERROR("Can't allocate new %s pool because out of memory\n",
- ps->ps_name);
- }
- spin_unlock(&ps->ps_lock);
-
- goto again;
-}
-
-static void kiblnd_destroy_tx_pool(struct kib_pool *pool)
-{
- struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool, tpo_pool);
- int i;
-
- LASSERT(!pool->po_allocated);
-
- if (tpo->tpo_tx_pages) {
- kiblnd_unmap_tx_pool(tpo);
- kiblnd_free_pages(tpo->tpo_tx_pages);
- }
-
- if (!tpo->tpo_tx_descs)
- goto out;
-
- for (i = 0; i < pool->po_size; i++) {
- struct kib_tx *tx = &tpo->tpo_tx_descs[i];
-
- list_del(&tx->tx_list);
- kfree(tx->tx_pages);
- kfree(tx->tx_frags);
- kfree(tx->tx_wrq);
- kfree(tx->tx_sge);
- kfree(tx->tx_rd);
- }
-
- kfree(tpo->tpo_tx_descs);
-out:
- kiblnd_fini_pool(pool);
- kfree(tpo);
-}
-
-static int kiblnd_tx_pool_size(int ncpts)
-{
- int ntx = *kiblnd_tunables.kib_ntx / ncpts;
-
- return max(IBLND_TX_POOL, ntx);
-}
-
-static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
- struct kib_pool **pp_po)
-{
- int i;
- int npg;
- struct kib_pool *pool;
- struct kib_tx_pool *tpo;
-
- tpo = kzalloc_cpt(sizeof(*tpo), GFP_NOFS, ps->ps_cpt);
- if (!tpo) {
- CERROR("Failed to allocate TX pool\n");
- return -ENOMEM;
- }
-
- pool = &tpo->tpo_pool;
- kiblnd_init_pool(ps, pool, size);
- tpo->tpo_tx_descs = NULL;
- tpo->tpo_tx_pages = NULL;
-
- npg = DIV_ROUND_UP(size * IBLND_MSG_SIZE, PAGE_SIZE);
- if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) {
- CERROR("Can't allocate tx pages: %d\n", npg);
- kfree(tpo);
- return -ENOMEM;
- }
-
- tpo->tpo_tx_descs = kzalloc_cpt(size * sizeof(struct kib_tx),
- GFP_NOFS, ps->ps_cpt);
- if (!tpo->tpo_tx_descs) {
- CERROR("Can't allocate %d tx descriptors\n", size);
- ps->ps_pool_destroy(pool);
- return -ENOMEM;
- }
-
- memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx));
-
- for (i = 0; i < size; i++) {
- struct kib_tx *tx = &tpo->tpo_tx_descs[i];
-
- tx->tx_pool = tpo;
- if (ps->ps_net->ibn_fmr_ps) {
- tx->tx_pages = kzalloc_cpt(LNET_MAX_IOV * sizeof(*tx->tx_pages),
- GFP_NOFS, ps->ps_cpt);
- if (!tx->tx_pages)
- break;
- }
-
- tx->tx_frags = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_frags),
- GFP_NOFS, ps->ps_cpt);
- if (!tx->tx_frags)
- break;
-
- sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
-
- tx->tx_wrq = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_wrq),
- GFP_NOFS, ps->ps_cpt);
- if (!tx->tx_wrq)
- break;
-
- tx->tx_sge = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_sge),
- GFP_NOFS, ps->ps_cpt);
- if (!tx->tx_sge)
- break;
-
- tx->tx_rd = kzalloc_cpt(offsetof(struct kib_rdma_desc,
- rd_frags[IBLND_MAX_RDMA_FRAGS]),
- GFP_NOFS, ps->ps_cpt);
- if (!tx->tx_rd)
- break;
- }
-
- if (i == size) {
- kiblnd_map_tx_pool(tpo);
- *pp_po = pool;
- return 0;
- }
-
- ps->ps_pool_destroy(pool);
- return -ENOMEM;
-}
-
-static void kiblnd_tx_init(struct kib_pool *pool, struct list_head *node)
-{
- struct kib_tx_poolset *tps = container_of(pool->po_owner,
- struct kib_tx_poolset,
- tps_poolset);
- struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list);
-
- tx->tx_cookie = tps->tps_next_tx_cookie++;
-}
-
-static void kiblnd_net_fini_pools(struct kib_net *net)
-{
- int i;
-
- cfs_cpt_for_each(i, lnet_cpt_table()) {
- struct kib_tx_poolset *tps;
- struct kib_fmr_poolset *fps;
-
- if (net->ibn_tx_ps) {
- tps = net->ibn_tx_ps[i];
- kiblnd_fini_poolset(&tps->tps_poolset);
- }
-
- if (net->ibn_fmr_ps) {
- fps = net->ibn_fmr_ps[i];
- kiblnd_fini_fmr_poolset(fps);
- }
- }
-
- if (net->ibn_tx_ps) {
- cfs_percpt_free(net->ibn_tx_ps);
- net->ibn_tx_ps = NULL;
- }
-
- if (net->ibn_fmr_ps) {
- cfs_percpt_free(net->ibn_fmr_ps);
- net->ibn_fmr_ps = NULL;
- }
-}
-
-static int kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni,
- __u32 *cpts, int ncpts)
-{
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
- int cpt;
- int rc;
- int i;
-
- tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
-
- if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
- CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
- tunables->lnd_fmr_pool_size,
- *kiblnd_tunables.kib_ntx / 4);
- rc = -EINVAL;
- goto failed;
- }
-
- /*
- * TX pool must be created later than FMR, see LU-2268
- * for details
- */
- LASSERT(!net->ibn_tx_ps);
-
- /*
- * premapping can fail if ibd_nmr > 1, so we always create
- * FMR pool and map-on-demand if premapping failed
- *
- * cfs_precpt_alloc is creating an array of struct kib_fmr_poolset
- * The number of struct kib_fmr_poolsets create is equal to the
- * number of CPTs that exist, i.e net->ibn_fmr_ps[cpt].
- */
- net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct kib_fmr_poolset));
- if (!net->ibn_fmr_ps) {
- CERROR("Failed to allocate FMR pool array\n");
- rc = -ENOMEM;
- goto failed;
- }
-
- for (i = 0; i < ncpts; i++) {
- cpt = !cpts ? i : cpts[i];
- rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
- net, tunables);
- if (rc) {
- CERROR("Can't initialize FMR pool for CPT %d: %d\n",
- cpt, rc);
- goto failed;
- }
- }
-
- if (i > 0)
- LASSERT(i == ncpts);
-
- /*
- * cfs_precpt_alloc is creating an array of struct kib_tx_poolset
- * The number of struct kib_tx_poolsets create is equal to the
- * number of CPTs that exist, i.e net->ibn_tx_ps[cpt].
- */
- net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct kib_tx_poolset));
- if (!net->ibn_tx_ps) {
- CERROR("Failed to allocate tx pool array\n");
- rc = -ENOMEM;
- goto failed;
- }
-
- for (i = 0; i < ncpts; i++) {
- cpt = !cpts ? i : cpts[i];
- rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
- cpt, net, "TX",
- kiblnd_tx_pool_size(ncpts),
- kiblnd_create_tx_pool,
- kiblnd_destroy_tx_pool,
- kiblnd_tx_init, NULL);
- if (rc) {
- CERROR("Can't initialize TX pool for CPT %d: %d\n",
- cpt, rc);
- goto failed;
- }
- }
-
- return 0;
- failed:
- kiblnd_net_fini_pools(net);
- LASSERT(rc);
- return rc;
-}
-
-static int kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
-{
- /*
- * It's safe to assume a HCA can handle a page size
- * matching that of the native system
- */
- hdev->ibh_page_shift = PAGE_SHIFT;
- hdev->ibh_page_size = 1 << PAGE_SHIFT;
- hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
-
- hdev->ibh_mr_size = hdev->ibh_ibdev->attrs.max_mr_size;
- if (hdev->ibh_mr_size == ~0ULL) {
- hdev->ibh_mr_shift = 64;
- return 0;
- }
-
- CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
- return -EINVAL;
-}
-
-void kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
-{
- if (hdev->ibh_pd)
- ib_dealloc_pd(hdev->ibh_pd);
-
- if (hdev->ibh_cmid)
- rdma_destroy_id(hdev->ibh_cmid);
-
- kfree(hdev);
-}
-
-/* DUMMY */
-static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
- struct rdma_cm_event *event)
-{
- return 0;
-}
-
-static int kiblnd_dev_need_failover(struct kib_dev *dev)
-{
- struct rdma_cm_id *cmid;
- struct sockaddr_in srcaddr;
- struct sockaddr_in dstaddr;
- int rc;
-
- if (!dev->ibd_hdev || /* initializing */
- !dev->ibd_hdev->ibh_cmid || /* listener is dead */
- *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
- return 1;
-
- /*
- * XXX: it's UGLY, but I don't have better way to find
- * ib-bonding HCA failover because:
- *
- * a. no reliable CM event for HCA failover...
- * b. no OFED API to get ib_device for current net_device...
- *
- * We have only two choices at this point:
- *
- * a. rdma_bind_addr(), it will conflict with listener cmid
- * b. rdma_resolve_addr() to zero addr
- */
- cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
- if (IS_ERR(cmid)) {
- rc = PTR_ERR(cmid);
- CERROR("Failed to create cmid for failover: %d\n", rc);
- return rc;
- }
-
- memset(&srcaddr, 0, sizeof(srcaddr));
- srcaddr.sin_family = AF_INET;
- srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);
-
- memset(&dstaddr, 0, sizeof(dstaddr));
- dstaddr.sin_family = AF_INET;
- rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
- (struct sockaddr *)&dstaddr, 1);
- if (rc || !cmid->device) {
- CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
- dev->ibd_ifname, &dev->ibd_ifip,
- cmid->device, rc);
- rdma_destroy_id(cmid);
- return rc;
- }
-
- rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
- rdma_destroy_id(cmid);
-
- return rc;
-}
-
-int kiblnd_dev_failover(struct kib_dev *dev)
-{
- LIST_HEAD(zombie_tpo);
- LIST_HEAD(zombie_ppo);
- LIST_HEAD(zombie_fpo);
- struct rdma_cm_id *cmid = NULL;
- struct kib_hca_dev *hdev = NULL;
- struct ib_pd *pd;
- struct kib_net *net;
- struct sockaddr_in addr;
- unsigned long flags;
- int rc = 0;
- int i;
-
- LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
- dev->ibd_can_failover || !dev->ibd_hdev);
-
- rc = kiblnd_dev_need_failover(dev);
- if (rc <= 0)
- goto out;
-
- if (dev->ibd_hdev &&
- dev->ibd_hdev->ibh_cmid) {
- /*
- * XXX it's not good to close old listener at here,
- * because we can fail to create new listener.
- * But we have to close it now, otherwise rdma_bind_addr
- * will return EADDRINUSE... How crap!
- */
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- cmid = dev->ibd_hdev->ibh_cmid;
- /*
- * make next schedule of kiblnd_dev_need_failover()
- * return 1 for me
- */
- dev->ibd_hdev->ibh_cmid = NULL;
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- rdma_destroy_id(cmid);
- }
-
- cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
- if (IS_ERR(cmid)) {
- rc = PTR_ERR(cmid);
- CERROR("Failed to create cmid for failover: %d\n", rc);
- goto out;
- }
-
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(dev->ibd_ifip);
- addr.sin_port = htons(*kiblnd_tunables.kib_service);
-
- /* Bind to failover device or port */
- rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
- if (rc || !cmid->device) {
- CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
- dev->ibd_ifname, &dev->ibd_ifip,
- cmid->device, rc);
- rdma_destroy_id(cmid);
- goto out;
- }
-
- hdev = kzalloc(sizeof(*hdev), GFP_NOFS);
- if (!hdev) {
- CERROR("Failed to allocate kib_hca_dev\n");
- rdma_destroy_id(cmid);
- rc = -ENOMEM;
- goto out;
- }
-
- atomic_set(&hdev->ibh_ref, 1);
- hdev->ibh_dev = dev;
- hdev->ibh_cmid = cmid;
- hdev->ibh_ibdev = cmid->device;
-
- pd = ib_alloc_pd(cmid->device, 0);
- if (IS_ERR(pd)) {
- rc = PTR_ERR(pd);
- CERROR("Can't allocate PD: %d\n", rc);
- goto out;
- }
-
- hdev->ibh_pd = pd;
-
- rc = rdma_listen(cmid, 0);
- if (rc) {
- CERROR("Can't start new listener: %d\n", rc);
- goto out;
- }
-
- rc = kiblnd_hdev_get_attr(hdev);
- if (rc) {
- CERROR("Can't get device attributes: %d\n", rc);
- goto out;
- }
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- swap(dev->ibd_hdev, hdev); /* take over the refcount */
-
- list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
- cfs_cpt_for_each(i, lnet_cpt_table()) {
- kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
- &zombie_tpo);
-
- if (net->ibn_fmr_ps)
- kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
- &zombie_fpo);
- }
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- out:
- if (!list_empty(&zombie_tpo))
- kiblnd_destroy_pool_list(&zombie_tpo);
- if (!list_empty(&zombie_ppo))
- kiblnd_destroy_pool_list(&zombie_ppo);
- if (!list_empty(&zombie_fpo))
- kiblnd_destroy_fmr_pool_list(&zombie_fpo);
- if (hdev)
- kiblnd_hdev_decref(hdev);
-
- if (rc)
- dev->ibd_failed_failover++;
- else
- dev->ibd_failed_failover = 0;
-
- return rc;
-}
-
-void kiblnd_destroy_dev(struct kib_dev *dev)
-{
- LASSERT(!dev->ibd_nnets);
- LASSERT(list_empty(&dev->ibd_nets));
-
- list_del(&dev->ibd_fail_list);
- list_del(&dev->ibd_list);
-
- if (dev->ibd_hdev)
- kiblnd_hdev_decref(dev->ibd_hdev);
-
- kfree(dev);
-}
-
-static struct kib_dev *kiblnd_create_dev(char *ifname)
-{
- struct net_device *netdev;
- struct kib_dev *dev;
- __u32 netmask;
- __u32 ip;
- int up;
- int rc;
-
- rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
- if (rc) {
- CERROR("Can't query IPoIB interface %s: %d\n",
- ifname, rc);
- return NULL;
- }
-
- if (!up) {
- CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
- return NULL;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_NOFS);
- if (!dev)
- return NULL;
-
- netdev = dev_get_by_name(&init_net, ifname);
- if (!netdev) {
- dev->ibd_can_failover = 0;
- } else {
- dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
- dev_put(netdev);
- }
-
- INIT_LIST_HEAD(&dev->ibd_nets);
- INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
- INIT_LIST_HEAD(&dev->ibd_fail_list);
- dev->ibd_ifip = ip;
- strcpy(&dev->ibd_ifname[0], ifname);
-
- /* initialize the device */
- rc = kiblnd_dev_failover(dev);
- if (rc) {
- CERROR("Can't initialize device: %d\n", rc);
- kfree(dev);
- return NULL;
- }
-
- list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs);
- return dev;
-}
-
-static void kiblnd_base_shutdown(void)
-{
- struct kib_sched_info *sched;
- int i;
-
- LASSERT(list_empty(&kiblnd_data.kib_devs));
-
- switch (kiblnd_data.kib_init) {
- default:
- LBUG();
-
- case IBLND_INIT_ALL:
- case IBLND_INIT_DATA:
- LASSERT(kiblnd_data.kib_peers);
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
- LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
- LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
- LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
- LASSERT(list_empty(&kiblnd_data.kib_reconn_list));
- LASSERT(list_empty(&kiblnd_data.kib_reconn_wait));
-
- /* flag threads to terminate; wake and wait for them to die */
- kiblnd_data.kib_shutdown = 1;
-
- /*
- * NB: we really want to stop scheduler threads net by net
- * instead of the whole module, this should be improved
- * with dynamic configuration LNet
- */
- cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
- wake_up_all(&sched->ibs_waitq);
-
- wake_up_all(&kiblnd_data.kib_connd_waitq);
- wake_up_all(&kiblnd_data.kib_failover_waitq);
-
- i = 2;
- while (atomic_read(&kiblnd_data.kib_nthreads)) {
- i++;
- /* power of 2 ? */
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
- "Waiting for %d threads to terminate\n",
- atomic_read(&kiblnd_data.kib_nthreads));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- }
-
- /* fall through */
-
- case IBLND_INIT_NOTHING:
- break;
- }
-
- kvfree(kiblnd_data.kib_peers);
-
- if (kiblnd_data.kib_scheds)
- cfs_percpt_free(kiblnd_data.kib_scheds);
-
- kiblnd_data.kib_init = IBLND_INIT_NOTHING;
- module_put(THIS_MODULE);
-}
-
-static void kiblnd_shutdown(struct lnet_ni *ni)
-{
- struct kib_net *net = ni->ni_data;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- int i;
- unsigned long flags;
-
- LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
-
- if (!net)
- goto out;
-
- write_lock_irqsave(g_lock, flags);
- net->ibn_shutdown = 1;
- write_unlock_irqrestore(g_lock, flags);
-
- switch (net->ibn_init) {
- default:
- LBUG();
-
- case IBLND_INIT_ALL:
- /* nuke all existing peers within this net */
- kiblnd_del_peer(ni, LNET_NID_ANY);
-
- /* Wait for all peer state to clean up */
- i = 2;
- while (atomic_read(&net->ibn_npeers)) {
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
- "%s: waiting for %d peers to disconnect\n",
- libcfs_nid2str(ni->ni_nid),
- atomic_read(&net->ibn_npeers));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- }
-
- kiblnd_net_fini_pools(net);
-
- write_lock_irqsave(g_lock, flags);
- LASSERT(net->ibn_dev->ibd_nnets > 0);
- net->ibn_dev->ibd_nnets--;
- list_del(&net->ibn_list);
- write_unlock_irqrestore(g_lock, flags);
-
- /* fall through */
-
- case IBLND_INIT_NOTHING:
- LASSERT(!atomic_read(&net->ibn_nconns));
-
- if (net->ibn_dev && !net->ibn_dev->ibd_nnets)
- kiblnd_destroy_dev(net->ibn_dev);
-
- break;
- }
-
- net->ibn_init = IBLND_INIT_NOTHING;
- ni->ni_data = NULL;
-
- kfree(net);
-
-out:
- if (list_empty(&kiblnd_data.kib_devs))
- kiblnd_base_shutdown();
-}
-
-static int kiblnd_base_startup(void)
-{
- struct kib_sched_info *sched;
- int rc;
- int i;
-
- LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
-
- try_module_get(THIS_MODULE);
- /* zero pointers, flags etc */
- memset(&kiblnd_data, 0, sizeof(kiblnd_data));
-
- rwlock_init(&kiblnd_data.kib_global_lock);
-
- INIT_LIST_HEAD(&kiblnd_data.kib_devs);
- INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
-
- kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
- kiblnd_data.kib_peers = kvmalloc_array(kiblnd_data.kib_peer_hash_size,
- sizeof(struct list_head),
- GFP_KERNEL);
- if (!kiblnd_data.kib_peers)
- goto failed;
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
- INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
-
- spin_lock_init(&kiblnd_data.kib_connd_lock);
- INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
- INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
- INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list);
- INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait);
-
- init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
- init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
-
- kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*sched));
- if (!kiblnd_data.kib_scheds)
- goto failed;
-
- cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
- int nthrs;
-
- spin_lock_init(&sched->ibs_lock);
- INIT_LIST_HEAD(&sched->ibs_conns);
- init_waitqueue_head(&sched->ibs_waitq);
-
- nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
- if (*kiblnd_tunables.kib_nscheds > 0) {
- nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
- } else {
- /*
- * max to half of CPUs, another half is reserved for
- * upper layer modules
- */
- nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
- }
-
- sched->ibs_nthreads_max = nthrs;
- sched->ibs_cpt = i;
- }
-
- kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
-
- /* lists/ptrs/locks initialised */
- kiblnd_data.kib_init = IBLND_INIT_DATA;
- /*****************************************************/
-
- rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
- if (rc) {
- CERROR("Can't spawn o2iblnd connd: %d\n", rc);
- goto failed;
- }
-
- if (*kiblnd_tunables.kib_dev_failover)
- rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
- "kiblnd_failover");
-
- if (rc) {
- CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
- goto failed;
- }
-
- /* flag everything initialised */
- kiblnd_data.kib_init = IBLND_INIT_ALL;
- /*****************************************************/
-
- return 0;
-
- failed:
- kiblnd_base_shutdown();
- return -ENETDOWN;
-}
-
-static int kiblnd_start_schedulers(struct kib_sched_info *sched)
-{
- int rc = 0;
- int nthrs;
- int i;
-
- if (!sched->ibs_nthreads) {
- if (*kiblnd_tunables.kib_nscheds > 0) {
- nthrs = sched->ibs_nthreads_max;
- } else {
- nthrs = cfs_cpt_weight(lnet_cpt_table(),
- sched->ibs_cpt);
- nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
- nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
- }
- } else {
- LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
- /* increase one thread if there is new interface */
- nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max;
- }
-
- for (i = 0; i < nthrs; i++) {
- long id;
- char name[20];
-
- id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
- snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
- KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
- rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
- if (!rc)
- continue;
-
- CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
- sched->ibs_cpt, sched->ibs_nthreads + i, rc);
- break;
- }
-
- sched->ibs_nthreads += i;
- return rc;
-}
-
-static int kiblnd_dev_start_threads(struct kib_dev *dev, int newdev, __u32 *cpts,
- int ncpts)
-{
- int cpt;
- int rc;
- int i;
-
- for (i = 0; i < ncpts; i++) {
- struct kib_sched_info *sched;
-
- cpt = !cpts ? i : cpts[i];
- sched = kiblnd_data.kib_scheds[cpt];
-
- if (!newdev && sched->ibs_nthreads > 0)
- continue;
-
- rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
- if (rc) {
- CERROR("Failed to start scheduler threads for %s\n",
- dev->ibd_ifname);
- return rc;
- }
- }
- return 0;
-}
-
-static struct kib_dev *kiblnd_dev_search(char *ifname)
-{
- struct kib_dev *alias = NULL;
- struct kib_dev *dev;
- char *colon;
- char *colon2;
-
- colon = strchr(ifname, ':');
- list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (!strcmp(&dev->ibd_ifname[0], ifname))
- return dev;
-
- if (alias)
- continue;
-
- colon2 = strchr(dev->ibd_ifname, ':');
- if (colon)
- *colon = 0;
- if (colon2)
- *colon2 = 0;
-
- if (!strcmp(&dev->ibd_ifname[0], ifname))
- alias = dev;
-
- if (colon)
- *colon = ':';
- if (colon2)
- *colon2 = ':';
- }
- return alias;
-}
-
-static int kiblnd_startup(struct lnet_ni *ni)
-{
- char *ifname;
- struct kib_dev *ibdev = NULL;
- struct kib_net *net;
- struct timespec64 tv;
- unsigned long flags;
- int rc;
- int newdev;
-
- LASSERT(ni->ni_lnd == &the_o2iblnd);
-
- if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
- rc = kiblnd_base_startup();
- if (rc)
- return rc;
- }
-
- net = kzalloc(sizeof(*net), GFP_NOFS);
- ni->ni_data = net;
- if (!net)
- goto net_failed;
-
- ktime_get_real_ts64(&tv);
- net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
- tv.tv_nsec / NSEC_PER_USEC;
-
- rc = kiblnd_tunables_setup(ni);
- if (rc)
- goto net_failed;
-
- if (ni->ni_interfaces[0]) {
- /* Use the IPoIB interface specified in 'networks=' */
-
- BUILD_BUG_ON(LNET_MAX_INTERFACES <= 1);
- if (ni->ni_interfaces[1]) {
- CERROR("Multiple interfaces not supported\n");
- goto failed;
- }
-
- ifname = ni->ni_interfaces[0];
- } else {
- ifname = *kiblnd_tunables.kib_default_ipif;
- }
-
- if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
- CERROR("IPoIB interface name too long: %s\n", ifname);
- goto failed;
- }
-
- ibdev = kiblnd_dev_search(ifname);
-
- newdev = !ibdev;
- /* hmm...create kib_dev even for alias */
- if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname))
- ibdev = kiblnd_create_dev(ifname);
-
- if (!ibdev)
- goto failed;
-
- net->ibn_dev = ibdev;
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
-
- rc = kiblnd_dev_start_threads(ibdev, newdev,
- ni->ni_cpts, ni->ni_ncpts);
- if (rc)
- goto failed;
-
- rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
- if (rc) {
- CERROR("Failed to initialize NI pools: %d\n", rc);
- goto failed;
- }
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- ibdev->ibd_nnets++;
- list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- net->ibn_init = IBLND_INIT_ALL;
-
- return 0;
-
-failed:
- if (!net->ibn_dev && ibdev)
- kiblnd_destroy_dev(ibdev);
-
-net_failed:
- kiblnd_shutdown(ni);
-
- CDEBUG(D_NET, "%s failed\n", __func__);
- return -ENETDOWN;
-}
-
-static struct lnet_lnd the_o2iblnd = {
- .lnd_type = O2IBLND,
- .lnd_startup = kiblnd_startup,
- .lnd_shutdown = kiblnd_shutdown,
- .lnd_ctl = kiblnd_ctl,
- .lnd_query = kiblnd_query,
- .lnd_send = kiblnd_send,
- .lnd_recv = kiblnd_recv,
-};
-
-static void __exit ko2iblnd_exit(void)
-{
- lnet_unregister_lnd(&the_o2iblnd);
-}
-
-static int __init ko2iblnd_init(void)
-{
- BUILD_BUG_ON(sizeof(struct kib_msg) > IBLND_MSG_SIZE);
- BUILD_BUG_ON(offsetof(struct kib_msg,
- ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- > IBLND_MSG_SIZE);
- BUILD_BUG_ON(offsetof(struct kib_msg,
- ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- > IBLND_MSG_SIZE);
-
- kiblnd_tunables_init();
-
- lnet_register_lnd(&the_o2iblnd);
-
- return 0;
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver");
-MODULE_VERSION("2.7.0");
-MODULE_LICENSE("GPL");
-
-module_init(ko2iblnd_init);
-module_exit(ko2iblnd_exit);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
deleted file mode 100644
index b18911d09e9a..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ /dev/null
@@ -1,1038 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/o2iblnd/o2iblnd.h
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/uio.h>
-#include <linux/uaccess.h>
-
-#include <linux/io.h>
-
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/list.h>
-#include <linux/kmod.h>
-#include <linux/sysctl.h>
-#include <linux/pci.h>
-
-#include <net/sock.h>
-#include <linux/in.h>
-
-#include <rdma/rdma_cm.h>
-#include <rdma/ib_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
-
-#define DEBUG_SUBSYSTEM S_LND
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-lnet.h>
-
-#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
-/* # scheduler loops before reschedule */
-#define IBLND_RESCHED 100
-
-#define IBLND_N_SCHED 2
-#define IBLND_N_SCHED_HIGH 4
-
-struct kib_tunables {
- int *kib_dev_failover; /* HCA failover */
- unsigned int *kib_service; /* IB service number */
- int *kib_min_reconnect_interval; /* first failed connection retry... */
- int *kib_max_reconnect_interval; /* exponentially increasing to this */
- int *kib_cksum; /* checksum struct kib_msg? */
- int *kib_timeout; /* comms timeout (seconds) */
- int *kib_keepalive; /* keepalive timeout (seconds) */
- int *kib_ntx; /* # tx descs */
- char **kib_default_ipif; /* default IPoIB interface */
- int *kib_retry_count;
- int *kib_rnr_retry_count;
- int *kib_ib_mtu; /* IB MTU */
- int *kib_require_priv_port; /* accept only privileged ports */
- int *kib_use_priv_port; /* use privileged port for active connect */
- int *kib_nscheds; /* # threads on each CPT */
-};
-
-extern struct kib_tunables kiblnd_tunables;
-
-#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
-#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
-
-#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
-#define IBLND_CREDITS_MAX ((typeof(((struct kib_msg *)0)->ibm_credits)) - 1) /* Max # of peer credits */
-
-/* when eagerly to return credits */
-#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_CREDIT_HIGHWATER_V1 : \
- t->lnd_peercredits_hiw)
-
-#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(current->nsproxy->net_ns, \
- cb, dev, \
- ps, qpt)
-
-/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
-#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
-#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
-
-#define IBLND_FRAG_SHIFT (PAGE_SHIFT - 12) /* frag size on wire is in 4K units */
-#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
-#define IBLND_MAX_RDMA_FRAGS (LNET_MAX_PAYLOAD >> 12)/* max # of fragments supported in 4K size */
-
-/************************/
-/* derived constants... */
-/* Pools (shared by connections on each CPT) */
-/* These pools can grow at runtime, so don't need give a very large value */
-#define IBLND_TX_POOL 256
-#define IBLND_FMR_POOL 256
-#define IBLND_FMR_POOL_FLUSH 192
-
-#define IBLND_RX_MSGS(c) \
- ((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version))
-#define IBLND_RX_MSG_BYTES(c) (IBLND_RX_MSGS(c) * IBLND_MSG_SIZE)
-#define IBLND_RX_MSG_PAGES(c) \
- ((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE)
-
-/* WRs and CQEs (per connection) */
-#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
-#define IBLND_SEND_WRS(c) \
- (((c->ibc_max_frags + 1) << IBLND_FRAG_SHIFT) * \
- kiblnd_concurrent_sends(c->ibc_version, c->ibc_peer->ibp_ni))
-#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
-
-struct kib_hca_dev;
-
-/* o2iblnd can run over aliased interface */
-#ifdef IFALIASZ
-#define KIB_IFNAME_SIZE IFALIASZ
-#else
-#define KIB_IFNAME_SIZE 256
-#endif
-
-struct kib_dev {
- struct list_head ibd_list; /* chain on kib_devs */
- struct list_head ibd_fail_list; /* chain on kib_failed_devs */
- __u32 ibd_ifip; /* IPoIB interface IP */
-
- /* IPoIB interface name */
- char ibd_ifname[KIB_IFNAME_SIZE];
- int ibd_nnets; /* # nets extant */
-
- unsigned long ibd_next_failover;
- int ibd_failed_failover; /* # failover failures */
- unsigned int ibd_failover; /* failover in progress */
- unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
- struct list_head ibd_nets;
- struct kib_hca_dev *ibd_hdev;
-};
-
-struct kib_hca_dev {
- struct rdma_cm_id *ibh_cmid; /* listener cmid */
- struct ib_device *ibh_ibdev; /* IB device */
- int ibh_page_shift; /* page shift of current HCA */
- int ibh_page_size; /* page size of current HCA */
- __u64 ibh_page_mask; /* page mask of current HCA */
- int ibh_mr_shift; /* bits shift of max MR size */
- __u64 ibh_mr_size; /* size of MR */
- struct ib_pd *ibh_pd; /* PD */
- struct kib_dev *ibh_dev; /* owner */
- atomic_t ibh_ref; /* refcount */
-};
-
-/** # of seconds to keep pool alive */
-#define IBLND_POOL_DEADLINE 300
-/** # of seconds to retry if allocation failed */
-#define IBLND_POOL_RETRY 1
-
-struct kib_pages {
- int ibp_npages; /* # pages */
- struct page *ibp_pages[0]; /* page array */
-};
-
-struct kib_pool;
-struct kib_poolset;
-
-typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps,
- int inc, struct kib_pool **pp_po);
-typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
-typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
-typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
-
-struct kib_net;
-
-#define IBLND_POOL_NAME_LEN 32
-
-struct kib_poolset {
- spinlock_t ps_lock; /* serialize */
- struct kib_net *ps_net; /* network it belongs to */
- char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
- struct list_head ps_pool_list; /* list of pools */
- struct list_head ps_failed_pool_list;/* failed pool list */
- unsigned long ps_next_retry; /* time stamp for retry if */
- /* failed to allocate */
- int ps_increasing; /* is allocating new pool */
- int ps_pool_size; /* new pool size */
- int ps_cpt; /* CPT id */
-
- kib_ps_pool_create_t ps_pool_create; /* create a new pool */
- kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
- kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
- kib_ps_node_fini_t ps_node_fini; /* finalize node */
-};
-
-struct kib_pool {
- struct list_head po_list; /* chain on pool list */
- struct list_head po_free_list; /* pre-allocated node */
- struct kib_poolset *po_owner; /* pool_set of this pool */
- unsigned long po_deadline; /* deadline of this pool */
- int po_allocated; /* # of elements in use */
- int po_failed; /* pool is created on failed HCA */
- int po_size; /* # of pre-allocated elements */
-};
-
-struct kib_tx_poolset {
- struct kib_poolset tps_poolset; /* pool-set */
- __u64 tps_next_tx_cookie; /* cookie of TX */
-};
-
-struct kib_tx_pool {
- struct kib_pool tpo_pool; /* pool */
- struct kib_hca_dev *tpo_hdev; /* device for this pool */
- struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
- struct kib_pages *tpo_tx_pages; /* premapped tx msg pages */
-};
-
-struct kib_fmr_poolset {
- spinlock_t fps_lock; /* serialize */
- struct kib_net *fps_net; /* IB network */
- struct list_head fps_pool_list; /* FMR pool list */
- struct list_head fps_failed_pool_list;/* FMR pool list */
- __u64 fps_version; /* validity stamp */
- int fps_cpt; /* CPT id */
- int fps_pool_size;
- int fps_flush_trigger;
- int fps_cache;
- int fps_increasing; /* is allocating new pool */
- unsigned long fps_next_retry; /* time stamp for retry if*/
- /* failed to allocate */
-};
-
-struct kib_fast_reg_descriptor { /* For fast registration */
- struct list_head frd_list;
- struct ib_send_wr frd_inv_wr;
- struct ib_reg_wr frd_fastreg_wr;
- struct ib_mr *frd_mr;
- bool frd_valid;
-};
-
-struct kib_fmr_pool {
- struct list_head fpo_list; /* chain on pool list */
- struct kib_hca_dev *fpo_hdev; /* device for this pool */
- struct kib_fmr_poolset *fpo_owner; /* owner of this pool */
- union {
- struct {
- struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
- } fmr;
- struct { /* For fast registration */
- struct list_head fpo_pool_list;
- int fpo_pool_size;
- } fast_reg;
- };
- unsigned long fpo_deadline; /* deadline of this pool */
- int fpo_failed; /* fmr pool is failed */
- int fpo_map_count; /* # of mapped FMR */
- int fpo_is_fmr;
-};
-
-struct kib_fmr {
- struct kib_fmr_pool *fmr_pool; /* pool of FMR */
- struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
- struct kib_fast_reg_descriptor *fmr_frd;
- u32 fmr_key;
-};
-
-struct kib_net {
- struct list_head ibn_list; /* chain on struct kib_dev::ibd_nets */
- __u64 ibn_incarnation;/* my epoch */
- int ibn_init; /* initialisation state */
- int ibn_shutdown; /* shutting down? */
-
- atomic_t ibn_npeers; /* # peers extant */
- atomic_t ibn_nconns; /* # connections extant */
-
- struct kib_tx_poolset **ibn_tx_ps; /* tx pool-set */
- struct kib_fmr_poolset **ibn_fmr_ps; /* fmr pool-set */
-
- struct kib_dev *ibn_dev; /* underlying IB device */
-};
-
-#define KIB_THREAD_SHIFT 16
-#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid))
-#define KIB_THREAD_CPT(id) ((id) >> KIB_THREAD_SHIFT)
-#define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
-
-struct kib_sched_info {
- spinlock_t ibs_lock; /* serialise */
- wait_queue_head_t ibs_waitq; /* schedulers sleep here */
- struct list_head ibs_conns; /* conns to check for rx completions */
- int ibs_nthreads; /* number of scheduler threads */
- int ibs_nthreads_max; /* max allowed scheduler threads */
- int ibs_cpt; /* CPT id */
-};
-
-struct kib_data {
- int kib_init; /* initialisation state */
- int kib_shutdown; /* shut down? */
- struct list_head kib_devs; /* IB devices extant */
- struct list_head kib_failed_devs; /* list head of failed devices */
- wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */
- atomic_t kib_nthreads; /* # live threads */
- rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */
- struct list_head *kib_peers; /* hash table of all my known peers */
- int kib_peer_hash_size; /* size of kib_peers */
- void *kib_connd; /* the connd task (serialisation assertions) */
- struct list_head kib_connd_conns; /* connections to setup/teardown */
- struct list_head kib_connd_zombies; /* connections with zero refcount */
- /* connections to reconnect */
- struct list_head kib_reconn_list;
- /* peers wait for reconnection */
- struct list_head kib_reconn_wait;
- /**
- * The second that peers are pulled out from \a kib_reconn_wait
- * for reconnection.
- */
- time64_t kib_reconn_sec;
-
- wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */
- spinlock_t kib_connd_lock; /* serialise */
- struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
- struct kib_sched_info **kib_scheds; /* percpt data for schedulers */
-};
-
-#define IBLND_INIT_NOTHING 0
-#define IBLND_INIT_DATA 1
-#define IBLND_INIT_ALL 2
-
-/************************************************************************
- * IB Wire message format.
- * These are sent in sender's byte order (i.e. receiver flips).
- */
-
-struct kib_connparams {
- __u16 ibcp_queue_depth;
- __u16 ibcp_max_frags;
- __u32 ibcp_max_msg_size;
-} WIRE_ATTR;
-
-struct kib_immediate_msg {
- struct lnet_hdr ibim_hdr; /* portals header */
- char ibim_payload[0]; /* piggy-backed payload */
-} WIRE_ATTR;
-
-struct kib_rdma_frag {
- __u32 rf_nob; /* # bytes this frag */
- __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
-} WIRE_ATTR;
-
-struct kib_rdma_desc {
- __u32 rd_key; /* local/remote key */
- __u32 rd_nfrags; /* # fragments */
- struct kib_rdma_frag rd_frags[0]; /* buffer frags */
-} WIRE_ATTR;
-
-struct kib_putreq_msg {
- struct lnet_hdr ibprm_hdr; /* portals header */
- __u64 ibprm_cookie; /* opaque completion cookie */
-} WIRE_ATTR;
-
-struct kib_putack_msg {
- __u64 ibpam_src_cookie; /* reflected completion cookie */
- __u64 ibpam_dst_cookie; /* opaque completion cookie */
- struct kib_rdma_desc ibpam_rd; /* sender's sink buffer */
-} WIRE_ATTR;
-
-struct kib_get_msg {
- struct lnet_hdr ibgm_hdr; /* portals header */
- __u64 ibgm_cookie; /* opaque completion cookie */
- struct kib_rdma_desc ibgm_rd; /* rdma descriptor */
-} WIRE_ATTR;
-
-struct kib_completion_msg {
- __u64 ibcm_cookie; /* opaque completion cookie */
- __s32 ibcm_status; /* < 0 failure: >= 0 length */
-} WIRE_ATTR;
-
-struct kib_msg {
- /* First 2 fields fixed FOR ALL TIME */
- __u32 ibm_magic; /* I'm an ibnal message */
- __u16 ibm_version; /* this is my version number */
-
- __u8 ibm_type; /* msg type */
- __u8 ibm_credits; /* returned credits */
- __u32 ibm_nob; /* # bytes in whole message */
- __u32 ibm_cksum; /* checksum (0 == no checksum) */
- __u64 ibm_srcnid; /* sender's NID */
- __u64 ibm_srcstamp; /* sender's incarnation */
- __u64 ibm_dstnid; /* destination's NID */
- __u64 ibm_dststamp; /* destination's incarnation */
-
- union {
- struct kib_connparams connparams;
- struct kib_immediate_msg immediate;
- struct kib_putreq_msg putreq;
- struct kib_putack_msg putack;
- struct kib_get_msg get;
- struct kib_completion_msg completion;
- } WIRE_ATTR ibm_u;
-} WIRE_ATTR;
-
-#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */
-
-#define IBLND_MSG_VERSION_1 0x11
-#define IBLND_MSG_VERSION_2 0x12
-#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2
-
-#define IBLND_MSG_CONNREQ 0xc0 /* connection request */
-#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */
-#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */
-#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */
-#define IBLND_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */
-#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */
-#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */
-#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */
-#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */
-#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */
-
-struct kib_rej {
- __u32 ibr_magic; /* sender's magic */
- __u16 ibr_version; /* sender's version */
- __u8 ibr_why; /* reject reason */
- __u8 ibr_padding; /* padding */
- __u64 ibr_incarnation; /* incarnation of peer */
- struct kib_connparams ibr_cp; /* connection parameters */
-} WIRE_ATTR;
-
-/* connection rejection reasons */
-#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */
-#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */
-#define IBLND_REJECT_FATAL 3 /* Anything else */
-#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
-#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
-/* peer's rdma frags doesn't match mine */
-#define IBLND_REJECT_RDMA_FRAGS 6
-/* peer's msg queue size doesn't match mine */
-#define IBLND_REJECT_MSG_QUEUE_SIZE 7
-
-/***********************************************************************/
-
-struct kib_rx { /* receive message */
- struct list_head rx_list; /* queue for attention */
- struct kib_conn *rx_conn; /* owning conn */
- int rx_nob; /* # bytes received (-1 while posted) */
- enum ib_wc_status rx_status; /* completion status */
- struct kib_msg *rx_msg; /* message buffer (host vaddr) */
- __u64 rx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */
- struct ib_recv_wr rx_wrq; /* receive work item... */
- struct ib_sge rx_sge; /* ...and its memory */
-};
-
-#define IBLND_POSTRX_DONT_POST 0 /* don't post */
-#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
-#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
-#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */
-
-struct kib_tx { /* transmit message */
- struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
- struct kib_tx_pool *tx_pool; /* pool I'm from */
- struct kib_conn *tx_conn; /* owning conn */
- short tx_sending; /* # tx callbacks outstanding */
- short tx_queued; /* queued for sending */
- short tx_waiting; /* waiting for peer */
- int tx_status; /* LNET completion status */
- unsigned long tx_deadline; /* completion deadline */
- __u64 tx_cookie; /* completion cookie */
- struct lnet_msg *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
- struct kib_msg *tx_msg; /* message buffer (host vaddr) */
- __u64 tx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
- int tx_nwrq; /* # send work items */
- struct ib_rdma_wr *tx_wrq; /* send work items... */
- struct ib_sge *tx_sge; /* ...and their memory */
- struct kib_rdma_desc *tx_rd; /* rdma descriptor */
- int tx_nfrags; /* # entries in... */
- struct scatterlist *tx_frags; /* dma_map_sg descriptor */
- __u64 *tx_pages; /* rdma phys page addrs */
- struct kib_fmr fmr; /* FMR */
- int tx_dmadir; /* dma direction */
-};
-
-struct kib_connvars {
- struct kib_msg cv_msg; /* connection-in-progress variables */
-};
-
-struct kib_conn {
- struct kib_sched_info *ibc_sched; /* scheduler information */
- struct kib_peer *ibc_peer; /* owning peer */
- struct kib_hca_dev *ibc_hdev; /* HCA bound on */
- struct list_head ibc_list; /* stash on peer's conn list */
- struct list_head ibc_sched_list; /* schedule for attention */
- __u16 ibc_version; /* version of connection */
- /* reconnect later */
- __u16 ibc_reconnect:1;
- __u64 ibc_incarnation; /* which instance of the peer */
- atomic_t ibc_refcount; /* # users */
- int ibc_state; /* what's happening */
- int ibc_nsends_posted; /* # uncompleted sends */
- int ibc_noops_posted; /* # uncompleted NOOPs */
- int ibc_credits; /* # credits I have */
- int ibc_outstanding_credits; /* # credits to return */
- int ibc_reserved_credits; /* # ACK/DONE msg credits */
- int ibc_comms_error; /* set on comms error */
- /* connections queue depth */
- __u16 ibc_queue_depth;
- /* connections max frags */
- __u16 ibc_max_frags;
- unsigned int ibc_nrx:16; /* receive buffers owned */
- unsigned int ibc_scheduled:1; /* scheduled for attention */
- unsigned int ibc_ready:1; /* CQ callback fired */
- unsigned long ibc_last_send; /* time of last send */
- struct list_head ibc_connd_list; /* link chain for */
- /* kiblnd_check_conns only */
- struct list_head ibc_early_rxs; /* rxs completed before ESTABLISHED */
- struct list_head ibc_tx_noops; /* IBLND_MSG_NOOPs for */
- /* IBLND_MSG_VERSION_1 */
- struct list_head ibc_tx_queue; /* sends that need a credit */
- struct list_head ibc_tx_queue_nocred; /* sends that don't need a */
- /* credit */
- struct list_head ibc_tx_queue_rsrvd; /* sends that need to */
- /* reserve an ACK/DONE msg */
- struct list_head ibc_active_txs; /* active tx awaiting completion */
- spinlock_t ibc_lock; /* serialise */
- struct kib_rx *ibc_rxs; /* the rx descs */
- struct kib_pages *ibc_rx_pages; /* premapped rx msg pages */
-
- struct rdma_cm_id *ibc_cmid; /* CM id */
- struct ib_cq *ibc_cq; /* completion queue */
-
- struct kib_connvars *ibc_connvars; /* in-progress connection state */
-};
-
-#define IBLND_CONN_INIT 0 /* being initialised */
-#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
-#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */
-#define IBLND_CONN_ESTABLISHED 3 /* connection established */
-#define IBLND_CONN_CLOSING 4 /* being closed */
-#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
-
-struct kib_peer {
- struct list_head ibp_list; /* stash on global peer list */
- lnet_nid_t ibp_nid; /* who's on the other end(s) */
- struct lnet_ni *ibp_ni; /* LNet interface */
- struct list_head ibp_conns; /* all active connections */
- struct list_head ibp_tx_queue; /* msgs waiting for a conn */
- __u64 ibp_incarnation; /* incarnation of peer */
- /* when (in jiffies) I was last alive */
- unsigned long ibp_last_alive;
- /* # users */
- atomic_t ibp_refcount;
- /* version of peer */
- __u16 ibp_version;
- /* current passive connection attempts */
- unsigned short ibp_accepting;
- /* current active connection attempts */
- unsigned short ibp_connecting;
- /* reconnect this peer later */
- unsigned short ibp_reconnecting:1;
- /* counter of how many times we triggered a conn race */
- unsigned char ibp_races;
- /* # consecutive reconnection attempts to this peer */
- unsigned int ibp_reconnected;
- /* errno on closing this peer */
- int ibp_error;
- /* max map_on_demand */
- __u16 ibp_max_frags;
- /* max_peer_credits */
- __u16 ibp_queue_depth;
-};
-
-extern struct kib_data kiblnd_data;
-
-void kiblnd_hdev_destroy(struct kib_hca_dev *hdev);
-
-int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
-
-/* max # of fragments configured by user */
-static inline int
-kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
-{
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
- int mod;
-
- tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
- mod = tunables->lnd_map_on_demand;
- return mod ? mod : IBLND_MAX_RDMA_FRAGS >> IBLND_FRAG_SHIFT;
-}
-
-static inline int
-kiblnd_rdma_frags(int version, struct lnet_ni *ni)
-{
- return version == IBLND_MSG_VERSION_1 ?
- (IBLND_MAX_RDMA_FRAGS >> IBLND_FRAG_SHIFT) :
- kiblnd_cfg_rdma_frags(ni);
-}
-
-static inline int
-kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
-{
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
- int concurrent_sends;
-
- tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
- concurrent_sends = tunables->lnd_concurrent_sends;
-
- if (version == IBLND_MSG_VERSION_1) {
- if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
- return IBLND_MSG_QUEUE_SIZE_V1 * 2;
-
- if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
- return IBLND_MSG_QUEUE_SIZE_V1 / 2;
- }
-
- return concurrent_sends;
-}
-
-static inline void
-kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev)
-{
- LASSERT(atomic_read(&hdev->ibh_ref) > 0);
- atomic_inc(&hdev->ibh_ref);
-}
-
-static inline void
-kiblnd_hdev_decref(struct kib_hca_dev *hdev)
-{
- LASSERT(atomic_read(&hdev->ibh_ref) > 0);
- if (atomic_dec_and_test(&hdev->ibh_ref))
- kiblnd_hdev_destroy(hdev);
-}
-
-static inline int
-kiblnd_dev_can_failover(struct kib_dev *dev)
-{
- if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
- return 0;
-
- if (!*kiblnd_tunables.kib_dev_failover) /* disabled */
- return 0;
-
- if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
- return 1;
-
- return dev->ibd_can_failover;
-}
-
-#define kiblnd_conn_addref(conn) \
-do { \
- CDEBUG(D_NET, "conn[%p] (%d)++\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- atomic_inc(&(conn)->ibc_refcount); \
-} while (0)
-
-#define kiblnd_conn_decref(conn) \
-do { \
- unsigned long flags; \
- \
- CDEBUG(D_NET, "conn[%p] (%d)--\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
- if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
- list_add_tail(&(conn)->ibc_list, \
- &kiblnd_data.kib_connd_zombies); \
- wake_up(&kiblnd_data.kib_connd_waitq); \
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
- } \
-} while (0)
-
-#define kiblnd_peer_addref(peer) \
-do { \
- CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
- (peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read(&(peer)->ibp_refcount)); \
- atomic_inc(&(peer)->ibp_refcount); \
-} while (0)
-
-#define kiblnd_peer_decref(peer) \
-do { \
- CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
- (peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read(&(peer)->ibp_refcount)); \
- LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
- if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
- kiblnd_destroy_peer(peer); \
-} while (0)
-
-static inline bool
-kiblnd_peer_connecting(struct kib_peer *peer)
-{
- return peer->ibp_connecting ||
- peer->ibp_reconnecting ||
- peer->ibp_accepting;
-}
-
-static inline bool
-kiblnd_peer_idle(struct kib_peer *peer)
-{
- return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns);
-}
-
-static inline struct list_head *
-kiblnd_nid2peerlist(lnet_nid_t nid)
-{
- unsigned int hash =
- ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
-
- return &kiblnd_data.kib_peers[hash];
-}
-
-static inline int
-kiblnd_peer_active(struct kib_peer *peer)
-{
- /* Am I in the peer hash table? */
- return !list_empty(&peer->ibp_list);
-}
-
-static inline struct kib_conn *
-kiblnd_get_conn_locked(struct kib_peer *peer)
-{
- LASSERT(!list_empty(&peer->ibp_conns));
-
- /* just return the first connection */
- return list_entry(peer->ibp_conns.next, struct kib_conn, ibc_list);
-}
-
-static inline int
-kiblnd_send_keepalive(struct kib_conn *conn)
-{
- return (*kiblnd_tunables.kib_keepalive > 0) &&
- cfs_time_after(jiffies, conn->ibc_last_send +
- msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
- MSEC_PER_SEC));
-}
-
-static inline int
-kiblnd_need_noop(struct kib_conn *conn)
-{
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
-
- LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
-
- if (conn->ibc_outstanding_credits <
- IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
- !kiblnd_send_keepalive(conn))
- return 0; /* No need to send NOOP */
-
- if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
- if (!list_empty(&conn->ibc_tx_queue_nocred))
- return 0; /* NOOP can be piggybacked */
-
- /* No tx to piggyback NOOP onto or no credit to send a tx */
- return (list_empty(&conn->ibc_tx_queue) ||
- !conn->ibc_credits);
- }
-
- if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
- !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
- !conn->ibc_credits) /* no credit */
- return 0;
-
- if (conn->ibc_credits == 1 && /* last credit reserved for */
- !conn->ibc_outstanding_credits) /* giving back credits */
- return 0;
-
- /* No tx to piggyback NOOP onto or no credit to send a tx */
- return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
-}
-
-static inline void
-kiblnd_abort_receives(struct kib_conn *conn)
-{
- ib_modify_qp(conn->ibc_cmid->qp,
- &kiblnd_data.kib_error_qpa, IB_QP_STATE);
-}
-
-static inline const char *
-kiblnd_queue2str(struct kib_conn *conn, struct list_head *q)
-{
- if (q == &conn->ibc_tx_queue)
- return "tx_queue";
-
- if (q == &conn->ibc_tx_queue_rsrvd)
- return "tx_queue_rsrvd";
-
- if (q == &conn->ibc_tx_queue_nocred)
- return "tx_queue_nocred";
-
- if (q == &conn->ibc_active_txs)
- return "active_txs";
-
- LBUG();
- return NULL;
-}
-
-/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the */
-/* lowest bits of the work request id to stash the work item type. */
-
-#define IBLND_WID_INVAL 0
-#define IBLND_WID_TX 1
-#define IBLND_WID_RX 2
-#define IBLND_WID_RDMA 3
-#define IBLND_WID_MR 4
-#define IBLND_WID_MASK 7UL
-
-static inline __u64
-kiblnd_ptr2wreqid(void *ptr, int type)
-{
- unsigned long lptr = (unsigned long)ptr;
-
- LASSERT(!(lptr & IBLND_WID_MASK));
- LASSERT(!(type & ~IBLND_WID_MASK));
- return (__u64)(lptr | type);
-}
-
-static inline void *
-kiblnd_wreqid2ptr(__u64 wreqid)
-{
- return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
-}
-
-static inline int
-kiblnd_wreqid2type(__u64 wreqid)
-{
- return wreqid & IBLND_WID_MASK;
-}
-
-static inline void
-kiblnd_set_conn_state(struct kib_conn *conn, int state)
-{
- conn->ibc_state = state;
- mb();
-}
-
-static inline void
-kiblnd_init_msg(struct kib_msg *msg, int type, int body_nob)
-{
- msg->ibm_type = type;
- msg->ibm_nob = offsetof(struct kib_msg, ibm_u) + body_nob;
-}
-
-static inline int
-kiblnd_rd_size(struct kib_rdma_desc *rd)
-{
- int i;
- int size;
-
- for (i = size = 0; i < rd->rd_nfrags; i++)
- size += rd->rd_frags[i].rf_nob;
-
- return size;
-}
-
-static inline __u64
-kiblnd_rd_frag_addr(struct kib_rdma_desc *rd, int index)
-{
- return rd->rd_frags[index].rf_addr;
-}
-
-static inline __u32
-kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index)
-{
- return rd->rd_frags[index].rf_nob;
-}
-
-static inline __u32
-kiblnd_rd_frag_key(struct kib_rdma_desc *rd, int index)
-{
- return rd->rd_key;
-}
-
-static inline int
-kiblnd_rd_consume_frag(struct kib_rdma_desc *rd, int index, __u32 nob)
-{
- if (nob < rd->rd_frags[index].rf_nob) {
- rd->rd_frags[index].rf_addr += nob;
- rd->rd_frags[index].rf_nob -= nob;
- } else {
- index++;
- }
-
- return index;
-}
-
-static inline int
-kiblnd_rd_msg_size(struct kib_rdma_desc *rd, int msgtype, int n)
-{
- LASSERT(msgtype == IBLND_MSG_GET_REQ ||
- msgtype == IBLND_MSG_PUT_ACK);
-
- return msgtype == IBLND_MSG_GET_REQ ?
- offsetof(struct kib_get_msg, ibgm_rd.rd_frags[n]) :
- offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[n]);
-}
-
-static inline __u64
-kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
- return ib_dma_mapping_error(dev, dma_addr);
-}
-
-static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
- void *msg, size_t size,
- enum dma_data_direction direction)
-{
- return ib_dma_map_single(dev, msg, size, direction);
-}
-
-static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
- __u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- ib_dma_unmap_single(dev, addr, size, direction);
-}
-
-#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0)
-#define KIBLND_UNMAP_ADDR(p, m, a) (a)
-
-static inline int kiblnd_dma_map_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- return ib_dma_map_sg(dev, sg, nents, direction);
-}
-
-static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- ib_dma_unmap_sg(dev, sg, nents, direction);
-}
-
-static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
- struct scatterlist *sg)
-{
- return ib_sg_dma_address(dev, sg);
-}
-
-static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
- struct scatterlist *sg)
-{
- return ib_sg_dma_len(dev, sg);
-}
-
-/* XXX We use KIBLND_CONN_PARAM(e) as writable buffer, it's not strictly */
-/* right because OFED1.2 defines it as const, to use it we have to add */
-/* (void *) cast to overcome "const" */
-
-#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
-#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-
-void kiblnd_map_rx_descs(struct kib_conn *conn);
-void kiblnd_unmap_rx_descs(struct kib_conn *conn);
-void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);
-struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps);
-
-int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
- struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
- struct kib_fmr *fmr);
-void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status);
-
-int kiblnd_tunables_setup(struct lnet_ni *ni);
-void kiblnd_tunables_init(void);
-
-int kiblnd_connd(void *arg);
-int kiblnd_scheduler(void *arg);
-int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
-int kiblnd_failover_thread(void *arg);
-
-int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages);
-
-int kiblnd_cm_callback(struct rdma_cm_id *cmid,
- struct rdma_cm_event *event);
-int kiblnd_translate_mtu(int value);
-
-int kiblnd_dev_failover(struct kib_dev *dev);
-int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp,
- lnet_nid_t nid);
-void kiblnd_destroy_peer(struct kib_peer *peer);
-bool kiblnd_reconnect_peer(struct kib_peer *peer);
-void kiblnd_destroy_dev(struct kib_dev *dev);
-void kiblnd_unlink_peer_locked(struct kib_peer *peer);
-struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid);
-int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
- int version, __u64 incarnation);
-int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
-
-struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
- struct rdma_cm_id *cmid,
- int state, int version);
-void kiblnd_destroy_conn(struct kib_conn *conn);
-void kiblnd_close_conn(struct kib_conn *conn, int error);
-void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
-
-void kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid);
-void kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
- int status);
-
-void kiblnd_qp_event(struct ib_event *event, void *arg);
-void kiblnd_cq_event(struct ib_event *event, void *arg);
-void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
-
-void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
- int credits, lnet_nid_t dstnid, __u64 dststamp);
-int kiblnd_unpack_msg(struct kib_msg *msg, int nob);
-int kiblnd_post_rx(struct kib_rx *rx, int credit);
-
-int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
-int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
- int delayed, struct iov_iter *to, unsigned int rlen);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
deleted file mode 100644
index 6690a6cd4e34..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ /dev/null
@@ -1,3751 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/o2iblnd/o2iblnd_cb.c
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include "o2iblnd.h"
-
-#define MAX_CONN_RACES_BEFORE_ABORT 20
-
-static void kiblnd_peer_alive(struct kib_peer *peer);
-static void kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error);
-static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx,
- int type, int body_nob);
-static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
- int resid, struct kib_rdma_desc *dstrd,
- __u64 dstcookie);
-static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
-static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
-static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx);
-static void kiblnd_check_sends_locked(struct kib_conn *conn);
-
-static void
-kiblnd_tx_done(struct lnet_ni *ni, struct kib_tx *tx)
-{
- struct lnet_msg *lntmsg[2];
- struct kib_net *net = ni->ni_data;
- int rc;
- int i;
-
- LASSERT(net);
- LASSERT(!in_interrupt());
- LASSERT(!tx->tx_queued); /* mustn't be queued for sending */
- LASSERT(!tx->tx_sending); /* mustn't be awaiting sent callback */
- LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */
- LASSERT(tx->tx_pool);
-
- kiblnd_unmap_tx(ni, tx);
-
- /* tx may have up to 2 lnet msgs to finalise */
- lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
- lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
- rc = tx->tx_status;
-
- if (tx->tx_conn) {
- LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni);
-
- kiblnd_conn_decref(tx->tx_conn);
- tx->tx_conn = NULL;
- }
-
- tx->tx_nwrq = 0;
- tx->tx_status = 0;
-
- kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
-
- /* delay finalize until my descs have been freed */
- for (i = 0; i < 2; i++) {
- if (!lntmsg[i])
- continue;
-
- lnet_finalize(ni, lntmsg[i], rc);
- }
-}
-
-void
-kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int status)
-{
- struct kib_tx *tx;
-
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, struct kib_tx, tx_list);
-
- list_del(&tx->tx_list);
- /* complete now */
- tx->tx_waiting = 0;
- tx->tx_status = status;
- kiblnd_tx_done(ni, tx);
- }
-}
-
-static struct kib_tx *
-kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
-{
- struct kib_net *net = (struct kib_net *)ni->ni_data;
- struct list_head *node;
- struct kib_tx *tx;
- struct kib_tx_poolset *tps;
-
- tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
- node = kiblnd_pool_alloc_node(&tps->tps_poolset);
- if (!node)
- return NULL;
- tx = list_entry(node, struct kib_tx, tx_list);
-
- LASSERT(!tx->tx_nwrq);
- LASSERT(!tx->tx_queued);
- LASSERT(!tx->tx_sending);
- LASSERT(!tx->tx_waiting);
- LASSERT(!tx->tx_status);
- LASSERT(!tx->tx_conn);
- LASSERT(!tx->tx_lntmsg[0]);
- LASSERT(!tx->tx_lntmsg[1]);
- LASSERT(!tx->tx_nfrags);
-
- return tx;
-}
-
-static void
-kiblnd_drop_rx(struct kib_rx *rx)
-{
- struct kib_conn *conn = rx->rx_conn;
- struct kib_sched_info *sched = conn->ibc_sched;
- unsigned long flags;
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
- LASSERT(conn->ibc_nrx > 0);
- conn->ibc_nrx--;
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- kiblnd_conn_decref(conn);
-}
-
-int
-kiblnd_post_rx(struct kib_rx *rx, int credit)
-{
- struct kib_conn *conn = rx->rx_conn;
- struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
- struct ib_recv_wr *bad_wrq = NULL;
- int rc;
-
- LASSERT(net);
- LASSERT(!in_interrupt());
- LASSERT(credit == IBLND_POSTRX_NO_CREDIT ||
- credit == IBLND_POSTRX_PEER_CREDIT ||
- credit == IBLND_POSTRX_RSRVD_CREDIT);
-
- rx->rx_sge.lkey = conn->ibc_hdev->ibh_pd->local_dma_lkey;
- rx->rx_sge.addr = rx->rx_msgaddr;
- rx->rx_sge.length = IBLND_MSG_SIZE;
-
- rx->rx_wrq.next = NULL;
- rx->rx_wrq.sg_list = &rx->rx_sge;
- rx->rx_wrq.num_sge = 1;
- rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
-
- LASSERT(conn->ibc_state >= IBLND_CONN_INIT);
- LASSERT(rx->rx_nob >= 0); /* not posted */
-
- if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
- kiblnd_drop_rx(rx); /* No more posts for this rx */
- return 0;
- }
-
- rx->rx_nob = -1; /* flag posted */
-
- /* NB: need an extra reference after ib_post_recv because we don't
- * own this rx (and rx::rx_conn) anymore, LU-5678.
- */
- kiblnd_conn_addref(conn);
- rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
- if (unlikely(rc)) {
- CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
- rx->rx_nob = 0;
- }
-
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
- goto out;
-
- if (unlikely(rc)) {
- kiblnd_close_conn(conn, rc);
- kiblnd_drop_rx(rx); /* No more posts for this rx */
- goto out;
- }
-
- if (credit == IBLND_POSTRX_NO_CREDIT)
- goto out;
-
- spin_lock(&conn->ibc_lock);
- if (credit == IBLND_POSTRX_PEER_CREDIT)
- conn->ibc_outstanding_credits++;
- else
- conn->ibc_reserved_credits++;
- kiblnd_check_sends_locked(conn);
- spin_unlock(&conn->ibc_lock);
-
-out:
- kiblnd_conn_decref(conn);
- return rc;
-}
-
-static struct kib_tx *
-kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, __u64 cookie)
-{
- struct list_head *tmp;
-
- list_for_each(tmp, &conn->ibc_active_txs) {
- struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
-
- LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_sending || tx->tx_waiting);
-
- if (tx->tx_cookie != cookie)
- continue;
-
- if (tx->tx_waiting &&
- tx->tx_msg->ibm_type == txtype)
- return tx;
-
- CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
- tx->tx_waiting ? "" : "NOT ",
- tx->tx_msg->ibm_type, txtype);
- }
- return NULL;
-}
-
-static void
-kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, __u64 cookie)
-{
- struct kib_tx *tx;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- int idle;
-
- spin_lock(&conn->ibc_lock);
-
- tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
- if (!tx) {
- spin_unlock(&conn->ibc_lock);
-
- CWARN("Unmatched completion type %x cookie %#llx from %s\n",
- txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_close_conn(conn, -EPROTO);
- return;
- }
-
- if (!tx->tx_status) { /* success so far */
- if (status < 0) /* failed? */
- tx->tx_status = status;
- else if (txtype == IBLND_MSG_GET_REQ)
- lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
- }
-
- tx->tx_waiting = 0;
-
- idle = !tx->tx_queued && !tx->tx_sending;
- if (idle)
- list_del(&tx->tx_list);
-
- spin_unlock(&conn->ibc_lock);
-
- if (idle)
- kiblnd_tx_done(ni, tx);
-}
-
-static void
-kiblnd_send_completion(struct kib_conn *conn, int type, int status, __u64 cookie)
-{
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
-
- if (!tx) {
- CERROR("Can't get tx for completion %x for %s\n",
- type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- return;
- }
-
- tx->tx_msg->ibm_u.completion.ibcm_status = status;
- tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
- kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));
-
- kiblnd_queue_tx(tx, conn);
-}
-
-static void
-kiblnd_handle_rx(struct kib_rx *rx)
-{
- struct kib_msg *msg = rx->rx_msg;
- struct kib_conn *conn = rx->rx_conn;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- int credits = msg->ibm_credits;
- struct kib_tx *tx;
- int rc = 0;
- int rc2;
- int post_credit;
-
- LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
- CDEBUG(D_NET, "Received %x[%d] from %s\n",
- msg->ibm_type, credits,
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
-
- if (credits) {
- /* Have I received credits that will let me send? */
- spin_lock(&conn->ibc_lock);
-
- if (conn->ibc_credits + credits >
- conn->ibc_queue_depth) {
- rc2 = conn->ibc_credits;
- spin_unlock(&conn->ibc_lock);
-
- CERROR("Bad credits from %s: %d + %d > %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc2, credits, conn->ibc_queue_depth);
-
- kiblnd_close_conn(conn, -EPROTO);
- kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
- return;
- }
-
- conn->ibc_credits += credits;
-
- /* This ensures the credit taken by NOOP can be returned */
- if (msg->ibm_type == IBLND_MSG_NOOP &&
- !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
- conn->ibc_outstanding_credits++;
-
- kiblnd_check_sends_locked(conn);
- spin_unlock(&conn->ibc_lock);
- }
-
- switch (msg->ibm_type) {
- default:
- CERROR("Bad IBLND message type %x from %s\n",
- msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- post_credit = IBLND_POSTRX_NO_CREDIT;
- rc = -EPROTO;
- break;
-
- case IBLND_MSG_NOOP:
- if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
- post_credit = IBLND_POSTRX_NO_CREDIT;
- break;
- }
-
- if (credits) /* credit already posted */
- post_credit = IBLND_POSTRX_NO_CREDIT;
- else /* a keepalive NOOP */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
-
- case IBLND_MSG_IMMEDIATE:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
- msg->ibm_srcnid, rx, 0);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
-
- case IBLND_MSG_PUT_REQ:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
- msg->ibm_srcnid, rx, 1);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
-
- case IBLND_MSG_PUT_NAK:
- CWARN("PUT_NACK from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- post_credit = IBLND_POSTRX_RSRVD_CREDIT;
- kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
- msg->ibm_u.completion.ibcm_status,
- msg->ibm_u.completion.ibcm_cookie);
- break;
-
- case IBLND_MSG_PUT_ACK:
- post_credit = IBLND_POSTRX_RSRVD_CREDIT;
-
- spin_lock(&conn->ibc_lock);
- tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
- msg->ibm_u.putack.ibpam_src_cookie);
- if (tx)
- list_del(&tx->tx_list);
- spin_unlock(&conn->ibc_lock);
-
- if (!tx) {
- CERROR("Unmatched PUT_ACK from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- rc = -EPROTO;
- break;
- }
-
- LASSERT(tx->tx_waiting);
- /*
- * CAVEAT EMPTOR: I could be racing with tx_complete, but...
- * (a) I can overwrite tx_msg since my peer has received it!
- * (b) tx_waiting set tells tx_complete() it's not done.
- */
- tx->tx_nwrq = 0; /* overwrite PUT_REQ */
-
- rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
- kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
- &msg->ibm_u.putack.ibpam_rd,
- msg->ibm_u.putack.ibpam_dst_cookie);
- if (rc2 < 0)
- CERROR("Can't setup rdma for PUT to %s: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
-
- spin_lock(&conn->ibc_lock);
- tx->tx_waiting = 0; /* clear waiting and queue atomically */
- kiblnd_queue_tx_locked(tx, conn);
- spin_unlock(&conn->ibc_lock);
- break;
-
- case IBLND_MSG_PUT_DONE:
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
- msg->ibm_u.completion.ibcm_status,
- msg->ibm_u.completion.ibcm_cookie);
- break;
-
- case IBLND_MSG_GET_REQ:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
- msg->ibm_srcnid, rx, 1);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
-
- case IBLND_MSG_GET_DONE:
- post_credit = IBLND_POSTRX_RSRVD_CREDIT;
- kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
- msg->ibm_u.completion.ibcm_status,
- msg->ibm_u.completion.ibcm_cookie);
- break;
- }
-
- if (rc < 0) /* protocol error */
- kiblnd_close_conn(conn, rc);
-
- if (post_credit != IBLND_POSTRX_DONT_POST)
- kiblnd_post_rx(rx, post_credit);
-}
-
-static void
-kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
-{
- struct kib_msg *msg = rx->rx_msg;
- struct kib_conn *conn = rx->rx_conn;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- struct kib_net *net = ni->ni_data;
- int rc;
- int err = -EIO;
-
- LASSERT(net);
- LASSERT(rx->rx_nob < 0); /* was posted */
- rx->rx_nob = 0; /* isn't now */
-
- if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
- goto ignore;
-
- if (status != IB_WC_SUCCESS) {
- CNETERR("Rx from %s failed: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
- goto failed;
- }
-
- LASSERT(nob >= 0);
- rx->rx_nob = nob;
-
- rc = kiblnd_unpack_msg(msg, rx->rx_nob);
- if (rc) {
- CERROR("Error %d unpacking rx from %s\n",
- rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- goto failed;
- }
-
- if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
- msg->ibm_dstnid != ni->ni_nid ||
- msg->ibm_srcstamp != conn->ibc_incarnation ||
- msg->ibm_dststamp != net->ibn_incarnation) {
- CERROR("Stale rx from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- err = -ESTALE;
- goto failed;
- }
-
- /* set time last known alive */
- kiblnd_peer_alive(conn->ibc_peer);
-
- /* racing with connection establishment/teardown! */
-
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- unsigned long flags;
-
- write_lock_irqsave(g_lock, flags);
- /* must check holding global lock to eliminate race */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
- write_unlock_irqrestore(g_lock, flags);
- return;
- }
- write_unlock_irqrestore(g_lock, flags);
- }
- kiblnd_handle_rx(rx);
- return;
-
- failed:
- CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
- kiblnd_close_conn(conn, err);
- ignore:
- kiblnd_drop_rx(rx); /* Don't re-post rx. */
-}
-
-static struct page *
-kiblnd_kvaddr_to_page(unsigned long vaddr)
-{
- struct page *page;
-
- if (is_vmalloc_addr((void *)vaddr)) {
- page = vmalloc_to_page((void *)vaddr);
- LASSERT(page);
- return page;
- }
-#ifdef CONFIG_HIGHMEM
- if (vaddr >= PKMAP_BASE &&
- vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
- /* No highmem pages only used for bulk (kiov) I/O */
- CERROR("find page for address in highmem\n");
- LBUG();
- }
-#endif
- page = virt_to_page(vaddr);
- LASSERT(page);
- return page;
-}
-
-static int
-kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc *rd, __u32 nob)
-{
- struct kib_hca_dev *hdev;
- struct kib_fmr_poolset *fps;
- int cpt;
- int rc;
-
- LASSERT(tx->tx_pool);
- LASSERT(tx->tx_pool->tpo_pool.po_owner);
-
- hdev = tx->tx_pool->tpo_hdev;
- cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
-
- fps = net->ibn_fmr_ps[cpt];
- rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr);
- if (rc) {
- CERROR("Can't map %u bytes: %d\n", nob, rc);
- return rc;
- }
-
- /*
- * If rd is not tx_rd, it's going to get sent to a peer, who will need
- * the rkey
- */
- rd->rd_key = tx->fmr.fmr_key;
- rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
- rd->rd_frags[0].rf_nob = nob;
- rd->rd_nfrags = 1;
-
- return 0;
-}
-
-static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx)
-{
- struct kib_net *net = ni->ni_data;
-
- LASSERT(net);
-
- if (net->ibn_fmr_ps)
- kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
-
- if (tx->tx_nfrags) {
- kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
- tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
- tx->tx_nfrags = 0;
- }
-}
-
-static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
- struct kib_rdma_desc *rd, int nfrags)
-{
- struct kib_net *net = ni->ni_data;
- struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
- __u32 nob;
- int i;
-
- /*
- * If rd is not tx_rd, it's going to get sent to a peer and I'm the
- * RDMA sink
- */
- tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- tx->tx_nfrags = nfrags;
-
- rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
- tx->tx_nfrags, tx->tx_dmadir);
-
- for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
- rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
- hdev->ibh_ibdev, &tx->tx_frags[i]);
- rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
- hdev->ibh_ibdev, &tx->tx_frags[i]);
- nob += rd->rd_frags[i].rf_nob;
- }
-
- if (net->ibn_fmr_ps)
- return kiblnd_fmr_map_tx(net, tx, rd, nob);
-
- return -EINVAL;
-}
-
-static int
-kiblnd_setup_rd_iov(struct lnet_ni *ni, struct kib_tx *tx,
- struct kib_rdma_desc *rd, unsigned int niov,
- const struct kvec *iov, int offset, int nob)
-{
- struct kib_net *net = ni->ni_data;
- struct page *page;
- struct scatterlist *sg;
- unsigned long vaddr;
- int fragnob;
- int page_offset;
-
- LASSERT(nob > 0);
- LASSERT(niov > 0);
- LASSERT(net);
-
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT(niov > 0);
- }
-
- sg = tx->tx_frags;
- do {
- LASSERT(niov > 0);
-
- vaddr = ((unsigned long)iov->iov_base) + offset;
- page_offset = vaddr & (PAGE_SIZE - 1);
- page = kiblnd_kvaddr_to_page(vaddr);
- if (!page) {
- CERROR("Can't find page\n");
- return -EFAULT;
- }
-
- fragnob = min((int)(iov->iov_len - offset), nob);
- fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
-
- sg_set_page(sg, page, fragnob, page_offset);
- sg = sg_next(sg);
- if (!sg) {
- CERROR("lacking enough sg entries to map tx\n");
- return -EFAULT;
- }
-
- if (offset + fragnob < iov->iov_len) {
- offset += fragnob;
- } else {
- offset = 0;
- iov++;
- niov--;
- }
- nob -= fragnob;
- } while (nob > 0);
-
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
-}
-
-static int
-kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
- struct kib_rdma_desc *rd, int nkiov,
- const struct bio_vec *kiov, int offset, int nob)
-{
- struct kib_net *net = ni->ni_data;
- struct scatterlist *sg;
- int fragnob;
-
- CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
-
- LASSERT(nob > 0);
- LASSERT(nkiov > 0);
- LASSERT(net);
-
- while (offset >= kiov->bv_len) {
- offset -= kiov->bv_len;
- nkiov--;
- kiov++;
- LASSERT(nkiov > 0);
- }
-
- sg = tx->tx_frags;
- do {
- LASSERT(nkiov > 0);
-
- fragnob = min((int)(kiov->bv_len - offset), nob);
-
- sg_set_page(sg, kiov->bv_page, fragnob,
- kiov->bv_offset + offset);
- sg = sg_next(sg);
- if (!sg) {
- CERROR("lacking enough sg entries to map tx\n");
- return -EFAULT;
- }
-
- offset = 0;
- kiov++;
- nkiov--;
- nob -= fragnob;
- } while (nob > 0);
-
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
-}
-
-static int
-kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
- __must_hold(&conn->ibc_lock)
-{
- struct kib_msg *msg = tx->tx_msg;
- struct kib_peer *peer = conn->ibc_peer;
- struct lnet_ni *ni = peer->ibp_ni;
- int ver = conn->ibc_version;
- int rc;
- int done;
-
- LASSERT(tx->tx_queued);
- /* We rely on this for QP sizing */
- LASSERT(tx->tx_nwrq > 0);
-
- LASSERT(!credit || credit == 1);
- LASSERT(conn->ibc_outstanding_credits >= 0);
- LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
- LASSERT(conn->ibc_credits >= 0);
- LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
-
- if (conn->ibc_nsends_posted == kiblnd_concurrent_sends(ver, ni)) {
- /* tx completions outstanding... */
- CDEBUG(D_NET, "%s: posted enough\n",
- libcfs_nid2str(peer->ibp_nid));
- return -EAGAIN;
- }
-
- if (credit && !conn->ibc_credits) { /* no credits */
- CDEBUG(D_NET, "%s: no credits\n",
- libcfs_nid2str(peer->ibp_nid));
- return -EAGAIN;
- }
-
- if (credit && !IBLND_OOB_CAPABLE(ver) &&
- conn->ibc_credits == 1 && /* last credit reserved */
- msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */
- CDEBUG(D_NET, "%s: not using last credit\n",
- libcfs_nid2str(peer->ibp_nid));
- return -EAGAIN;
- }
-
- /* NB don't drop ibc_lock before bumping tx_sending */
- list_del(&tx->tx_list);
- tx->tx_queued = 0;
-
- if (msg->ibm_type == IBLND_MSG_NOOP &&
- (!kiblnd_need_noop(conn) || /* redundant NOOP */
- (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
- conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
- /*
- * OK to drop when posted enough NOOPs, since
- * kiblnd_check_sends_locked will queue NOOP again when
- * posted NOOPs complete
- */
- spin_unlock(&conn->ibc_lock);
- kiblnd_tx_done(peer->ibp_ni, tx);
- spin_lock(&conn->ibc_lock);
- CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_noops_posted);
- return 0;
- }
-
- kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
- peer->ibp_nid, conn->ibc_incarnation);
-
- conn->ibc_credits -= credit;
- conn->ibc_outstanding_credits = 0;
- conn->ibc_nsends_posted++;
- if (msg->ibm_type == IBLND_MSG_NOOP)
- conn->ibc_noops_posted++;
-
- /*
- * CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
- * PUT. If so, it was first queued here as a PUT_REQ, sent and
- * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
- * and then re-queued here. It's (just) possible that
- * tx_sending is non-zero if we've not done the tx_complete()
- * from the first send; hence the ++ rather than = below.
- */
- tx->tx_sending++;
- list_add(&tx->tx_list, &conn->ibc_active_txs);
-
- /* I'm still holding ibc_lock! */
- if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
- rc = -ECONNABORTED;
- } else if (tx->tx_pool->tpo_pool.po_failed ||
- conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
- /* close_conn will launch failover */
- rc = -ENETDOWN;
- } else {
- struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
- struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
- struct ib_send_wr *wrq = &tx->tx_wrq[0].wr;
-
- if (frd) {
- if (!frd->frd_valid) {
- wrq = &frd->frd_inv_wr;
- wrq->next = &frd->frd_fastreg_wr.wr;
- } else {
- wrq = &frd->frd_fastreg_wr.wr;
- }
- frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
- }
-
- LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
- "bad wr_id %llx, opc %d, flags %d, peer: %s\n",
- bad->wr_id, bad->opcode, bad->send_flags,
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- bad = NULL;
- rc = ib_post_send(conn->ibc_cmid->qp, wrq, &bad);
- }
-
- conn->ibc_last_send = jiffies;
-
- if (!rc)
- return 0;
-
- /*
- * NB credits are transferred in the actual
- * message, which can only be the last work item
- */
- conn->ibc_credits += credit;
- conn->ibc_outstanding_credits += msg->ibm_credits;
- conn->ibc_nsends_posted--;
- if (msg->ibm_type == IBLND_MSG_NOOP)
- conn->ibc_noops_posted--;
-
- tx->tx_status = rc;
- tx->tx_waiting = 0;
- tx->tx_sending--;
-
- done = !tx->tx_sending;
- if (done)
- list_del(&tx->tx_list);
-
- spin_unlock(&conn->ibc_lock);
-
- if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
- CERROR("Error %d posting transmit to %s\n",
- rc, libcfs_nid2str(peer->ibp_nid));
- else
- CDEBUG(D_NET, "Error %d posting transmit to %s\n",
- rc, libcfs_nid2str(peer->ibp_nid));
-
- kiblnd_close_conn(conn, rc);
-
- if (done)
- kiblnd_tx_done(peer->ibp_ni, tx);
-
- spin_lock(&conn->ibc_lock);
-
- return -EIO;
-}
-
-static void
-kiblnd_check_sends_locked(struct kib_conn *conn)
-{
- int ver = conn->ibc_version;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- struct kib_tx *tx;
-
- /* Don't send anything until after the connection is established */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- CDEBUG(D_NET, "%s too soon\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- return;
- }
-
- LASSERT(conn->ibc_nsends_posted <= kiblnd_concurrent_sends(ver, ni));
- LASSERT(!IBLND_OOB_CAPABLE(ver) ||
- conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
- LASSERT(conn->ibc_reserved_credits >= 0);
-
- while (conn->ibc_reserved_credits > 0 &&
- !list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- struct kib_tx, tx_list);
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
- conn->ibc_reserved_credits--;
- }
-
- if (kiblnd_need_noop(conn)) {
- spin_unlock(&conn->ibc_lock);
-
- tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx)
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
-
- spin_lock(&conn->ibc_lock);
- if (tx)
- kiblnd_queue_tx_locked(tx, conn);
- }
-
- for (;;) {
- int credit;
-
- if (!list_empty(&conn->ibc_tx_queue_nocred)) {
- credit = 0;
- tx = list_entry(conn->ibc_tx_queue_nocred.next,
- struct kib_tx, tx_list);
- } else if (!list_empty(&conn->ibc_tx_noops)) {
- LASSERT(!IBLND_OOB_CAPABLE(ver));
- credit = 1;
- tx = list_entry(conn->ibc_tx_noops.next,
- struct kib_tx, tx_list);
- } else if (!list_empty(&conn->ibc_tx_queue)) {
- credit = 1;
- tx = list_entry(conn->ibc_tx_queue.next,
- struct kib_tx, tx_list);
- } else {
- break;
- }
-
- if (kiblnd_post_tx_locked(conn, tx, credit))
- break;
- }
-}
-
-static void
-kiblnd_tx_complete(struct kib_tx *tx, int status)
-{
- int failed = (status != IB_WC_SUCCESS);
- struct kib_conn *conn = tx->tx_conn;
- int idle;
-
- LASSERT(tx->tx_sending > 0);
-
- if (failed) {
- if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
- CNETERR("Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
- status);
-
- kiblnd_close_conn(conn, -EIO);
- } else {
- kiblnd_peer_alive(conn->ibc_peer);
- }
-
- spin_lock(&conn->ibc_lock);
-
- /*
- * I could be racing with rdma completion. Whoever makes 'tx' idle
- * gets to free it, which also drops its ref on 'conn'.
- */
- tx->tx_sending--;
- conn->ibc_nsends_posted--;
- if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
- conn->ibc_noops_posted--;
-
- if (failed) {
- tx->tx_waiting = 0; /* don't wait for peer */
- tx->tx_status = -EIO;
- }
-
- idle = !tx->tx_sending && /* This is the final callback */
- !tx->tx_waiting && /* Not waiting for peer */
- !tx->tx_queued; /* Not re-queued (PUT_DONE) */
- if (idle)
- list_del(&tx->tx_list);
-
- kiblnd_check_sends_locked(conn);
- spin_unlock(&conn->ibc_lock);
-
- if (idle)
- kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
-}
-
-static void
-kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type,
- int body_nob)
-{
- struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
- struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
- struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
- int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
-
- LASSERT(tx->tx_nwrq >= 0);
- LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
- LASSERT(nob <= IBLND_MSG_SIZE);
-
- kiblnd_init_msg(tx->tx_msg, type, body_nob);
-
- sge->lkey = hdev->ibh_pd->local_dma_lkey;
- sge->addr = tx->tx_msgaddr;
- sge->length = nob;
-
- memset(wrq, 0, sizeof(*wrq));
-
- wrq->wr.next = NULL;
- wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
- wrq->wr.sg_list = sge;
- wrq->wr.num_sge = 1;
- wrq->wr.opcode = IB_WR_SEND;
- wrq->wr.send_flags = IB_SEND_SIGNALED;
-
- tx->tx_nwrq++;
-}
-
-static int
-kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
- int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie)
-{
- struct kib_msg *ibmsg = tx->tx_msg;
- struct kib_rdma_desc *srcrd = tx->tx_rd;
- struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_rdma_wr *wrq, *next;
- int rc = resid;
- int srcidx = 0;
- int dstidx = 0;
- int wrknob;
-
- LASSERT(!in_interrupt());
- LASSERT(!tx->tx_nwrq);
- LASSERT(type == IBLND_MSG_GET_DONE ||
- type == IBLND_MSG_PUT_DONE);
-
- if (kiblnd_rd_size(srcrd) > conn->ibc_max_frags << PAGE_SHIFT) {
- CERROR("RDMA is too large for peer %s (%d), src size: %d dst size: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- conn->ibc_max_frags << PAGE_SHIFT,
- kiblnd_rd_size(srcrd), kiblnd_rd_size(dstrd));
- rc = -EMSGSIZE;
- goto too_big;
- }
-
- while (resid > 0) {
- if (srcidx >= srcrd->rd_nfrags) {
- CERROR("Src buffer exhausted: %d frags\n", srcidx);
- rc = -EPROTO;
- break;
- }
-
- if (dstidx == dstrd->rd_nfrags) {
- CERROR("Dst buffer exhausted: %d frags\n", dstidx);
- rc = -EPROTO;
- break;
- }
-
- if (tx->tx_nwrq >= IBLND_MAX_RDMA_FRAGS) {
- CERROR("RDMA has too many fragments for peer %s (%d), src idx/frags: %d/%d dst idx/frags: %d/%d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- IBLND_MAX_RDMA_FRAGS,
- srcidx, srcrd->rd_nfrags,
- dstidx, dstrd->rd_nfrags);
- rc = -EMSGSIZE;
- break;
- }
-
- wrknob = min3(kiblnd_rd_frag_size(srcrd, srcidx),
- kiblnd_rd_frag_size(dstrd, dstidx),
- (__u32)resid);
-
- sge = &tx->tx_sge[tx->tx_nwrq];
- sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
- sge->lkey = kiblnd_rd_frag_key(srcrd, srcidx);
- sge->length = wrknob;
-
- wrq = &tx->tx_wrq[tx->tx_nwrq];
- next = wrq + 1;
-
- wrq->wr.next = &next->wr;
- wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
- wrq->wr.sg_list = sge;
- wrq->wr.num_sge = 1;
- wrq->wr.opcode = IB_WR_RDMA_WRITE;
- wrq->wr.send_flags = 0;
-
- wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
- wrq->rkey = kiblnd_rd_frag_key(dstrd, dstidx);
-
- srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
- dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
-
- resid -= wrknob;
-
- tx->tx_nwrq++;
- wrq++;
- sge++;
- }
-too_big:
- if (rc < 0) /* no RDMA if completing with failure */
- tx->tx_nwrq = 0;
-
- ibmsg->ibm_u.completion.ibcm_status = rc;
- ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
- kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
- type, sizeof(struct kib_completion_msg));
-
- return rc;
-}
-
-static void
-kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
-{
- struct list_head *q;
-
- LASSERT(tx->tx_nwrq > 0); /* work items set up */
- LASSERT(!tx->tx_queued); /* not queued for sending already */
- LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
- tx->tx_queued = 1;
- tx->tx_deadline = jiffies +
- msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
- MSEC_PER_SEC);
-
- if (!tx->tx_conn) {
- kiblnd_conn_addref(conn);
- tx->tx_conn = conn;
- LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
- } else {
- /* PUT_DONE first attached to conn as a PUT_REQ */
- LASSERT(tx->tx_conn == conn);
- LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
- }
-
- switch (tx->tx_msg->ibm_type) {
- default:
- LBUG();
-
- case IBLND_MSG_PUT_REQ:
- case IBLND_MSG_GET_REQ:
- q = &conn->ibc_tx_queue_rsrvd;
- break;
-
- case IBLND_MSG_PUT_NAK:
- case IBLND_MSG_PUT_ACK:
- case IBLND_MSG_PUT_DONE:
- case IBLND_MSG_GET_DONE:
- q = &conn->ibc_tx_queue_nocred;
- break;
-
- case IBLND_MSG_NOOP:
- if (IBLND_OOB_CAPABLE(conn->ibc_version))
- q = &conn->ibc_tx_queue_nocred;
- else
- q = &conn->ibc_tx_noops;
- break;
-
- case IBLND_MSG_IMMEDIATE:
- q = &conn->ibc_tx_queue;
- break;
- }
-
- list_add_tail(&tx->tx_list, q);
-}
-
-static void
-kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
-{
- spin_lock(&conn->ibc_lock);
- kiblnd_queue_tx_locked(tx, conn);
- kiblnd_check_sends_locked(conn);
- spin_unlock(&conn->ibc_lock);
-}
-
-static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
- struct sockaddr_in *srcaddr,
- struct sockaddr_in *dstaddr,
- int timeout_ms)
-{
- unsigned short port;
- int rc;
-
- /* allow the port to be reused */
- rc = rdma_set_reuseaddr(cmid, 1);
- if (rc) {
- CERROR("Unable to set reuse on cmid: %d\n", rc);
- return rc;
- }
-
- /* look for a free privileged port */
- for (port = PROT_SOCK - 1; port > 0; port--) {
- srcaddr->sin_port = htons(port);
- rc = rdma_resolve_addr(cmid,
- (struct sockaddr *)srcaddr,
- (struct sockaddr *)dstaddr,
- timeout_ms);
- if (!rc) {
- CDEBUG(D_NET, "bound to port %hu\n", port);
- return 0;
- } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
- CDEBUG(D_NET, "bind to port %hu failed: %d\n",
- port, rc);
- } else {
- return rc;
- }
- }
-
- CERROR("Failed to bind to a free privileged port\n");
- return rc;
-}
-
-static void
-kiblnd_connect_peer(struct kib_peer *peer)
-{
- struct rdma_cm_id *cmid;
- struct kib_dev *dev;
- struct kib_net *net = peer->ibp_ni->ni_data;
- struct sockaddr_in srcaddr;
- struct sockaddr_in dstaddr;
- int rc;
-
- LASSERT(net);
- LASSERT(peer->ibp_connecting > 0);
- LASSERT(!peer->ibp_reconnecting);
-
- cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
- IB_QPT_RC);
-
- if (IS_ERR(cmid)) {
- CERROR("Can't create CMID for %s: %ld\n",
- libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
- rc = PTR_ERR(cmid);
- goto failed;
- }
-
- dev = net->ibn_dev;
- memset(&srcaddr, 0, sizeof(srcaddr));
- srcaddr.sin_family = AF_INET;
- srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);
-
- memset(&dstaddr, 0, sizeof(dstaddr));
- dstaddr.sin_family = AF_INET;
- dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
- dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid));
-
- kiblnd_peer_addref(peer); /* cmid's ref */
-
- if (*kiblnd_tunables.kib_use_priv_port) {
- rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- } else {
- rc = rdma_resolve_addr(cmid,
- (struct sockaddr *)&srcaddr,
- (struct sockaddr *)&dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- }
- if (rc) {
- /* Can't initiate address resolution: */
- CERROR("Can't resolve addr for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
- goto failed2;
- }
-
- LASSERT(cmid->device);
- CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
- libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
- &dev->ibd_ifip, cmid->device->name);
-
- return;
-
- failed2:
- kiblnd_peer_connect_failed(peer, 1, rc);
- kiblnd_peer_decref(peer); /* cmid's ref */
- rdma_destroy_id(cmid);
- return;
- failed:
- kiblnd_peer_connect_failed(peer, 1, rc);
-}
-
-bool
-kiblnd_reconnect_peer(struct kib_peer *peer)
-{
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- char *reason = NULL;
- struct list_head txs;
- unsigned long flags;
-
- INIT_LIST_HEAD(&txs);
-
- write_lock_irqsave(glock, flags);
- if (!peer->ibp_reconnecting) {
- if (peer->ibp_accepting)
- reason = "accepting";
- else if (peer->ibp_connecting)
- reason = "connecting";
- else if (!list_empty(&peer->ibp_conns))
- reason = "connected";
- else /* connected then closed */
- reason = "closed";
-
- goto no_reconnect;
- }
-
- LASSERT(!peer->ibp_accepting && !peer->ibp_connecting &&
- list_empty(&peer->ibp_conns));
- peer->ibp_reconnecting = 0;
-
- if (!kiblnd_peer_active(peer)) {
- list_splice_init(&peer->ibp_tx_queue, &txs);
- reason = "unlinked";
- goto no_reconnect;
- }
-
- peer->ibp_connecting++;
- peer->ibp_reconnected++;
- write_unlock_irqrestore(glock, flags);
-
- kiblnd_connect_peer(peer);
- return true;
-
-no_reconnect:
- write_unlock_irqrestore(glock, flags);
-
- CWARN("Abort reconnection of %s: %s\n",
- libcfs_nid2str(peer->ibp_nid), reason);
- kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED);
- return false;
-}
-
-void
-kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid)
-{
- struct kib_peer *peer;
- struct kib_peer *peer2;
- struct kib_conn *conn;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- unsigned long flags;
- int rc;
-
- /*
- * If I get here, I've committed to send, so I complete the tx with
- * failure on any problems
- */
- LASSERT(!tx || !tx->tx_conn); /* only set when assigned a conn */
- LASSERT(!tx || tx->tx_nwrq > 0); /* work items have been set up */
-
- /*
- * First time, just use a read lock since I expect to find my peer
- * connected
- */
- read_lock_irqsave(g_lock, flags);
-
- peer = kiblnd_find_peer_locked(nid);
- if (peer && !list_empty(&peer->ibp_conns)) {
- /* Found a peer with an established connection */
- conn = kiblnd_get_conn_locked(peer);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
-
- read_unlock_irqrestore(g_lock, flags);
-
- if (tx)
- kiblnd_queue_tx(tx, conn);
- kiblnd_conn_decref(conn); /* ...to here */
- return;
- }
-
- read_unlock(g_lock);
- /* Re-try with a write lock */
- write_lock(g_lock);
-
- peer = kiblnd_find_peer_locked(nid);
- if (peer) {
- if (list_empty(&peer->ibp_conns)) {
- /* found a peer, but it's still connecting... */
- LASSERT(kiblnd_peer_connecting(peer));
- if (tx)
- list_add_tail(&tx->tx_list,
- &peer->ibp_tx_queue);
- write_unlock_irqrestore(g_lock, flags);
- } else {
- conn = kiblnd_get_conn_locked(peer);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
-
- write_unlock_irqrestore(g_lock, flags);
-
- if (tx)
- kiblnd_queue_tx(tx, conn);
- kiblnd_conn_decref(conn); /* ...to here */
- }
- return;
- }
-
- write_unlock_irqrestore(g_lock, flags);
-
- /* Allocate a peer ready to add to the peer table and retry */
- rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc) {
- CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
- if (tx) {
- tx->tx_status = -EHOSTUNREACH;
- tx->tx_waiting = 0;
- kiblnd_tx_done(ni, tx);
- }
- return;
- }
-
- write_lock_irqsave(g_lock, flags);
-
- peer2 = kiblnd_find_peer_locked(nid);
- if (peer2) {
- if (list_empty(&peer2->ibp_conns)) {
- /* found a peer, but it's still connecting... */
- LASSERT(kiblnd_peer_connecting(peer2));
- if (tx)
- list_add_tail(&tx->tx_list,
- &peer2->ibp_tx_queue);
- write_unlock_irqrestore(g_lock, flags);
- } else {
- conn = kiblnd_get_conn_locked(peer2);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
-
- write_unlock_irqrestore(g_lock, flags);
-
- if (tx)
- kiblnd_queue_tx(tx, conn);
- kiblnd_conn_decref(conn); /* ...to here */
- }
-
- kiblnd_peer_decref(peer);
- return;
- }
-
- /* Brand new peer */
- LASSERT(!peer->ibp_connecting);
- peer->ibp_connecting = 1;
-
- /* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(!((struct kib_net *)ni->ni_data)->ibn_shutdown);
-
- if (tx)
- list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
-
- kiblnd_peer_addref(peer);
- list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
-
- write_unlock_irqrestore(g_lock, flags);
-
- kiblnd_connect_peer(peer);
- kiblnd_peer_decref(peer);
-}
-
-int
-kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
-{
- struct lnet_hdr *hdr = &lntmsg->msg_hdr;
- int type = lntmsg->msg_type;
- struct lnet_process_id target = lntmsg->msg_target;
- int target_is_router = lntmsg->msg_target_is_router;
- int routing = lntmsg->msg_routing;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct kvec *payload_iov = lntmsg->msg_iov;
- struct bio_vec *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
- struct iov_iter from;
- struct kib_msg *ibmsg;
- struct kib_rdma_desc *rd;
- struct kib_tx *tx;
- int nob;
- int rc;
-
- /* NB 'private' is different depending on what we're sending.... */
-
- CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
- payload_nob, payload_niov, libcfs_id2str(target));
-
- LASSERT(!payload_nob || payload_niov > 0);
- LASSERT(payload_niov <= LNET_MAX_IOV);
-
- /* Thread context */
- LASSERT(!in_interrupt());
- /* payload is either all vaddrs or all pages */
- LASSERT(!(payload_kiov && payload_iov));
-
- if (payload_kiov)
- iov_iter_bvec(&from, ITER_BVEC | WRITE,
- payload_kiov, payload_niov,
- payload_nob + payload_offset);
- else
- iov_iter_kvec(&from, ITER_KVEC | WRITE,
- payload_iov, payload_niov,
- payload_nob + payload_offset);
-
- iov_iter_advance(&from, payload_offset);
-
- switch (type) {
- default:
- LBUG();
- return -EIO;
-
- case LNET_MSG_ACK:
- LASSERT(!payload_nob);
- break;
-
- case LNET_MSG_GET:
- if (routing || target_is_router)
- break; /* send IMMEDIATE */
-
- /* is the REPLY message too small for RDMA? */
- nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
-
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (!tx) {
- CERROR("Can't allocate txd for GET to %s\n",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- ibmsg = tx->tx_msg;
- rd = &ibmsg->ibm_u.get.ibgm_rd;
- if (!(lntmsg->msg_md->md_options & LNET_MD_KIOV))
- rc = kiblnd_setup_rd_iov(ni, tx, rd,
- lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.iov,
- 0, lntmsg->msg_md->md_length);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, rd,
- lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.kiov,
- 0, lntmsg->msg_md->md_length);
- if (rc) {
- CERROR("Can't setup GET sink for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- kiblnd_tx_done(ni, tx);
- return -EIO;
- }
-
- nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
- ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
- ibmsg->ibm_u.get.ibgm_hdr = *hdr;
-
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
-
- tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
- if (!tx->tx_lntmsg[1]) {
- CERROR("Can't create reply for GET -> %s\n",
- libcfs_nid2str(target.nid));
- kiblnd_tx_done(ni, tx);
- return -EIO;
- }
-
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */
- tx->tx_waiting = 1; /* waiting for GET_DONE */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
-
- case LNET_MSG_REPLY:
- case LNET_MSG_PUT:
- /* Is the payload small enough not to need RDMA? */
- nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
-
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (!tx) {
- CERROR("Can't allocate %s txd for %s\n",
- type == LNET_MSG_PUT ? "PUT" : "REPLY",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- if (!payload_kiov)
- rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
- payload_niov, payload_iov,
- payload_offset, payload_nob);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
- if (rc) {
- CERROR("Can't setup PUT src for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- kiblnd_tx_done(ni, tx);
- return -EIO;
- }
-
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
- ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(struct kib_putreq_msg));
-
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
- }
-
- /* send IMMEDIATE */
-
- LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
- <= IBLND_MSG_SIZE);
-
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (!tx) {
- CERROR("Can't send %d to %s: tx descs exhausted\n",
- type, libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
-
- rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob,
- &from);
- if (rc != payload_nob) {
- kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
- return -EFAULT;
- }
-
- nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
-
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
-}
-
-static void
-kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
-{
- struct lnet_process_id target = lntmsg->msg_target;
- unsigned int niov = lntmsg->msg_niov;
- struct kvec *iov = lntmsg->msg_iov;
- struct bio_vec *kiov = lntmsg->msg_kiov;
- unsigned int offset = lntmsg->msg_offset;
- unsigned int nob = lntmsg->msg_len;
- struct kib_tx *tx;
- int rc;
-
- tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
- if (!tx) {
- CERROR("Can't get tx for REPLY to %s\n",
- libcfs_nid2str(target.nid));
- goto failed_0;
- }
-
- if (!nob)
- rc = 0;
- else if (!kiov)
- rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
- niov, iov, offset, nob);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
- niov, kiov, offset, nob);
-
- if (rc) {
- CERROR("Can't setup GET src for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- goto failed_1;
- }
-
- rc = kiblnd_init_rdma(rx->rx_conn, tx,
- IBLND_MSG_GET_DONE, nob,
- &rx->rx_msg->ibm_u.get.ibgm_rd,
- rx->rx_msg->ibm_u.get.ibgm_cookie);
- if (rc < 0) {
- CERROR("Can't setup rdma for GET from %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- goto failed_1;
- }
-
- if (!nob) {
- /* No RDMA: local completion may happen now! */
- lnet_finalize(ni, lntmsg, 0);
- } else {
- /* RDMA: lnet_finalize(lntmsg) when it completes */
- tx->tx_lntmsg[0] = lntmsg;
- }
-
- kiblnd_queue_tx(tx, rx->rx_conn);
- return;
-
- failed_1:
- kiblnd_tx_done(ni, tx);
- failed_0:
- lnet_finalize(ni, lntmsg, -EIO);
-}
-
-int
-kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
- int delayed, struct iov_iter *to, unsigned int rlen)
-{
- struct kib_rx *rx = private;
- struct kib_msg *rxmsg = rx->rx_msg;
- struct kib_conn *conn = rx->rx_conn;
- struct kib_tx *tx;
- int nob;
- int post_credit = IBLND_POSTRX_PEER_CREDIT;
- int rc = 0;
-
- LASSERT(iov_iter_count(to) <= rlen);
- LASSERT(!in_interrupt());
- /* Either all pages or all vaddrs */
-
- switch (rxmsg->ibm_type) {
- default:
- LBUG();
-
- case IBLND_MSG_IMMEDIATE:
- nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
- if (nob > rx->rx_nob) {
- CERROR("Immediate message from %s too big: %d(%d)\n",
- libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
- nob, rx->rx_nob);
- rc = -EPROTO;
- break;
- }
-
- rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen,
- to);
- if (rc != rlen) {
- rc = -EFAULT;
- break;
- }
-
- rc = 0;
- lnet_finalize(ni, lntmsg, 0);
- break;
-
- case IBLND_MSG_PUT_REQ: {
- struct kib_msg *txmsg;
- struct kib_rdma_desc *rd;
-
- if (!iov_iter_count(to)) {
- lnet_finalize(ni, lntmsg, 0);
- kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
- rxmsg->ibm_u.putreq.ibprm_cookie);
- break;
- }
-
- tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (!tx) {
- CERROR("Can't allocate tx for %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- /* Not replying will break the connection */
- rc = -ENOMEM;
- break;
- }
-
- txmsg = tx->tx_msg;
- rd = &txmsg->ibm_u.putack.ibpam_rd;
- if (!(to->type & ITER_BVEC))
- rc = kiblnd_setup_rd_iov(ni, tx, rd,
- to->nr_segs, to->kvec,
- to->iov_offset,
- iov_iter_count(to));
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, rd,
- to->nr_segs, to->bvec,
- to->iov_offset,
- iov_iter_count(to));
- if (rc) {
- CERROR("Can't setup PUT sink for %s: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
- kiblnd_tx_done(ni, tx);
- /* tell peer it's over */
- kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
- rxmsg->ibm_u.putreq.ibprm_cookie);
- break;
- }
-
- nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
- txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
- txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
-
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
-
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- tx->tx_waiting = 1; /* waiting for PUT_DONE */
- kiblnd_queue_tx(tx, conn);
-
- /* reposted buffer reserved for PUT_DONE */
- post_credit = IBLND_POSTRX_NO_CREDIT;
- break;
- }
-
- case IBLND_MSG_GET_REQ:
- if (lntmsg) {
- /* Optimized GET; RDMA lntmsg's payload */
- kiblnd_reply(ni, rx, lntmsg);
- } else {
- /* GET didn't match anything */
- kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
- -ENODATA,
- rxmsg->ibm_u.get.ibgm_cookie);
- }
- break;
- }
-
- kiblnd_post_rx(rx, post_credit);
- return rc;
-}
-
-int
-kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- atomic_inc(&kiblnd_data.kib_nthreads);
- return 0;
-}
-
-static void
-kiblnd_thread_fini(void)
-{
- atomic_dec(&kiblnd_data.kib_nthreads);
-}
-
-static void
-kiblnd_peer_alive(struct kib_peer *peer)
-{
- /* This is racy, but everyone's only writing cfs_time_current() */
- peer->ibp_last_alive = cfs_time_current();
- mb();
-}
-
-static void
-kiblnd_peer_notify(struct kib_peer *peer)
-{
- int error = 0;
- unsigned long last_alive = 0;
- unsigned long flags;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- if (kiblnd_peer_idle(peer) && peer->ibp_error) {
- error = peer->ibp_error;
- peer->ibp_error = 0;
-
- last_alive = peer->ibp_last_alive;
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (error)
- lnet_notify(peer->ibp_ni,
- peer->ibp_nid, 0, last_alive);
-}
-
-void
-kiblnd_close_conn_locked(struct kib_conn *conn, int error)
-{
- /*
- * This just does the immediate housekeeping. 'error' is zero for a
- * normal shutdown which can happen only after the connection has been
- * established. If the connection is established, schedule the
- * connection to be finished off by the connd. Otherwise the connd is
- * already dealing with it (either to set it up or tear it down).
- * Caller holds kib_global_lock exclusively in irq context
- */
- struct kib_peer *peer = conn->ibc_peer;
- struct kib_dev *dev;
- unsigned long flags;
-
- LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
- if (error && !conn->ibc_comms_error)
- conn->ibc_comms_error = error;
-
- if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
- return; /* already being handled */
-
- if (!error &&
- list_empty(&conn->ibc_tx_noops) &&
- list_empty(&conn->ibc_tx_queue) &&
- list_empty(&conn->ibc_tx_queue_rsrvd) &&
- list_empty(&conn->ibc_tx_queue_nocred) &&
- list_empty(&conn->ibc_active_txs)) {
- CDEBUG(D_NET, "closing conn to %s\n",
- libcfs_nid2str(peer->ibp_nid));
- } else {
- CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
- libcfs_nid2str(peer->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
- }
-
- dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev;
- list_del(&conn->ibc_list);
- /* connd (see below) takes over ibc_list's ref */
-
- if (list_empty(&peer->ibp_conns) && /* no more conns */
- kiblnd_peer_active(peer)) { /* still in peer table */
- kiblnd_unlink_peer_locked(peer);
-
- /* set/clear error on last conn */
- peer->ibp_error = conn->ibc_comms_error;
- }
-
- kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
-
- if (error &&
- kiblnd_dev_can_failover(dev)) {
- list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- wake_up(&kiblnd_data.kib_failover_waitq);
- }
-
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
-
- list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
- wake_up(&kiblnd_data.kib_connd_waitq);
-
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
-}
-
-void
-kiblnd_close_conn(struct kib_conn *conn, int error)
-{
- unsigned long flags;
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_close_conn_locked(conn, error);
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-}
-
-static void
-kiblnd_handle_early_rxs(struct kib_conn *conn)
-{
- unsigned long flags;
- struct kib_rx *rx;
- struct kib_rx *tmp;
-
- LASSERT(!in_interrupt());
- LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- list_for_each_entry_safe(rx, tmp, &conn->ibc_early_rxs, rx_list) {
- list_del(&rx->rx_list);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_handle_rx(rx);
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- }
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-}
-
-static void
-kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
-{
- LIST_HEAD(zombies);
- struct list_head *tmp;
- struct list_head *nxt;
- struct kib_tx *tx;
-
- spin_lock(&conn->ibc_lock);
-
- list_for_each_safe(tmp, nxt, txs) {
- tx = list_entry(tmp, struct kib_tx, tx_list);
-
- if (txs == &conn->ibc_active_txs) {
- LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_waiting || tx->tx_sending);
- } else {
- LASSERT(tx->tx_queued);
- }
-
- tx->tx_status = -ECONNABORTED;
- tx->tx_waiting = 0;
-
- if (!tx->tx_sending) {
- tx->tx_queued = 0;
- list_del(&tx->tx_list);
- list_add(&tx->tx_list, &zombies);
- }
- }
-
- spin_unlock(&conn->ibc_lock);
-
- kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
-}
-
-static void
-kiblnd_finalise_conn(struct kib_conn *conn)
-{
- LASSERT(!in_interrupt());
- LASSERT(conn->ibc_state > IBLND_CONN_INIT);
-
- kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
-
- /*
- * abort_receives moves QP state to IB_QPS_ERR. This is only required
- * for connections that didn't get as far as being connected, because
- * rdma_disconnect() does this for free.
- */
- kiblnd_abort_receives(conn);
-
- /*
- * Complete all tx descs not waiting for sends to complete.
- * NB we should be safe from RDMA now that the QP has changed state
- */
- kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
- kiblnd_abort_txs(conn, &conn->ibc_active_txs);
-
- kiblnd_handle_early_rxs(conn);
-}
-
-static void
-kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error)
-{
- LIST_HEAD(zombies);
- unsigned long flags;
-
- LASSERT(error);
- LASSERT(!in_interrupt());
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- if (active) {
- LASSERT(peer->ibp_connecting > 0);
- peer->ibp_connecting--;
- } else {
- LASSERT(peer->ibp_accepting > 0);
- peer->ibp_accepting--;
- }
-
- if (kiblnd_peer_connecting(peer)) {
- /* another connection attempt under way... */
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
- return;
- }
-
- peer->ibp_reconnected = 0;
- if (list_empty(&peer->ibp_conns)) {
- /* Take peer's blocked transmits to complete with error */
- list_add(&zombies, &peer->ibp_tx_queue);
- list_del_init(&peer->ibp_tx_queue);
-
- if (kiblnd_peer_active(peer))
- kiblnd_unlink_peer_locked(peer);
-
- peer->ibp_error = error;
- } else {
- /* Can't have blocked transmits if there are connections */
- LASSERT(list_empty(&peer->ibp_tx_queue));
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_peer_notify(peer);
-
- if (list_empty(&zombies))
- return;
-
- CNETERR("Deleting messages for %s: connection failed\n",
- libcfs_nid2str(peer->ibp_nid));
-
- kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
-}
-
-static void
-kiblnd_connreq_done(struct kib_conn *conn, int status)
-{
- struct kib_peer *peer = conn->ibc_peer;
- struct kib_tx *tx;
- struct kib_tx *tmp;
- struct list_head txs;
- unsigned long flags;
- int active;
-
- active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
-
- CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
- libcfs_nid2str(peer->ibp_nid), active,
- conn->ibc_version, status);
-
- LASSERT(!in_interrupt());
- LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
- peer->ibp_connecting > 0) ||
- (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
- peer->ibp_accepting > 0));
-
- kfree(conn->ibc_connvars);
- conn->ibc_connvars = NULL;
-
- if (status) {
- /* failed to establish connection */
- kiblnd_peer_connect_failed(peer, active, status);
- kiblnd_finalise_conn(conn);
- return;
- }
-
- /* connection established */
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- conn->ibc_last_send = jiffies;
- kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
- kiblnd_peer_alive(peer);
-
- /*
- * Add conn to peer's list and nuke any dangling conns from a different
- * peer instance...
- */
- kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
- list_add(&conn->ibc_list, &peer->ibp_conns);
- peer->ibp_reconnected = 0;
- if (active)
- peer->ibp_connecting--;
- else
- peer->ibp_accepting--;
-
- if (!peer->ibp_version) {
- peer->ibp_version = conn->ibc_version;
- peer->ibp_incarnation = conn->ibc_incarnation;
- }
-
- if (peer->ibp_version != conn->ibc_version ||
- peer->ibp_incarnation != conn->ibc_incarnation) {
- kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
- conn->ibc_incarnation);
- peer->ibp_version = conn->ibc_version;
- peer->ibp_incarnation = conn->ibc_incarnation;
- }
-
- /* grab pending txs while I have the lock */
- list_add(&txs, &peer->ibp_tx_queue);
- list_del_init(&peer->ibp_tx_queue);
-
- if (!kiblnd_peer_active(peer) || /* peer has been deleted */
- conn->ibc_comms_error) { /* error has happened already */
- struct lnet_ni *ni = peer->ibp_ni;
-
- /* start to shut down connection */
- kiblnd_close_conn_locked(conn, -ECONNABORTED);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
-
- return;
- }
-
- /*
- * +1 ref for myself, this connection is visible to other threads
- * now, refcount of peer:ibp_conns can be released by connection
- * close from either a different thread, or the calling of
- * kiblnd_check_sends_locked() below. See bz21911 for details.
- */
- kiblnd_conn_addref(conn);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- /* Schedule blocked txs */
- spin_lock(&conn->ibc_lock);
- list_for_each_entry_safe(tx, tmp, &txs, tx_list) {
- list_del(&tx->tx_list);
-
- kiblnd_queue_tx_locked(tx, conn);
- }
- kiblnd_check_sends_locked(conn);
- spin_unlock(&conn->ibc_lock);
-
- /* schedule blocked rxs */
- kiblnd_handle_early_rxs(conn);
-
- kiblnd_conn_decref(conn);
-}
-
-static void
-kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
-{
- int rc;
-
- rc = rdma_reject(cmid, rej, sizeof(*rej));
-
- if (rc)
- CWARN("Error %d sending reject\n", rc);
-}
-
-static int
-kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
-{
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- struct kib_msg *reqmsg = priv;
- struct kib_msg *ackmsg;
- struct kib_dev *ibdev;
- struct kib_peer *peer;
- struct kib_peer *peer2;
- struct kib_conn *conn;
- struct lnet_ni *ni = NULL;
- struct kib_net *net = NULL;
- lnet_nid_t nid;
- struct rdma_conn_param cp;
- struct kib_rej rej;
- int version = IBLND_MSG_VERSION;
- unsigned long flags;
- int max_frags;
- int rc;
- struct sockaddr_in *peer_addr;
-
- LASSERT(!in_interrupt());
-
- /* cmid inherits 'context' from the corresponding listener id */
- ibdev = (struct kib_dev *)cmid->context;
- LASSERT(ibdev);
-
- memset(&rej, 0, sizeof(rej));
- rej.ibr_magic = IBLND_MSG_MAGIC;
- rej.ibr_why = IBLND_REJECT_FATAL;
- rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
-
- peer_addr = (struct sockaddr_in *)&cmid->route.addr.dst_addr;
- if (*kiblnd_tunables.kib_require_priv_port &&
- ntohs(peer_addr->sin_port) >= PROT_SOCK) {
- __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
-
- CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
- &ip, ntohs(peer_addr->sin_port));
- goto failed;
- }
-
- if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
- CERROR("Short connection request\n");
- goto failed;
- }
-
- /*
- * Future protocol version compatibility support! If the
- * o2iblnd-specific protocol changes, or when LNET unifies
- * protocols over all LNDs, the initial connection will
- * negotiate a protocol version. I trap this here to avoid
- * console errors; the reject tells the peer which protocol I
- * speak.
- */
- if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
- reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
- goto failed;
- if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
- reqmsg->ibm_version != IBLND_MSG_VERSION &&
- reqmsg->ibm_version != IBLND_MSG_VERSION_1)
- goto failed;
- if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
- reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
- reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
- goto failed;
-
- rc = kiblnd_unpack_msg(reqmsg, priv_nob);
- if (rc) {
- CERROR("Can't parse connection request: %d\n", rc);
- goto failed;
- }
-
- nid = reqmsg->ibm_srcnid;
- ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
-
- if (ni) {
- net = (struct kib_net *)ni->ni_data;
- rej.ibr_incarnation = net->ibn_incarnation;
- }
-
- if (!ni || /* no matching net */
- ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
- net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
- libcfs_nid2str(nid),
- !ni ? "NA" : libcfs_nid2str(ni->ni_nid),
- ibdev->ibd_ifname, ibdev->ibd_nnets,
- &ibdev->ibd_ifip,
- libcfs_nid2str(reqmsg->ibm_dstnid));
-
- goto failed;
- }
-
- /* check time stamp as soon as possible */
- if (reqmsg->ibm_dststamp &&
- reqmsg->ibm_dststamp != net->ibn_incarnation) {
- CWARN("Stale connection request\n");
- rej.ibr_why = IBLND_REJECT_CONN_STALE;
- goto failed;
- }
-
- /* I can accept peer's version */
- version = reqmsg->ibm_version;
-
- if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
- CERROR("Unexpected connreq msg type: %x from %s\n",
- reqmsg->ibm_type, libcfs_nid2str(nid));
- goto failed;
- }
-
- if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
- kiblnd_msg_queue_size(version, ni)) {
- CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
- libcfs_nid2str(nid),
- reqmsg->ibm_u.connparams.ibcp_queue_depth,
- kiblnd_msg_queue_size(version, ni));
-
- if (version == IBLND_MSG_VERSION)
- rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
-
- goto failed;
- }
-
- max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT;
- if (max_frags > kiblnd_rdma_frags(version, ni)) {
- CWARN("Can't accept conn from %s (version %x): max message size %d is too large (%d wanted)\n",
- libcfs_nid2str(nid), version, max_frags,
- kiblnd_rdma_frags(version, ni));
-
- if (version >= IBLND_MSG_VERSION)
- rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
-
- goto failed;
- } else if (max_frags < kiblnd_rdma_frags(version, ni) &&
- !net->ibn_fmr_ps) {
- CWARN("Can't accept conn from %s (version %x): max message size %d incompatible without FMR pool (%d wanted)\n",
- libcfs_nid2str(nid), version, max_frags,
- kiblnd_rdma_frags(version, ni));
-
- if (version == IBLND_MSG_VERSION)
- rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
-
- goto failed;
- }
-
- if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
- CERROR("Can't accept %s: message size %d too big (%d max)\n",
- libcfs_nid2str(nid),
- reqmsg->ibm_u.connparams.ibcp_max_msg_size,
- IBLND_MSG_SIZE);
- goto failed;
- }
-
- /* assume 'nid' is a new peer; create */
- rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc) {
- CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
- rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
- goto failed;
- }
-
- /* We have validated the peer's parameters so use those */
- peer->ibp_max_frags = max_frags;
- peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
-
- write_lock_irqsave(g_lock, flags);
-
- peer2 = kiblnd_find_peer_locked(nid);
- if (peer2) {
- if (!peer2->ibp_version) {
- peer2->ibp_version = version;
- peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
- }
-
- /* not the guy I've talked with */
- if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
- peer2->ibp_version != version) {
- kiblnd_close_peer_conns_locked(peer2, -ESTALE);
-
- if (kiblnd_peer_active(peer2)) {
- peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
- peer2->ibp_version = version;
- }
- write_unlock_irqrestore(g_lock, flags);
-
- CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
- libcfs_nid2str(nid), peer2->ibp_version, version,
- peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
-
- kiblnd_peer_decref(peer);
- rej.ibr_why = IBLND_REJECT_CONN_STALE;
- goto failed;
- }
-
- /*
- * Tie-break connection race in favour of the higher NID.
- * If we keep running into a race condition multiple times,
- * we have to assume that the connection attempt with the
- * higher NID is stuck in a connecting state and will never
- * recover. As such, we pass through this if-block and let
- * the lower NID connection win so we can move forward.
- */
- if (peer2->ibp_connecting &&
- nid < ni->ni_nid && peer2->ibp_races <
- MAX_CONN_RACES_BEFORE_ABORT) {
- peer2->ibp_races++;
- write_unlock_irqrestore(g_lock, flags);
-
- CDEBUG(D_NET, "Conn race %s\n",
- libcfs_nid2str(peer2->ibp_nid));
-
- kiblnd_peer_decref(peer);
- rej.ibr_why = IBLND_REJECT_CONN_RACE;
- goto failed;
- }
- if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
- CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
- libcfs_nid2str(peer2->ibp_nid),
- MAX_CONN_RACES_BEFORE_ABORT);
- /**
- * passive connection is allowed even this peer is waiting for
- * reconnection.
- */
- peer2->ibp_reconnecting = 0;
- peer2->ibp_races = 0;
- peer2->ibp_accepting++;
- kiblnd_peer_addref(peer2);
-
- /**
- * Race with kiblnd_launch_tx (active connect) to create peer
- * so copy validated parameters since we now know what the
- * peer's limits are
- */
- peer2->ibp_max_frags = peer->ibp_max_frags;
- peer2->ibp_queue_depth = peer->ibp_queue_depth;
-
- write_unlock_irqrestore(g_lock, flags);
- kiblnd_peer_decref(peer);
- peer = peer2;
- } else {
- /* Brand new peer */
- LASSERT(!peer->ibp_accepting);
- LASSERT(!peer->ibp_version &&
- !peer->ibp_incarnation);
-
- peer->ibp_accepting = 1;
- peer->ibp_version = version;
- peer->ibp_incarnation = reqmsg->ibm_srcstamp;
-
- /* I have a ref on ni that prevents it being shutdown */
- LASSERT(!net->ibn_shutdown);
-
- kiblnd_peer_addref(peer);
- list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
-
- write_unlock_irqrestore(g_lock, flags);
- }
-
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT,
- version);
- if (!conn) {
- kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
- kiblnd_peer_decref(peer);
- rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
- goto failed;
- }
-
- /*
- * conn now "owns" cmid, so I return success from here on to ensure the
- * CM callback doesn't destroy cmid.
- */
- conn->ibc_incarnation = reqmsg->ibm_srcstamp;
- conn->ibc_credits = conn->ibc_queue_depth;
- conn->ibc_reserved_credits = conn->ibc_queue_depth;
- LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
- IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
-
- ackmsg = &conn->ibc_connvars->cv_msg;
- memset(ackmsg, 0, sizeof(*ackmsg));
-
- kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
- sizeof(ackmsg->ibm_u.connparams));
- ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
- ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags << IBLND_FRAG_SHIFT;
- ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
-
- kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
-
- memset(&cp, 0, sizeof(cp));
- cp.private_data = ackmsg;
- cp.private_data_len = ackmsg->ibm_nob;
- cp.responder_resources = 0; /* No atomic ops or RDMA reads */
- cp.initiator_depth = 0;
- cp.flow_control = 1;
- cp.retry_count = *kiblnd_tunables.kib_retry_count;
- cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
-
- CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
-
- rc = rdma_accept(cmid, &cp);
- if (rc) {
- CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
- rej.ibr_version = version;
- rej.ibr_why = IBLND_REJECT_FATAL;
-
- kiblnd_reject(cmid, &rej);
- kiblnd_connreq_done(conn, rc);
- kiblnd_conn_decref(conn);
- }
-
- lnet_ni_decref(ni);
- return 0;
-
- failed:
- if (ni) {
- rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
- rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
- lnet_ni_decref(ni);
- }
-
- rej.ibr_version = version;
- kiblnd_reject(cmid, &rej);
-
- return -ECONNREFUSED;
-}
-
-static void
-kiblnd_check_reconnect(struct kib_conn *conn, int version,
- __u64 incarnation, int why, struct kib_connparams *cp)
-{
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- struct kib_peer *peer = conn->ibc_peer;
- char *reason;
- int msg_size = IBLND_MSG_SIZE;
- int frag_num = -1;
- int queue_dep = -1;
- bool reconnect;
- unsigned long flags;
-
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
- LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */
- LASSERT(!peer->ibp_reconnecting);
-
- if (cp) {
- msg_size = cp->ibcp_max_msg_size;
- frag_num = cp->ibcp_max_frags << IBLND_FRAG_SHIFT;
- queue_dep = cp->ibcp_queue_depth;
- }
-
- write_lock_irqsave(glock, flags);
- /**
- * retry connection if it's still needed and no other connection
- * attempts (active or passive) are in progress
- * NB: reconnect is still needed even when ibp_tx_queue is
- * empty if ibp_version != version because reconnect may be
- * initiated by kiblnd_query()
- */
- reconnect = (!list_empty(&peer->ibp_tx_queue) ||
- peer->ibp_version != version) &&
- peer->ibp_connecting == 1 &&
- !peer->ibp_accepting;
- if (!reconnect) {
- reason = "no need";
- goto out;
- }
-
- switch (why) {
- default:
- reason = "Unknown";
- break;
-
- case IBLND_REJECT_RDMA_FRAGS: {
- struct lnet_ioctl_config_lnd_tunables *tunables;
-
- if (!cp) {
- reason = "can't negotiate max frags";
- goto out;
- }
- tunables = peer->ibp_ni->ni_lnd_tunables;
- if (!tunables->lt_tun_u.lt_o2ib.lnd_map_on_demand) {
- reason = "map_on_demand must be enabled";
- goto out;
- }
- if (conn->ibc_max_frags <= frag_num) {
- reason = "unsupported max frags";
- goto out;
- }
-
- peer->ibp_max_frags = frag_num;
- reason = "rdma fragments";
- break;
- }
- case IBLND_REJECT_MSG_QUEUE_SIZE:
- if (!cp) {
- reason = "can't negotiate queue depth";
- goto out;
- }
- if (conn->ibc_queue_depth <= queue_dep) {
- reason = "unsupported queue depth";
- goto out;
- }
-
- peer->ibp_queue_depth = queue_dep;
- reason = "queue depth";
- break;
-
- case IBLND_REJECT_CONN_STALE:
- reason = "stale";
- break;
-
- case IBLND_REJECT_CONN_RACE:
- reason = "conn race";
- break;
-
- case IBLND_REJECT_CONN_UNCOMPAT:
- reason = "version negotiation";
- break;
- }
-
- conn->ibc_reconnect = 1;
- peer->ibp_reconnecting = 1;
- peer->ibp_version = version;
- if (incarnation)
- peer->ibp_incarnation = incarnation;
-out:
- write_unlock_irqrestore(glock, flags);
-
- CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
- libcfs_nid2str(peer->ibp_nid),
- reconnect ? "reconnect" : "don't reconnect",
- reason, IBLND_MSG_VERSION, version, msg_size,
- conn->ibc_queue_depth, queue_dep,
- conn->ibc_max_frags, frag_num);
- /**
- * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer
- * while destroying the zombie
- */
-}
-
-static void
-kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
-{
- struct kib_peer *peer = conn->ibc_peer;
-
- LASSERT(!in_interrupt());
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
-
- switch (reason) {
- case IB_CM_REJ_STALE_CONN:
- kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_CONN_STALE, NULL);
- break;
-
- case IB_CM_REJ_INVALID_SERVICE_ID:
- CNETERR("%s rejected: no listener at %d\n",
- libcfs_nid2str(peer->ibp_nid),
- *kiblnd_tunables.kib_service);
- break;
-
- case IB_CM_REJ_CONSUMER_DEFINED:
- if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
- struct kib_rej *rej = priv;
- struct kib_connparams *cp = NULL;
- int flip = 0;
- __u64 incarnation = -1;
-
- /* NB. default incarnation is -1 because:
- * a) V1 will ignore dst incarnation in connreq.
- * b) V2 will provide incarnation while rejecting me,
- * -1 will be overwrote.
- *
- * if I try to connect to a V1 peer with V2 protocol,
- * it rejected me then upgrade to V2, I have no idea
- * about the upgrading and try to reconnect with V1,
- * in this case upgraded V2 can find out I'm trying to
- * talk to the old guy and reject me(incarnation is -1).
- */
-
- if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
- rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
- __swab32s(&rej->ibr_magic);
- __swab16s(&rej->ibr_version);
- flip = 1;
- }
-
- if (priv_nob >= sizeof(struct kib_rej) &&
- rej->ibr_version > IBLND_MSG_VERSION_1) {
- /*
- * priv_nob is always 148 in current version
- * of OFED, so we still need to check version.
- * (define of IB_CM_REJ_PRIVATE_DATA_SIZE)
- */
- cp = &rej->ibr_cp;
-
- if (flip) {
- __swab64s(&rej->ibr_incarnation);
- __swab16s(&cp->ibcp_queue_depth);
- __swab16s(&cp->ibcp_max_frags);
- __swab32s(&cp->ibcp_max_msg_size);
- }
-
- incarnation = rej->ibr_incarnation;
- }
-
- if (rej->ibr_magic != IBLND_MSG_MAGIC &&
- rej->ibr_magic != LNET_PROTO_MAGIC) {
- CERROR("%s rejected: consumer defined fatal error\n",
- libcfs_nid2str(peer->ibp_nid));
- break;
- }
-
- if (rej->ibr_version != IBLND_MSG_VERSION &&
- rej->ibr_version != IBLND_MSG_VERSION_1) {
- CERROR("%s rejected: o2iblnd version %x error\n",
- libcfs_nid2str(peer->ibp_nid),
- rej->ibr_version);
- break;
- }
-
- if (rej->ibr_why == IBLND_REJECT_FATAL &&
- rej->ibr_version == IBLND_MSG_VERSION_1) {
- CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
- libcfs_nid2str(peer->ibp_nid), rej->ibr_version);
-
- if (conn->ibc_version != IBLND_MSG_VERSION_1)
- rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
- }
-
- switch (rej->ibr_why) {
- case IBLND_REJECT_CONN_RACE:
- case IBLND_REJECT_CONN_STALE:
- case IBLND_REJECT_CONN_UNCOMPAT:
- case IBLND_REJECT_MSG_QUEUE_SIZE:
- case IBLND_REJECT_RDMA_FRAGS:
- kiblnd_check_reconnect(conn, rej->ibr_version,
- incarnation,
- rej->ibr_why, cp);
- break;
-
- case IBLND_REJECT_NO_RESOURCES:
- CERROR("%s rejected: o2iblnd no resources\n",
- libcfs_nid2str(peer->ibp_nid));
- break;
-
- case IBLND_REJECT_FATAL:
- CERROR("%s rejected: o2iblnd fatal error\n",
- libcfs_nid2str(peer->ibp_nid));
- break;
-
- default:
- CERROR("%s rejected: o2iblnd reason %d\n",
- libcfs_nid2str(peer->ibp_nid),
- rej->ibr_why);
- break;
- }
- break;
- }
- /* fall through */
- default:
- CNETERR("%s rejected: reason %d, size %d\n",
- libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
- break;
- }
-
- kiblnd_connreq_done(conn, -ECONNREFUSED);
-}
-
-static void
-kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
-{
- struct kib_peer *peer = conn->ibc_peer;
- struct lnet_ni *ni = peer->ibp_ni;
- struct kib_net *net = ni->ni_data;
- struct kib_msg *msg = priv;
- int ver = conn->ibc_version;
- int rc = kiblnd_unpack_msg(msg, priv_nob);
- unsigned long flags;
-
- LASSERT(net);
-
- if (rc) {
- CERROR("Can't unpack connack from %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
- goto failed;
- }
-
- if (msg->ibm_type != IBLND_MSG_CONNACK) {
- CERROR("Unexpected message %d from %s\n",
- msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
- rc = -EPROTO;
- goto failed;
- }
-
- if (ver != msg->ibm_version) {
- CERROR("%s replied version %x is different with requested version %x\n",
- libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
- rc = -EPROTO;
- goto failed;
- }
-
- if (msg->ibm_u.connparams.ibcp_queue_depth >
- conn->ibc_queue_depth) {
- CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
- libcfs_nid2str(peer->ibp_nid),
- msg->ibm_u.connparams.ibcp_queue_depth,
- conn->ibc_queue_depth);
- rc = -EPROTO;
- goto failed;
- }
-
- if ((msg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT) >
- conn->ibc_max_frags) {
- CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
- libcfs_nid2str(peer->ibp_nid),
- msg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT,
- conn->ibc_max_frags);
- rc = -EPROTO;
- goto failed;
- }
-
- if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
- CERROR("%s max message size %d too big (%d max)\n",
- libcfs_nid2str(peer->ibp_nid),
- msg->ibm_u.connparams.ibcp_max_msg_size,
- IBLND_MSG_SIZE);
- rc = -EPROTO;
- goto failed;
- }
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (msg->ibm_dstnid == ni->ni_nid &&
- msg->ibm_dststamp == net->ibn_incarnation)
- rc = 0;
- else
- rc = -ESTALE;
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (rc) {
- CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc,
- msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
- goto failed;
- }
-
- conn->ibc_incarnation = msg->ibm_srcstamp;
- conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth;
- conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
- conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth;
- conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT;
- LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
- IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
-
- kiblnd_connreq_done(conn, 0);
- return;
-
- failed:
- /*
- * NB My QP has already established itself, so I handle anything going
- * wrong here by setting ibc_comms_error.
- * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
- * immediately tears it down.
- */
- LASSERT(rc);
- conn->ibc_comms_error = rc;
- kiblnd_connreq_done(conn, 0);
-}
-
-static int
-kiblnd_active_connect(struct rdma_cm_id *cmid)
-{
- struct kib_peer *peer = (struct kib_peer *)cmid->context;
- struct kib_conn *conn;
- struct kib_msg *msg;
- struct rdma_conn_param cp;
- int version;
- __u64 incarnation;
- unsigned long flags;
- int rc;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- incarnation = peer->ibp_incarnation;
- version = !peer->ibp_version ? IBLND_MSG_VERSION :
- peer->ibp_version;
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
- version);
- if (!conn) {
- kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
- kiblnd_peer_decref(peer); /* lose cmid's ref */
- return -ENOMEM;
- }
-
- /*
- * conn "owns" cmid now, so I return success from here on to ensure the
- * CM callback doesn't destroy cmid. conn also takes over cmid's ref
- * on peer
- */
- msg = &conn->ibc_connvars->cv_msg;
-
- memset(msg, 0, sizeof(*msg));
- kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
- msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
- msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags << IBLND_FRAG_SHIFT;
- msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
-
- kiblnd_pack_msg(peer->ibp_ni, msg, version,
- 0, peer->ibp_nid, incarnation);
-
- memset(&cp, 0, sizeof(cp));
- cp.private_data = msg;
- cp.private_data_len = msg->ibm_nob;
- cp.responder_resources = 0; /* No atomic ops or RDMA reads */
- cp.initiator_depth = 0;
- cp.flow_control = 1;
- cp.retry_count = *kiblnd_tunables.kib_retry_count;
- cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
-
- LASSERT(cmid->context == (void *)conn);
- LASSERT(conn->ibc_cmid == cmid);
-
- rc = rdma_connect(cmid, &cp);
- if (rc) {
- CERROR("Can't connect to %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
- kiblnd_connreq_done(conn, rc);
- kiblnd_conn_decref(conn);
- }
-
- return 0;
-}
-
-int
-kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
-{
- struct kib_peer *peer;
- struct kib_conn *conn;
- int rc;
-
- switch (event->event) {
- default:
- CERROR("Unexpected event: %d, status: %d\n",
- event->event, event->status);
- LBUG();
-
- case RDMA_CM_EVENT_CONNECT_REQUEST:
- /* destroy cmid on failure */
- rc = kiblnd_passive_connect(cmid,
- (void *)KIBLND_CONN_PARAM(event),
- KIBLND_CONN_PARAM_LEN(event));
- CDEBUG(D_NET, "connreq: %d\n", rc);
- return rc;
-
- case RDMA_CM_EVENT_ADDR_ERROR:
- peer = (struct kib_peer *)cmid->context;
- CNETERR("%s: ADDR ERROR %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
- kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
- kiblnd_peer_decref(peer);
- return -EHOSTUNREACH; /* rc destroys cmid */
-
- case RDMA_CM_EVENT_ADDR_RESOLVED:
- peer = (struct kib_peer *)cmid->context;
-
- CDEBUG(D_NET, "%s Addr resolved: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
-
- if (event->status) {
- CNETERR("Can't resolve address for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
- rc = event->status;
- } else {
- rc = rdma_resolve_route(
- cmid, *kiblnd_tunables.kib_timeout * 1000);
- if (!rc)
- return 0;
- /* Can't initiate route resolution */
- CERROR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
- }
- kiblnd_peer_connect_failed(peer, 1, rc);
- kiblnd_peer_decref(peer);
- return rc; /* rc destroys cmid */
-
- case RDMA_CM_EVENT_ROUTE_ERROR:
- peer = (struct kib_peer *)cmid->context;
- CNETERR("%s: ROUTE ERROR %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
- kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
- kiblnd_peer_decref(peer);
- return -EHOSTUNREACH; /* rc destroys cmid */
-
- case RDMA_CM_EVENT_ROUTE_RESOLVED:
- peer = (struct kib_peer *)cmid->context;
- CDEBUG(D_NET, "%s Route resolved: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
-
- if (!event->status)
- return kiblnd_active_connect(cmid);
-
- CNETERR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
- kiblnd_peer_connect_failed(peer, 1, event->status);
- kiblnd_peer_decref(peer);
- return event->status; /* rc destroys cmid */
-
- case RDMA_CM_EVENT_UNREACHABLE:
- conn = (struct kib_conn *)cmid->context;
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
- conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CNETERR("%s: UNREACHABLE %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
- kiblnd_connreq_done(conn, -ENETDOWN);
- kiblnd_conn_decref(conn);
- return 0;
-
- case RDMA_CM_EVENT_CONNECT_ERROR:
- conn = (struct kib_conn *)cmid->context;
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
- conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CNETERR("%s: CONNECT ERROR %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
- kiblnd_connreq_done(conn, -ENOTCONN);
- kiblnd_conn_decref(conn);
- return 0;
-
- case RDMA_CM_EVENT_REJECTED:
- conn = (struct kib_conn *)cmid->context;
- switch (conn->ibc_state) {
- default:
- LBUG();
-
- case IBLND_CONN_PASSIVE_WAIT:
- CERROR("%s: REJECTED %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- event->status);
- kiblnd_connreq_done(conn, -ECONNRESET);
- break;
-
- case IBLND_CONN_ACTIVE_CONNECT:
- kiblnd_rejected(conn, event->status,
- (void *)KIBLND_CONN_PARAM(event),
- KIBLND_CONN_PARAM_LEN(event));
- break;
- }
- kiblnd_conn_decref(conn);
- return 0;
-
- case RDMA_CM_EVENT_ESTABLISHED:
- conn = (struct kib_conn *)cmid->context;
- switch (conn->ibc_state) {
- default:
- LBUG();
-
- case IBLND_CONN_PASSIVE_WAIT:
- CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_connreq_done(conn, 0);
- break;
-
- case IBLND_CONN_ACTIVE_CONNECT:
- CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_check_connreply(conn,
- (void *)KIBLND_CONN_PARAM(event),
- KIBLND_CONN_PARAM_LEN(event));
- break;
- }
- /* net keeps its ref on conn! */
- return 0;
-
- case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
- return 0;
- case RDMA_CM_EVENT_DISCONNECTED:
- conn = (struct kib_conn *)cmid->context;
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- CERROR("%s DISCONNECTED\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_connreq_done(conn, -ECONNRESET);
- } else {
- kiblnd_close_conn(conn, 0);
- }
- kiblnd_conn_decref(conn);
- cmid->context = NULL;
- return 0;
-
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- LCONSOLE_ERROR_MSG(0x131,
- "Received notification of device removal\n"
- "Please shutdown LNET to allow this to proceed\n");
- /*
- * Can't remove network from underneath LNET for now, so I have
- * to ignore this
- */
- return 0;
-
- case RDMA_CM_EVENT_ADDR_CHANGE:
- LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
- return 0;
- }
-}
-
-static int
-kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
-{
- struct kib_tx *tx;
- struct list_head *ttmp;
-
- list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, struct kib_tx, tx_list);
-
- if (txs != &conn->ibc_active_txs) {
- LASSERT(tx->tx_queued);
- } else {
- LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_waiting || tx->tx_sending);
- }
-
- if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
- CERROR("Timed out tx: %s, %lu seconds\n",
- kiblnd_queue2str(conn, txs),
- cfs_duration_sec(jiffies - tx->tx_deadline));
- return 1;
- }
- }
-
- return 0;
-}
-
-static int
-kiblnd_conn_timed_out_locked(struct kib_conn *conn)
-{
- return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
- kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
- kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
- kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
- kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
-}
-
-static void
-kiblnd_check_conns(int idx)
-{
- LIST_HEAD(closes);
- LIST_HEAD(checksends);
- struct list_head *peers = &kiblnd_data.kib_peers[idx];
- struct list_head *ptmp;
- struct kib_peer *peer;
- struct kib_conn *conn;
- struct kib_conn *temp;
- struct kib_conn *tmp;
- struct list_head *ctmp;
- unsigned long flags;
-
- /*
- * NB. We expect to have a look at all the peers and not find any
- * RDMAs to time out, so we just use a shared lock while we
- * take a look...
- */
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- list_for_each(ptmp, peers) {
- peer = list_entry(ptmp, struct kib_peer, ibp_list);
-
- list_for_each(ctmp, &peer->ibp_conns) {
- int timedout;
- int sendnoop;
-
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
- LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
-
- spin_lock(&conn->ibc_lock);
-
- sendnoop = kiblnd_need_noop(conn);
- timedout = kiblnd_conn_timed_out_locked(conn);
- if (!sendnoop && !timedout) {
- spin_unlock(&conn->ibc_lock);
- continue;
- }
-
- if (timedout) {
- CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
- libcfs_nid2str(peer->ibp_nid),
- cfs_duration_sec(cfs_time_current() -
- peer->ibp_last_alive),
- conn->ibc_credits,
- conn->ibc_outstanding_credits,
- conn->ibc_reserved_credits);
- list_add(&conn->ibc_connd_list, &closes);
- } else {
- list_add(&conn->ibc_connd_list, &checksends);
- }
- /* +ref for 'closes' or 'checksends' */
- kiblnd_conn_addref(conn);
-
- spin_unlock(&conn->ibc_lock);
- }
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- /*
- * Handle timeout by closing the whole
- * connection. We can only be sure RDMA activity
- * has ceased once the QP has been modified.
- */
- list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) {
- list_del(&conn->ibc_connd_list);
- kiblnd_close_conn(conn, -ETIMEDOUT);
- kiblnd_conn_decref(conn);
- }
-
- /*
- * In case we have enough credits to return via a
- * NOOP, but there were no non-blocking tx descs
- * free to do it last time...
- */
- list_for_each_entry_safe(conn, temp, &checksends, ibc_connd_list) {
- list_del(&conn->ibc_connd_list);
-
- spin_lock(&conn->ibc_lock);
- kiblnd_check_sends_locked(conn);
- spin_unlock(&conn->ibc_lock);
-
- kiblnd_conn_decref(conn);
- }
-}
-
-static void
-kiblnd_disconnect_conn(struct kib_conn *conn)
-{
- LASSERT(!in_interrupt());
- LASSERT(current == kiblnd_data.kib_connd);
- LASSERT(conn->ibc_state == IBLND_CONN_CLOSING);
-
- rdma_disconnect(conn->ibc_cmid);
- kiblnd_finalise_conn(conn);
-
- kiblnd_peer_notify(conn->ibc_peer);
-}
-
-/**
- * High-water for reconnection to the same peer, reconnection attempt should
- * be delayed after trying more than KIB_RECONN_HIGH_RACE.
- */
-#define KIB_RECONN_HIGH_RACE 10
-/**
- * Allow connd to take a break and handle other things after consecutive
- * reconnection attempts.
- */
-#define KIB_RECONN_BREAK 100
-
-int
-kiblnd_connd(void *arg)
-{
- spinlock_t *lock = &kiblnd_data.kib_connd_lock;
- wait_queue_entry_t wait;
- unsigned long flags;
- struct kib_conn *conn;
- int timeout;
- int i;
- int dropped_lock;
- int peer_index = 0;
- unsigned long deadline = jiffies;
-
- init_waitqueue_entry(&wait, current);
- kiblnd_data.kib_connd = current;
-
- spin_lock_irqsave(lock, flags);
-
- while (!kiblnd_data.kib_shutdown) {
- int reconn = 0;
-
- dropped_lock = 0;
-
- if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
- struct kib_peer *peer = NULL;
-
- conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- struct kib_conn, ibc_list);
- list_del(&conn->ibc_list);
- if (conn->ibc_reconnect) {
- peer = conn->ibc_peer;
- kiblnd_peer_addref(peer);
- }
-
- spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
-
- kiblnd_destroy_conn(conn);
-
- spin_lock_irqsave(lock, flags);
- if (!peer) {
- kfree(conn);
- continue;
- }
-
- conn->ibc_peer = peer;
- if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
- list_add_tail(&conn->ibc_list,
- &kiblnd_data.kib_reconn_list);
- else
- list_add_tail(&conn->ibc_list,
- &kiblnd_data.kib_reconn_wait);
- }
-
- if (!list_empty(&kiblnd_data.kib_connd_conns)) {
- conn = list_entry(kiblnd_data.kib_connd_conns.next,
- struct kib_conn, ibc_list);
- list_del(&conn->ibc_list);
-
- spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
-
- kiblnd_disconnect_conn(conn);
- kiblnd_conn_decref(conn);
-
- spin_lock_irqsave(lock, flags);
- }
-
- while (reconn < KIB_RECONN_BREAK) {
- if (kiblnd_data.kib_reconn_sec !=
- ktime_get_real_seconds()) {
- kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
- list_splice_init(&kiblnd_data.kib_reconn_wait,
- &kiblnd_data.kib_reconn_list);
- }
-
- if (list_empty(&kiblnd_data.kib_reconn_list))
- break;
-
- conn = list_entry(kiblnd_data.kib_reconn_list.next,
- struct kib_conn, ibc_list);
- list_del(&conn->ibc_list);
-
- spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
-
- reconn += kiblnd_reconnect_peer(conn->ibc_peer);
- kiblnd_peer_decref(conn->ibc_peer);
- kfree(conn);
-
- spin_lock_irqsave(lock, flags);
- }
-
- /* careful with the jiffy wrap... */
- timeout = (int)(deadline - jiffies);
- if (timeout <= 0) {
- const int n = 4;
- const int p = 1;
- int chunk = kiblnd_data.kib_peer_hash_size;
-
- spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
-
- /*
- * Time to check for RDMA timeouts on a few more
- * peers: I do checks every 'p' seconds on a
- * proportion of the peer table and I need to check
- * every connection 'n' times within a timeout
- * interval, to ensure I detect a timeout on any
- * connection within (n+1)/n times the timeout
- * interval.
- */
- if (*kiblnd_tunables.kib_timeout > n * p)
- chunk = (chunk * n * p) /
- *kiblnd_tunables.kib_timeout;
- if (!chunk)
- chunk = 1;
-
- for (i = 0; i < chunk; i++) {
- kiblnd_check_conns(peer_index);
- peer_index = (peer_index + 1) %
- kiblnd_data.kib_peer_hash_size;
- }
-
- deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
- spin_lock_irqsave(lock, flags);
- }
-
- if (dropped_lock)
- continue;
-
- /* Nothing to do for 'timeout' */
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_unlock_irqrestore(lock, flags);
-
- schedule_timeout(timeout);
-
- remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_lock_irqsave(lock, flags);
- }
-
- spin_unlock_irqrestore(lock, flags);
-
- kiblnd_thread_fini();
- return 0;
-}
-
-void
-kiblnd_qp_event(struct ib_event *event, void *arg)
-{
- struct kib_conn *conn = arg;
-
- switch (event->event) {
- case IB_EVENT_COMM_EST:
- CDEBUG(D_NET, "%s established\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- /*
- * We received a packet but connection isn't established
- * probably handshake packet was lost, so free to
- * force make connection established
- */
- rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
- return;
-
- default:
- CERROR("%s: Async QP event type %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
- return;
- }
-}
-
-static void
-kiblnd_complete(struct ib_wc *wc)
-{
- switch (kiblnd_wreqid2type(wc->wr_id)) {
- default:
- LBUG();
-
- case IBLND_WID_MR:
- if (wc->status != IB_WC_SUCCESS &&
- wc->status != IB_WC_WR_FLUSH_ERR)
- CNETERR("FastReg failed: %d\n", wc->status);
- break;
-
- case IBLND_WID_RDMA:
- /*
- * We only get RDMA completion notification if it fails. All
- * subsequent work items, including the final SEND will fail
- * too. However we can't print out any more info about the
- * failing RDMA because 'tx' might be back on the idle list or
- * even reused already if we didn't manage to post all our work
- * items
- */
- CNETERR("RDMA (tx: %p) failed: %d\n",
- kiblnd_wreqid2ptr(wc->wr_id), wc->status);
- return;
-
- case IBLND_WID_TX:
- kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
- return;
-
- case IBLND_WID_RX:
- kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
- wc->byte_len);
- return;
- }
-}
-
-void
-kiblnd_cq_completion(struct ib_cq *cq, void *arg)
-{
- /*
- * NB I'm not allowed to schedule this conn once its refcount has
- * reached 0. Since fundamentally I'm racing with scheduler threads
- * consuming my CQ I could be called after all completions have
- * occurred. But in this case, !ibc_nrx && !ibc_nsends_posted
- * and this CQ is about to be destroyed so I NOOP.
- */
- struct kib_conn *conn = arg;
- struct kib_sched_info *sched = conn->ibc_sched;
- unsigned long flags;
-
- LASSERT(cq == conn->ibc_cq);
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
-
- conn->ibc_ready = 1;
-
- if (!conn->ibc_scheduled &&
- (conn->ibc_nrx > 0 ||
- conn->ibc_nsends_posted > 0)) {
- kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
- conn->ibc_scheduled = 1;
- list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
-
- if (waitqueue_active(&sched->ibs_waitq))
- wake_up(&sched->ibs_waitq);
- }
-
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-}
-
-void
-kiblnd_cq_event(struct ib_event *event, void *arg)
-{
- struct kib_conn *conn = arg;
-
- CERROR("%s: async CQ event type %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
-}
-
-int
-kiblnd_scheduler(void *arg)
-{
- long id = (long)arg;
- struct kib_sched_info *sched;
- struct kib_conn *conn;
- wait_queue_entry_t wait;
- unsigned long flags;
- struct ib_wc wc;
- int did_something;
- int busy_loops = 0;
- int rc;
-
- init_waitqueue_entry(&wait, current);
-
- sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
-
- rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
- if (rc) {
- CWARN("Unable to bind on CPU partition %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
- sched->ibs_cpt);
- }
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
-
- while (!kiblnd_data.kib_shutdown) {
- if (busy_loops++ >= IBLND_RESCHED) {
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- cond_resched();
- busy_loops = 0;
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
- }
-
- did_something = 0;
-
- if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next, struct kib_conn,
- ibc_sched_list);
- /* take over kib_sched_conns' ref on conn... */
- LASSERT(conn->ibc_scheduled);
- list_del(&conn->ibc_sched_list);
- conn->ibc_ready = 0;
-
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- wc.wr_id = IBLND_WID_INVAL;
-
- rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
- if (!rc) {
- rc = ib_req_notify_cq(conn->ibc_cq,
- IB_CQ_NEXT_COMP);
- if (rc < 0) {
- CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
- kiblnd_close_conn(conn, -EIO);
- kiblnd_conn_decref(conn);
- spin_lock_irqsave(&sched->ibs_lock,
- flags);
- continue;
- }
-
- rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
- }
-
- if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
- LCONSOLE_ERROR("ib_poll_cq (rc: %d) returned invalid wr_id, opcode %d, status: %d, vendor_err: %d, conn: %s status: %d\nplease upgrade firmware and OFED or contact vendor.\n",
- rc, wc.opcode, wc.status,
- wc.vendor_err,
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- conn->ibc_state);
- rc = -EINVAL;
- }
-
- if (rc < 0) {
- CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc);
- kiblnd_close_conn(conn, -EIO);
- kiblnd_conn_decref(conn);
- spin_lock_irqsave(&sched->ibs_lock, flags);
- continue;
- }
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
-
- if (rc || conn->ibc_ready) {
- /*
- * There may be another completion waiting; get
- * another scheduler to check while I handle
- * this one...
- */
- /* +1 ref for sched_conns */
- kiblnd_conn_addref(conn);
- list_add_tail(&conn->ibc_sched_list,
- &sched->ibs_conns);
- if (waitqueue_active(&sched->ibs_waitq))
- wake_up(&sched->ibs_waitq);
- } else {
- conn->ibc_scheduled = 0;
- }
-
- if (rc) {
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
- kiblnd_complete(&wc);
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
- }
-
- kiblnd_conn_decref(conn); /* ...drop my ref from above */
- did_something = 1;
- }
-
- if (did_something)
- continue;
-
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- schedule();
- busy_loops = 0;
-
- remove_wait_queue(&sched->ibs_waitq, &wait);
- spin_lock_irqsave(&sched->ibs_lock, flags);
- }
-
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- kiblnd_thread_fini();
- return 0;
-}
-
-int
-kiblnd_failover_thread(void *arg)
-{
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- struct kib_dev *dev;
- wait_queue_entry_t wait;
- unsigned long flags;
- int rc;
-
- LASSERT(*kiblnd_tunables.kib_dev_failover);
-
- init_waitqueue_entry(&wait, current);
- write_lock_irqsave(glock, flags);
-
- while (!kiblnd_data.kib_shutdown) {
- int do_failover = 0;
- int long_sleep;
-
- list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
- ibd_fail_list) {
- if (time_before(cfs_time_current(),
- dev->ibd_next_failover))
- continue;
- do_failover = 1;
- break;
- }
-
- if (do_failover) {
- list_del_init(&dev->ibd_fail_list);
- dev->ibd_failover = 1;
- write_unlock_irqrestore(glock, flags);
-
- rc = kiblnd_dev_failover(dev);
-
- write_lock_irqsave(glock, flags);
-
- LASSERT(dev->ibd_failover);
- dev->ibd_failover = 0;
- if (rc >= 0) { /* Device is OK or failover succeed */
- dev->ibd_next_failover = cfs_time_shift(3);
- continue;
- }
-
- /* failed to failover, retry later */
- dev->ibd_next_failover =
- cfs_time_shift(min(dev->ibd_failed_failover, 10));
- if (kiblnd_dev_can_failover(dev)) {
- list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- }
-
- continue;
- }
-
- /* long sleep if no more pending failover */
- long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
-
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
- write_unlock_irqrestore(glock, flags);
-
- rc = schedule_timeout(long_sleep ? 10 * HZ :
- HZ);
- remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
- write_lock_irqsave(glock, flags);
-
- if (!long_sleep || rc)
- continue;
-
- /*
- * have a long sleep, routine check all active devices,
- * we need checking like this because if there is not active
- * connection on the dev and no SEND from local, we may listen
- * on wrong HCA for ever while there is a bonding failover
- */
- list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (kiblnd_dev_can_failover(dev)) {
- list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- }
- }
- }
-
- write_unlock_irqrestore(glock, flags);
-
- kiblnd_thread_fini();
- return 0;
-}
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
deleted file mode 100644
index b9235400bf1d..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ /dev/null
@@ -1,287 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/o2iblnd/o2iblnd_modparams.c
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include "o2iblnd.h"
-
-static int service = 987;
-module_param(service, int, 0444);
-MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");
-
-static int cksum;
-module_param(cksum, int, 0644);
-MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");
-
-static int timeout = 50;
-module_param(timeout, int, 0644);
-MODULE_PARM_DESC(timeout, "timeout (seconds)");
-
-/*
- * Number of threads in each scheduler pool which is percpt,
- * we will estimate reasonable value based on CPUs if it's set to zero.
- */
-static int nscheds;
-module_param(nscheds, int, 0444);
-MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
-
-/* NB: this value is shared by all CPTs, it can grow at runtime */
-static int ntx = 512;
-module_param(ntx, int, 0444);
-MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
-
-/* NB: this value is shared by all CPTs */
-static int credits = 256;
-module_param(credits, int, 0444);
-MODULE_PARM_DESC(credits, "# concurrent sends");
-
-static int peer_credits = 8;
-module_param(peer_credits, int, 0444);
-MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
-
-static int peer_credits_hiw;
-module_param(peer_credits_hiw, int, 0444);
-MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits");
-
-static int peer_buffer_credits;
-module_param(peer_buffer_credits, int, 0444);
-MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
-
-static int peer_timeout = 180;
-module_param(peer_timeout, int, 0444);
-MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
-
-static char *ipif_name = "ib0";
-module_param(ipif_name, charp, 0444);
-MODULE_PARM_DESC(ipif_name, "IPoIB interface name");
-
-static int retry_count = 5;
-module_param(retry_count, int, 0644);
-MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");
-
-static int rnr_retry_count = 6;
-module_param(rnr_retry_count, int, 0644);
-MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");
-
-static int keepalive = 100;
-module_param(keepalive, int, 0644);
-MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");
-
-static int ib_mtu;
-module_param(ib_mtu, int, 0444);
-MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");
-
-static int concurrent_sends;
-module_param(concurrent_sends, int, 0444);
-MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
-
-#define IBLND_DEFAULT_MAP_ON_DEMAND IBLND_MAX_RDMA_FRAGS
-static int map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND;
-module_param(map_on_demand, int, 0444);
-MODULE_PARM_DESC(map_on_demand, "map on demand");
-
-/* NB: this value is shared by all CPTs, it can grow at runtime */
-static int fmr_pool_size = 512;
-module_param(fmr_pool_size, int, 0444);
-MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");
-
-/* NB: this value is shared by all CPTs, it can grow at runtime */
-static int fmr_flush_trigger = 384;
-module_param(fmr_flush_trigger, int, 0444);
-MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");
-
-static int fmr_cache = 1;
-module_param(fmr_cache, int, 0444);
-MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
-
-/*
- * 0: disable failover
- * 1: enable failover if necessary
- * 2: force to failover (for debug)
- */
-static int dev_failover;
-module_param(dev_failover, int, 0444);
-MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
-
-static int require_privileged_port;
-module_param(require_privileged_port, int, 0644);
-MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");
-
-static int use_privileged_port = 1;
-module_param(use_privileged_port, int, 0644);
-MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
-
-struct kib_tunables kiblnd_tunables = {
- .kib_dev_failover = &dev_failover,
- .kib_service = &service,
- .kib_cksum = &cksum,
- .kib_timeout = &timeout,
- .kib_keepalive = &keepalive,
- .kib_ntx = &ntx,
- .kib_default_ipif = &ipif_name,
- .kib_retry_count = &retry_count,
- .kib_rnr_retry_count = &rnr_retry_count,
- .kib_ib_mtu = &ib_mtu,
- .kib_require_priv_port = &require_privileged_port,
- .kib_use_priv_port = &use_privileged_port,
- .kib_nscheds = &nscheds
-};
-
-static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
-
-/* # messages/RDMAs in-flight */
-int kiblnd_msg_queue_size(int version, struct lnet_ni *ni)
-{
- if (version == IBLND_MSG_VERSION_1)
- return IBLND_MSG_QUEUE_SIZE_V1;
- else if (ni)
- return ni->ni_peertxcredits;
- else
- return peer_credits;
-}
-
-int kiblnd_tunables_setup(struct lnet_ni *ni)
-{
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
-
- /*
- * if there was no tunables specified, setup the tunables to be
- * defaulted
- */
- if (!ni->ni_lnd_tunables) {
- ni->ni_lnd_tunables = kzalloc(sizeof(*ni->ni_lnd_tunables),
- GFP_NOFS);
- if (!ni->ni_lnd_tunables)
- return -ENOMEM;
-
- memcpy(&ni->ni_lnd_tunables->lt_tun_u.lt_o2ib,
- &default_tunables, sizeof(*tunables));
- }
- tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
-
- /* Current API version */
- tunables->lnd_version = 0;
-
- if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
- CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
- *kiblnd_tunables.kib_ib_mtu);
- return -EINVAL;
- }
-
- if (!ni->ni_peertimeout)
- ni->ni_peertimeout = peer_timeout;
-
- if (!ni->ni_maxtxcredits)
- ni->ni_maxtxcredits = credits;
-
- if (!ni->ni_peertxcredits)
- ni->ni_peertxcredits = peer_credits;
-
- if (!ni->ni_peerrtrcredits)
- ni->ni_peerrtrcredits = peer_buffer_credits;
-
- if (ni->ni_peertxcredits < IBLND_CREDITS_DEFAULT)
- ni->ni_peertxcredits = IBLND_CREDITS_DEFAULT;
-
- if (ni->ni_peertxcredits > IBLND_CREDITS_MAX)
- ni->ni_peertxcredits = IBLND_CREDITS_MAX;
-
- if (ni->ni_peertxcredits > credits)
- ni->ni_peertxcredits = credits;
-
- if (!tunables->lnd_peercredits_hiw)
- tunables->lnd_peercredits_hiw = peer_credits_hiw;
-
- if (tunables->lnd_peercredits_hiw < ni->ni_peertxcredits / 2)
- tunables->lnd_peercredits_hiw = ni->ni_peertxcredits / 2;
-
- if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits)
- tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1;
-
- if (tunables->lnd_map_on_demand <= 0 ||
- tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
- /* Use the default */
- CWARN("Invalid map_on_demand (%d), expects 1 - %d. Using default of %d\n",
- tunables->lnd_map_on_demand,
- IBLND_MAX_RDMA_FRAGS, IBLND_DEFAULT_MAP_ON_DEMAND);
- tunables->lnd_map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND;
- }
-
- if (tunables->lnd_map_on_demand == 1) {
- /* don't make sense to create map if only one fragment */
- tunables->lnd_map_on_demand = 2;
- }
-
- if (!tunables->lnd_concurrent_sends) {
- if (tunables->lnd_map_on_demand > 0 &&
- tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) {
- tunables->lnd_concurrent_sends =
- ni->ni_peertxcredits * 2;
- } else {
- tunables->lnd_concurrent_sends = ni->ni_peertxcredits;
- }
- }
-
- if (tunables->lnd_concurrent_sends > ni->ni_peertxcredits * 2)
- tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2;
-
- if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits / 2)
- tunables->lnd_concurrent_sends = ni->ni_peertxcredits / 2;
-
- if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits) {
- CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
- tunables->lnd_concurrent_sends, ni->ni_peertxcredits);
- }
-
- if (!tunables->lnd_fmr_pool_size)
- tunables->lnd_fmr_pool_size = fmr_pool_size;
- if (!tunables->lnd_fmr_flush_trigger)
- tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
- if (!tunables->lnd_fmr_cache)
- tunables->lnd_fmr_cache = fmr_cache;
-
- return 0;
-}
-
-void kiblnd_tunables_init(void)
-{
- default_tunables.lnd_version = 0;
- default_tunables.lnd_peercredits_hiw = peer_credits_hiw,
- default_tunables.lnd_map_on_demand = map_on_demand;
- default_tunables.lnd_concurrent_sends = concurrent_sends;
- default_tunables.lnd_fmr_pool_size = fmr_pool_size;
- default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
- default_tunables.lnd_fmr_cache = fmr_cache;
-}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile b/drivers/staging/lustre/lnet/klnds/socklnd/Makefile
deleted file mode 100644
index a7da1abfc804..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LNET) += ksocklnd.o
-
-ksocklnd-y := socklnd.o socklnd_cb.o socklnd_proto.o socklnd_modparams.o socklnd_lib.o
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
deleted file mode 100644
index 7086678e1c3e..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ /dev/null
@@ -1,2918 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/socklnd/socklnd.c
- *
- * Author: Zach Brown <zab@zabbo.net>
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include "socklnd.h"
-
-static struct lnet_lnd the_ksocklnd;
-struct ksock_nal_data ksocknal_data;
-
-static struct ksock_interface *
-ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
-{
- struct ksock_net *net = ni->ni_data;
- int i;
- struct ksock_interface *iface;
-
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- LASSERT(i < LNET_MAX_INTERFACES);
- iface = &net->ksnn_interfaces[i];
-
- if (iface->ksni_ipaddr == ip)
- return iface;
- }
-
- return NULL;
-}
-
-static struct ksock_route *
-ksocknal_create_route(__u32 ipaddr, int port)
-{
- struct ksock_route *route;
-
- route = kzalloc(sizeof(*route), GFP_NOFS);
- if (!route)
- return NULL;
-
- atomic_set(&route->ksnr_refcount, 1);
- route->ksnr_peer = NULL;
- route->ksnr_retry_interval = 0; /* OK to connect at any time */
- route->ksnr_ipaddr = ipaddr;
- route->ksnr_port = port;
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
- route->ksnr_connected = 0;
- route->ksnr_deleted = 0;
- route->ksnr_conn_count = 0;
- route->ksnr_share_count = 0;
-
- return route;
-}
-
-void
-ksocknal_destroy_route(struct ksock_route *route)
-{
- LASSERT(!atomic_read(&route->ksnr_refcount));
-
- if (route->ksnr_peer)
- ksocknal_peer_decref(route->ksnr_peer);
-
- kfree(route);
-}
-
-static int
-ksocknal_create_peer(struct ksock_peer **peerp, struct lnet_ni *ni,
- struct lnet_process_id id)
-{
- int cpt = lnet_cpt_of_nid(id.nid);
- struct ksock_net *net = ni->ni_data;
- struct ksock_peer *peer;
-
- LASSERT(id.nid != LNET_NID_ANY);
- LASSERT(id.pid != LNET_PID_ANY);
- LASSERT(!in_interrupt());
-
- peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
- if (!peer)
- return -ENOMEM;
-
- peer->ksnp_ni = ni;
- peer->ksnp_id = id;
- atomic_set(&peer->ksnp_refcount, 1); /* 1 ref for caller */
- peer->ksnp_closing = 0;
- peer->ksnp_accepting = 0;
- peer->ksnp_proto = NULL;
- peer->ksnp_last_alive = 0;
- peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
-
- INIT_LIST_HEAD(&peer->ksnp_conns);
- INIT_LIST_HEAD(&peer->ksnp_routes);
- INIT_LIST_HEAD(&peer->ksnp_tx_queue);
- INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
- spin_lock_init(&peer->ksnp_lock);
-
- spin_lock_bh(&net->ksnn_lock);
-
- if (net->ksnn_shutdown) {
- spin_unlock_bh(&net->ksnn_lock);
-
- kfree(peer);
- CERROR("Can't create peer: network shutdown\n");
- return -ESHUTDOWN;
- }
-
- net->ksnn_npeers++;
-
- spin_unlock_bh(&net->ksnn_lock);
-
- *peerp = peer;
- return 0;
-}
-
-void
-ksocknal_destroy_peer(struct ksock_peer *peer)
-{
- struct ksock_net *net = peer->ksnp_ni->ni_data;
-
- CDEBUG(D_NET, "peer %s %p deleted\n",
- libcfs_id2str(peer->ksnp_id), peer);
-
- LASSERT(!atomic_read(&peer->ksnp_refcount));
- LASSERT(!peer->ksnp_accepting);
- LASSERT(list_empty(&peer->ksnp_conns));
- LASSERT(list_empty(&peer->ksnp_routes));
- LASSERT(list_empty(&peer->ksnp_tx_queue));
- LASSERT(list_empty(&peer->ksnp_zc_req_list));
-
- kfree(peer);
-
- /*
- * NB a peer's connections and routes keep a reference on their peer
- * until they are destroyed, so we can be assured that _all_ state to
- * do with this peer has been cleaned up when its refcount drops to
- * zero.
- */
- spin_lock_bh(&net->ksnn_lock);
- net->ksnn_npeers--;
- spin_unlock_bh(&net->ksnn_lock);
-}
-
-struct ksock_peer *
-ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
-{
- struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
- struct ksock_peer *peer;
-
- list_for_each_entry(peer, peer_list, ksnp_list) {
- LASSERT(!peer->ksnp_closing);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- if (peer->ksnp_id.nid != id.nid ||
- peer->ksnp_id.pid != id.pid)
- continue;
-
- CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
- peer, libcfs_id2str(id),
- atomic_read(&peer->ksnp_refcount));
- return peer;
- }
- return NULL;
-}
-
-struct ksock_peer *
-ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
-{
- struct ksock_peer *peer;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer) /* +1 ref for caller? */
- ksocknal_peer_addref(peer);
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- return peer;
-}
-
-static void
-ksocknal_unlink_peer_locked(struct ksock_peer *peer)
-{
- int i;
- __u32 ip;
- struct ksock_interface *iface;
-
- for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
- LASSERT(i < LNET_MAX_INTERFACES);
- ip = peer->ksnp_passive_ips[i];
-
- iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
- /*
- * All IPs in peer->ksnp_passive_ips[] come from the
- * interface list, therefore the call must succeed.
- */
- LASSERT(iface);
-
- CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
- peer, iface, iface->ksni_nroutes);
- iface->ksni_npeers--;
- }
-
- LASSERT(list_empty(&peer->ksnp_conns));
- LASSERT(list_empty(&peer->ksnp_routes));
- LASSERT(!peer->ksnp_closing);
- peer->ksnp_closing = 1;
- list_del(&peer->ksnp_list);
- /* lose peerlist's ref */
- ksocknal_peer_decref(peer);
-}
-
-static int
-ksocknal_get_peer_info(struct lnet_ni *ni, int index,
- struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
- int *port, int *conn_count, int *share_count)
-{
- struct ksock_peer *peer;
- struct list_head *ptmp;
- struct ksock_route *route;
- struct list_head *rtmp;
- int i;
- int j;
- int rc = -ENOENT;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- if (!peer->ksnp_n_passive_ips &&
- list_empty(&peer->ksnp_routes)) {
- if (index-- > 0)
- continue;
-
- *id = peer->ksnp_id;
- *myip = 0;
- *peer_ip = 0;
- *port = 0;
- *conn_count = 0;
- *share_count = 0;
- rc = 0;
- goto out;
- }
-
- for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
- if (index-- > 0)
- continue;
-
- *id = peer->ksnp_id;
- *myip = peer->ksnp_passive_ips[j];
- *peer_ip = 0;
- *port = 0;
- *conn_count = 0;
- *share_count = 0;
- rc = 0;
- goto out;
- }
-
- list_for_each(rtmp, &peer->ksnp_routes) {
- if (index-- > 0)
- continue;
-
- route = list_entry(rtmp, struct ksock_route,
- ksnr_list);
-
- *id = peer->ksnp_id;
- *myip = route->ksnr_myipaddr;
- *peer_ip = route->ksnr_ipaddr;
- *port = route->ksnr_port;
- *conn_count = route->ksnr_conn_count;
- *share_count = route->ksnr_share_count;
- rc = 0;
- goto out;
- }
- }
- }
- out:
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return rc;
-}
-
-static void
-ksocknal_associate_route_conn_locked(struct ksock_route *route,
- struct ksock_conn *conn)
-{
- struct ksock_peer *peer = route->ksnr_peer;
- int type = conn->ksnc_type;
- struct ksock_interface *iface;
-
- conn->ksnc_route = route;
- ksocknal_route_addref(route);
-
- if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
- if (!route->ksnr_myipaddr) {
- /* route wasn't bound locally yet (the initial route) */
- CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr,
- &conn->ksnc_myipaddr);
- } else {
- CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr,
- &route->ksnr_myipaddr,
- &conn->ksnc_myipaddr);
-
- iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myipaddr);
- if (iface)
- iface->ksni_nroutes--;
- }
- route->ksnr_myipaddr = conn->ksnc_myipaddr;
- iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myipaddr);
- if (iface)
- iface->ksni_nroutes++;
- }
-
- route->ksnr_connected |= (1 << type);
- route->ksnr_conn_count++;
-
- /*
- * Successful connection => further attempts can
- * proceed immediately
- */
- route->ksnr_retry_interval = 0;
-}
-
-static void
-ksocknal_add_route_locked(struct ksock_peer *peer, struct ksock_route *route)
-{
- struct list_head *tmp;
- struct ksock_conn *conn;
- struct ksock_route *route2;
-
- LASSERT(!peer->ksnp_closing);
- LASSERT(!route->ksnr_peer);
- LASSERT(!route->ksnr_scheduled);
- LASSERT(!route->ksnr_connecting);
- LASSERT(!route->ksnr_connected);
-
- /* LASSERT(unique) */
- list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, struct ksock_route, ksnr_list);
-
- if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
- CERROR("Duplicate route %s %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr);
- LBUG();
- }
- }
-
- route->ksnr_peer = peer;
- ksocknal_peer_addref(peer);
- /* peer's routelist takes over my ref on 'route' */
- list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
-
- list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
-
- if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
- continue;
-
- ksocknal_associate_route_conn_locked(route, conn);
- /* keep going (typed routes) */
- }
-}
-
-static void
-ksocknal_del_route_locked(struct ksock_route *route)
-{
- struct ksock_peer *peer = route->ksnr_peer;
- struct ksock_interface *iface;
- struct ksock_conn *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
-
- LASSERT(!route->ksnr_deleted);
-
- /* Close associated conns */
- list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
-
- if (conn->ksnc_route != route)
- continue;
-
- ksocknal_close_conn_locked(conn, 0);
- }
-
- if (route->ksnr_myipaddr) {
- iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myipaddr);
- if (iface)
- iface->ksni_nroutes--;
- }
-
- route->ksnr_deleted = 1;
- list_del(&route->ksnr_list);
- ksocknal_route_decref(route); /* drop peer's ref */
-
- if (list_empty(&peer->ksnp_routes) &&
- list_empty(&peer->ksnp_conns)) {
- /*
- * I've just removed the last route to a peer with no active
- * connections
- */
- ksocknal_unlink_peer_locked(peer);
- }
-}
-
-int
-ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
- int port)
-{
- struct ksock_peer *peer;
- struct ksock_peer *peer2;
- struct ksock_route *route;
- struct ksock_route *route2;
- int rc;
-
- if (id.nid == LNET_NID_ANY ||
- id.pid == LNET_PID_ANY)
- return -EINVAL;
-
- /* Have a brand new peer ready... */
- rc = ksocknal_create_peer(&peer, ni, id);
- if (rc)
- return rc;
-
- route = ksocknal_create_route(ipaddr, port);
- if (!route) {
- ksocknal_peer_decref(peer);
- return -ENOMEM;
- }
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- /* always called with a ref on ni, so shutdown can't have started */
- LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
-
- peer2 = ksocknal_find_peer_locked(ni, id);
- if (peer2) {
- ksocknal_peer_decref(peer);
- peer = peer2;
- } else {
- /* peer table takes my ref on peer */
- list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(id.nid));
- }
-
- list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
- if (route2->ksnr_ipaddr == ipaddr) {
- /* Route already exists, use the old one */
- ksocknal_route_decref(route);
- route2->ksnr_share_count++;
- goto out;
- }
- }
- /* Route doesn't already exist, add the new one */
- ksocknal_add_route_locked(peer, route);
- route->ksnr_share_count++;
-out:
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- return 0;
-}
-
-static void
-ksocknal_del_peer_locked(struct ksock_peer *peer, __u32 ip)
-{
- struct ksock_conn *conn;
- struct ksock_route *route;
- struct list_head *tmp;
- struct list_head *nxt;
- int nshared;
-
- LASSERT(!peer->ksnp_closing);
-
- /* Extra ref prevents peer disappearing until I'm done with it */
- ksocknal_peer_addref(peer);
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
-
- /* no match */
- if (!(!ip || route->ksnr_ipaddr == ip))
- continue;
-
- route->ksnr_share_count = 0;
- /* This deletes associated conns too */
- ksocknal_del_route_locked(route);
- }
-
- nshared = 0;
- list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
- nshared += route->ksnr_share_count;
- }
-
- if (!nshared) {
- /*
- * remove everything else if there are no explicit entries
- * left
- */
- list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
-
- /* we should only be removing auto-entries */
- LASSERT(!route->ksnr_share_count);
- ksocknal_del_route_locked(route);
- }
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
-
- ksocknal_close_conn_locked(conn, 0);
- }
- }
-
- ksocknal_peer_decref(peer);
- /* NB peer unlinks itself when last conn/route is removed */
-}
-
-static int
-ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
-{
- LIST_HEAD(zombies);
- struct list_head *ptmp;
- struct list_head *pnxt;
- struct ksock_peer *peer;
- int lo;
- int hi;
- int i;
- int rc = -ENOENT;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- if (id.nid != LNET_NID_ANY) {
- lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- } else {
- lo = 0;
- hi = ksocknal_data.ksnd_peer_hash_size - 1;
- }
-
- for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
- (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
- continue;
-
- ksocknal_peer_addref(peer); /* a ref for me... */
-
- ksocknal_del_peer_locked(peer, ip);
-
- if (peer->ksnp_closing &&
- !list_empty(&peer->ksnp_tx_queue)) {
- LASSERT(list_empty(&peer->ksnp_conns));
- LASSERT(list_empty(&peer->ksnp_routes));
-
- list_splice_init(&peer->ksnp_tx_queue,
- &zombies);
- }
-
- ksocknal_peer_decref(peer); /* ...till here */
-
- rc = 0; /* matched! */
- }
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_txlist_done(ni, &zombies, 1);
-
- return rc;
-}
-
-static struct ksock_conn *
-ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
-{
- struct ksock_peer *peer;
- struct list_head *ptmp;
- struct ksock_conn *conn;
- struct list_head *ctmp;
- int i;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
-
- LASSERT(!peer->ksnp_closing);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- list_for_each(ctmp, &peer->ksnp_conns) {
- if (index-- > 0)
- continue;
-
- conn = list_entry(ctmp, struct ksock_conn,
- ksnc_list);
- ksocknal_conn_addref(conn);
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return conn;
- }
- }
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return NULL;
-}
-
-static struct ksock_sched *
-ksocknal_choose_scheduler_locked(unsigned int cpt)
-{
- struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
- struct ksock_sched *sched;
- int i;
-
- LASSERT(info->ksi_nthreads > 0);
-
- sched = &info->ksi_scheds[0];
- /*
- * NB: it's safe so far, but info->ksi_nthreads could be changed
- * at runtime when we have dynamic LNet configuration, then we
- * need to take care of this.
- */
- for (i = 1; i < info->ksi_nthreads; i++) {
- if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
- sched = &info->ksi_scheds[i];
- }
-
- return sched;
-}
-
-static int
-ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
-{
- struct ksock_net *net = ni->ni_data;
- int i;
- int nip;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- nip = net->ksnn_ninterfaces;
- LASSERT(nip <= LNET_MAX_INTERFACES);
-
- /*
- * Only offer interfaces for additional connections if I have
- * more than one.
- */
- if (nip < 2) {
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return 0;
- }
-
- for (i = 0; i < nip; i++) {
- ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
- LASSERT(ipaddrs[i]);
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return nip;
-}
-
-static int
-ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
-{
- int best_netmatch = 0;
- int best_xor = 0;
- int best = -1;
- int this_xor;
- int this_netmatch;
- int i;
-
- for (i = 0; i < nips; i++) {
- if (!ips[i])
- continue;
-
- this_xor = ips[i] ^ iface->ksni_ipaddr;
- this_netmatch = !(this_xor & iface->ksni_netmask) ? 1 : 0;
-
- if (!(best < 0 ||
- best_netmatch < this_netmatch ||
- (best_netmatch == this_netmatch &&
- best_xor > this_xor)))
- continue;
-
- best = i;
- best_netmatch = this_netmatch;
- best_xor = this_xor;
- }
-
- LASSERT(best >= 0);
- return best;
-}
-
-static int
-ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
-{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- struct ksock_net *net = peer->ksnp_ni->ni_data;
- struct ksock_interface *iface;
- struct ksock_interface *best_iface;
- int n_ips;
- int i;
- int j;
- int k;
- __u32 ip;
- __u32 xor;
- int this_netmatch;
- int best_netmatch;
- int best_npeers;
-
- /*
- * CAVEAT EMPTOR: We do all our interface matching with an
- * exclusive hold of global lock at IRQ priority. We're only
- * expecting to be dealing with small numbers of interfaces, so the
- * O(n**3)-ness shouldn't matter
- */
- /*
- * Also note that I'm not going to return more than n_peerips
- * interfaces, even if I have more myself
- */
- write_lock_bh(global_lock);
-
- LASSERT(n_peerips <= LNET_MAX_INTERFACES);
- LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
-
- /*
- * Only match interfaces for additional connections
- * if I have > 1 interface
- */
- n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
- min(n_peerips, net->ksnn_ninterfaces);
-
- for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
- /* ^ yes really... */
-
- /*
- * If we have any new interfaces, first tick off all the
- * peer IPs that match old interfaces, then choose new
- * interfaces to match the remaining peer IPS.
- * We don't forget interfaces we've stopped using; we might
- * start using them again...
- */
- if (i < peer->ksnp_n_passive_ips) {
- /* Old interface. */
- ip = peer->ksnp_passive_ips[i];
- best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
-
- /* peer passive ips are kept up to date */
- LASSERT(best_iface);
- } else {
- /* choose a new interface */
- LASSERT(i == peer->ksnp_n_passive_ips);
-
- best_iface = NULL;
- best_netmatch = 0;
- best_npeers = 0;
-
- for (j = 0; j < net->ksnn_ninterfaces; j++) {
- iface = &net->ksnn_interfaces[j];
- ip = iface->ksni_ipaddr;
-
- for (k = 0; k < peer->ksnp_n_passive_ips; k++)
- if (peer->ksnp_passive_ips[k] == ip)
- break;
-
- if (k < peer->ksnp_n_passive_ips) /* using it already */
- continue;
-
- k = ksocknal_match_peerip(iface, peerips,
- n_peerips);
- xor = ip ^ peerips[k];
- this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
-
- if (!(!best_iface ||
- best_netmatch < this_netmatch ||
- (best_netmatch == this_netmatch &&
- best_npeers > iface->ksni_npeers)))
- continue;
-
- best_iface = iface;
- best_netmatch = this_netmatch;
- best_npeers = iface->ksni_npeers;
- }
-
- LASSERT(best_iface);
-
- best_iface->ksni_npeers++;
- ip = best_iface->ksni_ipaddr;
- peer->ksnp_passive_ips[i] = ip;
- peer->ksnp_n_passive_ips = i + 1;
- }
-
- /* mark the best matching peer IP used */
- j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
- peerips[j] = 0;
- }
-
- /* Overwrite input peer IP addresses */
- memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
-
- write_unlock_bh(global_lock);
-
- return n_ips;
-}
-
-static void
-ksocknal_create_routes(struct ksock_peer *peer, int port,
- __u32 *peer_ipaddrs, int npeer_ipaddrs)
-{
- struct ksock_route *newroute = NULL;
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- struct lnet_ni *ni = peer->ksnp_ni;
- struct ksock_net *net = ni->ni_data;
- struct list_head *rtmp;
- struct ksock_route *route;
- struct ksock_interface *iface;
- struct ksock_interface *best_iface;
- int best_netmatch;
- int this_netmatch;
- int best_nroutes;
- int i;
- int j;
-
- /*
- * CAVEAT EMPTOR: We do all our interface matching with an
- * exclusive hold of global lock at IRQ priority. We're only
- * expecting to be dealing with small numbers of interfaces, so the
- * O(n**3)-ness here shouldn't matter
- */
- write_lock_bh(global_lock);
-
- if (net->ksnn_ninterfaces < 2) {
- /*
- * Only create additional connections
- * if I have > 1 interface
- */
- write_unlock_bh(global_lock);
- return;
- }
-
- LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);
-
- for (i = 0; i < npeer_ipaddrs; i++) {
- if (newroute) {
- newroute->ksnr_ipaddr = peer_ipaddrs[i];
- } else {
- write_unlock_bh(global_lock);
-
- newroute = ksocknal_create_route(peer_ipaddrs[i], port);
- if (!newroute)
- return;
-
- write_lock_bh(global_lock);
- }
-
- if (peer->ksnp_closing) {
- /* peer got closed under me */
- break;
- }
-
- /* Already got a route? */
- route = NULL;
- list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, struct ksock_route, ksnr_list);
-
- if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
- break;
-
- route = NULL;
- }
- if (route)
- continue;
-
- best_iface = NULL;
- best_nroutes = 0;
- best_netmatch = 0;
-
- LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
-
- /* Select interface to connect from */
- for (j = 0; j < net->ksnn_ninterfaces; j++) {
- iface = &net->ksnn_interfaces[j];
-
- /* Using this interface already? */
- list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, struct ksock_route,
- ksnr_list);
-
- if (route->ksnr_myipaddr == iface->ksni_ipaddr)
- break;
-
- route = NULL;
- }
- if (route)
- continue;
-
- this_netmatch = (!((iface->ksni_ipaddr ^
- newroute->ksnr_ipaddr) &
- iface->ksni_netmask)) ? 1 : 0;
-
- if (!(!best_iface ||
- best_netmatch < this_netmatch ||
- (best_netmatch == this_netmatch &&
- best_nroutes > iface->ksni_nroutes)))
- continue;
-
- best_iface = iface;
- best_netmatch = this_netmatch;
- best_nroutes = iface->ksni_nroutes;
- }
-
- if (!best_iface)
- continue;
-
- newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
- best_iface->ksni_nroutes++;
-
- ksocknal_add_route_locked(peer, newroute);
- newroute = NULL;
- }
-
- write_unlock_bh(global_lock);
- if (newroute)
- ksocknal_route_decref(newroute);
-}
-
-int
-ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
-{
- struct ksock_connreq *cr;
- int rc;
- __u32 peer_ip;
- int peer_port;
-
- rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT(!rc); /* we succeeded before */
-
- cr = kzalloc(sizeof(*cr), GFP_NOFS);
- if (!cr) {
- LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
- &peer_ip);
- return -ENOMEM;
- }
-
- lnet_ni_addref(ni);
- cr->ksncr_ni = ni;
- cr->ksncr_sock = sock;
-
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
-
- list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
- wake_up(&ksocknal_data.ksnd_connd_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
- return 0;
-}
-
-static int
-ksocknal_connecting(struct ksock_peer *peer, __u32 ipaddr)
-{
- struct ksock_route *route;
-
- list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
- if (route->ksnr_ipaddr == ipaddr)
- return route->ksnr_connecting;
- }
- return 0;
-}
-
-int
-ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
- struct socket *sock, int type)
-{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- LIST_HEAD(zombies);
- struct lnet_process_id peerid;
- struct list_head *tmp;
- __u64 incarnation;
- struct ksock_conn *conn;
- struct ksock_conn *conn2;
- struct ksock_peer *peer = NULL;
- struct ksock_peer *peer2;
- struct ksock_sched *sched;
- struct ksock_hello_msg *hello;
- int cpt;
- struct ksock_tx *tx;
- struct ksock_tx *txtmp;
- int rc;
- int active;
- char *warn = NULL;
-
- active = !!route;
-
- LASSERT(active == (type != SOCKLND_CONN_NONE));
-
- conn = kzalloc(sizeof(*conn), GFP_NOFS);
- if (!conn) {
- rc = -ENOMEM;
- goto failed_0;
- }
-
- conn->ksnc_peer = NULL;
- conn->ksnc_route = NULL;
- conn->ksnc_sock = sock;
- /*
- * 2 ref, 1 for conn, another extra ref prevents socket
- * being closed before establishment of connection
- */
- atomic_set(&conn->ksnc_sock_refcount, 2);
- conn->ksnc_type = type;
- ksocknal_lib_save_callback(sock, conn);
- atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
-
- conn->ksnc_rx_ready = 0;
- conn->ksnc_rx_scheduled = 0;
-
- INIT_LIST_HEAD(&conn->ksnc_tx_queue);
- conn->ksnc_tx_ready = 0;
- conn->ksnc_tx_scheduled = 0;
- conn->ksnc_tx_carrier = NULL;
- atomic_set(&conn->ksnc_tx_nob, 0);
-
- hello = kvzalloc(offsetof(struct ksock_hello_msg,
- kshm_ips[LNET_MAX_INTERFACES]),
- GFP_KERNEL);
- if (!hello) {
- rc = -ENOMEM;
- goto failed_1;
- }
-
- /* stash conn's local and remote addrs */
- rc = ksocknal_lib_get_conn_addrs(conn);
- if (rc)
- goto failed_1;
-
- /*
- * Find out/confirm peer's NID and connection type and get the
- * vector of interfaces she's willing to let me connect to.
- * Passive connections use the listener timeout since the peer sends
- * eagerly
- */
- if (active) {
- peer = route->ksnr_peer;
- LASSERT(ni == peer->ksnp_ni);
-
- /* Active connection sends HELLO eagerly */
- hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
- peerid = peer->ksnp_id;
-
- write_lock_bh(global_lock);
- conn->ksnc_proto = peer->ksnp_proto;
- write_unlock_bh(global_lock);
-
- if (!conn->ksnc_proto) {
- conn->ksnc_proto = &ksocknal_protocol_v3x;
-#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol == 2)
- conn->ksnc_proto = &ksocknal_protocol_v2x;
- else if (*ksocknal_tunables.ksnd_protocol == 1)
- conn->ksnc_proto = &ksocknal_protocol_v1x;
-#endif
- }
-
- rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
- if (rc)
- goto failed_1;
- } else {
- peerid.nid = LNET_NID_ANY;
- peerid.pid = LNET_PID_ANY;
-
- /* Passive, get protocol from peer */
- conn->ksnc_proto = NULL;
- }
-
- rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
- if (rc < 0)
- goto failed_1;
-
- LASSERT(!rc || active);
- LASSERT(conn->ksnc_proto);
- LASSERT(peerid.nid != LNET_NID_ANY);
-
- cpt = lnet_cpt_of_nid(peerid.nid);
-
- if (active) {
- ksocknal_peer_addref(peer);
- write_lock_bh(global_lock);
- } else {
- rc = ksocknal_create_peer(&peer, ni, peerid);
- if (rc)
- goto failed_1;
-
- write_lock_bh(global_lock);
-
- /* called with a ref on ni, so shutdown can't have started */
- LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
-
- peer2 = ksocknal_find_peer_locked(ni, peerid);
- if (!peer2) {
- /*
- * NB this puts an "empty" peer in the peer
- * table (which takes my ref)
- */
- list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(peerid.nid));
- } else {
- ksocknal_peer_decref(peer);
- peer = peer2;
- }
-
- /* +1 ref for me */
- ksocknal_peer_addref(peer);
- peer->ksnp_accepting++;
-
- /*
- * Am I already connecting to this guy? Resolve in
- * favour of higher NID...
- */
- if (peerid.nid < ni->ni_nid &&
- ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
- rc = EALREADY;
- warn = "connection race resolution";
- goto failed_2;
- }
- }
-
- if (peer->ksnp_closing ||
- (active && route->ksnr_deleted)) {
- /* peer/route got closed under me */
- rc = -ESTALE;
- warn = "peer/route removed";
- goto failed_2;
- }
-
- if (!peer->ksnp_proto) {
- /*
- * Never connected before.
- * NB recv_hello may have returned EPROTO to signal my peer
- * wants a different protocol than the one I asked for.
- */
- LASSERT(list_empty(&peer->ksnp_conns));
-
- peer->ksnp_proto = conn->ksnc_proto;
- peer->ksnp_incarnation = incarnation;
- }
-
- if (peer->ksnp_proto != conn->ksnc_proto ||
- peer->ksnp_incarnation != incarnation) {
- /* Peer rebooted or I've got the wrong protocol version */
- ksocknal_close_peer_conns_locked(peer, 0, 0);
-
- peer->ksnp_proto = NULL;
- rc = ESTALE;
- warn = peer->ksnp_incarnation != incarnation ?
- "peer rebooted" :
- "wrong proto version";
- goto failed_2;
- }
-
- switch (rc) {
- default:
- LBUG();
- case 0:
- break;
- case EALREADY:
- warn = "lost conn race";
- goto failed_2;
- case EPROTO:
- warn = "retry with different protocol version";
- goto failed_2;
- }
-
- /*
- * Refuse to duplicate an existing connection, unless this is a
- * loopback connection
- */
- if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
- list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
- if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
- conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
- conn2->ksnc_type != conn->ksnc_type)
- continue;
-
- /*
- * Reply on a passive connection attempt so the peer
- * realises we're connected.
- */
- LASSERT(!rc);
- if (!active)
- rc = EALREADY;
-
- warn = "duplicate";
- goto failed_2;
- }
- }
-
- /*
- * If the connection created by this route didn't bind to the IP
- * address the route connected to, the connection/route matching
- * code below probably isn't going to work.
- */
- if (active &&
- route->ksnr_ipaddr != conn->ksnc_ipaddr) {
- CERROR("Route %s %pI4h connected to %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr,
- &conn->ksnc_ipaddr);
- }
-
- /*
- * Search for a route corresponding to the new connection and
- * create an association. This allows incoming connections created
- * by routes in my peer to match my own route entries so I don't
- * continually create duplicate routes.
- */
- list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
-
- if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
- continue;
-
- ksocknal_associate_route_conn_locked(route, conn);
- break;
- }
-
- conn->ksnc_peer = peer; /* conn takes my ref on peer */
- peer->ksnp_last_alive = cfs_time_current();
- peer->ksnp_send_keepalive = 0;
- peer->ksnp_error = 0;
-
- sched = ksocknal_choose_scheduler_locked(cpt);
- sched->kss_nconns++;
- conn->ksnc_scheduler = sched;
-
- conn->ksnc_tx_last_post = cfs_time_current();
- /* Set the deadline for the outgoing HELLO to drain */
- conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
- conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with adding to peer's conn list */
-
- list_add(&conn->ksnc_list, &peer->ksnp_conns);
- ksocknal_conn_addref(conn);
-
- ksocknal_new_packet(conn, 0);
-
- conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
-
- /* Take packets blocking for this connection. */
- list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
- int match = conn->ksnc_proto->pro_match_tx(conn, tx,
- tx->tx_nonblk);
-
- if (match == SOCKNAL_MATCH_NO)
- continue;
-
- list_del(&tx->tx_list);
- ksocknal_queue_tx_locked(tx, conn);
- }
-
- write_unlock_bh(global_lock);
-
- /*
- * We've now got a new connection. Any errors from here on are just
- * like "normal" comms errors and we close the connection normally.
- * NB (a) we still have to send the reply HELLO for passive
- * connections,
- * (b) normal I/O on the conn is blocked until I setup and call the
- * socket callbacks.
- */
- CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
- libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
- &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
- conn->ksnc_port, incarnation, cpt,
- (int)(sched - &sched->kss_info->ksi_scheds[0]));
-
- if (active) {
- /* additional routes after interface exchange? */
- ksocknal_create_routes(peer, conn->ksnc_port,
- hello->kshm_ips, hello->kshm_nips);
- } else {
- hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
- hello->kshm_nips);
- rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
- }
-
- kvfree(hello);
-
- /*
- * setup the socket AFTER I've received hello (it disables
- * SO_LINGER). I might call back to the acceptor who may want
- * to send a protocol version response and then close the
- * socket; this ensures the socket only tears down after the
- * response has been sent.
- */
- if (!rc)
- rc = ksocknal_lib_setup_sock(sock);
-
- write_lock_bh(global_lock);
-
- /* NB my callbacks block while I hold ksnd_global_lock */
- ksocknal_lib_set_callback(sock, conn);
-
- if (!active)
- peer->ksnp_accepting--;
-
- write_unlock_bh(global_lock);
-
- if (rc) {
- write_lock_bh(global_lock);
- if (!conn->ksnc_closing) {
- /* could be closed by another thread */
- ksocknal_close_conn_locked(conn, rc);
- }
- write_unlock_bh(global_lock);
- } else if (!ksocknal_connsock_addref(conn)) {
- /* Allow I/O to proceed. */
- ksocknal_read_callback(conn);
- ksocknal_write_callback(conn);
- ksocknal_connsock_decref(conn);
- }
-
- ksocknal_connsock_decref(conn);
- ksocknal_conn_decref(conn);
- return rc;
-
- failed_2:
- if (!peer->ksnp_closing &&
- list_empty(&peer->ksnp_conns) &&
- list_empty(&peer->ksnp_routes)) {
- list_add(&zombies, &peer->ksnp_tx_queue);
- list_del_init(&peer->ksnp_tx_queue);
- ksocknal_unlink_peer_locked(peer);
- }
-
- write_unlock_bh(global_lock);
-
- if (warn) {
- if (rc < 0)
- CERROR("Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid), conn->ksnc_type, warn);
- else
- CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid), conn->ksnc_type, warn);
- }
-
- if (!active) {
- if (rc > 0) {
- /*
- * Request retry by replying with CONN_NONE
- * ksnc_proto has been set already
- */
- conn->ksnc_type = SOCKLND_CONN_NONE;
- hello->kshm_nips = 0;
- ksocknal_send_hello(ni, conn, peerid.nid, hello);
- }
-
- write_lock_bh(global_lock);
- peer->ksnp_accepting--;
- write_unlock_bh(global_lock);
- }
-
- ksocknal_txlist_done(ni, &zombies, 1);
- ksocknal_peer_decref(peer);
-
-failed_1:
- kvfree(hello);
-
- kfree(conn);
-
-failed_0:
- sock_release(sock);
- return rc;
-}
-
-void
-ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
-{
- /*
- * This just does the immmediate housekeeping, and queues the
- * connection for the reaper to terminate.
- * Caller holds ksnd_global_lock exclusively in irq context
- */
- struct ksock_peer *peer = conn->ksnc_peer;
- struct ksock_route *route;
- struct ksock_conn *conn2;
- struct list_head *tmp;
-
- LASSERT(!peer->ksnp_error);
- LASSERT(!conn->ksnc_closing);
- conn->ksnc_closing = 1;
-
- /* ksnd_deathrow_conns takes over peer's ref */
- list_del(&conn->ksnc_list);
-
- route = conn->ksnc_route;
- if (route) {
- /* dissociate conn from route... */
- LASSERT(!route->ksnr_deleted);
- LASSERT(route->ksnr_connected & (1 << conn->ksnc_type));
-
- conn2 = NULL;
- list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
- if (conn2->ksnc_route == route &&
- conn2->ksnc_type == conn->ksnc_type)
- break;
-
- conn2 = NULL;
- }
- if (!conn2)
- route->ksnr_connected &= ~(1 << conn->ksnc_type);
-
- conn->ksnc_route = NULL;
-
- ksocknal_route_decref(route); /* drop conn's ref on route */
- }
-
- if (list_empty(&peer->ksnp_conns)) {
- /* No more connections to this peer */
-
- if (!list_empty(&peer->ksnp_tx_queue)) {
- struct ksock_tx *tx;
-
- LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
-
- /*
- * throw them to the last connection...,
- * these TXs will be send to /dev/null by scheduler
- */
- list_for_each_entry(tx, &peer->ksnp_tx_queue,
- tx_list)
- ksocknal_tx_prep(conn, tx);
-
- spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
- list_splice_init(&peer->ksnp_tx_queue,
- &conn->ksnc_tx_queue);
- spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
- }
-
- peer->ksnp_proto = NULL; /* renegotiate protocol version */
- peer->ksnp_error = error; /* stash last conn close reason */
-
- if (list_empty(&peer->ksnp_routes)) {
- /*
- * I've just closed last conn belonging to a
- * peer with no routes to it
- */
- ksocknal_unlink_peer_locked(peer);
- }
- }
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- list_add_tail(&conn->ksnc_list,
- &ksocknal_data.ksnd_deathrow_conns);
- wake_up(&ksocknal_data.ksnd_reaper_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-}
-
-void
-ksocknal_peer_failed(struct ksock_peer *peer)
-{
- int notify = 0;
- unsigned long last_alive = 0;
-
- /*
- * There has been a connection failure or comms error; but I'll only
- * tell LNET I think the peer is dead if it's to another kernel and
- * there are no connections or connection attempts in existence.
- */
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) &&
- list_empty(&peer->ksnp_conns) &&
- !peer->ksnp_accepting &&
- !ksocknal_find_connecting_route_locked(peer)) {
- notify = 1;
- last_alive = peer->ksnp_last_alive;
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- if (notify)
- lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
- last_alive);
-}
-
-void
-ksocknal_finalize_zcreq(struct ksock_conn *conn)
-{
- struct ksock_peer *peer = conn->ksnc_peer;
- struct ksock_tx *tx;
- struct ksock_tx *temp;
- struct ksock_tx *tmp;
- LIST_HEAD(zlist);
-
- /*
- * NB safe to finalize TXs because closing of socket will
- * abort all buffered data
- */
- LASSERT(!conn->ksnc_sock);
-
- spin_lock(&peer->ksnp_lock);
-
- list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
- if (tx->tx_conn != conn)
- continue;
-
- LASSERT(tx->tx_msg.ksm_zc_cookies[0]);
-
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- tx->tx_zc_aborted = 1; /* mark it as not-acked */
- list_del(&tx->tx_zc_list);
- list_add(&tx->tx_zc_list, &zlist);
- }
-
- spin_unlock(&peer->ksnp_lock);
-
- list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) {
- list_del(&tx->tx_zc_list);
- ksocknal_tx_decref(tx);
- }
-}
-
-void
-ksocknal_terminate_conn(struct ksock_conn *conn)
-{
- /*
- * This gets called by the reaper (guaranteed thread context) to
- * disengage the socket from its callbacks and close it.
- * ksnc_refcount will eventually hit zero, and then the reaper will
- * destroy it.
- */
- struct ksock_peer *peer = conn->ksnc_peer;
- struct ksock_sched *sched = conn->ksnc_scheduler;
- int failed = 0;
-
- LASSERT(conn->ksnc_closing);
-
- /* wake up the scheduler to "send" all remaining packets to /dev/null */
- spin_lock_bh(&sched->kss_lock);
-
- /* a closing conn is always ready to tx */
- conn->ksnc_tx_ready = 1;
-
- if (!conn->ksnc_tx_scheduled &&
- !list_empty(&conn->ksnc_tx_queue)) {
- list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
-
- wake_up(&sched->kss_waitq);
- }
-
- spin_unlock_bh(&sched->kss_lock);
-
- /* serialise with callbacks */
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
-
- /*
- * OK, so this conn may not be completely disengaged from its
- * scheduler yet, but it _has_ committed to terminate...
- */
- conn->ksnc_scheduler->kss_nconns--;
-
- if (peer->ksnp_error) {
- /* peer's last conn closed in error */
- LASSERT(list_empty(&peer->ksnp_conns));
- failed = 1;
- peer->ksnp_error = 0; /* avoid multiple notifications */
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- if (failed)
- ksocknal_peer_failed(peer);
-
- /*
- * The socket is closed on the final put; either here, or in
- * ksocknal_{send,recv}msg(). Since we set up the linger2 option
- * when the connection was established, this will close the socket
- * immediately, aborting anything buffered in it. Any hung
- * zero-copy transmits will therefore complete in finite time.
- */
- ksocknal_connsock_decref(conn);
-}
-
-void
-ksocknal_queue_zombie_conn(struct ksock_conn *conn)
-{
- /* Queue the conn for the reaper to destroy */
-
- LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
- wake_up(&ksocknal_data.ksnd_reaper_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-}
-
-void
-ksocknal_destroy_conn(struct ksock_conn *conn)
-{
- unsigned long last_rcv;
-
- /* Final coup-de-grace of the reaper */
- CDEBUG(D_NET, "connection %p\n", conn);
-
- LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
- LASSERT(!atomic_read(&conn->ksnc_sock_refcount));
- LASSERT(!conn->ksnc_sock);
- LASSERT(!conn->ksnc_route);
- LASSERT(!conn->ksnc_tx_scheduled);
- LASSERT(!conn->ksnc_rx_scheduled);
- LASSERT(list_empty(&conn->ksnc_tx_queue));
-
- /* complete current receive if any */
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_LNET_PAYLOAD:
- last_rcv = conn->ksnc_rx_deadline -
- *ksocknal_tunables.ksnd_timeout * HZ;
- CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %zd, left: %d, last alive is %ld secs ago\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
- &conn->ksnc_ipaddr, conn->ksnc_port,
- iov_iter_count(&conn->ksnc_rx_to), conn->ksnc_rx_nob_left,
- cfs_duration_sec(cfs_time_sub(cfs_time_current(),
- last_rcv)));
- lnet_finalize(conn->ksnc_peer->ksnp_ni,
- conn->ksnc_cookie, -EIO);
- break;
- case SOCKNAL_RX_LNET_HEADER:
- if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port,
- conn->ksnc_proto->pro_version);
- break;
- case SOCKNAL_RX_KSM_HEADER:
- if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port,
- conn->ksnc_proto->pro_version);
- break;
- case SOCKNAL_RX_SLOP:
- if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port);
- break;
- default:
- LBUG();
- break;
- }
-
- ksocknal_peer_decref(conn->ksnc_peer);
-
- kfree(conn);
-}
-
-int
-ksocknal_close_peer_conns_locked(struct ksock_peer *peer, __u32 ipaddr, int why)
-{
- struct ksock_conn *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
-
- list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
-
- if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
- count++;
- ksocknal_close_conn_locked(conn, why);
- }
- }
-
- return count;
-}
-
-int
-ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
-{
- struct ksock_peer *peer = conn->ksnc_peer;
- __u32 ipaddr = conn->ksnc_ipaddr;
- int count;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- count = ksocknal_close_peer_conns_locked(peer, ipaddr, why);
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- return count;
-}
-
-int
-ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
-{
- struct ksock_peer *peer;
- struct list_head *ptmp;
- struct list_head *pnxt;
- int lo;
- int hi;
- int i;
- int count = 0;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- if (id.nid != LNET_NID_ANY) {
- lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- } else {
- lo = 0;
- hi = ksocknal_data.ksnd_peer_hash_size - 1;
- }
-
- for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
-
- if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
- (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
- continue;
-
- count += ksocknal_close_peer_conns_locked(peer, ipaddr,
- 0);
- }
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- /* wildcards always succeed */
- if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr)
- return 0;
-
- if (!count)
- return -ENOENT;
- else
- return 0;
-}
-
-void
-ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
-{
- /*
- * The router is telling me she's been notified of a change in
- * gateway state....
- */
- struct lnet_process_id id = {0};
-
- id.nid = gw_nid;
- id.pid = LNET_PID_ANY;
-
- CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
- alive ? "up" : "down");
-
- if (!alive) {
- /* If the gateway crashed, close all open connections... */
- ksocknal_close_matching_conns(id, 0);
- return;
- }
-
- /*
- * ...otherwise do nothing. We can only establish new connections
- * if we have autroutes, and these connect on demand.
- */
-}
-
-void
-ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when)
-{
- int connect = 1;
- unsigned long last_alive = 0;
- unsigned long now = cfs_time_current();
- struct ksock_peer *peer = NULL;
- rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
- struct lnet_process_id id = {
- .nid = nid,
- .pid = LNET_PID_LUSTRE,
- };
-
- read_lock(glock);
-
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer) {
- struct ksock_conn *conn;
- int bufnob;
-
- list_for_each_entry(conn, &peer->ksnp_conns, ksnc_list) {
- bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
-
- if (bufnob < conn->ksnc_tx_bufnob) {
- /* something got ACKed */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- peer->ksnp_last_alive = now;
- conn->ksnc_tx_bufnob = bufnob;
- }
- }
-
- last_alive = peer->ksnp_last_alive;
- if (!ksocknal_find_connectable_route_locked(peer))
- connect = 0;
- }
-
- read_unlock(glock);
-
- if (last_alive)
- *when = last_alive;
-
- CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
- libcfs_nid2str(nid), peer,
- last_alive ? cfs_duration_sec(now - last_alive) : -1,
- connect);
-
- if (!connect)
- return;
-
- ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
-
- write_lock_bh(glock);
-
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer)
- ksocknal_launch_all_connections_locked(peer);
-
- write_unlock_bh(glock);
-}
-
-static void
-ksocknal_push_peer(struct ksock_peer *peer)
-{
- int index;
- int i;
- struct list_head *tmp;
- struct ksock_conn *conn;
-
- for (index = 0; ; index++) {
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- i = 0;
- conn = NULL;
-
- list_for_each(tmp, &peer->ksnp_conns) {
- if (i++ == index) {
- conn = list_entry(tmp, struct ksock_conn,
- ksnc_list);
- ksocknal_conn_addref(conn);
- break;
- }
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- if (!conn)
- break;
-
- ksocknal_lib_push_conn(conn);
- ksocknal_conn_decref(conn);
- }
-}
-
-static int ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
-{
- struct list_head *start;
- struct list_head *end;
- struct list_head *tmp;
- int rc = -ENOENT;
- unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;
-
- if (id.nid == LNET_NID_ANY) {
- start = &ksocknal_data.ksnd_peers[0];
- end = &ksocknal_data.ksnd_peers[hsize - 1];
- } else {
- start = ksocknal_nid2peerlist(id.nid);
- end = ksocknal_nid2peerlist(id.nid);
- }
-
- for (tmp = start; tmp <= end; tmp++) {
- int peer_off; /* searching offset in peer hash table */
-
- for (peer_off = 0; ; peer_off++) {
- struct ksock_peer *peer;
- int i = 0;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
- list_for_each_entry(peer, tmp, ksnp_list) {
- if (!((id.nid == LNET_NID_ANY ||
- id.nid == peer->ksnp_id.nid) &&
- (id.pid == LNET_PID_ANY ||
- id.pid == peer->ksnp_id.pid)))
- continue;
-
- if (i++ == peer_off) {
- ksocknal_peer_addref(peer);
- break;
- }
- }
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- if (!i) /* no match */
- break;
-
- rc = 0;
- ksocknal_push_peer(peer);
- ksocknal_peer_decref(peer);
- }
- }
- return rc;
-}
-
-static int
-ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
-{
- struct ksock_net *net = ni->ni_data;
- struct ksock_interface *iface;
- int rc;
- int i;
- int j;
- struct list_head *ptmp;
- struct ksock_peer *peer;
- struct list_head *rtmp;
- struct ksock_route *route;
-
- if (!ipaddress || !netmask)
- return -EINVAL;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- iface = ksocknal_ip2iface(ni, ipaddress);
- if (iface) {
- /* silently ignore dups */
- rc = 0;
- } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
- rc = -ENOSPC;
- } else {
- iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
-
- iface->ksni_ipaddr = ipaddress;
- iface->ksni_netmask = netmask;
- iface->ksni_nroutes = 0;
- iface->ksni_npeers = 0;
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, struct ksock_peer,
- ksnp_list);
-
- for (j = 0; j < peer->ksnp_n_passive_ips; j++)
- if (peer->ksnp_passive_ips[j] == ipaddress)
- iface->ksni_npeers++;
-
- list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, struct ksock_route,
- ksnr_list);
-
- if (route->ksnr_myipaddr == ipaddress)
- iface->ksni_nroutes++;
- }
- }
- }
-
- rc = 0;
- /*
- * NB only new connections will pay attention to the
- * new interface!
- */
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- return rc;
-}
-
-static void
-ksocknal_peer_del_interface_locked(struct ksock_peer *peer, __u32 ipaddr)
-{
- struct list_head *tmp;
- struct list_head *nxt;
- struct ksock_route *route;
- struct ksock_conn *conn;
- int i;
- int j;
-
- for (i = 0; i < peer->ksnp_n_passive_ips; i++)
- if (peer->ksnp_passive_ips[i] == ipaddr) {
- for (j = i + 1; j < peer->ksnp_n_passive_ips; j++)
- peer->ksnp_passive_ips[j - 1] =
- peer->ksnp_passive_ips[j];
- peer->ksnp_n_passive_ips--;
- break;
- }
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
-
- if (route->ksnr_myipaddr != ipaddr)
- continue;
-
- if (route->ksnr_share_count) {
- /* Manually created; keep, but unbind */
- route->ksnr_myipaddr = 0;
- } else {
- ksocknal_del_route_locked(route);
- }
- }
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
-
- if (conn->ksnc_myipaddr == ipaddr)
- ksocknal_close_conn_locked(conn, 0);
- }
-}
-
-static int
-ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
-{
- struct ksock_net *net = ni->ni_data;
- int rc = -ENOENT;
- struct list_head *tmp;
- struct list_head *nxt;
- struct ksock_peer *peer;
- __u32 this_ip;
- int i;
- int j;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
-
- if (!(!ipaddress || ipaddress == this_ip))
- continue;
-
- rc = 0;
-
- for (j = i + 1; j < net->ksnn_ninterfaces; j++)
- net->ksnn_interfaces[j - 1] =
- net->ksnn_interfaces[j];
-
- net->ksnn_ninterfaces--;
-
- for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
- list_for_each_safe(tmp, nxt,
- &ksocknal_data.ksnd_peers[j]) {
- peer = list_entry(tmp, struct ksock_peer, ksnp_list);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- ksocknal_peer_del_interface_locked(peer, this_ip);
- }
- }
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- return rc;
-}
-
-int
-ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
-{
- struct lnet_process_id id = {0};
- struct libcfs_ioctl_data *data = arg;
- int rc;
-
- switch (cmd) {
- case IOC_LIBCFS_GET_INTERFACE: {
- struct ksock_net *net = ni->ni_data;
- struct ksock_interface *iface;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
- rc = -ENOENT;
- } else {
- rc = 0;
- iface = &net->ksnn_interfaces[data->ioc_count];
-
- data->ioc_u32[0] = iface->ksni_ipaddr;
- data->ioc_u32[1] = iface->ksni_netmask;
- data->ioc_u32[2] = iface->ksni_npeers;
- data->ioc_u32[3] = iface->ksni_nroutes;
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return rc;
- }
-
- case IOC_LIBCFS_ADD_INTERFACE:
- return ksocknal_add_interface(ni,
- data->ioc_u32[0], /* IP address */
- data->ioc_u32[1]); /* net mask */
-
- case IOC_LIBCFS_DEL_INTERFACE:
- return ksocknal_del_interface(ni,
- data->ioc_u32[0]); /* IP address */
-
- case IOC_LIBCFS_GET_PEER: {
- __u32 myip = 0;
- __u32 ip = 0;
- int port = 0;
- int conn_count = 0;
- int share_count = 0;
-
- rc = ksocknal_get_peer_info(ni, data->ioc_count,
- &id, &myip, &ip, &port,
- &conn_count, &share_count);
- if (rc)
- return rc;
-
- data->ioc_nid = id.nid;
- data->ioc_count = share_count;
- data->ioc_u32[0] = ip;
- data->ioc_u32[1] = port;
- data->ioc_u32[2] = myip;
- data->ioc_u32[3] = conn_count;
- data->ioc_u32[4] = id.pid;
- return 0;
- }
-
- case IOC_LIBCFS_ADD_PEER:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_LUSTRE;
- return ksocknal_add_peer(ni, id,
- data->ioc_u32[0], /* IP */
- data->ioc_u32[1]); /* port */
-
- case IOC_LIBCFS_DEL_PEER:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_ANY;
- return ksocknal_del_peer(ni, id,
- data->ioc_u32[0]); /* IP */
-
- case IOC_LIBCFS_GET_CONN: {
- int txmem;
- int rxmem;
- int nagle;
- struct ksock_conn *conn;
-
- conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
- if (!conn)
- return -ENOENT;
-
- ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
-
- data->ioc_count = txmem;
- data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
- data->ioc_flags = nagle;
- data->ioc_u32[0] = conn->ksnc_ipaddr;
- data->ioc_u32[1] = conn->ksnc_port;
- data->ioc_u32[2] = conn->ksnc_myipaddr;
- data->ioc_u32[3] = conn->ksnc_type;
- data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
- data->ioc_u32[5] = rxmem;
- data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
- ksocknal_conn_decref(conn);
- return 0;
- }
-
- case IOC_LIBCFS_CLOSE_CONNECTION:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_ANY;
- return ksocknal_close_matching_conns(id,
- data->ioc_u32[0]);
-
- case IOC_LIBCFS_REGISTER_MYNID:
- /* Ignore if this is a noop */
- if (data->ioc_nid == ni->ni_nid)
- return 0;
-
- CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
- libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(ni->ni_nid));
- return -EINVAL;
-
- case IOC_LIBCFS_PUSH_CONNECTION:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_ANY;
- return ksocknal_push(ni, id);
-
- default:
- return -EINVAL;
- }
- /* not reached */
-}
-
-static void
-ksocknal_free_buffers(void)
-{
- LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs));
-
- if (ksocknal_data.ksnd_sched_info) {
- struct ksock_sched_info *info;
- int i;
-
- cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info)
- kfree(info->ksi_scheds);
- cfs_percpt_free(ksocknal_data.ksnd_sched_info);
- }
-
- kvfree(ksocknal_data.ksnd_peers);
-
- spin_lock(&ksocknal_data.ksnd_tx_lock);
-
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- struct list_head zlist;
- struct ksock_tx *tx;
- struct ksock_tx *temp;
-
- list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
- list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
-
- list_for_each_entry_safe(tx, temp, &zlist, tx_list) {
- list_del(&tx->tx_list);
- kfree(tx);
- }
- } else {
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
- }
-}
-
-static void
-ksocknal_base_shutdown(void)
-{
- struct ksock_sched_info *info;
- struct ksock_sched *sched;
- int i;
- int j;
-
- LASSERT(!ksocknal_data.ksnd_nnets);
-
- switch (ksocknal_data.ksnd_init) {
- default:
- LASSERT(0);
- /* fall through */
- case SOCKNAL_INIT_ALL:
- case SOCKNAL_INIT_DATA:
- LASSERT(ksocknal_data.ksnd_peers);
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
- LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
-
- LASSERT(list_empty(&ksocknal_data.ksnd_nets));
- LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
- LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
- LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
- LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
-
- if (ksocknal_data.ksnd_sched_info) {
- cfs_percpt_for_each(info, i,
- ksocknal_data.ksnd_sched_info) {
- if (!info->ksi_scheds)
- continue;
-
- for (j = 0; j < info->ksi_nthreads_max; j++) {
- sched = &info->ksi_scheds[j];
- LASSERT(list_empty(
- &sched->kss_tx_conns));
- LASSERT(list_empty(
- &sched->kss_rx_conns));
- LASSERT(list_empty(
- &sched->kss_zombie_noop_txs));
- LASSERT(!sched->kss_nconns);
- }
- }
- }
-
- /* flag threads to terminate; wake and wait for them to die */
- ksocknal_data.ksnd_shuttingdown = 1;
- wake_up_all(&ksocknal_data.ksnd_connd_waitq);
- wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
-
- if (ksocknal_data.ksnd_sched_info) {
- cfs_percpt_for_each(info, i,
- ksocknal_data.ksnd_sched_info) {
- if (!info->ksi_scheds)
- continue;
-
- for (j = 0; j < info->ksi_nthreads_max; j++) {
- sched = &info->ksi_scheds[j];
- wake_up_all(&sched->kss_waitq);
- }
- }
- }
-
- i = 4;
- read_lock(&ksocknal_data.ksnd_global_lock);
- while (ksocknal_data.ksnd_nthreads) {
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "waiting for %d threads to terminate\n",
- ksocknal_data.ksnd_nthreads);
- read_unlock(&ksocknal_data.ksnd_global_lock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- read_lock(&ksocknal_data.ksnd_global_lock);
- }
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_free_buffers();
-
- ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
- break;
- }
-
- module_put(THIS_MODULE);
-}
-
-static __u64
-ksocknal_new_incarnation(void)
-{
- /* The incarnation number is the time this module loaded and it
- * identifies this particular instance of the socknal.
- */
- return ktime_get_ns();
-}
-
-static int
-ksocknal_base_startup(void)
-{
- struct ksock_sched_info *info;
- int rc;
- int i;
-
- LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
- LASSERT(!ksocknal_data.ksnd_nnets);
-
- memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
-
- ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
- ksocknal_data.ksnd_peers = kvmalloc_array(ksocknal_data.ksnd_peer_hash_size,
- sizeof(struct list_head),
- GFP_KERNEL);
- if (!ksocknal_data.ksnd_peers)
- return -ENOMEM;
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
- INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
-
- rwlock_init(&ksocknal_data.ksnd_global_lock);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
-
- spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
- init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
-
- spin_lock_init(&ksocknal_data.ksnd_connd_lock);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
- init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
-
- spin_lock_init(&ksocknal_data.ksnd_tx_lock);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
-
- /* NB memset above zeros whole of ksocknal_data */
-
- /* flag lists/ptrs/locks initialised */
- ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
- try_module_get(THIS_MODULE);
-
- ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*info));
- if (!ksocknal_data.ksnd_sched_info)
- goto failed;
-
- cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- struct ksock_sched *sched;
- int nthrs;
-
- nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
- if (*ksocknal_tunables.ksnd_nscheds > 0) {
- nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
- } else {
- /*
- * max to half of CPUs, assume another half should be
- * reserved for upper layer modules
- */
- nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
- }
-
- info->ksi_nthreads_max = nthrs;
- info->ksi_cpt = i;
-
- info->ksi_scheds = kzalloc_cpt(info->ksi_nthreads_max * sizeof(*sched),
- GFP_NOFS, i);
- if (!info->ksi_scheds)
- goto failed;
-
- for (; nthrs > 0; nthrs--) {
- sched = &info->ksi_scheds[nthrs - 1];
-
- sched->kss_info = info;
- spin_lock_init(&sched->kss_lock);
- INIT_LIST_HEAD(&sched->kss_rx_conns);
- INIT_LIST_HEAD(&sched->kss_tx_conns);
- INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
- init_waitqueue_head(&sched->kss_waitq);
- }
- }
-
- ksocknal_data.ksnd_connd_starting = 0;
- ksocknal_data.ksnd_connd_failed_stamp = 0;
- ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
- /*
- * must have at least 2 connds to remain responsive to accepts while
- * connecting
- */
- if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
- *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
-
- if (*ksocknal_tunables.ksnd_nconnds_max <
- *ksocknal_tunables.ksnd_nconnds) {
- ksocknal_tunables.ksnd_nconnds_max =
- ksocknal_tunables.ksnd_nconnds;
- }
-
- for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
- char name[16];
-
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- ksocknal_data.ksnd_connd_starting++;
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
- snprintf(name, sizeof(name), "socknal_cd%02d", i);
- rc = ksocknal_thread_start(ksocknal_connd,
- (void *)((uintptr_t)i), name);
- if (rc) {
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- ksocknal_data.ksnd_connd_starting--;
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
- CERROR("Can't spawn socknal connd: %d\n", rc);
- goto failed;
- }
- }
-
- rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
- if (rc) {
- CERROR("Can't spawn socknal reaper: %d\n", rc);
- goto failed;
- }
-
- /* flag everything initialised */
- ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
-
- return 0;
-
- failed:
- ksocknal_base_shutdown();
- return -ENETDOWN;
-}
-
-static void
-ksocknal_debug_peerhash(struct lnet_ni *ni)
-{
- struct ksock_peer *peer = NULL;
- struct list_head *tmp;
- int i;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(tmp, struct ksock_peer, ksnp_list);
-
- if (peer->ksnp_ni == ni)
- break;
-
- peer = NULL;
- }
- }
-
- if (peer) {
- struct ksock_route *route;
- struct ksock_conn *conn;
-
- CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
- libcfs_id2str(peer->ksnp_id),
- atomic_read(&peer->ksnp_refcount),
- peer->ksnp_sharecount, peer->ksnp_closing,
- peer->ksnp_accepting, peer->ksnp_error,
- peer->ksnp_zc_next_cookie,
- !list_empty(&peer->ksnp_tx_queue),
- !list_empty(&peer->ksnp_zc_req_list));
-
- list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
- CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
- atomic_read(&route->ksnr_refcount),
- route->ksnr_scheduled, route->ksnr_connecting,
- route->ksnr_connected, route->ksnr_deleted);
- }
-
- list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
- CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
- atomic_read(&conn->ksnc_conn_refcount),
- atomic_read(&conn->ksnc_sock_refcount),
- conn->ksnc_type, conn->ksnc_closing);
- }
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-}
-
-void
-ksocknal_shutdown(struct lnet_ni *ni)
-{
- struct ksock_net *net = ni->ni_data;
- int i;
- struct lnet_process_id anyid = {0};
-
- anyid.nid = LNET_NID_ANY;
- anyid.pid = LNET_PID_ANY;
-
- LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
- LASSERT(ksocknal_data.ksnd_nnets > 0);
-
- spin_lock_bh(&net->ksnn_lock);
- net->ksnn_shutdown = 1; /* prevent new peers */
- spin_unlock_bh(&net->ksnn_lock);
-
- /* Delete all peers */
- ksocknal_del_peer(ni, anyid, 0);
-
- /* Wait for all peer state to clean up */
- i = 2;
- spin_lock_bh(&net->ksnn_lock);
- while (net->ksnn_npeers) {
- spin_unlock_bh(&net->ksnn_lock);
-
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "waiting for %d peers to disconnect\n",
- net->ksnn_npeers);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
-
- ksocknal_debug_peerhash(ni);
-
- spin_lock_bh(&net->ksnn_lock);
- }
- spin_unlock_bh(&net->ksnn_lock);
-
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- LASSERT(!net->ksnn_interfaces[i].ksni_npeers);
- LASSERT(!net->ksnn_interfaces[i].ksni_nroutes);
- }
-
- list_del(&net->ksnn_list);
- kfree(net);
-
- ksocknal_data.ksnd_nnets--;
- if (!ksocknal_data.ksnd_nnets)
- ksocknal_base_shutdown();
-}
-
-static int
-ksocknal_enumerate_interfaces(struct ksock_net *net)
-{
- char **names;
- int i;
- int j;
- int rc;
- int n;
-
- n = lnet_ipif_enumerate(&names);
- if (n <= 0) {
- CERROR("Can't enumerate interfaces: %d\n", n);
- return n;
- }
-
- for (i = j = 0; i < n; i++) {
- int up;
- __u32 ip;
- __u32 mask;
-
- if (!strcmp(names[i], "lo")) /* skip the loopback IF */
- continue;
-
- rc = lnet_ipif_query(names[i], &up, &ip, &mask);
- if (rc) {
- CWARN("Can't get interface %s info: %d\n",
- names[i], rc);
- continue;
- }
-
- if (!up) {
- CWARN("Ignoring interface %s (down)\n",
- names[i]);
- continue;
- }
-
- if (j == LNET_MAX_INTERFACES) {
- CWARN("Ignoring interface %s (too many interfaces)\n",
- names[i]);
- continue;
- }
-
- net->ksnn_interfaces[j].ksni_ipaddr = ip;
- net->ksnn_interfaces[j].ksni_netmask = mask;
- strlcpy(net->ksnn_interfaces[j].ksni_name,
- names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
- j++;
- }
-
- lnet_ipif_free_enumeration(names, n);
-
- if (!j)
- CERROR("Can't find any usable interfaces\n");
-
- return j;
-}
-
-static int
-ksocknal_search_new_ipif(struct ksock_net *net)
-{
- int new_ipif = 0;
- int i;
-
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
- char *colon = strchr(ifnam, ':');
- int found = 0;
- struct ksock_net *tmp;
- int j;
-
- if (colon) /* ignore alias device */
- *colon = 0;
-
- list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) {
- for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
- char *ifnam2 =
- &tmp->ksnn_interfaces[j].ksni_name[0];
- char *colon2 = strchr(ifnam2, ':');
-
- if (colon2)
- *colon2 = 0;
-
- found = !strcmp(ifnam, ifnam2);
- if (colon2)
- *colon2 = ':';
- }
- if (found)
- break;
- }
-
- new_ipif += !found;
- if (colon)
- *colon = ':';
- }
-
- return new_ipif;
-}
-
-static int
-ksocknal_start_schedulers(struct ksock_sched_info *info)
-{
- int nthrs;
- int rc = 0;
- int i;
-
- if (!info->ksi_nthreads) {
- if (*ksocknal_tunables.ksnd_nscheds > 0) {
- nthrs = info->ksi_nthreads_max;
- } else {
- nthrs = cfs_cpt_weight(lnet_cpt_table(),
- info->ksi_cpt);
- nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
- nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
- }
- nthrs = min(nthrs, info->ksi_nthreads_max);
- } else {
- LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
- /* increase two threads if there is new interface */
- nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
- }
-
- for (i = 0; i < nthrs; i++) {
- long id;
- char name[20];
- struct ksock_sched *sched;
-
- id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
- sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
- snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
- info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
-
- rc = ksocknal_thread_start(ksocknal_scheduler,
- (void *)id, name);
- if (!rc)
- continue;
-
- CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
- info->ksi_cpt, info->ksi_nthreads + i, rc);
- break;
- }
-
- info->ksi_nthreads += i;
- return rc;
-}
-
-static int
-ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
-{
- int newif = ksocknal_search_new_ipif(net);
- int rc;
- int i;
-
- LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
-
- for (i = 0; i < ncpts; i++) {
- struct ksock_sched_info *info;
- int cpt = !cpts ? i : cpts[i];
-
- LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
- info = ksocknal_data.ksnd_sched_info[cpt];
-
- if (!newif && info->ksi_nthreads > 0)
- continue;
-
- rc = ksocknal_start_schedulers(info);
- if (rc)
- return rc;
- }
- return 0;
-}
-
-int
-ksocknal_startup(struct lnet_ni *ni)
-{
- struct ksock_net *net;
- int rc;
- int i;
-
- LASSERT(ni->ni_lnd == &the_ksocklnd);
-
- if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
- rc = ksocknal_base_startup();
- if (rc)
- return rc;
- }
-
- net = kzalloc(sizeof(*net), GFP_NOFS);
- if (!net)
- goto fail_0;
-
- spin_lock_init(&net->ksnn_lock);
- net->ksnn_incarnation = ksocknal_new_incarnation();
- ni->ni_data = net;
- ni->ni_peertimeout = *ksocknal_tunables.ksnd_peertimeout;
- ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits;
- ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits;
- ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
-
- if (!ni->ni_interfaces[0]) {
- rc = ksocknal_enumerate_interfaces(net);
- if (rc <= 0)
- goto fail_1;
-
- net->ksnn_ninterfaces = 1;
- } else {
- for (i = 0; i < LNET_MAX_INTERFACES; i++) {
- int up;
-
- if (!ni->ni_interfaces[i])
- break;
-
- rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
- &net->ksnn_interfaces[i].ksni_ipaddr,
- &net->ksnn_interfaces[i].ksni_netmask);
-
- if (rc) {
- CERROR("Can't get interface %s info: %d\n",
- ni->ni_interfaces[i], rc);
- goto fail_1;
- }
-
- if (!up) {
- CERROR("Interface %s is down\n",
- ni->ni_interfaces[i]);
- goto fail_1;
- }
-
- strlcpy(net->ksnn_interfaces[i].ksni_name,
- ni->ni_interfaces[i],
- sizeof(net->ksnn_interfaces[i].ksni_name));
- }
- net->ksnn_ninterfaces = i;
- }
-
- /* call it before add it to ksocknal_data.ksnd_nets */
- rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
- if (rc)
- goto fail_1;
-
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
- net->ksnn_interfaces[0].ksni_ipaddr);
- list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
-
- ksocknal_data.ksnd_nnets++;
-
- return 0;
-
- fail_1:
- kfree(net);
- fail_0:
- if (!ksocknal_data.ksnd_nnets)
- ksocknal_base_shutdown();
-
- return -ENETDOWN;
-}
-
-static void __exit ksocklnd_exit(void)
-{
- lnet_unregister_lnd(&the_ksocklnd);
-}
-
-static int __init ksocklnd_init(void)
-{
- int rc;
-
- /* check ksnr_connected/connecting field large enough */
- BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
- BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);
-
- /* initialize the_ksocklnd */
- the_ksocklnd.lnd_type = SOCKLND;
- the_ksocklnd.lnd_startup = ksocknal_startup;
- the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
- the_ksocklnd.lnd_ctl = ksocknal_ctl;
- the_ksocklnd.lnd_send = ksocknal_send;
- the_ksocklnd.lnd_recv = ksocknal_recv;
- the_ksocklnd.lnd_notify = ksocknal_notify;
- the_ksocklnd.lnd_query = ksocknal_query;
- the_ksocklnd.lnd_accept = ksocknal_accept;
-
- rc = ksocknal_tunables_init();
- if (rc)
- return rc;
-
- lnet_register_lnd(&the_ksocklnd);
-
- return 0;
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
-MODULE_VERSION("2.7.0");
-MODULE_LICENSE("GPL");
-
-module_init(ksocklnd_init);
-module_exit(ksocklnd_exit);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
deleted file mode 100644
index 570f54ed57b1..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ /dev/null
@@ -1,705 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * Author: Zach Brown <zab@zabbo.net>
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eric@bartonsoftware.com>
- *
- * This file is part of Lustre, http://www.lustre.org
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _SOCKLND_SOCKLND_H_
-#define _SOCKLND_SOCKLND_H_
-
-#define DEBUG_PORTAL_ALLOC
-#define DEBUG_SUBSYSTEM S_LND
-
-#include <linux/crc32.h>
-#include <linux/errno.h>
-#include <linux/if.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kmod.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/syscalls.h>
-#include <linux/sysctl.h>
-#include <linux/uio.h>
-#include <linux/unistd.h>
-#include <asm/irq.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-lnet.h>
-#include <linux/lnet/socklnd.h>
-
-/* assume one thread for each connection type */
-#define SOCKNAL_NSCHEDS 3
-#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1)
-
-#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */
-#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
-#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
-#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */
-
-#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
-#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */
-
-#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
-
-/*
- * risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
- * no risk if we're not running on a CONFIG_HIGHMEM platform.
- */
-#ifdef CONFIG_HIGHMEM
-# define SOCKNAL_RISK_KMAP_DEADLOCK 0
-#else
-# define SOCKNAL_RISK_KMAP_DEADLOCK 1
-#endif
-
-struct ksock_sched_info;
-
-struct ksock_sched { /* per scheduler state */
- spinlock_t kss_lock; /* serialise */
- struct list_head kss_rx_conns; /* conn waiting to be read */
- struct list_head kss_tx_conns; /* conn waiting to be written */
- struct list_head kss_zombie_noop_txs; /* zombie noop tx list */
- wait_queue_head_t kss_waitq; /* where scheduler sleeps */
- int kss_nconns; /* # connections assigned to
- * this scheduler
- */
- struct ksock_sched_info *kss_info; /* owner of it */
-};
-
-struct ksock_sched_info {
- int ksi_nthreads_max; /* max allowed threads */
- int ksi_nthreads; /* number of threads */
- int ksi_cpt; /* CPT id */
- struct ksock_sched *ksi_scheds; /* array of schedulers */
-};
-
-#define KSOCK_CPT_SHIFT 16
-#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid))
-#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
-#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
-
-struct ksock_interface { /* in-use interface */
- __u32 ksni_ipaddr; /* interface's IP address */
- __u32 ksni_netmask; /* interface's network mask */
- int ksni_nroutes; /* # routes using (active) */
- int ksni_npeers; /* # peers using (passive) */
- char ksni_name[IFNAMSIZ]; /* interface name */
-};
-
-struct ksock_tunables {
- int *ksnd_timeout; /* "stuck" socket timeout
- * (seconds)
- */
- int *ksnd_nscheds; /* # scheduler threads in each
- * pool while starting
- */
- int *ksnd_nconnds; /* # connection daemons */
- int *ksnd_nconnds_max; /* max # connection daemons */
- int *ksnd_min_reconnectms; /* first connection retry after
- * (ms)...
- */
- int *ksnd_max_reconnectms; /* ...exponentially increasing to
- * this
- */
- int *ksnd_eager_ack; /* make TCP ack eagerly? */
- int *ksnd_typed_conns; /* drive sockets by type? */
- int *ksnd_min_bulk; /* smallest "large" message */
- int *ksnd_tx_buffer_size; /* socket tx buffer size */
- int *ksnd_rx_buffer_size; /* socket rx buffer size */
- int *ksnd_nagle; /* enable NAGLE? */
- int *ksnd_round_robin; /* round robin for multiple
- * interfaces
- */
- int *ksnd_keepalive; /* # secs for sending keepalive
- * NOOP
- */
- int *ksnd_keepalive_idle; /* # idle secs before 1st probe
- */
- int *ksnd_keepalive_count; /* # probes */
- int *ksnd_keepalive_intvl; /* time between probes */
- int *ksnd_credits; /* # concurrent sends */
- int *ksnd_peertxcredits; /* # concurrent sends to 1 peer
- */
- int *ksnd_peerrtrcredits; /* # per-peer router buffer
- * credits
- */
- int *ksnd_peertimeout; /* seconds to consider peer dead
- */
- int *ksnd_enable_csum; /* enable check sum */
- int *ksnd_inject_csum_error; /* set non-zero to inject
- * checksum error
- */
- int *ksnd_nonblk_zcack; /* always send zc-ack on
- * non-blocking connection
- */
- unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload
- * size
- */
- int *ksnd_zc_recv; /* enable ZC receive (for
- * Chelsio TOE)
- */
- int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
- * enable ZC receive
- */
-};
-
-struct ksock_net {
- __u64 ksnn_incarnation; /* my epoch */
- spinlock_t ksnn_lock; /* serialise */
- struct list_head ksnn_list; /* chain on global list */
- int ksnn_npeers; /* # peers */
- int ksnn_shutdown; /* shutting down? */
- int ksnn_ninterfaces; /* IP interfaces */
- struct ksock_interface ksnn_interfaces[LNET_MAX_INTERFACES];
-};
-
-/** connd timeout */
-#define SOCKNAL_CONND_TIMEOUT 120
-/** reserved thread for accepting & creating new connd */
-#define SOCKNAL_CONND_RESV 1
-
-struct ksock_nal_data {
- int ksnd_init; /* initialisation state
- */
- int ksnd_nnets; /* # networks set up */
- struct list_head ksnd_nets; /* list of nets */
- rwlock_t ksnd_global_lock; /* stabilize peer/conn
- * ops
- */
- struct list_head *ksnd_peers; /* hash table of all my
- * known peers
- */
- int ksnd_peer_hash_size; /* size of ksnd_peers */
-
- int ksnd_nthreads; /* # live threads */
- int ksnd_shuttingdown; /* tell threads to exit
- */
- struct ksock_sched_info **ksnd_sched_info; /* schedulers info */
-
- atomic_t ksnd_nactive_txs; /* #active txs */
-
- struct list_head ksnd_deathrow_conns; /* conns to close:
- * reaper_lock
- */
- struct list_head ksnd_zombie_conns; /* conns to free:
- * reaper_lock
- */
- struct list_head ksnd_enomem_conns; /* conns to retry:
- * reaper_lock
- */
- wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
- unsigned long ksnd_reaper_waketime; /* when reaper will wake
- */
- spinlock_t ksnd_reaper_lock; /* serialise */
-
- int ksnd_enomem_tx; /* test ENOMEM sender */
- int ksnd_stall_tx; /* test sluggish sender
- */
- int ksnd_stall_rx; /* test sluggish
- * receiver
- */
- struct list_head ksnd_connd_connreqs; /* incoming connection
- * requests
- */
- struct list_head ksnd_connd_routes; /* routes waiting to be
- * connected
- */
- wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
- int ksnd_connd_connecting; /* # connds connecting
- */
- time64_t ksnd_connd_failed_stamp;/* time stamp of the
- * last failed
- * connecting attempt
- */
- time64_t ksnd_connd_starting_stamp;/* time stamp of the
- * last starting connd
- */
- unsigned int ksnd_connd_starting; /* # starting connd */
- unsigned int ksnd_connd_running; /* # running connd */
- spinlock_t ksnd_connd_lock; /* serialise */
-
- struct list_head ksnd_idle_noop_txs; /* list head for freed
- * noop tx
- */
- spinlock_t ksnd_tx_lock; /* serialise, g_lock
- * unsafe
- */
-};
-
-#define SOCKNAL_INIT_NOTHING 0
-#define SOCKNAL_INIT_DATA 1
-#define SOCKNAL_INIT_ALL 2
-
-/*
- * A packet just assembled for transmission is represented by 1 or more
- * struct iovec fragments (the first frag contains the portals header),
- * followed by 0 or more struct bio_vec fragments.
- *
- * On the receive side, initially 1 struct iovec fragment is posted for
- * receive (the header). Once the header has been received, the payload is
- * received into either struct iovec or struct bio_vec fragments, depending on
- * what the header matched or whether the message needs forwarding.
- */
-struct ksock_conn; /* forward ref */
-struct ksock_peer; /* forward ref */
-struct ksock_route; /* forward ref */
-struct ksock_proto; /* forward ref */
-
-struct ksock_tx { /* transmit packet */
- struct list_head tx_list; /* queue on conn for transmission etc
- */
- struct list_head tx_zc_list; /* queue on peer for ZC request */
- atomic_t tx_refcount; /* tx reference count */
- int tx_nob; /* # packet bytes */
- int tx_resid; /* residual bytes */
- int tx_niov; /* # packet iovec frags */
- struct kvec *tx_iov; /* packet iovec frags */
- int tx_nkiov; /* # packet page frags */
- unsigned short tx_zc_aborted; /* aborted ZC request */
- unsigned short tx_zc_capable:1; /* payload is large enough for ZC */
- unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */
- unsigned short tx_nonblk:1; /* it's a non-blocking ACK */
- struct bio_vec *tx_kiov; /* packet page frags */
- struct ksock_conn *tx_conn; /* owning conn */
- struct lnet_msg *tx_lnetmsg; /* lnet message for lnet_finalize()
- */
- unsigned long tx_deadline; /* when (in jiffies) tx times out */
- struct ksock_msg tx_msg; /* socklnd message buffer */
- int tx_desc_size; /* size of this descriptor */
- union {
- struct {
- struct kvec iov; /* virt hdr */
- struct bio_vec kiov[0]; /* paged payload */
- } paged;
- struct {
- struct kvec iov[1]; /* virt hdr + payload */
- } virt;
- } tx_frags;
-};
-
-#define KSOCK_NOOP_TX_SIZE (offsetof(struct ksock_tx, tx_frags.paged.kiov[0]))
-
-/* network zero copy callback descriptor embedded in struct ksock_tx */
-
-#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
-#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
-#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */
-#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */
-#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
-#define SOCKNAL_RX_SLOP 6 /* skipping body */
-
-struct ksock_conn {
- struct ksock_peer *ksnc_peer; /* owning peer */
- struct ksock_route *ksnc_route; /* owning route */
- struct list_head ksnc_list; /* stash on peer's conn list */
- struct socket *ksnc_sock; /* actual socket */
- void *ksnc_saved_data_ready; /* socket's original
- * data_ready() callback
- */
- void *ksnc_saved_write_space; /* socket's original
- * write_space() callback
- */
- atomic_t ksnc_conn_refcount;/* conn refcount */
- atomic_t ksnc_sock_refcount;/* sock refcount */
- struct ksock_sched *ksnc_scheduler; /* who schedules this connection
- */
- __u32 ksnc_myipaddr; /* my IP */
- __u32 ksnc_ipaddr; /* peer's IP */
- int ksnc_port; /* peer's port */
- signed int ksnc_type:3; /* type of connection, should be
- * signed value
- */
- unsigned int ksnc_closing:1; /* being shut down */
- unsigned int ksnc_flip:1; /* flip or not, only for V2.x */
- unsigned int ksnc_zc_capable:1; /* enable to ZC */
- struct ksock_proto *ksnc_proto; /* protocol for the connection */
-
- /* reader */
- struct list_head ksnc_rx_list; /* where I enq waiting input or a
- * forwarding descriptor
- */
- unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times
- * out
- */
- __u8 ksnc_rx_started; /* started receiving a message */
- __u8 ksnc_rx_ready; /* data ready to read */
- __u8 ksnc_rx_scheduled; /* being progressed */
- __u8 ksnc_rx_state; /* what is being read */
- int ksnc_rx_nob_left; /* # bytes to next hdr/body */
- struct iov_iter ksnc_rx_to; /* copy destination */
- struct kvec ksnc_rx_iov_space[LNET_MAX_IOV]; /* space for frag descriptors */
- __u32 ksnc_rx_csum; /* partial checksum for incoming
- * data
- */
- void *ksnc_cookie; /* rx lnet_finalize passthru arg
- */
- struct ksock_msg ksnc_msg; /* incoming message buffer:
- * V2.x message takes the
- * whole struct
- * V1.x message is a bare
- * struct lnet_hdr, it's stored in
- * ksnc_msg.ksm_u.lnetmsg
- */
- /* WRITER */
- struct list_head ksnc_tx_list; /* where I enq waiting for output
- * space
- */
- struct list_head ksnc_tx_queue; /* packets waiting to be sent */
- struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet
- * message or ZC-ACK
- */
- unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out
- */
- int ksnc_tx_bufnob; /* send buffer marker */
- atomic_t ksnc_tx_nob; /* # bytes queued */
- int ksnc_tx_ready; /* write space */
- int ksnc_tx_scheduled; /* being progressed */
- unsigned long ksnc_tx_last_post; /* time stamp of the last posted
- * TX
- */
-};
-
-struct ksock_route {
- struct list_head ksnr_list; /* chain on peer route list */
- struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
- struct ksock_peer *ksnr_peer; /* owning peer */
- atomic_t ksnr_refcount; /* # users */
- unsigned long ksnr_timeout; /* when (in jiffies) reconnection
- * can happen next
- */
- long ksnr_retry_interval; /* how long between retries */
- __u32 ksnr_myipaddr; /* my IP */
- __u32 ksnr_ipaddr; /* IP address to connect to */
- int ksnr_port; /* port to connect to */
- unsigned int ksnr_scheduled:1; /* scheduled for attention */
- unsigned int ksnr_connecting:1; /* connection establishment in
- * progress
- */
- unsigned int ksnr_connected:4; /* connections established by
- * type
- */
- unsigned int ksnr_deleted:1; /* been removed from peer? */
- unsigned int ksnr_share_count; /* created explicitly? */
- int ksnr_conn_count; /* # conns established by this
- * route
- */
-};
-
-#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
-
-struct ksock_peer {
- struct list_head ksnp_list; /* stash on global peer list */
- unsigned long ksnp_last_alive; /* when (in jiffies) I was last
- * alive
- */
- struct lnet_process_id ksnp_id; /* who's on the other end(s) */
- atomic_t ksnp_refcount; /* # users */
- int ksnp_sharecount; /* lconf usage counter */
- int ksnp_closing; /* being closed */
- int ksnp_accepting; /* # passive connections pending
- */
- int ksnp_error; /* errno on closing last conn */
- __u64 ksnp_zc_next_cookie; /* ZC completion cookie */
- __u64 ksnp_incarnation; /* latest known peer incarnation
- */
- struct ksock_proto *ksnp_proto; /* latest known peer protocol */
- struct list_head ksnp_conns; /* all active connections */
- struct list_head ksnp_routes; /* routes */
- struct list_head ksnp_tx_queue; /* waiting packets */
- spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
- struct list_head ksnp_zc_req_list; /* zero copy requests wait for
- * ACK
- */
- unsigned long ksnp_send_keepalive; /* time to send keepalive */
- struct lnet_ni *ksnp_ni; /* which network */
- int ksnp_n_passive_ips; /* # of... */
-
- /* preferred local interfaces */
- __u32 ksnp_passive_ips[LNET_MAX_INTERFACES];
-};
-
-struct ksock_connreq {
- struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
- struct lnet_ni *ksncr_ni; /* chosen NI */
- struct socket *ksncr_sock; /* accepted socket */
-};
-
-extern struct ksock_nal_data ksocknal_data;
-extern struct ksock_tunables ksocknal_tunables;
-
-#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
-#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
-#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
- * preferred
- */
-
-struct ksock_proto {
- /* version number of protocol */
- int pro_version;
-
- /* handshake function */
- int (*pro_send_hello)(struct ksock_conn *, struct ksock_hello_msg *);
-
- /* handshake function */
- int (*pro_recv_hello)(struct ksock_conn *, struct ksock_hello_msg *, int);
-
- /* message pack */
- void (*pro_pack)(struct ksock_tx *);
-
- /* message unpack */
- void (*pro_unpack)(struct ksock_msg *);
-
- /* queue tx on the connection */
- struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *, struct ksock_tx *);
-
- /* queue ZC ack on the connection */
- int (*pro_queue_tx_zcack)(struct ksock_conn *, struct ksock_tx *, __u64);
-
- /* handle ZC request */
- int (*pro_handle_zcreq)(struct ksock_conn *, __u64, int);
-
- /* handle ZC ACK */
- int (*pro_handle_zcack)(struct ksock_conn *, __u64, __u64);
-
- /*
- * msg type matches the connection type:
- * return value:
- * return MATCH_NO : no
- * return MATCH_YES : matching type
- * return MATCH_MAY : can be backup
- */
- int (*pro_match_tx)(struct ksock_conn *, struct ksock_tx *, int);
-};
-
-extern struct ksock_proto ksocknal_protocol_v1x;
-extern struct ksock_proto ksocknal_protocol_v2x;
-extern struct ksock_proto ksocknal_protocol_v3x;
-
-#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
-#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
-#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR
-
-#ifndef CPU_MASK_NONE
-#define CPU_MASK_NONE 0UL
-#endif
-
-static inline int
-ksocknal_route_mask(void)
-{
- if (!*ksocknal_tunables.ksnd_typed_conns)
- return (1 << SOCKLND_CONN_ANY);
-
- return ((1 << SOCKLND_CONN_CONTROL) |
- (1 << SOCKLND_CONN_BULK_IN) |
- (1 << SOCKLND_CONN_BULK_OUT));
-}
-
-static inline struct list_head *
-ksocknal_nid2peerlist(lnet_nid_t nid)
-{
- unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
-
- return &ksocknal_data.ksnd_peers[hash];
-}
-
-static inline void
-ksocknal_conn_addref(struct ksock_conn *conn)
-{
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
- atomic_inc(&conn->ksnc_conn_refcount);
-}
-
-void ksocknal_queue_zombie_conn(struct ksock_conn *conn);
-void ksocknal_finalize_zcreq(struct ksock_conn *conn);
-
-static inline void
-ksocknal_conn_decref(struct ksock_conn *conn)
-{
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
- if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
- ksocknal_queue_zombie_conn(conn);
-}
-
-static inline int
-ksocknal_connsock_addref(struct ksock_conn *conn)
-{
- int rc = -ESHUTDOWN;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
- if (!conn->ksnc_closing) {
- LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
- atomic_inc(&conn->ksnc_sock_refcount);
- rc = 0;
- }
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- return rc;
-}
-
-static inline void
-ksocknal_connsock_decref(struct ksock_conn *conn)
-{
- LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
- if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
- LASSERT(conn->ksnc_closing);
- sock_release(conn->ksnc_sock);
- conn->ksnc_sock = NULL;
- ksocknal_finalize_zcreq(conn);
- }
-}
-
-static inline void
-ksocknal_tx_addref(struct ksock_tx *tx)
-{
- LASSERT(atomic_read(&tx->tx_refcount) > 0);
- atomic_inc(&tx->tx_refcount);
-}
-
-void ksocknal_tx_prep(struct ksock_conn *, struct ksock_tx *tx);
-void ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx);
-
-static inline void
-ksocknal_tx_decref(struct ksock_tx *tx)
-{
- LASSERT(atomic_read(&tx->tx_refcount) > 0);
- if (atomic_dec_and_test(&tx->tx_refcount))
- ksocknal_tx_done(NULL, tx);
-}
-
-static inline void
-ksocknal_route_addref(struct ksock_route *route)
-{
- LASSERT(atomic_read(&route->ksnr_refcount) > 0);
- atomic_inc(&route->ksnr_refcount);
-}
-
-void ksocknal_destroy_route(struct ksock_route *route);
-
-static inline void
-ksocknal_route_decref(struct ksock_route *route)
-{
- LASSERT(atomic_read(&route->ksnr_refcount) > 0);
- if (atomic_dec_and_test(&route->ksnr_refcount))
- ksocknal_destroy_route(route);
-}
-
-static inline void
-ksocknal_peer_addref(struct ksock_peer *peer)
-{
- LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
- atomic_inc(&peer->ksnp_refcount);
-}
-
-void ksocknal_destroy_peer(struct ksock_peer *peer);
-
-static inline void
-ksocknal_peer_decref(struct ksock_peer *peer)
-{
- LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
- if (atomic_dec_and_test(&peer->ksnp_refcount))
- ksocknal_destroy_peer(peer);
-}
-
-int ksocknal_startup(struct lnet_ni *ni);
-void ksocknal_shutdown(struct lnet_ni *ni);
-int ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg);
-int ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
-int ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
- int delayed, struct iov_iter *to, unsigned int rlen);
-int ksocknal_accept(struct lnet_ni *ni, struct socket *sock);
-
-int ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip,
- int port);
-struct ksock_peer *ksocknal_find_peer_locked(struct lnet_ni *ni,
- struct lnet_process_id id);
-struct ksock_peer *ksocknal_find_peer(struct lnet_ni *ni,
- struct lnet_process_id id);
-void ksocknal_peer_failed(struct ksock_peer *peer);
-int ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
- struct socket *sock, int type);
-void ksocknal_close_conn_locked(struct ksock_conn *conn, int why);
-void ksocknal_terminate_conn(struct ksock_conn *conn);
-void ksocknal_destroy_conn(struct ksock_conn *conn);
-int ksocknal_close_peer_conns_locked(struct ksock_peer *peer,
- __u32 ipaddr, int why);
-int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why);
-int ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr);
-struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer *peer,
- struct ksock_tx *tx, int nonblk);
-
-int ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
- struct lnet_process_id id);
-struct ksock_tx *ksocknal_alloc_tx(int type, int size);
-void ksocknal_free_tx(struct ksock_tx *tx);
-struct ksock_tx *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
-void ksocknal_next_tx_carrier(struct ksock_conn *conn);
-void ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn);
-void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error);
-void ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive);
-void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
-int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
-void ksocknal_thread_fini(void);
-void ksocknal_launch_all_connections_locked(struct ksock_peer *peer);
-struct ksock_route *ksocknal_find_connectable_route_locked(struct ksock_peer *peer);
-struct ksock_route *ksocknal_find_connecting_route_locked(struct ksock_peer *peer);
-int ksocknal_new_packet(struct ksock_conn *conn, int skip);
-int ksocknal_scheduler(void *arg);
-int ksocknal_connd(void *arg);
-int ksocknal_reaper(void *arg);
-int ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
- lnet_nid_t peer_nid, struct ksock_hello_msg *hello);
-int ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
- struct ksock_hello_msg *hello,
- struct lnet_process_id *id,
- __u64 *incarnation);
-void ksocknal_read_callback(struct ksock_conn *conn);
-void ksocknal_write_callback(struct ksock_conn *conn);
-
-int ksocknal_lib_zc_capable(struct ksock_conn *conn);
-void ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn);
-void ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn);
-void ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn);
-void ksocknal_lib_push_conn(struct ksock_conn *conn);
-int ksocknal_lib_get_conn_addrs(struct ksock_conn *conn);
-int ksocknal_lib_setup_sock(struct socket *so);
-int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx);
-int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx);
-void ksocknal_lib_eager_ack(struct ksock_conn *conn);
-int ksocknal_lib_recv(struct ksock_conn *conn);
-int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
- int *rxmem, int *nagle);
-
-void ksocknal_read_callback(struct ksock_conn *conn);
-void ksocknal_write_callback(struct ksock_conn *conn);
-
-int ksocknal_tunables_init(void);
-
-void ksocknal_lib_csum_tx(struct ksock_tx *tx);
-
-int ksocknal_lib_memory_pressure(struct ksock_conn *conn);
-int ksocknal_lib_bind_thread_to_cpu(int id);
-
-#endif /* _SOCKLND_SOCKLND_H_ */
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
deleted file mode 100644
index 036fecbcede8..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ /dev/null
@@ -1,2592 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * Author: Zach Brown <zab@zabbo.net>
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eric@bartonsoftware.com>
- *
- * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "socklnd.h"
-
-struct ksock_tx *
-ksocknal_alloc_tx(int type, int size)
-{
- struct ksock_tx *tx = NULL;
-
- if (type == KSOCK_MSG_NOOP) {
- LASSERT(size == KSOCK_NOOP_TX_SIZE);
-
- /* searching for a noop tx in free list */
- spin_lock(&ksocknal_data.ksnd_tx_lock);
-
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
- struct ksock_tx, tx_list);
- LASSERT(tx->tx_desc_size == size);
- list_del(&tx->tx_list);
- }
-
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
- }
-
- if (!tx)
- tx = kzalloc(size, GFP_NOFS);
-
- if (!tx)
- return NULL;
-
- atomic_set(&tx->tx_refcount, 1);
- tx->tx_zc_aborted = 0;
- tx->tx_zc_capable = 0;
- tx->tx_zc_checked = 0;
- tx->tx_desc_size = size;
-
- atomic_inc(&ksocknal_data.ksnd_nactive_txs);
-
- return tx;
-}
-
-struct ksock_tx *
-ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
-{
- struct ksock_tx *tx;
-
- tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
- if (!tx) {
- CERROR("Can't allocate noop tx desc\n");
- return NULL;
- }
-
- tx->tx_conn = NULL;
- tx->tx_lnetmsg = NULL;
- tx->tx_kiov = NULL;
- tx->tx_nkiov = 0;
- tx->tx_iov = tx->tx_frags.virt.iov;
- tx->tx_niov = 1;
- tx->tx_nonblk = nonblk;
-
- tx->tx_msg.ksm_csum = 0;
- tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
-
- return tx;
-}
-
-void
-ksocknal_free_tx(struct ksock_tx *tx)
-{
- atomic_dec(&ksocknal_data.ksnd_nactive_txs);
-
- if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
- /* it's a noop tx */
- spin_lock(&ksocknal_data.ksnd_tx_lock);
-
- list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
-
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
- } else {
- kfree(tx);
- }
-}
-
-static int
-ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
-{
- struct kvec *iov = tx->tx_iov;
- int nob;
- int rc;
-
- LASSERT(tx->tx_niov > 0);
-
- /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
- rc = ksocknal_lib_send_iov(conn, tx);
-
- if (rc <= 0) /* sent nothing? */
- return rc;
-
- nob = rc;
- LASSERT(nob <= tx->tx_resid);
- tx->tx_resid -= nob;
-
- /* "consume" iov */
- do {
- LASSERT(tx->tx_niov > 0);
-
- if (nob < (int)iov->iov_len) {
- iov->iov_base = (void *)((char *)iov->iov_base + nob);
- iov->iov_len -= nob;
- return rc;
- }
-
- nob -= iov->iov_len;
- tx->tx_iov = ++iov;
- tx->tx_niov--;
- } while (nob);
-
- return rc;
-}
-
-static int
-ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
-{
- struct bio_vec *kiov = tx->tx_kiov;
- int nob;
- int rc;
-
- LASSERT(!tx->tx_niov);
- LASSERT(tx->tx_nkiov > 0);
-
- /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
- rc = ksocknal_lib_send_kiov(conn, tx);
-
- if (rc <= 0) /* sent nothing? */
- return rc;
-
- nob = rc;
- LASSERT(nob <= tx->tx_resid);
- tx->tx_resid -= nob;
-
- /* "consume" kiov */
- do {
- LASSERT(tx->tx_nkiov > 0);
-
- if (nob < (int)kiov->bv_len) {
- kiov->bv_offset += nob;
- kiov->bv_len -= nob;
- return rc;
- }
-
- nob -= (int)kiov->bv_len;
- tx->tx_kiov = ++kiov;
- tx->tx_nkiov--;
- } while (nob);
-
- return rc;
-}
-
-static int
-ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
-{
- int rc;
- int bufnob;
-
- if (ksocknal_data.ksnd_stall_tx) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(ksocknal_data.ksnd_stall_tx * HZ);
- }
-
- LASSERT(tx->tx_resid);
-
- rc = ksocknal_connsock_addref(conn);
- if (rc) {
- LASSERT(conn->ksnc_closing);
- return -ESHUTDOWN;
- }
-
- do {
- if (ksocknal_data.ksnd_enomem_tx > 0) {
- /* testing... */
- ksocknal_data.ksnd_enomem_tx--;
- rc = -EAGAIN;
- } else if (tx->tx_niov) {
- rc = ksocknal_send_iov(conn, tx);
- } else {
- rc = ksocknal_send_kiov(conn, tx);
- }
-
- bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
- if (rc > 0) /* sent something? */
- conn->ksnc_tx_bufnob += rc; /* account it */
-
- if (bufnob < conn->ksnc_tx_bufnob) {
- /*
- * allocated send buffer bytes < computed; infer
- * something got ACKed
- */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_tx_bufnob = bufnob;
- mb();
- }
-
- if (rc <= 0) { /* Didn't write anything? */
-
- if (!rc) /* some stacks return 0 instead of -EAGAIN */
- rc = -EAGAIN;
-
- /* Check if EAGAIN is due to memory pressure */
- if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
- rc = -ENOMEM;
-
- break;
- }
-
- /* socket's wmem_queued now includes 'rc' bytes */
- atomic_sub(rc, &conn->ksnc_tx_nob);
- rc = 0;
-
- } while (tx->tx_resid);
-
- ksocknal_connsock_decref(conn);
- return rc;
-}
-
-static int
-ksocknal_recv_iter(struct ksock_conn *conn)
-{
- int nob;
- int rc;
-
- /*
- * Never touch conn->ksnc_rx_to or change connection
- * status inside ksocknal_lib_recv
- */
- rc = ksocknal_lib_recv(conn);
-
- if (rc <= 0)
- return rc;
-
- /* received something... */
- nob = rc;
-
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
-
- conn->ksnc_rx_nob_left -= nob;
-
- iov_iter_advance(&conn->ksnc_rx_to, nob);
- if (iov_iter_count(&conn->ksnc_rx_to))
- return -EAGAIN;
-
- return 1;
-}
-
-static int
-ksocknal_receive(struct ksock_conn *conn)
-{
- /*
- * Return 1 on success, 0 on EOF, < 0 on error.
- * Caller checks ksnc_rx_to to determine
- * progress/completion.
- */
- int rc;
-
- if (ksocknal_data.ksnd_stall_rx) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(ksocknal_data.ksnd_stall_rx * HZ);
- }
-
- rc = ksocknal_connsock_addref(conn);
- if (rc) {
- LASSERT(conn->ksnc_closing);
- return -ESHUTDOWN;
- }
-
- for (;;) {
- rc = ksocknal_recv_iter(conn);
- if (rc <= 0) {
- /* error/EOF or partial receive */
- if (rc == -EAGAIN) {
- rc = 1;
- } else if (!rc && conn->ksnc_rx_started) {
- /* EOF in the middle of a message */
- rc = -EPROTO;
- }
- break;
- }
-
- /* Completed a fragment */
-
- if (!iov_iter_count(&conn->ksnc_rx_to)) {
- rc = 1;
- break;
- }
- }
-
- ksocknal_connsock_decref(conn);
- return rc;
-}
-
-void
-ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx)
-{
- struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
- int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;
-
- LASSERT(ni || tx->tx_conn);
-
- if (tx->tx_conn)
- ksocknal_conn_decref(tx->tx_conn);
-
- if (!ni && tx->tx_conn)
- ni = tx->tx_conn->ksnc_peer->ksnp_ni;
-
- ksocknal_free_tx(tx);
- if (lnetmsg) /* KSOCK_MSG_NOOP go without lnetmsg */
- lnet_finalize(ni, lnetmsg, rc);
-}
-
-void
-ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
-{
- struct ksock_tx *tx;
-
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, struct ksock_tx, tx_list);
-
- if (error && tx->tx_lnetmsg) {
- CNETERR("Deleting packet type %d len %d %s->%s\n",
- le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
- le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
- libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
- libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
- } else if (error) {
- CNETERR("Deleting noop packet\n");
- }
-
- list_del(&tx->tx_list);
-
- LASSERT(atomic_read(&tx->tx_refcount) == 1);
- ksocknal_tx_done(ni, tx);
- }
-}
-
-static void
-ksocknal_check_zc_req(struct ksock_tx *tx)
-{
- struct ksock_conn *conn = tx->tx_conn;
- struct ksock_peer *peer = conn->ksnc_peer;
-
- /*
- * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
- * to ksnp_zc_req_list if some fragment of this message should be sent
- * zero-copy. Our peer will send an ACK containing this cookie when
- * she has received this message to tell us we can signal completion.
- * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
- * ksnp_zc_req_list.
- */
- LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT(tx->tx_zc_capable);
-
- tx->tx_zc_checked = 1;
-
- if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
- !conn->ksnc_zc_capable)
- return;
-
- /*
- * assign cookie and queue tx to pending list, it will be released when
- * a matching ack is received. See ksocknal_handle_zcack()
- */
- ksocknal_tx_addref(tx);
-
- spin_lock(&peer->ksnp_lock);
-
- /* ZC_REQ is going to be pinned to the peer */
- tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-
- LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
-
- tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
-
- if (!peer->ksnp_zc_next_cookie)
- peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
-
- list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
-
- spin_unlock(&peer->ksnp_lock);
-}
-
-static void
-ksocknal_uncheck_zc_req(struct ksock_tx *tx)
-{
- struct ksock_peer *peer = tx->tx_conn->ksnc_peer;
-
- LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT(tx->tx_zc_capable);
-
- tx->tx_zc_checked = 0;
-
- spin_lock(&peer->ksnp_lock);
-
- if (!tx->tx_msg.ksm_zc_cookies[0]) {
- /* Not waiting for an ACK */
- spin_unlock(&peer->ksnp_lock);
- return;
- }
-
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- list_del(&tx->tx_zc_list);
-
- spin_unlock(&peer->ksnp_lock);
-
- ksocknal_tx_decref(tx);
-}
-
-static int
-ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
-{
- int rc;
-
- if (tx->tx_zc_capable && !tx->tx_zc_checked)
- ksocknal_check_zc_req(tx);
-
- rc = ksocknal_transmit(conn, tx);
-
- CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
-
- if (!tx->tx_resid) {
- /* Sent everything OK */
- LASSERT(!rc);
-
- return 0;
- }
-
- if (rc == -EAGAIN)
- return rc;
-
- if (rc == -ENOMEM) {
- static int counter;
-
- counter++; /* exponential backoff warnings */
- if ((counter & (-counter)) == counter)
- CWARN("%u ENOMEM tx %p\n", counter, conn);
-
- /* Queue on ksnd_enomem_conns for retry after a timeout */
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- /* enomem list takes over scheduler's ref... */
- LASSERT(conn->ksnc_tx_scheduled);
- list_add_tail(&conn->ksnc_tx_list,
- &ksocknal_data.ksnd_enomem_conns);
- if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
- SOCKNAL_ENOMEM_RETRY),
- ksocknal_data.ksnd_reaper_waketime))
- wake_up(&ksocknal_data.ksnd_reaper_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- return rc;
- }
-
- /* Actual error */
- LASSERT(rc < 0);
-
- if (!conn->ksnc_closing) {
- switch (rc) {
- case -ECONNRESET:
- LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
- &conn->ksnc_ipaddr);
- break;
- default:
- LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
- &conn->ksnc_ipaddr, rc);
- break;
- }
- CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
- conn, rc,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- }
-
- if (tx->tx_zc_checked)
- ksocknal_uncheck_zc_req(tx);
-
- /* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 0 : rc);
-
- return rc;
-}
-
-static void
-ksocknal_launch_connection_locked(struct ksock_route *route)
-{
- /* called holding write lock on ksnd_global_lock */
-
- LASSERT(!route->ksnr_scheduled);
- LASSERT(!route->ksnr_connecting);
- LASSERT(ksocknal_route_mask() & ~route->ksnr_connected);
-
- route->ksnr_scheduled = 1; /* scheduling conn for connd */
- ksocknal_route_addref(route); /* extra ref for connd */
-
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
-
- list_add_tail(&route->ksnr_connd_list,
- &ksocknal_data.ksnd_connd_routes);
- wake_up(&ksocknal_data.ksnd_connd_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-}
-
-void
-ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
-{
- struct ksock_route *route;
-
- /* called holding write lock on ksnd_global_lock */
- for (;;) {
- /* launch any/all connections that need it */
- route = ksocknal_find_connectable_route_locked(peer);
- if (!route)
- return;
-
- ksocknal_launch_connection_locked(route);
- }
-}
-
-struct ksock_conn *
-ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
- int nonblk)
-{
- struct list_head *tmp;
- struct ksock_conn *conn;
- struct ksock_conn *typed = NULL;
- struct ksock_conn *fallback = NULL;
- int tnob = 0;
- int fnob = 0;
-
- list_for_each(tmp, &peer->ksnp_conns) {
- struct ksock_conn *c;
- int nob, rc;
-
- c = list_entry(tmp, struct ksock_conn, ksnc_list);
- nob = atomic_read(&c->ksnc_tx_nob) +
- c->ksnc_sock->sk->sk_wmem_queued;
-
- LASSERT(!c->ksnc_closing);
- LASSERT(c->ksnc_proto &&
- c->ksnc_proto->pro_match_tx);
-
- rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
-
- switch (rc) {
- default:
- LBUG();
- case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
- continue;
-
- case SOCKNAL_MATCH_YES: /* typed connection */
- if (!typed || tnob > nob ||
- (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
- cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
- typed = c;
- tnob = nob;
- }
- break;
-
- case SOCKNAL_MATCH_MAY: /* fallback connection */
- if (!fallback || fnob > nob ||
- (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
- cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
- fallback = c;
- fnob = nob;
- }
- break;
- }
- }
-
- /* prefer the typed selection */
- conn = (typed) ? typed : fallback;
-
- if (conn)
- conn->ksnc_tx_last_post = cfs_time_current();
-
- return conn;
-}
-
-void
-ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
-{
- conn->ksnc_proto->pro_pack(tx);
-
- atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
- ksocknal_conn_addref(conn); /* +1 ref for tx */
- tx->tx_conn = conn;
-}
-
-void
-ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
-{
- struct ksock_sched *sched = conn->ksnc_scheduler;
- struct ksock_msg *msg = &tx->tx_msg;
- struct ksock_tx *ztx = NULL;
- int bufnob = 0;
-
- /*
- * called holding global lock (read or irq-write) and caller may
- * not have dropped this lock between finding conn and calling me,
- * so we don't need the {get,put}connsock dance to deref
- * ksnc_sock...
- */
- LASSERT(!conn->ksnc_closing);
-
- CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port);
-
- ksocknal_tx_prep(conn, tx);
-
- /*
- * Ensure the frags we've been given EXACTLY match the number of
- * bytes we want to send. Many TCP/IP stacks disregard any total
- * size parameters passed to them and just look at the frags.
- *
- * We always expect at least 1 mapped fragment containing the
- * complete ksocknal message header.
- */
- LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
- lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
- (unsigned int)tx->tx_nob);
- LASSERT(tx->tx_niov >= 1);
- LASSERT(tx->tx_resid == tx->tx_nob);
-
- CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
- tx, (tx->tx_lnetmsg) ? tx->tx_lnetmsg->msg_hdr.type :
- KSOCK_MSG_NOOP,
- tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
-
- /*
- * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
- * but they're used inside spinlocks a lot.
- */
- bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
- spin_lock_bh(&sched->kss_lock);
-
- if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
- /* First packet starts the timeout */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_tx_bufnob = 0;
- mb(); /* order with adding to tx_queue */
- }
-
- if (msg->ksm_type == KSOCK_MSG_NOOP) {
- /*
- * The packet is noop ZC ACK, try to piggyback the ack_cookie
- * on a normal packet so I don't need to send it
- */
- LASSERT(msg->ksm_zc_cookies[1]);
- LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
-
- /* ZC ACK piggybacked on ztx release tx later */
- if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
- ztx = tx;
- } else {
- /*
- * It's a normal packet - can it piggback a noop zc-ack that
- * has been queued already?
- */
- LASSERT(!msg->ksm_zc_cookies[1]);
- LASSERT(conn->ksnc_proto->pro_queue_tx_msg);
-
- ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
- /* ztx will be released later */
- }
-
- if (ztx) {
- atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
- list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
- }
-
- if (conn->ksnc_tx_ready && /* able to send */
- !conn->ksnc_tx_scheduled) { /* not scheduled to send */
- /* +1 ref for scheduler */
- ksocknal_conn_addref(conn);
- list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- wake_up(&sched->kss_waitq);
- }
-
- spin_unlock_bh(&sched->kss_lock);
-}
-
-struct ksock_route *
-ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
-{
- unsigned long now = cfs_time_current();
- struct list_head *tmp;
- struct ksock_route *route;
-
- list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
-
- LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
-
- /* connections being established */
- if (route->ksnr_scheduled)
- continue;
-
- /* all route types connected ? */
- if (!(ksocknal_route_mask() & ~route->ksnr_connected))
- continue;
-
- if (!(!route->ksnr_retry_interval || /* first attempt */
- cfs_time_aftereq(now, route->ksnr_timeout))) {
- CDEBUG(D_NET,
- "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
- &route->ksnr_ipaddr,
- route->ksnr_connected,
- route->ksnr_retry_interval,
- cfs_duration_sec(route->ksnr_timeout - now));
- continue;
- }
-
- return route;
- }
-
- return NULL;
-}
-
-struct ksock_route *
-ksocknal_find_connecting_route_locked(struct ksock_peer *peer)
-{
- struct list_head *tmp;
- struct ksock_route *route;
-
- list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
-
- LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
-
- if (route->ksnr_scheduled)
- return route;
- }
-
- return NULL;
-}
-
-int
-ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
- struct lnet_process_id id)
-{
- struct ksock_peer *peer;
- struct ksock_conn *conn;
- rwlock_t *g_lock;
- int retry;
- int rc;
-
- LASSERT(!tx->tx_conn);
-
- g_lock = &ksocknal_data.ksnd_global_lock;
-
- for (retry = 0;; retry = 1) {
- read_lock(g_lock);
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer) {
- if (!ksocknal_find_connectable_route_locked(peer)) {
- conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
- if (conn) {
- /*
- * I've got no routes that need to be
- * connecting and I do have an actual
- * connection...
- */
- ksocknal_queue_tx_locked(tx, conn);
- read_unlock(g_lock);
- return 0;
- }
- }
- }
-
- /* I'll need a write lock... */
- read_unlock(g_lock);
-
- write_lock_bh(g_lock);
-
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer)
- break;
-
- write_unlock_bh(g_lock);
-
- if (id.pid & LNET_PID_USERFLAG) {
- CERROR("Refusing to create a connection to userspace process %s\n",
- libcfs_id2str(id));
- return -EHOSTUNREACH;
- }
-
- if (retry) {
- CERROR("Can't find peer %s\n", libcfs_id2str(id));
- return -EHOSTUNREACH;
- }
-
- rc = ksocknal_add_peer(ni, id,
- LNET_NIDADDR(id.nid),
- lnet_acceptor_port());
- if (rc) {
- CERROR("Can't add peer %s: %d\n",
- libcfs_id2str(id), rc);
- return rc;
- }
- }
-
- ksocknal_launch_all_connections_locked(peer);
-
- conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
- if (conn) {
- /* Connection exists; queue message on it */
- ksocknal_queue_tx_locked(tx, conn);
- write_unlock_bh(g_lock);
- return 0;
- }
-
- if (peer->ksnp_accepting > 0 ||
- ksocknal_find_connecting_route_locked(peer)) {
- /* the message is going to be pinned to the peer */
- tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-
- /* Queue the message until a connection is established */
- list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
- write_unlock_bh(g_lock);
- return 0;
- }
-
- write_unlock_bh(g_lock);
-
- /* NB Routes may be ignored if connections to them failed recently */
- CNETERR("No usable routes to %s\n", libcfs_id2str(id));
- return -EHOSTUNREACH;
-}
-
-int
-ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
-{
- int mpflag = 1;
- int type = lntmsg->msg_type;
- struct lnet_process_id target = lntmsg->msg_target;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct kvec *payload_iov = lntmsg->msg_iov;
- struct bio_vec *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
- struct ksock_tx *tx;
- int desc_size;
- int rc;
-
- /*
- * NB 'private' is different depending on what we're sending.
- * Just ignore it...
- */
- CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
- payload_nob, payload_niov, libcfs_id2str(target));
-
- LASSERT(!payload_nob || payload_niov > 0);
- LASSERT(payload_niov <= LNET_MAX_IOV);
- /* payload is either all vaddrs or all pages */
- LASSERT(!(payload_kiov && payload_iov));
- LASSERT(!in_interrupt());
-
- if (payload_iov)
- desc_size = offsetof(struct ksock_tx,
- tx_frags.virt.iov[1 + payload_niov]);
- else
- desc_size = offsetof(struct ksock_tx,
- tx_frags.paged.kiov[payload_niov]);
-
- if (lntmsg->msg_vmflush)
- mpflag = cfs_memory_pressure_get_and_set();
- tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
- if (!tx) {
- CERROR("Can't allocate tx desc type %d size %d\n",
- type, desc_size);
- if (lntmsg->msg_vmflush)
- cfs_memory_pressure_restore(mpflag);
- return -ENOMEM;
- }
-
- tx->tx_conn = NULL; /* set when assigned a conn */
- tx->tx_lnetmsg = lntmsg;
-
- if (payload_iov) {
- tx->tx_kiov = NULL;
- tx->tx_nkiov = 0;
- tx->tx_iov = tx->tx_frags.virt.iov;
- tx->tx_niov = 1 +
- lnet_extract_iov(payload_niov, &tx->tx_iov[1],
- payload_niov, payload_iov,
- payload_offset, payload_nob);
- } else {
- tx->tx_niov = 1;
- tx->tx_iov = &tx->tx_frags.paged.iov;
- tx->tx_kiov = tx->tx_frags.paged.kiov;
- tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
-
- if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
- tx->tx_zc_capable = 1;
- }
-
- tx->tx_msg.ksm_csum = 0;
- tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- tx->tx_msg.ksm_zc_cookies[1] = 0;
-
- /* The first fragment will be set later in pro_pack */
- rc = ksocknal_launch_packet(ni, tx, target);
- if (!mpflag)
- cfs_memory_pressure_restore(mpflag);
-
- if (!rc)
- return 0;
-
- ksocknal_free_tx(tx);
- return -EIO;
-}
-
-int
-ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads++;
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- return 0;
-}
-
-void
-ksocknal_thread_fini(void)
-{
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads--;
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-}
-
-int
-ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
-{
- static char ksocknal_slop_buffer[4096];
- struct kvec *kvec = conn->ksnc_rx_iov_space;
-
- int nob;
- unsigned int niov;
- int skipped;
-
- LASSERT(conn->ksnc_proto);
-
- if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) {
- /* Remind the socket to ack eagerly... */
- ksocknal_lib_eager_ack(conn);
- }
-
- if (!nob_to_skip) { /* right at next packet boundary now */
- conn->ksnc_rx_started = 0;
- mb(); /* racing with timeout thread */
-
- switch (conn->ksnc_proto->pro_version) {
- case KSOCK_PROTO_V2:
- case KSOCK_PROTO_V3:
- conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
- kvec->iov_base = &conn->ksnc_msg;
- kvec->iov_len = offsetof(struct ksock_msg, ksm_u);
- conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
- iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
- 1, offsetof(struct ksock_msg, ksm_u));
- break;
-
- case KSOCK_PROTO_V1:
- /* Receiving bare struct lnet_hdr */
- conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- kvec->iov_len = sizeof(struct lnet_hdr);
- conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);
- iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
- 1, sizeof(struct lnet_hdr));
- break;
-
- default:
- LBUG();
- }
- conn->ksnc_rx_csum = ~0;
- return 1;
- }
-
- /*
- * Set up to skip as much as possible now. If there's more left
- * (ran out of iov entries) we'll get called again
- */
- conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
- conn->ksnc_rx_nob_left = nob_to_skip;
- skipped = 0;
- niov = 0;
-
- do {
- nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
-
- kvec[niov].iov_base = ksocknal_slop_buffer;
- kvec[niov].iov_len = nob;
- niov++;
- skipped += nob;
- nob_to_skip -= nob;
-
- } while (nob_to_skip && /* mustn't overflow conn's rx iov */
- niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
-
- iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, niov, skipped);
- return 0;
-}
-
-static int
-ksocknal_process_receive(struct ksock_conn *conn)
-{
- struct kvec *kvec = conn->ksnc_rx_iov_space;
- struct lnet_hdr *lhdr;
- struct lnet_process_id *id;
- int rc;
-
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
-
- /* NB: sched lock NOT held */
- /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
- LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
- conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
- again:
- if (iov_iter_count(&conn->ksnc_rx_to)) {
- rc = ksocknal_receive(conn);
-
- if (rc <= 0) {
- LASSERT(rc != -EAGAIN);
-
- if (!rc)
- CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
- conn,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- else if (!conn->ksnc_closing)
- CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
- conn, rc,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
-
- /* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings(conn,
- (conn->ksnc_closing) ? 0 : rc);
- return (!rc ? -ESHUTDOWN : rc);
- }
-
- if (iov_iter_count(&conn->ksnc_rx_to)) {
- /* short read */
- return -EAGAIN;
- }
- }
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_KSM_HEADER:
- if (conn->ksnc_flip) {
- __swab32s(&conn->ksnc_msg.ksm_type);
- __swab32s(&conn->ksnc_msg.ksm_csum);
- __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
- __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
- }
-
- if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
- conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
- CERROR("%s: Unknown message type: %x\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- conn->ksnc_msg.ksm_type);
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings(conn, -EPROTO);
- return -EPROTO;
- }
-
- if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
- conn->ksnc_msg.ksm_csum && /* has checksum */
- conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
- /* NOOP Checksum error */
- CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings(conn, -EPROTO);
- return -EIO;
- }
-
- if (conn->ksnc_msg.ksm_zc_cookies[1]) {
- __u64 cookie = 0;
-
- LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
-
- if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
- cookie = conn->ksnc_msg.ksm_zc_cookies[0];
-
- rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
- conn->ksnc_msg.ksm_zc_cookies[1]);
-
- if (rc) {
- CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings(conn, -EPROTO);
- return rc;
- }
- }
-
- if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
- ksocknal_new_packet(conn, 0);
- return 0; /* NOOP is done and just return */
- }
-
- conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);
-
- kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- kvec->iov_len = sizeof(struct ksock_lnet_msg);
-
- iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
- 1, sizeof(struct ksock_lnet_msg));
-
- goto again; /* read lnet header now */
-
- case SOCKNAL_RX_LNET_HEADER:
- /* unpack message header */
- conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
-
- if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) {
- /* Userspace peer */
- lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
- id = &conn->ksnc_peer->ksnp_id;
-
- /* Substitute process ID assigned at connection time */
- lhdr->src_pid = cpu_to_le32(id->pid);
- lhdr->src_nid = cpu_to_le64(id->nid);
- }
-
- conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
- ksocknal_conn_addref(conn); /* ++ref while parsing */
-
- rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
- &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
- conn->ksnc_peer->ksnp_id.nid, conn, 0);
- if (rc < 0) {
- /* I just received garbage: give up on this conn */
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings(conn, rc);
- ksocknal_conn_decref(conn);
- return -EPROTO;
- }
-
- /* I'm racing with ksocknal_recv() */
- LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
-
- if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
- return 0;
-
- /* ksocknal_recv() got called */
- goto again;
-
- case SOCKNAL_RX_LNET_PAYLOAD:
- /* payload all received */
- rc = 0;
-
- if (!conn->ksnc_rx_nob_left && /* not truncating */
- conn->ksnc_msg.ksm_csum && /* has checksum */
- conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
- CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
- rc = -EIO;
- }
-
- if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) {
- LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
-
- lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
- id = &conn->ksnc_peer->ksnp_id;
-
- rc = conn->ksnc_proto->pro_handle_zcreq(conn,
- conn->ksnc_msg.ksm_zc_cookies[0],
- *ksocknal_tunables.ksnd_nonblk_zcack ||
- le64_to_cpu(lhdr->src_nid) != id->nid);
- }
-
- lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
-
- if (rc) {
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings(conn, rc);
- return -EPROTO;
- }
- /* Fall through */
-
- case SOCKNAL_RX_SLOP:
- /* starting new packet? */
- if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
- return 0; /* come back later */
- goto again; /* try to finish reading slop now */
-
- default:
- break;
- }
-
- /* Not Reached */
- LBUG();
- return -EINVAL; /* keep gcc happy */
-}
-
-int
-ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
- int delayed, struct iov_iter *to, unsigned int rlen)
-{
- struct ksock_conn *conn = private;
- struct ksock_sched *sched = conn->ksnc_scheduler;
-
- LASSERT(iov_iter_count(to) <= rlen);
- LASSERT(to->nr_segs <= LNET_MAX_IOV);
-
- conn->ksnc_cookie = msg;
- conn->ksnc_rx_nob_left = rlen;
-
- conn->ksnc_rx_to = *to;
-
- LASSERT(conn->ksnc_rx_scheduled);
-
- spin_lock_bh(&sched->kss_lock);
-
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_PARSE_WAIT:
- list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
- wake_up(&sched->kss_waitq);
- LASSERT(conn->ksnc_rx_ready);
- break;
-
- case SOCKNAL_RX_PARSE:
- /* scheduler hasn't noticed I'm parsing yet */
- break;
- }
-
- conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
-
- spin_unlock_bh(&sched->kss_lock);
- ksocknal_conn_decref(conn);
- return 0;
-}
-
-static inline int
-ksocknal_sched_cansleep(struct ksock_sched *sched)
-{
- int rc;
-
- spin_lock_bh(&sched->kss_lock);
-
- rc = !ksocknal_data.ksnd_shuttingdown &&
- list_empty(&sched->kss_rx_conns) &&
- list_empty(&sched->kss_tx_conns);
-
- spin_unlock_bh(&sched->kss_lock);
- return rc;
-}
-
-int ksocknal_scheduler(void *arg)
-{
- struct ksock_sched_info *info;
- struct ksock_sched *sched;
- struct ksock_conn *conn;
- struct ksock_tx *tx;
- int rc;
- int nloops = 0;
- long id = (long)arg;
-
- info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
- sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
-
- rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
- if (rc) {
- CWARN("Can't set CPU partition affinity to %d: %d\n",
- info->ksi_cpt, rc);
- }
-
- spin_lock_bh(&sched->kss_lock);
-
- while (!ksocknal_data.ksnd_shuttingdown) {
- int did_something = 0;
-
- /* Ensure I progress everything semi-fairly */
-
- if (!list_empty(&sched->kss_rx_conns)) {
- conn = list_entry(sched->kss_rx_conns.next,
- struct ksock_conn, ksnc_rx_list);
- list_del(&conn->ksnc_rx_list);
-
- LASSERT(conn->ksnc_rx_scheduled);
- LASSERT(conn->ksnc_rx_ready);
-
- /*
- * clear rx_ready in case receive isn't complete.
- * Do it BEFORE we call process_recv, since
- * data_ready can set it any time after we release
- * kss_lock.
- */
- conn->ksnc_rx_ready = 0;
- spin_unlock_bh(&sched->kss_lock);
-
- rc = ksocknal_process_receive(conn);
-
- spin_lock_bh(&sched->kss_lock);
-
- /* I'm the only one that can clear this flag */
- LASSERT(conn->ksnc_rx_scheduled);
-
- /* Did process_receive get everything it wanted? */
- if (!rc)
- conn->ksnc_rx_ready = 1;
-
- if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
- /*
- * Conn blocked waiting for ksocknal_recv()
- * I change its state (under lock) to signal
- * it can be rescheduled
- */
- conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
- } else if (conn->ksnc_rx_ready) {
- /* reschedule for rx */
- list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
- } else {
- conn->ksnc_rx_scheduled = 0;
- /* drop my ref */
- ksocknal_conn_decref(conn);
- }
-
- did_something = 1;
- }
-
- if (!list_empty(&sched->kss_tx_conns)) {
- LIST_HEAD(zlist);
-
- if (!list_empty(&sched->kss_zombie_noop_txs)) {
- list_add(&zlist, &sched->kss_zombie_noop_txs);
- list_del_init(&sched->kss_zombie_noop_txs);
- }
-
- conn = list_entry(sched->kss_tx_conns.next,
- struct ksock_conn, ksnc_tx_list);
- list_del(&conn->ksnc_tx_list);
-
- LASSERT(conn->ksnc_tx_scheduled);
- LASSERT(conn->ksnc_tx_ready);
- LASSERT(!list_empty(&conn->ksnc_tx_queue));
-
- tx = list_entry(conn->ksnc_tx_queue.next,
- struct ksock_tx, tx_list);
-
- if (conn->ksnc_tx_carrier == tx)
- ksocknal_next_tx_carrier(conn);
-
- /* dequeue now so empty list => more to send */
- list_del(&tx->tx_list);
-
- /*
- * Clear tx_ready in case send isn't complete. Do
- * it BEFORE we call process_transmit, since
- * write_space can set it any time after we release
- * kss_lock.
- */
- conn->ksnc_tx_ready = 0;
- spin_unlock_bh(&sched->kss_lock);
-
- if (!list_empty(&zlist)) {
- /*
- * free zombie noop txs, it's fast because
- * noop txs are just put in freelist
- */
- ksocknal_txlist_done(NULL, &zlist, 0);
- }
-
- rc = ksocknal_process_transmit(conn, tx);
-
- if (rc == -ENOMEM || rc == -EAGAIN) {
- /*
- * Incomplete send: replace tx on HEAD of
- * tx_queue
- */
- spin_lock_bh(&sched->kss_lock);
- list_add(&tx->tx_list, &conn->ksnc_tx_queue);
- } else {
- /* Complete send; tx -ref */
- ksocknal_tx_decref(tx);
-
- spin_lock_bh(&sched->kss_lock);
- /* assume space for more */
- conn->ksnc_tx_ready = 1;
- }
-
- if (rc == -ENOMEM) {
- /*
- * Do nothing; after a short timeout, this
- * conn will be reposted on kss_tx_conns.
- */
- } else if (conn->ksnc_tx_ready &&
- !list_empty(&conn->ksnc_tx_queue)) {
- /* reschedule for tx */
- list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- } else {
- conn->ksnc_tx_scheduled = 0;
- /* drop my ref */
- ksocknal_conn_decref(conn);
- }
-
- did_something = 1;
- }
- if (!did_something || /* nothing to do */
- ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
- spin_unlock_bh(&sched->kss_lock);
-
- nloops = 0;
-
- if (!did_something) { /* wait for something to do */
- rc = wait_event_interruptible_exclusive(
- sched->kss_waitq,
- !ksocknal_sched_cansleep(sched));
- LASSERT(!rc);
- } else {
- cond_resched();
- }
-
- spin_lock_bh(&sched->kss_lock);
- }
- }
-
- spin_unlock_bh(&sched->kss_lock);
- ksocknal_thread_fini();
- return 0;
-}
-
-/*
- * Add connection to kss_rx_conns of scheduler
- * and wakeup the scheduler.
- */
-void ksocknal_read_callback(struct ksock_conn *conn)
-{
- struct ksock_sched *sched;
-
- sched = conn->ksnc_scheduler;
-
- spin_lock_bh(&sched->kss_lock);
-
- conn->ksnc_rx_ready = 1;
-
- if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
- conn->ksnc_rx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
-
- wake_up(&sched->kss_waitq);
- }
- spin_unlock_bh(&sched->kss_lock);
-}
-
-/*
- * Add connection to kss_tx_conns of scheduler
- * and wakeup the scheduler.
- */
-void ksocknal_write_callback(struct ksock_conn *conn)
-{
- struct ksock_sched *sched;
-
- sched = conn->ksnc_scheduler;
-
- spin_lock_bh(&sched->kss_lock);
-
- conn->ksnc_tx_ready = 1;
-
- if (!conn->ksnc_tx_scheduled && /* not being progressed */
- !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
- list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
-
- wake_up(&sched->kss_waitq);
- }
-
- spin_unlock_bh(&sched->kss_lock);
-}
-
-static struct ksock_proto *
-ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
-{
- __u32 version = 0;
-
- if (hello->kshm_magic == LNET_PROTO_MAGIC)
- version = hello->kshm_version;
- else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
- version = __swab32(hello->kshm_version);
-
- if (version) {
-#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol == 1)
- return NULL;
-
- if (*ksocknal_tunables.ksnd_protocol == 2 &&
- version == KSOCK_PROTO_V3)
- return NULL;
-#endif
- if (version == KSOCK_PROTO_V2)
- return &ksocknal_protocol_v2x;
-
- if (version == KSOCK_PROTO_V3)
- return &ksocknal_protocol_v3x;
-
- return NULL;
- }
-
- if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
- struct lnet_magicversion *hmv = (struct lnet_magicversion *)hello;
-
- BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
- offsetof(struct ksock_hello_msg, kshm_src_nid));
-
- if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
- hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
- return &ksocknal_protocol_v1x;
- }
-
- return NULL;
-}
-
-int
-ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
- lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
-{
- /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
- struct ksock_net *net = (struct ksock_net *)ni->ni_data;
-
- LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
-
- /* rely on caller to hold a ref on socket so it wouldn't disappear */
- LASSERT(conn->ksnc_proto);
-
- hello->kshm_src_nid = ni->ni_nid;
- hello->kshm_dst_nid = peer_nid;
- hello->kshm_src_pid = the_lnet.ln_pid;
-
- hello->kshm_src_incarnation = net->ksnn_incarnation;
- hello->kshm_ctype = conn->ksnc_type;
-
- return conn->ksnc_proto->pro_send_hello(conn, hello);
-}
-
-static int
-ksocknal_invert_type(int type)
-{
- switch (type) {
- case SOCKLND_CONN_ANY:
- case SOCKLND_CONN_CONTROL:
- return type;
- case SOCKLND_CONN_BULK_IN:
- return SOCKLND_CONN_BULK_OUT;
- case SOCKLND_CONN_BULK_OUT:
- return SOCKLND_CONN_BULK_IN;
- default:
- return SOCKLND_CONN_NONE;
- }
-}
-
-int
-ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
- struct ksock_hello_msg *hello,
- struct lnet_process_id *peerid,
- __u64 *incarnation)
-{
- /* Return < 0 fatal error
- * 0 success
- * EALREADY lost connection race
- * EPROTO protocol version mismatch
- */
- struct socket *sock = conn->ksnc_sock;
- int active = !!conn->ksnc_proto;
- int timeout;
- int proto_match;
- int rc;
- struct ksock_proto *proto;
- struct lnet_process_id recv_id;
-
- /* socket type set on active connections - not set on passive */
- LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
-
- timeout = active ? *ksocknal_tunables.ksnd_timeout :
- lnet_acceptor_timeout();
-
- rc = lnet_sock_read(sock, &hello->kshm_magic,
- sizeof(hello->kshm_magic), timeout);
- if (rc) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0);
- return rc;
- }
-
- if (hello->kshm_magic != LNET_PROTO_MAGIC &&
- hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
- hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
- /* Unexpected magic! */
- CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
- __cpu_to_le32(hello->kshm_magic),
- LNET_PROTO_TCP_MAGIC,
- &conn->ksnc_ipaddr);
- return -EPROTO;
- }
-
- rc = lnet_sock_read(sock, &hello->kshm_version,
- sizeof(hello->kshm_version), timeout);
- if (rc) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0);
- return rc;
- }
-
- proto = ksocknal_parse_proto_version(hello);
- if (!proto) {
- if (!active) {
- /* unknown protocol from peer, tell peer my protocol */
- conn->ksnc_proto = &ksocknal_protocol_v3x;
-#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol == 2)
- conn->ksnc_proto = &ksocknal_protocol_v2x;
- else if (*ksocknal_tunables.ksnd_protocol == 1)
- conn->ksnc_proto = &ksocknal_protocol_v1x;
-#endif
- hello->kshm_nips = 0;
- ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
- }
-
- CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
- conn->ksnc_proto->pro_version,
- &conn->ksnc_ipaddr);
-
- return -EPROTO;
- }
-
- proto_match = (conn->ksnc_proto == proto);
- conn->ksnc_proto = proto;
-
- /* receive the rest of hello message anyway */
- rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
- if (rc) {
- CERROR("Error %d reading or checking hello from from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0);
- return rc;
- }
-
- *incarnation = hello->kshm_src_incarnation;
-
- if (hello->kshm_src_nid == LNET_NID_ANY) {
- CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
- &conn->ksnc_ipaddr);
- return -EPROTO;
- }
-
- if (!active &&
- conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
- /* Userspace NAL assigns peer process ID from socket */
- recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
- recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
- conn->ksnc_ipaddr);
- } else {
- recv_id.nid = hello->kshm_src_nid;
- recv_id.pid = hello->kshm_src_pid;
- }
-
- if (!active) {
- *peerid = recv_id;
-
- /* peer determines type */
- conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
- if (conn->ksnc_type == SOCKLND_CONN_NONE) {
- CERROR("Unexpected type %d from %s ip %pI4h\n",
- hello->kshm_ctype, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr);
- return -EPROTO;
- }
-
- return 0;
- }
-
- if (peerid->pid != recv_id.pid ||
- peerid->nid != recv_id.nid) {
- LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
- libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr,
- libcfs_id2str(recv_id));
- return -EPROTO;
- }
-
- if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
- /* Possible protocol mismatch or I lost the connection race */
- return proto_match ? EALREADY : EPROTO;
- }
-
- if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
- CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
- conn->ksnc_type, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr, hello->kshm_ctype);
- return -EPROTO;
- }
-
- return 0;
-}
-
-static int
-ksocknal_connect(struct ksock_route *route)
-{
- LIST_HEAD(zombies);
- struct ksock_peer *peer = route->ksnr_peer;
- int type;
- int wanted;
- struct socket *sock;
- unsigned long deadline;
- int retry_later = 0;
- int rc = 0;
-
- deadline = cfs_time_add(cfs_time_current(),
- *ksocknal_tunables.ksnd_timeout * HZ);
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- LASSERT(route->ksnr_scheduled);
- LASSERT(!route->ksnr_connecting);
-
- route->ksnr_connecting = 1;
-
- for (;;) {
- wanted = ksocknal_route_mask() & ~route->ksnr_connected;
-
- /*
- * stop connecting if peer/route got closed under me, or
- * route got connected while queued
- */
- if (peer->ksnp_closing || route->ksnr_deleted ||
- !wanted) {
- retry_later = 0;
- break;
- }
-
- /* reschedule if peer is connecting to me */
- if (peer->ksnp_accepting > 0) {
- CDEBUG(D_NET,
- "peer %s(%d) already connecting to me, retry later.\n",
- libcfs_nid2str(peer->ksnp_id.nid),
- peer->ksnp_accepting);
- retry_later = 1;
- }
-
- if (retry_later) /* needs reschedule */
- break;
-
- if (wanted & BIT(SOCKLND_CONN_ANY)) {
- type = SOCKLND_CONN_ANY;
- } else if (wanted & BIT(SOCKLND_CONN_CONTROL)) {
- type = SOCKLND_CONN_CONTROL;
- } else if (wanted & BIT(SOCKLND_CONN_BULK_IN)) {
- type = SOCKLND_CONN_BULK_IN;
- } else {
- LASSERT(wanted & BIT(SOCKLND_CONN_BULK_OUT));
- type = SOCKLND_CONN_BULK_OUT;
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- if (cfs_time_aftereq(cfs_time_current(), deadline)) {
- rc = -ETIMEDOUT;
- lnet_connect_console_error(rc, peer->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
-
- rc = lnet_connect(&sock, peer->ksnp_id.nid,
- route->ksnr_myipaddr,
- route->ksnr_ipaddr, route->ksnr_port);
- if (rc)
- goto failed;
-
- rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
- if (rc < 0) {
- lnet_connect_console_error(rc, peer->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
-
- /*
- * A +ve RC means I have to retry because I lost the connection
- * race or I have to renegotiate protocol version
- */
- retry_later = (rc);
- if (retry_later)
- CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
- libcfs_nid2str(peer->ksnp_id.nid));
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- }
-
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
-
- if (retry_later) {
- /*
- * re-queue for attention; this frees me up to handle
- * the peer's incoming connection request
- */
- if (rc == EALREADY ||
- (!rc && peer->ksnp_accepting > 0)) {
- /*
- * We want to introduce a delay before next
- * attempt to connect if we lost conn race,
- * but the race is resolved quickly usually,
- * so min_reconnectms should be good heuristic
- */
- route->ksnr_retry_interval =
- *ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000;
- route->ksnr_timeout = cfs_time_add(cfs_time_current(),
- route->ksnr_retry_interval);
- }
-
- ksocknal_launch_connection_locked(route);
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- return retry_later;
-
- failed:
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
-
- /* This is a retry rather than a new connection */
- route->ksnr_retry_interval *= 2;
- route->ksnr_retry_interval =
- max(route->ksnr_retry_interval,
- (long)*ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000);
- route->ksnr_retry_interval =
- min(route->ksnr_retry_interval,
- (long)*ksocknal_tunables.ksnd_max_reconnectms * HZ / 1000);
-
- LASSERT(route->ksnr_retry_interval);
- route->ksnr_timeout = cfs_time_add(cfs_time_current(),
- route->ksnr_retry_interval);
-
- if (!list_empty(&peer->ksnp_tx_queue) &&
- !peer->ksnp_accepting &&
- !ksocknal_find_connecting_route_locked(peer)) {
- struct ksock_conn *conn;
-
- /*
- * ksnp_tx_queue is queued on a conn on successful
- * connection for V1.x and V2.x
- */
- if (!list_empty(&peer->ksnp_conns)) {
- conn = list_entry(peer->ksnp_conns.next,
- struct ksock_conn, ksnc_list);
- LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
- }
-
- /*
- * take all the blocked packets while I've got the lock and
- * complete below...
- */
- list_splice_init(&peer->ksnp_tx_queue, &zombies);
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_peer_failed(peer);
- ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
- return 0;
-}
-
-/*
- * check whether we need to create more connds.
- * It will try to create new thread if it's necessary, @timeout can
- * be updated if failed to create, so caller wouldn't keep try while
- * running out of resource.
- */
-static int
-ksocknal_connd_check_start(time64_t sec, long *timeout)
-{
- char name[16];
- int rc;
- int total = ksocknal_data.ksnd_connd_starting +
- ksocknal_data.ksnd_connd_running;
-
- if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
- /* still in initializing */
- return 0;
- }
-
- if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
- total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
- /*
- * can't create more connd, or still have enough
- * threads to handle more connecting
- */
- return 0;
- }
-
- if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
- /* no pending connecting request */
- return 0;
- }
-
- if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
- /* may run out of resource, retry later */
- *timeout = HZ;
- return 0;
- }
-
- if (ksocknal_data.ksnd_connd_starting > 0) {
- /* serialize starting to avoid flood */
- return 0;
- }
-
- ksocknal_data.ksnd_connd_starting_stamp = sec;
- ksocknal_data.ksnd_connd_starting++;
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
- /* NB: total is the next id */
- snprintf(name, sizeof(name), "socknal_cd%02d", total);
- rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
-
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- if (!rc)
- return 1;
-
- /* we tried ... */
- LASSERT(ksocknal_data.ksnd_connd_starting > 0);
- ksocknal_data.ksnd_connd_starting--;
- ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();
-
- return 1;
-}
-
-/*
- * check whether current thread can exit, it will return 1 if there are too
- * many threads and no creating in past 120 seconds.
- * Also, this function may update @timeout to make caller come back
- * again to recheck these conditions.
- */
-static int
-ksocknal_connd_check_stop(time64_t sec, long *timeout)
-{
- int val;
-
- if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
- /* still in initializing */
- return 0;
- }
-
- if (ksocknal_data.ksnd_connd_starting > 0) {
- /* in progress of starting new thread */
- return 0;
- }
-
- if (ksocknal_data.ksnd_connd_running <=
- *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
- return 0;
- }
-
- /* created thread in past 120 seconds? */
- val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
- SOCKNAL_CONND_TIMEOUT - sec);
-
- *timeout = (val > 0) ? val * HZ :
- SOCKNAL_CONND_TIMEOUT * HZ;
- if (val > 0)
- return 0;
-
- /* no creating in past 120 seconds */
-
- return ksocknal_data.ksnd_connd_running >
- ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
-}
-
-/*
- * Go through connd_routes queue looking for a route that we can process
- * right now, @timeout_p can be updated if we need to come back later
- */
-static struct ksock_route *
-ksocknal_connd_get_route_locked(signed long *timeout_p)
-{
- struct ksock_route *route;
- unsigned long now;
-
- now = cfs_time_current();
-
- /* connd_routes can contain both pending and ordinary routes */
- list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
- ksnr_connd_list) {
- if (!route->ksnr_retry_interval ||
- cfs_time_aftereq(now, route->ksnr_timeout))
- return route;
-
- if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
- (int)*timeout_p > (int)(route->ksnr_timeout - now))
- *timeout_p = (int)(route->ksnr_timeout - now);
- }
-
- return NULL;
-}
-
-int
-ksocknal_connd(void *arg)
-{
- spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- struct ksock_connreq *cr;
- wait_queue_entry_t wait;
- int nloops = 0;
- int cons_retry = 0;
-
- init_waitqueue_entry(&wait, current);
-
- spin_lock_bh(connd_lock);
-
- LASSERT(ksocknal_data.ksnd_connd_starting > 0);
- ksocknal_data.ksnd_connd_starting--;
- ksocknal_data.ksnd_connd_running++;
-
- while (!ksocknal_data.ksnd_shuttingdown) {
- struct ksock_route *route = NULL;
- time64_t sec = ktime_get_real_seconds();
- long timeout = MAX_SCHEDULE_TIMEOUT;
- int dropped_lock = 0;
-
- if (ksocknal_connd_check_stop(sec, &timeout)) {
- /* wakeup another one to check stop */
- wake_up(&ksocknal_data.ksnd_connd_waitq);
- break;
- }
-
- if (ksocknal_connd_check_start(sec, &timeout)) {
- /* created new thread */
- dropped_lock = 1;
- }
-
- if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
- /* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
- struct ksock_connreq, ksncr_list);
-
- list_del(&cr->ksncr_list);
- spin_unlock_bh(connd_lock);
- dropped_lock = 1;
-
- ksocknal_create_conn(cr->ksncr_ni, NULL,
- cr->ksncr_sock, SOCKLND_CONN_NONE);
- lnet_ni_decref(cr->ksncr_ni);
- kfree(cr);
-
- spin_lock_bh(connd_lock);
- }
-
- /*
- * Only handle an outgoing connection request if there
- * is a thread left to handle incoming connections and
- * create new connd
- */
- if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
- ksocknal_data.ksnd_connd_running) {
- route = ksocknal_connd_get_route_locked(&timeout);
- }
- if (route) {
- list_del(&route->ksnr_connd_list);
- ksocknal_data.ksnd_connd_connecting++;
- spin_unlock_bh(connd_lock);
- dropped_lock = 1;
-
- if (ksocknal_connect(route)) {
- /* consecutive retry */
- if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
- CWARN("massive consecutive re-connecting to %pI4h\n",
- &route->ksnr_ipaddr);
- cons_retry = 0;
- }
- } else {
- cons_retry = 0;
- }
-
- ksocknal_route_decref(route);
-
- spin_lock_bh(connd_lock);
- ksocknal_data.ksnd_connd_connecting--;
- }
-
- if (dropped_lock) {
- if (++nloops < SOCKNAL_RESCHED)
- continue;
- spin_unlock_bh(connd_lock);
- nloops = 0;
- cond_resched();
- spin_lock_bh(connd_lock);
- continue;
- }
-
- /* Nothing to do for 'timeout' */
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
- &wait);
- spin_unlock_bh(connd_lock);
-
- nloops = 0;
- schedule_timeout(timeout);
-
- remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
- spin_lock_bh(connd_lock);
- }
- ksocknal_data.ksnd_connd_running--;
- spin_unlock_bh(connd_lock);
-
- ksocknal_thread_fini();
- return 0;
-}
-
-static struct ksock_conn *
-ksocknal_find_timed_out_conn(struct ksock_peer *peer)
-{
- /* We're called with a shared lock on ksnd_global_lock */
- struct ksock_conn *conn;
- struct list_head *ctmp;
-
- list_for_each(ctmp, &peer->ksnp_conns) {
- int error;
-
- conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
-
- /* Don't need the {get,put}connsock dance to deref ksnc_sock */
- LASSERT(!conn->ksnc_closing);
-
- /*
- * SOCK_ERROR will reset error code of socket in
- * some platform (like Darwin8.x)
- */
- error = conn->ksnc_sock->sk->sk_err;
- if (error) {
- ksocknal_conn_addref(conn);
-
- switch (error) {
- case ECONNRESET:
- CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- case ETIMEDOUT:
- CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- default:
- CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d\n",
- error,
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- }
-
- return conn;
- }
-
- if (conn->ksnc_rx_started &&
- cfs_time_aftereq(cfs_time_current(),
- conn->ksnc_rx_deadline)) {
- /* Timed out incomplete incoming message */
- ksocknal_conn_addref(conn);
- CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %zd left %d\n",
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port,
- conn->ksnc_rx_state,
- iov_iter_count(&conn->ksnc_rx_to),
- conn->ksnc_rx_nob_left);
- return conn;
- }
-
- if ((!list_empty(&conn->ksnc_tx_queue) ||
- conn->ksnc_sock->sk->sk_wmem_queued) &&
- cfs_time_aftereq(cfs_time_current(),
- conn->ksnc_tx_deadline)) {
- /*
- * Timed out messages queued for sending or
- * buffered in the socket's send buffer
- */
- ksocknal_conn_addref(conn);
- CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- return conn;
- }
- }
-
- return NULL;
-}
-
-static inline void
-ksocknal_flush_stale_txs(struct ksock_peer *peer)
-{
- struct ksock_tx *tx;
- struct ksock_tx *tmp;
- LIST_HEAD(stale_txs);
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
- if (!cfs_time_aftereq(cfs_time_current(),
- tx->tx_deadline))
- break;
-
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &stale_txs);
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
-}
-
-static int
-ksocknal_send_keepalive_locked(struct ksock_peer *peer)
- __must_hold(&ksocknal_data.ksnd_global_lock)
-{
- struct ksock_sched *sched;
- struct ksock_conn *conn;
- struct ksock_tx *tx;
-
- /* last_alive will be updated by create_conn */
- if (list_empty(&peer->ksnp_conns))
- return 0;
-
- if (peer->ksnp_proto != &ksocknal_protocol_v3x)
- return 0;
-
- if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
- time_before(cfs_time_current(),
- cfs_time_add(peer->ksnp_last_alive,
- *ksocknal_tunables.ksnd_keepalive * HZ)))
- return 0;
-
- if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
- return 0;
-
- /*
- * retry 10 secs later, so we wouldn't put pressure
- * on this peer if we failed to send keepalive this time
- */
- peer->ksnp_send_keepalive = cfs_time_shift(10);
-
- conn = ksocknal_find_conn_locked(peer, NULL, 1);
- if (conn) {
- sched = conn->ksnc_scheduler;
-
- spin_lock_bh(&sched->kss_lock);
- if (!list_empty(&conn->ksnc_tx_queue)) {
- spin_unlock_bh(&sched->kss_lock);
- /* there is an queued ACK, don't need keepalive */
- return 0;
- }
-
- spin_unlock_bh(&sched->kss_lock);
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- /* cookie = 1 is reserved for keepalive PING */
- tx = ksocknal_alloc_tx_noop(1, 1);
- if (!tx) {
- read_lock(&ksocknal_data.ksnd_global_lock);
- return -ENOMEM;
- }
-
- if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) {
- read_lock(&ksocknal_data.ksnd_global_lock);
- return 1;
- }
-
- ksocknal_free_tx(tx);
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- return -EIO;
-}
-
-static void
-ksocknal_check_peer_timeouts(int idx)
-{
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
- struct ksock_peer *peer;
- struct ksock_conn *conn;
- struct ksock_tx *tx;
-
- again:
- /*
- * NB. We expect to have a look at all the peers and not find any
- * connections to time out, so we just use a shared lock while we
- * take a look...
- */
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- list_for_each_entry(peer, peers, ksnp_list) {
- unsigned long deadline = 0;
- struct ksock_tx *tx_stale;
- int resid = 0;
- int n = 0;
-
- if (ksocknal_send_keepalive_locked(peer)) {
- read_unlock(&ksocknal_data.ksnd_global_lock);
- goto again;
- }
-
- conn = ksocknal_find_timed_out_conn(peer);
-
- if (conn) {
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
-
- /*
- * NB we won't find this one again, but we can't
- * just proceed with the next peer, since we dropped
- * ksnd_global_lock and it might be dead already!
- */
- ksocknal_conn_decref(conn);
- goto again;
- }
-
- /*
- * we can't process stale txs right here because we're
- * holding only shared lock
- */
- if (!list_empty(&peer->ksnp_tx_queue)) {
- tx = list_entry(peer->ksnp_tx_queue.next,
- struct ksock_tx, tx_list);
-
- if (cfs_time_aftereq(cfs_time_current(),
- tx->tx_deadline)) {
- ksocknal_peer_addref(peer);
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_flush_stale_txs(peer);
-
- ksocknal_peer_decref(peer);
- goto again;
- }
- }
-
- if (list_empty(&peer->ksnp_zc_req_list))
- continue;
-
- tx_stale = NULL;
- spin_lock(&peer->ksnp_lock);
- list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
- if (!cfs_time_aftereq(cfs_time_current(),
- tx->tx_deadline))
- break;
- /* ignore the TX if connection is being closed */
- if (tx->tx_conn->ksnc_closing)
- continue;
- if (!tx_stale)
- tx_stale = tx;
- n++;
- }
-
- if (!tx_stale) {
- spin_unlock(&peer->ksnp_lock);
- continue;
- }
-
- deadline = tx_stale->tx_deadline;
- resid = tx_stale->tx_resid;
- conn = tx_stale->tx_conn;
- ksocknal_conn_addref(conn);
-
- spin_unlock(&peer->ksnp_lock);
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
- n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale,
- cfs_duration_sec(cfs_time_current() - deadline),
- resid, conn->ksnc_sock->sk->sk_wmem_queued);
-
- ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
- ksocknal_conn_decref(conn);
- goto again;
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-}
-
-int
-ksocknal_reaper(void *arg)
-{
- wait_queue_entry_t wait;
- struct ksock_conn *conn;
- struct ksock_sched *sched;
- struct list_head enomem_conns;
- int nenomem_conns;
- long timeout;
- int i;
- int peer_index = 0;
- unsigned long deadline = cfs_time_current();
-
- INIT_LIST_HEAD(&enomem_conns);
- init_waitqueue_entry(&wait, current);
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- while (!ksocknal_data.ksnd_shuttingdown) {
- if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
- struct ksock_conn, ksnc_list);
- list_del(&conn->ksnc_list);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- ksocknal_terminate_conn(conn);
- ksocknal_conn_decref(conn);
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- continue;
- }
-
- if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
- struct ksock_conn, ksnc_list);
- list_del(&conn->ksnc_list);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- ksocknal_destroy_conn(conn);
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- continue;
- }
-
- if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
- list_add(&enomem_conns,
- &ksocknal_data.ksnd_enomem_conns);
- list_del_init(&ksocknal_data.ksnd_enomem_conns);
- }
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- /* reschedule all the connections that stalled with ENOMEM... */
- nenomem_conns = 0;
- while (!list_empty(&enomem_conns)) {
- conn = list_entry(enomem_conns.next, struct ksock_conn,
- ksnc_tx_list);
- list_del(&conn->ksnc_tx_list);
-
- sched = conn->ksnc_scheduler;
-
- spin_lock_bh(&sched->kss_lock);
-
- LASSERT(conn->ksnc_tx_scheduled);
- conn->ksnc_tx_ready = 1;
- list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- wake_up(&sched->kss_waitq);
-
- spin_unlock_bh(&sched->kss_lock);
- nenomem_conns++;
- }
-
- /* careful with the jiffy wrap... */
- while ((timeout = cfs_time_sub(deadline,
- cfs_time_current())) <= 0) {
- const int n = 4;
- const int p = 1;
- int chunk = ksocknal_data.ksnd_peer_hash_size;
-
- /*
- * Time to check for timeouts on a few more peers: I do
- * checks every 'p' seconds on a proportion of the peer
- * table and I need to check every connection 'n' times
- * within a timeout interval, to ensure I detect a
- * timeout on any connection within (n+1)/n times the
- * timeout interval.
- */
- if (*ksocknal_tunables.ksnd_timeout > n * p)
- chunk = (chunk * n * p) /
- *ksocknal_tunables.ksnd_timeout;
- if (!chunk)
- chunk = 1;
-
- for (i = 0; i < chunk; i++) {
- ksocknal_check_peer_timeouts(peer_index);
- peer_index = (peer_index + 1) %
- ksocknal_data.ksnd_peer_hash_size;
- }
-
- deadline = cfs_time_add(deadline, p * HZ);
- }
-
- if (nenomem_conns) {
- /*
- * Reduce my timeout if I rescheduled ENOMEM conns.
- * This also prevents me getting woken immediately
- * if any go back on my enomem list.
- */
- timeout = SOCKNAL_ENOMEM_RETRY;
- }
- ksocknal_data.ksnd_reaper_waketime =
- cfs_time_add(cfs_time_current(), timeout);
-
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
-
- if (!ksocknal_data.ksnd_shuttingdown &&
- list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
- list_empty(&ksocknal_data.ksnd_zombie_conns))
- schedule_timeout(timeout);
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- }
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- ksocknal_thread_fini();
- return 0;
-}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
deleted file mode 100644
index 7941cfa526bc..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ /dev/null
@@ -1,533 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include "socklnd.h"
-
-int
-ksocknal_lib_get_conn_addrs(struct ksock_conn *conn)
-{
- int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr,
- &conn->ksnc_port);
-
- /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
- LASSERT(!conn->ksnc_closing);
-
- if (rc) {
- CERROR("Error %d getting sock peer IP\n", rc);
- return rc;
- }
-
- rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL);
- if (rc) {
- CERROR("Error %d getting sock local IP\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-int
-ksocknal_lib_zc_capable(struct ksock_conn *conn)
-{
- int caps = conn->ksnc_sock->sk->sk_route_caps;
-
- if (conn->ksnc_proto == &ksocknal_protocol_v1x)
- return 0;
-
- /*
- * ZC if the socket supports scatter/gather and doesn't need software
- * checksums
- */
- return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK));
-}
-
-int
-ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
-{
- struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
- struct socket *sock = conn->ksnc_sock;
- int nob, i;
-
- if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
- conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
- tx->tx_nob == tx->tx_resid && /* frist sending */
- !tx->tx_msg.ksm_csum) /* not checksummed */
- ksocknal_lib_csum_tx(tx);
-
- for (nob = i = 0; i < tx->tx_niov; i++)
- nob += tx->tx_iov[i].iov_len;
-
- if (!list_empty(&conn->ksnc_tx_queue) ||
- nob < tx->tx_resid)
- msg.msg_flags |= MSG_MORE;
-
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
- tx->tx_iov, tx->tx_niov, nob);
- return sock_sendmsg(sock, &msg);
-}
-
-int
-ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
-{
- struct socket *sock = conn->ksnc_sock;
- struct bio_vec *kiov = tx->tx_kiov;
- int rc;
- int nob;
-
- /* Not NOOP message */
- LASSERT(tx->tx_lnetmsg);
-
- if (tx->tx_msg.ksm_zc_cookies[0]) {
- /* Zero copy is enabled */
- struct sock *sk = sock->sk;
- struct page *page = kiov->bv_page;
- int offset = kiov->bv_offset;
- int fragsize = kiov->bv_len;
- int msgflg = MSG_DONTWAIT;
-
- CDEBUG(D_NET, "page %p + offset %x for %d\n",
- page, offset, kiov->bv_len);
-
- if (!list_empty(&conn->ksnc_tx_queue) ||
- fragsize < tx->tx_resid)
- msgflg |= MSG_MORE;
-
- if (sk->sk_prot->sendpage) {
- rc = sk->sk_prot->sendpage(sk, page,
- offset, fragsize, msgflg);
- } else {
- rc = tcp_sendpage(sk, page, offset, fragsize, msgflg);
- }
- } else {
- struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
- int i;
-
- for (nob = i = 0; i < tx->tx_nkiov; i++)
- nob += kiov[i].bv_len;
-
- if (!list_empty(&conn->ksnc_tx_queue) ||
- nob < tx->tx_resid)
- msg.msg_flags |= MSG_MORE;
-
- iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC,
- kiov, tx->tx_nkiov, nob);
- rc = sock_sendmsg(sock, &msg);
- }
- return rc;
-}
-
-void
-ksocknal_lib_eager_ack(struct ksock_conn *conn)
-{
- int opt = 1;
- struct socket *sock = conn->ksnc_sock;
-
- /*
- * Remind the socket to ACK eagerly. If I don't, the socket might
- * think I'm about to send something it could piggy-back the ACK
- * on, introducing delay in completing zero-copy sends in my
- * peer.
- */
- kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt,
- sizeof(opt));
-}
-
-static int lustre_csum(struct kvec *v, void *context)
-{
- struct ksock_conn *conn = context;
- conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
- v->iov_base, v->iov_len);
- return 0;
-}
-
-int
-ksocknal_lib_recv(struct ksock_conn *conn)
-{
- struct msghdr msg = { .msg_iter = conn->ksnc_rx_to };
- __u32 saved_csum;
- int rc;
-
- rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
- if (rc <= 0)
- return rc;
-
- saved_csum = conn->ksnc_msg.ksm_csum;
- if (!saved_csum)
- return rc;
-
- /* header is included only in V2 - V3 checksums only the bulk data */
- if (!(conn->ksnc_rx_to.type & ITER_BVEC) &&
- conn->ksnc_proto != &ksocknal_protocol_v2x)
- return rc;
-
- /* accumulate checksum */
- conn->ksnc_msg.ksm_csum = 0;
- iov_iter_for_each_range(&conn->ksnc_rx_to, rc, lustre_csum, conn);
- conn->ksnc_msg.ksm_csum = saved_csum;
-
- return rc;
-}
-
-void
-ksocknal_lib_csum_tx(struct ksock_tx *tx)
-{
- int i;
- __u32 csum;
- void *base;
-
- LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
- LASSERT(tx->tx_conn);
- LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);
-
- tx->tx_msg.ksm_csum = 0;
-
- csum = crc32_le(~0, tx->tx_iov[0].iov_base,
- tx->tx_iov[0].iov_len);
-
- if (tx->tx_kiov) {
- for (i = 0; i < tx->tx_nkiov; i++) {
- base = kmap(tx->tx_kiov[i].bv_page) +
- tx->tx_kiov[i].bv_offset;
-
- csum = crc32_le(csum, base, tx->tx_kiov[i].bv_len);
-
- kunmap(tx->tx_kiov[i].bv_page);
- }
- } else {
- for (i = 1; i < tx->tx_niov; i++)
- csum = crc32_le(csum, tx->tx_iov[i].iov_base,
- tx->tx_iov[i].iov_len);
- }
-
- if (*ksocknal_tunables.ksnd_inject_csum_error) {
- csum++;
- *ksocknal_tunables.ksnd_inject_csum_error = 0;
- }
-
- tx->tx_msg.ksm_csum = csum;
-}
-
-int
-ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
- int *rxmem, int *nagle)
-{
- struct socket *sock = conn->ksnc_sock;
- int len;
- int rc;
-
- rc = ksocknal_connsock_addref(conn);
- if (rc) {
- LASSERT(conn->ksnc_closing);
- *txmem = *rxmem = *nagle = 0;
- return -ESHUTDOWN;
- }
-
- rc = lnet_sock_getbuf(sock, txmem, rxmem);
- if (!rc) {
- len = sizeof(*nagle);
- rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char *)nagle, &len);
- }
-
- ksocknal_connsock_decref(conn);
-
- if (!rc)
- *nagle = !*nagle;
- else
- *txmem = *rxmem = *nagle = 0;
-
- return rc;
-}
-
-int
-ksocknal_lib_setup_sock(struct socket *sock)
-{
- int rc;
- int option;
- int keep_idle;
- int keep_intvl;
- int keep_count;
- int do_keepalive;
- struct linger linger;
-
- sock->sk->sk_allocation = GFP_NOFS;
-
- /*
- * Ensure this socket aborts active sends immediately when we close
- * it.
- */
- linger.l_onoff = 0;
- linger.l_linger = 0;
-
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger,
- sizeof(linger));
- if (rc) {
- CERROR("Can't set SO_LINGER: %d\n", rc);
- return rc;
- }
-
- option = -1;
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option,
- sizeof(option));
- if (rc) {
- CERROR("Can't set SO_LINGER2: %d\n", rc);
- return rc;
- }
-
- if (!*ksocknal_tunables.ksnd_nagle) {
- option = 1;
-
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char *)&option, sizeof(option));
- if (rc) {
- CERROR("Can't disable nagle: %d\n", rc);
- return rc;
- }
- }
-
- rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size,
- *ksocknal_tunables.ksnd_rx_buffer_size);
- if (rc) {
- CERROR("Can't set buffer tx %d, rx %d buffers: %d\n",
- *ksocknal_tunables.ksnd_tx_buffer_size,
- *ksocknal_tunables.ksnd_rx_buffer_size, rc);
- return rc;
- }
-
-/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */
-
- /* snapshot tunables */
- keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
- keep_count = *ksocknal_tunables.ksnd_keepalive_count;
- keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;
-
- do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
-
- option = (do_keepalive ? 1 : 0);
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option,
- sizeof(option));
- if (rc) {
- CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
- return rc;
- }
-
- if (!do_keepalive)
- return 0;
-
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle,
- sizeof(keep_idle));
- if (rc) {
- CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
- return rc;
- }
-
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
- (char *)&keep_intvl, sizeof(keep_intvl));
- if (rc) {
- CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
- return rc;
- }
-
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count,
- sizeof(keep_count));
- if (rc) {
- CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-void
-ksocknal_lib_push_conn(struct ksock_conn *conn)
-{
- struct sock *sk;
- struct tcp_sock *tp;
- int nonagle;
- int val = 1;
- int rc;
-
- rc = ksocknal_connsock_addref(conn);
- if (rc) /* being shut down */
- return;
-
- sk = conn->ksnc_sock->sk;
- tp = tcp_sk(sk);
-
- lock_sock(sk);
- nonagle = tp->nonagle;
- tp->nonagle = 1;
- release_sock(sk);
-
- rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
- (char *)&val, sizeof(val));
- LASSERT(!rc);
-
- lock_sock(sk);
- tp->nonagle = nonagle;
- release_sock(sk);
-
- ksocknal_connsock_decref(conn);
-}
-
-/*
- * socket call back in Linux
- */
-static void
-ksocknal_data_ready(struct sock *sk)
-{
- struct ksock_conn *conn;
-
- /* interleave correctly with closing sockets... */
- LASSERT(!in_irq());
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- conn = sk->sk_user_data;
- if (!conn) { /* raced with ksocknal_terminate_conn */
- LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
- sk->sk_data_ready(sk);
- } else {
- ksocknal_read_callback(conn);
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-}
-
-static void
-ksocknal_write_space(struct sock *sk)
-{
- struct ksock_conn *conn;
- int wspace;
- int min_wpace;
-
- /* interleave correctly with closing sockets... */
- LASSERT(!in_irq());
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- conn = sk->sk_user_data;
- wspace = sk_stream_wspace(sk);
- min_wpace = sk_stream_min_wspace(sk);
-
- CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
- sk, wspace, min_wpace, conn,
- !conn ? "" : (conn->ksnc_tx_ready ?
- " ready" : " blocked"),
- !conn ? "" : (conn->ksnc_tx_scheduled ?
- " scheduled" : " idle"),
- !conn ? "" : (list_empty(&conn->ksnc_tx_queue) ?
- " empty" : " queued"));
-
- if (!conn) { /* raced with ksocknal_terminate_conn */
- LASSERT(sk->sk_write_space != &ksocknal_write_space);
- sk->sk_write_space(sk);
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return;
- }
-
- if (wspace >= min_wpace) { /* got enough space */
- ksocknal_write_callback(conn);
-
- /*
- * Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
- * ENOMEM check in ksocknal_transmit is race-free (think about
- * it).
- */
- clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-}
-
-void
-ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn)
-{
- conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
- conn->ksnc_saved_write_space = sock->sk->sk_write_space;
-}
-
-void
-ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn)
-{
- sock->sk->sk_user_data = conn;
- sock->sk->sk_data_ready = ksocknal_data_ready;
- sock->sk->sk_write_space = ksocknal_write_space;
-}
-
-void
-ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn)
-{
- /*
- * Remove conn's network callbacks.
- * NB I _have_ to restore the callback, rather than storing a noop,
- * since the socket could survive past this module being unloaded!!
- */
- sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
- sock->sk->sk_write_space = conn->ksnc_saved_write_space;
-
- /*
- * A callback could be in progress already; they hold a read lock
- * on ksnd_global_lock (to serialise with me) and NOOP if
- * sk_user_data is NULL.
- */
- sock->sk->sk_user_data = NULL;
-}
-
-int
-ksocknal_lib_memory_pressure(struct ksock_conn *conn)
-{
- int rc = 0;
- struct ksock_sched *sched;
-
- sched = conn->ksnc_scheduler;
- spin_lock_bh(&sched->kss_lock);
-
- if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) &&
- !conn->ksnc_tx_ready) {
- /*
- * SOCK_NOSPACE is set when the socket fills
- * and cleared in the write_space callback
- * (which also sets ksnc_tx_ready). If
- * SOCK_NOSPACE and ksnc_tx_ready are BOTH
- * zero, I didn't fill the socket and
- * write_space won't reschedule me, so I
- * return -ENOMEM to get my caller to retry
- * after a timeout
- */
- rc = -ENOMEM;
- }
-
- spin_unlock_bh(&sched->kss_lock);
-
- return rc;
-}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
deleted file mode 100644
index 5663a4ca94d4..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "socklnd.h"
-
-static int sock_timeout = 50;
-module_param(sock_timeout, int, 0644);
-MODULE_PARM_DESC(sock_timeout, "dead socket timeout (seconds)");
-
-static int credits = 256;
-module_param(credits, int, 0444);
-MODULE_PARM_DESC(credits, "# concurrent sends");
-
-static int peer_credits = 8;
-module_param(peer_credits, int, 0444);
-MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
-
-static int peer_buffer_credits;
-module_param(peer_buffer_credits, int, 0444);
-MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
-
-static int peer_timeout = 180;
-module_param(peer_timeout, int, 0444);
-MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
-
-/*
- * Number of daemons in each thread pool which is percpt,
- * we will estimate reasonable value based on CPUs if it's not set.
- */
-static unsigned int nscheds;
-module_param(nscheds, int, 0444);
-MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting");
-
-static int nconnds = 4;
-module_param(nconnds, int, 0444);
-MODULE_PARM_DESC(nconnds, "# connection daemons while starting");
-
-static int nconnds_max = 64;
-module_param(nconnds_max, int, 0444);
-MODULE_PARM_DESC(nconnds_max, "max # connection daemons");
-
-static int min_reconnectms = 1000;
-module_param(min_reconnectms, int, 0644);
-MODULE_PARM_DESC(min_reconnectms, "min connection retry interval (mS)");
-
-static int max_reconnectms = 60000;
-module_param(max_reconnectms, int, 0644);
-MODULE_PARM_DESC(max_reconnectms, "max connection retry interval (mS)");
-
-# define DEFAULT_EAGER_ACK 0
-static int eager_ack = DEFAULT_EAGER_ACK;
-module_param(eager_ack, int, 0644);
-MODULE_PARM_DESC(eager_ack, "send tcp ack packets eagerly");
-
-static int typed_conns = 1;
-module_param(typed_conns, int, 0444);
-MODULE_PARM_DESC(typed_conns, "use different sockets for bulk");
-
-static int min_bulk = 1 << 10;
-module_param(min_bulk, int, 0644);
-MODULE_PARM_DESC(min_bulk, "smallest 'large' message");
-
-# define DEFAULT_BUFFER_SIZE 0
-static int tx_buffer_size = DEFAULT_BUFFER_SIZE;
-module_param(tx_buffer_size, int, 0644);
-MODULE_PARM_DESC(tx_buffer_size, "socket tx buffer size (0 for system default)");
-
-static int rx_buffer_size = DEFAULT_BUFFER_SIZE;
-module_param(rx_buffer_size, int, 0644);
-MODULE_PARM_DESC(rx_buffer_size, "socket rx buffer size (0 for system default)");
-
-static int nagle;
-module_param(nagle, int, 0644);
-MODULE_PARM_DESC(nagle, "enable NAGLE?");
-
-static int round_robin = 1;
-module_param(round_robin, int, 0644);
-MODULE_PARM_DESC(round_robin, "Round robin for multiple interfaces");
-
-static int keepalive = 30;
-module_param(keepalive, int, 0644);
-MODULE_PARM_DESC(keepalive, "# seconds before send keepalive");
-
-static int keepalive_idle = 30;
-module_param(keepalive_idle, int, 0644);
-MODULE_PARM_DESC(keepalive_idle, "# idle seconds before probe");
-
-#define DEFAULT_KEEPALIVE_COUNT 5
-static int keepalive_count = DEFAULT_KEEPALIVE_COUNT;
-module_param(keepalive_count, int, 0644);
-MODULE_PARM_DESC(keepalive_count, "# missed probes == dead");
-
-static int keepalive_intvl = 5;
-module_param(keepalive_intvl, int, 0644);
-MODULE_PARM_DESC(keepalive_intvl, "seconds between probes");
-
-static int enable_csum;
-module_param(enable_csum, int, 0644);
-MODULE_PARM_DESC(enable_csum, "enable check sum");
-
-static int inject_csum_error;
-module_param(inject_csum_error, int, 0644);
-MODULE_PARM_DESC(inject_csum_error, "set non-zero to inject a checksum error");
-
-static int nonblk_zcack = 1;
-module_param(nonblk_zcack, int, 0644);
-MODULE_PARM_DESC(nonblk_zcack, "always send ZC-ACK on non-blocking connection");
-
-static unsigned int zc_min_payload = 16 << 10;
-module_param(zc_min_payload, int, 0644);
-MODULE_PARM_DESC(zc_min_payload, "minimum payload size to zero copy");
-
-static unsigned int zc_recv;
-module_param(zc_recv, int, 0644);
-MODULE_PARM_DESC(zc_recv, "enable ZC recv for Chelsio driver");
-
-static unsigned int zc_recv_min_nfrags = 16;
-module_param(zc_recv_min_nfrags, int, 0644);
-MODULE_PARM_DESC(zc_recv_min_nfrags, "minimum # of fragments to enable ZC recv");
-
-#if SOCKNAL_VERSION_DEBUG
-static int protocol = 3;
-module_param(protocol, int, 0644);
-MODULE_PARM_DESC(protocol, "protocol version");
-#endif
-
-struct ksock_tunables ksocknal_tunables;
-
-int ksocknal_tunables_init(void)
-{
- /* initialize ksocknal_tunables structure */
- ksocknal_tunables.ksnd_timeout = &sock_timeout;
- ksocknal_tunables.ksnd_nscheds = &nscheds;
- ksocknal_tunables.ksnd_nconnds = &nconnds;
- ksocknal_tunables.ksnd_nconnds_max = &nconnds_max;
- ksocknal_tunables.ksnd_min_reconnectms = &min_reconnectms;
- ksocknal_tunables.ksnd_max_reconnectms = &max_reconnectms;
- ksocknal_tunables.ksnd_eager_ack = &eager_ack;
- ksocknal_tunables.ksnd_typed_conns = &typed_conns;
- ksocknal_tunables.ksnd_min_bulk = &min_bulk;
- ksocknal_tunables.ksnd_tx_buffer_size = &tx_buffer_size;
- ksocknal_tunables.ksnd_rx_buffer_size = &rx_buffer_size;
- ksocknal_tunables.ksnd_nagle = &nagle;
- ksocknal_tunables.ksnd_round_robin = &round_robin;
- ksocknal_tunables.ksnd_keepalive = &keepalive;
- ksocknal_tunables.ksnd_keepalive_idle = &keepalive_idle;
- ksocknal_tunables.ksnd_keepalive_count = &keepalive_count;
- ksocknal_tunables.ksnd_keepalive_intvl = &keepalive_intvl;
- ksocknal_tunables.ksnd_credits = &credits;
- ksocknal_tunables.ksnd_peertxcredits = &peer_credits;
- ksocknal_tunables.ksnd_peerrtrcredits = &peer_buffer_credits;
- ksocknal_tunables.ksnd_peertimeout = &peer_timeout;
- ksocknal_tunables.ksnd_enable_csum = &enable_csum;
- ksocknal_tunables.ksnd_inject_csum_error = &inject_csum_error;
- ksocknal_tunables.ksnd_nonblk_zcack = &nonblk_zcack;
- ksocknal_tunables.ksnd_zc_min_payload = &zc_min_payload;
- ksocknal_tunables.ksnd_zc_recv = &zc_recv;
- ksocknal_tunables.ksnd_zc_recv_min_nfrags = &zc_recv_min_nfrags;
-
-#if SOCKNAL_VERSION_DEBUG
- ksocknal_tunables.ksnd_protocol = &protocol;
-#endif
-
- if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10))
- *ksocknal_tunables.ksnd_zc_min_payload = 2 << 10;
-
- return 0;
-};
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
deleted file mode 100644
index 05982dac781c..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ /dev/null
@@ -1,810 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2012, Intel Corporation.
- *
- * Author: Zach Brown <zab@zabbo.net>
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eric@bartonsoftware.com>
- *
- * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "socklnd.h"
-
-/*
- * Protocol entries :
- * pro_send_hello : send hello message
- * pro_recv_hello : receive hello message
- * pro_pack : pack message header
- * pro_unpack : unpack message header
- * pro_queue_tx_zcack() : Called holding BH lock: kss_lock
- * return 1 if ACK is piggybacked, otherwise return 0
- * pro_queue_tx_msg() : Called holding BH lock: kss_lock
- * return the ACK that piggybacked by my message, or NULL
- * pro_handle_zcreq() : handler of incoming ZC-REQ
- * pro_handle_zcack() : handler of incoming ZC-ACK
- * pro_match_tx() : Called holding glock
- */
-
-static struct ksock_tx *
-ksocknal_queue_tx_msg_v1(struct ksock_conn *conn, struct ksock_tx *tx_msg)
-{
- /* V1.x, just enqueue it */
- list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
- return NULL;
-}
-
-void
-ksocknal_next_tx_carrier(struct ksock_conn *conn)
-{
- struct ksock_tx *tx = conn->ksnc_tx_carrier;
-
- /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
- LASSERT(!list_empty(&conn->ksnc_tx_queue));
- LASSERT(tx);
-
- /* Next TX that can carry ZC-ACK or LNet message */
- if (tx->tx_list.next == &conn->ksnc_tx_queue) {
- /* no more packets queued */
- conn->ksnc_tx_carrier = NULL;
- } else {
- conn->ksnc_tx_carrier = list_next_entry(tx, tx_list);
- LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
- }
-}
-
-static int
-ksocknal_queue_tx_zcack_v2(struct ksock_conn *conn,
- struct ksock_tx *tx_ack, __u64 cookie)
-{
- struct ksock_tx *tx = conn->ksnc_tx_carrier;
-
- LASSERT(!tx_ack ||
- tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
-
- /*
- * Enqueue or piggyback tx_ack / cookie
- * . no tx can piggyback cookie of tx_ack (or cookie), just
- * enqueue the tx_ack (if tx_ack != NUL) and return NULL.
- * . There is tx can piggyback cookie of tx_ack (or cookie),
- * piggyback the cookie and return the tx.
- */
- if (!tx) {
- if (tx_ack) {
- list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
- conn->ksnc_tx_carrier = tx_ack;
- }
- return 0;
- }
-
- if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
- /* tx is noop zc-ack, can't piggyback zc-ack cookie */
- if (tx_ack)
- list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
- return 0;
- }
-
- LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
- LASSERT(!tx->tx_msg.ksm_zc_cookies[1]);
-
- if (tx_ack)
- cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
-
- /* piggyback the zc-ack cookie */
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
- /* move on to the next TX which can carry cookie */
- ksocknal_next_tx_carrier(conn);
-
- return 1;
-}
-
-static struct ksock_tx *
-ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg)
-{
- struct ksock_tx *tx = conn->ksnc_tx_carrier;
-
- /*
- * Enqueue tx_msg:
- * . If there is no NOOP on the connection, just enqueue
- * tx_msg and return NULL
- * . If there is NOOP on the connection, piggyback the cookie
- * and replace the NOOP tx, and return the NOOP tx.
- */
- if (!tx) { /* nothing on queue */
- list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
- conn->ksnc_tx_carrier = tx_msg;
- return NULL;
- }
-
- if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */
- list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
- return NULL;
- }
-
- LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
-
- /* There is a noop zc-ack can be piggybacked */
- tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1];
- ksocknal_next_tx_carrier(conn);
-
- /* use new_tx to replace the noop zc-ack packet */
- list_add(&tx_msg->tx_list, &tx->tx_list);
- list_del(&tx->tx_list);
-
- return tx;
-}
-
-static int
-ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
- struct ksock_tx *tx_ack, __u64 cookie)
-{
- struct ksock_tx *tx;
-
- if (conn->ksnc_type != SOCKLND_CONN_ACK)
- return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
-
- /* non-blocking ZC-ACK (to router) */
- LASSERT(!tx_ack ||
- tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
-
- tx = conn->ksnc_tx_carrier;
- if (!tx) {
- if (tx_ack) {
- list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
- conn->ksnc_tx_carrier = tx_ack;
- }
- return 0;
- }
-
- /* conn->ksnc_tx_carrier */
-
- if (tx_ack)
- cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
-
- if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */
- return 1;
-
- if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
- /* replace the keepalive PING with a real ACK */
- LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
- return 1;
- }
-
- if (cookie == tx->tx_msg.ksm_zc_cookies[0] ||
- cookie == tx->tx_msg.ksm_zc_cookies[1]) {
- CWARN("%s: duplicated ZC cookie: %llu\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
- return 1; /* XXX return error in the future */
- }
-
- if (!tx->tx_msg.ksm_zc_cookies[0]) {
- /*
- * NOOP tx has only one ZC-ACK cookie,
- * can carry at least one more
- */
- if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
- tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
- } else {
- tx->tx_msg.ksm_zc_cookies[0] = cookie;
- }
-
- if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
- /*
- * not likely to carry more ACKs, skip it
- * to simplify logic
- */
- ksocknal_next_tx_carrier(conn);
- }
-
- return 1;
- }
-
- /* takes two or more cookies already */
-
- if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) {
- __u64 tmp = 0;
-
- /* two separated cookies: (a+2, a) or (a+1, a) */
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] -
- tx->tx_msg.ksm_zc_cookies[1] <= 2);
-
- if (tx->tx_msg.ksm_zc_cookies[0] -
- tx->tx_msg.ksm_zc_cookies[1] == 2) {
- if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1)
- tmp = cookie;
- } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) {
- tmp = tx->tx_msg.ksm_zc_cookies[1];
- } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) {
- tmp = tx->tx_msg.ksm_zc_cookies[0];
- }
-
- if (tmp) {
- /* range of cookies */
- tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
- tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
- return 1;
- }
-
- } else {
- /*
- * ksm_zc_cookies[0] < ksm_zc_cookies[1],
- * it is range of cookies
- */
- if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
- cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
- CWARN("%s: duplicated ZC cookie: %llu\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
- return 1; /* XXX: return error in the future */
- }
-
- if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) {
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
- return 1;
- }
-
- if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) {
- tx->tx_msg.ksm_zc_cookies[0] = cookie;
- return 1;
- }
- }
-
- /* failed to piggyback ZC-ACK */
- if (tx_ack) {
- list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
- /* the next tx can piggyback at least 1 ACK */
- ksocknal_next_tx_carrier(conn);
- }
-
- return 0;
-}
-
-static int
-ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
-{
- int nob;
-
-#if SOCKNAL_VERSION_DEBUG
- if (!*ksocknal_tunables.ksnd_typed_conns)
- return SOCKNAL_MATCH_YES;
-#endif
-
- if (!tx || !tx->tx_lnetmsg) {
- /* noop packet */
- nob = offsetof(struct ksock_msg, ksm_u);
- } else {
- nob = tx->tx_lnetmsg->msg_len +
- ((conn->ksnc_proto == &ksocknal_protocol_v1x) ?
- sizeof(struct lnet_hdr) : sizeof(struct ksock_msg));
- }
-
- /* default checking for typed connection */
- switch (conn->ksnc_type) {
- default:
- CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
- LBUG();
- case SOCKLND_CONN_ANY:
- return SOCKNAL_MATCH_YES;
-
- case SOCKLND_CONN_BULK_IN:
- return SOCKNAL_MATCH_MAY;
-
- case SOCKLND_CONN_BULK_OUT:
- if (nob < *ksocknal_tunables.ksnd_min_bulk)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_YES;
-
- case SOCKLND_CONN_CONTROL:
- if (nob >= *ksocknal_tunables.ksnd_min_bulk)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_YES;
- }
-}
-
-static int
-ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
-{
- int nob;
-
- if (!tx || !tx->tx_lnetmsg)
- nob = offsetof(struct ksock_msg, ksm_u);
- else
- nob = tx->tx_lnetmsg->msg_len + sizeof(struct ksock_msg);
-
- switch (conn->ksnc_type) {
- default:
- CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
- LBUG();
- case SOCKLND_CONN_ANY:
- return SOCKNAL_MATCH_NO;
-
- case SOCKLND_CONN_ACK:
- if (nonblk)
- return SOCKNAL_MATCH_YES;
- else if (!tx || !tx->tx_lnetmsg)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_NO;
-
- case SOCKLND_CONN_BULK_OUT:
- if (nonblk)
- return SOCKNAL_MATCH_NO;
- else if (nob < *ksocknal_tunables.ksnd_min_bulk)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_YES;
-
- case SOCKLND_CONN_CONTROL:
- if (nonblk)
- return SOCKNAL_MATCH_NO;
- else if (nob >= *ksocknal_tunables.ksnd_min_bulk)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_YES;
- }
-}
-
-/* (Sink) handle incoming ZC request from sender */
-static int
-ksocknal_handle_zcreq(struct ksock_conn *c, __u64 cookie, int remote)
-{
- struct ksock_peer *peer = c->ksnc_peer;
- struct ksock_conn *conn;
- struct ksock_tx *tx;
- int rc;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
- if (conn) {
- struct ksock_sched *sched = conn->ksnc_scheduler;
-
- LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
-
- spin_lock_bh(&sched->kss_lock);
-
- rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
-
- spin_unlock_bh(&sched->kss_lock);
-
- if (rc) { /* piggybacked */
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return 0;
- }
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- /* ACK connection is not ready, or can't piggyback the ACK */
- tx = ksocknal_alloc_tx_noop(cookie, !!remote);
- if (!tx)
- return -ENOMEM;
-
- rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
- if (!rc)
- return 0;
-
- ksocknal_free_tx(tx);
- return rc;
-}
-
-/* (Sender) handle ZC_ACK from sink */
-static int
-ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
-{
- struct ksock_peer *peer = conn->ksnc_peer;
- struct ksock_tx *tx;
- struct ksock_tx *temp;
- struct ksock_tx *tmp;
- LIST_HEAD(zlist);
- int count;
-
- if (!cookie1)
- cookie1 = cookie2;
-
- count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
-
- if (cookie2 == SOCKNAL_KEEPALIVE_PING &&
- conn->ksnc_proto == &ksocknal_protocol_v3x) {
- /* keepalive PING for V3.x, just ignore it */
- return count == 1 ? 0 : -EPROTO;
- }
-
- spin_lock(&peer->ksnp_lock);
-
- list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list,
- tx_zc_list) {
- __u64 c = tx->tx_msg.ksm_zc_cookies[0];
-
- if (c == cookie1 || c == cookie2 ||
- (cookie1 < c && c < cookie2)) {
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- list_del(&tx->tx_zc_list);
- list_add(&tx->tx_zc_list, &zlist);
-
- if (!--count)
- break;
- }
- }
-
- spin_unlock(&peer->ksnp_lock);
-
- list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) {
- list_del(&tx->tx_zc_list);
- ksocknal_tx_decref(tx);
- }
-
- return !count ? 0 : -EPROTO;
-}
-
-static int
-ksocknal_send_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello)
-{
- struct socket *sock = conn->ksnc_sock;
- struct lnet_hdr *hdr;
- struct lnet_magicversion *hmv;
- int rc;
- int i;
-
- BUILD_BUG_ON(sizeof(struct lnet_magicversion) != offsetof(struct lnet_hdr, src_nid));
-
- hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
- if (!hdr) {
- CERROR("Can't allocate struct lnet_hdr\n");
- return -ENOMEM;
- }
-
- hmv = (struct lnet_magicversion *)&hdr->dest_nid;
-
- /*
- * Re-organize V2.x message header to V1.x (struct lnet_hdr)
- * header and send out
- */
- hmv->magic = cpu_to_le32(LNET_PROTO_TCP_MAGIC);
- hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR);
- hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR);
-
- if (the_lnet.ln_testprotocompat) {
- /* single-shot proto check */
- LNET_LOCK();
- if (the_lnet.ln_testprotocompat & 1) {
- hmv->version_major++; /* just different! */
- the_lnet.ln_testprotocompat &= ~1;
- }
- if (the_lnet.ln_testprotocompat & 2) {
- hmv->magic = LNET_PROTO_MAGIC;
- the_lnet.ln_testprotocompat &= ~2;
- }
- LNET_UNLOCK();
- }
-
- hdr->src_nid = cpu_to_le64(hello->kshm_src_nid);
- hdr->src_pid = cpu_to_le32(hello->kshm_src_pid);
- hdr->type = cpu_to_le32(LNET_MSG_HELLO);
- hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32));
- hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype);
- hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation);
-
- rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
- if (rc) {
- CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
- rc, &conn->ksnc_ipaddr, conn->ksnc_port);
- goto out;
- }
-
- if (!hello->kshm_nips)
- goto out;
-
- for (i = 0; i < (int)hello->kshm_nips; i++)
- hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);
-
- rc = lnet_sock_write(sock, hello->kshm_ips,
- hello->kshm_nips * sizeof(__u32),
- lnet_acceptor_timeout());
- if (rc) {
- CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
- rc, hello->kshm_nips,
- &conn->ksnc_ipaddr, conn->ksnc_port);
- }
-out:
- kfree(hdr);
-
- return rc;
-}
-
-static int
-ksocknal_send_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello)
-{
- struct socket *sock = conn->ksnc_sock;
- int rc;
-
- hello->kshm_magic = LNET_PROTO_MAGIC;
- hello->kshm_version = conn->ksnc_proto->pro_version;
-
- if (the_lnet.ln_testprotocompat) {
- /* single-shot proto check */
- LNET_LOCK();
- if (the_lnet.ln_testprotocompat & 1) {
- hello->kshm_version++; /* just different! */
- the_lnet.ln_testprotocompat &= ~1;
- }
- LNET_UNLOCK();
- }
-
- rc = lnet_sock_write(sock, hello, offsetof(struct ksock_hello_msg, kshm_ips),
- lnet_acceptor_timeout());
- if (rc) {
- CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
- rc, &conn->ksnc_ipaddr, conn->ksnc_port);
- return rc;
- }
-
- if (!hello->kshm_nips)
- return 0;
-
- rc = lnet_sock_write(sock, hello->kshm_ips,
- hello->kshm_nips * sizeof(__u32),
- lnet_acceptor_timeout());
- if (rc) {
- CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
- rc, hello->kshm_nips,
- &conn->ksnc_ipaddr, conn->ksnc_port);
- }
-
- return rc;
-}
-
-static int
-ksocknal_recv_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello,
- int timeout)
-{
- struct socket *sock = conn->ksnc_sock;
- struct lnet_hdr *hdr;
- int rc;
- int i;
-
- hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
- if (!hdr) {
- CERROR("Can't allocate struct lnet_hdr\n");
- return -ENOMEM;
- }
-
- rc = lnet_sock_read(sock, &hdr->src_nid,
- sizeof(*hdr) - offsetof(struct lnet_hdr, src_nid),
- timeout);
- if (rc) {
- CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0 && rc != -EALREADY);
- goto out;
- }
-
- /* ...and check we got what we expected */
- if (hdr->type != cpu_to_le32(LNET_MSG_HELLO)) {
- CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n",
- le32_to_cpu(hdr->type),
- &conn->ksnc_ipaddr);
- rc = -EPROTO;
- goto out;
- }
-
- hello->kshm_src_nid = le64_to_cpu(hdr->src_nid);
- hello->kshm_src_pid = le32_to_cpu(hdr->src_pid);
- hello->kshm_src_incarnation = le64_to_cpu(hdr->msg.hello.incarnation);
- hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type);
- hello->kshm_nips = le32_to_cpu(hdr->payload_length) /
- sizeof(__u32);
-
- if (hello->kshm_nips > LNET_MAX_INTERFACES) {
- CERROR("Bad nips %d from ip %pI4h\n",
- hello->kshm_nips, &conn->ksnc_ipaddr);
- rc = -EPROTO;
- goto out;
- }
-
- if (!hello->kshm_nips)
- goto out;
-
- rc = lnet_sock_read(sock, hello->kshm_ips,
- hello->kshm_nips * sizeof(__u32), timeout);
- if (rc) {
- CERROR("Error %d reading IPs from ip %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0 && rc != -EALREADY);
- goto out;
- }
-
- for (i = 0; i < (int)hello->kshm_nips; i++) {
- hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
-
- if (!hello->kshm_ips[i]) {
- CERROR("Zero IP[%d] from ip %pI4h\n",
- i, &conn->ksnc_ipaddr);
- rc = -EPROTO;
- break;
- }
- }
-out:
- kfree(hdr);
-
- return rc;
-}
-
-static int
-ksocknal_recv_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello,
- int timeout)
-{
- struct socket *sock = conn->ksnc_sock;
- int rc;
- int i;
-
- if (hello->kshm_magic == LNET_PROTO_MAGIC)
- conn->ksnc_flip = 0;
- else
- conn->ksnc_flip = 1;
-
- rc = lnet_sock_read(sock, &hello->kshm_src_nid,
- offsetof(struct ksock_hello_msg, kshm_ips) -
- offsetof(struct ksock_hello_msg, kshm_src_nid),
- timeout);
- if (rc) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0 && rc != -EALREADY);
- return rc;
- }
-
- if (conn->ksnc_flip) {
- __swab32s(&hello->kshm_src_pid);
- __swab64s(&hello->kshm_src_nid);
- __swab32s(&hello->kshm_dst_pid);
- __swab64s(&hello->kshm_dst_nid);
- __swab64s(&hello->kshm_src_incarnation);
- __swab64s(&hello->kshm_dst_incarnation);
- __swab32s(&hello->kshm_ctype);
- __swab32s(&hello->kshm_nips);
- }
-
- if (hello->kshm_nips > LNET_MAX_INTERFACES) {
- CERROR("Bad nips %d from ip %pI4h\n",
- hello->kshm_nips, &conn->ksnc_ipaddr);
- return -EPROTO;
- }
-
- if (!hello->kshm_nips)
- return 0;
-
- rc = lnet_sock_read(sock, hello->kshm_ips,
- hello->kshm_nips * sizeof(__u32), timeout);
- if (rc) {
- CERROR("Error %d reading IPs from ip %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0 && rc != -EALREADY);
- return rc;
- }
-
- for (i = 0; i < (int)hello->kshm_nips; i++) {
- if (conn->ksnc_flip)
- __swab32s(&hello->kshm_ips[i]);
-
- if (!hello->kshm_ips[i]) {
- CERROR("Zero IP[%d] from ip %pI4h\n",
- i, &conn->ksnc_ipaddr);
- return -EPROTO;
- }
- }
-
- return 0;
-}
-
-static void
-ksocknal_pack_msg_v1(struct ksock_tx *tx)
-{
- /* V1.x has no KSOCK_MSG_NOOP */
- LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT(tx->tx_lnetmsg);
-
- tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr;
- tx->tx_iov[0].iov_len = sizeof(struct lnet_hdr);
-
- tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(struct lnet_hdr);
- tx->tx_resid = tx->tx_lnetmsg->msg_len + sizeof(struct lnet_hdr);
-}
-
-static void
-ksocknal_pack_msg_v2(struct ksock_tx *tx)
-{
- tx->tx_iov[0].iov_base = &tx->tx_msg;
-
- if (tx->tx_lnetmsg) {
- LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
-
- tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
- tx->tx_iov[0].iov_len = sizeof(struct ksock_msg);
- tx->tx_nob = sizeof(struct ksock_msg) + tx->tx_lnetmsg->msg_len;
- tx->tx_resid = sizeof(struct ksock_msg) + tx->tx_lnetmsg->msg_len;
- } else {
- LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
-
- tx->tx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr);
- tx->tx_nob = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr);
- tx->tx_resid = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr);
- }
- /*
- * Don't checksum before start sending, because packet can be
- * piggybacked with ACK
- */
-}
-
-static void
-ksocknal_unpack_msg_v1(struct ksock_msg *msg)
-{
- msg->ksm_csum = 0;
- msg->ksm_type = KSOCK_MSG_LNET;
- msg->ksm_zc_cookies[0] = 0;
- msg->ksm_zc_cookies[1] = 0;
-}
-
-static void
-ksocknal_unpack_msg_v2(struct ksock_msg *msg)
-{
- return; /* Do nothing */
-}
-
-struct ksock_proto ksocknal_protocol_v1x = {
- .pro_version = KSOCK_PROTO_V1,
- .pro_send_hello = ksocknal_send_hello_v1,
- .pro_recv_hello = ksocknal_recv_hello_v1,
- .pro_pack = ksocknal_pack_msg_v1,
- .pro_unpack = ksocknal_unpack_msg_v1,
- .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1,
- .pro_handle_zcreq = NULL,
- .pro_handle_zcack = NULL,
- .pro_queue_tx_zcack = NULL,
- .pro_match_tx = ksocknal_match_tx
-};
-
-struct ksock_proto ksocknal_protocol_v2x = {
- .pro_version = KSOCK_PROTO_V2,
- .pro_send_hello = ksocknal_send_hello_v2,
- .pro_recv_hello = ksocknal_recv_hello_v2,
- .pro_pack = ksocknal_pack_msg_v2,
- .pro_unpack = ksocknal_unpack_msg_v2,
- .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
- .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2,
- .pro_handle_zcreq = ksocknal_handle_zcreq,
- .pro_handle_zcack = ksocknal_handle_zcack,
- .pro_match_tx = ksocknal_match_tx
-};
-
-struct ksock_proto ksocknal_protocol_v3x = {
- .pro_version = KSOCK_PROTO_V3,
- .pro_send_hello = ksocknal_send_hello_v2,
- .pro_recv_hello = ksocknal_recv_hello_v2,
- .pro_pack = ksocknal_pack_msg_v2,
- .pro_unpack = ksocknal_unpack_msg_v2,
- .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
- .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3,
- .pro_handle_zcreq = ksocknal_handle_zcreq,
- .pro_handle_zcack = ksocknal_handle_zcack,
- .pro_match_tx = ksocknal_match_tx_v3
-};
diff --git a/drivers/staging/lustre/lnet/libcfs/Makefile b/drivers/staging/lustre/lnet/libcfs/Makefile
deleted file mode 100644
index b7dc7ac11cc5..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LNET) += libcfs.o
-
-libcfs-linux-objs := linux-tracefile.o linux-debug.o
-libcfs-linux-objs += linux-cpu.o
-libcfs-linux-objs += linux-module.o
-libcfs-linux-objs += linux-crypto.o
-libcfs-linux-objs += linux-crypto-adler.o
-
-libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs))
-
-libcfs-all-objs := debug.o fail.o module.o tracefile.o \
- libcfs_string.o hash.o \
- libcfs_cpu.o libcfs_mem.o libcfs_lock.o
-
-libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs)
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
deleted file mode 100644
index 1371224a8cb9..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ /dev/null
@@ -1,458 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/debug.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- *
- */
-
-# define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-#include "tracefile.h"
-
-static char debug_file_name[1024];
-
-unsigned int libcfs_subsystem_debug = ~0;
-EXPORT_SYMBOL(libcfs_subsystem_debug);
-module_param(libcfs_subsystem_debug, int, 0644);
-MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask");
-
-unsigned int libcfs_debug = (D_CANTMASK |
- D_NETERROR | D_HA | D_CONFIG | D_IOCTL);
-EXPORT_SYMBOL(libcfs_debug);
-module_param(libcfs_debug, int, 0644);
-MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
-
-static int libcfs_param_debug_mb_set(const char *val,
- const struct kernel_param *kp)
-{
- int rc;
- unsigned int num;
-
- rc = kstrtouint(val, 0, &num);
- if (rc < 0)
- return rc;
-
- if (!*((unsigned int *)kp->arg)) {
- *((unsigned int *)kp->arg) = num;
- return 0;
- }
-
- rc = cfs_trace_set_debug_mb(num);
-
- if (!rc)
- *((unsigned int *)kp->arg) = cfs_trace_get_debug_mb();
-
- return rc;
-}
-
-/* While debug_mb setting look like unsigned int, in fact
- * it needs quite a bunch of extra processing, so we define special
- * debugmb parameter type with corresponding methods to handle this case
- */
-static const struct kernel_param_ops param_ops_debugmb = {
- .set = libcfs_param_debug_mb_set,
- .get = param_get_uint,
-};
-
-#define param_check_debugmb(name, p) \
- __param_check(name, p, unsigned int)
-
-static unsigned int libcfs_debug_mb;
-module_param(libcfs_debug_mb, debugmb, 0644);
-MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size.");
-
-unsigned int libcfs_printk = D_CANTMASK;
-module_param(libcfs_printk, uint, 0644);
-MODULE_PARM_DESC(libcfs_printk, "Lustre kernel debug console mask");
-
-unsigned int libcfs_console_ratelimit = 1;
-module_param(libcfs_console_ratelimit, uint, 0644);
-MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)");
-
-static int param_set_delay_minmax(const char *val,
- const struct kernel_param *kp,
- long min, long max)
-{
- long d;
- int sec;
- int rc;
-
- rc = kstrtoint(val, 0, &sec);
- if (rc)
- return -EINVAL;
-
- d = sec * HZ / 100;
- if (d < min || d > max)
- return -EINVAL;
-
- *((unsigned int *)kp->arg) = d;
-
- return 0;
-}
-
-static int param_get_delay(char *buffer, const struct kernel_param *kp)
-{
- unsigned int d = *(unsigned int *)kp->arg;
-
- return sprintf(buffer, "%u", (unsigned int)cfs_duration_sec(d * 100));
-}
-
-unsigned int libcfs_console_max_delay;
-unsigned int libcfs_console_min_delay;
-
-static int param_set_console_max_delay(const char *val,
- const struct kernel_param *kp)
-{
- return param_set_delay_minmax(val, kp,
- libcfs_console_min_delay, INT_MAX);
-}
-
-static const struct kernel_param_ops param_ops_console_max_delay = {
- .set = param_set_console_max_delay,
- .get = param_get_delay,
-};
-
-#define param_check_console_max_delay(name, p) \
- __param_check(name, p, unsigned int)
-
-module_param(libcfs_console_max_delay, console_max_delay, 0644);
-MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)");
-
-static int param_set_console_min_delay(const char *val,
- const struct kernel_param *kp)
-{
- return param_set_delay_minmax(val, kp,
- 1, libcfs_console_max_delay);
-}
-
-static const struct kernel_param_ops param_ops_console_min_delay = {
- .set = param_set_console_min_delay,
- .get = param_get_delay,
-};
-
-#define param_check_console_min_delay(name, p) \
- __param_check(name, p, unsigned int)
-
-module_param(libcfs_console_min_delay, console_min_delay, 0644);
-MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)");
-
-static int param_set_uint_minmax(const char *val,
- const struct kernel_param *kp,
- unsigned int min, unsigned int max)
-{
- unsigned int num;
- int ret;
-
- if (!val)
- return -EINVAL;
- ret = kstrtouint(val, 0, &num);
- if (ret < 0 || num < min || num > max)
- return -EINVAL;
- *((unsigned int *)kp->arg) = num;
- return 0;
-}
-
-static int param_set_uintpos(const char *val, const struct kernel_param *kp)
-{
- return param_set_uint_minmax(val, kp, 1, -1);
-}
-
-static const struct kernel_param_ops param_ops_uintpos = {
- .set = param_set_uintpos,
- .get = param_get_uint,
-};
-
-#define param_check_uintpos(name, p) \
- __param_check(name, p, unsigned int)
-
-unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF;
-module_param(libcfs_console_backoff, uintpos, 0644);
-MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor");
-
-unsigned int libcfs_debug_binary = 1;
-
-unsigned int libcfs_stack = 3 * THREAD_SIZE / 4;
-EXPORT_SYMBOL(libcfs_stack);
-
-unsigned int libcfs_catastrophe;
-EXPORT_SYMBOL(libcfs_catastrophe);
-
-unsigned int libcfs_panic_on_lbug = 1;
-module_param(libcfs_panic_on_lbug, uint, 0644);
-MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG");
-
-static wait_queue_head_t debug_ctlwq;
-
-char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
-
-/* We need to pass a pointer here, but elsewhere this must be a const */
-static char *libcfs_debug_file_path;
-module_param(libcfs_debug_file_path, charp, 0644);
-MODULE_PARM_DESC(libcfs_debug_file_path,
- "Path for dumping debug logs, set 'NONE' to prevent log dumping");
-
-int libcfs_panic_in_progress;
-
-/* libcfs_debug_token2mask() expects the returned string in lower-case */
-static const char *
-libcfs_debug_subsys2str(int subsys)
-{
- static const char * const libcfs_debug_subsystems[] =
- LIBCFS_DEBUG_SUBSYS_NAMES;
-
- if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
- return NULL;
-
- return libcfs_debug_subsystems[subsys];
-}
-
-/* libcfs_debug_token2mask() expects the returned string in lower-case */
-static const char *
-libcfs_debug_dbg2str(int debug)
-{
- static const char * const libcfs_debug_masks[] =
- LIBCFS_DEBUG_MASKS_NAMES;
-
- if (debug >= ARRAY_SIZE(libcfs_debug_masks))
- return NULL;
-
- return libcfs_debug_masks[debug];
-}
-
-int
-libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
-{
- const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
- libcfs_debug_dbg2str;
- int len = 0;
- const char *token;
- int i;
-
- if (!mask) { /* "0" */
- if (size > 0)
- str[0] = '0';
- len = 1;
- } else { /* space-separated tokens */
- for (i = 0; i < 32; i++) {
- if (!(mask & (1 << i)))
- continue;
-
- token = fn(i);
- if (!token) /* unused bit */
- continue;
-
- if (len > 0) { /* separator? */
- if (len < size)
- str[len] = ' ';
- len++;
- }
-
- while (*token) {
- if (len < size)
- str[len] = *token;
- token++;
- len++;
- }
- }
- }
-
- /* terminate 'str' */
- if (len < size)
- str[len] = 0;
- else
- str[size - 1] = 0;
-
- return len;
-}
-
-int
-libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
-{
- const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
- libcfs_debug_dbg2str;
- int m = 0;
- int matched;
- int n;
- int t;
-
- /* Allow a number for backwards compatibility */
-
- for (n = strlen(str); n > 0; n--)
- if (!isspace(str[n - 1]))
- break;
- matched = n;
- t = sscanf(str, "%i%n", &m, &matched);
- if (t >= 1 && matched == n) {
- /* don't print warning for lctl set_param debug=0 or -1 */
- if (m && m != -1)
- CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n");
- *mask = m;
- return 0;
- }
-
- return cfs_str2mask(str, fn, mask, is_subsys ? 0 : D_CANTMASK,
- 0xffffffff);
-}
-
-/**
- * Dump Lustre log to ::debug_file_path by calling tracefile_dump_all_pages()
- */
-void libcfs_debug_dumplog_internal(void *arg)
-{
- static time64_t last_dump_time;
- time64_t current_time;
- void *journal_info;
-
- journal_info = current->journal_info;
- current->journal_info = NULL;
- current_time = ktime_get_real_seconds();
-
- if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) &&
- current_time > last_dump_time) {
- last_dump_time = current_time;
- snprintf(debug_file_name, sizeof(debug_file_name) - 1,
- "%s.%lld.%ld", libcfs_debug_file_path_arr,
- (s64)current_time, (long)arg);
- pr_alert("LustreError: dumping log to %s\n", debug_file_name);
- cfs_tracefile_dump_all_pages(debug_file_name);
- libcfs_run_debug_log_upcall(debug_file_name);
- }
-
- current->journal_info = journal_info;
-}
-
-static int libcfs_debug_dumplog_thread(void *arg)
-{
- libcfs_debug_dumplog_internal(arg);
- wake_up(&debug_ctlwq);
- return 0;
-}
-
-void libcfs_debug_dumplog(void)
-{
- wait_queue_entry_t wait;
- struct task_struct *dumper;
-
- /* we're being careful to ensure that the kernel thread is
- * able to set our state to running as it exits before we
- * get to schedule()
- */
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&debug_ctlwq, &wait);
-
- dumper = kthread_run(libcfs_debug_dumplog_thread,
- (void *)(long)current_pid(),
- "libcfs_debug_dumper");
- set_current_state(TASK_INTERRUPTIBLE);
- if (IS_ERR(dumper))
- pr_err("LustreError: cannot start log dump thread: %ld\n",
- PTR_ERR(dumper));
- else
- schedule();
-
- /* be sure to teardown if cfs_create_thread() failed */
- remove_wait_queue(&debug_ctlwq, &wait);
- set_current_state(TASK_RUNNING);
-}
-EXPORT_SYMBOL(libcfs_debug_dumplog);
-
-int libcfs_debug_init(unsigned long bufsize)
-{
- unsigned int max = libcfs_debug_mb;
- int rc = 0;
-
- init_waitqueue_head(&debug_ctlwq);
-
- if (libcfs_console_max_delay <= 0 || /* not set by user or */
- libcfs_console_min_delay <= 0 || /* set to invalid values */
- libcfs_console_min_delay >= libcfs_console_max_delay) {
- libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
- libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
- }
-
- if (libcfs_debug_file_path) {
- strlcpy(libcfs_debug_file_path_arr,
- libcfs_debug_file_path,
- sizeof(libcfs_debug_file_path_arr));
- }
-
- /* If libcfs_debug_mb is set to an invalid value or uninitialized
- * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES
- */
- if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) {
- max = TCD_MAX_PAGES;
- } else {
- max = max / num_possible_cpus();
- max <<= (20 - PAGE_SHIFT);
- }
-
- rc = cfs_tracefile_init(max);
- if (!rc) {
- libcfs_register_panic_notifier();
- libcfs_debug_mb = cfs_trace_get_debug_mb();
- }
-
- return rc;
-}
-
-int libcfs_debug_cleanup(void)
-{
- libcfs_unregister_panic_notifier();
- cfs_tracefile_exit();
- return 0;
-}
-
-int libcfs_debug_clear_buffer(void)
-{
- cfs_trace_flush_pages();
- return 0;
-}
-
-/* Debug markers, although printed by S_LNET should not be marked as such. */
-#undef DEBUG_SUBSYSTEM
-#define DEBUG_SUBSYSTEM S_UNDEFINED
-int libcfs_debug_mark_buffer(const char *text)
-{
- CDEBUG(D_TRACE,
- "***************************************************\n");
- LCONSOLE(D_WARNING, "DEBUG MARKER: %s\n", text);
- CDEBUG(D_TRACE,
- "***************************************************\n");
-
- return 0;
-}
-
-#undef DEBUG_SUBSYSTEM
-#define DEBUG_SUBSYSTEM S_LNET
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
deleted file mode 100644
index d3f1e866c6a7..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Oracle Corporation, Inc.
- */
-
-#include <linux/libcfs/libcfs.h>
-
-unsigned long cfs_fail_loc;
-EXPORT_SYMBOL(cfs_fail_loc);
-
-unsigned int cfs_fail_val;
-EXPORT_SYMBOL(cfs_fail_val);
-
-int cfs_fail_err;
-EXPORT_SYMBOL(cfs_fail_err);
-
-DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq);
-EXPORT_SYMBOL(cfs_race_waitq);
-
-int cfs_race_state;
-EXPORT_SYMBOL(cfs_race_state);
-
-int __cfs_fail_check_set(u32 id, u32 value, int set)
-{
- static atomic_t cfs_fail_count = ATOMIC_INIT(0);
-
- LASSERT(!(id & CFS_FAIL_ONCE));
-
- if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
- (CFS_FAILED | CFS_FAIL_ONCE)) {
- atomic_set(&cfs_fail_count, 0); /* paranoia */
- return 0;
- }
-
- /* Fail 1/cfs_fail_val times */
- if (cfs_fail_loc & CFS_FAIL_RAND) {
- if (cfs_fail_val < 2 || prandom_u32_max(cfs_fail_val) > 0)
- return 0;
- }
-
- /* Skip the first cfs_fail_val, then fail */
- if (cfs_fail_loc & CFS_FAIL_SKIP) {
- if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
- return 0;
- }
-
- /* check cfs_fail_val... */
- if (set == CFS_FAIL_LOC_VALUE) {
- if (cfs_fail_val != -1 && cfs_fail_val != value)
- return 0;
- }
-
- /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
- if (cfs_fail_loc & CFS_FAIL_SOME &&
- (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
- int count = atomic_inc_return(&cfs_fail_count);
-
- if (count >= cfs_fail_val) {
- set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
- atomic_set(&cfs_fail_count, 0);
- /* we are lost race to increase */
- if (count > cfs_fail_val)
- return 0;
- }
- }
-
- /* Take into account the current call for FAIL_ONCE for ORSET only,
- * as RESET is a new fail_loc, it does not change the current call
- */
- if ((set == CFS_FAIL_LOC_ORSET) && (value & CFS_FAIL_ONCE))
- set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
- /* Lost race to set CFS_FAILED_BIT. */
- if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
- /* If CFS_FAIL_ONCE is valid, only one process can fail,
- * otherwise multi-process can fail at the same time.
- */
- if (cfs_fail_loc & CFS_FAIL_ONCE)
- return 0;
- }
-
- switch (set) {
- case CFS_FAIL_LOC_NOSET:
- case CFS_FAIL_LOC_VALUE:
- break;
- case CFS_FAIL_LOC_ORSET:
- cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
- break;
- case CFS_FAIL_LOC_RESET:
- cfs_fail_loc = value;
- atomic_set(&cfs_fail_count, 0);
- break;
- default:
- LASSERTF(0, "called with bad set %u\n", set);
- break;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(__cfs_fail_check_set);
-
-int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
-{
- int ret;
-
- ret = __cfs_fail_check_set(id, value, set);
- if (ret && likely(ms > 0)) {
- CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
- id, ms);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(ms * HZ / 1000);
- CERROR("cfs_fail_timeout id %x awake\n", id);
- }
- return ret;
-}
-EXPORT_SYMBOL(__cfs_fail_timeout_set);
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
deleted file mode 100644
index f7b3c9306456..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ /dev/null
@@ -1,2064 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/hash.c
- *
- * Implement a hash class for hash process in lustre system.
- *
- * Author: YuZhangyong <yzy@clusterfs.com>
- *
- * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
- * - Simplified API and improved documentation
- * - Added per-hash feature flags:
- * * CFS_HASH_DEBUG additional validation
- * * CFS_HASH_REHASH dynamic rehashing
- * - Added per-hash statistics
- * - General performance enhancements
- *
- * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
- * - move all stuff to libcfs
- * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
- * - ignore hs_rwlock if without CFS_HASH_REHASH setting
- * - buckets are allocated one by one(instead of contiguous memory),
- * to avoid unnecessary cacheline conflict
- *
- * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
- * - "bucket" is a group of hlist_head now, user can specify bucket size
- * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
- * one lock for reducing memory overhead.
- *
- * - support lockless hash, caller will take care of locks:
- * avoid lock overhead for hash tables that are already protected
- * by locking in the caller for another reason
- *
- * - support both spin_lock/rwlock for bucket:
- * overhead of spinlock contention is lower than read/write
- * contention of rwlock, so using spinlock to serialize operations on
- * bucket is more reasonable for those frequently changed hash tables
- *
- * - support one-single lock mode:
- * one lock to protect all hash operations to avoid overhead of
- * multiple locks if hash table is always small
- *
- * - removed a lot of unnecessary addref & decref on hash element:
- * addref & decref are atomic operations in many use-cases which
- * are expensive.
- *
- * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
- * some lustre use-cases require these functions to be strictly
- * non-blocking, we need to schedule required rehash on a different
- * thread on those cases.
- *
- * - safer rehash on large hash table
- * In old implementation, rehash function will exclusively lock the
- * hash table and finish rehash in one batch, it's dangerous on SMP
- * system because rehash millions of elements could take long time.
- * New implemented rehash can release lock and relax CPU in middle
- * of rehash, it's safe for another thread to search/change on the
- * hash table even it's in rehasing.
- *
- * - support two different refcount modes
- * . hash table has refcount on element
- * . hash table doesn't change refcount on adding/removing element
- *
- * - support long name hash table (for param-tree)
- *
- * - fix a bug for cfs_hash_rehash_key:
- * in old implementation, cfs_hash_rehash_key could screw up the
- * hash-table because @key is overwritten without any protection.
- * Now we need user to define hs_keycpy for those rehash enabled
- * hash tables, cfs_hash_rehash_key will overwrite hash-key
- * inside lock by calling hs_keycpy.
- *
- * - better hash iteration:
- * Now we support both locked iteration & lockless iteration of hash
- * table. Also, user can break the iteration by return 1 in callback.
- */
-#include <linux/seq_file.h>
-#include <linux/log2.h>
-
-#include <linux/libcfs/libcfs.h>
-
-#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static unsigned int warn_on_depth = 8;
-module_param(warn_on_depth, uint, 0644);
-MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
-#endif
-
-struct workqueue_struct *cfs_rehash_wq;
-
-static inline void
-cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
-
-static inline void
-cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
-
-static inline void
-cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
- __acquires(&lock->spin)
-{
- spin_lock(&lock->spin);
-}
-
-static inline void
-cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
- __releases(&lock->spin)
-{
- spin_unlock(&lock->spin);
-}
-
-static inline void
-cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
- __acquires(&lock->rw)
-{
- if (!exclusive)
- read_lock(&lock->rw);
- else
- write_lock(&lock->rw);
-}
-
-static inline void
-cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
- __releases(&lock->rw)
-{
- if (!exclusive)
- read_unlock(&lock->rw);
- else
- write_unlock(&lock->rw);
-}
-
-/** No lock hash */
-static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
- .hs_lock = cfs_hash_nl_lock,
- .hs_unlock = cfs_hash_nl_unlock,
- .hs_bkt_lock = cfs_hash_nl_lock,
- .hs_bkt_unlock = cfs_hash_nl_unlock,
-};
-
-/** no bucket lock, one spinlock to protect everything */
-static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
- .hs_lock = cfs_hash_spin_lock,
- .hs_unlock = cfs_hash_spin_unlock,
- .hs_bkt_lock = cfs_hash_nl_lock,
- .hs_bkt_unlock = cfs_hash_nl_unlock,
-};
-
-/** spin bucket lock, rehash is enabled */
-static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
- .hs_lock = cfs_hash_rw_lock,
- .hs_unlock = cfs_hash_rw_unlock,
- .hs_bkt_lock = cfs_hash_spin_lock,
- .hs_bkt_unlock = cfs_hash_spin_unlock,
-};
-
-/** rw bucket lock, rehash is enabled */
-static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
- .hs_lock = cfs_hash_rw_lock,
- .hs_unlock = cfs_hash_rw_unlock,
- .hs_bkt_lock = cfs_hash_rw_lock,
- .hs_bkt_unlock = cfs_hash_rw_unlock,
-};
-
-/** spin bucket lock, rehash is disabled */
-static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
- .hs_lock = cfs_hash_nl_lock,
- .hs_unlock = cfs_hash_nl_unlock,
- .hs_bkt_lock = cfs_hash_spin_lock,
- .hs_bkt_unlock = cfs_hash_spin_unlock,
-};
-
-/** rw bucket lock, rehash is disabled */
-static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
- .hs_lock = cfs_hash_nl_lock,
- .hs_unlock = cfs_hash_nl_unlock,
- .hs_bkt_lock = cfs_hash_rw_lock,
- .hs_bkt_unlock = cfs_hash_rw_unlock,
-};
-
-static void
-cfs_hash_lock_setup(struct cfs_hash *hs)
-{
- if (cfs_hash_with_no_lock(hs)) {
- hs->hs_lops = &cfs_hash_nl_lops;
-
- } else if (cfs_hash_with_no_bktlock(hs)) {
- hs->hs_lops = &cfs_hash_nbl_lops;
- spin_lock_init(&hs->hs_lock.spin);
-
- } else if (cfs_hash_with_rehash(hs)) {
- rwlock_init(&hs->hs_lock.rw);
-
- if (cfs_hash_with_rw_bktlock(hs))
- hs->hs_lops = &cfs_hash_bkt_rw_lops;
- else if (cfs_hash_with_spin_bktlock(hs))
- hs->hs_lops = &cfs_hash_bkt_spin_lops;
- else
- LBUG();
- } else {
- if (cfs_hash_with_rw_bktlock(hs))
- hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
- else if (cfs_hash_with_spin_bktlock(hs))
- hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
- else
- LBUG();
- }
-}
-
-/**
- * Simple hash head without depth tracking
- * new element is always added to head of hlist
- */
-struct cfs_hash_head {
- struct hlist_head hh_head; /**< entries list */
-};
-
-static int
-cfs_hash_hh_hhead_size(struct cfs_hash *hs)
-{
- return sizeof(struct cfs_hash_head);
-}
-
-static struct hlist_head *
-cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- struct cfs_hash_head *head;
-
- head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].hh_head;
-}
-
-static int
-cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
- return -1; /* unknown depth */
-}
-
-static int
-cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- hlist_del_init(hnode);
- return -1; /* unknown depth */
-}
-
-/**
- * Simple hash head with depth tracking
- * new element is always added to head of hlist
- */
-struct cfs_hash_head_dep {
- struct hlist_head hd_head; /**< entries list */
- unsigned int hd_depth; /**< list length */
-};
-
-static int
-cfs_hash_hd_hhead_size(struct cfs_hash *hs)
-{
- return sizeof(struct cfs_hash_head_dep);
-}
-
-static struct hlist_head *
-cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- struct cfs_hash_head_dep *head;
-
- head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].hd_head;
-}
-
-static int
-cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- struct cfs_hash_head_dep *hh;
-
- hh = container_of(cfs_hash_hd_hhead(hs, bd),
- struct cfs_hash_head_dep, hd_head);
- hlist_add_head(hnode, &hh->hd_head);
- return ++hh->hd_depth;
-}
-
-static int
-cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- struct cfs_hash_head_dep *hh;
-
- hh = container_of(cfs_hash_hd_hhead(hs, bd),
- struct cfs_hash_head_dep, hd_head);
- hlist_del_init(hnode);
- return --hh->hd_depth;
-}
-
-/**
- * double links hash head without depth tracking
- * new element is always added to tail of hlist
- */
-struct cfs_hash_dhead {
- struct hlist_head dh_head; /**< entries list */
- struct hlist_node *dh_tail; /**< the last entry */
-};
-
-static int
-cfs_hash_dh_hhead_size(struct cfs_hash *hs)
-{
- return sizeof(struct cfs_hash_dhead);
-}
-
-static struct hlist_head *
-cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- struct cfs_hash_dhead *head;
-
- head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].dh_head;
-}
-
-static int
-cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- struct cfs_hash_dhead *dh;
-
- dh = container_of(cfs_hash_dh_hhead(hs, bd),
- struct cfs_hash_dhead, dh_head);
- if (dh->dh_tail) /* not empty */
- hlist_add_behind(hnode, dh->dh_tail);
- else /* empty list */
- hlist_add_head(hnode, &dh->dh_head);
- dh->dh_tail = hnode;
- return -1; /* unknown depth */
-}
-
-static int
-cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnd)
-{
- struct cfs_hash_dhead *dh;
-
- dh = container_of(cfs_hash_dh_hhead(hs, bd),
- struct cfs_hash_dhead, dh_head);
- if (!hnd->next) { /* it's the tail */
- dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
- container_of(hnd->pprev, struct hlist_node, next);
- }
- hlist_del_init(hnd);
- return -1; /* unknown depth */
-}
-
-/**
- * double links hash head with depth tracking
- * new element is always added to tail of hlist
- */
-struct cfs_hash_dhead_dep {
- struct hlist_head dd_head; /**< entries list */
- struct hlist_node *dd_tail; /**< the last entry */
- unsigned int dd_depth; /**< list length */
-};
-
-static int
-cfs_hash_dd_hhead_size(struct cfs_hash *hs)
-{
- return sizeof(struct cfs_hash_dhead_dep);
-}
-
-static struct hlist_head *
-cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- struct cfs_hash_dhead_dep *head;
-
- head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].dd_head;
-}
-
-static int
-cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- struct cfs_hash_dhead_dep *dh;
-
- dh = container_of(cfs_hash_dd_hhead(hs, bd),
- struct cfs_hash_dhead_dep, dd_head);
- if (dh->dd_tail) /* not empty */
- hlist_add_behind(hnode, dh->dd_tail);
- else /* empty list */
- hlist_add_head(hnode, &dh->dd_head);
- dh->dd_tail = hnode;
- return ++dh->dd_depth;
-}
-
-static int
-cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnd)
-{
- struct cfs_hash_dhead_dep *dh;
-
- dh = container_of(cfs_hash_dd_hhead(hs, bd),
- struct cfs_hash_dhead_dep, dd_head);
- if (!hnd->next) { /* it's the tail */
- dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
- container_of(hnd->pprev, struct hlist_node, next);
- }
- hlist_del_init(hnd);
- return --dh->dd_depth;
-}
-
-static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
- .hop_hhead = cfs_hash_hh_hhead,
- .hop_hhead_size = cfs_hash_hh_hhead_size,
- .hop_hnode_add = cfs_hash_hh_hnode_add,
- .hop_hnode_del = cfs_hash_hh_hnode_del,
-};
-
-static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
- .hop_hhead = cfs_hash_hd_hhead,
- .hop_hhead_size = cfs_hash_hd_hhead_size,
- .hop_hnode_add = cfs_hash_hd_hnode_add,
- .hop_hnode_del = cfs_hash_hd_hnode_del,
-};
-
-static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
- .hop_hhead = cfs_hash_dh_hhead,
- .hop_hhead_size = cfs_hash_dh_hhead_size,
- .hop_hnode_add = cfs_hash_dh_hnode_add,
- .hop_hnode_del = cfs_hash_dh_hnode_del,
-};
-
-static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
- .hop_hhead = cfs_hash_dd_hhead,
- .hop_hhead_size = cfs_hash_dd_hhead_size,
- .hop_hnode_add = cfs_hash_dd_hnode_add,
- .hop_hnode_del = cfs_hash_dd_hnode_del,
-};
-
-static void
-cfs_hash_hlist_setup(struct cfs_hash *hs)
-{
- if (cfs_hash_with_add_tail(hs)) {
- hs->hs_hops = cfs_hash_with_depth(hs) ?
- &cfs_hash_dd_hops : &cfs_hash_dh_hops;
- } else {
- hs->hs_hops = cfs_hash_with_depth(hs) ?
- &cfs_hash_hd_hops : &cfs_hash_hh_hops;
- }
-}
-
-static void
-cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
- unsigned int bits, const void *key, struct cfs_hash_bd *bd)
-{
- unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
-
- LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
-
- bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
- bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
-}
-
-void
-cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
-{
- /* NB: caller should hold hs->hs_rwlock if REHASH is set */
- if (likely(!hs->hs_rehash_buckets)) {
- cfs_hash_bd_from_key(hs, hs->hs_buckets,
- hs->hs_cur_bits, key, bd);
- } else {
- LASSERT(hs->hs_rehash_bits);
- cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
- hs->hs_rehash_bits, key, bd);
- }
-}
-EXPORT_SYMBOL(cfs_hash_bd_get);
-
-static inline void
-cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
-{
- if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
- return;
-
- bd->bd_bucket->hsb_depmax = dep_cur;
-# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
- if (likely(!warn_on_depth ||
- max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
- return;
-
- spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_max = dep_cur;
- hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
- hs->hs_dep_off = bd->bd_offset;
- hs->hs_dep_bits = hs->hs_cur_bits;
- spin_unlock(&hs->hs_dep_lock);
-
- queue_work(cfs_rehash_wq, &hs->hs_dep_work);
-# endif
-}
-
-void
-cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- int rc;
-
- rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
- cfs_hash_bd_dep_record(hs, bd, rc);
- bd->bd_bucket->hsb_version++;
- if (unlikely(!bd->bd_bucket->hsb_version))
- bd->bd_bucket->hsb_version++;
- bd->bd_bucket->hsb_count++;
-
- if (cfs_hash_with_counter(hs))
- atomic_inc(&hs->hs_count);
- if (!cfs_hash_with_no_itemref(hs))
- cfs_hash_get(hs, hnode);
-}
-EXPORT_SYMBOL(cfs_hash_bd_add_locked);
-
-void
-cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- hs->hs_hops->hop_hnode_del(hs, bd, hnode);
-
- LASSERT(bd->bd_bucket->hsb_count > 0);
- bd->bd_bucket->hsb_count--;
- bd->bd_bucket->hsb_version++;
- if (unlikely(!bd->bd_bucket->hsb_version))
- bd->bd_bucket->hsb_version++;
-
- if (cfs_hash_with_counter(hs)) {
- LASSERT(atomic_read(&hs->hs_count) > 0);
- atomic_dec(&hs->hs_count);
- }
- if (!cfs_hash_with_no_itemref(hs))
- cfs_hash_put_locked(hs, hnode);
-}
-EXPORT_SYMBOL(cfs_hash_bd_del_locked);
-
-void
-cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
- struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
-{
- struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
- struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
- int rc;
-
- if (!cfs_hash_bd_compare(bd_old, bd_new))
- return;
-
- /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
- * in cfs_hash_bd_del/add_locked
- */
- hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
- rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
- cfs_hash_bd_dep_record(hs, bd_new, rc);
-
- LASSERT(obkt->hsb_count > 0);
- obkt->hsb_count--;
- obkt->hsb_version++;
- if (unlikely(!obkt->hsb_version))
- obkt->hsb_version++;
- nbkt->hsb_count++;
- nbkt->hsb_version++;
- if (unlikely(!nbkt->hsb_version))
- nbkt->hsb_version++;
-}
-
-enum {
- /** always set, for sanity (avoid ZERO intent) */
- CFS_HS_LOOKUP_MASK_FIND = BIT(0),
- /** return entry with a ref */
- CFS_HS_LOOKUP_MASK_REF = BIT(1),
- /** add entry if not existing */
- CFS_HS_LOOKUP_MASK_ADD = BIT(2),
- /** delete entry, ignore other masks */
- CFS_HS_LOOKUP_MASK_DEL = BIT(3),
-};
-
-enum cfs_hash_lookup_intent {
- /** return item w/o refcount */
- CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND,
- /** return item with refcount */
- CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND |
- CFS_HS_LOOKUP_MASK_REF),
- /** return item w/o refcount if existed, otherwise add */
- CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND |
- CFS_HS_LOOKUP_MASK_ADD),
- /** return item with refcount if existed, otherwise add */
- CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
- CFS_HS_LOOKUP_MASK_ADD),
- /** delete if existed */
- CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
- CFS_HS_LOOKUP_MASK_DEL)
-};
-
-static struct hlist_node *
-cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- const void *key, struct hlist_node *hnode,
- enum cfs_hash_lookup_intent intent)
-
-{
- struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
- struct hlist_node *ehnode;
- struct hlist_node *match;
- int intent_add = intent & CFS_HS_LOOKUP_MASK_ADD;
-
- /* with this function, we can avoid a lot of useless refcount ops,
- * which are expensive atomic operations most time.
- */
- match = intent_add ? NULL : hnode;
- hlist_for_each(ehnode, hhead) {
- if (!cfs_hash_keycmp(hs, key, ehnode))
- continue;
-
- if (match && match != ehnode) /* can't match */
- continue;
-
- /* match and ... */
- if (intent & CFS_HS_LOOKUP_MASK_DEL) {
- cfs_hash_bd_del_locked(hs, bd, ehnode);
- return ehnode;
- }
-
- /* caller wants refcount? */
- if (intent & CFS_HS_LOOKUP_MASK_REF)
- cfs_hash_get(hs, ehnode);
- return ehnode;
- }
- /* no match item */
- if (!intent_add)
- return NULL;
-
- LASSERT(hnode);
- cfs_hash_bd_add_locked(hs, bd, hnode);
- return hnode;
-}
-
-struct hlist_node *
-cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- const void *key)
-{
- return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
- CFS_HS_LOOKUP_IT_FIND);
-}
-EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
-
-struct hlist_node *
-cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- const void *key)
-{
- return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
- CFS_HS_LOOKUP_IT_PEEK);
-}
-EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
-
-static void
-cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned int n, int excl)
-{
- struct cfs_hash_bucket *prev = NULL;
- int i;
-
- /**
- * bds must be ascendantly ordered by bd->bd_bucket->hsb_index.
- * NB: it's possible that several bds point to the same bucket but
- * have different bd::bd_offset, so need take care of deadlock.
- */
- cfs_hash_for_each_bd(bds, n, i) {
- if (prev == bds[i].bd_bucket)
- continue;
-
- LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index);
- cfs_hash_bd_lock(hs, &bds[i], excl);
- prev = bds[i].bd_bucket;
- }
-}
-
-static void
-cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned int n, int excl)
-{
- struct cfs_hash_bucket *prev = NULL;
- int i;
-
- cfs_hash_for_each_bd(bds, n, i) {
- if (prev != bds[i].bd_bucket) {
- cfs_hash_bd_unlock(hs, &bds[i], excl);
- prev = bds[i].bd_bucket;
- }
- }
-}
-
-static struct hlist_node *
-cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned int n, const void *key)
-{
- struct hlist_node *ehnode;
- unsigned int i;
-
- cfs_hash_for_each_bd(bds, n, i) {
- ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
- CFS_HS_LOOKUP_IT_FIND);
- if (ehnode)
- return ehnode;
- }
- return NULL;
-}
-
-static struct hlist_node *
-cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned int n, const void *key,
- struct hlist_node *hnode, int noref)
-{
- struct hlist_node *ehnode;
- int intent;
- unsigned int i;
-
- LASSERT(hnode);
- intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
-
- cfs_hash_for_each_bd(bds, n, i) {
- ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
- NULL, intent);
- if (ehnode)
- return ehnode;
- }
-
- if (i == 1) { /* only one bucket */
- cfs_hash_bd_add_locked(hs, &bds[0], hnode);
- } else {
- struct cfs_hash_bd mybd;
-
- cfs_hash_bd_get(hs, key, &mybd);
- cfs_hash_bd_add_locked(hs, &mybd, hnode);
- }
-
- return hnode;
-}
-
-static struct hlist_node *
-cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned int n, const void *key,
- struct hlist_node *hnode)
-{
- struct hlist_node *ehnode;
- unsigned int i;
-
- cfs_hash_for_each_bd(bds, n, i) {
- ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
- CFS_HS_LOOKUP_IT_FINDDEL);
- if (ehnode)
- return ehnode;
- }
- return NULL;
-}
-
-static void
-cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
-{
- int rc;
-
- if (!bd2->bd_bucket)
- return;
-
- if (!bd1->bd_bucket) {
- *bd1 = *bd2;
- bd2->bd_bucket = NULL;
- return;
- }
-
- rc = cfs_hash_bd_compare(bd1, bd2);
- if (!rc)
- bd2->bd_bucket = NULL;
- else if (rc > 0)
- swap(*bd1, *bd2); /* swap bd1 and bd2 */
-}
-
-void
-cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
- struct cfs_hash_bd *bds)
-{
- /* NB: caller should hold hs_lock.rw if REHASH is set */
- cfs_hash_bd_from_key(hs, hs->hs_buckets,
- hs->hs_cur_bits, key, &bds[0]);
- if (likely(!hs->hs_rehash_buckets)) {
- /* no rehash or not rehashing */
- bds[1].bd_bucket = NULL;
- return;
- }
-
- LASSERT(hs->hs_rehash_bits);
- cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
- hs->hs_rehash_bits, key, &bds[1]);
-
- cfs_hash_bd_order(&bds[0], &bds[1]);
-}
-
-void
-cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
-{
- cfs_hash_multi_bd_lock(hs, bds, 2, excl);
-}
-
-void
-cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
-{
- cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
-}
-
-struct hlist_node *
-cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- const void *key)
-{
- return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
-}
-
-struct hlist_node *
-cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- const void *key, struct hlist_node *hnode,
- int noref)
-{
- return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
- hnode, noref);
-}
-
-struct hlist_node *
-cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- const void *key, struct hlist_node *hnode)
-{
- return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
-}
-
-static void
-cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
- int bkt_size, int prev_size, int size)
-{
- int i;
-
- for (i = prev_size; i < size; i++)
- kfree(buckets[i]);
-
- kvfree(buckets);
-}
-
-/*
- * Create or grow bucket memory. Return old_buckets if no allocation was
- * needed, the newly allocated buckets if allocation was needed and
- * successful, and NULL on error.
- */
-static struct cfs_hash_bucket **
-cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
- unsigned int old_size, unsigned int new_size)
-{
- struct cfs_hash_bucket **new_bkts;
- int i;
-
- LASSERT(!old_size || old_bkts);
-
- if (old_bkts && old_size == new_size)
- return old_bkts;
-
- new_bkts = kvmalloc_array(new_size, sizeof(new_bkts[0]), GFP_KERNEL);
- if (!new_bkts)
- return NULL;
-
- if (old_bkts) {
- memcpy(new_bkts, old_bkts,
- min(old_size, new_size) * sizeof(*old_bkts));
- }
-
- for (i = old_size; i < new_size; i++) {
- struct hlist_head *hhead;
- struct cfs_hash_bd bd;
-
- new_bkts[i] = kzalloc(cfs_hash_bkt_size(hs), GFP_KERNEL);
- if (!new_bkts[i]) {
- cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
- old_size, new_size);
- return NULL;
- }
-
- new_bkts[i]->hsb_index = i;
- new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
- new_bkts[i]->hsb_depmax = -1; /* unknown */
- bd.bd_bucket = new_bkts[i];
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
- INIT_HLIST_HEAD(hhead);
-
- if (cfs_hash_with_no_lock(hs) ||
- cfs_hash_with_no_bktlock(hs))
- continue;
-
- if (cfs_hash_with_rw_bktlock(hs))
- rwlock_init(&new_bkts[i]->hsb_lock.rw);
- else if (cfs_hash_with_spin_bktlock(hs))
- spin_lock_init(&new_bkts[i]->hsb_lock.spin);
- else
- LBUG(); /* invalid use-case */
- }
- return new_bkts;
-}
-
-/**
- * Initialize new libcfs hash, where:
- * @name - Descriptive hash name
- * @cur_bits - Initial hash table size, in bits
- * @max_bits - Maximum allowed hash table resize, in bits
- * @ops - Registered hash table operations
- * @flags - CFS_HASH_REHASH enable synamic hash resizing
- * - CFS_HASH_SORT enable chained hash sort
- */
-static void cfs_hash_rehash_worker(struct work_struct *work);
-
-#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static void cfs_hash_dep_print(struct work_struct *work)
-{
- struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
- int dep;
- int bkt;
- int off;
- int bits;
-
- spin_lock(&hs->hs_dep_lock);
- dep = hs->hs_dep_max;
- bkt = hs->hs_dep_bkt;
- off = hs->hs_dep_off;
- bits = hs->hs_dep_bits;
- spin_unlock(&hs->hs_dep_lock);
-
- LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
- hs->hs_name, bits, dep, bkt, off);
- spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_bits = 0; /* mark as workitem done */
- spin_unlock(&hs->hs_dep_lock);
- return 0;
-}
-
-static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
-{
- spin_lock_init(&hs->hs_dep_lock);
- INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
-}
-
-static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
-{
- cancel_work_sync(&hs->hs_dep_work);
-}
-
-#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
-
-static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
-static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
-
-#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
-
-struct cfs_hash *
-cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
- unsigned int bkt_bits, unsigned int extra_bytes,
- unsigned int min_theta, unsigned int max_theta,
- struct cfs_hash_ops *ops, unsigned int flags)
-{
- struct cfs_hash *hs;
- int len;
-
- BUILD_BUG_ON(CFS_HASH_THETA_BITS >= 15);
-
- LASSERT(name);
- LASSERT(ops->hs_key);
- LASSERT(ops->hs_hash);
- LASSERT(ops->hs_object);
- LASSERT(ops->hs_keycmp);
- LASSERT(ops->hs_get);
- LASSERT(ops->hs_put || ops->hs_put_locked);
-
- if (flags & CFS_HASH_REHASH)
- flags |= CFS_HASH_COUNTER; /* must have counter */
-
- LASSERT(cur_bits > 0);
- LASSERT(cur_bits >= bkt_bits);
- LASSERT(max_bits >= cur_bits && max_bits < 31);
- LASSERT(ergo(!(flags & CFS_HASH_REHASH), cur_bits == max_bits));
- LASSERT(ergo(flags & CFS_HASH_REHASH, !(flags & CFS_HASH_NO_LOCK)));
- LASSERT(ergo(flags & CFS_HASH_REHASH_KEY, ops->hs_keycpy));
-
- len = !(flags & CFS_HASH_BIGNAME) ?
- CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
- hs = kzalloc(offsetof(struct cfs_hash, hs_name[len]), GFP_KERNEL);
- if (!hs)
- return NULL;
-
- strlcpy(hs->hs_name, name, len);
- hs->hs_flags = flags;
-
- atomic_set(&hs->hs_refcount, 1);
- atomic_set(&hs->hs_count, 0);
-
- cfs_hash_lock_setup(hs);
- cfs_hash_hlist_setup(hs);
-
- hs->hs_cur_bits = (u8)cur_bits;
- hs->hs_min_bits = (u8)cur_bits;
- hs->hs_max_bits = (u8)max_bits;
- hs->hs_bkt_bits = (u8)bkt_bits;
-
- hs->hs_ops = ops;
- hs->hs_extra_bytes = extra_bytes;
- hs->hs_rehash_bits = 0;
- INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
- cfs_hash_depth_wi_init(hs);
-
- if (cfs_hash_with_rehash(hs))
- __cfs_hash_set_theta(hs, min_theta, max_theta);
-
- hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
- CFS_HASH_NBKT(hs));
- if (hs->hs_buckets)
- return hs;
-
- kfree(hs);
- return NULL;
-}
-EXPORT_SYMBOL(cfs_hash_create);
-
-/**
- * Cleanup libcfs hash @hs.
- */
-static void
-cfs_hash_destroy(struct cfs_hash *hs)
-{
- struct hlist_node *hnode;
- struct hlist_node *pos;
- struct cfs_hash_bd bd;
- int i;
-
- LASSERT(hs);
- LASSERT(!cfs_hash_is_exiting(hs) &&
- !cfs_hash_is_iterating(hs));
-
- /**
- * prohibit further rehashes, don't need any lock because
- * I'm the only (last) one can change it.
- */
- hs->hs_exiting = 1;
- if (cfs_hash_with_rehash(hs))
- cfs_hash_rehash_cancel(hs);
-
- cfs_hash_depth_wi_cancel(hs);
- /* rehash should be done/canceled */
- LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets);
-
- cfs_hash_for_each_bucket(hs, &bd, i) {
- struct hlist_head *hhead;
-
- LASSERT(bd.bd_bucket);
- /* no need to take this lock, just for consistent code */
- cfs_hash_bd_lock(hs, &bd, 1);
-
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- hlist_for_each_safe(hnode, pos, hhead) {
- LASSERTF(!cfs_hash_with_assert_empty(hs),
- "hash %s bucket %u(%u) is not empty: %u items left\n",
- hs->hs_name, bd.bd_bucket->hsb_index,
- bd.bd_offset, bd.bd_bucket->hsb_count);
- /* can't assert key valicate, because we
- * can interrupt rehash
- */
- cfs_hash_bd_del_locked(hs, &bd, hnode);
- cfs_hash_exit(hs, hnode);
- }
- }
- LASSERT(!bd.bd_bucket->hsb_count);
- cfs_hash_bd_unlock(hs, &bd, 1);
- cond_resched();
- }
-
- LASSERT(!atomic_read(&hs->hs_count));
-
- cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
- 0, CFS_HASH_NBKT(hs));
- i = cfs_hash_with_bigname(hs) ?
- CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
- kfree(hs);
-}
-
-struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
-{
- if (atomic_inc_not_zero(&hs->hs_refcount))
- return hs;
- return NULL;
-}
-EXPORT_SYMBOL(cfs_hash_getref);
-
-void cfs_hash_putref(struct cfs_hash *hs)
-{
- if (atomic_dec_and_test(&hs->hs_refcount))
- cfs_hash_destroy(hs);
-}
-EXPORT_SYMBOL(cfs_hash_putref);
-
-static inline int
-cfs_hash_rehash_bits(struct cfs_hash *hs)
-{
- if (cfs_hash_with_no_lock(hs) ||
- !cfs_hash_with_rehash(hs))
- return -EOPNOTSUPP;
-
- if (unlikely(cfs_hash_is_exiting(hs)))
- return -ESRCH;
-
- if (unlikely(cfs_hash_is_rehashing(hs)))
- return -EALREADY;
-
- if (unlikely(cfs_hash_is_iterating(hs)))
- return -EAGAIN;
-
- /* XXX: need to handle case with max_theta != 2.0
- * and the case with min_theta != 0.5
- */
- if ((hs->hs_cur_bits < hs->hs_max_bits) &&
- (__cfs_hash_theta(hs) > hs->hs_max_theta))
- return hs->hs_cur_bits + 1;
-
- if (!cfs_hash_with_shrink(hs))
- return 0;
-
- if ((hs->hs_cur_bits > hs->hs_min_bits) &&
- (__cfs_hash_theta(hs) < hs->hs_min_theta))
- return hs->hs_cur_bits - 1;
-
- return 0;
-}
-
-/**
- * don't allow inline rehash if:
- * - user wants non-blocking change (add/del) on hash table
- * - too many elements
- */
-static inline int
-cfs_hash_rehash_inline(struct cfs_hash *hs)
-{
- return !cfs_hash_with_nblk_change(hs) &&
- atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
-}
-
-/**
- * Add item @hnode to libcfs hash @hs using @key. The registered
- * ops->hs_get function will be called when the item is added.
- */
-void
-cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
-{
- struct cfs_hash_bd bd;
- int bits;
-
- LASSERT(hlist_unhashed(hnode));
-
- cfs_hash_lock(hs, 0);
- cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
-
- cfs_hash_key_validate(hs, key, hnode);
- cfs_hash_bd_add_locked(hs, &bd, hnode);
-
- cfs_hash_bd_unlock(hs, &bd, 1);
-
- bits = cfs_hash_rehash_bits(hs);
- cfs_hash_unlock(hs, 0);
- if (bits > 0)
- cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
-}
-EXPORT_SYMBOL(cfs_hash_add);
-
-static struct hlist_node *
-cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode, int noref)
-{
- struct hlist_node *ehnode;
- struct cfs_hash_bd bds[2];
- int bits = 0;
-
- LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);
-
- cfs_hash_lock(hs, 0);
- cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
-
- cfs_hash_key_validate(hs, key, hnode);
- ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
- hnode, noref);
- cfs_hash_dual_bd_unlock(hs, bds, 1);
-
- if (ehnode == hnode) /* new item added */
- bits = cfs_hash_rehash_bits(hs);
- cfs_hash_unlock(hs, 0);
- if (bits > 0)
- cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
-
- return ehnode;
-}
-
-/**
- * Add item @hnode to libcfs hash @hs using @key. The registered
- * ops->hs_get function will be called if the item was added.
- * Returns 0 on success or -EALREADY on key collisions.
- */
-int
-cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode)
-{
- return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
- -EALREADY : 0;
-}
-EXPORT_SYMBOL(cfs_hash_add_unique);
-
-/**
- * Add item @hnode to libcfs hash @hs using @key. If this @key
- * already exists in the hash then ops->hs_get will be called on the
- * conflicting entry and that entry will be returned to the caller.
- * Otherwise ops->hs_get is called on the item which was added.
- */
-void *
-cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode)
-{
- hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
-
- return cfs_hash_object(hs, hnode);
-}
-EXPORT_SYMBOL(cfs_hash_findadd_unique);
-
-/**
- * Delete item @hnode from the libcfs hash @hs using @key. The @key
- * is required to ensure the correct hash bucket is locked since there
- * is no direct linkage from the item to the bucket. The object
- * removed from the hash will be returned and obs->hs_put is called
- * on the removed object.
- */
-void *
-cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
-{
- void *obj = NULL;
- int bits = 0;
- struct cfs_hash_bd bds[2];
-
- cfs_hash_lock(hs, 0);
- cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
-
- /* NB: do nothing if @hnode is not in hash table */
- if (!hnode || !hlist_unhashed(hnode)) {
- if (!bds[1].bd_bucket && hnode) {
- cfs_hash_bd_del_locked(hs, &bds[0], hnode);
- } else {
- hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
- key, hnode);
- }
- }
-
- if (hnode) {
- obj = cfs_hash_object(hs, hnode);
- bits = cfs_hash_rehash_bits(hs);
- }
-
- cfs_hash_dual_bd_unlock(hs, bds, 1);
- cfs_hash_unlock(hs, 0);
- if (bits > 0)
- cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
-
- return obj;
-}
-EXPORT_SYMBOL(cfs_hash_del);
-
-/**
- * Delete item given @key in libcfs hash @hs. The first @key found in
- * the hash will be removed, if the key exists multiple times in the hash
- * @hs this function must be called once per key. The removed object
- * will be returned and ops->hs_put is called on the removed object.
- */
-void *
-cfs_hash_del_key(struct cfs_hash *hs, const void *key)
-{
- return cfs_hash_del(hs, key, NULL);
-}
-EXPORT_SYMBOL(cfs_hash_del_key);
-
-/**
- * Lookup an item using @key in the libcfs hash @hs and return it.
- * If the @key is found in the hash hs->hs_get() is called and the
- * matching objects is returned. It is the callers responsibility
- * to call the counterpart ops->hs_put using the cfs_hash_put() macro
- * when when finished with the object. If the @key was not found
- * in the hash @hs NULL is returned.
- */
-void *
-cfs_hash_lookup(struct cfs_hash *hs, const void *key)
-{
- void *obj = NULL;
- struct hlist_node *hnode;
- struct cfs_hash_bd bds[2];
-
- cfs_hash_lock(hs, 0);
- cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
-
- hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
- if (hnode)
- obj = cfs_hash_object(hs, hnode);
-
- cfs_hash_dual_bd_unlock(hs, bds, 0);
- cfs_hash_unlock(hs, 0);
-
- return obj;
-}
-EXPORT_SYMBOL(cfs_hash_lookup);
-
-static void
-cfs_hash_for_each_enter(struct cfs_hash *hs)
-{
- LASSERT(!cfs_hash_is_exiting(hs));
-
- if (!cfs_hash_with_rehash(hs))
- return;
- /*
- * NB: it's race on cfs_has_t::hs_iterating, but doesn't matter
- * because it's just an unreliable signal to rehash-thread,
- * rehash-thread will try to finish rehash ASAP when seeing this.
- */
- hs->hs_iterating = 1;
-
- cfs_hash_lock(hs, 1);
- hs->hs_iterators++;
- cfs_hash_unlock(hs, 1);
-
- /* NB: iteration is mostly called by service thread,
- * we tend to cancel pending rehash-request, instead of
- * blocking service thread, we will relaunch rehash request
- * after iteration
- */
- if (cfs_hash_is_rehashing(hs))
- cfs_hash_rehash_cancel(hs);
-}
-
-static void
-cfs_hash_for_each_exit(struct cfs_hash *hs)
-{
- int remained;
- int bits;
-
- if (!cfs_hash_with_rehash(hs))
- return;
- cfs_hash_lock(hs, 1);
- remained = --hs->hs_iterators;
- bits = cfs_hash_rehash_bits(hs);
- cfs_hash_unlock(hs, 1);
- /* NB: it's race on cfs_has_t::hs_iterating, see above */
- if (!remained)
- hs->hs_iterating = 0;
- if (bits > 0) {
- cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
- CFS_HASH_LOOP_HOG);
- }
-}
-
-/**
- * For each item in the libcfs hash @hs call the passed callback @func
- * and pass to it as an argument each hash item and the private @data.
- *
- * a) the function may sleep!
- * b) during the callback:
- * . the bucket lock is held so the callback must never sleep.
- * . if @removal_safe is true, use can remove current item by
- * cfs_hash_bd_del_locked
- */
-static u64
-cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data, int remove_safe)
-{
- struct hlist_node *hnode;
- struct hlist_node *pos;
- struct cfs_hash_bd bd;
- u64 count = 0;
- int excl = !!remove_safe;
- int loop = 0;
- int i;
-
- cfs_hash_for_each_enter(hs);
-
- cfs_hash_lock(hs, 0);
- LASSERT(!cfs_hash_is_rehashing(hs));
-
- cfs_hash_for_each_bucket(hs, &bd, i) {
- struct hlist_head *hhead;
-
- cfs_hash_bd_lock(hs, &bd, excl);
- if (!func) { /* only glimpse size */
- count += bd.bd_bucket->hsb_count;
- cfs_hash_bd_unlock(hs, &bd, excl);
- continue;
- }
-
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- hlist_for_each_safe(hnode, pos, hhead) {
- cfs_hash_bucket_validate(hs, &bd, hnode);
- count++;
- loop++;
- if (func(hs, &bd, hnode, data)) {
- cfs_hash_bd_unlock(hs, &bd, excl);
- goto out;
- }
- }
- }
- cfs_hash_bd_unlock(hs, &bd, excl);
- if (loop < CFS_HASH_LOOP_HOG)
- continue;
- loop = 0;
- cfs_hash_unlock(hs, 0);
- cond_resched();
- cfs_hash_lock(hs, 0);
- }
- out:
- cfs_hash_unlock(hs, 0);
-
- cfs_hash_for_each_exit(hs);
- return count;
-}
-
-struct cfs_hash_cond_arg {
- cfs_hash_cond_opt_cb_t func;
- void *arg;
-};
-
-static int
-cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *data)
-{
- struct cfs_hash_cond_arg *cond = data;
-
- if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
- cfs_hash_bd_del_locked(hs, bd, hnode);
- return 0;
-}
-
-/**
- * Delete item from the libcfs hash @hs when @func return true.
- * The write lock being hold during loop for each bucket to avoid
- * any object be reference.
- */
-void
-cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
-{
- struct cfs_hash_cond_arg arg = {
- .func = func,
- .arg = data,
- };
-
- cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
-}
-EXPORT_SYMBOL(cfs_hash_cond_del);
-
-void
-cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data)
-{
- cfs_hash_for_each_tight(hs, func, data, 0);
-}
-EXPORT_SYMBOL(cfs_hash_for_each);
-
-void
-cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data)
-{
- cfs_hash_for_each_tight(hs, func, data, 1);
-}
-EXPORT_SYMBOL(cfs_hash_for_each_safe);
-
-static int
-cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *data)
-{
- *(int *)data = 0;
- return 1; /* return 1 to break the loop */
-}
-
-int
-cfs_hash_is_empty(struct cfs_hash *hs)
-{
- int empty = 1;
-
- cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
- return empty;
-}
-EXPORT_SYMBOL(cfs_hash_is_empty);
-
-u64
-cfs_hash_size_get(struct cfs_hash *hs)
-{
- return cfs_hash_with_counter(hs) ?
- atomic_read(&hs->hs_count) :
- cfs_hash_for_each_tight(hs, NULL, NULL, 0);
-}
-EXPORT_SYMBOL(cfs_hash_size_get);
-
-/*
- * cfs_hash_for_each_relax:
- * Iterate the hash table and call @func on each item without
- * any lock. This function can't guarantee to finish iteration
- * if these features are enabled:
- *
- * a. if rehash_key is enabled, an item can be moved from
- * one bucket to another bucket
- * b. user can remove non-zero-ref item from hash-table,
- * so the item can be removed from hash-table, even worse,
- * it's possible that user changed key and insert to another
- * hash bucket.
- * there's no way for us to finish iteration correctly on previous
- * two cases, so iteration has to be stopped on change.
- */
-static int
-cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data, int start)
-{
- struct hlist_node *next = NULL;
- struct hlist_node *hnode;
- struct cfs_hash_bd bd;
- u32 version;
- int count = 0;
- int stop_on_change;
- int has_put_locked;
- int end = -1;
- int rc = 0;
- int i;
-
- stop_on_change = cfs_hash_with_rehash_key(hs) ||
- !cfs_hash_with_no_itemref(hs);
- has_put_locked = hs->hs_ops->hs_put_locked != NULL;
- cfs_hash_lock(hs, 0);
-again:
- LASSERT(!cfs_hash_is_rehashing(hs));
-
- cfs_hash_for_each_bucket(hs, &bd, i) {
- struct hlist_head *hhead;
-
- if (i < start)
- continue;
- else if (end > 0 && i >= end)
- break;
-
- cfs_hash_bd_lock(hs, &bd, 0);
- version = cfs_hash_bd_version_get(&bd);
-
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- hnode = hhead->first;
- if (!hnode)
- continue;
- cfs_hash_get(hs, hnode);
-
- for (; hnode; hnode = next) {
- cfs_hash_bucket_validate(hs, &bd, hnode);
- next = hnode->next;
- if (next)
- cfs_hash_get(hs, next);
- cfs_hash_bd_unlock(hs, &bd, 0);
- cfs_hash_unlock(hs, 0);
-
- rc = func(hs, &bd, hnode, data);
- if (stop_on_change || !has_put_locked)
- cfs_hash_put(hs, hnode);
- cond_resched();
- count++;
-
- cfs_hash_lock(hs, 0);
- cfs_hash_bd_lock(hs, &bd, 0);
- if (stop_on_change) {
- if (version !=
- cfs_hash_bd_version_get(&bd))
- rc = -EINTR;
- } else if (has_put_locked) {
- cfs_hash_put_locked(hs, hnode);
- }
- if (rc) /* callback wants to break iteration */
- break;
- }
- if (next) {
- if (has_put_locked) {
- cfs_hash_put_locked(hs, next);
- next = NULL;
- }
- break;
- } else if (rc) {
- break;
- }
- }
- cfs_hash_bd_unlock(hs, &bd, 0);
- if (next && !has_put_locked) {
- cfs_hash_put(hs, next);
- next = NULL;
- }
- if (rc) /* callback wants to break iteration */
- break;
- }
- if (start > 0 && !rc) {
- end = start;
- start = 0;
- goto again;
- }
-
- cfs_hash_unlock(hs, 0);
- return count;
-}
-
-int
-cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data, int start)
-{
- if (cfs_hash_with_no_lock(hs) ||
- cfs_hash_with_rehash_key(hs) ||
- !cfs_hash_with_no_itemref(hs))
- return -EOPNOTSUPP;
-
- if (!hs->hs_ops->hs_get ||
- (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
- return -EOPNOTSUPP;
-
- cfs_hash_for_each_enter(hs);
- cfs_hash_for_each_relax(hs, func, data, start);
- cfs_hash_for_each_exit(hs);
-
- return 0;
-}
-EXPORT_SYMBOL(cfs_hash_for_each_nolock);
-
-/**
- * For each hash bucket in the libcfs hash @hs call the passed callback
- * @func until all the hash buckets are empty. The passed callback @func
- * or the previously registered callback hs->hs_put must remove the item
- * from the hash. You may either use the cfs_hash_del() or hlist_del()
- * functions. No rwlocks will be held during the callback @func it is
- * safe to sleep if needed. This function will not terminate until the
- * hash is empty. Note it is still possible to concurrently add new
- * items in to the hash. It is the callers responsibility to ensure
- * the required locking is in place to prevent concurrent insertions.
- */
-int
-cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data)
-{
- unsigned int i = 0;
-
- if (cfs_hash_with_no_lock(hs))
- return -EOPNOTSUPP;
-
- if (!hs->hs_ops->hs_get ||
- (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
- return -EOPNOTSUPP;
-
- cfs_hash_for_each_enter(hs);
- while (cfs_hash_for_each_relax(hs, func, data, 0)) {
- CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
- hs->hs_name, i++);
- }
- cfs_hash_for_each_exit(hs);
- return 0;
-}
-EXPORT_SYMBOL(cfs_hash_for_each_empty);
-
-void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
- cfs_hash_for_each_cb_t func, void *data)
-{
- struct hlist_head *hhead;
- struct hlist_node *hnode;
- struct cfs_hash_bd bd;
-
- cfs_hash_for_each_enter(hs);
- cfs_hash_lock(hs, 0);
- if (hindex >= CFS_HASH_NHLIST(hs))
- goto out;
-
- cfs_hash_bd_index_set(hs, hindex, &bd);
-
- cfs_hash_bd_lock(hs, &bd, 0);
- hhead = cfs_hash_bd_hhead(hs, &bd);
- hlist_for_each(hnode, hhead) {
- if (func(hs, &bd, hnode, data))
- break;
- }
- cfs_hash_bd_unlock(hs, &bd, 0);
-out:
- cfs_hash_unlock(hs, 0);
- cfs_hash_for_each_exit(hs);
-}
-EXPORT_SYMBOL(cfs_hash_hlist_for_each);
-
-/*
- * For each item in the libcfs hash @hs which matches the @key call
- * the passed callback @func and pass to it as an argument each hash
- * item and the private @data. During the callback the bucket lock
- * is held so the callback must never sleep.
- */
-void
-cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
- cfs_hash_for_each_cb_t func, void *data)
-{
- struct hlist_node *hnode;
- struct cfs_hash_bd bds[2];
- unsigned int i;
-
- cfs_hash_lock(hs, 0);
-
- cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
-
- cfs_hash_for_each_bd(bds, 2, i) {
- struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
-
- hlist_for_each(hnode, hlist) {
- cfs_hash_bucket_validate(hs, &bds[i], hnode);
-
- if (cfs_hash_keycmp(hs, key, hnode)) {
- if (func(hs, &bds[i], hnode, data))
- break;
- }
- }
- }
-
- cfs_hash_dual_bd_unlock(hs, bds, 0);
- cfs_hash_unlock(hs, 0);
-}
-EXPORT_SYMBOL(cfs_hash_for_each_key);
-
-/**
- * Rehash the libcfs hash @hs to the given @bits. This can be used
- * to grow the hash size when excessive chaining is detected, or to
- * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
- * flag is set in @hs the libcfs hash may be dynamically rehashed
- * during addition or removal if the hash's theta value exceeds
- * either the hs->hs_min_theta or hs->max_theta values. By default
- * these values are tuned to keep the chained hash depth small, and
- * this approach assumes a reasonably uniform hashing function. The
- * theta thresholds for @hs are tunable via cfs_hash_set_theta().
- */
-void
-cfs_hash_rehash_cancel(struct cfs_hash *hs)
-{
- LASSERT(cfs_hash_with_rehash(hs));
- cancel_work_sync(&hs->hs_rehash_work);
-}
-
-void
-cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
-{
- int rc;
-
- LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
-
- cfs_hash_lock(hs, 1);
-
- rc = cfs_hash_rehash_bits(hs);
- if (rc <= 0) {
- cfs_hash_unlock(hs, 1);
- return;
- }
-
- hs->hs_rehash_bits = rc;
- if (!do_rehash) {
- /* launch and return */
- queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
- cfs_hash_unlock(hs, 1);
- return;
- }
-
- /* rehash right now */
- cfs_hash_unlock(hs, 1);
-
- cfs_hash_rehash_worker(&hs->hs_rehash_work);
-}
-
-static int
-cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
-{
- struct cfs_hash_bd new;
- struct hlist_head *hhead;
- struct hlist_node *hnode;
- struct hlist_node *pos;
- void *key;
- int c = 0;
-
- /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
- cfs_hash_bd_for_each_hlist(hs, old, hhead) {
- hlist_for_each_safe(hnode, pos, hhead) {
- key = cfs_hash_key(hs, hnode);
- LASSERT(key);
- /* Validate hnode is in the correct bucket. */
- cfs_hash_bucket_validate(hs, old, hnode);
- /*
- * Delete from old hash bucket; move to new bucket.
- * ops->hs_key must be defined.
- */
- cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
- hs->hs_rehash_bits, key, &new);
- cfs_hash_bd_move_locked(hs, old, &new, hnode);
- c++;
- }
- }
-
- return c;
-}
-
-static void
-cfs_hash_rehash_worker(struct work_struct *work)
-{
- struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_rehash_work);
- struct cfs_hash_bucket **bkts;
- struct cfs_hash_bd bd;
- unsigned int old_size;
- unsigned int new_size;
- int bsize;
- int count = 0;
- int rc = 0;
- int i;
-
- LASSERT(hs && cfs_hash_with_rehash(hs));
-
- cfs_hash_lock(hs, 0);
- LASSERT(cfs_hash_is_rehashing(hs));
-
- old_size = CFS_HASH_NBKT(hs);
- new_size = CFS_HASH_RH_NBKT(hs);
-
- cfs_hash_unlock(hs, 0);
-
- /*
- * don't need hs::hs_rwlock for hs::hs_buckets,
- * because nobody can change bkt-table except me.
- */
- bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
- old_size, new_size);
- cfs_hash_lock(hs, 1);
- if (!bkts) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (bkts == hs->hs_buckets) {
- bkts = NULL; /* do nothing */
- goto out;
- }
-
- rc = __cfs_hash_theta(hs);
- if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
- /* free the new allocated bkt-table */
- old_size = new_size;
- new_size = CFS_HASH_NBKT(hs);
- rc = -EALREADY;
- goto out;
- }
-
- LASSERT(!hs->hs_rehash_buckets);
- hs->hs_rehash_buckets = bkts;
-
- rc = 0;
- cfs_hash_for_each_bucket(hs, &bd, i) {
- if (cfs_hash_is_exiting(hs)) {
- rc = -ESRCH;
- /* someone wants to destroy the hash, abort now */
- if (old_size < new_size) /* OK to free old bkt-table */
- break;
- /* it's shrinking, need free new bkt-table */
- hs->hs_rehash_buckets = NULL;
- old_size = new_size;
- new_size = CFS_HASH_NBKT(hs);
- goto out;
- }
-
- count += cfs_hash_rehash_bd(hs, &bd);
- if (count < CFS_HASH_LOOP_HOG ||
- cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
- continue;
- }
-
- count = 0;
- cfs_hash_unlock(hs, 1);
- cond_resched();
- cfs_hash_lock(hs, 1);
- }
-
- hs->hs_rehash_count++;
-
- bkts = hs->hs_buckets;
- hs->hs_buckets = hs->hs_rehash_buckets;
- hs->hs_rehash_buckets = NULL;
-
- hs->hs_cur_bits = hs->hs_rehash_bits;
-out:
- hs->hs_rehash_bits = 0;
- bsize = cfs_hash_bkt_size(hs);
- cfs_hash_unlock(hs, 1);
- /* can't refer to @hs anymore because it could be destroyed */
- if (bkts)
- cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
- if (rc)
- CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
-}
-
-/**
- * Rehash the object referenced by @hnode in the libcfs hash @hs. The
- * @old_key must be provided to locate the objects previous location
- * in the hash, and the @new_key will be used to reinsert the object.
- * Use this function instead of a cfs_hash_add() + cfs_hash_del()
- * combo when it is critical that there is no window in time where the
- * object is missing from the hash. When an object is being rehashed
- * the registered cfs_hash_get() and cfs_hash_put() functions will
- * not be called.
- */
-void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
- void *new_key, struct hlist_node *hnode)
-{
- struct cfs_hash_bd bds[3];
- struct cfs_hash_bd old_bds[2];
- struct cfs_hash_bd new_bd;
-
- LASSERT(!hlist_unhashed(hnode));
-
- cfs_hash_lock(hs, 0);
-
- cfs_hash_dual_bd_get(hs, old_key, old_bds);
- cfs_hash_bd_get(hs, new_key, &new_bd);
-
- bds[0] = old_bds[0];
- bds[1] = old_bds[1];
- bds[2] = new_bd;
-
- /* NB: bds[0] and bds[1] are ordered already */
- cfs_hash_bd_order(&bds[1], &bds[2]);
- cfs_hash_bd_order(&bds[0], &bds[1]);
-
- cfs_hash_multi_bd_lock(hs, bds, 3, 1);
- if (likely(!old_bds[1].bd_bucket)) {
- cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
- } else {
- cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
- cfs_hash_bd_add_locked(hs, &new_bd, hnode);
- }
- /* overwrite key inside locks, otherwise may screw up with
- * other operations, i.e: rehash
- */
- cfs_hash_keycpy(hs, hnode, new_key);
-
- cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
- cfs_hash_unlock(hs, 0);
-}
-EXPORT_SYMBOL(cfs_hash_rehash_key);
-
-void cfs_hash_debug_header(struct seq_file *m)
-{
- seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n",
- CFS_HASH_BIGNAME_LEN, "name");
-}
-EXPORT_SYMBOL(cfs_hash_debug_header);
-
-static struct cfs_hash_bucket **
-cfs_hash_full_bkts(struct cfs_hash *hs)
-{
- /* NB: caller should hold hs->hs_rwlock if REHASH is set */
- if (!hs->hs_rehash_buckets)
- return hs->hs_buckets;
-
- LASSERT(hs->hs_rehash_bits);
- return hs->hs_rehash_bits > hs->hs_cur_bits ?
- hs->hs_rehash_buckets : hs->hs_buckets;
-}
-
-static unsigned int
-cfs_hash_full_nbkt(struct cfs_hash *hs)
-{
- /* NB: caller should hold hs->hs_rwlock if REHASH is set */
- if (!hs->hs_rehash_buckets)
- return CFS_HASH_NBKT(hs);
-
- LASSERT(hs->hs_rehash_bits);
- return hs->hs_rehash_bits > hs->hs_cur_bits ?
- CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
-}
-
-void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
-{
- int dist[8] = { 0, };
- int maxdep = -1;
- int maxdepb = -1;
- int total = 0;
- int theta;
- int i;
-
- cfs_hash_lock(hs, 0);
- theta = __cfs_hash_theta(hs);
-
- seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
- CFS_HASH_BIGNAME_LEN, hs->hs_name,
- 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
- 1 << hs->hs_max_bits,
- __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
- __cfs_hash_theta_int(hs->hs_min_theta),
- __cfs_hash_theta_frac(hs->hs_min_theta),
- __cfs_hash_theta_int(hs->hs_max_theta),
- __cfs_hash_theta_frac(hs->hs_max_theta),
- hs->hs_flags, hs->hs_rehash_count);
-
- /*
- * The distribution is a summary of the chained hash depth in
- * each of the libcfs hash buckets. Each buckets hsb_count is
- * divided by the hash theta value and used to generate a
- * histogram of the hash distribution. A uniform hash will
- * result in all hash buckets being close to the average thus
- * only the first few entries in the histogram will be non-zero.
- * If you hash function results in a non-uniform hash the will
- * be observable by outlier bucks in the distribution histogram.
- *
- * Uniform hash distribution: 128/128/0/0/0/0/0/0
- * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
- */
- for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
- struct cfs_hash_bd bd;
-
- bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
- cfs_hash_bd_lock(hs, &bd, 0);
- if (maxdep < bd.bd_bucket->hsb_depmax) {
- maxdep = bd.bd_bucket->hsb_depmax;
- maxdepb = ffz(~maxdep);
- }
- total += bd.bd_bucket->hsb_count;
- dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
- cfs_hash_bd_unlock(hs, &bd, 0);
- }
-
- seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
- for (i = 0; i < 8; i++)
- seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');
-
- cfs_hash_unlock(hs, 0);
-}
-EXPORT_SYMBOL(cfs_hash_debug_str);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
deleted file mode 100644
index 76291a350406..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
+++ /dev/null
@@ -1,228 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for introduction
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-
-/** Global CPU partition table */
-struct cfs_cpt_table *cfs_cpt_table __read_mostly;
-EXPORT_SYMBOL(cfs_cpt_table);
-
-#ifndef HAVE_LIBCFS_CPT
-
-#define CFS_CPU_VERSION_MAGIC 0xbabecafe
-
-struct cfs_cpt_table *
-cfs_cpt_table_alloc(unsigned int ncpt)
-{
- struct cfs_cpt_table *cptab;
-
- if (ncpt != 1) {
- CERROR("Can't support cpu partition number %d\n", ncpt);
- return NULL;
- }
-
- cptab = kzalloc(sizeof(*cptab), GFP_NOFS);
- if (cptab) {
- cptab->ctb_version = CFS_CPU_VERSION_MAGIC;
- node_set(0, cptab->ctb_nodemask);
- cptab->ctb_nparts = ncpt;
- }
-
- return cptab;
-}
-EXPORT_SYMBOL(cfs_cpt_table_alloc);
-
-void
-cfs_cpt_table_free(struct cfs_cpt_table *cptab)
-{
- LASSERT(cptab->ctb_version == CFS_CPU_VERSION_MAGIC);
-
- kfree(cptab);
-}
-EXPORT_SYMBOL(cfs_cpt_table_free);
-
-#ifdef CONFIG_SMP
-int
-cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
-{
- int rc;
-
- rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
- len -= rc;
- if (len <= 0)
- return -EFBIG;
-
- return rc;
-}
-EXPORT_SYMBOL(cfs_cpt_table_print);
-#endif /* CONFIG_SMP */
-
-int
-cfs_cpt_number(struct cfs_cpt_table *cptab)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_number);
-
-int
-cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_weight);
-
-int
-cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_online);
-
-nodemask_t *
-cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
-{
- return &cptab->ctb_nodemask;
-}
-EXPORT_SYMBOL(cfs_cpt_nodemask);
-
-int
-cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_cpu);
-
-void
-cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_unset_cpu);
-
-int
-cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_cpumask);
-
-void
-cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
-
-int
-cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_node);
-
-void
-cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_unset_node);
-
-int
-cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_nodemask);
-
-void
-cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
-
-void
-cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_clear);
-
-int
-cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
-{
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_spread_node);
-
-int
-cfs_cpu_ht_nsiblings(int cpu)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpu_ht_nsiblings);
-
-int
-cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
-{
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_current);
-
-int
-cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
-{
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_of_cpu);
-
-int
-cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
-{
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_bind);
-
-void
-cfs_cpu_fini(void)
-{
- if (cfs_cpt_table) {
- cfs_cpt_table_free(cfs_cpt_table);
- cfs_cpt_table = NULL;
- }
-}
-
-int
-cfs_cpu_init(void)
-{
- cfs_cpt_table = cfs_cpt_table_alloc(1);
-
- return cfs_cpt_table ? 0 : -1;
-}
-
-#endif /* HAVE_LIBCFS_CPT */
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
deleted file mode 100644
index 670ad5a34224..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ /dev/null
@@ -1,152 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * GPL HEADER END
- */
-/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-
-/** destroy cpu-partition lock, see libcfs_private.h for more detail */
-void
-cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
-{
- LASSERT(pcl->pcl_locks);
- LASSERT(!pcl->pcl_locked);
-
- cfs_percpt_free(pcl->pcl_locks);
- kfree(pcl);
-}
-EXPORT_SYMBOL(cfs_percpt_lock_free);
-
-/**
- * create cpu-partition lock, see libcfs_private.h for more detail.
- *
- * cpu-partition lock is designed for large-scale SMP system, so we need to
- * reduce cacheline conflict as possible as we can, that's the
- * reason we always allocate cacheline-aligned memory block.
- */
-struct cfs_percpt_lock *
-cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
- struct lock_class_key *keys)
-{
- struct cfs_percpt_lock *pcl;
- spinlock_t *lock;
- int i;
-
- /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
- pcl = kzalloc(sizeof(*pcl), GFP_NOFS);
- if (!pcl)
- return NULL;
-
- pcl->pcl_cptab = cptab;
- pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock));
- if (!pcl->pcl_locks) {
- kfree(pcl);
- return NULL;
- }
-
- if (!keys)
- CWARN("Cannot setup class key for percpt lock, you may see recursive locking warnings which are actually fake.\n");
-
- cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
- spin_lock_init(lock);
- if (keys)
- lockdep_set_class(lock, &keys[i]);
- }
-
- return pcl;
-}
-EXPORT_SYMBOL(cfs_percpt_lock_create);
-
-/**
- * lock a CPU partition
- *
- * \a index != CFS_PERCPT_LOCK_EX
- * hold private lock indexed by \a index
- *
- * \a index == CFS_PERCPT_LOCK_EX
- * exclusively lock @pcl and nobody can take private lock
- */
-void
-cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
- __acquires(pcl->pcl_locks)
-{
- int ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int i;
-
- LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);
-
- if (ncpt == 1) {
- index = 0;
- } else { /* serialize with exclusive lock */
- while (pcl->pcl_locked)
- cpu_relax();
- }
-
- if (likely(index != CFS_PERCPT_LOCK_EX)) {
- spin_lock(pcl->pcl_locks[index]);
- return;
- }
-
- /* exclusive lock request */
- for (i = 0; i < ncpt; i++) {
- spin_lock(pcl->pcl_locks[i]);
- if (!i) {
- LASSERT(!pcl->pcl_locked);
- /* nobody should take private lock after this
- * so I wouldn't starve for too long time
- */
- pcl->pcl_locked = 1;
- }
- }
-}
-EXPORT_SYMBOL(cfs_percpt_lock);
-
-/** unlock a CPU partition */
-void
-cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
- __releases(pcl->pcl_locks)
-{
- int ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int i;
-
- index = ncpt == 1 ? 0 : index;
-
- if (likely(index != CFS_PERCPT_LOCK_EX)) {
- spin_unlock(pcl->pcl_locks[index]);
- return;
- }
-
- for (i = ncpt - 1; i >= 0; i--) {
- if (!i) {
- LASSERT(pcl->pcl_locked);
- pcl->pcl_locked = 0;
- }
- spin_unlock(pcl->pcl_locks[i]);
- }
-}
-EXPORT_SYMBOL(cfs_percpt_unlock);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
deleted file mode 100644
index 7faed94994ea..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ /dev/null
@@ -1,167 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-
-struct cfs_var_array {
- unsigned int va_count; /* # of buffers */
- unsigned int va_size; /* size of each var */
- struct cfs_cpt_table *va_cptab; /* cpu partition table */
- void *va_ptrs[0]; /* buffer addresses */
-};
-
-/*
- * free per-cpu data, see more detail in cfs_percpt_free
- */
-void
-cfs_percpt_free(void *vars)
-{
- struct cfs_var_array *arr;
- int i;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- for (i = 0; i < arr->va_count; i++)
- kfree(arr->va_ptrs[i]);
-
- kvfree(arr);
-}
-EXPORT_SYMBOL(cfs_percpt_free);
-
-/*
- * allocate per cpu-partition variables, returned value is an array of pointers,
- * variable can be indexed by CPU partition ID, i.e:
- *
- * arr = cfs_percpt_alloc(cfs_cpu_pt, size);
- * then caller can access memory block for CPU 0 by arr[0],
- * memory block for CPU 1 by arr[1]...
- * memory block for CPU N by arr[N]...
- *
- * cacheline aligned.
- */
-void *
-cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
-{
- struct cfs_var_array *arr;
- int count;
- int i;
-
- count = cfs_cpt_number(cptab);
-
- arr = kvzalloc(offsetof(struct cfs_var_array, va_ptrs[count]),
- GFP_KERNEL);
- if (!arr)
- return NULL;
-
- size = L1_CACHE_ALIGN(size);
- arr->va_size = size;
- arr->va_count = count;
- arr->va_cptab = cptab;
-
- for (i = 0; i < count; i++) {
- arr->va_ptrs[i] = kzalloc_node(size, GFP_KERNEL,
- cfs_cpt_spread_node(cptab, i));
- if (!arr->va_ptrs[i]) {
- cfs_percpt_free((void *)&arr->va_ptrs[0]);
- return NULL;
- }
- }
-
- return (void *)&arr->va_ptrs[0];
-}
-EXPORT_SYMBOL(cfs_percpt_alloc);
-
-/*
- * return number of CPUs (or number of elements in per-cpu data)
- * according to cptab of @vars
- */
-int
-cfs_percpt_number(void *vars)
-{
- struct cfs_var_array *arr;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- return arr->va_count;
-}
-EXPORT_SYMBOL(cfs_percpt_number);
-
-/*
- * free variable array, see more detail in cfs_array_alloc
- */
-void
-cfs_array_free(void *vars)
-{
- struct cfs_var_array *arr;
- int i;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- for (i = 0; i < arr->va_count; i++) {
- if (!arr->va_ptrs[i])
- continue;
-
- kvfree(arr->va_ptrs[i]);
- }
- kvfree(arr);
-}
-EXPORT_SYMBOL(cfs_array_free);
-
-/*
- * allocate a variable array, returned value is an array of pointers.
- * Caller can specify length of array by @count, @size is size of each
- * memory block in array.
- */
-void *
-cfs_array_alloc(int count, unsigned int size)
-{
- struct cfs_var_array *arr;
- int i;
-
- arr = kvmalloc(offsetof(struct cfs_var_array, va_ptrs[count]), GFP_KERNEL);
- if (!arr)
- return NULL;
-
- arr->va_count = count;
- arr->va_size = size;
-
- for (i = 0; i < count; i++) {
- arr->va_ptrs[i] = kvzalloc(size, GFP_KERNEL);
-
- if (!arr->va_ptrs[i]) {
- cfs_array_free((void *)&arr->va_ptrs[0]);
- return NULL;
- }
- }
-
- return (void *)&arr->va_ptrs[0];
-}
-EXPORT_SYMBOL(cfs_array_alloc);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
deleted file mode 100644
index 442889a3d729..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ /dev/null
@@ -1,556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * String manipulation functions.
- *
- * libcfs/libcfs/libcfs_string.c
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- */
-
-#include <linux/libcfs/libcfs.h>
-
-/* Convert a text string to a bitmask */
-int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
- int *oldmask, int minmask, int allmask)
-{
- const char *debugstr;
- char op = '\0';
- int newmask = minmask, i, len, found = 0;
-
- /* <str> must be a list of tokens separated by whitespace
- * and optionally an operator ('+' or '-'). If an operator
- * appears first in <str>, '*oldmask' is used as the starting point
- * (relative), otherwise minmask is used (absolute). An operator
- * applies to all following tokens up to the next operator.
- */
- while (*str != '\0') {
- while (isspace(*str))
- str++;
- if (*str == '\0')
- break;
- if (*str == '+' || *str == '-') {
- op = *str++;
- if (!found)
- /* only if first token is relative */
- newmask = *oldmask;
- while (isspace(*str))
- str++;
- if (*str == '\0') /* trailing op */
- return -EINVAL;
- }
-
- /* find token length */
- len = 0;
- while (str[len] != '\0' && !isspace(str[len]) &&
- str[len] != '+' && str[len] != '-')
- len++;
-
- /* match token */
- found = 0;
- for (i = 0; i < 32; i++) {
- debugstr = bit2str(i);
- if (debugstr && strlen(debugstr) == len &&
- !strncasecmp(str, debugstr, len)) {
- if (op == '-')
- newmask &= ~(1 << i);
- else
- newmask |= (1 << i);
- found = 1;
- break;
- }
- }
- if (!found && len == 3 &&
- !strncasecmp(str, "ALL", len)) {
- if (op == '-')
- newmask = minmask;
- else
- newmask = allmask;
- found = 1;
- }
- if (!found) {
- CWARN("unknown mask '%.*s'.\n"
- "mask usage: [+|-]<all|type> ...\n", len, str);
- return -EINVAL;
- }
- str += len;
- }
-
- *oldmask = newmask;
- return 0;
-}
-
-/* get the first string out of @str */
-char *cfs_firststr(char *str, size_t size)
-{
- size_t i = 0;
- char *end;
-
- /* trim leading spaces */
- while (i < size && *str && isspace(*str)) {
- ++i;
- ++str;
- }
-
- /* string with all spaces */
- if (*str == '\0')
- goto out;
-
- end = str;
- while (i < size && *end != '\0' && !isspace(*end)) {
- ++i;
- ++end;
- }
-
- *end = '\0';
-out:
- return str;
-}
-EXPORT_SYMBOL(cfs_firststr);
-
-/**
- * Extracts tokens from strings.
- *
- * Looks for \a delim in string \a next, sets \a res to point to
- * substring before the delimiter, sets \a next right after the found
- * delimiter.
- *
- * \retval 1 if \a res points to a string of non-whitespace characters
- * \retval 0 otherwise
- */
-int
-cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
-{
- char *end;
-
- if (!next->ls_str)
- return 0;
-
- /* skip leading white spaces */
- while (next->ls_len) {
- if (!isspace(*next->ls_str))
- break;
- next->ls_str++;
- next->ls_len--;
- }
-
- if (!next->ls_len) /* whitespaces only */
- return 0;
-
- if (*next->ls_str == delim) {
- /* first non-writespace is the delimiter */
- return 0;
- }
-
- res->ls_str = next->ls_str;
- end = memchr(next->ls_str, delim, next->ls_len);
- if (!end) {
- /* there is no the delimeter in the string */
- end = next->ls_str + next->ls_len;
- next->ls_str = NULL;
- } else {
- next->ls_str = end + 1;
- next->ls_len -= (end - res->ls_str + 1);
- }
-
- /* skip ending whitespaces */
- while (--end != res->ls_str) {
- if (!isspace(*end))
- break;
- }
-
- res->ls_len = end - res->ls_str + 1;
- return 1;
-}
-EXPORT_SYMBOL(cfs_gettok);
-
-/**
- * Converts string to integer.
- *
- * Accepts decimal and hexadecimal number recordings.
- *
- * \retval 1 if first \a nob chars of \a str convert to decimal or
- * hexadecimal integer in the range [\a min, \a max]
- * \retval 0 otherwise
- */
-int
-cfs_str2num_check(char *str, int nob, unsigned int *num,
- unsigned int min, unsigned int max)
-{
- bool all_numbers = true;
- char *endp, cache;
- int rc;
-
- /**
- * kstrouint can only handle strings composed
- * of only numbers. We need to scan the string
- * passed in for the first non-digit character
- * and end the string at that location. If we
- * don't find any non-digit character we still
- * need to place a '\0' at position nob since
- * we are not interested in the rest of the
- * string which is longer than nob in size.
- * After we are done the character at the
- * position we placed '\0' must be restored.
- */
- for (endp = str; endp < str + nob; endp++) {
- if (!isdigit(*endp)) {
- all_numbers = false;
- break;
- }
- }
- cache = *endp;
- *endp = '\0';
-
- rc = kstrtouint(str, 10, num);
- *endp = cache;
- if (rc || !all_numbers)
- return 0;
-
- return (*num >= min && *num <= max);
-}
-EXPORT_SYMBOL(cfs_str2num_check);
-
-/**
- * Parses \<range_expr\> token of the syntax. If \a bracketed is false,
- * \a src should only have a single token which can be \<number\> or \*
- *
- * \retval pointer to allocated range_expr and initialized
- * range_expr::re_lo, range_expr::re_hi and range_expr:re_stride if \a
- `* src parses to
- * \<number\> |
- * \<number\> '-' \<number\> |
- * \<number\> '-' \<number\> '/' \<number\>
- * \retval 0 will be returned if it can be parsed, otherwise -EINVAL or
- * -ENOMEM will be returned.
- */
-static int
-cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max,
- int bracketed, struct cfs_range_expr **expr)
-{
- struct cfs_range_expr *re;
- struct cfs_lstr tok;
-
- re = kzalloc(sizeof(*re), GFP_NOFS);
- if (!re)
- return -ENOMEM;
-
- if (src->ls_len == 1 && src->ls_str[0] == '*') {
- re->re_lo = min;
- re->re_hi = max;
- re->re_stride = 1;
- goto out;
- }
-
- if (cfs_str2num_check(src->ls_str, src->ls_len,
- &re->re_lo, min, max)) {
- /* <number> is parsed */
- re->re_hi = re->re_lo;
- re->re_stride = 1;
- goto out;
- }
-
- if (!bracketed || !cfs_gettok(src, '-', &tok))
- goto failed;
-
- if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
- &re->re_lo, min, max))
- goto failed;
-
- /* <number> - */
- if (cfs_str2num_check(src->ls_str, src->ls_len,
- &re->re_hi, min, max)) {
- /* <number> - <number> is parsed */
- re->re_stride = 1;
- goto out;
- }
-
- /* go to check <number> '-' <number> '/' <number> */
- if (cfs_gettok(src, '/', &tok)) {
- if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
- &re->re_hi, min, max))
- goto failed;
-
- /* <number> - <number> / ... */
- if (cfs_str2num_check(src->ls_str, src->ls_len,
- &re->re_stride, min, max)) {
- /* <number> - <number> / <number> is parsed */
- goto out;
- }
- }
-
- out:
- *expr = re;
- return 0;
-
- failed:
- kfree(re);
- return -EINVAL;
-}
-
-/**
- * Print the range expression \a re into specified \a buffer.
- * If \a bracketed is true, expression does not need additional
- * brackets.
- *
- * \retval number of characters written
- */
-static int
-cfs_range_expr_print(char *buffer, int count, struct cfs_range_expr *expr,
- bool bracketed)
-{
- int i;
- char s[] = "[";
- char e[] = "]";
-
- if (bracketed) {
- s[0] = '\0';
- e[0] = '\0';
- }
-
- if (expr->re_lo == expr->re_hi)
- i = scnprintf(buffer, count, "%u", expr->re_lo);
- else if (expr->re_stride == 1)
- i = scnprintf(buffer, count, "%s%u-%u%s",
- s, expr->re_lo, expr->re_hi, e);
- else
- i = scnprintf(buffer, count, "%s%u-%u/%u%s",
- s, expr->re_lo, expr->re_hi, expr->re_stride, e);
- return i;
-}
-
-/**
- * Print a list of range expressions (\a expr_list) into specified \a buffer.
- * If the list contains several expressions, separate them with comma
- * and surround the list with brackets.
- *
- * \retval number of characters written
- */
-int
-cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list)
-{
- struct cfs_range_expr *expr;
- int i = 0, j = 0;
- int numexprs = 0;
-
- if (count <= 0)
- return 0;
-
- list_for_each_entry(expr, &expr_list->el_exprs, re_link)
- numexprs++;
-
- if (numexprs > 1)
- i += scnprintf(buffer + i, count - i, "[");
-
- list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- if (j++)
- i += scnprintf(buffer + i, count - i, ",");
- i += cfs_range_expr_print(buffer + i, count - i, expr,
- numexprs > 1);
- }
-
- if (numexprs > 1)
- i += scnprintf(buffer + i, count - i, "]");
-
- return i;
-}
-EXPORT_SYMBOL(cfs_expr_list_print);
-
-/**
- * Matches value (\a value) against ranges expression list \a expr_list.
- *
- * \retval 1 if \a value matches
- * \retval 0 otherwise
- */
-int
-cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list)
-{
- struct cfs_range_expr *expr;
-
- list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- if (value >= expr->re_lo && value <= expr->re_hi &&
- !((value - expr->re_lo) % expr->re_stride))
- return 1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(cfs_expr_list_match);
-
-/**
- * Convert express list (\a expr_list) to an array of all matched values
- *
- * \retval N N is total number of all matched values
- * \retval 0 if expression list is empty
- * \retval < 0 for failure
- */
-int
-cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, u32 **valpp)
-{
- struct cfs_range_expr *expr;
- u32 *val;
- int count = 0;
- int i;
-
- list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- for (i = expr->re_lo; i <= expr->re_hi; i++) {
- if (!((i - expr->re_lo) % expr->re_stride))
- count++;
- }
- }
-
- if (!count) /* empty expression list */
- return 0;
-
- if (count > max) {
- CERROR("Number of values %d exceeds max allowed %d\n",
- max, count);
- return -EINVAL;
- }
-
- val = kvmalloc_array(count, sizeof(val[0]), GFP_KERNEL | __GFP_ZERO);
- if (!val)
- return -ENOMEM;
-
- count = 0;
- list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- for (i = expr->re_lo; i <= expr->re_hi; i++) {
- if (!((i - expr->re_lo) % expr->re_stride))
- val[count++] = i;
- }
- }
-
- *valpp = val;
- return count;
-}
-EXPORT_SYMBOL(cfs_expr_list_values);
-
-/**
- * Frees cfs_range_expr structures of \a expr_list.
- *
- * \retval none
- */
-void
-cfs_expr_list_free(struct cfs_expr_list *expr_list)
-{
- while (!list_empty(&expr_list->el_exprs)) {
- struct cfs_range_expr *expr;
-
- expr = list_entry(expr_list->el_exprs.next,
- struct cfs_range_expr, re_link);
- list_del(&expr->re_link);
- kfree(expr);
- }
-
- kfree(expr_list);
-}
-EXPORT_SYMBOL(cfs_expr_list_free);
-
-/**
- * Parses \<cfs_expr_list\> token of the syntax.
- *
- * \retval 0 if \a str parses to \<number\> | \<expr_list\>
- * \retval -errno otherwise
- */
-int
-cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
- struct cfs_expr_list **elpp)
-{
- struct cfs_expr_list *expr_list;
- struct cfs_range_expr *expr;
- struct cfs_lstr src;
- int rc;
-
- expr_list = kzalloc(sizeof(*expr_list), GFP_NOFS);
- if (!expr_list)
- return -ENOMEM;
-
- src.ls_str = str;
- src.ls_len = len;
-
- INIT_LIST_HEAD(&expr_list->el_exprs);
-
- if (src.ls_str[0] == '[' &&
- src.ls_str[src.ls_len - 1] == ']') {
- src.ls_str++;
- src.ls_len -= 2;
-
- rc = -EINVAL;
- while (src.ls_str) {
- struct cfs_lstr tok;
-
- if (!cfs_gettok(&src, ',', &tok)) {
- rc = -EINVAL;
- break;
- }
-
- rc = cfs_range_expr_parse(&tok, min, max, 1, &expr);
- if (rc)
- break;
-
- list_add_tail(&expr->re_link, &expr_list->el_exprs);
- }
- } else {
- rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
- if (!rc)
- list_add_tail(&expr->re_link, &expr_list->el_exprs);
- }
-
- if (rc)
- cfs_expr_list_free(expr_list);
- else
- *elpp = expr_list;
-
- return rc;
-}
-EXPORT_SYMBOL(cfs_expr_list_parse);
-
-/**
- * Frees cfs_expr_list structures of \a list.
- *
- * For each struct cfs_expr_list structure found on \a list it frees
- * range_expr list attached to it and frees the cfs_expr_list itself.
- *
- * \retval none
- */
-void
-cfs_expr_list_free_list(struct list_head *list)
-{
- struct cfs_expr_list *el;
-
- while (!list_empty(list)) {
- el = list_entry(list->next, struct cfs_expr_list, el_link);
- list_del(&el->el_link);
- cfs_expr_list_free(el);
- }
-}
-EXPORT_SYMBOL(cfs_expr_list_free_list);
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
deleted file mode 100644
index 388521e4e354..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ /dev/null
@@ -1,1079 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/libcfs/libcfs.h>
-
-#ifdef CONFIG_SMP
-
-/**
- * modparam for setting number of partitions
- *
- * 0 : estimate best value based on cores or NUMA nodes
- * 1 : disable multiple partitions
- * >1 : specify number of partitions
- */
-static int cpu_npartitions;
-module_param(cpu_npartitions, int, 0444);
-MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
-
-/**
- * modparam for setting CPU partitions patterns:
- *
- * i.e: "0[0,1,2,3] 1[4,5,6,7]", number before bracket is CPU partition ID,
- * number in bracket is processor ID (core or HT)
- *
- * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket
- * are NUMA node ID, number before bracket is CPU partition ID.
- *
- * i.e: "N", shortcut expression to create CPT from NUMA & CPU topology
- *
- * NB: If user specified cpu_pattern, cpu_npartitions will be ignored
- */
-static char *cpu_pattern = "N";
-module_param(cpu_pattern, charp, 0444);
-MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
-
-struct cfs_cpt_data {
- /* serialize hotplug etc */
- spinlock_t cpt_lock;
- /* reserved for hotplug */
- unsigned long cpt_version;
- /* mutex to protect cpt_cpumask */
- struct mutex cpt_mutex;
- /* scratch buffer for set/unset_node */
- cpumask_var_t cpt_cpumask;
-};
-
-static struct cfs_cpt_data cpt_data;
-
-static void
-cfs_node_to_cpumask(int node, cpumask_t *mask)
-{
- const cpumask_t *tmp = cpumask_of_node(node);
-
- if (tmp)
- cpumask_copy(mask, tmp);
- else
- cpumask_clear(mask);
-}
-
-void
-cfs_cpt_table_free(struct cfs_cpt_table *cptab)
-{
- int i;
-
- kvfree(cptab->ctb_cpu2cpt);
-
- for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
- struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
-
- kfree(part->cpt_nodemask);
- free_cpumask_var(part->cpt_cpumask);
- }
-
- kvfree(cptab->ctb_parts);
-
- kfree(cptab->ctb_nodemask);
- free_cpumask_var(cptab->ctb_cpumask);
-
- kfree(cptab);
-}
-EXPORT_SYMBOL(cfs_cpt_table_free);
-
-struct cfs_cpt_table *
-cfs_cpt_table_alloc(unsigned int ncpt)
-{
- struct cfs_cpt_table *cptab;
- int i;
-
- cptab = kzalloc(sizeof(*cptab), GFP_NOFS);
- if (!cptab)
- return NULL;
-
- cptab->ctb_nparts = ncpt;
-
- cptab->ctb_nodemask = kzalloc(sizeof(*cptab->ctb_nodemask),
- GFP_NOFS);
- if (!zalloc_cpumask_var(&cptab->ctb_cpumask, GFP_NOFS) ||
- !cptab->ctb_nodemask)
- goto failed;
-
- cptab->ctb_cpu2cpt = kvmalloc_array(num_possible_cpus(),
- sizeof(cptab->ctb_cpu2cpt[0]),
- GFP_KERNEL);
- if (!cptab->ctb_cpu2cpt)
- goto failed;
-
- memset(cptab->ctb_cpu2cpt, -1,
- num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
-
- cptab->ctb_parts = kvmalloc_array(ncpt, sizeof(cptab->ctb_parts[0]),
- GFP_KERNEL);
- if (!cptab->ctb_parts)
- goto failed;
-
- for (i = 0; i < ncpt; i++) {
- struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
-
- part->cpt_nodemask = kzalloc(sizeof(*part->cpt_nodemask),
- GFP_NOFS);
- if (!zalloc_cpumask_var(&part->cpt_cpumask, GFP_NOFS) ||
- !part->cpt_nodemask)
- goto failed;
- }
-
- spin_lock(&cpt_data.cpt_lock);
- /* Reserved for hotplug */
- cptab->ctb_version = cpt_data.cpt_version;
- spin_unlock(&cpt_data.cpt_lock);
-
- return cptab;
-
- failed:
- cfs_cpt_table_free(cptab);
- return NULL;
-}
-EXPORT_SYMBOL(cfs_cpt_table_alloc);
-
-int
-cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
-{
- char *tmp = buf;
- int rc = 0;
- int i;
- int j;
-
- for (i = 0; i < cptab->ctb_nparts; i++) {
- if (len > 0) {
- rc = snprintf(tmp, len, "%d\t: ", i);
- len -= rc;
- }
-
- if (len <= 0) {
- rc = -EFBIG;
- goto out;
- }
-
- tmp += rc;
- for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
- rc = snprintf(tmp, len, "%d ", j);
- len -= rc;
- if (len <= 0) {
- rc = -EFBIG;
- goto out;
- }
- tmp += rc;
- }
-
- *tmp = '\n';
- tmp++;
- len--;
- }
-
- out:
- if (rc < 0)
- return rc;
-
- return tmp - buf;
-}
-EXPORT_SYMBOL(cfs_cpt_table_print);
-
-int
-cfs_cpt_number(struct cfs_cpt_table *cptab)
-{
- return cptab->ctb_nparts;
-}
-EXPORT_SYMBOL(cfs_cpt_number);
-
-int
-cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
-{
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- return cpt == CFS_CPT_ANY ?
- cpumask_weight(cptab->ctb_cpumask) :
- cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
-}
-EXPORT_SYMBOL(cfs_cpt_weight);
-
-int
-cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
-{
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- return cpt == CFS_CPT_ANY ?
- cpumask_any_and(cptab->ctb_cpumask,
- cpu_online_mask) < nr_cpu_ids :
- cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
- cpu_online_mask) < nr_cpu_ids;
-}
-EXPORT_SYMBOL(cfs_cpt_online);
-
-cpumask_var_t *
-cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
-{
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- return cpt == CFS_CPT_ANY ?
- &cptab->ctb_cpumask : &cptab->ctb_parts[cpt].cpt_cpumask;
-}
-EXPORT_SYMBOL(cfs_cpt_cpumask);
-
-nodemask_t *
-cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
-{
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- return cpt == CFS_CPT_ANY ?
- cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
-}
-EXPORT_SYMBOL(cfs_cpt_nodemask);
-
-int
-cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
-{
- int node;
-
- LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
-
- if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
- CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
- return 0;
- }
-
- if (cptab->ctb_cpu2cpt[cpu] != -1) {
- CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
- cpu, cptab->ctb_cpu2cpt[cpu]);
- return 0;
- }
-
- cptab->ctb_cpu2cpt[cpu] = cpt;
-
- LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask));
- LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
-
- cpumask_set_cpu(cpu, cptab->ctb_cpumask);
- cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
-
- node = cpu_to_node(cpu);
-
- /* first CPU of @node in this CPT table */
- if (!node_isset(node, *cptab->ctb_nodemask))
- node_set(node, *cptab->ctb_nodemask);
-
- /* first CPU of @node in this partition */
- if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask))
- node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask);
-
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_cpu);
-
-void
-cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
-{
- int node;
- int i;
-
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- if (cpu < 0 || cpu >= nr_cpu_ids) {
- CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
- return;
- }
-
- if (cpt == CFS_CPT_ANY) {
- /* caller doesn't know the partition ID */
- cpt = cptab->ctb_cpu2cpt[cpu];
- if (cpt < 0) { /* not set in this CPT-table */
- CDEBUG(D_INFO, "Try to unset cpu %d which is not in CPT-table %p\n",
- cpt, cptab);
- return;
- }
-
- } else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
- CDEBUG(D_INFO,
- "CPU %d is not in cpu-partition %d\n", cpu, cpt);
- return;
- }
-
- LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
- LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));
-
- cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
- cpumask_clear_cpu(cpu, cptab->ctb_cpumask);
- cptab->ctb_cpu2cpt[cpu] = -1;
-
- node = cpu_to_node(cpu);
-
- LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask));
- LASSERT(node_isset(node, *cptab->ctb_nodemask));
-
- for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) {
- /* this CPT has other CPU belonging to this node? */
- if (cpu_to_node(i) == node)
- break;
- }
-
- if (i >= nr_cpu_ids)
- node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask);
-
- for_each_cpu(i, cptab->ctb_cpumask) {
- /* this CPT-table has other CPU belonging to this node? */
- if (cpu_to_node(i) == node)
- break;
- }
-
- if (i >= nr_cpu_ids)
- node_clear(node, *cptab->ctb_nodemask);
-}
-EXPORT_SYMBOL(cfs_cpt_unset_cpu);
-
-int
-cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
-{
- int i;
-
- if (!cpumask_weight(mask) ||
- cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
- CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
- cpt);
- return 0;
- }
-
- for_each_cpu(i, mask) {
- if (!cfs_cpt_set_cpu(cptab, cpt, i))
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_cpumask);
-
-void
-cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
-{
- int i;
-
- for_each_cpu(i, mask)
- cfs_cpt_unset_cpu(cptab, cpt, i);
-}
-EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
-
-int
-cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
-{
- int rc;
-
- if (node < 0 || node >= MAX_NUMNODES) {
- CDEBUG(D_INFO,
- "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
- return 0;
- }
-
- mutex_lock(&cpt_data.cpt_mutex);
-
- cfs_node_to_cpumask(node, cpt_data.cpt_cpumask);
-
- rc = cfs_cpt_set_cpumask(cptab, cpt, cpt_data.cpt_cpumask);
-
- mutex_unlock(&cpt_data.cpt_mutex);
-
- return rc;
-}
-EXPORT_SYMBOL(cfs_cpt_set_node);
-
-void
-cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
-{
- if (node < 0 || node >= MAX_NUMNODES) {
- CDEBUG(D_INFO,
- "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
- return;
- }
-
- mutex_lock(&cpt_data.cpt_mutex);
-
- cfs_node_to_cpumask(node, cpt_data.cpt_cpumask);
-
- cfs_cpt_unset_cpumask(cptab, cpt, cpt_data.cpt_cpumask);
-
- mutex_unlock(&cpt_data.cpt_mutex);
-}
-EXPORT_SYMBOL(cfs_cpt_unset_node);
-
-int
-cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
-{
- int i;
-
- for_each_node_mask(i, *mask) {
- if (!cfs_cpt_set_node(cptab, cpt, i))
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_nodemask);
-
-void
-cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
-{
- int i;
-
- for_each_node_mask(i, *mask)
- cfs_cpt_unset_node(cptab, cpt, i);
-}
-EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
-
-void
-cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
-{
- int last;
- int i;
-
- if (cpt == CFS_CPT_ANY) {
- last = cptab->ctb_nparts - 1;
- cpt = 0;
- } else {
- last = cpt;
- }
-
- for (; cpt <= last; cpt++) {
- for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask)
- cfs_cpt_unset_cpu(cptab, cpt, i);
- }
-}
-EXPORT_SYMBOL(cfs_cpt_clear);
-
-int
-cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
-{
- nodemask_t *mask;
- int weight;
- int rotor;
- int node;
-
- /* convert CPU partition ID to HW node id */
-
- if (cpt < 0 || cpt >= cptab->ctb_nparts) {
- mask = cptab->ctb_nodemask;
- rotor = cptab->ctb_spread_rotor++;
- } else {
- mask = cptab->ctb_parts[cpt].cpt_nodemask;
- rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
- }
-
- weight = nodes_weight(*mask);
- LASSERT(weight > 0);
-
- rotor %= weight;
-
- for_each_node_mask(node, *mask) {
- if (!rotor--)
- return node;
- }
-
- LBUG();
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_spread_node);
-
-int
-cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
-{
- int cpu;
- int cpt;
-
- preempt_disable();
- cpu = smp_processor_id();
- cpt = cptab->ctb_cpu2cpt[cpu];
-
- if (cpt < 0 && remap) {
- /* don't return negative value for safety of upper layer,
- * instead we shadow the unknown cpu to a valid partition ID
- */
- cpt = cpu % cptab->ctb_nparts;
- }
- preempt_enable();
- return cpt;
-}
-EXPORT_SYMBOL(cfs_cpt_current);
-
-int
-cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
-{
- LASSERT(cpu >= 0 && cpu < nr_cpu_ids);
-
- return cptab->ctb_cpu2cpt[cpu];
-}
-EXPORT_SYMBOL(cfs_cpt_of_cpu);
-
-int
-cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
-{
- cpumask_var_t *cpumask;
- nodemask_t *nodemask;
- int rc;
- int i;
-
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- if (cpt == CFS_CPT_ANY) {
- cpumask = &cptab->ctb_cpumask;
- nodemask = cptab->ctb_nodemask;
- } else {
- cpumask = &cptab->ctb_parts[cpt].cpt_cpumask;
- nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
- }
-
- if (cpumask_any_and(*cpumask, cpu_online_mask) >= nr_cpu_ids) {
- CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
- cpt);
- return -EINVAL;
- }
-
- for_each_online_cpu(i) {
- if (cpumask_test_cpu(i, *cpumask))
- continue;
-
- rc = set_cpus_allowed_ptr(current, *cpumask);
- set_mems_allowed(*nodemask);
- if (!rc)
- schedule(); /* switch to allowed CPU */
-
- return rc;
- }
-
- /* don't need to set affinity because all online CPUs are covered */
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_bind);
-
-/**
- * Choose max to \a number CPUs from \a node and set them in \a cpt.
- * We always prefer to choose CPU in the same core/socket.
- */
-static int
-cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
- cpumask_t *node, int number)
-{
- cpumask_var_t socket;
- cpumask_var_t core;
- int rc = 0;
- int cpu;
-
- LASSERT(number > 0);
-
- if (number >= cpumask_weight(node)) {
- while (!cpumask_empty(node)) {
- cpu = cpumask_first(node);
-
- rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
- if (!rc)
- return -EINVAL;
- cpumask_clear_cpu(cpu, node);
- }
- return 0;
- }
-
- /*
- * Allocate scratch buffers
- * As we cannot initialize a cpumask_var_t, we need
- * to alloc both before we can risk trying to free either
- */
- if (!zalloc_cpumask_var(&socket, GFP_NOFS))
- rc = -ENOMEM;
- if (!zalloc_cpumask_var(&core, GFP_NOFS))
- rc = -ENOMEM;
- if (rc)
- goto out;
-
- while (!cpumask_empty(node)) {
- cpu = cpumask_first(node);
-
- /* get cpumask for cores in the same socket */
- cpumask_copy(socket, topology_core_cpumask(cpu));
- cpumask_and(socket, socket, node);
-
- LASSERT(!cpumask_empty(socket));
-
- while (!cpumask_empty(socket)) {
- int i;
-
- /* get cpumask for hts in the same core */
- cpumask_copy(core, topology_sibling_cpumask(cpu));
- cpumask_and(core, core, node);
-
- LASSERT(!cpumask_empty(core));
-
- for_each_cpu(i, core) {
- cpumask_clear_cpu(i, socket);
- cpumask_clear_cpu(i, node);
-
- rc = cfs_cpt_set_cpu(cptab, cpt, i);
- if (!rc) {
- rc = -EINVAL;
- goto out;
- }
-
- if (!--number)
- goto out;
- }
- cpu = cpumask_first(socket);
- }
- }
-
-out:
- free_cpumask_var(socket);
- free_cpumask_var(core);
- return rc;
-}
-
-#define CPT_WEIGHT_MIN 4u
-
-static unsigned int
-cfs_cpt_num_estimate(void)
-{
- unsigned int nnode = num_online_nodes();
- unsigned int ncpu = num_online_cpus();
- unsigned int ncpt;
-
- if (ncpu <= CPT_WEIGHT_MIN) {
- ncpt = 1;
- goto out;
- }
-
- /* generate reasonable number of CPU partitions based on total number
- * of CPUs, Preferred N should be power2 and match this condition:
- * 2 * (N - 1)^2 < NCPUS <= 2 * N^2
- */
- for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1)
- ;
-
- if (ncpt <= nnode) { /* fat numa system */
- while (nnode > ncpt)
- nnode >>= 1;
-
- } else { /* ncpt > nnode */
- while ((nnode << 1) <= ncpt)
- nnode <<= 1;
- }
-
- ncpt = nnode;
-
-out:
-#if (BITS_PER_LONG == 32)
- /* config many CPU partitions on 32-bit system could consume
- * too much memory
- */
- ncpt = min(2U, ncpt);
-#endif
- while (ncpu % ncpt)
- ncpt--; /* worst case is 1 */
-
- return ncpt;
-}
-
-static struct cfs_cpt_table *
-cfs_cpt_table_create(int ncpt)
-{
- struct cfs_cpt_table *cptab = NULL;
- cpumask_var_t mask;
- int cpt = 0;
- int num;
- int rc;
- int i;
-
- rc = cfs_cpt_num_estimate();
- if (ncpt <= 0)
- ncpt = rc;
-
- if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
- CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n",
- ncpt, rc);
- }
-
- if (num_online_cpus() % ncpt) {
- CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n",
- (int)num_online_cpus(), ncpt);
- goto failed;
- }
-
- cptab = cfs_cpt_table_alloc(ncpt);
- if (!cptab) {
- CERROR("Failed to allocate CPU map(%d)\n", ncpt);
- goto failed;
- }
-
- num = num_online_cpus() / ncpt;
- if (!num) {
- CERROR("CPU changed while setting CPU partition\n");
- goto failed;
- }
-
- if (!zalloc_cpumask_var(&mask, GFP_NOFS)) {
- CERROR("Failed to allocate scratch cpumask\n");
- goto failed;
- }
-
- for_each_online_node(i) {
- cfs_node_to_cpumask(i, mask);
-
- while (!cpumask_empty(mask)) {
- struct cfs_cpu_partition *part;
- int n;
-
- /*
- * Each emulated NUMA node has all allowed CPUs in
- * the mask.
- * End loop when all partitions have assigned CPUs.
- */
- if (cpt == ncpt)
- break;
-
- part = &cptab->ctb_parts[cpt];
-
- n = num - cpumask_weight(part->cpt_cpumask);
- LASSERT(n > 0);
-
- rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
- if (rc < 0)
- goto failed_mask;
-
- LASSERT(num >= cpumask_weight(part->cpt_cpumask));
- if (num == cpumask_weight(part->cpt_cpumask))
- cpt++;
- }
- }
-
- if (cpt != ncpt ||
- num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
- CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
- cptab->ctb_nparts, num, cpt,
- cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
- goto failed_mask;
- }
-
- free_cpumask_var(mask);
-
- return cptab;
-
- failed_mask:
- free_cpumask_var(mask);
- failed:
- CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
- ncpt, num_online_nodes(), num_online_cpus());
-
- if (cptab)
- cfs_cpt_table_free(cptab);
-
- return NULL;
-}
-
-static struct cfs_cpt_table *
-cfs_cpt_table_create_pattern(char *pattern)
-{
- struct cfs_cpt_table *cptab;
- char *str;
- int node = 0;
- int high;
- int ncpt = 0;
- int cpt;
- int rc;
- int c;
- int i;
-
- str = strim(pattern);
- if (*str == 'n' || *str == 'N') {
- pattern = str + 1;
- if (*pattern != '\0') {
- node = 1;
- } else { /* shortcut to create CPT from NUMA & CPU topology */
- node = -1;
- ncpt = num_online_nodes();
- }
- }
-
- if (!ncpt) { /* scanning bracket which is mark of partition */
- for (str = pattern;; str++, ncpt++) {
- str = strchr(str, '[');
- if (!str)
- break;
- }
- }
-
- if (!ncpt ||
- (node && ncpt > num_online_nodes()) ||
- (!node && ncpt > num_online_cpus())) {
- CERROR("Invalid pattern %s, or too many partitions %d\n",
- pattern, ncpt);
- return NULL;
- }
-
- cptab = cfs_cpt_table_alloc(ncpt);
- if (!cptab) {
- CERROR("Failed to allocate cpu partition table\n");
- return NULL;
- }
-
- if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
- cpt = 0;
-
- for_each_online_node(i) {
- if (cpt >= ncpt) {
- CERROR("CPU changed while setting CPU partition table, %d/%d\n",
- cpt, ncpt);
- goto failed;
- }
-
- rc = cfs_cpt_set_node(cptab, cpt++, i);
- if (!rc)
- goto failed;
- }
- return cptab;
- }
-
- high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
-
- for (str = strim(pattern), c = 0;; c++) {
- struct cfs_range_expr *range;
- struct cfs_expr_list *el;
- char *bracket = strchr(str, '[');
- int n;
-
- if (!bracket) {
- if (*str) {
- CERROR("Invalid pattern %s\n", str);
- goto failed;
- }
- if (c != ncpt) {
- CERROR("expect %d partitions but found %d\n",
- ncpt, c);
- goto failed;
- }
- break;
- }
-
- if (sscanf(str, "%d%n", &cpt, &n) < 1) {
- CERROR("Invalid cpu pattern %s\n", str);
- goto failed;
- }
-
- if (cpt < 0 || cpt >= ncpt) {
- CERROR("Invalid partition id %d, total partitions %d\n",
- cpt, ncpt);
- goto failed;
- }
-
- if (cfs_cpt_weight(cptab, cpt)) {
- CERROR("Partition %d has already been set.\n", cpt);
- goto failed;
- }
-
- str = strim(str + n);
- if (str != bracket) {
- CERROR("Invalid pattern %s\n", str);
- goto failed;
- }
-
- bracket = strchr(str, ']');
- if (!bracket) {
- CERROR("missing right bracket for cpt %d, %s\n",
- cpt, str);
- goto failed;
- }
-
- if (cfs_expr_list_parse(str, (bracket - str) + 1,
- 0, high, &el)) {
- CERROR("Can't parse number range: %s\n", str);
- goto failed;
- }
-
- list_for_each_entry(range, &el->el_exprs, re_link) {
- for (i = range->re_lo; i <= range->re_hi; i++) {
- if ((i - range->re_lo) % range->re_stride)
- continue;
-
- rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
- cfs_cpt_set_cpu(cptab, cpt, i);
- if (!rc) {
- cfs_expr_list_free(el);
- goto failed;
- }
- }
- }
-
- cfs_expr_list_free(el);
-
- if (!cfs_cpt_online(cptab, cpt)) {
- CERROR("No online CPU is found on partition %d\n", cpt);
- goto failed;
- }
-
- str = strim(bracket + 1);
- }
-
- return cptab;
-
- failed:
- cfs_cpt_table_free(cptab);
- return NULL;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static enum cpuhp_state lustre_cpu_online;
-
-static void cfs_cpu_incr_cpt_version(void)
-{
- spin_lock(&cpt_data.cpt_lock);
- cpt_data.cpt_version++;
- spin_unlock(&cpt_data.cpt_lock);
-}
-
-static int cfs_cpu_online(unsigned int cpu)
-{
- cfs_cpu_incr_cpt_version();
- return 0;
-}
-
-static int cfs_cpu_dead(unsigned int cpu)
-{
- bool warn;
-
- cfs_cpu_incr_cpt_version();
-
- mutex_lock(&cpt_data.cpt_mutex);
- /* if all HTs in a core are offline, it may break affinity */
- cpumask_copy(cpt_data.cpt_cpumask, topology_sibling_cpumask(cpu));
- warn = cpumask_any_and(cpt_data.cpt_cpumask,
- cpu_online_mask) >= nr_cpu_ids;
- mutex_unlock(&cpt_data.cpt_mutex);
- CDEBUG(warn ? D_WARNING : D_INFO,
- "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u]\n",
- cpu);
- return 0;
-}
-#endif
-
-void
-cfs_cpu_fini(void)
-{
- if (cfs_cpt_table)
- cfs_cpt_table_free(cfs_cpt_table);
-
-#ifdef CONFIG_HOTPLUG_CPU
- if (lustre_cpu_online > 0)
- cpuhp_remove_state_nocalls(lustre_cpu_online);
- cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
-#endif
- free_cpumask_var(cpt_data.cpt_cpumask);
-}
-
-int
-cfs_cpu_init(void)
-{
- int ret = 0;
-
- LASSERT(!cfs_cpt_table);
-
- memset(&cpt_data, 0, sizeof(cpt_data));
-
- if (!zalloc_cpumask_var(&cpt_data.cpt_cpumask, GFP_NOFS)) {
- CERROR("Failed to allocate scratch buffer\n");
- return -1;
- }
-
- spin_lock_init(&cpt_data.cpt_lock);
- mutex_init(&cpt_data.cpt_mutex);
-
-#ifdef CONFIG_HOTPLUG_CPU
- ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD,
- "staging/lustre/cfe:dead", NULL,
- cfs_cpu_dead);
- if (ret < 0)
- goto failed;
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "staging/lustre/cfe:online",
- cfs_cpu_online, NULL);
- if (ret < 0)
- goto failed;
- lustre_cpu_online = ret;
-#endif
- ret = -EINVAL;
-
- if (*cpu_pattern) {
- char *cpu_pattern_dup = kstrdup(cpu_pattern, GFP_KERNEL);
-
- if (!cpu_pattern_dup) {
- CERROR("Failed to duplicate cpu_pattern\n");
- goto failed;
- }
-
- cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern_dup);
- kfree(cpu_pattern_dup);
- if (!cfs_cpt_table) {
- CERROR("Failed to create cptab from pattern %s\n",
- cpu_pattern);
- goto failed;
- }
-
- } else {
- cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
- if (!cfs_cpt_table) {
- CERROR("Failed to create ptable with npartitions %d\n",
- cpu_npartitions);
- goto failed;
- }
- }
-
- spin_lock(&cpt_data.cpt_lock);
- if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) {
- spin_unlock(&cpt_data.cpt_lock);
- CERROR("CPU hotplug/unplug during setup\n");
- goto failed;
- }
- spin_unlock(&cpt_data.cpt_lock);
-
- LCONSOLE(0, "HW nodes: %d, HW CPU cores: %d, npartitions: %d\n",
- num_online_nodes(), num_online_cpus(),
- cfs_cpt_number(cfs_cpt_table));
- return 0;
-
- failed:
- cfs_cpu_fini();
- return ret;
-}
-
-#endif
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
deleted file mode 100644
index db81ed527452..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- */
-
-/*
- * This is crypto api shash wrappers to zlib_adler32.
- */
-
-#include <linux/module.h>
-#include <linux/zutil.h>
-#include <crypto/internal/hash.h>
-#include "linux-crypto.h"
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-static int adler32_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = 1;
-
- return 0;
-}
-
-static int adler32_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- *mctx = *(u32 *)key;
- return 0;
-}
-
-static int adler32_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *cksump = shash_desc_ctx(desc);
-
- *cksump = *mctx;
-
- return 0;
-}
-
-static int adler32_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- u32 *cksump = shash_desc_ctx(desc);
-
- *cksump = zlib_adler32(*cksump, data, len);
- return 0;
-}
-
-static int __adler32_finup(u32 *cksump, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(u32 *)out = zlib_adler32(*cksump, data, len);
- return 0;
-}
-
-static int adler32_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __adler32_finup(shash_desc_ctx(desc), data, len, out);
-}
-
-static int adler32_final(struct shash_desc *desc, u8 *out)
-{
- u32 *cksump = shash_desc_ctx(desc);
-
- *(u32 *)out = *cksump;
- return 0;
-}
-
-static int adler32_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __adler32_finup(crypto_shash_ctx(desc->tfm), data, len,
- out);
-}
-
-static struct shash_alg alg = {
- .setkey = adler32_setkey,
- .init = adler32_init,
- .update = adler32_update,
- .final = adler32_final,
- .finup = adler32_finup,
- .digest = adler32_digest,
- .descsize = sizeof(u32),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "adler32",
- .cra_driver_name = "adler32-zlib",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(u32),
- .cra_module = THIS_MODULE,
- .cra_init = adler32_cra_init,
- }
-};
-
-int cfs_crypto_adler32_register(void)
-{
- return crypto_register_shash(&alg);
-}
-
-void cfs_crypto_adler32_unregister(void)
-{
- crypto_unregister_shash(&alg);
-}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
deleted file mode 100644
index b55006264155..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ /dev/null
@@ -1,443 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-
-#include <crypto/hash.h>
-#include <linux/scatterlist.h>
-#include <linux/libcfs/libcfs.h>
-#include <linux/libcfs/libcfs_crypto.h>
-#include "linux-crypto.h"
-
-/**
- * Array of hash algorithm speed in MByte per second
- */
-static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
-
-/**
- * Initialize the state descriptor for the specified hash algorithm.
- *
- * An internal routine to allocate the hash-specific state in \a req for
- * use with cfs_crypto_hash_digest() to compute the hash of a single message,
- * though possibly in multiple chunks. The descriptor internal state should
- * be freed with cfs_crypto_hash_final().
- *
- * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
- * \param[out] type pointer to the hash description in hash_types[]
- * array
- * \param[in,out] req hash state descriptor to be initialized
- * \param[in] key initial hash value/state, NULL to use default
- * value
- * \param[in] key_len length of \a key
- *
- * \retval 0 on success
- * \retval negative errno on failure
- */
-static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
- const struct cfs_crypto_hash_type **type,
- struct ahash_request **req,
- unsigned char *key,
- unsigned int key_len)
-{
- struct crypto_ahash *tfm;
- int err = 0;
-
- *type = cfs_crypto_hash_type(hash_alg);
-
- if (!*type) {
- CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
- hash_alg, CFS_HASH_ALG_MAX);
- return -EINVAL;
- }
- tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
-
- if (IS_ERR(tfm)) {
- CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
- (*type)->cht_name);
- return PTR_ERR(tfm);
- }
-
- *req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!*req) {
- CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
- (*type)->cht_name);
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
-
- ahash_request_set_callback(*req, 0, NULL, NULL);
-
- if (key)
- err = crypto_ahash_setkey(tfm, key, key_len);
- else if ((*type)->cht_key)
- err = crypto_ahash_setkey(tfm,
- (unsigned char *)&((*type)->cht_key),
- (*type)->cht_size);
-
- if (err) {
- ahash_request_free(*req);
- crypto_free_ahash(tfm);
- return err;
- }
-
- CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
- crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
- cfs_crypto_hash_speeds[hash_alg]);
-
- err = crypto_ahash_init(*req);
- if (err) {
- ahash_request_free(*req);
- crypto_free_ahash(tfm);
- }
- return err;
-}
-
-/**
- * Calculate hash digest for the passed buffer.
- *
- * This should be used when computing the hash on a single contiguous buffer.
- * It combines the hash initialization, computation, and cleanup.
- *
- * \param[in] hash_alg id of hash algorithm (CFS_HASH_ALG_*)
- * \param[in] buf data buffer on which to compute hash
- * \param[in] buf_len length of \a buf in bytes
- * \param[in] key initial value/state for algorithm,
- * if \a key = NULL use default initial value
- * \param[in] key_len length of \a key in bytes
- * \param[out] hash pointer to computed hash value,
- * if \a hash = NULL then \a hash_len is to digest
- * size in bytes, retval -ENOSPC
- * \param[in,out] hash_len size of \a hash buffer
- *
- * \retval -EINVAL \a buf, \a buf_len, \a hash_len,
- * \a hash_alg invalid
- * \retval -ENOENT \a hash_alg is unsupported
- * \retval -ENOSPC \a hash is NULL, or \a hash_len less than
- * digest size
- * \retval 0 for success
- * \retval negative errno for other errors from lower
- * layers.
- */
-int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
- const void *buf, unsigned int buf_len,
- unsigned char *key, unsigned int key_len,
- unsigned char *hash, unsigned int *hash_len)
-{
- struct scatterlist sl;
- struct ahash_request *req;
- int err;
- const struct cfs_crypto_hash_type *type;
-
- if (!buf || !buf_len || !hash_len)
- return -EINVAL;
-
- err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
- if (err)
- return err;
-
- if (!hash || *hash_len < type->cht_size) {
- *hash_len = type->cht_size;
- crypto_free_ahash(crypto_ahash_reqtfm(req));
- ahash_request_free(req);
- return -ENOSPC;
- }
- sg_init_one(&sl, buf, buf_len);
-
- ahash_request_set_crypt(req, &sl, hash, sl.length);
- err = crypto_ahash_digest(req);
- crypto_free_ahash(crypto_ahash_reqtfm(req));
- ahash_request_free(req);
-
- return err;
-}
-EXPORT_SYMBOL(cfs_crypto_hash_digest);
-
-/**
- * Allocate and initialize descriptor for hash algorithm.
- *
- * This should be used to initialize a hash descriptor for multiple calls
- * to a single hash function when computing the hash across multiple
- * separate buffers or pages using cfs_crypto_hash_update{,_page}().
- *
- * The hash descriptor should be freed with cfs_crypto_hash_final().
- *
- * \param[in] hash_alg algorithm id (CFS_HASH_ALG_*)
- * \param[in] key initial value/state for algorithm, if \a key = NULL
- * use default initial value
- * \param[in] key_len length of \a key in bytes
- *
- * \retval pointer to descriptor of hash instance
- * \retval ERR_PTR(errno) in case of error
- */
-struct ahash_request *
-cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
- unsigned char *key, unsigned int key_len)
-{
- struct ahash_request *req;
- int err;
- const struct cfs_crypto_hash_type *type;
-
- err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
-
- if (err)
- return ERR_PTR(err);
- return req;
-}
-EXPORT_SYMBOL(cfs_crypto_hash_init);
-
-/**
- * Update hash digest computed on data within the given \a page
- *
- * \param[in] hreq hash state descriptor
- * \param[in] page data page on which to compute the hash
- * \param[in] offset offset within \a page at which to start hash
- * \param[in] len length of data on which to compute hash
- *
- * \retval 0 for success
- * \retval negative errno on failure
- */
-int cfs_crypto_hash_update_page(struct ahash_request *req,
- struct page *page, unsigned int offset,
- unsigned int len)
-{
- struct scatterlist sl;
-
- sg_init_table(&sl, 1);
- sg_set_page(&sl, page, len, offset & ~PAGE_MASK);
-
- ahash_request_set_crypt(req, &sl, NULL, sl.length);
- return crypto_ahash_update(req);
-}
-EXPORT_SYMBOL(cfs_crypto_hash_update_page);
-
-/**
- * Update hash digest computed on the specified data
- *
- * \param[in] req hash state descriptor
- * \param[in] buf data buffer on which to compute the hash
- * \param[in] buf_len length of \buf on which to compute hash
- *
- * \retval 0 for success
- * \retval negative errno on failure
- */
-int cfs_crypto_hash_update(struct ahash_request *req,
- const void *buf, unsigned int buf_len)
-{
- struct scatterlist sl;
-
- sg_init_one(&sl, buf, buf_len);
-
- ahash_request_set_crypt(req, &sl, NULL, sl.length);
- return crypto_ahash_update(req);
-}
-EXPORT_SYMBOL(cfs_crypto_hash_update);
-
-/**
- * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor
- *
- * \param[in] req hash descriptor
- * \param[out] hash pointer to hash buffer to store hash digest
- * \param[in,out] hash_len pointer to hash buffer size, if \a req = NULL
- * only free \a req instead of computing the hash
- *
- * \retval 0 for success
- * \retval -EOVERFLOW if hash_len is too small for the hash digest
- * \retval negative errno for other errors from lower layers
- */
-int cfs_crypto_hash_final(struct ahash_request *req,
- unsigned char *hash, unsigned int *hash_len)
-{
- int err;
- int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
-
- if (!hash || !hash_len) {
- err = 0;
- goto free_ahash;
- }
- if (*hash_len < size) {
- err = -EOVERFLOW;
- goto free_ahash;
- }
-
- ahash_request_set_crypt(req, NULL, hash, 0);
- err = crypto_ahash_final(req);
- if (!err)
- *hash_len = size;
-free_ahash:
- crypto_free_ahash(crypto_ahash_reqtfm(req));
- ahash_request_free(req);
- return err;
-}
-EXPORT_SYMBOL(cfs_crypto_hash_final);
-
-/**
- * Compute the speed of specified hash function
- *
- * Run a speed test on the given hash algorithm on buffer of the given size.
- * The speed is stored internally in the cfs_crypto_hash_speeds[] array, and
- * is available through the cfs_crypto_hash_speed() function.
- *
- * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
- * \param[in] buf data buffer on which to compute the hash
- * \param[in] buf_len length of \buf on which to compute hash
- */
-static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
-{
- int buf_len = max(PAGE_SIZE, 1048576UL);
- void *buf;
- unsigned long start, end;
- int bcount, err = 0;
- struct page *page;
- unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
- unsigned int hash_len = sizeof(hash);
-
- page = alloc_page(GFP_KERNEL);
- if (!page) {
- err = -ENOMEM;
- goto out_err;
- }
-
- buf = kmap(page);
- memset(buf, 0xAD, PAGE_SIZE);
- kunmap(page);
-
- for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC),
- bcount = 0; time_before(jiffies, end); bcount++) {
- struct ahash_request *hdesc;
- int i;
-
- hdesc = cfs_crypto_hash_init(hash_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
- err = PTR_ERR(hdesc);
- break;
- }
-
- for (i = 0; i < buf_len / PAGE_SIZE; i++) {
- err = cfs_crypto_hash_update_page(hdesc, page, 0,
- PAGE_SIZE);
- if (err)
- break;
- }
-
- err = cfs_crypto_hash_final(hdesc, hash, &hash_len);
- if (err)
- break;
- }
- end = jiffies;
- __free_page(page);
-out_err:
- if (err) {
- cfs_crypto_hash_speeds[hash_alg] = err;
- CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
- cfs_crypto_hash_name(hash_alg), err);
- } else {
- unsigned long tmp;
-
- tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
- 1000) / (1024 * 1024);
- cfs_crypto_hash_speeds[hash_alg] = (int)tmp;
- CDEBUG(D_CONFIG, "Crypto hash algorithm %s speed = %d MB/s\n",
- cfs_crypto_hash_name(hash_alg),
- cfs_crypto_hash_speeds[hash_alg]);
- }
-}
-
-/**
- * hash speed in Mbytes per second for valid hash algorithm
- *
- * Return the performance of the specified \a hash_alg that was previously
- * computed using cfs_crypto_performance_test().
- *
- * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
- *
- * \retval positive speed of the hash function in MB/s
- * \retval -ENOENT if \a hash_alg is unsupported
- * \retval negative errno if \a hash_alg speed is unavailable
- */
-int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg)
-{
- if (hash_alg < CFS_HASH_ALG_MAX)
- return cfs_crypto_hash_speeds[hash_alg];
- return -ENOENT;
-}
-EXPORT_SYMBOL(cfs_crypto_hash_speed);
-
-/**
- * Run the performance test for all hash algorithms.
- *
- * Run the cfs_crypto_performance_test() benchmark for all of the available
- * hash functions using a 1MB buffer size. This is a reasonable buffer size
- * for Lustre RPCs, even if the actual RPC size is larger or smaller.
- *
- * Since the setup cost and computation speed of various hash algorithms is
- * a function of the buffer size (and possibly internal contention of offload
- * engines), this speed only represents an estimate of the actual speed under
- * actual usage, but is reasonable for comparing available algorithms.
- *
- * The actual speeds are available via cfs_crypto_hash_speed() for later
- * comparison.
- *
- * \retval 0 on success
- * \retval -ENOMEM if no memory is available for test buffer
- */
-static int cfs_crypto_test_hashes(void)
-{
- enum cfs_crypto_hash_alg hash_alg;
-
- for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
- cfs_crypto_performance_test(hash_alg);
-
- return 0;
-}
-
-static int adler32;
-
-/**
- * Register available hash functions
- *
- * \retval 0
- */
-int cfs_crypto_register(void)
-{
- request_module("crc32c");
-
- adler32 = cfs_crypto_adler32_register();
-
- /* check all algorithms and do performance test */
- cfs_crypto_test_hashes();
- return 0;
-}
-
-/**
- * Unregister previously registered hash functions
- */
-void cfs_crypto_unregister(void)
-{
- if (!adler32)
- cfs_crypto_adler32_unregister();
-}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
deleted file mode 100644
index 5616e9ea1450..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/**
- * Functions for start/stop shash adler32 algorithm.
- */
-int cfs_crypto_adler32_register(void);
-void cfs_crypto_adler32_unregister(void);
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
deleted file mode 100644
index 0092166af258..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ /dev/null
@@ -1,145 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/linux/linux-debug.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/notifier.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/miscdevice.h>
-
-# define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-
-#include "../tracefile.h"
-
-#include <linux/kallsyms.h>
-
-char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
-
-/**
- * Upcall function once a Lustre log has been dumped.
- *
- * \param file path of the dumped log
- */
-void libcfs_run_debug_log_upcall(char *file)
-{
- char *argv[3];
- int rc;
- static const char * const envp[] = {
- "HOME=/",
- "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
- NULL
- };
-
- argv[0] = lnet_debug_log_upcall;
-
- LASSERTF(file, "called on a null filename\n");
- argv[1] = file; /* only need to pass the path of the file */
-
- argv[2] = NULL;
-
- rc = call_usermodehelper(argv[0], argv, (char **)envp, 1);
- if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n",
- rc, argv[0], argv[1]);
- } else {
- CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n",
- argv[0], argv[1]);
- }
-}
-
-/* coverity[+kill] */
-void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
-{
- libcfs_catastrophe = 1;
- libcfs_debug_msg(msgdata, "LBUG\n");
-
- if (in_interrupt()) {
- panic("LBUG in interrupt.\n");
- /* not reached */
- }
-
- dump_stack();
- if (!libcfs_panic_on_lbug)
- libcfs_debug_dumplog();
- if (libcfs_panic_on_lbug)
- panic("LBUG");
- set_current_state(TASK_UNINTERRUPTIBLE);
- while (1)
- schedule();
-}
-EXPORT_SYMBOL(lbug_with_loc);
-
-static int panic_notifier(struct notifier_block *self, unsigned long unused1,
- void *unused2)
-{
- if (libcfs_panic_in_progress)
- return 0;
-
- libcfs_panic_in_progress = 1;
- mb();
-
- return 0;
-}
-
-static struct notifier_block libcfs_panic_notifier = {
- .notifier_call = panic_notifier,
- .next = NULL,
- .priority = 10000,
-};
-
-void libcfs_register_panic_notifier(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list,
- &libcfs_panic_notifier);
-}
-
-void libcfs_unregister_panic_notifier(void)
-{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &libcfs_panic_notifier);
-}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
deleted file mode 100644
index ddf625669bff..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ /dev/null
@@ -1,197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-
-#define LNET_MINOR 240
-
-static inline size_t libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
-{
- size_t len = sizeof(*data);
-
- len += cfs_size_round(data->ioc_inllen1);
- len += cfs_size_round(data->ioc_inllen2);
- return len;
-}
-
-static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
-{
- if (data->ioc_hdr.ioc_len > BIT(30)) {
- CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
- return true;
- }
- if (data->ioc_inllen1 > BIT(30)) {
- CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
- return true;
- }
- if (data->ioc_inllen2 > BIT(30)) {
- CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
- return true;
- }
- if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
- CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
- return true;
- }
- if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
- CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
- return true;
- }
- if (data->ioc_pbuf1 && !data->ioc_plen1) {
- CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
- return true;
- }
- if (data->ioc_pbuf2 && !data->ioc_plen2) {
- CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
- return true;
- }
- if (data->ioc_plen1 && !data->ioc_pbuf1) {
- CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
- return true;
- }
- if (data->ioc_plen2 && !data->ioc_pbuf2) {
- CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
- return true;
- }
- if ((u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
- CERROR("LIBCFS ioctl: packlen != ioc_len\n");
- return true;
- }
- if (data->ioc_inllen1 &&
- data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
- return true;
- }
- if (data->ioc_inllen2 &&
- data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
- data->ioc_inllen2 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
- return true;
- }
- return false;
-}
-
-int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
-{
- if (libcfs_ioctl_is_invalid(data)) {
- CERROR("libcfs ioctl: parameter not correctly formatted\n");
- return -EINVAL;
- }
-
- if (data->ioc_inllen1)
- data->ioc_inlbuf1 = &data->ioc_bulk[0];
-
- if (data->ioc_inllen2)
- data->ioc_inlbuf2 = &data->ioc_bulk[0] +
- cfs_size_round(data->ioc_inllen1);
-
- return 0;
-}
-
-int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
- const struct libcfs_ioctl_hdr __user *uhdr)
-{
- struct libcfs_ioctl_hdr hdr;
- int err;
-
- if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
- return -EFAULT;
-
- if (hdr.ioc_version != LIBCFS_IOCTL_VERSION &&
- hdr.ioc_version != LIBCFS_IOCTL_VERSION2) {
- CERROR("libcfs ioctl: version mismatch expected %#x, got %#x\n",
- LIBCFS_IOCTL_VERSION, hdr.ioc_version);
- return -EINVAL;
- }
-
- if (hdr.ioc_len < sizeof(hdr)) {
- CERROR("libcfs ioctl: user buffer too small for ioctl\n");
- return -EINVAL;
- }
-
- if (hdr.ioc_len > LIBCFS_IOC_DATA_MAX) {
- CERROR("libcfs ioctl: user buffer is too large %d/%d\n",
- hdr.ioc_len, LIBCFS_IOC_DATA_MAX);
- return -EINVAL;
- }
-
- *hdr_pp = kvmalloc(hdr.ioc_len, GFP_KERNEL);
- if (!*hdr_pp)
- return -ENOMEM;
-
- if (copy_from_user(*hdr_pp, uhdr, hdr.ioc_len)) {
- err = -EFAULT;
- goto free;
- }
-
- if ((*hdr_pp)->ioc_version != hdr.ioc_version ||
- (*hdr_pp)->ioc_len != hdr.ioc_len) {
- err = -EINVAL;
- goto free;
- }
-
- return 0;
-
-free:
- kvfree(*hdr_pp);
- return err;
-}
-
-static long
-libcfs_psdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE ||
- _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR ||
- _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) {
- CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n",
- _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
- return -EINVAL;
- }
-
- return libcfs_ioctl(cmd, (void __user *)arg);
-}
-
-static const struct file_operations libcfs_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = libcfs_psdev_ioctl,
-};
-
-struct miscdevice libcfs_dev = {
- .minor = LNET_MINOR,
- .name = "lnet",
- .fops = &libcfs_fops,
-};
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
deleted file mode 100644
index 7928d7182634..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#define LUSTRE_TRACEFILE_PRIVATE
-
-#include <linux/libcfs/libcfs.h>
-#include "../tracefile.h"
-
-/* percents to share the total debug memory for each type */
-static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
- 80, /* 80% pages for CFS_TCD_TYPE_PROC */
- 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
- 10 /* 10% pages for CFS_TCD_TYPE_IRQ */
-};
-
-char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-
-static DECLARE_RWSEM(cfs_tracefile_sem);
-
-int cfs_tracefile_init_arch(void)
-{
- int i;
- int j;
- struct cfs_trace_cpu_data *tcd;
-
- /* initialize trace_data */
- memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
- for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
- cfs_trace_data[i] =
- kmalloc_array(num_possible_cpus(),
- sizeof(union cfs_trace_data_union),
- GFP_KERNEL);
- if (!cfs_trace_data[i])
- goto out;
- }
-
- /* arch related info initialized */
- cfs_tcd_for_each(tcd, i, j) {
- spin_lock_init(&tcd->tcd_lock);
- tcd->tcd_pages_factor = pages_factor[i];
- tcd->tcd_type = i;
- tcd->tcd_cpu = j;
- }
-
- for (i = 0; i < num_possible_cpus(); i++)
- for (j = 0; j < 3; j++) {
- cfs_trace_console_buffers[i][j] =
- kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
- GFP_KERNEL);
-
- if (!cfs_trace_console_buffers[i][j])
- goto out;
- }
-
- return 0;
-
-out:
- cfs_tracefile_fini_arch();
- pr_err("lnet: Not enough memory\n");
- return -ENOMEM;
-}
-
-void cfs_tracefile_fini_arch(void)
-{
- int i;
- int j;
-
- for (i = 0; i < num_possible_cpus(); i++)
- for (j = 0; j < 3; j++) {
- kfree(cfs_trace_console_buffers[i][j]);
- cfs_trace_console_buffers[i][j] = NULL;
- }
-
- for (i = 0; cfs_trace_data[i]; i++) {
- kfree(cfs_trace_data[i]);
- cfs_trace_data[i] = NULL;
- }
-}
-
-void cfs_tracefile_read_lock(void)
-{
- down_read(&cfs_tracefile_sem);
-}
-
-void cfs_tracefile_read_unlock(void)
-{
- up_read(&cfs_tracefile_sem);
-}
-
-void cfs_tracefile_write_lock(void)
-{
- down_write(&cfs_tracefile_sem);
-}
-
-void cfs_tracefile_write_unlock(void)
-{
- up_write(&cfs_tracefile_sem);
-}
-
-enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
-{
- if (in_irq())
- return CFS_TCD_TYPE_IRQ;
- if (in_softirq())
- return CFS_TCD_TYPE_SOFTIRQ;
- return CFS_TCD_TYPE_PROC;
-}
-
-/*
- * The walking argument indicates the locking comes from all tcd types
- * iterator and we must lock it and dissable local irqs to avoid deadlocks
- * with other interrupt locks that might be happening. See LU-1311
- * for details.
- */
-int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
- __acquires(&tcd->tc_lock)
-{
- __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- spin_lock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- spin_lock_irq(&tcd->tcd_lock);
- else
- spin_lock(&tcd->tcd_lock);
- return 1;
-}
-
-void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
- __releases(&tcd->tcd_lock)
-{
- __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- spin_unlock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- spin_unlock_irq(&tcd->tcd_lock);
- else
- spin_unlock(&tcd->tcd_lock);
-}
-
-void
-cfs_set_ptldebug_header(struct ptldebug_header *header,
- struct libcfs_debug_msg_data *msgdata,
- unsigned long stack)
-{
- struct timespec64 ts;
-
- ktime_get_real_ts64(&ts);
-
- header->ph_subsys = msgdata->msg_subsys;
- header->ph_mask = msgdata->msg_mask;
- header->ph_cpu_id = smp_processor_id();
- header->ph_type = cfs_trace_buf_idx_get();
- /* y2038 safe since all user space treats this as unsigned, but
- * will overflow in 2106
- */
- header->ph_sec = (u32)ts.tv_sec;
- header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
- header->ph_stack = stack;
- header->ph_pid = current->pid;
- header->ph_line_num = msgdata->msg_line;
- header->ph_extern_pid = 0;
-}
-
-static char *
-dbghdr_to_err_string(struct ptldebug_header *hdr)
-{
- switch (hdr->ph_subsys) {
- case S_LND:
- case S_LNET:
- return "LNetError";
- default:
- return "LustreError";
- }
-}
-
-static char *
-dbghdr_to_info_string(struct ptldebug_header *hdr)
-{
- switch (hdr->ph_subsys) {
- case S_LND:
- case S_LNET:
- return "LNet";
- default:
- return "Lustre";
- }
-}
-
-void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
- const char *buf, int len, const char *file,
- const char *fn)
-{
- char *prefix = "Lustre", *ptype = NULL;
-
- if (mask & D_EMERG) {
- prefix = dbghdr_to_err_string(hdr);
- ptype = KERN_EMERG;
- } else if (mask & D_ERROR) {
- prefix = dbghdr_to_err_string(hdr);
- ptype = KERN_ERR;
- } else if (mask & D_WARNING) {
- prefix = dbghdr_to_info_string(hdr);
- ptype = KERN_WARNING;
- } else if (mask & (D_CONSOLE | libcfs_printk)) {
- prefix = dbghdr_to_info_string(hdr);
- ptype = KERN_INFO;
- }
-
- if (mask & D_CONSOLE) {
- pr_info("%s%s: %.*s", ptype, prefix, len, buf);
- } else {
- pr_info("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
- hdr->ph_pid, hdr->ph_extern_pid, file,
- hdr->ph_line_num, fn, len, buf);
- }
-}
-
-int cfs_trace_max_debug_mb(void)
-{
- int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
-
- return max(512, (total_mb * 80) / 100);
-}
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
deleted file mode 100644
index a03f924f1d7c..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ /dev/null
@@ -1,604 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <net/sock.h>
-#include <linux/uio.h>
-
-#include <linux/uaccess.h>
-
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/list.h>
-
-#include <linux/sysctl.h>
-#include <linux/debugfs.h>
-
-# define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-#include <asm/div64.h>
-
-#include <linux/libcfs/libcfs_crypto.h>
-#include <linux/lnet/lib-lnet.h>
-#include <uapi/linux/lnet/lnet-dlc.h>
-#include "tracefile.h"
-
-static struct dentry *lnet_debugfs_root;
-
-static DECLARE_RWSEM(ioctl_list_sem);
-static LIST_HEAD(ioctl_list);
-
-int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
-{
- int rc = 0;
-
- down_write(&ioctl_list_sem);
- if (!list_empty(&hand->item))
- rc = -EBUSY;
- else
- list_add_tail(&hand->item, &ioctl_list);
- up_write(&ioctl_list_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_register_ioctl);
-
-int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
-{
- int rc = 0;
-
- down_write(&ioctl_list_sem);
- if (list_empty(&hand->item))
- rc = -ENOENT;
- else
- list_del_init(&hand->item);
- up_write(&ioctl_list_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_deregister_ioctl);
-
-int libcfs_ioctl(unsigned long cmd, void __user *uparam)
-{
- struct libcfs_ioctl_data *data = NULL;
- struct libcfs_ioctl_hdr *hdr;
- int err;
-
- /* 'cmd' and permissions get checked in our arch-specific caller */
- err = libcfs_ioctl_getdata(&hdr, uparam);
- if (err) {
- CDEBUG_LIMIT(D_ERROR,
- "libcfs ioctl: data header error %d\n", err);
- return err;
- }
-
- if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) {
- /*
- * The libcfs_ioctl_data_adjust() function performs adjustment
- * operations on the libcfs_ioctl_data structure to make
- * it usable by the code. This doesn't need to be called
- * for new data structures added.
- */
- data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
- err = libcfs_ioctl_data_adjust(data);
- if (err)
- goto out;
- }
-
- CDEBUG(D_IOCTL, "libcfs ioctl cmd %lu\n", cmd);
- switch (cmd) {
- case IOC_LIBCFS_CLEAR_DEBUG:
- libcfs_debug_clear_buffer();
- break;
-
- case IOC_LIBCFS_MARK_DEBUG:
- if (!data || !data->ioc_inlbuf1 ||
- data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') {
- err = -EINVAL;
- goto out;
- }
- libcfs_debug_mark_buffer(data->ioc_inlbuf1);
- break;
-
- default: {
- struct libcfs_ioctl_handler *hand;
-
- err = -EINVAL;
- down_read(&ioctl_list_sem);
- list_for_each_entry(hand, &ioctl_list, item) {
- err = hand->handle_ioctl(cmd, hdr);
- if (err == -EINVAL)
- continue;
-
- if (!err) {
- if (copy_to_user(uparam, hdr, hdr->ioc_len))
- err = -EFAULT;
- }
- break;
- }
- up_read(&ioctl_list_sem);
- break; }
- }
-out:
- kvfree(hdr);
- return err;
-}
-
-int lprocfs_call_handler(void *data, int write, loff_t *ppos,
- void __user *buffer, size_t *lenp,
- int (*handler)(void *data, int write, loff_t pos,
- void __user *buffer, int len))
-{
- int rc = handler(data, write, *ppos, buffer, *lenp);
-
- if (rc < 0)
- return rc;
-
- if (write) {
- *ppos += *lenp;
- } else {
- *lenp = rc;
- *ppos += rc;
- }
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_call_handler);
-
-static int __proc_dobitmasks(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- const int tmpstrlen = 512;
- char *tmpstr;
- int rc;
- unsigned int *mask = data;
- int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
- int is_printk = (mask == &libcfs_printk) ? 1 : 0;
-
- rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
- if (rc < 0)
- return rc;
-
- if (!write) {
- libcfs_debug_mask2str(tmpstr, tmpstrlen, *mask, is_subsys);
- rc = strlen(tmpstr);
-
- if (pos >= rc) {
- rc = 0;
- } else {
- rc = cfs_trace_copyout_string(buffer, nob,
- tmpstr + pos, "\n");
- }
- } else {
- rc = cfs_trace_copyin_string(tmpstr, tmpstrlen, buffer, nob);
- if (rc < 0) {
- kfree(tmpstr);
- return rc;
- }
-
- rc = libcfs_debug_str2mask(mask, tmpstr, is_subsys);
- /* Always print LBUG/LASSERT to console, so keep this mask */
- if (is_printk)
- *mask |= D_EMERG;
- }
-
- kfree(tmpstr);
- return rc;
-}
-
-static int proc_dobitmasks(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_dobitmasks);
-}
-
-static int __proc_dump_kernel(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- if (!write)
- return 0;
-
- return cfs_trace_dump_debug_buffer_usrstr(buffer, nob);
-}
-
-static int proc_dump_kernel(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_dump_kernel);
-}
-
-static int __proc_daemon_file(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- if (!write) {
- int len = strlen(cfs_tracefile);
-
- if (pos >= len)
- return 0;
-
- return cfs_trace_copyout_string(buffer, nob,
- cfs_tracefile + pos, "\n");
- }
-
- return cfs_trace_daemon_command_usrstr(buffer, nob);
-}
-
-static int proc_daemon_file(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_daemon_file);
-}
-
-static int libcfs_force_lbug(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- if (write)
- LBUG();
- return 0;
-}
-
-static int proc_fail_loc(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- int rc;
- long old_fail_loc = cfs_fail_loc;
-
- rc = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
- if (old_fail_loc != cfs_fail_loc)
- wake_up(&cfs_race_waitq);
- return rc;
-}
-
-static int __proc_cpt_table(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- char *buf = NULL;
- int len = 4096;
- int rc = 0;
-
- if (write)
- return -EPERM;
-
- LASSERT(cfs_cpt_table);
-
- while (1) {
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = cfs_cpt_table_print(cfs_cpt_table, buf, len);
- if (rc >= 0)
- break;
-
- if (rc == -EFBIG) {
- kfree(buf);
- len <<= 1;
- continue;
- }
- goto out;
- }
-
- if (pos >= rc) {
- rc = 0;
- goto out;
- }
-
- rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL);
- out:
- kfree(buf);
- return rc;
-}
-
-static int proc_cpt_table(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_cpt_table);
-}
-
-static struct ctl_table lnet_table[] = {
- {
- .procname = "debug",
- .data = &libcfs_debug,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks,
- },
- {
- .procname = "subsystem_debug",
- .data = &libcfs_subsystem_debug,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks,
- },
- {
- .procname = "printk",
- .data = &libcfs_printk,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks,
- },
- {
- .procname = "cpu_partition_table",
- .maxlen = 128,
- .mode = 0444,
- .proc_handler = &proc_cpt_table,
- },
- {
- .procname = "debug_log_upcall",
- .data = lnet_debug_log_upcall,
- .maxlen = sizeof(lnet_debug_log_upcall),
- .mode = 0644,
- .proc_handler = &proc_dostring,
- },
- {
- .procname = "catastrophe",
- .data = &libcfs_catastrophe,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "dump_kernel",
- .maxlen = 256,
- .mode = 0200,
- .proc_handler = &proc_dump_kernel,
- },
- {
- .procname = "daemon_file",
- .mode = 0644,
- .maxlen = 256,
- .proc_handler = &proc_daemon_file,
- },
- {
- .procname = "force_lbug",
- .data = NULL,
- .maxlen = 0,
- .mode = 0200,
- .proc_handler = &libcfs_force_lbug
- },
- {
- .procname = "fail_loc",
- .data = &cfs_fail_loc,
- .maxlen = sizeof(cfs_fail_loc),
- .mode = 0644,
- .proc_handler = &proc_fail_loc
- },
- {
- .procname = "fail_val",
- .data = &cfs_fail_val,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "fail_err",
- .data = &cfs_fail_err,
- .maxlen = sizeof(cfs_fail_err),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- }
-};
-
-static const struct lnet_debugfs_symlink_def lnet_debugfs_symlinks[] = {
- { "console_ratelimit",
- "/sys/module/libcfs/parameters/libcfs_console_ratelimit"},
- { "debug_path",
- "/sys/module/libcfs/parameters/libcfs_debug_file_path"},
- { "panic_on_lbug",
- "/sys/module/libcfs/parameters/libcfs_panic_on_lbug"},
- { "libcfs_console_backoff",
- "/sys/module/libcfs/parameters/libcfs_console_backoff"},
- { "debug_mb",
- "/sys/module/libcfs/parameters/libcfs_debug_mb"},
- { "console_min_delay_centisecs",
- "/sys/module/libcfs/parameters/libcfs_console_min_delay"},
- { "console_max_delay_centisecs",
- "/sys/module/libcfs/parameters/libcfs_console_max_delay"},
- {},
-};
-
-static ssize_t lnet_debugfs_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ctl_table *table = filp->private_data;
- int error;
-
- error = table->proc_handler(table, 0, (void __user *)buf, &count, ppos);
- if (!error)
- error = count;
-
- return error;
-}
-
-static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ctl_table *table = filp->private_data;
- int error;
-
- error = table->proc_handler(table, 1, (void __user *)buf, &count, ppos);
- if (!error)
- error = count;
-
- return error;
-}
-
-static const struct file_operations lnet_debugfs_file_operations_rw = {
- .open = simple_open,
- .read = lnet_debugfs_read,
- .write = lnet_debugfs_write,
- .llseek = default_llseek,
-};
-
-static const struct file_operations lnet_debugfs_file_operations_ro = {
- .open = simple_open,
- .read = lnet_debugfs_read,
- .llseek = default_llseek,
-};
-
-static const struct file_operations lnet_debugfs_file_operations_wo = {
- .open = simple_open,
- .write = lnet_debugfs_write,
- .llseek = default_llseek,
-};
-
-static const struct file_operations *lnet_debugfs_fops_select(umode_t mode)
-{
- if (!(mode & 0222))
- return &lnet_debugfs_file_operations_ro;
-
- if (!(mode & 0444))
- return &lnet_debugfs_file_operations_wo;
-
- return &lnet_debugfs_file_operations_rw;
-}
-
-void lustre_insert_debugfs(struct ctl_table *table,
- const struct lnet_debugfs_symlink_def *symlinks)
-{
- if (!lnet_debugfs_root)
- lnet_debugfs_root = debugfs_create_dir("lnet", NULL);
-
- /* Even if we cannot create, just ignore it altogether) */
- if (IS_ERR_OR_NULL(lnet_debugfs_root))
- return;
-
- /* We don't save the dentry returned in next two calls, because
- * we don't call debugfs_remove() but rather remove_recursive()
- */
- for (; table->procname; table++)
- debugfs_create_file(table->procname, table->mode,
- lnet_debugfs_root, table,
- lnet_debugfs_fops_select(table->mode));
-
- for (; symlinks && symlinks->name; symlinks++)
- debugfs_create_symlink(symlinks->name, lnet_debugfs_root,
- symlinks->target);
-}
-EXPORT_SYMBOL_GPL(lustre_insert_debugfs);
-
-static void lustre_remove_debugfs(void)
-{
- debugfs_remove_recursive(lnet_debugfs_root);
-
- lnet_debugfs_root = NULL;
-}
-
-static int libcfs_init(void)
-{
- int rc;
-
- rc = libcfs_debug_init(5 * 1024 * 1024);
- if (rc < 0) {
- pr_err("LustreError: libcfs_debug_init: %d\n", rc);
- return rc;
- }
-
- rc = cfs_cpu_init();
- if (rc)
- goto cleanup_debug;
-
- rc = misc_register(&libcfs_dev);
- if (rc) {
- CERROR("misc_register: error %d\n", rc);
- goto cleanup_cpu;
- }
-
- cfs_rehash_wq = alloc_workqueue("cfs_rh", WQ_SYSFS, 4);
- if (!cfs_rehash_wq) {
- CERROR("Failed to start rehash workqueue.\n");
- rc = -ENOMEM;
- goto cleanup_deregister;
- }
-
- rc = cfs_crypto_register();
- if (rc) {
- CERROR("cfs_crypto_register: error %d\n", rc);
- goto cleanup_deregister;
- }
-
- lustre_insert_debugfs(lnet_table, lnet_debugfs_symlinks);
-
- CDEBUG(D_OTHER, "portals setup OK\n");
- return 0;
- cleanup_deregister:
- misc_deregister(&libcfs_dev);
-cleanup_cpu:
- cfs_cpu_fini();
- cleanup_debug:
- libcfs_debug_cleanup();
- return rc;
-}
-
-static void libcfs_exit(void)
-{
- int rc;
-
- lustre_remove_debugfs();
-
- if (cfs_rehash_wq) {
- destroy_workqueue(cfs_rehash_wq);
- cfs_rehash_wq = NULL;
- }
-
- cfs_crypto_unregister();
-
- misc_deregister(&libcfs_dev);
-
- cfs_cpu_fini();
-
- rc = libcfs_debug_cleanup();
- if (rc)
- pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc);
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre helper library");
-MODULE_VERSION(LIBCFS_VERSION);
-MODULE_LICENSE("GPL");
-
-module_init(libcfs_init);
-module_exit(libcfs_exit);
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
deleted file mode 100644
index 4affca750bc5..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ /dev/null
@@ -1,1191 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/tracefile.c
- *
- * Author: Zach Brown <zab@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#define LUSTRE_TRACEFILE_PRIVATE
-#define pr_fmt(fmt) "Lustre: " fmt
-#include "tracefile.h"
-
-#include <linux/libcfs/libcfs.h>
-
-/* XXX move things up to the top, comment */
-union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
-
-char cfs_tracefile[TRACEFILE_NAME_SIZE];
-long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
-static struct tracefiled_ctl trace_tctl;
-static DEFINE_MUTEX(cfs_trace_thread_mutex);
-static int thread_running;
-
-static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
-
-struct page_collection {
- struct list_head pc_pages;
- /*
- * if this flag is set, collect_pages() will spill both
- * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
- * only ->tcd_pages are spilled.
- */
- int pc_want_daemon_pages;
-};
-
-struct tracefiled_ctl {
- struct completion tctl_start;
- struct completion tctl_stop;
- wait_queue_head_t tctl_waitq;
- pid_t tctl_pid;
- atomic_t tctl_shutdown;
-};
-
-/*
- * small data-structure for each page owned by tracefiled.
- */
-struct cfs_trace_page {
- /*
- * page itself
- */
- struct page *page;
- /*
- * linkage into one of the lists in trace_data_union or
- * page_collection
- */
- struct list_head linkage;
- /*
- * number of bytes used within this page
- */
- unsigned int used;
- /*
- * cpu that owns this page
- */
- unsigned short cpu;
- /*
- * type(context) of this page
- */
- unsigned short type;
-};
-
-static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
- struct cfs_trace_cpu_data *tcd);
-
-static inline struct cfs_trace_page *
-cfs_tage_from_list(struct list_head *list)
-{
- return list_entry(list, struct cfs_trace_page, linkage);
-}
-
-static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
-{
- struct page *page;
- struct cfs_trace_page *tage;
-
- /* My caller is trying to free memory */
- if (!in_interrupt() && memory_pressure_get())
- return NULL;
-
- /*
- * Don't spam console with allocation failures: they will be reported
- * by upper layer anyway.
- */
- gfp |= __GFP_NOWARN;
- page = alloc_page(gfp);
- if (!page)
- return NULL;
-
- tage = kmalloc(sizeof(*tage), gfp);
- if (!tage) {
- __free_page(page);
- return NULL;
- }
-
- tage->page = page;
- atomic_inc(&cfs_tage_allocated);
- return tage;
-}
-
-static void cfs_tage_free(struct cfs_trace_page *tage)
-{
- __free_page(tage->page);
- kfree(tage);
- atomic_dec(&cfs_tage_allocated);
-}
-
-static void cfs_tage_to_tail(struct cfs_trace_page *tage,
- struct list_head *queue)
-{
- list_move_tail(&tage->linkage, queue);
-}
-
-int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
- struct list_head *stock)
-{
- int i;
-
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
-
- for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++i) {
- struct cfs_trace_page *tage;
-
- tage = cfs_tage_alloc(gfp);
- if (!tage)
- break;
- list_add_tail(&tage->linkage, stock);
- }
- return i;
-}
-
-/* return a page that has 'len' bytes left at the end */
-static struct cfs_trace_page *
-cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
-{
- struct cfs_trace_page *tage;
-
- if (tcd->tcd_cur_pages > 0) {
- __LASSERT(!list_empty(&tcd->tcd_pages));
- tage = cfs_tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= PAGE_SIZE)
- return tage;
- }
-
- if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
- if (tcd->tcd_cur_stock_pages > 0) {
- tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
- --tcd->tcd_cur_stock_pages;
- list_del_init(&tage->linkage);
- } else {
- tage = cfs_tage_alloc(GFP_ATOMIC);
- if (unlikely(!tage)) {
- if (!memory_pressure_get() || in_interrupt())
- pr_warn_ratelimited("cannot allocate a tage (%ld)\n",
- tcd->tcd_cur_pages);
- return NULL;
- }
- }
-
- tage->used = 0;
- tage->cpu = smp_processor_id();
- tage->type = tcd->tcd_type;
- list_add_tail(&tage->linkage, &tcd->tcd_pages);
- tcd->tcd_cur_pages++;
-
- if (tcd->tcd_cur_pages > 8 && thread_running) {
- struct tracefiled_ctl *tctl = &trace_tctl;
- /*
- * wake up tracefiled to process some pages.
- */
- wake_up(&tctl->tctl_waitq);
- }
- return tage;
- }
- return NULL;
-}
-
-static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
-{
- int pgcount = tcd->tcd_cur_pages / 10;
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
-
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
-
- pr_warn_ratelimited("debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
- pgcount + 1, tcd->tcd_cur_pages);
-
- INIT_LIST_HEAD(&pc.pc_pages);
-
- list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
- if (!pgcount--)
- break;
-
- list_move_tail(&tage->linkage, &pc.pc_pages);
- tcd->tcd_cur_pages--;
- }
- put_pages_on_tcd_daemon_list(&pc, tcd);
-}
-
-/* return a page that has 'len' bytes left at the end */
-static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
- unsigned long len)
-{
- struct cfs_trace_page *tage;
-
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
-
- if (len > PAGE_SIZE) {
- pr_err("cowardly refusing to write %lu bytes in a page\n", len);
- return NULL;
- }
-
- tage = cfs_trace_get_tage_try(tcd, len);
- if (tage)
- return tage;
- if (thread_running)
- cfs_tcd_shrink(tcd);
- if (tcd->tcd_cur_pages > 0) {
- tage = cfs_tage_from_list(tcd->tcd_pages.next);
- tage->used = 0;
- cfs_tage_to_tail(tage, &tcd->tcd_pages);
- }
- return tage;
-}
-
-int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
- const char *format, ...)
-{
- va_list args;
- int rc;
-
- va_start(args, format);
- rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
- va_end(args);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_debug_msg);
-
-int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
- const char *format1, va_list args,
- const char *format2, ...)
-{
- struct cfs_trace_cpu_data *tcd = NULL;
- struct ptldebug_header header = { 0 };
- struct cfs_trace_page *tage;
- /* string_buf is used only if tcd != NULL, and is always set then */
- char *string_buf = NULL;
- char *debug_buf;
- int known_size;
- int needed = 85; /* average message length */
- int max_nob;
- va_list ap;
- int depth;
- int i;
- int remain;
- int mask = msgdata->msg_mask;
- const char *file = kbasename(msgdata->msg_file);
- struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
-
- tcd = cfs_trace_get_tcd();
-
- /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
- * pins us to a particular CPU. This avoids an smp_processor_id()
- * warning on Linux when debugging is enabled.
- */
- cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
-
- if (!tcd) /* arch may not log in IRQ context */
- goto console;
-
- if (!tcd->tcd_cur_pages)
- header.ph_flags |= PH_FLAG_FIRST_RECORD;
-
- if (tcd->tcd_shutting_down) {
- cfs_trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- depth = __current_nesting_level();
- known_size = strlen(file) + 1 + depth;
- if (msgdata->msg_fn)
- known_size += strlen(msgdata->msg_fn) + 1;
-
- if (libcfs_debug_binary)
- known_size += sizeof(header);
-
- /*
- * '2' used because vsnprintf return real size required for output
- * _without_ terminating NULL.
- * if needed is to small for this format.
- */
- for (i = 0; i < 2; i++) {
- tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
- if (!tage) {
- if (needed + known_size > PAGE_SIZE)
- mask |= D_ERROR;
-
- cfs_trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- string_buf = (char *)page_address(tage->page) +
- tage->used + known_size;
-
- max_nob = PAGE_SIZE - tage->used - known_size;
- if (max_nob <= 0) {
- pr_emerg("negative max_nob: %d\n", max_nob);
- mask |= D_ERROR;
- cfs_trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- needed = 0;
- if (format1) {
- va_copy(ap, args);
- needed = vsnprintf(string_buf, max_nob, format1, ap);
- va_end(ap);
- }
-
- if (format2) {
- remain = max_nob - needed;
- if (remain < 0)
- remain = 0;
-
- va_start(ap, format2);
- needed += vsnprintf(string_buf + needed, remain,
- format2, ap);
- va_end(ap);
- }
-
- if (needed < max_nob) /* well. printing ok.. */
- break;
- }
-
- if (*(string_buf + needed - 1) != '\n')
- pr_info("format at %s:%d:%s doesn't end in newline\n", file,
- msgdata->msg_line, msgdata->msg_fn);
-
- header.ph_len = known_size + needed;
- debug_buf = (char *)page_address(tage->page) + tage->used;
-
- if (libcfs_debug_binary) {
- memcpy(debug_buf, &header, sizeof(header));
- tage->used += sizeof(header);
- debug_buf += sizeof(header);
- }
-
- /* indent message according to the nesting level */
- while (depth-- > 0) {
- *(debug_buf++) = '.';
- ++tage->used;
- }
-
- strcpy(debug_buf, file);
- tage->used += strlen(file) + 1;
- debug_buf += strlen(file) + 1;
-
- if (msgdata->msg_fn) {
- strcpy(debug_buf, msgdata->msg_fn);
- tage->used += strlen(msgdata->msg_fn) + 1;
- debug_buf += strlen(msgdata->msg_fn) + 1;
- }
-
- __LASSERT(debug_buf == string_buf);
-
- tage->used += needed;
- __LASSERT(tage->used <= PAGE_SIZE);
-
-console:
- if (!(mask & libcfs_printk)) {
- /* no console output requested */
- if (tcd)
- cfs_trace_put_tcd(tcd);
- return 1;
- }
-
- if (cdls) {
- if (libcfs_console_ratelimit &&
- cdls->cdls_next && /* not first time ever */
- !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
- /* skipping a console message */
- cdls->cdls_count++;
- if (tcd)
- cfs_trace_put_tcd(tcd);
- return 1;
- }
-
- if (cfs_time_after(cfs_time_current(),
- cdls->cdls_next + libcfs_console_max_delay +
- 10 * HZ)) {
- /* last timeout was a long time ago */
- cdls->cdls_delay /= libcfs_console_backoff * 4;
- } else {
- cdls->cdls_delay *= libcfs_console_backoff;
- }
-
- if (cdls->cdls_delay < libcfs_console_min_delay)
- cdls->cdls_delay = libcfs_console_min_delay;
- else if (cdls->cdls_delay > libcfs_console_max_delay)
- cdls->cdls_delay = libcfs_console_max_delay;
-
- /* ensure cdls_next is never zero after it's been seen */
- cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
- }
-
- if (tcd) {
- cfs_print_to_console(&header, mask, string_buf, needed, file,
- msgdata->msg_fn);
- cfs_trace_put_tcd(tcd);
- } else {
- string_buf = cfs_trace_get_console_buffer();
-
- needed = 0;
- if (format1) {
- va_copy(ap, args);
- needed = vsnprintf(string_buf,
- CFS_TRACE_CONSOLE_BUFFER_SIZE,
- format1, ap);
- va_end(ap);
- }
- if (format2) {
- remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
- if (remain > 0) {
- va_start(ap, format2);
- needed += vsnprintf(string_buf + needed, remain,
- format2, ap);
- va_end(ap);
- }
- }
- cfs_print_to_console(&header, mask,
- string_buf, needed, file, msgdata->msg_fn);
-
- put_cpu();
- }
-
- if (cdls && cdls->cdls_count) {
- string_buf = cfs_trace_get_console_buffer();
-
- needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
- "Skipped %d previous similar message%s\n",
- cdls->cdls_count,
- (cdls->cdls_count > 1) ? "s" : "");
-
- cfs_print_to_console(&header, mask,
- string_buf, needed, file, msgdata->msg_fn);
-
- put_cpu();
- cdls->cdls_count = 0;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(libcfs_debug_vmsg2);
-
-void
-cfs_trace_assertion_failed(const char *str,
- struct libcfs_debug_msg_data *msgdata)
-{
- struct ptldebug_header hdr;
-
- libcfs_panic_in_progress = 1;
- libcfs_catastrophe = 1;
- mb();
-
- cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
-
- cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
- msgdata->msg_file, msgdata->msg_fn);
-
- panic("Lustre debug assertion failure\n");
-
- /* not reached */
-}
-
-static void
-panic_collect_pages(struct page_collection *pc)
-{
- /* Do the collect_pages job on a single CPU: assumes that all other
- * CPUs have been stopped during a panic. If this isn't true for some
- * arch, this will have to be implemented separately in each arch.
- */
- struct cfs_trace_cpu_data *tcd;
- int i;
- int j;
-
- INIT_LIST_HEAD(&pc->pc_pages);
-
- cfs_tcd_for_each(tcd, i, j) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
-
- if (pc->pc_want_daemon_pages) {
- list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
- tcd->tcd_cur_daemon_pages = 0;
- }
- }
-}
-
-static void collect_pages_on_all_cpus(struct page_collection *pc)
-{
- struct cfs_trace_cpu_data *tcd;
- int i, cpu;
-
- for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
- if (pc->pc_want_daemon_pages) {
- list_splice_init(&tcd->tcd_daemon_pages,
- &pc->pc_pages);
- tcd->tcd_cur_daemon_pages = 0;
- }
- }
- }
-}
-
-static void collect_pages(struct page_collection *pc)
-{
- INIT_LIST_HEAD(&pc->pc_pages);
-
- if (libcfs_panic_in_progress)
- panic_collect_pages(pc);
- else
- collect_pages_on_all_cpus(pc);
-}
-
-static void put_pages_back_on_all_cpus(struct page_collection *pc)
-{
- struct cfs_trace_cpu_data *tcd;
- struct list_head *cur_head;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- int i, cpu;
-
- for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu) {
- cur_head = tcd->tcd_pages.next;
-
- list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
- linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
-
- if (tage->cpu != cpu || tage->type != i)
- continue;
-
- cfs_tage_to_tail(tage, cur_head);
- tcd->tcd_cur_pages++;
- }
- }
- }
-}
-
-static void put_pages_back(struct page_collection *pc)
-{
- if (!libcfs_panic_in_progress)
- put_pages_back_on_all_cpus(pc);
-}
-
-/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
- * we have a good amount of data at all times for dumping during an LBUG, even
- * if we have been steadily writing (and otherwise discarding) pages via the
- * debug daemon.
- */
-static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
- struct cfs_trace_cpu_data *tcd)
-{
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
-
- list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
-
- if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
- continue;
-
- cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
- tcd->tcd_cur_daemon_pages++;
-
- if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
- struct cfs_trace_page *victim;
-
- __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
- victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
-
- __LASSERT_TAGE_INVARIANT(victim);
-
- list_del(&victim->linkage);
- cfs_tage_free(victim);
- tcd->tcd_cur_daemon_pages--;
- }
- }
-}
-
-static void put_pages_on_daemon_list(struct page_collection *pc)
-{
- struct cfs_trace_cpu_data *tcd;
- int i, cpu;
-
- for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu)
- put_pages_on_tcd_daemon_list(pc, tcd);
- }
-}
-
-void cfs_trace_debug_print(void)
-{
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
-
- pc.pc_want_daemon_pages = 1;
- collect_pages(&pc);
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- char *p, *file, *fn;
- struct page *page;
-
- __LASSERT_TAGE_INVARIANT(tage);
-
- page = tage->page;
- p = page_address(page);
- while (p < ((char *)page_address(page) + tage->used)) {
- struct ptldebug_header *hdr;
- int len;
-
- hdr = (void *)p;
- p += sizeof(*hdr);
- file = p;
- p += strlen(file) + 1;
- fn = p;
- p += strlen(fn) + 1;
- len = hdr->ph_len - (int)(p - (char *)hdr);
-
- cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);
-
- p += len;
- }
-
- list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
-}
-
-int cfs_tracefile_dump_all_pages(char *filename)
-{
- struct page_collection pc;
- struct file *filp;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- char *buf;
- mm_segment_t __oldfs;
- int rc;
-
- cfs_tracefile_write_lock();
-
- filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
- 0600);
- if (IS_ERR(filp)) {
- rc = PTR_ERR(filp);
- filp = NULL;
- pr_err("LustreError: can't open %s for dump: rc %d\n",
- filename, rc);
- goto out;
- }
-
- pc.pc_want_daemon_pages = 1;
- collect_pages(&pc);
- if (list_empty(&pc.pc_pages)) {
- rc = 0;
- goto close;
- }
- __oldfs = get_fs();
- set_fs(get_ds());
-
- /* ok, for now, just write the pages. in the future we'll be building
- * iobufs with the pages and calling generic_direct_IO
- */
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
-
- buf = kmap(tage->page);
- rc = kernel_write(filp, buf, tage->used, &filp->f_pos);
- kunmap(tage->page);
-
- if (rc != (int)tage->used) {
- pr_warn("wanted to write %u but wrote %d\n", tage->used,
- rc);
- put_pages_back(&pc);
- __LASSERT(list_empty(&pc.pc_pages));
- break;
- }
- list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
- set_fs(__oldfs);
- rc = vfs_fsync(filp, 1);
- if (rc)
- pr_err("sync returns %d\n", rc);
-close:
- filp_close(filp, NULL);
-out:
- cfs_tracefile_write_unlock();
- return rc;
-}
-
-void cfs_trace_flush_pages(void)
-{
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
-
- pc.pc_want_daemon_pages = 1;
- collect_pages(&pc);
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
-
- list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
-}
-
-int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
- const char __user *usr_buffer, int usr_buffer_nob)
-{
- int nob;
-
- if (usr_buffer_nob > knl_buffer_nob)
- return -EOVERFLOW;
-
- if (copy_from_user((void *)knl_buffer,
- usr_buffer, usr_buffer_nob))
- return -EFAULT;
-
- nob = strnlen(knl_buffer, usr_buffer_nob);
- while (--nob >= 0) /* strip trailing whitespace */
- if (!isspace(knl_buffer[nob]))
- break;
-
- if (nob < 0) /* empty string */
- return -EINVAL;
-
- if (nob == knl_buffer_nob) /* no space to terminate */
- return -EOVERFLOW;
-
- knl_buffer[nob + 1] = 0; /* terminate */
- return 0;
-}
-EXPORT_SYMBOL(cfs_trace_copyin_string);
-
-int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
- const char *knl_buffer, char *append)
-{
- /*
- * NB if 'append' != NULL, it's a single character to append to the
- * copied out string - usually "\n" or "" (i.e. a terminating zero byte)
- */
- int nob = strlen(knl_buffer);
-
- if (nob > usr_buffer_nob)
- nob = usr_buffer_nob;
-
- if (copy_to_user(usr_buffer, knl_buffer, nob))
- return -EFAULT;
-
- if (append && nob < usr_buffer_nob) {
- if (copy_to_user(usr_buffer + nob, append, 1))
- return -EFAULT;
-
- nob++;
- }
-
- return nob;
-}
-EXPORT_SYMBOL(cfs_trace_copyout_string);
-
-int cfs_trace_allocate_string_buffer(char **str, int nob)
-{
- if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
- return -EINVAL;
-
- *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
- if (!*str)
- return -ENOMEM;
-
- return 0;
-}
-
-int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
-{
- char *str;
- int rc;
-
- rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
- if (rc)
- return rc;
-
- rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
- usr_str, usr_str_nob);
- if (rc)
- goto out;
-
- if (str[0] != '/') {
- rc = -EINVAL;
- goto out;
- }
- rc = cfs_tracefile_dump_all_pages(str);
-out:
- kfree(str);
- return rc;
-}
-
-int cfs_trace_daemon_command(char *str)
-{
- int rc = 0;
-
- cfs_tracefile_write_lock();
-
- if (!strcmp(str, "stop")) {
- cfs_tracefile_write_unlock();
- cfs_trace_stop_thread();
- cfs_tracefile_write_lock();
- memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
-
- } else if (!strncmp(str, "size=", 5)) {
- unsigned long tmp;
-
- rc = kstrtoul(str + 5, 10, &tmp);
- if (!rc) {
- if (tmp < 10 || tmp > 20480)
- cfs_tracefile_size = CFS_TRACEFILE_SIZE;
- else
- cfs_tracefile_size = tmp << 20;
- }
- } else if (strlen(str) >= sizeof(cfs_tracefile)) {
- rc = -ENAMETOOLONG;
- } else if (str[0] != '/') {
- rc = -EINVAL;
- } else {
- strcpy(cfs_tracefile, str);
-
- pr_info("debug daemon will attempt to start writing to %s (%lukB max)\n",
- cfs_tracefile,
- (long)(cfs_tracefile_size >> 10));
-
- cfs_trace_start_thread();
- }
-
- cfs_tracefile_write_unlock();
- return rc;
-}
-
-int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
-{
- char *str;
- int rc;
-
- rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
- if (rc)
- return rc;
-
- rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
- usr_str, usr_str_nob);
- if (!rc)
- rc = cfs_trace_daemon_command(str);
-
- kfree(str);
- return rc;
-}
-
-int cfs_trace_set_debug_mb(int mb)
-{
- int i;
- int j;
- int pages;
- int limit = cfs_trace_max_debug_mb();
- struct cfs_trace_cpu_data *tcd;
-
- if (mb < num_possible_cpus()) {
- pr_warn("%d MB is too small for debug buffer size, setting it to %d MB.\n",
- mb, num_possible_cpus());
- mb = num_possible_cpus();
- }
-
- if (mb > limit) {
- pr_warn("%d MB is too large for debug buffer size, setting it to %d MB.\n",
- mb, limit);
- mb = limit;
- }
-
- mb /= num_possible_cpus();
- pages = mb << (20 - PAGE_SHIFT);
-
- cfs_tracefile_write_lock();
-
- cfs_tcd_for_each(tcd, i, j)
- tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
-
- cfs_tracefile_write_unlock();
-
- return 0;
-}
-
-int cfs_trace_get_debug_mb(void)
-{
- int i;
- int j;
- struct cfs_trace_cpu_data *tcd;
- int total_pages = 0;
-
- cfs_tracefile_read_lock();
-
- cfs_tcd_for_each(tcd, i, j)
- total_pages += tcd->tcd_max_pages;
-
- cfs_tracefile_read_unlock();
-
- return (total_pages >> (20 - PAGE_SHIFT)) + 1;
-}
-
-static int tracefiled(void *arg)
-{
- struct page_collection pc;
- struct tracefiled_ctl *tctl = arg;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- struct file *filp;
- char *buf;
- int last_loop = 0;
- int rc;
-
- /* we're started late enough that we pick up init's fs context */
- /* this is so broken in uml? what on earth is going on? */
-
- complete(&tctl->tctl_start);
-
- while (1) {
- wait_queue_entry_t __wait;
-
- pc.pc_want_daemon_pages = 0;
- collect_pages(&pc);
- if (list_empty(&pc.pc_pages))
- goto end_loop;
-
- filp = NULL;
- cfs_tracefile_read_lock();
- if (cfs_tracefile[0]) {
- filp = filp_open(cfs_tracefile,
- O_CREAT | O_RDWR | O_LARGEFILE,
- 0600);
- if (IS_ERR(filp)) {
- rc = PTR_ERR(filp);
- filp = NULL;
- pr_warn("couldn't open %s: %d\n", cfs_tracefile,
- rc);
- }
- }
- cfs_tracefile_read_unlock();
- if (!filp) {
- put_pages_on_daemon_list(&pc);
- __LASSERT(list_empty(&pc.pc_pages));
- goto end_loop;
- }
-
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- static loff_t f_pos;
-
- __LASSERT_TAGE_INVARIANT(tage);
-
- if (f_pos >= (off_t)cfs_tracefile_size)
- f_pos = 0;
- else if (f_pos > i_size_read(file_inode(filp)))
- f_pos = i_size_read(file_inode(filp));
-
- buf = kmap(tage->page);
- rc = kernel_write(filp, buf, tage->used, &f_pos);
- kunmap(tage->page);
-
- if (rc != (int)tage->used) {
- pr_warn("wanted to write %u but wrote %d\n",
- tage->used, rc);
- put_pages_back(&pc);
- __LASSERT(list_empty(&pc.pc_pages));
- break;
- }
- }
-
- filp_close(filp, NULL);
- put_pages_on_daemon_list(&pc);
- if (!list_empty(&pc.pc_pages)) {
- int i;
-
- pr_alert("trace pages aren't empty\n");
- pr_err("total cpus(%d): ", num_possible_cpus());
- for (i = 0; i < num_possible_cpus(); i++)
- if (cpu_online(i))
- pr_cont("%d(on) ", i);
- else
- pr_cont("%d(off) ", i);
- pr_cont("\n");
-
- i = 0;
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
- linkage)
- pr_err("page %d belongs to cpu %d\n",
- ++i, tage->cpu);
- pr_err("There are %d pages unwritten\n", i);
- }
- __LASSERT(list_empty(&pc.pc_pages));
-end_loop:
- if (atomic_read(&tctl->tctl_shutdown)) {
- if (!last_loop) {
- last_loop = 1;
- continue;
- } else {
- break;
- }
- }
- init_waitqueue_entry(&__wait, current);
- add_wait_queue(&tctl->tctl_waitq, &__wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- remove_wait_queue(&tctl->tctl_waitq, &__wait);
- }
- complete(&tctl->tctl_stop);
- return 0;
-}
-
-int cfs_trace_start_thread(void)
-{
- struct tracefiled_ctl *tctl = &trace_tctl;
- struct task_struct *task;
- int rc = 0;
-
- mutex_lock(&cfs_trace_thread_mutex);
- if (thread_running)
- goto out;
-
- init_completion(&tctl->tctl_start);
- init_completion(&tctl->tctl_stop);
- init_waitqueue_head(&tctl->tctl_waitq);
- atomic_set(&tctl->tctl_shutdown, 0);
-
- task = kthread_run(tracefiled, tctl, "ktracefiled");
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- goto out;
- }
-
- wait_for_completion(&tctl->tctl_start);
- thread_running = 1;
-out:
- mutex_unlock(&cfs_trace_thread_mutex);
- return rc;
-}
-
-void cfs_trace_stop_thread(void)
-{
- struct tracefiled_ctl *tctl = &trace_tctl;
-
- mutex_lock(&cfs_trace_thread_mutex);
- if (thread_running) {
- pr_info("shutting down debug daemon thread...\n");
- atomic_set(&tctl->tctl_shutdown, 1);
- wait_for_completion(&tctl->tctl_stop);
- thread_running = 0;
- }
- mutex_unlock(&cfs_trace_thread_mutex);
-}
-
-int cfs_tracefile_init(int max_pages)
-{
- struct cfs_trace_cpu_data *tcd;
- int i;
- int j;
- int rc;
- int factor;
-
- rc = cfs_tracefile_init_arch();
- if (rc)
- return rc;
-
- cfs_tcd_for_each(tcd, i, j) {
- /* tcd_pages_factor is initialized int tracefile_init_arch. */
- factor = tcd->tcd_pages_factor;
- INIT_LIST_HEAD(&tcd->tcd_pages);
- INIT_LIST_HEAD(&tcd->tcd_stock_pages);
- INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
- tcd->tcd_cur_pages = 0;
- tcd->tcd_cur_stock_pages = 0;
- tcd->tcd_cur_daemon_pages = 0;
- tcd->tcd_max_pages = (max_pages * factor) / 100;
- LASSERT(tcd->tcd_max_pages > 0);
- tcd->tcd_shutting_down = 0;
- }
-
- return 0;
-}
-
-static void trace_cleanup_on_all_cpus(void)
-{
- struct cfs_trace_cpu_data *tcd;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- int i, cpu;
-
- for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu) {
- tcd->tcd_shutting_down = 1;
-
- list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
- linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
-
- list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
-
- tcd->tcd_cur_pages = 0;
- }
- }
-}
-
-static void cfs_trace_cleanup(void)
-{
- struct page_collection pc;
-
- INIT_LIST_HEAD(&pc.pc_pages);
-
- trace_cleanup_on_all_cpus();
-
- cfs_tracefile_fini_arch();
-}
-
-void cfs_tracefile_exit(void)
-{
- cfs_trace_stop_thread();
- cfs_trace_cleanup();
-}
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
deleted file mode 100644
index a29d6eb3a785..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ /dev/null
@@ -1,263 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LIBCFS_TRACEFILE_H__
-#define __LIBCFS_TRACEFILE_H__
-
-#include <linux/libcfs/libcfs.h>
-
-enum cfs_trace_buf_type {
- CFS_TCD_TYPE_PROC = 0,
- CFS_TCD_TYPE_SOFTIRQ,
- CFS_TCD_TYPE_IRQ,
- CFS_TCD_TYPE_MAX
-};
-
-/* trace file lock routines */
-
-#define TRACEFILE_NAME_SIZE 1024
-extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
-extern long long cfs_tracefile_size;
-
-void libcfs_run_debug_log_upcall(char *file);
-
-int cfs_tracefile_init_arch(void);
-void cfs_tracefile_fini_arch(void);
-
-void cfs_tracefile_read_lock(void);
-void cfs_tracefile_read_unlock(void);
-void cfs_tracefile_write_lock(void);
-void cfs_tracefile_write_unlock(void);
-
-int cfs_tracefile_dump_all_pages(char *filename);
-void cfs_trace_debug_print(void);
-void cfs_trace_flush_pages(void);
-int cfs_trace_start_thread(void);
-void cfs_trace_stop_thread(void);
-int cfs_tracefile_init(int max_pages);
-void cfs_tracefile_exit(void);
-
-int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
- const char __user *usr_buffer, int usr_buffer_nob);
-int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
- const char *knl_str, char *append);
-int cfs_trace_allocate_string_buffer(char **str, int nob);
-int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob);
-int cfs_trace_daemon_command(char *str);
-int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob);
-int cfs_trace_set_debug_mb(int mb);
-int cfs_trace_get_debug_mb(void);
-
-void libcfs_debug_dumplog_internal(void *arg);
-void libcfs_register_panic_notifier(void);
-void libcfs_unregister_panic_notifier(void);
-extern int libcfs_panic_in_progress;
-int cfs_trace_max_debug_mb(void);
-
-#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
-#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
-#define CFS_TRACEFILE_SIZE (500 << 20)
-
-#ifdef LUSTRE_TRACEFILE_PRIVATE
-
-/*
- * Private declare for tracefile
- */
-#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
-#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
-
-#define CFS_TRACEFILE_SIZE (500 << 20)
-
-/*
- * Size of a buffer for sprinting console messages if we can't get a page
- * from system
- */
-#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024
-
-union cfs_trace_data_union {
- struct cfs_trace_cpu_data {
- /*
- * Even though this structure is meant to be per-CPU, locking
- * is needed because in some places the data may be accessed
- * from other CPUs. This lock is directly used in trace_get_tcd
- * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
- * tcd_for_each_type_lock
- */
- spinlock_t tcd_lock;
- unsigned long tcd_lock_flags;
-
- /*
- * pages with trace records not yet processed by tracefiled.
- */
- struct list_head tcd_pages;
- /* number of pages on ->tcd_pages */
- unsigned long tcd_cur_pages;
-
- /*
- * pages with trace records already processed by
- * tracefiled. These pages are kept in memory, so that some
- * portion of log can be written in the event of LBUG. This
- * list is maintained in LRU order.
- *
- * Pages are moved to ->tcd_daemon_pages by tracefiled()
- * (put_pages_on_daemon_list()). LRU pages from this list are
- * discarded when list grows too large.
- */
- struct list_head tcd_daemon_pages;
- /* number of pages on ->tcd_daemon_pages */
- unsigned long tcd_cur_daemon_pages;
-
- /*
- * Maximal number of pages allowed on ->tcd_pages and
- * ->tcd_daemon_pages each.
- * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
- * implementation.
- */
- unsigned long tcd_max_pages;
-
- /*
- * preallocated pages to write trace records into. Pages from
- * ->tcd_stock_pages are moved to ->tcd_pages by
- * portals_debug_msg().
- *
- * This list is necessary, because on some platforms it's
- * impossible to perform efficient atomic page allocation in a
- * non-blockable context.
- *
- * Such platforms fill ->tcd_stock_pages "on occasion", when
- * tracing code is entered in blockable context.
- *
- * trace_get_tage_try() tries to get a page from
- * ->tcd_stock_pages first and resorts to atomic page
- * allocation only if this queue is empty. ->tcd_stock_pages
- * is replenished when tracing code is entered in blocking
- * context (darwin-tracefile.c:trace_get_tcd()). We try to
- * maintain TCD_STOCK_PAGES (40 by default) pages in this
- * queue. Atomic allocation is only required if more than
- * TCD_STOCK_PAGES pagesful are consumed by trace records all
- * emitted in non-blocking contexts. Which is quite unlikely.
- */
- struct list_head tcd_stock_pages;
- /* number of pages on ->tcd_stock_pages */
- unsigned long tcd_cur_stock_pages;
-
- unsigned short tcd_shutting_down;
- unsigned short tcd_cpu;
- unsigned short tcd_type;
- /* The factors to share debug memory. */
- unsigned short tcd_pages_factor;
- } tcd;
- char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
-};
-
-#define TCD_MAX_TYPES 8
-extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS];
-
-#define cfs_tcd_for_each(tcd, i, j) \
- for (i = 0; cfs_trace_data[i]; i++) \
- for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
- j < num_possible_cpus(); \
- j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
-
-#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
- for (i = 0; cfs_trace_data[i] && \
- (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
- cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
-
-void cfs_set_ptldebug_header(struct ptldebug_header *header,
- struct libcfs_debug_msg_data *m,
- unsigned long stack);
-void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
- const char *buf, int len, const char *file,
- const char *fn);
-
-int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
-void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
-
-extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-enum cfs_trace_buf_type cfs_trace_buf_idx_get(void);
-
-static inline char *
-cfs_trace_get_console_buffer(void)
-{
- unsigned int i = get_cpu();
- unsigned int j = cfs_trace_buf_idx_get();
-
- return cfs_trace_console_buffers[i][j];
-}
-
-static inline struct cfs_trace_cpu_data *
-cfs_trace_get_tcd(void)
-{
- struct cfs_trace_cpu_data *tcd =
- &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;
-
- cfs_trace_lock_tcd(tcd, 0);
-
- return tcd;
-}
-
-static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
-{
- cfs_trace_unlock_tcd(tcd, 0);
-
- put_cpu();
-}
-
-int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
- struct list_head *stock);
-
-void cfs_trace_assertion_failed(const char *str,
- struct libcfs_debug_msg_data *m);
-
-/* ASSERTION that is safe to use within the debug system */
-#define __LASSERT(cond) \
-do { \
- if (unlikely(!(cond))) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
- cfs_trace_assertion_failed("ASSERTION("#cond") failed", \
- &msgdata); \
- } \
-} while (0)
-
-#define __LASSERT_TAGE_INVARIANT(tage) \
-do { \
- __LASSERT(tage); \
- __LASSERT(tage->page); \
- __LASSERT(tage->used <= PAGE_SIZE); \
- __LASSERT(page_count(tage->page) > 0); \
-} while (0)
-
-#endif /* LUSTRE_TRACEFILE_PRIVATE */
-
-#endif /* __LIBCFS_TRACEFILE_H__ */
diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile
deleted file mode 100644
index 0a9d70924fe0..000000000000
--- a/drivers/staging/lustre/lnet/lnet/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LNET) += lnet.o
-
-lnet-y := api-ni.o config.o nidstrings.o net_fault.o \
- lib-me.o lib-msg.o lib-eq.o lib-md.o lib-ptl.o \
- lib-socket.o lib-move.o module.o lo.o \
- router.o router_proc.o acceptor.o peer.o
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
deleted file mode 100644
index 5648f17eddc0..000000000000
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ /dev/null
@@ -1,501 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include <linux/completion.h>
-#include <net/sock.h>
-#include <linux/lnet/lib-lnet.h>
-
-static int accept_port = 988;
-static int accept_backlog = 127;
-static int accept_timeout = 5;
-
-static struct {
- int pta_shutdown;
- struct socket *pta_sock;
- struct completion pta_signal;
-} lnet_acceptor_state = {
- .pta_shutdown = 1
-};
-
-int
-lnet_acceptor_port(void)
-{
- return accept_port;
-}
-EXPORT_SYMBOL(lnet_acceptor_port);
-
-static inline int
-lnet_accept_magic(__u32 magic, __u32 constant)
-{
- return (magic == constant ||
- magic == __swab32(constant));
-}
-
-static char *accept = "secure";
-
-module_param(accept, charp, 0444);
-MODULE_PARM_DESC(accept, "Accept connections (secure|all|none)");
-module_param(accept_port, int, 0444);
-MODULE_PARM_DESC(accept_port, "Acceptor's port (same on all nodes)");
-module_param(accept_backlog, int, 0444);
-MODULE_PARM_DESC(accept_backlog, "Acceptor's listen backlog");
-module_param(accept_timeout, int, 0644);
-MODULE_PARM_DESC(accept_timeout, "Acceptor's timeout (seconds)");
-
-static char *accept_type;
-
-static int
-lnet_acceptor_get_tunables(void)
-{
- /*
- * Userland acceptor uses 'accept_type' instead of 'accept', due to
- * conflict with 'accept(2)', but kernel acceptor still uses 'accept'
- * for compatibility. Hence the trick.
- */
- accept_type = accept;
- return 0;
-}
-
-int
-lnet_acceptor_timeout(void)
-{
- return accept_timeout;
-}
-EXPORT_SYMBOL(lnet_acceptor_timeout);
-
-void
-lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
- __u32 peer_ip, int peer_port)
-{
- switch (rc) {
- /* "normal" errors */
- case -ECONNREFUSED:
- CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- case -EHOSTUNREACH:
- case -ENETUNREACH:
- CNETERR("Connection to %s at host %pI4h was unreachable: the network or that node may be down, or Lustre may be misconfigured.\n",
- libcfs_nid2str(peer_nid), &peer_ip);
- break;
- case -ETIMEDOUT:
- CNETERR("Connection to %s at host %pI4h on port %d took too long: that node may be hung or experiencing high load.\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- case -ECONNRESET:
- LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %pI4h on port %d was reset: is it running a compatible version of Lustre and is %s one of its NIDs?\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port,
- libcfs_nid2str(peer_nid));
- break;
- case -EPROTO:
- LCONSOLE_ERROR_MSG(0x11c, "Protocol error connecting to %s at host %pI4h on port %d: is it running a compatible version of Lustre?\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- case -EADDRINUSE:
- LCONSOLE_ERROR_MSG(0x11d, "No privileged ports available to connect to %s at host %pI4h on port %d\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- default:
- LCONSOLE_ERROR_MSG(0x11e, "Unexpected error %d connecting to %s at host %pI4h on port %d\n",
- rc, libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- }
-}
-EXPORT_SYMBOL(lnet_connect_console_error);
-
-int
-lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
- __u32 local_ip, __u32 peer_ip, int peer_port)
-{
- struct lnet_acceptor_connreq cr;
- struct socket *sock;
- int rc;
- int port;
- int fatal;
-
- BUILD_BUG_ON(sizeof(cr) > 16); /* too big to be on the stack */
-
- for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT;
- port >= LNET_ACCEPTOR_MIN_RESERVED_PORT;
- --port) {
- /* Iterate through reserved ports. */
-
- rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip,
- peer_port);
- if (rc) {
- if (fatal)
- goto failed;
- continue;
- }
-
- BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
-
- cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
- cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
- cr.acr_nid = peer_nid;
-
- if (the_lnet.ln_testprotocompat) {
- /* single-shot proto check */
- lnet_net_lock(LNET_LOCK_EX);
- if (the_lnet.ln_testprotocompat & 4) {
- cr.acr_version++;
- the_lnet.ln_testprotocompat &= ~4;
- }
- if (the_lnet.ln_testprotocompat & 8) {
- cr.acr_magic = LNET_PROTO_MAGIC;
- the_lnet.ln_testprotocompat &= ~8;
- }
- lnet_net_unlock(LNET_LOCK_EX);
- }
-
- rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
- if (rc)
- goto failed_sock;
-
- *sockp = sock;
- return 0;
- }
-
- rc = -EADDRINUSE;
- goto failed;
-
- failed_sock:
- sock_release(sock);
- failed:
- lnet_connect_console_error(rc, peer_nid, peer_ip, peer_port);
- return rc;
-}
-EXPORT_SYMBOL(lnet_connect);
-
-static int
-lnet_accept(struct socket *sock, __u32 magic)
-{
- struct lnet_acceptor_connreq cr;
- __u32 peer_ip;
- int peer_port;
- int rc;
- int flip;
- struct lnet_ni *ni;
- char *str;
-
- LASSERT(sizeof(cr) <= 16); /* not too big for the stack */
-
- rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT(!rc); /* we succeeded before */
-
- if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
- if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) {
- /*
- * future version compatibility!
- * When LNET unifies protocols over all LNDs, the first
- * thing sent will be a version query. I send back
- * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old"
- */
- memset(&cr, 0, sizeof(cr));
- cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
- cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
- rc = lnet_sock_write(sock, &cr, sizeof(cr),
- accept_timeout);
-
- if (rc)
- CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
- &peer_ip, rc);
- return -EPROTO;
- }
-
- if (lnet_accept_magic(magic, LNET_PROTO_TCP_MAGIC))
- str = "'old' socknal/tcpnal";
- else
- str = "unrecognised";
-
- LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n",
- &peer_ip, magic, str);
- return -EPROTO;
- }
-
- flip = (magic != LNET_PROTO_ACCEPTOR_MAGIC);
-
- rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version),
- accept_timeout);
- if (rc) {
- CERROR("Error %d reading connection request version from %pI4h\n",
- rc, &peer_ip);
- return -EIO;
- }
-
- if (flip)
- __swab32s(&cr.acr_version);
-
- if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) {
- /*
- * future version compatibility!
- * An acceptor-specific protocol rev will first send a version
- * query. I send back my current version to tell her I'm
- * "old".
- */
- int peer_version = cr.acr_version;
-
- memset(&cr, 0, sizeof(cr));
- cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
- cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
-
- rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
- if (rc)
- CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
- peer_version, &peer_ip, rc);
- return -EPROTO;
- }
-
- rc = lnet_sock_read(sock, &cr.acr_nid,
- sizeof(cr) -
- offsetof(struct lnet_acceptor_connreq, acr_nid),
- accept_timeout);
- if (rc) {
- CERROR("Error %d reading connection request from %pI4h\n",
- rc, &peer_ip);
- return -EIO;
- }
-
- if (flip)
- __swab64s(&cr.acr_nid);
-
- ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid));
- if (!ni || /* no matching net */
- ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
- if (ni)
- lnet_ni_decref(ni);
- LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n",
- &peer_ip, libcfs_nid2str(cr.acr_nid));
- return -EPERM;
- }
-
- if (!ni->ni_lnd->lnd_accept) {
- /* This catches a request for the loopback LND */
- lnet_ni_decref(ni);
- LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n",
- &peer_ip, libcfs_nid2str(cr.acr_nid));
- return -EPERM;
- }
-
- CDEBUG(D_NET, "Accept %s from %pI4h\n",
- libcfs_nid2str(cr.acr_nid), &peer_ip);
-
- rc = ni->ni_lnd->lnd_accept(ni, sock);
-
- lnet_ni_decref(ni);
- return rc;
-}
-
-static int
-lnet_acceptor(void *arg)
-{
- struct socket *newsock;
- int rc;
- __u32 magic;
- __u32 peer_ip;
- int peer_port;
- int secure = (int)((long)arg);
-
- LASSERT(!lnet_acceptor_state.pta_sock);
-
- rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port,
- accept_backlog);
- if (rc) {
- if (rc == -EADDRINUSE)
- LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
- accept_port);
- else
- LCONSOLE_ERROR_MSG(0x123, "Can't start acceptor on port %d: unexpected error %d\n",
- accept_port, rc);
-
- lnet_acceptor_state.pta_sock = NULL;
- } else {
- LCONSOLE(0, "Accept %s, port %d\n", accept_type, accept_port);
- }
-
- /* set init status and unblock parent */
- lnet_acceptor_state.pta_shutdown = rc;
- complete(&lnet_acceptor_state.pta_signal);
-
- if (rc)
- return rc;
-
- while (!lnet_acceptor_state.pta_shutdown) {
- rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock);
- if (rc) {
- if (rc != -EAGAIN) {
- CWARN("Accept error %d: pausing...\n", rc);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- }
- continue;
- }
-
- /* maybe the LNet acceptor thread has been waken */
- if (lnet_acceptor_state.pta_shutdown) {
- sock_release(newsock);
- break;
- }
-
- rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port);
- if (rc) {
- CERROR("Can't determine new connection's address\n");
- goto failed;
- }
-
- if (secure && peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
- CERROR("Refusing connection from %pI4h: insecure port %d\n",
- &peer_ip, peer_port);
- goto failed;
- }
-
- rc = lnet_sock_read(newsock, &magic, sizeof(magic),
- accept_timeout);
- if (rc) {
- CERROR("Error %d reading connection request from %pI4h\n",
- rc, &peer_ip);
- goto failed;
- }
-
- rc = lnet_accept(newsock, magic);
- if (rc)
- goto failed;
-
- continue;
-
-failed:
- sock_release(newsock);
- }
-
- sock_release(lnet_acceptor_state.pta_sock);
- lnet_acceptor_state.pta_sock = NULL;
-
- CDEBUG(D_NET, "Acceptor stopping\n");
-
- /* unblock lnet_acceptor_stop() */
- complete(&lnet_acceptor_state.pta_signal);
- return 0;
-}
-
-static inline int
-accept2secure(const char *acc, long *sec)
-{
- if (!strcmp(acc, "secure")) {
- *sec = 1;
- return 1;
- } else if (!strcmp(acc, "all")) {
- *sec = 0;
- return 1;
- } else if (!strcmp(acc, "none")) {
- return 0;
- }
-
- LCONSOLE_ERROR_MSG(0x124, "Can't parse 'accept=\"%s\"'\n",
- acc);
- return -EINVAL;
-}
-
-int
-lnet_acceptor_start(void)
-{
- struct task_struct *task;
- int rc;
- long rc2;
- long secure;
-
- /* if acceptor is already running return immediately */
- if (!lnet_acceptor_state.pta_shutdown)
- return 0;
-
- LASSERT(!lnet_acceptor_state.pta_sock);
-
- rc = lnet_acceptor_get_tunables();
- if (rc)
- return rc;
-
- init_completion(&lnet_acceptor_state.pta_signal);
- rc = accept2secure(accept_type, &secure);
- if (rc <= 0)
- return rc;
-
- if (!lnet_count_acceptor_nis()) /* not required */
- return 0;
-
- task = kthread_run(lnet_acceptor, (void *)(uintptr_t)secure,
- "acceptor_%03ld", secure);
- if (IS_ERR(task)) {
- rc2 = PTR_ERR(task);
- CERROR("Can't start acceptor thread: %ld\n", rc2);
-
- return -ESRCH;
- }
-
- /* wait for acceptor to startup */
- wait_for_completion(&lnet_acceptor_state.pta_signal);
-
- if (!lnet_acceptor_state.pta_shutdown) {
- /* started OK */
- LASSERT(lnet_acceptor_state.pta_sock);
- return 0;
- }
-
- LASSERT(!lnet_acceptor_state.pta_sock);
-
- return -ENETDOWN;
-}
-
-void
-lnet_acceptor_stop(void)
-{
- struct sock *sk;
-
- if (lnet_acceptor_state.pta_shutdown) /* not running */
- return;
-
- lnet_acceptor_state.pta_shutdown = 1;
-
- sk = lnet_acceptor_state.pta_sock->sk;
-
- /* awake any sleepers using safe method */
- sk->sk_state_change(sk);
-
- /* block until acceptor signals exit */
- wait_for_completion(&lnet_acceptor_state.pta_signal);
-}
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
deleted file mode 100644
index 90266be0132d..000000000000
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ /dev/null
@@ -1,2307 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include <linux/log2.h>
-#include <linux/ktime.h>
-
-#include <linux/lnet/lib-lnet.h>
-#include <uapi/linux/lnet/lnet-dlc.h>
-
-#define D_LNI D_CONSOLE
-
-struct lnet the_lnet; /* THE state of the network */
-EXPORT_SYMBOL(the_lnet);
-
-static char *ip2nets = "";
-module_param(ip2nets, charp, 0444);
-MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
-
-static char *networks = "";
-module_param(networks, charp, 0444);
-MODULE_PARM_DESC(networks, "local networks");
-
-static char *routes = "";
-module_param(routes, charp, 0444);
-MODULE_PARM_DESC(routes, "routes to non-local networks");
-
-static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
-module_param(rnet_htable_size, int, 0444);
-MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
-
-static int lnet_ping(struct lnet_process_id id, int timeout_ms,
- struct lnet_process_id __user *ids, int n_ids);
-
-static char *
-lnet_get_routes(void)
-{
- return routes;
-}
-
-static char *
-lnet_get_networks(void)
-{
- char *nets;
- int rc;
-
- if (*networks && *ip2nets) {
- LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
- return NULL;
- }
-
- if (*ip2nets) {
- rc = lnet_parse_ip2nets(&nets, ip2nets);
- return !rc ? nets : NULL;
- }
-
- if (*networks)
- return networks;
-
- return "tcp";
-}
-
-static void
-lnet_init_locks(void)
-{
- spin_lock_init(&the_lnet.ln_eq_wait_lock);
- init_waitqueue_head(&the_lnet.ln_eq_waitq);
- init_waitqueue_head(&the_lnet.ln_rc_waitq);
- mutex_init(&the_lnet.ln_lnd_mutex);
- mutex_init(&the_lnet.ln_api_mutex);
-}
-
-static int
-lnet_create_remote_nets_table(void)
-{
- int i;
- struct list_head *hash;
-
- LASSERT(!the_lnet.ln_remote_nets_hash);
- LASSERT(the_lnet.ln_remote_nets_hbits > 0);
- hash = kvmalloc_array(LNET_REMOTE_NETS_HASH_SIZE, sizeof(*hash),
- GFP_KERNEL);
- if (!hash) {
- CERROR("Failed to create remote nets hash table\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
- INIT_LIST_HEAD(&hash[i]);
- the_lnet.ln_remote_nets_hash = hash;
- return 0;
-}
-
-static void
-lnet_destroy_remote_nets_table(void)
-{
- int i;
-
- if (!the_lnet.ln_remote_nets_hash)
- return;
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
- LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
-
- kvfree(the_lnet.ln_remote_nets_hash);
- the_lnet.ln_remote_nets_hash = NULL;
-}
-
-static void
-lnet_destroy_locks(void)
-{
- if (the_lnet.ln_res_lock) {
- cfs_percpt_lock_free(the_lnet.ln_res_lock);
- the_lnet.ln_res_lock = NULL;
- }
-
- if (the_lnet.ln_net_lock) {
- cfs_percpt_lock_free(the_lnet.ln_net_lock);
- the_lnet.ln_net_lock = NULL;
- }
-}
-
-static int
-lnet_create_locks(void)
-{
- lnet_init_locks();
-
- the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
- if (!the_lnet.ln_res_lock)
- goto failed;
-
- the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
- if (!the_lnet.ln_net_lock)
- goto failed;
-
- return 0;
-
- failed:
- lnet_destroy_locks();
- return -ENOMEM;
-}
-
-static void lnet_assert_wire_constants(void)
-{
- /*
- * Wire protocol assertions generated by 'wirecheck'
- * running on Linux robert.bartonsoftware.com 2.6.8-1.521
- * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
- * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
- */
-
- /* Constants... */
- BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
- BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
- BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
- BUILD_BUG_ON(LNET_MSG_ACK != 0);
- BUILD_BUG_ON(LNET_MSG_PUT != 1);
- BUILD_BUG_ON(LNET_MSG_GET != 2);
- BUILD_BUG_ON(LNET_MSG_REPLY != 3);
- BUILD_BUG_ON(LNET_MSG_HELLO != 4);
-
- /* Checks for struct ptl_handle_wire_t */
- BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
- BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) != 0);
- BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
- BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire, wh_object_cookie) != 8);
- BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);
-
- /* Checks for struct struct lnet_magicversion */
- BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
- BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
- BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
- BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
- BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
- BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_minor) != 6);
- BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);
-
- /* Checks for struct struct lnet_hdr */
- BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40);
-
- /* Ack */
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4);
-
- /* Put */
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4);
-
- /* Get */
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4);
-
- /* Reply */
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16);
-
- /* Hello */
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8);
- BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40);
- BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);
-}
-
-static struct lnet_lnd *
-lnet_find_lnd_by_type(__u32 type)
-{
- struct lnet_lnd *lnd;
- struct list_head *tmp;
-
- /* holding lnd mutex */
- list_for_each(tmp, &the_lnet.ln_lnds) {
- lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
-
- if (lnd->lnd_type == type)
- return lnd;
- }
-
- return NULL;
-}
-
-void
-lnet_register_lnd(struct lnet_lnd *lnd)
-{
- mutex_lock(&the_lnet.ln_lnd_mutex);
-
- LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
- LASSERT(!lnet_find_lnd_by_type(lnd->lnd_type));
-
- list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
- lnd->lnd_refcount = 0;
-
- CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
-
- mutex_unlock(&the_lnet.ln_lnd_mutex);
-}
-EXPORT_SYMBOL(lnet_register_lnd);
-
-void
-lnet_unregister_lnd(struct lnet_lnd *lnd)
-{
- mutex_lock(&the_lnet.ln_lnd_mutex);
-
- LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
- LASSERT(!lnd->lnd_refcount);
-
- list_del(&lnd->lnd_list);
- CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
-
- mutex_unlock(&the_lnet.ln_lnd_mutex);
-}
-EXPORT_SYMBOL(lnet_unregister_lnd);
-
-void
-lnet_counters_get(struct lnet_counters *counters)
-{
- struct lnet_counters *ctr;
- int i;
-
- memset(counters, 0, sizeof(*counters));
-
- lnet_net_lock(LNET_LOCK_EX);
-
- cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
- counters->msgs_max += ctr->msgs_max;
- counters->msgs_alloc += ctr->msgs_alloc;
- counters->errors += ctr->errors;
- counters->send_count += ctr->send_count;
- counters->recv_count += ctr->recv_count;
- counters->route_count += ctr->route_count;
- counters->drop_count += ctr->drop_count;
- counters->send_length += ctr->send_length;
- counters->recv_length += ctr->recv_length;
- counters->route_length += ctr->route_length;
- counters->drop_length += ctr->drop_length;
- }
- lnet_net_unlock(LNET_LOCK_EX);
-}
-EXPORT_SYMBOL(lnet_counters_get);
-
-void
-lnet_counters_reset(void)
-{
- struct lnet_counters *counters;
- int i;
-
- lnet_net_lock(LNET_LOCK_EX);
-
- cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
- memset(counters, 0, sizeof(struct lnet_counters));
-
- lnet_net_unlock(LNET_LOCK_EX);
-}
-
-static char *
-lnet_res_type2str(int type)
-{
- switch (type) {
- default:
- LBUG();
- case LNET_COOKIE_TYPE_MD:
- return "MD";
- case LNET_COOKIE_TYPE_ME:
- return "ME";
- case LNET_COOKIE_TYPE_EQ:
- return "EQ";
- }
-}
-
-static void
-lnet_res_container_cleanup(struct lnet_res_container *rec)
-{
- int count = 0;
-
- if (!rec->rec_type) /* not set yet, it's uninitialized */
- return;
-
- while (!list_empty(&rec->rec_active)) {
- struct list_head *e = rec->rec_active.next;
-
- list_del_init(e);
- if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
- kfree(list_entry(e, struct lnet_eq, eq_list));
-
- } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
- kfree(list_entry(e, struct lnet_libmd, md_list));
-
- } else { /* NB: Active MEs should be attached on portals */
- LBUG();
- }
- count++;
- }
-
- if (count > 0) {
- /*
- * Found alive MD/ME/EQ, user really should unlink/free
- * all of them before finalize LNet, but if someone didn't,
- * we have to recycle garbage for him
- */
- CERROR("%d active elements on exit of %s container\n",
- count, lnet_res_type2str(rec->rec_type));
- }
-
- kfree(rec->rec_lh_hash);
- rec->rec_lh_hash = NULL;
-
- rec->rec_type = 0; /* mark it as finalized */
-}
-
-static int
-lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
-{
- int rc = 0;
- int i;
-
- LASSERT(!rec->rec_type);
-
- rec->rec_type = type;
- INIT_LIST_HEAD(&rec->rec_active);
- rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
-
- /* Arbitrary choice of hash table size */
- rec->rec_lh_hash = kvmalloc_cpt(LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]),
- GFP_KERNEL, cpt);
- if (!rec->rec_lh_hash) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < LNET_LH_HASH_SIZE; i++)
- INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
-
- return 0;
-
-out:
- CERROR("Failed to setup %s resource container\n",
- lnet_res_type2str(type));
- lnet_res_container_cleanup(rec);
- return rc;
-}
-
-static void
-lnet_res_containers_destroy(struct lnet_res_container **recs)
-{
- struct lnet_res_container *rec;
- int i;
-
- cfs_percpt_for_each(rec, i, recs)
- lnet_res_container_cleanup(rec);
-
- cfs_percpt_free(recs);
-}
-
-static struct lnet_res_container **
-lnet_res_containers_create(int type)
-{
- struct lnet_res_container **recs;
- struct lnet_res_container *rec;
- int rc;
- int i;
-
- recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
- if (!recs) {
- CERROR("Failed to allocate %s resource containers\n",
- lnet_res_type2str(type));
- return NULL;
- }
-
- cfs_percpt_for_each(rec, i, recs) {
- rc = lnet_res_container_setup(rec, i, type);
- if (rc) {
- lnet_res_containers_destroy(recs);
- return NULL;
- }
- }
-
- return recs;
-}
-
-struct lnet_libhandle *
-lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
-{
- /* ALWAYS called with lnet_res_lock held */
- struct list_head *head;
- struct lnet_libhandle *lh;
- unsigned int hash;
-
- if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
- return NULL;
-
- hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
- head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
-
- list_for_each_entry(lh, head, lh_hash_chain) {
- if (lh->lh_cookie == cookie)
- return lh;
- }
-
- return NULL;
-}
-
-void
-lnet_res_lh_initialize(struct lnet_res_container *rec,
- struct lnet_libhandle *lh)
-{
- /* ALWAYS called with lnet_res_lock held */
- unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
- unsigned int hash;
-
- lh->lh_cookie = rec->rec_lh_cookie;
- rec->rec_lh_cookie += 1 << ibits;
-
- hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
-
- list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
-}
-
-static int lnet_unprepare(void);
-
-static int
-lnet_prepare(lnet_pid_t requested_pid)
-{
- /* Prepare to bring up the network */
- struct lnet_res_container **recs;
- int rc = 0;
-
- if (requested_pid == LNET_PID_ANY) {
- /* Don't instantiate LNET just for me */
- return -ENETDOWN;
- }
-
- LASSERT(!the_lnet.ln_refcount);
-
- the_lnet.ln_routing = 0;
-
- LASSERT(!(requested_pid & LNET_PID_USERFLAG));
- the_lnet.ln_pid = requested_pid;
-
- INIT_LIST_HEAD(&the_lnet.ln_test_peers);
- INIT_LIST_HEAD(&the_lnet.ln_nis);
- INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
- INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
- INIT_LIST_HEAD(&the_lnet.ln_routers);
- INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
- INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
-
- rc = lnet_create_remote_nets_table();
- if (rc)
- goto failed;
- /*
- * NB the interface cookie in wire handles guards against delayed
- * replies and ACKs appearing valid after reboot.
- */
- the_lnet.ln_interface_cookie = ktime_get_ns();
-
- the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct lnet_counters));
- if (!the_lnet.ln_counters) {
- CERROR("Failed to allocate counters for LNet\n");
- rc = -ENOMEM;
- goto failed;
- }
-
- rc = lnet_peer_tables_create();
- if (rc)
- goto failed;
-
- rc = lnet_msg_containers_create();
- if (rc)
- goto failed;
-
- rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
- LNET_COOKIE_TYPE_EQ);
- if (rc)
- goto failed;
-
- recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
- if (!recs) {
- rc = -ENOMEM;
- goto failed;
- }
-
- the_lnet.ln_me_containers = recs;
-
- recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
- if (!recs) {
- rc = -ENOMEM;
- goto failed;
- }
-
- the_lnet.ln_md_containers = recs;
-
- rc = lnet_portals_create();
- if (rc) {
- CERROR("Failed to create portals for LNet: %d\n", rc);
- goto failed;
- }
-
- return 0;
-
- failed:
- lnet_unprepare();
- return rc;
-}
-
-static int
-lnet_unprepare(void)
-{
- /*
- * NB no LNET_LOCK since this is the last reference. All LND instances
- * have shut down already, so it is safe to unlink and free all
- * descriptors, even those that appear committed to a network op (eg MD
- * with non-zero pending count)
- */
- lnet_fail_nid(LNET_NID_ANY, 0);
-
- LASSERT(!the_lnet.ln_refcount);
- LASSERT(list_empty(&the_lnet.ln_test_peers));
- LASSERT(list_empty(&the_lnet.ln_nis));
- LASSERT(list_empty(&the_lnet.ln_nis_cpt));
- LASSERT(list_empty(&the_lnet.ln_nis_zombie));
-
- lnet_portals_destroy();
-
- if (the_lnet.ln_md_containers) {
- lnet_res_containers_destroy(the_lnet.ln_md_containers);
- the_lnet.ln_md_containers = NULL;
- }
-
- if (the_lnet.ln_me_containers) {
- lnet_res_containers_destroy(the_lnet.ln_me_containers);
- the_lnet.ln_me_containers = NULL;
- }
-
- lnet_res_container_cleanup(&the_lnet.ln_eq_container);
-
- lnet_msg_containers_destroy();
- lnet_peer_tables_destroy();
- lnet_rtrpools_free(0);
-
- if (the_lnet.ln_counters) {
- cfs_percpt_free(the_lnet.ln_counters);
- the_lnet.ln_counters = NULL;
- }
- lnet_destroy_remote_nets_table();
-
- return 0;
-}
-
-struct lnet_ni *
-lnet_net2ni_locked(__u32 net, int cpt)
-{
- struct list_head *tmp;
- struct lnet_ni *ni;
-
- LASSERT(cpt != LNET_LOCK_EX);
-
- list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, struct lnet_ni, ni_list);
-
- if (LNET_NIDNET(ni->ni_nid) == net) {
- lnet_ni_addref_locked(ni, cpt);
- return ni;
- }
- }
-
- return NULL;
-}
-
-struct lnet_ni *
-lnet_net2ni(__u32 net)
-{
- struct lnet_ni *ni;
-
- lnet_net_lock(0);
- ni = lnet_net2ni_locked(net, 0);
- lnet_net_unlock(0);
-
- return ni;
-}
-EXPORT_SYMBOL(lnet_net2ni);
-
-static unsigned int
-lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
-{
- __u64 key = nid;
- unsigned int val;
-
- LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
-
- if (number == 1)
- return 0;
-
- val = hash_long(key, LNET_CPT_BITS);
- /* NB: LNET_CP_NUMBER doesn't have to be PO2 */
- if (val < number)
- return val;
-
- return (unsigned int)(key + val + (val >> 1)) % number;
-}
-
-int
-lnet_cpt_of_nid_locked(lnet_nid_t nid)
-{
- struct lnet_ni *ni;
-
- /* must called with hold of lnet_net_lock */
- if (LNET_CPT_NUMBER == 1)
- return 0; /* the only one */
-
- /* take lnet_net_lock(any) would be OK */
- if (!list_empty(&the_lnet.ln_nis_cpt)) {
- list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
- if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
- continue;
-
- LASSERT(ni->ni_cpts);
- return ni->ni_cpts[lnet_nid_cpt_hash
- (nid, ni->ni_ncpts)];
- }
- }
-
- return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
-}
-
-int
-lnet_cpt_of_nid(lnet_nid_t nid)
-{
- int cpt;
- int cpt2;
-
- if (LNET_CPT_NUMBER == 1)
- return 0; /* the only one */
-
- if (list_empty(&the_lnet.ln_nis_cpt))
- return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
-
- cpt = lnet_net_lock_current();
- cpt2 = lnet_cpt_of_nid_locked(nid);
- lnet_net_unlock(cpt);
-
- return cpt2;
-}
-EXPORT_SYMBOL(lnet_cpt_of_nid);
-
-int
-lnet_islocalnet(__u32 net)
-{
- struct lnet_ni *ni;
- int cpt;
-
- cpt = lnet_net_lock_current();
-
- ni = lnet_net2ni_locked(net, cpt);
- if (ni)
- lnet_ni_decref_locked(ni, cpt);
-
- lnet_net_unlock(cpt);
-
- return !!ni;
-}
-
-struct lnet_ni *
-lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
-{
- struct lnet_ni *ni;
- struct list_head *tmp;
-
- LASSERT(cpt != LNET_LOCK_EX);
-
- list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, struct lnet_ni, ni_list);
-
- if (ni->ni_nid == nid) {
- lnet_ni_addref_locked(ni, cpt);
- return ni;
- }
- }
-
- return NULL;
-}
-
-int
-lnet_islocalnid(lnet_nid_t nid)
-{
- struct lnet_ni *ni;
- int cpt;
-
- cpt = lnet_net_lock_current();
- ni = lnet_nid2ni_locked(nid, cpt);
- if (ni)
- lnet_ni_decref_locked(ni, cpt);
- lnet_net_unlock(cpt);
-
- return !!ni;
-}
-
-int
-lnet_count_acceptor_nis(void)
-{
- /* Return the # of NIs that need the acceptor. */
- int count = 0;
- struct list_head *tmp;
- struct lnet_ni *ni;
- int cpt;
-
- cpt = lnet_net_lock_current();
- list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, struct lnet_ni, ni_list);
-
- if (ni->ni_lnd->lnd_accept)
- count++;
- }
-
- lnet_net_unlock(cpt);
-
- return count;
-}
-
-static struct lnet_ping_info *
-lnet_ping_info_create(int num_ni)
-{
- struct lnet_ping_info *ping_info;
- unsigned int infosz;
-
- infosz = offsetof(struct lnet_ping_info, pi_ni[num_ni]);
- ping_info = kvzalloc(infosz, GFP_KERNEL);
- if (!ping_info) {
- CERROR("Can't allocate ping info[%d]\n", num_ni);
- return NULL;
- }
-
- ping_info->pi_nnis = num_ni;
- ping_info->pi_pid = the_lnet.ln_pid;
- ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
- ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;
-
- return ping_info;
-}
-
-static inline int
-lnet_get_ni_count(void)
-{
- struct lnet_ni *ni;
- int count = 0;
-
- lnet_net_lock(0);
-
- list_for_each_entry(ni, &the_lnet.ln_nis, ni_list)
- count++;
-
- lnet_net_unlock(0);
-
- return count;
-}
-
-static inline void
-lnet_ping_info_free(struct lnet_ping_info *pinfo)
-{
- kvfree(pinfo);
-}
-
-static void
-lnet_ping_info_destroy(void)
-{
- struct lnet_ni *ni;
-
- lnet_net_lock(LNET_LOCK_EX);
-
- list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
- lnet_ni_lock(ni);
- ni->ni_status = NULL;
- lnet_ni_unlock(ni);
- }
-
- lnet_ping_info_free(the_lnet.ln_ping_info);
- the_lnet.ln_ping_info = NULL;
-
- lnet_net_unlock(LNET_LOCK_EX);
-}
-
-static void
-lnet_ping_event_handler(struct lnet_event *event)
-{
- struct lnet_ping_info *pinfo = event->md.user_ptr;
-
- if (event->unlinked)
- pinfo->pi_features = LNET_PING_FEAT_INVAL;
-}
-
-static int
-lnet_ping_info_setup(struct lnet_ping_info **ppinfo,
- struct lnet_handle_md *md_handle,
- int ni_count, bool set_eq)
-{
- struct lnet_process_id id = {LNET_NID_ANY, LNET_PID_ANY};
- struct lnet_handle_me me_handle;
- struct lnet_md md = { NULL };
- int rc, rc2;
-
- if (set_eq) {
- rc = LNetEQAlloc(0, lnet_ping_event_handler,
- &the_lnet.ln_ping_target_eq);
- if (rc) {
- CERROR("Can't allocate ping EQ: %d\n", rc);
- return rc;
- }
- }
-
- *ppinfo = lnet_ping_info_create(ni_count);
- if (!*ppinfo) {
- rc = -ENOMEM;
- goto failed_0;
- }
-
- rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
- LNET_PROTO_PING_MATCHBITS, 0,
- LNET_UNLINK, LNET_INS_AFTER,
- &me_handle);
- if (rc) {
- CERROR("Can't create ping ME: %d\n", rc);
- goto failed_1;
- }
-
- /* initialize md content */
- md.start = *ppinfo;
- md.length = offsetof(struct lnet_ping_info,
- pi_ni[(*ppinfo)->pi_nnis]);
- md.threshold = LNET_MD_THRESH_INF;
- md.max_size = 0;
- md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
- LNET_MD_MANAGE_REMOTE;
- md.user_ptr = NULL;
- md.eq_handle = the_lnet.ln_ping_target_eq;
- md.user_ptr = *ppinfo;
-
- rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
- if (rc) {
- CERROR("Can't attach ping MD: %d\n", rc);
- goto failed_2;
- }
-
- return 0;
-
-failed_2:
- rc2 = LNetMEUnlink(me_handle);
- LASSERT(!rc2);
-failed_1:
- lnet_ping_info_free(*ppinfo);
- *ppinfo = NULL;
-failed_0:
- if (set_eq)
- LNetEQFree(the_lnet.ln_ping_target_eq);
- return rc;
-}
-
-static void
-lnet_ping_md_unlink(struct lnet_ping_info *pinfo,
- struct lnet_handle_md *md_handle)
-{
- LNetMDUnlink(*md_handle);
- LNetInvalidateMDHandle(md_handle);
-
- /* NB md could be busy; this just starts the unlink */
- while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
- CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
- set_current_state(TASK_NOLOAD);
- schedule_timeout(HZ);
- }
-}
-
-static void
-lnet_ping_info_install_locked(struct lnet_ping_info *ping_info)
-{
- struct lnet_ni_status *ns;
- struct lnet_ni *ni;
- int i = 0;
-
- list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
- LASSERT(i < ping_info->pi_nnis);
-
- ns = &ping_info->pi_ni[i];
-
- ns->ns_nid = ni->ni_nid;
-
- lnet_ni_lock(ni);
- ns->ns_status = (ni->ni_status) ?
- ni->ni_status->ns_status : LNET_NI_STATUS_UP;
- ni->ni_status = ns;
- lnet_ni_unlock(ni);
-
- i++;
- }
-}
-
-static void
-lnet_ping_target_update(struct lnet_ping_info *pinfo,
- struct lnet_handle_md md_handle)
-{
- struct lnet_ping_info *old_pinfo = NULL;
- struct lnet_handle_md old_md;
-
- /* switch the NIs to point to the new ping info created */
- lnet_net_lock(LNET_LOCK_EX);
-
- if (!the_lnet.ln_routing)
- pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
- lnet_ping_info_install_locked(pinfo);
-
- if (the_lnet.ln_ping_info) {
- old_pinfo = the_lnet.ln_ping_info;
- old_md = the_lnet.ln_ping_target_md;
- }
- the_lnet.ln_ping_target_md = md_handle;
- the_lnet.ln_ping_info = pinfo;
-
- lnet_net_unlock(LNET_LOCK_EX);
-
- if (old_pinfo) {
- /* unlink the old ping info */
- lnet_ping_md_unlink(old_pinfo, &old_md);
- lnet_ping_info_free(old_pinfo);
- }
-}
-
-static void
-lnet_ping_target_fini(void)
-{
- int rc;
-
- lnet_ping_md_unlink(the_lnet.ln_ping_info,
- &the_lnet.ln_ping_target_md);
-
- rc = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT(!rc);
-
- lnet_ping_info_destroy();
-}
-
-static int
-lnet_ni_tq_credits(struct lnet_ni *ni)
-{
- int credits;
-
- LASSERT(ni->ni_ncpts >= 1);
-
- if (ni->ni_ncpts == 1)
- return ni->ni_maxtxcredits;
-
- credits = ni->ni_maxtxcredits / ni->ni_ncpts;
- credits = max(credits, 8 * ni->ni_peertxcredits);
- credits = min(credits, ni->ni_maxtxcredits);
-
- return credits;
-}
-
-static void
-lnet_ni_unlink_locked(struct lnet_ni *ni)
-{
- if (!list_empty(&ni->ni_cptlist)) {
- list_del_init(&ni->ni_cptlist);
- lnet_ni_decref_locked(ni, 0);
- }
-
- /* move it to zombie list and nobody can find it anymore */
- LASSERT(!list_empty(&ni->ni_list));
- list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
- lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */
-}
-
-static void
-lnet_clear_zombies_nis_locked(void)
-{
- int i;
- int islo;
- struct lnet_ni *ni;
- struct lnet_ni *temp;
-
- /*
- * Now wait for the NI's I just nuked to show up on ln_zombie_nis
- * and shut them down in guaranteed thread context
- */
- i = 2;
- list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis_zombie, ni_list) {
- int *ref;
- int j;
-
- list_del_init(&ni->ni_list);
- cfs_percpt_for_each(ref, j, ni->ni_refs) {
- if (!*ref)
- continue;
- /* still busy, add it back to zombie list */
- list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
- break;
- }
-
- if (!list_empty(&ni->ni_list)) {
- lnet_net_unlock(LNET_LOCK_EX);
- ++i;
- if ((i & (-i)) == i) {
- CDEBUG(D_WARNING, "Waiting for zombie LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- lnet_net_lock(LNET_LOCK_EX);
- continue;
- }
-
- ni->ni_lnd->lnd_refcount--;
- lnet_net_unlock(LNET_LOCK_EX);
-
- islo = ni->ni_lnd->lnd_type == LOLND;
-
- LASSERT(!in_interrupt());
- ni->ni_lnd->lnd_shutdown(ni);
-
- /*
- * can't deref lnd anymore now; it might have unregistered
- * itself...
- */
- if (!islo)
- CDEBUG(D_LNI, "Removed LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
-
- lnet_ni_free(ni);
- i = 2;
-
- lnet_net_lock(LNET_LOCK_EX);
- }
-}
-
-static void
-lnet_shutdown_lndnis(void)
-{
- struct lnet_ni *ni;
- struct lnet_ni *temp;
- int i;
-
- /* NB called holding the global mutex */
-
- /* All quiet on the API front */
- LASSERT(!the_lnet.ln_shutdown);
- LASSERT(!the_lnet.ln_refcount);
- LASSERT(list_empty(&the_lnet.ln_nis_zombie));
-
- lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_shutdown = 1; /* flag shutdown */
-
- /* Unlink NIs from the global table */
- list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis, ni_list) {
- lnet_ni_unlink_locked(ni);
- }
-
- /* Drop the cached loopback NI. */
- if (the_lnet.ln_loni) {
- lnet_ni_decref_locked(the_lnet.ln_loni, 0);
- the_lnet.ln_loni = NULL;
- }
-
- lnet_net_unlock(LNET_LOCK_EX);
-
- /*
- * Clear lazy portals and drop delayed messages which hold refs
- * on their lnet_msg::msg_rxpeer
- */
- for (i = 0; i < the_lnet.ln_nportals; i++)
- LNetClearLazyPortal(i);
-
- /*
- * Clear the peer table and wait for all peers to go (they hold refs on
- * their NIs)
- */
- lnet_peer_tables_cleanup(NULL);
-
- lnet_net_lock(LNET_LOCK_EX);
-
- lnet_clear_zombies_nis_locked();
- the_lnet.ln_shutdown = 0;
- lnet_net_unlock(LNET_LOCK_EX);
-}
-
-/* shutdown down the NI and release refcount */
-static void
-lnet_shutdown_lndni(struct lnet_ni *ni)
-{
- int i;
-
- lnet_net_lock(LNET_LOCK_EX);
- lnet_ni_unlink_locked(ni);
- lnet_net_unlock(LNET_LOCK_EX);
-
- /* clear messages for this NI on the lazy portal */
- for (i = 0; i < the_lnet.ln_nportals; i++)
- lnet_clear_lazy_portal(ni, i, "Shutting down NI");
-
- /* Do peer table cleanup for this ni */
- lnet_peer_tables_cleanup(ni);
-
- lnet_net_lock(LNET_LOCK_EX);
- lnet_clear_zombies_nis_locked();
- lnet_net_unlock(LNET_LOCK_EX);
-}
-
-static int
-lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
-{
- struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;
- int rc = -EINVAL;
- int lnd_type;
- struct lnet_lnd *lnd;
- struct lnet_tx_queue *tq;
- int i;
- u32 seed;
-
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
-
- LASSERT(libcfs_isknown_lnd(lnd_type));
-
- if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
- lnd_type == IIBLND || lnd_type == VIBLND) {
- CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
- goto failed0;
- }
-
- /* Make sure this new NI is unique. */
- lnet_net_lock(LNET_LOCK_EX);
- rc = lnet_net_unique(LNET_NIDNET(ni->ni_nid), &the_lnet.ln_nis);
- lnet_net_unlock(LNET_LOCK_EX);
- if (!rc) {
- if (lnd_type == LOLND) {
- lnet_ni_free(ni);
- return 0;
- }
-
- CERROR("Net %s is not unique\n",
- libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
- rc = -EEXIST;
- goto failed0;
- }
-
- mutex_lock(&the_lnet.ln_lnd_mutex);
- lnd = lnet_find_lnd_by_type(lnd_type);
-
- if (!lnd) {
- mutex_unlock(&the_lnet.ln_lnd_mutex);
- rc = request_module("%s", libcfs_lnd2modname(lnd_type));
- mutex_lock(&the_lnet.ln_lnd_mutex);
-
- lnd = lnet_find_lnd_by_type(lnd_type);
- if (!lnd) {
- mutex_unlock(&the_lnet.ln_lnd_mutex);
- CERROR("Can't load LND %s, module %s, rc=%d\n",
- libcfs_lnd2str(lnd_type),
- libcfs_lnd2modname(lnd_type), rc);
- rc = -EINVAL;
- goto failed0;
- }
- }
-
- lnet_net_lock(LNET_LOCK_EX);
- lnd->lnd_refcount++;
- lnet_net_unlock(LNET_LOCK_EX);
-
- ni->ni_lnd = lnd;
-
- if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf))
- lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;
-
- if (lnd_tunables) {
- ni->ni_lnd_tunables = kzalloc(sizeof(*ni->ni_lnd_tunables),
- GFP_NOFS);
- if (!ni->ni_lnd_tunables) {
- mutex_unlock(&the_lnet.ln_lnd_mutex);
- rc = -ENOMEM;
- goto failed0;
- }
- memcpy(ni->ni_lnd_tunables, lnd_tunables,
- sizeof(*ni->ni_lnd_tunables));
- }
-
- /*
- * If given some LND tunable parameters, parse those now to
- * override the values in the NI structure.
- */
- if (conf) {
- if (conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0)
- ni->ni_peerrtrcredits =
- conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
- if (conf->cfg_config_u.cfg_net.net_peer_timeout >= 0)
- ni->ni_peertimeout =
- conf->cfg_config_u.cfg_net.net_peer_timeout;
- if (conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1)
- ni->ni_peertxcredits =
- conf->cfg_config_u.cfg_net.net_peer_tx_credits;
- if (conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0)
- ni->ni_maxtxcredits =
- conf->cfg_config_u.cfg_net.net_max_tx_credits;
- }
-
- rc = lnd->lnd_startup(ni);
-
- mutex_unlock(&the_lnet.ln_lnd_mutex);
-
- if (rc) {
- LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
- rc, libcfs_lnd2str(lnd->lnd_type));
- lnet_net_lock(LNET_LOCK_EX);
- lnd->lnd_refcount--;
- lnet_net_unlock(LNET_LOCK_EX);
- goto failed0;
- }
-
- LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
-
- lnet_net_lock(LNET_LOCK_EX);
- /* refcount for ln_nis */
- lnet_ni_addref_locked(ni, 0);
- list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
- if (ni->ni_cpts) {
- lnet_ni_addref_locked(ni, 0);
- list_add_tail(&ni->ni_cptlist, &the_lnet.ln_nis_cpt);
- }
-
- lnet_net_unlock(LNET_LOCK_EX);
-
- if (lnd->lnd_type == LOLND) {
- lnet_ni_addref(ni);
- LASSERT(!the_lnet.ln_loni);
- the_lnet.ln_loni = ni;
- return 0;
- }
-
- if (!ni->ni_peertxcredits || !ni->ni_maxtxcredits) {
- LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
- libcfs_lnd2str(lnd->lnd_type),
- !ni->ni_peertxcredits ?
- "" : "per-peer ");
- /*
- * shutdown the NI since if we get here then it must've already
- * been started
- */
- lnet_shutdown_lndni(ni);
- return -EINVAL;
- }
-
- cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
- tq->tq_credits_min =
- tq->tq_credits_max =
- tq->tq_credits = lnet_ni_tq_credits(ni);
- }
-
- /* Nodes with small feet have little entropy. The NID for this
- * node gives the most entropy in the low bits.
- */
- seed = LNET_NIDADDR(ni->ni_nid);
- add_device_randomness(&seed, sizeof(seed));
-
- CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
- libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
- lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
- ni->ni_peerrtrcredits, ni->ni_peertimeout);
-
- return 0;
-failed0:
- lnet_ni_free(ni);
- return rc;
-}
-
-static int
-lnet_startup_lndnis(struct list_head *nilist)
-{
- struct lnet_ni *ni;
- int rc;
- int ni_count = 0;
-
- while (!list_empty(nilist)) {
- ni = list_entry(nilist->next, struct lnet_ni, ni_list);
- list_del(&ni->ni_list);
- rc = lnet_startup_lndni(ni, NULL);
-
- if (rc < 0)
- goto failed;
-
- ni_count++;
- }
-
- return ni_count;
-failed:
- lnet_shutdown_lndnis();
-
- return rc;
-}
-
-/**
- * Initialize LNet library.
- *
- * Automatically called at module loading time. Caller has to call
- * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
- * latter returned 0. It must be called exactly once.
- *
- * \retval 0 on success
- * \retval -ve on failures.
- */
-int lnet_lib_init(void)
-{
- int rc;
-
- lnet_assert_wire_constants();
-
- memset(&the_lnet, 0, sizeof(the_lnet));
-
- /* refer to global cfs_cpt_table for now */
- the_lnet.ln_cpt_table = cfs_cpt_table;
- the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
-
- LASSERT(the_lnet.ln_cpt_number > 0);
- if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
- /* we are under risk of consuming all lh_cookie */
- CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n",
- the_lnet.ln_cpt_number, LNET_CPT_MAX);
- return -E2BIG;
- }
-
- while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
- the_lnet.ln_cpt_bits++;
-
- rc = lnet_create_locks();
- if (rc) {
- CERROR("Can't create LNet global locks: %d\n", rc);
- return rc;
- }
-
- the_lnet.ln_refcount = 0;
- LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
- INIT_LIST_HEAD(&the_lnet.ln_lnds);
- INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
- INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
-
- /*
- * The hash table size is the number of bits it takes to express the set
- * ln_num_routes, minus 1 (better to under estimate than over so we
- * don't waste memory).
- */
- if (rnet_htable_size <= 0)
- rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
- else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
- rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
- the_lnet.ln_remote_nets_hbits = max_t(int, 1,
- order_base_2(rnet_htable_size) - 1);
-
- /*
- * All LNDs apart from the LOLND are in separate modules. They
- * register themselves when their module loads, and unregister
- * themselves when their module is unloaded.
- */
- lnet_register_lnd(&the_lolnd);
- return 0;
-}
-
-/**
- * Finalize LNet library.
- *
- * \pre lnet_lib_init() called with success.
- * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
- */
-void lnet_lib_exit(void)
-{
- LASSERT(!the_lnet.ln_refcount);
-
- while (!list_empty(&the_lnet.ln_lnds))
- lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
- struct lnet_lnd, lnd_list));
- lnet_destroy_locks();
-}
-
-/**
- * Set LNet PID and start LNet interfaces, routing, and forwarding.
- *
- * Users must call this function at least once before any other functions.
- * For each successful call there must be a corresponding call to
- * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
- * ignored.
- *
- * The PID used by LNet may be different from the one requested.
- * See LNetGetId().
- *
- * \param requested_pid PID requested by the caller.
- *
- * \return >= 0 on success, and < 0 error code on failures.
- */
-int
-LNetNIInit(lnet_pid_t requested_pid)
-{
- int im_a_router = 0;
- int rc;
- int ni_count;
- struct lnet_ping_info *pinfo;
- struct lnet_handle_md md_handle;
- struct list_head net_head;
-
- INIT_LIST_HEAD(&net_head);
-
- mutex_lock(&the_lnet.ln_api_mutex);
-
- CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
-
- if (the_lnet.ln_refcount > 0) {
- rc = the_lnet.ln_refcount++;
- mutex_unlock(&the_lnet.ln_api_mutex);
- return rc;
- }
-
- rc = lnet_prepare(requested_pid);
- if (rc) {
- mutex_unlock(&the_lnet.ln_api_mutex);
- return rc;
- }
-
- /* Add in the loopback network */
- if (!lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, &net_head)) {
- rc = -ENOMEM;
- goto err_empty_list;
- }
-
- /*
- * If LNet is being initialized via DLC it is possible
- * that the user requests not to load module parameters (ones which
- * are supported by DLC) on initialization. Therefore, make sure not
- * to load networks, routes and forwarding from module parameters
- * in this case. On cleanup in case of failure only clean up
- * routes if it has been loaded
- */
- if (!the_lnet.ln_nis_from_mod_params) {
- rc = lnet_parse_networks(&net_head, lnet_get_networks());
- if (rc < 0)
- goto err_empty_list;
- }
-
- ni_count = lnet_startup_lndnis(&net_head);
- if (ni_count < 0) {
- rc = ni_count;
- goto err_empty_list;
- }
-
- if (!the_lnet.ln_nis_from_mod_params) {
- rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
- if (rc)
- goto err_shutdown_lndnis;
-
- rc = lnet_check_routes();
- if (rc)
- goto err_destroy_routes;
-
- rc = lnet_rtrpools_alloc(im_a_router);
- if (rc)
- goto err_destroy_routes;
- }
-
- rc = lnet_acceptor_start();
- if (rc)
- goto err_destroy_routes;
-
- the_lnet.ln_refcount = 1;
- /* Now I may use my own API functions... */
-
- rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
- if (rc)
- goto err_acceptor_stop;
-
- lnet_ping_target_update(pinfo, md_handle);
-
- rc = lnet_router_checker_start();
- if (rc)
- goto err_stop_ping;
-
- lnet_fault_init();
- lnet_router_debugfs_init();
-
- mutex_unlock(&the_lnet.ln_api_mutex);
-
- return 0;
-
-err_stop_ping:
- lnet_ping_target_fini();
-err_acceptor_stop:
- the_lnet.ln_refcount = 0;
- lnet_acceptor_stop();
-err_destroy_routes:
- if (!the_lnet.ln_nis_from_mod_params)
- lnet_destroy_routes();
-err_shutdown_lndnis:
- lnet_shutdown_lndnis();
-err_empty_list:
- lnet_unprepare();
- LASSERT(rc < 0);
- mutex_unlock(&the_lnet.ln_api_mutex);
- while (!list_empty(&net_head)) {
- struct lnet_ni *ni;
-
- ni = list_entry(net_head.next, struct lnet_ni, ni_list);
- list_del_init(&ni->ni_list);
- lnet_ni_free(ni);
- }
- return rc;
-}
-EXPORT_SYMBOL(LNetNIInit);
-
-/**
- * Stop LNet interfaces, routing, and forwarding.
- *
- * Users must call this function once for each successful call to LNetNIInit().
- * Once the LNetNIFini() operation has been started, the results of pending
- * API operations are undefined.
- *
- * \return always 0 for current implementation.
- */
-int
-LNetNIFini(void)
-{
- mutex_lock(&the_lnet.ln_api_mutex);
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (the_lnet.ln_refcount != 1) {
- the_lnet.ln_refcount--;
- } else {
- LASSERT(!the_lnet.ln_niinit_self);
-
- lnet_fault_fini();
- lnet_router_debugfs_fini();
- lnet_router_checker_stop();
- lnet_ping_target_fini();
-
- /* Teardown fns that use my own API functions BEFORE here */
- the_lnet.ln_refcount = 0;
-
- lnet_acceptor_stop();
- lnet_destroy_routes();
- lnet_shutdown_lndnis();
- lnet_unprepare();
- }
-
- mutex_unlock(&the_lnet.ln_api_mutex);
- return 0;
-}
-EXPORT_SYMBOL(LNetNIFini);
-
-/**
- * Grabs the ni data from the ni structure and fills the out
- * parameters
- *
- * \param[in] ni network interface structure
- * \param[out] config NI configuration
- */
-static void
-lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
-{
- struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
- struct lnet_ioctl_net_config *net_config;
- size_t min_size, tunable_size = 0;
- int i;
-
- if (!ni || !config)
- return;
-
- net_config = (struct lnet_ioctl_net_config *)config->cfg_bulk;
- if (!net_config)
- return;
-
- BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
- ARRAY_SIZE(net_config->ni_interfaces));
-
- for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
- if (!ni->ni_interfaces[i])
- break;
-
- strncpy(net_config->ni_interfaces[i],
- ni->ni_interfaces[i],
- sizeof(net_config->ni_interfaces[i]));
- }
-
- config->cfg_nid = ni->ni_nid;
- config->cfg_config_u.cfg_net.net_peer_timeout = ni->ni_peertimeout;
- config->cfg_config_u.cfg_net.net_max_tx_credits = ni->ni_maxtxcredits;
- config->cfg_config_u.cfg_net.net_peer_tx_credits = ni->ni_peertxcredits;
- config->cfg_config_u.cfg_net.net_peer_rtr_credits = ni->ni_peerrtrcredits;
-
- net_config->ni_status = ni->ni_status->ns_status;
-
- if (ni->ni_cpts) {
- int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
-
- for (i = 0; i < num_cpts; i++)
- net_config->ni_cpts[i] = ni->ni_cpts[i];
-
- config->cfg_ncpts = num_cpts;
- }
-
- /*
- * See if user land tools sent in a newer and larger version
- * of struct lnet_tunables than what the kernel uses.
- */
- min_size = sizeof(*config) + sizeof(*net_config);
-
- if (config->cfg_hdr.ioc_len > min_size)
- tunable_size = config->cfg_hdr.ioc_len - min_size;
-
- /* Don't copy to much data to user space */
- min_size = min(tunable_size, sizeof(*ni->ni_lnd_tunables));
- lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
-
- if (ni->ni_lnd_tunables && lnd_cfg && min_size) {
- memcpy(lnd_cfg, ni->ni_lnd_tunables, min_size);
- config->cfg_config_u.cfg_net.net_interface_count = 1;
-
- /* Tell user land that kernel side has less data */
- if (tunable_size > sizeof(*ni->ni_lnd_tunables)) {
- min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
- config->cfg_hdr.ioc_len -= min_size;
- }
- }
-}
-
-static int
-lnet_get_net_config(struct lnet_ioctl_config_data *config)
-{
- struct lnet_ni *ni;
- struct list_head *tmp;
- int idx = config->cfg_count;
- int cpt, i = 0;
- int rc = -ENOENT;
-
- cpt = lnet_net_lock_current();
-
- list_for_each(tmp, &the_lnet.ln_nis) {
- if (i++ != idx)
- continue;
-
- ni = list_entry(tmp, struct lnet_ni, ni_list);
- lnet_ni_lock(ni);
- lnet_fill_ni_info(ni, config);
- lnet_ni_unlock(ni);
- rc = 0;
- break;
- }
-
- lnet_net_unlock(cpt);
- return rc;
-}
-
-int
-lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
-{
- char *nets = conf->cfg_config_u.cfg_net.net_intf;
- struct lnet_ping_info *pinfo;
- struct lnet_handle_md md_handle;
- struct lnet_ni *ni;
- struct list_head net_head;
- struct lnet_remotenet *rnet;
- int rc;
-
- INIT_LIST_HEAD(&net_head);
-
- /* Create a ni structure for the network string */
- rc = lnet_parse_networks(&net_head, nets);
- if (rc <= 0)
- return !rc ? -EINVAL : rc;
-
- mutex_lock(&the_lnet.ln_api_mutex);
-
- if (rc > 1) {
- rc = -EINVAL; /* only add one interface per call */
- goto failed0;
- }
-
- ni = list_entry(net_head.next, struct lnet_ni, ni_list);
-
- lnet_net_lock(LNET_LOCK_EX);
- rnet = lnet_find_net_locked(LNET_NIDNET(ni->ni_nid));
- lnet_net_unlock(LNET_LOCK_EX);
- /*
- * make sure that the net added doesn't invalidate the current
- * configuration LNet is keeping
- */
- if (rnet) {
- CERROR("Adding net %s will invalidate routing configuration\n",
- nets);
- rc = -EUSERS;
- goto failed0;
- }
-
- rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(),
- false);
- if (rc)
- goto failed0;
-
- list_del_init(&ni->ni_list);
-
- rc = lnet_startup_lndni(ni, conf);
- if (rc)
- goto failed1;
-
- if (ni->ni_lnd->lnd_accept) {
- rc = lnet_acceptor_start();
- if (rc < 0) {
- /* shutdown the ni that we just started */
- CERROR("Failed to start up acceptor thread\n");
- lnet_shutdown_lndni(ni);
- goto failed1;
- }
- }
-
- lnet_ping_target_update(pinfo, md_handle);
- mutex_unlock(&the_lnet.ln_api_mutex);
-
- return 0;
-
-failed1:
- lnet_ping_md_unlink(pinfo, &md_handle);
- lnet_ping_info_free(pinfo);
-failed0:
- mutex_unlock(&the_lnet.ln_api_mutex);
- while (!list_empty(&net_head)) {
- ni = list_entry(net_head.next, struct lnet_ni, ni_list);
- list_del_init(&ni->ni_list);
- lnet_ni_free(ni);
- }
- return rc;
-}
-
-int
-lnet_dyn_del_ni(__u32 net)
-{
- struct lnet_ni *ni;
- struct lnet_ping_info *pinfo;
- struct lnet_handle_md md_handle;
- int rc;
-
- /* don't allow userspace to shutdown the LOLND */
- if (LNET_NETTYP(net) == LOLND)
- return -EINVAL;
-
- mutex_lock(&the_lnet.ln_api_mutex);
- /* create and link a new ping info, before removing the old one */
- rc = lnet_ping_info_setup(&pinfo, &md_handle,
- lnet_get_ni_count() - 1, false);
- if (rc)
- goto out;
-
- ni = lnet_net2ni(net);
- if (!ni) {
- rc = -EINVAL;
- goto failed;
- }
-
- /* decrement the reference counter taken by lnet_net2ni() */
- lnet_ni_decref_locked(ni, 0);
-
- lnet_shutdown_lndni(ni);
-
- if (!lnet_count_acceptor_nis())
- lnet_acceptor_stop();
-
- lnet_ping_target_update(pinfo, md_handle);
- goto out;
-failed:
- lnet_ping_md_unlink(pinfo, &md_handle);
- lnet_ping_info_free(pinfo);
-out:
- mutex_unlock(&the_lnet.ln_api_mutex);
-
- return rc;
-}
-
-/**
- * LNet ioctl handler.
- *
- */
-int
-LNetCtl(unsigned int cmd, void *arg)
-{
- struct libcfs_ioctl_data *data = arg;
- struct lnet_ioctl_config_data *config;
- struct lnet_process_id id = {0};
- struct lnet_ni *ni;
- int rc;
- unsigned long secs_passed;
-
- BUILD_BUG_ON(LIBCFS_IOC_DATA_MAX <
- sizeof(struct lnet_ioctl_net_config) +
- sizeof(struct lnet_ioctl_config_data));
-
- switch (cmd) {
- case IOC_LIBCFS_GET_NI:
- rc = LNetGetId(data->ioc_count, &id);
- data->ioc_nid = id.nid;
- return rc;
-
- case IOC_LIBCFS_FAIL_NID:
- return lnet_fail_nid(data->ioc_nid, data->ioc_count);
-
- case IOC_LIBCFS_ADD_ROUTE:
- config = arg;
-
- if (config->cfg_hdr.ioc_len < sizeof(*config))
- return -EINVAL;
-
- mutex_lock(&the_lnet.ln_api_mutex);
- rc = lnet_add_route(config->cfg_net,
- config->cfg_config_u.cfg_route.rtr_hop,
- config->cfg_nid,
- config->cfg_config_u.cfg_route.rtr_priority);
- if (!rc) {
- rc = lnet_check_routes();
- if (rc)
- lnet_del_route(config->cfg_net,
- config->cfg_nid);
- }
- mutex_unlock(&the_lnet.ln_api_mutex);
- return rc;
-
- case IOC_LIBCFS_DEL_ROUTE:
- config = arg;
-
- if (config->cfg_hdr.ioc_len < sizeof(*config))
- return -EINVAL;
-
- mutex_lock(&the_lnet.ln_api_mutex);
- rc = lnet_del_route(config->cfg_net, config->cfg_nid);
- mutex_unlock(&the_lnet.ln_api_mutex);
- return rc;
-
- case IOC_LIBCFS_GET_ROUTE:
- config = arg;
-
- if (config->cfg_hdr.ioc_len < sizeof(*config))
- return -EINVAL;
-
- return lnet_get_route(config->cfg_count,
- &config->cfg_net,
- &config->cfg_config_u.cfg_route.rtr_hop,
- &config->cfg_nid,
- &config->cfg_config_u.cfg_route.rtr_flags,
- &config->cfg_config_u.cfg_route.rtr_priority);
-
- case IOC_LIBCFS_GET_NET: {
- size_t total = sizeof(*config) +
- sizeof(struct lnet_ioctl_net_config);
- config = arg;
-
- if (config->cfg_hdr.ioc_len < total)
- return -EINVAL;
-
- return lnet_get_net_config(config);
- }
-
- case IOC_LIBCFS_GET_LNET_STATS: {
- struct lnet_ioctl_lnet_stats *lnet_stats = arg;
-
- if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
- return -EINVAL;
-
- lnet_counters_get(&lnet_stats->st_cntrs);
- return 0;
- }
-
- case IOC_LIBCFS_CONFIG_RTR:
- config = arg;
-
- if (config->cfg_hdr.ioc_len < sizeof(*config))
- return -EINVAL;
-
- mutex_lock(&the_lnet.ln_api_mutex);
- if (config->cfg_config_u.cfg_buffers.buf_enable) {
- rc = lnet_rtrpools_enable();
- mutex_unlock(&the_lnet.ln_api_mutex);
- return rc;
- }
- lnet_rtrpools_disable();
- mutex_unlock(&the_lnet.ln_api_mutex);
- return 0;
-
- case IOC_LIBCFS_ADD_BUF:
- config = arg;
-
- if (config->cfg_hdr.ioc_len < sizeof(*config))
- return -EINVAL;
-
- mutex_lock(&the_lnet.ln_api_mutex);
- rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.buf_tiny,
- config->cfg_config_u.cfg_buffers.buf_small,
- config->cfg_config_u.cfg_buffers.buf_large);
- mutex_unlock(&the_lnet.ln_api_mutex);
- return rc;
-
- case IOC_LIBCFS_GET_BUF: {
- struct lnet_ioctl_pool_cfg *pool_cfg;
- size_t total = sizeof(*config) + sizeof(*pool_cfg);
-
- config = arg;
-
- if (config->cfg_hdr.ioc_len < total)
- return -EINVAL;
-
- pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
- return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
- }
-
- case IOC_LIBCFS_GET_PEER_INFO: {
- struct lnet_ioctl_peer *peer_info = arg;
-
- if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
- return -EINVAL;
-
- return lnet_get_peer_info(peer_info->pr_count,
- &peer_info->pr_nid,
- peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
- &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
- &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
- &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
- &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
- &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
- &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits,
- &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
- }
-
- case IOC_LIBCFS_NOTIFY_ROUTER:
- secs_passed = (ktime_get_real_seconds() - data->ioc_u64[0]);
- secs_passed *= msecs_to_jiffies(MSEC_PER_SEC);
-
- return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
- jiffies - secs_passed);
-
- case IOC_LIBCFS_LNET_DIST:
- rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
- if (rc < 0 && rc != -EHOSTUNREACH)
- return rc;
-
- data->ioc_u32[0] = rc;
- return 0;
-
- case IOC_LIBCFS_TESTPROTOCOMPAT:
- lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_testprotocompat = data->ioc_flags;
- lnet_net_unlock(LNET_LOCK_EX);
- return 0;
-
- case IOC_LIBCFS_LNET_FAULT:
- return lnet_fault_ctl(data->ioc_flags, data);
-
- case IOC_LIBCFS_PING:
- id.nid = data->ioc_nid;
- id.pid = data->ioc_u32[0];
- rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
- data->ioc_pbuf1,
- data->ioc_plen1 / sizeof(struct lnet_process_id));
- if (rc < 0)
- return rc;
- data->ioc_count = rc;
- return 0;
-
- default:
- ni = lnet_net2ni(data->ioc_net);
- if (!ni)
- return -EINVAL;
-
- if (!ni->ni_lnd->lnd_ctl)
- rc = -EINVAL;
- else
- rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
-
- lnet_ni_decref(ni);
- return rc;
- }
- /* not reached */
-}
-EXPORT_SYMBOL(LNetCtl);
-
-void LNetDebugPeer(struct lnet_process_id id)
-{
- lnet_debug_peer(id.nid);
-}
-EXPORT_SYMBOL(LNetDebugPeer);
-
-/**
- * Retrieve the lnet_process_id ID of LNet interface at \a index. Note that
- * all interfaces share a same PID, as requested by LNetNIInit().
- *
- * \param index Index of the interface to look up.
- * \param id On successful return, this location will hold the
- * lnet_process_id ID of the interface.
- *
- * \retval 0 If an interface exists at \a index.
- * \retval -ENOENT If no interface has been found.
- */
-int
-LNetGetId(unsigned int index, struct lnet_process_id *id)
-{
- struct lnet_ni *ni;
- struct list_head *tmp;
- int cpt;
- int rc = -ENOENT;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- cpt = lnet_net_lock_current();
-
- list_for_each(tmp, &the_lnet.ln_nis) {
- if (index--)
- continue;
-
- ni = list_entry(tmp, struct lnet_ni, ni_list);
-
- id->nid = ni->ni_nid;
- id->pid = the_lnet.ln_pid;
- rc = 0;
- break;
- }
-
- lnet_net_unlock(cpt);
- return rc;
-}
-EXPORT_SYMBOL(LNetGetId);
-
-static int lnet_ping(struct lnet_process_id id, int timeout_ms,
- struct lnet_process_id __user *ids, int n_ids)
-{
- struct lnet_handle_eq eqh;
- struct lnet_handle_md mdh;
- struct lnet_event event;
- struct lnet_md md = { NULL };
- int which;
- int unlinked = 0;
- int replied = 0;
- const int a_long_time = 60000; /* mS */
- int infosz;
- struct lnet_ping_info *info;
- struct lnet_process_id tmpid;
- int i;
- int nob;
- int rc;
- int rc2;
-
- infosz = offsetof(struct lnet_ping_info, pi_ni[n_ids]);
-
- if (n_ids <= 0 ||
- id.nid == LNET_NID_ANY ||
- timeout_ms > 500000 || /* arbitrary limit! */
- n_ids > 20) /* arbitrary limit! */
- return -EINVAL;
-
- if (id.pid == LNET_PID_ANY)
- id.pid = LNET_PID_LUSTRE;
-
- info = kzalloc(infosz, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- /* NB 2 events max (including any unlink event) */
- rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
- if (rc) {
- CERROR("Can't allocate EQ: %d\n", rc);
- goto out_0;
- }
-
- /* initialize md content */
- md.start = info;
- md.length = infosz;
- md.threshold = 2; /*GET/REPLY*/
- md.max_size = 0;
- md.options = LNET_MD_TRUNCATE;
- md.user_ptr = NULL;
- md.eq_handle = eqh;
-
- rc = LNetMDBind(md, LNET_UNLINK, &mdh);
- if (rc) {
- CERROR("Can't bind MD: %d\n", rc);
- goto out_1;
- }
-
- rc = LNetGet(LNET_NID_ANY, mdh, id,
- LNET_RESERVED_PORTAL,
- LNET_PROTO_PING_MATCHBITS, 0);
-
- if (rc) {
- /* Don't CERROR; this could be deliberate! */
-
- rc2 = LNetMDUnlink(mdh);
- LASSERT(!rc2);
-
- /* NB must wait for the UNLINK event below... */
- unlinked = 1;
- timeout_ms = a_long_time;
- }
-
- do {
- /* MUST block for unlink to complete */
-
- rc2 = LNetEQPoll(&eqh, 1, timeout_ms, !unlinked,
- &event, &which);
-
- CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
- (rc2 <= 0) ? -1 : event.type,
- (rc2 <= 0) ? -1 : event.status,
- (rc2 > 0 && event.unlinked) ? " unlinked" : "");
-
- LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
-
- if (rc2 <= 0 || event.status) {
- /* timeout or error */
- if (!replied && !rc)
- rc = (rc2 < 0) ? rc2 :
- !rc2 ? -ETIMEDOUT :
- event.status;
-
- if (!unlinked) {
- /* Ensure completion in finite time... */
- LNetMDUnlink(mdh);
- /* No assertion (racing with network) */
- unlinked = 1;
- timeout_ms = a_long_time;
- } else if (!rc2) {
- /* timed out waiting for unlink */
- CWARN("ping %s: late network completion\n",
- libcfs_id2str(id));
- }
- } else if (event.type == LNET_EVENT_REPLY) {
- replied = 1;
- rc = event.mlength;
- }
-
- } while (rc2 <= 0 || !event.unlinked);
-
- if (!replied) {
- if (rc >= 0)
- CWARN("%s: Unexpected rc >= 0 but no reply!\n",
- libcfs_id2str(id));
- rc = -EIO;
- goto out_1;
- }
-
- nob = rc;
- LASSERT(nob >= 0 && nob <= infosz);
-
- rc = -EPROTO; /* if I can't parse... */
-
- if (nob < 8) {
- /* can't check magic/version */
- CERROR("%s: ping info too short %d\n",
- libcfs_id2str(id), nob);
- goto out_1;
- }
-
- if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
- lnet_swap_pinginfo(info);
- } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
- CERROR("%s: Unexpected magic %08x\n",
- libcfs_id2str(id), info->pi_magic);
- goto out_1;
- }
-
- if (!(info->pi_features & LNET_PING_FEAT_NI_STATUS)) {
- CERROR("%s: ping w/o NI status: 0x%x\n",
- libcfs_id2str(id), info->pi_features);
- goto out_1;
- }
-
- if (nob < offsetof(struct lnet_ping_info, pi_ni[0])) {
- CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
- nob, (int)offsetof(struct lnet_ping_info, pi_ni[0]));
- goto out_1;
- }
-
- if (info->pi_nnis < n_ids)
- n_ids = info->pi_nnis;
-
- if (nob < offsetof(struct lnet_ping_info, pi_ni[n_ids])) {
- CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
- nob, (int)offsetof(struct lnet_ping_info, pi_ni[n_ids]));
- goto out_1;
- }
-
- rc = -EFAULT; /* If I SEGV... */
-
- memset(&tmpid, 0, sizeof(tmpid));
- for (i = 0; i < n_ids; i++) {
- tmpid.pid = info->pi_pid;
- tmpid.nid = info->pi_ni[i].ns_nid;
- if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
- goto out_1;
- }
- rc = info->pi_nnis;
-
- out_1:
- rc2 = LNetEQFree(eqh);
- if (rc2)
- CERROR("rc2 %d\n", rc2);
- LASSERT(!rc2);
-
- out_0:
- kfree(info);
- return rc;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
deleted file mode 100644
index 0aea268a4f1c..000000000000
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ /dev/null
@@ -1,1234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
-#include <linux/lnet/lib-lnet.h>
-
-struct lnet_text_buf { /* tmp struct for parsing routes */
- struct list_head ltb_list; /* stash on lists */
- int ltb_size; /* allocated size */
- char ltb_text[0]; /* text buffer */
-};
-
-static int lnet_tbnob; /* track text buf allocation */
-#define LNET_MAX_TEXTBUF_NOB (64 << 10) /* bound allocation */
-#define LNET_SINGLE_TEXTBUF_NOB (4 << 10)
-
-static void
-lnet_syntax(char *name, char *str, int offset, int width)
-{
- static char dots[LNET_SINGLE_TEXTBUF_NOB];
- static char dashes[LNET_SINGLE_TEXTBUF_NOB];
-
- memset(dots, '.', sizeof(dots));
- dots[sizeof(dots) - 1] = 0;
- memset(dashes, '-', sizeof(dashes));
- dashes[sizeof(dashes) - 1] = 0;
-
- LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str);
- LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n",
- (int)strlen(name), dots, offset, dots,
- (width < 1) ? 0 : width - 1, dashes);
-}
-
-static int
-lnet_issep(char c)
-{
- switch (c) {
- case '\n':
- case '\r':
- case ';':
- return 1;
- default:
- return 0;
- }
-}
-
-int
-lnet_net_unique(__u32 net, struct list_head *nilist)
-{
- struct list_head *tmp;
- struct lnet_ni *ni;
-
- list_for_each(tmp, nilist) {
- ni = list_entry(tmp, struct lnet_ni, ni_list);
-
- if (LNET_NIDNET(ni->ni_nid) == net)
- return 0;
- }
-
- return 1;
-}
-
-void
-lnet_ni_free(struct lnet_ni *ni)
-{
- int i;
-
- if (ni->ni_refs)
- cfs_percpt_free(ni->ni_refs);
-
- if (ni->ni_tx_queues)
- cfs_percpt_free(ni->ni_tx_queues);
-
- if (ni->ni_cpts)
- cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
-
- kfree(ni->ni_lnd_tunables);
-
- for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++)
- kfree(ni->ni_interfaces[i]);
-
- /* release reference to net namespace */
- if (ni->ni_net_ns)
- put_net(ni->ni_net_ns);
-
- kfree(ni);
-}
-
-struct lnet_ni *
-lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
-{
- struct lnet_tx_queue *tq;
- struct lnet_ni *ni;
- int rc;
- int i;
-
- if (!lnet_net_unique(net, nilist)) {
- LCONSOLE_ERROR_MSG(0x111, "Duplicate network specified: %s\n",
- libcfs_net2str(net));
- return NULL;
- }
-
- ni = kzalloc(sizeof(*ni), GFP_NOFS);
- if (!ni) {
- CERROR("Out of memory creating network %s\n",
- libcfs_net2str(net));
- return NULL;
- }
-
- spin_lock_init(&ni->ni_lock);
- INIT_LIST_HEAD(&ni->ni_cptlist);
- ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*ni->ni_refs[0]));
- if (!ni->ni_refs)
- goto failed;
-
- ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*ni->ni_tx_queues[0]));
- if (!ni->ni_tx_queues)
- goto failed;
-
- cfs_percpt_for_each(tq, i, ni->ni_tx_queues)
- INIT_LIST_HEAD(&tq->tq_delayed);
-
- if (!el) {
- ni->ni_cpts = NULL;
- ni->ni_ncpts = LNET_CPT_NUMBER;
- } else {
- rc = cfs_expr_list_values(el, LNET_CPT_NUMBER, &ni->ni_cpts);
- if (rc <= 0) {
- CERROR("Failed to set CPTs for NI %s: %d\n",
- libcfs_net2str(net), rc);
- goto failed;
- }
-
- LASSERT(rc <= LNET_CPT_NUMBER);
- if (rc == LNET_CPT_NUMBER) {
- cfs_expr_list_values_free(ni->ni_cpts, LNET_CPT_NUMBER);
- ni->ni_cpts = NULL;
- }
-
- ni->ni_ncpts = rc;
- }
-
- /* LND will fill in the address part of the NID */
- ni->ni_nid = LNET_MKNID(net, 0);
-
- /* Store net namespace in which current ni is being created */
- if (current->nsproxy->net_ns)
- ni->ni_net_ns = get_net(current->nsproxy->net_ns);
- else
- ni->ni_net_ns = NULL;
-
- ni->ni_last_alive = ktime_get_real_seconds();
- list_add_tail(&ni->ni_list, nilist);
- return ni;
- failed:
- lnet_ni_free(ni);
- return NULL;
-}
-
-int
-lnet_parse_networks(struct list_head *nilist, char *networks)
-{
- struct cfs_expr_list *el = NULL;
- char *tokens;
- char *str;
- char *tmp;
- struct lnet_ni *ni;
- __u32 net;
- int nnets = 0;
- struct list_head *temp_node;
-
- if (!networks) {
- CERROR("networks string is undefined\n");
- return -EINVAL;
- }
-
- if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
- /* _WAY_ conservative */
- LCONSOLE_ERROR_MSG(0x112,
- "Can't parse networks: string too long\n");
- return -EINVAL;
- }
-
- tokens = kstrdup(networks, GFP_KERNEL);
- if (!tokens) {
- CERROR("Can't allocate net tokens\n");
- return -ENOMEM;
- }
-
- tmp = tokens;
- str = tokens;
-
- while (str && *str) {
- char *comma = strchr(str, ',');
- char *bracket = strchr(str, '(');
- char *square = strchr(str, '[');
- char *iface;
- int niface;
- int rc;
-
- /*
- * NB we don't check interface conflicts here; it's the LNDs
- * responsibility (if it cares at all)
- */
- if (square && (!comma || square < comma)) {
- /*
- * i.e: o2ib0(ib0)[1,2], number between square
- * brackets are CPTs this NI needs to be bond
- */
- if (bracket && bracket > square) {
- tmp = square;
- goto failed_syntax;
- }
-
- tmp = strchr(square, ']');
- if (!tmp) {
- tmp = square;
- goto failed_syntax;
- }
-
- rc = cfs_expr_list_parse(square, tmp - square + 1,
- 0, LNET_CPT_NUMBER - 1, &el);
- if (rc) {
- tmp = square;
- goto failed_syntax;
- }
-
- while (square <= tmp)
- *square++ = ' ';
- }
-
- if (!bracket || (comma && comma < bracket)) {
- /* no interface list specified */
-
- if (comma)
- *comma++ = 0;
- net = libcfs_str2net(strim(str));
-
- if (net == LNET_NIDNET(LNET_NID_ANY)) {
- LCONSOLE_ERROR_MSG(0x113,
- "Unrecognised network type\n");
- tmp = str;
- goto failed_syntax;
- }
-
- if (LNET_NETTYP(net) != LOLND && /* LO is implicit */
- !lnet_ni_alloc(net, el, nilist))
- goto failed;
-
- if (el) {
- cfs_expr_list_free(el);
- el = NULL;
- }
-
- str = comma;
- continue;
- }
-
- *bracket = 0;
- net = libcfs_str2net(strim(str));
- if (net == LNET_NIDNET(LNET_NID_ANY)) {
- tmp = str;
- goto failed_syntax;
- }
-
- ni = lnet_ni_alloc(net, el, nilist);
- if (!ni)
- goto failed;
-
- if (el) {
- cfs_expr_list_free(el);
- el = NULL;
- }
-
- niface = 0;
- iface = bracket + 1;
-
- bracket = strchr(iface, ')');
- if (!bracket) {
- tmp = iface;
- goto failed_syntax;
- }
-
- *bracket = 0;
- do {
- comma = strchr(iface, ',');
- if (comma)
- *comma++ = 0;
-
- iface = strim(iface);
- if (!*iface) {
- tmp = iface;
- goto failed_syntax;
- }
-
- if (niface == LNET_MAX_INTERFACES) {
- LCONSOLE_ERROR_MSG(0x115,
- "Too many interfaces for net %s\n",
- libcfs_net2str(net));
- goto failed;
- }
-
- /*
- * Allocate a separate piece of memory and copy
- * into it the string, so we don't have
- * a depencency on the tokens string. This way we
- * can free the tokens at the end of the function.
- * The newly allocated ni_interfaces[] can be
- * freed when freeing the NI
- */
- ni->ni_interfaces[niface] = kstrdup(iface, GFP_KERNEL);
- if (!ni->ni_interfaces[niface]) {
- CERROR("Can't allocate net interface name\n");
- goto failed;
- }
- niface++;
- iface = comma;
- } while (iface);
-
- str = bracket + 1;
- comma = strchr(bracket + 1, ',');
- if (comma) {
- *comma = 0;
- str = strim(str);
- if (*str) {
- tmp = str;
- goto failed_syntax;
- }
- str = comma + 1;
- continue;
- }
-
- str = strim(str);
- if (*str) {
- tmp = str;
- goto failed_syntax;
- }
- }
-
- list_for_each(temp_node, nilist)
- nnets++;
-
- kfree(tokens);
- return nnets;
-
- failed_syntax:
- lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
- failed:
- while (!list_empty(nilist)) {
- ni = list_entry(nilist->next, struct lnet_ni, ni_list);
-
- list_del(&ni->ni_list);
- lnet_ni_free(ni);
- }
-
- if (el)
- cfs_expr_list_free(el);
-
- kfree(tokens);
-
- return -EINVAL;
-}
-
-static struct lnet_text_buf *
-lnet_new_text_buf(int str_len)
-{
- struct lnet_text_buf *ltb;
- int nob;
-
- /* NB allocate space for the terminating 0 */
- nob = offsetof(struct lnet_text_buf, ltb_text[str_len + 1]);
- if (nob > LNET_SINGLE_TEXTBUF_NOB) {
- /* _way_ conservative for "route net gateway..." */
- CERROR("text buffer too big\n");
- return NULL;
- }
-
- if (lnet_tbnob + nob > LNET_MAX_TEXTBUF_NOB) {
- CERROR("Too many text buffers\n");
- return NULL;
- }
-
- ltb = kzalloc(nob, GFP_KERNEL);
- if (!ltb)
- return NULL;
-
- ltb->ltb_size = nob;
- ltb->ltb_text[0] = 0;
- lnet_tbnob += nob;
- return ltb;
-}
-
-static void
-lnet_free_text_buf(struct lnet_text_buf *ltb)
-{
- lnet_tbnob -= ltb->ltb_size;
- kfree(ltb);
-}
-
-static void
-lnet_free_text_bufs(struct list_head *tbs)
-{
- struct lnet_text_buf *ltb;
-
- while (!list_empty(tbs)) {
- ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list);
-
- list_del(&ltb->ltb_list);
- lnet_free_text_buf(ltb);
- }
-}
-
-static int
-lnet_str2tbs_sep(struct list_head *tbs, char *str)
-{
- struct list_head pending;
- char *sep;
- int nob;
- int i;
- struct lnet_text_buf *ltb;
-
- INIT_LIST_HEAD(&pending);
-
- /* Split 'str' into separate commands */
- for (;;) {
- /* skip leading whitespace */
- while (isspace(*str))
- str++;
-
- /* scan for separator or comment */
- for (sep = str; *sep; sep++)
- if (lnet_issep(*sep) || *sep == '#')
- break;
-
- nob = (int)(sep - str);
- if (nob > 0) {
- ltb = lnet_new_text_buf(nob);
- if (!ltb) {
- lnet_free_text_bufs(&pending);
- return -ENOMEM;
- }
-
- for (i = 0; i < nob; i++)
- if (isspace(str[i]))
- ltb->ltb_text[i] = ' ';
- else
- ltb->ltb_text[i] = str[i];
-
- ltb->ltb_text[nob] = 0;
-
- list_add_tail(&ltb->ltb_list, &pending);
- }
-
- if (*sep == '#') {
- /* scan for separator */
- do {
- sep++;
- } while (*sep && !lnet_issep(*sep));
- }
-
- if (!*sep)
- break;
-
- str = sep + 1;
- }
-
- list_splice(&pending, tbs->prev);
- return 0;
-}
-
-static int
-lnet_expand1tb(struct list_head *list,
- char *str, char *sep1, char *sep2,
- char *item, int itemlen)
-{
- int len1 = (int)(sep1 - str);
- int len2 = strlen(sep2 + 1);
- struct lnet_text_buf *ltb;
-
- LASSERT(*sep1 == '[');
- LASSERT(*sep2 == ']');
-
- ltb = lnet_new_text_buf(len1 + itemlen + len2);
- if (!ltb)
- return -ENOMEM;
-
- memcpy(ltb->ltb_text, str, len1);
- memcpy(&ltb->ltb_text[len1], item, itemlen);
- memcpy(&ltb->ltb_text[len1 + itemlen], sep2 + 1, len2);
- ltb->ltb_text[len1 + itemlen + len2] = 0;
-
- list_add_tail(&ltb->ltb_list, list);
- return 0;
-}
-
-static int
-lnet_str2tbs_expand(struct list_head *tbs, char *str)
-{
- char num[16];
- struct list_head pending;
- char *sep;
- char *sep2;
- char *parsed;
- char *enditem;
- int lo;
- int hi;
- int stride;
- int i;
- int nob;
- int scanned;
-
- INIT_LIST_HEAD(&pending);
-
- sep = strchr(str, '[');
- if (!sep) /* nothing to expand */
- return 0;
-
- sep2 = strchr(sep, ']');
- if (!sep2)
- goto failed;
-
- for (parsed = sep; parsed < sep2; parsed = enditem) {
- enditem = ++parsed;
- while (enditem < sep2 && *enditem != ',')
- enditem++;
-
- if (enditem == parsed) /* no empty items */
- goto failed;
-
- if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi,
- &stride, &scanned) < 3) {
- if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) {
- /* simple string enumeration */
- if (lnet_expand1tb(&pending, str, sep, sep2,
- parsed,
- (int)(enditem - parsed))) {
- goto failed;
- }
- continue;
- }
-
- stride = 1;
- }
-
- /* range expansion */
-
- if (enditem != parsed + scanned) /* no trailing junk */
- goto failed;
-
- if (hi < 0 || lo < 0 || stride < 0 || hi < lo ||
- (hi - lo) % stride)
- goto failed;
-
- for (i = lo; i <= hi; i += stride) {
- snprintf(num, sizeof(num), "%d", i);
- nob = strlen(num);
- if (nob + 1 == sizeof(num))
- goto failed;
-
- if (lnet_expand1tb(&pending, str, sep, sep2,
- num, nob))
- goto failed;
- }
- }
-
- list_splice(&pending, tbs->prev);
- return 1;
-
- failed:
- lnet_free_text_bufs(&pending);
- return -EINVAL;
-}
-
-static int
-lnet_parse_hops(char *str, unsigned int *hops)
-{
- int len = strlen(str);
- int nob = len;
-
- return (sscanf(str, "%u%n", hops, &nob) >= 1 &&
- nob == len &&
- *hops > 0 && *hops < 256);
-}
-
-#define LNET_PRIORITY_SEPARATOR (':')
-
-static int
-lnet_parse_priority(char *str, unsigned int *priority, char **token)
-{
- int nob;
- char *sep;
- int len;
-
- sep = strchr(str, LNET_PRIORITY_SEPARATOR);
- if (!sep) {
- *priority = 0;
- return 0;
- }
- len = strlen(sep + 1);
-
- if ((sscanf((sep + 1), "%u%n", priority, &nob) < 1) || (len != nob)) {
- /*
- * Update the caller's token pointer so it treats the found
- * priority as the token to report in the error message.
- */
- *token += sep - str + 1;
- return -EINVAL;
- }
-
- CDEBUG(D_NET, "gateway %s, priority %d, nob %d\n", str, *priority, nob);
-
- /*
- * Change priority separator to \0 to be able to parse NID
- */
- *sep = '\0';
- return 0;
-}
-
-static int
-lnet_parse_route(char *str, int *im_a_router)
-{
- /* static scratch buffer OK (single threaded) */
- static char cmd[LNET_SINGLE_TEXTBUF_NOB];
-
- struct list_head nets;
- struct list_head gateways;
- struct list_head *tmp1;
- struct list_head *tmp2;
- __u32 net;
- lnet_nid_t nid;
- struct lnet_text_buf *ltb;
- int rc;
- char *sep;
- char *token = str;
- int ntokens = 0;
- int myrc = -1;
- __u32 hops;
- int got_hops = 0;
- unsigned int priority = 0;
-
- INIT_LIST_HEAD(&gateways);
- INIT_LIST_HEAD(&nets);
-
- /* save a copy of the string for error messages */
- strncpy(cmd, str, sizeof(cmd));
- cmd[sizeof(cmd) - 1] = '\0';
-
- sep = str;
- for (;;) {
- /* scan for token start */
- while (isspace(*sep))
- sep++;
- if (!*sep) {
- if (ntokens < (got_hops ? 3 : 2))
- goto token_error;
- break;
- }
-
- ntokens++;
- token = sep++;
-
- /* scan for token end */
- while (*sep && !isspace(*sep))
- sep++;
- if (*sep)
- *sep++ = 0;
-
- if (ntokens == 1) {
- tmp2 = &nets; /* expanding nets */
- } else if (ntokens == 2 &&
- lnet_parse_hops(token, &hops)) {
- got_hops = 1; /* got a hop count */
- continue;
- } else {
- tmp2 = &gateways; /* expanding gateways */
- }
-
- ltb = lnet_new_text_buf(strlen(token));
- if (!ltb)
- goto out;
-
- strcpy(ltb->ltb_text, token);
- tmp1 = &ltb->ltb_list;
- list_add_tail(tmp1, tmp2);
-
- while (tmp1 != tmp2) {
- ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list);
-
- rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
- if (rc < 0)
- goto token_error;
-
- tmp1 = tmp1->next;
-
- if (rc > 0) { /* expanded! */
- list_del(&ltb->ltb_list);
- lnet_free_text_buf(ltb);
- continue;
- }
-
- if (ntokens == 1) {
- net = libcfs_str2net(ltb->ltb_text);
- if (net == LNET_NIDNET(LNET_NID_ANY) ||
- LNET_NETTYP(net) == LOLND)
- goto token_error;
- } else {
- rc = lnet_parse_priority(ltb->ltb_text,
- &priority, &token);
- if (rc < 0)
- goto token_error;
-
- nid = libcfs_str2nid(ltb->ltb_text);
- if (nid == LNET_NID_ANY ||
- LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
- goto token_error;
- }
- }
- }
-
- /**
- * if there are no hops set then we want to flag this value as
- * unset since hops is an optional parameter
- */
- if (!got_hops)
- hops = LNET_UNDEFINED_HOPS;
-
- LASSERT(!list_empty(&nets));
- LASSERT(!list_empty(&gateways));
-
- list_for_each(tmp1, &nets) {
- ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list);
- net = libcfs_str2net(ltb->ltb_text);
- LASSERT(net != LNET_NIDNET(LNET_NID_ANY));
-
- list_for_each(tmp2, &gateways) {
- ltb = list_entry(tmp2, struct lnet_text_buf, ltb_list);
- nid = libcfs_str2nid(ltb->ltb_text);
- LASSERT(nid != LNET_NID_ANY);
-
- if (lnet_islocalnid(nid)) {
- *im_a_router = 1;
- continue;
- }
-
- rc = lnet_add_route(net, hops, nid, priority);
- if (rc && rc != -EEXIST && rc != -EHOSTUNREACH) {
- CERROR("Can't create route to %s via %s\n",
- libcfs_net2str(net),
- libcfs_nid2str(nid));
- goto out;
- }
- }
- }
-
- myrc = 0;
- goto out;
-
- token_error:
- lnet_syntax("routes", cmd, (int)(token - str), strlen(token));
- out:
- lnet_free_text_bufs(&nets);
- lnet_free_text_bufs(&gateways);
- return myrc;
-}
-
-static int
-lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
-{
- struct lnet_text_buf *ltb;
-
- while (!list_empty(tbs)) {
- ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list);
-
- if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
- lnet_free_text_bufs(tbs);
- return -EINVAL;
- }
-
- list_del(&ltb->ltb_list);
- lnet_free_text_buf(ltb);
- }
-
- return 0;
-}
-
-int
-lnet_parse_routes(char *routes, int *im_a_router)
-{
- struct list_head tbs;
- int rc = 0;
-
- *im_a_router = 0;
-
- INIT_LIST_HEAD(&tbs);
-
- if (lnet_str2tbs_sep(&tbs, routes) < 0) {
- CERROR("Error parsing routes\n");
- rc = -EINVAL;
- } else {
- rc = lnet_parse_route_tbs(&tbs, im_a_router);
- }
-
- LASSERT(!lnet_tbnob);
- return rc;
-}
-
-static int
-lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
-{
- LIST_HEAD(list);
- int rc;
- int i;
-
- rc = cfs_ip_addr_parse(token, len, &list);
- if (rc)
- return rc;
-
- for (rc = i = 0; !rc && i < nip; i++)
- rc = cfs_ip_addr_match(ipaddrs[i], &list);
-
- cfs_expr_list_free_list(&list);
-
- return rc;
-}
-
-static int
-lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
-{
- static char tokens[LNET_SINGLE_TEXTBUF_NOB];
-
- int matched = 0;
- int ntokens = 0;
- int len;
- char *net = NULL;
- char *sep;
- char *token;
- int rc;
-
- LASSERT(strlen(net_entry) < sizeof(tokens));
-
- /* work on a copy of the string */
- strcpy(tokens, net_entry);
- sep = tokens;
- for (;;) {
- /* scan for token start */
- while (isspace(*sep))
- sep++;
- if (!*sep)
- break;
-
- token = sep++;
-
- /* scan for token end */
- while (*sep && !isspace(*sep))
- sep++;
- if (*sep)
- *sep++ = 0;
-
- if (!ntokens++) {
- net = token;
- continue;
- }
-
- len = strlen(token);
-
- rc = lnet_match_network_token(token, len, ipaddrs, nip);
- if (rc < 0) {
- lnet_syntax("ip2nets", net_entry,
- (int)(token - tokens), len);
- return rc;
- }
-
- if (rc)
- matched |= 1;
- }
-
- if (!matched)
- return 0;
-
- strcpy(net_entry, net); /* replace with matched net */
- return 1;
-}
-
-static __u32
-lnet_netspec2net(char *netspec)
-{
- char *bracket = strchr(netspec, '(');
- __u32 net;
-
- if (bracket)
- *bracket = 0;
-
- net = libcfs_str2net(netspec);
-
- if (bracket)
- *bracket = '(';
-
- return net;
-}
-
-static int
-lnet_splitnets(char *source, struct list_head *nets)
-{
- int offset = 0;
- int offset2;
- int len;
- struct lnet_text_buf *tb;
- struct lnet_text_buf *tb2;
- struct list_head *t;
- char *sep;
- char *bracket;
- __u32 net;
-
- LASSERT(!list_empty(nets));
- LASSERT(nets->next == nets->prev); /* single entry */
-
- tb = list_entry(nets->next, struct lnet_text_buf, ltb_list);
-
- for (;;) {
- sep = strchr(tb->ltb_text, ',');
- bracket = strchr(tb->ltb_text, '(');
-
- if (sep && bracket && bracket < sep) {
- /* netspec lists interfaces... */
-
- offset2 = offset + (int)(bracket - tb->ltb_text);
- len = strlen(bracket);
-
- bracket = strchr(bracket + 1, ')');
-
- if (!bracket ||
- !(bracket[1] == ',' || !bracket[1])) {
- lnet_syntax("ip2nets", source, offset2, len);
- return -EINVAL;
- }
-
- sep = !bracket[1] ? NULL : bracket + 1;
- }
-
- if (sep)
- *sep++ = 0;
-
- net = lnet_netspec2net(tb->ltb_text);
- if (net == LNET_NIDNET(LNET_NID_ANY)) {
- lnet_syntax("ip2nets", source, offset,
- strlen(tb->ltb_text));
- return -EINVAL;
- }
-
- list_for_each(t, nets) {
- tb2 = list_entry(t, struct lnet_text_buf, ltb_list);
-
- if (tb2 == tb)
- continue;
-
- if (net == lnet_netspec2net(tb2->ltb_text)) {
- /* duplicate network */
- lnet_syntax("ip2nets", source, offset,
- strlen(tb->ltb_text));
- return -EINVAL;
- }
- }
-
- if (!sep)
- return 0;
-
- offset += (int)(sep - tb->ltb_text);
- len = strlen(sep);
- tb2 = lnet_new_text_buf(len);
- if (!tb2)
- return -ENOMEM;
-
- strncpy(tb2->ltb_text, sep, len);
- tb2->ltb_text[len] = '\0';
- list_add_tail(&tb2->ltb_list, nets);
-
- tb = tb2;
- }
-}
-
-static int
-lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
-{
- static char networks[LNET_SINGLE_TEXTBUF_NOB];
- static char source[LNET_SINGLE_TEXTBUF_NOB];
-
- struct list_head raw_entries;
- struct list_head matched_nets;
- struct list_head current_nets;
- struct list_head *t;
- struct list_head *t2;
- struct lnet_text_buf *tb;
- struct lnet_text_buf *temp;
- struct lnet_text_buf *tb2;
- __u32 net1;
- __u32 net2;
- int len;
- int count;
- int dup;
- int rc;
-
- INIT_LIST_HEAD(&raw_entries);
- if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
- CERROR("Error parsing ip2nets\n");
- LASSERT(!lnet_tbnob);
- return -EINVAL;
- }
-
- INIT_LIST_HEAD(&matched_nets);
- INIT_LIST_HEAD(&current_nets);
- networks[0] = 0;
- count = 0;
- len = 0;
- rc = 0;
-
- list_for_each_entry_safe(tb, temp, &raw_entries, ltb_list) {
- strncpy(source, tb->ltb_text, sizeof(source));
- source[sizeof(source) - 1] = '\0';
-
- /* replace ltb_text with the network(s) add on match */
- rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip);
- if (rc < 0)
- break;
-
- list_del(&tb->ltb_list);
-
- if (!rc) { /* no match */
- lnet_free_text_buf(tb);
- continue;
- }
-
- /* split into separate networks */
- INIT_LIST_HEAD(&current_nets);
- list_add(&tb->ltb_list, &current_nets);
- rc = lnet_splitnets(source, &current_nets);
- if (rc < 0)
- break;
-
- dup = 0;
- list_for_each(t, &current_nets) {
- tb = list_entry(t, struct lnet_text_buf, ltb_list);
- net1 = lnet_netspec2net(tb->ltb_text);
- LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY));
-
- list_for_each(t2, &matched_nets) {
- tb2 = list_entry(t2, struct lnet_text_buf,
- ltb_list);
- net2 = lnet_netspec2net(tb2->ltb_text);
- LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY));
-
- if (net1 == net2) {
- dup = 1;
- break;
- }
- }
-
- if (dup)
- break;
- }
-
- if (dup) {
- lnet_free_text_bufs(&current_nets);
- continue;
- }
-
- list_for_each_safe(t, t2, &current_nets) {
- tb = list_entry(t, struct lnet_text_buf, ltb_list);
-
- list_del(&tb->ltb_list);
- list_add_tail(&tb->ltb_list, &matched_nets);
-
- len += snprintf(networks + len, sizeof(networks) - len,
- "%s%s", !len ? "" : ",",
- tb->ltb_text);
-
- if (len >= sizeof(networks)) {
- CERROR("Too many matched networks\n");
- rc = -E2BIG;
- goto out;
- }
- }
-
- count++;
- }
-
- out:
- lnet_free_text_bufs(&raw_entries);
- lnet_free_text_bufs(&matched_nets);
- lnet_free_text_bufs(&current_nets);
- LASSERT(!lnet_tbnob);
-
- if (rc < 0)
- return rc;
-
- *networksp = networks;
- return count;
-}
-
-static int
-lnet_ipaddr_enumerate(__u32 **ipaddrsp)
-{
- int up;
- __u32 netmask;
- __u32 *ipaddrs;
- __u32 *ipaddrs2;
- int nip;
- char **ifnames;
- int nif = lnet_ipif_enumerate(&ifnames);
- int i;
- int rc;
-
- if (nif <= 0)
- return nif;
-
- ipaddrs = kcalloc(nif, sizeof(*ipaddrs), GFP_KERNEL);
- if (!ipaddrs) {
- CERROR("Can't allocate ipaddrs[%d]\n", nif);
- lnet_ipif_free_enumeration(ifnames, nif);
- return -ENOMEM;
- }
-
- for (i = nip = 0; i < nif; i++) {
- if (!strcmp(ifnames[i], "lo"))
- continue;
-
- rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask);
- if (rc) {
- CWARN("Can't query interface %s: %d\n",
- ifnames[i], rc);
- continue;
- }
-
- if (!up) {
- CWARN("Ignoring interface %s: it's down\n",
- ifnames[i]);
- continue;
- }
-
- nip++;
- }
-
- lnet_ipif_free_enumeration(ifnames, nif);
-
- if (nip == nif) {
- *ipaddrsp = ipaddrs;
- } else {
- if (nip > 0) {
- ipaddrs2 = kcalloc(nip, sizeof(*ipaddrs2),
- GFP_KERNEL);
- if (!ipaddrs2) {
- CERROR("Can't allocate ipaddrs[%d]\n", nip);
- nip = -ENOMEM;
- } else {
- memcpy(ipaddrs2, ipaddrs,
- nip * sizeof(*ipaddrs));
- *ipaddrsp = ipaddrs2;
- rc = nip;
- }
- }
- kfree(ipaddrs);
- }
- return nip;
-}
-
-int
-lnet_parse_ip2nets(char **networksp, char *ip2nets)
-{
- __u32 *ipaddrs = NULL;
- int nip = lnet_ipaddr_enumerate(&ipaddrs);
- int rc;
-
- if (nip < 0) {
- LCONSOLE_ERROR_MSG(0x117,
- "Error %d enumerating local IP interfaces for ip2nets to match\n",
- nip);
- return nip;
- }
-
- if (!nip) {
- LCONSOLE_ERROR_MSG(0x118,
- "No local IP interfaces for ip2nets to match\n");
- return -ENOENT;
- }
-
- rc = lnet_match_networks(networksp, ip2nets, ipaddrs, nip);
- kfree(ipaddrs);
-
- if (rc < 0) {
- LCONSOLE_ERROR_MSG(0x119, "Error %d parsing ip2nets\n", rc);
- return rc;
- }
-
- if (!rc) {
- LCONSOLE_ERROR_MSG(0x11a,
- "ip2nets does not match any local IP interfaces\n");
- return -ENOENT;
- }
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
deleted file mode 100644
index ea53b5cb3f72..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ /dev/null
@@ -1,426 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-eq.c
- *
- * Library level Event queue management routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-
-/**
- * Create an event queue that has room for \a count number of events.
- *
- * The event queue is circular and older events will be overwritten by new
- * ones if they are not removed in time by the user using the functions
- * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to
- * determine the appropriate size of the event queue to prevent this loss
- * of events. Note that when EQ handler is specified in \a callback, no
- * event loss can happen, since the handler is run for each event deposited
- * into the EQ.
- *
- * \param count The number of events to be stored in the event queue. It
- * will be rounded up to the next power of two.
- * \param callback A handler function that runs when an event is deposited
- * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to
- * indicate that no event handler is desired.
- * \param handle On successful return, this location will hold a handle for
- * the newly created EQ.
- *
- * \retval 0 On success.
- * \retval -EINVAL If an parameter is not valid.
- * \retval -ENOMEM If memory for the EQ can't be allocated.
- *
- * \see lnet_eq_handler_t for the discussion on EQ handler semantics.
- */
-int
-LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
- struct lnet_handle_eq *handle)
-{
- struct lnet_eq *eq;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- /*
- * We need count to be a power of 2 so that when eq_{enq,deq}_seq
- * overflow, they don't skip entries, so the queue has the same
- * apparent capacity at all times
- */
- if (count)
- count = roundup_pow_of_two(count);
-
- if (callback != LNET_EQ_HANDLER_NONE && count)
- CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count);
-
- /*
- * count can be 0 if only need callback, we can eliminate
- * overhead of enqueue event
- */
- if (!count && callback == LNET_EQ_HANDLER_NONE)
- return -EINVAL;
-
- eq = kzalloc(sizeof(*eq), GFP_NOFS);
- if (!eq)
- return -ENOMEM;
-
- if (count) {
- eq->eq_events = kvmalloc_array(count, sizeof(struct lnet_event),
- GFP_KERNEL | __GFP_ZERO);
- if (!eq->eq_events)
- goto failed;
- /*
- * NB allocator has set all event sequence numbers to 0,
- * so all them should be earlier than eq_deq_seq
- */
- }
-
- eq->eq_deq_seq = 1;
- eq->eq_enq_seq = 1;
- eq->eq_size = count;
- eq->eq_callback = callback;
-
- eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*eq->eq_refs[0]));
- if (!eq->eq_refs)
- goto failed;
-
- /* MUST hold both exclusive lnet_res_lock */
- lnet_res_lock(LNET_LOCK_EX);
- /*
- * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
- * both EQ lookup and poll event with only lnet_eq_wait_lock
- */
- lnet_eq_wait_lock();
-
- lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
- list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
-
- lnet_eq_wait_unlock();
- lnet_res_unlock(LNET_LOCK_EX);
-
- lnet_eq2handle(handle, eq);
- return 0;
-
-failed:
- kvfree(eq->eq_events);
-
- if (eq->eq_refs)
- cfs_percpt_free(eq->eq_refs);
-
- kfree(eq);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(LNetEQAlloc);
-
-/**
- * Release the resources associated with an event queue if it's idle;
- * otherwise do nothing and it's up to the user to try again.
- *
- * \param eqh A handle for the event queue to be released.
- *
- * \retval 0 If the EQ is not in use and freed.
- * \retval -ENOENT If \a eqh does not point to a valid EQ.
- * \retval -EBUSY If the EQ is still in use by some MDs.
- */
-int
-LNetEQFree(struct lnet_handle_eq eqh)
-{
- struct lnet_eq *eq;
- struct lnet_event *events = NULL;
- int **refs = NULL;
- int *ref;
- int rc = 0;
- int size = 0;
- int i;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- lnet_res_lock(LNET_LOCK_EX);
- /*
- * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
- * both EQ lookup and poll event with only lnet_eq_wait_lock
- */
- lnet_eq_wait_lock();
-
- eq = lnet_handle2eq(&eqh);
- if (!eq) {
- rc = -ENOENT;
- goto out;
- }
-
- cfs_percpt_for_each(ref, i, eq->eq_refs) {
- LASSERT(*ref >= 0);
- if (!*ref)
- continue;
-
- CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n",
- i, *ref);
- rc = -EBUSY;
- goto out;
- }
-
- /* stash for free after lock dropped */
- events = eq->eq_events;
- size = eq->eq_size;
- refs = eq->eq_refs;
-
- lnet_res_lh_invalidate(&eq->eq_lh);
- list_del(&eq->eq_list);
- kfree(eq);
- out:
- lnet_eq_wait_unlock();
- lnet_res_unlock(LNET_LOCK_EX);
-
- kvfree(events);
- if (refs)
- cfs_percpt_free(refs);
-
- return rc;
-}
-EXPORT_SYMBOL(LNetEQFree);
-
-void
-lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev)
-{
- /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */
- int index;
-
- if (!eq->eq_size) {
- LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
- eq->eq_callback(ev);
- return;
- }
-
- lnet_eq_wait_lock();
- ev->sequence = eq->eq_enq_seq++;
-
- LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size));
- index = ev->sequence & (eq->eq_size - 1);
-
- eq->eq_events[index] = *ev;
-
- if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
- eq->eq_callback(ev);
-
- /* Wake anyone waiting in LNetEQPoll() */
- if (waitqueue_active(&the_lnet.ln_eq_waitq))
- wake_up_all(&the_lnet.ln_eq_waitq);
- lnet_eq_wait_unlock();
-}
-
-static int
-lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev)
-{
- int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
- struct lnet_event *new_event = &eq->eq_events[new_index];
- int rc;
-
- /* must called with lnet_eq_wait_lock hold */
- if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
- return 0;
-
- /* We've got a new event... */
- *ev = *new_event;
-
- CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
- new_event, eq->eq_deq_seq, eq->eq_size);
-
- /* ...but did it overwrite an event we've not seen yet? */
- if (eq->eq_deq_seq == new_event->sequence) {
- rc = 1;
- } else {
- /*
- * don't complain with CERROR: some EQs are sized small
- * anyway; if it's important, the caller should complain
- */
- CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
- eq->eq_deq_seq, new_event->sequence);
- rc = -EOVERFLOW;
- }
-
- eq->eq_deq_seq = new_event->sequence + 1;
- return rc;
-}
-
-/**
- * A nonblocking function that can be used to get the next event in an EQ.
- * If an event handler is associated with the EQ, the handler will run before
- * this function returns successfully. The event is removed from the queue.
- *
- * \param eventq A handle for the event queue.
- * \param event On successful return (1 or -EOVERFLOW), this location will
- * hold the next event in the EQ.
- *
- * \retval 0 No pending event in the EQ.
- * \retval 1 Indicates success.
- * \retval -ENOENT If \a eventq does not point to a valid EQ.
- * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
- * at least one event between this event and the last event obtained from the
- * EQ has been dropped due to limited space in the EQ.
- */
-
-/**
- * Block the calling process until there is an event in the EQ.
- * If an event handler is associated with the EQ, the handler will run before
- * this function returns successfully. This function returns the next event
- * in the EQ and removes it from the EQ.
- *
- * \param eventq A handle for the event queue.
- * \param event On successful return (1 or -EOVERFLOW), this location will
- * hold the next event in the EQ.
- *
- * \retval 1 Indicates success.
- * \retval -ENOENT If \a eventq does not point to a valid EQ.
- * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
- * at least one event between this event and the last event obtained from the
- * EQ has been dropped due to limited space in the EQ.
- */
-
-static int
-lnet_eq_wait_locked(int *timeout_ms, long state)
-__must_hold(&the_lnet.ln_eq_wait_lock)
-{
- int tms = *timeout_ms;
- int wait;
- wait_queue_entry_t wl;
- unsigned long now;
-
- if (!tms)
- return -ENXIO; /* don't want to wait and no new event */
-
- init_waitqueue_entry(&wl, current);
- set_current_state(state);
- add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
-
- lnet_eq_wait_unlock();
-
- if (tms < 0) {
- schedule();
- } else {
- now = jiffies;
- schedule_timeout(msecs_to_jiffies(tms));
- tms -= jiffies_to_msecs(jiffies - now);
- if (tms < 0) /* no more wait but may have new event */
- tms = 0;
- }
-
- wait = tms; /* might need to call here again */
- *timeout_ms = tms;
-
- lnet_eq_wait_lock();
- remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
-
- return wait;
-}
-
-/**
- * Block the calling process until there's an event from a set of EQs or
- * timeout happens.
- *
- * If an event handler is associated with the EQ, the handler will run before
- * this function returns successfully, in which case the corresponding event
- * is consumed.
- *
- * LNetEQPoll() provides a timeout to allow applications to poll, block for a
- * fixed period, or block indefinitely.
- *
- * \param eventqs,neq An array of EQ handles, and size of the array.
- * \param timeout_ms Time in milliseconds to wait for an event to occur on
- * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
- * infinite timeout.
- * \param interruptible, if true, use TASK_INTERRUPTIBLE, else TASK_NOLOAD
- * \param event,which On successful return (1 or -EOVERFLOW), \a event will
- * hold the next event in the EQs, and \a which will contain the index of the
- * EQ from which the event was taken.
- *
- * \retval 0 No pending event in the EQs after timeout.
- * \retval 1 Indicates success.
- * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
- * at least one event between this event and the last event obtained from the
- * EQ indicated by \a which has been dropped due to limited space in the EQ.
- * \retval -ENOENT If there's an invalid handle in \a eventqs.
- */
-int
-LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms,
- int interruptible,
- struct lnet_event *event, int *which)
-{
- int wait = 1;
- int rc;
- int i;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (neq < 1)
- return -ENOENT;
-
- lnet_eq_wait_lock();
-
- for (;;) {
- for (i = 0; i < neq; i++) {
- struct lnet_eq *eq = lnet_handle2eq(&eventqs[i]);
-
- if (!eq) {
- lnet_eq_wait_unlock();
- return -ENOENT;
- }
-
- rc = lnet_eq_dequeue_event(eq, event);
- if (rc) {
- lnet_eq_wait_unlock();
- *which = i;
- return rc;
- }
- }
-
- if (!wait)
- break;
-
- /*
- * return value of lnet_eq_wait_locked:
- * -1 : did nothing and it's sure no new event
- * 1 : sleep inside and wait until new event
- * 0 : don't want to wait anymore, but might have new event
- * so need to call dequeue again
- */
- wait = lnet_eq_wait_locked(&timeout_ms,
- interruptible ? TASK_INTERRUPTIBLE
- : TASK_NOLOAD);
- if (wait < 0) /* no new event */
- break;
- }
-
- lnet_eq_wait_unlock();
- return 0;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
deleted file mode 100644
index 8a22514aaf71..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ /dev/null
@@ -1,463 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-md.c
- *
- * Memory Descriptor management routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-
-/* must be called with lnet_res_lock held */
-void
-lnet_md_unlink(struct lnet_libmd *md)
-{
- if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) {
- /* first unlink attempt... */
- struct lnet_me *me = md->md_me;
-
- md->md_flags |= LNET_MD_FLAG_ZOMBIE;
-
- /*
- * Disassociate from ME (if any),
- * and unlink it if it was created
- * with LNET_UNLINK
- */
- if (me) {
- /* detach MD from portal */
- lnet_ptl_detach_md(me, md);
- if (me->me_unlink == LNET_UNLINK)
- lnet_me_unlink(me);
- }
-
- /* ensure all future handle lookups fail */
- lnet_res_lh_invalidate(&md->md_lh);
- }
-
- if (md->md_refcount) {
- CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
- return;
- }
-
- CDEBUG(D_NET, "Unlinking md %p\n", md);
-
- if (md->md_eq) {
- int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
-
- LASSERT(*md->md_eq->eq_refs[cpt] > 0);
- (*md->md_eq->eq_refs[cpt])--;
- }
-
- LASSERT(!list_empty(&md->md_list));
- list_del_init(&md->md_list);
- kfree(md);
-}
-
-static int
-lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink)
-{
- int i;
- unsigned int niov;
- int total_length = 0;
-
- lmd->md_me = NULL;
- lmd->md_start = umd->start;
- lmd->md_offset = 0;
- lmd->md_max_size = umd->max_size;
- lmd->md_options = umd->options;
- lmd->md_user_ptr = umd->user_ptr;
- lmd->md_eq = NULL;
- lmd->md_threshold = umd->threshold;
- lmd->md_refcount = 0;
- lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
-
- if (umd->options & LNET_MD_IOVEC) {
- if (umd->options & LNET_MD_KIOV) /* Can't specify both */
- return -EINVAL;
-
- niov = umd->length;
- lmd->md_niov = umd->length;
- memcpy(lmd->md_iov.iov, umd->start,
- niov * sizeof(lmd->md_iov.iov[0]));
-
- for (i = 0; i < (int)niov; i++) {
- /* We take the base address on trust */
- /* invalid length */
- if (lmd->md_iov.iov[i].iov_len <= 0)
- return -EINVAL;
-
- total_length += lmd->md_iov.iov[i].iov_len;
- }
-
- lmd->md_length = total_length;
-
- if ((umd->options & LNET_MD_MAX_SIZE) && /* use max size */
- (umd->max_size < 0 ||
- umd->max_size > total_length)) /* illegal max_size */
- return -EINVAL;
-
- } else if (umd->options & LNET_MD_KIOV) {
- niov = umd->length;
- lmd->md_niov = umd->length;
- memcpy(lmd->md_iov.kiov, umd->start,
- niov * sizeof(lmd->md_iov.kiov[0]));
-
- for (i = 0; i < (int)niov; i++) {
- /* We take the page pointer on trust */
- if (lmd->md_iov.kiov[i].bv_offset +
- lmd->md_iov.kiov[i].bv_len > PAGE_SIZE)
- return -EINVAL; /* invalid length */
-
- total_length += lmd->md_iov.kiov[i].bv_len;
- }
-
- lmd->md_length = total_length;
-
- if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
- (umd->max_size < 0 ||
- umd->max_size > total_length)) /* illegal max_size */
- return -EINVAL;
- } else { /* contiguous */
- lmd->md_length = umd->length;
- niov = 1;
- lmd->md_niov = 1;
- lmd->md_iov.iov[0].iov_base = umd->start;
- lmd->md_iov.iov[0].iov_len = umd->length;
-
- if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
- (umd->max_size < 0 ||
- umd->max_size > (int)umd->length)) /* illegal max_size */
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* must be called with resource lock held */
-static int
-lnet_md_link(struct lnet_libmd *md, struct lnet_handle_eq eq_handle, int cpt)
-{
- struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
-
- /*
- * NB we are passed an allocated, but inactive md.
- * if we return success, caller may lnet_md_unlink() it.
- * otherwise caller may only kfree() it.
- */
- /*
- * This implementation doesn't know how to create START events or
- * disable END events. Best to LASSERT our caller is compliant so
- * we find out quickly...
- */
- /*
- * TODO - reevaluate what should be here in light of
- * the removal of the start and end events
- * maybe there we shouldn't even allow LNET_EQ_NONE!)
- * LASSERT(!eq);
- */
- if (!LNetEQHandleIsInvalid(eq_handle)) {
- md->md_eq = lnet_handle2eq(&eq_handle);
-
- if (!md->md_eq)
- return -ENOENT;
-
- (*md->md_eq->eq_refs[cpt])++;
- }
-
- lnet_res_lh_initialize(container, &md->md_lh);
-
- LASSERT(list_empty(&md->md_list));
- list_add(&md->md_list, &container->rec_active);
-
- return 0;
-}
-
-/* must be called with lnet_res_lock held */
-void
-lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd)
-{
- /* NB this doesn't copy out all the iov entries so when a
- * discontiguous MD is copied out, the target gets to know the
- * original iov pointer (in start) and the number of entries it had
- * and that's all.
- */
- umd->start = lmd->md_start;
- umd->length = !(lmd->md_options &
- (LNET_MD_IOVEC | LNET_MD_KIOV)) ?
- lmd->md_length : lmd->md_niov;
- umd->threshold = lmd->md_threshold;
- umd->max_size = lmd->md_max_size;
- umd->options = lmd->md_options;
- umd->user_ptr = lmd->md_user_ptr;
- lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
-}
-
-static int
-lnet_md_validate(struct lnet_md *umd)
-{
- if (!umd->start && umd->length) {
- CERROR("MD start pointer can not be NULL with length %u\n",
- umd->length);
- return -EINVAL;
- }
-
- if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) &&
- umd->length > LNET_MAX_IOV) {
- CERROR("Invalid option: too many fragments %u, %d max\n",
- umd->length, LNET_MAX_IOV);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * Create a memory descriptor and attach it to a ME
- *
- * \param meh A handle for a ME to associate the new MD with.
- * \param umd Provides initial values for the user-visible parts of a MD.
- * Other than its use for initialization, there is no linkage between this
- * structure and the MD maintained by the LNet.
- * \param unlink A flag to indicate whether the MD is automatically unlinked
- * when it becomes inactive, either because the operation threshold drops to
- * zero or because the available memory becomes less than \a umd.max_size.
- * (Note that the check for unlinking a MD only occurs after the completion
- * of a successful operation on the MD.) The value LNET_UNLINK enables auto
- * unlinking; the value LNET_RETAIN disables it.
- * \param handle On successful returns, a handle to the newly created MD is
- * saved here. This handle can be used later in LNetMDUnlink().
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a umd is not valid.
- * \retval -ENOMEM If new MD cannot be allocated.
- * \retval -ENOENT Either \a meh or \a umd.eq_handle does not point to a
- * valid object. Note that it's OK to supply a NULL \a umd.eq_handle by
- * calling LNetInvalidateHandle() on it.
- * \retval -EBUSY If the ME pointed to by \a meh is already associated with
- * a MD.
- */
-int
-LNetMDAttach(struct lnet_handle_me meh, struct lnet_md umd,
- enum lnet_unlink unlink, struct lnet_handle_md *handle)
-{
- LIST_HEAD(matches);
- LIST_HEAD(drops);
- struct lnet_me *me;
- struct lnet_libmd *md;
- int cpt;
- int rc;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (lnet_md_validate(&umd))
- return -EINVAL;
-
- if (!(umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
- CERROR("Invalid option: no MD_OP set\n");
- return -EINVAL;
- }
-
- md = lnet_md_alloc(&umd);
- if (!md)
- return -ENOMEM;
-
- rc = lnet_md_build(md, &umd, unlink);
- if (rc)
- goto out_free;
-
- cpt = lnet_cpt_of_cookie(meh.cookie);
-
- lnet_res_lock(cpt);
-
- me = lnet_handle2me(&meh);
- if (!me)
- rc = -ENOENT;
- else if (me->me_md)
- rc = -EBUSY;
- else
- rc = lnet_md_link(md, umd.eq_handle, cpt);
-
- if (rc)
- goto out_unlock;
-
- /*
- * attach this MD to portal of ME and check if it matches any
- * blocked msgs on this portal
- */
- lnet_ptl_attach_md(me, md, &matches, &drops);
-
- lnet_md2handle(handle, md);
-
- lnet_res_unlock(cpt);
-
- lnet_drop_delayed_msg_list(&drops, "Bad match");
- lnet_recv_delayed_msg_list(&matches);
-
- return 0;
-
-out_unlock:
- lnet_res_unlock(cpt);
-out_free:
- kfree(md);
- return rc;
-}
-EXPORT_SYMBOL(LNetMDAttach);
-
-/**
- * Create a "free floating" memory descriptor - a MD that is not associated
- * with a ME. Such MDs are usually used in LNetPut() and LNetGet() operations.
- *
- * \param umd,unlink See the discussion for LNetMDAttach().
- * \param handle On successful returns, a handle to the newly created MD is
- * saved here. This handle can be used later in LNetMDUnlink(), LNetPut(),
- * and LNetGet() operations.
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a umd is not valid.
- * \retval -ENOMEM If new MD cannot be allocated.
- * \retval -ENOENT \a umd.eq_handle does not point to a valid EQ. Note that
- * it's OK to supply a NULL \a umd.eq_handle by calling
- * LNetInvalidateHandle() on it.
- */
-int
-LNetMDBind(struct lnet_md umd, enum lnet_unlink unlink,
- struct lnet_handle_md *handle)
-{
- struct lnet_libmd *md;
- int cpt;
- int rc;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (lnet_md_validate(&umd))
- return -EINVAL;
-
- if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
- CERROR("Invalid option: GET|PUT illegal on active MDs\n");
- return -EINVAL;
- }
-
- md = lnet_md_alloc(&umd);
- if (!md)
- return -ENOMEM;
-
- rc = lnet_md_build(md, &umd, unlink);
- if (rc)
- goto out_free;
-
- cpt = lnet_res_lock_current();
-
- rc = lnet_md_link(md, umd.eq_handle, cpt);
- if (rc)
- goto out_unlock;
-
- lnet_md2handle(handle, md);
-
- lnet_res_unlock(cpt);
- return 0;
-
-out_unlock:
- lnet_res_unlock(cpt);
-out_free:
- kfree(md);
-
- return rc;
-}
-EXPORT_SYMBOL(LNetMDBind);
-
-/**
- * Unlink the memory descriptor from any ME it may be linked to and release
- * the internal resources associated with it. As a result, active messages
- * associated with the MD may get aborted.
- *
- * This function does not free the memory region associated with the MD;
- * i.e., the memory the user allocated for this MD. If the ME associated with
- * this MD is not NULL and was created with auto unlink enabled, the ME is
- * unlinked as well (see LNetMEAttach()).
- *
- * Explicitly unlinking a MD via this function call has the same behavior as
- * a MD that has been automatically unlinked, except that no LNET_EVENT_UNLINK
- * is generated in the latter case.
- *
- * An unlinked event can be reported in two ways:
- * - If there's no pending operations on the MD, it's unlinked immediately
- * and an LNET_EVENT_UNLINK event is logged before this function returns.
- * - Otherwise, the MD is only marked for deletion when this function
- * returns, and the unlinked event will be piggybacked on the event of
- * the completion of the last operation by setting the unlinked field of
- * the event. No dedicated LNET_EVENT_UNLINK event is generated.
- *
- * Note that in both cases the unlinked field of the event is always set; no
- * more event will happen on the MD after such an event is logged.
- *
- * \param mdh A handle for the MD to be unlinked.
- *
- * \retval 0 On success.
- * \retval -ENOENT If \a mdh does not point to a valid MD object.
- */
-int
-LNetMDUnlink(struct lnet_handle_md mdh)
-{
- struct lnet_event ev;
- struct lnet_libmd *md;
- int cpt;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- cpt = lnet_cpt_of_cookie(mdh.cookie);
- lnet_res_lock(cpt);
-
- md = lnet_handle2md(&mdh);
- if (!md) {
- lnet_res_unlock(cpt);
- return -ENOENT;
- }
-
- md->md_flags |= LNET_MD_FLAG_ABORTED;
- /*
- * If the MD is busy, lnet_md_unlink just marks it for deletion, and
- * when the LND is done, the completion event flags that the MD was
- * unlinked. Otherwise, we enqueue an event now...
- */
- if (md->md_eq && !md->md_refcount) {
- lnet_build_unlink_event(md, &ev);
- lnet_eq_enqueue_event(md->md_eq, &ev);
- }
-
- lnet_md_unlink(md);
-
- lnet_res_unlock(cpt);
- return 0;
-}
-EXPORT_SYMBOL(LNetMDUnlink);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
deleted file mode 100644
index 672e37bdd045..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-me.c
- *
- * Match Entry management routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-
-/**
- * Create and attach a match entry to the match list of \a portal. The new
- * ME is empty, i.e. not associated with a memory descriptor. LNetMDAttach()
- * can be used to attach a MD to an empty ME.
- *
- * \param portal The portal table index where the ME should be attached.
- * \param match_id Specifies the match criteria for the process ID of
- * the requester. The constants LNET_PID_ANY and LNET_NID_ANY can be
- * used to wildcard either of the identifiers in the lnet_process_id
- * structure.
- * \param match_bits,ignore_bits Specify the match criteria to apply
- * to the match bits in the incoming request. The ignore bits are used
- * to mask out insignificant bits in the incoming match bits. The resulting
- * bits are then compared to the ME's match bits to determine if the
- * incoming request meets the match criteria.
- * \param unlink Indicates whether the ME should be unlinked when the memory
- * descriptor associated with it is unlinked (Note that the check for
- * unlinking a ME only occurs when the memory descriptor is unlinked.).
- * Valid values are LNET_RETAIN and LNET_UNLINK.
- * \param pos Indicates whether the new ME should be prepended or
- * appended to the match list. Allowed constants: LNET_INS_BEFORE,
- * LNET_INS_AFTER.
- * \param handle On successful returns, a handle to the newly created ME
- * object is saved here. This handle can be used later in LNetMEInsert(),
- * LNetMEUnlink(), or LNetMDAttach() functions.
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a portal is invalid.
- * \retval -ENOMEM If new ME object cannot be allocated.
- */
-int
-LNetMEAttach(unsigned int portal,
- struct lnet_process_id match_id,
- __u64 match_bits, __u64 ignore_bits,
- enum lnet_unlink unlink, enum lnet_ins_pos pos,
- struct lnet_handle_me *handle)
-{
- struct lnet_match_table *mtable;
- struct lnet_me *me;
- struct list_head *head;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- if ((int)portal >= the_lnet.ln_nportals)
- return -EINVAL;
-
- mtable = lnet_mt_of_attach(portal, match_id,
- match_bits, ignore_bits, pos);
- if (!mtable) /* can't match portal type */
- return -EPERM;
-
- me = kzalloc(sizeof(*me), GFP_NOFS);
- if (!me)
- return -ENOMEM;
-
- lnet_res_lock(mtable->mt_cpt);
-
- me->me_portal = portal;
- me->me_match_id = match_id;
- me->me_match_bits = match_bits;
- me->me_ignore_bits = ignore_bits;
- me->me_unlink = unlink;
- me->me_md = NULL;
-
- lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt],
- &me->me_lh);
- if (ignore_bits)
- head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
- else
- head = lnet_mt_match_head(mtable, match_id, match_bits);
-
- me->me_pos = head - &mtable->mt_mhash[0];
- if (pos == LNET_INS_AFTER || pos == LNET_INS_LOCAL)
- list_add_tail(&me->me_list, head);
- else
- list_add(&me->me_list, head);
-
- lnet_me2handle(handle, me);
-
- lnet_res_unlock(mtable->mt_cpt);
- return 0;
-}
-EXPORT_SYMBOL(LNetMEAttach);
-
-/**
- * Create and a match entry and insert it before or after the ME pointed to by
- * \a current_meh. The new ME is empty, i.e. not associated with a memory
- * descriptor. LNetMDAttach() can be used to attach a MD to an empty ME.
- *
- * This function is identical to LNetMEAttach() except for the position
- * where the new ME is inserted.
- *
- * \param current_meh A handle for a ME. The new ME will be inserted
- * immediately before or immediately after this ME.
- * \param match_id,match_bits,ignore_bits,unlink,pos,handle See the discussion
- * for LNetMEAttach().
- *
- * \retval 0 On success.
- * \retval -ENOMEM If new ME object cannot be allocated.
- * \retval -ENOENT If \a current_meh does not point to a valid match entry.
- */
-int
-LNetMEInsert(struct lnet_handle_me current_meh,
- struct lnet_process_id match_id,
- __u64 match_bits, __u64 ignore_bits,
- enum lnet_unlink unlink, enum lnet_ins_pos pos,
- struct lnet_handle_me *handle)
-{
- struct lnet_me *current_me;
- struct lnet_me *new_me;
- struct lnet_portal *ptl;
- int cpt;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (pos == LNET_INS_LOCAL)
- return -EPERM;
-
- new_me = kzalloc(sizeof(*new_me), GFP_NOFS);
- if (!new_me)
- return -ENOMEM;
-
- cpt = lnet_cpt_of_cookie(current_meh.cookie);
-
- lnet_res_lock(cpt);
-
- current_me = lnet_handle2me(&current_meh);
- if (!current_me) {
- kfree(new_me);
-
- lnet_res_unlock(cpt);
- return -ENOENT;
- }
-
- LASSERT(current_me->me_portal < the_lnet.ln_nportals);
-
- ptl = the_lnet.ln_portals[current_me->me_portal];
- if (lnet_ptl_is_unique(ptl)) {
- /* nosense to insertion on unique portal */
- kfree(new_me);
- lnet_res_unlock(cpt);
- return -EPERM;
- }
-
- new_me->me_pos = current_me->me_pos;
- new_me->me_portal = current_me->me_portal;
- new_me->me_match_id = match_id;
- new_me->me_match_bits = match_bits;
- new_me->me_ignore_bits = ignore_bits;
- new_me->me_unlink = unlink;
- new_me->me_md = NULL;
-
- lnet_res_lh_initialize(the_lnet.ln_me_containers[cpt], &new_me->me_lh);
-
- if (pos == LNET_INS_AFTER)
- list_add(&new_me->me_list, &current_me->me_list);
- else
- list_add_tail(&new_me->me_list, &current_me->me_list);
-
- lnet_me2handle(handle, new_me);
-
- lnet_res_unlock(cpt);
-
- return 0;
-}
-EXPORT_SYMBOL(LNetMEInsert);
-
-/**
- * Unlink a match entry from its match list.
- *
- * This operation also releases any resources associated with the ME. If a
- * memory descriptor is attached to the ME, then it will be unlinked as well
- * and an unlink event will be generated. It is an error to use the ME handle
- * after calling LNetMEUnlink().
- *
- * \param meh A handle for the ME to be unlinked.
- *
- * \retval 0 On success.
- * \retval -ENOENT If \a meh does not point to a valid ME.
- * \see LNetMDUnlink() for the discussion on delivering unlink event.
- */
-int
-LNetMEUnlink(struct lnet_handle_me meh)
-{
- struct lnet_me *me;
- struct lnet_libmd *md;
- struct lnet_event ev;
- int cpt;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- cpt = lnet_cpt_of_cookie(meh.cookie);
- lnet_res_lock(cpt);
-
- me = lnet_handle2me(&meh);
- if (!me) {
- lnet_res_unlock(cpt);
- return -ENOENT;
- }
-
- md = me->me_md;
- if (md) {
- md->md_flags |= LNET_MD_FLAG_ABORTED;
- if (md->md_eq && !md->md_refcount) {
- lnet_build_unlink_event(md, &ev);
- lnet_eq_enqueue_event(md->md_eq, &ev);
- }
- }
-
- lnet_me_unlink(me);
-
- lnet_res_unlock(cpt);
- return 0;
-}
-EXPORT_SYMBOL(LNetMEUnlink);
-
-/* call with lnet_res_lock please */
-void
-lnet_me_unlink(struct lnet_me *me)
-{
- list_del(&me->me_list);
-
- if (me->me_md) {
- struct lnet_libmd *md = me->me_md;
-
- /* detach MD from portal of this ME */
- lnet_ptl_detach_md(me, md);
- lnet_md_unlink(md);
- }
-
- lnet_res_lh_invalidate(&me->me_lh);
- kfree(me);
-}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
deleted file mode 100644
index ed43b3f4b114..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ /dev/null
@@ -1,2388 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-move.c
- *
- * Data movement routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
-
-static int local_nid_dist_zero = 1;
-module_param(local_nid_dist_zero, int, 0444);
-MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
-
-int
-lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
-{
- struct lnet_test_peer *tp;
- struct lnet_test_peer *temp;
- struct list_head *el;
- struct list_head *next;
- struct list_head cull;
-
- /* NB: use lnet_net_lock(0) to serialize operations on test peers */
- if (threshold) {
- /* Adding a new entry */
- tp = kzalloc(sizeof(*tp), GFP_NOFS);
- if (!tp)
- return -ENOMEM;
-
- tp->tp_nid = nid;
- tp->tp_threshold = threshold;
-
- lnet_net_lock(0);
- list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
- lnet_net_unlock(0);
- return 0;
- }
-
- /* removing entries */
- INIT_LIST_HEAD(&cull);
-
- lnet_net_lock(0);
-
- list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
- tp = list_entry(el, struct lnet_test_peer, tp_list);
-
- if (!tp->tp_threshold || /* needs culling anyway */
- nid == LNET_NID_ANY || /* removing all entries */
- tp->tp_nid == nid) { /* matched this one */
- list_del(&tp->tp_list);
- list_add(&tp->tp_list, &cull);
- }
- }
-
- lnet_net_unlock(0);
-
- list_for_each_entry_safe(tp, temp, &cull, tp_list) {
- list_del(&tp->tp_list);
- kfree(tp);
- }
- return 0;
-}
-
-static int
-fail_peer(lnet_nid_t nid, int outgoing)
-{
- struct lnet_test_peer *tp;
- struct lnet_test_peer *temp;
- struct list_head *el;
- struct list_head *next;
- struct list_head cull;
- int fail = 0;
-
- INIT_LIST_HEAD(&cull);
-
- /* NB: use lnet_net_lock(0) to serialize operations on test peers */
- lnet_net_lock(0);
-
- list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
- tp = list_entry(el, struct lnet_test_peer, tp_list);
-
- if (!tp->tp_threshold) {
- /* zombie entry */
- if (outgoing) {
- /*
- * only cull zombies on outgoing tests,
- * since we may be at interrupt priority on
- * incoming messages.
- */
- list_del(&tp->tp_list);
- list_add(&tp->tp_list, &cull);
- }
- continue;
- }
-
- if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
- nid == tp->tp_nid) { /* fail this peer */
- fail = 1;
-
- if (tp->tp_threshold != LNET_MD_THRESH_INF) {
- tp->tp_threshold--;
- if (outgoing &&
- !tp->tp_threshold) {
- /* see above */
- list_del(&tp->tp_list);
- list_add(&tp->tp_list, &cull);
- }
- }
- break;
- }
- }
-
- lnet_net_unlock(0);
-
- list_for_each_entry_safe(tp, temp, &cull, tp_list) {
- list_del(&tp->tp_list);
-
- kfree(tp);
- }
-
- return fail;
-}
-
-unsigned int
-lnet_iov_nob(unsigned int niov, struct kvec *iov)
-{
- unsigned int nob = 0;
-
- LASSERT(!niov || iov);
- while (niov-- > 0)
- nob += (iov++)->iov_len;
-
- return nob;
-}
-EXPORT_SYMBOL(lnet_iov_nob);
-
-void
-lnet_copy_iov2iter(struct iov_iter *to,
- unsigned int nsiov, const struct kvec *siov,
- unsigned int soffset, unsigned int nob)
-{
- /* NB diov, siov are READ-ONLY */
- const char *s;
- size_t left;
-
- if (!nob)
- return;
-
- /* skip complete frags before 'soffset' */
- LASSERT(nsiov > 0);
- while (soffset >= siov->iov_len) {
- soffset -= siov->iov_len;
- siov++;
- nsiov--;
- LASSERT(nsiov > 0);
- }
-
- s = (char *)siov->iov_base + soffset;
- left = siov->iov_len - soffset;
- do {
- size_t n, copy = left;
-
- LASSERT(nsiov > 0);
-
- if (copy > nob)
- copy = nob;
- n = copy_to_iter(s, copy, to);
- if (n != copy)
- return;
- nob -= n;
-
- siov++;
- s = (char *)siov->iov_base;
- left = siov->iov_len;
- nsiov--;
- } while (nob > 0);
-}
-EXPORT_SYMBOL(lnet_copy_iov2iter);
-
-void
-lnet_copy_kiov2iter(struct iov_iter *to,
- unsigned int nsiov, const struct bio_vec *siov,
- unsigned int soffset, unsigned int nob)
-{
- if (!nob)
- return;
-
- LASSERT(!in_interrupt());
-
- LASSERT(nsiov > 0);
- while (soffset >= siov->bv_len) {
- soffset -= siov->bv_len;
- siov++;
- nsiov--;
- LASSERT(nsiov > 0);
- }
-
- do {
- size_t copy = siov->bv_len - soffset, n;
-
- LASSERT(nsiov > 0);
-
- if (copy > nob)
- copy = nob;
- n = copy_page_to_iter(siov->bv_page,
- siov->bv_offset + soffset,
- copy, to);
- if (n != copy)
- return;
- nob -= n;
- siov++;
- nsiov--;
- soffset = 0;
- } while (nob > 0);
-}
-EXPORT_SYMBOL(lnet_copy_kiov2iter);
-
-int
-lnet_extract_iov(int dst_niov, struct kvec *dst,
- int src_niov, const struct kvec *src,
- unsigned int offset, unsigned int len)
-{
- /*
- * Initialise 'dst' to the subset of 'src' starting at 'offset',
- * for exactly 'len' bytes, and return the number of entries.
- * NB not destructive to 'src'
- */
- unsigned int frag_len;
- unsigned int niov;
-
- if (!len) /* no data => */
- return 0; /* no frags */
-
- LASSERT(src_niov > 0);
- while (offset >= src->iov_len) { /* skip initial frags */
- offset -= src->iov_len;
- src_niov--;
- src++;
- LASSERT(src_niov > 0);
- }
-
- niov = 1;
- for (;;) {
- LASSERT(src_niov > 0);
- LASSERT((int)niov <= dst_niov);
-
- frag_len = src->iov_len - offset;
- dst->iov_base = ((char *)src->iov_base) + offset;
-
- if (len <= frag_len) {
- dst->iov_len = len;
- return niov;
- }
-
- dst->iov_len = frag_len;
-
- len -= frag_len;
- dst++;
- src++;
- niov++;
- src_niov--;
- offset = 0;
- }
-}
-EXPORT_SYMBOL(lnet_extract_iov);
-
-unsigned int
-lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
-{
- unsigned int nob = 0;
-
- LASSERT(!niov || kiov);
- while (niov-- > 0)
- nob += (kiov++)->bv_len;
-
- return nob;
-}
-EXPORT_SYMBOL(lnet_kiov_nob);
-
-int
-lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
- int src_niov, const struct bio_vec *src,
- unsigned int offset, unsigned int len)
-{
- /*
- * Initialise 'dst' to the subset of 'src' starting at 'offset',
- * for exactly 'len' bytes, and return the number of entries.
- * NB not destructive to 'src'
- */
- unsigned int frag_len;
- unsigned int niov;
-
- if (!len) /* no data => */
- return 0; /* no frags */
-
- LASSERT(src_niov > 0);
- while (offset >= src->bv_len) { /* skip initial frags */
- offset -= src->bv_len;
- src_niov--;
- src++;
- LASSERT(src_niov > 0);
- }
-
- niov = 1;
- for (;;) {
- LASSERT(src_niov > 0);
- LASSERT((int)niov <= dst_niov);
-
- frag_len = src->bv_len - offset;
- dst->bv_page = src->bv_page;
- dst->bv_offset = src->bv_offset + offset;
-
- if (len <= frag_len) {
- dst->bv_len = len;
- LASSERT(dst->bv_offset + dst->bv_len
- <= PAGE_SIZE);
- return niov;
- }
-
- dst->bv_len = frag_len;
- LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
-
- len -= frag_len;
- dst++;
- src++;
- niov++;
- src_niov--;
- offset = 0;
- }
-}
-EXPORT_SYMBOL(lnet_extract_kiov);
-
-void
-lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
- int delayed, unsigned int offset, unsigned int mlen,
- unsigned int rlen)
-{
- unsigned int niov = 0;
- struct kvec *iov = NULL;
- struct bio_vec *kiov = NULL;
- struct iov_iter to;
- int rc;
-
- LASSERT(!in_interrupt());
- LASSERT(!mlen || msg);
-
- if (msg) {
- LASSERT(msg->msg_receiving);
- LASSERT(!msg->msg_sending);
- LASSERT(rlen == msg->msg_len);
- LASSERT(mlen <= msg->msg_len);
- LASSERT(msg->msg_offset == offset);
- LASSERT(msg->msg_wanted == mlen);
-
- msg->msg_receiving = 0;
-
- if (mlen) {
- niov = msg->msg_niov;
- iov = msg->msg_iov;
- kiov = msg->msg_kiov;
-
- LASSERT(niov > 0);
- LASSERT(!iov != !kiov);
- }
- }
-
- if (iov) {
- iov_iter_kvec(&to, ITER_KVEC | READ, iov, niov, mlen + offset);
- iov_iter_advance(&to, offset);
- } else {
- iov_iter_bvec(&to, ITER_BVEC | READ, kiov, niov, mlen + offset);
- iov_iter_advance(&to, offset);
- }
- rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed, &to, rlen);
- if (rc < 0)
- lnet_finalize(ni, msg, rc);
-}
-
-static void
-lnet_setpayloadbuffer(struct lnet_msg *msg)
-{
- struct lnet_libmd *md = msg->msg_md;
-
- LASSERT(msg->msg_len > 0);
- LASSERT(!msg->msg_routing);
- LASSERT(md);
- LASSERT(!msg->msg_niov);
- LASSERT(!msg->msg_iov);
- LASSERT(!msg->msg_kiov);
-
- msg->msg_niov = md->md_niov;
- if (md->md_options & LNET_MD_KIOV)
- msg->msg_kiov = md->md_iov.kiov;
- else
- msg->msg_iov = md->md_iov.iov;
-}
-
-void
-lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
- unsigned int offset, unsigned int len)
-{
- msg->msg_type = type;
- msg->msg_target = target;
- msg->msg_len = len;
- msg->msg_offset = offset;
-
- if (len)
- lnet_setpayloadbuffer(msg);
-
- memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
- msg->msg_hdr.type = cpu_to_le32(type);
- msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
- msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
- /* src_nid will be set later */
- msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
- msg->msg_hdr.payload_length = cpu_to_le32(len);
-}
-
-static void
-lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
-{
- void *priv = msg->msg_private;
- int rc;
-
- LASSERT(!in_interrupt());
- LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
- (msg->msg_txcredit && msg->msg_peertxcredit));
-
- rc = ni->ni_lnd->lnd_send(ni, priv, msg);
- if (rc < 0)
- lnet_finalize(ni, msg, rc);
-}
-
-static int
-lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
-{
- int rc;
-
- LASSERT(!msg->msg_sending);
- LASSERT(msg->msg_receiving);
- LASSERT(!msg->msg_rx_ready_delay);
- LASSERT(ni->ni_lnd->lnd_eager_recv);
-
- msg->msg_rx_ready_delay = 1;
- rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg,
- &msg->msg_private);
- if (rc) {
- CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
- libcfs_nid2str(msg->msg_rxpeer->lp_nid),
- libcfs_id2str(msg->msg_target), rc);
- LASSERT(rc < 0); /* required by my callers */
- }
-
- return rc;
-}
-
-/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
-static void
-lnet_ni_query_locked(struct lnet_ni *ni, struct lnet_peer *lp)
-{
- unsigned long last_alive = 0;
-
- LASSERT(lnet_peer_aliveness_enabled(lp));
- LASSERT(ni->ni_lnd->lnd_query);
-
- lnet_net_unlock(lp->lp_cpt);
- ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive);
- lnet_net_lock(lp->lp_cpt);
-
- lp->lp_last_query = cfs_time_current();
-
- if (last_alive) /* NI has updated timestamp */
- lp->lp_last_alive = last_alive;
-}
-
-/* NB: always called with lnet_net_lock held */
-static inline int
-lnet_peer_is_alive(struct lnet_peer *lp, unsigned long now)
-{
- int alive;
- unsigned long deadline;
-
- LASSERT(lnet_peer_aliveness_enabled(lp));
-
- /* Trust lnet_notify() if it has more recent aliveness news, but
- * ignore the initial assumed death (see lnet_peers_start_down()).
- */
- if (!lp->lp_alive && lp->lp_alive_count > 0 &&
- cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
- return 0;
-
- deadline = cfs_time_add(lp->lp_last_alive,
- lp->lp_ni->ni_peertimeout * HZ);
- alive = cfs_time_after(deadline, now);
-
- /* Update obsolete lp_alive except for routers assumed to be dead
- * initially, because router checker would update aliveness in this
- * case, and moreover lp_last_alive at peer creation is assumed.
- */
- if (alive && !lp->lp_alive &&
- !(lnet_isrouter(lp) && !lp->lp_alive_count))
- lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
-
- return alive;
-}
-
-/*
- * NB: returns 1 when alive, 0 when dead, negative when error;
- * may drop the lnet_net_lock
- */
-static int
-lnet_peer_alive_locked(struct lnet_peer *lp)
-{
- unsigned long now = cfs_time_current();
-
- if (!lnet_peer_aliveness_enabled(lp))
- return -ENODEV;
-
- if (lnet_peer_is_alive(lp, now))
- return 1;
-
- /*
- * Peer appears dead, but we should avoid frequent NI queries (at
- * most once per lnet_queryinterval seconds).
- */
- if (lp->lp_last_query) {
- static const int lnet_queryinterval = 1;
-
- unsigned long next_query =
- cfs_time_add(lp->lp_last_query,
- lnet_queryinterval * HZ);
-
- if (time_before(now, next_query)) {
- if (lp->lp_alive)
- CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n",
- libcfs_nid2str(lp->lp_nid),
- (int)now, (int)next_query,
- lnet_queryinterval,
- lp->lp_ni->ni_peertimeout);
- return 0;
- }
- }
-
- /* query NI for latest aliveness news */
- lnet_ni_query_locked(lp->lp_ni, lp);
-
- if (lnet_peer_is_alive(lp, now))
- return 1;
-
- lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
- return 0;
-}
-
-/**
- * \param msg The message to be sent.
- * \param do_send True if lnet_ni_send() should be called in this function.
- * lnet_send() is going to lnet_net_unlock immediately after this, so
- * it sets do_send FALSE and I don't do the unlock/send/lock bit.
- *
- * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
- * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
- * \retval -EHOSTUNREACH If the next hop of the message appears dead.
- * \retval -ECANCELED If the MD of the message has been unlinked.
- */
-static int
-lnet_post_send_locked(struct lnet_msg *msg, int do_send)
-{
- struct lnet_peer *lp = msg->msg_txpeer;
- struct lnet_ni *ni = lp->lp_ni;
- int cpt = msg->msg_tx_cpt;
- struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];
-
- /* non-lnet_send() callers have checked before */
- LASSERT(!do_send || msg->msg_tx_delayed);
- LASSERT(!msg->msg_receiving);
- LASSERT(msg->msg_tx_committed);
-
- /* NB 'lp' is always the next hop */
- if (!(msg->msg_target.pid & LNET_PID_USERFLAG) &&
- !lnet_peer_alive_locked(lp)) {
- the_lnet.ln_counters[cpt]->drop_count++;
- the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
- lnet_net_unlock(cpt);
-
- CNETERR("Dropping message for %s: peer not alive\n",
- libcfs_id2str(msg->msg_target));
- if (do_send)
- lnet_finalize(ni, msg, -EHOSTUNREACH);
-
- lnet_net_lock(cpt);
- return -EHOSTUNREACH;
- }
-
- if (msg->msg_md &&
- (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) {
- lnet_net_unlock(cpt);
-
- CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
- libcfs_id2str(msg->msg_target));
- if (do_send)
- lnet_finalize(ni, msg, -ECANCELED);
-
- lnet_net_lock(cpt);
- return -ECANCELED;
- }
-
- if (!msg->msg_peertxcredit) {
- LASSERT((lp->lp_txcredits < 0) ==
- !list_empty(&lp->lp_txq));
-
- msg->msg_peertxcredit = 1;
- lp->lp_txqnob += msg->msg_len + sizeof(struct lnet_hdr);
- lp->lp_txcredits--;
-
- if (lp->lp_txcredits < lp->lp_mintxcredits)
- lp->lp_mintxcredits = lp->lp_txcredits;
-
- if (lp->lp_txcredits < 0) {
- msg->msg_tx_delayed = 1;
- list_add_tail(&msg->msg_list, &lp->lp_txq);
- return LNET_CREDIT_WAIT;
- }
- }
-
- if (!msg->msg_txcredit) {
- LASSERT((tq->tq_credits < 0) ==
- !list_empty(&tq->tq_delayed));
-
- msg->msg_txcredit = 1;
- tq->tq_credits--;
-
- if (tq->tq_credits < tq->tq_credits_min)
- tq->tq_credits_min = tq->tq_credits;
-
- if (tq->tq_credits < 0) {
- msg->msg_tx_delayed = 1;
- list_add_tail(&msg->msg_list, &tq->tq_delayed);
- return LNET_CREDIT_WAIT;
- }
- }
-
- if (do_send) {
- lnet_net_unlock(cpt);
- lnet_ni_send(ni, msg);
- lnet_net_lock(cpt);
- }
- return LNET_CREDIT_OK;
-}
-
-static struct lnet_rtrbufpool *
-lnet_msg2bufpool(struct lnet_msg *msg)
-{
- struct lnet_rtrbufpool *rbp;
- int cpt;
-
- LASSERT(msg->msg_rx_committed);
-
- cpt = msg->msg_rx_cpt;
- rbp = &the_lnet.ln_rtrpools[cpt][0];
-
- LASSERT(msg->msg_len <= LNET_MTU);
- while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
- rbp++;
- LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
- }
-
- return rbp;
-}
-
-static int
-lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
-{
- /*
- * lnet_parse is going to lnet_net_unlock immediately after this, so it
- * sets do_recv FALSE and I don't do the unlock/send/lock bit.
- * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
- * received or OK to receive
- */
- struct lnet_peer *lp = msg->msg_rxpeer;
- struct lnet_rtrbufpool *rbp;
- struct lnet_rtrbuf *rb;
-
- LASSERT(!msg->msg_iov);
- LASSERT(!msg->msg_kiov);
- LASSERT(!msg->msg_niov);
- LASSERT(msg->msg_routing);
- LASSERT(msg->msg_receiving);
- LASSERT(!msg->msg_sending);
-
- /* non-lnet_parse callers only receive delayed messages */
- LASSERT(!do_recv || msg->msg_rx_delayed);
-
- if (!msg->msg_peerrtrcredit) {
- LASSERT((lp->lp_rtrcredits < 0) ==
- !list_empty(&lp->lp_rtrq));
-
- msg->msg_peerrtrcredit = 1;
- lp->lp_rtrcredits--;
- if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
- lp->lp_minrtrcredits = lp->lp_rtrcredits;
-
- if (lp->lp_rtrcredits < 0) {
- /* must have checked eager_recv before here */
- LASSERT(msg->msg_rx_ready_delay);
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list, &lp->lp_rtrq);
- return LNET_CREDIT_WAIT;
- }
- }
-
- rbp = lnet_msg2bufpool(msg);
-
- if (!msg->msg_rtrcredit) {
- msg->msg_rtrcredit = 1;
- rbp->rbp_credits--;
- if (rbp->rbp_credits < rbp->rbp_mincredits)
- rbp->rbp_mincredits = rbp->rbp_credits;
-
- if (rbp->rbp_credits < 0) {
- /* must have checked eager_recv before here */
- LASSERT(msg->msg_rx_ready_delay);
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
- return LNET_CREDIT_WAIT;
- }
- }
-
- LASSERT(!list_empty(&rbp->rbp_bufs));
- rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
- list_del(&rb->rb_list);
-
- msg->msg_niov = rbp->rbp_npages;
- msg->msg_kiov = &rb->rb_kiov[0];
-
- if (do_recv) {
- int cpt = msg->msg_rx_cpt;
-
- lnet_net_unlock(cpt);
- lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
- 0, msg->msg_len, msg->msg_len);
- lnet_net_lock(cpt);
- }
- return LNET_CREDIT_OK;
-}
-
-void
-lnet_return_tx_credits_locked(struct lnet_msg *msg)
-{
- struct lnet_peer *txpeer = msg->msg_txpeer;
- struct lnet_msg *msg2;
-
- if (msg->msg_txcredit) {
- struct lnet_ni *ni = txpeer->lp_ni;
- struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
-
- /* give back NI txcredits */
- msg->msg_txcredit = 0;
-
- LASSERT((tq->tq_credits < 0) ==
- !list_empty(&tq->tq_delayed));
-
- tq->tq_credits++;
- if (tq->tq_credits <= 0) {
- msg2 = list_entry(tq->tq_delayed.next,
- struct lnet_msg, msg_list);
- list_del(&msg2->msg_list);
-
- LASSERT(msg2->msg_txpeer->lp_ni == ni);
- LASSERT(msg2->msg_tx_delayed);
-
- (void)lnet_post_send_locked(msg2, 1);
- }
- }
-
- if (msg->msg_peertxcredit) {
- /* give back peer txcredits */
- msg->msg_peertxcredit = 0;
-
- LASSERT((txpeer->lp_txcredits < 0) ==
- !list_empty(&txpeer->lp_txq));
-
- txpeer->lp_txqnob -= msg->msg_len + sizeof(struct lnet_hdr);
- LASSERT(txpeer->lp_txqnob >= 0);
-
- txpeer->lp_txcredits++;
- if (txpeer->lp_txcredits <= 0) {
- msg2 = list_entry(txpeer->lp_txq.next,
- struct lnet_msg, msg_list);
- list_del(&msg2->msg_list);
-
- LASSERT(msg2->msg_txpeer == txpeer);
- LASSERT(msg2->msg_tx_delayed);
-
- (void)lnet_post_send_locked(msg2, 1);
- }
- }
-
- if (txpeer) {
- msg->msg_txpeer = NULL;
- lnet_peer_decref_locked(txpeer);
- }
-}
-
-void
-lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
-{
- struct lnet_msg *msg;
-
- if (list_empty(&rbp->rbp_msgs))
- return;
- msg = list_entry(rbp->rbp_msgs.next,
- struct lnet_msg, msg_list);
- list_del(&msg->msg_list);
-
- (void)lnet_post_routed_recv_locked(msg, 1);
-}
-
-void
-lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
-{
- struct list_head drop;
- struct lnet_msg *msg;
- struct lnet_msg *tmp;
-
- INIT_LIST_HEAD(&drop);
-
- list_splice_init(list, &drop);
-
- lnet_net_unlock(cpt);
-
- list_for_each_entry_safe(msg, tmp, &drop, msg_list) {
- lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL,
- 0, 0, 0, msg->msg_hdr.payload_length);
- list_del_init(&msg->msg_list);
- lnet_finalize(NULL, msg, -ECANCELED);
- }
-
- lnet_net_lock(cpt);
-}
-
-void
-lnet_return_rx_credits_locked(struct lnet_msg *msg)
-{
- struct lnet_peer *rxpeer = msg->msg_rxpeer;
- struct lnet_msg *msg2;
-
- if (msg->msg_rtrcredit) {
- /* give back global router credits */
- struct lnet_rtrbuf *rb;
- struct lnet_rtrbufpool *rbp;
-
- /*
- * NB If a msg ever blocks for a buffer in rbp_msgs, it stays
- * there until it gets one allocated, or aborts the wait
- * itself
- */
- LASSERT(msg->msg_kiov);
-
- rb = container_of(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
- rbp = rb->rb_pool;
-
- msg->msg_kiov = NULL;
- msg->msg_rtrcredit = 0;
-
- LASSERT(rbp == lnet_msg2bufpool(msg));
-
- LASSERT((rbp->rbp_credits > 0) ==
- !list_empty(&rbp->rbp_bufs));
-
- /*
- * If routing is now turned off, we just drop this buffer and
- * don't bother trying to return credits.
- */
- if (!the_lnet.ln_routing) {
- lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
- goto routing_off;
- }
-
- /*
- * It is possible that a user has lowered the desired number of
- * buffers in this pool. Make sure we never put back
- * more buffers than the stated number.
- */
- if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
- /* Discard this buffer so we don't have too many. */
- lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
- rbp->rbp_nbuffers--;
- } else {
- list_add(&rb->rb_list, &rbp->rbp_bufs);
- rbp->rbp_credits++;
- if (rbp->rbp_credits <= 0)
- lnet_schedule_blocked_locked(rbp);
- }
- }
-
-routing_off:
- if (msg->msg_peerrtrcredit) {
- /* give back peer router credits */
- msg->msg_peerrtrcredit = 0;
-
- LASSERT((rxpeer->lp_rtrcredits < 0) ==
- !list_empty(&rxpeer->lp_rtrq));
-
- rxpeer->lp_rtrcredits++;
- /*
- * drop all messages which are queued to be routed on that
- * peer.
- */
- if (!the_lnet.ln_routing) {
- lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq,
- msg->msg_rx_cpt);
- } else if (rxpeer->lp_rtrcredits <= 0) {
- msg2 = list_entry(rxpeer->lp_rtrq.next,
- struct lnet_msg, msg_list);
- list_del(&msg2->msg_list);
-
- (void)lnet_post_routed_recv_locked(msg2, 1);
- }
- }
- if (rxpeer) {
- msg->msg_rxpeer = NULL;
- lnet_peer_decref_locked(rxpeer);
- }
-}
-
-static int
-lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
-{
- struct lnet_peer *p1 = r1->lr_gateway;
- struct lnet_peer *p2 = r2->lr_gateway;
- int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
- int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
-
- if (r1->lr_priority < r2->lr_priority)
- return 1;
-
- if (r1->lr_priority > r2->lr_priority)
- return -ERANGE;
-
- if (r1_hops < r2_hops)
- return 1;
-
- if (r1_hops > r2_hops)
- return -ERANGE;
-
- if (p1->lp_txqnob < p2->lp_txqnob)
- return 1;
-
- if (p1->lp_txqnob > p2->lp_txqnob)
- return -ERANGE;
-
- if (p1->lp_txcredits > p2->lp_txcredits)
- return 1;
-
- if (p1->lp_txcredits < p2->lp_txcredits)
- return -ERANGE;
-
- if (r1->lr_seq - r2->lr_seq <= 0)
- return 1;
-
- return -ERANGE;
-}
-
-static struct lnet_peer *
-lnet_find_route_locked(struct lnet_ni *ni, lnet_nid_t target,
- lnet_nid_t rtr_nid)
-{
- struct lnet_remotenet *rnet;
- struct lnet_route *route;
- struct lnet_route *best_route;
- struct lnet_route *last_route;
- struct lnet_peer *lp_best;
- struct lnet_peer *lp;
- int rc;
-
- /*
- * If @rtr_nid is not LNET_NID_ANY, return the gateway with
- * rtr_nid nid, otherwise find the best gateway I can use
- */
- rnet = lnet_find_net_locked(LNET_NIDNET(target));
- if (!rnet)
- return NULL;
-
- lp_best = NULL;
- best_route = NULL;
- last_route = NULL;
- list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
- lp = route->lr_gateway;
-
- if (!lnet_is_route_alive(route))
- continue;
-
- if (ni && lp->lp_ni != ni)
- continue;
-
- if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
- return lp;
-
- if (!lp_best) {
- best_route = route;
- last_route = route;
- lp_best = lp;
- continue;
- }
-
- /* no protection on below fields, but it's harmless */
- if (last_route->lr_seq - route->lr_seq < 0)
- last_route = route;
-
- rc = lnet_compare_routes(route, best_route);
- if (rc < 0)
- continue;
-
- best_route = route;
- lp_best = lp;
- }
-
- /*
- * set sequence number on the best router to the latest sequence + 1
- * so we can round-robin all routers, it's race and inaccurate but
- * harmless and functional
- */
- if (best_route)
- best_route->lr_seq = last_route->lr_seq + 1;
- return lp_best;
-}
-
-int
-lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
-{
- lnet_nid_t dst_nid = msg->msg_target.nid;
- struct lnet_ni *src_ni;
- struct lnet_ni *local_ni;
- struct lnet_peer *lp;
- int cpt;
- int cpt2;
- int rc;
-
- /*
- * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
- * but we might want to use pre-determined router for ACK/REPLY
- * in the future
- */
- /* NB: ni == interface pre-determined (ACK/REPLY) */
- LASSERT(!msg->msg_txpeer);
- LASSERT(!msg->msg_sending);
- LASSERT(!msg->msg_target_is_router);
- LASSERT(!msg->msg_receiving);
-
- msg->msg_sending = 1;
-
- LASSERT(!msg->msg_tx_committed);
- cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);
- again:
- lnet_net_lock(cpt);
-
- if (the_lnet.ln_shutdown) {
- lnet_net_unlock(cpt);
- return -ESHUTDOWN;
- }
-
- if (src_nid == LNET_NID_ANY) {
- src_ni = NULL;
- } else {
- src_ni = lnet_nid2ni_locked(src_nid, cpt);
- if (!src_ni) {
- lnet_net_unlock(cpt);
- LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
- libcfs_nid2str(dst_nid),
- libcfs_nid2str(src_nid));
- return -EINVAL;
- }
- LASSERT(!msg->msg_routing);
- }
-
- /* Is this for someone on a local network? */
- local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);
-
- if (local_ni) {
- if (!src_ni) {
- src_ni = local_ni;
- src_nid = src_ni->ni_nid;
- } else if (src_ni == local_ni) {
- lnet_ni_decref_locked(local_ni, cpt);
- } else {
- lnet_ni_decref_locked(local_ni, cpt);
- lnet_ni_decref_locked(src_ni, cpt);
- lnet_net_unlock(cpt);
- LCONSOLE_WARN("No route to %s via from %s\n",
- libcfs_nid2str(dst_nid),
- libcfs_nid2str(src_nid));
- return -EINVAL;
- }
-
- LASSERT(src_nid != LNET_NID_ANY);
- lnet_msg_commit(msg, cpt);
-
- if (!msg->msg_routing)
- msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
-
- if (src_ni == the_lnet.ln_loni) {
- /* No send credit hassles with LOLND */
- lnet_net_unlock(cpt);
- lnet_ni_send(src_ni, msg);
-
- lnet_net_lock(cpt);
- lnet_ni_decref_locked(src_ni, cpt);
- lnet_net_unlock(cpt);
- return 0;
- }
-
- rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
- /* lp has ref on src_ni; lose mine */
- lnet_ni_decref_locked(src_ni, cpt);
- if (rc) {
- lnet_net_unlock(cpt);
- LCONSOLE_WARN("Error %d finding peer %s\n", rc,
- libcfs_nid2str(dst_nid));
- /* ENOMEM or shutting down */
- return rc;
- }
- LASSERT(lp->lp_ni == src_ni);
- } else {
- /* sending to a remote network */
- lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
- if (!lp) {
- if (src_ni)
- lnet_ni_decref_locked(src_ni, cpt);
- lnet_net_unlock(cpt);
-
- LCONSOLE_WARN("No route to %s via %s (all routers down)\n",
- libcfs_id2str(msg->msg_target),
- libcfs_nid2str(src_nid));
- return -EHOSTUNREACH;
- }
-
- /*
- * rtr_nid is LNET_NID_ANY or NID of pre-determined router,
- * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't
- * pre-determined router, this can happen if router table
- * was changed when we release the lock
- */
- if (rtr_nid != lp->lp_nid) {
- cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
- if (cpt2 != cpt) {
- if (src_ni)
- lnet_ni_decref_locked(src_ni, cpt);
- lnet_net_unlock(cpt);
-
- rtr_nid = lp->lp_nid;
- cpt = cpt2;
- goto again;
- }
- }
-
- CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
- libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
- lnet_msgtyp2str(msg->msg_type), msg->msg_len);
-
- if (!src_ni) {
- src_ni = lp->lp_ni;
- src_nid = src_ni->ni_nid;
- } else {
- LASSERT(src_ni == lp->lp_ni);
- lnet_ni_decref_locked(src_ni, cpt);
- }
-
- lnet_peer_addref_locked(lp);
-
- LASSERT(src_nid != LNET_NID_ANY);
- lnet_msg_commit(msg, cpt);
-
- if (!msg->msg_routing) {
- /* I'm the source and now I know which NI to send on */
- msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
- }
-
- msg->msg_target_is_router = 1;
- msg->msg_target.nid = lp->lp_nid;
- msg->msg_target.pid = LNET_PID_LUSTRE;
- }
-
- /* 'lp' is our best choice of peer */
-
- LASSERT(!msg->msg_peertxcredit);
- LASSERT(!msg->msg_txcredit);
- LASSERT(!msg->msg_txpeer);
-
- msg->msg_txpeer = lp; /* msg takes my ref on lp */
-
- rc = lnet_post_send_locked(msg, 0);
- lnet_net_unlock(cpt);
-
- if (rc < 0)
- return rc;
-
- if (rc == LNET_CREDIT_OK)
- lnet_ni_send(src_ni, msg);
-
- return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */
-}
-
-void
-lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob)
-{
- lnet_net_lock(cpt);
- the_lnet.ln_counters[cpt]->drop_count++;
- the_lnet.ln_counters[cpt]->drop_length += nob;
- lnet_net_unlock(cpt);
-
- lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
-}
-
-static void
-lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
-{
- struct lnet_hdr *hdr = &msg->msg_hdr;
-
- if (msg->msg_wanted)
- lnet_setpayloadbuffer(msg);
-
- lnet_build_msg_event(msg, LNET_EVENT_PUT);
-
- /*
- * Must I ACK? If so I'll grab the ack_wmd out of the header and put
- * it back into the ACK during lnet_finalize()
- */
- msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
- !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE);
-
- lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
- msg->msg_offset, msg->msg_wanted, hdr->payload_length);
-}
-
-static int
-lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
-{
- struct lnet_hdr *hdr = &msg->msg_hdr;
- struct lnet_match_info info;
- bool ready_delay;
- int rc;
-
- /* Convert put fields to host byte order */
- le64_to_cpus(&hdr->msg.put.match_bits);
- le32_to_cpus(&hdr->msg.put.ptl_index);
- le32_to_cpus(&hdr->msg.put.offset);
-
- info.mi_id.nid = hdr->src_nid;
- info.mi_id.pid = hdr->src_pid;
- info.mi_opc = LNET_MD_OP_PUT;
- info.mi_portal = hdr->msg.put.ptl_index;
- info.mi_rlength = hdr->payload_length;
- info.mi_roffset = hdr->msg.put.offset;
- info.mi_mbits = hdr->msg.put.match_bits;
-
- msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv;
- ready_delay = msg->msg_rx_ready_delay;
-
- again:
- rc = lnet_ptl_match_md(&info, msg);
- switch (rc) {
- default:
- LBUG();
-
- case LNET_MATCHMD_OK:
- lnet_recv_put(ni, msg);
- return 0;
-
- case LNET_MATCHMD_NONE:
- /**
- * no eager_recv or has already called it, should
- * have been attached on delayed list
- */
- if (ready_delay)
- return 0;
-
- rc = lnet_ni_eager_recv(ni, msg);
- if (!rc) {
- ready_delay = true;
- goto again;
- }
- /* fall through */
-
- case LNET_MATCHMD_DROP:
- CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
- libcfs_id2str(info.mi_id), info.mi_portal,
- info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
-
- return -ENOENT; /* -ve: OK but no match */
- }
-}
-
-static int
-lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
-{
- struct lnet_match_info info;
- struct lnet_hdr *hdr = &msg->msg_hdr;
- struct lnet_handle_wire reply_wmd;
- int rc;
-
- /* Convert get fields to host byte order */
- le64_to_cpus(&hdr->msg.get.match_bits);
- le32_to_cpus(&hdr->msg.get.ptl_index);
- le32_to_cpus(&hdr->msg.get.sink_length);
- le32_to_cpus(&hdr->msg.get.src_offset);
-
- info.mi_id.nid = hdr->src_nid;
- info.mi_id.pid = hdr->src_pid;
- info.mi_opc = LNET_MD_OP_GET;
- info.mi_portal = hdr->msg.get.ptl_index;
- info.mi_rlength = hdr->msg.get.sink_length;
- info.mi_roffset = hdr->msg.get.src_offset;
- info.mi_mbits = hdr->msg.get.match_bits;
-
- rc = lnet_ptl_match_md(&info, msg);
- if (rc == LNET_MATCHMD_DROP) {
- CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
- libcfs_id2str(info.mi_id), info.mi_portal,
- info.mi_mbits, info.mi_roffset, info.mi_rlength);
- return -ENOENT; /* -ve: OK but no match */
- }
-
- LASSERT(rc == LNET_MATCHMD_OK);
-
- lnet_build_msg_event(msg, LNET_EVENT_GET);
-
- reply_wmd = hdr->msg.get.return_wmd;
-
- lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
- msg->msg_offset, msg->msg_wanted);
-
- msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
-
- if (rdma_get) {
- /* The LND completes the REPLY from her recv procedure */
- lnet_ni_recv(ni, msg->msg_private, msg, 0,
- msg->msg_offset, msg->msg_len, msg->msg_len);
- return 0;
- }
-
- lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
- msg->msg_receiving = 0;
-
- rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
- if (rc < 0) {
- /* didn't get as far as lnet_ni_send() */
- CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
- libcfs_nid2str(ni->ni_nid),
- libcfs_id2str(info.mi_id), rc);
-
- lnet_finalize(ni, msg, rc);
- }
-
- return 0;
-}
-
-static int
-lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
-{
- void *private = msg->msg_private;
- struct lnet_hdr *hdr = &msg->msg_hdr;
- struct lnet_process_id src = {0};
- struct lnet_libmd *md;
- int rlength;
- int mlength;
- int cpt;
-
- cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
- lnet_res_lock(cpt);
-
- src.nid = hdr->src_nid;
- src.pid = hdr->src_pid;
-
- /* NB handles only looked up by creator (no flips) */
- md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
- if (!md || !md->md_threshold || md->md_me) {
- CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- !md ? "invalid" : "inactive",
- hdr->msg.reply.dst_wmd.wh_interface_cookie,
- hdr->msg.reply.dst_wmd.wh_object_cookie);
- if (md && md->md_me)
- CERROR("REPLY MD also attached to portal %d\n",
- md->md_me->me_portal);
-
- lnet_res_unlock(cpt);
- return -ENOENT; /* -ve: OK but no match */
- }
-
- LASSERT(!md->md_offset);
-
- rlength = hdr->payload_length;
- mlength = min_t(uint, rlength, md->md_length);
-
- if (mlength < rlength &&
- !(md->md_options & LNET_MD_TRUNCATE)) {
- CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
- mlength);
- lnet_res_unlock(cpt);
- return -ENOENT; /* -ve: OK but no match */
- }
-
- CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
-
- lnet_msg_attach_md(msg, md, 0, mlength);
-
- if (mlength)
- lnet_setpayloadbuffer(msg);
-
- lnet_res_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_REPLY);
-
- lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
- return 0;
-}
-
-static int
-lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
-{
- struct lnet_hdr *hdr = &msg->msg_hdr;
- struct lnet_process_id src = {0};
- struct lnet_libmd *md;
- int cpt;
-
- src.nid = hdr->src_nid;
- src.pid = hdr->src_pid;
-
- /* Convert ack fields to host byte order */
- le64_to_cpus(&hdr->msg.ack.match_bits);
- le32_to_cpus(&hdr->msg.ack.mlength);
-
- cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
- lnet_res_lock(cpt);
-
- /* NB handles only looked up by creator (no flips) */
- md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
- if (!md || !md->md_threshold || md->md_me) {
- /* Don't moan; this is expected */
- CDEBUG(D_NET,
- "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- !md ? "invalid" : "inactive",
- hdr->msg.ack.dst_wmd.wh_interface_cookie,
- hdr->msg.ack.dst_wmd.wh_object_cookie);
- if (md && md->md_me)
- CERROR("Source MD also attached to portal %d\n",
- md->md_me->me_portal);
-
- lnet_res_unlock(cpt);
- return -ENOENT; /* -ve! */
- }
-
- CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- hdr->msg.ack.dst_wmd.wh_object_cookie);
-
- lnet_msg_attach_md(msg, md, 0, 0);
-
- lnet_res_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_ACK);
-
- lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
- return 0;
-}
-
-/**
- * \retval LNET_CREDIT_OK If \a msg is forwarded
- * \retval LNET_CREDIT_WAIT If \a msg is blocked because w/o buffer
- * \retval -ve error code
- */
-int
-lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
-{
- int rc = 0;
-
- if (!the_lnet.ln_routing)
- return -ECANCELED;
-
- if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
- lnet_msg2bufpool(msg)->rbp_credits <= 0) {
- if (!ni->ni_lnd->lnd_eager_recv) {
- msg->msg_rx_ready_delay = 1;
- } else {
- lnet_net_unlock(msg->msg_rx_cpt);
- rc = lnet_ni_eager_recv(ni, msg);
- lnet_net_lock(msg->msg_rx_cpt);
- }
- }
-
- if (!rc)
- rc = lnet_post_routed_recv_locked(msg, 0);
- return rc;
-}
-
-int
-lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
-{
- int rc;
-
- switch (msg->msg_type) {
- case LNET_MSG_ACK:
- rc = lnet_parse_ack(ni, msg);
- break;
- case LNET_MSG_PUT:
- rc = lnet_parse_put(ni, msg);
- break;
- case LNET_MSG_GET:
- rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
- break;
- case LNET_MSG_REPLY:
- rc = lnet_parse_reply(ni, msg);
- break;
- default: /* prevent an unused label if !kernel */
- LASSERT(0);
- return -EPROTO;
- }
-
- LASSERT(!rc || rc == -ENOENT);
- return rc;
-}
-
-char *
-lnet_msgtyp2str(int type)
-{
- switch (type) {
- case LNET_MSG_ACK:
- return "ACK";
- case LNET_MSG_PUT:
- return "PUT";
- case LNET_MSG_GET:
- return "GET";
- case LNET_MSG_REPLY:
- return "REPLY";
- case LNET_MSG_HELLO:
- return "HELLO";
- default:
- return "<UNKNOWN>";
- }
-}
-
-void
-lnet_print_hdr(struct lnet_hdr *hdr)
-{
- struct lnet_process_id src = {0};
- struct lnet_process_id dst = {0};
- char *type_str = lnet_msgtyp2str(hdr->type);
-
- src.nid = hdr->src_nid;
- src.pid = hdr->src_pid;
-
- dst.nid = hdr->dest_nid;
- dst.pid = hdr->dest_pid;
-
- CWARN("P3 Header at %p of type %s\n", hdr, type_str);
- CWARN(" From %s\n", libcfs_id2str(src));
- CWARN(" To %s\n", libcfs_id2str(dst));
-
- switch (hdr->type) {
- default:
- break;
-
- case LNET_MSG_PUT:
- CWARN(" Ptl index %d, ack md %#llx.%#llx, match bits %llu\n",
- hdr->msg.put.ptl_index,
- hdr->msg.put.ack_wmd.wh_interface_cookie,
- hdr->msg.put.ack_wmd.wh_object_cookie,
- hdr->msg.put.match_bits);
- CWARN(" Length %d, offset %d, hdr data %#llx\n",
- hdr->payload_length, hdr->msg.put.offset,
- hdr->msg.put.hdr_data);
- break;
-
- case LNET_MSG_GET:
- CWARN(" Ptl index %d, return md %#llx.%#llx, match bits %llu\n",
- hdr->msg.get.ptl_index,
- hdr->msg.get.return_wmd.wh_interface_cookie,
- hdr->msg.get.return_wmd.wh_object_cookie,
- hdr->msg.get.match_bits);
- CWARN(" Length %d, src offset %d\n",
- hdr->msg.get.sink_length,
- hdr->msg.get.src_offset);
- break;
-
- case LNET_MSG_ACK:
- CWARN(" dst md %#llx.%#llx, manipulated length %d\n",
- hdr->msg.ack.dst_wmd.wh_interface_cookie,
- hdr->msg.ack.dst_wmd.wh_object_cookie,
- hdr->msg.ack.mlength);
- break;
-
- case LNET_MSG_REPLY:
- CWARN(" dst md %#llx.%#llx, length %d\n",
- hdr->msg.reply.dst_wmd.wh_interface_cookie,
- hdr->msg.reply.dst_wmd.wh_object_cookie,
- hdr->payload_length);
- }
-}
-
-int
-lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
- void *private, int rdma_req)
-{
- int rc = 0;
- int cpt;
- int for_me;
- struct lnet_msg *msg;
- lnet_pid_t dest_pid;
- lnet_nid_t dest_nid;
- lnet_nid_t src_nid;
- __u32 payload_length;
- __u32 type;
-
- LASSERT(!in_interrupt());
-
- type = le32_to_cpu(hdr->type);
- src_nid = le64_to_cpu(hdr->src_nid);
- dest_nid = le64_to_cpu(hdr->dest_nid);
- dest_pid = le32_to_cpu(hdr->dest_pid);
- payload_length = le32_to_cpu(hdr->payload_length);
-
- for_me = (ni->ni_nid == dest_nid);
- cpt = lnet_cpt_of_nid(from_nid);
-
- switch (type) {
- case LNET_MSG_ACK:
- case LNET_MSG_GET:
- if (payload_length > 0) {
- CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type), payload_length);
- return -EPROTO;
- }
- break;
-
- case LNET_MSG_PUT:
- case LNET_MSG_REPLY:
- if (payload_length >
- (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
- CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type),
- payload_length,
- for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
- return -EPROTO;
- }
- break;
-
- default:
- CERROR("%s, src %s: Bad message type 0x%x\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid), type);
- return -EPROTO;
- }
-
- if (the_lnet.ln_routing &&
- ni->ni_last_alive != ktime_get_real_seconds()) {
- /* NB: so far here is the only place to set NI status to "up */
- lnet_ni_lock(ni);
- ni->ni_last_alive = ktime_get_real_seconds();
- if (ni->ni_status &&
- ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
- ni->ni_status->ns_status = LNET_NI_STATUS_UP;
- lnet_ni_unlock(ni);
- }
-
- /*
- * Regard a bad destination NID as a protocol error. Senders should
- * know what they're doing; if they don't they're misconfigured, buggy
- * or malicious so we chop them off at the knees :)
- */
- if (!for_me) {
- if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
- /* should have gone direct */
- CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- return -EPROTO;
- }
-
- if (lnet_islocalnid(dest_nid)) {
- /*
- * dest is another local NI; sender should have used
- * this node's NID on its own network
- */
- CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- return -EPROTO;
- }
-
- if (rdma_req && type == LNET_MSG_GET) {
- CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- return -EPROTO;
- }
-
- if (!the_lnet.ln_routing) {
- CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- goto drop;
- }
- }
-
- /*
- * Message looks OK; we're not going to return an error, so we MUST
- * call back lnd_recv() come what may...
- */
- if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer(src_nid, 0)) { /* shall we now? */
- CERROR("%s, src %s: Dropping %s to simulate failure\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type));
- goto drop;
- }
-
- if (!list_empty(&the_lnet.ln_drop_rules) &&
- lnet_drop_rule_match(hdr)) {
- CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
- goto drop;
- }
-
- msg = kzalloc(sizeof(*msg), GFP_NOFS);
- if (!msg) {
- CERROR("%s, src %s: Dropping %s (out of memory)\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type));
- goto drop;
- }
-
- /* msg zeroed by kzalloc()
- * i.e. flags all clear, pointers NULL etc
- */
- msg->msg_type = type;
- msg->msg_private = private;
- msg->msg_receiving = 1;
- msg->msg_rdma_get = rdma_req;
- msg->msg_wanted = payload_length;
- msg->msg_len = payload_length;
- msg->msg_offset = 0;
- msg->msg_hdr = *hdr;
- /* for building message event */
- msg->msg_from = from_nid;
- if (!for_me) {
- msg->msg_target.pid = dest_pid;
- msg->msg_target.nid = dest_nid;
- msg->msg_routing = 1;
-
- } else {
- /* convert common msg->hdr fields to host byteorder */
- msg->msg_hdr.type = type;
- msg->msg_hdr.src_nid = src_nid;
- le32_to_cpus(&msg->msg_hdr.src_pid);
- msg->msg_hdr.dest_nid = dest_nid;
- msg->msg_hdr.dest_pid = dest_pid;
- msg->msg_hdr.payload_length = payload_length;
- }
-
- lnet_net_lock(cpt);
- rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
- if (rc) {
- lnet_net_unlock(cpt);
- CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type), rc);
- kfree(msg);
- if (rc == -ESHUTDOWN)
- /* We are shutting down. Don't do anything more */
- return 0;
- goto drop;
- }
-
- if (lnet_isrouter(msg->msg_rxpeer)) {
- lnet_peer_set_alive(msg->msg_rxpeer);
- if (avoid_asym_router_failure &&
- LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
- /* received a remote message from router, update
- * remote NI status on this router.
- * NB: multi-hop routed message will be ignored.
- */
- lnet_router_ni_update_locked(msg->msg_rxpeer,
- LNET_NIDNET(src_nid));
- }
- }
-
- lnet_msg_commit(msg, cpt);
-
- /* message delay simulation */
- if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
- lnet_delay_rule_match_locked(hdr, msg))) {
- lnet_net_unlock(cpt);
- return 0;
- }
-
- if (!for_me) {
- rc = lnet_parse_forward_locked(ni, msg);
- lnet_net_unlock(cpt);
-
- if (rc < 0)
- goto free_drop;
-
- if (rc == LNET_CREDIT_OK) {
- lnet_ni_recv(ni, msg->msg_private, msg, 0,
- 0, payload_length, payload_length);
- }
- return 0;
- }
-
- lnet_net_unlock(cpt);
-
- rc = lnet_parse_local(ni, msg);
- if (rc)
- goto free_drop;
- return 0;
-
- free_drop:
- LASSERT(!msg->msg_md);
- lnet_finalize(ni, msg, rc);
-
- drop:
- lnet_drop_message(ni, cpt, private, payload_length);
- return 0;
-}
-EXPORT_SYMBOL(lnet_parse);
-
-void
-lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
-{
- while (!list_empty(head)) {
- struct lnet_process_id id = {0};
- struct lnet_msg *msg;
-
- msg = list_entry(head->next, struct lnet_msg, msg_list);
- list_del(&msg->msg_list);
-
- id.nid = msg->msg_hdr.src_nid;
- id.pid = msg->msg_hdr.src_pid;
-
- LASSERT(!msg->msg_md);
- LASSERT(msg->msg_rx_delayed);
- LASSERT(msg->msg_rxpeer);
- LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
-
- CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
- libcfs_id2str(id),
- msg->msg_hdr.msg.put.ptl_index,
- msg->msg_hdr.msg.put.match_bits,
- msg->msg_hdr.msg.put.offset,
- msg->msg_hdr.payload_length, reason);
-
- /*
- * NB I can't drop msg's ref on msg_rxpeer until after I've
- * called lnet_drop_message(), so I just hang onto msg as well
- * until that's done
- */
- lnet_drop_message(msg->msg_rxpeer->lp_ni,
- msg->msg_rxpeer->lp_cpt,
- msg->msg_private, msg->msg_len);
- /*
- * NB: message will not generate event because w/o attached MD,
- * but we still should give error code so lnet_msg_decommit()
- * can skip counters operations and other checks.
- */
- lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
- }
-}
-
-void
-lnet_recv_delayed_msg_list(struct list_head *head)
-{
- while (!list_empty(head)) {
- struct lnet_msg *msg;
- struct lnet_process_id id;
-
- msg = list_entry(head->next, struct lnet_msg, msg_list);
- list_del(&msg->msg_list);
-
- /*
- * md won't disappear under me, since each msg
- * holds a ref on it
- */
- id.nid = msg->msg_hdr.src_nid;
- id.pid = msg->msg_hdr.src_pid;
-
- LASSERT(msg->msg_rx_delayed);
- LASSERT(msg->msg_md);
- LASSERT(msg->msg_rxpeer);
- LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
-
- CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
- libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
- msg->msg_hdr.msg.put.match_bits,
- msg->msg_hdr.msg.put.offset,
- msg->msg_hdr.payload_length);
-
- lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
- }
-}
-
-/**
- * Initiate an asynchronous PUT operation.
- *
- * There are several events associated with a PUT: completion of the send on
- * the initiator node (LNET_EVENT_SEND), and when the send completes
- * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
- * that the operation was accepted by the target. The event LNET_EVENT_PUT is
- * used at the target node to indicate the completion of incoming data
- * delivery.
- *
- * The local events will be logged in the EQ associated with the MD pointed to
- * by \a mdh handle. Using a MD without an associated EQ results in these
- * events being discarded. In this case, the caller must have another
- * mechanism (e.g., a higher level protocol) for determining when it is safe
- * to modify the memory region associated with the MD.
- *
- * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
- * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
- *
- * \param self Indicates the NID of a local interface through which to send
- * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
- * \param mdh A handle for the MD that describes the memory to be sent. The MD
- * must be "free floating" (See LNetMDBind()).
- * \param ack Controls whether an acknowledgment is requested.
- * Acknowledgments are only sent when they are requested by the initiating
- * process and the target MD enables them.
- * \param target A process identifier for the target process.
- * \param portal The index in the \a target's portal table.
- * \param match_bits The match bits to use for MD selection at the target
- * process.
- * \param offset The offset into the target MD (only used when the target
- * MD has the LNET_MD_MANAGE_REMOTE option set).
- * \param hdr_data 64 bits of user data that can be included in the message
- * header. This data is written to an event queue entry at the target if an
- * EQ is present on the matching MD.
- *
- * \retval 0 Success, and only in this case events will be generated
- * and logged to EQ (if it exists).
- * \retval -EIO Simulated failure.
- * \retval -ENOMEM Memory allocation failure.
- * \retval -ENOENT Invalid MD object.
- *
- * \see lnet_event::hdr_data and lnet_event_kind.
- */
-int
-LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
- struct lnet_process_id target, unsigned int portal,
- __u64 match_bits, unsigned int offset,
- __u64 hdr_data)
-{
- struct lnet_msg *msg;
- struct lnet_libmd *md;
- int cpt;
- int rc;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer(target.nid, 1)) { /* shall we now? */
- CERROR("Dropping PUT to %s: simulated failure\n",
- libcfs_id2str(target));
- return -EIO;
- }
-
- msg = kzalloc(sizeof(*msg), GFP_NOFS);
- if (!msg) {
- CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
- libcfs_id2str(target));
- return -ENOMEM;
- }
- msg->msg_vmflush = !!memory_pressure_get();
-
- cpt = lnet_cpt_of_cookie(mdh.cookie);
- lnet_res_lock(cpt);
-
- md = lnet_handle2md(&mdh);
- if (!md || !md->md_threshold || md->md_me) {
- CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
- match_bits, portal, libcfs_id2str(target),
- !md ? -1 : md->md_threshold);
- if (md && md->md_me)
- CERROR("Source MD also attached to portal %d\n",
- md->md_me->me_portal);
- lnet_res_unlock(cpt);
-
- kfree(msg);
- return -ENOENT;
- }
-
- CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_id2str(target));
-
- lnet_msg_attach_md(msg, md, 0, 0);
-
- lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
-
- msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
- msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
- msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
- msg->msg_hdr.msg.put.hdr_data = hdr_data;
-
- /* NB handles only looked up by creator (no flips) */
- if (ack == LNET_ACK_REQ) {
- msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
- the_lnet.ln_interface_cookie;
- msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
- md->md_lh.lh_cookie;
- } else {
- msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
- LNET_WIRE_HANDLE_COOKIE_NONE;
- msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
- LNET_WIRE_HANDLE_COOKIE_NONE;
- }
-
- lnet_res_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_SEND);
-
- rc = lnet_send(self, msg, LNET_NID_ANY);
- if (rc) {
- CNETERR("Error sending PUT to %s: %d\n",
- libcfs_id2str(target), rc);
- lnet_finalize(NULL, msg, rc);
- }
-
- /* completion will be signalled by an event */
- return 0;
-}
-EXPORT_SYMBOL(LNetPut);
-
-struct lnet_msg *
-lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
-{
- /*
- * The LND can DMA direct to the GET md (i.e. no REPLY msg). This
- * returns a msg for the LND to pass to lnet_finalize() when the sink
- * data has been received.
- *
- * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
- * lnet_finalize() is called on it, so the LND must call this first
- */
- struct lnet_msg *msg = kzalloc(sizeof(*msg), GFP_NOFS);
- struct lnet_libmd *getmd = getmsg->msg_md;
- struct lnet_process_id peer_id = getmsg->msg_target;
- int cpt;
-
- LASSERT(!getmsg->msg_target_is_router);
- LASSERT(!getmsg->msg_routing);
-
- if (!msg) {
- CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
- goto drop;
- }
-
- cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
- lnet_res_lock(cpt);
-
- LASSERT(getmd->md_refcount > 0);
-
- if (!getmd->md_threshold) {
- CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
- getmd);
- lnet_res_unlock(cpt);
- goto drop;
- }
-
- LASSERT(!getmd->md_offset);
-
- CDEBUG(D_NET, "%s: Reply from %s md %p\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
-
- /* setup information for lnet_build_msg_event */
- msg->msg_from = peer_id.nid;
- msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
- msg->msg_hdr.src_nid = peer_id.nid;
- msg->msg_hdr.payload_length = getmd->md_length;
- msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
-
- lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
- lnet_res_unlock(cpt);
-
- cpt = lnet_cpt_of_nid(peer_id.nid);
-
- lnet_net_lock(cpt);
- lnet_msg_commit(msg, cpt);
- lnet_net_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_REPLY);
-
- return msg;
-
- drop:
- cpt = lnet_cpt_of_nid(peer_id.nid);
-
- lnet_net_lock(cpt);
- the_lnet.ln_counters[cpt]->drop_count++;
- the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
- lnet_net_unlock(cpt);
-
- kfree(msg);
-
- return NULL;
-}
-EXPORT_SYMBOL(lnet_create_reply_msg);
-
-void
-lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
- unsigned int len)
-{
- /*
- * Set the REPLY length, now the RDMA that elides the REPLY message has
- * completed and I know it.
- */
- LASSERT(reply);
- LASSERT(reply->msg_type == LNET_MSG_GET);
- LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
-
- /*
- * NB I trusted my peer to RDMA. If she tells me she's written beyond
- * the end of my buffer, I might as well be dead.
- */
- LASSERT(len <= reply->msg_ev.mlength);
-
- reply->msg_ev.mlength = len;
-}
-EXPORT_SYMBOL(lnet_set_reply_msg_len);
-
-/**
- * Initiate an asynchronous GET operation.
- *
- * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
- * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
- * the target node in the REPLY has been written to local MD.
- *
- * On the target node, an LNET_EVENT_GET is logged when the GET request
- * arrives and is accepted into a MD.
- *
- * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
- * \param mdh A handle for the MD that describes the memory into which the
- * requested data will be received. The MD must be "free floating"
- * (See LNetMDBind()).
- *
- * \retval 0 Success, and only in this case events will be generated
- * and logged to EQ (if it exists) of the MD.
- * \retval -EIO Simulated failure.
- * \retval -ENOMEM Memory allocation failure.
- * \retval -ENOENT Invalid MD object.
- */
-int
-LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
- struct lnet_process_id target, unsigned int portal,
- __u64 match_bits, unsigned int offset)
-{
- struct lnet_msg *msg;
- struct lnet_libmd *md;
- int cpt;
- int rc;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer(target.nid, 1)) { /* shall we now? */
- CERROR("Dropping GET to %s: simulated failure\n",
- libcfs_id2str(target));
- return -EIO;
- }
-
- msg = kzalloc(sizeof(*msg), GFP_NOFS);
- if (!msg) {
- CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
- libcfs_id2str(target));
- return -ENOMEM;
- }
-
- cpt = lnet_cpt_of_cookie(mdh.cookie);
- lnet_res_lock(cpt);
-
- md = lnet_handle2md(&mdh);
- if (!md || !md->md_threshold || md->md_me) {
- CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
- match_bits, portal, libcfs_id2str(target),
- !md ? -1 : md->md_threshold);
- if (md && md->md_me)
- CERROR("REPLY MD also attached to portal %d\n",
- md->md_me->me_portal);
-
- lnet_res_unlock(cpt);
-
- kfree(msg);
- return -ENOENT;
- }
-
- CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_id2str(target));
-
- lnet_msg_attach_md(msg, md, 0, 0);
-
- lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
-
- msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
- msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
- msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
- msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
-
- /* NB handles only looked up by creator (no flips) */
- msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
- the_lnet.ln_interface_cookie;
- msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
- md->md_lh.lh_cookie;
-
- lnet_res_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_SEND);
-
- rc = lnet_send(self, msg, LNET_NID_ANY);
- if (rc < 0) {
- CNETERR("Error sending GET to %s: %d\n",
- libcfs_id2str(target), rc);
- lnet_finalize(NULL, msg, rc);
- }
-
- /* completion will be signalled by an event */
- return 0;
-}
-EXPORT_SYMBOL(LNetGet);
-
-/**
- * Calculate distance to node at \a dstnid.
- *
- * \param dstnid Target NID.
- * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
- * is saved here.
- * \param orderp If not NULL, order of the route to reach \a dstnid is saved
- * here.
- *
- * \retval 0 If \a dstnid belongs to a local interface, and reserved option
- * local_nid_dist_zero is set, which is the default.
- * \retval positives Distance to target NID, i.e. number of hops plus one.
- * \retval -EHOSTUNREACH If \a dstnid is not reachable.
- */
-int
-LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
-{
- struct list_head *e;
- struct lnet_ni *ni;
- struct lnet_remotenet *rnet;
- __u32 dstnet = LNET_NIDNET(dstnid);
- int hops;
- int cpt;
- __u32 order = 2;
- struct list_head *rn_list;
-
- /*
- * if !local_nid_dist_zero, I don't return a distance of 0 ever
- * (when lustre sees a distance of 0, it substitutes 0@lo), so I
- * keep order 0 free for 0@lo and order 1 free for a local NID
- * match
- */
- LASSERT(the_lnet.ln_refcount > 0);
-
- cpt = lnet_net_lock_current();
-
- list_for_each(e, &the_lnet.ln_nis) {
- ni = list_entry(e, struct lnet_ni, ni_list);
-
- if (ni->ni_nid == dstnid) {
- if (srcnidp)
- *srcnidp = dstnid;
- if (orderp) {
- if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
- *orderp = 0;
- else
- *orderp = 1;
- }
- lnet_net_unlock(cpt);
-
- return local_nid_dist_zero ? 0 : 1;
- }
-
- if (LNET_NIDNET(ni->ni_nid) == dstnet) {
- /*
- * Check if ni was originally created in
- * current net namespace.
- * If not, assign order above 0xffff0000,
- * to make this ni not a priority.
- */
- if (!net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
- order += 0xffff0000;
-
- if (srcnidp)
- *srcnidp = ni->ni_nid;
- if (orderp)
- *orderp = order;
- lnet_net_unlock(cpt);
- return 1;
- }
-
- order++;
- }
-
- rn_list = lnet_net2rnethash(dstnet);
- list_for_each(e, rn_list) {
- rnet = list_entry(e, struct lnet_remotenet, lrn_list);
-
- if (rnet->lrn_net == dstnet) {
- struct lnet_route *route;
- struct lnet_route *shortest = NULL;
- __u32 shortest_hops = LNET_UNDEFINED_HOPS;
- __u32 route_hops;
-
- LASSERT(!list_empty(&rnet->lrn_routes));
-
- list_for_each_entry(route, &rnet->lrn_routes,
- lr_list) {
- route_hops = route->lr_hops;
- if (route_hops == LNET_UNDEFINED_HOPS)
- route_hops = 1;
- if (!shortest ||
- route_hops < shortest_hops) {
- shortest = route;
- shortest_hops = route_hops;
- }
- }
-
- LASSERT(shortest);
- hops = shortest_hops;
- if (srcnidp)
- *srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
- if (orderp)
- *orderp = order;
- lnet_net_unlock(cpt);
- return hops + 1;
- }
- order++;
- }
-
- lnet_net_unlock(cpt);
- return -EHOSTUNREACH;
-}
-EXPORT_SYMBOL(LNetDist);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
deleted file mode 100644
index 0091273c04b9..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ /dev/null
@@ -1,625 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-msg.c
- *
- * Message decoding, parsing and finalizing routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-
-void
-lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
-{
- memset(ev, 0, sizeof(*ev));
-
- ev->status = 0;
- ev->unlinked = 1;
- ev->type = LNET_EVENT_UNLINK;
- lnet_md_deconstruct(md, &ev->md);
- lnet_md2handle(&ev->md_handle, md);
-}
-
-/*
- * Don't need any lock, must be called after lnet_commit_md
- */
-void
-lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
-{
- struct lnet_hdr *hdr = &msg->msg_hdr;
- struct lnet_event *ev = &msg->msg_ev;
-
- LASSERT(!msg->msg_routing);
-
- ev->type = ev_type;
-
- if (ev_type == LNET_EVENT_SEND) {
- /* event for active message */
- ev->target.nid = le64_to_cpu(hdr->dest_nid);
- ev->target.pid = le32_to_cpu(hdr->dest_pid);
- ev->initiator.nid = LNET_NID_ANY;
- ev->initiator.pid = the_lnet.ln_pid;
- ev->sender = LNET_NID_ANY;
- } else {
- /* event for passive message */
- ev->target.pid = hdr->dest_pid;
- ev->target.nid = hdr->dest_nid;
- ev->initiator.pid = hdr->src_pid;
- ev->initiator.nid = hdr->src_nid;
- ev->rlength = hdr->payload_length;
- ev->sender = msg->msg_from;
- ev->mlength = msg->msg_wanted;
- ev->offset = msg->msg_offset;
- }
-
- switch (ev_type) {
- default:
- LBUG();
-
- case LNET_EVENT_PUT: /* passive PUT */
- ev->pt_index = hdr->msg.put.ptl_index;
- ev->match_bits = hdr->msg.put.match_bits;
- ev->hdr_data = hdr->msg.put.hdr_data;
- return;
-
- case LNET_EVENT_GET: /* passive GET */
- ev->pt_index = hdr->msg.get.ptl_index;
- ev->match_bits = hdr->msg.get.match_bits;
- ev->hdr_data = 0;
- return;
-
- case LNET_EVENT_ACK: /* ACK */
- ev->match_bits = hdr->msg.ack.match_bits;
- ev->mlength = hdr->msg.ack.mlength;
- return;
-
- case LNET_EVENT_REPLY: /* REPLY */
- return;
-
- case LNET_EVENT_SEND: /* active message */
- if (msg->msg_type == LNET_MSG_PUT) {
- ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index);
- ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
- ev->offset = le32_to_cpu(hdr->msg.put.offset);
- ev->mlength =
- ev->rlength = le32_to_cpu(hdr->payload_length);
- ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data);
-
- } else {
- LASSERT(msg->msg_type == LNET_MSG_GET);
- ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index);
- ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
- ev->mlength =
- ev->rlength = le32_to_cpu(hdr->msg.get.sink_length);
- ev->offset = le32_to_cpu(hdr->msg.get.src_offset);
- ev->hdr_data = 0;
- }
- return;
- }
-}
-
-void
-lnet_msg_commit(struct lnet_msg *msg, int cpt)
-{
- struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
- struct lnet_counters *counters = the_lnet.ln_counters[cpt];
-
- /* routed message can be committed for both receiving and sending */
- LASSERT(!msg->msg_tx_committed);
-
- if (msg->msg_sending) {
- LASSERT(!msg->msg_receiving);
-
- msg->msg_tx_cpt = cpt;
- msg->msg_tx_committed = 1;
- if (msg->msg_rx_committed) { /* routed message REPLY */
- LASSERT(msg->msg_onactivelist);
- return;
- }
- } else {
- LASSERT(!msg->msg_sending);
- msg->msg_rx_cpt = cpt;
- msg->msg_rx_committed = 1;
- }
-
- LASSERT(!msg->msg_onactivelist);
- msg->msg_onactivelist = 1;
- list_add(&msg->msg_activelist, &container->msc_active);
-
- counters->msgs_alloc++;
- if (counters->msgs_alloc > counters->msgs_max)
- counters->msgs_max = counters->msgs_alloc;
-}
-
-static void
-lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
-{
- struct lnet_counters *counters;
- struct lnet_event *ev = &msg->msg_ev;
-
- LASSERT(msg->msg_tx_committed);
- if (status)
- goto out;
-
- counters = the_lnet.ln_counters[msg->msg_tx_cpt];
- switch (ev->type) {
- default: /* routed message */
- LASSERT(msg->msg_routing);
- LASSERT(msg->msg_rx_committed);
- LASSERT(!ev->type);
-
- counters->route_length += msg->msg_len;
- counters->route_count++;
- goto out;
-
- case LNET_EVENT_PUT:
- /* should have been decommitted */
- LASSERT(!msg->msg_rx_committed);
- /* overwritten while sending ACK */
- LASSERT(msg->msg_type == LNET_MSG_ACK);
- msg->msg_type = LNET_MSG_PUT; /* fix type */
- break;
-
- case LNET_EVENT_SEND:
- LASSERT(!msg->msg_rx_committed);
- if (msg->msg_type == LNET_MSG_PUT)
- counters->send_length += msg->msg_len;
- break;
-
- case LNET_EVENT_GET:
- LASSERT(msg->msg_rx_committed);
- /*
- * overwritten while sending reply, we should never be
- * here for optimized GET
- */
- LASSERT(msg->msg_type == LNET_MSG_REPLY);
- msg->msg_type = LNET_MSG_GET; /* fix type */
- break;
- }
-
- counters->send_count++;
- out:
- lnet_return_tx_credits_locked(msg);
- msg->msg_tx_committed = 0;
-}
-
-static void
-lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
-{
- struct lnet_counters *counters;
- struct lnet_event *ev = &msg->msg_ev;
-
- LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
- LASSERT(msg->msg_rx_committed);
-
- if (status)
- goto out;
-
- counters = the_lnet.ln_counters[msg->msg_rx_cpt];
- switch (ev->type) {
- default:
- LASSERT(!ev->type);
- LASSERT(msg->msg_routing);
- goto out;
-
- case LNET_EVENT_ACK:
- LASSERT(msg->msg_type == LNET_MSG_ACK);
- break;
-
- case LNET_EVENT_GET:
- /*
- * type is "REPLY" if it's an optimized GET on passive side,
- * because optimized GET will never be committed for sending,
- * so message type wouldn't be changed back to "GET" by
- * lnet_msg_decommit_tx(), see details in lnet_parse_get()
- */
- LASSERT(msg->msg_type == LNET_MSG_REPLY ||
- msg->msg_type == LNET_MSG_GET);
- counters->send_length += msg->msg_wanted;
- break;
-
- case LNET_EVENT_PUT:
- LASSERT(msg->msg_type == LNET_MSG_PUT);
- break;
-
- case LNET_EVENT_REPLY:
- /*
- * type is "GET" if it's an optimized GET on active side,
- * see details in lnet_create_reply_msg()
- */
- LASSERT(msg->msg_type == LNET_MSG_GET ||
- msg->msg_type == LNET_MSG_REPLY);
- break;
- }
-
- counters->recv_count++;
- if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
- counters->recv_length += msg->msg_wanted;
-
- out:
- lnet_return_rx_credits_locked(msg);
- msg->msg_rx_committed = 0;
-}
-
-void
-lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
-{
- int cpt2 = cpt;
-
- LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
- LASSERT(msg->msg_onactivelist);
-
- if (msg->msg_tx_committed) { /* always decommit for sending first */
- LASSERT(cpt == msg->msg_tx_cpt);
- lnet_msg_decommit_tx(msg, status);
- }
-
- if (msg->msg_rx_committed) {
- /* forwarding msg committed for both receiving and sending */
- if (cpt != msg->msg_rx_cpt) {
- lnet_net_unlock(cpt);
- cpt2 = msg->msg_rx_cpt;
- lnet_net_lock(cpt2);
- }
- lnet_msg_decommit_rx(msg, status);
- }
-
- list_del(&msg->msg_activelist);
- msg->msg_onactivelist = 0;
-
- the_lnet.ln_counters[cpt2]->msgs_alloc--;
-
- if (cpt2 != cpt) {
- lnet_net_unlock(cpt2);
- lnet_net_lock(cpt);
- }
-}
-
-void
-lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
- unsigned int offset, unsigned int mlen)
-{
- /* NB: @offset and @len are only useful for receiving */
- /*
- * Here, we attach the MD on lnet_msg and mark it busy and
- * decrementing its threshold. Come what may, the lnet_msg "owns"
- * the MD until a call to lnet_msg_detach_md or lnet_finalize()
- * signals completion.
- */
- LASSERT(!msg->msg_routing);
-
- msg->msg_md = md;
- if (msg->msg_receiving) { /* committed for receiving */
- msg->msg_offset = offset;
- msg->msg_wanted = mlen;
- }
-
- md->md_refcount++;
- if (md->md_threshold != LNET_MD_THRESH_INF) {
- LASSERT(md->md_threshold > 0);
- md->md_threshold--;
- }
-
- /* build umd in event */
- lnet_md2handle(&msg->msg_ev.md_handle, md);
- lnet_md_deconstruct(md, &msg->msg_ev.md);
-}
-
-void
-lnet_msg_detach_md(struct lnet_msg *msg, int status)
-{
- struct lnet_libmd *md = msg->msg_md;
- int unlink;
-
- /* Now it's safe to drop my caller's ref */
- md->md_refcount--;
- LASSERT(md->md_refcount >= 0);
-
- unlink = lnet_md_unlinkable(md);
- if (md->md_eq) {
- msg->msg_ev.status = status;
- msg->msg_ev.unlinked = unlink;
- lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
- }
-
- if (unlink)
- lnet_md_unlink(md);
-
- msg->msg_md = NULL;
-}
-
-static int
-lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
-{
- struct lnet_handle_wire ack_wmd;
- int rc;
- int status = msg->msg_ev.status;
-
- LASSERT(msg->msg_onactivelist);
-
- if (!status && msg->msg_ack) {
- /* Only send an ACK if the PUT completed successfully */
-
- lnet_msg_decommit(msg, cpt, 0);
-
- msg->msg_ack = 0;
- lnet_net_unlock(cpt);
-
- LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
- LASSERT(!msg->msg_routing);
-
- ack_wmd = msg->msg_hdr.msg.put.ack_wmd;
-
- lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0);
-
- msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
- msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
- msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);
-
- /*
- * NB: we probably want to use NID of msg::msg_from as 3rd
- * parameter (router NID) if it's routed message
- */
- rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);
-
- lnet_net_lock(cpt);
- /*
- * NB: message is committed for sending, we should return
- * on success because LND will finalize this message later.
- *
- * Also, there is possibility that message is committed for
- * sending and also failed before delivering to LND,
- * i.e: ENOMEM, in that case we can't fall through either
- * because CPT for sending can be different with CPT for
- * receiving, so we should return back to lnet_finalize()
- * to make sure we are locking the correct partition.
- */
- return rc;
-
- } else if (!status && /* OK so far */
- (msg->msg_routing && !msg->msg_sending)) {
- /* not forwarded */
- LASSERT(!msg->msg_receiving); /* called back recv already */
- lnet_net_unlock(cpt);
-
- rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);
-
- lnet_net_lock(cpt);
- /*
- * NB: message is committed for sending, we should return
- * on success because LND will finalize this message later.
- *
- * Also, there is possibility that message is committed for
- * sending and also failed before delivering to LND,
- * i.e: ENOMEM, in that case we can't fall through either:
- * - The rule is message must decommit for sending first if
- * the it's committed for both sending and receiving
- * - CPT for sending can be different with CPT for receiving,
- * so we should return back to lnet_finalize() to make
- * sure we are locking the correct partition.
- */
- return rc;
- }
-
- lnet_msg_decommit(msg, cpt, status);
- kfree(msg);
- return 0;
-}
-
-void
-lnet_finalize(struct lnet_ni *ni, struct lnet_msg *msg, int status)
-{
- struct lnet_msg_container *container;
- int my_slot;
- int cpt;
- int rc;
- int i;
-
- LASSERT(!in_interrupt());
-
- if (!msg)
- return;
-
- msg->msg_ev.status = status;
-
- if (msg->msg_md) {
- cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
-
- lnet_res_lock(cpt);
- lnet_msg_detach_md(msg, status);
- lnet_res_unlock(cpt);
- }
-
- again:
- rc = 0;
- if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
- /* not committed to network yet */
- LASSERT(!msg->msg_onactivelist);
- kfree(msg);
- return;
- }
-
- /*
- * NB: routed message can be committed for both receiving and sending,
- * we should finalize in LIFO order and keep counters correct.
- * (finalize sending first then finalize receiving)
- */
- cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
- lnet_net_lock(cpt);
-
- container = the_lnet.ln_msg_containers[cpt];
- list_add_tail(&msg->msg_list, &container->msc_finalizing);
-
- /*
- * Recursion breaker. Don't complete the message here if I am (or
- * enough other threads are) already completing messages
- */
- my_slot = -1;
- for (i = 0; i < container->msc_nfinalizers; i++) {
- if (container->msc_finalizers[i] == current)
- break;
-
- if (my_slot < 0 && !container->msc_finalizers[i])
- my_slot = i;
- }
-
- if (i < container->msc_nfinalizers || my_slot < 0) {
- lnet_net_unlock(cpt);
- return;
- }
-
- container->msc_finalizers[my_slot] = current;
-
- while (!list_empty(&container->msc_finalizing)) {
- msg = list_entry(container->msc_finalizing.next,
- struct lnet_msg, msg_list);
-
- list_del(&msg->msg_list);
-
- /*
- * NB drops and regains the lnet lock if it actually does
- * anything, so my finalizing friends can chomp along too
- */
- rc = lnet_complete_msg_locked(msg, cpt);
- if (rc)
- break;
- }
-
- if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
- lnet_net_unlock(cpt);
- lnet_delay_rule_check();
- lnet_net_lock(cpt);
- }
-
- container->msc_finalizers[my_slot] = NULL;
- lnet_net_unlock(cpt);
-
- if (rc)
- goto again;
-}
-EXPORT_SYMBOL(lnet_finalize);
-
-void
-lnet_msg_container_cleanup(struct lnet_msg_container *container)
-{
- int count = 0;
-
- if (!container->msc_init)
- return;
-
- while (!list_empty(&container->msc_active)) {
- struct lnet_msg *msg;
-
- msg = list_entry(container->msc_active.next,
- struct lnet_msg, msg_activelist);
- LASSERT(msg->msg_onactivelist);
- msg->msg_onactivelist = 0;
- list_del(&msg->msg_activelist);
- kfree(msg);
- count++;
- }
-
- if (count > 0)
- CERROR("%d active msg on exit\n", count);
-
- kvfree(container->msc_finalizers);
- container->msc_finalizers = NULL;
- container->msc_init = 0;
-}
-
-int
-lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
-{
- container->msc_init = 1;
-
- INIT_LIST_HEAD(&container->msc_active);
- INIT_LIST_HEAD(&container->msc_finalizing);
-
- /* number of CPUs */
- container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
-
- container->msc_finalizers = kvzalloc_cpt(container->msc_nfinalizers *
- sizeof(*container->msc_finalizers),
- GFP_KERNEL, cpt);
-
- if (!container->msc_finalizers) {
- CERROR("Failed to allocate message finalizers\n");
- lnet_msg_container_cleanup(container);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void
-lnet_msg_containers_destroy(void)
-{
- struct lnet_msg_container *container;
- int i;
-
- if (!the_lnet.ln_msg_containers)
- return;
-
- cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
- lnet_msg_container_cleanup(container);
-
- cfs_percpt_free(the_lnet.ln_msg_containers);
- the_lnet.ln_msg_containers = NULL;
-}
-
-int
-lnet_msg_containers_create(void)
-{
- struct lnet_msg_container *container;
- int rc;
- int i;
-
- the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*container));
-
- if (!the_lnet.ln_msg_containers) {
- CERROR("Failed to allocate cpu-partition data for network\n");
- return -ENOMEM;
- }
-
- cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
- rc = lnet_msg_container_setup(container, i);
- if (rc) {
- lnet_msg_containers_destroy();
- return rc;
- }
- }
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
deleted file mode 100644
index fc47379c5938..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ /dev/null
@@ -1,987 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-ptl.c
- *
- * portal & match routines
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-
-/* NB: add /proc interfaces in upcoming patches */
-int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
-module_param(portal_rotor, int, 0644);
-MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
-
-static int
-lnet_ptl_match_type(unsigned int index, struct lnet_process_id match_id,
- __u64 mbits, __u64 ignore_bits)
-{
- struct lnet_portal *ptl = the_lnet.ln_portals[index];
- int unique;
-
- unique = !ignore_bits &&
- match_id.nid != LNET_NID_ANY &&
- match_id.pid != LNET_PID_ANY;
-
- LASSERT(!lnet_ptl_is_unique(ptl) || !lnet_ptl_is_wildcard(ptl));
-
- /* prefer to check w/o any lock */
- if (likely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl)))
- goto match;
-
- /* unset, new portal */
- lnet_ptl_lock(ptl);
- /* check again with lock */
- if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) {
- lnet_ptl_unlock(ptl);
- goto match;
- }
-
- /* still not set */
- if (unique)
- lnet_ptl_setopt(ptl, LNET_PTL_MATCH_UNIQUE);
- else
- lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD);
-
- lnet_ptl_unlock(ptl);
-
- return 1;
-
- match:
- if ((lnet_ptl_is_unique(ptl) && !unique) ||
- (lnet_ptl_is_wildcard(ptl) && unique))
- return 0;
- return 1;
-}
-
-static void
-lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt)
-{
- struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
- int i;
-
- /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
- LASSERT(lnet_ptl_is_wildcard(ptl));
-
- mtable->mt_enabled = 1;
-
- ptl->ptl_mt_maps[ptl->ptl_mt_nmaps] = cpt;
- for (i = ptl->ptl_mt_nmaps - 1; i >= 0; i--) {
- LASSERT(ptl->ptl_mt_maps[i] != cpt);
- if (ptl->ptl_mt_maps[i] < cpt)
- break;
-
- /* swap to order */
- ptl->ptl_mt_maps[i + 1] = ptl->ptl_mt_maps[i];
- ptl->ptl_mt_maps[i] = cpt;
- }
-
- ptl->ptl_mt_nmaps++;
-}
-
-static void
-lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
-{
- struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
- int i;
-
- /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
- LASSERT(lnet_ptl_is_wildcard(ptl));
-
- if (LNET_CPT_NUMBER == 1)
- return; /* never disable the only match-table */
-
- mtable->mt_enabled = 0;
-
- LASSERT(ptl->ptl_mt_nmaps > 0 &&
- ptl->ptl_mt_nmaps <= LNET_CPT_NUMBER);
-
- /* remove it from mt_maps */
- ptl->ptl_mt_nmaps--;
- for (i = 0; i < ptl->ptl_mt_nmaps; i++) {
- if (ptl->ptl_mt_maps[i] >= cpt) /* overwrite it */
- ptl->ptl_mt_maps[i] = ptl->ptl_mt_maps[i + 1];
- }
-}
-
-static int
-lnet_try_match_md(struct lnet_libmd *md,
- struct lnet_match_info *info, struct lnet_msg *msg)
-{
- /*
- * ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
- * lnet_match_blocked_msg() relies on this to avoid races
- */
- unsigned int offset;
- unsigned int mlength;
- struct lnet_me *me = md->md_me;
-
- /* MD exhausted */
- if (lnet_md_exhausted(md))
- return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
-
- /* mismatched MD op */
- if (!(md->md_options & info->mi_opc))
- return LNET_MATCHMD_NONE;
-
- /* mismatched ME nid/pid? */
- if (me->me_match_id.nid != LNET_NID_ANY &&
- me->me_match_id.nid != info->mi_id.nid)
- return LNET_MATCHMD_NONE;
-
- if (me->me_match_id.pid != LNET_PID_ANY &&
- me->me_match_id.pid != info->mi_id.pid)
- return LNET_MATCHMD_NONE;
-
- /* mismatched ME matchbits? */
- if ((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits)
- return LNET_MATCHMD_NONE;
-
- /* Hurrah! This _is_ a match; check it out... */
-
- if (!(md->md_options & LNET_MD_MANAGE_REMOTE))
- offset = md->md_offset;
- else
- offset = info->mi_roffset;
-
- if (md->md_options & LNET_MD_MAX_SIZE) {
- mlength = md->md_max_size;
- LASSERT(md->md_offset + mlength <= md->md_length);
- } else {
- mlength = md->md_length - offset;
- }
-
- if (info->mi_rlength <= mlength) { /* fits in allowed space */
- mlength = info->mi_rlength;
- } else if (!(md->md_options & LNET_MD_TRUNCATE)) {
- /* this packet _really_ is too big */
- CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
- libcfs_id2str(info->mi_id), info->mi_mbits,
- info->mi_rlength, md->md_length - offset, mlength);
-
- return LNET_MATCHMD_DROP;
- }
-
- /* Commit to this ME/MD */
- CDEBUG(D_NET, "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n",
- (info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get",
- info->mi_portal, libcfs_id2str(info->mi_id), mlength,
- info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset);
-
- lnet_msg_attach_md(msg, md, offset, mlength);
- md->md_offset = offset + mlength;
-
- if (!lnet_md_exhausted(md))
- return LNET_MATCHMD_OK;
-
- /*
- * Auto-unlink NOW, so the ME gets unlinked if required.
- * We bumped md->md_refcount above so the MD just gets flagged
- * for unlink when it is finalized.
- */
- if (md->md_flags & LNET_MD_FLAG_AUTO_UNLINK)
- lnet_md_unlink(md);
-
- return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
-}
-
-static struct lnet_match_table *
-lnet_match2mt(struct lnet_portal *ptl, struct lnet_process_id id, __u64 mbits)
-{
- if (LNET_CPT_NUMBER == 1)
- return ptl->ptl_mtables[0]; /* the only one */
-
- /* if it's a unique portal, return match-table hashed by NID */
- return lnet_ptl_is_unique(ptl) ?
- ptl->ptl_mtables[lnet_cpt_of_nid(id.nid)] : NULL;
-}
-
-struct lnet_match_table *
-lnet_mt_of_attach(unsigned int index, struct lnet_process_id id,
- __u64 mbits, __u64 ignore_bits, enum lnet_ins_pos pos)
-{
- struct lnet_portal *ptl;
- struct lnet_match_table *mtable;
-
- /* NB: called w/o lock */
- LASSERT(index < the_lnet.ln_nportals);
-
- if (!lnet_ptl_match_type(index, id, mbits, ignore_bits))
- return NULL;
-
- ptl = the_lnet.ln_portals[index];
-
- mtable = lnet_match2mt(ptl, id, mbits);
- if (mtable) /* unique portal or only one match-table */
- return mtable;
-
- /* it's a wildcard portal */
- switch (pos) {
- default:
- return NULL;
- case LNET_INS_BEFORE:
- case LNET_INS_AFTER:
- /*
- * posted by no affinity thread, always hash to specific
- * match-table to avoid buffer stealing which is heavy
- */
- return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER];
- case LNET_INS_LOCAL:
- /* posted by cpu-affinity thread */
- return ptl->ptl_mtables[lnet_cpt_current()];
- }
-}
-
-static struct lnet_match_table *
-lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
-{
- struct lnet_match_table *mtable;
- struct lnet_portal *ptl;
- unsigned int nmaps;
- unsigned int rotor;
- unsigned int cpt;
- bool routed;
-
- /* NB: called w/o lock */
- LASSERT(info->mi_portal < the_lnet.ln_nportals);
- ptl = the_lnet.ln_portals[info->mi_portal];
-
- LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));
-
- mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits);
- if (mtable)
- return mtable;
-
- /* it's a wildcard portal */
- routed = LNET_NIDNET(msg->msg_hdr.src_nid) !=
- LNET_NIDNET(msg->msg_hdr.dest_nid);
-
- if (portal_rotor == LNET_PTL_ROTOR_OFF ||
- (portal_rotor != LNET_PTL_ROTOR_ON && !routed)) {
- cpt = lnet_cpt_current();
- if (ptl->ptl_mtables[cpt]->mt_enabled)
- return ptl->ptl_mtables[cpt];
- }
-
- rotor = ptl->ptl_rotor++; /* get round-robin factor */
- if (portal_rotor == LNET_PTL_ROTOR_HASH_RT && routed)
- cpt = lnet_cpt_of_nid(msg->msg_hdr.src_nid);
- else
- cpt = rotor % LNET_CPT_NUMBER;
-
- if (!ptl->ptl_mtables[cpt]->mt_enabled) {
- /* is there any active entry for this portal? */
- nmaps = ptl->ptl_mt_nmaps;
- /* map to an active mtable to avoid heavy "stealing" */
- if (nmaps) {
- /*
- * NB: there is possibility that ptl_mt_maps is being
- * changed because we are not under protection of
- * lnet_ptl_lock, but it shouldn't hurt anything
- */
- cpt = ptl->ptl_mt_maps[rotor % nmaps];
- }
- }
-
- return ptl->ptl_mtables[cpt];
-}
-
-static int
-lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
-{
- __u64 *bmap;
- int i;
-
- if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
- return 0;
-
- if (pos < 0) { /* check all bits */
- for (i = 0; i < LNET_MT_EXHAUSTED_BMAP; i++) {
- if (mtable->mt_exhausted[i] != (__u64)(-1))
- return 0;
- }
- return 1;
- }
-
- LASSERT(pos <= LNET_MT_HASH_IGNORE);
- /* mtable::mt_mhash[pos] is marked as exhausted or not */
- bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
- pos &= (1 << LNET_MT_BITS_U64) - 1;
-
- return (*bmap & BIT(pos));
-}
-
-static void
-lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
-{
- __u64 *bmap;
-
- LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]));
- LASSERT(pos <= LNET_MT_HASH_IGNORE);
-
- /* set mtable::mt_mhash[pos] as exhausted/non-exhausted */
- bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
- pos &= (1 << LNET_MT_BITS_U64) - 1;
-
- if (!exhausted)
- *bmap &= ~(1ULL << pos);
- else
- *bmap |= 1ULL << pos;
-}
-
-struct list_head *
-lnet_mt_match_head(struct lnet_match_table *mtable,
- struct lnet_process_id id, __u64 mbits)
-{
- struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];
- unsigned long hash = mbits;
-
- if (!lnet_ptl_is_wildcard(ptl)) {
- hash += id.nid + id.pid;
-
- LASSERT(lnet_ptl_is_unique(ptl));
- hash = hash_long(hash, LNET_MT_HASH_BITS);
- }
- return &mtable->mt_mhash[hash & LNET_MT_HASH_MASK];
-}
-
-int
-lnet_mt_match_md(struct lnet_match_table *mtable,
- struct lnet_match_info *info, struct lnet_msg *msg)
-{
- struct list_head *head;
- struct lnet_me *me;
- struct lnet_me *tmp;
- int exhausted = 0;
- int rc;
-
- /* any ME with ignore bits? */
- if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
- head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
- else
- head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
- again:
- /* NB: only wildcard portal needs to return LNET_MATCHMD_EXHAUSTED */
- if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
- exhausted = LNET_MATCHMD_EXHAUSTED;
-
- list_for_each_entry_safe(me, tmp, head, me_list) {
- /* ME attached but MD not attached yet */
- if (!me->me_md)
- continue;
-
- LASSERT(me == me->me_md->md_me);
-
- rc = lnet_try_match_md(me->me_md, info, msg);
- if (!(rc & LNET_MATCHMD_EXHAUSTED))
- exhausted = 0; /* mlist is not empty */
-
- if (rc & LNET_MATCHMD_FINISH) {
- /*
- * don't return EXHAUSTED bit because we don't know
- * whether the mlist is empty or not
- */
- return rc & ~LNET_MATCHMD_EXHAUSTED;
- }
- }
-
- if (exhausted == LNET_MATCHMD_EXHAUSTED) { /* @head is exhausted */
- lnet_mt_set_exhausted(mtable, head - mtable->mt_mhash, 1);
- if (!lnet_mt_test_exhausted(mtable, -1))
- exhausted = 0;
- }
-
- if (!exhausted && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
- head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
- goto again; /* re-check MEs w/o ignore-bits */
- }
-
- if (info->mi_opc == LNET_MD_OP_GET ||
- !lnet_ptl_is_lazy(the_lnet.ln_portals[info->mi_portal]))
- return exhausted | LNET_MATCHMD_DROP;
-
- return exhausted | LNET_MATCHMD_NONE;
-}
-
-static int
-lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
-{
- int rc;
-
- /*
- * message arrived before any buffer posting on this portal,
- * simply delay or drop this message
- */
- if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)))
- return 0;
-
- lnet_ptl_lock(ptl);
- /* check it again with hold of lock */
- if (lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)) {
- lnet_ptl_unlock(ptl);
- return 0;
- }
-
- if (lnet_ptl_is_lazy(ptl)) {
- if (msg->msg_rx_ready_delay) {
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_delayed);
- }
- rc = LNET_MATCHMD_NONE;
- } else {
- rc = LNET_MATCHMD_DROP;
- }
-
- lnet_ptl_unlock(ptl);
- return rc;
-}
-
-static int
-lnet_ptl_match_delay(struct lnet_portal *ptl,
- struct lnet_match_info *info, struct lnet_msg *msg)
-{
- int first = ptl->ptl_mt_maps[0]; /* read w/o lock */
- int rc = 0;
- int i;
-
- /**
- * Steal buffer from other CPTs, and delay msg if nothing to
- * steal. This function is more expensive than a regular
- * match, but we don't expect it can happen a lot. The return
- * code contains one of LNET_MATCHMD_OK, LNET_MATCHMD_DROP, or
- * LNET_MATCHMD_NONE.
- */
- LASSERT(lnet_ptl_is_wildcard(ptl));
-
- for (i = 0; i < LNET_CPT_NUMBER; i++) {
- struct lnet_match_table *mtable;
- int cpt;
-
- cpt = (first + i) % LNET_CPT_NUMBER;
- mtable = ptl->ptl_mtables[cpt];
- if (i && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
- continue;
-
- lnet_res_lock(cpt);
- lnet_ptl_lock(ptl);
-
- if (!i) {
- /* The first try, add to stealing list. */
- list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_stealing);
- }
-
- if (!list_empty(&msg->msg_list)) {
- /* On stealing list. */
- rc = lnet_mt_match_md(mtable, info, msg);
-
- if ((rc & LNET_MATCHMD_EXHAUSTED) &&
- mtable->mt_enabled)
- lnet_ptl_disable_mt(ptl, cpt);
-
- if (rc & LNET_MATCHMD_FINISH) {
- /* Match found, remove from stealing list. */
- list_del_init(&msg->msg_list);
- } else if (i == LNET_CPT_NUMBER - 1 || /* (1) */
- !ptl->ptl_mt_nmaps || /* (2) */
- (ptl->ptl_mt_nmaps == 1 && /* (3) */
- ptl->ptl_mt_maps[0] == cpt)) {
- /**
- * No match found, and this is either
- * (1) the last cpt to check, or
- * (2) there is no active cpt, or
- * (3) this is the only active cpt.
- * There is nothing to steal: delay or
- * drop the message.
- */
- list_del_init(&msg->msg_list);
-
- if (lnet_ptl_is_lazy(ptl)) {
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_delayed);
- rc = LNET_MATCHMD_NONE;
- } else {
- rc = LNET_MATCHMD_DROP;
- }
- } else {
- /* Do another iteration. */
- rc = 0;
- }
- } else {
- /**
- * No longer on stealing list: another thread
- * matched the message in lnet_ptl_attach_md().
- * We are now expected to handle the message.
- */
- rc = !msg->msg_md ?
- LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
- }
-
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(cpt);
-
- /**
- * Note that test (1) above ensures that we always
- * exit the loop through this break statement.
- *
- * LNET_MATCHMD_NONE means msg was added to the
- * delayed queue, and we may no longer reference it
- * after lnet_ptl_unlock() and lnet_res_unlock().
- */
- if (rc & (LNET_MATCHMD_FINISH | LNET_MATCHMD_NONE))
- break;
- }
-
- return rc;
-}
-
-int
-lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
-{
- struct lnet_match_table *mtable;
- struct lnet_portal *ptl;
- int rc;
-
- CDEBUG(D_NET, "Request from %s of length %d into portal %d MB=%#llx\n",
- libcfs_id2str(info->mi_id), info->mi_rlength, info->mi_portal,
- info->mi_mbits);
-
- if (info->mi_portal >= the_lnet.ln_nportals) {
- CERROR("Invalid portal %d not in [0-%d]\n",
- info->mi_portal, the_lnet.ln_nportals);
- return LNET_MATCHMD_DROP;
- }
-
- ptl = the_lnet.ln_portals[info->mi_portal];
- rc = lnet_ptl_match_early(ptl, msg);
- if (rc) /* matched or delayed early message */
- return rc;
-
- mtable = lnet_mt_of_match(info, msg);
- lnet_res_lock(mtable->mt_cpt);
-
- if (the_lnet.ln_shutdown) {
- rc = LNET_MATCHMD_DROP;
- goto out1;
- }
-
- rc = lnet_mt_match_md(mtable, info, msg);
- if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) {
- lnet_ptl_lock(ptl);
- lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
- lnet_ptl_unlock(ptl);
- }
-
- if (rc & LNET_MATCHMD_FINISH) /* matched or dropping */
- goto out1;
-
- if (!msg->msg_rx_ready_delay)
- goto out1;
-
- LASSERT(lnet_ptl_is_lazy(ptl));
- LASSERT(!msg->msg_rx_delayed);
-
- /* NB: we don't expect "delay" can happen a lot */
- if (lnet_ptl_is_unique(ptl) || LNET_CPT_NUMBER == 1) {
- lnet_ptl_lock(ptl);
-
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
-
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(mtable->mt_cpt);
- rc = LNET_MATCHMD_NONE;
- } else {
- lnet_res_unlock(mtable->mt_cpt);
- rc = lnet_ptl_match_delay(ptl, info, msg);
- }
-
- /* LNET_MATCHMD_NONE means msg was added to the delay queue */
- if (rc & LNET_MATCHMD_NONE) {
- CDEBUG(D_NET,
- "Delaying %s from %s ptl %d MB %#llx off %d len %d\n",
- info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
- libcfs_id2str(info->mi_id), info->mi_portal,
- info->mi_mbits, info->mi_roffset, info->mi_rlength);
- }
- goto out0;
- out1:
- lnet_res_unlock(mtable->mt_cpt);
- out0:
- /* EXHAUSTED bit is only meaningful for internal functions */
- return rc & ~LNET_MATCHMD_EXHAUSTED;
-}
-
-void
-lnet_ptl_detach_md(struct lnet_me *me, struct lnet_libmd *md)
-{
- LASSERT(me->me_md == md && md->md_me == me);
-
- me->me_md = NULL;
- md->md_me = NULL;
-}
-
-/* called with lnet_res_lock held */
-void
-lnet_ptl_attach_md(struct lnet_me *me, struct lnet_libmd *md,
- struct list_head *matches, struct list_head *drops)
-{
- struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
- struct lnet_match_table *mtable;
- struct list_head *head;
- struct lnet_msg *tmp;
- struct lnet_msg *msg;
- int exhausted = 0;
- int cpt;
-
- LASSERT(!md->md_refcount); /* a brand new MD */
-
- me->me_md = md;
- md->md_me = me;
-
- cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
- mtable = ptl->ptl_mtables[cpt];
-
- if (list_empty(&ptl->ptl_msg_stealing) &&
- list_empty(&ptl->ptl_msg_delayed) &&
- !lnet_mt_test_exhausted(mtable, me->me_pos))
- return;
-
- lnet_ptl_lock(ptl);
- head = &ptl->ptl_msg_stealing;
- again:
- list_for_each_entry_safe(msg, tmp, head, msg_list) {
- struct lnet_match_info info;
- struct lnet_hdr *hdr;
- int rc;
-
- LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing);
-
- hdr = &msg->msg_hdr;
- info.mi_id.nid = hdr->src_nid;
- info.mi_id.pid = hdr->src_pid;
- info.mi_opc = LNET_MD_OP_PUT;
- info.mi_portal = hdr->msg.put.ptl_index;
- info.mi_rlength = hdr->payload_length;
- info.mi_roffset = hdr->msg.put.offset;
- info.mi_mbits = hdr->msg.put.match_bits;
-
- rc = lnet_try_match_md(md, &info, msg);
-
- exhausted = (rc & LNET_MATCHMD_EXHAUSTED);
- if (rc & LNET_MATCHMD_NONE) {
- if (exhausted)
- break;
- continue;
- }
-
- /* Hurrah! This _is_ a match */
- LASSERT(rc & LNET_MATCHMD_FINISH);
- list_del_init(&msg->msg_list);
-
- if (head == &ptl->ptl_msg_stealing) {
- if (exhausted)
- break;
- /* stealing thread will handle the message */
- continue;
- }
-
- if (rc & LNET_MATCHMD_OK) {
- list_add_tail(&msg->msg_list, matches);
-
- CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
- libcfs_id2str(info.mi_id),
- info.mi_portal, info.mi_mbits,
- info.mi_roffset, info.mi_rlength);
- } else {
- list_add_tail(&msg->msg_list, drops);
- }
-
- if (exhausted)
- break;
- }
-
- if (!exhausted && head == &ptl->ptl_msg_stealing) {
- head = &ptl->ptl_msg_delayed;
- goto again;
- }
-
- if (lnet_ptl_is_wildcard(ptl) && !exhausted) {
- lnet_mt_set_exhausted(mtable, me->me_pos, 0);
- if (!mtable->mt_enabled)
- lnet_ptl_enable_mt(ptl, cpt);
- }
-
- lnet_ptl_unlock(ptl);
-}
-
-static void
-lnet_ptl_cleanup(struct lnet_portal *ptl)
-{
- struct lnet_match_table *mtable;
- int i;
-
- if (!ptl->ptl_mtables) /* uninitialized portal */
- return;
-
- LASSERT(list_empty(&ptl->ptl_msg_delayed));
- LASSERT(list_empty(&ptl->ptl_msg_stealing));
- cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
- struct list_head *mhash;
- struct lnet_me *me;
- int j;
-
- if (!mtable->mt_mhash) /* uninitialized match-table */
- continue;
-
- mhash = mtable->mt_mhash;
- /* cleanup ME */
- for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
- while (!list_empty(&mhash[j])) {
- me = list_entry(mhash[j].next,
- struct lnet_me, me_list);
- CERROR("Active ME %p on exit\n", me);
- list_del(&me->me_list);
- kfree(me);
- }
- }
- /* the extra entry is for MEs with ignore bits */
- kvfree(mhash);
- }
-
- cfs_percpt_free(ptl->ptl_mtables);
- ptl->ptl_mtables = NULL;
-}
-
-static int
-lnet_ptl_setup(struct lnet_portal *ptl, int index)
-{
- struct lnet_match_table *mtable;
- struct list_head *mhash;
- int i;
- int j;
-
- ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct lnet_match_table));
- if (!ptl->ptl_mtables) {
- CERROR("Failed to create match table for portal %d\n", index);
- return -ENOMEM;
- }
-
- ptl->ptl_index = index;
- INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
- INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
- spin_lock_init(&ptl->ptl_lock);
- cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
- /* the extra entry is for MEs with ignore bits */
- mhash = kvzalloc_cpt(sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1),
- GFP_KERNEL, i);
- if (!mhash) {
- CERROR("Failed to create match hash for portal %d\n",
- index);
- goto failed;
- }
-
- memset(&mtable->mt_exhausted[0], -1,
- sizeof(mtable->mt_exhausted[0]) *
- LNET_MT_EXHAUSTED_BMAP);
- mtable->mt_mhash = mhash;
- for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++)
- INIT_LIST_HEAD(&mhash[j]);
-
- mtable->mt_portal = index;
- mtable->mt_cpt = i;
- }
-
- return 0;
- failed:
- lnet_ptl_cleanup(ptl);
- return -ENOMEM;
-}
-
-void
-lnet_portals_destroy(void)
-{
- int i;
-
- if (!the_lnet.ln_portals)
- return;
-
- for (i = 0; i < the_lnet.ln_nportals; i++)
- lnet_ptl_cleanup(the_lnet.ln_portals[i]);
-
- cfs_array_free(the_lnet.ln_portals);
- the_lnet.ln_portals = NULL;
- the_lnet.ln_nportals = 0;
-}
-
-int
-lnet_portals_create(void)
-{
- int size;
- int i;
-
- size = offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]);
-
- the_lnet.ln_portals = cfs_array_alloc(MAX_PORTALS, size);
- if (!the_lnet.ln_portals) {
- CERROR("Failed to allocate portals table\n");
- return -ENOMEM;
- }
- the_lnet.ln_nportals = MAX_PORTALS;
-
- for (i = 0; i < the_lnet.ln_nportals; i++) {
- if (lnet_ptl_setup(the_lnet.ln_portals[i], i)) {
- lnet_portals_destroy();
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/**
- * Turn on the lazy portal attribute. Use with caution!
- *
- * This portal attribute only affects incoming PUT requests to the portal,
- * and is off by default. By default, if there's no matching MD for an
- * incoming PUT request, it is simply dropped. With the lazy attribute on,
- * such requests are queued indefinitely until either a matching MD is
- * posted to the portal or the lazy attribute is turned off.
- *
- * It would prevent dropped requests, however it should be regarded as the
- * last line of defense - i.e. users must keep a close watch on active
- * buffers on a lazy portal and once it becomes too low post more buffers as
- * soon as possible. This is because delayed requests usually have detrimental
- * effects on underlying network connections. A few delayed requests often
- * suffice to bring an underlying connection to a complete halt, due to flow
- * control mechanisms.
- *
- * There's also a DOS attack risk. If users don't post match-all MDs on a
- * lazy portal, a malicious peer can easily stop a service by sending some
- * PUT requests with match bits that won't match any MD. A routed server is
- * especially vulnerable since the connections to its neighbor routers are
- * shared among all clients.
- *
- * \param portal Index of the portal to enable the lazy attribute on.
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a portal is not a valid index.
- */
-int
-LNetSetLazyPortal(int portal)
-{
- struct lnet_portal *ptl;
-
- if (portal < 0 || portal >= the_lnet.ln_nportals)
- return -EINVAL;
-
- CDEBUG(D_NET, "Setting portal %d lazy\n", portal);
- ptl = the_lnet.ln_portals[portal];
-
- lnet_res_lock(LNET_LOCK_EX);
- lnet_ptl_lock(ptl);
-
- lnet_ptl_setopt(ptl, LNET_PTL_LAZY);
-
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(LNET_LOCK_EX);
-
- return 0;
-}
-EXPORT_SYMBOL(LNetSetLazyPortal);
-
-int
-lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason)
-{
- struct lnet_portal *ptl;
- LIST_HEAD(zombies);
-
- if (portal < 0 || portal >= the_lnet.ln_nportals)
- return -EINVAL;
-
- ptl = the_lnet.ln_portals[portal];
-
- lnet_res_lock(LNET_LOCK_EX);
- lnet_ptl_lock(ptl);
-
- if (!lnet_ptl_is_lazy(ptl)) {
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(LNET_LOCK_EX);
- return 0;
- }
-
- if (ni) {
- struct lnet_msg *msg, *tmp;
-
- /* grab all messages which are on the NI passed in */
- list_for_each_entry_safe(msg, tmp, &ptl->ptl_msg_delayed,
- msg_list) {
- if (msg->msg_rxpeer->lp_ni == ni)
- list_move(&msg->msg_list, &zombies);
- }
- } else {
- if (the_lnet.ln_shutdown)
- CWARN("Active lazy portal %d on exit\n", portal);
- else
- CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
-
- /* grab all the blocked messages atomically */
- list_splice_init(&ptl->ptl_msg_delayed, &zombies);
-
- lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
- }
-
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(LNET_LOCK_EX);
-
- lnet_drop_delayed_msg_list(&zombies, reason);
-
- return 0;
-}
-
-/**
- * Turn off the lazy portal attribute. Delayed requests on the portal,
- * if any, will be all dropped when this function returns.
- *
- * \param portal Index of the portal to disable the lazy attribute on.
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a portal is not a valid index.
- */
-int
-LNetClearLazyPortal(int portal)
-{
- return lnet_clear_lazy_portal(NULL, portal,
- "Clearing lazy portal attr");
-}
-EXPORT_SYMBOL(LNetClearLazyPortal);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
deleted file mode 100644
index 1bee667802b0..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ /dev/null
@@ -1,586 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- */
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/if.h>
-#include <linux/in.h>
-#include <linux/net.h>
-#include <linux/file.h>
-#include <linux/pagemap.h>
-/* For sys_open & sys_close */
-#include <linux/syscalls.h>
-#include <net/sock.h>
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-lnet.h>
-
-static int
-kernel_sock_unlocked_ioctl(struct file *filp, int cmd, unsigned long arg)
-{
- mm_segment_t oldfs = get_fs();
- int err;
-
- set_fs(KERNEL_DS);
- err = filp->f_op->unlocked_ioctl(filp, cmd, arg);
- set_fs(oldfs);
-
- return err;
-}
-
-static int
-lnet_sock_ioctl(int cmd, unsigned long arg)
-{
- struct file *sock_filp;
- struct socket *sock;
- int rc;
-
- rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
- if (rc) {
- CERROR("Can't create socket: %d\n", rc);
- return rc;
- }
-
- sock_filp = sock_alloc_file(sock, 0, NULL);
- if (IS_ERR(sock_filp))
- return PTR_ERR(sock_filp);
-
- rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg);
-
- fput(sock_filp);
- return rc;
-}
-
-int
-lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
-{
- struct ifreq ifr;
- int nob;
- int rc;
- __be32 val;
-
- nob = strnlen(name, IFNAMSIZ);
- if (nob == IFNAMSIZ) {
- CERROR("Interface name %s too long\n", name);
- return -EINVAL;
- }
-
- BUILD_BUG_ON(sizeof(ifr.ifr_name) < IFNAMSIZ);
-
- if (strlen(name) > sizeof(ifr.ifr_name) - 1)
- return -E2BIG;
- strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
-
- rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
- if (rc) {
- CERROR("Can't get flags for interface %s\n", name);
- return rc;
- }
-
- if (!(ifr.ifr_flags & IFF_UP)) {
- CDEBUG(D_NET, "Interface %s down\n", name);
- *up = 0;
- *ip = *mask = 0;
- return 0;
- }
- *up = 1;
-
- if (strlen(name) > sizeof(ifr.ifr_name) - 1)
- return -E2BIG;
- strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
-
- ifr.ifr_addr.sa_family = AF_INET;
- rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
- if (rc) {
- CERROR("Can't get IP address for interface %s\n", name);
- return rc;
- }
-
- val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr;
- *ip = ntohl(val);
-
- if (strlen(name) > sizeof(ifr.ifr_name) - 1)
- return -E2BIG;
- strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
-
- ifr.ifr_addr.sa_family = AF_INET;
- rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr);
- if (rc) {
- CERROR("Can't get netmask for interface %s\n", name);
- return rc;
- }
-
- val = ((struct sockaddr_in *)&ifr.ifr_netmask)->sin_addr.s_addr;
- *mask = ntohl(val);
-
- return 0;
-}
-EXPORT_SYMBOL(lnet_ipif_query);
-
-int
-lnet_ipif_enumerate(char ***namesp)
-{
- /* Allocate and fill in 'names', returning # interfaces/error */
- char **names;
- int toobig;
- int nalloc;
- int nfound;
- struct ifreq *ifr;
- struct ifconf ifc;
- int rc;
- int nob;
- int i;
-
- nalloc = 16; /* first guess at max interfaces */
- toobig = 0;
- for (;;) {
- if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
- toobig = 1;
- nalloc = PAGE_SIZE / sizeof(*ifr);
- CWARN("Too many interfaces: only enumerating first %d\n",
- nalloc);
- }
-
- ifr = kzalloc(nalloc * sizeof(*ifr), GFP_KERNEL);
- if (!ifr) {
- CERROR("ENOMEM enumerating up to %d interfaces\n",
- nalloc);
- rc = -ENOMEM;
- goto out0;
- }
-
- ifc.ifc_buf = (char *)ifr;
- ifc.ifc_len = nalloc * sizeof(*ifr);
-
- rc = lnet_sock_ioctl(SIOCGIFCONF, (unsigned long)&ifc);
- if (rc < 0) {
- CERROR("Error %d enumerating interfaces\n", rc);
- goto out1;
- }
-
- LASSERT(!rc);
-
- nfound = ifc.ifc_len / sizeof(*ifr);
- LASSERT(nfound <= nalloc);
-
- if (nfound < nalloc || toobig)
- break;
-
- kfree(ifr);
- nalloc *= 2;
- }
-
- if (!nfound)
- goto out1;
-
- names = kzalloc(nfound * sizeof(*names), GFP_KERNEL);
- if (!names) {
- rc = -ENOMEM;
- goto out1;
- }
-
- for (i = 0; i < nfound; i++) {
- nob = strnlen(ifr[i].ifr_name, IFNAMSIZ);
- if (nob == IFNAMSIZ) {
- /* no space for terminating NULL */
- CERROR("interface name %.*s too long (%d max)\n",
- nob, ifr[i].ifr_name, IFNAMSIZ);
- rc = -ENAMETOOLONG;
- goto out2;
- }
-
- names[i] = kmalloc(IFNAMSIZ, GFP_KERNEL);
- if (!names[i]) {
- rc = -ENOMEM;
- goto out2;
- }
-
- memcpy(names[i], ifr[i].ifr_name, nob);
- names[i][nob] = 0;
- }
-
- *namesp = names;
- rc = nfound;
-
-out2:
- if (rc < 0)
- lnet_ipif_free_enumeration(names, nfound);
-out1:
- kfree(ifr);
-out0:
- return rc;
-}
-EXPORT_SYMBOL(lnet_ipif_enumerate);
-
-void
-lnet_ipif_free_enumeration(char **names, int n)
-{
- int i;
-
- LASSERT(n > 0);
-
- for (i = 0; i < n && names[i]; i++)
- kfree(names[i]);
-
- kfree(names);
-}
-EXPORT_SYMBOL(lnet_ipif_free_enumeration);
-
-int
-lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
-{
- int rc;
- long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
- unsigned long then;
- struct timeval tv;
- struct kvec iov = { .iov_base = buffer, .iov_len = nob };
- struct msghdr msg = {NULL,};
-
- LASSERT(nob > 0);
- /*
- * Caller may pass a zero timeout if she thinks the socket buffer is
- * empty enough to take the whole message immediately
- */
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, nob);
- for (;;) {
- msg.msg_flags = !timeout ? MSG_DONTWAIT : 0;
- if (timeout) {
- /* Set send timeout to remaining time */
- jiffies_to_timeval(jiffies_left, &tv);
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
- (char *)&tv, sizeof(tv));
- if (rc) {
- CERROR("Can't set socket send timeout %ld.%06d: %d\n",
- (long)tv.tv_sec, (int)tv.tv_usec, rc);
- return rc;
- }
- }
-
- then = jiffies;
- rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
- jiffies_left -= jiffies - then;
-
- if (rc < 0)
- return rc;
-
- if (!rc) {
- CERROR("Unexpected zero rc\n");
- return -ECONNABORTED;
- }
-
- if (!msg_data_left(&msg))
- break;
-
- if (jiffies_left <= 0)
- return -EAGAIN;
- }
- return 0;
-}
-EXPORT_SYMBOL(lnet_sock_write);
-
-int
-lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
-{
- int rc;
- long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
- unsigned long then;
- struct timeval tv;
- struct kvec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_flags = 0
- };
-
- LASSERT(nob > 0);
- LASSERT(jiffies_left > 0);
-
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, nob);
-
- for (;;) {
- /* Set receive timeout to remaining time */
- jiffies_to_timeval(jiffies_left, &tv);
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
- (char *)&tv, sizeof(tv));
- if (rc) {
- CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
- (long)tv.tv_sec, (int)tv.tv_usec, rc);
- return rc;
- }
-
- then = jiffies;
- rc = sock_recvmsg(sock, &msg, 0);
- jiffies_left -= jiffies - then;
-
- if (rc < 0)
- return rc;
-
- if (!rc)
- return -ECONNRESET;
-
- if (!msg_data_left(&msg))
- return 0;
-
- if (jiffies_left <= 0)
- return -ETIMEDOUT;
- }
-}
-EXPORT_SYMBOL(lnet_sock_read);
-
-static int
-lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
- int local_port)
-{
- struct sockaddr_in locaddr;
- struct socket *sock;
- int rc;
- int option;
-
- /* All errors are fatal except bind failure if the port is in use */
- *fatal = 1;
-
- rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
- *sockp = sock;
- if (rc) {
- CERROR("Can't create socket: %d\n", rc);
- return rc;
- }
-
- option = 1;
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
- (char *)&option, sizeof(option));
- if (rc) {
- CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
- goto failed;
- }
-
- if (local_ip || local_port) {
- memset(&locaddr, 0, sizeof(locaddr));
- locaddr.sin_family = AF_INET;
- locaddr.sin_port = htons(local_port);
- if (!local_ip)
- locaddr.sin_addr.s_addr = htonl(INADDR_ANY);
- else
- locaddr.sin_addr.s_addr = htonl(local_ip);
-
- rc = kernel_bind(sock, (struct sockaddr *)&locaddr,
- sizeof(locaddr));
- if (rc == -EADDRINUSE) {
- CDEBUG(D_NET, "Port %d already in use\n", local_port);
- *fatal = 0;
- goto failed;
- }
- if (rc) {
- CERROR("Error trying to bind to port %d: %d\n",
- local_port, rc);
- goto failed;
- }
- }
- return 0;
-
-failed:
- sock_release(sock);
- return rc;
-}
-
-int
-lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize)
-{
- int option;
- int rc;
-
- if (txbufsize) {
- option = txbufsize;
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
- (char *)&option, sizeof(option));
- if (rc) {
- CERROR("Can't set send buffer %d: %d\n",
- option, rc);
- return rc;
- }
- }
-
- if (rxbufsize) {
- option = rxbufsize;
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
- (char *)&option, sizeof(option));
- if (rc) {
- CERROR("Can't set receive buffer %d: %d\n",
- option, rc);
- return rc;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(lnet_sock_setbuf);
-
-int
-lnet_sock_getaddr(struct socket *sock, bool remote, __u32 *ip, int *port)
-{
- struct sockaddr_in sin;
- int rc;
-
- if (remote)
- rc = kernel_getpeername(sock, (struct sockaddr *)&sin);
- else
- rc = kernel_getsockname(sock, (struct sockaddr *)&sin);
- if (rc < 0) {
- CERROR("Error %d getting sock %s IP/port\n",
- rc, remote ? "peer" : "local");
- return rc;
- }
-
- if (ip)
- *ip = ntohl(sin.sin_addr.s_addr);
-
- if (port)
- *port = ntohs(sin.sin_port);
-
- return 0;
-}
-EXPORT_SYMBOL(lnet_sock_getaddr);
-
-int
-lnet_sock_getbuf(struct socket *sock, int *txbufsize, int *rxbufsize)
-{
- if (txbufsize)
- *txbufsize = sock->sk->sk_sndbuf;
-
- if (rxbufsize)
- *rxbufsize = sock->sk->sk_rcvbuf;
-
- return 0;
-}
-EXPORT_SYMBOL(lnet_sock_getbuf);
-
-int
-lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
- int backlog)
-{
- int fatal;
- int rc;
-
- rc = lnet_sock_create(sockp, &fatal, local_ip, local_port);
- if (rc) {
- if (!fatal)
- CERROR("Can't create socket: port %d already in use\n",
- local_port);
- return rc;
- }
-
- rc = kernel_listen(*sockp, backlog);
- if (!rc)
- return 0;
-
- CERROR("Can't set listen backlog %d: %d\n", backlog, rc);
- sock_release(*sockp);
- return rc;
-}
-
-int
-lnet_sock_accept(struct socket **newsockp, struct socket *sock)
-{
- wait_queue_entry_t wait;
- struct socket *newsock;
- int rc;
-
- /*
- * XXX this should add a ref to sock->ops->owner, if
- * TCP could be a module
- */
- rc = sock_create_lite(PF_PACKET, sock->type, IPPROTO_TCP, &newsock);
- if (rc) {
- CERROR("Can't allocate socket\n");
- return rc;
- }
-
- newsock->ops = sock->ops;
-
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
- if (rc == -EAGAIN) {
- /* Nothing ready, so wait for activity */
- init_waitqueue_entry(&wait, current);
- add_wait_queue(sk_sleep(sock->sk), &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- remove_wait_queue(sk_sleep(sock->sk), &wait);
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
- }
-
- if (rc)
- goto failed;
-
- *newsockp = newsock;
- return 0;
-
-failed:
- sock_release(newsock);
- return rc;
-}
-
-int
-lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip,
- int local_port, __u32 peer_ip, int peer_port)
-{
- struct sockaddr_in srvaddr;
- int rc;
-
- rc = lnet_sock_create(sockp, fatal, local_ip, local_port);
- if (rc)
- return rc;
-
- memset(&srvaddr, 0, sizeof(srvaddr));
- srvaddr.sin_family = AF_INET;
- srvaddr.sin_port = htons(peer_port);
- srvaddr.sin_addr.s_addr = htonl(peer_ip);
-
- rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr,
- sizeof(srvaddr), 0);
- if (!rc)
- return 0;
-
- /*
- * EADDRNOTAVAIL probably means we're already connected to the same
- * peer/port on the same local port on a differently typed
- * connection. Let our caller retry with a different local
- * port...
- */
- *fatal = !(rc == -EADDRNOTAVAIL);
-
- CDEBUG_LIMIT(*fatal ? D_NETERROR : D_NET,
- "Error %d connecting %pI4h/%d -> %pI4h/%d\n", rc,
- &local_ip, local_port, &peer_ip, peer_port);
-
- sock_release(*sockp);
- return rc;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
deleted file mode 100644
index 7456b989e451..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-
-static int
-lolnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
-{
- LASSERT(!lntmsg->msg_routing);
- LASSERT(!lntmsg->msg_target_is_router);
-
- return lnet_parse(ni, &lntmsg->msg_hdr, ni->ni_nid, lntmsg, 0);
-}
-
-static int
-lolnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
- int delayed, struct iov_iter *to, unsigned int rlen)
-{
- struct lnet_msg *sendmsg = private;
-
- if (lntmsg) { /* not discarding */
- if (sendmsg->msg_iov)
- lnet_copy_iov2iter(to,
- sendmsg->msg_niov,
- sendmsg->msg_iov,
- sendmsg->msg_offset,
- iov_iter_count(to));
- else
- lnet_copy_kiov2iter(to,
- sendmsg->msg_niov,
- sendmsg->msg_kiov,
- sendmsg->msg_offset,
- iov_iter_count(to));
-
- lnet_finalize(ni, lntmsg, 0);
- }
-
- lnet_finalize(ni, sendmsg, 0);
- return 0;
-}
-
-static int lolnd_instanced;
-
-static void
-lolnd_shutdown(struct lnet_ni *ni)
-{
- CDEBUG(D_NET, "shutdown\n");
- LASSERT(lolnd_instanced);
-
- lolnd_instanced = 0;
-}
-
-static int
-lolnd_startup(struct lnet_ni *ni)
-{
- LASSERT(ni->ni_lnd == &the_lolnd);
- LASSERT(!lolnd_instanced);
- lolnd_instanced = 1;
-
- return 0;
-}
-
-struct lnet_lnd the_lolnd = {
- /* .lnd_list = */ {&the_lolnd.lnd_list, &the_lolnd.lnd_list},
- /* .lnd_refcount = */ 0,
- /* .lnd_type = */ LOLND,
- /* .lnd_startup = */ lolnd_startup,
- /* .lnd_shutdown = */ lolnd_shutdown,
- /* .lnt_ctl = */ NULL,
- /* .lnd_send = */ lolnd_send,
- /* .lnd_recv = */ lolnd_recv,
- /* .lnd_eager_recv = */ NULL,
- /* .lnd_notify = */ NULL,
- /* .lnd_accept = */ NULL
-};
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
deleted file mode 100644
index c0c4723f72fd..000000000000
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ /dev/null
@@ -1,223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-#include <uapi/linux/lnet/lnet-dlc.h>
-
-static int config_on_load;
-module_param(config_on_load, int, 0444);
-MODULE_PARM_DESC(config_on_load, "configure network at module load");
-
-static struct mutex lnet_config_mutex;
-
-static int
-lnet_configure(void *arg)
-{
- /* 'arg' only there so I can be passed to cfs_create_thread() */
- int rc = 0;
-
- mutex_lock(&lnet_config_mutex);
-
- if (!the_lnet.ln_niinit_self) {
- rc = try_module_get(THIS_MODULE);
-
- if (rc != 1)
- goto out;
-
- rc = LNetNIInit(LNET_PID_LUSTRE);
- if (rc >= 0) {
- the_lnet.ln_niinit_self = 1;
- rc = 0;
- } else {
- module_put(THIS_MODULE);
- }
- }
-
-out:
- mutex_unlock(&lnet_config_mutex);
- return rc;
-}
-
-static int
-lnet_unconfigure(void)
-{
- int refcount;
-
- mutex_lock(&lnet_config_mutex);
-
- if (the_lnet.ln_niinit_self) {
- the_lnet.ln_niinit_self = 0;
- LNetNIFini();
- module_put(THIS_MODULE);
- }
-
- mutex_lock(&the_lnet.ln_api_mutex);
- refcount = the_lnet.ln_refcount;
- mutex_unlock(&the_lnet.ln_api_mutex);
-
- mutex_unlock(&lnet_config_mutex);
- return !refcount ? 0 : -EBUSY;
-}
-
-static int
-lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr)
-{
- struct lnet_ioctl_config_data *conf =
- (struct lnet_ioctl_config_data *)hdr;
- int rc;
-
- if (conf->cfg_hdr.ioc_len < sizeof(*conf))
- return -EINVAL;
-
- mutex_lock(&lnet_config_mutex);
- if (!the_lnet.ln_niinit_self) {
- rc = -EINVAL;
- goto out_unlock;
- }
- rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, conf);
-out_unlock:
- mutex_unlock(&lnet_config_mutex);
-
- return rc;
-}
-
-static int
-lnet_dyn_unconfigure(struct libcfs_ioctl_hdr *hdr)
-{
- struct lnet_ioctl_config_data *conf =
- (struct lnet_ioctl_config_data *)hdr;
- int rc;
-
- if (conf->cfg_hdr.ioc_len < sizeof(*conf))
- return -EINVAL;
-
- mutex_lock(&lnet_config_mutex);
- if (!the_lnet.ln_niinit_self) {
- rc = -EINVAL;
- goto out_unlock;
- }
- rc = lnet_dyn_del_ni(conf->cfg_net);
-out_unlock:
- mutex_unlock(&lnet_config_mutex);
-
- return rc;
-}
-
-static int
-lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
-{
- int rc;
-
- switch (cmd) {
- case IOC_LIBCFS_CONFIGURE: {
- struct libcfs_ioctl_data *data =
- (struct libcfs_ioctl_data *)hdr;
-
- if (data->ioc_hdr.ioc_len < sizeof(*data))
- return -EINVAL;
-
- the_lnet.ln_nis_from_mod_params = data->ioc_flags;
- return lnet_configure(NULL);
- }
-
- case IOC_LIBCFS_UNCONFIGURE:
- return lnet_unconfigure();
-
- case IOC_LIBCFS_ADD_NET:
- return lnet_dyn_configure(hdr);
-
- case IOC_LIBCFS_DEL_NET:
- return lnet_dyn_unconfigure(hdr);
-
- default:
- /*
- * Passing LNET_PID_ANY only gives me a ref if the net is up
- * already; I'll need it to ensure the net can't go down while
- * I'm called into it
- */
- rc = LNetNIInit(LNET_PID_ANY);
- if (rc >= 0) {
- rc = LNetCtl(cmd, hdr);
- LNetNIFini();
- }
- return rc;
- }
-}
-
-static DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
-
-static int __init lnet_init(void)
-{
- int rc;
-
- mutex_init(&lnet_config_mutex);
-
- rc = lnet_lib_init();
- if (rc) {
- CERROR("lnet_lib_init: error %d\n", rc);
- return rc;
- }
-
- rc = libcfs_register_ioctl(&lnet_ioctl_handler);
- LASSERT(!rc);
-
- if (config_on_load) {
- /*
- * Have to schedule a separate thread to avoid deadlocking
- * in modload
- */
- (void)kthread_run(lnet_configure, NULL, "lnet_initd");
- }
-
- return 0;
-}
-
-static void __exit lnet_exit(void)
-{
- int rc;
-
- rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
- LASSERT(!rc);
-
- lnet_lib_exit();
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Networking layer");
-MODULE_VERSION(LNET_VERSION);
-MODULE_LICENSE("GPL");
-
-module_init(lnet_init);
-module_exit(lnet_exit);
diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c
deleted file mode 100644
index a63b7941d435..000000000000
--- a/drivers/staging/lustre/lnet/lnet/net_fault.c
+++ /dev/null
@@ -1,1023 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2014, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- *
- * lnet/lnet/net_fault.c
- *
- * Lustre network fault simulation
- *
- * Author: liang.zhen@intel.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-#include <uapi/linux/lnet/lnetctl.h>
-
-#define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \
- LNET_GET_BIT | LNET_REPLY_BIT)
-
-struct lnet_drop_rule {
- /** link chain on the_lnet.ln_drop_rules */
- struct list_head dr_link;
- /** attributes of this rule */
- struct lnet_fault_attr dr_attr;
- /** lock to protect \a dr_drop_at and \a dr_stat */
- spinlock_t dr_lock;
- /**
- * the message sequence to drop, which means message is dropped when
- * dr_stat.drs_count == dr_drop_at
- */
- unsigned long dr_drop_at;
- /**
- * seconds to drop the next message, it's exclusive with dr_drop_at
- */
- unsigned long dr_drop_time;
- /** baseline to caculate dr_drop_time */
- unsigned long dr_time_base;
- /** statistic of dropped messages */
- struct lnet_fault_stat dr_stat;
-};
-
-static bool
-lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
-{
- if (nid == msg_nid || nid == LNET_NID_ANY)
- return true;
-
- if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
- return false;
-
- /* 255.255.255.255@net is wildcard for all addresses in a network */
- return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
-}
-
-static bool
-lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
- lnet_nid_t dst, unsigned int type, unsigned int portal)
-{
- if (!lnet_fault_nid_match(attr->fa_src, src) ||
- !lnet_fault_nid_match(attr->fa_dst, dst))
- return false;
-
- if (!(attr->fa_msg_mask & (1 << type)))
- return false;
-
- /**
- * NB: ACK and REPLY have no portal, but they should have been
- * rejected by message mask
- */
- if (attr->fa_ptl_mask && /* has portal filter */
- !(attr->fa_ptl_mask & (1ULL << portal)))
- return false;
-
- return true;
-}
-
-static int
-lnet_fault_attr_validate(struct lnet_fault_attr *attr)
-{
- if (!attr->fa_msg_mask)
- attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */
-
- if (!attr->fa_ptl_mask) /* no portal filter */
- return 0;
-
- /* NB: only PUT and GET can be filtered if portal filter has been set */
- attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
- if (!attr->fa_msg_mask) {
- CDEBUG(D_NET, "can't find valid message type bits %x\n",
- attr->fa_msg_mask);
- return -EINVAL;
- }
- return 0;
-}
-
-static void
-lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
-{
- /* NB: fs_counter is NOT updated by this function */
- switch (type) {
- case LNET_MSG_PUT:
- stat->fs_put++;
- return;
- case LNET_MSG_ACK:
- stat->fs_ack++;
- return;
- case LNET_MSG_GET:
- stat->fs_get++;
- return;
- case LNET_MSG_REPLY:
- stat->fs_reply++;
- return;
- }
-}
-
-/**
- * LNet message drop simulation
- */
-
-/**
- * Add a new drop rule to LNet
- * There is no check for duplicated drop rule, all rules will be checked for
- * incoming message.
- */
-static int
-lnet_drop_rule_add(struct lnet_fault_attr *attr)
-{
- struct lnet_drop_rule *rule;
-
- if (attr->u.drop.da_rate & attr->u.drop.da_interval) {
- CDEBUG(D_NET, "please provide either drop rate or drop interval, but not both at the same time %d/%d\n",
- attr->u.drop.da_rate, attr->u.drop.da_interval);
- return -EINVAL;
- }
-
- if (lnet_fault_attr_validate(attr))
- return -EINVAL;
-
- rule = kzalloc(sizeof(*rule), GFP_NOFS);
- if (!rule)
- return -ENOMEM;
-
- spin_lock_init(&rule->dr_lock);
-
- rule->dr_attr = *attr;
- if (attr->u.drop.da_interval) {
- rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
- rule->dr_drop_time = cfs_time_shift(
- prandom_u32_max(attr->u.drop.da_interval));
- } else {
- rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
- }
-
- lnet_net_lock(LNET_LOCK_EX);
- list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
- lnet_net_unlock(LNET_LOCK_EX);
-
- CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
- libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
- attr->u.drop.da_rate, attr->u.drop.da_interval);
- return 0;
-}
-
-/**
- * Remove matched drop rules from lnet, all rules that can match \a src and
- * \a dst will be removed.
- * If \a src is zero, then all rules have \a dst as destination will be remove
- * If \a dst is zero, then all rules have \a src as source will be removed
- * If both of them are zero, all rules will be removed
- */
-static int
-lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
-{
- struct lnet_drop_rule *rule;
- struct lnet_drop_rule *tmp;
- struct list_head zombies;
- int n = 0;
-
- INIT_LIST_HEAD(&zombies);
-
- lnet_net_lock(LNET_LOCK_EX);
- list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
- if (rule->dr_attr.fa_src != src && src)
- continue;
-
- if (rule->dr_attr.fa_dst != dst && dst)
- continue;
-
- list_move(&rule->dr_link, &zombies);
- }
- lnet_net_unlock(LNET_LOCK_EX);
-
- list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
- CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
- libcfs_nid2str(rule->dr_attr.fa_src),
- libcfs_nid2str(rule->dr_attr.fa_dst),
- rule->dr_attr.u.drop.da_rate,
- rule->dr_attr.u.drop.da_interval);
-
- list_del(&rule->dr_link);
- kfree(rule);
- n++;
- }
-
- return n;
-}
-
-/**
- * List drop rule at position of \a pos
- */
-static int
-lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
- struct lnet_fault_stat *stat)
-{
- struct lnet_drop_rule *rule;
- int cpt;
- int i = 0;
- int rc = -ENOENT;
-
- cpt = lnet_net_lock_current();
- list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
- if (i++ < pos)
- continue;
-
- spin_lock(&rule->dr_lock);
- *attr = rule->dr_attr;
- *stat = rule->dr_stat;
- spin_unlock(&rule->dr_lock);
- rc = 0;
- break;
- }
-
- lnet_net_unlock(cpt);
- return rc;
-}
-
-/**
- * reset counters for all drop rules
- */
-static void
-lnet_drop_rule_reset(void)
-{
- struct lnet_drop_rule *rule;
- int cpt;
-
- cpt = lnet_net_lock_current();
-
- list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
- struct lnet_fault_attr *attr = &rule->dr_attr;
-
- spin_lock(&rule->dr_lock);
-
- memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
- if (attr->u.drop.da_rate) {
- rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
- } else {
- rule->dr_drop_time = cfs_time_shift(
- prandom_u32_max(attr->u.drop.da_interval));
- rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
- }
- spin_unlock(&rule->dr_lock);
- }
-
- lnet_net_unlock(cpt);
-}
-
-/**
- * check source/destination NID, portal, message type and drop rate,
- * decide whether should drop this message or not
- */
-static bool
-drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
- lnet_nid_t dst, unsigned int type, unsigned int portal)
-{
- struct lnet_fault_attr *attr = &rule->dr_attr;
- bool drop;
-
- if (!lnet_fault_attr_match(attr, src, dst, type, portal))
- return false;
-
- /* match this rule, check drop rate now */
- spin_lock(&rule->dr_lock);
- if (rule->dr_drop_time) { /* time based drop */
- unsigned long now = cfs_time_current();
-
- rule->dr_stat.fs_count++;
- drop = cfs_time_aftereq(now, rule->dr_drop_time);
- if (drop) {
- if (cfs_time_after(now, rule->dr_time_base))
- rule->dr_time_base = now;
-
- rule->dr_drop_time = rule->dr_time_base +
- prandom_u32_max(attr->u.drop.da_interval) * HZ;
- rule->dr_time_base += attr->u.drop.da_interval * HZ;
-
- CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lu\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst),
- rule->dr_drop_time);
- }
-
- } else { /* rate based drop */
- drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
-
- if (!do_div(rule->dr_stat.fs_count, attr->u.drop.da_rate)) {
- rule->dr_drop_at = rule->dr_stat.fs_count +
- prandom_u32_max(attr->u.drop.da_rate);
- CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
- }
- }
-
- if (drop) { /* drop this message, update counters */
- lnet_fault_stat_inc(&rule->dr_stat, type);
- rule->dr_stat.u.drop.ds_dropped++;
- }
-
- spin_unlock(&rule->dr_lock);
- return drop;
-}
-
-/**
- * Check if message from \a src to \a dst can match any existed drop rule
- */
-bool
-lnet_drop_rule_match(struct lnet_hdr *hdr)
-{
- struct lnet_drop_rule *rule;
- lnet_nid_t src = le64_to_cpu(hdr->src_nid);
- lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
- unsigned int typ = le32_to_cpu(hdr->type);
- unsigned int ptl = -1;
- bool drop = false;
- int cpt;
-
- /**
- * NB: if Portal is specified, then only PUT and GET will be
- * filtered by drop rule
- */
- if (typ == LNET_MSG_PUT)
- ptl = le32_to_cpu(hdr->msg.put.ptl_index);
- else if (typ == LNET_MSG_GET)
- ptl = le32_to_cpu(hdr->msg.get.ptl_index);
-
- cpt = lnet_net_lock_current();
- list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
- drop = drop_rule_match(rule, src, dst, typ, ptl);
- if (drop)
- break;
- }
-
- lnet_net_unlock(cpt);
- return drop;
-}
-
-/**
- * LNet Delay Simulation
- */
-/** timestamp (second) to send delayed message */
-#define msg_delay_send msg_ev.hdr_data
-
-struct lnet_delay_rule {
- /** link chain on the_lnet.ln_delay_rules */
- struct list_head dl_link;
- /** link chain on delay_dd.dd_sched_rules */
- struct list_head dl_sched_link;
- /** attributes of this rule */
- struct lnet_fault_attr dl_attr;
- /** lock to protect \a below members */
- spinlock_t dl_lock;
- /** refcount of delay rule */
- atomic_t dl_refcount;
- /**
- * the message sequence to delay, which means message is delayed when
- * dl_stat.fs_count == dl_delay_at
- */
- unsigned long dl_delay_at;
- /**
- * seconds to delay the next message, it's exclusive with dl_delay_at
- */
- unsigned long dl_delay_time;
- /** baseline to caculate dl_delay_time */
- unsigned long dl_time_base;
- /** jiffies to send the next delayed message */
- unsigned long dl_msg_send;
- /** delayed message list */
- struct list_head dl_msg_list;
- /** statistic of delayed messages */
- struct lnet_fault_stat dl_stat;
- /** timer to wakeup delay_daemon */
- struct timer_list dl_timer;
-};
-
-struct delay_daemon_data {
- /** serialise rule add/remove */
- struct mutex dd_mutex;
- /** protect rules on \a dd_sched_rules */
- spinlock_t dd_lock;
- /** scheduled delay rules (by timer) */
- struct list_head dd_sched_rules;
- /** daemon thread sleeps at here */
- wait_queue_head_t dd_waitq;
- /** controller (lctl command) wait at here */
- wait_queue_head_t dd_ctl_waitq;
- /** daemon is running */
- unsigned int dd_running;
- /** daemon stopped */
- unsigned int dd_stopped;
-};
-
-static struct delay_daemon_data delay_dd;
-
-static unsigned long
-round_timeout(unsigned long timeout)
-{
- return (unsigned int)rounddown(timeout, HZ) + HZ;
-}
-
-static void
-delay_rule_decref(struct lnet_delay_rule *rule)
-{
- if (atomic_dec_and_test(&rule->dl_refcount)) {
- LASSERT(list_empty(&rule->dl_sched_link));
- LASSERT(list_empty(&rule->dl_msg_list));
- LASSERT(list_empty(&rule->dl_link));
-
- kfree(rule);
- }
-}
-
-/**
- * check source/destination NID, portal, message type and delay rate,
- * decide whether should delay this message or not
- */
-static bool
-delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
- lnet_nid_t dst, unsigned int type, unsigned int portal,
- struct lnet_msg *msg)
-{
- struct lnet_fault_attr *attr = &rule->dl_attr;
- bool delay;
-
- if (!lnet_fault_attr_match(attr, src, dst, type, portal))
- return false;
-
- /* match this rule, check delay rate now */
- spin_lock(&rule->dl_lock);
- if (rule->dl_delay_time) { /* time based delay */
- unsigned long now = cfs_time_current();
-
- rule->dl_stat.fs_count++;
- delay = cfs_time_aftereq(now, rule->dl_delay_time);
- if (delay) {
- if (cfs_time_after(now, rule->dl_time_base))
- rule->dl_time_base = now;
-
- rule->dl_delay_time = rule->dl_time_base +
- prandom_u32_max(attr->u.delay.la_interval) * HZ;
- rule->dl_time_base += attr->u.delay.la_interval * HZ;
-
- CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lu\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst),
- rule->dl_delay_time);
- }
-
- } else { /* rate based delay */
- delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
- /* generate the next random rate sequence */
- if (!do_div(rule->dl_stat.fs_count, attr->u.delay.la_rate)) {
- rule->dl_delay_at = rule->dl_stat.fs_count +
- prandom_u32_max(attr->u.delay.la_rate);
- CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
- libcfs_nid2str(attr->fa_src),
- libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
- }
- }
-
- if (!delay) {
- spin_unlock(&rule->dl_lock);
- return false;
- }
-
- /* delay this message, update counters */
- lnet_fault_stat_inc(&rule->dl_stat, type);
- rule->dl_stat.u.delay.ls_delayed++;
-
- list_add_tail(&msg->msg_list, &rule->dl_msg_list);
- msg->msg_delay_send = round_timeout(
- cfs_time_shift(attr->u.delay.la_latency));
- if (rule->dl_msg_send == -1) {
- rule->dl_msg_send = msg->msg_delay_send;
- mod_timer(&rule->dl_timer, rule->dl_msg_send);
- }
-
- spin_unlock(&rule->dl_lock);
- return true;
-}
-
-/**
- * check if \a msg can match any Delay Rule, receiving of this message
- * will be delayed if there is a match.
- */
-bool
-lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg)
-{
- struct lnet_delay_rule *rule;
- lnet_nid_t src = le64_to_cpu(hdr->src_nid);
- lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
- unsigned int typ = le32_to_cpu(hdr->type);
- unsigned int ptl = -1;
-
- /* NB: called with hold of lnet_net_lock */
-
- /**
- * NB: if Portal is specified, then only PUT and GET will be
- * filtered by delay rule
- */
- if (typ == LNET_MSG_PUT)
- ptl = le32_to_cpu(hdr->msg.put.ptl_index);
- else if (typ == LNET_MSG_GET)
- ptl = le32_to_cpu(hdr->msg.get.ptl_index);
-
- list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
- if (delay_rule_match(rule, src, dst, typ, ptl, msg))
- return true;
- }
-
- return false;
-}
-
-/** check out delayed messages for send */
-static void
-delayed_msg_check(struct lnet_delay_rule *rule, bool all,
- struct list_head *msg_list)
-{
- struct lnet_msg *msg;
- struct lnet_msg *tmp;
- unsigned long now = cfs_time_current();
-
- if (!all && rule->dl_msg_send > now)
- return;
-
- spin_lock(&rule->dl_lock);
- list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
- if (!all && msg->msg_delay_send > now)
- break;
-
- msg->msg_delay_send = 0;
- list_move_tail(&msg->msg_list, msg_list);
- }
-
- if (list_empty(&rule->dl_msg_list)) {
- del_timer(&rule->dl_timer);
- rule->dl_msg_send = -1;
-
- } else if (!list_empty(msg_list)) {
- /*
- * dequeued some timedout messages, update timer for the
- * next delayed message on rule
- */
- msg = list_entry(rule->dl_msg_list.next,
- struct lnet_msg, msg_list);
- rule->dl_msg_send = msg->msg_delay_send;
- mod_timer(&rule->dl_timer, rule->dl_msg_send);
- }
- spin_unlock(&rule->dl_lock);
-}
-
-static void
-delayed_msg_process(struct list_head *msg_list, bool drop)
-{
- struct lnet_msg *msg;
-
- while (!list_empty(msg_list)) {
- struct lnet_ni *ni;
- int cpt;
- int rc;
-
- msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
- LASSERT(msg->msg_rxpeer);
-
- ni = msg->msg_rxpeer->lp_ni;
- cpt = msg->msg_rx_cpt;
-
- list_del_init(&msg->msg_list);
- if (drop) {
- rc = -ECANCELED;
-
- } else if (!msg->msg_routing) {
- rc = lnet_parse_local(ni, msg);
- if (!rc)
- continue;
-
- } else {
- lnet_net_lock(cpt);
- rc = lnet_parse_forward_locked(ni, msg);
- lnet_net_unlock(cpt);
-
- switch (rc) {
- case LNET_CREDIT_OK:
- lnet_ni_recv(ni, msg->msg_private, msg, 0,
- 0, msg->msg_len, msg->msg_len);
- /* fall through */
- case LNET_CREDIT_WAIT:
- continue;
- default: /* failures */
- break;
- }
- }
-
- lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len);
- lnet_finalize(ni, msg, rc);
- }
-}
-
-/**
- * Process delayed messages for scheduled rules
- * This function can either be called by delay_rule_daemon, or by lnet_finalise
- */
-void
-lnet_delay_rule_check(void)
-{
- struct lnet_delay_rule *rule;
- struct list_head msgs;
-
- INIT_LIST_HEAD(&msgs);
- while (1) {
- if (list_empty(&delay_dd.dd_sched_rules))
- break;
-
- spin_lock_bh(&delay_dd.dd_lock);
- if (list_empty(&delay_dd.dd_sched_rules)) {
- spin_unlock_bh(&delay_dd.dd_lock);
- break;
- }
-
- rule = list_entry(delay_dd.dd_sched_rules.next,
- struct lnet_delay_rule, dl_sched_link);
- list_del_init(&rule->dl_sched_link);
- spin_unlock_bh(&delay_dd.dd_lock);
-
- delayed_msg_check(rule, false, &msgs);
- delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */
- }
-
- if (!list_empty(&msgs))
- delayed_msg_process(&msgs, false);
-}
-
-/** daemon thread to handle delayed messages */
-static int
-lnet_delay_rule_daemon(void *arg)
-{
- delay_dd.dd_running = 1;
- wake_up(&delay_dd.dd_ctl_waitq);
-
- while (delay_dd.dd_running) {
- wait_event_interruptible(delay_dd.dd_waitq,
- !delay_dd.dd_running ||
- !list_empty(&delay_dd.dd_sched_rules));
- lnet_delay_rule_check();
- }
-
- /* in case more rules have been enqueued after my last check */
- lnet_delay_rule_check();
- delay_dd.dd_stopped = 1;
- wake_up(&delay_dd.dd_ctl_waitq);
-
- return 0;
-}
-
-static void
-delay_timer_cb(struct timer_list *t)
-{
- struct lnet_delay_rule *rule = from_timer(rule, t, dl_timer);
-
- spin_lock_bh(&delay_dd.dd_lock);
- if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
- atomic_inc(&rule->dl_refcount);
- list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
- wake_up(&delay_dd.dd_waitq);
- }
- spin_unlock_bh(&delay_dd.dd_lock);
-}
-
-/**
- * Add a new delay rule to LNet
- * There is no check for duplicated delay rule, all rules will be checked for
- * incoming message.
- */
-int
-lnet_delay_rule_add(struct lnet_fault_attr *attr)
-{
- struct lnet_delay_rule *rule;
- int rc = 0;
-
- if (attr->u.delay.la_rate & attr->u.delay.la_interval) {
- CDEBUG(D_NET, "please provide either delay rate or delay interval, but not both at the same time %d/%d\n",
- attr->u.delay.la_rate, attr->u.delay.la_interval);
- return -EINVAL;
- }
-
- if (!attr->u.delay.la_latency) {
- CDEBUG(D_NET, "delay latency cannot be zero\n");
- return -EINVAL;
- }
-
- if (lnet_fault_attr_validate(attr))
- return -EINVAL;
-
- rule = kzalloc(sizeof(*rule), GFP_NOFS);
- if (!rule)
- return -ENOMEM;
-
- mutex_lock(&delay_dd.dd_mutex);
- if (!delay_dd.dd_running) {
- struct task_struct *task;
-
- /**
- * NB: although LND threads will process delayed message
- * in lnet_finalize, but there is no guarantee that LND
- * threads will be waken up if no other message needs to
- * be handled.
- * Only one daemon thread, performance is not the concern
- * of this simualation module.
- */
- task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- goto failed;
- }
- wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
- }
-
- timer_setup(&rule->dl_timer, delay_timer_cb, 0);
-
- spin_lock_init(&rule->dl_lock);
- INIT_LIST_HEAD(&rule->dl_msg_list);
- INIT_LIST_HEAD(&rule->dl_sched_link);
-
- rule->dl_attr = *attr;
- if (attr->u.delay.la_interval) {
- rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
- rule->dl_delay_time = cfs_time_shift(
- prandom_u32_max(attr->u.delay.la_interval));
- } else {
- rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
- }
-
- rule->dl_msg_send = -1;
-
- lnet_net_lock(LNET_LOCK_EX);
- atomic_set(&rule->dl_refcount, 1);
- list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
- lnet_net_unlock(LNET_LOCK_EX);
-
- CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
- libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src),
- attr->u.delay.la_rate);
-
- mutex_unlock(&delay_dd.dd_mutex);
- return 0;
-failed:
- mutex_unlock(&delay_dd.dd_mutex);
- kfree(rule);
- return rc;
-}
-
-/**
- * Remove matched Delay Rules from lnet, if \a shutdown is true or both \a src
- * and \a dst are zero, all rules will be removed, otherwise only matched rules
- * will be removed.
- * If \a src is zero, then all rules have \a dst as destination will be remove
- * If \a dst is zero, then all rules have \a src as source will be removed
- *
- * When a delay rule is removed, all delayed messages of this rule will be
- * processed immediately.
- */
-int
-lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
-{
- struct lnet_delay_rule *rule;
- struct lnet_delay_rule *tmp;
- struct list_head rule_list;
- struct list_head msg_list;
- int n = 0;
- bool cleanup;
-
- INIT_LIST_HEAD(&rule_list);
- INIT_LIST_HEAD(&msg_list);
-
- if (shutdown) {
- src = 0;
- dst = 0;
- }
-
- mutex_lock(&delay_dd.dd_mutex);
- lnet_net_lock(LNET_LOCK_EX);
-
- list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
- if (rule->dl_attr.fa_src != src && src)
- continue;
-
- if (rule->dl_attr.fa_dst != dst && dst)
- continue;
-
- CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
- libcfs_nid2str(rule->dl_attr.fa_src),
- libcfs_nid2str(rule->dl_attr.fa_dst),
- rule->dl_attr.u.delay.la_rate,
- rule->dl_attr.u.delay.la_interval);
- /* refcount is taken over by rule_list */
- list_move(&rule->dl_link, &rule_list);
- }
-
- /* check if we need to shutdown delay_daemon */
- cleanup = list_empty(&the_lnet.ln_delay_rules) &&
- !list_empty(&rule_list);
- lnet_net_unlock(LNET_LOCK_EX);
-
- list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
- list_del_init(&rule->dl_link);
-
- del_timer_sync(&rule->dl_timer);
- delayed_msg_check(rule, true, &msg_list);
- delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
- n++;
- }
-
- if (cleanup) { /* no more delay rule, shutdown delay_daemon */
- LASSERT(delay_dd.dd_running);
- delay_dd.dd_running = 0;
- wake_up(&delay_dd.dd_waitq);
-
- while (!delay_dd.dd_stopped)
- wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
- }
- mutex_unlock(&delay_dd.dd_mutex);
-
- if (!list_empty(&msg_list))
- delayed_msg_process(&msg_list, shutdown);
-
- return n;
-}
-
-/**
- * List Delay Rule at position of \a pos
- */
-int
-lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
- struct lnet_fault_stat *stat)
-{
- struct lnet_delay_rule *rule;
- int cpt;
- int i = 0;
- int rc = -ENOENT;
-
- cpt = lnet_net_lock_current();
- list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
- if (i++ < pos)
- continue;
-
- spin_lock(&rule->dl_lock);
- *attr = rule->dl_attr;
- *stat = rule->dl_stat;
- spin_unlock(&rule->dl_lock);
- rc = 0;
- break;
- }
-
- lnet_net_unlock(cpt);
- return rc;
-}
-
-/**
- * reset counters for all Delay Rules
- */
-void
-lnet_delay_rule_reset(void)
-{
- struct lnet_delay_rule *rule;
- int cpt;
-
- cpt = lnet_net_lock_current();
-
- list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
- struct lnet_fault_attr *attr = &rule->dl_attr;
-
- spin_lock(&rule->dl_lock);
-
- memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
- if (attr->u.delay.la_rate) {
- rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
- } else {
- rule->dl_delay_time =
- cfs_time_shift(prandom_u32_max(
- attr->u.delay.la_interval));
- rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
- }
- spin_unlock(&rule->dl_lock);
- }
-
- lnet_net_unlock(cpt);
-}
-
-int
-lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
-{
- struct lnet_fault_attr *attr;
- struct lnet_fault_stat *stat;
-
- attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;
-
- switch (opc) {
- default:
- return -EINVAL;
-
- case LNET_CTL_DROP_ADD:
- if (!attr)
- return -EINVAL;
-
- return lnet_drop_rule_add(attr);
-
- case LNET_CTL_DROP_DEL:
- if (!attr)
- return -EINVAL;
-
- data->ioc_count = lnet_drop_rule_del(attr->fa_src,
- attr->fa_dst);
- return 0;
-
- case LNET_CTL_DROP_RESET:
- lnet_drop_rule_reset();
- return 0;
-
- case LNET_CTL_DROP_LIST:
- stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
- if (!attr || !stat)
- return -EINVAL;
-
- return lnet_drop_rule_list(data->ioc_count, attr, stat);
-
- case LNET_CTL_DELAY_ADD:
- if (!attr)
- return -EINVAL;
-
- return lnet_delay_rule_add(attr);
-
- case LNET_CTL_DELAY_DEL:
- if (!attr)
- return -EINVAL;
-
- data->ioc_count = lnet_delay_rule_del(attr->fa_src,
- attr->fa_dst, false);
- return 0;
-
- case LNET_CTL_DELAY_RESET:
- lnet_delay_rule_reset();
- return 0;
-
- case LNET_CTL_DELAY_LIST:
- stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
- if (!attr || !stat)
- return -EINVAL;
-
- return lnet_delay_rule_list(data->ioc_count, attr, stat);
- }
-}
-
-int
-lnet_fault_init(void)
-{
- BUILD_BUG_ON(LNET_PUT_BIT != 1 << LNET_MSG_PUT);
- BUILD_BUG_ON(LNET_ACK_BIT != 1 << LNET_MSG_ACK);
- BUILD_BUG_ON(LNET_GET_BIT != 1 << LNET_MSG_GET);
- BUILD_BUG_ON(LNET_REPLY_BIT != 1 << LNET_MSG_REPLY);
-
- mutex_init(&delay_dd.dd_mutex);
- spin_lock_init(&delay_dd.dd_lock);
- init_waitqueue_head(&delay_dd.dd_waitq);
- init_waitqueue_head(&delay_dd.dd_ctl_waitq);
- INIT_LIST_HEAD(&delay_dd.dd_sched_rules);
-
- return 0;
-}
-
-void
-lnet_fault_fini(void)
-{
- lnet_drop_rule_del(0, 0);
- lnet_delay_rule_del(0, 0, true);
-
- LASSERT(list_empty(&the_lnet.ln_drop_rules));
- LASSERT(list_empty(&the_lnet.ln_delay_rules));
- LASSERT(list_empty(&delay_dd.dd_sched_rules));
-}
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
deleted file mode 100644
index 3aba1421c741..000000000000
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ /dev/null
@@ -1,1258 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/nidstrings.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-#include <uapi/linux/lnet/nidstr.h>
-
-/* max value for numeric network address */
-#define MAX_NUMERIC_VALUE 0xffffffff
-
-#define IPSTRING_LENGTH 16
-
-/* CAVEAT VENDITOR! Keep the canonical string representation of nets/nids
- * consistent in all conversion functions. Some code fragments are copied
- * around for the sake of clarity...
- */
-
-/* CAVEAT EMPTOR! Racey temporary buffer allocation!
- * Choose the number of nidstrings to support the MAXIMUM expected number of
- * concurrent users. If there are more, the returned string will be volatile.
- * NB this number must allow for a process to be descheduled for a timeslice
- * between getting its string and using it.
- */
-
-static char libcfs_nidstrings[LNET_NIDSTR_COUNT][LNET_NIDSTR_SIZE];
-static int libcfs_nidstring_idx;
-
-static DEFINE_SPINLOCK(libcfs_nidstring_lock);
-
-static struct netstrfns *libcfs_namenum2netstrfns(const char *name);
-
-char *
-libcfs_next_nidstring(void)
-{
- char *str;
- unsigned long flags;
-
- spin_lock_irqsave(&libcfs_nidstring_lock, flags);
-
- str = libcfs_nidstrings[libcfs_nidstring_idx++];
- if (libcfs_nidstring_idx == ARRAY_SIZE(libcfs_nidstrings))
- libcfs_nidstring_idx = 0;
-
- spin_unlock_irqrestore(&libcfs_nidstring_lock, flags);
- return str;
-}
-EXPORT_SYMBOL(libcfs_next_nidstring);
-
-/**
- * Nid range list syntax.
- * \verbatim
- *
- * <nidlist> :== <nidrange> [ ' ' <nidrange> ]
- * <nidrange> :== <addrrange> '@' <net>
- * <addrrange> :== '*' |
- * <ipaddr_range> |
- * <cfs_expr_list>
- * <ipaddr_range> :== <cfs_expr_list>.<cfs_expr_list>.<cfs_expr_list>.
- * <cfs_expr_list>
- * <cfs_expr_list> :== <number> |
- * <expr_list>
- * <expr_list> :== '[' <range_expr> [ ',' <range_expr>] ']'
- * <range_expr> :== <number> |
- * <number> '-' <number> |
- * <number> '-' <number> '/' <number>
- * <net> :== <netname> | <netname><number>
- * <netname> :== "lo" | "tcp" | "o2ib" | "cib" | "openib" | "iib" |
- * "vib" | "ra" | "elan" | "mx" | "ptl"
- * \endverbatim
- */
-
-/**
- * Structure to represent \<nidrange\> token of the syntax.
- *
- * One of this is created for each \<net\> parsed.
- */
-struct nidrange {
- /**
- * Link to list of this structures which is built on nid range
- * list parsing.
- */
- struct list_head nr_link;
- /**
- * List head for addrrange::ar_link.
- */
- struct list_head nr_addrranges;
- /**
- * Flag indicating that *@<net> is found.
- */
- int nr_all;
- /**
- * Pointer to corresponding element of libcfs_netstrfns.
- */
- struct netstrfns *nr_netstrfns;
- /**
- * Number of network. E.g. 5 if \<net\> is "elan5".
- */
- int nr_netnum;
-};
-
-/**
- * Structure to represent \<addrrange\> token of the syntax.
- */
-struct addrrange {
- /**
- * Link to nidrange::nr_addrranges.
- */
- struct list_head ar_link;
- /**
- * List head for cfs_expr_list::el_list.
- */
- struct list_head ar_numaddr_ranges;
-};
-
-/**
- * Parses \<addrrange\> token on the syntax.
- *
- * Allocates struct addrrange and links to \a nidrange via
- * (nidrange::nr_addrranges)
- *
- * \retval 0 if \a src parses to '*' | \<ipaddr_range\> | \<cfs_expr_list\>
- * \retval -errno otherwise
- */
-static int
-parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange)
-{
- struct addrrange *addrrange;
-
- if (src->ls_len == 1 && src->ls_str[0] == '*') {
- nidrange->nr_all = 1;
- return 0;
- }
-
- addrrange = kzalloc(sizeof(struct addrrange), GFP_NOFS);
- if (!addrrange)
- return -ENOMEM;
- list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
- INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);
-
- return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str,
- src->ls_len,
- &addrrange->ar_numaddr_ranges);
-}
-
-/**
- * Finds or creates struct nidrange.
- *
- * Checks if \a src is a valid network name, looks for corresponding
- * nidrange on the ist of nidranges (\a nidlist), creates new struct
- * nidrange if it is not found.
- *
- * \retval pointer to struct nidrange matching network specified via \a src
- * \retval NULL if \a src does not match any network
- */
-static struct nidrange *
-add_nidrange(const struct cfs_lstr *src,
- struct list_head *nidlist)
-{
- struct netstrfns *nf;
- struct nidrange *nr;
- int endlen;
- unsigned int netnum;
-
- if (src->ls_len >= LNET_NIDSTR_SIZE)
- return NULL;
-
- nf = libcfs_namenum2netstrfns(src->ls_str);
- if (!nf)
- return NULL;
- endlen = src->ls_len - strlen(nf->nf_name);
- if (!endlen)
- /* network name only, e.g. "elan" or "tcp" */
- netnum = 0;
- else {
- /*
- * e.g. "elan25" or "tcp23", refuse to parse if
- * network name is not appended with decimal or
- * hexadecimal number
- */
- if (!cfs_str2num_check(src->ls_str + strlen(nf->nf_name),
- endlen, &netnum, 0, MAX_NUMERIC_VALUE))
- return NULL;
- }
-
- list_for_each_entry(nr, nidlist, nr_link) {
- if (nr->nr_netstrfns != nf)
- continue;
- if (nr->nr_netnum != netnum)
- continue;
- return nr;
- }
-
- nr = kzalloc(sizeof(struct nidrange), GFP_NOFS);
- if (!nr)
- return NULL;
- list_add_tail(&nr->nr_link, nidlist);
- INIT_LIST_HEAD(&nr->nr_addrranges);
- nr->nr_netstrfns = nf;
- nr->nr_all = 0;
- nr->nr_netnum = netnum;
-
- return nr;
-}
-
-/**
- * Parses \<nidrange\> token of the syntax.
- *
- * \retval 1 if \a src parses to \<addrrange\> '@' \<net\>
- * \retval 0 otherwise
- */
-static int
-parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
-{
- struct cfs_lstr addrrange;
- struct cfs_lstr net;
- struct nidrange *nr;
-
- if (!cfs_gettok(src, '@', &addrrange))
- goto failed;
-
- if (!cfs_gettok(src, '@', &net) || src->ls_str)
- goto failed;
-
- nr = add_nidrange(&net, nidlist);
- if (!nr)
- goto failed;
-
- if (parse_addrange(&addrrange, nr))
- goto failed;
-
- return 1;
-failed:
- return 0;
-}
-
-/**
- * Frees addrrange structures of \a list.
- *
- * For each struct addrrange structure found on \a list it frees
- * cfs_expr_list list attached to it and frees the addrrange itself.
- *
- * \retval none
- */
-static void
-free_addrranges(struct list_head *list)
-{
- while (!list_empty(list)) {
- struct addrrange *ar;
-
- ar = list_entry(list->next, struct addrrange, ar_link);
-
- cfs_expr_list_free_list(&ar->ar_numaddr_ranges);
- list_del(&ar->ar_link);
- kfree(ar);
- }
-}
-
-/**
- * Frees nidrange strutures of \a list.
- *
- * For each struct nidrange structure found on \a list it frees
- * addrrange list attached to it and frees the nidrange itself.
- *
- * \retval none
- */
-void
-cfs_free_nidlist(struct list_head *list)
-{
- struct list_head *pos, *next;
- struct nidrange *nr;
-
- list_for_each_safe(pos, next, list) {
- nr = list_entry(pos, struct nidrange, nr_link);
- free_addrranges(&nr->nr_addrranges);
- list_del(pos);
- kfree(nr);
- }
-}
-EXPORT_SYMBOL(cfs_free_nidlist);
-
-/**
- * Parses nid range list.
- *
- * Parses with rigorous syntax and overflow checking \a str into
- * \<nidrange\> [ ' ' \<nidrange\> ], compiles \a str into set of
- * structures and links that structure to \a nidlist. The resulting
- * list can be used to match a NID againts set of NIDS defined by \a
- * str.
- * \see cfs_match_nid
- *
- * \retval 1 on success
- * \retval 0 otherwise
- */
-int
-cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
-{
- struct cfs_lstr src;
- struct cfs_lstr res;
- int rc;
-
- src.ls_str = str;
- src.ls_len = len;
- INIT_LIST_HEAD(nidlist);
- while (src.ls_str) {
- rc = cfs_gettok(&src, ' ', &res);
- if (!rc) {
- cfs_free_nidlist(nidlist);
- return 0;
- }
- rc = parse_nidrange(&res, nidlist);
- if (!rc) {
- cfs_free_nidlist(nidlist);
- return 0;
- }
- }
- return 1;
-}
-EXPORT_SYMBOL(cfs_parse_nidlist);
-
-/**
- * Matches a nid (\a nid) against the compiled list of nidranges (\a nidlist).
- *
- * \see cfs_parse_nidlist()
- *
- * \retval 1 on match
- * \retval 0 otherwises
- */
-int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
-{
- struct nidrange *nr;
- struct addrrange *ar;
-
- list_for_each_entry(nr, nidlist, nr_link) {
- if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid)))
- continue;
- if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid)))
- continue;
- if (nr->nr_all)
- return 1;
- list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
- if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
- &ar->ar_numaddr_ranges))
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(cfs_match_nid);
-
-/**
- * Print the network part of the nidrange \a nr into the specified \a buffer.
- *
- * \retval number of characters written
- */
-static int
-cfs_print_network(char *buffer, int count, struct nidrange *nr)
-{
- struct netstrfns *nf = nr->nr_netstrfns;
-
- if (!nr->nr_netnum)
- return scnprintf(buffer, count, "@%s", nf->nf_name);
- else
- return scnprintf(buffer, count, "@%s%u",
- nf->nf_name, nr->nr_netnum);
-}
-
-/**
- * Print a list of addrrange (\a addrranges) into the specified \a buffer.
- * At max \a count characters can be printed into \a buffer.
- *
- * \retval number of characters written
- */
-static int
-cfs_print_addrranges(char *buffer, int count, struct list_head *addrranges,
- struct nidrange *nr)
-{
- int i = 0;
- struct addrrange *ar;
- struct netstrfns *nf = nr->nr_netstrfns;
-
- list_for_each_entry(ar, addrranges, ar_link) {
- if (i)
- i += scnprintf(buffer + i, count - i, " ");
- i += nf->nf_print_addrlist(buffer + i, count - i,
- &ar->ar_numaddr_ranges);
- i += cfs_print_network(buffer + i, count - i, nr);
- }
- return i;
-}
-
-/**
- * Print a list of nidranges (\a nidlist) into the specified \a buffer.
- * At max \a count characters can be printed into \a buffer.
- * Nidranges are separated by a space character.
- *
- * \retval number of characters written
- */
-int cfs_print_nidlist(char *buffer, int count, struct list_head *nidlist)
-{
- int i = 0;
- struct nidrange *nr;
-
- if (count <= 0)
- return 0;
-
- list_for_each_entry(nr, nidlist, nr_link) {
- if (i)
- i += scnprintf(buffer + i, count - i, " ");
-
- if (nr->nr_all) {
- LASSERT(list_empty(&nr->nr_addrranges));
- i += scnprintf(buffer + i, count - i, "*");
- i += cfs_print_network(buffer + i, count - i, nr);
- } else {
- i += cfs_print_addrranges(buffer + i, count - i,
- &nr->nr_addrranges, nr);
- }
- }
- return i;
-}
-EXPORT_SYMBOL(cfs_print_nidlist);
-
-/**
- * Determines minimum and maximum addresses for a single
- * numeric address range
- *
- * \param ar
- * \param min_nid
- * \param max_nid
- */
-static void cfs_ip_ar_min_max(struct addrrange *ar, __u32 *min_nid,
- __u32 *max_nid)
-{
- struct cfs_expr_list *el;
- struct cfs_range_expr *re;
- __u32 tmp_ip_addr = 0;
- unsigned int min_ip[4] = {0};
- unsigned int max_ip[4] = {0};
- int re_count = 0;
-
- list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) {
- list_for_each_entry(re, &el->el_exprs, re_link) {
- min_ip[re_count] = re->re_lo;
- max_ip[re_count] = re->re_hi;
- re_count++;
- }
- }
-
- tmp_ip_addr = ((min_ip[0] << 24) | (min_ip[1] << 16) |
- (min_ip[2] << 8) | min_ip[3]);
-
- if (min_nid)
- *min_nid = tmp_ip_addr;
-
- tmp_ip_addr = ((max_ip[0] << 24) | (max_ip[1] << 16) |
- (max_ip[2] << 8) | max_ip[3]);
-
- if (max_nid)
- *max_nid = tmp_ip_addr;
-}
-
-/**
- * Determines minimum and maximum addresses for a single
- * numeric address range
- *
- * \param ar
- * \param min_nid
- * \param max_nid
- */
-static void cfs_num_ar_min_max(struct addrrange *ar, __u32 *min_nid,
- __u32 *max_nid)
-{
- struct cfs_expr_list *el;
- struct cfs_range_expr *re;
- unsigned int min_addr = 0;
- unsigned int max_addr = 0;
-
- list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) {
- list_for_each_entry(re, &el->el_exprs, re_link) {
- if (re->re_lo < min_addr || !min_addr)
- min_addr = re->re_lo;
- if (re->re_hi > max_addr)
- max_addr = re->re_hi;
- }
- }
-
- if (min_nid)
- *min_nid = min_addr;
- if (max_nid)
- *max_nid = max_addr;
-}
-
-/**
- * Determines whether an expression list in an nidrange contains exactly
- * one contiguous address range. Calls the correct netstrfns for the LND
- *
- * \param *nidlist
- *
- * \retval true if contiguous
- * \retval false if not contiguous
- */
-bool cfs_nidrange_is_contiguous(struct list_head *nidlist)
-{
- struct nidrange *nr;
- struct netstrfns *nf = NULL;
- char *lndname = NULL;
- int netnum = -1;
-
- list_for_each_entry(nr, nidlist, nr_link) {
- nf = nr->nr_netstrfns;
- if (!lndname)
- lndname = nf->nf_name;
- if (netnum == -1)
- netnum = nr->nr_netnum;
-
- if (strcmp(lndname, nf->nf_name) ||
- netnum != nr->nr_netnum)
- return false;
- }
-
- if (!nf)
- return false;
-
- if (!nf->nf_is_contiguous(nidlist))
- return false;
-
- return true;
-}
-EXPORT_SYMBOL(cfs_nidrange_is_contiguous);
-
-/**
- * Determines whether an expression list in an num nidrange contains exactly
- * one contiguous address range.
- *
- * \param *nidlist
- *
- * \retval true if contiguous
- * \retval false if not contiguous
- */
-static bool cfs_num_is_contiguous(struct list_head *nidlist)
-{
- struct nidrange *nr;
- struct addrrange *ar;
- struct cfs_expr_list *el;
- struct cfs_range_expr *re;
- int last_hi = 0;
- __u32 last_end_nid = 0;
- __u32 current_start_nid = 0;
- __u32 current_end_nid = 0;
-
- list_for_each_entry(nr, nidlist, nr_link) {
- list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
- cfs_num_ar_min_max(ar, &current_start_nid,
- &current_end_nid);
- if (last_end_nid &&
- (current_start_nid - last_end_nid != 1))
- return false;
- last_end_nid = current_end_nid;
- list_for_each_entry(el, &ar->ar_numaddr_ranges,
- el_link) {
- list_for_each_entry(re, &el->el_exprs,
- re_link) {
- if (re->re_stride > 1)
- return false;
- else if (last_hi &&
- re->re_hi - last_hi != 1)
- return false;
- last_hi = re->re_hi;
- }
- }
- }
- }
-
- return true;
-}
-
-/**
- * Determines whether an expression list in an ip nidrange contains exactly
- * one contiguous address range.
- *
- * \param *nidlist
- *
- * \retval true if contiguous
- * \retval false if not contiguous
- */
-static bool cfs_ip_is_contiguous(struct list_head *nidlist)
-{
- struct nidrange *nr;
- struct addrrange *ar;
- struct cfs_expr_list *el;
- struct cfs_range_expr *re;
- int expr_count;
- int last_hi = 255;
- int last_diff = 0;
- __u32 last_end_nid = 0;
- __u32 current_start_nid = 0;
- __u32 current_end_nid = 0;
-
- list_for_each_entry(nr, nidlist, nr_link) {
- list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
- last_hi = 255;
- last_diff = 0;
- cfs_ip_ar_min_max(ar, &current_start_nid,
- &current_end_nid);
- if (last_end_nid &&
- (current_start_nid - last_end_nid != 1))
- return false;
- last_end_nid = current_end_nid;
- list_for_each_entry(el, &ar->ar_numaddr_ranges,
- el_link) {
- expr_count = 0;
- list_for_each_entry(re, &el->el_exprs,
- re_link) {
- expr_count++;
- if (re->re_stride > 1 ||
- (last_diff > 0 && last_hi != 255) ||
- (last_diff > 0 && last_hi == 255 &&
- re->re_lo > 0))
- return false;
- last_hi = re->re_hi;
- last_diff = re->re_hi - re->re_lo;
- }
- }
- }
- }
-
- return true;
-}
-
-/**
- * Takes a linked list of nidrange expressions, determines the minimum
- * and maximum nid and creates appropriate nid structures
- *
- * \param *nidlist
- * \param *min_nid
- * \param *max_nid
- */
-void cfs_nidrange_find_min_max(struct list_head *nidlist, char *min_nid,
- char *max_nid, size_t nidstr_length)
-{
- struct nidrange *nr;
- struct netstrfns *nf = NULL;
- int netnum = -1;
- __u32 min_addr;
- __u32 max_addr;
- char *lndname = NULL;
- char min_addr_str[IPSTRING_LENGTH];
- char max_addr_str[IPSTRING_LENGTH];
-
- list_for_each_entry(nr, nidlist, nr_link) {
- nf = nr->nr_netstrfns;
- lndname = nf->nf_name;
- if (netnum == -1)
- netnum = nr->nr_netnum;
-
- nf->nf_min_max(nidlist, &min_addr, &max_addr);
- }
- nf->nf_addr2str(min_addr, min_addr_str, sizeof(min_addr_str));
- nf->nf_addr2str(max_addr, max_addr_str, sizeof(max_addr_str));
-
- snprintf(min_nid, nidstr_length, "%s@%s%d", min_addr_str, lndname,
- netnum);
- snprintf(max_nid, nidstr_length, "%s@%s%d", max_addr_str, lndname,
- netnum);
-}
-EXPORT_SYMBOL(cfs_nidrange_find_min_max);
-
-/**
- * Determines the min and max NID values for num LNDs
- *
- * \param *nidlist
- * \param *min_nid
- * \param *max_nid
- */
-static void cfs_num_min_max(struct list_head *nidlist, __u32 *min_nid,
- __u32 *max_nid)
-{
- struct nidrange *nr;
- struct addrrange *ar;
- unsigned int tmp_min_addr = 0;
- unsigned int tmp_max_addr = 0;
- unsigned int min_addr = 0;
- unsigned int max_addr = 0;
-
- list_for_each_entry(nr, nidlist, nr_link) {
- list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
- cfs_num_ar_min_max(ar, &tmp_min_addr,
- &tmp_max_addr);
- if (tmp_min_addr < min_addr || !min_addr)
- min_addr = tmp_min_addr;
- if (tmp_max_addr > max_addr)
- max_addr = tmp_min_addr;
- }
- }
- *max_nid = max_addr;
- *min_nid = min_addr;
-}
-
-/**
- * Takes an nidlist and determines the minimum and maximum
- * ip addresses.
- *
- * \param *nidlist
- * \param *min_nid
- * \param *max_nid
- */
-static void cfs_ip_min_max(struct list_head *nidlist, __u32 *min_nid,
- __u32 *max_nid)
-{
- struct nidrange *nr;
- struct addrrange *ar;
- __u32 tmp_min_ip_addr = 0;
- __u32 tmp_max_ip_addr = 0;
- __u32 min_ip_addr = 0;
- __u32 max_ip_addr = 0;
-
- list_for_each_entry(nr, nidlist, nr_link) {
- list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
- cfs_ip_ar_min_max(ar, &tmp_min_ip_addr,
- &tmp_max_ip_addr);
- if (tmp_min_ip_addr < min_ip_addr || !min_ip_addr)
- min_ip_addr = tmp_min_ip_addr;
- if (tmp_max_ip_addr > max_ip_addr)
- max_ip_addr = tmp_max_ip_addr;
- }
- }
-
- if (min_nid)
- *min_nid = min_ip_addr;
- if (max_nid)
- *max_nid = max_ip_addr;
-}
-
-static int
-libcfs_lo_str2addr(const char *str, int nob, __u32 *addr)
-{
- *addr = 0;
- return 1;
-}
-
-static void
-libcfs_ip_addr2str(__u32 addr, char *str, size_t size)
-{
- snprintf(str, size, "%u.%u.%u.%u",
- (addr >> 24) & 0xff, (addr >> 16) & 0xff,
- (addr >> 8) & 0xff, addr & 0xff);
-}
-
-/*
- * CAVEAT EMPTOR XscanfX
- * I use "%n" at the end of a sscanf format to detect trailing junk. However
- * sscanf may return immediately if it sees the terminating '0' in a string, so
- * I initialise the %n variable to the expected length. If sscanf sets it;
- * fine, if it doesn't, then the scan ended at the end of the string, which is
- * fine too :)
- */
-static int
-libcfs_ip_str2addr(const char *str, int nob, __u32 *addr)
-{
- unsigned int a;
- unsigned int b;
- unsigned int c;
- unsigned int d;
- int n = nob; /* XscanfX */
-
- /* numeric IP? */
- if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
- n == nob &&
- !(a & ~0xff) && !(b & ~0xff) &&
- !(c & ~0xff) && !(d & ~0xff)) {
- *addr = ((a << 24) | (b << 16) | (c << 8) | d);
- return 1;
- }
-
- return 0;
-}
-
-/* Used by lnet/config.c so it can't be static */
-int
-cfs_ip_addr_parse(char *str, int len, struct list_head *list)
-{
- struct cfs_expr_list *el;
- struct cfs_lstr src;
- int rc;
- int i;
-
- src.ls_str = str;
- src.ls_len = len;
- i = 0;
-
- while (src.ls_str) {
- struct cfs_lstr res;
-
- if (!cfs_gettok(&src, '.', &res)) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el);
- if (rc)
- goto out;
-
- list_add_tail(&el->el_link, list);
- i++;
- }
-
- if (i == 4)
- return 0;
-
- rc = -EINVAL;
-out:
- cfs_expr_list_free_list(list);
-
- return rc;
-}
-
-static int
-libcfs_ip_addr_range_print(char *buffer, int count, struct list_head *list)
-{
- int i = 0, j = 0;
- struct cfs_expr_list *el;
-
- list_for_each_entry(el, list, el_link) {
- LASSERT(j++ < 4);
- if (i)
- i += scnprintf(buffer + i, count - i, ".");
- i += cfs_expr_list_print(buffer + i, count - i, el);
- }
- return i;
-}
-
-/**
- * Matches address (\a addr) against address set encoded in \a list.
- *
- * \retval 1 if \a addr matches
- * \retval 0 otherwise
- */
-int
-cfs_ip_addr_match(__u32 addr, struct list_head *list)
-{
- struct cfs_expr_list *el;
- int i = 0;
-
- list_for_each_entry_reverse(el, list, el_link) {
- if (!cfs_expr_list_match(addr & 0xff, el))
- return 0;
- addr >>= 8;
- i++;
- }
-
- return i == 4;
-}
-
-static void
-libcfs_decnum_addr2str(__u32 addr, char *str, size_t size)
-{
- snprintf(str, size, "%u", addr);
-}
-
-static int
-libcfs_num_str2addr(const char *str, int nob, __u32 *addr)
-{
- int n;
-
- n = nob;
- if (sscanf(str, "0x%x%n", addr, &n) >= 1 && n == nob)
- return 1;
-
- n = nob;
- if (sscanf(str, "0X%x%n", addr, &n) >= 1 && n == nob)
- return 1;
-
- n = nob;
- if (sscanf(str, "%u%n", addr, &n) >= 1 && n == nob)
- return 1;
-
- return 0;
-}
-
-/**
- * Nf_parse_addrlist method for networks using numeric addresses.
- *
- * Examples of such networks are gm and elan.
- *
- * \retval 0 if \a str parsed to numeric address
- * \retval errno otherwise
- */
-static int
-libcfs_num_parse(char *str, int len, struct list_head *list)
-{
- struct cfs_expr_list *el;
- int rc;
-
- rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
- if (!rc)
- list_add_tail(&el->el_link, list);
-
- return rc;
-}
-
-static int
-libcfs_num_addr_range_print(char *buffer, int count, struct list_head *list)
-{
- int i = 0, j = 0;
- struct cfs_expr_list *el;
-
- list_for_each_entry(el, list, el_link) {
- LASSERT(j++ < 1);
- i += cfs_expr_list_print(buffer + i, count - i, el);
- }
- return i;
-}
-
-/*
- * Nf_match_addr method for networks using numeric addresses
- *
- * \retval 1 on match
- * \retval 0 otherwise
- */
-static int
-libcfs_num_match(__u32 addr, struct list_head *numaddr)
-{
- struct cfs_expr_list *el;
-
- LASSERT(!list_empty(numaddr));
- el = list_entry(numaddr->next, struct cfs_expr_list, el_link);
-
- return cfs_expr_list_match(addr, el);
-}
-
-static struct netstrfns libcfs_netstrfns[] = {
- { .nf_type = LOLND,
- .nf_name = "lo",
- .nf_modname = "klolnd",
- .nf_addr2str = libcfs_decnum_addr2str,
- .nf_str2addr = libcfs_lo_str2addr,
- .nf_parse_addrlist = libcfs_num_parse,
- .nf_print_addrlist = libcfs_num_addr_range_print,
- .nf_match_addr = libcfs_num_match,
- .nf_is_contiguous = cfs_num_is_contiguous,
- .nf_min_max = cfs_num_min_max },
- { .nf_type = SOCKLND,
- .nf_name = "tcp",
- .nf_modname = "ksocklnd",
- .nf_addr2str = libcfs_ip_addr2str,
- .nf_str2addr = libcfs_ip_str2addr,
- .nf_parse_addrlist = cfs_ip_addr_parse,
- .nf_print_addrlist = libcfs_ip_addr_range_print,
- .nf_match_addr = cfs_ip_addr_match,
- .nf_is_contiguous = cfs_ip_is_contiguous,
- .nf_min_max = cfs_ip_min_max },
- { .nf_type = O2IBLND,
- .nf_name = "o2ib",
- .nf_modname = "ko2iblnd",
- .nf_addr2str = libcfs_ip_addr2str,
- .nf_str2addr = libcfs_ip_str2addr,
- .nf_parse_addrlist = cfs_ip_addr_parse,
- .nf_print_addrlist = libcfs_ip_addr_range_print,
- .nf_match_addr = cfs_ip_addr_match,
- .nf_is_contiguous = cfs_ip_is_contiguous,
- .nf_min_max = cfs_ip_min_max },
- { .nf_type = GNILND,
- .nf_name = "gni",
- .nf_modname = "kgnilnd",
- .nf_addr2str = libcfs_decnum_addr2str,
- .nf_str2addr = libcfs_num_str2addr,
- .nf_parse_addrlist = libcfs_num_parse,
- .nf_print_addrlist = libcfs_num_addr_range_print,
- .nf_match_addr = libcfs_num_match,
- .nf_is_contiguous = cfs_num_is_contiguous,
- .nf_min_max = cfs_num_min_max },
- { .nf_type = GNIIPLND,
- .nf_name = "gip",
- .nf_modname = "kgnilnd",
- .nf_addr2str = libcfs_ip_addr2str,
- .nf_str2addr = libcfs_ip_str2addr,
- .nf_parse_addrlist = cfs_ip_addr_parse,
- .nf_print_addrlist = libcfs_ip_addr_range_print,
- .nf_match_addr = cfs_ip_addr_match,
- .nf_is_contiguous = cfs_ip_is_contiguous,
- .nf_min_max = cfs_ip_min_max },
-};
-
-static const size_t libcfs_nnetstrfns = ARRAY_SIZE(libcfs_netstrfns);
-
-static struct netstrfns *
-libcfs_lnd2netstrfns(__u32 lnd)
-{
- int i;
-
- for (i = 0; i < libcfs_nnetstrfns; i++)
- if (lnd == libcfs_netstrfns[i].nf_type)
- return &libcfs_netstrfns[i];
-
- return NULL;
-}
-
-static struct netstrfns *
-libcfs_namenum2netstrfns(const char *name)
-{
- struct netstrfns *nf;
- int i;
-
- for (i = 0; i < libcfs_nnetstrfns; i++) {
- nf = &libcfs_netstrfns[i];
- if (!strncmp(name, nf->nf_name, strlen(nf->nf_name)))
- return nf;
- }
- return NULL;
-}
-
-static struct netstrfns *
-libcfs_name2netstrfns(const char *name)
-{
- int i;
-
- for (i = 0; i < libcfs_nnetstrfns; i++)
- if (!strcmp(libcfs_netstrfns[i].nf_name, name))
- return &libcfs_netstrfns[i];
-
- return NULL;
-}
-
-int
-libcfs_isknown_lnd(__u32 lnd)
-{
- return !!libcfs_lnd2netstrfns(lnd);
-}
-EXPORT_SYMBOL(libcfs_isknown_lnd);
-
-char *
-libcfs_lnd2modname(__u32 lnd)
-{
- struct netstrfns *nf = libcfs_lnd2netstrfns(lnd);
-
- return nf ? nf->nf_modname : NULL;
-}
-EXPORT_SYMBOL(libcfs_lnd2modname);
-
-int
-libcfs_str2lnd(const char *str)
-{
- struct netstrfns *nf = libcfs_name2netstrfns(str);
-
- if (nf)
- return nf->nf_type;
-
- return -ENXIO;
-}
-EXPORT_SYMBOL(libcfs_str2lnd);
-
-char *
-libcfs_lnd2str_r(__u32 lnd, char *buf, size_t buf_size)
-{
- struct netstrfns *nf;
-
- nf = libcfs_lnd2netstrfns(lnd);
- if (!nf)
- snprintf(buf, buf_size, "?%u?", lnd);
- else
- snprintf(buf, buf_size, "%s", nf->nf_name);
-
- return buf;
-}
-EXPORT_SYMBOL(libcfs_lnd2str_r);
-
-char *
-libcfs_net2str_r(__u32 net, char *buf, size_t buf_size)
-{
- __u32 nnum = LNET_NETNUM(net);
- __u32 lnd = LNET_NETTYP(net);
- struct netstrfns *nf;
-
- nf = libcfs_lnd2netstrfns(lnd);
- if (!nf)
- snprintf(buf, buf_size, "<%u:%u>", lnd, nnum);
- else if (!nnum)
- snprintf(buf, buf_size, "%s", nf->nf_name);
- else
- snprintf(buf, buf_size, "%s%u", nf->nf_name, nnum);
-
- return buf;
-}
-EXPORT_SYMBOL(libcfs_net2str_r);
-
-char *
-libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size)
-{
- __u32 addr = LNET_NIDADDR(nid);
- __u32 net = LNET_NIDNET(nid);
- __u32 nnum = LNET_NETNUM(net);
- __u32 lnd = LNET_NETTYP(net);
- struct netstrfns *nf;
-
- if (nid == LNET_NID_ANY) {
- strncpy(buf, "<?>", buf_size);
- buf[buf_size - 1] = '\0';
- return buf;
- }
-
- nf = libcfs_lnd2netstrfns(lnd);
- if (!nf) {
- snprintf(buf, buf_size, "%x@<%u:%u>", addr, lnd, nnum);
- } else {
- size_t addr_len;
-
- nf->nf_addr2str(addr, buf, buf_size);
- addr_len = strlen(buf);
- if (!nnum)
- snprintf(buf + addr_len, buf_size - addr_len, "@%s",
- nf->nf_name);
- else
- snprintf(buf + addr_len, buf_size - addr_len, "@%s%u",
- nf->nf_name, nnum);
- }
-
- return buf;
-}
-EXPORT_SYMBOL(libcfs_nid2str_r);
-
-static struct netstrfns *
-libcfs_str2net_internal(const char *str, __u32 *net)
-{
- struct netstrfns *nf = NULL;
- int nob;
- unsigned int netnum;
- int i;
-
- for (i = 0; i < libcfs_nnetstrfns; i++) {
- nf = &libcfs_netstrfns[i];
- if (!strncmp(str, nf->nf_name, strlen(nf->nf_name)))
- break;
- }
-
- if (i == libcfs_nnetstrfns)
- return NULL;
-
- nob = strlen(nf->nf_name);
-
- if (strlen(str) == (unsigned int)nob) {
- netnum = 0;
- } else {
- if (nf->nf_type == LOLND) /* net number not allowed */
- return NULL;
-
- str += nob;
- i = strlen(str);
- if (sscanf(str, "%u%n", &netnum, &i) < 1 ||
- i != (int)strlen(str))
- return NULL;
- }
-
- *net = LNET_MKNET(nf->nf_type, netnum);
- return nf;
-}
-
-__u32
-libcfs_str2net(const char *str)
-{
- __u32 net;
-
- if (libcfs_str2net_internal(str, &net))
- return net;
-
- return LNET_NIDNET(LNET_NID_ANY);
-}
-EXPORT_SYMBOL(libcfs_str2net);
-
-lnet_nid_t
-libcfs_str2nid(const char *str)
-{
- const char *sep = strchr(str, '@');
- struct netstrfns *nf;
- __u32 net;
- __u32 addr;
-
- if (sep) {
- nf = libcfs_str2net_internal(sep + 1, &net);
- if (!nf)
- return LNET_NID_ANY;
- } else {
- sep = str + strlen(str);
- net = LNET_MKNET(SOCKLND, 0);
- nf = libcfs_lnd2netstrfns(SOCKLND);
- LASSERT(nf);
- }
-
- if (!nf->nf_str2addr(str, (int)(sep - str), &addr))
- return LNET_NID_ANY;
-
- return LNET_MKNID(net, addr);
-}
-EXPORT_SYMBOL(libcfs_str2nid);
-
-char *
-libcfs_id2str(struct lnet_process_id id)
-{
- char *str = libcfs_next_nidstring();
-
- if (id.pid == LNET_PID_ANY) {
- snprintf(str, LNET_NIDSTR_SIZE,
- "LNET_PID_ANY-%s", libcfs_nid2str(id.nid));
- return str;
- }
-
- snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s",
- id.pid & LNET_PID_USERFLAG ? "U" : "",
- id.pid & ~LNET_PID_USERFLAG, libcfs_nid2str(id.nid));
- return str;
-}
-EXPORT_SYMBOL(libcfs_id2str);
-
-int
-libcfs_str2anynid(lnet_nid_t *nidp, const char *str)
-{
- if (!strcmp(str, "*")) {
- *nidp = LNET_NID_ANY;
- return 1;
- }
-
- *nidp = libcfs_str2nid(str);
- return *nidp != LNET_NID_ANY;
-}
-EXPORT_SYMBOL(libcfs_str2anynid);
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
deleted file mode 100644
index 3d4caa609c83..000000000000
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ /dev/null
@@ -1,456 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/peer.c
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/lnet/lib-lnet.h>
-#include <uapi/linux/lnet/lnet-dlc.h>
-
-int
-lnet_peer_tables_create(void)
-{
- struct lnet_peer_table *ptable;
- struct list_head *hash;
- int i;
- int j;
-
- the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*ptable));
- if (!the_lnet.ln_peer_tables) {
- CERROR("Failed to allocate cpu-partition peer tables\n");
- return -ENOMEM;
- }
-
- cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- INIT_LIST_HEAD(&ptable->pt_deathrow);
-
- hash = kvmalloc_cpt(LNET_PEER_HASH_SIZE * sizeof(*hash),
- GFP_KERNEL, i);
- if (!hash) {
- CERROR("Failed to create peer hash table\n");
- lnet_peer_tables_destroy();
- return -ENOMEM;
- }
-
- for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
- INIT_LIST_HEAD(&hash[j]);
- ptable->pt_hash = hash; /* sign of initialization */
- }
-
- return 0;
-}
-
-void
-lnet_peer_tables_destroy(void)
-{
- struct lnet_peer_table *ptable;
- struct list_head *hash;
- int i;
- int j;
-
- if (!the_lnet.ln_peer_tables)
- return;
-
- cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- hash = ptable->pt_hash;
- if (!hash) /* not initialized */
- break;
-
- LASSERT(list_empty(&ptable->pt_deathrow));
-
- ptable->pt_hash = NULL;
- for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
- LASSERT(list_empty(&hash[j]));
-
- kvfree(hash);
- }
-
- cfs_percpt_free(the_lnet.ln_peer_tables);
- the_lnet.ln_peer_tables = NULL;
-}
-
-static void
-lnet_peer_table_cleanup_locked(struct lnet_ni *ni,
- struct lnet_peer_table *ptable)
-{
- int i;
- struct lnet_peer *lp;
- struct lnet_peer *tmp;
-
- for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
- list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
- lp_hashlist) {
- if (ni && ni != lp->lp_ni)
- continue;
- list_del_init(&lp->lp_hashlist);
- /* Lose hash table's ref */
- ptable->pt_zombies++;
- lnet_peer_decref_locked(lp);
- }
- }
-}
-
-static void
-lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable,
- int cpt_locked)
-{
- int i;
-
- for (i = 3; ptable->pt_zombies; i++) {
- lnet_net_unlock(cpt_locked);
-
- if (is_power_of_2(i)) {
- CDEBUG(D_WARNING,
- "Waiting for %d zombies on peer table\n",
- ptable->pt_zombies);
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ >> 1);
- lnet_net_lock(cpt_locked);
- }
-}
-
-static void
-lnet_peer_table_del_rtrs_locked(struct lnet_ni *ni,
- struct lnet_peer_table *ptable,
- int cpt_locked)
-{
- struct lnet_peer *lp;
- struct lnet_peer *tmp;
- lnet_nid_t lp_nid;
- int i;
-
- for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
- list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
- lp_hashlist) {
- if (ni != lp->lp_ni)
- continue;
-
- if (!lp->lp_rtr_refcount)
- continue;
-
- lp_nid = lp->lp_nid;
-
- lnet_net_unlock(cpt_locked);
- lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid);
- lnet_net_lock(cpt_locked);
- }
- }
-}
-
-void
-lnet_peer_tables_cleanup(struct lnet_ni *ni)
-{
- struct lnet_peer_table *ptable;
- struct list_head deathrow;
- struct lnet_peer *lp;
- struct lnet_peer *temp;
- int i;
-
- INIT_LIST_HEAD(&deathrow);
-
- LASSERT(the_lnet.ln_shutdown || ni);
- /*
- * If just deleting the peers for a NI, get rid of any routes these
- * peers are gateways for.
- */
- cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- lnet_net_lock(i);
- lnet_peer_table_del_rtrs_locked(ni, ptable, i);
- lnet_net_unlock(i);
- }
-
- /*
- * Start the process of moving the applicable peers to
- * deathrow.
- */
- cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- lnet_net_lock(i);
- lnet_peer_table_cleanup_locked(ni, ptable);
- lnet_net_unlock(i);
- }
-
- /* Cleanup all entries on deathrow. */
- cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- lnet_net_lock(i);
- lnet_peer_table_deathrow_wait_locked(ptable, i);
- list_splice_init(&ptable->pt_deathrow, &deathrow);
- lnet_net_unlock(i);
- }
-
- list_for_each_entry_safe(lp, temp, &deathrow, lp_hashlist) {
- list_del(&lp->lp_hashlist);
- kfree(lp);
- }
-}
-
-void
-lnet_destroy_peer_locked(struct lnet_peer *lp)
-{
- struct lnet_peer_table *ptable;
-
- LASSERT(!lp->lp_refcount);
- LASSERT(!lp->lp_rtr_refcount);
- LASSERT(list_empty(&lp->lp_txq));
- LASSERT(list_empty(&lp->lp_hashlist));
- LASSERT(!lp->lp_txqnob);
-
- ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
- LASSERT(ptable->pt_number > 0);
- ptable->pt_number--;
-
- lnet_ni_decref_locked(lp->lp_ni, lp->lp_cpt);
- lp->lp_ni = NULL;
-
- list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
- LASSERT(ptable->pt_zombies > 0);
- ptable->pt_zombies--;
-}
-
-struct lnet_peer *
-lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
-{
- struct list_head *peers;
- struct lnet_peer *lp;
-
- LASSERT(!the_lnet.ln_shutdown);
-
- peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
- list_for_each_entry(lp, peers, lp_hashlist) {
- if (lp->lp_nid == nid) {
- lnet_peer_addref_locked(lp);
- return lp;
- }
- }
-
- return NULL;
-}
-
-int
-lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt)
-{
- struct lnet_peer_table *ptable;
- struct lnet_peer *lp = NULL;
- struct lnet_peer *lp2;
- int cpt2;
- int rc = 0;
-
- *lpp = NULL;
- if (the_lnet.ln_shutdown) /* it's shutting down */
- return -ESHUTDOWN;
-
- /* cpt can be LNET_LOCK_EX if it's called from router functions */
- cpt2 = cpt != LNET_LOCK_EX ? cpt : lnet_cpt_of_nid_locked(nid);
-
- ptable = the_lnet.ln_peer_tables[cpt2];
- lp = lnet_find_peer_locked(ptable, nid);
- if (lp) {
- *lpp = lp;
- return 0;
- }
-
- if (!list_empty(&ptable->pt_deathrow)) {
- lp = list_entry(ptable->pt_deathrow.next,
- struct lnet_peer, lp_hashlist);
- list_del(&lp->lp_hashlist);
- }
-
- /*
- * take extra refcount in case another thread has shutdown LNet
- * and destroyed locks and peer-table before I finish the allocation
- */
- ptable->pt_number++;
- lnet_net_unlock(cpt);
-
- if (lp)
- memset(lp, 0, sizeof(*lp));
- else
- lp = kzalloc_cpt(sizeof(*lp), GFP_NOFS, cpt2);
-
- if (!lp) {
- rc = -ENOMEM;
- lnet_net_lock(cpt);
- goto out;
- }
-
- INIT_LIST_HEAD(&lp->lp_txq);
- INIT_LIST_HEAD(&lp->lp_rtrq);
- INIT_LIST_HEAD(&lp->lp_routes);
-
- lp->lp_notify = 0;
- lp->lp_notifylnd = 0;
- lp->lp_notifying = 0;
- lp->lp_alive_count = 0;
- lp->lp_timestamp = 0;
- lp->lp_alive = !lnet_peers_start_down(); /* 1 bit!! */
- lp->lp_last_alive = cfs_time_current(); /* assumes alive */
- lp->lp_last_query = 0; /* haven't asked NI yet */
- lp->lp_ping_timestamp = 0;
- lp->lp_ping_feats = LNET_PING_FEAT_INVAL;
- lp->lp_nid = nid;
- lp->lp_cpt = cpt2;
- lp->lp_refcount = 2; /* 1 for caller; 1 for hash */
- lp->lp_rtr_refcount = 0;
-
- lnet_net_lock(cpt);
-
- if (the_lnet.ln_shutdown) {
- rc = -ESHUTDOWN;
- goto out;
- }
-
- lp2 = lnet_find_peer_locked(ptable, nid);
- if (lp2) {
- *lpp = lp2;
- goto out;
- }
-
- lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2);
- if (!lp->lp_ni) {
- rc = -EHOSTUNREACH;
- goto out;
- }
-
- lp->lp_txcredits = lp->lp_ni->ni_peertxcredits;
- lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
- lp->lp_rtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
- lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
-
- list_add_tail(&lp->lp_hashlist,
- &ptable->pt_hash[lnet_nid2peerhash(nid)]);
- ptable->pt_version++;
- *lpp = lp;
-
- return 0;
-out:
- if (lp)
- list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
- ptable->pt_number--;
- return rc;
-}
-
-void
-lnet_debug_peer(lnet_nid_t nid)
-{
- char *aliveness = "NA";
- struct lnet_peer *lp;
- int rc;
- int cpt;
-
- cpt = lnet_cpt_of_nid(nid);
- lnet_net_lock(cpt);
-
- rc = lnet_nid2peer_locked(&lp, nid, cpt);
- if (rc) {
- lnet_net_unlock(cpt);
- CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
- return;
- }
-
- if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
- aliveness = lp->lp_alive ? "up" : "down";
-
- CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
- libcfs_nid2str(lp->lp_nid), lp->lp_refcount,
- aliveness, lp->lp_ni->ni_peertxcredits,
- lp->lp_rtrcredits, lp->lp_minrtrcredits,
- lp->lp_txcredits, lp->lp_mintxcredits, lp->lp_txqnob);
-
- lnet_peer_decref_locked(lp);
-
- lnet_net_unlock(cpt);
-}
-
-int
-lnet_get_peer_info(__u32 peer_index, __u64 *nid,
- char aliveness[LNET_MAX_STR_LEN],
- __u32 *cpt_iter, __u32 *refcount,
- __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
- __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
- __u32 *peer_tx_qnob)
-{
- struct lnet_peer_table *peer_table;
- struct lnet_peer *lp;
- bool found = false;
- int lncpt, j;
-
- /* get the number of CPTs */
- lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
-
- /*
- * if the cpt number to be examined is >= the number of cpts in
- * the system then indicate that there are no more cpts to examin
- */
- if (*cpt_iter >= lncpt)
- return -ENOENT;
-
- /* get the current table */
- peer_table = the_lnet.ln_peer_tables[*cpt_iter];
- /* if the ptable is NULL then there are no more cpts to examine */
- if (!peer_table)
- return -ENOENT;
-
- lnet_net_lock(*cpt_iter);
-
- for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
- struct list_head *peers = &peer_table->pt_hash[j];
-
- list_for_each_entry(lp, peers, lp_hashlist) {
- if (peer_index-- > 0)
- continue;
-
- snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
- if (lnet_isrouter(lp) ||
- lnet_peer_aliveness_enabled(lp))
- snprintf(aliveness, LNET_MAX_STR_LEN,
- lp->lp_alive ? "up" : "down");
-
- *nid = lp->lp_nid;
- *refcount = lp->lp_refcount;
- *ni_peer_tx_credits = lp->lp_ni->ni_peertxcredits;
- *peer_tx_credits = lp->lp_txcredits;
- *peer_rtr_credits = lp->lp_rtrcredits;
- *peer_min_rtr_credits = lp->lp_mintxcredits;
- *peer_tx_qnob = lp->lp_txqnob;
-
- found = true;
- }
- }
- lnet_net_unlock(*cpt_iter);
-
- *cpt_iter = lncpt;
-
- return found ? 0 : -ENOENT;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
deleted file mode 100644
index a3c3f4959f46..000000000000
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ /dev/null
@@ -1,1800 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- *
- * This file is part of Portals
- * http://sourceforge.net/projects/sandiaportals/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/completion.h>
-#include <linux/lnet/lib-lnet.h>
-
-#define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
-#define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4)
-#define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */
-#define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4)
-#define LNET_NRB_SMALL_PAGES 1
-#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
-#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
-#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
- PAGE_SHIFT)
-
-static char *forwarding = "";
-module_param(forwarding, charp, 0444);
-MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
-
-static int tiny_router_buffers;
-module_param(tiny_router_buffers, int, 0444);
-MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
-static int small_router_buffers;
-module_param(small_router_buffers, int, 0444);
-MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
-static int large_router_buffers;
-module_param(large_router_buffers, int, 0444);
-MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
-static int peer_buffer_credits;
-module_param(peer_buffer_credits, int, 0444);
-MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
-
-static int auto_down = 1;
-module_param(auto_down, int, 0444);
-MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
-
-int
-lnet_peer_buffer_credits(struct lnet_ni *ni)
-{
- /* NI option overrides LNet default */
- if (ni->ni_peerrtrcredits > 0)
- return ni->ni_peerrtrcredits;
- if (peer_buffer_credits > 0)
- return peer_buffer_credits;
-
- /*
- * As an approximation, allow this peer the same number of router
- * buffers as it is allowed outstanding sends
- */
- return ni->ni_peertxcredits;
-}
-
-/* forward ref's */
-static int lnet_router_checker(void *);
-
-static int check_routers_before_use;
-module_param(check_routers_before_use, int, 0444);
-MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
-
-int avoid_asym_router_failure = 1;
-module_param(avoid_asym_router_failure, int, 0644);
-MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
-
-static int dead_router_check_interval = 60;
-module_param(dead_router_check_interval, int, 0644);
-MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
-
-static int live_router_check_interval = 60;
-module_param(live_router_check_interval, int, 0644);
-MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
-
-static int router_ping_timeout = 50;
-module_param(router_ping_timeout, int, 0644);
-MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
-
-int
-lnet_peers_start_down(void)
-{
- return check_routers_before_use;
-}
-
-void
-lnet_notify_locked(struct lnet_peer *lp, int notifylnd, int alive,
- unsigned long when)
-{
- if (time_before(when, lp->lp_timestamp)) { /* out of date information */
- CDEBUG(D_NET, "Out of date\n");
- return;
- }
-
- lp->lp_timestamp = when; /* update timestamp */
- lp->lp_ping_deadline = 0; /* disable ping timeout */
-
- if (lp->lp_alive_count && /* got old news */
- (!lp->lp_alive) == (!alive)) { /* new date for old news */
- CDEBUG(D_NET, "Old news\n");
- return;
- }
-
- /* Flag that notification is outstanding */
-
- lp->lp_alive_count++;
- lp->lp_alive = !(!alive); /* 1 bit! */
- lp->lp_notify = 1;
- lp->lp_notifylnd |= notifylnd;
- if (lp->lp_alive)
- lp->lp_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
-
- CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
-}
-
-static void
-lnet_ni_notify_locked(struct lnet_ni *ni, struct lnet_peer *lp)
-{
- int alive;
- int notifylnd;
-
- /*
- * Notify only in 1 thread at any time to ensure ordered notification.
- * NB individual events can be missed; the only guarantee is that you
- * always get the most recent news
- */
- if (lp->lp_notifying || !ni)
- return;
-
- lp->lp_notifying = 1;
-
- while (lp->lp_notify) {
- alive = lp->lp_alive;
- notifylnd = lp->lp_notifylnd;
-
- lp->lp_notifylnd = 0;
- lp->lp_notify = 0;
-
- if (notifylnd && ni->ni_lnd->lnd_notify) {
- lnet_net_unlock(lp->lp_cpt);
-
- /*
- * A new notification could happen now; I'll handle it
- * when control returns to me
- */
- ni->ni_lnd->lnd_notify(ni, lp->lp_nid, alive);
-
- lnet_net_lock(lp->lp_cpt);
- }
- }
-
- lp->lp_notifying = 0;
-}
-
-static void
-lnet_rtr_addref_locked(struct lnet_peer *lp)
-{
- LASSERT(lp->lp_refcount > 0);
- LASSERT(lp->lp_rtr_refcount >= 0);
-
- /* lnet_net_lock must be exclusively locked */
- lp->lp_rtr_refcount++;
- if (lp->lp_rtr_refcount == 1) {
- struct list_head *pos;
-
- /* a simple insertion sort */
- list_for_each_prev(pos, &the_lnet.ln_routers) {
- struct lnet_peer *rtr;
-
- rtr = list_entry(pos, struct lnet_peer, lp_rtr_list);
- if (rtr->lp_nid < lp->lp_nid)
- break;
- }
-
- list_add(&lp->lp_rtr_list, pos);
- /* addref for the_lnet.ln_routers */
- lnet_peer_addref_locked(lp);
- the_lnet.ln_routers_version++;
- }
-}
-
-static void
-lnet_rtr_decref_locked(struct lnet_peer *lp)
-{
- LASSERT(lp->lp_refcount > 0);
- LASSERT(lp->lp_rtr_refcount > 0);
-
- /* lnet_net_lock must be exclusively locked */
- lp->lp_rtr_refcount--;
- if (!lp->lp_rtr_refcount) {
- LASSERT(list_empty(&lp->lp_routes));
-
- if (lp->lp_rcd) {
- list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_rcd_deathrow);
- lp->lp_rcd = NULL;
- }
-
- list_del(&lp->lp_rtr_list);
- /* decref for the_lnet.ln_routers */
- lnet_peer_decref_locked(lp);
- the_lnet.ln_routers_version++;
- }
-}
-
-struct lnet_remotenet *
-lnet_find_net_locked(__u32 net)
-{
- struct lnet_remotenet *rnet;
- struct list_head *rn_list;
-
- LASSERT(!the_lnet.ln_shutdown);
-
- rn_list = lnet_net2rnethash(net);
- list_for_each_entry(rnet, rn_list, lrn_list) {
- if (rnet->lrn_net == net)
- return rnet;
- }
- return NULL;
-}
-
-static void lnet_shuffle_seed(void)
-{
- static int seeded;
- struct lnet_ni *ni;
-
- if (seeded)
- return;
-
- /*
- * Nodes with small feet have little entropy
- * the NID for this node gives the most entropy in the low bits
- */
- list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
- __u32 lnd_type, seed;
-
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
- if (lnd_type != LOLND) {
- seed = (LNET_NIDADDR(ni->ni_nid) | lnd_type);
- add_device_randomness(&seed, sizeof(seed));
- }
- }
-
- seeded = 1;
-}
-
-/* NB expects LNET_LOCK held */
-static void
-lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route)
-{
- unsigned int len = 0;
- unsigned int offset = 0;
- struct list_head *e;
-
- lnet_shuffle_seed();
-
- list_for_each(e, &rnet->lrn_routes) {
- len++;
- }
-
- /* len+1 positions to add a new entry */
- offset = prandom_u32_max(len + 1);
- list_for_each(e, &rnet->lrn_routes) {
- if (!offset)
- break;
- offset--;
- }
- list_add(&route->lr_list, e);
- list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
-
- the_lnet.ln_remote_nets_version++;
- lnet_rtr_addref_locked(route->lr_gateway);
-}
-
-int
-lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
- unsigned int priority)
-{
- struct list_head *e;
- struct lnet_remotenet *rnet;
- struct lnet_remotenet *rnet2;
- struct lnet_route *route;
- struct lnet_ni *ni;
- int add_route;
- int rc;
-
- CDEBUG(D_NET, "Add route: net %s hops %d priority %u gw %s\n",
- libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
-
- if (gateway == LNET_NID_ANY ||
- LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
- net == LNET_NIDNET(LNET_NID_ANY) ||
- LNET_NETTYP(net) == LOLND ||
- LNET_NIDNET(gateway) == net ||
- (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
- return -EINVAL;
-
- if (lnet_islocalnet(net)) /* it's a local network */
- return -EEXIST;
-
- /* Assume net, route, all new */
- route = kzalloc(sizeof(*route), GFP_NOFS);
- rnet = kzalloc(sizeof(*rnet), GFP_NOFS);
- if (!route || !rnet) {
- CERROR("Out of memory creating route %s %d %s\n",
- libcfs_net2str(net), hops, libcfs_nid2str(gateway));
- kfree(route);
- kfree(rnet);
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&rnet->lrn_routes);
- rnet->lrn_net = net;
- route->lr_hops = hops;
- route->lr_net = net;
- route->lr_priority = priority;
-
- lnet_net_lock(LNET_LOCK_EX);
-
- rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
- if (rc) {
- lnet_net_unlock(LNET_LOCK_EX);
-
- kfree(route);
- kfree(rnet);
-
- if (rc == -EHOSTUNREACH) /* gateway is not on a local net */
- return rc; /* ignore the route entry */
- CERROR("Error %d creating route %s %d %s\n", rc,
- libcfs_net2str(net), hops,
- libcfs_nid2str(gateway));
- return rc;
- }
-
- LASSERT(!the_lnet.ln_shutdown);
-
- rnet2 = lnet_find_net_locked(net);
- if (!rnet2) {
- /* new network */
- list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
- rnet2 = rnet;
- }
-
- /* Search for a duplicate route (it's a NOOP if it is) */
- add_route = 1;
- list_for_each(e, &rnet2->lrn_routes) {
- struct lnet_route *route2;
-
- route2 = list_entry(e, struct lnet_route, lr_list);
- if (route2->lr_gateway == route->lr_gateway) {
- add_route = 0;
- break;
- }
-
- /* our lookups must be true */
- LASSERT(route2->lr_gateway->lp_nid != gateway);
- }
-
- if (add_route) {
- lnet_peer_addref_locked(route->lr_gateway); /* +1 for notify */
- lnet_add_route_to_rnet(rnet2, route);
-
- ni = route->lr_gateway->lp_ni;
- lnet_net_unlock(LNET_LOCK_EX);
-
- /* XXX Assume alive */
- if (ni->ni_lnd->lnd_notify)
- ni->ni_lnd->lnd_notify(ni, gateway, 1);
-
- lnet_net_lock(LNET_LOCK_EX);
- }
-
- /* -1 for notify or !add_route */
- lnet_peer_decref_locked(route->lr_gateway);
- lnet_net_unlock(LNET_LOCK_EX);
- rc = 0;
-
- if (!add_route) {
- rc = -EEXIST;
- kfree(route);
- }
-
- if (rnet != rnet2)
- kfree(rnet);
-
- /* indicate to startup the router checker if configured */
- wake_up(&the_lnet.ln_rc_waitq);
-
- return rc;
-}
-
-int
-lnet_check_routes(void)
-{
- struct lnet_remotenet *rnet;
- struct lnet_route *route;
- struct lnet_route *route2;
- struct list_head *e1;
- struct list_head *e2;
- int cpt;
- struct list_head *rn_list;
- int i;
-
- cpt = lnet_net_lock_current();
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
- rn_list = &the_lnet.ln_remote_nets_hash[i];
- list_for_each(e1, rn_list) {
- rnet = list_entry(e1, struct lnet_remotenet, lrn_list);
-
- route2 = NULL;
- list_for_each(e2, &rnet->lrn_routes) {
- lnet_nid_t nid1;
- lnet_nid_t nid2;
- int net;
-
- route = list_entry(e2, struct lnet_route, lr_list);
-
- if (!route2) {
- route2 = route;
- continue;
- }
-
- if (route->lr_gateway->lp_ni ==
- route2->lr_gateway->lp_ni)
- continue;
-
- nid1 = route->lr_gateway->lp_nid;
- nid2 = route2->lr_gateway->lp_nid;
- net = rnet->lrn_net;
-
- lnet_net_unlock(cpt);
-
- CERROR("Routes to %s via %s and %s not supported\n",
- libcfs_net2str(net),
- libcfs_nid2str(nid1),
- libcfs_nid2str(nid2));
- return -EINVAL;
- }
- }
- }
-
- lnet_net_unlock(cpt);
- return 0;
-}
-
-int
-lnet_del_route(__u32 net, lnet_nid_t gw_nid)
-{
- struct lnet_peer *gateway;
- struct lnet_remotenet *rnet;
- struct lnet_route *route;
- struct list_head *e1;
- struct list_head *e2;
- int rc = -ENOENT;
- struct list_head *rn_list;
- int idx = 0;
-
- CDEBUG(D_NET, "Del route: net %s : gw %s\n",
- libcfs_net2str(net), libcfs_nid2str(gw_nid));
-
- /*
- * NB Caller may specify either all routes via the given gateway
- * or a specific route entry actual NIDs)
- */
- lnet_net_lock(LNET_LOCK_EX);
- if (net == LNET_NIDNET(LNET_NID_ANY))
- rn_list = &the_lnet.ln_remote_nets_hash[0];
- else
- rn_list = lnet_net2rnethash(net);
-
- again:
- list_for_each(e1, rn_list) {
- rnet = list_entry(e1, struct lnet_remotenet, lrn_list);
-
- if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
- net == rnet->lrn_net))
- continue;
-
- list_for_each(e2, &rnet->lrn_routes) {
- route = list_entry(e2, struct lnet_route, lr_list);
-
- gateway = route->lr_gateway;
- if (!(gw_nid == LNET_NID_ANY ||
- gw_nid == gateway->lp_nid))
- continue;
-
- list_del(&route->lr_list);
- list_del(&route->lr_gwlist);
- the_lnet.ln_remote_nets_version++;
-
- if (list_empty(&rnet->lrn_routes))
- list_del(&rnet->lrn_list);
- else
- rnet = NULL;
-
- lnet_rtr_decref_locked(gateway);
- lnet_peer_decref_locked(gateway);
-
- lnet_net_unlock(LNET_LOCK_EX);
-
- kfree(route);
- kfree(rnet);
-
- rc = 0;
- lnet_net_lock(LNET_LOCK_EX);
- goto again;
- }
- }
-
- if (net == LNET_NIDNET(LNET_NID_ANY) &&
- ++idx < LNET_REMOTE_NETS_HASH_SIZE) {
- rn_list = &the_lnet.ln_remote_nets_hash[idx];
- goto again;
- }
- lnet_net_unlock(LNET_LOCK_EX);
-
- return rc;
-}
-
-void
-lnet_destroy_routes(void)
-{
- lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
-}
-
-int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg)
-{
- int i, rc = -ENOENT, j;
-
- if (!the_lnet.ln_rtrpools)
- return rc;
-
- for (i = 0; i < LNET_NRBPOOLS; i++) {
- struct lnet_rtrbufpool *rbp;
-
- lnet_net_lock(LNET_LOCK_EX);
- cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) {
- if (i++ != idx)
- continue;
-
- pool_cfg->pl_pools[i].pl_npages = rbp[i].rbp_npages;
- pool_cfg->pl_pools[i].pl_nbuffers = rbp[i].rbp_nbuffers;
- pool_cfg->pl_pools[i].pl_credits = rbp[i].rbp_credits;
- pool_cfg->pl_pools[i].pl_mincredits = rbp[i].rbp_mincredits;
- rc = 0;
- break;
- }
- lnet_net_unlock(LNET_LOCK_EX);
- }
-
- lnet_net_lock(LNET_LOCK_EX);
- pool_cfg->pl_routing = the_lnet.ln_routing;
- lnet_net_unlock(LNET_LOCK_EX);
-
- return rc;
-}
-
-int
-lnet_get_route(int idx, __u32 *net, __u32 *hops,
- lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
-{
- struct list_head *e1;
- struct list_head *e2;
- struct lnet_remotenet *rnet;
- struct lnet_route *route;
- int cpt;
- int i;
- struct list_head *rn_list;
-
- cpt = lnet_net_lock_current();
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
- rn_list = &the_lnet.ln_remote_nets_hash[i];
- list_for_each(e1, rn_list) {
- rnet = list_entry(e1, struct lnet_remotenet, lrn_list);
-
- list_for_each(e2, &rnet->lrn_routes) {
- route = list_entry(e2, struct lnet_route,
- lr_list);
-
- if (!idx--) {
- *net = rnet->lrn_net;
- *hops = route->lr_hops;
- *priority = route->lr_priority;
- *gateway = route->lr_gateway->lp_nid;
- *alive = lnet_is_route_alive(route);
- lnet_net_unlock(cpt);
- return 0;
- }
- }
- }
- }
-
- lnet_net_unlock(cpt);
- return -ENOENT;
-}
-
-void
-lnet_swap_pinginfo(struct lnet_ping_info *info)
-{
- int i;
- struct lnet_ni_status *stat;
-
- __swab32s(&info->pi_magic);
- __swab32s(&info->pi_features);
- __swab32s(&info->pi_pid);
- __swab32s(&info->pi_nnis);
- for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
- stat = &info->pi_ni[i];
- __swab64s(&stat->ns_nid);
- __swab32s(&stat->ns_status);
- }
-}
-
-/**
- * parse router-checker pinginfo, record number of down NIs for remote
- * networks on that router.
- */
-static void
-lnet_parse_rc_info(struct lnet_rc_data *rcd)
-{
- struct lnet_ping_info *info = rcd->rcd_pinginfo;
- struct lnet_peer *gw = rcd->rcd_gateway;
- struct lnet_route *rte;
-
- if (!gw->lp_alive)
- return;
-
- if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
- lnet_swap_pinginfo(info);
-
- /* NB always racing with network! */
- if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
- CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
- libcfs_nid2str(gw->lp_nid), info->pi_magic);
- gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
- return;
- }
-
- gw->lp_ping_feats = info->pi_features;
- if (!(gw->lp_ping_feats & LNET_PING_FEAT_MASK)) {
- CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
- libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
- return; /* nothing I can understand */
- }
-
- if (!(gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS))
- return; /* can't carry NI status info */
-
- list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
- int down = 0;
- int up = 0;
- int i;
-
- if (gw->lp_ping_feats & LNET_PING_FEAT_RTE_DISABLED) {
- rte->lr_downis = 1;
- continue;
- }
-
- for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
- struct lnet_ni_status *stat = &info->pi_ni[i];
- lnet_nid_t nid = stat->ns_nid;
-
- if (nid == LNET_NID_ANY) {
- CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
- libcfs_nid2str(gw->lp_nid));
- gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
- return;
- }
-
- if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
- continue;
-
- if (stat->ns_status == LNET_NI_STATUS_DOWN) {
- down++;
- continue;
- }
-
- if (stat->ns_status == LNET_NI_STATUS_UP) {
- if (LNET_NIDNET(nid) == rte->lr_net) {
- up = 1;
- break;
- }
- continue;
- }
-
- CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
- libcfs_nid2str(gw->lp_nid), stat->ns_status);
- gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
- return;
- }
-
- if (up) { /* ignore downed NIs if NI for dest network is up */
- rte->lr_downis = 0;
- continue;
- }
- /**
- * if @down is zero and this route is single-hop, it means
- * we can't find NI for target network
- */
- if (!down && rte->lr_hops == 1)
- down = 1;
-
- rte->lr_downis = down;
- }
-}
-
-static void
-lnet_router_checker_event(struct lnet_event *event)
-{
- struct lnet_rc_data *rcd = event->md.user_ptr;
- struct lnet_peer *lp;
-
- LASSERT(rcd);
-
- if (event->unlinked) {
- LNetInvalidateMDHandle(&rcd->rcd_mdh);
- return;
- }
-
- LASSERT(event->type == LNET_EVENT_SEND ||
- event->type == LNET_EVENT_REPLY);
-
- lp = rcd->rcd_gateway;
- LASSERT(lp);
-
- /*
- * NB: it's called with holding lnet_res_lock, we have a few
- * places need to hold both locks at the same time, please take
- * care of lock ordering
- */
- lnet_net_lock(lp->lp_cpt);
- if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
- /* ignore if no longer a router or rcd is replaced */
- goto out;
- }
-
- if (event->type == LNET_EVENT_SEND) {
- lp->lp_ping_notsent = 0;
- if (!event->status)
- goto out;
- }
-
- /* LNET_EVENT_REPLY */
- /*
- * A successful REPLY means the router is up. If _any_ comms
- * to the router fail I assume it's down (this will happen if
- * we ping alive routers to try to detect router death before
- * apps get burned).
- */
- lnet_notify_locked(lp, 1, !event->status, cfs_time_current());
-
- /*
- * The router checker will wake up very shortly and do the
- * actual notification.
- * XXX If 'lp' stops being a router before then, it will still
- * have the notification pending!!!
- */
- if (avoid_asym_router_failure && !event->status)
- lnet_parse_rc_info(rcd);
-
- out:
- lnet_net_unlock(lp->lp_cpt);
-}
-
-static void
-lnet_wait_known_routerstate(void)
-{
- struct lnet_peer *rtr;
- struct list_head *entry;
- int all_known;
-
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
-
- for (;;) {
- int cpt = lnet_net_lock_current();
-
- all_known = 1;
- list_for_each(entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, struct lnet_peer, lp_rtr_list);
-
- if (!rtr->lp_alive_count) {
- all_known = 0;
- break;
- }
- }
-
- lnet_net_unlock(cpt);
-
- if (all_known)
- return;
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- }
-}
-
-void
-lnet_router_ni_update_locked(struct lnet_peer *gw, __u32 net)
-{
- struct lnet_route *rte;
-
- if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) {
- list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
- if (rte->lr_net == net) {
- rte->lr_downis = 0;
- break;
- }
- }
- }
-}
-
-static void
-lnet_update_ni_status_locked(void)
-{
- struct lnet_ni *ni;
- time64_t now;
- int timeout;
-
- LASSERT(the_lnet.ln_routing);
-
- timeout = router_ping_timeout +
- max(live_router_check_interval, dead_router_check_interval);
-
- now = ktime_get_real_seconds();
- list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
- if (ni->ni_lnd->lnd_type == LOLND)
- continue;
-
- if (now < ni->ni_last_alive + timeout)
- continue;
-
- lnet_ni_lock(ni);
- /* re-check with lock */
- if (now < ni->ni_last_alive + timeout) {
- lnet_ni_unlock(ni);
- continue;
- }
-
- LASSERT(ni->ni_status);
-
- if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
- CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
- libcfs_nid2str(ni->ni_nid), timeout);
- /*
- * NB: so far, this is the only place to set
- * NI status to "down"
- */
- ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
- }
- lnet_ni_unlock(ni);
- }
-}
-
-static void
-lnet_destroy_rc_data(struct lnet_rc_data *rcd)
-{
- LASSERT(list_empty(&rcd->rcd_list));
- /* detached from network */
- LASSERT(LNetMDHandleIsInvalid(rcd->rcd_mdh));
-
- if (rcd->rcd_gateway) {
- int cpt = rcd->rcd_gateway->lp_cpt;
-
- lnet_net_lock(cpt);
- lnet_peer_decref_locked(rcd->rcd_gateway);
- lnet_net_unlock(cpt);
- }
-
- kfree(rcd->rcd_pinginfo);
-
- kfree(rcd);
-}
-
-static struct lnet_rc_data *
-lnet_create_rc_data_locked(struct lnet_peer *gateway)
-{
- struct lnet_rc_data *rcd = NULL;
- struct lnet_ping_info *pi;
- struct lnet_md md;
- int rc;
- int i;
-
- lnet_net_unlock(gateway->lp_cpt);
-
- rcd = kzalloc(sizeof(*rcd), GFP_NOFS);
- if (!rcd)
- goto out;
-
- LNetInvalidateMDHandle(&rcd->rcd_mdh);
- INIT_LIST_HEAD(&rcd->rcd_list);
-
- pi = kzalloc(LNET_PINGINFO_SIZE, GFP_NOFS);
- if (!pi)
- goto out;
-
- for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
- pi->pi_ni[i].ns_nid = LNET_NID_ANY;
- pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
- }
- rcd->rcd_pinginfo = pi;
-
- md.start = pi;
- md.user_ptr = rcd;
- md.length = LNET_PINGINFO_SIZE;
- md.threshold = LNET_MD_THRESH_INF;
- md.options = LNET_MD_TRUNCATE;
- md.eq_handle = the_lnet.ln_rc_eqh;
-
- LASSERT(!LNetEQHandleIsInvalid(the_lnet.ln_rc_eqh));
- rc = LNetMDBind(md, LNET_UNLINK, &rcd->rcd_mdh);
- if (rc < 0) {
- CERROR("Can't bind MD: %d\n", rc);
- goto out;
- }
- LASSERT(!rc);
-
- lnet_net_lock(gateway->lp_cpt);
- /* router table changed or someone has created rcd for this gateway */
- if (!lnet_isrouter(gateway) || gateway->lp_rcd) {
- lnet_net_unlock(gateway->lp_cpt);
- goto out;
- }
-
- lnet_peer_addref_locked(gateway);
- rcd->rcd_gateway = gateway;
- gateway->lp_rcd = rcd;
- gateway->lp_ping_notsent = 0;
-
- return rcd;
-
- out:
- if (rcd) {
- if (!LNetMDHandleIsInvalid(rcd->rcd_mdh)) {
- rc = LNetMDUnlink(rcd->rcd_mdh);
- LASSERT(!rc);
- }
- lnet_destroy_rc_data(rcd);
- }
-
- lnet_net_lock(gateway->lp_cpt);
- return gateway->lp_rcd;
-}
-
-static int
-lnet_router_check_interval(struct lnet_peer *rtr)
-{
- int secs;
-
- secs = rtr->lp_alive ? live_router_check_interval :
- dead_router_check_interval;
- if (secs < 0)
- secs = 0;
-
- return secs;
-}
-
-static void
-lnet_ping_router_locked(struct lnet_peer *rtr)
-{
- struct lnet_rc_data *rcd = NULL;
- unsigned long now = cfs_time_current();
- int secs;
-
- lnet_peer_addref_locked(rtr);
-
- if (rtr->lp_ping_deadline && /* ping timed out? */
- cfs_time_after(now, rtr->lp_ping_deadline))
- lnet_notify_locked(rtr, 1, 0, now);
-
- /* Run any outstanding notifications */
- lnet_ni_notify_locked(rtr->lp_ni, rtr);
-
- if (!lnet_isrouter(rtr) ||
- the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
- /* router table changed or router checker is shutting down */
- lnet_peer_decref_locked(rtr);
- return;
- }
-
- rcd = rtr->lp_rcd ?
- rtr->lp_rcd : lnet_create_rc_data_locked(rtr);
-
- if (!rcd)
- return;
-
- secs = lnet_router_check_interval(rtr);
-
- CDEBUG(D_NET,
- "rtr %s %d: deadline %lu ping_notsent %d alive %d alive_count %d lp_ping_timestamp %lu\n",
- libcfs_nid2str(rtr->lp_nid), secs,
- rtr->lp_ping_deadline, rtr->lp_ping_notsent,
- rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
-
- if (secs && !rtr->lp_ping_notsent &&
- cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
- secs * HZ))) {
- int rc;
- struct lnet_process_id id;
- struct lnet_handle_md mdh;
-
- id.nid = rtr->lp_nid;
- id.pid = LNET_PID_LUSTRE;
- CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
-
- rtr->lp_ping_notsent = 1;
- rtr->lp_ping_timestamp = now;
-
- mdh = rcd->rcd_mdh;
-
- if (!rtr->lp_ping_deadline) {
- rtr->lp_ping_deadline =
- cfs_time_shift(router_ping_timeout);
- }
-
- lnet_net_unlock(rtr->lp_cpt);
-
- rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
- LNET_PROTO_PING_MATCHBITS, 0);
-
- lnet_net_lock(rtr->lp_cpt);
- if (rc)
- rtr->lp_ping_notsent = 0; /* no event pending */
- }
-
- lnet_peer_decref_locked(rtr);
-}
-
-int
-lnet_router_checker_start(void)
-{
- struct task_struct *task;
- int rc;
- int eqsz = 0;
-
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
-
- if (check_routers_before_use &&
- dead_router_check_interval <= 0) {
- LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
- return -EINVAL;
- }
-
- init_completion(&the_lnet.ln_rc_signal);
-
- rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
- if (rc) {
- CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
- return -ENOMEM;
- }
-
- the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
- task = kthread_run(lnet_router_checker, NULL, "router_checker");
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- CERROR("Can't start router checker thread: %d\n", rc);
- /* block until event callback signals exit */
- wait_for_completion(&the_lnet.ln_rc_signal);
- rc = LNetEQFree(the_lnet.ln_rc_eqh);
- LASSERT(!rc);
- the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
- return -ENOMEM;
- }
-
- if (check_routers_before_use) {
- /*
- * Note that a helpful side-effect of pinging all known routers
- * at startup is that it makes them drop stale connections they
- * may have to a previous instance of me.
- */
- lnet_wait_known_routerstate();
- }
-
- return 0;
-}
-
-void
-lnet_router_checker_stop(void)
-{
- int rc;
-
- if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
- return;
-
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
- the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
- /* wakeup the RC thread if it's sleeping */
- wake_up(&the_lnet.ln_rc_waitq);
-
- /* block until event callback signals exit */
- wait_for_completion(&the_lnet.ln_rc_signal);
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
-
- rc = LNetEQFree(the_lnet.ln_rc_eqh);
- LASSERT(!rc);
-}
-
-static void
-lnet_prune_rc_data(int wait_unlink)
-{
- struct lnet_rc_data *rcd;
- struct lnet_rc_data *tmp;
- struct lnet_peer *lp;
- struct list_head head;
- int i = 2;
-
- if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
- list_empty(&the_lnet.ln_rcd_deathrow) &&
- list_empty(&the_lnet.ln_rcd_zombie)))
- return;
-
- INIT_LIST_HEAD(&head);
-
- lnet_net_lock(LNET_LOCK_EX);
-
- if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
- /* router checker is stopping, prune all */
- list_for_each_entry(lp, &the_lnet.ln_routers,
- lp_rtr_list) {
- if (!lp->lp_rcd)
- continue;
-
- LASSERT(list_empty(&lp->lp_rcd->rcd_list));
- list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_rcd_deathrow);
- lp->lp_rcd = NULL;
- }
- }
-
- /* unlink all RCDs on deathrow list */
- list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
-
- if (!list_empty(&head)) {
- lnet_net_unlock(LNET_LOCK_EX);
-
- list_for_each_entry(rcd, &head, rcd_list)
- LNetMDUnlink(rcd->rcd_mdh);
-
- lnet_net_lock(LNET_LOCK_EX);
- }
-
- list_splice_init(&head, &the_lnet.ln_rcd_zombie);
-
- /* release all zombie RCDs */
- while (!list_empty(&the_lnet.ln_rcd_zombie)) {
- list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
- rcd_list) {
- if (LNetMDHandleIsInvalid(rcd->rcd_mdh))
- list_move(&rcd->rcd_list, &head);
- }
-
- wait_unlink = wait_unlink &&
- !list_empty(&the_lnet.ln_rcd_zombie);
-
- lnet_net_unlock(LNET_LOCK_EX);
-
- while (!list_empty(&head)) {
- rcd = list_entry(head.next,
- struct lnet_rc_data, rcd_list);
- list_del_init(&rcd->rcd_list);
- lnet_destroy_rc_data(rcd);
- }
-
- if (!wait_unlink)
- return;
-
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
- "Waiting for rc buffers to unlink\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ / 4);
-
- lnet_net_lock(LNET_LOCK_EX);
- }
-
- lnet_net_unlock(LNET_LOCK_EX);
-}
-
-/*
- * This function is called to check if the RC should block indefinitely.
- * It's called from lnet_router_checker() as well as being passed to
- * wait_event_interruptible() to avoid the lost wake_up problem.
- *
- * When it's called from wait_event_interruptible() it is necessary to
- * also not sleep if the rc state is not running to avoid a deadlock
- * when the system is shutting down
- */
-static inline bool
-lnet_router_checker_active(void)
-{
- if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING)
- return true;
-
- /*
- * Router Checker thread needs to run when routing is enabled in
- * order to call lnet_update_ni_status_locked()
- */
- if (the_lnet.ln_routing)
- return true;
-
- return !list_empty(&the_lnet.ln_routers) &&
- (live_router_check_interval > 0 ||
- dead_router_check_interval > 0);
-}
-
-static int
-lnet_router_checker(void *arg)
-{
- struct lnet_peer *rtr;
- struct list_head *entry;
-
- while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
- __u64 version;
- int cpt;
- int cpt2;
-
- cpt = lnet_net_lock_current();
-rescan:
- version = the_lnet.ln_routers_version;
-
- list_for_each(entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, struct lnet_peer, lp_rtr_list);
-
- cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid);
- if (cpt != cpt2) {
- lnet_net_unlock(cpt);
- cpt = cpt2;
- lnet_net_lock(cpt);
- /* the routers list has changed */
- if (version != the_lnet.ln_routers_version)
- goto rescan;
- }
-
- lnet_ping_router_locked(rtr);
-
- /* NB dropped lock */
- if (version != the_lnet.ln_routers_version) {
- /* the routers list has changed */
- goto rescan;
- }
- }
-
- if (the_lnet.ln_routing)
- lnet_update_ni_status_locked();
-
- lnet_net_unlock(cpt);
-
- lnet_prune_rc_data(0); /* don't wait for UNLINK */
-
- /*
- * Call schedule_timeout() here always adds 1 to load average
- * because kernel counts # active tasks as nr_running
- * + nr_uninterruptible.
- */
- /*
- * if there are any routes then wakeup every second. If
- * there are no routes then sleep indefinitely until woken
- * up by a user adding a route
- */
- if (!lnet_router_checker_active())
- wait_event_interruptible(the_lnet.ln_rc_waitq,
- lnet_router_checker_active());
- else
- wait_event_interruptible_timeout(the_lnet.ln_rc_waitq,
- false,
- HZ);
- }
-
- lnet_prune_rc_data(1); /* wait for UNLINK */
-
- the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
- complete(&the_lnet.ln_rc_signal);
- /* The unlink event callback will signal final completion */
- return 0;
-}
-
-void
-lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
-{
- while (--npages >= 0)
- __free_page(rb->rb_kiov[npages].bv_page);
-
- kfree(rb);
-}
-
-static struct lnet_rtrbuf *
-lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
-{
- int npages = rbp->rbp_npages;
- int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
- struct page *page;
- struct lnet_rtrbuf *rb;
- int i;
-
- rb = kzalloc_cpt(sz, GFP_NOFS, cpt);
- if (!rb)
- return NULL;
-
- rb->rb_pool = rbp;
-
- for (i = 0; i < npages; i++) {
- page = alloc_pages_node(
- cfs_cpt_spread_node(lnet_cpt_table(), cpt),
- GFP_KERNEL | __GFP_ZERO, 0);
- if (!page) {
- while (--i >= 0)
- __free_page(rb->rb_kiov[i].bv_page);
-
- kfree(rb);
- return NULL;
- }
-
- rb->rb_kiov[i].bv_len = PAGE_SIZE;
- rb->rb_kiov[i].bv_offset = 0;
- rb->rb_kiov[i].bv_page = page;
- }
-
- return rb;
-}
-
-static void
-lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt)
-{
- int npages = rbp->rbp_npages;
- struct list_head tmp;
- struct lnet_rtrbuf *rb;
- struct lnet_rtrbuf *temp;
-
- if (!rbp->rbp_nbuffers) /* not initialized or already freed */
- return;
-
- INIT_LIST_HEAD(&tmp);
-
- lnet_net_lock(cpt);
- lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt);
- list_splice_init(&rbp->rbp_bufs, &tmp);
- rbp->rbp_req_nbuffers = 0;
- rbp->rbp_nbuffers = 0;
- rbp->rbp_credits = 0;
- rbp->rbp_mincredits = 0;
- lnet_net_unlock(cpt);
-
- /* Free buffers on the free list. */
- list_for_each_entry_safe(rb, temp, &tmp, rb_list) {
- list_del(&rb->rb_list);
- lnet_destroy_rtrbuf(rb, npages);
- }
-}
-
-static int
-lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
-{
- struct list_head rb_list;
- struct lnet_rtrbuf *rb;
- int num_rb;
- int num_buffers = 0;
- int old_req_nbufs;
- int npages = rbp->rbp_npages;
-
- lnet_net_lock(cpt);
- /*
- * If we are called for less buffers than already in the pool, we
- * just lower the req_nbuffers number and excess buffers will be
- * thrown away as they are returned to the free list. Credits
- * then get adjusted as well.
- * If we already have enough buffers allocated to serve the
- * increase requested, then we can treat that the same way as we
- * do the decrease.
- */
- num_rb = nbufs - rbp->rbp_nbuffers;
- if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
- rbp->rbp_req_nbuffers = nbufs;
- lnet_net_unlock(cpt);
- return 0;
- }
- /*
- * store the older value of rbp_req_nbuffers and then set it to
- * the new request to prevent lnet_return_rx_credits_locked() from
- * freeing buffers that we need to keep around
- */
- old_req_nbufs = rbp->rbp_req_nbuffers;
- rbp->rbp_req_nbuffers = nbufs;
- lnet_net_unlock(cpt);
-
- INIT_LIST_HEAD(&rb_list);
-
- /*
- * allocate the buffers on a local list first. If all buffers are
- * allocated successfully then join this list to the rbp buffer
- * list. If not then free all allocated buffers.
- */
- while (num_rb-- > 0) {
- rb = lnet_new_rtrbuf(rbp, cpt);
- if (!rb) {
- CERROR("Failed to allocate %d route bufs of %d pages\n",
- nbufs, npages);
-
- lnet_net_lock(cpt);
- rbp->rbp_req_nbuffers = old_req_nbufs;
- lnet_net_unlock(cpt);
-
- goto failed;
- }
-
- list_add(&rb->rb_list, &rb_list);
- num_buffers++;
- }
-
- lnet_net_lock(cpt);
-
- list_splice_tail(&rb_list, &rbp->rbp_bufs);
- rbp->rbp_nbuffers += num_buffers;
- rbp->rbp_credits += num_buffers;
- rbp->rbp_mincredits = rbp->rbp_credits;
- /*
- * We need to schedule blocked msg using the newly
- * added buffers.
- */
- while (!list_empty(&rbp->rbp_bufs) &&
- !list_empty(&rbp->rbp_msgs))
- lnet_schedule_blocked_locked(rbp);
-
- lnet_net_unlock(cpt);
-
- return 0;
-
-failed:
- while (!list_empty(&rb_list)) {
- rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list);
- list_del(&rb->rb_list);
- lnet_destroy_rtrbuf(rb, npages);
- }
-
- return -ENOMEM;
-}
-
-static void
-lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages)
-{
- INIT_LIST_HEAD(&rbp->rbp_msgs);
- INIT_LIST_HEAD(&rbp->rbp_bufs);
-
- rbp->rbp_npages = npages;
- rbp->rbp_credits = 0;
- rbp->rbp_mincredits = 0;
-}
-
-void
-lnet_rtrpools_free(int keep_pools)
-{
- struct lnet_rtrbufpool *rtrp;
- int i;
-
- if (!the_lnet.ln_rtrpools) /* uninitialized or freed */
- return;
-
- cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
- lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
- lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
- lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
- }
-
- if (!keep_pools) {
- cfs_percpt_free(the_lnet.ln_rtrpools);
- the_lnet.ln_rtrpools = NULL;
- }
-}
-
-static int
-lnet_nrb_tiny_calculate(void)
-{
- int nrbs = LNET_NRB_TINY;
-
- if (tiny_router_buffers < 0) {
- LCONSOLE_ERROR_MSG(0x10c,
- "tiny_router_buffers=%d invalid when routing enabled\n",
- tiny_router_buffers);
- return -EINVAL;
- }
-
- if (tiny_router_buffers > 0)
- nrbs = tiny_router_buffers;
-
- nrbs /= LNET_CPT_NUMBER;
- return max(nrbs, LNET_NRB_TINY_MIN);
-}
-
-static int
-lnet_nrb_small_calculate(void)
-{
- int nrbs = LNET_NRB_SMALL;
-
- if (small_router_buffers < 0) {
- LCONSOLE_ERROR_MSG(0x10c,
- "small_router_buffers=%d invalid when routing enabled\n",
- small_router_buffers);
- return -EINVAL;
- }
-
- if (small_router_buffers > 0)
- nrbs = small_router_buffers;
-
- nrbs /= LNET_CPT_NUMBER;
- return max(nrbs, LNET_NRB_SMALL_MIN);
-}
-
-static int
-lnet_nrb_large_calculate(void)
-{
- int nrbs = LNET_NRB_LARGE;
-
- if (large_router_buffers < 0) {
- LCONSOLE_ERROR_MSG(0x10c,
- "large_router_buffers=%d invalid when routing enabled\n",
- large_router_buffers);
- return -EINVAL;
- }
-
- if (large_router_buffers > 0)
- nrbs = large_router_buffers;
-
- nrbs /= LNET_CPT_NUMBER;
- return max(nrbs, LNET_NRB_LARGE_MIN);
-}
-
-int
-lnet_rtrpools_alloc(int im_a_router)
-{
- struct lnet_rtrbufpool *rtrp;
- int nrb_tiny;
- int nrb_small;
- int nrb_large;
- int rc;
- int i;
-
- if (!strcmp(forwarding, "")) {
- /* not set either way */
- if (!im_a_router)
- return 0;
- } else if (!strcmp(forwarding, "disabled")) {
- /* explicitly disabled */
- return 0;
- } else if (!strcmp(forwarding, "enabled")) {
- /* explicitly enabled */
- } else {
- LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
- return -EINVAL;
- }
-
- nrb_tiny = lnet_nrb_tiny_calculate();
- if (nrb_tiny < 0)
- return -EINVAL;
-
- nrb_small = lnet_nrb_small_calculate();
- if (nrb_small < 0)
- return -EINVAL;
-
- nrb_large = lnet_nrb_large_calculate();
- if (nrb_large < 0)
- return -EINVAL;
-
- the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
- LNET_NRBPOOLS *
- sizeof(struct lnet_rtrbufpool));
- if (!the_lnet.ln_rtrpools) {
- LCONSOLE_ERROR_MSG(0x10c,
- "Failed to initialize router buffe pool\n");
- return -ENOMEM;
- }
-
- cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
- lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
- rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
- nrb_tiny, i);
- if (rc)
- goto failed;
-
- lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
- LNET_NRB_SMALL_PAGES);
- rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
- nrb_small, i);
- if (rc)
- goto failed;
-
- lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
- LNET_NRB_LARGE_PAGES);
- rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
- nrb_large, i);
- if (rc)
- goto failed;
- }
-
- lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_routing = 1;
- lnet_net_unlock(LNET_LOCK_EX);
-
- return 0;
-
- failed:
- lnet_rtrpools_free(0);
- return rc;
-}
-
-static int
-lnet_rtrpools_adjust_helper(int tiny, int small, int large)
-{
- int nrb = 0;
- int rc = 0;
- int i;
- struct lnet_rtrbufpool *rtrp;
-
- /*
- * If the provided values for each buffer pool are different than the
- * configured values, we need to take action.
- */
- if (tiny >= 0) {
- tiny_router_buffers = tiny;
- nrb = lnet_nrb_tiny_calculate();
- cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
- rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
- nrb, i);
- if (rc)
- return rc;
- }
- }
- if (small >= 0) {
- small_router_buffers = small;
- nrb = lnet_nrb_small_calculate();
- cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
- rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
- nrb, i);
- if (rc)
- return rc;
- }
- }
- if (large >= 0) {
- large_router_buffers = large;
- nrb = lnet_nrb_large_calculate();
- cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
- rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
- nrb, i);
- if (rc)
- return rc;
- }
- }
-
- return 0;
-}
-
-int
-lnet_rtrpools_adjust(int tiny, int small, int large)
-{
- /*
- * this function doesn't revert the changes if adding new buffers
- * failed. It's up to the user space caller to revert the
- * changes.
- */
- if (!the_lnet.ln_routing)
- return 0;
-
- return lnet_rtrpools_adjust_helper(tiny, small, large);
-}
-
-int
-lnet_rtrpools_enable(void)
-{
- int rc = 0;
-
- if (the_lnet.ln_routing)
- return 0;
-
- if (!the_lnet.ln_rtrpools)
- /*
- * If routing is turned off, and we have never
- * initialized the pools before, just call the
- * standard buffer pool allocation routine as
- * if we are just configuring this for the first
- * time.
- */
- rc = lnet_rtrpools_alloc(1);
- else
- rc = lnet_rtrpools_adjust_helper(0, 0, 0);
- if (rc)
- return rc;
-
- lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_routing = 1;
-
- the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED;
- lnet_net_unlock(LNET_LOCK_EX);
-
- return rc;
-}
-
-void
-lnet_rtrpools_disable(void)
-{
- if (!the_lnet.ln_routing)
- return;
-
- lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_routing = 0;
- the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
-
- tiny_router_buffers = 0;
- small_router_buffers = 0;
- large_router_buffers = 0;
- lnet_net_unlock(LNET_LOCK_EX);
- lnet_rtrpools_free(1);
-}
-
-int
-lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, unsigned long when)
-{
- struct lnet_peer *lp = NULL;
- unsigned long now = cfs_time_current();
- int cpt = lnet_cpt_of_nid(nid);
-
- LASSERT(!in_interrupt());
-
- CDEBUG(D_NET, "%s notifying %s: %s\n",
- !ni ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid),
- alive ? "up" : "down");
-
- if (ni &&
- LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
- CWARN("Ignoring notification of %s %s by %s (different net)\n",
- libcfs_nid2str(nid), alive ? "birth" : "death",
- libcfs_nid2str(ni->ni_nid));
- return -EINVAL;
- }
-
- /* can't do predictions... */
- if (cfs_time_after(when, now)) {
- CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
- !ni ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid), alive ? "up" : "down",
- cfs_duration_sec(cfs_time_sub(when, now)));
- return -EINVAL;
- }
-
- if (ni && !alive && /* LND telling me she's down */
- !auto_down) { /* auto-down disabled */
- CDEBUG(D_NET, "Auto-down disabled\n");
- return 0;
- }
-
- lnet_net_lock(cpt);
-
- if (the_lnet.ln_shutdown) {
- lnet_net_unlock(cpt);
- return -ESHUTDOWN;
- }
-
- lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
- if (!lp) {
- /* nid not found */
- lnet_net_unlock(cpt);
- CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
- return 0;
- }
-
- /*
- * We can't fully trust LND on reporting exact peer last_alive
- * if he notifies us about dead peer. For example ksocklnd can
- * call us with when == _time_when_the_node_was_booted_ if
- * no connections were successfully established
- */
- if (ni && !alive && when < lp->lp_last_alive)
- when = lp->lp_last_alive;
-
- lnet_notify_locked(lp, !ni, alive, when);
-
- if (ni)
- lnet_ni_notify_locked(ni, lp);
-
- lnet_peer_decref_locked(lp);
-
- lnet_net_unlock(cpt);
- return 0;
-}
-EXPORT_SYMBOL(lnet_notify);
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
deleted file mode 100644
index 1a71ffebc889..000000000000
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ /dev/null
@@ -1,909 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * This file is part of Portals
- * http://sourceforge.net/projects/sandiaportals/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-lnet.h>
-
-/*
- * This is really lnet_proc.c. You might need to update sanity test 215
- * if any file format is changed.
- */
-
-#define LNET_LOFFT_BITS (sizeof(loff_t) * 8)
-/*
- * NB: max allowed LNET_CPT_BITS is 8 on 64-bit system and 2 on 32-bit system
- */
-#define LNET_PROC_CPT_BITS (LNET_CPT_BITS + 1)
-/* change version, 16 bits or 8 bits */
-#define LNET_PROC_VER_BITS max_t(size_t, min_t(size_t, LNET_LOFFT_BITS, 64) / 4, 8)
-
-#define LNET_PROC_HASH_BITS LNET_PEER_HASH_BITS
-/*
- * bits for peer hash offset
- * NB: we don't use the highest bit of *ppos because it's signed
- */
-#define LNET_PROC_HOFF_BITS (LNET_LOFFT_BITS - \
- LNET_PROC_CPT_BITS - \
- LNET_PROC_VER_BITS - \
- LNET_PROC_HASH_BITS - 1)
-/* bits for hash index + position */
-#define LNET_PROC_HPOS_BITS (LNET_PROC_HASH_BITS + LNET_PROC_HOFF_BITS)
-/* bits for peer hash table + hash version */
-#define LNET_PROC_VPOS_BITS (LNET_PROC_HPOS_BITS + LNET_PROC_VER_BITS)
-
-#define LNET_PROC_CPT_MASK ((1ULL << LNET_PROC_CPT_BITS) - 1)
-#define LNET_PROC_VER_MASK ((1ULL << LNET_PROC_VER_BITS) - 1)
-#define LNET_PROC_HASH_MASK ((1ULL << LNET_PROC_HASH_BITS) - 1)
-#define LNET_PROC_HOFF_MASK ((1ULL << LNET_PROC_HOFF_BITS) - 1)
-
-#define LNET_PROC_CPT_GET(pos) \
- (int)(((pos) >> LNET_PROC_VPOS_BITS) & LNET_PROC_CPT_MASK)
-
-#define LNET_PROC_VER_GET(pos) \
- (int)(((pos) >> LNET_PROC_HPOS_BITS) & LNET_PROC_VER_MASK)
-
-#define LNET_PROC_HASH_GET(pos) \
- (int)(((pos) >> LNET_PROC_HOFF_BITS) & LNET_PROC_HASH_MASK)
-
-#define LNET_PROC_HOFF_GET(pos) \
- (int)((pos) & LNET_PROC_HOFF_MASK)
-
-#define LNET_PROC_POS_MAKE(cpt, ver, hash, off) \
- (((((loff_t)(cpt)) & LNET_PROC_CPT_MASK) << LNET_PROC_VPOS_BITS) | \
- ((((loff_t)(ver)) & LNET_PROC_VER_MASK) << LNET_PROC_HPOS_BITS) | \
- ((((loff_t)(hash)) & LNET_PROC_HASH_MASK) << LNET_PROC_HOFF_BITS) | \
- ((off) & LNET_PROC_HOFF_MASK))
-
-#define LNET_PROC_VERSION(v) ((unsigned int)((v) & LNET_PROC_VER_MASK))
-
-static int __proc_lnet_stats(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- int rc;
- struct lnet_counters *ctrs;
- int len;
- char *tmpstr;
- const int tmpsiz = 256; /* 7 %u and 4 %llu */
-
- if (write) {
- lnet_counters_reset();
- return 0;
- }
-
- /* read */
-
- ctrs = kzalloc(sizeof(*ctrs), GFP_NOFS);
- if (!ctrs)
- return -ENOMEM;
-
- tmpstr = kmalloc(tmpsiz, GFP_KERNEL);
- if (!tmpstr) {
- kfree(ctrs);
- return -ENOMEM;
- }
-
- lnet_counters_get(ctrs);
-
- len = snprintf(tmpstr, tmpsiz,
- "%u %u %u %u %u %u %u %llu %llu %llu %llu",
- ctrs->msgs_alloc, ctrs->msgs_max,
- ctrs->errors,
- ctrs->send_count, ctrs->recv_count,
- ctrs->route_count, ctrs->drop_count,
- ctrs->send_length, ctrs->recv_length,
- ctrs->route_length, ctrs->drop_length);
-
- if (pos >= min_t(int, len, strlen(tmpstr)))
- rc = 0;
- else
- rc = cfs_trace_copyout_string(buffer, nob,
- tmpstr + pos, "\n");
-
- kfree(tmpstr);
- kfree(ctrs);
- return rc;
-}
-
-static int proc_lnet_stats(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_lnet_stats);
-}
-
-static int proc_lnet_routes(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- const int tmpsiz = 256;
- char *tmpstr;
- char *s;
- int rc = 0;
- int len;
- int ver;
- int off;
-
- BUILD_BUG_ON(sizeof(loff_t) < 4);
-
- off = LNET_PROC_HOFF_GET(*ppos);
- ver = LNET_PROC_VER_GET(*ppos);
-
- LASSERT(!write);
-
- if (!*lenp)
- return 0;
-
- tmpstr = kmalloc(tmpsiz, GFP_KERNEL);
- if (!tmpstr)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- if (!*ppos) {
- s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
- the_lnet.ln_routing ? "enabled" : "disabled");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %8s %7s %s\n",
- "net", "hops", "priority", "state", "router");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- lnet_net_lock(0);
- ver = (unsigned int)the_lnet.ln_remote_nets_version;
- lnet_net_unlock(0);
- *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
- } else {
- struct list_head *n;
- struct list_head *r;
- struct lnet_route *route = NULL;
- struct lnet_remotenet *rnet = NULL;
- int skip = off - 1;
- struct list_head *rn_list;
- int i;
-
- lnet_net_lock(0);
-
- if (ver != LNET_PROC_VERSION(the_lnet.ln_remote_nets_version)) {
- lnet_net_unlock(0);
- kfree(tmpstr);
- return -ESTALE;
- }
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && !route; i++) {
- rn_list = &the_lnet.ln_remote_nets_hash[i];
-
- n = rn_list->next;
-
- while (n != rn_list && !route) {
- rnet = list_entry(n, struct lnet_remotenet,
- lrn_list);
-
- r = rnet->lrn_routes.next;
-
- while (r != &rnet->lrn_routes) {
- struct lnet_route *re;
-
- re = list_entry(r, struct lnet_route,
- lr_list);
- if (!skip) {
- route = re;
- break;
- }
-
- skip--;
- r = r->next;
- }
-
- n = n->next;
- }
- }
-
- if (route) {
- __u32 net = rnet->lrn_net;
- __u32 hops = route->lr_hops;
- unsigned int priority = route->lr_priority;
- lnet_nid_t nid = route->lr_gateway->lp_nid;
- int alive = lnet_is_route_alive(route);
-
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-8s %4u %8u %7s %s\n",
- libcfs_net2str(net), hops,
- priority,
- alive ? "up" : "down",
- libcfs_nid2str(nid));
- LASSERT(tmpstr + tmpsiz - s > 0);
- }
-
- lnet_net_unlock(0);
- }
-
- len = s - tmpstr; /* how many bytes was written */
-
- if (len > *lenp) { /* linux-supplied buffer is too small */
- rc = -EINVAL;
- } else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len)) {
- rc = -EFAULT;
- } else {
- off += 1;
- *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
- }
- }
-
- kfree(tmpstr);
-
- if (!rc)
- *lenp = len;
-
- return rc;
-}
-
-static int proc_lnet_routers(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc = 0;
- char *tmpstr;
- char *s;
- const int tmpsiz = 256;
- int len;
- int ver;
- int off;
-
- off = LNET_PROC_HOFF_GET(*ppos);
- ver = LNET_PROC_VER_GET(*ppos);
-
- LASSERT(!write);
-
- if (!*lenp)
- return 0;
-
- tmpstr = kmalloc(tmpsiz, GFP_KERNEL);
- if (!tmpstr)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- if (!*ppos) {
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-4s %7s %9s %6s %12s %9s %8s %7s %s\n",
- "ref", "rtr_ref", "alive_cnt", "state",
- "last_ping", "ping_sent", "deadline",
- "down_ni", "router");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- lnet_net_lock(0);
- ver = (unsigned int)the_lnet.ln_routers_version;
- lnet_net_unlock(0);
- *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
- } else {
- struct list_head *r;
- struct lnet_peer *peer = NULL;
- int skip = off - 1;
-
- lnet_net_lock(0);
-
- if (ver != LNET_PROC_VERSION(the_lnet.ln_routers_version)) {
- lnet_net_unlock(0);
-
- kfree(tmpstr);
- return -ESTALE;
- }
-
- r = the_lnet.ln_routers.next;
-
- while (r != &the_lnet.ln_routers) {
- struct lnet_peer *lp;
-
- lp = list_entry(r, struct lnet_peer, lp_rtr_list);
- if (!skip) {
- peer = lp;
- break;
- }
-
- skip--;
- r = r->next;
- }
-
- if (peer) {
- lnet_nid_t nid = peer->lp_nid;
- unsigned long now = cfs_time_current();
- unsigned long deadline = peer->lp_ping_deadline;
- int nrefs = peer->lp_refcount;
- int nrtrrefs = peer->lp_rtr_refcount;
- int alive_cnt = peer->lp_alive_count;
- int alive = peer->lp_alive;
- int pingsent = !peer->lp_ping_notsent;
- int last_ping = cfs_duration_sec(cfs_time_sub(now,
- peer->lp_ping_timestamp));
- int down_ni = 0;
- struct lnet_route *rtr;
-
- if ((peer->lp_ping_feats &
- LNET_PING_FEAT_NI_STATUS)) {
- list_for_each_entry(rtr, &peer->lp_routes,
- lr_gwlist) {
- /*
- * downis on any route should be the
- * number of downis on the gateway
- */
- if (rtr->lr_downis) {
- down_ni = rtr->lr_downis;
- break;
- }
- }
- }
-
- if (!deadline)
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n",
- nrefs, nrtrrefs, alive_cnt,
- alive ? "up" : "down", last_ping,
- pingsent, "NA", down_ni,
- libcfs_nid2str(nid));
- else
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-4d %7d %9d %6s %12d %9d %8lu %7d %s\n",
- nrefs, nrtrrefs, alive_cnt,
- alive ? "up" : "down", last_ping,
- pingsent,
- cfs_duration_sec(cfs_time_sub(deadline, now)),
- down_ni, libcfs_nid2str(nid));
- LASSERT(tmpstr + tmpsiz - s > 0);
- }
-
- lnet_net_unlock(0);
- }
-
- len = s - tmpstr; /* how many bytes was written */
-
- if (len > *lenp) { /* linux-supplied buffer is too small */
- rc = -EINVAL;
- } else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len)) {
- rc = -EFAULT;
- } else {
- off += 1;
- *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
- }
- }
-
- kfree(tmpstr);
-
- if (!rc)
- *lenp = len;
-
- return rc;
-}
-
-static int proc_lnet_peers(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- const int tmpsiz = 256;
- struct lnet_peer_table *ptable;
- char *tmpstr;
- char *s;
- int cpt = LNET_PROC_CPT_GET(*ppos);
- int ver = LNET_PROC_VER_GET(*ppos);
- int hash = LNET_PROC_HASH_GET(*ppos);
- int hoff = LNET_PROC_HOFF_GET(*ppos);
- int rc = 0;
- int len;
-
- BUILD_BUG_ON(LNET_PROC_HASH_BITS < LNET_PEER_HASH_BITS);
- LASSERT(!write);
-
- if (!*lenp)
- return 0;
-
- if (cpt >= LNET_CPT_NUMBER) {
- *lenp = 0;
- return 0;
- }
-
- tmpstr = kmalloc(tmpsiz, GFP_KERNEL);
- if (!tmpstr)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- if (!*ppos) {
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
- "nid", "refs", "state", "last", "max",
- "rtr", "min", "tx", "min", "queue");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- hoff++;
- } else {
- struct lnet_peer *peer;
- struct list_head *p;
- int skip;
- again:
- p = NULL;
- peer = NULL;
- skip = hoff - 1;
-
- lnet_net_lock(cpt);
- ptable = the_lnet.ln_peer_tables[cpt];
- if (hoff == 1)
- ver = LNET_PROC_VERSION(ptable->pt_version);
-
- if (ver != LNET_PROC_VERSION(ptable->pt_version)) {
- lnet_net_unlock(cpt);
- kfree(tmpstr);
- return -ESTALE;
- }
-
- while (hash < LNET_PEER_HASH_SIZE) {
- if (!p)
- p = ptable->pt_hash[hash].next;
-
- while (p != &ptable->pt_hash[hash]) {
- struct lnet_peer *lp;
-
- lp = list_entry(p, struct lnet_peer,
- lp_hashlist);
- if (!skip) {
- peer = lp;
-
- /*
- * minor optimization: start from idx+1
- * on next iteration if we've just
- * drained lp_hashlist
- */
- if (lp->lp_hashlist.next ==
- &ptable->pt_hash[hash]) {
- hoff = 1;
- hash++;
- } else {
- hoff++;
- }
-
- break;
- }
-
- skip--;
- p = lp->lp_hashlist.next;
- }
-
- if (peer)
- break;
-
- p = NULL;
- hoff = 1;
- hash++;
- }
-
- if (peer) {
- lnet_nid_t nid = peer->lp_nid;
- int nrefs = peer->lp_refcount;
- int lastalive = -1;
- char *aliveness = "NA";
- int maxcr = peer->lp_ni->ni_peertxcredits;
- int txcr = peer->lp_txcredits;
- int mintxcr = peer->lp_mintxcredits;
- int rtrcr = peer->lp_rtrcredits;
- int minrtrcr = peer->lp_minrtrcredits;
- int txqnob = peer->lp_txqnob;
-
- if (lnet_isrouter(peer) ||
- lnet_peer_aliveness_enabled(peer))
- aliveness = peer->lp_alive ? "up" : "down";
-
- if (lnet_peer_aliveness_enabled(peer)) {
- unsigned long now = cfs_time_current();
- long delta;
-
- delta = cfs_time_sub(now, peer->lp_last_alive);
- lastalive = cfs_duration_sec(delta);
-
- /* No need to mess up peers contents with
- * arbitrarily long integers - it suffices to
- * know that lastalive is more than 10000s old
- */
- if (lastalive >= 10000)
- lastalive = 9999;
- }
-
- lnet_net_unlock(cpt);
-
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %4d %5s %5d %5d %5d %5d %5d %5d %d\n",
- libcfs_nid2str(nid), nrefs, aliveness,
- lastalive, maxcr, rtrcr, minrtrcr, txcr,
- mintxcr, txqnob);
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- } else { /* peer is NULL */
- lnet_net_unlock(cpt);
- }
-
- if (hash == LNET_PEER_HASH_SIZE) {
- cpt++;
- hash = 0;
- hoff = 1;
- if (!peer && cpt < LNET_CPT_NUMBER)
- goto again;
- }
- }
-
- len = s - tmpstr; /* how many bytes was written */
-
- if (len > *lenp) { /* linux-supplied buffer is too small */
- rc = -EINVAL;
- } else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
- rc = -EFAULT;
- else
- *ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
- }
-
- kfree(tmpstr);
-
- if (!rc)
- *lenp = len;
-
- return rc;
-}
-
-static int __proc_lnet_buffers(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- char *s;
- char *tmpstr;
- int tmpsiz;
- int idx;
- int len;
- int rc;
- int i;
-
- LASSERT(!write);
-
- /* (4 %d) * 4 * LNET_CPT_NUMBER */
- tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER;
- tmpstr = kvmalloc(tmpsiz, GFP_KERNEL);
- if (!tmpstr)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%5s %5s %7s %7s\n",
- "pages", "count", "credits", "min");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- if (!the_lnet.ln_rtrpools)
- goto out; /* I'm not a router */
-
- for (idx = 0; idx < LNET_NRBPOOLS; idx++) {
- struct lnet_rtrbufpool *rbp;
-
- lnet_net_lock(LNET_LOCK_EX);
- cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%5d %5d %7d %7d\n",
- rbp[idx].rbp_npages,
- rbp[idx].rbp_nbuffers,
- rbp[idx].rbp_credits,
- rbp[idx].rbp_mincredits);
- LASSERT(tmpstr + tmpsiz - s > 0);
- }
- lnet_net_unlock(LNET_LOCK_EX);
- }
-
- out:
- len = s - tmpstr;
-
- if (pos >= min_t(int, len, strlen(tmpstr)))
- rc = 0;
- else
- rc = cfs_trace_copyout_string(buffer, nob,
- tmpstr + pos, NULL);
-
- kvfree(tmpstr);
- return rc;
-}
-
-static int proc_lnet_buffers(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_lnet_buffers);
-}
-
-static int proc_lnet_nis(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int tmpsiz = 128 * LNET_CPT_NUMBER;
- int rc = 0;
- char *tmpstr;
- char *s;
- int len;
-
- LASSERT(!write);
-
- if (!*lenp)
- return 0;
-
- tmpstr = kvmalloc(tmpsiz, GFP_KERNEL);
- if (!tmpstr)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- if (!*ppos) {
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
- "nid", "status", "alive", "refs", "peer",
- "rtr", "max", "tx", "min");
- LASSERT(tmpstr + tmpsiz - s > 0);
- } else {
- struct list_head *n;
- struct lnet_ni *ni = NULL;
- int skip = *ppos - 1;
-
- lnet_net_lock(0);
-
- n = the_lnet.ln_nis.next;
-
- while (n != &the_lnet.ln_nis) {
- struct lnet_ni *a_ni;
-
- a_ni = list_entry(n, struct lnet_ni, ni_list);
- if (!skip) {
- ni = a_ni;
- break;
- }
-
- skip--;
- n = n->next;
- }
-
- if (ni) {
- struct lnet_tx_queue *tq;
- char *stat;
- time64_t now = ktime_get_real_seconds();
- int last_alive = -1;
- int i;
- int j;
-
- if (the_lnet.ln_routing)
- last_alive = now - ni->ni_last_alive;
-
- /* @lo forever alive */
- if (ni->ni_lnd->lnd_type == LOLND)
- last_alive = 0;
-
- lnet_ni_lock(ni);
- LASSERT(ni->ni_status);
- stat = (ni->ni_status->ns_status ==
- LNET_NI_STATUS_UP) ? "up" : "down";
- lnet_ni_unlock(ni);
-
- /*
- * we actually output credits information for
- * TX queue of each partition
- */
- cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
- for (j = 0; ni->ni_cpts &&
- j < ni->ni_ncpts; j++) {
- if (i == ni->ni_cpts[j])
- break;
- }
-
- if (j == ni->ni_ncpts)
- continue;
-
- if (i)
- lnet_net_lock(i);
-
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n",
- libcfs_nid2str(ni->ni_nid), stat,
- last_alive, *ni->ni_refs[i],
- ni->ni_peertxcredits,
- ni->ni_peerrtrcredits,
- tq->tq_credits_max,
- tq->tq_credits,
- tq->tq_credits_min);
- if (i)
- lnet_net_unlock(i);
- }
- LASSERT(tmpstr + tmpsiz - s > 0);
- }
-
- lnet_net_unlock(0);
- }
-
- len = s - tmpstr; /* how many bytes was written */
-
- if (len > *lenp) { /* linux-supplied buffer is too small */
- rc = -EINVAL;
- } else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
- rc = -EFAULT;
- else
- *ppos += 1;
- }
-
- kvfree(tmpstr);
-
- if (!rc)
- *lenp = len;
-
- return rc;
-}
-
-struct lnet_portal_rotors {
- int pr_value;
- const char *pr_name;
- const char *pr_desc;
-};
-
-static struct lnet_portal_rotors portal_rotors[] = {
- {
- .pr_value = LNET_PTL_ROTOR_OFF,
- .pr_name = "OFF",
- .pr_desc = "Turn off message rotor for wildcard portals"
- },
- {
- .pr_value = LNET_PTL_ROTOR_ON,
- .pr_name = "ON",
- .pr_desc = "round-robin dispatch all PUT messages for wildcard portals"
- },
- {
- .pr_value = LNET_PTL_ROTOR_RR_RT,
- .pr_name = "RR_RT",
- .pr_desc = "round-robin dispatch routed PUT message for wildcard portals"
- },
- {
- .pr_value = LNET_PTL_ROTOR_HASH_RT,
- .pr_name = "HASH_RT",
- .pr_desc = "dispatch routed PUT message by hashing source NID for wildcard portals"
- },
- {
- .pr_value = -1,
- .pr_name = NULL,
- .pr_desc = NULL
- },
-};
-
-static int __proc_lnet_portal_rotor(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- const int buf_len = 128;
- char *buf;
- char *tmp;
- int rc;
- int i;
-
- buf = kmalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (!write) {
- lnet_res_lock(0);
-
- for (i = 0; portal_rotors[i].pr_value >= 0; i++) {
- if (portal_rotors[i].pr_value == portal_rotor)
- break;
- }
-
- LASSERT(portal_rotors[i].pr_value == portal_rotor);
- lnet_res_unlock(0);
-
- rc = snprintf(buf, buf_len,
- "{\n\tportals: all\n"
- "\trotor: %s\n\tdescription: %s\n}",
- portal_rotors[i].pr_name,
- portal_rotors[i].pr_desc);
-
- if (pos >= min_t(int, rc, buf_len)) {
- rc = 0;
- } else {
- rc = cfs_trace_copyout_string(buffer, nob,
- buf + pos, "\n");
- }
- goto out;
- }
-
- rc = cfs_trace_copyin_string(buf, buf_len, buffer, nob);
- if (rc < 0)
- goto out;
-
- tmp = strim(buf);
-
- rc = -EINVAL;
- lnet_res_lock(0);
- for (i = 0; portal_rotors[i].pr_name; i++) {
- if (!strncasecmp(portal_rotors[i].pr_name, tmp,
- strlen(portal_rotors[i].pr_name))) {
- portal_rotor = portal_rotors[i].pr_value;
- rc = 0;
- break;
- }
- }
- lnet_res_unlock(0);
-out:
- kfree(buf);
- return rc;
-}
-
-static int proc_lnet_portal_rotor(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_lnet_portal_rotor);
-}
-
-static struct ctl_table lnet_table[] = {
- /*
- * NB No .strategy entries have been provided since sysctl(8) prefers
- * to go via /proc for portability.
- */
- {
- .procname = "stats",
- .mode = 0644,
- .proc_handler = &proc_lnet_stats,
- },
- {
- .procname = "routes",
- .mode = 0444,
- .proc_handler = &proc_lnet_routes,
- },
- {
- .procname = "routers",
- .mode = 0444,
- .proc_handler = &proc_lnet_routers,
- },
- {
- .procname = "peers",
- .mode = 0444,
- .proc_handler = &proc_lnet_peers,
- },
- {
- .procname = "buffers",
- .mode = 0444,
- .proc_handler = &proc_lnet_buffers,
- },
- {
- .procname = "nis",
- .mode = 0444,
- .proc_handler = &proc_lnet_nis,
- },
- {
- .procname = "portal_rotor",
- .mode = 0644,
- .proc_handler = &proc_lnet_portal_rotor,
- },
- {
- }
-};
-
-void lnet_router_debugfs_init(void)
-{
- lustre_insert_debugfs(lnet_table, NULL);
-}
-
-void lnet_router_debugfs_fini(void)
-{
-}
diff --git a/drivers/staging/lustre/lnet/selftest/Makefile b/drivers/staging/lustre/lnet/selftest/Makefile
deleted file mode 100644
index 3ccc8966b566..000000000000
--- a/drivers/staging/lustre/lnet/selftest/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LNET_SELFTEST) := lnet_selftest.o
-
-lnet_selftest-y := console.o conrpc.o conctl.o framework.o timer.o rpc.o \
- module.o ping_test.o brw_test.o
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
deleted file mode 100644
index f1ee219bc8f3..000000000000
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ /dev/null
@@ -1,526 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/brw_test.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- */
-
-#include "selftest.h"
-
-static int brw_srv_workitems = SFW_TEST_WI_MAX;
-module_param(brw_srv_workitems, int, 0644);
-MODULE_PARM_DESC(brw_srv_workitems, "# BRW server workitems");
-
-static int brw_inject_errors;
-module_param(brw_inject_errors, int, 0644);
-MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
-
-#define BRW_POISON 0xbeefbeefbeefbeefULL
-#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
-#define BRW_MSIZE sizeof(u64)
-
-static void
-brw_client_fini(struct sfw_test_instance *tsi)
-{
- struct srpc_bulk *bulk;
- struct sfw_test_unit *tsu;
-
- LASSERT(tsi->tsi_is_client);
-
- list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
- bulk = tsu->tsu_private;
- if (!bulk)
- continue;
-
- srpc_free_bulk(bulk);
- tsu->tsu_private = NULL;
- }
-}
-
-static int
-brw_client_init(struct sfw_test_instance *tsi)
-{
- struct sfw_session *sn = tsi->tsi_batch->bat_session;
- int flags;
- int off;
- int npg;
- int len;
- int opc;
- struct srpc_bulk *bulk;
- struct sfw_test_unit *tsu;
-
- LASSERT(sn);
- LASSERT(tsi->tsi_is_client);
-
- if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
-
- opc = breq->blk_opc;
- flags = breq->blk_flags;
- npg = breq->blk_npg;
- /*
- * NB: this is not going to work for variable page size,
- * but we have to keep it for compatibility
- */
- len = npg * PAGE_SIZE;
- off = 0;
- } else {
- struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
-
- /*
- * I should never get this step if it's unknown feature
- * because make_session will reject unknown feature
- */
- LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
-
- opc = breq->blk_opc;
- flags = breq->blk_flags;
- len = breq->blk_len;
- off = breq->blk_offset & ~PAGE_MASK;
- npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- }
-
- if (off % BRW_MSIZE)
- return -EINVAL;
-
- if (npg > LNET_MAX_IOV || npg <= 0)
- return -EINVAL;
-
- if (opc != LST_BRW_READ && opc != LST_BRW_WRITE)
- return -EINVAL;
-
- if (flags != LST_BRW_CHECK_NONE &&
- flags != LST_BRW_CHECK_FULL && flags != LST_BRW_CHECK_SIMPLE)
- return -EINVAL;
-
- list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
- bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
- off, npg, len, opc == LST_BRW_READ);
- if (!bulk) {
- brw_client_fini(tsi);
- return -ENOMEM;
- }
-
- tsu->tsu_private = bulk;
- }
-
- return 0;
-}
-
-static int brw_inject_one_error(void)
-{
- struct timespec64 ts;
-
- if (brw_inject_errors <= 0)
- return 0;
-
- ktime_get_ts64(&ts);
-
- if (!((ts.tv_nsec / NSEC_PER_USEC) & 1))
- return 0;
-
- return brw_inject_errors--;
-}
-
-static void
-brw_fill_page(struct page *pg, int off, int len, int pattern, __u64 magic)
-{
- char *addr = page_address(pg) + off;
- int i;
-
- LASSERT(addr);
- LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
-
- if (pattern == LST_BRW_CHECK_NONE)
- return;
-
- if (magic == BRW_MAGIC)
- magic += brw_inject_one_error();
-
- if (pattern == LST_BRW_CHECK_SIMPLE) {
- memcpy(addr, &magic, BRW_MSIZE);
- if (len > BRW_MSIZE) {
- addr += PAGE_SIZE - BRW_MSIZE;
- memcpy(addr, &magic, BRW_MSIZE);
- }
- return;
- }
-
- if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < len; i += BRW_MSIZE)
- memcpy(addr + i, &magic, BRW_MSIZE);
- return;
- }
-
- LBUG();
-}
-
-static int
-brw_check_page(struct page *pg, int off, int len, int pattern, __u64 magic)
-{
- char *addr = page_address(pg) + off;
- __u64 data = 0; /* make compiler happy */
- int i;
-
- LASSERT(addr);
- LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
-
- if (pattern == LST_BRW_CHECK_NONE)
- return 0;
-
- if (pattern == LST_BRW_CHECK_SIMPLE) {
- data = *((__u64 *)addr);
- if (data != magic)
- goto bad_data;
-
- if (len > BRW_MSIZE) {
- addr += PAGE_SIZE - BRW_MSIZE;
- data = *((__u64 *)addr);
- if (data != magic)
- goto bad_data;
- }
- return 0;
- }
-
- if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < len; i += BRW_MSIZE) {
- data = *(u64 *)(addr + i);
- if (data != magic)
- goto bad_data;
- }
- return 0;
- }
-
- LBUG();
-
-bad_data:
- CERROR("Bad data in page %p: %#llx, %#llx expected\n",
- pg, data, magic);
- return 1;
-}
-
-static void
-brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
-{
- int i;
- struct page *pg;
-
- for (i = 0; i < bk->bk_niov; i++) {
- int off, len;
-
- pg = bk->bk_iovs[i].bv_page;
- off = bk->bk_iovs[i].bv_offset;
- len = bk->bk_iovs[i].bv_len;
- brw_fill_page(pg, off, len, pattern, magic);
- }
-}
-
-static int
-brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
-{
- int i;
- struct page *pg;
-
- for (i = 0; i < bk->bk_niov; i++) {
- int off, len;
-
- pg = bk->bk_iovs[i].bv_page;
- off = bk->bk_iovs[i].bv_offset;
- len = bk->bk_iovs[i].bv_len;
- if (brw_check_page(pg, off, len, pattern, magic)) {
- CERROR("Bulk page %p (%d/%d) is corrupted!\n",
- pg, i, bk->bk_niov);
- return 1;
- }
- }
-
- return 0;
-}
-
-static int
-brw_client_prep_rpc(struct sfw_test_unit *tsu, struct lnet_process_id dest,
- struct srpc_client_rpc **rpcpp)
-{
- struct srpc_bulk *bulk = tsu->tsu_private;
- struct sfw_test_instance *tsi = tsu->tsu_instance;
- struct sfw_session *sn = tsi->tsi_batch->bat_session;
- struct srpc_client_rpc *rpc;
- struct srpc_brw_reqst *req;
- int flags;
- int npg;
- int len;
- int opc;
- int rc;
-
- LASSERT(sn);
- LASSERT(bulk);
-
- if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
-
- opc = breq->blk_opc;
- flags = breq->blk_flags;
- npg = breq->blk_npg;
- len = npg * PAGE_SIZE;
- } else {
- struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
- int off;
-
- /*
- * I should never get this step if it's unknown feature
- * because make_session will reject unknown feature
- */
- LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
-
- opc = breq->blk_opc;
- flags = breq->blk_flags;
- len = breq->blk_len;
- off = breq->blk_offset;
- npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- }
-
- rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
- if (rc)
- return rc;
-
- memcpy(&rpc->crpc_bulk, bulk, offsetof(struct srpc_bulk, bk_iovs[npg]));
- if (opc == LST_BRW_WRITE)
- brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC);
- else
- brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_POISON);
-
- req = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
- req->brw_flags = flags;
- req->brw_rw = opc;
- req->brw_len = len;
-
- *rpcpp = rpc;
- return 0;
-}
-
-static void
-brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
-{
- __u64 magic = BRW_MAGIC;
- struct sfw_test_instance *tsi = tsu->tsu_instance;
- struct sfw_session *sn = tsi->tsi_batch->bat_session;
- struct srpc_msg *msg = &rpc->crpc_replymsg;
- struct srpc_brw_reply *reply = &msg->msg_body.brw_reply;
- struct srpc_brw_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
-
- LASSERT(sn);
-
- if (rpc->crpc_status) {
- CERROR("BRW RPC to %s failed with %d\n",
- libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
- atomic_inc(&sn->sn_brw_errors);
- return;
- }
-
- if (msg->msg_magic != SRPC_MSG_MAGIC) {
- __swab64s(&magic);
- __swab32s(&reply->brw_status);
- }
-
- CDEBUG(reply->brw_status ? D_WARNING : D_NET,
- "BRW RPC to %s finished with brw_status: %d\n",
- libcfs_id2str(rpc->crpc_dest), reply->brw_status);
-
- if (reply->brw_status) {
- atomic_inc(&sn->sn_brw_errors);
- rpc->crpc_status = -(int)reply->brw_status;
- return;
- }
-
- if (reqst->brw_rw == LST_BRW_WRITE)
- return;
-
- if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic)) {
- CERROR("Bulk data from %s is corrupted!\n",
- libcfs_id2str(rpc->crpc_dest));
- atomic_inc(&sn->sn_brw_errors);
- rpc->crpc_status = -EBADMSG;
- }
-}
-
-static void
-brw_server_rpc_done(struct srpc_server_rpc *rpc)
-{
- struct srpc_bulk *blk = rpc->srpc_bulk;
-
- if (!blk)
- return;
-
- if (rpc->srpc_status)
- CERROR("Bulk transfer %s %s has failed: %d\n",
- blk->bk_sink ? "from" : "to",
- libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
- else
- CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n",
- blk->bk_niov, blk->bk_sink ? "from" : "to",
- libcfs_id2str(rpc->srpc_peer));
-
- sfw_free_pages(rpc);
-}
-
-static int
-brw_bulk_ready(struct srpc_server_rpc *rpc, int status)
-{
- __u64 magic = BRW_MAGIC;
- struct srpc_brw_reply *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
- struct srpc_brw_reqst *reqst;
- struct srpc_msg *reqstmsg;
-
- LASSERT(rpc->srpc_bulk);
- LASSERT(rpc->srpc_reqstbuf);
-
- reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- reqst = &reqstmsg->msg_body.brw_reqst;
-
- if (status) {
- CERROR("BRW bulk %s failed for RPC from %s: %d\n",
- reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
- libcfs_id2str(rpc->srpc_peer), status);
- return -EIO;
- }
-
- if (reqst->brw_rw == LST_BRW_READ)
- return 0;
-
- if (reqstmsg->msg_magic != SRPC_MSG_MAGIC)
- __swab64s(&magic);
-
- if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic)) {
- CERROR("Bulk data from %s is corrupted!\n",
- libcfs_id2str(rpc->srpc_peer));
- reply->brw_status = EBADMSG;
- }
-
- return 0;
-}
-
-static int
-brw_server_handle(struct srpc_server_rpc *rpc)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- struct srpc_msg *replymsg = &rpc->srpc_replymsg;
- struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- struct srpc_brw_reply *reply = &replymsg->msg_body.brw_reply;
- struct srpc_brw_reqst *reqst = &reqstmsg->msg_body.brw_reqst;
- int npg;
- int rc;
-
- LASSERT(sv->sv_id == SRPC_SERVICE_BRW);
-
- if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) {
- LASSERT(reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));
-
- __swab32s(&reqst->brw_rw);
- __swab32s(&reqst->brw_len);
- __swab32s(&reqst->brw_flags);
- __swab64s(&reqst->brw_rpyid);
- __swab64s(&reqst->brw_bulkid);
- }
- LASSERT(reqstmsg->msg_type == (__u32)srpc_service2request(sv->sv_id));
-
- reply->brw_status = 0;
- rpc->srpc_done = brw_server_rpc_done;
-
- if ((reqst->brw_rw != LST_BRW_READ && reqst->brw_rw != LST_BRW_WRITE) ||
- (reqst->brw_flags != LST_BRW_CHECK_NONE &&
- reqst->brw_flags != LST_BRW_CHECK_FULL &&
- reqst->brw_flags != LST_BRW_CHECK_SIMPLE)) {
- reply->brw_status = EINVAL;
- return 0;
- }
-
- if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) {
- replymsg->msg_ses_feats = LST_FEATS_MASK;
- reply->brw_status = EPROTO;
- return 0;
- }
-
- if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
- /* compat with old version */
- if (reqst->brw_len & ~PAGE_MASK) {
- reply->brw_status = EINVAL;
- return 0;
- }
- npg = reqst->brw_len >> PAGE_SHIFT;
-
- } else {
- npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- }
-
- replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
-
- if (!reqst->brw_len || npg > LNET_MAX_IOV) {
- reply->brw_status = EINVAL;
- return 0;
- }
-
- rc = sfw_alloc_pages(rpc, rpc->srpc_scd->scd_cpt, npg,
- reqst->brw_len,
- reqst->brw_rw == LST_BRW_WRITE);
- if (rc)
- return rc;
-
- if (reqst->brw_rw == LST_BRW_READ)
- brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_MAGIC);
- else
- brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_POISON);
-
- return 0;
-}
-
-struct sfw_test_client_ops brw_test_client;
-
-void brw_init_test_client(void)
-{
- brw_test_client.tso_init = brw_client_init;
- brw_test_client.tso_fini = brw_client_fini;
- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
- brw_test_client.tso_done_rpc = brw_client_done_rpc;
-};
-
-struct srpc_service brw_test_service;
-
-void brw_init_test_service(void)
-{
- brw_test_service.sv_id = SRPC_SERVICE_BRW;
- brw_test_service.sv_name = "brw_test";
- brw_test_service.sv_handler = brw_server_handle;
- brw_test_service.sv_bulk_ready = brw_bulk_ready;
- brw_test_service.sv_wi_total = brw_srv_workitems;
-}
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
deleted file mode 100644
index a2d8092bdeb7..000000000000
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ /dev/null
@@ -1,799 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/conctl.c
- *
- * IOC handle in kernel
- *
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-lnet.h>
-#include <uapi/linux/lnet/lnetst.h>
-#include "console.h"
-
-static int
-lst_session_new_ioctl(struct lstio_session_new_args *args)
-{
- char name[LST_NAME_SIZE + 1];
- int rc;
-
- if (!args->lstio_ses_idp || /* address for output sid */
- !args->lstio_ses_key || /* no key is specified */
- !args->lstio_ses_namep || /* session name */
- args->lstio_ses_nmlen <= 0 ||
- args->lstio_ses_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (copy_from_user(name, args->lstio_ses_namep,
- args->lstio_ses_nmlen)) {
- return -EFAULT;
- }
-
- name[args->lstio_ses_nmlen] = 0;
-
- rc = lstcon_session_new(name,
- args->lstio_ses_key,
- args->lstio_ses_feats,
- args->lstio_ses_timeout,
- args->lstio_ses_force,
- args->lstio_ses_idp);
-
- return rc;
-}
-
-static int
-lst_session_end_ioctl(struct lstio_session_end_args *args)
-{
- if (args->lstio_ses_key != console_session.ses_key)
- return -EACCES;
-
- return lstcon_session_end();
-}
-
-static int
-lst_session_info_ioctl(struct lstio_session_info_args *args)
-{
- /* no checking of key */
-
- if (!args->lstio_ses_idp || /* address for output sid */
- !args->lstio_ses_keyp || /* address for output key */
- !args->lstio_ses_featp || /* address for output features */
- !args->lstio_ses_ndinfo || /* address for output ndinfo */
- !args->lstio_ses_namep || /* address for output name */
- args->lstio_ses_nmlen <= 0 ||
- args->lstio_ses_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- return lstcon_session_info(args->lstio_ses_idp,
- args->lstio_ses_keyp,
- args->lstio_ses_featp,
- args->lstio_ses_ndinfo,
- args->lstio_ses_namep,
- args->lstio_ses_nmlen);
-}
-
-static int
-lst_debug_ioctl(struct lstio_debug_args *args)
-{
- char name[LST_NAME_SIZE + 1];
- int client = 1;
- int rc;
-
- if (args->lstio_dbg_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_dbg_resultp)
- return -EINVAL;
-
- if (args->lstio_dbg_namep && /* name of batch/group */
- (args->lstio_dbg_nmlen <= 0 ||
- args->lstio_dbg_nmlen > LST_NAME_SIZE))
- return -EINVAL;
-
- if (args->lstio_dbg_namep) {
-
- if (copy_from_user(name, args->lstio_dbg_namep,
- args->lstio_dbg_nmlen))
- return -EFAULT;
-
- name[args->lstio_dbg_nmlen] = 0;
- }
-
- rc = -EINVAL;
-
- switch (args->lstio_dbg_type) {
- case LST_OPC_SESSION:
- rc = lstcon_session_debug(args->lstio_dbg_timeout,
- args->lstio_dbg_resultp);
- break;
-
- case LST_OPC_BATCHSRV:
- client = 0;
- /* fall through */
- case LST_OPC_BATCHCLI:
- if (!args->lstio_dbg_namep)
- goto out;
-
- rc = lstcon_batch_debug(args->lstio_dbg_timeout,
- name, client, args->lstio_dbg_resultp);
- break;
-
- case LST_OPC_GROUP:
- if (!args->lstio_dbg_namep)
- goto out;
-
- rc = lstcon_group_debug(args->lstio_dbg_timeout,
- name, args->lstio_dbg_resultp);
- break;
-
- case LST_OPC_NODES:
- if (args->lstio_dbg_count <= 0 ||
- !args->lstio_dbg_idsp)
- goto out;
-
- rc = lstcon_nodes_debug(args->lstio_dbg_timeout,
- args->lstio_dbg_count,
- args->lstio_dbg_idsp,
- args->lstio_dbg_resultp);
- break;
-
- default:
- break;
- }
-
-out:
- return rc;
-}
-
-static int
-lst_group_add_ioctl(struct lstio_group_add_args *args)
-{
- char name[LST_NAME_SIZE + 1];
- int rc;
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_grp_namep ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen))
- return -EFAULT;
-
- name[args->lstio_grp_nmlen] = 0;
-
- rc = lstcon_group_add(name);
-
- return rc;
-}
-
-static int
-lst_group_del_ioctl(struct lstio_group_del_args *args)
-{
- int rc;
- char name[LST_NAME_SIZE + 1];
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_grp_namep ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen))
- return -EFAULT;
-
- name[args->lstio_grp_nmlen] = 0;
-
- rc = lstcon_group_del(name);
-
- return rc;
-}
-
-static int
-lst_group_update_ioctl(struct lstio_group_update_args *args)
-{
- int rc;
- char name[LST_NAME_SIZE + 1];
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_grp_resultp ||
- !args->lstio_grp_namep ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen))
- return -EFAULT;
-
- name[args->lstio_grp_nmlen] = 0;
-
- switch (args->lstio_grp_opc) {
- case LST_GROUP_CLEAN:
- rc = lstcon_group_clean(name, args->lstio_grp_args);
- break;
-
- case LST_GROUP_REFRESH:
- rc = lstcon_group_refresh(name, args->lstio_grp_resultp);
- break;
-
- case LST_GROUP_RMND:
- if (args->lstio_grp_count <= 0 ||
- !args->lstio_grp_idsp) {
- rc = -EINVAL;
- break;
- }
- rc = lstcon_nodes_remove(name, args->lstio_grp_count,
- args->lstio_grp_idsp,
- args->lstio_grp_resultp);
- break;
-
- default:
- rc = -EINVAL;
- break;
- }
-
- return rc;
-}
-
-static int
-lst_nodes_add_ioctl(struct lstio_group_nodes_args *args)
-{
- unsigned int feats;
- int rc;
- char name[LST_NAME_SIZE + 1];
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_grp_idsp || /* array of ids */
- args->lstio_grp_count <= 0 ||
- !args->lstio_grp_resultp ||
- !args->lstio_grp_featp ||
- !args->lstio_grp_namep ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen))
- return -EFAULT;
-
- name[args->lstio_grp_nmlen] = 0;
-
- rc = lstcon_nodes_add(name, args->lstio_grp_count,
- args->lstio_grp_idsp, &feats,
- args->lstio_grp_resultp);
-
- if (!rc &&
- copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
- return -EINVAL;
- }
-
- return rc;
-}
-
-static int
-lst_group_list_ioctl(struct lstio_group_list_args *args)
-{
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_grp_idx < 0 ||
- !args->lstio_grp_namep ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- return lstcon_group_list(args->lstio_grp_idx,
- args->lstio_grp_nmlen,
- args->lstio_grp_namep);
-}
-
-static int
-lst_group_info_ioctl(struct lstio_group_info_args *args)
-{
- char name[LST_NAME_SIZE + 1];
- int ndent;
- int index;
- int rc;
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_grp_namep ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (!args->lstio_grp_entp && /* output: group entry */
- !args->lstio_grp_dentsp) /* output: node entry */
- return -EINVAL;
-
- if (args->lstio_grp_dentsp) { /* have node entry */
- if (!args->lstio_grp_idxp || /* node index */
- !args->lstio_grp_ndentp) /* # of node entry */
- return -EINVAL;
-
- if (copy_from_user(&ndent, args->lstio_grp_ndentp,
- sizeof(ndent)) ||
- copy_from_user(&index, args->lstio_grp_idxp,
- sizeof(index)))
- return -EFAULT;
-
- if (ndent <= 0 || index < 0)
- return -EINVAL;
- }
-
- if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen))
- return -EFAULT;
-
- name[args->lstio_grp_nmlen] = 0;
-
- rc = lstcon_group_info(name, args->lstio_grp_entp,
- &index, &ndent, args->lstio_grp_dentsp);
-
- if (rc)
- return rc;
-
- if (args->lstio_grp_dentsp &&
- (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
- copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-lst_batch_add_ioctl(struct lstio_batch_add_args *args)
-{
- int rc;
- char name[LST_NAME_SIZE + 1];
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_bat_namep ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen))
- return -EFAULT;
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_batch_add(name);
-
- return rc;
-}
-
-static int
-lst_batch_run_ioctl(struct lstio_batch_run_args *args)
-{
- int rc;
- char name[LST_NAME_SIZE + 1];
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_bat_namep ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen))
- return -EFAULT;
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_batch_run(name, args->lstio_bat_timeout,
- args->lstio_bat_resultp);
-
- return rc;
-}
-
-static int
-lst_batch_stop_ioctl(struct lstio_batch_stop_args *args)
-{
- int rc;
- char name[LST_NAME_SIZE + 1];
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_bat_resultp ||
- !args->lstio_bat_namep ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen))
- return -EFAULT;
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_batch_stop(name, args->lstio_bat_force,
- args->lstio_bat_resultp);
-
- return rc;
-}
-
-static int
-lst_batch_query_ioctl(struct lstio_batch_query_args *args)
-{
- char name[LST_NAME_SIZE + 1];
- int rc;
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_bat_resultp ||
- !args->lstio_bat_namep ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (args->lstio_bat_testidx < 0)
- return -EINVAL;
-
- if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen))
- return -EFAULT;
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_test_batch_query(name,
- args->lstio_bat_testidx,
- args->lstio_bat_client,
- args->lstio_bat_timeout,
- args->lstio_bat_resultp);
-
- return rc;
-}
-
-static int
-lst_batch_list_ioctl(struct lstio_batch_list_args *args)
-{
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_bat_idx < 0 ||
- !args->lstio_bat_namep ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- return lstcon_batch_list(args->lstio_bat_idx,
- args->lstio_bat_nmlen,
- args->lstio_bat_namep);
-}
-
-static int
-lst_batch_info_ioctl(struct lstio_batch_info_args *args)
-{
- char name[LST_NAME_SIZE + 1];
- int rc;
- int index;
- int ndent;
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_bat_namep || /* batch name */
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (!args->lstio_bat_entp && /* output: batch entry */
- !args->lstio_bat_dentsp) /* output: node entry */
- return -EINVAL;
-
- if (args->lstio_bat_dentsp) { /* have node entry */
- if (!args->lstio_bat_idxp || /* node index */
- !args->lstio_bat_ndentp) /* # of node entry */
- return -EINVAL;
-
- if (copy_from_user(&index, args->lstio_bat_idxp,
- sizeof(index)) ||
- copy_from_user(&ndent, args->lstio_bat_ndentp,
- sizeof(ndent)))
- return -EFAULT;
-
- if (ndent <= 0 || index < 0)
- return -EINVAL;
- }
-
- if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen))
- return -EFAULT;
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_batch_info(name, args->lstio_bat_entp,
- args->lstio_bat_server, args->lstio_bat_testidx,
- &index, &ndent, args->lstio_bat_dentsp);
-
- if (rc)
- return rc;
-
- if (args->lstio_bat_dentsp &&
- (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
- copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
- rc = -EFAULT;
-
- return rc;
-}
-
-static int
-lst_stat_query_ioctl(struct lstio_stat_args *args)
-{
- int rc;
- char name[LST_NAME_SIZE + 1];
-
- /* TODO: not finished */
- if (args->lstio_sta_key != console_session.ses_key)
- return -EACCES;
-
- if (!args->lstio_sta_resultp)
- return -EINVAL;
-
- if (args->lstio_sta_idsp) {
- if (args->lstio_sta_count <= 0)
- return -EINVAL;
-
- rc = lstcon_nodes_stat(args->lstio_sta_count,
- args->lstio_sta_idsp,
- args->lstio_sta_timeout,
- args->lstio_sta_resultp);
- } else if (args->lstio_sta_namep) {
- if (args->lstio_sta_nmlen <= 0 ||
- args->lstio_sta_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- rc = copy_from_user(name, args->lstio_sta_namep,
- args->lstio_sta_nmlen);
- if (!rc)
- rc = lstcon_group_stat(name, args->lstio_sta_timeout,
- args->lstio_sta_resultp);
- else
- rc = -EFAULT;
- } else {
- rc = -EINVAL;
- }
-
- return rc;
-}
-
-static int lst_test_add_ioctl(struct lstio_test_args *args)
-{
- char batch_name[LST_NAME_SIZE + 1];
- char src_name[LST_NAME_SIZE + 1];
- char dst_name[LST_NAME_SIZE + 1];
- void *param = NULL;
- int ret = 0;
- int rc = -ENOMEM;
-
- if (!args->lstio_tes_resultp ||
- !args->lstio_tes_retp ||
- !args->lstio_tes_bat_name || /* no specified batch */
- args->lstio_tes_bat_nmlen <= 0 ||
- args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
- !args->lstio_tes_sgrp_name || /* no source group */
- args->lstio_tes_sgrp_nmlen <= 0 ||
- args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
- !args->lstio_tes_dgrp_name || /* no target group */
- args->lstio_tes_dgrp_nmlen <= 0 ||
- args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (!args->lstio_tes_loop || /* negative is infinite */
- args->lstio_tes_concur <= 0 ||
- args->lstio_tes_dist <= 0 ||
- args->lstio_tes_span <= 0)
- return -EINVAL;
-
- /* have parameter, check if parameter length is valid */
- if (args->lstio_tes_param &&
- (args->lstio_tes_param_len <= 0 ||
- args->lstio_tes_param_len >
- PAGE_SIZE - sizeof(struct lstcon_test)))
- return -EINVAL;
-
- /* Enforce zero parameter length if there's no parameter */
- if (!args->lstio_tes_param && args->lstio_tes_param_len)
- return -EINVAL;
-
- if (args->lstio_tes_param) {
- param = memdup_user(args->lstio_tes_param,
- args->lstio_tes_param_len);
- if (IS_ERR(param))
- return PTR_ERR(param);
- }
-
- rc = -EFAULT;
- if (copy_from_user(batch_name, args->lstio_tes_bat_name,
- args->lstio_tes_bat_nmlen) ||
- copy_from_user(src_name, args->lstio_tes_sgrp_name,
- args->lstio_tes_sgrp_nmlen) ||
- copy_from_user(dst_name, args->lstio_tes_dgrp_name,
- args->lstio_tes_dgrp_nmlen))
- goto out;
-
- rc = lstcon_test_add(batch_name, args->lstio_tes_type,
- args->lstio_tes_loop, args->lstio_tes_concur,
- args->lstio_tes_dist, args->lstio_tes_span,
- src_name, dst_name, param,
- args->lstio_tes_param_len,
- &ret, args->lstio_tes_resultp);
-
- if (!rc && ret)
- rc = (copy_to_user(args->lstio_tes_retp, &ret,
- sizeof(ret))) ? -EFAULT : 0;
-out:
- kfree(param);
-
- return rc;
-}
-
-int
-lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
-{
- char *buf;
- struct libcfs_ioctl_data *data;
- int opc;
- int rc;
-
- if (cmd != IOC_LIBCFS_LNETST)
- return -EINVAL;
-
- data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
-
- opc = data->ioc_u32[0];
-
- if (data->ioc_plen1 > PAGE_SIZE)
- return -EINVAL;
-
- buf = kmalloc(data->ioc_plen1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* copy in parameter */
- if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
- kfree(buf);
- return -EFAULT;
- }
-
- mutex_lock(&console_session.ses_mutex);
-
- console_session.ses_laststamp = ktime_get_real_seconds();
-
- if (console_session.ses_shutdown) {
- rc = -ESHUTDOWN;
- goto out;
- }
-
- if (console_session.ses_expired)
- lstcon_session_end();
-
- if (opc != LSTIO_SESSION_NEW &&
- console_session.ses_state == LST_SESSION_NONE) {
- CDEBUG(D_NET, "LST no active session\n");
- rc = -ESRCH;
- goto out;
- }
-
- memset(&console_session.ses_trans_stat, 0, sizeof(struct lstcon_trans_stat));
-
- switch (opc) {
- case LSTIO_SESSION_NEW:
- rc = lst_session_new_ioctl((struct lstio_session_new_args *)buf);
- break;
- case LSTIO_SESSION_END:
- rc = lst_session_end_ioctl((struct lstio_session_end_args *)buf);
- break;
- case LSTIO_SESSION_INFO:
- rc = lst_session_info_ioctl((struct lstio_session_info_args *)buf);
- break;
- case LSTIO_DEBUG:
- rc = lst_debug_ioctl((struct lstio_debug_args *)buf);
- break;
- case LSTIO_GROUP_ADD:
- rc = lst_group_add_ioctl((struct lstio_group_add_args *)buf);
- break;
- case LSTIO_GROUP_DEL:
- rc = lst_group_del_ioctl((struct lstio_group_del_args *)buf);
- break;
- case LSTIO_GROUP_UPDATE:
- rc = lst_group_update_ioctl((struct lstio_group_update_args *)buf);
- break;
- case LSTIO_NODES_ADD:
- rc = lst_nodes_add_ioctl((struct lstio_group_nodes_args *)buf);
- break;
- case LSTIO_GROUP_LIST:
- rc = lst_group_list_ioctl((struct lstio_group_list_args *)buf);
- break;
- case LSTIO_GROUP_INFO:
- rc = lst_group_info_ioctl((struct lstio_group_info_args *)buf);
- break;
- case LSTIO_BATCH_ADD:
- rc = lst_batch_add_ioctl((struct lstio_batch_add_args *)buf);
- break;
- case LSTIO_BATCH_START:
- rc = lst_batch_run_ioctl((struct lstio_batch_run_args *)buf);
- break;
- case LSTIO_BATCH_STOP:
- rc = lst_batch_stop_ioctl((struct lstio_batch_stop_args *)buf);
- break;
- case LSTIO_BATCH_QUERY:
- rc = lst_batch_query_ioctl((struct lstio_batch_query_args *)buf);
- break;
- case LSTIO_BATCH_LIST:
- rc = lst_batch_list_ioctl((struct lstio_batch_list_args *)buf);
- break;
- case LSTIO_BATCH_INFO:
- rc = lst_batch_info_ioctl((struct lstio_batch_info_args *)buf);
- break;
- case LSTIO_TEST_ADD:
- rc = lst_test_add_ioctl((struct lstio_test_args *)buf);
- break;
- case LSTIO_STAT_QUERY:
- rc = lst_stat_query_ioctl((struct lstio_stat_args *)buf);
- break;
- default:
- rc = -EINVAL;
- }
-
- if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
- sizeof(struct lstcon_trans_stat)))
- rc = -EFAULT;
-out:
- mutex_unlock(&console_session.ses_mutex);
-
- kfree(buf);
-
- return rc;
-}
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
deleted file mode 100644
index 6dcc966b293b..000000000000
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ /dev/null
@@ -1,1397 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/conctl.c
- *
- * Console framework rpcs
- *
- * Author: Liang Zhen <liang@whamcloud.com>
- */
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-lnet.h>
-#include "timer.h"
-#include "conrpc.h"
-#include "console.h"
-
-void lstcon_rpc_stat_reply(struct lstcon_rpc_trans *, struct srpc_msg *,
- struct lstcon_node *, struct lstcon_trans_stat *);
-
-static void
-lstcon_rpc_done(struct srpc_client_rpc *rpc)
-{
- struct lstcon_rpc *crpc = (struct lstcon_rpc *)rpc->crpc_priv;
-
- LASSERT(crpc && rpc == crpc->crp_rpc);
- LASSERT(crpc->crp_posted && !crpc->crp_finished);
-
- spin_lock(&rpc->crpc_lock);
-
- if (!crpc->crp_trans) {
- /*
- * Orphan RPC is not in any transaction,
- * I'm just a poor body and nobody loves me
- */
- spin_unlock(&rpc->crpc_lock);
-
- /* release it */
- lstcon_rpc_put(crpc);
- return;
- }
-
- /* not an orphan RPC */
- crpc->crp_finished = 1;
-
- if (!crpc->crp_stamp) {
- /* not aborted */
- LASSERT(!crpc->crp_status);
-
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = rpc->crpc_status;
- }
-
- /* wakeup (transaction)thread if I'm the last RPC in the transaction */
- if (atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
- wake_up(&crpc->crp_trans->tas_waitq);
-
- spin_unlock(&rpc->crpc_lock);
-}
-
-static int
-lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats,
- int bulk_npg, int bulk_len, int embedded,
- struct lstcon_rpc *crpc)
-{
- crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
- feats, bulk_npg, bulk_len,
- lstcon_rpc_done, (void *)crpc);
- if (!crpc->crp_rpc)
- return -ENOMEM;
-
- crpc->crp_trans = NULL;
- crpc->crp_node = nd;
- crpc->crp_posted = 0;
- crpc->crp_finished = 0;
- crpc->crp_unpacked = 0;
- crpc->crp_status = 0;
- crpc->crp_stamp = 0;
- crpc->crp_embedded = embedded;
- INIT_LIST_HEAD(&crpc->crp_link);
-
- atomic_inc(&console_session.ses_rpc_counter);
-
- return 0;
-}
-
-static int
-lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned int feats,
- int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
-{
- struct lstcon_rpc *crpc = NULL;
- int rc;
-
- spin_lock(&console_session.ses_rpc_lock);
-
- crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist,
- struct lstcon_rpc, crp_link);
- if (crpc)
- list_del_init(&crpc->crp_link);
-
- spin_unlock(&console_session.ses_rpc_lock);
-
- if (!crpc) {
- crpc = kzalloc(sizeof(*crpc), GFP_NOFS);
- if (!crpc)
- return -ENOMEM;
- }
-
- rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc);
- if (!rc) {
- *crpcpp = crpc;
- return 0;
- }
-
- kfree(crpc);
-
- return rc;
-}
-
-void
-lstcon_rpc_put(struct lstcon_rpc *crpc)
-{
- struct srpc_bulk *bulk = &crpc->crp_rpc->crpc_bulk;
- int i;
-
- LASSERT(list_empty(&crpc->crp_link));
-
- for (i = 0; i < bulk->bk_niov; i++) {
- if (!bulk->bk_iovs[i].bv_page)
- continue;
-
- __free_page(bulk->bk_iovs[i].bv_page);
- }
-
- srpc_client_rpc_decref(crpc->crp_rpc);
-
- if (crpc->crp_embedded) {
- /* embedded RPC, don't recycle it */
- memset(crpc, 0, sizeof(*crpc));
- crpc->crp_embedded = 1;
-
- } else {
- spin_lock(&console_session.ses_rpc_lock);
-
- list_add(&crpc->crp_link,
- &console_session.ses_rpc_freelist);
-
- spin_unlock(&console_session.ses_rpc_lock);
- }
-
- /* RPC is not alive now */
- atomic_dec(&console_session.ses_rpc_counter);
-}
-
-static void
-lstcon_rpc_post(struct lstcon_rpc *crpc)
-{
- struct lstcon_rpc_trans *trans = crpc->crp_trans;
-
- LASSERT(trans);
-
- atomic_inc(&trans->tas_remaining);
- crpc->crp_posted = 1;
-
- sfw_post_rpc(crpc->crp_rpc);
-}
-
-static char *
-lstcon_rpc_trans_name(int transop)
-{
- if (transop == LST_TRANS_SESNEW)
- return "SESNEW";
-
- if (transop == LST_TRANS_SESEND)
- return "SESEND";
-
- if (transop == LST_TRANS_SESQRY)
- return "SESQRY";
-
- if (transop == LST_TRANS_SESPING)
- return "SESPING";
-
- if (transop == LST_TRANS_TSBCLIADD)
- return "TSBCLIADD";
-
- if (transop == LST_TRANS_TSBSRVADD)
- return "TSBSRVADD";
-
- if (transop == LST_TRANS_TSBRUN)
- return "TSBRUN";
-
- if (transop == LST_TRANS_TSBSTOP)
- return "TSBSTOP";
-
- if (transop == LST_TRANS_TSBCLIQRY)
- return "TSBCLIQRY";
-
- if (transop == LST_TRANS_TSBSRVQRY)
- return "TSBSRVQRY";
-
- if (transop == LST_TRANS_STATQRY)
- return "STATQRY";
-
- return "Unknown";
-}
-
-int
-lstcon_rpc_trans_prep(struct list_head *translist, int transop,
- struct lstcon_rpc_trans **transpp)
-{
- struct lstcon_rpc_trans *trans;
-
- if (translist) {
- list_for_each_entry(trans, translist, tas_link) {
- /*
- * Can't enqueue two private transaction on
- * the same object
- */
- if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE)
- return -EPERM;
- }
- }
-
- /* create a trans group */
- trans = kzalloc(sizeof(*trans), GFP_NOFS);
- if (!trans)
- return -ENOMEM;
-
- trans->tas_opc = transop;
-
- if (!translist)
- INIT_LIST_HEAD(&trans->tas_olink);
- else
- list_add_tail(&trans->tas_olink, translist);
-
- list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
-
- INIT_LIST_HEAD(&trans->tas_rpcs_list);
- atomic_set(&trans->tas_remaining, 0);
- init_waitqueue_head(&trans->tas_waitq);
-
- spin_lock(&console_session.ses_rpc_lock);
- trans->tas_features = console_session.ses_features;
- spin_unlock(&console_session.ses_rpc_lock);
-
- *transpp = trans;
- return 0;
-}
-
-void
-lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *crpc)
-{
- list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
- crpc->crp_trans = trans;
-}
-
-void
-lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
-{
- struct srpc_client_rpc *rpc;
- struct lstcon_rpc *crpc;
- struct lstcon_node *nd;
-
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
- rpc = crpc->crp_rpc;
-
- spin_lock(&rpc->crpc_lock);
-
- if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp) { /* rpc done or aborted already */
- if (!crpc->crp_stamp) {
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = -EINTR;
- }
- spin_unlock(&rpc->crpc_lock);
- continue;
- }
-
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = error;
-
- spin_unlock(&rpc->crpc_lock);
-
- sfw_abort_rpc(rpc);
-
- if (error != -ETIMEDOUT)
- continue;
-
- nd = crpc->crp_node;
- if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
- continue;
-
- nd->nd_stamp = crpc->crp_stamp;
- nd->nd_state = LST_NODE_DOWN;
- }
-}
-
-static int
-lstcon_rpc_trans_check(struct lstcon_rpc_trans *trans)
-{
- if (console_session.ses_shutdown &&
- !list_empty(&trans->tas_olink)) /* Not an end session RPC */
- return 1;
-
- return !atomic_read(&trans->tas_remaining) ? 1 : 0;
-}
-
-int
-lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout)
-{
- struct lstcon_rpc *crpc;
- int rc;
-
- if (list_empty(&trans->tas_rpcs_list))
- return 0;
-
- if (timeout < LST_TRANS_MIN_TIMEOUT)
- timeout = LST_TRANS_MIN_TIMEOUT;
-
- CDEBUG(D_NET, "Transaction %s started\n",
- lstcon_rpc_trans_name(trans->tas_opc));
-
- /* post all requests */
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
- LASSERT(!crpc->crp_posted);
-
- lstcon_rpc_post(crpc);
- }
-
- mutex_unlock(&console_session.ses_mutex);
-
- rc = wait_event_interruptible_timeout(trans->tas_waitq,
- lstcon_rpc_trans_check(trans),
- timeout * HZ);
- rc = (rc > 0) ? 0 : ((rc < 0) ? -EINTR : -ETIMEDOUT);
-
- mutex_lock(&console_session.ses_mutex);
-
- if (console_session.ses_shutdown)
- rc = -ESHUTDOWN;
-
- if (rc || atomic_read(&trans->tas_remaining)) {
- /* treat short timeout as canceled */
- if (rc == -ETIMEDOUT && timeout < LST_TRANS_MIN_TIMEOUT * 2)
- rc = -EINTR;
-
- lstcon_rpc_trans_abort(trans, rc);
- }
-
- CDEBUG(D_NET, "Transaction %s stopped: %d\n",
- lstcon_rpc_trans_name(trans->tas_opc), rc);
-
- lstcon_rpc_trans_stat(trans, lstcon_trans_stat());
-
- return rc;
-}
-
-static int
-lstcon_rpc_get_reply(struct lstcon_rpc *crpc, struct srpc_msg **msgpp)
-{
- struct lstcon_node *nd = crpc->crp_node;
- struct srpc_client_rpc *rpc = crpc->crp_rpc;
- struct srpc_generic_reply *rep;
-
- LASSERT(nd && rpc);
- LASSERT(crpc->crp_stamp);
-
- if (crpc->crp_status) {
- *msgpp = NULL;
- return crpc->crp_status;
- }
-
- *msgpp = &rpc->crpc_replymsg;
- if (!crpc->crp_unpacked) {
- sfw_unpack_message(*msgpp);
- crpc->crp_unpacked = 1;
- }
-
- if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
- return 0;
-
- nd->nd_stamp = crpc->crp_stamp;
- rep = &(*msgpp)->msg_body.reply;
-
- if (rep->sid.ses_nid == LNET_NID_ANY)
- nd->nd_state = LST_NODE_UNKNOWN;
- else if (lstcon_session_match(rep->sid))
- nd->nd_state = LST_NODE_ACTIVE;
- else
- nd->nd_state = LST_NODE_BUSY;
-
- return 0;
-}
-
-void
-lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, struct lstcon_trans_stat *stat)
-{
- struct lstcon_rpc *crpc;
- struct srpc_msg *rep;
- int error;
-
- LASSERT(stat);
-
- memset(stat, 0, sizeof(*stat));
-
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
- lstcon_rpc_stat_total(stat, 1);
-
- LASSERT(crpc->crp_stamp);
-
- error = lstcon_rpc_get_reply(crpc, &rep);
- if (error) {
- lstcon_rpc_stat_failure(stat, 1);
- if (!stat->trs_rpc_errno)
- stat->trs_rpc_errno = -error;
-
- continue;
- }
-
- lstcon_rpc_stat_success(stat, 1);
-
- lstcon_rpc_stat_reply(trans, rep, crpc->crp_node, stat);
- }
-
- if (trans->tas_opc == LST_TRANS_SESNEW && !stat->trs_fwk_errno) {
- stat->trs_fwk_errno =
- lstcon_session_feats_check(trans->tas_features);
- }
-
- CDEBUG(D_NET, "transaction %s : success %d, failure %d, total %d, RPC error(%d), Framework error(%d)\n",
- lstcon_rpc_trans_name(trans->tas_opc),
- lstcon_rpc_stat_success(stat, 0),
- lstcon_rpc_stat_failure(stat, 0),
- lstcon_rpc_stat_total(stat, 0),
- stat->trs_rpc_errno, stat->trs_fwk_errno);
-}
-
-int
-lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
- struct list_head __user *head_up,
- lstcon_rpc_readent_func_t readent)
-{
- struct list_head tmp;
- struct list_head __user *next;
- struct lstcon_rpc_ent *ent;
- struct srpc_generic_reply *rep;
- struct lstcon_rpc *crpc;
- struct srpc_msg *msg;
- struct lstcon_node *nd;
- long dur;
- struct timeval tv;
- int error;
-
- LASSERT(head_up);
-
- next = head_up;
-
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
- if (copy_from_user(&tmp, next,
- sizeof(struct list_head)))
- return -EFAULT;
-
- next = tmp.next;
- if (next == head_up)
- return 0;
-
- ent = list_entry(next, struct lstcon_rpc_ent, rpe_link);
-
- LASSERT(crpc->crp_stamp);
-
- error = lstcon_rpc_get_reply(crpc, &msg);
-
- nd = crpc->crp_node;
-
- dur = (long)cfs_time_sub(crpc->crp_stamp,
- (unsigned long)console_session.ses_id.ses_stamp);
- jiffies_to_timeval(dur, &tv);
-
- if (copy_to_user(&ent->rpe_peer, &nd->nd_id,
- sizeof(struct lnet_process_id)) ||
- copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
- copy_to_user(&ent->rpe_state, &nd->nd_state,
- sizeof(nd->nd_state)) ||
- copy_to_user(&ent->rpe_rpc_errno, &error,
- sizeof(error)))
- return -EFAULT;
-
- if (error)
- continue;
-
- /* RPC is done */
- rep = (struct srpc_generic_reply *)&msg->msg_body.reply;
-
- if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(rep->sid)) ||
- copy_to_user(&ent->rpe_fwk_errno, &rep->status,
- sizeof(rep->status)))
- return -EFAULT;
-
- if (!readent)
- continue;
-
- error = readent(trans->tas_opc, msg, ent);
- if (error)
- return error;
- }
-
- return 0;
-}
-
-void
-lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
-{
- struct srpc_client_rpc *rpc;
- struct lstcon_rpc *crpc;
- struct lstcon_rpc *tmp;
- int count = 0;
-
- list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) {
- rpc = crpc->crp_rpc;
-
- spin_lock(&rpc->crpc_lock);
-
- /* free it if not posted or finished already */
- if (!crpc->crp_posted || crpc->crp_finished) {
- spin_unlock(&rpc->crpc_lock);
-
- list_del_init(&crpc->crp_link);
- lstcon_rpc_put(crpc);
-
- continue;
- }
-
- /*
- * rpcs can be still not callbacked (even LNetMDUnlink is
- * called) because huge timeout for inaccessible network,
- * don't make user wait for them, just abandon them, they
- * will be recycled in callback
- */
- LASSERT(crpc->crp_status);
-
- crpc->crp_node = NULL;
- crpc->crp_trans = NULL;
- list_del_init(&crpc->crp_link);
- count++;
-
- spin_unlock(&rpc->crpc_lock);
-
- atomic_dec(&trans->tas_remaining);
- }
-
- LASSERT(!atomic_read(&trans->tas_remaining));
-
- list_del(&trans->tas_link);
- if (!list_empty(&trans->tas_olink))
- list_del(&trans->tas_olink);
-
- CDEBUG(D_NET, "Transaction %s destroyed with %d pending RPCs\n",
- lstcon_rpc_trans_name(trans->tas_opc), count);
-
- kfree(trans);
-}
-
-int
-lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned int feats, struct lstcon_rpc **crpc)
-{
- struct srpc_mksn_reqst *msrq;
- struct srpc_rmsn_reqst *rsrq;
- int rc;
-
- switch (transop) {
- case LST_TRANS_SESNEW:
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_MAKE_SESSION,
- feats, 0, 0, crpc);
- if (rc)
- return rc;
-
- msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst;
- msrq->mksn_sid = console_session.ses_id;
- msrq->mksn_force = console_session.ses_force;
- strlcpy(msrq->mksn_name, console_session.ses_name,
- sizeof(msrq->mksn_name));
- break;
-
- case LST_TRANS_SESEND:
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_REMOVE_SESSION,
- feats, 0, 0, crpc);
- if (rc)
- return rc;
-
- rsrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.rmsn_reqst;
- rsrq->rmsn_sid = console_session.ses_id;
- break;
-
- default:
- LBUG();
- }
-
- return 0;
-}
-
-int
-lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned int feats,
- struct lstcon_rpc **crpc)
-{
- struct srpc_debug_reqst *drq;
- int rc;
-
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
- if (rc)
- return rc;
-
- drq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
-
- drq->dbg_sid = console_session.ses_id;
- drq->dbg_flags = 0;
-
- return rc;
-}
-
-int
-lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
- struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
-{
- struct lstcon_batch *batch;
- struct srpc_batch_reqst *brq;
- int rc;
-
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
- if (rc)
- return rc;
-
- brq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.bat_reqst;
-
- brq->bar_sid = console_session.ses_id;
- brq->bar_bid = tsb->tsb_id;
- brq->bar_testidx = tsb->tsb_index;
- brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN :
- (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP :
- SRPC_BATCH_OPC_QUERY);
-
- if (transop != LST_TRANS_TSBRUN &&
- transop != LST_TRANS_TSBSTOP)
- return 0;
-
- LASSERT(!tsb->tsb_index);
-
- batch = (struct lstcon_batch *)tsb;
- brq->bar_arg = batch->bat_arg;
-
- return 0;
-}
-
-int
-lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int feats,
- struct lstcon_rpc **crpc)
-{
- struct srpc_stat_reqst *srq;
- int rc;
-
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
- if (rc)
- return rc;
-
- srq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.stat_reqst;
-
- srq->str_sid = console_session.ses_id;
- srq->str_type = 0; /* XXX remove it */
-
- return 0;
-}
-
-static struct lnet_process_id_packed *
-lstcon_next_id(int idx, int nkiov, struct bio_vec *kiov)
-{
- struct lnet_process_id_packed *pid;
- int i;
-
- i = idx / SFW_ID_PER_PAGE;
-
- LASSERT(i < nkiov);
-
- pid = (struct lnet_process_id_packed *)page_address(kiov[i].bv_page);
-
- return &pid[idx % SFW_ID_PER_PAGE];
-}
-
-static int
-lstcon_dstnodes_prep(struct lstcon_group *grp, int idx,
- int dist, int span, int nkiov, struct bio_vec *kiov)
-{
- struct lnet_process_id_packed *pid;
- struct lstcon_ndlink *ndl;
- struct lstcon_node *nd;
- int start;
- int end;
- int i = 0;
-
- LASSERT(dist >= 1);
- LASSERT(span >= 1);
- LASSERT(grp->grp_nnode >= 1);
-
- if (span > grp->grp_nnode)
- return -EINVAL;
-
- start = ((idx / dist) * span) % grp->grp_nnode;
- end = ((idx / dist) * span + span - 1) % grp->grp_nnode;
-
- list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
- nd = ndl->ndl_node;
- if (i < start) {
- i++;
- continue;
- }
-
- if (i > (end >= start ? end : grp->grp_nnode))
- break;
-
- pid = lstcon_next_id((i - start), nkiov, kiov);
- pid->nid = nd->nd_id.nid;
- pid->pid = nd->nd_id.pid;
- i++;
- }
-
- if (start <= end) /* done */
- return 0;
-
- list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
- if (i > grp->grp_nnode + end)
- break;
-
- nd = ndl->ndl_node;
- pid = lstcon_next_id((i - start), nkiov, kiov);
- pid->nid = nd->nd_id.nid;
- pid->pid = nd->nd_id.pid;
- i++;
- }
-
- return 0;
-}
-
-static int
-lstcon_pingrpc_prep(struct lst_test_ping_param *param, struct srpc_test_reqst *req)
-{
- struct test_ping_req *prq = &req->tsr_u.ping;
-
- prq->png_size = param->png_size;
- prq->png_flags = param->png_flags;
- /* TODO dest */
- return 0;
-}
-
-static int
-lstcon_bulkrpc_v0_prep(struct lst_test_bulk_param *param,
- struct srpc_test_reqst *req)
-{
- struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
-
- brq->blk_opc = param->blk_opc;
- brq->blk_npg = DIV_ROUND_UP(param->blk_size, PAGE_SIZE);
- brq->blk_flags = param->blk_flags;
-
- return 0;
-}
-
-static int
-lstcon_bulkrpc_v1_prep(struct lst_test_bulk_param *param, bool is_client,
- struct srpc_test_reqst *req)
-{
- struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
-
- brq->blk_opc = param->blk_opc;
- brq->blk_flags = param->blk_flags;
- brq->blk_len = param->blk_size;
- brq->blk_offset = is_client ? param->blk_cli_off : param->blk_srv_off;
-
- return 0;
-}
-
-int
-lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
- struct lstcon_test *test, struct lstcon_rpc **crpc)
-{
- struct lstcon_group *sgrp = test->tes_src_grp;
- struct lstcon_group *dgrp = test->tes_dst_grp;
- struct srpc_test_reqst *trq;
- struct srpc_bulk *bulk;
- int i;
- int npg = 0;
- int nob = 0;
- int rc = 0;
-
- if (transop == LST_TRANS_TSBCLIADD) {
- npg = sfw_id_pages(test->tes_span);
- nob = !(feats & LST_FEAT_BULK_LEN) ?
- npg * PAGE_SIZE :
- sizeof(struct lnet_process_id_packed) * test->tes_span;
- }
-
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_TEST, feats, npg, nob, crpc);
- if (rc)
- return rc;
-
- trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;
-
- if (transop == LST_TRANS_TSBSRVADD) {
- int ndist = DIV_ROUND_UP(sgrp->grp_nnode, test->tes_dist);
- int nspan = DIV_ROUND_UP(dgrp->grp_nnode, test->tes_span);
- int nmax = DIV_ROUND_UP(ndist, nspan);
-
- trq->tsr_ndest = 0;
- trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
- } else {
- bulk = &(*crpc)->crp_rpc->crpc_bulk;
-
- for (i = 0; i < npg; i++) {
- int len;
-
- LASSERT(nob > 0);
-
- len = !(feats & LST_FEAT_BULK_LEN) ?
- PAGE_SIZE :
- min_t(int, nob, PAGE_SIZE);
- nob -= len;
-
- bulk->bk_iovs[i].bv_offset = 0;
- bulk->bk_iovs[i].bv_len = len;
- bulk->bk_iovs[i].bv_page = alloc_page(GFP_KERNEL);
-
- if (!bulk->bk_iovs[i].bv_page) {
- lstcon_rpc_put(*crpc);
- return -ENOMEM;
- }
- }
-
- bulk->bk_sink = 0;
-
- LASSERT(transop == LST_TRANS_TSBCLIADD);
-
- rc = lstcon_dstnodes_prep(test->tes_dst_grp,
- test->tes_cliidx++,
- test->tes_dist,
- test->tes_span,
- npg, &bulk->bk_iovs[0]);
- if (rc) {
- lstcon_rpc_put(*crpc);
- return rc;
- }
-
- trq->tsr_ndest = test->tes_span;
- trq->tsr_loop = test->tes_loop;
- }
-
- trq->tsr_sid = console_session.ses_id;
- trq->tsr_bid = test->tes_hdr.tsb_id;
- trq->tsr_concur = test->tes_concur;
- trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0;
- trq->tsr_stop_onerr = !!test->tes_stop_onerr;
-
- switch (test->tes_type) {
- case LST_TEST_PING:
- trq->tsr_service = SRPC_SERVICE_PING;
- rc = lstcon_pingrpc_prep((struct lst_test_ping_param *)
- &test->tes_param[0], trq);
- break;
-
- case LST_TEST_BULK:
- trq->tsr_service = SRPC_SERVICE_BRW;
- if (!(feats & LST_FEAT_BULK_LEN)) {
- rc = lstcon_bulkrpc_v0_prep((struct lst_test_bulk_param *)
- &test->tes_param[0], trq);
- } else {
- rc = lstcon_bulkrpc_v1_prep((struct lst_test_bulk_param *)
- &test->tes_param[0],
- trq->tsr_is_client, trq);
- }
-
- break;
- default:
- LBUG();
- break;
- }
-
- return rc;
-}
-
-static int
-lstcon_sesnew_stat_reply(struct lstcon_rpc_trans *trans,
- struct lstcon_node *nd, struct srpc_msg *reply)
-{
- struct srpc_mksn_reply *mksn_rep = &reply->msg_body.mksn_reply;
- int status = mksn_rep->mksn_status;
-
- if (!status &&
- (reply->msg_ses_feats & ~LST_FEATS_MASK)) {
- mksn_rep->mksn_status = EPROTO;
- status = EPROTO;
- }
-
- if (status == EPROTO) {
- CNETERR("session protocol error from %s: %u\n",
- libcfs_nid2str(nd->nd_id.nid),
- reply->msg_ses_feats);
- }
-
- if (status)
- return status;
-
- if (!trans->tas_feats_updated) {
- spin_lock(&console_session.ses_rpc_lock);
- if (!trans->tas_feats_updated) { /* recheck with lock */
- trans->tas_feats_updated = 1;
- trans->tas_features = reply->msg_ses_feats;
- }
- spin_unlock(&console_session.ses_rpc_lock);
- }
-
- if (reply->msg_ses_feats != trans->tas_features) {
- CNETERR("Framework features %x from %s is different with features on this transaction: %x\n",
- reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
- trans->tas_features);
- mksn_rep->mksn_status = EPROTO;
- status = EPROTO;
- }
-
- if (!status) {
- /* session timeout on remote node */
- nd->nd_timeout = mksn_rep->mksn_timeout;
- }
-
- return status;
-}
-
-void
-lstcon_rpc_stat_reply(struct lstcon_rpc_trans *trans, struct srpc_msg *msg,
- struct lstcon_node *nd, struct lstcon_trans_stat *stat)
-{
- struct srpc_rmsn_reply *rmsn_rep;
- struct srpc_debug_reply *dbg_rep;
- struct srpc_batch_reply *bat_rep;
- struct srpc_test_reply *test_rep;
- struct srpc_stat_reply *stat_rep;
- int rc = 0;
-
- switch (trans->tas_opc) {
- case LST_TRANS_SESNEW:
- rc = lstcon_sesnew_stat_reply(trans, nd, msg);
- if (!rc) {
- lstcon_sesop_stat_success(stat, 1);
- return;
- }
-
- lstcon_sesop_stat_failure(stat, 1);
- break;
-
- case LST_TRANS_SESEND:
- rmsn_rep = &msg->msg_body.rmsn_reply;
- /* ESRCH is not an error for end session */
- if (!rmsn_rep->rmsn_status ||
- rmsn_rep->rmsn_status == ESRCH) {
- lstcon_sesop_stat_success(stat, 1);
- return;
- }
-
- lstcon_sesop_stat_failure(stat, 1);
- rc = rmsn_rep->rmsn_status;
- break;
-
- case LST_TRANS_SESQRY:
- case LST_TRANS_SESPING:
- dbg_rep = &msg->msg_body.dbg_reply;
-
- if (dbg_rep->dbg_status == ESRCH) {
- lstcon_sesqry_stat_unknown(stat, 1);
- return;
- }
-
- if (lstcon_session_match(dbg_rep->dbg_sid))
- lstcon_sesqry_stat_active(stat, 1);
- else
- lstcon_sesqry_stat_busy(stat, 1);
- return;
-
- case LST_TRANS_TSBRUN:
- case LST_TRANS_TSBSTOP:
- bat_rep = &msg->msg_body.bat_reply;
-
- if (!bat_rep->bar_status) {
- lstcon_tsbop_stat_success(stat, 1);
- return;
- }
-
- if (bat_rep->bar_status == EPERM &&
- trans->tas_opc == LST_TRANS_TSBSTOP) {
- lstcon_tsbop_stat_success(stat, 1);
- return;
- }
-
- lstcon_tsbop_stat_failure(stat, 1);
- rc = bat_rep->bar_status;
- break;
-
- case LST_TRANS_TSBCLIQRY:
- case LST_TRANS_TSBSRVQRY:
- bat_rep = &msg->msg_body.bat_reply;
-
- if (bat_rep->bar_active)
- lstcon_tsbqry_stat_run(stat, 1);
- else
- lstcon_tsbqry_stat_idle(stat, 1);
-
- if (!bat_rep->bar_status)
- return;
-
- lstcon_tsbqry_stat_failure(stat, 1);
- rc = bat_rep->bar_status;
- break;
-
- case LST_TRANS_TSBCLIADD:
- case LST_TRANS_TSBSRVADD:
- test_rep = &msg->msg_body.tes_reply;
-
- if (!test_rep->tsr_status) {
- lstcon_tsbop_stat_success(stat, 1);
- return;
- }
-
- lstcon_tsbop_stat_failure(stat, 1);
- rc = test_rep->tsr_status;
- break;
-
- case LST_TRANS_STATQRY:
- stat_rep = &msg->msg_body.stat_reply;
-
- if (!stat_rep->str_status) {
- lstcon_statqry_stat_success(stat, 1);
- return;
- }
-
- lstcon_statqry_stat_failure(stat, 1);
- rc = stat_rep->str_status;
- break;
-
- default:
- LBUG();
- }
-
- if (!stat->trs_fwk_errno)
- stat->trs_fwk_errno = rc;
-}
-
-int
-lstcon_rpc_trans_ndlist(struct list_head *ndlist,
- struct list_head *translist, int transop,
- void *arg, lstcon_rpc_cond_func_t condition,
- struct lstcon_rpc_trans **transpp)
-{
- struct lstcon_rpc_trans *trans;
- struct lstcon_ndlink *ndl;
- struct lstcon_node *nd;
- struct lstcon_rpc *rpc;
- unsigned int feats;
- int rc;
-
- /* Creating session RPG for list of nodes */
-
- rc = lstcon_rpc_trans_prep(translist, transop, &trans);
- if (rc) {
- CERROR("Can't create transaction %d: %d\n", transop, rc);
- return rc;
- }
-
- feats = trans->tas_features;
- list_for_each_entry(ndl, ndlist, ndl_link) {
- rc = !condition ? 1 :
- condition(transop, ndl->ndl_node, arg);
-
- if (!rc)
- continue;
-
- if (rc < 0) {
- CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n",
- transop, rc);
- break;
- }
-
- nd = ndl->ndl_node;
-
- switch (transop) {
- case LST_TRANS_SESNEW:
- case LST_TRANS_SESEND:
- rc = lstcon_sesrpc_prep(nd, transop, feats, &rpc);
- break;
- case LST_TRANS_SESQRY:
- case LST_TRANS_SESPING:
- rc = lstcon_dbgrpc_prep(nd, feats, &rpc);
- break;
- case LST_TRANS_TSBCLIADD:
- case LST_TRANS_TSBSRVADD:
- rc = lstcon_testrpc_prep(nd, transop, feats,
- (struct lstcon_test *)arg,
- &rpc);
- break;
- case LST_TRANS_TSBRUN:
- case LST_TRANS_TSBSTOP:
- case LST_TRANS_TSBCLIQRY:
- case LST_TRANS_TSBSRVQRY:
- rc = lstcon_batrpc_prep(nd, transop, feats,
- (struct lstcon_tsb_hdr *)arg,
- &rpc);
- break;
- case LST_TRANS_STATQRY:
- rc = lstcon_statrpc_prep(nd, feats, &rpc);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- if (rc) {
- CERROR("Failed to create RPC for transaction %s: %d\n",
- lstcon_rpc_trans_name(transop), rc);
- break;
- }
-
- lstcon_rpc_trans_addreq(trans, rpc);
- }
-
- if (!rc) {
- *transpp = trans;
- return 0;
- }
-
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-static void
-lstcon_rpc_pinger(void *arg)
-{
- struct stt_timer *ptimer = (struct stt_timer *)arg;
- struct lstcon_rpc_trans *trans;
- struct lstcon_rpc *crpc;
- struct srpc_msg *rep;
- struct srpc_debug_reqst *drq;
- struct lstcon_ndlink *ndl;
- struct lstcon_node *nd;
- int intv;
- int count = 0;
- int rc;
-
- /*
- * RPC pinger is a special case of transaction,
- * it's called by timer at 8 seconds interval.
- */
- mutex_lock(&console_session.ses_mutex);
-
- if (console_session.ses_shutdown || console_session.ses_expired) {
- mutex_unlock(&console_session.ses_mutex);
- return;
- }
-
- if (!console_session.ses_expired &&
- ktime_get_real_seconds() - console_session.ses_laststamp >
- (time64_t)console_session.ses_timeout)
- console_session.ses_expired = 1;
-
- trans = console_session.ses_ping;
-
- LASSERT(trans);
-
- list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) {
- nd = ndl->ndl_node;
-
- if (console_session.ses_expired) {
- /* idle console, end session on all nodes */
- if (nd->nd_state != LST_NODE_ACTIVE)
- continue;
-
- rc = lstcon_sesrpc_prep(nd, LST_TRANS_SESEND,
- trans->tas_features, &crpc);
- if (rc) {
- CERROR("Out of memory\n");
- break;
- }
-
- lstcon_rpc_trans_addreq(trans, crpc);
- lstcon_rpc_post(crpc);
-
- continue;
- }
-
- crpc = &nd->nd_ping;
-
- if (crpc->crp_rpc) {
- LASSERT(crpc->crp_trans == trans);
- LASSERT(!list_empty(&crpc->crp_link));
-
- spin_lock(&crpc->crp_rpc->crpc_lock);
-
- LASSERT(crpc->crp_posted);
-
- if (!crpc->crp_finished) {
- /* in flight */
- spin_unlock(&crpc->crp_rpc->crpc_lock);
- continue;
- }
-
- spin_unlock(&crpc->crp_rpc->crpc_lock);
-
- lstcon_rpc_get_reply(crpc, &rep);
-
- list_del_init(&crpc->crp_link);
-
- lstcon_rpc_put(crpc);
- }
-
- if (nd->nd_state != LST_NODE_ACTIVE)
- continue;
-
- intv = (jiffies - nd->nd_stamp) / msecs_to_jiffies(MSEC_PER_SEC);
- if (intv < nd->nd_timeout / 2)
- continue;
-
- rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG,
- trans->tas_features, 0, 0, 1, crpc);
- if (rc) {
- CERROR("Out of memory\n");
- break;
- }
-
- drq = &crpc->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
-
- drq->dbg_sid = console_session.ses_id;
- drq->dbg_flags = 0;
-
- lstcon_rpc_trans_addreq(trans, crpc);
- lstcon_rpc_post(crpc);
-
- count++;
- }
-
- if (console_session.ses_expired) {
- mutex_unlock(&console_session.ses_mutex);
- return;
- }
-
- CDEBUG(D_NET, "Ping %d nodes in session\n", count);
-
- ptimer->stt_expires = ktime_get_real_seconds() + LST_PING_INTERVAL;
- stt_add_timer(ptimer);
-
- mutex_unlock(&console_session.ses_mutex);
-}
-
-int
-lstcon_rpc_pinger_start(void)
-{
- struct stt_timer *ptimer;
- int rc;
-
- LASSERT(list_empty(&console_session.ses_rpc_freelist));
- LASSERT(!atomic_read(&console_session.ses_rpc_counter));
-
- rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
- &console_session.ses_ping);
- if (rc) {
- CERROR("Failed to create console pinger\n");
- return rc;
- }
-
- ptimer = &console_session.ses_ping_timer;
- ptimer->stt_expires = ktime_get_real_seconds() + LST_PING_INTERVAL;
-
- stt_add_timer(ptimer);
-
- return 0;
-}
-
-void
-lstcon_rpc_pinger_stop(void)
-{
- LASSERT(console_session.ses_shutdown);
-
- stt_del_timer(&console_session.ses_ping_timer);
-
- lstcon_rpc_trans_abort(console_session.ses_ping, -ESHUTDOWN);
- lstcon_rpc_trans_stat(console_session.ses_ping, lstcon_trans_stat());
- lstcon_rpc_trans_destroy(console_session.ses_ping);
-
- memset(lstcon_trans_stat(), 0, sizeof(struct lstcon_trans_stat));
-
- console_session.ses_ping = NULL;
-}
-
-void
-lstcon_rpc_cleanup_wait(void)
-{
- struct lstcon_rpc_trans *trans;
- struct lstcon_rpc *crpc;
- struct lstcon_rpc *temp;
- struct list_head *pacer;
- struct list_head zlist;
-
- /* Called with hold of global mutex */
-
- LASSERT(console_session.ses_shutdown);
-
- while (!list_empty(&console_session.ses_trans_list)) {
- list_for_each(pacer, &console_session.ses_trans_list) {
- trans = list_entry(pacer, struct lstcon_rpc_trans,
- tas_link);
-
- CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
- lstcon_rpc_trans_name(trans->tas_opc));
-
- wake_up(&trans->tas_waitq);
- }
-
- mutex_unlock(&console_session.ses_mutex);
-
- CWARN("Session is shutting down, waiting for termination of transactions\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
-
- mutex_lock(&console_session.ses_mutex);
- }
-
- spin_lock(&console_session.ses_rpc_lock);
-
- lst_wait_until(!atomic_read(&console_session.ses_rpc_counter),
- console_session.ses_rpc_lock,
- "Network is not accessible or target is down, waiting for %d console RPCs to being recycled\n",
- atomic_read(&console_session.ses_rpc_counter));
-
- list_add(&zlist, &console_session.ses_rpc_freelist);
- list_del_init(&console_session.ses_rpc_freelist);
-
- spin_unlock(&console_session.ses_rpc_lock);
-
- list_for_each_entry_safe(crpc, temp, &zlist, crp_link) {
- list_del(&crpc->crp_link);
- kfree(crpc);
- }
-}
-
-int
-lstcon_rpc_module_init(void)
-{
- INIT_LIST_HEAD(&console_session.ses_ping_timer.stt_list);
- console_session.ses_ping_timer.stt_func = lstcon_rpc_pinger;
- console_session.ses_ping_timer.stt_data = &console_session.ses_ping_timer;
-
- console_session.ses_ping = NULL;
-
- spin_lock_init(&console_session.ses_rpc_lock);
- atomic_set(&console_session.ses_rpc_counter, 0);
- INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
-
- return 0;
-}
-
-void
-lstcon_rpc_module_fini(void)
-{
- LASSERT(list_empty(&console_session.ses_rpc_freelist));
- LASSERT(!atomic_read(&console_session.ses_rpc_counter));
-}
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
deleted file mode 100644
index 374a5f31ef6f..000000000000
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * /lnet/selftest/conrpc.h
- *
- * Console rpc
- *
- * Author: Liang Zhen <liang@whamcloud.com>
- */
-
-#ifndef __LST_CONRPC_H__
-#define __LST_CONRPC_H__
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-types.h>
-#include <uapi/linux/lnet/lnetst.h>
-#include "rpc.h"
-#include "selftest.h"
-
-/* Console rpc and rpc transaction */
-#define LST_TRANS_TIMEOUT 30
-#define LST_TRANS_MIN_TIMEOUT 3
-
-#define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT)
-
-#define LST_PING_INTERVAL 8
-
-struct lstcon_rpc_trans;
-struct lstcon_tsb_hdr;
-struct lstcon_test;
-struct lstcon_node;
-
-struct lstcon_rpc {
- struct list_head crp_link; /* chain on rpc transaction */
- struct srpc_client_rpc *crp_rpc; /* client rpc */
- struct lstcon_node *crp_node; /* destination node */
- struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
-
- unsigned int crp_posted:1; /* rpc is posted */
- unsigned int crp_finished:1; /* rpc is finished */
- unsigned int crp_unpacked:1; /* reply is unpacked */
- /** RPC is embedded in other structure and can't free it */
- unsigned int crp_embedded:1;
- int crp_status; /* console rpc errors */
- unsigned long crp_stamp; /* replied time stamp */
-};
-
-struct lstcon_rpc_trans {
- struct list_head tas_olink; /* link chain on owner list */
- struct list_head tas_link; /* link chain on global list */
- int tas_opc; /* operation code of transaction */
- unsigned int tas_feats_updated; /* features mask is uptodate */
- unsigned int tas_features; /* test features mask */
- wait_queue_head_t tas_waitq; /* wait queue head */
- atomic_t tas_remaining; /* # of un-scheduled rpcs */
- struct list_head tas_rpcs_list; /* queued requests */
-};
-
-#define LST_TRANS_PRIVATE 0x1000
-
-#define LST_TRANS_SESNEW (LST_TRANS_PRIVATE | 0x01)
-#define LST_TRANS_SESEND (LST_TRANS_PRIVATE | 0x02)
-#define LST_TRANS_SESQRY 0x03
-#define LST_TRANS_SESPING 0x04
-
-#define LST_TRANS_TSBCLIADD (LST_TRANS_PRIVATE | 0x11)
-#define LST_TRANS_TSBSRVADD (LST_TRANS_PRIVATE | 0x12)
-#define LST_TRANS_TSBRUN (LST_TRANS_PRIVATE | 0x13)
-#define LST_TRANS_TSBSTOP (LST_TRANS_PRIVATE | 0x14)
-#define LST_TRANS_TSBCLIQRY 0x15
-#define LST_TRANS_TSBSRVQRY 0x16
-
-#define LST_TRANS_STATQRY 0x21
-
-typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
- struct lstcon_rpc_ent __user *);
-
-int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned int version, struct lstcon_rpc **crpc);
-int lstcon_dbgrpc_prep(struct lstcon_node *nd,
- unsigned int version, struct lstcon_rpc **crpc);
-int lstcon_batrpc_prep(struct lstcon_node *nd, int transop,
- unsigned int version, struct lstcon_tsb_hdr *tsb,
- struct lstcon_rpc **crpc);
-int lstcon_testrpc_prep(struct lstcon_node *nd, int transop,
- unsigned int version, struct lstcon_test *test,
- struct lstcon_rpc **crpc);
-int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int version,
- struct lstcon_rpc **crpc);
-void lstcon_rpc_put(struct lstcon_rpc *crpc);
-int lstcon_rpc_trans_prep(struct list_head *translist,
- int transop, struct lstcon_rpc_trans **transpp);
-int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
- struct list_head *translist, int transop,
- void *arg, lstcon_rpc_cond_func_t condition,
- struct lstcon_rpc_trans **transpp);
-void lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans,
- struct lstcon_trans_stat *stat);
-int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
- struct list_head __user *head_up,
- lstcon_rpc_readent_func_t readent);
-void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
-void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
-void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans,
- struct lstcon_rpc *req);
-int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
-int lstcon_rpc_pinger_start(void);
-void lstcon_rpc_pinger_stop(void);
-void lstcon_rpc_cleanup_wait(void);
-int lstcon_rpc_module_init(void);
-void lstcon_rpc_module_fini(void);
-
-#endif
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
deleted file mode 100644
index 1acd5cb324b1..000000000000
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ /dev/null
@@ -1,2101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/conctl.c
- *
- * Infrastructure of LST console
- *
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-lnet.h>
-#include "console.h"
-#include "conrpc.h"
-
-#define LST_NODE_STATE_COUNTER(nd, p) \
-do { \
- if ((nd)->nd_state == LST_NODE_ACTIVE) \
- (p)->nle_nactive++; \
- else if ((nd)->nd_state == LST_NODE_BUSY) \
- (p)->nle_nbusy++; \
- else if ((nd)->nd_state == LST_NODE_DOWN) \
- (p)->nle_ndown++; \
- else \
- (p)->nle_nunknown++; \
- (p)->nle_nnode++; \
-} while (0)
-
-struct lstcon_session console_session;
-
-static void
-lstcon_node_get(struct lstcon_node *nd)
-{
- LASSERT(nd->nd_ref >= 1);
-
- nd->nd_ref++;
-}
-
-static int
-lstcon_node_find(struct lnet_process_id id, struct lstcon_node **ndpp,
- int create)
-{
- struct lstcon_ndlink *ndl;
- unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
-
- LASSERT(id.nid != LNET_NID_ANY);
-
- list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx],
- ndl_hlink) {
- if (ndl->ndl_node->nd_id.nid != id.nid ||
- ndl->ndl_node->nd_id.pid != id.pid)
- continue;
-
- lstcon_node_get(ndl->ndl_node);
- *ndpp = ndl->ndl_node;
- return 0;
- }
-
- if (!create)
- return -ENOENT;
-
- *ndpp = kzalloc(sizeof(**ndpp) + sizeof(*ndl), GFP_KERNEL);
- if (!*ndpp)
- return -ENOMEM;
-
- ndl = (struct lstcon_ndlink *)(*ndpp + 1);
-
- ndl->ndl_node = *ndpp;
-
- ndl->ndl_node->nd_ref = 1;
- ndl->ndl_node->nd_id = id;
- ndl->ndl_node->nd_stamp = cfs_time_current();
- ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
- ndl->ndl_node->nd_timeout = 0;
- memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc));
-
- /*
- * queued in global hash & list, no refcount is taken by
- * global hash & list, if caller release his refcount,
- * node will be released
- */
- list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
- list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
-
- return 0;
-}
-
-static void
-lstcon_node_put(struct lstcon_node *nd)
-{
- struct lstcon_ndlink *ndl;
-
- LASSERT(nd->nd_ref > 0);
-
- if (--nd->nd_ref > 0)
- return;
-
- ndl = (struct lstcon_ndlink *)(nd + 1);
-
- LASSERT(!list_empty(&ndl->ndl_link));
- LASSERT(!list_empty(&ndl->ndl_hlink));
-
- /* remove from session */
- list_del(&ndl->ndl_link);
- list_del(&ndl->ndl_hlink);
-
- kfree(nd);
-}
-
-static int
-lstcon_ndlink_find(struct list_head *hash, struct lnet_process_id id,
- struct lstcon_ndlink **ndlpp, int create)
-{
- unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
- struct lstcon_ndlink *ndl;
- struct lstcon_node *nd;
- int rc;
-
- if (id.nid == LNET_NID_ANY)
- return -EINVAL;
-
- /* search in hash */
- list_for_each_entry(ndl, &hash[idx], ndl_hlink) {
- if (ndl->ndl_node->nd_id.nid != id.nid ||
- ndl->ndl_node->nd_id.pid != id.pid)
- continue;
-
- *ndlpp = ndl;
- return 0;
- }
-
- if (!create)
- return -ENOENT;
-
- /* find or create in session hash */
- rc = lstcon_node_find(id, &nd, (create == 1) ? 1 : 0);
- if (rc)
- return rc;
-
- ndl = kzalloc(sizeof(struct lstcon_ndlink), GFP_NOFS);
- if (!ndl) {
- lstcon_node_put(nd);
- return -ENOMEM;
- }
-
- *ndlpp = ndl;
-
- ndl->ndl_node = nd;
- INIT_LIST_HEAD(&ndl->ndl_link);
- list_add_tail(&ndl->ndl_hlink, &hash[idx]);
-
- return 0;
-}
-
-static void
-lstcon_ndlink_release(struct lstcon_ndlink *ndl)
-{
- LASSERT(list_empty(&ndl->ndl_link));
- LASSERT(!list_empty(&ndl->ndl_hlink));
-
- list_del(&ndl->ndl_hlink); /* delete from hash */
- lstcon_node_put(ndl->ndl_node);
-
- kfree(ndl);
-}
-
-static int
-lstcon_group_alloc(char *name, struct lstcon_group **grpp)
-{
- struct lstcon_group *grp;
- int i;
-
- grp = kmalloc(offsetof(struct lstcon_group,
- grp_ndl_hash[LST_NODE_HASHSIZE]),
- GFP_KERNEL);
- if (!grp)
- return -ENOMEM;
-
- grp->grp_ref = 1;
- if (name) {
- if (strlen(name) > sizeof(grp->grp_name) - 1) {
- kfree(grp);
- return -E2BIG;
- }
- strncpy(grp->grp_name, name, sizeof(grp->grp_name));
- }
-
- INIT_LIST_HEAD(&grp->grp_link);
- INIT_LIST_HEAD(&grp->grp_ndl_list);
- INIT_LIST_HEAD(&grp->grp_trans_list);
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++)
- INIT_LIST_HEAD(&grp->grp_ndl_hash[i]);
-
- *grpp = grp;
-
- return 0;
-}
-
-static void
-lstcon_group_addref(struct lstcon_group *grp)
-{
- grp->grp_ref++;
-}
-
-static void lstcon_group_ndlink_release(struct lstcon_group *,
- struct lstcon_ndlink *);
-
-static void
-lstcon_group_drain(struct lstcon_group *grp, int keep)
-{
- struct lstcon_ndlink *ndl;
- struct lstcon_ndlink *tmp;
-
- list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
- if (!(ndl->ndl_node->nd_state & keep))
- lstcon_group_ndlink_release(grp, ndl);
- }
-}
-
-static void
-lstcon_group_decref(struct lstcon_group *grp)
-{
- int i;
-
- if (--grp->grp_ref > 0)
- return;
-
- if (!list_empty(&grp->grp_link))
- list_del(&grp->grp_link);
-
- lstcon_group_drain(grp, 0);
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++)
- LASSERT(list_empty(&grp->grp_ndl_hash[i]));
-
- kfree(grp);
-}
-
-static int
-lstcon_group_find(const char *name, struct lstcon_group **grpp)
-{
- struct lstcon_group *grp;
-
- list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
- if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
- continue;
-
- lstcon_group_addref(grp); /* +1 ref for caller */
- *grpp = grp;
- return 0;
- }
-
- return -ENOENT;
-}
-
-static int
-lstcon_group_ndlink_find(struct lstcon_group *grp, struct lnet_process_id id,
- struct lstcon_ndlink **ndlpp, int create)
-{
- int rc;
-
- rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create);
- if (rc)
- return rc;
-
- if (!list_empty(&(*ndlpp)->ndl_link))
- return 0;
-
- list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
- grp->grp_nnode++;
-
- return 0;
-}
-
-static void
-lstcon_group_ndlink_release(struct lstcon_group *grp, struct lstcon_ndlink *ndl)
-{
- list_del_init(&ndl->ndl_link);
- lstcon_ndlink_release(ndl);
- grp->grp_nnode--;
-}
-
-static void
-lstcon_group_ndlink_move(struct lstcon_group *old,
- struct lstcon_group *new, struct lstcon_ndlink *ndl)
-{
- unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
- LST_NODE_HASHSIZE;
-
- list_del(&ndl->ndl_hlink);
- list_del(&ndl->ndl_link);
- old->grp_nnode--;
-
- list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
- list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
- new->grp_nnode++;
-}
-
-static void
-lstcon_group_move(struct lstcon_group *old, struct lstcon_group *new)
-{
- struct lstcon_ndlink *ndl;
-
- while (!list_empty(&old->grp_ndl_list)) {
- ndl = list_entry(old->grp_ndl_list.next,
- struct lstcon_ndlink, ndl_link);
- lstcon_group_ndlink_move(old, new, ndl);
- }
-}
-
-static int
-lstcon_sesrpc_condition(int transop, struct lstcon_node *nd, void *arg)
-{
- struct lstcon_group *grp = (struct lstcon_group *)arg;
-
- switch (transop) {
- case LST_TRANS_SESNEW:
- if (nd->nd_state == LST_NODE_ACTIVE)
- return 0;
- break;
-
- case LST_TRANS_SESEND:
- if (nd->nd_state != LST_NODE_ACTIVE)
- return 0;
-
- if (grp && nd->nd_ref > 1)
- return 0;
- break;
-
- case LST_TRANS_SESQRY:
- break;
-
- default:
- LBUG();
- }
-
- return 1;
-}
-
-static int
-lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
- struct lstcon_rpc_ent __user *ent_up)
-{
- struct srpc_debug_reply *rep;
-
- switch (transop) {
- case LST_TRANS_SESNEW:
- case LST_TRANS_SESEND:
- return 0;
-
- case LST_TRANS_SESQRY:
- rep = &msg->msg_body.dbg_reply;
-
- if (copy_to_user(&ent_up->rpe_priv[0],
- &rep->dbg_timeout, sizeof(int)) ||
- copy_to_user(&ent_up->rpe_payload[0],
- &rep->dbg_name, LST_NAME_SIZE))
- return -EFAULT;
-
- return 0;
-
- default:
- LBUG();
- }
-
- return 0;
-}
-
-static int
-lstcon_group_nodes_add(struct lstcon_group *grp,
- int count, struct lnet_process_id __user *ids_up,
- unsigned int *featp,
- struct list_head __user *result_up)
-{
- struct lstcon_rpc_trans *trans;
- struct lstcon_ndlink *ndl;
- struct lstcon_group *tmp;
- struct lnet_process_id id;
- int i;
- int rc;
-
- rc = lstcon_group_alloc(NULL, &tmp);
- if (rc) {
- CERROR("Out of memory\n");
- return -ENOMEM;
- }
-
- for (i = 0 ; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
- rc = -EFAULT;
- break;
- }
-
- /* skip if it's in this group already */
- rc = lstcon_group_ndlink_find(grp, id, &ndl, 0);
- if (!rc)
- continue;
-
- /* add to tmp group */
- rc = lstcon_group_ndlink_find(tmp, id, &ndl, 1);
- if (rc) {
- CERROR("Can't create ndlink, out of memory\n");
- break;
- }
- }
-
- if (rc) {
- lstcon_group_decref(tmp);
- return rc;
- }
-
- rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
- &tmp->grp_trans_list, LST_TRANS_SESNEW,
- tmp, lstcon_sesrpc_condition, &trans);
- if (rc) {
- CERROR("Can't create transaction: %d\n", rc);
- lstcon_group_decref(tmp);
- return rc;
- }
-
- /* post all RPCs */
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up,
- lstcon_sesrpc_readent);
- *featp = trans->tas_features;
-
- /* destroy all RPGs */
- lstcon_rpc_trans_destroy(trans);
-
- lstcon_group_move(tmp, grp);
- lstcon_group_decref(tmp);
-
- return rc;
-}
-
-static int
-lstcon_group_nodes_remove(struct lstcon_group *grp,
- int count, struct lnet_process_id __user *ids_up,
- struct list_head __user *result_up)
-{
- struct lstcon_rpc_trans *trans;
- struct lstcon_ndlink *ndl;
- struct lstcon_group *tmp;
- struct lnet_process_id id;
- int rc;
- int i;
-
- /* End session and remove node from the group */
-
- rc = lstcon_group_alloc(NULL, &tmp);
- if (rc) {
- CERROR("Out of memory\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
- rc = -EFAULT;
- goto error;
- }
-
- /* move node to tmp group */
- if (!lstcon_group_ndlink_find(grp, id, &ndl, 0))
- lstcon_group_ndlink_move(grp, tmp, ndl);
- }
-
- rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
- &tmp->grp_trans_list, LST_TRANS_SESEND,
- tmp, lstcon_sesrpc_condition, &trans);
- if (rc) {
- CERROR("Can't create transaction: %d\n", rc);
- goto error;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL);
-
- lstcon_rpc_trans_destroy(trans);
- /* release nodes anyway, because we can't rollback status */
- lstcon_group_decref(tmp);
-
- return rc;
-error:
- lstcon_group_move(tmp, grp);
- lstcon_group_decref(tmp);
-
- return rc;
-}
-
-int
-lstcon_group_add(char *name)
-{
- struct lstcon_group *grp;
- int rc;
-
- rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
- if (rc) {
- /* find a group with same name */
- lstcon_group_decref(grp);
- return rc;
- }
-
- rc = lstcon_group_alloc(name, &grp);
- if (rc) {
- CERROR("Can't allocate descriptor for group %s\n", name);
- return -ENOMEM;
- }
-
- list_add_tail(&grp->grp_link, &console_session.ses_grp_list);
-
- return rc;
-}
-
-int
-lstcon_nodes_add(char *name, int count, struct lnet_process_id __user *ids_up,
- unsigned int *featp, struct list_head __user *result_up)
-{
- struct lstcon_group *grp;
- int rc;
-
- LASSERT(count > 0);
- LASSERT(ids_up);
-
- rc = lstcon_group_find(name, &grp);
- if (rc) {
- CDEBUG(D_NET, "Can't find group %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
- /* referred by other threads or test */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_decref(grp);
-
- return -EBUSY;
- }
-
- rc = lstcon_group_nodes_add(grp, count, ids_up, featp, result_up);
-
- lstcon_group_decref(grp);
-
- return rc;
-}
-
-int
-lstcon_group_del(char *name)
-{
- struct lstcon_rpc_trans *trans;
- struct lstcon_group *grp;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc) {
- CDEBUG(D_NET, "Can't find group: %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
- /* referred by others threads or test */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_decref(grp);
- return -EBUSY;
- }
-
- rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
- &grp->grp_trans_list, LST_TRANS_SESEND,
- grp, lstcon_sesrpc_condition, &trans);
- if (rc) {
- CERROR("Can't create transaction: %d\n", rc);
- lstcon_group_decref(grp);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- lstcon_rpc_trans_destroy(trans);
-
- lstcon_group_decref(grp);
- /*
- * -ref for session, it's destroyed,
- * status can't be rolled back, destroy group anyway
- */
- lstcon_group_decref(grp);
-
- return rc;
-}
-
-int
-lstcon_group_clean(char *name, int args)
-{
- struct lstcon_group *grp = NULL;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc) {
- CDEBUG(D_NET, "Can't find group %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
- /* referred by test */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_decref(grp);
- return -EBUSY;
- }
-
- args = (LST_NODE_ACTIVE | LST_NODE_BUSY |
- LST_NODE_DOWN | LST_NODE_UNKNOWN) & ~args;
-
- lstcon_group_drain(grp, args);
-
- lstcon_group_decref(grp);
- /* release empty group */
- if (list_empty(&grp->grp_ndl_list))
- lstcon_group_decref(grp);
-
- return 0;
-}
-
-int
-lstcon_nodes_remove(char *name, int count,
- struct lnet_process_id __user *ids_up,
- struct list_head __user *result_up)
-{
- struct lstcon_group *grp = NULL;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc) {
- CDEBUG(D_NET, "Can't find group: %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
- /* referred by test */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_decref(grp);
- return -EBUSY;
- }
-
- rc = lstcon_group_nodes_remove(grp, count, ids_up, result_up);
-
- lstcon_group_decref(grp);
- /* release empty group */
- if (list_empty(&grp->grp_ndl_list))
- lstcon_group_decref(grp);
-
- return rc;
-}
-
-int
-lstcon_group_refresh(char *name, struct list_head __user *result_up)
-{
- struct lstcon_rpc_trans *trans;
- struct lstcon_group *grp;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc) {
- CDEBUG(D_NET, "Can't find group: %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
- /* referred by test */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_decref(grp);
- return -EBUSY;
- }
-
- /* re-invite all inactive nodes int the group */
- rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
- &grp->grp_trans_list, LST_TRANS_SESNEW,
- grp, lstcon_sesrpc_condition, &trans);
- if (rc) {
- /* local error, return */
- CDEBUG(D_NET, "Can't create transaction: %d\n", rc);
- lstcon_group_decref(grp);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL);
-
- lstcon_rpc_trans_destroy(trans);
- /* -ref for me */
- lstcon_group_decref(grp);
-
- return rc;
-}
-
-int
-lstcon_group_list(int index, int len, char __user *name_up)
-{
- struct lstcon_group *grp;
-
- LASSERT(index >= 0);
- LASSERT(name_up);
-
- list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
- if (!index--) {
- return copy_to_user(name_up, grp->grp_name, len) ?
- -EFAULT : 0;
- }
- }
-
- return -ENOENT;
-}
-
-static int
-lstcon_nodes_getent(struct list_head *head, int *index_p,
- int *count_p, struct lstcon_node_ent __user *dents_up)
-{
- struct lstcon_ndlink *ndl;
- struct lstcon_node *nd;
- int count = 0;
- int index = 0;
-
- LASSERT(index_p && count_p);
- LASSERT(dents_up);
- LASSERT(*index_p >= 0);
- LASSERT(*count_p > 0);
-
- list_for_each_entry(ndl, head, ndl_link) {
- if (index++ < *index_p)
- continue;
-
- if (count >= *count_p)
- break;
-
- nd = ndl->ndl_node;
- if (copy_to_user(&dents_up[count].nde_id,
- &nd->nd_id, sizeof(nd->nd_id)) ||
- copy_to_user(&dents_up[count].nde_state,
- &nd->nd_state, sizeof(nd->nd_state)))
- return -EFAULT;
-
- count++;
- }
-
- if (index <= *index_p)
- return -ENOENT;
-
- *count_p = count;
- *index_p = index;
-
- return 0;
-}
-
-int
-lstcon_group_info(char *name, struct lstcon_ndlist_ent __user *gents_p,
- int *index_p, int *count_p,
- struct lstcon_node_ent __user *dents_up)
-{
- struct lstcon_ndlist_ent *gentp;
- struct lstcon_group *grp;
- struct lstcon_ndlink *ndl;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc) {
- CDEBUG(D_NET, "Can't find group %s\n", name);
- return rc;
- }
-
- if (dents_up) {
- /* verbose query */
- rc = lstcon_nodes_getent(&grp->grp_ndl_list,
- index_p, count_p, dents_up);
- lstcon_group_decref(grp);
-
- return rc;
- }
-
- /* non-verbose query */
- gentp = kzalloc(sizeof(struct lstcon_ndlist_ent), GFP_NOFS);
- if (!gentp) {
- CERROR("Can't allocate ndlist_ent\n");
- lstcon_group_decref(grp);
-
- return -ENOMEM;
- }
-
- list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
-
- rc = copy_to_user(gents_p, gentp,
- sizeof(struct lstcon_ndlist_ent)) ? -EFAULT : 0;
-
- kfree(gentp);
-
- lstcon_group_decref(grp);
-
- return rc;
-}
-
-static int
-lstcon_batch_find(const char *name, struct lstcon_batch **batpp)
-{
- struct lstcon_batch *bat;
-
- list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
- if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
- *batpp = bat;
- return 0;
- }
- }
-
- return -ENOENT;
-}
-
-int
-lstcon_batch_add(char *name)
-{
- struct lstcon_batch *bat;
- int i;
- int rc;
-
- rc = !lstcon_batch_find(name, &bat) ? -EEXIST : 0;
- if (rc) {
- CDEBUG(D_NET, "Batch %s already exists\n", name);
- return rc;
- }
-
- bat = kzalloc(sizeof(struct lstcon_batch), GFP_NOFS);
- if (!bat) {
- CERROR("Can't allocate descriptor for batch %s\n", name);
- return -ENOMEM;
- }
-
- bat->bat_cli_hash = kmalloc(sizeof(struct list_head) * LST_NODE_HASHSIZE,
- GFP_KERNEL);
- if (!bat->bat_cli_hash) {
- CERROR("Can't allocate hash for batch %s\n", name);
- kfree(bat);
-
- return -ENOMEM;
- }
-
- bat->bat_srv_hash = kmalloc(sizeof(struct list_head) * LST_NODE_HASHSIZE,
- GFP_KERNEL);
- if (!bat->bat_srv_hash) {
- CERROR("Can't allocate hash for batch %s\n", name);
- kfree(bat->bat_cli_hash);
- kfree(bat);
-
- return -ENOMEM;
- }
-
- if (strlen(name) > sizeof(bat->bat_name) - 1) {
- kfree(bat->bat_srv_hash);
- kfree(bat->bat_cli_hash);
- kfree(bat);
- return -E2BIG;
- }
- strncpy(bat->bat_name, name, sizeof(bat->bat_name));
- bat->bat_hdr.tsb_index = 0;
- bat->bat_hdr.tsb_id.bat_id = ++console_session.ses_id_cookie;
-
- bat->bat_ntest = 0;
- bat->bat_state = LST_BATCH_IDLE;
-
- INIT_LIST_HEAD(&bat->bat_cli_list);
- INIT_LIST_HEAD(&bat->bat_srv_list);
- INIT_LIST_HEAD(&bat->bat_test_list);
- INIT_LIST_HEAD(&bat->bat_trans_list);
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- INIT_LIST_HEAD(&bat->bat_cli_hash[i]);
- INIT_LIST_HEAD(&bat->bat_srv_hash[i]);
- }
-
- list_add_tail(&bat->bat_link, &console_session.ses_bat_list);
-
- return rc;
-}
-
-int
-lstcon_batch_list(int index, int len, char __user *name_up)
-{
- struct lstcon_batch *bat;
-
- LASSERT(name_up);
- LASSERT(index >= 0);
-
- list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
- if (!index--) {
- return copy_to_user(name_up, bat->bat_name, len) ?
- -EFAULT : 0;
- }
- }
-
- return -ENOENT;
-}
-
-int
-lstcon_batch_info(char *name, struct lstcon_test_batch_ent __user *ent_up,
- int server, int testidx, int *index_p, int *ndent_p,
- struct lstcon_node_ent __user *dents_up)
-{
- struct lstcon_test_batch_ent *entp;
- struct list_head *clilst;
- struct list_head *srvlst;
- struct lstcon_test *test = NULL;
- struct lstcon_batch *bat;
- struct lstcon_ndlink *ndl;
- int rc;
-
- rc = lstcon_batch_find(name, &bat);
- if (rc) {
- CDEBUG(D_NET, "Can't find batch %s\n", name);
- return -ENOENT;
- }
-
- if (testidx > 0) {
- /* query test, test index start from 1 */
- list_for_each_entry(test, &bat->bat_test_list, tes_link) {
- if (testidx-- == 1)
- break;
- }
-
- if (testidx > 0) {
- CDEBUG(D_NET, "Can't find specified test in batch\n");
- return -ENOENT;
- }
- }
-
- clilst = !test ? &bat->bat_cli_list :
- &test->tes_src_grp->grp_ndl_list;
- srvlst = !test ? &bat->bat_srv_list :
- &test->tes_dst_grp->grp_ndl_list;
-
- if (dents_up) {
- rc = lstcon_nodes_getent((server ? srvlst : clilst),
- index_p, ndent_p, dents_up);
- return rc;
- }
-
- /* non-verbose query */
- entp = kzalloc(sizeof(struct lstcon_test_batch_ent), GFP_NOFS);
- if (!entp)
- return -ENOMEM;
-
- if (!test) {
- entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
- entp->u.tbe_batch.bae_state = bat->bat_state;
- } else {
- entp->u.tbe_test.tse_type = test->tes_type;
- entp->u.tbe_test.tse_loop = test->tes_loop;
- entp->u.tbe_test.tse_concur = test->tes_concur;
- }
-
- list_for_each_entry(ndl, clilst, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_cli_nle);
-
- list_for_each_entry(ndl, srvlst, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
-
- rc = copy_to_user(ent_up, entp,
- sizeof(struct lstcon_test_batch_ent)) ? -EFAULT : 0;
-
- kfree(entp);
-
- return rc;
-}
-
-static int
-lstcon_batrpc_condition(int transop, struct lstcon_node *nd, void *arg)
-{
- switch (transop) {
- case LST_TRANS_TSBRUN:
- if (nd->nd_state != LST_NODE_ACTIVE)
- return -ENETDOWN;
- break;
-
- case LST_TRANS_TSBSTOP:
- if (nd->nd_state != LST_NODE_ACTIVE)
- return 0;
- break;
-
- case LST_TRANS_TSBCLIQRY:
- case LST_TRANS_TSBSRVQRY:
- break;
- }
-
- return 1;
-}
-
-static int
-lstcon_batch_op(struct lstcon_batch *bat, int transop,
- struct list_head __user *result_up)
-{
- struct lstcon_rpc_trans *trans;
- int rc;
-
- rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
- &bat->bat_trans_list, transop,
- bat, lstcon_batrpc_condition, &trans);
- if (rc) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL);
-
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-int
-lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
-{
- struct lstcon_batch *bat;
- int rc;
-
- if (lstcon_batch_find(name, &bat)) {
- CDEBUG(D_NET, "Can't find batch %s\n", name);
- return -ENOENT;
- }
-
- bat->bat_arg = timeout;
-
- rc = lstcon_batch_op(bat, LST_TRANS_TSBRUN, result_up);
-
- /* mark batch as running if it's started in any node */
- if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0))
- bat->bat_state = LST_BATCH_RUNNING;
-
- return rc;
-}
-
-int
-lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
-{
- struct lstcon_batch *bat;
- int rc;
-
- if (lstcon_batch_find(name, &bat)) {
- CDEBUG(D_NET, "Can't find batch %s\n", name);
- return -ENOENT;
- }
-
- bat->bat_arg = force;
-
- rc = lstcon_batch_op(bat, LST_TRANS_TSBSTOP, result_up);
-
- /* mark batch as stopped if all RPCs finished */
- if (!lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0))
- bat->bat_state = LST_BATCH_IDLE;
-
- return rc;
-}
-
-static void
-lstcon_batch_destroy(struct lstcon_batch *bat)
-{
- struct lstcon_ndlink *ndl;
- struct lstcon_test *test;
- int i;
-
- list_del(&bat->bat_link);
-
- while (!list_empty(&bat->bat_test_list)) {
- test = list_entry(bat->bat_test_list.next,
- struct lstcon_test, tes_link);
- LASSERT(list_empty(&test->tes_trans_list));
-
- list_del(&test->tes_link);
-
- lstcon_group_decref(test->tes_src_grp);
- lstcon_group_decref(test->tes_dst_grp);
-
- kfree(test);
- }
-
- LASSERT(list_empty(&bat->bat_trans_list));
-
- while (!list_empty(&bat->bat_cli_list)) {
- ndl = list_entry(bat->bat_cli_list.next,
- struct lstcon_ndlink, ndl_link);
- list_del_init(&ndl->ndl_link);
-
- lstcon_ndlink_release(ndl);
- }
-
- while (!list_empty(&bat->bat_srv_list)) {
- ndl = list_entry(bat->bat_srv_list.next,
- struct lstcon_ndlink, ndl_link);
- list_del_init(&ndl->ndl_link);
-
- lstcon_ndlink_release(ndl);
- }
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT(list_empty(&bat->bat_cli_hash[i]));
- LASSERT(list_empty(&bat->bat_srv_hash[i]));
- }
-
- kfree(bat->bat_cli_hash);
- kfree(bat->bat_srv_hash);
- kfree(bat);
-}
-
-static int
-lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
-{
- struct lstcon_test *test;
- struct lstcon_batch *batch;
- struct lstcon_ndlink *ndl;
- struct list_head *hash;
- struct list_head *head;
-
- test = (struct lstcon_test *)arg;
- LASSERT(test);
-
- batch = test->tes_batch;
- LASSERT(batch);
-
- if (test->tes_oneside &&
- transop == LST_TRANS_TSBSRVADD)
- return 0;
-
- if (nd->nd_state != LST_NODE_ACTIVE)
- return -ENETDOWN;
-
- if (transop == LST_TRANS_TSBCLIADD) {
- hash = batch->bat_cli_hash;
- head = &batch->bat_cli_list;
-
- } else {
- LASSERT(transop == LST_TRANS_TSBSRVADD);
-
- hash = batch->bat_srv_hash;
- head = &batch->bat_srv_list;
- }
-
- LASSERT(nd->nd_id.nid != LNET_NID_ANY);
-
- if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1))
- return -ENOMEM;
-
- if (list_empty(&ndl->ndl_link))
- list_add_tail(&ndl->ndl_link, head);
-
- return 1;
-}
-
-static int
-lstcon_test_nodes_add(struct lstcon_test *test,
- struct list_head __user *result_up)
-{
- struct lstcon_rpc_trans *trans;
- struct lstcon_group *grp;
- int transop;
- int rc;
-
- LASSERT(test->tes_src_grp);
- LASSERT(test->tes_dst_grp);
-
- transop = LST_TRANS_TSBSRVADD;
- grp = test->tes_dst_grp;
-again:
- rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
- &test->tes_trans_list, transop,
- test, lstcon_testrpc_condition, &trans);
- if (rc) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- if (lstcon_trans_stat()->trs_rpc_errno ||
- lstcon_trans_stat()->trs_fwk_errno) {
- lstcon_rpc_trans_interpreter(trans, result_up, NULL);
-
- lstcon_rpc_trans_destroy(trans);
- /* return if any error */
- CDEBUG(D_NET, "Failed to add test %s, RPC error %d, framework error %d\n",
- transop == LST_TRANS_TSBCLIADD ? "client" : "server",
- lstcon_trans_stat()->trs_rpc_errno,
- lstcon_trans_stat()->trs_fwk_errno);
-
- return rc;
- }
-
- lstcon_rpc_trans_destroy(trans);
-
- if (transop == LST_TRANS_TSBCLIADD)
- return rc;
-
- transop = LST_TRANS_TSBCLIADD;
- grp = test->tes_src_grp;
- test->tes_cliidx = 0;
-
- /* requests to test clients */
- goto again;
-}
-
-static int
-lstcon_verify_batch(const char *name, struct lstcon_batch **batch)
-{
- int rc;
-
- rc = lstcon_batch_find(name, batch);
- if (rc) {
- CDEBUG(D_NET, "Can't find batch %s\n", name);
- return rc;
- }
-
- if ((*batch)->bat_state != LST_BATCH_IDLE) {
- CDEBUG(D_NET, "Can't change running batch %s\n", name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
-lstcon_verify_group(const char *name, struct lstcon_group **grp)
-{
- int rc;
- struct lstcon_ndlink *ndl;
-
- rc = lstcon_group_find(name, grp);
- if (rc) {
- CDEBUG(D_NET, "can't find group %s\n", name);
- return rc;
- }
-
- list_for_each_entry(ndl, &(*grp)->grp_ndl_list, ndl_link) {
- if (ndl->ndl_node->nd_state == LST_NODE_ACTIVE)
- return 0;
- }
-
- CDEBUG(D_NET, "Group %s has no ACTIVE nodes\n", name);
-
- return -EINVAL;
-}
-
-int
-lstcon_test_add(char *batch_name, int type, int loop,
- int concur, int dist, int span,
- char *src_name, char *dst_name,
- void *param, int paramlen, int *retp,
- struct list_head __user *result_up)
-{
- struct lstcon_test *test = NULL;
- int rc;
- struct lstcon_group *src_grp = NULL;
- struct lstcon_group *dst_grp = NULL;
- struct lstcon_batch *batch = NULL;
-
- /*
- * verify that a batch of the given name exists, and the groups
- * that will be part of the batch exist and have at least one
- * active node
- */
- rc = lstcon_verify_batch(batch_name, &batch);
- if (rc)
- goto out;
-
- rc = lstcon_verify_group(src_name, &src_grp);
- if (rc)
- goto out;
-
- rc = lstcon_verify_group(dst_name, &dst_grp);
- if (rc)
- goto out;
-
- if (dst_grp->grp_userland)
- *retp = 1;
-
- test = kzalloc(offsetof(struct lstcon_test, tes_param[paramlen]),
- GFP_KERNEL);
- if (!test) {
- CERROR("Can't allocate test descriptor\n");
- rc = -ENOMEM;
-
- goto out;
- }
-
- test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
- test->tes_batch = batch;
- test->tes_type = type;
- test->tes_oneside = 0; /* TODO */
- test->tes_loop = loop;
- test->tes_concur = concur;
- test->tes_stop_onerr = 1; /* TODO */
- test->tes_span = span;
- test->tes_dist = dist;
- test->tes_cliidx = 0; /* just used for creating RPC */
- test->tes_src_grp = src_grp;
- test->tes_dst_grp = dst_grp;
- INIT_LIST_HEAD(&test->tes_trans_list);
-
- if (param) {
- test->tes_paramlen = paramlen;
- memcpy(&test->tes_param[0], param, paramlen);
- }
-
- rc = lstcon_test_nodes_add(test, result_up);
-
- if (rc)
- goto out;
-
- if (lstcon_trans_stat()->trs_rpc_errno ||
- lstcon_trans_stat()->trs_fwk_errno)
- CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type,
- batch_name);
-
- /* add to test list anyway, so user can check what's going on */
- list_add_tail(&test->tes_link, &batch->bat_test_list);
-
- batch->bat_ntest++;
- test->tes_hdr.tsb_index = batch->bat_ntest;
-
- /* hold groups so nobody can change them */
- return rc;
-out:
- kfree(test);
-
- if (dst_grp)
- lstcon_group_decref(dst_grp);
-
- if (src_grp)
- lstcon_group_decref(src_grp);
-
- return rc;
-}
-
-static int
-lstcon_test_find(struct lstcon_batch *batch, int idx,
- struct lstcon_test **testpp)
-{
- struct lstcon_test *test;
-
- list_for_each_entry(test, &batch->bat_test_list, tes_link) {
- if (idx == test->tes_hdr.tsb_index) {
- *testpp = test;
- return 0;
- }
- }
-
- return -ENOENT;
-}
-
-static int
-lstcon_tsbrpc_readent(int transop, struct srpc_msg *msg,
- struct lstcon_rpc_ent __user *ent_up)
-{
- struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
-
- LASSERT(transop == LST_TRANS_TSBCLIQRY ||
- transop == LST_TRANS_TSBSRVQRY);
-
- /* positive errno, framework error code */
- if (copy_to_user(&ent_up->rpe_priv[0], &rep->bar_active,
- sizeof(rep->bar_active)))
- return -EFAULT;
-
- return 0;
-}
-
-int
-lstcon_test_batch_query(char *name, int testidx, int client,
- int timeout, struct list_head __user *result_up)
-{
- struct lstcon_rpc_trans *trans;
- struct list_head *translist;
- struct list_head *ndlist;
- struct lstcon_tsb_hdr *hdr;
- struct lstcon_batch *batch;
- struct lstcon_test *test = NULL;
- int transop;
- int rc;
-
- rc = lstcon_batch_find(name, &batch);
- if (rc) {
- CDEBUG(D_NET, "Can't find batch: %s\n", name);
- return rc;
- }
-
- if (!testidx) {
- translist = &batch->bat_trans_list;
- ndlist = &batch->bat_cli_list;
- hdr = &batch->bat_hdr;
- } else {
- /* query specified test only */
- rc = lstcon_test_find(batch, testidx, &test);
- if (rc) {
- CDEBUG(D_NET, "Can't find test: %d\n", testidx);
- return rc;
- }
-
- translist = &test->tes_trans_list;
- ndlist = &test->tes_src_grp->grp_ndl_list;
- hdr = &test->tes_hdr;
- }
-
- transop = client ? LST_TRANS_TSBCLIQRY : LST_TRANS_TSBSRVQRY;
-
- rc = lstcon_rpc_trans_ndlist(ndlist, translist, transop, hdr,
- lstcon_batrpc_condition, &trans);
- if (rc) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, timeout);
-
- /* query a batch, not a test */
- if (!testidx &&
- !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) &&
- !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) {
- /* all RPCs finished, and no active test */
- batch->bat_state = LST_BATCH_IDLE;
- }
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up,
- lstcon_tsbrpc_readent);
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-static int
-lstcon_statrpc_readent(int transop, struct srpc_msg *msg,
- struct lstcon_rpc_ent __user *ent_up)
-{
- struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
- struct sfw_counters __user *sfwk_stat;
- struct srpc_counters __user *srpc_stat;
- struct lnet_counters __user *lnet_stat;
-
- if (rep->str_status)
- return 0;
-
- sfwk_stat = (struct sfw_counters __user *)&ent_up->rpe_payload[0];
- srpc_stat = (struct srpc_counters __user *)(sfwk_stat + 1);
- lnet_stat = (struct lnet_counters __user *)(srpc_stat + 1);
-
- if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
- copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
- copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat)))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-lstcon_ndlist_stat(struct list_head *ndlist,
- int timeout, struct list_head __user *result_up)
-{
- struct list_head head;
- struct lstcon_rpc_trans *trans;
- int rc;
-
- INIT_LIST_HEAD(&head);
-
- rc = lstcon_rpc_trans_ndlist(ndlist, &head,
- LST_TRANS_STATQRY, NULL, NULL, &trans);
- if (rc) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_VALIDATE_TIMEOUT(timeout));
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up,
- lstcon_statrpc_readent);
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-int
-lstcon_group_stat(char *grp_name, int timeout,
- struct list_head __user *result_up)
-{
- struct lstcon_group *grp;
- int rc;
-
- rc = lstcon_group_find(grp_name, &grp);
- if (rc) {
- CDEBUG(D_NET, "Can't find group %s\n", grp_name);
- return rc;
- }
-
- rc = lstcon_ndlist_stat(&grp->grp_ndl_list, timeout, result_up);
-
- lstcon_group_decref(grp);
-
- return rc;
-}
-
-int
-lstcon_nodes_stat(int count, struct lnet_process_id __user *ids_up,
- int timeout, struct list_head __user *result_up)
-{
- struct lstcon_ndlink *ndl;
- struct lstcon_group *tmp;
- struct lnet_process_id id;
- int i;
- int rc;
-
- rc = lstcon_group_alloc(NULL, &tmp);
- if (rc) {
- CERROR("Out of memory\n");
- return -ENOMEM;
- }
-
- for (i = 0 ; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
- rc = -EFAULT;
- break;
- }
-
- /* add to tmp group */
- rc = lstcon_group_ndlink_find(tmp, id, &ndl, 2);
- if (rc) {
- CDEBUG((rc == -ENOMEM) ? D_ERROR : D_NET,
- "Failed to find or create %s: %d\n",
- libcfs_id2str(id), rc);
- break;
- }
- }
-
- if (rc) {
- lstcon_group_decref(tmp);
- return rc;
- }
-
- rc = lstcon_ndlist_stat(&tmp->grp_ndl_list, timeout, result_up);
-
- lstcon_group_decref(tmp);
-
- return rc;
-}
-
-static int
-lstcon_debug_ndlist(struct list_head *ndlist,
- struct list_head *translist,
- int timeout, struct list_head __user *result_up)
-{
- struct lstcon_rpc_trans *trans;
- int rc;
-
- rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
- NULL, lstcon_sesrpc_condition, &trans);
- if (rc) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_VALIDATE_TIMEOUT(timeout));
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up,
- lstcon_sesrpc_readent);
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-int
-lstcon_session_debug(int timeout, struct list_head __user *result_up)
-{
- return lstcon_debug_ndlist(&console_session.ses_ndl_list,
- NULL, timeout, result_up);
-}
-
-int
-lstcon_batch_debug(int timeout, char *name,
- int client, struct list_head __user *result_up)
-{
- struct lstcon_batch *bat;
- int rc;
-
- rc = lstcon_batch_find(name, &bat);
- if (rc)
- return -ENOENT;
-
- rc = lstcon_debug_ndlist(client ? &bat->bat_cli_list :
- &bat->bat_srv_list,
- NULL, timeout, result_up);
-
- return rc;
-}
-
-int
-lstcon_group_debug(int timeout, char *name,
- struct list_head __user *result_up)
-{
- struct lstcon_group *grp;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc)
- return -ENOENT;
-
- rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL,
- timeout, result_up);
- lstcon_group_decref(grp);
-
- return rc;
-}
-
-int
-lstcon_nodes_debug(int timeout, int count,
- struct lnet_process_id __user *ids_up,
- struct list_head __user *result_up)
-{
- struct lnet_process_id id;
- struct lstcon_ndlink *ndl;
- struct lstcon_group *grp;
- int i;
- int rc;
-
- rc = lstcon_group_alloc(NULL, &grp);
- if (rc) {
- CDEBUG(D_NET, "Out of memory\n");
- return rc;
- }
-
- for (i = 0; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
- rc = -EFAULT;
- break;
- }
-
- /* node is added to tmp group */
- rc = lstcon_group_ndlink_find(grp, id, &ndl, 1);
- if (rc) {
- CERROR("Can't create node link\n");
- break;
- }
- }
-
- if (rc) {
- lstcon_group_decref(grp);
- return rc;
- }
-
- rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL,
- timeout, result_up);
-
- lstcon_group_decref(grp);
-
- return rc;
-}
-
-int
-lstcon_session_match(struct lst_sid sid)
-{
- return (console_session.ses_id.ses_nid == sid.ses_nid &&
- console_session.ses_id.ses_stamp == sid.ses_stamp) ? 1 : 0;
-}
-
-static void
-lstcon_new_session_id(struct lst_sid *sid)
-{
- struct lnet_process_id id;
-
- LASSERT(console_session.ses_state == LST_SESSION_NONE);
-
- LNetGetId(1, &id);
- sid->ses_nid = id.nid;
- sid->ses_stamp = cfs_time_current();
-}
-
-int
-lstcon_session_new(char *name, int key, unsigned int feats,
- int timeout, int force, struct lst_sid __user *sid_up)
-{
- int rc = 0;
- int i;
-
- if (console_session.ses_state != LST_SESSION_NONE) {
- /* session exists */
- if (!force) {
- CNETERR("Session %s already exists\n",
- console_session.ses_name);
- return -EEXIST;
- }
-
- rc = lstcon_session_end();
-
- /* lstcon_session_end() only return local error */
- if (rc)
- return rc;
- }
-
- if (feats & ~LST_FEATS_MASK) {
- CNETERR("Unknown session features %x\n",
- (feats & ~LST_FEATS_MASK));
- return -EINVAL;
- }
-
- for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
- LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
-
- lstcon_new_session_id(&console_session.ses_id);
-
- console_session.ses_key = key;
- console_session.ses_state = LST_SESSION_ACTIVE;
- console_session.ses_force = !!force;
- console_session.ses_features = feats;
- console_session.ses_feats_updated = 0;
- console_session.ses_timeout = (timeout <= 0) ?
- LST_CONSOLE_TIMEOUT : timeout;
-
- if (strlen(name) > sizeof(console_session.ses_name) - 1)
- return -E2BIG;
- strlcpy(console_session.ses_name, name,
- sizeof(console_session.ses_name));
-
- rc = lstcon_batch_add(LST_DEFAULT_BATCH);
- if (rc)
- return rc;
-
- rc = lstcon_rpc_pinger_start();
- if (rc) {
- struct lstcon_batch *bat = NULL;
-
- lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
- lstcon_batch_destroy(bat);
-
- return rc;
- }
-
- if (!copy_to_user(sid_up, &console_session.ses_id,
- sizeof(struct lst_sid)))
- return rc;
-
- lstcon_session_end();
-
- return -EFAULT;
-}
-
-int
-lstcon_session_info(struct lst_sid __user *sid_up, int __user *key_up,
- unsigned __user *featp,
- struct lstcon_ndlist_ent __user *ndinfo_up,
- char __user *name_up, int len)
-{
- struct lstcon_ndlist_ent *entp;
- struct lstcon_ndlink *ndl;
- int rc = 0;
-
- if (console_session.ses_state != LST_SESSION_ACTIVE)
- return -ESRCH;
-
- entp = kzalloc(sizeof(*entp), GFP_NOFS);
- if (!entp)
- return -ENOMEM;
-
- list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
-
- if (copy_to_user(sid_up, &console_session.ses_id,
- sizeof(*sid_up)) ||
- copy_to_user(key_up, &console_session.ses_key,
- sizeof(*key_up)) ||
- copy_to_user(featp, &console_session.ses_features,
- sizeof(*featp)) ||
- copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
- copy_to_user(name_up, console_session.ses_name, len))
- rc = -EFAULT;
-
- kfree(entp);
-
- return rc;
-}
-
-int
-lstcon_session_end(void)
-{
- struct lstcon_rpc_trans *trans;
- struct lstcon_group *grp;
- struct lstcon_batch *bat;
- int rc = 0;
-
- LASSERT(console_session.ses_state == LST_SESSION_ACTIVE);
-
- rc = lstcon_rpc_trans_ndlist(&console_session.ses_ndl_list,
- NULL, LST_TRANS_SESEND, NULL,
- lstcon_sesrpc_condition, &trans);
- if (rc) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- console_session.ses_shutdown = 1;
-
- lstcon_rpc_pinger_stop();
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- lstcon_rpc_trans_destroy(trans);
- /* User can do nothing even rpc failed, so go on */
-
- /* waiting for orphan rpcs to die */
- lstcon_rpc_cleanup_wait();
-
- console_session.ses_id = LST_INVALID_SID;
- console_session.ses_state = LST_SESSION_NONE;
- console_session.ses_key = 0;
- console_session.ses_force = 0;
- console_session.ses_feats_updated = 0;
-
- /* destroy all batches */
- while (!list_empty(&console_session.ses_bat_list)) {
- bat = list_entry(console_session.ses_bat_list.next,
- struct lstcon_batch, bat_link);
-
- lstcon_batch_destroy(bat);
- }
-
- /* destroy all groups */
- while (!list_empty(&console_session.ses_grp_list)) {
- grp = list_entry(console_session.ses_grp_list.next,
- struct lstcon_group, grp_link);
- LASSERT(grp->grp_ref == 1);
-
- lstcon_group_decref(grp);
- }
-
- /* all nodes should be released */
- LASSERT(list_empty(&console_session.ses_ndl_list));
-
- console_session.ses_shutdown = 0;
- console_session.ses_expired = 0;
-
- return rc;
-}
-
-int
-lstcon_session_feats_check(unsigned int feats)
-{
- int rc = 0;
-
- if (feats & ~LST_FEATS_MASK) {
- CERROR("Can't support these features: %x\n",
- (feats & ~LST_FEATS_MASK));
- return -EPROTO;
- }
-
- spin_lock(&console_session.ses_rpc_lock);
-
- if (!console_session.ses_feats_updated) {
- console_session.ses_feats_updated = 1;
- console_session.ses_features = feats;
- }
-
- if (console_session.ses_features != feats)
- rc = -EPROTO;
-
- spin_unlock(&console_session.ses_rpc_lock);
-
- if (rc) {
- CERROR("remote features %x do not match with session features %x of console\n",
- feats, console_session.ses_features);
- }
-
- return rc;
-}
-
-static int
-lstcon_acceptor_handle(struct srpc_server_rpc *rpc)
-{
- struct srpc_msg *rep = &rpc->srpc_replymsg;
- struct srpc_msg *req = &rpc->srpc_reqstbuf->buf_msg;
- struct srpc_join_reqst *jreq = &req->msg_body.join_reqst;
- struct srpc_join_reply *jrep = &rep->msg_body.join_reply;
- struct lstcon_group *grp = NULL;
- struct lstcon_ndlink *ndl;
- int rc = 0;
-
- sfw_unpack_message(req);
-
- mutex_lock(&console_session.ses_mutex);
-
- jrep->join_sid = console_session.ses_id;
-
- if (console_session.ses_id.ses_nid == LNET_NID_ANY) {
- jrep->join_status = ESRCH;
- goto out;
- }
-
- if (lstcon_session_feats_check(req->msg_ses_feats)) {
- jrep->join_status = EPROTO;
- goto out;
- }
-
- if (jreq->join_sid.ses_nid != LNET_NID_ANY &&
- !lstcon_session_match(jreq->join_sid)) {
- jrep->join_status = EBUSY;
- goto out;
- }
-
- if (lstcon_group_find(jreq->join_group, &grp)) {
- rc = lstcon_group_alloc(jreq->join_group, &grp);
- if (rc) {
- CERROR("Out of memory\n");
- goto out;
- }
-
- list_add_tail(&grp->grp_link,
- &console_session.ses_grp_list);
- lstcon_group_addref(grp);
- }
-
- if (grp->grp_ref > 2) {
- /* Group in using */
- jrep->join_status = EBUSY;
- goto out;
- }
-
- rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 0);
- if (!rc) {
- jrep->join_status = EEXIST;
- goto out;
- }
-
- rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 1);
- if (rc) {
- CERROR("Out of memory\n");
- goto out;
- }
-
- ndl->ndl_node->nd_state = LST_NODE_ACTIVE;
- ndl->ndl_node->nd_timeout = console_session.ses_timeout;
-
- if (!grp->grp_userland)
- grp->grp_userland = 1;
-
- strlcpy(jrep->join_session, console_session.ses_name,
- sizeof(jrep->join_session));
- jrep->join_timeout = console_session.ses_timeout;
- jrep->join_status = 0;
-
-out:
- rep->msg_ses_feats = console_session.ses_features;
- if (grp)
- lstcon_group_decref(grp);
-
- mutex_unlock(&console_session.ses_mutex);
-
- return rc;
-}
-
-static struct srpc_service lstcon_acceptor_service;
-
-static void lstcon_init_acceptor_service(void)
-{
- /* initialize selftest console acceptor service table */
- lstcon_acceptor_service.sv_name = "join session";
- lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle;
- lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN;
- lstcon_acceptor_service.sv_wi_total = SFW_FRWK_WI_MAX;
-}
-
-static DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry);
-
-/* initialize console */
-int
-lstcon_console_init(void)
-{
- int i;
- int rc;
-
- memset(&console_session, 0, sizeof(struct lstcon_session));
-
- console_session.ses_id = LST_INVALID_SID;
- console_session.ses_state = LST_SESSION_NONE;
- console_session.ses_timeout = 0;
- console_session.ses_force = 0;
- console_session.ses_expired = 0;
- console_session.ses_feats_updated = 0;
- console_session.ses_features = LST_FEATS_MASK;
- console_session.ses_laststamp = ktime_get_real_seconds();
-
- mutex_init(&console_session.ses_mutex);
-
- INIT_LIST_HEAD(&console_session.ses_ndl_list);
- INIT_LIST_HEAD(&console_session.ses_grp_list);
- INIT_LIST_HEAD(&console_session.ses_bat_list);
- INIT_LIST_HEAD(&console_session.ses_trans_list);
-
- console_session.ses_ndl_hash =
- kmalloc(sizeof(struct list_head) * LST_GLOBAL_HASHSIZE, GFP_KERNEL);
- if (!console_session.ses_ndl_hash)
- return -ENOMEM;
-
- for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
- INIT_LIST_HEAD(&console_session.ses_ndl_hash[i]);
-
- /* initialize acceptor service table */
- lstcon_init_acceptor_service();
-
- rc = srpc_add_service(&lstcon_acceptor_service);
- LASSERT(rc != -EBUSY);
- if (rc) {
- kfree(console_session.ses_ndl_hash);
- return rc;
- }
-
- rc = srpc_service_add_buffers(&lstcon_acceptor_service,
- lstcon_acceptor_service.sv_wi_total);
- if (rc) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = libcfs_register_ioctl(&lstcon_ioctl_handler);
-
- if (!rc) {
- lstcon_rpc_module_init();
- return 0;
- }
-
-out:
- srpc_shutdown_service(&lstcon_acceptor_service);
- srpc_remove_service(&lstcon_acceptor_service);
-
- kfree(console_session.ses_ndl_hash);
-
- srpc_wait_service_shutdown(&lstcon_acceptor_service);
-
- return rc;
-}
-
-int
-lstcon_console_fini(void)
-{
- int i;
-
- libcfs_deregister_ioctl(&lstcon_ioctl_handler);
-
- mutex_lock(&console_session.ses_mutex);
-
- srpc_shutdown_service(&lstcon_acceptor_service);
- srpc_remove_service(&lstcon_acceptor_service);
-
- if (console_session.ses_state != LST_SESSION_NONE)
- lstcon_session_end();
-
- lstcon_rpc_module_fini();
-
- mutex_unlock(&console_session.ses_mutex);
-
- LASSERT(list_empty(&console_session.ses_ndl_list));
- LASSERT(list_empty(&console_session.ses_grp_list));
- LASSERT(list_empty(&console_session.ses_bat_list));
- LASSERT(list_empty(&console_session.ses_trans_list));
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++)
- LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
-
- kfree(console_session.ses_ndl_hash);
-
- srpc_wait_service_shutdown(&lstcon_acceptor_service);
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
deleted file mode 100644
index 3933ed4cca93..000000000000
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ /dev/null
@@ -1,244 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/console.h
- *
- * kernel structure for LST console
- *
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-#ifndef __LST_CONSOLE_H__
-#define __LST_CONSOLE_H__
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-types.h>
-#include <uapi/linux/lnet/lnetst.h>
-#include "selftest.h"
-#include "conrpc.h"
-
-/* node descriptor */
-struct lstcon_node {
- struct lnet_process_id nd_id; /* id of the node */
- int nd_ref; /* reference count */
- int nd_state; /* state of the node */
- int nd_timeout; /* session timeout */
- unsigned long nd_stamp; /* timestamp of last replied RPC */
- struct lstcon_rpc nd_ping; /* ping rpc */
-};
-
-/* node link descriptor */
-struct lstcon_ndlink {
- struct list_head ndl_link; /* chain on list */
- struct list_head ndl_hlink; /* chain on hash */
- struct lstcon_node *ndl_node; /* pointer to node */
-};
-
-/* (alias of nodes) group descriptor */
-struct lstcon_group {
- struct list_head grp_link; /* chain on global group list
- */
- int grp_ref; /* reference count */
- int grp_userland; /* has userland nodes */
- int grp_nnode; /* # of nodes */
- char grp_name[LST_NAME_SIZE]; /* group name */
-
- struct list_head grp_trans_list; /* transaction list */
- struct list_head grp_ndl_list; /* nodes list */
- struct list_head grp_ndl_hash[0]; /* hash table for nodes */
-};
-
-#define LST_BATCH_IDLE 0xB0 /* idle batch */
-#define LST_BATCH_RUNNING 0xB1 /* running batch */
-
-struct lstcon_tsb_hdr {
- struct lst_bid tsb_id; /* batch ID */
- int tsb_index; /* test index */
-};
-
-/* (tests ) batch descriptor */
-struct lstcon_batch {
- struct lstcon_tsb_hdr bat_hdr; /* test_batch header */
- struct list_head bat_link; /* chain on session's batches list */
- int bat_ntest; /* # of test */
- int bat_state; /* state of the batch */
- int bat_arg; /* parameter for run|stop, timeout
- * for run, force for stop
- */
- char bat_name[LST_NAME_SIZE];/* name of batch */
-
- struct list_head bat_test_list; /* list head of tests (struct lstcon_test)
- */
- struct list_head bat_trans_list; /* list head of transaction */
- struct list_head bat_cli_list; /* list head of client nodes
- * (struct lstcon_node)
- */
- struct list_head *bat_cli_hash; /* hash table of client nodes */
- struct list_head bat_srv_list; /* list head of server nodes */
- struct list_head *bat_srv_hash; /* hash table of server nodes */
-};
-
-/* a single test descriptor */
-struct lstcon_test {
- struct lstcon_tsb_hdr tes_hdr; /* test batch header */
- struct list_head tes_link; /* chain on batch's tests list */
- struct lstcon_batch *tes_batch; /* pointer to batch */
-
- int tes_type; /* type of the test, i.e: bulk, ping */
- int tes_stop_onerr; /* stop on error */
- int tes_oneside; /* one-sided test */
- int tes_concur; /* concurrency */
- int tes_loop; /* loop count */
- int tes_dist; /* nodes distribution of target group */
- int tes_span; /* nodes span of target group */
- int tes_cliidx; /* client index, used for RPC creating */
-
- struct list_head tes_trans_list; /* transaction list */
- struct lstcon_group *tes_src_grp; /* group run the test */
- struct lstcon_group *tes_dst_grp; /* target group */
-
- int tes_paramlen; /* test parameter length */
- char tes_param[0]; /* test parameter */
-};
-
-#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */
-#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */
-
-#define LST_SESSION_NONE 0x0 /* no session */
-#define LST_SESSION_ACTIVE 0x1 /* working session */
-
-#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */
-
-struct lstcon_session {
- struct mutex ses_mutex; /* only 1 thread in session */
- struct lst_sid ses_id; /* global session id */
- int ses_key; /* local session key */
- int ses_state; /* state of session */
- int ses_timeout; /* timeout in seconds */
- time64_t ses_laststamp; /* last operation stamp (seconds)
- */
- unsigned int ses_features; /* tests features of the session
- */
- unsigned int ses_feats_updated:1; /* features are synced with
- * remote test nodes
- */
- unsigned int ses_force:1; /* force creating */
- unsigned int ses_shutdown:1; /* session is shutting down */
- unsigned int ses_expired:1; /* console is timedout */
- __u64 ses_id_cookie; /* batch id cookie */
- char ses_name[LST_NAME_SIZE];/* session name */
- struct lstcon_rpc_trans *ses_ping; /* session pinger */
- struct stt_timer ses_ping_timer; /* timer for pinger */
- struct lstcon_trans_stat ses_trans_stat; /* transaction stats */
-
- struct list_head ses_trans_list; /* global list of transaction */
- struct list_head ses_grp_list; /* global list of groups */
- struct list_head ses_bat_list; /* global list of batches */
- struct list_head ses_ndl_list; /* global list of nodes */
- struct list_head *ses_ndl_hash; /* hash table of nodes */
-
- spinlock_t ses_rpc_lock; /* serialize */
- atomic_t ses_rpc_counter; /* # of initialized RPCs */
- struct list_head ses_rpc_freelist; /* idle console rpc */
-}; /* session descriptor */
-
-extern struct lstcon_session console_session;
-
-static inline struct lstcon_trans_stat *
-lstcon_trans_stat(void)
-{
- return &console_session.ses_trans_stat;
-}
-
-static inline struct list_head *
-lstcon_id2hash(struct lnet_process_id id, struct list_head *hash)
-{
- unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
-
- return &hash[idx];
-}
-
-int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
-int lstcon_console_init(void);
-int lstcon_console_fini(void);
-int lstcon_session_match(struct lst_sid sid);
-int lstcon_session_new(char *name, int key, unsigned int version,
- int timeout, int flags, struct lst_sid __user *sid_up);
-int lstcon_session_info(struct lst_sid __user *sid_up, int __user *key,
- unsigned __user *verp, struct lstcon_ndlist_ent __user *entp,
- char __user *name_up, int len);
-int lstcon_session_end(void);
-int lstcon_session_debug(int timeout, struct list_head __user *result_up);
-int lstcon_session_feats_check(unsigned int feats);
-int lstcon_batch_debug(int timeout, char *name,
- int client, struct list_head __user *result_up);
-int lstcon_group_debug(int timeout, char *name,
- struct list_head __user *result_up);
-int lstcon_nodes_debug(int timeout, int nnd,
- struct lnet_process_id __user *nds_up,
- struct list_head __user *result_up);
-int lstcon_group_add(char *name);
-int lstcon_group_del(char *name);
-int lstcon_group_clean(char *name, int args);
-int lstcon_group_refresh(char *name, struct list_head __user *result_up);
-int lstcon_nodes_add(char *name, int nnd, struct lnet_process_id __user *nds_up,
- unsigned int *featp, struct list_head __user *result_up);
-int lstcon_nodes_remove(char *name, int nnd,
- struct lnet_process_id __user *nds_up,
- struct list_head __user *result_up);
-int lstcon_group_info(char *name, struct lstcon_ndlist_ent __user *gent_up,
- int *index_p, int *ndent_p,
- struct lstcon_node_ent __user *ndents_up);
-int lstcon_group_list(int idx, int len, char __user *name_up);
-int lstcon_batch_add(char *name);
-int lstcon_batch_run(char *name, int timeout,
- struct list_head __user *result_up);
-int lstcon_batch_stop(char *name, int force,
- struct list_head __user *result_up);
-int lstcon_test_batch_query(char *name, int testidx,
- int client, int timeout,
- struct list_head __user *result_up);
-int lstcon_batch_del(char *name);
-int lstcon_batch_list(int idx, int namelen, char __user *name_up);
-int lstcon_batch_info(char *name, struct lstcon_test_batch_ent __user *ent_up,
- int server, int testidx, int *index_p,
- int *ndent_p, struct lstcon_node_ent __user *dents_up);
-int lstcon_group_stat(char *grp_name, int timeout,
- struct list_head __user *result_up);
-int lstcon_nodes_stat(int count, struct lnet_process_id __user *ids_up,
- int timeout, struct list_head __user *result_up);
-int lstcon_test_add(char *batch_name, int type, int loop,
- int concur, int dist, int span,
- char *src_name, char *dst_name,
- void *param, int paramlen, int *retp,
- struct list_head __user *result_up);
-#endif
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
deleted file mode 100644
index 0ca1e3a780ca..000000000000
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ /dev/null
@@ -1,1786 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/framework.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "selftest.h"
-
-struct lst_sid LST_INVALID_SID = {LNET_NID_ANY, -1};
-
-static int session_timeout = 100;
-module_param(session_timeout, int, 0444);
-MODULE_PARM_DESC(session_timeout, "test session timeout in seconds (100 by default, 0 == never)");
-
-static int rpc_timeout = 64;
-module_param(rpc_timeout, int, 0644);
-MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)");
-
-#define sfw_unpack_id(id) \
-do { \
- __swab64s(&(id).nid); \
- __swab32s(&(id).pid); \
-} while (0)
-
-#define sfw_unpack_sid(sid) \
-do { \
- __swab64s(&(sid).ses_nid); \
- __swab64s(&(sid).ses_stamp); \
-} while (0)
-
-#define sfw_unpack_fw_counters(fc) \
-do { \
- __swab32s(&(fc).running_ms); \
- __swab32s(&(fc).active_batches); \
- __swab32s(&(fc).zombie_sessions); \
- __swab32s(&(fc).brw_errors); \
- __swab32s(&(fc).ping_errors); \
-} while (0)
-
-#define sfw_unpack_rpc_counters(rc) \
-do { \
- __swab32s(&(rc).errors); \
- __swab32s(&(rc).rpcs_sent); \
- __swab32s(&(rc).rpcs_rcvd); \
- __swab32s(&(rc).rpcs_dropped); \
- __swab32s(&(rc).rpcs_expired); \
- __swab64s(&(rc).bulk_get); \
- __swab64s(&(rc).bulk_put); \
-} while (0)
-
-#define sfw_unpack_lnet_counters(lc) \
-do { \
- __swab32s(&(lc).errors); \
- __swab32s(&(lc).msgs_max); \
- __swab32s(&(lc).msgs_alloc); \
- __swab32s(&(lc).send_count); \
- __swab32s(&(lc).recv_count); \
- __swab32s(&(lc).drop_count); \
- __swab32s(&(lc).route_count); \
- __swab64s(&(lc).send_length); \
- __swab64s(&(lc).recv_length); \
- __swab64s(&(lc).drop_length); \
- __swab64s(&(lc).route_length); \
-} while (0)
-
-#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive))
-#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive))
-
-static struct smoketest_framework {
- struct list_head fw_zombie_rpcs; /* RPCs to be recycled */
- struct list_head fw_zombie_sessions; /* stopping sessions */
- struct list_head fw_tests; /* registered test cases */
- atomic_t fw_nzombies; /* # zombie sessions */
- spinlock_t fw_lock; /* serialise */
- struct sfw_session *fw_session; /* _the_ session */
- int fw_shuttingdown; /* shutdown in progress */
- struct srpc_server_rpc *fw_active_srpc;/* running RPC */
-} sfw_data;
-
-/* forward ref's */
-int sfw_stop_batch(struct sfw_batch *tsb, int force);
-void sfw_destroy_session(struct sfw_session *sn);
-
-static inline struct sfw_test_case *
-sfw_find_test_case(int id)
-{
- struct sfw_test_case *tsc;
-
- LASSERT(id <= SRPC_SERVICE_MAX_ID);
- LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
-
- list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
- if (tsc->tsc_srv_service->sv_id == id)
- return tsc;
- }
-
- return NULL;
-}
-
-static int
-sfw_register_test(struct srpc_service *service,
- struct sfw_test_client_ops *cliops)
-{
- struct sfw_test_case *tsc;
-
- if (sfw_find_test_case(service->sv_id)) {
- CERROR("Failed to register test %s (%d)\n",
- service->sv_name, service->sv_id);
- return -EEXIST;
- }
-
- tsc = kzalloc(sizeof(struct sfw_test_case), GFP_NOFS);
- if (!tsc)
- return -ENOMEM;
-
- tsc->tsc_cli_ops = cliops;
- tsc->tsc_srv_service = service;
-
- list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
- return 0;
-}
-
-static void
-sfw_add_session_timer(void)
-{
- struct sfw_session *sn = sfw_data.fw_session;
- struct stt_timer *timer = &sn->sn_timer;
-
- LASSERT(!sfw_data.fw_shuttingdown);
-
- if (!sn || !sn->sn_timeout)
- return;
-
- LASSERT(!sn->sn_timer_active);
-
- sn->sn_timer_active = 1;
- timer->stt_expires = ktime_get_real_seconds() + sn->sn_timeout;
- stt_add_timer(timer);
-}
-
-static int
-sfw_del_session_timer(void)
-{
- struct sfw_session *sn = sfw_data.fw_session;
-
- if (!sn || !sn->sn_timer_active)
- return 0;
-
- LASSERT(sn->sn_timeout);
-
- if (stt_del_timer(&sn->sn_timer)) { /* timer defused */
- sn->sn_timer_active = 0;
- return 0;
- }
-
- return -EBUSY; /* racing with sfw_session_expired() */
-}
-
-static void
-sfw_deactivate_session(void)
-__must_hold(&sfw_data.fw_lock)
-{
- struct sfw_session *sn = sfw_data.fw_session;
- int nactive = 0;
- struct sfw_batch *tsb;
- struct sfw_test_case *tsc;
-
- if (!sn)
- return;
-
- LASSERT(!sn->sn_timer_active);
-
- sfw_data.fw_session = NULL;
- atomic_inc(&sfw_data.fw_nzombies);
- list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
-
- spin_unlock(&sfw_data.fw_lock);
-
- list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
- srpc_abort_service(tsc->tsc_srv_service);
- }
-
- spin_lock(&sfw_data.fw_lock);
-
- list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
- if (sfw_batch_active(tsb)) {
- nactive++;
- sfw_stop_batch(tsb, 1);
- }
- }
-
- if (nactive)
- return; /* wait for active batches to stop */
-
- list_del_init(&sn->sn_list);
- spin_unlock(&sfw_data.fw_lock);
-
- sfw_destroy_session(sn);
-
- spin_lock(&sfw_data.fw_lock);
-}
-
-static void
-sfw_session_expired(void *data)
-{
- struct sfw_session *sn = data;
-
- spin_lock(&sfw_data.fw_lock);
-
- LASSERT(sn->sn_timer_active);
- LASSERT(sn == sfw_data.fw_session);
-
- CWARN("Session expired! sid: %s-%llu, name: %s\n",
- libcfs_nid2str(sn->sn_id.ses_nid),
- sn->sn_id.ses_stamp, &sn->sn_name[0]);
-
- sn->sn_timer_active = 0;
- sfw_deactivate_session();
-
- spin_unlock(&sfw_data.fw_lock);
-}
-
-static inline void
-sfw_init_session(struct sfw_session *sn, struct lst_sid sid,
- unsigned int features, const char *name)
-{
- struct stt_timer *timer = &sn->sn_timer;
-
- memset(sn, 0, sizeof(struct sfw_session));
- INIT_LIST_HEAD(&sn->sn_list);
- INIT_LIST_HEAD(&sn->sn_batches);
- atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
- atomic_set(&sn->sn_brw_errors, 0);
- atomic_set(&sn->sn_ping_errors, 0);
- strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
-
- sn->sn_timer_active = 0;
- sn->sn_id = sid;
- sn->sn_features = features;
- sn->sn_timeout = session_timeout;
- sn->sn_started = cfs_time_current();
-
- timer->stt_data = sn;
- timer->stt_func = sfw_session_expired;
- INIT_LIST_HEAD(&timer->stt_list);
-}
-
-/* completion handler for incoming framework RPCs */
-static void
-sfw_server_rpc_done(struct srpc_server_rpc *rpc)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- int status = rpc->srpc_status;
-
- CDEBUG(D_NET, "Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer),
- swi_state2str(rpc->srpc_wi.swi_state),
- status);
-
- if (rpc->srpc_bulk)
- sfw_free_pages(rpc);
-}
-
-static void
-sfw_client_rpc_fini(struct srpc_client_rpc *rpc)
-{
- LASSERT(!rpc->crpc_bulk.bk_niov);
- LASSERT(list_empty(&rpc->crpc_list));
- LASSERT(!atomic_read(&rpc->crpc_refcount));
-
- CDEBUG(D_NET, "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(rpc->crpc_wi.swi_state),
- rpc->crpc_aborted, rpc->crpc_status);
-
- spin_lock(&sfw_data.fw_lock);
-
- /* my callers must finish all RPCs before shutting me down */
- LASSERT(!sfw_data.fw_shuttingdown);
- list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
-
- spin_unlock(&sfw_data.fw_lock);
-}
-
-static struct sfw_batch *
-sfw_find_batch(struct lst_bid bid)
-{
- struct sfw_session *sn = sfw_data.fw_session;
- struct sfw_batch *bat;
-
- LASSERT(sn);
-
- list_for_each_entry(bat, &sn->sn_batches, bat_list) {
- if (bat->bat_id.bat_id == bid.bat_id)
- return bat;
- }
-
- return NULL;
-}
-
-static struct sfw_batch *
-sfw_bid2batch(struct lst_bid bid)
-{
- struct sfw_session *sn = sfw_data.fw_session;
- struct sfw_batch *bat;
-
- LASSERT(sn);
-
- bat = sfw_find_batch(bid);
- if (bat)
- return bat;
-
- bat = kzalloc(sizeof(struct sfw_batch), GFP_NOFS);
- if (!bat)
- return NULL;
-
- bat->bat_error = 0;
- bat->bat_session = sn;
- bat->bat_id = bid;
- atomic_set(&bat->bat_nactive, 0);
- INIT_LIST_HEAD(&bat->bat_tests);
-
- list_add_tail(&bat->bat_list, &sn->sn_batches);
- return bat;
-}
-
-static int
-sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply)
-{
- struct sfw_session *sn = sfw_data.fw_session;
- struct sfw_counters *cnt = &reply->str_fw;
- struct sfw_batch *bat;
-
- reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id;
-
- if (request->str_sid.ses_nid == LNET_NID_ANY) {
- reply->str_status = EINVAL;
- return 0;
- }
-
- if (!sn || !sfw_sid_equal(request->str_sid, sn->sn_id)) {
- reply->str_status = ESRCH;
- return 0;
- }
-
- lnet_counters_get(&reply->str_lnet);
- srpc_get_counters(&reply->str_rpc);
-
- /*
- * send over the msecs since the session was started
- * with 32 bits to send, this is ~49 days
- */
- cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started);
- cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
- cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
- cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
-
- cnt->active_batches = 0;
- list_for_each_entry(bat, &sn->sn_batches, bat_list) {
- if (atomic_read(&bat->bat_nactive) > 0)
- cnt->active_batches++;
- }
-
- reply->str_status = 0;
- return 0;
-}
-
-int
-sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
-{
- struct sfw_session *sn = sfw_data.fw_session;
- struct srpc_msg *msg = container_of(request, struct srpc_msg,
- msg_body.mksn_reqst);
- int cplen = 0;
-
- if (request->mksn_sid.ses_nid == LNET_NID_ANY) {
- reply->mksn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
- reply->mksn_status = EINVAL;
- return 0;
- }
-
- if (sn) {
- reply->mksn_status = 0;
- reply->mksn_sid = sn->sn_id;
- reply->mksn_timeout = sn->sn_timeout;
-
- if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
- atomic_inc(&sn->sn_refcount);
- return 0;
- }
-
- if (!request->mksn_force) {
- reply->mksn_status = EBUSY;
- cplen = strlcpy(&reply->mksn_name[0], &sn->sn_name[0],
- sizeof(reply->mksn_name));
- if (cplen >= sizeof(reply->mksn_name))
- return -E2BIG;
- return 0;
- }
- }
-
- /*
- * reject the request if it requires unknown features
- * NB: old version will always accept all features because it's not
- * aware of srpc_msg::msg_ses_feats, it's a defect but it's also
- * harmless because it will return zero feature to console, and it's
- * console's responsibility to make sure all nodes in a session have
- * same feature mask.
- */
- if (msg->msg_ses_feats & ~LST_FEATS_MASK) {
- reply->mksn_status = EPROTO;
- return 0;
- }
-
- /* brand new or create by force */
- sn = kzalloc(sizeof(struct sfw_session), GFP_NOFS);
- if (!sn) {
- CERROR("dropping RPC mksn under memory pressure\n");
- return -ENOMEM;
- }
-
- sfw_init_session(sn, request->mksn_sid,
- msg->msg_ses_feats, &request->mksn_name[0]);
-
- spin_lock(&sfw_data.fw_lock);
-
- sfw_deactivate_session();
- LASSERT(!sfw_data.fw_session);
- sfw_data.fw_session = sn;
-
- spin_unlock(&sfw_data.fw_lock);
-
- reply->mksn_status = 0;
- reply->mksn_sid = sn->sn_id;
- reply->mksn_timeout = sn->sn_timeout;
- return 0;
-}
-
-static int
-sfw_remove_session(struct srpc_rmsn_reqst *request,
- struct srpc_rmsn_reply *reply)
-{
- struct sfw_session *sn = sfw_data.fw_session;
-
- reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
-
- if (request->rmsn_sid.ses_nid == LNET_NID_ANY) {
- reply->rmsn_status = EINVAL;
- return 0;
- }
-
- if (!sn || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) {
- reply->rmsn_status = !sn ? ESRCH : EBUSY;
- return 0;
- }
-
- if (!atomic_dec_and_test(&sn->sn_refcount)) {
- reply->rmsn_status = 0;
- return 0;
- }
-
- spin_lock(&sfw_data.fw_lock);
- sfw_deactivate_session();
- spin_unlock(&sfw_data.fw_lock);
-
- reply->rmsn_status = 0;
- reply->rmsn_sid = LST_INVALID_SID;
- LASSERT(!sfw_data.fw_session);
- return 0;
-}
-
-static int
-sfw_debug_session(struct srpc_debug_reqst *request,
- struct srpc_debug_reply *reply)
-{
- struct sfw_session *sn = sfw_data.fw_session;
-
- if (!sn) {
- reply->dbg_status = ESRCH;
- reply->dbg_sid = LST_INVALID_SID;
- return 0;
- }
-
- reply->dbg_status = 0;
- reply->dbg_sid = sn->sn_id;
- reply->dbg_timeout = sn->sn_timeout;
- if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name))
- >= sizeof(reply->dbg_name))
- return -E2BIG;
-
- return 0;
-}
-
-static void
-sfw_test_rpc_fini(struct srpc_client_rpc *rpc)
-{
- struct sfw_test_unit *tsu = rpc->crpc_priv;
- struct sfw_test_instance *tsi = tsu->tsu_instance;
-
- /* Called with hold of tsi->tsi_lock */
- LASSERT(list_empty(&rpc->crpc_list));
- list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
-}
-
-static inline int
-sfw_test_buffers(struct sfw_test_instance *tsi)
-{
- struct sfw_test_case *tsc;
- struct srpc_service *svc;
- int nbuf;
-
- LASSERT(tsi);
- tsc = sfw_find_test_case(tsi->tsi_service);
- LASSERT(tsc);
- svc = tsc->tsc_srv_service;
- LASSERT(svc);
-
- nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts;
- return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA);
-}
-
-static int
-sfw_load_test(struct sfw_test_instance *tsi)
-{
- struct sfw_test_case *tsc;
- struct srpc_service *svc;
- int nbuf;
- int rc;
-
- LASSERT(tsi);
- tsc = sfw_find_test_case(tsi->tsi_service);
- nbuf = sfw_test_buffers(tsi);
- LASSERT(tsc);
- svc = tsc->tsc_srv_service;
-
- if (tsi->tsi_is_client) {
- tsi->tsi_ops = tsc->tsc_cli_ops;
- return 0;
- }
-
- rc = srpc_service_add_buffers(svc, nbuf);
- if (rc) {
- CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
- svc->sv_name, nbuf, rc);
- /*
- * NB: this error handler is not strictly correct, because
- * it may release more buffers than already allocated,
- * but it doesn't matter because request portal should
- * be lazy portal and will grow buffers if necessary.
- */
- srpc_service_remove_buffers(svc, nbuf);
- return -ENOMEM;
- }
-
- CDEBUG(D_NET, "Reserved %d buffers for test %s\n",
- nbuf * (srpc_serv_is_framework(svc) ?
- 2 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name);
- return 0;
-}
-
-static void
-sfw_unload_test(struct sfw_test_instance *tsi)
-{
- struct sfw_test_case *tsc;
-
- LASSERT(tsi);
- tsc = sfw_find_test_case(tsi->tsi_service);
- LASSERT(tsc);
-
- if (tsi->tsi_is_client)
- return;
-
- /*
- * shrink buffers, because request portal is lazy portal
- * which can grow buffers at runtime so we may leave
- * some buffers behind, but never mind...
- */
- srpc_service_remove_buffers(tsc->tsc_srv_service,
- sfw_test_buffers(tsi));
-}
-
-static void
-sfw_destroy_test_instance(struct sfw_test_instance *tsi)
-{
- struct srpc_client_rpc *rpc;
- struct sfw_test_unit *tsu;
-
- if (!tsi->tsi_is_client)
- goto clean;
-
- tsi->tsi_ops->tso_fini(tsi);
-
- LASSERT(!tsi->tsi_stopping);
- LASSERT(list_empty(&tsi->tsi_active_rpcs));
- LASSERT(!sfw_test_active(tsi));
-
- while (!list_empty(&tsi->tsi_units)) {
- tsu = list_entry(tsi->tsi_units.next,
- struct sfw_test_unit, tsu_list);
- list_del(&tsu->tsu_list);
- kfree(tsu);
- }
-
- while (!list_empty(&tsi->tsi_free_rpcs)) {
- rpc = list_entry(tsi->tsi_free_rpcs.next,
- struct srpc_client_rpc, crpc_list);
- list_del(&rpc->crpc_list);
- kfree(rpc);
- }
-
-clean:
- sfw_unload_test(tsi);
- kfree(tsi);
-}
-
-static void
-sfw_destroy_batch(struct sfw_batch *tsb)
-{
- struct sfw_test_instance *tsi;
-
- LASSERT(!sfw_batch_active(tsb));
- LASSERT(list_empty(&tsb->bat_list));
-
- while (!list_empty(&tsb->bat_tests)) {
- tsi = list_entry(tsb->bat_tests.next,
- struct sfw_test_instance, tsi_list);
- list_del_init(&tsi->tsi_list);
- sfw_destroy_test_instance(tsi);
- }
-
- kfree(tsb);
-}
-
-void
-sfw_destroy_session(struct sfw_session *sn)
-{
- struct sfw_batch *batch;
-
- LASSERT(list_empty(&sn->sn_list));
- LASSERT(sn != sfw_data.fw_session);
-
- while (!list_empty(&sn->sn_batches)) {
- batch = list_entry(sn->sn_batches.next,
- struct sfw_batch, bat_list);
- list_del_init(&batch->bat_list);
- sfw_destroy_batch(batch);
- }
-
- kfree(sn);
- atomic_dec(&sfw_data.fw_nzombies);
-}
-
-static void
-sfw_unpack_addtest_req(struct srpc_msg *msg)
-{
- struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
-
- LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST);
- LASSERT(req->tsr_is_client);
-
- if (msg->msg_magic == SRPC_MSG_MAGIC)
- return; /* no flipping needed */
-
- LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
-
- if (req->tsr_service == SRPC_SERVICE_BRW) {
- if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
- struct test_bulk_req *bulk = &req->tsr_u.bulk_v0;
-
- __swab32s(&bulk->blk_opc);
- __swab32s(&bulk->blk_npg);
- __swab32s(&bulk->blk_flags);
-
- } else {
- struct test_bulk_req_v1 *bulk = &req->tsr_u.bulk_v1;
-
- __swab16s(&bulk->blk_opc);
- __swab16s(&bulk->blk_flags);
- __swab32s(&bulk->blk_offset);
- __swab32s(&bulk->blk_len);
- }
-
- return;
- }
-
- if (req->tsr_service == SRPC_SERVICE_PING) {
- struct test_ping_req *ping = &req->tsr_u.ping;
-
- __swab32s(&ping->png_size);
- __swab32s(&ping->png_flags);
- return;
- }
-
- LBUG();
-}
-
-static int
-sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
-{
- struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg;
- struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
- struct srpc_bulk *bk = rpc->srpc_bulk;
- int ndest = req->tsr_ndest;
- struct sfw_test_unit *tsu;
- struct sfw_test_instance *tsi;
- int i;
- int rc;
-
- tsi = kzalloc(sizeof(*tsi), GFP_NOFS);
- if (!tsi) {
- CERROR("Can't allocate test instance for batch: %llu\n",
- tsb->bat_id.bat_id);
- return -ENOMEM;
- }
-
- spin_lock_init(&tsi->tsi_lock);
- atomic_set(&tsi->tsi_nactive, 0);
- INIT_LIST_HEAD(&tsi->tsi_units);
- INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
- INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
-
- tsi->tsi_stopping = 0;
- tsi->tsi_batch = tsb;
- tsi->tsi_loop = req->tsr_loop;
- tsi->tsi_concur = req->tsr_concur;
- tsi->tsi_service = req->tsr_service;
- tsi->tsi_is_client = !!(req->tsr_is_client);
- tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr);
-
- rc = sfw_load_test(tsi);
- if (rc) {
- kfree(tsi);
- return rc;
- }
-
- LASSERT(!sfw_batch_active(tsb));
-
- if (!tsi->tsi_is_client) {
- /* it's test server, just add it to tsb */
- list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
- return 0;
- }
-
- LASSERT(bk);
- LASSERT(bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
- LASSERT((unsigned int)bk->bk_len >=
- sizeof(struct lnet_process_id_packed) * ndest);
-
- sfw_unpack_addtest_req(msg);
- memcpy(&tsi->tsi_u, &req->tsr_u, sizeof(tsi->tsi_u));
-
- for (i = 0; i < ndest; i++) {
- struct lnet_process_id_packed *dests;
- struct lnet_process_id_packed id;
- int j;
-
- dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].bv_page);
- LASSERT(dests); /* my pages are within KVM always */
- id = dests[i % SFW_ID_PER_PAGE];
- if (msg->msg_magic != SRPC_MSG_MAGIC)
- sfw_unpack_id(id);
-
- for (j = 0; j < tsi->tsi_concur; j++) {
- tsu = kzalloc(sizeof(struct sfw_test_unit), GFP_NOFS);
- if (!tsu) {
- rc = -ENOMEM;
- CERROR("Can't allocate tsu for %d\n",
- tsi->tsi_service);
- goto error;
- }
-
- tsu->tsu_dest.nid = id.nid;
- tsu->tsu_dest.pid = id.pid;
- tsu->tsu_instance = tsi;
- tsu->tsu_private = NULL;
- list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
- }
- }
-
- rc = tsi->tsi_ops->tso_init(tsi);
- if (!rc) {
- list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
- return 0;
- }
-
-error:
- LASSERT(rc);
- sfw_destroy_test_instance(tsi);
- return rc;
-}
-
-static void
-sfw_test_unit_done(struct sfw_test_unit *tsu)
-{
- struct sfw_test_instance *tsi = tsu->tsu_instance;
- struct sfw_batch *tsb = tsi->tsi_batch;
- struct sfw_session *sn = tsb->bat_session;
-
- LASSERT(sfw_test_active(tsi));
-
- if (!atomic_dec_and_test(&tsi->tsi_nactive))
- return;
-
- /* the test instance is done */
- spin_lock(&tsi->tsi_lock);
-
- tsi->tsi_stopping = 0;
-
- spin_unlock(&tsi->tsi_lock);
-
- spin_lock(&sfw_data.fw_lock);
-
- if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
- sn == sfw_data.fw_session) { /* sn also active */
- spin_unlock(&sfw_data.fw_lock);
- return;
- }
-
- LASSERT(!list_empty(&sn->sn_list)); /* I'm a zombie! */
-
- list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
- if (sfw_batch_active(tsb)) {
- spin_unlock(&sfw_data.fw_lock);
- return;
- }
- }
-
- list_del_init(&sn->sn_list);
- spin_unlock(&sfw_data.fw_lock);
-
- sfw_destroy_session(sn);
-}
-
-static void
-sfw_test_rpc_done(struct srpc_client_rpc *rpc)
-{
- struct sfw_test_unit *tsu = rpc->crpc_priv;
- struct sfw_test_instance *tsi = tsu->tsu_instance;
- int done = 0;
-
- tsi->tsi_ops->tso_done_rpc(tsu, rpc);
-
- spin_lock(&tsi->tsi_lock);
-
- LASSERT(sfw_test_active(tsi));
- LASSERT(!list_empty(&rpc->crpc_list));
-
- list_del_init(&rpc->crpc_list);
-
- /* batch is stopping or loop is done or get error */
- if (tsi->tsi_stopping || !tsu->tsu_loop ||
- (rpc->crpc_status && tsi->tsi_stoptsu_onerr))
- done = 1;
-
- /* dec ref for poster */
- srpc_client_rpc_decref(rpc);
-
- spin_unlock(&tsi->tsi_lock);
-
- if (!done) {
- swi_schedule_workitem(&tsu->tsu_worker);
- return;
- }
-
- sfw_test_unit_done(tsu);
-}
-
-int
-sfw_create_test_rpc(struct sfw_test_unit *tsu, struct lnet_process_id peer,
- unsigned int features, int nblk, int blklen,
- struct srpc_client_rpc **rpcpp)
-{
- struct srpc_client_rpc *rpc = NULL;
- struct sfw_test_instance *tsi = tsu->tsu_instance;
-
- spin_lock(&tsi->tsi_lock);
-
- LASSERT(sfw_test_active(tsi));
- /* pick request from buffer */
- rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs,
- struct srpc_client_rpc, crpc_list);
- if (rpc) {
- LASSERT(nblk == rpc->crpc_bulk.bk_niov);
- list_del_init(&rpc->crpc_list);
- }
-
- spin_unlock(&tsi->tsi_lock);
-
- if (!rpc) {
- rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
- blklen, sfw_test_rpc_done,
- sfw_test_rpc_fini, tsu);
- } else {
- srpc_init_client_rpc(rpc, peer, tsi->tsi_service, nblk,
- blklen, sfw_test_rpc_done,
- sfw_test_rpc_fini, tsu);
- }
-
- if (!rpc) {
- CERROR("Can't create rpc for test %d\n", tsi->tsi_service);
- return -ENOMEM;
- }
-
- rpc->crpc_reqstmsg.msg_ses_feats = features;
- *rpcpp = rpc;
-
- return 0;
-}
-
-static void
-sfw_run_test(struct swi_workitem *wi)
-{
- struct sfw_test_unit *tsu = container_of(wi, struct sfw_test_unit, tsu_worker);
- struct sfw_test_instance *tsi = tsu->tsu_instance;
- struct srpc_client_rpc *rpc = NULL;
-
- if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc)) {
- LASSERT(!rpc);
- goto test_done;
- }
-
- LASSERT(rpc);
-
- spin_lock(&tsi->tsi_lock);
-
- if (tsi->tsi_stopping) {
- list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
- spin_unlock(&tsi->tsi_lock);
- goto test_done;
- }
-
- if (tsu->tsu_loop > 0)
- tsu->tsu_loop--;
-
- list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
- spin_unlock(&tsi->tsi_lock);
-
- spin_lock(&rpc->crpc_lock);
- rpc->crpc_timeout = rpc_timeout;
- srpc_post_rpc(rpc);
- spin_unlock(&rpc->crpc_lock);
- return;
-
-test_done:
- /*
- * No one can schedule me now since:
- * - previous RPC, if any, has done and
- * - no new RPC is initiated.
- * - my batch is still active; no one can run it again now.
- * Cancel pending schedules and prevent future schedule attempts:
- */
- sfw_test_unit_done(tsu);
-}
-
-static int
-sfw_run_batch(struct sfw_batch *tsb)
-{
- struct swi_workitem *wi;
- struct sfw_test_unit *tsu;
- struct sfw_test_instance *tsi;
-
- if (sfw_batch_active(tsb)) {
- CDEBUG(D_NET, "Batch already active: %llu (%d)\n",
- tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
- return 0;
- }
-
- list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
- if (!tsi->tsi_is_client) /* skip server instances */
- continue;
-
- LASSERT(!tsi->tsi_stopping);
- LASSERT(!sfw_test_active(tsi));
-
- atomic_inc(&tsb->bat_nactive);
-
- list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
- atomic_inc(&tsi->tsi_nactive);
- tsu->tsu_loop = tsi->tsi_loop;
- wi = &tsu->tsu_worker;
- swi_init_workitem(wi, sfw_run_test,
- lst_test_wq[lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
- swi_schedule_workitem(wi);
- }
- }
-
- return 0;
-}
-
-int
-sfw_stop_batch(struct sfw_batch *tsb, int force)
-{
- struct sfw_test_instance *tsi;
- struct srpc_client_rpc *rpc;
-
- if (!sfw_batch_active(tsb)) {
- CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id);
- return 0;
- }
-
- list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
- spin_lock(&tsi->tsi_lock);
-
- if (!tsi->tsi_is_client ||
- !sfw_test_active(tsi) || tsi->tsi_stopping) {
- spin_unlock(&tsi->tsi_lock);
- continue;
- }
-
- tsi->tsi_stopping = 1;
-
- if (!force) {
- spin_unlock(&tsi->tsi_lock);
- continue;
- }
-
- /* abort launched rpcs in the test */
- list_for_each_entry(rpc, &tsi->tsi_active_rpcs, crpc_list) {
- spin_lock(&rpc->crpc_lock);
-
- srpc_abort_rpc(rpc, -EINTR);
-
- spin_unlock(&rpc->crpc_lock);
- }
-
- spin_unlock(&tsi->tsi_lock);
- }
-
- return 0;
-}
-
-static int
-sfw_query_batch(struct sfw_batch *tsb, int testidx,
- struct srpc_batch_reply *reply)
-{
- struct sfw_test_instance *tsi;
-
- if (testidx < 0)
- return -EINVAL;
-
- if (!testidx) {
- reply->bar_active = atomic_read(&tsb->bat_nactive);
- return 0;
- }
-
- list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
- if (testidx-- > 1)
- continue;
-
- reply->bar_active = atomic_read(&tsi->tsi_nactive);
- return 0;
- }
-
- return -ENOENT;
-}
-
-void
-sfw_free_pages(struct srpc_server_rpc *rpc)
-{
- srpc_free_bulk(rpc->srpc_bulk);
- rpc->srpc_bulk = NULL;
-}
-
-int
-sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
- int sink)
-{
- LASSERT(!rpc->srpc_bulk);
- LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
-
- rpc->srpc_bulk = srpc_alloc_bulk(cpt, 0, npages, len, sink);
- if (!rpc->srpc_bulk)
- return -ENOMEM;
-
- return 0;
-}
-
-static int
-sfw_add_test(struct srpc_server_rpc *rpc)
-{
- struct sfw_session *sn = sfw_data.fw_session;
- struct srpc_test_reply *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
- struct srpc_test_reqst *request;
- int rc;
- struct sfw_batch *bat;
-
- request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
- reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
-
- if (!request->tsr_loop ||
- !request->tsr_concur ||
- request->tsr_sid.ses_nid == LNET_NID_ANY ||
- request->tsr_ndest > SFW_MAX_NDESTS ||
- (request->tsr_is_client && !request->tsr_ndest) ||
- request->tsr_concur > SFW_MAX_CONCUR ||
- request->tsr_service > SRPC_SERVICE_MAX_ID ||
- request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) {
- reply->tsr_status = EINVAL;
- return 0;
- }
-
- if (!sn || !sfw_sid_equal(request->tsr_sid, sn->sn_id) ||
- !sfw_find_test_case(request->tsr_service)) {
- reply->tsr_status = ENOENT;
- return 0;
- }
-
- bat = sfw_bid2batch(request->tsr_bid);
- if (!bat) {
- CERROR("dropping RPC %s from %s under memory pressure\n",
- rpc->srpc_scd->scd_svc->sv_name,
- libcfs_id2str(rpc->srpc_peer));
- return -ENOMEM;
- }
-
- if (sfw_batch_active(bat)) {
- reply->tsr_status = EBUSY;
- return 0;
- }
-
- if (request->tsr_is_client && !rpc->srpc_bulk) {
- /* rpc will be resumed later in sfw_bulk_ready */
- int npg = sfw_id_pages(request->tsr_ndest);
- int len;
-
- if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- len = npg * PAGE_SIZE;
-
- } else {
- len = sizeof(struct lnet_process_id_packed) *
- request->tsr_ndest;
- }
-
- return sfw_alloc_pages(rpc, CFS_CPT_ANY, npg, len, 1);
- }
-
- rc = sfw_add_test_instance(bat, rpc);
- CDEBUG(!rc ? D_NET : D_WARNING,
- "%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
- !rc ? "Added" : "Failed to add", request->tsr_service,
- request->tsr_is_client ? "client" : "server",
- request->tsr_loop, request->tsr_concur, request->tsr_ndest);
-
- reply->tsr_status = (rc < 0) ? -rc : rc;
- return 0;
-}
-
-static int
-sfw_control_batch(struct srpc_batch_reqst *request,
- struct srpc_batch_reply *reply)
-{
- struct sfw_session *sn = sfw_data.fw_session;
- int rc = 0;
- struct sfw_batch *bat;
-
- reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id;
-
- if (!sn || !sfw_sid_equal(request->bar_sid, sn->sn_id)) {
- reply->bar_status = ESRCH;
- return 0;
- }
-
- bat = sfw_find_batch(request->bar_bid);
- if (!bat) {
- reply->bar_status = ENOENT;
- return 0;
- }
-
- switch (request->bar_opc) {
- case SRPC_BATCH_OPC_RUN:
- rc = sfw_run_batch(bat);
- break;
-
- case SRPC_BATCH_OPC_STOP:
- rc = sfw_stop_batch(bat, request->bar_arg);
- break;
-
- case SRPC_BATCH_OPC_QUERY:
- rc = sfw_query_batch(bat, request->bar_testidx, reply);
- break;
-
- default:
- return -EINVAL; /* drop it */
- }
-
- reply->bar_status = (rc < 0) ? -rc : rc;
- return 0;
-}
-
-static int
-sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- struct srpc_msg *reply = &rpc->srpc_replymsg;
- struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
- unsigned int features = LST_FEATS_MASK;
- int rc = 0;
-
- LASSERT(!sfw_data.fw_active_srpc);
- LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
-
- spin_lock(&sfw_data.fw_lock);
-
- if (sfw_data.fw_shuttingdown) {
- spin_unlock(&sfw_data.fw_lock);
- return -ESHUTDOWN;
- }
-
- /* Remove timer to avoid racing with it or expiring active session */
- if (sfw_del_session_timer()) {
- CERROR("dropping RPC %s from %s: racing with expiry timer\n",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- spin_unlock(&sfw_data.fw_lock);
- return -EAGAIN;
- }
-
- sfw_data.fw_active_srpc = rpc;
- spin_unlock(&sfw_data.fw_lock);
-
- sfw_unpack_message(request);
- LASSERT(request->msg_type == srpc_service2request(sv->sv_id));
-
- /* rpc module should have checked this */
- LASSERT(request->msg_version == SRPC_MSG_VERSION);
-
- if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
- sv->sv_id != SRPC_SERVICE_DEBUG) {
- struct sfw_session *sn = sfw_data.fw_session;
-
- if (sn &&
- sn->sn_features != request->msg_ses_feats) {
- CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
- request->msg_ses_feats, sn->sn_features);
- reply->msg_body.reply.status = EPROTO;
- reply->msg_body.reply.sid = sn->sn_id;
- goto out;
- }
-
- } else if (request->msg_ses_feats & ~LST_FEATS_MASK) {
- /*
- * NB: at this point, old version will ignore features and
- * create new session anyway, so console should be able
- * to handle this
- */
- reply->msg_body.reply.status = EPROTO;
- goto out;
- }
-
- switch (sv->sv_id) {
- default:
- LBUG();
- case SRPC_SERVICE_TEST:
- rc = sfw_add_test(rpc);
- break;
-
- case SRPC_SERVICE_BATCH:
- rc = sfw_control_batch(&request->msg_body.bat_reqst,
- &reply->msg_body.bat_reply);
- break;
-
- case SRPC_SERVICE_QUERY_STAT:
- rc = sfw_get_stats(&request->msg_body.stat_reqst,
- &reply->msg_body.stat_reply);
- break;
-
- case SRPC_SERVICE_DEBUG:
- rc = sfw_debug_session(&request->msg_body.dbg_reqst,
- &reply->msg_body.dbg_reply);
- break;
-
- case SRPC_SERVICE_MAKE_SESSION:
- rc = sfw_make_session(&request->msg_body.mksn_reqst,
- &reply->msg_body.mksn_reply);
- break;
-
- case SRPC_SERVICE_REMOVE_SESSION:
- rc = sfw_remove_session(&request->msg_body.rmsn_reqst,
- &reply->msg_body.rmsn_reply);
- break;
- }
-
- if (sfw_data.fw_session)
- features = sfw_data.fw_session->sn_features;
- out:
- reply->msg_ses_feats = features;
- rpc->srpc_done = sfw_server_rpc_done;
- spin_lock(&sfw_data.fw_lock);
-
- if (!sfw_data.fw_shuttingdown)
- sfw_add_session_timer();
-
- sfw_data.fw_active_srpc = NULL;
- spin_unlock(&sfw_data.fw_lock);
- return rc;
-}
-
-static int
-sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- int rc;
-
- LASSERT(rpc->srpc_bulk);
- LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
- LASSERT(!sfw_data.fw_active_srpc);
- LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
-
- spin_lock(&sfw_data.fw_lock);
-
- if (status) {
- CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
- spin_unlock(&sfw_data.fw_lock);
- return -EIO;
- }
-
- if (sfw_data.fw_shuttingdown) {
- spin_unlock(&sfw_data.fw_lock);
- return -ESHUTDOWN;
- }
-
- if (sfw_del_session_timer()) {
- CERROR("dropping RPC %s from %s: racing with expiry timer\n",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- spin_unlock(&sfw_data.fw_lock);
- return -EAGAIN;
- }
-
- sfw_data.fw_active_srpc = rpc;
- spin_unlock(&sfw_data.fw_lock);
-
- rc = sfw_add_test(rpc);
-
- spin_lock(&sfw_data.fw_lock);
-
- if (!sfw_data.fw_shuttingdown)
- sfw_add_session_timer();
-
- sfw_data.fw_active_srpc = NULL;
- spin_unlock(&sfw_data.fw_lock);
- return rc;
-}
-
-struct srpc_client_rpc *
-sfw_create_rpc(struct lnet_process_id peer, int service,
- unsigned int features, int nbulkiov, int bulklen,
- void (*done)(struct srpc_client_rpc *), void *priv)
-{
- struct srpc_client_rpc *rpc = NULL;
-
- spin_lock(&sfw_data.fw_lock);
-
- LASSERT(!sfw_data.fw_shuttingdown);
- LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
-
- if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
- rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- struct srpc_client_rpc, crpc_list);
- list_del(&rpc->crpc_list);
-
- srpc_init_client_rpc(rpc, peer, service, 0, 0,
- done, sfw_client_rpc_fini, priv);
- }
-
- spin_unlock(&sfw_data.fw_lock);
-
- if (!rpc) {
- rpc = srpc_create_client_rpc(peer, service,
- nbulkiov, bulklen, done,
- nbulkiov ? NULL :
- sfw_client_rpc_fini,
- priv);
- }
-
- if (rpc) /* "session" is concept in framework */
- rpc->crpc_reqstmsg.msg_ses_feats = features;
-
- return rpc;
-}
-
-void
-sfw_unpack_message(struct srpc_msg *msg)
-{
- if (msg->msg_magic == SRPC_MSG_MAGIC)
- return; /* no flipping needed */
-
- /* srpc module should guarantee I wouldn't get crap */
- LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
-
- if (msg->msg_type == SRPC_MSG_STAT_REQST) {
- struct srpc_stat_reqst *req = &msg->msg_body.stat_reqst;
-
- __swab32s(&req->str_type);
- __swab64s(&req->str_rpyid);
- sfw_unpack_sid(req->str_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_STAT_REPLY) {
- struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
-
- __swab32s(&rep->str_status);
- sfw_unpack_sid(rep->str_sid);
- sfw_unpack_fw_counters(rep->str_fw);
- sfw_unpack_rpc_counters(rep->str_rpc);
- sfw_unpack_lnet_counters(rep->str_lnet);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_MKSN_REQST) {
- struct srpc_mksn_reqst *req = &msg->msg_body.mksn_reqst;
-
- __swab64s(&req->mksn_rpyid);
- __swab32s(&req->mksn_force);
- sfw_unpack_sid(req->mksn_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_MKSN_REPLY) {
- struct srpc_mksn_reply *rep = &msg->msg_body.mksn_reply;
-
- __swab32s(&rep->mksn_status);
- __swab32s(&rep->mksn_timeout);
- sfw_unpack_sid(rep->mksn_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_RMSN_REQST) {
- struct srpc_rmsn_reqst *req = &msg->msg_body.rmsn_reqst;
-
- __swab64s(&req->rmsn_rpyid);
- sfw_unpack_sid(req->rmsn_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_RMSN_REPLY) {
- struct srpc_rmsn_reply *rep = &msg->msg_body.rmsn_reply;
-
- __swab32s(&rep->rmsn_status);
- sfw_unpack_sid(rep->rmsn_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_DEBUG_REQST) {
- struct srpc_debug_reqst *req = &msg->msg_body.dbg_reqst;
-
- __swab64s(&req->dbg_rpyid);
- __swab32s(&req->dbg_flags);
- sfw_unpack_sid(req->dbg_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) {
- struct srpc_debug_reply *rep = &msg->msg_body.dbg_reply;
-
- __swab32s(&rep->dbg_nbatch);
- __swab32s(&rep->dbg_timeout);
- sfw_unpack_sid(rep->dbg_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_BATCH_REQST) {
- struct srpc_batch_reqst *req = &msg->msg_body.bat_reqst;
-
- __swab32s(&req->bar_opc);
- __swab64s(&req->bar_rpyid);
- __swab32s(&req->bar_testidx);
- __swab32s(&req->bar_arg);
- sfw_unpack_sid(req->bar_sid);
- __swab64s(&req->bar_bid.bat_id);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_BATCH_REPLY) {
- struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
-
- __swab32s(&rep->bar_status);
- sfw_unpack_sid(rep->bar_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_TEST_REQST) {
- struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
-
- __swab64s(&req->tsr_rpyid);
- __swab64s(&req->tsr_bulkid);
- __swab32s(&req->tsr_loop);
- __swab32s(&req->tsr_ndest);
- __swab32s(&req->tsr_concur);
- __swab32s(&req->tsr_service);
- sfw_unpack_sid(req->tsr_sid);
- __swab64s(&req->tsr_bid.bat_id);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_TEST_REPLY) {
- struct srpc_test_reply *rep = &msg->msg_body.tes_reply;
-
- __swab32s(&rep->tsr_status);
- sfw_unpack_sid(rep->tsr_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_JOIN_REQST) {
- struct srpc_join_reqst *req = &msg->msg_body.join_reqst;
-
- __swab64s(&req->join_rpyid);
- sfw_unpack_sid(req->join_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_JOIN_REPLY) {
- struct srpc_join_reply *rep = &msg->msg_body.join_reply;
-
- __swab32s(&rep->join_status);
- __swab32s(&rep->join_timeout);
- sfw_unpack_sid(rep->join_sid);
- return;
- }
-
- LBUG();
-}
-
-void
-sfw_abort_rpc(struct srpc_client_rpc *rpc)
-{
- LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
- LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
-
- spin_lock(&rpc->crpc_lock);
- srpc_abort_rpc(rpc, -EINTR);
- spin_unlock(&rpc->crpc_lock);
-}
-
-void
-sfw_post_rpc(struct srpc_client_rpc *rpc)
-{
- spin_lock(&rpc->crpc_lock);
-
- LASSERT(!rpc->crpc_closed);
- LASSERT(!rpc->crpc_aborted);
- LASSERT(list_empty(&rpc->crpc_list));
- LASSERT(!sfw_data.fw_shuttingdown);
-
- rpc->crpc_timeout = rpc_timeout;
- srpc_post_rpc(rpc);
-
- spin_unlock(&rpc->crpc_lock);
-}
-
-static struct srpc_service sfw_services[] = {
- {
- /* sv_id */ SRPC_SERVICE_DEBUG,
- /* sv_name */ "debug",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_QUERY_STAT,
- /* sv_name */ "query stats",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_MAKE_SESSION,
- /* sv_name */ "make session",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_REMOVE_SESSION,
- /* sv_name */ "remove session",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_BATCH,
- /* sv_name */ "batch service",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_TEST,
- /* sv_name */ "test service",
- 0
- },
- {
- /* sv_id */ 0,
- /* sv_name */ NULL,
- 0
- }
-};
-
-int
-sfw_startup(void)
-{
- int i;
- int rc;
- int error;
- struct srpc_service *sv;
- struct sfw_test_case *tsc;
-
- if (session_timeout < 0) {
- CERROR("Session timeout must be non-negative: %d\n",
- session_timeout);
- return -EINVAL;
- }
-
- if (rpc_timeout < 0) {
- CERROR("RPC timeout must be non-negative: %d\n",
- rpc_timeout);
- return -EINVAL;
- }
-
- if (!session_timeout)
- CWARN("Zero session_timeout specified - test sessions never expire.\n");
-
- if (!rpc_timeout)
- CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
-
- memset(&sfw_data, 0, sizeof(struct smoketest_framework));
-
- sfw_data.fw_session = NULL;
- sfw_data.fw_active_srpc = NULL;
- spin_lock_init(&sfw_data.fw_lock);
- atomic_set(&sfw_data.fw_nzombies, 0);
- INIT_LIST_HEAD(&sfw_data.fw_tests);
- INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
- INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
-
- brw_init_test_client();
- brw_init_test_service();
- rc = sfw_register_test(&brw_test_service, &brw_test_client);
- LASSERT(!rc);
-
- ping_init_test_client();
- ping_init_test_service();
- rc = sfw_register_test(&ping_test_service, &ping_test_client);
- LASSERT(!rc);
-
- error = 0;
- list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
- sv = tsc->tsc_srv_service;
-
- rc = srpc_add_service(sv);
- LASSERT(rc != -EBUSY);
- if (rc) {
- CWARN("Failed to add %s service: %d\n",
- sv->sv_name, rc);
- error = rc;
- }
- }
-
- for (i = 0; ; i++) {
- sv = &sfw_services[i];
- if (!sv->sv_name)
- break;
-
- sv->sv_bulk_ready = NULL;
- sv->sv_handler = sfw_handle_server_rpc;
- sv->sv_wi_total = SFW_FRWK_WI_MAX;
- if (sv->sv_id == SRPC_SERVICE_TEST)
- sv->sv_bulk_ready = sfw_bulk_ready;
-
- rc = srpc_add_service(sv);
- LASSERT(rc != -EBUSY);
- if (rc) {
- CWARN("Failed to add %s service: %d\n",
- sv->sv_name, rc);
- error = rc;
- }
-
- /* about to sfw_shutdown, no need to add buffer */
- if (error)
- continue;
-
- rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
- if (rc) {
- CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
- sv->sv_name, sv->sv_wi_total, rc);
- error = -ENOMEM;
- }
- }
-
- if (error)
- sfw_shutdown();
- return error;
-}
-
-void
-sfw_shutdown(void)
-{
- struct srpc_service *sv;
- struct sfw_test_case *tsc;
- int i;
-
- spin_lock(&sfw_data.fw_lock);
-
- sfw_data.fw_shuttingdown = 1;
- lst_wait_until(!sfw_data.fw_active_srpc, sfw_data.fw_lock,
- "waiting for active RPC to finish.\n");
-
- if (sfw_del_session_timer())
- lst_wait_until(!sfw_data.fw_session, sfw_data.fw_lock,
- "waiting for session timer to explode.\n");
-
- sfw_deactivate_session();
- lst_wait_until(!atomic_read(&sfw_data.fw_nzombies),
- sfw_data.fw_lock,
- "waiting for %d zombie sessions to die.\n",
- atomic_read(&sfw_data.fw_nzombies));
-
- spin_unlock(&sfw_data.fw_lock);
-
- for (i = 0; ; i++) {
- sv = &sfw_services[i];
- if (!sv->sv_name)
- break;
-
- srpc_shutdown_service(sv);
- srpc_remove_service(sv);
- }
-
- list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
- sv = tsc->tsc_srv_service;
- srpc_shutdown_service(sv);
- srpc_remove_service(sv);
- }
-
- while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
- struct srpc_client_rpc *rpc;
-
- rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- struct srpc_client_rpc, crpc_list);
- list_del(&rpc->crpc_list);
-
- kfree(rpc);
- }
-
- for (i = 0; ; i++) {
- sv = &sfw_services[i];
- if (!sv->sv_name)
- break;
-
- srpc_wait_service_shutdown(sv);
- }
-
- while (!list_empty(&sfw_data.fw_tests)) {
- tsc = list_entry(sfw_data.fw_tests.next,
- struct sfw_test_case, tsc_list);
-
- srpc_wait_service_shutdown(tsc->tsc_srv_service);
-
- list_del(&tsc->tsc_list);
- kfree(tsc);
- }
-}
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
deleted file mode 100644
index 7359aa56d9b3..000000000000
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "selftest.h"
-#include "console.h"
-
-enum {
- LST_INIT_NONE = 0,
- LST_INIT_WI_SERIAL,
- LST_INIT_WI_TEST,
- LST_INIT_RPC,
- LST_INIT_FW,
- LST_INIT_CONSOLE
-};
-
-static int lst_init_step = LST_INIT_NONE;
-
-struct workqueue_struct *lst_serial_wq;
-struct workqueue_struct **lst_test_wq;
-
-static void
-lnet_selftest_exit(void)
-{
- int i;
-
- switch (lst_init_step) {
- case LST_INIT_CONSOLE:
- lstcon_console_fini();
- /* fall through */
- case LST_INIT_FW:
- sfw_shutdown();
- /* fall through */
- case LST_INIT_RPC:
- srpc_shutdown();
- /* fall through */
- case LST_INIT_WI_TEST:
- for (i = 0;
- i < cfs_cpt_number(lnet_cpt_table()); i++) {
- if (!lst_test_wq[i])
- continue;
- destroy_workqueue(lst_test_wq[i]);
- }
- kvfree(lst_test_wq);
- lst_test_wq = NULL;
- /* fall through */
- case LST_INIT_WI_SERIAL:
- destroy_workqueue(lst_serial_wq);
- lst_serial_wq = NULL;
- case LST_INIT_NONE:
- break;
- default:
- LBUG();
- }
-}
-
-static int
-lnet_selftest_init(void)
-{
- int nscheds;
- int rc = -ENOMEM;
- int i;
-
- lst_serial_wq = alloc_ordered_workqueue("lst_s", 0);
- if (!lst_serial_wq) {
- CERROR("Failed to create serial WI scheduler for LST\n");
- return -ENOMEM;
- }
- lst_init_step = LST_INIT_WI_SERIAL;
-
- nscheds = cfs_cpt_number(lnet_cpt_table());
- lst_test_wq = kvmalloc_array(nscheds, sizeof(lst_test_wq[0]),
- GFP_KERNEL | __GFP_ZERO);
- if (!lst_test_wq) {
- rc = -ENOMEM;
- goto error;
- }
-
- lst_init_step = LST_INIT_WI_TEST;
- for (i = 0; i < nscheds; i++) {
- int nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
- struct workqueue_attrs attrs = {0};
- cpumask_var_t *mask = cfs_cpt_cpumask(lnet_cpt_table(), i);
-
- /* reserve at least one CPU for LND */
- nthrs = max(nthrs - 1, 1);
- lst_test_wq[i] = alloc_workqueue("lst_t", WQ_UNBOUND, nthrs);
- if (!lst_test_wq[i]) {
- CWARN("Failed to create CPU partition affinity WI scheduler %d for LST\n",
- i);
- rc = -ENOMEM;
- goto error;
- }
-
- if (mask && alloc_cpumask_var(&attrs.cpumask, GFP_KERNEL)) {
- cpumask_copy(attrs.cpumask, *mask);
- apply_workqueue_attrs(lst_test_wq[i], &attrs);
- free_cpumask_var(attrs.cpumask);
- }
- }
-
- rc = srpc_startup();
- if (rc) {
- CERROR("LST can't startup rpc\n");
- goto error;
- }
- lst_init_step = LST_INIT_RPC;
-
- rc = sfw_startup();
- if (rc) {
- CERROR("LST can't startup framework\n");
- goto error;
- }
- lst_init_step = LST_INIT_FW;
-
- rc = lstcon_console_init();
- if (rc) {
- CERROR("LST can't startup console\n");
- goto error;
- }
- lst_init_step = LST_INIT_CONSOLE;
- return 0;
-error:
- lnet_selftest_exit();
- return rc;
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("LNet Selftest");
-MODULE_VERSION("2.7.0");
-MODULE_LICENSE("GPL");
-
-module_init(lnet_selftest_init);
-module_exit(lnet_selftest_exit);
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
deleted file mode 100644
index f54bd630dbf8..000000000000
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ /dev/null
@@ -1,228 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/conctl.c
- *
- * Test client & Server
- *
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-#include "selftest.h"
-
-#define LST_PING_TEST_MAGIC 0xbabeface
-
-static int ping_srv_workitems = SFW_TEST_WI_MAX;
-module_param(ping_srv_workitems, int, 0644);
-MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems");
-
-struct lst_ping_data {
- spinlock_t pnd_lock; /* serialize */
- int pnd_counter; /* sequence counter */
-};
-
-static struct lst_ping_data lst_ping_data;
-
-static int
-ping_client_init(struct sfw_test_instance *tsi)
-{
- struct sfw_session *sn = tsi->tsi_batch->bat_session;
-
- LASSERT(tsi->tsi_is_client);
- LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK));
-
- spin_lock_init(&lst_ping_data.pnd_lock);
- lst_ping_data.pnd_counter = 0;
-
- return 0;
-}
-
-static void
-ping_client_fini(struct sfw_test_instance *tsi)
-{
- struct sfw_session *sn = tsi->tsi_batch->bat_session;
- int errors;
-
- LASSERT(sn);
- LASSERT(tsi->tsi_is_client);
-
- errors = atomic_read(&sn->sn_ping_errors);
- if (errors)
- CWARN("%d pings have failed.\n", errors);
- else
- CDEBUG(D_NET, "Ping test finished OK.\n");
-}
-
-static int
-ping_client_prep_rpc(struct sfw_test_unit *tsu, struct lnet_process_id dest,
- struct srpc_client_rpc **rpc)
-{
- struct srpc_ping_reqst *req;
- struct sfw_test_instance *tsi = tsu->tsu_instance;
- struct sfw_session *sn = tsi->tsi_batch->bat_session;
- struct timespec64 ts;
- int rc;
-
- LASSERT(sn);
- LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
-
- rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, 0, 0, rpc);
- if (rc)
- return rc;
-
- req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst;
-
- req->pnr_magic = LST_PING_TEST_MAGIC;
-
- spin_lock(&lst_ping_data.pnd_lock);
- req->pnr_seq = lst_ping_data.pnd_counter++;
- spin_unlock(&lst_ping_data.pnd_lock);
-
- ktime_get_real_ts64(&ts);
- req->pnr_time_sec = ts.tv_sec;
- req->pnr_time_usec = ts.tv_nsec / NSEC_PER_USEC;
-
- return rc;
-}
-
-static void
-ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
-{
- struct sfw_test_instance *tsi = tsu->tsu_instance;
- struct sfw_session *sn = tsi->tsi_batch->bat_session;
- struct srpc_ping_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
- struct srpc_ping_reply *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
- struct timespec64 ts;
-
- LASSERT(sn);
-
- if (rpc->crpc_status) {
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
- atomic_inc(&sn->sn_ping_errors);
- CERROR("Unable to ping %s (%d): %d\n",
- libcfs_id2str(rpc->crpc_dest),
- reqst->pnr_seq, rpc->crpc_status);
- return;
- }
-
- if (rpc->crpc_replymsg.msg_magic != SRPC_MSG_MAGIC) {
- __swab32s(&reply->pnr_seq);
- __swab32s(&reply->pnr_magic);
- __swab32s(&reply->pnr_status);
- }
-
- if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
- rpc->crpc_status = -EBADMSG;
- atomic_inc(&sn->sn_ping_errors);
- CERROR("Bad magic %u from %s, %u expected.\n",
- reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
- LST_PING_TEST_MAGIC);
- return;
- }
-
- if (reply->pnr_seq != reqst->pnr_seq) {
- rpc->crpc_status = -EBADMSG;
- atomic_inc(&sn->sn_ping_errors);
- CERROR("Bad seq %u from %s, %u expected.\n",
- reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
- reqst->pnr_seq);
- return;
- }
-
- ktime_get_real_ts64(&ts);
- CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
- (unsigned int)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
- (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
-}
-
-static int
-ping_server_handle(struct srpc_server_rpc *rpc)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- struct srpc_msg *replymsg = &rpc->srpc_replymsg;
- struct srpc_ping_reqst *req = &reqstmsg->msg_body.ping_reqst;
- struct srpc_ping_reply *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
-
- LASSERT(sv->sv_id == SRPC_SERVICE_PING);
-
- if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) {
- LASSERT(reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));
-
- __swab32s(&req->pnr_seq);
- __swab32s(&req->pnr_magic);
- __swab64s(&req->pnr_time_sec);
- __swab64s(&req->pnr_time_usec);
- }
- LASSERT(reqstmsg->msg_type == srpc_service2request(sv->sv_id));
-
- if (req->pnr_magic != LST_PING_TEST_MAGIC) {
- CERROR("Unexpected magic %08x from %s\n",
- req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
- return -EINVAL;
- }
-
- rep->pnr_seq = req->pnr_seq;
- rep->pnr_magic = LST_PING_TEST_MAGIC;
-
- if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) {
- replymsg->msg_ses_feats = LST_FEATS_MASK;
- rep->pnr_status = EPROTO;
- return 0;
- }
-
- replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
-
- CDEBUG(D_NET, "Get ping %d from %s\n",
- req->pnr_seq, libcfs_id2str(rpc->srpc_peer));
- return 0;
-}
-
-struct sfw_test_client_ops ping_test_client;
-
-void ping_init_test_client(void)
-{
- ping_test_client.tso_init = ping_client_init;
- ping_test_client.tso_fini = ping_client_fini;
- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
- ping_test_client.tso_done_rpc = ping_client_done_rpc;
-}
-
-struct srpc_service ping_test_service;
-
-void ping_init_test_service(void)
-{
- ping_test_service.sv_id = SRPC_SERVICE_PING;
- ping_test_service.sv_name = "ping_test";
- ping_test_service.sv_handler = ping_server_handle;
- ping_test_service.sv_wi_total = ping_srv_workitems;
-}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
deleted file mode 100644
index 9613b0a77007..000000000000
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ /dev/null
@@ -1,1682 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/rpc.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- *
- * 2012-05-13: Liang Zhen <liang@whamcloud.com>
- * - percpt data for service to improve smp performance
- * - code cleanup
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "selftest.h"
-
-enum srpc_state {
- SRPC_STATE_NONE,
- SRPC_STATE_NI_INIT,
- SRPC_STATE_EQ_INIT,
- SRPC_STATE_RUNNING,
- SRPC_STATE_STOPPING,
-};
-
-static struct smoketest_rpc {
- spinlock_t rpc_glock; /* global lock */
- struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
- struct lnet_handle_eq rpc_lnet_eq; /* _the_ LNet event queue */
- enum srpc_state rpc_state;
- struct srpc_counters rpc_counters;
- __u64 rpc_matchbits; /* matchbits counter */
-} srpc_data;
-
-static inline int
-srpc_serv_portal(int svc_id)
-{
- return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
- SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
-}
-
-/* forward ref's */
-void srpc_handle_rpc(struct swi_workitem *wi);
-
-void srpc_get_counters(struct srpc_counters *cnt)
-{
- spin_lock(&srpc_data.rpc_glock);
- *cnt = srpc_data.rpc_counters;
- spin_unlock(&srpc_data.rpc_glock);
-}
-
-void srpc_set_counters(const struct srpc_counters *cnt)
-{
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters = *cnt;
- spin_unlock(&srpc_data.rpc_glock);
-}
-
-static int
-srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
- int nob)
-{
- LASSERT(off < PAGE_SIZE);
- LASSERT(nob > 0 && nob <= PAGE_SIZE);
-
- bk->bk_iovs[i].bv_offset = off;
- bk->bk_iovs[i].bv_page = pg;
- bk->bk_iovs[i].bv_len = nob;
- return nob;
-}
-
-void
-srpc_free_bulk(struct srpc_bulk *bk)
-{
- int i;
- struct page *pg;
-
- LASSERT(bk);
-
- for (i = 0; i < bk->bk_niov; i++) {
- pg = bk->bk_iovs[i].bv_page;
- if (!pg)
- break;
-
- __free_page(pg);
- }
-
- kfree(bk);
-}
-
-struct srpc_bulk *
-srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg,
- unsigned int bulk_len, int sink)
-{
- struct srpc_bulk *bk;
- int i;
-
- LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
-
- bk = kzalloc_cpt(offsetof(struct srpc_bulk, bk_iovs[bulk_npg]),
- GFP_KERNEL, cpt);
- if (!bk) {
- CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
- return NULL;
- }
-
- memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
- bk->bk_sink = sink;
- bk->bk_len = bulk_len;
- bk->bk_niov = bulk_npg;
-
- for (i = 0; i < bulk_npg; i++) {
- struct page *pg;
- int nob;
-
- pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt),
- GFP_KERNEL, 0);
- if (!pg) {
- CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
- srpc_free_bulk(bk);
- return NULL;
- }
-
- nob = min_t(unsigned int, bulk_off + bulk_len, PAGE_SIZE) -
- bulk_off;
- srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
- bulk_len -= nob;
- bulk_off = 0;
- }
-
- return bk;
-}
-
-static inline __u64
-srpc_next_id(void)
-{
- __u64 id;
-
- spin_lock(&srpc_data.rpc_glock);
- id = srpc_data.rpc_matchbits++;
- spin_unlock(&srpc_data.rpc_glock);
- return id;
-}
-
-static void
-srpc_init_server_rpc(struct srpc_server_rpc *rpc,
- struct srpc_service_cd *scd,
- struct srpc_buffer *buffer)
-{
- memset(rpc, 0, sizeof(*rpc));
- swi_init_workitem(&rpc->srpc_wi, srpc_handle_rpc,
- srpc_serv_is_framework(scd->scd_svc) ?
- lst_serial_wq : lst_test_wq[scd->scd_cpt]);
-
- rpc->srpc_ev.ev_fired = 1; /* no event expected now */
-
- rpc->srpc_scd = scd;
- rpc->srpc_reqstbuf = buffer;
- rpc->srpc_peer = buffer->buf_peer;
- rpc->srpc_self = buffer->buf_self;
- LNetInvalidateMDHandle(&rpc->srpc_replymdh);
-}
-
-static void
-srpc_service_fini(struct srpc_service *svc)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- struct srpc_buffer *buf;
- struct list_head *q;
- int i;
-
- if (!svc->sv_cpt_data)
- return;
-
- cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
- while (1) {
- if (!list_empty(&scd->scd_buf_posted))
- q = &scd->scd_buf_posted;
- else if (!list_empty(&scd->scd_buf_blocked))
- q = &scd->scd_buf_blocked;
- else
- break;
-
- while (!list_empty(q)) {
- buf = list_entry(q->next, struct srpc_buffer,
- buf_list);
- list_del(&buf->buf_list);
- kfree(buf);
- }
- }
-
- LASSERT(list_empty(&scd->scd_rpc_active));
-
- while (!list_empty(&scd->scd_rpc_free)) {
- rpc = list_entry(scd->scd_rpc_free.next,
- struct srpc_server_rpc,
- srpc_list);
- list_del(&rpc->srpc_list);
- kfree(rpc);
- }
- }
-
- cfs_percpt_free(svc->sv_cpt_data);
- svc->sv_cpt_data = NULL;
-}
-
-static int
-srpc_service_nrpcs(struct srpc_service *svc)
-{
- int nrpcs = svc->sv_wi_total / svc->sv_ncpts;
-
- return srpc_serv_is_framework(svc) ?
- max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
-}
-
-void srpc_add_buffer(struct swi_workitem *wi);
-
-static int
-srpc_service_init(struct srpc_service *svc)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- int nrpcs;
- int i;
- int j;
-
- svc->sv_shuttingdown = 0;
-
- svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(**svc->sv_cpt_data));
- if (!svc->sv_cpt_data)
- return -ENOMEM;
-
- svc->sv_ncpts = srpc_serv_is_framework(svc) ?
- 1 : cfs_cpt_number(lnet_cpt_table());
- nrpcs = srpc_service_nrpcs(svc);
-
- cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
- scd->scd_cpt = i;
- scd->scd_svc = svc;
- spin_lock_init(&scd->scd_lock);
- INIT_LIST_HEAD(&scd->scd_rpc_free);
- INIT_LIST_HEAD(&scd->scd_rpc_active);
- INIT_LIST_HEAD(&scd->scd_buf_posted);
- INIT_LIST_HEAD(&scd->scd_buf_blocked);
-
- scd->scd_ev.ev_data = scd;
- scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
-
- /*
- * NB: don't use lst_serial_wq for adding buffer,
- * see details in srpc_service_add_buffers()
- */
- swi_init_workitem(&scd->scd_buf_wi,
- srpc_add_buffer, lst_test_wq[i]);
-
- if (i && srpc_serv_is_framework(svc)) {
- /*
- * NB: framework service only needs srpc_service_cd for
- * one partition, but we allocate for all to make
- * it easier to implement, it will waste a little
- * memory but nobody should care about this
- */
- continue;
- }
-
- for (j = 0; j < nrpcs; j++) {
- rpc = kzalloc_cpt(sizeof(*rpc), GFP_NOFS, i);
- if (!rpc) {
- srpc_service_fini(svc);
- return -ENOMEM;
- }
- list_add(&rpc->srpc_list, &scd->scd_rpc_free);
- }
- }
-
- return 0;
-}
-
-int
-srpc_add_service(struct srpc_service *sv)
-{
- int id = sv->sv_id;
-
- LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);
-
- if (srpc_service_init(sv))
- return -ENOMEM;
-
- spin_lock(&srpc_data.rpc_glock);
-
- LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
-
- if (srpc_data.rpc_services[id]) {
- spin_unlock(&srpc_data.rpc_glock);
- goto failed;
- }
-
- srpc_data.rpc_services[id] = sv;
- spin_unlock(&srpc_data.rpc_glock);
-
- CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
- return 0;
-
- failed:
- srpc_service_fini(sv);
- return -EBUSY;
-}
-
-int
-srpc_remove_service(struct srpc_service *sv)
-{
- int id = sv->sv_id;
-
- spin_lock(&srpc_data.rpc_glock);
-
- if (srpc_data.rpc_services[id] != sv) {
- spin_unlock(&srpc_data.rpc_glock);
- return -ENOENT;
- }
-
- srpc_data.rpc_services[id] = NULL;
- spin_unlock(&srpc_data.rpc_glock);
- return 0;
-}
-
-static int
-srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
- int len, int options, struct lnet_process_id peer,
- struct lnet_handle_md *mdh, struct srpc_event *ev)
-{
- int rc;
- struct lnet_md md;
- struct lnet_handle_me meh;
-
- rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
- local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
- if (rc) {
- CERROR("LNetMEAttach failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- return -ENOMEM;
- }
-
- md.threshold = 1;
- md.user_ptr = ev;
- md.start = buf;
- md.length = len;
- md.options = options;
- md.eq_handle = srpc_data.rpc_lnet_eq;
-
- rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
- if (rc) {
- CERROR("LNetMDAttach failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
-
- rc = LNetMEUnlink(meh);
- LASSERT(!rc);
- return -ENOMEM;
- }
-
- CDEBUG(D_NET, "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
- libcfs_id2str(peer), portal, matchbits);
- return 0;
-}
-
-static int
-srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
- int options, struct lnet_process_id peer,
- lnet_nid_t self, struct lnet_handle_md *mdh,
- struct srpc_event *ev)
-{
- int rc;
- struct lnet_md md;
-
- md.user_ptr = ev;
- md.start = buf;
- md.length = len;
- md.eq_handle = srpc_data.rpc_lnet_eq;
- md.threshold = options & LNET_MD_OP_GET ? 2 : 1;
- md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
-
- rc = LNetMDBind(md, LNET_UNLINK, mdh);
- if (rc) {
- CERROR("LNetMDBind failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- return -ENOMEM;
- }
-
- /*
- * this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
- * they're only meaningful for MDs attached to an ME (i.e. passive
- * buffers...
- */
- if (options & LNET_MD_OP_PUT) {
- rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
- portal, matchbits, 0, 0);
- } else {
- LASSERT(options & LNET_MD_OP_GET);
-
- rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
- }
-
- if (rc) {
- CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
- options & LNET_MD_OP_PUT ? "Put" : "Get",
- libcfs_id2str(peer), portal, matchbits, rc);
-
- /*
- * The forthcoming unlink event will complete this operation
- * with failure, so fall through and return success here.
- */
- rc = LNetMDUnlink(*mdh);
- LASSERT(!rc);
- } else {
- CDEBUG(D_NET, "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
- libcfs_id2str(peer), portal, matchbits);
- }
- return 0;
-}
-
-static int
-srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
- struct lnet_handle_md *mdh, struct srpc_event *ev)
-{
- struct lnet_process_id any = { 0 };
-
- any.nid = LNET_NID_ANY;
- any.pid = LNET_PID_ANY;
-
- return srpc_post_passive_rdma(srpc_serv_portal(service),
- local, service, buf, len,
- LNET_MD_OP_PUT, any, mdh, ev);
-}
-
-static int
-srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
-__must_hold(&scd->scd_lock)
-{
- struct srpc_service *sv = scd->scd_svc;
- struct srpc_msg *msg = &buf->buf_msg;
- int rc;
-
- LNetInvalidateMDHandle(&buf->buf_mdh);
- list_add(&buf->buf_list, &scd->scd_buf_posted);
- scd->scd_buf_nposted++;
- spin_unlock(&scd->scd_lock);
-
- rc = srpc_post_passive_rqtbuf(sv->sv_id,
- !srpc_serv_is_framework(sv),
- msg, sizeof(*msg), &buf->buf_mdh,
- &scd->scd_ev);
-
- /*
- * At this point, a RPC (new or delayed) may have arrived in
- * msg and its event handler has been called. So we must add
- * buf to scd_buf_posted _before_ dropping scd_lock
- */
- spin_lock(&scd->scd_lock);
-
- if (!rc) {
- if (!sv->sv_shuttingdown)
- return 0;
-
- spin_unlock(&scd->scd_lock);
- /*
- * srpc_shutdown_service might have tried to unlink me
- * when my buf_mdh was still invalid
- */
- LNetMDUnlink(buf->buf_mdh);
- spin_lock(&scd->scd_lock);
- return 0;
- }
-
- scd->scd_buf_nposted--;
- if (sv->sv_shuttingdown)
- return rc; /* don't allow to change scd_buf_posted */
-
- list_del(&buf->buf_list);
- spin_unlock(&scd->scd_lock);
-
- kfree(buf);
-
- spin_lock(&scd->scd_lock);
- return rc;
-}
-
-void
-srpc_add_buffer(struct swi_workitem *wi)
-{
- struct srpc_service_cd *scd = container_of(wi, struct srpc_service_cd, scd_buf_wi);
- struct srpc_buffer *buf;
- int rc = 0;
-
- /*
- * it's called by workitem scheduler threads, these threads
- * should have been set CPT affinity, so buffers will be posted
- * on CPT local list of Portal
- */
- spin_lock(&scd->scd_lock);
-
- while (scd->scd_buf_adjust > 0 &&
- !scd->scd_svc->sv_shuttingdown) {
- scd->scd_buf_adjust--; /* consume it */
- scd->scd_buf_posting++;
-
- spin_unlock(&scd->scd_lock);
-
- buf = kzalloc(sizeof(*buf), GFP_NOFS);
- if (!buf) {
- CERROR("Failed to add new buf to service: %s\n",
- scd->scd_svc->sv_name);
- spin_lock(&scd->scd_lock);
- rc = -ENOMEM;
- break;
- }
-
- spin_lock(&scd->scd_lock);
- if (scd->scd_svc->sv_shuttingdown) {
- spin_unlock(&scd->scd_lock);
- kfree(buf);
-
- spin_lock(&scd->scd_lock);
- rc = -ESHUTDOWN;
- break;
- }
-
- rc = srpc_service_post_buffer(scd, buf);
- if (rc)
- break; /* buf has been freed inside */
-
- LASSERT(scd->scd_buf_posting > 0);
- scd->scd_buf_posting--;
- scd->scd_buf_total++;
- scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
- }
-
- if (rc) {
- scd->scd_buf_err_stamp = ktime_get_real_seconds();
- scd->scd_buf_err = rc;
-
- LASSERT(scd->scd_buf_posting > 0);
- scd->scd_buf_posting--;
- }
-
- spin_unlock(&scd->scd_lock);
-}
-
-int
-srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
-{
- struct srpc_service_cd *scd;
- int rc = 0;
- int i;
-
- LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
-
- scd->scd_buf_err = 0;
- scd->scd_buf_err_stamp = 0;
- scd->scd_buf_posting = 0;
- scd->scd_buf_adjust = nbuffer;
- /* start to post buffers */
- swi_schedule_workitem(&scd->scd_buf_wi);
- spin_unlock(&scd->scd_lock);
-
- /* framework service only post buffer for one partition */
- if (srpc_serv_is_framework(sv))
- break;
- }
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
- /*
- * NB: srpc_service_add_buffers() can be called inside
- * thread context of lst_serial_wq, and we don't normally
- * allow to sleep inside thread context of WI scheduler
- * because it will block current scheduler thread from doing
- * anything else, even worse, it could deadlock if it's
- * waiting on result from another WI of the same scheduler.
- * However, it's safe at here because scd_buf_wi is scheduled
- * by thread in a different WI scheduler (lst_test_wq),
- * so we don't have any risk of deadlock, though this could
- * block all WIs pending on lst_serial_wq for a moment
- * which is not good but not fatal.
- */
- lst_wait_until(scd->scd_buf_err ||
- (!scd->scd_buf_adjust &&
- !scd->scd_buf_posting),
- scd->scd_lock, "waiting for adding buffer\n");
-
- if (scd->scd_buf_err && !rc)
- rc = scd->scd_buf_err;
-
- spin_unlock(&scd->scd_lock);
- }
-
- return rc;
-}
-
-void
-srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
-{
- struct srpc_service_cd *scd;
- int num;
- int i;
-
- LASSERT(!sv->sv_shuttingdown);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
-
- num = scd->scd_buf_total + scd->scd_buf_posting;
- scd->scd_buf_adjust -= min(nbuffer, num);
-
- spin_unlock(&scd->scd_lock);
- }
-}
-
-/* returns 1 if sv has finished, otherwise 0 */
-int
-srpc_finish_service(struct srpc_service *sv)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- int i;
-
- LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- swi_cancel_workitem(&scd->scd_buf_wi);
-
- spin_lock(&scd->scd_lock);
-
- if (scd->scd_buf_nposted > 0) {
- CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
- scd->scd_buf_nposted);
- spin_unlock(&scd->scd_lock);
- return 0;
- }
-
- if (list_empty(&scd->scd_rpc_active)) {
- spin_unlock(&scd->scd_lock);
- continue;
- }
-
- rpc = list_entry(scd->scd_rpc_active.next,
- struct srpc_server_rpc, srpc_list);
- CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s, ev fired %d type %d status %d lnet %d\n",
- rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
- swi_state2str(rpc->srpc_wi.swi_state),
- rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
- rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
- spin_unlock(&scd->scd_lock);
- return 0;
- }
-
- /* no lock needed from now on */
- srpc_service_fini(sv);
- return 1;
-}
-
-/* called with sv->sv_lock held */
-static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd,
- struct srpc_buffer *buf)
-__must_hold(&scd->scd_lock)
-{
- if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
- if (srpc_service_post_buffer(scd, buf)) {
- CWARN("Failed to post %s buffer\n",
- scd->scd_svc->sv_name);
- }
- return;
- }
-
- /* service is shutting down, or we want to recycle some buffers */
- scd->scd_buf_total--;
-
- if (scd->scd_buf_adjust < 0) {
- scd->scd_buf_adjust++;
- if (scd->scd_buf_adjust < 0 &&
- !scd->scd_buf_total && !scd->scd_buf_posting) {
- CDEBUG(D_INFO,
- "Try to recycle %d buffers but nothing left\n",
- scd->scd_buf_adjust);
- scd->scd_buf_adjust = 0;
- }
- }
-
- spin_unlock(&scd->scd_lock);
- kfree(buf);
- spin_lock(&scd->scd_lock);
-}
-
-void
-srpc_abort_service(struct srpc_service *sv)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- int i;
-
- CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
- sv->sv_id, sv->sv_name);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
-
- /*
- * schedule in-flight RPCs to notice the abort, NB:
- * racing with incoming RPCs; complete fix should make test
- * RPCs carry session ID in its headers
- */
- list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
- rpc->srpc_aborted = 1;
- swi_schedule_workitem(&rpc->srpc_wi);
- }
-
- spin_unlock(&scd->scd_lock);
- }
-}
-
-void
-srpc_shutdown_service(struct srpc_service *sv)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- struct srpc_buffer *buf;
- int i;
-
- CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
- sv->sv_id, sv->sv_name);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
- spin_lock(&scd->scd_lock);
-
- sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
- spin_unlock(&scd->scd_lock);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
-
- /* schedule in-flight RPCs to notice the shutdown */
- list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
- swi_schedule_workitem(&rpc->srpc_wi);
-
- spin_unlock(&scd->scd_lock);
-
- /*
- * OK to traverse scd_buf_posted without lock, since no one
- * touches scd_buf_posted now
- */
- list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
- LNetMDUnlink(buf->buf_mdh);
- }
-}
-
-static int
-srpc_send_request(struct srpc_client_rpc *rpc)
-{
- struct srpc_event *ev = &rpc->crpc_reqstev;
- int rc;
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_REQUEST_SENT;
-
- rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
- rpc->crpc_service, &rpc->crpc_reqstmsg,
- sizeof(struct srpc_msg), LNET_MD_OP_PUT,
- rpc->crpc_dest, LNET_NID_ANY,
- &rpc->crpc_reqstmdh, ev);
- if (rc) {
- LASSERT(rc == -ENOMEM);
- ev->ev_fired = 1; /* no more event expected */
- }
- return rc;
-}
-
-static int
-srpc_prepare_reply(struct srpc_client_rpc *rpc)
-{
- struct srpc_event *ev = &rpc->crpc_replyev;
- __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
- int rc;
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_REPLY_RCVD;
-
- *id = srpc_next_id();
-
- rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
- &rpc->crpc_replymsg,
- sizeof(struct srpc_msg),
- LNET_MD_OP_PUT, rpc->crpc_dest,
- &rpc->crpc_replymdh, ev);
- if (rc) {
- LASSERT(rc == -ENOMEM);
- ev->ev_fired = 1; /* no more event expected */
- }
- return rc;
-}
-
-static int
-srpc_prepare_bulk(struct srpc_client_rpc *rpc)
-{
- struct srpc_bulk *bk = &rpc->crpc_bulk;
- struct srpc_event *ev = &rpc->crpc_bulkev;
- __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
- int rc;
- int opt;
-
- LASSERT(bk->bk_niov <= LNET_MAX_IOV);
-
- if (!bk->bk_niov)
- return 0; /* nothing to do */
-
- opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
- opt |= LNET_MD_KIOV;
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_BULK_REQ_RCVD;
-
- *id = srpc_next_id();
-
- rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
- &bk->bk_iovs[0], bk->bk_niov, opt,
- rpc->crpc_dest, &bk->bk_mdh, ev);
- if (rc) {
- LASSERT(rc == -ENOMEM);
- ev->ev_fired = 1; /* no more event expected */
- }
- return rc;
-}
-
-static int
-srpc_do_bulk(struct srpc_server_rpc *rpc)
-{
- struct srpc_event *ev = &rpc->srpc_ev;
- struct srpc_bulk *bk = rpc->srpc_bulk;
- __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
- int rc;
- int opt;
-
- LASSERT(bk);
-
- opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
- opt |= LNET_MD_KIOV;
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;
-
- rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
- &bk->bk_iovs[0], bk->bk_niov, opt,
- rpc->srpc_peer, rpc->srpc_self,
- &bk->bk_mdh, ev);
- if (rc)
- ev->ev_fired = 1; /* no more event expected */
- return rc;
-}
-
-/* only called from srpc_handle_rpc */
-static void
-srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
-{
- struct srpc_service_cd *scd = rpc->srpc_scd;
- struct srpc_service *sv = scd->scd_svc;
- struct srpc_buffer *buffer;
-
- LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
-
- rpc->srpc_status = status;
-
- CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
- "Server RPC %p done: service %s, peer %s, status %s:%d\n",
- rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
- swi_state2str(rpc->srpc_wi.swi_state), status);
-
- if (status) {
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_dropped++;
- spin_unlock(&srpc_data.rpc_glock);
- }
-
- if (rpc->srpc_done)
- (*rpc->srpc_done) (rpc);
- LASSERT(!rpc->srpc_bulk);
-
- spin_lock(&scd->scd_lock);
-
- if (rpc->srpc_reqstbuf) {
- /*
- * NB might drop sv_lock in srpc_service_recycle_buffer, but
- * sv won't go away for scd_rpc_active must not be empty
- */
- srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
- rpc->srpc_reqstbuf = NULL;
- }
-
- list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */
-
- /*
- * No one can schedule me now since:
- * - I'm not on scd_rpc_active.
- * - all LNet events have been fired.
- * Cancel pending schedules and prevent future schedule attempts:
- */
- LASSERT(rpc->srpc_ev.ev_fired);
-
- if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
- buffer = list_entry(scd->scd_buf_blocked.next,
- struct srpc_buffer, buf_list);
- list_del(&buffer->buf_list);
-
- srpc_init_server_rpc(rpc, scd, buffer);
- list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
- swi_schedule_workitem(&rpc->srpc_wi);
- } else {
- list_add(&rpc->srpc_list, &scd->scd_rpc_free);
- }
-
- spin_unlock(&scd->scd_lock);
-}
-
-/* handles an incoming RPC */
-void
-srpc_handle_rpc(struct swi_workitem *wi)
-{
- struct srpc_server_rpc *rpc = container_of(wi, struct srpc_server_rpc, srpc_wi);
- struct srpc_service_cd *scd = rpc->srpc_scd;
- struct srpc_service *sv = scd->scd_svc;
- struct srpc_event *ev = &rpc->srpc_ev;
- int rc = 0;
-
- LASSERT(wi == &rpc->srpc_wi);
-
- spin_lock(&scd->scd_lock);
-
- if (sv->sv_shuttingdown || rpc->srpc_aborted) {
- spin_unlock(&scd->scd_lock);
-
- if (rpc->srpc_bulk)
- LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
- LNetMDUnlink(rpc->srpc_replymdh);
-
- if (ev->ev_fired) { /* no more event, OK to finish */
- srpc_server_rpc_done(rpc, -ESHUTDOWN);
- }
- return;
- }
-
- spin_unlock(&scd->scd_lock);
-
- switch (wi->swi_state) {
- default:
- LBUG();
- case SWI_STATE_NEWBORN: {
- struct srpc_msg *msg;
- struct srpc_generic_reply *reply;
-
- msg = &rpc->srpc_reqstbuf->buf_msg;
- reply = &rpc->srpc_replymsg.msg_body.reply;
-
- if (!msg->msg_magic) {
- /* moaned already in srpc_lnet_ev_handler */
- srpc_server_rpc_done(rpc, EBADMSG);
- return;
- }
-
- srpc_unpack_msg_hdr(msg);
- if (msg->msg_version != SRPC_MSG_VERSION) {
- CWARN("Version mismatch: %u, %u expected, from %s\n",
- msg->msg_version, SRPC_MSG_VERSION,
- libcfs_id2str(rpc->srpc_peer));
- reply->status = EPROTO;
- /* drop through and send reply */
- } else {
- reply->status = 0;
- rc = (*sv->sv_handler)(rpc);
- LASSERT(!reply->status || !rpc->srpc_bulk);
- if (rc) {
- srpc_server_rpc_done(rpc, rc);
- return;
- }
- }
-
- wi->swi_state = SWI_STATE_BULK_STARTED;
-
- if (rpc->srpc_bulk) {
- rc = srpc_do_bulk(rpc);
- if (!rc)
- return; /* wait for bulk */
-
- LASSERT(ev->ev_fired);
- ev->ev_status = rc;
- }
- }
- /* fall through */
- case SWI_STATE_BULK_STARTED:
- LASSERT(!rpc->srpc_bulk || ev->ev_fired);
-
- if (rpc->srpc_bulk) {
- rc = ev->ev_status;
-
- if (sv->sv_bulk_ready)
- rc = (*sv->sv_bulk_ready) (rpc, rc);
-
- if (rc) {
- srpc_server_rpc_done(rpc, rc);
- return;
- }
- }
-
- wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
- rc = srpc_send_reply(rpc);
- if (!rc)
- return; /* wait for reply */
- srpc_server_rpc_done(rpc, rc);
- return;
-
- case SWI_STATE_REPLY_SUBMITTED:
- if (!ev->ev_fired) {
- CERROR("RPC %p: bulk %p, service %d\n",
- rpc, rpc->srpc_bulk, sv->sv_id);
- CERROR("Event: status %d, type %d, lnet %d\n",
- ev->ev_status, ev->ev_type, ev->ev_lnet);
- LASSERT(ev->ev_fired);
- }
-
- wi->swi_state = SWI_STATE_DONE;
- srpc_server_rpc_done(rpc, ev->ev_status);
- return;
- }
-}
-
-static void
-srpc_client_rpc_expired(void *data)
-{
- struct srpc_client_rpc *rpc = data;
-
- CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- rpc->crpc_timeout);
-
- spin_lock(&rpc->crpc_lock);
-
- rpc->crpc_timeout = 0;
- srpc_abort_rpc(rpc, -ETIMEDOUT);
-
- spin_unlock(&rpc->crpc_lock);
-
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_expired++;
- spin_unlock(&srpc_data.rpc_glock);
-}
-
-static void
-srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
-{
- struct stt_timer *timer = &rpc->crpc_timer;
-
- if (!rpc->crpc_timeout)
- return;
-
- INIT_LIST_HEAD(&timer->stt_list);
- timer->stt_data = rpc;
- timer->stt_func = srpc_client_rpc_expired;
- timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout;
- stt_add_timer(timer);
-}
-
-/*
- * Called with rpc->crpc_lock held.
- *
- * Upon exit the RPC expiry timer is not queued and the handler is not
- * running on any CPU.
- */
-static void
-srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
-{
- /* timer not planted or already exploded */
- if (!rpc->crpc_timeout)
- return;
-
- /* timer successfully defused */
- if (stt_del_timer(&rpc->crpc_timer))
- return;
-
- /* timer detonated, wait for it to explode */
- while (rpc->crpc_timeout) {
- spin_unlock(&rpc->crpc_lock);
-
- schedule();
-
- spin_lock(&rpc->crpc_lock);
- }
-}
-
-static void
-srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
-{
- struct swi_workitem *wi = &rpc->crpc_wi;
-
- LASSERT(status || wi->swi_state == SWI_STATE_DONE);
-
- spin_lock(&rpc->crpc_lock);
-
- rpc->crpc_closed = 1;
- if (!rpc->crpc_status)
- rpc->crpc_status = status;
-
- srpc_del_client_rpc_timer(rpc);
-
- CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
- "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
-
- /*
- * No one can schedule me now since:
- * - RPC timer has been defused.
- * - all LNet events have been fired.
- * - crpc_closed has been set, preventing srpc_abort_rpc from
- * scheduling me.
- * Cancel pending schedules and prevent future schedule attempts:
- */
- LASSERT(!srpc_event_pending(rpc));
-
- spin_unlock(&rpc->crpc_lock);
-
- (*rpc->crpc_done)(rpc);
-}
-
-/* sends an outgoing RPC */
-void
-srpc_send_rpc(struct swi_workitem *wi)
-{
- int rc = 0;
- struct srpc_client_rpc *rpc;
- struct srpc_msg *reply;
- int do_bulk;
-
- LASSERT(wi);
-
- rpc = container_of(wi, struct srpc_client_rpc, crpc_wi);
-
- LASSERT(rpc);
- LASSERT(wi == &rpc->crpc_wi);
-
- reply = &rpc->crpc_replymsg;
- do_bulk = rpc->crpc_bulk.bk_niov > 0;
-
- spin_lock(&rpc->crpc_lock);
-
- if (rpc->crpc_aborted) {
- spin_unlock(&rpc->crpc_lock);
- goto abort;
- }
-
- spin_unlock(&rpc->crpc_lock);
-
- switch (wi->swi_state) {
- default:
- LBUG();
- case SWI_STATE_NEWBORN:
- LASSERT(!srpc_event_pending(rpc));
-
- rc = srpc_prepare_reply(rpc);
- if (rc) {
- srpc_client_rpc_done(rpc, rc);
- return;
- }
-
- rc = srpc_prepare_bulk(rpc);
- if (rc)
- break;
-
- wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
- rc = srpc_send_request(rpc);
- break;
-
- case SWI_STATE_REQUEST_SUBMITTED:
- /*
- * CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
- * order; however, they're processed in a strict order:
- * rqt, rpy, and bulk.
- */
- if (!rpc->crpc_reqstev.ev_fired)
- break;
-
- rc = rpc->crpc_reqstev.ev_status;
- if (rc)
- break;
-
- wi->swi_state = SWI_STATE_REQUEST_SENT;
- /* perhaps more events */
- /* fall through */
- case SWI_STATE_REQUEST_SENT: {
- enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service);
-
- if (!rpc->crpc_replyev.ev_fired)
- break;
-
- rc = rpc->crpc_replyev.ev_status;
- if (rc)
- break;
-
- srpc_unpack_msg_hdr(reply);
- if (reply->msg_type != type ||
- (reply->msg_magic != SRPC_MSG_MAGIC &&
- reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
- CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
- libcfs_id2str(rpc->crpc_dest),
- reply->msg_type, type,
- reply->msg_magic, SRPC_MSG_MAGIC);
- rc = -EBADMSG;
- break;
- }
-
- if (do_bulk && reply->msg_body.reply.status) {
- CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
- reply->msg_body.reply.status,
- libcfs_id2str(rpc->crpc_dest));
- LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
- }
-
- wi->swi_state = SWI_STATE_REPLY_RECEIVED;
- }
- /* fall through */
- case SWI_STATE_REPLY_RECEIVED:
- if (do_bulk && !rpc->crpc_bulkev.ev_fired)
- break;
-
- rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;
-
- /*
- * Bulk buffer was unlinked due to remote error. Clear error
- * since reply buffer still contains valid data.
- * NB rpc->crpc_done shouldn't look into bulk data in case of
- * remote error.
- */
- if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
- !rpc->crpc_status && reply->msg_body.reply.status)
- rc = 0;
-
- wi->swi_state = SWI_STATE_DONE;
- srpc_client_rpc_done(rpc, rc);
- return;
- }
-
- if (rc) {
- spin_lock(&rpc->crpc_lock);
- srpc_abort_rpc(rpc, rc);
- spin_unlock(&rpc->crpc_lock);
- }
-
-abort:
- if (rpc->crpc_aborted) {
- LNetMDUnlink(rpc->crpc_reqstmdh);
- LNetMDUnlink(rpc->crpc_replymdh);
- LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
-
- if (!srpc_event_pending(rpc)) {
- srpc_client_rpc_done(rpc, -EINTR);
- return;
- }
- }
-}
-
-struct srpc_client_rpc *
-srpc_create_client_rpc(struct lnet_process_id peer, int service,
- int nbulkiov, int bulklen,
- void (*rpc_done)(struct srpc_client_rpc *),
- void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
-{
- struct srpc_client_rpc *rpc;
-
- rpc = kzalloc(offsetof(struct srpc_client_rpc,
- crpc_bulk.bk_iovs[nbulkiov]), GFP_KERNEL);
- if (!rpc)
- return NULL;
-
- srpc_init_client_rpc(rpc, peer, service, nbulkiov,
- bulklen, rpc_done, rpc_fini, priv);
- return rpc;
-}
-
-/* called with rpc->crpc_lock held */
-void
-srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
-{
- LASSERT(why);
-
- if (rpc->crpc_aborted || /* already aborted */
- rpc->crpc_closed) /* callback imminent */
- return;
-
- CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(rpc->crpc_wi.swi_state), why);
-
- rpc->crpc_aborted = 1;
- rpc->crpc_status = why;
- swi_schedule_workitem(&rpc->crpc_wi);
-}
-
-/* called with rpc->crpc_lock held */
-void
-srpc_post_rpc(struct srpc_client_rpc *rpc)
-{
- LASSERT(!rpc->crpc_aborted);
- LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
-
- CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
- libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
- rpc->crpc_timeout);
-
- srpc_add_client_rpc_timer(rpc);
- swi_schedule_workitem(&rpc->crpc_wi);
-}
-
-int
-srpc_send_reply(struct srpc_server_rpc *rpc)
-{
- struct srpc_event *ev = &rpc->srpc_ev;
- struct srpc_msg *msg = &rpc->srpc_replymsg;
- struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
- struct srpc_service_cd *scd = rpc->srpc_scd;
- struct srpc_service *sv = scd->scd_svc;
- __u64 rpyid;
- int rc;
-
- LASSERT(buffer);
- rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
-
- spin_lock(&scd->scd_lock);
-
- if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
- /*
- * Repost buffer before replying since test client
- * might send me another RPC once it gets the reply
- */
- if (srpc_service_post_buffer(scd, buffer))
- CWARN("Failed to repost %s buffer\n", sv->sv_name);
- rpc->srpc_reqstbuf = NULL;
- }
-
- spin_unlock(&scd->scd_lock);
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_REPLY_SENT;
-
- msg->msg_magic = SRPC_MSG_MAGIC;
- msg->msg_version = SRPC_MSG_VERSION;
- msg->msg_type = srpc_service2reply(sv->sv_id);
-
- rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
- sizeof(*msg), LNET_MD_OP_PUT,
- rpc->srpc_peer, rpc->srpc_self,
- &rpc->srpc_replymdh, ev);
- if (rc)
- ev->ev_fired = 1; /* no more event expected */
- return rc;
-}
-
-/* when in kernel always called with LNET_LOCK() held, and in thread context */
-static void
-srpc_lnet_ev_handler(struct lnet_event *ev)
-{
- struct srpc_service_cd *scd;
- struct srpc_event *rpcev = ev->md.user_ptr;
- struct srpc_client_rpc *crpc;
- struct srpc_server_rpc *srpc;
- struct srpc_buffer *buffer;
- struct srpc_service *sv;
- struct srpc_msg *msg;
- enum srpc_msg_type type;
-
- LASSERT(!in_interrupt());
-
- if (ev->status) {
- __u32 errors;
-
- spin_lock(&srpc_data.rpc_glock);
- if (ev->status != -ECANCELED) /* cancellation is not error */
- srpc_data.rpc_counters.errors++;
- errors = srpc_data.rpc_counters.errors;
- spin_unlock(&srpc_data.rpc_glock);
-
- CNETERR("LNet event status %d type %d, RPC errors %u\n",
- ev->status, ev->type, errors);
- }
-
- rpcev->ev_lnet = ev->type;
-
- switch (rpcev->ev_type) {
- default:
- CERROR("Unknown event: status %d, type %d, lnet %d\n",
- rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
- LBUG();
- case SRPC_REQUEST_SENT:
- if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_sent++;
- spin_unlock(&srpc_data.rpc_glock);
- }
- /* fall through */
- case SRPC_REPLY_RCVD:
- case SRPC_BULK_REQ_RCVD:
- crpc = rpcev->ev_data;
-
- if (rpcev != &crpc->crpc_reqstev &&
- rpcev != &crpc->crpc_replyev &&
- rpcev != &crpc->crpc_bulkev) {
- CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
- rpcev, crpc, &crpc->crpc_reqstev,
- &crpc->crpc_replyev, &crpc->crpc_bulkev);
- CERROR("Bad event: status %d, type %d, lnet %d\n",
- rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
- LBUG();
- }
-
- spin_lock(&crpc->crpc_lock);
-
- LASSERT(!rpcev->ev_fired);
- rpcev->ev_fired = 1;
- rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
- -EINTR : ev->status;
- swi_schedule_workitem(&crpc->crpc_wi);
-
- spin_unlock(&crpc->crpc_lock);
- break;
-
- case SRPC_REQUEST_RCVD:
- scd = rpcev->ev_data;
- sv = scd->scd_svc;
-
- LASSERT(rpcev == &scd->scd_ev);
-
- spin_lock(&scd->scd_lock);
-
- LASSERT(ev->unlinked);
- LASSERT(ev->type == LNET_EVENT_PUT ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT(ev->type != LNET_EVENT_UNLINK ||
- sv->sv_shuttingdown);
-
- buffer = container_of(ev->md.start, struct srpc_buffer, buf_msg);
- buffer->buf_peer = ev->initiator;
- buffer->buf_self = ev->target.nid;
-
- LASSERT(scd->scd_buf_nposted > 0);
- scd->scd_buf_nposted--;
-
- if (sv->sv_shuttingdown) {
- /*
- * Leave buffer on scd->scd_buf_nposted since
- * srpc_finish_service needs to traverse it.
- */
- spin_unlock(&scd->scd_lock);
- break;
- }
-
- if (scd->scd_buf_err_stamp &&
- scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
- /* re-enable adding buffer */
- scd->scd_buf_err_stamp = 0;
- scd->scd_buf_err = 0;
- }
-
- if (!scd->scd_buf_err && /* adding buffer is enabled */
- !scd->scd_buf_adjust &&
- scd->scd_buf_nposted < scd->scd_buf_low) {
- scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
- SFW_TEST_WI_MIN);
- swi_schedule_workitem(&scd->scd_buf_wi);
- }
-
- list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
- msg = &buffer->buf_msg;
- type = srpc_service2request(sv->sv_id);
-
- if (ev->status || ev->mlength != sizeof(*msg) ||
- (msg->msg_type != type &&
- msg->msg_type != __swab32(type)) ||
- (msg->msg_magic != SRPC_MSG_MAGIC &&
- msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
- CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
- sv->sv_name, libcfs_id2str(ev->initiator),
- ev->status, ev->mlength,
- msg->msg_type, msg->msg_magic);
-
- /*
- * NB can't call srpc_service_recycle_buffer here since
- * it may call LNetM[DE]Attach. The invalid magic tells
- * srpc_handle_rpc to drop this RPC
- */
- msg->msg_magic = 0;
- }
-
- if (!list_empty(&scd->scd_rpc_free)) {
- srpc = list_entry(scd->scd_rpc_free.next,
- struct srpc_server_rpc,
- srpc_list);
- list_del(&srpc->srpc_list);
-
- srpc_init_server_rpc(srpc, scd, buffer);
- list_add_tail(&srpc->srpc_list,
- &scd->scd_rpc_active);
- swi_schedule_workitem(&srpc->srpc_wi);
- } else {
- list_add_tail(&buffer->buf_list,
- &scd->scd_buf_blocked);
- }
-
- spin_unlock(&scd->scd_lock);
-
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_rcvd++;
- spin_unlock(&srpc_data.rpc_glock);
- break;
-
- case SRPC_BULK_GET_RPLD:
- LASSERT(ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_REPLY ||
- ev->type == LNET_EVENT_UNLINK);
-
- if (!ev->unlinked)
- break; /* wait for final event */
- /* fall through */
- case SRPC_BULK_PUT_SENT:
- if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
- spin_lock(&srpc_data.rpc_glock);
-
- if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
- srpc_data.rpc_counters.bulk_get += ev->mlength;
- else
- srpc_data.rpc_counters.bulk_put += ev->mlength;
-
- spin_unlock(&srpc_data.rpc_glock);
- }
- /* fall through */
- case SRPC_REPLY_SENT:
- srpc = rpcev->ev_data;
- scd = srpc->srpc_scd;
-
- LASSERT(rpcev == &srpc->srpc_ev);
-
- spin_lock(&scd->scd_lock);
-
- rpcev->ev_fired = 1;
- rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
- -EINTR : ev->status;
- swi_schedule_workitem(&srpc->srpc_wi);
-
- spin_unlock(&scd->scd_lock);
- break;
- }
-}
-
-int
-srpc_startup(void)
-{
- int rc;
-
- memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
- spin_lock_init(&srpc_data.rpc_glock);
-
- /* 1 second pause to avoid timestamp reuse */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- srpc_data.rpc_matchbits = ((__u64)ktime_get_real_seconds()) << 48;
-
- srpc_data.rpc_state = SRPC_STATE_NONE;
-
- rc = LNetNIInit(LNET_PID_LUSTRE);
- if (rc < 0) {
- CERROR("LNetNIInit() has failed: %d\n", rc);
- return rc;
- }
-
- srpc_data.rpc_state = SRPC_STATE_NI_INIT;
-
- LNetInvalidateEQHandle(&srpc_data.rpc_lnet_eq);
- rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
- if (rc) {
- CERROR("LNetEQAlloc() has failed: %d\n", rc);
- goto bail;
- }
-
- rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
- LASSERT(!rc);
- rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
- LASSERT(!rc);
-
- srpc_data.rpc_state = SRPC_STATE_EQ_INIT;
-
- rc = stt_startup();
-
-bail:
- if (rc)
- srpc_shutdown();
- else
- srpc_data.rpc_state = SRPC_STATE_RUNNING;
-
- return rc;
-}
-
-void
-srpc_shutdown(void)
-{
- int i;
- int rc;
- int state;
-
- state = srpc_data.rpc_state;
- srpc_data.rpc_state = SRPC_STATE_STOPPING;
-
- switch (state) {
- default:
- LBUG();
- case SRPC_STATE_RUNNING:
- spin_lock(&srpc_data.rpc_glock);
-
- for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
- struct srpc_service *sv = srpc_data.rpc_services[i];
-
- LASSERTF(!sv, "service not empty: id %d, name %s\n",
- i, sv->sv_name);
- }
-
- spin_unlock(&srpc_data.rpc_glock);
-
- stt_shutdown();
- /* fall through */
- case SRPC_STATE_EQ_INIT:
- rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
- rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
- LASSERT(!rc);
- rc = LNetEQFree(srpc_data.rpc_lnet_eq);
- LASSERT(!rc); /* the EQ should have no user by now */
- /* fall through */
- case SRPC_STATE_NI_INIT:
- LNetNIFini();
- }
-}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
deleted file mode 100644
index 465b5b534423..000000000000
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ /dev/null
@@ -1,295 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __SELFTEST_RPC_H__
-#define __SELFTEST_RPC_H__
-
-#include <uapi/linux/lnet/lnetst.h>
-
-/*
- * LST wired structures
- *
- * XXX: *REPLY == *REQST + 1
- */
-enum srpc_msg_type {
- SRPC_MSG_MKSN_REQST = 0,
- SRPC_MSG_MKSN_REPLY = 1,
- SRPC_MSG_RMSN_REQST = 2,
- SRPC_MSG_RMSN_REPLY = 3,
- SRPC_MSG_BATCH_REQST = 4,
- SRPC_MSG_BATCH_REPLY = 5,
- SRPC_MSG_STAT_REQST = 6,
- SRPC_MSG_STAT_REPLY = 7,
- SRPC_MSG_TEST_REQST = 8,
- SRPC_MSG_TEST_REPLY = 9,
- SRPC_MSG_DEBUG_REQST = 10,
- SRPC_MSG_DEBUG_REPLY = 11,
- SRPC_MSG_BRW_REQST = 12,
- SRPC_MSG_BRW_REPLY = 13,
- SRPC_MSG_PING_REQST = 14,
- SRPC_MSG_PING_REPLY = 15,
- SRPC_MSG_JOIN_REQST = 16,
- SRPC_MSG_JOIN_REPLY = 17,
-};
-
-/* CAVEAT EMPTOR:
- * All srpc_*_reqst_t's 1st field must be matchbits of reply buffer,
- * and 2nd field matchbits of bulk buffer if any.
- *
- * All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field
- * session id if needed.
- */
-struct srpc_generic_reqst {
- __u64 rpyid; /* reply buffer matchbits */
- __u64 bulkid; /* bulk buffer matchbits */
-} WIRE_ATTR;
-
-struct srpc_generic_reply {
- __u32 status;
- struct lst_sid sid;
-} WIRE_ATTR;
-
-/* FRAMEWORK RPCs */
-struct srpc_mksn_reqst {
- __u64 mksn_rpyid; /* reply buffer matchbits */
- struct lst_sid mksn_sid; /* session id */
- __u32 mksn_force; /* use brute force */
- char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR; /* make session request */
-
-struct srpc_mksn_reply {
- __u32 mksn_status; /* session status */
- struct lst_sid mksn_sid; /* session id */
- __u32 mksn_timeout; /* session timeout */
- char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR; /* make session reply */
-
-struct srpc_rmsn_reqst {
- __u64 rmsn_rpyid; /* reply buffer matchbits */
- struct lst_sid rmsn_sid; /* session id */
-} WIRE_ATTR; /* remove session request */
-
-struct srpc_rmsn_reply {
- __u32 rmsn_status;
- struct lst_sid rmsn_sid; /* session id */
-} WIRE_ATTR; /* remove session reply */
-
-struct srpc_join_reqst {
- __u64 join_rpyid; /* reply buffer matchbits */
- struct lst_sid join_sid; /* session id to join */
- char join_group[LST_NAME_SIZE]; /* group name */
-} WIRE_ATTR;
-
-struct srpc_join_reply {
- __u32 join_status; /* returned status */
- struct lst_sid join_sid; /* session id */
- __u32 join_timeout; /* # seconds' inactivity to
- * expire
- */
- char join_session[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR;
-
-struct srpc_debug_reqst {
- __u64 dbg_rpyid; /* reply buffer matchbits */
- struct lst_sid dbg_sid; /* session id */
- __u32 dbg_flags; /* bitmap of debug */
-} WIRE_ATTR;
-
-struct srpc_debug_reply {
- __u32 dbg_status; /* returned code */
- struct lst_sid dbg_sid; /* session id */
- __u32 dbg_timeout; /* session timeout */
- __u32 dbg_nbatch; /* # of batches in the node */
- char dbg_name[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR;
-
-#define SRPC_BATCH_OPC_RUN 1
-#define SRPC_BATCH_OPC_STOP 2
-#define SRPC_BATCH_OPC_QUERY 3
-
-struct srpc_batch_reqst {
- __u64 bar_rpyid; /* reply buffer matchbits */
- struct lst_sid bar_sid; /* session id */
- struct lst_bid bar_bid; /* batch id */
- __u32 bar_opc; /* create/start/stop batch */
- __u32 bar_testidx; /* index of test */
- __u32 bar_arg; /* parameters */
-} WIRE_ATTR;
-
-struct srpc_batch_reply {
- __u32 bar_status; /* status of request */
- struct lst_sid bar_sid; /* session id */
- __u32 bar_active; /* # of active tests in batch/test */
- __u32 bar_time; /* remained time */
-} WIRE_ATTR;
-
-struct srpc_stat_reqst {
- __u64 str_rpyid; /* reply buffer matchbits */
- struct lst_sid str_sid; /* session id */
- __u32 str_type; /* type of stat */
-} WIRE_ATTR;
-
-struct srpc_stat_reply {
- __u32 str_status;
- struct lst_sid str_sid;
- struct sfw_counters str_fw;
- struct srpc_counters str_rpc;
- struct lnet_counters str_lnet;
-} WIRE_ATTR;
-
-struct test_bulk_req {
- __u32 blk_opc; /* bulk operation code */
- __u32 blk_npg; /* # of pages */
- __u32 blk_flags; /* reserved flags */
-} WIRE_ATTR;
-
-struct test_bulk_req_v1 {
- __u16 blk_opc; /* bulk operation code */
- __u16 blk_flags; /* data check flags */
- __u32 blk_len; /* data length */
- __u32 blk_offset; /* offset */
-} WIRE_ATTR;
-
-struct test_ping_req {
- __u32 png_size; /* size of ping message */
- __u32 png_flags; /* reserved flags */
-} WIRE_ATTR;
-
-struct srpc_test_reqst {
- __u64 tsr_rpyid; /* reply buffer matchbits */
- __u64 tsr_bulkid; /* bulk buffer matchbits */
- struct lst_sid tsr_sid; /* session id */
- struct lst_bid tsr_bid; /* batch id */
- __u32 tsr_service; /* test type: bulk|ping|... */
- __u32 tsr_loop; /* test client loop count or
- * # server buffers needed
- */
- __u32 tsr_concur; /* concurrency of test */
- __u8 tsr_is_client; /* is test client or not */
- __u8 tsr_stop_onerr; /* stop on error */
- __u32 tsr_ndest; /* # of dest nodes */
-
- union {
- struct test_ping_req ping;
- struct test_bulk_req bulk_v0;
- struct test_bulk_req_v1 bulk_v1;
- } tsr_u;
-} WIRE_ATTR;
-
-struct srpc_test_reply {
- __u32 tsr_status; /* returned code */
- struct lst_sid tsr_sid;
-} WIRE_ATTR;
-
-/* TEST RPCs */
-struct srpc_ping_reqst {
- __u64 pnr_rpyid;
- __u32 pnr_magic;
- __u32 pnr_seq;
- __u64 pnr_time_sec;
- __u64 pnr_time_usec;
-} WIRE_ATTR;
-
-struct srpc_ping_reply {
- __u32 pnr_status;
- __u32 pnr_magic;
- __u32 pnr_seq;
-} WIRE_ATTR;
-
-struct srpc_brw_reqst {
- __u64 brw_rpyid; /* reply buffer matchbits */
- __u64 brw_bulkid; /* bulk buffer matchbits */
- __u32 brw_rw; /* read or write */
- __u32 brw_len; /* bulk data len */
- __u32 brw_flags; /* bulk data patterns */
-} WIRE_ATTR; /* bulk r/w request */
-
-struct srpc_brw_reply {
- __u32 brw_status;
-} WIRE_ATTR; /* bulk r/w reply */
-
-#define SRPC_MSG_MAGIC 0xeeb0f00d
-#define SRPC_MSG_VERSION 1
-
-struct srpc_msg {
- __u32 msg_magic; /* magic number */
- __u32 msg_version; /* message version number */
- __u32 msg_type; /* type of message body: srpc_msg_type */
- __u32 msg_reserved0;
- __u32 msg_reserved1;
- __u32 msg_ses_feats; /* test session features */
- union {
- struct srpc_generic_reqst reqst;
- struct srpc_generic_reply reply;
-
- struct srpc_mksn_reqst mksn_reqst;
- struct srpc_mksn_reply mksn_reply;
- struct srpc_rmsn_reqst rmsn_reqst;
- struct srpc_rmsn_reply rmsn_reply;
- struct srpc_debug_reqst dbg_reqst;
- struct srpc_debug_reply dbg_reply;
- struct srpc_batch_reqst bat_reqst;
- struct srpc_batch_reply bat_reply;
- struct srpc_stat_reqst stat_reqst;
- struct srpc_stat_reply stat_reply;
- struct srpc_test_reqst tes_reqst;
- struct srpc_test_reply tes_reply;
- struct srpc_join_reqst join_reqst;
- struct srpc_join_reply join_reply;
-
- struct srpc_ping_reqst ping_reqst;
- struct srpc_ping_reply ping_reply;
- struct srpc_brw_reqst brw_reqst;
- struct srpc_brw_reply brw_reply;
- } msg_body;
-} WIRE_ATTR;
-
-static inline void
-srpc_unpack_msg_hdr(struct srpc_msg *msg)
-{
- if (msg->msg_magic == SRPC_MSG_MAGIC)
- return; /* no flipping needed */
-
- /*
- * We do not swap the magic number here as it is needed to
- * determine whether the body needs to be swapped.
- */
- /* __swab32s(&msg->msg_magic); */
- __swab32s(&msg->msg_type);
- __swab32s(&msg->msg_version);
- __swab32s(&msg->msg_ses_feats);
- __swab32s(&msg->msg_reserved0);
- __swab32s(&msg->msg_reserved1);
-}
-
-#endif /* __SELFTEST_RPC_H__ */
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
deleted file mode 100644
index 05466b85e1c0..000000000000
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ /dev/null
@@ -1,623 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/selftest.h
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- */
-#ifndef __SELFTEST_SELFTEST_H__
-#define __SELFTEST_SELFTEST_H__
-
-#define LNET_ONLY
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/lnet/lib-lnet.h>
-#include <linux/lnet/lib-types.h>
-#include <uapi/linux/lnet/lnetst.h>
-
-#include "rpc.h"
-#include "timer.h"
-
-#ifndef MADE_WITHOUT_COMPROMISE
-#define MADE_WITHOUT_COMPROMISE
-#endif
-
-#define SWI_STATE_NEWBORN 0
-#define SWI_STATE_REPLY_SUBMITTED 1
-#define SWI_STATE_REPLY_SENT 2
-#define SWI_STATE_REQUEST_SUBMITTED 3
-#define SWI_STATE_REQUEST_SENT 4
-#define SWI_STATE_REPLY_RECEIVED 5
-#define SWI_STATE_BULK_STARTED 6
-#define SWI_STATE_DONE 10
-
-/* forward refs */
-struct srpc_service;
-struct srpc_service_cd;
-struct sfw_test_unit;
-struct sfw_test_instance;
-
-/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
- * services, e.g. create/modify session.
- */
-#define SRPC_SERVICE_DEBUG 0
-#define SRPC_SERVICE_MAKE_SESSION 1
-#define SRPC_SERVICE_REMOVE_SESSION 2
-#define SRPC_SERVICE_BATCH 3
-#define SRPC_SERVICE_TEST 4
-#define SRPC_SERVICE_QUERY_STAT 5
-#define SRPC_SERVICE_JOIN 6
-#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10
-/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
-#define SRPC_SERVICE_BRW 11
-#define SRPC_SERVICE_PING 12
-#define SRPC_SERVICE_MAX_ID 12
-
-#define SRPC_REQUEST_PORTAL 50
-/* a lazy portal for framework RPC requests */
-#define SRPC_FRAMEWORK_REQUEST_PORTAL 51
-/* all reply/bulk RDMAs go to this portal */
-#define SRPC_RDMA_PORTAL 52
-
-static inline enum srpc_msg_type
-srpc_service2request(int service)
-{
- switch (service) {
- default:
- LBUG();
- case SRPC_SERVICE_DEBUG:
- return SRPC_MSG_DEBUG_REQST;
-
- case SRPC_SERVICE_MAKE_SESSION:
- return SRPC_MSG_MKSN_REQST;
-
- case SRPC_SERVICE_REMOVE_SESSION:
- return SRPC_MSG_RMSN_REQST;
-
- case SRPC_SERVICE_BATCH:
- return SRPC_MSG_BATCH_REQST;
-
- case SRPC_SERVICE_TEST:
- return SRPC_MSG_TEST_REQST;
-
- case SRPC_SERVICE_QUERY_STAT:
- return SRPC_MSG_STAT_REQST;
-
- case SRPC_SERVICE_BRW:
- return SRPC_MSG_BRW_REQST;
-
- case SRPC_SERVICE_PING:
- return SRPC_MSG_PING_REQST;
-
- case SRPC_SERVICE_JOIN:
- return SRPC_MSG_JOIN_REQST;
- }
-}
-
-static inline enum srpc_msg_type
-srpc_service2reply(int service)
-{
- return srpc_service2request(service) + 1;
-}
-
-enum srpc_event_type {
- SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source)
- * received
- */
- SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
- SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */
- SRPC_REPLY_RCVD = 4, /* incoming reply received */
- SRPC_REPLY_SENT = 5, /* outgoing reply sent */
- SRPC_REQUEST_RCVD = 6, /* incoming request received */
- SRPC_REQUEST_SENT = 7, /* outgoing request sent */
-};
-
-/* RPC event */
-struct srpc_event {
- enum srpc_event_type ev_type; /* what's up */
- enum lnet_event_kind ev_lnet; /* LNet event type */
- int ev_fired; /* LNet event fired? */
- int ev_status; /* LNet event status */
- void *ev_data; /* owning server/client RPC */
-};
-
-/* bulk descriptor */
-struct srpc_bulk {
- int bk_len; /* len of bulk data */
- struct lnet_handle_md bk_mdh;
- int bk_sink; /* sink/source */
- int bk_niov; /* # iov in bk_iovs */
- struct bio_vec bk_iovs[0];
-};
-
-/* message buffer descriptor */
-struct srpc_buffer {
- struct list_head buf_list; /* chain on srpc_service::*_msgq */
- struct srpc_msg buf_msg;
- struct lnet_handle_md buf_mdh;
- lnet_nid_t buf_self;
- struct lnet_process_id buf_peer;
-};
-
-struct swi_workitem;
-typedef void (*swi_action_t) (struct swi_workitem *);
-
-struct swi_workitem {
- struct workqueue_struct *swi_wq;
- struct work_struct swi_work;
- swi_action_t swi_action;
- int swi_state;
-};
-
-/* server-side state of a RPC */
-struct srpc_server_rpc {
- /* chain on srpc_service::*_rpcq */
- struct list_head srpc_list;
- struct srpc_service_cd *srpc_scd;
- struct swi_workitem srpc_wi;
- struct srpc_event srpc_ev; /* bulk/reply event */
- lnet_nid_t srpc_self;
- struct lnet_process_id srpc_peer;
- struct srpc_msg srpc_replymsg;
- struct lnet_handle_md srpc_replymdh;
- struct srpc_buffer *srpc_reqstbuf;
- struct srpc_bulk *srpc_bulk;
-
- unsigned int srpc_aborted; /* being given up */
- int srpc_status;
- void (*srpc_done)(struct srpc_server_rpc *);
-};
-
-/* client-side state of a RPC */
-struct srpc_client_rpc {
- struct list_head crpc_list; /* chain on user's lists */
- spinlock_t crpc_lock; /* serialize */
- int crpc_service;
- atomic_t crpc_refcount;
- int crpc_timeout; /* # seconds to wait for reply */
- struct stt_timer crpc_timer;
- struct swi_workitem crpc_wi;
- struct lnet_process_id crpc_dest;
-
- void (*crpc_done)(struct srpc_client_rpc *);
- void (*crpc_fini)(struct srpc_client_rpc *);
- int crpc_status; /* completion status */
- void *crpc_priv; /* caller data */
-
- /* state flags */
- unsigned int crpc_aborted:1; /* being given up */
- unsigned int crpc_closed:1; /* completed */
-
- /* RPC events */
- struct srpc_event crpc_bulkev; /* bulk event */
- struct srpc_event crpc_reqstev; /* request event */
- struct srpc_event crpc_replyev; /* reply event */
-
- /* bulk, request(reqst), and reply exchanged on wire */
- struct srpc_msg crpc_reqstmsg;
- struct srpc_msg crpc_replymsg;
- struct lnet_handle_md crpc_reqstmdh;
- struct lnet_handle_md crpc_replymdh;
- struct srpc_bulk crpc_bulk;
-};
-
-#define srpc_client_rpc_size(rpc) \
-offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
-
-#define srpc_client_rpc_addref(rpc) \
-do { \
- CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \
- (rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- atomic_inc(&(rpc)->crpc_refcount); \
-} while (0)
-
-#define srpc_client_rpc_decref(rpc) \
-do { \
- CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \
- (rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \
- srpc_destroy_client_rpc(rpc); \
-} while (0)
-
-#define srpc_event_pending(rpc) (!(rpc)->crpc_bulkev.ev_fired || \
- !(rpc)->crpc_reqstev.ev_fired || \
- !(rpc)->crpc_replyev.ev_fired)
-
-/* CPU partition data of srpc service */
-struct srpc_service_cd {
- /** serialize */
- spinlock_t scd_lock;
- /** backref to service */
- struct srpc_service *scd_svc;
- /** event buffer */
- struct srpc_event scd_ev;
- /** free RPC descriptors */
- struct list_head scd_rpc_free;
- /** in-flight RPCs */
- struct list_head scd_rpc_active;
- /** workitem for posting buffer */
- struct swi_workitem scd_buf_wi;
- /** CPT id */
- int scd_cpt;
- /** error code for scd_buf_wi */
- int scd_buf_err;
- /** timestamp for scd_buf_err */
- time64_t scd_buf_err_stamp;
- /** total # request buffers */
- int scd_buf_total;
- /** # posted request buffers */
- int scd_buf_nposted;
- /** in progress of buffer posting */
- int scd_buf_posting;
- /** allocate more buffers if scd_buf_nposted < scd_buf_low */
- int scd_buf_low;
- /** increase/decrease some buffers */
- int scd_buf_adjust;
- /** posted message buffers */
- struct list_head scd_buf_posted;
- /** blocked for RPC descriptor */
- struct list_head scd_buf_blocked;
-};
-
-/* number of server workitems (mini-thread) for testing service */
-#define SFW_TEST_WI_MIN 256
-#define SFW_TEST_WI_MAX 2048
-/* extra buffers for tolerating buggy peers, or unbalanced number
- * of peers between partitions
- */
-#define SFW_TEST_WI_EXTRA 64
-
-/* number of server workitems (mini-thread) for framework service */
-#define SFW_FRWK_WI_MIN 16
-#define SFW_FRWK_WI_MAX 256
-
-struct srpc_service {
- int sv_id; /* service id */
- const char *sv_name; /* human readable name */
- int sv_wi_total; /* total server workitems */
- int sv_shuttingdown;
- int sv_ncpts;
- /* percpt data for srpc_service */
- struct srpc_service_cd **sv_cpt_data;
- /* Service callbacks:
- * - sv_handler: process incoming RPC request
- * - sv_bulk_ready: notify bulk data
- */
- int (*sv_handler)(struct srpc_server_rpc *);
- int (*sv_bulk_ready)(struct srpc_server_rpc *, int);
-};
-
-struct sfw_session {
- struct list_head sn_list; /* chain on fw_zombie_sessions */
- struct lst_sid sn_id; /* unique identifier */
- unsigned int sn_timeout; /* # seconds' inactivity to expire */
- int sn_timer_active;
- unsigned int sn_features;
- struct stt_timer sn_timer;
- struct list_head sn_batches; /* list of batches */
- char sn_name[LST_NAME_SIZE];
- atomic_t sn_refcount;
- atomic_t sn_brw_errors;
- atomic_t sn_ping_errors;
- unsigned long sn_started;
-};
-
-#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
- (sid0).ses_stamp == (sid1).ses_stamp)
-
-struct sfw_batch {
- struct list_head bat_list; /* chain on sn_batches */
- struct lst_bid bat_id; /* batch id */
- int bat_error; /* error code of batch */
- struct sfw_session *bat_session; /* batch's session */
- atomic_t bat_nactive; /* # of active tests */
- struct list_head bat_tests; /* test instances */
-};
-
-struct sfw_test_client_ops {
- int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
- * client
- */
- void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
- * client
- */
- int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
- struct lnet_process_id dest,
- struct srpc_client_rpc **rpc); /* prep a tests rpc */
- void (*tso_done_rpc)(struct sfw_test_unit *tsu,
- struct srpc_client_rpc *rpc); /* done a test rpc */
-};
-
-struct sfw_test_instance {
- struct list_head tsi_list; /* chain on batch */
- int tsi_service; /* test type */
- struct sfw_batch *tsi_batch; /* batch */
- struct sfw_test_client_ops *tsi_ops; /* test client operation
- */
-
- /* public parameter for all test units */
- unsigned int tsi_is_client:1; /* is test client */
- unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */
- int tsi_concur; /* concurrency */
- int tsi_loop; /* loop count */
-
- /* status of test instance */
- spinlock_t tsi_lock; /* serialize */
- unsigned int tsi_stopping:1; /* test is stopping */
- atomic_t tsi_nactive; /* # of active test
- * unit
- */
- struct list_head tsi_units; /* test units */
- struct list_head tsi_free_rpcs; /* free rpcs */
- struct list_head tsi_active_rpcs; /* active rpcs */
-
- union {
- struct test_ping_req ping; /* ping parameter */
- struct test_bulk_req bulk_v0; /* bulk parameter */
- struct test_bulk_req_v1 bulk_v1; /* bulk v1 parameter */
- } tsi_u;
-};
-
-/*
- * XXX: trailing (PAGE_SIZE % sizeof(struct lnet_process_id)) bytes at the end
- * of pages are not used
- */
-#define SFW_MAX_CONCUR LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(struct lnet_process_id_packed))
-#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
-#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
-
-struct sfw_test_unit {
- struct list_head tsu_list; /* chain on lst_test_instance */
- struct lnet_process_id tsu_dest; /* id of dest node */
- int tsu_loop; /* loop count of the test */
- struct sfw_test_instance *tsu_instance; /* pointer to test instance */
- void *tsu_private; /* private data */
- struct swi_workitem tsu_worker; /* workitem of the test unit */
-};
-
-struct sfw_test_case {
- struct list_head tsc_list; /* chain on fw_tests */
- struct srpc_service *tsc_srv_service; /* test service */
- struct sfw_test_client_ops *tsc_cli_ops; /* ops of test client */
-};
-
-struct srpc_client_rpc *
-sfw_create_rpc(struct lnet_process_id peer, int service,
- unsigned int features, int nbulkiov, int bulklen,
- void (*done)(struct srpc_client_rpc *), void *priv);
-int sfw_create_test_rpc(struct sfw_test_unit *tsu,
- struct lnet_process_id peer, unsigned int features,
- int nblk, int blklen, struct srpc_client_rpc **rpc);
-void sfw_abort_rpc(struct srpc_client_rpc *rpc);
-void sfw_post_rpc(struct srpc_client_rpc *rpc);
-void sfw_client_rpc_done(struct srpc_client_rpc *rpc);
-void sfw_unpack_message(struct srpc_msg *msg);
-void sfw_free_pages(struct srpc_server_rpc *rpc);
-void sfw_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i);
-int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
- int sink);
-int sfw_make_session(struct srpc_mksn_reqst *request,
- struct srpc_mksn_reply *reply);
-
-struct srpc_client_rpc *
-srpc_create_client_rpc(struct lnet_process_id peer, int service,
- int nbulkiov, int bulklen,
- void (*rpc_done)(struct srpc_client_rpc *),
- void (*rpc_fini)(struct srpc_client_rpc *), void *priv);
-void srpc_post_rpc(struct srpc_client_rpc *rpc);
-void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
-void srpc_free_bulk(struct srpc_bulk *bk);
-struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned int off,
- unsigned int bulk_npg, unsigned int bulk_len,
- int sink);
-void srpc_send_rpc(struct swi_workitem *wi);
-int srpc_send_reply(struct srpc_server_rpc *rpc);
-int srpc_add_service(struct srpc_service *sv);
-int srpc_remove_service(struct srpc_service *sv);
-void srpc_shutdown_service(struct srpc_service *sv);
-void srpc_abort_service(struct srpc_service *sv);
-int srpc_finish_service(struct srpc_service *sv);
-int srpc_service_add_buffers(struct srpc_service *sv, int nbuffer);
-void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer);
-void srpc_get_counters(struct srpc_counters *cnt);
-void srpc_set_counters(const struct srpc_counters *cnt);
-
-extern struct workqueue_struct *lst_serial_wq;
-extern struct workqueue_struct **lst_test_wq;
-
-static inline int
-srpc_serv_is_framework(struct srpc_service *svc)
-{
- return svc->sv_id < SRPC_FRAMEWORK_SERVICE_MAX_ID;
-}
-
-static void
-swi_wi_action(struct work_struct *wi)
-{
- struct swi_workitem *swi;
-
- swi = container_of(wi, struct swi_workitem, swi_work);
-
- swi->swi_action(swi);
-}
-
-static inline void
-swi_init_workitem(struct swi_workitem *swi,
- swi_action_t action, struct workqueue_struct *wq)
-{
- swi->swi_wq = wq;
- swi->swi_action = action;
- swi->swi_state = SWI_STATE_NEWBORN;
- INIT_WORK(&swi->swi_work, swi_wi_action);
-}
-
-static inline void
-swi_schedule_workitem(struct swi_workitem *wi)
-{
- queue_work(wi->swi_wq, &wi->swi_work);
-}
-
-static inline int
-swi_cancel_workitem(struct swi_workitem *swi)
-{
- return cancel_work_sync(&swi->swi_work);
-}
-
-int sfw_startup(void);
-int srpc_startup(void);
-void sfw_shutdown(void);
-void srpc_shutdown(void);
-
-static inline void
-srpc_destroy_client_rpc(struct srpc_client_rpc *rpc)
-{
- LASSERT(rpc);
- LASSERT(!srpc_event_pending(rpc));
- LASSERT(!atomic_read(&rpc->crpc_refcount));
-
- if (!rpc->crpc_fini)
- kfree(rpc);
- else
- (*rpc->crpc_fini)(rpc);
-}
-
-static inline void
-srpc_init_client_rpc(struct srpc_client_rpc *rpc, struct lnet_process_id peer,
- int service, int nbulkiov, int bulklen,
- void (*rpc_done)(struct srpc_client_rpc *),
- void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
-{
- LASSERT(nbulkiov <= LNET_MAX_IOV);
-
- memset(rpc, 0, offsetof(struct srpc_client_rpc,
- crpc_bulk.bk_iovs[nbulkiov]));
-
- INIT_LIST_HEAD(&rpc->crpc_list);
- swi_init_workitem(&rpc->crpc_wi, srpc_send_rpc,
- lst_test_wq[lnet_cpt_of_nid(peer.nid)]);
- spin_lock_init(&rpc->crpc_lock);
- atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
-
- rpc->crpc_dest = peer;
- rpc->crpc_priv = priv;
- rpc->crpc_service = service;
- rpc->crpc_bulk.bk_len = bulklen;
- rpc->crpc_bulk.bk_niov = nbulkiov;
- rpc->crpc_done = rpc_done;
- rpc->crpc_fini = rpc_fini;
- LNetInvalidateMDHandle(&rpc->crpc_reqstmdh);
- LNetInvalidateMDHandle(&rpc->crpc_replymdh);
- LNetInvalidateMDHandle(&rpc->crpc_bulk.bk_mdh);
-
- /* no event is expected at this point */
- rpc->crpc_bulkev.ev_fired = 1;
- rpc->crpc_reqstev.ev_fired = 1;
- rpc->crpc_replyev.ev_fired = 1;
-
- rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC;
- rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
- rpc->crpc_reqstmsg.msg_type = srpc_service2request(service);
-}
-
-static inline const char *
-swi_state2str(int state)
-{
-#define STATE2STR(x) case x: return #x
- switch (state) {
- default:
- LBUG();
- STATE2STR(SWI_STATE_NEWBORN);
- STATE2STR(SWI_STATE_REPLY_SUBMITTED);
- STATE2STR(SWI_STATE_REPLY_SENT);
- STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
- STATE2STR(SWI_STATE_REQUEST_SENT);
- STATE2STR(SWI_STATE_REPLY_RECEIVED);
- STATE2STR(SWI_STATE_BULK_STARTED);
- STATE2STR(SWI_STATE_DONE);
- }
-#undef STATE2STR
-}
-
-#define selftest_wait_events() \
- do { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- schedule_timeout(HZ / 10); \
- } while (0)
-
-#define lst_wait_until(cond, lock, fmt, ...) \
-do { \
- int __I = 2; \
- while (!(cond)) { \
- CDEBUG(is_power_of_2(++__I) ? D_WARNING : D_NET, \
- fmt, ## __VA_ARGS__); \
- spin_unlock(&(lock)); \
- \
- selftest_wait_events(); \
- \
- spin_lock(&(lock)); \
- } \
-} while (0)
-
-static inline void
-srpc_wait_service_shutdown(struct srpc_service *sv)
-{
- int i = 2;
-
- LASSERT(sv->sv_shuttingdown);
-
- while (!srpc_finish_service(sv)) {
- i++;
- CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
- "Waiting for %s service to shutdown...\n",
- sv->sv_name);
- selftest_wait_events();
- }
-}
-
-extern struct sfw_test_client_ops brw_test_client;
-void brw_init_test_client(void);
-
-extern struct srpc_service brw_test_service;
-void brw_init_test_service(void);
-
-extern struct sfw_test_client_ops ping_test_client;
-void ping_init_test_client(void);
-
-extern struct srpc_service ping_test_service;
-void ping_init_test_service(void);
-
-#endif /* __SELFTEST_SELFTEST_H__ */
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
deleted file mode 100644
index 1b2c5fc81358..000000000000
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ /dev/null
@@ -1,244 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/timer.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "selftest.h"
-
-/*
- * Timers are implemented as a sorted queue of expiry times. The queue
- * is slotted, with each slot holding timers which expire in a
- * 2**STTIMER_MINPOLL (8) second period. The timers in each slot are
- * sorted by increasing expiry time. The number of slots is 2**7 (128),
- * to cover a time period of 1024 seconds into the future before wrapping.
- */
-#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
-#define STTIMER_SLOTTIME BIT(STTIMER_MINPOLL)
-#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
-#define STTIMER_NSLOTS BIT(7)
-#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
- (STTIMER_NSLOTS - 1))])
-
-static struct st_timer_data {
- spinlock_t stt_lock;
- unsigned long stt_prev_slot; /* start time of the slot processed
- * previously
- */
- struct list_head stt_hash[STTIMER_NSLOTS];
- int stt_shuttingdown;
- wait_queue_head_t stt_waitq;
- int stt_nthreads;
-} stt_data;
-
-void
-stt_add_timer(struct stt_timer *timer)
-{
- struct list_head *pos;
-
- spin_lock(&stt_data.stt_lock);
-
- LASSERT(stt_data.stt_nthreads > 0);
- LASSERT(!stt_data.stt_shuttingdown);
- LASSERT(timer->stt_func);
- LASSERT(list_empty(&timer->stt_list));
- LASSERT(timer->stt_expires > ktime_get_real_seconds());
-
- /* a simple insertion sort */
- list_for_each_prev(pos, STTIMER_SLOT(timer->stt_expires)) {
- struct stt_timer *old = list_entry(pos, struct stt_timer,
- stt_list);
-
- if (timer->stt_expires >= old->stt_expires)
- break;
- }
- list_add(&timer->stt_list, pos);
-
- spin_unlock(&stt_data.stt_lock);
-}
-
-/*
- * The function returns whether it has deactivated a pending timer or not.
- * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
- * active timer returns 1.)
- *
- * CAVEAT EMPTOR:
- * When 0 is returned, it is possible that timer->stt_func _is_ running on
- * another CPU.
- */
-int
-stt_del_timer(struct stt_timer *timer)
-{
- int ret = 0;
-
- spin_lock(&stt_data.stt_lock);
-
- LASSERT(stt_data.stt_nthreads > 0);
- LASSERT(!stt_data.stt_shuttingdown);
-
- if (!list_empty(&timer->stt_list)) {
- ret = 1;
- list_del_init(&timer->stt_list);
- }
-
- spin_unlock(&stt_data.stt_lock);
- return ret;
-}
-
-/* called with stt_data.stt_lock held */
-static int
-stt_expire_list(struct list_head *slot, time64_t now)
-{
- int expired = 0;
- struct stt_timer *timer;
-
- while (!list_empty(slot)) {
- timer = list_entry(slot->next, struct stt_timer, stt_list);
-
- if (timer->stt_expires > now)
- break;
-
- list_del_init(&timer->stt_list);
- spin_unlock(&stt_data.stt_lock);
-
- expired++;
- (*timer->stt_func) (timer->stt_data);
-
- spin_lock(&stt_data.stt_lock);
- }
-
- return expired;
-}
-
-static int
-stt_check_timers(unsigned long *last)
-{
- int expired = 0;
- time64_t now;
- unsigned long this_slot;
-
- now = ktime_get_real_seconds();
- this_slot = now & STTIMER_SLOTTIMEMASK;
-
- spin_lock(&stt_data.stt_lock);
-
- while (cfs_time_aftereq(this_slot, *last)) {
- expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
- this_slot = cfs_time_sub(this_slot, STTIMER_SLOTTIME);
- }
-
- *last = now & STTIMER_SLOTTIMEMASK;
- spin_unlock(&stt_data.stt_lock);
- return expired;
-}
-
-static int
-stt_timer_main(void *arg)
-{
- int rc = 0;
-
- while (!stt_data.stt_shuttingdown) {
- stt_check_timers(&stt_data.stt_prev_slot);
-
- rc = wait_event_timeout(stt_data.stt_waitq,
- stt_data.stt_shuttingdown,
- STTIMER_SLOTTIME * HZ);
- }
-
- spin_lock(&stt_data.stt_lock);
- stt_data.stt_nthreads--;
- spin_unlock(&stt_data.stt_lock);
- return rc;
-}
-
-static int
-stt_start_timer_thread(void)
-{
- struct task_struct *task;
-
- LASSERT(!stt_data.stt_shuttingdown);
-
- task = kthread_run(stt_timer_main, NULL, "st_timer");
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- spin_lock(&stt_data.stt_lock);
- stt_data.stt_nthreads++;
- spin_unlock(&stt_data.stt_lock);
- return 0;
-}
-
-int
-stt_startup(void)
-{
- int rc = 0;
- int i;
-
- stt_data.stt_shuttingdown = 0;
- stt_data.stt_prev_slot = ktime_get_real_seconds() & STTIMER_SLOTTIMEMASK;
-
- spin_lock_init(&stt_data.stt_lock);
- for (i = 0; i < STTIMER_NSLOTS; i++)
- INIT_LIST_HEAD(&stt_data.stt_hash[i]);
-
- stt_data.stt_nthreads = 0;
- init_waitqueue_head(&stt_data.stt_waitq);
- rc = stt_start_timer_thread();
- if (rc)
- CERROR("Can't spawn timer thread: %d\n", rc);
-
- return rc;
-}
-
-void
-stt_shutdown(void)
-{
- int i;
-
- spin_lock(&stt_data.stt_lock);
-
- for (i = 0; i < STTIMER_NSLOTS; i++)
- LASSERT(list_empty(&stt_data.stt_hash[i]));
-
- stt_data.stt_shuttingdown = 1;
-
- wake_up(&stt_data.stt_waitq);
- lst_wait_until(!stt_data.stt_nthreads, stt_data.stt_lock,
- "waiting for %d threads to terminate\n",
- stt_data.stt_nthreads);
-
- spin_unlock(&stt_data.stt_lock);
-}
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
deleted file mode 100644
index 7f0ef9bd0cda..000000000000
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/timer.h
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- */
-#ifndef __SELFTEST_TIMER_H__
-#define __SELFTEST_TIMER_H__
-
-struct stt_timer {
- struct list_head stt_list;
- time64_t stt_expires;
- void (*stt_func)(void *);
- void *stt_data;
-};
-
-void stt_add_timer(struct stt_timer *timer);
-int stt_del_timer(struct stt_timer *timer);
-int stt_startup(void);
-void stt_shutdown(void);
-
-#endif /* __SELFTEST_TIMER_H__ */
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
deleted file mode 100644
index ccb78a945995..000000000000
--- a/drivers/staging/lustre/lustre/Kconfig
+++ /dev/null
@@ -1,45 +0,0 @@
-config LUSTRE_FS
- tristate "Lustre file system client support"
- depends on LNET
- select CRYPTO
- select CRYPTO_CRC32
- select CRYPTO_CRC32_PCLMUL if X86
- select CRYPTO_CRC32C
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
- depends on MULTIUSER
- help
- This option enables Lustre file system client support. Choose Y
- here if you want to access a Lustre file system cluster. To compile
- this file system support as a module, choose M here: the module will
- be called lustre.
-
- To mount Lustre file systems, you also need to install the user space
- mount.lustre and other user space commands which can be found in the
- lustre-client package, available from
- http://downloads.whamcloud.com/public/lustre/
-
- Lustre file system is the most popular cluster file system in high
- performance computing. Source code of both kernel space and user space
- Lustre components can also be found at
- http://git.whamcloud.com/?p=fs/lustre-release.git;a=summary
-
- If unsure, say N.
-
- See also http://wiki.lustre.org/
-
-config LUSTRE_DEBUG_EXPENSIVE_CHECK
- bool "Enable Lustre DEBUG checks"
- depends on LUSTRE_FS
- help
- This option is mainly for debug purpose. It enables Lustre code to do
- expensive checks that may have a performance impact.
-
- Use with caution. If unsure, say N.
-
-config LUSTRE_TRANSLATE_ERRNOS
- bool
- depends on LUSTRE_FS && !X86
- default y
diff --git a/drivers/staging/lustre/lustre/Makefile b/drivers/staging/lustre/lustre/Makefile
deleted file mode 100644
index 331e4fcdd5a2..000000000000
--- a/drivers/staging/lustre/lustre/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += obdclass/ ptlrpc/ fld/ osc/ mgc/ \
- fid/ lov/ mdc/ lmv/ llite/ obdecho/
diff --git a/drivers/staging/lustre/lustre/fid/Makefile b/drivers/staging/lustre/lustre/fid/Makefile
deleted file mode 100644
index 77b65b92667d..000000000000
--- a/drivers/staging/lustre/lustre/fid/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include/
-
-obj-$(CONFIG_LUSTRE_FS) += fid.o
-fid-y := fid_request.o fid_lib.o lproc_fid.o
diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h
deleted file mode 100644
index b7b8f900df8e..000000000000
--- a/drivers/staging/lustre/lustre/fid/fid_internal.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/fid_internal.h
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-#ifndef __FID_INTERNAL_H
-#define __FID_INTERNAL_H
-
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <linux/libcfs/libcfs.h>
-
-/* Functions used internally in module. */
-
-extern struct lprocfs_vars seq_client_debugfs_list[];
-
-#endif /* __FID_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c
deleted file mode 100644
index 9577da33e666..000000000000
--- a/drivers/staging/lustre/lustre/fid/fid_lib.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/fid_lib.c
- *
- * Miscellaneous fid functions.
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FID
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/module.h>
-#include <lustre_fid.h>
-
-/**
- * A cluster-wide range from which fid-sequences are granted to servers and
- * then clients.
- *
- * Fid namespace:
- * <pre>
- * Normal FID: seq:64 [2^33,2^64-1] oid:32 ver:32
- * IGIF : 0:32, ino:32 gen:32 0:32
- * IDIF : 0:31, 1:1, ost-index:16, objd:48 0:32
- * </pre>
- *
- * The first 0x400 sequences of normal FID are reserved for special purpose.
- * FID_SEQ_START + 1 is for local file id generation.
- * FID_SEQ_START + 2 is for .lustre directory and its objects
- */
-const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE = {
- .lsr_start = FID_SEQ_NORMAL,
- .lsr_end = (__u64)~0ULL,
-};
-
-/* Zero range, used for init and other purposes. */
-const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE = {
- .lsr_start = 0,
-};
-
-/* Lustre Big Fs Lock fid. */
-const struct lu_fid LUSTRE_BFL_FID = { .f_seq = FID_SEQ_SPECIAL,
- .f_oid = FID_OID_SPECIAL_BFL,
- .f_ver = 0x0000000000000000 };
-EXPORT_SYMBOL(LUSTRE_BFL_FID);
-
-/** Special fid for ".lustre" directory */
-const struct lu_fid LU_DOT_LUSTRE_FID = { .f_seq = FID_SEQ_DOT_LUSTRE,
- .f_oid = FID_OID_DOT_LUSTRE,
- .f_ver = 0x0000000000000000 };
-EXPORT_SYMBOL(LU_DOT_LUSTRE_FID);
-
-/** Special fid for "fid" special object in .lustre */
-const struct lu_fid LU_OBF_FID = { .f_seq = FID_SEQ_DOT_LUSTRE,
- .f_oid = FID_OID_DOT_LUSTRE_OBF,
- .f_ver = 0x0000000000000000 };
-EXPORT_SYMBOL(LU_OBF_FID);
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
deleted file mode 100644
index 030680f37c79..000000000000
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ /dev/null
@@ -1,440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/fid_request.c
- *
- * Lustre Sequence Manager
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FID
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/module.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_fid.h>
-/* mdc RPC locks */
-#include <lustre_mdc.h>
-#include "fid_internal.h"
-
-static struct dentry *seq_debugfs_dir;
-
-static int seq_client_rpc(struct lu_client_seq *seq,
- struct lu_seq_range *output, __u32 opc,
- const char *opcname)
-{
- struct obd_export *exp = seq->lcs_exp;
- struct ptlrpc_request *req;
- struct lu_seq_range *out, *in;
- __u32 *op;
- unsigned int debug_mask;
- int rc;
-
- LASSERT(exp && !IS_ERR(exp));
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
- LUSTRE_MDS_VERSION, SEQ_QUERY);
- if (!req)
- return -ENOMEM;
-
- /* Init operation code */
- op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
- *op = opc;
-
- /* Zero out input range, this is not recovery yet. */
- in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
- lu_seq_range_init(in);
-
- ptlrpc_request_set_replen(req);
-
- in->lsr_index = seq->lcs_space.lsr_index;
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
- fld_range_set_mdt(in);
- else
- fld_range_set_ost(in);
-
- if (opc == SEQ_ALLOC_SUPER) {
- req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
- req->rq_reply_portal = MDC_REPLY_PORTAL;
- /* During allocating super sequence for data object,
- * the current thread might hold the export of MDT0(MDT0
- * precreating objects on this OST), and it will send the
- * request to MDT0 here, so we can not keep resending the
- * request here, otherwise if MDT0 is failed(umounted),
- * it can not release the export of MDT0
- */
- if (seq->lcs_type == LUSTRE_SEQ_DATA) {
- req->rq_no_delay = 1;
- req->rq_no_resend = 1;
- }
- debug_mask = D_CONSOLE;
- } else {
- if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
- req->rq_reply_portal = MDC_REPLY_PORTAL;
- req->rq_request_portal = SEQ_METADATA_PORTAL;
- } else {
- req->rq_reply_portal = OSC_REPLY_PORTAL;
- req->rq_request_portal = SEQ_DATA_PORTAL;
- }
- debug_mask = D_INFO;
- }
-
- ptlrpc_at_set_req_timeout(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out_req;
-
- out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
-
- if (!lu_seq_range_is_sane(out)) {
- CERROR("%s: Invalid range received from server: "
- DRANGE "\n", seq->lcs_name, PRANGE(out));
- rc = -EINVAL;
- goto out_req;
- }
-
- if (lu_seq_range_is_exhausted(out)) {
- CERROR("%s: Range received from server is exhausted: "
- DRANGE "]\n", seq->lcs_name, PRANGE(out));
- rc = -EINVAL;
- goto out_req;
- }
-
- *output = *out;
- CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence " DRANGE "]\n",
- seq->lcs_name, opcname, PRANGE(output));
-
-out_req:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-/* Request sequence-controller node to allocate new meta-sequence. */
-static int seq_client_alloc_meta(const struct lu_env *env,
- struct lu_client_seq *seq)
-{
- int rc;
-
- do {
- /* If meta server return -EINPROGRESS or EAGAIN,
- * it means meta server might not be ready to
- * allocate super sequence from sequence controller
- * (MDT0)yet
- */
- rc = seq_client_rpc(seq, &seq->lcs_space,
- SEQ_ALLOC_META, "meta");
- } while (rc == -EINPROGRESS || rc == -EAGAIN);
-
- return rc;
-}
-
-/* Allocate new sequence for client. */
-static int seq_client_alloc_seq(const struct lu_env *env,
- struct lu_client_seq *seq, u64 *seqnr)
-{
- int rc;
-
- LASSERT(lu_seq_range_is_sane(&seq->lcs_space));
-
- if (lu_seq_range_is_exhausted(&seq->lcs_space)) {
- rc = seq_client_alloc_meta(env, seq);
- if (rc) {
- CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
- seq->lcs_name, rc);
- *seqnr = U64_MAX;
- return rc;
- }
- CDEBUG(D_INFO, "%s: New range - " DRANGE "\n",
- seq->lcs_name, PRANGE(&seq->lcs_space));
- } else {
- rc = 0;
- }
-
- LASSERT(!lu_seq_range_is_exhausted(&seq->lcs_space));
- *seqnr = seq->lcs_space.lsr_start;
- seq->lcs_space.lsr_start += 1;
-
- CDEBUG(D_INFO, "%s: Allocated sequence [%#llx]\n", seq->lcs_name,
- *seqnr);
-
- return rc;
-}
-
-/* Allocate new fid on passed client @seq and save it to @fid. */
-int seq_client_alloc_fid(const struct lu_env *env,
- struct lu_client_seq *seq, struct lu_fid *fid)
-{
- int rc;
-
- LASSERT(seq);
- LASSERT(fid);
-
- spin_lock(&seq->lcs_lock);
-
- if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
- seq->lcs_fid.f_oid = seq->lcs_width;
-
- wait_event_cmd(seq->lcs_waitq,
- (!fid_is_zero(&seq->lcs_fid) &&
- fid_oid(&seq->lcs_fid) < seq->lcs_width) ||
- !seq->lcs_update,
- spin_unlock(&seq->lcs_lock),
- spin_lock(&seq->lcs_lock));
-
- if (!fid_is_zero(&seq->lcs_fid) &&
- fid_oid(&seq->lcs_fid) < seq->lcs_width) {
- /* Just bump last allocated fid and return to caller. */
- seq->lcs_fid.f_oid += 1;
- rc = 0;
- } else {
- u64 seqnr;
-
- LASSERT(seq->lcs_update == 0);
- seq->lcs_update = 1;
- spin_unlock(&seq->lcs_lock);
-
- rc = seq_client_alloc_seq(env, seq, &seqnr);
-
- spin_lock(&seq->lcs_lock);
- seq->lcs_update = 0;
- wake_up(&seq->lcs_waitq);
-
- if (rc) {
- CERROR("%s: Can't allocate new sequence, rc %d\n",
- seq->lcs_name, rc);
- spin_unlock(&seq->lcs_lock);
- return rc;
- }
-
- CDEBUG(D_INFO, "%s: Switch to sequence [0x%16.16llx]\n",
- seq->lcs_name, seqnr);
-
- seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID;
- seq->lcs_fid.f_seq = seqnr;
- seq->lcs_fid.f_ver = 0;
-
- /*
- * Inform caller that sequence switch is performed to allow it
- * to setup FLD for it.
- */
- rc = 1;
- }
-
- *fid = seq->lcs_fid;
- spin_unlock(&seq->lcs_lock);
-
- CDEBUG(D_INFO,
- "%s: Allocated FID " DFID "\n", seq->lcs_name, PFID(fid));
- return rc;
-}
-EXPORT_SYMBOL(seq_client_alloc_fid);
-
-/*
- * Finish the current sequence due to disconnect.
- * See mdc_import_event()
- */
-void seq_client_flush(struct lu_client_seq *seq)
-{
-
- LASSERT(seq);
- spin_lock(&seq->lcs_lock);
-
- wait_event_cmd(seq->lcs_waitq,
- !seq->lcs_update,
- spin_unlock(&seq->lcs_lock),
- spin_lock(&seq->lcs_lock));
-
- fid_zero(&seq->lcs_fid);
- /**
- * this id shld not be used for seq range allocation.
- * set to -1 for dgb check.
- */
-
- seq->lcs_space.lsr_index = -1;
-
- lu_seq_range_init(&seq->lcs_space);
- spin_unlock(&seq->lcs_lock);
-}
-EXPORT_SYMBOL(seq_client_flush);
-
-static void seq_client_debugfs_fini(struct lu_client_seq *seq)
-{
- if (!IS_ERR_OR_NULL(seq->lcs_debugfs_entry))
- ldebugfs_remove(&seq->lcs_debugfs_entry);
-}
-
-static int seq_client_debugfs_init(struct lu_client_seq *seq)
-{
- int rc;
-
- seq->lcs_debugfs_entry = ldebugfs_register(seq->lcs_name,
- seq_debugfs_dir,
- NULL, NULL);
-
- if (IS_ERR_OR_NULL(seq->lcs_debugfs_entry)) {
- CERROR("%s: LdebugFS failed in seq-init\n", seq->lcs_name);
- rc = seq->lcs_debugfs_entry ? PTR_ERR(seq->lcs_debugfs_entry)
- : -ENOMEM;
- seq->lcs_debugfs_entry = NULL;
- return rc;
- }
-
- rc = ldebugfs_add_vars(seq->lcs_debugfs_entry,
- seq_client_debugfs_list, seq);
- if (rc) {
- CERROR("%s: Can't init sequence manager debugfs, rc %d\n",
- seq->lcs_name, rc);
- goto out_cleanup;
- }
-
- return 0;
-
-out_cleanup:
- seq_client_debugfs_fini(seq);
- return rc;
-}
-
-static void seq_client_fini(struct lu_client_seq *seq)
-{
- seq_client_debugfs_fini(seq);
-
- if (seq->lcs_exp) {
- class_export_put(seq->lcs_exp);
- seq->lcs_exp = NULL;
- }
-}
-
-static int seq_client_init(struct lu_client_seq *seq,
- struct obd_export *exp,
- enum lu_cli_type type,
- const char *prefix)
-{
- int rc;
-
- LASSERT(seq);
- LASSERT(prefix);
-
- seq->lcs_type = type;
-
- spin_lock_init(&seq->lcs_lock);
- if (type == LUSTRE_SEQ_METADATA)
- seq->lcs_width = LUSTRE_METADATA_SEQ_MAX_WIDTH;
- else
- seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH;
-
- init_waitqueue_head(&seq->lcs_waitq);
- /* Make sure that things are clear before work is started. */
- seq_client_flush(seq);
-
- seq->lcs_exp = class_export_get(exp);
-
- snprintf(seq->lcs_name, sizeof(seq->lcs_name),
- "cli-%s", prefix);
-
- rc = seq_client_debugfs_init(seq);
- if (rc)
- seq_client_fini(seq);
- return rc;
-}
-
-int client_fid_init(struct obd_device *obd,
- struct obd_export *exp, enum lu_cli_type type)
-{
- struct client_obd *cli = &obd->u.cli;
- char *prefix;
- int rc;
-
- cli->cl_seq = kzalloc(sizeof(*cli->cl_seq), GFP_NOFS);
- if (!cli->cl_seq)
- return -ENOMEM;
-
- prefix = kzalloc(MAX_OBD_NAME + 5, GFP_NOFS);
- if (!prefix) {
- rc = -ENOMEM;
- goto out_free_seq;
- }
-
- snprintf(prefix, MAX_OBD_NAME + 5, "cli-%s", obd->obd_name);
-
- /* Init client side sequence-manager */
- rc = seq_client_init(cli->cl_seq, exp, type, prefix);
- kfree(prefix);
- if (rc)
- goto out_free_seq;
-
- return rc;
-out_free_seq:
- kfree(cli->cl_seq);
- cli->cl_seq = NULL;
- return rc;
-}
-EXPORT_SYMBOL(client_fid_init);
-
-int client_fid_fini(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
-
- if (cli->cl_seq) {
- seq_client_fini(cli->cl_seq);
- kfree(cli->cl_seq);
- cli->cl_seq = NULL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(client_fid_fini);
-
-static int __init fid_init(void)
-{
- seq_debugfs_dir = ldebugfs_register(LUSTRE_SEQ_NAME,
- debugfs_lustre_root,
- NULL, NULL);
- return PTR_ERR_OR_ZERO(seq_debugfs_dir);
-}
-
-static void __exit fid_exit(void)
-{
- if (!IS_ERR_OR_NULL(seq_debugfs_dir))
- ldebugfs_remove(&seq_debugfs_dir);
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre File IDentifier");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(fid_init);
-module_exit(fid_exit);
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
deleted file mode 100644
index a1e5bf9f36ec..000000000000
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/lproc_fid.c
- *
- * Lustre Sequence Manager
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FID
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/module.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_req_layout.h>
-#include <lustre_fid.h>
-#include "fid_internal.h"
-
-/* Format: [0x64BIT_INT - 0x64BIT_INT] + 32 bytes just in case */
-#define MAX_FID_RANGE_STRLEN (32 + 2 * 2 * sizeof(__u64))
-/*
- * Note: this function is only used for testing, it is no safe for production
- * use.
- */
-static int
-ldebugfs_fid_write_common(const char __user *buffer, size_t count,
- struct lu_seq_range *range)
-{
- struct lu_seq_range tmp;
- int rc;
- char kernbuf[MAX_FID_RANGE_STRLEN];
-
- LASSERT(range);
-
- if (count >= sizeof(kernbuf))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
-
- kernbuf[count] = 0;
-
- if (count == 5 && strcmp(kernbuf, "clear") == 0) {
- memset(range, 0, sizeof(*range));
- return count;
- }
-
- /* of the form "[0x0000000240000400 - 0x000000028000400]" */
- rc = sscanf(kernbuf, "[%llx - %llx]\n",
- (unsigned long long *)&tmp.lsr_start,
- (unsigned long long *)&tmp.lsr_end);
- if (rc != 2)
- return -EINVAL;
- if (!lu_seq_range_is_sane(&tmp) || lu_seq_range_is_zero(&tmp) ||
- tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end)
- return -EINVAL;
- *range = tmp;
- return count;
-}
-
-/* Client side debugfs stuff */
-static ssize_t
-ldebugfs_fid_space_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct lu_client_seq *seq;
- struct lu_seq_range range;
- int rc;
-
- seq = ((struct seq_file *)file->private_data)->private;
-
- rc = ldebugfs_fid_write_common(buffer, count, &range);
-
- spin_lock(&seq->lcs_lock);
- if (seq->lcs_update)
- /* An RPC call is active to update lcs_space */
- rc = -EBUSY;
- if (rc > 0)
- seq->lcs_space = range;
- spin_unlock(&seq->lcs_lock);
-
- if (rc > 0) {
- CDEBUG(D_INFO, "%s: Space: " DRANGE "\n",
- seq->lcs_name, PRANGE(&range));
- }
-
- return rc;
-}
-
-static int
-ldebugfs_fid_space_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
- int rc = 0;
-
- spin_lock(&seq->lcs_lock);
- if (seq->lcs_update)
- rc = -EBUSY;
- else
- seq_printf(m, "[%#llx - %#llx]:%x:%s\n", PRANGE(&seq->lcs_space));
- spin_unlock(&seq->lcs_lock);
-
- return rc;
-}
-
-static ssize_t
-ldebugfs_fid_width_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct lu_client_seq *seq;
- __u64 max;
- int rc, val;
-
- seq = ((struct seq_file *)file->private_data)->private;
-
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- spin_lock(&seq->lcs_lock);
- if (seq->lcs_type == LUSTRE_SEQ_DATA)
- max = LUSTRE_DATA_SEQ_MAX_WIDTH;
- else
- max = LUSTRE_METADATA_SEQ_MAX_WIDTH;
-
- if (val <= max && val > 0) {
- seq->lcs_width = val;
-
- CDEBUG(D_INFO, "%s: Sequence size: %llu\n", seq->lcs_name,
- seq->lcs_width);
- }
-
- spin_unlock(&seq->lcs_lock);
-
- return count;
-}
-
-static int
-ldebugfs_fid_width_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
-
- spin_lock(&seq->lcs_lock);
- seq_printf(m, "%llu\n", seq->lcs_width);
- spin_unlock(&seq->lcs_lock);
-
- return 0;
-}
-
-static int
-ldebugfs_fid_fid_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
-
- spin_lock(&seq->lcs_lock);
- seq_printf(m, DFID "\n", PFID(&seq->lcs_fid));
- spin_unlock(&seq->lcs_lock);
-
- return 0;
-}
-
-static int
-ldebugfs_fid_server_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
- struct client_obd *cli;
-
- if (seq->lcs_exp) {
- cli = &seq->lcs_exp->exp_obd->u.cli;
- seq_printf(m, "%s\n", cli->cl_target_uuid.uuid);
- }
-
- return 0;
-}
-
-LPROC_SEQ_FOPS(ldebugfs_fid_space);
-LPROC_SEQ_FOPS(ldebugfs_fid_width);
-LPROC_SEQ_FOPS_RO(ldebugfs_fid_server);
-LPROC_SEQ_FOPS_RO(ldebugfs_fid_fid);
-
-struct lprocfs_vars seq_client_debugfs_list[] = {
- { .name = "space",
- .fops = &ldebugfs_fid_space_fops },
- { .name = "width",
- .fops = &ldebugfs_fid_width_fops },
- { .name = "server",
- .fops = &ldebugfs_fid_server_fops },
- { .name = "fid",
- .fops = &ldebugfs_fid_fid_fops },
- { NULL }
-};
diff --git a/drivers/staging/lustre/lustre/fld/Makefile b/drivers/staging/lustre/lustre/fld/Makefile
deleted file mode 100644
index 426deba8b815..000000000000
--- a/drivers/staging/lustre/lustre/fld/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include/
-
-obj-$(CONFIG_LUSTRE_FS) += fld.o
-fld-y := fld_request.o fld_cache.o lproc_fld.o
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
deleted file mode 100644
index 2d61ca4e51cf..000000000000
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ /dev/null
@@ -1,517 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/fld_cache.c
- *
- * FLD (Fids Location Database)
- *
- * Author: Pravin Shelar <pravin.shelar@sun.com>
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FLD
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/module.h>
-#include <asm/div64.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <uapi/linux/lustre/lustre_ver.h>
-#include <obd_support.h>
-#include <lprocfs_status.h>
-
-#include <lustre_req_layout.h>
-#include <lustre_fld.h>
-#include "fld_internal.h"
-
-/**
- * create fld cache.
- */
-struct fld_cache *fld_cache_init(const char *name,
- int cache_size, int cache_threshold)
-{
- struct fld_cache *cache;
-
- LASSERT(name);
- LASSERT(cache_threshold < cache_size);
-
- cache = kzalloc(sizeof(*cache), GFP_NOFS);
- if (!cache)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&cache->fci_entries_head);
- INIT_LIST_HEAD(&cache->fci_lru);
-
- cache->fci_cache_count = 0;
- rwlock_init(&cache->fci_lock);
-
- strlcpy(cache->fci_name, name,
- sizeof(cache->fci_name));
-
- cache->fci_cache_size = cache_size;
- cache->fci_threshold = cache_threshold;
-
- /* Init fld cache info. */
- memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));
-
- CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
- cache->fci_name, cache_size, cache_threshold);
-
- return cache;
-}
-
-/**
- * destroy fld cache.
- */
-void fld_cache_fini(struct fld_cache *cache)
-{
- __u64 pct;
-
- LASSERT(cache);
- fld_cache_flush(cache);
-
- if (cache->fci_stat.fst_count > 0) {
- pct = cache->fci_stat.fst_cache * 100;
- do_div(pct, cache->fci_stat.fst_count);
- } else {
- pct = 0;
- }
-
- CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
- CDEBUG(D_INFO, " Total reqs: %llu\n", cache->fci_stat.fst_count);
- CDEBUG(D_INFO, " Cache reqs: %llu\n", cache->fci_stat.fst_cache);
- CDEBUG(D_INFO, " Cache hits: %llu%%\n", pct);
-
- kfree(cache);
-}
-
-/**
- * delete given node from list.
- */
-static void fld_cache_entry_delete(struct fld_cache *cache,
- struct fld_cache_entry *node)
-{
- list_del(&node->fce_list);
- list_del(&node->fce_lru);
- cache->fci_cache_count--;
- kfree(node);
-}
-
-/**
- * fix list by checking new entry with NEXT entry in order.
- */
-static void fld_fix_new_list(struct fld_cache *cache)
-{
- struct fld_cache_entry *f_curr;
- struct fld_cache_entry *f_next;
- struct lu_seq_range *c_range;
- struct lu_seq_range *n_range;
- struct list_head *head = &cache->fci_entries_head;
-
-restart_fixup:
-
- list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
- c_range = &f_curr->fce_range;
- n_range = &f_next->fce_range;
-
- LASSERT(lu_seq_range_is_sane(c_range));
- if (&f_next->fce_list == head)
- break;
-
- if (c_range->lsr_flags != n_range->lsr_flags)
- continue;
-
- LASSERTF(c_range->lsr_start <= n_range->lsr_start,
- "cur lsr_start " DRANGE " next lsr_start " DRANGE "\n",
- PRANGE(c_range), PRANGE(n_range));
-
- /* check merge possibility with next range */
- if (c_range->lsr_end == n_range->lsr_start) {
- if (c_range->lsr_index != n_range->lsr_index)
- continue;
- n_range->lsr_start = c_range->lsr_start;
- fld_cache_entry_delete(cache, f_curr);
- continue;
- }
-
- /* check if current range overlaps with next range. */
- if (n_range->lsr_start < c_range->lsr_end) {
- if (c_range->lsr_index == n_range->lsr_index) {
- n_range->lsr_start = c_range->lsr_start;
- n_range->lsr_end = max(c_range->lsr_end,
- n_range->lsr_end);
- fld_cache_entry_delete(cache, f_curr);
- } else {
- if (n_range->lsr_end <= c_range->lsr_end) {
- *n_range = *c_range;
- fld_cache_entry_delete(cache, f_curr);
- } else {
- n_range->lsr_start = c_range->lsr_end;
- }
- }
-
- /* we could have overlap over next
- * range too. better restart.
- */
- goto restart_fixup;
- }
-
- /* kill duplicates */
- if (c_range->lsr_start == n_range->lsr_start &&
- c_range->lsr_end == n_range->lsr_end)
- fld_cache_entry_delete(cache, f_curr);
- }
-}
-
-/**
- * add node to fld cache
- */
-static inline void fld_cache_entry_add(struct fld_cache *cache,
- struct fld_cache_entry *f_new,
- struct list_head *pos)
-{
- list_add(&f_new->fce_list, pos);
- list_add(&f_new->fce_lru, &cache->fci_lru);
-
- cache->fci_cache_count++;
- fld_fix_new_list(cache);
-}
-
-/**
- * Check if cache needs to be shrunk. If so - do it.
- * Remove one entry in list and so on until cache is shrunk enough.
- */
-static int fld_cache_shrink(struct fld_cache *cache)
-{
- int num = 0;
-
- if (cache->fci_cache_count < cache->fci_cache_size)
- return 0;
-
- while (cache->fci_cache_count + cache->fci_threshold >
- cache->fci_cache_size &&
- !list_empty(&cache->fci_lru)) {
- struct fld_cache_entry *flde =
- list_last_entry(&cache->fci_lru,
- struct fld_cache_entry, fce_lru);
-
- fld_cache_entry_delete(cache, flde);
- num++;
- }
-
- CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n",
- cache->fci_name, num);
-
- return 0;
-}
-
-/**
- * kill all fld cache entries.
- */
-void fld_cache_flush(struct fld_cache *cache)
-{
- write_lock(&cache->fci_lock);
- cache->fci_cache_size = 0;
- fld_cache_shrink(cache);
- write_unlock(&cache->fci_lock);
-}
-
-/**
- * punch hole in existing range. divide this range and add new
- * entry accordingly.
- */
-
-static void fld_cache_punch_hole(struct fld_cache *cache,
- struct fld_cache_entry *f_curr,
- struct fld_cache_entry *f_new)
-{
- const struct lu_seq_range *range = &f_new->fce_range;
- const u64 new_start = range->lsr_start;
- const u64 new_end = range->lsr_end;
- struct fld_cache_entry *fldt;
-
- fldt = kzalloc(sizeof(*fldt), GFP_ATOMIC);
- if (!fldt) {
- kfree(f_new);
- /* overlap is not allowed, so don't mess up list. */
- return;
- }
- /* break f_curr RANGE into three RANGES:
- * f_curr, f_new , fldt
- */
-
- /* f_new = *range */
-
- /* fldt */
- fldt->fce_range.lsr_start = new_end;
- fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
- fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;
-
- /* f_curr */
- f_curr->fce_range.lsr_end = new_start;
-
- /* add these two entries to list */
- fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
- fld_cache_entry_add(cache, fldt, &f_new->fce_list);
-
- /* no need to fixup */
-}
-
-/**
- * handle range overlap in fld cache.
- */
-static void fld_cache_overlap_handle(struct fld_cache *cache,
- struct fld_cache_entry *f_curr,
- struct fld_cache_entry *f_new)
-{
- const struct lu_seq_range *range = &f_new->fce_range;
- const u64 new_start = range->lsr_start;
- const u64 new_end = range->lsr_end;
- const u32 mdt = range->lsr_index;
-
- /* this is overlap case, these case are checking overlapping with
- * prev range only. fixup will handle overlapping with next range.
- */
-
- if (f_curr->fce_range.lsr_index == mdt) {
- f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
- new_start);
-
- f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end,
- new_end);
-
- kfree(f_new);
- fld_fix_new_list(cache);
-
- } else if (new_start <= f_curr->fce_range.lsr_start &&
- f_curr->fce_range.lsr_end <= new_end) {
- /* case 1: new range completely overshadowed existing range.
- * e.g. whole range migrated. update fld cache entry
- */
-
- f_curr->fce_range = *range;
- kfree(f_new);
- fld_fix_new_list(cache);
-
- } else if (f_curr->fce_range.lsr_start < new_start &&
- new_end < f_curr->fce_range.lsr_end) {
- /* case 2: new range fit within existing range. */
-
- fld_cache_punch_hole(cache, f_curr, f_new);
-
- } else if (new_end <= f_curr->fce_range.lsr_end) {
- /* case 3: overlap:
- * [new_start [c_start new_end) c_end)
- */
-
- LASSERT(new_start <= f_curr->fce_range.lsr_start);
-
- f_curr->fce_range.lsr_start = new_end;
- fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev);
-
- } else if (f_curr->fce_range.lsr_start <= new_start) {
- /* case 4: overlap:
- * [c_start [new_start c_end) new_end)
- */
-
- LASSERT(f_curr->fce_range.lsr_end <= new_end);
-
- f_curr->fce_range.lsr_end = new_start;
- fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
- } else {
- CERROR("NEW range =" DRANGE " curr = " DRANGE "\n",
- PRANGE(range), PRANGE(&f_curr->fce_range));
- }
-}
-
-struct fld_cache_entry
-*fld_cache_entry_create(const struct lu_seq_range *range)
-{
- struct fld_cache_entry *f_new;
-
- LASSERT(lu_seq_range_is_sane(range));
-
- f_new = kzalloc(sizeof(*f_new), GFP_NOFS);
- if (!f_new)
- return ERR_PTR(-ENOMEM);
-
- f_new->fce_range = *range;
- return f_new;
-}
-
-/**
- * Insert FLD entry in FLD cache.
- *
- * This function handles all cases of merging and breaking up of
- * ranges.
- */
-static int fld_cache_insert_nolock(struct fld_cache *cache,
- struct fld_cache_entry *f_new)
-{
- struct fld_cache_entry *f_curr;
- struct fld_cache_entry *n;
- struct list_head *head;
- struct list_head *prev = NULL;
- const u64 new_start = f_new->fce_range.lsr_start;
- const u64 new_end = f_new->fce_range.lsr_end;
- __u32 new_flags = f_new->fce_range.lsr_flags;
-
- /*
- * Duplicate entries are eliminated in insert op.
- * So we don't need to search new entry before starting
- * insertion loop.
- */
-
- if (!cache->fci_no_shrink)
- fld_cache_shrink(cache);
-
- head = &cache->fci_entries_head;
-
- list_for_each_entry_safe(f_curr, n, head, fce_list) {
- /* add list if next is end of list */
- if (new_end < f_curr->fce_range.lsr_start ||
- (new_end == f_curr->fce_range.lsr_start &&
- new_flags != f_curr->fce_range.lsr_flags))
- break;
-
- prev = &f_curr->fce_list;
- /* check if this range is to left of new range. */
- if (new_start < f_curr->fce_range.lsr_end &&
- new_flags == f_curr->fce_range.lsr_flags) {
- fld_cache_overlap_handle(cache, f_curr, f_new);
- goto out;
- }
- }
-
- if (!prev)
- prev = head;
-
- CDEBUG(D_INFO, "insert range " DRANGE "\n", PRANGE(&f_new->fce_range));
- /* Add new entry to cache and lru list. */
- fld_cache_entry_add(cache, f_new, prev);
-out:
- return 0;
-}
-
-int fld_cache_insert(struct fld_cache *cache,
- const struct lu_seq_range *range)
-{
- struct fld_cache_entry *flde;
- int rc;
-
- flde = fld_cache_entry_create(range);
- if (IS_ERR(flde))
- return PTR_ERR(flde);
-
- write_lock(&cache->fci_lock);
- rc = fld_cache_insert_nolock(cache, flde);
- write_unlock(&cache->fci_lock);
- if (rc)
- kfree(flde);
-
- return rc;
-}
-
-/**
- * Delete FLD entry in FLD cache.
- *
- */
-
-struct fld_cache_entry
-*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
- struct lu_seq_range *range)
-{
- struct fld_cache_entry *flde;
- struct fld_cache_entry *got = NULL;
- struct list_head *head;
-
- head = &cache->fci_entries_head;
- list_for_each_entry(flde, head, fce_list) {
- if (range->lsr_start == flde->fce_range.lsr_start ||
- (range->lsr_end == flde->fce_range.lsr_end &&
- range->lsr_flags == flde->fce_range.lsr_flags)) {
- got = flde;
- break;
- }
- }
-
- return got;
-}
-
-/**
- * lookup \a seq sequence for range in fld cache.
- */
-struct fld_cache_entry
-*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range)
-{
- struct fld_cache_entry *got = NULL;
-
- read_lock(&cache->fci_lock);
- got = fld_cache_entry_lookup_nolock(cache, range);
- read_unlock(&cache->fci_lock);
- return got;
-}
-
-/**
- * lookup \a seq sequence for range in fld cache.
- */
-int fld_cache_lookup(struct fld_cache *cache,
- const u64 seq, struct lu_seq_range *range)
-{
- struct fld_cache_entry *flde;
- struct fld_cache_entry *prev = NULL;
- struct list_head *head;
-
- read_lock(&cache->fci_lock);
- head = &cache->fci_entries_head;
-
- cache->fci_stat.fst_count++;
- list_for_each_entry(flde, head, fce_list) {
- if (flde->fce_range.lsr_start > seq) {
- if (prev)
- *range = prev->fce_range;
- break;
- }
-
- prev = flde;
- if (lu_seq_range_within(&flde->fce_range, seq)) {
- *range = flde->fce_range;
-
- cache->fci_stat.fst_cache++;
- read_unlock(&cache->fci_lock);
- return 0;
- }
- }
- read_unlock(&cache->fci_lock);
- return -ENOENT;
-}
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
deleted file mode 100644
index b5e3abaa508a..000000000000
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ /dev/null
@@ -1,171 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/fld_internal.h
- *
- * Subsystem Description:
- * FLD is FID Location Database, which stores where (IE, on which MDT)
- * FIDs are located.
- * The database is basically a record file, each record consists of a FID
- * sequence range, MDT/OST index, and flags. The FLD for the whole FS
- * is only stored on the sequence controller(MDT0) right now, but each target
- * also has its local FLD, which only stores the local sequence.
- *
- * The FLD subsystem usually has two tasks:
- * 1. maintain the database, i.e. when the sequence controller allocates
- * new sequence ranges to some nodes, it will call the FLD API to insert the
- * location information <sequence_range, node_index> in FLDB.
- *
- * 2. Handle requests from other nodes, i.e. if client needs to know where
- * the FID is located, if it can not find the information in the local cache,
- * it will send a FLD lookup RPC to the FLD service, and the FLD service will
- * look up the FLDB entry and return the location information to client.
- *
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- * Author: Tom WangDi <wangdi@clusterfs.com>
- */
-#ifndef __FLD_INTERNAL_H
-#define __FLD_INTERNAL_H
-
-#include <uapi/linux/lustre/lustre_idl.h>
-
-#include <linux/libcfs/libcfs.h>
-#include <lustre_req_layout.h>
-#include <lustre_fld.h>
-
-struct fld_stats {
- __u64 fst_count;
- __u64 fst_cache;
- __u64 fst_inflight;
-};
-
-struct lu_fld_hash {
- const char *fh_name;
- int (*fh_hash_func)(struct lu_client_fld *, __u64);
- struct lu_fld_target *(*fh_scan_func)(struct lu_client_fld *, __u64);
-};
-
-struct fld_cache_entry {
- struct list_head fce_lru;
- struct list_head fce_list;
- /** fld cache entries are sorted on range->lsr_start field. */
- struct lu_seq_range fce_range;
-};
-
-struct fld_cache {
- /**
- * Cache guard, protects fci_hash mostly because others immutable after
- * init is finished.
- */
- rwlock_t fci_lock;
-
- /** Cache shrink threshold */
- int fci_threshold;
-
- /** Preferred number of cached entries */
- int fci_cache_size;
-
- /** Current number of cached entries. Protected by \a fci_lock */
- int fci_cache_count;
-
- /** LRU list fld entries. */
- struct list_head fci_lru;
-
- /** sorted fld entries. */
- struct list_head fci_entries_head;
-
- /** Cache statistics. */
- struct fld_stats fci_stat;
-
- /** Cache name used for debug and messages. */
- char fci_name[LUSTRE_MDT_MAXNAMELEN];
- unsigned int fci_no_shrink:1;
-};
-
-enum {
- /* 4M of FLD cache will not hurt client a lot. */
- FLD_SERVER_CACHE_SIZE = (4 * 0x100000),
-
- /* 1M of FLD cache will not hurt client a lot. */
- FLD_CLIENT_CACHE_SIZE = (1 * 0x100000)
-};
-
-enum {
- /* Cache threshold is 10 percent of size. */
- FLD_SERVER_CACHE_THRESHOLD = 10,
-
- /* Cache threshold is 10 percent of size. */
- FLD_CLIENT_CACHE_THRESHOLD = 10
-};
-
-extern struct lu_fld_hash fld_hash[];
-
-int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op,
- struct ptlrpc_request **reqp);
-
-extern struct lprocfs_vars fld_client_debugfs_list[];
-
-struct fld_cache *fld_cache_init(const char *name,
- int cache_size, int cache_threshold);
-
-void fld_cache_fini(struct fld_cache *cache);
-
-void fld_cache_flush(struct fld_cache *cache);
-
-int fld_cache_insert(struct fld_cache *cache,
- const struct lu_seq_range *range);
-
-struct fld_cache_entry
-*fld_cache_entry_create(const struct lu_seq_range *range);
-
-int fld_cache_lookup(struct fld_cache *cache,
- const u64 seq, struct lu_seq_range *range);
-
-struct fld_cache_entry*
-fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range);
-
-struct fld_cache_entry
-*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
- struct lu_seq_range *range);
-
-static inline const char *
-fld_target_name(struct lu_fld_target *tar)
-{
- if (tar->ft_srv)
- return tar->ft_srv->lsf_name;
-
- return (const char *)tar->ft_exp->exp_obd->obd_name;
-}
-
-#endif /* __FLD_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
deleted file mode 100644
index 068c364adda8..000000000000
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ /dev/null
@@ -1,471 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/fld_request.c
- *
- * FLD (Fids Location Database)
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FLD
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/module.h>
-#include <asm/div64.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <uapi/linux/lustre/lustre_ver.h>
-#include <obd_support.h>
-#include <lprocfs_status.h>
-
-#include <lustre_req_layout.h>
-#include <lustre_fld.h>
-#include <lustre_mdc.h>
-#include "fld_internal.h"
-
-static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq)
-{
- LASSERT(fld->lcf_count > 0);
- return do_div(seq, fld->lcf_count);
-}
-
-static struct lu_fld_target *
-fld_rrb_scan(struct lu_client_fld *fld, u64 seq)
-{
- struct lu_fld_target *target;
- int hash;
-
- /* Because almost all of special sequence located in MDT0,
- * it should go to index 0 directly, instead of calculating
- * hash again, and also if other MDTs is not being connected,
- * the fld lookup requests(for seq on MDT0) should not be
- * blocked because of other MDTs
- */
- if (fid_seq_is_norm(seq))
- hash = fld_rrb_hash(fld, seq);
- else
- hash = 0;
-
-again:
- list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
- if (target->ft_idx == hash)
- return target;
- }
-
- if (hash != 0) {
- /* It is possible the remote target(MDT) are not connected to
- * with client yet, so we will refer this to MDT0, which should
- * be connected during mount
- */
- hash = 0;
- goto again;
- }
-
- CERROR("%s: Can't find target by hash %d (seq %#llx). Targets (%d):\n",
- fld->lcf_name, hash, seq, fld->lcf_count);
-
- list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
- const char *srv_name = target->ft_srv ?
- target->ft_srv->lsf_name : "<null>";
- const char *exp_name = target->ft_exp ?
- (char *)target->ft_exp->exp_obd->obd_uuid.uuid :
- "<null>";
-
- CERROR(" exp: 0x%p (%s), srv: 0x%p (%s), idx: %llu\n",
- target->ft_exp, exp_name, target->ft_srv,
- srv_name, target->ft_idx);
- }
-
- /*
- * If target is not found, there is logical error anyway, so here is
- * LBUG() to catch this situation.
- */
- LBUG();
- return NULL;
-}
-
-struct lu_fld_hash fld_hash[] = {
- {
- .fh_name = "RRB",
- .fh_hash_func = fld_rrb_hash,
- .fh_scan_func = fld_rrb_scan
- },
- {
- NULL,
- }
-};
-
-static struct lu_fld_target *
-fld_client_get_target(struct lu_client_fld *fld, u64 seq)
-{
- struct lu_fld_target *target;
-
- LASSERT(fld->lcf_hash);
-
- spin_lock(&fld->lcf_lock);
- target = fld->lcf_hash->fh_scan_func(fld, seq);
- spin_unlock(&fld->lcf_lock);
-
- if (target) {
- CDEBUG(D_INFO, "%s: Found target (idx %llu) by seq %#llx\n",
- fld->lcf_name, target->ft_idx, seq);
- }
-
- return target;
-}
-
-/*
- * Add export to FLD. This is usually done by CMM and LMV as they are main users
- * of FLD module.
- */
-int fld_client_add_target(struct lu_client_fld *fld,
- struct lu_fld_target *tar)
-{
- const char *name;
- struct lu_fld_target *target, *tmp;
-
- LASSERT(tar);
- name = fld_target_name(tar);
- LASSERT(name);
- LASSERT(tar->ft_srv || tar->ft_exp);
-
- CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n",
- fld->lcf_name, name, tar->ft_idx);
-
- target = kzalloc(sizeof(*target), GFP_NOFS);
- if (!target)
- return -ENOMEM;
-
- spin_lock(&fld->lcf_lock);
- list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
- if (tmp->ft_idx == tar->ft_idx) {
- spin_unlock(&fld->lcf_lock);
- kfree(target);
- CERROR("Target %s exists in FLD and known as %s:#%llu\n",
- name, fld_target_name(tmp), tmp->ft_idx);
- return -EEXIST;
- }
- }
-
- target->ft_exp = tar->ft_exp;
- if (target->ft_exp)
- class_export_get(target->ft_exp);
- target->ft_srv = tar->ft_srv;
- target->ft_idx = tar->ft_idx;
-
- list_add_tail(&target->ft_chain, &fld->lcf_targets);
-
- fld->lcf_count++;
- spin_unlock(&fld->lcf_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(fld_client_add_target);
-
-/* Remove export from FLD */
-int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
-{
- struct lu_fld_target *target, *tmp;
-
- spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
- if (target->ft_idx == idx) {
- fld->lcf_count--;
- list_del(&target->ft_chain);
- spin_unlock(&fld->lcf_lock);
-
- if (target->ft_exp)
- class_export_put(target->ft_exp);
-
- kfree(target);
- return 0;
- }
- }
- spin_unlock(&fld->lcf_lock);
- return -ENOENT;
-}
-
-static struct dentry *fld_debugfs_dir;
-
-static int fld_client_debugfs_init(struct lu_client_fld *fld)
-{
- int rc;
-
- fld->lcf_debugfs_entry = ldebugfs_register(fld->lcf_name,
- fld_debugfs_dir,
- NULL, NULL);
-
- if (IS_ERR_OR_NULL(fld->lcf_debugfs_entry)) {
- CERROR("%s: LdebugFS failed in fld-init\n", fld->lcf_name);
- rc = fld->lcf_debugfs_entry ? PTR_ERR(fld->lcf_debugfs_entry)
- : -ENOMEM;
- fld->lcf_debugfs_entry = NULL;
- return rc;
- }
-
- rc = ldebugfs_add_vars(fld->lcf_debugfs_entry,
- fld_client_debugfs_list, fld);
- if (rc) {
- CERROR("%s: Can't init FLD debufs, rc %d\n", fld->lcf_name, rc);
- goto out_cleanup;
- }
-
- return 0;
-
-out_cleanup:
- fld_client_debugfs_fini(fld);
- return rc;
-}
-
-void fld_client_debugfs_fini(struct lu_client_fld *fld)
-{
- if (!IS_ERR_OR_NULL(fld->lcf_debugfs_entry))
- ldebugfs_remove(&fld->lcf_debugfs_entry);
-}
-EXPORT_SYMBOL(fld_client_debugfs_fini);
-
-static inline int hash_is_sane(int hash)
-{
- return (hash >= 0 && hash < ARRAY_SIZE(fld_hash));
-}
-
-int fld_client_init(struct lu_client_fld *fld,
- const char *prefix, int hash)
-{
- int cache_size, cache_threshold;
- int rc;
-
- snprintf(fld->lcf_name, sizeof(fld->lcf_name),
- "cli-%s", prefix);
-
- if (!hash_is_sane(hash)) {
- CERROR("%s: Wrong hash function %#x\n",
- fld->lcf_name, hash);
- return -EINVAL;
- }
-
- fld->lcf_count = 0;
- spin_lock_init(&fld->lcf_lock);
- fld->lcf_hash = &fld_hash[hash];
- INIT_LIST_HEAD(&fld->lcf_targets);
-
- cache_size = FLD_CLIENT_CACHE_SIZE /
- sizeof(struct fld_cache_entry);
-
- cache_threshold = cache_size *
- FLD_CLIENT_CACHE_THRESHOLD / 100;
-
- fld->lcf_cache = fld_cache_init(fld->lcf_name,
- cache_size, cache_threshold);
- if (IS_ERR(fld->lcf_cache)) {
- rc = PTR_ERR(fld->lcf_cache);
- fld->lcf_cache = NULL;
- goto out;
- }
-
- rc = fld_client_debugfs_init(fld);
- if (rc)
- goto out;
-out:
- if (rc)
- fld_client_fini(fld);
- else
- CDEBUG(D_INFO, "%s: Using \"%s\" hash\n",
- fld->lcf_name, fld->lcf_hash->fh_name);
- return rc;
-}
-EXPORT_SYMBOL(fld_client_init);
-
-void fld_client_fini(struct lu_client_fld *fld)
-{
- struct lu_fld_target *target, *tmp;
-
- spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
- fld->lcf_count--;
- list_del(&target->ft_chain);
- if (target->ft_exp)
- class_export_put(target->ft_exp);
- kfree(target);
- }
- spin_unlock(&fld->lcf_lock);
-
- if (fld->lcf_cache) {
- if (!IS_ERR(fld->lcf_cache))
- fld_cache_fini(fld->lcf_cache);
- fld->lcf_cache = NULL;
- }
-}
-EXPORT_SYMBOL(fld_client_fini);
-
-int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op,
- struct ptlrpc_request **reqp)
-{
- struct ptlrpc_request *req = NULL;
- struct lu_seq_range *prange;
- __u32 *op;
- int rc = 0;
- struct obd_import *imp;
-
- LASSERT(exp);
-
- imp = class_exp2cliimp(exp);
- switch (fld_op) {
- case FLD_QUERY:
- req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY,
- LUSTRE_MDS_VERSION, FLD_QUERY);
- if (!req)
- return -ENOMEM;
-
- /*
- * XXX: only needed when talking to old server(< 2.6), it should
- * be removed when < 2.6 server is not supported
- */
- op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
- *op = FLD_LOOKUP;
-
- if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
- req->rq_allow_replay = 1;
- break;
- case FLD_READ:
- req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_READ,
- LUSTRE_MDS_VERSION, FLD_READ);
- if (!req)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA,
- RCL_SERVER, PAGE_SIZE);
- break;
- default:
- rc = -EINVAL;
- break;
- }
- if (rc)
- return rc;
-
- prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD);
- *prange = *range;
- ptlrpc_request_set_replen(req);
- req->rq_request_portal = FLD_REQUEST_PORTAL;
- req->rq_reply_portal = MDC_REPLY_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- obd_get_request_slot(&exp->exp_obd->u.cli);
- rc = ptlrpc_queue_wait(req);
- obd_put_request_slot(&exp->exp_obd->u.cli);
- if (rc)
- goto out_req;
-
- if (fld_op == FLD_QUERY) {
- prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
- if (!prange) {
- rc = -EFAULT;
- goto out_req;
- }
- *range = *prange;
- }
-
-out_req:
- if (rc || !reqp) {
- ptlrpc_req_finished(req);
- req = NULL;
- }
-
- if (reqp)
- *reqp = req;
-
- return rc;
-}
-
-int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
- __u32 flags, const struct lu_env *env)
-{
- struct lu_seq_range res = { 0 };
- struct lu_fld_target *target;
- int rc;
-
- rc = fld_cache_lookup(fld->lcf_cache, seq, &res);
- if (rc == 0) {
- *mds = res.lsr_index;
- return 0;
- }
-
- /* Can not find it in the cache */
- target = fld_client_get_target(fld, seq);
- LASSERT(target);
-
- CDEBUG(D_INFO,
- "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n",
- fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
-
- res.lsr_start = seq;
- fld_range_set_type(&res, flags);
- rc = fld_client_rpc(target->ft_exp, &res, FLD_QUERY, NULL);
-
- if (rc == 0) {
- *mds = res.lsr_index;
-
- fld_cache_insert(fld->lcf_cache, &res);
- }
- return rc;
-}
-EXPORT_SYMBOL(fld_client_lookup);
-
-void fld_client_flush(struct lu_client_fld *fld)
-{
- fld_cache_flush(fld->lcf_cache);
-}
-
-static int __init fld_init(void)
-{
- fld_debugfs_dir = ldebugfs_register(LUSTRE_FLD_NAME,
- debugfs_lustre_root,
- NULL, NULL);
- return PTR_ERR_OR_ZERO(fld_debugfs_dir);
-}
-
-static void __exit fld_exit(void)
-{
- if (!IS_ERR_OR_NULL(fld_debugfs_dir))
- ldebugfs_remove(&fld_debugfs_dir);
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre FID Location Database");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(fld_init)
-module_exit(fld_exit)
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
deleted file mode 100644
index 1a6a76110c3e..000000000000
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/lproc_fld.c
- *
- * FLD (FIDs Location Database)
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- * Di Wang <di.wang@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FLD
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/module.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_req_layout.h>
-#include <lustre_fld.h>
-#include <lustre_fid.h>
-#include "fld_internal.h"
-
-static int
-fld_debugfs_targets_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
- struct lu_fld_target *target;
-
- spin_lock(&fld->lcf_lock);
- list_for_each_entry(target, &fld->lcf_targets, ft_chain)
- seq_printf(m, "%s\n", fld_target_name(target));
- spin_unlock(&fld->lcf_lock);
-
- return 0;
-}
-
-static int
-fld_debugfs_hash_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
-
- spin_lock(&fld->lcf_lock);
- seq_printf(m, "%s\n", fld->lcf_hash->fh_name);
- spin_unlock(&fld->lcf_lock);
-
- return 0;
-}
-
-static ssize_t
-fld_debugfs_hash_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct lu_client_fld *fld;
- struct lu_fld_hash *hash = NULL;
- char fh_name[8];
- int i;
-
- if (count > sizeof(fh_name))
- return -ENAMETOOLONG;
-
- if (copy_from_user(fh_name, buffer, count) != 0)
- return -EFAULT;
-
- fld = ((struct seq_file *)file->private_data)->private;
-
- for (i = 0; fld_hash[i].fh_name; i++) {
- if (count != strlen(fld_hash[i].fh_name))
- continue;
-
- if (!strncmp(fld_hash[i].fh_name, fh_name, count)) {
- hash = &fld_hash[i];
- break;
- }
- }
-
- if (hash) {
- spin_lock(&fld->lcf_lock);
- fld->lcf_hash = hash;
- spin_unlock(&fld->lcf_lock);
-
- CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n",
- fld->lcf_name, hash->fh_name);
- }
-
- return count;
-}
-
-static ssize_t
-fld_debugfs_cache_flush_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- struct lu_client_fld *fld = file->private_data;
-
- fld_cache_flush(fld->lcf_cache);
-
- CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name);
-
- return count;
-}
-
-static int
-fld_debugfs_cache_flush_release(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
- return 0;
-}
-
-static const struct file_operations fld_debugfs_cache_flush_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = fld_debugfs_cache_flush_write,
- .release = fld_debugfs_cache_flush_release,
-};
-
-LPROC_SEQ_FOPS_RO(fld_debugfs_targets);
-LPROC_SEQ_FOPS(fld_debugfs_hash);
-
-struct lprocfs_vars fld_client_debugfs_list[] = {
- { "targets", &fld_debugfs_targets_fops },
- { "hash", &fld_debugfs_hash_fops },
- { "cache_flush", &fld_debugfs_cache_flush_fops },
- { NULL }
-};
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
deleted file mode 100644
index 341a145c3331..000000000000
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ /dev/null
@@ -1,2463 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#ifndef _LUSTRE_CL_OBJECT_H
-#define _LUSTRE_CL_OBJECT_H
-
-/** \defgroup clio clio
- *
- * Client objects implement io operations and cache pages.
- *
- * Examples: lov and osc are implementations of cl interface.
- *
- * Big Theory Statement.
- *
- * Layered objects.
- *
- * Client implementation is based on the following data-types:
- *
- * - cl_object
- *
- * - cl_page
- *
- * - cl_lock represents an extent lock on an object.
- *
- * - cl_io represents high-level i/o activity such as whole read/write
- * system call, or write-out of pages from under the lock being
- * canceled. cl_io has sub-ios that can be stopped and resumed
- * independently, thus achieving high degree of transfer
- * parallelism. Single cl_io can be advanced forward by
- * the multiple threads (although in the most usual case of
- * read/write system call it is associated with the single user
- * thread, that issued the system call).
- *
- * Terminology
- *
- * - to avoid confusion high-level I/O operation like read or write system
- * call is referred to as "an io", whereas low-level I/O operation, like
- * RPC, is referred to as "a transfer"
- *
- * - "generic code" means generic (not file system specific) code in the
- * hosting environment. "cl-code" means code (mostly in cl_*.c files) that
- * is not layer specific.
- *
- * Locking.
- *
- * - i_mutex
- * - PG_locked
- * - cl_object_header::coh_page_guard
- * - lu_site::ls_guard
- *
- * See the top comment in cl_object.c for the description of overall locking and
- * reference-counting design.
- *
- * See comments below for the description of i/o, page, and dlm-locking
- * design.
- *
- * @{
- */
-
-/*
- * super-class definitions.
- */
-#include <lu_object.h>
-#include <lustre_compat.h>
-#include <linux/atomic.h>
-#include <linux/mutex.h>
-#include <linux/radix-tree.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-
-struct inode;
-
-struct cl_device;
-
-struct cl_object;
-
-struct cl_page;
-struct cl_page_slice;
-struct cl_lock;
-struct cl_lock_slice;
-
-struct cl_lock_operations;
-struct cl_page_operations;
-
-struct cl_io;
-struct cl_io_slice;
-
-struct cl_req_attr;
-
-/**
- * Device in the client stack.
- *
- * \see vvp_device, lov_device, lovsub_device, osc_device
- */
-struct cl_device {
- /** Super-class. */
- struct lu_device cd_lu_dev;
-};
-
-/** \addtogroup cl_object cl_object
- * @{
- */
-/**
- * "Data attributes" of cl_object. Data attributes can be updated
- * independently for a sub-object, and top-object's attributes are calculated
- * from sub-objects' ones.
- */
-struct cl_attr {
- /** Object size, in bytes */
- loff_t cat_size;
- /**
- * Known minimal size, in bytes.
- *
- * This is only valid when at least one DLM lock is held.
- */
- loff_t cat_kms;
- /** Modification time. Measured in seconds since epoch. */
- time64_t cat_mtime;
- /** Access time. Measured in seconds since epoch. */
- time64_t cat_atime;
- /** Change time. Measured in seconds since epoch. */
- time64_t cat_ctime;
- /**
- * Blocks allocated to this cl_object on the server file system.
- *
- * \todo XXX An interface for block size is needed.
- */
- __u64 cat_blocks;
- /**
- * User identifier for quota purposes.
- */
- uid_t cat_uid;
- /**
- * Group identifier for quota purposes.
- */
- gid_t cat_gid;
-
- /* nlink of the directory */
- __u64 cat_nlink;
-};
-
-/**
- * Fields in cl_attr that are being set.
- */
-enum cl_attr_valid {
- CAT_SIZE = 1 << 0,
- CAT_KMS = 1 << 1,
- CAT_MTIME = 1 << 3,
- CAT_ATIME = 1 << 4,
- CAT_CTIME = 1 << 5,
- CAT_BLOCKS = 1 << 6,
- CAT_UID = 1 << 7,
- CAT_GID = 1 << 8
-};
-
-/**
- * Sub-class of lu_object with methods common for objects on the client
- * stacks.
- *
- * cl_object: represents a regular file system object, both a file and a
- * stripe. cl_object is based on lu_object: it is identified by a fid,
- * layered, cached, hashed, and lrued. Important distinction with the server
- * side, where md_object and dt_object are used, is that cl_object "fans out"
- * at the lov/sns level: depending on the file layout, single file is
- * represented as a set of "sub-objects" (stripes). At the implementation
- * level, struct lov_object contains an array of cl_objects. Each sub-object
- * is a full-fledged cl_object, having its fid, living in the lru and hash
- * table.
- *
- * This leads to the next important difference with the server side: on the
- * client, it's quite usual to have objects with the different sequence of
- * layers. For example, typical top-object is composed of the following
- * layers:
- *
- * - vvp
- * - lov
- *
- * whereas its sub-objects are composed of
- *
- * - lovsub
- * - osc
- *
- * layers. Here "lovsub" is a mostly dummy layer, whose purpose is to keep
- * track of the object-subobject relationship.
- *
- * Sub-objects are not cached independently: when top-object is about to
- * be discarded from the memory, all its sub-objects are torn-down and
- * destroyed too.
- *
- * \see vvp_object, lov_object, lovsub_object, osc_object
- */
-struct cl_object {
- /** super class */
- struct lu_object co_lu;
- /** per-object-layer operations */
- const struct cl_object_operations *co_ops;
- /** offset of page slice in cl_page buffer */
- int co_slice_off;
-};
-
-/**
- * Description of the client object configuration. This is used for the
- * creation of a new client object that is identified by a more state than
- * fid.
- */
-struct cl_object_conf {
- /** Super-class. */
- struct lu_object_conf coc_lu;
- union {
- /**
- * Object layout. This is consumed by lov.
- */
- struct lu_buf coc_layout;
- /**
- * Description of particular stripe location in the
- * cluster. This is consumed by osc.
- */
- struct lov_oinfo *coc_oinfo;
- } u;
- /**
- * VFS inode. This is consumed by vvp.
- */
- struct inode *coc_inode;
- /**
- * Layout lock handle.
- */
- struct ldlm_lock *coc_lock;
- /**
- * Operation to handle layout, OBJECT_CONF_XYZ.
- */
- int coc_opc;
-};
-
-enum {
- /** configure layout, set up a new stripe, must be called while
- * holding layout lock.
- */
- OBJECT_CONF_SET = 0,
- /** invalidate the current stripe configuration due to losing
- * layout lock.
- */
- OBJECT_CONF_INVALIDATE = 1,
- /** wait for old layout to go away so that new layout can be set up. */
- OBJECT_CONF_WAIT = 2
-};
-
-enum {
- CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */
- CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */
-};
-
-struct cl_layout {
- /** the buffer to return the layout in lov_mds_md format. */
- struct lu_buf cl_buf;
- /** size of layout in lov_mds_md format. */
- size_t cl_size;
- /** Layout generation. */
- u32 cl_layout_gen;
-};
-
-/**
- * Operations implemented for each cl object layer.
- *
- * \see vvp_ops, lov_ops, lovsub_ops, osc_ops
- */
-struct cl_object_operations {
- /**
- * Initialize page slice for this layer. Called top-to-bottom through
- * every object layer when a new cl_page is instantiated. Layer
- * keeping private per-page data, or requiring its own page operations
- * vector should allocate these data here, and attach then to the page
- * by calling cl_page_slice_add(). \a vmpage is locked (in the VM
- * sense). Optional.
- *
- * \retval NULL success.
- *
- * \retval ERR_PTR(errno) failure code.
- *
- * \retval valid-pointer pointer to already existing referenced page
- * to be used instead of newly created.
- */
- int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index);
- /**
- * Initialize lock slice for this layer. Called top-to-bottom through
- * every object layer when a new cl_lock is instantiated. Layer
- * keeping private per-lock data, or requiring its own lock operations
- * vector should allocate these data here, and attach then to the lock
- * by calling cl_lock_slice_add(). Mandatory.
- */
- int (*coo_lock_init)(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
- /**
- * Initialize io state for a given layer.
- *
- * called top-to-bottom once per io existence to initialize io
- * state. If layer wants to keep some state for this type of io, it
- * has to embed struct cl_io_slice in lu_env::le_ses, and register
- * slice with cl_io_slice_add(). It is guaranteed that all threads
- * participating in this io share the same session.
- */
- int (*coo_io_init)(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
- /**
- * Fill portion of \a attr that this layer controls. This method is
- * called top-to-bottom through all object layers.
- *
- * \pre cl_object_header::coh_attr_guard of the top-object is locked.
- *
- * \return 0: to continue
- * \return +ve: to stop iterating through layers (but 0 is returned
- * from enclosing cl_object_attr_get())
- * \return -ve: to signal error
- */
- int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr);
- /**
- * Update attributes.
- *
- * \a valid is a bitmask composed from enum #cl_attr_valid, and
- * indicating what attributes are to be set.
- *
- * \pre cl_object_header::coh_attr_guard of the top-object is locked.
- *
- * \return the same convention as for
- * cl_object_operations::coo_attr_get() is used.
- */
- int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned int valid);
- /**
- * Update object configuration. Called top-to-bottom to modify object
- * configuration.
- *
- * XXX error conditions and handling.
- */
- int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf);
- /**
- * Glimpse ast. Executed when glimpse ast arrives for a lock on this
- * object. Layers are supposed to fill parts of \a lvb that will be
- * shipped to the glimpse originator as a glimpse result.
- *
- * \see vvp_object_glimpse(), lovsub_object_glimpse(),
- * \see osc_object_glimpse()
- */
- int (*coo_glimpse)(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb);
- /**
- * Object prune method. Called when the layout is going to change on
- * this object, therefore each layer has to clean up their cache,
- * mainly pages and locks.
- */
- int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
- /**
- * Object getstripe method.
- */
- int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
- struct lov_user_md __user *lum);
- /**
- * Get FIEMAP mapping from the object.
- */
- int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj,
- struct ll_fiemap_info_key *fmkey,
- struct fiemap *fiemap, size_t *buflen);
- /**
- * Get layout and generation of the object.
- */
- int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj,
- struct cl_layout *layout);
- /**
- * Get maximum size of the object.
- */
- loff_t (*coo_maxbytes)(struct cl_object *obj);
- /**
- * Set request attributes.
- */
- void (*coo_req_attr_set)(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_req_attr *attr);
-};
-
-/**
- * Extended header for client object.
- */
-struct cl_object_header {
- /** Standard lu_object_header. cl_object::co_lu::lo_header points
- * here.
- */
- struct lu_object_header coh_lu;
-
- /**
- * Parent object. It is assumed that an object has a well-defined
- * parent, but not a well-defined child (there may be multiple
- * sub-objects, for the same top-object). cl_object_header::coh_parent
- * field allows certain code to be written generically, without
- * limiting possible cl_object layouts unduly.
- */
- struct cl_object_header *coh_parent;
- /**
- * Protects consistency between cl_attr of parent object and
- * attributes of sub-objects, that the former is calculated ("merged")
- * from.
- *
- * \todo XXX this can be read/write lock if needed.
- */
- spinlock_t coh_attr_guard;
- /**
- * Size of cl_page + page slices
- */
- unsigned short coh_page_bufsize;
- /**
- * Number of objects above this one: 0 for a top-object, 1 for its
- * sub-object, etc.
- */
- unsigned char coh_nesting;
-};
-
-/**
- * Helper macro: iterate over all layers of the object \a obj, assigning every
- * layer top-to-bottom to \a slice.
- */
-#define cl_object_for_each(slice, obj) \
- list_for_each_entry((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
-/**
- * Helper macro: iterate over all layers of the object \a obj, assigning every
- * layer bottom-to-top to \a slice.
- */
-#define cl_object_for_each_reverse(slice, obj) \
- list_for_each_entry_reverse((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
-/** @} cl_object */
-
-#define CL_PAGE_EOF ((pgoff_t)~0ull)
-
-/** \addtogroup cl_page cl_page
- * @{
- */
-
-/** \struct cl_page
- * Layered client page.
- *
- * cl_page: represents a portion of a file, cached in the memory. All pages
- * of the given file are of the same size, and are kept in the radix tree
- * hanging off the cl_object. cl_page doesn't fan out, but as sub-objects
- * of the top-level file object are first class cl_objects, they have their
- * own radix trees of pages and hence page is implemented as a sequence of
- * struct cl_pages's, linked into double-linked list through
- * cl_page::cp_parent and cl_page::cp_child pointers, each residing in the
- * corresponding radix tree at the corresponding logical offset.
- *
- * cl_page is associated with VM page of the hosting environment (struct
- * page in Linux kernel, for example), struct page. It is assumed, that this
- * association is implemented by one of cl_page layers (top layer in the
- * current design) that
- *
- * - intercepts per-VM-page call-backs made by the environment (e.g.,
- * memory pressure),
- *
- * - translates state (page flag bits) and locking between lustre and
- * environment.
- *
- * The association between cl_page and struct page is immutable and
- * established when cl_page is created.
- *
- * cl_page can be "owned" by a particular cl_io (see below), guaranteeing
- * this io an exclusive access to this page w.r.t. other io attempts and
- * various events changing page state (such as transfer completion, or
- * eviction of the page from the memory). Note, that in general cl_io
- * cannot be identified with a particular thread, and page ownership is not
- * exactly equal to the current thread holding a lock on the page. Layer
- * implementing association between cl_page and struct page has to implement
- * ownership on top of available synchronization mechanisms.
- *
- * While lustre client maintains the notion of an page ownership by io,
- * hosting MM/VM usually has its own page concurrency control
- * mechanisms. For example, in Linux, page access is synchronized by the
- * per-page PG_locked bit-lock, and generic kernel code (generic_file_*())
- * takes care to acquire and release such locks as necessary around the
- * calls to the file system methods (->readpage(), ->prepare_write(),
- * ->commit_write(), etc.). This leads to the situation when there are two
- * different ways to own a page in the client:
- *
- * - client code explicitly and voluntary owns the page (cl_page_own());
- *
- * - VM locks a page and then calls the client, that has "to assume"
- * the ownership from the VM (cl_page_assume()).
- *
- * Dual methods to release ownership are cl_page_disown() and
- * cl_page_unassume().
- *
- * cl_page is reference counted (cl_page::cp_ref). When reference counter
- * drops to 0, the page is returned to the cache, unless it is in
- * cl_page_state::CPS_FREEING state, in which case it is immediately
- * destroyed.
- *
- * The general logic guaranteeing the absence of "existential races" for
- * pages is the following:
- *
- * - there are fixed known ways for a thread to obtain a new reference
- * to a page:
- *
- * - by doing a lookup in the cl_object radix tree, protected by the
- * spin-lock;
- *
- * - by starting from VM-locked struct page and following some
- * hosting environment method (e.g., following ->private pointer in
- * the case of Linux kernel), see cl_vmpage_page();
- *
- * - when the page enters cl_page_state::CPS_FREEING state, all these
- * ways are severed with the proper synchronization
- * (cl_page_delete());
- *
- * - entry into cl_page_state::CPS_FREEING is serialized by the VM page
- * lock;
- *
- * - no new references to the page in cl_page_state::CPS_FREEING state
- * are allowed (checked in cl_page_get()).
- *
- * Together this guarantees that when last reference to a
- * cl_page_state::CPS_FREEING page is released, it is safe to destroy the
- * page, as neither references to it can be acquired at that point, nor
- * ones exist.
- *
- * cl_page is a state machine. States are enumerated in enum
- * cl_page_state. Possible state transitions are enumerated in
- * cl_page_state_set(). State transition process (i.e., actual changing of
- * cl_page::cp_state field) is protected by the lock on the underlying VM
- * page.
- *
- * Linux Kernel implementation.
- *
- * Binding between cl_page and struct page (which is a typedef for
- * struct page) is implemented in the vvp layer. cl_page is attached to the
- * ->private pointer of the struct page, together with the setting of
- * PG_private bit in page->flags, and acquiring additional reference on the
- * struct page (much like struct buffer_head, or any similar file system
- * private data structures).
- *
- * PG_locked lock is used to implement both ownership and transfer
- * synchronization, that is, page is VM-locked in CPS_{OWNED,PAGE{IN,OUT}}
- * states. No additional references are acquired for the duration of the
- * transfer.
- *
- * \warning *THIS IS NOT* the behavior expected by the Linux kernel, where
- * write-out is "protected" by the special PG_writeback bit.
- */
-
-/**
- * States of cl_page. cl_page.c assumes particular order here.
- *
- * The page state machine is rather crude, as it doesn't recognize finer page
- * states like "dirty" or "up to date". This is because such states are not
- * always well defined for the whole stack (see, for example, the
- * implementation of the read-ahead, that hides page up-to-dateness to track
- * cache hits accurately). Such sub-states are maintained by the layers that
- * are interested in them.
- */
-enum cl_page_state {
- /**
- * Page is in the cache, un-owned. Page leaves cached state in the
- * following cases:
- *
- * - [cl_page_state::CPS_OWNED] io comes across the page and
- * owns it;
- *
- * - [cl_page_state::CPS_PAGEOUT] page is dirty, the
- * req-formation engine decides that it wants to include this page
- * into an RPC being constructed, and yanks it from the cache;
- *
- * - [cl_page_state::CPS_FREEING] VM callback is executed to
- * evict the page form the memory;
- *
- * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL
- */
- CPS_CACHED,
- /**
- * Page is exclusively owned by some cl_io. Page may end up in this
- * state as a result of
- *
- * - io creating new page and immediately owning it;
- *
- * - [cl_page_state::CPS_CACHED] io finding existing cached page
- * and owning it;
- *
- * - [cl_page_state::CPS_OWNED] io finding existing owned page
- * and waiting for owner to release the page;
- *
- * Page leaves owned state in the following cases:
- *
- * - [cl_page_state::CPS_CACHED] io decides to leave the page in
- * the cache, doing nothing;
- *
- * - [cl_page_state::CPS_PAGEIN] io starts read transfer for
- * this page;
- *
- * - [cl_page_state::CPS_PAGEOUT] io starts immediate write
- * transfer for this page;
- *
- * - [cl_page_state::CPS_FREEING] io decides to destroy this
- * page (e.g., as part of truncate or extent lock cancellation).
- *
- * \invariant cl_page::cp_owner != NULL && cl_page::cp_req == NULL
- */
- CPS_OWNED,
- /**
- * Page is being written out, as a part of a transfer. This state is
- * entered when req-formation logic decided that it wants this page to
- * be sent through the wire _now_. Specifically, it means that once
- * this state is achieved, transfer completion handler (with either
- * success or failure indication) is guaranteed to be executed against
- * this page independently of any locks and any scheduling decisions
- * made by the hosting environment (that effectively means that the
- * page is never put into cl_page_state::CPS_PAGEOUT state "in
- * advance". This property is mentioned, because it is important when
- * reasoning about possible dead-locks in the system). The page can
- * enter this state as a result of
- *
- * - [cl_page_state::CPS_OWNED] an io requesting an immediate
- * write-out of this page, or
- *
- * - [cl_page_state::CPS_CACHED] req-forming engine deciding
- * that it has enough dirty pages cached to issue a "good"
- * transfer.
- *
- * The page leaves cl_page_state::CPS_PAGEOUT state when the transfer
- * is completed---it is moved into cl_page_state::CPS_CACHED state.
- *
- * Underlying VM page is locked for the duration of transfer.
- *
- * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
- */
- CPS_PAGEOUT,
- /**
- * Page is being read in, as a part of a transfer. This is quite
- * similar to the cl_page_state::CPS_PAGEOUT state, except that
- * read-in is always "immediate"---there is no such thing a sudden
- * construction of read request from cached, presumably not up to date,
- * pages.
- *
- * Underlying VM page is locked for the duration of transfer.
- *
- * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
- */
- CPS_PAGEIN,
- /**
- * Page is being destroyed. This state is entered when client decides
- * that page has to be deleted from its host object, as, e.g., a part
- * of truncate.
- *
- * Once this state is reached, there is no way to escape it.
- *
- * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req == NULL
- */
- CPS_FREEING,
- CPS_NR
-};
-
-enum cl_page_type {
- /** Host page, the page is from the host inode which the cl_page
- * belongs to.
- */
- CPT_CACHEABLE = 1,
-
- /** Transient page, the transient cl_page is used to bind a cl_page
- * to vmpage which is not belonging to the same object of cl_page.
- * it is used in DirectIO and lockless IO.
- */
- CPT_TRANSIENT,
-};
-
-/**
- * Fields are protected by the lock on struct page, except for atomics and
- * immutables.
- *
- * \invariant Data type invariants are in cl_page_invariant(). Basically:
- * cl_page::cp_parent and cl_page::cp_child are a well-formed double-linked
- * list, consistent with the parent/child pointers in the cl_page::cp_obj and
- * cl_page::cp_owner (when set).
- */
-struct cl_page {
- /** Reference counter. */
- atomic_t cp_ref;
- /** An object this page is a part of. Immutable after creation. */
- struct cl_object *cp_obj;
- /** vmpage */
- struct page *cp_vmpage;
- /** Linkage of pages within group. Pages must be owned */
- struct list_head cp_batch;
- /** List of slices. Immutable after creation. */
- struct list_head cp_layers;
- /**
- * Page state. This field is const to avoid accidental update, it is
- * modified only internally within cl_page.c. Protected by a VM lock.
- */
- const enum cl_page_state cp_state;
- /**
- * Page type. Only CPT_TRANSIENT is used so far. Immutable after
- * creation.
- */
- enum cl_page_type cp_type;
-
- /**
- * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
- * by sub-io. Protected by a VM lock.
- */
- struct cl_io *cp_owner;
- /** List of references to this page, for debugging. */
- struct lu_ref cp_reference;
- /** Link to an object, for debugging. */
- struct lu_ref_link cp_obj_ref;
- /** Link to a queue, for debugging. */
- struct lu_ref_link cp_queue_ref;
- /** Assigned if doing a sync_io */
- struct cl_sync_io *cp_sync_io;
-};
-
-/**
- * Per-layer part of cl_page.
- *
- * \see vvp_page, lov_page, osc_page
- */
-struct cl_page_slice {
- struct cl_page *cpl_page;
- pgoff_t cpl_index;
- /**
- * Object slice corresponding to this page slice. Immutable after
- * creation.
- */
- struct cl_object *cpl_obj;
- const struct cl_page_operations *cpl_ops;
- /** Linkage into cl_page::cp_layers. Immutable after creation. */
- struct list_head cpl_linkage;
-};
-
-/**
- * Lock mode. For the client extent locks.
- *
- * \ingroup cl_lock
- */
-enum cl_lock_mode {
- CLM_READ,
- CLM_WRITE,
- CLM_GROUP
-};
-
-/**
- * Requested transfer type.
- */
-enum cl_req_type {
- CRT_READ,
- CRT_WRITE,
- CRT_NR
-};
-
-/**
- * Per-layer page operations.
- *
- * Methods taking an \a io argument are for the activity happening in the
- * context of given \a io. Page is assumed to be owned by that io, except for
- * the obvious cases (like cl_page_operations::cpo_own()).
- *
- * \see vvp_page_ops, lov_page_ops, osc_page_ops
- */
-struct cl_page_operations {
- /**
- * cl_page<->struct page methods. Only one layer in the stack has to
- * implement these. Current code assumes that this functionality is
- * provided by the topmost layer, see cl_page_disown0() as an example.
- */
-
- /**
- * Called when \a io acquires this page into the exclusive
- * ownership. When this method returns, it is guaranteed that the is
- * not owned by other io, and no transfer is going on against
- * it. Optional.
- *
- * \see cl_page_own()
- * \see vvp_page_own(), lov_page_own()
- */
- int (*cpo_own)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, int nonblock);
- /** Called when ownership it yielded. Optional.
- *
- * \see cl_page_disown()
- * \see vvp_page_disown()
- */
- void (*cpo_disown)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /**
- * Called for a page that is already "owned" by \a io from VM point of
- * view. Optional.
- *
- * \see cl_page_assume()
- * \see vvp_page_assume(), lov_page_assume()
- */
- void (*cpo_assume)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /** Dual to cl_page_operations::cpo_assume(). Optional. Called
- * bottom-to-top when IO releases a page without actually unlocking
- * it.
- *
- * \see cl_page_unassume()
- * \see vvp_page_unassume()
- */
- void (*cpo_unassume)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /**
- * Announces whether the page contains valid data or not by \a uptodate.
- *
- * \see cl_page_export()
- * \see vvp_page_export()
- */
- void (*cpo_export)(const struct lu_env *env,
- const struct cl_page_slice *slice, int uptodate);
- /**
- * Checks whether underlying VM page is locked (in the suitable
- * sense). Used for assertions.
- *
- * \retval -EBUSY: page is protected by a lock of a given mode;
- * \retval -ENODATA: page is not protected by a lock;
- * \retval 0: this layer cannot decide. (Should never happen.)
- */
- int (*cpo_is_vmlocked)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
- * Page destruction.
- */
-
- /**
- * Called when page is truncated from the object. Optional.
- *
- * \see cl_page_discard()
- * \see vvp_page_discard(), osc_page_discard()
- */
- void (*cpo_discard)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /**
- * Called when page is removed from the cache, and is about to being
- * destroyed. Optional.
- *
- * \see cl_page_delete()
- * \see vvp_page_delete(), osc_page_delete()
- */
- void (*cpo_delete)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /** Destructor. Frees resources and slice itself. */
- void (*cpo_fini)(const struct lu_env *env,
- struct cl_page_slice *slice);
- /**
- * Optional debugging helper. Prints given page slice.
- *
- * \see cl_page_print()
- */
- int (*cpo_print)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t p);
- /**
- * \name transfer
- *
- * Transfer methods.
- *
- * @{
- */
- /**
- * Request type dependent vector of operations.
- *
- * Transfer operations depend on transfer mode (cl_req_type). To avoid
- * passing transfer mode to each and every of these methods, and to
- * avoid branching on request type inside of the methods, separate
- * methods for cl_req_type:CRT_READ and cl_req_type:CRT_WRITE are
- * provided. That is, method invocation usually looks like
- *
- * slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...);
- */
- struct {
- /**
- * Called when a page is submitted for a transfer as a part of
- * cl_page_list.
- *
- * \return 0 : page is eligible for submission;
- * \return -EALREADY : skip this page;
- * \return -ve : error.
- *
- * \see cl_page_prep()
- */
- int (*cpo_prep)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /**
- * Completion handler. This is guaranteed to be eventually
- * fired after cl_page_operations::cpo_prep() or
- * cl_page_operations::cpo_make_ready() call.
- *
- * This method can be called in a non-blocking context. It is
- * guaranteed however, that the page involved and its object
- * are pinned in memory (and, hence, calling cl_page_put() is
- * safe).
- *
- * \see cl_page_completion()
- */
- void (*cpo_completion)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret);
- /**
- * Called when cached page is about to be added to the
- * ptlrpc request as a part of req formation.
- *
- * \return 0 : proceed with this page;
- * \return -EAGAIN : skip this page;
- * \return -ve : error.
- *
- * \see cl_page_make_ready()
- */
- int (*cpo_make_ready)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- } io[CRT_NR];
- /**
- * Tell transfer engine that only [to, from] part of a page should be
- * transmitted.
- *
- * This is used for immediate transfers.
- *
- * \todo XXX this is not very good interface. It would be much better
- * if all transfer parameters were supplied as arguments to
- * cl_io_operations::cio_submit() call, but it is not clear how to do
- * this for page queues.
- *
- * \see cl_page_clip()
- */
- void (*cpo_clip)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int from, int to);
- /**
- * \pre the page was queued for transferring.
- * \post page is removed from client's pending list, or -EBUSY
- * is returned if it has already been in transferring.
- *
- * This is one of seldom page operation which is:
- * 0. called from top level;
- * 1. don't have vmpage locked;
- * 2. every layer should synchronize execution of its ->cpo_cancel()
- * with completion handlers. Osc uses client obd lock for this
- * purpose. Based on there is no vvp_page_cancel and
- * lov_page_cancel(), cpo_cancel is defacto protected by client lock.
- *
- * \see osc_page_cancel().
- */
- int (*cpo_cancel)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
- * Write out a page by kernel. This is only called by ll_writepage
- * right now.
- *
- * \see cl_page_flush()
- */
- int (*cpo_flush)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /** @} transfer */
-};
-
-/**
- * Helper macro, dumping detailed information about \a page into a log.
- */
-#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
-do { \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
- CDEBUG(mask, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-/**
- * Helper macro, dumping shorter information about \a page into a log.
- */
-#define CL_PAGE_HEADER(mask, env, page, format, ...) \
-do { \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
- CDEBUG(mask, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-static inline struct page *cl_page_vmpage(struct cl_page *page)
-{
- LASSERT(page->cp_vmpage);
- return page->cp_vmpage;
-}
-
-/**
- * Check if a cl_page is in use.
- *
- * Client cache holds a refcount, this refcount will be dropped when
- * the page is taken out of cache, see vvp_page_delete().
- */
-static inline bool __page_in_use(const struct cl_page *page, int refc)
-{
- return (atomic_read(&page->cp_ref) > refc + 1);
-}
-
-/**
- * Caller itself holds a refcount of cl_page.
- */
-#define cl_page_in_use(pg) __page_in_use(pg, 1)
-/**
- * Caller doesn't hold a refcount.
- */
-#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
-
-/** @} cl_page */
-
-/** \addtogroup cl_lock cl_lock
- * @{
- */
-/** \struct cl_lock
- *
- * Extent locking on the client.
- *
- * LAYERING
- *
- * The locking model of the new client code is built around
- *
- * struct cl_lock
- *
- * data-type representing an extent lock on a regular file. cl_lock is a
- * layered object (much like cl_object and cl_page), it consists of a header
- * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
- * cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
- *
- * Typical cl_lock consists of the two layers:
- *
- * - vvp_lock (vvp specific data), and
- * - lov_lock (lov specific data).
- *
- * lov_lock contains an array of sub-locks. Each of these sub-locks is a
- * normal cl_lock: it has a header (struct cl_lock) and a list of layers:
- *
- * - lovsub_lock, and
- * - osc_lock
- *
- * Each sub-lock is associated with a cl_object (representing stripe
- * sub-object or the file to which top-level cl_lock is associated to), and is
- * linked into that cl_object::coh_locks. In this respect cl_lock is similar to
- * cl_object (that at lov layer also fans out into multiple sub-objects), and
- * is different from cl_page, that doesn't fan out (there is usually exactly
- * one osc_page for every vvp_page). We shall call vvp-lov portion of the lock
- * a "top-lock" and its lovsub-osc portion a "sub-lock".
- *
- * LIFE CYCLE
- *
- * cl_lock is a cacheless data container for the requirements of locks to
- * complete the IO. cl_lock is created before I/O starts and destroyed when the
- * I/O is complete.
- *
- * cl_lock depends on LDLM lock to fulfill lock semantics. LDLM lock is attached
- * to cl_lock at OSC layer. LDLM lock is still cacheable.
- *
- * INTERFACE AND USAGE
- *
- * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A
- * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue()
- * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock
- * consists of multiple sub cl_locks, each sub locks will be enqueued
- * correspondingly. At OSC layer, the lock enqueue request will tend to reuse
- * cached LDLM lock; otherwise a new LDLM lock will have to be requested from
- * OST side.
- *
- * cl_lock_cancel() must be called to release a cl_lock after use. clo_cancel()
- * method will be called for each layer to release the resource held by this
- * lock. At OSC layer, the reference count of LDLM lock, which is held at
- * clo_enqueue time, is released.
- *
- * LDLM lock can only be canceled if there is no cl_lock using it.
- *
- * Overall process of the locking during IO operation is as following:
- *
- * - once parameters for IO are setup in cl_io, cl_io_operations::cio_lock()
- * is called on each layer. Responsibility of this method is to add locks,
- * needed by a given layer into cl_io.ci_lockset.
- *
- * - once locks for all layers were collected, they are sorted to avoid
- * dead-locks (cl_io_locks_sort()), and enqueued.
- *
- * - when all locks are acquired, IO is performed;
- *
- * - locks are released after IO is complete.
- *
- * Striping introduces major additional complexity into locking. The
- * fundamental problem is that it is generally unsafe to actively use (hold)
- * two locks on the different OST servers at the same time, as this introduces
- * inter-server dependency and can lead to cascading evictions.
- *
- * Basic solution is to sub-divide large read/write IOs into smaller pieces so
- * that no multi-stripe locks are taken (note that this design abandons POSIX
- * read/write semantics). Such pieces ideally can be executed concurrently. At
- * the same time, certain types of IO cannot be sub-divived, without
- * sacrificing correctness. This includes:
- *
- * - O_APPEND write, where [0, EOF] lock has to be taken, to guarantee
- * atomicity;
- *
- * - ftruncate(fd, offset), where [offset, EOF] lock has to be taken.
- *
- * Also, in the case of read(fd, buf, count) or write(fd, buf, count), where
- * buf is a part of memory mapped Lustre file, a lock or locks protecting buf
- * has to be held together with the usual lock on [offset, offset + count].
- *
- * Interaction with DLM
- *
- * In the expected setup, cl_lock is ultimately backed up by a collection of
- * DLM locks (struct ldlm_lock). Association between cl_lock and DLM lock is
- * implemented in osc layer, that also matches DLM events (ASTs, cancellation,
- * etc.) into cl_lock_operation calls. See struct osc_lock for a more detailed
- * description of interaction with DLM.
- */
-
-/**
- * Lock description.
- */
-struct cl_lock_descr {
- /** Object this lock is granted for. */
- struct cl_object *cld_obj;
- /** Index of the first page protected by this lock. */
- pgoff_t cld_start;
- /** Index of the last page (inclusive) protected by this lock. */
- pgoff_t cld_end;
- /** Group ID, for group lock */
- __u64 cld_gid;
- /** Lock mode. */
- enum cl_lock_mode cld_mode;
- /**
- * flags to enqueue lock. A combination of bit-flags from
- * enum cl_enq_flags.
- */
- __u32 cld_enq_flags;
-};
-
-#define DDESCR "%s(%d):[%lu, %lu]:%x"
-#define PDESCR(descr) \
- cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
- (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags
-
-const char *cl_lock_mode_name(const enum cl_lock_mode mode);
-
-/**
- * Layered client lock.
- */
-struct cl_lock {
- /** List of slices. Immutable after creation. */
- struct list_head cll_layers;
- /** lock attribute, extent, cl_object, etc. */
- struct cl_lock_descr cll_descr;
-};
-
-/**
- * Per-layer part of cl_lock
- *
- * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
- */
-struct cl_lock_slice {
- struct cl_lock *cls_lock;
- /** Object slice corresponding to this lock slice. Immutable after
- * creation.
- */
- struct cl_object *cls_obj;
- const struct cl_lock_operations *cls_ops;
- /** Linkage into cl_lock::cll_layers. Immutable after creation. */
- struct list_head cls_linkage;
-};
-
-/**
- *
- * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
- */
-struct cl_lock_operations {
- /** @{ */
- /**
- * Attempts to enqueue the lock. Called top-to-bottom.
- *
- * \retval 0 this layer has enqueued the lock successfully
- * \retval >0 this layer has enqueued the lock, but need to wait on
- * @anchor for resources
- * \retval -ve failure
- *
- * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
- * \see osc_lock_enqueue()
- */
- int (*clo_enqueue)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *io, struct cl_sync_io *anchor);
- /**
- * Cancel a lock, release its DLM lock ref, while does not cancel the
- * DLM lock
- */
- void (*clo_cancel)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /** @} */
- /**
- * Destructor. Frees resources and the slice.
- *
- * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
- * \see osc_lock_fini()
- */
- void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
- /**
- * Optional debugging helper. Prints given lock slice.
- */
- int (*clo_print)(const struct lu_env *env,
- void *cookie, lu_printer_t p,
- const struct cl_lock_slice *slice);
-};
-
-#define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
-do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
- CDEBUG(mask, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-#define CL_LOCK_ASSERT(expr, env, lock) do { \
- if (likely(expr)) \
- break; \
- \
- CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \
- LBUG(); \
-} while (0)
-
-/** @} cl_lock */
-
-/** \addtogroup cl_page_list cl_page_list
- * Page list used to perform collective operations on a group of pages.
- *
- * Pages are added to the list one by one. cl_page_list acquires a reference
- * for every page in it. Page list is used to perform collective operations on
- * pages:
- *
- * - submit pages for an immediate transfer,
- *
- * - own pages on behalf of certain io (waiting for each page in turn),
- *
- * - discard pages.
- *
- * When list is finalized, it releases references on all pages it still has.
- *
- * \todo XXX concurrency control.
- *
- * @{
- */
-struct cl_page_list {
- unsigned int pl_nr;
- struct list_head pl_pages;
- struct task_struct *pl_owner;
-};
-
-/**
- * A 2-queue of pages. A convenience data-type for common use case, 2-queue
- * contains an incoming page list and an outgoing page list.
- */
-struct cl_2queue {
- struct cl_page_list c2_qin;
- struct cl_page_list c2_qout;
-};
-
-/** @} cl_page_list */
-
-/** \addtogroup cl_io cl_io
- * @{
- */
-/** \struct cl_io
- * I/O
- *
- * cl_io represents a high level I/O activity like
- * read(2)/write(2)/truncate(2) system call, or cancellation of an extent
- * lock.
- *
- * cl_io is a layered object, much like cl_{object,page,lock} but with one
- * important distinction. We want to minimize number of calls to the allocator
- * in the fast path, e.g., in the case of read(2) when everything is cached:
- * client already owns the lock over region being read, and data are cached
- * due to read-ahead. To avoid allocation of cl_io layers in such situations,
- * per-layer io state is stored in the session, associated with the io, see
- * struct {vvp,lov,osc}_io for example. Sessions allocation is amortized
- * by using free-lists, see cl_env_get().
- *
- * There is a small predefined number of possible io types, enumerated in enum
- * cl_io_type.
- *
- * cl_io is a state machine, that can be advanced concurrently by the multiple
- * threads. It is up to these threads to control the concurrency and,
- * specifically, to detect when io is done, and its state can be safely
- * released.
- *
- * For read/write io overall execution plan is as following:
- *
- * (0) initialize io state through all layers;
- *
- * (1) loop: prepare chunk of work to do
- *
- * (2) call all layers to collect locks they need to process current chunk
- *
- * (3) sort all locks to avoid dead-locks, and acquire them
- *
- * (4) process the chunk: call per-page methods
- * cl_io_operations::cio_prepare_write(),
- * cl_io_operations::cio_commit_write() for write)
- *
- * (5) release locks
- *
- * (6) repeat loop.
- *
- * To implement the "parallel IO mode", lov layer creates sub-io's (lazily to
- * address allocation efficiency issues mentioned above), and returns with the
- * special error condition from per-page method when current sub-io has to
- * block. This causes io loop to be repeated, and lov switches to the next
- * sub-io in its cl_io_operations::cio_iter_init() implementation.
- */
-
-/** IO types */
-enum cl_io_type {
- /** read system call */
- CIT_READ = 1,
- /** write system call */
- CIT_WRITE,
- /** truncate, utime system calls */
- CIT_SETATTR,
- /** get data version */
- CIT_DATA_VERSION,
- /**
- * page fault handling
- */
- CIT_FAULT,
- /**
- * fsync system call handling
- * To write out a range of file
- */
- CIT_FSYNC,
- /**
- * Miscellaneous io. This is used for occasional io activity that
- * doesn't fit into other types. Currently this is used for:
- *
- * - cancellation of an extent lock. This io exists as a context
- * to write dirty pages from under the lock being canceled back
- * to the server;
- *
- * - VM induced page write-out. An io context for writing page out
- * for memory cleansing;
- *
- * - glimpse. An io context to acquire glimpse lock.
- *
- * - grouplock. An io context to acquire group lock.
- *
- * CIT_MISC io is used simply as a context in which locks and pages
- * are manipulated. Such io has no internal "process", that is,
- * cl_io_loop() is never called for it.
- */
- CIT_MISC,
- CIT_OP_NR
-};
-
-/**
- * States of cl_io state machine
- */
-enum cl_io_state {
- /** Not initialized. */
- CIS_ZERO,
- /** Initialized. */
- CIS_INIT,
- /** IO iteration started. */
- CIS_IT_STARTED,
- /** Locks taken. */
- CIS_LOCKED,
- /** Actual IO is in progress. */
- CIS_IO_GOING,
- /** IO for the current iteration finished. */
- CIS_IO_FINISHED,
- /** Locks released. */
- CIS_UNLOCKED,
- /** Iteration completed. */
- CIS_IT_ENDED,
- /** cl_io finalized. */
- CIS_FINI
-};
-
-/**
- * IO state private for a layer.
- *
- * This is usually embedded into layer session data, rather than allocated
- * dynamically.
- *
- * \see vvp_io, lov_io, osc_io
- */
-struct cl_io_slice {
- struct cl_io *cis_io;
- /** corresponding object slice. Immutable after creation. */
- struct cl_object *cis_obj;
- /** io operations. Immutable after creation. */
- const struct cl_io_operations *cis_iop;
- /**
- * linkage into a list of all slices for a given cl_io, hanging off
- * cl_io::ci_layers. Immutable after creation.
- */
- struct list_head cis_linkage;
-};
-
-typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
- struct cl_page *);
-
-struct cl_read_ahead {
- /*
- * Maximum page index the readahead window will end.
- * This is determined DLM lock coverage, RPC and stripe boundary.
- * cra_end is included.
- */
- pgoff_t cra_end;
- /* optimal RPC size for this read, by pages */
- unsigned long cra_rpc_size;
- /*
- * Release callback. If readahead holds resources underneath, this
- * function should be called to release it.
- */
- void (*cra_release)(const struct lu_env *env, void *cbdata);
- /* Callback data for cra_release routine */
- void *cra_cbdata;
-};
-
-static inline void cl_read_ahead_release(const struct lu_env *env,
- struct cl_read_ahead *ra)
-{
- if (ra->cra_release)
- ra->cra_release(env, ra->cra_cbdata);
- memset(ra, 0, sizeof(*ra));
-}
-
-/**
- * Per-layer io operations.
- * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
- */
-struct cl_io_operations {
- /**
- * Vector of io state transition methods for every io type.
- *
- * \see cl_page_operations::io
- */
- struct {
- /**
- * Prepare io iteration at a given layer.
- *
- * Called top-to-bottom at the beginning of each iteration of
- * "io loop" (if it makes sense for this type of io). Here
- * layer selects what work it will do during this iteration.
- *
- * \see cl_io_operations::cio_iter_fini()
- */
- int (*cio_iter_init)(const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Finalize io iteration.
- *
- * Called bottom-to-top at the end of each iteration of "io
- * loop". Here layers can decide whether IO has to be
- * continued.
- *
- * \see cl_io_operations::cio_iter_init()
- */
- void (*cio_iter_fini)(const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Collect locks for the current iteration of io.
- *
- * Called top-to-bottom to collect all locks necessary for
- * this iteration. This methods shouldn't actually enqueue
- * anything, instead it should post a lock through
- * cl_io_lock_add(). Once all locks are collected, they are
- * sorted and enqueued in the proper order.
- */
- int (*cio_lock)(const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Finalize unlocking.
- *
- * Called bottom-to-top to finish layer specific unlocking
- * functionality, after generic code released all locks
- * acquired by cl_io_operations::cio_lock().
- */
- void (*cio_unlock)(const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Start io iteration.
- *
- * Once all locks are acquired, called top-to-bottom to
- * commence actual IO. In the current implementation,
- * top-level vvp_io_{read,write}_start() does all the work
- * synchronously by calling generic_file_*(), so other layers
- * are called when everything is done.
- */
- int (*cio_start)(const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Called top-to-bottom at the end of io loop. Here layer
- * might wait for an unfinished asynchronous io.
- */
- void (*cio_end)(const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Called bottom-to-top to notify layers that read/write IO
- * iteration finished, with \a nob bytes transferred.
- */
- void (*cio_advance)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- size_t nob);
- /**
- * Called once per io, bottom-to-top to release io resources.
- */
- void (*cio_fini)(const struct lu_env *env,
- const struct cl_io_slice *slice);
- } op[CIT_OP_NR];
-
- /**
- * Submit pages from \a queue->c2_qin for IO, and move
- * successfully submitted pages into \a queue->c2_qout. Return
- * non-zero if failed to submit even the single page. If
- * submission failed after some pages were moved into \a
- * queue->c2_qout, completion callback with non-zero ioret is
- * executed on them.
- */
- int (*cio_submit)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- enum cl_req_type crt,
- struct cl_2queue *queue);
- /**
- * Queue async page for write.
- * The difference between cio_submit and cio_queue is that
- * cio_submit is for urgent request.
- */
- int (*cio_commit_async)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- struct cl_page_list *queue, int from, int to,
- cl_commit_cbt cb);
- /**
- * Decide maximum read ahead extent
- *
- * \pre io->ci_type == CIT_READ
- */
- int (*cio_read_ahead)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- pgoff_t start, struct cl_read_ahead *ra);
- /**
- * Optional debugging helper. Print given io slice.
- */
- int (*cio_print)(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_io_slice *slice);
-};
-
-/**
- * Flags to lock enqueue procedure.
- * \ingroup cl_lock
- */
-enum cl_enq_flags {
- /**
- * instruct server to not block, if conflicting lock is found. Instead
- * -EWOULDBLOCK is returned immediately.
- */
- CEF_NONBLOCK = 0x00000001,
- /**
- * take lock asynchronously (out of order), as it cannot
- * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing.
- */
- CEF_ASYNC = 0x00000002,
- /**
- * tell the server to instruct (though a flag in the blocking ast) an
- * owner of the conflicting lock, that it can drop dirty pages
- * protected by this lock, without sending them to the server.
- */
- CEF_DISCARD_DATA = 0x00000004,
- /**
- * tell the sub layers that it must be a `real' lock. This is used for
- * mmapped-buffer locks and glimpse locks that must be never converted
- * into lockless mode.
- *
- * \see vvp_mmap_locks(), cl_glimpse_lock().
- */
- CEF_MUST = 0x00000008,
- /**
- * tell the sub layers that never request a `real' lock. This flag is
- * not used currently.
- *
- * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless
- * conversion policy: ci_lockreq describes generic information of lock
- * requirement for this IO, especially for locks which belong to the
- * object doing IO; however, lock itself may have precise requirements
- * that are described by the enqueue flags.
- */
- CEF_NEVER = 0x00000010,
- /**
- * for async glimpse lock.
- */
- CEF_AGL = 0x00000020,
- /**
- * enqueue a lock to test DLM lock existence.
- */
- CEF_PEEK = 0x00000040,
- /**
- * Lock match only. Used by group lock in I/O as group lock
- * is known to exist.
- */
- CEF_LOCK_MATCH = BIT(7),
- /**
- * mask of enq_flags.
- */
- CEF_MASK = 0x000000ff,
-};
-
-/**
- * Link between lock and io. Intermediate structure is needed, because the
- * same lock can be part of multiple io's simultaneously.
- */
-struct cl_io_lock_link {
- /** linkage into one of cl_lockset lists. */
- struct list_head cill_linkage;
- struct cl_lock cill_lock;
- /** optional destructor */
- void (*cill_fini)(const struct lu_env *env,
- struct cl_io_lock_link *link);
-};
-#define cill_descr cill_lock.cll_descr
-
-/**
- * Lock-set represents a collection of locks, that io needs at a
- * time. Generally speaking, client tries to avoid holding multiple locks when
- * possible, because
- *
- * - holding extent locks over multiple ost's introduces the danger of
- * "cascading timeouts";
- *
- * - holding multiple locks over the same ost is still dead-lock prone,
- * see comment in osc_lock_enqueue(),
- *
- * but there are certain situations where this is unavoidable:
- *
- * - O_APPEND writes have to take [0, EOF] lock for correctness;
- *
- * - truncate has to take [new-size, EOF] lock for correctness;
- *
- * - SNS has to take locks across full stripe for correctness;
- *
- * - in the case when user level buffer, supplied to {read,write}(file0),
- * is a part of a memory mapped lustre file, client has to take a dlm
- * locks on file0, and all files that back up the buffer (or a part of
- * the buffer, that is being processed in the current chunk, in any
- * case, there are situations where at least 2 locks are necessary).
- *
- * In such cases we at least try to take locks in the same consistent
- * order. To this end, all locks are first collected, then sorted, and then
- * enqueued.
- */
-struct cl_lockset {
- /** locks to be acquired. */
- struct list_head cls_todo;
- /** locks acquired. */
- struct list_head cls_done;
-};
-
-/**
- * Lock requirements(demand) for IO. It should be cl_io_lock_req,
- * but 'req' is always to be thought as 'request' :-)
- */
-enum cl_io_lock_dmd {
- /** Always lock data (e.g., O_APPEND). */
- CILR_MANDATORY = 0,
- /** Layers are free to decide between local and global locking. */
- CILR_MAYBE,
- /** Never lock: there is no cache (e.g., lockless IO). */
- CILR_NEVER
-};
-
-enum cl_fsync_mode {
- /** start writeback, do not wait for them to finish */
- CL_FSYNC_NONE = 0,
- /** start writeback and wait for them to finish */
- CL_FSYNC_LOCAL = 1,
- /** discard all of dirty pages in a specific file range */
- CL_FSYNC_DISCARD = 2,
- /** start writeback and make sure they have reached storage before
- * return. OST_SYNC RPC must be issued and finished
- */
- CL_FSYNC_ALL = 3
-};
-
-struct cl_io_rw_common {
- loff_t crw_pos;
- size_t crw_count;
- int crw_nonblock;
-};
-
-/**
- * State for io.
- *
- * cl_io is shared by all threads participating in this IO (in current
- * implementation only one thread advances IO, but parallel IO design and
- * concurrent copy_*_user() require multiple threads acting on the same IO. It
- * is up to these threads to serialize their activities, including updates to
- * mutable cl_io fields.
- */
-struct cl_io {
- /** type of this IO. Immutable after creation. */
- enum cl_io_type ci_type;
- /** current state of cl_io state machine. */
- enum cl_io_state ci_state;
- /** main object this io is against. Immutable after creation. */
- struct cl_object *ci_obj;
- /**
- * Upper layer io, of which this io is a part of. Immutable after
- * creation.
- */
- struct cl_io *ci_parent;
- /** List of slices. Immutable after creation. */
- struct list_head ci_layers;
- /** list of locks (to be) acquired by this io. */
- struct cl_lockset ci_lockset;
- /** lock requirements, this is just a help info for sublayers. */
- enum cl_io_lock_dmd ci_lockreq;
- union {
- struct cl_rd_io {
- struct cl_io_rw_common rd;
- } ci_rd;
- struct cl_wr_io {
- struct cl_io_rw_common wr;
- int wr_append;
- int wr_sync;
- } ci_wr;
- struct cl_io_rw_common ci_rw;
- struct cl_setattr_io {
- struct ost_lvb sa_attr;
- unsigned int sa_attr_flags;
- unsigned int sa_valid;
- int sa_stripe_index;
- const struct lu_fid *sa_parent_fid;
- } ci_setattr;
- struct cl_data_version_io {
- u64 dv_data_version;
- int dv_flags;
- } ci_data_version;
- struct cl_fault_io {
- /** page index within file. */
- pgoff_t ft_index;
- /** bytes valid byte on a faulted page. */
- size_t ft_nob;
- /** writable page? for nopage() only */
- int ft_writable;
- /** page of an executable? */
- int ft_executable;
- /** page_mkwrite() */
- int ft_mkwrite;
- /** resulting page */
- struct cl_page *ft_page;
- } ci_fault;
- struct cl_fsync_io {
- loff_t fi_start;
- loff_t fi_end;
- /** file system level fid */
- struct lu_fid *fi_fid;
- enum cl_fsync_mode fi_mode;
- /* how many pages were written/discarded */
- unsigned int fi_nr_written;
- } ci_fsync;
- } u;
- struct cl_2queue ci_queue;
- size_t ci_nob;
- int ci_result;
- unsigned int ci_continue:1,
- /**
- * This io has held grouplock, to inform sublayers that
- * don't do lockless i/o.
- */
- ci_no_srvlock:1,
- /**
- * The whole IO need to be restarted because layout has been changed
- */
- ci_need_restart:1,
- /**
- * to not refresh layout - the IO issuer knows that the layout won't
- * change(page operations, layout change causes all page to be
- * discarded), or it doesn't matter if it changes(sync).
- */
- ci_ignore_layout:1,
- /**
- * Check if layout changed after the IO finishes. Mainly for HSM
- * requirement. If IO occurs to openning files, it doesn't need to
- * verify layout because HSM won't release openning files.
- * Right now, only two operations need to verify layout: glimpse
- * and setattr.
- */
- ci_verify_layout:1,
- /**
- * file is released, restore has to be triggered by vvp layer
- */
- ci_restore_needed:1,
- /**
- * O_NOATIME
- */
- ci_noatime:1;
- /**
- * Number of pages owned by this IO. For invariant checking.
- */
- unsigned int ci_owned_nr;
-};
-
-/** @} cl_io */
-
-/**
- * Per-transfer attributes.
- */
-struct cl_req_attr {
- enum cl_req_type cra_type;
- u64 cra_flags;
- struct cl_page *cra_page;
-
- /** Generic attributes for the server consumption. */
- struct obdo *cra_oa;
- /** Jobid */
- char cra_jobid[LUSTRE_JOBID_SIZE];
-};
-
-enum cache_stats_item {
- /** how many cache lookups were performed */
- CS_lookup = 0,
- /** how many times cache lookup resulted in a hit */
- CS_hit,
- /** how many entities are in the cache right now */
- CS_total,
- /** how many entities in the cache are actively used (and cannot be
- * evicted) right now
- */
- CS_busy,
- /** how many entities were created at all */
- CS_create,
- CS_NR
-};
-
-#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
-
-/**
- * Stats for a generic cache (similar to inode, lu_object, etc. caches).
- */
-struct cache_stats {
- const char *cs_name;
- atomic_t cs_stats[CS_NR];
-};
-
-/** These are not exported so far */
-void cache_stats_init(struct cache_stats *cs, const char *name);
-
-/**
- * Client-side site. This represents particular client stack. "Global"
- * variables should (directly or indirectly) be added here to allow multiple
- * clients to co-exist in the single address space.
- */
-struct cl_site {
- struct lu_site cs_lu;
- /**
- * Statistical counters. Atomics do not scale, something better like
- * per-cpu counters is needed.
- *
- * These are exported as /sys/kernel/debug/lustre/llite/.../site
- *
- * When interpreting keep in mind that both sub-locks (and sub-pages)
- * and top-locks (and top-pages) are accounted here.
- */
- struct cache_stats cs_pages;
- atomic_t cs_pages_state[CPS_NR];
-};
-
-int cl_site_init(struct cl_site *s, struct cl_device *top);
-void cl_site_fini(struct cl_site *s);
-void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
-
-/**
- * Output client site statistical counters into a buffer. Suitable for
- * ll_rd_*()-style functions.
- */
-int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
-
-/**
- * \name helpers
- *
- * Type conversion and accessory functions.
- */
-/** @{ */
-
-static inline struct cl_site *lu2cl_site(const struct lu_site *site)
-{
- return container_of(site, struct cl_site, cs_lu);
-}
-
-static inline int lu_device_is_cl(const struct lu_device *d)
-{
- return d->ld_type->ldt_tags & LU_DEVICE_CL;
-}
-
-static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
-{
- LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d));
- return container_of0(d, struct cl_device, cd_lu_dev);
-}
-
-static inline struct lu_device *cl2lu_dev(struct cl_device *d)
-{
- return &d->cd_lu_dev;
-}
-
-static inline struct cl_object *lu2cl(const struct lu_object *o)
-{
- LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
- return container_of0(o, struct cl_object, co_lu);
-}
-
-static inline const struct cl_object_conf *
-lu2cl_conf(const struct lu_object_conf *conf)
-{
- return container_of0(conf, struct cl_object_conf, coc_lu);
-}
-
-static inline struct cl_object *cl_object_next(const struct cl_object *obj)
-{
- return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
-}
-
-static inline struct cl_device *cl_object_device(const struct cl_object *o)
-{
- LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
- return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
-}
-
-static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
-{
- return container_of0(h, struct cl_object_header, coh_lu);
-}
-
-static inline struct cl_site *cl_object_site(const struct cl_object *obj)
-{
- return lu2cl_site(obj->co_lu.lo_dev->ld_site);
-}
-
-static inline
-struct cl_object_header *cl_object_header(const struct cl_object *obj)
-{
- return luh2coh(obj->co_lu.lo_header);
-}
-
-static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
-{
- return lu_device_init(&d->cd_lu_dev, t);
-}
-
-static inline void cl_device_fini(struct cl_device *d)
-{
- lu_device_fini(&d->cd_lu_dev);
-}
-
-void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj, pgoff_t index,
- const struct cl_page_operations *ops);
-void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
- struct cl_object *obj,
- const struct cl_lock_operations *ops);
-void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
- struct cl_object *obj, const struct cl_io_operations *ops);
-/** @} helpers */
-
-/** \defgroup cl_object cl_object
- * @{
- */
-struct cl_object *cl_object_top(struct cl_object *o);
-struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
- const struct lu_fid *fid,
- const struct cl_object_conf *c);
-
-int cl_object_header_init(struct cl_object_header *h);
-void cl_object_put(const struct lu_env *env, struct cl_object *o);
-void cl_object_get(struct cl_object *o);
-void cl_object_attr_lock(struct cl_object *o);
-void cl_object_attr_unlock(struct cl_object *o);
-int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr);
-int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned int valid);
-int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
- struct ost_lvb *lvb);
-int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf);
-int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
-void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
-int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
- struct lov_user_md __user *lum);
-int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
- struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap,
- size_t *buflen);
-int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_layout *cl);
-loff_t cl_object_maxbytes(struct cl_object *obj);
-
-/**
- * Returns true, iff \a o0 and \a o1 are slices of the same object.
- */
-static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
-{
- return cl_object_header(o0) == cl_object_header(o1);
-}
-
-static inline void cl_object_page_init(struct cl_object *clob, int size)
-{
- clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
- cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
- WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
-}
-
-static inline void *cl_object_page_slice(struct cl_object *clob,
- struct cl_page *page)
-{
- return (void *)((char *)page + clob->co_slice_off);
-}
-
-/**
- * Return refcount of cl_object.
- */
-static inline int cl_object_refc(struct cl_object *clob)
-{
- struct lu_object_header *header = clob->co_lu.lo_header;
-
- return atomic_read(&header->loh_ref);
-}
-
-/** @} cl_object */
-
-/** \defgroup cl_page cl_page
- * @{
- */
-enum {
- CLP_GANG_OKAY = 0,
- CLP_GANG_RESCHED,
- CLP_GANG_AGAIN,
- CLP_GANG_ABORT
-};
-
-/* callback of cl_page_gang_lookup() */
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type);
-struct cl_page *cl_page_alloc(const struct lu_env *env,
- struct cl_object *o, pgoff_t ind,
- struct page *vmpage,
- enum cl_page_type type);
-void cl_page_get(struct cl_page *page);
-void cl_page_put(const struct lu_env *env, struct cl_page *page);
-void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer,
- const struct cl_page *pg);
-void cl_page_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_page *pg);
-struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
-
-const struct cl_page_slice *cl_page_at(const struct cl_page *page,
- const struct lu_device_type *dtype);
-
-/**
- * \name ownership
- *
- * Functions dealing with the ownership of page by io.
- */
-/** @{ */
-
-int cl_page_own(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-int cl_page_own_try(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_unassume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
-void cl_page_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_disown0(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
-int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io);
-
-/** @} ownership */
-
-/**
- * \name transfer
- *
- * Functions dealing with the preparation of a page for a transfer, and
- * tracking transfer state.
- */
-/** @{ */
-int cl_page_prep(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_completion(const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt, int ioret);
-int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
- enum cl_req_type crt);
-int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
- int from, int to);
-int cl_page_cancel(const struct lu_env *env, struct cl_page *page);
-int cl_page_flush(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-
-/** @} transfer */
-
-/**
- * \name helper routines
- * Functions to discard, delete and export a cl_page.
- */
-/** @{ */
-void cl_page_discard(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
-int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
-void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
-loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
-pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
-size_t cl_page_size(const struct cl_object *obj);
-int cl_pages_prune(const struct lu_env *env, struct cl_object *obj);
-
-void cl_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_lock *lock);
-void cl_lock_descr_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_lock_descr *descr);
-/* @} helper */
-
-/**
- * Data structure managing a client's cached pages. A count of
- * "unstable" pages is maintained, and an LRU of clean pages is
- * maintained. "unstable" pages are pages pinned by the ptlrpc
- * layer for recovery purposes.
- */
-struct cl_client_cache {
- /**
- * # of client cache refcount
- * # of users (OSCs) + 2 (held by llite and lov)
- */
- atomic_t ccc_users;
- /**
- * # of threads are doing shrinking
- */
- unsigned int ccc_lru_shrinkers;
- /**
- * # of LRU entries available
- */
- atomic_long_t ccc_lru_left;
- /**
- * List of entities(OSCs) for this LRU cache
- */
- struct list_head ccc_lru;
- /**
- * Max # of LRU entries
- */
- unsigned long ccc_lru_max;
- /**
- * Lock to protect ccc_lru list
- */
- spinlock_t ccc_lru_lock;
- /**
- * Set if unstable check is enabled
- */
- unsigned int ccc_unstable_check:1;
- /**
- * # of unstable pages for this mount point
- */
- atomic_long_t ccc_unstable_nr;
- /**
- * Waitq for awaiting unstable pages to reach zero.
- * Used at umounting time and signaled on BRW commit
- */
- wait_queue_head_t ccc_unstable_waitq;
-
-};
-
-/**
- * cl_cache functions
- */
-struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
-void cl_cache_incref(struct cl_client_cache *cache);
-void cl_cache_decref(struct cl_client_cache *cache);
-
-/** @} cl_page */
-
-/** \defgroup cl_lock cl_lock
- * @{
- */
-
-int cl_lock_request(const struct lu_env *env, struct cl_io *io,
- struct cl_lock *lock);
-int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
- const struct cl_io *io);
-void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock);
-const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
- const struct lu_device_type *dtype);
-void cl_lock_release(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
- struct cl_lock *lock, struct cl_sync_io *anchor);
-void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
-
-/** @} cl_lock */
-
-/** \defgroup cl_io cl_io
- * @{
- */
-
-int cl_io_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, loff_t pos, size_t count);
-int cl_io_loop(const struct lu_env *env, struct cl_io *io);
-
-void cl_io_fini(const struct lu_env *env, struct cl_io *io);
-int cl_io_iter_init(const struct lu_env *env, struct cl_io *io);
-void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io);
-int cl_io_lock(const struct lu_env *env, struct cl_io *io);
-void cl_io_unlock(const struct lu_env *env, struct cl_io *io);
-int cl_io_start(const struct lu_env *env, struct cl_io *io);
-void cl_io_end(const struct lu_env *env, struct cl_io *io);
-int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link);
-int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr);
-int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue);
-int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue,
- long timeout);
-int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, int from, int to,
- cl_commit_cbt cb);
-int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
- pgoff_t start, struct cl_read_ahead *ra);
-int cl_io_is_going(const struct lu_env *env);
-
-/**
- * True, iff \a io is an O_APPEND write(2).
- */
-static inline int cl_io_is_append(const struct cl_io *io)
-{
- return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
-}
-
-static inline int cl_io_is_sync_write(const struct cl_io *io)
-{
- return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
-}
-
-static inline int cl_io_is_mkwrite(const struct cl_io *io)
-{
- return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
-}
-
-/**
- * True, iff \a io is a truncate(2).
- */
-static inline int cl_io_is_trunc(const struct cl_io *io)
-{
- return io->ci_type == CIT_SETATTR &&
- (io->u.ci_setattr.sa_valid & ATTR_SIZE);
-}
-
-struct cl_io *cl_io_top(struct cl_io *io);
-
-#define CL_IO_SLICE_CLEAN(foo_io, base) \
-do { \
- typeof(foo_io) __foo_io = (foo_io); \
- \
- BUILD_BUG_ON(offsetof(typeof(*__foo_io), base) != 0); \
- memset(&__foo_io->base + 1, 0, \
- sizeof(*__foo_io) - sizeof(__foo_io->base)); \
-} while (0)
-
-/** @} cl_io */
-
-/** \defgroup cl_page_list cl_page_list
- * @{
- */
-
-/**
- * Last page in the page list.
- */
-static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
-{
- LASSERT(plist->pl_nr > 0);
- return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
-}
-
-static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
-{
- LASSERT(plist->pl_nr > 0);
- return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
-}
-
-/**
- * Iterate over pages in a page list.
- */
-#define cl_page_list_for_each(page, list) \
- list_for_each_entry((page), &(list)->pl_pages, cp_batch)
-
-/**
- * Iterate over pages in a page list, taking possible removals into account.
- */
-#define cl_page_list_for_each_safe(page, temp, list) \
- list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
-
-void cl_page_list_init(struct cl_page_list *plist);
-void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page);
-void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page);
-void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page);
-void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head);
-void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
- struct cl_page *page);
-void cl_page_list_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
-void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
-
-void cl_2queue_init(struct cl_2queue *queue);
-void cl_2queue_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_discard(const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue);
-void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
-
-/** @} cl_page_list */
-
-void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
- struct cl_req_attr *attr);
-
-/** \defgroup cl_sync_io cl_sync_io
- * @{
- */
-
-/**
- * Anchor for synchronous transfer. This is allocated on a stack by thread
- * doing synchronous transfer, and a pointer to this structure is set up in
- * every page submitted for transfer. Transfer completion routine updates
- * anchor and wakes up waiting thread when transfer is complete.
- */
-struct cl_sync_io {
- /** number of pages yet to be transferred. */
- atomic_t csi_sync_nr;
- /** error code. */
- int csi_sync_rc;
- /** barrier of destroy this structure */
- atomic_t csi_barrier;
- /** completion to be signaled when transfer is complete. */
- wait_queue_head_t csi_waitq;
- /** callback to invoke when this IO is finished */
- void (*csi_end_io)(const struct lu_env *,
- struct cl_sync_io *);
-};
-
-void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
- void (*end)(const struct lu_env *, struct cl_sync_io *));
-int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
- long timeout);
-void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
- int ioret);
-void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
-
-/** @} cl_sync_io */
-
-/** \defgroup cl_env cl_env
- *
- * lu_env handling for a client.
- *
- * lu_env is an environment within which lustre code executes. Its major part
- * is lu_context---a fast memory allocation mechanism that is used to conserve
- * precious kernel stack space. Originally lu_env was designed for a server,
- * where
- *
- * - there is a (mostly) fixed number of threads, and
- *
- * - call chains have no non-lustre portions inserted between lustre code.
- *
- * On a client both these assumption fails, because every user thread can
- * potentially execute lustre code as part of a system call, and lustre calls
- * into VFS or MM that call back into lustre.
- *
- * To deal with that, cl_env wrapper functions implement the following
- * optimizations:
- *
- * - allocation and destruction of environment is amortized by caching no
- * longer used environments instead of destroying them;
- *
- * \see lu_env, lu_context, lu_context_key
- * @{
- */
-
-struct lu_env *cl_env_get(u16 *refcheck);
-struct lu_env *cl_env_alloc(u16 *refcheck, __u32 tags);
-void cl_env_put(struct lu_env *env, u16 *refcheck);
-unsigned int cl_env_cache_purge(unsigned int nr);
-struct lu_env *cl_env_percpu_get(void);
-void cl_env_percpu_put(struct lu_env *env);
-
-/** @} cl_env */
-
-/*
- * Misc
- */
-void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb);
-
-struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
- struct lu_device_type *ldt,
- struct lu_device *next);
-/** @} clio */
-
-int cl_global_init(void);
-void cl_global_fini(void);
-
-#endif /* _LINUX_CL_OBJECT_H */
diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h
deleted file mode 100644
index 7d119c1a0469..000000000000
--- a/drivers/staging/lustre/lustre/include/interval_tree.h
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/interval_tree.h
- *
- * Author: Huang Wei <huangwei@clusterfs.com>
- * Author: Jay Xiong <jinshan.xiong@sun.com>
- */
-
-#ifndef _INTERVAL_H__
-#define _INTERVAL_H__
-
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-struct interval_node {
- struct interval_node *in_left;
- struct interval_node *in_right;
- struct interval_node *in_parent;
- unsigned in_color:1,
- in_intree:1, /** set if the node is in tree */
- in_res1:30;
- __u8 in_res2[4]; /** tags, 8-bytes aligned */
- __u64 in_max_high;
- struct interval_node_extent {
- __u64 start;
- __u64 end;
- } in_extent;
-};
-
-enum interval_iter {
- INTERVAL_ITER_CONT = 1,
- INTERVAL_ITER_STOP = 2
-};
-
-static inline int interval_is_intree(struct interval_node *node)
-{
- return node->in_intree == 1;
-}
-
-static inline __u64 interval_low(struct interval_node *node)
-{
- return node->in_extent.start;
-}
-
-static inline __u64 interval_high(struct interval_node *node)
-{
- return node->in_extent.end;
-}
-
-static inline int interval_set(struct interval_node *node,
- __u64 start, __u64 end)
-{
- if (start > end)
- return -ERANGE;
- node->in_extent.start = start;
- node->in_extent.end = end;
- node->in_max_high = end;
- return 0;
-}
-
-/*
- * Rules to write an interval callback.
- * - the callback returns INTERVAL_ITER_STOP when it thinks the iteration
- * should be stopped. It will then cause the iteration function to return
- * immediately with return value INTERVAL_ITER_STOP.
- * - callbacks for interval_iterate and interval_iterate_reverse: Every
- * nodes in the tree will be set to @node before the callback being called
- * - callback for interval_search: Only overlapped node will be set to @node
- * before the callback being called.
- */
-typedef enum interval_iter (*interval_callback_t)(struct interval_node *node,
- void *args);
-
-struct interval_node *interval_insert(struct interval_node *node,
- struct interval_node **root);
-void interval_erase(struct interval_node *node, struct interval_node **root);
-
-/*
- * Search the extents in the tree and call @func for each overlapped
- * extents.
- */
-enum interval_iter interval_search(struct interval_node *root,
- struct interval_node_extent *ex,
- interval_callback_t func, void *data);
-
-enum interval_iter interval_iterate_reverse(struct interval_node *root,
- interval_callback_t func,
- void *data);
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/llog_swab.h b/drivers/staging/lustre/lustre/include/llog_swab.h
deleted file mode 100644
index 0433b79efdcb..000000000000
--- a/drivers/staging/lustre/lustre/include/llog_swab.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2014, Intel Corporation.
- *
- * Copyright 2015 Cray Inc, all rights reserved.
- * Author: Ben Evans.
- *
- * We assume all nodes are either little-endian or big-endian, and we
- * always send messages in the sender's native format. The receiver
- * detects the message format by checking the 'magic' field of the message
- * (see lustre_msg_swabbed() below).
- *
- * Each type has corresponding 'lustre_swab_xxxtypexxx()' routines
- * are implemented in ptlrpc/pack_generic.c. These 'swabbers' convert the
- * type from "other" endian, in-place in the message buffer.
- *
- * A swabber takes a single pointer argument. The caller must already have
- * verified that the length of the message buffer >= sizeof (type).
- *
- * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
- * may be defined that swabs just the variable part, after the caller has
- * verified that the message buffer is large enough.
- */
-
-#ifndef _LLOG_SWAB_H_
-#define _LLOG_SWAB_H_
-
-#include <uapi/linux/lustre/lustre_idl.h>
-
-struct lustre_cfg;
-
-void lustre_swab_lu_fid(struct lu_fid *fid);
-void lustre_swab_ost_id(struct ost_id *oid);
-void lustre_swab_llogd_body(struct llogd_body *d);
-void lustre_swab_llog_hdr(struct llog_log_hdr *h);
-void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
-void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
-void lustre_swab_lu_seq_range(struct lu_seq_range *range);
-void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
-void lustre_swab_cfg_marker(struct cfg_marker *marker,
- int swab, int size);
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
deleted file mode 100644
index 426e8f3c9809..000000000000
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ /dev/null
@@ -1,672 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lprocfs_status.h
- *
- * Top level header file for LProc SNMP
- *
- * Author: Hariharan Thantry thantry@users.sourceforge.net
- */
-#ifndef _LPROCFS_SNMP_H
-#define _LPROCFS_SNMP_H
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <linux/libcfs/libcfs.h>
-#include <uapi/linux/lustre/lustre_cfg.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-
-struct lprocfs_vars {
- const char *name;
- const struct file_operations *fops;
- void *data;
- /**
- * sysfs file mode.
- */
- umode_t proc_mode;
-};
-
-struct lprocfs_static_vars {
- struct lprocfs_vars *obd_vars;
- const struct attribute_group *sysfs_vars;
-};
-
-/* if we find more consumers this could be generalized */
-#define OBD_HIST_MAX 32
-struct obd_histogram {
- spinlock_t oh_lock;
- unsigned long oh_buckets[OBD_HIST_MAX];
-};
-
-enum {
- BRW_R_PAGES = 0,
- BRW_W_PAGES,
- BRW_R_RPC_HIST,
- BRW_W_RPC_HIST,
- BRW_R_IO_TIME,
- BRW_W_IO_TIME,
- BRW_R_DISCONT_PAGES,
- BRW_W_DISCONT_PAGES,
- BRW_R_DISCONT_BLOCKS,
- BRW_W_DISCONT_BLOCKS,
- BRW_R_DISK_IOSIZE,
- BRW_W_DISK_IOSIZE,
- BRW_R_DIO_FRAGS,
- BRW_W_DIO_FRAGS,
- BRW_LAST,
-};
-
-struct brw_stats {
- struct obd_histogram hist[BRW_LAST];
-};
-
-enum {
- RENAME_SAMEDIR_SIZE = 0,
- RENAME_CROSSDIR_SRC_SIZE,
- RENAME_CROSSDIR_TGT_SIZE,
- RENAME_LAST,
-};
-
-struct rename_stats {
- struct obd_histogram hist[RENAME_LAST];
-};
-
-/* An lprocfs counter can be configured using the enum bit masks below.
- *
- * LPROCFS_CNTR_EXTERNALLOCK indicates that an external lock already
- * protects this counter from concurrent updates. If not specified,
- * lprocfs an internal per-counter lock variable. External locks are
- * not used to protect counter increments, but are used to protect
- * counter readout and resets.
- *
- * LPROCFS_CNTR_AVGMINMAX indicates a multi-valued counter samples,
- * (i.e. counter can be incremented by more than "1"). When specified,
- * the counter maintains min, max and sum in addition to a simple
- * invocation count. This allows averages to be computed.
- * If not specified, the counter is an increment-by-1 counter.
- * min, max, sum, etc. are not maintained.
- *
- * LPROCFS_CNTR_STDDEV indicates that the counter should track sum of
- * squares (for multi-valued counter samples only). This allows
- * external computation of standard deviation, but involves a 64-bit
- * multiply per counter increment.
- */
-
-enum {
- LPROCFS_CNTR_EXTERNALLOCK = 0x0001,
- LPROCFS_CNTR_AVGMINMAX = 0x0002,
- LPROCFS_CNTR_STDDEV = 0x0004,
-
- /* counter data type */
- LPROCFS_TYPE_REGS = 0x0100,
- LPROCFS_TYPE_BYTES = 0x0200,
- LPROCFS_TYPE_PAGES = 0x0400,
- LPROCFS_TYPE_CYCLE = 0x0800,
-};
-
-#define LC_MIN_INIT ((~(__u64)0) >> 1)
-
-struct lprocfs_counter_header {
- unsigned int lc_config;
- const char *lc_name; /* must be static */
- const char *lc_units; /* must be static */
-};
-
-struct lprocfs_counter {
- __s64 lc_count;
- __s64 lc_min;
- __s64 lc_max;
- __s64 lc_sumsquare;
- /*
- * Every counter has lc_array_sum[0], while lc_array_sum[1] is only
- * for irq context counter, i.e. stats with
- * LPROCFS_STATS_FLAG_IRQ_SAFE flag, its counter need
- * lc_array_sum[1]
- */
- __s64 lc_array_sum[1];
-};
-
-#define lc_sum lc_array_sum[0]
-#define lc_sum_irq lc_array_sum[1]
-
-struct lprocfs_percpu {
-#ifndef __GNUC__
- __s64 pad;
-#endif
- struct lprocfs_counter lp_cntr[0];
-};
-
-enum lprocfs_stats_lock_ops {
- LPROCFS_GET_NUM_CPU = 0x0001, /* number allocated per-CPU stats */
- LPROCFS_GET_SMP_ID = 0x0002, /* current stat to be updated */
-};
-
-enum lprocfs_stats_flags {
- LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */
- LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
- * area and need locking
- */
- LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */
-};
-
-enum lprocfs_fields_flags {
- LPROCFS_FIELDS_FLAGS_CONFIG = 0x0001,
- LPROCFS_FIELDS_FLAGS_SUM = 0x0002,
- LPROCFS_FIELDS_FLAGS_MIN = 0x0003,
- LPROCFS_FIELDS_FLAGS_MAX = 0x0004,
- LPROCFS_FIELDS_FLAGS_AVG = 0x0005,
- LPROCFS_FIELDS_FLAGS_SUMSQUARE = 0x0006,
- LPROCFS_FIELDS_FLAGS_COUNT = 0x0007,
-};
-
-struct lprocfs_stats {
- /* # of counters */
- unsigned short ls_num;
- /* 1 + the biggest cpu # whose ls_percpu slot has been allocated */
- unsigned short ls_biggest_alloc_num;
- enum lprocfs_stats_flags ls_flags;
- /* Lock used when there are no percpu stats areas; For percpu stats,
- * it is used to protect ls_biggest_alloc_num change
- */
- spinlock_t ls_lock;
-
- /* has ls_num of counter headers */
- struct lprocfs_counter_header *ls_cnt_header;
- struct lprocfs_percpu *ls_percpu[0];
-};
-
-#define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC)
-
-/* Pack all opcodes down into a single monotonically increasing index */
-static inline int opcode_offset(__u32 opc)
-{
- if (opc < OST_LAST_OPC) {
- /* OST opcode */
- return (opc - OST_FIRST_OPC);
- } else if (opc < MDS_LAST_OPC) {
- /* MDS opcode */
- return (opc - MDS_FIRST_OPC +
- OPC_RANGE(OST));
- } else if (opc < LDLM_LAST_OPC) {
- /* LDLM Opcode */
- return (opc - LDLM_FIRST_OPC +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < MGS_LAST_OPC) {
- /* MGS Opcode */
- return (opc - MGS_FIRST_OPC +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < OBD_LAST_OPC) {
- /* OBD Ping */
- return (opc - OBD_FIRST_OPC +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < LLOG_LAST_OPC) {
- /* LLOG Opcode */
- return (opc - LLOG_FIRST_OPC +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < QUOTA_LAST_OPC) {
- /* LQUOTA Opcode */
- return (opc - QUOTA_FIRST_OPC +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < SEQ_LAST_OPC) {
- /* SEQ opcode */
- return (opc - SEQ_FIRST_OPC +
- OPC_RANGE(QUOTA) +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < SEC_LAST_OPC) {
- /* SEC opcode */
- return (opc - SEC_FIRST_OPC +
- OPC_RANGE(SEQ) +
- OPC_RANGE(QUOTA) +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < FLD_LAST_OPC) {
- /* FLD opcode */
- return (opc - FLD_FIRST_OPC +
- OPC_RANGE(SEC) +
- OPC_RANGE(SEQ) +
- OPC_RANGE(QUOTA) +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else {
- /* Unknown Opcode */
- return -1;
- }
-}
-
-#define LUSTRE_MAX_OPCODES (OPC_RANGE(OST) + \
- OPC_RANGE(MDS) + \
- OPC_RANGE(LDLM) + \
- OPC_RANGE(MGS) + \
- OPC_RANGE(OBD) + \
- OPC_RANGE(LLOG) + \
- OPC_RANGE(SEC) + \
- OPC_RANGE(SEQ) + \
- OPC_RANGE(SEC) + \
- OPC_RANGE(FLD))
-
-#define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \
- OPC_RANGE(EXTRA))
-
-enum {
- PTLRPC_REQWAIT_CNTR = 0,
- PTLRPC_REQQDEPTH_CNTR,
- PTLRPC_REQACTIVE_CNTR,
- PTLRPC_TIMEOUT,
- PTLRPC_REQBUF_AVAIL_CNTR,
- PTLRPC_LAST_CNTR
-};
-
-#define PTLRPC_FIRST_CNTR PTLRPC_REQWAIT_CNTR
-
-enum {
- LDLM_GLIMPSE_ENQUEUE = 0,
- LDLM_PLAIN_ENQUEUE,
- LDLM_EXTENT_ENQUEUE,
- LDLM_FLOCK_ENQUEUE,
- LDLM_IBITS_ENQUEUE,
- MDS_REINT_SETATTR,
- MDS_REINT_CREATE,
- MDS_REINT_LINK,
- MDS_REINT_UNLINK,
- MDS_REINT_RENAME,
- MDS_REINT_OPEN,
- MDS_REINT_SETXATTR,
- BRW_READ_BYTES,
- BRW_WRITE_BYTES,
- EXTRA_LAST_OPC
-};
-
-#define EXTRA_FIRST_OPC LDLM_GLIMPSE_ENQUEUE
-/* class_obd.c */
-extern struct dentry *debugfs_lustre_root;
-extern struct kobject *lustre_kobj;
-
-struct obd_device;
-struct obd_histogram;
-
-/* Days / hours / mins / seconds format */
-struct dhms {
- int d, h, m, s;
-};
-
-static inline void s2dhms(struct dhms *ts, time64_t secs64)
-{
- unsigned int secs;
-
- ts->d = div_u64_rem(secs64, 86400, &secs);
- ts->h = secs / 3600;
- secs = secs % 3600;
- ts->m = secs / 60;
- ts->s = secs % 60;
-}
-
-#define DHMS_FMT "%dd%dh%02dm%02ds"
-#define DHMS_VARS(x) (x)->d, (x)->h, (x)->m, (x)->s
-
-#define JOBSTATS_JOBID_VAR_MAX_LEN 20
-#define JOBSTATS_DISABLE "disable"
-#define JOBSTATS_PROCNAME_UID "procname_uid"
-#define JOBSTATS_NODELOCAL "nodelocal"
-
-/* obd_config.c */
-void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg));
-
-int lprocfs_write_frac_helper(const char __user *buffer,
- unsigned long count, int *val, int mult);
-int lprocfs_read_frac_helper(char *buffer, unsigned long count,
- long val, int mult);
-
-int lprocfs_stats_alloc_one(struct lprocfs_stats *stats,
- unsigned int cpuid);
-int lprocfs_stats_lock(struct lprocfs_stats *stats,
- enum lprocfs_stats_lock_ops opc,
- unsigned long *flags);
-void lprocfs_stats_unlock(struct lprocfs_stats *stats,
- enum lprocfs_stats_lock_ops opc,
- unsigned long *flags);
-
-static inline unsigned int
-lprocfs_stats_counter_size(struct lprocfs_stats *stats)
-{
- unsigned int percpusize;
-
- percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]);
-
- /* irq safe stats need lc_array_sum[1] */
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- percpusize += stats->ls_num * sizeof(__s64);
-
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0)
- percpusize = L1_CACHE_ALIGN(percpusize);
-
- return percpusize;
-}
-
-static inline struct lprocfs_counter *
-lprocfs_stats_counter_get(struct lprocfs_stats *stats, unsigned int cpuid,
- int index)
-{
- struct lprocfs_counter *cntr;
-
- cntr = &stats->ls_percpu[cpuid]->lp_cntr[index];
-
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- cntr = (void *)cntr + index * sizeof(__s64);
-
- return cntr;
-}
-
-/* Two optimized LPROCFS counter increment functions are provided:
- * lprocfs_counter_incr(cntr, value) - optimized for by-one counters
- * lprocfs_counter_add(cntr) - use for multi-valued counters
- * Counter data layout allows config flag, counter lock and the
- * count itself to reside within a single cache line.
- */
-
-void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount);
-void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount);
-
-#define lprocfs_counter_incr(stats, idx) \
- lprocfs_counter_add(stats, idx, 1)
-#define lprocfs_counter_decr(stats, idx) \
- lprocfs_counter_sub(stats, idx, 1)
-
-__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
- struct lprocfs_counter_header *header,
- enum lprocfs_stats_flags flags,
- enum lprocfs_fields_flags field);
-__u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx,
- enum lprocfs_fields_flags field);
-
-extern struct lprocfs_stats *
-lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
-void lprocfs_clear_stats(struct lprocfs_stats *stats);
-void lprocfs_free_stats(struct lprocfs_stats **stats);
-void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
- unsigned int conf, const char *name,
- const char *units);
-struct obd_export;
-int lprocfs_exp_cleanup(struct obd_export *exp);
-struct dentry *ldebugfs_add_simple(struct dentry *root,
- char *name,
- void *data,
- const struct file_operations *fops);
-
-int ldebugfs_register_stats(struct dentry *parent,
- const char *name,
- struct lprocfs_stats *stats);
-
-/* lprocfs_status.c */
-int ldebugfs_add_vars(struct dentry *parent,
- struct lprocfs_vars *var,
- void *data);
-
-struct dentry *ldebugfs_register(const char *name,
- struct dentry *parent,
- struct lprocfs_vars *list,
- void *data);
-
-void ldebugfs_remove(struct dentry **entryp);
-
-int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
- const struct attribute_group *attrs);
-int lprocfs_obd_cleanup(struct obd_device *obd);
-
-int ldebugfs_seq_create(struct dentry *parent,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data);
-int ldebugfs_obd_seq_create(struct obd_device *dev,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data);
-
-/* Generic callbacks */
-
-int lprocfs_rd_uint(struct seq_file *m, void *data);
-int lprocfs_wr_uint(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-int lprocfs_rd_server_uuid(struct seq_file *m, void *data);
-int lprocfs_rd_conn_uuid(struct seq_file *m, void *data);
-int lprocfs_rd_import(struct seq_file *m, void *data);
-int lprocfs_rd_state(struct seq_file *m, void *data);
-int lprocfs_rd_connect_flags(struct seq_file *m, void *data);
-
-struct adaptive_timeout;
-int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at);
-int lprocfs_rd_timeouts(struct seq_file *m, void *data);
-int lprocfs_wr_ping(struct file *file, const char __user *buffer,
- size_t count, loff_t *off);
-int lprocfs_wr_import(struct file *file, const char __user *buffer,
- size_t count, loff_t *off);
-int lprocfs_rd_pinger_recov(struct seq_file *m, void *n);
-int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
- size_t count, loff_t *off);
-
-/* Statfs helpers */
-
-int lprocfs_write_helper(const char __user *buffer, unsigned long count,
- int *val);
-int lprocfs_write_u64_helper(const char __user *buffer,
- unsigned long count, __u64 *val);
-int lprocfs_write_frac_u64_helper(const char __user *buffer,
- unsigned long count,
- __u64 *val, int mult);
-char *lprocfs_find_named_value(const char *buffer, const char *name,
- size_t *count);
-void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value);
-void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value);
-void lprocfs_oh_clear(struct obd_histogram *oh);
-unsigned long lprocfs_oh_sum(struct obd_histogram *oh);
-
-void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
- struct lprocfs_counter *cnt);
-
-int lprocfs_single_release(struct inode *inode, struct file *file);
-int lprocfs_seq_release(struct inode *inode, struct file *file);
-
-/* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
- * proc entries; otherwise, you will define name##_seq_write function also for
- * a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally,
- * call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data);
- */
-#define __LPROC_SEQ_FOPS(name, custom_seq_write) \
-static int name##_single_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, name##_seq_show, inode->i_private); \
-} \
-static const struct file_operations name##_fops = { \
- .owner = THIS_MODULE, \
- .open = name##_single_open, \
- .read = seq_read, \
- .write = custom_seq_write, \
- .llseek = seq_lseek, \
- .release = lprocfs_single_release, \
-}
-
-#define LPROC_SEQ_FOPS_RO(name) __LPROC_SEQ_FOPS(name, NULL)
-#define LPROC_SEQ_FOPS(name) __LPROC_SEQ_FOPS(name, name##_seq_write)
-
-#define LPROC_SEQ_FOPS_RO_TYPE(name, type) \
- static int name##_##type##_seq_show(struct seq_file *m, void *v)\
- { \
- return lprocfs_rd_##type(m, m->private); \
- } \
- LPROC_SEQ_FOPS_RO(name##_##type)
-
-#define LPROC_SEQ_FOPS_RW_TYPE(name, type) \
- static int name##_##type##_seq_show(struct seq_file *m, void *v)\
- { \
- return lprocfs_rd_##type(m, m->private); \
- } \
- static ssize_t name##_##type##_seq_write(struct file *file, \
- const char __user *buffer, size_t count, \
- loff_t *off) \
- { \
- struct seq_file *seq = file->private_data; \
- return lprocfs_wr_##type(file, buffer, \
- count, seq->private); \
- } \
- LPROC_SEQ_FOPS(name##_##type)
-
-#define LPROC_SEQ_FOPS_WR_ONLY(name, type) \
- static ssize_t name##_##type##_write(struct file *file, \
- const char __user *buffer, size_t count, \
- loff_t *off) \
- { \
- return lprocfs_wr_##type(file, buffer, count, off); \
- } \
- static int name##_##type##_open(struct inode *inode, struct file *file) \
- { \
- return single_open(file, NULL, inode->i_private); \
- } \
- static const struct file_operations name##_##type##_fops = { \
- .open = name##_##type##_open, \
- .write = name##_##type##_write, \
- .release = lprocfs_single_release, \
- }
-
-struct lustre_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
- char *buf);
- ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len);
-};
-
-#define LUSTRE_ATTR(name, mode, show, store) \
-static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store)
-
-#define LUSTRE_RO_ATTR(name) LUSTRE_ATTR(name, 0444, name##_show, NULL)
-#define LUSTRE_RW_ATTR(name) LUSTRE_ATTR(name, 0644, name##_show, name##_store)
-
-extern const struct sysfs_ops lustre_sysfs_ops;
-
-struct root_squash_info;
-int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count,
- struct root_squash_info *squash, char *name);
-int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count,
- struct root_squash_info *squash, char *name);
-
-/* all quota proc functions */
-int lprocfs_quota_rd_bunit(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_bunit(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_btune(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_btune(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_iunit(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_iunit(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_itune(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_itune(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_type(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_switch_seconds(struct file *file,
- const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_switch_qs(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_boundary_factor(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_least_bunit(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_least_iunit(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_qs_factor(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-#endif /* LPROCFS_SNMP_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
deleted file mode 100644
index 35c7b582f36d..000000000000
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ /dev/null
@@ -1,1335 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LUSTRE_LU_OBJECT_H
-#define __LUSTRE_LU_OBJECT_H
-
-#include <stdarg.h>
-#include <linux/percpu_counter.h>
-#include <linux/libcfs/libcfs.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <lu_ref.h>
-
-struct seq_file;
-struct lustre_cfg;
-struct lprocfs_stats;
-
-/** \defgroup lu lu
- * lu_* data-types represent server-side entities shared by data and meta-data
- * stacks.
- *
- * Design goals:
- *
- * -# support for layering.
- *
- * Server side object is split into layers, one per device in the
- * corresponding device stack. Individual layer is represented by struct
- * lu_object. Compound layered object --- by struct lu_object_header. Most
- * interface functions take lu_object as an argument and operate on the
- * whole compound object. This decision was made due to the following
- * reasons:
- *
- * - it's envisaged that lu_object will be used much more often than
- * lu_object_header;
- *
- * - we want lower (non-top) layers to be able to initiate operations
- * on the whole object.
- *
- * Generic code supports layering more complex than simple stacking, e.g.,
- * it is possible that at some layer object "spawns" multiple sub-objects
- * on the lower layer.
- *
- * -# fid-based identification.
- *
- * Compound object is uniquely identified by its fid. Objects are indexed
- * by their fids (hash table is used for index).
- *
- * -# caching and life-cycle management.
- *
- * Object's life-time is controlled by reference counting. When reference
- * count drops to 0, object is returned to cache. Cached objects still
- * retain their identity (i.e., fid), and can be recovered from cache.
- *
- * Objects are kept in the global LRU list, and lu_site_purge() function
- * can be used to reclaim given number of unused objects from the tail of
- * the LRU.
- *
- * -# avoiding recursion.
- *
- * Generic code tries to replace recursion through layers by iterations
- * where possible. Additionally to the end of reducing stack consumption,
- * data, when practically possible, are allocated through lu_context_key
- * interface rather than on stack.
- * @{
- */
-
-struct lu_site;
-struct lu_object;
-struct lu_device;
-struct lu_object_header;
-struct lu_context;
-struct lu_env;
-
-/**
- * Operations common for data and meta-data devices.
- */
-struct lu_device_operations {
- /**
- * Allocate object for the given device (without lower-layer
- * parts). This is called by lu_object_operations::loo_object_init()
- * from the parent layer, and should setup at least lu_object::lo_dev
- * and lu_object::lo_ops fields of resulting lu_object.
- *
- * Object creation protocol.
- *
- * Due to design goal of avoiding recursion, object creation (see
- * lu_object_alloc()) is somewhat involved:
- *
- * - first, lu_device_operations::ldo_object_alloc() method of the
- * top-level device in the stack is called. It should allocate top
- * level object (including lu_object_header), but without any
- * lower-layer sub-object(s).
- *
- * - then lu_object_alloc() sets fid in the header of newly created
- * object.
- *
- * - then lu_object_operations::loo_object_init() is called. It has
- * to allocate lower-layer object(s). To do this,
- * lu_object_operations::loo_object_init() calls ldo_object_alloc()
- * of the lower-layer device(s).
- *
- * - for all new objects allocated by
- * lu_object_operations::loo_object_init() (and inserted into object
- * stack), lu_object_operations::loo_object_init() is called again
- * repeatedly, until no new objects are created.
- *
- * \post ergo(!IS_ERR(result), result->lo_dev == d &&
- * result->lo_ops != NULL);
- */
- struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
- const struct lu_object_header *h,
- struct lu_device *d);
- /**
- * process config specific for device.
- */
- int (*ldo_process_config)(const struct lu_env *env,
- struct lu_device *, struct lustre_cfg *);
- int (*ldo_recovery_complete)(const struct lu_env *,
- struct lu_device *);
-
- /**
- * initialize local objects for device. this method called after layer
- * has been initialized (after LCFG_SETUP stage) and before it starts
- * serving user requests.
- */
-
- int (*ldo_prepare)(const struct lu_env *,
- struct lu_device *parent,
- struct lu_device *dev);
-
-};
-
-/**
- * For lu_object_conf flags
- */
-enum loc_flags {
- /* This is a new object to be allocated, or the file
- * corresponding to the object does not exists.
- */
- LOC_F_NEW = 0x00000001,
-};
-
-/**
- * Object configuration, describing particulars of object being created. On
- * server this is not used, as server objects are full identified by fid. On
- * client configuration contains struct lustre_md.
- */
-struct lu_object_conf {
- /**
- * Some hints for obj find and alloc.
- */
- enum loc_flags loc_flags;
-};
-
-/**
- * Type of "printer" function used by lu_object_operations::loo_object_print()
- * method.
- *
- * Printer function is needed to provide some flexibility in (semi-)debugging
- * output: possible implementations: printk, CDEBUG, sysfs/seq_file
- */
-typedef int (*lu_printer_t)(const struct lu_env *env,
- void *cookie, const char *format, ...)
- __printf(3, 4);
-
-/**
- * Operations specific for particular lu_object.
- */
-struct lu_object_operations {
- /**
- * Allocate lower-layer parts of the object by calling
- * lu_device_operations::ldo_object_alloc() of the corresponding
- * underlying device.
- *
- * This method is called once for each object inserted into object
- * stack. It's responsibility of this method to insert lower-layer
- * object(s) it create into appropriate places of object stack.
- */
- int (*loo_object_init)(const struct lu_env *env,
- struct lu_object *o,
- const struct lu_object_conf *conf);
- /**
- * Called (in top-to-bottom order) during object allocation after all
- * layers were allocated and initialized. Can be used to perform
- * initialization depending on lower layers.
- */
- int (*loo_object_start)(const struct lu_env *env,
- struct lu_object *o);
- /**
- * Called before lu_object_operations::loo_object_free() to signal
- * that object is being destroyed. Dual to
- * lu_object_operations::loo_object_init().
- */
- void (*loo_object_delete)(const struct lu_env *env,
- struct lu_object *o);
- /**
- * Dual to lu_device_operations::ldo_object_alloc(). Called when
- * object is removed from memory.
- */
- void (*loo_object_free)(const struct lu_env *env,
- struct lu_object *o);
- /**
- * Called when last active reference to the object is released (and
- * object returns to the cache). This method is optional.
- */
- void (*loo_object_release)(const struct lu_env *env,
- struct lu_object *o);
- /**
- * Optional debugging helper. Print given object.
- */
- int (*loo_object_print)(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o);
- /**
- * Optional debugging method. Returns true iff method is internally
- * consistent.
- */
- int (*loo_object_invariant)(const struct lu_object *o);
-};
-
-/**
- * Type of lu_device.
- */
-struct lu_device_type;
-
-/**
- * Device: a layer in the server side abstraction stacking.
- */
-struct lu_device {
- /**
- * reference count. This is incremented, in particular, on each object
- * created at this layer.
- *
- * \todo XXX which means that atomic_t is probably too small.
- */
- atomic_t ld_ref;
- /**
- * Pointer to device type. Never modified once set.
- */
- struct lu_device_type *ld_type;
- /**
- * Operation vector for this device.
- */
- const struct lu_device_operations *ld_ops;
- /**
- * Stack this device belongs to.
- */
- struct lu_site *ld_site;
-
- /** \todo XXX: temporary back pointer into obd. */
- struct obd_device *ld_obd;
- /**
- * A list of references to this object, for debugging.
- */
- struct lu_ref ld_reference;
- /**
- * Link the device to the site.
- **/
- struct list_head ld_linkage;
-};
-
-struct lu_device_type_operations;
-
-/**
- * Tag bits for device type. They are used to distinguish certain groups of
- * device types.
- */
-enum lu_device_tag {
- /** this is meta-data device */
- LU_DEVICE_MD = (1 << 0),
- /** this is data device */
- LU_DEVICE_DT = (1 << 1),
- /** data device in the client stack */
- LU_DEVICE_CL = (1 << 2)
-};
-
-/**
- * Type of device.
- */
-struct lu_device_type {
- /**
- * Tag bits. Taken from enum lu_device_tag. Never modified once set.
- */
- __u32 ldt_tags;
- /**
- * Name of this class. Unique system-wide. Never modified once set.
- */
- char *ldt_name;
- /**
- * Operations for this type.
- */
- const struct lu_device_type_operations *ldt_ops;
- /**
- * \todo XXX: temporary pointer to associated obd_type.
- */
- struct obd_type *ldt_obd_type;
- /**
- * \todo XXX: temporary: context tags used by obd_*() calls.
- */
- __u32 ldt_ctx_tags;
- /**
- * Number of existing device type instances.
- */
- atomic_t ldt_device_nr;
- /**
- * Linkage into a global list of all device types.
- *
- * \see lu_device_types.
- */
- struct list_head ldt_linkage;
-};
-
-/**
- * Operations on a device type.
- */
-struct lu_device_type_operations {
- /**
- * Allocate new device.
- */
- struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *lcfg);
- /**
- * Free device. Dual to
- * lu_device_type_operations::ldto_device_alloc(). Returns pointer to
- * the next device in the stack.
- */
- struct lu_device *(*ldto_device_free)(const struct lu_env *,
- struct lu_device *);
-
- /**
- * Initialize the devices after allocation
- */
- int (*ldto_device_init)(const struct lu_env *env,
- struct lu_device *, const char *,
- struct lu_device *);
- /**
- * Finalize device. Dual to
- * lu_device_type_operations::ldto_device_init(). Returns pointer to
- * the next device in the stack.
- */
- struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
- struct lu_device *);
- /**
- * Initialize device type. This is called on module load.
- */
- int (*ldto_init)(struct lu_device_type *t);
- /**
- * Finalize device type. Dual to
- * lu_device_type_operations::ldto_init(). Called on module unload.
- */
- void (*ldto_fini)(struct lu_device_type *t);
- /**
- * Called when the first device is created.
- */
- void (*ldto_start)(struct lu_device_type *t);
- /**
- * Called when number of devices drops to 0.
- */
- void (*ldto_stop)(struct lu_device_type *t);
-};
-
-static inline int lu_device_is_md(const struct lu_device *d)
-{
- return ergo(d, d->ld_type->ldt_tags & LU_DEVICE_MD);
-}
-
-/**
- * Common object attributes.
- */
-struct lu_attr {
- /** size in bytes */
- __u64 la_size;
- /** modification time in seconds since Epoch */
- s64 la_mtime;
- /** access time in seconds since Epoch */
- s64 la_atime;
- /** change time in seconds since Epoch */
- s64 la_ctime;
- /** 512-byte blocks allocated to object */
- __u64 la_blocks;
- /** permission bits and file type */
- __u32 la_mode;
- /** owner id */
- __u32 la_uid;
- /** group id */
- __u32 la_gid;
- /** object flags */
- __u32 la_flags;
- /** number of persistent references to this object */
- __u32 la_nlink;
- /** blk bits of the object*/
- __u32 la_blkbits;
- /** blk size of the object*/
- __u32 la_blksize;
- /** real device */
- __u32 la_rdev;
- /**
- * valid bits
- *
- * \see enum la_valid
- */
- __u64 la_valid;
-};
-
-/** Bit-mask of valid attributes */
-enum la_valid {
- LA_ATIME = 1 << 0,
- LA_MTIME = 1 << 1,
- LA_CTIME = 1 << 2,
- LA_SIZE = 1 << 3,
- LA_MODE = 1 << 4,
- LA_UID = 1 << 5,
- LA_GID = 1 << 6,
- LA_BLOCKS = 1 << 7,
- LA_TYPE = 1 << 8,
- LA_FLAGS = 1 << 9,
- LA_NLINK = 1 << 10,
- LA_RDEV = 1 << 11,
- LA_BLKSIZE = 1 << 12,
- LA_KILL_SUID = 1 << 13,
- LA_KILL_SGID = 1 << 14,
-};
-
-/**
- * Layer in the layered object.
- */
-struct lu_object {
- /**
- * Header for this object.
- */
- struct lu_object_header *lo_header;
- /**
- * Device for this layer.
- */
- struct lu_device *lo_dev;
- /**
- * Operations for this object.
- */
- const struct lu_object_operations *lo_ops;
- /**
- * Linkage into list of all layers.
- */
- struct list_head lo_linkage;
- /**
- * Link to the device, for debugging.
- */
- struct lu_ref_link lo_dev_ref;
-};
-
-enum lu_object_header_flags {
- /**
- * Don't keep this object in cache. Object will be destroyed as soon
- * as last reference to it is released. This flag cannot be cleared
- * once set.
- */
- LU_OBJECT_HEARD_BANSHEE = 0,
- /**
- * Mark this object has already been taken out of cache.
- */
- LU_OBJECT_UNHASHED = 1,
-};
-
-enum lu_object_header_attr {
- LOHA_EXISTS = 1 << 0,
- LOHA_REMOTE = 1 << 1,
- /**
- * UNIX file type is stored in S_IFMT bits.
- */
- LOHA_FT_START = 001 << 12, /**< S_IFIFO */
- LOHA_FT_END = 017 << 12, /**< S_IFMT */
-};
-
-/**
- * "Compound" object, consisting of multiple layers.
- *
- * Compound object with given fid is unique with given lu_site.
- *
- * Note, that object does *not* necessary correspond to the real object in the
- * persistent storage: object is an anchor for locking and method calling, so
- * it is created for things like not-yet-existing child created by mkdir or
- * create calls. lu_object_operations::loo_exists() can be used to check
- * whether object is backed by persistent storage entity.
- */
-struct lu_object_header {
- /**
- * Fid, uniquely identifying this object.
- */
- struct lu_fid loh_fid;
- /**
- * Object flags from enum lu_object_header_flags. Set and checked
- * atomically.
- */
- unsigned long loh_flags;
- /**
- * Object reference count. Protected by lu_site::ls_guard.
- */
- atomic_t loh_ref;
- /**
- * Common object attributes, cached for efficiency. From enum
- * lu_object_header_attr.
- */
- __u32 loh_attr;
- /**
- * Linkage into per-site hash table. Protected by lu_site::ls_guard.
- */
- struct hlist_node loh_hash;
- /**
- * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
- */
- struct list_head loh_lru;
- /**
- * Linkage into list of layers. Never modified once set (except lately
- * during object destruction). No locking is necessary.
- */
- struct list_head loh_layers;
- /**
- * A list of references to this object, for debugging.
- */
- struct lu_ref loh_reference;
-};
-
-struct fld;
-
-struct lu_site_bkt_data {
- /**
- * number of object in this bucket on the lsb_lru list.
- */
- long lsb_lru_len;
- /**
- * LRU list, updated on each access to object. Protected by
- * bucket lock of lu_site::ls_obj_hash.
- *
- * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
- * moved to the lu_site::ls_lru.prev (this is due to the non-existence
- * of list_for_each_entry_safe_reverse()).
- */
- struct list_head lsb_lru;
- /**
- * Wait-queue signaled when an object in this site is ultimately
- * destroyed (lu_object_free()). It is used by lu_object_find() to
- * wait before re-trying when object in the process of destruction is
- * found in the hash table.
- *
- * \see htable_lookup().
- */
- wait_queue_head_t lsb_marche_funebre;
-};
-
-enum {
- LU_SS_CREATED = 0,
- LU_SS_CACHE_HIT,
- LU_SS_CACHE_MISS,
- LU_SS_CACHE_RACE,
- LU_SS_CACHE_DEATH_RACE,
- LU_SS_LRU_PURGED,
- LU_SS_LAST_STAT
-};
-
-/**
- * lu_site is a "compartment" within which objects are unique, and LRU
- * discipline is maintained.
- *
- * lu_site exists so that multiple layered stacks can co-exist in the same
- * address space.
- *
- * lu_site has the same relation to lu_device as lu_object_header to
- * lu_object.
- */
-struct lu_site {
- /**
- * objects hash table
- */
- struct cfs_hash *ls_obj_hash;
- /**
- * index of bucket on hash table while purging
- */
- unsigned int ls_purge_start;
- /**
- * Top-level device for this stack.
- */
- struct lu_device *ls_top_dev;
- /**
- * Bottom-level device for this stack
- */
- struct lu_device *ls_bottom_dev;
- /**
- * Linkage into global list of sites.
- */
- struct list_head ls_linkage;
- /**
- * List for lu device for this site, protected
- * by ls_ld_lock.
- **/
- struct list_head ls_ld_linkage;
- spinlock_t ls_ld_lock;
-
- /**
- * Lock to serialize site purge.
- */
- struct mutex ls_purge_mutex;
-
- /**
- * lu_site stats
- */
- struct lprocfs_stats *ls_stats;
- /**
- * XXX: a hack! fld has to find md_site via site, remove when possible
- */
- struct seq_server_site *ld_seq_site;
- /**
- * Number of objects in lsb_lru_lists - used for shrinking
- */
- struct percpu_counter ls_lru_len_counter;
-};
-
-static inline struct lu_site_bkt_data *
-lu_site_bkt_from_fid(struct lu_site *site, struct lu_fid *fid)
-{
- struct cfs_hash_bd bd;
-
- cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
- return cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
-}
-
-static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
-{
- return s->ld_seq_site;
-}
-
-/** \name ctors
- * Constructors/destructors.
- * @{
- */
-
-int lu_site_init(struct lu_site *s, struct lu_device *d);
-void lu_site_fini(struct lu_site *s);
-int lu_site_init_finish(struct lu_site *s);
-void lu_stack_fini(const struct lu_env *env, struct lu_device *top);
-void lu_device_get(struct lu_device *d);
-void lu_device_put(struct lu_device *d);
-int lu_device_init(struct lu_device *d, struct lu_device_type *t);
-void lu_device_fini(struct lu_device *d);
-int lu_object_header_init(struct lu_object_header *h);
-void lu_object_header_fini(struct lu_object_header *h);
-int lu_object_init(struct lu_object *o,
- struct lu_object_header *h, struct lu_device *d);
-void lu_object_fini(struct lu_object *o);
-void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
-void lu_object_add(struct lu_object *before, struct lu_object *o);
-
-/**
- * Helpers to initialize and finalize device types.
- */
-
-int lu_device_type_init(struct lu_device_type *ldt);
-void lu_device_type_fini(struct lu_device_type *ldt);
-
-/** @} ctors */
-
-/** \name caching
- * Caching and reference counting.
- * @{
- */
-
-/**
- * Acquire additional reference to the given object. This function is used to
- * attain additional reference. To acquire initial reference use
- * lu_object_find().
- */
-static inline void lu_object_get(struct lu_object *o)
-{
- LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
- atomic_inc(&o->lo_header->loh_ref);
-}
-
-/**
- * Return true of object will not be cached after last reference to it is
- * released.
- */
-static inline int lu_object_is_dying(const struct lu_object_header *h)
-{
- return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
-}
-
-void lu_object_put(const struct lu_env *env, struct lu_object *o);
-void lu_object_unhash(const struct lu_env *env, struct lu_object *o);
-int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s, int nr,
- bool canblock);
-
-static inline int lu_site_purge(const struct lu_env *env, struct lu_site *s,
- int nr)
-{
- return lu_site_purge_objects(env, s, nr, true);
-}
-
-void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
- lu_printer_t printer);
-struct lu_object *lu_object_find_at(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf);
-struct lu_object *lu_object_find_slice(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf);
-/** @} caching */
-
-/** \name helpers
- * Helpers.
- * @{
- */
-
-/**
- * First (topmost) sub-object of given compound object
- */
-static inline struct lu_object *lu_object_top(struct lu_object_header *h)
-{
- LASSERT(!list_empty(&h->loh_layers));
- return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
-}
-
-/**
- * Next sub-object in the layering
- */
-static inline struct lu_object *lu_object_next(const struct lu_object *o)
-{
- return container_of0(o->lo_linkage.next, struct lu_object, lo_linkage);
-}
-
-/**
- * Pointer to the fid of this object.
- */
-static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
-{
- return &o->lo_header->loh_fid;
-}
-
-/**
- * return device operations vector for this object
- */
-static inline const struct lu_device_operations *
-lu_object_ops(const struct lu_object *o)
-{
- return o->lo_dev->ld_ops;
-}
-
-/**
- * Given a compound object, find its slice, corresponding to the device type
- * \a dtype.
- */
-struct lu_object *lu_object_locate(struct lu_object_header *h,
- const struct lu_device_type *dtype);
-
-/**
- * Printer function emitting messages through libcfs_debug_msg().
- */
-int lu_cdebug_printer(const struct lu_env *env,
- void *cookie, const char *format, ...);
-
-/**
- * Print object description followed by a user-supplied message.
- */
-#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
-do { \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
- CDEBUG(mask, format "\n", ## __VA_ARGS__); \
- } \
-} while (0)
-
-/**
- * Print short object description followed by a user-supplied message.
- */
-#define LU_OBJECT_HEADER(mask, env, object, format, ...) \
-do { \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
- (object)->lo_header); \
- lu_cdebug_printer(env, &msgdata, "\n"); \
- CDEBUG(mask, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-void lu_object_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct lu_object *o);
-void lu_object_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct lu_object_header *hdr);
-
-/**
- * Check object consistency.
- */
-int lu_object_invariant(const struct lu_object *o);
-
-/**
- * Check whether object exists, no matter on local or remote storage.
- * Note: LOHA_EXISTS will be set once some one created the object,
- * and it does not needs to be committed to storage.
- */
-#define lu_object_exists(o) ((o)->lo_header->loh_attr & LOHA_EXISTS)
-
-/**
- * Check whether object on the remote storage.
- */
-#define lu_object_remote(o) unlikely((o)->lo_header->loh_attr & LOHA_REMOTE)
-
-static inline int lu_object_assert_exists(const struct lu_object *o)
-{
- return lu_object_exists(o);
-}
-
-static inline int lu_object_assert_not_exists(const struct lu_object *o)
-{
- return !lu_object_exists(o);
-}
-
-/**
- * Attr of this object.
- */
-static inline __u32 lu_object_attr(const struct lu_object *o)
-{
- LASSERT(lu_object_exists(o) != 0);
- return o->lo_header->loh_attr;
-}
-
-static inline void lu_object_ref_add(struct lu_object *o,
- const char *scope,
- const void *source)
-{
- lu_ref_add(&o->lo_header->loh_reference, scope, source);
-}
-
-static inline void lu_object_ref_add_at(struct lu_object *o,
- struct lu_ref_link *link,
- const char *scope,
- const void *source)
-{
- lu_ref_add_at(&o->lo_header->loh_reference, link, scope, source);
-}
-
-static inline void lu_object_ref_del(struct lu_object *o,
- const char *scope, const void *source)
-{
- lu_ref_del(&o->lo_header->loh_reference, scope, source);
-}
-
-static inline void lu_object_ref_del_at(struct lu_object *o,
- struct lu_ref_link *link,
- const char *scope, const void *source)
-{
- lu_ref_del_at(&o->lo_header->loh_reference, link, scope, source);
-}
-
-/** input params, should be filled out by mdt */
-struct lu_rdpg {
- /** hash */
- __u64 rp_hash;
- /** count in bytes */
- unsigned int rp_count;
- /** number of pages */
- unsigned int rp_npages;
- /** requested attr */
- __u32 rp_attrs;
- /** pointers to pages */
- struct page **rp_pages;
-};
-
-enum lu_xattr_flags {
- LU_XATTR_REPLACE = (1 << 0),
- LU_XATTR_CREATE = (1 << 1)
-};
-
-/** @} helpers */
-
-/** \name lu_context
- * @{
- */
-
-/** For lu_context health-checks */
-enum lu_context_state {
- LCS_INITIALIZED = 1,
- LCS_ENTERED,
- LCS_LEFT,
- LCS_FINALIZED
-};
-
-/**
- * lu_context. Execution context for lu_object methods. Currently associated
- * with thread.
- *
- * All lu_object methods, except device and device type methods (called during
- * system initialization and shutdown) are executed "within" some
- * lu_context. This means, that pointer to some "current" lu_context is passed
- * as an argument to all methods.
- *
- * All service ptlrpc threads create lu_context as part of their
- * initialization. It is possible to create "stand-alone" context for other
- * execution environments (like system calls).
- *
- * lu_object methods mainly use lu_context through lu_context_key interface
- * that allows each layer to associate arbitrary pieces of data with each
- * context (see pthread_key_create(3) for similar interface).
- *
- * On a client, lu_context is bound to a thread, see cl_env_get().
- *
- * \see lu_context_key
- */
-struct lu_context {
- /**
- * lu_context is used on the client side too. Yet we don't want to
- * allocate values of server-side keys for the client contexts and
- * vice versa.
- *
- * To achieve this, set of tags in introduced. Contexts and keys are
- * marked with tags. Key value are created only for context whose set
- * of tags has non-empty intersection with one for key. Tags are taken
- * from enum lu_context_tag.
- */
- __u32 lc_tags;
- enum lu_context_state lc_state;
- /**
- * Pointer to the home service thread. NULL for other execution
- * contexts.
- */
- struct ptlrpc_thread *lc_thread;
- /**
- * Pointer to an array with key values. Internal implementation
- * detail.
- */
- void **lc_value;
- /**
- * Linkage into a list of all remembered contexts. Only
- * `non-transient' contexts, i.e., ones created for service threads
- * are placed here.
- */
- struct list_head lc_remember;
- /**
- * Version counter used to skip calls to lu_context_refill() when no
- * keys were registered.
- */
- unsigned int lc_version;
- /**
- * Debugging cookie.
- */
- unsigned int lc_cookie;
-};
-
-/**
- * lu_context_key interface. Similar to pthread_key.
- */
-
-enum lu_context_tag {
- /**
- * Thread on md server
- */
- LCT_MD_THREAD = 1 << 0,
- /**
- * Thread on dt server
- */
- LCT_DT_THREAD = 1 << 1,
- /**
- * Context for transaction handle
- */
- LCT_TX_HANDLE = 1 << 2,
- /**
- * Thread on client
- */
- LCT_CL_THREAD = 1 << 3,
- /**
- * A per-request session on a server, and a per-system-call session on
- * a client.
- */
- LCT_SESSION = 1 << 4,
- /**
- * A per-request data on OSP device
- */
- LCT_OSP_THREAD = 1 << 5,
- /**
- * MGS device thread
- */
- LCT_MG_THREAD = 1 << 6,
- /**
- * Context for local operations
- */
- LCT_LOCAL = 1 << 7,
- /**
- * session for server thread
- **/
- LCT_SERVER_SESSION = BIT(8),
- /**
- * Set when at least one of keys, having values in this context has
- * non-NULL lu_context_key::lct_exit() method. This is used to
- * optimize lu_context_exit() call.
- */
- LCT_HAS_EXIT = 1 << 28,
- /**
- * Don't add references for modules creating key values in that context.
- * This is only for contexts used internally by lu_object framework.
- */
- LCT_NOREF = 1 << 29,
- /**
- * Key is being prepared for retiring, don't create new values for it.
- */
- LCT_QUIESCENT = 1 << 30,
- /**
- * Context should be remembered.
- */
- LCT_REMEMBER = 1 << 31,
- /**
- * Contexts usable in cache shrinker thread.
- */
- LCT_SHRINKER = LCT_MD_THREAD | LCT_DT_THREAD | LCT_CL_THREAD |
- LCT_NOREF
-};
-
-/**
- * Key. Represents per-context value slot.
- *
- * Keys are usually registered when module owning the key is initialized, and
- * de-registered when module is unloaded. Once key is registered, all new
- * contexts with matching tags, will get key value. "Old" contexts, already
- * initialized at the time of key registration, can be forced to get key value
- * by calling lu_context_refill().
- *
- * Every key value is counted in lu_context_key::lct_used and acquires a
- * reference on an owning module. This means, that all key values have to be
- * destroyed before module can be unloaded. This is usually achieved by
- * stopping threads started by the module, that created contexts in their
- * entry functions. Situation is complicated by the threads shared by multiple
- * modules, like ptlrpcd daemon on a client. To work around this problem,
- * contexts, created in such threads, are `remembered' (see
- * LCT_REMEMBER)---i.e., added into a global list. When module is preparing
- * for unloading it does the following:
- *
- * - marks its keys as `quiescent' (lu_context_tag::LCT_QUIESCENT)
- * preventing new key values from being allocated in the new contexts,
- * and
- *
- * - scans a list of remembered contexts, destroying values of module
- * keys, thus releasing references to the module.
- *
- * This is done by lu_context_key_quiesce(). If module is re-activated
- * before key has been de-registered, lu_context_key_revive() call clears
- * `quiescent' marker.
- *
- * lu_context code doesn't provide any internal synchronization for these
- * activities---it's assumed that startup (including threads start-up) and
- * shutdown are serialized by some external means.
- *
- * \see lu_context
- */
-struct lu_context_key {
- /**
- * Set of tags for which values of this key are to be instantiated.
- */
- __u32 lct_tags;
- /**
- * Value constructor. This is called when new value is created for a
- * context. Returns pointer to new value of error pointer.
- */
- void *(*lct_init)(const struct lu_context *ctx,
- struct lu_context_key *key);
- /**
- * Value destructor. Called when context with previously allocated
- * value of this slot is destroyed. \a data is a value that was returned
- * by a matching call to lu_context_key::lct_init().
- */
- void (*lct_fini)(const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
- /**
- * Optional method called on lu_context_exit() for all allocated
- * keys. Can be used by debugging code checking that locks are
- * released, etc.
- */
- void (*lct_exit)(const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
- /**
- * Internal implementation detail: index within lu_context::lc_value[]
- * reserved for this key.
- */
- int lct_index;
- /**
- * Internal implementation detail: number of values created for this
- * key.
- */
- atomic_t lct_used;
- /**
- * Internal implementation detail: module for this key.
- */
- struct module *lct_owner;
- /**
- * References to this key. For debugging.
- */
- struct lu_ref lct_reference;
-};
-
-#define LU_KEY_INIT(mod, type) \
- static void *mod##_key_init(const struct lu_context *ctx, \
- struct lu_context_key *key) \
- { \
- type *value; \
- \
- BUILD_BUG_ON(sizeof(*value) > PAGE_SIZE); \
- \
- value = kzalloc(sizeof(*value), GFP_NOFS); \
- if (!value) \
- value = ERR_PTR(-ENOMEM); \
- \
- return value; \
- } \
- struct __##mod##__dummy_init {; } /* semicolon catcher */
-
-#define LU_KEY_FINI(mod, type) \
- static void mod##_key_fini(const struct lu_context *ctx, \
- struct lu_context_key *key, void *data) \
- { \
- type *info = data; \
- \
- kfree(info); \
- } \
- struct __##mod##__dummy_fini {; } /* semicolon catcher */
-
-#define LU_KEY_INIT_FINI(mod, type) \
- LU_KEY_INIT(mod, type); \
- LU_KEY_FINI(mod, type)
-
-#define LU_CONTEXT_KEY_DEFINE(mod, tags) \
- struct lu_context_key mod##_thread_key = { \
- .lct_tags = tags, \
- .lct_init = mod##_key_init, \
- .lct_fini = mod##_key_fini \
- }
-
-#define LU_CONTEXT_KEY_INIT(key) \
-do { \
- (key)->lct_owner = THIS_MODULE; \
-} while (0)
-
-int lu_context_key_register(struct lu_context_key *key);
-void lu_context_key_degister(struct lu_context_key *key);
-void *lu_context_key_get(const struct lu_context *ctx,
- const struct lu_context_key *key);
-void lu_context_key_quiesce(struct lu_context_key *key);
-void lu_context_key_revive(struct lu_context_key *key);
-
-/*
- * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
- * owning module.
- */
-
-#define LU_KEY_INIT_GENERIC(mod) \
- static void mod##_key_init_generic(struct lu_context_key *k, ...) \
- { \
- struct lu_context_key *key = k; \
- va_list args; \
- \
- va_start(args, k); \
- do { \
- LU_CONTEXT_KEY_INIT(key); \
- key = va_arg(args, struct lu_context_key *); \
- } while (key); \
- va_end(args); \
- }
-
-#define LU_TYPE_INIT(mod, ...) \
- LU_KEY_INIT_GENERIC(mod) \
- static int mod##_type_init(struct lu_device_type *t) \
- { \
- mod##_key_init_generic(__VA_ARGS__, NULL); \
- return lu_context_key_register_many(__VA_ARGS__, NULL); \
- } \
- struct __##mod##_dummy_type_init {; }
-
-#define LU_TYPE_FINI(mod, ...) \
- static void mod##_type_fini(struct lu_device_type *t) \
- { \
- lu_context_key_degister_many(__VA_ARGS__, NULL); \
- } \
- struct __##mod##_dummy_type_fini {; }
-
-#define LU_TYPE_START(mod, ...) \
- static void mod##_type_start(struct lu_device_type *t) \
- { \
- lu_context_key_revive_many(__VA_ARGS__, NULL); \
- } \
- struct __##mod##_dummy_type_start {; }
-
-#define LU_TYPE_STOP(mod, ...) \
- static void mod##_type_stop(struct lu_device_type *t) \
- { \
- lu_context_key_quiesce_many(__VA_ARGS__, NULL); \
- } \
- struct __##mod##_dummy_type_stop {; }
-
-#define LU_TYPE_INIT_FINI(mod, ...) \
- LU_TYPE_INIT(mod, __VA_ARGS__); \
- LU_TYPE_FINI(mod, __VA_ARGS__); \
- LU_TYPE_START(mod, __VA_ARGS__); \
- LU_TYPE_STOP(mod, __VA_ARGS__)
-
-int lu_context_init(struct lu_context *ctx, __u32 tags);
-void lu_context_fini(struct lu_context *ctx);
-void lu_context_enter(struct lu_context *ctx);
-void lu_context_exit(struct lu_context *ctx);
-int lu_context_refill(struct lu_context *ctx);
-
-/*
- * Helper functions to operate on multiple keys. These are used by the default
- * device type operations, defined by LU_TYPE_INIT_FINI().
- */
-
-int lu_context_key_register_many(struct lu_context_key *k, ...);
-void lu_context_key_degister_many(struct lu_context_key *k, ...);
-void lu_context_key_revive_many(struct lu_context_key *k, ...);
-void lu_context_key_quiesce_many(struct lu_context_key *k, ...);
-
-/**
- * Environment.
- */
-struct lu_env {
- /**
- * "Local" context, used to store data instead of stack.
- */
- struct lu_context le_ctx;
- /**
- * "Session" context for per-request data.
- */
- struct lu_context *le_ses;
-};
-
-int lu_env_init(struct lu_env *env, __u32 tags);
-void lu_env_fini(struct lu_env *env);
-int lu_env_refill(struct lu_env *env);
-
-/** @} lu_context */
-
-/**
- * Output site statistical counters into a buffer. Suitable for
- * ll_rd_*()-style functions.
- */
-int lu_site_stats_print(const struct lu_site *s, struct seq_file *m);
-
-/**
- * Common name structure to be passed around for various name related methods.
- */
-struct lu_name {
- const char *ln_name;
- int ln_namelen;
-};
-
-/**
- * Validate names (path components)
- *
- * To be valid \a name must be non-empty, '\0' terminated of length \a
- * name_len, and not contain '/'. The maximum length of a name (before
- * say -ENAMETOOLONG will be returned) is really controlled by llite
- * and the server. We only check for something insane coming from bad
- * integer handling here.
- */
-static inline bool lu_name_is_valid_2(const char *name, size_t name_len)
-{
- return name && name_len > 0 && name_len < INT_MAX &&
- name[name_len] == '\0' && strlen(name) == name_len &&
- !memchr(name, '/', name_len);
-}
-
-/**
- * Common buffer structure to be passed around for various xattr_{s,g}et()
- * methods.
- */
-struct lu_buf {
- void *lb_buf;
- size_t lb_len;
-};
-
-/**
- * One-time initializers, called at obdclass module initialization, not
- * exported.
- */
-
-/**
- * Initialization of global lu_* data.
- */
-int lu_global_init(void);
-
-/**
- * Dual to lu_global_init().
- */
-void lu_global_fini(void);
-
-struct lu_kmem_descr {
- struct kmem_cache **ckd_cache;
- const char *ckd_name;
- const size_t ckd_size;
-};
-
-int lu_kmem_init(struct lu_kmem_descr *caches);
-void lu_kmem_fini(struct lu_kmem_descr *caches);
-
-extern __u32 lu_context_tags_default;
-extern __u32 lu_session_tags_default;
-
-/** @} lu */
-#endif /* __LUSTRE_LU_OBJECT_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h
deleted file mode 100644
index ad0c24d29ffa..000000000000
--- a/drivers/staging/lustre/lustre/include/lu_ref.h
+++ /dev/null
@@ -1,178 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- *
- * This file is part of Lustre, http://www.lustre.org.
- *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __LUSTRE_LU_REF_H
-#define __LUSTRE_LU_REF_H
-
-#include <linux/list.h>
-
-/** \defgroup lu_ref lu_ref
- *
- * An interface to track references between objects. Mostly for debugging.
- *
- * Suppose there is a reference counted data-structure struct foo. To track
- * who acquired references to instance of struct foo, add lu_ref field to it:
- *
- * \code
- * struct foo {
- * atomic_t foo_refcount;
- * struct lu_ref foo_reference;
- * ...
- * };
- * \endcode
- *
- * foo::foo_reference has to be initialized by calling
- * lu_ref_init(). Typically there will be functions or macros to increment and
- * decrement foo::foo_refcount, let's say they are foo_get(struct foo *foo)
- * and foo_put(struct foo *foo), respectively.
- *
- * Whenever foo_get() is called to acquire a reference on a foo, lu_ref_add()
- * has to be called to insert into foo::foo_reference a record, describing
- * acquired reference. Dually, lu_ref_del() removes matching record. Typical
- * usages are:
- *
- * \code
- * struct bar *bar;
- *
- * // bar owns a reference to foo.
- * bar->bar_foo = foo_get(foo);
- * lu_ref_add(&foo->foo_reference, "bar", bar);
- *
- * ...
- *
- * // reference from bar to foo is released.
- * lu_ref_del(&foo->foo_reference, "bar", bar);
- * foo_put(bar->bar_foo);
- *
- *
- * // current thread acquired a temporary reference to foo.
- * foo_get(foo);
- * lu_ref_add(&foo->reference, __func__, current);
- *
- * ...
- *
- * // temporary reference is released.
- * lu_ref_del(&foo->reference, __func__, current);
- * foo_put(foo);
- * \endcode
- *
- * \e Et \e cetera. Often it makes sense to include lu_ref_add() and
- * lu_ref_del() calls into foo_get() and foo_put(). When an instance of struct
- * foo is destroyed, lu_ref_fini() has to be called that checks that no
- * pending references remain. lu_ref_print() can be used to dump a list of
- * pending references, while hunting down a leak.
- *
- * For objects to which a large number of references can be acquired,
- * lu_ref_del() can become cpu consuming, as it has to scan the list of
- * references. To work around this, remember result of lu_ref_add() (usually
- * in the same place where pointer to struct foo is stored), and use
- * lu_ref_del_at():
- *
- * \code
- * // There is a large number of bar's for a single foo.
- * bar->bar_foo = foo_get(foo);
- * bar->bar_foo_ref = lu_ref_add(&foo->foo_reference, "bar", bar);
- *
- * ...
- *
- * // reference from bar to foo is released.
- * lu_ref_del_at(&foo->foo_reference, bar->bar_foo_ref, "bar", bar);
- * foo_put(bar->bar_foo);
- * \endcode
- *
- * lu_ref interface degrades gracefully in case of memory shortages.
- *
- * @{
- */
-
-/*
- * dummy data structures/functions to pass compile for now.
- * We need to reimplement them with kref.
- */
-struct lu_ref {};
-struct lu_ref_link {};
-
-static inline void lu_ref_init(struct lu_ref *ref)
-{
-}
-
-static inline void lu_ref_fini(struct lu_ref *ref)
-{
-}
-
-static inline struct lu_ref_link *lu_ref_add(struct lu_ref *ref,
- const char *scope,
- const void *source)
-{
- return NULL;
-}
-
-static inline struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref,
- const char *scope,
- const void *source)
-{
- return NULL;
-}
-
-static inline void lu_ref_add_at(struct lu_ref *ref,
- struct lu_ref_link *link,
- const char *scope,
- const void *source)
-{
-}
-
-static inline void lu_ref_del(struct lu_ref *ref, const char *scope,
- const void *source)
-{
-}
-
-static inline void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
- const char *scope, const void *source0,
- const void *source1)
-{
-}
-
-static inline void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
- const char *scope, const void *source)
-{
-}
-
-static inline int lu_ref_global_init(void)
-{
- return 0;
-}
-
-static inline void lu_ref_global_fini(void)
-{
-}
-
-static inline void lu_ref_print(const struct lu_ref *ref)
-{
-}
-
-static inline void lu_ref_print_all(void)
-{
-}
-
-/** @} lu */
-
-#endif /* __LUSTRE_LU_REF_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h
deleted file mode 100644
index 35ff61ce4e9d..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_acl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_acl.h
- */
-
-#ifndef _LUSTRE_ACL_H
-#define _LUSTRE_ACL_H
-
-#include <linux/fs.h>
-#include <linux/dcache.h>
-#include <linux/posix_acl_xattr.h>
-
-#define LUSTRE_POSIX_ACL_MAX_ENTRIES 32
-#define LUSTRE_POSIX_ACL_MAX_SIZE \
- (sizeof(struct posix_acl_xattr_header) + \
- LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(struct posix_acl_xattr_entry))
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h
deleted file mode 100644
index 9f488e605083..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_compat.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_COMPAT_H
-#define _LUSTRE_COMPAT_H
-
-#include <linux/fs_struct.h>
-#include <linux/namei.h>
-#include <linux/cred.h>
-
-#include <lustre_patchless_compat.h>
-
-/*
- * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
- * ATTR_* attributes (see bug 13828)
- */
-#define ATTR_BLOCKS (1 << 27)
-
-#define current_ngroups current_cred()->group_info->ngroups
-#define current_groups current_cred()->group_info->small_block
-
-/*
- * OBD need working random driver, thus all our
- * initialization routines must be called after device
- * driver initialization
- */
-#ifndef MODULE
-#undef module_init
-#define module_init(a) late_initcall(a)
-#endif
-
-#define LTIME_S(time) (time.tv_sec)
-
-#ifndef QUOTA_OK
-# define QUOTA_OK 0
-#endif
-#ifndef NO_QUOTA
-# define NO_QUOTA (-EDQUOT)
-#endif
-
-#if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit)
-# define ext2_set_bit __test_and_set_bit_le
-# define ext2_clear_bit __test_and_clear_bit_le
-# define ext2_test_bit test_bit_le
-# define ext2_find_first_zero_bit find_first_zero_bit_le
-# define ext2_find_next_zero_bit find_next_zero_bit_le
-#endif
-
-#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
-
-#endif /* _LUSTRE_COMPAT_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h
deleted file mode 100644
index 721a81f923e3..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_debug.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_DEBUG_H
-#define _LUSTRE_DEBUG_H
-
-/** \defgroup debug debug
- *
- * @{
- */
-
-#include <lustre_net.h>
-#include <obd.h>
-
-/* lib/debug.c */
-int dump_req(struct ptlrpc_request *req);
-int block_debug_setup(void *addr, int len, __u64 off, __u64 id);
-int block_debug_check(char *who, void *addr, int len, __u64 off, __u64 id);
-
-/** @} debug */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
deleted file mode 100644
index 100e993ab00b..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ /dev/null
@@ -1,153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_disk.h
- *
- * Lustre disk format definitions.
- *
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-#ifndef _LUSTRE_DISK_H
-#define _LUSTRE_DISK_H
-
-/** \defgroup disk disk
- *
- * @{
- */
-
-#include <asm/byteorder.h>
-#include <linux/types.h>
-#include <linux/backing-dev.h>
-#include <linux/libcfs/libcfs.h>
-
-/****************** persistent mount data *********************/
-
-#define LDD_F_SV_TYPE_MDT 0x0001
-#define LDD_F_SV_TYPE_OST 0x0002
-#define LDD_F_SV_TYPE_MGS 0x0004
-#define LDD_F_SV_TYPE_MASK (LDD_F_SV_TYPE_MDT | \
- LDD_F_SV_TYPE_OST | \
- LDD_F_SV_TYPE_MGS)
-#define LDD_F_SV_ALL 0x0008
-
-/****************** mount command *********************/
-
-/* The lmd is only used internally by Lustre; mount simply passes
- * everything as string options
- */
-
-#define LMD_MAGIC 0xbdacbd03
-#define LMD_PARAMS_MAXLEN 4096
-
-/* gleaned from the mount command - no persistent info here */
-struct lustre_mount_data {
- __u32 lmd_magic;
- __u32 lmd_flags; /* lustre mount flags */
- int lmd_mgs_failnodes; /* mgs failover node count */
- int lmd_exclude_count;
- int lmd_recovery_time_soft;
- int lmd_recovery_time_hard;
- char *lmd_dev; /* device name */
- char *lmd_profile; /* client only */
- char *lmd_mgssec; /* sptlrpc flavor to mgs */
- char *lmd_opts; /* lustre mount options (as opposed to
- * _device_ mount options)
- */
- char *lmd_params; /* lustre params */
- __u32 *lmd_exclude; /* array of OSTs to ignore */
- char *lmd_mgs; /* MGS nid */
- char *lmd_osd_type; /* OSD type */
-};
-
-#define LMD_FLG_SERVER 0x0001 /* Mounting a server */
-#define LMD_FLG_CLIENT 0x0002 /* Mounting a client */
-#define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */
-#define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers,
- * no other services
- */
-#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers,
- * reusing existing MGS services
- */
-#define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */
-#define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */
-#define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */
-#define LMD_FLG_MGS 0x0200 /* Also start MGS along with server */
-#define LMD_FLG_IAM 0x0400 /* IAM dir */
-#define LMD_FLG_NO_PRIMNODE 0x0800 /* all nodes are service nodes */
-#define LMD_FLG_VIRGIN 0x1000 /* the service registers first time */
-#define LMD_FLG_UPDATE 0x2000 /* update parameters */
-#define LMD_FLG_HSM 0x4000 /* Start coordinator */
-
-#define lmd_is_client(x) ((x)->lmd_flags & LMD_FLG_CLIENT)
-
-/****************** superblock additional info *********************/
-
-struct ll_sb_info;
-
-struct lustre_sb_info {
- int lsi_flags;
- struct obd_device *lsi_mgc; /* mgc obd */
- struct lustre_mount_data *lsi_lmd; /* mount command info */
- struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */
- struct dt_device *lsi_dt_dev; /* dt device to access disk fs*/
- atomic_t lsi_mounts; /* references to the srv_mnt */
- char lsi_svname[MTI_NAME_MAXLEN];
- char lsi_osd_obdname[64];
- char lsi_osd_uuid[64];
- struct obd_export *lsi_osd_exp;
- char lsi_osd_type[16];
- char lsi_fstype[16];
-};
-
-#define LSI_UMOUNT_FAILOVER 0x00200000
-
-#define s2lsi(sb) ((struct lustre_sb_info *)((sb)->s_fs_info))
-#define s2lsi_nocast(sb) ((sb)->s_fs_info)
-
-#define get_profile_name(sb) (s2lsi(sb)->lsi_lmd->lmd_profile)
-
-/****************** prototypes *********************/
-
-/* obd_mount.c */
-
-int lustre_start_mgc(struct super_block *sb);
-void lustre_register_super_ops(struct module *mod,
- int (*cfs)(struct super_block *sb),
- void (*ksc)(struct super_block *sb));
-int lustre_common_put_super(struct super_block *sb);
-
-int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type);
-
-/** @} disk */
-
-#endif /* _LUSTRE_DISK_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
deleted file mode 100644
index 239aa2b1268f..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ /dev/null
@@ -1,1354 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/** \defgroup LDLM Lustre Distributed Lock Manager
- *
- * Lustre DLM is based on VAX DLM.
- * Its two main roles are:
- * - To provide locking assuring consistency of data on all Lustre nodes.
- * - To allow clients to cache state protected by a lock by holding the
- * lock until a conflicting lock is requested or it is expired by the LRU.
- *
- * @{
- */
-
-#ifndef _LUSTRE_DLM_H__
-#define _LUSTRE_DLM_H__
-
-#include <lustre_lib.h>
-#include <lustre_net.h>
-#include <lustre_import.h>
-#include <lustre_handles.h>
-#include <interval_tree.h> /* for interval_node{}, ldlm_extent */
-#include <lu_ref.h>
-
-#include "lustre_dlm_flags.h"
-
-struct obd_ops;
-struct obd_device;
-
-#define OBD_LDLM_DEVICENAME "ldlm"
-
-#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
-#define LDLM_DEFAULT_MAX_ALIVE (65 * 60 * HZ) /* 65 min */
-#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
-
-/**
- * LDLM non-error return states
- */
-enum ldlm_error {
- ELDLM_OK = 0,
- ELDLM_LOCK_MATCHED = 1,
-
- ELDLM_LOCK_CHANGED = 300,
- ELDLM_LOCK_ABORTED = 301,
- ELDLM_LOCK_REPLACED = 302,
- ELDLM_NO_LOCK_DATA = 303,
- ELDLM_LOCK_WOULDBLOCK = 304,
-
- ELDLM_NAMESPACE_EXISTS = 400,
- ELDLM_BAD_NAMESPACE = 401
-};
-
-/**
- * LDLM namespace type.
- * The "client" type is actually an indication that this is a narrow local view
- * into complete namespace on the server. Such namespaces cannot make any
- * decisions about lack of conflicts or do any autonomous lock granting without
- * first speaking to a server.
- */
-enum ldlm_side {
- LDLM_NAMESPACE_SERVER = 1 << 0,
- LDLM_NAMESPACE_CLIENT = 1 << 1
-};
-
-/**
- * The blocking callback is overloaded to perform two functions. These flags
- * indicate which operation should be performed.
- */
-#define LDLM_CB_BLOCKING 1
-#define LDLM_CB_CANCELING 2
-
-/**
- * \name Lock Compatibility Matrix.
- *
- * A lock has both a type (extent, flock, inode bits, or plain) and a mode.
- * Lock types are described in their respective implementation files:
- * ldlm_{extent,flock,inodebits,plain}.c.
- *
- * There are six lock modes along with a compatibility matrix to indicate if
- * two locks are compatible.
- *
- * - EX: Exclusive mode. Before a new file is created, MDS requests EX lock
- * on the parent.
- * - PW: Protective Write (normal write) mode. When a client requests a write
- * lock from an OST, a lock with PW mode will be issued.
- * - PR: Protective Read (normal read) mode. When a client requests a read from
- * an OST, a lock with PR mode will be issued. Also, if the client opens a
- * file for execution, it is granted a lock with PR mode.
- * - CW: Concurrent Write mode. The type of lock that the MDS grants if a client
- * requests a write lock during a file open operation.
- * - CR Concurrent Read mode. When a client performs a path lookup, MDS grants
- * an inodebit lock with the CR mode on the intermediate path component.
- * - NL Null mode.
- *
- * <PRE>
- * NL CR CW PR PW EX
- * NL 1 1 1 1 1 1
- * CR 1 1 1 1 1 0
- * CW 1 1 1 0 0 0
- * PR 1 1 0 1 0 0
- * PW 1 1 0 0 0 0
- * EX 1 0 0 0 0 0
- * </PRE>
- */
-/** @{ */
-#define LCK_COMPAT_EX LCK_NL
-#define LCK_COMPAT_PW (LCK_COMPAT_EX | LCK_CR)
-#define LCK_COMPAT_PR (LCK_COMPAT_PW | LCK_PR)
-#define LCK_COMPAT_CW (LCK_COMPAT_PW | LCK_CW)
-#define LCK_COMPAT_CR (LCK_COMPAT_CW | LCK_PR | LCK_PW)
-#define LCK_COMPAT_NL (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
-#define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)
-#define LCK_COMPAT_COS (LCK_COS)
-/** @} Lock Compatibility Matrix */
-
-extern enum ldlm_mode lck_compat_array[];
-
-static inline void lockmode_verify(enum ldlm_mode mode)
-{
- LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
-}
-
-static inline int lockmode_compat(enum ldlm_mode exist_mode,
- enum ldlm_mode new_mode)
-{
- return (lck_compat_array[exist_mode] & new_mode);
-}
-
-/*
- *
- * cluster name spaces
- *
- */
-
-#define DLM_OST_NAMESPACE 1
-#define DLM_MDS_NAMESPACE 2
-
-/* XXX
- - do we just separate this by security domains and use a prefix for
- multiple namespaces in the same domain?
- -
-*/
-
-/**
- * Locking rules for LDLM:
- *
- * lr_lock
- *
- * lr_lock
- * waiting_locks_spinlock
- *
- * lr_lock
- * led_lock
- *
- * lr_lock
- * ns_lock
- *
- * lr_lvb_mutex
- * lr_lock
- *
- */
-
-struct ldlm_pool;
-struct ldlm_lock;
-struct ldlm_resource;
-struct ldlm_namespace;
-
-/**
- * Operations on LDLM pools.
- * LDLM pool is a pool of locks in the namespace without any implicitly
- * specified limits.
- * Locks in the pool are organized in LRU.
- * Local memory pressure or server instructions (e.g. mempressure on server)
- * can trigger freeing of locks from the pool
- */
-struct ldlm_pool_ops {
- /** Recalculate pool \a pl usage */
- int (*po_recalc)(struct ldlm_pool *pl);
- /** Cancel at least \a nr locks from pool \a pl */
- int (*po_shrink)(struct ldlm_pool *pl, int nr,
- gfp_t gfp_mask);
-};
-
-/** One second for pools thread check interval. Each pool has own period. */
-#define LDLM_POOLS_THREAD_PERIOD (1)
-
-/** ~6% margin for modest pools. See ldlm_pool.c for details. */
-#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)
-
-/** Default recalc period for server side pools in sec. */
-#define LDLM_POOL_SRV_DEF_RECALC_PERIOD (1)
-
-/** Default recalc period for client side pools in sec. */
-#define LDLM_POOL_CLI_DEF_RECALC_PERIOD (10)
-
-/**
- * LDLM pool structure to track granted locks.
- * For purposes of determining when to release locks on e.g. memory pressure.
- * This feature is commonly referred to as lru_resize.
- */
-struct ldlm_pool {
- /** Pool debugfs directory. */
- struct dentry *pl_debugfs_entry;
- /** Pool name, must be long enough to hold compound proc entry name. */
- char pl_name[100];
- /** Lock for protecting SLV/CLV updates. */
- spinlock_t pl_lock;
- /** Number of allowed locks in in pool, both, client and server side. */
- atomic_t pl_limit;
- /** Number of granted locks in */
- atomic_t pl_granted;
- /** Grant rate per T. */
- atomic_t pl_grant_rate;
- /** Cancel rate per T. */
- atomic_t pl_cancel_rate;
- /** Server lock volume (SLV). Protected by pl_lock. */
- __u64 pl_server_lock_volume;
- /** Current biggest client lock volume. Protected by pl_lock. */
- __u64 pl_client_lock_volume;
- /** Lock volume factor. SLV on client is calculated as following:
- * server_slv * lock_volume_factor.
- */
- atomic_t pl_lock_volume_factor;
- /** Time when last SLV from server was obtained. */
- time64_t pl_recalc_time;
- /** Recalculation period for pool. */
- time64_t pl_recalc_period;
- /** Recalculation and shrink operations. */
- const struct ldlm_pool_ops *pl_ops;
- /** Number of planned locks for next period. */
- int pl_grant_plan;
- /** Pool statistics. */
- struct lprocfs_stats *pl_stats;
-
- /* sysfs object */
- struct kobject pl_kobj;
- struct completion pl_kobj_unregister;
-};
-
-typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
-
-/**
- * LVB operations.
- * LVB is Lock Value Block. This is a special opaque (to LDLM) value that could
- * be associated with an LDLM lock and transferred from client to server and
- * back.
- *
- * Currently LVBs are used by:
- * - OSC-OST code to maintain current object size/times
- * - layout lock code to return the layout when the layout lock is granted
- */
-struct ldlm_valblock_ops {
- int (*lvbo_init)(struct ldlm_resource *res);
- int (*lvbo_update)(struct ldlm_resource *res,
- struct ptlrpc_request *r,
- int increase);
- int (*lvbo_free)(struct ldlm_resource *res);
- /* Return size of lvb data appropriate RPC size can be reserved */
- int (*lvbo_size)(struct ldlm_lock *lock);
- /* Called to fill in lvb data to RPC buffer @buf */
- int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen);
-};
-
-/**
- * LDLM pools related, type of lock pool in the namespace.
- * Greedy means release cached locks aggressively
- */
-enum ldlm_appetite {
- LDLM_NAMESPACE_GREEDY = 1 << 0,
- LDLM_NAMESPACE_MODEST = 1 << 1
-};
-
-struct ldlm_ns_bucket {
- /** back pointer to namespace */
- struct ldlm_namespace *nsb_namespace;
- /**
- * Estimated lock callback time. Used by adaptive timeout code to
- * avoid spurious client evictions due to unresponsiveness when in
- * fact the network or overall system load is at fault
- */
- struct adaptive_timeout nsb_at_estimate;
-};
-
-enum {
- /** LDLM namespace lock stats */
- LDLM_NSS_LOCKS = 0,
- LDLM_NSS_LAST
-};
-
-enum ldlm_ns_type {
- /** invalid type */
- LDLM_NS_TYPE_UNKNOWN = 0,
- /** mdc namespace */
- LDLM_NS_TYPE_MDC,
- /** mds namespace */
- LDLM_NS_TYPE_MDT,
- /** osc namespace */
- LDLM_NS_TYPE_OSC,
- /** ost namespace */
- LDLM_NS_TYPE_OST,
- /** mgc namespace */
- LDLM_NS_TYPE_MGC,
- /** mgs namespace */
- LDLM_NS_TYPE_MGT,
-};
-
-/**
- * LDLM Namespace.
- *
- * Namespace serves to contain locks related to a particular service.
- * There are two kinds of namespaces:
- * - Server namespace has knowledge of all locks and is therefore authoritative
- * to make decisions like what locks could be granted and what conflicts
- * exist during new lock enqueue.
- * - Client namespace only has limited knowledge about locks in the namespace,
- * only seeing locks held by the client.
- *
- * Every Lustre service has one server namespace present on the server serving
- * that service. Every client connected to the service has a client namespace
- * for it.
- * Every lock obtained by client in that namespace is actually represented by
- * two in-memory locks. One on the server and one on the client. The locks are
- * linked by a special cookie by which one node can tell to the other which lock
- * it actually means during communications. Such locks are called remote locks.
- * The locks held by server only without any reference to a client are called
- * local locks.
- */
-struct ldlm_namespace {
- /** Backward link to OBD, required for LDLM pool to store new SLV. */
- struct obd_device *ns_obd;
-
- /** Flag indicating if namespace is on client instead of server */
- enum ldlm_side ns_client;
-
- /** Resource hash table for namespace. */
- struct cfs_hash *ns_rs_hash;
-
- /** serialize */
- spinlock_t ns_lock;
-
- /** big refcount (by bucket) */
- atomic_t ns_bref;
-
- /**
- * Namespace connect flags supported by server (may be changed via
- * sysfs, LRU resize may be disabled/enabled).
- */
- __u64 ns_connect_flags;
-
- /** Client side original connect flags supported by server. */
- __u64 ns_orig_connect_flags;
-
- /* namespace debugfs dir entry */
- struct dentry *ns_debugfs_entry;
-
- /**
- * Position in global namespace list linking all namespaces on
- * the node.
- */
- struct list_head ns_list_chain;
-
- /**
- * List of unused locks for this namespace. This list is also called
- * LRU lock list.
- * Unused locks are locks with zero reader/writer reference counts.
- * This list is only used on clients for lock caching purposes.
- * When we want to release some locks voluntarily or if server wants
- * us to release some locks due to e.g. memory pressure, we take locks
- * to release from the head of this list.
- * Locks are linked via l_lru field in \see struct ldlm_lock.
- */
- struct list_head ns_unused_list;
- /** Number of locks in the LRU list above */
- int ns_nr_unused;
-
- /**
- * Maximum number of locks permitted in the LRU. If 0, means locks
- * are managed by pools and there is no preset limit, rather it is all
- * controlled by available memory on this client and on server.
- */
- unsigned int ns_max_unused;
- /** Maximum allowed age (last used time) for locks in the LRU */
- unsigned int ns_max_age;
-
- /**
- * Used to rate-limit ldlm_namespace_dump calls.
- * \see ldlm_namespace_dump. Increased by 10 seconds every time
- * it is called.
- */
- unsigned long ns_next_dump;
-
- /**
- * LVB operations for this namespace.
- * \see struct ldlm_valblock_ops
- */
- struct ldlm_valblock_ops *ns_lvbo;
-
- /**
- * Used by filter code to store pointer to OBD of the service.
- * Should be dropped in favor of \a ns_obd
- */
- void *ns_lvbp;
-
- /**
- * Wait queue used by __ldlm_namespace_free. Gets woken up every time
- * a resource is removed.
- */
- wait_queue_head_t ns_waitq;
- /** LDLM pool structure for this namespace */
- struct ldlm_pool ns_pool;
- /** Definition of how eagerly unused locks will be released from LRU */
- enum ldlm_appetite ns_appetite;
-
- /** Limit of parallel AST RPC count. */
- unsigned ns_max_parallel_ast;
-
- /**
- * Callback to check if a lock is good to be canceled by ELC or
- * during recovery.
- */
- ldlm_cancel_cbt ns_cancel;
-
- /** LDLM lock stats */
- struct lprocfs_stats *ns_stats;
-
- /**
- * Flag to indicate namespace is being freed. Used to determine if
- * recalculation of LDLM pool statistics should be skipped.
- */
- unsigned ns_stopping:1;
-
- struct kobject ns_kobj; /* sysfs object */
- struct completion ns_kobj_unregister;
-};
-
-/**
- * Returns 1 if namespace \a ns supports early lock cancel (ELC).
- */
-static inline int ns_connect_cancelset(struct ldlm_namespace *ns)
-{
- return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET);
-}
-
-/**
- * Returns 1 if this namespace supports lru_resize.
- */
-static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
-{
- return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
-}
-
-static inline void ns_register_cancel(struct ldlm_namespace *ns,
- ldlm_cancel_cbt arg)
-{
- ns->ns_cancel = arg;
-}
-
-struct ldlm_lock;
-
-/** Type for blocking callback function of a lock. */
-typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
- struct ldlm_lock_desc *new, void *data,
- int flag);
-/** Type for completion callback function of a lock. */
-typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
- void *data);
-/** Type for glimpse callback function of a lock. */
-typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
-
-/** Work list for sending GL ASTs to multiple locks. */
-struct ldlm_glimpse_work {
- struct ldlm_lock *gl_lock; /* lock to glimpse */
- struct list_head gl_list; /* linkage to other gl work structs */
- __u32 gl_flags;/* see LDLM_GL_WORK_* below */
- union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in
- * glimpse callback request
- */
-};
-
-/** The ldlm_glimpse_work is allocated on the stack and should not be freed. */
-#define LDLM_GL_WORK_NOFREE 0x1
-
-/** Interval node data for each LDLM_EXTENT lock. */
-struct ldlm_interval {
- struct interval_node li_node; /* node for tree management */
- struct list_head li_group; /* the locks which have the same
- * policy - group of the policy
- */
-};
-
-#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
-
-/**
- * Interval tree for extent locks.
- * The interval tree must be accessed under the resource lock.
- * Interval trees are used for granted extent locks to speed up conflicts
- * lookup. See ldlm/interval_tree.c for more details.
- */
-struct ldlm_interval_tree {
- /** Tree size. */
- int lit_size;
- enum ldlm_mode lit_mode; /* lock mode */
- struct interval_node *lit_root; /* actual ldlm_interval */
-};
-
-/** Whether to track references to exports by LDLM locks. */
-#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
-
-/** Cancel flags. */
-enum ldlm_cancel_flags {
- LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */
- LCF_LOCAL = 0x2, /* Cancel locks locally, not notifing server */
- LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
- * in the same RPC
- */
-};
-
-struct ldlm_flock {
- __u64 start;
- __u64 end;
- __u64 owner;
- __u64 blocking_owner;
- struct obd_export *blocking_export;
- __u32 pid;
-};
-
-union ldlm_policy_data {
- struct ldlm_extent l_extent;
- struct ldlm_flock l_flock;
- struct ldlm_inodebits l_inodebits;
-};
-
-void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
- const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy);
-
-enum lvb_type {
- LVB_T_NONE = 0,
- LVB_T_OST = 1,
- LVB_T_LQUOTA = 2,
- LVB_T_LAYOUT = 3,
-};
-
-/**
- * LDLM_GID_ANY is used to match any group id in ldlm_lock_match().
- */
-#define LDLM_GID_ANY ((__u64)-1)
-
-/**
- * LDLM lock structure
- *
- * Represents a single LDLM lock and its state in memory. Each lock is
- * associated with a single ldlm_resource, the object which is being
- * locked. There may be multiple ldlm_locks on a single resource,
- * depending on the lock type and whether the locks are conflicting or
- * not.
- */
-struct ldlm_lock {
- /**
- * Local lock handle.
- * When remote side wants to tell us about a lock, they address
- * it by this opaque handle. The handle does not hold a
- * reference on the ldlm_lock, so it can be safely passed to
- * other threads or nodes. When the lock needs to be accessed
- * from the handle, it is looked up again in the lock table, and
- * may no longer exist.
- *
- * Must be first in the structure.
- */
- struct portals_handle l_handle;
- /**
- * Lock reference count.
- * This is how many users have pointers to actual structure, so that
- * we do not accidentally free lock structure that is in use.
- */
- atomic_t l_refc;
- /**
- * Internal spinlock protects l_resource. We should hold this lock
- * first before taking res_lock.
- */
- spinlock_t l_lock;
- /**
- * Pointer to actual resource this lock is in.
- * ldlm_lock_change_resource() can change this.
- */
- struct ldlm_resource *l_resource;
- /**
- * List item for client side LRU list.
- * Protected by ns_lock in struct ldlm_namespace.
- */
- struct list_head l_lru;
- /**
- * Linkage to resource's lock queues according to current lock state.
- * (could be granted, waiting or converting)
- * Protected by lr_lock in struct ldlm_resource.
- */
- struct list_head l_res_link;
- /**
- * Tree node for ldlm_extent.
- */
- struct ldlm_interval *l_tree_node;
- /**
- * Per export hash of locks.
- * Protected by per-bucket exp->exp_lock_hash locks.
- */
- struct hlist_node l_exp_hash;
- /**
- * Per export hash of flock locks.
- * Protected by per-bucket exp->exp_flock_hash locks.
- */
- struct hlist_node l_exp_flock_hash;
- /**
- * Requested mode.
- * Protected by lr_lock.
- */
- enum ldlm_mode l_req_mode;
- /**
- * Granted mode, also protected by lr_lock.
- */
- enum ldlm_mode l_granted_mode;
- /** Lock completion handler pointer. Called when lock is granted. */
- ldlm_completion_callback l_completion_ast;
- /**
- * Lock blocking AST handler pointer.
- * It plays two roles:
- * - as a notification of an attempt to queue a conflicting lock (once)
- * - as a notification when the lock is being cancelled.
- *
- * As such it's typically called twice: once for the initial conflict
- * and then once more when the last user went away and the lock is
- * cancelled (could happen recursively).
- */
- ldlm_blocking_callback l_blocking_ast;
- /**
- * Lock glimpse handler.
- * Glimpse handler is used to obtain LVB updates from a client by
- * server
- */
- ldlm_glimpse_callback l_glimpse_ast;
-
- /**
- * Lock export.
- * This is a pointer to actual client export for locks that were granted
- * to clients. Used server-side.
- */
- struct obd_export *l_export;
- /**
- * Lock connection export.
- * Pointer to server export on a client.
- */
- struct obd_export *l_conn_export;
-
- /**
- * Remote lock handle.
- * If the lock is remote, this is the handle of the other side lock
- * (l_handle)
- */
- struct lustre_handle l_remote_handle;
-
- /**
- * Representation of private data specific for a lock type.
- * Examples are: extent range for extent lock or bitmask for ibits locks
- */
- union ldlm_policy_data l_policy_data;
-
- /**
- * Lock state flags. Protected by lr_lock.
- * \see lustre_dlm_flags.h where the bits are defined.
- */
- __u64 l_flags;
-
- /**
- * Lock r/w usage counters.
- * Protected by lr_lock.
- */
- __u32 l_readers;
- __u32 l_writers;
- /**
- * If the lock is granted, a process sleeps on this waitq to learn when
- * it's no longer in use. If the lock is not granted, a process sleeps
- * on this waitq to learn when it becomes granted.
- */
- wait_queue_head_t l_waitq;
-
- /**
- * Seconds. It will be updated if there is any activity related to
- * the lock, e.g. enqueue the lock or send blocking AST.
- */
- time64_t l_last_activity;
-
- /**
- * Time last used by e.g. being matched by lock match.
- * Jiffies. Should be converted to time if needed.
- */
- unsigned long l_last_used;
-
- /** Originally requested extent for the extent lock. */
- struct ldlm_extent l_req_extent;
-
- /*
- * Client-side-only members.
- */
-
- enum lvb_type l_lvb_type;
-
- /**
- * Temporary storage for a LVB received during an enqueue operation.
- */
- __u32 l_lvb_len;
- void *l_lvb_data;
-
- /** Private storage for lock user. Opaque to LDLM. */
- void *l_ast_data;
-
- /*
- * Server-side-only members.
- */
-
- /**
- * Connection cookie for the client originating the operation.
- * Used by Commit on Share (COS) code. Currently only used for
- * inodebits locks on MDS.
- */
- __u64 l_client_cookie;
-
- /**
- * List item for locks waiting for cancellation from clients.
- * The lists this could be linked into are:
- * waiting_locks_list (protected by waiting_locks_spinlock),
- * then if the lock timed out, it is moved to
- * expired_lock_thread.elt_expired_locks for further processing.
- * Protected by elt_lock.
- */
- struct list_head l_pending_chain;
-
- /**
- * Set when lock is sent a blocking AST. Time in seconds when timeout
- * is reached and client holding this lock could be evicted.
- * This timeout could be further extended by e.g. certain IO activity
- * under this lock.
- * \see ost_rw_prolong_locks
- */
- unsigned long l_callback_timeout;
-
- /** Local PID of process which created this lock. */
- __u32 l_pid;
-
- /**
- * Number of times blocking AST was sent for this lock.
- * This is for debugging. Valid values are 0 and 1, if there is an
- * attempt to send blocking AST more than once, an assertion would be
- * hit. \see ldlm_work_bl_ast_lock
- */
- int l_bl_ast_run;
- /** List item ldlm_add_ast_work_item() for case of blocking ASTs. */
- struct list_head l_bl_ast;
- /** List item ldlm_add_ast_work_item() for case of completion ASTs. */
- struct list_head l_cp_ast;
- /** For ldlm_add_ast_work_item() for "revoke" AST used in COS. */
- struct list_head l_rk_ast;
-
- /**
- * Pointer to a conflicting lock that caused blocking AST to be sent
- * for this lock
- */
- struct ldlm_lock *l_blocking_lock;
-
- /**
- * Protected by lr_lock, linkages to "skip lists".
- * For more explanations of skip lists see ldlm/ldlm_inodebits.c
- */
- struct list_head l_sl_mode;
- struct list_head l_sl_policy;
-
- /** Reference tracking structure to debug leaked locks. */
- struct lu_ref l_reference;
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- /* Debugging stuff for bug 20498, for tracking export references. */
- /** number of export references taken */
- int l_exp_refs_nr;
- /** link all locks referencing one export */
- struct list_head l_exp_refs_link;
- /** referenced export object */
- struct obd_export *l_exp_refs_target;
-#endif
-};
-
-/**
- * LDLM resource description.
- * Basically, resource is a representation for a single object.
- * Object has a name which is currently 4 64-bit integers. LDLM user is
- * responsible for creation of a mapping between objects it wants to be
- * protected and resource names.
- *
- * A resource can only hold locks of a single lock type, though there may be
- * multiple ldlm_locks on a single resource, depending on the lock type and
- * whether the locks are conflicting or not.
- */
-struct ldlm_resource {
- struct ldlm_ns_bucket *lr_ns_bucket;
-
- /**
- * List item for list in namespace hash.
- * protected by ns_lock
- */
- struct hlist_node lr_hash;
-
- /** Spinlock to protect locks under this resource. */
- spinlock_t lr_lock;
-
- /**
- * protected by lr_lock
- * @{
- */
- /** List of locks in granted state */
- struct list_head lr_granted;
- /**
- * List of locks that could not be granted due to conflicts and
- * that are waiting for conflicts to go away
- */
- struct list_head lr_waiting;
- /** @} */
-
- /** Type of locks this resource can hold. Only one type per resource. */
- enum ldlm_type lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
-
- /** Resource name */
- struct ldlm_res_id lr_name;
- /** Reference count for this resource */
- atomic_t lr_refcount;
-
- /**
- * Interval trees (only for extent locks) for all modes of this resource
- */
- struct ldlm_interval_tree lr_itree[LCK_MODE_NUM];
-
- /**
- * Server-side-only lock value block elements.
- * To serialize lvbo_init.
- */
- struct mutex lr_lvb_mutex;
- int lr_lvb_len;
-
- /** When the resource was considered as contended. */
- unsigned long lr_contention_time;
- /** List of references to this resource. For debugging. */
- struct lu_ref lr_reference;
-
- struct inode *lr_lvb_inode;
-};
-
-static inline bool ldlm_has_layout(struct ldlm_lock *lock)
-{
- return lock->l_resource->lr_type == LDLM_IBITS &&
- lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_LAYOUT;
-}
-
-static inline char *
-ldlm_ns_name(struct ldlm_namespace *ns)
-{
- return ns->ns_rs_hash->hs_name;
-}
-
-static inline struct ldlm_namespace *
-ldlm_res_to_ns(struct ldlm_resource *res)
-{
- return res->lr_ns_bucket->nsb_namespace;
-}
-
-static inline struct ldlm_namespace *
-ldlm_lock_to_ns(struct ldlm_lock *lock)
-{
- return ldlm_res_to_ns(lock->l_resource);
-}
-
-static inline char *
-ldlm_lock_to_ns_name(struct ldlm_lock *lock)
-{
- return ldlm_ns_name(ldlm_lock_to_ns(lock));
-}
-
-static inline struct adaptive_timeout *
-ldlm_lock_to_ns_at(struct ldlm_lock *lock)
-{
- return &lock->l_resource->lr_ns_bucket->nsb_at_estimate;
-}
-
-static inline int ldlm_lvbo_init(struct ldlm_resource *res)
-{
- struct ldlm_namespace *ns = ldlm_res_to_ns(res);
-
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init)
- return ns->ns_lvbo->lvbo_init(res);
-
- return 0;
-}
-
-static inline int ldlm_lvbo_size(struct ldlm_lock *lock)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_size)
- return ns->ns_lvbo->lvbo_size(lock);
-
- return 0;
-}
-
-static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- if (ns->ns_lvbo)
- return ns->ns_lvbo->lvbo_fill(lock, buf, len);
-
- return 0;
-}
-
-struct ldlm_ast_work {
- struct ldlm_lock *w_lock;
- int w_blocking;
- struct ldlm_lock_desc w_desc;
- struct list_head w_list;
- int w_flags;
- void *w_data;
- int w_datalen;
-};
-
-/**
- * Common ldlm_enqueue parameters
- */
-struct ldlm_enqueue_info {
- enum ldlm_type ei_type; /** Type of the lock being enqueued. */
- enum ldlm_mode ei_mode; /** Mode of the lock being enqueued. */
- void *ei_cb_bl; /** blocking lock callback */
- void *ei_cb_cp; /** lock completion callback */
- void *ei_cb_gl; /** lock glimpse callback */
- void *ei_cbdata; /** Data to be passed into callbacks. */
- unsigned int ei_enq_slave:1; /* whether enqueue slave stripes */
-};
-
-extern struct obd_ops ldlm_obd_ops;
-
-extern char *ldlm_lockname[];
-const char *ldlm_it2str(enum ldlm_intent_flags it);
-
-/**
- * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
- * For the cases where we do not have actual lock to print along
- * with a debugging message that is ldlm-related
- */
-#define LDLM_DEBUG_NOLOCK(format, a...) \
- CDEBUG(D_DLMTRACE, "### " format "\n", ##a)
-
-/**
- * Support function for lock information printing into debug logs.
- * \see LDLM_DEBUG
- */
-#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do { \
- CFS_CHECK_STACK(msgdata, mask, cdls); \
- \
- if (((mask) & D_CANTMASK) != 0 || \
- ((libcfs_debug & (mask)) != 0 && \
- (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
- _ldlm_lock_debug(lock, msgdata, fmt, ##a); \
-} while (0)
-
-void _ldlm_lock_debug(struct ldlm_lock *lock,
- struct libcfs_debug_msg_data *data,
- const char *fmt, ...)
- __printf(3, 4);
-
-/**
- * Rate-limited version of lock printing function.
- */
-#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do { \
- static struct cfs_debug_limit_state _ldlm_cdls; \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls); \
- ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt, ##a);\
-} while (0)
-
-#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
-#define LDLM_WARN(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
-
-/** Non-rate-limited lock printing function for debugging purposes. */
-#define LDLM_DEBUG(lock, fmt, a...) do { \
- if (likely(lock)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \
- ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \
- "### " fmt, ##a); \
- } else { \
- LDLM_DEBUG_NOLOCK("no dlm lock: " fmt, ##a); \
- } \
-} while (0)
-
-typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, enum ldlm_error *err,
- struct list_head *work_list);
-
-/**
- * Return values for lock iterators.
- * Also used during deciding of lock grants and cancellations.
- */
-#define LDLM_ITER_CONTINUE 1 /* keep iterating */
-#define LDLM_ITER_STOP 2 /* stop iterating */
-
-typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
-typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);
-
-/** \defgroup ldlm_iterator Lock iterators
- *
- * LDLM provides for a way to iterate through every lock on a resource or
- * namespace or every resource in a namespace.
- * @{
- */
-int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
- ldlm_iterator_t iter, void *data);
-/** @} ldlm_iterator */
-
-int ldlm_replay_locks(struct obd_import *imp);
-
-/* ldlm_flock.c */
-int ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
-
-/* ldlm_extent.c */
-__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
-
-struct ldlm_callback_suite {
- ldlm_completion_callback lcs_completion;
- ldlm_blocking_callback lcs_blocking;
- ldlm_glimpse_callback lcs_glimpse;
-};
-
-/* ldlm_lockd.c */
-int ldlm_get_ref(void);
-void ldlm_put_ref(void);
-struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req);
-
-/* ldlm_lock.c */
-void ldlm_lock2handle(const struct ldlm_lock *lock,
- struct lustre_handle *lockh);
-struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
-void ldlm_cancel_callback(struct ldlm_lock *);
-int ldlm_lock_remove_from_lru(struct ldlm_lock *);
-int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data);
-
-/**
- * Obtain a lock reference by its handle.
- */
-static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
-{
- return __ldlm_handle2lock(h, 0);
-}
-
-#define LDLM_LOCK_REF_DEL(lock) \
- lu_ref_del(&lock->l_reference, "handle", current)
-
-static inline struct ldlm_lock *
-ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
-{
- struct ldlm_lock *lock;
-
- lock = __ldlm_handle2lock(h, flags);
- if (lock)
- LDLM_LOCK_REF_DEL(lock);
- return lock;
-}
-
-/**
- * Update Lock Value Block Operations (LVBO) on a resource taking into account
- * data from request \a r
- */
-static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
- struct ptlrpc_request *r, int increase)
-{
- if (ldlm_res_to_ns(res)->ns_lvbo &&
- ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
- return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r,
- increase);
- }
- return 0;
-}
-
-int ldlm_error2errno(enum ldlm_error error);
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-void ldlm_dump_export_locks(struct obd_export *exp);
-#endif
-
-/**
- * Release a temporary lock reference obtained by ldlm_handle2lock() or
- * __ldlm_handle2lock().
- */
-#define LDLM_LOCK_PUT(lock) \
-do { \
- LDLM_LOCK_REF_DEL(lock); \
- /*LDLM_DEBUG((lock), "put");*/ \
- ldlm_lock_put(lock); \
-} while (0)
-
-/**
- * Release a lock reference obtained by some other means (see
- * LDLM_LOCK_PUT()).
- */
-#define LDLM_LOCK_RELEASE(lock) \
-do { \
- /*LDLM_DEBUG((lock), "put");*/ \
- ldlm_lock_put(lock); \
-} while (0)
-
-#define LDLM_LOCK_GET(lock) \
-({ \
- ldlm_lock_get(lock); \
- /*LDLM_DEBUG((lock), "get");*/ \
- lock; \
-})
-
-#define ldlm_lock_list_put(head, member, count) \
-({ \
- struct ldlm_lock *_lock, *_next; \
- int c = count; \
- list_for_each_entry_safe(_lock, _next, head, member) { \
- if (c-- == 0) \
- break; \
- list_del_init(&_lock->member); \
- LDLM_LOCK_RELEASE(_lock); \
- } \
- LASSERT(c <= 0); \
-})
-
-struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
-void ldlm_lock_put(struct ldlm_lock *lock);
-void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
-void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode);
-int ldlm_lock_addref_try(const struct lustre_handle *lockh,
- enum ldlm_mode mode);
-void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode);
-void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
- enum ldlm_mode mode);
-void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
-void ldlm_lock_allow_match(struct ldlm_lock *lock);
-void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
-enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
- const struct ldlm_res_id *,
- enum ldlm_type type, union ldlm_policy_data *,
- enum ldlm_mode mode, struct lustre_handle *,
- int unref);
-enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
- __u64 *bits);
-void ldlm_lock_cancel(struct ldlm_lock *lock);
-void ldlm_lock_dump_handle(int level, const struct lustre_handle *);
-void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
-
-/* resource.c */
-struct ldlm_namespace *
-ldlm_namespace_new(struct obd_device *obd, char *name,
- enum ldlm_side client, enum ldlm_appetite apt,
- enum ldlm_ns_type ns_type);
-int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
-void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
- struct obd_import *imp,
- int force);
-void ldlm_namespace_free_post(struct ldlm_namespace *ns);
-void ldlm_namespace_get(struct ldlm_namespace *ns);
-void ldlm_namespace_put(struct ldlm_namespace *ns);
-int ldlm_debugfs_setup(void);
-void ldlm_debugfs_cleanup(void);
-
-/* resource.c - internal */
-struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
- struct ldlm_resource *parent,
- const struct ldlm_res_id *,
- enum ldlm_type type, int create);
-int ldlm_resource_putref(struct ldlm_resource *res);
-void ldlm_resource_add_lock(struct ldlm_resource *res,
- struct list_head *head,
- struct ldlm_lock *lock);
-void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
-void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
-void ldlm_dump_all_namespaces(enum ldlm_side client, int level);
-void ldlm_namespace_dump(int level, struct ldlm_namespace *);
-void ldlm_resource_dump(int level, struct ldlm_resource *);
-int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
- const struct ldlm_res_id *);
-
-#define LDLM_RESOURCE_ADDREF(res) do { \
- lu_ref_add_atomic(&(res)->lr_reference, __func__, current); \
-} while (0)
-
-#define LDLM_RESOURCE_DELREF(res) do { \
- lu_ref_del(&(res)->lr_reference, __func__, current); \
-} while (0)
-
-/* ldlm_request.c */
-/** \defgroup ldlm_local_ast Default AST handlers for local locks
- * These AST handlers are typically used for server-side local locks and are
- * also used by client-side lock handlers to perform minimum level base
- * processing.
- * @{
- */
-int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
-int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
-/** @} ldlm_local_ast */
-
-/** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users.
- * These are typically used by client and server (*_local versions)
- * to obtain and release locks.
- * @{
- */
-int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
- struct ldlm_enqueue_info *einfo,
- const struct ldlm_res_id *res_id,
- union ldlm_policy_data const *policy, __u64 *flags,
- void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
- struct lustre_handle *lockh, int async);
-int ldlm_prep_enqueue_req(struct obd_export *exp,
- struct ptlrpc_request *req,
- struct list_head *cancels,
- int count);
-int ldlm_prep_elc_req(struct obd_export *exp,
- struct ptlrpc_request *req,
- int version, int opc, int canceloff,
- struct list_head *cancels, int count);
-
-int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- enum ldlm_type type, __u8 with_policy,
- enum ldlm_mode mode,
- __u64 *flags, void *lvb, __u32 lvb_len,
- const struct lustre_handle *lockh, int rc);
-int ldlm_cli_update_pool(struct ptlrpc_request *req);
-int ldlm_cli_cancel(const struct lustre_handle *lockh,
- enum ldlm_cancel_flags cancel_flags);
-int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
- enum ldlm_cancel_flags flags, void *opaque);
-int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode,
- enum ldlm_cancel_flags flags,
- void *opaque);
-int ldlm_cancel_resource_local(struct ldlm_resource *res,
- struct list_head *cancels,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode, __u64 lock_flags,
- enum ldlm_cancel_flags cancel_flags,
- void *opaque);
-int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- enum ldlm_cancel_flags flags);
-int ldlm_cli_cancel_list(struct list_head *head, int count,
- struct ptlrpc_request *req,
- enum ldlm_cancel_flags flags);
-/** @} ldlm_cli_api */
-
-/* mds/handler.c */
-/* This has to be here because recursive inclusion sucks. */
-int intent_disposition(struct ldlm_reply *rep, int flag);
-void intent_set_disposition(struct ldlm_reply *rep, int flag);
-
-/**
- * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
- * than one lock_res is dead-lock safe.
- */
-enum lock_res_type {
- LRT_NORMAL,
- LRT_NEW
-};
-
-/** Lock resource. */
-static inline void lock_res(struct ldlm_resource *res)
-{
- spin_lock(&res->lr_lock);
-}
-
-/** Lock resource with a way to instruct lockdep code about nestedness-safe. */
-static inline void lock_res_nested(struct ldlm_resource *res,
- enum lock_res_type mode)
-{
- spin_lock_nested(&res->lr_lock, mode);
-}
-
-/** Unlock resource. */
-static inline void unlock_res(struct ldlm_resource *res)
-{
- spin_unlock(&res->lr_lock);
-}
-
-/** Check if resource is already locked, assert if not. */
-static inline void check_res_locked(struct ldlm_resource *res)
-{
- assert_spin_locked(&res->lr_lock);
-}
-
-struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
-void unlock_res_and_lock(struct ldlm_lock *lock);
-
-/* ldlm_pool.c */
-/** \defgroup ldlm_pools Various LDLM pool related functions
- * There are not used outside of ldlm.
- * @{
- */
-int ldlm_pools_init(void);
-void ldlm_pools_fini(void);
-
-int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, enum ldlm_side client);
-void ldlm_pool_fini(struct ldlm_pool *pl);
-void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
-void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
-/** @} */
-
-static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
- const struct ldlm_extent *ex2)
-{
- return ex1->start <= ex2->end && ex2->start <= ex1->end;
-}
-
-/* check if @ex1 contains @ex2 */
-static inline int ldlm_extent_contain(const struct ldlm_extent *ex1,
- const struct ldlm_extent *ex2)
-{
- return ex1->start <= ex2->start && ex1->end >= ex2->end;
-}
-
-#endif
-/** @} LDLM */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
deleted file mode 100644
index 53db031c4c8c..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ /dev/null
@@ -1,402 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* -*- buffer-read-only: t -*- vi: set ro:
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * Lustre is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-/**
- * \file lustre_dlm_flags.h
- * The flags and collections of flags (masks) for \see struct ldlm_lock.
- *
- * \addtogroup LDLM Lustre Distributed Lock Manager
- * @{
- *
- * \name flags
- * The flags and collections of flags (masks) for \see struct ldlm_lock.
- * @{
- */
-#ifndef LDLM_ALL_FLAGS_MASK
-
-/** l_flags bits marked as "all_flags" bits */
-#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F932FULL
-
-/** extent, mode, or resource changed */
-#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */
-#define ldlm_is_lock_changed(_l) LDLM_TEST_FLAG((_l), 1ULL << 0)
-#define ldlm_set_lock_changed(_l) LDLM_SET_FLAG((_l), 1ULL << 0)
-#define ldlm_clear_lock_changed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 0)
-
-/**
- * Server placed lock on granted list, or a recovering client wants the
- * lock added to the granted list, no questions asked.
- */
-#define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL /* bit 1 */
-#define ldlm_is_block_granted(_l) LDLM_TEST_FLAG((_l), 1ULL << 1)
-#define ldlm_set_block_granted(_l) LDLM_SET_FLAG((_l), 1ULL << 1)
-#define ldlm_clear_block_granted(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 1)
-
-/**
- * Server placed lock on conv list, or a recovering client wants the lock
- * added to the conv list, no questions asked.
- */
-#define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL /* bit 2 */
-#define ldlm_is_block_conv(_l) LDLM_TEST_FLAG((_l), 1ULL << 2)
-#define ldlm_set_block_conv(_l) LDLM_SET_FLAG((_l), 1ULL << 2)
-#define ldlm_clear_block_conv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 2)
-
-/**
- * Server placed lock on wait list, or a recovering client wants the lock
- * added to the wait list, no questions asked.
- */
-#define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL /* bit 3 */
-#define ldlm_is_block_wait(_l) LDLM_TEST_FLAG((_l), 1ULL << 3)
-#define ldlm_set_block_wait(_l) LDLM_SET_FLAG((_l), 1ULL << 3)
-#define ldlm_clear_block_wait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 3)
-
-/** blocking or cancel packet was queued for sending. */
-#define LDLM_FL_AST_SENT 0x0000000000000020ULL /* bit 5 */
-#define ldlm_is_ast_sent(_l) LDLM_TEST_FLAG((_l), 1ULL << 5)
-#define ldlm_set_ast_sent(_l) LDLM_SET_FLAG((_l), 1ULL << 5)
-#define ldlm_clear_ast_sent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 5)
-
-/**
- * Lock is being replayed. This could probably be implied by the fact that
- * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous.
- */
-#define LDLM_FL_REPLAY 0x0000000000000100ULL /* bit 8 */
-#define ldlm_is_replay(_l) LDLM_TEST_FLAG((_l), 1ULL << 8)
-#define ldlm_set_replay(_l) LDLM_SET_FLAG((_l), 1ULL << 8)
-#define ldlm_clear_replay(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 8)
-
-/** Don't grant lock, just do intent. */
-#define LDLM_FL_INTENT_ONLY 0x0000000000000200ULL /* bit 9 */
-#define ldlm_is_intent_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 9)
-#define ldlm_set_intent_only(_l) LDLM_SET_FLAG((_l), 1ULL << 9)
-#define ldlm_clear_intent_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 9)
-
-/** lock request has intent */
-#define LDLM_FL_HAS_INTENT 0x0000000000001000ULL /* bit 12 */
-#define ldlm_is_has_intent(_l) LDLM_TEST_FLAG((_l), 1ULL << 12)
-#define ldlm_set_has_intent(_l) LDLM_SET_FLAG((_l), 1ULL << 12)
-#define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12)
-
-/** flock deadlock detected */
-#define LDLM_FL_FLOCK_DEADLOCK 0x0000000000008000ULL /* bit 15 */
-#define ldlm_is_flock_deadlock(_l) LDLM_TEST_FLAG((_l), 1ULL << 15)
-#define ldlm_set_flock_deadlock(_l) LDLM_SET_FLAG((_l), 1ULL << 15)
-#define ldlm_clear_flock_deadlock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 15)
-
-/** discard (no writeback) on cancel */
-#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL /* bit 16 */
-#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 16)
-#define ldlm_set_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 16)
-#define ldlm_clear_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 16)
-
-/** Blocked by group lock - wait indefinitely */
-#define LDLM_FL_NO_TIMEOUT 0x0000000000020000ULL /* bit 17 */
-#define ldlm_is_no_timeout(_l) LDLM_TEST_FLAG((_l), 1ULL << 17)
-#define ldlm_set_no_timeout(_l) LDLM_SET_FLAG((_l), 1ULL << 17)
-#define ldlm_clear_no_timeout(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 17)
-
-/**
- * Server told not to wait if blocked. For AGL, OST will not send glimpse
- * callback.
- */
-#define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL /* bit 18 */
-#define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG((_l), 1ULL << 18)
-#define ldlm_set_block_nowait(_l) LDLM_SET_FLAG((_l), 1ULL << 18)
-#define ldlm_clear_block_nowait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 18)
-
-/** return blocking lock */
-#define LDLM_FL_TEST_LOCK 0x0000000000080000ULL /* bit 19 */
-#define ldlm_is_test_lock(_l) LDLM_TEST_FLAG((_l), 1ULL << 19)
-#define ldlm_set_test_lock(_l) LDLM_SET_FLAG((_l), 1ULL << 19)
-#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
-
-/** match lock only */
-#define LDLM_FL_MATCH_LOCK 0x0000000000100000ULL /* bit 20 */
-
-/**
- * Immediately cancel such locks when they block some other locks. Send
- * cancel notification to original lock holder, but expect no reply. This
- * is for clients (like liblustre) that cannot be expected to reliably
- * response to blocking AST.
- */
-#define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL /* bit 23 */
-#define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG((_l), 1ULL << 23)
-#define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG((_l), 1ULL << 23)
-#define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23)
-
-/**
- * measure lock contention and return -EUSERS if locking contention is high
- */
-#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL /* bit 30 */
-#define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG((_l), 1ULL << 30)
-#define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG((_l), 1ULL << 30)
-#define ldlm_clear_deny_on_contention(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 30)
-
-/**
- * These are flags that are mapped into the flags and ASTs of blocking
- * locks Add FL_DISCARD to blocking ASTs
- */
-#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL /* bit 31 */
-#define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 31)
-#define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 31)
-#define ldlm_clear_ast_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 31)
-
-/**
- * Used for marking lock as a target for -EINTR while cp_ast sleep emulation
- * + race with upcoming bl_ast.
- */
-#define LDLM_FL_FAIL_LOC 0x0000000100000000ULL /* bit 32 */
-#define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG((_l), 1ULL << 32)
-#define ldlm_set_fail_loc(_l) LDLM_SET_FLAG((_l), 1ULL << 32)
-#define ldlm_clear_fail_loc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 32)
-
-/**
- * Used while processing the unused list to know that we have already
- * handled this lock and decided to skip it.
- */
-#define LDLM_FL_SKIPPED 0x0000000200000000ULL /* bit 33 */
-#define ldlm_is_skipped(_l) LDLM_TEST_FLAG((_l), 1ULL << 33)
-#define ldlm_set_skipped(_l) LDLM_SET_FLAG((_l), 1ULL << 33)
-#define ldlm_clear_skipped(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 33)
-
-/** this lock is being destroyed */
-#define LDLM_FL_CBPENDING 0x0000000400000000ULL /* bit 34 */
-#define ldlm_is_cbpending(_l) LDLM_TEST_FLAG((_l), 1ULL << 34)
-#define ldlm_set_cbpending(_l) LDLM_SET_FLAG((_l), 1ULL << 34)
-#define ldlm_clear_cbpending(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 34)
-
-/** not a real flag, not saved in lock */
-#define LDLM_FL_WAIT_NOREPROC 0x0000000800000000ULL /* bit 35 */
-#define ldlm_is_wait_noreproc(_l) LDLM_TEST_FLAG((_l), 1ULL << 35)
-#define ldlm_set_wait_noreproc(_l) LDLM_SET_FLAG((_l), 1ULL << 35)
-#define ldlm_clear_wait_noreproc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 35)
-
-/** cancellation callback already run */
-#define LDLM_FL_CANCEL 0x0000001000000000ULL /* bit 36 */
-#define ldlm_is_cancel(_l) LDLM_TEST_FLAG((_l), 1ULL << 36)
-#define ldlm_set_cancel(_l) LDLM_SET_FLAG((_l), 1ULL << 36)
-#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
-
-/** whatever it might mean -- never transmitted? */
-#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL /* bit 37 */
-#define ldlm_is_local_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 37)
-#define ldlm_set_local_only(_l) LDLM_SET_FLAG((_l), 1ULL << 37)
-#define ldlm_clear_local_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 37)
-
-/** don't run the cancel callback under ldlm_cli_cancel_unused */
-#define LDLM_FL_FAILED 0x0000004000000000ULL /* bit 38 */
-#define ldlm_is_failed(_l) LDLM_TEST_FLAG((_l), 1ULL << 38)
-#define ldlm_set_failed(_l) LDLM_SET_FLAG((_l), 1ULL << 38)
-#define ldlm_clear_failed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 38)
-
-/** lock cancel has already been sent */
-#define LDLM_FL_CANCELING 0x0000008000000000ULL /* bit 39 */
-#define ldlm_is_canceling(_l) LDLM_TEST_FLAG((_l), 1ULL << 39)
-#define ldlm_set_canceling(_l) LDLM_SET_FLAG((_l), 1ULL << 39)
-#define ldlm_clear_canceling(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 39)
-
-/** local lock (ie, no srv/cli split) */
-#define LDLM_FL_LOCAL 0x0000010000000000ULL /* bit 40 */
-#define ldlm_is_local(_l) LDLM_TEST_FLAG((_l), 1ULL << 40)
-#define ldlm_set_local(_l) LDLM_SET_FLAG((_l), 1ULL << 40)
-#define ldlm_clear_local(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 40)
-
-/**
- * XXX FIXME: This is being added to b_size as a low-risk fix to the
- * fact that the LVB filling happens _after_ the lock has been granted,
- * so another thread can match it before the LVB has been updated. As a
- * dirty hack, we set LDLM_FL_LVB_READY only after we've done the LVB poop.
- * this is only needed on LOV/OSC now, where LVB is actually used and
- * callers must set it in input flags.
- *
- * The proper fix is to do the granting inside of the completion AST,
- * which can be replaced with a LVB-aware wrapping function for OSC locks.
- * That change is pretty high-risk, though, and would need a lot more
- * testing.
- */
-#define LDLM_FL_LVB_READY 0x0000020000000000ULL /* bit 41 */
-#define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG((_l), 1ULL << 41)
-#define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG((_l), 1ULL << 41)
-#define ldlm_clear_lvb_ready(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 41)
-
-/**
- * A lock contributes to the known minimum size (KMS) calculation until it
- * has finished the part of its cancellation that performs write back on its
- * dirty pages. It can remain on the granted list during this whole time.
- * Threads racing to update the KMS after performing their writeback need
- * to know to exclude each other's locks from the calculation as they walk
- * the granted list.
- */
-#define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL /* bit 42 */
-#define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG((_l), 1ULL << 42)
-#define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG((_l), 1ULL << 42)
-#define ldlm_clear_kms_ignore(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 42)
-
-/** completion AST to be executed */
-#define LDLM_FL_CP_REQD 0x0000080000000000ULL /* bit 43 */
-#define ldlm_is_cp_reqd(_l) LDLM_TEST_FLAG((_l), 1ULL << 43)
-#define ldlm_set_cp_reqd(_l) LDLM_SET_FLAG((_l), 1ULL << 43)
-#define ldlm_clear_cp_reqd(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 43)
-
-/** cleanup_resource has already handled the lock */
-#define LDLM_FL_CLEANED 0x0000100000000000ULL /* bit 44 */
-#define ldlm_is_cleaned(_l) LDLM_TEST_FLAG((_l), 1ULL << 44)
-#define ldlm_set_cleaned(_l) LDLM_SET_FLAG((_l), 1ULL << 44)
-#define ldlm_clear_cleaned(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 44)
-
-/**
- * optimization hint: LDLM can run blocking callback from current context
- * w/o involving separate thread. in order to decrease cs rate
- */
-#define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL /* bit 45 */
-#define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG((_l), 1ULL << 45)
-#define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG((_l), 1ULL << 45)
-#define ldlm_clear_atomic_cb(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 45)
-
-/**
- * It may happen that a client initiates two operations, e.g. unlink and
- * mkdir, such that the server sends a blocking AST for conflicting locks
- * to this client for the first operation, whereas the second operation
- * has canceled this lock and is waiting for rpc_lock which is taken by
- * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
- * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
- */
-#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */
-#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46)
-#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46)
-#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
-
-/**
- * Set by ldlm_cancel_callback() when lock cache is dropped to let
- * ldlm_callback_handler() return EINVAL to the server. It is used when
- * ELC RPC is already prepared and is waiting for rpc_lock, too late to
- * send a separate CANCEL RPC.
- */
-#define LDLM_FL_BL_DONE 0x0000800000000000ULL /* bit 47 */
-#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG((_l), 1ULL << 47)
-#define ldlm_set_bl_done(_l) LDLM_SET_FLAG((_l), 1ULL << 47)
-#define ldlm_clear_bl_done(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 47)
-
-/**
- * Don't put lock into the LRU list, so that it is not canceled due
- * to aging. Used by MGC locks, they are cancelled only at unmount or
- * by callback.
- */
-#define LDLM_FL_NO_LRU 0x0001000000000000ULL /* bit 48 */
-#define ldlm_is_no_lru(_l) LDLM_TEST_FLAG((_l), 1ULL << 48)
-#define ldlm_set_no_lru(_l) LDLM_SET_FLAG((_l), 1ULL << 48)
-#define ldlm_clear_no_lru(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 48)
-
-/**
- * Set for locks that failed and where the server has been notified.
- *
- * Protected by lock and resource locks.
- */
-#define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL /* bit 49 */
-#define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG((_l), 1ULL << 49)
-#define ldlm_set_fail_notified(_l) LDLM_SET_FLAG((_l), 1ULL << 49)
-#define ldlm_clear_fail_notified(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 49)
-
-/**
- * Set for locks that were removed from class hash table and will
- * be destroyed when last reference to them is released. Set by
- * ldlm_lock_destroy_internal().
- *
- * Protected by lock and resource locks.
- */
-#define LDLM_FL_DESTROYED 0x0004000000000000ULL /* bit 50 */
-#define ldlm_is_destroyed(_l) LDLM_TEST_FLAG((_l), 1ULL << 50)
-#define ldlm_set_destroyed(_l) LDLM_SET_FLAG((_l), 1ULL << 50)
-#define ldlm_clear_destroyed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 50)
-
-/** flag whether this is a server namespace lock */
-#define LDLM_FL_SERVER_LOCK 0x0008000000000000ULL /* bit 51 */
-#define ldlm_is_server_lock(_l) LDLM_TEST_FLAG((_l), 1ULL << 51)
-#define ldlm_set_server_lock(_l) LDLM_SET_FLAG((_l), 1ULL << 51)
-#define ldlm_clear_server_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 51)
-
-/**
- * It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
- *
- * NB: compared with check_res_locked(), checking this bit is cheaper.
- * Also, spin_is_locked() is deprecated for kernel code; one reason is
- * because it works only for SMP so user needs to add extra macros like
- * LASSERT_SPIN_LOCKED for uniprocessor kernels.
- */
-#define LDLM_FL_RES_LOCKED 0x0010000000000000ULL /* bit 52 */
-#define ldlm_is_res_locked(_l) LDLM_TEST_FLAG((_l), 1ULL << 52)
-#define ldlm_set_res_locked(_l) LDLM_SET_FLAG((_l), 1ULL << 52)
-#define ldlm_clear_res_locked(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 52)
-
-/**
- * It's set once we call ldlm_add_waiting_lock_res_locked() to start the
- * lock-timeout timer and it will never be reset.
- *
- * Protected by lock and resource locks.
- */
-#define LDLM_FL_WAITED 0x0020000000000000ULL /* bit 53 */
-#define ldlm_is_waited(_l) LDLM_TEST_FLAG((_l), 1ULL << 53)
-#define ldlm_set_waited(_l) LDLM_SET_FLAG((_l), 1ULL << 53)
-#define ldlm_clear_waited(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 53)
-
-/** Flag whether this is a server namespace lock. */
-#define LDLM_FL_NS_SRV 0x0040000000000000ULL /* bit 54 */
-#define ldlm_is_ns_srv(_l) LDLM_TEST_FLAG((_l), 1ULL << 54)
-#define ldlm_set_ns_srv(_l) LDLM_SET_FLAG((_l), 1ULL << 54)
-#define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54)
-
-/** Flag whether this lock can be reused. Used by exclusive open. */
-#define LDLM_FL_EXCL 0x0080000000000000ULL /* bit 55 */
-#define ldlm_is_excl(_l) LDLM_TEST_FLAG((_l), 1ULL << 55)
-#define ldlm_set_excl(_l) LDLM_SET_FLAG((_l), 1ULL << 55)
-#define ldlm_clear_excl(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 55)
-
-/** l_flags bits marked as "ast" bits */
-#define LDLM_FL_AST_MASK (LDLM_FL_FLOCK_DEADLOCK |\
- LDLM_FL_AST_DISCARD_DATA)
-
-/** l_flags bits marked as "blocked" bits */
-#define LDLM_FL_BLOCKED_MASK (LDLM_FL_BLOCK_GRANTED |\
- LDLM_FL_BLOCK_CONV |\
- LDLM_FL_BLOCK_WAIT)
-
-/** l_flags bits marked as "gone" bits */
-#define LDLM_FL_GONE_MASK (LDLM_FL_DESTROYED |\
- LDLM_FL_FAILED)
-
-/** l_flags bits marked as "inherit" bits */
-/* Flags inherited from wire on enqueue/reply between client/server. */
-/* NO_TIMEOUT flag to force ldlm_lock_match() to wait with no timeout. */
-/* TEST_LOCK flag to not let TEST lock to be granted. */
-#define LDLM_FL_INHERIT_MASK (LDLM_FL_CANCEL_ON_BLOCK |\
- LDLM_FL_NO_TIMEOUT |\
- LDLM_FL_TEST_LOCK)
-
-/** test for ldlm_lock flag bit set */
-#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
-
-/** multi-bit test: are any of mask bits set? */
-#define LDLM_HAVE_MASK(_l, _m) ((_l)->l_flags & LDLM_FL_##_m##_MASK)
-
-/** set a ldlm_lock flag bit */
-#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
-
-/** clear a ldlm_lock flag bit */
-#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
-
-/** @} subgroup */
-/** @} group */
-
-#endif /* LDLM_ALL_FLAGS_MASK */
diff --git a/drivers/staging/lustre/lustre/include/lustre_errno.h b/drivers/staging/lustre/lustre/include/lustre_errno.h
deleted file mode 100644
index 59fbb9f47ff1..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_errno.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.txt
- *
- * GPL HEADER END
- */
-/*
- * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved.
- *
- * Copyright (c) 2013, Intel Corporation.
- */
-
-#ifndef LUSTRE_ERRNO_H
-#define LUSTRE_ERRNO_H
-
-/*
- * Only "network" errnos, which are defined below, are allowed on wire (or on
- * disk). Generic routines exist to help translate between these and a subset
- * of the "host" errnos. Some host errnos (e.g., EDEADLOCK) are intentionally
- * left out. See also the comment on lustre_errno_hton_mapping[].
- *
- * To maintain compatibility with existing x86 clients and servers, each of
- * these network errnos has the same numerical value as its corresponding host
- * errno on x86.
- */
-#define LUSTRE_EPERM 1 /* Operation not permitted */
-#define LUSTRE_ENOENT 2 /* No such file or directory */
-#define LUSTRE_ESRCH 3 /* No such process */
-#define LUSTRE_EINTR 4 /* Interrupted system call */
-#define LUSTRE_EIO 5 /* I/O error */
-#define LUSTRE_ENXIO 6 /* No such device or address */
-#define LUSTRE_E2BIG 7 /* Argument list too long */
-#define LUSTRE_ENOEXEC 8 /* Exec format error */
-#define LUSTRE_EBADF 9 /* Bad file number */
-#define LUSTRE_ECHILD 10 /* No child processes */
-#define LUSTRE_EAGAIN 11 /* Try again */
-#define LUSTRE_ENOMEM 12 /* Out of memory */
-#define LUSTRE_EACCES 13 /* Permission denied */
-#define LUSTRE_EFAULT 14 /* Bad address */
-#define LUSTRE_ENOTBLK 15 /* Block device required */
-#define LUSTRE_EBUSY 16 /* Device or resource busy */
-#define LUSTRE_EEXIST 17 /* File exists */
-#define LUSTRE_EXDEV 18 /* Cross-device link */
-#define LUSTRE_ENODEV 19 /* No such device */
-#define LUSTRE_ENOTDIR 20 /* Not a directory */
-#define LUSTRE_EISDIR 21 /* Is a directory */
-#define LUSTRE_EINVAL 22 /* Invalid argument */
-#define LUSTRE_ENFILE 23 /* File table overflow */
-#define LUSTRE_EMFILE 24 /* Too many open files */
-#define LUSTRE_ENOTTY 25 /* Not a typewriter */
-#define LUSTRE_ETXTBSY 26 /* Text file busy */
-#define LUSTRE_EFBIG 27 /* File too large */
-#define LUSTRE_ENOSPC 28 /* No space left on device */
-#define LUSTRE_ESPIPE 29 /* Illegal seek */
-#define LUSTRE_EROFS 30 /* Read-only file system */
-#define LUSTRE_EMLINK 31 /* Too many links */
-#define LUSTRE_EPIPE 32 /* Broken pipe */
-#define LUSTRE_EDOM 33 /* Math argument out of func domain */
-#define LUSTRE_ERANGE 34 /* Math result not representable */
-#define LUSTRE_EDEADLK 35 /* Resource deadlock would occur */
-#define LUSTRE_ENAMETOOLONG 36 /* File name too long */
-#define LUSTRE_ENOLCK 37 /* No record locks available */
-#define LUSTRE_ENOSYS 38 /* Function not implemented */
-#define LUSTRE_ENOTEMPTY 39 /* Directory not empty */
-#define LUSTRE_ELOOP 40 /* Too many symbolic links found */
-#define LUSTRE_ENOMSG 42 /* No message of desired type */
-#define LUSTRE_EIDRM 43 /* Identifier removed */
-#define LUSTRE_ECHRNG 44 /* Channel number out of range */
-#define LUSTRE_EL2NSYNC 45 /* Level 2 not synchronized */
-#define LUSTRE_EL3HLT 46 /* Level 3 halted */
-#define LUSTRE_EL3RST 47 /* Level 3 reset */
-#define LUSTRE_ELNRNG 48 /* Link number out of range */
-#define LUSTRE_EUNATCH 49 /* Protocol driver not attached */
-#define LUSTRE_ENOCSI 50 /* No CSI structure available */
-#define LUSTRE_EL2HLT 51 /* Level 2 halted */
-#define LUSTRE_EBADE 52 /* Invalid exchange */
-#define LUSTRE_EBADR 53 /* Invalid request descriptor */
-#define LUSTRE_EXFULL 54 /* Exchange full */
-#define LUSTRE_ENOANO 55 /* No anode */
-#define LUSTRE_EBADRQC 56 /* Invalid request code */
-#define LUSTRE_EBADSLT 57 /* Invalid slot */
-#define LUSTRE_EBFONT 59 /* Bad font file format */
-#define LUSTRE_ENOSTR 60 /* Device not a stream */
-#define LUSTRE_ENODATA 61 /* No data available */
-#define LUSTRE_ETIME 62 /* Timer expired */
-#define LUSTRE_ENOSR 63 /* Out of streams resources */
-#define LUSTRE_ENONET 64 /* Machine is not on the network */
-#define LUSTRE_ENOPKG 65 /* Package not installed */
-#define LUSTRE_EREMOTE 66 /* Object is remote */
-#define LUSTRE_ENOLINK 67 /* Link has been severed */
-#define LUSTRE_EADV 68 /* Advertise error */
-#define LUSTRE_ESRMNT 69 /* Srmount error */
-#define LUSTRE_ECOMM 70 /* Communication error on send */
-#define LUSTRE_EPROTO 71 /* Protocol error */
-#define LUSTRE_EMULTIHOP 72 /* Multihop attempted */
-#define LUSTRE_EDOTDOT 73 /* RFS specific error */
-#define LUSTRE_EBADMSG 74 /* Not a data message */
-#define LUSTRE_EOVERFLOW 75 /* Value too large for data type */
-#define LUSTRE_ENOTUNIQ 76 /* Name not unique on network */
-#define LUSTRE_EBADFD 77 /* File descriptor in bad state */
-#define LUSTRE_EREMCHG 78 /* Remote address changed */
-#define LUSTRE_ELIBACC 79 /* Can't access needed shared library */
-#define LUSTRE_ELIBBAD 80 /* Access corrupted shared library */
-#define LUSTRE_ELIBSCN 81 /* .lib section in a.out corrupted */
-#define LUSTRE_ELIBMAX 82 /* Trying to link too many libraries */
-#define LUSTRE_ELIBEXEC 83 /* Cannot exec a shared lib directly */
-#define LUSTRE_EILSEQ 84 /* Illegal byte sequence */
-#define LUSTRE_ERESTART 85 /* Restart interrupted system call */
-#define LUSTRE_ESTRPIPE 86 /* Streams pipe error */
-#define LUSTRE_EUSERS 87 /* Too many users */
-#define LUSTRE_ENOTSOCK 88 /* Socket operation on non-socket */
-#define LUSTRE_EDESTADDRREQ 89 /* Destination address required */
-#define LUSTRE_EMSGSIZE 90 /* Message too long */
-#define LUSTRE_EPROTOTYPE 91 /* Protocol wrong type for socket */
-#define LUSTRE_ENOPROTOOPT 92 /* Protocol not available */
-#define LUSTRE_EPROTONOSUPPORT 93 /* Protocol not supported */
-#define LUSTRE_ESOCKTNOSUPPORT 94 /* Socket type not supported */
-#define LUSTRE_EOPNOTSUPP 95 /* Operation not supported */
-#define LUSTRE_EPFNOSUPPORT 96 /* Protocol family not supported */
-#define LUSTRE_EAFNOSUPPORT 97 /* Address family not supported */
-#define LUSTRE_EADDRINUSE 98 /* Address already in use */
-#define LUSTRE_EADDRNOTAVAIL 99 /* Cannot assign requested address */
-#define LUSTRE_ENETDOWN 100 /* Network is down */
-#define LUSTRE_ENETUNREACH 101 /* Network is unreachable */
-#define LUSTRE_ENETRESET 102 /* Network connection drop for reset */
-#define LUSTRE_ECONNABORTED 103 /* Software caused connection abort */
-#define LUSTRE_ECONNRESET 104 /* Connection reset by peer */
-#define LUSTRE_ENOBUFS 105 /* No buffer space available */
-#define LUSTRE_EISCONN 106 /* Transport endpoint is connected */
-#define LUSTRE_ENOTCONN 107 /* Transport endpoint not connected */
-#define LUSTRE_ESHUTDOWN 108 /* Cannot send after shutdown */
-#define LUSTRE_ETOOMANYREFS 109 /* Too many references: cannot splice */
-#define LUSTRE_ETIMEDOUT 110 /* Connection timed out */
-#define LUSTRE_ECONNREFUSED 111 /* Connection refused */
-#define LUSTRE_EHOSTDOWN 112 /* Host is down */
-#define LUSTRE_EHOSTUNREACH 113 /* No route to host */
-#define LUSTRE_EALREADY 114 /* Operation already in progress */
-#define LUSTRE_EINPROGRESS 115 /* Operation now in progress */
-#define LUSTRE_ESTALE 116 /* Stale file handle */
-#define LUSTRE_EUCLEAN 117 /* Structure needs cleaning */
-#define LUSTRE_ENOTNAM 118 /* Not a XENIX named type file */
-#define LUSTRE_ENAVAIL 119 /* No XENIX semaphores available */
-#define LUSTRE_EISNAM 120 /* Is a named type file */
-#define LUSTRE_EREMOTEIO 121 /* Remote I/O error */
-#define LUSTRE_EDQUOT 122 /* Quota exceeded */
-#define LUSTRE_ENOMEDIUM 123 /* No medium found */
-#define LUSTRE_EMEDIUMTYPE 124 /* Wrong medium type */
-#define LUSTRE_ECANCELED 125 /* Operation Canceled */
-#define LUSTRE_ENOKEY 126 /* Required key not available */
-#define LUSTRE_EKEYEXPIRED 127 /* Key has expired */
-#define LUSTRE_EKEYREVOKED 128 /* Key has been revoked */
-#define LUSTRE_EKEYREJECTED 129 /* Key was rejected by service */
-#define LUSTRE_EOWNERDEAD 130 /* Owner died */
-#define LUSTRE_ENOTRECOVERABLE 131 /* State not recoverable */
-#define LUSTRE_ERESTARTSYS 512
-#define LUSTRE_ERESTARTNOINTR 513
-#define LUSTRE_ERESTARTNOHAND 514 /* restart if no handler.. */
-#define LUSTRE_ENOIOCTLCMD 515 /* No ioctl command */
-#define LUSTRE_ERESTART_RESTARTBLOCK 516 /* restart via sys_restart_syscall */
-#define LUSTRE_EBADHANDLE 521 /* Illegal NFS file handle */
-#define LUSTRE_ENOTSYNC 522 /* Update synchronization mismatch */
-#define LUSTRE_EBADCOOKIE 523 /* Cookie is stale */
-#define LUSTRE_ENOTSUPP 524 /* Operation is not supported */
-#define LUSTRE_ETOOSMALL 525 /* Buffer or request is too small */
-#define LUSTRE_ESERVERFAULT 526 /* An untranslatable error occurred */
-#define LUSTRE_EBADTYPE 527 /* Type not supported by server */
-#define LUSTRE_EJUKEBOX 528 /* Request won't finish until timeout */
-#define LUSTRE_EIOCBQUEUED 529 /* iocb queued await completion event */
-#define LUSTRE_EIOCBRETRY 530 /* iocb queued, will trigger a retry */
-
-/*
- * Translations are optimized away on x86. Host errnos that shouldn't be put
- * on wire could leak through as a result. Do not count on this side effect.
- */
-#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
-unsigned int lustre_errno_hton(unsigned int h);
-unsigned int lustre_errno_ntoh(unsigned int n);
-#else
-#define lustre_errno_hton(h) (h)
-#define lustre_errno_ntoh(n) (n)
-#endif
-
-#endif /* LUSTRE_ERRNO_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
deleted file mode 100644
index 40cd168ed2ea..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ /dev/null
@@ -1,257 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/** \defgroup obd_export PortalRPC export definitions
- *
- * @{
- */
-
-#ifndef __EXPORT_H
-#define __EXPORT_H
-
-/** \defgroup export export
- *
- * @{
- */
-
-#include <lprocfs_status.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <lustre_dlm.h>
-
-enum obd_option {
- OBD_OPT_FORCE = 0x0001,
- OBD_OPT_FAILOVER = 0x0002,
- OBD_OPT_ABORT_RECOV = 0x0004,
-};
-
-/**
- * Export structure. Represents target-side of connection in portals.
- * Also used in Lustre to connect between layers on the same node when
- * there is no network-connection in-between.
- * For every connected client there is an export structure on the server
- * attached to the same obd device.
- */
-struct obd_export {
- /**
- * Export handle, it's id is provided to client on connect
- * Subsequent client RPCs contain this handle id to identify
- * what export they are talking to.
- */
- struct portals_handle exp_handle;
- atomic_t exp_refcount;
- /**
- * Set of counters below is to track where export references are
- * kept. The exp_rpc_count is used for reconnect handling also,
- * the cb_count and locks_count are for debug purposes only for now.
- * The sum of them should be less than exp_refcount by 3
- */
- atomic_t exp_rpc_count; /* RPC references */
- atomic_t exp_cb_count; /* Commit callback references */
- /** Number of queued replay requests to be processes */
- atomic_t exp_replay_count;
- atomic_t exp_locks_count; /** Lock references */
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- struct list_head exp_locks_list;
- spinlock_t exp_locks_list_guard;
-#endif
- /** UUID of client connected to this export */
- struct obd_uuid exp_client_uuid;
- /** To link all exports on an obd device */
- struct list_head exp_obd_chain;
- /** work_struct for destruction of export */
- struct work_struct exp_zombie_work;
- struct hlist_node exp_uuid_hash; /** uuid-export hash*/
- /** Obd device of this export */
- struct obd_device *exp_obd;
- /**
- * "reverse" import to send requests (e.g. from ldlm) back to client
- * exp_lock protect its change
- */
- struct obd_import *exp_imp_reverse;
- struct lprocfs_stats *exp_md_stats;
- /** Active connection */
- struct ptlrpc_connection *exp_connection;
- /** Connection count value from last successful reconnect rpc */
- __u32 exp_conn_cnt;
- /** Hash list of all ldlm locks granted on this export */
- struct cfs_hash *exp_lock_hash;
- /**
- * Hash list for Posix lock deadlock detection, added with
- * ldlm_lock::l_exp_flock_hash.
- */
- struct cfs_hash *exp_flock_hash;
- struct list_head exp_outstanding_replies;
- struct list_head exp_uncommitted_replies;
- spinlock_t exp_uncommitted_replies_lock;
- /** Last committed transno for this export */
- __u64 exp_last_committed;
- /** On replay all requests waiting for replay are linked here */
- struct list_head exp_req_replay_queue;
- /**
- * protects exp_flags, exp_outstanding_replies and the change
- * of exp_imp_reverse
- */
- spinlock_t exp_lock;
- /** Compatibility flags for this export are embedded into
- * exp_connect_data
- */
- struct obd_connect_data exp_connect_data;
- enum obd_option exp_flags;
- unsigned long exp_failed:1,
- exp_disconnected:1,
- exp_connecting:1,
- exp_flvr_changed:1,
- exp_flvr_adapt:1;
- /* also protected by exp_lock */
- enum lustre_sec_part exp_sp_peer;
- struct sptlrpc_flavor exp_flvr; /* current */
- struct sptlrpc_flavor exp_flvr_old[2]; /* about-to-expire */
- time64_t exp_flvr_expire[2]; /* seconds */
-
- /** protects exp_hp_rpcs */
- spinlock_t exp_rpc_lock;
- struct list_head exp_hp_rpcs; /* (potential) HP RPCs */
-
- /** blocking dlm lock list, protected by exp_bl_list_lock */
- struct list_head exp_bl_list;
- spinlock_t exp_bl_list_lock;
-};
-
-static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp)
-{
- return &exp->exp_connect_data.ocd_connect_flags;
-}
-
-static inline __u64 exp_connect_flags(struct obd_export *exp)
-{
- return *exp_connect_flags_ptr(exp);
-}
-
-static inline int exp_max_brw_size(struct obd_export *exp)
-{
- if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE)
- return exp->exp_connect_data.ocd_brw_size;
-
- return ONE_MB_BRW_SIZE;
-}
-
-static inline int exp_connect_multibulk(struct obd_export *exp)
-{
- return exp_max_brw_size(exp) > ONE_MB_BRW_SIZE;
-}
-
-static inline int exp_connect_cancelset(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET);
-}
-
-static inline int exp_connect_lru_resize(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE);
-}
-
-static inline int exp_connect_vbr(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR);
-}
-
-static inline int exp_connect_som(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM);
-}
-
-static inline int exp_connect_umask(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_UMASK);
-}
-
-static inline int imp_connect_lru_resize(struct obd_import *imp)
-{
- struct obd_connect_data *ocd;
-
- ocd = &imp->imp_connect_data;
- return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE);
-}
-
-static inline int exp_connect_layout(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_LAYOUTLOCK);
-}
-
-static inline bool exp_connect_lvb_type(struct obd_export *exp)
-{
- if (exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE)
- return true;
- else
- return false;
-}
-
-static inline bool imp_connect_lvb_type(struct obd_import *imp)
-{
- struct obd_connect_data *ocd;
-
- ocd = &imp->imp_connect_data;
- if (ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE)
- return true;
- else
- return false;
-}
-
-static inline __u64 exp_connect_ibits(struct obd_export *exp)
-{
- struct obd_connect_data *ocd;
-
- ocd = &exp->exp_connect_data;
- return ocd->ocd_ibits_known;
-}
-
-static inline bool imp_connect_disp_stripe(struct obd_import *imp)
-{
- struct obd_connect_data *ocd;
-
- ocd = &imp->imp_connect_data;
- return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE;
-}
-
-struct obd_export *class_conn2export(struct lustre_handle *conn);
-
-#define KKUC_CT_DATA_MAGIC 0x092013cea
-struct kkuc_ct_data {
- __u32 kcd_magic;
- struct obd_uuid kcd_uuid;
- __u32 kcd_archive;
-};
-
-/** @} export */
-
-#endif /* __EXPORT_H */
-/** @} obd_export */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
deleted file mode 100644
index 094ad282de2c..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ /dev/null
@@ -1,676 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_fid.h
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#ifndef __LUSTRE_FID_H
-#define __LUSTRE_FID_H
-
-/** \defgroup fid fid
- *
- * @{
- *
- * http://wiki.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
- * describes the FID namespace and interoperability requirements for FIDs.
- * The important parts of that document are included here for reference.
- *
- * FID
- * File IDentifier generated by client from range allocated by the SEQuence
- * service and stored in struct lu_fid. The FID is composed of three parts:
- * SEQuence, ObjectID, and VERsion. The SEQ component is a filesystem
- * unique 64-bit integer, and only one client is ever assigned any SEQ value.
- * The first 0x400 FID_SEQ_NORMAL [2^33, 2^33 + 0x400] values are reserved
- * for system use. The OID component is a 32-bit value generated by the
- * client on a per-SEQ basis to allow creating many unique FIDs without
- * communication with the server. The VER component is a 32-bit value that
- * distinguishes between different FID instantiations, such as snapshots or
- * separate subtrees within the filesystem. FIDs with the same VER field
- * are considered part of the same namespace.
- *
- * OLD filesystems are those upgraded from Lustre 1.x that predate FIDs, and
- * MDTs use 32-bit ldiskfs internal inode/generation numbers (IGIFs), while
- * OSTs use 64-bit Lustre object IDs and generation numbers.
- *
- * NEW filesystems are those formatted since the introduction of FIDs.
- *
- * IGIF
- * Inode and Generation In FID, a surrogate FID used to globally identify
- * an existing object on OLD formatted MDT file system. This would only be
- * used on MDT0 in a DNE filesystem, because there cannot be more than one
- * MDT in an OLD formatted filesystem. Belongs to sequence in [12, 2^32 - 1]
- * range, where inode number is stored in SEQ, and inode generation is in OID.
- * NOTE: This assumes no more than 2^32-1 inodes exist in the MDT filesystem,
- * which is the maximum possible for an ldiskfs backend. It also assumes
- * that the reserved ext3/ext4/ldiskfs inode numbers [0-11] are never visible
- * to clients, which has always been true.
- *
- * IDIF
- * object ID In FID, a surrogate FID used to globally identify an existing
- * OST object on OLD formatted OST file system. Belongs to a sequence in
- * [2^32, 2^33 - 1]. Sequence number is calculated as:
- *
- * 1 << 32 | (ost_index << 16) | ((objid >> 32) & 0xffff)
- *
- * that is, SEQ consists of 16-bit OST index, and higher 16 bits of object
- * ID. The generation of unique SEQ values per OST allows the IDIF FIDs to
- * be identified in the FLD correctly. The OID field is calculated as:
- *
- * objid & 0xffffffff
- *
- * that is, it consists of lower 32 bits of object ID. For objects within
- * the IDIF range, object ID extraction will be:
- *
- * o_id = (fid->f_seq & 0x7fff) << 16 | fid->f_oid;
- * o_seq = 0; // formerly group number
- *
- * NOTE: This assumes that no more than 2^48-1 objects have ever been created
- * on any OST, and that no more than 65535 OSTs are in use. Both are very
- * reasonable assumptions, i.e. an IDIF can uniquely map all objects assuming
- * a maximum creation rate of 1M objects per second for a maximum of 9 years,
- * or combinations thereof.
- *
- * OST_MDT0
- * Surrogate FID used to identify an existing object on OLD formatted OST
- * filesystem. Belongs to the reserved SEQuence 0, and is used prior to
- * the introduction of FID-on-OST, at which point IDIF will be used to
- * identify objects as residing on a specific OST.
- *
- * LLOG
- * For Lustre Log objects the object sequence 1 is used. This is compatible
- * with both OLD and NEW namespaces, as this SEQ number is in the
- * ext3/ldiskfs reserved inode range and does not conflict with IGIF
- * sequence numbers.
- *
- * ECHO
- * For testing OST IO performance the object sequence 2 is used. This is
- * compatible with both OLD and NEW namespaces, as this SEQ number is in
- * the ext3/ldiskfs reserved inode range and does not conflict with IGIF
- * sequence numbers.
- *
- * OST_MDT1 .. OST_MAX
- * For testing with multiple MDTs the object sequence 3 through 9 is used,
- * allowing direct mapping of MDTs 1 through 7 respectively, for a total
- * of 8 MDTs including OST_MDT0. This matches the legacy CMD project "group"
- * mappings. However, this SEQ range is only for testing prior to any
- * production DNE release, as the objects in this range conflict across all
- * OSTs, as the OST index is not part of the FID. For production DNE usage,
- * OST objects created by MDT1+ will use FID_SEQ_NORMAL FIDs.
- *
- * DLM OST objid to IDIF mapping
- * For compatibility with existing OLD OST network protocol structures, the
- * FID must map onto the o_id and o_seq in a manner that ensures existing
- * objects are identified consistently for IO, as well as onto the LDLM
- * namespace to ensure IDIFs there is only a single resource name for any
- * object in the DLM. The OLD OST object DLM resource mapping is:
- *
- * resource[] = {o_id, o_seq, 0, 0}; // o_seq == 0 for production releases
- *
- * The NEW OST object DLM resource mapping is the same for both MDT and OST:
- *
- * resource[] = {SEQ, OID, VER, HASH};
- *
- * NOTE: for mapping IDIF values to DLM resource names the o_id may be
- * larger than the 2^33 reserved sequence numbers for IDIF, so it is possible
- * for the o_id numbers to overlap FID SEQ numbers in the resource. However,
- * in all production releases the OLD o_seq field is always zero, and all
- * valid FID OID values are non-zero, so the lock resources will not collide.
- * Even so, the MDT and OST resources are also in different LDLM namespaces.
- */
-
-#include <linux/libcfs/libcfs.h>
-#include <uapi/linux/lustre/lustre_fid.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <uapi/linux/lustre/lustre_ostid.h>
-
-struct lu_env;
-struct lu_site;
-struct lu_context;
-struct obd_device;
-struct obd_export;
-
-/* Whole sequences space range and zero range definitions */
-extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE;
-extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE;
-extern const struct lu_fid LUSTRE_BFL_FID;
-extern const struct lu_fid LU_OBF_FID;
-extern const struct lu_fid LU_DOT_LUSTRE_FID;
-
-enum {
- /*
- * This is how may metadata FIDs may be allocated in one sequence(128k)
- */
- LUSTRE_METADATA_SEQ_MAX_WIDTH = 0x0000000000020000ULL,
-
- /*
- * This is how many data FIDs could be allocated in one sequence(4B - 1)
- */
- LUSTRE_DATA_SEQ_MAX_WIDTH = 0x00000000FFFFFFFFULL,
-
- /*
- * How many sequences to allocate to a client at once.
- */
- LUSTRE_SEQ_META_WIDTH = 0x0000000000000001ULL,
-
- /*
- * seq allocation pool size.
- */
- LUSTRE_SEQ_BATCH_WIDTH = LUSTRE_SEQ_META_WIDTH * 1000,
-
- /*
- * This is how many sequences may be in one super-sequence allocated to
- * MDTs.
- */
- LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH)
-};
-
-enum {
- /** 2^6 FIDs for OI containers */
- OSD_OI_FID_OID_BITS = 6,
- /** reserve enough FIDs in case we want more in the future */
- OSD_OI_FID_OID_BITS_MAX = 10,
-};
-
-/** special OID for local objects */
-enum local_oid {
- /** \see fld_mod_init */
- FLD_INDEX_OID = 3UL,
- /** \see fid_mod_init */
- FID_SEQ_CTL_OID = 4UL,
- FID_SEQ_SRV_OID = 5UL,
- /** \see mdd_mod_init */
- MDD_ROOT_INDEX_OID = 6UL, /* deprecated in 2.4 */
- MDD_ORPHAN_OID = 7UL, /* deprecated in 2.4 */
- MDD_LOV_OBJ_OID = 8UL,
- MDD_CAPA_KEYS_OID = 9UL,
- /** \see mdt_mod_init */
- LAST_RECV_OID = 11UL,
- OSD_FS_ROOT_OID = 13UL,
- ACCT_USER_OID = 15UL,
- ACCT_GROUP_OID = 16UL,
- LFSCK_BOOKMARK_OID = 17UL,
- OTABLE_IT_OID = 18UL,
- /* These two definitions are obsolete
- * OFD_GROUP0_LAST_OID = 20UL,
- * OFD_GROUP4K_LAST_OID = 20UL+4096,
- */
- OFD_LAST_GROUP_OID = 4117UL,
- LLOG_CATALOGS_OID = 4118UL,
- MGS_CONFIGS_OID = 4119UL,
- OFD_HEALTH_CHECK_OID = 4120UL,
- MDD_LOV_OBJ_OSEQ = 4121UL,
- LFSCK_NAMESPACE_OID = 4122UL,
- REMOTE_PARENT_DIR_OID = 4123UL,
- SLAVE_LLOG_CATALOGS_OID = 4124UL,
-};
-
-static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
-{
- fid->f_seq = FID_SEQ_LOCAL_FILE;
- fid->f_oid = oid;
- fid->f_ver = 0;
-}
-
-static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid)
-{
- fid->f_seq = FID_SEQ_LOCAL_NAME;
- fid->f_oid = oid;
- fid->f_ver = 0;
-}
-
-/* For new FS (>= 2.4), the root FID will be changed to
- * [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4),
- * the root FID will still be IGIF
- */
-static inline int fid_is_root(const struct lu_fid *fid)
-{
- return unlikely((fid_seq(fid) == FID_SEQ_ROOT &&
- fid_oid(fid) == 1));
-}
-
-static inline int fid_is_dot_lustre(const struct lu_fid *fid)
-{
- return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
- fid_oid(fid) == FID_OID_DOT_LUSTRE);
-}
-
-static inline int fid_is_obf(const struct lu_fid *fid)
-{
- return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
- fid_oid(fid) == FID_OID_DOT_LUSTRE_OBF);
-}
-
-static inline int fid_is_otable_it(const struct lu_fid *fid)
-{
- return unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
- fid_oid(fid) == OTABLE_IT_OID);
-}
-
-static inline int fid_is_acct(const struct lu_fid *fid)
-{
- return fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
- (fid_oid(fid) == ACCT_USER_OID ||
- fid_oid(fid) == ACCT_GROUP_OID);
-}
-
-static inline int fid_is_quota(const struct lu_fid *fid)
-{
- return fid_seq(fid) == FID_SEQ_QUOTA ||
- fid_seq(fid) == FID_SEQ_QUOTA_GLB;
-}
-
-static inline int fid_seq_in_fldb(__u64 seq)
-{
- return fid_seq_is_igif(seq) || fid_seq_is_norm(seq) ||
- fid_seq_is_root(seq) || fid_seq_is_dot(seq);
-}
-
-static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx)
-{
- if (fid_seq_is_mdt0(seq)) {
- fid->f_seq = fid_idif_seq(0, ost_idx);
- } else {
- LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
- fid_seq_is_idif(seq), "%#llx\n", seq);
- fid->f_seq = seq;
- }
- fid->f_oid = 0;
- fid->f_ver = 0;
-}
-
-/* seq client type */
-enum lu_cli_type {
- LUSTRE_SEQ_METADATA = 1,
- LUSTRE_SEQ_DATA
-};
-
-enum lu_mgr_type {
- LUSTRE_SEQ_SERVER,
- LUSTRE_SEQ_CONTROLLER
-};
-
-/* Client sequence manager interface. */
-struct lu_client_seq {
- /* Sequence-controller export. */
- struct obd_export *lcs_exp;
- spinlock_t lcs_lock;
-
- /*
- * Range of allowed for allocation sequences. When using lu_client_seq on
- * clients, this contains meta-sequence range. And for servers this
- * contains super-sequence range.
- */
- struct lu_seq_range lcs_space;
-
- /* Seq related proc */
- struct dentry *lcs_debugfs_entry;
-
- /* This holds last allocated fid in last obtained seq */
- struct lu_fid lcs_fid;
-
- /* LUSTRE_SEQ_METADATA or LUSTRE_SEQ_DATA */
- enum lu_cli_type lcs_type;
-
- /*
- * Service uuid, passed from MDT + seq name to form unique seq name to
- * use it with procfs.
- */
- char lcs_name[LUSTRE_MDT_MAXNAMELEN];
-
- /*
- * Sequence width, that is how many objects may be allocated in one
- * sequence. Default value for it is LUSTRE_SEQ_MAX_WIDTH.
- */
- __u64 lcs_width;
-
- /* wait queue for fid allocation and update indicator */
- wait_queue_head_t lcs_waitq;
- int lcs_update;
-};
-
-/* Client methods */
-void seq_client_flush(struct lu_client_seq *seq);
-
-int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq,
- struct lu_fid *fid);
-/* Fids common stuff */
-int fid_is_local(const struct lu_env *env,
- struct lu_site *site, const struct lu_fid *fid);
-
-enum lu_cli_type;
-int client_fid_init(struct obd_device *obd, struct obd_export *exp,
- enum lu_cli_type type);
-int client_fid_fini(struct obd_device *obd);
-
-/* fid locking */
-
-struct ldlm_namespace;
-
-/*
- * Build (DLM) resource name from FID.
- *
- * NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
- * but was moved into name[1] along with the OID to avoid consuming the
- * renaming name[2,3] fields that need to be used for the quota identifier.
- */
-static inline void
-fid_build_reg_res_name(const struct lu_fid *fid, struct ldlm_res_id *res)
-{
- memset(res, 0, sizeof(*res));
- res->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid);
- res->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid);
-}
-
-/*
- * Return true if resource is for object identified by FID.
- */
-static inline bool fid_res_name_eq(const struct lu_fid *fid,
- const struct ldlm_res_id *res)
-{
- return res->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(fid) &&
- res->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(fid);
-}
-
-/*
- * Extract FID from LDLM resource. Reverse of fid_build_reg_res_name().
- */
-static inline void
-fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res)
-{
- fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
- fid->f_oid = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF]);
- fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
- LASSERT(fid_res_name_eq(fid, res));
-}
-
-/*
- * Build (DLM) resource identifier from global quota FID and quota ID.
- */
-static inline void
-fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid,
- struct ldlm_res_id *res)
-{
- fid_build_reg_res_name(glb_fid, res);
- res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid);
- res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid);
-}
-
-/*
- * Extract global FID and quota ID from resource name
- */
-static inline void fid_extract_from_quota_res(struct lu_fid *glb_fid,
- union lquota_id *qid,
- const struct ldlm_res_id *res)
-{
- fid_extract_from_res_name(glb_fid, res);
- qid->qid_fid.f_seq = res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF];
- qid->qid_fid.f_oid = (__u32)res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF];
- qid->qid_fid.f_ver =
- (__u32)(res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] >> 32);
-}
-
-static inline void
-fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
- struct ldlm_res_id *res)
-{
- fid_build_reg_res_name(fid, res);
- res->name[LUSTRE_RES_ID_HSH_OFF] = hash;
-}
-
-/**
- * Build DLM resource name from object id & seq, which will be removed
- * finally, when we replace ost_id with FID in data stack.
- *
- * Currently, resid from the old client, whose res[0] = object_id,
- * res[1] = object_seq, is just opposite with Metatdata
- * resid, where, res[0] = fid->f_seq, res[1] = fid->f_oid.
- * To unify the resid identification, we will reverse the data
- * resid to keep it same with Metadata resid, i.e.
- *
- * For resid from the old client,
- * res[0] = objid, res[1] = 0, still keep the original order,
- * for compatibility.
- *
- * For new resid
- * res will be built from normal FID directly, i.e. res[0] = f_seq,
- * res[1] = f_oid + f_ver.
- */
-static inline void ostid_build_res_name(const struct ost_id *oi,
- struct ldlm_res_id *name)
-{
- memset(name, 0, sizeof(*name));
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
- name->name[LUSTRE_RES_ID_SEQ_OFF] = ostid_id(oi);
- name->name[LUSTRE_RES_ID_VER_OID_OFF] = ostid_seq(oi);
- } else {
- fid_build_reg_res_name(&oi->oi_fid, name);
- }
-}
-
-/**
- * Return true if the resource is for the object identified by this id & group.
- */
-static inline int ostid_res_name_eq(const struct ost_id *oi,
- const struct ldlm_res_id *name)
-{
- /* Note: it is just a trick here to save some effort, probably the
- * correct way would be turn them into the FID and compare
- */
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
- return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) &&
- name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi);
- } else {
- return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_seq(oi) &&
- name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_id(oi);
- }
-}
-
-/**
- * Note: we need check oi_seq to decide where to set oi_id,
- * so oi_seq should always be set ahead of oi_id.
- */
-static inline int ostid_set_id(struct ost_id *oi, __u64 oid)
-{
- if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
- if (oid >= IDIF_MAX_OID)
- return -E2BIG;
- oi->oi.oi_id = oid;
- } else if (fid_is_idif(&oi->oi_fid)) {
- if (oid >= IDIF_MAX_OID)
- return -E2BIG;
- oi->oi_fid.f_seq = fid_idif_seq(oid,
- fid_idif_ost_idx(&oi->oi_fid));
- oi->oi_fid.f_oid = oid;
- oi->oi_fid.f_ver = oid >> 48;
- } else {
- if (oid >= OBIF_MAX_OID)
- return -E2BIG;
- oi->oi_fid.f_oid = oid;
- }
- return 0;
-}
-
-/* pack any OST FID into an ostid (id/seq) for the wire/disk */
-static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
-{
- int rc = 0;
-
- if (fid_seq_is_igif(fid->f_seq))
- return -EBADF;
-
- if (fid_is_idif(fid)) {
- u64 objid = fid_idif_id(fid_seq(fid), fid_oid(fid),
- fid_ver(fid));
-
- ostid_set_seq_mdt0(ostid);
- rc = ostid_set_id(ostid, objid);
- } else {
- ostid->oi_fid = *fid;
- }
-
- return rc;
-}
-
-/* The same as osc_build_res_name() */
-static inline void ost_fid_build_resid(const struct lu_fid *fid,
- struct ldlm_res_id *resname)
-{
- if (fid_is_mdt0(fid) || fid_is_idif(fid)) {
- struct ost_id oi;
-
- oi.oi.oi_id = 0; /* gcc 4.7.2 complains otherwise */
- if (fid_to_ostid(fid, &oi) != 0)
- return;
- ostid_build_res_name(&oi, resname);
- } else {
- fid_build_reg_res_name(fid, resname);
- }
-}
-
-/**
- * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
- * For non-IGIF FIDs this starts just over 2^32, and continues without
- * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
- * into the range where there may not be many OID values in use, to minimize
- * the risk of conflict.
- *
- * Suppose LUSTRE_SEQ_MAX_WIDTH less than (1 << 24) which is currently true,
- * the time between re-used inode numbers is very long - 2^40 SEQ numbers,
- * or about 2^40 client mounts, if clients create less than 2^24 files/mount.
- */
-static inline __u64 fid_flatten(const struct lu_fid *fid)
-{
- __u64 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid);
-
- ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
-
- return ino ? ino : fid_oid(fid);
-}
-
-static inline __u32 fid_hash(const struct lu_fid *f, int bits)
-{
- /* all objects with same id and different versions will belong to same
- * collisions list.
- */
- return hash_long(fid_flatten(f), bits);
-}
-
-/**
- * map fid to 32 bit value for ino on 32bit systems.
- */
-static inline __u32 fid_flatten32(const struct lu_fid *fid)
-{
- __u32 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid) - FID_SEQ_START;
-
- /* Map the high bits of the OID into higher bits of the inode number so
- * that inodes generated at about the same time have a reduced chance
- * of collisions. This will give a period of 2^12 = 1024 unique clients
- * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
- * (from OID), or up to 128M inodes without collisions for new files.
- */
- ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
- (seq >> (64 - (40 - 8)) & 0xffffff00) +
- (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
-
- return ino ? ino : fid_oid(fid);
-}
-
-static inline int lu_fid_diff(const struct lu_fid *fid1,
- const struct lu_fid *fid2)
-{
- LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:" DFID ", fid2:" DFID "\n",
- PFID(fid1), PFID(fid2));
-
- if (fid_is_idif(fid1) && fid_is_idif(fid2))
- return fid_idif_id(fid1->f_seq, fid1->f_oid, fid1->f_ver) -
- fid_idif_id(fid2->f_seq, fid2->f_oid, fid2->f_ver);
-
- return fid_oid(fid1) - fid_oid(fid2);
-}
-
-#define LUSTRE_SEQ_SRV_NAME "seq_srv"
-#define LUSTRE_SEQ_CTL_NAME "seq_ctl"
-
-/* Range common stuff */
-static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
-{
- dst->lsr_start = cpu_to_le64(src->lsr_start);
- dst->lsr_end = cpu_to_le64(src->lsr_end);
- dst->lsr_index = cpu_to_le32(src->lsr_index);
- dst->lsr_flags = cpu_to_le32(src->lsr_flags);
-}
-
-static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
-{
- dst->lsr_start = le64_to_cpu(src->lsr_start);
- dst->lsr_end = le64_to_cpu(src->lsr_end);
- dst->lsr_index = le32_to_cpu(src->lsr_index);
- dst->lsr_flags = le32_to_cpu(src->lsr_flags);
-}
-
-static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
-{
- dst->lsr_start = cpu_to_be64(src->lsr_start);
- dst->lsr_end = cpu_to_be64(src->lsr_end);
- dst->lsr_index = cpu_to_be32(src->lsr_index);
- dst->lsr_flags = cpu_to_be32(src->lsr_flags);
-}
-
-static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
-{
- dst->lsr_start = be64_to_cpu(src->lsr_start);
- dst->lsr_end = be64_to_cpu(src->lsr_end);
- dst->lsr_index = be32_to_cpu(src->lsr_index);
- dst->lsr_flags = be32_to_cpu(src->lsr_flags);
-}
-
-/** @} fid */
-
-#endif /* __LUSTRE_FID_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
deleted file mode 100644
index 4055bbd24c55..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ /dev/null
@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LINUX_FLD_H
-#define __LINUX_FLD_H
-
-/** \defgroup fld fld
- *
- * @{
- */
-
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <linux/libcfs/libcfs.h>
-#include <seq_range.h>
-
-struct lu_client_fld;
-struct lu_server_fld;
-struct lu_fld_hash;
-struct fld_cache;
-
-extern const struct dt_index_features fld_index_features;
-extern const char fld_index_name[];
-
-/*
- * FLD (Fid Location Database) interface.
- */
-enum {
- LUSTRE_CLI_FLD_HASH_DHT = 0,
- LUSTRE_CLI_FLD_HASH_RRB
-};
-
-struct lu_fld_target {
- struct list_head ft_chain;
- struct obd_export *ft_exp;
- struct lu_server_fld *ft_srv;
- __u64 ft_idx;
-};
-
-struct lu_server_fld {
- /**
- * super sequence controller export, needed to forward fld
- * lookup request.
- */
- struct obd_export *lsf_control_exp;
-
- /** Client FLD cache. */
- struct fld_cache *lsf_cache;
-
- /** Protect index modifications */
- struct mutex lsf_lock;
-
- /** Fld service name in form "fld-srv-lustre-MDTXXX" */
- char lsf_name[LUSTRE_MDT_MAXNAMELEN];
-
-};
-
-struct lu_client_fld {
- /** Client side debugfs entry. */
- struct dentry *lcf_debugfs_entry;
-
- /** List of exports client FLD knows about. */
- struct list_head lcf_targets;
-
- /** Current hash to be used to chose an export. */
- struct lu_fld_hash *lcf_hash;
-
- /** Exports count. */
- int lcf_count;
-
- /** Lock protecting exports list and fld_hash. */
- spinlock_t lcf_lock;
-
- /** Client FLD cache. */
- struct fld_cache *lcf_cache;
-
- /** Client fld debugfs entry name. */
- char lcf_name[LUSTRE_MDT_MAXNAMELEN];
-};
-
-/* Client methods */
-int fld_client_init(struct lu_client_fld *fld,
- const char *prefix, int hash);
-
-void fld_client_fini(struct lu_client_fld *fld);
-
-void fld_client_flush(struct lu_client_fld *fld);
-
-int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
- __u32 flags, const struct lu_env *env);
-
-int fld_client_create(struct lu_client_fld *fld,
- struct lu_seq_range *range,
- const struct lu_env *env);
-
-int fld_client_delete(struct lu_client_fld *fld, u64 seq,
- const struct lu_env *env);
-
-int fld_client_add_target(struct lu_client_fld *fld,
- struct lu_fld_target *tar);
-
-int fld_client_del_target(struct lu_client_fld *fld,
- __u64 idx);
-
-void fld_client_debugfs_fini(struct lu_client_fld *fld);
-
-/** @} fld */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
deleted file mode 100644
index cbd68985ada9..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_HA_H
-#define _LUSTRE_HA_H
-
-/** \defgroup ha ha
- *
- * @{
- */
-
-struct obd_import;
-struct obd_export;
-struct obd_device;
-struct ptlrpc_request;
-
-int ptlrpc_replay(struct obd_import *imp);
-int ptlrpc_resend(struct obd_import *imp);
-void ptlrpc_free_committed(struct obd_import *imp);
-void ptlrpc_wake_delayed(struct obd_import *imp);
-int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async);
-int ptlrpc_set_import_active(struct obd_import *imp, int active);
-void ptlrpc_activate_import(struct obd_import *imp);
-void ptlrpc_deactivate_import(struct obd_import *imp);
-void ptlrpc_invalidate_import(struct obd_import *imp);
-void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt);
-void ptlrpc_pinger_force(struct obd_import *imp);
-
-/** @} ha */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
deleted file mode 100644
index c48c97362cf6..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_handles.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LUSTRE_HANDLES_H_
-#define __LUSTRE_HANDLES_H_
-
-/** \defgroup handles handles
- *
- * @{
- */
-
-#include <linux/atomic.h>
-#include <linux/list.h>
-#include <linux/rcupdate.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <linux/libcfs/libcfs.h>
-
-struct portals_handle_ops {
- void (*hop_addref)(void *object);
- void (*hop_free)(void *object, int size);
-};
-
-/* These handles are most easily used by having them appear at the very top of
- * whatever object that you want to make handles for. ie:
- *
- * struct ldlm_lock {
- * struct portals_handle handle;
- * ...
- * };
- *
- * Now you're able to assign the results of cookie2handle directly to an
- * ldlm_lock. If it's not at the top, you'll want to use container_of()
- * to compute the start of the structure based on the handle field.
- */
-struct portals_handle {
- struct list_head h_link;
- __u64 h_cookie;
- const void *h_owner;
- struct portals_handle_ops *h_ops;
-
- /* newly added fields to handle the RCU issue. -jxiong */
- struct rcu_head h_rcu;
- spinlock_t h_lock;
- unsigned int h_size:31;
- unsigned int h_in:1;
-};
-
-/* handles.c */
-
-/* Add a handle to the hash table */
-void class_handle_hash(struct portals_handle *,
- struct portals_handle_ops *ops);
-void class_handle_unhash(struct portals_handle *);
-void *class_handle2object(__u64 cookie, const void *owner);
-void class_handle_free_cb(struct rcu_head *rcu);
-int class_handle_init(void);
-void class_handle_cleanup(void);
-
-/** @} handles */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
deleted file mode 100644
index 1731048f1ff2..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ /dev/null
@@ -1,368 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/** \defgroup obd_import PtlRPC import definitions
- * Imports are client-side representation of remote obd target.
- *
- * @{
- */
-
-#ifndef __IMPORT_H
-#define __IMPORT_H
-
-/** \defgroup export export
- *
- * @{
- */
-
-#include <lustre_handles.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-
-/**
- * Adaptive Timeout stuff
- *
- * @{
- */
-#define D_ADAPTTO D_OTHER
-#define AT_BINS 4 /* "bin" means "N seconds of history" */
-#define AT_FLG_NOHIST 0x1 /* use last reported value only */
-
-struct adaptive_timeout {
- time64_t at_binstart; /* bin start time */
- unsigned int at_hist[AT_BINS]; /* timeout history bins */
- unsigned int at_flags;
- unsigned int at_current; /* current timeout value */
- unsigned int at_worst_ever; /* worst-ever timeout value */
- time64_t at_worst_time; /* worst-ever timeout timestamp */
- spinlock_t at_lock;
-};
-
-struct ptlrpc_at_array {
- struct list_head *paa_reqs_array; /** array to hold requests */
- __u32 paa_size; /** the size of array */
- __u32 paa_count; /** the total count of reqs */
- time64_t paa_deadline; /** the earliest deadline of reqs */
- __u32 *paa_reqs_count; /** the count of reqs in each entry */
-};
-
-#define IMP_AT_MAX_PORTALS 8
-struct imp_at {
- int iat_portal[IMP_AT_MAX_PORTALS];
- struct adaptive_timeout iat_net_latency;
- struct adaptive_timeout iat_service_estimate[IMP_AT_MAX_PORTALS];
-};
-
-/** @} */
-
-/** Possible import states */
-enum lustre_imp_state {
- LUSTRE_IMP_CLOSED = 1,
- LUSTRE_IMP_NEW = 2,
- LUSTRE_IMP_DISCON = 3,
- LUSTRE_IMP_CONNECTING = 4,
- LUSTRE_IMP_REPLAY = 5,
- LUSTRE_IMP_REPLAY_LOCKS = 6,
- LUSTRE_IMP_REPLAY_WAIT = 7,
- LUSTRE_IMP_RECOVER = 8,
- LUSTRE_IMP_FULL = 9,
- LUSTRE_IMP_EVICTED = 10,
-};
-
-/** Returns test string representation of numeric import state \a state */
-static inline char *ptlrpc_import_state_name(enum lustre_imp_state state)
-{
- static char *import_state_names[] = {
- "<UNKNOWN>", "CLOSED", "NEW", "DISCONN",
- "CONNECTING", "REPLAY", "REPLAY_LOCKS", "REPLAY_WAIT",
- "RECOVER", "FULL", "EVICTED",
- };
-
- LASSERT(state <= LUSTRE_IMP_EVICTED);
- return import_state_names[state];
-}
-
-/**
- * List of import event types
- */
-enum obd_import_event {
- IMP_EVENT_DISCON = 0x808001,
- IMP_EVENT_INACTIVE = 0x808002,
- IMP_EVENT_INVALIDATE = 0x808003,
- IMP_EVENT_ACTIVE = 0x808004,
- IMP_EVENT_OCD = 0x808005,
- IMP_EVENT_DEACTIVATE = 0x808006,
- IMP_EVENT_ACTIVATE = 0x808007,
-};
-
-/**
- * Definition of import connection structure
- */
-struct obd_import_conn {
- /** Item for linking connections together */
- struct list_head oic_item;
- /** Pointer to actual PortalRPC connection */
- struct ptlrpc_connection *oic_conn;
- /** uuid of remote side */
- struct obd_uuid oic_uuid;
- /**
- * Time (64 bit jiffies) of last connection attempt on this connection
- */
- __u64 oic_last_attempt;
-};
-
-/* state history */
-#define IMP_STATE_HIST_LEN 16
-struct import_state_hist {
- enum lustre_imp_state ish_state;
- time64_t ish_time;
-};
-
-/**
- * Definition of PortalRPC import structure.
- * Imports are representing client-side view to remote target.
- */
-struct obd_import {
- /** Local handle (== id) for this import. */
- struct portals_handle imp_handle;
- /** Reference counter */
- atomic_t imp_refcount;
- struct lustre_handle imp_dlm_handle; /* client's ldlm export */
- /** Currently active connection */
- struct ptlrpc_connection *imp_connection;
- /** PortalRPC client structure for this import */
- struct ptlrpc_client *imp_client;
- /** List element for linking into pinger chain */
- struct list_head imp_pinger_chain;
- /** work struct for destruction of import */
- struct work_struct imp_zombie_work;
-
- /**
- * Lists of requests that are retained for replay, waiting for a reply,
- * or waiting for recovery to complete, respectively.
- * @{
- */
- struct list_head imp_replay_list;
- struct list_head imp_sending_list;
- struct list_head imp_delayed_list;
- /** @} */
-
- /**
- * List of requests that are retained for committed open replay. Once
- * open is committed, open replay request will be moved from the
- * imp_replay_list into the imp_committed_list.
- * The imp_replay_cursor is for accelerating searching during replay.
- * @{
- */
- struct list_head imp_committed_list;
- struct list_head *imp_replay_cursor;
- /** @} */
-
- /** List of not replied requests */
- struct list_head imp_unreplied_list;
- /** Known maximal replied XID */
- __u64 imp_known_replied_xid;
-
- /** obd device for this import */
- struct obd_device *imp_obd;
-
- /**
- * some seciruty-related fields
- * @{
- */
- struct ptlrpc_sec *imp_sec;
- struct mutex imp_sec_mutex;
- time64_t imp_sec_expire;
- /** @} */
-
- /** Wait queue for those who need to wait for recovery completion */
- wait_queue_head_t imp_recovery_waitq;
-
- /** Number of requests currently in-flight */
- atomic_t imp_inflight;
- /** Number of requests currently unregistering */
- atomic_t imp_unregistering;
- /** Number of replay requests inflight */
- atomic_t imp_replay_inflight;
- /** Number of currently happening import invalidations */
- atomic_t imp_inval_count;
- /** Numbner of request timeouts */
- atomic_t imp_timeouts;
- /** Current import state */
- enum lustre_imp_state imp_state;
- /** Last replay state */
- enum lustre_imp_state imp_replay_state;
- /** History of import states */
- struct import_state_hist imp_state_hist[IMP_STATE_HIST_LEN];
- int imp_state_hist_idx;
- /** Current import generation. Incremented on every reconnect */
- int imp_generation;
- /** Incremented every time we send reconnection request */
- __u32 imp_conn_cnt;
- /**
- * \see ptlrpc_free_committed remembers imp_generation value here
- * after a check to save on unnecessary replay list iterations
- */
- int imp_last_generation_checked;
- /** Last transno we replayed */
- __u64 imp_last_replay_transno;
- /** Last transno committed on remote side */
- __u64 imp_peer_committed_transno;
- /**
- * \see ptlrpc_free_committed remembers last_transno since its last
- * check here and if last_transno did not change since last run of
- * ptlrpc_free_committed and import generation is the same, we can
- * skip looking for requests to remove from replay list as optimisation
- */
- __u64 imp_last_transno_checked;
- /**
- * Remote export handle. This is how remote side knows what export
- * we are talking to. Filled from response to connect request
- */
- struct lustre_handle imp_remote_handle;
- /** When to perform next ping. time in jiffies. */
- unsigned long imp_next_ping;
- /** When we last successfully connected. time in 64bit jiffies */
- __u64 imp_last_success_conn;
-
- /** List of all possible connection for import. */
- struct list_head imp_conn_list;
- /**
- * Current connection. \a imp_connection is imp_conn_current->oic_conn
- */
- struct obd_import_conn *imp_conn_current;
-
- /** Protects flags, level, generation, conn_cnt, *_list */
- spinlock_t imp_lock;
-
- /* flags */
- unsigned long imp_no_timeout:1, /* timeouts are disabled */
- imp_invalid:1, /* evicted */
- /* administratively disabled */
- imp_deactive:1,
- /* try to recover the import */
- imp_replayable:1,
- /* don't run recovery (timeout instead) */
- imp_dlm_fake:1,
- /* use 1/2 timeout on MDS' OSCs */
- imp_server_timeout:1,
- /* VBR: imp in delayed recovery */
- imp_delayed_recovery:1,
- /* VBR: if gap was found then no lock replays
- */
- imp_no_lock_replay:1,
- /* recovery by versions was failed */
- imp_vbr_failed:1,
- /* force an immediate ping */
- imp_force_verify:1,
- /* force a scheduled ping */
- imp_force_next_verify:1,
- /* pingable */
- imp_pingable:1,
- /* resend for replay */
- imp_resend_replay:1,
- /* disable normal recovery, for test only. */
- imp_no_pinger_recover:1,
-#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
- /* need IR MNE swab */
- imp_need_mne_swab:1,
-#endif
- /* import must be reconnected instead of
- * chosing new connection
- */
- imp_force_reconnect:1,
- /* import has tried to connect with server */
- imp_connect_tried:1,
- /* connected but not FULL yet */
- imp_connected:1;
- __u32 imp_connect_op;
- struct obd_connect_data imp_connect_data;
- __u64 imp_connect_flags_orig;
- int imp_connect_error;
-
- __u32 imp_msg_magic;
- __u32 imp_msghdr_flags; /* adjusted based on server capability */
-
- struct imp_at imp_at; /* adaptive timeout data */
- time64_t imp_last_reply_time; /* for health check */
-};
-
-/* import.c */
-static inline unsigned int at_est2timeout(unsigned int val)
-{
- /* add an arbitrary minimum: 125% +5 sec */
- return (val + (val >> 2) + 5);
-}
-
-static inline unsigned int at_timeout2est(unsigned int val)
-{
- /* restore estimate value from timeout: e=4/5(t-5) */
- LASSERT(val);
- return (max((val << 2) / 5, 5U) - 4);
-}
-
-static inline void at_reset(struct adaptive_timeout *at, int val)
-{
- spin_lock(&at->at_lock);
- at->at_current = val;
- at->at_worst_ever = val;
- at->at_worst_time = ktime_get_real_seconds();
- spin_unlock(&at->at_lock);
-}
-
-static inline void at_init(struct adaptive_timeout *at, int val, int flags)
-{
- memset(at, 0, sizeof(*at));
- spin_lock_init(&at->at_lock);
- at->at_flags = flags;
- at_reset(at, val);
-}
-
-extern unsigned int at_min;
-static inline int at_get(struct adaptive_timeout *at)
-{
- return (at->at_current > at_min) ? at->at_current : at_min;
-}
-
-int at_measured(struct adaptive_timeout *at, unsigned int val);
-int import_at_get_index(struct obd_import *imp, int portal);
-extern unsigned int at_max;
-#define AT_OFF (at_max == 0)
-
-/* genops.c */
-struct obd_export;
-struct obd_import *class_exp2cliimp(struct obd_export *);
-
-/** @} import */
-
-#endif /* __IMPORT_H */
-
-/** @} obd_import */
diff --git a/drivers/staging/lustre/lustre/include/lustre_intent.h b/drivers/staging/lustre/lustre/include/lustre_intent.h
deleted file mode 100644
index 519e94fc089d..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_intent.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef LUSTRE_INTENT_H
-#define LUSTRE_INTENT_H
-
-/* intent IT_XXX are defined in lustre/include/obd.h */
-
-struct lookup_intent {
- int it_op;
- int it_create_mode;
- __u64 it_flags;
- int it_disposition;
- int it_status;
- __u64 it_lock_handle;
- __u64 it_lock_bits;
- int it_lock_mode;
- int it_remote_lock_mode;
- __u64 it_remote_lock_handle;
- struct ptlrpc_request *it_request;
- unsigned int it_lock_set:1;
-};
-
-static inline int it_disposition(struct lookup_intent *it, int flag)
-{
- return it->it_disposition & flag;
-}
-
-static inline void it_set_disposition(struct lookup_intent *it, int flag)
-{
- it->it_disposition |= flag;
-}
-
-static inline void it_clear_disposition(struct lookup_intent *it, int flag)
-{
- it->it_disposition &= ~flag;
-}
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
deleted file mode 100644
index 2b3fa8430185..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2013 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- *
- * Kernel <-> userspace communication routines.
- * The definitions below are used in the kernel and userspace.
- */
-
-#ifndef __LUSTRE_KERNELCOMM_H__
-#define __LUSTRE_KERNELCOMM_H__
-
-/* For declarations shared with userspace */
-#include <uapi/linux/lustre/lustre_kernelcomm.h>
-
-/* prototype for callback function on kuc groups */
-typedef int (*libcfs_kkuc_cb_t)(void *data, void *cb_arg);
-
-/* Kernel methods */
-int libcfs_kkuc_msg_put(struct file *fp, void *payload);
-int libcfs_kkuc_group_put(unsigned int group, void *payload);
-int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group,
- void *data, size_t data_len);
-int libcfs_kkuc_group_rem(int uid, unsigned int group);
-int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func,
- void *cb_arg);
-
-#endif /* __LUSTRE_KERNELCOMM_H__ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
deleted file mode 100644
index 0053eafc1c10..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_lib.h
- *
- * Basic Lustre library routines.
- */
-
-#ifndef _LUSTRE_LIB_H
-#define _LUSTRE_LIB_H
-
-/** \defgroup lib lib
- *
- * @{
- */
-
-#include <linux/sched/signal.h>
-#include <linux/signal.h>
-#include <linux/types.h>
-#include <linux/libcfs/libcfs.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <uapi/linux/lustre/lustre_ver.h>
-#include <uapi/linux/lustre/lustre_cfg.h>
-
-/* target.c */
-struct ptlrpc_request;
-struct obd_export;
-struct lu_target;
-struct l_wait_info;
-#include <lustre_ha.h>
-#include <lustre_net.h>
-
-#define LI_POISON 0x5a5a5a5a
-#if BITS_PER_LONG > 32
-# define LL_POISON 0x5a5a5a5a5a5a5a5aL
-#else
-# define LL_POISON 0x5a5a5a5aL
-#endif
-#define LP_POISON ((void *)LL_POISON)
-
-int target_pack_pool_reply(struct ptlrpc_request *req);
-int do_set_info_async(struct obd_import *imp,
- int opcode, int version,
- u32 keylen, void *key,
- u32 vallen, void *val,
- struct ptlrpc_request_set *set);
-
-void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
-
-#define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) | \
- sigmask(SIGTERM) | sigmask(SIGQUIT) | \
- sigmask(SIGALRM))
-static inline int l_fatal_signal_pending(struct task_struct *p)
-{
- return signal_pending(p) && sigtestsetmask(&p->pending.signal, LUSTRE_FATAL_SIGS);
-}
-
-/** @} lib */
-
-
-
-/* l_wait_event_abortable() is a bit like wait_event_killable()
- * except there is a fixed set of signals which will abort:
- * LUSTRE_FATAL_SIGS
- */
-#define l_wait_event_abortable(wq, condition) \
-({ \
- sigset_t __old_blocked; \
- int __ret = 0; \
- cfs_block_sigsinv(LUSTRE_FATAL_SIGS, &__old_blocked); \
- __ret = wait_event_interruptible(wq, condition); \
- cfs_restore_sigs(&__old_blocked); \
- __ret; \
-})
-
-#define l_wait_event_abortable_timeout(wq, condition, timeout) \
-({ \
- sigset_t __old_blocked; \
- int __ret = 0; \
- cfs_block_sigsinv(LUSTRE_FATAL_SIGS, &__old_blocked); \
- __ret = wait_event_interruptible_timeout(wq, condition, timeout);\
- cfs_restore_sigs(&__old_blocked); \
- __ret; \
-})
-
-#define l_wait_event_abortable_exclusive(wq, condition) \
-({ \
- sigset_t __old_blocked; \
- int __ret = 0; \
- cfs_block_sigsinv(LUSTRE_FATAL_SIGS, &__old_blocked); \
- __ret = wait_event_interruptible_exclusive(wq, condition); \
- cfs_restore_sigs(&__old_blocked); \
- __ret; \
-})
-#endif /* _LUSTRE_LIB_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_linkea.h b/drivers/staging/lustre/lustre/include/lustre_linkea.h
deleted file mode 100644
index 03db1511bfd3..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_linkea.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2013, 2014, Intel Corporation.
- * Use is subject to license terms.
- *
- * Author: di wang <di.wang@intel.com>
- */
-
-/* There are several reasons to restrict the linkEA size:
- *
- * 1. Under DNE mode, if we do not restrict the linkEA size, and if there
- * are too many cross-MDTs hard links to the same object, then it will
- * casue the llog overflow.
- *
- * 2. Some backend has limited size for EA. For example, if without large
- * EA enabled, the ldiskfs will make all EAs to share one (4K) EA block.
- *
- * 3. Too many entries in linkEA will seriously affect linkEA performance
- * because we only support to locate linkEA entry consecutively.
- */
-#define MAX_LINKEA_SIZE 4096
-
-struct linkea_data {
- /**
- * Buffer to keep link EA body.
- */
- struct lu_buf *ld_buf;
- /**
- * The matched header, entry and its length in the EA
- */
- struct link_ea_header *ld_leh;
- struct link_ea_entry *ld_lee;
- int ld_reclen;
-};
-
-int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf);
-int linkea_init(struct linkea_data *ldata);
-int linkea_init_with_rec(struct linkea_data *ldata);
-void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen,
- struct lu_name *lname, struct lu_fid *pfid);
-int linkea_entry_pack(struct link_ea_entry *lee, const struct lu_name *lname,
- const struct lu_fid *pfid);
-int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname,
- const struct lu_fid *pfid);
-void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname);
-int linkea_links_find(struct linkea_data *ldata, const struct lu_name *lname,
- const struct lu_fid *pfid);
-
-static inline void linkea_first_entry(struct linkea_data *ldata)
-{
- LASSERT(ldata);
- LASSERT(ldata->ld_leh);
-
- if (ldata->ld_leh->leh_reccount == 0)
- ldata->ld_lee = NULL;
- else
- ldata->ld_lee = (struct link_ea_entry *)(ldata->ld_leh + 1);
-}
-
-static inline void linkea_next_entry(struct linkea_data *ldata)
-{
- LASSERT(ldata);
- LASSERT(ldata->ld_leh);
-
- if (ldata->ld_lee) {
- ldata->ld_lee = (struct link_ea_entry *)((char *)ldata->ld_lee +
- ldata->ld_reclen);
- if ((char *)ldata->ld_lee >= ((char *)ldata->ld_leh +
- ldata->ld_leh->leh_len))
- ldata->ld_lee = NULL;
- }
-}
diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h
deleted file mode 100644
index 080ec1f8e19f..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_lmv.h
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details. A copy is
- * included in the COPYING file that accompanied this code.
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2013, Intel Corporation.
- */
-/*
- * lustre/include/lustre_lmv.h
- *
- * Lustre LMV structures and functions.
- *
- * Author: Di Wang <di.wang@intel.com>
- */
-
-#ifndef _LUSTRE_LMV_H
-#define _LUSTRE_LMV_H
-#include <uapi/linux/lustre/lustre_idl.h>
-
-struct lmv_oinfo {
- struct lu_fid lmo_fid;
- u32 lmo_mds;
- struct inode *lmo_root;
-};
-
-struct lmv_stripe_md {
- __u32 lsm_md_magic;
- __u32 lsm_md_stripe_count;
- __u32 lsm_md_master_mdt_index;
- __u32 lsm_md_hash_type;
- __u32 lsm_md_layout_version;
- __u32 lsm_md_default_count;
- __u32 lsm_md_default_index;
- char lsm_md_pool_name[LOV_MAXPOOLNAME + 1];
- struct lmv_oinfo lsm_md_oinfo[0];
-};
-
-static inline bool
-lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
-{
- __u32 idx;
-
- if (lsm1->lsm_md_magic != lsm2->lsm_md_magic ||
- lsm1->lsm_md_stripe_count != lsm2->lsm_md_stripe_count ||
- lsm1->lsm_md_master_mdt_index != lsm2->lsm_md_master_mdt_index ||
- lsm1->lsm_md_hash_type != lsm2->lsm_md_hash_type ||
- lsm1->lsm_md_layout_version != lsm2->lsm_md_layout_version ||
- strcmp(lsm1->lsm_md_pool_name, lsm2->lsm_md_pool_name) != 0)
- return false;
-
- for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
- if (!lu_fid_eq(&lsm1->lsm_md_oinfo[idx].lmo_fid,
- &lsm2->lsm_md_oinfo[idx].lmo_fid))
- return false;
- }
-
- return true;
-}
-
-union lmv_mds_md;
-
-void lmv_free_memmd(struct lmv_stripe_md *lsm);
-
-static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
- const struct lmv_mds_md_v1 *lmv_src)
-{
- __u32 i;
-
- lmv_dst->lmv_magic = le32_to_cpu(lmv_src->lmv_magic);
- lmv_dst->lmv_stripe_count = le32_to_cpu(lmv_src->lmv_stripe_count);
- lmv_dst->lmv_master_mdt_index =
- le32_to_cpu(lmv_src->lmv_master_mdt_index);
- lmv_dst->lmv_hash_type = le32_to_cpu(lmv_src->lmv_hash_type);
- lmv_dst->lmv_layout_version = le32_to_cpu(lmv_src->lmv_layout_version);
-
- for (i = 0; i < lmv_src->lmv_stripe_count; i++)
- fid_le_to_cpu(&lmv_dst->lmv_stripe_fids[i],
- &lmv_src->lmv_stripe_fids[i]);
-}
-
-static inline void lmv_le_to_cpu(union lmv_mds_md *lmv_dst,
- const union lmv_mds_md *lmv_src)
-{
- switch (le32_to_cpu(lmv_src->lmv_magic)) {
- case LMV_MAGIC_V1:
- lmv1_le_to_cpu(&lmv_dst->lmv_md_v1, &lmv_src->lmv_md_v1);
- break;
- default:
- break;
- }
-}
-
-/* This hash is only for testing purpose */
-static inline unsigned int
-lmv_hash_all_chars(unsigned int count, const char *name, int namelen)
-{
- const unsigned char *p = (const unsigned char *)name;
- unsigned int c = 0;
-
- while (--namelen >= 0)
- c += p[namelen];
-
- c = c % count;
-
- return c;
-}
-
-static inline unsigned int
-lmv_hash_fnv1a(unsigned int count, const char *name, int namelen)
-{
- __u64 hash;
-
- hash = lustre_hash_fnv_1a_64(name, namelen);
-
- return do_div(hash, count);
-}
-
-static inline int lmv_name_to_stripe_index(__u32 lmv_hash_type,
- unsigned int stripe_count,
- const char *name, int namelen)
-{
- __u32 hash_type = lmv_hash_type & LMV_HASH_TYPE_MASK;
- int idx;
-
- LASSERT(namelen > 0);
- if (stripe_count <= 1)
- return 0;
-
- /* for migrating object, always start from 0 stripe */
- if (lmv_hash_type & LMV_HASH_FLAG_MIGRATION)
- return 0;
-
- switch (hash_type) {
- case LMV_HASH_TYPE_ALL_CHARS:
- idx = lmv_hash_all_chars(stripe_count, name, namelen);
- break;
- case LMV_HASH_TYPE_FNV_1A_64:
- idx = lmv_hash_fnv1a(stripe_count, name, namelen);
- break;
- default:
- idx = -EBADFD;
- break;
- }
- CDEBUG(D_INFO, "name %.*s hash_type %d idx %d\n", namelen, name,
- hash_type, idx);
-
- return idx;
-}
-
-static inline bool lmv_is_known_hash_type(__u32 type)
-{
- return (type & LMV_HASH_TYPE_MASK) == LMV_HASH_TYPE_FNV_1A_64 ||
- (type & LMV_HASH_TYPE_MASK) == LMV_HASH_TYPE_ALL_CHARS;
-}
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
deleted file mode 100644
index 07f4e600386b..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ /dev/null
@@ -1,382 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_log.h
- *
- * Generic infrastructure for managing a collection of logs.
- * These logs are used for:
- *
- * - orphan recovery: OST adds record on create
- * - mtime/size consistency: the OST adds a record on first write
- * - open/unlinked objects: OST adds a record on destroy
- *
- * - mds unlink log: the MDS adds an entry upon delete
- *
- * - raid1 replication log between OST's
- * - MDS replication logs
- */
-
-#ifndef _LUSTRE_LOG_H
-#define _LUSTRE_LOG_H
-
-/** \defgroup log log
- *
- * @{
- */
-
-#include <obd_class.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-
-#define LOG_NAME_LIMIT(logname, name) \
- snprintf(logname, sizeof(logname), "LOGS/%s", name)
-#define LLOG_EEMPTY 4711
-
-enum llog_open_param {
- LLOG_OPEN_EXISTS = 0x0000,
- LLOG_OPEN_NEW = 0x0001,
-};
-
-struct plain_handle_data {
- struct list_head phd_entry;
- struct llog_handle *phd_cat_handle;
- struct llog_cookie phd_cookie; /* cookie of this log in its cat */
-};
-
-struct cat_handle_data {
- struct list_head chd_head;
- struct llog_handle *chd_current_log; /* currently open log */
- struct llog_handle *chd_next_log; /* llog to be used next */
-};
-
-struct llog_handle;
-
-/* llog.c - general API */
-int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
- int flags, struct obd_uuid *uuid);
-int llog_process(const struct lu_env *env, struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata);
-int llog_process_or_fork(const struct lu_env *env,
- struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata, bool fork);
-int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
- struct llog_handle **lgh, struct llog_logid *logid,
- char *name, enum llog_open_param open_param);
-int llog_close(const struct lu_env *env, struct llog_handle *cathandle);
-
-/* llog_process flags */
-#define LLOG_FLAG_NODEAMON 0x0001
-
-/* llog_cat.c - catalog api */
-struct llog_process_data {
- /**
- * Any useful data needed while processing catalog. This is
- * passed later to process callback.
- */
- void *lpd_data;
- /**
- * Catalog process callback function, called for each record
- * in catalog.
- */
- llog_cb_t lpd_cb;
- /**
- * Start processing the catalog from startcat/startidx
- */
- int lpd_startcat;
- int lpd_startidx;
-};
-
-struct llog_process_cat_data {
- /**
- * Temporary stored first_idx while scanning log.
- */
- int lpcd_first_idx;
- /**
- * Temporary stored last_idx while scanning log.
- */
- int lpcd_last_idx;
-};
-
-struct thandle;
-
-int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle);
-int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
- llog_cb_t cb, void *data, int startcat, int startidx);
-
-/* llog_obd.c */
-int llog_setup(const struct lu_env *env, struct obd_device *obd,
- struct obd_llog_group *olg, int index,
- struct obd_device *disk_obd, struct llog_operations *op);
-int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt);
-int llog_cleanup(const struct lu_env *env, struct llog_ctxt *);
-
-/* llog_net.c */
-int llog_initiator_connect(struct llog_ctxt *ctxt);
-
-struct llog_operations {
- int (*lop_next_block)(const struct lu_env *env, struct llog_handle *h,
- int *curr_idx, int next_idx, __u64 *offset,
- void *buf, int len);
- int (*lop_prev_block)(const struct lu_env *env, struct llog_handle *h,
- int prev_idx, void *buf, int len);
- int (*lop_read_header)(const struct lu_env *env,
- struct llog_handle *handle);
- int (*lop_setup)(const struct lu_env *env, struct obd_device *obd,
- struct obd_llog_group *olg, int ctxt_idx,
- struct obd_device *disk_obd);
- int (*lop_sync)(struct llog_ctxt *ctxt, struct obd_export *exp,
- int flags);
- int (*lop_cleanup)(const struct lu_env *env, struct llog_ctxt *ctxt);
- int (*lop_cancel)(const struct lu_env *env, struct llog_ctxt *ctxt,
- struct llog_cookie *cookies, int flags);
- int (*lop_connect)(struct llog_ctxt *ctxt, struct llog_logid *logid,
- struct llog_gen *gen, struct obd_uuid *uuid);
- /**
- * Any llog file must be opened first using llog_open(). Llog can be
- * opened by name, logid or without both, in last case the new logid
- * will be generated.
- */
- int (*lop_open)(const struct lu_env *env, struct llog_handle *lgh,
- struct llog_logid *logid, char *name,
- enum llog_open_param);
- /**
- * Opened llog may not exist and this must be checked where needed using
- * the llog_exist() call.
- */
- int (*lop_exist)(struct llog_handle *lgh);
- /**
- * Close llog file and calls llog_free_handle() implicitly.
- * Any opened llog must be closed by llog_close() call.
- */
- int (*lop_close)(const struct lu_env *env, struct llog_handle *handle);
- /**
- * Create new llog file. The llog must be opened.
- * Must be used only for local llog operations.
- */
- int (*lop_declare_create)(const struct lu_env *env,
- struct llog_handle *handle,
- struct thandle *th);
- /**
- * write new record in llog. It appends records usually but can edit
- * existing records too.
- */
- int (*lop_declare_write_rec)(const struct lu_env *env,
- struct llog_handle *lgh,
- struct llog_rec_hdr *rec,
- int idx, struct thandle *th);
- int (*lop_write_rec)(const struct lu_env *env,
- struct llog_handle *loghandle,
- struct llog_rec_hdr *rec,
- struct llog_cookie *cookie, int cookiecount,
- void *buf, int idx, struct thandle *th);
- /**
- * Add new record in llog catalog. Does the same as llog_write_rec()
- * but using llog catalog.
- */
- int (*lop_declare_add)(const struct lu_env *env,
- struct llog_handle *lgh,
- struct llog_rec_hdr *rec, struct thandle *th);
- int (*lop_add)(const struct lu_env *env, struct llog_handle *lgh,
- struct llog_rec_hdr *rec, struct llog_cookie *cookie,
- void *buf, struct thandle *th);
-};
-
-/* In-memory descriptor for a log object or log catalog */
-struct llog_handle {
- struct rw_semaphore lgh_lock;
- spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */
- struct llog_logid lgh_id; /* id of this log */
- struct llog_log_hdr *lgh_hdr;
- size_t lgh_hdr_size;
- int lgh_last_idx;
- int lgh_cur_idx; /* used during llog_process */
- __u64 lgh_cur_offset; /* used during llog_process */
- struct llog_ctxt *lgh_ctxt;
- union {
- struct plain_handle_data phd;
- struct cat_handle_data chd;
- } u;
- char *lgh_name;
- void *private_data;
- struct llog_operations *lgh_logops;
- atomic_t lgh_refcount;
-};
-
-#define LLOG_CTXT_FLAG_UNINITIALIZED 0x00000001
-#define LLOG_CTXT_FLAG_STOP 0x00000002
-
-struct llog_ctxt {
- int loc_idx; /* my index the obd array of ctxt's */
- struct obd_device *loc_obd; /* points back to the containing obd*/
- struct obd_llog_group *loc_olg; /* group containing that ctxt */
- struct obd_export *loc_exp; /* parent "disk" export (e.g. MDS) */
- struct obd_import *loc_imp; /* to use in RPC's: can be backward
- * pointing import
- */
- struct llog_operations *loc_logops;
- struct llog_handle *loc_handle;
- struct mutex loc_mutex; /* protect loc_imp */
- atomic_t loc_refcount;
- long loc_flags; /* flags, see above defines */
- /*
- * llog chunk size, and llog record size can not be bigger than
- * loc_chunk_size
- */
- __u32 loc_chunk_size;
-};
-
-#define LLOG_PROC_BREAK 0x0001
-#define LLOG_DEL_RECORD 0x0002
-
-static inline int llog_handle2ops(struct llog_handle *loghandle,
- struct llog_operations **lop)
-{
- if (!loghandle || !loghandle->lgh_logops)
- return -EINVAL;
-
- *lop = loghandle->lgh_logops;
- return 0;
-}
-
-static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
-{
- atomic_inc(&ctxt->loc_refcount);
- CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt,
- atomic_read(&ctxt->loc_refcount));
- return ctxt;
-}
-
-static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
-{
- if (!ctxt)
- return;
- LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON);
- CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
- atomic_read(&ctxt->loc_refcount) - 1);
- __llog_ctxt_put(NULL, ctxt);
-}
-
-static inline void llog_group_init(struct obd_llog_group *olg)
-{
- init_waitqueue_head(&olg->olg_waitq);
- spin_lock_init(&olg->olg_lock);
- mutex_init(&olg->olg_cat_processing);
-}
-
-static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
- struct llog_ctxt *ctxt, int index)
-{
- LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
-
- spin_lock(&olg->olg_lock);
- if (olg->olg_ctxts[index]) {
- spin_unlock(&olg->olg_lock);
- return -EEXIST;
- }
- olg->olg_ctxts[index] = ctxt;
- spin_unlock(&olg->olg_lock);
- return 0;
-}
-
-static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg,
- int index)
-{
- struct llog_ctxt *ctxt;
-
- LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
-
- spin_lock(&olg->olg_lock);
- if (!olg->olg_ctxts[index])
- ctxt = NULL;
- else
- ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
- spin_unlock(&olg->olg_lock);
- return ctxt;
-}
-
-static inline void llog_group_clear_ctxt(struct obd_llog_group *olg, int index)
-{
- LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
- spin_lock(&olg->olg_lock);
- olg->olg_ctxts[index] = NULL;
- spin_unlock(&olg->olg_lock);
-}
-
-static inline struct llog_ctxt *llog_get_context(struct obd_device *obd,
- int index)
-{
- return llog_group_get_ctxt(&obd->obd_olg, index);
-}
-
-static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index)
-{
- return (!olg->olg_ctxts[index]);
-}
-
-static inline int llog_ctxt_null(struct obd_device *obd, int index)
-{
- return llog_group_ctxt_null(&obd->obd_olg, index);
-}
-
-static inline int llog_next_block(const struct lu_env *env,
- struct llog_handle *loghandle, int *cur_idx,
- int next_idx, __u64 *cur_offset, void *buf,
- int len)
-{
- struct llog_operations *lop;
- int rc;
-
- rc = llog_handle2ops(loghandle, &lop);
- if (rc)
- return rc;
- if (!lop->lop_next_block)
- return -EOPNOTSUPP;
-
- rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx,
- cur_offset, buf, len);
- return rc;
-}
-
-/* llog.c */
-int llog_declare_write_rec(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, int idx,
- struct thandle *th);
-int llog_write_rec(const struct lu_env *env, struct llog_handle *handle,
- struct llog_rec_hdr *rec, struct llog_cookie *logcookies,
- int numcookies, void *buf, int idx, struct thandle *th);
-int lustre_process_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg);
-int lustre_end_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg);
-/** @} log */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
deleted file mode 100644
index a9c9992a2502..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ /dev/null
@@ -1,229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_mdc.h
- *
- * MDS data structures.
- * See also lustre_idl.h for wire formats of requests.
- */
-
-#ifndef _LUSTRE_MDC_H
-#define _LUSTRE_MDC_H
-
-/** \defgroup mdc mdc
- *
- * @{
- */
-
-#include <linux/fs.h>
-#include <linux/dcache.h>
-#include <lustre_intent.h>
-#include <lustre_handles.h>
-#include <linux/libcfs/libcfs.h>
-#include <obd_class.h>
-#include <lustre_lib.h>
-#include <lustre_dlm.h>
-#include <lustre_export.h>
-
-struct ptlrpc_client;
-struct obd_export;
-struct ptlrpc_request;
-struct obd_device;
-
-/**
- * Serializes in-flight MDT-modifying RPC requests to preserve idempotency.
- *
- * This mutex is used to implement execute-once semantics on the MDT.
- * The MDT stores the last transaction ID and result for every client in
- * its last_rcvd file. If the client doesn't get a reply, it can safely
- * resend the request and the MDT will reconstruct the reply being aware
- * that the request has already been executed. Without this lock,
- * execution status of concurrent in-flight requests would be
- * overwritten.
- *
- * This design limits the extent to which we can keep a full pipeline of
- * in-flight requests from a single client. This limitation could be
- * overcome by allowing multiple slots per client in the last_rcvd file.
- */
-struct mdc_rpc_lock {
- /** Lock protecting in-flight RPC concurrency. */
- struct mutex rpcl_mutex;
- /** Intent associated with currently executing request. */
- struct lookup_intent *rpcl_it;
- /** Used for MDS/RPC load testing purposes. */
- int rpcl_fakes;
-};
-
-#define MDC_FAKE_RPCL_IT ((void *)0x2c0012bfUL)
-
-static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
-{
- mutex_init(&lck->rpcl_mutex);
- lck->rpcl_it = NULL;
-}
-
-static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
- struct lookup_intent *it)
-{
- if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
- return;
-
- /* This would normally block until the existing request finishes.
- * If fail_loc is set it will block until the regular request is
- * done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set
- * it will only be cleared when all fake requests are finished.
- * Only when all fake requests are finished can normal requests
- * be sent, to ensure they are recoverable again.
- */
- again:
- mutex_lock(&lck->rpcl_mutex);
-
- if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM)) {
- lck->rpcl_it = MDC_FAKE_RPCL_IT;
- lck->rpcl_fakes++;
- mutex_unlock(&lck->rpcl_mutex);
- return;
- }
-
- /* This will only happen when the CFS_FAIL_CHECK() was
- * just turned off but there are still requests in progress.
- * Wait until they finish. It doesn't need to be efficient
- * in this extremely rare case, just have low overhead in
- * the common case when it isn't true.
- */
- while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
- mutex_unlock(&lck->rpcl_mutex);
- schedule_timeout(HZ / 4);
- goto again;
- }
-
- LASSERT(!lck->rpcl_it);
- lck->rpcl_it = it;
-}
-
-static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
- struct lookup_intent *it)
-{
- if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
- return;
-
- if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
- mutex_lock(&lck->rpcl_mutex);
-
- LASSERTF(lck->rpcl_fakes > 0, "%d\n", lck->rpcl_fakes);
- lck->rpcl_fakes--;
-
- if (lck->rpcl_fakes == 0)
- lck->rpcl_it = NULL;
-
- } else {
- LASSERTF(it == lck->rpcl_it, "%p != %p\n", it, lck->rpcl_it);
- lck->rpcl_it = NULL;
- }
-
- mutex_unlock(&lck->rpcl_mutex);
-}
-
-static inline void mdc_get_mod_rpc_slot(struct ptlrpc_request *req,
- struct lookup_intent *it)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- u32 opc;
- u16 tag;
-
- opc = lustre_msg_get_opc(req->rq_reqmsg);
- tag = obd_get_mod_rpc_slot(cli, opc, it);
- lustre_msg_set_tag(req->rq_reqmsg, tag);
-}
-
-static inline void mdc_put_mod_rpc_slot(struct ptlrpc_request *req,
- struct lookup_intent *it)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- u32 opc;
- u16 tag;
-
- opc = lustre_msg_get_opc(req->rq_reqmsg);
- tag = lustre_msg_get_tag(req->rq_reqmsg);
- obd_put_mod_rpc_slot(cli, opc, it, tag);
-}
-
-/**
- * Update the maximum possible easize.
- *
- * This value is learned from ptlrpc replies sent by the MDT. The
- * default easize is initialized to the minimum value but allowed
- * to grow up to a single page in size if required to handle the
- * common case.
- *
- * \see client_obd::cl_default_mds_easize
- *
- * \param[in] exp export for MDC device
- * \param[in] body body of ptlrpc reply from MDT
- *
- */
-static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
- struct mdt_body *body)
-{
- if (body->mbo_valid & OBD_MD_FLMODEASIZE) {
- struct client_obd *cli = &exp->exp_obd->u.cli;
- u32 def_easize;
-
- if (cli->cl_max_mds_easize < body->mbo_max_mdsize)
- cli->cl_max_mds_easize = body->mbo_max_mdsize;
-
- def_easize = min_t(__u32, body->mbo_max_mdsize,
- OBD_MAX_DEFAULT_EA_SIZE);
- cli->cl_default_mds_easize = def_easize;
- }
-}
-
-/* mdc/mdc_locks.c */
-int it_open_error(int phase, struct lookup_intent *it);
-
-static inline bool cl_is_lov_delay_create(unsigned int flags)
-{
- return (flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE;
-}
-
-static inline void cl_lov_delay_create_clear(unsigned int *flags)
-{
- if ((*flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE)
- *flags &= ~O_LOV_DELAY_CREATE;
-}
-
-/** @} mdc */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_mds.h b/drivers/staging/lustre/lustre/include/lustre_mds.h
deleted file mode 100644
index 6937546f1d46..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_mds.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_mds.h
- *
- * MDS data structures.
- * See also lustre_idl.h for wire formats of requests.
- */
-
-#ifndef _LUSTRE_MDS_H
-#define _LUSTRE_MDS_H
-
-/** \defgroup mds mds
- *
- * @{
- */
-
-#include <lustre_handles.h>
-#include <linux/libcfs/libcfs.h>
-#include <lustre_lib.h>
-#include <lustre_dlm.h>
-#include <lustre_export.h>
-
-struct mds_group_info {
- struct obd_uuid *uuid;
- int group;
-};
-
-#define MDD_OBD_NAME "mdd_obd"
-#define MDD_OBD_UUID "mdd_obd_uuid"
-
-/** @} mds */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
deleted file mode 100644
index d35ae0cda8d2..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ /dev/null
@@ -1,2359 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/** \defgroup PtlRPC Portal RPC and networking module.
- *
- * PortalRPC is the layer used by rest of lustre code to achieve network
- * communications: establish connections with corresponding export and import
- * states, listen for a service, send and receive RPCs.
- * PortalRPC also includes base recovery framework: packet resending and
- * replaying, reconnections, pinger.
- *
- * PortalRPC utilizes LNet as its transport layer.
- *
- * @{
- */
-
-#ifndef _LUSTRE_NET_H
-#define _LUSTRE_NET_H
-
-/** \defgroup net net
- *
- * @{
- */
-
-#include <linux/uio.h>
-#include <linux/libcfs/libcfs.h>
-#include <uapi/linux/lnet/nidstr.h>
-#include <linux/lnet/api.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <lustre_errno.h>
-#include <lustre_ha.h>
-#include <lustre_sec.h>
-#include <lustre_import.h>
-#include <lprocfs_status.h>
-#include <lu_object.h>
-#include <lustre_req_layout.h>
-
-#include <obd_support.h>
-#include <uapi/linux/lustre/lustre_ver.h>
-
-/* MD flags we _always_ use */
-#define PTLRPC_MD_OPTIONS 0
-
-/**
- * log2 max # of bulk operations in one request: 2=4MB/RPC, 5=32MB/RPC, ...
- * In order for the client and server to properly negotiate the maximum
- * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
- * value. The client is free to limit the actual RPC size for any bulk
- * transfer via cl_max_pages_per_rpc to some non-power-of-two value.
- * NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS.
- */
-#define PTLRPC_BULK_OPS_BITS 4
-#if PTLRPC_BULK_OPS_BITS > 16
-#error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS."
-#endif
-#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
-/**
- * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
- * should not be used on the server at all. Otherwise, it imposes a
- * protocol limitation on the maximum RPC size that can be used by any
- * RPC sent to that server in the future. Instead, the server should
- * use the negotiated per-client ocd_brw_size to determine the bulk
- * RPC count.
- */
-#define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
-
-/**
- * Define maxima for bulk I/O.
- *
- * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
- * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
- * currently supported maximum between peers at connect via ocd_brw_size.
- */
-#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
-#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
-
-#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
-#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
-#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-
-/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
-# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
-# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
-# endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
-# endif
-# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
-# error "PTLRPC_MAX_BRW_SIZE too big"
-# endif
-# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
-# error "PTLRPC_MAX_BRW_PAGES too big"
-# endif
-
-#define PTLRPC_NTHRS_INIT 2
-
-/**
- * Buffer Constants
- *
- * Constants determine how memory is used to buffer incoming service requests.
- *
- * ?_NBUFS # buffers to allocate when growing the pool
- * ?_BUFSIZE # bytes in a single request buffer
- * ?_MAXREQSIZE # maximum request service will receive
- *
- * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
- * of ?_NBUFS is added to the pool.
- *
- * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
- * considered full when less than ?_MAXREQSIZE is left in them.
- */
-/**
- * Thread Constants
- *
- * Constants determine how threads are created for ptlrpc service.
- *
- * ?_NTHRS_INIT # threads to create for each service partition on
- * initializing. If it's non-affinity service and
- * there is only one partition, it's the overall #
- * threads for the service while initializing.
- * ?_NTHRS_BASE # threads should be created at least for each
- * ptlrpc partition to keep the service healthy.
- * It's the low-water mark of threads upper-limit
- * for each partition.
- * ?_THR_FACTOR # threads can be added on threads upper-limit for
- * each CPU core. This factor is only for reference,
- * we might decrease value of factor if number of cores
- * per CPT is above a limit.
- * ?_NTHRS_MAX # overall threads can be created for a service,
- * it's a soft limit because if service is running
- * on machine with hundreds of cores and tens of
- * CPU partitions, we need to guarantee each partition
- * has ?_NTHRS_BASE threads, which means total threads
- * will be ?_NTHRS_BASE * number_of_cpts which can
- * exceed ?_NTHRS_MAX.
- *
- * Examples
- *
- * #define MDS_NTHRS_INIT 2
- * #define MDS_NTHRS_BASE 64
- * #define MDS_NTHRS_FACTOR 8
- * #define MDS_NTHRS_MAX 1024
- *
- * Example 1):
- * ---------------------------------------------------------------------
- * Server(A) has 16 cores, user configured it to 4 partitions so each
- * partition has 4 cores, then actual number of service threads on each
- * partition is:
- * MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
- *
- * Total number of threads for the service is:
- * 96 * partitions(4) = 384
- *
- * Example 2):
- * ---------------------------------------------------------------------
- * Server(B) has 32 cores, user configured it to 4 partitions so each
- * partition has 8 cores, then actual number of service threads on each
- * partition is:
- * MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
- *
- * Total number of threads for the service is:
- * 128 * partitions(4) = 512
- *
- * Example 3):
- * ---------------------------------------------------------------------
- * Server(B) has 96 cores, user configured it to 8 partitions so each
- * partition has 12 cores, then actual number of service threads on each
- * partition is:
- * MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
- *
- * Total number of threads for the service is:
- * 160 * partitions(8) = 1280
- *
- * However, it's above the soft limit MDS_NTHRS_MAX, so we choose this number
- * as upper limit of threads number for each partition:
- * MDS_NTHRS_MAX(1024) / partitions(8) = 128
- *
- * Example 4):
- * ---------------------------------------------------------------------
- * Server(C) have a thousand of cores and user configured it to 32 partitions
- * MDS_NTHRS_BASE(64) * 32 = 2048
- *
- * which is already above soft limit MDS_NTHRS_MAX(1024), but we still need
- * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads
- * to keep service healthy, so total number of threads will just be 2048.
- *
- * NB: we don't suggest to choose server with that many cores because backend
- * filesystem itself, buffer cache, or underlying network stack might
- * have some SMP scalability issues at that large scale.
- *
- * If user already has a fat machine with hundreds or thousands of cores,
- * there are two choices for configuration:
- * a) create CPU table from subset of all CPUs and run Lustre on
- * top of this subset
- * b) bind service threads on a few partitions, see modparameters of
- * MDS and OSS for details
-*
- * NB: these calculations (and examples below) are simplified to help
- * understanding, the real implementation is a little more complex,
- * please see ptlrpc_server_nthreads_check() for details.
- *
- */
-
- /*
- * LDLM threads constants:
- *
- * Given 8 as factor and 24 as base threads number
- *
- * example 1)
- * On 4-core machine we will have 24 + 8 * 4 = 56 threads.
- *
- * example 2)
- * On 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
- * threads for each partition and total threads number will be 112.
- *
- * example 3)
- * On 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
- * threads for each partition to keep service healthy, so total threads
- * number should be 24 * 8 = 192.
- *
- * So with these constants, threads number will be at the similar level
- * of old versions, unless target machine has over a hundred cores
- */
-#define LDLM_THR_FACTOR 8
-#define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define LDLM_NTHRS_BASE 24
-#define LDLM_NTHRS_MAX (num_online_cpus() == 1 ? 64 : 128)
-
-#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
-#define LDLM_CLIENT_NBUFS 1
-#define LDLM_SERVER_NBUFS 64
-#define LDLM_BUFSIZE (8 * 1024)
-#define LDLM_MAXREQSIZE (5 * 1024)
-#define LDLM_MAXREPSIZE (1024)
-
-#define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
-
-/**
- * FIEMAP request can be 4K+ for now
- */
-#define OST_MAXREQSIZE (16 * 1024)
-
-/* Macro to hide a typecast. */
-#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
-
-struct ptlrpc_replay_async_args {
- int praa_old_state;
- int praa_old_status;
-};
-
-/**
- * Structure to single define portal connection.
- */
-struct ptlrpc_connection {
- /** linkage for connections hash table */
- struct hlist_node c_hash;
- /** Our own lnet nid for this connection */
- lnet_nid_t c_self;
- /** Remote side nid for this connection */
- struct lnet_process_id c_peer;
- /** UUID of the other side */
- struct obd_uuid c_remote_uuid;
- /** reference counter for this connection */
- atomic_t c_refcount;
-};
-
-/** Client definition for PortalRPC */
-struct ptlrpc_client {
- /** What lnet portal does this client send messages to by default */
- __u32 cli_request_portal;
- /** What portal do we expect replies on */
- __u32 cli_reply_portal;
- /** Name of the client */
- char *cli_name;
-};
-
-/** state flags of requests */
-/* XXX only ones left are those used by the bulk descs as well! */
-#define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
-#define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
-
-#define REQ_MAX_ACK_LOCKS 8
-
-union ptlrpc_async_args {
- /**
- * Scratchpad for passing args to completion interpreter. Users
- * cast to the struct of their choosing, and BUILD_BUG_ON oversized
- * arguments. For _tons_ of context, kmalloc a struct and store
- * a pointer to it here. The pointer_arg ensures this struct is at
- * least big enough for that.
- */
- void *pointer_arg[11];
- __u64 space[7];
-};
-
-struct ptlrpc_request_set;
-typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
-typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
-
-/**
- * Definition of request set structure.
- * Request set is a list of requests (not necessary to the same target) that
- * once populated with RPCs could be sent in parallel.
- * There are two kinds of request sets. General purpose and with dedicated
- * serving thread. Example of the latter is ptlrpcd set.
- * For general purpose sets once request set started sending it is impossible
- * to add new requests to such set.
- * Provides a way to call "completion callbacks" when all requests in the set
- * returned.
- */
-struct ptlrpc_request_set {
- atomic_t set_refcount;
- /** number of in queue requests */
- atomic_t set_new_count;
- /** number of uncompleted requests */
- atomic_t set_remaining;
- /** wait queue to wait on for request events */
- wait_queue_head_t set_waitq;
- wait_queue_head_t *set_wakeup_ptr;
- /** List of requests in the set */
- struct list_head set_requests;
- /**
- * List of completion callbacks to be called when the set is completed
- * This is only used if \a set_interpret is NULL.
- * Links struct ptlrpc_set_cbdata.
- */
- struct list_head set_cblist;
- /** Completion callback, if only one. */
- set_interpreter_func set_interpret;
- /** opaq argument passed to completion \a set_interpret callback. */
- void *set_arg;
- /**
- * Lock for \a set_new_requests manipulations
- * locked so that any old caller can communicate requests to
- * the set holder who can then fold them into the lock-free set
- */
- spinlock_t set_new_req_lock;
- /** List of new yet unsent requests. Only used with ptlrpcd now. */
- struct list_head set_new_requests;
-
- /** rq_status of requests that have been freed already */
- int set_rc;
- /** Additional fields used by the flow control extension */
- /** Maximum number of RPCs in flight */
- int set_max_inflight;
- /** Callback function used to generate RPCs */
- set_producer_func set_producer;
- /** opaq argument passed to the producer callback */
- void *set_producer_arg;
-};
-
-/**
- * Description of a single ptrlrpc_set callback
- */
-struct ptlrpc_set_cbdata {
- /** List linkage item */
- struct list_head psc_item;
- /** Pointer to interpreting function */
- set_interpreter_func psc_interpret;
- /** Opaq argument to pass to the callback */
- void *psc_data;
-};
-
-struct ptlrpc_bulk_desc;
-struct ptlrpc_service_part;
-struct ptlrpc_service;
-
-/**
- * ptlrpc callback & work item stuff
- */
-struct ptlrpc_cb_id {
- void (*cbid_fn)(struct lnet_event *ev); /* specific callback fn */
- void *cbid_arg; /* additional arg */
-};
-
-/** Maximum number of locks to fit into reply state */
-#define RS_MAX_LOCKS 8
-#define RS_DEBUG 0
-
-/**
- * Structure to define reply state on the server
- * Reply state holds various reply message information. Also for "difficult"
- * replies (rep-ack case) we store the state after sending reply and wait
- * for the client to acknowledge the reception. In these cases locks could be
- * added to the state for replay/failover consistency guarantees.
- */
-struct ptlrpc_reply_state {
- /** Callback description */
- struct ptlrpc_cb_id rs_cb_id;
- /** Linkage for list of all reply states in a system */
- struct list_head rs_list;
- /** Linkage for list of all reply states on same export */
- struct list_head rs_exp_list;
- /** Linkage for list of all reply states for same obd */
- struct list_head rs_obd_list;
-#if RS_DEBUG
- struct list_head rs_debug_list;
-#endif
- /** A spinlock to protect the reply state flags */
- spinlock_t rs_lock;
- /** Reply state flags */
- unsigned long rs_difficult:1; /* ACK/commit stuff */
- unsigned long rs_no_ack:1; /* no ACK, even for
- * difficult requests
- */
- unsigned long rs_scheduled:1; /* being handled? */
- unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
- unsigned long rs_handled:1; /* been handled yet? */
- unsigned long rs_on_net:1; /* reply_out_callback pending? */
- unsigned long rs_prealloc:1; /* rs from prealloc list */
- unsigned long rs_committed:1;/* the transaction was committed
- * and the rs was dispatched
- */
- atomic_t rs_refcount; /* number of users */
- /** Number of locks awaiting client ACK */
- int rs_nlocks;
-
- /** Size of the state */
- int rs_size;
- /** opcode */
- __u32 rs_opc;
- /** Transaction number */
- __u64 rs_transno;
- /** xid */
- __u64 rs_xid;
- struct obd_export *rs_export;
- struct ptlrpc_service_part *rs_svcpt;
- /** Lnet metadata handle for the reply */
- struct lnet_handle_md rs_md_h;
-
- /** Context for the service thread */
- struct ptlrpc_svc_ctx *rs_svc_ctx;
- /** Reply buffer (actually sent to the client), encoded if needed */
- struct lustre_msg *rs_repbuf; /* wrapper */
- /** Size of the reply buffer */
- int rs_repbuf_len; /* wrapper buf length */
- /** Size of the reply message */
- int rs_repdata_len; /* wrapper msg length */
- /**
- * Actual reply message. Its content is encrypted (if needed) to
- * produce reply buffer for actual sending. In simple case
- * of no network encryption we just set \a rs_repbuf to \a rs_msg
- */
- struct lustre_msg *rs_msg; /* reply message */
-
- /** Handles of locks awaiting client reply ACK */
- struct lustre_handle rs_locks[RS_MAX_LOCKS];
- /** Lock modes of locks in \a rs_locks */
- enum ldlm_mode rs_modes[RS_MAX_LOCKS];
-};
-
-struct ptlrpc_thread;
-
-/** RPC stages */
-enum rq_phase {
- RQ_PHASE_NEW = 0xebc0de00,
- RQ_PHASE_RPC = 0xebc0de01,
- RQ_PHASE_BULK = 0xebc0de02,
- RQ_PHASE_INTERPRET = 0xebc0de03,
- RQ_PHASE_COMPLETE = 0xebc0de04,
- RQ_PHASE_UNREG_RPC = 0xebc0de05,
- RQ_PHASE_UNREG_BULK = 0xebc0de06,
- RQ_PHASE_UNDEFINED = 0xebc0de07
-};
-
-/** Type of request interpreter call-back */
-typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *arg, int rc);
-
-/**
- * Definition of request pool structure.
- * The pool is used to store empty preallocated requests for the case
- * when we would actually need to send something without performing
- * any allocations (to avoid e.g. OOM).
- */
-struct ptlrpc_request_pool {
- /** Locks the list */
- spinlock_t prp_lock;
- /** list of ptlrpc_request structs */
- struct list_head prp_req_list;
- /** Maximum message size that would fit into a request from this pool */
- int prp_rq_size;
- /** Function to allocate more requests for this pool */
- int (*prp_populate)(struct ptlrpc_request_pool *, int);
-};
-
-struct lu_context;
-struct lu_env;
-
-struct ldlm_lock;
-
-#include <lustre_nrs.h>
-
-/**
- * Basic request prioritization operations structure.
- * The whole idea is centered around locks and RPCs that might affect locks.
- * When a lock is contended we try to give priority to RPCs that might lead
- * to fastest release of that lock.
- * Currently only implemented for OSTs only in a way that makes all
- * IO and truncate RPCs that are coming from a locked region where a lock is
- * contended a priority over other requests.
- */
-struct ptlrpc_hpreq_ops {
- /**
- * Check if the lock handle of the given lock is the same as
- * taken from the request.
- */
- int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
- /**
- * Check if the request is a high priority one.
- */
- int (*hpreq_check)(struct ptlrpc_request *);
- /**
- * Called after the request has been handled.
- */
- void (*hpreq_fini)(struct ptlrpc_request *);
-};
-
-struct ptlrpc_cli_req {
- /** For bulk requests on client only: bulk descriptor */
- struct ptlrpc_bulk_desc *cr_bulk;
- /** optional time limit for send attempts */
- long cr_delay_limit;
- /** time request was first queued */
- time_t cr_queued_time;
- /** request sent timeval */
- struct timespec64 cr_sent_tv;
- /** time for request really sent out */
- time64_t cr_sent_out;
- /** when req reply unlink must finish. */
- time64_t cr_reply_deadline;
- /** when req bulk unlink must finish. */
- time64_t cr_bulk_deadline;
- /** when req unlink must finish. */
- time64_t cr_req_deadline;
- /** Portal to which this request would be sent */
- short cr_req_ptl;
- /** Portal where to wait for reply and where reply would be sent */
- short cr_rep_ptl;
- /** request resending number */
- unsigned int cr_resend_nr;
- /** What was import generation when this request was sent */
- int cr_imp_gen;
- enum lustre_imp_state cr_send_state;
- /** Per-request waitq introduced by bug 21938 for recovery waiting */
- wait_queue_head_t cr_set_waitq;
- /** Link item for request set lists */
- struct list_head cr_set_chain;
- /** link to waited ctx */
- struct list_head cr_ctx_chain;
-
- /** client's half ctx */
- struct ptlrpc_cli_ctx *cr_cli_ctx;
- /** Link back to the request set */
- struct ptlrpc_request_set *cr_set;
- /** outgoing request MD handle */
- struct lnet_handle_md cr_req_md_h;
- /** request-out callback parameter */
- struct ptlrpc_cb_id cr_req_cbid;
- /** incoming reply MD handle */
- struct lnet_handle_md cr_reply_md_h;
- wait_queue_head_t cr_reply_waitq;
- /** reply callback parameter */
- struct ptlrpc_cb_id cr_reply_cbid;
- /** Async completion handler, called when reply is received */
- ptlrpc_interpterer_t cr_reply_interp;
- /** Async completion context */
- union ptlrpc_async_args cr_async_args;
- /** Opaq data for replay and commit callbacks. */
- void *cr_cb_data;
- /** Link to the imp->imp_unreplied_list */
- struct list_head cr_unreplied_list;
- /**
- * Commit callback, called when request is committed and about to be
- * freed.
- */
- void (*cr_commit_cb)(struct ptlrpc_request *);
- /** Replay callback, called after request is replayed at recovery */
- void (*cr_replay_cb)(struct ptlrpc_request *);
-};
-
-/** client request member alias */
-/* NB: these alias should NOT be used by any new code, instead they should
- * be removed step by step to avoid potential abuse
- */
-#define rq_bulk rq_cli.cr_bulk
-#define rq_delay_limit rq_cli.cr_delay_limit
-#define rq_queued_time rq_cli.cr_queued_time
-#define rq_sent_tv rq_cli.cr_sent_tv
-#define rq_real_sent rq_cli.cr_sent_out
-#define rq_reply_deadline rq_cli.cr_reply_deadline
-#define rq_bulk_deadline rq_cli.cr_bulk_deadline
-#define rq_req_deadline rq_cli.cr_req_deadline
-#define rq_nr_resend rq_cli.cr_resend_nr
-#define rq_request_portal rq_cli.cr_req_ptl
-#define rq_reply_portal rq_cli.cr_rep_ptl
-#define rq_import_generation rq_cli.cr_imp_gen
-#define rq_send_state rq_cli.cr_send_state
-#define rq_set_chain rq_cli.cr_set_chain
-#define rq_ctx_chain rq_cli.cr_ctx_chain
-#define rq_set rq_cli.cr_set
-#define rq_set_waitq rq_cli.cr_set_waitq
-#define rq_cli_ctx rq_cli.cr_cli_ctx
-#define rq_req_md_h rq_cli.cr_req_md_h
-#define rq_req_cbid rq_cli.cr_req_cbid
-#define rq_reply_md_h rq_cli.cr_reply_md_h
-#define rq_reply_waitq rq_cli.cr_reply_waitq
-#define rq_reply_cbid rq_cli.cr_reply_cbid
-#define rq_interpret_reply rq_cli.cr_reply_interp
-#define rq_async_args rq_cli.cr_async_args
-#define rq_cb_data rq_cli.cr_cb_data
-#define rq_unreplied_list rq_cli.cr_unreplied_list
-#define rq_commit_cb rq_cli.cr_commit_cb
-#define rq_replay_cb rq_cli.cr_replay_cb
-
-struct ptlrpc_srv_req {
- /** initial thread servicing this request */
- struct ptlrpc_thread *sr_svc_thread;
- /**
- * Server side list of incoming unserved requests sorted by arrival
- * time. Traversed from time to time to notice about to expire
- * requests and sent back "early replies" to clients to let them
- * know server is alive and well, just very busy to service their
- * requests in time
- */
- struct list_head sr_timed_list;
- /** server-side per-export list */
- struct list_head sr_exp_list;
- /** server-side history, used for debuging purposes. */
- struct list_head sr_hist_list;
- /** history sequence # */
- __u64 sr_hist_seq;
- /** the index of service's srv_at_array into which request is linked */
- time64_t sr_at_index;
- /** authed uid */
- uid_t sr_auth_uid;
- /** authed uid mapped to */
- uid_t sr_auth_mapped_uid;
- /** RPC is generated from what part of Lustre */
- enum lustre_sec_part sr_sp_from;
- /** request session context */
- struct lu_context sr_ses;
- /** \addtogroup nrs
- * @{
- */
- /** stub for NRS request */
- struct ptlrpc_nrs_request sr_nrq;
- /** @} nrs */
- /** request arrival time */
- struct timespec64 sr_arrival_time;
- /** server's half ctx */
- struct ptlrpc_svc_ctx *sr_svc_ctx;
- /** (server side), pointed directly into req buffer */
- struct ptlrpc_user_desc *sr_user_desc;
- /** separated reply state */
- struct ptlrpc_reply_state *sr_reply_state;
- /** server-side hp handlers */
- struct ptlrpc_hpreq_ops *sr_ops;
- /** incoming request buffer */
- struct ptlrpc_request_buffer_desc *sr_rqbd;
-};
-
-/** server request member alias */
-/* NB: these alias should NOT be used by any new code, instead they should
- * be removed step by step to avoid potential abuse
- */
-#define rq_svc_thread rq_srv.sr_svc_thread
-#define rq_timed_list rq_srv.sr_timed_list
-#define rq_exp_list rq_srv.sr_exp_list
-#define rq_history_list rq_srv.sr_hist_list
-#define rq_history_seq rq_srv.sr_hist_seq
-#define rq_at_index rq_srv.sr_at_index
-#define rq_auth_uid rq_srv.sr_auth_uid
-#define rq_auth_mapped_uid rq_srv.sr_auth_mapped_uid
-#define rq_sp_from rq_srv.sr_sp_from
-#define rq_session rq_srv.sr_ses
-#define rq_nrq rq_srv.sr_nrq
-#define rq_arrival_time rq_srv.sr_arrival_time
-#define rq_reply_state rq_srv.sr_reply_state
-#define rq_svc_ctx rq_srv.sr_svc_ctx
-#define rq_user_desc rq_srv.sr_user_desc
-#define rq_ops rq_srv.sr_ops
-#define rq_rqbd rq_srv.sr_rqbd
-
-/**
- * Represents remote procedure call.
- *
- * This is a staple structure used by everybody wanting to send a request
- * in Lustre.
- */
-struct ptlrpc_request {
- /* Request type: one of PTL_RPC_MSG_* */
- int rq_type;
- /** Result of request processing */
- int rq_status;
- /**
- * Linkage item through which this request is included into
- * sending/delayed lists on client and into rqbd list on server
- */
- struct list_head rq_list;
- /** Lock to protect request flags and some other important bits, like
- * rq_list
- */
- spinlock_t rq_lock;
- /** client-side flags are serialized by rq_lock @{ */
- unsigned int rq_intr:1, rq_replied:1, rq_err:1,
- rq_timedout:1, rq_resend:1, rq_restart:1,
- /**
- * when ->rq_replay is set, request is kept by the client even
- * after server commits corresponding transaction. This is
- * used for operations that require sequence of multiple
- * requests to be replayed. The only example currently is file
- * open/close. When last request in such a sequence is
- * committed, ->rq_replay is cleared on all requests in the
- * sequence.
- */
- rq_replay:1,
- rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
- rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
- rq_early:1,
- rq_req_unlinked:1, /* unlinked request buffer from lnet */
- rq_reply_unlinked:1, /* unlinked reply buffer from lnet */
- rq_memalloc:1, /* req originated from "kswapd" */
- rq_committed:1,
- rq_reply_truncated:1,
- /** whether the "rq_set" is a valid one */
- rq_invalid_rqset:1,
- rq_generation_set:1,
- /** do not resend request on -EINPROGRESS */
- rq_no_retry_einprogress:1,
- /* allow the req to be sent if the import is in recovery
- * status
- */
- rq_allow_replay:1,
- /* bulk request, sent to server, but uncommitted */
- rq_unstable:1;
- /** @} */
-
- /** server-side flags @{ */
- unsigned int
- rq_hp:1, /**< high priority RPC */
- rq_at_linked:1, /**< link into service's srv_at_array */
- rq_packed_final:1; /**< packed final reply */
- /** @} */
-
- /** one of RQ_PHASE_* */
- enum rq_phase rq_phase;
- /** one of RQ_PHASE_* to be used next */
- enum rq_phase rq_next_phase;
- /**
- * client-side refcount for SENT race, server-side refcount
- * for multiple replies
- */
- atomic_t rq_refcount;
- /**
- * client-side:
- * !rq_truncate : # reply bytes actually received,
- * rq_truncate : required repbuf_len for resend
- */
- int rq_nob_received;
- /** Request length */
- int rq_reqlen;
- /** Reply length */
- int rq_replen;
- /** Pool if request is from preallocated list */
- struct ptlrpc_request_pool *rq_pool;
- /** Request message - what client sent */
- struct lustre_msg *rq_reqmsg;
- /** Reply message - server response */
- struct lustre_msg *rq_repmsg;
- /** Transaction number */
- __u64 rq_transno;
- /** xid */
- __u64 rq_xid;
- /** bulk match bits */
- u64 rq_mbits;
- /**
- * List item to for replay list. Not yet committed requests get linked
- * there.
- * Also see \a rq_replay comment above.
- * It's also link chain on obd_export::exp_req_replay_queue
- */
- struct list_head rq_replay_list;
- /** non-shared members for client & server request*/
- union {
- struct ptlrpc_cli_req rq_cli;
- struct ptlrpc_srv_req rq_srv;
- };
- /**
- * security and encryption data
- * @{
- */
- /** description of flavors for client & server */
- struct sptlrpc_flavor rq_flvr;
-
- /* client/server security flags */
- unsigned int
- rq_ctx_init:1, /* context initiation */
- rq_ctx_fini:1, /* context destroy */
- rq_bulk_read:1, /* request bulk read */
- rq_bulk_write:1, /* request bulk write */
- /* server authentication flags */
- rq_auth_gss:1, /* authenticated by gss */
- rq_auth_usr_root:1, /* authed as root */
- rq_auth_usr_mdt:1, /* authed as mdt */
- rq_auth_usr_ost:1, /* authed as ost */
- /* security tfm flags */
- rq_pack_udesc:1,
- rq_pack_bulk:1,
- /* doesn't expect reply FIXME */
- rq_no_reply:1,
- rq_pill_init:1, /* pill initialized */
- rq_srv_req:1; /* server request */
-
- /** various buffer pointers */
- struct lustre_msg *rq_reqbuf; /**< req wrapper */
- char *rq_repbuf; /**< rep buffer */
- struct lustre_msg *rq_repdata; /**< rep wrapper msg */
- /** only in priv mode */
- struct lustre_msg *rq_clrbuf;
- int rq_reqbuf_len; /* req wrapper buf len */
- int rq_reqdata_len; /* req wrapper msg len */
- int rq_repbuf_len; /* rep buffer len */
- int rq_repdata_len; /* rep wrapper msg len */
- int rq_clrbuf_len; /* only in priv mode */
- int rq_clrdata_len; /* only in priv mode */
-
- /** early replies go to offset 0, regular replies go after that */
- unsigned int rq_reply_off;
-
- /** @} */
-
- /** Fields that help to see if request and reply were swabbed or not */
- __u32 rq_req_swab_mask;
- __u32 rq_rep_swab_mask;
-
- /** how many early replies (for stats) */
- int rq_early_count;
-
- /** Server-side, export on which request was received */
- struct obd_export *rq_export;
- /** import where request is being sent */
- struct obd_import *rq_import;
- /** our LNet NID */
- lnet_nid_t rq_self;
- /** Peer description (the other side) */
- struct lnet_process_id rq_peer;
- /**
- * service time estimate (secs)
- * If the request is not served by this time, it is marked as timed out.
- */
- int rq_timeout;
- /**
- * when request/reply sent (secs), or time when request should be sent
- */
- time64_t rq_sent;
- /** when request must finish. */
- time64_t rq_deadline;
- /** request format description */
- struct req_capsule rq_pill;
-};
-
-/**
- * Call completion handler for rpc if any, return it's status or original
- * rc if there was no handler defined for this request.
- */
-static inline int ptlrpc_req_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, int rc)
-{
- if (req->rq_interpret_reply) {
- req->rq_status = req->rq_interpret_reply(env, req,
- &req->rq_async_args,
- rc);
- return req->rq_status;
- }
- return rc;
-}
-
-/*
- * Can the request be moved from the regular NRS head to the high-priority NRS
- * head (of the same PTLRPC service partition), if any?
- *
- * For a reliable result, this should be checked under svcpt->scp_req lock.
- */
-static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
-{
- struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
-
- /**
- * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
- * request has been enqueued first, and ptlrpc_nrs_request::nr_started
- * to make sure it has not been scheduled yet (analogous to previous
- * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list).
- */
- return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
-}
-
-/** @} nrs */
-
-/**
- * Returns 1 if request buffer at offset \a index was already swabbed
- */
-static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index)
-{
- LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
- return req->rq_req_swab_mask & (1 << index);
-}
-
-/**
- * Returns 1 if request reply buffer at offset \a index was already swabbed
- */
-static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index)
-{
- LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
- return req->rq_rep_swab_mask & (1 << index);
-}
-
-/**
- * Returns 1 if request needs to be swabbed into local cpu byteorder
- */
-static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
-{
- return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
-}
-
-/**
- * Returns 1 if request reply needs to be swabbed into local cpu byteorder
- */
-static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
-{
- return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
-}
-
-/**
- * Mark request buffer at offset \a index that it was already swabbed
- */
-static inline void lustre_set_req_swabbed(struct ptlrpc_request *req,
- size_t index)
-{
- LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
- LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
- req->rq_req_swab_mask |= 1 << index;
-}
-
-/**
- * Mark request reply buffer at offset \a index that it was already swabbed
- */
-static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req,
- size_t index)
-{
- LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
- LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
- req->rq_rep_swab_mask |= 1 << index;
-}
-
-/**
- * Convert numerical request phase value \a phase into text string description
- */
-static inline const char *
-ptlrpc_phase2str(enum rq_phase phase)
-{
- switch (phase) {
- case RQ_PHASE_NEW:
- return "New";
- case RQ_PHASE_RPC:
- return "Rpc";
- case RQ_PHASE_BULK:
- return "Bulk";
- case RQ_PHASE_INTERPRET:
- return "Interpret";
- case RQ_PHASE_COMPLETE:
- return "Complete";
- case RQ_PHASE_UNREG_RPC:
- return "UnregRPC";
- case RQ_PHASE_UNREG_BULK:
- return "UnregBULK";
- default:
- return "?Phase?";
- }
-}
-
-/**
- * Convert numerical request phase of the request \a req into text stringi
- * description
- */
-static inline const char *
-ptlrpc_rqphase2str(struct ptlrpc_request *req)
-{
- return ptlrpc_phase2str(req->rq_phase);
-}
-
-/**
- * Debugging functions and helpers to print request structure into debug log
- * @{
- */
-/* Spare the preprocessor, spoil the bugs. */
-#define FLAG(field, str) (field ? str : "")
-
-/** Convert bit flags into a string */
-#define DEBUG_REQ_FLAGS(req) \
- ptlrpc_rqphase2str(req), \
- FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
- FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"), \
- FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
- FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
- FLAG(req->rq_no_resend, "N"), \
- FLAG(req->rq_waiting, "W"), \
- FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
- FLAG(req->rq_committed, "M")
-
-#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s"
-
-void _debug_req(struct ptlrpc_request *req,
- struct libcfs_debug_msg_data *data, const char *fmt, ...)
- __printf(3, 4);
-
-/**
- * Helper that decides if we need to print request according to current debug
- * level settings
- */
-#define debug_req(msgdata, mask, cdls, req, fmt, a...) \
-do { \
- CFS_CHECK_STACK(msgdata, mask, cdls); \
- \
- if (((mask) & D_CANTMASK) != 0 || \
- ((libcfs_debug & (mask)) != 0 && \
- (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
- _debug_req((req), msgdata, fmt, ##a); \
-} while (0)
-
-/**
- * This is the debug print function you need to use to print request structure
- * content into lustre debug log.
- * for most callers (level is a constant) this is resolved at compile time
- */
-#define DEBUG_REQ(level, req, fmt, args...) \
-do { \
- if ((level) & (D_ERROR | D_WARNING)) { \
- static struct cfs_debug_limit_state cdls; \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
- debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
- } else { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
- debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
- } \
-} while (0)
-/** @} */
-
-/**
- * Structure that defines a single page of a bulk transfer
- */
-struct ptlrpc_bulk_page {
- /** Linkage to list of pages in a bulk */
- struct list_head bp_link;
- /**
- * Number of bytes in a page to transfer starting from \a bp_pageoffset
- */
- int bp_buflen;
- /** offset within a page */
- int bp_pageoffset;
- /** The page itself */
- struct page *bp_page;
-};
-
-enum ptlrpc_bulk_op_type {
- PTLRPC_BULK_OP_ACTIVE = 0x00000001,
- PTLRPC_BULK_OP_PASSIVE = 0x00000002,
- PTLRPC_BULK_OP_PUT = 0x00000004,
- PTLRPC_BULK_OP_GET = 0x00000008,
- PTLRPC_BULK_BUF_KVEC = 0x00000010,
- PTLRPC_BULK_BUF_KIOV = 0x00000020,
- PTLRPC_BULK_GET_SOURCE = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET,
- PTLRPC_BULK_PUT_SINK = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT,
- PTLRPC_BULK_GET_SINK = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET,
- PTLRPC_BULK_PUT_SOURCE = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_PUT,
-};
-
-static inline bool ptlrpc_is_bulk_op_get(enum ptlrpc_bulk_op_type type)
-{
- return (type & PTLRPC_BULK_OP_GET) == PTLRPC_BULK_OP_GET;
-}
-
-static inline bool ptlrpc_is_bulk_get_source(enum ptlrpc_bulk_op_type type)
-{
- return (type & PTLRPC_BULK_GET_SOURCE) == PTLRPC_BULK_GET_SOURCE;
-}
-
-static inline bool ptlrpc_is_bulk_put_sink(enum ptlrpc_bulk_op_type type)
-{
- return (type & PTLRPC_BULK_PUT_SINK) == PTLRPC_BULK_PUT_SINK;
-}
-
-static inline bool ptlrpc_is_bulk_get_sink(enum ptlrpc_bulk_op_type type)
-{
- return (type & PTLRPC_BULK_GET_SINK) == PTLRPC_BULK_GET_SINK;
-}
-
-static inline bool ptlrpc_is_bulk_put_source(enum ptlrpc_bulk_op_type type)
-{
- return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE;
-}
-
-static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type)
-{
- return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
- == PTLRPC_BULK_BUF_KVEC;
-}
-
-static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type)
-{
- return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
- == PTLRPC_BULK_BUF_KIOV;
-}
-
-static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type)
-{
- return ((type & PTLRPC_BULK_OP_ACTIVE) |
- (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_ACTIVE;
-}
-
-static inline bool ptlrpc_is_bulk_op_passive(enum ptlrpc_bulk_op_type type)
-{
- return ((type & PTLRPC_BULK_OP_ACTIVE) |
- (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_PASSIVE;
-}
-
-struct ptlrpc_bulk_frag_ops {
- /**
- * Add a page \a page to the bulk descriptor \a desc
- * Data to transfer in the page starts at offset \a pageoffset and
- * amount of data to transfer from the page is \a len
- */
- void (*add_kiov_frag)(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset, int len);
-
- /*
- * Add a \a fragment to the bulk descriptor \a desc.
- * Data to transfer in the fragment is pointed to by \a frag
- * The size of the fragment is \a len
- */
- int (*add_iov_frag)(struct ptlrpc_bulk_desc *desc, void *frag, int len);
-
- /**
- * Uninitialize and free bulk descriptor \a desc.
- * Works on bulk descriptors both from server and client side.
- */
- void (*release_frags)(struct ptlrpc_bulk_desc *desc);
-};
-
-extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops;
-extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops;
-
-/**
- * Definition of bulk descriptor.
- * Bulks are special "Two phase" RPCs where initial request message
- * is sent first and it is followed bt a transfer (o receiving) of a large
- * amount of data to be settled into pages referenced from the bulk descriptors.
- * Bulks transfers (the actual data following the small requests) are done
- * on separate LNet portals.
- * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
- * Another user is readpage for MDT.
- */
-struct ptlrpc_bulk_desc {
- /** completed with failure */
- unsigned long bd_failure:1;
- /** client side */
- unsigned long bd_registered:1;
- /** For serialization with callback */
- spinlock_t bd_lock;
- /** Import generation when request for this bulk was sent */
- int bd_import_generation;
- /** {put,get}{source,sink}{kvec,kiov} */
- enum ptlrpc_bulk_op_type bd_type;
- /** LNet portal for this bulk */
- __u32 bd_portal;
- /** Server side - export this bulk created for */
- struct obd_export *bd_export;
- /** Client side - import this bulk was sent on */
- struct obd_import *bd_import;
- /** Back pointer to the request */
- struct ptlrpc_request *bd_req;
- struct ptlrpc_bulk_frag_ops *bd_frag_ops;
- wait_queue_head_t bd_waitq; /* server side only WQ */
- int bd_iov_count; /* # entries in bd_iov */
- int bd_max_iov; /* allocated size of bd_iov */
- int bd_nob; /* # bytes covered */
- int bd_nob_transferred; /* # bytes GOT/PUT */
-
- u64 bd_last_mbits;
-
- struct ptlrpc_cb_id bd_cbid; /* network callback info */
- lnet_nid_t bd_sender; /* stash event::sender */
- int bd_md_count; /* # valid entries in bd_mds */
- int bd_md_max_brw; /* max entries in bd_mds */
- /** array of associated MDs */
- struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT];
-
- union {
- struct {
- /*
- * encrypt iov, size is either 0 or bd_iov_count.
- */
- struct bio_vec *bd_enc_vec;
- struct bio_vec *bd_vec; /* Array of bio_vecs */
- } bd_kiov;
-
- struct {
- struct kvec *bd_enc_kvec;
- struct kvec *bd_kvec; /* Array of kvecs */
- } bd_kvec;
- } bd_u;
-};
-
-#define GET_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_vec)
-#define BD_GET_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_vec[i])
-#define GET_ENC_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_enc_vec)
-#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_enc_vec[i])
-#define GET_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_kvec)
-#define BD_GET_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_kvec[i])
-#define GET_ENC_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_enc_kvec)
-#define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i])
-
-enum {
- SVC_STOPPED = 1 << 0,
- SVC_STOPPING = 1 << 1,
- SVC_STARTING = 1 << 2,
- SVC_RUNNING = 1 << 3,
-};
-
-#define PTLRPC_THR_NAME_LEN 32
-/**
- * Definition of server service thread structure
- */
-struct ptlrpc_thread {
- /**
- * List of active threads in svc->srv_threads
- */
- struct list_head t_link;
- /**
- * thread-private data (preallocated memory)
- */
- void *t_data;
- __u32 t_flags;
- /**
- * service thread index, from ptlrpc_start_threads
- */
- unsigned int t_id;
- /**
- * service thread pid
- */
- pid_t t_pid;
- /**
- * put watchdog in the structure per thread b=14840
- *
- * Lustre watchdog is removed for client in the hope
- * of a generic watchdog can be merged in kernel.
- * When that happens, we should add below back.
- *
- * struct lc_watchdog *t_watchdog;
- */
- /**
- * the svc this thread belonged to b=18582
- */
- struct ptlrpc_service_part *t_svcpt;
- wait_queue_head_t t_ctl_waitq;
- struct lu_env *t_env;
- char t_name[PTLRPC_THR_NAME_LEN];
-};
-
-static inline int thread_is_stopped(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_STOPPED);
-}
-
-static inline int thread_is_stopping(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_STOPPING);
-}
-
-static inline int thread_is_starting(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_STARTING);
-}
-
-static inline int thread_is_running(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_RUNNING);
-}
-
-static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
-{
- thread->t_flags &= ~flags;
-}
-
-static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
-{
- thread->t_flags = flags;
-}
-
-static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
-{
- thread->t_flags |= flags;
-}
-
-static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
- __u32 flags)
-{
- if (thread->t_flags & flags) {
- thread->t_flags &= ~flags;
- return 1;
- }
- return 0;
-}
-
-/**
- * Request buffer descriptor structure.
- * This is a structure that contains one posted request buffer for service.
- * Once data land into a buffer, event callback creates actual request and
- * notifies wakes one of the service threads to process new incoming request.
- * More than one request can fit into the buffer.
- */
-struct ptlrpc_request_buffer_desc {
- /** Link item for rqbds on a service */
- struct list_head rqbd_list;
- /** History of requests for this buffer */
- struct list_head rqbd_reqs;
- /** Back pointer to service for which this buffer is registered */
- struct ptlrpc_service_part *rqbd_svcpt;
- /** LNet descriptor */
- struct lnet_handle_md rqbd_md_h;
- int rqbd_refcount;
- /** The buffer itself */
- char *rqbd_buffer;
- struct ptlrpc_cb_id rqbd_cbid;
- /**
- * This "embedded" request structure is only used for the
- * last request to fit into the buffer
- */
- struct ptlrpc_request rqbd_req;
-};
-
-typedef int (*svc_handler_t)(struct ptlrpc_request *req);
-
-struct ptlrpc_service_ops {
- /**
- * if non-NULL called during thread creation (ptlrpc_start_thread())
- * to initialize service specific per-thread state.
- */
- int (*so_thr_init)(struct ptlrpc_thread *thr);
- /**
- * if non-NULL called during thread shutdown (ptlrpc_main()) to
- * destruct state created by ->srv_init().
- */
- void (*so_thr_done)(struct ptlrpc_thread *thr);
- /**
- * Handler function for incoming requests for this service
- */
- int (*so_req_handler)(struct ptlrpc_request *req);
- /**
- * function to determine priority of the request, it's called
- * on every new request
- */
- int (*so_hpreq_handler)(struct ptlrpc_request *);
- /**
- * service-specific print fn
- */
- void (*so_req_printer)(void *, struct ptlrpc_request *);
-};
-
-#ifndef __cfs_cacheline_aligned
-/* NB: put it here for reducing patche dependence */
-# define __cfs_cacheline_aligned
-#endif
-
-/**
- * How many high priority requests to serve before serving one normal
- * priority request
- */
-#define PTLRPC_SVC_HP_RATIO 10
-
-/**
- * Definition of PortalRPC service.
- * The service is listening on a particular portal (like tcp port)
- * and perform actions for a specific server like IO service for OST
- * or general metadata service for MDS.
- */
-struct ptlrpc_service {
- /** serialize sysfs operations */
- spinlock_t srv_lock;
- /** most often accessed fields */
- /** chain thru all services */
- struct list_head srv_list;
- /** service operations table */
- struct ptlrpc_service_ops srv_ops;
- /** only statically allocated strings here; we don't clean them */
- char *srv_name;
- /** only statically allocated strings here; we don't clean them */
- char *srv_thread_name;
- /** service thread list */
- struct list_head srv_threads;
- /** threads # should be created for each partition on initializing */
- int srv_nthrs_cpt_init;
- /** limit of threads number for each partition */
- int srv_nthrs_cpt_limit;
- /** Root of debugfs dir tree for this service */
- struct dentry *srv_debugfs_entry;
- /** Pointer to statistic data for this service */
- struct lprocfs_stats *srv_stats;
- /** # hp per lp reqs to handle */
- int srv_hpreq_ratio;
- /** biggest request to receive */
- int srv_max_req_size;
- /** biggest reply to send */
- int srv_max_reply_size;
- /** size of individual buffers */
- int srv_buf_size;
- /** # buffers to allocate in 1 group */
- int srv_nbuf_per_group;
- /** Local portal on which to receive requests */
- __u32 srv_req_portal;
- /** Portal on the client to send replies to */
- __u32 srv_rep_portal;
- /**
- * Tags for lu_context associated with this thread, see struct
- * lu_context.
- */
- __u32 srv_ctx_tags;
- /** soft watchdog timeout multiplier */
- int srv_watchdog_factor;
- /** under unregister_service */
- unsigned srv_is_stopping:1;
-
- /** max # request buffers in history per partition */
- int srv_hist_nrqbds_cpt_max;
- /** number of CPTs this service bound on */
- int srv_ncpts;
- /** CPTs array this service bound on */
- __u32 *srv_cpts;
- /** 2^srv_cptab_bits >= cfs_cpt_numbert(srv_cptable) */
- int srv_cpt_bits;
- /** CPT table this service is running over */
- struct cfs_cpt_table *srv_cptable;
-
- /* sysfs object */
- struct kobject srv_kobj;
- struct completion srv_kobj_unregister;
- /**
- * partition data for ptlrpc service
- */
- struct ptlrpc_service_part *srv_parts[0];
-};
-
-/**
- * Definition of PortalRPC service partition data.
- * Although a service only has one instance of it right now, but we
- * will have multiple instances very soon (instance per CPT).
- *
- * it has four locks:
- * \a scp_lock
- * serialize operations on rqbd and requests waiting for preprocess
- * \a scp_req_lock
- * serialize operations active requests sent to this portal
- * \a scp_at_lock
- * serialize adaptive timeout stuff
- * \a scp_rep_lock
- * serialize operations on RS list (reply states)
- *
- * We don't have any use-case to take two or more locks at the same time
- * for now, so there is no lock order issue.
- */
-struct ptlrpc_service_part {
- /** back reference to owner */
- struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
- /* CPT id, reserved */
- int scp_cpt;
- /** always increasing number */
- int scp_thr_nextid;
- /** # of starting threads */
- int scp_nthrs_starting;
- /** # of stopping threads, reserved for shrinking threads */
- int scp_nthrs_stopping;
- /** # running threads */
- int scp_nthrs_running;
- /** service threads list */
- struct list_head scp_threads;
-
- /**
- * serialize the following fields, used for protecting
- * rqbd list and incoming requests waiting for preprocess,
- * threads starting & stopping are also protected by this lock.
- */
- spinlock_t scp_lock __cfs_cacheline_aligned;
- /** total # req buffer descs allocated */
- int scp_nrqbds_total;
- /** # posted request buffers for receiving */
- int scp_nrqbds_posted;
- /** in progress of allocating rqbd */
- int scp_rqbd_allocating;
- /** # incoming reqs */
- int scp_nreqs_incoming;
- /** request buffers to be reposted */
- struct list_head scp_rqbd_idle;
- /** req buffers receiving */
- struct list_head scp_rqbd_posted;
- /** incoming reqs */
- struct list_head scp_req_incoming;
- /** timeout before re-posting reqs, in tick */
- long scp_rqbd_timeout;
- /**
- * all threads sleep on this. This wait-queue is signalled when new
- * incoming request arrives and when difficult reply has to be handled.
- */
- wait_queue_head_t scp_waitq;
-
- /** request history */
- struct list_head scp_hist_reqs;
- /** request buffer history */
- struct list_head scp_hist_rqbds;
- /** # request buffers in history */
- int scp_hist_nrqbds;
- /** sequence number for request */
- __u64 scp_hist_seq;
- /** highest seq culled from history */
- __u64 scp_hist_seq_culled;
-
- /**
- * serialize the following fields, used for processing requests
- * sent to this portal
- */
- spinlock_t scp_req_lock __cfs_cacheline_aligned;
- /** # reqs in either of the NRS heads below */
- /** # reqs being served */
- int scp_nreqs_active;
- /** # HPreqs being served */
- int scp_nhreqs_active;
- /** # hp requests handled */
- int scp_hreq_count;
-
- /** NRS head for regular requests */
- struct ptlrpc_nrs scp_nrs_reg;
- /** NRS head for HP requests; this is only valid for services that can
- * handle HP requests
- */
- struct ptlrpc_nrs *scp_nrs_hp;
-
- /** AT stuff */
- /** @{ */
- /**
- * serialize the following fields, used for changes on
- * adaptive timeout
- */
- spinlock_t scp_at_lock __cfs_cacheline_aligned;
- /** estimated rpc service time */
- struct adaptive_timeout scp_at_estimate;
- /** reqs waiting for replies */
- struct ptlrpc_at_array scp_at_array;
- /** early reply timer */
- struct timer_list scp_at_timer;
- /** debug */
- unsigned long scp_at_checktime;
- /** check early replies */
- unsigned scp_at_check;
- /** @} */
-
- /**
- * serialize the following fields, used for processing
- * replies for this portal
- */
- spinlock_t scp_rep_lock __cfs_cacheline_aligned;
- /** all the active replies */
- struct list_head scp_rep_active;
- /** List of free reply_states */
- struct list_head scp_rep_idle;
- /** waitq to run, when adding stuff to srv_free_rs_list */
- wait_queue_head_t scp_rep_waitq;
- /** # 'difficult' replies */
- atomic_t scp_nreps_difficult;
-};
-
-#define ptlrpc_service_for_each_part(part, i, svc) \
- for (i = 0; \
- i < (svc)->srv_ncpts && \
- (svc)->srv_parts && \
- ((part) = (svc)->srv_parts[i]); i++)
-
-/**
- * Declaration of ptlrpcd control structure
- */
-struct ptlrpcd_ctl {
- /**
- * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
- */
- unsigned long pc_flags;
- /**
- * Thread lock protecting structure fields.
- */
- spinlock_t pc_lock;
- /**
- * Start completion.
- */
- struct completion pc_starting;
- /**
- * Stop completion.
- */
- struct completion pc_finishing;
- /**
- * Thread requests set.
- */
- struct ptlrpc_request_set *pc_set;
- /**
- * Thread name used in kthread_run()
- */
- char pc_name[16];
- /**
- * CPT the thread is bound on.
- */
- int pc_cpt;
- /**
- * Index of ptlrpcd thread in the array.
- */
- int pc_index;
- /**
- * Pointer to the array of partners' ptlrpcd_ctl structure.
- */
- struct ptlrpcd_ctl **pc_partners;
- /**
- * Number of the ptlrpcd's partners.
- */
- int pc_npartners;
- /**
- * Record the partner index to be processed next.
- */
- int pc_cursor;
- /**
- * Error code if the thread failed to fully start.
- */
- int pc_error;
-};
-
-/* Bits for pc_flags */
-enum ptlrpcd_ctl_flags {
- /**
- * Ptlrpc thread start flag.
- */
- LIOD_START = 1 << 0,
- /**
- * Ptlrpc thread stop flag.
- */
- LIOD_STOP = 1 << 1,
- /**
- * Ptlrpc thread force flag (only stop force so far).
- * This will cause aborting any inflight rpcs handled
- * by thread if LIOD_STOP is specified.
- */
- LIOD_FORCE = 1 << 2,
- /**
- * This is a recovery ptlrpc thread.
- */
- LIOD_RECOVERY = 1 << 3,
-};
-
-/**
- * \addtogroup nrs
- * @{
- *
- * Service compatibility function; the policy is compatible with all services.
- *
- * \param[in] svc The service the policy is attempting to register with.
- * \param[in] desc The policy descriptor
- *
- * \retval true The policy is compatible with the service
- *
- * \see ptlrpc_nrs_pol_desc::pd_compat()
- */
-static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
-{
- return true;
-}
-
-/**
- * Service compatibility function; the policy is compatible with only a specific
- * service which is identified by its human-readable name at
- * ptlrpc_service::srv_name.
- *
- * \param[in] svc The service the policy is attempting to register with.
- * \param[in] desc The policy descriptor
- *
- * \retval false The policy is not compatible with the service
- * \retval true The policy is compatible with the service
- *
- * \see ptlrpc_nrs_pol_desc::pd_compat()
- */
-static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
-{
- return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
-}
-
-/** @} nrs */
-
-/* ptlrpc/events.c */
-extern struct lnet_handle_eq ptlrpc_eq_h;
-int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
- struct lnet_process_id *peer, lnet_nid_t *self);
-/**
- * These callbacks are invoked by LNet when something happened to
- * underlying buffer
- * @{
- */
-void request_out_callback(struct lnet_event *ev);
-void reply_in_callback(struct lnet_event *ev);
-void client_bulk_callback(struct lnet_event *ev);
-void request_in_callback(struct lnet_event *ev);
-void reply_out_callback(struct lnet_event *ev);
-/** @} */
-
-/* ptlrpc/connection.c */
-struct ptlrpc_connection *ptlrpc_connection_get(struct lnet_process_id peer,
- lnet_nid_t self,
- struct obd_uuid *uuid);
-int ptlrpc_connection_put(struct ptlrpc_connection *c);
-struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
-int ptlrpc_connection_init(void);
-void ptlrpc_connection_fini(void);
-
-/* ptlrpc/niobuf.c */
-/**
- * Actual interfacing with LNet to put/get/register/unregister stuff
- * @{
- */
-
-int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
-
-static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
-{
- struct ptlrpc_bulk_desc *desc;
- int rc;
-
- desc = req->rq_bulk;
-
- if (req->rq_bulk_deadline > ktime_get_real_seconds())
- return 1;
-
- if (!desc)
- return 0;
-
- spin_lock(&desc->bd_lock);
- rc = desc->bd_md_count;
- spin_unlock(&desc->bd_lock);
- return rc;
-}
-
-#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
-#define PTLRPC_REPLY_EARLY 0x02
-int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
-int ptlrpc_reply(struct ptlrpc_request *req);
-int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
-int ptlrpc_error(struct ptlrpc_request *req);
-int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
-int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
-int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
-/** @} */
-
-/* ptlrpc/client.c */
-/**
- * Client-side portals API. Everything to send requests, receive replies,
- * request queues, request management, etc.
- * @{
- */
-void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
-
-int ptlrpc_inc_ref(void);
-void ptlrpc_dec_ref(void);
-
-void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
- struct ptlrpc_client *);
-struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
-
-int ptlrpc_queue_wait(struct ptlrpc_request *req);
-int ptlrpc_replay_req(struct ptlrpc_request *req);
-void ptlrpc_abort_inflight(struct obd_import *imp);
-void ptlrpc_abort_set(struct ptlrpc_request_set *set);
-
-struct ptlrpc_request_set *ptlrpc_prep_set(void);
-struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
- void *arg);
-int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
-int ptlrpc_set_wait(struct ptlrpc_request_set *);
-void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
-void ptlrpc_set_destroy(struct ptlrpc_request_set *);
-void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
-
-void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
-int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
-
-struct ptlrpc_request_pool *
-ptlrpc_init_rq_pool(int, int,
- int (*populate_pool)(struct ptlrpc_request_pool *, int));
-
-void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
-struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
- const struct req_format *format);
-struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
- struct ptlrpc_request_pool *,
- const struct req_format *);
-void ptlrpc_request_free(struct ptlrpc_request *request);
-int ptlrpc_request_pack(struct ptlrpc_request *request,
- __u32 version, int opcode);
-struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *,
- const struct req_format *,
- __u32, int);
-int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode, char **bufs,
- struct ptlrpc_cli_ctx *ctx);
-void ptlrpc_req_finished(struct ptlrpc_request *request);
-struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
-struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- unsigned int nfrags,
- unsigned int max_brw,
- unsigned int type,
- unsigned int portal,
- const struct ptlrpc_bulk_frag_ops *ops);
-
-int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
- void *frag, int len);
-void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset, int len,
- int pin);
-static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset,
- int len)
-{
- __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
-}
-
-static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset,
- int len)
-{
- __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
-}
-
-void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
-
-static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
-{
- int i;
-
- for (i = 0; i < desc->bd_iov_count ; i++)
- put_page(BD_GET_KIOV(desc, i).bv_page);
-}
-
-void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
- struct obd_import *imp);
-__u64 ptlrpc_next_xid(void);
-__u64 ptlrpc_sample_next_xid(void);
-__u64 ptlrpc_req_xid(struct ptlrpc_request *request);
-
-/* Set of routines to run a function in ptlrpcd context */
-void *ptlrpcd_alloc_work(struct obd_import *imp,
- int (*cb)(const struct lu_env *, void *), void *data);
-void ptlrpcd_destroy_work(void *handler);
-int ptlrpcd_queue_work(void *handler);
-
-/** @} */
-struct ptlrpc_service_buf_conf {
- /* nbufs is buffers # to allocate when growing the pool */
- unsigned int bc_nbufs;
- /* buffer size to post */
- unsigned int bc_buf_size;
- /* portal to listed for requests on */
- unsigned int bc_req_portal;
- /* portal of where to send replies to */
- unsigned int bc_rep_portal;
- /* maximum request size to be accepted for this service */
- unsigned int bc_req_max_size;
- /* maximum reply size this service can ever send */
- unsigned int bc_rep_max_size;
-};
-
-struct ptlrpc_service_thr_conf {
- /* threadname should be 8 characters or less - 6 will be added on */
- char *tc_thr_name;
- /* threads increasing factor for each CPU */
- unsigned int tc_thr_factor;
- /* service threads # to start on each partition while initializing */
- unsigned int tc_nthrs_init;
- /*
- * low water of threads # upper-limit on each partition while running,
- * service availability may be impacted if threads number is lower
- * than this value. It can be ZERO if the service doesn't require
- * CPU affinity or there is only one partition.
- */
- unsigned int tc_nthrs_base;
- /* "soft" limit for total threads number */
- unsigned int tc_nthrs_max;
- /* user specified threads number, it will be validated due to
- * other members of this structure.
- */
- unsigned int tc_nthrs_user;
- /* set NUMA node affinity for service threads */
- unsigned int tc_cpu_affinity;
- /* Tags for lu_context associated with service thread */
- __u32 tc_ctx_tags;
-};
-
-struct ptlrpc_service_cpt_conf {
- struct cfs_cpt_table *cc_cptable;
- /* string pattern to describe CPTs for a service */
- char *cc_pattern;
-};
-
-struct ptlrpc_service_conf {
- /* service name */
- char *psc_name;
- /* soft watchdog timeout multiplifier to print stuck service traces */
- unsigned int psc_watchdog_factor;
- /* buffer information */
- struct ptlrpc_service_buf_conf psc_buf;
- /* thread information */
- struct ptlrpc_service_thr_conf psc_thr;
- /* CPU partition information */
- struct ptlrpc_service_cpt_conf psc_cpt;
- /* function table */
- struct ptlrpc_service_ops psc_ops;
-};
-
-/* ptlrpc/service.c */
-/**
- * Server-side services API. Register/unregister service, request state
- * management, service thread management
- *
- * @{
- */
-void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
-void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
-struct ptlrpc_service *ptlrpc_register_service(struct ptlrpc_service_conf *conf,
- struct kset *parent,
- struct dentry *debugfs_entry);
-
-int ptlrpc_start_threads(struct ptlrpc_service *svc);
-int ptlrpc_unregister_service(struct ptlrpc_service *service);
-
-int ptlrpc_hr_init(void);
-void ptlrpc_hr_fini(void);
-
-/** @} */
-
-/* ptlrpc/import.c */
-/**
- * Import API
- * @{
- */
-int ptlrpc_connect_import(struct obd_import *imp);
-int ptlrpc_init_import(struct obd_import *imp);
-int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
-int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
-
-/* ptlrpc/pack_generic.c */
-int ptlrpc_reconnect_import(struct obd_import *imp);
-/** @} */
-
-/**
- * ptlrpc msg buffer and swab interface
- *
- * @{
- */
-int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
- u32 index);
-void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
- u32 index);
-int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
-int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
-
-void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
- char **bufs);
-int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
- __u32 *lens, char **bufs);
-int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
- char **bufs);
-int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
- __u32 *lens, char **bufs, int flags);
-#define LPRFL_EARLY_REPLY 1
-int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
- char **bufs, int flags);
-int lustre_shrink_msg(struct lustre_msg *msg, int segment,
- unsigned int newlen, int move_data);
-void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
-int __lustre_unpack_msg(struct lustre_msg *m, int len);
-u32 lustre_msg_hdr_size(__u32 magic, u32 count);
-u32 lustre_msg_size(__u32 magic, int count, __u32 *lengths);
-u32 lustre_msg_size_v2(int count, __u32 *lengths);
-u32 lustre_packed_msg_size(struct lustre_msg *msg);
-u32 lustre_msg_early_size(void);
-void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, u32 n, u32 min_size);
-void *lustre_msg_buf(struct lustre_msg *m, u32 n, u32 minlen);
-u32 lustre_msg_buflen(struct lustre_msg *m, u32 n);
-u32 lustre_msg_bufcount(struct lustre_msg *m);
-char *lustre_msg_string(struct lustre_msg *m, u32 n, u32 max_len);
-__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
-void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
-__u32 lustre_msg_get_flags(struct lustre_msg *msg);
-void lustre_msg_add_flags(struct lustre_msg *msg, u32 flags);
-void lustre_msg_set_flags(struct lustre_msg *msg, u32 flags);
-void lustre_msg_clear_flags(struct lustre_msg *msg, u32 flags);
-__u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
-void lustre_msg_add_op_flags(struct lustre_msg *msg, u32 flags);
-struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
-__u32 lustre_msg_get_type(struct lustre_msg *msg);
-void lustre_msg_add_version(struct lustre_msg *msg, u32 version);
-__u32 lustre_msg_get_opc(struct lustre_msg *msg);
-__u16 lustre_msg_get_tag(struct lustre_msg *msg);
-__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
-__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
-__u64 lustre_msg_get_transno(struct lustre_msg *msg);
-__u64 lustre_msg_get_slv(struct lustre_msg *msg);
-__u32 lustre_msg_get_limit(struct lustre_msg *msg);
-void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
-void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
-int lustre_msg_get_status(struct lustre_msg *msg);
-__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
-__u32 lustre_msg_get_magic(struct lustre_msg *msg);
-__u32 lustre_msg_get_timeout(struct lustre_msg *msg);
-__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
-__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
-__u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
-void lustre_msg_set_handle(struct lustre_msg *msg,
- struct lustre_handle *handle);
-void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
-void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
-void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid);
-void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag);
-void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
-void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
-void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
-void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
-void ptlrpc_request_set_replen(struct ptlrpc_request *req);
-void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
-void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
-void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
-void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
-void lustre_msg_set_mbits(struct lustre_msg *msg, u64 mbits);
-
-static inline void
-lustre_shrink_reply(struct ptlrpc_request *req, int segment,
- unsigned int newlen, int move_data)
-{
- LASSERT(req->rq_reply_state);
- LASSERT(req->rq_repmsg);
- req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
- newlen, move_data);
-}
-
-#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
-
-static inline int ptlrpc_status_hton(int h)
-{
- /*
- * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
- * ELDLM_LOCK_ABORTED, etc.
- */
- if (h < 0)
- return -lustre_errno_hton(-h);
- else
- return h;
-}
-
-static inline int ptlrpc_status_ntoh(int n)
-{
- /*
- * See the comment in ptlrpc_status_hton().
- */
- if (n < 0)
- return -lustre_errno_ntoh(-n);
- else
- return n;
-}
-
-#else
-
-#define ptlrpc_status_hton(h) (h)
-#define ptlrpc_status_ntoh(n) (n)
-
-#endif
-/** @} */
-
-/** Change request phase of \a req to \a new_phase */
-static inline void
-ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
-{
- if (req->rq_phase == new_phase)
- return;
-
- if (new_phase == RQ_PHASE_UNREG_RPC ||
- new_phase == RQ_PHASE_UNREG_BULK) {
- /* No embedded unregistering phases */
- if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
- req->rq_phase == RQ_PHASE_UNREG_BULK)
- return;
-
- req->rq_next_phase = req->rq_phase;
- if (req->rq_import)
- atomic_inc(&req->rq_import->imp_unregistering);
- }
-
- if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
- req->rq_phase == RQ_PHASE_UNREG_BULK) {
- if (req->rq_import)
- atomic_dec(&req->rq_import->imp_unregistering);
- }
-
- DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
- ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
-
- req->rq_phase = new_phase;
-}
-
-/**
- * Returns true if request \a req got early reply and hard deadline is not met
- */
-static inline int
-ptlrpc_client_early(struct ptlrpc_request *req)
-{
- return req->rq_early;
-}
-
-/**
- * Returns true if we got real reply from server for this request
- */
-static inline int
-ptlrpc_client_replied(struct ptlrpc_request *req)
-{
- if (req->rq_reply_deadline > ktime_get_real_seconds())
- return 0;
- return req->rq_replied;
-}
-
-/** Returns true if request \a req is in process of receiving server reply */
-static inline int
-ptlrpc_client_recv(struct ptlrpc_request *req)
-{
- if (req->rq_reply_deadline > ktime_get_real_seconds())
- return 1;
- return req->rq_receiving_reply;
-}
-
-static inline int
-ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
-{
- int rc;
-
- spin_lock(&req->rq_lock);
- if (req->rq_reply_deadline > ktime_get_real_seconds()) {
- spin_unlock(&req->rq_lock);
- return 1;
- }
- if (req->rq_req_deadline > ktime_get_real_seconds()) {
- spin_unlock(&req->rq_lock);
- return 1;
- }
- rc = !req->rq_req_unlinked || !req->rq_reply_unlinked ||
- req->rq_receiving_reply;
- spin_unlock(&req->rq_lock);
- return rc;
-}
-
-static inline void
-ptlrpc_client_wake_req(struct ptlrpc_request *req)
-{
- if (!req->rq_set)
- wake_up(&req->rq_reply_waitq);
- else
- wake_up(&req->rq_set->set_waitq);
-}
-
-static inline void
-ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
-{
- LASSERT(atomic_read(&rs->rs_refcount) > 0);
- atomic_inc(&rs->rs_refcount);
-}
-
-static inline void
-ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
-{
- LASSERT(atomic_read(&rs->rs_refcount) > 0);
- if (atomic_dec_and_test(&rs->rs_refcount))
- lustre_free_reply_state(rs);
-}
-
-/* Should only be called once per req */
-static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
-{
- if (!req->rq_reply_state)
- return; /* shouldn't occur */
- ptlrpc_rs_decref(req->rq_reply_state);
- req->rq_reply_state = NULL;
- req->rq_repmsg = NULL;
-}
-
-static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
-{
- return lustre_msg_get_magic(req->rq_reqmsg);
-}
-
-static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
-{
- switch (req->rq_reqmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return req->rq_reqmsg->lm_repsize;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n",
- req->rq_reqmsg->lm_magic);
- return -EFAULT;
- }
-}
-
-static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
-{
- if (req->rq_delay_limit != 0 &&
- time_before(cfs_time_add(req->rq_queued_time,
- req->rq_delay_limit * HZ),
- cfs_time_current())) {
- return 1;
- }
- return 0;
-}
-
-static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
-{
- if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
- spin_lock(&req->rq_lock);
- req->rq_no_resend = 1;
- spin_unlock(&req->rq_lock);
- }
- return req->rq_no_resend;
-}
-
-static inline int
-ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
-{
- int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
-
- return svcpt->scp_service->srv_watchdog_factor *
- max_t(int, at, obd_timeout);
-}
-
-static inline struct ptlrpc_service *
-ptlrpc_req2svc(struct ptlrpc_request *req)
-{
- return req->rq_rqbd->rqbd_svcpt->scp_service;
-}
-
-/* ldlm/ldlm_lib.c */
-/**
- * Target client logic
- * @{
- */
-int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
-int client_obd_cleanup(struct obd_device *obddev);
-int client_connect_import(const struct lu_env *env,
- struct obd_export **exp, struct obd_device *obd,
- struct obd_uuid *cluuid, struct obd_connect_data *,
- void *localdata);
-int client_disconnect_export(struct obd_export *exp);
-int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
- int priority);
-int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
-int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
- struct obd_uuid *uuid);
-int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
-void client_destroy_import(struct obd_import *imp);
-/** @} */
-
-/* ptlrpc/pinger.c */
-/**
- * Pinger API (client side only)
- * @{
- */
-enum timeout_event {
- TIMEOUT_GRANT = 1
-};
-
-struct timeout_item;
-typedef int (*timeout_cb_t)(struct timeout_item *, void *);
-int ptlrpc_pinger_add_import(struct obd_import *imp);
-int ptlrpc_pinger_del_import(struct obd_import *imp);
-int ptlrpc_add_timeout_client(int time, enum timeout_event event,
- timeout_cb_t cb, void *data,
- struct list_head *obd_list);
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
- enum timeout_event event);
-struct ptlrpc_request *ptlrpc_prep_ping(struct obd_import *imp);
-int ptlrpc_obd_ping(struct obd_device *obd);
-void ptlrpc_pinger_ir_up(void);
-void ptlrpc_pinger_ir_down(void);
-/** @} */
-int ptlrpc_pinger_suppress_pings(void);
-
-/* ptlrpc/ptlrpcd.c */
-void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
-void ptlrpcd_free(struct ptlrpcd_ctl *pc);
-void ptlrpcd_wake(struct ptlrpc_request *req);
-void ptlrpcd_add_req(struct ptlrpc_request *req);
-int ptlrpcd_addref(void);
-void ptlrpcd_decref(void);
-
-/* ptlrpc/lproc_ptlrpc.c */
-/**
- * procfs output related functions
- * @{
- */
-const char *ll_opcode2str(__u32 opcode);
-void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
-void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
-void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
-/** @} */
-
-/* ptlrpc/llog_client.c */
-extern struct llog_operations llog_client_ops;
-/** @} net */
-
-#endif
-/** @} PtlRPC */
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs.h b/drivers/staging/lustre/lustre/include/lustre_nrs.h
deleted file mode 100644
index ffa7317da35b..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_nrs.h
+++ /dev/null
@@ -1,718 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2014, Intel Corporation.
- *
- * Copyright 2012 Xyratex Technology Limited
- */
-/*
- *
- * Network Request Scheduler (NRS)
- *
- */
-
-#ifndef _LUSTRE_NRS_H
-#define _LUSTRE_NRS_H
-
-/**
- * \defgroup nrs Network Request Scheduler
- * @{
- */
-struct ptlrpc_nrs_policy;
-struct ptlrpc_nrs_resource;
-struct ptlrpc_nrs_request;
-
-/**
- * NRS control operations.
- *
- * These are common for all policies.
- */
-enum ptlrpc_nrs_ctl {
- /**
- * Not a valid opcode.
- */
- PTLRPC_NRS_CTL_INVALID,
- /**
- * Activate the policy.
- */
- PTLRPC_NRS_CTL_START,
- /**
- * Reserved for multiple primary policies, which may be a possibility
- * in the future.
- */
- PTLRPC_NRS_CTL_STOP,
- /**
- * Policies can start using opcodes from this value and onwards for
- * their own purposes; the assigned value itself is arbitrary.
- */
- PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
-};
-
-/**
- * NRS policy operations.
- *
- * These determine the behaviour of a policy, and are called in response to
- * NRS core events.
- */
-struct ptlrpc_nrs_pol_ops {
- /**
- * Called during policy registration; this operation is optional.
- *
- * \param[in,out] policy The policy being initialized
- */
- int (*op_policy_init)(struct ptlrpc_nrs_policy *policy);
- /**
- * Called during policy unregistration; this operation is optional.
- *
- * \param[in,out] policy The policy being unregistered/finalized
- */
- void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
- /**
- * Called when activating a policy via lprocfs; policies allocate and
- * initialize their resources here; this operation is optional.
- *
- * \param[in,out] policy The policy being started
- *
- * \see nrs_policy_start_locked()
- */
- int (*op_policy_start)(struct ptlrpc_nrs_policy *policy);
- /**
- * Called when deactivating a policy via lprocfs; policies deallocate
- * their resources here; this operation is optional
- *
- * \param[in,out] policy The policy being stopped
- *
- * \see nrs_policy_stop0()
- */
- void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
- /**
- * Used for policy-specific operations; i.e. not generic ones like
- * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
- * to an ioctl; this operation is optional.
- *
- * \param[in,out] policy The policy carrying out operation \a opc
- * \param[in] opc The command operation being carried out
- * \param[in,out] arg An generic buffer for communication between the
- * user and the control operation
- *
- * \retval -ve error
- * \retval 0 success
- *
- * \see ptlrpc_nrs_policy_control()
- */
- int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
- enum ptlrpc_nrs_ctl opc, void *arg);
-
- /**
- * Called when obtaining references to the resources of the resource
- * hierarchy for a request that has arrived for handling at the PTLRPC
- * service. Policies should return -ve for requests they do not wish
- * to handle. This operation is mandatory.
- *
- * \param[in,out] policy The policy we're getting resources for.
- * \param[in,out] nrq The request we are getting resources for.
- * \param[in] parent The parent resource of the resource being
- * requested; set to NULL if none.
- * \param[out] resp The resource is to be returned here; the
- * fallback policy in an NRS head should
- * \e always return a non-NULL pointer value.
- * \param[in] moving_req When set, signifies that this is an attempt
- * to obtain resources for a request being moved
- * to the high-priority NRS head by
- * ldlm_lock_reorder_req().
- * This implies two things:
- * 1. We are under obd_export::exp_rpc_lock and
- * so should not sleep.
- * 2. We should not perform non-idempotent or can
- * skip performing idempotent operations that
- * were carried out when resources were first
- * taken for the request when it was initialized
- * in ptlrpc_nrs_req_initialize().
- *
- * \retval 0, +ve The level of the returned resource in the resource
- * hierarchy; currently only 0 (for a non-leaf resource)
- * and 1 (for a leaf resource) are supported by the
- * framework.
- * \retval -ve error
- *
- * \see ptlrpc_nrs_req_initialize()
- * \see ptlrpc_nrs_hpreq_add_nolock()
- * \see ptlrpc_nrs_req_hp_move()
- */
- int (*op_res_get)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp,
- bool moving_req);
- /**
- * Called when releasing references taken for resources in the resource
- * hierarchy for the request; this operation is optional.
- *
- * \param[in,out] policy The policy the resource belongs to
- * \param[in] res The resource to be freed
- *
- * \see ptlrpc_nrs_req_finalize()
- * \see ptlrpc_nrs_hpreq_add_nolock()
- * \see ptlrpc_nrs_req_hp_move()
- */
- void (*op_res_put)(struct ptlrpc_nrs_policy *policy,
- const struct ptlrpc_nrs_resource *res);
-
- /**
- * Obtains a request for handling from the policy, and optionally
- * removes the request from the policy; this operation is mandatory.
- *
- * \param[in,out] policy The policy to poll
- * \param[in] peek When set, signifies that we just want to
- * examine the request, and not handle it, so the
- * request is not removed from the policy.
- * \param[in] force When set, it will force a policy to return a
- * request if it has one queued.
- *
- * \retval NULL No request available for handling
- * \retval valid-pointer The request polled for handling
- *
- * \see ptlrpc_nrs_req_get_nolock()
- */
- struct ptlrpc_nrs_request *
- (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
- bool force);
- /**
- * Called when attempting to add a request to a policy for later
- * handling; this operation is mandatory.
- *
- * \param[in,out] policy The policy on which to enqueue \a nrq
- * \param[in,out] nrq The request to enqueue
- *
- * \retval 0 success
- * \retval != 0 error
- *
- * \see ptlrpc_nrs_req_add_nolock()
- */
- int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Removes a request from the policy's set of pending requests. Normally
- * called after a request has been polled successfully from the policy
- * for handling; this operation is mandatory.
- *
- * \param[in,out] policy The policy the request \a nrq belongs to
- * \param[in,out] nrq The request to dequeue
- *
- * \see ptlrpc_nrs_req_del_nolock()
- */
- void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Called after the request being carried out. Could be used for
- * job/resource control; this operation is optional.
- *
- * \param[in,out] policy The policy which is stopping to handle request
- * \a nrq
- * \param[in,out] nrq The request
- *
- * \pre assert_spin_locked(&svcpt->scp_req_lock)
- *
- * \see ptlrpc_nrs_req_stop_nolock()
- */
- void (*op_req_stop)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Registers the policy's lprocfs interface with a PTLRPC service.
- *
- * \param[in] svc The service
- *
- * \retval 0 success
- * \retval != 0 error
- */
- int (*op_lprocfs_init)(struct ptlrpc_service *svc);
- /**
- * Unegisters the policy's lprocfs interface with a PTLRPC service.
- *
- * In cases of failed policy registration in
- * \e ptlrpc_nrs_policy_register(), this function may be called for a
- * service which has not registered the policy successfully, so
- * implementations of this method should make sure their operations are
- * safe in such cases.
- *
- * \param[in] svc The service
- */
- void (*op_lprocfs_fini)(struct ptlrpc_service *svc);
-};
-
-/**
- * Policy flags
- */
-enum nrs_policy_flags {
- /**
- * Fallback policy, use this flag only on a single supported policy per
- * service. The flag cannot be used on policies that use
- * \e PTLRPC_NRS_FL_REG_EXTERN
- */
- PTLRPC_NRS_FL_FALLBACK = BIT(0),
- /**
- * Start policy immediately after registering.
- */
- PTLRPC_NRS_FL_REG_START = BIT(1),
- /**
- * This is a policy registering from a module different to the one NRS
- * core ships in (currently ptlrpc).
- */
- PTLRPC_NRS_FL_REG_EXTERN = BIT(2),
-};
-
-/**
- * NRS queue type.
- *
- * Denotes whether an NRS instance is for handling normal or high-priority
- * RPCs, or whether an operation pertains to one or both of the NRS instances
- * in a service.
- */
-enum ptlrpc_nrs_queue_type {
- PTLRPC_NRS_QUEUE_REG = BIT(0),
- PTLRPC_NRS_QUEUE_HP = BIT(1),
- PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
-};
-
-/**
- * NRS head
- *
- * A PTLRPC service has at least one NRS head instance for handling normal
- * priority RPCs, and may optionally have a second NRS head instance for
- * handling high-priority RPCs. Each NRS head maintains a list of available
- * policies, of which one and only one policy is acting as the fallback policy,
- * and optionally a different policy may be acting as the primary policy. For
- * all RPCs handled by this NRS head instance, NRS core will first attempt to
- * enqueue the RPC using the primary policy (if any). The fallback policy is
- * used in the following cases:
- * - when there was no primary policy in the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
- * was initialized.
- * - when the primary policy that was at the
- * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
- * RPC was initialized, denoted it did not wish, or for some other reason was
- * not able to handle the request, by returning a non-valid NRS resource
- * reference.
- * - when the primary policy that was at the
- * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
- * RPC was initialized, fails later during the request enqueueing stage.
- *
- * \see nrs_resource_get_safe()
- * \see nrs_request_enqueue()
- */
-struct ptlrpc_nrs {
- spinlock_t nrs_lock;
- /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
- /**
- * List of registered policies
- */
- struct list_head nrs_policy_list;
- /**
- * List of policies with queued requests. Policies that have any
- * outstanding requests are queued here, and this list is queried
- * in a round-robin manner from NRS core when obtaining a request
- * for handling. This ensures that requests from policies that at some
- * point transition away from the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
- */
- struct list_head nrs_policy_queued;
- /**
- * Service partition for this NRS head
- */
- struct ptlrpc_service_part *nrs_svcpt;
- /**
- * Primary policy, which is the preferred policy for handling RPCs
- */
- struct ptlrpc_nrs_policy *nrs_policy_primary;
- /**
- * Fallback policy, which is the backup policy for handling RPCs
- */
- struct ptlrpc_nrs_policy *nrs_policy_fallback;
- /**
- * This NRS head handles either HP or regular requests
- */
- enum ptlrpc_nrs_queue_type nrs_queue_type;
- /**
- * # queued requests from all policies in this NRS head
- */
- unsigned long nrs_req_queued;
- /**
- * # scheduled requests from all policies in this NRS head
- */
- unsigned long nrs_req_started;
- /**
- * # policies on this NRS
- */
- unsigned int nrs_num_pols;
- /**
- * This NRS head is in progress of starting a policy
- */
- unsigned int nrs_policy_starting:1;
- /**
- * In progress of shutting down the whole NRS head; used during
- * unregistration
- */
- unsigned int nrs_stopping:1;
- /**
- * NRS policy is throttling request
- */
- unsigned int nrs_throttling:1;
-};
-
-#define NRS_POL_NAME_MAX 16
-#define NRS_POL_ARG_MAX 16
-
-struct ptlrpc_nrs_pol_desc;
-
-/**
- * Service compatibility predicate; this determines whether a policy is adequate
- * for handling RPCs of a particular PTLRPC service.
- *
- * XXX:This should give the same result during policy registration and
- * unregistration, and for all partitions of a service; so the result should not
- * depend on temporal service or other properties, that may influence the
- * result.
- */
-typedef bool (*nrs_pol_desc_compat_t)(const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc);
-
-struct ptlrpc_nrs_pol_conf {
- /**
- * Human-readable policy name
- */
- char nc_name[NRS_POL_NAME_MAX];
- /**
- * NRS operations for this policy
- */
- const struct ptlrpc_nrs_pol_ops *nc_ops;
- /**
- * Service compatibility predicate
- */
- nrs_pol_desc_compat_t nc_compat;
- /**
- * Set for policies that support a single ptlrpc service, i.e. ones that
- * have \a pd_compat set to nrs_policy_compat_one(). The variable value
- * depicts the name of the single service that such policies are
- * compatible with.
- */
- const char *nc_compat_svc_name;
- /**
- * Owner module for this policy descriptor; policies registering from a
- * different module to the one the NRS framework is held within
- * (currently ptlrpc), should set this field to THIS_MODULE.
- */
- struct module *nc_owner;
- /**
- * Policy registration flags; a bitmask of \e nrs_policy_flags
- */
- unsigned int nc_flags;
-};
-
-/**
- * NRS policy registering descriptor
- *
- * Is used to hold a description of a policy that can be passed to NRS core in
- * order to register the policy with NRS heads in different PTLRPC services.
- */
-struct ptlrpc_nrs_pol_desc {
- /**
- * Human-readable policy name
- */
- char pd_name[NRS_POL_NAME_MAX];
- /**
- * Link into nrs_core::nrs_policies
- */
- struct list_head pd_list;
- /**
- * NRS operations for this policy
- */
- const struct ptlrpc_nrs_pol_ops *pd_ops;
- /**
- * Service compatibility predicate
- */
- nrs_pol_desc_compat_t pd_compat;
- /**
- * Set for policies that are compatible with only one PTLRPC service.
- *
- * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
- */
- const char *pd_compat_svc_name;
- /**
- * Owner module for this policy descriptor.
- *
- * We need to hold a reference to the module whenever we might make use
- * of any of the module's contents, i.e.
- * - If one or more instances of the policy are at a state where they
- * might be handling a request, i.e.
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
- * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
- * is taken on the module when
- * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
- * becomes 0, so that we hold only one reference to the module maximum
- * at any time.
- *
- * We do not need to hold a reference to the module, even though we
- * might use code and data from the module, in the following cases:
- * - During external policy registration, because this should happen in
- * the module's init() function, in which case the module is safe from
- * removal because a reference is being held on the module by the
- * kernel, and iirc kmod (and I guess module-init-tools also) will
- * serialize any racing processes properly anyway.
- * - During external policy unregistration, because this should happen
- * in a module's exit() function, and any attempts to start a policy
- * instance would need to take a reference on the module, and this is
- * not possible once we have reached the point where the exit()
- * handler is called.
- * - During service registration and unregistration, as service setup
- * and cleanup, and policy registration, unregistration and policy
- * instance starting, are serialized by \e nrs_core::nrs_mutex, so
- * as long as users adhere to the convention of registering policies
- * in init() and unregistering them in module exit() functions, there
- * should not be a race between these operations.
- * - During any policy-specific lprocfs operations, because a reference
- * is held by the kernel on a proc entry that has been entered by a
- * syscall, so as long as proc entries are removed during
- * unregistration time, then unregistration and lprocfs operations
- * will be properly serialized.
- */
- struct module *pd_owner;
- /**
- * Bitmask of \e nrs_policy_flags
- */
- unsigned int pd_flags;
- /**
- * # of references on this descriptor
- */
- atomic_t pd_refs;
-};
-
-/**
- * NRS policy state
- *
- * Policies transition from one state to the other during their lifetime
- */
-enum ptlrpc_nrs_pol_state {
- /**
- * Not a valid policy state.
- */
- NRS_POL_STATE_INVALID,
- /**
- * Policies are at this state either at the start of their life, or
- * transition here when the user selects a different policy to act
- * as the primary one.
- */
- NRS_POL_STATE_STOPPED,
- /**
- * Policy is progress of stopping
- */
- NRS_POL_STATE_STOPPING,
- /**
- * Policy is in progress of starting
- */
- NRS_POL_STATE_STARTING,
- /**
- * A policy is in this state in two cases:
- * - it is the fallback policy, which is always in this state.
- * - it has been activated by the user; i.e. it is the primary policy,
- */
- NRS_POL_STATE_STARTED,
-};
-
-/**
- * NRS policy information
- *
- * Used for obtaining information for the status of a policy via lprocfs
- */
-struct ptlrpc_nrs_pol_info {
- /**
- * Policy name
- */
- char pi_name[NRS_POL_NAME_MAX];
- /**
- * Policy argument
- */
- char pi_arg[NRS_POL_ARG_MAX];
- /**
- * Current policy state
- */
- enum ptlrpc_nrs_pol_state pi_state;
- /**
- * # RPCs enqueued for later dispatching by the policy
- */
- long pi_req_queued;
- /**
- * # RPCs started for dispatch by the policy
- */
- long pi_req_started;
- /**
- * Is this a fallback policy?
- */
- unsigned pi_fallback:1;
-};
-
-/**
- * NRS policy
- *
- * There is one instance of this for each policy in each NRS head of each
- * PTLRPC service partition.
- */
-struct ptlrpc_nrs_policy {
- /**
- * Linkage into the NRS head's list of policies,
- * ptlrpc_nrs:nrs_policy_list
- */
- struct list_head pol_list;
- /**
- * Linkage into the NRS head's list of policies with enqueued
- * requests ptlrpc_nrs:nrs_policy_queued
- */
- struct list_head pol_list_queued;
- /**
- * Current state of this policy
- */
- enum ptlrpc_nrs_pol_state pol_state;
- /**
- * Bitmask of nrs_policy_flags
- */
- unsigned int pol_flags;
- /**
- * # RPCs enqueued for later dispatching by the policy
- */
- long pol_req_queued;
- /**
- * # RPCs started for dispatch by the policy
- */
- long pol_req_started;
- /**
- * Usage Reference count taken on the policy instance
- */
- long pol_ref;
- /**
- * Human-readable policy argument
- */
- char pol_arg[NRS_POL_ARG_MAX];
- /**
- * The NRS head this policy has been created at
- */
- struct ptlrpc_nrs *pol_nrs;
- /**
- * Private policy data; varies by policy type
- */
- void *pol_private;
- /**
- * Policy descriptor for this policy instance.
- */
- struct ptlrpc_nrs_pol_desc *pol_desc;
-};
-
-/**
- * NRS resource
- *
- * Resources are embedded into two types of NRS entities:
- * - Inside NRS policies, in the policy's private data in
- * ptlrpc_nrs_policy::pol_private
- * - In objects that act as prime-level scheduling entities in different NRS
- * policies; e.g. on a policy that performs round robin or similar order
- * scheduling across client NIDs, there would be one NRS resource per unique
- * client NID. On a policy which performs round robin scheduling across
- * backend filesystem objects, there would be one resource associated with
- * each of the backend filesystem objects partaking in the scheduling
- * performed by the policy.
- *
- * NRS resources share a parent-child relationship, in which resources embedded
- * in policy instances are the parent entities, with all scheduling entities
- * a policy schedules across being the children, thus forming a simple resource
- * hierarchy. This hierarchy may be extended with one or more levels in the
- * future if the ability to have more than one primary policy is added.
- *
- * Upon request initialization, references to the then active NRS policies are
- * taken and used to later handle the dispatching of the request with one of
- * these policies.
- *
- * \see nrs_resource_get_safe()
- * \see ptlrpc_nrs_req_add()
- */
-struct ptlrpc_nrs_resource {
- /**
- * This NRS resource's parent; is NULL for resources embedded in NRS
- * policy instances; i.e. those are top-level ones.
- */
- struct ptlrpc_nrs_resource *res_parent;
- /**
- * The policy associated with this resource.
- */
- struct ptlrpc_nrs_policy *res_policy;
-};
-
-enum {
- NRS_RES_FALLBACK,
- NRS_RES_PRIMARY,
- NRS_RES_MAX
-};
-
-#include <lustre_nrs_fifo.h>
-
-/**
- * NRS request
- *
- * Instances of this object exist embedded within ptlrpc_request; the main
- * purpose of this object is to hold references to the request's resources
- * for the lifetime of the request, and to hold properties that policies use
- * use for determining the request's scheduling priority.
- **/
-struct ptlrpc_nrs_request {
- /**
- * The request's resource hierarchy.
- */
- struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
- /**
- * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
- * policy that was used to enqueue the request.
- *
- * \see nrs_request_enqueue()
- */
- unsigned int nr_res_idx;
- unsigned int nr_initialized:1;
- unsigned int nr_enqueued:1;
- unsigned int nr_started:1;
- unsigned int nr_finalized:1;
-
- /**
- * Policy-specific fields, used for determining a request's scheduling
- * priority, and other supporting functionality.
- */
- union {
- /**
- * Fields for the FIFO policy
- */
- struct nrs_fifo_req fifo;
- } nr_u;
- /**
- * Externally-registering policies may want to use this to allocate
- * their own request properties.
- */
- void *ext;
-};
-
-/** @} nrs */
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
deleted file mode 100644
index b70d97d4acbb..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2014, Intel Corporation.
- *
- * Copyright 2012 Xyratex Technology Limited
- */
-/*
- *
- * Network Request Scheduler (NRS) First-in First-out (FIFO) policy
- *
- */
-
-#ifndef _LUSTRE_NRS_FIFO_H
-#define _LUSTRE_NRS_FIFO_H
-
-/* \name fifo
- *
- * FIFO policy
- *
- * This policy is a logical wrapper around previous, non-NRS functionality.
- * It dispatches RPCs in the same order as they arrive from the network. This
- * policy is currently used as the fallback policy, and the only enabled policy
- * on all NRS heads of all PTLRPC service partitions.
- * @{
- */
-
-/**
- * Private data structure for the FIFO policy
- */
-struct nrs_fifo_head {
- /**
- * Resource object for policy instance.
- */
- struct ptlrpc_nrs_resource fh_res;
- /**
- * List of queued requests.
- */
- struct list_head fh_list;
- /**
- * For debugging purposes.
- */
- __u64 fh_sequence;
-};
-
-struct nrs_fifo_req {
- struct list_head fr_list;
- __u64 fr_sequence;
-};
-
-/** @} fifo */
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_obdo.h b/drivers/staging/lustre/lustre/include/lustre_obdo.h
deleted file mode 100644
index d67dcbb84f18..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_obdo.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2014, Intel Corporation.
- *
- * Copyright 2015 Cray Inc, all rights reserved.
- * Author: Ben Evans.
- *
- * Define obdo associated functions
- * obdo: OBject Device o...
- */
-
-#ifndef _LUSTRE_OBDO_H_
-#define _LUSTRE_OBDO_H_
-
-#include <uapi/linux/lustre/lustre_idl.h>
-
-/**
- * Create an obdo to send over the wire
- */
-void lustre_set_wire_obdo(const struct obd_connect_data *ocd,
- struct obdo *wobdo,
- const struct obdo *lobdo);
-
-/**
- * Create a local obdo from a wire based odbo
- */
-void lustre_get_wire_obdo(const struct obd_connect_data *ocd,
- struct obdo *lobdo,
- const struct obdo *wobdo);
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
deleted file mode 100644
index ce28ed5c1ef8..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef LUSTRE_PATCHLESS_COMPAT_H
-#define LUSTRE_PATCHLESS_COMPAT_H
-
-#include <linux/fs.h>
-
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/hash.h>
-
-#define ll_delete_from_page_cache(page) delete_from_page_cache(page)
-
-static inline void
-truncate_complete_page(struct address_space *mapping, struct page *page)
-{
- if (page->mapping != mapping)
- return;
-
- if (PagePrivate(page))
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
-
- cancel_dirty_page(page);
- ClearPageMappedToDisk(page);
- ll_delete_from_page_cache(page);
-}
-
-#ifndef ATTR_CTIME_SET
-/*
- * set ATTR_CTIME_SET to a high value to avoid any risk of collision with other
- * ATTR_* attributes (see bug 13828)
- */
-#define ATTR_CTIME_SET (1 << 28)
-#endif
-
-#endif /* LUSTRE_PATCHLESS_COMPAT_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
deleted file mode 100644
index 213d0a01adcf..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ /dev/null
@@ -1,307 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_req_layout.h
- *
- * Lustre Metadata Target (mdt) request handler
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-
-#ifndef _LUSTRE_REQ_LAYOUT_H__
-#define _LUSTRE_REQ_LAYOUT_H__
-
-#include <linux/types.h>
-
-/** \defgroup req_layout req_layout
- *
- * @{
- */
-
-struct req_msg_field;
-struct req_format;
-struct req_capsule;
-
-struct ptlrpc_request;
-
-enum req_location {
- RCL_CLIENT,
- RCL_SERVER,
- RCL_NR
-};
-
-/* Maximal number of fields (buffers) in a request message. */
-#define REQ_MAX_FIELD_NR 9
-
-struct req_capsule {
- struct ptlrpc_request *rc_req;
- const struct req_format *rc_fmt;
- enum req_location rc_loc;
- __u32 rc_area[RCL_NR][REQ_MAX_FIELD_NR];
-};
-
-void req_capsule_init(struct req_capsule *pill, struct ptlrpc_request *req,
- enum req_location location);
-void req_capsule_fini(struct req_capsule *pill);
-
-void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt);
-size_t req_capsule_filled_sizes(struct req_capsule *pill,
- enum req_location loc);
-int req_capsule_server_pack(struct req_capsule *pill);
-
-void *req_capsule_client_get(struct req_capsule *pill,
- const struct req_msg_field *field);
-void *req_capsule_client_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- void *swabber);
-void *req_capsule_client_sized_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- u32 len);
-void *req_capsule_server_get(struct req_capsule *pill,
- const struct req_msg_field *field);
-void *req_capsule_server_sized_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- u32 len);
-void *req_capsule_server_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- void *swabber);
-void *req_capsule_server_sized_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- u32 len, void *swabber);
-
-void req_capsule_set_size(struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc, u32 size);
-u32 req_capsule_get_size(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc);
-u32 req_capsule_msg_size(struct req_capsule *pill, enum req_location loc);
-u32 req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
- enum req_location loc);
-void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt);
-
-int req_capsule_has_field(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc);
-void req_capsule_shrink(struct req_capsule *pill,
- const struct req_msg_field *field,
- u32 newlen, enum req_location loc);
-int req_layout_init(void);
-void req_layout_fini(void);
-
-extern struct req_format RQF_OBD_PING;
-extern struct req_format RQF_OBD_SET_INFO;
-extern struct req_format RQF_SEC_CTX;
-/* MGS req_format */
-extern struct req_format RQF_MGS_TARGET_REG;
-extern struct req_format RQF_MGS_SET_INFO;
-extern struct req_format RQF_MGS_CONFIG_READ;
-/* fid/fld req_format */
-extern struct req_format RQF_SEQ_QUERY;
-extern struct req_format RQF_FLD_QUERY;
-extern struct req_format RQF_FLD_READ;
-/* MDS req_format */
-extern struct req_format RQF_MDS_CONNECT;
-extern struct req_format RQF_MDS_DISCONNECT;
-extern struct req_format RQF_MDS_STATFS;
-extern struct req_format RQF_MDS_GETSTATUS;
-extern struct req_format RQF_MDS_SYNC;
-extern struct req_format RQF_MDS_GETXATTR;
-extern struct req_format RQF_MDS_GETATTR;
-
-/*
- * This is format of direct (non-intent) MDS_GETATTR_NAME request.
- */
-extern struct req_format RQF_MDS_GETATTR_NAME;
-extern struct req_format RQF_MDS_CLOSE;
-extern struct req_format RQF_MDS_INTENT_CLOSE;
-extern struct req_format RQF_MDS_CONNECT;
-extern struct req_format RQF_MDS_DISCONNECT;
-extern struct req_format RQF_MDS_GET_INFO;
-extern struct req_format RQF_MDS_READPAGE;
-extern struct req_format RQF_MDS_WRITEPAGE;
-extern struct req_format RQF_MDS_REINT;
-extern struct req_format RQF_MDS_REINT_CREATE;
-extern struct req_format RQF_MDS_REINT_CREATE_ACL;
-extern struct req_format RQF_MDS_REINT_CREATE_SLAVE;
-extern struct req_format RQF_MDS_REINT_CREATE_SYM;
-extern struct req_format RQF_MDS_REINT_OPEN;
-extern struct req_format RQF_MDS_REINT_UNLINK;
-extern struct req_format RQF_MDS_REINT_LINK;
-extern struct req_format RQF_MDS_REINT_RENAME;
-extern struct req_format RQF_MDS_REINT_SETATTR;
-extern struct req_format RQF_MDS_REINT_SETXATTR;
-extern struct req_format RQF_MDS_QUOTACTL;
-extern struct req_format RQF_MDS_SWAP_LAYOUTS;
-extern struct req_format RQF_MDS_REINT_MIGRATE;
-/* MDS hsm formats */
-extern struct req_format RQF_MDS_HSM_STATE_GET;
-extern struct req_format RQF_MDS_HSM_STATE_SET;
-extern struct req_format RQF_MDS_HSM_ACTION;
-extern struct req_format RQF_MDS_HSM_PROGRESS;
-extern struct req_format RQF_MDS_HSM_CT_REGISTER;
-extern struct req_format RQF_MDS_HSM_CT_UNREGISTER;
-extern struct req_format RQF_MDS_HSM_REQUEST;
-/* OST req_format */
-extern struct req_format RQF_OST_CONNECT;
-extern struct req_format RQF_OST_DISCONNECT;
-extern struct req_format RQF_OST_QUOTACTL;
-extern struct req_format RQF_OST_GETATTR;
-extern struct req_format RQF_OST_SETATTR;
-extern struct req_format RQF_OST_CREATE;
-extern struct req_format RQF_OST_PUNCH;
-extern struct req_format RQF_OST_SYNC;
-extern struct req_format RQF_OST_DESTROY;
-extern struct req_format RQF_OST_BRW_READ;
-extern struct req_format RQF_OST_BRW_WRITE;
-extern struct req_format RQF_OST_STATFS;
-extern struct req_format RQF_OST_SET_GRANT_INFO;
-extern struct req_format RQF_OST_GET_INFO;
-extern struct req_format RQF_OST_GET_INFO_LAST_ID;
-extern struct req_format RQF_OST_GET_INFO_LAST_FID;
-extern struct req_format RQF_OST_SET_INFO_LAST_FID;
-extern struct req_format RQF_OST_GET_INFO_FIEMAP;
-
-/* LDLM req_format */
-extern struct req_format RQF_LDLM_ENQUEUE;
-extern struct req_format RQF_LDLM_ENQUEUE_LVB;
-extern struct req_format RQF_LDLM_CONVERT;
-extern struct req_format RQF_LDLM_INTENT;
-extern struct req_format RQF_LDLM_INTENT_BASIC;
-extern struct req_format RQF_LDLM_INTENT_LAYOUT;
-extern struct req_format RQF_LDLM_INTENT_GETATTR;
-extern struct req_format RQF_LDLM_INTENT_OPEN;
-extern struct req_format RQF_LDLM_INTENT_CREATE;
-extern struct req_format RQF_LDLM_INTENT_UNLINK;
-extern struct req_format RQF_LDLM_INTENT_GETXATTR;
-extern struct req_format RQF_LDLM_CANCEL;
-extern struct req_format RQF_LDLM_CALLBACK;
-extern struct req_format RQF_LDLM_CP_CALLBACK;
-extern struct req_format RQF_LDLM_BL_CALLBACK;
-extern struct req_format RQF_LDLM_GL_CALLBACK;
-extern struct req_format RQF_LDLM_GL_DESC_CALLBACK;
-/* LOG req_format */
-extern struct req_format RQF_LOG_CANCEL;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_CREATE;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_DESTROY;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER;
-extern struct req_format RQF_LLOG_ORIGIN_CONNECT;
-
-extern struct req_format RQF_CONNECT;
-
-extern struct req_msg_field RMF_GENERIC_DATA;
-extern struct req_msg_field RMF_PTLRPC_BODY;
-extern struct req_msg_field RMF_MDT_BODY;
-extern struct req_msg_field RMF_MDT_EPOCH;
-extern struct req_msg_field RMF_OBD_STATFS;
-extern struct req_msg_field RMF_NAME;
-extern struct req_msg_field RMF_SYMTGT;
-extern struct req_msg_field RMF_TGTUUID;
-extern struct req_msg_field RMF_CLUUID;
-extern struct req_msg_field RMF_SETINFO_VAL;
-extern struct req_msg_field RMF_SETINFO_KEY;
-extern struct req_msg_field RMF_GETINFO_VAL;
-extern struct req_msg_field RMF_GETINFO_VALLEN;
-extern struct req_msg_field RMF_GETINFO_KEY;
-extern struct req_msg_field RMF_CLOSE_DATA;
-
-/*
- * connection handle received in MDS_CONNECT request.
- */
-extern struct req_msg_field RMF_CONN;
-extern struct req_msg_field RMF_CONNECT_DATA;
-extern struct req_msg_field RMF_DLM_REQ;
-extern struct req_msg_field RMF_DLM_REP;
-extern struct req_msg_field RMF_DLM_LVB;
-extern struct req_msg_field RMF_DLM_GL_DESC;
-extern struct req_msg_field RMF_LDLM_INTENT;
-extern struct req_msg_field RMF_LAYOUT_INTENT;
-extern struct req_msg_field RMF_MDT_MD;
-extern struct req_msg_field RMF_REC_REINT;
-extern struct req_msg_field RMF_EADATA;
-extern struct req_msg_field RMF_EAVALS;
-extern struct req_msg_field RMF_EAVALS_LENS;
-extern struct req_msg_field RMF_ACL;
-extern struct req_msg_field RMF_LOGCOOKIES;
-extern struct req_msg_field RMF_CAPA1;
-extern struct req_msg_field RMF_CAPA2;
-extern struct req_msg_field RMF_OBD_QUOTACHECK;
-extern struct req_msg_field RMF_OBD_QUOTACTL;
-extern struct req_msg_field RMF_STRING;
-extern struct req_msg_field RMF_SWAP_LAYOUTS;
-extern struct req_msg_field RMF_MDS_HSM_PROGRESS;
-extern struct req_msg_field RMF_MDS_HSM_REQUEST;
-extern struct req_msg_field RMF_MDS_HSM_USER_ITEM;
-extern struct req_msg_field RMF_MDS_HSM_ARCHIVE;
-extern struct req_msg_field RMF_HSM_USER_STATE;
-extern struct req_msg_field RMF_HSM_STATE_SET;
-extern struct req_msg_field RMF_MDS_HSM_CURRENT_ACTION;
-extern struct req_msg_field RMF_MDS_HSM_REQUEST;
-
-/* seq-mgr fields */
-extern struct req_msg_field RMF_SEQ_OPC;
-extern struct req_msg_field RMF_SEQ_RANGE;
-extern struct req_msg_field RMF_FID_SPACE;
-
-/* FLD fields */
-extern struct req_msg_field RMF_FLD_OPC;
-extern struct req_msg_field RMF_FLD_MDFLD;
-
-extern struct req_msg_field RMF_LLOGD_BODY;
-extern struct req_msg_field RMF_LLOG_LOG_HDR;
-extern struct req_msg_field RMF_LLOGD_CONN_BODY;
-
-extern struct req_msg_field RMF_MGS_TARGET_INFO;
-extern struct req_msg_field RMF_MGS_SEND_PARAM;
-
-extern struct req_msg_field RMF_OST_BODY;
-extern struct req_msg_field RMF_OBD_IOOBJ;
-extern struct req_msg_field RMF_OBD_ID;
-extern struct req_msg_field RMF_FID;
-extern struct req_msg_field RMF_NIOBUF_REMOTE;
-extern struct req_msg_field RMF_RCS;
-extern struct req_msg_field RMF_FIEMAP_KEY;
-extern struct req_msg_field RMF_FIEMAP_VAL;
-extern struct req_msg_field RMF_OST_ID;
-
-/* MGS config read message format */
-extern struct req_msg_field RMF_MGS_CONFIG_BODY;
-extern struct req_msg_field RMF_MGS_CONFIG_RES;
-
-/* generic uint32 */
-extern struct req_msg_field RMF_U32;
-
-/** @} req_layout */
-
-#endif /* _LUSTRE_REQ_LAYOUT_H__ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
deleted file mode 100644
index c5cb07acd0da..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ /dev/null
@@ -1,1070 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_SEC_H_
-#define _LUSTRE_SEC_H_
-
-/** \defgroup sptlrpc sptlrpc
- *
- * @{
- */
-
-/*
- * to avoid include
- */
-struct obd_import;
-struct obd_export;
-struct ptlrpc_request;
-struct ptlrpc_reply_state;
-struct ptlrpc_bulk_desc;
-struct brw_page;
-/* Linux specific */
-struct key;
-struct seq_file;
-struct lustre_cfg;
-
-/*
- * forward declaration
- */
-struct ptlrpc_sec_policy;
-struct ptlrpc_sec_cops;
-struct ptlrpc_sec_sops;
-struct ptlrpc_sec;
-struct ptlrpc_svc_ctx;
-struct ptlrpc_cli_ctx;
-struct ptlrpc_ctx_ops;
-
-/**
- * \addtogroup flavor flavor
- *
- * RPC flavor is represented by a 32 bits integer. Currently the high 12 bits
- * are unused, must be set to 0 for future expansion.
- * <pre>
- * ------------------------------------------------------------------------
- * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
- * ------------------------------------------------------------------------
- * </pre>
- *
- * @{
- */
-
-/*
- * flavor constants
- */
-enum sptlrpc_policy {
- SPTLRPC_POLICY_NULL = 0,
- SPTLRPC_POLICY_PLAIN = 1,
- SPTLRPC_POLICY_GSS = 2,
- SPTLRPC_POLICY_MAX,
-};
-
-enum sptlrpc_mech_null {
- SPTLRPC_MECH_NULL = 0,
- SPTLRPC_MECH_NULL_MAX,
-};
-
-enum sptlrpc_mech_plain {
- SPTLRPC_MECH_PLAIN = 0,
- SPTLRPC_MECH_PLAIN_MAX,
-};
-
-enum sptlrpc_mech_gss {
- SPTLRPC_MECH_GSS_NULL = 0,
- SPTLRPC_MECH_GSS_KRB5 = 1,
- SPTLRPC_MECH_GSS_MAX,
-};
-
-enum sptlrpc_service_type {
- SPTLRPC_SVC_NULL = 0, /**< no security */
- SPTLRPC_SVC_AUTH = 1, /**< authentication only */
- SPTLRPC_SVC_INTG = 2, /**< integrity */
- SPTLRPC_SVC_PRIV = 3, /**< privacy */
- SPTLRPC_SVC_MAX,
-};
-
-enum sptlrpc_bulk_type {
- SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */
- SPTLRPC_BULK_HASH = 1, /**< hash integrity */
- SPTLRPC_BULK_MAX,
-};
-
-enum sptlrpc_bulk_service {
- SPTLRPC_BULK_SVC_NULL = 0, /**< no security */
- SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */
- SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */
- SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */
- SPTLRPC_BULK_SVC_MAX,
-};
-
-/*
- * compose/extract macros
- */
-#define FLVR_POLICY_OFFSET (0)
-#define FLVR_MECH_OFFSET (4)
-#define FLVR_SVC_OFFSET (8)
-#define FLVR_BULK_TYPE_OFFSET (12)
-#define FLVR_BULK_SVC_OFFSET (16)
-
-#define MAKE_FLVR(policy, mech, svc, btype, bsvc) \
- (((__u32)(policy) << FLVR_POLICY_OFFSET) | \
- ((__u32)(mech) << FLVR_MECH_OFFSET) | \
- ((__u32)(svc) << FLVR_SVC_OFFSET) | \
- ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) | \
- ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))
-
-/*
- * extraction
- */
-#define SPTLRPC_FLVR_POLICY(flavor) \
- ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
-#define SPTLRPC_FLVR_MECH(flavor) \
- ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
-#define SPTLRPC_FLVR_SVC(flavor) \
- ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
-#define SPTLRPC_FLVR_BULK_TYPE(flavor) \
- ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
-#define SPTLRPC_FLVR_BULK_SVC(flavor) \
- ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)
-
-#define SPTLRPC_FLVR_BASE(flavor) \
- ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
-#define SPTLRPC_FLVR_BASE_SUB(flavor) \
- ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)
-
-/*
- * gss subflavors
- */
-#define MAKE_BASE_SUBFLVR(mech, svc) \
- ((__u32)(mech) | \
- ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))
-
-#define SPTLRPC_SUBFLVR_KRB5N \
- MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
-#define SPTLRPC_SUBFLVR_KRB5A \
- MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
-#define SPTLRPC_SUBFLVR_KRB5I \
- MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
-#define SPTLRPC_SUBFLVR_KRB5P \
- MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
-
-/*
- * "end user" flavors
- */
-#define SPTLRPC_FLVR_NULL \
- MAKE_FLVR(SPTLRPC_POLICY_NULL, \
- SPTLRPC_MECH_NULL, \
- SPTLRPC_SVC_NULL, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_NULL)
-#define SPTLRPC_FLVR_PLAIN \
- MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \
- SPTLRPC_MECH_PLAIN, \
- SPTLRPC_SVC_NULL, \
- SPTLRPC_BULK_HASH, \
- SPTLRPC_BULK_SVC_INTG)
-#define SPTLRPC_FLVR_KRB5N \
- MAKE_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_NULL, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_NULL)
-#define SPTLRPC_FLVR_KRB5A \
- MAKE_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_AUTH, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_NULL)
-#define SPTLRPC_FLVR_KRB5I \
- MAKE_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_INTG, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_INTG)
-#define SPTLRPC_FLVR_KRB5P \
- MAKE_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_PRIV, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_PRIV)
-
-#define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
-
-#define SPTLRPC_FLVR_INVALID ((__u32)0xFFFFFFFF)
-#define SPTLRPC_FLVR_ANY ((__u32)0xFFF00000)
-
-/**
- * extract the useful part from wire flavor
- */
-#define WIRE_FLVR(wflvr) (((__u32)(wflvr)) & 0x000FFFFF)
-
-/** @} flavor */
-
-static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
-{
- LASSERT(svc < SPTLRPC_SVC_MAX);
- *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
- SPTLRPC_FLVR_MECH(*flvr),
- svc,
- SPTLRPC_FLVR_BULK_TYPE(*flvr),
- SPTLRPC_FLVR_BULK_SVC(*flvr));
-}
-
-static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
-{
- LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
- *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
- SPTLRPC_FLVR_MECH(*flvr),
- SPTLRPC_FLVR_SVC(*flvr),
- SPTLRPC_FLVR_BULK_TYPE(*flvr),
- svc);
-}
-
-struct bulk_spec_hash {
- __u8 hash_alg;
-};
-
-/**
- * Full description of flavors being used on a ptlrpc connection, include
- * both regular RPC and bulk transfer parts.
- */
-struct sptlrpc_flavor {
- /**
- * wire flavor, should be renamed to sf_wire.
- */
- __u32 sf_rpc;
- /**
- * general flags of PTLRPC_SEC_FL_*
- */
- __u32 sf_flags;
- /**
- * rpc flavor specification
- */
- union {
- /* nothing for now */
- } u_rpc;
- /**
- * bulk flavor specification
- */
- union {
- struct bulk_spec_hash hash;
- } u_bulk;
-};
-
-/**
- * identify the RPC is generated from what part of Lustre. It's encoded into
- * RPC requests and to be checked by ptlrpc service.
- */
-enum lustre_sec_part {
- LUSTRE_SP_CLI = 0,
- LUSTRE_SP_MDT,
- LUSTRE_SP_OST,
- LUSTRE_SP_MGC,
- LUSTRE_SP_MGS,
- LUSTRE_SP_ANY = 0xFF
-};
-
-enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
-
-/**
- * A rule specifies a flavor to be used by a ptlrpc connection between
- * two Lustre parts.
- */
-struct sptlrpc_rule {
- __u32 sr_netid; /* LNET network ID */
- __u8 sr_from; /* sec_part */
- __u8 sr_to; /* sec_part */
- __u16 sr_padding;
- struct sptlrpc_flavor sr_flvr;
-};
-
-/**
- * A set of rules in memory.
- *
- * Rules are generated and stored on MGS, and propagated to MDT, OST,
- * and client when needed.
- */
-struct sptlrpc_rule_set {
- int srs_nslot;
- int srs_nrule;
- struct sptlrpc_rule *srs_rules;
-};
-
-int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
-bool sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
-
-static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
-{
- memset(set, 0, sizeof(*set));
-}
-
-int sptlrpc_process_config(struct lustre_cfg *lcfg);
-void sptlrpc_conf_log_start(const char *logname);
-void sptlrpc_conf_log_stop(const char *logname);
-void sptlrpc_conf_log_update_begin(const char *logname);
-void sptlrpc_conf_log_update_end(const char *logname);
-void sptlrpc_conf_client_adapt(struct obd_device *obd);
-
-/* The maximum length of security payload. 1024 is enough for Kerberos 5,
- * and should be enough for other future mechanisms but not sure.
- * Only used by pre-allocated request/reply pool.
- */
-#define SPTLRPC_MAX_PAYLOAD (1024)
-
-struct vfs_cred {
- u32 vc_uid;
- u32 vc_gid;
-};
-
-struct ptlrpc_ctx_ops {
- /**
- * To determine whether it's suitable to use the \a ctx for \a vcred.
- */
- int (*match)(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred);
-
- /**
- * To bring the \a ctx uptodate.
- */
- int (*refresh)(struct ptlrpc_cli_ctx *ctx);
-
- /**
- * Validate the \a ctx.
- */
- int (*validate)(struct ptlrpc_cli_ctx *ctx);
-
- /**
- * Force the \a ctx to die.
- */
- void (*force_die)(struct ptlrpc_cli_ctx *ctx, int grace);
- int (*display)(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
-
- /**
- * Sign the request message using \a ctx.
- *
- * \pre req->rq_reqmsg point to request message.
- * \pre req->rq_reqlen is the request message length.
- * \post req->rq_reqbuf point to request message with signature.
- * \post req->rq_reqdata_len is set to the final request message size.
- *
- * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
- */
- int (*sign)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
-
- /**
- * Verify the reply message using \a ctx.
- *
- * \pre req->rq_repdata point to reply message with signature.
- * \pre req->rq_repdata_len is the total reply message length.
- * \post req->rq_repmsg point to reply message without signature.
- * \post req->rq_replen is the reply message length.
- *
- * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
- */
- int (*verify)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
-
- /**
- * Encrypt the request message using \a ctx.
- *
- * \pre req->rq_reqmsg point to request message in clear text.
- * \pre req->rq_reqlen is the request message length.
- * \post req->rq_reqbuf point to request message.
- * \post req->rq_reqdata_len is set to the final request message size.
- *
- * \see gss_cli_ctx_seal().
- */
- int (*seal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
-
- /**
- * Decrypt the reply message using \a ctx.
- *
- * \pre req->rq_repdata point to encrypted reply message.
- * \pre req->rq_repdata_len is the total cipher text length.
- * \post req->rq_repmsg point to reply message in clear text.
- * \post req->rq_replen is the reply message length in clear text.
- *
- * \see gss_cli_ctx_unseal().
- */
- int (*unseal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
-
- /**
- * Wrap bulk request data. This is called before wrapping RPC
- * request message.
- *
- * \pre bulk buffer is descripted by desc->bd_iov and
- * desc->bd_iov_count. note for read it's just buffer, no data
- * need to be sent; for write it contains data in clear text.
- * \post when necessary, ptlrpc_bulk_sec_desc was properly prepared
- * (usually inside of RPC request message).
- * - encryption: cipher text bulk buffer is descripted by
- * desc->bd_enc_iov and desc->bd_iov_count (currently assume iov
- * count remains the same).
- * - otherwise: bulk buffer is still desc->bd_iov and
- * desc->bd_iov_count.
- *
- * \return 0: success.
- * \return -ev: error code.
- *
- * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
- */
- int (*wrap_bulk)(struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-
- /**
- * Unwrap bulk reply data. This is called after wrapping RPC
- * reply message.
- *
- * \pre bulk buffer is descripted by desc->bd_iov/desc->bd_enc_iov and
- * desc->bd_iov_count, according to wrap_bulk().
- * \post final bulk data in clear text is placed in buffer described
- * by desc->bd_iov and desc->bd_iov_count.
- * \return +ve nob of actual bulk data in clear text.
- * \return -ve error code.
- *
- * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
- */
- int (*unwrap_bulk)(struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-};
-
-#define PTLRPC_CTX_NEW_BIT (0) /* newly created */
-#define PTLRPC_CTX_UPTODATE_BIT (1) /* uptodate */
-#define PTLRPC_CTX_DEAD_BIT (2) /* mark expired gracefully */
-#define PTLRPC_CTX_ERROR_BIT (3) /* fatal error (refresh, etc.) */
-#define PTLRPC_CTX_CACHED_BIT (8) /* in ctx cache (hash etc.) */
-#define PTLRPC_CTX_ETERNAL_BIT (9) /* always valid */
-
-#define PTLRPC_CTX_NEW (1 << PTLRPC_CTX_NEW_BIT)
-#define PTLRPC_CTX_UPTODATE (1 << PTLRPC_CTX_UPTODATE_BIT)
-#define PTLRPC_CTX_DEAD (1 << PTLRPC_CTX_DEAD_BIT)
-#define PTLRPC_CTX_ERROR (1 << PTLRPC_CTX_ERROR_BIT)
-#define PTLRPC_CTX_CACHED (1 << PTLRPC_CTX_CACHED_BIT)
-#define PTLRPC_CTX_ETERNAL (1 << PTLRPC_CTX_ETERNAL_BIT)
-
-#define PTLRPC_CTX_STATUS_MASK (PTLRPC_CTX_NEW_BIT | \
- PTLRPC_CTX_UPTODATE | \
- PTLRPC_CTX_DEAD | \
- PTLRPC_CTX_ERROR)
-
-struct ptlrpc_cli_ctx {
- struct hlist_node cc_cache; /* linked into ctx cache */
- atomic_t cc_refcount;
- struct ptlrpc_sec *cc_sec;
- struct ptlrpc_ctx_ops *cc_ops;
- unsigned long cc_expire; /* in seconds */
- unsigned int cc_early_expire:1;
- unsigned long cc_flags;
- struct vfs_cred cc_vcred;
- spinlock_t cc_lock;
- struct list_head cc_req_list; /* waiting reqs linked here */
- struct list_head cc_gc_chain; /* linked to gc chain */
-};
-
-/**
- * client side policy operation vector.
- */
-struct ptlrpc_sec_cops {
- /**
- * Given an \a imp, create and initialize a ptlrpc_sec structure.
- * \param ctx service context:
- * - regular import: \a ctx should be NULL;
- * - reverse import: \a ctx is obtained from incoming request.
- * \param flavor specify what flavor to use.
- *
- * When necessary, policy module is responsible for taking reference
- * on the import.
- *
- * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
- */
- struct ptlrpc_sec *(*create_sec)(struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx,
- struct sptlrpc_flavor *flavor);
-
- /**
- * Destructor of ptlrpc_sec. When called, refcount has been dropped
- * to 0 and all contexts has been destroyed.
- *
- * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
- */
- void (*destroy_sec)(struct ptlrpc_sec *sec);
-
- /**
- * Notify that this ptlrpc_sec is going to die. Optionally, policy
- * module is supposed to set sec->ps_dying and whatever necessary
- * actions.
- *
- * \see plain_kill_sec(), gss_sec_kill().
- */
- void (*kill_sec)(struct ptlrpc_sec *sec);
-
- /**
- * Given \a vcred, lookup and/or create its context. The policy module
- * is supposed to maintain its own context cache.
- * XXX currently \a create and \a remove_dead is always 1, perhaps
- * should be removed completely.
- *
- * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
- */
- struct ptlrpc_cli_ctx *(*lookup_ctx)(struct ptlrpc_sec *sec,
- struct vfs_cred *vcred,
- int create, int remove_dead);
-
- /**
- * Called then the reference of \a ctx dropped to 0. The policy module
- * is supposed to destroy this context or whatever else according to
- * its cache maintenance mechanism.
- *
- * \param sync if zero, we shouldn't wait for the context being
- * destroyed completely.
- *
- * \see plain_release_ctx(), gss_sec_release_ctx_kr().
- */
- void (*release_ctx)(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx,
- int sync);
-
- /**
- * Flush the context cache.
- *
- * \param uid context of which user, -1 means all contexts.
- * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
- * contexts should be cleared immediately.
- * \param force if zero, only idle contexts will be flushed.
- *
- * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
- */
- int (*flush_ctx_cache)(struct ptlrpc_sec *sec, uid_t uid,
- int grace, int force);
-
- /**
- * Called periodically by garbage collector to remove dead contexts
- * from cache.
- *
- * \see gss_sec_gc_ctx_kr().
- */
- void (*gc_ctx)(struct ptlrpc_sec *sec);
-
- /**
- * Given an context \a ctx, install a corresponding reverse service
- * context on client side.
- * XXX currently it's only used by GSS module, maybe we should remove
- * this from general API.
- */
- int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec,
- struct ptlrpc_cli_ctx *ctx);
-
- /**
- * To allocate request buffer for \a req.
- *
- * \pre req->rq_reqmsg == NULL.
- * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated,
- * we are not supposed to free it.
- * \post if success, req->rq_reqmsg point to a buffer with size
- * at least \a lustre_msg_size.
- *
- * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
- */
- int (*alloc_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
- int lustre_msg_size);
-
- /**
- * To free request buffer for \a req.
- *
- * \pre req->rq_reqbuf != NULL.
- *
- * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
- */
- void (*free_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
-
- /**
- * To allocate reply buffer for \a req.
- *
- * \pre req->rq_repbuf == NULL.
- * \post if success, req->rq_repbuf point to a buffer with size
- * req->rq_repbuf_len, the size should be large enough to receive
- * reply which be transformed from \a lustre_msg_size of clear text.
- *
- * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
- */
- int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
- int lustre_msg_size);
-
- /**
- * To free reply buffer for \a req.
- *
- * \pre req->rq_repbuf != NULL.
- * \post req->rq_repbuf == NULL.
- * \post req->rq_repbuf_len == 0.
- *
- * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
- */
- void (*free_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
-
- /**
- * To expand the request buffer of \a req, thus the \a segment in
- * the request message pointed by req->rq_reqmsg can accommodate
- * at least \a newsize of data.
- *
- * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
- *
- * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
- * gss_enlarge_reqbuf().
- */
- int (*enlarge_reqbuf)(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int segment, int newsize);
- /*
- * misc
- */
- int (*display)(struct ptlrpc_sec *sec, struct seq_file *seq);
-};
-
-/**
- * server side policy operation vector.
- */
-struct ptlrpc_sec_sops {
- /**
- * verify an incoming request.
- *
- * \pre request message is pointed by req->rq_reqbuf, size is
- * req->rq_reqdata_len; and the message has been unpacked to
- * host byte order.
- *
- * \retval SECSVC_OK success, req->rq_reqmsg point to request message
- * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set;
- * req->rq_sp_from is decoded from request.
- * \retval SECSVC_COMPLETE success, the request has been fully
- * processed, and reply message has been prepared; req->rq_sp_from is
- * decoded from request.
- * \retval SECSVC_DROP failed, this request should be dropped.
- *
- * \see null_accept(), plain_accept(), gss_svc_accept_kr().
- */
- int (*accept)(struct ptlrpc_request *req);
-
- /**
- * Perform security transformation upon reply message.
- *
- * \pre reply message is pointed by req->rq_reply_state->rs_msg, size
- * is req->rq_replen.
- * \post req->rs_repdata_len is the final message size.
- * \post req->rq_reply_off is set.
- *
- * \see null_authorize(), plain_authorize(), gss_svc_authorize().
- */
- int (*authorize)(struct ptlrpc_request *req);
-
- /**
- * Invalidate server context \a ctx.
- *
- * \see gss_svc_invalidate_ctx().
- */
- void (*invalidate_ctx)(struct ptlrpc_svc_ctx *ctx);
-
- /**
- * Allocate a ptlrpc_reply_state.
- *
- * \param msgsize size of the reply message in clear text.
- * \pre if req->rq_reply_state != NULL, then it's pre-allocated, we
- * should simply use it; otherwise we'll responsible for allocating
- * a new one.
- * \post req->rq_reply_state != NULL;
- * \post req->rq_reply_state->rs_msg != NULL;
- *
- * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
- */
- int (*alloc_rs)(struct ptlrpc_request *req, int msgsize);
-
- /**
- * Free a ptlrpc_reply_state.
- */
- void (*free_rs)(struct ptlrpc_reply_state *rs);
-
- /**
- * Release the server context \a ctx.
- *
- * \see gss_svc_free_ctx().
- */
- void (*free_ctx)(struct ptlrpc_svc_ctx *ctx);
-
- /**
- * Install a reverse context based on the server context \a ctx.
- *
- * \see gss_svc_install_rctx_kr().
- */
- int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx);
-
- /**
- * Prepare buffer for incoming bulk write.
- *
- * \pre desc->bd_iov and desc->bd_iov_count describes the buffer
- * intended to receive the write.
- *
- * \see gss_svc_prep_bulk().
- */
- int (*prep_bulk)(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-
- /**
- * Unwrap the bulk write data.
- *
- * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
- */
- int (*unwrap_bulk)(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-
- /**
- * Wrap the bulk read data.
- *
- * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
- */
- int (*wrap_bulk)(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-};
-
-struct ptlrpc_sec_policy {
- struct module *sp_owner;
- char *sp_name;
- __u16 sp_policy; /* policy number */
- struct ptlrpc_sec_cops *sp_cops; /* client ops */
- struct ptlrpc_sec_sops *sp_sops; /* server ops */
-};
-
-#define PTLRPC_SEC_FL_REVERSE 0x0001 /* reverse sec */
-#define PTLRPC_SEC_FL_ROOTONLY 0x0002 /* treat everyone as root */
-#define PTLRPC_SEC_FL_UDESC 0x0004 /* ship udesc */
-#define PTLRPC_SEC_FL_BULK 0x0008 /* intensive bulk i/o expected */
-#define PTLRPC_SEC_FL_PAG 0x0010 /* PAG mode */
-
-/**
- * The ptlrpc_sec represents the client side ptlrpc security facilities,
- * each obd_import (both regular and reverse import) must associate with
- * a ptlrpc_sec.
- *
- * \see sptlrpc_import_sec_adapt().
- */
-struct ptlrpc_sec {
- struct ptlrpc_sec_policy *ps_policy;
- atomic_t ps_refcount;
- /** statistic only */
- atomic_t ps_nctx;
- /** unique identifier */
- int ps_id;
- struct sptlrpc_flavor ps_flvr;
- enum lustre_sec_part ps_part;
- /** after set, no more new context will be created */
- unsigned int ps_dying:1;
- /** owning import */
- struct obd_import *ps_import;
- spinlock_t ps_lock;
-
- /*
- * garbage collection
- */
- struct list_head ps_gc_list;
- unsigned long ps_gc_interval; /* in seconds */
- time64_t ps_gc_next; /* in seconds */
-};
-
-static inline int sec_is_reverse(struct ptlrpc_sec *sec)
-{
- return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
-}
-
-static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
-{
- return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
-}
-
-struct ptlrpc_svc_ctx {
- atomic_t sc_refcount;
- struct ptlrpc_sec_policy *sc_policy;
-};
-
-/*
- * user identity descriptor
- */
-#define LUSTRE_MAX_GROUPS (128)
-
-struct ptlrpc_user_desc {
- __u32 pud_uid;
- __u32 pud_gid;
- __u32 pud_fsuid;
- __u32 pud_fsgid;
- __u32 pud_cap;
- __u32 pud_ngroups;
- __u32 pud_groups[0];
-};
-
-/*
- * bulk flavors
- */
-enum sptlrpc_bulk_hash_alg {
- BULK_HASH_ALG_NULL = 0,
- BULK_HASH_ALG_ADLER32,
- BULK_HASH_ALG_CRC32,
- BULK_HASH_ALG_MD5,
- BULK_HASH_ALG_SHA1,
- BULK_HASH_ALG_SHA256,
- BULK_HASH_ALG_SHA384,
- BULK_HASH_ALG_SHA512,
- BULK_HASH_ALG_MAX
-};
-
-const char *sptlrpc_get_hash_name(__u8 hash_alg);
-__u8 sptlrpc_get_hash_alg(const char *algname);
-
-enum {
- BSD_FL_ERR = 1,
-};
-
-struct ptlrpc_bulk_sec_desc {
- __u8 bsd_version; /* 0 */
- __u8 bsd_type; /* SPTLRPC_BULK_XXX */
- __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */
- __u8 bsd_flags; /* flags */
- __u32 bsd_nob; /* nob of bulk data */
- __u8 bsd_data[0]; /* policy-specific token */
-};
-
-/*
- * round size up to next power of 2, for slab allocation.
- * @size must be sane (can't overflow after round up)
- */
-static inline int size_roundup_power2(int size)
-{
- size--;
- size |= size >> 1;
- size |= size >> 2;
- size |= size >> 4;
- size |= size >> 8;
- size |= size >> 16;
- size++;
- return size;
-}
-
-/*
- * internal support libraries
- */
-void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
- int segment, int newsize);
-
-/*
- * security policies
- */
-int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
-int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
-
-__u32 sptlrpc_name2flavor_base(const char *name);
-const char *sptlrpc_flavor2name_base(__u32 flvr);
-char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
- char *buf, int bufsize);
-char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
-
-static inline
-struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
-{
- __module_get(policy->sp_owner);
- return policy;
-}
-
-static inline
-void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
-{
- module_put(policy->sp_owner);
-}
-
-/*
- * client credential
- */
-static inline
-unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
-{
- return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
-}
-
-static inline
-int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
-{
- return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
-}
-
-static inline
-int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
-{
- return (cli_ctx_status(ctx) != 0);
-}
-
-static inline
-int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
-{
- return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
-}
-
-static inline
-int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
-{
- return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
-}
-
-static inline
-int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
-{
- return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
-}
-
-static inline
-int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
-{
- return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
-}
-
-/*
- * sec get/put
- */
-void sptlrpc_sec_put(struct ptlrpc_sec *sec);
-
-/*
- * internal apis which only used by policy implementation
- */
-int sptlrpc_get_next_secid(void);
-
-/*
- * exported client context api
- */
-struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
-void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);
-
-/*
- * exported client context wrap/buffers
- */
-int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
-int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
-int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
-void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
-int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
-void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
-int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
- int segment, int newsize);
-int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
- struct ptlrpc_request **req_ret);
-void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);
-
-void sptlrpc_request_out_callback(struct ptlrpc_request *req);
-
-/*
- * exported higher interface of import & request
- */
-int sptlrpc_import_sec_adapt(struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx,
- struct sptlrpc_flavor *flvr);
-struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
-void sptlrpc_import_sec_put(struct obd_import *imp);
-
-int sptlrpc_import_check_ctx(struct obd_import *imp);
-void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
-void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
-void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
-int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
-void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
-int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
-void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
-
-/* gc */
-void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
-void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
-
-/* misc */
-const char *sec2target_str(struct ptlrpc_sec *sec);
-/*
- * lprocfs
- */
-int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);
-
-/*
- * server side
- */
-enum secsvc_accept_res {
- SECSVC_OK = 0,
- SECSVC_COMPLETE,
- SECSVC_DROP,
-};
-
-int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
-int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
-int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
-void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
-void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
-void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
-
-int sptlrpc_target_export_check(struct obd_export *exp,
- struct ptlrpc_request *req);
-
-/* bulk security api */
-void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
-int get_free_pages_in_pool(void);
-int pool_is_at_full_capacity(void);
-
-int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc,
- int nob);
-int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-
-/* bulk helpers (internal use only by policies) */
-int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
- void *buf, int buflen);
-
-int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
-
-/* user descriptor helpers */
-static inline int sptlrpc_user_desc_size(int ngroups)
-{
- return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
-}
-
-int sptlrpc_current_user_desc_size(void);
-int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
-int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
-
-enum {
- LUSTRE_SEC_NONE = 0,
- LUSTRE_SEC_REMOTE = 1,
- LUSTRE_SEC_SPECIFY = 2,
- LUSTRE_SEC_ALL = 3
-};
-
-/** @} sptlrpc */
-
-#endif /* _LUSTRE_SEC_H_ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_swab.h b/drivers/staging/lustre/lustre/include/lustre_swab.h
deleted file mode 100644
index 9d786bbe7f3f..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_swab.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2014, Intel Corporation.
- *
- * Copyright 2015 Cray Inc, all rights reserved.
- * Author: Ben Evans.
- *
- * We assume all nodes are either little-endian or big-endian, and we
- * always send messages in the sender's native format. The receiver
- * detects the message format by checking the 'magic' field of the message
- * (see lustre_msg_swabbed() below).
- *
- * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines
- * are implemented in ptlrpc/lustre_swab.c. These 'swabbers' convert the
- * type from "other" endian, in-place in the message buffer.
- *
- * A swabber takes a single pointer argument. The caller must already have
- * verified that the length of the message buffer >= sizeof (type).
- *
- * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
- * may be defined that swabs just the variable part, after the caller has
- * verified that the message buffer is large enough.
- */
-
-#ifndef _LUSTRE_SWAB_H_
-#define _LUSTRE_SWAB_H_
-
-#include <uapi/linux/lustre/lustre_idl.h>
-
-void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
-void lustre_swab_connect(struct obd_connect_data *ocd);
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
-void lustre_swab_obd_statfs(struct obd_statfs *os);
-void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
-void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
-void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
-void lustre_swab_ost_lvb(struct ost_lvb *lvb);
-void lustre_swab_obd_quotactl(struct obd_quotactl *q);
-void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
-void lustre_swab_generic_32s(__u32 *val);
-void lustre_swab_mdt_body(struct mdt_body *b);
-void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
-void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
-void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
-void lustre_swab_lmv_desc(struct lmv_desc *ld);
-void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
-void lustre_swab_lov_desc(struct lov_desc *ld);
-void lustre_swab_gl_desc(union ldlm_gl_desc *desc);
-void lustre_swab_ldlm_intent(struct ldlm_intent *i);
-void lustre_swab_ldlm_request(struct ldlm_request *rq);
-void lustre_swab_ldlm_reply(struct ldlm_reply *r);
-void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
-void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
-void lustre_swab_mgs_config_body(struct mgs_config_body *body);
-void lustre_swab_mgs_config_res(struct mgs_config_res *body);
-void lustre_swab_ost_body(struct ost_body *b);
-void lustre_swab_ost_last_id(__u64 *id);
-void lustre_swab_fiemap(struct fiemap *fiemap);
-void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
-void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
-void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
- int stripe_count);
-void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
-void lustre_swab_lustre_capa(struct lustre_capa *c);
-void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
-void lustre_swab_fid2path(struct getinfo_fid2path *gf);
-void lustre_swab_layout_intent(struct layout_intent *li);
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_current_action(struct hsm_current_action *action);
-void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
-void lustre_swab_hsm_request(struct hsm_request *hr);
-void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
-void lustre_swab_close_data(struct close_data *data);
-void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
-
-/* Functions for dumping PTLRPC fields */
-void dump_rniobuf(struct niobuf_remote *rnb);
-void dump_ioo(struct obd_ioobj *nb);
-void dump_ost_body(struct ost_body *ob);
-void dump_rcs(__u32 *rc);
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
deleted file mode 100644
index f1233ca7d337..000000000000
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ /dev/null
@@ -1,1101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __OBD_H
-#define __OBD_H
-
-#include <linux/spinlock.h>
-
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <lustre_lib.h>
-#include <lu_ref.h>
-#include <lustre_export.h>
-#include <lustre_fid.h>
-#include <lustre_fld.h>
-#include <lustre_handles.h>
-#include <lustre_intent.h>
-#include <cl_object.h>
-
-#define MAX_OBD_DEVICES 8192
-
-struct osc_async_rc {
- int ar_rc;
- int ar_force_sync;
- __u64 ar_min_xid;
-};
-
-struct lov_oinfo { /* per-stripe data structure */
- struct ost_id loi_oi; /* object ID/Sequence on the target OST */
- int loi_ost_idx; /* OST stripe index in lov_tgt_desc->tgts */
- int loi_ost_gen; /* generation of this loi_ost_idx */
-
- unsigned long loi_kms_valid:1;
- __u64 loi_kms; /* known minimum size */
- struct ost_lvb loi_lvb;
- struct osc_async_rc loi_ar;
-};
-
-static inline void loi_kms_set(struct lov_oinfo *oinfo, __u64 kms)
-{
- oinfo->loi_kms = kms;
- oinfo->loi_kms_valid = 1;
-}
-
-static inline void loi_init(struct lov_oinfo *loi)
-{
-}
-
-struct lov_stripe_md;
-struct obd_info;
-
-int lov_read_and_clear_async_rc(struct cl_object *clob);
-
-typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
-
-/* obd info for a particular level (lov, osc). */
-struct obd_info {
- /* OBD_STATFS_* flags */
- __u64 oi_flags;
- /* lsm data specific for every OSC. */
- struct lov_stripe_md *oi_md;
- /* statfs data specific for every OSC, if needed at all. */
- struct obd_statfs *oi_osfs;
- /* An update callback which is called to update some data on upper
- * level. E.g. it is used for update lsm->lsm_oinfo at every received
- * request in osc level for enqueue requests. It is also possible to
- * update some caller data from LOV layer if needed.
- */
- obd_enqueue_update_f oi_cb_up;
-};
-
-struct obd_type {
- struct list_head typ_chain;
- struct obd_ops *typ_dt_ops;
- struct md_ops *typ_md_ops;
- struct dentry *typ_debugfs_entry;
- char *typ_name;
- int typ_refcnt;
- struct lu_device_type *typ_lu;
- spinlock_t obd_type_lock;
- struct kobject *typ_kobj;
-};
-
-struct brw_page {
- u64 off;
- struct page *pg;
- unsigned int count;
- u32 flag;
-};
-
-struct timeout_item {
- enum timeout_event ti_event;
- unsigned long ti_timeout;
- timeout_cb_t ti_cb;
- void *ti_cb_data;
- struct list_head ti_obd_list;
- struct list_head ti_chain;
-};
-
-#define OBD_MAX_RIF_DEFAULT 8
-#define OBD_MAX_RIF_MAX 512
-#define OSC_MAX_RIF_MAX 256
-#define OSC_MAX_DIRTY_DEFAULT (OBD_MAX_RIF_DEFAULT * 4)
-#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
-#define OSC_DEFAULT_RESENDS 10
-
-/* possible values for fo_sync_lock_cancel */
-enum {
- NEVER_SYNC_ON_CANCEL = 0,
- BLOCKING_SYNC_ON_CANCEL = 1,
- ALWAYS_SYNC_ON_CANCEL = 2,
- NUM_SYNC_ON_CANCEL_STATES
-};
-
-enum obd_cl_sem_lock_class {
- OBD_CLI_SEM_NORMAL,
- OBD_CLI_SEM_MGC,
- OBD_CLI_SEM_MDCOSC,
-};
-
-/*
- * Limit reply buffer size for striping data to one x86_64 page. This
- * value is chosen to fit the striping data for common use cases while
- * staying well below the limit at which the buffer must be backed by
- * vmalloc(). Excessive use of vmalloc() may cause spinlock contention
- * on the MDS.
- */
-#define OBD_MAX_DEFAULT_EA_SIZE 4096
-
-struct mdc_rpc_lock;
-struct obd_import;
-struct client_obd {
- struct rw_semaphore cl_sem;
- struct obd_uuid cl_target_uuid;
- struct obd_import *cl_import; /* ptlrpc connection state */
- size_t cl_conn_count;
- /*
- * Cache maximum and default values for easize. This is
- * strictly a performance optimization to minimize calls to
- * obd_size_diskmd(). The default values are used to calculate the
- * initial size of a request buffer. The ptlrpc layer will resize the
- * buffer as needed to accommodate a larger reply from the
- * server. The default values should be small enough to avoid wasted
- * memory and excessive use of vmalloc(), yet large enough to avoid
- * reallocating the buffer in the common use case.
- */
- /*
- * Default EA size for striping attributes. It is initialized at
- * mount-time based on the default stripe width of the filesystem,
- * then it tracks the largest observed EA size advertised by
- * the MDT, up to a maximum value of OBD_MAX_DEFAULT_EA_SIZE.
- */
- u32 cl_default_mds_easize;
- /* Maximum possible EA size computed at mount-time based on
- * the number of OSTs in the filesystem. May be increased at
- * run-time if a larger observed size is advertised by the MDT.
- */
- u32 cl_max_mds_easize;
-
- enum lustre_sec_part cl_sp_me;
- enum lustre_sec_part cl_sp_to;
- struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
-
- /* the grant values are protected by loi_list_lock below */
- unsigned long cl_dirty_pages; /* all _dirty_ in pages */
- unsigned long cl_dirty_max_pages; /* allowed w/o rpc */
- unsigned long cl_dirty_transit; /* dirty synchronous */
- unsigned long cl_avail_grant; /* bytes of credit for ost */
- unsigned long cl_lost_grant; /* lost credits (trunc) */
-
- /* since we allocate grant by blocks, we don't know how many grant will
- * be used to add a page into cache. As a solution, we reserve maximum
- * grant before trying to dirty a page and unreserve the rest.
- * See osc_{reserve|unreserve}_grant for details.
- */
- long cl_reserved_grant;
- struct list_head cl_cache_waiters; /* waiting for cache/grant */
- unsigned long cl_next_shrink_grant; /* jiffies */
- struct list_head cl_grant_shrink_list; /* Timeout event list */
- int cl_grant_shrink_interval; /* seconds */
-
- /* A chunk is an optimal size used by osc_extent to determine
- * the extent size. A chunk is max(PAGE_SIZE, OST block size)
- */
- int cl_chunkbits;
- unsigned int cl_extent_tax; /* extent overhead, by bytes */
-
- /* keep track of objects that have lois that contain pages which
- * have been queued for async brw. this lock also protects the
- * lists of osc_client_pages that hang off of the loi
- */
- /*
- * ->cl_loi_list_lock protects consistency of
- * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and
- * ->ap_completion() call-backs are executed under this lock. As we
- * cannot guarantee that these call-backs never block on all platforms
- * (as a matter of fact they do block on Mac OS X), type of
- * ->cl_loi_list_lock is platform dependent: it's a spin-lock on Linux
- * and blocking mutex on Mac OS X. (Alternative is to make this lock
- * blocking everywhere, but we don't want to slow down fast-path of
- * our main platform.)
- *
- * NB by Jinshan: though field names are still _loi_, but actually
- * osc_object{}s are in the list.
- */
- spinlock_t cl_loi_list_lock;
- struct list_head cl_loi_ready_list;
- struct list_head cl_loi_hp_ready_list;
- struct list_head cl_loi_write_list;
- struct list_head cl_loi_read_list;
- __u32 cl_r_in_flight;
- __u32 cl_w_in_flight;
- /* just a sum of the loi/lop pending numbers to be exported by sysfs */
- atomic_t cl_pending_w_pages;
- atomic_t cl_pending_r_pages;
- __u32 cl_max_pages_per_rpc;
- __u32 cl_max_rpcs_in_flight;
- struct obd_histogram cl_read_rpc_hist;
- struct obd_histogram cl_write_rpc_hist;
- struct obd_histogram cl_read_page_hist;
- struct obd_histogram cl_write_page_hist;
- struct obd_histogram cl_read_offset_hist;
- struct obd_histogram cl_write_offset_hist;
-
- /* LRU for osc caching pages */
- struct cl_client_cache *cl_cache;
- /** member of cl_cache->ccc_lru */
- struct list_head cl_lru_osc;
- /** # of available LRU slots left in the per-OSC cache.
- * Available LRU slots are shared by all OSCs of the same file system,
- * therefore this is a pointer to cl_client_cache::ccc_lru_left.
- */
- atomic_long_t *cl_lru_left;
- /** # of busy LRU pages. A page is considered busy if it's in writeback
- * queue, or in transfer. Busy pages can't be discarded so they are not
- * in LRU cache.
- */
- atomic_long_t cl_lru_busy;
- /** # of LRU pages in the cache for this client_obd */
- atomic_long_t cl_lru_in_list;
- /** # of threads are shrinking LRU cache. To avoid contention, it's not
- * allowed to have multiple threads shrinking LRU cache.
- */
- atomic_t cl_lru_shrinkers;
- /** The time when this LRU cache was last used. */
- time64_t cl_lru_last_used;
- /** stats: how many reclaims have happened for this client_obd.
- * reclaim and shrink - shrink is async, voluntarily rebalancing;
- * reclaim is sync, initiated by IO thread when the LRU slots are
- * in shortage.
- */
- u64 cl_lru_reclaim;
- /** List of LRU pages for this client_obd */
- struct list_head cl_lru_list;
- /** Lock for LRU page list */
- spinlock_t cl_lru_list_lock;
- /** # of unstable pages in this client_obd.
- * An unstable page is a page state that WRITE RPC has finished but
- * the transaction has NOT yet committed.
- */
- atomic_long_t cl_unstable_count;
- /** Link to osc_shrinker_list */
- struct list_head cl_shrink_list;
-
- /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
- atomic_t cl_destroy_in_flight;
- wait_queue_head_t cl_destroy_waitq;
-
- struct mdc_rpc_lock *cl_rpc_lock;
-
- /* modify rpcs in flight
- * currently used for metadata only
- */
- spinlock_t cl_mod_rpcs_lock;
- u16 cl_max_mod_rpcs_in_flight;
- u16 cl_mod_rpcs_in_flight;
- u16 cl_close_rpcs_in_flight;
- wait_queue_head_t cl_mod_rpcs_waitq;
- unsigned long *cl_mod_tag_bitmap;
- struct obd_histogram cl_mod_rpcs_hist;
-
- /* mgc datastruct */
- atomic_t cl_mgc_refcount;
- struct obd_export *cl_mgc_mgsexp;
-
- /* checksumming for data sent over the network */
- unsigned int cl_checksum:1; /* 0 = disabled, 1 = enabled */
- /* supported checksum types that are worked out at connect time */
- __u32 cl_supp_cksum_types;
- /* checksum algorithm to be used */
- enum cksum_type cl_cksum_type;
-
- /* also protected by the poorly named _loi_list_lock lock above */
- struct osc_async_rc cl_ar;
-
- /* sequence manager */
- struct lu_client_seq *cl_seq;
-
- atomic_t cl_resends; /* resend count */
-
- /* ptlrpc work for writeback in ptlrpcd context */
- void *cl_writeback_work;
- void *cl_lru_work;
- /* hash tables for osc_quota_info */
- struct cfs_hash *cl_quota_hash[MAXQUOTAS];
-};
-
-#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
-
-struct obd_id_info {
- __u32 idx;
- u64 *data;
-};
-
-struct echo_client_obd {
- struct obd_export *ec_exp; /* the local connection to osc/lov */
- spinlock_t ec_lock;
- struct list_head ec_objects;
- struct list_head ec_locks;
- __u64 ec_unique;
-};
-
-/* Generic subset of OSTs */
-struct ost_pool {
- __u32 *op_array; /* array of index of lov_obd->lov_tgts */
- unsigned int op_count; /* number of OSTs in the array */
- unsigned int op_size; /* allocated size of lp_array */
- struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
-};
-
-/* allow statfs data caching for 1 second */
-#define OBD_STATFS_CACHE_SECONDS 1
-
-struct lov_tgt_desc {
- struct list_head ltd_kill;
- struct obd_uuid ltd_uuid;
- struct obd_device *ltd_obd;
- struct obd_export *ltd_exp;
- __u32 ltd_gen;
- __u32 ltd_index; /* index in lov_obd->tgts */
- unsigned long ltd_active:1,/* is this target up for requests */
- ltd_activate:1,/* should target be activated */
- ltd_reap:1; /* should this target be deleted */
-};
-
-struct lov_obd {
- struct lov_desc desc;
- struct lov_tgt_desc **lov_tgts; /* sparse array */
- struct ost_pool lov_packed; /* all OSTs in a packed array */
- struct mutex lov_lock;
- struct obd_connect_data lov_ocd;
- atomic_t lov_refcount;
- __u32 lov_death_row;/* tgts scheduled to be deleted */
- __u32 lov_tgt_size; /* size of tgts array */
- int lov_connects;
- int lov_pool_count;
- struct cfs_hash *lov_pools_hash_body; /* used for key access */
- struct list_head lov_pool_list; /* used for sequential access */
- struct dentry *lov_pool_debugfs_entry;
- enum lustre_sec_part lov_sp_me;
-
- /* Cached LRU and unstable data from upper layer */
- struct cl_client_cache *lov_cache;
-
- struct rw_semaphore lov_notify_lock;
-
- struct kobject *lov_tgts_kobj;
-};
-
-struct lmv_tgt_desc {
- struct obd_uuid ltd_uuid;
- struct obd_export *ltd_exp;
- u32 ltd_idx;
- struct mutex ltd_fid_mutex;
- unsigned long ltd_active:1; /* target up for requests */
-};
-
-struct lmv_obd {
- struct lu_client_fld lmv_fld;
- spinlock_t lmv_lock;
- struct lmv_desc desc;
- struct obd_uuid cluuid;
-
- struct mutex lmv_init_mutex;
- int connected;
- int max_easize;
- int max_def_easize;
-
- u32 tgts_size; /* size of tgts array */
- struct lmv_tgt_desc **tgts;
-
- struct obd_connect_data conn_data;
- struct kobject *lmv_tgts_kobj;
-};
-
-struct niobuf_local {
- __u64 lnb_file_offset;
- __u32 lnb_page_offset;
- __u32 lnb_len;
- __u32 lnb_flags;
- int lnb_rc;
- struct page *lnb_page;
- void *lnb_data;
-};
-
-#define LUSTRE_FLD_NAME "fld"
-#define LUSTRE_SEQ_NAME "seq"
-
-#define LUSTRE_MDD_NAME "mdd"
-#define LUSTRE_OSD_LDISKFS_NAME "osd-ldiskfs"
-#define LUSTRE_OSD_ZFS_NAME "osd-zfs"
-#define LUSTRE_VVP_NAME "vvp"
-#define LUSTRE_LMV_NAME "lmv"
-#define LUSTRE_SLP_NAME "slp"
-#define LUSTRE_LOD_NAME "lod"
-#define LUSTRE_OSP_NAME "osp"
-#define LUSTRE_LWP_NAME "lwp"
-
-/* obd device type names */
- /* FIXME all the references to LUSTRE_MDS_NAME should be swapped with LUSTRE_MDT_NAME */
-#define LUSTRE_MDS_NAME "mds"
-#define LUSTRE_MDT_NAME "mdt"
-#define LUSTRE_MDC_NAME "mdc"
-#define LUSTRE_OSS_NAME "ost" /* FIXME change name to oss */
-#define LUSTRE_OST_NAME "obdfilter" /* FIXME change name to ost */
-#define LUSTRE_OSC_NAME "osc"
-#define LUSTRE_LOV_NAME "lov"
-#define LUSTRE_MGS_NAME "mgs"
-#define LUSTRE_MGC_NAME "mgc"
-
-#define LUSTRE_ECHO_NAME "obdecho"
-#define LUSTRE_ECHO_CLIENT_NAME "echo_client"
-#define LUSTRE_QMT_NAME "qmt"
-
-/* Constant obd names (post-rename) */
-#define LUSTRE_MDS_OBDNAME "MDS"
-#define LUSTRE_OSS_OBDNAME "OSS"
-#define LUSTRE_MGS_OBDNAME "MGS"
-#define LUSTRE_MGC_OBDNAME "MGC"
-
-/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */
-#define N_LOCAL_TEMP_PAGE 0x10000000
-
-/*
- * Events signalled through obd_notify() upcall-chain.
- */
-enum obd_notify_event {
- /* Device connect start */
- OBD_NOTIFY_CONNECT,
- /* Device activated */
- OBD_NOTIFY_ACTIVE,
- /* Device deactivated */
- OBD_NOTIFY_INACTIVE,
- /* Connect data for import were changed */
- OBD_NOTIFY_OCD,
- /* Sync request */
- OBD_NOTIFY_SYNC_NONBLOCK,
- OBD_NOTIFY_SYNC,
- /* Configuration event */
- OBD_NOTIFY_CONFIG,
- /* Administratively deactivate/activate event */
- OBD_NOTIFY_DEACTIVATE,
- OBD_NOTIFY_ACTIVATE
-};
-
-/*
- * Data structure used to pass obd_notify()-event to non-obd listeners (llite
- * being main example).
- */
-struct obd_notify_upcall {
- int (*onu_upcall)(struct obd_device *host, struct obd_device *watched,
- enum obd_notify_event ev, void *owner, void *data);
- /* Opaque datum supplied by upper layer listener */
- void *onu_owner;
-};
-
-struct target_recovery_data {
- svc_handler_t trd_recovery_handler;
- pid_t trd_processing_task;
- struct completion trd_starting;
- struct completion trd_finishing;
-};
-
-struct obd_llog_group {
- struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
- wait_queue_head_t olg_waitq;
- spinlock_t olg_lock;
- struct mutex olg_cat_processing;
-};
-
-/* corresponds to one of the obd's */
-#define OBD_DEVICE_MAGIC 0XAB5CD6EF
-
-struct lvfs_run_ctxt {
- struct dt_device *dt;
-};
-
-struct obd_device {
- struct obd_type *obd_type;
- u32 obd_magic; /* OBD_DEVICE_MAGIC */
- int obd_minor; /* device number: lctl dl */
- struct lu_device *obd_lu_dev;
-
- /* common and UUID name of this device */
- struct obd_uuid obd_uuid;
- char obd_name[MAX_OBD_NAME];
-
- /* bitfield modification is protected by obd_dev_lock */
- unsigned long obd_attached:1, /* finished attach */
- obd_set_up:1, /* finished setup */
- obd_version_recov:1, /* obd uses version checking */
- obd_replayable:1,/* recovery is enabled; inform clients */
- obd_no_transno:1, /* no committed-transno notification */
- obd_no_recov:1, /* fail instead of retry messages */
- obd_stopping:1, /* started cleanup */
- obd_starting:1, /* started setup */
- obd_force:1, /* cleanup with > 0 obd refcount */
- obd_fail:1, /* cleanup with failover */
- obd_no_conn:1, /* deny new connections */
- obd_inactive:1, /* device active/inactive
- * (for sysfs status only!!)
- */
- obd_no_ir:1, /* no imperative recovery. */
- obd_process_conf:1; /* device is processing mgs config */
- /* use separate field as it is set in interrupt to don't mess with
- * protection of other bits using _bh lock
- */
- unsigned long obd_recovery_expired:1;
- /* uuid-export hash body */
- struct cfs_hash *obd_uuid_hash;
- wait_queue_head_t obd_refcount_waitq;
- struct list_head obd_exports;
- struct list_head obd_unlinked_exports;
- struct list_head obd_delayed_exports;
- atomic_t obd_refcount;
- int obd_num_exports;
- spinlock_t obd_nid_lock;
- struct ldlm_namespace *obd_namespace;
- struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
- /* a spinlock is OK for what we do now, may need a semaphore later */
- spinlock_t obd_dev_lock; /* protect OBD bitfield above */
- spinlock_t obd_osfs_lock;
- struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
- __u64 obd_osfs_age;
- u64 obd_last_committed;
- struct mutex obd_dev_mutex;
- struct lvfs_run_ctxt obd_lvfs_ctxt;
- struct obd_llog_group obd_olg; /* default llog group */
- struct obd_device *obd_observer;
- struct rw_semaphore obd_observer_link_sem;
- struct obd_notify_upcall obd_upcall;
- struct obd_export *obd_self_export;
-
- union {
- struct client_obd cli;
- struct echo_client_obd echo_client;
- struct lov_obd lov;
- struct lmv_obd lmv;
- } u;
-
- /* Fields used by LProcFS */
- struct lprocfs_stats *obd_stats;
- unsigned int obd_cntr_base;
-
- struct lprocfs_stats *md_stats;
- unsigned int md_cntr_base;
-
- struct dentry *obd_debugfs_entry;
- struct dentry *obd_svc_debugfs_entry;
- struct lprocfs_stats *obd_svc_stats;
- atomic_t obd_evict_inprogress;
- wait_queue_head_t obd_evict_inprogress_waitq;
- struct list_head obd_evict_list; /* protected with pet_lock */
-
- /**
- * Ldlm pool part. Save last calculated SLV and Limit.
- */
- rwlock_t obd_pool_lock;
- u64 obd_pool_slv;
- int obd_pool_limit;
-
- int obd_conn_inprogress;
-
- /**
- * A list of outstanding class_incref()'s against this obd. For
- * debugging.
- */
- struct lu_ref obd_reference;
-
- struct kobject obd_kobj; /* sysfs object */
- struct completion obd_kobj_unregister;
-};
-
-/* get/set_info keys */
-#define KEY_ASYNC "async"
-#define KEY_CHANGELOG_CLEAR "changelog_clear"
-#define KEY_FID2PATH "fid2path"
-#define KEY_CHECKSUM "checksum"
-#define KEY_CLEAR_FS "clear_fs"
-#define KEY_CONN_DATA "conn_data"
-#define KEY_EVICT_BY_NID "evict_by_nid"
-#define KEY_FIEMAP "fiemap"
-#define KEY_FLUSH_CTX "flush_ctx"
-#define KEY_GRANT_SHRINK "grant_shrink"
-#define KEY_HSM_COPYTOOL_SEND "hsm_send"
-#define KEY_INIT_RECOV_BACKUP "init_recov_bk"
-#define KEY_INTERMDS "inter_mds"
-#define KEY_LAST_ID "last_id"
-#define KEY_LAST_FID "last_fid"
-#define KEY_MAX_EASIZE "max_easize"
-#define KEY_DEFAULT_EASIZE "default_easize"
-#define KEY_MGSSEC "mgssec"
-#define KEY_READ_ONLY "read-only"
-#define KEY_REGISTER_TARGET "register_target"
-#define KEY_SET_FS "set_fs"
-#define KEY_TGT_COUNT "tgt_count"
-/* KEY_SET_INFO in lustre_idl.h */
-#define KEY_SPTLRPC_CONF "sptlrpc_conf"
-
-#define KEY_CACHE_SET "cache_set"
-#define KEY_CACHE_LRU_SHRINK "cache_lru_shrink"
-
-struct lu_context;
-
-static inline int it_to_lock_mode(struct lookup_intent *it)
-{
- /* CREAT needs to be tested before open (both could be set) */
- if (it->it_op & IT_CREAT)
- return LCK_CW;
- else if (it->it_op & (IT_GETATTR | IT_OPEN | IT_LOOKUP |
- IT_LAYOUT))
- return LCK_CR;
- else if (it->it_op & IT_READDIR)
- return LCK_PR;
- else if (it->it_op & IT_GETXATTR)
- return LCK_PR;
- else if (it->it_op & IT_SETXATTR)
- return LCK_PW;
-
- LASSERTF(0, "Invalid it_op: %d\n", it->it_op);
- return -EINVAL;
-}
-
-enum md_op_flags {
- MF_MDC_CANCEL_FID1 = BIT(0),
- MF_MDC_CANCEL_FID2 = BIT(1),
- MF_MDC_CANCEL_FID3 = BIT(2),
- MF_MDC_CANCEL_FID4 = BIT(3),
- MF_GET_MDT_IDX = BIT(4),
-};
-
-enum md_cli_flags {
- CLI_SET_MEA = BIT(0),
- CLI_RM_ENTRY = BIT(1),
- CLI_HASH64 = BIT(2),
- CLI_API32 = BIT(3),
- CLI_MIGRATE = BIT(4),
-};
-
-struct md_op_data {
- struct lu_fid op_fid1; /* operation fid1 (usually parent) */
- struct lu_fid op_fid2; /* operation fid2 (usually child) */
- struct lu_fid op_fid3; /* 2 extra fids to find conflicting */
- struct lu_fid op_fid4; /* to the operation locks. */
- u32 op_mds; /* what mds server open will go to */
- struct lustre_handle op_handle;
- s64 op_mod_time;
- const char *op_name;
- size_t op_namelen;
- __u32 op_mode;
- struct lmv_stripe_md *op_mea1;
- struct lmv_stripe_md *op_mea2;
- __u32 op_suppgids[2];
- __u32 op_fsuid;
- __u32 op_fsgid;
- cfs_cap_t op_cap;
- void *op_data;
- size_t op_data_size;
-
- /* iattr fields and blocks. */
- struct iattr op_attr;
- unsigned int op_attr_flags;
- __u64 op_valid;
- loff_t op_attr_blocks;
-
- __u32 op_flags;
-
- /* Various operation flags. */
- enum mds_op_bias op_bias;
-
- /* Used by readdir */
- __u64 op_offset;
-
- /* Used by readdir */
- __u32 op_max_pages;
-
- /* used to transfer info between the stacks of MD client
- * see enum op_cli_flags
- */
- enum md_cli_flags op_cli_flags;
-
- /* File object data version for HSM release, on client */
- __u64 op_data_version;
- struct lustre_handle op_lease_handle;
-
- /* default stripe offset */
- __u32 op_default_stripe_offset;
-};
-
-struct md_callback {
- int (*md_blocking_ast)(struct ldlm_lock *lock,
- struct ldlm_lock_desc *desc,
- void *data, int flag);
-};
-
-struct md_enqueue_info;
-/* metadata stat-ahead */
-
-struct md_enqueue_info {
- struct md_op_data mi_data;
- struct lookup_intent mi_it;
- struct lustre_handle mi_lockh;
- struct inode *mi_dir;
- struct ldlm_enqueue_info mi_einfo;
- int (*mi_cb)(struct ptlrpc_request *req,
- struct md_enqueue_info *minfo, int rc);
- void *mi_cbdata;
-};
-
-struct obd_ops {
- struct module *owner;
- int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void __user *uarg);
- int (*get_info)(const struct lu_env *env, struct obd_export *,
- __u32 keylen, void *key, __u32 *vallen, void *val);
- int (*set_info_async)(const struct lu_env *, struct obd_export *,
- __u32 keylen, void *key,
- __u32 vallen, void *val,
- struct ptlrpc_request_set *set);
- int (*setup)(struct obd_device *dev, struct lustre_cfg *cfg);
- int (*precleanup)(struct obd_device *dev);
- int (*cleanup)(struct obd_device *dev);
- int (*process_config)(struct obd_device *dev, u32 len, void *data);
- int (*postrecov)(struct obd_device *dev);
- int (*add_conn)(struct obd_import *imp, struct obd_uuid *uuid,
- int priority);
- int (*del_conn)(struct obd_import *imp, struct obd_uuid *uuid);
- /* connect to the target device with given connection
- * data. @ocd->ocd_connect_flags is modified to reflect flags actually
- * granted by the target, which are guaranteed to be a subset of flags
- * asked for. If @ocd == NULL, use default parameters.
- */
- int (*connect)(const struct lu_env *env,
- struct obd_export **exp, struct obd_device *src,
- struct obd_uuid *cluuid, struct obd_connect_data *ocd,
- void *localdata);
- int (*reconnect)(const struct lu_env *env,
- struct obd_export *exp, struct obd_device *src,
- struct obd_uuid *cluuid,
- struct obd_connect_data *ocd,
- void *localdata);
- int (*disconnect)(struct obd_export *exp);
-
- /* Initialize/finalize fids infrastructure. */
- int (*fid_init)(struct obd_device *obd,
- struct obd_export *exp, enum lu_cli_type type);
- int (*fid_fini)(struct obd_device *obd);
-
- /* Allocate new fid according to passed @hint. */
- int (*fid_alloc)(const struct lu_env *env, struct obd_export *exp,
- struct lu_fid *fid, struct md_op_data *op_data);
-
- /*
- * Object with @fid is getting deleted, we may want to do something
- * about this.
- */
- int (*statfs)(const struct lu_env *, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age, __u32 flags);
- int (*statfs_async)(struct obd_export *exp, struct obd_info *oinfo,
- __u64 max_age, struct ptlrpc_request_set *set);
- int (*create)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa);
- int (*destroy)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa);
- int (*setattr)(const struct lu_env *, struct obd_export *exp,
- struct obdo *oa);
- int (*getattr)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa);
- int (*preprw)(const struct lu_env *env, int cmd,
- struct obd_export *exp, struct obdo *oa, int objcount,
- struct obd_ioobj *obj, struct niobuf_remote *remote,
- int *nr_pages, struct niobuf_local *local);
- int (*commitrw)(const struct lu_env *env, int cmd,
- struct obd_export *exp, struct obdo *oa,
- int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *remote, int pages,
- struct niobuf_local *local, int rc);
- int (*init_export)(struct obd_export *exp);
- int (*destroy_export)(struct obd_export *exp);
-
- /* metadata-only methods */
- int (*import_event)(struct obd_device *, struct obd_import *,
- enum obd_import_event);
-
- int (*notify)(struct obd_device *obd, struct obd_device *watched,
- enum obd_notify_event ev, void *data);
-
- int (*health_check)(const struct lu_env *env, struct obd_device *);
- struct obd_uuid *(*get_uuid)(struct obd_export *exp);
-
- /* quota methods */
- int (*quotactl)(struct obd_device *, struct obd_export *,
- struct obd_quotactl *);
-
- /* pools methods */
- int (*pool_new)(struct obd_device *obd, char *poolname);
- int (*pool_del)(struct obd_device *obd, char *poolname);
- int (*pool_add)(struct obd_device *obd, char *poolname,
- char *ostname);
- int (*pool_rem)(struct obd_device *obd, char *poolname,
- char *ostname);
- void (*getref)(struct obd_device *obd);
- void (*putref)(struct obd_device *obd);
- /*
- * NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line
- * to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c.
- * Also, add a wrapper function in include/linux/obd_class.h.
- */
-};
-
-/* lmv structures */
-struct lustre_md {
- struct mdt_body *body;
- struct lu_buf layout;
- struct lmv_stripe_md *lmv;
-#ifdef CONFIG_FS_POSIX_ACL
- struct posix_acl *posix_acl;
-#endif
- struct mdt_remote_perm *remote_perm;
-};
-
-struct md_open_data {
- struct obd_client_handle *mod_och;
- struct ptlrpc_request *mod_open_req;
- struct ptlrpc_request *mod_close_req;
- atomic_t mod_refcount;
- bool mod_is_create;
-};
-
-struct obd_client_handle {
- struct lustre_handle och_fh;
- struct lu_fid och_fid;
- struct md_open_data *och_mod;
- struct lustre_handle och_lease_handle; /* open lock for lease */
- __u32 och_magic;
- fmode_t och_flags;
-};
-
-#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
-
-struct lookup_intent;
-struct cl_attr;
-
-struct md_ops {
- int (*getstatus)(struct obd_export *, struct lu_fid *);
- int (*null_inode)(struct obd_export *, const struct lu_fid *);
- int (*close)(struct obd_export *, struct md_op_data *,
- struct md_open_data *, struct ptlrpc_request **);
- int (*create)(struct obd_export *, struct md_op_data *,
- const void *, size_t, umode_t, uid_t, gid_t,
- cfs_cap_t, __u64, struct ptlrpc_request **);
- int (*enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
- const union ldlm_policy_data *,
- struct lookup_intent *, struct md_op_data *,
- struct lustre_handle *, __u64);
- int (*getattr)(struct obd_export *, struct md_op_data *,
- struct ptlrpc_request **);
- int (*getattr_name)(struct obd_export *, struct md_op_data *,
- struct ptlrpc_request **);
- int (*intent_lock)(struct obd_export *, struct md_op_data *,
- struct lookup_intent *,
- struct ptlrpc_request **,
- ldlm_blocking_callback, __u64);
- int (*link)(struct obd_export *, struct md_op_data *,
- struct ptlrpc_request **);
- int (*rename)(struct obd_export *, struct md_op_data *,
- const char *, size_t, const char *, size_t,
- struct ptlrpc_request **);
- int (*setattr)(struct obd_export *, struct md_op_data *, void *,
- size_t, struct ptlrpc_request **);
- int (*sync)(struct obd_export *, const struct lu_fid *,
- struct ptlrpc_request **);
- int (*read_page)(struct obd_export *, struct md_op_data *,
- struct md_callback *cb_op, __u64 hash_offset,
- struct page **ppage);
- int (*unlink)(struct obd_export *, struct md_op_data *,
- struct ptlrpc_request **);
-
- int (*setxattr)(struct obd_export *, const struct lu_fid *,
- u64, const char *, const char *, int, int, int, __u32,
- struct ptlrpc_request **);
-
- int (*getxattr)(struct obd_export *, const struct lu_fid *,
- u64, const char *, const char *, int, int, int,
- struct ptlrpc_request **);
-
- int (*init_ea_size)(struct obd_export *, u32, u32);
-
- int (*get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
- struct obd_export *, struct obd_export *,
- struct lustre_md *);
-
- int (*free_lustre_md)(struct obd_export *, struct lustre_md *);
-
- int (*merge_attr)(struct obd_export *,
- const struct lmv_stripe_md *lsm,
- struct cl_attr *attr, ldlm_blocking_callback);
-
- int (*set_open_replay_data)(struct obd_export *,
- struct obd_client_handle *,
- struct lookup_intent *);
- int (*clear_open_replay_data)(struct obd_export *,
- struct obd_client_handle *);
- int (*set_lock_data)(struct obd_export *, const struct lustre_handle *,
- void *, __u64 *);
-
- enum ldlm_mode (*lock_match)(struct obd_export *, __u64,
- const struct lu_fid *, enum ldlm_type,
- union ldlm_policy_data *, enum ldlm_mode,
- struct lustre_handle *);
-
- int (*cancel_unused)(struct obd_export *, const struct lu_fid *,
- union ldlm_policy_data *, enum ldlm_mode,
- enum ldlm_cancel_flags flags, void *opaque);
-
- int (*get_fid_from_lsm)(struct obd_export *,
- const struct lmv_stripe_md *,
- const char *name, int namelen,
- struct lu_fid *fid);
-
- int (*intent_getattr_async)(struct obd_export *,
- struct md_enqueue_info *);
-
- int (*revalidate_lock)(struct obd_export *, struct lookup_intent *,
- struct lu_fid *, __u64 *bits);
-
- int (*unpackmd)(struct obd_export *exp, struct lmv_stripe_md **plsm,
- const union lmv_mds_md *lmv, size_t lmv_size);
- /*
- * NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
- * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
- * wrapper function in include/linux/obd_class.h.
- */
-};
-
-static inline struct md_open_data *obd_mod_alloc(void)
-{
- struct md_open_data *mod;
-
- mod = kzalloc(sizeof(*mod), GFP_NOFS);
- if (!mod)
- return NULL;
- atomic_set(&mod->mod_refcount, 1);
- return mod;
-}
-
-#define obd_mod_get(mod) atomic_inc(&(mod)->mod_refcount)
-#define obd_mod_put(mod) \
-({ \
- if (atomic_dec_and_test(&(mod)->mod_refcount)) { \
- if ((mod)->mod_open_req) \
- ptlrpc_req_finished((mod)->mod_open_req); \
- kfree(mod); \
- } \
-})
-
-void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid);
-void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent);
-
-/* return 1 if client should be resend request */
-static inline int client_should_resend(int resend, struct client_obd *cli)
-{
- return atomic_read(&cli->cl_resends) ?
- atomic_read(&cli->cl_resends) > resend : 1;
-}
-
-/**
- * Return device name for this device
- *
- * XXX: lu_device is declared before obd_device, while a pointer pointing
- * back to obd_device in lu_device, so this helper function defines here
- * instead of in lu_object.h
- */
-static inline const char *lu_dev_name(const struct lu_device *lu_dev)
-{
- return lu_dev->ld_obd->obd_name;
-}
-
-static inline bool filename_is_volatile(const char *name, size_t namelen,
- int *idx)
-{
- const char *start;
- char *end;
-
- if (strncmp(name, LUSTRE_VOLATILE_HDR, LUSTRE_VOLATILE_HDR_LEN) != 0)
- return false;
-
- /* caller does not care of idx */
- if (!idx)
- return true;
-
- /* volatile file, the MDT can be set from name */
- /* name format is LUSTRE_VOLATILE_HDR:[idx]: */
- /* if no MDT is specified, use std way */
- if (namelen < LUSTRE_VOLATILE_HDR_LEN + 2)
- goto bad_format;
- /* test for no MDT idx case */
- if ((*(name + LUSTRE_VOLATILE_HDR_LEN) == ':') &&
- (*(name + LUSTRE_VOLATILE_HDR_LEN + 1) == ':')) {
- *idx = -1;
- return true;
- }
- /* we have an idx, read it */
- start = name + LUSTRE_VOLATILE_HDR_LEN + 1;
- *idx = simple_strtoul(start, &end, 0);
- /* error cases:
- * no digit, no trailing :, negative value
- */
- if (((*idx == 0) && (end == start)) ||
- (*end != ':') || (*idx < 0))
- goto bad_format;
-
- return true;
-bad_format:
- /* bad format of mdt idx, we cannot return an error
- * to caller so we use hash algo
- */
- CERROR("Bad volatile file name format: %s\n",
- name + LUSTRE_VOLATILE_HDR_LEN);
- return false;
-}
-
-static inline int cli_brw_size(struct obd_device *obd)
-{
- return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
-}
-
-/*
- * when RPC size or the max RPCs in flight is increased, the max dirty pages
- * of the client should be increased accordingly to avoid sending fragmented
- * RPCs over the network when the client runs out of the maximum dirty space
- * when so many RPCs are being generated.
- */
-static inline void client_adjust_max_dirty(struct client_obd *cli)
-{
- /* initializing */
- if (cli->cl_dirty_max_pages <= 0)
- cli->cl_dirty_max_pages =
- (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) >> PAGE_SHIFT;
- else {
- unsigned long dirty_max = cli->cl_max_rpcs_in_flight *
- cli->cl_max_pages_per_rpc;
-
- if (dirty_max > cli->cl_dirty_max_pages)
- cli->cl_dirty_max_pages = dirty_max;
- }
-
- if (cli->cl_dirty_max_pages > totalram_pages / 8)
- cli->cl_dirty_max_pages = totalram_pages / 8;
-}
-
-#endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
deleted file mode 100644
index e5f7bb20415d..000000000000
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ /dev/null
@@ -1,153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __OBD_CKSUM
-#define __OBD_CKSUM
-#include <linux/libcfs/libcfs.h>
-#include <linux/libcfs/libcfs_crypto.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-
-static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type)
-{
- switch (cksum_type) {
- case OBD_CKSUM_CRC32:
- return CFS_HASH_ALG_CRC32;
- case OBD_CKSUM_ADLER:
- return CFS_HASH_ALG_ADLER32;
- case OBD_CKSUM_CRC32C:
- return CFS_HASH_ALG_CRC32C;
- default:
- CERROR("Unknown checksum type (%x)!!!\n", cksum_type);
- LBUG();
- }
- return 0;
-}
-
-/* The OBD_FL_CKSUM_* flags is packed into 5 bits of o_flags, since there can
- * only be a single checksum type per RPC.
- *
- * The OBD_CHECKSUM_* type bits passed in ocd_cksum_types are a 32-bit bitmask
- * since they need to represent the full range of checksum algorithms that
- * both the client and server can understand.
- *
- * In case of an unsupported types/flags we fall back to ADLER
- * because that is supported by all clients since 1.8
- *
- * In case multiple algorithms are supported the best one is used.
- */
-static inline u32 cksum_type_pack(enum cksum_type cksum_type)
-{
- unsigned int performance = 0, tmp;
- u32 flag = OBD_FL_CKSUM_ADLER;
-
- if (cksum_type & OBD_CKSUM_CRC32) {
- tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32));
- if (tmp > performance) {
- performance = tmp;
- flag = OBD_FL_CKSUM_CRC32;
- }
- }
- if (cksum_type & OBD_CKSUM_CRC32C) {
- tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C));
- if (tmp > performance) {
- performance = tmp;
- flag = OBD_FL_CKSUM_CRC32C;
- }
- }
- if (cksum_type & OBD_CKSUM_ADLER) {
- tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER));
- if (tmp > performance) {
- performance = tmp;
- flag = OBD_FL_CKSUM_ADLER;
- }
- }
- if (unlikely(cksum_type && !(cksum_type & (OBD_CKSUM_CRC32C |
- OBD_CKSUM_CRC32 |
- OBD_CKSUM_ADLER))))
- CWARN("unknown cksum type %x\n", cksum_type);
-
- return flag;
-}
-
-static inline enum cksum_type cksum_type_unpack(u32 o_flags)
-{
- switch (o_flags & OBD_FL_CKSUM_ALL) {
- case OBD_FL_CKSUM_CRC32C:
- return OBD_CKSUM_CRC32C;
- case OBD_FL_CKSUM_CRC32:
- return OBD_CKSUM_CRC32;
- default:
- break;
- }
-
- return OBD_CKSUM_ADLER;
-}
-
-/* Return a bitmask of the checksum types supported on this system.
- * 1.8 supported ADLER it is base and not depend on hw
- * Client uses all available local algos
- */
-static inline enum cksum_type cksum_types_supported_client(void)
-{
- enum cksum_type ret = OBD_CKSUM_ADLER;
-
- CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n",
- cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)),
- cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)),
- cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER)));
-
- if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)) > 0)
- ret |= OBD_CKSUM_CRC32C;
- if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)) > 0)
- ret |= OBD_CKSUM_CRC32;
-
- return ret;
-}
-
-/* Select the best checksum algorithm among those supplied in the cksum_types
- * input.
- *
- * Currently, calling cksum_type_pack() with a mask will return the fastest
- * checksum type due to its benchmarking at libcfs module load.
- * Caution is advised, however, since what is fastest on a single client may
- * not be the fastest or most efficient algorithm on the server.
- */
-static inline enum cksum_type cksum_type_select(enum cksum_type cksum_types)
-{
- return cksum_type_unpack(cksum_type_pack(cksum_types));
-}
-
-/* Checksum algorithm names. Must be defined in the same order as the
- * OBD_CKSUM_* flags.
- */
-#define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"}
-
-#endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
deleted file mode 100644
index f24dd74ffa09..000000000000
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ /dev/null
@@ -1,1607 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#ifndef __CLASS_OBD_H
-#define __CLASS_OBD_H
-
-#include <obd_support.h>
-#include <lustre_import.h>
-#include <lustre_net.h>
-#include <obd.h>
-#include <lustre_lib.h>
-#include <lprocfs_status.h>
-
-/* requests should be send without delay and resends for avoid deadlocks */
-#define OBD_STATFS_NODELAY 0x0001
-/* the statfs callback should not update obd_osfs_age */
-#define OBD_STATFS_FROM_CACHE 0x0002
-/* the statfs is only for retrieving information from MDT0 */
-#define OBD_STATFS_FOR_MDT0 0x0004
-
-/* OBD Device Declarations */
-extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
-extern rwlock_t obd_dev_lock;
-
-/* OBD Operations Declarations */
-struct obd_device *class_exp2obd(struct obd_export *exp);
-int class_handle_ioctl(unsigned int cmd, unsigned long arg);
-int lustre_get_jobid(char *jobid);
-
-struct lu_device_type;
-
-/* genops.c */
-extern struct list_head obd_types;
-struct obd_export *class_conn2export(struct lustre_handle *conn);
-int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
- const char *name, struct lu_device_type *ldt);
-int class_unregister_type(const char *name);
-
-struct obd_device *class_newdev(const char *type_name, const char *name);
-void class_release_dev(struct obd_device *obd);
-
-int class_name2dev(const char *name);
-struct obd_device *class_name2obd(const char *name);
-int class_uuid2dev(struct obd_uuid *uuid);
-struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
- const char *typ_name,
- struct obd_uuid *grp_uuid);
-struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid,
- int *next);
-struct obd_device *class_num2obd(int num);
-
-int class_notify_sptlrpc_conf(const char *fsname, int namelen);
-
-int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep);
-
-int obd_zombie_impexp_init(void);
-void obd_zombie_impexp_stop(void);
-void obd_zombie_barrier(void);
-
-int obd_get_request_slot(struct client_obd *cli);
-void obd_put_request_slot(struct client_obd *cli);
-__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli);
-int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max);
-int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, u16 max);
-int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq);
-
-u16 obd_get_mod_rpc_slot(struct client_obd *cli, u32 opc,
- struct lookup_intent *it);
-void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
- struct lookup_intent *it, u16 tag);
-
-struct llog_handle;
-struct llog_rec_hdr;
-typedef int (*llog_cb_t)(const struct lu_env *, struct llog_handle *,
- struct llog_rec_hdr *, void *);
-
-/* obd_config.c */
-char *lustre_cfg_string(struct lustre_cfg *lcfg, u32 index);
-int class_process_config(struct lustre_cfg *lcfg);
-int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
- struct lustre_cfg *lcfg, void *data);
-
-/* For interoperability */
-struct cfg_interop_param {
- char *old_param;
- char *new_param;
-};
-
-int class_find_param(char *buf, char *key, char **valp);
-struct cfg_interop_param *class_find_old_param(const char *param,
- struct cfg_interop_param *ptr);
-int class_get_next_param(char **params, char *copy);
-int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh);
-int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh);
-int class_parse_net(char *buf, u32 *net, char **endh);
-int class_match_nid(char *buf, char *key, lnet_nid_t nid);
-int class_match_net(char *buf, char *key, u32 net);
-
-struct obd_device *class_incref(struct obd_device *obd,
- const char *scope, const void *source);
-void class_decref(struct obd_device *obd,
- const char *scope, const void *source);
-int class_config_llog_handler(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, void *data);
-int class_add_uuid(const char *uuid, __u64 nid);
-
-/* obdecho */
-void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars);
-
-#define CFG_F_START 0x01 /* Set when we start updating from a log */
-#define CFG_F_MARKER 0x02 /* We are within a maker */
-#define CFG_F_SKIP 0x04 /* We should ignore this cfg command */
-#define CFG_F_COMPAT146 0x08 /* Allow old-style logs */
-#define CFG_F_EXCLUDE 0x10 /* OST exclusion list */
-
-/* Passed as data param to class_config_parse_llog */
-struct config_llog_instance {
- char *cfg_obdname;
- void *cfg_instance;
- struct super_block *cfg_sb;
- struct obd_uuid cfg_uuid;
- llog_cb_t cfg_callback;
- int cfg_last_idx; /* for partial llog processing */
- int cfg_flags;
-};
-
-int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
- char *name, struct config_llog_instance *cfg);
-enum {
- CONFIG_T_CONFIG = 0,
- CONFIG_T_SPTLRPC = 1,
- CONFIG_T_RECOVER = 2,
- CONFIG_T_PARAMS = 3,
- CONFIG_T_MAX = 4
-};
-
-#define PARAMS_FILENAME "params"
-#define LCTL_UPCALL "lctl"
-
-/* list of active configuration logs */
-struct config_llog_data {
- struct ldlm_res_id cld_resid;
- struct config_llog_instance cld_cfg;
- struct list_head cld_list_chain;
- atomic_t cld_refcount;
- struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */
- struct config_llog_data *cld_params; /* common parameters log */
- struct config_llog_data *cld_recover;/* imperative recover log */
- struct obd_export *cld_mgcexp;
- struct mutex cld_lock;
- int cld_type;
- unsigned int cld_stopping:1, /*
- * we were told to stop
- * watching
- */
- cld_lostlock:1; /* lock not requeued */
- char cld_logname[0];
-};
-
-struct lustre_profile {
- struct list_head lp_list;
- char *lp_profile;
- char *lp_dt;
- char *lp_md;
- int lp_refs;
- bool lp_list_deleted;
-};
-
-struct lustre_profile *class_get_profile(const char *prof);
-void class_del_profile(const char *prof);
-void class_put_profile(struct lustre_profile *lprof);
-void class_del_profiles(void);
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-
-void __class_export_add_lock_ref(struct obd_export *exp,
- struct ldlm_lock *lock);
-void __class_export_del_lock_ref(struct obd_export *exp,
- struct ldlm_lock *lock);
-extern void (*class_export_dump_hook)(struct obd_export *exp);
-
-#else
-
-#define __class_export_add_lock_ref(exp, lock) do {} while (0)
-#define __class_export_del_lock_ref(exp, lock) do {} while (0)
-
-#endif
-
-/* genops.c */
-struct obd_export *class_export_get(struct obd_export *exp);
-void class_export_put(struct obd_export *exp);
-struct obd_export *class_new_export(struct obd_device *obddev,
- struct obd_uuid *cluuid);
-void class_unlink_export(struct obd_export *exp);
-
-struct obd_import *class_import_get(struct obd_import *imp);
-void class_import_put(struct obd_import *imp);
-struct obd_import *class_new_import(struct obd_device *obd);
-void class_destroy_import(struct obd_import *exp);
-
-void class_put_type(struct obd_type *type);
-int class_connect(struct lustre_handle *conn, struct obd_device *obd,
- struct obd_uuid *cluuid);
-int class_disconnect(struct obd_export *exp);
-void class_fail_export(struct obd_export *exp);
-int class_manual_cleanup(struct obd_device *obd);
-
-static inline void class_export_rpc_inc(struct obd_export *exp)
-{
- atomic_inc(&(exp)->exp_rpc_count);
- CDEBUG(D_INFO, "RPC GETting export %p : new rpc_count %d\n",
- (exp), atomic_read(&(exp)->exp_rpc_count));
-}
-
-static inline void class_export_rpc_dec(struct obd_export *exp)
-{
- LASSERT_ATOMIC_POS(&exp->exp_rpc_count);
- atomic_dec(&(exp)->exp_rpc_count);
- CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n",
- (exp), atomic_read(&(exp)->exp_rpc_count));
-}
-
-static inline struct obd_export *class_export_lock_get(struct obd_export *exp,
- struct ldlm_lock *lock)
-{
- atomic_inc(&(exp)->exp_locks_count);
- __class_export_add_lock_ref(exp, lock);
- CDEBUG(D_INFO, "lock GETting export %p : new locks_count %d\n",
- (exp), atomic_read(&(exp)->exp_locks_count));
- return class_export_get(exp);
-}
-
-static inline void class_export_lock_put(struct obd_export *exp,
- struct ldlm_lock *lock)
-{
- LASSERT_ATOMIC_POS(&exp->exp_locks_count);
- atomic_dec(&(exp)->exp_locks_count);
- __class_export_del_lock_ref(exp, lock);
- CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n",
- (exp), atomic_read(&(exp)->exp_locks_count));
- class_export_put(exp);
-}
-
-static inline enum obd_option exp_flags_from_obd(struct obd_device *obd)
-{
- return ((obd->obd_fail ? OBD_OPT_FAILOVER : 0) |
- (obd->obd_force ? OBD_OPT_FORCE : 0) |
- 0);
-}
-
-static inline int lprocfs_climp_check(struct obd_device *obd)
-{
- down_read(&(obd)->u.cli.cl_sem);
- if (!(obd)->u.cli.cl_import) {
- up_read(&(obd)->u.cli.cl_sem);
- return -ENODEV;
- }
- return 0;
-}
-
-struct inode;
-struct lu_attr;
-struct obdo;
-
-void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj);
-
-#define OBT(dev) ((dev)->obd_type)
-#define OBP(dev, op) ((dev)->obd_type->typ_dt_ops->op)
-#define MDP(dev, op) ((dev)->obd_type->typ_md_ops->op)
-#define CTXTP(ctxt, op) ((ctxt)->loc_logops->lop_##op)
-
-/*
- * Ensure obd_setup: used for cleanup which must be called
- * while obd is stopping
- */
-static inline int obd_check_dev(struct obd_device *obd)
-{
- if (!obd) {
- CERROR("NULL device\n");
- return -ENODEV;
- }
- return 0;
-}
-
-/* ensure obd_setup and !obd_stopping */
-static inline int obd_check_dev_active(struct obd_device *obd)
-{
- int rc;
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
- if (!obd->obd_set_up || obd->obd_stopping) {
- CERROR("Device %d not setup\n", obd->obd_minor);
- return -ENODEV;
- }
- return rc;
-}
-
-#define OBD_COUNTER_OFFSET(op) \
- ((offsetof(struct obd_ops, op) - \
- offsetof(struct obd_ops, iocontrol)) \
- / sizeof(((struct obd_ops *)(0))->iocontrol))
-
-#define OBD_COUNTER_INCREMENT(obdx, op) \
-do { \
- if ((obdx)->obd_stats) { \
- unsigned int coffset; \
- coffset = (unsigned int)((obdx)->obd_cntr_base) + \
- OBD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (obdx)->obd_stats->ls_num); \
- lprocfs_counter_incr((obdx)->obd_stats, coffset); \
- } \
-} while (0)
-
-#define EXP_COUNTER_INCREMENT(export, op) \
-do { \
- if ((export)->exp_obd->obd_stats) { \
- unsigned int coffset; \
- coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \
- OBD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (export)->exp_obd->obd_stats->ls_num); \
- lprocfs_counter_incr((export)->exp_obd->obd_stats, coffset); \
- } \
-} while (0)
-
-#define MD_COUNTER_OFFSET(op) \
- ((offsetof(struct md_ops, op) - \
- offsetof(struct md_ops, getstatus)) \
- / sizeof(((struct md_ops *)(0))->getstatus))
-
-#define MD_COUNTER_INCREMENT(obdx, op) \
-do { \
- if ((obd)->md_stats) { \
- unsigned int coffset; \
- coffset = (unsigned int)((obdx)->md_cntr_base) + \
- MD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (obdx)->md_stats->ls_num); \
- lprocfs_counter_incr((obdx)->md_stats, coffset); \
- } \
-} while (0)
-
-#define EXP_MD_COUNTER_INCREMENT(export, op) \
-do { \
- if ((export)->exp_obd->obd_stats) { \
- unsigned int coffset; \
- coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \
- MD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \
- lprocfs_counter_incr((export)->exp_obd->md_stats, coffset); \
- if ((export)->exp_md_stats) \
- lprocfs_counter_incr( \
- (export)->exp_md_stats, coffset); \
- } \
-} while (0)
-
-#define EXP_CHECK_MD_OP(exp, op) \
-do { \
- if (!(exp)) { \
- CERROR("obd_" #op ": NULL export\n"); \
- return -ENODEV; \
- } \
- if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \
- CERROR("obd_" #op ": cleaned up obd\n"); \
- return -EOPNOTSUPP; \
- } \
- if (!OBT((exp)->exp_obd) || !MDP((exp)->exp_obd, op)) { \
- CERROR("obd_" #op ": dev %s/%d no operation\n", \
- (exp)->exp_obd->obd_name, \
- (exp)->exp_obd->obd_minor); \
- return -EOPNOTSUPP; \
- } \
-} while (0)
-
-#define OBD_CHECK_DT_OP(obd, op, err) \
-do { \
- if (!OBT(obd) || !OBP((obd), op)) { \
- if (err) \
- CERROR("obd_" #op ": dev %d no operation\n", \
- obd->obd_minor); \
- return err; \
- } \
-} while (0)
-
-#define EXP_CHECK_DT_OP(exp, op) \
-do { \
- if (!(exp)) { \
- CERROR("obd_" #op ": NULL export\n"); \
- return -ENODEV; \
- } \
- if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \
- CERROR("obd_" #op ": cleaned up obd\n"); \
- return -EOPNOTSUPP; \
- } \
- if (!OBT((exp)->exp_obd) || !OBP((exp)->exp_obd, op)) { \
- CERROR("obd_" #op ": dev %d no operation\n", \
- (exp)->exp_obd->obd_minor); \
- return -EOPNOTSUPP; \
- } \
-} while (0)
-
-#define CTXT_CHECK_OP(ctxt, op, err) \
-do { \
- if (!OBT(ctxt->loc_obd) || !CTXTP((ctxt), op)) { \
- if (err) \
- CERROR("lop_" #op ": dev %d no operation\n", \
- ctxt->loc_obd->obd_minor); \
- return err; \
- } \
-} while (0)
-
-static inline int class_devno_max(void)
-{
- return MAX_OBD_DEVICES;
-}
-
-static inline int obd_get_info(const struct lu_env *env,
- struct obd_export *exp, __u32 keylen,
- void *key, __u32 *vallen, void *val)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, get_info);
- EXP_COUNTER_INCREMENT(exp, get_info);
-
- rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val);
- return rc;
-}
-
-static inline int obd_set_info_async(const struct lu_env *env,
- struct obd_export *exp, u32 keylen,
- void *key, u32 vallen, void *val,
- struct ptlrpc_request_set *set)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, set_info_async);
- EXP_COUNTER_INCREMENT(exp, set_info_async);
-
- rc = OBP(exp->exp_obd, set_info_async)(env, exp, keylen, key, vallen,
- val, set);
- return rc;
-}
-
-/*
- * obd-lu integration.
- *
- * Functionality is being moved into new lu_device-based layering, but some
- * pieces of configuration process are still based on obd devices.
- *
- * Specifically, lu_device_type_operations::ldto_device_alloc() methods fully
- * subsume ->o_setup() methods of obd devices they replace. The same for
- * lu_device_operations::ldo_process_config() and ->o_process_config(). As a
- * result, obd_setup() and obd_process_config() branch and call one XOR
- * another.
- *
- * Yet neither lu_device_type_operations::ldto_device_fini() nor
- * lu_device_type_operations::ldto_device_free() fully implement the
- * functionality of ->o_precleanup() and ->o_cleanup() they override. Hence,
- * obd_precleanup() and obd_cleanup() call both lu_device and obd operations.
- */
-
-static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
-{
- int rc;
- struct lu_device_type *ldt;
- struct lu_device *d;
-
- ldt = obd->obd_type->typ_lu;
- if (ldt) {
- struct lu_context session_ctx;
- struct lu_env env;
-
- lu_context_init(&session_ctx, LCT_SESSION | LCT_SERVER_SESSION);
- session_ctx.lc_thread = NULL;
- lu_context_enter(&session_ctx);
-
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (rc == 0) {
- env.le_ses = &session_ctx;
- d = ldt->ldt_ops->ldto_device_alloc(&env, ldt, cfg);
- lu_env_fini(&env);
- if (!IS_ERR(d)) {
- obd->obd_lu_dev = d;
- d->ld_obd = obd;
- rc = 0;
- } else {
- rc = PTR_ERR(d);
- }
- }
- lu_context_exit(&session_ctx);
- lu_context_fini(&session_ctx);
-
- } else {
- OBD_CHECK_DT_OP(obd, setup, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, setup);
- rc = OBP(obd, setup)(obd, cfg);
- }
- return rc;
-}
-
-static inline int obd_precleanup(struct obd_device *obd)
-{
- int rc;
- struct lu_device_type *ldt;
- struct lu_device *d;
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
- ldt = obd->obd_type->typ_lu;
- d = obd->obd_lu_dev;
- if (ldt && d) {
- struct lu_env env;
-
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (!rc) {
- ldt->ldt_ops->ldto_device_fini(&env, d);
- lu_env_fini(&env);
- }
- }
- OBD_CHECK_DT_OP(obd, precleanup, 0);
- OBD_COUNTER_INCREMENT(obd, precleanup);
-
- rc = OBP(obd, precleanup)(obd);
- return rc;
-}
-
-static inline int obd_cleanup(struct obd_device *obd)
-{
- int rc;
- struct lu_device_type *ldt;
- struct lu_device *d;
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
-
- ldt = obd->obd_type->typ_lu;
- d = obd->obd_lu_dev;
- if (ldt && d) {
- struct lu_env env;
-
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (rc == 0) {
- ldt->ldt_ops->ldto_device_free(&env, d);
- lu_env_fini(&env);
- obd->obd_lu_dev = NULL;
- }
- }
- OBD_CHECK_DT_OP(obd, cleanup, 0);
- OBD_COUNTER_INCREMENT(obd, cleanup);
-
- rc = OBP(obd, cleanup)(obd);
- return rc;
-}
-
-static inline void obd_cleanup_client_import(struct obd_device *obd)
-{
- /*
- * If we set up but never connected, the
- * client import will not have been cleaned.
- */
- down_write(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import) {
- struct obd_import *imp;
-
- imp = obd->u.cli.cl_import;
- CDEBUG(D_CONFIG, "%s: client import never connected\n",
- obd->obd_name);
- ptlrpc_invalidate_import(imp);
- client_destroy_import(imp);
- obd->u.cli.cl_import = NULL;
- }
- up_write(&obd->u.cli.cl_sem);
-}
-
-static inline int
-obd_process_config(struct obd_device *obd, int datalen, void *data)
-{
- int rc;
- struct lu_device_type *ldt;
- struct lu_device *d;
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
-
- obd->obd_process_conf = 1;
- ldt = obd->obd_type->typ_lu;
- d = obd->obd_lu_dev;
- if (ldt && d) {
- struct lu_env env;
-
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (rc == 0) {
- rc = d->ld_ops->ldo_process_config(&env, d, data);
- lu_env_fini(&env);
- }
- } else {
- OBD_CHECK_DT_OP(obd, process_config, -EOPNOTSUPP);
- rc = OBP(obd, process_config)(obd, datalen, data);
- }
- OBD_COUNTER_INCREMENT(obd, process_config);
- obd->obd_process_conf = 0;
-
- return rc;
-}
-
-static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, create);
- EXP_COUNTER_INCREMENT(exp, create);
-
- rc = OBP(exp->exp_obd, create)(env, exp, obdo);
- return rc;
-}
-
-static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, destroy);
- EXP_COUNTER_INCREMENT(exp, destroy);
-
- rc = OBP(exp->exp_obd, destroy)(env, exp, obdo);
- return rc;
-}
-
-static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, getattr);
- EXP_COUNTER_INCREMENT(exp, getattr);
-
- rc = OBP(exp->exp_obd, getattr)(env, exp, oa);
- return rc;
-}
-
-static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, setattr);
- EXP_COUNTER_INCREMENT(exp, setattr);
-
- rc = OBP(exp->exp_obd, setattr)(env, exp, oa);
- return rc;
-}
-
-static inline int obd_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
- int priority)
-{
- struct obd_device *obd = imp->imp_obd;
- int rc;
-
- rc = obd_check_dev_active(obd);
- if (rc)
- return rc;
- OBD_CHECK_DT_OP(obd, add_conn, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, add_conn);
-
- rc = OBP(obd, add_conn)(imp, uuid, priority);
- return rc;
-}
-
-static inline int obd_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
-{
- struct obd_device *obd = imp->imp_obd;
- int rc;
-
- rc = obd_check_dev_active(obd);
- if (rc)
- return rc;
- OBD_CHECK_DT_OP(obd, del_conn, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, del_conn);
-
- rc = OBP(obd, del_conn)(imp, uuid);
- return rc;
-}
-
-static inline struct obd_uuid *obd_get_uuid(struct obd_export *exp)
-{
- struct obd_uuid *uuid;
-
- OBD_CHECK_DT_OP(exp->exp_obd, get_uuid, NULL);
- EXP_COUNTER_INCREMENT(exp, get_uuid);
-
- uuid = OBP(exp->exp_obd, get_uuid)(exp);
- return uuid;
-}
-
-/*
- * Create a new /a exp on device /a obd for the uuid /a cluuid
- * @param exp New export handle
- * @param d Connect data, supported flags are set, flags also understood
- * by obd are returned.
- */
-static inline int obd_connect(const struct lu_env *env,
- struct obd_export **exp, struct obd_device *obd,
- struct obd_uuid *cluuid,
- struct obd_connect_data *data,
- void *localdata)
-{
- int rc;
- __u64 ocf = data ? data->ocd_connect_flags : 0; /*
- * for post-condition
- * check
- */
-
- rc = obd_check_dev_active(obd);
- if (rc)
- return rc;
- OBD_CHECK_DT_OP(obd, connect, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, connect);
-
- rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata);
- /* check that only subset is granted */
- LASSERT(ergo(data, (data->ocd_connect_flags & ocf) ==
- data->ocd_connect_flags));
- return rc;
-}
-
-static inline int obd_reconnect(const struct lu_env *env,
- struct obd_export *exp,
- struct obd_device *obd,
- struct obd_uuid *cluuid,
- struct obd_connect_data *d,
- void *localdata)
-{
- int rc;
- __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition check */
-
- rc = obd_check_dev_active(obd);
- if (rc)
- return rc;
- OBD_CHECK_DT_OP(obd, reconnect, 0);
- OBD_COUNTER_INCREMENT(obd, reconnect);
-
- rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata);
- /* check that only subset is granted */
- LASSERT(ergo(d, (d->ocd_connect_flags & ocf) == d->ocd_connect_flags));
- return rc;
-}
-
-static inline int obd_disconnect(struct obd_export *exp)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, disconnect);
- EXP_COUNTER_INCREMENT(exp, disconnect);
-
- rc = OBP(exp->exp_obd, disconnect)(exp);
- return rc;
-}
-
-static inline int obd_fid_init(struct obd_device *obd, struct obd_export *exp,
- enum lu_cli_type type)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, fid_init, 0);
- OBD_COUNTER_INCREMENT(obd, fid_init);
-
- rc = OBP(obd, fid_init)(obd, exp, type);
- return rc;
-}
-
-static inline int obd_fid_fini(struct obd_device *obd)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, fid_fini, 0);
- OBD_COUNTER_INCREMENT(obd, fid_fini);
-
- rc = OBP(obd, fid_fini)(obd);
- return rc;
-}
-
-static inline int obd_fid_alloc(const struct lu_env *env,
- struct obd_export *exp,
- struct lu_fid *fid,
- struct md_op_data *op_data)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, fid_alloc);
- EXP_COUNTER_INCREMENT(exp, fid_alloc);
-
- rc = OBP(exp->exp_obd, fid_alloc)(env, exp, fid, op_data);
- return rc;
-}
-
-static inline int obd_pool_new(struct obd_device *obd, char *poolname)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, pool_new, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, pool_new);
-
- rc = OBP(obd, pool_new)(obd, poolname);
- return rc;
-}
-
-static inline int obd_pool_del(struct obd_device *obd, char *poolname)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, pool_del, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, pool_del);
-
- rc = OBP(obd, pool_del)(obd, poolname);
- return rc;
-}
-
-static inline int obd_pool_add(struct obd_device *obd,
- char *poolname,
- char *ostname)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, pool_add, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, pool_add);
-
- rc = OBP(obd, pool_add)(obd, poolname, ostname);
- return rc;
-}
-
-static inline int obd_pool_rem(struct obd_device *obd,
- char *poolname,
- char *ostname)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, pool_rem, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, pool_rem);
-
- rc = OBP(obd, pool_rem)(obd, poolname, ostname);
- return rc;
-}
-
-static inline void obd_getref(struct obd_device *obd)
-{
- if (OBT(obd) && OBP(obd, getref)) {
- OBD_COUNTER_INCREMENT(obd, getref);
- OBP(obd, getref)(obd);
- }
-}
-
-static inline void obd_putref(struct obd_device *obd)
-{
- if (OBT(obd) && OBP(obd, putref)) {
- OBD_COUNTER_INCREMENT(obd, putref);
- OBP(obd, putref)(obd);
- }
-}
-
-static inline int obd_init_export(struct obd_export *exp)
-{
- int rc = 0;
-
- if ((exp)->exp_obd && OBT((exp)->exp_obd) &&
- OBP((exp)->exp_obd, init_export))
- rc = OBP(exp->exp_obd, init_export)(exp);
- return rc;
-}
-
-static inline int obd_destroy_export(struct obd_export *exp)
-{
- if ((exp)->exp_obd && OBT((exp)->exp_obd) &&
- OBP((exp)->exp_obd, destroy_export))
- OBP(exp->exp_obd, destroy_export)(exp);
- return 0;
-}
-
-/*
- * @max_age is the oldest time in jiffies that we accept using a cached data.
- * If the cache is older than @max_age we will get a new value from the
- * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness.
- */
-static inline int obd_statfs_async(struct obd_export *exp,
- struct obd_info *oinfo,
- __u64 max_age,
- struct ptlrpc_request_set *rqset)
-{
- int rc = 0;
- struct obd_device *obd;
-
- if (!exp || !exp->exp_obd)
- return -EINVAL;
-
- obd = exp->exp_obd;
- OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, statfs);
-
- CDEBUG(D_SUPER, "%s: osfs %p age %llu, max_age %llu\n",
- obd->obd_name, &obd->obd_osfs, obd->obd_osfs_age, max_age);
- if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
- rc = OBP(obd, statfs_async)(exp, oinfo, max_age, rqset);
- } else {
- CDEBUG(D_SUPER,
- "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
- obd->obd_name, &obd->obd_osfs,
- obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
- obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
- spin_lock(&obd->obd_osfs_lock);
- memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
- spin_unlock(&obd->obd_osfs_lock);
- oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
- if (oinfo->oi_cb_up)
- oinfo->oi_cb_up(oinfo, 0);
- }
- return rc;
-}
-
-static inline int obd_statfs_rqset(struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age,
- __u32 flags)
-{
- struct ptlrpc_request_set *set = NULL;
- struct obd_info oinfo = {
- .oi_osfs = osfs,
- .oi_flags = flags,
- };
- int rc = 0;
-
- set = ptlrpc_prep_set();
- if (!set)
- return -ENOMEM;
-
- rc = obd_statfs_async(exp, &oinfo, max_age, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
- return rc;
-}
-
-/*
- * @max_age is the oldest time in jiffies that we accept using a cached data.
- * If the cache is older than @max_age we will get a new value from the
- * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness.
- */
-static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age,
- __u32 flags)
-{
- int rc = 0;
- struct obd_device *obd = exp->exp_obd;
-
- if (!obd)
- return -EINVAL;
-
- OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, statfs);
-
- CDEBUG(D_SUPER, "osfs %llu, max_age %llu\n",
- obd->obd_osfs_age, max_age);
- if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
- rc = OBP(obd, statfs)(env, exp, osfs, max_age, flags);
- if (rc == 0) {
- spin_lock(&obd->obd_osfs_lock);
- memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
- obd->obd_osfs_age = cfs_time_current_64();
- spin_unlock(&obd->obd_osfs_lock);
- }
- } else {
- CDEBUG(D_SUPER,
- "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
- obd->obd_name, &obd->obd_osfs,
- obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
- obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
- spin_lock(&obd->obd_osfs_lock);
- memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
- spin_unlock(&obd->obd_osfs_lock);
- }
- return rc;
-}
-
-static inline int obd_preprw(const struct lu_env *env, int cmd,
- struct obd_export *exp, struct obdo *oa,
- int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *remote, int *pages,
- struct niobuf_local *local)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, preprw);
- EXP_COUNTER_INCREMENT(exp, preprw);
-
- rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote,
- pages, local);
- return rc;
-}
-
-static inline int obd_commitrw(const struct lu_env *env, int cmd,
- struct obd_export *exp, struct obdo *oa,
- int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *rnb, int pages,
- struct niobuf_local *local, int rc)
-{
- EXP_CHECK_DT_OP(exp, commitrw);
- EXP_COUNTER_INCREMENT(exp, commitrw);
-
- rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj,
- rnb, pages, local, rc);
- return rc;
-}
-
-static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp,
- int len, void *karg, void __user *uarg)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, iocontrol);
- EXP_COUNTER_INCREMENT(exp, iocontrol);
-
- rc = OBP(exp->exp_obd, iocontrol)(cmd, exp, len, karg, uarg);
- return rc;
-}
-
-static inline void obd_import_event(struct obd_device *obd,
- struct obd_import *imp,
- enum obd_import_event event)
-{
- if (!obd) {
- CERROR("NULL device\n");
- return;
- }
- if (obd->obd_set_up && OBP(obd, import_event)) {
- OBD_COUNTER_INCREMENT(obd, import_event);
- OBP(obd, import_event)(obd, imp, event);
- }
-}
-
-static inline int obd_notify(struct obd_device *obd,
- struct obd_device *watched,
- enum obd_notify_event ev,
- void *data)
-{
- int rc;
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
-
- if (!obd->obd_set_up) {
- CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name);
- return -EINVAL;
- }
-
- if (!OBP(obd, notify)) {
- CDEBUG(D_HA, "obd %s has no notify handler\n", obd->obd_name);
- return -ENOSYS;
- }
-
- OBD_COUNTER_INCREMENT(obd, notify);
- rc = OBP(obd, notify)(obd, watched, ev, data);
- return rc;
-}
-
-static inline int obd_notify_observer(struct obd_device *observer,
- struct obd_device *observed,
- enum obd_notify_event ev,
- void *data)
-{
- int rc1;
- int rc2;
-
- struct obd_notify_upcall *onu;
-
- if (observer->obd_observer)
- rc1 = obd_notify(observer->obd_observer, observed, ev, data);
- else
- rc1 = 0;
- /*
- * Also, call non-obd listener, if any
- */
- onu = &observer->obd_upcall;
- if (onu->onu_upcall)
- rc2 = onu->onu_upcall(observer, observed, ev,
- onu->onu_owner, NULL);
- else
- rc2 = 0;
-
- return rc1 ? rc1 : rc2;
-}
-
-static inline int obd_quotactl(struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, quotactl);
- EXP_COUNTER_INCREMENT(exp, quotactl);
-
- rc = OBP(exp->exp_obd, quotactl)(exp->exp_obd, exp, oqctl);
- return rc;
-}
-
-static inline int obd_health_check(const struct lu_env *env,
- struct obd_device *obd)
-{
- /*
- * returns: 0 on healthy
- * >0 on unhealthy + reason code/flag
- * however the only supported reason == 1 right now
- * We'll need to define some better reasons
- * or flags in the future.
- * <0 on error
- */
- int rc;
-
- /* don't use EXP_CHECK_DT_OP, because NULL method is normal here */
- if (!obd || !OBT(obd)) {
- CERROR("cleaned up obd\n");
- return -EOPNOTSUPP;
- }
- if (!obd->obd_set_up || obd->obd_stopping)
- return 0;
- if (!OBP(obd, health_check))
- return 0;
-
- rc = OBP(obd, health_check)(env, obd);
- return rc;
-}
-
-static inline int obd_register_observer(struct obd_device *obd,
- struct obd_device *observer)
-{
- int rc;
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
- down_write(&obd->obd_observer_link_sem);
- if (obd->obd_observer && observer) {
- up_write(&obd->obd_observer_link_sem);
- return -EALREADY;
- }
- obd->obd_observer = observer;
- up_write(&obd->obd_observer_link_sem);
- return 0;
-}
-
-/* metadata helpers */
-static inline int md_getstatus(struct obd_export *exp, struct lu_fid *fid)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, getstatus);
- EXP_MD_COUNTER_INCREMENT(exp, getstatus);
- rc = MDP(exp->exp_obd, getstatus)(exp, fid);
- return rc;
-}
-
-static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, getattr);
- EXP_MD_COUNTER_INCREMENT(exp, getattr);
- rc = MDP(exp->exp_obd, getattr)(exp, op_data, request);
- return rc;
-}
-
-static inline int md_null_inode(struct obd_export *exp,
- const struct lu_fid *fid)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, null_inode);
- EXP_MD_COUNTER_INCREMENT(exp, null_inode);
- rc = MDP(exp->exp_obd, null_inode)(exp, fid);
- return rc;
-}
-
-static inline int md_close(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, close);
- EXP_MD_COUNTER_INCREMENT(exp, close);
- rc = MDP(exp->exp_obd, close)(exp, op_data, mod, request);
- return rc;
-}
-
-static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, size_t datalen, umode_t mode,
- uid_t uid, gid_t gid, cfs_cap_t cap_effective,
- __u64 rdev, struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, create);
- EXP_MD_COUNTER_INCREMENT(exp, create);
- rc = MDP(exp->exp_obd, create)(exp, op_data, data, datalen, mode,
- uid, gid, cap_effective, rdev, request);
- return rc;
-}
-
-static inline int md_enqueue(struct obd_export *exp,
- struct ldlm_enqueue_info *einfo,
- const union ldlm_policy_data *policy,
- struct lookup_intent *it,
- struct md_op_data *op_data,
- struct lustre_handle *lockh,
- __u64 extra_lock_flags)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, enqueue);
- EXP_MD_COUNTER_INCREMENT(exp, enqueue);
- rc = MDP(exp->exp_obd, enqueue)(exp, einfo, policy, it, op_data, lockh,
- extra_lock_flags);
- return rc;
-}
-
-static inline int md_getattr_name(struct obd_export *exp,
- struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, getattr_name);
- EXP_MD_COUNTER_INCREMENT(exp, getattr_name);
- rc = MDP(exp->exp_obd, getattr_name)(exp, op_data, request);
- return rc;
-}
-
-static inline int md_intent_lock(struct obd_export *exp,
- struct md_op_data *op_data,
- struct lookup_intent *it,
- struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, intent_lock);
- EXP_MD_COUNTER_INCREMENT(exp, intent_lock);
- rc = MDP(exp->exp_obd, intent_lock)(exp, op_data, it, reqp,
- cb_blocking, extra_lock_flags);
- return rc;
-}
-
-static inline int md_link(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, link);
- EXP_MD_COUNTER_INCREMENT(exp, link);
- rc = MDP(exp->exp_obd, link)(exp, op_data, request);
- return rc;
-}
-
-static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, size_t oldlen, const char *new,
- size_t newlen, struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, rename);
- EXP_MD_COUNTER_INCREMENT(exp, rename);
- rc = MDP(exp->exp_obd, rename)(exp, op_data, old, oldlen, new,
- newlen, request);
- return rc;
-}
-
-static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, setattr);
- EXP_MD_COUNTER_INCREMENT(exp, setattr);
- rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen, request);
- return rc;
-}
-
-static inline int md_sync(struct obd_export *exp, const struct lu_fid *fid,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, sync);
- EXP_MD_COUNTER_INCREMENT(exp, sync);
- rc = MDP(exp->exp_obd, sync)(exp, fid, request);
- return rc;
-}
-
-static inline int md_read_page(struct obd_export *exp,
- struct md_op_data *op_data,
- struct md_callback *cb_op,
- __u64 hash_offset,
- struct page **ppage)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, read_page);
- EXP_MD_COUNTER_INCREMENT(exp, read_page);
- rc = MDP(exp->exp_obd, read_page)(exp, op_data, cb_op, hash_offset,
- ppage);
- return rc;
-}
-
-static inline int md_unlink(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, unlink);
- EXP_MD_COUNTER_INCREMENT(exp, unlink);
- rc = MDP(exp->exp_obd, unlink)(exp, op_data, request);
- return rc;
-}
-
-static inline int md_get_lustre_md(struct obd_export *exp,
- struct ptlrpc_request *req,
- struct obd_export *dt_exp,
- struct obd_export *md_exp,
- struct lustre_md *md)
-{
- EXP_CHECK_MD_OP(exp, get_lustre_md);
- EXP_MD_COUNTER_INCREMENT(exp, get_lustre_md);
- return MDP(exp->exp_obd, get_lustre_md)(exp, req, dt_exp, md_exp, md);
-}
-
-static inline int md_free_lustre_md(struct obd_export *exp,
- struct lustre_md *md)
-{
- EXP_CHECK_MD_OP(exp, free_lustre_md);
- EXP_MD_COUNTER_INCREMENT(exp, free_lustre_md);
- return MDP(exp->exp_obd, free_lustre_md)(exp, md);
-}
-
-static inline int md_merge_attr(struct obd_export *exp,
- const struct lmv_stripe_md *lsm,
- struct cl_attr *attr,
- ldlm_blocking_callback cb)
-{
- EXP_CHECK_MD_OP(exp, merge_attr);
- EXP_MD_COUNTER_INCREMENT(exp, merge_attr);
- return MDP(exp->exp_obd, merge_attr)(exp, lsm, attr, cb);
-}
-
-static inline int md_setxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *name,
- const char *input, int input_size,
- int output_size, int flags, __u32 suppgid,
- struct ptlrpc_request **request)
-{
- EXP_CHECK_MD_OP(exp, setxattr);
- EXP_MD_COUNTER_INCREMENT(exp, setxattr);
- return MDP(exp->exp_obd, setxattr)(exp, fid, valid, name, input,
- input_size, output_size, flags,
- suppgid, request);
-}
-
-static inline int md_getxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *name,
- const char *input, int input_size,
- int output_size, int flags,
- struct ptlrpc_request **request)
-{
- EXP_CHECK_MD_OP(exp, getxattr);
- EXP_MD_COUNTER_INCREMENT(exp, getxattr);
- return MDP(exp->exp_obd, getxattr)(exp, fid, valid, name, input,
- input_size, output_size, flags,
- request);
-}
-
-static inline int md_set_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och,
- struct lookup_intent *it)
-{
- EXP_CHECK_MD_OP(exp, set_open_replay_data);
- EXP_MD_COUNTER_INCREMENT(exp, set_open_replay_data);
- return MDP(exp->exp_obd, set_open_replay_data)(exp, och, it);
-}
-
-static inline int md_clear_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och)
-{
- EXP_CHECK_MD_OP(exp, clear_open_replay_data);
- EXP_MD_COUNTER_INCREMENT(exp, clear_open_replay_data);
- return MDP(exp->exp_obd, clear_open_replay_data)(exp, och);
-}
-
-static inline int md_set_lock_data(struct obd_export *exp,
- const struct lustre_handle *lockh,
- void *data, __u64 *bits)
-{
- EXP_CHECK_MD_OP(exp, set_lock_data);
- EXP_MD_COUNTER_INCREMENT(exp, set_lock_data);
- return MDP(exp->exp_obd, set_lock_data)(exp, lockh, data, bits);
-}
-
-static inline int md_cancel_unused(struct obd_export *exp,
- const struct lu_fid *fid,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode,
- enum ldlm_cancel_flags flags,
- void *opaque)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, cancel_unused);
- EXP_MD_COUNTER_INCREMENT(exp, cancel_unused);
-
- rc = MDP(exp->exp_obd, cancel_unused)(exp, fid, policy, mode,
- flags, opaque);
- return rc;
-}
-
-static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid,
- enum ldlm_type type,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode,
- struct lustre_handle *lockh)
-{
- EXP_CHECK_MD_OP(exp, lock_match);
- EXP_MD_COUNTER_INCREMENT(exp, lock_match);
- return MDP(exp->exp_obd, lock_match)(exp, flags, fid, type,
- policy, mode, lockh);
-}
-
-static inline int md_init_ea_size(struct obd_export *exp, u32 easize,
- u32 def_asize)
-{
- EXP_CHECK_MD_OP(exp, init_ea_size);
- EXP_MD_COUNTER_INCREMENT(exp, init_ea_size);
- return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize);
-}
-
-static inline int md_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, intent_getattr_async);
- EXP_MD_COUNTER_INCREMENT(exp, intent_getattr_async);
- rc = MDP(exp->exp_obd, intent_getattr_async)(exp, minfo);
- return rc;
-}
-
-static inline int md_revalidate_lock(struct obd_export *exp,
- struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, revalidate_lock);
- EXP_MD_COUNTER_INCREMENT(exp, revalidate_lock);
- rc = MDP(exp->exp_obd, revalidate_lock)(exp, it, fid, bits);
- return rc;
-}
-
-static inline int md_get_fid_from_lsm(struct obd_export *exp,
- const struct lmv_stripe_md *lsm,
- const char *name, int namelen,
- struct lu_fid *fid)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, get_fid_from_lsm);
- EXP_MD_COUNTER_INCREMENT(exp, get_fid_from_lsm);
- rc = MDP(exp->exp_obd, get_fid_from_lsm)(exp, lsm, name, namelen, fid);
- return rc;
-}
-
-/*
- * Unpack an MD struct from disk to in-memory format.
- * Returns +ve size of unpacked MD (0 for free), or -ve error.
- *
- * If *plsm != NULL and lmm == NULL then *lsm will be freed.
- * If *plsm == NULL then it will be allocated.
- */
-static inline int md_unpackmd(struct obd_export *exp,
- struct lmv_stripe_md **plsm,
- const union lmv_mds_md *lmm, size_t lmm_size)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, unpackmd);
- EXP_MD_COUNTER_INCREMENT(exp, unpackmd);
- rc = MDP(exp->exp_obd, unpackmd)(exp, plsm, lmm, lmm_size);
- return rc;
-}
-
-/* OBD Metadata Support */
-
-int obd_init_caches(void);
-void obd_cleanup_caches(void);
-
-/* support routines */
-extern struct kmem_cache *obdo_cachep;
-
-typedef int (*register_lwp_cb)(void *data);
-
-struct lwp_register_item {
- struct obd_export **lri_exp;
- register_lwp_cb lri_cb_func;
- void *lri_cb_data;
- struct list_head lri_list;
- char lri_name[MTI_NAME_MAXLEN];
-};
-
-/*
- * I'm as embarrassed about this as you are.
- *
- * <shaver> // XXX do not look into _superhack with remaining eye
- * <shaver> // XXX if this were any uglier, I'd get my own show on MTV
- */
-extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
-
-/* obd_mount.c */
-int lustre_unregister_fs(void);
-int lustre_register_fs(void);
-int lustre_check_exclusion(struct super_block *sb, char *svname);
-
-/* sysctl.c */
-int obd_sysctl_init(void);
-
-/* uuid.c */
-typedef __u8 class_uuid_t[16];
-void class_uuid_unparse(class_uuid_t in, struct obd_uuid *out);
-
-/* lustre_peer.c */
-int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index);
-int class_add_uuid(const char *uuid, __u64 nid);
-int class_del_uuid(const char *uuid);
-int class_check_uuid(struct obd_uuid *uuid, __u64 nid);
-void class_init_uuidlist(void);
-void class_exit_uuidlist(void);
-
-/* class_obd.c */
-extern char obd_jobid_node[];
-extern struct miscdevice obd_psdev;
-extern spinlock_t obd_types_lock;
-int class_procfs_init(void);
-int class_procfs_clean(void);
-
-/* prng.c */
-#define ll_generate_random_uuid(uuid_out) \
- get_random_bytes(uuid_out, sizeof(class_uuid_t))
-
-/* statfs_pack.c */
-struct kstatfs;
-void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
-
-/* root squash info */
-struct rw_semaphore;
-struct root_squash_info {
- uid_t rsi_uid;
- gid_t rsi_gid;
- struct list_head rsi_nosquash_nids;
- struct rw_semaphore rsi_sem;
-};
-
-/* linux-module.c */
-int obd_ioctl_getdata(char **buf, int *len, void __user *arg);
-
-#endif /* __LINUX_OBD_CLASS_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
deleted file mode 100644
index 8595091b8b86..000000000000
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ /dev/null
@@ -1,545 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _OBD_SUPPORT
-#define _OBD_SUPPORT
-
-#include <linux/slab.h>
-#include <linux/sched/signal.h>
-
-#include <linux/libcfs/libcfs.h>
-#include <lustre_compat.h>
-#include <lprocfs_status.h>
-
-/* global variables */
-extern unsigned int obd_debug_peer_on_timeout;
-extern unsigned int obd_dump_on_timeout;
-extern unsigned int obd_dump_on_eviction;
-/* obd_timeout should only be used for recovery, not for
- * networking / disk / timings affected by load (use Adaptive Timeouts)
- */
-extern unsigned int obd_timeout; /* seconds */
-extern unsigned int obd_timeout_set;
-extern unsigned int at_min;
-extern unsigned int at_max;
-extern unsigned int at_history;
-extern int at_early_margin;
-extern int at_extra;
-extern unsigned long obd_max_dirty_pages;
-extern atomic_long_t obd_dirty_pages;
-extern atomic_long_t obd_dirty_transit_pages;
-extern char obd_jobid_var[];
-
-/* Some hash init argument constants */
-#define HASH_POOLS_BKT_BITS 3
-#define HASH_POOLS_CUR_BITS 3
-#define HASH_POOLS_MAX_BITS 7
-#define HASH_UUID_BKT_BITS 5
-#define HASH_UUID_CUR_BITS 7
-#define HASH_UUID_MAX_BITS 12
-#define HASH_NID_BKT_BITS 5
-#define HASH_NID_CUR_BITS 7
-#define HASH_NID_MAX_BITS 12
-#define HASH_NID_STATS_BKT_BITS 5
-#define HASH_NID_STATS_CUR_BITS 7
-#define HASH_NID_STATS_MAX_BITS 12
-#define HASH_LQE_BKT_BITS 5
-#define HASH_LQE_CUR_BITS 7
-#define HASH_LQE_MAX_BITS 12
-#define HASH_CONN_BKT_BITS 5
-#define HASH_CONN_CUR_BITS 5
-#define HASH_CONN_MAX_BITS 15
-#define HASH_EXP_LOCK_BKT_BITS 5
-#define HASH_EXP_LOCK_CUR_BITS 7
-#define HASH_EXP_LOCK_MAX_BITS 16
-#define HASH_CL_ENV_BKT_BITS 5
-#define HASH_CL_ENV_BITS 10
-#define HASH_JOB_STATS_BKT_BITS 5
-#define HASH_JOB_STATS_CUR_BITS 7
-#define HASH_JOB_STATS_MAX_BITS 12
-
-/* Timeout definitions */
-#define OBD_TIMEOUT_DEFAULT 100
-/* Time to wait for all clients to reconnect during recovery (hard limit) */
-#define OBD_RECOVERY_TIME_HARD (obd_timeout * 9)
-/* Time to wait for all clients to reconnect during recovery (soft limit) */
-/* Should be very conservative; must catch the first reconnect after reboot */
-#define OBD_RECOVERY_TIME_SOFT (obd_timeout * 3)
-/* Change recovery-small 26b time if you change this */
-#define PING_INTERVAL max(obd_timeout / 4, 1U)
-/* a bit more than maximal journal commit time in seconds */
-#define PING_INTERVAL_SHORT min(PING_INTERVAL, 7U)
-/* Client may skip 1 ping; we must wait at least 2.5. But for multiple
- * failover targets the client only pings one server at a time, and pings
- * can be lost on a loaded network. Since eviction has serious consequences,
- * and there's no urgent need to evict a client just because it's idle, we
- * should be very conservative here.
- */
-#define PING_EVICT_TIMEOUT (PING_INTERVAL * 6)
-#define DISK_TIMEOUT 50 /* Beyond this we warn about disk speed */
-#define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */
-/* Max connect interval for nonresponsive servers; ~50s to avoid building up
- * connect requests in the LND queues, but within obd_timeout so we don't
- * miss the recovery window
- */
-#define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout))
-#define CONNECTION_SWITCH_INC 5 /* Connection timeout backoff */
-/* In general this should be low to have quick detection of a system
- * running on a backup server. (If it's too low, import_select_connection
- * will increase the timeout anyhow.)
- */
-#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout / 20)
-/* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */
-#define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \
- INITIAL_CONNECT_TIMEOUT)
-/* The min time a target should wait for clients to reconnect in recovery */
-#define OBD_RECOVERY_TIME_MIN (2 * RECONNECT_DELAY_MAX)
-#define OBD_IR_FACTOR_MIN 1
-#define OBD_IR_FACTOR_MAX 10
-#define OBD_IR_FACTOR_DEFAULT (OBD_IR_FACTOR_MAX / 2)
-/* default timeout for the MGS to become IR_FULL */
-#define OBD_IR_MGS_TIMEOUT (4 * obd_timeout)
-#define LONG_UNLINK 300 /* Unlink should happen before now */
-
-/**
- * Time interval of shrink, if the client is "idle" more than this interval,
- * then the ll_grant thread will return the requested grant space to filter
- */
-#define GRANT_SHRINK_INTERVAL 1200/*20 minutes*/
-
-#define OBD_FAIL_MDS 0x100
-#define OBD_FAIL_MDS_HANDLE_UNPACK 0x101
-#define OBD_FAIL_MDS_GETATTR_NET 0x102
-#define OBD_FAIL_MDS_GETATTR_PACK 0x103
-#define OBD_FAIL_MDS_READPAGE_NET 0x104
-#define OBD_FAIL_MDS_READPAGE_PACK 0x105
-#define OBD_FAIL_MDS_SENDPAGE 0x106
-#define OBD_FAIL_MDS_REINT_NET 0x107
-#define OBD_FAIL_MDS_REINT_UNPACK 0x108
-#define OBD_FAIL_MDS_REINT_SETATTR 0x109
-#define OBD_FAIL_MDS_REINT_SETATTR_WRITE 0x10a
-#define OBD_FAIL_MDS_REINT_CREATE 0x10b
-#define OBD_FAIL_MDS_REINT_CREATE_WRITE 0x10c
-#define OBD_FAIL_MDS_REINT_UNLINK 0x10d
-#define OBD_FAIL_MDS_REINT_UNLINK_WRITE 0x10e
-#define OBD_FAIL_MDS_REINT_LINK 0x10f
-#define OBD_FAIL_MDS_REINT_LINK_WRITE 0x110
-#define OBD_FAIL_MDS_REINT_RENAME 0x111
-#define OBD_FAIL_MDS_REINT_RENAME_WRITE 0x112
-#define OBD_FAIL_MDS_OPEN_NET 0x113
-#define OBD_FAIL_MDS_OPEN_PACK 0x114
-#define OBD_FAIL_MDS_CLOSE_NET 0x115
-#define OBD_FAIL_MDS_CLOSE_PACK 0x116
-#define OBD_FAIL_MDS_CONNECT_NET 0x117
-#define OBD_FAIL_MDS_CONNECT_PACK 0x118
-#define OBD_FAIL_MDS_REINT_NET_REP 0x119
-#define OBD_FAIL_MDS_DISCONNECT_NET 0x11a
-#define OBD_FAIL_MDS_GETSTATUS_NET 0x11b
-#define OBD_FAIL_MDS_GETSTATUS_PACK 0x11c
-#define OBD_FAIL_MDS_STATFS_PACK 0x11d
-#define OBD_FAIL_MDS_STATFS_NET 0x11e
-#define OBD_FAIL_MDS_GETATTR_NAME_NET 0x11f
-#define OBD_FAIL_MDS_PIN_NET 0x120
-#define OBD_FAIL_MDS_UNPIN_NET 0x121
-#define OBD_FAIL_MDS_ALL_REPLY_NET 0x122
-#define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123
-#define OBD_FAIL_MDS_SYNC_NET 0x124
-#define OBD_FAIL_MDS_SYNC_PACK 0x125
-/* OBD_FAIL_MDS_DONE_WRITING_NET 0x126 obsolete since 2.8.0 */
-/* OBD_FAIL_MDS_DONE_WRITING_PACK 0x127 obsolete since 2.8.0 */
-#define OBD_FAIL_MDS_ALLOC_OBDO 0x128
-#define OBD_FAIL_MDS_PAUSE_OPEN 0x129
-#define OBD_FAIL_MDS_STATFS_LCW_SLEEP 0x12a
-#define OBD_FAIL_MDS_OPEN_CREATE 0x12b
-#define OBD_FAIL_MDS_OST_SETATTR 0x12c
-/* OBD_FAIL_MDS_QUOTACHECK_NET 0x12d obsolete since 2.4 */
-#define OBD_FAIL_MDS_QUOTACTL_NET 0x12e
-#define OBD_FAIL_MDS_CLIENT_ADD 0x12f
-#define OBD_FAIL_MDS_GETXATTR_NET 0x130
-#define OBD_FAIL_MDS_GETXATTR_PACK 0x131
-#define OBD_FAIL_MDS_SETXATTR_NET 0x132
-#define OBD_FAIL_MDS_SETXATTR 0x133
-#define OBD_FAIL_MDS_SETXATTR_WRITE 0x134
-#define OBD_FAIL_MDS_FS_SETUP 0x135
-#define OBD_FAIL_MDS_RESEND 0x136
-#define OBD_FAIL_MDS_LLOG_CREATE_FAILED 0x137
-#define OBD_FAIL_MDS_LOV_SYNC_RACE 0x138
-#define OBD_FAIL_MDS_OSC_PRECREATE 0x139
-#define OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a
-#define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
-#define OBD_FAIL_MDS_BLOCK_QUOTA_REQ 0x13c
-#define OBD_FAIL_MDS_DROP_QUOTA_REQ 0x13d
-#define OBD_FAIL_MDS_REMOVE_COMMON_EA 0x13e
-#define OBD_FAIL_MDS_ALLOW_COMMON_EA_SETTING 0x13f
-#define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140
-#define OBD_FAIL_MDS_LOV_PREP_CREATE 0x141
-#define OBD_FAIL_MDS_REINT_DELAY 0x142
-#define OBD_FAIL_MDS_READLINK_EPROTO 0x143
-#define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x144
-#define OBD_FAIL_MDS_PDO_LOCK 0x145
-#define OBD_FAIL_MDS_PDO_LOCK2 0x146
-#define OBD_FAIL_MDS_OSC_CREATE_FAIL 0x147
-#define OBD_FAIL_MDS_NEGATIVE_POSITIVE 0x148
-#define OBD_FAIL_MDS_HSM_STATE_GET_NET 0x149
-#define OBD_FAIL_MDS_HSM_STATE_SET_NET 0x14a
-#define OBD_FAIL_MDS_HSM_PROGRESS_NET 0x14b
-#define OBD_FAIL_MDS_HSM_REQUEST_NET 0x14c
-#define OBD_FAIL_MDS_HSM_CT_REGISTER_NET 0x14d
-#define OBD_FAIL_MDS_HSM_CT_UNREGISTER_NET 0x14e
-#define OBD_FAIL_MDS_SWAP_LAYOUTS_NET 0x14f
-#define OBD_FAIL_MDS_HSM_ACTION_NET 0x150
-#define OBD_FAIL_MDS_CHANGELOG_INIT 0x151
-
-/* layout lock */
-#define OBD_FAIL_MDS_NO_LL_GETATTR 0x170
-#define OBD_FAIL_MDS_NO_LL_OPEN 0x171
-#define OBD_FAIL_MDS_LL_BLOCK 0x172
-
-/* CMD */
-#define OBD_FAIL_MDS_IS_SUBDIR_NET 0x180
-#define OBD_FAIL_MDS_IS_SUBDIR_PACK 0x181
-#define OBD_FAIL_MDS_SET_INFO_NET 0x182
-#define OBD_FAIL_MDS_WRITEPAGE_NET 0x183
-#define OBD_FAIL_MDS_WRITEPAGE_PACK 0x184
-#define OBD_FAIL_MDS_RECOVERY_ACCEPTS_GAPS 0x185
-#define OBD_FAIL_MDS_GET_INFO_NET 0x186
-#define OBD_FAIL_MDS_DQACQ_NET 0x187
-
-/* OI scrub */
-#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
-#define OBD_FAIL_OSD_SCRUB_CRASH 0x191
-#define OBD_FAIL_OSD_SCRUB_FATAL 0x192
-#define OBD_FAIL_OSD_FID_MAPPING 0x193
-#define OBD_FAIL_OSD_LMA_INCOMPAT 0x194
-#define OBD_FAIL_OSD_COMPAT_INVALID_ENTRY 0x195
-
-#define OBD_FAIL_OST 0x200
-#define OBD_FAIL_OST_CONNECT_NET 0x201
-#define OBD_FAIL_OST_DISCONNECT_NET 0x202
-#define OBD_FAIL_OST_GET_INFO_NET 0x203
-#define OBD_FAIL_OST_CREATE_NET 0x204
-#define OBD_FAIL_OST_DESTROY_NET 0x205
-#define OBD_FAIL_OST_GETATTR_NET 0x206
-#define OBD_FAIL_OST_SETATTR_NET 0x207
-#define OBD_FAIL_OST_OPEN_NET 0x208
-#define OBD_FAIL_OST_CLOSE_NET 0x209
-#define OBD_FAIL_OST_BRW_NET 0x20a
-#define OBD_FAIL_OST_PUNCH_NET 0x20b
-#define OBD_FAIL_OST_STATFS_NET 0x20c
-#define OBD_FAIL_OST_HANDLE_UNPACK 0x20d
-#define OBD_FAIL_OST_BRW_WRITE_BULK 0x20e
-#define OBD_FAIL_OST_BRW_READ_BULK 0x20f
-#define OBD_FAIL_OST_SYNC_NET 0x210
-#define OBD_FAIL_OST_ALL_REPLY_NET 0x211
-#define OBD_FAIL_OST_ALL_REQUEST_NET 0x212
-#define OBD_FAIL_OST_LDLM_REPLY_NET 0x213
-#define OBD_FAIL_OST_BRW_PAUSE_BULK 0x214
-#define OBD_FAIL_OST_ENOSPC 0x215
-#define OBD_FAIL_OST_EROFS 0x216
-#define OBD_FAIL_OST_ENOENT 0x217
-/* OBD_FAIL_OST_QUOTACHECK_NET 0x218 obsolete since 2.4 */
-#define OBD_FAIL_OST_QUOTACTL_NET 0x219
-#define OBD_FAIL_OST_CHECKSUM_RECEIVE 0x21a
-#define OBD_FAIL_OST_CHECKSUM_SEND 0x21b
-#define OBD_FAIL_OST_BRW_SIZE 0x21c
-#define OBD_FAIL_OST_DROP_REQ 0x21d
-#define OBD_FAIL_OST_SETATTR_CREDITS 0x21e
-#define OBD_FAIL_OST_HOLD_WRITE_RPC 0x21f
-#define OBD_FAIL_OST_BRW_WRITE_BULK2 0x220
-#define OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
-#define OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
-#define OBD_FAIL_OST_PAUSE_CREATE 0x223
-#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
-#define OBD_FAIL_OST_CONNECT_NET2 0x225
-#define OBD_FAIL_OST_NOMEM 0x226
-#define OBD_FAIL_OST_BRW_PAUSE_BULK2 0x227
-#define OBD_FAIL_OST_MAPBLK_ENOSPC 0x228
-#define OBD_FAIL_OST_ENOINO 0x229
-#define OBD_FAIL_OST_DQACQ_NET 0x230
-#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
-#define OBD_FAIL_OST_SET_INFO_NET 0x232
-
-#define OBD_FAIL_LDLM 0x300
-#define OBD_FAIL_LDLM_NAMESPACE_NEW 0x301
-#define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
-#define OBD_FAIL_LDLM_CONVERT_NET 0x303
-#define OBD_FAIL_LDLM_CANCEL_NET 0x304
-#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
-#define OBD_FAIL_LDLM_CP_CALLBACK_NET 0x306
-#define OBD_FAIL_LDLM_GL_CALLBACK_NET 0x307
-#define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
-#define OBD_FAIL_LDLM_ENQUEUE_INTENT_ERR 0x309
-#define OBD_FAIL_LDLM_CREATE_RESOURCE 0x30a
-#define OBD_FAIL_LDLM_ENQUEUE_BLOCKED 0x30b
-#define OBD_FAIL_LDLM_REPLY 0x30c
-#define OBD_FAIL_LDLM_RECOV_CLIENTS 0x30d
-#define OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT 0x30e
-#define OBD_FAIL_LDLM_GLIMPSE 0x30f
-#define OBD_FAIL_LDLM_CANCEL_RACE 0x310
-#define OBD_FAIL_LDLM_CANCEL_EVICT_RACE 0x311
-#define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
-#define OBD_FAIL_LDLM_CLOSE_THREAD 0x313
-#define OBD_FAIL_LDLM_CANCEL_BL_CB_RACE 0x314
-#define OBD_FAIL_LDLM_CP_CB_WAIT 0x315
-#define OBD_FAIL_LDLM_OST_FAIL_RACE 0x316
-#define OBD_FAIL_LDLM_INTR_CP_AST 0x317
-#define OBD_FAIL_LDLM_CP_BL_RACE 0x318
-#define OBD_FAIL_LDLM_NEW_LOCK 0x319
-#define OBD_FAIL_LDLM_AGL_DELAY 0x31a
-#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b
-#define OBD_FAIL_LDLM_OST_LVB 0x31c
-#define OBD_FAIL_LDLM_ENQUEUE_HANG 0x31d
-#define OBD_FAIL_LDLM_PAUSE_CANCEL2 0x31f
-#define OBD_FAIL_LDLM_CP_CB_WAIT2 0x320
-#define OBD_FAIL_LDLM_CP_CB_WAIT3 0x321
-#define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322
-#define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323
-
-#define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
-
-/* LOCKLESS IO */
-#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
-
-#define OBD_FAIL_OSC 0x400
-#define OBD_FAIL_OSC_BRW_READ_BULK 0x401
-#define OBD_FAIL_OSC_BRW_WRITE_BULK 0x402
-#define OBD_FAIL_OSC_LOCK_BL_AST 0x403
-#define OBD_FAIL_OSC_LOCK_CP_AST 0x404
-#define OBD_FAIL_OSC_MATCH 0x405
-#define OBD_FAIL_OSC_BRW_PREP_REQ 0x406
-#define OBD_FAIL_OSC_SHUTDOWN 0x407
-#define OBD_FAIL_OSC_CHECKSUM_RECEIVE 0x408
-#define OBD_FAIL_OSC_CHECKSUM_SEND 0x409
-#define OBD_FAIL_OSC_BRW_PREP_REQ2 0x40a
-#define OBD_FAIL_OSC_CONNECT_CKSUM 0x40b
-#define OBD_FAIL_OSC_CKSUM_ADLER_ONLY 0x40c
-#define OBD_FAIL_OSC_DIO_PAUSE 0x40d
-#define OBD_FAIL_OSC_OBJECT_CONTENTION 0x40e
-#define OBD_FAIL_OSC_CP_CANCEL_RACE 0x40f
-#define OBD_FAIL_OSC_CP_ENQ_RACE 0x410
-#define OBD_FAIL_OSC_NO_GRANT 0x411
-#define OBD_FAIL_OSC_DELAY_SETTIME 0x412
-#define OBD_FAIL_OSC_DELAY_IO 0x414
-
-#define OBD_FAIL_PTLRPC 0x500
-#define OBD_FAIL_PTLRPC_ACK 0x501
-#define OBD_FAIL_PTLRPC_RQBD 0x502
-#define OBD_FAIL_PTLRPC_BULK_GET_NET 0x503
-#define OBD_FAIL_PTLRPC_BULK_PUT_NET 0x504
-#define OBD_FAIL_PTLRPC_DROP_RPC 0x505
-#define OBD_FAIL_PTLRPC_DELAY_SEND 0x506
-#define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
-#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB 0x508
-#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
-#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
-#define OBD_FAIL_PTLRPC_IMP_DEACTIVE 0x50d
-#define OBD_FAIL_PTLRPC_DUMP_LOG 0x50e
-#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f
-#define OBD_FAIL_PTLRPC_LONG_BULK_UNLINK 0x510
-#define OBD_FAIL_PTLRPC_HPREQ_TIMEOUT 0x511
-#define OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT 0x512
-#define OBD_FAIL_PTLRPC_DROP_REQ_OPC 0x513
-#define OBD_FAIL_PTLRPC_FINISH_REPLAY 0x514
-#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB2 0x515
-#define OBD_FAIL_PTLRPC_DELAY_IMP_FULL 0x516
-#define OBD_FAIL_PTLRPC_CANCEL_RESEND 0x517
-#define OBD_FAIL_PTLRPC_DROP_BULK 0x51a
-#define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b
-#define OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK 0x51c
-
-#define OBD_FAIL_OBD_PING_NET 0x600
-#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
-#define OBD_FAIL_OBD_LOGD_NET 0x602
-/* OBD_FAIL_OBD_QC_CALLBACK_NET 0x603 obsolete since 2.4 */
-#define OBD_FAIL_OBD_DQACQ 0x604
-#define OBD_FAIL_OBD_LLOG_SETUP 0x605
-#define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606
-#define OBD_FAIL_OBD_IDX_READ_NET 0x607
-#define OBD_FAIL_OBD_IDX_READ_BREAK 0x608
-#define OBD_FAIL_OBD_NO_LRU 0x609
-
-#define OBD_FAIL_TGT_REPLY_NET 0x700
-#define OBD_FAIL_TGT_CONN_RACE 0x701
-#define OBD_FAIL_TGT_FORCE_RECONNECT 0x702
-#define OBD_FAIL_TGT_DELAY_CONNECT 0x703
-#define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
-#define OBD_FAIL_TGT_DELAY_PRECREATE 0x705
-#define OBD_FAIL_TGT_TOOMANY_THREADS 0x706
-#define OBD_FAIL_TGT_REPLAY_DROP 0x707
-#define OBD_FAIL_TGT_FAKE_EXP 0x708
-#define OBD_FAIL_TGT_REPLAY_DELAY 0x709
-#define OBD_FAIL_TGT_LAST_REPLAY 0x710
-#define OBD_FAIL_TGT_CLIENT_ADD 0x711
-#define OBD_FAIL_TGT_RCVG_FLAG 0x712
-#define OBD_FAIL_TGT_DELAY_CONDITIONAL 0x713
-
-#define OBD_FAIL_MDC_REVALIDATE_PAUSE 0x800
-#define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801
-#define OBD_FAIL_MDC_OLD_EXT_FLAGS 0x802
-#define OBD_FAIL_MDC_GETATTR_ENQUEUE 0x803
-#define OBD_FAIL_MDC_RPCS_SEM 0x804
-#define OBD_FAIL_MDC_LIGHTWEIGHT 0x805
-#define OBD_FAIL_MDC_CLOSE 0x806
-
-#define OBD_FAIL_MGS 0x900
-#define OBD_FAIL_MGS_ALL_REQUEST_NET 0x901
-#define OBD_FAIL_MGS_ALL_REPLY_NET 0x902
-#define OBD_FAIL_MGC_PAUSE_PROCESS_LOG 0x903
-#define OBD_FAIL_MGS_PAUSE_REQ 0x904
-#define OBD_FAIL_MGS_PAUSE_TARGET_REG 0x905
-#define OBD_FAIL_MGS_CONNECT_NET 0x906
-#define OBD_FAIL_MGS_DISCONNECT_NET 0x907
-#define OBD_FAIL_MGS_SET_INFO_NET 0x908
-#define OBD_FAIL_MGS_EXCEPTION_NET 0x909
-#define OBD_FAIL_MGS_TARGET_REG_NET 0x90a
-#define OBD_FAIL_MGS_TARGET_DEL_NET 0x90b
-#define OBD_FAIL_MGS_CONFIG_READ_NET 0x90c
-
-#define OBD_FAIL_QUOTA_DQACQ_NET 0xA01
-#define OBD_FAIL_QUOTA_EDQUOT 0xA02
-#define OBD_FAIL_QUOTA_DELAY_REINT 0xA03
-#define OBD_FAIL_QUOTA_RECOVERABLE_ERR 0xA04
-
-#define OBD_FAIL_LPROC_REMOVE 0xB00
-
-#define OBD_FAIL_SEQ 0x1000
-#define OBD_FAIL_SEQ_QUERY_NET 0x1001
-#define OBD_FAIL_SEQ_EXHAUST 0x1002
-
-#define OBD_FAIL_FLD 0x1100
-#define OBD_FAIL_FLD_QUERY_NET 0x1101
-#define OBD_FAIL_FLD_READ_NET 0x1102
-
-#define OBD_FAIL_SEC_CTX 0x1200
-#define OBD_FAIL_SEC_CTX_INIT_NET 0x1201
-#define OBD_FAIL_SEC_CTX_INIT_CONT_NET 0x1202
-#define OBD_FAIL_SEC_CTX_FINI_NET 0x1203
-#define OBD_FAIL_SEC_CTX_HDL_PAUSE 0x1204
-
-#define OBD_FAIL_LLOG 0x1300
-#define OBD_FAIL_LLOG_ORIGIN_CONNECT_NET 0x1301
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_CREATE_NET 0x1302
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_DESTROY_NET 0x1303
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_READ_HEADER_NET 0x1304
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_NEXT_BLOCK_NET 0x1305
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_PREV_BLOCK_NET 0x1306
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_WRITE_REC_NET 0x1307
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_CLOSE_NET 0x1308
-#define OBD_FAIL_LLOG_CATINFO_NET 0x1309
-#define OBD_FAIL_MDS_SYNC_CAPA_SL 0x1310
-#define OBD_FAIL_SEQ_ALLOC 0x1311
-
-#define OBD_FAIL_LLITE 0x1400
-#define OBD_FAIL_LLITE_FAULT_TRUNC_RACE 0x1401
-#define OBD_FAIL_LOCK_STATE_WAIT_INTR 0x1402
-#define OBD_FAIL_LOV_INIT 0x1403
-#define OBD_FAIL_GLIMPSE_DELAY 0x1404
-#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405
-#define OBD_FAIL_MAKE_LOVEA_HOLE 0x1406
-#define OBD_FAIL_LLITE_LOST_LAYOUT 0x1407
-#define OBD_FAIL_GETATTR_DELAY 0x1409
-
-#define OBD_FAIL_FID_INDIR 0x1501
-#define OBD_FAIL_FID_INLMA 0x1502
-#define OBD_FAIL_FID_IGIF 0x1504
-#define OBD_FAIL_FID_LOOKUP 0x1505
-#define OBD_FAIL_FID_NOLMA 0x1506
-
-/* LFSCK */
-#define OBD_FAIL_LFSCK_DELAY1 0x1600
-#define OBD_FAIL_LFSCK_DELAY2 0x1601
-#define OBD_FAIL_LFSCK_DELAY3 0x1602
-#define OBD_FAIL_LFSCK_LINKEA_CRASH 0x1603
-#define OBD_FAIL_LFSCK_LINKEA_MORE 0x1604
-#define OBD_FAIL_LFSCK_LINKEA_MORE2 0x1605
-#define OBD_FAIL_LFSCK_FATAL1 0x1608
-#define OBD_FAIL_LFSCK_FATAL2 0x1609
-#define OBD_FAIL_LFSCK_CRASH 0x160a
-#define OBD_FAIL_LFSCK_NO_AUTO 0x160b
-#define OBD_FAIL_LFSCK_NO_DOUBLESCAN 0x160c
-#define OBD_FAIL_LFSCK_INVALID_PFID 0x1619
-#define OBD_FAIL_LFSCK_BAD_NAME_HASH 0x1628
-
-/* UPDATE */
-#define OBD_FAIL_UPDATE_OBJ_NET 0x1700
-#define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
-
-/* LMV */
-#define OBD_FAIL_UNKNOWN_LMV_STRIPE 0x1901
-
-/* Assign references to moved code to reduce code changes */
-#define OBD_FAIL_PRECHECK(id) CFS_FAIL_PRECHECK(id)
-#define OBD_FAIL_CHECK(id) CFS_FAIL_CHECK(id)
-#define OBD_FAIL_CHECK_VALUE(id, value) CFS_FAIL_CHECK_VALUE(id, value)
-#define OBD_FAIL_CHECK_ORSET(id, value) CFS_FAIL_CHECK_ORSET(id, value)
-#define OBD_FAIL_CHECK_RESET(id, value) CFS_FAIL_CHECK_RESET(id, value)
-#define OBD_FAIL_RETURN(id, ret) CFS_FAIL_RETURN(id, ret)
-#define OBD_FAIL_TIMEOUT(id, secs) CFS_FAIL_TIMEOUT(id, secs)
-#define OBD_FAIL_TIMEOUT_MS(id, ms) CFS_FAIL_TIMEOUT_MS(id, ms)
-#define OBD_FAIL_TIMEOUT_ORSET(id, value, secs) CFS_FAIL_TIMEOUT_ORSET(id, value, secs)
-#define OBD_RACE(id) CFS_RACE(id)
-#define OBD_FAIL_ONCE CFS_FAIL_ONCE
-#define OBD_FAILED CFS_FAILED
-
-#ifdef CONFIG_DEBUG_SLAB
-#define POISON(ptr, c, s) do {} while (0)
-#define POISON_PTR(ptr) ((void)0)
-#else
-#define POISON(ptr, c, s) memset(ptr, c, s)
-#define POISON_PTR(ptr) ((ptr) = (void *)0xdeadbeef)
-#endif
-
-#ifdef POISON_BULK
-#define POISON_PAGE(page, val) do { \
- memset(kmap(page), val, PAGE_SIZE); \
- kunmap(page); \
-} while (0)
-#else
-#define POISON_PAGE(page, val) do { } while (0)
-#endif
-
-#define OBD_FREE_RCU(ptr, size, handle) \
-do { \
- struct portals_handle *__h = (handle); \
- \
- __h->h_cookie = (unsigned long)(ptr); \
- __h->h_size = (size); \
- call_rcu(&__h->h_rcu, class_handle_free_cb); \
- POISON_PTR(ptr); \
-} while (0)
-
-#define KEY_IS(str) \
- (keylen >= (sizeof(str) - 1) && \
- memcmp(key, str, (sizeof(str) - 1)) == 0)
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/seq_range.h b/drivers/staging/lustre/lustre/include/seq_range.h
deleted file mode 100644
index 9450da728160..000000000000
--- a/drivers/staging/lustre/lustre/include/seq_range.h
+++ /dev/null
@@ -1,200 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2014, Intel Corporation.
- *
- * Copyright 2015 Cray Inc, all rights reserved.
- * Author: Ben Evans.
- *
- * Define lu_seq_range associated functions
- */
-
-#ifndef _SEQ_RANGE_H_
-#define _SEQ_RANGE_H_
-
-#include <uapi/linux/lustre/lustre_idl.h>
-
-/**
- * computes the sequence range type \a range
- */
-
-static inline unsigned int fld_range_type(const struct lu_seq_range *range)
-{
- return range->lsr_flags & LU_SEQ_RANGE_MASK;
-}
-
-/**
- * Is this sequence range an OST? \a range
- */
-
-static inline bool fld_range_is_ost(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_OST;
-}
-
-/**
- * Is this sequence range an MDT? \a range
- */
-
-static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_MDT;
-}
-
-/**
- * ANY range is only used when the fld client sends a fld query request,
- * but it does not know whether the seq is an MDT or OST, so it will send the
- * request with ANY type, which means any seq type from the lookup can be
- * expected. /a range
- */
-static inline unsigned int fld_range_is_any(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_ANY;
-}
-
-/**
- * Apply flags to range \a range \a flags
- */
-
-static inline void fld_range_set_type(struct lu_seq_range *range,
- unsigned int flags)
-{
- range->lsr_flags |= flags;
-}
-
-/**
- * Add MDT to range type \a range
- */
-
-static inline void fld_range_set_mdt(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_MDT);
-}
-
-/**
- * Add OST to range type \a range
- */
-
-static inline void fld_range_set_ost(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_OST);
-}
-
-/**
- * Add ANY to range type \a range
- */
-
-static inline void fld_range_set_any(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_ANY);
-}
-
-/**
- * computes width of given sequence range \a range
- */
-
-static inline u64 lu_seq_range_space(const struct lu_seq_range *range)
-{
- return range->lsr_end - range->lsr_start;
-}
-
-/**
- * initialize range to zero \a range
- */
-
-static inline void lu_seq_range_init(struct lu_seq_range *range)
-{
- memset(range, 0, sizeof(*range));
-}
-
-/**
- * check if given seq id \a s is within given range \a range
- */
-
-static inline bool lu_seq_range_within(const struct lu_seq_range *range,
- u64 seq)
-{
- return seq >= range->lsr_start && seq < range->lsr_end;
-}
-
-/**
- * Is the range sane? Is the end after the beginning? \a range
- */
-
-static inline bool lu_seq_range_is_sane(const struct lu_seq_range *range)
-{
- return range->lsr_end >= range->lsr_start;
-}
-
-/**
- * Is the range 0? \a range
- */
-
-static inline bool lu_seq_range_is_zero(const struct lu_seq_range *range)
-{
- return range->lsr_start == 0 && range->lsr_end == 0;
-}
-
-/**
- * Is the range out of space? \a range
- */
-
-static inline bool lu_seq_range_is_exhausted(const struct lu_seq_range *range)
-{
- return lu_seq_range_space(range) == 0;
-}
-
-/**
- * return 0 if two ranges have the same location, nonzero if they are
- * different \a r1 \a r2
- */
-
-static inline int lu_seq_range_compare_loc(const struct lu_seq_range *r1,
- const struct lu_seq_range *r2)
-{
- return r1->lsr_index != r2->lsr_index ||
- r1->lsr_flags != r2->lsr_flags;
-}
-
-#if !defined(__REQ_LAYOUT_USER__)
-/**
- * byte swap range structure \a range
- */
-
-void lustre_swab_lu_seq_range(struct lu_seq_range *range);
-#endif
-/**
- * printf string and argument list for sequence range
- */
-#define DRANGE "[%#16.16llx-%#16.16llx]:%x:%s"
-
-#define PRANGE(range) \
- (range)->lsr_start, \
- (range)->lsr_end, \
- (range)->lsr_index, \
- fld_range_is_mdt(range) ? "mdt" : "ost"
-
-#endif
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
deleted file mode 100644
index 8df7a4463c21..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ /dev/null
@@ -1,599 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/interval_tree.c
- *
- * Interval tree library used by ldlm extent lock code
- *
- * Author: Huang Wei <huangwei@clusterfs.com>
- * Author: Jay Xiong <jinshan.xiong@sun.com>
- */
-#include <lustre_dlm.h>
-#include <obd_support.h>
-#include <interval_tree.h>
-
-enum {
- INTERVAL_RED = 0,
- INTERVAL_BLACK = 1
-};
-
-static inline int node_is_left_child(struct interval_node *node)
-{
- return node == node->in_parent->in_left;
-}
-
-static inline int node_is_right_child(struct interval_node *node)
-{
- return node == node->in_parent->in_right;
-}
-
-static inline int node_is_red(struct interval_node *node)
-{
- return node->in_color == INTERVAL_RED;
-}
-
-static inline int node_is_black(struct interval_node *node)
-{
- return node->in_color == INTERVAL_BLACK;
-}
-
-static inline int extent_compare(struct interval_node_extent *e1,
- struct interval_node_extent *e2)
-{
- int rc;
-
- if (e1->start == e2->start) {
- if (e1->end < e2->end)
- rc = -1;
- else if (e1->end > e2->end)
- rc = 1;
- else
- rc = 0;
- } else {
- if (e1->start < e2->start)
- rc = -1;
- else
- rc = 1;
- }
- return rc;
-}
-
-static inline int extent_equal(struct interval_node_extent *e1,
- struct interval_node_extent *e2)
-{
- return (e1->start == e2->start) && (e1->end == e2->end);
-}
-
-static inline int extent_overlapped(struct interval_node_extent *e1,
- struct interval_node_extent *e2)
-{
- return (e1->start <= e2->end) && (e2->start <= e1->end);
-}
-
-static inline int node_equal(struct interval_node *n1, struct interval_node *n2)
-{
- return extent_equal(&n1->in_extent, &n2->in_extent);
-}
-
-static struct interval_node *interval_first(struct interval_node *node)
-{
- if (!node)
- return NULL;
- while (node->in_left)
- node = node->in_left;
- return node;
-}
-
-static struct interval_node *interval_last(struct interval_node *node)
-{
- if (!node)
- return NULL;
- while (node->in_right)
- node = node->in_right;
- return node;
-}
-
-static struct interval_node *interval_next(struct interval_node *node)
-{
- if (!node)
- return NULL;
- if (node->in_right)
- return interval_first(node->in_right);
- while (node->in_parent && node_is_right_child(node))
- node = node->in_parent;
- return node->in_parent;
-}
-
-static struct interval_node *interval_prev(struct interval_node *node)
-{
- if (!node)
- return NULL;
-
- if (node->in_left)
- return interval_last(node->in_left);
-
- while (node->in_parent && node_is_left_child(node))
- node = node->in_parent;
-
- return node->in_parent;
-}
-
-enum interval_iter interval_iterate_reverse(struct interval_node *root,
- interval_callback_t func,
- void *data)
-{
- enum interval_iter rc = INTERVAL_ITER_CONT;
- struct interval_node *node;
-
- for (node = interval_last(root); node; node = interval_prev(node)) {
- rc = func(node, data);
- if (rc == INTERVAL_ITER_STOP)
- break;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(interval_iterate_reverse);
-
-static void __rotate_change_maxhigh(struct interval_node *node,
- struct interval_node *rotate)
-{
- __u64 left_max, right_max;
-
- rotate->in_max_high = node->in_max_high;
- left_max = node->in_left ? node->in_left->in_max_high : 0;
- right_max = node->in_right ? node->in_right->in_max_high : 0;
- node->in_max_high = max(interval_high(node),
- max(left_max, right_max));
-}
-
-/* The left rotation "pivots" around the link from node to node->right, and
- * - node will be linked to node->right's left child, and
- * - node->right's left child will be linked to node's right child.
- */
-static void __rotate_left(struct interval_node *node,
- struct interval_node **root)
-{
- struct interval_node *right = node->in_right;
- struct interval_node *parent = node->in_parent;
-
- node->in_right = right->in_left;
- if (node->in_right)
- right->in_left->in_parent = node;
-
- right->in_left = node;
- right->in_parent = parent;
- if (parent) {
- if (node_is_left_child(node))
- parent->in_left = right;
- else
- parent->in_right = right;
- } else {
- *root = right;
- }
- node->in_parent = right;
-
- /* update max_high for node and right */
- __rotate_change_maxhigh(node, right);
-}
-
-/* The right rotation "pivots" around the link from node to node->left, and
- * - node will be linked to node->left's right child, and
- * - node->left's right child will be linked to node's left child.
- */
-static void __rotate_right(struct interval_node *node,
- struct interval_node **root)
-{
- struct interval_node *left = node->in_left;
- struct interval_node *parent = node->in_parent;
-
- node->in_left = left->in_right;
- if (node->in_left)
- left->in_right->in_parent = node;
- left->in_right = node;
-
- left->in_parent = parent;
- if (parent) {
- if (node_is_right_child(node))
- parent->in_right = left;
- else
- parent->in_left = left;
- } else {
- *root = left;
- }
- node->in_parent = left;
-
- /* update max_high for node and left */
- __rotate_change_maxhigh(node, left);
-}
-
-#define interval_swap(a, b) do { \
- struct interval_node *c = a; a = b; b = c; \
-} while (0)
-
-/*
- * Operations INSERT and DELETE, when run on a tree with n keys,
- * take O(logN) time.Because they modify the tree, the result
- * may violate the red-black properties.To restore these properties,
- * we must change the colors of some of the nodes in the tree
- * and also change the pointer structure.
- */
-static void interval_insert_color(struct interval_node *node,
- struct interval_node **root)
-{
- struct interval_node *parent, *gparent;
-
- while ((parent = node->in_parent) && node_is_red(parent)) {
- gparent = parent->in_parent;
- /* Parent is RED, so gparent must not be NULL */
- if (node_is_left_child(parent)) {
- struct interval_node *uncle;
-
- uncle = gparent->in_right;
- if (uncle && node_is_red(uncle)) {
- uncle->in_color = INTERVAL_BLACK;
- parent->in_color = INTERVAL_BLACK;
- gparent->in_color = INTERVAL_RED;
- node = gparent;
- continue;
- }
-
- if (parent->in_right == node) {
- __rotate_left(parent, root);
- interval_swap(node, parent);
- }
-
- parent->in_color = INTERVAL_BLACK;
- gparent->in_color = INTERVAL_RED;
- __rotate_right(gparent, root);
- } else {
- struct interval_node *uncle;
-
- uncle = gparent->in_left;
- if (uncle && node_is_red(uncle)) {
- uncle->in_color = INTERVAL_BLACK;
- parent->in_color = INTERVAL_BLACK;
- gparent->in_color = INTERVAL_RED;
- node = gparent;
- continue;
- }
-
- if (node_is_left_child(node)) {
- __rotate_right(parent, root);
- interval_swap(node, parent);
- }
-
- parent->in_color = INTERVAL_BLACK;
- gparent->in_color = INTERVAL_RED;
- __rotate_left(gparent, root);
- }
- }
-
- (*root)->in_color = INTERVAL_BLACK;
-}
-
-struct interval_node *interval_insert(struct interval_node *node,
- struct interval_node **root)
-
-{
- struct interval_node **p, *parent = NULL;
-
- LASSERT(!interval_is_intree(node));
- p = root;
- while (*p) {
- parent = *p;
- if (node_equal(parent, node))
- return parent;
-
- /* max_high field must be updated after each iteration */
- if (parent->in_max_high < interval_high(node))
- parent->in_max_high = interval_high(node);
-
- if (extent_compare(&node->in_extent, &parent->in_extent) < 0)
- p = &parent->in_left;
- else
- p = &parent->in_right;
- }
-
- /* link node into the tree */
- node->in_parent = parent;
- node->in_color = INTERVAL_RED;
- node->in_left = NULL;
- node->in_right = NULL;
- *p = node;
-
- interval_insert_color(node, root);
- node->in_intree = 1;
-
- return NULL;
-}
-EXPORT_SYMBOL(interval_insert);
-
-static inline int node_is_black_or_0(struct interval_node *node)
-{
- return !node || node_is_black(node);
-}
-
-static void interval_erase_color(struct interval_node *node,
- struct interval_node *parent,
- struct interval_node **root)
-{
- struct interval_node *tmp;
-
- while (node_is_black_or_0(node) && node != *root) {
- if (parent->in_left == node) {
- tmp = parent->in_right;
- if (node_is_red(tmp)) {
- tmp->in_color = INTERVAL_BLACK;
- parent->in_color = INTERVAL_RED;
- __rotate_left(parent, root);
- tmp = parent->in_right;
- }
- if (node_is_black_or_0(tmp->in_left) &&
- node_is_black_or_0(tmp->in_right)) {
- tmp->in_color = INTERVAL_RED;
- node = parent;
- parent = node->in_parent;
- } else {
- if (node_is_black_or_0(tmp->in_right)) {
- struct interval_node *o_left;
-
- o_left = tmp->in_left;
- if (o_left)
- o_left->in_color = INTERVAL_BLACK;
- tmp->in_color = INTERVAL_RED;
- __rotate_right(tmp, root);
- tmp = parent->in_right;
- }
- tmp->in_color = parent->in_color;
- parent->in_color = INTERVAL_BLACK;
- if (tmp->in_right)
- tmp->in_right->in_color = INTERVAL_BLACK;
- __rotate_left(parent, root);
- node = *root;
- break;
- }
- } else {
- tmp = parent->in_left;
- if (node_is_red(tmp)) {
- tmp->in_color = INTERVAL_BLACK;
- parent->in_color = INTERVAL_RED;
- __rotate_right(parent, root);
- tmp = parent->in_left;
- }
- if (node_is_black_or_0(tmp->in_left) &&
- node_is_black_or_0(tmp->in_right)) {
- tmp->in_color = INTERVAL_RED;
- node = parent;
- parent = node->in_parent;
- } else {
- if (node_is_black_or_0(tmp->in_left)) {
- struct interval_node *o_right;
-
- o_right = tmp->in_right;
- if (o_right)
- o_right->in_color = INTERVAL_BLACK;
- tmp->in_color = INTERVAL_RED;
- __rotate_left(tmp, root);
- tmp = parent->in_left;
- }
- tmp->in_color = parent->in_color;
- parent->in_color = INTERVAL_BLACK;
- if (tmp->in_left)
- tmp->in_left->in_color = INTERVAL_BLACK;
- __rotate_right(parent, root);
- node = *root;
- break;
- }
- }
- }
- if (node)
- node->in_color = INTERVAL_BLACK;
-}
-
-/*
- * if the @max_high value of @node is changed, this function traverse a path
- * from node up to the root to update max_high for the whole tree.
- */
-static void update_maxhigh(struct interval_node *node,
- __u64 old_maxhigh)
-{
- __u64 left_max, right_max;
-
- while (node) {
- left_max = node->in_left ? node->in_left->in_max_high : 0;
- right_max = node->in_right ? node->in_right->in_max_high : 0;
- node->in_max_high = max(interval_high(node),
- max(left_max, right_max));
-
- if (node->in_max_high >= old_maxhigh)
- break;
- node = node->in_parent;
- }
-}
-
-void interval_erase(struct interval_node *node,
- struct interval_node **root)
-{
- struct interval_node *child, *parent;
- int color;
-
- LASSERT(interval_is_intree(node));
- node->in_intree = 0;
- if (!node->in_left) {
- child = node->in_right;
- } else if (!node->in_right) {
- child = node->in_left;
- } else { /* Both left and right child are not NULL */
- struct interval_node *old = node;
-
- node = interval_next(node);
- child = node->in_right;
- parent = node->in_parent;
- color = node->in_color;
-
- if (child)
- child->in_parent = parent;
- if (parent == old)
- parent->in_right = child;
- else
- parent->in_left = child;
-
- node->in_color = old->in_color;
- node->in_right = old->in_right;
- node->in_left = old->in_left;
- node->in_parent = old->in_parent;
-
- if (old->in_parent) {
- if (node_is_left_child(old))
- old->in_parent->in_left = node;
- else
- old->in_parent->in_right = node;
- } else {
- *root = node;
- }
-
- old->in_left->in_parent = node;
- if (old->in_right)
- old->in_right->in_parent = node;
- update_maxhigh(child ? : parent, node->in_max_high);
- update_maxhigh(node, old->in_max_high);
- if (parent == old)
- parent = node;
- goto color;
- }
- parent = node->in_parent;
- color = node->in_color;
-
- if (child)
- child->in_parent = parent;
- if (parent) {
- if (node_is_left_child(node))
- parent->in_left = child;
- else
- parent->in_right = child;
- } else {
- *root = child;
- }
-
- update_maxhigh(child ? : parent, node->in_max_high);
-
-color:
- if (color == INTERVAL_BLACK)
- interval_erase_color(child, parent, root);
-}
-EXPORT_SYMBOL(interval_erase);
-
-static inline int interval_may_overlap(struct interval_node *node,
- struct interval_node_extent *ext)
-{
- return (ext->start <= node->in_max_high &&
- ext->end >= interval_low(node));
-}
-
-/*
- * This function finds all intervals that overlap interval ext,
- * and calls func to handle resulted intervals one by one.
- * in lustre, this function will find all conflicting locks in
- * the granted queue and add these locks to the ast work list.
- *
- * {
- * if (!node)
- * return 0;
- * if (ext->end < interval_low(node)) {
- * interval_search(node->in_left, ext, func, data);
- * } else if (interval_may_overlap(node, ext)) {
- * if (extent_overlapped(ext, &node->in_extent))
- * func(node, data);
- * interval_search(node->in_left, ext, func, data);
- * interval_search(node->in_right, ext, func, data);
- * }
- * return 0;
- * }
- *
- */
-enum interval_iter interval_search(struct interval_node *node,
- struct interval_node_extent *ext,
- interval_callback_t func,
- void *data)
-{
- enum interval_iter rc = INTERVAL_ITER_CONT;
- struct interval_node *parent;
-
- LASSERT(ext);
- LASSERT(func);
-
- while (node) {
- if (ext->end < interval_low(node)) {
- if (node->in_left) {
- node = node->in_left;
- continue;
- }
- } else if (interval_may_overlap(node, ext)) {
- if (extent_overlapped(ext, &node->in_extent)) {
- rc = func(node, data);
- if (rc == INTERVAL_ITER_STOP)
- break;
- }
-
- if (node->in_left) {
- node = node->in_left;
- continue;
- }
- if (node->in_right) {
- node = node->in_right;
- continue;
- }
- }
-
- parent = node->in_parent;
- while (parent) {
- if (node_is_left_child(node) &&
- parent->in_right) {
- /*
- * If we ever got the left, it means that the
- * parent met ext->end<interval_low(parent), or
- * may_overlap(parent). If the former is true,
- * we needn't go back. So stop early and check
- * may_overlap(parent) after this loop.
- */
- node = parent->in_right;
- break;
- }
- node = parent;
- parent = parent->in_parent;
- }
- if (!parent || !interval_may_overlap(parent, ext))
- break;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(interval_search);
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
deleted file mode 100644
index 0662cec14b81..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-#include <linux/libcfs/libcfs.h>
-
-#include <lustre_dlm.h>
-#include <lustre_lib.h>
-
-/**
- * Lock a lock and its resource.
- *
- * LDLM locking uses resource to serialize access to locks
- * but there is a case when we change resource of lock upon
- * enqueue reply. We rely on lock->l_resource = new_res
- * being an atomic operation.
- */
-struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
- __acquires(&lock->l_lock)
- __acquires(&lock->l_resource->lr_lock)
-{
- spin_lock(&lock->l_lock);
-
- lock_res(lock->l_resource);
-
- ldlm_set_res_locked(lock);
- return lock->l_resource;
-}
-EXPORT_SYMBOL(lock_res_and_lock);
-
-/**
- * Unlock a lock and its resource previously locked with lock_res_and_lock
- */
-void unlock_res_and_lock(struct ldlm_lock *lock)
- __releases(&lock->l_resource->lr_lock)
- __releases(&lock->l_lock)
-{
- /* on server-side resource of lock doesn't change */
- ldlm_clear_res_locked(lock);
-
- unlock_res(lock->l_resource);
- spin_unlock(&lock->l_lock);
-}
-EXPORT_SYMBOL(unlock_res_and_lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
deleted file mode 100644
index 11b11b5f3216..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ /dev/null
@@ -1,259 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_extent.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-/**
- * This file contains implementation of EXTENT lock type
- *
- * EXTENT lock type is for locking a contiguous range of values, represented
- * by 64-bit starting and ending offsets (inclusive). There are several extent
- * lock modes, some of which may be mutually incompatible. Extent locks are
- * considered incompatible if their modes are incompatible and their extents
- * intersect. See the lock mode compatibility matrix in lustre_dlm.h.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-#include <linux/libcfs/libcfs.h>
-#include <lustre_dlm.h>
-#include <obd_support.h>
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_lib.h>
-#include "ldlm_internal.h"
-
-/* When a lock is cancelled by a client, the KMS may undergo change if this
- * is the "highest lock". This function returns the new KMS value.
- * Caller must hold lr_lock already.
- *
- * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes!
- */
-__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
-{
- struct ldlm_resource *res = lock->l_resource;
- struct ldlm_lock *lck;
- __u64 kms = 0;
-
- /* don't let another thread in ldlm_extent_shift_kms race in
- * just after we finish and take our lock into account in its
- * calculation of the kms
- */
- ldlm_set_kms_ignore(lock);
-
- list_for_each_entry(lck, &res->lr_granted, l_res_link) {
-
- if (ldlm_is_kms_ignore(lck))
- continue;
-
- if (lck->l_policy_data.l_extent.end >= old_kms)
- return old_kms;
-
- /* This extent _has_ to be smaller than old_kms (checked above)
- * so kms can only ever be smaller or the same as old_kms.
- */
- if (lck->l_policy_data.l_extent.end + 1 > kms)
- kms = lck->l_policy_data.l_extent.end + 1;
- }
- LASSERTF(kms <= old_kms, "kms %llu old_kms %llu\n", kms, old_kms);
-
- return kms;
-}
-EXPORT_SYMBOL(ldlm_extent_shift_kms);
-
-struct kmem_cache *ldlm_interval_slab;
-
-/* interval tree, for LDLM_EXTENT. */
-static void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l)
-{
- LASSERT(!l->l_tree_node);
- LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
-
- list_add_tail(&l->l_sl_policy, &n->li_group);
- l->l_tree_node = n;
-}
-
-struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
-{
- struct ldlm_interval *node;
-
- LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
- node = kmem_cache_zalloc(ldlm_interval_slab, GFP_NOFS);
- if (!node)
- return NULL;
-
- INIT_LIST_HEAD(&node->li_group);
- ldlm_interval_attach(node, lock);
- return node;
-}
-
-void ldlm_interval_free(struct ldlm_interval *node)
-{
- if (node) {
- LASSERT(list_empty(&node->li_group));
- LASSERT(!interval_is_intree(&node->li_node));
- kmem_cache_free(ldlm_interval_slab, node);
- }
-}
-
-struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
-{
- struct ldlm_interval *n = l->l_tree_node;
-
- if (!n)
- return NULL;
-
- LASSERT(!list_empty(&n->li_group));
- l->l_tree_node = NULL;
- list_del_init(&l->l_sl_policy);
-
- return list_empty(&n->li_group) ? n : NULL;
-}
-
-static inline int lock_mode_to_index(enum ldlm_mode mode)
-{
- int index;
-
- LASSERT(mode != 0);
- LASSERT(is_power_of_2(mode));
- for (index = -1; mode; index++)
- mode >>= 1;
- LASSERT(index < LCK_MODE_NUM);
- return index;
-}
-
-/** Add newly granted lock into interval tree for the resource. */
-void ldlm_extent_add_lock(struct ldlm_resource *res,
- struct ldlm_lock *lock)
-{
- struct interval_node *found, **root;
- struct ldlm_interval *node;
- struct ldlm_extent *extent;
- int idx, rc;
-
- LASSERT(lock->l_granted_mode == lock->l_req_mode);
-
- node = lock->l_tree_node;
- LASSERT(node);
- LASSERT(!interval_is_intree(&node->li_node));
-
- idx = lock_mode_to_index(lock->l_granted_mode);
- LASSERT(lock->l_granted_mode == 1 << idx);
- LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
-
- /* node extent initialize */
- extent = &lock->l_policy_data.l_extent;
- rc = interval_set(&node->li_node, extent->start, extent->end);
- LASSERT(!rc);
-
- root = &res->lr_itree[idx].lit_root;
- found = interval_insert(&node->li_node, root);
- if (found) { /* The policy group found. */
- struct ldlm_interval *tmp;
-
- tmp = ldlm_interval_detach(lock);
- ldlm_interval_free(tmp);
- ldlm_interval_attach(to_ldlm_interval(found), lock);
- }
- res->lr_itree[idx].lit_size++;
-
- /* even though we use interval tree to manage the extent lock, we also
- * add the locks into grant list, for debug purpose, ..
- */
- ldlm_resource_add_lock(res, &res->lr_granted, lock);
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
- struct ldlm_lock *lck;
-
- list_for_each_entry_reverse(lck, &res->lr_granted,
- l_res_link) {
- if (lck == lock)
- continue;
- if (lockmode_compat(lck->l_granted_mode,
- lock->l_granted_mode))
- continue;
- if (ldlm_extent_overlap(&lck->l_req_extent,
- &lock->l_req_extent)) {
- CDEBUG(D_ERROR,
- "granting conflicting lock %p %p\n",
- lck, lock);
- ldlm_resource_dump(D_ERROR, res);
- LBUG();
- }
- }
- }
-}
-
-/** Remove cancelled lock from resource interval tree. */
-void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
-{
- struct ldlm_resource *res = lock->l_resource;
- struct ldlm_interval *node = lock->l_tree_node;
- struct ldlm_interval_tree *tree;
- int idx;
-
- if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
- return;
-
- idx = lock_mode_to_index(lock->l_granted_mode);
- LASSERT(lock->l_granted_mode == 1 << idx);
- tree = &res->lr_itree[idx];
-
- LASSERT(tree->lit_root); /* assure the tree is not null */
-
- tree->lit_size--;
- node = ldlm_interval_detach(lock);
- if (node) {
- interval_erase(&node->li_node, &tree->lit_root);
- ldlm_interval_free(node);
- }
-}
-
-void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy)
-{
- lpolicy->l_extent.start = wpolicy->l_extent.start;
- lpolicy->l_extent.end = wpolicy->l_extent.end;
- lpolicy->l_extent.gid = wpolicy->l_extent.gid;
-}
-
-void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
- union ldlm_wire_policy_data *wpolicy)
-{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_extent.start = lpolicy->l_extent.start;
- wpolicy->l_extent.end = lpolicy->l_extent.end;
- wpolicy->l_extent.gid = lpolicy->l_extent.gid;
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
deleted file mode 100644
index 411b540b96d9..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ /dev/null
@@ -1,495 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003 Hewlett-Packard Development Company LP.
- * Developed under the sponsorship of the US Government under
- * Subcontract No. B514193
- *
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/**
- * This file implements POSIX lock type for Lustre.
- * Its policy properties are start and end of extent and PID.
- *
- * These locks are only done through MDS due to POSIX semantics requiring
- * e.g. that locks could be only partially released and as such split into
- * two parts, and also that two adjacent locks from the same process may be
- * merged into a single wider lock.
- *
- * Lock modes are mapped like this:
- * PR and PW for READ and WRITE locks
- * NL to request a releasing of a portion of the lock
- *
- * These flock locks never timeout.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <lustre_dlm.h>
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_lib.h>
-#include <linux/list.h>
-#include "ldlm_internal.h"
-
-static inline int
-ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
-{
- return((new->l_policy_data.l_flock.owner ==
- lock->l_policy_data.l_flock.owner) &&
- (new->l_export == lock->l_export));
-}
-
-static inline int
-ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
-{
- return((new->l_policy_data.l_flock.start <=
- lock->l_policy_data.l_flock.end) &&
- (new->l_policy_data.l_flock.end >=
- lock->l_policy_data.l_flock.start));
-}
-
-static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode)
-{
- LDLM_DEBUG(lock, "%s(mode: %d)",
- __func__, mode);
-
- /* Safe to not lock here, since it should be empty anyway */
- LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
-
- list_del_init(&lock->l_res_link);
-
- /* client side - set a flag to prevent sending a CANCEL */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
-
- /* when reaching here, it is under lock_res_and_lock(). Thus,
- * need call the nolock version of ldlm_lock_decref_internal
- */
- ldlm_lock_decref_internal_nolock(lock, mode);
-
- ldlm_lock_destroy_nolock(lock);
-}
-
-/**
- * Process a granting attempt for flock lock.
- * Must be called under ns lock held.
- *
- * This function looks for any conflicts for \a lock in the granted or
- * waiting queues. The lock is granted if no conflicts are found in
- * either queue.
- *
- * It is also responsible for splitting a lock if a portion of the lock
- * is released.
- *
- */
-static int ldlm_process_flock_lock(struct ldlm_lock *req)
-{
- struct ldlm_resource *res = req->l_resource;
- struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- struct ldlm_lock *tmp;
- struct ldlm_lock *lock;
- struct ldlm_lock *new = req;
- struct ldlm_lock *new2 = NULL;
- enum ldlm_mode mode = req->l_req_mode;
- int added = (mode == LCK_NL);
- int splitted = 0;
- const struct ldlm_callback_suite null_cbs = { };
-
- CDEBUG(D_DLMTRACE,
- "owner %llu pid %u mode %u start %llu end %llu\n",
- new->l_policy_data.l_flock.owner,
- new->l_policy_data.l_flock.pid, mode,
- req->l_policy_data.l_flock.start,
- req->l_policy_data.l_flock.end);
-
- /* No blocking ASTs are sent to the clients for
- * Posix file & record locks
- */
- req->l_blocking_ast = NULL;
-
-reprocess:
- /* This loop determines where this processes locks start
- * in the resource lr_granted list.
- */
- list_for_each_entry(lock, &res->lr_granted, l_res_link)
- if (ldlm_same_flock_owner(lock, req))
- break;
-
- /* Scan the locks owned by this process to find the insertion point
- * (as locks are ordered), and to handle overlaps.
- * We may have to merge or split existing locks.
- */
- list_for_each_entry_safe_from(lock, tmp, &res->lr_granted, l_res_link) {
-
- if (!ldlm_same_flock_owner(lock, new))
- break;
-
- if (lock->l_granted_mode == mode) {
- /* If the modes are the same then we need to process
- * locks that overlap OR adjoin the new lock. The extra
- * logic condition is necessary to deal with arithmetic
- * overflow and underflow.
- */
- if ((new->l_policy_data.l_flock.start >
- (lock->l_policy_data.l_flock.end + 1)) &&
- (lock->l_policy_data.l_flock.end != OBD_OBJECT_EOF))
- continue;
-
- if ((new->l_policy_data.l_flock.end <
- (lock->l_policy_data.l_flock.start - 1)) &&
- (lock->l_policy_data.l_flock.start != 0))
- break;
-
- if (new->l_policy_data.l_flock.start <
- lock->l_policy_data.l_flock.start) {
- lock->l_policy_data.l_flock.start =
- new->l_policy_data.l_flock.start;
- } else {
- new->l_policy_data.l_flock.start =
- lock->l_policy_data.l_flock.start;
- }
-
- if (new->l_policy_data.l_flock.end >
- lock->l_policy_data.l_flock.end) {
- lock->l_policy_data.l_flock.end =
- new->l_policy_data.l_flock.end;
- } else {
- new->l_policy_data.l_flock.end =
- lock->l_policy_data.l_flock.end;
- }
-
- if (added) {
- ldlm_flock_destroy(lock, mode);
- } else {
- new = lock;
- added = 1;
- }
- continue;
- }
-
- if (new->l_policy_data.l_flock.start >
- lock->l_policy_data.l_flock.end)
- continue;
-
- if (new->l_policy_data.l_flock.end <
- lock->l_policy_data.l_flock.start)
- break;
-
- if (new->l_policy_data.l_flock.start <=
- lock->l_policy_data.l_flock.start) {
- if (new->l_policy_data.l_flock.end <
- lock->l_policy_data.l_flock.end) {
- lock->l_policy_data.l_flock.start =
- new->l_policy_data.l_flock.end + 1;
- break;
- }
- ldlm_flock_destroy(lock, lock->l_req_mode);
- continue;
- }
- if (new->l_policy_data.l_flock.end >=
- lock->l_policy_data.l_flock.end) {
- lock->l_policy_data.l_flock.end =
- new->l_policy_data.l_flock.start - 1;
- continue;
- }
-
- /* split the existing lock into two locks */
-
- /* if this is an F_UNLCK operation then we could avoid
- * allocating a new lock and use the req lock passed in
- * with the request but this would complicate the reply
- * processing since updates to req get reflected in the
- * reply. The client side replays the lock request so
- * it must see the original lock data in the reply.
- */
-
- /* XXX - if ldlm_lock_new() can sleep we should
- * release the lr_lock, allocate the new lock,
- * and restart processing this lock.
- */
- if (!new2) {
- unlock_res_and_lock(req);
- new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
- lock->l_granted_mode, &null_cbs,
- NULL, 0, LVB_T_NONE);
- lock_res_and_lock(req);
- if (IS_ERR(new2)) {
- ldlm_flock_destroy(req, lock->l_granted_mode);
- return LDLM_ITER_STOP;
- }
- goto reprocess;
- }
-
- splitted = 1;
-
- new2->l_granted_mode = lock->l_granted_mode;
- new2->l_policy_data.l_flock.pid =
- new->l_policy_data.l_flock.pid;
- new2->l_policy_data.l_flock.owner =
- new->l_policy_data.l_flock.owner;
- new2->l_policy_data.l_flock.start =
- lock->l_policy_data.l_flock.start;
- new2->l_policy_data.l_flock.end =
- new->l_policy_data.l_flock.start - 1;
- lock->l_policy_data.l_flock.start =
- new->l_policy_data.l_flock.end + 1;
- new2->l_conn_export = lock->l_conn_export;
- if (lock->l_export) {
- new2->l_export = class_export_lock_get(lock->l_export,
- new2);
- if (new2->l_export->exp_lock_hash &&
- hlist_unhashed(&new2->l_exp_hash))
- cfs_hash_add(new2->l_export->exp_lock_hash,
- &new2->l_remote_handle,
- &new2->l_exp_hash);
- }
- ldlm_lock_addref_internal_nolock(new2,
- lock->l_granted_mode);
-
- /* insert new2 at lock */
- ldlm_resource_add_lock(res, &lock->l_res_link, new2);
- LDLM_LOCK_RELEASE(new2);
- break;
- }
-
- /* if new2 is created but never used, destroy it*/
- if (splitted == 0 && new2)
- ldlm_lock_destroy_nolock(new2);
-
- /* At this point we're granting the lock request. */
- req->l_granted_mode = req->l_req_mode;
-
- if (!added) {
- list_del_init(&req->l_res_link);
- /* insert new lock before "lock", which might be the
- * next lock for this owner, or might be the first
- * lock for the next owner, or might not be a lock at
- * all, but instead points at the head of the list
- */
- ldlm_resource_add_lock(res, &lock->l_res_link, req);
- }
-
- /* In case we're reprocessing the requested lock we can't destroy
- * it until after calling ldlm_add_ast_work_item() above so that laawi()
- * can bump the reference count on \a req. Otherwise \a req
- * could be freed before the completion AST can be sent.
- */
- if (added)
- ldlm_flock_destroy(req, mode);
-
- ldlm_resource_dump(D_INFO, res);
- return LDLM_ITER_CONTINUE;
-}
-
-/**
- * Flock completion callback function.
- *
- * \param lock [in,out]: A lock to be handled
- * \param flags [in]: flags
- * \param *data [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
- *
- * \retval 0 : success
- * \retval <0 : failure
- */
-int
-ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
-{
- struct file_lock *getlk = lock->l_ast_data;
- int rc = 0;
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
- if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
- lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_FAIL_LOC;
- unlock_res_and_lock(lock);
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT3, 4);
- }
- CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
- flags, data, getlk);
-
- LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
-
- if (flags & LDLM_FL_FAILED)
- goto granted;
-
- if (!(flags & LDLM_FL_BLOCKED_MASK)) {
- if (!data)
- /* mds granted the lock in the reply */
- goto granted;
- /* CP AST RPC: lock get granted, wake it up */
- wake_up(&lock->l_waitq);
- return 0;
- }
-
- LDLM_DEBUG(lock,
- "client-side enqueue returned a blocked lock, sleeping");
-
- /* Go to sleep until the lock is granted. */
- rc = l_wait_event_abortable(lock->l_waitq, is_granted_or_cancelled(lock));
-
- if (rc) {
- lock_res_and_lock(lock);
-
- /* client side - set flag to prevent lock from being put on LRU list */
- ldlm_set_cbpending(lock);
- unlock_res_and_lock(lock);
-
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
- rc);
- return rc;
- }
-
-granted:
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
-
- if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT4)) {
- lock_res_and_lock(lock);
- /* DEADLOCK is always set with CBPENDING */
- lock->l_flags |= LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
- unlock_res_and_lock(lock);
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT4, 4);
- }
- if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT5)) {
- lock_res_and_lock(lock);
- /* DEADLOCK is always set with CBPENDING */
- lock->l_flags |= LDLM_FL_FAIL_LOC |
- LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
- unlock_res_and_lock(lock);
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT5, 4);
- }
-
- lock_res_and_lock(lock);
-
- /*
- * Protect against race where lock could have been just destroyed
- * due to overlap in ldlm_process_flock_lock().
- */
- if (ldlm_is_destroyed(lock)) {
- unlock_res_and_lock(lock);
- LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- /*
- * An error is still to be returned, to propagate it up to
- * ldlm_cli_enqueue_fini() caller.
- */
- return -EIO;
- }
-
- /* ldlm_lock_enqueue() has already placed lock on the granted list. */
- ldlm_resource_unlink_lock(lock);
-
- /*
- * Import invalidation. We need to actually release the lock
- * references being held, so that it can go away. No point in
- * holding the lock even if app still believes it has it, since
- * server already dropped it anyway. Only for granted locks too.
- */
- /* Do the same for DEADLOCK'ed locks. */
- if (ldlm_is_failed(lock) || ldlm_is_flock_deadlock(lock)) {
- int mode;
-
- if (flags & LDLM_FL_TEST_LOCK)
- LASSERT(ldlm_is_test_lock(lock));
-
- if (ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
- mode = getlk->fl_type;
- else
- mode = lock->l_granted_mode;
-
- if (ldlm_is_flock_deadlock(lock)) {
- LDLM_DEBUG(lock,
- "client-side enqueue deadlock received");
- rc = -EDEADLK;
- }
- ldlm_flock_destroy(lock, mode);
- unlock_res_and_lock(lock);
-
- /* Need to wake up the waiter if we were evicted */
- wake_up(&lock->l_waitq);
-
- /*
- * An error is still to be returned, to propagate it up to
- * ldlm_cli_enqueue_fini() caller.
- */
- return rc ? : -EIO;
- }
-
- LDLM_DEBUG(lock, "client-side enqueue granted");
-
- if (flags & LDLM_FL_TEST_LOCK) {
- /* fcntl(F_GETLK) request */
- /* The old mode was saved in getlk->fl_type so that if the mode
- * in the lock changes we can decref the appropriate refcount.
- */
- LASSERT(ldlm_is_test_lock(lock));
- ldlm_flock_destroy(lock, getlk->fl_type);
- switch (lock->l_granted_mode) {
- case LCK_PR:
- getlk->fl_type = F_RDLCK;
- break;
- case LCK_PW:
- getlk->fl_type = F_WRLCK;
- break;
- default:
- getlk->fl_type = F_UNLCK;
- }
- getlk->fl_pid = -(pid_t)lock->l_policy_data.l_flock.pid;
- getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
- getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
- } else {
- /* We need to reprocess the lock to do merges or splits
- * with existing locks owned by this process.
- */
- ldlm_process_flock_lock(lock);
- }
- unlock_res_and_lock(lock);
- return rc;
-}
-EXPORT_SYMBOL(ldlm_flock_completion_ast);
-
-void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy)
-{
- lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
- lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
- lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
- lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
-}
-
-void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
- union ldlm_wire_policy_data *wpolicy)
-{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
- wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
- wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
- wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
deleted file mode 100644
index 2926208cdfa1..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_inodebits.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-/**
- * This file contains implementation of IBITS lock type
- *
- * IBITS lock type contains a bit mask determining various properties of an
- * object. The meanings of specific bits are specific to the caller and are
- * opaque to LDLM code.
- *
- * Locks with intersecting bitmasks and conflicting lock modes (e.g. LCK_PW)
- * are considered conflicting. See the lock mode compatibility matrix
- * in lustre_dlm.h.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <lustre_dlm.h>
-#include <obd_support.h>
-#include <lustre_lib.h>
-#include "ldlm_internal.h"
-
-void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy)
-{
- lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
-}
-
-void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
- union ldlm_wire_policy_data *wpolicy)
-{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
deleted file mode 100644
index bc33ca100620..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ /dev/null
@@ -1,342 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define MAX_STRING_SIZE 128
-
-extern int ldlm_srv_namespace_nr;
-extern int ldlm_cli_namespace_nr;
-extern struct mutex ldlm_srv_namespace_lock;
-extern struct list_head ldlm_srv_namespace_list;
-extern struct mutex ldlm_cli_namespace_lock;
-extern struct list_head ldlm_cli_active_namespace_list;
-
-static inline int ldlm_namespace_nr_read(enum ldlm_side client)
-{
- return client == LDLM_NAMESPACE_SERVER ?
- ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
-}
-
-static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
-{
- if (client == LDLM_NAMESPACE_SERVER)
- ldlm_srv_namespace_nr++;
- else
- ldlm_cli_namespace_nr++;
-}
-
-static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
-{
- if (client == LDLM_NAMESPACE_SERVER)
- ldlm_srv_namespace_nr--;
- else
- ldlm_cli_namespace_nr--;
-}
-
-static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
-{
- return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
-}
-
-static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
-{
- return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
-}
-
-/* ns_bref is the number of resources in this namespace */
-static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
-{
- return atomic_read(&ns->ns_bref) == 0;
-}
-
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
- enum ldlm_side client);
-void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
- enum ldlm_side client);
-struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client);
-
-/* ldlm_request.c */
-/* Cancel lru flag, it indicates we cancel aged locks. */
-enum {
- LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel old non-LRU resize locks */
- LDLM_LRU_FLAG_PASSED = BIT(1), /* Cancel passed number of locks. */
- LDLM_LRU_FLAG_SHRINK = BIT(2), /* Cancel locks from shrinker. */
- LDLM_LRU_FLAG_LRUR = BIT(3), /* Cancel locks from lru resize. */
- LDLM_LRU_FLAG_NO_WAIT = BIT(4), /* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs)
- */
- LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5), /* LRUR + NO_WAIT */
-};
-
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
- enum ldlm_cancel_flags sync, int flags);
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
- struct list_head *cancels, int count, int max,
- enum ldlm_cancel_flags cancel_flags, int flags);
-extern unsigned int ldlm_enqueue_min;
-extern unsigned int ldlm_cancel_unused_locks_before_replay;
-
-/* ldlm_lock.c */
-
-struct ldlm_cb_set_arg {
- struct ptlrpc_request_set *set;
- int type; /* LDLM_{CP,BL,GL}_CALLBACK */
- atomic_t restart;
- struct list_head *list;
- union ldlm_gl_desc *gl_desc; /* glimpse AST descriptor */
-};
-
-enum ldlm_desc_ast_t {
- LDLM_WORK_BL_AST,
- LDLM_WORK_CP_AST,
- LDLM_WORK_REVOKE_AST,
- LDLM_WORK_GL_AST
-};
-
-void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
-int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
- enum req_location loc, void *data, int size);
-struct ldlm_lock *
-ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *id,
- enum ldlm_type type, enum ldlm_mode mode,
- const struct ldlm_callback_suite *cbs,
- void *data, __u32 lvb_len, enum lvb_type lvb_type);
-enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
- struct ldlm_lock **lock, void *cookie,
- __u64 *flags);
-void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode);
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
- enum ldlm_mode mode);
-void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode);
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
- enum ldlm_mode mode);
-int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
- enum ldlm_desc_ast_t ast_type);
-int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
-#define ldlm_lock_remove_from_lru(lock) ldlm_lock_remove_from_lru_check(lock, 0)
-int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
-void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
-
-/* ldlm_lockd.c */
-int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- struct ldlm_lock *lock);
-int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld,
- struct list_head *cancels, int count,
- enum ldlm_cancel_flags cancel_flags);
-int ldlm_bl_thread_wakeup(void);
-
-void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
-
-extern struct kmem_cache *ldlm_resource_slab;
-extern struct kset *ldlm_ns_kset;
-
-/* ldlm_lockd.c & ldlm_lock.c */
-extern struct kmem_cache *ldlm_lock_slab;
-
-/* ldlm_extent.c */
-void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
-void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
-
-/* l_lock.c */
-void l_check_ns_lock(struct ldlm_namespace *ns);
-void l_check_no_ns_lock(struct ldlm_namespace *ns);
-
-extern struct dentry *ldlm_svc_debugfs_dir;
-
-struct ldlm_state {
- struct ptlrpc_service *ldlm_cb_service;
- struct ptlrpc_service *ldlm_cancel_service;
- struct ptlrpc_client *ldlm_client;
- struct ptlrpc_connection *ldlm_server_conn;
- struct ldlm_bl_pool *ldlm_bl_pool;
-};
-
-/* ldlm_pool.c */
-__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
-void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
-__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);
-
-/* interval tree, for LDLM_EXTENT. */
-extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
-struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
-struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
-void ldlm_interval_free(struct ldlm_interval *node);
-/* this function must be called with res lock held */
-static inline struct ldlm_extent *
-ldlm_interval_extent(struct ldlm_interval *node)
-{
- struct ldlm_lock *lock;
-
- LASSERT(!list_empty(&node->li_group));
-
- lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy);
- return &lock->l_policy_data.l_extent;
-}
-
-int ldlm_init(void);
-void ldlm_exit(void);
-
-enum ldlm_policy_res {
- LDLM_POLICY_CANCEL_LOCK,
- LDLM_POLICY_KEEP_LOCK,
- LDLM_POLICY_SKIP_LOCK
-};
-
-#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
-#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
-#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
-#define LDLM_POOL_SYSFS_SET_u64(a, b) { a = b; }
-#define LDLM_POOL_SYSFS_PRINT_atomic(v) sprintf(buf, "%d\n", atomic_read(&v))
-#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&a, b)
-
-#define LDLM_POOL_SYSFS_READER_SHOW(var, type) \
- static ssize_t var##_show(struct kobject *kobj, \
- struct attribute *attr, \
- char *buf) \
- { \
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
- pl_kobj); \
- type tmp; \
- \
- spin_lock(&pl->pl_lock); \
- tmp = pl->pl_##var; \
- spin_unlock(&pl->pl_lock); \
- \
- return LDLM_POOL_SYSFS_PRINT_##type(tmp); \
- } \
- struct __##var##__dummy_read {; } /* semicolon catcher */
-
-#define LDLM_POOL_SYSFS_WRITER_STORE(var, type) \
- static ssize_t var##_store(struct kobject *kobj, \
- struct attribute *attr, \
- const char *buffer, \
- size_t count) \
- { \
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
- pl_kobj); \
- unsigned long tmp; \
- int rc; \
- \
- rc = kstrtoul(buffer, 10, &tmp); \
- if (rc < 0) { \
- return rc; \
- } \
- \
- spin_lock(&pl->pl_lock); \
- LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \
- spin_unlock(&pl->pl_lock); \
- \
- return count; \
- } \
- struct __##var##__dummy_write {; } /* semicolon catcher */
-
-#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type) \
- static ssize_t var##_show(struct kobject *kobj, \
- struct attribute *attr, \
- char *buf) \
- { \
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
- pl_kobj); \
- \
- return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var); \
- } \
- struct __##var##__dummy_read {; } /* semicolon catcher */
-
-#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type) \
- static ssize_t var##_store(struct kobject *kobj, \
- struct attribute *attr, \
- const char *buffer, \
- size_t count) \
- { \
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
- pl_kobj); \
- unsigned long tmp; \
- int rc; \
- \
- rc = kstrtoul(buffer, 10, &tmp); \
- if (rc < 0) { \
- return rc; \
- } \
- \
- LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \
- \
- return count; \
- } \
- struct __##var##__dummy_write {; } /* semicolon catcher */
-
-static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
-{
- int ret = 0;
-
- lock_res_and_lock(lock);
- if ((lock->l_req_mode == lock->l_granted_mode) &&
- !ldlm_is_cp_reqd(lock))
- ret = 1;
- else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
- ret = 1;
- unlock_res_and_lock(lock);
-
- return ret;
-}
-
-typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
- union ldlm_policy_data *);
-
-typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
- union ldlm_wire_policy_data *);
-
-void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy);
-void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
- union ldlm_wire_policy_data *wpolicy);
-void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy);
-void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
- union ldlm_wire_policy_data *wpolicy);
-void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy);
-void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
- union ldlm_wire_policy_data *wpolicy);
-void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy);
-void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
- union ldlm_wire_policy_data *wpolicy);
-
-static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
- const struct ldlm_res_id *res1)
-{
- return memcmp(res0, res1, sizeof(*res0)) == 0;
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
deleted file mode 100644
index 9efd26ec59dd..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ /dev/null
@@ -1,843 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/**
- * This file deals with various client/target related logic including recovery.
- *
- * TODO: This code more logically belongs in the ptlrpc module than in ldlm and
- * should be moved.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <linux/libcfs/libcfs.h>
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_dlm.h>
-#include <lustre_net.h>
-#include <lustre_sec.h>
-#include "ldlm_internal.h"
-
-/* @priority: If non-zero, move the selected connection to the list head.
- * @create: If zero, only search in existing connections.
- */
-static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
- int priority, int create)
-{
- struct ptlrpc_connection *ptlrpc_conn;
- struct obd_import_conn *imp_conn = NULL, *item;
- int rc = 0;
-
- if (!create && !priority) {
- CDEBUG(D_HA, "Nothing to do\n");
- return -EINVAL;
- }
-
- ptlrpc_conn = ptlrpc_uuid_to_connection(uuid);
- if (!ptlrpc_conn) {
- CDEBUG(D_HA, "can't find connection %s\n", uuid->uuid);
- return -ENOENT;
- }
-
- if (create) {
- imp_conn = kzalloc(sizeof(*imp_conn), GFP_NOFS);
- if (!imp_conn) {
- rc = -ENOMEM;
- goto out_put;
- }
- }
-
- spin_lock(&imp->imp_lock);
- list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
- if (obd_uuid_equals(uuid, &item->oic_uuid)) {
- if (priority) {
- list_del(&item->oic_item);
- list_add(&item->oic_item,
- &imp->imp_conn_list);
- item->oic_last_attempt = 0;
- }
- CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
- imp, imp->imp_obd->obd_name, uuid->uuid,
- (priority ? ", moved to head" : ""));
- spin_unlock(&imp->imp_lock);
- rc = 0;
- goto out_free;
- }
- }
- /* No existing import connection found for \a uuid. */
- if (create) {
- imp_conn->oic_conn = ptlrpc_conn;
- imp_conn->oic_uuid = *uuid;
- imp_conn->oic_last_attempt = 0;
- if (priority)
- list_add(&imp_conn->oic_item, &imp->imp_conn_list);
- else
- list_add_tail(&imp_conn->oic_item,
- &imp->imp_conn_list);
- CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
- imp, imp->imp_obd->obd_name, uuid->uuid,
- (priority ? "head" : "tail"));
- } else {
- spin_unlock(&imp->imp_lock);
- rc = -ENOENT;
- goto out_free;
- }
-
- spin_unlock(&imp->imp_lock);
- return 0;
-out_free:
- kfree(imp_conn);
-out_put:
- ptlrpc_connection_put(ptlrpc_conn);
- return rc;
-}
-
-int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid)
-{
- return import_set_conn(imp, uuid, 1, 0);
-}
-
-int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
- int priority)
-{
- return import_set_conn(imp, uuid, priority, 1);
-}
-EXPORT_SYMBOL(client_import_add_conn);
-
-int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
-{
- struct obd_import_conn *imp_conn;
- struct obd_export *dlmexp;
- int rc = -ENOENT;
-
- spin_lock(&imp->imp_lock);
- if (list_empty(&imp->imp_conn_list)) {
- LASSERT(!imp->imp_connection);
- goto out;
- }
-
- list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
- if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid))
- continue;
- LASSERT(imp_conn->oic_conn);
-
- if (imp_conn == imp->imp_conn_current) {
- LASSERT(imp_conn->oic_conn == imp->imp_connection);
-
- if (imp->imp_state != LUSTRE_IMP_CLOSED &&
- imp->imp_state != LUSTRE_IMP_DISCON) {
- CERROR("can't remove current connection\n");
- rc = -EBUSY;
- goto out;
- }
-
- ptlrpc_connection_put(imp->imp_connection);
- imp->imp_connection = NULL;
-
- dlmexp = class_conn2export(&imp->imp_dlm_handle);
- if (dlmexp && dlmexp->exp_connection) {
- LASSERT(dlmexp->exp_connection ==
- imp_conn->oic_conn);
- ptlrpc_connection_put(dlmexp->exp_connection);
- dlmexp->exp_connection = NULL;
- }
-
- if (dlmexp)
- class_export_put(dlmexp);
- }
-
- list_del(&imp_conn->oic_item);
- ptlrpc_connection_put(imp_conn->oic_conn);
- kfree(imp_conn);
- CDEBUG(D_HA, "imp %p@%s: remove connection %s\n",
- imp, imp->imp_obd->obd_name, uuid->uuid);
- rc = 0;
- break;
- }
-out:
- spin_unlock(&imp->imp_lock);
- if (rc == -ENOENT)
- CERROR("connection %s not found\n", uuid->uuid);
- return rc;
-}
-EXPORT_SYMBOL(client_import_del_conn);
-
-/**
- * Find conn UUID by peer NID. \a peer is a server NID. This function is used
- * to find a conn uuid of \a imp which can reach \a peer.
- */
-int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
- struct obd_uuid *uuid)
-{
- struct obd_import_conn *conn;
- int rc = -ENOENT;
-
- spin_lock(&imp->imp_lock);
- list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
- /* Check if conn UUID does have this peer NID. */
- if (class_check_uuid(&conn->oic_uuid, peer)) {
- *uuid = conn->oic_uuid;
- rc = 0;
- break;
- }
- }
- spin_unlock(&imp->imp_lock);
- return rc;
-}
-EXPORT_SYMBOL(client_import_find_conn);
-
-void client_destroy_import(struct obd_import *imp)
-{
- /* Drop security policy instance after all RPCs have finished/aborted
- * to let all busy contexts be released.
- */
- class_import_get(imp);
- class_destroy_import(imp);
- sptlrpc_import_sec_put(imp);
- class_import_put(imp);
-}
-EXPORT_SYMBOL(client_destroy_import);
-
-/* Configure an RPC client OBD device.
- *
- * lcfg parameters:
- * 1 - client UUID
- * 2 - server UUID
- * 3 - inactive-on-startup
- */
-int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
-{
- struct client_obd *cli = &obddev->u.cli;
- struct obd_import *imp;
- struct obd_uuid server_uuid;
- int rq_portal, rp_portal, connect_op;
- char *name = obddev->obd_type->typ_name;
- enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN;
- int rc;
-
- /* In a more perfect world, we would hang a ptlrpc_client off of
- * obd_type and just use the values from there.
- */
- if (!strcmp(name, LUSTRE_OSC_NAME)) {
- rq_portal = OST_REQUEST_PORTAL;
- rp_portal = OSC_REPLY_PORTAL;
- connect_op = OST_CONNECT;
- cli->cl_sp_me = LUSTRE_SP_CLI;
- cli->cl_sp_to = LUSTRE_SP_OST;
- ns_type = LDLM_NS_TYPE_OSC;
- } else if (!strcmp(name, LUSTRE_MDC_NAME) ||
- !strcmp(name, LUSTRE_LWP_NAME)) {
- rq_portal = MDS_REQUEST_PORTAL;
- rp_portal = MDC_REPLY_PORTAL;
- connect_op = MDS_CONNECT;
- cli->cl_sp_me = LUSTRE_SP_CLI;
- cli->cl_sp_to = LUSTRE_SP_MDT;
- ns_type = LDLM_NS_TYPE_MDC;
- } else if (!strcmp(name, LUSTRE_MGC_NAME)) {
- rq_portal = MGS_REQUEST_PORTAL;
- rp_portal = MGC_REPLY_PORTAL;
- connect_op = MGS_CONNECT;
- cli->cl_sp_me = LUSTRE_SP_MGC;
- cli->cl_sp_to = LUSTRE_SP_MGS;
- cli->cl_flvr_mgc.sf_rpc = SPTLRPC_FLVR_INVALID;
- ns_type = LDLM_NS_TYPE_MGC;
- } else {
- CERROR("unknown client OBD type \"%s\", can't setup\n",
- name);
- return -EINVAL;
- }
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
- CERROR("requires a TARGET UUID\n");
- return -EINVAL;
- }
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) > 37) {
- CERROR("client UUID must be less than 38 characters\n");
- return -EINVAL;
- }
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 2) < 1) {
- CERROR("setup requires a SERVER UUID\n");
- return -EINVAL;
- }
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 2) > 37) {
- CERROR("target UUID must be less than 38 characters\n");
- return -EINVAL;
- }
-
- init_rwsem(&cli->cl_sem);
- cli->cl_conn_count = 0;
- memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
- min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
- sizeof(server_uuid)));
-
- cli->cl_dirty_pages = 0;
- cli->cl_avail_grant = 0;
- /* FIXME: Should limit this for the sum of all cl_dirty_max_pages. */
- /*
- * cl_dirty_max_pages may be changed at connect time in
- * ptlrpc_connect_interpret().
- */
- client_adjust_max_dirty(cli);
- INIT_LIST_HEAD(&cli->cl_cache_waiters);
- INIT_LIST_HEAD(&cli->cl_loi_ready_list);
- INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
- INIT_LIST_HEAD(&cli->cl_loi_write_list);
- INIT_LIST_HEAD(&cli->cl_loi_read_list);
- spin_lock_init(&cli->cl_loi_list_lock);
- atomic_set(&cli->cl_pending_w_pages, 0);
- atomic_set(&cli->cl_pending_r_pages, 0);
- cli->cl_r_in_flight = 0;
- cli->cl_w_in_flight = 0;
-
- spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
- spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
- spin_lock_init(&cli->cl_read_page_hist.oh_lock);
- spin_lock_init(&cli->cl_write_page_hist.oh_lock);
- spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
- spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
-
- /* lru for osc. */
- INIT_LIST_HEAD(&cli->cl_lru_osc);
- atomic_set(&cli->cl_lru_shrinkers, 0);
- atomic_long_set(&cli->cl_lru_busy, 0);
- atomic_long_set(&cli->cl_lru_in_list, 0);
- INIT_LIST_HEAD(&cli->cl_lru_list);
- spin_lock_init(&cli->cl_lru_list_lock);
- atomic_long_set(&cli->cl_unstable_count, 0);
- INIT_LIST_HEAD(&cli->cl_shrink_list);
-
- init_waitqueue_head(&cli->cl_destroy_waitq);
- atomic_set(&cli->cl_destroy_in_flight, 0);
- /* Turn on checksumming by default. */
- cli->cl_checksum = 1;
- /*
- * The supported checksum types will be worked out at connect time
- * Set cl_chksum* to CRC32 for now to avoid returning screwed info
- * through procfs.
- */
- cli->cl_cksum_type = OBD_CKSUM_CRC32;
- cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
- atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
-
- /*
- * Set it to possible maximum size. It may be reduced by ocd_brw_size
- * from OFD after connecting.
- */
- cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES;
-
- /*
- * set cl_chunkbits default value to PAGE_CACHE_SHIFT,
- * it will be updated at OSC connection time.
- */
- cli->cl_chunkbits = PAGE_SHIFT;
-
- if (!strcmp(name, LUSTRE_MDC_NAME))
- cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
- else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */)
- cli->cl_max_rpcs_in_flight = 2;
- else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */)
- cli->cl_max_rpcs_in_flight = 3;
- else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */)
- cli->cl_max_rpcs_in_flight = 4;
- else
- cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
-
- spin_lock_init(&cli->cl_mod_rpcs_lock);
- spin_lock_init(&cli->cl_mod_rpcs_hist.oh_lock);
- cli->cl_max_mod_rpcs_in_flight = 0;
- cli->cl_mod_rpcs_in_flight = 0;
- cli->cl_close_rpcs_in_flight = 0;
- init_waitqueue_head(&cli->cl_mod_rpcs_waitq);
- cli->cl_mod_tag_bitmap = NULL;
-
- if (connect_op == MDS_CONNECT) {
- cli->cl_max_mod_rpcs_in_flight = cli->cl_max_rpcs_in_flight - 1;
- cli->cl_mod_tag_bitmap = kcalloc(BITS_TO_LONGS(OBD_MAX_RIF_MAX),
- sizeof(long), GFP_NOFS);
- if (!cli->cl_mod_tag_bitmap) {
- rc = -ENOMEM;
- goto err;
- }
- }
-
- rc = ldlm_get_ref();
- if (rc) {
- CERROR("ldlm_get_ref failed: %d\n", rc);
- goto err;
- }
-
- ptlrpc_init_client(rq_portal, rp_portal, name,
- &obddev->obd_ldlm_client);
-
- imp = class_new_import(obddev);
- if (!imp) {
- rc = -ENOENT;
- goto err_ldlm;
- }
- imp->imp_client = &obddev->obd_ldlm_client;
- imp->imp_connect_op = connect_op;
- memcpy(cli->cl_target_uuid.uuid, lustre_cfg_buf(lcfg, 1),
- LUSTRE_CFG_BUFLEN(lcfg, 1));
- class_import_put(imp);
-
- rc = client_import_add_conn(imp, &server_uuid, 1);
- if (rc) {
- CERROR("can't add initial connection\n");
- goto err_import;
- }
-
- cli->cl_import = imp;
- /* cli->cl_max_mds_easize updated by mdc_init_ea_size() */
- cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3);
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
- if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
- CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
- name, obddev->obd_name,
- cli->cl_target_uuid.uuid);
- spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- spin_unlock(&imp->imp_lock);
- }
- }
-
- obddev->obd_namespace = ldlm_namespace_new(obddev, obddev->obd_name,
- LDLM_NAMESPACE_CLIENT,
- LDLM_NAMESPACE_GREEDY,
- ns_type);
- if (!obddev->obd_namespace) {
- CERROR("Unable to create client namespace - %s\n",
- obddev->obd_name);
- rc = -ENOMEM;
- goto err_import;
- }
-
- return rc;
-
-err_import:
- class_destroy_import(imp);
-err_ldlm:
- ldlm_put_ref();
-err:
- kfree(cli->cl_mod_tag_bitmap);
- cli->cl_mod_tag_bitmap = NULL;
- return rc;
-}
-EXPORT_SYMBOL(client_obd_setup);
-
-int client_obd_cleanup(struct obd_device *obddev)
-{
- struct client_obd *cli = &obddev->u.cli;
-
- ldlm_namespace_free_post(obddev->obd_namespace);
- obddev->obd_namespace = NULL;
-
- obd_cleanup_client_import(obddev);
- LASSERT(!obddev->u.cli.cl_import);
-
- ldlm_put_ref();
-
- kfree(cli->cl_mod_tag_bitmap);
- cli->cl_mod_tag_bitmap = NULL;
-
- return 0;
-}
-EXPORT_SYMBOL(client_obd_cleanup);
-
-/* ->o_connect() method for client side (OSC and MDC and MGC) */
-int client_connect_import(const struct lu_env *env,
- struct obd_export **exp,
- struct obd_device *obd, struct obd_uuid *cluuid,
- struct obd_connect_data *data, void *localdata)
-{
- struct client_obd *cli = &obd->u.cli;
- struct obd_import *imp = cli->cl_import;
- struct obd_connect_data *ocd;
- struct lustre_handle conn = { 0 };
- bool is_mdc = false;
- int rc;
-
- *exp = NULL;
- down_write(&cli->cl_sem);
- if (cli->cl_conn_count > 0) {
- rc = -EALREADY;
- goto out_sem;
- }
-
- rc = class_connect(&conn, obd, cluuid);
- if (rc)
- goto out_sem;
-
- cli->cl_conn_count++;
- *exp = class_conn2export(&conn);
-
- LASSERT(obd->obd_namespace);
-
- imp->imp_dlm_handle = conn;
- rc = ptlrpc_init_import(imp);
- if (rc != 0)
- goto out_ldlm;
-
- ocd = &imp->imp_connect_data;
- if (data) {
- *ocd = *data;
- is_mdc = !strncmp(imp->imp_obd->obd_type->typ_name,
- LUSTRE_MDC_NAME, 3);
- if (is_mdc)
- data->ocd_connect_flags |= OBD_CONNECT_MULTIMODRPCS;
- imp->imp_connect_flags_orig = data->ocd_connect_flags;
- }
-
- rc = ptlrpc_connect_import(imp);
- if (rc != 0) {
- if (data && is_mdc)
- data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS;
- LASSERT(imp->imp_state == LUSTRE_IMP_DISCON);
- goto out_ldlm;
- }
- LASSERT(*exp && (*exp)->exp_connection);
-
- if (data) {
- LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
- ocd->ocd_connect_flags, "old %#llx, new %#llx\n",
- data->ocd_connect_flags, ocd->ocd_connect_flags);
- data->ocd_connect_flags = ocd->ocd_connect_flags;
- /* clear the flag as it was not set and is not known
- * by upper layers
- */
- if (is_mdc)
- data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS;
- }
-
- ptlrpc_pinger_add_import(imp);
-
- if (rc) {
-out_ldlm:
- cli->cl_conn_count--;
- class_disconnect(*exp);
- *exp = NULL;
- }
-out_sem:
- up_write(&cli->cl_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(client_connect_import);
-
-int client_disconnect_export(struct obd_export *exp)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct client_obd *cli;
- struct obd_import *imp;
- int rc = 0, err;
-
- if (!obd) {
- CERROR("invalid export for disconnect: exp %p cookie %#llx\n",
- exp, exp ? exp->exp_handle.h_cookie : -1);
- return -EINVAL;
- }
-
- cli = &obd->u.cli;
- imp = cli->cl_import;
-
- down_write(&cli->cl_sem);
- CDEBUG(D_INFO, "disconnect %s - %zu\n", obd->obd_name,
- cli->cl_conn_count);
-
- if (!cli->cl_conn_count) {
- CERROR("disconnecting disconnected device (%s)\n",
- obd->obd_name);
- rc = -EINVAL;
- goto out_disconnect;
- }
-
- cli->cl_conn_count--;
- if (cli->cl_conn_count) {
- rc = 0;
- goto out_disconnect;
- }
-
- /* Mark import deactivated now, so we don't try to reconnect if any
- * of the cleanup RPCs fails (e.g. LDLM cancel, etc). We don't
- * fully deactivate the import, or that would drop all requests.
- */
- spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- spin_unlock(&imp->imp_lock);
-
- /* Some non-replayable imports (MDS's OSCs) are pinged, so just
- * delete it regardless. (It's safe to delete an import that was
- * never added.)
- */
- (void)ptlrpc_pinger_del_import(imp);
-
- if (obd->obd_namespace) {
- /* obd_force == local only */
- ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
- obd->obd_force ? LCF_LOCAL : 0, NULL);
- ldlm_namespace_free_prior(obd->obd_namespace, imp,
- obd->obd_force);
- }
-
- /* There's no need to hold sem while disconnecting an import,
- * and it may actually cause deadlock in GSS.
- */
- up_write(&cli->cl_sem);
- rc = ptlrpc_disconnect_import(imp, 0);
- down_write(&cli->cl_sem);
-
- ptlrpc_invalidate_import(imp);
-
-out_disconnect:
- /* Use server style - class_disconnect should be always called for
- * o_disconnect.
- */
- err = class_disconnect(exp);
- if (!rc && err)
- rc = err;
-
- up_write(&cli->cl_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(client_disconnect_export);
-
-/**
- * Packs current SLV and Limit into \a req.
- */
-int target_pack_pool_reply(struct ptlrpc_request *req)
-{
- struct obd_device *obd;
-
- /* Check that we still have all structures alive as this may
- * be some late RPC at shutdown time.
- */
- if (unlikely(!req->rq_export || !req->rq_export->exp_obd ||
- !exp_connect_lru_resize(req->rq_export))) {
- lustre_msg_set_slv(req->rq_repmsg, 0);
- lustre_msg_set_limit(req->rq_repmsg, 0);
- return 0;
- }
-
- /* OBD is alive here as export is alive, which we checked above. */
- obd = req->rq_export->exp_obd;
-
- read_lock(&obd->obd_pool_lock);
- lustre_msg_set_slv(req->rq_repmsg, obd->obd_pool_slv);
- lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
- read_unlock(&obd->obd_pool_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(target_pack_pool_reply);
-
-static int
-target_send_reply_msg(struct ptlrpc_request *req, int rc, int fail_id)
-{
- if (OBD_FAIL_CHECK_ORSET(fail_id & ~OBD_FAIL_ONCE, OBD_FAIL_ONCE)) {
- DEBUG_REQ(D_ERROR, req, "dropping reply");
- return -ECOMM;
- }
-
- if (unlikely(rc)) {
- DEBUG_REQ(D_NET, req, "processing error (%d)", rc);
- req->rq_status = rc;
- return ptlrpc_send_error(req, 1);
- }
-
- DEBUG_REQ(D_NET, req, "sending reply");
- return ptlrpc_send_reply(req, PTLRPC_REPLY_MAYBE_DIFFICULT);
-}
-
-void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
-{
- struct ptlrpc_service_part *svcpt;
- int netrc;
- struct ptlrpc_reply_state *rs;
- struct obd_export *exp;
-
- if (req->rq_no_reply)
- return;
-
- svcpt = req->rq_rqbd->rqbd_svcpt;
- rs = req->rq_reply_state;
- if (!rs || !rs->rs_difficult) {
- /* no notifiers */
- target_send_reply_msg(req, rc, fail_id);
- return;
- }
-
- /* must be an export if locks saved */
- LASSERT(req->rq_export);
- /* req/reply consistent */
- LASSERT(rs->rs_svcpt == svcpt);
-
- /* "fresh" reply */
- LASSERT(!rs->rs_scheduled);
- LASSERT(!rs->rs_scheduled_ever);
- LASSERT(!rs->rs_handled);
- LASSERT(!rs->rs_on_net);
- LASSERT(!rs->rs_export);
- LASSERT(list_empty(&rs->rs_obd_list));
- LASSERT(list_empty(&rs->rs_exp_list));
-
- exp = class_export_get(req->rq_export);
-
- /* disable reply scheduling while I'm setting up */
- rs->rs_scheduled = 1;
- rs->rs_on_net = 1;
- rs->rs_xid = req->rq_xid;
- rs->rs_transno = req->rq_transno;
- rs->rs_export = exp;
- rs->rs_opc = lustre_msg_get_opc(req->rq_reqmsg);
-
- spin_lock(&exp->exp_uncommitted_replies_lock);
- CDEBUG(D_NET, "rs transno = %llu, last committed = %llu\n",
- rs->rs_transno, exp->exp_last_committed);
- if (rs->rs_transno > exp->exp_last_committed) {
- /* not committed already */
- list_add_tail(&rs->rs_obd_list,
- &exp->exp_uncommitted_replies);
- }
- spin_unlock(&exp->exp_uncommitted_replies_lock);
-
- spin_lock(&exp->exp_lock);
- list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
- spin_unlock(&exp->exp_lock);
-
- netrc = target_send_reply_msg(req, rc, fail_id);
-
- spin_lock(&svcpt->scp_rep_lock);
-
- atomic_inc(&svcpt->scp_nreps_difficult);
-
- if (netrc != 0) {
- /* error sending: reply is off the net. Also we need +1
- * reply ref until ptlrpc_handle_rs() is done
- * with the reply state (if the send was successful, there
- * would have been +1 ref for the net, which
- * reply_out_callback leaves alone)
- */
- rs->rs_on_net = 0;
- ptlrpc_rs_addref(rs);
- }
-
- spin_lock(&rs->rs_lock);
- if (rs->rs_transno <= exp->exp_last_committed ||
- (!rs->rs_on_net && !rs->rs_no_ack) ||
- list_empty(&rs->rs_exp_list) || /* completed already */
- list_empty(&rs->rs_obd_list)) {
- CDEBUG(D_HA, "Schedule reply immediately\n");
- ptlrpc_dispatch_difficult_reply(rs);
- } else {
- list_add(&rs->rs_list, &svcpt->scp_rep_active);
- rs->rs_scheduled = 0; /* allow notifier to schedule */
- }
- spin_unlock(&rs->rs_lock);
- spin_unlock(&svcpt->scp_rep_lock);
-}
-EXPORT_SYMBOL(target_send_reply);
-
-enum ldlm_mode lck_compat_array[] = {
- [LCK_EX] = LCK_COMPAT_EX,
- [LCK_PW] = LCK_COMPAT_PW,
- [LCK_PR] = LCK_COMPAT_PR,
- [LCK_CW] = LCK_COMPAT_CW,
- [LCK_CR] = LCK_COMPAT_CR,
- [LCK_NL] = LCK_COMPAT_NL,
- [LCK_GROUP] = LCK_COMPAT_GROUP,
- [LCK_COS] = LCK_COMPAT_COS,
-};
-
-/**
- * Rather arbitrary mapping from LDLM error codes to errno values. This should
- * not escape to the user level.
- */
-int ldlm_error2errno(enum ldlm_error error)
-{
- int result;
-
- switch (error) {
- case ELDLM_OK:
- case ELDLM_LOCK_MATCHED:
- result = 0;
- break;
- case ELDLM_LOCK_CHANGED:
- result = -ESTALE;
- break;
- case ELDLM_LOCK_ABORTED:
- result = -ENAVAIL;
- break;
- case ELDLM_LOCK_REPLACED:
- result = -ESRCH;
- break;
- case ELDLM_NO_LOCK_DATA:
- result = -ENOENT;
- break;
- case ELDLM_NAMESPACE_EXISTS:
- result = -EEXIST;
- break;
- case ELDLM_BAD_NAMESPACE:
- result = -EBADF;
- break;
- default:
- if (((int)error) < 0) /* cast to signed type */
- result = error; /* as enum ldlm_error can be unsigned */
- else {
- CERROR("Invalid DLM result code: %d\n", error);
- result = -EPROTO;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(ldlm_error2errno);
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-void ldlm_dump_export_locks(struct obd_export *exp)
-{
- spin_lock(&exp->exp_locks_list_guard);
- if (!list_empty(&exp->exp_locks_list)) {
- struct ldlm_lock *lock;
-
- CERROR("dumping locks for export %p,ignore if the unmount doesn't hang\n",
- exp);
- list_for_each_entry(lock, &exp->exp_locks_list,
- l_exp_refs_link)
- LDLM_ERROR(lock, "lock:");
- }
- spin_unlock(&exp->exp_locks_list_guard);
-}
-#endif
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
deleted file mode 100644
index 95bea351d21d..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ /dev/null
@@ -1,2146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_lock.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <linux/libcfs/libcfs.h>
-#include <lustre_intent.h>
-#include <lustre_swab.h>
-#include <obd_class.h>
-#include "ldlm_internal.h"
-
-/* lock types */
-char *ldlm_lockname[] = {
- [0] = "--",
- [LCK_EX] = "EX",
- [LCK_PW] = "PW",
- [LCK_PR] = "PR",
- [LCK_CW] = "CW",
- [LCK_CR] = "CR",
- [LCK_NL] = "NL",
- [LCK_GROUP] = "GROUP",
- [LCK_COS] = "COS",
-};
-EXPORT_SYMBOL(ldlm_lockname);
-
-static char *ldlm_typename[] = {
- [LDLM_PLAIN] = "PLN",
- [LDLM_EXTENT] = "EXT",
- [LDLM_FLOCK] = "FLK",
- [LDLM_IBITS] = "IBT",
-};
-
-static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
- [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire_to_local,
- [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
-};
-
-static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_local_to_wire,
- [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_local_to_wire,
- [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_local_to_wire,
-};
-
-/**
- * Converts lock policy from local format to on the wire lock_desc format
- */
-static void ldlm_convert_policy_to_wire(enum ldlm_type type,
- const union ldlm_policy_data *lpolicy,
- union ldlm_wire_policy_data *wpolicy)
-{
- ldlm_policy_local_to_wire_t convert;
-
- convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
-
- convert(lpolicy, wpolicy);
-}
-
-/**
- * Converts lock policy from on the wire lock_desc format to local format
- */
-void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
- const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy)
-{
- ldlm_policy_wire_to_local_t convert;
-
- convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];
-
- convert(wpolicy, lpolicy);
-}
-
-const char *ldlm_it2str(enum ldlm_intent_flags it)
-{
- switch (it) {
- case IT_OPEN:
- return "open";
- case IT_CREAT:
- return "creat";
- case (IT_OPEN | IT_CREAT):
- return "open|creat";
- case IT_READDIR:
- return "readdir";
- case IT_GETATTR:
- return "getattr";
- case IT_LOOKUP:
- return "lookup";
- case IT_UNLINK:
- return "unlink";
- case IT_GETXATTR:
- return "getxattr";
- case IT_LAYOUT:
- return "layout";
- default:
- CERROR("Unknown intent 0x%08x\n", it);
- return "UNKNOWN";
- }
-}
-EXPORT_SYMBOL(ldlm_it2str);
-
-/*
- * REFCOUNTED LOCK OBJECTS
- */
-
-/**
- * Get a reference on a lock.
- *
- * Lock refcounts, during creation:
- * - one special one for allocation, dec'd only once in destroy
- * - one for being a lock that's in-use
- * - one for the addref associated with a new lock
- */
-struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
-{
- atomic_inc(&lock->l_refc);
- return lock;
-}
-EXPORT_SYMBOL(ldlm_lock_get);
-
-/**
- * Release lock reference.
- *
- * Also frees the lock if it was last reference.
- */
-void ldlm_lock_put(struct ldlm_lock *lock)
-{
- LASSERT(lock->l_resource != LP_POISON);
- LASSERT(atomic_read(&lock->l_refc) > 0);
- if (atomic_dec_and_test(&lock->l_refc)) {
- struct ldlm_resource *res;
-
- LDLM_DEBUG(lock,
- "final lock_put on destroyed lock, freeing it.");
-
- res = lock->l_resource;
- LASSERT(ldlm_is_destroyed(lock));
- LASSERT(list_empty(&lock->l_res_link));
- LASSERT(list_empty(&lock->l_pending_chain));
-
- lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
- LDLM_NSS_LOCKS);
- lu_ref_del(&res->lr_reference, "lock", lock);
- ldlm_resource_putref(res);
- lock->l_resource = NULL;
- if (lock->l_export) {
- class_export_lock_put(lock->l_export, lock);
- lock->l_export = NULL;
- }
-
- kfree(lock->l_lvb_data);
-
- ldlm_interval_free(ldlm_interval_detach(lock));
- lu_ref_fini(&lock->l_reference);
- OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle);
- }
-}
-EXPORT_SYMBOL(ldlm_lock_put);
-
-/**
- * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked.
- */
-int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
-{
- int rc = 0;
-
- if (!list_empty(&lock->l_lru)) {
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- list_del_init(&lock->l_lru);
- LASSERT(ns->ns_nr_unused > 0);
- ns->ns_nr_unused--;
- rc = 1;
- }
- return rc;
-}
-
-/**
- * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
- *
- * If \a last_use is non-zero, it will remove the lock from LRU only if
- * it matches lock's l_last_used.
- *
- * \retval 0 if \a last_use is set, the lock is not in LRU list or \a last_use
- * doesn't match lock's l_last_used;
- * otherwise, the lock hasn't been in the LRU list.
- * \retval 1 the lock was in LRU list and removed.
- */
-int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- int rc = 0;
-
- spin_lock(&ns->ns_lock);
- if (last_use == 0 || last_use == lock->l_last_used)
- rc = ldlm_lock_remove_from_lru_nolock(lock);
- spin_unlock(&ns->ns_lock);
-
- return rc;
-}
-
-/**
- * Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked.
- */
-static void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- lock->l_last_used = cfs_time_current();
- LASSERT(list_empty(&lock->l_lru));
- LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- list_add_tail(&lock->l_lru, &ns->ns_unused_list);
- ldlm_clear_skipped(lock);
- LASSERT(ns->ns_nr_unused >= 0);
- ns->ns_nr_unused++;
-}
-
-/**
- * Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks
- * first.
- */
-static void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- spin_lock(&ns->ns_lock);
- ldlm_lock_add_to_lru_nolock(lock);
- spin_unlock(&ns->ns_lock);
-}
-
-/**
- * Moves LDLM lock \a lock that is already in namespace LRU to the tail of
- * the LRU. Performs necessary LRU locking
- */
-static void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- spin_lock(&ns->ns_lock);
- if (!list_empty(&lock->l_lru)) {
- ldlm_lock_remove_from_lru_nolock(lock);
- ldlm_lock_add_to_lru_nolock(lock);
- }
- spin_unlock(&ns->ns_lock);
-}
-
-/**
- * Helper to destroy a locked lock.
- *
- * Used by ldlm_lock_destroy and ldlm_lock_destroy_nolock
- * Must be called with l_lock and lr_lock held.
- *
- * Does not actually free the lock data, but rather marks the lock as
- * destroyed by setting l_destroyed field in the lock to 1. Destroys a
- * handle->lock association too, so that the lock can no longer be found
- * and removes the lock from LRU list. Actual lock freeing occurs when
- * last lock reference goes away.
- *
- * Original comment (of some historical value):
- * This used to have a 'strict' flag, which recovery would use to mark an
- * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I
- * shall explain why it's gone: with the new hash table scheme, once you call
- * ldlm_lock_destroy, you can never drop your final references on this lock.
- * Because it's not in the hash table anymore. -phil
- */
-static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
-{
- if (lock->l_readers || lock->l_writers) {
- LDLM_ERROR(lock, "lock still has references");
- LBUG();
- }
-
- if (!list_empty(&lock->l_res_link)) {
- LDLM_ERROR(lock, "lock still on resource");
- LBUG();
- }
-
- if (ldlm_is_destroyed(lock)) {
- LASSERT(list_empty(&lock->l_lru));
- return 0;
- }
- ldlm_set_destroyed(lock);
-
- if (lock->l_export && lock->l_export->exp_lock_hash) {
- /* NB: it's safe to call cfs_hash_del() even lock isn't
- * in exp_lock_hash.
- */
- /* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp()
- */
- /* coverity[overrun-buffer-val] */
- cfs_hash_del(lock->l_export->exp_lock_hash,
- &lock->l_remote_handle, &lock->l_exp_hash);
- }
-
- ldlm_lock_remove_from_lru(lock);
- class_handle_unhash(&lock->l_handle);
-
- return 1;
-}
-
-/**
- * Destroys a LDLM lock \a lock. Performs necessary locking first.
- */
-static void ldlm_lock_destroy(struct ldlm_lock *lock)
-{
- int first;
-
- lock_res_and_lock(lock);
- first = ldlm_lock_destroy_internal(lock);
- unlock_res_and_lock(lock);
-
- /* drop reference from hashtable only for first destroy */
- if (first) {
- lu_ref_del(&lock->l_reference, "hash", lock);
- LDLM_LOCK_RELEASE(lock);
- }
-}
-
-/**
- * Destroys a LDLM lock \a lock that is already locked.
- */
-void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
-{
- int first;
-
- first = ldlm_lock_destroy_internal(lock);
- /* drop reference from hashtable only for first destroy */
- if (first) {
- lu_ref_del(&lock->l_reference, "hash", lock);
- LDLM_LOCK_RELEASE(lock);
- }
-}
-
-/* this is called by portals_handle2object with the handle lock taken */
-static void lock_handle_addref(void *lock)
-{
- LDLM_LOCK_GET((struct ldlm_lock *)lock);
-}
-
-static void lock_handle_free(void *lock, int size)
-{
- LASSERT(size == sizeof(struct ldlm_lock));
- kmem_cache_free(ldlm_lock_slab, lock);
-}
-
-static struct portals_handle_ops lock_handle_ops = {
- .hop_addref = lock_handle_addref,
- .hop_free = lock_handle_free,
-};
-
-/**
- *
- * Allocate and initialize new lock structure.
- *
- * usage: pass in a resource on which you have done ldlm_resource_get
- * new lock will take over the refcount.
- * returns: lock with refcount 2 - one for current caller and one for remote
- */
-static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
-{
- struct ldlm_lock *lock;
-
- LASSERT(resource);
-
- lock = kmem_cache_zalloc(ldlm_lock_slab, GFP_NOFS);
- if (!lock)
- return NULL;
-
- spin_lock_init(&lock->l_lock);
- lock->l_resource = resource;
- lu_ref_add(&resource->lr_reference, "lock", lock);
-
- atomic_set(&lock->l_refc, 2);
- INIT_LIST_HEAD(&lock->l_res_link);
- INIT_LIST_HEAD(&lock->l_lru);
- INIT_LIST_HEAD(&lock->l_pending_chain);
- INIT_LIST_HEAD(&lock->l_bl_ast);
- INIT_LIST_HEAD(&lock->l_cp_ast);
- INIT_LIST_HEAD(&lock->l_rk_ast);
- init_waitqueue_head(&lock->l_waitq);
- lock->l_blocking_lock = NULL;
- INIT_LIST_HEAD(&lock->l_sl_mode);
- INIT_LIST_HEAD(&lock->l_sl_policy);
- INIT_HLIST_NODE(&lock->l_exp_hash);
- INIT_HLIST_NODE(&lock->l_exp_flock_hash);
-
- lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
- LDLM_NSS_LOCKS);
- INIT_LIST_HEAD(&lock->l_handle.h_link);
- class_handle_hash(&lock->l_handle, &lock_handle_ops);
-
- lu_ref_init(&lock->l_reference);
- lu_ref_add(&lock->l_reference, "hash", lock);
- lock->l_callback_timeout = 0;
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- INIT_LIST_HEAD(&lock->l_exp_refs_link);
- lock->l_exp_refs_nr = 0;
- lock->l_exp_refs_target = NULL;
-#endif
-
- return lock;
-}
-
-/**
- * Moves LDLM lock \a lock to another resource.
- * This is used on client when server returns some other lock than requested
- * (typically as a result of intent operation)
- */
-int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
- const struct ldlm_res_id *new_resid)
-{
- struct ldlm_resource *oldres = lock->l_resource;
- struct ldlm_resource *newres;
- int type;
-
- lock_res_and_lock(lock);
- if (memcmp(new_resid, &lock->l_resource->lr_name,
- sizeof(lock->l_resource->lr_name)) == 0) {
- /* Nothing to do */
- unlock_res_and_lock(lock);
- return 0;
- }
-
- LASSERT(new_resid->name[0] != 0);
-
- /* This function assumes that the lock isn't on any lists */
- LASSERT(list_empty(&lock->l_res_link));
-
- type = oldres->lr_type;
- unlock_res_and_lock(lock);
-
- newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
- if (IS_ERR(newres))
- return PTR_ERR(newres);
-
- lu_ref_add(&newres->lr_reference, "lock", lock);
- /*
- * To flip the lock from the old to the new resource, lock, oldres and
- * newres have to be locked. Resource spin-locks are nested within
- * lock->l_lock, and are taken in the memory address order to avoid
- * dead-locks.
- */
- spin_lock(&lock->l_lock);
- oldres = lock->l_resource;
- if (oldres < newres) {
- lock_res(oldres);
- lock_res_nested(newres, LRT_NEW);
- } else {
- lock_res(newres);
- lock_res_nested(oldres, LRT_NEW);
- }
- LASSERT(memcmp(new_resid, &oldres->lr_name,
- sizeof(oldres->lr_name)) != 0);
- lock->l_resource = newres;
- unlock_res(oldres);
- unlock_res_and_lock(lock);
-
- /* ...and the flowers are still standing! */
- lu_ref_del(&oldres->lr_reference, "lock", lock);
- ldlm_resource_putref(oldres);
-
- return 0;
-}
-
-/** \defgroup ldlm_handles LDLM HANDLES
- * Ways to get hold of locks without any addresses.
- * @{
- */
-
-/**
- * Fills in handle for LDLM lock \a lock into supplied \a lockh
- * Does not take any references.
- */
-void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
-{
- lockh->cookie = lock->l_handle.h_cookie;
-}
-EXPORT_SYMBOL(ldlm_lock2handle);
-
-/**
- * Obtain a lock reference by handle.
- *
- * if \a flags: atomically get the lock and set the flags.
- * Return NULL if flag already set
- */
-struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
- __u64 flags)
-{
- struct ldlm_lock *lock;
-
- LASSERT(handle);
-
- lock = class_handle2object(handle->cookie, NULL);
- if (!lock)
- return NULL;
-
- if (lock->l_export && lock->l_export->exp_failed) {
- CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
- lock, lock->l_export);
- LDLM_LOCK_PUT(lock);
- return NULL;
- }
-
- /* It's unlikely but possible that someone marked the lock as
- * destroyed after we did handle2object on it
- */
- if (flags == 0 && !ldlm_is_destroyed(lock)) {
- lu_ref_add(&lock->l_reference, "handle", current);
- return lock;
- }
-
- lock_res_and_lock(lock);
-
- LASSERT(lock->l_resource);
-
- lu_ref_add_atomic(&lock->l_reference, "handle", current);
- if (unlikely(ldlm_is_destroyed(lock))) {
- unlock_res_and_lock(lock);
- CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
- LDLM_LOCK_PUT(lock);
- return NULL;
- }
-
- if (flags) {
- if (lock->l_flags & flags) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- return NULL;
- }
-
- lock->l_flags |= flags;
- }
-
- unlock_res_and_lock(lock);
- return lock;
-}
-EXPORT_SYMBOL(__ldlm_handle2lock);
-/** @} ldlm_handles */
-
-/**
- * Fill in "on the wire" representation for given LDLM lock into supplied
- * lock descriptor \a desc structure.
- */
-void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
-{
- ldlm_res2desc(lock->l_resource, &desc->l_resource);
- desc->l_req_mode = lock->l_req_mode;
- desc->l_granted_mode = lock->l_granted_mode;
- ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
- &lock->l_policy_data,
- &desc->l_policy_data);
-}
-
-/**
- * Add a lock to list of conflicting locks to send AST to.
- *
- * Only add if we have not sent a blocking AST to the lock yet.
- */
-static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- struct list_head *work_list)
-{
- if (!ldlm_is_ast_sent(lock)) {
- LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
- ldlm_set_ast_sent(lock);
- /* If the enqueuing client said so, tell the AST recipient to
- * discard dirty data, rather than writing back.
- */
- if (ldlm_is_ast_discard_data(new))
- ldlm_set_discard_data(lock);
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, work_list);
- LDLM_LOCK_GET(lock);
- LASSERT(!lock->l_blocking_lock);
- lock->l_blocking_lock = LDLM_LOCK_GET(new);
- }
-}
-
-/**
- * Add a lock to list of just granted locks to send completion AST to.
- */
-static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
- struct list_head *work_list)
-{
- if (!ldlm_is_cp_reqd(lock)) {
- ldlm_set_cp_reqd(lock);
- LDLM_DEBUG(lock, "lock granted; sending completion AST.");
- LASSERT(list_empty(&lock->l_cp_ast));
- list_add(&lock->l_cp_ast, work_list);
- LDLM_LOCK_GET(lock);
- }
-}
-
-/**
- * Aggregator function to add AST work items into a list. Determines
- * what sort of an AST work needs to be done and calls the proper
- * adding function.
- * Must be called with lr_lock held.
- */
-static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
- struct ldlm_lock *new,
- struct list_head *work_list)
-{
- check_res_locked(lock->l_resource);
- if (new)
- ldlm_add_bl_work_item(lock, new, work_list);
- else
- ldlm_add_cp_work_item(lock, work_list);
-}
-
-/**
- * Add specified reader/writer reference to LDLM lock with handle \a lockh.
- * r/w reference type is determined by \a mode
- * Calls ldlm_lock_addref_internal.
- */
-void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode)
-{
- struct ldlm_lock *lock;
-
- lock = ldlm_handle2lock(lockh);
- LASSERTF(lock, "Non-existing lock: %llx\n", lockh->cookie);
- ldlm_lock_addref_internal(lock, mode);
- LDLM_LOCK_PUT(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_addref);
-
-/**
- * Helper function.
- * Add specified reader/writer reference to LDLM lock \a lock.
- * r/w reference type is determined by \a mode
- * Removes lock from LRU if it is there.
- * Assumes the LDLM lock is already locked.
- */
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
- enum ldlm_mode mode)
-{
- ldlm_lock_remove_from_lru(lock);
- if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
- lock->l_readers++;
- lu_ref_add_atomic(&lock->l_reference, "reader", lock);
- }
- if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
- lock->l_writers++;
- lu_ref_add_atomic(&lock->l_reference, "writer", lock);
- }
- LDLM_LOCK_GET(lock);
- lu_ref_add_atomic(&lock->l_reference, "user", lock);
- LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
-}
-
-/**
- * Attempts to add reader/writer reference to a lock with handle \a lockh, and
- * fails if lock is already LDLM_FL_CBPENDING or destroyed.
- *
- * \retval 0 success, lock was addref-ed
- *
- * \retval -EAGAIN lock is being canceled.
- */
-int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode)
-{
- struct ldlm_lock *lock;
- int result;
-
- result = -EAGAIN;
- lock = ldlm_handle2lock(lockh);
- if (lock) {
- lock_res_and_lock(lock);
- if (lock->l_readers != 0 || lock->l_writers != 0 ||
- !ldlm_is_cbpending(lock)) {
- ldlm_lock_addref_internal_nolock(lock, mode);
- result = 0;
- }
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- }
- return result;
-}
-EXPORT_SYMBOL(ldlm_lock_addref_try);
-
-/**
- * Add specified reader/writer reference to LDLM lock \a lock.
- * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
- * Only called for local locks.
- */
-void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
-{
- lock_res_and_lock(lock);
- ldlm_lock_addref_internal_nolock(lock, mode);
- unlock_res_and_lock(lock);
-}
-
-/**
- * Removes reader/writer reference for LDLM lock \a lock.
- * Assumes LDLM lock is already locked.
- * only called in ldlm_flock_destroy and for local locks.
- * Does NOT add lock to LRU if no r/w references left to accommodate flock locks
- * that cannot be placed in LRU.
- */
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
- enum ldlm_mode mode)
-{
- LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
- if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
- LASSERT(lock->l_readers > 0);
- lu_ref_del(&lock->l_reference, "reader", lock);
- lock->l_readers--;
- }
- if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
- LASSERT(lock->l_writers > 0);
- lu_ref_del(&lock->l_reference, "writer", lock);
- lock->l_writers--;
- }
-
- lu_ref_del(&lock->l_reference, "user", lock);
- LDLM_LOCK_RELEASE(lock); /* matches the LDLM_LOCK_GET() in addref */
-}
-
-/**
- * Removes reader/writer reference for LDLM lock \a lock.
- * Locks LDLM lock first.
- * If the lock is determined to be client lock on a client and r/w refcount
- * drops to zero and the lock is not blocked, the lock is added to LRU lock
- * on the namespace.
- * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called.
- */
-void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
-{
- struct ldlm_namespace *ns;
-
- lock_res_and_lock(lock);
-
- ns = ldlm_lock_to_ns(lock);
-
- ldlm_lock_decref_internal_nolock(lock, mode);
-
- if ((ldlm_is_local(lock) || lock->l_req_mode == LCK_GROUP) &&
- !lock->l_readers && !lock->l_writers) {
- /* If this is a local lock on a server namespace and this was
- * the last reference, cancel the lock.
- *
- * Group locks are special:
- * They must not go in LRU, but they are not called back
- * like non-group locks, instead they are manually released.
- * They have an l_writers reference which they keep until
- * they are manually released, so we remove them when they have
- * no more reader or writer references. - LU-6368
- */
- ldlm_set_cbpending(lock);
- }
-
- if (!lock->l_readers && !lock->l_writers && ldlm_is_cbpending(lock)) {
- /* If we received a blocked AST and this was the last reference,
- * run the callback.
- */
- LDLM_DEBUG(lock, "final decref done on cbpending lock");
-
- LDLM_LOCK_GET(lock); /* dropped by bl thread */
- ldlm_lock_remove_from_lru(lock);
- unlock_res_and_lock(lock);
-
- if (ldlm_is_fail_loc(lock))
- OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
-
- if (ldlm_is_atomic_cb(lock) ||
- ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
- ldlm_handle_bl_callback(ns, NULL, lock);
- } else if (!lock->l_readers && !lock->l_writers &&
- !ldlm_is_no_lru(lock) && !ldlm_is_bl_ast(lock)) {
- LDLM_DEBUG(lock, "add lock into lru list");
-
- /* If this is a client-side namespace and this was the last
- * reference, put it on the LRU.
- */
- ldlm_lock_add_to_lru(lock);
- unlock_res_and_lock(lock);
-
- if (ldlm_is_fail_loc(lock))
- OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
-
- /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
- * are not supported by the server, otherwise, it is done on
- * enqueue.
- */
- if (!exp_connect_cancelset(lock->l_conn_export) &&
- !ns_connect_lru_resize(ns))
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
- } else {
- LDLM_DEBUG(lock, "do not add lock into lru list");
- unlock_res_and_lock(lock);
- }
-}
-
-/**
- * Decrease reader/writer refcount for LDLM lock with handle \a lockh
- */
-void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode)
-{
- struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
-
- LASSERTF(lock, "Non-existing lock: %#llx\n", lockh->cookie);
- ldlm_lock_decref_internal(lock, mode);
- LDLM_LOCK_PUT(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_decref);
-
-/**
- * Decrease reader/writer refcount for LDLM lock with handle
- * \a lockh and mark it for subsequent cancellation once r/w refcount
- * drops to zero instead of putting into LRU.
- */
-void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
- enum ldlm_mode mode)
-{
- struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
-
- LASSERT(lock);
-
- LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
- lock_res_and_lock(lock);
- ldlm_set_cbpending(lock);
- unlock_res_and_lock(lock);
- ldlm_lock_decref_internal(lock, mode);
- LDLM_LOCK_PUT(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
-
-struct sl_insert_point {
- struct list_head *res_link;
- struct list_head *mode_link;
- struct list_head *policy_link;
-};
-
-/**
- * Finds a position to insert the new lock into granted lock list.
- *
- * Used for locks eligible for skiplist optimization.
- *
- * Parameters:
- * queue [input]: the granted list where search acts on;
- * req [input]: the lock whose position to be located;
- * prev [output]: positions within 3 lists to insert @req to
- * Return Value:
- * filled @prev
- * NOTE: called by
- * - ldlm_grant_lock_with_skiplist
- */
-static void search_granted_lock(struct list_head *queue,
- struct ldlm_lock *req,
- struct sl_insert_point *prev)
-{
- struct ldlm_lock *lock, *mode_end, *policy_end;
-
- list_for_each_entry(lock, queue, l_res_link) {
-
- mode_end = list_prev_entry(lock, l_sl_mode);
-
- if (lock->l_req_mode != req->l_req_mode) {
- /* jump to last lock of mode group */
- lock = mode_end;
- continue;
- }
-
- /* suitable mode group is found */
- if (lock->l_resource->lr_type == LDLM_PLAIN) {
- /* insert point is last lock of the mode group */
- prev->res_link = &mode_end->l_res_link;
- prev->mode_link = &mode_end->l_sl_mode;
- prev->policy_link = &req->l_sl_policy;
- return;
- }
-
- if (lock->l_resource->lr_type == LDLM_IBITS) {
- for (;;) {
- policy_end =
- list_prev_entry(lock, l_sl_policy);
-
- if (lock->l_policy_data.l_inodebits.bits ==
- req->l_policy_data.l_inodebits.bits) {
- /* insert point is last lock of
- * the policy group
- */
- prev->res_link =
- &policy_end->l_res_link;
- prev->mode_link =
- &policy_end->l_sl_mode;
- prev->policy_link =
- &policy_end->l_sl_policy;
- return;
- }
-
- if (policy_end == mode_end)
- /* done with mode group */
- break;
-
- /* go to next policy group within mode group */
- lock = list_next_entry(policy_end, l_res_link);
- } /* loop over policy groups within the mode group */
-
- /* insert point is last lock of the mode group,
- * new policy group is started
- */
- prev->res_link = &mode_end->l_res_link;
- prev->mode_link = &mode_end->l_sl_mode;
- prev->policy_link = &req->l_sl_policy;
- return;
- }
-
- LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
- LBUG();
- }
-
- /* insert point is last lock on the queue,
- * new mode group and new policy group are started
- */
- prev->res_link = queue->prev;
- prev->mode_link = &req->l_sl_mode;
- prev->policy_link = &req->l_sl_policy;
-}
-
-/**
- * Add a lock into resource granted list after a position described by
- * \a prev.
- */
-static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
- struct sl_insert_point *prev)
-{
- struct ldlm_resource *res = lock->l_resource;
-
- check_res_locked(res);
-
- ldlm_resource_dump(D_INFO, res);
- LDLM_DEBUG(lock, "About to add lock:");
-
- if (ldlm_is_destroyed(lock)) {
- CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
- return;
- }
-
- LASSERT(list_empty(&lock->l_res_link));
- LASSERT(list_empty(&lock->l_sl_mode));
- LASSERT(list_empty(&lock->l_sl_policy));
-
- /*
- * lock->link == prev->link means lock is first starting the group.
- * Don't re-add to itself to suppress kernel warnings.
- */
- if (&lock->l_res_link != prev->res_link)
- list_add(&lock->l_res_link, prev->res_link);
- if (&lock->l_sl_mode != prev->mode_link)
- list_add(&lock->l_sl_mode, prev->mode_link);
- if (&lock->l_sl_policy != prev->policy_link)
- list_add(&lock->l_sl_policy, prev->policy_link);
-}
-
-/**
- * Add a lock to granted list on a resource maintaining skiplist
- * correctness.
- */
-static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
-{
- struct sl_insert_point prev;
-
- LASSERT(lock->l_req_mode == lock->l_granted_mode);
-
- search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
- ldlm_granted_list_add_lock(lock, &prev);
-}
-
-/**
- * Perform lock granting bookkeeping.
- *
- * Includes putting the lock into granted list and updating lock mode.
- * NOTE: called by
- * - ldlm_lock_enqueue
- * - ldlm_reprocess_queue
- * - ldlm_lock_convert
- *
- * must be called with lr_lock held
- */
-void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
-{
- struct ldlm_resource *res = lock->l_resource;
-
- check_res_locked(res);
-
- lock->l_granted_mode = lock->l_req_mode;
-
- if (work_list && lock->l_completion_ast)
- ldlm_add_ast_work_item(lock, NULL, work_list);
-
- if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
- ldlm_grant_lock_with_skiplist(lock);
- } else if (res->lr_type == LDLM_EXTENT) {
- ldlm_extent_add_lock(res, lock);
- } else if (res->lr_type == LDLM_FLOCK) {
- /*
- * We should not add locks to granted list in
- * the following cases:
- * - this is an UNLOCK but not a real lock;
- * - this is a TEST lock;
- * - this is a F_CANCELLK lock (async flock has req_mode == 0)
- * - this is a deadlock (flock cannot be granted)
- */
- if (!lock->l_req_mode || lock->l_req_mode == LCK_NL ||
- ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
- return;
- ldlm_resource_add_lock(res, &res->lr_granted, lock);
- } else {
- LBUG();
- }
-
- ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
-}
-
-/**
- * Describe the overlap between two locks. itree_overlap_cb data.
- */
-struct lock_match_data {
- struct ldlm_lock *lmd_old;
- struct ldlm_lock *lmd_lock;
- enum ldlm_mode *lmd_mode;
- union ldlm_policy_data *lmd_policy;
- __u64 lmd_flags;
- int lmd_unref;
-};
-
-/**
- * Check if the given @lock meets the criteria for a match.
- * A reference on the lock is taken if matched.
- *
- * \param lock test-against this lock
- * \param data parameters
- */
-static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
-{
- union ldlm_policy_data *lpol = &lock->l_policy_data;
- enum ldlm_mode match;
-
- if (lock == data->lmd_old)
- return INTERVAL_ITER_STOP;
-
- /*
- * Check if this lock can be matched.
- * Used by LU-2919(exclusive open) for open lease lock
- */
- if (ldlm_is_excl(lock))
- return INTERVAL_ITER_CONT;
-
- /*
- * llite sometimes wants to match locks that will be
- * canceled when their users drop, but we allow it to match
- * if it passes in CBPENDING and the lock still has users.
- * this is generally only going to be used by children
- * whose parents already hold a lock so forward progress
- * can still happen.
- */
- if (ldlm_is_cbpending(lock) &&
- !(data->lmd_flags & LDLM_FL_CBPENDING))
- return INTERVAL_ITER_CONT;
-
- if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
- !lock->l_readers && !lock->l_writers)
- return INTERVAL_ITER_CONT;
-
- if (!(lock->l_req_mode & *data->lmd_mode))
- return INTERVAL_ITER_CONT;
- match = lock->l_req_mode;
-
- switch (lock->l_resource->lr_type) {
- case LDLM_EXTENT:
- if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
- lpol->l_extent.end < data->lmd_policy->l_extent.end)
- return INTERVAL_ITER_CONT;
-
- if (unlikely(match == LCK_GROUP) &&
- data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
- lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
- return INTERVAL_ITER_CONT;
- break;
- case LDLM_IBITS:
- /*
- * We match if we have existing lock with same or wider set
- * of bits.
- */
- if ((lpol->l_inodebits.bits &
- data->lmd_policy->l_inodebits.bits) !=
- data->lmd_policy->l_inodebits.bits)
- return INTERVAL_ITER_CONT;
- break;
- default:
- break;
- }
- /*
- * We match if we have existing lock with same or wider set
- * of bits.
- */
- if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
- return INTERVAL_ITER_CONT;
-
- if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock)))
- return INTERVAL_ITER_CONT;
-
- if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
- LDLM_LOCK_GET(lock);
- ldlm_lock_touch_in_lru(lock);
- } else {
- ldlm_lock_addref_internal_nolock(lock, match);
- }
-
- *data->lmd_mode = match;
- data->lmd_lock = lock;
-
- return INTERVAL_ITER_STOP;
-}
-
-static enum interval_iter itree_overlap_cb(struct interval_node *in, void *args)
-{
- struct ldlm_interval *node = to_ldlm_interval(in);
- struct lock_match_data *data = args;
- struct ldlm_lock *lock;
- int rc;
-
- list_for_each_entry(lock, &node->li_group, l_sl_policy) {
- rc = lock_matches(lock, data);
- if (rc == INTERVAL_ITER_STOP)
- return INTERVAL_ITER_STOP;
- }
- return INTERVAL_ITER_CONT;
-}
-
-/**
- * Search for a lock with given parameters in interval trees.
- *
- * \param res search for a lock in this resource
- * \param data parameters
- *
- * \retval a referenced lock or NULL.
- */
-static struct ldlm_lock *search_itree(struct ldlm_resource *res,
- struct lock_match_data *data)
-{
- struct interval_node_extent ext = {
- .start = data->lmd_policy->l_extent.start,
- .end = data->lmd_policy->l_extent.end
- };
- int idx;
-
- for (idx = 0; idx < LCK_MODE_NUM; idx++) {
- struct ldlm_interval_tree *tree = &res->lr_itree[idx];
-
- if (!tree->lit_root)
- continue;
-
- if (!(tree->lit_mode & *data->lmd_mode))
- continue;
-
- interval_search(tree->lit_root, &ext,
- itree_overlap_cb, data);
- }
- return data->lmd_lock;
-}
-
-/**
- * Search for a lock with given properties in a queue.
- *
- * \param queue search for a lock in this queue
- * \param data parameters
- *
- * \retval a referenced lock or NULL.
- */
-static struct ldlm_lock *search_queue(struct list_head *queue,
- struct lock_match_data *data)
-{
- struct ldlm_lock *lock;
- int rc;
-
- list_for_each_entry(lock, queue, l_res_link) {
- rc = lock_matches(lock, data);
- if (rc == INTERVAL_ITER_STOP)
- return data->lmd_lock;
- }
- return NULL;
-}
-
-void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
-{
- if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
- lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
- wake_up_all(&lock->l_waitq);
- }
-}
-
-/**
- * Mark lock as "matchable" by OST.
- *
- * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB
- * is not yet valid.
- * Assumes LDLM lock is already locked.
- */
-void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
-{
- ldlm_set_lvb_ready(lock);
- wake_up_all(&lock->l_waitq);
-}
-EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
-
-/**
- * Mark lock as "matchable" by OST.
- * Locks the lock and then \see ldlm_lock_allow_match_locked
- */
-void ldlm_lock_allow_match(struct ldlm_lock *lock)
-{
- lock_res_and_lock(lock);
- ldlm_lock_allow_match_locked(lock);
- unlock_res_and_lock(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_allow_match);
-
-/**
- * Attempt to find a lock with specified properties.
- *
- * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is
- * set in \a flags
- *
- * Can be called in two ways:
- *
- * If 'ns' is NULL, then lockh describes an existing lock that we want to look
- * for a duplicate of.
- *
- * Otherwise, all of the fields must be filled in, to match against.
- *
- * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
- * server (ie, connh is NULL)
- * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
- * list will be considered
- * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
- * to be canceled can still be matched as long as they still have reader
- * or writer referneces
- * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
- * just tell us if we would have matched.
- *
- * \retval 1 if it finds an already-existing lock that is compatible; in this
- * case, lockh is filled in with a addref()ed lock
- *
- * We also check security context, and if that fails we simply return 0 (to
- * keep caller code unchanged), the context failure will be discovered by
- * caller sometime later.
- */
-enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
- const struct ldlm_res_id *res_id,
- enum ldlm_type type,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode,
- struct lustre_handle *lockh, int unref)
-{
- struct lock_match_data data = {
- .lmd_old = NULL,
- .lmd_lock = NULL,
- .lmd_mode = &mode,
- .lmd_policy = policy,
- .lmd_flags = flags,
- .lmd_unref = unref,
- };
- struct ldlm_resource *res;
- struct ldlm_lock *lock;
- int rc = 0;
-
- if (!ns) {
- data.lmd_old = ldlm_handle2lock(lockh);
- LASSERT(data.lmd_old);
-
- ns = ldlm_lock_to_ns(data.lmd_old);
- res_id = &data.lmd_old->l_resource->lr_name;
- type = data.lmd_old->l_resource->lr_type;
- *data.lmd_mode = data.lmd_old->l_req_mode;
- }
-
- res = ldlm_resource_get(ns, NULL, res_id, type, 0);
- if (IS_ERR(res)) {
- LASSERT(!data.lmd_old);
- return 0;
- }
-
- LDLM_RESOURCE_ADDREF(res);
- lock_res(res);
-
- if (res->lr_type == LDLM_EXTENT)
- lock = search_itree(res, &data);
- else
- lock = search_queue(&res->lr_granted, &data);
- if (lock) {
- rc = 1;
- goto out;
- }
- if (flags & LDLM_FL_BLOCK_GRANTED) {
- rc = 0;
- goto out;
- }
- lock = search_queue(&res->lr_waiting, &data);
- if (lock) {
- rc = 1;
- goto out;
- }
-out:
- unlock_res(res);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
-
- if (lock) {
- ldlm_lock2handle(lock, lockh);
- if ((flags & LDLM_FL_LVB_READY) && !ldlm_is_lvb_ready(lock)) {
- __u64 wait_flags = LDLM_FL_LVB_READY |
- LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
-
- if (lock->l_completion_ast) {
- int err = lock->l_completion_ast(lock,
- LDLM_FL_WAIT_NOREPROC,
- NULL);
- if (err) {
- if (flags & LDLM_FL_TEST_LOCK)
- LDLM_LOCK_RELEASE(lock);
- else
- ldlm_lock_decref_internal(lock,
- mode);
- rc = 0;
- goto out2;
- }
- }
-
- /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
- wait_event_idle_timeout(lock->l_waitq,
- lock->l_flags & wait_flags,
- obd_timeout * HZ);
- if (!ldlm_is_lvb_ready(lock)) {
- if (flags & LDLM_FL_TEST_LOCK)
- LDLM_LOCK_RELEASE(lock);
- else
- ldlm_lock_decref_internal(lock, mode);
- rc = 0;
- }
- }
- }
- out2:
- if (rc) {
- LDLM_DEBUG(lock, "matched (%llu %llu)",
- (type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[2] : policy->l_extent.start,
- (type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[3] : policy->l_extent.end);
-
- /* check user's security context */
- if (lock->l_conn_export &&
- sptlrpc_import_check_ctx(
- class_exp2cliimp(lock->l_conn_export))) {
- if (!(flags & LDLM_FL_TEST_LOCK))
- ldlm_lock_decref_internal(lock, mode);
- rc = 0;
- }
-
- if (flags & LDLM_FL_TEST_LOCK)
- LDLM_LOCK_RELEASE(lock);
-
- } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
- LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res %llu/%llu (%llu %llu)",
- ns, type, mode, res_id->name[0],
- res_id->name[1],
- (type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[2] : policy->l_extent.start,
- (type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[3] : policy->l_extent.end);
- }
- if (data.lmd_old)
- LDLM_LOCK_PUT(data.lmd_old);
-
- return rc ? mode : 0;
-}
-EXPORT_SYMBOL(ldlm_lock_match);
-
-enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
- __u64 *bits)
-{
- struct ldlm_lock *lock;
- enum ldlm_mode mode = 0;
-
- lock = ldlm_handle2lock(lockh);
- if (lock) {
- lock_res_and_lock(lock);
- if (LDLM_HAVE_MASK(lock, GONE))
- goto out;
-
- if (ldlm_is_cbpending(lock) &&
- lock->l_readers == 0 && lock->l_writers == 0)
- goto out;
-
- if (bits)
- *bits = lock->l_policy_data.l_inodebits.bits;
- mode = lock->l_granted_mode;
- ldlm_lock_addref_internal_nolock(lock, mode);
- }
-
-out:
- if (lock) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- }
- return mode;
-}
-EXPORT_SYMBOL(ldlm_revalidate_lock_handle);
-
-/** The caller must guarantee that the buffer is large enough. */
-int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
- enum req_location loc, void *data, int size)
-{
- void *lvb;
-
- LASSERT(data);
- LASSERT(size >= 0);
-
- switch (lock->l_lvb_type) {
- case LVB_T_OST:
- if (size == sizeof(struct ost_lvb)) {
- if (loc == RCL_CLIENT)
- lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb);
- else
- lvb = req_capsule_server_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb);
- if (unlikely(!lvb)) {
- LDLM_ERROR(lock, "no LVB");
- return -EPROTO;
- }
-
- memcpy(data, lvb, size);
- } else if (size == sizeof(struct ost_lvb_v1)) {
- struct ost_lvb *olvb = data;
-
- if (loc == RCL_CLIENT)
- lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb_v1);
- else
- lvb = req_capsule_server_sized_swab_get(pill,
- &RMF_DLM_LVB, size,
- lustre_swab_ost_lvb_v1);
- if (unlikely(!lvb)) {
- LDLM_ERROR(lock, "no LVB");
- return -EPROTO;
- }
-
- memcpy(data, lvb, size);
- olvb->lvb_mtime_ns = 0;
- olvb->lvb_atime_ns = 0;
- olvb->lvb_ctime_ns = 0;
- } else {
- LDLM_ERROR(lock, "Replied unexpected ost LVB size %d",
- size);
- return -EINVAL;
- }
- break;
- case LVB_T_LQUOTA:
- if (size == sizeof(struct lquota_lvb)) {
- if (loc == RCL_CLIENT)
- lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_lquota_lvb);
- else
- lvb = req_capsule_server_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_lquota_lvb);
- if (unlikely(!lvb)) {
- LDLM_ERROR(lock, "no LVB");
- return -EPROTO;
- }
-
- memcpy(data, lvb, size);
- } else {
- LDLM_ERROR(lock,
- "Replied unexpected lquota LVB size %d",
- size);
- return -EINVAL;
- }
- break;
- case LVB_T_LAYOUT:
- if (size == 0)
- break;
-
- if (loc == RCL_CLIENT)
- lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
- else
- lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
- if (unlikely(!lvb)) {
- LDLM_ERROR(lock, "no LVB");
- return -EPROTO;
- }
-
- memcpy(data, lvb, size);
- break;
- default:
- LDLM_ERROR(lock, "Unknown LVB type: %d", lock->l_lvb_type);
- dump_stack();
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * Create and fill in new LDLM lock with specified properties.
- * Returns a referenced lock
- */
-struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- enum ldlm_type type,
- enum ldlm_mode mode,
- const struct ldlm_callback_suite *cbs,
- void *data, __u32 lvb_len,
- enum lvb_type lvb_type)
-{
- struct ldlm_lock *lock;
- struct ldlm_resource *res;
- int rc;
-
- res = ldlm_resource_get(ns, NULL, res_id, type, 1);
- if (IS_ERR(res))
- return ERR_CAST(res);
-
- lock = ldlm_lock_new(res);
- if (!lock)
- return ERR_PTR(-ENOMEM);
-
- lock->l_req_mode = mode;
- lock->l_ast_data = data;
- lock->l_pid = current_pid();
- if (cbs) {
- lock->l_blocking_ast = cbs->lcs_blocking;
- lock->l_completion_ast = cbs->lcs_completion;
- lock->l_glimpse_ast = cbs->lcs_glimpse;
- }
-
- lock->l_tree_node = NULL;
- /* if this is the extent lock, allocate the interval tree node */
- if (type == LDLM_EXTENT) {
- if (!ldlm_interval_alloc(lock)) {
- rc = -ENOMEM;
- goto out;
- }
- }
-
- if (lvb_len) {
- lock->l_lvb_len = lvb_len;
- lock->l_lvb_data = kzalloc(lvb_len, GFP_NOFS);
- if (!lock->l_lvb_data) {
- rc = -ENOMEM;
- goto out;
- }
- }
-
- lock->l_lvb_type = lvb_type;
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK)) {
- rc = -ENOENT;
- goto out;
- }
-
- return lock;
-
-out:
- ldlm_lock_destroy(lock);
- LDLM_LOCK_RELEASE(lock);
- return ERR_PTR(rc);
-}
-
-/**
- * Enqueue (request) a lock.
- * On the client this is called from ldlm_cli_enqueue_fini
- * after we already got an initial reply from the server with some status.
- *
- * Does not block. As a result of enqueue the lock would be put
- * into granted or waiting list.
- */
-enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
- struct ldlm_lock **lockp,
- void *cookie, __u64 *flags)
-{
- struct ldlm_lock *lock = *lockp;
- struct ldlm_resource *res = lock->l_resource;
-
- lock_res_and_lock(lock);
- if (lock->l_req_mode == lock->l_granted_mode) {
- /* The server returned a blocked lock, but it was granted
- * before we got a chance to actually enqueue it. We don't
- * need to do anything else.
- */
- *flags &= ~LDLM_FL_BLOCKED_MASK;
- goto out;
- }
-
- ldlm_resource_unlink_lock(lock);
-
- /* Cannot happen unless on the server */
- if (res->lr_type == LDLM_EXTENT && !lock->l_tree_node)
- LBUG();
-
- /* Some flags from the enqueue want to make it into the AST, via the
- * lock's l_flags.
- */
- if (*flags & LDLM_FL_AST_DISCARD_DATA)
- ldlm_set_ast_discard_data(lock);
- if (*flags & LDLM_FL_TEST_LOCK)
- ldlm_set_test_lock(lock);
-
- /*
- * This distinction between local lock trees is very important; a client
- * namespace only has information about locks taken by that client, and
- * thus doesn't have enough information to decide for itself if it can
- * be granted (below). In this case, we do exactly what the server
- * tells us to do, as dictated by the 'flags'.
- */
- if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
- ldlm_resource_add_lock(res, &res->lr_waiting, lock);
- else
- ldlm_grant_lock(lock, NULL);
-
-out:
- unlock_res_and_lock(lock);
- return ELDLM_OK;
-}
-
-/**
- * Process a call to blocking AST callback for a lock in ast_work list
- */
-static int
-ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
-{
- struct ldlm_cb_set_arg *arg = opaq;
- struct ldlm_lock_desc d;
- int rc;
- struct ldlm_lock *lock;
-
- if (list_empty(arg->list))
- return -ENOENT;
-
- lock = list_first_entry(arg->list, struct ldlm_lock, l_bl_ast);
-
- /* nobody should touch l_bl_ast */
- lock_res_and_lock(lock);
- list_del_init(&lock->l_bl_ast);
-
- LASSERT(ldlm_is_ast_sent(lock));
- LASSERT(lock->l_bl_ast_run == 0);
- LASSERT(lock->l_blocking_lock);
- lock->l_bl_ast_run++;
- unlock_res_and_lock(lock);
-
- ldlm_lock2desc(lock->l_blocking_lock, &d);
-
- rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);
- LDLM_LOCK_RELEASE(lock->l_blocking_lock);
- lock->l_blocking_lock = NULL;
- LDLM_LOCK_RELEASE(lock);
-
- return rc;
-}
-
-/**
- * Process a call to completion AST callback for a lock in ast_work list
- */
-static int
-ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
-{
- struct ldlm_cb_set_arg *arg = opaq;
- int rc = 0;
- struct ldlm_lock *lock;
- ldlm_completion_callback completion_callback;
-
- if (list_empty(arg->list))
- return -ENOENT;
-
- lock = list_first_entry(arg->list, struct ldlm_lock, l_cp_ast);
-
- /* It's possible to receive a completion AST before we've set
- * the l_completion_ast pointer: either because the AST arrived
- * before the reply, or simply because there's a small race
- * window between receiving the reply and finishing the local
- * enqueue. (bug 842)
- *
- * This can't happen with the blocking_ast, however, because we
- * will never call the local blocking_ast until we drop our
- * reader/writer reference, which we won't do until we get the
- * reply and finish enqueueing.
- */
-
- /* nobody should touch l_cp_ast */
- lock_res_and_lock(lock);
- list_del_init(&lock->l_cp_ast);
- LASSERT(ldlm_is_cp_reqd(lock));
- /* save l_completion_ast since it can be changed by
- * mds_intent_policy(), see bug 14225
- */
- completion_callback = lock->l_completion_ast;
- ldlm_clear_cp_reqd(lock);
- unlock_res_and_lock(lock);
-
- if (completion_callback)
- rc = completion_callback(lock, 0, (void *)arg);
- LDLM_LOCK_RELEASE(lock);
-
- return rc;
-}
-
-/**
- * Process a call to revocation AST callback for a lock in ast_work list
- */
-static int
-ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
-{
- struct ldlm_cb_set_arg *arg = opaq;
- struct ldlm_lock_desc desc;
- int rc;
- struct ldlm_lock *lock;
-
- if (list_empty(arg->list))
- return -ENOENT;
-
- lock = list_first_entry(arg->list, struct ldlm_lock, l_rk_ast);
- list_del_init(&lock->l_rk_ast);
-
- /* the desc just pretend to exclusive */
- ldlm_lock2desc(lock, &desc);
- desc.l_req_mode = LCK_EX;
- desc.l_granted_mode = 0;
-
- rc = lock->l_blocking_ast(lock, &desc, (void *)arg, LDLM_CB_BLOCKING);
- LDLM_LOCK_RELEASE(lock);
-
- return rc;
-}
-
-/**
- * Process a call to glimpse AST callback for a lock in ast_work list
- */
-static int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
-{
- struct ldlm_cb_set_arg *arg = opaq;
- struct ldlm_glimpse_work *gl_work;
- struct ldlm_lock *lock;
- int rc = 0;
-
- if (list_empty(arg->list))
- return -ENOENT;
-
- gl_work = list_first_entry(arg->list, struct ldlm_glimpse_work,
- gl_list);
- list_del_init(&gl_work->gl_list);
-
- lock = gl_work->gl_lock;
-
- /* transfer the glimpse descriptor to ldlm_cb_set_arg */
- arg->gl_desc = gl_work->gl_desc;
-
- /* invoke the actual glimpse callback */
- if (lock->l_glimpse_ast(lock, (void *)arg) == 0)
- rc = 1;
-
- LDLM_LOCK_RELEASE(lock);
-
- if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0)
- kfree(gl_work);
-
- return rc;
-}
-
-/**
- * Process list of locks in need of ASTs being sent.
- *
- * Used on server to send multiple ASTs together instead of sending one by
- * one.
- */
-int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
- enum ldlm_desc_ast_t ast_type)
-{
- struct ldlm_cb_set_arg *arg;
- set_producer_func work_ast_lock;
- int rc;
-
- if (list_empty(rpc_list))
- return 0;
-
- arg = kzalloc(sizeof(*arg), GFP_NOFS);
- if (!arg)
- return -ENOMEM;
-
- atomic_set(&arg->restart, 0);
- arg->list = rpc_list;
-
- switch (ast_type) {
- case LDLM_WORK_BL_AST:
- arg->type = LDLM_BL_CALLBACK;
- work_ast_lock = ldlm_work_bl_ast_lock;
- break;
- case LDLM_WORK_CP_AST:
- arg->type = LDLM_CP_CALLBACK;
- work_ast_lock = ldlm_work_cp_ast_lock;
- break;
- case LDLM_WORK_REVOKE_AST:
- arg->type = LDLM_BL_CALLBACK;
- work_ast_lock = ldlm_work_revoke_ast_lock;
- break;
- case LDLM_WORK_GL_AST:
- arg->type = LDLM_GL_CALLBACK;
- work_ast_lock = ldlm_work_gl_ast_lock;
- break;
- default:
- LBUG();
- }
-
- /* We create a ptlrpc request set with flow control extension.
- * This request set will use the work_ast_lock function to produce new
- * requests and will send a new request each time one completes in order
- * to keep the number of requests in flight to ns_max_parallel_ast
- */
- arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
- work_ast_lock, arg);
- if (!arg->set) {
- rc = -ENOMEM;
- goto out;
- }
-
- ptlrpc_set_wait(arg->set);
- ptlrpc_set_destroy(arg->set);
-
- rc = atomic_read(&arg->restart) ? -ERESTART : 0;
- goto out;
-out:
- kfree(arg);
- return rc;
-}
-
-static bool is_bl_done(struct ldlm_lock *lock)
-{
- bool bl_done = true;
-
- if (!ldlm_is_bl_done(lock)) {
- lock_res_and_lock(lock);
- bl_done = ldlm_is_bl_done(lock);
- unlock_res_and_lock(lock);
- }
-
- return bl_done;
-}
-
-/**
- * Helper function to call blocking AST for LDLM lock \a lock in a
- * "cancelling" mode.
- */
-void ldlm_cancel_callback(struct ldlm_lock *lock)
-{
- check_res_locked(lock->l_resource);
- if (!ldlm_is_cancel(lock)) {
- ldlm_set_cancel(lock);
- if (lock->l_blocking_ast) {
- unlock_res_and_lock(lock);
- lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
- LDLM_CB_CANCELING);
- lock_res_and_lock(lock);
- } else {
- LDLM_DEBUG(lock, "no blocking ast");
- }
- /* only canceller can set bl_done bit */
- ldlm_set_bl_done(lock);
- wake_up_all(&lock->l_waitq);
- } else if (!ldlm_is_bl_done(lock)) {
- /*
- * The lock is guaranteed to have been canceled once
- * returning from this function.
- */
- unlock_res_and_lock(lock);
- wait_event_idle(lock->l_waitq, is_bl_done(lock));
- lock_res_and_lock(lock);
- }
-}
-
-/**
- * Remove skiplist-enabled LDLM lock \a req from granted list
- */
-void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
-{
- if (req->l_resource->lr_type != LDLM_PLAIN &&
- req->l_resource->lr_type != LDLM_IBITS)
- return;
-
- list_del_init(&req->l_sl_policy);
- list_del_init(&req->l_sl_mode);
-}
-
-/**
- * Attempts to cancel LDLM lock \a lock that has no reader/writer references.
- */
-void ldlm_lock_cancel(struct ldlm_lock *lock)
-{
- struct ldlm_resource *res;
- struct ldlm_namespace *ns;
-
- lock_res_and_lock(lock);
-
- res = lock->l_resource;
- ns = ldlm_res_to_ns(res);
-
- /* Please do not, no matter how tempting, remove this LBUG without
- * talking to me first. -phik
- */
- if (lock->l_readers || lock->l_writers) {
- LDLM_ERROR(lock, "lock still has references");
- LBUG();
- }
-
- /* Releases cancel callback. */
- ldlm_cancel_callback(lock);
-
- ldlm_resource_unlink_lock(lock);
- ldlm_lock_destroy_nolock(lock);
-
- if (lock->l_granted_mode == lock->l_req_mode)
- ldlm_pool_del(&ns->ns_pool, lock);
-
- /* Make sure we will not be called again for same lock what is possible
- * if not to zero out lock->l_granted_mode
- */
- lock->l_granted_mode = LCK_MINMODE;
- unlock_res_and_lock(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_cancel);
-
-/**
- * Set opaque data into the lock that only makes sense to upper layer.
- */
-int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data)
-{
- struct ldlm_lock *lock = ldlm_handle2lock(lockh);
- int rc = -EINVAL;
-
- if (lock) {
- if (!lock->l_ast_data)
- lock->l_ast_data = data;
- if (lock->l_ast_data == data)
- rc = 0;
- LDLM_LOCK_PUT(lock);
- }
- return rc;
-}
-EXPORT_SYMBOL(ldlm_lock_set_data);
-
-struct export_cl_data {
- struct obd_export *ecl_exp;
- int ecl_loop;
-};
-
-/**
- * Print lock with lock handle \a lockh description into debug log.
- *
- * Used when printing all locks on a resource for debug purposes.
- */
-void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh)
-{
- struct ldlm_lock *lock;
-
- if (!((libcfs_debug | D_ERROR) & level))
- return;
-
- lock = ldlm_handle2lock(lockh);
- if (!lock)
- return;
-
- LDLM_DEBUG_LIMIT(level, lock, "###");
-
- LDLM_LOCK_PUT(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_dump_handle);
-
-/**
- * Print lock information with custom message into debug log.
- * Helper function.
- */
-void _ldlm_lock_debug(struct ldlm_lock *lock,
- struct libcfs_debug_msg_data *msgdata,
- const char *fmt, ...)
-{
- va_list args;
- struct obd_export *exp = lock->l_export;
- struct ldlm_resource *resource = lock->l_resource;
- char *nid = "local";
-
- va_start(args, fmt);
-
- if (exp && exp->exp_connection) {
- nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
- } else if (exp && exp->exp_obd) {
- struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
-
- nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
- }
-
- if (!resource) {
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- lock,
- lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- lock->l_flags, nid,
- lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
- va_end(args);
- return;
- }
-
- switch (resource->lr_type) {
- case LDLM_EXTENT:
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_extent.start,
- lock->l_policy_data.l_extent.end,
- lock->l_req_extent.start,
- lock->l_req_extent.end,
- lock->l_flags, nid,
- lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
- break;
-
- case LDLM_FLOCK:
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_flock.pid,
- lock->l_policy_data.l_flock.start,
- lock->l_policy_data.l_flock.end,
- lock->l_flags, nid,
- lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout);
- break;
-
- case LDLM_IBITS:
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- lock->l_policy_data.l_inodebits.bits,
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid,
- lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
- break;
-
- default:
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid,
- lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
- break;
- }
- va_end(args);
-}
-EXPORT_SYMBOL(_ldlm_lock_debug);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
deleted file mode 100644
index c772c68e5a49..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ /dev/null
@@ -1,1163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_lockd.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <linux/libcfs/libcfs.h>
-#include <lustre_dlm.h>
-#include <obd_class.h>
-#include <linux/list.h>
-#include "ldlm_internal.h"
-
-static int ldlm_num_threads;
-module_param(ldlm_num_threads, int, 0444);
-MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");
-
-static char *ldlm_cpts;
-module_param(ldlm_cpts, charp, 0444);
-MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
-
-static struct mutex ldlm_ref_mutex;
-static int ldlm_refcount;
-
-static struct kobject *ldlm_kobj;
-struct kset *ldlm_ns_kset;
-static struct kset *ldlm_svc_kset;
-
-struct ldlm_cb_async_args {
- struct ldlm_cb_set_arg *ca_set_arg;
- struct ldlm_lock *ca_lock;
-};
-
-/* LDLM state */
-
-static struct ldlm_state *ldlm_state;
-
-#define ELT_STOPPED 0
-#define ELT_READY 1
-#define ELT_TERMINATE 2
-
-struct ldlm_bl_pool {
- spinlock_t blp_lock;
-
- /*
- * blp_prio_list is used for callbacks that should be handled
- * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
- * see bug 13843
- */
- struct list_head blp_prio_list;
-
- /*
- * blp_list is used for all other callbacks which are likely
- * to take longer to process.
- */
- struct list_head blp_list;
-
- wait_queue_head_t blp_waitq;
- struct completion blp_comp;
- atomic_t blp_num_threads;
- atomic_t blp_busy_threads;
- int blp_min_threads;
- int blp_max_threads;
-};
-
-struct ldlm_bl_work_item {
- struct list_head blwi_entry;
- struct ldlm_namespace *blwi_ns;
- struct ldlm_lock_desc blwi_ld;
- struct ldlm_lock *blwi_lock;
- struct list_head blwi_head;
- int blwi_count;
- struct completion blwi_comp;
- enum ldlm_cancel_flags blwi_flags;
- int blwi_mem_pressure;
-};
-
-/**
- * Callback handler for receiving incoming blocking ASTs.
- *
- * This can only happen on client side.
- */
-void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
-{
- int do_ast;
-
- LDLM_DEBUG(lock, "client blocking AST callback handler");
-
- lock_res_and_lock(lock);
- ldlm_set_cbpending(lock);
-
- if (ldlm_is_cancel_on_block(lock))
- ldlm_set_cancel(lock);
-
- do_ast = !lock->l_readers && !lock->l_writers;
- unlock_res_and_lock(lock);
-
- if (do_ast) {
- CDEBUG(D_DLMTRACE,
- "Lock %p already unused, calling callback (%p)\n", lock,
- lock->l_blocking_ast);
- if (lock->l_blocking_ast)
- lock->l_blocking_ast(lock, ld, lock->l_ast_data,
- LDLM_CB_BLOCKING);
- } else {
- CDEBUG(D_DLMTRACE,
- "Lock %p is referenced, will be cancelled later\n",
- lock);
- }
-
- LDLM_DEBUG(lock, "client blocking callback handler END");
- LDLM_LOCK_RELEASE(lock);
-}
-
-/**
- * Callback handler for receiving incoming completion ASTs.
- *
- * This only can happen on client side.
- */
-static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
- struct ldlm_namespace *ns,
- struct ldlm_request *dlm_req,
- struct ldlm_lock *lock)
-{
- int lvb_len;
- LIST_HEAD(ast_list);
- int rc = 0;
-
- LDLM_DEBUG(lock, "client completion callback handler START");
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
- int to = HZ;
-
- while (to > 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(to);
- if (lock->l_granted_mode == lock->l_req_mode ||
- ldlm_is_destroyed(lock))
- break;
- }
- }
-
- lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
- if (lvb_len < 0) {
- LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len);
- rc = lvb_len;
- goto out;
- } else if (lvb_len > 0) {
- if (lock->l_lvb_len > 0) {
- /* for extent lock, lvb contains ost_lvb{}. */
- LASSERT(lock->l_lvb_data);
-
- if (unlikely(lock->l_lvb_len < lvb_len)) {
- LDLM_ERROR(lock,
- "Replied LVB is larger than expectation, expected = %d, replied = %d",
- lock->l_lvb_len, lvb_len);
- rc = -EINVAL;
- goto out;
- }
- } else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
- * variable length
- */
- void *lvb_data;
-
- lvb_data = kzalloc(lvb_len, GFP_NOFS);
- if (!lvb_data) {
- LDLM_ERROR(lock, "No memory: %d.\n", lvb_len);
- rc = -ENOMEM;
- goto out;
- }
-
- lock_res_and_lock(lock);
- LASSERT(!lock->l_lvb_data);
- lock->l_lvb_type = LVB_T_LAYOUT;
- lock->l_lvb_data = lvb_data;
- lock->l_lvb_len = lvb_len;
- unlock_res_and_lock(lock);
- }
- }
-
- lock_res_and_lock(lock);
- if (ldlm_is_destroyed(lock) ||
- lock->l_granted_mode == lock->l_req_mode) {
- /* bug 11300: the lock has already been granted */
- unlock_res_and_lock(lock);
- LDLM_DEBUG(lock, "Double grant race happened");
- rc = 0;
- goto out;
- }
-
- /* If we receive the completion AST before the actual enqueue returned,
- * then we might need to switch lock modes, resources, or extents.
- */
- if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
- lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
- LDLM_DEBUG(lock, "completion AST, new lock mode");
- }
-
- if (lock->l_resource->lr_type != LDLM_PLAIN) {
- ldlm_convert_policy_to_local(req->rq_export,
- dlm_req->lock_desc.l_resource.lr_type,
- &dlm_req->lock_desc.l_policy_data,
- &lock->l_policy_data);
- LDLM_DEBUG(lock, "completion AST, new policy data");
- }
-
- ldlm_resource_unlink_lock(lock);
- if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
- &lock->l_resource->lr_name,
- sizeof(lock->l_resource->lr_name)) != 0) {
- unlock_res_and_lock(lock);
- rc = ldlm_lock_change_resource(ns, lock,
- &dlm_req->lock_desc.l_resource.lr_name);
- if (rc < 0) {
- LDLM_ERROR(lock, "Failed to allocate resource");
- goto out;
- }
- LDLM_DEBUG(lock, "completion AST, new resource");
- CERROR("change resource!\n");
- lock_res_and_lock(lock);
- }
-
- if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
- /* BL_AST locks are not needed in LRU.
- * Let ldlm_cancel_lru() be fast.
- */
- ldlm_lock_remove_from_lru(lock);
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
- LDLM_DEBUG(lock, "completion AST includes blocking AST");
- }
-
- if (lock->l_lvb_len > 0) {
- rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
- lock->l_lvb_data, lvb_len);
- if (rc < 0) {
- unlock_res_and_lock(lock);
- goto out;
- }
- }
-
- ldlm_grant_lock(lock, &ast_list);
- unlock_res_and_lock(lock);
-
- LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
-
- /* Let Enqueue to call osc_lock_upcall() and initialize l_ast_data */
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
-
- ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
-
- LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
- lock);
- goto out;
-
-out:
- if (rc < 0) {
- lock_res_and_lock(lock);
- ldlm_set_failed(lock);
- unlock_res_and_lock(lock);
- wake_up(&lock->l_waitq);
- }
- LDLM_LOCK_RELEASE(lock);
-}
-
-/**
- * Callback handler for receiving incoming glimpse ASTs.
- *
- * This only can happen on client side. After handling the glimpse AST
- * we also consider dropping the lock here if it is unused locally for a
- * long time.
- */
-static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
- struct ldlm_namespace *ns,
- struct ldlm_request *dlm_req,
- struct ldlm_lock *lock)
-{
- int rc = -ENOSYS;
-
- LDLM_DEBUG(lock, "client glimpse AST callback handler");
-
- if (lock->l_glimpse_ast)
- rc = lock->l_glimpse_ast(lock, req);
-
- if (req->rq_repmsg) {
- ptlrpc_reply(req);
- } else {
- req->rq_status = rc;
- ptlrpc_error(req);
- }
-
- lock_res_and_lock(lock);
- if (lock->l_granted_mode == LCK_PW &&
- !lock->l_readers && !lock->l_writers &&
- cfs_time_after(cfs_time_current(),
- cfs_time_add(lock->l_last_used,
- 10 * HZ))) {
- unlock_res_and_lock(lock);
- if (ldlm_bl_to_thread_lock(ns, NULL, lock))
- ldlm_handle_bl_callback(ns, NULL, lock);
-
- return;
- }
- unlock_res_and_lock(lock);
- LDLM_LOCK_RELEASE(lock);
-}
-
-static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
-{
- if (req->rq_no_reply)
- return 0;
-
- req->rq_status = rc;
- if (!req->rq_packed_final) {
- rc = lustre_pack_reply(req, 1, NULL, NULL);
- if (rc)
- return rc;
- }
- return ptlrpc_reply(req);
-}
-
-static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
- enum ldlm_cancel_flags cancel_flags)
-{
- struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
-
- spin_lock(&blp->blp_lock);
- if (blwi->blwi_lock && ldlm_is_discard_data(blwi->blwi_lock)) {
- /* add LDLM_FL_DISCARD_DATA requests to the priority list */
- list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
- } else {
- /* other blocking callbacks are added to the regular list */
- list_add_tail(&blwi->blwi_entry, &blp->blp_list);
- }
- spin_unlock(&blp->blp_lock);
-
- wake_up(&blp->blp_waitq);
-
- /* can not check blwi->blwi_flags as blwi could be already freed in
- * LCF_ASYNC mode
- */
- if (!(cancel_flags & LCF_ASYNC))
- wait_for_completion(&blwi->blwi_comp);
-
- return 0;
-}
-
-static inline void init_blwi(struct ldlm_bl_work_item *blwi,
- struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld,
- struct list_head *cancels, int count,
- struct ldlm_lock *lock,
- enum ldlm_cancel_flags cancel_flags)
-{
- init_completion(&blwi->blwi_comp);
- INIT_LIST_HEAD(&blwi->blwi_head);
-
- if (memory_pressure_get())
- blwi->blwi_mem_pressure = 1;
-
- blwi->blwi_ns = ns;
- blwi->blwi_flags = cancel_flags;
- if (ld)
- blwi->blwi_ld = *ld;
- if (count) {
- list_add(&blwi->blwi_head, cancels);
- list_del_init(cancels);
- blwi->blwi_count = count;
- } else {
- blwi->blwi_lock = lock;
- }
-}
-
-/**
- * Queues a list of locks \a cancels containing \a count locks
- * for later processing by a blocking thread. If \a count is zero,
- * then the lock referenced as \a lock is queued instead.
- *
- * The blocking thread would then call ->l_blocking_ast callback in the lock.
- * If list addition fails an error is returned and caller is supposed to
- * call ->l_blocking_ast itself.
- */
-static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld,
- struct ldlm_lock *lock,
- struct list_head *cancels, int count,
- enum ldlm_cancel_flags cancel_flags)
-{
- if (cancels && count == 0)
- return 0;
-
- if (cancel_flags & LCF_ASYNC) {
- struct ldlm_bl_work_item *blwi;
-
- blwi = kzalloc(sizeof(*blwi), GFP_NOFS);
- if (!blwi)
- return -ENOMEM;
- init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
-
- return __ldlm_bl_to_thread(blwi, cancel_flags);
- } else {
- /* if it is synchronous call do minimum mem alloc, as it could
- * be triggered from kernel shrinker
- */
- struct ldlm_bl_work_item blwi;
-
- memset(&blwi, 0, sizeof(blwi));
- init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
- return __ldlm_bl_to_thread(&blwi, cancel_flags);
- }
-}
-
-int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- struct ldlm_lock *lock)
-{
- return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
-}
-
-int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- struct list_head *cancels, int count,
- enum ldlm_cancel_flags cancel_flags)
-{
- return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
-}
-
-int ldlm_bl_thread_wakeup(void)
-{
- wake_up(&ldlm_state->ldlm_bl_pool->blp_waitq);
- return 0;
-}
-
-/* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
-static int ldlm_handle_setinfo(struct ptlrpc_request *req)
-{
- struct obd_device *obd = req->rq_export->exp_obd;
- char *key;
- void *val;
- int keylen, vallen;
- int rc = -ENOSYS;
-
- DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
-
- req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
-
- key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- if (!key) {
- DEBUG_REQ(D_IOCTL, req, "no set_info key");
- return -EFAULT;
- }
- keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT);
- val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
- if (!val) {
- DEBUG_REQ(D_IOCTL, req, "no set_info val");
- return -EFAULT;
- }
- vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
- RCL_CLIENT);
-
- /* We are responsible for swabbing contents of val */
-
- if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
- /* Pass it on to mdc (the "export" in this case) */
- rc = obd_set_info_async(req->rq_svc_thread->t_env,
- req->rq_export,
- sizeof(KEY_HSM_COPYTOOL_SEND),
- KEY_HSM_COPYTOOL_SEND,
- vallen, val, NULL);
- else
- DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);
-
- return rc;
-}
-
-static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
- const char *msg, int rc,
- const struct lustre_handle *handle)
-{
- DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
- "%s: [nid %s] [rc %d] [lock %#llx]",
- msg, libcfs_id2str(req->rq_peer), rc,
- handle ? handle->cookie : 0);
- if (req->rq_no_reply)
- CWARN("No reply was sent, maybe cause bug 21636.\n");
- else if (rc)
- CWARN("Send reply failed, maybe cause bug 21636.\n");
-}
-
-/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
-static int ldlm_callback_handler(struct ptlrpc_request *req)
-{
- struct ldlm_namespace *ns;
- struct ldlm_request *dlm_req;
- struct ldlm_lock *lock;
- int rc;
-
- /* Requests arrive in sender's byte order. The ptlrpc service
- * handler has already checked and, if necessary, byte-swapped the
- * incoming request message body, but I am responsible for the
- * message buffers.
- */
-
- /* do nothing for sec context finalize */
- if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
- return 0;
-
- req_capsule_init(&req->rq_pill, req, RCL_SERVER);
-
- if (!req->rq_export) {
- rc = ldlm_callback_reply(req, -ENOTCONN);
- ldlm_callback_errmsg(req, "Operate on unconnected server",
- rc, NULL);
- return 0;
- }
-
- LASSERT(req->rq_export->exp_obd);
-
- switch (lustre_msg_get_opc(req->rq_reqmsg)) {
- case LDLM_BL_CALLBACK:
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET)) {
- if (cfs_fail_err)
- ldlm_callback_reply(req, -(int)cfs_fail_err);
- return 0;
- }
- break;
- case LDLM_CP_CALLBACK:
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
- return 0;
- break;
- case LDLM_GL_CALLBACK:
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
- return 0;
- break;
- case LDLM_SET_INFO:
- rc = ldlm_handle_setinfo(req);
- ldlm_callback_reply(req, rc);
- return 0;
- default:
- CERROR("unknown opcode %u\n",
- lustre_msg_get_opc(req->rq_reqmsg));
- ldlm_callback_reply(req, -EPROTO);
- return 0;
- }
-
- ns = req->rq_export->exp_obd->obd_namespace;
- LASSERT(ns);
-
- req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
-
- dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- if (!dlm_req) {
- rc = ldlm_callback_reply(req, -EPROTO);
- ldlm_callback_errmsg(req, "Operate without parameter", rc,
- NULL);
- return 0;
- }
-
- /* Force a known safe race, send a cancel to the server for a lock
- * which the server has already started a blocking callback on.
- */
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
- lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
- rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
- if (rc < 0)
- CERROR("ldlm_cli_cancel: %d\n", rc);
- }
-
- lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
- if (!lock) {
- CDEBUG(D_DLMTRACE,
- "callback on lock %#llx - lock disappeared\n",
- dlm_req->lock_handle[0].cookie);
- rc = ldlm_callback_reply(req, -EINVAL);
- ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
- &dlm_req->lock_handle[0]);
- return 0;
- }
-
- if (ldlm_is_fail_loc(lock) &&
- lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
- OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
-
- /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
- lock_res_and_lock(lock);
- lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
- LDLM_FL_AST_MASK);
- if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
- /* If somebody cancels lock and cache is already dropped,
- * or lock is failed before cp_ast received on client,
- * we can tell the server we have no lock. Otherwise, we
- * should send cancel after dropping the cache.
- */
- if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
- ldlm_is_failed(lock)) {
- LDLM_DEBUG(lock,
- "callback on lock %#llx - lock disappeared",
- dlm_req->lock_handle[0].cookie);
- unlock_res_and_lock(lock);
- LDLM_LOCK_RELEASE(lock);
- rc = ldlm_callback_reply(req, -EINVAL);
- ldlm_callback_errmsg(req, "Operate on stale lock", rc,
- &dlm_req->lock_handle[0]);
- return 0;
- }
- /* BL_AST locks are not needed in LRU.
- * Let ldlm_cancel_lru() be fast.
- */
- ldlm_lock_remove_from_lru(lock);
- ldlm_set_bl_ast(lock);
- }
- unlock_res_and_lock(lock);
-
- /* We want the ost thread to get this reply so that it can respond
- * to ost requests (write cache writeback) that might be triggered
- * in the callback.
- *
- * But we'd also like to be able to indicate in the reply that we're
- * cancelling right now, because it's unused, or have an intent result
- * in the reply, so we might have to push the responsibility for sending
- * the reply down into the AST handlers, alas.
- */
-
- switch (lustre_msg_get_opc(req->rq_reqmsg)) {
- case LDLM_BL_CALLBACK:
- CDEBUG(D_INODE, "blocking ast\n");
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
- if (!ldlm_is_cancel_on_block(lock)) {
- rc = ldlm_callback_reply(req, 0);
- if (req->rq_no_reply || rc)
- ldlm_callback_errmsg(req, "Normal process", rc,
- &dlm_req->lock_handle[0]);
- }
- if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
- ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
- break;
- case LDLM_CP_CALLBACK:
- CDEBUG(D_INODE, "completion ast\n");
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
- ldlm_callback_reply(req, 0);
- ldlm_handle_cp_callback(req, ns, dlm_req, lock);
- break;
- case LDLM_GL_CALLBACK:
- CDEBUG(D_INODE, "glimpse ast\n");
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
- ldlm_handle_gl_callback(req, ns, dlm_req, lock);
- break;
- default:
- LBUG(); /* checked above */
- }
-
- return 0;
-}
-
-static int ldlm_bl_get_work(struct ldlm_bl_pool *blp,
- struct ldlm_bl_work_item **p_blwi,
- struct obd_export **p_exp)
-{
- int num_th = atomic_read(&blp->blp_num_threads);
- struct ldlm_bl_work_item *blwi = NULL;
- static unsigned int num_bl;
-
- spin_lock(&blp->blp_lock);
- /* process a request from the blp_list at least every blp_num_threads */
- if (!list_empty(&blp->blp_list) &&
- (list_empty(&blp->blp_prio_list) || num_bl == 0))
- blwi = list_first_entry(&blp->blp_list,
- struct ldlm_bl_work_item, blwi_entry);
- else
- if (!list_empty(&blp->blp_prio_list))
- blwi = list_first_entry(&blp->blp_prio_list,
- struct ldlm_bl_work_item,
- blwi_entry);
-
- if (blwi) {
- if (++num_bl >= num_th)
- num_bl = 0;
- list_del(&blwi->blwi_entry);
- }
- spin_unlock(&blp->blp_lock);
- *p_blwi = blwi;
-
- return (*p_blwi || *p_exp) ? 1 : 0;
-}
-
-/* This only contains temporary data until the thread starts */
-struct ldlm_bl_thread_data {
- struct ldlm_bl_pool *bltd_blp;
- struct completion bltd_comp;
- int bltd_num;
-};
-
-static int ldlm_bl_thread_main(void *arg);
-
-static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp, bool check_busy)
-{
- struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
- struct task_struct *task;
-
- init_completion(&bltd.bltd_comp);
-
- bltd.bltd_num = atomic_inc_return(&blp->blp_num_threads);
- if (bltd.bltd_num >= blp->blp_max_threads) {
- atomic_dec(&blp->blp_num_threads);
- return 0;
- }
-
- LASSERTF(bltd.bltd_num > 0, "thread num:%d\n", bltd.bltd_num);
- if (check_busy &&
- atomic_read(&blp->blp_busy_threads) < (bltd.bltd_num - 1)) {
- atomic_dec(&blp->blp_num_threads);
- return 0;
- }
-
- task = kthread_run(ldlm_bl_thread_main, &bltd, "ldlm_bl_%02d",
- bltd.bltd_num);
- if (IS_ERR(task)) {
- CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
- bltd.bltd_num, PTR_ERR(task));
- atomic_dec(&blp->blp_num_threads);
- return PTR_ERR(task);
- }
- wait_for_completion(&bltd.bltd_comp);
-
- return 0;
-}
-
-/* Not fatal if racy and have a few too many threads */
-static int ldlm_bl_thread_need_create(struct ldlm_bl_pool *blp,
- struct ldlm_bl_work_item *blwi)
-{
- if (atomic_read(&blp->blp_num_threads) >= blp->blp_max_threads)
- return 0;
-
- if (atomic_read(&blp->blp_busy_threads) <
- atomic_read(&blp->blp_num_threads))
- return 0;
-
- if (blwi && (!blwi->blwi_ns || blwi->blwi_mem_pressure))
- return 0;
-
- return 1;
-}
-
-static int ldlm_bl_thread_blwi(struct ldlm_bl_pool *blp,
- struct ldlm_bl_work_item *blwi)
-{
- if (!blwi->blwi_ns)
- /* added by ldlm_cleanup() */
- return LDLM_ITER_STOP;
-
- if (blwi->blwi_mem_pressure)
- memory_pressure_set();
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL2, 4);
-
- if (blwi->blwi_count) {
- int count;
-
- /*
- * The special case when we cancel locks in lru
- * asynchronously, we pass the list of locks here.
- * Thus locks are marked LDLM_FL_CANCELING, but NOT
- * canceled locally yet.
- */
- count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
- blwi->blwi_count,
- LCF_BL_AST);
- ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
- blwi->blwi_flags);
- } else {
- ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
- blwi->blwi_lock);
- }
- if (blwi->blwi_mem_pressure)
- memory_pressure_clr();
-
- if (blwi->blwi_flags & LCF_ASYNC)
- kfree(blwi);
- else
- complete(&blwi->blwi_comp);
-
- return 0;
-}
-
-/**
- * Main blocking requests processing thread.
- *
- * Callers put locks into its queue by calling ldlm_bl_to_thread.
- * This thread in the end ends up doing actual call to ->l_blocking_ast
- * for queued locks.
- */
-static int ldlm_bl_thread_main(void *arg)
-{
- struct ldlm_bl_pool *blp;
- struct ldlm_bl_thread_data *bltd = arg;
-
- blp = bltd->bltd_blp;
-
- complete(&bltd->bltd_comp);
- /* cannot use bltd after this, it is only on caller's stack */
-
- while (1) {
- struct ldlm_bl_work_item *blwi = NULL;
- struct obd_export *exp = NULL;
- int rc;
-
- rc = ldlm_bl_get_work(blp, &blwi, &exp);
- if (!rc)
- wait_event_idle_exclusive(blp->blp_waitq,
- ldlm_bl_get_work(blp, &blwi,
- &exp));
- atomic_inc(&blp->blp_busy_threads);
-
- if (ldlm_bl_thread_need_create(blp, blwi))
- /* discard the return value, we tried */
- ldlm_bl_thread_start(blp, true);
-
- if (blwi)
- rc = ldlm_bl_thread_blwi(blp, blwi);
-
- atomic_dec(&blp->blp_busy_threads);
-
- if (rc == LDLM_ITER_STOP)
- break;
- }
-
- atomic_dec(&blp->blp_num_threads);
- complete(&blp->blp_comp);
- return 0;
-}
-
-static int ldlm_setup(void);
-static int ldlm_cleanup(void);
-
-int ldlm_get_ref(void)
-{
- int rc = 0;
-
- rc = ptlrpc_inc_ref();
- if (rc)
- return rc;
-
- mutex_lock(&ldlm_ref_mutex);
- if (++ldlm_refcount == 1) {
- rc = ldlm_setup();
- if (rc)
- ldlm_refcount--;
- }
- mutex_unlock(&ldlm_ref_mutex);
-
- if (rc)
- ptlrpc_dec_ref();
-
- return rc;
-}
-
-void ldlm_put_ref(void)
-{
- int rc = 0;
- mutex_lock(&ldlm_ref_mutex);
- if (ldlm_refcount == 1) {
- rc = ldlm_cleanup();
-
- if (rc)
- CERROR("ldlm_cleanup failed: %d\n", rc);
- else
- ldlm_refcount--;
- } else {
- ldlm_refcount--;
- }
- mutex_unlock(&ldlm_ref_mutex);
- if (!rc)
- ptlrpc_dec_ref();
-}
-
-static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", ldlm_cancel_unused_locks_before_replay);
-}
-
-static ssize_t cancel_unused_locks_before_replay_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- ldlm_cancel_unused_locks_before_replay = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(cancel_unused_locks_before_replay);
-
-/* These are for root of /sys/fs/lustre/ldlm */
-static struct attribute *ldlm_attrs[] = {
- &lustre_attr_cancel_unused_locks_before_replay.attr,
- NULL,
-};
-
-static const struct attribute_group ldlm_attr_group = {
- .attrs = ldlm_attrs,
-};
-
-static int ldlm_setup(void)
-{
- static struct ptlrpc_service_conf conf;
- struct ldlm_bl_pool *blp = NULL;
- int rc = 0;
- int i;
-
- if (ldlm_state)
- return -EALREADY;
-
- ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS);
- if (!ldlm_state)
- return -ENOMEM;
-
- ldlm_kobj = kobject_create_and_add("ldlm", lustre_kobj);
- if (!ldlm_kobj) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = sysfs_create_group(ldlm_kobj, &ldlm_attr_group);
- if (rc)
- goto out;
-
- ldlm_ns_kset = kset_create_and_add("namespaces", NULL, ldlm_kobj);
- if (!ldlm_ns_kset) {
- rc = -ENOMEM;
- goto out;
- }
-
- ldlm_svc_kset = kset_create_and_add("services", NULL, ldlm_kobj);
- if (!ldlm_svc_kset) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ldlm_debugfs_setup();
- if (rc != 0)
- goto out;
-
- memset(&conf, 0, sizeof(conf));
- conf = (typeof(conf)) {
- .psc_name = "ldlm_cbd",
- .psc_watchdog_factor = 2,
- .psc_buf = {
- .bc_nbufs = LDLM_CLIENT_NBUFS,
- .bc_buf_size = LDLM_BUFSIZE,
- .bc_req_max_size = LDLM_MAXREQSIZE,
- .bc_rep_max_size = LDLM_MAXREPSIZE,
- .bc_req_portal = LDLM_CB_REQUEST_PORTAL,
- .bc_rep_portal = LDLM_CB_REPLY_PORTAL,
- },
- .psc_thr = {
- .tc_thr_name = "ldlm_cb",
- .tc_thr_factor = LDLM_THR_FACTOR,
- .tc_nthrs_init = LDLM_NTHRS_INIT,
- .tc_nthrs_base = LDLM_NTHRS_BASE,
- .tc_nthrs_max = LDLM_NTHRS_MAX,
- .tc_nthrs_user = ldlm_num_threads,
- .tc_cpu_affinity = 1,
- .tc_ctx_tags = LCT_MD_THREAD | LCT_DT_THREAD,
- },
- .psc_cpt = {
- .cc_pattern = ldlm_cpts,
- },
- .psc_ops = {
- .so_req_handler = ldlm_callback_handler,
- },
- };
- ldlm_state->ldlm_cb_service =
- ptlrpc_register_service(&conf, ldlm_svc_kset,
- ldlm_svc_debugfs_dir);
- if (IS_ERR(ldlm_state->ldlm_cb_service)) {
- CERROR("failed to start service\n");
- rc = PTR_ERR(ldlm_state->ldlm_cb_service);
- ldlm_state->ldlm_cb_service = NULL;
- goto out;
- }
-
- blp = kzalloc(sizeof(*blp), GFP_NOFS);
- if (!blp) {
- rc = -ENOMEM;
- goto out;
- }
- ldlm_state->ldlm_bl_pool = blp;
-
- spin_lock_init(&blp->blp_lock);
- INIT_LIST_HEAD(&blp->blp_list);
- INIT_LIST_HEAD(&blp->blp_prio_list);
- init_waitqueue_head(&blp->blp_waitq);
- atomic_set(&blp->blp_num_threads, 0);
- atomic_set(&blp->blp_busy_threads, 0);
-
- if (ldlm_num_threads == 0) {
- blp->blp_min_threads = LDLM_NTHRS_INIT;
- blp->blp_max_threads = LDLM_NTHRS_MAX;
- } else {
- blp->blp_min_threads = min_t(int, LDLM_NTHRS_MAX,
- max_t(int, LDLM_NTHRS_INIT,
- ldlm_num_threads));
-
- blp->blp_max_threads = blp->blp_min_threads;
- }
-
- for (i = 0; i < blp->blp_min_threads; i++) {
- rc = ldlm_bl_thread_start(blp, false);
- if (rc < 0)
- goto out;
- }
-
- rc = ldlm_pools_init();
- if (rc) {
- CERROR("Failed to initialize LDLM pools: %d\n", rc);
- goto out;
- }
- return 0;
-
- out:
- ldlm_cleanup();
- return rc;
-}
-
-static int ldlm_cleanup(void)
-{
- if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
- !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
- CERROR("ldlm still has namespaces; clean these up first.\n");
- ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
- ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
- return -EBUSY;
- }
-
- ldlm_pools_fini();
-
- if (ldlm_state->ldlm_bl_pool) {
- struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
-
- while (atomic_read(&blp->blp_num_threads) > 0) {
- struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
-
- init_completion(&blp->blp_comp);
-
- spin_lock(&blp->blp_lock);
- list_add_tail(&blwi.blwi_entry, &blp->blp_list);
- wake_up(&blp->blp_waitq);
- spin_unlock(&blp->blp_lock);
-
- wait_for_completion(&blp->blp_comp);
- }
-
- kfree(blp);
- }
-
- if (ldlm_state->ldlm_cb_service)
- ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
-
- if (ldlm_ns_kset)
- kset_unregister(ldlm_ns_kset);
- if (ldlm_svc_kset)
- kset_unregister(ldlm_svc_kset);
- if (ldlm_kobj) {
- sysfs_remove_group(ldlm_kobj, &ldlm_attr_group);
- kobject_put(ldlm_kobj);
- }
-
- ldlm_debugfs_cleanup();
-
- kfree(ldlm_state);
- ldlm_state = NULL;
-
- return 0;
-}
-
-int ldlm_init(void)
-{
- mutex_init(&ldlm_ref_mutex);
- mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
- mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
- ldlm_resource_slab = kmem_cache_create("ldlm_resources",
- sizeof(struct ldlm_resource), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!ldlm_resource_slab)
- return -ENOMEM;
-
- ldlm_lock_slab = kmem_cache_create("ldlm_locks",
- sizeof(struct ldlm_lock), 0,
- SLAB_HWCACHE_ALIGN |
- SLAB_TYPESAFE_BY_RCU, NULL);
- if (!ldlm_lock_slab) {
- kmem_cache_destroy(ldlm_resource_slab);
- return -ENOMEM;
- }
-
- ldlm_interval_slab = kmem_cache_create("interval_node",
- sizeof(struct ldlm_interval),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!ldlm_interval_slab) {
- kmem_cache_destroy(ldlm_resource_slab);
- kmem_cache_destroy(ldlm_lock_slab);
- return -ENOMEM;
- }
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- class_export_dump_hook = ldlm_dump_export_locks;
-#endif
- return 0;
-}
-
-void ldlm_exit(void)
-{
- if (ldlm_refcount)
- CERROR("ldlm_refcount is %d in %s!\n", ldlm_refcount, __func__);
- kmem_cache_destroy(ldlm_resource_slab);
- /* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call
- * synchronize_rcu() to wait a grace period elapsed, so that
- * ldlm_lock_free() get a chance to be called.
- */
- synchronize_rcu();
- kmem_cache_destroy(ldlm_lock_slab);
- kmem_cache_destroy(ldlm_interval_slab);
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
deleted file mode 100644
index 33b5a3f96fcb..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_plain.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-/**
- * This file contains implementation of PLAIN lock type.
- *
- * PLAIN locks are the simplest form of LDLM locking, and are used when
- * there only needs to be a single lock on a resource. This avoids some
- * of the complexity of EXTENT and IBITS lock types, but doesn't allow
- * different "parts" of a resource to be locked concurrently. Example
- * use cases for PLAIN locks include locking of MGS configuration logs
- * and (as of Lustre 2.4) quota records.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <lustre_dlm.h>
-#include <obd_support.h>
-#include <lustre_lib.h>
-
-#include "ldlm_internal.h"
-
-void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
- union ldlm_policy_data *lpolicy)
-{
- /* No policy for plain locks */
-}
-
-void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
- union ldlm_wire_policy_data *wpolicy)
-{
- /* No policy for plain locks */
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
deleted file mode 100644
index 53b8f33e54b5..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ /dev/null
@@ -1,1023 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_pool.c
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-/*
- * Idea of this code is rather simple. Each second, for each server namespace
- * we have SLV - server lock volume which is calculated on current number of
- * granted locks, grant speed for past period, etc - that is, locking load.
- * This SLV number may be thought as a flow definition for simplicity. It is
- * sent to clients with each occasion to let them know what is current load
- * situation on the server. By default, at the beginning, SLV on server is
- * set max value which is calculated as the following: allow to one client
- * have all locks of limit ->pl_limit for 10h.
- *
- * Next, on clients, number of cached locks is not limited artificially in any
- * way as it was before. Instead, client calculates CLV, that is, client lock
- * volume for each lock and compares it with last SLV from the server. CLV is
- * calculated as the number of locks in LRU * lock live time in seconds. If
- * CLV > SLV - lock is canceled.
- *
- * Client has LVF, that is, lock volume factor which regulates how much
- * sensitive client should be about last SLV from server. The higher LVF is the
- * more locks will be canceled on client. Default value for it is 1. Setting LVF
- * to 2 means that client will cancel locks 2 times faster.
- *
- * Locks on a client will be canceled more intensively in these cases:
- * (1) if SLV is smaller, that is, load is higher on the server;
- * (2) client has a lot of locks (the more locks are held by client, the bigger
- * chances that some of them should be canceled);
- * (3) client has old locks (taken some time ago);
- *
- * Thus, according to flow paradigm that we use for better understanding SLV,
- * CLV is the volume of particle in flow described by SLV. According to this,
- * if flow is getting thinner, more and more particles become outside of it and
- * as particles are locks, they should be canceled.
- *
- * General idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
- * Andreas Dilger (adilger@clusterfs.com) proposed few nice ideas like using
- * LVF and many cleanups. Flow definition to allow more easy understanding of
- * the logic belongs to Nikita Danilov (nikita@clusterfs.com) as well as many
- * cleanups and fixes. And design and implementation are done by Yury Umanets
- * (umka@clusterfs.com).
- *
- * Glossary for terms used:
- *
- * pl_limit - Number of allowed locks in pool. Applies to server and client
- * side (tunable);
- *
- * pl_granted - Number of granted locks (calculated);
- * pl_grant_rate - Number of granted locks for last T (calculated);
- * pl_cancel_rate - Number of canceled locks for last T (calculated);
- * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
- * pl_grant_plan - Planned number of granted locks for next T (calculated);
- * pl_server_lock_volume - Current server lock volume (calculated);
- *
- * As it may be seen from list above, we have few possible tunables which may
- * affect behavior much. They all may be modified via sysfs. However, they also
- * give a possibility for constructing few pre-defined behavior policies. If
- * none of predefines is suitable for a working pattern being used, new one may
- * be "constructed" via sysfs tunables.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <lustre_dlm.h>
-#include <cl_object.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include "ldlm_internal.h"
-
-/*
- * 50 ldlm locks for 1MB of RAM.
- */
-#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
-
-/*
- * Maximal possible grant step plan in %.
- */
-#define LDLM_POOL_MAX_GSP (30)
-
-/*
- * Minimal possible grant step plan in %.
- */
-#define LDLM_POOL_MIN_GSP (1)
-
-/*
- * This controls the speed of reaching LDLM_POOL_MAX_GSP
- * with increasing thread period.
- */
-#define LDLM_POOL_GSP_STEP_SHIFT (2)
-
-/*
- * LDLM_POOL_GSP% of all locks is default GP.
- */
-#define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)
-
-/*
- * Max age for locks on clients.
- */
-#define LDLM_POOL_MAX_AGE (36000)
-
-/*
- * The granularity of SLV calculation.
- */
-#define LDLM_POOL_SLV_SHIFT (10)
-
-static inline __u64 dru(__u64 val, __u32 shift, int round_up)
-{
- return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
-}
-
-static inline __u64 ldlm_pool_slv_max(__u32 L)
-{
- /*
- * Allow to have all locks for 1 client for 10 hrs.
- * Formula is the following: limit * 10h / 1 client.
- */
- __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
- return lim;
-}
-
-static inline __u64 ldlm_pool_slv_min(__u32 L)
-{
- return 1;
-}
-
-enum {
- LDLM_POOL_FIRST_STAT = 0,
- LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
- LDLM_POOL_GRANT_STAT,
- LDLM_POOL_CANCEL_STAT,
- LDLM_POOL_GRANT_RATE_STAT,
- LDLM_POOL_CANCEL_RATE_STAT,
- LDLM_POOL_GRANT_PLAN_STAT,
- LDLM_POOL_SLV_STAT,
- LDLM_POOL_SHRINK_REQTD_STAT,
- LDLM_POOL_SHRINK_FREED_STAT,
- LDLM_POOL_RECALC_STAT,
- LDLM_POOL_TIMING_STAT,
- LDLM_POOL_LAST_STAT
-};
-
-/**
- * Calculates suggested grant_step in % of available locks for passed
- * \a period. This is later used in grant_plan calculations.
- */
-static inline int ldlm_pool_t2gsp(unsigned int t)
-{
- /*
- * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP
- * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
- *
- * How this will affect execution is the following:
- *
- * - for thread period 1s we will have grant_step 1% which good from
- * pov of taking some load off from server and push it out to clients.
- * This is like that because 1% for grant_step means that server will
- * not allow clients to get lots of locks in short period of time and
- * keep all old locks in their caches. Clients will always have to
- * get some locks back if they want to take some new;
- *
- * - for thread period 10s (which is default) we will have 23% which
- * means that clients will have enough of room to take some new locks
- * without getting some back. All locks from this 23% which were not
- * taken by clients in current period will contribute in SLV growing.
- * SLV growing means more locks cached on clients until limit or grant
- * plan is reached.
- */
- return LDLM_POOL_MAX_GSP -
- ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
- (t >> LDLM_POOL_GSP_STEP_SHIFT));
-}
-
-/**
- * Recalculates next stats on passed \a pl.
- *
- * \pre ->pl_lock is locked.
- */
-static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
-{
- int grant_plan = pl->pl_grant_plan;
- __u64 slv = pl->pl_server_lock_volume;
- int granted = atomic_read(&pl->pl_granted);
- int grant_rate = atomic_read(&pl->pl_grant_rate);
- int cancel_rate = atomic_read(&pl->pl_cancel_rate);
-
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
- slv);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
- granted);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
- grant_rate);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
- grant_plan);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
- cancel_rate);
-}
-
-/**
- * Sets SLV and Limit from container_of(pl, struct ldlm_namespace,
- * ns_pool)->ns_obd tp passed \a pl.
- */
-static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
-{
- struct obd_device *obd;
-
- /*
- * Get new SLV and Limit from obd which is updated with coming
- * RPCs.
- */
- obd = container_of(pl, struct ldlm_namespace,
- ns_pool)->ns_obd;
- read_lock(&obd->obd_pool_lock);
- pl->pl_server_lock_volume = obd->obd_pool_slv;
- atomic_set(&pl->pl_limit, obd->obd_pool_limit);
- read_unlock(&obd->obd_pool_lock);
-}
-
-/**
- * Recalculates client size pool \a pl according to current SLV and Limit.
- */
-static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
-{
- time64_t recalc_interval_sec;
- int ret;
-
- recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec < pl->pl_recalc_period)
- return 0;
-
- spin_lock(&pl->pl_lock);
- /*
- * Check if we need to recalc lists now.
- */
- recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec < pl->pl_recalc_period) {
- spin_unlock(&pl->pl_lock);
- return 0;
- }
-
- /*
- * Make sure that pool knows last SLV and Limit from obd.
- */
- ldlm_cli_pool_pop_slv(pl);
-
- spin_unlock(&pl->pl_lock);
-
- /*
- * Do not cancel locks in case lru resize is disabled for this ns.
- */
- if (!ns_connect_lru_resize(container_of(pl, struct ldlm_namespace,
- ns_pool))) {
- ret = 0;
- goto out;
- }
-
- /*
- * In the time of canceling locks on client we do not need to maintain
- * sharp timing, we only want to cancel locks asap according to new SLV.
- * It may be called when SLV has changed much, this is why we do not
- * take into account pl->pl_recalc_time here.
- */
- ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool),
- 0, LCF_ASYNC, LDLM_LRU_FLAG_LRUR);
-
-out:
- spin_lock(&pl->pl_lock);
- /*
- * Time of LRU resizing might be longer than period,
- * so update after LRU resizing rather than before it.
- */
- pl->pl_recalc_time = ktime_get_real_seconds();
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
- recalc_interval_sec);
- spin_unlock(&pl->pl_lock);
- return ret;
-}
-
-/**
- * This function is main entry point for memory pressure handling on client
- * side. Main goal of this function is to cancel some number of locks on
- * passed \a pl according to \a nr and \a gfp_mask.
- */
-static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
- int nr, gfp_t gfp_mask)
-{
- struct ldlm_namespace *ns;
- int unused;
-
- ns = container_of(pl, struct ldlm_namespace, ns_pool);
-
- /*
- * Do not cancel locks in case lru resize is disabled for this ns.
- */
- if (!ns_connect_lru_resize(ns))
- return 0;
-
- /*
- * Make sure that pool knows last SLV and Limit from obd.
- */
- ldlm_cli_pool_pop_slv(pl);
-
- spin_lock(&ns->ns_lock);
- unused = ns->ns_nr_unused;
- spin_unlock(&ns->ns_lock);
-
- if (nr == 0)
- return (unused / 100) * sysctl_vfs_cache_pressure;
- else
- return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK);
-}
-
-static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
- .po_recalc = ldlm_cli_pool_recalc,
- .po_shrink = ldlm_cli_pool_shrink
-};
-
-/**
- * Pool recalc wrapper. Will call either client or server pool recalc callback
- * depending what pool \a pl is used.
- */
-static int ldlm_pool_recalc(struct ldlm_pool *pl)
-{
- u32 recalc_interval_sec;
- int count;
-
- recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec > 0) {
- spin_lock(&pl->pl_lock);
- recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
-
- if (recalc_interval_sec > 0) {
- /*
- * Update pool statistics every 1s.
- */
- ldlm_pool_recalc_stats(pl);
-
- /*
- * Zero out all rates and speed for the last period.
- */
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- }
- spin_unlock(&pl->pl_lock);
- }
-
- if (pl->pl_ops->po_recalc) {
- count = pl->pl_ops->po_recalc(pl);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
- count);
- }
-
- recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() +
- pl->pl_recalc_period;
- if (recalc_interval_sec <= 0) {
- /* DEBUG: should be re-removed after LU-4536 is fixed */
- CDEBUG(D_DLMTRACE,
- "%s: Negative interval(%ld), too short period(%ld)\n",
- pl->pl_name, (long)recalc_interval_sec,
- (long)pl->pl_recalc_period);
-
- /* Prevent too frequent recalculation. */
- recalc_interval_sec = 1;
- }
-
- return recalc_interval_sec;
-}
-
-/*
- * Pool shrink wrapper. Will call either client or server pool recalc callback
- * depending what pool pl is used. When nr == 0, just return the number of
- * freeable locks. Otherwise, return the number of canceled locks.
- */
-static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
-{
- int cancel = 0;
-
- if (pl->pl_ops->po_shrink) {
- cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
- if (nr > 0) {
- lprocfs_counter_add(pl->pl_stats,
- LDLM_POOL_SHRINK_REQTD_STAT,
- nr);
- lprocfs_counter_add(pl->pl_stats,
- LDLM_POOL_SHRINK_FREED_STAT,
- cancel);
- CDEBUG(D_DLMTRACE,
- "%s: request to shrink %d locks, shrunk %d\n",
- pl->pl_name, nr, cancel);
- }
- }
- return cancel;
-}
-
-static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
-{
- int granted, grant_rate, cancel_rate;
- int grant_speed, lvf;
- struct ldlm_pool *pl = m->private;
- __u64 slv, clv;
- __u32 limit;
-
- spin_lock(&pl->pl_lock);
- slv = pl->pl_server_lock_volume;
- clv = pl->pl_client_lock_volume;
- limit = atomic_read(&pl->pl_limit);
- granted = atomic_read(&pl->pl_granted);
- grant_rate = atomic_read(&pl->pl_grant_rate);
- cancel_rate = atomic_read(&pl->pl_cancel_rate);
- grant_speed = grant_rate - cancel_rate;
- lvf = atomic_read(&pl->pl_lock_volume_factor);
- spin_unlock(&pl->pl_lock);
-
- seq_printf(m, "LDLM pool state (%s):\n"
- " SLV: %llu\n"
- " CLV: %llu\n"
- " LVF: %d\n",
- pl->pl_name, slv, clv, lvf);
-
- seq_printf(m, " GR: %d\n CR: %d\n GS: %d\n"
- " G: %d\n L: %d\n",
- grant_rate, cancel_rate, grant_speed,
- granted, limit);
-
- return 0;
-}
-
-LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
-
-static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
- pl_kobj);
-
- int grant_speed;
-
- spin_lock(&pl->pl_lock);
- /* serialize with ldlm_pool_recalc */
- grant_speed = atomic_read(&pl->pl_grant_rate) -
- atomic_read(&pl->pl_cancel_rate);
- spin_unlock(&pl->pl_lock);
- return sprintf(buf, "%d\n", grant_speed);
-}
-LUSTRE_RO_ATTR(grant_speed);
-
-LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
-LUSTRE_RO_ATTR(grant_plan);
-
-LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
-LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
-LUSTRE_RW_ATTR(recalc_period);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
-LUSTRE_RO_ATTR(server_lock_volume);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
-LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
-LUSTRE_RW_ATTR(limit);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
-LUSTRE_RO_ATTR(granted);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
-LUSTRE_RO_ATTR(cancel_rate);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
-LUSTRE_RO_ATTR(grant_rate);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
-LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
-LUSTRE_RW_ATTR(lock_volume_factor);
-
-#define LDLM_POOL_ADD_VAR(name, var, ops) \
- do { \
- snprintf(var_name, MAX_STRING_SIZE, #name); \
- pool_vars[0].data = var; \
- pool_vars[0].fops = ops; \
- ldebugfs_add_vars(pl->pl_debugfs_entry, pool_vars, NULL);\
- } while (0)
-
-/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
-static struct attribute *ldlm_pl_attrs[] = {
- &lustre_attr_grant_speed.attr,
- &lustre_attr_grant_plan.attr,
- &lustre_attr_recalc_period.attr,
- &lustre_attr_server_lock_volume.attr,
- &lustre_attr_limit.attr,
- &lustre_attr_granted.attr,
- &lustre_attr_cancel_rate.attr,
- &lustre_attr_grant_rate.attr,
- &lustre_attr_lock_volume_factor.attr,
- NULL,
-};
-
-static void ldlm_pl_release(struct kobject *kobj)
-{
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
- pl_kobj);
- complete(&pl->pl_kobj_unregister);
-}
-
-static struct kobj_type ldlm_pl_ktype = {
- .default_attrs = ldlm_pl_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = ldlm_pl_release,
-};
-
-static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
-{
- struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace,
- ns_pool);
- int err;
-
- init_completion(&pl->pl_kobj_unregister);
- err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
- "pool");
-
- return err;
-}
-
-static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
-{
- struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace,
- ns_pool);
- struct dentry *debugfs_ns_parent;
- struct lprocfs_vars pool_vars[2];
- char *var_name = NULL;
- int rc = 0;
-
- var_name = kzalloc(MAX_STRING_SIZE + 1, GFP_NOFS);
- if (!var_name)
- return -ENOMEM;
-
- debugfs_ns_parent = ns->ns_debugfs_entry;
- if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
- CERROR("%s: debugfs entry is not initialized\n",
- ldlm_ns_name(ns));
- rc = -EINVAL;
- goto out_free_name;
- }
- pl->pl_debugfs_entry = ldebugfs_register("pool", debugfs_ns_parent,
- NULL, NULL);
- if (IS_ERR(pl->pl_debugfs_entry)) {
- CERROR("LdebugFS failed in ldlm-pool-init\n");
- rc = PTR_ERR(pl->pl_debugfs_entry);
- pl->pl_debugfs_entry = NULL;
- goto out_free_name;
- }
-
- var_name[MAX_STRING_SIZE] = '\0';
- memset(pool_vars, 0, sizeof(pool_vars));
- pool_vars[0].name = var_name;
-
- LDLM_POOL_ADD_VAR(state, pl, &lprocfs_pool_state_fops);
-
- pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
- LDLM_POOL_FIRST_STAT, 0);
- if (!pl->pl_stats) {
- rc = -ENOMEM;
- goto out_free_name;
- }
-
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "granted", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "grant", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "cancel", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "grant_rate", "locks/s");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "cancel_rate", "locks/s");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "grant_plan", "locks/s");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "slv", "slv");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "shrink_request", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "shrink_freed", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "recalc_freed", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "recalc_timing", "sec");
- rc = ldebugfs_register_stats(pl->pl_debugfs_entry, "stats",
- pl->pl_stats);
-
-out_free_name:
- kfree(var_name);
- return rc;
-}
-
-static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
-{
- kobject_put(&pl->pl_kobj);
- wait_for_completion(&pl->pl_kobj_unregister);
-}
-
-static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
-{
- if (pl->pl_stats) {
- lprocfs_free_stats(&pl->pl_stats);
- pl->pl_stats = NULL;
- }
- if (pl->pl_debugfs_entry) {
- ldebugfs_remove(&pl->pl_debugfs_entry);
- pl->pl_debugfs_entry = NULL;
- }
-}
-
-int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, enum ldlm_side client)
-{
- int rc;
-
- spin_lock_init(&pl->pl_lock);
- atomic_set(&pl->pl_granted, 0);
- pl->pl_recalc_time = ktime_get_real_seconds();
- atomic_set(&pl->pl_lock_volume_factor, 1);
-
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
-
- snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
- ldlm_ns_name(ns), idx);
-
- atomic_set(&pl->pl_limit, 1);
- pl->pl_server_lock_volume = 0;
- pl->pl_ops = &ldlm_cli_pool_ops;
- pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
- pl->pl_client_lock_volume = 0;
- rc = ldlm_pool_debugfs_init(pl);
- if (rc)
- return rc;
-
- rc = ldlm_pool_sysfs_init(pl);
- if (rc)
- return rc;
-
- CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
-
- return rc;
-}
-
-void ldlm_pool_fini(struct ldlm_pool *pl)
-{
- ldlm_pool_sysfs_fini(pl);
- ldlm_pool_debugfs_fini(pl);
-
- /*
- * Pool should not be used after this point. We can't free it here as
- * it lives in struct ldlm_namespace, but still interested in catching
- * any abnormal using cases.
- */
- POISON(pl, 0x5a, sizeof(*pl));
-}
-
-/**
- * Add new taken ldlm lock \a lock into pool \a pl accounting.
- */
-void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
-{
- /*
- * FLOCK locks are special in a sense that they are almost never
- * cancelled, instead special kind of lock is used to drop them.
- * also there is no LRU for flock locks, so no point in tracking
- * them anyway.
- */
- if (lock->l_resource->lr_type == LDLM_FLOCK)
- return;
-
- atomic_inc(&pl->pl_granted);
- atomic_inc(&pl->pl_grant_rate);
- lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
- /*
- * Do not do pool recalc for client side as all locks which
- * potentially may be canceled has already been packed into
- * enqueue/cancel rpc. Also we do not want to run out of stack
- * with too long call paths.
- */
-}
-
-/**
- * Remove ldlm lock \a lock from pool \a pl accounting.
- */
-void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
-{
- /*
- * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
- */
- if (lock->l_resource->lr_type == LDLM_FLOCK)
- return;
-
- LASSERT(atomic_read(&pl->pl_granted) > 0);
- atomic_dec(&pl->pl_granted);
- atomic_inc(&pl->pl_cancel_rate);
-
- lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
-}
-
-/**
- * Returns current \a pl SLV.
- *
- * \pre ->pl_lock is not locked.
- */
-__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
-{
- __u64 slv;
-
- spin_lock(&pl->pl_lock);
- slv = pl->pl_server_lock_volume;
- spin_unlock(&pl->pl_lock);
- return slv;
-}
-
-/**
- * Sets passed \a clv to \a pl.
- *
- * \pre ->pl_lock is not locked.
- */
-void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
-{
- spin_lock(&pl->pl_lock);
- pl->pl_client_lock_volume = clv;
- spin_unlock(&pl->pl_lock);
-}
-
-/**
- * Returns current LVF from \a pl.
- */
-__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
-{
- return atomic_read(&pl->pl_lock_volume_factor);
-}
-
-static int ldlm_pool_granted(struct ldlm_pool *pl)
-{
- return atomic_read(&pl->pl_granted);
-}
-
-/*
- * count locks from all namespaces (if possible). Returns number of
- * cached locks.
- */
-static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
-{
- unsigned long total = 0;
- int nr_ns;
- struct ldlm_namespace *ns;
- struct ldlm_namespace *ns_old = NULL; /* loop detection */
-
- if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
- return 0;
-
- CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
- client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
-
- /*
- * Find out how many resources we may release.
- */
- for (nr_ns = ldlm_namespace_nr_read(client);
- nr_ns > 0; nr_ns--) {
- mutex_lock(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_unlock(ldlm_namespace_lock(client));
- return 0;
- }
- ns = ldlm_namespace_first_locked(client);
-
- if (ns == ns_old) {
- mutex_unlock(ldlm_namespace_lock(client));
- break;
- }
-
- if (ldlm_ns_empty(ns)) {
- ldlm_namespace_move_to_inactive_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
- continue;
- }
-
- if (!ns_old)
- ns_old = ns;
-
- ldlm_namespace_get(ns);
- ldlm_namespace_move_to_active_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
- total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
- ldlm_namespace_put(ns);
- }
-
- return total;
-}
-
-static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
- gfp_t gfp_mask)
-{
- unsigned long freed = 0;
- int tmp, nr_ns;
- struct ldlm_namespace *ns;
-
- if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
- return -1;
-
- /*
- * Shrink at least ldlm_namespace_nr_read(client) namespaces.
- */
- for (tmp = nr_ns = ldlm_namespace_nr_read(client);
- tmp > 0; tmp--) {
- int cancel, nr_locks;
-
- /*
- * Do not call shrink under ldlm_namespace_lock(client)
- */
- mutex_lock(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_unlock(ldlm_namespace_lock(client));
- break;
- }
- ns = ldlm_namespace_first_locked(client);
- ldlm_namespace_get(ns);
- ldlm_namespace_move_to_active_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
-
- nr_locks = ldlm_pool_granted(&ns->ns_pool);
- /*
- * We use to shrink propotionally but with new shrinker API,
- * we lost the total number of freeable locks.
- */
- cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
- freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
- ldlm_namespace_put(ns);
- }
- /*
- * we only decrease the SLV in server pools shrinker, return
- * SHRINK_STOP to kernel to avoid needless loop. LU-1128
- */
- return freed;
-}
-
-static unsigned long ldlm_pools_cli_count(struct shrinker *s,
- struct shrink_control *sc)
-{
- return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
-}
-
-static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
- struct shrink_control *sc)
-{
- return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
- sc->gfp_mask);
-}
-
-static void ldlm_pools_recalc(struct work_struct *ws);
-static DECLARE_DELAYED_WORK(ldlm_recalc_pools, ldlm_pools_recalc);
-
-static void ldlm_pools_recalc(struct work_struct *ws)
-{
- enum ldlm_side client = LDLM_NAMESPACE_CLIENT;
- struct ldlm_namespace *ns;
- struct ldlm_namespace *ns_old = NULL;
- /* seconds of sleep if no active namespaces */
- int time = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
- int nr;
-
- /*
- * Recalc at least ldlm_namespace_nr_read(client) namespaces.
- */
- for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
- int skip;
- /*
- * Lock the list, get first @ns in the list, getref, move it
- * to the tail, unlock and call pool recalc. This way we avoid
- * calling recalc under @ns lock what is really good as we get
- * rid of potential deadlock on client nodes when canceling
- * locks synchronously.
- */
- mutex_lock(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_unlock(ldlm_namespace_lock(client));
- break;
- }
- ns = ldlm_namespace_first_locked(client);
-
- if (ns_old == ns) { /* Full pass complete */
- mutex_unlock(ldlm_namespace_lock(client));
- break;
- }
-
- /* We got an empty namespace, need to move it back to inactive
- * list.
- * The race with parallel resource creation is fine:
- * - If they do namespace_get before our check, we fail the
- * check and they move this item to the end of the list anyway
- * - If we do the check and then they do namespace_get, then
- * we move the namespace to inactive and they will move
- * it back to active (synchronised by the lock, so no clash
- * there).
- */
- if (ldlm_ns_empty(ns)) {
- ldlm_namespace_move_to_inactive_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
- continue;
- }
-
- if (!ns_old)
- ns_old = ns;
-
- spin_lock(&ns->ns_lock);
- /*
- * skip ns which is being freed, and we don't want to increase
- * its refcount again, not even temporarily. bz21519 & LU-499.
- */
- if (ns->ns_stopping) {
- skip = 1;
- } else {
- skip = 0;
- ldlm_namespace_get(ns);
- }
- spin_unlock(&ns->ns_lock);
-
- ldlm_namespace_move_to_active_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
-
- /*
- * After setup is done - recalc the pool.
- */
- if (!skip) {
- int ttime = ldlm_pool_recalc(&ns->ns_pool);
-
- if (ttime < time)
- time = ttime;
-
- ldlm_namespace_put(ns);
- }
- }
-
- /* Wake up the blocking threads from time to time. */
- ldlm_bl_thread_wakeup();
-
- schedule_delayed_work(&ldlm_recalc_pools, time * HZ);
-}
-
-static int ldlm_pools_thread_start(void)
-{
- schedule_delayed_work(&ldlm_recalc_pools, 0);
-
- return 0;
-}
-
-static void ldlm_pools_thread_stop(void)
-{
- cancel_delayed_work_sync(&ldlm_recalc_pools);
-}
-
-static struct shrinker ldlm_pools_cli_shrinker = {
- .count_objects = ldlm_pools_cli_count,
- .scan_objects = ldlm_pools_cli_scan,
- .seeks = DEFAULT_SEEKS,
-};
-
-int ldlm_pools_init(void)
-{
- int rc;
-
- rc = ldlm_pools_thread_start();
- if (!rc)
- rc = register_shrinker(&ldlm_pools_cli_shrinker);
-
- return rc;
-}
-
-void ldlm_pools_fini(void)
-{
- unregister_shrinker(&ldlm_pools_cli_shrinker);
-
- ldlm_pools_thread_stop();
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
deleted file mode 100644
index c3c9186b74ce..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ /dev/null
@@ -1,2080 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/**
- * This file contains Asynchronous System Trap (AST) handlers and related
- * LDLM request-processing routines.
- *
- * An AST is a callback issued on a lock when its state is changed. There are
- * several different types of ASTs (callbacks) registered for each lock:
- *
- * - completion AST: when a lock is enqueued by some process, but cannot be
- * granted immediately due to other conflicting locks on the same resource,
- * the completion AST is sent to notify the caller when the lock is
- * eventually granted
- *
- * - blocking AST: when a lock is granted to some process, if another process
- * enqueues a conflicting (blocking) lock on a resource, a blocking AST is
- * sent to notify the holder(s) of the lock(s) of the conflicting lock
- * request. The lock holder(s) must release their lock(s) on that resource in
- * a timely manner or be evicted by the server.
- *
- * - glimpse AST: this is used when a process wants information about a lock
- * (i.e. the lock value block (LVB)) but does not necessarily require holding
- * the lock. If the resource is locked, the lock holder(s) are sent glimpse
- * ASTs and the LVB is returned to the caller, and lock holder(s) may CANCEL
- * their lock(s) if they are idle. If the resource is not locked, the server
- * may grant the lock.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <lustre_errno.h>
-#include <lustre_dlm.h>
-#include <obd_class.h>
-#include <obd.h>
-
-#include "ldlm_internal.h"
-
-unsigned int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
-module_param(ldlm_enqueue_min, uint, 0644);
-MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");
-
-/* in client side, whether the cached locks will be canceled before replay */
-unsigned int ldlm_cancel_unused_locks_before_replay = 1;
-
-struct ldlm_async_args {
- struct lustre_handle lock_handle;
-};
-
-/**
- * ldlm_request_bufsize
- *
- * @count: number of ldlm handles
- * @type: ldlm opcode
- *
- * If opcode=LDLM_ENQUEUE, 1 slot is already occupied,
- * LDLM_LOCKREQ_HANDLE -1 slots are available.
- * Otherwise, LDLM_LOCKREQ_HANDLE slots are available.
- *
- * Return: size of the request buffer
- */
-static int ldlm_request_bufsize(int count, int type)
-{
- int avail = LDLM_LOCKREQ_HANDLES;
-
- if (type == LDLM_ENQUEUE)
- avail -= LDLM_ENQUEUE_CANCEL_OFF;
-
- if (count > avail)
- avail = (count - avail) * sizeof(struct lustre_handle);
- else
- avail = 0;
-
- return sizeof(struct ldlm_request) + avail;
-}
-
-static void ldlm_expired_completion_wait(struct ldlm_lock *lock, __u32 conn_cnt)
-{
- struct obd_import *imp;
- struct obd_device *obd;
-
- if (!lock->l_conn_export) {
- static unsigned long next_dump, last_dump;
-
- LDLM_ERROR(lock,
- "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
- (s64)lock->l_last_activity,
- (s64)(ktime_get_real_seconds() -
- lock->l_last_activity));
- if (cfs_time_after(cfs_time_current(), next_dump)) {
- last_dump = next_dump;
- next_dump = cfs_time_shift(300);
- ldlm_namespace_dump(D_DLMTRACE,
- ldlm_lock_to_ns(lock));
- if (last_dump == 0)
- libcfs_debug_dumplog();
- }
- return;
- }
-
- obd = lock->l_conn_export->exp_obd;
- imp = obd->u.cli.cl_import;
- ptlrpc_fail_import(imp, conn_cnt);
- LDLM_ERROR(lock,
- "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
- (s64)lock->l_last_activity,
- (s64)(ktime_get_real_seconds() - lock->l_last_activity),
- obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
-}
-
-/**
- * Calculate the Completion timeout (covering enqueue, BL AST, data flush,
- * lock cancel, and their replies). Used for lock completion timeout on the
- * client side.
- *
- * \param[in] lock lock which is waiting the completion callback
- *
- * \retval timeout in seconds to wait for the server reply
- */
-/* We use the same basis for both server side and client side functions
- * from a single node.
- */
-static unsigned int ldlm_cp_timeout(struct ldlm_lock *lock)
-{
- unsigned int timeout;
-
- if (AT_OFF)
- return obd_timeout;
-
- /*
- * Wait a long time for enqueue - server may have to callback a
- * lock from another client. Server will evict the other client if it
- * doesn't respond reasonably, and then give us the lock.
- */
- timeout = at_get(ldlm_lock_to_ns_at(lock));
- return max(3 * timeout, ldlm_enqueue_min);
-}
-
-/**
- * Helper function for ldlm_completion_ast(), updating timings when lock is
- * actually granted.
- */
-static int ldlm_completion_tail(struct ldlm_lock *lock, void *data)
-{
- long delay;
- int result = 0;
-
- if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
- LDLM_DEBUG(lock, "client-side enqueue: destroyed");
- result = -EIO;
- } else if (!data) {
- LDLM_DEBUG(lock, "client-side enqueue: granted");
- } else {
- /* Take into AT only CP RPC, not immediately granted locks */
- delay = ktime_get_real_seconds() - lock->l_last_activity;
- LDLM_DEBUG(lock, "client-side enqueue: granted after %lds",
- delay);
-
- /* Update our time estimate */
- at_measured(ldlm_lock_to_ns_at(lock), delay);
- }
- return result;
-}
-
-/**
- * Implementation of ->l_completion_ast() for a client, that doesn't wait
- * until lock is granted. Suitable for locks enqueued through ptlrpcd, of
- * other threads that cannot block for long.
- */
-int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
-{
- if (flags == LDLM_FL_WAIT_NOREPROC) {
- LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
- return 0;
- }
-
- if (!(flags & LDLM_FL_BLOCKED_MASK)) {
- wake_up(&lock->l_waitq);
- return ldlm_completion_tail(lock, data);
- }
-
- LDLM_DEBUG(lock,
- "client-side enqueue returned a blocked lock, going forward");
- return 0;
-}
-EXPORT_SYMBOL(ldlm_completion_ast_async);
-
-/**
- * Generic LDLM "completion" AST. This is called in several cases:
- *
- * - when a reply to an ENQUEUE RPC is received from the server
- * (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at
- * this point (determined by flags);
- *
- * - when LDLM_CP_CALLBACK RPC comes to client to notify it that lock has
- * been granted;
- *
- * - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock
- * gets correct lvb;
- *
- * - to force all locks when resource is destroyed (cleanup_resource());
- *
- * - during lock conversion (not used currently).
- *
- * If lock is not granted in the first case, this function waits until second
- * or penultimate cases happen in some other thread.
- *
- */
-int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
-{
- /* XXX ALLOCATE - 160 bytes */
- struct obd_device *obd;
- struct obd_import *imp = NULL;
- __u32 timeout;
- __u32 conn_cnt = 0;
- int rc = 0;
-
- if (flags == LDLM_FL_WAIT_NOREPROC) {
- LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
- goto noreproc;
- }
-
- if (!(flags & LDLM_FL_BLOCKED_MASK)) {
- wake_up(&lock->l_waitq);
- return 0;
- }
-
- LDLM_DEBUG(lock,
- "client-side enqueue returned a blocked lock, sleeping");
-
-noreproc:
-
- obd = class_exp2obd(lock->l_conn_export);
-
- /* if this is a local lock, then there is no import */
- if (obd)
- imp = obd->u.cli.cl_import;
-
- timeout = ldlm_cp_timeout(lock);
-
- lock->l_last_activity = ktime_get_real_seconds();
-
- if (imp) {
- spin_lock(&imp->imp_lock);
- conn_cnt = imp->imp_conn_cnt;
- spin_unlock(&imp->imp_lock);
- }
- if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
- OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
- ldlm_set_fail_loc(lock);
- rc = -EINTR;
- } else {
- /* Go to sleep until the lock is granted or canceled. */
- if (!ldlm_is_no_timeout(lock)) {
- /* Wait uninterruptible for a while first */
- rc = wait_event_idle_timeout(lock->l_waitq,
- is_granted_or_cancelled(lock),
- timeout * HZ);
- if (rc == 0)
- ldlm_expired_completion_wait(lock, conn_cnt);
- }
- /* Now wait abortable */
- if (rc == 0)
- rc = l_wait_event_abortable(lock->l_waitq,
- is_granted_or_cancelled(lock));
- else
- rc = 0;
- }
-
- if (rc) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
- rc);
- return rc;
- }
-
- return ldlm_completion_tail(lock, data);
-}
-EXPORT_SYMBOL(ldlm_completion_ast);
-
-static void failed_lock_cleanup(struct ldlm_namespace *ns,
- struct ldlm_lock *lock, int mode)
-{
- int need_cancel = 0;
-
- /* Set a flag to prevent us from sending a CANCEL (bug 407) */
- lock_res_and_lock(lock);
- /* Check that lock is not granted or failed, we might race. */
- if ((lock->l_req_mode != lock->l_granted_mode) &&
- !ldlm_is_failed(lock)) {
- /* Make sure that this lock will not be found by raced
- * bl_ast and -EINVAL reply is sent to server anyways.
- * bug 17645
- */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
- LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
- need_cancel = 1;
- }
- unlock_res_and_lock(lock);
-
- if (need_cancel)
- LDLM_DEBUG(lock,
- "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
- else
- LDLM_DEBUG(lock, "lock was granted or failed in race");
-
- /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
- * from llite/file.c/ll_file_flock().
- */
- /* This code makes for the fact that we do not have blocking handler on
- * a client for flock locks. As such this is the place where we must
- * completely kill failed locks. (interrupted and those that
- * were waiting to be granted when server evicted us.
- */
- if (lock->l_resource->lr_type == LDLM_FLOCK) {
- lock_res_and_lock(lock);
- if (!ldlm_is_destroyed(lock)) {
- ldlm_resource_unlink_lock(lock);
- ldlm_lock_decref_internal_nolock(lock, mode);
- ldlm_lock_destroy_nolock(lock);
- }
- unlock_res_and_lock(lock);
- } else {
- ldlm_lock_decref_internal(lock, mode);
- }
-}
-
-/**
- * Finishing portion of client lock enqueue code.
- *
- * Called after receiving reply from server.
- */
-int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- enum ldlm_type type, __u8 with_policy,
- enum ldlm_mode mode,
- __u64 *flags, void *lvb, __u32 lvb_len,
- const struct lustre_handle *lockh, int rc)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- int is_replay = *flags & LDLM_FL_REPLAY;
- struct ldlm_lock *lock;
- struct ldlm_reply *reply;
- int cleanup_phase = 1;
-
- lock = ldlm_handle2lock(lockh);
- /* ldlm_cli_enqueue is holding a reference on this lock. */
- if (!lock) {
- LASSERT(type == LDLM_FLOCK);
- return -ENOLCK;
- }
-
- LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len),
- "lvb_len = %d, l_lvb_len = %d\n", lvb_len, lock->l_lvb_len);
-
- if (rc != ELDLM_OK) {
- LASSERT(!is_replay);
- LDLM_DEBUG(lock, "client-side enqueue END (%s)",
- rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
-
- if (rc != ELDLM_LOCK_ABORTED)
- goto cleanup;
- }
-
- /* Before we return, swab the reply */
- reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if (!reply) {
- rc = -EPROTO;
- goto cleanup;
- }
-
- if (lvb_len > 0) {
- int size = 0;
-
- size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
- RCL_SERVER);
- if (size < 0) {
- LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", size);
- rc = size;
- goto cleanup;
- } else if (unlikely(size > lvb_len)) {
- LDLM_ERROR(lock,
- "Replied LVB is larger than expectation, expected = %d, replied = %d",
- lvb_len, size);
- rc = -EINVAL;
- goto cleanup;
- }
- lvb_len = size;
- }
-
- if (rc == ELDLM_LOCK_ABORTED) {
- if (lvb_len > 0 && lvb)
- rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lvb, lvb_len);
- if (rc == 0)
- rc = ELDLM_LOCK_ABORTED;
- goto cleanup;
- }
-
- /* lock enqueued on the server */
- cleanup_phase = 0;
-
- lock_res_and_lock(lock);
- /* Key change rehash lock in per-export hash with new key */
- if (exp->exp_lock_hash) {
- /* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp()
- */
- /* coverity[overrun-buffer-val] */
- cfs_hash_rehash_key(exp->exp_lock_hash,
- &lock->l_remote_handle,
- &reply->lock_handle,
- &lock->l_exp_hash);
- } else {
- lock->l_remote_handle = reply->lock_handle;
- }
-
- *flags = ldlm_flags_from_wire(reply->lock_flags);
- lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_FL_INHERIT_MASK);
- unlock_res_and_lock(lock);
-
- CDEBUG(D_INFO, "local: %p, remote cookie: %#llx, flags: 0x%llx\n",
- lock, reply->lock_handle.cookie, *flags);
-
- /* If enqueue returned a blocked lock but the completion handler has
- * already run, then it fixed up the resource and we don't need to do it
- * again.
- */
- if ((*flags) & LDLM_FL_LOCK_CHANGED) {
- int newmode = reply->lock_desc.l_req_mode;
-
- LASSERT(!is_replay);
- if (newmode && newmode != lock->l_req_mode) {
- LDLM_DEBUG(lock, "server returned different mode %s",
- ldlm_lockname[newmode]);
- lock->l_req_mode = newmode;
- }
-
- if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
- &lock->l_resource->lr_name)) {
- CDEBUG(D_INFO,
- "remote intent success, locking " DLDLMRES " instead of " DLDLMRES "\n",
- PLDLMRES(&reply->lock_desc.l_resource),
- PLDLMRES(lock->l_resource));
-
- rc = ldlm_lock_change_resource(ns, lock,
- &reply->lock_desc.l_resource.lr_name);
- if (rc || !lock->l_resource) {
- rc = -ENOMEM;
- goto cleanup;
- }
- LDLM_DEBUG(lock, "client-side enqueue, new resource");
- }
- if (with_policy)
- if (!(type == LDLM_IBITS &&
- !(exp_connect_flags(exp) & OBD_CONNECT_IBITS)))
- /* We assume lock type cannot change on server*/
- ldlm_convert_policy_to_local(exp,
- lock->l_resource->lr_type,
- &reply->lock_desc.l_policy_data,
- &lock->l_policy_data);
- if (type != LDLM_PLAIN)
- LDLM_DEBUG(lock,
- "client-side enqueue, new policy data");
- }
-
- if ((*flags) & LDLM_FL_AST_SENT) {
- lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
- unlock_res_and_lock(lock);
- LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
- }
-
- /* If the lock has already been granted by a completion AST, don't
- * clobber the LVB with an older one.
- */
- if (lvb_len > 0) {
- /* We must lock or a racing completion might update lvb without
- * letting us know and we'll clobber the correct value.
- * Cannot unlock after the check either, as that still leaves
- * a tiny window for completion to get in
- */
- lock_res_and_lock(lock);
- if (lock->l_req_mode != lock->l_granted_mode)
- rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lock->l_lvb_data, lvb_len);
- unlock_res_and_lock(lock);
- if (rc < 0) {
- cleanup_phase = 1;
- goto cleanup;
- }
- }
-
- if (!is_replay) {
- rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
- if (lock->l_completion_ast) {
- int err = lock->l_completion_ast(lock, *flags, NULL);
-
- if (!rc)
- rc = err;
- if (rc)
- cleanup_phase = 1;
- }
- }
-
- if (lvb_len > 0 && lvb) {
- /* Copy the LVB here, and not earlier, because the completion
- * AST (if any) can override what we got in the reply
- */
- memcpy(lvb, lock->l_lvb_data, lvb_len);
- }
-
- LDLM_DEBUG(lock, "client-side enqueue END");
-cleanup:
- if (cleanup_phase == 1 && rc)
- failed_lock_cleanup(ns, lock, mode);
- /* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */
- LDLM_LOCK_PUT(lock);
- LDLM_LOCK_RELEASE(lock);
- return rc;
-}
-EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
-
-/**
- * Estimate number of lock handles that would fit into request of given
- * size. PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
- * a single page on the send/receive side. XXX: 512 should be changed to
- * more adequate value.
- */
-static inline int ldlm_req_handles_avail(int req_size, int off)
-{
- int avail;
-
- avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
- if (likely(avail >= 0))
- avail /= (int)sizeof(struct lustre_handle);
- else
- avail = 0;
- avail += LDLM_LOCKREQ_HANDLES - off;
-
- return avail;
-}
-
-static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
- enum req_location loc,
- int off)
-{
- u32 size = req_capsule_msg_size(pill, loc);
-
- return ldlm_req_handles_avail(size, off);
-}
-
-static inline int ldlm_format_handles_avail(struct obd_import *imp,
- const struct req_format *fmt,
- enum req_location loc, int off)
-{
- u32 size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
-
- return ldlm_req_handles_avail(size, off);
-}
-
-/**
- * Cancel LRU locks and pack them into the enqueue request. Pack there the given
- * \a count locks in \a cancels.
- *
- * This is to be called by functions preparing their own requests that
- * might contain lists of locks to cancel in addition to actual operation
- * that needs to be performed.
- */
-int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
- int version, int opc, int canceloff,
- struct list_head *cancels, int count)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct req_capsule *pill = &req->rq_pill;
- struct ldlm_request *dlm = NULL;
- int flags, avail, to_free, pack = 0;
- LIST_HEAD(head);
- int rc;
-
- if (!cancels)
- cancels = &head;
- if (ns_connect_cancelset(ns)) {
- /* Estimate the amount of available space in the request. */
- req_capsule_filled_sizes(pill, RCL_CLIENT);
- avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
-
- flags = ns_connect_lru_resize(ns) ?
- LDLM_LRU_FLAG_LRUR_NO_WAIT : LDLM_LRU_FLAG_AGED;
- to_free = !ns_connect_lru_resize(ns) &&
- opc == LDLM_ENQUEUE ? 1 : 0;
-
- /* Cancel LRU locks here _only_ if the server supports
- * EARLY_CANCEL. Otherwise we have to send extra CANCEL
- * RPC, which will make us slower.
- */
- if (avail > count)
- count += ldlm_cancel_lru_local(ns, cancels, to_free,
- avail - count, 0, flags);
- if (avail > count)
- pack = count;
- else
- pack = avail;
- req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
- ldlm_request_bufsize(pack, opc));
- }
-
- rc = ptlrpc_request_pack(req, version, opc);
- if (rc) {
- ldlm_lock_list_put(cancels, l_bl_ast, count);
- return rc;
- }
-
- if (ns_connect_cancelset(ns)) {
- if (canceloff) {
- dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
- LASSERT(dlm);
- /* Skip first lock handler in ldlm_request_pack(),
- * this method will increment @lock_count according
- * to the lock handle amount actually written to
- * the buffer.
- */
- dlm->lock_count = canceloff;
- }
- /* Pack into the request @pack lock handles. */
- ldlm_cli_cancel_list(cancels, pack, req, 0);
- /* Prepare and send separate cancel RPC for others. */
- ldlm_cli_cancel_list(cancels, count - pack, NULL, 0);
- } else {
- ldlm_lock_list_put(cancels, l_bl_ast, count);
- }
- return 0;
-}
-EXPORT_SYMBOL(ldlm_prep_elc_req);
-
-int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
- struct list_head *cancels, int count)
-{
- return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
- LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
-}
-EXPORT_SYMBOL(ldlm_prep_enqueue_req);
-
-static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp,
- int lvb_len)
-{
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-/**
- * Client-side lock enqueue.
- *
- * If a request has some specific initialisation it is passed in \a reqp,
- * otherwise it is created in ldlm_cli_enqueue.
- *
- * Supports sync and async requests, pass \a async flag accordingly. If a
- * request was created in ldlm_cli_enqueue and it is the async request,
- * pass it to the caller in \a reqp.
- */
-int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
- struct ldlm_enqueue_info *einfo,
- const struct ldlm_res_id *res_id,
- union ldlm_policy_data const *policy, __u64 *flags,
- void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
- struct lustre_handle *lockh, int async)
-{
- struct ldlm_namespace *ns;
- struct ldlm_lock *lock;
- struct ldlm_request *body;
- int is_replay = *flags & LDLM_FL_REPLAY;
- int req_passed_in = 1;
- int rc, err;
- struct ptlrpc_request *req;
-
- ns = exp->exp_obd->obd_namespace;
-
- /* If we're replaying this lock, just check some invariants.
- * If we're creating a new lock, get everything all setup nicely.
- */
- if (is_replay) {
- lock = ldlm_handle2lock_long(lockh, 0);
- LASSERT(lock);
- LDLM_DEBUG(lock, "client-side enqueue START");
- LASSERT(exp == lock->l_conn_export);
- } else {
- const struct ldlm_callback_suite cbs = {
- .lcs_completion = einfo->ei_cb_cp,
- .lcs_blocking = einfo->ei_cb_bl,
- .lcs_glimpse = einfo->ei_cb_gl
- };
- lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
- einfo->ei_mode, &cbs, einfo->ei_cbdata,
- lvb_len, lvb_type);
- if (IS_ERR(lock))
- return PTR_ERR(lock);
- /* for the local lock, add the reference */
- ldlm_lock_addref_internal(lock, einfo->ei_mode);
- ldlm_lock2handle(lock, lockh);
- if (policy)
- lock->l_policy_data = *policy;
-
- if (einfo->ei_type == LDLM_EXTENT) {
- /* extent lock without policy is a bug */
- if (!policy)
- LBUG();
-
- lock->l_req_extent = policy->l_extent;
- }
- LDLM_DEBUG(lock, "client-side enqueue START, flags %llx",
- *flags);
- }
-
- lock->l_conn_export = exp;
- lock->l_export = NULL;
- lock->l_blocking_ast = einfo->ei_cb_bl;
- lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
- lock->l_last_activity = ktime_get_real_seconds();
-
- /* lock not sent to server yet */
- if (!reqp || !*reqp) {
- req = ldlm_enqueue_pack(exp, lvb_len);
- if (IS_ERR(req)) {
- failed_lock_cleanup(ns, lock, einfo->ei_mode);
- LDLM_LOCK_RELEASE(lock);
- return PTR_ERR(req);
- }
-
- req_passed_in = 0;
- if (reqp)
- *reqp = req;
- } else {
- int len;
-
- req = *reqp;
- len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ,
- RCL_CLIENT);
- LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n",
- DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
- }
-
- /* Dump lock data into the request buffer */
- body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- ldlm_lock2desc(lock, &body->lock_desc);
- body->lock_flags = ldlm_flags_to_wire(*flags);
- body->lock_handle[0] = *lockh;
-
- if (async) {
- LASSERT(reqp);
- return 0;
- }
-
- LDLM_DEBUG(lock, "sending request");
-
- rc = ptlrpc_queue_wait(req);
-
- err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
- einfo->ei_mode, flags, lvb, lvb_len,
- lockh, rc);
-
- /* If ldlm_cli_enqueue_fini did not find the lock, we need to free
- * one reference that we took
- */
- if (err == -ENOLCK)
- LDLM_LOCK_RELEASE(lock);
- else
- rc = err;
-
- if (!req_passed_in && req) {
- ptlrpc_req_finished(req);
- if (reqp)
- *reqp = NULL;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(ldlm_cli_enqueue);
-
-/**
- * Cancel locks locally.
- * Returns:
- * \retval LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL RPC to the server
- * \retval LDLM_FL_CANCELING otherwise;
- * \retval LDLM_FL_BL_AST if there is a need for a separate CANCEL RPC.
- */
-static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
-{
- __u64 rc = LDLM_FL_LOCAL_ONLY;
-
- if (lock->l_conn_export) {
- bool local_only;
-
- LDLM_DEBUG(lock, "client-side cancel");
- /* Set this flag to prevent others from getting new references*/
- lock_res_and_lock(lock);
- ldlm_set_cbpending(lock);
- local_only = !!(lock->l_flags &
- (LDLM_FL_LOCAL_ONLY | LDLM_FL_CANCEL_ON_BLOCK));
- ldlm_cancel_callback(lock);
- rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
- unlock_res_and_lock(lock);
-
- if (local_only) {
- CDEBUG(D_DLMTRACE,
- "not sending request (at caller's instruction)\n");
- rc = LDLM_FL_LOCAL_ONLY;
- }
- ldlm_lock_cancel(lock);
- } else {
- LDLM_ERROR(lock, "Trying to cancel local lock");
- LBUG();
- }
-
- return rc;
-}
-
-/**
- * Pack \a count locks in \a head into ldlm_request buffer of request \a req.
- */
-static void ldlm_cancel_pack(struct ptlrpc_request *req,
- struct list_head *head, int count)
-{
- struct ldlm_request *dlm;
- struct ldlm_lock *lock;
- int max, packed = 0;
-
- dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- LASSERT(dlm);
-
- /* Check the room in the request buffer. */
- max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
- sizeof(struct ldlm_request);
- max /= sizeof(struct lustre_handle);
- max += LDLM_LOCKREQ_HANDLES;
- LASSERT(max >= dlm->lock_count + count);
-
- /* XXX: it would be better to pack lock handles grouped by resource.
- * so that the server cancel would call filter_lvbo_update() less
- * frequently.
- */
- list_for_each_entry(lock, head, l_bl_ast) {
- if (!count--)
- break;
- LASSERT(lock->l_conn_export);
- /* Pack the lock handle to the given request buffer. */
- LDLM_DEBUG(lock, "packing");
- dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle;
- packed++;
- }
- CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
-}
-
-/**
- * Prepare and send a batched cancel RPC. It will include \a count lock
- * handles of locks given in \a cancels list.
- */
-static int ldlm_cli_cancel_req(struct obd_export *exp,
- struct list_head *cancels,
- int count, enum ldlm_cancel_flags flags)
-{
- struct ptlrpc_request *req = NULL;
- struct obd_import *imp;
- int free, sent = 0;
- int rc = 0;
-
- LASSERT(exp);
- LASSERT(count > 0);
-
- CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
-
- if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
- return count;
-
- free = ldlm_format_handles_avail(class_exp2cliimp(exp),
- &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
- if (count > free)
- count = free;
-
- while (1) {
- imp = class_exp2cliimp(exp);
- if (!imp || imp->imp_invalid) {
- CDEBUG(D_DLMTRACE,
- "skipping cancel on invalid import %p\n", imp);
- return count;
- }
-
- req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
- if (!req) {
- rc = -ENOMEM;
- goto out;
- }
-
- req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
- ldlm_request_bufsize(count, LDLM_CANCEL));
-
- rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL);
- if (rc) {
- ptlrpc_request_free(req);
- goto out;
- }
-
- req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
- req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- ldlm_cancel_pack(req, cancels, count);
-
- ptlrpc_request_set_replen(req);
- if (flags & LCF_ASYNC) {
- ptlrpcd_add_req(req);
- sent = count;
- goto out;
- }
-
- rc = ptlrpc_queue_wait(req);
- if (rc == LUSTRE_ESTALE) {
- CDEBUG(D_DLMTRACE,
- "client/server (nid %s) out of sync -- not fatal\n",
- libcfs_nid2str(req->rq_import->
- imp_connection->c_peer.nid));
- rc = 0;
- } else if (rc == -ETIMEDOUT && /* check there was no reconnect*/
- req->rq_import_generation == imp->imp_generation) {
- ptlrpc_req_finished(req);
- continue;
- } else if (rc != ELDLM_OK) {
- /* -ESHUTDOWN is common on umount */
- CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
- "Got rc %d from cancel RPC: canceling anyway\n",
- rc);
- break;
- }
- sent = count;
- break;
- }
-
- ptlrpc_req_finished(req);
-out:
- return sent ? sent : rc;
-}
-
-static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
-{
- return &imp->imp_obd->obd_namespace->ns_pool;
-}
-
-/**
- * Update client's OBD pool related fields with new SLV and Limit from \a req.
- */
-int ldlm_cli_update_pool(struct ptlrpc_request *req)
-{
- struct obd_device *obd;
- __u64 new_slv;
- __u32 new_limit;
-
- if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
- !imp_connect_lru_resize(req->rq_import))) {
- /*
- * Do nothing for corner cases.
- */
- return 0;
- }
-
- /* In some cases RPC may contain SLV and limit zeroed out. This
- * is the case when server does not support LRU resize feature.
- * This is also possible in some recovery cases when server-side
- * reqs have no reference to the OBD export and thus access to
- * server-side namespace is not possible.
- */
- if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
- lustre_msg_get_limit(req->rq_repmsg) == 0) {
- DEBUG_REQ(D_HA, req,
- "Zero SLV or Limit found (SLV: %llu, Limit: %u)",
- lustre_msg_get_slv(req->rq_repmsg),
- lustre_msg_get_limit(req->rq_repmsg));
- return 0;
- }
-
- new_limit = lustre_msg_get_limit(req->rq_repmsg);
- new_slv = lustre_msg_get_slv(req->rq_repmsg);
- obd = req->rq_import->imp_obd;
-
- /* Set new SLV and limit in OBD fields to make them accessible
- * to the pool thread. We do not access obd_namespace and pool
- * directly here as there is no reliable way to make sure that
- * they are still alive at cleanup time. Evil races are possible
- * which may cause Oops at that time.
- */
- write_lock(&obd->obd_pool_lock);
- obd->obd_pool_slv = new_slv;
- obd->obd_pool_limit = new_limit;
- write_unlock(&obd->obd_pool_lock);
-
- return 0;
-}
-
-/**
- * Client side lock cancel.
- *
- * Lock must not have any readers or writers by this time.
- */
-int ldlm_cli_cancel(const struct lustre_handle *lockh,
- enum ldlm_cancel_flags cancel_flags)
-{
- struct obd_export *exp;
- int avail, flags, count = 1;
- __u64 rc = 0;
- struct ldlm_namespace *ns;
- struct ldlm_lock *lock;
- LIST_HEAD(cancels);
-
- lock = ldlm_handle2lock_long(lockh, 0);
- if (!lock) {
- LDLM_DEBUG_NOLOCK("lock is already being destroyed");
- return 0;
- }
-
- lock_res_and_lock(lock);
- /* Lock is being canceled and the caller doesn't want to wait */
- if (ldlm_is_canceling(lock) && (cancel_flags & LCF_ASYNC)) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_RELEASE(lock);
- return 0;
- }
-
- ldlm_set_canceling(lock);
- unlock_res_and_lock(lock);
-
- rc = ldlm_cli_cancel_local(lock);
- if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
- LDLM_LOCK_RELEASE(lock);
- return 0;
- }
- /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
- * RPC which goes to canceld portal, so we can cancel other LRU locks
- * here and send them all as one LDLM_CANCEL RPC.
- */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, &cancels);
-
- exp = lock->l_conn_export;
- if (exp_connect_cancelset(exp)) {
- avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
- &RQF_LDLM_CANCEL,
- RCL_CLIENT, 0);
- LASSERT(avail > 0);
-
- ns = ldlm_lock_to_ns(lock);
- flags = ns_connect_lru_resize(ns) ?
- LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED;
- count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
- LCF_BL_AST, flags);
- }
- ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
- return 0;
-}
-EXPORT_SYMBOL(ldlm_cli_cancel);
-
-/**
- * Locally cancel up to \a count locks in list \a cancels.
- * Return the number of cancelled locks.
- */
-int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- enum ldlm_cancel_flags flags)
-{
- LIST_HEAD(head);
- struct ldlm_lock *lock, *next;
- int left = 0, bl_ast = 0;
- __u64 rc;
-
- left = count;
- list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
- if (left-- == 0)
- break;
-
- if (flags & LCF_LOCAL) {
- rc = LDLM_FL_LOCAL_ONLY;
- ldlm_lock_cancel(lock);
- } else {
- rc = ldlm_cli_cancel_local(lock);
- }
- /* Until we have compound requests and can send LDLM_CANCEL
- * requests batched with generic RPCs, we need to send cancels
- * with the LDLM_FL_BL_AST flag in a separate RPC from
- * the one being generated now.
- */
- if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
- LDLM_DEBUG(lock, "Cancel lock separately");
- list_del_init(&lock->l_bl_ast);
- list_add(&lock->l_bl_ast, &head);
- bl_ast++;
- continue;
- }
- if (rc == LDLM_FL_LOCAL_ONLY) {
- /* CANCEL RPC should not be sent to server. */
- list_del_init(&lock->l_bl_ast);
- LDLM_LOCK_RELEASE(lock);
- count--;
- }
- }
- if (bl_ast > 0) {
- count -= bl_ast;
- ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
- }
-
- return count;
-}
-
-/**
- * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
- * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
- * readahead requests, ...)
- */
-static enum ldlm_policy_res
-ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
- int unused, int added, int count)
-{
- enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;
-
- /* don't check added & count since we want to process all locks
- * from unused list.
- * It's fine to not take lock to access lock->l_resource since
- * the lock has already been granted so it won't change.
- */
- switch (lock->l_resource->lr_type) {
- case LDLM_EXTENT:
- case LDLM_IBITS:
- if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
- break;
- /* fall through */
- default:
- result = LDLM_POLICY_SKIP_LOCK;
- lock_res_and_lock(lock);
- ldlm_set_skipped(lock);
- unlock_res_and_lock(lock);
- break;
- }
-
- return result;
-}
-
-/**
- * Callback function for LRU-resize policy. Decides whether to keep
- * \a lock in LRU for current \a LRU size \a unused, added in current
- * scan \a added and number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- unsigned long cur = cfs_time_current();
- struct ldlm_pool *pl = &ns->ns_pool;
- __u64 slv, lvf, lv;
- unsigned long la;
-
- /* Stop LRU processing when we reach past @count or have checked all
- * locks in LRU.
- */
- if (count && added >= count)
- return LDLM_POLICY_KEEP_LOCK;
-
- /*
- * Despite of the LV, It doesn't make sense to keep the lock which
- * is unused for ns_max_age time.
- */
- if (cfs_time_after(cfs_time_current(),
- cfs_time_add(lock->l_last_used, ns->ns_max_age)))
- return LDLM_POLICY_CANCEL_LOCK;
-
- slv = ldlm_pool_get_slv(pl);
- lvf = ldlm_pool_get_lvf(pl);
- la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
- lv = lvf * la * unused;
-
- /* Inform pool about current CLV to see it via debugfs. */
- ldlm_pool_set_clv(pl, lv);
-
- /* Stop when SLV is not yet come from server or lv is smaller than
- * it is.
- */
- if (slv == 0 || lv < slv)
- return LDLM_POLICY_KEEP_LOCK;
-
- return LDLM_POLICY_CANCEL_LOCK;
-}
-
-/**
- * Callback function for debugfs used policy. Makes decision whether to keep
- * \a lock in LRU for current \a LRU size \a unused, added in current scan \a
- * added and number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- /* Stop LRU processing when we reach past @count or have checked all
- * locks in LRU.
- */
- return (added >= count) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
-
-/**
- * Callback function for aged policy. Makes decision whether to keep \a lock in
- * LRU for current LRU size \a unused, added in current scan \a added and
- * number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- if ((added >= count) &&
- time_before(cfs_time_current(),
- cfs_time_add(lock->l_last_used, ns->ns_max_age)))
- return LDLM_POLICY_KEEP_LOCK;
-
- return LDLM_POLICY_CANCEL_LOCK;
-}
-
-static enum ldlm_policy_res
-ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- enum ldlm_policy_res result;
-
- result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
- if (result == LDLM_POLICY_KEEP_LOCK)
- return result;
-
- return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count);
-}
-
-/**
- * Callback function for default policy. Makes decision whether to keep \a lock
- * in LRU for current LRU size \a unused, added in current scan \a added and
- * number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static enum ldlm_policy_res
-ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
- int unused, int added, int count)
-{
- /* Stop LRU processing when we reach past count or have checked all
- * locks in LRU.
- */
- return (added >= count) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
-
-typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
- struct ldlm_namespace *,
- struct ldlm_lock *, int,
- int, int);
-
-static ldlm_cancel_lru_policy_t
-ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
-{
- if (flags & LDLM_LRU_FLAG_NO_WAIT)
- return ldlm_cancel_no_wait_policy;
-
- if (ns_connect_lru_resize(ns)) {
- if (flags & LDLM_LRU_FLAG_SHRINK)
- /* We kill passed number of old locks. */
- return ldlm_cancel_passed_policy;
- else if (flags & LDLM_LRU_FLAG_LRUR)
- return ldlm_cancel_lrur_policy;
- else if (flags & LDLM_LRU_FLAG_PASSED)
- return ldlm_cancel_passed_policy;
- else if (flags & LDLM_LRU_FLAG_LRUR_NO_WAIT)
- return ldlm_cancel_lrur_no_wait_policy;
- } else {
- if (flags & LDLM_LRU_FLAG_AGED)
- return ldlm_cancel_aged_policy;
- }
-
- return ldlm_cancel_default_policy;
-}
-
-/**
- * - Free space in LRU for \a count new locks,
- * redundant unused locks are canceled locally;
- * - also cancel locally unused aged locks;
- * - do not cancel more than \a max locks;
- * - GET the found locks and add them into the \a cancels list.
- *
- * A client lock can be added to the l_bl_ast list only when it is
- * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing
- * CANCEL. There are the following use cases:
- * ldlm_cancel_resource_local(), ldlm_cancel_lru_local() and
- * ldlm_cli_cancel(), which check and set this flag properly. As any
- * attempt to cancel a lock rely on this flag, l_bl_ast list is accessed
- * later without any special locking.
- *
- * Calling policies for enabled LRU resize:
- * ----------------------------------------
- * flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to
- * cancel not more than \a count locks;
- *
- * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located
- * at the beginning of LRU list);
- *
- * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according
- * to memory pressure policy function;
- *
- * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to
- * "aged policy".
- *
- * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
- * (typically before replaying locks) w/o
- * sending any RPCs or waiting for any
- * outstanding RPC to complete.
- */
-static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
- struct list_head *cancels, int count, int max,
- int flags)
-{
- ldlm_cancel_lru_policy_t pf;
- struct ldlm_lock *lock, *next;
- int added = 0, unused, remained;
- int no_wait = flags &
- (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);
-
- spin_lock(&ns->ns_lock);
- unused = ns->ns_nr_unused;
- remained = unused;
-
- if (!ns_connect_lru_resize(ns))
- count += unused - ns->ns_max_unused;
-
- pf = ldlm_cancel_lru_policy(ns, flags);
- LASSERT(pf);
-
- while (!list_empty(&ns->ns_unused_list)) {
- enum ldlm_policy_res result;
- time_t last_use = 0;
-
- /* all unused locks */
- if (remained-- <= 0)
- break;
-
- /* For any flags, stop scanning if @max is reached. */
- if (max && added >= max)
- break;
-
- list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
- l_lru) {
- /* No locks which got blocking requests. */
- LASSERT(!ldlm_is_bl_ast(lock));
-
- if (no_wait && ldlm_is_skipped(lock))
- /* already processed */
- continue;
-
- last_use = lock->l_last_used;
- if (last_use == cfs_time_current())
- continue;
-
- /* Somebody is already doing CANCEL. No need for this
- * lock in LRU, do not traverse it again.
- */
- if (!ldlm_is_canceling(lock))
- break;
-
- ldlm_lock_remove_from_lru_nolock(lock);
- }
- if (&lock->l_lru == &ns->ns_unused_list)
- break;
-
- LDLM_LOCK_GET(lock);
- spin_unlock(&ns->ns_lock);
- lu_ref_add(&lock->l_reference, __func__, current);
-
- /* Pass the lock through the policy filter and see if it
- * should stay in LRU.
- *
- * Even for shrinker policy we stop scanning if
- * we find a lock that should stay in the cache.
- * We should take into account lock age anyway
- * as a new lock is a valuable resource even if
- * it has a low weight.
- *
- * That is, for shrinker policy we drop only
- * old locks, but additionally choose them by
- * their weight. Big extent locks will stay in
- * the cache.
- */
- result = pf(ns, lock, unused, added, count);
- if (result == LDLM_POLICY_KEEP_LOCK) {
- lu_ref_del(&lock->l_reference,
- __func__, current);
- LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_lock);
- break;
- }
- if (result == LDLM_POLICY_SKIP_LOCK) {
- lu_ref_del(&lock->l_reference,
- __func__, current);
- LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_lock);
- continue;
- }
-
- lock_res_and_lock(lock);
- /* Check flags again under the lock. */
- if (ldlm_is_canceling(lock) ||
- (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) {
- /* Another thread is removing lock from LRU, or
- * somebody is already doing CANCEL, or there
- * is a blocking request which will send cancel
- * by itself, or the lock is no longer unused or
- * the lock has been used since the pf() call and
- * pages could be put under it.
- */
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference,
- __func__, current);
- LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_lock);
- continue;
- }
- LASSERT(!lock->l_readers && !lock->l_writers);
-
- /* If we have chosen to cancel this lock voluntarily, we
- * better send cancel notification to server, so that it
- * frees appropriate state. This might lead to a race
- * where while we are doing cancel here, server is also
- * silently cancelling this lock.
- */
- ldlm_clear_cancel_on_block(lock);
-
- /* Setting the CBPENDING flag is a little misleading,
- * but prevents an important race; namely, once
- * CBPENDING is set, the lock can accumulate no more
- * readers/writers. Since readers and writers are
- * already zero here, ldlm_lock_decref() won't see
- * this flag and call l_blocking_ast
- */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
-
- /* We can't re-add to l_lru as it confuses the
- * refcounting in ldlm_lock_remove_from_lru() if an AST
- * arrives after we drop lr_lock below. We use l_bl_ast
- * and can't use l_pending_chain as it is used both on
- * server and client nevertheless bug 5666 says it is
- * used only on server
- */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference, __func__, current);
- spin_lock(&ns->ns_lock);
- added++;
- unused--;
- }
- spin_unlock(&ns->ns_lock);
- return added;
-}
-
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
- struct list_head *cancels, int count, int max,
- enum ldlm_cancel_flags cancel_flags, int flags)
-{
- int added;
-
- added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
- if (added <= 0)
- return added;
- return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
-}
-
-/**
- * Cancel at least \a nr locks from given namespace LRU.
- *
- * When called with LCF_ASYNC the blocking callback will be handled
- * in a thread and this function will return after the thread has been
- * asked to call the callback. When called with LCF_ASYNC the blocking
- * callback will be performed in this function.
- */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
- enum ldlm_cancel_flags cancel_flags,
- int flags)
-{
- LIST_HEAD(cancels);
- int count, rc;
-
- /* Just prepare the list of locks, do not actually cancel them yet.
- * Locks are cancelled later in a separate thread.
- */
- count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
- rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
- if (rc == 0)
- return count;
-
- return 0;
-}
-
-/**
- * Find and cancel locally unused locks found on resource, matched to the
- * given policy, mode. GET the found locks and add them into the \a cancels
- * list.
- */
-int ldlm_cancel_resource_local(struct ldlm_resource *res,
- struct list_head *cancels,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode, __u64 lock_flags,
- enum ldlm_cancel_flags cancel_flags,
- void *opaque)
-{
- struct ldlm_lock *lock;
- int count = 0;
-
- lock_res(res);
- list_for_each_entry(lock, &res->lr_granted, l_res_link) {
- if (opaque && lock->l_ast_data != opaque) {
- LDLM_ERROR(lock, "data %p doesn't match opaque %p",
- lock->l_ast_data, opaque);
- continue;
- }
-
- if (lock->l_readers || lock->l_writers)
- continue;
-
- /* If somebody is already doing CANCEL, or blocking AST came,
- * skip this lock.
- */
- if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
- continue;
-
- if (lockmode_compat(lock->l_granted_mode, mode))
- continue;
-
- /* If policy is given and this is IBITS lock, add to list only
- * those locks that match by policy.
- */
- if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
- !(lock->l_policy_data.l_inodebits.bits &
- policy->l_inodebits.bits))
- continue;
-
- /* See CBPENDING comment in ldlm_cancel_lru */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
- lock_flags;
-
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
- LDLM_LOCK_GET(lock);
- count++;
- }
- unlock_res(res);
-
- return ldlm_cli_cancel_list_local(cancels, count, cancel_flags);
-}
-EXPORT_SYMBOL(ldlm_cancel_resource_local);
-
-/**
- * Cancel client-side locks from a list and send/prepare cancel RPCs to the
- * server.
- * If \a req is NULL, send CANCEL request to server with handles of locks
- * in the \a cancels. If EARLY_CANCEL is not supported, send CANCEL requests
- * separately per lock.
- * If \a req is not NULL, put handles of locks in \a cancels into the request
- * buffer at the offset \a off.
- * Destroy \a cancels at the end.
- */
-int ldlm_cli_cancel_list(struct list_head *cancels, int count,
- struct ptlrpc_request *req,
- enum ldlm_cancel_flags flags)
-{
- struct ldlm_lock *lock;
- int res = 0;
-
- if (list_empty(cancels) || count == 0)
- return 0;
-
- /* XXX: requests (both batched and not) could be sent in parallel.
- * Usually it is enough to have just 1 RPC, but it is possible that
- * there are too many locks to be cancelled in LRU or on a resource.
- * It would also speed up the case when the server does not support
- * the feature.
- */
- while (count > 0) {
- LASSERT(!list_empty(cancels));
- lock = list_first_entry(cancels, struct ldlm_lock, l_bl_ast);
- LASSERT(lock->l_conn_export);
-
- if (exp_connect_cancelset(lock->l_conn_export)) {
- res = count;
- if (req)
- ldlm_cancel_pack(req, cancels, count);
- else
- res = ldlm_cli_cancel_req(lock->l_conn_export,
- cancels, count,
- flags);
- } else {
- res = ldlm_cli_cancel_req(lock->l_conn_export,
- cancels, 1, flags);
- }
-
- if (res < 0) {
- CDEBUG_LIMIT(res == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
- "%s: %d\n", __func__, res);
- res = count;
- }
-
- count -= res;
- ldlm_lock_list_put(cancels, l_bl_ast, res);
- }
- LASSERT(count == 0);
- return 0;
-}
-EXPORT_SYMBOL(ldlm_cli_cancel_list);
-
-/**
- * Cancel all locks on a resource that have 0 readers/writers.
- *
- * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
- * to notify the server.
- */
-int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode,
- enum ldlm_cancel_flags flags,
- void *opaque)
-{
- struct ldlm_resource *res;
- LIST_HEAD(cancels);
- int count;
- int rc;
-
- res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (IS_ERR(res)) {
- /* This is not a problem. */
- CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
- return 0;
- }
-
- LDLM_RESOURCE_ADDREF(res);
- count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
- 0, flags | LCF_BL_AST, opaque);
- rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
- if (rc != ELDLM_OK)
- CERROR("canceling unused lock " DLDLMRES ": rc = %d\n",
- PLDLMRES(res), rc);
-
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- return 0;
-}
-EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
-
-struct ldlm_cli_cancel_arg {
- int lc_flags;
- void *lc_opaque;
-};
-
-static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
- struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- struct ldlm_cli_cancel_arg *lc = arg;
-
- ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
- NULL, LCK_MINMODE,
- lc->lc_flags, lc->lc_opaque);
- /* must return 0 for hash iteration */
- return 0;
-}
-
-/**
- * Cancel all locks on a namespace (or a specific resource, if given)
- * that have 0 readers/writers.
- *
- * If flags & LCF_LOCAL, throw the locks away without trying
- * to notify the server.
- */
-int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- enum ldlm_cancel_flags flags, void *opaque)
-{
- struct ldlm_cli_cancel_arg arg = {
- .lc_flags = flags,
- .lc_opaque = opaque,
- };
-
- if (!ns)
- return ELDLM_OK;
-
- if (res_id) {
- return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
- LCK_MINMODE, flags,
- opaque);
- } else {
- cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_cli_hash_cancel_unused, &arg, 0);
- return ELDLM_OK;
- }
-}
-EXPORT_SYMBOL(ldlm_cli_cancel_unused);
-
-/* Lock iterators. */
-
-static int ldlm_resource_foreach(struct ldlm_resource *res,
- ldlm_iterator_t iter, void *closure)
-{
- struct ldlm_lock *tmp;
- struct ldlm_lock *lock;
- int rc = LDLM_ITER_CONTINUE;
-
- if (!res)
- return LDLM_ITER_CONTINUE;
-
- lock_res(res);
- list_for_each_entry_safe(lock, tmp, &res->lr_granted, l_res_link) {
- if (iter(lock, closure) == LDLM_ITER_STOP) {
- rc = LDLM_ITER_STOP;
- goto out;
- }
- }
-
- list_for_each_entry_safe(lock, tmp, &res->lr_waiting, l_res_link) {
- if (iter(lock, closure) == LDLM_ITER_STOP) {
- rc = LDLM_ITER_STOP;
- goto out;
- }
- }
- out:
- unlock_res(res);
- return rc;
-}
-
-struct iter_helper_data {
- ldlm_iterator_t iter;
- void *closure;
-};
-
-static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
-{
- struct iter_helper_data *helper = closure;
-
- return helper->iter(lock, helper->closure);
-}
-
-static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
-
- return ldlm_resource_foreach(res, ldlm_iter_helper, arg) ==
- LDLM_ITER_STOP;
-}
-
-static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
- ldlm_iterator_t iter, void *closure)
-
-{
- struct iter_helper_data helper = {
- .iter = iter,
- .closure = closure,
- };
-
- cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_res_iter_helper, &helper, 0);
-}
-
-/* non-blocking function to manipulate a lock whose cb_data is being put away.
- * return 0: find no resource
- * > 0: must be LDLM_ITER_STOP/LDLM_ITER_CONTINUE.
- * < 0: errors
- */
-int ldlm_resource_iterate(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_iterator_t iter, void *data)
-{
- struct ldlm_resource *res;
- int rc;
-
- LASSERTF(ns, "must pass in namespace\n");
-
- res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (IS_ERR(res))
- return 0;
-
- LDLM_RESOURCE_ADDREF(res);
- rc = ldlm_resource_foreach(res, iter, data);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- return rc;
-}
-EXPORT_SYMBOL(ldlm_resource_iterate);
-
-/* Lock replay */
-
-static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
-{
- struct list_head *list = closure;
-
- /* we use l_pending_chain here, because it's unused on clients. */
- LASSERTF(list_empty(&lock->l_pending_chain),
- "lock %p next %p prev %p\n",
- lock, &lock->l_pending_chain.next,
- &lock->l_pending_chain.prev);
- /* bug 9573: don't replay locks left after eviction, or
- * bug 17614: locks being actively cancelled. Get a reference
- * on a lock so that it does not disappear under us (e.g. due to cancel)
- */
- if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_BL_DONE))) {
- list_add(&lock->l_pending_chain, list);
- LDLM_LOCK_GET(lock);
- }
-
- return LDLM_ITER_CONTINUE;
-}
-
-static int replay_lock_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct ldlm_async_args *aa, int rc)
-{
- struct ldlm_lock *lock;
- struct ldlm_reply *reply;
- struct obd_export *exp;
-
- atomic_dec(&req->rq_import->imp_replay_inflight);
- if (rc != ELDLM_OK)
- goto out;
-
- reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if (!reply) {
- rc = -EPROTO;
- goto out;
- }
-
- lock = ldlm_handle2lock(&aa->lock_handle);
- if (!lock) {
- CERROR("received replay ack for unknown local cookie %#llx remote cookie %#llx from server %s id %s\n",
- aa->lock_handle.cookie, reply->lock_handle.cookie,
- req->rq_export->exp_client_uuid.uuid,
- libcfs_id2str(req->rq_peer));
- rc = -ESTALE;
- goto out;
- }
-
- /* Key change rehash lock in per-export hash with new key */
- exp = req->rq_export;
- if (exp && exp->exp_lock_hash) {
- /* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp()
- */
- /* coverity[overrun-buffer-val] */
- cfs_hash_rehash_key(exp->exp_lock_hash,
- &lock->l_remote_handle,
- &reply->lock_handle,
- &lock->l_exp_hash);
- } else {
- lock->l_remote_handle = reply->lock_handle;
- }
-
- LDLM_DEBUG(lock, "replayed lock:");
- ptlrpc_import_recovery_state_machine(req->rq_import);
- LDLM_LOCK_PUT(lock);
-out:
- if (rc != ELDLM_OK)
- ptlrpc_connect_import(req->rq_import);
-
- return rc;
-}
-
-static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
-{
- struct ptlrpc_request *req;
- struct ldlm_async_args *aa;
- struct ldlm_request *body;
- int flags;
-
- /* Bug 11974: Do not replay a lock which is actively being canceled */
- if (ldlm_is_bl_done(lock)) {
- LDLM_DEBUG(lock, "Not replaying canceled lock:");
- return 0;
- }
-
- /* If this is reply-less callback lock, we cannot replay it, since
- * server might have long dropped it, but notification of that event was
- * lost by network. (and server granted conflicting lock already)
- */
- if (ldlm_is_cancel_on_block(lock)) {
- LDLM_DEBUG(lock, "Not replaying reply-less lock:");
- ldlm_lock_cancel(lock);
- return 0;
- }
-
- /*
- * If granted mode matches the requested mode, this lock is granted.
- *
- * If they differ, but we have a granted mode, then we were granted
- * one mode and now want another: ergo, converting.
- *
- * If we haven't been granted anything and are on a resource list,
- * then we're blocked/waiting.
- *
- * If we haven't been granted anything and we're NOT on a resource list,
- * then we haven't got a reply yet and don't have a known disposition.
- * This happens whenever a lock enqueue is the request that triggers
- * recovery.
- */
- if (lock->l_granted_mode == lock->l_req_mode)
- flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
- else if (lock->l_granted_mode)
- flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
- else if (!list_empty(&lock->l_res_link))
- flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
- else
- flags = LDLM_FL_REPLAY;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
- LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
- if (!req)
- return -ENOMEM;
-
- /* We're part of recovery, so don't wait for it. */
- req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- ldlm_lock2desc(lock, &body->lock_desc);
- body->lock_flags = ldlm_flags_to_wire(flags);
-
- ldlm_lock2handle(lock, &body->lock_handle[0]);
- if (lock->l_lvb_len > 0)
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- lock->l_lvb_len);
- ptlrpc_request_set_replen(req);
- /* notify the server we've replayed all requests.
- * also, we mark the request to be put on a dedicated
- * queue to be processed after all request replayes.
- * bug 6063
- */
- lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
-
- LDLM_DEBUG(lock, "replaying lock:");
-
- atomic_inc(&req->rq_import->imp_replay_inflight);
- BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->lock_handle = body->lock_handle[0];
- req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
- ptlrpcd_add_req(req);
-
- return 0;
-}
-
-/**
- * Cancel as many unused locks as possible before replay. since we are
- * in recovery, we can't wait for any outstanding RPCs to send any RPC
- * to the server.
- *
- * Called only in recovery before replaying locks. there is no need to
- * replay locks that are unused. since the clients may hold thousands of
- * cached unused locks, dropping the unused locks can greatly reduce the
- * load on the servers at recovery time.
- */
-static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
-{
- int canceled;
- LIST_HEAD(cancels);
-
- CDEBUG(D_DLMTRACE,
- "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
- ldlm_ns_name(ns), ns->ns_nr_unused);
-
- /* We don't need to care whether or not LRU resize is enabled
- * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the
- * count parameter
- */
- canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
- LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);
-
- CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
- canceled, ldlm_ns_name(ns));
-}
-
-int ldlm_replay_locks(struct obd_import *imp)
-{
- struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
- LIST_HEAD(list);
- struct ldlm_lock *lock, *next;
- int rc = 0;
-
- LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
-
- /* don't replay locks if import failed recovery */
- if (imp->imp_vbr_failed)
- return 0;
-
- /* ensure this doesn't fall to 0 before all have been queued */
- atomic_inc(&imp->imp_replay_inflight);
-
- if (ldlm_cancel_unused_locks_before_replay)
- ldlm_cancel_unused_locks_for_replay(ns);
-
- ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
-
- list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
- list_del_init(&lock->l_pending_chain);
- if (rc) {
- LDLM_LOCK_RELEASE(lock);
- continue; /* or try to do the rest? */
- }
- rc = replay_one_lock(imp, lock);
- LDLM_LOCK_RELEASE(lock);
- }
-
- atomic_dec(&imp->imp_replay_inflight);
-
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
deleted file mode 100644
index 4c44603ab6f9..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ /dev/null
@@ -1,1369 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_resource.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Peter Braam <braam@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-#include <lustre_dlm.h>
-#include <lustre_fid.h>
-#include <obd_class.h>
-#include "ldlm_internal.h"
-
-struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
-
-int ldlm_srv_namespace_nr;
-int ldlm_cli_namespace_nr;
-
-struct mutex ldlm_srv_namespace_lock;
-LIST_HEAD(ldlm_srv_namespace_list);
-
-struct mutex ldlm_cli_namespace_lock;
-/* Client Namespaces that have active resources in them.
- * Once all resources go away, ldlm_poold moves such namespaces to the
- * inactive list
- */
-LIST_HEAD(ldlm_cli_active_namespace_list);
-/* Client namespaces that don't have any locks in them */
-static LIST_HEAD(ldlm_cli_inactive_namespace_list);
-
-static struct dentry *ldlm_debugfs_dir;
-static struct dentry *ldlm_ns_debugfs_dir;
-struct dentry *ldlm_svc_debugfs_dir;
-
-/* during debug dump certain amount of granted locks for one resource to avoid
- * DDOS.
- */
-static unsigned int ldlm_dump_granted_max = 256;
-
-static ssize_t
-lprocfs_wr_dump_ns(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
-{
- ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
- ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
- return count;
-}
-
-LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
-
-static int ldlm_rw_uint_seq_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%u\n", *(unsigned int *)m->private);
- return 0;
-}
-
-static ssize_t
-ldlm_rw_uint_seq_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
-
- if (count == 0)
- return 0;
- return kstrtouint_from_user(buffer, count, 0,
- (unsigned int *)seq->private);
-}
-
-LPROC_SEQ_FOPS(ldlm_rw_uint);
-
-static struct lprocfs_vars ldlm_debugfs_list[] = {
- { "dump_namespaces", &ldlm_dump_ns_fops, NULL, 0222 },
- { "dump_granted_max", &ldlm_rw_uint_fops, &ldlm_dump_granted_max },
- { NULL }
-};
-
-int ldlm_debugfs_setup(void)
-{
- int rc;
-
- ldlm_debugfs_dir = ldebugfs_register(OBD_LDLM_DEVICENAME,
- debugfs_lustre_root,
- NULL, NULL);
- if (IS_ERR_OR_NULL(ldlm_debugfs_dir)) {
- CERROR("LProcFS failed in ldlm-init\n");
- rc = ldlm_debugfs_dir ? PTR_ERR(ldlm_debugfs_dir) : -ENOMEM;
- goto err;
- }
-
- ldlm_ns_debugfs_dir = ldebugfs_register("namespaces",
- ldlm_debugfs_dir,
- NULL, NULL);
- if (IS_ERR_OR_NULL(ldlm_ns_debugfs_dir)) {
- CERROR("LProcFS failed in ldlm-init\n");
- rc = ldlm_ns_debugfs_dir ? PTR_ERR(ldlm_ns_debugfs_dir)
- : -ENOMEM;
- goto err_type;
- }
-
- ldlm_svc_debugfs_dir = ldebugfs_register("services",
- ldlm_debugfs_dir,
- NULL, NULL);
- if (IS_ERR_OR_NULL(ldlm_svc_debugfs_dir)) {
- CERROR("LProcFS failed in ldlm-init\n");
- rc = ldlm_svc_debugfs_dir ? PTR_ERR(ldlm_svc_debugfs_dir)
- : -ENOMEM;
- goto err_ns;
- }
-
- rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
- if (rc) {
- CERROR("LProcFS failed in ldlm-init\n");
- goto err_svc;
- }
-
- return 0;
-
-err_svc:
- ldebugfs_remove(&ldlm_svc_debugfs_dir);
-err_ns:
- ldebugfs_remove(&ldlm_ns_debugfs_dir);
-err_type:
- ldebugfs_remove(&ldlm_debugfs_dir);
-err:
- ldlm_svc_debugfs_dir = NULL;
- ldlm_ns_debugfs_dir = NULL;
- ldlm_debugfs_dir = NULL;
- return rc;
-}
-
-void ldlm_debugfs_cleanup(void)
-{
- if (!IS_ERR_OR_NULL(ldlm_svc_debugfs_dir))
- ldebugfs_remove(&ldlm_svc_debugfs_dir);
-
- if (!IS_ERR_OR_NULL(ldlm_ns_debugfs_dir))
- ldebugfs_remove(&ldlm_ns_debugfs_dir);
-
- if (!IS_ERR_OR_NULL(ldlm_debugfs_dir))
- ldebugfs_remove(&ldlm_debugfs_dir);
-
- ldlm_svc_debugfs_dir = NULL;
- ldlm_ns_debugfs_dir = NULL;
- ldlm_debugfs_dir = NULL;
-}
-
-static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- __u64 res = 0;
- struct cfs_hash_bd bd;
- int i;
-
- /* result is not strictly consistent */
- cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
- res += cfs_hash_bd_count_get(&bd);
- return sprintf(buf, "%lld\n", res);
-}
-LUSTRE_RO_ATTR(resource_count);
-
-static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- __u64 locks;
-
- locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
- LPROCFS_FIELDS_FLAGS_SUM);
- return sprintf(buf, "%lld\n", locks);
-}
-LUSTRE_RO_ATTR(lock_count);
-
-static ssize_t lock_unused_count_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
-
- return sprintf(buf, "%d\n", ns->ns_nr_unused);
-}
-LUSTRE_RO_ATTR(lock_unused_count);
-
-static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- __u32 *nr = &ns->ns_max_unused;
-
- if (ns_connect_lru_resize(ns))
- nr = &ns->ns_nr_unused;
- return sprintf(buf, "%u\n", *nr);
-}
-
-static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- unsigned long tmp;
- int lru_resize;
- int err;
-
- if (strncmp(buffer, "clear", 5) == 0) {
- CDEBUG(D_DLMTRACE,
- "dropping all unused locks from namespace %s\n",
- ldlm_ns_name(ns));
- if (ns_connect_lru_resize(ns)) {
- int canceled, unused = ns->ns_nr_unused;
-
- /* Try to cancel all @ns_nr_unused locks. */
- canceled = ldlm_cancel_lru(ns, unused, 0,
- LDLM_LRU_FLAG_PASSED);
- if (canceled < unused) {
- CDEBUG(D_DLMTRACE,
- "not all requested locks are canceled, requested: %d, canceled: %d\n",
- unused,
- canceled);
- return -EINVAL;
- }
- } else {
- tmp = ns->ns_max_unused;
- ns->ns_max_unused = 0;
- ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
- ns->ns_max_unused = tmp;
- }
- return count;
- }
-
- err = kstrtoul(buffer, 10, &tmp);
- if (err != 0) {
- CERROR("lru_size: invalid value written\n");
- return -EINVAL;
- }
- lru_resize = (tmp == 0);
-
- if (ns_connect_lru_resize(ns)) {
- if (!lru_resize)
- ns->ns_max_unused = (unsigned int)tmp;
-
- if (tmp > ns->ns_nr_unused)
- tmp = ns->ns_nr_unused;
- tmp = ns->ns_nr_unused - tmp;
-
- CDEBUG(D_DLMTRACE,
- "changing namespace %s unused locks from %u to %u\n",
- ldlm_ns_name(ns), ns->ns_nr_unused,
- (unsigned int)tmp);
- ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
-
- if (!lru_resize) {
- CDEBUG(D_DLMTRACE,
- "disable lru_resize for namespace %s\n",
- ldlm_ns_name(ns));
- ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
- }
- } else {
- CDEBUG(D_DLMTRACE,
- "changing namespace %s max_unused from %u to %u\n",
- ldlm_ns_name(ns), ns->ns_max_unused,
- (unsigned int)tmp);
- ns->ns_max_unused = (unsigned int)tmp;
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
-
- /* Make sure that LRU resize was originally supported before
- * turning it on here.
- */
- if (lru_resize &&
- (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
- CDEBUG(D_DLMTRACE,
- "enable lru_resize for namespace %s\n",
- ldlm_ns_name(ns));
- ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
- }
- }
-
- return count;
-}
-LUSTRE_RW_ATTR(lru_size);
-
-static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
-
- return sprintf(buf, "%u\n", ns->ns_max_age);
-}
-
-static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- unsigned long tmp;
- int err;
-
- err = kstrtoul(buffer, 10, &tmp);
- if (err != 0)
- return -EINVAL;
-
- ns->ns_max_age = tmp;
-
- return count;
-}
-LUSTRE_RW_ATTR(lru_max_age);
-
-static ssize_t early_lock_cancel_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
-
- return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
-}
-
-static ssize_t early_lock_cancel_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- unsigned long supp = -1;
- int rc;
-
- rc = kstrtoul(buffer, 10, &supp);
- if (rc < 0)
- return rc;
-
- if (supp == 0)
- ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
- else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
- ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
- return count;
-}
-LUSTRE_RW_ATTR(early_lock_cancel);
-
-/* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
-static struct attribute *ldlm_ns_attrs[] = {
- &lustre_attr_resource_count.attr,
- &lustre_attr_lock_count.attr,
- &lustre_attr_lock_unused_count.attr,
- &lustre_attr_lru_size.attr,
- &lustre_attr_lru_max_age.attr,
- &lustre_attr_early_lock_cancel.attr,
- NULL,
-};
-
-static void ldlm_ns_release(struct kobject *kobj)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- complete(&ns->ns_kobj_unregister);
-}
-
-static struct kobj_type ldlm_ns_ktype = {
- .default_attrs = ldlm_ns_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = ldlm_ns_release,
-};
-
-static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
-{
- if (IS_ERR_OR_NULL(ns->ns_debugfs_entry))
- CERROR("dlm namespace %s has no procfs dir?\n",
- ldlm_ns_name(ns));
- else
- ldebugfs_remove(&ns->ns_debugfs_entry);
-
- if (ns->ns_stats)
- lprocfs_free_stats(&ns->ns_stats);
-}
-
-static void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
-{
- kobject_put(&ns->ns_kobj);
- wait_for_completion(&ns->ns_kobj_unregister);
-}
-
-static int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
-{
- int err;
-
- ns->ns_kobj.kset = ldlm_ns_kset;
- init_completion(&ns->ns_kobj_unregister);
- err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
- "%s", ldlm_ns_name(ns));
-
- ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
- if (!ns->ns_stats) {
- kobject_put(&ns->ns_kobj);
- return -ENOMEM;
- }
-
- lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
- LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
-
- return err;
-}
-
-static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
-{
- struct dentry *ns_entry;
-
- if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) {
- ns_entry = ns->ns_debugfs_entry;
- } else {
- ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
- ldlm_ns_debugfs_dir);
- if (!ns_entry)
- return -ENOMEM;
- ns->ns_debugfs_entry = ns_entry;
- }
-
- return 0;
-}
-
-#undef MAX_STRING_SIZE
-
-static struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
-{
- LASSERT(res);
- LASSERT(res != LP_POISON);
- atomic_inc(&res->lr_refcount);
- CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
- atomic_read(&res->lr_refcount));
- return res;
-}
-
-static unsigned int ldlm_res_hop_hash(struct cfs_hash *hs,
- const void *key, unsigned int mask)
-{
- const struct ldlm_res_id *id = key;
- unsigned int val = 0;
- unsigned int i;
-
- for (i = 0; i < RES_NAME_SIZE; i++)
- val += id->name[i];
- return val & mask;
-}
-
-static unsigned int ldlm_res_hop_fid_hash(struct cfs_hash *hs,
- const void *key, unsigned int mask)
-{
- const struct ldlm_res_id *id = key;
- struct lu_fid fid;
- __u32 hash;
- __u32 val;
-
- fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
- fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
- fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
-
- hash = fid_flatten32(&fid);
- hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
- if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
- val = id->name[LUSTRE_RES_ID_HSH_OFF];
- hash += (val >> 5) + (val << 11);
- } else {
- val = fid_oid(&fid);
- }
- hash = hash_long(hash, hs->hs_bkt_bits);
- /* give me another random factor */
- hash -= hash_long((unsigned long)hs, val % 11 + 3);
-
- hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
- hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
-
- return hash & mask;
-}
-
-static void *ldlm_res_hop_key(struct hlist_node *hnode)
-{
- struct ldlm_resource *res;
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- return &res->lr_name;
-}
-
-static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct ldlm_resource *res;
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- return ldlm_res_eq((const struct ldlm_res_id *)key,
- (const struct ldlm_res_id *)&res->lr_name);
-}
-
-static void *ldlm_res_hop_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct ldlm_resource, lr_hash);
-}
-
-static void ldlm_res_hop_get_locked(struct cfs_hash *hs,
- struct hlist_node *hnode)
-{
- struct ldlm_resource *res;
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- ldlm_resource_getref(res);
-}
-
-static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ldlm_resource *res;
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- ldlm_resource_putref(res);
-}
-
-static struct cfs_hash_ops ldlm_ns_hash_ops = {
- .hs_hash = ldlm_res_hop_hash,
- .hs_key = ldlm_res_hop_key,
- .hs_keycmp = ldlm_res_hop_keycmp,
- .hs_keycpy = NULL,
- .hs_object = ldlm_res_hop_object,
- .hs_get = ldlm_res_hop_get_locked,
- .hs_put = ldlm_res_hop_put
-};
-
-static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
- .hs_hash = ldlm_res_hop_fid_hash,
- .hs_key = ldlm_res_hop_key,
- .hs_keycmp = ldlm_res_hop_keycmp,
- .hs_keycpy = NULL,
- .hs_object = ldlm_res_hop_object,
- .hs_get = ldlm_res_hop_get_locked,
- .hs_put = ldlm_res_hop_put
-};
-
-struct ldlm_ns_hash_def {
- enum ldlm_ns_type nsd_type;
- /** hash bucket bits */
- unsigned int nsd_bkt_bits;
- /** hash bits */
- unsigned int nsd_all_bits;
- /** hash operations */
- struct cfs_hash_ops *nsd_hops;
-};
-
-static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
- {
- .nsd_type = LDLM_NS_TYPE_MDC,
- .nsd_bkt_bits = 11,
- .nsd_all_bits = 16,
- .nsd_hops = &ldlm_ns_fid_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_MDT,
- .nsd_bkt_bits = 14,
- .nsd_all_bits = 21,
- .nsd_hops = &ldlm_ns_fid_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_OSC,
- .nsd_bkt_bits = 8,
- .nsd_all_bits = 12,
- .nsd_hops = &ldlm_ns_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_OST,
- .nsd_bkt_bits = 11,
- .nsd_all_bits = 17,
- .nsd_hops = &ldlm_ns_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_MGC,
- .nsd_bkt_bits = 4,
- .nsd_all_bits = 4,
- .nsd_hops = &ldlm_ns_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_MGT,
- .nsd_bkt_bits = 4,
- .nsd_all_bits = 4,
- .nsd_hops = &ldlm_ns_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_UNKNOWN,
- },
-};
-
-/** Register \a ns in the list of namespaces */
-static void ldlm_namespace_register(struct ldlm_namespace *ns,
- enum ldlm_side client)
-{
- mutex_lock(ldlm_namespace_lock(client));
- LASSERT(list_empty(&ns->ns_list_chain));
- list_add(&ns->ns_list_chain, &ldlm_cli_inactive_namespace_list);
- ldlm_namespace_nr_inc(client);
- mutex_unlock(ldlm_namespace_lock(client));
-}
-
-/**
- * Create and initialize new empty namespace.
- */
-struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
- enum ldlm_side client,
- enum ldlm_appetite apt,
- enum ldlm_ns_type ns_type)
-{
- struct ldlm_namespace *ns = NULL;
- struct ldlm_ns_bucket *nsb;
- struct ldlm_ns_hash_def *nsd;
- struct cfs_hash_bd bd;
- int idx;
- int rc;
-
- LASSERT(obd);
-
- rc = ldlm_get_ref();
- if (rc) {
- CERROR("ldlm_get_ref failed: %d\n", rc);
- return NULL;
- }
-
- for (idx = 0;; idx++) {
- nsd = &ldlm_ns_hash_defs[idx];
- if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
- CERROR("Unknown type %d for ns %s\n", ns_type, name);
- goto out_ref;
- }
-
- if (nsd->nsd_type == ns_type)
- break;
- }
-
- ns = kzalloc(sizeof(*ns), GFP_NOFS);
- if (!ns)
- goto out_ref;
-
- ns->ns_rs_hash = cfs_hash_create(name,
- nsd->nsd_all_bits, nsd->nsd_all_bits,
- nsd->nsd_bkt_bits, sizeof(*nsb),
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- nsd->nsd_hops,
- CFS_HASH_DEPTH |
- CFS_HASH_BIGNAME |
- CFS_HASH_SPIN_BKTLOCK |
- CFS_HASH_NO_ITEMREF);
- if (!ns->ns_rs_hash)
- goto out_ns;
-
- cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
- nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
- at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
- nsb->nsb_namespace = ns;
- }
-
- ns->ns_obd = obd;
- ns->ns_appetite = apt;
- ns->ns_client = client;
-
- INIT_LIST_HEAD(&ns->ns_list_chain);
- INIT_LIST_HEAD(&ns->ns_unused_list);
- spin_lock_init(&ns->ns_lock);
- atomic_set(&ns->ns_bref, 0);
- init_waitqueue_head(&ns->ns_waitq);
-
- ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
- ns->ns_nr_unused = 0;
- ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
- ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
- ns->ns_orig_connect_flags = 0;
- ns->ns_connect_flags = 0;
- ns->ns_stopping = 0;
-
- rc = ldlm_namespace_sysfs_register(ns);
- if (rc != 0) {
- CERROR("Can't initialize ns sysfs, rc %d\n", rc);
- goto out_hash;
- }
-
- rc = ldlm_namespace_debugfs_register(ns);
- if (rc != 0) {
- CERROR("Can't initialize ns proc, rc %d\n", rc);
- goto out_sysfs;
- }
-
- idx = ldlm_namespace_nr_read(client);
- rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
- if (rc) {
- CERROR("Can't initialize lock pool, rc %d\n", rc);
- goto out_proc;
- }
-
- ldlm_namespace_register(ns, client);
- return ns;
-out_proc:
- ldlm_namespace_debugfs_unregister(ns);
-out_sysfs:
- ldlm_namespace_sysfs_unregister(ns);
- ldlm_namespace_cleanup(ns, 0);
-out_hash:
- cfs_hash_putref(ns->ns_rs_hash);
-out_ns:
- kfree(ns);
-out_ref:
- ldlm_put_ref();
- return NULL;
-}
-EXPORT_SYMBOL(ldlm_namespace_new);
-
-extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
-
-/**
- * Cancel and destroy all locks on a resource.
- *
- * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
- * clean up. This is currently only used for recovery, and we make
- * certain assumptions as a result--notably, that we shouldn't cancel
- * locks with refs.
- */
-static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
- __u64 flags)
-{
- int rc = 0;
- bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
-
- do {
- struct ldlm_lock *lock = NULL, *tmp;
- struct lustre_handle lockh;
-
- /* First, we look for non-cleaned-yet lock
- * all cleaned locks are marked by CLEANED flag.
- */
- lock_res(res);
- list_for_each_entry(tmp, q, l_res_link) {
- if (ldlm_is_cleaned(tmp))
- continue;
-
- lock = tmp;
- LDLM_LOCK_GET(lock);
- ldlm_set_cleaned(lock);
- break;
- }
-
- if (!lock) {
- unlock_res(res);
- break;
- }
-
- /* Set CBPENDING so nothing in the cancellation path
- * can match this lock.
- */
- ldlm_set_cbpending(lock);
- ldlm_set_failed(lock);
- lock->l_flags |= flags;
-
- /* ... without sending a CANCEL message for local_only. */
- if (local_only)
- ldlm_set_local_only(lock);
-
- if (local_only && (lock->l_readers || lock->l_writers)) {
- /* This is a little bit gross, but much better than the
- * alternative: pretend that we got a blocking AST from
- * the server, so that when the lock is decref'd, it
- * will go away ...
- */
- unlock_res(res);
- LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
- if (lock->l_flags & LDLM_FL_FAIL_LOC) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(4 * HZ);
- set_current_state(TASK_RUNNING);
- }
- if (lock->l_completion_ast)
- lock->l_completion_ast(lock, LDLM_FL_FAILED,
- NULL);
- LDLM_LOCK_RELEASE(lock);
- continue;
- }
-
- unlock_res(res);
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
- if (rc)
- CERROR("ldlm_cli_cancel: %d\n", rc);
- LDLM_LOCK_RELEASE(lock);
- } while (1);
-}
-
-static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- __u64 flags = *(__u64 *)arg;
-
- cleanup_resource(res, &res->lr_granted, flags);
- cleanup_resource(res, &res->lr_waiting, flags);
-
- return 0;
-}
-
-static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
-
- lock_res(res);
- CERROR("%s: namespace resource " DLDLMRES
- " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
- ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
- atomic_read(&res->lr_refcount) - 1);
-
- ldlm_resource_dump(D_ERROR, res);
- unlock_res(res);
- return 0;
-}
-
-/**
- * Cancel and destroy all locks in the namespace.
- *
- * Typically used during evictions when server notified client that it was
- * evicted and all of its state needs to be destroyed.
- * Also used during shutdown.
- */
-int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
-{
- if (!ns) {
- CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
- return ELDLM_OK;
- }
-
- cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
- &flags, 0);
- cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
- NULL, 0);
- return ELDLM_OK;
-}
-EXPORT_SYMBOL(ldlm_namespace_cleanup);
-
-/**
- * Attempts to free namespace.
- *
- * Only used when namespace goes away, like during an unmount.
- */
-static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
-{
- /* At shutdown time, don't call the cancellation callback */
- ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
-
- if (atomic_read(&ns->ns_bref) > 0) {
- int rc;
-
- CDEBUG(D_DLMTRACE,
- "dlm namespace %s free waiting on refcount %d\n",
- ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
-force_wait:
- if (force)
- rc = wait_event_idle_timeout(ns->ns_waitq,
- atomic_read(&ns->ns_bref) == 0,
- obd_timeout * HZ / 4) ? 0 : -ETIMEDOUT;
- else
- rc = l_wait_event_abortable(ns->ns_waitq,
- atomic_read(&ns->ns_bref) == 0);
-
- /* Forced cleanups should be able to reclaim all references,
- * so it's safe to wait forever... we can't leak locks...
- */
- if (force && rc == -ETIMEDOUT) {
- LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
- ldlm_ns_name(ns),
- atomic_read(&ns->ns_bref), rc);
- goto force_wait;
- }
-
- if (atomic_read(&ns->ns_bref)) {
- LCONSOLE_ERROR("Cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
- ldlm_ns_name(ns),
- atomic_read(&ns->ns_bref), rc);
- return ELDLM_NAMESPACE_EXISTS;
- }
- CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
- ldlm_ns_name(ns));
- }
-
- return ELDLM_OK;
-}
-
-/**
- * Performs various cleanups for passed \a ns to make it drop refc and be
- * ready for freeing. Waits for refc == 0.
- *
- * The following is done:
- * (0) Unregister \a ns from its list to make inaccessible for potential
- * users like pools thread and others;
- * (1) Clear all locks in \a ns.
- */
-void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
- struct obd_import *imp,
- int force)
-{
- int rc;
-
- if (!ns)
- return;
-
- spin_lock(&ns->ns_lock);
- ns->ns_stopping = 1;
- spin_unlock(&ns->ns_lock);
-
- /*
- * Can fail with -EINTR when force == 0 in which case try harder.
- */
- rc = __ldlm_namespace_free(ns, force);
- if (rc != ELDLM_OK) {
- if (imp) {
- ptlrpc_disconnect_import(imp, 0);
- ptlrpc_invalidate_import(imp);
- }
-
- /*
- * With all requests dropped and the import inactive
- * we are guaranteed all reference will be dropped.
- */
- rc = __ldlm_namespace_free(ns, 1);
- LASSERT(rc == 0);
- }
-}
-
-/** Unregister \a ns from the list of namespaces. */
-static void ldlm_namespace_unregister(struct ldlm_namespace *ns,
- enum ldlm_side client)
-{
- mutex_lock(ldlm_namespace_lock(client));
- LASSERT(!list_empty(&ns->ns_list_chain));
- /* Some asserts and possibly other parts of the code are still
- * using list_empty(&ns->ns_list_chain). This is why it is
- * important to use list_del_init() here.
- */
- list_del_init(&ns->ns_list_chain);
- ldlm_namespace_nr_dec(client);
- mutex_unlock(ldlm_namespace_lock(client));
-}
-
-/**
- * Performs freeing memory structures related to \a ns. This is only done
- * when ldlm_namespce_free_prior() successfully removed all resources
- * referencing \a ns and its refc == 0.
- */
-void ldlm_namespace_free_post(struct ldlm_namespace *ns)
-{
- if (!ns)
- return;
-
- /* Make sure that nobody can find this ns in its list. */
- ldlm_namespace_unregister(ns, ns->ns_client);
- /* Fini pool _before_ parent proc dir is removed. This is important as
- * ldlm_pool_fini() removes own proc dir which is child to @dir.
- * Removing it after @dir may cause oops.
- */
- ldlm_pool_fini(&ns->ns_pool);
-
- ldlm_namespace_debugfs_unregister(ns);
- ldlm_namespace_sysfs_unregister(ns);
- cfs_hash_putref(ns->ns_rs_hash);
- /* Namespace \a ns should be not on list at this time, otherwise
- * this will cause issues related to using freed \a ns in poold
- * thread.
- */
- LASSERT(list_empty(&ns->ns_list_chain));
- kfree(ns);
- ldlm_put_ref();
-}
-
-void ldlm_namespace_get(struct ldlm_namespace *ns)
-{
- atomic_inc(&ns->ns_bref);
-}
-
-/* This is only for callers that care about refcount */
-static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
-{
- return atomic_inc_return(&ns->ns_bref);
-}
-
-void ldlm_namespace_put(struct ldlm_namespace *ns)
-{
- if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
- wake_up(&ns->ns_waitq);
- spin_unlock(&ns->ns_lock);
- }
-}
-
-/** Should be called with ldlm_namespace_lock(client) taken. */
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
- enum ldlm_side client)
-{
- LASSERT(!list_empty(&ns->ns_list_chain));
- LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
- list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
-}
-
-/** Should be called with ldlm_namespace_lock(client) taken. */
-void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
- enum ldlm_side client)
-{
- LASSERT(!list_empty(&ns->ns_list_chain));
- LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
- list_move_tail(&ns->ns_list_chain, &ldlm_cli_inactive_namespace_list);
-}
-
-/** Should be called with ldlm_namespace_lock(client) taken. */
-struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
-{
- LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
- LASSERT(!list_empty(ldlm_namespace_list(client)));
- return container_of(ldlm_namespace_list(client)->next,
- struct ldlm_namespace, ns_list_chain);
-}
-
-/** Create and initialize new resource. */
-static struct ldlm_resource *ldlm_resource_new(void)
-{
- struct ldlm_resource *res;
- int idx;
-
- res = kmem_cache_zalloc(ldlm_resource_slab, GFP_NOFS);
- if (!res)
- return NULL;
-
- INIT_LIST_HEAD(&res->lr_granted);
- INIT_LIST_HEAD(&res->lr_waiting);
-
- /* Initialize interval trees for each lock mode. */
- for (idx = 0; idx < LCK_MODE_NUM; idx++) {
- res->lr_itree[idx].lit_size = 0;
- res->lr_itree[idx].lit_mode = 1 << idx;
- res->lr_itree[idx].lit_root = NULL;
- }
-
- atomic_set(&res->lr_refcount, 1);
- spin_lock_init(&res->lr_lock);
- lu_ref_init(&res->lr_reference);
-
- /* The creator of the resource must unlock the mutex after LVB
- * initialization.
- */
- mutex_init(&res->lr_lvb_mutex);
- mutex_lock(&res->lr_lvb_mutex);
-
- return res;
-}
-
-/**
- * Return a reference to resource with given name, creating it if necessary.
- * Args: namespace with ns_lock unlocked
- * Locks: takes and releases NS hash-lock and res->lr_lock
- * Returns: referenced, unlocked ldlm_resource or NULL
- */
-struct ldlm_resource *
-ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
- const struct ldlm_res_id *name, enum ldlm_type type,
- int create)
-{
- struct hlist_node *hnode;
- struct ldlm_resource *res = NULL;
- struct cfs_hash_bd bd;
- __u64 version;
- int ns_refcount = 0;
- int rc;
-
- LASSERT(!parent);
- LASSERT(ns->ns_rs_hash);
- LASSERT(name->name[0] != 0);
-
- cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
- hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
- if (hnode) {
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
- goto lvbo_init;
- }
-
- version = cfs_hash_bd_version_get(&bd);
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
-
- if (create == 0)
- return ERR_PTR(-ENOENT);
-
- LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
- "type: %d\n", type);
- res = ldlm_resource_new();
- if (!res)
- return ERR_PTR(-ENOMEM);
-
- res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
- res->lr_name = *name;
- res->lr_type = type;
-
- cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
- hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
- cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
-
- if (hnode) {
- /* Someone won the race and already added the resource. */
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
- /* Clean lu_ref for failed resource. */
- lu_ref_fini(&res->lr_reference);
- /* We have taken lr_lvb_mutex. Drop it. */
- mutex_unlock(&res->lr_lvb_mutex);
- kmem_cache_free(ldlm_resource_slab, res);
-lvbo_init:
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- /* Synchronize with regard to resource creation. */
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- mutex_lock(&res->lr_lvb_mutex);
- mutex_unlock(&res->lr_lvb_mutex);
- }
-
- if (unlikely(res->lr_lvb_len < 0)) {
- rc = res->lr_lvb_len;
- ldlm_resource_putref(res);
- res = ERR_PTR(rc);
- }
- return res;
- }
- /* We won! Let's add the resource. */
- cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
- if (cfs_hash_bd_count_get(&bd) == 1)
- ns_refcount = ldlm_namespace_get_return(ns);
-
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
- rc = ns->ns_lvbo->lvbo_init(res);
- if (rc < 0) {
- CERROR("%s: lvbo_init failed for resource %#llx:%#llx: rc = %d\n",
- ns->ns_obd->obd_name, name->name[0],
- name->name[1], rc);
- res->lr_lvb_len = rc;
- mutex_unlock(&res->lr_lvb_mutex);
- ldlm_resource_putref(res);
- return ERR_PTR(rc);
- }
- }
-
- /* We create resource with locked lr_lvb_mutex. */
- mutex_unlock(&res->lr_lvb_mutex);
-
- /* Let's see if we happened to be the very first resource in this
- * namespace. If so, and this is a client namespace, we need to move
- * the namespace into the active namespaces list to be patrolled by
- * the ldlm_poold.
- */
- if (ns_refcount == 1) {
- mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
- ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
- mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
- }
-
- return res;
-}
-EXPORT_SYMBOL(ldlm_resource_get);
-
-static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
- struct ldlm_resource *res)
-{
- struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
-
- if (!list_empty(&res->lr_granted)) {
- ldlm_resource_dump(D_ERROR, res);
- LBUG();
- }
-
- if (!list_empty(&res->lr_waiting)) {
- ldlm_resource_dump(D_ERROR, res);
- LBUG();
- }
-
- cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
- bd, &res->lr_hash);
- lu_ref_fini(&res->lr_reference);
- if (cfs_hash_bd_count_get(bd) == 0)
- ldlm_namespace_put(nsb->nsb_namespace);
-}
-
-/* Returns 1 if the resource was freed, 0 if it remains. */
-int ldlm_resource_putref(struct ldlm_resource *res)
-{
- struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- struct cfs_hash_bd bd;
-
- LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
- CDEBUG(D_INFO, "putref res: %p count: %d\n",
- res, atomic_read(&res->lr_refcount) - 1);
-
- cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
- if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
- __ldlm_resource_putref_final(&bd, res);
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
- ns->ns_lvbo->lvbo_free(res);
- kmem_cache_free(ldlm_resource_slab, res);
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(ldlm_resource_putref);
-
-/**
- * Add a lock into a given resource into specified lock list.
- */
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
- struct ldlm_lock *lock)
-{
- check_res_locked(res);
-
- LDLM_DEBUG(lock, "About to add this lock:");
-
- if (ldlm_is_destroyed(lock)) {
- CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
- return;
- }
-
- LASSERT(list_empty(&lock->l_res_link));
-
- list_add_tail(&lock->l_res_link, head);
-}
-
-void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
-{
- int type = lock->l_resource->lr_type;
-
- check_res_locked(lock->l_resource);
- if (type == LDLM_IBITS || type == LDLM_PLAIN)
- ldlm_unlink_lock_skiplist(lock);
- else if (type == LDLM_EXTENT)
- ldlm_extent_unlink_lock(lock);
- list_del_init(&lock->l_res_link);
-}
-EXPORT_SYMBOL(ldlm_resource_unlink_lock);
-
-void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
-{
- desc->lr_type = res->lr_type;
- desc->lr_name = res->lr_name;
-}
-
-/**
- * Print information about all locks in all namespaces on this node to debug
- * log.
- */
-void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
-{
- struct ldlm_namespace *ns;
-
- if (!((libcfs_debug | D_ERROR) & level))
- return;
-
- mutex_lock(ldlm_namespace_lock(client));
-
- list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain)
- ldlm_namespace_dump(level, ns);
-
- mutex_unlock(ldlm_namespace_lock(client));
-}
-
-static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- int level = (int)(unsigned long)arg;
-
- lock_res(res);
- ldlm_resource_dump(level, res);
- unlock_res(res);
-
- return 0;
-}
-
-/**
- * Print information about all locks in this namespace on this node to debug
- * log.
- */
-void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
-{
- if (!((libcfs_debug | D_ERROR) & level))
- return;
-
- CDEBUG(level, "--- Namespace: %s (rc: %d, side: client)\n",
- ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
-
- if (time_before(cfs_time_current(), ns->ns_next_dump))
- return;
-
- cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_res_hash_dump,
- (void *)(unsigned long)level, 0);
- spin_lock(&ns->ns_lock);
- ns->ns_next_dump = cfs_time_shift(10);
- spin_unlock(&ns->ns_lock);
-}
-
-/**
- * Print information about all locks in this resource to debug log.
- */
-void ldlm_resource_dump(int level, struct ldlm_resource *res)
-{
- struct ldlm_lock *lock;
- unsigned int granted = 0;
-
- BUILD_BUG_ON(RES_NAME_SIZE != 4);
-
- if (!((libcfs_debug | D_ERROR) & level))
- return;
-
- CDEBUG(level, "--- Resource: " DLDLMRES " (%p) refcount = %d\n",
- PLDLMRES(res), res, atomic_read(&res->lr_refcount));
-
- if (!list_empty(&res->lr_granted)) {
- CDEBUG(level, "Granted locks (in reverse order):\n");
- list_for_each_entry_reverse(lock, &res->lr_granted,
- l_res_link) {
- LDLM_DEBUG_LIMIT(level, lock, "###");
- if (!(level & D_CANTMASK) &&
- ++granted > ldlm_dump_granted_max) {
- CDEBUG(level,
- "only dump %d granted locks to avoid DDOS.\n",
- granted);
- break;
- }
- }
- }
- if (!list_empty(&res->lr_waiting)) {
- CDEBUG(level, "Waiting locks:\n");
- list_for_each_entry(lock, &res->lr_waiting, l_res_link)
- LDLM_DEBUG_LIMIT(level, lock, "###");
- }
-}
-EXPORT_SYMBOL(ldlm_resource_dump);
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
deleted file mode 100644
index 519fd747e3ad..000000000000
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LUSTRE_FS) += lustre.o
-lustre-y := dcache.o dir.o file.o llite_lib.o llite_nfs.o \
- rw.o rw26.o namei.o symlink.o llite_mmap.o range_lock.o \
- xattr.o xattr_cache.o xattr_security.o \
- super25.o statahead.o glimpse.o lcommon_cl.o lcommon_misc.o \
- vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o \
- lproc_llite.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
deleted file mode 100644
index 11b82c639bfe..000000000000
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/quotaops.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd_support.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <lustre_dlm.h>
-
-#include "llite_internal.h"
-
-static void free_dentry_data(struct rcu_head *head)
-{
- struct ll_dentry_data *lld;
-
- lld = container_of(head, struct ll_dentry_data, lld_rcu_head);
- kfree(lld);
-}
-
-/* should NOT be called with the dcache lock, see fs/dcache.c */
-static void ll_release(struct dentry *de)
-{
- struct ll_dentry_data *lld;
-
- LASSERT(de);
- lld = ll_d2d(de);
- if (lld->lld_it) {
- ll_intent_release(lld->lld_it);
- kfree(lld->lld_it);
- }
-
- de->d_fsdata = NULL;
- call_rcu(&lld->lld_rcu_head, free_dentry_data);
-}
-
-/* Compare if two dentries are the same. Don't match if the existing dentry
- * is marked invalid. Returns 1 if different, 0 if the same.
- *
- * This avoids a race where ll_lookup_it() instantiates a dentry, but we get
- * an AST before calling d_revalidate_it(). The dentry still exists (marked
- * INVALID) so d_lookup() matches it, but we have no lock on it (so
- * lock_match() fails) and we spin around real_lookup().
- *
- * This race doesn't apply to lookups in d_alloc_parallel(), and for
- * those we want to ensure that only one dentry with a given name is
- * in ll_lookup_nd() at a time. So allow invalid dentries to match
- * while d_in_lookup(). We will be called again when the lookup
- * completes, and can give a different answer then.
- */
-static int ll_dcompare(const struct dentry *dentry,
- unsigned int len, const char *str,
- const struct qstr *name)
-{
- if (len != name->len)
- return 1;
-
- if (memcmp(str, name->name, len))
- return 1;
-
- CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n",
- name->len, name->name, dentry, dentry->d_flags,
- d_count(dentry));
-
- /* mountpoint is always valid */
- if (d_mountpoint(dentry))
- return 0;
-
- /* ensure exclusion against parallel lookup of the same name */
- if (d_in_lookup((struct dentry *)dentry))
- return 0;
-
- if (d_lustre_invalid(dentry))
- return 1;
-
- return 0;
-}
-
-/**
- * Called when last reference to a dentry is dropped and dcache wants to know
- * whether or not it should cache it:
- * - return 1 to delete the dentry immediately
- * - return 0 to cache the dentry
- * Should NOT be called with the dcache lock, see fs/dcache.c
- */
-static int ll_ddelete(const struct dentry *de)
-{
- LASSERT(de);
-
- CDEBUG(D_DENTRY, "%s dentry %pd (%p, parent %p, inode %p) %s%s\n",
- d_lustre_invalid(de) ? "deleting" : "keeping",
- de, de, de->d_parent, d_inode(de),
- d_unhashed(de) ? "" : "hashed,",
- list_empty(&de->d_subdirs) ? "" : "subdirs");
-
- /* kernel >= 2.6.38 last refcount is decreased after this function. */
- LASSERT(d_count(de) == 1);
-
- if (d_lustre_invalid(de))
- return 1;
- return 0;
-}
-
-static int ll_d_init(struct dentry *de)
-{
- struct ll_dentry_data *lld = kzalloc(sizeof(*lld), GFP_KERNEL);
-
- if (unlikely(!lld))
- return -ENOMEM;
- lld->lld_invalid = 1;
- de->d_fsdata = lld;
- return 0;
-}
-
-void ll_intent_drop_lock(struct lookup_intent *it)
-{
- if (it->it_op && it->it_lock_mode) {
- struct lustre_handle handle;
-
- handle.cookie = it->it_lock_handle;
-
- CDEBUG(D_DLMTRACE,
- "releasing lock with cookie %#llx from it %p\n",
- handle.cookie, it);
- ldlm_lock_decref(&handle, it->it_lock_mode);
-
- /* bug 494: intent_release may be called multiple times, from
- * this thread and we don't want to double-decref this lock
- */
- it->it_lock_mode = 0;
- if (it->it_remote_lock_mode != 0) {
- handle.cookie = it->it_remote_lock_handle;
-
- CDEBUG(D_DLMTRACE,
- "releasing remote lock with cookie%#llx from it %p\n",
- handle.cookie, it);
- ldlm_lock_decref(&handle,
- it->it_remote_lock_mode);
- it->it_remote_lock_mode = 0;
- }
- }
-}
-
-void ll_intent_release(struct lookup_intent *it)
-{
- CDEBUG(D_INFO, "intent %p released\n", it);
- ll_intent_drop_lock(it);
- /* We are still holding extra reference on a request, need to free it */
- if (it_disposition(it, DISP_ENQ_OPEN_REF))
- ptlrpc_req_finished(it->it_request); /* ll_file_open */
-
- if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
- ptlrpc_req_finished(it->it_request);
-
- it->it_disposition = 0;
- it->it_request = NULL;
-}
-
-void ll_invalidate_aliases(struct inode *inode)
-{
- struct dentry *dentry;
-
- CDEBUG(D_INODE, "marking dentries for ino " DFID "(%p) invalid\n",
- PFID(ll_inode2fid(inode)), inode);
-
- spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- CDEBUG(D_DENTRY,
- "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
- dentry, dentry, dentry->d_parent,
- d_inode(dentry), dentry->d_flags);
-
- d_lustre_invalidate(dentry, 0);
- }
- spin_unlock(&inode->i_lock);
-}
-
-int ll_revalidate_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it,
- struct inode *inode)
-{
- int rc = 0;
-
- if (!request)
- return 0;
-
- if (it_disposition(it, DISP_LOOKUP_NEG))
- return -ENOENT;
-
- rc = ll_prep_inode(&inode, request, NULL, it);
-
- return rc;
-}
-
-void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
-{
- if (it->it_lock_mode && inode) {
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- CDEBUG(D_DLMTRACE, "setting l_data to inode " DFID "(%p)\n",
- PFID(ll_inode2fid(inode)), inode);
- ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
- }
-
- /* drop lookup or getattr locks immediately */
- if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) {
- /* on 2.6 there are situation when several lookups and
- * revalidations may be requested during single operation.
- * therefore, we don't release intent here -bzzz
- */
- ll_intent_drop_lock(it);
- }
-}
-
-static int ll_revalidate_dentry(struct dentry *dentry,
- unsigned int lookup_flags)
-{
- struct inode *dir = d_inode(dentry->d_parent);
-
- /* If this is intermediate component path lookup and we were able to get
- * to this dentry, then its lock has not been revoked and the
- * path component is valid.
- */
- if (lookup_flags & LOOKUP_PARENT)
- return 1;
-
- /* Symlink - always valid as long as the dentry was found */
- if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))
- return 1;
-
- /*
- * VFS warns us that this is the second go around and previous
- * operation failed (most likely open|creat), so this time
- * we better talk to the server via the lookup path by name,
- * not by fid.
- */
- if (lookup_flags & LOOKUP_REVAL)
- return 0;
-
- if (!dentry_may_statahead(dir, dentry))
- return 1;
-
- if (lookup_flags & LOOKUP_RCU)
- return -ECHILD;
-
- ll_statahead(dir, &dentry, !d_inode(dentry));
- return 1;
-}
-
-/*
- * Always trust cached dentries. Update statahead window if necessary.
- */
-static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
-{
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, flags=%u\n",
- dentry, flags);
-
- return ll_revalidate_dentry(dentry, flags);
-}
-
-const struct dentry_operations ll_d_ops = {
- .d_init = ll_d_init,
- .d_revalidate = ll_revalidate_nd,
- .d_release = ll_release,
- .d_delete = ll_ddelete,
- .d_compare = ll_dcompare,
-};
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
deleted file mode 100644
index d10d27268323..000000000000
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ /dev/null
@@ -1,1706 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/dir.c
- *
- * Directory code for lustre client.
- */
-
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/buffer_head.h> /* for wait_on_buffer */
-#include <linux/pagevec.h>
-#include <linux/prefetch.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include <lustre_lib.h>
-#include <lustre_dlm.h>
-#include <lustre_fid.h>
-#include <lustre_kernelcomm.h>
-#include <lustre_swab.h>
-
-#include "llite_internal.h"
-
-/*
- * (new) readdir implementation overview.
- *
- * Original lustre readdir implementation cached exact copy of raw directory
- * pages on the client. These pages were indexed in client page cache by
- * logical offset in the directory file. This design, while very simple and
- * intuitive had some inherent problems:
- *
- * . it implies that byte offset to the directory entry serves as a
- * telldir(3)/seekdir(3) cookie, but that offset is not stable: in
- * ext3/htree directory entries may move due to splits, and more
- * importantly,
- *
- * . it is incompatible with the design of split directories for cmd3,
- * that assumes that names are distributed across nodes based on their
- * hash, and so readdir should be done in hash order.
- *
- * New readdir implementation does readdir in hash order, and uses hash of a
- * file name as a telldir/seekdir cookie. This led to number of complications:
- *
- * . hash is not unique, so it cannot be used to index cached directory
- * pages on the client (note, that it requires a whole pageful of hash
- * collided entries to cause two pages to have identical hashes);
- *
- * . hash is not unique, so it cannot, strictly speaking, be used as an
- * entry cookie. ext3/htree has the same problem and lustre implementation
- * mimics their solution: seekdir(hash) positions directory at the first
- * entry with the given hash.
- *
- * Client side.
- *
- * 0. caching
- *
- * Client caches directory pages using hash of the first entry as an index. As
- * noted above hash is not unique, so this solution doesn't work as is:
- * special processing is needed for "page hash chains" (i.e., sequences of
- * pages filled with entries all having the same hash value).
- *
- * First, such chains have to be detected. To this end, server returns to the
- * client the hash of the first entry on the page next to one returned. When
- * client detects that this hash is the same as hash of the first entry on the
- * returned page, page hash collision has to be handled. Pages in the
- * hash chain, except first one, are termed "overflow pages".
- *
- * Solution to index uniqueness problem is to not cache overflow
- * pages. Instead, when page hash collision is detected, all overflow pages
- * from emerging chain are immediately requested from the server and placed in
- * a special data structure (struct ll_dir_chain). This data structure is used
- * by ll_readdir() to process entries from overflow pages. When readdir
- * invocation finishes, overflow pages are discarded. If page hash collision
- * chain weren't completely processed, next call to readdir will again detect
- * page hash collision, again read overflow pages in, process next portion of
- * entries and again discard the pages. This is not as wasteful as it looks,
- * because, given reasonable hash, page hash collisions are extremely rare.
- *
- * 1. directory positioning
- *
- * When seekdir(hash) is called, original
- *
- *
- *
- *
- *
- *
- *
- *
- * Server.
- *
- * identification of and access to overflow pages
- *
- * page format
- *
- * Page in MDS_READPAGE RPC is packed in LU_PAGE_SIZE, and each page contains
- * a header lu_dirpage which describes the start/end hash, and whether this
- * page is empty (contains no dir entry) or hash collide with next page.
- * After client receives reply, several pages will be integrated into dir page
- * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage
- * for this integrated page will be adjusted. See lmv_adjust_dirpages().
- *
- */
-struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
- __u64 offset)
-{
- struct md_callback cb_op;
- struct page *page;
- int rc;
-
- cb_op.md_blocking_ast = ll_md_blocking_ast;
- rc = md_read_page(ll_i2mdexp(dir), op_data, &cb_op, offset, &page);
- if (rc)
- return ERR_PTR(rc);
-
- return page;
-}
-
-void ll_release_page(struct inode *inode, struct page *page, bool remove)
-{
- kunmap(page);
-
- /*
- * Always remove the page for striped dir, because the page is
- * built from temporarily in LMV layer
- */
- if (inode && S_ISDIR(inode->i_mode) &&
- ll_i2info(inode)->lli_lsm_md) {
- __free_page(page);
- return;
- }
-
- if (remove) {
- lock_page(page);
- if (likely(page->mapping))
- truncate_complete_page(page->mapping, page);
- unlock_page(page);
- }
- put_page(page);
-}
-
-/**
- * return IF_* type for given lu_dirent entry.
- * IF_* flag shld be converted to particular OS file type in
- * platform llite module.
- */
-static __u16 ll_dirent_type_get(struct lu_dirent *ent)
-{
- __u16 type = 0;
- struct luda_type *lt;
- int len = 0;
-
- if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
- const unsigned int align = sizeof(struct luda_type) - 1;
-
- len = le16_to_cpu(ent->lde_namelen);
- len = (len + align) & ~align;
- lt = (void *)ent->lde_name + len;
- type = IFTODT(le16_to_cpu(lt->lt_type));
- }
- return type;
-}
-
-int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data,
- struct dir_context *ctx)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- __u64 pos = *ppos;
- int is_api32 = ll_need_32bit_api(sbi);
- int is_hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
- struct page *page;
- bool done = false;
- int rc = 0;
-
- page = ll_get_dir_page(inode, op_data, pos);
-
- while (rc == 0 && !done) {
- struct lu_dirpage *dp;
- struct lu_dirent *ent;
- __u64 hash;
- __u64 next;
-
- if (IS_ERR(page)) {
- rc = PTR_ERR(page);
- break;
- }
-
- hash = MDS_DIR_END_OFF;
- dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent && !done;
- ent = lu_dirent_next(ent)) {
- __u16 type;
- int namelen;
- struct lu_fid fid;
- __u64 lhash;
- __u64 ino;
-
- hash = le64_to_cpu(ent->lde_hash);
- if (hash < pos)
- /*
- * Skip until we find target hash
- * value.
- */
- continue;
-
- namelen = le16_to_cpu(ent->lde_namelen);
- if (namelen == 0)
- /*
- * Skip dummy record.
- */
- continue;
-
- if (is_api32 && is_hash64)
- lhash = hash >> 32;
- else
- lhash = hash;
- fid_le_to_cpu(&fid, &ent->lde_fid);
- ino = cl_fid_build_ino(&fid, is_api32);
- type = ll_dirent_type_get(ent);
- ctx->pos = lhash;
- /* For 'll_nfs_get_name_filldir()', it will try
- * to access the 'ent' through its 'lde_name',
- * so the parameter 'name' for 'ctx->actor()'
- * must be part of the 'ent'.
- */
- done = !dir_emit(ctx, ent->lde_name,
- namelen, ino, type);
- }
-
- if (done) {
- pos = hash;
- ll_release_page(inode, page, false);
- break;
- }
-
- next = le64_to_cpu(dp->ldp_hash_end);
- pos = next;
- if (pos == MDS_DIR_END_OFF) {
- /*
- * End of directory reached.
- */
- done = 1;
- ll_release_page(inode, page, false);
- } else {
- /*
- * Normal case: continue to the next
- * page.
- */
- ll_release_page(inode, page,
- le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- next = pos;
- page = ll_get_dir_page(inode, op_data, pos);
- }
- }
-
- ctx->pos = pos;
- return rc;
-}
-
-static int ll_readdir(struct file *filp, struct dir_context *ctx)
-{
- struct inode *inode = file_inode(filp);
- struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- __u64 pos = lfd ? lfd->lfd_pos : 0;
- int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
- int api32 = ll_need_32bit_api(sbi);
- struct md_op_data *op_data;
- int rc;
-
- CDEBUG(D_VFSTRACE,
- "VFS Op:inode=" DFID "(%p) pos/size %lu/%llu 32bit_api %d\n",
- PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
- i_size_read(inode), api32);
-
- if (pos == MDS_DIR_END_OFF) {
- /*
- * end-of-file.
- */
- rc = 0;
- goto out;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
- LUSTRE_OPC_ANY, inode);
- if (IS_ERR(op_data)) {
- rc = PTR_ERR(op_data);
- goto out;
- }
-
- if (unlikely(op_data->op_mea1)) {
- /*
- * This is only needed for striped dir to fill ..,
- * see lmv_read_page
- */
- if (file_dentry(filp)->d_parent &&
- file_dentry(filp)->d_parent->d_inode) {
- __u64 ibits = MDS_INODELOCK_UPDATE;
- struct inode *parent;
-
- parent = file_dentry(filp)->d_parent->d_inode;
- if (ll_have_md_lock(parent, &ibits, LCK_MINMODE))
- op_data->op_fid3 = *ll_inode2fid(parent);
- }
-
- /*
- * If it can not find in cache, do lookup .. on the master
- * object
- */
- if (fid_is_zero(&op_data->op_fid3)) {
- rc = ll_dir_get_parent_fid(inode, &op_data->op_fid3);
- if (rc) {
- ll_finish_md_op_data(op_data);
- return rc;
- }
- }
- }
- op_data->op_max_pages = sbi->ll_md_brw_pages;
- ctx->pos = pos;
- rc = ll_dir_read(inode, &pos, op_data, ctx);
- pos = ctx->pos;
- if (lfd)
- lfd->lfd_pos = pos;
-
- if (pos == MDS_DIR_END_OFF) {
- if (api32)
- pos = LL_DIR_END_OFF_32BIT;
- else
- pos = LL_DIR_END_OFF;
- } else {
- if (api32 && hash64)
- pos >>= 32;
- }
- ctx->pos = pos;
- ll_finish_md_op_data(op_data);
-out:
- if (!rc)
- ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
-
- return rc;
-}
-
-static int ll_send_mgc_param(struct obd_export *mgc, char *string)
-{
- struct mgs_send_param *msp;
- int rc = 0;
-
- msp = kzalloc(sizeof(*msp), GFP_NOFS);
- if (!msp)
- return -ENOMEM;
-
- strlcpy(msp->mgs_param, string, sizeof(msp->mgs_param));
- rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
- sizeof(struct mgs_send_param), msp, NULL);
- if (rc)
- CERROR("Failed to set parameter: %d\n", rc);
- kfree(msp);
-
- return rc;
-}
-
-/**
- * Create striped directory with specified stripe(@lump)
- *
- * param[in] parent the parent of the directory.
- * param[in] lump the specified stripes.
- * param[in] dirname the name of the directory.
- * param[in] mode the specified mode of the directory.
- *
- * retval =0 if striped directory is being created successfully.
- * <0 if the creation is failed.
- */
-static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
- const char *dirname, umode_t mode)
-{
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- struct ll_sb_info *sbi = ll_i2sbi(parent);
- struct inode *inode = NULL;
- struct dentry dentry;
- int err;
-
- if (unlikely(lump->lum_magic != LMV_USER_MAGIC))
- return -EINVAL;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p) name %s stripe_offset %d, stripe_count: %u\n",
- PFID(ll_inode2fid(parent)), parent, dirname,
- (int)lump->lum_stripe_offset, lump->lum_stripe_count);
-
- if (lump->lum_stripe_count > 1 &&
- !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_DIR_STRIPE))
- return -EINVAL;
-
- if (lump->lum_magic != cpu_to_le32(LMV_USER_MAGIC))
- lustre_swab_lmv_user_md(lump);
-
- if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
- mode &= ~current_umask();
- mode = (mode & (0777 | S_ISVTX)) | S_IFDIR;
- op_data = ll_prep_md_op_data(NULL, parent, NULL, dirname,
- strlen(dirname), mode, LUSTRE_OPC_MKDIR,
- lump);
- if (IS_ERR(op_data)) {
- err = PTR_ERR(op_data);
- goto err_exit;
- }
-
- op_data->op_cli_flags |= CLI_SET_MEA;
- err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode,
- from_kuid(&init_user_ns, current_fsuid()),
- from_kgid(&init_user_ns, current_fsgid()),
- cfs_curproc_cap_pack(), 0, &request);
- ll_finish_md_op_data(op_data);
-
- err = ll_prep_inode(&inode, request, parent->i_sb, NULL);
- if (err)
- goto err_exit;
-
- memset(&dentry, 0, sizeof(dentry));
- dentry.d_inode = inode;
-
- err = ll_init_security(&dentry, inode, parent);
- iput(inode);
-
-err_exit:
- ptlrpc_req_finished(request);
- return err;
-}
-
-int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
- int set_default)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct md_op_data *op_data;
- struct ptlrpc_request *req = NULL;
- int rc = 0;
- struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
- struct obd_device *mgc = lsi->lsi_mgc;
- int lum_size;
-
- if (lump) {
- /*
- * This is coming from userspace, so should be in
- * local endian. But the MDS would like it in little
- * endian, so we swab it before we send it.
- */
- switch (lump->lmm_magic) {
- case LOV_USER_MAGIC_V1: {
- if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1))
- lustre_swab_lov_user_md_v1(lump);
- lum_size = sizeof(struct lov_user_md_v1);
- break;
- }
- case LOV_USER_MAGIC_V3: {
- if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3))
- lustre_swab_lov_user_md_v3(
- (struct lov_user_md_v3 *)lump);
- lum_size = sizeof(struct lov_user_md_v3);
- break;
- }
- case LMV_USER_MAGIC: {
- if (lump->lmm_magic != cpu_to_le32(LMV_USER_MAGIC))
- lustre_swab_lmv_user_md(
- (struct lmv_user_md *)lump);
- lum_size = sizeof(struct lmv_user_md);
- break;
- }
- default: {
- CDEBUG(D_IOCTL,
- "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
- lump->lmm_magic, LOV_USER_MAGIC_V1,
- LOV_USER_MAGIC_V3);
- return -EINVAL;
- }
- }
- } else {
- lum_size = sizeof(struct lov_user_md_v1);
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- /* swabbing is done in lov_setstripe() on server side */
- rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, &req);
- ll_finish_md_op_data(op_data);
- ptlrpc_req_finished(req);
- if (rc)
- return rc;
-
-#if OBD_OCD_VERSION(2, 13, 53, 0) > LUSTRE_VERSION_CODE
- /*
- * 2.9 server has stored filesystem default stripe in ROOT xattr,
- * and it's stored into system config for backward compatibility.
- *
- * In the following we use the fact that LOV_USER_MAGIC_V1 and
- * LOV_USER_MAGIC_V3 have the same initial fields so we do not
- * need to make the distinction between the 2 versions
- */
- if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
- char *param = NULL;
- char *buf;
-
- param = kzalloc(MGS_PARAM_MAXLEN, GFP_NOFS);
- if (!param)
- return -ENOMEM;
-
- buf = param;
- /* Get fsname and assume devname to be -MDT0000. */
- ll_get_fsname(inode->i_sb, buf, MTI_NAME_MAXLEN);
- strcat(buf, "-MDT0000.lov");
- buf += strlen(buf);
-
- /* Set root stripesize */
- sprintf(buf, ".stripesize=%u",
- lump ? le32_to_cpu(lump->lmm_stripe_size) : 0);
- rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
- if (rc)
- goto end;
-
- /* Set root stripecount */
- sprintf(buf, ".stripecount=%hd",
- lump ? le16_to_cpu(lump->lmm_stripe_count) : 0);
- rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
- if (rc)
- goto end;
-
- /* Set root stripeoffset */
- sprintf(buf, ".stripeoffset=%hd",
- lump ? le16_to_cpu(lump->lmm_stripe_offset) :
- (typeof(lump->lmm_stripe_offset))(-1));
- rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
-
-end:
- kfree(param);
- }
-#endif
- return rc;
-}
-
-/**
- * This function will be used to get default LOV/LMV/Default LMV
- * @valid will be used to indicate which stripe it will retrieve
- * OBD_MD_MEA LMV stripe EA
- * OBD_MD_DEFAULT_MEA Default LMV stripe EA
- * otherwise Default LOV EA.
- * Each time, it can only retrieve 1 stripe EA
- **/
-int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
- struct ptlrpc_request **request, u64 valid)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct mdt_body *body;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *req = NULL;
- int rc, lmmsize;
- struct md_op_data *op_data;
-
- rc = ll_get_max_mdsize(sbi, &lmmsize);
- if (rc)
- return rc;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
- 0, lmmsize, LUSTRE_OPC_ANY,
- NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
- rc = md_getattr(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr failed on inode " DFID ": rc %d\n",
- PFID(ll_inode2fid(inode)), rc);
- goto out;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- lmmsize = body->mbo_eadatasize;
-
- if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
- lmmsize == 0) {
- rc = -ENODATA;
- goto out;
- }
-
- lmm = req_capsule_server_sized_get(&req->rq_pill,
- &RMF_MDT_MD, lmmsize);
- LASSERT(lmm);
-
- /*
- * This is coming from the MDS, so is probably in
- * little endian. We convert it to host endian before
- * passing it to userspace.
- */
- /* We don't swab objects for directories */
- switch (le32_to_cpu(lmm->lmm_magic)) {
- case LOV_MAGIC_V1:
- if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
- break;
- case LOV_MAGIC_V3:
- if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
- lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
- break;
- case LMV_MAGIC_V1:
- if (cpu_to_le32(LMV_MAGIC) != LMV_MAGIC)
- lustre_swab_lmv_mds_md((union lmv_mds_md *)lmm);
- break;
- case LMV_USER_MAGIC:
- if (cpu_to_le32(LMV_USER_MAGIC) != LMV_USER_MAGIC)
- lustre_swab_lmv_user_md((struct lmv_user_md *)lmm);
- break;
- default:
- CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
- rc = -EPROTO;
- }
-out:
- *plmm = lmm;
- *plmm_size = lmmsize;
- *request = req;
- return rc;
-}
-
-int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid)
-{
- struct md_op_data *op_data;
- int mdt_index, rc;
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data)
- return -ENOMEM;
-
- op_data->op_flags |= MF_GET_MDT_IDX;
- op_data->op_fid1 = *fid;
- rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
- mdt_index = op_data->op_mds;
- kvfree(op_data);
- if (rc < 0)
- return rc;
-
- return mdt_index;
-}
-
-/*
- * Get MDT index for the inode.
- */
-int ll_get_mdt_idx(struct inode *inode)
-{
- return ll_get_mdt_idx_by_fid(ll_i2sbi(inode), ll_inode2fid(inode));
-}
-
-/**
- * Generic handler to do any pre-copy work.
- *
- * It sends a first hsm_progress (with extent length == 0) to coordinator as a
- * first information for it that real work has started.
- *
- * Moreover, for a ARCHIVE request, it will sample the file data version and
- * store it in \a copy.
- *
- * \return 0 on success.
- */
-static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct hsm_progress_kernel hpk;
- int rc2, rc = 0;
-
- /* Forge a hsm_progress based on data from copy. */
- hpk.hpk_fid = copy->hc_hai.hai_fid;
- hpk.hpk_cookie = copy->hc_hai.hai_cookie;
- hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset;
- hpk.hpk_extent.length = 0;
- hpk.hpk_flags = 0;
- hpk.hpk_errval = 0;
- hpk.hpk_data_version = 0;
-
- /* For archive request, we need to read the current file version. */
- if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
- struct inode *inode;
- __u64 data_version = 0;
-
- /* Get inode for this fid */
- inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
- if (IS_ERR(inode)) {
- hpk.hpk_flags |= HP_FLAG_RETRY;
- /* hpk_errval is >= 0 */
- hpk.hpk_errval = -PTR_ERR(inode);
- rc = PTR_ERR(inode);
- goto progress;
- }
-
- /* Read current file data version */
- rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
- iput(inode);
- if (rc != 0) {
- CDEBUG(D_HSM,
- "Could not read file data version of " DFID " (rc = %d). Archive request (%#llx) could not be done.\n",
- PFID(&copy->hc_hai.hai_fid), rc,
- copy->hc_hai.hai_cookie);
- hpk.hpk_flags |= HP_FLAG_RETRY;
- /* hpk_errval must be >= 0 */
- hpk.hpk_errval = -rc;
- goto progress;
- }
-
- /* Store in the hsm_copy for later copytool use.
- * Always modified even if no lsm.
- */
- copy->hc_data_version = data_version;
- }
-
-progress:
- /* On error, the request should be considered as completed */
- if (hpk.hpk_errval > 0)
- hpk.hpk_flags |= HP_FLAG_COMPLETED;
- rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
- &hpk, NULL);
-
- return rc ? rc : rc2;
-}
-
-/**
- * Generic handler to do any post-copy work.
- *
- * It will send the last hsm_progress update to coordinator to inform it
- * that copy is finished and whether it was successful or not.
- *
- * Moreover,
- * - for ARCHIVE request, it will sample the file data version and compare it
- * with the version saved in ll_ioc_copy_start(). If they do not match, copy
- * will be considered as failed.
- * - for RESTORE request, it will sample the file data version and send it to
- * coordinator which is useful if the file was imported as 'released'.
- *
- * \return 0 on success.
- */
-static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct hsm_progress_kernel hpk;
- int rc2, rc = 0;
-
- /* If you modify the logic here, also check llapi_hsm_copy_end(). */
- /* Take care: copy->hc_hai.hai_action, len, gid and data are not
- * initialized if copy_end was called with copy == NULL.
- */
-
- /* Forge a hsm_progress based on data from copy. */
- hpk.hpk_fid = copy->hc_hai.hai_fid;
- hpk.hpk_cookie = copy->hc_hai.hai_cookie;
- hpk.hpk_extent = copy->hc_hai.hai_extent;
- hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED;
- hpk.hpk_errval = copy->hc_errval;
- hpk.hpk_data_version = 0;
-
- /* For archive request, we need to check the file data was not changed.
- *
- * For restore request, we need to send the file data version, this is
- * useful when the file was created using hsm_import.
- */
- if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) ||
- (copy->hc_hai.hai_action == HSMA_RESTORE)) &&
- (copy->hc_errval == 0)) {
- struct inode *inode;
- __u64 data_version = 0;
-
- /* Get lsm for this fid */
- inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
- if (IS_ERR(inode)) {
- hpk.hpk_flags |= HP_FLAG_RETRY;
- /* hpk_errval must be >= 0 */
- hpk.hpk_errval = -PTR_ERR(inode);
- rc = PTR_ERR(inode);
- goto progress;
- }
-
- rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
- iput(inode);
- if (rc) {
- CDEBUG(D_HSM,
- "Could not read file data version. Request could not be confirmed.\n");
- if (hpk.hpk_errval == 0)
- hpk.hpk_errval = -rc;
- goto progress;
- }
-
- /* Store in the hsm_copy for later copytool use.
- * Always modified even if no lsm.
- */
- hpk.hpk_data_version = data_version;
-
- /* File could have been stripped during archiving, so we need
- * to check anyway.
- */
- if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
- (copy->hc_data_version != data_version)) {
- CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. " DFID ", start:%#llx current:%#llx\n",
- PFID(&copy->hc_hai.hai_fid),
- copy->hc_data_version, data_version);
- /* File was changed, send error to cdt. Do not ask for
- * retry because if a file is modified frequently,
- * the cdt will loop on retried archive requests.
- * The policy engine will ask for a new archive later
- * when the file will not be modified for some tunable
- * time
- */
- hpk.hpk_flags &= ~HP_FLAG_RETRY;
- rc = -EBUSY;
- /* hpk_errval must be >= 0 */
- hpk.hpk_errval = -rc;
- }
- }
-
-progress:
- rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
- &hpk, NULL);
-
- return rc ? rc : rc2;
-}
-
-static int copy_and_ioctl(int cmd, struct obd_export *exp,
- const void __user *data, size_t size)
-{
- void *copy;
- int rc;
-
- copy = memdup_user(data, size);
- if (IS_ERR(copy))
- return PTR_ERR(copy);
-
- rc = obd_iocontrol(cmd, exp, size, copy, NULL);
- kfree(copy);
-
- return rc;
-}
-
-static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
-{
- int cmd = qctl->qc_cmd;
- int type = qctl->qc_type;
- int id = qctl->qc_id;
- int valid = qctl->qc_valid;
- int rc = 0;
-
- switch (cmd) {
- case Q_SETQUOTA:
- case Q_SETINFO:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
- case Q_GETQUOTA:
- if (((type == USRQUOTA &&
- !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
- (type == GRPQUOTA &&
- !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
- !capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
- case Q_GETINFO:
- break;
- default:
- CERROR("unsupported quotactl op: %#x\n", cmd);
- return -ENOTTY;
- }
-
- if (valid != QC_GENERAL) {
- if (cmd == Q_GETINFO)
- qctl->qc_cmd = Q_GETOINFO;
- else if (cmd == Q_GETQUOTA)
- qctl->qc_cmd = Q_GETOQUOTA;
- else
- return -EINVAL;
-
- switch (valid) {
- case QC_MDTIDX:
- rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
- sizeof(*qctl), qctl, NULL);
- break;
- case QC_OSTIDX:
- rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
- sizeof(*qctl), qctl, NULL);
- break;
- case QC_UUID:
- rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
- sizeof(*qctl), qctl, NULL);
- if (rc == -EAGAIN)
- rc = obd_iocontrol(OBD_IOC_QUOTACTL,
- sbi->ll_dt_exp,
- sizeof(*qctl), qctl, NULL);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- if (rc)
- return rc;
-
- qctl->qc_cmd = cmd;
- } else {
- struct obd_quotactl *oqctl;
-
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl)
- return -ENOMEM;
-
- QCTL_COPY(oqctl, qctl);
- rc = obd_quotactl(sbi->ll_md_exp, oqctl);
- if (rc) {
- kfree(oqctl);
- return rc;
- }
- /* If QIF_SPACE is not set, client should collect the
- * space usage from OSSs by itself
- */
- if (cmd == Q_GETQUOTA &&
- !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
- !oqctl->qc_dqblk.dqb_curspace) {
- struct obd_quotactl *oqctl_tmp;
-
- oqctl_tmp = kzalloc(sizeof(*oqctl_tmp), GFP_NOFS);
- if (!oqctl_tmp) {
- rc = -ENOMEM;
- goto out;
- }
-
- oqctl_tmp->qc_cmd = Q_GETOQUOTA;
- oqctl_tmp->qc_id = oqctl->qc_id;
- oqctl_tmp->qc_type = oqctl->qc_type;
-
- /* collect space usage from OSTs */
- oqctl_tmp->qc_dqblk.dqb_curspace = 0;
- rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
- if (!rc || rc == -EREMOTEIO) {
- oqctl->qc_dqblk.dqb_curspace =
- oqctl_tmp->qc_dqblk.dqb_curspace;
- oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
- }
-
- /* collect space & inode usage from MDTs */
- oqctl_tmp->qc_dqblk.dqb_curspace = 0;
- oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
- rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
- if (!rc || rc == -EREMOTEIO) {
- oqctl->qc_dqblk.dqb_curspace +=
- oqctl_tmp->qc_dqblk.dqb_curspace;
- oqctl->qc_dqblk.dqb_curinodes =
- oqctl_tmp->qc_dqblk.dqb_curinodes;
- oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
- } else {
- oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
- }
-
- kfree(oqctl_tmp);
- }
-out:
- QCTL_COPY(qctl, oqctl);
- kfree(oqctl);
- }
-
- return rc;
-}
-
-/* This function tries to get a single name component,
- * to send to the server. No actual path traversal involved,
- * so we limit to NAME_MAX
- */
-static char *ll_getname(const char __user *filename)
-{
- int ret = 0, len;
- char *tmp;
-
- tmp = kzalloc(NAME_MAX + 1, GFP_KERNEL);
- if (!tmp)
- return ERR_PTR(-ENOMEM);
-
- len = strncpy_from_user(tmp, filename, NAME_MAX + 1);
- if (len < 0)
- ret = len;
- else if (len == 0)
- ret = -ENOENT;
- else if (len > NAME_MAX && tmp[NAME_MAX] != 0)
- ret = -ENAMETOOLONG;
-
- if (ret) {
- kfree(tmp);
- tmp = ERR_PTR(ret);
- }
- return tmp;
-}
-
-#define ll_putname(filename) kfree(filename)
-
-static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct inode *inode = file_inode(file);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obd_ioctl_data *data;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), cmd=%#x\n",
- PFID(ll_inode2fid(inode)), inode, cmd);
-
- /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
- if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
- return -ENOTTY;
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
- switch (cmd) {
- case FSFILT_IOC_GETFLAGS:
- case FSFILT_IOC_SETFLAGS:
- return ll_iocontrol(inode, file, cmd, arg);
- case FSFILT_IOC_GETVERSION_OLD:
- case FSFILT_IOC_GETVERSION:
- return put_user(inode->i_generation, (int __user *)arg);
- /* We need to special case any other ioctls we want to handle,
- * to send them to the MDS/OST as appropriate and to properly
- * network encode the arg field.
- case FSFILT_IOC_SETVERSION_OLD:
- case FSFILT_IOC_SETVERSION:
- */
- case LL_IOC_GET_MDTIDX: {
- int mdtidx;
-
- mdtidx = ll_get_mdt_idx(inode);
- if (mdtidx < 0)
- return mdtidx;
-
- if (put_user((int)mdtidx, (int __user *)arg))
- return -EFAULT;
-
- return 0;
- }
- case IOC_MDC_LOOKUP: {
- int namelen, len = 0;
- char *buf = NULL;
- char *filename;
-
- rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
- if (rc)
- return rc;
- data = (void *)buf;
-
- filename = data->ioc_inlbuf1;
- namelen = strlen(filename);
-
- if (namelen < 1) {
- CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
- rc = -EINVAL;
- goto out_free;
- }
-
- rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL);
- if (rc < 0) {
- CERROR("%s: lookup %.*s failed: rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0), namelen,
- filename, rc);
- goto out_free;
- }
-out_free:
- kvfree(buf);
- return rc;
- }
- case LL_IOC_LMV_SETSTRIPE: {
- struct lmv_user_md *lum;
- char *buf = NULL;
- char *filename;
- int namelen = 0;
- int lumlen = 0;
- umode_t mode;
- int len;
- int rc;
-
- rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
- if (rc)
- return rc;
-
- data = (void *)buf;
- if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
- data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) {
- rc = -EINVAL;
- goto lmv_out_free;
- }
-
- filename = data->ioc_inlbuf1;
- namelen = data->ioc_inllen1;
-
- if (namelen < 1) {
- CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
- rc = -EINVAL;
- goto lmv_out_free;
- }
- lum = (struct lmv_user_md *)data->ioc_inlbuf2;
- lumlen = data->ioc_inllen2;
-
- if (lum->lum_magic != LMV_USER_MAGIC ||
- lumlen != sizeof(*lum)) {
- CERROR("%s: wrong lum magic %x or size %d: rc = %d\n",
- filename, lum->lum_magic, lumlen, -EFAULT);
- rc = -EINVAL;
- goto lmv_out_free;
- }
-
-#if OBD_OCD_VERSION(2, 9, 50, 0) > LUSTRE_VERSION_CODE
- mode = data->ioc_type != 0 ? data->ioc_type : 0777;
-#else
- mode = data->ioc_type;
-#endif
- rc = ll_dir_setdirstripe(inode, lum, filename, mode);
-lmv_out_free:
- kvfree(buf);
- return rc;
- }
- case LL_IOC_LMV_SET_DEFAULT_STRIPE: {
- struct lmv_user_md __user *ulump;
- struct lmv_user_md lum;
- int rc;
-
- ulump = (struct lmv_user_md __user *)arg;
- if (copy_from_user(&lum, ulump, sizeof(lum)))
- return -EFAULT;
-
- if (lum.lum_magic != LMV_USER_MAGIC)
- return -EINVAL;
-
- rc = ll_dir_setstripe(inode, (struct lov_user_md *)&lum, 0);
-
- return rc;
- }
- case LL_IOC_LOV_SETSTRIPE: {
- struct lov_user_md_v3 lumv3;
- struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
- struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
- struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
-
- int set_default = 0;
-
- LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
- LASSERT(sizeof(lumv3.lmm_objects[0]) ==
- sizeof(lumv3p->lmm_objects[0]));
- /* first try with v1 which is smaller than v3 */
- if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
- return -EFAULT;
-
- if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
- if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
- return -EFAULT;
- }
-
- if (is_root_inode(inode))
- set_default = 1;
-
- /* in v1 and v3 cases lumv1 points to data */
- rc = ll_dir_setstripe(inode, lumv1, set_default);
-
- return rc;
- }
- case LL_IOC_LMV_GETSTRIPE: {
- struct lmv_user_md __user *ulmv;
- struct lmv_user_md lum;
- struct ptlrpc_request *request = NULL;
- struct lmv_user_md *tmp = NULL;
- union lmv_mds_md *lmm = NULL;
- u64 valid = 0;
- int max_stripe_count;
- int stripe_count;
- int mdt_index;
- int lum_size;
- int lmmsize;
- int rc;
- int i;
-
- ulmv = (struct lmv_user_md __user *)arg;
- if (copy_from_user(&lum, ulmv, sizeof(*ulmv)))
- return -EFAULT;
-
- max_stripe_count = lum.lum_stripe_count;
- /*
- * lum_magic will indicate which stripe the ioctl will like
- * to get, LMV_MAGIC_V1 is for normal LMV stripe, LMV_USER_MAGIC
- * is for default LMV stripe
- */
- if (lum.lum_magic == LMV_MAGIC_V1)
- valid |= OBD_MD_MEA;
- else if (lum.lum_magic == LMV_USER_MAGIC)
- valid |= OBD_MD_DEFAULT_MEA;
- else
- return -EINVAL;
-
- rc = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize, &request,
- valid);
- if (rc)
- goto finish_req;
-
- /* Get default LMV EA */
- if (lum.lum_magic == LMV_USER_MAGIC) {
- if (lmmsize > sizeof(*ulmv)) {
- rc = -EINVAL;
- goto finish_req;
- }
-
- if (copy_to_user(ulmv, lmm, lmmsize))
- rc = -EFAULT;
-
- goto finish_req;
- }
-
- stripe_count = lmv_mds_md_stripe_count_get(lmm);
- if (max_stripe_count < stripe_count) {
- lum.lum_stripe_count = stripe_count;
- if (copy_to_user(ulmv, &lum, sizeof(lum))) {
- rc = -EFAULT;
- goto finish_req;
- }
- rc = -E2BIG;
- goto finish_req;
- }
-
- lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1);
- tmp = kzalloc(lum_size, GFP_NOFS);
- if (!tmp) {
- rc = -ENOMEM;
- goto finish_req;
- }
-
- mdt_index = ll_get_mdt_idx(inode);
- if (mdt_index < 0) {
- rc = -ENOMEM;
- goto out_tmp;
- }
- tmp->lum_magic = LMV_MAGIC_V1;
- tmp->lum_stripe_count = 0;
- tmp->lum_stripe_offset = mdt_index;
- for (i = 0; i < stripe_count; i++) {
- struct lu_fid fid;
-
- fid_le_to_cpu(&fid, &lmm->lmv_md_v1.lmv_stripe_fids[i]);
- mdt_index = ll_get_mdt_idx_by_fid(sbi, &fid);
- if (mdt_index < 0) {
- rc = mdt_index;
- goto out_tmp;
- }
- tmp->lum_objects[i].lum_mds = mdt_index;
- tmp->lum_objects[i].lum_fid = fid;
- tmp->lum_stripe_count++;
- }
-
- if (copy_to_user(ulmv, tmp, lum_size)) {
- rc = -EFAULT;
- goto out_tmp;
- }
-out_tmp:
- kfree(tmp);
-finish_req:
- ptlrpc_req_finished(request);
- return rc;
- }
-
- case LL_IOC_LOV_SWAP_LAYOUTS:
- return -EPERM;
- case IOC_OBD_STATFS:
- return ll_obd_statfs(inode, (void __user *)arg);
- case LL_IOC_LOV_GETSTRIPE:
- case LL_IOC_MDC_GETINFO:
- case IOC_MDC_GETFILEINFO:
- case IOC_MDC_GETFILESTRIPE: {
- struct ptlrpc_request *request = NULL;
- struct lov_user_md __user *lump;
- struct lov_mds_md *lmm = NULL;
- struct mdt_body *body;
- char *filename = NULL;
- int lmmsize;
-
- if (cmd == IOC_MDC_GETFILEINFO ||
- cmd == IOC_MDC_GETFILESTRIPE) {
- filename = ll_getname((const char __user *)arg);
- if (IS_ERR(filename))
- return PTR_ERR(filename);
-
- rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
- &lmmsize, &request);
- } else {
- rc = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize,
- &request, 0);
- }
-
- if (request) {
- body = req_capsule_server_get(&request->rq_pill,
- &RMF_MDT_BODY);
- LASSERT(body);
- } else {
- goto out_req;
- }
-
- if (rc < 0) {
- if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
- cmd == LL_IOC_MDC_GETINFO)) {
- rc = 0;
- goto skip_lmm;
- }
-
- goto out_req;
- }
-
- if (cmd == IOC_MDC_GETFILESTRIPE ||
- cmd == LL_IOC_LOV_GETSTRIPE) {
- lump = (struct lov_user_md __user *)arg;
- } else {
- struct lov_user_mds_data __user *lmdp;
-
- lmdp = (struct lov_user_mds_data __user *)arg;
- lump = &lmdp->lmd_lmm;
- }
- if (copy_to_user(lump, lmm, lmmsize)) {
- if (copy_to_user(lump, lmm, sizeof(*lump))) {
- rc = -EFAULT;
- goto out_req;
- }
- rc = -EOVERFLOW;
- }
-skip_lmm:
- if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
- struct lov_user_mds_data __user *lmdp;
- lstat_t st = { 0 };
-
- st.st_dev = inode->i_sb->s_dev;
- st.st_mode = body->mbo_mode;
- st.st_nlink = body->mbo_nlink;
- st.st_uid = body->mbo_uid;
- st.st_gid = body->mbo_gid;
- st.st_rdev = body->mbo_rdev;
- st.st_size = body->mbo_size;
- st.st_blksize = PAGE_SIZE;
- st.st_blocks = body->mbo_blocks;
- st.st_atime = body->mbo_atime;
- st.st_mtime = body->mbo_mtime;
- st.st_ctime = body->mbo_ctime;
- st.st_ino = cl_fid_build_ino(&body->mbo_fid1,
- sbi->ll_flags &
- LL_SBI_32BIT_API);
-
- lmdp = (struct lov_user_mds_data __user *)arg;
- if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
- rc = -EFAULT;
- goto out_req;
- }
- }
-
-out_req:
- ptlrpc_req_finished(request);
- if (filename)
- ll_putname(filename);
- return rc;
- }
- case OBD_IOC_QUOTACTL: {
- struct if_quotactl *qctl;
-
- qctl = kzalloc(sizeof(*qctl), GFP_NOFS);
- if (!qctl)
- return -ENOMEM;
-
- if (copy_from_user(qctl, (void __user *)arg, sizeof(*qctl))) {
- rc = -EFAULT;
- goto out_quotactl;
- }
-
- rc = quotactl_ioctl(sbi, qctl);
-
- if (rc == 0 && copy_to_user((void __user *)arg, qctl,
- sizeof(*qctl)))
- rc = -EFAULT;
-
-out_quotactl:
- kfree(qctl);
- return rc;
- }
- case OBD_IOC_GETDTNAME:
- case OBD_IOC_GETMDNAME:
- return ll_get_obd_name(inode, cmd, arg);
- case LL_IOC_FLUSHCTX:
- return ll_flush_ctx(inode);
- case LL_IOC_GETOBDCOUNT: {
- int count, vallen;
- struct obd_export *exp;
-
- if (copy_from_user(&count, (int __user *)arg, sizeof(int)))
- return -EFAULT;
-
- /* get ost count when count is zero, get mdt count otherwise */
- exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
- vallen = sizeof(count);
- rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
- KEY_TGT_COUNT, &vallen, &count);
- if (rc) {
- CERROR("get target count failed: %d\n", rc);
- return rc;
- }
-
- if (copy_to_user((int __user *)arg, &count, sizeof(int)))
- return -EFAULT;
-
- return 0;
- }
- case LL_IOC_PATH2FID:
- if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
- sizeof(struct lu_fid)))
- return -EFAULT;
- return 0;
- case LL_IOC_GET_CONNECT_FLAGS: {
- return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL,
- (void __user *)arg);
- }
- case OBD_IOC_CHANGELOG_SEND:
- case OBD_IOC_CHANGELOG_CLEAR:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
- sizeof(struct ioc_changelog));
- return rc;
- case OBD_IOC_FID2PATH:
- return ll_fid2path(inode, (void __user *)arg);
- case LL_IOC_GETPARENT:
- return ll_getparent(file, (void __user *)arg);
- case LL_IOC_FID2MDTIDX: {
- struct obd_export *exp = ll_i2mdexp(inode);
- struct lu_fid fid;
- __u32 index;
-
- if (copy_from_user(&fid, (const struct lu_fid __user *)arg,
- sizeof(fid)))
- return -EFAULT;
-
- /* Call mdc_iocontrol */
- rc = obd_iocontrol(LL_IOC_FID2MDTIDX, exp, sizeof(fid), &fid,
- &index);
- if (rc)
- return rc;
-
- return index;
- }
- case LL_IOC_HSM_REQUEST: {
- struct hsm_user_request *hur;
- ssize_t totalsize;
-
- hur = memdup_user((void __user *)arg, sizeof(*hur));
- if (IS_ERR(hur))
- return PTR_ERR(hur);
-
- /* Compute the whole struct size */
- totalsize = hur_len(hur);
- kfree(hur);
- if (totalsize < 0)
- return -E2BIG;
-
- /* Final size will be more than double totalsize */
- if (totalsize >= MDS_MAXREQSIZE / 3)
- return -E2BIG;
-
- hur = kzalloc(totalsize, GFP_NOFS);
- if (!hur)
- return -ENOMEM;
-
- /* Copy the whole struct */
- if (copy_from_user(hur, (void __user *)arg, totalsize)) {
- kvfree(hur);
- return -EFAULT;
- }
-
- if (hur->hur_request.hr_action == HUA_RELEASE) {
- const struct lu_fid *fid;
- struct inode *f;
- int i;
-
- for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
- fid = &hur->hur_user_item[i].hui_fid;
- f = search_inode_for_lustre(inode->i_sb, fid);
- if (IS_ERR(f)) {
- rc = PTR_ERR(f);
- break;
- }
-
- rc = ll_hsm_release(f);
- iput(f);
- if (rc != 0)
- break;
- }
- } else {
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
- hur, NULL);
- }
-
- kvfree(hur);
-
- return rc;
- }
- case LL_IOC_HSM_PROGRESS: {
- struct hsm_progress_kernel hpk;
- struct hsm_progress hp;
-
- if (copy_from_user(&hp, (void __user *)arg, sizeof(hp)))
- return -EFAULT;
-
- hpk.hpk_fid = hp.hp_fid;
- hpk.hpk_cookie = hp.hp_cookie;
- hpk.hpk_extent = hp.hp_extent;
- hpk.hpk_flags = hp.hp_flags;
- hpk.hpk_errval = hp.hp_errval;
- hpk.hpk_data_version = 0;
-
- /* File may not exist in Lustre; all progress
- * reported to Lustre root
- */
- rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
- NULL);
- return rc;
- }
- case LL_IOC_HSM_CT_START:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
- sizeof(struct lustre_kernelcomm));
- return rc;
-
- case LL_IOC_HSM_COPY_START: {
- struct hsm_copy *copy;
- int rc;
-
- copy = memdup_user((char __user *)arg, sizeof(*copy));
- if (IS_ERR(copy))
- return PTR_ERR(copy);
-
- rc = ll_ioc_copy_start(inode->i_sb, copy);
- if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
- rc = -EFAULT;
-
- kfree(copy);
- return rc;
- }
- case LL_IOC_HSM_COPY_END: {
- struct hsm_copy *copy;
- int rc;
-
- copy = memdup_user((char __user *)arg, sizeof(*copy));
- if (IS_ERR(copy))
- return PTR_ERR(copy);
-
- rc = ll_ioc_copy_end(inode->i_sb, copy);
- if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
- rc = -EFAULT;
-
- kfree(copy);
- return rc;
- }
- case LL_IOC_MIGRATE: {
- char *buf = NULL;
- const char *filename;
- int namelen = 0;
- int len;
- int rc;
- int mdtidx;
-
- rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
- if (rc < 0)
- return rc;
-
- data = (struct obd_ioctl_data *)buf;
- if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
- !data->ioc_inllen1 || !data->ioc_inllen2) {
- rc = -EINVAL;
- goto migrate_free;
- }
-
- filename = data->ioc_inlbuf1;
- namelen = data->ioc_inllen1;
- if (namelen < 1 || namelen != strlen(filename) + 1) {
- rc = -EINVAL;
- goto migrate_free;
- }
-
- if (data->ioc_inllen2 != sizeof(mdtidx)) {
- rc = -EINVAL;
- goto migrate_free;
- }
- mdtidx = *(int *)data->ioc_inlbuf2;
-
- rc = ll_migrate(inode, file, mdtidx, filename, namelen - 1);
-migrate_free:
- kvfree(buf);
-
- return rc;
- }
-
- default:
- return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL,
- (void __user *)arg);
- }
-}
-
-static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
-{
- struct inode *inode = file->f_mapping->host;
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int api32 = ll_need_32bit_api(sbi);
- loff_t ret = -EINVAL;
-
- switch (origin) {
- case SEEK_SET:
- break;
- case SEEK_CUR:
- offset += file->f_pos;
- break;
- case SEEK_END:
- if (offset > 0)
- goto out;
- if (api32)
- offset += LL_DIR_END_OFF_32BIT;
- else
- offset += LL_DIR_END_OFF;
- break;
- default:
- goto out;
- }
-
- if (offset >= 0 &&
- ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
- (!api32 && offset <= LL_DIR_END_OFF))) {
- if (offset != file->f_pos) {
- if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
- (!api32 && offset == LL_DIR_END_OFF))
- fd->lfd_pos = MDS_DIR_END_OFF;
- else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH)
- fd->lfd_pos = offset << 32;
- else
- fd->lfd_pos = offset;
- file->f_pos = offset;
- }
- ret = offset;
- }
- goto out;
-
-out:
- return ret;
-}
-
-static int ll_dir_open(struct inode *inode, struct file *file)
-{
- return ll_file_open(inode, file);
-}
-
-static int ll_dir_release(struct inode *inode, struct file *file)
-{
- return ll_file_release(inode, file);
-}
-
-const struct file_operations ll_dir_operations = {
- .llseek = ll_dir_seek,
- .open = ll_dir_open,
- .release = ll_dir_release,
- .read = generic_read_dir,
- .iterate_shared = ll_readdir,
- .unlocked_ioctl = ll_dir_ioctl,
- .fsync = ll_fsync,
-};
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
deleted file mode 100644
index ca5faea13b7e..000000000000
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ /dev/null
@@ -1,3600 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/file.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-#include <lustre_dlm.h>
-#include <linux/pagemap.h>
-#include <linux/file.h>
-#include <linux/sched.h>
-#include <linux/mount.h>
-#include <uapi/linux/lustre/lustre_fiemap.h>
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include <lustre_swab.h>
-
-#include <cl_object.h>
-#include "llite_internal.h"
-
-static int
-ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
-
-static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
- bool *lease_broken);
-
-static enum llioc_iter
-ll_iocontrol_call(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg, int *rcp);
-
-static struct ll_file_data *ll_file_data_get(void)
-{
- struct ll_file_data *fd;
-
- fd = kmem_cache_zalloc(ll_file_data_slab, GFP_NOFS);
- if (!fd)
- return NULL;
- fd->fd_write_failed = false;
- return fd;
-}
-
-static void ll_file_data_put(struct ll_file_data *fd)
-{
- if (fd)
- kmem_cache_free(ll_file_data_slab, fd);
-}
-
-/**
- * Packs all the attributes into @op_data for the CLOSE rpc.
- */
-static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle *och)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
-
- ll_prep_md_op_data(op_data, inode, NULL, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
-
- op_data->op_attr.ia_mode = inode->i_mode;
- op_data->op_attr.ia_atime = inode->i_atime;
- op_data->op_attr.ia_mtime = inode->i_mtime;
- op_data->op_attr.ia_ctime = inode->i_ctime;
- op_data->op_attr.ia_size = i_size_read(inode);
- op_data->op_attr.ia_valid |= ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
- ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_CTIME | ATTR_CTIME_SET;
- op_data->op_attr_blocks = inode->i_blocks;
- op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
- op_data->op_handle = och->och_fh;
-
- /*
- * For HSM: if inode data has been modified, pack it so that
- * MDT can set data dirty flag in the archive.
- */
- if (och->och_flags & FMODE_WRITE &&
- test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags))
- op_data->op_bias |= MDS_DATA_MODIFIED;
-}
-
-/**
- * Perform a close, possibly with a bias.
- * The meaning of "data" depends on the value of "bias".
- *
- * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
- * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to
- * swap layouts with.
- */
-static int ll_close_inode_openhandle(struct inode *inode,
- struct obd_client_handle *och,
- enum mds_op_bias bias,
- void *data)
-{
- const struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_export *md_exp = ll_i2mdexp(inode);
- struct md_op_data *op_data;
- struct ptlrpc_request *req = NULL;
- int rc;
-
- if (!class_exp2obd(md_exp)) {
- CERROR("%s: invalid MDC connection handle closing " DFID "\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid));
- rc = 0;
- goto out;
- }
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- /*
- * We leak openhandle and request here on error, but not much to be
- * done in OOM case since app won't retry close on error either.
- */
- if (!op_data) {
- rc = -ENOMEM;
- goto out;
- }
-
- ll_prepare_close(inode, op_data, och);
- switch (bias) {
- case MDS_CLOSE_LAYOUT_SWAP:
- LASSERT(data);
- op_data->op_bias |= MDS_CLOSE_LAYOUT_SWAP;
- op_data->op_data_version = 0;
- op_data->op_lease_handle = och->och_lease_handle;
- op_data->op_fid2 = *ll_inode2fid(data);
- break;
-
- case MDS_HSM_RELEASE:
- LASSERT(data);
- op_data->op_bias |= MDS_HSM_RELEASE;
- op_data->op_data_version = *(__u64 *)data;
- op_data->op_lease_handle = och->och_lease_handle;
- op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
- break;
-
- default:
- LASSERT(!data);
- break;
- }
-
- rc = md_close(md_exp, op_data, och->och_mod, &req);
- if (rc && rc != -EINTR) {
- CERROR("%s: inode " DFID " mdc close failed: rc = %d\n",
- md_exp->exp_obd->obd_name, PFID(&lli->lli_fid), rc);
- }
-
- if (op_data->op_bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP) &&
- !rc) {
- struct mdt_body *body;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
- rc = -EBUSY;
- }
-
- ll_finish_md_op_data(op_data);
-
-out:
- md_clear_open_replay_data(md_exp, och);
- och->och_fh.cookie = DEAD_HANDLE_MAGIC;
- kfree(och);
-
- ptlrpc_req_finished(req);
- return rc;
-}
-
-int ll_md_real_close(struct inode *inode, fmode_t fmode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_client_handle **och_p;
- struct obd_client_handle *och;
- __u64 *och_usecount;
- int rc = 0;
-
- if (fmode & FMODE_WRITE) {
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else if (fmode & FMODE_EXEC) {
- och_p = &lli->lli_mds_exec_och;
- och_usecount = &lli->lli_open_fd_exec_count;
- } else {
- LASSERT(fmode & FMODE_READ);
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
-
- mutex_lock(&lli->lli_och_mutex);
- if (*och_usecount > 0) {
- /* There are still users of this handle, so skip
- * freeing it.
- */
- mutex_unlock(&lli->lli_och_mutex);
- return 0;
- }
-
- och = *och_p;
- *och_p = NULL;
- mutex_unlock(&lli->lli_och_mutex);
-
- if (och) {
- /* There might be a race and this handle may already
- * be closed.
- */
- rc = ll_close_inode_openhandle(inode, och, 0, NULL);
- }
-
- return rc;
-}
-
-static int ll_md_close(struct inode *inode, struct file *file)
-{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- int lockmode;
- __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
- struct lustre_handle lockh;
- union ldlm_policy_data policy = {
- .l_inodebits = { MDS_INODELOCK_OPEN }
- };
- int rc = 0;
-
- /* clear group lock, if present */
- if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
- ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
-
- if (fd->fd_lease_och) {
- bool lease_broken;
-
- /* Usually the lease is not released when the
- * application crashed, we need to release here.
- */
- rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
- CDEBUG(rc ? D_ERROR : D_INODE,
- "Clean up lease " DFID " %d/%d\n",
- PFID(&lli->lli_fid), rc, lease_broken);
-
- fd->fd_lease_och = NULL;
- }
-
- if (fd->fd_och) {
- rc = ll_close_inode_openhandle(inode, fd->fd_och, 0, NULL);
- fd->fd_och = NULL;
- goto out;
- }
-
- /* Let's see if we have good enough OPEN lock on the file and if
- * we can skip talking to MDS
- */
-
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_omode & FMODE_WRITE) {
- lockmode = LCK_CW;
- LASSERT(lli->lli_open_fd_write_count);
- lli->lli_open_fd_write_count--;
- } else if (fd->fd_omode & FMODE_EXEC) {
- lockmode = LCK_PR;
- LASSERT(lli->lli_open_fd_exec_count);
- lli->lli_open_fd_exec_count--;
- } else {
- lockmode = LCK_CR;
- LASSERT(lli->lli_open_fd_read_count);
- lli->lli_open_fd_read_count--;
- }
- mutex_unlock(&lli->lli_och_mutex);
-
- if (!md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
- LDLM_IBITS, &policy, lockmode, &lockh))
- rc = ll_md_real_close(inode, fd->fd_omode);
-
-out:
- LUSTRE_FPRIVATE(file) = NULL;
- ll_file_data_put(fd);
-
- return rc;
-}
-
-/* While this returns an error code, fput() the caller does not, so we need
- * to make every effort to clean up all of our state here. Also, applications
- * rarely check close errors and even if an error is returned they will not
- * re-try the close call.
- */
-int ll_file_release(struct inode *inode, struct file *file)
-{
- struct ll_file_data *fd;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
- PFID(ll_inode2fid(inode)), inode);
-
- if (!is_root_inode(inode))
- ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
- fd = LUSTRE_FPRIVATE(file);
- LASSERT(fd);
-
- /* The last ref on @file, maybe not be the owner pid of statahead,
- * because parent and child process can share the same file handle.
- */
- if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd)
- ll_deauthorize_statahead(inode, fd);
-
- if (is_root_inode(inode)) {
- LUSTRE_FPRIVATE(file) = NULL;
- ll_file_data_put(fd);
- return 0;
- }
-
- if (!S_ISDIR(inode->i_mode)) {
- if (lli->lli_clob)
- lov_read_and_clear_async_rc(lli->lli_clob);
- lli->lli_async_rc = 0;
- }
-
- rc = ll_md_close(inode, file);
-
- if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
- libcfs_debug_dumplog();
-
- return rc;
-}
-
-static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
- struct lookup_intent *itp)
-{
- struct inode *inode = d_inode(de);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct dentry *parent = de->d_parent;
- const char *name = NULL;
- struct md_op_data *op_data;
- struct ptlrpc_request *req = NULL;
- int len = 0, rc;
-
- LASSERT(parent);
- LASSERT(itp->it_flags & MDS_OPEN_BY_FID);
-
- /*
- * if server supports open-by-fid, or file name is invalid, don't pack
- * name in open request
- */
- if (!(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID) &&
- lu_name_is_valid_2(de->d_name.name, de->d_name.len)) {
- name = de->d_name.name;
- len = de->d_name.len;
- }
-
- op_data = ll_prep_md_op_data(NULL, d_inode(parent), inode, name, len,
- O_RDWR, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
- op_data->op_data = lmm;
- op_data->op_data_size = lmmsize;
-
- rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
- &ll_md_blocking_ast, 0);
- ll_finish_md_op_data(op_data);
- if (rc == -ESTALE) {
- /* reason for keep own exit path - don`t flood log
- * with messages with -ESTALE errors.
- */
- if (!it_disposition(itp, DISP_OPEN_OPEN) ||
- it_open_error(DISP_OPEN_OPEN, itp))
- goto out;
- ll_release_openhandle(inode, itp);
- goto out;
- }
-
- if (it_disposition(itp, DISP_LOOKUP_NEG)) {
- rc = -ENOENT;
- goto out;
- }
-
- if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
- rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
- CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
- goto out;
- }
-
- rc = ll_prep_inode(&inode, req, NULL, itp);
- if (!rc && itp->it_lock_mode)
- ll_set_lock_data(sbi->ll_md_exp, inode, itp, NULL);
-
-out:
- ptlrpc_req_finished(req);
- ll_intent_drop_lock(itp);
-
- /*
- * We did open by fid, but by the time we got to the server,
- * the object disappeared. If this is a create, we cannot really
- * tell the userspace that the file it was trying to create
- * does not exist. Instead let's return -ESTALE, and the VFS will
- * retry the create with LOOKUP_REVAL that we are going to catch
- * in ll_revalidate_dentry() and use lookup then.
- */
- if (rc == -ENOENT && itp->it_op & IT_CREAT)
- rc = -ESTALE;
-
- return rc;
-}
-
-static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
- struct obd_client_handle *och)
-{
- struct mdt_body *body;
-
- body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
- och->och_fh = body->mbo_handle;
- och->och_fid = body->mbo_fid1;
- och->och_lease_handle.cookie = it->it_lock_handle;
- och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
- och->och_flags = it->it_flags;
-
- return md_set_open_replay_data(md_exp, och, it);
-}
-
-static int ll_local_open(struct file *file, struct lookup_intent *it,
- struct ll_file_data *fd, struct obd_client_handle *och)
-{
- struct inode *inode = file_inode(file);
-
- LASSERT(!LUSTRE_FPRIVATE(file));
-
- LASSERT(fd);
-
- if (och) {
- int rc;
-
- rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
- if (rc != 0)
- return rc;
- }
-
- LUSTRE_FPRIVATE(file) = fd;
- ll_readahead_init(inode, &fd->fd_ras);
- fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
-
- /* ll_cl_context initialize */
- rwlock_init(&fd->fd_lock);
- INIT_LIST_HEAD(&fd->fd_lccs);
-
- return 0;
-}
-
-/* Open a file, and (for the very first open) create objects on the OSTs at
- * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
- * creation or open until ll_lov_setstripe() ioctl is called.
- *
- * If we already have the stripe MD locally then we don't request it in
- * md_open(), by passing a lmm_size = 0.
- *
- * It is up to the application to ensure no other processes open this file
- * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
- * used. We might be able to avoid races of that sort by getting lli_open_sem
- * before returning in the O_LOV_DELAY_CREATE case and dropping it here
- * or in ll_file_release(), but I'm not sure that is desirable/necessary.
- */
-int ll_file_open(struct inode *inode, struct file *file)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lookup_intent *it, oit = { .it_op = IT_OPEN,
- .it_flags = file->f_flags };
- struct obd_client_handle **och_p = NULL;
- __u64 *och_usecount = NULL;
- struct ll_file_data *fd;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), flags %o\n",
- PFID(ll_inode2fid(inode)), inode, file->f_flags);
-
- it = file->private_data; /* XXX: compat macro */
- file->private_data = NULL; /* prevent ll_local_open assertion */
-
- fd = ll_file_data_get();
- if (!fd) {
- rc = -ENOMEM;
- goto out_openerr;
- }
-
- fd->fd_file = file;
- if (S_ISDIR(inode->i_mode))
- ll_authorize_statahead(inode, fd);
-
- if (is_root_inode(inode)) {
- LUSTRE_FPRIVATE(file) = fd;
- return 0;
- }
-
- if (!it || !it->it_disposition) {
- /* Convert f_flags into access mode. We cannot use file->f_mode,
- * because everything but O_ACCMODE mask was stripped from
- * there
- */
- if ((oit.it_flags + 1) & O_ACCMODE)
- oit.it_flags++;
- if (file->f_flags & O_TRUNC)
- oit.it_flags |= FMODE_WRITE;
-
- /* kernel only call f_op->open in dentry_open. filp_open calls
- * dentry_open after call to open_namei that checks permissions.
- * Only nfsd_open call dentry_open directly without checking
- * permissions and because of that this code below is safe.
- */
- if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
- oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
-
- /* We do not want O_EXCL here, presumably we opened the file
- * already? XXX - NFS implications?
- */
- oit.it_flags &= ~O_EXCL;
-
- /* bug20584, if "it_flags" contains O_CREAT, the file will be
- * created if necessary, then "IT_CREAT" should be set to keep
- * consistent with it
- */
- if (oit.it_flags & O_CREAT)
- oit.it_op |= IT_CREAT;
-
- it = &oit;
- }
-
-restart:
- /* Let's see if we have file open on MDS already. */
- if (it->it_flags & FMODE_WRITE) {
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else if (it->it_flags & FMODE_EXEC) {
- och_p = &lli->lli_mds_exec_och;
- och_usecount = &lli->lli_open_fd_exec_count;
- } else {
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
-
- mutex_lock(&lli->lli_och_mutex);
- if (*och_p) { /* Open handle is present */
- if (it_disposition(it, DISP_OPEN_OPEN)) {
- /* Well, there's extra open request that we do not need,
- * let's close it somehow. This will decref request.
- */
- rc = it_open_error(DISP_OPEN_OPEN, it);
- if (rc) {
- mutex_unlock(&lli->lli_och_mutex);
- goto out_openerr;
- }
-
- ll_release_openhandle(inode, it);
- }
- (*och_usecount)++;
-
- rc = ll_local_open(file, it, fd, NULL);
- if (rc) {
- (*och_usecount)--;
- mutex_unlock(&lli->lli_och_mutex);
- goto out_openerr;
- }
- } else {
- LASSERT(*och_usecount == 0);
- if (!it->it_disposition) {
- /* We cannot just request lock handle now, new ELC code
- * means that one of other OPEN locks for this file
- * could be cancelled, and since blocking ast handler
- * would attempt to grab och_mutex as well, that would
- * result in a deadlock
- */
- mutex_unlock(&lli->lli_och_mutex);
- /*
- * Normally called under two situations:
- * 1. NFS export.
- * 2. revalidate with IT_OPEN (revalidate doesn't
- * execute this intent any more).
- *
- * Always fetch MDS_OPEN_LOCK if this is not setstripe.
- *
- * Always specify MDS_OPEN_BY_FID because we don't want
- * to get file with different fid.
- */
- it->it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID;
- rc = ll_intent_file_open(file->f_path.dentry,
- NULL, 0, it);
- if (rc)
- goto out_openerr;
-
- goto restart;
- }
- *och_p = kzalloc(sizeof(struct obd_client_handle), GFP_NOFS);
- if (!*och_p) {
- rc = -ENOMEM;
- goto out_och_free;
- }
-
- (*och_usecount)++;
-
- /* md_intent_lock() didn't get a request ref if there was an
- * open error, so don't do cleanup on the request here
- * (bug 3430)
- */
- /* XXX (green): Should not we bail out on any error here, not
- * just open error?
- */
- rc = it_open_error(DISP_OPEN_OPEN, it);
- if (rc)
- goto out_och_free;
-
- LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
- "inode %p: disposition %x, status %d\n", inode,
- it_disposition(it, ~0), it->it_status);
-
- rc = ll_local_open(file, it, fd, *och_p);
- if (rc)
- goto out_och_free;
- }
- mutex_unlock(&lli->lli_och_mutex);
- fd = NULL;
-
- /* Must do this outside lli_och_mutex lock to prevent deadlock where
- * different kind of OPEN lock for this same inode gets cancelled
- * by ldlm_cancel_lru
- */
- if (!S_ISREG(inode->i_mode))
- goto out_och_free;
-
- cl_lov_delay_create_clear(&file->f_flags);
- goto out_och_free;
-
-out_och_free:
- if (rc) {
- if (och_p && *och_p) {
- kfree(*och_p);
- *och_p = NULL;
- (*och_usecount)--;
- }
- mutex_unlock(&lli->lli_och_mutex);
-
-out_openerr:
- if (lli->lli_opendir_key == fd)
- ll_deauthorize_statahead(inode, fd);
- if (fd)
- ll_file_data_put(fd);
- } else {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
- }
-
- if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
- ptlrpc_req_finished(it->it_request);
- it_clear_disposition(it, DISP_ENQ_OPEN_REF);
- }
-
- return rc;
-}
-
-static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
- struct ldlm_lock_desc *desc,
- void *data, int flag)
-{
- int rc;
- struct lustre_handle lockh;
-
- switch (flag) {
- case LDLM_CB_BLOCKING:
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- if (rc < 0) {
- CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
- return rc;
- }
- break;
- case LDLM_CB_CANCELING:
- /* do nothing */
- break;
- }
- return 0;
-}
-
-/**
- * Acquire a lease and open the file.
- */
-static struct obd_client_handle *
-ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
- __u64 open_flags)
-{
- struct lookup_intent it = { .it_op = IT_OPEN };
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct md_op_data *op_data;
- struct ptlrpc_request *req = NULL;
- struct lustre_handle old_handle = { 0 };
- struct obd_client_handle *och = NULL;
- int rc;
- int rc2;
-
- if (fmode != FMODE_WRITE && fmode != FMODE_READ)
- return ERR_PTR(-EINVAL);
-
- if (file) {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct obd_client_handle **och_p;
- __u64 *och_usecount;
-
- if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
- return ERR_PTR(-EPERM);
-
- /* Get the openhandle of the file */
- rc = -EBUSY;
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och) {
- mutex_unlock(&lli->lli_och_mutex);
- return ERR_PTR(rc);
- }
-
- if (!fd->fd_och) {
- if (file->f_mode & FMODE_WRITE) {
- LASSERT(lli->lli_mds_write_och);
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else {
- LASSERT(lli->lli_mds_read_och);
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
- if (*och_usecount == 1) {
- fd->fd_och = *och_p;
- *och_p = NULL;
- *och_usecount = 0;
- rc = 0;
- }
- }
- mutex_unlock(&lli->lli_och_mutex);
- if (rc < 0) /* more than 1 opener */
- return ERR_PTR(rc);
-
- LASSERT(fd->fd_och);
- old_handle = fd->fd_och->och_fh;
- }
-
- och = kzalloc(sizeof(*och), GFP_NOFS);
- if (!och)
- return ERR_PTR(-ENOMEM);
-
- op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- rc = PTR_ERR(op_data);
- goto out;
- }
-
- /* To tell the MDT this openhandle is from the same owner */
- op_data->op_handle = old_handle;
-
- it.it_flags = fmode | open_flags;
- it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
- rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
- &ll_md_blocking_lease_ast,
- /* LDLM_FL_NO_LRU: To not put the lease lock into LRU list, otherwise
- * it can be cancelled which may mislead applications that the lease is
- * broken;
- * LDLM_FL_EXCL: Set this flag so that it won't be matched by normal
- * open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast
- * doesn't deal with openhandle, so normal openhandle will be leaked.
- */
- LDLM_FL_NO_LRU | LDLM_FL_EXCL);
- ll_finish_md_op_data(op_data);
- ptlrpc_req_finished(req);
- if (rc < 0)
- goto out_release_it;
-
- if (it_disposition(&it, DISP_LOOKUP_NEG)) {
- rc = -ENOENT;
- goto out_release_it;
- }
-
- rc = it_open_error(DISP_OPEN_OPEN, &it);
- if (rc)
- goto out_release_it;
-
- LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
- ll_och_fill(sbi->ll_md_exp, &it, och);
-
- if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */ {
- rc = -EOPNOTSUPP;
- goto out_close;
- }
-
- /* already get lease, handle lease lock */
- ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- if (it.it_lock_mode == 0 ||
- it.it_lock_bits != MDS_INODELOCK_OPEN) {
- /* open lock must return for lease */
- CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
- PFID(ll_inode2fid(inode)), it.it_lock_mode,
- it.it_lock_bits);
- rc = -EPROTO;
- goto out_close;
- }
-
- ll_intent_release(&it);
- return och;
-
-out_close:
- /* Cancel open lock */
- if (it.it_lock_mode != 0) {
- ldlm_lock_decref_and_cancel(&och->och_lease_handle,
- it.it_lock_mode);
- it.it_lock_mode = 0;
- och->och_lease_handle.cookie = 0ULL;
- }
- rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
- if (rc2 < 0)
- CERROR("%s: error closing file " DFID ": %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&ll_i2info(inode)->lli_fid), rc2);
- och = NULL; /* och has been freed in ll_close_inode_openhandle() */
-out_release_it:
- ll_intent_release(&it);
-out:
- kfree(och);
- return ERR_PTR(rc);
-}
-
-/**
- * Check whether a layout swap can be done between two inodes.
- *
- * \param[in] inode1 First inode to check
- * \param[in] inode2 Second inode to check
- *
- * \retval 0 on success, layout swap can be performed between both inodes
- * \retval negative error code if requirements are not met
- */
-static int ll_check_swap_layouts_validity(struct inode *inode1,
- struct inode *inode2)
-{
- if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
- return -EINVAL;
-
- if (inode_permission(inode1, MAY_WRITE) ||
- inode_permission(inode2, MAY_WRITE))
- return -EPERM;
-
- if (inode1->i_sb != inode2->i_sb)
- return -EXDEV;
-
- return 0;
-}
-
-static int ll_swap_layouts_close(struct obd_client_handle *och,
- struct inode *inode, struct inode *inode2)
-{
- const struct lu_fid *fid1 = ll_inode2fid(inode);
- const struct lu_fid *fid2;
- int rc;
-
- CDEBUG(D_INODE, "%s: biased close of file " DFID "\n",
- ll_get_fsname(inode->i_sb, NULL, 0), PFID(fid1));
-
- rc = ll_check_swap_layouts_validity(inode, inode2);
- if (rc < 0)
- goto out_free_och;
-
- /* We now know that inode2 is a lustre inode */
- fid2 = ll_inode2fid(inode2);
-
- rc = lu_fid_cmp(fid1, fid2);
- if (!rc) {
- rc = -EINVAL;
- goto out_free_och;
- }
-
- /*
- * Close the file and swap layouts between inode & inode2.
- * NB: lease lock handle is released in mdc_close_layout_swap_pack()
- * because we still need it to pack l_remote_handle to MDT.
- */
- rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP,
- inode2);
-
- och = NULL; /* freed in ll_close_inode_openhandle() */
-
-out_free_och:
- kfree(och);
- return rc;
-}
-
-/**
- * Release lease and close the file.
- * It will check if the lease has ever broken.
- */
-static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
- bool *lease_broken)
-{
- struct ldlm_lock *lock;
- bool cancelled = true;
-
- lock = ldlm_handle2lock(&och->och_lease_handle);
- if (lock) {
- lock_res_and_lock(lock);
- cancelled = ldlm_is_cancel(lock);
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- }
-
- CDEBUG(D_INODE, "lease for " DFID " broken? %d\n",
- PFID(&ll_i2info(inode)->lli_fid), cancelled);
-
- if (!cancelled)
- ldlm_cli_cancel(&och->och_lease_handle, 0);
- if (lease_broken)
- *lease_broken = cancelled;
-
- return ll_close_inode_openhandle(inode, och, 0, NULL);
-}
-
-int ll_merge_attr(const struct lu_env *env, struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_object *obj = lli->lli_clob;
- struct cl_attr *attr = vvp_env_thread_attr(env);
- s64 atime;
- s64 mtime;
- s64 ctime;
- int rc = 0;
-
- ll_inode_size_lock(inode);
-
- /* merge timestamps the most recently obtained from mds with
- * timestamps obtained from osts
- */
- LTIME_S(inode->i_atime) = lli->lli_atime;
- LTIME_S(inode->i_mtime) = lli->lli_mtime;
- LTIME_S(inode->i_ctime) = lli->lli_ctime;
-
- mtime = LTIME_S(inode->i_mtime);
- atime = LTIME_S(inode->i_atime);
- ctime = LTIME_S(inode->i_ctime);
-
- cl_object_attr_lock(obj);
- rc = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
-
- if (rc != 0)
- goto out_size_unlock;
-
- if (atime < attr->cat_atime)
- atime = attr->cat_atime;
-
- if (ctime < attr->cat_ctime)
- ctime = attr->cat_ctime;
-
- if (mtime < attr->cat_mtime)
- mtime = attr->cat_mtime;
-
- CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
- PFID(&lli->lli_fid), attr->cat_size);
-
- i_size_write(inode, attr->cat_size);
-
- inode->i_blocks = attr->cat_blocks;
-
- LTIME_S(inode->i_mtime) = mtime;
- LTIME_S(inode->i_atime) = atime;
- LTIME_S(inode->i_ctime) = ctime;
-
-out_size_unlock:
- ll_inode_size_unlock(inode);
-
- return rc;
-}
-
-static bool file_is_noatime(const struct file *file)
-{
- const struct vfsmount *mnt = file->f_path.mnt;
- const struct inode *inode = file_inode(file);
-
- /* Adapted from file_accessed() and touch_atime().*/
- if (file->f_flags & O_NOATIME)
- return true;
-
- if (inode->i_flags & S_NOATIME)
- return true;
-
- if (IS_NOATIME(inode))
- return true;
-
- if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
- return true;
-
- if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
- return true;
-
- if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
- return true;
-
- return false;
-}
-
-static void ll_io_init(struct cl_io *io, const struct file *file, int write)
-{
- struct inode *inode = file_inode(file);
-
- io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
- if (write) {
- io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
- io->u.ci_wr.wr_sync = file->f_flags & O_SYNC ||
- file->f_flags & O_DIRECT ||
- IS_SYNC(inode);
- }
- io->ci_obj = ll_i2info(inode)->lli_clob;
- io->ci_lockreq = CILR_MAYBE;
- if (ll_file_nolock(file)) {
- io->ci_lockreq = CILR_NEVER;
- io->ci_no_srvlock = 1;
- } else if (file->f_flags & O_APPEND) {
- io->ci_lockreq = CILR_MANDATORY;
- }
-
- io->ci_noatime = file_is_noatime(file);
-}
-
-static ssize_t
-ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
- struct file *file, enum cl_io_type iot,
- loff_t *ppos, size_t count)
-{
- struct ll_inode_info *lli = ll_i2info(file_inode(file));
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct vvp_io *vio = vvp_env_io(env);
- struct range_lock range;
- struct cl_io *io;
- ssize_t result = 0;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n",
- file, iot, *ppos, count);
-
-restart:
- io = vvp_env_thread_io(env);
- ll_io_init(io, file, iot == CIT_WRITE);
-
- if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
- struct vvp_io *vio = vvp_env_io(env);
- bool range_locked = false;
-
- if (file->f_flags & O_APPEND)
- range_lock_init(&range, 0, LUSTRE_EOF);
- else
- range_lock_init(&range, *ppos, *ppos + count - 1);
-
- vio->vui_fd = LUSTRE_FPRIVATE(file);
- vio->vui_iter = args->u.normal.via_iter;
- vio->vui_iocb = args->u.normal.via_iocb;
- /*
- * Direct IO reads must also take range lock,
- * or multiple reads will try to work on the same pages
- * See LU-6227 for details.
- */
- if (((iot == CIT_WRITE) ||
- (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
- !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
- range.rl_node.in_extent.start,
- range.rl_node.in_extent.end);
- rc = range_lock(&lli->lli_write_tree, &range);
- if (rc < 0)
- goto out;
-
- range_locked = true;
- }
- ll_cl_add(file, env, io);
- rc = cl_io_loop(env, io);
- ll_cl_remove(file, env);
- if (range_locked) {
- CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n",
- range.rl_node.in_extent.start,
- range.rl_node.in_extent.end);
- range_unlock(&lli->lli_write_tree, &range);
- }
- } else {
- /* cl_io_rw_init() handled IO */
- rc = io->ci_result;
- }
-
- if (io->ci_nob > 0) {
- result = io->ci_nob;
- count -= io->ci_nob;
- *ppos = io->u.ci_wr.wr.crw_pos;
-
- /* prepare IO restart */
- if (count > 0)
- args->u.normal.via_iter = vio->vui_iter;
- }
-out:
- cl_io_fini(env, io);
-
- if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
- CDEBUG(D_VFSTRACE,
- "%s: restart %s from %lld, count:%zu, result: %zd\n",
- file_dentry(file)->d_name.name,
- iot == CIT_READ ? "read" : "write",
- *ppos, count, result);
- goto restart;
- }
-
- if (iot == CIT_READ) {
- if (result >= 0)
- ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
- LPROC_LL_READ_BYTES, result);
- } else if (iot == CIT_WRITE) {
- if (result >= 0) {
- ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
- LPROC_LL_WRITE_BYTES, result);
- fd->fd_write_failed = false;
- } else if (!result && !rc) {
- rc = io->ci_result;
- if (rc < 0)
- fd->fd_write_failed = true;
- else
- fd->fd_write_failed = false;
- } else if (rc != -ERESTARTSYS) {
- fd->fd_write_failed = true;
- }
- }
- CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
-
- return result > 0 ? result : rc;
-}
-
-static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
- struct lu_env *env;
- struct vvp_io_args *args;
- ssize_t result;
- u16 refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- args = ll_env_args(env);
- args->u.normal.via_iter = to;
- args->u.normal.via_iocb = iocb;
-
- result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
- &iocb->ki_pos, iov_iter_count(to));
- cl_env_put(env, &refcheck);
- return result;
-}
-
-/*
- * Write to a file (through the page cache).
- */
-static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct lu_env *env;
- struct vvp_io_args *args;
- ssize_t result;
- u16 refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- args = ll_env_args(env);
- args->u.normal.via_iter = from;
- args->u.normal.via_iocb = iocb;
-
- result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
- &iocb->ki_pos, iov_iter_count(from));
- cl_env_put(env, &refcheck);
- return result;
-}
-
-int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- __u64 flags, struct lov_user_md *lum,
- int lum_size)
-{
- struct lookup_intent oit = {
- .it_op = IT_OPEN,
- .it_flags = flags | MDS_OPEN_BY_FID,
- };
- int rc = 0;
-
- ll_inode_size_lock(inode);
- rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
- if (rc < 0)
- goto out_unlock;
-
- ll_release_openhandle(inode, &oit);
-
-out_unlock:
- ll_inode_size_unlock(inode);
- ll_intent_release(&oit);
- return rc;
-}
-
-int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
- struct lov_mds_md **lmmp, int *lmm_size,
- struct ptlrpc_request **request)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct mdt_body *body;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *req = NULL;
- struct md_op_data *op_data;
- int rc, lmmsize;
-
- rc = ll_get_default_mdsize(sbi, &lmmsize);
- if (rc)
- return rc;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
- strlen(filename), lmmsize,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
- rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
- filename, rc);
- goto out;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- lmmsize = body->mbo_eadatasize;
-
- if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
- lmmsize == 0) {
- rc = -ENODATA;
- goto out;
- }
-
- lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
-
- if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
- (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) {
- rc = -EPROTO;
- goto out;
- }
-
- /*
- * This is coming from the MDS, so is probably in
- * little endian. We convert it to host endian before
- * passing it to userspace.
- */
- if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
- int stripe_count;
-
- stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
- if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
- stripe_count = 0;
-
- /* if function called for directory - we should
- * avoid swab not existent lsm objects
- */
- if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
- if (S_ISREG(body->mbo_mode))
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v1 *)lmm)->lmm_objects,
- stripe_count);
- } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
- lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
- if (S_ISREG(body->mbo_mode))
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v3 *)lmm)->lmm_objects,
- stripe_count);
- }
- }
-
-out:
- *lmmp = lmm;
- *lmm_size = lmmsize;
- *request = req;
- return rc;
-}
-
-static int ll_lov_setea(struct inode *inode, struct file *file,
- unsigned long arg)
-{
- __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
- struct lov_user_md *lump;
- int lum_size = sizeof(struct lov_user_md) +
- sizeof(struct lov_user_ost_data);
- int rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- lump = kzalloc(lum_size, GFP_NOFS);
- if (!lump)
- return -ENOMEM;
-
- if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size)) {
- kvfree(lump);
- return -EFAULT;
- }
-
- rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lump,
- lum_size);
- cl_lov_delay_create_clear(&file->f_flags);
-
- kvfree(lump);
- return rc;
-}
-
-static int ll_file_getstripe(struct inode *inode,
- struct lov_user_md __user *lum)
-{
- struct lu_env *env;
- u16 refcheck;
- int rc;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum);
- cl_env_put(env, &refcheck);
- return rc;
-}
-
-static int ll_lov_setstripe(struct inode *inode, struct file *file,
- unsigned long arg)
-{
- struct lov_user_md __user *lum = (struct lov_user_md __user *)arg;
- struct lov_user_md *klum;
- int lum_size, rc;
- __u64 flags = FMODE_WRITE;
-
- rc = ll_copy_user_md(lum, &klum);
- if (rc < 0)
- return rc;
-
- lum_size = rc;
- rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, klum,
- lum_size);
- cl_lov_delay_create_clear(&file->f_flags);
- if (rc == 0) {
- __u32 gen;
-
- put_user(0, &lum->lmm_stripe_count);
-
- ll_layout_refresh(inode, &gen);
- rc = ll_file_getstripe(inode, (struct lov_user_md __user *)arg);
- }
-
- kfree(klum);
- return rc;
-}
-
-static int
-ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_grouplock grouplock;
- int rc;
-
- if (arg == 0) {
- CWARN("group id for group lock must not be 0\n");
- return -EINVAL;
- }
-
- if (ll_file_nolock(file))
- return -EOPNOTSUPP;
-
- spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- CWARN("group lock already existed with gid %lu\n",
- fd->fd_grouplock.lg_gid);
- spin_unlock(&lli->lli_lock);
- return -EINVAL;
- }
- LASSERT(!fd->fd_grouplock.lg_lock);
- spin_unlock(&lli->lli_lock);
-
- rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
- arg, (file->f_flags & O_NONBLOCK), &grouplock);
- if (rc)
- return rc;
-
- spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- spin_unlock(&lli->lli_lock);
- CERROR("another thread just won the race\n");
- cl_put_grouplock(&grouplock);
- return -EINVAL;
- }
-
- fd->fd_flags |= LL_FILE_GROUP_LOCKED;
- fd->fd_grouplock = grouplock;
- spin_unlock(&lli->lli_lock);
-
- CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
- return 0;
-}
-
-static int ll_put_grouplock(struct inode *inode, struct file *file,
- unsigned long arg)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_grouplock grouplock;
-
- spin_lock(&lli->lli_lock);
- if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- spin_unlock(&lli->lli_lock);
- CWARN("no group lock held\n");
- return -EINVAL;
- }
- LASSERT(fd->fd_grouplock.lg_lock);
-
- if (fd->fd_grouplock.lg_gid != arg) {
- CWARN("group lock %lu doesn't match current id %lu\n",
- arg, fd->fd_grouplock.lg_gid);
- spin_unlock(&lli->lli_lock);
- return -EINVAL;
- }
-
- grouplock = fd->fd_grouplock;
- memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
- fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
- spin_unlock(&lli->lli_lock);
-
- cl_put_grouplock(&grouplock);
- CDEBUG(D_INFO, "group lock %lu released\n", arg);
- return 0;
-}
-
-/**
- * Close inode open handle
- *
- * \param inode [in] inode in question
- * \param it [in,out] intent which contains open info and result
- *
- * \retval 0 success
- * \retval <0 failure
- */
-int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
-{
- struct obd_client_handle *och;
- int rc;
-
- LASSERT(inode);
-
- /* Root ? Do nothing. */
- if (is_root_inode(inode))
- return 0;
-
- /* No open handle to close? Move away */
- if (!it_disposition(it, DISP_OPEN_OPEN))
- return 0;
-
- LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
-
- och = kzalloc(sizeof(*och), GFP_NOFS);
- if (!och) {
- rc = -ENOMEM;
- goto out;
- }
-
- ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
-
- rc = ll_close_inode_openhandle(inode, och, 0, NULL);
-out:
- /* this one is in place of ll_file_open */
- if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
- ptlrpc_req_finished(it->it_request);
- it_clear_disposition(it, DISP_ENQ_OPEN_REF);
- }
- return rc;
-}
-
-/**
- * Get size for inode for which FIEMAP mapping is requested.
- * Make the FIEMAP get_info call and returns the result.
- *
- * \param fiemap kernel buffer to hold extens
- * \param num_bytes kernel buffer size
- */
-static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
- size_t num_bytes)
-{
- struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
- struct lu_env *env;
- u16 refcheck;
- int rc = 0;
-
- /* Checks for fiemap flags */
- if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
- fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
- return -EBADR;
- }
-
- /* Check for FIEMAP_FLAG_SYNC */
- if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
- rc = filemap_fdatawrite(inode->i_mapping);
- if (rc)
- return rc;
- }
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- if (i_size_read(inode) == 0) {
- rc = ll_glimpse_size(inode);
- if (rc)
- goto out;
- }
-
- fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
- obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
- obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
-
- /* If filesize is 0, then there would be no objects for mapping */
- if (fmkey.lfik_oa.o_size == 0) {
- fiemap->fm_mapped_extents = 0;
- rc = 0;
- goto out;
- }
-
- memcpy(&fmkey.lfik_fiemap, fiemap, sizeof(*fiemap));
-
- rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
- &fmkey, fiemap, &num_bytes);
-out:
- cl_env_put(env, &refcheck);
- return rc;
-}
-
-int ll_fid2path(struct inode *inode, void __user *arg)
-{
- struct obd_export *exp = ll_i2mdexp(inode);
- const struct getinfo_fid2path __user *gfin = arg;
- struct getinfo_fid2path *gfout;
- u32 pathlen;
- size_t outsize;
- int rc;
-
- if (!capable(CAP_DAC_READ_SEARCH) &&
- !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
- return -EPERM;
-
- /* Only need to get the buflen */
- if (get_user(pathlen, &gfin->gf_pathlen))
- return -EFAULT;
-
- if (pathlen > PATH_MAX)
- return -EINVAL;
-
- outsize = sizeof(*gfout) + pathlen;
-
- gfout = kzalloc(outsize, GFP_NOFS);
- if (!gfout)
- return -ENOMEM;
-
- if (copy_from_user(gfout, arg, sizeof(*gfout))) {
- rc = -EFAULT;
- goto gf_free;
- }
-
- /* Call mdc_iocontrol */
- rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
- if (rc != 0)
- goto gf_free;
-
- if (copy_to_user(arg, gfout, outsize))
- rc = -EFAULT;
-
-gf_free:
- kfree(gfout);
- return rc;
-}
-
-/*
- * Read the data_version for inode.
- *
- * This value is computed using stripe object version on OST.
- * Version is computed using server side locking.
- *
- * @param flags if do sync on the OST side;
- * 0: no sync
- * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
- * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
- */
-int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
-{
- struct cl_object *obj = ll_i2info(inode)->lli_clob;
- struct lu_env *env;
- struct cl_io *io;
- u16 refcheck;
- int result;
-
- /* If no file object initialized, we consider its version is 0. */
- if (!obj) {
- *data_version = 0;
- return 0;
- }
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = vvp_env_thread_io(env);
- io->ci_obj = obj;
- io->u.ci_data_version.dv_data_version = 0;
- io->u.ci_data_version.dv_flags = flags;
-
-restart:
- if (!cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj))
- result = cl_io_loop(env, io);
- else
- result = io->ci_result;
-
- *data_version = io->u.ci_data_version.dv_data_version;
-
- cl_io_fini(env, io);
-
- if (unlikely(io->ci_need_restart))
- goto restart;
-
- cl_env_put(env, &refcheck);
-
- return result;
-}
-
-/*
- * Trigger a HSM release request for the provided inode.
- */
-int ll_hsm_release(struct inode *inode)
-{
- struct lu_env *env;
- struct obd_client_handle *och = NULL;
- __u64 data_version = 0;
- int rc;
- u16 refcheck;
-
- CDEBUG(D_INODE, "%s: Releasing file " DFID ".\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&ll_i2info(inode)->lli_fid));
-
- och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
- if (IS_ERR(och)) {
- rc = PTR_ERR(och);
- goto out;
- }
-
- /* Grab latest data_version and [am]time values */
- rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
- if (rc != 0)
- goto out;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- rc = PTR_ERR(env);
- goto out;
- }
-
- ll_merge_attr(env, inode);
- cl_env_put(env, &refcheck);
-
- /* Release the file.
- * NB: lease lock handle is released in mdc_hsm_release_pack() because
- * we still need it to pack l_remote_handle to MDT.
- */
- rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE,
- &data_version);
- och = NULL;
-
-out:
- if (och && !IS_ERR(och)) /* close the file */
- ll_lease_close(och, inode, NULL);
-
- return rc;
-}
-
-struct ll_swap_stack {
- u64 dv1;
- u64 dv2;
- struct inode *inode1;
- struct inode *inode2;
- bool check_dv1;
- bool check_dv2;
-};
-
-static int ll_swap_layouts(struct file *file1, struct file *file2,
- struct lustre_swap_layouts *lsl)
-{
- struct mdc_swap_layouts msl;
- struct md_op_data *op_data;
- __u32 gid;
- __u64 dv;
- struct ll_swap_stack *llss = NULL;
- int rc;
-
- llss = kzalloc(sizeof(*llss), GFP_NOFS);
- if (!llss)
- return -ENOMEM;
-
- llss->inode1 = file_inode(file1);
- llss->inode2 = file_inode(file2);
-
- rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
- if (rc < 0)
- goto free;
-
- /* we use 2 bool because it is easier to swap than 2 bits */
- if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
- llss->check_dv1 = true;
-
- if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2)
- llss->check_dv2 = true;
-
- /* we cannot use lsl->sl_dvX directly because we may swap them */
- llss->dv1 = lsl->sl_dv1;
- llss->dv2 = lsl->sl_dv2;
-
- rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
- if (!rc) /* same file, done! */
- goto free;
-
- if (rc < 0) { /* sequentialize it */
- swap(llss->inode1, llss->inode2);
- swap(file1, file2);
- swap(llss->dv1, llss->dv2);
- swap(llss->check_dv1, llss->check_dv2);
- }
-
- gid = lsl->sl_gid;
- if (gid != 0) { /* application asks to flush dirty cache */
- rc = ll_get_grouplock(llss->inode1, file1, gid);
- if (rc < 0)
- goto free;
-
- rc = ll_get_grouplock(llss->inode2, file2, gid);
- if (rc < 0) {
- ll_put_grouplock(llss->inode1, file1, gid);
- goto free;
- }
- }
-
- /* ultimate check, before swapping the layouts we check if
- * dataversion has changed (if requested)
- */
- if (llss->check_dv1) {
- rc = ll_data_version(llss->inode1, &dv, 0);
- if (rc)
- goto putgl;
- if (dv != llss->dv1) {
- rc = -EAGAIN;
- goto putgl;
- }
- }
-
- if (llss->check_dv2) {
- rc = ll_data_version(llss->inode2, &dv, 0);
- if (rc)
- goto putgl;
- if (dv != llss->dv2) {
- rc = -EAGAIN;
- goto putgl;
- }
- }
-
- /* struct md_op_data is used to send the swap args to the mdt
- * only flags is missing, so we use struct mdc_swap_layouts
- * through the md_op_data->op_data
- */
- /* flags from user space have to be converted before they are send to
- * server, no flag is sent today, they are only used on the client
- */
- msl.msl_flags = 0;
- rc = -ENOMEM;
- op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
- 0, LUSTRE_OPC_ANY, &msl);
- if (IS_ERR(op_data)) {
- rc = PTR_ERR(op_data);
- goto free;
- }
-
- rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
- sizeof(*op_data), op_data, NULL);
- ll_finish_md_op_data(op_data);
-
-putgl:
- if (gid != 0) {
- ll_put_grouplock(llss->inode2, file2, gid);
- ll_put_grouplock(llss->inode1, file1, gid);
- }
-
-free:
- kfree(llss);
-
- return rc;
-}
-
-int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
-{
- struct md_op_data *op_data;
- int rc;
-
- /* Detect out-of range masks */
- if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK)
- return -EINVAL;
-
- /* Non-root users are forbidden to set or clear flags which are
- * NOT defined in HSM_USER_MASK.
- */
- if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
- !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- /* Detect out-of range archive id */
- if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
- (hss->hss_archive_id > LL_HSM_MAX_ARCHIVE))
- return -EINVAL;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, hss);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, ll_i2mdexp(inode),
- sizeof(*op_data), op_data, NULL);
-
- ll_finish_md_op_data(op_data);
-
- return rc;
-}
-
-static int ll_hsm_import(struct inode *inode, struct file *file,
- struct hsm_user_import *hui)
-{
- struct hsm_state_set *hss = NULL;
- struct iattr *attr = NULL;
- int rc;
-
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
-
- /* set HSM flags */
- hss = kzalloc(sizeof(*hss), GFP_NOFS);
- if (!hss)
- return -ENOMEM;
-
- hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
- hss->hss_archive_id = hui->hui_archive_id;
- hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
- rc = ll_hsm_state_set(inode, hss);
- if (rc != 0)
- goto free_hss;
-
- attr = kzalloc(sizeof(*attr), GFP_NOFS);
- if (!attr) {
- rc = -ENOMEM;
- goto free_hss;
- }
-
- attr->ia_mode = hui->hui_mode & 0777;
- attr->ia_mode |= S_IFREG;
- attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
- attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
- attr->ia_size = hui->hui_size;
- attr->ia_mtime.tv_sec = hui->hui_mtime;
- attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
- attr->ia_atime.tv_sec = hui->hui_atime;
- attr->ia_atime.tv_nsec = hui->hui_atime_ns;
-
- attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
- ATTR_UID | ATTR_GID |
- ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_ATIME | ATTR_ATIME_SET;
-
- inode_lock(inode);
-
- rc = ll_setattr_raw(file->f_path.dentry, attr, true);
- if (rc == -ENODATA)
- rc = 0;
-
- inode_unlock(inode);
-
- kfree(attr);
-free_hss:
- kfree(hss);
- return rc;
-}
-
-static inline long ll_lease_type_from_fmode(fmode_t fmode)
-{
- return ((fmode & FMODE_READ) ? LL_LEASE_RDLCK : 0) |
- ((fmode & FMODE_WRITE) ? LL_LEASE_WRLCK : 0);
-}
-
-static long
-ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- int flags, rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p),cmd=%x\n",
- PFID(ll_inode2fid(inode)), inode, cmd);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
-
- /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
- if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
- return -ENOTTY;
-
- switch (cmd) {
- case LL_IOC_GETFLAGS:
- /* Get the current value of the file flags */
- return put_user(fd->fd_flags, (int __user *)arg);
- case LL_IOC_SETFLAGS:
- case LL_IOC_CLRFLAGS:
- /* Set or clear specific file flags */
- /* XXX This probably needs checks to ensure the flags are
- * not abused, and to handle any flag side effects.
- */
- if (get_user(flags, (int __user *)arg))
- return -EFAULT;
-
- if (cmd == LL_IOC_SETFLAGS) {
- if ((flags & LL_FILE_IGNORE_LOCK) &&
- !(file->f_flags & O_DIRECT)) {
- CERROR("%s: unable to disable locking on non-O_DIRECT file\n",
- current->comm);
- return -EINVAL;
- }
-
- fd->fd_flags |= flags;
- } else {
- fd->fd_flags &= ~flags;
- }
- return 0;
- case LL_IOC_LOV_SETSTRIPE:
- return ll_lov_setstripe(inode, file, arg);
- case LL_IOC_LOV_SETEA:
- return ll_lov_setea(inode, file, arg);
- case LL_IOC_LOV_SWAP_LAYOUTS: {
- struct file *file2;
- struct lustre_swap_layouts lsl;
-
- if (copy_from_user(&lsl, (char __user *)arg,
- sizeof(struct lustre_swap_layouts)))
- return -EFAULT;
-
- if ((file->f_flags & O_ACCMODE) == O_RDONLY)
- return -EPERM;
-
- file2 = fget(lsl.sl_fd);
- if (!file2)
- return -EBADF;
-
- /* O_WRONLY or O_RDWR */
- if ((file2->f_flags & O_ACCMODE) == O_RDONLY) {
- rc = -EPERM;
- goto out;
- }
-
- if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
- struct obd_client_handle *och = NULL;
- struct ll_inode_info *lli;
- struct inode *inode2;
-
- if (lsl.sl_flags != SWAP_LAYOUTS_CLOSE) {
- rc = -EINVAL;
- goto out;
- }
-
- lli = ll_i2info(inode);
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och) {
- och = fd->fd_lease_och;
- fd->fd_lease_och = NULL;
- }
- mutex_unlock(&lli->lli_och_mutex);
- if (!och) {
- rc = -ENOLCK;
- goto out;
- }
- inode2 = file_inode(file2);
- rc = ll_swap_layouts_close(och, inode, inode2);
- } else {
- rc = ll_swap_layouts(file, file2, &lsl);
- }
-out:
- fput(file2);
- return rc;
- }
- case LL_IOC_LOV_GETSTRIPE:
- return ll_file_getstripe(inode,
- (struct lov_user_md __user *)arg);
- case FSFILT_IOC_GETFLAGS:
- case FSFILT_IOC_SETFLAGS:
- return ll_iocontrol(inode, file, cmd, arg);
- case FSFILT_IOC_GETVERSION_OLD:
- case FSFILT_IOC_GETVERSION:
- return put_user(inode->i_generation, (int __user *)arg);
- case LL_IOC_GROUP_LOCK:
- return ll_get_grouplock(inode, file, arg);
- case LL_IOC_GROUP_UNLOCK:
- return ll_put_grouplock(inode, file, arg);
- case IOC_OBD_STATFS:
- return ll_obd_statfs(inode, (void __user *)arg);
-
- /* We need to special case any other ioctls we want to handle,
- * to send them to the MDS/OST as appropriate and to properly
- * network encode the arg field.
- case FSFILT_IOC_SETVERSION_OLD:
- case FSFILT_IOC_SETVERSION:
- */
- case LL_IOC_FLUSHCTX:
- return ll_flush_ctx(inode);
- case LL_IOC_PATH2FID: {
- if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
- sizeof(struct lu_fid)))
- return -EFAULT;
-
- return 0;
- }
- case LL_IOC_GETPARENT:
- return ll_getparent(file, (struct getparent __user *)arg);
- case OBD_IOC_FID2PATH:
- return ll_fid2path(inode, (void __user *)arg);
- case LL_IOC_DATA_VERSION: {
- struct ioc_data_version idv;
- int rc;
-
- if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
- return -EFAULT;
-
- idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
- rc = ll_data_version(inode, &idv.idv_version, idv.idv_flags);
- if (rc == 0 && copy_to_user((char __user *)arg, &idv,
- sizeof(idv)))
- return -EFAULT;
-
- return rc;
- }
-
- case LL_IOC_GET_MDTIDX: {
- int mdtidx;
-
- mdtidx = ll_get_mdt_idx(inode);
- if (mdtidx < 0)
- return mdtidx;
-
- if (put_user(mdtidx, (int __user *)arg))
- return -EFAULT;
-
- return 0;
- }
- case OBD_IOC_GETDTNAME:
- case OBD_IOC_GETMDNAME:
- return ll_get_obd_name(inode, cmd, arg);
- case LL_IOC_HSM_STATE_GET: {
- struct md_op_data *op_data;
- struct hsm_user_state *hus;
- int rc;
-
- hus = kzalloc(sizeof(*hus), GFP_NOFS);
- if (!hus)
- return -ENOMEM;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, hus);
- if (IS_ERR(op_data)) {
- kfree(hus);
- return PTR_ERR(op_data);
- }
-
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
- op_data, NULL);
-
- if (copy_to_user((void __user *)arg, hus, sizeof(*hus)))
- rc = -EFAULT;
-
- ll_finish_md_op_data(op_data);
- kfree(hus);
- return rc;
- }
- case LL_IOC_HSM_STATE_SET: {
- struct hsm_state_set *hss;
- int rc;
-
- hss = memdup_user((char __user *)arg, sizeof(*hss));
- if (IS_ERR(hss))
- return PTR_ERR(hss);
-
- rc = ll_hsm_state_set(inode, hss);
-
- kfree(hss);
- return rc;
- }
- case LL_IOC_HSM_ACTION: {
- struct md_op_data *op_data;
- struct hsm_current_action *hca;
- int rc;
-
- hca = kzalloc(sizeof(*hca), GFP_NOFS);
- if (!hca)
- return -ENOMEM;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, hca);
- if (IS_ERR(op_data)) {
- kfree(hca);
- return PTR_ERR(op_data);
- }
-
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
- op_data, NULL);
-
- if (copy_to_user((char __user *)arg, hca, sizeof(*hca)))
- rc = -EFAULT;
-
- ll_finish_md_op_data(op_data);
- kfree(hca);
- return rc;
- }
- case LL_IOC_SET_LEASE: {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_client_handle *och = NULL;
- bool lease_broken;
- fmode_t fmode;
-
- switch (arg) {
- case LL_LEASE_WRLCK:
- if (!(file->f_mode & FMODE_WRITE))
- return -EPERM;
- fmode = FMODE_WRITE;
- break;
- case LL_LEASE_RDLCK:
- if (!(file->f_mode & FMODE_READ))
- return -EPERM;
- fmode = FMODE_READ;
- break;
- case LL_LEASE_UNLCK:
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och) {
- och = fd->fd_lease_och;
- fd->fd_lease_och = NULL;
- }
- mutex_unlock(&lli->lli_och_mutex);
-
- if (!och)
- return -ENOLCK;
-
- fmode = och->och_flags;
- rc = ll_lease_close(och, inode, &lease_broken);
- if (rc < 0)
- return rc;
-
- if (lease_broken)
- fmode = 0;
-
- return ll_lease_type_from_fmode(fmode);
- default:
- return -EINVAL;
- }
-
- CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
-
- /* apply for lease */
- och = ll_lease_open(inode, file, fmode, 0);
- if (IS_ERR(och))
- return PTR_ERR(och);
-
- rc = 0;
- mutex_lock(&lli->lli_och_mutex);
- if (!fd->fd_lease_och) {
- fd->fd_lease_och = och;
- och = NULL;
- }
- mutex_unlock(&lli->lli_och_mutex);
- if (och) {
- /* impossible now that only excl is supported for now */
- ll_lease_close(och, inode, &lease_broken);
- rc = -EBUSY;
- }
- return rc;
- }
- case LL_IOC_GET_LEASE: {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ldlm_lock *lock = NULL;
- fmode_t fmode = 0;
-
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och) {
- struct obd_client_handle *och = fd->fd_lease_och;
-
- lock = ldlm_handle2lock(&och->och_lease_handle);
- if (lock) {
- lock_res_and_lock(lock);
- if (!ldlm_is_cancel(lock))
- fmode = och->och_flags;
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- }
- }
- mutex_unlock(&lli->lli_och_mutex);
- return ll_lease_type_from_fmode(fmode);
- }
- case LL_IOC_HSM_IMPORT: {
- struct hsm_user_import *hui;
-
- hui = memdup_user((void __user *)arg, sizeof(*hui));
- if (IS_ERR(hui))
- return PTR_ERR(hui);
-
- rc = ll_hsm_import(inode, file, hui);
-
- kfree(hui);
- return rc;
- }
- default: {
- int err;
-
- if (ll_iocontrol_call(inode, file, cmd, arg, &err) ==
- LLIOC_STOP)
- return err;
-
- return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
- (void __user *)arg);
- }
- }
-}
-
-static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
-{
- struct inode *inode = file_inode(file);
- loff_t retval, eof = 0;
-
- retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
- (origin == SEEK_CUR) ? file->f_pos : 0);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), to=%llu=%#llx(%d)\n",
- PFID(ll_inode2fid(inode)), inode, retval, retval, origin);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
-
- if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
- retval = ll_glimpse_size(inode);
- if (retval != 0)
- return retval;
- eof = i_size_read(inode);
- }
-
- return generic_file_llseek_size(file, offset, origin,
- ll_file_maxbytes(inode), eof);
-}
-
-static int ll_flush(struct file *file, fl_owner_t id)
-{
- struct inode *inode = file_inode(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- int rc, err;
-
- LASSERT(!S_ISDIR(inode->i_mode));
-
- /* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping.
- */
- rc = lli->lli_async_rc;
- lli->lli_async_rc = 0;
- if (lli->lli_clob) {
- err = lov_read_and_clear_async_rc(lli->lli_clob);
- if (!rc)
- rc = err;
- }
-
- /* The application has been told about write failure already.
- * Do not report failure again.
- */
- if (fd->fd_write_failed)
- return 0;
- return rc ? -EIO : 0;
-}
-
-/**
- * Called to make sure a portion of file has been written out.
- * if @mode is not CL_FSYNC_LOCAL, it will send OST_SYNC RPCs to OST.
- *
- * Return how many pages have been written.
- */
-int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
- enum cl_fsync_mode mode, int ignore_layout)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct cl_fsync_io *fio;
- int result;
- u16 refcheck;
-
- if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
- mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
- return -EINVAL;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = vvp_env_thread_io(env);
- io->ci_obj = ll_i2info(inode)->lli_clob;
- io->ci_ignore_layout = ignore_layout;
-
- /* initialize parameters for sync */
- fio = &io->u.ci_fsync;
- fio->fi_start = start;
- fio->fi_end = end;
- fio->fi_fid = ll_inode2fid(inode);
- fio->fi_mode = mode;
- fio->fi_nr_written = 0;
-
- if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
- result = cl_io_loop(env, io);
- else
- result = io->ci_result;
- if (result == 0)
- result = fio->fi_nr_written;
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
-
- return result;
-}
-
-int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
-{
- struct inode *inode = file_inode(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ptlrpc_request *req;
- int rc, err;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
- PFID(ll_inode2fid(inode)), inode);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
-
- rc = file_write_and_wait_range(file, start, end);
- inode_lock(inode);
-
- /* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping.
- */
- if (!S_ISDIR(inode->i_mode)) {
- err = lli->lli_async_rc;
- lli->lli_async_rc = 0;
- if (rc == 0)
- rc = err;
- if (lli->lli_clob) {
- err = lov_read_and_clear_async_rc(lli->lli_clob);
- if (rc == 0)
- rc = err;
- }
- }
-
- err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
- if (!rc)
- rc = err;
- if (!err)
- ptlrpc_req_finished(req);
-
- if (S_ISREG(inode->i_mode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- err = cl_sync_file_range(inode, start, end, CL_FSYNC_ALL, 0);
- if (rc == 0 && err < 0)
- rc = err;
- if (rc < 0)
- fd->fd_write_failed = true;
- else
- fd->fd_write_failed = false;
- }
-
- inode_unlock(inode);
- return rc;
-}
-
-static int
-ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
-{
- struct inode *inode = file_inode(file);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_FLOCK,
- .ei_cb_cp = ldlm_flock_completion_ast,
- .ei_cbdata = file_lock,
- };
- struct md_op_data *op_data;
- struct lustre_handle lockh = {0};
- union ldlm_policy_data flock = { { 0 } };
- int fl_type = file_lock->fl_type;
- __u64 flags = 0;
- int rc;
- int rc2 = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID " file_lock=%p\n",
- PFID(ll_inode2fid(inode)), file_lock);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
-
- if (file_lock->fl_flags & FL_FLOCK)
- LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
- else if (!(file_lock->fl_flags & FL_POSIX))
- return -EINVAL;
-
- flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
- flock.l_flock.pid = file_lock->fl_pid;
- flock.l_flock.start = file_lock->fl_start;
- flock.l_flock.end = file_lock->fl_end;
-
- /* Somewhat ugly workaround for svc lockd.
- * lockd installs custom fl_lmops->lm_compare_owner that checks
- * for the fl_owner to be the same (which it always is on local node
- * I guess between lockd processes) and then compares pid.
- * As such we assign pid to the owner field to make it all work,
- * conflict with normal locks is unlikely since pid space and
- * pointer space for current->files are not intersecting
- */
- if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
- flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
-
- switch (fl_type) {
- case F_RDLCK:
- einfo.ei_mode = LCK_PR;
- break;
- case F_UNLCK:
- /* An unlock request may or may not have any relation to
- * existing locks so we may not be able to pass a lock handle
- * via a normal ldlm_lock_cancel() request. The request may even
- * unlock a byte range in the middle of an existing lock. In
- * order to process an unlock request we need all of the same
- * information that is given with a normal read or write record
- * lock request. To avoid creating another ldlm unlock (cancel)
- * message we'll treat a LCK_NL flock request as an unlock.
- */
- einfo.ei_mode = LCK_NL;
- break;
- case F_WRLCK:
- einfo.ei_mode = LCK_PW;
- break;
- default:
- CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n", fl_type);
- return -ENOTSUPP;
- }
-
- switch (cmd) {
- case F_SETLKW:
-#ifdef F_SETLKW64
- case F_SETLKW64:
-#endif
- flags = 0;
- break;
- case F_SETLK:
-#ifdef F_SETLK64
- case F_SETLK64:
-#endif
- flags = LDLM_FL_BLOCK_NOWAIT;
- break;
- case F_GETLK:
-#ifdef F_GETLK64
- case F_GETLK64:
-#endif
- flags = LDLM_FL_TEST_LOCK;
- break;
- default:
- CERROR("unknown fcntl lock command: %d\n", cmd);
- return -EINVAL;
- }
-
- /*
- * Save the old mode so that if the mode in the lock changes we
- * can decrement the appropriate reader or writer refcount.
- */
- file_lock->fl_type = einfo.ei_mode;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- CDEBUG(D_DLMTRACE, "inode=" DFID ", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
- PFID(ll_inode2fid(inode)), flock.l_flock.pid, flags,
- einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
-
- rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, NULL, op_data, &lockh,
- flags);
-
- /* Restore the file lock type if not TEST lock. */
- if (!(flags & LDLM_FL_TEST_LOCK))
- file_lock->fl_type = fl_type;
-
- if ((rc == 0 || file_lock->fl_type == F_UNLCK) &&
- !(flags & LDLM_FL_TEST_LOCK))
- rc2 = locks_lock_file_wait(file, file_lock);
-
- if (rc2 && file_lock->fl_type != F_UNLCK) {
- einfo.ei_mode = LCK_NL;
- md_enqueue(sbi->ll_md_exp, &einfo, &flock, NULL, op_data,
- &lockh, flags);
- rc = rc2;
- }
-
- ll_finish_md_op_data(op_data);
-
- return rc;
-}
-
-int ll_get_fid_by_name(struct inode *parent, const char *name,
- int namelen, struct lu_fid *fid,
- struct inode **inode)
-{
- struct md_op_data *op_data = NULL;
- struct ptlrpc_request *req;
- struct mdt_body *body;
- int rc;
-
- op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
- rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc < 0)
- return rc;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!body) {
- rc = -EFAULT;
- goto out_req;
- }
- if (fid)
- *fid = body->mbo_fid1;
-
- if (inode)
- rc = ll_prep_inode(inode, req, parent->i_sb, NULL);
-out_req:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
- const char *name, int namelen)
-{
- struct ptlrpc_request *request = NULL;
- struct obd_client_handle *och = NULL;
- struct inode *child_inode = NULL;
- struct dentry *dchild = NULL;
- struct md_op_data *op_data;
- struct mdt_body *body;
- u64 data_version = 0;
- struct qstr qstr;
- int rc;
-
- CDEBUG(D_VFSTRACE, "migrate %s under " DFID " to MDT%d\n",
- name, PFID(ll_inode2fid(parent)), mdtidx);
-
- op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- /* Get child FID first */
- qstr.hash = full_name_hash(parent, name, namelen);
- qstr.name = name;
- qstr.len = namelen;
- dchild = d_lookup(file_dentry(file), &qstr);
- if (dchild) {
- op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
- if (dchild->d_inode)
- child_inode = igrab(dchild->d_inode);
- dput(dchild);
- }
-
- if (!child_inode) {
- rc = ll_get_fid_by_name(parent, name, namelen,
- &op_data->op_fid3, &child_inode);
- if (rc)
- goto out_free;
- }
-
- if (!child_inode) {
- rc = -EINVAL;
- goto out_free;
- }
-
- inode_lock(child_inode);
- op_data->op_fid3 = *ll_inode2fid(child_inode);
- if (!fid_is_sane(&op_data->op_fid3)) {
- CERROR("%s: migrate %s, but fid " DFID " is insane\n",
- ll_get_fsname(parent->i_sb, NULL, 0), name,
- PFID(&op_data->op_fid3));
- rc = -EINVAL;
- goto out_unlock;
- }
-
- rc = ll_get_mdt_idx_by_fid(ll_i2sbi(parent), &op_data->op_fid3);
- if (rc < 0)
- goto out_unlock;
-
- if (rc == mdtidx) {
- CDEBUG(D_INFO, "%s: " DFID " is already on MDT%d.\n", name,
- PFID(&op_data->op_fid3), mdtidx);
- rc = 0;
- goto out_unlock;
- }
-again:
- if (S_ISREG(child_inode->i_mode)) {
- och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
- if (IS_ERR(och)) {
- rc = PTR_ERR(och);
- och = NULL;
- goto out_unlock;
- }
-
- rc = ll_data_version(child_inode, &data_version,
- LL_DV_WR_FLUSH);
- if (rc)
- goto out_close;
-
- op_data->op_handle = och->och_fh;
- op_data->op_data = och->och_mod;
- op_data->op_data_version = data_version;
- op_data->op_lease_handle = och->och_lease_handle;
- op_data->op_bias |= MDS_RENAME_MIGRATE;
- }
-
- op_data->op_mds = mdtidx;
- op_data->op_cli_flags = CLI_MIGRATE;
- rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name,
- namelen, name, namelen, &request);
- if (!rc) {
- LASSERT(request);
- ll_update_times(request, parent);
-
- body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- LASSERT(body);
-
- /*
- * If the server does release layout lock, then we cleanup
- * the client och here, otherwise release it in out_close:
- */
- if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
- obd_mod_put(och->och_mod);
- md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
- och);
- och->och_fh.cookie = DEAD_HANDLE_MAGIC;
- kfree(och);
- och = NULL;
- }
- }
-
- if (request) {
- ptlrpc_req_finished(request);
- request = NULL;
- }
-
- /* Try again if the file layout has changed. */
- if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
- goto again;
-
-out_close:
- if (och) /* close the file */
- ll_lease_close(och, child_inode, NULL);
- if (!rc)
- clear_nlink(child_inode);
-out_unlock:
- inode_unlock(child_inode);
- iput(child_inode);
-out_free:
- ll_finish_md_op_data(op_data);
- return rc;
-}
-
-static int
-ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
-{
- return -ENOSYS;
-}
-
-/**
- * test if some locks matching bits and l_req_mode are acquired
- * - bits can be in different locks
- * - if found clear the common lock bits in *bits
- * - the bits not found, are kept in *bits
- * \param inode [IN]
- * \param bits [IN] searched lock bits [IN]
- * \param l_req_mode [IN] searched lock mode
- * \retval boolean, true iff all bits are found
- */
-int ll_have_md_lock(struct inode *inode, __u64 *bits,
- enum ldlm_mode l_req_mode)
-{
- struct lustre_handle lockh;
- union ldlm_policy_data policy;
- enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
- (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
- struct lu_fid *fid;
- __u64 flags;
- int i;
-
- if (!inode)
- return 0;
-
- fid = &ll_i2info(inode)->lli_fid;
- CDEBUG(D_INFO, "trying to match res " DFID " mode %s\n", PFID(fid),
- ldlm_lockname[mode]);
-
- flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
- for (i = 0; i <= MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) {
- policy.l_inodebits.bits = *bits & (1 << i);
- if (policy.l_inodebits.bits == 0)
- continue;
-
- if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
- &policy, mode, &lockh)) {
- struct ldlm_lock *lock;
-
- lock = ldlm_handle2lock(&lockh);
- if (lock) {
- *bits &=
- ~(lock->l_policy_data.l_inodebits.bits);
- LDLM_LOCK_PUT(lock);
- } else {
- *bits &= ~policy.l_inodebits.bits;
- }
- }
- }
- return *bits == 0;
-}
-
-enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags,
- enum ldlm_mode mode)
-{
- union ldlm_policy_data policy = { .l_inodebits = { bits } };
- struct lu_fid *fid;
-
- fid = &ll_i2info(inode)->lli_fid;
- CDEBUG(D_INFO, "trying to match res " DFID "\n", PFID(fid));
-
- return md_lock_match(ll_i2mdexp(inode), flags | LDLM_FL_BLOCK_GRANTED,
- fid, LDLM_IBITS, &policy, mode, lockh);
-}
-
-static int ll_inode_revalidate_fini(struct inode *inode, int rc)
-{
- /* Already unlinked. Just update nlink and return success */
- if (rc == -ENOENT) {
- clear_nlink(inode);
- /* If it is striped directory, and there is bad stripe
- * Let's revalidate the dentry again, instead of returning
- * error
- */
- if (S_ISDIR(inode->i_mode) && ll_i2info(inode)->lli_lsm_md)
- return 0;
-
- /* This path cannot be hit for regular files unless in
- * case of obscure races, so no need to validate size.
- */
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return 0;
- } else if (rc != 0) {
- CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
- "%s: revalidate FID " DFID " error: rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), rc);
- }
-
- return rc;
-}
-
-static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
-{
- struct inode *inode = d_inode(dentry);
- struct ptlrpc_request *req = NULL;
- struct obd_export *exp;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p),name=%pd\n",
- PFID(ll_inode2fid(inode)), inode, dentry);
-
- exp = ll_i2mdexp(inode);
-
- /* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC.
- * But under CMD case, it caused some lock issues, should be fixed
- * with new CMD ibits lock. See bug 12718
- */
- if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) {
- struct lookup_intent oit = { .it_op = IT_GETATTR };
- struct md_op_data *op_data;
-
- if (ibits == MDS_INODELOCK_LOOKUP)
- oit.it_op = IT_LOOKUP;
-
- /* Call getattr by fid, so do not provide name at all. */
- op_data = ll_prep_md_op_data(NULL, inode,
- inode, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- rc = md_intent_lock(exp, op_data, &oit, &req,
- &ll_md_blocking_ast, 0);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- rc = ll_inode_revalidate_fini(inode, rc);
- goto out;
- }
-
- rc = ll_revalidate_it_finish(req, &oit, inode);
- if (rc != 0) {
- ll_intent_release(&oit);
- goto out;
- }
-
- /* Unlinked? Unhash dentry, so it is not picked up later by
- * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
- * here to preserve get_cwd functionality on 2.6.
- * Bug 10503
- */
- if (!d_inode(dentry)->i_nlink) {
- spin_lock(&inode->i_lock);
- d_lustre_invalidate(dentry, 0);
- spin_unlock(&inode->i_lock);
- }
-
- ll_lookup_finish_locks(&oit, inode);
- } else if (!ll_have_md_lock(d_inode(dentry), &ibits, LCK_MINMODE)) {
- struct ll_sb_info *sbi = ll_i2sbi(d_inode(dentry));
- u64 valid = OBD_MD_FLGETATTR;
- struct md_op_data *op_data;
- int ealen = 0;
-
- if (S_ISREG(inode->i_mode)) {
- rc = ll_get_default_mdsize(sbi, &ealen);
- if (rc)
- return rc;
- valid |= OBD_MD_FLEASIZE | OBD_MD_FLMODEASIZE;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
- 0, ealen, LUSTRE_OPC_ANY,
- NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = valid;
- rc = md_getattr(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc)
- return ll_inode_revalidate_fini(inode, rc);
-
- rc = ll_prep_inode(&inode, req, NULL, NULL);
- }
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int ll_merge_md_attr(struct inode *inode)
-{
- struct cl_attr attr = { 0 };
- int rc;
-
- LASSERT(ll_i2info(inode)->lli_lsm_md);
- rc = md_merge_attr(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
- &attr, ll_md_blocking_ast);
- if (rc)
- return rc;
-
- set_nlink(inode, attr.cat_nlink);
- inode->i_blocks = attr.cat_blocks;
- i_size_write(inode, attr.cat_size);
-
- ll_i2info(inode)->lli_atime = attr.cat_atime;
- ll_i2info(inode)->lli_mtime = attr.cat_mtime;
- ll_i2info(inode)->lli_ctime = attr.cat_ctime;
-
- return 0;
-}
-
-static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
-{
- struct inode *inode = d_inode(dentry);
- int rc;
-
- rc = __ll_inode_revalidate(dentry, ibits);
- if (rc != 0)
- return rc;
-
- /* if object isn't regular file, don't validate size */
- if (!S_ISREG(inode->i_mode)) {
- if (S_ISDIR(inode->i_mode) &&
- ll_i2info(inode)->lli_lsm_md) {
- rc = ll_merge_md_attr(inode);
- if (rc)
- return rc;
- }
-
- LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_atime;
- LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
- LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
- } else {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- /* In case of restore, the MDT has the right size and has
- * already send it back without granting the layout lock,
- * inode is up-to-date so glimpse is useless.
- * Also to glimpse we need the layout, in case of a running
- * restore the MDT holds the layout lock so the glimpse will
- * block up to the end of restore (getattr will block)
- */
- if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags))
- rc = ll_glimpse_size(inode);
- }
- return rc;
-}
-
-int ll_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
-{
- struct inode *inode = d_inode(path->dentry);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- int res;
-
- res = ll_inode_revalidate(path->dentry,
- MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP);
- ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1);
-
- if (res)
- return res;
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
-
- stat->dev = inode->i_sb->s_dev;
- if (ll_need_32bit_api(sbi))
- stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
- else
- stat->ino = inode->i_ino;
- stat->mode = inode->i_mode;
- stat->uid = inode->i_uid;
- stat->gid = inode->i_gid;
- stat->rdev = inode->i_rdev;
- stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
- stat->ctime = inode->i_ctime;
- stat->blksize = 1 << inode->i_blkbits;
-
- stat->nlink = inode->i_nlink;
- stat->size = i_size_read(inode);
- stat->blocks = inode->i_blocks;
-
- return 0;
-}
-
-static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len)
-{
- int rc;
- size_t num_bytes;
- struct fiemap *fiemap;
- unsigned int extent_count = fieinfo->fi_extents_max;
-
- num_bytes = sizeof(*fiemap) + (extent_count *
- sizeof(struct fiemap_extent));
- fiemap = kvzalloc(num_bytes, GFP_KERNEL);
- if (!fiemap)
- return -ENOMEM;
-
- fiemap->fm_flags = fieinfo->fi_flags;
- fiemap->fm_extent_count = fieinfo->fi_extents_max;
- fiemap->fm_start = start;
- fiemap->fm_length = len;
-
- if (extent_count > 0 &&
- copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
- sizeof(struct fiemap_extent))) {
- rc = -EFAULT;
- goto out;
- }
-
- rc = ll_do_fiemap(inode, fiemap, num_bytes);
-
- fieinfo->fi_flags = fiemap->fm_flags;
- fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
- if (extent_count > 0 &&
- copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
- fiemap->fm_mapped_extents *
- sizeof(struct fiemap_extent))) {
- rc = -EFAULT;
- goto out;
- }
-out:
- kvfree(fiemap);
- return rc;
-}
-
-struct posix_acl *ll_get_acl(struct inode *inode, int type)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct posix_acl *acl = NULL;
-
- spin_lock(&lli->lli_lock);
- /* VFS' acl_permission_check->check_acl will release the refcount */
- acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
-
- return acl;
-}
-
-int ll_inode_permission(struct inode *inode, int mask)
-{
- struct ll_sb_info *sbi;
- struct root_squash_info *squash;
- const struct cred *old_cred = NULL;
- struct cred *cred = NULL;
- bool squash_id = false;
- cfs_cap_t cap;
- int rc = 0;
-
- if (mask & MAY_NOT_BLOCK)
- return -ECHILD;
-
- /* as root inode are NOT getting validated in lookup operation,
- * need to do it before permission check.
- */
-
- if (is_root_inode(inode)) {
- rc = __ll_inode_revalidate(inode->i_sb->s_root,
- MDS_INODELOCK_LOOKUP);
- if (rc)
- return rc;
- }
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), inode mode %x mask %o\n",
- PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
-
- /* squash fsuid/fsgid if needed */
- sbi = ll_i2sbi(inode);
- squash = &sbi->ll_squash;
- if (unlikely(squash->rsi_uid &&
- uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
- !(sbi->ll_flags & LL_SBI_NOROOTSQUASH))) {
- squash_id = true;
- }
-
- if (squash_id) {
- CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n",
- __kuid_val(current_fsuid()), __kgid_val(current_fsgid()),
- squash->rsi_uid, squash->rsi_gid);
-
- /*
- * update current process's credentials
- * and FS capability
- */
- cred = prepare_creds();
- if (!cred)
- return -ENOMEM;
-
- cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
- cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
- for (cap = 0; cap < sizeof(cfs_cap_t) * 8; cap++) {
- if ((1 << cap) & CFS_CAP_FS_MASK)
- cap_lower(cred->cap_effective, cap);
- }
- old_cred = override_creds(cred);
- }
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
- rc = generic_permission(inode, mask);
-
- /* restore current process's credentials and FS capability */
- if (squash_id) {
- revert_creds(old_cred);
- put_cred(cred);
- }
-
- return rc;
-}
-
-/* -o localflock - only provides locally consistent flock locks */
-const struct file_operations ll_file_operations = {
- .read_iter = ll_file_read_iter,
- .write_iter = ll_file_write_iter,
- .unlocked_ioctl = ll_file_ioctl,
- .open = ll_file_open,
- .release = ll_file_release,
- .mmap = ll_file_mmap,
- .llseek = ll_file_seek,
- .splice_read = generic_file_splice_read,
- .fsync = ll_fsync,
- .flush = ll_flush
-};
-
-const struct file_operations ll_file_operations_flock = {
- .read_iter = ll_file_read_iter,
- .write_iter = ll_file_write_iter,
- .unlocked_ioctl = ll_file_ioctl,
- .open = ll_file_open,
- .release = ll_file_release,
- .mmap = ll_file_mmap,
- .llseek = ll_file_seek,
- .splice_read = generic_file_splice_read,
- .fsync = ll_fsync,
- .flush = ll_flush,
- .flock = ll_file_flock,
- .lock = ll_file_flock
-};
-
-/* These are for -o noflock - to return ENOSYS on flock calls */
-const struct file_operations ll_file_operations_noflock = {
- .read_iter = ll_file_read_iter,
- .write_iter = ll_file_write_iter,
- .unlocked_ioctl = ll_file_ioctl,
- .open = ll_file_open,
- .release = ll_file_release,
- .mmap = ll_file_mmap,
- .llseek = ll_file_seek,
- .splice_read = generic_file_splice_read,
- .fsync = ll_fsync,
- .flush = ll_flush,
- .flock = ll_file_noflock,
- .lock = ll_file_noflock
-};
-
-const struct inode_operations ll_file_inode_operations = {
- .setattr = ll_setattr,
- .getattr = ll_getattr,
- .permission = ll_inode_permission,
- .listxattr = ll_listxattr,
- .fiemap = ll_fiemap,
- .get_acl = ll_get_acl,
-};
-
-/* dynamic ioctl number support routines */
-static struct llioc_ctl_data {
- struct rw_semaphore ioc_sem;
- struct list_head ioc_head;
-} llioc = {
- __RWSEM_INITIALIZER(llioc.ioc_sem),
- LIST_HEAD_INIT(llioc.ioc_head)
-};
-
-struct llioc_data {
- struct list_head iocd_list;
- unsigned int iocd_size;
- llioc_callback_t iocd_cb;
- unsigned int iocd_count;
- unsigned int iocd_cmd[0];
-};
-
-void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
-{
- unsigned int size;
- struct llioc_data *in_data = NULL;
-
- if (!cb || !cmd || count > LLIOC_MAX_CMD || count < 0)
- return NULL;
-
- size = sizeof(*in_data) + count * sizeof(unsigned int);
- in_data = kzalloc(size, GFP_NOFS);
- if (!in_data)
- return NULL;
-
- in_data->iocd_size = size;
- in_data->iocd_cb = cb;
- in_data->iocd_count = count;
- memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
-
- down_write(&llioc.ioc_sem);
- list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
- up_write(&llioc.ioc_sem);
-
- return in_data;
-}
-EXPORT_SYMBOL(ll_iocontrol_register);
-
-void ll_iocontrol_unregister(void *magic)
-{
- struct llioc_data *tmp;
-
- if (!magic)
- return;
-
- down_write(&llioc.ioc_sem);
- list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
- if (tmp == magic) {
- list_del(&tmp->iocd_list);
- up_write(&llioc.ioc_sem);
-
- kfree(tmp);
- return;
- }
- }
- up_write(&llioc.ioc_sem);
-
- CWARN("didn't find iocontrol register block with magic: %p\n", magic);
-}
-EXPORT_SYMBOL(ll_iocontrol_unregister);
-
-static enum llioc_iter
-ll_iocontrol_call(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg, int *rcp)
-{
- enum llioc_iter ret = LLIOC_CONT;
- struct llioc_data *data;
- int rc = -EINVAL, i;
-
- down_read(&llioc.ioc_sem);
- list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
- for (i = 0; i < data->iocd_count; i++) {
- if (cmd != data->iocd_cmd[i])
- continue;
-
- ret = data->iocd_cb(inode, file, cmd, arg, data, &rc);
- break;
- }
-
- if (ret == LLIOC_STOP)
- break;
- }
- up_read(&llioc.ioc_sem);
-
- if (rcp)
- *rcp = rc;
- return ret;
-}
-
-int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_object *obj = lli->lli_clob;
- struct lu_env *env;
- int rc;
- u16 refcheck;
-
- if (!obj)
- return 0;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- rc = cl_conf_set(env, obj, conf);
- if (rc < 0)
- goto out;
-
- if (conf->coc_opc == OBJECT_CONF_SET) {
- struct ldlm_lock *lock = conf->coc_lock;
- struct cl_layout cl = {
- .cl_layout_gen = 0,
- };
-
- LASSERT(lock);
- LASSERT(ldlm_has_layout(lock));
-
- /* it can only be allowed to match after layout is
- * applied to inode otherwise false layout would be
- * seen. Applying layout should happen before dropping
- * the intent lock.
- */
- ldlm_lock_allow_match(lock);
-
- rc = cl_object_layout_get(env, obj, &cl);
- if (rc < 0)
- goto out;
-
- CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
- PFID(&lli->lli_fid), ll_layout_version_get(lli),
- cl.cl_layout_gen);
- ll_layout_version_set(lli, cl.cl_layout_gen);
- }
-out:
- cl_env_put(env, &refcheck);
- return rc;
-}
-
-/* Fetch layout from MDT with getxattr request, if it's not ready yet */
-static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
-
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req;
- struct mdt_body *body;
- void *lvbdata;
- void *lmm;
- int lmmsize;
- int rc;
-
- CDEBUG(D_INODE, DFID " LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
- PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
- lock->l_lvb_data, lock->l_lvb_len);
-
- if (lock->l_lvb_data && ldlm_is_lvb_ready(lock))
- return 0;
-
- /* if layout lock was granted right away, the layout is returned
- * within DLM_LVB of dlm reply; otherwise if the lock was ever
- * blocked and then granted via completion ast, we have to fetch
- * layout here. Please note that we can't use the LVB buffer in
- * completion AST because it doesn't have a large enough buffer
- */
- rc = ll_get_default_mdsize(sbi, &lmmsize);
- if (rc == 0)
- rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- OBD_MD_FLXATTR, XATTR_NAME_LOV, NULL, 0,
- lmmsize, 0, &req);
- if (rc < 0)
- return rc;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!body) {
- rc = -EPROTO;
- goto out;
- }
-
- lmmsize = body->mbo_eadatasize;
- if (lmmsize == 0) /* empty layout */ {
- rc = 0;
- goto out;
- }
-
- lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
- if (!lmm) {
- rc = -EFAULT;
- goto out;
- }
-
- lvbdata = kvzalloc(lmmsize, GFP_NOFS);
- if (!lvbdata) {
- rc = -ENOMEM;
- goto out;
- }
-
- memcpy(lvbdata, lmm, lmmsize);
- lock_res_and_lock(lock);
- if (lock->l_lvb_data)
- kvfree(lock->l_lvb_data);
-
- lock->l_lvb_data = lvbdata;
- lock->l_lvb_len = lmmsize;
- unlock_res_and_lock(lock);
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-/**
- * Apply the layout to the inode. Layout lock is held and will be released
- * in this function.
- */
-static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
- struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ldlm_lock *lock;
- struct cl_object_conf conf;
- int rc = 0;
- bool lvb_ready;
- bool wait_layout = false;
-
- LASSERT(lustre_handle_is_used(lockh));
-
- lock = ldlm_handle2lock(lockh);
- LASSERT(lock);
- LASSERT(ldlm_has_layout(lock));
-
- LDLM_DEBUG(lock, "File " DFID "(%p) being reconfigured",
- PFID(&lli->lli_fid), inode);
-
- /* in case this is a caching lock and reinstate with new inode */
- md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
-
- lock_res_and_lock(lock);
- lvb_ready = ldlm_is_lvb_ready(lock);
- unlock_res_and_lock(lock);
- /* checking lvb_ready is racy but this is okay. The worst case is
- * that multi processes may configure the file on the same time.
- */
- if (lvb_ready) {
- rc = 0;
- goto out;
- }
-
- rc = ll_layout_fetch(inode, lock);
- if (rc < 0)
- goto out;
-
- /* for layout lock, lmm is returned in lock's lvb.
- * lvb_data is immutable if the lock is held so it's safe to access it
- * without res lock.
- *
- * set layout to file. Unlikely this will fail as old layout was
- * surely eliminated
- */
- memset(&conf, 0, sizeof(conf));
- conf.coc_opc = OBJECT_CONF_SET;
- conf.coc_inode = inode;
- conf.coc_lock = lock;
- conf.u.coc_layout.lb_buf = lock->l_lvb_data;
- conf.u.coc_layout.lb_len = lock->l_lvb_len;
- rc = ll_layout_conf(inode, &conf);
-
- /* refresh layout failed, need to wait */
- wait_layout = rc == -EBUSY;
-
-out:
- LDLM_LOCK_PUT(lock);
- ldlm_lock_decref(lockh, mode);
-
- /* wait for IO to complete if it's still being used. */
- if (wait_layout) {
- CDEBUG(D_INODE, "%s: " DFID "(%p) wait for layout reconf\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), inode);
-
- memset(&conf, 0, sizeof(conf));
- conf.coc_opc = OBJECT_CONF_WAIT;
- conf.coc_inode = inode;
- rc = ll_layout_conf(inode, &conf);
- if (rc == 0)
- rc = -EAGAIN;
-
- CDEBUG(D_INODE,
- "%s: file=" DFID " waiting layout return: %d.\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), rc);
- }
- return rc;
-}
-
-static int ll_layout_refresh_locked(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct md_op_data *op_data;
- struct lookup_intent it;
- struct lustre_handle lockh;
- enum ldlm_mode mode;
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = LCK_CR,
- .ei_cb_bl = &ll_md_blocking_ast,
- .ei_cb_cp = &ldlm_completion_ast,
- };
- int rc;
-
-again:
- /* mostly layout lock is caching on the local side, so try to match
- * it before grabbing layout lock mutex.
- */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
- LCK_CR | LCK_CW | LCK_PR | LCK_PW);
- if (mode != 0) { /* hit cached lock */
- rc = ll_layout_lock_set(&lockh, mode, inode);
- if (rc == -EAGAIN)
- goto again;
- return rc;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- /* have to enqueue one */
- memset(&it, 0, sizeof(it));
- it.it_op = IT_LAYOUT;
- lockh.cookie = 0ULL;
-
- LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file " DFID "(%p)",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), inode);
-
- rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL, &it, op_data, &lockh, 0);
- ptlrpc_req_finished(it.it_request);
- it.it_request = NULL;
-
- ll_finish_md_op_data(op_data);
-
- mode = it.it_lock_mode;
- it.it_lock_mode = 0;
- ll_intent_drop_lock(&it);
-
- if (rc == 0) {
- /* set lock data in case this is a new lock */
- ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- rc = ll_layout_lock_set(&lockh, mode, inode);
- if (rc == -EAGAIN)
- goto again;
- }
-
- return rc;
-}
-
-/**
- * This function checks if there exists a LAYOUT lock on the client side,
- * or enqueues it if it doesn't have one in cache.
- *
- * This function will not hold layout lock so it may be revoked any time after
- * this function returns. Any operations depend on layout should be redone
- * in that case.
- *
- * This function should be called before lov_io_init() to get an uptodate
- * layout version, the caller should save the version number and after IO
- * is finished, this function should be called again to verify that layout
- * is not changed during IO time.
- */
-int ll_layout_refresh(struct inode *inode, __u32 *gen)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc;
-
- *gen = ll_layout_version_get(lli);
- if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE)
- return 0;
-
- /* sanity checks */
- LASSERT(fid_is_sane(ll_inode2fid(inode)));
- LASSERT(S_ISREG(inode->i_mode));
-
- /* take layout lock mutex to enqueue layout lock exclusively. */
- mutex_lock(&lli->lli_layout_mutex);
-
- rc = ll_layout_refresh_locked(inode);
- if (rc < 0)
- goto out;
-
- *gen = ll_layout_version_get(lli);
-out:
- mutex_unlock(&lli->lli_layout_mutex);
-
- return rc;
-}
-
-/**
- * This function send a restore request to the MDT
- */
-int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
-{
- struct hsm_user_request *hur;
- int len, rc;
-
- len = sizeof(struct hsm_user_request) +
- sizeof(struct hsm_user_item);
- hur = kzalloc(len, GFP_NOFS);
- if (!hur)
- return -ENOMEM;
-
- hur->hur_request.hr_action = HUA_RESTORE;
- hur->hur_request.hr_archive_id = 0;
- hur->hur_request.hr_flags = 0;
- memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
- sizeof(hur->hur_user_item[0].hui_fid));
- hur->hur_user_item[0].hui_extent.offset = offset;
- hur->hur_user_item[0].hui_extent.length = length;
- hur->hur_request.hr_itemcount = 1;
- rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,
- len, hur, NULL);
- kfree(hur);
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
deleted file mode 100644
index 3075358f3f08..000000000000
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * glimpse code shared between vvp and liblustre (and other Lustre clients in
- * the future).
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Oleg Drokin <oleg.drokin@sun.com>
- */
-
-#include <linux/libcfs/libcfs.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <obd.h>
-
-#include <lustre_dlm.h>
-#include <lustre_mdc.h>
-#include <linux/pagemap.h>
-#include <linux/file.h>
-
-#include <cl_object.h>
-#include "llite_internal.h"
-
-static const struct cl_lock_descr whole_file = {
- .cld_start = 0,
- .cld_end = CL_PAGE_EOF,
- .cld_mode = CLM_READ
-};
-
-/*
- * Check whether file has possible unwriten pages.
- *
- * \retval 1 file is mmap-ed or has dirty pages
- * 0 otherwise
- */
-blkcnt_t dirty_cnt(struct inode *inode)
-{
- blkcnt_t cnt = 0;
- struct vvp_object *vob = cl_inode2vvp(inode);
- void *results[1];
-
- if (inode->i_mapping)
- cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->i_pages,
- results, 0, 1,
- PAGECACHE_TAG_DIRTY);
- if (cnt == 0 && atomic_read(&vob->vob_mmap_cnt) > 0)
- cnt = 1;
-
- return (cnt > 0) ? 1 : 0;
-}
-
-int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
- struct inode *inode, struct cl_object *clob, int agl)
-{
- const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
- struct cl_lock *lock = vvp_env_lock(env);
- struct cl_lock_descr *descr = &lock->cll_descr;
- int result = 0;
-
- CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
-
- /* NOTE: this looks like DLM lock request, but it may
- * not be one. Due to CEF_ASYNC flag (translated
- * to LDLM_FL_HAS_INTENT by osc), this is
- * glimpse request, that won't revoke any
- * conflicting DLM locks held. Instead,
- * ll_glimpse_callback() will be called on each
- * client holding a DLM lock against this file,
- * and resulting size will be returned for each
- * stripe. DLM lock on [0, EOF] is acquired only
- * if there were no conflicting locks. If there
- * were conflicting locks, enqueuing or waiting
- * fails with -ENAVAIL, but valid inode
- * attributes are returned anyway.
- */
- *descr = whole_file;
- descr->cld_obj = clob;
- descr->cld_mode = CLM_READ;
- descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
- if (agl)
- descr->cld_enq_flags |= CEF_AGL;
- /*
- * CEF_ASYNC is used because glimpse sub-locks cannot
- * deadlock (because they never conflict with other
- * locks) and, hence, can be enqueued out-of-order.
- *
- * CEF_MUST protects glimpse lock from conversion into
- * a lockless mode.
- */
- result = cl_lock_request(env, io, lock);
- if (result < 0)
- return result;
-
- if (!agl) {
- ll_merge_attr(env, inode);
- if (i_size_read(inode) > 0 && !inode->i_blocks) {
- /*
- * LU-417: Add dirty pages block count
- * lest i_blocks reports 0, some "cp" or
- * "tar" may think it's a completely
- * sparse file and skip it.
- */
- inode->i_blocks = dirty_cnt(inode);
- }
- }
-
- cl_lock_release(env, lock);
-
- return result;
-}
-
-static int cl_io_get(struct inode *inode, struct lu_env **envout,
- struct cl_io **ioout, u16 *refcheck)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_object *clob = lli->lli_clob;
- int result;
-
- if (S_ISREG(inode->i_mode)) {
- env = cl_env_get(refcheck);
- if (!IS_ERR(env)) {
- io = vvp_env_thread_io(env);
- io->ci_obj = clob;
- *envout = env;
- *ioout = io;
- result = 1;
- } else {
- result = PTR_ERR(env);
- }
- } else {
- result = 0;
- }
- return result;
-}
-
-int cl_glimpse_size0(struct inode *inode, int agl)
-{
- /*
- * We don't need ast_flags argument to cl_glimpse_size(), because
- * osc_lock_enqueue() takes care of the possible deadlock that said
- * argument was introduced to avoid.
- */
- /*
- * XXX but note that ll_file_seek() passes LDLM_FL_BLOCK_NOWAIT to
- * cl_glimpse_size(), which doesn't make sense: glimpse locks are not
- * blocking anyway.
- */
- struct lu_env *env = NULL;
- struct cl_io *io = NULL;
- int result;
- u16 refcheck;
-
- result = cl_io_get(inode, &env, &io, &refcheck);
- if (result > 0) {
-again:
- io->ci_verify_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result > 0)
- /*
- * nothing to do for this io. This currently happens
- * when stripe sub-object's are not yet created.
- */
- result = io->ci_result;
- else if (result == 0)
- result = cl_glimpse_lock(env, io, inode, io->ci_obj,
- agl);
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_GLIMPSE_DELAY, 2);
- cl_io_fini(env, io);
- if (unlikely(io->ci_need_restart))
- goto again;
- cl_env_put(env, &refcheck);
- }
- return result;
-}
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
deleted file mode 100644
index df5c0c0ae703..000000000000
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ /dev/null
@@ -1,293 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl code shared between vvp and liblustre (and other Lustre clients in the
- * future).
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/quotaops.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/rbtree.h>
-
-#include <obd.h>
-#include <obd_support.h>
-#include <lustre_fid.h>
-#include <lustre_dlm.h>
-#include <lustre_mdc.h>
-#include <cl_object.h>
-
-#include "llite_internal.h"
-
-/*
- * ccc_ prefix stands for "Common Client Code".
- */
-
-/*****************************************************************************
- *
- * Vvp device and device type functions.
- *
- */
-
-/**
- * An `emergency' environment used by cl_inode_fini() when cl_env_get()
- * fails. Access to this environment is serialized by cl_inode_fini_guard
- * mutex.
- */
-struct lu_env *cl_inode_fini_env;
-u16 cl_inode_fini_refcheck;
-
-/**
- * A mutex serializing calls to slp_inode_fini() under extreme memory
- * pressure, when environments cannot be allocated.
- */
-static DEFINE_MUTEX(cl_inode_fini_guard);
-
-int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
- unsigned int attr_flags)
-{
- struct lu_env *env;
- struct cl_io *io;
- int result;
- u16 refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = vvp_env_thread_io(env);
- io->ci_obj = obj;
- io->ci_verify_layout = 1;
-
- io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
- io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
- io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
- io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
- io->u.ci_setattr.sa_attr_flags = attr_flags;
- io->u.ci_setattr.sa_valid = attr->ia_valid;
- io->u.ci_setattr.sa_parent_fid = lu_object_fid(&obj->co_lu);
-
-again:
- if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
- struct vvp_io *vio = vvp_env_io(env);
-
- if (attr->ia_valid & ATTR_FILE)
- /* populate the file descriptor for ftruncate to honor
- * group lock - see LU-787
- */
- vio->vui_fd = LUSTRE_FPRIVATE(attr->ia_file);
-
- result = cl_io_loop(env, io);
- } else {
- result = io->ci_result;
- }
- cl_io_fini(env, io);
- if (unlikely(io->ci_need_restart))
- goto again;
-
- cl_env_put(env, &refcheck);
- return result;
-}
-
-/**
- * Initialize or update CLIO structures for regular files when new
- * meta-data arrives from the server.
- *
- * \param inode regular file inode
- * \param md new file metadata from MDS
- * - allocates cl_object if necessary,
- * - updated layout, if object was already here.
- */
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
-{
- struct lu_env *env;
- struct ll_inode_info *lli;
- struct cl_object *clob;
- struct lu_site *site;
- struct lu_fid *fid;
- struct cl_object_conf conf = {
- .coc_inode = inode,
- .u = {
- .coc_layout = md->layout,
- }
- };
- int result = 0;
- u16 refcheck;
-
- LASSERT(md->body->mbo_valid & OBD_MD_FLID);
- LASSERT(S_ISREG(inode->i_mode));
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- site = ll_i2sbi(inode)->ll_site;
- lli = ll_i2info(inode);
- fid = &lli->lli_fid;
- LASSERT(fid_is_sane(fid));
-
- if (!lli->lli_clob) {
- /* clob is slave of inode, empty lli_clob means for new inode,
- * there is no clob in cache with the given fid, so it is
- * unnecessary to perform lookup-alloc-lookup-insert, just
- * alloc and insert directly.
- */
- LASSERT(inode->i_state & I_NEW);
- conf.coc_lu.loc_flags = LOC_F_NEW;
- clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
- fid, &conf);
- if (!IS_ERR(clob)) {
- /*
- * No locking is necessary, as new inode is
- * locked by I_NEW bit.
- */
- lli->lli_clob = clob;
- lu_object_ref_add(&clob->co_lu, "inode", inode);
- } else {
- result = PTR_ERR(clob);
- }
- } else {
- result = cl_conf_set(env, lli->lli_clob, &conf);
- }
-
- cl_env_put(env, &refcheck);
-
- if (result != 0)
- CERROR("Failure to initialize cl object " DFID ": %d\n",
- PFID(fid), result);
- return result;
-}
-
-/**
- * Wait for others drop their references of the object at first, then we drop
- * the last one, which will lead to the object be destroyed immediately.
- * Must be called after cl_object_kill() against this object.
- *
- * The reason we want to do this is: destroying top object will wait for sub
- * objects being destroyed first, so we can't let bottom layer (e.g. from ASTs)
- * to initiate top object destroying which may deadlock. See bz22520.
- */
-static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
-{
- struct lu_object_header *header = obj->co_lu.lo_header;
- wait_queue_entry_t waiter;
-
- if (unlikely(atomic_read(&header->loh_ref) != 1)) {
- struct lu_site *site = obj->co_lu.lo_dev->ld_site;
- struct lu_site_bkt_data *bkt;
-
- bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
-
- init_waitqueue_entry(&waiter, current);
- add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
-
- while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&header->loh_ref) == 1)
- break;
- schedule();
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
- }
-
- cl_object_put(env, obj);
-}
-
-void cl_inode_fini(struct inode *inode)
-{
- struct lu_env *env;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_object *clob = lli->lli_clob;
- u16 refcheck;
- int emergency;
-
- if (clob) {
- env = cl_env_get(&refcheck);
- emergency = IS_ERR(env);
- if (emergency) {
- mutex_lock(&cl_inode_fini_guard);
- LASSERT(cl_inode_fini_env);
- env = cl_inode_fini_env;
- }
- /*
- * cl_object cache is a slave to inode cache (which, in turn
- * is a slave to dentry cache), don't keep cl_object in memory
- * when its master is evicted.
- */
- cl_object_kill(env, clob);
- lu_object_ref_del(&clob->co_lu, "inode", inode);
- cl_object_put_last(env, clob);
- lli->lli_clob = NULL;
- if (emergency)
- mutex_unlock(&cl_inode_fini_guard);
- else
- cl_env_put(env, &refcheck);
- }
-}
-
-/**
- * build inode number from passed @fid
- */
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
-{
- if (BITS_PER_LONG == 32 || api32)
- return fid_flatten32(fid);
- else
- return fid_flatten(fid);
-}
-
-/**
- * build inode generation from passed @fid. If our FID overflows the 32-bit
- * inode number then return a non-zero generation to distinguish them.
- */
-__u32 cl_fid_build_gen(const struct lu_fid *fid)
-{
- __u32 gen;
-
- if (fid_is_igif(fid)) {
- gen = lu_igif_gen(fid);
- return gen;
- }
-
- gen = fid_flatten(fid) >> 32;
- return gen;
-}
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
deleted file mode 100644
index a246b955306e..000000000000
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ /dev/null
@@ -1,186 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl code shared between vvp and liblustre (and other Lustre clients in the
- * future).
- *
- */
-#define DEBUG_SUBSYSTEM S_LLITE
-#include <obd_class.h>
-#include <obd_support.h>
-#include <obd.h>
-#include <cl_object.h>
-
-#include "llite_internal.h"
-
-/* Initialize the default and maximum LOV EA and cookie sizes. This allows
- * us to make MDS RPCs with large enough reply buffers to hold the
- * maximum-sized (= maximum striped) EA and cookie without having to
- * calculate this (via a call into the LOV + OSCs) each time we make an RPC.
- */
-int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
-{
- u32 val_size, max_easize, def_easize;
- int rc;
-
- val_size = sizeof(max_easize);
- rc = obd_get_info(NULL, dt_exp, sizeof(KEY_MAX_EASIZE), KEY_MAX_EASIZE,
- &val_size, &max_easize);
- if (rc)
- return rc;
-
- val_size = sizeof(def_easize);
- rc = obd_get_info(NULL, dt_exp, sizeof(KEY_DEFAULT_EASIZE),
- KEY_DEFAULT_EASIZE, &val_size, &def_easize);
- if (rc)
- return rc;
-
- /*
- * default cookiesize is 0 because from 2.4 server doesn't send
- * llog cookies to client.
- */
- CDEBUG(D_HA, "updating def/max_easize: %d/%d\n",
- def_easize, max_easize);
-
- rc = md_init_ea_size(md_exp, max_easize, def_easize);
- return rc;
-}
-
-/**
- * This function is used as an upcall-callback hooked by liblustre and llite
- * clients into obd_notify() listeners chain to handle notifications about
- * change of import connect_flags. See llu_fsswop_mount() and
- * lustre_common_fill_super().
- */
-int cl_ocd_update(struct obd_device *host,
- struct obd_device *watched,
- enum obd_notify_event ev, void *owner, void *data)
-{
- struct lustre_client_ocd *lco;
- struct client_obd *cli;
- __u64 flags;
- int result;
-
- if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME) &&
- watched->obd_set_up && !watched->obd_stopping) {
- cli = &watched->u.cli;
- lco = owner;
- flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
- CDEBUG(D_SUPER, "Changing connect_flags: %#llx -> %#llx\n",
- lco->lco_flags, flags);
- mutex_lock(&lco->lco_lock);
- lco->lco_flags &= flags;
- /* for each osc event update ea size */
- if (lco->lco_dt_exp)
- cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp);
-
- mutex_unlock(&lco->lco_lock);
- result = 0;
- } else {
- CERROR("unexpected notification from %s %s (setup:%d,stopping:%d)!\n",
- watched->obd_type->typ_name,
- watched->obd_name, watched->obd_set_up,
- watched->obd_stopping);
- result = -EINVAL;
- }
- return result;
-}
-
-#define GROUPLOCK_SCOPE "grouplock"
-
-int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ll_grouplock *cg)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct cl_lock *lock;
- struct cl_lock_descr *descr;
- __u32 enqflags;
- u16 refcheck;
- int rc;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = vvp_env_thread_io(env);
- io->ci_obj = obj;
-
- rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (rc != 0) {
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- /* Does not make sense to take GL for released layout */
- if (rc > 0)
- rc = -ENOTSUPP;
- return rc;
- }
-
- lock = vvp_env_lock(env);
- descr = &lock->cll_descr;
- descr->cld_obj = obj;
- descr->cld_start = 0;
- descr->cld_end = CL_PAGE_EOF;
- descr->cld_gid = gid;
- descr->cld_mode = CLM_GROUP;
-
- enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
- descr->cld_enq_flags = enqflags;
-
- rc = cl_lock_request(env, io, lock);
- if (rc < 0) {
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- return rc;
- }
-
- cg->lg_env = env;
- cg->lg_io = io;
- cg->lg_lock = lock;
- cg->lg_gid = gid;
-
- return 0;
-}
-
-void cl_put_grouplock(struct ll_grouplock *cg)
-{
- struct lu_env *env = cg->lg_env;
- struct cl_io *io = cg->lg_io;
- struct cl_lock *lock = cg->lg_lock;
-
- LASSERT(cg->lg_env);
- LASSERT(cg->lg_gid);
-
- cl_lock_release(env, lock);
- cl_io_fini(env, io);
- cl_env_put(env, NULL);
-}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
deleted file mode 100644
index d46bcf71b273..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ /dev/null
@@ -1,1337 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef LLITE_INTERNAL_H
-#define LLITE_INTERNAL_H
-#include <lustre_debug.h>
-#include <uapi/linux/lustre/lustre_ver.h>
-#include <lustre_disk.h> /* for s2sbi */
-#include <lustre_linkea.h>
-
-/* for struct cl_lock_descr and struct cl_io */
-#include <lustre_patchless_compat.h>
-#include <lustre_compat.h>
-#include <cl_object.h>
-#include <lustre_lmv.h>
-#include <lustre_mdc.h>
-#include <lustre_intent.h>
-#include <linux/compat.h>
-#include <linux/namei.h>
-#include <linux/xattr.h>
-#include <linux/posix_acl_xattr.h>
-#include "vvp_internal.h"
-#include "range_lock.h"
-
-#ifndef FMODE_EXEC
-#define FMODE_EXEC 0
-#endif
-
-#ifndef VM_FAULT_RETRY
-#define VM_FAULT_RETRY 0
-#endif
-
-/** Only used on client-side for indicating the tail of dir hash/offset. */
-#define LL_DIR_END_OFF 0x7fffffffffffffffULL
-#define LL_DIR_END_OFF_32BIT 0x7fffffffUL
-
-/* 4UL * 1024 * 1024 */
-#define LL_MAX_BLKSIZE_BITS 22
-
-#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
-#define LUSTRE_FPRIVATE(file) ((file)->private_data)
-
-struct ll_dentry_data {
- struct lookup_intent *lld_it;
- unsigned int lld_sa_generation;
- unsigned int lld_invalid:1;
- unsigned int lld_nfs_dentry:1;
- struct rcu_head lld_rcu_head;
-};
-
-#define ll_d2d(de) ((struct ll_dentry_data *)((de)->d_fsdata))
-
-#define LLI_INODE_MAGIC 0x111d0de5
-#define LLI_INODE_DEAD 0xdeadd00d
-
-struct ll_getname_data {
- struct dir_context ctx;
- char *lgd_name; /* points to buffer with NAME_MAX+1 size */
- struct lu_fid lgd_fid; /* target fid we are looking for */
- int lgd_found; /* inode matched? */
-};
-
-struct ll_grouplock {
- struct lu_env *lg_env;
- struct cl_io *lg_io;
- struct cl_lock *lg_lock;
- unsigned long lg_gid;
-};
-
-enum ll_file_flags {
- /* File data is modified. */
- LLIF_DATA_MODIFIED = 0,
- /* File is being restored */
- LLIF_FILE_RESTORING = 1,
- /* Xattr cache is attached to the file */
- LLIF_XATTR_CACHE = 2,
-};
-
-struct ll_inode_info {
- __u32 lli_inode_magic;
-
- spinlock_t lli_lock;
- unsigned long lli_flags;
- struct posix_acl *lli_posix_acl;
-
- /* identifying fields for both metadata and data stacks. */
- struct lu_fid lli_fid;
- /* master inode fid for stripe directory */
- struct lu_fid lli_pfid;
-
- /* We need all three because every inode may be opened in different
- * modes
- */
- struct obd_client_handle *lli_mds_read_och;
- struct obd_client_handle *lli_mds_write_och;
- struct obd_client_handle *lli_mds_exec_och;
- __u64 lli_open_fd_read_count;
- __u64 lli_open_fd_write_count;
- __u64 lli_open_fd_exec_count;
- /* Protects access to och pointers and their usage counters */
- struct mutex lli_och_mutex;
-
- struct inode lli_vfs_inode;
-
- /* the most recent timestamps obtained from mds */
- s64 lli_atime;
- s64 lli_mtime;
- s64 lli_ctime;
- spinlock_t lli_agl_lock;
-
- /* Try to make the d::member and f::member are aligned. Before using
- * these members, make clear whether it is directory or not.
- */
- union {
- /* for directory */
- struct {
- /* serialize normal readdir and statahead-readdir. */
- struct mutex lli_readdir_mutex;
-
- /* metadata statahead */
- /* since parent-child threads can share the same @file
- * struct, "opendir_key" is the token when dir close for
- * case of parent exit before child -- it is me should
- * cleanup the dir readahead.
- */
- void *lli_opendir_key;
- struct ll_statahead_info *lli_sai;
- /* protect statahead stuff. */
- spinlock_t lli_sa_lock;
- /* "opendir_pid" is the token when lookup/revalidate
- * -- I am the owner of dir statahead.
- */
- pid_t lli_opendir_pid;
- /* stat will try to access statahead entries or start
- * statahead if this flag is set, and this flag will be
- * set upon dir open, and cleared when dir is closed,
- * statahead hit ratio is too low, or start statahead
- * thread failed.
- */
- unsigned int lli_sa_enabled:1;
- /* generation for statahead */
- unsigned int lli_sa_generation;
- /* directory stripe information */
- struct lmv_stripe_md *lli_lsm_md;
- /* default directory stripe offset. This is extracted
- * from the "dmv" xattr in order to decide which MDT to
- * create a subdirectory on. The MDS itself fetches
- * "dmv" and gets the rest of the default layout itself
- * (count, hash, etc).
- */
- __u32 lli_def_stripe_offset;
- };
-
- /* for non-directory */
- struct {
- struct mutex lli_size_mutex;
- char *lli_symlink_name;
- /*
- * struct rw_semaphore {
- * signed long count; // align d.d_def_acl
- * spinlock_t wait_lock; // align d.d_sa_lock
- * struct list_head wait_list;
- * }
- */
- struct rw_semaphore lli_trunc_sem;
- struct range_lock_tree lli_write_tree;
-
- struct rw_semaphore lli_glimpse_sem;
- unsigned long lli_glimpse_time;
- struct list_head lli_agl_list;
- __u64 lli_agl_index;
-
- /* for writepage() only to communicate to fsync */
- int lli_async_rc;
-
- /*
- * whenever a process try to read/write the file, the
- * jobid of the process will be saved here, and it'll
- * be packed into the write PRC when flush later.
- *
- * so the read/write statistics for jobid will not be
- * accurate if the file is shared by different jobs.
- */
- char lli_jobid[LUSTRE_JOBID_SIZE];
- };
- };
-
- /* XXX: For following frequent used members, although they maybe special
- * used for non-directory object, it is some time-wasting to check
- * whether the object is directory or not before using them. On the
- * other hand, currently, sizeof(f) > sizeof(d), it cannot reduce
- * the "ll_inode_info" size even if moving those members into u.f.
- * So keep them out side.
- *
- * In the future, if more members are added only for directory,
- * some of the following members can be moved into u.f.
- */
- struct cl_object *lli_clob;
-
- /* mutex to request for layout lock exclusively. */
- struct mutex lli_layout_mutex;
- /* Layout version, protected by lli_layout_lock */
- __u32 lli_layout_gen;
- spinlock_t lli_layout_lock;
-
- struct rw_semaphore lli_xattrs_list_rwsem;
- struct mutex lli_xattrs_enq_lock;
- struct list_head lli_xattrs;/* ll_xattr_entry->xe_list */
-};
-
-static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
-{
- __u32 gen;
-
- spin_lock(&lli->lli_layout_lock);
- gen = lli->lli_layout_gen;
- spin_unlock(&lli->lli_layout_lock);
-
- return gen;
-}
-
-static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen)
-{
- spin_lock(&lli->lli_layout_lock);
- lli->lli_layout_gen = gen;
- spin_unlock(&lli->lli_layout_lock);
-}
-
-int ll_xattr_cache_destroy(struct inode *inode);
-
-int ll_xattr_cache_get(struct inode *inode, const char *name,
- char *buffer, size_t size, __u64 valid);
-
-int ll_init_security(struct dentry *dentry, struct inode *inode,
- struct inode *dir);
-
-/*
- * Locking to guarantee consistency of non-atomic updates to long long i_size,
- * consistency between file size and KMS.
- *
- * Implemented by ->lli_size_mutex and ->lsm_lock, nested in that order.
- */
-
-void ll_inode_size_lock(struct inode *inode);
-void ll_inode_size_unlock(struct inode *inode);
-
-/* FIXME: replace the name of this with LL_I to conform to kernel stuff */
-/* static inline struct ll_inode_info *LL_I(struct inode *inode) */
-static inline struct ll_inode_info *ll_i2info(struct inode *inode)
-{
- return container_of(inode, struct ll_inode_info, lli_vfs_inode);
-}
-
-/* default to about 64M of readahead on a given system. */
-#define SBI_DEFAULT_READAHEAD_MAX (64UL << (20 - PAGE_SHIFT))
-
-/* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
-
-enum ra_stat {
- RA_STAT_HIT = 0,
- RA_STAT_MISS,
- RA_STAT_DISTANT_READPAGE,
- RA_STAT_MISS_IN_WINDOW,
- RA_STAT_FAILED_GRAB_PAGE,
- RA_STAT_FAILED_MATCH,
- RA_STAT_DISCARDED,
- RA_STAT_ZERO_LEN,
- RA_STAT_ZERO_WINDOW,
- RA_STAT_EOF,
- RA_STAT_MAX_IN_FLIGHT,
- RA_STAT_WRONG_GRAB_PAGE,
- RA_STAT_FAILED_REACH_END,
- _NR_RA_STAT,
-};
-
-struct ll_ra_info {
- atomic_t ra_cur_pages;
- unsigned long ra_max_pages;
- unsigned long ra_max_pages_per_file;
- unsigned long ra_max_read_ahead_whole_pages;
-};
-
-/* ra_io_arg will be filled in the beginning of ll_readahead with
- * ras_lock, then the following ll_read_ahead_pages will read RA
- * pages according to this arg, all the items in this structure are
- * counted by page index.
- */
-struct ra_io_arg {
- unsigned long ria_start; /* start offset of read-ahead*/
- unsigned long ria_end; /* end offset of read-ahead*/
- unsigned long ria_reserved; /* reserved pages for read-ahead */
- unsigned long ria_end_min; /* minimum end to cover current read */
- bool ria_eof; /* reach end of file */
- /* If stride read pattern is detected, ria_stoff means where
- * stride read is started. Note: for normal read-ahead, the
- * value here is meaningless, and also it will not be accessed
- */
- pgoff_t ria_stoff;
- /* ria_length and ria_pages are the length and pages length in the
- * stride I/O mode. And they will also be used to check whether
- * it is stride I/O read-ahead in the read-ahead pages
- */
- unsigned long ria_length;
- unsigned long ria_pages;
-};
-
-/* LL_HIST_MAX=32 causes an overflow */
-#define LL_HIST_MAX 28
-#define LL_HIST_START 12 /* buckets start at 2^12 = 4k */
-#define LL_PROCESS_HIST_MAX 10
-struct per_process_info {
- pid_t pid;
- struct obd_histogram pp_r_hist;
- struct obd_histogram pp_w_hist;
-};
-
-/* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
-struct ll_rw_extents_info {
- struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
-};
-
-#define LL_OFFSET_HIST_MAX 100
-struct ll_rw_process_info {
- pid_t rw_pid;
- int rw_op;
- loff_t rw_range_start;
- loff_t rw_range_end;
- loff_t rw_last_file_pos;
- loff_t rw_offset;
- size_t rw_smallest_extent;
- size_t rw_largest_extent;
- struct ll_file_data *rw_last_file;
-};
-
-enum stats_track_type {
- STATS_TRACK_ALL = 0, /* track all processes */
- STATS_TRACK_PID, /* track process with this pid */
- STATS_TRACK_PPID, /* track processes with this ppid */
- STATS_TRACK_GID, /* track processes with this gid */
- STATS_TRACK_LAST,
-};
-
-/* flags for sbi->ll_flags */
-#define LL_SBI_NOLCK 0x01 /* DLM locking disabled (directio-only) */
-#define LL_SBI_CHECKSUM 0x02 /* checksum each page as it's written */
-#define LL_SBI_FLOCK 0x04
-#define LL_SBI_USER_XATTR 0x08 /* support user xattr */
-#define LL_SBI_ACL 0x10 /* support ACL */
-/* LL_SBI_RMT_CLIENT 0x40 remote client */
-#define LL_SBI_MDS_CAPA 0x80 /* support mds capa, obsolete */
-#define LL_SBI_OSS_CAPA 0x100 /* support oss capa, obsolete */
-#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
-#define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */
-#define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */
-/* LL_SBI_SOM_PREVIEW 0x1000 SOM preview mount option, obsolete */
-#define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. */
-#define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */
-#define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */
-#define LL_SBI_VERBOSE 0x10000 /* verbose mount/umount */
-#define LL_SBI_LAYOUT_LOCK 0x20000 /* layout lock support */
-#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
-#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
-#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */
-#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if server
- * suppress_pings
- */
-
-#define LL_SBI_FLAGS { \
- "nolck", \
- "checksum", \
- "flock", \
- "user_xattr", \
- "acl", \
- "???", \
- "???", \
- "mds_capa", \
- "oss_capa", \
- "flock", \
- "lru_resize", \
- "lazy_statfs", \
- "som", \
- "32bit_api", \
- "64bit_hash", \
- "agl", \
- "verbose", \
- "layout", \
- "user_fid2path",\
- "xattr_cache", \
- "norootsquash", \
- "always_ping", \
-}
-
-/*
- * This is embedded into llite super-blocks to keep track of connect
- * flags (capabilities) supported by all imports given mount is
- * connected to.
- */
-struct lustre_client_ocd {
- /*
- * This is conjunction of connect_flags across all imports
- * (LOVs) this mount is connected to. This field is updated by
- * cl_ocd_update() under ->lco_lock.
- */
- __u64 lco_flags;
- struct mutex lco_lock;
- struct obd_export *lco_md_exp;
- struct obd_export *lco_dt_exp;
-};
-
-struct ll_sb_info {
- /* this protects pglist and ra_info. It isn't safe to
- * grab from interrupt contexts
- */
- spinlock_t ll_lock;
- spinlock_t ll_pp_extent_lock; /* pp_extent entry*/
- spinlock_t ll_process_lock; /* ll_rw_process_info */
- struct obd_uuid ll_sb_uuid;
- struct obd_export *ll_md_exp;
- struct obd_export *ll_dt_exp;
- struct dentry *ll_debugfs_entry;
- struct lu_fid ll_root_fid; /* root object fid */
-
- int ll_flags;
- unsigned int ll_umounting:1,
- ll_xattr_cache_enabled:1,
- ll_client_common_fill_super_succeeded:1;
-
- struct lustre_client_ocd ll_lco;
-
- struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
-
- /*
- * Used to track "unstable" pages on a client, and maintain a
- * LRU list of clean pages. An "unstable" page is defined as
- * any page which is sent to a server as part of a bulk request,
- * but is uncommitted to stable storage.
- */
- struct cl_client_cache *ll_cache;
-
- struct lprocfs_stats *ll_ra_stats;
-
- struct ll_ra_info ll_ra_info;
- unsigned int ll_namelen;
- const struct file_operations *ll_fop;
-
- unsigned int ll_md_brw_pages; /* readdir pages per RPC */
-
- struct lu_site *ll_site;
- struct cl_device *ll_cl;
- /* Statistics */
- struct ll_rw_extents_info ll_rw_extents_info;
- int ll_extent_process_count;
- struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX];
- unsigned int ll_offset_process_count;
- struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX];
- unsigned int ll_rw_offset_entry_count;
- int ll_stats_track_id;
- enum stats_track_type ll_stats_track_type;
- int ll_rw_stats_on;
-
- /* metadata stat-ahead */
- unsigned int ll_sa_max; /* max statahead RPCs */
- atomic_t ll_sa_total; /* statahead thread started
- * count
- */
- atomic_t ll_sa_wrong; /* statahead thread stopped for
- * low hit ratio
- */
- atomic_t ll_sa_running; /* running statahead thread
- * count
- */
- atomic_t ll_agl_total; /* AGL thread started count */
-
- dev_t ll_sdev_orig; /* save s_dev before assign for
- * clustered nfs
- */
- /* root squash */
- struct root_squash_info ll_squash;
- struct path ll_mnt;
-
- __kernel_fsid_t ll_fsid;
- struct kobject ll_kobj; /* sysfs object */
- struct super_block *ll_sb; /* struct super_block (for sysfs code)*/
- struct completion ll_kobj_unregister;
-};
-
-/*
- * per file-descriptor read-ahead data.
- */
-struct ll_readahead_state {
- spinlock_t ras_lock;
- /*
- * index of the last page that read(2) needed and that wasn't in the
- * cache. Used by ras_update() to detect seeks.
- *
- * XXX nikita: if access seeks into cached region, Lustre doesn't see
- * this.
- */
- unsigned long ras_last_readpage;
- /*
- * number of pages read after last read-ahead window reset. As window
- * is reset on each seek, this is effectively a number of consecutive
- * accesses. Maybe ->ras_accessed_in_window is better name.
- *
- * XXX nikita: window is also reset (by ras_update()) when Lustre
- * believes that memory pressure evicts read-ahead pages. In that
- * case, it probably doesn't make sense to expand window to
- * PTLRPC_MAX_BRW_PAGES on the third access.
- */
- unsigned long ras_consecutive_pages;
- /*
- * number of read requests after the last read-ahead window reset
- * As window is reset on each seek, this is effectively the number
- * on consecutive read request and is used to trigger read-ahead.
- */
- unsigned long ras_consecutive_requests;
- /*
- * Parameters of current read-ahead window. Handled by
- * ras_update(). On the initial access to the file or after a seek,
- * window is reset to 0. After 3 consecutive accesses, window is
- * expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
- * PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
- */
- unsigned long ras_window_start, ras_window_len;
- /*
- * Optimal RPC size. It decides how many pages will be sent
- * for each read-ahead.
- */
- unsigned long ras_rpc_size;
- /*
- * Where next read-ahead should start at. This lies within read-ahead
- * window. Read-ahead window is read in pieces rather than at once
- * because: 1. lustre limits total number of pages under read-ahead by
- * ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages
- * not covered by DLM lock.
- */
- unsigned long ras_next_readahead;
- /*
- * Total number of ll_file_read requests issued, reads originating
- * due to mmap are not counted in this total. This value is used to
- * trigger full file read-ahead after multiple reads to a small file.
- */
- unsigned long ras_requests;
- /*
- * Page index with respect to the current request, these value
- * will not be accurate when dealing with reads issued via mmap.
- */
- unsigned long ras_request_index;
- /*
- * The following 3 items are used for detecting the stride I/O
- * mode.
- * In stride I/O mode,
- * ...............|-----data-----|****gap*****|--------|******|....
- * offset |-stride_pages-|-stride_gap-|
- * ras_stride_offset = offset;
- * ras_stride_length = stride_pages + stride_gap;
- * ras_stride_pages = stride_pages;
- * Note: all these three items are counted by pages.
- */
- unsigned long ras_stride_length;
- unsigned long ras_stride_pages;
- pgoff_t ras_stride_offset;
- /*
- * number of consecutive stride request count, and it is similar as
- * ras_consecutive_requests, but used for stride I/O mode.
- * Note: only more than 2 consecutive stride request are detected,
- * stride read-ahead will be enable
- */
- unsigned long ras_consecutive_stride_requests;
-};
-
-extern struct kmem_cache *ll_file_data_slab;
-struct lustre_handle;
-struct ll_file_data {
- struct ll_readahead_state fd_ras;
- struct ll_grouplock fd_grouplock;
- __u64 lfd_pos;
- __u32 fd_flags;
- fmode_t fd_omode;
- /* openhandle if lease exists for this file.
- * Borrow lli->lli_och_mutex to protect assignment
- */
- struct obd_client_handle *fd_lease_och;
- struct obd_client_handle *fd_och;
- struct file *fd_file;
- /* Indicate whether need to report failure when close.
- * true: failure is known, not report again.
- * false: unknown failure, should report.
- */
- bool fd_write_failed;
- rwlock_t fd_lock; /* protect lcc list */
- struct list_head fd_lccs; /* list of ll_cl_context */
-};
-
-extern struct dentry *llite_root;
-extern struct kset *llite_kset;
-
-static inline struct inode *ll_info2i(struct ll_inode_info *lli)
-{
- return &lli->lli_vfs_inode;
-}
-
-__u32 ll_i2suppgid(struct inode *i);
-void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2);
-
-static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
-{
-#if BITS_PER_LONG == 32
- return 1;
-#elif defined(CONFIG_COMPAT)
- return unlikely(in_compat_syscall() ||
- (sbi->ll_flags & LL_SBI_32BIT_API));
-#else
- return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
-#endif
-}
-
-void ll_ras_enter(struct file *f);
-
-/* llite/lcommon_misc.c */
-int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
-int cl_ocd_update(struct obd_device *host,
- struct obd_device *watched,
- enum obd_notify_event ev, void *owner, void *data);
-int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ll_grouplock *cg);
-void cl_put_grouplock(struct ll_grouplock *cg);
-
-/* llite/lproc_llite.c */
-int ldebugfs_register_mountpoint(struct dentry *parent,
- struct super_block *sb, char *osc, char *mdc);
-void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi);
-void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
-void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars);
-void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
- struct ll_file_data *file, loff_t pos,
- size_t count, int rw);
-
-enum {
- LPROC_LL_DIRTY_HITS,
- LPROC_LL_DIRTY_MISSES,
- LPROC_LL_READ_BYTES,
- LPROC_LL_WRITE_BYTES,
- LPROC_LL_BRW_READ,
- LPROC_LL_BRW_WRITE,
- LPROC_LL_IOCTL,
- LPROC_LL_OPEN,
- LPROC_LL_RELEASE,
- LPROC_LL_MAP,
- LPROC_LL_LLSEEK,
- LPROC_LL_FSYNC,
- LPROC_LL_READDIR,
- LPROC_LL_SETATTR,
- LPROC_LL_TRUNC,
- LPROC_LL_FLOCK,
- LPROC_LL_GETATTR,
- LPROC_LL_CREATE,
- LPROC_LL_LINK,
- LPROC_LL_UNLINK,
- LPROC_LL_SYMLINK,
- LPROC_LL_MKDIR,
- LPROC_LL_RMDIR,
- LPROC_LL_MKNOD,
- LPROC_LL_RENAME,
- LPROC_LL_STAFS,
- LPROC_LL_ALLOC_INODE,
- LPROC_LL_SETXATTR,
- LPROC_LL_GETXATTR,
- LPROC_LL_GETXATTR_HITS,
- LPROC_LL_LISTXATTR,
- LPROC_LL_REMOVEXATTR,
- LPROC_LL_INODE_PERM,
- LPROC_LL_FILE_OPCODES
-};
-
-/* llite/dir.c */
-extern const struct file_operations ll_dir_operations;
-extern const struct inode_operations ll_dir_inode_operations;
-int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data,
- struct dir_context *ctx);
-int ll_get_mdt_idx(struct inode *inode);
-int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid);
-struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
- __u64 offset);
-void ll_release_page(struct inode *inode, struct page *page, bool remove);
-
-/* llite/namei.c */
-extern const struct inode_operations ll_special_inode_operations;
-
-struct inode *ll_iget(struct super_block *sb, ino_t hash,
- struct lustre_md *lic);
-int ll_test_inode_by_fid(struct inode *inode, void *opaque);
-int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag);
-struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
-void ll_update_times(struct ptlrpc_request *request, struct inode *inode);
-
-/* llite/rw.c */
-int ll_writepage(struct page *page, struct writeback_control *wbc);
-int ll_writepages(struct address_space *mapping, struct writeback_control *wbc);
-int ll_readpage(struct file *file, struct page *page);
-void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
-int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
-struct ll_cl_context *ll_cl_find(struct file *file);
-void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io);
-void ll_cl_remove(struct file *file, const struct lu_env *env);
-
-extern const struct address_space_operations ll_aops;
-
-/* llite/file.c */
-extern const struct file_operations ll_file_operations;
-extern const struct file_operations ll_file_operations_flock;
-extern const struct file_operations ll_file_operations_noflock;
-extern const struct inode_operations ll_file_inode_operations;
-int ll_have_md_lock(struct inode *inode, __u64 *bits,
- enum ldlm_mode l_req_mode);
-enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags,
- enum ldlm_mode mode);
-int ll_file_open(struct inode *inode, struct file *file);
-int ll_file_release(struct inode *inode, struct file *file);
-int ll_release_openhandle(struct inode *inode, struct lookup_intent *it);
-int ll_md_real_close(struct inode *inode, fmode_t fmode);
-int ll_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags);
-struct posix_acl *ll_get_acl(struct inode *inode, int type);
-int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
- const char *name, int namelen);
-int ll_get_fid_by_name(struct inode *parent, const char *name,
- int namelen, struct lu_fid *fid, struct inode **inode);
-int ll_inode_permission(struct inode *inode, int mask);
-
-int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- __u64 flags, struct lov_user_md *lum,
- int lum_size);
-int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
- struct lov_mds_md **lmm, int *lmm_size,
- struct ptlrpc_request **request);
-int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
- int set_default);
-int ll_dir_getstripe(struct inode *inode, void **lmmp, int *lmm_size,
- struct ptlrpc_request **request, u64 valid);
-int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
-int ll_merge_attr(const struct lu_env *env, struct inode *inode);
-int ll_fid2path(struct inode *inode, void __user *arg);
-int ll_data_version(struct inode *inode, __u64 *data_version, int flags);
-int ll_hsm_release(struct inode *inode);
-int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss);
-
-/* llite/dcache.c */
-
-extern const struct dentry_operations ll_d_ops;
-void ll_intent_drop_lock(struct lookup_intent *it);
-void ll_intent_release(struct lookup_intent *it);
-void ll_invalidate_aliases(struct inode *inode);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode);
-int ll_revalidate_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it, struct inode *inode);
-
-/* llite/llite_lib.c */
-extern struct super_operations lustre_super_operations;
-
-void ll_lli_init(struct ll_inode_info *lli);
-int ll_fill_super(struct super_block *sb);
-void ll_put_super(struct super_block *sb);
-void ll_kill_super(struct super_block *sb);
-struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
-void ll_dir_clear_lsm_md(struct inode *inode);
-void ll_clear_inode(struct inode *inode);
-int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
-int ll_setattr(struct dentry *de, struct iattr *attr);
-int ll_statfs(struct dentry *de, struct kstatfs *sfs);
-int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
- __u64 max_age, __u32 flags);
-int ll_update_inode(struct inode *inode, struct lustre_md *md);
-int ll_read_inode2(struct inode *inode, void *opaque);
-void ll_delete_inode(struct inode *inode);
-int ll_iocontrol(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
-int ll_flush_ctx(struct inode *inode);
-void ll_umount_begin(struct super_block *sb);
-int ll_remount_fs(struct super_block *sb, int *flags, char *data);
-int ll_show_options(struct seq_file *seq, struct dentry *dentry);
-void ll_dirty_page_discard_warn(struct page *page, int ioret);
-int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
- struct super_block *sb, struct lookup_intent *it);
-int ll_obd_statfs(struct inode *inode, void __user *arg);
-int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
-int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
-int ll_set_default_mdsize(struct ll_sb_info *sbi, int default_mdsize);
-int ll_process_config(struct lustre_cfg *lcfg);
-
-enum {
- LUSTRE_OPC_MKDIR = 0,
- LUSTRE_OPC_SYMLINK = 1,
- LUSTRE_OPC_MKNOD = 2,
- LUSTRE_OPC_CREATE = 3,
- LUSTRE_OPC_ANY = 5,
-};
-
-struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
- struct inode *i1, struct inode *i2,
- const char *name, size_t namelen,
- u32 mode, __u32 opc, void *data);
-void ll_finish_md_op_data(struct md_op_data *op_data);
-int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg);
-char *ll_get_fsname(struct super_block *sb, char *buf, int buflen);
-void ll_compute_rootsquash_state(struct ll_sb_info *sbi);
-void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req);
-ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
- struct lov_user_md **kbuf);
-
-/* Compute expected user md size when passing in a md from user space */
-static inline ssize_t ll_lov_user_md_size(const struct lov_user_md *lum)
-{
- switch (lum->lmm_magic) {
- case LOV_USER_MAGIC_V1:
- return sizeof(struct lov_user_md_v1);
- case LOV_USER_MAGIC_V3:
- return sizeof(struct lov_user_md_v3);
- case LOV_USER_MAGIC_SPECIFIC:
- if (lum->lmm_stripe_count > LOV_MAX_STRIPE_COUNT)
- return -EINVAL;
-
- return lov_user_md_size(lum->lmm_stripe_count,
- LOV_USER_MAGIC_SPECIFIC);
- }
- return -EINVAL;
-}
-
-/* llite/llite_nfs.c */
-extern const struct export_operations lustre_export_operations;
-__u32 get_uuid2int(const char *name, int len);
-void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid);
-struct inode *search_inode_for_lustre(struct super_block *sb,
- const struct lu_fid *fid);
-int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid);
-
-/* llite/symlink.c */
-extern const struct inode_operations ll_fast_symlink_inode_operations;
-
-/**
- * IO arguments for various VFS I/O interfaces.
- */
-struct vvp_io_args {
- /** normal/splice */
- union {
- struct {
- struct kiocb *via_iocb;
- struct iov_iter *via_iter;
- } normal;
- } u;
-};
-
-struct ll_cl_context {
- struct list_head lcc_list;
- void *lcc_cookie;
- const struct lu_env *lcc_env;
- struct cl_io *lcc_io;
- struct cl_page *lcc_page;
-};
-
-struct ll_thread_info {
- struct vvp_io_args lti_args;
- struct ra_io_arg lti_ria;
- struct ll_cl_context lti_io_ctx;
-};
-
-extern struct lu_context_key ll_thread_key;
-static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
-{
- struct ll_thread_info *lti;
-
- lti = lu_context_key_get(&env->le_ctx, &ll_thread_key);
- LASSERT(lti);
- return lti;
-}
-
-static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
-{
- return &ll_env_info(env)->lti_args;
-}
-
-/* llite/llite_mmap.c */
-
-int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
-int ll_file_mmap(struct file *file, struct vm_area_struct *vma);
-void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
- unsigned long addr, size_t count);
-struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
- size_t count);
-
-static inline void ll_invalidate_page(struct page *vmpage)
-{
- struct address_space *mapping = vmpage->mapping;
- loff_t offset = vmpage->index << PAGE_SHIFT;
-
- LASSERT(PageLocked(vmpage));
- if (!mapping)
- return;
-
- /*
- * truncate_complete_page() calls
- * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
- */
- ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
- truncate_complete_page(mapping, vmpage);
-}
-
-#define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi)
-
-/* don't need an addref as the sb_info should be holding one */
-static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
-{
- return ll_s2sbi(sb)->ll_dt_exp;
-}
-
-/* don't need an addref as the sb_info should be holding one */
-static inline struct obd_export *ll_s2mdexp(struct super_block *sb)
-{
- return ll_s2sbi(sb)->ll_md_exp;
-}
-
-static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi)
-{
- struct obd_device *obd = sbi->ll_md_exp->exp_obd;
-
- if (!obd)
- LBUG();
- return &obd->u.cli;
-}
-
-/* FIXME: replace the name of this with LL_SB to conform to kernel stuff */
-static inline struct ll_sb_info *ll_i2sbi(struct inode *inode)
-{
- return ll_s2sbi(inode->i_sb);
-}
-
-static inline struct obd_export *ll_i2dtexp(struct inode *inode)
-{
- return ll_s2dtexp(inode->i_sb);
-}
-
-static inline struct obd_export *ll_i2mdexp(struct inode *inode)
-{
- return ll_s2mdexp(inode->i_sb);
-}
-
-static inline struct lu_fid *ll_inode2fid(struct inode *inode)
-{
- struct lu_fid *fid;
-
- LASSERT(inode);
- fid = &ll_i2info(inode)->lli_fid;
-
- return fid;
-}
-
-static inline loff_t ll_file_maxbytes(struct inode *inode)
-{
- struct cl_object *obj = ll_i2info(inode)->lli_clob;
-
- if (!obj)
- return MAX_LFS_FILESIZE;
-
- return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE);
-}
-
-/* llite/xattr.c */
-extern const struct xattr_handler *ll_xattr_handlers[];
-
-#define XATTR_USER_T 1
-#define XATTR_TRUSTED_T 2
-#define XATTR_SECURITY_T 3
-#define XATTR_ACL_ACCESS_T 4
-#define XATTR_ACL_DEFAULT_T 5
-#define XATTR_LUSTRE_T 6
-#define XATTR_OTHER_T 7
-
-ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
-int ll_xattr_list(struct inode *inode, const char *name, int type,
- void *buffer, size_t size, __u64 valid);
-const struct xattr_handler *get_xattr_type(const char *name);
-
-/**
- * Common IO arguments for various VFS I/O interfaces.
- */
-int cl_sb_init(struct super_block *sb);
-int cl_sb_fini(struct super_block *sb);
-
-enum ras_update_flags {
- LL_RAS_HIT = 0x1,
- LL_RAS_MMAP = 0x2
-};
-void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
-void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
-
-/* statahead.c */
-#define LL_SA_RPC_MIN 2
-#define LL_SA_RPC_DEF 32
-#define LL_SA_RPC_MAX 8192
-
-#define LL_SA_CACHE_BIT 5
-#define LL_SA_CACHE_SIZE (1 << LL_SA_CACHE_BIT)
-#define LL_SA_CACHE_MASK (LL_SA_CACHE_SIZE - 1)
-
-/* per inode struct, for dir only */
-struct ll_statahead_info {
- struct dentry *sai_dentry;
- atomic_t sai_refcount; /* when access this struct, hold
- * refcount
- */
- unsigned int sai_max; /* max ahead of lookup */
- __u64 sai_sent; /* stat requests sent count */
- __u64 sai_replied; /* stat requests which received
- * reply
- */
- __u64 sai_index; /* index of statahead entry */
- __u64 sai_index_wait; /* index of entry which is the
- * caller is waiting for
- */
- __u64 sai_hit; /* hit count */
- __u64 sai_miss; /* miss count:
- * for "ls -al" case, it includes
- * hidden dentry miss;
- * for "ls -l" case, it does not
- * include hidden dentry miss.
- * "sai_miss_hidden" is used for
- * the later case.
- */
- unsigned int sai_consecutive_miss; /* consecutive miss */
- unsigned int sai_miss_hidden;/* "ls -al", but first dentry
- * is not a hidden one
- */
- unsigned int sai_skip_hidden;/* skipped hidden dentry count */
- unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for
- * hidden entries
- */
- sai_agl_valid:1,/* AGL is valid for the dir */
- sai_in_readpage:1;/* statahead in readdir() */
- wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
- struct task_struct *sai_task; /* stat-ahead thread */
- struct task_struct *sai_agl_task; /* AGL thread */
- struct list_head sai_interim_entries; /* entries which got async
- * stat reply, but not
- * instantiated
- */
- struct list_head sai_entries; /* completed entries */
- struct list_head sai_agls; /* AGLs to be sent */
- struct list_head sai_cache[LL_SA_CACHE_SIZE];
- spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
- atomic_t sai_cache_count; /* entry count in cache */
-};
-
-int ll_statahead(struct inode *dir, struct dentry **dentry, bool unplug);
-void ll_authorize_statahead(struct inode *dir, void *key);
-void ll_deauthorize_statahead(struct inode *dir, void *key);
-
-blkcnt_t dirty_cnt(struct inode *inode);
-
-int cl_glimpse_size0(struct inode *inode, int agl);
-int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
- struct inode *inode, struct cl_object *clob, int agl);
-
-static inline int cl_glimpse_size(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 0);
-}
-
-static inline int cl_agl(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 1);
-}
-
-static inline int ll_glimpse_size(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
-
- down_read(&lli->lli_glimpse_sem);
- rc = cl_glimpse_size(inode);
- lli->lli_glimpse_time = cfs_time_current();
- up_read(&lli->lli_glimpse_sem);
- return rc;
-}
-
-/*
- * dentry may statahead when statahead is enabled and current process has opened
- * parent directory, and this dentry hasn't accessed statahead cache before
- */
-static inline bool
-dentry_may_statahead(struct inode *dir, struct dentry *dentry)
-{
- struct ll_inode_info *lli;
- struct ll_dentry_data *ldd;
-
- if (ll_i2sbi(dir)->ll_sa_max == 0)
- return false;
-
- lli = ll_i2info(dir);
-
- /*
- * statahead is not allowed for this dir, there may be three causes:
- * 1. dir is not opened.
- * 2. statahead hit ratio is too low.
- * 3. previous stat started statahead thread failed.
- */
- if (!lli->lli_sa_enabled)
- return false;
-
- /* not the same process, don't statahead */
- if (lli->lli_opendir_pid != current_pid())
- return false;
-
- /*
- * When stating a dentry, kernel may trigger 'revalidate' or 'lookup'
- * multiple times, eg. for 'getattr', 'getxattr' and etc.
- * For patchless client, lookup intent is not accurate, which may
- * misguide statahead. For example:
- * The 'revalidate' call for 'getattr' and 'getxattr' of a dentry will
- * have the same intent -- IT_GETATTR, while one dentry should access
- * statahead cache once, otherwise statahead windows is messed up.
- * The solution is as following:
- * Assign 'lld_sa_generation' with 'lli_sa_generation' when a dentry
- * IT_GETATTR for the first time, and subsequent IT_GETATTR will
- * bypass interacting with statahead cache by checking
- * 'lld_sa_generation == lli->lli_sa_generation'.
- */
- ldd = ll_d2d(dentry);
- if (ldd->lld_sa_generation == lli->lli_sa_generation)
- return false;
-
- return true;
-}
-
-/* llite ioctl register support routine */
-enum llioc_iter {
- LLIOC_CONT = 0,
- LLIOC_STOP
-};
-
-#define LLIOC_MAX_CMD 256
-
-/*
- * Rules to write a callback function:
- *
- * Parameters:
- * @magic: Dynamic ioctl call routine will feed this value with the pointer
- * returned to ll_iocontrol_register. Callback functions should use this
- * data to check the potential collasion of ioctl cmd. If collasion is
- * found, callback function should return LLIOC_CONT.
- * @rcp: The result of ioctl command.
- *
- * Return values:
- * If @magic matches the pointer returned by ll_iocontrol_data, the
- * callback should return LLIOC_STOP; return LLIOC_STOP otherwise.
- */
-typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
- struct file *file, unsigned int cmd, unsigned long arg,
- void *magic, int *rcp);
-
-/* export functions */
-/* Register ioctl block dynamatically for a regular file.
- *
- * @cmd: the array of ioctl command set
- * @count: number of commands in the @cmd
- * @cb: callback function, it will be called if an ioctl command is found to
- * belong to the command list @cmd.
- *
- * Return value:
- * A magic pointer will be returned if success;
- * otherwise, NULL will be returned.
- */
-void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
-void ll_iocontrol_unregister(void *magic);
-
-int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
- enum cl_fsync_mode mode, int ignore_layout);
-
-/** direct write pages */
-struct ll_dio_pages {
- /** page array to be written. we don't support
- * partial pages except the last one.
- */
- struct page **ldp_pages;
- /* offset of each page */
- loff_t *ldp_offsets;
- /** if ldp_offsets is NULL, it means a sequential
- * pages to be written, then this is the file offset
- * of the first page.
- */
- loff_t ldp_start_offset;
- /** how many bytes are to be written. */
- size_t ldp_size;
- /** # of pages in the array. */
- int ldp_nr;
-};
-
-ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
- int rw, struct inode *inode,
- struct ll_dio_pages *pv);
-
-static inline int ll_file_nolock(const struct file *file)
-{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct inode *inode = file_inode(file);
-
- return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
- (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
-}
-
-static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
- struct lookup_intent *it, __u64 *bits)
-{
- if (!it->it_lock_set) {
- struct lustre_handle handle;
-
- /* If this inode is a remote object, it will get two
- * separate locks in different namespaces, Master MDT,
- * where the name entry is, will grant LOOKUP lock,
- * remote MDT, where the object is, will grant
- * UPDATE|PERM lock. The inode will be attached to both
- * LOOKUP and PERM locks, so revoking either locks will
- * case the dcache being cleared
- */
- if (it->it_remote_lock_mode) {
- handle.cookie = it->it_remote_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode " DFID "%p for remote lock %#llx\n",
- PFID(ll_inode2fid(inode)), inode,
- handle.cookie);
- md_set_lock_data(exp, &handle, inode, NULL);
- }
-
- handle.cookie = it->it_lock_handle;
-
- CDEBUG(D_DLMTRACE,
- "setting l_data to inode " DFID "%p for lock %#llx\n",
- PFID(ll_inode2fid(inode)), inode, handle.cookie);
-
- md_set_lock_data(exp, &handle, inode, &it->it_lock_bits);
- it->it_lock_set = 1;
- }
-
- if (bits)
- *bits = it->it_lock_bits;
-}
-
-static inline int d_lustre_invalid(const struct dentry *dentry)
-{
- return ll_d2d(dentry)->lld_invalid;
-}
-
-/*
- * Mark dentry INVALID, if dentry refcount is zero (this is normally case for
- * ll_md_blocking_ast), unhash this dentry, and let dcache to reclaim it later;
- * else dput() of the last refcount will unhash this dentry and kill it.
- */
-static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
-{
- CDEBUG(D_DENTRY,
- "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
- dentry, dentry,
- dentry->d_parent, d_inode(dentry), d_count(dentry));
-
- spin_lock_nested(&dentry->d_lock,
- nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
- ll_d2d(dentry)->lld_invalid = 1;
- if (d_count(dentry) == 0)
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
-}
-
-static inline void d_lustre_revalidate(struct dentry *dentry)
-{
- spin_lock(&dentry->d_lock);
- LASSERT(ll_d2d(dentry));
- ll_d2d(dentry)->lld_invalid = 0;
- spin_unlock(&dentry->d_lock);
-}
-
-int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
-int ll_layout_refresh(struct inode *inode, __u32 *gen);
-int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
-
-int ll_xattr_init(void);
-void ll_xattr_fini(void);
-
-int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, enum cl_req_type crt);
-
-int ll_getparent(struct file *file, struct getparent __user *arg);
-
-/* lcommon_cl.c */
-int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
- unsigned int attr_flags);
-
-extern struct lu_env *cl_inode_fini_env;
-extern u16 cl_inode_fini_refcheck;
-
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
-void cl_inode_fini(struct inode *inode);
-
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
-__u32 cl_fid_build_gen(const struct lu_fid *fid);
-
-#endif /* LLITE_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
deleted file mode 100644
index e7500c53fafc..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ /dev/null
@@ -1,2666 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_lib.c
- *
- * Lustre Light Super operations
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/module.h>
-#include <linux/statfs.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include <lustre_ha.h>
-#include <lustre_dlm.h>
-#include <lprocfs_status.h>
-#include <lustre_disk.h>
-#include <uapi/linux/lustre/lustre_param.h>
-#include <lustre_log.h>
-#include <cl_object.h>
-#include <obd_cksum.h>
-#include "llite_internal.h"
-
-struct kmem_cache *ll_file_data_slab;
-struct dentry *llite_root;
-struct kset *llite_kset;
-
-#ifndef log2
-#define log2(n) ffz(~(n))
-#endif
-
-static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
-{
- struct ll_sb_info *sbi = NULL;
- unsigned long pages;
- unsigned long lru_page_max;
- struct sysinfo si;
- class_uuid_t uuid;
- int i;
-
- sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
- if (!sbi)
- return NULL;
-
- spin_lock_init(&sbi->ll_lock);
- mutex_init(&sbi->ll_lco.lco_lock);
- spin_lock_init(&sbi->ll_pp_extent_lock);
- spin_lock_init(&sbi->ll_process_lock);
- sbi->ll_rw_stats_on = 0;
-
- si_meminfo(&si);
- pages = si.totalram - si.totalhigh;
- lru_page_max = pages / 2;
-
- sbi->ll_cache = cl_cache_init(lru_page_max);
- if (!sbi->ll_cache) {
- kfree(sbi);
- return NULL;
- }
-
- sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
- SBI_DEFAULT_READAHEAD_MAX);
- sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
- sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
- SBI_DEFAULT_READAHEAD_WHOLE_MAX;
-
- ll_generate_random_uuid(uuid);
- class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
- CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
-
- sbi->ll_flags |= LL_SBI_VERBOSE;
- sbi->ll_flags |= LL_SBI_CHECKSUM;
-
- sbi->ll_flags |= LL_SBI_LRU_RESIZE;
- sbi->ll_flags |= LL_SBI_LAZYSTATFS;
-
- for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
- spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
- pp_r_hist.oh_lock);
- spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
- pp_w_hist.oh_lock);
- }
-
- /* metadata statahead is enabled by default */
- sbi->ll_sa_max = LL_SA_RPC_DEF;
- atomic_set(&sbi->ll_sa_total, 0);
- atomic_set(&sbi->ll_sa_wrong, 0);
- atomic_set(&sbi->ll_sa_running, 0);
- atomic_set(&sbi->ll_agl_total, 0);
- sbi->ll_flags |= LL_SBI_AGL_ENABLED;
-
- /* root squash */
- sbi->ll_squash.rsi_uid = 0;
- sbi->ll_squash.rsi_gid = 0;
- INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
- init_rwsem(&sbi->ll_squash.rsi_sem);
-
- sbi->ll_sb = sb;
-
- return sbi;
-}
-
-static void ll_free_sbi(struct super_block *sb)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
-
- if (sbi->ll_cache) {
- if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
- cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
- cl_cache_decref(sbi->ll_cache);
- sbi->ll_cache = NULL;
- }
-
- kfree(sbi);
-}
-
-static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
-{
- struct inode *root = NULL;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct obd_device *obd;
- struct obd_statfs *osfs = NULL;
- struct ptlrpc_request *request = NULL;
- struct obd_connect_data *data = NULL;
- struct obd_uuid *uuid;
- struct md_op_data *op_data;
- struct lustre_md lmd;
- u64 valid;
- int size, err, checksum;
-
- obd = class_name2obd(md);
- if (!obd) {
- CERROR("MD %s: not setup or attached\n", md);
- return -EINVAL;
- }
-
- data = kzalloc(sizeof(*data), GFP_NOFS);
- if (!data)
- return -ENOMEM;
-
- osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
- if (!osfs) {
- kfree(data);
- return -ENOMEM;
- }
-
- /* indicate the features supported by this client */
- data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
- OBD_CONNECT_ATTRFID |
- OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
- OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
- OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
- OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
- OBD_CONNECT_64BITHASH |
- OBD_CONNECT_EINPROGRESS |
- OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_LAYOUTLOCK |
- OBD_CONNECT_PINGLESS |
- OBD_CONNECT_MAX_EASIZE |
- OBD_CONNECT_FLOCK_DEAD |
- OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
- OBD_CONNECT_OPEN_BY_FID |
- OBD_CONNECT_DIR_STRIPE |
- OBD_CONNECT_BULK_MBITS;
-
- if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
- data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
-#ifdef CONFIG_FS_POSIX_ACL
- data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
-#endif
-
- if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
- /* flag mdc connection as lightweight, only used for test
- * purpose, use with care
- */
- data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
-
- data->ocd_ibits_known = MDS_INODELOCK_FULL;
- data->ocd_version = LUSTRE_VERSION_CODE;
-
- if (sb_rdonly(sb))
- data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
- if (sbi->ll_flags & LL_SBI_USER_XATTR)
- data->ocd_connect_flags |= OBD_CONNECT_XATTR;
-
- if (sbi->ll_flags & LL_SBI_FLOCK)
- sbi->ll_fop = &ll_file_operations_flock;
- else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
- sbi->ll_fop = &ll_file_operations;
- else
- sbi->ll_fop = &ll_file_operations_noflock;
-
- /* always ping even if server suppress_pings */
- if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
- data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
-
- data->ocd_brw_size = MD_MAX_BRW_SIZE;
-
- err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
- data, NULL);
- if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x14f,
- "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
- md);
- goto out;
- }
-
- if (err) {
- CERROR("cannot connect to %s: rc = %d\n", md, err);
- goto out;
- }
-
- sbi->ll_md_exp->exp_connect_data = *data;
-
- err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
- LUSTRE_SEQ_METADATA);
- if (err) {
- CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
- sbi->ll_md_exp->exp_obd->obd_name, err);
- goto out_md;
- }
-
- /* For mount, we only need fs info from MDT0, and also in DNE, it
- * can make sure the client can be mounted as long as MDT0 is
- * available
- */
- err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_FOR_MDT0);
- if (err)
- goto out_md_fid;
-
- /* This needs to be after statfs to ensure connect has finished.
- * Note that "data" does NOT contain the valid connect reply.
- * If connecting to a 1.8 server there will be no LMV device, so
- * we can access the MDC export directly and exp_connect_flags will
- * be non-zero, but if accessing an upgraded 2.1 server it will
- * have the correct flags filled in.
- * XXX: fill in the LMV exp_connect_flags from MDC(s).
- */
- valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
- if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
- valid != CLIENT_CONNECT_MDT_REQD) {
- char *buf;
-
- buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto out_md_fid;
- }
- obd_connect_flags2str(buf, PAGE_SIZE,
- valid ^ CLIENT_CONNECT_MDT_REQD, ",");
- LCONSOLE_ERROR_MSG(0x170,
- "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
- sbi->ll_md_exp->exp_obd->obd_name, buf);
- kfree(buf);
- err = -EPROTO;
- goto out_md_fid;
- }
-
- size = sizeof(*data);
- err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
- KEY_CONN_DATA, &size, data);
- if (err) {
- CERROR("%s: Get connect data failed: rc = %d\n",
- sbi->ll_md_exp->exp_obd->obd_name, err);
- goto out_md_fid;
- }
-
- LASSERT(osfs->os_bsize);
- sb->s_blocksize = osfs->os_bsize;
- sb->s_blocksize_bits = log2(osfs->os_bsize);
- sb->s_magic = LL_SUPER_MAGIC;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sbi->ll_namelen = osfs->os_namelen;
- sbi->ll_mnt.mnt = current->fs->root.mnt;
-
- if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
- !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
- LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
- sbi->ll_flags &= ~LL_SBI_USER_XATTR;
- }
-
- if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
- sb->s_flags |= SB_POSIXACL;
- sbi->ll_flags |= LL_SBI_ACL;
- } else {
- LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
- sb->s_flags &= ~SB_POSIXACL;
- sbi->ll_flags &= ~LL_SBI_ACL;
- }
-
- if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
- sbi->ll_flags |= LL_SBI_64BIT_HASH;
-
- if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
- sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT;
- else
- sbi->ll_md_brw_pages = 1;
-
- if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
- sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
-
- if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
- if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
- LCONSOLE_INFO(
- "%s: disabling xattr cache due to unknown maximum xattr size.\n",
- dt);
- } else {
- sbi->ll_flags |= LL_SBI_XATTR_CACHE;
- sbi->ll_xattr_cache_enabled = 1;
- }
- }
-
- obd = class_name2obd(dt);
- if (!obd) {
- CERROR("DT %s: not setup or attached\n", dt);
- err = -ENODEV;
- goto out_md_fid;
- }
-
- data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
- OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
- OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
- OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
- OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
- OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
- OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
- OBD_CONNECT_EINPROGRESS |
- OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_LAYOUTLOCK |
- OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
- OBD_CONNECT_BULK_MBITS;
-
- if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
- /* OBD_CONNECT_CKSUM should always be set, even if checksums are
- * disabled by default, because it can still be enabled on the
- * fly via /sys. As a consequence, we still need to come to an
- * agreement on the supported algorithms at connect time
- */
- data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
- data->ocd_cksum_types = OBD_CKSUM_ADLER;
- else
- data->ocd_cksum_types = cksum_types_supported_client();
- }
-
- data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
-
- /* always ping even if server suppress_pings */
- if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
- data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
-
- CDEBUG(D_RPCTRACE,
- "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
- data->ocd_connect_flags,
- data->ocd_version, data->ocd_grant);
-
- obd->obd_upcall.onu_owner = &sbi->ll_lco;
- obd->obd_upcall.onu_upcall = cl_ocd_update;
-
- data->ocd_brw_size = DT_MAX_BRW_SIZE;
-
- err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
- NULL);
- if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x150,
- "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
- dt);
- goto out_md;
- } else if (err) {
- CERROR("%s: Cannot connect to %s: rc = %d\n",
- sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
- goto out_md;
- }
-
- sbi->ll_dt_exp->exp_connect_data = *data;
-
- err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
- LUSTRE_SEQ_METADATA);
- if (err) {
- CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
- sbi->ll_dt_exp->exp_obd->obd_name, err);
- goto out_dt;
- }
-
- mutex_lock(&sbi->ll_lco.lco_lock);
- sbi->ll_lco.lco_flags = data->ocd_connect_flags;
- sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
- sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
- mutex_unlock(&sbi->ll_lco.lco_lock);
-
- fid_zero(&sbi->ll_root_fid);
- err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid);
- if (err) {
- CERROR("cannot mds_connect: rc = %d\n", err);
- goto out_lock_cn_cb;
- }
- if (!fid_is_sane(&sbi->ll_root_fid)) {
- CERROR("%s: Invalid root fid " DFID " during mount\n",
- sbi->ll_md_exp->exp_obd->obd_name,
- PFID(&sbi->ll_root_fid));
- err = -EINVAL;
- goto out_lock_cn_cb;
- }
- CDEBUG(D_SUPER, "rootfid " DFID "\n", PFID(&sbi->ll_root_fid));
-
- sb->s_op = &lustre_super_operations;
- sb->s_xattr = ll_xattr_handlers;
-#if THREAD_SIZE >= 8192 /*b=17630*/
- sb->s_export_op = &lustre_export_operations;
-#endif
-
- /* make root inode
- * XXX: move this to after cbd setup?
- */
- valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
- if (sbi->ll_flags & LL_SBI_ACL)
- valid |= OBD_MD_FLACL;
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data) {
- err = -ENOMEM;
- goto out_lock_cn_cb;
- }
-
- op_data->op_fid1 = sbi->ll_root_fid;
- op_data->op_mode = 0;
- op_data->op_valid = valid;
-
- err = md_getattr(sbi->ll_md_exp, op_data, &request);
- kfree(op_data);
- if (err) {
- CERROR("%s: md_getattr failed for root: rc = %d\n",
- sbi->ll_md_exp->exp_obd->obd_name, err);
- goto out_lock_cn_cb;
- }
-
- err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
- sbi->ll_md_exp, &lmd);
- if (err) {
- CERROR("failed to understand root inode md: rc = %d\n", err);
- ptlrpc_req_finished(request);
- goto out_lock_cn_cb;
- }
-
- LASSERT(fid_is_sane(&sbi->ll_root_fid));
- root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
- sbi->ll_flags & LL_SBI_32BIT_API),
- &lmd);
- md_free_lustre_md(sbi->ll_md_exp, &lmd);
- ptlrpc_req_finished(request);
-
- if (IS_ERR(root)) {
-#ifdef CONFIG_FS_POSIX_ACL
- if (lmd.posix_acl) {
- posix_acl_release(lmd.posix_acl);
- lmd.posix_acl = NULL;
- }
-#endif
- err = -EBADF;
- CERROR("lustre_lite: bad iget4 for root\n");
- goto out_root;
- }
-
- checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
- err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
- KEY_CHECKSUM, sizeof(checksum), &checksum,
- NULL);
- if (err) {
- CERROR("%s: Set checksum failed: rc = %d\n",
- sbi->ll_dt_exp->exp_obd->obd_name, err);
- goto out_root;
- }
- cl_sb_init(sb);
-
- err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
- KEY_CACHE_SET, sizeof(*sbi->ll_cache),
- sbi->ll_cache, NULL);
- if (err) {
- CERROR("%s: Set cache_set failed: rc = %d\n",
- sbi->ll_dt_exp->exp_obd->obd_name, err);
- goto out_root;
- }
-
- sb->s_root = d_make_root(root);
- if (!sb->s_root) {
- CERROR("%s: can't make root dentry\n",
- ll_get_fsname(sb, NULL, 0));
- err = -ENOMEM;
- goto out_lock_cn_cb;
- }
-
- sbi->ll_sdev_orig = sb->s_dev;
-
- /* We set sb->s_dev equal on all lustre clients in order to support
- * NFS export clustering. NFSD requires that the FSID be the same
- * on all clients.
- */
- /* s_dev is also used in lt_compare() to compare two fs, but that is
- * only a node-local comparison.
- */
- uuid = obd_get_uuid(sbi->ll_md_exp);
- if (uuid) {
- sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
- get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
- }
-
- kfree(data);
- kfree(osfs);
-
- if (llite_root) {
- err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
- if (err < 0) {
- CERROR("%s: could not register mount in debugfs: "
- "rc = %d\n", ll_get_fsname(sb, NULL, 0), err);
- err = 0;
- }
- }
-
- return err;
-out_root:
- iput(root);
-out_lock_cn_cb:
- obd_fid_fini(sbi->ll_dt_exp->exp_obd);
-out_dt:
- obd_disconnect(sbi->ll_dt_exp);
- sbi->ll_dt_exp = NULL;
-out_md_fid:
- obd_fid_fini(sbi->ll_md_exp->exp_obd);
-out_md:
- obd_disconnect(sbi->ll_md_exp);
- sbi->ll_md_exp = NULL;
-out:
- kfree(data);
- kfree(osfs);
- return err;
-}
-
-int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
-{
- int size, rc;
-
- size = sizeof(*lmmsize);
- rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
- KEY_MAX_EASIZE, &size, lmmsize);
- if (rc) {
- CERROR("%s: cannot get max LOV EA size: rc = %d\n",
- sbi->ll_dt_exp->exp_obd->obd_name, rc);
- return rc;
- }
-
- size = sizeof(int);
- rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
- KEY_MAX_EASIZE, &size, lmmsize);
- if (rc)
- CERROR("Get max mdsize error rc %d\n", rc);
-
- return rc;
-}
-
-/**
- * Get the value of the default_easize parameter.
- *
- * \see client_obd::cl_default_mds_easize
- *
- * \param[in] sbi superblock info for this filesystem
- * \param[out] lmmsize pointer to storage location for value
- *
- * \retval 0 on success
- * \retval negative negated errno on failure
- */
-int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
-{
- int size, rc;
-
- size = sizeof(int);
- rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
- KEY_DEFAULT_EASIZE, &size, lmmsize);
- if (rc)
- CERROR("Get default mdsize error rc %d\n", rc);
-
- return rc;
-}
-
-/**
- * Set the default_easize parameter to the given value.
- *
- * \see client_obd::cl_default_mds_easize
- *
- * \param[in] sbi superblock info for this filesystem
- * \param[in] lmmsize the size to set
- *
- * \retval 0 on success
- * \retval negative negated errno on failure
- */
-int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
-{
- if (lmmsize < sizeof(struct lov_mds_md) ||
- lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
- return -EINVAL;
-
- return obd_set_info_async(NULL, sbi->ll_md_exp,
- sizeof(KEY_DEFAULT_EASIZE),
- KEY_DEFAULT_EASIZE,
- sizeof(int), &lmmsize, NULL);
-}
-
-static void client_common_put_super(struct super_block *sb)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
-
- cl_sb_fini(sb);
-
- obd_fid_fini(sbi->ll_dt_exp->exp_obd);
- obd_disconnect(sbi->ll_dt_exp);
- sbi->ll_dt_exp = NULL;
-
- ldebugfs_unregister_mountpoint(sbi);
-
- obd_fid_fini(sbi->ll_md_exp->exp_obd);
- obd_disconnect(sbi->ll_md_exp);
- sbi->ll_md_exp = NULL;
-}
-
-void ll_kill_super(struct super_block *sb)
-{
- struct ll_sb_info *sbi;
-
- /* not init sb ?*/
- if (!(sb->s_flags & SB_ACTIVE))
- return;
-
- sbi = ll_s2sbi(sb);
- /* we need to restore s_dev from changed for clustered NFS before
- * put_super because new kernels have cached s_dev and change sb->s_dev
- * in put_super not affected real removing devices
- */
- if (sbi) {
- sb->s_dev = sbi->ll_sdev_orig;
- sbi->ll_umounting = 1;
-
- /* wait running statahead threads to quit */
- while (atomic_read(&sbi->ll_sa_running) > 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3));
- }
- }
-}
-
-static inline int ll_set_opt(const char *opt, char *data, int fl)
-{
- if (strncmp(opt, data, strlen(opt)) != 0)
- return 0;
- else
- return fl;
-}
-
-/* non-client-specific mount options are parsed in lmd_parse */
-static int ll_options(char *options, int *flags)
-{
- int tmp;
- char *s1 = options, *s2;
-
- if (!options)
- return 0;
-
- CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
-
- while (*s1) {
- CDEBUG(D_SUPER, "next opt=%s\n", s1);
- tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("noflock", s1,
- LL_SBI_FLOCK | LL_SBI_LOCALFLOCK);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("context", s1, 1);
- if (tmp)
- goto next;
- tmp = ll_set_opt("fscontext", s1, 1);
- if (tmp)
- goto next;
- tmp = ll_set_opt("defcontext", s1, 1);
- if (tmp)
- goto next;
- tmp = ll_set_opt("rootcontext", s1, 1);
- if (tmp)
- goto next;
- tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
-
- tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
- s1);
- return -EINVAL;
-
-next:
- /* Find next opt */
- s2 = strchr(s1, ',');
- if (!s2)
- break;
- s1 = s2 + 1;
- }
- return 0;
-}
-
-void ll_lli_init(struct ll_inode_info *lli)
-{
- lli->lli_inode_magic = LLI_INODE_MAGIC;
- lli->lli_flags = 0;
- spin_lock_init(&lli->lli_lock);
- lli->lli_posix_acl = NULL;
- /* Do not set lli_fid, it has been initialized already. */
- fid_zero(&lli->lli_pfid);
- lli->lli_mds_read_och = NULL;
- lli->lli_mds_write_och = NULL;
- lli->lli_mds_exec_och = NULL;
- lli->lli_open_fd_read_count = 0;
- lli->lli_open_fd_write_count = 0;
- lli->lli_open_fd_exec_count = 0;
- mutex_init(&lli->lli_och_mutex);
- spin_lock_init(&lli->lli_agl_lock);
- spin_lock_init(&lli->lli_layout_lock);
- ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
- lli->lli_clob = NULL;
-
- init_rwsem(&lli->lli_xattrs_list_rwsem);
- mutex_init(&lli->lli_xattrs_enq_lock);
-
- LASSERT(lli->lli_vfs_inode.i_mode != 0);
- if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
- mutex_init(&lli->lli_readdir_mutex);
- lli->lli_opendir_key = NULL;
- lli->lli_sai = NULL;
- spin_lock_init(&lli->lli_sa_lock);
- lli->lli_opendir_pid = 0;
- lli->lli_sa_enabled = 0;
- lli->lli_def_stripe_offset = -1;
- } else {
- mutex_init(&lli->lli_size_mutex);
- lli->lli_symlink_name = NULL;
- init_rwsem(&lli->lli_trunc_sem);
- range_lock_tree_init(&lli->lli_write_tree);
- init_rwsem(&lli->lli_glimpse_sem);
- lli->lli_glimpse_time = 0;
- INIT_LIST_HEAD(&lli->lli_agl_list);
- lli->lli_agl_index = 0;
- lli->lli_async_rc = 0;
- }
- mutex_init(&lli->lli_layout_mutex);
-}
-
-int ll_fill_super(struct super_block *sb)
-{
- struct lustre_profile *lprof = NULL;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct ll_sb_info *sbi;
- char *dt = NULL, *md = NULL;
- char *profilenm = get_profile_name(sb);
- struct config_llog_instance *cfg;
- int err;
- static atomic_t ll_bdi_num = ATOMIC_INIT(0);
-
- CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
-
- err = ptlrpc_inc_ref();
- if (err)
- return err;
-
- cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
- if (!cfg) {
- err = -ENOMEM;
- goto out_put;
- }
-
- try_module_get(THIS_MODULE);
-
- /* client additional sb info */
- sbi = ll_init_sbi(sb);
- lsi->lsi_llsbi = sbi;
- if (!sbi) {
- module_put(THIS_MODULE);
- kfree(cfg);
- err = -ENOMEM;
- goto out_put;
- }
-
- err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
- if (err)
- goto out_free;
-
- err = super_setup_bdi_name(sb, "lustre-%d",
- atomic_inc_return(&ll_bdi_num));
- if (err)
- goto out_free;
-
- /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
- sb->s_d_op = &ll_d_ops;
-
- /* Generate a string unique to this super, in case some joker tries
- * to mount the same fs at two mount points.
- * Use the address of the super itself.
- */
- cfg->cfg_instance = sb;
- cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
- cfg->cfg_callback = class_config_llog_handler;
- /* set up client obds */
- err = lustre_process_log(sb, profilenm, cfg);
- if (err < 0)
- goto out_free;
-
- /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
- lprof = class_get_profile(profilenm);
- if (!lprof) {
- LCONSOLE_ERROR_MSG(0x156,
- "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
- profilenm);
- err = -EINVAL;
- goto out_free;
- }
- CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
- lprof->lp_md, lprof->lp_dt);
-
- dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
- if (!dt) {
- err = -ENOMEM;
- goto out_free;
- }
-
- md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance);
- if (!md) {
- err = -ENOMEM;
- goto out_free;
- }
-
- /* connections, registrations, sb setup */
- err = client_common_fill_super(sb, md, dt);
- if (!err)
- sbi->ll_client_common_fill_super_succeeded = 1;
-
-out_free:
- kfree(md);
- kfree(dt);
- if (lprof)
- class_put_profile(lprof);
- if (err)
- ll_put_super(sb);
- else if (sbi->ll_flags & LL_SBI_VERBOSE)
- LCONSOLE_WARN("Mounted %s\n", profilenm);
-
- kfree(cfg);
-out_put:
- if (err)
- ptlrpc_dec_ref();
- return err;
-} /* ll_fill_super */
-
-void ll_put_super(struct super_block *sb)
-{
- struct config_llog_instance cfg, params_cfg;
- struct obd_device *obd;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- char *profilenm = get_profile_name(sb);
- int next, force = 1, rc = 0;
- long ccc_count;
-
- CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
-
- cfg.cfg_instance = sb;
- lustre_end_log(sb, profilenm, &cfg);
-
- params_cfg.cfg_instance = sb;
- lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
-
- if (sbi->ll_md_exp) {
- obd = class_exp2obd(sbi->ll_md_exp);
- if (obd)
- force = obd->obd_force;
- }
-
- /* Wait for unstable pages to be committed to stable storage */
- if (!force)
- rc = l_wait_event_abortable(sbi->ll_cache->ccc_unstable_waitq,
- !atomic_long_read(&sbi->ll_cache->ccc_unstable_nr));
-
- ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
- if (!force && rc != -ERESTARTSYS)
- LASSERTF(!ccc_count, "count: %li\n", ccc_count);
-
- /* We need to set force before the lov_disconnect in
- * lustre_common_put_super, since l_d cleans up osc's as well.
- */
- if (force) {
- next = 0;
- while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
- &next)) != NULL) {
- obd->obd_force = force;
- }
- }
-
- if (sbi->ll_client_common_fill_super_succeeded) {
- /* Only if client_common_fill_super succeeded */
- client_common_put_super(sb);
- }
-
- next = 0;
- while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
- class_manual_cleanup(obd);
-
- if (sbi->ll_flags & LL_SBI_VERBOSE)
- LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
-
- if (profilenm)
- class_del_profile(profilenm);
-
- ll_free_sbi(sb);
- lsi->lsi_llsbi = NULL;
-
- lustre_common_put_super(sb);
-
- cl_env_cache_purge(~0);
-
- module_put(THIS_MODULE);
-
- ptlrpc_dec_ref();
-} /* client_put_super */
-
-struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
-{
- struct inode *inode = NULL;
-
- /* NOTE: we depend on atomic igrab() -bzzz */
- lock_res_and_lock(lock);
- if (lock->l_resource->lr_lvb_inode) {
- struct ll_inode_info *lli;
-
- lli = ll_i2info(lock->l_resource->lr_lvb_inode);
- if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
- inode = igrab(lock->l_resource->lr_lvb_inode);
- } else {
- inode = lock->l_resource->lr_lvb_inode;
- LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
- D_WARNING, lock,
- "lr_lvb_inode %p is bogus: magic %08x",
- lock->l_resource->lr_lvb_inode,
- lli->lli_inode_magic);
- inode = NULL;
- }
- }
- unlock_res_and_lock(lock);
- return inode;
-}
-
-void ll_dir_clear_lsm_md(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
-
- LASSERT(S_ISDIR(inode->i_mode));
-
- if (lli->lli_lsm_md) {
- lmv_free_memmd(lli->lli_lsm_md);
- lli->lli_lsm_md = NULL;
- }
-}
-
-static struct inode *ll_iget_anon_dir(struct super_block *sb,
- const struct lu_fid *fid,
- struct lustre_md *md)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct mdt_body *body = md->body;
- struct inode *inode;
- ino_t ino;
-
- ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
- inode = iget_locked(sb, ino);
- if (!inode) {
- CERROR("%s: failed get simple inode " DFID ": rc = -ENOENT\n",
- ll_get_fsname(sb, NULL, 0), PFID(fid));
- return ERR_PTR(-ENOENT);
- }
-
- if (inode->i_state & I_NEW) {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lmv_stripe_md *lsm = md->lmv;
-
- inode->i_mode = (inode->i_mode & ~S_IFMT) |
- (body->mbo_mode & S_IFMT);
- LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode " DFID "\n",
- PFID(fid));
-
- LTIME_S(inode->i_mtime) = 0;
- LTIME_S(inode->i_atime) = 0;
- LTIME_S(inode->i_ctime) = 0;
- inode->i_rdev = 0;
-
- inode->i_op = &ll_dir_inode_operations;
- inode->i_fop = &ll_dir_operations;
- lli->lli_fid = *fid;
- ll_lli_init(lli);
-
- LASSERT(lsm);
- /* master object FID */
- lli->lli_pfid = body->mbo_fid1;
- CDEBUG(D_INODE, "lli %p slave " DFID " master " DFID "\n",
- lli, PFID(fid), PFID(&lli->lli_pfid));
- unlock_new_inode(inode);
- }
-
- return inode;
-}
-
-static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
-{
- struct lmv_stripe_md *lsm = md->lmv;
- struct lu_fid *fid;
- int i;
-
- LASSERT(lsm);
- /*
- * XXX sigh, this lsm_root initialization should be in
- * LMV layer, but it needs ll_iget right now, so we
- * put this here right now.
- */
- for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
- fid = &lsm->lsm_md_oinfo[i].lmo_fid;
- LASSERT(!lsm->lsm_md_oinfo[i].lmo_root);
- /* Unfortunately ll_iget will call ll_update_inode,
- * where the initialization of slave inode is slightly
- * different, so it reset lsm_md to NULL to avoid
- * initializing lsm for slave inode.
- */
- /* For migrating inode, master stripe and master object will
- * be same, so we only need assign this inode
- */
- if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i)
- lsm->lsm_md_oinfo[i].lmo_root = inode;
- else
- lsm->lsm_md_oinfo[i].lmo_root =
- ll_iget_anon_dir(inode->i_sb, fid, md);
- if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
- int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
-
- lsm->lsm_md_oinfo[i].lmo_root = NULL;
- return rc;
- }
- }
-
- return 0;
-}
-
-static inline int lli_lsm_md_eq(const struct lmv_stripe_md *lsm_md1,
- const struct lmv_stripe_md *lsm_md2)
-{
- return lsm_md1->lsm_md_magic == lsm_md2->lsm_md_magic &&
- lsm_md1->lsm_md_stripe_count == lsm_md2->lsm_md_stripe_count &&
- lsm_md1->lsm_md_master_mdt_index ==
- lsm_md2->lsm_md_master_mdt_index &&
- lsm_md1->lsm_md_hash_type == lsm_md2->lsm_md_hash_type &&
- lsm_md1->lsm_md_layout_version ==
- lsm_md2->lsm_md_layout_version &&
- !strcmp(lsm_md1->lsm_md_pool_name,
- lsm_md2->lsm_md_pool_name);
-}
-
-static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lmv_stripe_md *lsm = md->lmv;
- int rc;
-
- LASSERT(S_ISDIR(inode->i_mode));
- CDEBUG(D_INODE, "update lsm %p of " DFID "\n", lli->lli_lsm_md,
- PFID(ll_inode2fid(inode)));
-
- /* no striped information from request. */
- if (!lsm) {
- if (!lli->lli_lsm_md) {
- return 0;
- } else if (lli->lli_lsm_md->lsm_md_hash_type &
- LMV_HASH_FLAG_MIGRATION) {
- /*
- * migration is done, the temporay MIGRATE layout has
- * been removed
- */
- CDEBUG(D_INODE, DFID " finish migration.\n",
- PFID(ll_inode2fid(inode)));
- lmv_free_memmd(lli->lli_lsm_md);
- lli->lli_lsm_md = NULL;
- return 0;
- }
- /*
- * The lustre_md from req does not include stripeEA,
- * see ll_md_setattr
- */
- return 0;
- }
-
- /* set the directory layout */
- if (!lli->lli_lsm_md) {
- struct cl_attr *attr;
-
- rc = ll_init_lsm_md(inode, md);
- if (rc)
- return rc;
-
- /*
- * set lsm_md to NULL, so the following free lustre_md
- * will not free this lsm
- */
- md->lmv = NULL;
- lli->lli_lsm_md = lsm;
-
- attr = kzalloc(sizeof(*attr), GFP_NOFS);
- if (!attr)
- return -ENOMEM;
-
- /* validate the lsm */
- rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr,
- ll_md_blocking_ast);
- if (rc) {
- kfree(attr);
- return rc;
- }
-
- if (md->body->mbo_valid & OBD_MD_FLNLINK)
- md->body->mbo_nlink = attr->cat_nlink;
- if (md->body->mbo_valid & OBD_MD_FLSIZE)
- md->body->mbo_size = attr->cat_size;
- if (md->body->mbo_valid & OBD_MD_FLATIME)
- md->body->mbo_atime = attr->cat_atime;
- if (md->body->mbo_valid & OBD_MD_FLCTIME)
- md->body->mbo_ctime = attr->cat_ctime;
- if (md->body->mbo_valid & OBD_MD_FLMTIME)
- md->body->mbo_mtime = attr->cat_mtime;
-
- kfree(attr);
-
- CDEBUG(D_INODE, "Set lsm %p magic %x to " DFID "\n", lsm,
- lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
- return 0;
- }
-
- /* Compare the old and new stripe information */
- if (!lsm_md_eq(lli->lli_lsm_md, lsm)) {
- struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
- int idx;
-
- CERROR("%s: inode " DFID "(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
- ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
- inode, lsm, old_lsm,
- lsm->lsm_md_magic, old_lsm->lsm_md_magic,
- lsm->lsm_md_stripe_count,
- old_lsm->lsm_md_stripe_count,
- lsm->lsm_md_master_mdt_index,
- old_lsm->lsm_md_master_mdt_index,
- lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type,
- lsm->lsm_md_layout_version,
- old_lsm->lsm_md_layout_version,
- lsm->lsm_md_pool_name,
- old_lsm->lsm_md_pool_name);
-
- for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
- CERROR("%s: sub FIDs in old lsm idx %d, old: " DFID "\n",
- ll_get_fsname(inode->i_sb, NULL, 0), idx,
- PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
- }
-
- for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
- CERROR("%s: sub FIDs in new lsm idx %d, new: " DFID "\n",
- ll_get_fsname(inode->i_sb, NULL, 0), idx,
- PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
- }
-
- return -EIO;
- }
-
- return 0;
-}
-
-void ll_clear_inode(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
- PFID(ll_inode2fid(inode)), inode);
-
- if (S_ISDIR(inode->i_mode)) {
- /* these should have been cleared in ll_file_release */
- LASSERT(!lli->lli_opendir_key);
- LASSERT(!lli->lli_sai);
- LASSERT(lli->lli_opendir_pid == 0);
- }
-
- md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
-
- LASSERT(!lli->lli_open_fd_write_count);
- LASSERT(!lli->lli_open_fd_read_count);
- LASSERT(!lli->lli_open_fd_exec_count);
-
- if (lli->lli_mds_write_och)
- ll_md_real_close(inode, FMODE_WRITE);
- if (lli->lli_mds_exec_och)
- ll_md_real_close(inode, FMODE_EXEC);
- if (lli->lli_mds_read_och)
- ll_md_real_close(inode, FMODE_READ);
-
- if (S_ISLNK(inode->i_mode)) {
- kfree(lli->lli_symlink_name);
- lli->lli_symlink_name = NULL;
- }
-
- ll_xattr_cache_destroy(inode);
-
-#ifdef CONFIG_FS_POSIX_ACL
- forget_all_cached_acls(inode);
- if (lli->lli_posix_acl) {
- posix_acl_release(lli->lli_posix_acl);
- lli->lli_posix_acl = NULL;
- }
-#endif
- lli->lli_inode_magic = LLI_INODE_DEAD;
-
- if (S_ISDIR(inode->i_mode))
- ll_dir_clear_lsm_md(inode);
- if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
- LASSERT(list_empty(&lli->lli_agl_list));
-
- /*
- * XXX This has to be done before lsm is freed below, because
- * cl_object still uses inode lsm.
- */
- cl_inode_fini(inode);
-}
-
-#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
-
-static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
-{
- struct lustre_md md;
- struct inode *inode = d_inode(dentry);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *request = NULL;
- int rc, ia_valid;
-
- op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
- if (rc) {
- ptlrpc_req_finished(request);
- if (rc == -ENOENT) {
- clear_nlink(inode);
- /* Unlinked special device node? Or just a race?
- * Pretend we did everything.
- */
- if (!S_ISREG(inode->i_mode) &&
- !S_ISDIR(inode->i_mode)) {
- ia_valid = op_data->op_attr.ia_valid;
- op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
- rc = simple_setattr(dentry, &op_data->op_attr);
- op_data->op_attr.ia_valid = ia_valid;
- }
- } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
- CERROR("md_setattr fails: rc = %d\n", rc);
- }
- return rc;
- }
-
- rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
- sbi->ll_md_exp, &md);
- if (rc) {
- ptlrpc_req_finished(request);
- return rc;
- }
-
- ia_valid = op_data->op_attr.ia_valid;
- /* inode size will be in cl_setattr_ost, can't do it now since dirty
- * cache is not cleared yet.
- */
- op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
- if (S_ISREG(inode->i_mode))
- inode_lock(inode);
- rc = simple_setattr(dentry, &op_data->op_attr);
- if (S_ISREG(inode->i_mode))
- inode_unlock(inode);
- op_data->op_attr.ia_valid = ia_valid;
-
- rc = ll_update_inode(inode, &md);
- ptlrpc_req_finished(request);
-
- return rc;
-}
-
-/* If this inode has objects allocated to it (lsm != NULL), then the OST
- * object(s) determine the file size and mtime. Otherwise, the MDS will
- * keep these values until such a time that objects are allocated for it.
- * We do the MDS operations first, as it is checking permissions for us.
- * We don't to the MDS RPC if there is nothing that we want to store there,
- * otherwise there is no harm in updating mtime/atime on the MDS if we are
- * going to do an RPC anyways.
- *
- * If we are doing a truncate, we will send the mtime and ctime updates
- * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
- * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
- * at the same time.
- *
- * In case of HSMimport, we only set attr on MDS.
- */
-int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
-{
- struct inode *inode = d_inode(dentry);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct md_op_data *op_data = NULL;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE, "%s: setattr inode " DFID "(%p) from %llu to %llu, valid %x, hsm_import %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
- i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);
-
- if (attr->ia_valid & ATTR_SIZE) {
- /* Check new size against VFS/VM file size limit and rlimit */
- rc = inode_newsize_ok(inode, attr->ia_size);
- if (rc)
- return rc;
-
- /* The maximum Lustre file size is variable, based on the
- * OST maximum object size and number of stripes. This
- * needs another check in addition to the VFS check above.
- */
- if (attr->ia_size > ll_file_maxbytes(inode)) {
- CDEBUG(D_INODE, "file " DFID " too large %llu > %llu\n",
- PFID(&lli->lli_fid), attr->ia_size,
- ll_file_maxbytes(inode));
- return -EFBIG;
- }
-
- attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
- }
-
- /* POSIX: check before ATTR_*TIME_SET set (from setattr_prepare) */
- if (attr->ia_valid & TIMES_SET_FLAGS) {
- if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
- !capable(CAP_FOWNER))
- return -EPERM;
- }
-
- /* We mark all of the fields "set" so MDS/OST does not re-set them */
- if (attr->ia_valid & ATTR_CTIME) {
- attr->ia_ctime = current_time(inode);
- attr->ia_valid |= ATTR_CTIME_SET;
- }
- if (!(attr->ia_valid & ATTR_ATIME_SET) &&
- (attr->ia_valid & ATTR_ATIME)) {
- attr->ia_atime = current_time(inode);
- attr->ia_valid |= ATTR_ATIME_SET;
- }
- if (!(attr->ia_valid & ATTR_MTIME_SET) &&
- (attr->ia_valid & ATTR_MTIME)) {
- attr->ia_mtime = current_time(inode);
- attr->ia_valid |= ATTR_MTIME_SET;
- }
-
- if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
- CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n",
- LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
- (s64)ktime_get_real_seconds());
-
- if (S_ISREG(inode->i_mode))
- inode_unlock(inode);
-
- /*
- * We always do an MDS RPC, even if we're only changing the size;
- * only the MDS knows whether truncate() should fail with -ETXTBUSY
- */
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
- /*
- * If we are changing file size, file content is
- * modified, flag it.
- */
- attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
- op_data->op_bias |= MDS_DATA_MODIFIED;
- clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
- }
-
- op_data->op_attr = *attr;
-
- rc = ll_md_setattr(dentry, op_data);
- if (rc)
- goto out;
-
- if (!S_ISREG(inode->i_mode) || hsm_import) {
- rc = 0;
- goto out;
- }
-
- if (attr->ia_valid & (ATTR_SIZE |
- ATTR_ATIME | ATTR_ATIME_SET |
- ATTR_MTIME | ATTR_MTIME_SET)) {
- /* For truncate and utimes sending attributes to OSTs, setting
- * mtime/atime to the past will be performed under PW [0:EOF]
- * extent lock (new_size:EOF for truncate). It may seem
- * excessive to send mtime/atime updates to OSTs when not
- * setting times to past, but it is necessary due to possible
- * time de-synchronization between MDT inode and OST objects
- */
- rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0);
- }
-
- /*
- * If the file was restored, it needs to set dirty flag.
- *
- * We've already sent MDS_DATA_MODIFIED flag in
- * ll_md_setattr() for truncate. However, the MDT refuses to
- * set the HS_DIRTY flag on released files, so we have to set
- * it again if the file has been restored. Please check how
- * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
- *
- * Please notice that if the file is not released, the previous
- * MDS_DATA_MODIFIED has taken effect and usually
- * LLIF_DATA_MODIFIED is not set(see vvp_io_setattr_fini()).
- * This way we can save an RPC for common open + trunc
- * operation.
- */
- if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
- struct hsm_state_set hss = {
- .hss_valid = HSS_SETMASK,
- .hss_setmask = HS_DIRTY,
- };
- int rc2;
-
- rc2 = ll_hsm_state_set(inode, &hss);
- /*
- * truncate and write can happen at the same time, so that
- * the file can be set modified even though the file is not
- * restored from released state, and ll_hsm_state_set() is
- * not applicable for the file, and rc2 < 0 is normal in this
- * case.
- */
- if (rc2 < 0)
- CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
- PFID(ll_inode2fid(inode)), rc2);
- }
-
-out:
- if (op_data)
- ll_finish_md_op_data(op_data);
-
- if (S_ISREG(inode->i_mode)) {
- inode_lock(inode);
- if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
- inode_dio_wait(inode);
- }
-
- ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
- LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
-
- return rc;
-}
-
-int ll_setattr(struct dentry *de, struct iattr *attr)
-{
- int mode = d_inode(de)->i_mode;
-
- if ((attr->ia_valid & (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) ==
- (ATTR_CTIME | ATTR_SIZE | ATTR_MODE))
- attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
-
- if (((attr->ia_valid & (ATTR_MODE | ATTR_FORCE | ATTR_SIZE)) ==
- (ATTR_SIZE | ATTR_MODE)) &&
- (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
- (((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) &&
- !(attr->ia_mode & S_ISGID))))
- attr->ia_valid |= ATTR_FORCE;
-
- if ((attr->ia_valid & ATTR_MODE) &&
- (mode & S_ISUID) &&
- !(attr->ia_mode & S_ISUID) &&
- !(attr->ia_valid & ATTR_KILL_SUID))
- attr->ia_valid |= ATTR_KILL_SUID;
-
- if ((attr->ia_valid & ATTR_MODE) &&
- ((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) &&
- !(attr->ia_mode & S_ISGID) &&
- !(attr->ia_valid & ATTR_KILL_SGID))
- attr->ia_valid |= ATTR_KILL_SGID;
-
- return ll_setattr_raw(de, attr, false);
-}
-
-int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
- __u64 max_age, __u32 flags)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct obd_statfs obd_osfs;
- int rc;
-
- rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
- if (rc) {
- CERROR("md_statfs fails: rc = %d\n", rc);
- return rc;
- }
-
- osfs->os_type = sb->s_magic;
-
- CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
- osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
- osfs->os_files);
-
- if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
- flags |= OBD_STATFS_NODELAY;
-
- rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
- if (rc) {
- CERROR("obd_statfs fails: rc = %d\n", rc);
- return rc;
- }
-
- CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
- obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
- obd_osfs.os_files);
-
- osfs->os_bsize = obd_osfs.os_bsize;
- osfs->os_blocks = obd_osfs.os_blocks;
- osfs->os_bfree = obd_osfs.os_bfree;
- osfs->os_bavail = obd_osfs.os_bavail;
-
- /* If we don't have as many objects free on the OST as inodes
- * on the MDS, we reduce the total number of inodes to
- * compensate, so that the "inodes in use" number is correct.
- */
- if (obd_osfs.os_ffree < osfs->os_ffree) {
- osfs->os_files = (osfs->os_files - osfs->os_ffree) +
- obd_osfs.os_ffree;
- osfs->os_ffree = obd_osfs.os_ffree;
- }
-
- return rc;
-}
-
-int ll_statfs(struct dentry *de, struct kstatfs *sfs)
-{
- struct super_block *sb = de->d_sb;
- struct obd_statfs osfs;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
- ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
-
- /* Some amount of caching on the client is allowed */
- rc = ll_statfs_internal(sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- 0);
- if (rc)
- return rc;
-
- statfs_unpack(sfs, &osfs);
-
- /* We need to downshift for all 32-bit kernels, because we can't
- * tell if the kernel is being called via sys_statfs64() or not.
- * Stop before overflowing f_bsize - in which case it is better
- * to just risk EOVERFLOW if caller is using old sys_statfs().
- */
- if (sizeof(long) < 8) {
- while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
- sfs->f_bsize <<= 1;
-
- osfs.os_blocks >>= 1;
- osfs.os_bfree >>= 1;
- osfs.os_bavail >>= 1;
- }
- }
-
- sfs->f_blocks = osfs.os_blocks;
- sfs->f_bfree = osfs.os_bfree;
- sfs->f_bavail = osfs.os_bavail;
- sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
- return 0;
-}
-
-void ll_inode_size_lock(struct inode *inode)
-{
- struct ll_inode_info *lli;
-
- LASSERT(!S_ISDIR(inode->i_mode));
-
- lli = ll_i2info(inode);
- mutex_lock(&lli->lli_size_mutex);
-}
-
-void ll_inode_size_unlock(struct inode *inode)
-{
- struct ll_inode_info *lli;
-
- lli = ll_i2info(inode);
- mutex_unlock(&lli->lli_size_mutex);
-}
-
-int ll_update_inode(struct inode *inode, struct lustre_md *md)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct mdt_body *body = md->body;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- if (body->mbo_valid & OBD_MD_FLEASIZE)
- cl_file_inode_init(inode, md);
-
- if (S_ISDIR(inode->i_mode)) {
- int rc;
-
- rc = ll_update_lsm_md(inode, md);
- if (rc)
- return rc;
- }
-
-#ifdef CONFIG_FS_POSIX_ACL
- if (body->mbo_valid & OBD_MD_FLACL) {
- spin_lock(&lli->lli_lock);
- if (lli->lli_posix_acl)
- posix_acl_release(lli->lli_posix_acl);
- lli->lli_posix_acl = md->posix_acl;
- spin_unlock(&lli->lli_lock);
- }
-#endif
- inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
- sbi->ll_flags & LL_SBI_32BIT_API);
- inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
-
- if (body->mbo_valid & OBD_MD_FLATIME) {
- if (body->mbo_atime > LTIME_S(inode->i_atime))
- LTIME_S(inode->i_atime) = body->mbo_atime;
- lli->lli_atime = body->mbo_atime;
- }
- if (body->mbo_valid & OBD_MD_FLMTIME) {
- if (body->mbo_mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE,
- "setting ino %lu mtime from %lu to %llu\n",
- inode->i_ino, LTIME_S(inode->i_mtime),
- body->mbo_mtime);
- LTIME_S(inode->i_mtime) = body->mbo_mtime;
- }
- lli->lli_mtime = body->mbo_mtime;
- }
- if (body->mbo_valid & OBD_MD_FLCTIME) {
- if (body->mbo_ctime > LTIME_S(inode->i_ctime))
- LTIME_S(inode->i_ctime) = body->mbo_ctime;
- lli->lli_ctime = body->mbo_ctime;
- }
- if (body->mbo_valid & OBD_MD_FLMODE)
- inode->i_mode = (inode->i_mode & S_IFMT) |
- (body->mbo_mode & ~S_IFMT);
- if (body->mbo_valid & OBD_MD_FLTYPE)
- inode->i_mode = (inode->i_mode & ~S_IFMT) |
- (body->mbo_mode & S_IFMT);
- LASSERT(inode->i_mode != 0);
- if (S_ISREG(inode->i_mode))
- inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
- LL_MAX_BLKSIZE_BITS);
- else
- inode->i_blkbits = inode->i_sb->s_blocksize_bits;
- if (body->mbo_valid & OBD_MD_FLUID)
- inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
- if (body->mbo_valid & OBD_MD_FLGID)
- inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
- if (body->mbo_valid & OBD_MD_FLFLAGS)
- inode->i_flags = ll_ext_to_inode_flags(body->mbo_flags);
- if (body->mbo_valid & OBD_MD_FLNLINK)
- set_nlink(inode, body->mbo_nlink);
- if (body->mbo_valid & OBD_MD_FLRDEV)
- inode->i_rdev = old_decode_dev(body->mbo_rdev);
-
- if (body->mbo_valid & OBD_MD_FLID) {
- /* FID shouldn't be changed! */
- if (fid_is_sane(&lli->lli_fid)) {
- LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
- "Trying to change FID " DFID " to the " DFID ", inode " DFID "(%p)\n",
- PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
- PFID(ll_inode2fid(inode)), inode);
- } else {
- lli->lli_fid = body->mbo_fid1;
- }
- }
-
- LASSERT(fid_seq(&lli->lli_fid) != 0);
-
- if (body->mbo_valid & OBD_MD_FLSIZE) {
- i_size_write(inode, body->mbo_size);
-
- CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n",
- PFID(ll_inode2fid(inode)),
- (unsigned long long)body->mbo_size);
-
- if (body->mbo_valid & OBD_MD_FLBLOCKS)
- inode->i_blocks = body->mbo_blocks;
- }
-
- if (body->mbo_valid & OBD_MD_TSTATE) {
- if (body->mbo_t_state & MS_RESTORE)
- set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
- }
-
- return 0;
-}
-
-int ll_read_inode2(struct inode *inode, void *opaque)
-{
- struct lustre_md *md = opaque;
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
- PFID(&lli->lli_fid), inode);
-
- /* Core attributes from the MDS first. This is a new inode, and
- * the VFS doesn't zero times in the core inode so we have to do
- * it ourselves. They will be overwritten by either MDS or OST
- * attributes - we just need to make sure they aren't newer.
- */
- LTIME_S(inode->i_mtime) = 0;
- LTIME_S(inode->i_atime) = 0;
- LTIME_S(inode->i_ctime) = 0;
- inode->i_rdev = 0;
- rc = ll_update_inode(inode, md);
- if (rc)
- return rc;
-
- /* OIDEBUG(inode); */
-
- if (S_ISREG(inode->i_mode)) {
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- inode->i_op = &ll_file_inode_operations;
- inode->i_fop = sbi->ll_fop;
- inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &ll_dir_inode_operations;
- inode->i_fop = &ll_dir_operations;
- } else if (S_ISLNK(inode->i_mode)) {
- inode->i_op = &ll_fast_symlink_inode_operations;
- } else {
- inode->i_op = &ll_special_inode_operations;
-
- init_special_inode(inode, inode->i_mode,
- inode->i_rdev);
- }
-
- return 0;
-}
-
-void ll_delete_inode(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
-
- if (S_ISREG(inode->i_mode) && lli->lli_clob)
- /* discard all dirty pages before truncating them, required by
- * osc_extent implementation at LU-1030.
- */
- cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
- CL_FSYNC_LOCAL, 1);
-
- truncate_inode_pages_final(&inode->i_data);
-
- LASSERTF(!inode->i_data.nrpages,
- "inode=" DFID "(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
- PFID(ll_inode2fid(inode)), inode, inode->i_data.nrpages);
-
- ll_clear_inode(inode);
- clear_inode(inode);
-}
-
-int ll_iocontrol(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- int rc, flags = 0;
-
- switch (cmd) {
- case FSFILT_IOC_GETFLAGS: {
- struct mdt_body *body;
- struct md_op_data *op_data;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
- 0, 0, LUSTRE_OPC_ANY,
- NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = OBD_MD_FLFLAGS;
- rc = md_getattr(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc) {
- CERROR("%s: failure inode " DFID ": rc = %d\n",
- sbi->ll_md_exp->exp_obd->obd_name,
- PFID(ll_inode2fid(inode)), rc);
- return -abs(rc);
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- flags = body->mbo_flags;
-
- ptlrpc_req_finished(req);
-
- return put_user(flags, (int __user *)arg);
- }
- case FSFILT_IOC_SETFLAGS: {
- struct md_op_data *op_data;
- struct cl_object *obj;
- struct iattr *attr;
-
- if (get_user(flags, (int __user *)arg))
- return -EFAULT;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_attr_flags = flags;
- op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
- rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
- ll_finish_md_op_data(op_data);
- ptlrpc_req_finished(req);
- if (rc)
- return rc;
-
- inode->i_flags = ll_ext_to_inode_flags(flags);
-
- obj = ll_i2info(inode)->lli_clob;
- if (!obj)
- return 0;
-
- attr = kzalloc(sizeof(*attr), GFP_NOFS);
- if (!attr)
- return -ENOMEM;
-
- attr->ia_valid = ATTR_ATTR_FLAG;
- rc = cl_setattr_ost(obj, attr, flags);
- kfree(attr);
- return rc;
- }
- default:
- return -ENOSYS;
- }
-
- return 0;
-}
-
-int ll_flush_ctx(struct inode *inode)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- CDEBUG(D_SEC, "flush context for user %d\n",
- from_kuid(&init_user_ns, current_uid()));
-
- obd_set_info_async(NULL, sbi->ll_md_exp,
- sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
- 0, NULL, NULL);
- obd_set_info_async(NULL, sbi->ll_dt_exp,
- sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
- 0, NULL, NULL);
- return 0;
-}
-
-/* umount -f client means force down, don't save state */
-void ll_umount_begin(struct super_block *sb)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct obd_device *obd;
- struct obd_ioctl_data *ioc_data;
- int cnt = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
- sb->s_count, atomic_read(&sb->s_active));
-
- obd = class_exp2obd(sbi->ll_md_exp);
- if (!obd) {
- CERROR("Invalid MDC connection handle %#llx\n",
- sbi->ll_md_exp->exp_handle.h_cookie);
- return;
- }
- obd->obd_force = 1;
-
- obd = class_exp2obd(sbi->ll_dt_exp);
- if (!obd) {
- CERROR("Invalid LOV connection handle %#llx\n",
- sbi->ll_dt_exp->exp_handle.h_cookie);
- return;
- }
- obd->obd_force = 1;
-
- ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
- if (ioc_data) {
- obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
- sizeof(*ioc_data), ioc_data, NULL);
-
- obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
- sizeof(*ioc_data), ioc_data, NULL);
-
- kfree(ioc_data);
- }
-
- /* Really, we'd like to wait until there are no requests outstanding,
- * and then continue. For now, we just periodically checking for vfs
- * to decrement mnt_cnt and hope to finish it within 10sec.
- */
- while (cnt < 10 && !may_umount(sbi->ll_mnt.mnt)) {
- schedule_timeout_uninterruptible(HZ);
- cnt++;
- }
-
- schedule();
-}
-
-int ll_remount_fs(struct super_block *sb, int *flags, char *data)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- char *profilenm = get_profile_name(sb);
- int err;
- __u32 read_only;
-
- if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
- read_only = *flags & SB_RDONLY;
- err = obd_set_info_async(NULL, sbi->ll_md_exp,
- sizeof(KEY_READ_ONLY),
- KEY_READ_ONLY, sizeof(read_only),
- &read_only, NULL);
- if (err) {
- LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
- profilenm, read_only ?
- "read-only" : "read-write", err);
- return err;
- }
-
- if (read_only)
- sb->s_flags |= SB_RDONLY;
- else
- sb->s_flags &= ~SB_RDONLY;
-
- if (sbi->ll_flags & LL_SBI_VERBOSE)
- LCONSOLE_WARN("Remounted %s %s\n", profilenm,
- read_only ? "read-only" : "read-write");
- }
- return 0;
-}
-
-/**
- * Cleanup the open handle that is cached on MDT-side.
- *
- * For open case, the client side open handling thread may hit error
- * after the MDT grant the open. Under such case, the client should
- * send close RPC to the MDT as cleanup; otherwise, the open handle
- * on the MDT will be leaked there until the client umount or evicted.
- *
- * In further, if someone unlinked the file, because the open handle
- * holds the reference on such file/object, then it will block the
- * subsequent threads that want to locate such object via FID.
- *
- * \param[in] sb super block for this file-system
- * \param[in] open_req pointer to the original open request
- */
-void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
-{
- struct mdt_body *body;
- struct md_op_data *op_data;
- struct ptlrpc_request *close_req = NULL;
- struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
-
- body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data)
- return;
-
- op_data->op_fid1 = body->mbo_fid1;
- op_data->op_handle = body->mbo_handle;
- op_data->op_mod_time = get_seconds();
- md_close(exp, op_data, NULL, &close_req);
- ptlrpc_req_finished(close_req);
- ll_finish_md_op_data(op_data);
-}
-
-int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
- struct super_block *sb, struct lookup_intent *it)
-{
- struct ll_sb_info *sbi = NULL;
- struct lustre_md md = { NULL };
- int rc;
-
- LASSERT(*inode || sb);
- sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
- rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
- sbi->ll_md_exp, &md);
- if (rc)
- goto cleanup;
-
- if (*inode) {
- rc = ll_update_inode(*inode, &md);
- if (rc)
- goto out;
- } else {
- LASSERT(sb);
-
- /*
- * At this point server returns to client's same fid as client
- * generated for creating. So using ->fid1 is okay here.
- */
- if (!fid_is_sane(&md.body->mbo_fid1)) {
- CERROR("%s: Fid is insane " DFID "\n",
- ll_get_fsname(sb, NULL, 0),
- PFID(&md.body->mbo_fid1));
- rc = -EINVAL;
- goto out;
- }
-
- *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
- sbi->ll_flags & LL_SBI_32BIT_API),
- &md);
- if (IS_ERR(*inode)) {
-#ifdef CONFIG_FS_POSIX_ACL
- if (md.posix_acl) {
- posix_acl_release(md.posix_acl);
- md.posix_acl = NULL;
- }
-#endif
- rc = PTR_ERR(*inode);
- CERROR("new_inode -fatal: rc %d\n", rc);
- goto out;
- }
- }
-
- /* Handling piggyback layout lock.
- * Layout lock can be piggybacked by getattr and open request.
- * The lsm can be applied to inode only if it comes with a layout lock
- * otherwise correct layout may be overwritten, for example:
- * 1. proc1: mdt returns a lsm but not granting layout
- * 2. layout was changed by another client
- * 3. proc2: refresh layout and layout lock granted
- * 4. proc1: to apply a stale layout
- */
- if (it && it->it_lock_mode != 0) {
- struct lustre_handle lockh;
- struct ldlm_lock *lock;
-
- lockh.cookie = it->it_lock_handle;
- lock = ldlm_handle2lock(&lockh);
- LASSERT(lock);
- if (ldlm_has_layout(lock)) {
- struct cl_object_conf conf;
-
- memset(&conf, 0, sizeof(conf));
- conf.coc_opc = OBJECT_CONF_SET;
- conf.coc_inode = *inode;
- conf.coc_lock = lock;
- conf.u.coc_layout = md.layout;
- (void)ll_layout_conf(*inode, &conf);
- }
- LDLM_LOCK_PUT(lock);
- }
-
-out:
- md_free_lustre_md(sbi->ll_md_exp, &md);
-cleanup:
- if (rc != 0 && it && it->it_op & IT_OPEN)
- ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
-
- return rc;
-}
-
-int ll_obd_statfs(struct inode *inode, void __user *arg)
-{
- struct ll_sb_info *sbi = NULL;
- struct obd_export *exp;
- char *buf = NULL;
- struct obd_ioctl_data *data = NULL;
- __u32 type;
- int len = 0, rc;
-
- if (!inode) {
- rc = -EINVAL;
- goto out_statfs;
- }
-
- sbi = ll_i2sbi(inode);
- if (!sbi) {
- rc = -EINVAL;
- goto out_statfs;
- }
-
- rc = obd_ioctl_getdata(&buf, &len, arg);
- if (rc)
- goto out_statfs;
-
- data = (void *)buf;
- if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
- !data->ioc_pbuf1 || !data->ioc_pbuf2) {
- rc = -EINVAL;
- goto out_statfs;
- }
-
- if (data->ioc_inllen1 != sizeof(__u32) ||
- data->ioc_inllen2 != sizeof(__u32) ||
- data->ioc_plen1 != sizeof(struct obd_statfs) ||
- data->ioc_plen2 != sizeof(struct obd_uuid)) {
- rc = -EINVAL;
- goto out_statfs;
- }
-
- memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
- if (type & LL_STATFS_LMV) {
- exp = sbi->ll_md_exp;
- } else if (type & LL_STATFS_LOV) {
- exp = sbi->ll_dt_exp;
- } else {
- rc = -ENODEV;
- goto out_statfs;
- }
-
- rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
- if (rc)
- goto out_statfs;
-out_statfs:
- kvfree(buf);
- return rc;
-}
-
-int ll_process_config(struct lustre_cfg *lcfg)
-{
- char *ptr;
- void *sb;
- struct lprocfs_static_vars lvars;
- unsigned long x;
- int rc = 0;
-
- lprocfs_llite_init_vars(&lvars);
-
- /* The instance name contains the sb: lustre-client-aacfe000 */
- ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
- if (!ptr || !*(++ptr))
- return -EINVAL;
- rc = kstrtoul(ptr, 16, &x);
- if (rc != 0)
- return -EINVAL;
- sb = (void *)x;
- /* This better be a real Lustre superblock! */
- LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic ==
- LMD_MAGIC);
-
- /* Note we have not called client_common_fill_super yet, so
- * proc fns must be able to handle that!
- */
- rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
- lcfg, sb);
- if (rc > 0)
- rc = 0;
- return rc;
-}
-
-/* this function prepares md_op_data hint for passing ot down to MD stack. */
-struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
- struct inode *i1, struct inode *i2,
- const char *name, size_t namelen,
- u32 mode, __u32 opc, void *data)
-{
- if (!name) {
- /* Do not reuse namelen for something else. */
- if (namelen)
- return ERR_PTR(-EINVAL);
- } else {
- if (namelen > ll_i2sbi(i1)->ll_namelen)
- return ERR_PTR(-ENAMETOOLONG);
-
- if (!lu_name_is_valid_2(name, namelen))
- return ERR_PTR(-EINVAL);
- }
-
- if (!op_data)
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
-
- if (!op_data)
- return ERR_PTR(-ENOMEM);
-
- ll_i2gids(op_data->op_suppgids, i1, i2);
- op_data->op_fid1 = *ll_inode2fid(i1);
- op_data->op_default_stripe_offset = -1;
- if (S_ISDIR(i1->i_mode)) {
- op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
- if (opc == LUSTRE_OPC_MKDIR)
- op_data->op_default_stripe_offset =
- ll_i2info(i1)->lli_def_stripe_offset;
- }
-
- if (i2) {
- op_data->op_fid2 = *ll_inode2fid(i2);
- if (S_ISDIR(i2->i_mode))
- op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
- } else {
- fid_zero(&op_data->op_fid2);
- }
-
- if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
- op_data->op_cli_flags |= CLI_HASH64;
-
- if (ll_need_32bit_api(ll_i2sbi(i1)))
- op_data->op_cli_flags |= CLI_API32;
-
- op_data->op_name = name;
- op_data->op_namelen = namelen;
- op_data->op_mode = mode;
- op_data->op_mod_time = ktime_get_real_seconds();
- op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
- op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
- if ((opc == LUSTRE_OPC_CREATE) && name &&
- filename_is_volatile(name, namelen, &op_data->op_mds))
- op_data->op_bias |= MDS_CREATE_VOLATILE;
- else
- op_data->op_mds = 0;
- op_data->op_data = data;
-
- return op_data;
-}
-
-void ll_finish_md_op_data(struct md_op_data *op_data)
-{
- kfree(op_data);
-}
-
-int ll_show_options(struct seq_file *seq, struct dentry *dentry)
-{
- struct ll_sb_info *sbi;
-
- LASSERT(seq && dentry);
- sbi = ll_s2sbi(dentry->d_sb);
-
- if (sbi->ll_flags & LL_SBI_NOLCK)
- seq_puts(seq, ",nolock");
-
- if (sbi->ll_flags & LL_SBI_FLOCK)
- seq_puts(seq, ",flock");
-
- if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
- seq_puts(seq, ",localflock");
-
- if (sbi->ll_flags & LL_SBI_USER_XATTR)
- seq_puts(seq, ",user_xattr");
-
- if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
- seq_puts(seq, ",lazystatfs");
-
- if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
- seq_puts(seq, ",user_fid2path");
-
- if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
- seq_puts(seq, ",always_ping");
-
- return 0;
-}
-
-/**
- * Get obd name by cmd, and copy out to user space
- */
-int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obd_device *obd;
-
- if (cmd == OBD_IOC_GETDTNAME)
- obd = class_exp2obd(sbi->ll_dt_exp);
- else if (cmd == OBD_IOC_GETMDNAME)
- obd = class_exp2obd(sbi->ll_md_exp);
- else
- return -EINVAL;
-
- if (!obd)
- return -ENOENT;
-
- if (copy_to_user((void __user *)arg, obd->obd_name,
- strlen(obd->obd_name) + 1))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * Get lustre file system name by \a sbi. If \a buf is provided(non-NULL), the
- * fsname will be returned in this buffer; otherwise, a static buffer will be
- * used to store the fsname and returned to caller.
- */
-char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
-{
- static char fsname_static[MTI_NAME_MAXLEN];
- struct lustre_sb_info *lsi = s2lsi(sb);
- char *ptr;
- int len;
-
- if (!buf) {
- /* this means the caller wants to use static buffer
- * and it doesn't care about race. Usually this is
- * in error reporting path
- */
- buf = fsname_static;
- buflen = sizeof(fsname_static);
- }
-
- len = strlen(lsi->lsi_lmd->lmd_profile);
- ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
- if (ptr && (strcmp(ptr, "-client") == 0))
- len -= 7;
-
- if (unlikely(len >= buflen))
- len = buflen - 1;
- strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
- buf[len] = '\0';
-
- return buf;
-}
-
-void ll_dirty_page_discard_warn(struct page *page, int ioret)
-{
- char *buf, *path = NULL;
- struct dentry *dentry = NULL;
- struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
-
- /* this can be called inside spin lock so use GFP_ATOMIC. */
- buf = (char *)__get_free_page(GFP_ATOMIC);
- if (buf) {
- dentry = d_find_alias(page->mapping->host);
- if (dentry)
- path = dentry_path_raw(dentry, buf, PAGE_SIZE);
- }
-
- CDEBUG(D_WARNING,
- "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
- ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
- s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
- PFID(&obj->vob_header.coh_lu.loh_fid),
- (path && !IS_ERR(path)) ? path : "", ioret);
-
- if (dentry)
- dput(dentry);
-
- if (buf)
- free_page((unsigned long)buf);
-}
-
-ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
- struct lov_user_md **kbuf)
-{
- struct lov_user_md lum;
- ssize_t lum_size;
-
- if (copy_from_user(&lum, md, sizeof(lum))) {
- lum_size = -EFAULT;
- goto no_kbuf;
- }
-
- lum_size = ll_lov_user_md_size(&lum);
- if (lum_size < 0)
- goto no_kbuf;
-
- *kbuf = kzalloc(lum_size, GFP_NOFS);
- if (!*kbuf) {
- lum_size = -ENOMEM;
- goto no_kbuf;
- }
-
- if (copy_from_user(*kbuf, md, lum_size) != 0) {
- kfree(*kbuf);
- *kbuf = NULL;
- lum_size = -EFAULT;
- }
-no_kbuf:
- return lum_size;
-}
-
-/*
- * Compute llite root squash state after a change of root squash
- * configuration setting or add/remove of a lnet nid
- */
-void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
-{
- struct root_squash_info *squash = &sbi->ll_squash;
- struct lnet_process_id id;
- bool matched;
- int i;
-
- /* Update norootsquash flag */
- down_write(&squash->rsi_sem);
- if (list_empty(&squash->rsi_nosquash_nids)) {
- sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
- } else {
- /*
- * Do not apply root squash as soon as one of our NIDs is
- * in the nosquash_nids list
- */
- matched = false;
- i = 0;
-
- while (LNetGetId(i++, &id) != -ENOENT) {
- if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
- continue;
- if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
- matched = true;
- break;
- }
- }
- if (matched)
- sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
- else
- sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
- }
- up_write(&squash->rsi_sem);
-}
-
-/**
- * Parse linkea content to extract information about a given hardlink
- *
- * \param[in] ldata - Initialized linkea data
- * \param[in] linkno - Link identifier
- * \param[out] parent_fid - The entry's parent FID
- * \param[in] size - Entry name destination buffer
- *
- * \retval 0 on success
- * \retval Appropriate negative error code on failure
- */
-static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
- struct lu_fid *parent_fid, struct lu_name *ln)
-{
- unsigned int idx;
- int rc;
-
- rc = linkea_init_with_rec(ldata);
- if (rc < 0)
- return rc;
-
- if (linkno >= ldata->ld_leh->leh_reccount)
- /* beyond last link */
- return -ENODATA;
-
- linkea_first_entry(ldata);
- for (idx = 0; ldata->ld_lee; idx++) {
- linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
- parent_fid);
- if (idx == linkno)
- break;
-
- linkea_next_entry(ldata);
- }
-
- if (idx < linkno)
- return -ENODATA;
-
- return 0;
-}
-
-/**
- * Get parent FID and name of an identified link. Operation is performed for
- * a given link number, letting the caller iterate over linkno to list one or
- * all links of an entry.
- *
- * \param[in] file - File descriptor against which to perform the operation
- * \param[in,out] arg - User-filled structure containing the linkno to operate
- * on and the available size. It is eventually filled
- * with the requested information or left untouched on
- * error
- *
- * \retval - 0 on success
- * \retval - Appropriate negative error code on failure
- */
-int ll_getparent(struct file *file, struct getparent __user *arg)
-{
- struct inode *inode = file_inode(file);
- struct linkea_data *ldata;
- struct lu_fid parent_fid;
- struct lu_buf buf = {
- .lb_buf = NULL,
- .lb_len = 0
- };
- struct lu_name ln;
- u32 name_size;
- u32 linkno;
- int rc;
-
- if (!capable(CAP_DAC_READ_SEARCH) &&
- !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
- return -EPERM;
-
- if (get_user(name_size, &arg->gp_name_size))
- return -EFAULT;
-
- if (get_user(linkno, &arg->gp_linkno))
- return -EFAULT;
-
- if (name_size > PATH_MAX)
- return -EINVAL;
-
- ldata = kzalloc(sizeof(*ldata), GFP_NOFS);
- if (!ldata)
- return -ENOMEM;
-
- rc = linkea_data_new(ldata, &buf);
- if (rc < 0)
- goto ldata_free;
-
- rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
- buf.lb_len, OBD_MD_FLXATTR);
- if (rc < 0)
- goto lb_free;
-
- rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
- if (rc < 0)
- goto lb_free;
-
- if (ln.ln_namelen >= name_size) {
- rc = -EOVERFLOW;
- goto lb_free;
- }
-
- if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid))) {
- rc = -EFAULT;
- goto lb_free;
- }
-
- if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen)) {
- rc = -EFAULT;
- goto lb_free;
- }
-
- if (put_user('\0', arg->gp_name + ln.ln_namelen)) {
- rc = -EFAULT;
- goto lb_free;
- }
-
-lb_free:
- kvfree(buf.lb_buf);
-ldata_free:
- kfree(ldata);
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
deleted file mode 100644
index 214b07554e62..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ /dev/null
@@ -1,478 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/uaccess.h>
-
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "llite_internal.h"
-
-static const struct vm_operations_struct ll_file_vm_ops;
-
-void policy_from_vma(union ldlm_policy_data *policy,
- struct vm_area_struct *vma, unsigned long addr,
- size_t count)
-{
- policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
- (vma->vm_pgoff << PAGE_SHIFT);
- policy->l_extent.end = (policy->l_extent.start + count - 1) |
- ~PAGE_MASK;
-}
-
-struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
- size_t count)
-{
- struct vm_area_struct *vma, *ret = NULL;
-
- /* mmap_sem must have been held by caller. */
- LASSERT(!down_write_trylock(&mm->mmap_sem));
-
- for (vma = find_vma(mm, addr);
- vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
- if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
- vma->vm_flags & VM_SHARED) {
- ret = vma;
- break;
- }
- }
- return ret;
-}
-
-/**
- * API independent part for page fault initialization.
- * \param vma - virtual memory area addressed to page fault
- * \param env - corespondent lu_env to processing
- * \param index - page index corespondent to fault.
- * \parm ra_flags - vma readahead flags.
- *
- * \return error codes from cl_io_init.
- */
-static struct cl_io *
-ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
- pgoff_t index, unsigned long *ra_flags)
-{
- struct file *file = vma->vm_file;
- struct inode *inode = file_inode(file);
- struct cl_io *io;
- struct cl_fault_io *fio;
- int rc;
-
- if (ll_file_nolock(file))
- return ERR_PTR(-EOPNOTSUPP);
-
-restart:
- io = vvp_env_thread_io(env);
- io->ci_obj = ll_i2info(inode)->lli_clob;
- LASSERT(io->ci_obj);
-
- fio = &io->u.ci_fault;
- fio->ft_index = index;
- fio->ft_executable = vma->vm_flags & VM_EXEC;
-
- /*
- * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
- * the kernel will not read other pages not covered by ldlm in
- * filemap_nopage. we do our readahead in ll_readpage.
- */
- if (ra_flags)
- *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
- vma->vm_flags &= ~VM_SEQ_READ;
- vma->vm_flags |= VM_RAND_READ;
-
- CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
- fio->ft_index, fio->ft_executable);
-
- rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
- if (rc == 0) {
- struct vvp_io *vio = vvp_env_io(env);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(vio->vui_cl.cis_io == io);
-
- /* mmap lock must be MANDATORY it has to cache pages. */
- io->ci_lockreq = CILR_MANDATORY;
- vio->vui_fd = fd;
- } else {
- LASSERT(rc < 0);
- cl_io_fini(env, io);
- if (io->ci_need_restart)
- goto restart;
-
- io = ERR_PTR(rc);
- }
-
- return io;
-}
-
-/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
-static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
- bool *retry)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct vvp_io *vio;
- int result;
- u16 refcheck;
- sigset_t set;
- struct inode *inode;
- struct ll_inode_info *lli;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = ll_fault_io_init(env, vma, vmpage->index, NULL);
- if (IS_ERR(io)) {
- result = PTR_ERR(io);
- goto out;
- }
-
- result = io->ci_result;
- if (result < 0)
- goto out_io;
-
- io->u.ci_fault.ft_mkwrite = 1;
- io->u.ci_fault.ft_writable = 1;
-
- vio = vvp_env_io(env);
- vio->u.fault.ft_vma = vma;
- vio->u.fault.ft_vmpage = vmpage;
-
- cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM), &set);
-
- inode = vvp_object_inode(io->ci_obj);
- lli = ll_i2info(inode);
-
- result = cl_io_loop(env, io);
-
- cfs_restore_sigs(&set);
-
- if (result == 0) {
- struct inode *inode = file_inode(vma->vm_file);
- struct ll_inode_info *lli = ll_i2info(inode);
-
- lock_page(vmpage);
- if (!vmpage->mapping) {
- unlock_page(vmpage);
-
- /* page was truncated and lock was cancelled, return
- * ENODATA so that VM_FAULT_NOPAGE will be returned
- * to handle_mm_fault().
- */
- if (result == 0)
- result = -ENODATA;
- } else if (!PageDirty(vmpage)) {
- /* race, the page has been cleaned by ptlrpcd after
- * it was unlocked, it has to be added into dirty
- * cache again otherwise this soon-to-dirty page won't
- * consume any grants, even worse if this page is being
- * transferred because it will break RPC checksum.
- */
- unlock_page(vmpage);
-
- CDEBUG(D_MMAP,
- "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
- vmpage, vmpage->index);
-
- *retry = true;
- result = -EAGAIN;
- }
-
- if (!result)
- set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
- }
-
-out_io:
- cl_io_fini(env, io);
-out:
- cl_env_put(env, &refcheck);
- CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
- LASSERT(ergo(result == 0, PageLocked(vmpage)));
-
- return result;
-}
-
-static inline int to_fault_error(int result)
-{
- switch (result) {
- case 0:
- result = VM_FAULT_LOCKED;
- break;
- case -EFAULT:
- result = VM_FAULT_NOPAGE;
- break;
- case -ENOMEM:
- result = VM_FAULT_OOM;
- break;
- default:
- result = VM_FAULT_SIGBUS;
- break;
- }
- return result;
-}
-
-/**
- * Lustre implementation of a vm_operations_struct::fault() method, called by
- * VM to server page fault (both in kernel and user space).
- *
- * \param vma - is virtual area struct related to page fault
- * \param vmf - structure which describe type and address where hit fault
- *
- * \return allocated and filled _locked_ page for address
- * \retval VM_FAULT_ERROR on general error
- * \retval NOPAGE_OOM not have memory for allocate new page
- */
-static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct vvp_io *vio = NULL;
- struct page *vmpage;
- unsigned long ra_flags;
- int result = 0;
- int fault_ret = 0;
- u16 refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
- if (IS_ERR(io)) {
- result = to_fault_error(PTR_ERR(io));
- goto out;
- }
-
- result = io->ci_result;
- if (result == 0) {
- vio = vvp_env_io(env);
- vio->u.fault.ft_vma = vma;
- vio->u.fault.ft_vmpage = NULL;
- vio->u.fault.ft_vmf = vmf;
- vio->u.fault.ft_flags = 0;
- vio->u.fault.ft_flags_valid = false;
-
- /* May call ll_readpage() */
- ll_cl_add(vma->vm_file, env, io);
-
- result = cl_io_loop(env, io);
-
- ll_cl_remove(vma->vm_file, env);
-
- /* ft_flags are only valid if we reached
- * the call to filemap_fault
- */
- if (vio->u.fault.ft_flags_valid)
- fault_ret = vio->u.fault.ft_flags;
-
- vmpage = vio->u.fault.ft_vmpage;
- if (result != 0 && vmpage) {
- put_page(vmpage);
- vmf->page = NULL;
- }
- }
- cl_io_fini(env, io);
-
- vma->vm_flags |= ra_flags;
-
-out:
- cl_env_put(env, &refcheck);
- if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
- fault_ret |= to_fault_error(result);
-
- CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
- return fault_ret;
-}
-
-static int ll_fault(struct vm_fault *vmf)
-{
- int count = 0;
- bool printed = false;
- int result;
- sigset_t set;
-
- /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
- * so that it can be killed by admin but not cause segfault by
- * other signals.
- */
- cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM), &set);
-
-restart:
- result = ll_fault0(vmf->vma, vmf);
- LASSERT(!(result & VM_FAULT_LOCKED));
- if (result == 0) {
- struct page *vmpage = vmf->page;
-
- /* check if this page has been truncated */
- lock_page(vmpage);
- if (unlikely(!vmpage->mapping)) { /* unlucky */
- unlock_page(vmpage);
- put_page(vmpage);
- vmf->page = NULL;
-
- if (!printed && ++count > 16) {
- CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
- current->comm);
- printed = true;
- }
-
- goto restart;
- }
-
- result = VM_FAULT_LOCKED;
- }
- cfs_restore_sigs(&set);
- return result;
-}
-
-static int ll_page_mkwrite(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- int count = 0;
- bool printed = false;
- bool retry;
- int result;
-
- file_update_time(vma->vm_file);
- do {
- retry = false;
- result = ll_page_mkwrite0(vma, vmf->page, &retry);
-
- if (!printed && ++count > 16) {
- const struct dentry *de = vma->vm_file->f_path.dentry;
-
- CWARN("app(%s): the page %lu of file " DFID " is under heavy contention\n",
- current->comm, vmf->pgoff,
- PFID(ll_inode2fid(de->d_inode)));
- printed = true;
- }
- } while (retry);
-
- switch (result) {
- case 0:
- LASSERT(PageLocked(vmf->page));
- result = VM_FAULT_LOCKED;
- break;
- case -ENODATA:
- case -EAGAIN:
- case -EFAULT:
- result = VM_FAULT_NOPAGE;
- break;
- case -ENOMEM:
- result = VM_FAULT_OOM;
- break;
- default:
- result = VM_FAULT_SIGBUS;
- break;
- }
-
- return result;
-}
-
-/**
- * To avoid cancel the locks covering mmapped region for lock cache pressure,
- * we track the mapped vma count in vvp_object::vob_mmap_cnt.
- */
-static void ll_vm_open(struct vm_area_struct *vma)
-{
- struct inode *inode = file_inode(vma->vm_file);
- struct vvp_object *vob = cl_inode2vvp(inode);
-
- LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
- atomic_inc(&vob->vob_mmap_cnt);
-}
-
-/**
- * Dual to ll_vm_open().
- */
-static void ll_vm_close(struct vm_area_struct *vma)
-{
- struct inode *inode = file_inode(vma->vm_file);
- struct vvp_object *vob = cl_inode2vvp(inode);
-
- atomic_dec(&vob->vob_mmap_cnt);
- LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
-}
-
-/* XXX put nice comment here. talk about __free_pte -> dirty pages and
- * nopage's reference passing to the pte
- */
-int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
-{
- int rc = -ENOENT;
-
- LASSERTF(last > first, "last %llu first %llu\n", last, first);
- if (mapping_mapped(mapping)) {
- rc = 0;
- unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
- last - first + 1, 0);
- }
-
- return rc;
-}
-
-static const struct vm_operations_struct ll_file_vm_ops = {
- .fault = ll_fault,
- .page_mkwrite = ll_page_mkwrite,
- .open = ll_vm_open,
- .close = ll_vm_close,
-};
-
-int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct inode *inode = file_inode(file);
- int rc;
-
- if (ll_file_nolock(file))
- return -EOPNOTSUPP;
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
- rc = generic_file_mmap(file, vma);
- if (rc == 0) {
- vma->vm_ops = &ll_file_vm_ops;
- vma->vm_ops->open(vma);
- /* update the inode's size and mtime */
- rc = ll_glimpse_size(inode);
- }
-
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
deleted file mode 100644
index a6a1d80c711a..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ /dev/null
@@ -1,375 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lustre/llite/llite_nfs.c
- *
- * NFS export of Lustre Light File System
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- * Author: Huang Hua <huanghua@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-#include "llite_internal.h"
-#include <linux/exportfs.h>
-
-__u32 get_uuid2int(const char *name, int len)
-{
- __u32 key0 = 0x12a3fe2d, key1 = 0x37abe8f9;
-
- while (len--) {
- __u32 key = key1 + (key0 ^ (*name++ * 7152373));
-
- if (key & 0x80000000)
- key -= 0x7fffffff;
- key1 = key0;
- key0 = key;
- }
- return (key0 << 1);
-}
-
-void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid)
-{
- __u64 key = 0, key0 = 0x12a3fe2d, key1 = 0x37abe8f9;
-
- while (len--) {
- key = key1 + (key0 ^ (*name++ * 7152373));
- if (key & 0x8000000000000000ULL)
- key -= 0x7fffffffffffffffULL;
- key1 = key0;
- key0 = key;
- }
-
- fsid->val[0] = key;
- fsid->val[1] = key >> 32;
-}
-
-struct inode *search_inode_for_lustre(struct super_block *sb,
- const struct lu_fid *fid)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct ptlrpc_request *req = NULL;
- struct inode *inode = NULL;
- int eadatalen = 0;
- unsigned long hash = cl_fid_build_ino(fid,
- ll_need_32bit_api(sbi));
- struct md_op_data *op_data;
- int rc;
-
- CDEBUG(D_INFO, "searching inode for:(%lu," DFID ")\n", hash, PFID(fid));
-
- inode = ilookup5(sb, hash, ll_test_inode_by_fid, (void *)fid);
- if (inode)
- return inode;
-
- rc = ll_get_default_mdsize(sbi, &eadatalen);
- if (rc)
- return ERR_PTR(rc);
-
- /* Because inode is NULL, ll_prep_md_op_data can not
- * be used here. So we allocate op_data ourselves
- */
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data)
- return ERR_PTR(-ENOMEM);
-
- op_data->op_fid1 = *fid;
- op_data->op_mode = eadatalen;
- op_data->op_valid = OBD_MD_FLEASIZE;
-
- /* mds_fid2dentry ignores f_type */
- rc = md_getattr(sbi->ll_md_exp, op_data, &req);
- kfree(op_data);
- if (rc) {
- CDEBUG(D_INFO, "can't get object attrs, fid " DFID ", rc %d\n",
- PFID(fid), rc);
- return ERR_PTR(rc);
- }
- rc = ll_prep_inode(&inode, req, sb, NULL);
- ptlrpc_req_finished(req);
- if (rc)
- return ERR_PTR(rc);
-
- return inode;
-}
-
-struct lustre_nfs_fid {
- struct lu_fid lnf_child;
- struct lu_fid lnf_parent;
-};
-
-static struct dentry *
-ll_iget_for_nfs(struct super_block *sb,
- struct lu_fid *fid, struct lu_fid *parent)
-{
- struct inode *inode;
- struct dentry *result;
-
- if (!fid_is_sane(fid))
- return ERR_PTR(-ESTALE);
-
- CDEBUG(D_INFO, "Get dentry for fid: " DFID "\n", PFID(fid));
-
- inode = search_inode_for_lustre(sb, fid);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
-
- if (is_bad_inode(inode)) {
- /* we didn't find the right inode.. */
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
-
- result = d_obtain_alias(inode);
- if (IS_ERR(result)) {
- iput(inode);
- return result;
- }
-
- /**
- * In case d_obtain_alias() found a disconnected dentry, always update
- * lli_pfid to allow later operation (normally open) have parent fid,
- * which may be used by MDS to create data.
- */
- if (parent) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_pfid = *parent;
- spin_unlock(&lli->lli_lock);
- }
-
- /* N.B. d_obtain_alias() drops inode ref on error */
- result = d_obtain_alias(inode);
- if (!IS_ERR(result)) {
- /*
- * Need to signal to the ll_intent_file_open that
- * we came from NFS and so opencache needs to be
- * enabled for this one
- */
- ll_d2d(result)->lld_nfs_dentry = 1;
- }
-
- return result;
-}
-
-/**
- * \a connectable - is nfsd will connect himself or this should be done
- * at lustre
- *
- * The return value is file handle type:
- * 1 -- contains child file handle;
- * 2 -- contains child file handle and parent file handle;
- * 255 -- error.
- */
-static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
- struct inode *parent)
-{
- int fileid_len = sizeof(struct lustre_nfs_fid) / 4;
- struct lustre_nfs_fid *nfs_fid = (void *)fh;
-
- CDEBUG(D_INFO, "%s: encoding for (" DFID ") maxlen=%d minlen=%d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), *plen, fileid_len);
-
- if (*plen < fileid_len) {
- *plen = fileid_len;
- return FILEID_INVALID;
- }
-
- nfs_fid->lnf_child = *ll_inode2fid(inode);
- if (parent)
- nfs_fid->lnf_parent = *ll_inode2fid(parent);
- else
- fid_zero(&nfs_fid->lnf_parent);
- *plen = fileid_len;
-
- return FILEID_LUSTRE;
-}
-
-static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
- int namelen, loff_t hash, u64 ino,
- unsigned int type)
-{
- /* It is hack to access lde_fid for comparison with lgd_fid.
- * So the input 'name' must be part of the 'lu_dirent'.
- */
- struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name);
- struct ll_getname_data *lgd =
- container_of(ctx, struct ll_getname_data, ctx);
- struct lu_fid fid;
-
- fid_le_to_cpu(&fid, &lde->lde_fid);
- if (lu_fid_eq(&fid, &lgd->lgd_fid)) {
- memcpy(lgd->lgd_name, name, namelen);
- lgd->lgd_name[namelen] = 0;
- lgd->lgd_found = 1;
- }
- return lgd->lgd_found;
-}
-
-static int ll_get_name(struct dentry *dentry, char *name,
- struct dentry *child)
-{
- struct inode *dir = d_inode(dentry);
- int rc;
- struct ll_getname_data lgd = {
- .lgd_name = name,
- .lgd_fid = ll_i2info(d_inode(child))->lli_fid,
- .ctx.actor = ll_nfs_get_name_filldir,
- };
- struct md_op_data *op_data;
- __u64 pos = 0;
-
- if (!dir || !S_ISDIR(dir->i_mode)) {
- rc = -ENOTDIR;
- goto out;
- }
-
- if (!dir->i_fop) {
- rc = -EINVAL;
- goto out;
- }
-
- op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
- LUSTRE_OPC_ANY, dir);
- if (IS_ERR(op_data)) {
- rc = PTR_ERR(op_data);
- goto out;
- }
-
- op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
- inode_lock(dir);
- rc = ll_dir_read(dir, &pos, op_data, &lgd.ctx);
- inode_unlock(dir);
- ll_finish_md_op_data(op_data);
- if (!rc && !lgd.lgd_found)
- rc = -ENOENT;
-out:
- return rc;
-}
-
-static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid,
- int fh_len, int fh_type)
-{
- struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
-
- if (fh_type != FILEID_LUSTRE)
- return ERR_PTR(-EPROTO);
-
- return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent);
-}
-
-static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
- int fh_len, int fh_type)
-{
- struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
-
- if (fh_type != FILEID_LUSTRE)
- return ERR_PTR(-EPROTO);
-
- return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL);
-}
-
-int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid)
-{
- struct ptlrpc_request *req = NULL;
- struct ll_sb_info *sbi;
- struct mdt_body *body;
- static const char dotdot[] = "..";
- struct md_op_data *op_data;
- int rc;
- int lmmsize;
-
- LASSERT(dir && S_ISDIR(dir->i_mode));
-
- sbi = ll_s2sbi(dir->i_sb);
-
- CDEBUG(D_INFO, "%s: getting parent for (" DFID ")\n",
- ll_get_fsname(dir->i_sb, NULL, 0),
- PFID(ll_inode2fid(dir)));
-
- rc = ll_get_default_mdsize(sbi, &lmmsize);
- if (rc != 0)
- return rc;
-
- op_data = ll_prep_md_op_data(NULL, dir, NULL, dotdot,
- strlen(dotdot), lmmsize,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc) {
- CERROR("%s: failure inode " DFID " get parent: rc = %d\n",
- ll_get_fsname(dir->i_sb, NULL, 0),
- PFID(ll_inode2fid(dir)), rc);
- return rc;
- }
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- /*
- * LU-3952: MDT may lost the FID of its parent, we should not crash
- * the NFS server, ll_iget_for_nfs() will handle the error.
- */
- if (body->mbo_valid & OBD_MD_FLID) {
- CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
- PFID(ll_inode2fid(dir)), PFID(&body->mbo_fid1));
- *parent_fid = body->mbo_fid1;
- }
-
- ptlrpc_req_finished(req);
- return 0;
-}
-
-static struct dentry *ll_get_parent(struct dentry *dchild)
-{
- struct lu_fid parent_fid = { 0 };
- struct dentry *dentry;
- int rc;
-
- rc = ll_dir_get_parent_fid(dchild->d_inode, &parent_fid);
- if (rc)
- return ERR_PTR(rc);
-
- dentry = ll_iget_for_nfs(dchild->d_inode->i_sb, &parent_fid, NULL);
-
- return dentry;
-}
-
-const struct export_operations lustre_export_operations = {
- .get_parent = ll_get_parent,
- .encode_fh = ll_encode_fh,
- .get_name = ll_get_name,
- .fh_to_dentry = ll_fh_to_dentry,
- .fh_to_parent = ll_fh_to_parent,
-};
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
deleted file mode 100644
index 644bea2f9d37..000000000000
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ /dev/null
@@ -1,1684 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <lprocfs_status.h>
-#include <linux/seq_file.h>
-#include <obd_support.h>
-
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-/* debugfs llite mount point registration */
-static const struct file_operations ll_rw_extents_stats_fops;
-static const struct file_operations ll_rw_extents_stats_pp_fops;
-static const struct file_operations ll_rw_offset_stats_fops;
-
-static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%u\n", osfs.os_bsize);
-
- return rc;
-}
-LUSTRE_RO_ATTR(blocksize);
-
-static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_blocks;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- rc = sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytestotal);
-
-static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_bfree;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- rc = sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytesfree);
-
-static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_bavail;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- rc = sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytesavail);
-
-static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%llu\n", osfs.os_files);
-
- return rc;
-}
-LUSTRE_RO_ATTR(filestotal);
-
-static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%llu\n", osfs.os_ffree);
-
- return rc;
-}
-LUSTRE_RO_ATTR(filesfree);
-
-static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return sprintf(buf, "local client\n");
-}
-LUSTRE_RO_ATTR(client_type);
-
-static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%s\n", sbi->ll_sb->s_type->name);
-}
-LUSTRE_RO_ATTR(fstype);
-
-static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
-}
-LUSTRE_RO_ATTR(uuid);
-
-static int ll_site_stats_seq_show(struct seq_file *m, void *v)
-{
- struct super_block *sb = m->private;
-
- /*
- * See description of statistical counters in struct cl_site, and
- * struct lu_site.
- */
- return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
-}
-
-LPROC_SEQ_FOPS_RO(ll_site_stats);
-
-static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- long pages_number;
- int mult;
-
- spin_lock(&sbi->ll_lock);
- pages_number = sbi->ll_ra_info.ra_max_pages;
- spin_unlock(&sbi->ll_lock);
-
- mult = 1 << (20 - PAGE_SHIFT);
- return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
-}
-
-static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long pages_number;
-
- rc = kstrtoul(buffer, 10, &pages_number);
- if (rc)
- return rc;
-
- pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
-
- if (pages_number > totalram_pages / 2) {
- CERROR("can't set file readahead more than %lu MB\n",
- totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
- return -ERANGE;
- }
-
- spin_lock(&sbi->ll_lock);
- sbi->ll_ra_info.ra_max_pages = pages_number;
- spin_unlock(&sbi->ll_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_read_ahead_mb);
-
-static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- long pages_number;
- int mult;
-
- spin_lock(&sbi->ll_lock);
- pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
- spin_unlock(&sbi->ll_lock);
-
- mult = 1 << (20 - PAGE_SHIFT);
- return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
-}
-
-static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long pages_number;
-
- rc = kstrtoul(buffer, 10, &pages_number);
- if (rc)
- return rc;
-
- if (pages_number > sbi->ll_ra_info.ra_max_pages) {
- CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
- sbi->ll_ra_info.ra_max_pages);
- return -ERANGE;
- }
-
- spin_lock(&sbi->ll_lock);
- sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
- spin_unlock(&sbi->ll_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_read_ahead_per_file_mb);
-
-static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- long pages_number;
- int mult;
-
- spin_lock(&sbi->ll_lock);
- pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
- spin_unlock(&sbi->ll_lock);
-
- mult = 1 << (20 - PAGE_SHIFT);
- return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
-}
-
-static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long pages_number;
-
- rc = kstrtoul(buffer, 10, &pages_number);
- if (rc)
- return rc;
-
- /* Cap this at the current max readahead window size, the readahead
- * algorithm does this anyway so it's pointless to set it larger.
- */
- if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
- CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
- sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
- return -ERANGE;
- }
-
- spin_lock(&sbi->ll_lock);
- sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
- spin_unlock(&sbi->ll_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_read_ahead_whole_mb);
-
-static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
-{
- struct super_block *sb = m->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_cache *cache = sbi->ll_cache;
- int shift = 20 - PAGE_SHIFT;
- long max_cached_mb;
- long unused_mb;
-
- max_cached_mb = cache->ccc_lru_max >> shift;
- unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
- seq_printf(m,
- "users: %d\n"
- "max_cached_mb: %ld\n"
- "used_mb: %ld\n"
- "unused_mb: %ld\n"
- "reclaim_count: %u\n",
- atomic_read(&cache->ccc_users),
- max_cached_mb,
- max_cached_mb - unused_mb,
- unused_mb,
- cache->ccc_lru_shrinkers);
- return 0;
-}
-
-static ssize_t ll_max_cached_mb_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct super_block *sb = ((struct seq_file *)file->private_data)->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_cache *cache = sbi->ll_cache;
- struct lu_env *env;
- long diff = 0;
- long nrpages = 0;
- u16 refcheck;
- long pages_number;
- int mult;
- long rc;
- u64 val;
- char kernbuf[128];
-
- if (count >= sizeof(kernbuf))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
- kernbuf[count] = 0;
-
- mult = 1 << (20 - PAGE_SHIFT);
- buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
- kernbuf;
- rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult);
- if (rc)
- return rc;
-
- if (val > LONG_MAX)
- return -ERANGE;
- pages_number = (long)val;
-
- if (pages_number < 0 || pages_number > totalram_pages) {
- CERROR("%s: can't set max cache more than %lu MB\n",
- ll_get_fsname(sb, NULL, 0),
- totalram_pages >> (20 - PAGE_SHIFT));
- return -ERANGE;
- }
-
- spin_lock(&sbi->ll_lock);
- diff = pages_number - cache->ccc_lru_max;
- spin_unlock(&sbi->ll_lock);
-
- /* easy - add more LRU slots. */
- if (diff >= 0) {
- atomic_long_add(diff, &cache->ccc_lru_left);
- rc = 0;
- goto out;
- }
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return 0;
-
- diff = -diff;
- while (diff > 0) {
- long tmp;
-
- /* reduce LRU budget from free slots. */
- do {
- long ov, nv;
-
- ov = atomic_long_read(&cache->ccc_lru_left);
- if (ov == 0)
- break;
-
- nv = ov > diff ? ov - diff : 0;
- rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
- if (likely(ov == rc)) {
- diff -= ov - nv;
- nrpages += ov - nv;
- break;
- }
- } while (1);
-
- if (diff <= 0)
- break;
-
- if (!sbi->ll_dt_exp) { /* being initialized */
- rc = 0;
- goto out;
- }
-
- /* difficult - have to ask OSCs to drop LRU slots. */
- tmp = diff << 1;
- rc = obd_set_info_async(env, sbi->ll_dt_exp,
- sizeof(KEY_CACHE_LRU_SHRINK),
- KEY_CACHE_LRU_SHRINK,
- sizeof(tmp), &tmp, NULL);
- if (rc < 0)
- break;
- }
- cl_env_put(env, &refcheck);
-
-out:
- if (rc >= 0) {
- spin_lock(&sbi->ll_lock);
- cache->ccc_lru_max = pages_number;
- spin_unlock(&sbi->ll_lock);
- rc = count;
- } else {
- atomic_long_add(nrpages, &cache->ccc_lru_left);
- }
- return rc;
-}
-
-LPROC_SEQ_FOPS(ll_max_cached_mb);
-
-static ssize_t checksum_pages_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
-}
-
-static ssize_t checksum_pages_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- if (!sbi->ll_dt_exp)
- /* Not set up yet */
- return -EAGAIN;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
- if (val)
- sbi->ll_flags |= LL_SBI_CHECKSUM;
- else
- sbi->ll_flags &= ~LL_SBI_CHECKSUM;
-
- rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
- KEY_CHECKSUM, sizeof(val), &val, NULL);
- if (rc)
- CWARN("Failed to set OSC checksum flags: %d\n", rc);
-
- return count;
-}
-LUSTRE_RW_ATTR(checksum_pages);
-
-static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
- enum stats_track_type type)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- if (sbi->ll_stats_track_type == type)
- return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
- else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
- return sprintf(buf, "0 (all)\n");
- else
- return sprintf(buf, "untracked\n");
-}
-
-static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
- size_t count,
- enum stats_track_type type)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long pid;
-
- rc = kstrtoul(buffer, 10, &pid);
- if (rc)
- return rc;
- sbi->ll_stats_track_id = pid;
- if (pid == 0)
- sbi->ll_stats_track_type = STATS_TRACK_ALL;
- else
- sbi->ll_stats_track_type = type;
- lprocfs_clear_stats(sbi->ll_stats);
- return count;
-}
-
-static ssize_t stats_track_pid_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
-}
-
-static ssize_t stats_track_pid_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
-}
-LUSTRE_RW_ATTR(stats_track_pid);
-
-static ssize_t stats_track_ppid_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
-}
-
-static ssize_t stats_track_ppid_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
-}
-LUSTRE_RW_ATTR(stats_track_ppid);
-
-static ssize_t stats_track_gid_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
-}
-
-static ssize_t stats_track_gid_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
-}
-LUSTRE_RW_ATTR(stats_track_gid);
-
-static ssize_t statahead_max_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", sbi->ll_sa_max);
-}
-
-static ssize_t statahead_max_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val <= LL_SA_RPC_MAX)
- sbi->ll_sa_max = val;
- else
- CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
- val, LL_SA_RPC_MAX);
-
- return count;
-}
-LUSTRE_RW_ATTR(statahead_max);
-
-static ssize_t statahead_agl_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
-}
-
-static ssize_t statahead_agl_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val)
- sbi->ll_flags |= LL_SBI_AGL_ENABLED;
- else
- sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
-
- return count;
-}
-LUSTRE_RW_ATTR(statahead_agl);
-
-static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
-{
- struct super_block *sb = m->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
-
- seq_printf(m,
- "statahead total: %u\n"
- "statahead wrong: %u\n"
- "agl total: %u\n",
- atomic_read(&sbi->ll_sa_total),
- atomic_read(&sbi->ll_sa_wrong),
- atomic_read(&sbi->ll_agl_total));
- return 0;
-}
-
-LPROC_SEQ_FOPS_RO(ll_statahead_stats);
-
-static ssize_t lazystatfs_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_LAZYSTATFS ? 1 : 0);
-}
-
-static ssize_t lazystatfs_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val)
- sbi->ll_flags |= LL_SBI_LAZYSTATFS;
- else
- sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
-
- return count;
-}
-LUSTRE_RW_ATTR(lazystatfs);
-
-static ssize_t max_easize_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- unsigned int ealen;
- int rc;
-
- rc = ll_get_max_mdsize(sbi, &ealen);
- if (rc)
- return rc;
-
- return sprintf(buf, "%u\n", ealen);
-}
-LUSTRE_RO_ATTR(max_easize);
-
-/**
- * Get default_easize.
- *
- * \see client_obd::cl_default_mds_easize
- *
- * \param[in] kobj kernel object for sysfs tree
- * \param[in] attr attribute of this kernel object
- * \param[in] buf buffer to write data into
- *
- * \retval positive \a count on success
- * \retval negative negated errno on failure
- */
-static ssize_t default_easize_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- unsigned int ealen;
- int rc;
-
- rc = ll_get_default_mdsize(sbi, &ealen);
- if (rc)
- return rc;
-
- return sprintf(buf, "%u\n", ealen);
-}
-
-/**
- * Set default_easize.
- *
- * Range checking on the passed value is handled by
- * ll_set_default_mdsize().
- *
- * \see client_obd::cl_default_mds_easize
- *
- * \param[in] kobj kernel object for sysfs tree
- * \param[in] attr attribute of this kernel object
- * \param[in] buffer string passed from user space
- * \param[in] count \a buffer length
- *
- * \retval positive \a count on success
- * \retval negative negated errno on failure
- */
-static ssize_t default_easize_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- unsigned long val;
- int rc;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- rc = ll_set_default_mdsize(sbi, val);
- if (rc)
- return rc;
-
- return count;
-}
-LUSTRE_RW_ATTR(default_easize);
-
-static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
-{
- const char *str[] = LL_SBI_FLAGS;
- struct super_block *sb = m->private;
- int flags = ll_s2sbi(sb)->ll_flags;
- int i = 0;
-
- while (flags != 0) {
- if (ARRAY_SIZE(str) <= i) {
- CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
- ll_get_fsname(sb, NULL, 0));
- return -EINVAL;
- }
-
- if (flags & 0x1)
- seq_printf(m, "%s ", str[i]);
- flags >>= 1;
- ++i;
- }
- seq_puts(m, "\b\n");
- return 0;
-}
-
-LPROC_SEQ_FOPS_RO(ll_sbi_flags);
-
-static ssize_t xattr_cache_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
-}
-
-static ssize_t xattr_cache_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val != 0 && val != 1)
- return -ERANGE;
-
- if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
- return -ENOTSUPP;
-
- sbi->ll_xattr_cache_enabled = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(xattr_cache);
-
-static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
-{
- struct super_block *sb = m->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_cache *cache = sbi->ll_cache;
- long pages;
- int mb;
-
- pages = atomic_long_read(&cache->ccc_unstable_nr);
- mb = (pages * PAGE_SIZE) >> 20;
-
- seq_printf(m,
- "unstable_check: %8d\n"
- "unstable_pages: %12ld\n"
- "unstable_mb: %8d\n",
- cache->ccc_unstable_check, pages, mb);
-
- return 0;
-}
-
-static ssize_t ll_unstable_stats_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct super_block *sb = ((struct seq_file *)file->private_data)->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- char kernbuf[128];
- int val, rc;
-
- if (!count)
- return 0;
- if (count >= sizeof(kernbuf))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
- kernbuf[count] = 0;
-
- buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
- kernbuf;
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc < 0)
- return rc;
-
- /* borrow lru lock to set the value */
- spin_lock(&sbi->ll_cache->ccc_lru_lock);
- sbi->ll_cache->ccc_unstable_check = !!val;
- spin_unlock(&sbi->ll_cache->ccc_lru_lock);
-
- return count;
-}
-LPROC_SEQ_FOPS(ll_unstable_stats);
-
-static int ll_root_squash_seq_show(struct seq_file *m, void *v)
-{
- struct super_block *sb = m->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct root_squash_info *squash = &sbi->ll_squash;
-
- seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
- return 0;
-}
-
-static ssize_t ll_root_squash_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct seq_file *m = file->private_data;
- struct super_block *sb = m->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct root_squash_info *squash = &sbi->ll_squash;
-
- return lprocfs_wr_root_squash(buffer, count, squash,
- ll_get_fsname(sb, NULL, 0));
-}
-LPROC_SEQ_FOPS(ll_root_squash);
-
-static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
-{
- struct super_block *sb = m->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct root_squash_info *squash = &sbi->ll_squash;
- int len;
-
- down_read(&squash->rsi_sem);
- if (!list_empty(&squash->rsi_nosquash_nids)) {
- len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
- &squash->rsi_nosquash_nids);
- m->count += len;
- seq_puts(m, "\n");
- } else {
- seq_puts(m, "NONE\n");
- }
- up_read(&squash->rsi_sem);
-
- return 0;
-}
-
-static ssize_t ll_nosquash_nids_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct seq_file *m = file->private_data;
- struct super_block *sb = m->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct root_squash_info *squash = &sbi->ll_squash;
- int rc;
-
- rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
- ll_get_fsname(sb, NULL, 0));
- if (rc < 0)
- return rc;
-
- ll_compute_rootsquash_state(sbi);
-
- return rc;
-}
-
-LPROC_SEQ_FOPS(ll_nosquash_nids);
-
-static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
- /* { "mntpt_path", ll_rd_path, 0, 0 }, */
- { "site", &ll_site_stats_fops, NULL, 0 },
- /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
- { "max_cached_mb", &ll_max_cached_mb_fops, NULL },
- { "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
- { "unstable_stats", &ll_unstable_stats_fops, NULL },
- { "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
- { .name = "root_squash",
- .fops = &ll_root_squash_fops },
- { .name = "nosquash_nids",
- .fops = &ll_nosquash_nids_fops },
- { NULL }
-};
-
-#define MAX_STRING_SIZE 128
-
-static struct attribute *llite_attrs[] = {
- &lustre_attr_blocksize.attr,
- &lustre_attr_kbytestotal.attr,
- &lustre_attr_kbytesfree.attr,
- &lustre_attr_kbytesavail.attr,
- &lustre_attr_filestotal.attr,
- &lustre_attr_filesfree.attr,
- &lustre_attr_client_type.attr,
- &lustre_attr_fstype.attr,
- &lustre_attr_uuid.attr,
- &lustre_attr_max_read_ahead_mb.attr,
- &lustre_attr_max_read_ahead_per_file_mb.attr,
- &lustre_attr_max_read_ahead_whole_mb.attr,
- &lustre_attr_checksum_pages.attr,
- &lustre_attr_stats_track_pid.attr,
- &lustre_attr_stats_track_ppid.attr,
- &lustre_attr_stats_track_gid.attr,
- &lustre_attr_statahead_max.attr,
- &lustre_attr_statahead_agl.attr,
- &lustre_attr_lazystatfs.attr,
- &lustre_attr_max_easize.attr,
- &lustre_attr_default_easize.attr,
- &lustre_attr_xattr_cache.attr,
- NULL,
-};
-
-static void llite_sb_release(struct kobject *kobj)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- complete(&sbi->ll_kobj_unregister);
-}
-
-static struct kobj_type llite_ktype = {
- .default_attrs = llite_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = llite_sb_release,
-};
-
-static const struct llite_file_opcode {
- __u32 opcode;
- __u32 type;
- const char *opname;
-} llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
- /* file operation */
- { LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" },
- { LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" },
- { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
- "read_bytes" },
- { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
- "write_bytes" },
- { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
- "brw_read" },
- { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
- "brw_write" },
- { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
- { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
- { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
- { LPROC_LL_MAP, LPROCFS_TYPE_REGS, "mmap" },
- { LPROC_LL_LLSEEK, LPROCFS_TYPE_REGS, "seek" },
- { LPROC_LL_FSYNC, LPROCFS_TYPE_REGS, "fsync" },
- { LPROC_LL_READDIR, LPROCFS_TYPE_REGS, "readdir" },
- /* inode operation */
- { LPROC_LL_SETATTR, LPROCFS_TYPE_REGS, "setattr" },
- { LPROC_LL_TRUNC, LPROCFS_TYPE_REGS, "truncate" },
- { LPROC_LL_FLOCK, LPROCFS_TYPE_REGS, "flock" },
- { LPROC_LL_GETATTR, LPROCFS_TYPE_REGS, "getattr" },
- /* dir inode operation */
- { LPROC_LL_CREATE, LPROCFS_TYPE_REGS, "create" },
- { LPROC_LL_LINK, LPROCFS_TYPE_REGS, "link" },
- { LPROC_LL_UNLINK, LPROCFS_TYPE_REGS, "unlink" },
- { LPROC_LL_SYMLINK, LPROCFS_TYPE_REGS, "symlink" },
- { LPROC_LL_MKDIR, LPROCFS_TYPE_REGS, "mkdir" },
- { LPROC_LL_RMDIR, LPROCFS_TYPE_REGS, "rmdir" },
- { LPROC_LL_MKNOD, LPROCFS_TYPE_REGS, "mknod" },
- { LPROC_LL_RENAME, LPROCFS_TYPE_REGS, "rename" },
- /* special inode operation */
- { LPROC_LL_STAFS, LPROCFS_TYPE_REGS, "statfs" },
- { LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
- { LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" },
- { LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" },
- { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REGS, "getxattr_hits" },
- { LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" },
- { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" },
- { LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" },
-};
-
-void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
-{
- if (!sbi->ll_stats)
- return;
- if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
- lprocfs_counter_add(sbi->ll_stats, op, count);
- else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
- sbi->ll_stats_track_id == current->pid)
- lprocfs_counter_add(sbi->ll_stats, op, count);
- else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
- sbi->ll_stats_track_id == current->real_parent->pid)
- lprocfs_counter_add(sbi->ll_stats, op, count);
- else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
- sbi->ll_stats_track_id ==
- from_kgid(&init_user_ns, current_gid()))
- lprocfs_counter_add(sbi->ll_stats, op, count);
-}
-EXPORT_SYMBOL(ll_stats_ops_tally);
-
-static const char *ra_stat_string[] = {
- [RA_STAT_HIT] = "hits",
- [RA_STAT_MISS] = "misses",
- [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
- [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
- [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
- [RA_STAT_FAILED_MATCH] = "failed lock match",
- [RA_STAT_DISCARDED] = "read but discarded",
- [RA_STAT_ZERO_LEN] = "zero length file",
- [RA_STAT_ZERO_WINDOW] = "zero size window",
- [RA_STAT_EOF] = "read-ahead to EOF",
- [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
- [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
- [RA_STAT_FAILED_REACH_END] = "failed to reach end"
-};
-
-int ldebugfs_register_mountpoint(struct dentry *parent,
- struct super_block *sb, char *osc, char *mdc)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct obd_device *obd;
- struct dentry *dir;
- char name[MAX_STRING_SIZE + 1], *ptr;
- int err, id, len, rc;
-
- name[MAX_STRING_SIZE] = '\0';
-
- LASSERT(sbi);
- LASSERT(mdc);
- LASSERT(osc);
-
- /* Get fsname */
- len = strlen(lsi->lsi_lmd->lmd_profile);
- ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
- if (ptr && (strcmp(ptr, "-client") == 0))
- len -= 7;
-
- /* Mount info */
- snprintf(name, MAX_STRING_SIZE, "%.*s-%p", len,
- lsi->lsi_lmd->lmd_profile, sb);
-
- dir = ldebugfs_register(name, parent, NULL, NULL);
- if (IS_ERR_OR_NULL(dir)) {
- err = dir ? PTR_ERR(dir) : -ENOMEM;
- sbi->ll_debugfs_entry = NULL;
- return err;
- }
- sbi->ll_debugfs_entry = dir;
-
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache", 0444,
- &vvp_dump_pgcache_file_ops, sbi);
- if (rc)
- CWARN("Error adding the dump_page_cache file\n");
-
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
- &ll_rw_extents_stats_fops, sbi);
- if (rc)
- CWARN("Error adding the extent_stats file\n");
-
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
- "extents_stats_per_process",
- 0644, &ll_rw_extents_stats_pp_fops, sbi);
- if (rc)
- CWARN("Error adding the extents_stats_per_process file\n");
-
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
- &ll_rw_offset_stats_fops, sbi);
- if (rc)
- CWARN("Error adding the offset_stats file\n");
-
- /* File operations stats */
- sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
- LPROCFS_STATS_FLAG_NONE);
- if (!sbi->ll_stats) {
- err = -ENOMEM;
- goto out;
- }
- /* do counter init */
- for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
- __u32 type = llite_opcode_table[id].type;
- void *ptr = NULL;
-
- if (type & LPROCFS_TYPE_REGS)
- ptr = "regs";
- else if (type & LPROCFS_TYPE_BYTES)
- ptr = "bytes";
- else if (type & LPROCFS_TYPE_PAGES)
- ptr = "pages";
- lprocfs_counter_init(sbi->ll_stats,
- llite_opcode_table[id].opcode,
- (type & LPROCFS_CNTR_AVGMINMAX),
- llite_opcode_table[id].opname, ptr);
- }
- err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
- sbi->ll_stats);
- if (err)
- goto out;
-
- sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
- LPROCFS_STATS_FLAG_NONE);
- if (!sbi->ll_ra_stats) {
- err = -ENOMEM;
- goto out;
- }
-
- for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
- lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
- ra_stat_string[id], "pages");
-
- err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
- sbi->ll_ra_stats);
- if (err)
- goto out;
-
- err = ldebugfs_add_vars(sbi->ll_debugfs_entry,
- lprocfs_llite_obd_vars, sb);
- if (err)
- goto out;
-
- sbi->ll_kobj.kset = llite_kset;
- init_completion(&sbi->ll_kobj_unregister);
- err = kobject_init_and_add(&sbi->ll_kobj, &llite_ktype, NULL,
- "%s", name);
- if (err)
- goto out;
-
- /* MDC info */
- obd = class_name2obd(mdc);
-
- err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
- obd->obd_type->typ_name);
- if (err)
- goto out;
-
- /* OSC */
- obd = class_name2obd(osc);
-
- err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
- obd->obd_type->typ_name);
-out:
- if (err) {
- ldebugfs_remove(&sbi->ll_debugfs_entry);
- lprocfs_free_stats(&sbi->ll_ra_stats);
- lprocfs_free_stats(&sbi->ll_stats);
- }
- return err;
-}
-
-void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi)
-{
- if (sbi->ll_debugfs_entry) {
- ldebugfs_remove(&sbi->ll_debugfs_entry);
- kobject_put(&sbi->ll_kobj);
- wait_for_completion(&sbi->ll_kobj_unregister);
- lprocfs_free_stats(&sbi->ll_ra_stats);
- lprocfs_free_stats(&sbi->ll_stats);
- }
-}
-
-#undef MAX_STRING_SIZE
-
-#define pct(a, b) (b ? a * 100 / b : 0)
-
-static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
- struct seq_file *seq, int which)
-{
- unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
- unsigned long start, end, r, w;
- char *unitp = "KMGTPEZY";
- int i, units = 10;
- struct per_process_info *pp_info = &io_extents->pp_extents[which];
-
- read_cum = 0;
- write_cum = 0;
- start = 0;
-
- for (i = 0; i < LL_HIST_MAX; i++) {
- read_tot += pp_info->pp_r_hist.oh_buckets[i];
- write_tot += pp_info->pp_w_hist.oh_buckets[i];
- }
-
- for (i = 0; i < LL_HIST_MAX; i++) {
- r = pp_info->pp_r_hist.oh_buckets[i];
- w = pp_info->pp_w_hist.oh_buckets[i];
- read_cum += r;
- write_cum += w;
- end = 1 << (i + LL_HIST_START - units);
- seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n",
- start, *unitp, end, *unitp,
- (i == LL_HIST_MAX - 1) ? '+' : ' ',
- r, pct(r, read_tot), pct(read_cum, read_tot),
- w, pct(w, write_tot), pct(write_cum, write_tot));
- start = end;
- if (start == 1024) {
- start = 1;
- units += 10;
- unitp++;
- }
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
-}
-
-static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- int k;
-
- ktime_get_real_ts64(&now);
-
- if (!sbi->ll_rw_stats_on) {
- seq_printf(seq, "disabled\n"
- "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
- return 0;
- }
- seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
- (s64)now.tv_sec, (unsigned long)now.tv_nsec);
- seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
- seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
- "extents", "calls", "%", "cum%",
- "calls", "%", "cum%");
- spin_lock(&sbi->ll_pp_extent_lock);
- for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
- if (io_extents->pp_extents[k].pid != 0) {
- seq_printf(seq, "\nPID: %d\n",
- io_extents->pp_extents[k].pid);
- ll_display_extents_info(io_extents, seq, k);
- }
- }
- spin_unlock(&sbi->ll_pp_extent_lock);
- return 0;
-}
-
-static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
- const char __user *buf,
- size_t len,
- loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- int i;
- int value = 1, rc = 0;
-
- if (len == 0)
- return -EINVAL;
-
- rc = lprocfs_write_helper(buf, len, &value);
- if (rc < 0 && len < 16) {
- char kernbuf[16];
-
- if (copy_from_user(kernbuf, buf, len))
- return -EFAULT;
- kernbuf[len] = 0;
-
- if (kernbuf[len - 1] == '\n')
- kernbuf[len - 1] = 0;
-
- if (strcmp(kernbuf, "disabled") == 0 ||
- strcmp(kernbuf, "Disabled") == 0)
- value = 0;
- }
-
- if (value == 0)
- sbi->ll_rw_stats_on = 0;
- else
- sbi->ll_rw_stats_on = 1;
-
- spin_lock(&sbi->ll_pp_extent_lock);
- for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
- io_extents->pp_extents[i].pid = 0;
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
- }
- spin_unlock(&sbi->ll_pp_extent_lock);
- return len;
-}
-
-LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
-
-static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
-
- ktime_get_real_ts64(&now);
-
- if (!sbi->ll_rw_stats_on) {
- seq_printf(seq, "disabled\n"
- "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
- return 0;
- }
- seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
- (u64)now.tv_sec, (unsigned long)now.tv_nsec);
-
- seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
- seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
- "extents", "calls", "%", "cum%",
- "calls", "%", "cum%");
- spin_lock(&sbi->ll_lock);
- ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
- spin_unlock(&sbi->ll_lock);
-
- return 0;
-}
-
-static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- int i;
- int value = 1, rc = 0;
-
- if (len == 0)
- return -EINVAL;
-
- rc = lprocfs_write_helper(buf, len, &value);
- if (rc < 0 && len < 16) {
- char kernbuf[16];
-
- if (copy_from_user(kernbuf, buf, len))
- return -EFAULT;
- kernbuf[len] = 0;
-
- if (kernbuf[len - 1] == '\n')
- kernbuf[len - 1] = 0;
-
- if (strcmp(kernbuf, "disabled") == 0 ||
- strcmp(kernbuf, "Disabled") == 0)
- value = 0;
- }
-
- if (value == 0)
- sbi->ll_rw_stats_on = 0;
- else
- sbi->ll_rw_stats_on = 1;
-
- spin_lock(&sbi->ll_pp_extent_lock);
- for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
- io_extents->pp_extents[i].pid = 0;
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
- }
- spin_unlock(&sbi->ll_pp_extent_lock);
-
- return len;
-}
-
-LPROC_SEQ_FOPS(ll_rw_extents_stats);
-
-void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
- struct ll_file_data *file, loff_t pos,
- size_t count, int rw)
-{
- int i, cur = -1;
- struct ll_rw_process_info *process;
- struct ll_rw_process_info *offset;
- int *off_count = &sbi->ll_rw_offset_entry_count;
- int *process_count = &sbi->ll_offset_process_count;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
-
- if (!sbi->ll_rw_stats_on)
- return;
- process = sbi->ll_rw_process_info;
- offset = sbi->ll_rw_offset_info;
-
- spin_lock(&sbi->ll_pp_extent_lock);
- /* Extent statistics */
- for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
- if (io_extents->pp_extents[i].pid == pid) {
- cur = i;
- break;
- }
- }
-
- if (cur == -1) {
- /* new process */
- sbi->ll_extent_process_count =
- (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
- cur = sbi->ll_extent_process_count;
- io_extents->pp_extents[cur].pid = pid;
- lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
- lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
- }
-
- for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
- (i < (LL_HIST_MAX - 1)); i++)
- ;
- if (rw == 0) {
- io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
- io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
- } else {
- io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
- io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
- }
- spin_unlock(&sbi->ll_pp_extent_lock);
-
- spin_lock(&sbi->ll_process_lock);
- /* Offset statistics */
- for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
- if (process[i].rw_pid == pid) {
- if (process[i].rw_last_file != file) {
- process[i].rw_range_start = pos;
- process[i].rw_last_file_pos = pos + count;
- process[i].rw_smallest_extent = count;
- process[i].rw_largest_extent = count;
- process[i].rw_offset = 0;
- process[i].rw_last_file = file;
- spin_unlock(&sbi->ll_process_lock);
- return;
- }
- if (process[i].rw_last_file_pos != pos) {
- *off_count =
- (*off_count + 1) % LL_OFFSET_HIST_MAX;
- offset[*off_count].rw_op = process[i].rw_op;
- offset[*off_count].rw_pid = pid;
- offset[*off_count].rw_range_start =
- process[i].rw_range_start;
- offset[*off_count].rw_range_end =
- process[i].rw_last_file_pos;
- offset[*off_count].rw_smallest_extent =
- process[i].rw_smallest_extent;
- offset[*off_count].rw_largest_extent =
- process[i].rw_largest_extent;
- offset[*off_count].rw_offset =
- process[i].rw_offset;
- process[i].rw_op = rw;
- process[i].rw_range_start = pos;
- process[i].rw_smallest_extent = count;
- process[i].rw_largest_extent = count;
- process[i].rw_offset = pos -
- process[i].rw_last_file_pos;
- }
- if (process[i].rw_smallest_extent > count)
- process[i].rw_smallest_extent = count;
- if (process[i].rw_largest_extent < count)
- process[i].rw_largest_extent = count;
- process[i].rw_last_file_pos = pos + count;
- spin_unlock(&sbi->ll_process_lock);
- return;
- }
- }
- *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
- process[*process_count].rw_pid = pid;
- process[*process_count].rw_op = rw;
- process[*process_count].rw_range_start = pos;
- process[*process_count].rw_last_file_pos = pos + count;
- process[*process_count].rw_smallest_extent = count;
- process[*process_count].rw_largest_extent = count;
- process[*process_count].rw_offset = 0;
- process[*process_count].rw_last_file = file;
- spin_unlock(&sbi->ll_process_lock);
-}
-
-static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
- struct ll_rw_process_info *process = sbi->ll_rw_process_info;
- int i;
-
- ktime_get_real_ts64(&now);
-
- if (!sbi->ll_rw_stats_on) {
- seq_printf(seq, "disabled\n"
- "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
- return 0;
- }
- spin_lock(&sbi->ll_process_lock);
-
- seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
- (s64)now.tv_sec, (unsigned long)now.tv_nsec);
- seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
- "R/W", "PID", "RANGE START", "RANGE END",
- "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
- /* We stored the discontiguous offsets here; print them first */
- for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
- if (offset[i].rw_pid != 0)
- seq_printf(seq,
- "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
- offset[i].rw_op == READ ? 'R' : 'W',
- offset[i].rw_pid,
- offset[i].rw_range_start,
- offset[i].rw_range_end,
- (unsigned long)offset[i].rw_smallest_extent,
- (unsigned long)offset[i].rw_largest_extent,
- offset[i].rw_offset);
- }
- /* Then print the current offsets for each process */
- for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
- if (process[i].rw_pid != 0)
- seq_printf(seq,
- "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
- process[i].rw_op == READ ? 'R' : 'W',
- process[i].rw_pid,
- process[i].rw_range_start,
- process[i].rw_last_file_pos,
- (unsigned long)process[i].rw_smallest_extent,
- (unsigned long)process[i].rw_largest_extent,
- process[i].rw_offset);
- }
- spin_unlock(&sbi->ll_process_lock);
-
- return 0;
-}
-
-static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
- struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
- int value = 1, rc = 0;
-
- if (len == 0)
- return -EINVAL;
-
- rc = lprocfs_write_helper(buf, len, &value);
-
- if (rc < 0 && len < 16) {
- char kernbuf[16];
-
- if (copy_from_user(kernbuf, buf, len))
- return -EFAULT;
- kernbuf[len] = 0;
-
- if (kernbuf[len - 1] == '\n')
- kernbuf[len - 1] = 0;
-
- if (strcmp(kernbuf, "disabled") == 0 ||
- strcmp(kernbuf, "Disabled") == 0)
- value = 0;
- }
-
- if (value == 0)
- sbi->ll_rw_stats_on = 0;
- else
- sbi->ll_rw_stats_on = 1;
-
- spin_lock(&sbi->ll_process_lock);
- sbi->ll_offset_process_count = 0;
- sbi->ll_rw_offset_entry_count = 0;
- memset(process_info, 0, sizeof(struct ll_rw_process_info) *
- LL_PROCESS_HIST_MAX);
- memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
- LL_OFFSET_HIST_MAX);
- spin_unlock(&sbi->ll_process_lock);
-
- return len;
-}
-
-LPROC_SEQ_FOPS(ll_rw_offset_stats);
-
-void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->obd_vars = lprocfs_llite_obd_vars;
-}
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
deleted file mode 100644
index 6c9ec462eb41..000000000000
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ /dev/null
@@ -1,1202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/quotaops.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/security.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd_support.h>
-#include <lustre_fid.h>
-#include <lustre_dlm.h>
-#include "llite_internal.h"
-
-static int ll_create_it(struct inode *dir, struct dentry *dentry,
- struct lookup_intent *it);
-
-/* called from iget5_locked->find_inode() under inode_hash_lock spinlock */
-static int ll_test_inode(struct inode *inode, void *opaque)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lustre_md *md = opaque;
-
- if (unlikely(!(md->body->mbo_valid & OBD_MD_FLID))) {
- CERROR("MDS body missing FID\n");
- return 0;
- }
-
- if (!lu_fid_eq(&lli->lli_fid, &md->body->mbo_fid1))
- return 0;
-
- return 1;
-}
-
-static int ll_set_inode(struct inode *inode, void *opaque)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct mdt_body *body = ((struct lustre_md *)opaque)->body;
-
- if (unlikely(!(body->mbo_valid & OBD_MD_FLID))) {
- CERROR("MDS body missing FID\n");
- return -EINVAL;
- }
-
- lli->lli_fid = body->mbo_fid1;
- if (unlikely(!(body->mbo_valid & OBD_MD_FLTYPE))) {
- CERROR("Can not initialize inode " DFID
- " without object type: valid = %#llx\n",
- PFID(&lli->lli_fid), body->mbo_valid);
- return -EINVAL;
- }
-
- inode->i_mode = (inode->i_mode & ~S_IFMT) | (body->mbo_mode & S_IFMT);
- if (unlikely(inode->i_mode == 0)) {
- CERROR("Invalid inode " DFID " type\n", PFID(&lli->lli_fid));
- return -EINVAL;
- }
-
- ll_lli_init(lli);
-
- return 0;
-}
-
-/**
- * Get an inode by inode number(@hash), which is already instantiated by
- * the intent lookup).
- */
-struct inode *ll_iget(struct super_block *sb, ino_t hash,
- struct lustre_md *md)
-{
- struct inode *inode;
- int rc = 0;
-
- LASSERT(hash != 0);
- inode = iget5_locked(sb, hash, ll_test_inode, ll_set_inode, md);
- if (!inode)
- return ERR_PTR(-ENOMEM);
-
- if (inode->i_state & I_NEW) {
- rc = ll_read_inode2(inode, md);
- if (!rc && S_ISREG(inode->i_mode) &&
- !ll_i2info(inode)->lli_clob)
- rc = cl_file_inode_init(inode, md);
-
- if (rc) {
- /*
- * Let's clear directory lsm here, otherwise
- * make_bad_inode() will reset the inode mode
- * to regular, then ll_clear_inode will not
- * be able to clear lsm_md
- */
- if (S_ISDIR(inode->i_mode))
- ll_dir_clear_lsm_md(inode);
- make_bad_inode(inode);
- unlock_new_inode(inode);
- iput(inode);
- inode = ERR_PTR(rc);
- } else {
- unlock_new_inode(inode);
- }
- } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
- rc = ll_update_inode(inode, md);
- CDEBUG(D_VFSTRACE, "got inode: " DFID "(%p): rc = %d\n",
- PFID(&md->body->mbo_fid1), inode, rc);
- if (rc) {
- if (S_ISDIR(inode->i_mode))
- ll_dir_clear_lsm_md(inode);
- iput(inode);
- inode = ERR_PTR(rc);
- }
- }
- return inode;
-}
-
-static void ll_invalidate_negative_children(struct inode *dir)
-{
- struct dentry *dentry, *tmp_subdir;
-
- spin_lock(&dir->i_lock);
- hlist_for_each_entry(dentry, &dir->i_dentry, d_u.d_alias) {
- spin_lock(&dentry->d_lock);
- if (!list_empty(&dentry->d_subdirs)) {
- struct dentry *child;
-
- list_for_each_entry_safe(child, tmp_subdir,
- &dentry->d_subdirs,
- d_child) {
- if (d_really_is_negative(child))
- d_lustre_invalidate(child, 1);
- }
- }
- spin_unlock(&dentry->d_lock);
- }
- spin_unlock(&dir->i_lock);
-}
-
-int ll_test_inode_by_fid(struct inode *inode, void *opaque)
-{
- return lu_fid_eq(&ll_i2info(inode)->lli_fid, opaque);
-}
-
-int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
-{
- struct lustre_handle lockh;
- int rc;
-
- switch (flag) {
- case LDLM_CB_BLOCKING:
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- if (rc < 0) {
- CDEBUG(D_INODE, "ldlm_cli_cancel: rc = %d\n", rc);
- return rc;
- }
- break;
- case LDLM_CB_CANCELING: {
- struct inode *inode = ll_inode_from_resource_lock(lock);
- __u64 bits = lock->l_policy_data.l_inodebits.bits;
-
- /* Inode is set to lock->l_resource->lr_lvb_inode
- * for mdc - bug 24555
- */
- LASSERT(!lock->l_ast_data);
-
- if (!inode)
- break;
-
- /* Invalidate all dentries associated with this inode */
- LASSERT(ldlm_is_canceling(lock));
-
- if (!fid_res_name_eq(ll_inode2fid(inode),
- &lock->l_resource->lr_name)) {
- LDLM_ERROR(lock,
- "data mismatch with object " DFID "(%p)",
- PFID(ll_inode2fid(inode)), inode);
- LBUG();
- }
-
- if (bits & MDS_INODELOCK_XATTR) {
- if (S_ISDIR(inode->i_mode))
- ll_i2info(inode)->lli_def_stripe_offset = -1;
- ll_xattr_cache_destroy(inode);
- bits &= ~MDS_INODELOCK_XATTR;
- }
-
- /* For OPEN locks we differentiate between lock modes
- * LCK_CR, LCK_CW, LCK_PR - bug 22891
- */
- if (bits & MDS_INODELOCK_OPEN)
- ll_have_md_lock(inode, &bits, lock->l_req_mode);
-
- if (bits & MDS_INODELOCK_OPEN) {
- fmode_t fmode;
-
- switch (lock->l_req_mode) {
- case LCK_CW:
- fmode = FMODE_WRITE;
- break;
- case LCK_PR:
- fmode = FMODE_EXEC;
- break;
- case LCK_CR:
- fmode = FMODE_READ;
- break;
- default:
- LDLM_ERROR(lock, "bad lock mode for OPEN lock");
- LBUG();
- }
-
- ll_md_real_close(inode, fmode);
- }
-
- if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM))
- ll_have_md_lock(inode, &bits, LCK_MINMODE);
-
- if (bits & MDS_INODELOCK_LAYOUT) {
- struct cl_object_conf conf = {
- .coc_opc = OBJECT_CONF_INVALIDATE,
- .coc_inode = inode,
- };
-
- rc = ll_layout_conf(inode, &conf);
- if (rc < 0)
- CDEBUG(D_INODE, "cannot invalidate layout of "
- DFID ": rc = %d\n",
- PFID(ll_inode2fid(inode)), rc);
- }
-
- if (bits & MDS_INODELOCK_UPDATE) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- LTIME_S(inode->i_mtime) = 0;
- LTIME_S(inode->i_atime) = 0;
- LTIME_S(inode->i_ctime) = 0;
- spin_unlock(&lli->lli_lock);
- }
-
- if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- CDEBUG(D_INODE, "invalidating inode " DFID " lli = %p, pfid = " DFID "\n",
- PFID(ll_inode2fid(inode)), lli,
- PFID(&lli->lli_pfid));
-
- truncate_inode_pages(inode->i_mapping, 0);
-
- if (unlikely(!fid_is_zero(&lli->lli_pfid))) {
- struct inode *master_inode = NULL;
- unsigned long hash;
-
- /*
- * This is slave inode, since all of the child
- * dentry is connected on the master inode, so
- * we have to invalidate the negative children
- * on master inode
- */
- CDEBUG(D_INODE,
- "Invalidate s" DFID " m" DFID "\n",
- PFID(ll_inode2fid(inode)),
- PFID(&lli->lli_pfid));
-
- hash = cl_fid_build_ino(&lli->lli_pfid,
- ll_need_32bit_api(ll_i2sbi(inode)));
- /*
- * Do not lookup the inode with ilookup5,
- * otherwise it will cause dead lock,
- *
- * 1. Client1 send chmod req to the MDT0, then
- * on MDT0, it enqueues master and all of its
- * slaves lock, (mdt_attr_set() ->
- * mdt_lock_slaves()), after gets master and
- * stripe0 lock, it will send the enqueue req
- * (for stripe1) to MDT1, then MDT1 finds the
- * lock has been granted to client2. Then MDT1
- * sends blocking ast to client2.
- *
- * 2. At the same time, client2 tries to unlink
- * the striped dir (rm -rf striped_dir), and
- * during lookup, it will hold the master inode
- * of the striped directory, whose inode state
- * is NEW, then tries to revalidate all of its
- * slaves, (ll_prep_inode()->ll_iget()->
- * ll_read_inode2()-> ll_update_inode().). And
- * it will be blocked on the server side because
- * of 1.
- *
- * 3. Then the client get the blocking_ast req,
- * cancel the lock, but being blocked if using
- * ->ilookup5()), because master inode state is
- * NEW.
- */
- master_inode = ilookup5_nowait(inode->i_sb,
- hash,
- ll_test_inode_by_fid,
- (void *)&lli->lli_pfid);
- if (master_inode) {
- ll_invalidate_negative_children(master_inode);
- iput(master_inode);
- }
- } else {
- ll_invalidate_negative_children(inode);
- }
- }
-
- if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
- inode->i_sb->s_root &&
- !is_root_inode(inode))
- ll_invalidate_aliases(inode);
-
- iput(inode);
- break;
- }
- default:
- LBUG();
- }
-
- return 0;
-}
-
-__u32 ll_i2suppgid(struct inode *i)
-{
- if (in_group_p(i->i_gid))
- return (__u32)from_kgid(&init_user_ns, i->i_gid);
- else
- return (__u32)(-1);
-}
-
-/* Pack the required supplementary groups into the supplied groups array.
- * If we don't need to use the groups from the target inode(s) then we
- * instead pack one or more groups from the user's supplementary group
- * array in case it might be useful. Not needed if doing an MDS-side upcall.
- */
-void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
-{
- LASSERT(i1);
-
- suppgids[0] = ll_i2suppgid(i1);
-
- if (i2)
- suppgids[1] = ll_i2suppgid(i2);
- else
- suppgids[1] = -1;
-}
-
-/*
- * Try to reuse unhashed or invalidated dentries.
- * This is very similar to d_exact_alias(), and any changes in one should be
- * considered for inclusion in the other. The differences are that we don't
- * need an unhashed alias, and we don't want d_compare to be used for
- * comparison.
- */
-static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
-{
- struct dentry *alias;
-
- if (hlist_empty(&inode->i_dentry))
- return NULL;
-
- spin_lock(&inode->i_lock);
- hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- LASSERT(alias != dentry);
- /*
- * Don't need alias->d_lock here, because aliases with
- * d_parent == entry->d_parent are not subject to name or
- * parent changes, because the parent inode i_mutex is held.
- */
-
- if (alias->d_parent != dentry->d_parent)
- continue;
- if (alias->d_name.hash != dentry->d_name.hash)
- continue;
- if (alias->d_name.len != dentry->d_name.len ||
- memcmp(alias->d_name.name, dentry->d_name.name,
- dentry->d_name.len) != 0)
- continue;
- spin_lock(&alias->d_lock);
- dget_dlock(alias);
- spin_unlock(&alias->d_lock);
- spin_unlock(&inode->i_lock);
- return alias;
- }
- spin_unlock(&inode->i_lock);
-
- return NULL;
-}
-
-/*
- * Similar to d_splice_alias(), but lustre treats invalid alias
- * similar to DCACHE_DISCONNECTED, and tries to use it anyway.
- */
-struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
-{
- if (inode && !S_ISDIR(inode->i_mode)) {
- struct dentry *new = ll_find_alias(inode, de);
-
- if (new) {
- d_move(new, de);
- iput(inode);
- CDEBUG(D_DENTRY,
- "Reuse dentry %p inode %p refc %d flags %#x\n",
- new, d_inode(new), d_count(new), new->d_flags);
- return new;
- }
- d_add(de, inode);
- } else {
- struct dentry *new = d_splice_alias(inode, de);
-
- if (new)
- de = new;
- }
- CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
- de, d_inode(de), d_count(de), de->d_flags);
- return de;
-}
-
-static int ll_lookup_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it,
- struct inode *parent, struct dentry **de)
-{
- struct inode *inode = NULL;
- __u64 bits = 0;
- int rc = 0;
- struct dentry *alias;
-
- /* NB 1 request reference will be taken away by ll_intent_lock()
- * when I return
- */
- CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
- it->it_disposition);
- if (!it_disposition(it, DISP_LOOKUP_NEG)) {
- rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
- if (rc)
- return rc;
-
- ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits);
-
- /* We used to query real size from OSTs here, but actually
- * this is not needed. For stat() calls size would be updated
- * from subsequent do_revalidate()->ll_inode_revalidate_it() in
- * 2.4 and
- * vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6
- * Everybody else who needs correct file size would call
- * ll_glimpse_size or some equivalent themselves anyway.
- * Also see bug 7198.
- */
- }
-
- alias = ll_splice_alias(inode, *de);
- if (IS_ERR(alias)) {
- rc = PTR_ERR(alias);
- goto out;
- }
- *de = alias;
-
- if (!it_disposition(it, DISP_LOOKUP_NEG)) {
- /* We have the "lookup" lock, so unhide dentry */
- if (bits & MDS_INODELOCK_LOOKUP)
- d_lustre_revalidate(*de);
- } else if (!it_disposition(it, DISP_OPEN_CREATE)) {
- /* If file created on server, don't depend on parent UPDATE
- * lock to unhide it. It is left hidden and next lookup can
- * find it in ll_splice_alias.
- */
- /* Check that parent has UPDATE lock. */
- struct lookup_intent parent_it = {
- .it_op = IT_GETATTR,
- .it_lock_handle = 0 };
- struct lu_fid fid = ll_i2info(parent)->lli_fid;
-
- /* If it is striped directory, get the real stripe parent */
- if (unlikely(ll_i2info(parent)->lli_lsm_md)) {
- rc = md_get_fid_from_lsm(ll_i2mdexp(parent),
- ll_i2info(parent)->lli_lsm_md,
- (*de)->d_name.name,
- (*de)->d_name.len, &fid);
- if (rc)
- return rc;
- }
-
- if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it, &fid,
- NULL)) {
- d_lustre_revalidate(*de);
- ll_intent_release(&parent_it);
- }
- }
-
-out:
- if (rc != 0 && it->it_op & IT_OPEN)
- ll_open_cleanup((*de)->d_sb, request);
-
- return rc;
-}
-
-static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
- struct lookup_intent *it)
-{
- struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
- struct dentry *save = dentry, *retval;
- struct ptlrpc_request *req = NULL;
- struct md_op_data *op_data = NULL;
- struct inode *inode;
- __u32 opc;
- int rc;
-
- if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
- return ERR_PTR(-ENAMETOOLONG);
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),intent=%s\n",
- dentry, PFID(ll_inode2fid(parent)), parent, LL_IT2STR(it));
-
- if (d_mountpoint(dentry))
- CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
-
- if (!it || it->it_op == IT_GETXATTR)
- it = &lookup_it;
-
- if (it->it_op == IT_GETATTR && dentry_may_statahead(parent, dentry)) {
- rc = ll_statahead(parent, &dentry, 0);
- if (rc == 1) {
- if (dentry == save)
- retval = NULL;
- else
- retval = dentry;
- goto out;
- }
- }
-
- if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && sb_rdonly(dentry->d_sb))
- return ERR_PTR(-EROFS);
-
- if (it->it_op & IT_CREAT)
- opc = LUSTRE_OPC_CREATE;
- else
- opc = LUSTRE_OPC_ANY;
-
- op_data = ll_prep_md_op_data(NULL, parent, NULL, dentry->d_name.name,
- dentry->d_name.len, 0, opc, NULL);
- if (IS_ERR(op_data))
- return (void *)op_data;
-
- /* enforce umask if acl disabled or MDS doesn't support umask */
- if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
- it->it_create_mode &= ~current_umask();
-
- rc = md_intent_lock(ll_i2mdexp(parent), op_data, it, &req,
- &ll_md_blocking_ast, 0);
- /*
- * If the MDS allows the client to chgrp (CFS_SETGRP_PERM), but the
- * client does not know which suppgid should be sent to the MDS, or
- * some other(s) changed the target file's GID after this RPC sent
- * to the MDS with the suppgid as the original GID, then we should
- * try again with right suppgid.
- */
- if (rc == -EACCES && it->it_op & IT_OPEN &&
- it_disposition(it, DISP_OPEN_DENY)) {
- struct mdt_body *body;
-
- LASSERT(req);
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (op_data->op_suppgids[0] == body->mbo_gid ||
- op_data->op_suppgids[1] == body->mbo_gid ||
- !in_group_p(make_kgid(&init_user_ns, body->mbo_gid))) {
- retval = ERR_PTR(-EACCES);
- goto out;
- }
-
- fid_zero(&op_data->op_fid2);
- op_data->op_suppgids[1] = body->mbo_gid;
- ptlrpc_req_finished(req);
- req = NULL;
- ll_intent_release(it);
- rc = md_intent_lock(ll_i2mdexp(parent), op_data, it, &req,
- ll_md_blocking_ast, 0);
- }
-
- if (rc < 0) {
- retval = ERR_PTR(rc);
- goto out;
- }
-
- rc = ll_lookup_it_finish(req, it, parent, &dentry);
- if (rc != 0) {
- ll_intent_release(it);
- retval = ERR_PTR(rc);
- goto out;
- }
-
- inode = d_inode(dentry);
- if ((it->it_op & IT_OPEN) && inode &&
- !S_ISREG(inode->i_mode) &&
- !S_ISDIR(inode->i_mode)) {
- ll_release_openhandle(inode, it);
- }
- ll_lookup_finish_locks(it, inode);
-
- if (dentry == save)
- retval = NULL;
- else
- retval = dentry;
-out:
- if (op_data && !IS_ERR(op_data))
- ll_finish_md_op_data(op_data);
-
- ptlrpc_req_finished(req);
- return retval;
-}
-
-static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
- unsigned int flags)
-{
- struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
- struct dentry *de;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),flags=%u\n",
- dentry, PFID(ll_inode2fid(parent)), parent, flags);
-
- /* Optimize away (CREATE && !OPEN). Let .create handle the race.
- * but only if we have write permissions there, otherwise we need
- * to proceed with lookup. LU-4185
- */
- if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN) &&
- (inode_permission(parent, MAY_WRITE | MAY_EXEC) == 0))
- return NULL;
-
- if (flags & (LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE))
- itp = NULL;
- else
- itp = &it;
- de = ll_lookup_it(parent, dentry, itp);
-
- if (itp)
- ll_intent_release(itp);
-
- return de;
-}
-
-/*
- * For cached negative dentry and new dentry, handle lookup/create/open
- * together.
- */
-static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned int open_flags,
- umode_t mode, int *opened)
-{
- struct lookup_intent *it;
- struct dentry *de;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),file %p,open_flags %x,mode %x opened %d\n",
- dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
- *opened);
-
- /* Only negative dentries enter here */
- LASSERT(!d_inode(dentry));
-
- if (!d_in_lookup(dentry)) {
- /* A valid negative dentry that just passed revalidation,
- * there's little point to try and open it server-side,
- * even though there's a minuscle chance it might succeed.
- * Either way it's a valid race to just return -ENOENT here.
- */
- if (!(open_flags & O_CREAT))
- return -ENOENT;
-
- /* Otherwise we just unhash it to be rehashed afresh via
- * lookup if necessary
- */
- d_drop(dentry);
- }
-
- it = kzalloc(sizeof(*it), GFP_NOFS);
- if (!it)
- return -ENOMEM;
-
- it->it_op = IT_OPEN;
- if (open_flags & O_CREAT)
- it->it_op |= IT_CREAT;
- it->it_create_mode = (mode & S_IALLUGO) | S_IFREG;
- it->it_flags = (open_flags & ~O_ACCMODE) | OPEN_FMODE(open_flags);
- it->it_flags &= ~MDS_OPEN_FL_INTERNAL;
-
- /* Dentry added to dcache tree in ll_lookup_it */
- de = ll_lookup_it(dir, dentry, it);
- if (IS_ERR(de))
- rc = PTR_ERR(de);
- else if (de)
- dentry = de;
-
- if (!rc) {
- if (it_disposition(it, DISP_OPEN_CREATE)) {
- /* Dentry instantiated in ll_create_it. */
- rc = ll_create_it(dir, dentry, it);
- if (rc) {
- /* We dget in ll_splice_alias. */
- if (de)
- dput(de);
- goto out_release;
- }
-
- *opened |= FILE_CREATED;
- }
- if (d_really_is_positive(dentry) &&
- it_disposition(it, DISP_OPEN_OPEN)) {
- /* Open dentry. */
- if (S_ISFIFO(d_inode(dentry)->i_mode)) {
- /* We cannot call open here as it might
- * deadlock. This case is unreachable in
- * practice because of OBD_CONNECT_NODEVOH.
- */
- rc = finish_no_open(file, de);
- } else {
- file->private_data = it;
- rc = finish_open(file, dentry, NULL, opened);
- /* We dget in ll_splice_alias. finish_open takes
- * care of dget for fd open.
- */
- if (de)
- dput(de);
- }
- } else {
- rc = finish_no_open(file, de);
- }
- }
-
-out_release:
- ll_intent_release(it);
- kfree(it);
-
- return rc;
-}
-
-/* We depend on "mode" being set with the proper file type/umask by now */
-static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
-{
- struct inode *inode = NULL;
- struct ptlrpc_request *request = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- int rc;
-
- LASSERT(it && it->it_disposition);
-
- LASSERT(it_disposition(it, DISP_ENQ_CREATE_REF));
- request = it->it_request;
- it_clear_disposition(it, DISP_ENQ_CREATE_REF);
- rc = ll_prep_inode(&inode, request, dir->i_sb, it);
- if (rc) {
- inode = ERR_PTR(rc);
- goto out;
- }
-
- LASSERT(hlist_empty(&inode->i_dentry));
-
- /* We asked for a lock on the directory, but were granted a
- * lock on the inode. Since we finally have an inode pointer,
- * stuff it in the lock.
- */
- CDEBUG(D_DLMTRACE, "setting l_ast_data to inode " DFID "(%p)\n",
- PFID(ll_inode2fid(dir)), inode);
- ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
- out:
- ptlrpc_req_finished(request);
- return inode;
-}
-
-/*
- * By the time this is called, we already have created the directory cache
- * entry for the new file, but it is so far negative - it has no inode.
- *
- * We defer creating the OBD object(s) until open, to keep the intent and
- * non-intent code paths similar, and also because we do not have the MDS
- * inode number before calling ll_create_node() (which is needed for LOV),
- * so we would need to do yet another RPC to the MDS to store the LOV EA
- * data on the MDS. If needed, we would pass the PACKED lmm as data and
- * lmm_size in datalen (the MDS still has code which will handle that).
- *
- * If the create succeeds, we fill in the inode information
- * with d_instantiate().
- */
-static int ll_create_it(struct inode *dir, struct dentry *dentry,
- struct lookup_intent *it)
-{
- struct inode *inode;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p), intent=%s\n",
- dentry, PFID(ll_inode2fid(dir)), dir, LL_IT2STR(it));
-
- rc = it_open_error(DISP_OPEN_CREATE, it);
- if (rc)
- return rc;
-
- inode = ll_create_node(dir, it);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- d_instantiate(dentry, inode);
-
- return ll_init_security(dentry, inode, dir);
-}
-
-void ll_update_times(struct ptlrpc_request *request, struct inode *inode)
-{
- struct mdt_body *body = req_capsule_server_get(&request->rq_pill,
- &RMF_MDT_BODY);
-
- LASSERT(body);
- if (body->mbo_valid & OBD_MD_FLMTIME &&
- body->mbo_mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE, "setting fid " DFID " mtime from %lu to %llu\n",
- PFID(ll_inode2fid(inode)), LTIME_S(inode->i_mtime),
- body->mbo_mtime);
- LTIME_S(inode->i_mtime) = body->mbo_mtime;
- }
- if (body->mbo_valid & OBD_MD_FLCTIME &&
- body->mbo_ctime > LTIME_S(inode->i_ctime))
- LTIME_S(inode->i_ctime) = body->mbo_ctime;
-}
-
-static int ll_new_node(struct inode *dir, struct dentry *dentry,
- const char *tgt, umode_t mode, int rdev,
- __u32 opc)
-{
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- struct inode *inode = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- int tgt_len = 0;
- int err;
-
- if (unlikely(tgt))
- tgt_len = strlen(tgt) + 1;
-again:
- op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
- 0, opc, NULL);
- if (IS_ERR(op_data)) {
- err = PTR_ERR(op_data);
- goto err_exit;
- }
-
- err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode,
- from_kuid(&init_user_ns, current_fsuid()),
- from_kgid(&init_user_ns, current_fsgid()),
- cfs_curproc_cap_pack(), rdev, &request);
- ll_finish_md_op_data(op_data);
- if (err < 0 && err != -EREMOTE)
- goto err_exit;
-
- /*
- * If the client doesn't know where to create a subdirectory (or
- * in case of a race that sends the RPC to the wrong MDS), the
- * MDS will return -EREMOTE and the client will fetch the layout
- * of the directory, then create the directory on the right MDT.
- */
- if (unlikely(err == -EREMOTE)) {
- struct ll_inode_info *lli = ll_i2info(dir);
- struct lmv_user_md *lum;
- int lumsize, err2;
-
- ptlrpc_req_finished(request);
- request = NULL;
-
- err2 = ll_dir_getstripe(dir, (void **)&lum, &lumsize, &request,
- OBD_MD_DEFAULT_MEA);
- if (!err2) {
- /* Update stripe_offset and retry */
- lli->lli_def_stripe_offset = lum->lum_stripe_offset;
- } else if (err2 == -ENODATA &&
- lli->lli_def_stripe_offset != -1) {
- /*
- * If there are no default stripe EA on the MDT, but the
- * client has default stripe, then it probably means
- * default stripe EA has just been deleted.
- */
- lli->lli_def_stripe_offset = -1;
- } else {
- goto err_exit;
- }
-
- ptlrpc_req_finished(request);
- request = NULL;
- goto again;
- }
-
- ll_update_times(request, dir);
-
- err = ll_prep_inode(&inode, request, dir->i_sb, NULL);
- if (err)
- goto err_exit;
-
- d_instantiate(dentry, inode);
-
- err = ll_init_security(dentry, inode, dir);
-err_exit:
- if (request)
- ptlrpc_req_finished(request);
-
- return err;
-}
-
-static int ll_mknod(struct inode *dir, struct dentry *dchild,
- umode_t mode, dev_t rdev)
-{
- int err;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p) mode %o dev %x\n",
- dchild, PFID(ll_inode2fid(dir)), dir, mode,
- old_encode_dev(rdev));
-
- if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
- mode &= ~current_umask();
-
- switch (mode & S_IFMT) {
- case 0:
- mode |= S_IFREG;
- /* for mode = 0 case */
- /* fall through */
- case S_IFREG:
- case S_IFCHR:
- case S_IFBLK:
- case S_IFIFO:
- case S_IFSOCK:
- err = ll_new_node(dir, dchild, NULL, mode,
- old_encode_dev(rdev),
- LUSTRE_OPC_MKNOD);
- break;
- case S_IFDIR:
- err = -EPERM;
- break;
- default:
- err = -EINVAL;
- }
-
- if (!err)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKNOD, 1);
-
- return err;
-}
-
-/*
- * Plain create. Intent create is handled in atomic_open.
- */
-static int ll_create_nd(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool want_excl)
-{
- int rc;
-
- CDEBUG(D_VFSTRACE,
- "VFS Op:name=%pd, dir=" DFID "(%p), flags=%u, excl=%d\n",
- dentry, PFID(ll_inode2fid(dir)), dir, mode, want_excl);
-
- rc = ll_mknod(dir, dentry, mode, 0);
-
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1);
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, unhashed %d\n",
- dentry, d_unhashed(dentry));
-
- return rc;
-}
-
-static int ll_unlink(struct inode *dir, struct dentry *dchild)
-{
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dchild, dir->i_ino, dir->i_generation, dir);
-
- op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dchild->d_name.name,
- dchild->d_name.len,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- if (dchild->d_inode)
- op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
-
- op_data->op_fid2 = op_data->op_fid3;
- rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (rc)
- goto out;
-
- ll_update_times(request, dir);
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_UNLINK, 1);
-
- out:
- ptlrpc_req_finished(request);
- return rc;
-}
-
-static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
-{
- int err;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir" DFID "(%p)\n",
- dentry, PFID(ll_inode2fid(dir)), dir);
-
- if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
- mode &= ~current_umask();
- mode = (mode & (0777 | S_ISVTX)) | S_IFDIR;
-
- err = ll_new_node(dir, dentry, NULL, mode, 0, LUSTRE_OPC_MKDIR);
- if (!err)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
-
- return err;
-}
-
-static int ll_rmdir(struct inode *dir, struct dentry *dchild)
-{
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p)\n",
- dchild, PFID(ll_inode2fid(dir)), dir);
-
- op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dchild->d_name.name,
- dchild->d_name.len,
- S_IFDIR, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- if (dchild->d_inode)
- op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
-
- op_data->op_fid2 = op_data->op_fid3;
- rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (rc == 0) {
- ll_update_times(request, dir);
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
- }
-
- ptlrpc_req_finished(request);
- return rc;
-}
-
-static int ll_symlink(struct inode *dir, struct dentry *dentry,
- const char *oldname)
-{
- int err;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),target=%.*s\n",
- dentry, PFID(ll_inode2fid(dir)), dir, 3000, oldname);
-
- err = ll_new_node(dir, dentry, oldname, S_IFLNK | 0777,
- 0, LUSTRE_OPC_SYMLINK);
-
- if (!err)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1);
-
- return err;
-}
-
-static int ll_link(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry)
-{
- struct inode *src = d_inode(old_dentry);
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- int err;
-
- CDEBUG(D_VFSTRACE,
- "VFS Op: inode=" DFID "(%p), dir=" DFID "(%p), target=%pd\n",
- PFID(ll_inode2fid(src)), src, PFID(ll_inode2fid(dir)), dir,
- new_dentry);
-
- op_data = ll_prep_md_op_data(NULL, src, dir, new_dentry->d_name.name,
- new_dentry->d_name.len,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- err = md_link(sbi->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (err)
- goto out;
-
- ll_update_times(request, dir);
- ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1);
-out:
- ptlrpc_req_finished(request);
- return err;
-}
-
-static int ll_rename(struct inode *src, struct dentry *src_dchild,
- struct inode *tgt, struct dentry *tgt_dchild,
- unsigned int flags)
-{
- struct ptlrpc_request *request = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(src);
- struct md_op_data *op_data;
- int err;
-
- if (flags)
- return -EINVAL;
-
- CDEBUG(D_VFSTRACE,
- "VFS Op:oldname=%pd, src_dir=" DFID "(%p), newname=%pd, tgt_dir=" DFID "(%p)\n",
- src_dchild, PFID(ll_inode2fid(src)), src,
- tgt_dchild, PFID(ll_inode2fid(tgt)), tgt);
-
- op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- if (src_dchild->d_inode)
- op_data->op_fid3 = *ll_inode2fid(src_dchild->d_inode);
- if (tgt_dchild->d_inode)
- op_data->op_fid4 = *ll_inode2fid(tgt_dchild->d_inode);
-
- err = md_rename(sbi->ll_md_exp, op_data,
- src_dchild->d_name.name,
- src_dchild->d_name.len,
- tgt_dchild->d_name.name,
- tgt_dchild->d_name.len, &request);
- ll_finish_md_op_data(op_data);
- if (!err) {
- ll_update_times(request, src);
- ll_update_times(request, tgt);
- ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
- }
-
- ptlrpc_req_finished(request);
- if (!err)
- d_move(src_dchild, tgt_dchild);
- return err;
-}
-
-const struct inode_operations ll_dir_inode_operations = {
- .mknod = ll_mknod,
- .atomic_open = ll_atomic_open,
- .lookup = ll_lookup_nd,
- .create = ll_create_nd,
- /* We need all these non-raw things for NFSD, to not patch it. */
- .unlink = ll_unlink,
- .mkdir = ll_mkdir,
- .rmdir = ll_rmdir,
- .symlink = ll_symlink,
- .link = ll_link,
- .rename = ll_rename,
- .setattr = ll_setattr,
- .getattr = ll_getattr,
- .permission = ll_inode_permission,
- .listxattr = ll_listxattr,
- .get_acl = ll_get_acl,
-};
-
-const struct inode_operations ll_special_inode_operations = {
- .setattr = ll_setattr,
- .getattr = ll_getattr,
- .permission = ll_inode_permission,
- .listxattr = ll_listxattr,
- .get_acl = ll_get_acl,
-};
diff --git a/drivers/staging/lustre/lustre/llite/range_lock.c b/drivers/staging/lustre/lustre/llite/range_lock.c
deleted file mode 100644
index cc9565f6bfe2..000000000000
--- a/drivers/staging/lustre/lustre/llite/range_lock.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Range lock is used to allow multiple threads writing a single shared
- * file given each thread is writing to a non-overlapping portion of the
- * file.
- *
- * Refer to the possible upstream kernel version of range lock by
- * Jan Kara <jack@suse.cz>: https://lkml.org/lkml/2013/1/31/480
- *
- * This file could later replaced by the upstream kernel version.
- */
-/*
- * Author: Prakash Surya <surya1@llnl.gov>
- * Author: Bobi Jam <bobijam.xu@intel.com>
- */
-#include "range_lock.h"
-#include <uapi/linux/lustre/lustre_idl.h>
-
-/**
- * Initialize a range lock tree
- *
- * \param tree [in] an empty range lock tree
- *
- * Pre: Caller should have allocated the range lock tree.
- * Post: The range lock tree is ready to function.
- */
-void range_lock_tree_init(struct range_lock_tree *tree)
-{
- tree->rlt_root = NULL;
- tree->rlt_sequence = 0;
- spin_lock_init(&tree->rlt_lock);
-}
-
-/**
- * Initialize a range lock node
- *
- * \param lock [in] an empty range lock node
- * \param start [in] start of the covering region
- * \param end [in] end of the covering region
- *
- * Pre: Caller should have allocated the range lock node.
- * Post: The range lock node is meant to cover [start, end] region
- */
-int range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
-{
- int rc;
-
- memset(&lock->rl_node, 0, sizeof(lock->rl_node));
- if (end != LUSTRE_EOF)
- end >>= PAGE_SHIFT;
- rc = interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
- if (rc)
- return rc;
-
- INIT_LIST_HEAD(&lock->rl_next_lock);
- lock->rl_task = NULL;
- lock->rl_lock_count = 0;
- lock->rl_blocking_ranges = 0;
- lock->rl_sequence = 0;
- return rc;
-}
-
-static inline struct range_lock *next_lock(struct range_lock *lock)
-{
- return list_entry(lock->rl_next_lock.next, typeof(*lock), rl_next_lock);
-}
-
-/**
- * Helper function of range_unlock()
- *
- * \param node [in] a range lock found overlapped during interval node
- * search
- * \param arg [in] the range lock to be tested
- *
- * \retval INTERVAL_ITER_CONT indicate to continue the search for next
- * overlapping range node
- * \retval INTERVAL_ITER_STOP indicate to stop the search
- */
-static enum interval_iter range_unlock_cb(struct interval_node *node, void *arg)
-{
- struct range_lock *lock = arg;
- struct range_lock *overlap = node2rangelock(node);
- struct range_lock *iter;
-
- list_for_each_entry(iter, &overlap->rl_next_lock, rl_next_lock) {
- if (iter->rl_sequence > lock->rl_sequence) {
- --iter->rl_blocking_ranges;
- LASSERT(iter->rl_blocking_ranges > 0);
- }
- }
- if (overlap->rl_sequence > lock->rl_sequence) {
- --overlap->rl_blocking_ranges;
- if (overlap->rl_blocking_ranges == 0)
- wake_up_process(overlap->rl_task);
- }
- return INTERVAL_ITER_CONT;
-}
-
-/**
- * Unlock a range lock, wake up locks blocked by this lock.
- *
- * \param tree [in] range lock tree
- * \param lock [in] range lock to be deleted
- *
- * If this lock has been granted, relase it; if not, just delete it from
- * the tree or the same region lock list. Wake up those locks only blocked
- * by this lock through range_unlock_cb().
- */
-void range_unlock(struct range_lock_tree *tree, struct range_lock *lock)
-{
- spin_lock(&tree->rlt_lock);
- if (!list_empty(&lock->rl_next_lock)) {
- struct range_lock *next;
-
- if (interval_is_intree(&lock->rl_node)) { /* first lock */
- /* Insert the next same range lock into the tree */
- next = next_lock(lock);
- next->rl_lock_count = lock->rl_lock_count - 1;
- interval_erase(&lock->rl_node, &tree->rlt_root);
- interval_insert(&next->rl_node, &tree->rlt_root);
- } else {
- /* find the first lock in tree */
- list_for_each_entry(next, &lock->rl_next_lock,
- rl_next_lock) {
- if (!interval_is_intree(&next->rl_node))
- continue;
-
- LASSERT(next->rl_lock_count > 0);
- next->rl_lock_count--;
- break;
- }
- }
- list_del_init(&lock->rl_next_lock);
- } else {
- LASSERT(interval_is_intree(&lock->rl_node));
- interval_erase(&lock->rl_node, &tree->rlt_root);
- }
-
- interval_search(tree->rlt_root, &lock->rl_node.in_extent,
- range_unlock_cb, lock);
- spin_unlock(&tree->rlt_lock);
-}
-
-/**
- * Helper function of range_lock()
- *
- * \param node [in] a range lock found overlapped during interval node
- * search
- * \param arg [in] the range lock to be tested
- *
- * \retval INTERVAL_ITER_CONT indicate to continue the search for next
- * overlapping range node
- * \retval INTERVAL_ITER_STOP indicate to stop the search
- */
-static enum interval_iter range_lock_cb(struct interval_node *node, void *arg)
-{
- struct range_lock *lock = arg;
- struct range_lock *overlap = node2rangelock(node);
-
- lock->rl_blocking_ranges += overlap->rl_lock_count + 1;
- return INTERVAL_ITER_CONT;
-}
-
-/**
- * Lock a region
- *
- * \param tree [in] range lock tree
- * \param lock [in] range lock node containing the region span
- *
- * \retval 0 get the range lock
- * \retval <0 error code while not getting the range lock
- *
- * If there exists overlapping range lock, the new lock will wait and
- * retry, if later it find that it is not the chosen one to wake up,
- * it wait again.
- */
-int range_lock(struct range_lock_tree *tree, struct range_lock *lock)
-{
- struct interval_node *node;
- int rc = 0;
-
- spin_lock(&tree->rlt_lock);
- /*
- * We need to check for all conflicting intervals
- * already in the tree.
- */
- interval_search(tree->rlt_root, &lock->rl_node.in_extent,
- range_lock_cb, lock);
- /*
- * Insert to the tree if I am unique, otherwise I've been linked to
- * the rl_next_lock of another lock which has the same range as mine
- * in range_lock_cb().
- */
- node = interval_insert(&lock->rl_node, &tree->rlt_root);
- if (node) {
- struct range_lock *tmp = node2rangelock(node);
-
- list_add_tail(&lock->rl_next_lock, &tmp->rl_next_lock);
- tmp->rl_lock_count++;
- }
- lock->rl_sequence = ++tree->rlt_sequence;
-
- while (lock->rl_blocking_ranges > 0) {
- lock->rl_task = current;
- __set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock(&tree->rlt_lock);
- schedule();
-
- if (signal_pending(current)) {
- range_unlock(tree, lock);
- rc = -EINTR;
- goto out;
- }
- spin_lock(&tree->rlt_lock);
- }
- spin_unlock(&tree->rlt_lock);
-out:
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/llite/range_lock.h b/drivers/staging/lustre/lustre/llite/range_lock.h
deleted file mode 100644
index 38b2be4e378f..000000000000
--- a/drivers/staging/lustre/lustre/llite/range_lock.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Range lock is used to allow multiple threads writing a single shared
- * file given each thread is writing to a non-overlapping portion of the
- * file.
- *
- * Refer to the possible upstream kernel version of range lock by
- * Jan Kara <jack@suse.cz>: https://lkml.org/lkml/2013/1/31/480
- *
- * This file could later replaced by the upstream kernel version.
- */
-/*
- * Author: Prakash Surya <surya1@llnl.gov>
- * Author: Bobi Jam <bobijam.xu@intel.com>
- */
-#ifndef _RANGE_LOCK_H
-#define _RANGE_LOCK_H
-
-#include <linux/libcfs/libcfs.h>
-#include <interval_tree.h>
-
-struct range_lock {
- struct interval_node rl_node;
- /**
- * Process to enqueue this lock.
- */
- struct task_struct *rl_task;
- /**
- * List of locks with the same range.
- */
- struct list_head rl_next_lock;
- /**
- * Number of locks in the list rl_next_lock
- */
- unsigned int rl_lock_count;
- /**
- * Number of ranges which are blocking acquisition of the lock
- */
- unsigned int rl_blocking_ranges;
- /**
- * Sequence number of range lock. This number is used to get to know
- * the order the locks are queued; this is required for range_cancel().
- */
- __u64 rl_sequence;
-};
-
-static inline struct range_lock *node2rangelock(const struct interval_node *n)
-{
- return container_of(n, struct range_lock, rl_node);
-}
-
-struct range_lock_tree {
- struct interval_node *rlt_root;
- spinlock_t rlt_lock; /* protect range lock tree */
- __u64 rlt_sequence;
-};
-
-void range_lock_tree_init(struct range_lock_tree *tree);
-int range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
-int range_lock(struct range_lock_tree *tree, struct range_lock *lock);
-void range_unlock(struct range_lock_tree *tree, struct range_lock *lock);
-#endif
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
deleted file mode 100644
index 3e008ce7275d..000000000000
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ /dev/null
@@ -1,1214 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/rw.c
- *
- * Lustre Lite I/O page cache routines shared by different kernel revs
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/writeback.h>
-#include <linux/uaccess.h>
-
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-/* current_is_kswapd() */
-#include <linux/swap.h>
-#include <linux/bvec.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd_cksum.h>
-#include "llite_internal.h"
-
-static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
-
-/**
- * Get readahead pages from the filesystem readahead pool of the client for a
- * thread.
- *
- * /param sbi superblock for filesystem readahead state ll_ra_info
- * /param ria per-thread readahead state
- * /param pages number of pages requested for readahead for the thread.
- *
- * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
- * It should work well if the ra_max_pages is much greater than the single
- * file's read-ahead window, and not too many threads contending for
- * these readahead pages.
- *
- * TODO: There may be a 'global sync problem' if many threads are trying
- * to get an ra budget that is larger than the remaining readahead pages
- * and reach here at exactly the same time. They will compute /a ret to
- * consume the remaining pages, but will fail at atomic_add_return() and
- * get a zero ra window, although there is still ra space remaining. - Jay
- */
-static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
- struct ra_io_arg *ria,
- unsigned long pages, unsigned long min)
-{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- long ret;
-
- /* If read-ahead pages left are less than 1M, do not do read-ahead,
- * otherwise it will form small read RPC(< 1M), which hurt server
- * performance a lot.
- */
- ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
- if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) {
- ret = 0;
- goto out;
- }
-
- if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
- atomic_sub(ret, &ra->ra_cur_pages);
- ret = 0;
- }
-
-out:
- if (ret < min) {
- /* override ra limit for maximum performance */
- atomic_add(min - ret, &ra->ra_cur_pages);
- ret = min;
- }
- return ret;
-}
-
-void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
-{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
-
- atomic_sub(len, &ra->ra_cur_pages);
-}
-
-static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
-{
- LASSERTF(which < _NR_RA_STAT, "which: %u\n", which);
- lprocfs_counter_incr(sbi->ll_ra_stats, which);
-}
-
-void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- ll_ra_stats_inc_sbi(sbi, which);
-}
-
-#define RAS_CDEBUG(ras) \
- CDEBUG(D_READA, \
- "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu rpc %lu " \
- "r %lu ri %lu csr %lu sf %lu sp %lu sl %lu\n", \
- ras->ras_last_readpage, ras->ras_consecutive_requests, \
- ras->ras_consecutive_pages, ras->ras_window_start, \
- ras->ras_window_len, ras->ras_next_readahead, \
- ras->ras_rpc_size, \
- ras->ras_requests, ras->ras_request_index, \
- ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
- ras->ras_stride_pages, ras->ras_stride_length)
-
-static int index_in_window(unsigned long index, unsigned long point,
- unsigned long before, unsigned long after)
-{
- unsigned long start = point - before, end = point + after;
-
- if (start > point)
- start = 0;
- if (end < point)
- end = ~0;
-
- return start <= index && index <= end;
-}
-
-void ll_ras_enter(struct file *f)
-{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(f);
- struct ll_readahead_state *ras = &fd->fd_ras;
-
- spin_lock(&ras->ras_lock);
- ras->ras_requests++;
- ras->ras_request_index = 0;
- ras->ras_consecutive_requests++;
- spin_unlock(&ras->ras_lock);
-}
-
-/**
- * Initiates read-ahead of a page with given index.
- *
- * \retval +ve: page was already uptodate so it will be skipped
- * from being added;
- * \retval -ve: page wasn't added to \a queue for error;
- * \retval 0: page was added into \a queue for read ahead.
- */
-static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, pgoff_t index)
-{
- enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
- struct cl_object *clob = io->ci_obj;
- struct inode *inode = vvp_object_inode(clob);
- const char *msg = NULL;
- struct cl_page *page;
- struct vvp_page *vpg;
- struct page *vmpage;
- int rc = 0;
-
- vmpage = grab_cache_page_nowait(inode->i_mapping, index);
- if (!vmpage) {
- which = RA_STAT_FAILED_GRAB_PAGE;
- msg = "g_c_p_n failed";
- rc = -EBUSY;
- goto out;
- }
-
- /* Check if vmpage was truncated or reclaimed */
- if (vmpage->mapping != inode->i_mapping) {
- which = RA_STAT_WRONG_GRAB_PAGE;
- msg = "g_c_p_n returned invalid page";
- rc = -EBUSY;
- goto out;
- }
-
- page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
- if (IS_ERR(page)) {
- which = RA_STAT_FAILED_GRAB_PAGE;
- msg = "cl_page_find failed";
- rc = PTR_ERR(page);
- goto out;
- }
-
- lu_ref_add(&page->cp_reference, "ra", current);
- cl_page_assume(env, io, page);
- vpg = cl2vvp_page(cl_object_page_slice(clob, page));
- if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
- vpg->vpg_defer_uptodate = 1;
- vpg->vpg_ra_used = 0;
- cl_page_list_add(queue, page);
- } else {
- /* skip completed pages */
- cl_page_unassume(env, io, page);
- /* This page is already uptodate, returning a positive number
- * to tell the callers about this
- */
- rc = 1;
- }
-
- lu_ref_del(&page->cp_reference, "ra", current);
- cl_page_put(env, page);
-out:
- if (vmpage) {
- if (rc)
- unlock_page(vmpage);
- put_page(vmpage);
- }
- if (msg) {
- ll_ra_stats_inc(inode, which);
- CDEBUG(D_READA, "%s\n", msg);
- }
- return rc;
-}
-
-#define RIA_DEBUG(ria) \
- CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
- ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
- ria->ria_pages)
-
-static inline int stride_io_mode(struct ll_readahead_state *ras)
-{
- return ras->ras_consecutive_stride_requests > 1;
-}
-
-/* The function calculates how much pages will be read in
- * [off, off + length], in such stride IO area,
- * stride_offset = st_off, stride_length = st_len,
- * stride_pages = st_pgs
- *
- * |------------------|*****|------------------|*****|------------|*****|....
- * st_off
- * |--- st_pgs ---|
- * |----- st_len -----|
- *
- * How many pages it should read in such pattern
- * |-------------------------------------------------------------|
- * off
- * |<------ length ------->|
- *
- * = |<----->| + |-------------------------------------| + |---|
- * start_left st_pgs * i end_left
- */
-static unsigned long
-stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
- unsigned long off, unsigned long length)
-{
- __u64 start = off > st_off ? off - st_off : 0;
- __u64 end = off + length > st_off ? off + length - st_off : 0;
- unsigned long start_left = 0;
- unsigned long end_left = 0;
- unsigned long pg_count;
-
- if (st_len == 0 || length == 0 || end == 0)
- return length;
-
- start_left = do_div(start, st_len);
- if (start_left < st_pgs)
- start_left = st_pgs - start_left;
- else
- start_left = 0;
-
- end_left = do_div(end, st_len);
- if (end_left > st_pgs)
- end_left = st_pgs;
-
- CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n",
- start, end, start_left, end_left);
-
- if (start == end)
- pg_count = end_left - (st_pgs - start_left);
- else
- pg_count = start_left + st_pgs * (end - start - 1) + end_left;
-
- CDEBUG(D_READA,
- "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
- st_off, st_len, st_pgs, off, length, pg_count);
-
- return pg_count;
-}
-
-static int ria_page_count(struct ra_io_arg *ria)
-{
- __u64 length = ria->ria_end >= ria->ria_start ?
- ria->ria_end - ria->ria_start + 1 : 0;
-
- return stride_pg_count(ria->ria_stoff, ria->ria_length,
- ria->ria_pages, ria->ria_start,
- length);
-}
-
-static unsigned long ras_align(struct ll_readahead_state *ras,
- unsigned long index,
- unsigned long *remainder)
-{
- unsigned long rem = index % ras->ras_rpc_size;
-
- if (remainder)
- *remainder = rem;
- return index - rem;
-}
-
-/*Check whether the index is in the defined ra-window */
-static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
-{
- /* If ria_length == ria_pages, it means non-stride I/O mode,
- * idx should always inside read-ahead window in this case
- * For stride I/O mode, just check whether the idx is inside
- * the ria_pages.
- */
- return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
- (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
- ria->ria_length < ria->ria_pages);
-}
-
-static unsigned long
-ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct ll_readahead_state *ras,
- struct ra_io_arg *ria)
-{
- struct cl_read_ahead ra = { 0 };
- unsigned long ra_end = 0;
- bool stride_ria;
- pgoff_t page_idx;
- int rc;
-
- LASSERT(ria);
- RIA_DEBUG(ria);
-
- stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
- for (page_idx = ria->ria_start;
- page_idx <= ria->ria_end && ria->ria_reserved > 0; page_idx++) {
- if (ras_inside_ra_window(page_idx, ria)) {
- if (!ra.cra_end || ra.cra_end < page_idx) {
- unsigned long end;
-
- cl_read_ahead_release(env, &ra);
-
- rc = cl_io_read_ahead(env, io, page_idx, &ra);
- if (rc < 0)
- break;
-
- CDEBUG(D_READA, "idx: %lu, ra: %lu, rpc: %lu\n",
- page_idx, ra.cra_end, ra.cra_rpc_size);
- LASSERTF(ra.cra_end >= page_idx,
- "object: %p, indcies %lu / %lu\n",
- io->ci_obj, ra.cra_end, page_idx);
- /*
- * update read ahead RPC size.
- * NB: it's racy but doesn't matter
- */
- if (ras->ras_rpc_size > ra.cra_rpc_size &&
- ra.cra_rpc_size > 0)
- ras->ras_rpc_size = ra.cra_rpc_size;
- /* trim it to align with optimal RPC size */
- end = ras_align(ras, ria->ria_end + 1, NULL);
- if (end > 0 && !ria->ria_eof)
- ria->ria_end = end - 1;
- if (ria->ria_end < ria->ria_end_min)
- ria->ria_end = ria->ria_end_min;
- if (ria->ria_end > ra.cra_end)
- ria->ria_end = ra.cra_end;
- }
-
- /* If the page is inside the read-ahead window */
- rc = ll_read_ahead_page(env, io, queue, page_idx);
- if (rc < 0)
- break;
-
- ra_end = page_idx;
- if (!rc)
- ria->ria_reserved--;
- } else if (stride_ria) {
- /* If it is not in the read-ahead window, and it is
- * read-ahead mode, then check whether it should skip
- * the stride gap
- */
- pgoff_t offset;
- /* FIXME: This assertion only is valid when it is for
- * forward read-ahead, it will be fixed when backward
- * read-ahead is implemented
- */
- LASSERTF(page_idx >= ria->ria_stoff,
- "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
- page_idx,
- ria->ria_start, ria->ria_end, ria->ria_stoff,
- ria->ria_length, ria->ria_pages);
- offset = page_idx - ria->ria_stoff;
- offset = offset % (ria->ria_length);
- if (offset > ria->ria_pages) {
- page_idx += ria->ria_length - offset;
- CDEBUG(D_READA, "i %lu skip %lu\n", page_idx,
- ria->ria_length - offset);
- continue;
- }
- }
- }
- cl_read_ahead_release(env, &ra);
-
- return ra_end;
-}
-
-static int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue,
- struct ll_readahead_state *ras, bool hit)
-{
- struct vvp_io *vio = vvp_env_io(env);
- struct ll_thread_info *lti = ll_env_info(env);
- struct cl_attr *attr = vvp_env_thread_attr(env);
- unsigned long len, mlen = 0;
- pgoff_t ra_end, start = 0, end = 0;
- struct inode *inode;
- struct ra_io_arg *ria = &lti->lti_ria;
- struct cl_object *clob;
- int ret = 0;
- __u64 kms;
-
- clob = io->ci_obj;
- inode = vvp_object_inode(clob);
-
- memset(ria, 0, sizeof(*ria));
-
- cl_object_attr_lock(clob);
- ret = cl_object_attr_get(env, clob, attr);
- cl_object_attr_unlock(clob);
-
- if (ret != 0)
- return ret;
- kms = attr->cat_kms;
- if (kms == 0) {
- ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
- return 0;
- }
-
- spin_lock(&ras->ras_lock);
-
- /**
- * Note: other thread might rollback the ras_next_readahead,
- * if it can not get the full size of prepared pages, see the
- * end of this function. For stride read ahead, it needs to
- * make sure the offset is no less than ras_stride_offset,
- * so that stride read ahead can work correctly.
- */
- if (stride_io_mode(ras))
- start = max(ras->ras_next_readahead, ras->ras_stride_offset);
- else
- start = ras->ras_next_readahead;
-
- if (ras->ras_window_len > 0)
- end = ras->ras_window_start + ras->ras_window_len - 1;
-
- /* Enlarge the RA window to encompass the full read */
- if (vio->vui_ra_valid &&
- end < vio->vui_ra_start + vio->vui_ra_count - 1)
- end = vio->vui_ra_start + vio->vui_ra_count - 1;
-
- if (end) {
- unsigned long end_index;
-
- /* Truncate RA window to end of file */
- end_index = (unsigned long)((kms - 1) >> PAGE_SHIFT);
- if (end_index <= end) {
- end = end_index;
- ria->ria_eof = true;
- }
-
- ras->ras_next_readahead = max(end, end + 1);
- RAS_CDEBUG(ras);
- }
- ria->ria_start = start;
- ria->ria_end = end;
- /* If stride I/O mode is detected, get stride window*/
- if (stride_io_mode(ras)) {
- ria->ria_stoff = ras->ras_stride_offset;
- ria->ria_length = ras->ras_stride_length;
- ria->ria_pages = ras->ras_stride_pages;
- }
- spin_unlock(&ras->ras_lock);
-
- if (end == 0) {
- ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
- return 0;
- }
- len = ria_page_count(ria);
- if (len == 0) {
- ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
- return 0;
- }
-
- CDEBUG(D_READA, DFID ": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
- PFID(lu_object_fid(&clob->co_lu)),
- ria->ria_start, ria->ria_end,
- vio->vui_ra_valid ? vio->vui_ra_start : 0,
- vio->vui_ra_valid ? vio->vui_ra_count : 0,
- hit);
-
- /* at least to extend the readahead window to cover current read */
- if (!hit && vio->vui_ra_valid &&
- vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) {
- unsigned long remainder;
-
- /* to the end of current read window. */
- mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start;
- /* trim to RPC boundary */
- ras_align(ras, ria->ria_start, &remainder);
- mlen = min(mlen, ras->ras_rpc_size - remainder);
- ria->ria_end_min = ria->ria_start + mlen;
- }
-
- ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen);
- if (ria->ria_reserved < len)
- ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
-
- CDEBUG(D_READA, "reserved pages %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
- ria->ria_reserved, len, mlen,
- atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
- ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
-
- ra_end = ll_read_ahead_pages(env, io, queue, ras, ria);
-
- if (ria->ria_reserved)
- ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved);
-
- if (ra_end == end && ra_end == (kms >> PAGE_SHIFT))
- ll_ra_stats_inc(inode, RA_STAT_EOF);
-
- /* if we didn't get to the end of the region we reserved from
- * the ras we need to go back and update the ras so that the
- * next read-ahead tries from where we left off. we only do so
- * if the region we failed to issue read-ahead on is still ahead
- * of the app and behind the next index to start read-ahead from
- */
- CDEBUG(D_READA, "ra_end = %lu end = %lu stride end = %lu pages = %d\n",
- ra_end, end, ria->ria_end, ret);
-
- if (ra_end > 0 && ra_end != end) {
- ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
- spin_lock(&ras->ras_lock);
- if (ra_end <= ras->ras_next_readahead &&
- index_in_window(ra_end, ras->ras_window_start, 0,
- ras->ras_window_len)) {
- ras->ras_next_readahead = ra_end + 1;
- RAS_CDEBUG(ras);
- }
- spin_unlock(&ras->ras_lock);
- }
-
- return ret;
-}
-
-static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras,
- unsigned long index)
-{
- ras->ras_window_start = ras_align(ras, index, NULL);
-}
-
-/* called with the ras_lock held or from places where it doesn't matter */
-static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
- unsigned long index)
-{
- ras->ras_last_readpage = index;
- ras->ras_consecutive_requests = 0;
- ras->ras_consecutive_pages = 0;
- ras->ras_window_len = 0;
- ras_set_start(inode, ras, index);
- ras->ras_next_readahead = max(ras->ras_window_start, index + 1);
-
- RAS_CDEBUG(ras);
-}
-
-/* called with the ras_lock held or from places where it doesn't matter */
-static void ras_stride_reset(struct ll_readahead_state *ras)
-{
- ras->ras_consecutive_stride_requests = 0;
- ras->ras_stride_length = 0;
- ras->ras_stride_pages = 0;
- RAS_CDEBUG(ras);
-}
-
-void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
-{
- spin_lock_init(&ras->ras_lock);
- ras->ras_rpc_size = PTLRPC_MAX_BRW_PAGES;
- ras_reset(inode, ras, 0);
- ras->ras_requests = 0;
-}
-
-/*
- * Check whether the read request is in the stride window.
- * If it is in the stride window, return 1, otherwise return 0.
- */
-static int index_in_stride_window(struct ll_readahead_state *ras,
- unsigned long index)
-{
- unsigned long stride_gap;
-
- if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 ||
- ras->ras_stride_pages == ras->ras_stride_length)
- return 0;
-
- stride_gap = index - ras->ras_last_readpage - 1;
-
- /* If it is contiguous read */
- if (stride_gap == 0)
- return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;
-
- /* Otherwise check the stride by itself */
- return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
- ras->ras_consecutive_pages == ras->ras_stride_pages;
-}
-
-static void ras_update_stride_detector(struct ll_readahead_state *ras,
- unsigned long index)
-{
- unsigned long stride_gap = index - ras->ras_last_readpage - 1;
-
- if ((stride_gap != 0 || ras->ras_consecutive_stride_requests == 0) &&
- !stride_io_mode(ras)) {
- ras->ras_stride_pages = ras->ras_consecutive_pages;
- ras->ras_stride_length = ras->ras_consecutive_pages +
- stride_gap;
- }
- LASSERT(ras->ras_request_index == 0);
- LASSERT(ras->ras_consecutive_stride_requests == 0);
-
- if (index <= ras->ras_last_readpage) {
- /*Reset stride window for forward read*/
- ras_stride_reset(ras);
- return;
- }
-
- ras->ras_stride_pages = ras->ras_consecutive_pages;
- ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
-
- RAS_CDEBUG(ras);
-}
-
-/* Stride Read-ahead window will be increased inc_len according to
- * stride I/O pattern
- */
-static void ras_stride_increase_window(struct ll_readahead_state *ras,
- struct ll_ra_info *ra,
- unsigned long inc_len)
-{
- unsigned long left, step, window_len;
- unsigned long stride_len;
-
- LASSERT(ras->ras_stride_length > 0);
- LASSERTF(ras->ras_window_start + ras->ras_window_len >=
- ras->ras_stride_offset,
- "window_start %lu, window_len %lu stride_offset %lu\n",
- ras->ras_window_start,
- ras->ras_window_len, ras->ras_stride_offset);
-
- stride_len = ras->ras_window_start + ras->ras_window_len -
- ras->ras_stride_offset;
-
- left = stride_len % ras->ras_stride_length;
- window_len = ras->ras_window_len - left;
-
- if (left < ras->ras_stride_pages)
- left += inc_len;
- else
- left = ras->ras_stride_pages + inc_len;
-
- LASSERT(ras->ras_stride_pages != 0);
-
- step = left / ras->ras_stride_pages;
- left %= ras->ras_stride_pages;
-
- window_len += step * ras->ras_stride_length + left;
-
- if (stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
- ras->ras_stride_pages, ras->ras_stride_offset,
- window_len) <= ra->ra_max_pages_per_file)
- ras->ras_window_len = window_len;
-
- RAS_CDEBUG(ras);
-}
-
-static void ras_increase_window(struct inode *inode,
- struct ll_readahead_state *ras,
- struct ll_ra_info *ra)
-{
- /* The stretch of ra-window should be aligned with max rpc_size
- * but current clio architecture does not support retrieve such
- * information from lower layer. FIXME later
- */
- if (stride_io_mode(ras)) {
- ras_stride_increase_window(ras, ra, ras->ras_rpc_size);
- } else {
- unsigned long wlen;
-
- wlen = min(ras->ras_window_len + ras->ras_rpc_size,
- ra->ra_max_pages_per_file);
- ras->ras_window_len = ras_align(ras, wlen, NULL);
- }
-}
-
-static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
- struct ll_readahead_state *ras, unsigned long index,
- enum ras_update_flags flags)
-{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- int zero = 0, stride_detect = 0, ra_miss = 0;
- bool hit = flags & LL_RAS_HIT;
-
- spin_lock(&ras->ras_lock);
-
- if (!hit)
- CDEBUG(D_READA, DFID " pages at %lu miss.\n",
- PFID(ll_inode2fid(inode)), index);
-
- ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
-
- /* reset the read-ahead window in two cases. First when the app seeks
- * or reads to some other part of the file. Secondly if we get a
- * read-ahead miss that we think we've previously issued. This can
- * be a symptom of there being so many read-ahead pages that the VM is
- * reclaiming it before we get to it.
- */
- if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
- zero = 1;
- ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
- } else if (!hit && ras->ras_window_len &&
- index < ras->ras_next_readahead &&
- index_in_window(index, ras->ras_window_start, 0,
- ras->ras_window_len)) {
- ra_miss = 1;
- ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
- }
-
- /* On the second access to a file smaller than the tunable
- * ra_max_read_ahead_whole_pages trigger RA on all pages in the
- * file up to ra_max_pages_per_file. This is simply a best effort
- * and only occurs once per open file. Normal RA behavior is reverted
- * to for subsequent IO. The mmap case does not increment
- * ras_requests and thus can never trigger this behavior.
- */
- if (ras->ras_requests >= 2 && !ras->ras_request_index) {
- __u64 kms_pages;
-
- kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
- PAGE_SHIFT;
-
- CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
- ra->ra_max_read_ahead_whole_pages,
- ra->ra_max_pages_per_file);
-
- if (kms_pages &&
- kms_pages <= ra->ra_max_read_ahead_whole_pages) {
- ras->ras_window_start = 0;
- ras->ras_next_readahead = index + 1;
- ras->ras_window_len = min(ra->ra_max_pages_per_file,
- ra->ra_max_read_ahead_whole_pages);
- goto out_unlock;
- }
- }
- if (zero) {
- /* check whether it is in stride I/O mode*/
- if (!index_in_stride_window(ras, index)) {
- if (ras->ras_consecutive_stride_requests == 0 &&
- ras->ras_request_index == 0) {
- ras_update_stride_detector(ras, index);
- ras->ras_consecutive_stride_requests++;
- } else {
- ras_stride_reset(ras);
- }
- ras_reset(inode, ras, index);
- ras->ras_consecutive_pages++;
- goto out_unlock;
- } else {
- ras->ras_consecutive_pages = 0;
- ras->ras_consecutive_requests = 0;
- if (++ras->ras_consecutive_stride_requests > 1)
- stride_detect = 1;
- RAS_CDEBUG(ras);
- }
- } else {
- if (ra_miss) {
- if (index_in_stride_window(ras, index) &&
- stride_io_mode(ras)) {
- if (index != ras->ras_last_readpage + 1)
- ras->ras_consecutive_pages = 0;
- ras_reset(inode, ras, index);
-
- /* If stride-RA hit cache miss, the stride
- * detector will not be reset to avoid the
- * overhead of redetecting read-ahead mode,
- * but on the condition that the stride window
- * is still intersect with normal sequential
- * read-ahead window.
- */
- if (ras->ras_window_start <
- ras->ras_stride_offset)
- ras_stride_reset(ras);
- RAS_CDEBUG(ras);
- } else {
- /* Reset both stride window and normal RA
- * window
- */
- ras_reset(inode, ras, index);
- ras->ras_consecutive_pages++;
- ras_stride_reset(ras);
- goto out_unlock;
- }
- } else if (stride_io_mode(ras)) {
- /* If this is contiguous read but in stride I/O mode
- * currently, check whether stride step still is valid,
- * if invalid, it will reset the stride ra window
- */
- if (!index_in_stride_window(ras, index)) {
- /* Shrink stride read-ahead window to be zero */
- ras_stride_reset(ras);
- ras->ras_window_len = 0;
- ras->ras_next_readahead = index;
- }
- }
- }
- ras->ras_consecutive_pages++;
- ras->ras_last_readpage = index;
- ras_set_start(inode, ras, index);
-
- if (stride_io_mode(ras)) {
- /* Since stride readahead is sensitive to the offset
- * of read-ahead, so we use original offset here,
- * instead of ras_window_start, which is RPC aligned
- */
- ras->ras_next_readahead = max(index, ras->ras_next_readahead);
- ras->ras_window_start = max(ras->ras_stride_offset,
- ras->ras_window_start);
- } else {
- if (ras->ras_next_readahead < ras->ras_window_start)
- ras->ras_next_readahead = ras->ras_window_start;
- if (!hit)
- ras->ras_next_readahead = index + 1;
- }
- RAS_CDEBUG(ras);
-
- /* Trigger RA in the mmap case where ras_consecutive_requests
- * is not incremented and thus can't be used to trigger RA
- */
- if (ras->ras_consecutive_pages >= 4 && flags & LL_RAS_MMAP) {
- ras_increase_window(inode, ras, ra);
- /*
- * reset consecutive pages so that the readahead window can
- * grow gradually.
- */
- ras->ras_consecutive_pages = 0;
- goto out_unlock;
- }
-
- /* Initially reset the stride window offset to next_readahead*/
- if (ras->ras_consecutive_stride_requests == 2 && stride_detect) {
- /**
- * Once stride IO mode is detected, next_readahead should be
- * reset to make sure next_readahead > stride offset
- */
- ras->ras_next_readahead = max(index, ras->ras_next_readahead);
- ras->ras_stride_offset = index;
- ras->ras_window_start = max(index, ras->ras_window_start);
- }
-
- /* The initial ras_window_len is set to the request size. To avoid
- * uselessly reading and discarding pages for random IO the window is
- * only increased once per consecutive request received.
- */
- if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
- !ras->ras_request_index)
- ras_increase_window(inode, ras, ra);
-out_unlock:
- RAS_CDEBUG(ras);
- ras->ras_request_index++;
- spin_unlock(&ras->ras_lock);
-}
-
-int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
-{
- struct inode *inode = vmpage->mapping->host;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lu_env *env;
- struct cl_io *io;
- struct cl_page *page;
- struct cl_object *clob;
- bool redirtied = false;
- bool unlocked = false;
- int result;
- u16 refcheck;
-
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageWriteback(vmpage));
-
- LASSERT(ll_i2dtexp(inode));
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- result = PTR_ERR(env);
- goto out;
- }
-
- clob = ll_i2info(inode)->lli_clob;
- LASSERT(clob);
-
- io = vvp_env_thread_io(env);
- io->ci_obj = clob;
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, clob);
- if (result == 0) {
- page = cl_page_find(env, clob, vmpage->index,
- vmpage, CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- lu_ref_add(&page->cp_reference, "writepage",
- current);
- cl_page_assume(env, io, page);
- result = cl_page_flush(env, io, page);
- if (result != 0) {
- /*
- * Re-dirty page on error so it retries write,
- * but not in case when IO has actually
- * occurred and completed with an error.
- */
- if (!PageError(vmpage)) {
- redirty_page_for_writepage(wbc, vmpage);
- result = 0;
- redirtied = true;
- }
- }
- cl_page_disown(env, io, page);
- unlocked = true;
- lu_ref_del(&page->cp_reference,
- "writepage", current);
- cl_page_put(env, page);
- } else {
- result = PTR_ERR(page);
- }
- }
- cl_io_fini(env, io);
-
- if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
- loff_t offset = cl_offset(clob, vmpage->index);
-
- /* Flush page failed because the extent is being written out.
- * Wait for the write of extent to be finished to avoid
- * breaking kernel which assumes ->writepage should mark
- * PageWriteback or clean the page.
- */
- result = cl_sync_file_range(inode, offset,
- offset + PAGE_SIZE - 1,
- CL_FSYNC_LOCAL, 1);
- if (result > 0) {
- /* actually we may have written more than one page.
- * decreasing this page because the caller will count
- * it.
- */
- wbc->nr_to_write -= result - 1;
- result = 0;
- }
- }
-
- cl_env_put(env, &refcheck);
- goto out;
-
-out:
- if (result < 0) {
- if (!lli->lli_async_rc)
- lli->lli_async_rc = result;
- SetPageError(vmpage);
- if (!unlocked)
- unlock_page(vmpage);
- }
- return result;
-}
-
-int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
-{
- struct inode *inode = mapping->host;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- loff_t start;
- loff_t end;
- enum cl_fsync_mode mode;
- int range_whole = 0;
- int result;
- int ignore_layout = 0;
-
- if (wbc->range_cyclic) {
- start = mapping->writeback_index << PAGE_SHIFT;
- end = OBD_OBJECT_EOF;
- } else {
- start = wbc->range_start;
- end = wbc->range_end;
- if (end == LLONG_MAX) {
- end = OBD_OBJECT_EOF;
- range_whole = start == 0;
- }
- }
-
- mode = CL_FSYNC_NONE;
- if (wbc->sync_mode == WB_SYNC_ALL)
- mode = CL_FSYNC_LOCAL;
-
- if (sbi->ll_umounting)
- /* if the mountpoint is being umounted, all pages have to be
- * evicted to avoid hitting LBUG when truncate_inode_pages()
- * is called later on.
- */
- ignore_layout = 1;
-
- if (!ll_i2info(inode)->lli_clob)
- return 0;
-
- result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
- if (result > 0) {
- wbc->nr_to_write -= result;
- result = 0;
- }
-
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
- if (end == OBD_OBJECT_EOF)
- mapping->writeback_index = 0;
- else
- mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
- }
- return result;
-}
-
-struct ll_cl_context *ll_cl_find(struct file *file)
-{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_cl_context *lcc;
- struct ll_cl_context *found = NULL;
-
- read_lock(&fd->fd_lock);
- list_for_each_entry(lcc, &fd->fd_lccs, lcc_list) {
- if (lcc->lcc_cookie == current) {
- found = lcc;
- break;
- }
- }
- read_unlock(&fd->fd_lock);
-
- return found;
-}
-
-void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io)
-{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
-
- memset(lcc, 0, sizeof(*lcc));
- INIT_LIST_HEAD(&lcc->lcc_list);
- lcc->lcc_cookie = current;
- lcc->lcc_env = env;
- lcc->lcc_io = io;
-
- write_lock(&fd->fd_lock);
- list_add(&lcc->lcc_list, &fd->fd_lccs);
- write_unlock(&fd->fd_lock);
-}
-
-void ll_cl_remove(struct file *file, const struct lu_env *env)
-{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
-
- write_lock(&fd->fd_lock);
- list_del_init(&lcc->lcc_list);
- write_unlock(&fd->fd_lock);
-}
-
-static int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
-{
- struct inode *inode = vvp_object_inode(page->cp_obj);
- struct ll_file_data *fd = vvp_env_io(env)->vui_fd;
- struct ll_readahead_state *ras = &fd->fd_ras;
- struct cl_2queue *queue = &io->ci_queue;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct vvp_page *vpg;
- bool uptodate;
- int rc = 0;
-
- vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
- uptodate = vpg->vpg_defer_uptodate;
-
- if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
- sbi->ll_ra_info.ra_max_pages > 0) {
- struct vvp_io *vio = vvp_env_io(env);
- enum ras_update_flags flags = 0;
-
- if (uptodate)
- flags |= LL_RAS_HIT;
- if (!vio->vui_ra_valid)
- flags |= LL_RAS_MMAP;
- ras_update(sbi, inode, ras, vvp_index(vpg), flags);
- }
-
- cl_2queue_init(queue);
- if (uptodate) {
- vpg->vpg_ra_used = 1;
- cl_page_export(env, page, 1);
- cl_page_disown(env, io, page);
- } else {
- cl_page_list_add(&queue->c2_qin, page);
- }
-
- if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
- sbi->ll_ra_info.ra_max_pages > 0) {
- int rc2;
-
- rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
- uptodate);
- CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n",
- PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
- }
-
- if (queue->c2_qin.pl_nr > 0)
- rc = cl_io_submit_rw(env, io, CRT_READ, queue);
-
- /*
- * Unlock unsent pages in case of error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
-
- return rc;
-}
-
-int ll_readpage(struct file *file, struct page *vmpage)
-{
- struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob;
- struct ll_cl_context *lcc;
- const struct lu_env *env;
- struct cl_io *io;
- struct cl_page *page;
- int result;
-
- lcc = ll_cl_find(file);
- if (!lcc) {
- unlock_page(vmpage);
- return -EIO;
- }
-
- env = lcc->lcc_env;
- io = lcc->lcc_io;
- LASSERT(io->ci_state == CIS_IO_GOING);
- page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- LASSERT(page->cp_type == CPT_CACHEABLE);
- if (likely(!PageUptodate(vmpage))) {
- cl_page_assume(env, io, page);
- result = ll_io_read_page(env, io, page);
- } else {
- /* Page from a non-object file. */
- unlock_page(vmpage);
- result = 0;
- }
- cl_page_put(env, page);
- } else {
- unlock_page(vmpage);
- result = PTR_ERR(page);
- }
- return result;
-}
-
-int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, enum cl_req_type crt)
-{
- struct cl_2queue *queue;
- int result;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- queue = &io->ci_queue;
- cl_2queue_init_page(queue, page);
-
- result = cl_io_submit_sync(env, io, crt, queue, 0);
- LASSERT(cl_page_is_owned(page, io));
-
- if (crt == CRT_READ)
- /*
- * in CRT_WRITE case page is left locked even in case of
- * error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
-
- return result;
-}
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
deleted file mode 100644
index 722e5ea1af5f..000000000000
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ /dev/null
@@ -1,641 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lustre/llite/rw26.c
- *
- * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/uaccess.h>
-
-#include <linux/migrate.h>
-#include <linux/fs.h>
-#include <linux/buffer_head.h>
-#include <linux/mpage.h>
-#include <linux/writeback.h>
-#include <linux/pagemap.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "llite_internal.h"
-
-/**
- * Implements Linux VM address_space::invalidatepage() method. This method is
- * called when the page is truncate from a file, either as a result of
- * explicit truncate, or when inode is removed from memory (as a result of
- * final iput(), umount, or memory pressure induced icache shrinking).
- *
- * [0, offset] bytes of the page remain valid (this is for a case of not-page
- * aligned truncate). Lustre leaves partially truncated page in the cache,
- * relying on struct inode::i_size to limit further accesses.
- */
-static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
- unsigned int length)
-{
- struct inode *inode;
- struct lu_env *env;
- struct cl_page *page;
- struct cl_object *obj;
-
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageWriteback(vmpage));
-
- /*
- * It is safe to not check anything in invalidatepage/releasepage
- * below because they are run with page locked and all our io is
- * happening with locked page too
- */
- if (offset == 0 && length == PAGE_SIZE) {
- /* See the comment in ll_releasepage() */
- env = cl_env_percpu_get();
- LASSERT(!IS_ERR(env));
- inode = vmpage->mapping->host;
- obj = ll_i2info(inode)->lli_clob;
- if (obj) {
- page = cl_vmpage_page(vmpage, obj);
- if (page) {
- cl_page_delete(env, page);
- cl_page_put(env, page);
- }
- } else {
- LASSERT(vmpage->private == 0);
- }
- cl_env_percpu_put(env);
- }
-}
-
-static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
-{
- struct lu_env *env;
- struct cl_object *obj;
- struct cl_page *page;
- struct address_space *mapping;
- int result = 0;
-
- LASSERT(PageLocked(vmpage));
- if (PageWriteback(vmpage) || PageDirty(vmpage))
- return 0;
-
- mapping = vmpage->mapping;
- if (!mapping)
- return 1;
-
- obj = ll_i2info(mapping->host)->lli_clob;
- if (!obj)
- return 1;
-
- /* 1 for caller, 1 for cl_page and 1 for page cache */
- if (page_count(vmpage) > 3)
- return 0;
-
- page = cl_vmpage_page(vmpage, obj);
- if (!page)
- return 1;
-
- env = cl_env_percpu_get();
- LASSERT(!IS_ERR(env));
-
- if (!cl_page_in_use(page)) {
- result = 1;
- cl_page_delete(env, page);
- }
-
- /* To use percpu env array, the call path can not be rescheduled;
- * otherwise percpu array will be messed if ll_releaspage() called
- * again on the same CPU.
- *
- * If this page holds the last refc of cl_object, the following
- * call path may cause reschedule:
- * cl_page_put -> cl_page_free -> cl_object_put ->
- * lu_object_put -> lu_object_free -> lov_delete_raid0.
- *
- * However, the kernel can't get rid of this inode until all pages have
- * been cleaned up. Now that we hold page lock here, it's pretty safe
- * that we won't get into object delete path.
- */
- LASSERT(cl_object_refc(obj) > 1);
- cl_page_put(env, page);
-
- cl_env_percpu_put(env);
- return result;
-}
-
-#define MAX_DIRECTIO_SIZE (2 * 1024 * 1024 * 1024UL)
-
-/* ll_free_user_pages - tear down page struct array
- * @pages: array of page struct pointers underlying target buffer
- */
-static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
-{
- int i;
-
- for (i = 0; i < npages; i++) {
- if (do_dirty)
- set_page_dirty_lock(pages[i]);
- put_page(pages[i]);
- }
- kvfree(pages);
-}
-
-ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
- int rw, struct inode *inode,
- struct ll_dio_pages *pv)
-{
- struct cl_page *clp;
- struct cl_2queue *queue;
- struct cl_object *obj = io->ci_obj;
- int i;
- ssize_t rc = 0;
- loff_t file_offset = pv->ldp_start_offset;
- size_t size = pv->ldp_size;
- int page_count = pv->ldp_nr;
- struct page **pages = pv->ldp_pages;
- size_t page_size = cl_page_size(obj);
- bool do_io;
- int io_pages = 0;
-
- queue = &io->ci_queue;
- cl_2queue_init(queue);
- for (i = 0; i < page_count; i++) {
- if (pv->ldp_offsets)
- file_offset = pv->ldp_offsets[i];
-
- LASSERT(!(file_offset & (page_size - 1)));
- clp = cl_page_find(env, obj, cl_index(obj, file_offset),
- pv->ldp_pages[i], CPT_TRANSIENT);
- if (IS_ERR(clp)) {
- rc = PTR_ERR(clp);
- break;
- }
-
- rc = cl_page_own(env, io, clp);
- if (rc) {
- LASSERT(clp->cp_state == CPS_FREEING);
- cl_page_put(env, clp);
- break;
- }
-
- do_io = true;
-
- /* check the page type: if the page is a host page, then do
- * write directly
- */
- if (clp->cp_type == CPT_CACHEABLE) {
- struct page *vmpage = cl_page_vmpage(clp);
- struct page *src_page;
- struct page *dst_page;
- void *src;
- void *dst;
-
- src_page = (rw == WRITE) ? pages[i] : vmpage;
- dst_page = (rw == WRITE) ? vmpage : pages[i];
-
- src = kmap_atomic(src_page);
- dst = kmap_atomic(dst_page);
- memcpy(dst, src, min(page_size, size));
- kunmap_atomic(dst);
- kunmap_atomic(src);
-
- /* make sure page will be added to the transfer by
- * cl_io_submit()->...->vvp_page_prep_write().
- */
- if (rw == WRITE)
- set_page_dirty(vmpage);
-
- if (rw == READ) {
- /* do not issue the page for read, since it
- * may reread a ra page which has NOT uptodate
- * bit set.
- */
- cl_page_disown(env, io, clp);
- do_io = false;
- }
- }
-
- if (likely(do_io)) {
- /*
- * Add a page to the incoming page list of 2-queue.
- */
- cl_page_list_add(&queue->c2_qin, clp);
-
- /*
- * Set page clip to tell transfer formation engine
- * that page has to be sent even if it is beyond KMS.
- */
- cl_page_clip(env, clp, 0, min(size, page_size));
-
- ++io_pages;
- }
-
- /* drop the reference count for cl_page_find */
- cl_page_put(env, clp);
- size -= page_size;
- file_offset += page_size;
- }
-
- if (rc == 0 && io_pages) {
- rc = cl_io_submit_sync(env, io,
- rw == READ ? CRT_READ : CRT_WRITE,
- queue, 0);
- }
- if (rc == 0)
- rc = pv->ldp_size;
-
- cl_2queue_discard(env, io, queue);
- cl_2queue_disown(env, io, queue);
- cl_2queue_fini(env, queue);
- return rc;
-}
-EXPORT_SYMBOL(ll_direct_rw_pages);
-
-static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
- int rw, struct inode *inode,
- struct address_space *mapping,
- size_t size, loff_t file_offset,
- struct page **pages, int page_count)
-{
- struct ll_dio_pages pvec = {
- .ldp_pages = pages,
- .ldp_nr = page_count,
- .ldp_size = size,
- .ldp_offsets = NULL,
- .ldp_start_offset = file_offset
- };
-
- return ll_direct_rw_pages(env, io, rw, inode, &pvec);
-}
-
-/* This is the maximum size of a single O_DIRECT request, based on the
- * kmalloc limit. We need to fit all of the brw_page structs, each one
- * representing PAGE_SIZE worth of user data, into a single buffer, and
- * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
- * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
- */
-#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
- PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct ll_cl_context *lcc;
- const struct lu_env *env;
- struct cl_io *io;
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- loff_t file_offset = iocb->ki_pos;
- ssize_t count = iov_iter_count(iter);
- ssize_t tot_bytes = 0, result = 0;
- long size = MAX_DIO_SIZE;
-
- /* Check EOF by ourselves */
- if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode))
- return 0;
-
- /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
- if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
- return -EINVAL;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
- PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
- file_offset, file_offset, count >> PAGE_SHIFT,
- MAX_DIO_SIZE >> PAGE_SHIFT);
-
- /* Check that all user buffers are aligned as well */
- if (iov_iter_alignment(iter) & ~PAGE_MASK)
- return -EINVAL;
-
- lcc = ll_cl_find(file);
- if (!lcc)
- return -EIO;
-
- env = lcc->lcc_env;
- LASSERT(!IS_ERR(env));
- io = lcc->lcc_io;
- LASSERT(io);
-
- while (iov_iter_count(iter)) {
- struct page **pages;
- size_t offs;
-
- count = min_t(size_t, iov_iter_count(iter), size);
- if (iov_iter_rw(iter) == READ) {
- if (file_offset >= i_size_read(inode))
- break;
- if (file_offset + count > i_size_read(inode))
- count = i_size_read(inode) - file_offset;
- }
-
- result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
- if (likely(result > 0)) {
- int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
-
- result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
- inode, file->f_mapping,
- result, file_offset, pages,
- n);
- ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
- }
- if (unlikely(result <= 0)) {
- /* If we can't allocate a large enough buffer
- * for the request, shrink it to a smaller
- * PAGE_SIZE multiple and try again.
- * We should always be able to kmalloc for a
- * page worth of page pointers = 4MB on i386.
- */
- if (result == -ENOMEM &&
- size > (PAGE_SIZE / sizeof(*pages)) *
- PAGE_SIZE) {
- size = ((((size / 2) - 1) |
- ~PAGE_MASK) + 1) &
- PAGE_MASK;
- CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
- size);
- continue;
- }
-
- goto out;
- }
- iov_iter_advance(iter, result);
- tot_bytes += result;
- file_offset += result;
- }
-out:
- if (tot_bytes > 0) {
- struct vvp_io *vio = vvp_env_io(env);
-
- /* no commit async for direct IO */
- vio->u.write.vui_written += tot_bytes;
- }
-
- return tot_bytes ? tot_bytes : result;
-}
-
-/**
- * Prepare partially written-to page for a write.
- */
-static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg)
-{
- struct cl_attr *attr = vvp_env_thread_attr(env);
- struct cl_object *obj = io->ci_obj;
- struct vvp_page *vpg = cl_object_page_slice(obj, pg);
- loff_t offset = cl_offset(obj, vvp_index(vpg));
- int result;
-
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
- if (result == 0) {
- /*
- * If are writing to a new page, no need to read old data.
- * The extent locking will have updated the KMS, and for our
- * purposes here we can treat it like i_size.
- */
- if (attr->cat_kms <= offset) {
- char *kaddr = kmap_atomic(vpg->vpg_page);
-
- memset(kaddr, 0, cl_page_size(obj));
- kunmap_atomic(kaddr);
- } else if (vpg->vpg_defer_uptodate) {
- vpg->vpg_ra_used = 1;
- } else {
- result = ll_page_sync_io(env, io, pg, CRT_READ);
- }
- }
- return result;
-}
-
-static int ll_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int flags,
- struct page **pagep, void **fsdata)
-{
- struct ll_cl_context *lcc;
- const struct lu_env *env = NULL;
- struct cl_io *io;
- struct cl_page *page = NULL;
- struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
- pgoff_t index = pos >> PAGE_SHIFT;
- struct page *vmpage = NULL;
- unsigned int from = pos & (PAGE_SIZE - 1);
- unsigned int to = from + len;
- int result = 0;
-
- CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);
-
- lcc = ll_cl_find(file);
- if (!lcc) {
- io = NULL;
- result = -EIO;
- goto out;
- }
-
- env = lcc->lcc_env;
- io = lcc->lcc_io;
-
- /* To avoid deadlock, try to lock page first. */
- vmpage = grab_cache_page_nowait(mapping, index);
- if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
- struct vvp_io *vio = vvp_env_io(env);
- struct cl_page_list *plist = &vio->u.write.vui_queue;
-
- /* if the page is already in dirty cache, we have to commit
- * the pages right now; otherwise, it may cause deadlock
- * because it holds page lock of a dirty page and request for
- * more grants. It's okay for the dirty page to be the first
- * one in commit page list, though.
- */
- if (vmpage && plist->pl_nr > 0) {
- unlock_page(vmpage);
- put_page(vmpage);
- vmpage = NULL;
- }
-
- /* commit pages and then wait for page lock */
- result = vvp_io_write_commit(env, io);
- if (result < 0)
- goto out;
-
- if (!vmpage) {
- vmpage = grab_cache_page_write_begin(mapping, index,
- flags);
- if (!vmpage) {
- result = -ENOMEM;
- goto out;
- }
- }
- }
-
- page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
- if (IS_ERR(page)) {
- result = PTR_ERR(page);
- goto out;
- }
-
- lcc->lcc_page = page;
- lu_ref_add(&page->cp_reference, "cl_io", io);
-
- cl_page_assume(env, io, page);
- if (!PageUptodate(vmpage)) {
- /*
- * We're completely overwriting an existing page,
- * so _don't_ set it up to date until commit_write
- */
- if (from == 0 && to == PAGE_SIZE) {
- CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
- POISON_PAGE(vmpage, 0x11);
- } else {
- /* TODO: can be optimized at OSC layer to check if it
- * is a lockless IO. In that case, it's not necessary
- * to read the data.
- */
- result = ll_prepare_partial_page(env, io, page);
- if (result == 0)
- SetPageUptodate(vmpage);
- }
- }
- if (result < 0)
- cl_page_unassume(env, io, page);
-out:
- if (result < 0) {
- if (vmpage) {
- unlock_page(vmpage);
- put_page(vmpage);
- }
- if (!IS_ERR_OR_NULL(page)) {
- lu_ref_del(&page->cp_reference, "cl_io", io);
- cl_page_put(env, page);
- }
- if (io)
- io->ci_result = result;
- } else {
- *pagep = vmpage;
- *fsdata = lcc;
- }
- return result;
-}
-
-static int ll_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
- struct page *vmpage, void *fsdata)
-{
- struct ll_cl_context *lcc = fsdata;
- const struct lu_env *env;
- struct cl_io *io;
- struct vvp_io *vio;
- struct cl_page *page;
- unsigned int from = pos & (PAGE_SIZE - 1);
- bool unplug = false;
- int result = 0;
-
- put_page(vmpage);
-
- env = lcc->lcc_env;
- page = lcc->lcc_page;
- io = lcc->lcc_io;
- vio = vvp_env_io(env);
-
- LASSERT(cl_page_is_owned(page, io));
- if (copied > 0) {
- struct cl_page_list *plist = &vio->u.write.vui_queue;
-
- lcc->lcc_page = NULL; /* page will be queued */
-
- /* Add it into write queue */
- cl_page_list_add(plist, page);
- if (plist->pl_nr == 1) /* first page */
- vio->u.write.vui_from = from;
- else
- LASSERT(from == 0);
- vio->u.write.vui_to = from + copied;
-
- /*
- * To address the deadlock in balance_dirty_pages() where
- * this dirty page may be written back in the same thread.
- */
- if (PageDirty(vmpage))
- unplug = true;
-
- /* We may have one full RPC, commit it soon */
- if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
- unplug = true;
-
- CL_PAGE_DEBUG(D_VFSTRACE, env, page,
- "queued page: %d.\n", plist->pl_nr);
- } else {
- cl_page_disown(env, io, page);
-
- lcc->lcc_page = NULL;
- lu_ref_del(&page->cp_reference, "cl_io", io);
- cl_page_put(env, page);
-
- /* page list is not contiguous now, commit it now */
- unplug = true;
- }
-
- if (unplug ||
- file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
- result = vvp_io_write_commit(env, io);
-
- if (result < 0)
- io->ci_result = result;
- return result >= 0 ? copied : result;
-}
-
-#ifdef CONFIG_MIGRATION
-static int ll_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode
- )
-{
- /* Always fail page migration until we have a proper implementation */
- return -EIO;
-}
-#endif
-
-const struct address_space_operations ll_aops = {
- .readpage = ll_readpage,
- .direct_IO = ll_direct_IO_26,
- .writepage = ll_writepage,
- .writepages = ll_writepages,
- .set_page_dirty = __set_page_dirty_nobuffers,
- .write_begin = ll_write_begin,
- .write_end = ll_write_end,
- .invalidatepage = ll_invalidatepage,
- .releasepage = (void *)ll_releasepage,
-#ifdef CONFIG_MIGRATION
- .migratepage = ll_migratepage,
-#endif
-};
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
deleted file mode 100644
index 155ce3cf6f60..000000000000
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ /dev/null
@@ -1,1577 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd_support.h>
-#include <lustre_dlm.h>
-#include "llite_internal.h"
-
-#define SA_OMITTED_ENTRY_MAX 8ULL
-
-enum se_stat {
- /** negative values are for error cases */
- SA_ENTRY_INIT = 0, /** init entry */
- SA_ENTRY_SUCC = 1, /** stat succeed */
- SA_ENTRY_INVA = 2, /** invalid entry */
-};
-
-/*
- * sa_entry is not refcounted: statahead thread allocates it and do async stat,
- * and in async stat callback ll_statahead_interpret() will add it into
- * sai_interim_entries, later statahead thread will call sa_handle_callback() to
- * instantiate entry and move it into sai_entries, and then only scanner process
- * can access and free it.
- */
-struct sa_entry {
- /* link into sai_interim_entries or sai_entries */
- struct list_head se_list;
- /* link into sai hash table locally */
- struct list_head se_hash;
- /* entry index in the sai */
- __u64 se_index;
- /* low layer ldlm lock handle */
- __u64 se_handle;
- /* entry status */
- enum se_stat se_state;
- /* entry size, contains name */
- int se_size;
- /* pointer to async getattr enqueue info */
- struct md_enqueue_info *se_minfo;
- /* pointer to the async getattr request */
- struct ptlrpc_request *se_req;
- /* pointer to the target inode */
- struct inode *se_inode;
- /* entry name */
- struct qstr se_qstr;
- /* entry fid */
- struct lu_fid se_fid;
-};
-
-static unsigned int sai_generation;
-static DEFINE_SPINLOCK(sai_generation_lock);
-
-/* sa_entry is ready to use */
-static inline int sa_ready(struct sa_entry *entry)
-{
- smp_rmb();
- return (entry->se_state != SA_ENTRY_INIT);
-}
-
-/* hash value to put in sai_cache */
-static inline int sa_hash(int val)
-{
- return val & LL_SA_CACHE_MASK;
-}
-
-/* hash entry into sai_cache */
-static inline void
-sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
-{
- int i = sa_hash(entry->se_qstr.hash);
-
- spin_lock(&sai->sai_cache_lock[i]);
- list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
- spin_unlock(&sai->sai_cache_lock[i]);
-}
-
-/*
- * Remove entry from SA table.
- */
-static inline void
-sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
-{
- int i = sa_hash(entry->se_qstr.hash);
-
- spin_lock(&sai->sai_cache_lock[i]);
- list_del_init(&entry->se_hash);
- spin_unlock(&sai->sai_cache_lock[i]);
-}
-
-static inline int agl_should_run(struct ll_statahead_info *sai,
- struct inode *inode)
-{
- return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
-}
-
-/* statahead window is full */
-static inline int sa_sent_full(struct ll_statahead_info *sai)
-{
- return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
-}
-
-/* got async stat replies */
-static inline int sa_has_callback(struct ll_statahead_info *sai)
-{
- return !list_empty(&sai->sai_interim_entries);
-}
-
-static inline int agl_list_empty(struct ll_statahead_info *sai)
-{
- return list_empty(&sai->sai_agls);
-}
-
-/**
- * (1) hit ratio less than 80%
- * or
- * (2) consecutive miss more than 8
- * then means low hit.
- */
-static inline int sa_low_hit(struct ll_statahead_info *sai)
-{
- return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
- (sai->sai_consecutive_miss > 8));
-}
-
-/*
- * if the given index is behind of statahead window more than
- * SA_OMITTED_ENTRY_MAX, then it is old.
- */
-static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
-{
- return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
- sai->sai_index);
-}
-
-/* allocate sa_entry and hash it to allow scanner process to find it */
-static struct sa_entry *
-sa_alloc(struct dentry *parent, struct ll_statahead_info *sai, __u64 index,
- const char *name, int len, const struct lu_fid *fid)
-{
- struct ll_inode_info *lli;
- struct sa_entry *entry;
- int entry_size;
- char *dname;
-
- entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
- entry = kzalloc(entry_size, GFP_NOFS);
- if (unlikely(!entry))
- return ERR_PTR(-ENOMEM);
-
- CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
- len, name, entry, index);
-
- entry->se_index = index;
- entry->se_state = SA_ENTRY_INIT;
- entry->se_size = entry_size;
- dname = (char *)entry + sizeof(struct sa_entry);
- memcpy(dname, name, len);
- dname[len] = 0;
-
- entry->se_qstr.hash = full_name_hash(parent, name, len);
- entry->se_qstr.len = len;
- entry->se_qstr.name = dname;
- entry->se_fid = *fid;
-
- lli = ll_i2info(sai->sai_dentry->d_inode);
- spin_lock(&lli->lli_sa_lock);
- INIT_LIST_HEAD(&entry->se_list);
- sa_rehash(sai, entry);
- spin_unlock(&lli->lli_sa_lock);
-
- atomic_inc(&sai->sai_cache_count);
-
- return entry;
-}
-
-/* free sa_entry, which should have been unhashed and not in any list */
-static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
-{
- CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
- entry->se_qstr.len, entry->se_qstr.name, entry,
- entry->se_index);
-
- LASSERT(list_empty(&entry->se_list));
- LASSERT(list_empty(&entry->se_hash));
-
- kfree(entry);
- atomic_dec(&sai->sai_cache_count);
-}
-
-/*
- * find sa_entry by name, used by directory scanner, lock is not needed because
- * only scanner can remove the entry from cache.
- */
-static struct sa_entry *
-sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
-{
- struct sa_entry *entry;
- int i = sa_hash(qstr->hash);
-
- list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
- if (entry->se_qstr.hash == qstr->hash &&
- entry->se_qstr.len == qstr->len &&
- memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
- return entry;
- }
- return NULL;
-}
-
-/* unhash and unlink sa_entry, and then free it */
-static inline void
-sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
-{
- struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
-
- LASSERT(!list_empty(&entry->se_hash));
- LASSERT(!list_empty(&entry->se_list));
- LASSERT(sa_ready(entry));
-
- sa_unhash(sai, entry);
-
- spin_lock(&lli->lli_sa_lock);
- list_del_init(&entry->se_list);
- spin_unlock(&lli->lli_sa_lock);
-
- if (entry->se_inode)
- iput(entry->se_inode);
-
- sa_free(sai, entry);
-}
-
-/* called by scanner after use, sa_entry will be killed */
-static void
-sa_put(struct ll_statahead_info *sai, struct sa_entry *entry, struct ll_inode_info *lli)
-{
- struct sa_entry *tmp, *next;
-
- if (entry && entry->se_state == SA_ENTRY_SUCC) {
- struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
-
- sai->sai_hit++;
- sai->sai_consecutive_miss = 0;
- sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
- } else {
- sai->sai_miss++;
- sai->sai_consecutive_miss++;
- }
-
- if (entry)
- sa_kill(sai, entry);
-
- /*
- * kill old completed entries, only scanner process does this, no need
- * to lock
- */
- list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
- if (!is_omitted_entry(sai, tmp->se_index))
- break;
- sa_kill(sai, tmp);
- }
-
- spin_lock(&lli->lli_sa_lock);
- if (sai->sai_task)
- wake_up_process(sai->sai_task);
- spin_unlock(&lli->lli_sa_lock);
-
-}
-
-/*
- * update state and sort add entry to sai_entries by index, return true if
- * scanner is waiting on this entry.
- */
-static bool
-__sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
-{
- struct list_head *pos = &sai->sai_entries;
- __u64 index = entry->se_index;
- struct sa_entry *se;
-
- LASSERT(!sa_ready(entry));
- LASSERT(list_empty(&entry->se_list));
-
- list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
- if (se->se_index < entry->se_index) {
- pos = &se->se_list;
- break;
- }
- }
- list_add(&entry->se_list, pos);
- entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC;
-
- return (index == sai->sai_index_wait);
-}
-
-/*
- * release resources used in async stat RPC, update entry state and wakeup if
- * scanner process it waiting on this entry.
- */
-static void
-sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
-{
- struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
- struct md_enqueue_info *minfo = entry->se_minfo;
- struct ptlrpc_request *req = entry->se_req;
- bool wakeup;
-
- /* release resources used in RPC */
- if (minfo) {
- entry->se_minfo = NULL;
- ll_intent_release(&minfo->mi_it);
- iput(minfo->mi_dir);
- kfree(minfo);
- }
-
- if (req) {
- entry->se_req = NULL;
- ptlrpc_req_finished(req);
- }
-
- spin_lock(&lli->lli_sa_lock);
- wakeup = __sa_make_ready(sai, entry, ret);
- spin_unlock(&lli->lli_sa_lock);
-
- if (wakeup)
- wake_up(&sai->sai_waitq);
-}
-
-/* Insert inode into the list of sai_agls. */
-static void ll_agl_add(struct ll_statahead_info *sai,
- struct inode *inode, int index)
-{
- struct ll_inode_info *child = ll_i2info(inode);
- struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode);
- int added = 0;
-
- spin_lock(&child->lli_agl_lock);
- if (child->lli_agl_index == 0) {
- child->lli_agl_index = index;
- spin_unlock(&child->lli_agl_lock);
-
- LASSERT(list_empty(&child->lli_agl_list));
-
- igrab(inode);
- spin_lock(&parent->lli_agl_lock);
- if (list_empty(&sai->sai_agls))
- added = 1;
- list_add_tail(&child->lli_agl_list, &sai->sai_agls);
- spin_unlock(&parent->lli_agl_lock);
- } else {
- spin_unlock(&child->lli_agl_lock);
- }
-
- if (added > 0)
- wake_up_process(sai->sai_agl_task);
-}
-
-/* allocate sai */
-static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry)
-{
- struct ll_inode_info *lli = ll_i2info(dentry->d_inode);
- struct ll_statahead_info *sai;
- int i;
-
- sai = kzalloc(sizeof(*sai), GFP_NOFS);
- if (!sai)
- return NULL;
-
- sai->sai_dentry = dget(dentry);
- atomic_set(&sai->sai_refcount, 1);
-
- sai->sai_max = LL_SA_RPC_MIN;
- sai->sai_index = 1;
- init_waitqueue_head(&sai->sai_waitq);
-
- INIT_LIST_HEAD(&sai->sai_interim_entries);
- INIT_LIST_HEAD(&sai->sai_entries);
- INIT_LIST_HEAD(&sai->sai_agls);
-
- for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
- INIT_LIST_HEAD(&sai->sai_cache[i]);
- spin_lock_init(&sai->sai_cache_lock[i]);
- }
- atomic_set(&sai->sai_cache_count, 0);
-
- spin_lock(&sai_generation_lock);
- lli->lli_sa_generation = ++sai_generation;
- if (unlikely(!sai_generation))
- lli->lli_sa_generation = ++sai_generation;
- spin_unlock(&sai_generation_lock);
-
- return sai;
-}
-
-/* free sai */
-static inline void ll_sai_free(struct ll_statahead_info *sai)
-{
- LASSERT(sai->sai_dentry);
- dput(sai->sai_dentry);
- kfree(sai);
-}
-
-/*
- * take refcount of sai if sai for @dir exists, which means statahead is on for
- * this directory.
- */
-static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
-{
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = NULL;
-
- spin_lock(&lli->lli_sa_lock);
- sai = lli->lli_sai;
- if (sai)
- atomic_inc(&sai->sai_refcount);
- spin_unlock(&lli->lli_sa_lock);
-
- return sai;
-}
-
-/*
- * put sai refcount after use, if refcount reaches zero, free sai and sa_entries
- * attached to it.
- */
-static void ll_sai_put(struct ll_statahead_info *sai)
-{
- struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
-
- if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
- struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
- struct sa_entry *entry, *next;
-
- lli->lli_sai = NULL;
- spin_unlock(&lli->lli_sa_lock);
-
- LASSERT(sai->sai_task == NULL);
- LASSERT(sai->sai_agl_task == NULL);
- LASSERT(sai->sai_sent == sai->sai_replied);
- LASSERT(!sa_has_callback(sai));
-
- list_for_each_entry_safe(entry, next, &sai->sai_entries,
- se_list)
- sa_kill(sai, entry);
-
- LASSERT(atomic_read(&sai->sai_cache_count) == 0);
- LASSERT(list_empty(&sai->sai_agls));
-
- ll_sai_free(sai);
- atomic_dec(&sbi->ll_sa_running);
- }
-}
-
-/* Do NOT forget to drop inode refcount when into sai_agls. */
-static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- __u64 index = lli->lli_agl_index;
- int rc;
-
- LASSERT(list_empty(&lli->lli_agl_list));
-
- /* AGL maybe fall behind statahead with one entry */
- if (is_omitted_entry(sai, index + 1)) {
- lli->lli_agl_index = 0;
- iput(inode);
- return;
- }
-
- /* Someone is in glimpse (sync or async), do nothing. */
- rc = down_write_trylock(&lli->lli_glimpse_sem);
- if (rc == 0) {
- lli->lli_agl_index = 0;
- iput(inode);
- return;
- }
-
- /*
- * Someone triggered glimpse within 1 sec before.
- * 1) The former glimpse succeeded with glimpse lock granted by OST, and
- * if the lock is still cached on client, AGL needs to do nothing. If
- * it is cancelled by other client, AGL maybe cannot obtain new lock
- * for no glimpse callback triggered by AGL.
- * 2) The former glimpse succeeded, but OST did not grant glimpse lock.
- * Under such case, it is quite possible that the OST will not grant
- * glimpse lock for AGL also.
- * 3) The former glimpse failed, compared with other two cases, it is
- * relative rare. AGL can ignore such case, and it will not muchly
- * affect the performance.
- */
- if (lli->lli_glimpse_time != 0 &&
- time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
- up_write(&lli->lli_glimpse_sem);
- lli->lli_agl_index = 0;
- iput(inode);
- return;
- }
-
- CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
- DFID ", idx = %llu\n", PFID(&lli->lli_fid), index);
-
- cl_agl(inode);
- lli->lli_agl_index = 0;
- lli->lli_glimpse_time = cfs_time_current();
- up_write(&lli->lli_glimpse_sem);
-
- CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
- DFID ", idx = %llu, rc = %d\n",
- PFID(&lli->lli_fid), index, rc);
-
- iput(inode);
-}
-
-/*
- * prepare inode for sa entry, add it into agl list, now sa_entry is ready
- * to be used by scanner process.
- */
-static void sa_instantiate(struct ll_statahead_info *sai,
- struct sa_entry *entry)
-{
- struct inode *dir = sai->sai_dentry->d_inode;
- struct inode *child;
- struct md_enqueue_info *minfo;
- struct lookup_intent *it;
- struct ptlrpc_request *req;
- struct mdt_body *body;
- int rc = 0;
-
- LASSERT(entry->se_handle != 0);
-
- minfo = entry->se_minfo;
- it = &minfo->mi_it;
- req = entry->se_req;
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!body) {
- rc = -EFAULT;
- goto out;
- }
-
- child = entry->se_inode;
- if (child) {
- /* revalidate; unlinked and re-created with the same name */
- if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->mbo_fid1))) {
- entry->se_inode = NULL;
- iput(child);
- child = NULL;
- }
- }
-
- it->it_lock_handle = entry->se_handle;
- rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
- if (rc != 1) {
- rc = -EAGAIN;
- goto out;
- }
-
- rc = ll_prep_inode(&child, req, dir->i_sb, it);
- if (rc)
- goto out;
-
- CDEBUG(D_READA, "%s: setting %.*s" DFID " l_data to inode %p\n",
- ll_get_fsname(child->i_sb, NULL, 0),
- entry->se_qstr.len, entry->se_qstr.name,
- PFID(ll_inode2fid(child)), child);
- ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
-
- entry->se_inode = child;
-
- if (agl_should_run(sai, child))
- ll_agl_add(sai, child, entry->se_index);
-
-out:
- /*
- * sa_make_ready() will drop ldlm ibits lock refcount by calling
- * ll_intent_drop_lock() in spite of failures. Do not worry about
- * calling ll_intent_drop_lock() more than once.
- */
- sa_make_ready(sai, entry, rc);
-}
-
-/* once there are async stat replies, instantiate sa_entry from replies */
-static void sa_handle_callback(struct ll_statahead_info *sai)
-{
- struct ll_inode_info *lli;
-
- lli = ll_i2info(sai->sai_dentry->d_inode);
-
- while (sa_has_callback(sai)) {
- struct sa_entry *entry;
-
- spin_lock(&lli->lli_sa_lock);
- if (unlikely(!sa_has_callback(sai))) {
- spin_unlock(&lli->lli_sa_lock);
- break;
- }
- entry = list_entry(sai->sai_interim_entries.next,
- struct sa_entry, se_list);
- list_del_init(&entry->se_list);
- spin_unlock(&lli->lli_sa_lock);
-
- sa_instantiate(sai, entry);
- }
-}
-
-/*
- * callback for async stat, because this is called in ptlrpcd context, we only
- * put sa_entry in sai_cb_entries list, and let sa_handle_callback() to really
- * prepare inode and instantiate sa_entry later.
- */
-static int ll_statahead_interpret(struct ptlrpc_request *req,
- struct md_enqueue_info *minfo, int rc)
-{
- struct lookup_intent *it = &minfo->mi_it;
- struct inode *dir = minfo->mi_dir;
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = lli->lli_sai;
- struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
- __u64 handle = 0;
-
- if (it_disposition(it, DISP_LOOKUP_NEG))
- rc = -ENOENT;
-
- /*
- * because statahead thread will wait for all inflight RPC to finish,
- * sai should be always valid, no need to refcount
- */
- LASSERT(sai);
- LASSERT(entry);
-
- CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
- entry->se_qstr.len, entry->se_qstr.name, rc);
-
- if (rc) {
- ll_intent_release(it);
- iput(dir);
- kfree(minfo);
- } else {
- /*
- * release ibits lock ASAP to avoid deadlock when statahead
- * thread enqueues lock on parent in readdir and another
- * process enqueues lock on child with parent lock held, eg.
- * unlink.
- */
- handle = it->it_lock_handle;
- ll_intent_drop_lock(it);
- }
-
- spin_lock(&lli->lli_sa_lock);
- if (rc) {
- if (__sa_make_ready(sai, entry, rc))
- wake_up(&sai->sai_waitq);
- } else {
- int first = 0;
- entry->se_minfo = minfo;
- entry->se_req = ptlrpc_request_addref(req);
- /*
- * Release the async ibits lock ASAP to avoid deadlock
- * when statahead thread tries to enqueue lock on parent
- * for readpage and other tries to enqueue lock on child
- * with parent's lock held, for example: unlink.
- */
- entry->se_handle = handle;
- if (!sa_has_callback(sai))
- first = 1;
-
- list_add_tail(&entry->se_list, &sai->sai_interim_entries);
-
- if (first && sai->sai_task)
- wake_up_process(sai->sai_task);
- }
- sai->sai_replied++;
-
- spin_unlock(&lli->lli_sa_lock);
-
- return rc;
-}
-
-/* finish async stat RPC arguments */
-static void sa_fini_data(struct md_enqueue_info *minfo)
-{
- iput(minfo->mi_dir);
- kfree(minfo);
-}
-
-/**
- * prepare arguments for async stat RPC.
- */
-static struct md_enqueue_info *
-sa_prep_data(struct inode *dir, struct inode *child, struct sa_entry *entry)
-{
- struct md_enqueue_info *minfo;
- struct ldlm_enqueue_info *einfo;
- struct md_op_data *op_data;
-
- minfo = kzalloc(sizeof(*minfo), GFP_NOFS);
- if (!minfo)
- return ERR_PTR(-ENOMEM);
-
- op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- kfree(minfo);
- return (struct md_enqueue_info *)op_data;
- }
-
- if (!child)
- op_data->op_fid2 = entry->se_fid;
-
- minfo->mi_it.it_op = IT_GETATTR;
- minfo->mi_dir = igrab(dir);
- minfo->mi_cb = ll_statahead_interpret;
- minfo->mi_cbdata = entry;
-
- einfo = &minfo->mi_einfo;
- einfo->ei_type = LDLM_IBITS;
- einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
- einfo->ei_cb_bl = ll_md_blocking_ast;
- einfo->ei_cb_cp = ldlm_completion_ast;
- einfo->ei_cb_gl = NULL;
- einfo->ei_cbdata = NULL;
-
- return minfo;
-}
-
-/* async stat for file not found in dcache */
-static int sa_lookup(struct inode *dir, struct sa_entry *entry)
-{
- struct md_enqueue_info *minfo;
- int rc;
-
- minfo = sa_prep_data(dir, NULL, entry);
- if (IS_ERR(minfo))
- return PTR_ERR(minfo);
-
- rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
- if (rc)
- sa_fini_data(minfo);
-
- return rc;
-}
-
-/**
- * async stat for file found in dcache, similar to .revalidate
- *
- * \retval 1 dentry valid, no RPC sent
- * \retval 0 dentry invalid, will send async stat RPC
- * \retval negative number upon error
- */
-static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
- struct dentry *dentry)
-{
- struct inode *inode = d_inode(dentry);
- struct lookup_intent it = { .it_op = IT_GETATTR,
- .it_lock_handle = 0 };
- struct md_enqueue_info *minfo;
- int rc;
-
- if (unlikely(!inode))
- return 1;
-
- if (d_mountpoint(dentry))
- return 1;
-
- entry->se_inode = igrab(inode);
- rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
- NULL);
- if (rc == 1) {
- entry->se_handle = it.it_lock_handle;
- ll_intent_release(&it);
- return 1;
- }
-
- minfo = sa_prep_data(dir, inode, entry);
- if (IS_ERR(minfo)) {
- entry->se_inode = NULL;
- iput(inode);
- return PTR_ERR(minfo);
- }
-
- rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
- if (rc) {
- entry->se_inode = NULL;
- iput(inode);
- sa_fini_data(minfo);
- }
-
- return rc;
-}
-
-/* async stat for file with @name */
-static void sa_statahead(struct dentry *parent, const char *name, int len,
- const struct lu_fid *fid)
-{
- struct inode *dir = d_inode(parent);
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = lli->lli_sai;
- struct dentry *dentry = NULL;
- struct sa_entry *entry;
- int rc;
-
- entry = sa_alloc(parent, sai, sai->sai_index, name, len, fid);
- if (IS_ERR(entry))
- return;
-
- dentry = d_lookup(parent, &entry->se_qstr);
- if (!dentry) {
- rc = sa_lookup(dir, entry);
- } else {
- rc = sa_revalidate(dir, entry, dentry);
- if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
- ll_agl_add(sai, d_inode(dentry), entry->se_index);
- }
-
- if (dentry)
- dput(dentry);
-
- if (rc)
- sa_make_ready(sai, entry, rc);
- else
- sai->sai_sent++;
-
- sai->sai_index++;
-}
-
-/* async glimpse (agl) thread main function */
-static int ll_agl_thread(void *arg)
-{
- struct dentry *parent = arg;
- struct inode *dir = d_inode(parent);
- struct ll_inode_info *plli = ll_i2info(dir);
- struct ll_inode_info *clli;
- /* We already own this reference, so it is safe to take it without a lock. */
- struct ll_statahead_info *sai = plli->lli_sai;
-
- CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
- sai, parent);
-
- while (!kthread_should_stop()) {
-
- spin_lock(&plli->lli_agl_lock);
- /* The statahead thread maybe help to process AGL entries,
- * so check whether list empty again.
- */
- if (!list_empty(&sai->sai_agls)) {
- clli = list_entry(sai->sai_agls.next,
- struct ll_inode_info, lli_agl_list);
- list_del_init(&clli->lli_agl_list);
- spin_unlock(&plli->lli_agl_lock);
- ll_agl_trigger(&clli->lli_vfs_inode, sai);
- } else {
- spin_unlock(&plli->lli_agl_lock);
- }
-
- set_current_state(TASK_IDLE);
- if (list_empty(&sai->sai_agls) &&
- !kthread_should_stop())
- schedule();
- __set_current_state(TASK_RUNNING);
- }
-
- spin_lock(&plli->lli_agl_lock);
- sai->sai_agl_valid = 0;
- while (!list_empty(&sai->sai_agls)) {
- clli = list_entry(sai->sai_agls.next,
- struct ll_inode_info, lli_agl_list);
- list_del_init(&clli->lli_agl_list);
- spin_unlock(&plli->lli_agl_lock);
- clli->lli_agl_index = 0;
- iput(&clli->lli_vfs_inode);
- spin_lock(&plli->lli_agl_lock);
- }
- spin_unlock(&plli->lli_agl_lock);
- CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
- sai, parent);
- ll_sai_put(sai);
- return 0;
-}
-
-/* start agl thread */
-static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
-{
- struct ll_inode_info *plli;
- struct task_struct *task;
-
- CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
- sai, parent);
-
- plli = ll_i2info(d_inode(parent));
- task = kthread_create(ll_agl_thread, parent, "ll_agl_%u",
- plli->lli_opendir_pid);
- if (IS_ERR(task)) {
- CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
- return;
- }
-
- sai->sai_agl_task = task;
- atomic_inc(&ll_i2sbi(d_inode(parent))->ll_agl_total);
- spin_lock(&plli->lli_agl_lock);
- sai->sai_agl_valid = 1;
- spin_unlock(&plli->lli_agl_lock);
- /* Get an extra reference that the thread holds */
- ll_sai_get(d_inode(parent));
-
- wake_up_process(task);
-}
-
-/* statahead thread main function */
-static int ll_statahead_thread(void *arg)
-{
- struct dentry *parent = arg;
- struct inode *dir = d_inode(parent);
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- struct ll_statahead_info *sai = lli->lli_sai;
- struct page *page = NULL;
- __u64 pos = 0;
- int first = 0;
- int rc = 0;
- struct md_op_data *op_data;
-
- CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
- sai, parent);
-
- op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
- LUSTRE_OPC_ANY, dir);
- if (IS_ERR(op_data)) {
- rc = PTR_ERR(op_data);
- goto out;
- }
-
- op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
-
- while (pos != MDS_DIR_END_OFF && sai->sai_task) {
- struct lu_dirpage *dp;
- struct lu_dirent *ent;
-
- sai->sai_in_readpage = 1;
- page = ll_get_dir_page(dir, op_data, pos);
- sai->sai_in_readpage = 0;
- if (IS_ERR(page)) {
- rc = PTR_ERR(page);
- CDEBUG(D_READA, "error reading dir " DFID " at %llu/%llu: opendir_pid = %u: rc = %d\n",
- PFID(ll_inode2fid(dir)), pos, sai->sai_index,
- lli->lli_opendir_pid, rc);
- break;
- }
-
- dp = page_address(page);
- for (ent = lu_dirent_start(dp);
- ent && sai->sai_task && !sa_low_hit(sai);
- ent = lu_dirent_next(ent)) {
- struct lu_fid fid;
- __u64 hash;
- int namelen;
- char *name;
-
- hash = le64_to_cpu(ent->lde_hash);
- if (unlikely(hash < pos))
- /*
- * Skip until we find target hash value.
- */
- continue;
-
- namelen = le16_to_cpu(ent->lde_namelen);
- if (unlikely(namelen == 0))
- /*
- * Skip dummy record.
- */
- continue;
-
- name = ent->lde_name;
- if (name[0] == '.') {
- if (namelen == 1) {
- /*
- * skip "."
- */
- continue;
- } else if (name[1] == '.' && namelen == 2) {
- /*
- * skip ".."
- */
- continue;
- } else if (!sai->sai_ls_all) {
- /*
- * skip hidden files.
- */
- sai->sai_skip_hidden++;
- continue;
- }
- }
-
- /*
- * don't stat-ahead first entry.
- */
- if (unlikely(++first == 1))
- continue;
-
- fid_le_to_cpu(&fid, &ent->lde_fid);
-
- do {
- sa_handle_callback(sai);
-
- spin_lock(&lli->lli_agl_lock);
- while (sa_sent_full(sai) &&
- !agl_list_empty(sai)) {
- struct ll_inode_info *clli;
-
- clli = list_entry(sai->sai_agls.next,
- struct ll_inode_info,
- lli_agl_list);
- list_del_init(&clli->lli_agl_list);
- spin_unlock(&lli->lli_agl_lock);
-
- ll_agl_trigger(&clli->lli_vfs_inode,
- sai);
-
- spin_lock(&lli->lli_agl_lock);
- }
- spin_unlock(&lli->lli_agl_lock);
-
- set_current_state(TASK_IDLE);
- if (sa_sent_full(sai) &&
- !sa_has_callback(sai) &&
- agl_list_empty(sai) &&
- sai->sai_task)
- /* wait for spare statahead window */
- schedule();
- __set_current_state(TASK_RUNNING);
- } while (sa_sent_full(sai) && sai->sai_task);
-
- sa_statahead(parent, name, namelen, &fid);
- }
-
- pos = le64_to_cpu(dp->ldp_hash_end);
- ll_release_page(dir, page,
- le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
-
- if (sa_low_hit(sai)) {
- rc = -EFAULT;
- atomic_inc(&sbi->ll_sa_wrong);
- CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread: pid %d\n",
- PFID(&lli->lli_fid), sai->sai_hit,
- sai->sai_miss, sai->sai_sent,
- sai->sai_replied, current_pid());
- break;
- }
- }
- ll_finish_md_op_data(op_data);
-
- if (rc < 0) {
- spin_lock(&lli->lli_sa_lock);
- sai->sai_task = NULL;
- lli->lli_sa_enabled = 0;
- spin_unlock(&lli->lli_sa_lock);
- }
-
- /*
- * statahead is finished, but statahead entries need to be cached, wait
- * for file release to stop me.
- */
- while (sai->sai_task) {
- sa_handle_callback(sai);
-
- set_current_state(TASK_IDLE);
- if (!sa_has_callback(sai) &&
- sai->sai_task)
- schedule();
- __set_current_state(TASK_RUNNING);
- }
-out:
- if (sai->sai_agl_task) {
- kthread_stop(sai->sai_agl_task);
-
- CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
- sai, (unsigned int)sai->sai_agl_task->pid);
- sai->sai_agl_task = NULL;
- }
- /*
- * wait for inflight statahead RPCs to finish, and then we can free sai
- * safely because statahead RPC will access sai data
- */
- while (sai->sai_sent != sai->sai_replied) {
- /* in case we're not woken up, timeout wait */
- schedule_timeout_idle(HZ>>3);
- }
-
- /* release resources held by statahead RPCs */
- sa_handle_callback(sai);
-
- CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
- sai, parent);
-
- spin_lock(&lli->lli_sa_lock);
- sai->sai_task = NULL;
- spin_unlock(&lli->lli_sa_lock);
-
- wake_up(&sai->sai_waitq);
- ll_sai_put(sai);
-
- do_exit(rc);
-}
-
-/* authorize opened dir handle @key to statahead */
-void ll_authorize_statahead(struct inode *dir, void *key)
-{
- struct ll_inode_info *lli = ll_i2info(dir);
-
- spin_lock(&lli->lli_sa_lock);
- if (!lli->lli_opendir_key && !lli->lli_sai) {
- /*
- * if lli_sai is not NULL, it means previous statahead is not
- * finished yet, we'd better not start a new statahead for now.
- */
- LASSERT(!lli->lli_opendir_pid);
- lli->lli_opendir_key = key;
- lli->lli_opendir_pid = current_pid();
- lli->lli_sa_enabled = 1;
- }
- spin_unlock(&lli->lli_sa_lock);
-}
-
-/*
- * deauthorize opened dir handle @key to statahead, but statahead thread may
- * still be running, notify it to quit.
- */
-void ll_deauthorize_statahead(struct inode *dir, void *key)
-{
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai;
-
- LASSERT(lli->lli_opendir_key == key);
- LASSERT(lli->lli_opendir_pid);
-
- CDEBUG(D_READA, "deauthorize statahead for " DFID "\n",
- PFID(&lli->lli_fid));
-
- spin_lock(&lli->lli_sa_lock);
- lli->lli_opendir_key = NULL;
- lli->lli_opendir_pid = 0;
- lli->lli_sa_enabled = 0;
- sai = lli->lli_sai;
- if (sai && sai->sai_task) {
- /*
- * statahead thread may not quit yet because it needs to cache
- * entries, now it's time to tell it to quit.
- */
- wake_up_process(sai->sai_task);
- sai->sai_task = NULL;
- }
- spin_unlock(&lli->lli_sa_lock);
-}
-
-enum {
- /**
- * not first dirent, or is "."
- */
- LS_NOT_FIRST_DE = 0,
- /**
- * the first non-hidden dirent
- */
- LS_FIRST_DE,
- /**
- * the first hidden dirent, that is "."
- */
- LS_FIRST_DOT_DE
-};
-
-/* file is first dirent under @dir */
-static int is_first_dirent(struct inode *dir, struct dentry *dentry)
-{
- const struct qstr *target = &dentry->d_name;
- struct md_op_data *op_data;
- struct page *page;
- __u64 pos = 0;
- int dot_de;
- int rc = LS_NOT_FIRST_DE;
-
- op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
- LUSTRE_OPC_ANY, dir);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
- /**
- * FIXME choose the start offset of the readdir
- */
- op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
-
- page = ll_get_dir_page(dir, op_data, pos);
-
- while (1) {
- struct lu_dirpage *dp;
- struct lu_dirent *ent;
-
- if (IS_ERR(page)) {
- struct ll_inode_info *lli = ll_i2info(dir);
-
- rc = PTR_ERR(page);
- CERROR("%s: error reading dir " DFID " at %llu: opendir_pid = %u : rc = %d\n",
- ll_get_fsname(dir->i_sb, NULL, 0),
- PFID(ll_inode2fid(dir)), pos,
- lli->lli_opendir_pid, rc);
- break;
- }
-
- dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent;
- ent = lu_dirent_next(ent)) {
- __u64 hash;
- int namelen;
- char *name;
-
- hash = le64_to_cpu(ent->lde_hash);
- /* The ll_get_dir_page() can return any page containing
- * the given hash which may be not the start hash.
- */
- if (unlikely(hash < pos))
- continue;
-
- namelen = le16_to_cpu(ent->lde_namelen);
- if (unlikely(namelen == 0))
- /*
- * skip dummy record.
- */
- continue;
-
- name = ent->lde_name;
- if (name[0] == '.') {
- if (namelen == 1)
- /*
- * skip "."
- */
- continue;
- else if (name[1] == '.' && namelen == 2)
- /*
- * skip ".."
- */
- continue;
- else
- dot_de = 1;
- } else {
- dot_de = 0;
- }
-
- if (dot_de && target->name[0] != '.') {
- CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
- target->len, target->name,
- namelen, name);
- continue;
- }
-
- if (target->len != namelen ||
- memcmp(target->name, name, namelen) != 0)
- rc = LS_NOT_FIRST_DE;
- else if (!dot_de)
- rc = LS_FIRST_DE;
- else
- rc = LS_FIRST_DOT_DE;
-
- ll_release_page(dir, page, false);
- goto out;
- }
- pos = le64_to_cpu(dp->ldp_hash_end);
- if (pos == MDS_DIR_END_OFF) {
- /*
- * End of directory reached.
- */
- ll_release_page(dir, page, false);
- goto out;
- } else {
- /*
- * chain is exhausted
- * Normal case: continue to the next page.
- */
- ll_release_page(dir, page,
- le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- page = ll_get_dir_page(dir, op_data, pos);
- }
- }
-out:
- ll_finish_md_op_data(op_data);
- return rc;
-}
-
-/**
- * revalidate @dentryp from statahead cache
- *
- * \param[in] dir parent directory
- * \param[in] sai sai structure
- * \param[out] dentryp pointer to dentry which will be revalidated
- * \param[in] unplug unplug statahead window only (normally for negative
- * dentry)
- * \retval 1 on success, dentry is saved in @dentryp
- * \retval 0 if revalidation failed (no proper lock on client)
- * \retval negative number upon error
- */
-static int revalidate_statahead_dentry(struct inode *dir,
- struct ll_statahead_info *sai,
- struct dentry **dentryp,
- bool unplug)
-{
- struct ll_inode_info *lli = ll_i2info(dir);
- struct sa_entry *entry = NULL;
- struct ll_dentry_data *ldd;
- int rc = 0;
-
- if ((*dentryp)->d_name.name[0] == '.') {
- if (sai->sai_ls_all ||
- sai->sai_miss_hidden >= sai->sai_skip_hidden) {
- /*
- * Hidden dentry is the first one, or statahead
- * thread does not skip so many hidden dentries
- * before "sai_ls_all" enabled as below.
- */
- } else {
- if (!sai->sai_ls_all)
- /*
- * It maybe because hidden dentry is not
- * the first one, "sai_ls_all" was not
- * set, then "ls -al" missed. Enable
- * "sai_ls_all" for such case.
- */
- sai->sai_ls_all = 1;
-
- /*
- * Such "getattr" has been skipped before
- * "sai_ls_all" enabled as above.
- */
- sai->sai_miss_hidden++;
- return -EAGAIN;
- }
- }
-
- if (unplug) {
- rc = 1;
- goto out_unplug;
- }
-
- entry = sa_get(sai, &(*dentryp)->d_name);
- if (!entry) {
- rc = -EAGAIN;
- goto out_unplug;
- }
-
- /* if statahead is busy in readdir, help it do post-work */
- if (!sa_ready(entry) && sai->sai_in_readpage)
- sa_handle_callback(sai);
-
- if (!sa_ready(entry)) {
- spin_lock(&lli->lli_sa_lock);
- sai->sai_index_wait = entry->se_index;
- spin_unlock(&lli->lli_sa_lock);
- if (0 == wait_event_idle_timeout(sai->sai_waitq,
- sa_ready(entry), 30 * HZ)) {
- /*
- * entry may not be ready, so it may be used by inflight
- * statahead RPC, don't free it.
- */
- entry = NULL;
- rc = -EAGAIN;
- goto out_unplug;
- }
- }
-
- if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode) {
- struct inode *inode = entry->se_inode;
- struct lookup_intent it = { .it_op = IT_GETATTR,
- .it_lock_handle = entry->se_handle };
- __u64 bits;
-
- rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
- ll_inode2fid(inode), &bits);
- if (rc == 1) {
- if (!(*dentryp)->d_inode) {
- struct dentry *alias;
-
- alias = ll_splice_alias(inode, *dentryp);
- if (IS_ERR(alias)) {
- ll_intent_release(&it);
- rc = PTR_ERR(alias);
- goto out_unplug;
- }
- *dentryp = alias;
- /**
- * statahead prepared this inode, transfer inode
- * refcount from sa_entry to dentry
- */
- entry->se_inode = NULL;
- } else if ((*dentryp)->d_inode != inode) {
- /* revalidate, but inode is recreated */
- CDEBUG(D_READA,
- "%s: stale dentry %pd inode " DFID ", statahead inode " DFID "\n",
- ll_get_fsname((*dentryp)->d_inode->i_sb,
- NULL, 0),
- *dentryp,
- PFID(ll_inode2fid((*dentryp)->d_inode)),
- PFID(ll_inode2fid(inode)));
- ll_intent_release(&it);
- rc = -ESTALE;
- goto out_unplug;
- }
-
- if ((bits & MDS_INODELOCK_LOOKUP) &&
- d_lustre_invalid(*dentryp))
- d_lustre_revalidate(*dentryp);
- ll_intent_release(&it);
- }
- }
-out_unplug:
- /*
- * statahead cached sa_entry can be used only once, and will be killed
- * right after use, so if lookup/revalidate accessed statahead cache,
- * set dentry ldd_sa_generation to parent lli_sa_generation, later if we
- * stat this file again, we know we've done statahead before, see
- * dentry_may_statahead().
- */
- ldd = ll_d2d(*dentryp);
- ldd->lld_sa_generation = lli->lli_sa_generation;
- sa_put(sai, entry, lli);
- return rc;
-}
-
-/**
- * start statahead thread
- *
- * \param[in] dir parent directory
- * \param[in] dentry dentry that triggers statahead, normally the first
- * dirent under @dir
- * \retval -EAGAIN on success, because when this function is
- * called, it's already in lookup call, so client should
- * do it itself instead of waiting for statahead thread
- * to do it asynchronously.
- * \retval negative number upon error
- */
-static int start_statahead_thread(struct inode *dir, struct dentry *dentry)
-{
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = NULL;
- struct task_struct *task;
- struct dentry *parent = dentry->d_parent;
- int rc;
-
- /* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
- rc = is_first_dirent(dir, dentry);
- if (rc == LS_NOT_FIRST_DE) {
- /* It is not "ls -{a}l" operation, no need statahead for it. */
- rc = -EFAULT;
- goto out;
- }
-
- sai = ll_sai_alloc(parent);
- if (!sai) {
- rc = -ENOMEM;
- goto out;
- }
-
- sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
- /*
- * if current lli_opendir_key was deauthorized, or dir re-opened by
- * another process, don't start statahead, otherwise the newly spawned
- * statahead thread won't be notified to quit.
- */
- spin_lock(&lli->lli_sa_lock);
- if (unlikely(lli->lli_sai || lli->lli_opendir_key ||
- lli->lli_opendir_pid != current->pid)) {
- spin_unlock(&lli->lli_sa_lock);
- rc = -EPERM;
- goto out;
- }
- lli->lli_sai = sai;
- spin_unlock(&lli->lli_sa_lock);
-
- atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_running);
-
- CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %pd]\n",
- current_pid(), parent);
-
- task = kthread_create(ll_statahead_thread, parent, "ll_sa_%u",
- lli->lli_opendir_pid);
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- CERROR("can't start ll_sa thread, rc : %d\n", rc);
- goto out;
- }
-
- if (ll_i2sbi(parent->d_inode)->ll_flags & LL_SBI_AGL_ENABLED)
- ll_start_agl(parent, sai);
-
- atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_total);
- sai->sai_task = task;
-
- wake_up_process(task);
-
- /*
- * We don't stat-ahead for the first dirent since we are already in
- * lookup.
- */
- return -EAGAIN;
-
-out:
- /*
- * once we start statahead thread failed, disable statahead so
- * that subsequent stat won't waste time to try it.
- */
- spin_lock(&lli->lli_sa_lock);
- lli->lli_sa_enabled = 0;
- lli->lli_sai = NULL;
- spin_unlock(&lli->lli_sa_lock);
- if (sai)
- ll_sai_free(sai);
- return rc;
-}
-
-/**
- * statahead entry function, this is called when client getattr on a file, it
- * will start statahead thread if this is the first dir entry, else revalidate
- * dentry from statahead cache.
- *
- * \param[in] dir parent directory
- * \param[out] dentryp dentry to getattr
- * \param[in] unplug unplug statahead window only (normally for negative
- * dentry)
- * \retval 1 on success
- * \retval 0 revalidation from statahead cache failed, caller needs
- * to getattr from server directly
- * \retval negative number on error, caller often ignores this and
- * then getattr from server
- */
-int ll_statahead(struct inode *dir, struct dentry **dentryp, bool unplug)
-{
- struct ll_statahead_info *sai;
-
- sai = ll_sai_get(dir);
- if (sai) {
- int rc;
-
- rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug);
- CDEBUG(D_READA, "revalidate statahead %pd: %d.\n",
- *dentryp, rc);
- ll_sai_put(sai);
- return rc;
- }
- return start_statahead_thread(dir, *dentryp);
-}
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
deleted file mode 100644
index 861e7a60f408..000000000000
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ /dev/null
@@ -1,185 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <lustre_ha.h>
-#include <lustre_dlm.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <lprocfs_status.h>
-#include "llite_internal.h"
-
-static struct kmem_cache *ll_inode_cachep;
-
-static struct inode *ll_alloc_inode(struct super_block *sb)
-{
- struct ll_inode_info *lli;
-
- ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1);
- lli = kmem_cache_zalloc(ll_inode_cachep, GFP_NOFS);
- if (!lli)
- return NULL;
-
- inode_init_once(&lli->lli_vfs_inode);
- return &lli->lli_vfs_inode;
-}
-
-static void ll_inode_destroy_callback(struct rcu_head *head)
-{
- struct inode *inode = container_of(head, struct inode, i_rcu);
- struct ll_inode_info *ptr = ll_i2info(inode);
-
- kmem_cache_free(ll_inode_cachep, ptr);
-}
-
-static void ll_destroy_inode(struct inode *inode)
-{
- call_rcu(&inode->i_rcu, ll_inode_destroy_callback);
-}
-
-/* exported operations */
-struct super_operations lustre_super_operations = {
- .alloc_inode = ll_alloc_inode,
- .destroy_inode = ll_destroy_inode,
- .evict_inode = ll_delete_inode,
- .put_super = ll_put_super,
- .statfs = ll_statfs,
- .umount_begin = ll_umount_begin,
- .remount_fs = ll_remount_fs,
- .show_options = ll_show_options,
-};
-MODULE_ALIAS_FS("lustre");
-
-static int __init lustre_init(void)
-{
- int rc;
-
- BUILD_BUG_ON(sizeof(LUSTRE_VOLATILE_HDR) !=
- LUSTRE_VOLATILE_HDR_LEN + 1);
-
- /* print an address of _any_ initialized kernel symbol from this
- * module, to allow debugging with gdb that doesn't support data
- * symbols from modules.
- */
- CDEBUG(D_INFO, "Lustre client module (%p).\n",
- &lustre_super_operations);
-
- rc = -ENOMEM;
- ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
- sizeof(struct ll_inode_info), 0,
- SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
- NULL);
- if (!ll_inode_cachep)
- goto out_cache;
-
- ll_file_data_slab = kmem_cache_create("ll_file_data",
- sizeof(struct ll_file_data), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!ll_file_data_slab)
- goto out_cache;
-
- llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
- if (IS_ERR_OR_NULL(llite_root)) {
- rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
- llite_root = NULL;
- goto out_cache;
- }
-
- llite_kset = kset_create_and_add("llite", NULL, lustre_kobj);
- if (!llite_kset) {
- rc = -ENOMEM;
- goto out_debugfs;
- }
-
- rc = vvp_global_init();
- if (rc != 0)
- goto out_sysfs;
-
- cl_inode_fini_env = cl_env_alloc(&cl_inode_fini_refcheck,
- LCT_REMEMBER | LCT_NOREF);
- if (IS_ERR(cl_inode_fini_env)) {
- rc = PTR_ERR(cl_inode_fini_env);
- goto out_vvp;
- }
-
- cl_inode_fini_env->le_ctx.lc_cookie = 0x4;
-
- rc = ll_xattr_init();
- if (rc != 0)
- goto out_inode_fini_env;
-
- lustre_register_super_ops(THIS_MODULE, ll_fill_super, ll_kill_super);
- lustre_register_client_process_config(ll_process_config);
-
- return 0;
-
-out_inode_fini_env:
- cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
-out_vvp:
- vvp_global_fini();
-out_sysfs:
- kset_unregister(llite_kset);
-out_debugfs:
- debugfs_remove(llite_root);
-out_cache:
- kmem_cache_destroy(ll_inode_cachep);
- kmem_cache_destroy(ll_file_data_slab);
- return rc;
-}
-
-static void __exit lustre_exit(void)
-{
- lustre_register_super_ops(NULL, NULL, NULL);
- lustre_register_client_process_config(NULL);
-
- debugfs_remove(llite_root);
- kset_unregister(llite_kset);
-
- ll_xattr_fini();
- cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
- vvp_global_fini();
-
- kmem_cache_destroy(ll_inode_cachep);
- kmem_cache_destroy(ll_file_data_slab);
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Client File System");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(lustre_init);
-module_exit(lustre_exit);
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
deleted file mode 100644
index 0690fdbf49f5..000000000000
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ /dev/null
@@ -1,159 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/stat.h>
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "llite_internal.h"
-
-static int ll_readlink_internal(struct inode *inode,
- struct ptlrpc_request **request, char **symname)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc, symlen = i_size_read(inode) + 1;
- struct mdt_body *body;
- struct md_op_data *op_data;
-
- *request = NULL;
-
- if (lli->lli_symlink_name) {
- int print_limit = min_t(int, PAGE_SIZE - 128, symlen);
-
- *symname = lli->lli_symlink_name;
- /* If the total CDEBUG() size is larger than a page, it
- * will print a warning to the console, avoid this by
- * printing just the last part of the symlink.
- */
- CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n",
- print_limit < symlen ? "..." : "", print_limit,
- (*symname) + symlen - print_limit, symlen);
- return 0;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, symlen,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = OBD_MD_LINKNAME;
- rc = md_getattr(sbi->ll_md_exp, op_data, request);
- ll_finish_md_op_data(op_data);
- if (rc) {
- if (rc != -ENOENT)
- CERROR("%s: inode " DFID ": rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), rc);
- goto failed;
- }
-
- body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
- if ((body->mbo_valid & OBD_MD_LINKNAME) == 0) {
- CERROR("OBD_MD_LINKNAME not set on reply\n");
- rc = -EPROTO;
- goto failed;
- }
-
- LASSERT(symlen != 0);
- if (body->mbo_eadatasize != symlen) {
- CERROR("%s: inode " DFID ": symlink length %d not expected %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), body->mbo_eadatasize - 1,
- symlen - 1);
- rc = -EPROTO;
- goto failed;
- }
-
- *symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD);
- if (!*symname ||
- strnlen(*symname, symlen) != symlen - 1) {
- /* not full/NULL terminated */
- CERROR("inode %lu: symlink not NULL terminated string of length %d\n",
- inode->i_ino, symlen - 1);
- rc = -EPROTO;
- goto failed;
- }
-
- lli->lli_symlink_name = kzalloc(symlen, GFP_NOFS);
- /* do not return an error if we cannot cache the symlink locally */
- if (lli->lli_symlink_name) {
- memcpy(lli->lli_symlink_name, *symname, symlen);
- *symname = lli->lli_symlink_name;
- }
- return 0;
-
-failed:
- return rc;
-}
-
-static void ll_put_link(void *p)
-{
- ptlrpc_req_finished(p);
-}
-
-static const char *ll_get_link(struct dentry *dentry,
- struct inode *inode,
- struct delayed_call *done)
-{
- struct ptlrpc_request *request = NULL;
- int rc;
- char *symname = NULL;
-
- if (!dentry)
- return ERR_PTR(-ECHILD);
-
- CDEBUG(D_VFSTRACE, "VFS Op\n");
- ll_inode_size_lock(inode);
- rc = ll_readlink_internal(inode, &request, &symname);
- ll_inode_size_unlock(inode);
- if (rc) {
- ptlrpc_req_finished(request);
- return ERR_PTR(rc);
- }
-
- /* symname may contain a pointer to the request message buffer,
- * we delay request releasing then.
- */
- set_delayed_call(done, ll_put_link, request);
- return symname;
-}
-
-const struct inode_operations ll_fast_symlink_inode_operations = {
- .setattr = ll_setattr,
- .get_link = ll_get_link,
- .getattr = ll_getattr,
- .permission = ll_inode_permission,
- .listxattr = ll_listxattr,
-};
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
deleted file mode 100644
index 987c03b058e6..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ /dev/null
@@ -1,659 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl_device and cl_device_type implementation for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd.h>
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-/*****************************************************************************
- *
- * Vvp device and device type functions.
- *
- */
-
-/*
- * vvp_ prefix stands for "Vfs Vm Posix". It corresponds to historical
- * "llite_" (var. "ll_") prefix.
- */
-
-static struct kmem_cache *ll_thread_kmem;
-struct kmem_cache *vvp_lock_kmem;
-struct kmem_cache *vvp_object_kmem;
-static struct kmem_cache *vvp_session_kmem;
-static struct kmem_cache *vvp_thread_kmem;
-
-static struct lu_kmem_descr vvp_caches[] = {
- {
- .ckd_cache = &ll_thread_kmem,
- .ckd_name = "ll_thread_kmem",
- .ckd_size = sizeof(struct ll_thread_info),
- },
- {
- .ckd_cache = &vvp_lock_kmem,
- .ckd_name = "vvp_lock_kmem",
- .ckd_size = sizeof(struct vvp_lock),
- },
- {
- .ckd_cache = &vvp_object_kmem,
- .ckd_name = "vvp_object_kmem",
- .ckd_size = sizeof(struct vvp_object),
- },
- {
- .ckd_cache = &vvp_session_kmem,
- .ckd_name = "vvp_session_kmem",
- .ckd_size = sizeof(struct vvp_session)
- },
- {
- .ckd_cache = &vvp_thread_kmem,
- .ckd_name = "vvp_thread_kmem",
- .ckd_size = sizeof(struct vvp_thread_info),
- },
- {
- .ckd_cache = NULL
- }
-};
-
-static void *ll_thread_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct vvp_thread_info *info;
-
- info = kmem_cache_zalloc(ll_thread_kmem, GFP_NOFS);
- if (!info)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void ll_thread_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct vvp_thread_info *info = data;
-
- kmem_cache_free(ll_thread_kmem, info);
-}
-
-struct lu_context_key ll_thread_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = ll_thread_key_init,
- .lct_fini = ll_thread_key_fini
-};
-
-static void *vvp_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct vvp_session *session;
-
- session = kmem_cache_zalloc(vvp_session_kmem, GFP_NOFS);
- if (!session)
- session = ERR_PTR(-ENOMEM);
- return session;
-}
-
-static void vvp_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct vvp_session *session = data;
-
- kmem_cache_free(vvp_session_kmem, session);
-}
-
-struct lu_context_key vvp_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = vvp_session_key_init,
- .lct_fini = vvp_session_key_fini
-};
-
-static void *vvp_thread_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct vvp_thread_info *vti;
-
- vti = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
- if (!vti)
- vti = ERR_PTR(-ENOMEM);
- return vti;
-}
-
-static void vvp_thread_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct vvp_thread_info *vti = data;
-
- kmem_cache_free(vvp_thread_kmem, vti);
-}
-
-struct lu_context_key vvp_thread_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = vvp_thread_key_init,
- .lct_fini = vvp_thread_key_fini
-};
-
-/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
-LU_TYPE_INIT_FINI(vvp, &vvp_thread_key, &ll_thread_key, &vvp_session_key);
-
-static const struct lu_device_operations vvp_lu_ops = {
- .ldo_object_alloc = vvp_object_alloc
-};
-
-static struct lu_device *vvp_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct vvp_device *vdv = lu2vvp_dev(d);
- struct cl_site *site = lu2cl_site(d->ld_site);
- struct lu_device *next = cl2lu_dev(vdv->vdv_next);
-
- if (d->ld_site) {
- cl_site_fini(site);
- kfree(site);
- }
- cl_device_fini(lu2cl_dev(d));
- kfree(vdv);
- return next;
-}
-
-static struct lu_device *vvp_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- struct vvp_device *vdv;
- struct lu_device *lud;
- struct cl_site *site;
- int rc;
-
- vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
- if (!vdv)
- return ERR_PTR(-ENOMEM);
-
- lud = &vdv->vdv_cl.cd_lu_dev;
- cl_device_init(&vdv->vdv_cl, t);
- vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
-
- site = kzalloc(sizeof(*site), GFP_NOFS);
- if (site) {
- rc = cl_site_init(site, &vdv->vdv_cl);
- if (rc == 0) {
- rc = lu_site_init_finish(&site->cs_lu);
- } else {
- LASSERT(!lud->ld_site);
- CERROR("Cannot init lu_site, rc %d.\n", rc);
- kfree(site);
- }
- } else {
- rc = -ENOMEM;
- }
- if (rc != 0) {
- vvp_device_free(env, lud);
- lud = ERR_PTR(rc);
- }
- return lud;
-}
-
-static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- struct vvp_device *vdv;
- int rc;
-
- vdv = lu2vvp_dev(d);
- vdv->vdv_next = lu2cl_dev(next);
-
- LASSERT(d->ld_site && next->ld_type);
- next->ld_site = d->ld_site;
- rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
- next->ld_type->ldt_name,
- NULL);
- if (rc == 0) {
- lu_device_get(next);
- lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
- }
- return rc;
-}
-
-static struct lu_device *vvp_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
-}
-
-static const struct lu_device_type_operations vvp_device_type_ops = {
- .ldto_init = vvp_type_init,
- .ldto_fini = vvp_type_fini,
-
- .ldto_start = vvp_type_start,
- .ldto_stop = vvp_type_stop,
-
- .ldto_device_alloc = vvp_device_alloc,
- .ldto_device_free = vvp_device_free,
- .ldto_device_init = vvp_device_init,
- .ldto_device_fini = vvp_device_fini,
-};
-
-struct lu_device_type vvp_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_VVP_NAME,
- .ldt_ops = &vvp_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD
-};
-
-/**
- * A mutex serializing calls to vvp_inode_fini() under extreme memory
- * pressure, when environments cannot be allocated.
- */
-int vvp_global_init(void)
-{
- int rc;
-
- rc = lu_kmem_init(vvp_caches);
- if (rc != 0)
- return rc;
-
- rc = lu_device_type_init(&vvp_device_type);
- if (rc != 0)
- goto out_kmem;
-
- return 0;
-
-out_kmem:
- lu_kmem_fini(vvp_caches);
-
- return rc;
-}
-
-void vvp_global_fini(void)
-{
- lu_device_type_fini(&vvp_device_type);
- lu_kmem_fini(vvp_caches);
-}
-
-/*****************************************************************************
- *
- * mirror obd-devices into cl devices.
- *
- */
-
-int cl_sb_init(struct super_block *sb)
-{
- struct ll_sb_info *sbi;
- struct cl_device *cl;
- struct lu_env *env;
- int rc = 0;
- u16 refcheck;
-
- sbi = ll_s2sbi(sb);
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- cl = cl_type_setup(env, NULL, &vvp_device_type,
- sbi->ll_dt_exp->exp_obd->obd_lu_dev);
- if (!IS_ERR(cl)) {
- sbi->ll_cl = cl;
- sbi->ll_site = cl2lu_dev(cl)->ld_site;
- }
- cl_env_put(env, &refcheck);
- } else {
- rc = PTR_ERR(env);
- }
- return rc;
-}
-
-int cl_sb_fini(struct super_block *sb)
-{
- struct ll_sb_info *sbi;
- struct lu_env *env;
- struct cl_device *cld;
- u16 refcheck;
- int result;
-
- sbi = ll_s2sbi(sb);
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- cld = sbi->ll_cl;
-
- if (cld) {
- cl_stack_fini(env, cld);
- sbi->ll_cl = NULL;
- sbi->ll_site = NULL;
- }
- cl_env_put(env, &refcheck);
- result = 0;
- } else {
- CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
- result = PTR_ERR(env);
- }
- return result;
-}
-
-/****************************************************************************
- *
- * debugfs/lustre/llite/$MNT/dump_page_cache
- *
- ****************************************************************************/
-
-/*
- * To represent contents of a page cache as a byte stream, following
- * information if encoded in 64bit offset:
- *
- * - file hash bucket in lu_site::ls_hash[] 28bits
- *
- * - how far file is from bucket head 4bits
- *
- * - page index 32bits
- *
- * First two data identify a file in the cache uniquely.
- */
-
-#define PGC_OBJ_SHIFT (32 + 4)
-#define PGC_DEPTH_SHIFT (32)
-
-struct vvp_pgcache_id {
- unsigned int vpi_bucket;
- unsigned int vpi_depth;
- u32 vpi_index;
-
- unsigned int vpi_curdep;
- struct lu_object_header *vpi_obj;
-};
-
-static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
-{
- BUILD_BUG_ON(sizeof(pos) != sizeof(__u64));
-
- id->vpi_index = pos & 0xffffffff;
- id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf;
- id->vpi_bucket = (unsigned long long)pos >> PGC_OBJ_SHIFT;
-}
-
-static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
-{
- return
- ((__u64)id->vpi_index) |
- ((__u64)id->vpi_depth << PGC_DEPTH_SHIFT) |
- ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
-}
-
-static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *data)
-{
- struct vvp_pgcache_id *id = data;
- struct lu_object_header *hdr = cfs_hash_object(hs, hnode);
-
- if (id->vpi_curdep-- > 0)
- return 0; /* continue */
-
- if (lu_object_is_dying(hdr))
- return 1;
-
- cfs_hash_get(hs, hnode);
- id->vpi_obj = hdr;
- return 1;
-}
-
-static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
- struct lu_device *dev,
- struct vvp_pgcache_id *id)
-{
- LASSERT(lu_device_is_cl(dev));
-
- id->vpi_depth &= 0xf;
- id->vpi_obj = NULL;
- id->vpi_curdep = id->vpi_depth;
-
- cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
- vvp_pgcache_obj_get, id);
- if (id->vpi_obj) {
- struct lu_object *lu_obj;
-
- lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
- if (lu_obj) {
- lu_object_ref_add(lu_obj, "dump", current);
- return lu2cl(lu_obj);
- }
- lu_object_put(env, lu_object_top(id->vpi_obj));
-
- } else if (id->vpi_curdep > 0) {
- id->vpi_depth = 0xf;
- }
- return NULL;
-}
-
-static loff_t vvp_pgcache_find(const struct lu_env *env,
- struct lu_device *dev, loff_t pos)
-{
- struct cl_object *clob;
- struct lu_site *site;
- struct vvp_pgcache_id id;
-
- site = dev->ld_site;
- vvp_pgcache_id_unpack(pos, &id);
-
- while (1) {
- if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
- return ~0ULL;
- clob = vvp_pgcache_obj(env, dev, &id);
- if (clob) {
- struct inode *inode = vvp_object_inode(clob);
- struct page *vmpage;
- int nr;
-
- nr = find_get_pages_contig(inode->i_mapping,
- id.vpi_index, 1, &vmpage);
- if (nr > 0) {
- id.vpi_index = vmpage->index;
- /* Cant support over 16T file */
- nr = !(vmpage->index > 0xffffffff);
- put_page(vmpage);
- }
-
- lu_object_ref_del(&clob->co_lu, "dump", current);
- cl_object_put(env, clob);
- if (nr > 0)
- return vvp_pgcache_id_pack(&id);
- }
- /* to the next object. */
- ++id.vpi_depth;
- id.vpi_depth &= 0xf;
- if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
- return ~0ULL;
- id.vpi_index = 0;
- }
-}
-
-#define seq_page_flag(seq, page, flag, has_flags) do { \
- if (test_bit(PG_##flag, &(page)->flags)) { \
- seq_printf(seq, "%s"#flag, has_flags ? "|" : ""); \
- has_flags = 1; \
- } \
-} while (0)
-
-static void vvp_pgcache_page_show(const struct lu_env *env,
- struct seq_file *seq, struct cl_page *page)
-{
- struct vvp_page *vpg;
- struct page *vmpage;
- int has_flags;
-
- vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
- vmpage = vpg->vpg_page;
- seq_printf(seq, " %5i | %p %p %s %s %s | %p " DFID "(%p) %lu %u [",
- 0 /* gen */,
- vpg, page,
- "none",
- vpg->vpg_defer_uptodate ? "du" : "- ",
- PageWriteback(vmpage) ? "wb" : "-",
- vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
- vmpage->mapping->host, vmpage->index,
- page_count(vmpage));
- has_flags = 0;
- seq_page_flag(seq, vmpage, locked, has_flags);
- seq_page_flag(seq, vmpage, error, has_flags);
- seq_page_flag(seq, vmpage, referenced, has_flags);
- seq_page_flag(seq, vmpage, uptodate, has_flags);
- seq_page_flag(seq, vmpage, dirty, has_flags);
- seq_page_flag(seq, vmpage, writeback, has_flags);
- seq_printf(seq, "%s]\n", has_flags ? "" : "-");
-}
-
-static int vvp_pgcache_show(struct seq_file *f, void *v)
-{
- loff_t pos;
- struct ll_sb_info *sbi;
- struct cl_object *clob;
- struct lu_env *env;
- struct vvp_pgcache_id id;
- u16 refcheck;
- int result;
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- pos = *(loff_t *)v;
- vvp_pgcache_id_unpack(pos, &id);
- sbi = f->private;
- clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
- if (clob) {
- struct inode *inode = vvp_object_inode(clob);
- struct cl_page *page = NULL;
- struct page *vmpage;
-
- result = find_get_pages_contig(inode->i_mapping,
- id.vpi_index, 1,
- &vmpage);
- if (result > 0) {
- lock_page(vmpage);
- page = cl_vmpage_page(vmpage, clob);
- unlock_page(vmpage);
- put_page(vmpage);
- }
-
- seq_printf(f, "%8x@" DFID ": ", id.vpi_index,
- PFID(lu_object_fid(&clob->co_lu)));
- if (page) {
- vvp_pgcache_page_show(env, f, page);
- cl_page_put(env, page);
- } else {
- seq_puts(f, "missing\n");
- }
- lu_object_ref_del(&clob->co_lu, "dump", current);
- cl_object_put(env, clob);
- } else {
- seq_printf(f, "%llx missing\n", pos);
- }
- cl_env_put(env, &refcheck);
- result = 0;
- } else {
- result = PTR_ERR(env);
- }
- return result;
-}
-
-static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
-{
- struct ll_sb_info *sbi;
- struct lu_env *env;
- u16 refcheck;
-
- sbi = f->private;
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- sbi = f->private;
- if (sbi->ll_site->ls_obj_hash->hs_cur_bits >
- 64 - PGC_OBJ_SHIFT) {
- pos = ERR_PTR(-EFBIG);
- } else {
- *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
- *pos);
- if (*pos == ~0ULL)
- pos = NULL;
- }
- cl_env_put(env, &refcheck);
- }
- return pos;
-}
-
-static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
-{
- struct ll_sb_info *sbi;
- struct lu_env *env;
- u16 refcheck;
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- sbi = f->private;
- *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
- if (*pos == ~0ULL)
- pos = NULL;
- cl_env_put(env, &refcheck);
- }
- return pos;
-}
-
-static void vvp_pgcache_stop(struct seq_file *f, void *v)
-{
- /* Nothing to do */
-}
-
-static const struct seq_operations vvp_pgcache_ops = {
- .start = vvp_pgcache_start,
- .next = vvp_pgcache_next,
- .stop = vvp_pgcache_stop,
- .show = vvp_pgcache_show
-};
-
-static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
-{
- struct seq_file *seq;
- int rc;
-
- rc = seq_open(filp, &vvp_pgcache_ops);
- if (rc)
- return rc;
-
- seq = filp->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-const struct file_operations vvp_dump_pgcache_file_ops = {
- .owner = THIS_MODULE,
- .open = vvp_dump_pgcache_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
deleted file mode 100644
index 02ea5161d635..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ /dev/null
@@ -1,321 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2013, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Internal definitions for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#ifndef VVP_INTERNAL_H
-#define VVP_INTERNAL_H
-
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <cl_object.h>
-
-enum obd_notify_event;
-struct inode;
-struct lustre_md;
-struct obd_device;
-struct obd_export;
-struct page;
-
-/**
- * IO state private to IO state private to VVP layer.
- */
-struct vvp_io {
- /** super class */
- struct cl_io_slice vui_cl;
- struct cl_io_lock_link vui_link;
- /**
- * I/O vector information to or from which read/write is going.
- */
- struct iov_iter *vui_iter;
- /**
- * Total size for the left IO.
- */
- size_t vui_tot_count;
-
- union {
- struct vvp_fault_io {
- /**
- * Inode modification time that is checked across DLM
- * lock request.
- */
- time64_t ft_mtime;
- struct vm_area_struct *ft_vma;
- /**
- * locked page returned from vvp_io
- */
- struct page *ft_vmpage;
- /**
- * kernel fault info
- */
- struct vm_fault *ft_vmf;
- /**
- * fault API used bitflags for return code.
- */
- unsigned int ft_flags;
- /**
- * check that flags are from filemap_fault
- */
- bool ft_flags_valid;
- } fault;
- struct {
- struct cl_page_list vui_queue;
- unsigned long vui_written;
- int vui_from;
- int vui_to;
- } write;
- } u;
-
- /**
- * Layout version when this IO is initialized
- */
- __u32 vui_layout_gen;
- /**
- * File descriptor against which IO is done.
- */
- struct ll_file_data *vui_fd;
- struct kiocb *vui_iocb;
-
- /* Readahead state. */
- pgoff_t vui_ra_start;
- pgoff_t vui_ra_count;
- /* Set when vui_ra_{start,count} have been initialized. */
- bool vui_ra_valid;
-};
-
-extern struct lu_device_type vvp_device_type;
-
-extern struct lu_context_key vvp_session_key;
-extern struct lu_context_key vvp_thread_key;
-
-extern struct kmem_cache *vvp_lock_kmem;
-extern struct kmem_cache *vvp_object_kmem;
-
-struct vvp_thread_info {
- struct cl_lock vti_lock;
- struct cl_lock_descr vti_descr;
- struct cl_io vti_io;
- struct cl_attr vti_attr;
-};
-
-static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
-{
- struct vvp_thread_info *vti;
-
- vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
- LASSERT(vti);
-
- return vti;
-}
-
-static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
-{
- struct cl_lock *lock = &vvp_env_info(env)->vti_lock;
-
- memset(lock, 0, sizeof(*lock));
- return lock;
-}
-
-static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
-{
- struct cl_attr *attr = &vvp_env_info(env)->vti_attr;
-
- memset(attr, 0, sizeof(*attr));
-
- return attr;
-}
-
-static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
-{
- struct cl_io *io = &vvp_env_info(env)->vti_io;
-
- memset(io, 0, sizeof(*io));
-
- return io;
-}
-
-struct vvp_session {
- struct vvp_io cs_ios;
-};
-
-static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
-{
- struct vvp_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &vvp_session_key);
- LASSERT(ses);
-
- return ses;
-}
-
-static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
-{
- return &vvp_env_session(env)->cs_ios;
-}
-
-/**
- * ccc-private object state.
- */
-struct vvp_object {
- struct cl_object_header vob_header;
- struct cl_object vob_cl;
- struct inode *vob_inode;
-
- /**
- * Number of transient pages. This is no longer protected by i_sem,
- * and needs to be atomic. This is not actually used for anything,
- * and can probably be removed.
- */
- atomic_t vob_transient_pages;
-
- /**
- * Number of outstanding mmaps on this file.
- *
- * \see ll_vm_open(), ll_vm_close().
- */
- atomic_t vob_mmap_cnt;
-
- /**
- * various flags
- * vob_discard_page_warned
- * if pages belonging to this object are discarded when a client
- * is evicted, some debug info will be printed, this flag will be set
- * during processing the first discarded page, then avoid flooding
- * debug message for lots of discarded pages.
- *
- * \see ll_dirty_page_discard_warn.
- */
- unsigned int vob_discard_page_warned:1;
-};
-
-/**
- * VVP-private page state.
- */
-struct vvp_page {
- struct cl_page_slice vpg_cl;
- unsigned int vpg_defer_uptodate:1,
- vpg_ra_used:1;
- /** VM page */
- struct page *vpg_page;
-};
-
-static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
-{
- return container_of(slice, struct vvp_page, vpg_cl);
-}
-
-static inline pgoff_t vvp_index(struct vvp_page *vvp)
-{
- return vvp->vpg_cl.cpl_index;
-}
-
-struct vvp_device {
- struct cl_device vdv_cl;
- struct cl_device *vdv_next;
-};
-
-struct vvp_lock {
- struct cl_lock_slice vlk_cl;
-};
-
-void *ccc_key_init(const struct lu_context *ctx,
- struct lu_context_key *key);
-void ccc_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
-
-void ccc_umount(const struct lu_env *env, struct cl_device *dev);
-
-static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
-{
- return &vdv->vdv_cl.cd_lu_dev;
-}
-
-static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
-{
- return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev);
-}
-
-static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
-{
- return container_of0(d, struct vvp_device, vdv_cl);
-}
-
-static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
-{
- return container_of0(obj, struct vvp_object, vob_cl);
-}
-
-static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
-{
- return container_of0(obj, struct vvp_object, vob_cl.co_lu);
-}
-
-static inline struct inode *vvp_object_inode(const struct cl_object *obj)
-{
- return cl2vvp(obj)->vob_inode;
-}
-
-int vvp_object_invariant(const struct cl_object *obj);
-struct vvp_object *cl_inode2vvp(struct inode *inode);
-
-static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
-{
- return cl2vvp_page(slice)->vpg_page;
-}
-
-static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
-{
- return container_of(slice, struct vvp_lock, vlk_cl);
-}
-
-# define CLOBINVRNT(env, clob, expr) \
- ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
-
-int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
-int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index);
-struct lu_object *vvp_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev);
-
-int vvp_global_init(void);
-void vvp_global_fini(void);
-
-extern const struct file_operations vvp_dump_pgcache_file_ops;
-
-#endif /* VVP_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
deleted file mode 100644
index e7a4778e02e4..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ /dev/null
@@ -1,1374 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_io for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd.h>
-
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct vvp_io *vio;
-
- vio = container_of(slice, struct vvp_io, vui_cl);
- LASSERT(vio == vvp_env_io(env));
-
- return vio;
-}
-
-/**
- * For swapping layout. The file's layout may have changed.
- * To avoid populating pages to a wrong stripe, we have to verify the
- * correctness of layout. It works because swapping layout processes
- * have to acquire group lock.
- */
-static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
- struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct vvp_io *vio = vvp_env_io(env);
- bool rc = true;
-
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- /* don't need lock here to check lli_layout_gen as we have held
- * extent lock and GROUP lock has to hold to swap layout
- */
- if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
- OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
- io->ci_need_restart = 1;
- /* this will cause a short read/write */
- io->ci_continue = 0;
- rc = false;
- }
- case CIT_FAULT:
- /* fault is okay because we've already had a page. */
- default:
- break;
- }
-
- return rc;
-}
-
-static void vvp_object_size_lock(struct cl_object *obj)
-{
- struct inode *inode = vvp_object_inode(obj);
-
- ll_inode_size_lock(inode);
- cl_object_attr_lock(obj);
-}
-
-static void vvp_object_size_unlock(struct cl_object *obj)
-{
- struct inode *inode = vvp_object_inode(obj);
-
- cl_object_attr_unlock(obj);
- ll_inode_size_unlock(inode);
-}
-
-/**
- * Helper function that if necessary adjusts file size (inode->i_size), when
- * position at the offset \a pos is accessed. File size can be arbitrary stale
- * on a Lustre client, but client at least knows KMS. If accessed area is
- * inside [0, KMS], set file size to KMS, otherwise glimpse file size.
- *
- * Locking: cl_isize_lock is used to serialize changes to inode size and to
- * protect consistency between inode size and cl_object
- * attributes. cl_object_size_lock() protects consistency between cl_attr's of
- * top-object and sub-objects.
- */
-static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count,
- int *exceed)
-{
- struct cl_attr *attr = vvp_env_thread_attr(env);
- struct inode *inode = vvp_object_inode(obj);
- loff_t pos = start + count - 1;
- loff_t kms;
- int result;
-
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being accessed and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock already acquired by
- * the caller, because to change the class, other client has to take
- * DLM lock conflicting with our lock. Also, any updates to ->i_size
- * by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- vvp_object_size_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- if (result == 0) {
- kms = attr->cat_kms;
- if (pos > kms) {
- /*
- * A glimpse is necessary to determine whether we
- * return a short read (B) or some zeroes at the end
- * of the buffer (C)
- */
- vvp_object_size_unlock(obj);
- result = cl_glimpse_lock(env, io, inode, obj, 0);
- if (result == 0 && exceed) {
- /* If objective page index exceed end-of-file
- * page index, return directly. Do not expect
- * kernel will check such case correctly.
- * linux-2.6.18-128.1.1 miss to do that.
- * --bug 17336
- */
- loff_t size = i_size_read(inode);
- loff_t cur_index = start >> PAGE_SHIFT;
- loff_t size_index = (size - 1) >> PAGE_SHIFT;
-
- if ((size == 0 && cur_index != 0) ||
- size_index < cur_index)
- *exceed = 1;
- }
- return result;
- }
- /*
- * region is within kms and, hence, within real file
- * size (A). We need to increase i_size to cover the
- * read region so that generic_file_read() will do its
- * job, but that doesn't mean the kms size is
- * _correct_, it is only the _minimum_ size. If
- * someone does a stat they will get the correct size
- * which will always be >= the kms value here.
- * b=11081
- */
- if (i_size_read(inode) < kms) {
- i_size_write(inode, kms);
- CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
- PFID(lu_object_fid(&obj->co_lu)),
- (__u64)i_size_read(inode));
- }
- }
-
- vvp_object_size_unlock(obj);
-
- return result;
-}
-
-/*****************************************************************************
- *
- * io operations.
- *
- */
-
-static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- pgoff_t start, pgoff_t end)
-{
- struct vvp_io *vio = vvp_env_io(env);
- struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
- struct cl_object *obj = io->ci_obj;
-
- CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
-
- memset(&vio->vui_link, 0, sizeof(vio->vui_link));
-
- if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- descr->cld_mode = CLM_GROUP;
- descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid;
- enqflags |= CEF_LOCK_MATCH;
- } else {
- descr->cld_mode = mode;
- }
- descr->cld_obj = obj;
- descr->cld_start = start;
- descr->cld_end = end;
- descr->cld_enq_flags = enqflags;
-
- cl_io_lock_add(env, io, &vio->vui_link);
- return 0;
-}
-
-static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- loff_t start, loff_t end)
-{
- struct cl_object *obj = io->ci_obj;
-
- return vvp_io_one_lock_index(env, io, enqflags, mode,
- cl_index(obj, start), cl_index(obj, end));
-}
-
-static int vvp_io_write_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
-
- cl_page_list_init(&vio->u.write.vui_queue);
- vio->u.write.vui_written = 0;
- vio->u.write.vui_from = 0;
- vio->u.write.vui_to = PAGE_SIZE;
-
- return 0;
-}
-
-static void vvp_io_write_iter_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
-
- LASSERT(vio->u.write.vui_queue.pl_nr == 0);
-}
-
-static int vvp_io_fault_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct inode *inode = vvp_object_inode(ios->cis_obj);
-
- LASSERT(inode == file_inode(vio->vui_fd->fd_file));
- vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
- return 0;
-}
-
-static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct inode *inode = vvp_object_inode(obj);
-
- CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, DFID
- " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
- PFID(lu_object_fid(&obj->co_lu)),
- io->ci_ignore_layout, io->ci_verify_layout,
- vio->vui_layout_gen, io->ci_restore_needed);
-
- if (io->ci_restore_needed) {
- int rc;
-
- /* file was detected release, we need to restore it
- * before finishing the io
- */
- rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
- /* if restore registration failed, no restart,
- * we will return -ENODATA
- */
- /* The layout will change after restore, so we need to
- * block on layout lock hold by the MDT
- * as MDT will not send new layout in lvb (see LU-3124)
- * we have to explicitly fetch it, all this will be done
- * by ll_layout_refresh()
- */
- if (rc == 0) {
- io->ci_restore_needed = 0;
- io->ci_need_restart = 1;
- io->ci_verify_layout = 1;
- } else {
- io->ci_restore_needed = 1;
- io->ci_need_restart = 0;
- io->ci_verify_layout = 0;
- io->ci_result = rc;
- }
- }
-
- if (!io->ci_ignore_layout && io->ci_verify_layout) {
- __u32 gen = 0;
-
- /* check layout version */
- ll_layout_refresh(inode, &gen);
- io->ci_need_restart = vio->vui_layout_gen != gen;
- if (io->ci_need_restart) {
- CDEBUG(D_VFSTRACE,
- DFID " layout changed from %d to %d.\n",
- PFID(lu_object_fid(&obj->co_lu)),
- vio->vui_layout_gen, gen);
- /* today successful restore is the only possible case */
- /* restore was done, clear restoring state */
- clear_bit(LLIF_FILE_RESTORING,
- &ll_i2info(inode)->lli_flags);
- }
- }
-}
-
-static void vvp_io_fault_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct cl_page *page = io->u.ci_fault.ft_page;
-
- CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
-
- if (page) {
- lu_ref_del(&page->cp_reference, "fault", io);
- cl_page_put(env, page);
- io->u.ci_fault.ft_page = NULL;
- }
- vvp_io_fini(env, ios);
-}
-
-static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
-{
- /*
- * we only want to hold PW locks if the mmap() can generate
- * writes back to the file and that only happens in shared
- * writable vmas
- */
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
- return CLM_WRITE;
- return CLM_READ;
-}
-
-static int vvp_mmap_locks(const struct lu_env *env,
- struct vvp_io *vio, struct cl_io *io)
-{
- struct vvp_thread_info *cti = vvp_env_info(env);
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- struct cl_lock_descr *descr = &cti->vti_descr;
- union ldlm_policy_data policy;
- unsigned long addr;
- ssize_t count;
- int result = 0;
- struct iov_iter i;
- struct iovec iov;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- if (!vio->vui_iter) /* nfs or loop back device write */
- return 0;
-
- /* No MM (e.g. NFS)? No vmas too. */
- if (!mm)
- return 0;
-
- iov_for_each(iov, i, *vio->vui_iter) {
- addr = (unsigned long)iov.iov_base;
- count = iov.iov_len;
- if (count == 0)
- continue;
-
- count += addr & (~PAGE_MASK);
- addr &= PAGE_MASK;
-
- down_read(&mm->mmap_sem);
- while ((vma = our_vma(mm, addr, count)) != NULL) {
- struct inode *inode = file_inode(vma->vm_file);
- int flags = CEF_MUST;
-
- if (ll_file_nolock(vma->vm_file)) {
- /*
- * For no lock case is not allowed for mmap
- */
- result = -EINVAL;
- break;
- }
-
- /*
- * XXX: Required lock mode can be weakened: CIT_WRITE
- * io only ever reads user level buffer, and CIT_READ
- * only writes on it.
- */
- policy_from_vma(&policy, vma, addr, count);
- descr->cld_mode = vvp_mode_from_vma(vma);
- descr->cld_obj = ll_i2info(inode)->lli_clob;
- descr->cld_start = cl_index(descr->cld_obj,
- policy.l_extent.start);
- descr->cld_end = cl_index(descr->cld_obj,
- policy.l_extent.end);
- descr->cld_enq_flags = flags;
- result = cl_io_lock_alloc_add(env, io, descr);
-
- CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
- descr->cld_mode, descr->cld_start,
- descr->cld_end);
-
- if (result < 0)
- break;
-
- if (vma->vm_end - addr >= count)
- break;
-
- count -= vma->vm_end - addr;
- addr = vma->vm_end;
- }
- up_read(&mm->mmap_sem);
- if (result < 0)
- break;
- }
- return result;
-}
-
-static void vvp_io_advance(const struct lu_env *env,
- const struct cl_io_slice *ios,
- size_t nob)
-{
- struct cl_object *obj = ios->cis_io->ci_obj;
- struct vvp_io *vio = cl2vvp_io(env, ios);
-
- CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
- vio->vui_tot_count -= nob;
- iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
-}
-
-static void vvp_io_update_iov(const struct lu_env *env,
- struct vvp_io *vio, struct cl_io *io)
-{
- size_t size = io->u.ci_rw.crw_count;
-
- if (!vio->vui_iter)
- return;
-
- iov_iter_truncate(vio->vui_iter, size);
-}
-
-static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
- enum cl_lock_mode mode, loff_t start, loff_t end)
-{
- struct vvp_io *vio = vvp_env_io(env);
- int result;
- int ast_flags = 0;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- vvp_io_update_iov(env, vio, io);
-
- if (io->u.ci_rw.crw_nonblock)
- ast_flags |= CEF_NONBLOCK;
- result = vvp_mmap_locks(env, vio, io);
- if (result == 0)
- result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
- return result;
-}
-
-static int vvp_io_read_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
- int result;
-
- result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
- rd->crw_pos + rd->crw_count - 1);
-
- return result;
-}
-
-static int vvp_io_fault_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct vvp_io *vio = cl2vvp_io(env, ios);
- /*
- * XXX LDLM_FL_CBPENDING
- */
- return vvp_io_one_lock_index(env,
- io, 0,
- vvp_mode_from_vma(vio->u.fault.ft_vma),
- io->u.ci_fault.ft_index,
- io->u.ci_fault.ft_index);
-}
-
-static int vvp_io_write_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- loff_t start;
- loff_t end;
-
- if (io->u.ci_wr.wr_append) {
- start = 0;
- end = OBD_OBJECT_EOF;
- } else {
- start = io->u.ci_wr.wr.crw_pos;
- end = start + io->u.ci_wr.wr.crw_count - 1;
- }
- return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
-}
-
-static int vvp_io_setattr_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- return 0;
-}
-
-/**
- * Implementation of cl_io_operations::vio_lock() method for CIT_SETATTR io.
- *
- * Handles "lockless io" mode when extent locking is done by server.
- */
-static int vvp_io_setattr_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- __u64 new_size;
- __u32 enqflags = 0;
-
- if (cl_io_is_trunc(io)) {
- new_size = io->u.ci_setattr.sa_attr.lvb_size;
- if (new_size == 0)
- enqflags = CEF_DISCARD_DATA;
- } else {
- unsigned int valid = io->u.ci_setattr.sa_valid;
-
- if (!(valid & TIMES_SET_FLAGS))
- return 0;
-
- if ((!(valid & ATTR_MTIME) ||
- io->u.ci_setattr.sa_attr.lvb_mtime >=
- io->u.ci_setattr.sa_attr.lvb_ctime) &&
- (!(valid & ATTR_ATIME) ||
- io->u.ci_setattr.sa_attr.lvb_atime >=
- io->u.ci_setattr.sa_attr.lvb_ctime))
- return 0;
- new_size = 0;
- }
-
- return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
- new_size, OBD_OBJECT_EOF);
-}
-
-static int vvp_do_vmtruncate(struct inode *inode, size_t size)
-{
- int result;
- /*
- * Only ll_inode_size_lock is taken at this level.
- */
- ll_inode_size_lock(inode);
- result = inode_newsize_ok(inode, size);
- if (result < 0) {
- ll_inode_size_unlock(inode);
- return result;
- }
- truncate_setsize(inode, size);
- ll_inode_size_unlock(inode);
- return result;
-}
-
-static int vvp_io_setattr_time(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct cl_attr *attr = vvp_env_thread_attr(env);
- int result;
- unsigned valid = CAT_CTIME;
-
- cl_object_attr_lock(obj);
- attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
- if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
- attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
- valid |= CAT_ATIME;
- }
- if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
- attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
- valid |= CAT_MTIME;
- }
- result = cl_object_attr_update(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
-
- return result;
-}
-
-static int vvp_io_setattr_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct inode *inode = vvp_object_inode(io->ci_obj);
- struct ll_inode_info *lli = ll_i2info(inode);
-
- if (cl_io_is_trunc(io)) {
- down_write(&lli->lli_trunc_sem);
- inode_lock(inode);
- inode_dio_wait(inode);
- } else {
- inode_lock(inode);
- }
-
- if (io->u.ci_setattr.sa_valid & TIMES_SET_FLAGS)
- return vvp_io_setattr_time(env, ios);
-
- return 0;
-}
-
-static void vvp_io_setattr_end(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct inode *inode = vvp_object_inode(io->ci_obj);
- struct ll_inode_info *lli = ll_i2info(inode);
-
- if (cl_io_is_trunc(io)) {
- /* Truncate in memory pages - they must be clean pages
- * because osc has already notified to destroy osc_extents.
- */
- vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
- inode_unlock(inode);
- up_write(&lli->lli_trunc_sem);
- } else {
- inode_unlock(inode);
- }
-}
-
-static void vvp_io_setattr_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- bool restore_needed = ios->cis_io->ci_restore_needed;
- struct inode *inode = vvp_object_inode(ios->cis_obj);
-
- vvp_io_fini(env, ios);
-
- if (restore_needed && !ios->cis_io->ci_restore_needed) {
- /* restore finished, set data modified flag for HSM */
- set_bit(LLIF_DATA_MODIFIED, &(ll_i2info(inode))->lli_flags);
- }
-}
-
-static int vvp_io_read_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = vvp_object_inode(obj);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct file *file = vio->vui_fd->fd_file;
-
- int result;
- loff_t pos = io->u.ci_rd.rd.crw_pos;
- long cnt = io->u.ci_rd.rd.crw_count;
- long tot = vio->vui_tot_count;
- int exceed = 0;
-
- CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
-
- down_read(&lli->lli_trunc_sem);
-
- if (!can_populate_pages(env, io, inode))
- return 0;
-
- result = vvp_prep_size(env, obj, io, pos, tot, &exceed);
- if (result != 0)
- return result;
- if (exceed != 0)
- goto out;
-
- LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
- "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
- inode->i_ino, cnt, pos, i_size_read(inode));
-
- /* turn off the kernel's read-ahead */
- vio->vui_fd->fd_file->f_ra.ra_pages = 0;
-
- /* initialize read-ahead window once per syscall */
- if (!vio->vui_ra_valid) {
- vio->vui_ra_valid = true;
- vio->vui_ra_start = cl_index(obj, pos);
- vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
- ll_ras_enter(file);
- }
-
- /* BUG: 5972 */
- file_accessed(file);
- LASSERT(vio->vui_iocb->ki_pos == pos);
- result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
-
-out:
- if (result >= 0) {
- if (result < cnt)
- io->ci_continue = 0;
- io->ci_nob += result;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- vio->vui_fd, pos, result, READ);
- result = 0;
- }
- return result;
-}
-
-static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *plist, int from, int to)
-{
- struct cl_2queue *queue = &io->ci_queue;
- struct cl_page *page;
- unsigned int bytes = 0;
- int rc = 0;
-
- if (plist->pl_nr == 0)
- return 0;
-
- if (from > 0 || to != PAGE_SIZE) {
- page = cl_page_list_first(plist);
- if (plist->pl_nr == 1) {
- cl_page_clip(env, page, from, to);
- } else {
- if (from > 0)
- cl_page_clip(env, page, from, PAGE_SIZE);
- if (to != PAGE_SIZE) {
- page = cl_page_list_last(plist);
- cl_page_clip(env, page, 0, to);
- }
- }
- }
-
- cl_2queue_init(queue);
- cl_page_list_splice(plist, &queue->c2_qin);
- rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
-
- /* plist is not sorted any more */
- cl_page_list_splice(&queue->c2_qin, plist);
- cl_page_list_splice(&queue->c2_qout, plist);
- cl_2queue_fini(env, queue);
-
- if (rc == 0) {
- /* calculate bytes */
- bytes = plist->pl_nr << PAGE_SHIFT;
- bytes -= from + PAGE_SIZE - to;
-
- while (plist->pl_nr > 0) {
- page = cl_page_list_first(plist);
- cl_page_list_del(env, plist, page);
-
- cl_page_clip(env, page, 0, PAGE_SIZE);
-
- SetPageUptodate(cl_page_vmpage(page));
- cl_page_disown(env, io, page);
-
- /* held in ll_cl_init() */
- lu_ref_del(&page->cp_reference, "cl_io", io);
- cl_page_put(env, page);
- }
- }
-
- return bytes > 0 ? bytes : rc;
-}
-
-static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
-{
- struct page *vmpage = page->cp_vmpage;
-
- SetPageUptodate(vmpage);
- set_page_dirty(vmpage);
-
- cl_page_disown(env, io, page);
-
- /* held in ll_cl_init() */
- lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
- cl_page_put(env, page);
-}
-
-/* make sure the page list is contiguous */
-static bool page_list_sanity_check(struct cl_object *obj,
- struct cl_page_list *plist)
-{
- struct cl_page *page;
- pgoff_t index = CL_PAGE_EOF;
-
- cl_page_list_for_each(page, plist) {
- struct vvp_page *vpg = cl_object_page_slice(obj, page);
-
- if (index == CL_PAGE_EOF) {
- index = vvp_index(vpg);
- continue;
- }
-
- ++index;
- if (index == vvp_index(vpg))
- continue;
-
- return false;
- }
- return true;
-}
-
-/* Return how many bytes have queued or written */
-int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
-{
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = vvp_object_inode(obj);
- struct vvp_io *vio = vvp_env_io(env);
- struct cl_page_list *queue = &vio->u.write.vui_queue;
- struct cl_page *page;
- int rc = 0;
- int bytes = 0;
- unsigned int npages = vio->u.write.vui_queue.pl_nr;
-
- if (npages == 0)
- return 0;
-
- CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
- npages, vio->u.write.vui_from, vio->u.write.vui_to);
-
- LASSERT(page_list_sanity_check(obj, queue));
-
- /* submit IO with async write */
- rc = cl_io_commit_async(env, io, queue,
- vio->u.write.vui_from, vio->u.write.vui_to,
- write_commit_callback);
- npages -= queue->pl_nr; /* already committed pages */
- if (npages > 0) {
- /* calculate how many bytes were written */
- bytes = npages << PAGE_SHIFT;
-
- /* first page */
- bytes -= vio->u.write.vui_from;
- if (queue->pl_nr == 0) /* last page */
- bytes -= PAGE_SIZE - vio->u.write.vui_to;
- LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
-
- vio->u.write.vui_written += bytes;
-
- CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
- npages, bytes, vio->u.write.vui_written);
-
- /* the first page must have been written. */
- vio->u.write.vui_from = 0;
- }
- LASSERT(page_list_sanity_check(obj, queue));
- LASSERT(ergo(rc == 0, queue->pl_nr == 0));
-
- /* out of quota, try sync write */
- if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
- rc = vvp_io_commit_sync(env, io, queue,
- vio->u.write.vui_from,
- vio->u.write.vui_to);
- if (rc > 0) {
- vio->u.write.vui_written += rc;
- rc = 0;
- }
- }
-
- /* update inode size */
- ll_merge_attr(env, inode);
-
- /* Now the pages in queue were failed to commit, discard them
- * unless they were dirtied before.
- */
- while (queue->pl_nr > 0) {
- page = cl_page_list_first(queue);
- cl_page_list_del(env, queue, page);
-
- if (!PageDirty(cl_page_vmpage(page)))
- cl_page_discard(env, io, page);
-
- cl_page_disown(env, io, page);
-
- /* held in ll_cl_init() */
- lu_ref_del(&page->cp_reference, "cl_io", io);
- cl_page_put(env, page);
- }
- cl_page_list_fini(env, queue);
-
- return rc;
-}
-
-static int vvp_io_write_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = vvp_object_inode(obj);
- struct ll_inode_info *lli = ll_i2info(inode);
- ssize_t result = 0;
- loff_t pos = io->u.ci_wr.wr.crw_pos;
- size_t cnt = io->u.ci_wr.wr.crw_count;
-
- down_read(&lli->lli_trunc_sem);
-
- if (!can_populate_pages(env, io, inode))
- return 0;
-
- if (cl_io_is_append(io)) {
- /*
- * PARALLEL IO This has to be changed for parallel IO doing
- * out-of-order writes.
- */
- ll_merge_attr(env, inode);
- pos = i_size_read(inode);
- io->u.ci_wr.wr.crw_pos = pos;
- vio->vui_iocb->ki_pos = pos;
- } else {
- LASSERT(vio->vui_iocb->ki_pos == pos);
- }
-
- CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
-
- /*
- * The maximum Lustre file size is variable, based on the OST maximum
- * object size and number of stripes. This needs another check in
- * addition to the VFS checks earlier.
- */
- if (pos + cnt > ll_file_maxbytes(inode)) {
- CDEBUG(D_INODE,
- "%s: file " DFID " offset %llu > maxbytes %llu\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), pos + cnt,
- ll_file_maxbytes(inode));
- return -EFBIG;
- }
-
- if (!vio->vui_iter) {
- /* from a temp io in ll_cl_init(). */
- result = 0;
- } else {
- /*
- * When using the locked AIO function (generic_file_aio_write())
- * testing has shown the inode mutex to be a limiting factor
- * with multi-threaded single shared file performance. To get
- * around this, we now use the lockless version. To maintain
- * consistency, proper locking to protect against writes,
- * trucates, etc. is handled in the higher layers of lustre.
- */
- bool lock_node = !IS_NOSEC(inode);
-
- if (lock_node)
- inode_lock(inode);
- result = __generic_file_write_iter(vio->vui_iocb,
- vio->vui_iter);
- if (lock_node)
- inode_unlock(inode);
-
- if (result > 0 || result == -EIOCBQUEUED)
- result = generic_write_sync(vio->vui_iocb, result);
- }
-
- if (result > 0) {
- result = vvp_io_write_commit(env, io);
- if (vio->u.write.vui_written > 0) {
- result = vio->u.write.vui_written;
- io->ci_nob += result;
-
- CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",
- io->ci_nob, result);
- }
- }
- if (result > 0) {
- set_bit(LLIF_DATA_MODIFIED, &(ll_i2info(inode))->lli_flags);
-
- if (result < cnt)
- io->ci_continue = 0;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- vio->vui_fd, pos, result, WRITE);
- result = 0;
- }
- return result;
-}
-
-static void vvp_io_rw_end(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct inode *inode = vvp_object_inode(ios->cis_obj);
- struct ll_inode_info *lli = ll_i2info(inode);
-
- up_read(&lli->lli_trunc_sem);
-}
-
-static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
-{
- struct vm_fault *vmf = cfio->ft_vmf;
-
- cfio->ft_flags = filemap_fault(vmf);
- cfio->ft_flags_valid = 1;
-
- if (vmf->page) {
- CDEBUG(D_PAGE,
- "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
- vmf->page, vmf->page->mapping, vmf->page->index,
- (long)vmf->page->flags, page_count(vmf->page),
- page_private(vmf->page), (void *)vmf->address);
- if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
- lock_page(vmf->page);
- cfio->ft_flags |= VM_FAULT_LOCKED;
- }
-
- cfio->ft_vmpage = vmf->page;
- return 0;
- }
-
- if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
- CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", (void *)vmf->address);
- return -EFAULT;
- }
-
- if (cfio->ft_flags & VM_FAULT_OOM) {
- CDEBUG(D_PAGE, "got addr %p - OOM\n", (void *)vmf->address);
- return -ENOMEM;
- }
-
- if (cfio->ft_flags & VM_FAULT_RETRY)
- return -EAGAIN;
-
- CERROR("Unknown error in page fault %d!\n", cfio->ft_flags);
- return -EINVAL;
-}
-
-static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
-{
- set_page_dirty(page->cp_vmpage);
-}
-
-static int vvp_io_fault_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = vvp_object_inode(obj);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_fault_io *fio = &io->u.ci_fault;
- struct vvp_fault_io *cfio = &vio->u.fault;
- loff_t offset;
- int result = 0;
- struct page *vmpage = NULL;
- struct cl_page *page;
- loff_t size;
- pgoff_t last_index;
-
- down_read(&lli->lli_trunc_sem);
-
- /* offset of the last byte on the page */
- offset = cl_offset(obj, fio->ft_index + 1) - 1;
- LASSERT(cl_index(obj, offset) == fio->ft_index);
- result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
- if (result != 0)
- return result;
-
- /* must return locked page */
- if (fio->ft_mkwrite) {
- LASSERT(cfio->ft_vmpage);
- lock_page(cfio->ft_vmpage);
- } else {
- result = vvp_io_kernel_fault(cfio);
- if (result != 0)
- return result;
- }
-
- vmpage = cfio->ft_vmpage;
- LASSERT(PageLocked(vmpage));
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
- ll_invalidate_page(vmpage);
-
- size = i_size_read(inode);
- /* Though we have already held a cl_lock upon this page, but
- * it still can be truncated locally.
- */
- if (unlikely((vmpage->mapping != inode->i_mapping) ||
- (page_offset(vmpage) > size))) {
- CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
-
- /* return +1 to stop cl_io_loop() and ll_fault() will catch
- * and retry.
- */
- result = 1;
- goto out;
- }
-
- last_index = cl_index(obj, size - 1);
-
- if (fio->ft_mkwrite) {
- /*
- * Capture the size while holding the lli_trunc_sem from above
- * we want to make sure that we complete the mkwrite action
- * while holding this lock. We need to make sure that we are
- * not past the end of the file.
- */
- if (last_index < fio->ft_index) {
- CDEBUG(D_PAGE,
- "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
- vmpage->mapping, fio->ft_index, last_index);
- /*
- * We need to return if we are
- * passed the end of the file. This will propagate
- * up the call stack to ll_page_mkwrite where
- * we will return VM_FAULT_NOPAGE. Any non-negative
- * value returned here will be silently
- * converted to 0. If the vmpage->mapping is null
- * the error code would be converted back to ENODATA
- * in ll_page_mkwrite0. Thus we return -ENODATA
- * to handle both cases
- */
- result = -ENODATA;
- goto out;
- }
- }
-
- page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
- if (IS_ERR(page)) {
- result = PTR_ERR(page);
- goto out;
- }
-
- /* if page is going to be written, we should add this page into cache
- * earlier.
- */
- if (fio->ft_mkwrite) {
- wait_on_page_writeback(vmpage);
- if (!PageDirty(vmpage)) {
- struct cl_page_list *plist = &io->ci_queue.c2_qin;
- struct vvp_page *vpg = cl_object_page_slice(obj, page);
- int to = PAGE_SIZE;
-
- /* vvp_page_assume() calls wait_on_page_writeback(). */
- cl_page_assume(env, io, page);
-
- cl_page_list_init(plist);
- cl_page_list_add(plist, page);
-
- /* size fixup */
- if (last_index == vvp_index(vpg))
- to = size & ~PAGE_MASK;
-
- /* Do not set Dirty bit here so that in case IO is
- * started before the page is really made dirty, we
- * still have chance to detect it.
- */
- result = cl_io_commit_async(env, io, plist, 0, to,
- mkwrite_commit_callback);
- LASSERT(cl_page_is_owned(page, io));
- cl_page_list_fini(env, plist);
-
- vmpage = NULL;
- if (result < 0) {
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
-
- cl_page_put(env, page);
-
- /* we're in big trouble, what can we do now? */
- if (result == -EDQUOT)
- result = -ENOSPC;
- goto out;
- } else {
- cl_page_disown(env, io, page);
- }
- }
- }
-
- /*
- * The ft_index is only used in the case of
- * a mkwrite action. We need to check
- * our assertions are correct, since
- * we should have caught this above
- */
- LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
- if (fio->ft_index == last_index)
- /*
- * Last page is mapped partially.
- */
- fio->ft_nob = size - cl_offset(obj, fio->ft_index);
- else
- fio->ft_nob = cl_page_size(obj);
-
- lu_ref_add(&page->cp_reference, "fault", io);
- fio->ft_page = page;
-
-out:
- /* return unlocked vmpage to avoid deadlocking */
- if (vmpage)
- unlock_page(vmpage);
-
- cfio->ft_flags &= ~VM_FAULT_LOCKED;
-
- return result;
-}
-
-static void vvp_io_fault_end(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct inode *inode = vvp_object_inode(ios->cis_obj);
- struct ll_inode_info *lli = ll_i2info(inode);
-
- CLOBINVRNT(env, ios->cis_io->ci_obj,
- vvp_object_invariant(ios->cis_io->ci_obj));
- up_read(&lli->lli_trunc_sem);
-}
-
-static int vvp_io_fsync_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- /* we should mark TOWRITE bit to each dirty page in radix tree to
- * verify pages have been written, but this is difficult because of
- * race.
- */
- return 0;
-}
-
-static int vvp_io_read_ahead(const struct lu_env *env,
- const struct cl_io_slice *ios,
- pgoff_t start, struct cl_read_ahead *ra)
-{
- int result = 0;
-
- if (ios->cis_io->ci_type == CIT_READ ||
- ios->cis_io->ci_type == CIT_FAULT) {
- struct vvp_io *vio = cl2vvp_io(env, ios);
-
- if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- ra->cra_end = CL_PAGE_EOF;
- result = 1; /* no need to call down */
- }
- }
-
- return result;
-}
-
-static const struct cl_io_operations vvp_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_fini = vvp_io_fini,
- .cio_lock = vvp_io_read_lock,
- .cio_start = vvp_io_read_start,
- .cio_end = vvp_io_rw_end,
- .cio_advance = vvp_io_advance,
- },
- [CIT_WRITE] = {
- .cio_fini = vvp_io_fini,
- .cio_iter_init = vvp_io_write_iter_init,
- .cio_iter_fini = vvp_io_write_iter_fini,
- .cio_lock = vvp_io_write_lock,
- .cio_start = vvp_io_write_start,
- .cio_end = vvp_io_rw_end,
- .cio_advance = vvp_io_advance,
- },
- [CIT_SETATTR] = {
- .cio_fini = vvp_io_setattr_fini,
- .cio_iter_init = vvp_io_setattr_iter_init,
- .cio_lock = vvp_io_setattr_lock,
- .cio_start = vvp_io_setattr_start,
- .cio_end = vvp_io_setattr_end
- },
- [CIT_FAULT] = {
- .cio_fini = vvp_io_fault_fini,
- .cio_iter_init = vvp_io_fault_iter_init,
- .cio_lock = vvp_io_fault_lock,
- .cio_start = vvp_io_fault_start,
- .cio_end = vvp_io_fault_end,
- },
- [CIT_FSYNC] = {
- .cio_start = vvp_io_fsync_start,
- .cio_fini = vvp_io_fini
- },
- [CIT_MISC] = {
- .cio_fini = vvp_io_fini
- }
- },
- .cio_read_ahead = vvp_io_read_ahead,
-};
-
-int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- struct vvp_io *vio = vvp_env_io(env);
- struct inode *inode = vvp_object_inode(obj);
- int result;
-
- CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, DFID
- " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
- PFID(lu_object_fid(&obj->co_lu)),
- io->ci_ignore_layout, io->ci_verify_layout,
- vio->vui_layout_gen, io->ci_restore_needed);
-
- CL_IO_SLICE_CLEAN(vio, vui_cl);
- cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
- vio->vui_ra_valid = false;
- result = 0;
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
- size_t count;
- struct ll_inode_info *lli = ll_i2info(inode);
-
- count = io->u.ci_rw.crw_count;
- /* "If nbyte is 0, read() will return 0 and have no other
- * results." -- Single Unix Spec
- */
- if (count == 0)
- result = 1;
- else
- vio->vui_tot_count = count;
-
- /* for read/write, we store the jobid in the inode, and
- * it'll be fetched by osc when building RPC.
- *
- * it's not accurate if the file is shared by different
- * jobs.
- */
- lustre_get_jobid(lli->lli_jobid);
- } else if (io->ci_type == CIT_SETATTR) {
- if (!cl_io_is_trunc(io))
- io->ci_lockreq = CILR_MANDATORY;
- }
-
- /* Enqueue layout lock and get layout version. We need to do this
- * even for operations requiring to open file, such as read and write,
- * because it might not grant layout lock in IT_OPEN.
- */
- if (result == 0 && !io->ci_ignore_layout) {
- result = ll_layout_refresh(inode, &vio->vui_layout_gen);
- if (result == -ENOENT)
- /* If the inode on MDS has been removed, but the objects
- * on OSTs haven't been destroyed (async unlink), layout
- * fetch will return -ENOENT, we'd ignore this error
- * and continue with dirty flush. LU-3230.
- */
- result = 0;
- if (result < 0)
- CERROR("%s: refresh file layout " DFID " error %d.\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(lu_object_fid(&obj->co_lu)), result);
- }
-
- return result;
-}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
deleted file mode 100644
index 4b6c7143bd2c..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2014, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd_support.h>
-
-#include "vvp_internal.h"
-
-/*****************************************************************************
- *
- * Vvp lock functions.
- *
- */
-
-static void vvp_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
-{
- struct vvp_lock *vlk = cl2vvp_lock(slice);
-
- kmem_cache_free(vvp_lock_kmem, vlk);
-}
-
-static int vvp_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, struct cl_sync_io *anchor)
-{
- CLOBINVRNT(env, slice->cls_obj, vvp_object_invariant(slice->cls_obj));
-
- return 0;
-}
-
-static const struct cl_lock_operations vvp_lock_ops = {
- .clo_fini = vvp_lock_fini,
- .clo_enqueue = vvp_lock_enqueue,
-};
-
-int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *unused)
-{
- struct vvp_lock *vlk;
- int result;
-
- CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
- vlk = kmem_cache_zalloc(vvp_lock_kmem, GFP_NOFS);
- if (vlk) {
- cl_lock_slice_add(lock, &vlk->vlk_cl, obj, &vvp_lock_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
deleted file mode 100644
index 05ad3b322a29..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ /dev/null
@@ -1,305 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl_object implementation for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd.h>
-
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-/*****************************************************************************
- *
- * Object operations.
- *
- */
-
-int vvp_object_invariant(const struct cl_object *obj)
-{
- struct inode *inode = vvp_object_inode(obj);
- struct ll_inode_info *lli = ll_i2info(inode);
-
- return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
- lli->lli_clob == obj;
-}
-
-static int vvp_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- struct vvp_object *obj = lu2vvp(o);
- struct inode *inode = obj->vob_inode;
- struct ll_inode_info *lli;
-
- (*p)(env, cookie, "(%d %d) inode: %p ",
- atomic_read(&obj->vob_transient_pages),
- atomic_read(&obj->vob_mmap_cnt), inode);
- if (inode) {
- lli = ll_i2info(inode);
- (*p)(env, cookie, "%lu/%u %o %u %d %p " DFID,
- inode->i_ino, inode->i_generation, inode->i_mode,
- inode->i_nlink, atomic_read(&inode->i_count),
- lli->lli_clob, PFID(&lli->lli_fid));
- }
- return 0;
-}
-
-static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- struct inode *inode = vvp_object_inode(obj);
-
- /*
- * lov overwrites most of these fields in
- * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
- * attributes are newer.
- */
-
- attr->cat_size = i_size_read(inode);
- attr->cat_mtime = inode->i_mtime.tv_sec;
- attr->cat_atime = inode->i_atime.tv_sec;
- attr->cat_ctime = inode->i_ctime.tv_sec;
- attr->cat_blocks = inode->i_blocks;
- attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
- attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
- /* KMS is not known by this layer */
- return 0; /* layers below have to fill in the rest */
-}
-
-static int vvp_attr_update(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned int valid)
-{
- struct inode *inode = vvp_object_inode(obj);
-
- if (valid & CAT_UID)
- inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
- if (valid & CAT_GID)
- inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
- if (valid & CAT_ATIME)
- inode->i_atime.tv_sec = attr->cat_atime;
- if (valid & CAT_MTIME)
- inode->i_mtime.tv_sec = attr->cat_mtime;
- if (valid & CAT_CTIME)
- inode->i_ctime.tv_sec = attr->cat_ctime;
- if (0 && valid & CAT_SIZE)
- i_size_write(inode, attr->cat_size);
- /* not currently necessary */
- if (0 && valid & (CAT_UID | CAT_GID | CAT_SIZE))
- mark_inode_dirty(inode);
- return 0;
-}
-
-static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- struct ll_inode_info *lli = ll_i2info(conf->coc_inode);
-
- if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
- CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
- PFID(&lli->lli_fid));
-
- ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
-
- /* Clean up page mmap for this inode.
- * The reason for us to do this is that if the page has
- * already been installed into memory space, the process
- * can access it without interacting with lustre, so this
- * page may be stale due to layout change, and the process
- * will never be notified.
- * This operation is expensive but mmap processes have to pay
- * a price themselves.
- */
- unmap_mapping_range(conf->coc_inode->i_mapping,
- 0, OBD_OBJECT_EOF, 0);
- }
-
- return 0;
-}
-
-static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
-{
- struct inode *inode = vvp_object_inode(obj);
- int rc;
-
- rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
- if (rc < 0) {
- CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
- PFID(lu_object_fid(&obj->co_lu)), rc);
- return rc;
- }
-
- truncate_inode_pages(inode->i_mapping, 0);
- return 0;
-}
-
-static int vvp_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb)
-{
- struct inode *inode = vvp_object_inode(obj);
-
- lvb->lvb_mtime = LTIME_S(inode->i_mtime);
- lvb->lvb_atime = LTIME_S(inode->i_atime);
- lvb->lvb_ctime = LTIME_S(inode->i_ctime);
- /*
- * LU-417: Add dirty pages block count lest i_blocks reports 0, some
- * "cp" or "tar" on remote node may think it's a completely sparse file
- * and skip it.
- */
- if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
- lvb->lvb_blocks = dirty_cnt(inode);
- return 0;
-}
-
-static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj,
- struct cl_req_attr *attr)
-{
- u64 valid_flags = OBD_MD_FLTYPE;
- struct inode *inode;
- struct obdo *oa;
-
- oa = attr->cra_oa;
- inode = vvp_object_inode(obj);
-
- if (attr->cra_type == CRT_WRITE)
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLUID | OBD_MD_FLGID;
- obdo_from_inode(oa, inode, valid_flags & attr->cra_flags);
- obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
- if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
- oa->o_parent_oid++;
- memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid, LUSTRE_JOBID_SIZE);
-}
-
-static const struct cl_object_operations vvp_ops = {
- .coo_page_init = vvp_page_init,
- .coo_lock_init = vvp_lock_init,
- .coo_io_init = vvp_io_init,
- .coo_attr_get = vvp_attr_get,
- .coo_attr_update = vvp_attr_update,
- .coo_conf_set = vvp_conf_set,
- .coo_prune = vvp_prune,
- .coo_glimpse = vvp_object_glimpse,
- .coo_req_attr_set = vvp_req_attr_set
-};
-
-static int vvp_object_init0(const struct lu_env *env,
- struct vvp_object *vob,
- const struct cl_object_conf *conf)
-{
- vob->vob_inode = conf->coc_inode;
- atomic_set(&vob->vob_transient_pages, 0);
- cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
- return 0;
-}
-
-static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
- struct vvp_object *vob = lu2vvp(obj);
- struct lu_object *below;
- struct lu_device *under;
- int result;
-
- under = &dev->vdv_next->cd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
- if (below) {
- const struct cl_object_conf *cconf;
-
- cconf = lu2cl_conf(conf);
- lu_object_add(obj, below);
- result = vvp_object_init0(env, vob, cconf);
- } else {
- result = -ENOMEM;
- }
-
- return result;
-}
-
-static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct vvp_object *vob = lu2vvp(obj);
-
- lu_object_fini(obj);
- lu_object_header_fini(obj->lo_header);
- kmem_cache_free(vvp_object_kmem, vob);
-}
-
-static const struct lu_object_operations vvp_lu_obj_ops = {
- .loo_object_init = vvp_object_init,
- .loo_object_free = vvp_object_free,
- .loo_object_print = vvp_object_print,
-};
-
-struct vvp_object *cl_inode2vvp(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_object *obj = lli->lli_clob;
- struct lu_object *lu;
-
- lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
- LASSERT(lu);
- return lu2vvp(lu);
-}
-
-struct lu_object *vvp_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
-{
- struct vvp_object *vob;
- struct lu_object *obj;
-
- vob = kmem_cache_zalloc(vvp_object_kmem, GFP_NOFS);
- if (vob) {
- struct cl_object_header *hdr;
-
- obj = &vob->vob_cl.co_lu;
- hdr = &vob->vob_header;
- cl_object_header_init(hdr);
- hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
-
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
-
- vob->vob_cl.co_ops = &vvp_ops;
- obj->lo_ops = &vvp_lu_obj_ops;
- } else {
- obj = NULL;
- }
- return obj;
-}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
deleted file mode 100644
index 6eb0565ddc22..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_page for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/atomic.h>
-#include <linux/bitops.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/page-flags.h>
-#include <linux/pagemap.h>
-
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-/*****************************************************************************
- *
- * Page operations.
- *
- */
-
-static void vvp_page_fini_common(struct vvp_page *vpg)
-{
- struct page *vmpage = vpg->vpg_page;
-
- LASSERT(vmpage);
- put_page(vmpage);
-}
-
-static void vvp_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct page *vmpage = vpg->vpg_page;
-
- /*
- * vmpage->private was already cleared when page was moved into
- * VPG_FREEING state.
- */
- LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
- vvp_page_fini_common(vpg);
-}
-
-static int vvp_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io,
- int nonblock)
-{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct page *vmpage = vpg->vpg_page;
-
- LASSERT(vmpage);
- if (nonblock) {
- if (!trylock_page(vmpage))
- return -EAGAIN;
-
- if (unlikely(PageWriteback(vmpage))) {
- unlock_page(vmpage);
- return -EAGAIN;
- }
-
- return 0;
- }
-
- lock_page(vmpage);
- wait_on_page_writeback(vmpage);
-
- return 0;
-}
-
-static void vvp_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage);
- LASSERT(PageLocked(vmpage));
- wait_on_page_writeback(vmpage);
-}
-
-static void vvp_page_unassume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage);
- LASSERT(PageLocked(vmpage));
-}
-
-static void vvp_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage);
- LASSERT(PageLocked(vmpage));
-
- unlock_page(cl2vm_page(slice));
-}
-
-static void vvp_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct vvp_page *vpg = cl2vvp_page(slice);
-
- LASSERT(vmpage);
- LASSERT(PageLocked(vmpage));
-
- if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
- ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
-
- ll_invalidate_page(vmpage);
-}
-
-static void vvp_page_delete(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct inode *inode = vmpage->mapping->host;
- struct cl_object *obj = slice->cpl_obj;
- struct cl_page *page = slice->cpl_page;
- int refc;
-
- LASSERT(PageLocked(vmpage));
- LASSERT((struct cl_page *)vmpage->private == page);
- LASSERT(inode == vvp_object_inode(obj));
-
- /* Drop the reference count held in vvp_page_init */
- refc = atomic_dec_return(&page->cp_ref);
- LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
-
- ClearPagePrivate(vmpage);
- vmpage->private = 0;
- /*
- * Reference from vmpage to cl_page is removed, but the reference back
- * is still here. It is removed later in vvp_page_fini().
- */
-}
-
-static void vvp_page_export(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int uptodate)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage);
- LASSERT(PageLocked(vmpage));
- if (uptodate)
- SetPageUptodate(vmpage);
- else
- ClearPageUptodate(vmpage);
-}
-
-static int vvp_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
-}
-
-static int vvp_page_prep_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /* Skip the page already marked as PG_uptodate. */
- return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
-}
-
-static int vvp_page_prep_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct cl_page *pg = slice->cpl_page;
-
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageDirty(vmpage));
-
- /* ll_writepage path is not a sync write, so need to set page writeback
- * flag
- */
- if (!pg->cp_sync_io)
- set_page_writeback(vmpage);
-
- return 0;
-}
-
-/**
- * Handles page transfer errors at VM level.
- *
- * This takes inode as a separate argument, because inode on which error is to
- * be set can be different from \a vmpage inode in case of direct-io.
- */
-static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
- int ioret)
-{
- struct vvp_object *obj = cl_inode2vvp(inode);
-
- if (ioret == 0) {
- ClearPageError(vmpage);
- obj->vob_discard_page_warned = 0;
- } else {
- SetPageError(vmpage);
- mapping_set_error(inode->i_mapping, ioret);
-
- if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
- obj->vob_discard_page_warned == 0) {
- obj->vob_discard_page_warned = 1;
- ll_dirty_page_discard_warn(vmpage, ioret);
- }
- }
-}
-
-static void vvp_page_completion_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct page *vmpage = vpg->vpg_page;
- struct cl_page *page = slice->cpl_page;
- struct inode *inode = vvp_object_inode(page->cp_obj);
-
- LASSERT(PageLocked(vmpage));
- CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
-
- if (vpg->vpg_defer_uptodate)
- ll_ra_count_put(ll_i2sbi(inode), 1);
-
- if (ioret == 0) {
- if (!vpg->vpg_defer_uptodate)
- cl_page_export(env, page, 1);
- } else {
- vpg->vpg_defer_uptodate = 0;
- }
-
- if (!page->cp_sync_io)
- unlock_page(vmpage);
-}
-
-static void vvp_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = vpg->vpg_page;
-
- CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
-
- if (pg->cp_sync_io) {
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageWriteback(vmpage));
- } else {
- LASSERT(PageWriteback(vmpage));
- /*
- * Only mark the page error only when it's an async write
- * because applications won't wait for IO to finish.
- */
- vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
-
- end_page_writeback(vmpage);
- }
-}
-
-/**
- * Implements cl_page_operations::cpo_make_ready() method.
- *
- * This is called to yank a page from the transfer cache and to send it out as
- * a part of transfer. This function try-locks the page. If try-lock failed,
- * page is owned by some concurrent IO, and should be skipped (this is bad,
- * but hopefully rare situation, as it usually results in transfer being
- * shorter than possible).
- *
- * \retval 0 success, page can be placed into transfer
- *
- * \retval -EAGAIN page is either used by concurrent IO has been
- * truncated. Skip it.
- */
-static int vvp_page_make_ready(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct cl_page *pg = slice->cpl_page;
- int result = 0;
-
- lock_page(vmpage);
- if (clear_page_dirty_for_io(vmpage)) {
- LASSERT(pg->cp_state == CPS_CACHED);
- /* This actually clears the dirty bit in the radix tree. */
- set_page_writeback(vmpage);
- CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
- } else if (pg->cp_state == CPS_PAGEOUT) {
- /* is it possible for osc_flush_async_page() to already
- * make it ready?
- */
- result = -EALREADY;
- } else {
- CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
- pg->cp_state);
- LBUG();
- }
- unlock_page(vmpage);
- return result;
-}
-
-static int vvp_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct page *vmpage = vpg->vpg_page;
-
- (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d) vm@%p ",
- vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);
- if (vmpage) {
- (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
- (long)vmpage->flags, page_count(vmpage),
- page_mapcount(vmpage), vmpage->private,
- vmpage->index,
- list_empty(&vmpage->lru) ? "not-" : "");
- }
-
- (*printer)(env, cookie, "\n");
-
- return 0;
-}
-
-static int vvp_page_fail(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- /*
- * Cached read?
- */
- LBUG();
-
- return 0;
-}
-
-static const struct cl_page_operations vvp_page_ops = {
- .cpo_own = vvp_page_own,
- .cpo_assume = vvp_page_assume,
- .cpo_unassume = vvp_page_unassume,
- .cpo_disown = vvp_page_disown,
- .cpo_discard = vvp_page_discard,
- .cpo_delete = vvp_page_delete,
- .cpo_export = vvp_page_export,
- .cpo_is_vmlocked = vvp_page_is_vmlocked,
- .cpo_fini = vvp_page_fini,
- .cpo_print = vvp_page_print,
- .io = {
- [CRT_READ] = {
- .cpo_prep = vvp_page_prep_read,
- .cpo_completion = vvp_page_completion_read,
- .cpo_make_ready = vvp_page_fail,
- },
- [CRT_WRITE] = {
- .cpo_prep = vvp_page_prep_write,
- .cpo_completion = vvp_page_completion_write,
- .cpo_make_ready = vvp_page_make_ready,
- },
- },
-};
-
-static int vvp_transient_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /* transient page should always be sent. */
- return 0;
-}
-
-static int vvp_transient_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused, int nonblock)
-{
- return 0;
-}
-
-static void vvp_transient_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
-}
-
-static void vvp_transient_page_unassume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
-}
-
-static void vvp_transient_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
-}
-
-static void vvp_transient_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct cl_page *page = slice->cpl_page;
-
- /*
- * For transient pages, remove it from the radix tree.
- */
- cl_page_delete(env, page);
-}
-
-static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct inode *inode = vvp_object_inode(slice->cpl_obj);
- int locked;
-
- locked = !inode_trylock(inode);
- if (!locked)
- inode_unlock(inode);
- return locked ? -EBUSY : -ENODATA;
-}
-
-static void
-vvp_transient_page_completion(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
-}
-
-static void vvp_transient_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct cl_page *clp = slice->cpl_page;
- struct vvp_object *clobj = cl2vvp(clp->cp_obj);
-
- vvp_page_fini_common(vpg);
- atomic_dec(&clobj->vob_transient_pages);
-}
-
-static const struct cl_page_operations vvp_transient_page_ops = {
- .cpo_own = vvp_transient_page_own,
- .cpo_assume = vvp_transient_page_assume,
- .cpo_unassume = vvp_transient_page_unassume,
- .cpo_disown = vvp_transient_page_disown,
- .cpo_discard = vvp_transient_page_discard,
- .cpo_fini = vvp_transient_page_fini,
- .cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
- .cpo_print = vvp_page_print,
- .io = {
- [CRT_READ] = {
- .cpo_prep = vvp_transient_page_prep,
- .cpo_completion = vvp_transient_page_completion,
- },
- [CRT_WRITE] = {
- .cpo_prep = vvp_transient_page_prep,
- .cpo_completion = vvp_transient_page_completion,
- }
- }
-};
-
-int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
-{
- struct vvp_page *vpg = cl_object_page_slice(obj, page);
- struct page *vmpage = page->cp_vmpage;
-
- CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
- vpg->vpg_page = vmpage;
- get_page(vmpage);
-
- if (page->cp_type == CPT_CACHEABLE) {
- /* in cache, decref in vvp_page_delete */
- atomic_inc(&page->cp_ref);
- SetPagePrivate(vmpage);
- vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
- &vvp_page_ops);
- } else {
- struct vvp_object *clobj = cl2vvp(obj);
-
- cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
- &vvp_transient_page_ops);
- atomic_inc(&clobj->vob_transient_pages);
- }
- return 0;
-}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
deleted file mode 100644
index 2d78432963dc..000000000000
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ /dev/null
@@ -1,638 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/xattr.h>
-#include <linux/selinux.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd_support.h>
-#include <lustre_dlm.h>
-
-#include "llite_internal.h"
-
-const struct xattr_handler *get_xattr_type(const char *name)
-{
- int i = 0;
-
- while (ll_xattr_handlers[i]) {
- size_t len = strlen(ll_xattr_handlers[i]->prefix);
-
- if (!strncmp(ll_xattr_handlers[i]->prefix, name, len))
- return ll_xattr_handlers[i];
- i++;
- }
- return NULL;
-}
-
-static int xattr_type_filter(struct ll_sb_info *sbi,
- const struct xattr_handler *handler)
-{
- /* No handler means XATTR_OTHER_T */
- if (!handler)
- return -EOPNOTSUPP;
-
- if ((handler->flags == XATTR_ACL_ACCESS_T ||
- handler->flags == XATTR_ACL_DEFAULT_T) &&
- !(sbi->ll_flags & LL_SBI_ACL))
- return -EOPNOTSUPP;
-
- if (handler->flags == XATTR_USER_T &&
- !(sbi->ll_flags & LL_SBI_USER_XATTR))
- return -EOPNOTSUPP;
-
- if (handler->flags == XATTR_TRUSTED_T &&
- !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- return 0;
-}
-
-static int
-ll_xattr_set_common(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, const void *value, size_t size,
- int flags)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- const char *pv = value;
- char *fullname;
- __u64 valid;
- int rc;
-
- if (flags == XATTR_REPLACE) {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
- valid = OBD_MD_FLXATTRRM;
- } else {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
- valid = OBD_MD_FLXATTR;
- }
-
- rc = xattr_type_filter(sbi, handler);
- if (rc)
- return rc;
-
- if ((handler->flags == XATTR_ACL_ACCESS_T ||
- handler->flags == XATTR_ACL_DEFAULT_T) &&
- !inode_owner_or_capable(inode))
- return -EPERM;
-
- /* b10667: ignore lustre special xattr for now */
- if (!strcmp(name, "hsm") ||
- ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
- (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov"))))
- return 0;
-
- /* b15587: ignore security.capability xattr for now */
- if ((handler->flags == XATTR_SECURITY_T &&
- !strcmp(name, "capability")))
- return 0;
-
- /* LU-549: Disable security.selinux when selinux is disabled */
- if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() &&
- strcmp(name, "selinux") == 0)
- return -EOPNOTSUPP;
-
- /*FIXME: enable IMA when the conditions are ready */
- if (handler->flags == XATTR_SECURITY_T &&
- (!strcmp(name, "ima") || !strcmp(name, "evm")))
- return -EOPNOTSUPP;
-
- /*
- * In user.* namespace, only regular files and directories can have
- * extended attributes.
- */
- if (handler->flags == XATTR_USER_T) {
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return -EPERM;
- }
-
- fullname = kasprintf(GFP_KERNEL, "%s%s\n", handler->prefix, name);
- if (!fullname)
- return -ENOMEM;
- rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- valid, fullname, pv, size, 0, flags,
- ll_i2suppgid(inode), &req);
- kfree(fullname);
- if (rc) {
- if (rc == -EOPNOTSUPP && handler->flags == XATTR_USER_T) {
- LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
- sbi->ll_flags &= ~LL_SBI_USER_XATTR;
- }
- return rc;
- }
-
- ptlrpc_req_finished(req);
- return 0;
-}
-
-static int get_hsm_state(struct inode *inode, u32 *hus_states)
-{
- struct md_op_data *op_data;
- struct hsm_user_state *hus;
- int rc;
-
- hus = kzalloc(sizeof(*hus), GFP_NOFS);
- if (!hus)
- return -ENOMEM;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, hus);
- if (!IS_ERR(op_data)) {
- rc = obd_iocontrol(LL_IOC_HSM_STATE_GET, ll_i2mdexp(inode),
- sizeof(*op_data), op_data, NULL);
- if (!rc)
- *hus_states = hus->hus_states;
- else
- CDEBUG(D_VFSTRACE, "obd_iocontrol failed. rc = %d\n",
- rc);
-
- ll_finish_md_op_data(op_data);
- } else {
- rc = PTR_ERR(op_data);
- CDEBUG(D_VFSTRACE, "Could not prepare the opdata. rc = %d\n",
- rc);
- }
- kfree(hus);
- return rc;
-}
-
-static int ll_xattr_set(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, const void *value, size_t size,
- int flags)
-{
- LASSERT(inode);
- LASSERT(name);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n",
- PFID(ll_inode2fid(inode)), inode, name);
-
- if (!strcmp(name, "lov")) {
- struct lov_user_md *lump = (struct lov_user_md *)value;
- int op_type = flags == XATTR_REPLACE ? LPROC_LL_REMOVEXATTR :
- LPROC_LL_SETXATTR;
- int rc = 0;
-
- ll_stats_ops_tally(ll_i2sbi(inode), op_type, 1);
-
- if (size != 0 && size < sizeof(struct lov_user_md))
- return -EINVAL;
-
- /*
- * It is possible to set an xattr to a "" value of zero size.
- * For this case we are going to treat it as a removal.
- */
- if (!size && lump)
- lump = NULL;
-
- /* Attributes that are saved via getxattr will always have
- * the stripe_offset as 0. Instead, the MDS should be
- * allowed to pick the starting OST index. b=17846
- */
- if (lump && lump->lmm_stripe_offset == 0)
- lump->lmm_stripe_offset = -1;
-
- /* Avoid anyone directly setting the RELEASED flag. */
- if (lump && (lump->lmm_pattern & LOV_PATTERN_F_RELEASED)) {
- /* Only if we have a released flag check if the file
- * was indeed archived.
- */
- u32 state = HS_NONE;
-
- rc = get_hsm_state(inode, &state);
- if (rc)
- return rc;
-
- if (!(state & HS_ARCHIVED)) {
- CDEBUG(D_VFSTRACE,
- "hus_states state = %x, pattern = %x\n",
- state, lump->lmm_pattern);
- /*
- * Here the state is: real file is not
- * archived but user is requesting to set
- * the RELEASED flag so we mask off the
- * released flag from the request
- */
- lump->lmm_pattern ^= LOV_PATTERN_F_RELEASED;
- }
- }
-
- if (lump && S_ISREG(inode->i_mode)) {
- __u64 it_flags = FMODE_WRITE;
- int lum_size;
-
- lum_size = ll_lov_user_md_size(lump);
- if (lum_size < 0 || size < lum_size)
- return 0; /* b=10667: ignore error */
-
- rc = ll_lov_setstripe_ea_info(inode, dentry, it_flags,
- lump, lum_size);
- /* b=10667: rc always be 0 here for now */
- rc = 0;
- } else if (S_ISDIR(inode->i_mode)) {
- rc = ll_dir_setstripe(inode, lump, 0);
- }
-
- return rc;
-
- } else if (!strcmp(name, "lma") || !strcmp(name, "link")) {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
- return 0;
- }
-
- return ll_xattr_set_common(handler, dentry, inode, name, value, size,
- flags);
-}
-
-int
-ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer,
- size_t size, __u64 valid)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- struct mdt_body *body;
- void *xdata;
- int rc;
-
- if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T &&
- (type != XATTR_SECURITY_T || strcmp(name, "security.selinux"))) {
- rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
- if (rc == -EAGAIN)
- goto getxattr_nocache;
- if (rc < 0)
- goto out_xattr;
-
- /* Add "system.posix_acl_access" to the list */
- if (lli->lli_posix_acl && valid & OBD_MD_FLXATTRLS) {
- if (size == 0) {
- rc += sizeof(XATTR_NAME_ACL_ACCESS);
- } else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) {
- memcpy(buffer + rc, XATTR_NAME_ACL_ACCESS,
- sizeof(XATTR_NAME_ACL_ACCESS));
- rc += sizeof(XATTR_NAME_ACL_ACCESS);
- } else {
- rc = -ERANGE;
- goto out_xattr;
- }
- }
- } else {
-getxattr_nocache:
- rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- valid, name, NULL, 0, size, 0, &req);
- if (rc < 0)
- goto out_xattr;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body);
-
- /* only detect the xattr size */
- if (size == 0) {
- rc = body->mbo_eadatasize;
- goto out;
- }
-
- if (size < body->mbo_eadatasize) {
- CERROR("server bug: replied size %u > %u\n",
- body->mbo_eadatasize, (int)size);
- rc = -ERANGE;
- goto out;
- }
-
- if (body->mbo_eadatasize == 0) {
- rc = -ENODATA;
- goto out;
- }
-
- /* do not need swab xattr data */
- xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->mbo_eadatasize);
- if (!xdata) {
- rc = -EFAULT;
- goto out;
- }
-
- memcpy(buffer, xdata, body->mbo_eadatasize);
- rc = body->mbo_eadatasize;
- }
-
-out_xattr:
- if (rc == -EOPNOTSUPP && type == XATTR_USER_T) {
- LCONSOLE_INFO(
- "%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0), rc);
- sbi->ll_flags &= ~LL_SBI_USER_XATTR;
- }
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int ll_xattr_get_common(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, void *buffer, size_t size)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-#ifdef CONFIG_FS_POSIX_ACL
- struct ll_inode_info *lli = ll_i2info(inode);
-#endif
- char *fullname;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
- PFID(ll_inode2fid(inode)), inode);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
-
- rc = xattr_type_filter(sbi, handler);
- if (rc)
- return rc;
-
- /* b15587: ignore security.capability xattr for now */
- if ((handler->flags == XATTR_SECURITY_T && !strcmp(name, "capability")))
- return -ENODATA;
-
- /* LU-549: Disable security.selinux when selinux is disabled */
- if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() &&
- !strcmp(name, "selinux"))
- return -EOPNOTSUPP;
-
-#ifdef CONFIG_FS_POSIX_ACL
- /* posix acl is under protection of LOOKUP lock. when calling to this,
- * we just have path resolution to the target inode, so we have great
- * chance that cached ACL is uptodate.
- */
- if (handler->flags == XATTR_ACL_ACCESS_T) {
- struct posix_acl *acl;
-
- spin_lock(&lli->lli_lock);
- acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
-
- if (!acl)
- return -ENODATA;
-
- rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
- posix_acl_release(acl);
- return rc;
- }
- if (handler->flags == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
- return -ENODATA;
-#endif
- fullname = kasprintf(GFP_KERNEL, "%s%s\n", handler->prefix, name);
- if (!fullname)
- return -ENOMEM;
- rc = ll_xattr_list(inode, fullname, handler->flags, buffer, size,
- OBD_MD_FLXATTR);
- kfree(fullname);
- return rc;
-}
-
-static ssize_t ll_getxattr_lov(struct inode *inode, void *buf, size_t buf_size)
-{
- ssize_t rc;
-
- if (S_ISREG(inode->i_mode)) {
- struct cl_object *obj = ll_i2info(inode)->lli_clob;
- struct cl_layout cl = {
- .cl_buf.lb_buf = buf,
- .cl_buf.lb_len = buf_size,
- };
- struct lu_env *env;
- u16 refcheck;
-
- if (!obj)
- return -ENODATA;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- rc = cl_object_layout_get(env, obj, &cl);
- if (rc < 0)
- goto out_env;
-
- if (!cl.cl_size) {
- rc = -ENODATA;
- goto out_env;
- }
-
- rc = cl.cl_size;
-
- if (!buf_size)
- goto out_env;
-
- LASSERT(buf && rc <= buf_size);
-
- /*
- * Do not return layout gen for getxattr() since
- * otherwise it would confuse tar --xattr by
- * recognizing layout gen as stripe offset when the
- * file is restored. See LU-2809.
- */
- ((struct lov_mds_md *)buf)->lmm_layout_gen = 0;
-out_env:
- cl_env_put(env, &refcheck);
-
- return rc;
- } else if (S_ISDIR(inode->i_mode)) {
- struct ptlrpc_request *req = NULL;
- struct lov_mds_md *lmm = NULL;
- int lmm_size = 0;
-
- rc = ll_dir_getstripe(inode, (void **)&lmm, &lmm_size,
- &req, 0);
- if (rc < 0)
- goto out_req;
-
- if (!buf_size) {
- rc = lmm_size;
- goto out_req;
- }
-
- if (buf_size < lmm_size) {
- rc = -ERANGE;
- goto out_req;
- }
-
- memcpy(buf, lmm, lmm_size);
- rc = lmm_size;
-out_req:
- if (req)
- ptlrpc_req_finished(req);
-
- return rc;
- } else {
- return -ENODATA;
- }
-}
-
-static int ll_xattr_get(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, void *buffer, size_t size)
-{
- LASSERT(inode);
- LASSERT(name);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n",
- PFID(ll_inode2fid(inode)), inode, name);
-
- if (!strcmp(name, "lov")) {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
-
- return ll_getxattr_lov(inode, buffer, size);
- }
-
- return ll_xattr_get_common(handler, dentry, inode, name, buffer, size);
-}
-
-ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
- struct inode *inode = d_inode(dentry);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- char *xattr_name;
- ssize_t rc, rc2;
- size_t len, rem;
-
- LASSERT(inode);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
- PFID(ll_inode2fid(inode)), inode);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
-
- rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size,
- OBD_MD_FLXATTRLS);
- if (rc < 0)
- return rc;
- /*
- * If we're being called to get the size of the xattr list
- * (buf_size == 0) then just assume that a lustre.lov xattr
- * exists.
- */
- if (!size)
- return rc + sizeof(XATTR_LUSTRE_LOV);
-
- xattr_name = buffer;
- rem = rc;
-
- while (rem > 0) {
- len = strnlen(xattr_name, rem - 1) + 1;
- rem -= len;
- if (!xattr_type_filter(sbi, get_xattr_type(xattr_name))) {
- /* Skip OK xattr type leave it in buffer */
- xattr_name += len;
- continue;
- }
-
- /*
- * Move up remaining xattrs in buffer
- * removing the xattr that is not OK
- */
- memmove(xattr_name, xattr_name + len, rem);
- rc -= len;
- }
-
- rc2 = ll_getxattr_lov(inode, NULL, 0);
- if (rc2 == -ENODATA)
- return rc;
-
- if (rc2 < 0)
- return rc2;
-
- if (size < rc + sizeof(XATTR_LUSTRE_LOV))
- return -ERANGE;
-
- memcpy(buffer + rc, XATTR_LUSTRE_LOV, sizeof(XATTR_LUSTRE_LOV));
-
- return rc + sizeof(XATTR_LUSTRE_LOV);
-}
-
-static const struct xattr_handler ll_user_xattr_handler = {
- .prefix = XATTR_USER_PREFIX,
- .flags = XATTR_USER_T,
- .get = ll_xattr_get_common,
- .set = ll_xattr_set_common,
-};
-
-static const struct xattr_handler ll_trusted_xattr_handler = {
- .prefix = XATTR_TRUSTED_PREFIX,
- .flags = XATTR_TRUSTED_T,
- .get = ll_xattr_get,
- .set = ll_xattr_set,
-};
-
-static const struct xattr_handler ll_security_xattr_handler = {
- .prefix = XATTR_SECURITY_PREFIX,
- .flags = XATTR_SECURITY_T,
- .get = ll_xattr_get_common,
- .set = ll_xattr_set_common,
-};
-
-static const struct xattr_handler ll_acl_access_xattr_handler = {
- .prefix = XATTR_NAME_POSIX_ACL_ACCESS,
- .flags = XATTR_ACL_ACCESS_T,
- .get = ll_xattr_get_common,
- .set = ll_xattr_set_common,
-};
-
-static const struct xattr_handler ll_acl_default_xattr_handler = {
- .prefix = XATTR_NAME_POSIX_ACL_DEFAULT,
- .flags = XATTR_ACL_DEFAULT_T,
- .get = ll_xattr_get_common,
- .set = ll_xattr_set_common,
-};
-
-static const struct xattr_handler ll_lustre_xattr_handler = {
- .prefix = XATTR_LUSTRE_PREFIX,
- .flags = XATTR_LUSTRE_T,
- .get = ll_xattr_get,
- .set = ll_xattr_set,
-};
-
-const struct xattr_handler *ll_xattr_handlers[] = {
- &ll_user_xattr_handler,
- &ll_trusted_xattr_handler,
- &ll_security_xattr_handler,
-#ifdef CONFIG_FS_POSIX_ACL
- &ll_acl_access_xattr_handler,
- &ll_acl_default_xattr_handler,
-#endif
- &ll_lustre_xattr_handler,
- NULL,
-};
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
deleted file mode 100644
index 4dc799d60a9f..000000000000
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2012 Xyratex Technology Limited
- *
- * Copyright (c) 2013, 2015, Intel Corporation.
- *
- * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
- *
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <obd_support.h>
-#include <lustre_dlm.h>
-#include "llite_internal.h"
-
-/* If we ever have hundreds of extended attributes, we might want to consider
- * using a hash or a tree structure instead of list for faster lookups.
- */
-struct ll_xattr_entry {
- struct list_head xe_list; /* protected with
- * lli_xattrs_list_rwsem
- */
- char *xe_name; /* xattr name, \0-terminated */
- char *xe_value; /* xattr value */
- unsigned int xe_namelen; /* strlen(xe_name) + 1 */
- unsigned int xe_vallen; /* xattr value length */
-};
-
-static struct kmem_cache *xattr_kmem;
-static struct lu_kmem_descr xattr_caches[] = {
- {
- .ckd_cache = &xattr_kmem,
- .ckd_name = "xattr_kmem",
- .ckd_size = sizeof(struct ll_xattr_entry)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-int ll_xattr_init(void)
-{
- return lu_kmem_init(xattr_caches);
-}
-
-void ll_xattr_fini(void)
-{
- lu_kmem_fini(xattr_caches);
-}
-
-/**
- * Initializes xattr cache for an inode.
- *
- * This initializes the xattr list and marks cache presence.
- */
-static void ll_xattr_cache_init(struct ll_inode_info *lli)
-{
- INIT_LIST_HEAD(&lli->lli_xattrs);
- set_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
-}
-
-/**
- * This looks for a specific extended attribute.
- *
- * Find in @cache and return @xattr_name attribute in @xattr,
- * for the NULL @xattr_name return the first cached @xattr.
- *
- * \retval 0 success
- * \retval -ENODATA if not found
- */
-static int ll_xattr_cache_find(struct list_head *cache,
- const char *xattr_name,
- struct ll_xattr_entry **xattr)
-{
- struct ll_xattr_entry *entry;
-
- list_for_each_entry(entry, cache, xe_list) {
- /* xattr_name == NULL means look for any entry */
- if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) {
- *xattr = entry;
- CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
- entry->xe_name, entry->xe_vallen,
- entry->xe_value);
- return 0;
- }
- }
-
- return -ENODATA;
-}
-
-/**
- * This adds an xattr.
- *
- * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
- *
- * \retval 0 success
- * \retval -ENOMEM if no memory could be allocated for the cached attr
- * \retval -EPROTO if duplicate xattr is being added
- */
-static int ll_xattr_cache_add(struct list_head *cache,
- const char *xattr_name,
- const char *xattr_val,
- unsigned int xattr_val_len)
-{
- struct ll_xattr_entry *xattr;
-
- if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
- CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
- return -EPROTO;
- }
-
- xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS);
- if (!xattr) {
- CDEBUG(D_CACHE, "failed to allocate xattr\n");
- return -ENOMEM;
- }
-
- xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
- if (!xattr->xe_name) {
- CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
- xattr->xe_namelen);
- goto err_name;
- }
- xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS);
- if (!xattr->xe_value)
- goto err_value;
-
- xattr->xe_vallen = xattr_val_len;
- list_add(&xattr->xe_list, cache);
-
- CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len,
- xattr_val);
-
- return 0;
-err_value:
- kfree(xattr->xe_name);
-err_name:
- kmem_cache_free(xattr_kmem, xattr);
-
- return -ENOMEM;
-}
-
-/**
- * This removes an extended attribute from cache.
- *
- * Remove @xattr_name attribute from @cache.
- *
- * \retval 0 success
- * \retval -ENODATA if @xattr_name is not cached
- */
-static int ll_xattr_cache_del(struct list_head *cache,
- const char *xattr_name)
-{
- struct ll_xattr_entry *xattr;
-
- CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
-
- if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
- list_del(&xattr->xe_list);
- kfree(xattr->xe_name);
- kfree(xattr->xe_value);
- kmem_cache_free(xattr_kmem, xattr);
-
- return 0;
- }
-
- return -ENODATA;
-}
-
-/**
- * This iterates cached extended attributes.
- *
- * Walk over cached attributes in @cache and
- * fill in @xld_buffer or only calculate buffer
- * size if @xld_buffer is NULL.
- *
- * \retval >= 0 buffer list size
- * \retval -ENODATA if the list cannot fit @xld_size buffer
- */
-static int ll_xattr_cache_list(struct list_head *cache,
- char *xld_buffer,
- int xld_size)
-{
- struct ll_xattr_entry *xattr, *tmp;
- int xld_tail = 0;
-
- list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
- CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
- xld_buffer, xld_tail, xattr->xe_name);
-
- if (xld_buffer) {
- xld_size -= xattr->xe_namelen;
- if (xld_size < 0)
- break;
- memcpy(&xld_buffer[xld_tail],
- xattr->xe_name, xattr->xe_namelen);
- }
- xld_tail += xattr->xe_namelen;
- }
-
- if (xld_size < 0)
- return -ERANGE;
-
- return xld_tail;
-}
-
-/**
- * Check if the xattr cache is initialized (filled).
- *
- * \retval 0 @cache is not initialized
- * \retval 1 @cache is initialized
- */
-static int ll_xattr_cache_valid(struct ll_inode_info *lli)
-{
- return test_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
-}
-
-/**
- * This finalizes the xattr cache.
- *
- * Free all xattr memory. @lli is the inode info pointer.
- *
- * \retval 0 no error occurred
- */
-static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
-{
- if (!ll_xattr_cache_valid(lli))
- return 0;
-
- while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
- ; /* empty loop */
-
- clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
-
- return 0;
-}
-
-int ll_xattr_cache_destroy(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
-
- down_write(&lli->lli_xattrs_list_rwsem);
- rc = ll_xattr_cache_destroy_locked(lli);
- up_write(&lli->lli_xattrs_list_rwsem);
-
- return rc;
-}
-
-/**
- * Match or enqueue a PR lock.
- *
- * Find or request an LDLM lock with xattr data.
- * Since LDLM does not provide API for atomic match_or_enqueue,
- * the function handles it with a separate enq lock.
- * If successful, the function exits with the list lock held.
- *
- * \retval 0 no error occurred
- * \retval -ENOMEM not enough memory
- */
-static int ll_xattr_find_get_lock(struct inode *inode,
- struct lookup_intent *oit,
- struct ptlrpc_request **req)
-{
- enum ldlm_mode mode;
- struct lustre_handle lockh = { 0 };
- struct md_op_data *op_data;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = it_to_lock_mode(oit),
- .ei_cb_bl = &ll_md_blocking_ast,
- .ei_cb_cp = &ldlm_completion_ast,
- };
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obd_export *exp = sbi->ll_md_exp;
- int rc;
-
- mutex_lock(&lli->lli_xattrs_enq_lock);
- /* inode may have been shrunk and recreated, so data is gone, match lock
- * only when data exists.
- */
- if (ll_xattr_cache_valid(lli)) {
- /* Try matching first. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
- LCK_PR);
- if (mode != 0) {
- /* fake oit in mdc_revalidate_lock() manner */
- oit->it_lock_handle = lockh.cookie;
- oit->it_lock_mode = mode;
- goto out;
- }
- }
-
- /* Enqueue if the lock isn't cached locally. */
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- mutex_unlock(&lli->lli_xattrs_enq_lock);
- return PTR_ERR(op_data);
- }
-
- op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
-
- rc = md_enqueue(exp, &einfo, NULL, oit, op_data, &lockh, 0);
- ll_finish_md_op_data(op_data);
-
- if (rc < 0) {
- CDEBUG(D_CACHE,
- "md_intent_lock failed with %d for fid " DFID "\n",
- rc, PFID(ll_inode2fid(inode)));
- mutex_unlock(&lli->lli_xattrs_enq_lock);
- return rc;
- }
-
- *req = oit->it_request;
-out:
- down_write(&lli->lli_xattrs_list_rwsem);
- mutex_unlock(&lli->lli_xattrs_enq_lock);
-
- return 0;
-}
-
-/**
- * Refill the xattr cache.
- *
- * Fetch and cache the whole of xattrs for @inode, acquiring
- * a read or a write xattr lock depending on operation in @oit.
- * Intent is dropped on exit unless the operation is setxattr.
- *
- * \retval 0 no error occurred
- * \retval -EPROTO network protocol error
- * \retval -ENOMEM not enough memory for the cache
- */
-static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- const char *xdata, *xval, *xtail, *xvtail;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct mdt_body *body;
- __u32 *xsizes;
- int rc, i;
-
- rc = ll_xattr_find_get_lock(inode, oit, &req);
- if (rc)
- goto out_no_unlock;
-
- /* Do we have the data at this point? */
- if (ll_xattr_cache_valid(lli)) {
- ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
- rc = 0;
- goto out_maybe_drop;
- }
-
- /* Matched but no cache? Cancelled on error by a parallel refill. */
- if (unlikely(!req)) {
- CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
- rc = -EIO;
- goto out_maybe_drop;
- }
-
- if (oit->it_status < 0) {
- CDEBUG(D_CACHE,
- "getxattr intent returned %d for fid " DFID "\n",
- oit->it_status, PFID(ll_inode2fid(inode)));
- rc = oit->it_status;
- /* xattr data is so large that we don't want to cache it */
- if (rc == -ERANGE)
- rc = -EAGAIN;
- goto out_destroy;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!body) {
- CERROR("no MDT BODY in the refill xattr reply\n");
- rc = -EPROTO;
- goto out_destroy;
- }
- /* do not need swab xattr data */
- xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->mbo_eadatasize);
- xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
- body->mbo_aclsize);
- xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
- body->mbo_max_mdsize * sizeof(__u32));
- if (!xdata || !xval || !xsizes) {
- CERROR("wrong setxattr reply\n");
- rc = -EPROTO;
- goto out_destroy;
- }
-
- xtail = xdata + body->mbo_eadatasize;
- xvtail = xval + body->mbo_aclsize;
-
- CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);
-
- ll_xattr_cache_init(lli);
-
- for (i = 0; i < body->mbo_max_mdsize; i++) {
- CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
- /* Perform consistency checks: attr names and vals in pill */
- if (!memchr(xdata, 0, xtail - xdata)) {
- CERROR("xattr protocol violation (names are broken)\n");
- rc = -EPROTO;
- } else if (xval + *xsizes > xvtail) {
- CERROR("xattr protocol violation (vals are broken)\n");
- rc = -EPROTO;
- } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
- rc = -ENOMEM;
- } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
- /* Filter out ACL ACCESS since it's cached separately */
- CDEBUG(D_CACHE, "not caching %s\n",
- XATTR_NAME_ACL_ACCESS);
- rc = 0;
- } else if (!strcmp(xdata, "security.selinux")) {
- /* Filter out security.selinux, it is cached in slab */
- CDEBUG(D_CACHE, "not caching security.selinux\n");
- rc = 0;
- } else {
- rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
- *xsizes);
- }
- if (rc < 0) {
- ll_xattr_cache_destroy_locked(lli);
- goto out_destroy;
- }
- xdata += strlen(xdata) + 1;
- xval += *xsizes;
- xsizes++;
- }
-
- if (xdata != xtail || xval != xvtail)
- CERROR("a hole in xattr data\n");
-
- ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);
-
- goto out_maybe_drop;
-out_maybe_drop:
-
- ll_intent_drop_lock(oit);
-
- if (rc != 0)
- up_write(&lli->lli_xattrs_list_rwsem);
-out_no_unlock:
- ptlrpc_req_finished(req);
-
- return rc;
-
-out_destroy:
- up_write(&lli->lli_xattrs_list_rwsem);
-
- ldlm_lock_decref_and_cancel((struct lustre_handle *)
- &oit->it_lock_handle,
- oit->it_lock_mode);
-
- goto out_no_unlock;
-}
-
-/**
- * Get an xattr value or list xattrs using the write-through cache.
- *
- * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
- * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
- * The resulting value/list is stored in @buffer if the former
- * is not larger than @size.
- *
- * \retval 0 no error occurred
- * \retval -EPROTO network protocol error
- * \retval -ENOMEM not enough memory for the cache
- * \retval -ERANGE the buffer is not large enough
- * \retval -ENODATA no such attr or the list is empty
- */
-int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer,
- size_t size, __u64 valid)
-{
- struct lookup_intent oit = { .it_op = IT_GETXATTR };
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc = 0;
-
- LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));
-
- down_read(&lli->lli_xattrs_list_rwsem);
- if (!ll_xattr_cache_valid(lli)) {
- up_read(&lli->lli_xattrs_list_rwsem);
- rc = ll_xattr_cache_refill(inode, &oit);
- if (rc)
- return rc;
- downgrade_write(&lli->lli_xattrs_list_rwsem);
- } else {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
- }
-
- if (valid & OBD_MD_FLXATTR) {
- struct ll_xattr_entry *xattr;
-
- rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
- if (rc == 0) {
- rc = xattr->xe_vallen;
- /* zero size means we are only requested size in rc */
- if (size != 0) {
- if (size >= xattr->xe_vallen)
- memcpy(buffer, xattr->xe_value,
- xattr->xe_vallen);
- else
- rc = -ERANGE;
- }
- }
- } else if (valid & OBD_MD_FLXATTRLS) {
- rc = ll_xattr_cache_list(&lli->lli_xattrs,
- size ? buffer : NULL, size);
- }
-
- goto out;
-out:
- up_read(&lli->lli_xattrs_list_rwsem);
-
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c
deleted file mode 100644
index 93ec07531ac7..000000000000
--- a/drivers/staging/lustre/lustre/llite/xattr_security.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright (c) 2014 Bull SAS
- * Author: Sebastien Buisson sebastien.buisson@bull.net
- */
-
-/*
- * lustre/llite/xattr_security.c
- * Handler for storing security labels as extended attributes.
- */
-
-#include <linux/types.h>
-#include <linux/security.h>
-#include <linux/selinux.h>
-#include <linux/xattr.h>
-#include "llite_internal.h"
-
-/**
- * A helper function for ll_security_inode_init_security()
- * that takes care of setting xattrs
- *
- * Get security context of @inode from @xattr_array,
- * and put it in 'security.xxx' xattr of dentry
- * stored in @fs_info.
- *
- * \retval 0 success
- * \retval -ENOMEM if no memory could be allocated for xattr name
- * \retval < 0 failure to set xattr
- */
-static int
-ll_initxattrs(struct inode *inode, const struct xattr *xattr_array,
- void *fs_info)
-{
- struct dentry *dentry = fs_info;
- const struct xattr *xattr;
- int err = 0;
-
- for (xattr = xattr_array; xattr->name; xattr++) {
- char *full_name;
-
- full_name = kasprintf(GFP_KERNEL, "%s%s",
- XATTR_SECURITY_PREFIX, xattr->name);
- if (!full_name) {
- err = -ENOMEM;
- break;
- }
-
- err = __vfs_setxattr(dentry, inode, full_name, xattr->value,
- xattr->value_len, XATTR_CREATE);
- kfree(full_name);
- if (err < 0)
- break;
- }
- return err;
-}
-
-/**
- * Initializes security context
- *
- * Get security context of @inode in @dir,
- * and put it in 'security.xxx' xattr of @dentry.
- *
- * \retval 0 success, or SELinux is disabled
- * \retval -ENOMEM if no memory could be allocated for xattr name
- * \retval < 0 failure to get security context or set xattr
- */
-int
-ll_init_security(struct dentry *dentry, struct inode *inode, struct inode *dir)
-{
- if (!selinux_is_enabled())
- return 0;
-
- return security_inode_init_security(inode, dir, NULL,
- &ll_initxattrs, dentry);
-}
diff --git a/drivers/staging/lustre/lustre/lmv/Makefile b/drivers/staging/lustre/lustre/lmv/Makefile
deleted file mode 100644
index 91c99114aa13..000000000000
--- a/drivers/staging/lustre/lustre/lmv/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LUSTRE_FS) += lmv.o
-lmv-y := lmv_obd.o lmv_intent.o lmv_fld.o lproc_lmv.o
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
deleted file mode 100644
index 00dc858c10c9..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LMV
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <asm/div64.h>
-#include <linux/seq_file.h>
-
-#include <obd_support.h>
-#include <lustre_fid.h>
-#include <lustre_lib.h>
-#include <lustre_net.h>
-#include <lustre_dlm.h>
-#include <obd_class.h>
-#include <lprocfs_status.h>
-#include "lmv_internal.h"
-
-int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid, u32 *mds)
-{
- struct obd_device *obd = lmv2obd_dev(lmv);
- int rc;
-
- /*
- * FIXME: Currently ZFS still use local seq for ROOT unfortunately, and
- * this fid_is_local check should be removed once LU-2240 is fixed
- */
- if (!fid_is_sane(fid) || !(fid_seq_in_fldb(fid_seq(fid)) ||
- fid_seq_is_local_file(fid_seq(fid)))) {
- CERROR("%s: invalid FID " DFID "\n", obd->obd_name, PFID(fid));
- return -EINVAL;
- }
-
- rc = fld_client_lookup(&lmv->lmv_fld, fid_seq(fid), mds,
- LU_SEQ_RANGE_MDT, NULL);
- if (rc) {
- CERROR("Error while looking for mds number. Seq %#llx, err = %d\n",
- fid_seq(fid), rc);
- return rc;
- }
-
- CDEBUG(D_INODE, "FLD lookup got mds #%x for fid=" DFID "\n",
- *mds, PFID(fid));
-
- if (*mds >= lmv->desc.ld_tgt_count) {
- CERROR("FLD lookup got invalid mds #%x (max: %x) for fid=" DFID "\n", *mds, lmv->desc.ld_tgt_count,
- PFID(fid));
- rc = -EINVAL;
- }
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
deleted file mode 100644
index 1793c9f79b24..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ /dev/null
@@ -1,517 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LMV
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <asm/div64.h>
-#include <linux/seq_file.h>
-#include <linux/namei.h>
-#include <lustre_intent.h>
-#include <obd_support.h>
-#include <lustre_lib.h>
-#include <lustre_net.h>
-#include <lustre_dlm.h>
-#include <lustre_mdc.h>
-#include <obd_class.h>
-#include <lprocfs_status.h>
-#include "lmv_internal.h"
-
-static int lmv_intent_remote(struct obd_export *exp, struct lookup_intent *it,
- const struct lu_fid *parent_fid,
- struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct ptlrpc_request *req = NULL;
- struct lustre_handle plock;
- struct md_op_data *op_data;
- struct lmv_tgt_desc *tgt;
- struct mdt_body *body;
- int pmode;
- int rc = 0;
-
- body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (!body)
- return -EPROTO;
-
- LASSERT((body->mbo_valid & OBD_MD_MDS));
-
- /*
- * Unfortunately, we have to lie to MDC/MDS to retrieve
- * attributes llite needs and provideproper locking.
- */
- if (it->it_op & IT_LOOKUP)
- it->it_op = IT_GETATTR;
-
- /*
- * We got LOOKUP lock, but we really need attrs.
- */
- pmode = it->it_lock_mode;
- if (pmode) {
- plock.cookie = it->it_lock_handle;
- it->it_lock_mode = 0;
- it->it_request = NULL;
- }
-
- LASSERT(fid_is_sane(&body->mbo_fid1));
-
- tgt = lmv_find_target(lmv, &body->mbo_fid1);
- if (IS_ERR(tgt)) {
- rc = PTR_ERR(tgt);
- goto out;
- }
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data) {
- rc = -ENOMEM;
- goto out;
- }
-
- op_data->op_fid1 = body->mbo_fid1;
- /* Sent the parent FID to the remote MDT */
- if (parent_fid) {
- /* The parent fid is only for remote open to
- * check whether the open is from OBF,
- * see mdt_cross_open
- */
- LASSERT(it->it_op & IT_OPEN);
- op_data->op_fid2 = *parent_fid;
- }
-
- op_data->op_bias = MDS_CROSS_REF;
- CDEBUG(D_INODE, "REMOTE_INTENT with fid=" DFID " -> mds #%u\n",
- PFID(&body->mbo_fid1), tgt->ltd_idx);
-
- rc = md_intent_lock(tgt->ltd_exp, op_data, it, &req, cb_blocking,
- extra_lock_flags);
- if (rc)
- goto out_free_op_data;
-
- /*
- * LLite needs LOOKUP lock to track dentry revocation in order to
- * maintain dcache consistency. Thus drop UPDATE|PERM lock here
- * and put LOOKUP in request.
- */
- if (it->it_lock_mode != 0) {
- it->it_remote_lock_handle =
- it->it_lock_handle;
- it->it_remote_lock_mode = it->it_lock_mode;
- }
-
- if (pmode) {
- it->it_lock_handle = plock.cookie;
- it->it_lock_mode = pmode;
- }
-
-out_free_op_data:
- kfree(op_data);
-out:
- if (rc && pmode)
- ldlm_lock_decref(&plock, pmode);
-
- ptlrpc_req_finished(*reqp);
- *reqp = req;
- return rc;
-}
-
-int lmv_revalidate_slaves(struct obd_export *exp,
- const struct lmv_stripe_md *lsm,
- ldlm_blocking_callback cb_blocking,
- int extra_lock_flags)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct ptlrpc_request *req = NULL;
- struct mdt_body *body;
- struct md_op_data *op_data;
- int rc = 0, i;
-
- /**
- * revalidate slaves has some problems, temporarily return,
- * we may not need that
- */
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data)
- return -ENOMEM;
-
- /**
- * Loop over the stripe information, check validity and update them
- * from MDS if needed.
- */
- for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
- struct lookup_intent it = { .it_op = IT_GETATTR };
- struct lustre_handle *lockh = NULL;
- struct lmv_tgt_desc *tgt = NULL;
- struct inode *inode;
- struct lu_fid fid;
-
- fid = lsm->lsm_md_oinfo[i].lmo_fid;
- inode = lsm->lsm_md_oinfo[i].lmo_root;
-
- /*
- * Prepare op_data for revalidating. Note that @fid2 shluld be
- * defined otherwise it will go to server and take new lock
- * which is not needed here.
- */
- memset(op_data, 0, sizeof(*op_data));
- op_data->op_fid1 = fid;
- op_data->op_fid2 = fid;
-
- tgt = lmv_locate_mds(lmv, op_data, &fid);
- if (IS_ERR(tgt)) {
- rc = PTR_ERR(tgt);
- goto cleanup;
- }
-
- CDEBUG(D_INODE, "Revalidate slave " DFID " -> mds #%u\n",
- PFID(&fid), tgt->ltd_idx);
-
- if (req) {
- ptlrpc_req_finished(req);
- req = NULL;
- }
-
- rc = md_intent_lock(tgt->ltd_exp, op_data, &it, &req,
- cb_blocking, extra_lock_flags);
- if (rc < 0)
- goto cleanup;
-
- lockh = (struct lustre_handle *)&it.it_lock_handle;
- if (rc > 0 && !req) {
- /* slave inode is still valid */
- CDEBUG(D_INODE, "slave " DFID " is still valid.\n",
- PFID(&fid));
- rc = 0;
- } else {
- /* refresh slave from server */
- body = req_capsule_server_get(&req->rq_pill,
- &RMF_MDT_BODY);
- if (!body) {
- if (it.it_lock_mode && lockh) {
- ldlm_lock_decref(lockh, it.it_lock_mode);
- it.it_lock_mode = 0;
- }
-
- rc = -ENOENT;
- goto cleanup;
- }
-
- i_size_write(inode, body->mbo_size);
- inode->i_blocks = body->mbo_blocks;
- set_nlink(inode, body->mbo_nlink);
- LTIME_S(inode->i_atime) = body->mbo_atime;
- LTIME_S(inode->i_ctime) = body->mbo_ctime;
- LTIME_S(inode->i_mtime) = body->mbo_mtime;
- }
-
- md_set_lock_data(tgt->ltd_exp, lockh, inode, NULL);
-
- if (it.it_lock_mode && lockh) {
- ldlm_lock_decref(lockh, it.it_lock_mode);
- it.it_lock_mode = 0;
- }
- }
-
-cleanup:
- if (req)
- ptlrpc_req_finished(req);
-
- kfree(op_data);
- return rc;
-}
-
-/*
- * IT_OPEN is intended to open (and create, possible) an object. Parent (pid)
- * may be split dir.
- */
-static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
- struct lookup_intent *it,
- struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- struct mdt_body *body;
- int rc;
-
- if (it->it_flags & MDS_OPEN_BY_FID) {
- LASSERT(fid_is_sane(&op_data->op_fid2));
-
- /*
- * for striped directory, we can't know parent stripe fid
- * without name, but we can set it to child fid, and MDT
- * will obtain it from linkea in open in such case.
- */
- if (op_data->op_mea1)
- op_data->op_fid1 = op_data->op_fid2;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid2);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- op_data->op_mds = tgt->ltd_idx;
- } else {
- LASSERT(fid_is_sane(&op_data->op_fid1));
- LASSERT(fid_is_zero(&op_data->op_fid2));
- LASSERT(op_data->op_name);
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
- }
-
- /* If it is ready to open the file by FID, do not need
- * allocate FID at all, otherwise it will confuse MDT
- */
- if ((it->it_op & IT_CREAT) && !(it->it_flags & MDS_OPEN_BY_FID)) {
- /*
- * For lookup(IT_CREATE) cases allocate new fid and setup FLD
- * for it.
- */
- rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
- if (rc != 0)
- return rc;
- }
-
- CDEBUG(D_INODE, "OPEN_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%u\n",
- PFID(&op_data->op_fid1),
- PFID(&op_data->op_fid2), op_data->op_name, tgt->ltd_idx);
-
- rc = md_intent_lock(tgt->ltd_exp, op_data, it, reqp, cb_blocking,
- extra_lock_flags);
- if (rc != 0)
- return rc;
- /*
- * Nothing is found, do not access body->mbo_fid1 as it is zero and thus
- * pointless.
- */
- if ((it->it_disposition & DISP_LOOKUP_NEG) &&
- !(it->it_disposition & DISP_OPEN_CREATE) &&
- !(it->it_disposition & DISP_OPEN_OPEN))
- return rc;
-
- body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (!body)
- return -EPROTO;
-
- /* Not cross-ref case, just get out of here. */
- if (unlikely((body->mbo_valid & OBD_MD_MDS))) {
- rc = lmv_intent_remote(exp, it, &op_data->op_fid1, reqp,
- cb_blocking, extra_lock_flags);
- if (rc != 0)
- return rc;
-
- body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (!body)
- return -EPROTO;
- }
-
- return rc;
-}
-
-/*
- * Handler for: getattr, lookup and revalidate cases.
- */
-static int lmv_intent_lookup(struct obd_export *exp,
- struct md_op_data *op_data,
- struct lookup_intent *it,
- struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- struct lmv_stripe_md *lsm = op_data->op_mea1;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt = NULL;
- struct mdt_body *body;
- int rc = 0;
-
- /*
- * If it returns ERR_PTR(-EBADFD) then it is an unknown hash type
- * it will try all stripes to locate the object
- */
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt) && (PTR_ERR(tgt) != -EBADFD))
- return PTR_ERR(tgt);
-
- /*
- * Both migrating dir and unknown hash dir need to try
- * all of sub-stripes
- */
- if (lsm && !lmv_is_known_hash_type(lsm->lsm_md_hash_type)) {
- struct lmv_oinfo *oinfo = &lsm->lsm_md_oinfo[0];
-
- op_data->op_fid1 = oinfo->lmo_fid;
- op_data->op_mds = oinfo->lmo_mds;
- tgt = lmv_get_target(lmv, oinfo->lmo_mds, NULL);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
- }
-
- if (!fid_is_sane(&op_data->op_fid2))
- fid_zero(&op_data->op_fid2);
-
- CDEBUG(D_INODE, "LOOKUP_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%u lsm=%p lsm_magic=%x\n",
- PFID(&op_data->op_fid1), PFID(&op_data->op_fid2),
- op_data->op_name ? op_data->op_name : "<NULL>",
- tgt->ltd_idx, lsm, !lsm ? -1 : lsm->lsm_md_magic);
-
- op_data->op_bias &= ~MDS_CROSS_REF;
-
- rc = md_intent_lock(tgt->ltd_exp, op_data, it, reqp, cb_blocking,
- extra_lock_flags);
- if (rc < 0)
- return rc;
-
- if (!*reqp) {
- /*
- * If RPC happens, lsm information will be revalidated
- * during update_inode process (see ll_update_lsm_md)
- */
- if (op_data->op_mea2) {
- rc = lmv_revalidate_slaves(exp, op_data->op_mea2,
- cb_blocking,
- extra_lock_flags);
- if (rc != 0)
- return rc;
- }
- return rc;
- } else if (it_disposition(it, DISP_LOOKUP_NEG) && lsm &&
- lmv_need_try_all_stripes(lsm)) {
- /*
- * For migrating and unknown hash type directory, it will
- * try to target the entry on other stripes
- */
- int stripe_index;
-
- for (stripe_index = 1;
- stripe_index < lsm->lsm_md_stripe_count &&
- it_disposition(it, DISP_LOOKUP_NEG); stripe_index++) {
- struct lmv_oinfo *oinfo;
-
- /* release the previous request */
- ptlrpc_req_finished(*reqp);
- it->it_request = NULL;
- *reqp = NULL;
-
- oinfo = &lsm->lsm_md_oinfo[stripe_index];
- tgt = lmv_find_target(lmv, &oinfo->lmo_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- CDEBUG(D_INODE, "Try other stripes " DFID "\n",
- PFID(&oinfo->lmo_fid));
-
- op_data->op_fid1 = oinfo->lmo_fid;
- it->it_disposition &= ~DISP_ENQ_COMPLETE;
- rc = md_intent_lock(tgt->ltd_exp, op_data, it, reqp,
- cb_blocking, extra_lock_flags);
- if (rc)
- return rc;
- }
- }
-
- /*
- * MDS has returned success. Probably name has been resolved in
- * remote inode. Let's check this.
- */
- body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (!body)
- return -EPROTO;
-
- /* Not cross-ref case, just get out of here. */
- if (unlikely((body->mbo_valid & OBD_MD_MDS))) {
- rc = lmv_intent_remote(exp, it, NULL, reqp, cb_blocking,
- extra_lock_flags);
- if (rc != 0)
- return rc;
- body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (!body)
- return -EPROTO;
- }
-
- return rc;
-}
-
-int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
- struct lookup_intent *it, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- int rc;
-
- LASSERT(fid_is_sane(&op_data->op_fid1));
-
- CDEBUG(D_INODE, "INTENT LOCK '%s' for " DFID " '%*s' on " DFID "\n",
- LL_IT2STR(it), PFID(&op_data->op_fid2),
- (int)op_data->op_namelen, op_data->op_name,
- PFID(&op_data->op_fid1));
-
- if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_LAYOUT))
- rc = lmv_intent_lookup(exp, op_data, it, reqp, cb_blocking,
- extra_lock_flags);
- else if (it->it_op & IT_OPEN)
- rc = lmv_intent_open(exp, op_data, it, reqp, cb_blocking,
- extra_lock_flags);
- else
- LBUG();
-
- if (rc < 0) {
- struct lustre_handle lock_handle;
-
- if (it->it_lock_mode) {
- lock_handle.cookie = it->it_lock_handle;
- ldlm_lock_decref(&lock_handle, it->it_lock_mode);
- }
-
- it->it_lock_handle = 0;
- it->it_lock_mode = 0;
-
- if (it->it_remote_lock_mode) {
- lock_handle.cookie = it->it_remote_lock_handle;
- ldlm_lock_decref(&lock_handle,
- it->it_remote_lock_mode);
- }
-
- it->it_remote_lock_handle = 0;
- it->it_remote_lock_mode = 0;
- }
-
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
deleted file mode 100644
index c27c3c32188d..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LMV_INTERNAL_H_
-#define _LMV_INTERNAL_H_
-
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <obd.h>
-#include <lustre_lmv.h>
-
-#define LMV_MAX_TGT_COUNT 128
-
-#define LL_IT2STR(it) \
- ((it) ? ldlm_it2str((it)->it_op) : "0")
-
-int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
- struct lookup_intent *it, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags);
-
-int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid, u32 *mds);
-int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds);
-int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
- struct lu_fid *fid, struct md_op_data *op_data);
-
-int lmv_revalidate_slaves(struct obd_export *exp,
- const struct lmv_stripe_md *lsm,
- ldlm_blocking_callback cb_blocking,
- int extra_lock_flags);
-
-static inline struct obd_device *lmv2obd_dev(struct lmv_obd *lmv)
-{
- return container_of0(lmv, struct obd_device, u.lmv);
-}
-
-static inline struct lmv_tgt_desc *
-lmv_get_target(struct lmv_obd *lmv, u32 mdt_idx, int *index)
-{
- int i;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i])
- continue;
-
- if (lmv->tgts[i]->ltd_idx == mdt_idx) {
- if (index)
- *index = i;
- return lmv->tgts[i];
- }
- }
-
- return ERR_PTR(-ENODEV);
-}
-
-static inline int
-lmv_find_target_index(struct lmv_obd *lmv, const struct lu_fid *fid)
-{
- struct lmv_tgt_desc *ltd;
- u32 mdt_idx = 0;
- int index = 0;
-
- if (lmv->desc.ld_tgt_count > 1) {
- int rc;
-
- rc = lmv_fld_lookup(lmv, fid, &mdt_idx);
- if (rc < 0)
- return rc;
- }
-
- ltd = lmv_get_target(lmv, mdt_idx, &index);
- if (IS_ERR(ltd))
- return PTR_ERR(ltd);
-
- return index;
-}
-
-static inline struct lmv_tgt_desc *
-lmv_find_target(struct lmv_obd *lmv, const struct lu_fid *fid)
-{
- int index;
-
- index = lmv_find_target_index(lmv, fid);
- if (index < 0)
- return ERR_PTR(index);
-
- return lmv->tgts[index];
-}
-
-static inline int lmv_stripe_md_size(int stripe_count)
-{
- struct lmv_stripe_md *lsm;
-
- return sizeof(*lsm) + stripe_count * sizeof(lsm->lsm_md_oinfo[0]);
-}
-
-int lmv_name_to_stripe_index(enum lmv_hash_type hashtype,
- unsigned int max_mdt_index,
- const char *name, int namelen);
-
-static inline const struct lmv_oinfo *
-lsm_name_to_stripe_info(const struct lmv_stripe_md *lsm, const char *name,
- int namelen)
-{
- int stripe_index;
-
- stripe_index = lmv_name_to_stripe_index(lsm->lsm_md_hash_type,
- lsm->lsm_md_stripe_count,
- name, namelen);
- if (stripe_index < 0)
- return ERR_PTR(stripe_index);
-
- LASSERTF(stripe_index < lsm->lsm_md_stripe_count,
- "stripe_index = %d, stripe_count = %d hash_type = %x name = %.*s\n",
- stripe_index, lsm->lsm_md_stripe_count,
- lsm->lsm_md_hash_type, namelen, name);
-
- return &lsm->lsm_md_oinfo[stripe_index];
-}
-
-static inline bool lmv_need_try_all_stripes(const struct lmv_stripe_md *lsm)
-{
- return !lmv_is_known_hash_type(lsm->lsm_md_hash_type) ||
- lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION;
-}
-
-struct lmv_tgt_desc
-*lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
- struct lu_fid *fid);
-/* lproc_lmv.c */
-void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars);
-
-extern const struct file_operations lmv_proc_target_fops;
-
-#endif
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
deleted file mode 100644
index e8a9b9902c37..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ /dev/null
@@ -1,3131 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LMV
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pagemap.h>
-#include <linux/mm.h>
-#include <asm/div64.h>
-#include <linux/seq_file.h>
-#include <linux/namei.h>
-#include <linux/uaccess.h>
-
-#include <obd_support.h>
-#include <lustre_net.h>
-#include <obd_class.h>
-#include <lustre_lmv.h>
-#include <lprocfs_status.h>
-#include <cl_object.h>
-#include <lustre_fid.h>
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include <lustre_kernelcomm.h>
-#include "lmv_internal.h"
-
-static int lmv_check_connect(struct obd_device *obd);
-
-static void lmv_activate_target(struct lmv_obd *lmv,
- struct lmv_tgt_desc *tgt,
- int activate)
-{
- if (tgt->ltd_active == activate)
- return;
-
- tgt->ltd_active = activate;
- lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
- tgt->ltd_exp->exp_obd->obd_inactive = !activate;
-}
-
-/**
- * Error codes:
- *
- * -EINVAL : UUID can't be found in the LMV's target list
- * -ENOTCONN: The UUID is found, but the target connection is bad (!)
- * -EBADF : The UUID is found, but the OBD of the wrong type (!)
- */
-static int lmv_set_mdc_active(struct lmv_obd *lmv, const struct obd_uuid *uuid,
- int activate)
-{
- struct lmv_tgt_desc *tgt = NULL;
- struct obd_device *obd;
- u32 i;
- int rc = 0;
-
- CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
- lmv, uuid->uuid, activate);
-
- spin_lock(&lmv->lmv_lock);
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp)
- continue;
-
- CDEBUG(D_INFO, "Target idx %d is %s conn %#llx\n", i,
- tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
-
- if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
- break;
- }
-
- if (i == lmv->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto out_lmv_lock;
- }
-
- obd = class_exp2obd(tgt->ltd_exp);
- if (!obd) {
- rc = -ENOTCONN;
- goto out_lmv_lock;
- }
-
- CDEBUG(D_INFO, "Found OBD %s=%s device %d (%p) type %s at LMV idx %d\n",
- obd->obd_name, obd->obd_uuid.uuid, obd->obd_minor, obd,
- obd->obd_type->typ_name, i);
- LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0);
-
- if (tgt->ltd_active == activate) {
- CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd,
- activate ? "" : "in");
- goto out_lmv_lock;
- }
-
- CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd,
- activate ? "" : "in");
- lmv_activate_target(lmv, tgt, activate);
-
- out_lmv_lock:
- spin_unlock(&lmv->lmv_lock);
- return rc;
-}
-
-static struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
-{
- struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
- struct lmv_tgt_desc *tgt = lmv->tgts[0];
-
- return tgt ? obd_get_uuid(tgt->ltd_exp) : NULL;
-}
-
-static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
- enum obd_notify_event ev, void *data)
-{
- struct obd_connect_data *conn_data;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_uuid *uuid;
- int rc = 0;
-
- if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
- CERROR("unexpected notification of %s %s!\n",
- watched->obd_type->typ_name,
- watched->obd_name);
- return -EINVAL;
- }
-
- uuid = &watched->u.cli.cl_target_uuid;
- if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) {
- /*
- * Set MDC as active before notifying the observer, so the
- * observer can use the MDC normally.
- */
- rc = lmv_set_mdc_active(lmv, uuid,
- ev == OBD_NOTIFY_ACTIVE);
- if (rc) {
- CERROR("%sactivation of %s failed: %d\n",
- ev == OBD_NOTIFY_ACTIVE ? "" : "de",
- uuid->uuid, rc);
- return rc;
- }
- } else if (ev == OBD_NOTIFY_OCD) {
- conn_data = &watched->u.cli.cl_import->imp_connect_data;
- /*
- * XXX: Make sure that ocd_connect_flags from all targets are
- * the same. Otherwise one of MDTs runs wrong version or
- * something like this. --umka
- */
- obd->obd_self_export->exp_connect_data = *conn_data;
- }
-
- /*
- * Pass the notification up the chain.
- */
- if (obd->obd_observer)
- rc = obd_notify(obd->obd_observer, watched, ev, data);
-
- return rc;
-}
-
-static int lmv_connect(const struct lu_env *env,
- struct obd_export **pexp, struct obd_device *obd,
- struct obd_uuid *cluuid, struct obd_connect_data *data,
- void *localdata)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lustre_handle conn = { 0 };
- struct obd_export *exp;
- int rc = 0;
-
- rc = class_connect(&conn, obd, cluuid);
- if (rc) {
- CERROR("class_connection() returned %d\n", rc);
- return rc;
- }
-
- exp = class_conn2export(&conn);
-
- lmv->connected = 0;
- lmv->cluuid = *cluuid;
- lmv->conn_data = *data;
-
- lmv->lmv_tgts_kobj = kobject_create_and_add("target_obds",
- &obd->obd_kobj);
- rc = lmv_check_connect(obd);
- if (rc)
- goto out_sysfs;
-
- *pexp = exp;
-
- return rc;
-
-out_sysfs:
- if (lmv->lmv_tgts_kobj)
- kobject_put(lmv->lmv_tgts_kobj);
-
- class_disconnect(exp);
-
- return rc;
-}
-
-static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- u32 i;
- int rc = 0;
- int change = 0;
-
- if (lmv->max_easize < easize) {
- lmv->max_easize = easize;
- change = 1;
- }
- if (lmv->max_def_easize < def_easize) {
- lmv->max_def_easize = def_easize;
- change = 1;
- }
-
- if (change == 0)
- return 0;
-
- if (lmv->connected == 0)
- return 0;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- struct lmv_tgt_desc *tgt = lmv->tgts[i];
-
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
- CWARN("%s: NULL export for %d\n", obd->obd_name, i);
- continue;
- }
-
- rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize);
- if (rc) {
- CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
- obd->obd_name, i, rc);
- break;
- }
- }
- return rc;
-}
-
-#define MAX_STRING_SIZE 128
-
-static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_uuid *cluuid = &lmv->cluuid;
- struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" };
- struct obd_device *mdc_obd;
- struct obd_export *mdc_exp;
- struct lu_fld_target target;
- int rc;
-
- mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
- &obd->obd_uuid);
- if (!mdc_obd) {
- CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
- return -EINVAL;
- }
-
- CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
- mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
- tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, cluuid->uuid);
-
- if (!mdc_obd->obd_set_up) {
- CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
- return -EINVAL;
- }
-
- rc = obd_connect(NULL, &mdc_exp, mdc_obd, &lmv_mdc_uuid,
- &lmv->conn_data, NULL);
- if (rc) {
- CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
- return rc;
- }
-
- /*
- * Init fid sequence client for this mdc and add new fld target.
- */
- rc = obd_fid_init(mdc_obd, mdc_exp, LUSTRE_SEQ_METADATA);
- if (rc)
- return rc;
-
- target.ft_srv = NULL;
- target.ft_exp = mdc_exp;
- target.ft_idx = tgt->ltd_idx;
-
- fld_client_add_target(&lmv->lmv_fld, &target);
-
- rc = obd_register_observer(mdc_obd, obd);
- if (rc) {
- obd_disconnect(mdc_exp);
- CERROR("target %s register_observer error %d\n",
- tgt->ltd_uuid.uuid, rc);
- return rc;
- }
-
- if (obd->obd_observer) {
- /*
- * Tell the observer about the new target.
- */
- rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd,
- OBD_NOTIFY_ACTIVE,
- (void *)(tgt - lmv->tgts[0]));
- if (rc) {
- obd_disconnect(mdc_exp);
- return rc;
- }
- }
-
- tgt->ltd_active = 1;
- tgt->ltd_exp = mdc_exp;
- lmv->desc.ld_active_tgt_count++;
-
- md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize);
-
- CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
- mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
-
- if (lmv->lmv_tgts_kobj)
- /* Even if we failed to create the link, that's fine */
- rc = sysfs_create_link(lmv->lmv_tgts_kobj, &mdc_obd->obd_kobj,
- mdc_obd->obd_name);
- return 0;
-}
-
-static void lmv_del_target(struct lmv_obd *lmv, int index)
-{
- if (!lmv->tgts[index])
- return;
-
- kfree(lmv->tgts[index]);
- lmv->tgts[index] = NULL;
-}
-
-static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
- __u32 index, int gen)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_device *mdc_obd;
- struct lmv_tgt_desc *tgt;
- int orig_tgt_count = 0;
- int rc = 0;
-
- CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
-
- mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
- &obd->obd_uuid);
- if (!mdc_obd) {
- CERROR("%s: Target %s not attached: rc = %d\n",
- obd->obd_name, uuidp->uuid, -EINVAL);
- return -EINVAL;
- }
-
- mutex_lock(&lmv->lmv_init_mutex);
-
- if ((index < lmv->tgts_size) && lmv->tgts[index]) {
- tgt = lmv->tgts[index];
- CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
- obd->obd_name,
- obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
- mutex_unlock(&lmv->lmv_init_mutex);
- return -EEXIST;
- }
-
- if (index >= lmv->tgts_size) {
- /* We need to reallocate the lmv target array. */
- struct lmv_tgt_desc **newtgts, **old = NULL;
- __u32 newsize = 1;
- __u32 oldsize = 0;
-
- while (newsize < index + 1)
- newsize <<= 1;
- newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
- if (!newtgts) {
- mutex_unlock(&lmv->lmv_init_mutex);
- return -ENOMEM;
- }
-
- if (lmv->tgts_size) {
- memcpy(newtgts, lmv->tgts,
- sizeof(*newtgts) * lmv->tgts_size);
- old = lmv->tgts;
- oldsize = lmv->tgts_size;
- }
-
- lmv->tgts = newtgts;
- lmv->tgts_size = newsize;
- smp_rmb();
- kfree(old);
-
- CDEBUG(D_CONFIG, "tgts: %p size: %d\n", lmv->tgts,
- lmv->tgts_size);
- }
-
- tgt = kzalloc(sizeof(*tgt), GFP_NOFS);
- if (!tgt) {
- mutex_unlock(&lmv->lmv_init_mutex);
- return -ENOMEM;
- }
-
- mutex_init(&tgt->ltd_fid_mutex);
- tgt->ltd_idx = index;
- tgt->ltd_uuid = *uuidp;
- tgt->ltd_active = 0;
- lmv->tgts[index] = tgt;
- if (index >= lmv->desc.ld_tgt_count) {
- orig_tgt_count = lmv->desc.ld_tgt_count;
- lmv->desc.ld_tgt_count = index + 1;
- }
-
- if (!lmv->connected) {
- /* lmv_check_connect() will connect this target. */
- mutex_unlock(&lmv->lmv_init_mutex);
- return rc;
- }
-
- /* Otherwise let's connect it ourselves */
- mutex_unlock(&lmv->lmv_init_mutex);
- rc = lmv_connect_mdc(obd, tgt);
- if (rc) {
- spin_lock(&lmv->lmv_lock);
- if (lmv->desc.ld_tgt_count == index + 1)
- lmv->desc.ld_tgt_count = orig_tgt_count;
- memset(tgt, 0, sizeof(*tgt));
- spin_unlock(&lmv->lmv_lock);
- } else {
- int easize = sizeof(struct lmv_stripe_md) +
- lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
- lmv_init_ea_size(obd->obd_self_export, easize, 0);
- }
-
- return rc;
-}
-
-static int lmv_check_connect(struct obd_device *obd)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- u32 i;
- int rc;
- int easize;
-
- if (lmv->connected)
- return 0;
-
- mutex_lock(&lmv->lmv_init_mutex);
- if (lmv->connected) {
- mutex_unlock(&lmv->lmv_init_mutex);
- return 0;
- }
-
- if (lmv->desc.ld_tgt_count == 0) {
- mutex_unlock(&lmv->lmv_init_mutex);
- CERROR("%s: no targets configured.\n", obd->obd_name);
- return -EINVAL;
- }
-
- LASSERT(lmv->tgts);
-
- if (!lmv->tgts[0]) {
- mutex_unlock(&lmv->lmv_init_mutex);
- CERROR("%s: no target configured for index 0.\n",
- obd->obd_name);
- return -EINVAL;
- }
-
- CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
- lmv->cluuid.uuid, obd->obd_name);
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
- if (!tgt)
- continue;
- rc = lmv_connect_mdc(obd, tgt);
- if (rc)
- goto out_disc;
- }
-
- lmv->connected = 1;
- easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC);
- lmv_init_ea_size(obd->obd_self_export, easize, 0);
- mutex_unlock(&lmv->lmv_init_mutex);
- return 0;
-
- out_disc:
- while (i-- > 0) {
- int rc2;
-
- tgt = lmv->tgts[i];
- if (!tgt)
- continue;
- tgt->ltd_active = 0;
- if (tgt->ltd_exp) {
- --lmv->desc.ld_active_tgt_count;
- rc2 = obd_disconnect(tgt->ltd_exp);
- if (rc2) {
- CERROR("LMV target %s disconnect on MDC idx %d: error %d\n",
- tgt->ltd_uuid.uuid, i, rc2);
- }
- }
- }
-
- mutex_unlock(&lmv->lmv_init_mutex);
- return rc;
-}
-
-static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_device *mdc_obd;
- int rc;
-
- mdc_obd = class_exp2obd(tgt->ltd_exp);
-
- if (mdc_obd) {
- mdc_obd->obd_force = obd->obd_force;
- mdc_obd->obd_fail = obd->obd_fail;
- mdc_obd->obd_no_recov = obd->obd_no_recov;
-
- if (lmv->lmv_tgts_kobj)
- sysfs_remove_link(lmv->lmv_tgts_kobj,
- mdc_obd->obd_name);
- }
-
- rc = obd_fid_fini(tgt->ltd_exp->exp_obd);
- if (rc)
- CERROR("Can't finalize fids factory\n");
-
- CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
- tgt->ltd_exp->exp_obd->obd_name,
- tgt->ltd_exp->exp_obd->obd_uuid.uuid);
-
- obd_register_observer(tgt->ltd_exp->exp_obd, NULL);
- rc = obd_disconnect(tgt->ltd_exp);
- if (rc) {
- if (tgt->ltd_active) {
- CERROR("Target %s disconnect error %d\n",
- tgt->ltd_uuid.uuid, rc);
- }
- }
-
- lmv_activate_target(lmv, tgt, 0);
- tgt->ltd_exp = NULL;
- return 0;
-}
-
-static int lmv_disconnect(struct obd_export *exp)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- int rc;
- u32 i;
-
- if (!lmv->tgts)
- goto out_local;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
- continue;
-
- lmv_disconnect_mdc(obd, lmv->tgts[i]);
- }
-
- if (lmv->lmv_tgts_kobj)
- kobject_put(lmv->lmv_tgts_kobj);
-
-out_local:
- /*
- * This is the case when no real connection is established by
- * lmv_check_connect().
- */
- if (!lmv->connected)
- class_export_put(exp);
- rc = class_disconnect(exp);
- lmv->connected = 0;
- return rc;
-}
-
-static int lmv_fid2path(struct obd_export *exp, int len, void *karg,
- void __user *uarg)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lmv_obd *lmv = &obddev->u.lmv;
- struct getinfo_fid2path *gf;
- struct lmv_tgt_desc *tgt;
- struct getinfo_fid2path *remote_gf = NULL;
- int remote_gf_size = 0;
- int rc;
-
- gf = karg;
- tgt = lmv_find_target(lmv, &gf->gf_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
-repeat_fid2path:
- rc = obd_iocontrol(OBD_IOC_FID2PATH, tgt->ltd_exp, len, gf, uarg);
- if (rc != 0 && rc != -EREMOTE)
- goto out_fid2path;
-
- /* If remote_gf != NULL, it means just building the
- * path on the remote MDT, copy this path segment to gf
- */
- if (remote_gf) {
- struct getinfo_fid2path *ori_gf;
- char *ptr;
-
- ori_gf = karg;
- if (strlen(ori_gf->gf_path) + 1 +
- strlen(gf->gf_path) + 1 > ori_gf->gf_pathlen) {
- rc = -EOVERFLOW;
- goto out_fid2path;
- }
-
- ptr = ori_gf->gf_path;
-
- memmove(ptr + strlen(gf->gf_path) + 1, ptr,
- strlen(ori_gf->gf_path));
-
- strncpy(ptr, gf->gf_path, strlen(gf->gf_path));
- ptr += strlen(gf->gf_path);
- *ptr = '/';
- }
-
- CDEBUG(D_INFO, "%s: get path %s " DFID " rec: %llu ln: %u\n",
- tgt->ltd_exp->exp_obd->obd_name,
- gf->gf_path, PFID(&gf->gf_fid), gf->gf_recno,
- gf->gf_linkno);
-
- if (rc == 0)
- goto out_fid2path;
-
- /* sigh, has to go to another MDT to do path building further */
- if (!remote_gf) {
- remote_gf_size = sizeof(*remote_gf) + PATH_MAX;
- remote_gf = kzalloc(remote_gf_size, GFP_NOFS);
- if (!remote_gf) {
- rc = -ENOMEM;
- goto out_fid2path;
- }
- remote_gf->gf_pathlen = PATH_MAX;
- }
-
- if (!fid_is_sane(&gf->gf_fid)) {
- CERROR("%s: invalid FID " DFID ": rc = %d\n",
- tgt->ltd_exp->exp_obd->obd_name,
- PFID(&gf->gf_fid), -EINVAL);
- rc = -EINVAL;
- goto out_fid2path;
- }
-
- tgt = lmv_find_target(lmv, &gf->gf_fid);
- if (IS_ERR(tgt)) {
- rc = -EINVAL;
- goto out_fid2path;
- }
-
- remote_gf->gf_fid = gf->gf_fid;
- remote_gf->gf_recno = -1;
- remote_gf->gf_linkno = -1;
- memset(remote_gf->gf_path, 0, remote_gf->gf_pathlen);
- gf = remote_gf;
- goto repeat_fid2path;
-
-out_fid2path:
- kfree(remote_gf);
- return rc;
-}
-
-static int lmv_hsm_req_count(struct lmv_obd *lmv,
- const struct hsm_user_request *hur,
- const struct lmv_tgt_desc *tgt_mds)
-{
- u32 i, nr = 0;
- struct lmv_tgt_desc *curr_tgt;
-
- /* count how many requests must be sent to the given target */
- for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
- curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
- if (IS_ERR(curr_tgt))
- return PTR_ERR(curr_tgt);
- if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
- nr++;
- }
- return nr;
-}
-
-static int lmv_hsm_req_build(struct lmv_obd *lmv,
- struct hsm_user_request *hur_in,
- const struct lmv_tgt_desc *tgt_mds,
- struct hsm_user_request *hur_out)
-{
- int i, nr_out;
- struct lmv_tgt_desc *curr_tgt;
-
- /* build the hsm_user_request for the given target */
- hur_out->hur_request = hur_in->hur_request;
- nr_out = 0;
- for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
- curr_tgt = lmv_find_target(lmv,
- &hur_in->hur_user_item[i].hui_fid);
- if (IS_ERR(curr_tgt))
- return PTR_ERR(curr_tgt);
- if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
- hur_out->hur_user_item[nr_out] =
- hur_in->hur_user_item[i];
- nr_out++;
- }
- }
- hur_out->hur_request.hr_itemcount = nr_out;
- memcpy(hur_data(hur_out), hur_data(hur_in),
- hur_in->hur_request.hr_data_len);
-
- return 0;
-}
-
-static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
- struct lustre_kernelcomm *lk,
- void __user *uarg)
-{
- __u32 i;
-
- /* unregister request (call from llapi_hsm_copytool_fini) */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- struct lmv_tgt_desc *tgt = lmv->tgts[i];
-
- if (!tgt || !tgt->ltd_exp)
- continue;
-
- /* best effort: try to clean as much as possible
- * (continue on error)
- */
- obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
- }
-
- /* Whatever the result, remove copytool from kuc groups.
- * Unreached coordinators will get EPIPE on next requests
- * and will unregister automatically.
- */
- return libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group);
-}
-
-static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
- struct lustre_kernelcomm *lk, void __user *uarg)
-{
- struct file *filp;
- __u32 i, j;
- int err, rc = 0;
- bool any_set = false;
- struct kkuc_ct_data kcd = { 0 };
-
- /* All or nothing: try to register to all MDS.
- * In case of failure, unregister from previous MDS,
- * except if it because of inactive target.
- */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- struct lmv_tgt_desc *tgt = lmv->tgts[i];
-
- if (!tgt || !tgt->ltd_exp)
- continue;
-
- err = obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg);
- if (err) {
- if (tgt->ltd_active) {
- /* permanent error */
- CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- tgt->ltd_uuid.uuid, i, cmd, err);
- rc = err;
- lk->lk_flags |= LK_FLG_STOP;
- /* unregister from previous MDS */
- for (j = 0; j < i; j++) {
- tgt = lmv->tgts[j];
-
- if (!tgt || !tgt->ltd_exp)
- continue;
- obd_iocontrol(cmd, tgt->ltd_exp, len,
- lk, uarg);
- }
- return rc;
- }
- /* else: transient error.
- * kuc will register to the missing MDT when it is back
- */
- } else {
- any_set = true;
- }
- }
-
- if (!any_set)
- /* no registration done: return error */
- return -ENOTCONN;
-
- /* at least one registration done, with no failure */
- filp = fget(lk->lk_wfd);
- if (!filp)
- return -EBADF;
-
- kcd.kcd_magic = KKUC_CT_DATA_MAGIC;
- kcd.kcd_uuid = lmv->cluuid;
- kcd.kcd_archive = lk->lk_data;
-
- rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group,
- &kcd, sizeof(kcd));
- if (rc)
- fput(filp);
-
- return rc;
-}
-
-static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
- int len, void *karg, void __user *uarg)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lmv_obd *lmv = &obddev->u.lmv;
- struct lmv_tgt_desc *tgt = NULL;
- u32 i = 0;
- int rc = 0;
- int set = 0;
- u32 count = lmv->desc.ld_tgt_count;
-
- if (count == 0)
- return -ENOTTY;
-
- switch (cmd) {
- case IOC_OBD_STATFS: {
- struct obd_ioctl_data *data = karg;
- struct obd_device *mdc_obd;
- struct obd_statfs stat_buf = {0};
- __u32 index;
-
- memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
- if (index >= count)
- return -ENODEV;
-
- tgt = lmv->tgts[index];
- if (!tgt || !tgt->ltd_active)
- return -ENODATA;
-
- mdc_obd = class_exp2obd(tgt->ltd_exp);
- if (!mdc_obd)
- return -EINVAL;
-
- /* copy UUID */
- if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
- min((int)data->ioc_plen2,
- (int)sizeof(struct obd_uuid))))
- return -EFAULT;
-
- rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- 0);
- if (rc)
- return rc;
- if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min((int)data->ioc_plen1,
- (int)sizeof(stat_buf))))
- return -EFAULT;
- break;
- }
- case OBD_IOC_QUOTACTL: {
- struct if_quotactl *qctl = karg;
- struct obd_quotactl *oqctl;
-
- if (qctl->qc_valid == QC_MDTIDX) {
- if (count <= qctl->qc_idx)
- return -EINVAL;
-
- tgt = lmv->tgts[qctl->qc_idx];
- if (!tgt || !tgt->ltd_exp)
- return -EINVAL;
- } else if (qctl->qc_valid == QC_UUID) {
- for (i = 0; i < count; i++) {
- tgt = lmv->tgts[i];
- if (!tgt)
- continue;
- if (!obd_uuid_equals(&tgt->ltd_uuid,
- &qctl->obd_uuid))
- continue;
-
- if (!tgt->ltd_exp)
- return -EINVAL;
-
- break;
- }
- } else {
- return -EINVAL;
- }
-
- if (i >= count)
- return -EAGAIN;
-
- LASSERT(tgt && tgt->ltd_exp);
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl)
- return -ENOMEM;
-
- QCTL_COPY(oqctl, qctl);
- rc = obd_quotactl(tgt->ltd_exp, oqctl);
- if (rc == 0) {
- QCTL_COPY(qctl, oqctl);
- qctl->qc_valid = QC_MDTIDX;
- qctl->obd_uuid = tgt->ltd_uuid;
- }
- kfree(oqctl);
- break;
- }
- case OBD_IOC_CHANGELOG_SEND:
- case OBD_IOC_CHANGELOG_CLEAR: {
- struct ioc_changelog *icc = karg;
-
- if (icc->icc_mdtindex >= count)
- return -ENODEV;
-
- tgt = lmv->tgts[icc->icc_mdtindex];
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
- return -ENODEV;
- rc = obd_iocontrol(cmd, tgt->ltd_exp, sizeof(*icc), icc, NULL);
- break;
- }
- case LL_IOC_GET_CONNECT_FLAGS: {
- tgt = lmv->tgts[0];
-
- if (!tgt || !tgt->ltd_exp)
- return -ENODATA;
- rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- break;
- }
- case LL_IOC_FID2MDTIDX: {
- struct lu_fid *fid = karg;
- int mdt_index;
-
- rc = lmv_fld_lookup(lmv, fid, &mdt_index);
- if (rc)
- return rc;
-
- /*
- * Note: this is from llite(see ll_dir_ioctl()), @uarg does not
- * point to user space memory for FID2MDTIDX.
- */
- *(__u32 *)uarg = mdt_index;
- break;
- }
- case OBD_IOC_FID2PATH: {
- rc = lmv_fid2path(exp, len, karg, uarg);
- break;
- }
- case LL_IOC_HSM_STATE_GET:
- case LL_IOC_HSM_STATE_SET:
- case LL_IOC_HSM_ACTION: {
- struct md_op_data *op_data = karg;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- if (!tgt->ltd_exp)
- return -EINVAL;
-
- rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- break;
- }
- case LL_IOC_HSM_PROGRESS: {
- const struct hsm_progress_kernel *hpk = karg;
-
- tgt = lmv_find_target(lmv, &hpk->hpk_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
- rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- break;
- }
- case LL_IOC_HSM_REQUEST: {
- struct hsm_user_request *hur = karg;
- unsigned int reqcount = hur->hur_request.hr_itemcount;
-
- if (reqcount == 0)
- return 0;
-
- /* if the request is about a single fid
- * or if there is a single MDS, no need to split
- * the request.
- */
- if (reqcount == 1 || count == 1) {
- tgt = lmv_find_target(lmv,
- &hur->hur_user_item[0].hui_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
- rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- } else {
- /* split fid list to their respective MDS */
- for (i = 0; i < count; i++) {
- struct hsm_user_request *req;
- size_t reqlen;
- int nr, rc1;
-
- tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp)
- continue;
-
- nr = lmv_hsm_req_count(lmv, hur, tgt);
- if (nr < 0)
- return nr;
- if (nr == 0) /* nothing for this MDS */
- continue;
-
- /* build a request with fids for this MDS */
- reqlen = offsetof(typeof(*hur),
- hur_user_item[nr])
- + hur->hur_request.hr_data_len;
- req = kvzalloc(reqlen, GFP_NOFS);
- if (!req)
- return -ENOMEM;
-
- rc1 = lmv_hsm_req_build(lmv, hur, tgt, req);
- if (rc1 < 0)
- goto hsm_req_err;
-
- rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
- req, uarg);
-hsm_req_err:
- if (rc1 != 0 && rc == 0)
- rc = rc1;
- kvfree(req);
- }
- }
- break;
- }
- case LL_IOC_LOV_SWAP_LAYOUTS: {
- struct md_op_data *op_data = karg;
- struct lmv_tgt_desc *tgt1, *tgt2;
-
- tgt1 = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt1))
- return PTR_ERR(tgt1);
-
- tgt2 = lmv_find_target(lmv, &op_data->op_fid2);
- if (IS_ERR(tgt2))
- return PTR_ERR(tgt2);
-
- if (!tgt1->ltd_exp || !tgt2->ltd_exp)
- return -EINVAL;
-
- /* only files on same MDT can have their layouts swapped */
- if (tgt1->ltd_idx != tgt2->ltd_idx)
- return -EPERM;
-
- rc = obd_iocontrol(cmd, tgt1->ltd_exp, len, karg, uarg);
- break;
- }
- case LL_IOC_HSM_CT_START: {
- struct lustre_kernelcomm *lk = karg;
-
- if (lk->lk_flags & LK_FLG_STOP)
- rc = lmv_hsm_ct_unregister(lmv, cmd, len, lk, uarg);
- else
- rc = lmv_hsm_ct_register(lmv, cmd, len, lk, uarg);
- break;
- }
- default:
- for (i = 0; i < count; i++) {
- struct obd_device *mdc_obd;
- int err;
-
- tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp)
- continue;
- /* ll_umount_begin() sets force flag but for lmv, not
- * mdc. Let's pass it through
- */
- mdc_obd = class_exp2obd(tgt->ltd_exp);
- mdc_obd->obd_force = obddev->obd_force;
- err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- if (err) {
- if (tgt->ltd_active) {
- CERROR("%s: error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- lmv2obd_dev(lmv)->obd_name,
- tgt->ltd_uuid.uuid, i, cmd, err);
- if (!rc)
- rc = err;
- }
- } else {
- set = 1;
- }
- }
- if (!set && !rc)
- rc = -EIO;
- }
- return rc;
-}
-
-/**
- * This is _inode_ placement policy function (not name).
- */
-static int lmv_placement_policy(struct obd_device *obd,
- struct md_op_data *op_data, u32 *mds)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
-
- LASSERT(mds);
-
- if (lmv->desc.ld_tgt_count == 1) {
- *mds = 0;
- return 0;
- }
-
- if (op_data->op_default_stripe_offset != -1) {
- *mds = op_data->op_default_stripe_offset;
- return 0;
- }
-
- /**
- * If stripe_offset is provided during setdirstripe
- * (setdirstripe -i xx), xx MDS will be chosen.
- */
- if (op_data->op_cli_flags & CLI_SET_MEA && op_data->op_data) {
- struct lmv_user_md *lum;
-
- lum = op_data->op_data;
- if (le32_to_cpu(lum->lum_stripe_offset) != (__u32)-1) {
- *mds = le32_to_cpu(lum->lum_stripe_offset);
- } else {
- /*
- * -1 means default, which will be in the same MDT with
- * the stripe
- */
- *mds = op_data->op_mds;
- lum->lum_stripe_offset = cpu_to_le32(op_data->op_mds);
- }
- } else {
- /*
- * Allocate new fid on target according to operation type and
- * parent home mds.
- */
- *mds = op_data->op_mds;
- }
-
- return 0;
-}
-
-int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds)
-{
- struct lmv_tgt_desc *tgt;
- int rc;
-
- tgt = lmv_get_target(lmv, mds, NULL);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- /*
- * New seq alloc and FLD setup should be atomic. Otherwise we may find
- * on server that seq in new allocated fid is not yet known.
- */
- mutex_lock(&tgt->ltd_fid_mutex);
-
- if (tgt->ltd_active == 0 || !tgt->ltd_exp) {
- rc = -ENODEV;
- goto out;
- }
-
- /*
- * Asking underlaying tgt layer to allocate new fid.
- */
- rc = obd_fid_alloc(NULL, tgt->ltd_exp, fid, NULL);
- if (rc > 0) {
- LASSERT(fid_is_sane(fid));
- rc = 0;
- }
-
-out:
- mutex_unlock(&tgt->ltd_fid_mutex);
- return rc;
-}
-
-int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
- struct lu_fid *fid, struct md_op_data *op_data)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- u32 mds = 0;
- int rc;
-
- LASSERT(op_data);
- LASSERT(fid);
-
- rc = lmv_placement_policy(obd, op_data, &mds);
- if (rc) {
- CERROR("Can't get target for allocating fid, rc %d\n",
- rc);
- return rc;
- }
-
- rc = __lmv_fid_alloc(lmv, fid, mds);
- if (rc) {
- CERROR("Can't alloc new fid, rc %d\n", rc);
- return rc;
- }
-
- return rc;
-}
-
-static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lprocfs_static_vars lvars = { NULL };
- struct lmv_desc *desc;
- int rc;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
- CERROR("LMV setup requires a descriptor\n");
- return -EINVAL;
- }
-
- desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1);
- if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
- CERROR("Lmv descriptor size wrong: %d > %d\n",
- (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
- return -EINVAL;
- }
-
- lmv->tgts_size = 32U;
- lmv->tgts = kcalloc(lmv->tgts_size, sizeof(*lmv->tgts), GFP_NOFS);
- if (!lmv->tgts)
- return -ENOMEM;
-
- obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
- lmv->desc.ld_tgt_count = 0;
- lmv->desc.ld_active_tgt_count = 0;
- lmv->max_def_easize = 0;
- lmv->max_easize = 0;
-
- spin_lock_init(&lmv->lmv_lock);
- mutex_init(&lmv->lmv_init_mutex);
-
- lprocfs_lmv_init_vars(&lvars);
-
- lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
- rc = ldebugfs_seq_create(obd->obd_debugfs_entry, "target_obd",
- 0444, &lmv_proc_target_fops, obd);
- if (rc)
- CWARN("%s: error adding LMV target_obd file: rc = %d\n",
- obd->obd_name, rc);
- rc = fld_client_init(&lmv->lmv_fld, obd->obd_name,
- LUSTRE_CLI_FLD_HASH_DHT);
- if (rc) {
- CERROR("Can't init FLD, err %d\n", rc);
- goto out;
- }
-
- return 0;
-
-out:
- return rc;
-}
-
-static int lmv_cleanup(struct obd_device *obd)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
-
- fld_client_fini(&lmv->lmv_fld);
- if (lmv->tgts) {
- int i;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i])
- continue;
- lmv_del_target(lmv, i);
- }
- kfree(lmv->tgts);
- lmv->tgts_size = 0;
- }
- return 0;
-}
-
-static int lmv_process_config(struct obd_device *obd, u32 len, void *buf)
-{
- struct lustre_cfg *lcfg = buf;
- struct obd_uuid obd_uuid;
- int gen;
- __u32 index;
- int rc;
-
- switch (lcfg->lcfg_command) {
- case LCFG_ADD_MDC:
- /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID
- * 2:0 3:1 4:lustre-MDT0000-mdc_UUID
- */
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) {
- rc = -EINVAL;
- goto out;
- }
-
- obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1));
-
- if (sscanf(lustre_cfg_buf(lcfg, 2), "%u", &index) != 1) {
- rc = -EINVAL;
- goto out;
- }
- if (sscanf(lustre_cfg_buf(lcfg, 3), "%d", &gen) != 1) {
- rc = -EINVAL;
- goto out;
- }
- rc = lmv_add_target(obd, &obd_uuid, index, gen);
- goto out;
- default:
- CERROR("Unknown command: %d\n", lcfg->lcfg_command);
- rc = -EINVAL;
- goto out;
- }
-out:
- return rc;
-}
-
-static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age, __u32 flags)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_statfs *temp;
- int rc = 0;
- u32 i;
-
- temp = kzalloc(sizeof(*temp), GFP_NOFS);
- if (!temp)
- return -ENOMEM;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
- continue;
-
- rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp,
- max_age, flags);
- if (rc) {
- CERROR("can't stat MDS #%d (%s), error %d\n", i,
- lmv->tgts[i]->ltd_exp->exp_obd->obd_name,
- rc);
- goto out_free_temp;
- }
-
- if (i == 0) {
- *osfs = *temp;
- /* If the statfs is from mount, it will needs
- * retrieve necessary information from MDT0.
- * i.e. mount does not need the merged osfs
- * from all of MDT.
- * And also clients can be mounted as long as
- * MDT0 is in service
- */
- if (flags & OBD_STATFS_FOR_MDT0)
- goto out_free_temp;
- } else {
- osfs->os_bavail += temp->os_bavail;
- osfs->os_blocks += temp->os_blocks;
- osfs->os_ffree += temp->os_ffree;
- osfs->os_files += temp->os_files;
- }
- }
-
-out_free_temp:
- kfree(temp);
- return rc;
-}
-
-static int lmv_getstatus(struct obd_export *exp,
- struct lu_fid *fid)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
-
- return md_getstatus(lmv->tgts[0]->ltd_exp, fid);
-}
-
-static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *name,
- const char *input, int input_size, int output_size,
- int flags, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_getxattr(tgt->ltd_exp, fid, valid, name, input,
- input_size, output_size, flags, request);
-}
-
-static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *name,
- const char *input, int input_size, int output_size,
- int flags, __u32 suppgid,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_setxattr(tgt->ltd_exp, fid, valid, name, input,
- input_size, output_size, flags, suppgid,
- request);
-}
-
-static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- if (op_data->op_flags & MF_GET_MDT_IDX) {
- op_data->op_mds = tgt->ltd_idx;
- return 0;
- }
-
- return md_getattr(tgt->ltd_exp, op_data, request);
-}
-
-static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- u32 i;
-
- CDEBUG(D_INODE, "CBDATA for " DFID "\n", PFID(fid));
-
- /*
- * With DNE every object can have two locks in different namespaces:
- * lookup lock in space of MDT storing direntry and update/open lock in
- * space of MDT storing inode.
- */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
- continue;
- md_null_inode(lmv->tgts[i]->ltd_exp, fid);
- }
-
- return 0;
-}
-
-static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- CDEBUG(D_INODE, "CLOSE " DFID "\n", PFID(&op_data->op_fid1));
- return md_close(tgt->ltd_exp, op_data, mod, request);
-}
-
-/**
- * Choosing the MDT by name or FID in @op_data.
- * For non-striped directory, it will locate MDT by fid.
- * For striped-directory, it will locate MDT by name. And also
- * it will reset op_fid1 with the FID of the chosen stripe.
- **/
-static struct lmv_tgt_desc *
-lmv_locate_target_for_name(struct lmv_obd *lmv, struct lmv_stripe_md *lsm,
- const char *name, int namelen, struct lu_fid *fid,
- u32 *mds)
-{
- const struct lmv_oinfo *oinfo;
- struct lmv_tgt_desc *tgt;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NAME_HASH)) {
- if (cfs_fail_val >= lsm->lsm_md_stripe_count)
- return ERR_PTR(-EBADF);
- oinfo = &lsm->lsm_md_oinfo[cfs_fail_val];
- } else {
- oinfo = lsm_name_to_stripe_info(lsm, name, namelen);
- if (IS_ERR(oinfo))
- return ERR_CAST(oinfo);
- }
-
- if (fid)
- *fid = oinfo->lmo_fid;
- if (mds)
- *mds = oinfo->lmo_mds;
-
- tgt = lmv_get_target(lmv, oinfo->lmo_mds, NULL);
-
- CDEBUG(D_INFO, "locate on mds %u " DFID "\n", oinfo->lmo_mds,
- PFID(&oinfo->lmo_fid));
- return tgt;
-}
-
-/**
- * Locate mds by fid or name
- *
- * For striped directory (lsm != NULL), it will locate the stripe
- * by name hash (see lsm_name_to_stripe_info()). Note: if the hash_type
- * is unknown, it will return -EBADFD, and lmv_intent_lookup might need
- * walk through all of stripes to locate the entry.
- *
- * For normal direcotry, it will locate MDS by FID directly.
- * \param[in] lmv LMV device
- * \param[in] op_data client MD stack parameters, name, namelen
- * mds_num etc.
- * \param[in] fid object FID used to locate MDS.
- *
- * retval pointer to the lmv_tgt_desc if succeed.
- * ERR_PTR(errno) if failed.
- */
-struct lmv_tgt_desc*
-lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
- struct lu_fid *fid)
-{
- struct lmv_stripe_md *lsm = op_data->op_mea1;
- struct lmv_tgt_desc *tgt;
-
- /*
- * During creating VOLATILE file, it should honor the mdt
- * index if the file under striped dir is being restored, see
- * ct_restore().
- */
- if (op_data->op_bias & MDS_CREATE_VOLATILE &&
- (int)op_data->op_mds != -1) {
- int i;
-
- tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
- if (IS_ERR(tgt))
- return tgt;
-
- if (lsm) {
- /* refill the right parent fid */
- for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
- struct lmv_oinfo *oinfo;
-
- oinfo = &lsm->lsm_md_oinfo[i];
- if (oinfo->lmo_mds == op_data->op_mds) {
- *fid = oinfo->lmo_fid;
- break;
- }
- }
-
- if (i == lsm->lsm_md_stripe_count)
- *fid = lsm->lsm_md_oinfo[0].lmo_fid;
- }
-
- return tgt;
- }
-
- if (!lsm || !op_data->op_namelen) {
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return tgt;
-
- op_data->op_mds = tgt->ltd_idx;
-
- return tgt;
- }
-
- return lmv_locate_target_for_name(lmv, lsm, op_data->op_name,
- op_data->op_namelen, fid,
- &op_data->op_mds);
-}
-
-static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, size_t datalen, umode_t mode,
- uid_t uid, gid_t gid, cfs_cap_t cap_effective,
- __u64 rdev, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- if (!lmv->desc.ld_active_tgt_count)
- return -EIO;
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- CDEBUG(D_INODE, "CREATE name '%.*s' on " DFID " -> mds #%x\n",
- (int)op_data->op_namelen, op_data->op_name,
- PFID(&op_data->op_fid1), op_data->op_mds);
-
- rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
- if (rc)
- return rc;
-
- if (exp_connect_flags(exp) & OBD_CONNECT_DIR_STRIPE) {
- /*
- * Send the create request to the MDT where the object
- * will be located
- */
- tgt = lmv_find_target(lmv, &op_data->op_fid2);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- op_data->op_mds = tgt->ltd_idx;
- } else {
- CDEBUG(D_CONFIG, "Server doesn't support striped dirs\n");
- }
-
- CDEBUG(D_INODE, "CREATE obj " DFID " -> mds #%x\n",
- PFID(&op_data->op_fid1), op_data->op_mds);
-
- op_data->op_flags |= MF_MDC_CANCEL_FID1;
- rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid,
- cap_effective, rdev, request);
-
- if (rc == 0) {
- if (!*request)
- return rc;
- CDEBUG(D_INODE, "Created - " DFID "\n", PFID(&op_data->op_fid2));
- }
- return rc;
-}
-
-static int
-lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- const union ldlm_policy_data *policy,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, __u64 extra_lock_flags)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- CDEBUG(D_INODE, "ENQUEUE '%s' on " DFID "\n",
- LL_IT2STR(it), PFID(&op_data->op_fid1));
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- CDEBUG(D_INODE, "ENQUEUE '%s' on " DFID " -> mds #%u\n",
- LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
-
- return md_enqueue(tgt->ltd_exp, einfo, policy, it, op_data, lockh,
- extra_lock_flags);
-}
-
-static int
-lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **preq)
-{
- struct ptlrpc_request *req = NULL;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- struct mdt_body *body;
- int rc;
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- CDEBUG(D_INODE, "GETATTR_NAME for %*s on " DFID " -> mds #%u\n",
- (int)op_data->op_namelen, op_data->op_name,
- PFID(&op_data->op_fid1), tgt->ltd_idx);
-
- rc = md_getattr_name(tgt->ltd_exp, op_data, preq);
- if (rc != 0)
- return rc;
-
- body = req_capsule_server_get(&(*preq)->rq_pill, &RMF_MDT_BODY);
- if (body->mbo_valid & OBD_MD_MDS) {
- struct lu_fid rid = body->mbo_fid1;
-
- CDEBUG(D_INODE, "Request attrs for " DFID "\n",
- PFID(&rid));
-
- tgt = lmv_find_target(lmv, &rid);
- if (IS_ERR(tgt)) {
- ptlrpc_req_finished(*preq);
- *preq = NULL;
- return PTR_ERR(tgt);
- }
-
- op_data->op_fid1 = rid;
- op_data->op_valid |= OBD_MD_FLCROSSREF;
- op_data->op_namelen = 0;
- op_data->op_name = NULL;
- rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
- ptlrpc_req_finished(*preq);
- *preq = req;
- }
-
- return rc;
-}
-
-#define md_op_data_fid(op_data, fl) \
- (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \
- fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \
- fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \
- fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
- NULL)
-
-static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
- struct md_op_data *op_data, int op_tgt,
- enum ldlm_mode mode, int bits, int flag)
-{
- struct lu_fid *fid = md_op_data_fid(op_data, flag);
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- union ldlm_policy_data policy = { { 0 } };
- int rc = 0;
-
- if (!fid_is_sane(fid))
- return 0;
-
- if (!tgt) {
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
- }
-
- if (tgt->ltd_idx != op_tgt) {
- CDEBUG(D_INODE, "EARLY_CANCEL on " DFID "\n", PFID(fid));
- policy.l_inodebits.bits = bits;
- rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
- mode, LCF_ASYNC, NULL);
- } else {
- CDEBUG(D_INODE,
- "EARLY_CANCEL skip operation target %d on " DFID "\n",
- op_tgt, PFID(fid));
- op_data->op_flags |= flag;
- rc = 0;
- }
-
- return rc;
-}
-
-/*
- * llite passes fid of an target inode in op_data->op_fid1 and id of directory in
- * op_data->op_fid2
- */
-static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- LASSERT(op_data->op_namelen != 0);
-
- CDEBUG(D_INODE, "LINK " DFID ":%*s to " DFID "\n",
- PFID(&op_data->op_fid2), (int)op_data->op_namelen,
- op_data->op_name, PFID(&op_data->op_fid1));
-
- op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
- op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
- if (op_data->op_mea2) {
- struct lmv_stripe_md *lsm = op_data->op_mea2;
- const struct lmv_oinfo *oinfo;
-
- oinfo = lsm_name_to_stripe_info(lsm, op_data->op_name,
- op_data->op_namelen);
- if (IS_ERR(oinfo))
- return PTR_ERR(oinfo);
-
- op_data->op_fid2 = oinfo->lmo_fid;
- }
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- /*
- * Cancel UPDATE lock on child (fid1).
- */
- op_data->op_flags |= MF_MDC_CANCEL_FID2;
- rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX,
- MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
- if (rc != 0)
- return rc;
-
- return md_link(tgt->ltd_exp, op_data, request);
-}
-
-static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, size_t oldlen,
- const char *new, size_t newlen,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_export *target_exp;
- struct lmv_tgt_desc *src_tgt;
- struct lmv_tgt_desc *tgt_tgt;
- struct mdt_body *body;
- int rc;
-
- LASSERT(oldlen != 0);
-
- CDEBUG(D_INODE, "RENAME %.*s in " DFID ":%d to %.*s in " DFID ":%d\n",
- (int)oldlen, old, PFID(&op_data->op_fid1),
- op_data->op_mea1 ? op_data->op_mea1->lsm_md_stripe_count : 0,
- (int)newlen, new, PFID(&op_data->op_fid2),
- op_data->op_mea2 ? op_data->op_mea2->lsm_md_stripe_count : 0);
-
- op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
- op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
-
- if (op_data->op_cli_flags & CLI_MIGRATE) {
- LASSERTF(fid_is_sane(&op_data->op_fid3), "invalid FID " DFID "\n",
- PFID(&op_data->op_fid3));
-
- if (op_data->op_mea1) {
- struct lmv_stripe_md *lsm = op_data->op_mea1;
- struct lmv_tgt_desc *tmp;
-
- /* Fix the parent fid for striped dir */
- tmp = lmv_locate_target_for_name(lmv, lsm, old,
- oldlen,
- &op_data->op_fid1,
- NULL);
- if (IS_ERR(tmp))
- return PTR_ERR(tmp);
- }
-
- rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
- if (rc)
- return rc;
- src_tgt = lmv_find_target(lmv, &op_data->op_fid3);
- if (IS_ERR(src_tgt))
- return PTR_ERR(src_tgt);
-
- target_exp = src_tgt->ltd_exp;
- } else {
- if (op_data->op_mea1) {
- struct lmv_stripe_md *lsm = op_data->op_mea1;
-
- src_tgt = lmv_locate_target_for_name(lmv, lsm, old,
- oldlen,
- &op_data->op_fid1,
- &op_data->op_mds);
- } else {
- src_tgt = lmv_find_target(lmv, &op_data->op_fid1);
- }
- if (IS_ERR(src_tgt))
- return PTR_ERR(src_tgt);
-
- if (op_data->op_mea2) {
- struct lmv_stripe_md *lsm = op_data->op_mea2;
-
- tgt_tgt = lmv_locate_target_for_name(lmv, lsm, new,
- newlen,
- &op_data->op_fid2,
- &op_data->op_mds);
- } else {
- tgt_tgt = lmv_find_target(lmv, &op_data->op_fid2);
- }
- if (IS_ERR(tgt_tgt))
- return PTR_ERR(tgt_tgt);
-
- target_exp = tgt_tgt->ltd_exp;
- }
-
- /*
- * LOOKUP lock on src child (fid3) should also be cancelled for
- * src_tgt in mdc_rename.
- */
- op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
-
- /*
- * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its
- * own target.
- */
- rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
- LCK_EX, MDS_INODELOCK_UPDATE,
- MF_MDC_CANCEL_FID2);
- if (rc)
- return rc;
- /*
- * Cancel LOOKUP locks on source child (fid3) for parent tgt_tgt.
- */
- if (fid_is_sane(&op_data->op_fid3)) {
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- /* Cancel LOOKUP lock on its parent */
- rc = lmv_early_cancel(exp, tgt, op_data, src_tgt->ltd_idx,
- LCK_EX, MDS_INODELOCK_LOOKUP,
- MF_MDC_CANCEL_FID3);
- if (rc)
- return rc;
-
- rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
- LCK_EX, MDS_INODELOCK_FULL,
- MF_MDC_CANCEL_FID3);
- if (rc)
- return rc;
- }
-
-retry_rename:
- /*
- * Cancel all the locks on tgt child (fid4).
- */
- if (fid_is_sane(&op_data->op_fid4)) {
- struct lmv_tgt_desc *tgt;
-
- rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
- LCK_EX, MDS_INODELOCK_FULL,
- MF_MDC_CANCEL_FID4);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid4);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- /*
- * Since the target child might be destroyed, and it might
- * become orphan, and we can only check orphan on the local
- * MDT right now, so we send rename request to the MDT where
- * target child is located. If target child does not exist,
- * then it will send the request to the target parent
- */
- target_exp = tgt->ltd_exp;
- }
-
- rc = md_rename(target_exp, op_data, old, oldlen, new, newlen, request);
- if (rc && rc != -EREMOTE)
- return rc;
-
- body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
- if (!body)
- return -EPROTO;
-
- /* Not cross-ref case, just get out of here. */
- if (likely(!(body->mbo_valid & OBD_MD_MDS)))
- return rc;
-
- CDEBUG(D_INODE, "%s: try rename to another MDT for " DFID "\n",
- exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
-
- op_data->op_fid4 = body->mbo_fid1;
- ptlrpc_req_finished(*request);
- *request = NULL;
- goto retry_rename;
-}
-
-static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- CDEBUG(D_INODE, "SETATTR for " DFID ", valid 0x%x\n",
- PFID(&op_data->op_fid1), op_data->op_attr.ia_valid);
-
- op_data->op_flags |= MF_MDC_CANCEL_FID1;
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_setattr(tgt->ltd_exp, op_data, ea, ealen, request);
-}
-
-static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_sync(tgt->ltd_exp, fid, request);
-}
-
-/**
- * Get current minimum entry from striped directory
- *
- * This function will search the dir entry, whose hash value is the
- * closest(>=) to @hash_offset, from all of sub-stripes, and it is
- * only being called for striped directory.
- *
- * \param[in] exp export of LMV
- * \param[in] op_data parameters transferred beween client MD stack
- * stripe_information will be included in this
- * parameter
- * \param[in] cb_op ldlm callback being used in enqueue in
- * mdc_read_page
- * \param[in] hash_offset the hash value, which is used to locate
- * minum(closet) dir entry
- * \param[in|out] stripe_offset the caller use this to indicate the stripe
- * index of last entry, so to avoid hash conflict
- * between stripes. It will also be used to
- * return the stripe index of current dir entry.
- * \param[in|out] entp the minum entry and it also is being used
- * to input the last dir entry to resolve the
- * hash conflict
- *
- * \param[out] ppage the page which holds the minum entry
- *
- * \retval = 0 get the entry successfully
- * negative errno (< 0) does not get the entry
- */
-static int lmv_get_min_striped_entry(struct obd_export *exp,
- struct md_op_data *op_data,
- struct md_callback *cb_op,
- __u64 hash_offset, int *stripe_offset,
- struct lu_dirent **entp,
- struct page **ppage)
-{
- struct lmv_stripe_md *lsm = op_data->op_mea1;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lu_dirent *min_ent = NULL;
- struct page *min_page = NULL;
- struct lmv_tgt_desc *tgt;
- int stripe_count;
- int min_idx = 0;
- int rc = 0;
- int i;
-
- stripe_count = lsm->lsm_md_stripe_count;
- for (i = 0; i < stripe_count; i++) {
- __u64 stripe_hash = hash_offset;
- struct lu_dirent *ent = NULL;
- struct page *page = NULL;
- struct lu_dirpage *dp;
-
- tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[i].lmo_mds, NULL);
- if (IS_ERR(tgt)) {
- rc = PTR_ERR(tgt);
- goto out;
- }
-
- /*
- * op_data will be shared by each stripe, so we need
- * reset these value for each stripe
- */
- op_data->op_fid1 = lsm->lsm_md_oinfo[i].lmo_fid;
- op_data->op_fid2 = lsm->lsm_md_oinfo[i].lmo_fid;
- op_data->op_data = lsm->lsm_md_oinfo[i].lmo_root;
-next:
- rc = md_read_page(tgt->ltd_exp, op_data, cb_op, stripe_hash,
- &page);
- if (rc)
- goto out;
-
- dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent;
- ent = lu_dirent_next(ent)) {
- /* Skip dummy entry */
- if (!le16_to_cpu(ent->lde_namelen))
- continue;
-
- if (le64_to_cpu(ent->lde_hash) < hash_offset)
- continue;
-
- if (le64_to_cpu(ent->lde_hash) == hash_offset &&
- (*entp == ent || i < *stripe_offset))
- continue;
-
- /* skip . and .. for other stripes */
- if (i && (!strncmp(ent->lde_name, ".",
- le16_to_cpu(ent->lde_namelen)) ||
- !strncmp(ent->lde_name, "..",
- le16_to_cpu(ent->lde_namelen))))
- continue;
- break;
- }
-
- if (!ent) {
- stripe_hash = le64_to_cpu(dp->ldp_hash_end);
-
- kunmap(page);
- put_page(page);
- page = NULL;
-
- /*
- * reach the end of current stripe, go to next stripe
- */
- if (stripe_hash == MDS_DIR_END_OFF)
- continue;
- else
- goto next;
- }
-
- if (min_ent) {
- if (le64_to_cpu(min_ent->lde_hash) >
- le64_to_cpu(ent->lde_hash)) {
- min_ent = ent;
- kunmap(min_page);
- put_page(min_page);
- min_idx = i;
- min_page = page;
- } else {
- kunmap(page);
- put_page(page);
- page = NULL;
- }
- } else {
- min_ent = ent;
- min_page = page;
- min_idx = i;
- }
- }
-
-out:
- if (*ppage) {
- kunmap(*ppage);
- put_page(*ppage);
- }
- *stripe_offset = min_idx;
- *entp = min_ent;
- *ppage = min_page;
- return rc;
-}
-
-/**
- * Build dir entry page from a striped directory
- *
- * This function gets one entry by @offset from a striped directory. It will
- * read entries from all of stripes, and choose one closest to the required
- * offset(&offset). A few notes
- * 1. skip . and .. for non-zero stripes, because there can only have one .
- * and .. in a directory.
- * 2. op_data will be shared by all of stripes, instead of allocating new
- * one, so need to restore before reusing.
- * 3. release the entry page if that is not being chosen.
- *
- * \param[in] exp obd export refer to LMV
- * \param[in] op_data hold those MD parameters of read_entry
- * \param[in] cb_op ldlm callback being used in enqueue in mdc_read_entry
- * \param[out] ldp the entry being read
- * \param[out] ppage the page holding the entry. Note: because the entry
- * will be accessed in upper layer, so we need hold the
- * page until the usages of entry is finished, see
- * ll_dir_entry_next.
- *
- * retval =0 if get entry successfully
- * <0 cannot get entry
- */
-static int lmv_read_striped_page(struct obd_export *exp,
- struct md_op_data *op_data,
- struct md_callback *cb_op,
- __u64 offset, struct page **ppage)
-{
- struct inode *master_inode = op_data->op_data;
- struct lu_fid master_fid = op_data->op_fid1;
- __u64 hash_offset = offset;
- __u32 ldp_flags;
- struct page *min_ent_page = NULL;
- struct page *ent_page = NULL;
- struct lu_dirent *min_ent = NULL;
- struct lu_dirent *last_ent;
- struct lu_dirent *ent;
- struct lu_dirpage *dp;
- size_t left_bytes;
- int ent_idx = 0;
- void *area;
- int rc;
-
- /*
- * Allocate a page and read entries from all of stripes and fill
- * the page by hash order
- */
- ent_page = alloc_page(GFP_KERNEL);
- if (!ent_page)
- return -ENOMEM;
-
- /* Initialize the entry page */
- dp = kmap(ent_page);
- memset(dp, 0, sizeof(*dp));
- dp->ldp_hash_start = cpu_to_le64(offset);
- ldp_flags = LDF_COLLIDE;
-
- area = dp + 1;
- left_bytes = PAGE_SIZE - sizeof(*dp);
- ent = area;
- last_ent = ent;
- do {
- __u16 ent_size;
-
- /* Find the minum entry from all sub-stripes */
- rc = lmv_get_min_striped_entry(exp, op_data, cb_op, hash_offset,
- &ent_idx, &min_ent,
- &min_ent_page);
- if (rc)
- goto out;
-
- /*
- * If it can not get minum entry, it means it already reaches
- * the end of this directory
- */
- if (!min_ent) {
- last_ent->lde_reclen = 0;
- hash_offset = MDS_DIR_END_OFF;
- goto out;
- }
-
- ent_size = le16_to_cpu(min_ent->lde_reclen);
-
- /*
- * the last entry lde_reclen is 0, but it might not
- * the end of this entry of this temporay entry
- */
- if (!ent_size)
- ent_size = lu_dirent_calc_size(
- le16_to_cpu(min_ent->lde_namelen),
- le32_to_cpu(min_ent->lde_attrs));
- if (ent_size > left_bytes) {
- last_ent->lde_reclen = cpu_to_le16(0);
- hash_offset = le64_to_cpu(min_ent->lde_hash);
- goto out;
- }
-
- memcpy(ent, min_ent, ent_size);
-
- /*
- * Replace . with master FID and Replace .. with the parent FID
- * of master object
- */
- if (!strncmp(ent->lde_name, ".",
- le16_to_cpu(ent->lde_namelen)) &&
- le16_to_cpu(ent->lde_namelen) == 1)
- fid_cpu_to_le(&ent->lde_fid, &master_fid);
- else if (!strncmp(ent->lde_name, "..",
- le16_to_cpu(ent->lde_namelen)) &&
- le16_to_cpu(ent->lde_namelen) == 2)
- fid_cpu_to_le(&ent->lde_fid, &op_data->op_fid3);
-
- left_bytes -= ent_size;
- ent->lde_reclen = cpu_to_le16(ent_size);
- last_ent = ent;
- ent = (void *)ent + ent_size;
- hash_offset = le64_to_cpu(min_ent->lde_hash);
- if (hash_offset == MDS_DIR_END_OFF) {
- last_ent->lde_reclen = 0;
- break;
- }
- } while (1);
-out:
- if (min_ent_page) {
- kunmap(min_ent_page);
- put_page(min_ent_page);
- }
-
- if (unlikely(rc)) {
- __free_page(ent_page);
- ent_page = NULL;
- } else {
- if (ent == area)
- ldp_flags |= LDF_EMPTY;
- dp->ldp_flags |= cpu_to_le32(ldp_flags);
- dp->ldp_hash_end = cpu_to_le64(hash_offset);
- }
-
- /*
- * We do not want to allocate md_op_data during each
- * dir entry reading, so op_data will be shared by every stripe,
- * then we need to restore it back to original value before
- * return to the upper layer
- */
- op_data->op_fid1 = master_fid;
- op_data->op_fid2 = master_fid;
- op_data->op_data = master_inode;
-
- *ppage = ent_page;
-
- return rc;
-}
-
-static int lmv_read_page(struct obd_export *exp, struct md_op_data *op_data,
- struct md_callback *cb_op, __u64 offset,
- struct page **ppage)
-{
- struct lmv_stripe_md *lsm = op_data->op_mea1;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- if (unlikely(lsm))
- return lmv_read_striped_page(exp, op_data, cb_op, offset, ppage);
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_read_page(tgt->ltd_exp, op_data, cb_op, offset, ppage);
-}
-
-/**
- * Unlink a file/directory
- *
- * Unlink a file or directory under the parent dir. The unlink request
- * usually will be sent to the MDT where the child is located, but if
- * the client does not have the child FID then request will be sent to the
- * MDT where the parent is located.
- *
- * If the parent is a striped directory then it also needs to locate which
- * stripe the name of the child is located, and replace the parent FID
- * (@op->op_fid1) with the stripe FID. Note: if the stripe is unknown,
- * it will walk through all of sub-stripes until the child is being
- * unlinked finally.
- *
- * \param[in] exp export refer to LMV
- * \param[in] op_data different parameters transferred beween client
- * MD stacks, name, namelen, FIDs etc.
- * op_fid1 is the parent FID, op_fid2 is the child
- * FID.
- * \param[out] request point to the request of unlink.
- *
- * retval 0 if succeed
- * negative errno if failed.
- */
-static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct lmv_stripe_md *lsm = op_data->op_mea1;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *parent_tgt = NULL;
- struct lmv_tgt_desc *tgt = NULL;
- struct mdt_body *body;
- int stripe_index = 0;
- int rc;
-
-retry_unlink:
- /* For striped dir, we need to locate the parent as well */
- if (lsm) {
- struct lmv_tgt_desc *tmp;
-
- LASSERT(op_data->op_name && op_data->op_namelen);
-
- tmp = lmv_locate_target_for_name(lmv, lsm,
- op_data->op_name,
- op_data->op_namelen,
- &op_data->op_fid1,
- &op_data->op_mds);
-
- /*
- * return -EBADFD means unknown hash type, might
- * need try all sub-stripe here
- */
- if (IS_ERR(tmp) && PTR_ERR(tmp) != -EBADFD)
- return PTR_ERR(tmp);
-
- /*
- * Note: both migrating dir and unknown hash dir need to
- * try all of sub-stripes, so we need start search the
- * name from stripe 0, but migrating dir is already handled
- * inside lmv_locate_target_for_name(), so we only check
- * unknown hash type directory here
- */
- if (!lmv_is_known_hash_type(lsm->lsm_md_hash_type)) {
- struct lmv_oinfo *oinfo;
-
- oinfo = &lsm->lsm_md_oinfo[stripe_index];
-
- op_data->op_fid1 = oinfo->lmo_fid;
- op_data->op_mds = oinfo->lmo_mds;
- }
- }
-
-try_next_stripe:
- /* Send unlink requests to the MDT where the child is located */
- if (likely(!fid_is_zero(&op_data->op_fid2)))
- tgt = lmv_find_target(lmv, &op_data->op_fid2);
- else if (lsm)
- tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
- else
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
-
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
- op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
-
- /*
- * If child's fid is given, cancel unused locks for it if it is from
- * another export than parent.
- *
- * LOOKUP lock for child (fid3) should also be cancelled on parent
- * tgt_tgt in mdc_unlink().
- */
- op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
-
- /*
- * Cancel FULL locks on child (fid3).
- */
- parent_tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(parent_tgt))
- return PTR_ERR(parent_tgt);
-
- if (parent_tgt != tgt) {
- rc = lmv_early_cancel(exp, parent_tgt, op_data, tgt->ltd_idx,
- LCK_EX, MDS_INODELOCK_LOOKUP,
- MF_MDC_CANCEL_FID3);
- }
-
- rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX,
- MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
- if (rc != 0)
- return rc;
-
- CDEBUG(D_INODE, "unlink with fid=" DFID "/" DFID " -> mds #%u\n",
- PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), tgt->ltd_idx);
-
- rc = md_unlink(tgt->ltd_exp, op_data, request);
- if (rc != 0 && rc != -EREMOTE && rc != -ENOENT)
- return rc;
-
- /* Try next stripe if it is needed. */
- if (rc == -ENOENT && lsm && lmv_need_try_all_stripes(lsm)) {
- struct lmv_oinfo *oinfo;
-
- stripe_index++;
- if (stripe_index >= lsm->lsm_md_stripe_count)
- return rc;
-
- oinfo = &lsm->lsm_md_oinfo[stripe_index];
-
- op_data->op_fid1 = oinfo->lmo_fid;
- op_data->op_mds = oinfo->lmo_mds;
-
- ptlrpc_req_finished(*request);
- *request = NULL;
-
- goto try_next_stripe;
- }
-
- body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
- if (!body)
- return -EPROTO;
-
- /* Not cross-ref case, just get out of here. */
- if (likely(!(body->mbo_valid & OBD_MD_MDS)))
- return rc;
-
- CDEBUG(D_INODE, "%s: try unlink to another MDT for " DFID "\n",
- exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
-
- /* This is a remote object, try remote MDT, Note: it may
- * try more than 1 time here, Considering following case
- * /mnt/lustre is root on MDT0, remote1 is on MDT1
- * 1. Initially A does not know where remote1 is, it send
- * unlink RPC to MDT0, MDT0 return -EREMOTE, it will
- * resend unlink RPC to MDT1 (retry 1st time).
- *
- * 2. During the unlink RPC in flight,
- * client B mv /mnt/lustre/remote1 /mnt/lustre/remote2
- * and create new remote1, but on MDT0
- *
- * 3. MDT1 get unlink RPC(from A), then do remote lock on
- * /mnt/lustre, then lookup get fid of remote1, and find
- * it is remote dir again, and replay -EREMOTE again.
- *
- * 4. Then A will resend unlink RPC to MDT0. (retry 2nd times).
- *
- * In theory, it might try unlimited time here, but it should
- * be very rare case.
- */
- op_data->op_fid2 = body->mbo_fid1;
- ptlrpc_req_finished(*request);
- *request = NULL;
-
- goto retry_unlink;
-}
-
-static int lmv_precleanup(struct obd_device *obd)
-{
- fld_client_debugfs_fini(&obd->u.lmv.lmv_fld);
- lprocfs_obd_cleanup(obd);
- return 0;
-}
-
-/**
- * Get by key a value associated with a LMV device.
- *
- * Dispatch request to lower-layer devices as needed.
- *
- * \param[in] env execution environment for this thread
- * \param[in] exp export for the LMV device
- * \param[in] keylen length of key identifier
- * \param[in] key identifier of key to get value for
- * \param[in] vallen size of \a val
- * \param[out] val pointer to storage location for value
- *
- * \retval 0 on success
- * \retval negative negated errno on failure
- */
-static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val)
-{
- struct obd_device *obd;
- struct lmv_obd *lmv;
- int rc = 0;
-
- obd = class_exp2obd(exp);
- if (!obd) {
- CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
- exp->exp_handle.h_cookie);
- return -EINVAL;
- }
-
- lmv = &obd->u.lmv;
- if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
- int i;
-
- LASSERT(*vallen == sizeof(__u32));
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- struct lmv_tgt_desc *tgt = lmv->tgts[i];
-
- /*
- * All tgts should be connected when this gets called.
- */
- if (!tgt || !tgt->ltd_exp)
- continue;
-
- if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
- vallen, val))
- return 0;
- }
- return -EINVAL;
- } else if (KEY_IS(KEY_MAX_EASIZE) ||
- KEY_IS(KEY_DEFAULT_EASIZE) ||
- KEY_IS(KEY_CONN_DATA)) {
- /*
- * Forwarding this request to first MDS, it should know LOV
- * desc.
- */
- rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key,
- vallen, val);
- if (!rc && KEY_IS(KEY_CONN_DATA))
- exp->exp_connect_data = *(struct obd_connect_data *)val;
- return rc;
- } else if (KEY_IS(KEY_TGT_COUNT)) {
- *((int *)val) = lmv->desc.ld_tgt_count;
- return 0;
- }
-
- CDEBUG(D_IOCTL, "Invalid key\n");
- return -EINVAL;
-}
-
-/**
- * Asynchronously set by key a value associated with a LMV device.
- *
- * Dispatch request to lower-layer devices as needed.
- *
- * \param[in] env execution environment for this thread
- * \param[in] exp export for the LMV device
- * \param[in] keylen length of key identifier
- * \param[in] key identifier of key to store value for
- * \param[in] vallen size of value to store
- * \param[in] val pointer to data to be stored
- * \param[in] set optional list of related ptlrpc requests
- *
- * \retval 0 on success
- * \retval negative negated errno on failure
- */
-static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
-{
- struct lmv_tgt_desc *tgt;
- struct obd_device *obd;
- struct lmv_obd *lmv;
- int rc = 0;
-
- obd = class_exp2obd(exp);
- if (!obd) {
- CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
- exp->exp_handle.h_cookie);
- return -EINVAL;
- }
- lmv = &obd->u.lmv;
-
- if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX) ||
- KEY_IS(KEY_DEFAULT_EASIZE)) {
- int i, err = 0;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
-
- if (!tgt || !tgt->ltd_exp)
- continue;
-
- err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, vallen, val, set);
- if (err && rc == 0)
- rc = err;
- }
-
- return rc;
- }
-
- return -EINVAL;
-}
-
-static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
- const struct lmv_mds_md_v1 *lmm1)
-{
- struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
- int stripe_count;
- int rc = 0;
- int cplen;
- int i;
-
- lsm->lsm_md_magic = le32_to_cpu(lmm1->lmv_magic);
- lsm->lsm_md_stripe_count = le32_to_cpu(lmm1->lmv_stripe_count);
- lsm->lsm_md_master_mdt_index = le32_to_cpu(lmm1->lmv_master_mdt_index);
- if (OBD_FAIL_CHECK(OBD_FAIL_UNKNOWN_LMV_STRIPE))
- lsm->lsm_md_hash_type = LMV_HASH_TYPE_UNKNOWN;
- else
- lsm->lsm_md_hash_type = le32_to_cpu(lmm1->lmv_hash_type);
- lsm->lsm_md_layout_version = le32_to_cpu(lmm1->lmv_layout_version);
- cplen = strlcpy(lsm->lsm_md_pool_name, lmm1->lmv_pool_name,
- sizeof(lsm->lsm_md_pool_name));
-
- if (cplen >= sizeof(lsm->lsm_md_pool_name))
- return -E2BIG;
-
- CDEBUG(D_INFO, "unpack lsm count %d, master %d hash_type %d layout_version %d\n",
- lsm->lsm_md_stripe_count, lsm->lsm_md_master_mdt_index,
- lsm->lsm_md_hash_type, lsm->lsm_md_layout_version);
-
- stripe_count = le32_to_cpu(lmm1->lmv_stripe_count);
- for (i = 0; i < stripe_count; i++) {
- fid_le_to_cpu(&lsm->lsm_md_oinfo[i].lmo_fid,
- &lmm1->lmv_stripe_fids[i]);
- rc = lmv_fld_lookup(lmv, &lsm->lsm_md_oinfo[i].lmo_fid,
- &lsm->lsm_md_oinfo[i].lmo_mds);
- if (rc)
- return rc;
- CDEBUG(D_INFO, "unpack fid #%d " DFID "\n", i,
- PFID(&lsm->lsm_md_oinfo[i].lmo_fid));
- }
-
- return rc;
-}
-
-static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
- const union lmv_mds_md *lmm, size_t lmm_size)
-{
- struct lmv_stripe_md *lsm;
- bool allocated = false;
- int lsm_size, rc;
-
- LASSERT(lsmp);
-
- lsm = *lsmp;
- /* Free memmd */
- if (lsm && !lmm) {
- int i;
-
- for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
- /*
- * For migrating inode, the master stripe and master
- * object will be the same, so do not need iput, see
- * ll_update_lsm_md
- */
- if (!(lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION &&
- !i) && lsm->lsm_md_oinfo[i].lmo_root)
- iput(lsm->lsm_md_oinfo[i].lmo_root);
- }
-
- kvfree(lsm);
- *lsmp = NULL;
- return 0;
- }
-
- if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
- return -EPERM;
-
- /* Unpack memmd */
- if (le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_V1 &&
- le32_to_cpu(lmm->lmv_magic) != LMV_USER_MAGIC) {
- CERROR("%s: invalid lmv magic %x: rc = %d\n",
- exp->exp_obd->obd_name, le32_to_cpu(lmm->lmv_magic),
- -EIO);
- return -EIO;
- }
-
- if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1)
- lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm));
- else
- /**
- * Unpack default dirstripe(lmv_user_md) to lmv_stripe_md,
- * stripecount should be 0 then.
- */
- lsm_size = lmv_stripe_md_size(0);
-
- if (!lsm) {
- lsm = kvzalloc(lsm_size, GFP_NOFS);
- if (!lsm)
- return -ENOMEM;
- allocated = true;
- *lsmp = lsm;
- }
-
- switch (le32_to_cpu(lmm->lmv_magic)) {
- case LMV_MAGIC_V1:
- rc = lmv_unpack_md_v1(exp, lsm, &lmm->lmv_md_v1);
- break;
- default:
- CERROR("%s: unrecognized magic %x\n", exp->exp_obd->obd_name,
- le32_to_cpu(lmm->lmv_magic));
- rc = -EINVAL;
- break;
- }
-
- if (rc && allocated) {
- kvfree(lsm);
- *lsmp = NULL;
- lsm_size = rc;
- }
- return lsm_size;
-}
-
-void lmv_free_memmd(struct lmv_stripe_md *lsm)
-{
- lmv_unpackmd(NULL, &lsm, NULL, 0);
-}
-EXPORT_SYMBOL(lmv_free_memmd);
-
-static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode, enum ldlm_cancel_flags flags,
- void *opaque)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- int rc = 0;
- int err;
- u32 i;
-
- LASSERT(fid);
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- struct lmv_tgt_desc *tgt = lmv->tgts[i];
-
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
- continue;
-
- err = md_cancel_unused(tgt->ltd_exp, fid, policy, mode, flags,
- opaque);
- if (!rc)
- rc = err;
- }
- return rc;
-}
-
-static int lmv_set_lock_data(struct obd_export *exp,
- const struct lustre_handle *lockh,
- void *data, __u64 *bits)
-{
- struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
- struct lmv_tgt_desc *tgt = lmv->tgts[0];
-
- if (!tgt || !tgt->ltd_exp)
- return -EINVAL;
-
- return md_set_lock_data(tgt->ltd_exp, lockh, data, bits);
-}
-
-static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid,
- enum ldlm_type type,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode,
- struct lustre_handle *lockh)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- enum ldlm_mode rc;
- int tgt;
- u32 i;
-
- CDEBUG(D_INODE, "Lock match for " DFID "\n", PFID(fid));
-
- /*
- * With DNE every object can have two locks in different namespaces:
- * lookup lock in space of MDT storing direntry and update/open lock in
- * space of MDT storing inode. Try the MDT that the FID maps to first,
- * since this can be easily found, and only try others if that fails.
- */
- for (i = 0, tgt = lmv_find_target_index(lmv, fid);
- i < lmv->desc.ld_tgt_count;
- i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
- if (tgt < 0) {
- CDEBUG(D_HA, "%s: " DFID " is inaccessible: rc = %d\n",
- obd->obd_name, PFID(fid), tgt);
- tgt = 0;
- }
-
- if (!lmv->tgts[tgt] || !lmv->tgts[tgt]->ltd_exp ||
- !lmv->tgts[tgt]->ltd_active)
- continue;
-
- rc = md_lock_match(lmv->tgts[tgt]->ltd_exp, flags, fid,
- type, policy, mode, lockh);
- if (rc)
- return rc;
- }
-
- return 0;
-}
-
-static int lmv_get_lustre_md(struct obd_export *exp,
- struct ptlrpc_request *req,
- struct obd_export *dt_exp,
- struct obd_export *md_exp,
- struct lustre_md *md)
-{
- struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
- struct lmv_tgt_desc *tgt = lmv->tgts[0];
-
- if (!tgt || !tgt->ltd_exp)
- return -EINVAL;
- return md_get_lustre_md(tgt->ltd_exp, req, dt_exp, md_exp, md);
-}
-
-static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt = lmv->tgts[0];
-
- if (md->lmv) {
- lmv_free_memmd(md->lmv);
- md->lmv = NULL;
- }
- if (!tgt || !tgt->ltd_exp)
- return -EINVAL;
- return md_free_lustre_md(tgt->ltd_exp, md);
-}
-
-static int lmv_set_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och,
- struct lookup_intent *it)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, &och->och_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_set_open_replay_data(tgt->ltd_exp, och, it);
-}
-
-static int lmv_clear_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, &och->och_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_clear_open_replay_data(tgt->ltd_exp, och);
-}
-
-static int lmv_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo)
-{
- struct md_op_data *op_data = &minfo->mi_data;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *ptgt = NULL;
- struct lmv_tgt_desc *ctgt = NULL;
-
- if (!fid_is_sane(&op_data->op_fid2))
- return -EINVAL;
-
- ptgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(ptgt))
- return PTR_ERR(ptgt);
-
- ctgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
- if (IS_ERR(ctgt))
- return PTR_ERR(ctgt);
-
- /*
- * if child is on remote MDT, we need 2 async RPCs to fetch both LOOKUP
- * lock on parent, and UPDATE lock on child MDT, which makes all
- * complicated. Considering remote dir is rare case, and not supporting
- * it in statahead won't cause any issue, drop its support for now.
- */
- if (ptgt != ctgt)
- return -ENOTSUPP;
-
- return md_intent_getattr_async(ptgt->ltd_exp, minfo);
-}
-
-static int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
-}
-
-static int
-lmv_get_fid_from_lsm(struct obd_export *exp,
- const struct lmv_stripe_md *lsm,
- const char *name, int namelen, struct lu_fid *fid)
-{
- const struct lmv_oinfo *oinfo;
-
- LASSERT(lsm);
- oinfo = lsm_name_to_stripe_info(lsm, name, namelen);
- if (IS_ERR(oinfo))
- return PTR_ERR(oinfo);
-
- *fid = oinfo->lmo_fid;
-
- return 0;
-}
-
-/**
- * For lmv, only need to send request to master MDT, and the master MDT will
- * process with other slave MDTs. The only exception is Q_GETOQUOTA for which
- * we directly fetch data from the slave MDTs.
- */
-static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt = lmv->tgts[0];
- int rc = 0;
- __u64 curspace = 0, curinodes = 0;
- u32 i;
-
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active ||
- !lmv->desc.ld_tgt_count) {
- CERROR("master lmv inactive\n");
- return -EIO;
- }
-
- if (oqctl->qc_cmd != Q_GETOQUOTA)
- return obd_quotactl(tgt->ltd_exp, oqctl);
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- int err;
-
- tgt = lmv->tgts[i];
-
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
- continue;
-
- err = obd_quotactl(tgt->ltd_exp, oqctl);
- if (err) {
- CERROR("getquota on mdt %d failed. %d\n", i, err);
- if (!rc)
- rc = err;
- } else {
- curspace += oqctl->qc_dqblk.dqb_curspace;
- curinodes += oqctl->qc_dqblk.dqb_curinodes;
- }
- }
- oqctl->qc_dqblk.dqb_curspace = curspace;
- oqctl->qc_dqblk.dqb_curinodes = curinodes;
-
- return rc;
-}
-
-static int lmv_merge_attr(struct obd_export *exp,
- const struct lmv_stripe_md *lsm,
- struct cl_attr *attr,
- ldlm_blocking_callback cb_blocking)
-{
- int rc, i;
-
- rc = lmv_revalidate_slaves(exp, lsm, cb_blocking, 0);
- if (rc < 0)
- return rc;
-
- for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
- struct inode *inode = lsm->lsm_md_oinfo[i].lmo_root;
-
- CDEBUG(D_INFO, "" DFID " size %llu, blocks %llu nlink %u, atime %lu ctime %lu, mtime %lu.\n",
- PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
- i_size_read(inode), (unsigned long long)inode->i_blocks,
- inode->i_nlink, LTIME_S(inode->i_atime),
- LTIME_S(inode->i_ctime), LTIME_S(inode->i_mtime));
-
- /* for slave stripe, it needs to subtract nlink for . and .. */
- if (i)
- attr->cat_nlink += inode->i_nlink - 2;
- else
- attr->cat_nlink = inode->i_nlink;
-
- attr->cat_size += i_size_read(inode);
- attr->cat_blocks += inode->i_blocks;
-
- if (attr->cat_atime < LTIME_S(inode->i_atime))
- attr->cat_atime = LTIME_S(inode->i_atime);
-
- if (attr->cat_ctime < LTIME_S(inode->i_ctime))
- attr->cat_ctime = LTIME_S(inode->i_ctime);
-
- if (attr->cat_mtime < LTIME_S(inode->i_mtime))
- attr->cat_mtime = LTIME_S(inode->i_mtime);
- }
- return 0;
-}
-
-static struct obd_ops lmv_obd_ops = {
- .owner = THIS_MODULE,
- .setup = lmv_setup,
- .cleanup = lmv_cleanup,
- .precleanup = lmv_precleanup,
- .process_config = lmv_process_config,
- .connect = lmv_connect,
- .disconnect = lmv_disconnect,
- .statfs = lmv_statfs,
- .get_info = lmv_get_info,
- .set_info_async = lmv_set_info_async,
- .notify = lmv_notify,
- .get_uuid = lmv_get_uuid,
- .iocontrol = lmv_iocontrol,
- .quotactl = lmv_quotactl
-};
-
-static struct md_ops lmv_md_ops = {
- .getstatus = lmv_getstatus,
- .null_inode = lmv_null_inode,
- .close = lmv_close,
- .create = lmv_create,
- .enqueue = lmv_enqueue,
- .getattr = lmv_getattr,
- .getxattr = lmv_getxattr,
- .getattr_name = lmv_getattr_name,
- .intent_lock = lmv_intent_lock,
- .link = lmv_link,
- .rename = lmv_rename,
- .setattr = lmv_setattr,
- .setxattr = lmv_setxattr,
- .sync = lmv_sync,
- .read_page = lmv_read_page,
- .unlink = lmv_unlink,
- .init_ea_size = lmv_init_ea_size,
- .cancel_unused = lmv_cancel_unused,
- .set_lock_data = lmv_set_lock_data,
- .lock_match = lmv_lock_match,
- .get_lustre_md = lmv_get_lustre_md,
- .free_lustre_md = lmv_free_lustre_md,
- .merge_attr = lmv_merge_attr,
- .set_open_replay_data = lmv_set_open_replay_data,
- .clear_open_replay_data = lmv_clear_open_replay_data,
- .intent_getattr_async = lmv_intent_getattr_async,
- .revalidate_lock = lmv_revalidate_lock,
- .get_fid_from_lsm = lmv_get_fid_from_lsm,
- .unpackmd = lmv_unpackmd,
-};
-
-static int __init lmv_init(void)
-{
- struct lprocfs_static_vars lvars;
-
- lprocfs_lmv_init_vars(&lvars);
-
- return class_register_type(&lmv_obd_ops, &lmv_md_ops,
- LUSTRE_LMV_NAME, NULL);
-}
-
-static void lmv_exit(void)
-{
- class_unregister_type(LUSTRE_LMV_NAME);
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Logical Metadata Volume");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(lmv_init);
-module_exit(lmv_exit);
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
deleted file mode 100644
index 30727b7acccc..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/seq_file.h>
-#include <linux/statfs.h>
-#include <lprocfs_status.h>
-#include <obd_class.h>
-#include "lmv_internal.h"
-
-static ssize_t numobd_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct lmv_desc *desc;
-
- desc = &dev->u.lmv.desc;
- return sprintf(buf, "%u\n", desc->ld_tgt_count);
-}
-LUSTRE_RO_ATTR(numobd);
-
-static ssize_t activeobd_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct lmv_desc *desc;
-
- desc = &dev->u.lmv.desc;
- return sprintf(buf, "%u\n", desc->ld_active_tgt_count);
-}
-LUSTRE_RO_ATTR(activeobd);
-
-static int lmv_desc_uuid_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lmv_obd *lmv;
-
- LASSERT(dev);
- lmv = &dev->u.lmv;
- seq_printf(m, "%s\n", lmv->desc.ld_uuid.uuid);
- return 0;
-}
-
-LPROC_SEQ_FOPS_RO(lmv_desc_uuid);
-
-static void *lmv_tgt_seq_start(struct seq_file *p, loff_t *pos)
-{
- struct obd_device *dev = p->private;
- struct lmv_obd *lmv = &dev->u.lmv;
-
- while (*pos < lmv->tgts_size) {
- if (lmv->tgts[*pos])
- return lmv->tgts[*pos];
- ++*pos;
- }
-
- return NULL;
-}
-
-static void lmv_tgt_seq_stop(struct seq_file *p, void *v)
-{
-}
-
-static void *lmv_tgt_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- struct obd_device *dev = p->private;
- struct lmv_obd *lmv = &dev->u.lmv;
-
- ++*pos;
- while (*pos < lmv->tgts_size) {
- if (lmv->tgts[*pos])
- return lmv->tgts[*pos];
- ++*pos;
- }
-
- return NULL;
-}
-
-static int lmv_tgt_seq_show(struct seq_file *p, void *v)
-{
- struct lmv_tgt_desc *tgt = v;
-
- if (!tgt)
- return 0;
- seq_printf(p, "%u: %s %sACTIVE\n",
- tgt->ltd_idx, tgt->ltd_uuid.uuid,
- tgt->ltd_active ? "" : "IN");
- return 0;
-}
-
-static const struct seq_operations lmv_tgt_sops = {
- .start = lmv_tgt_seq_start,
- .stop = lmv_tgt_seq_stop,
- .next = lmv_tgt_seq_next,
- .show = lmv_tgt_seq_show,
-};
-
-static int lmv_target_seq_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int rc;
-
- rc = seq_open(file, &lmv_tgt_sops);
- if (rc)
- return rc;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-static struct lprocfs_vars lprocfs_lmv_obd_vars[] = {
- { "desc_uuid", &lmv_desc_uuid_fops, NULL, 0 },
- { NULL }
-};
-
-const struct file_operations lmv_proc_target_fops = {
- .owner = THIS_MODULE,
- .open = lmv_target_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static struct attribute *lmv_attrs[] = {
- &lustre_attr_activeobd.attr,
- &lustre_attr_numobd.attr,
- NULL,
-};
-
-static const struct attribute_group lmv_attr_group = {
- .attrs = lmv_attrs,
-};
-
-void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->sysfs_vars = &lmv_attr_group;
- lvars->obd_vars = lprocfs_lmv_obd_vars;
-}
diff --git a/drivers/staging/lustre/lustre/lov/Makefile b/drivers/staging/lustre/lustre/lov/Makefile
deleted file mode 100644
index 1ebf0193f61a..000000000000
--- a/drivers/staging/lustre/lustre/lov/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LUSTRE_FS) += lov.o
-lov-y := lov_obd.o lov_pack.o lov_offset.o lov_merge.o \
- lov_request.o lov_ea.o lov_dev.o lov_object.o lov_page.o \
- lov_lock.o lov_io.o lovsub_dev.o lovsub_object.o lovsub_page.o \
- lovsub_lock.o lov_pool.o lproc_lov.o
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
deleted file mode 100644
index 1185eceaf497..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ /dev/null
@@ -1,641 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Internal interfaces of LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#ifndef LOV_CL_INTERNAL_H
-#define LOV_CL_INTERNAL_H
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd.h>
-#include <cl_object.h>
-#include "lov_internal.h"
-
-/** \defgroup lov lov
- * Logical object volume layer. This layer implements data striping (raid0).
- *
- * At the lov layer top-entity (object, page, lock, io) is connected to one or
- * more sub-entities: top-object, representing a file is connected to a set of
- * sub-objects, each representing a stripe, file-level top-lock is connected
- * to a set of per-stripe sub-locks, top-page is connected to a (single)
- * sub-page, and a top-level IO is connected to a set of (potentially
- * concurrent) sub-IO's.
- *
- * Sub-object, sub-page, and sub-io have well-defined top-object and top-page
- * respectively, while a single sub-lock can be part of multiple top-locks.
- *
- * Reference counting models are different for different types of entities:
- *
- * - top-object keeps a reference to its sub-objects, and destroys them
- * when it is destroyed.
- *
- * - top-page keeps a reference to its sub-page, and destroys it when it
- * is destroyed.
- *
- * - IO's are not reference counted.
- *
- * To implement a connection between top and sub entities, lov layer is split
- * into two pieces: lov ("upper half"), and lovsub ("bottom half"), both
- * implementing full set of cl-interfaces. For example, top-object has vvp and
- * lov layers, and it's sub-object has lovsub and osc layers. lovsub layer is
- * used to track child-parent relationship.
- *
- * @{
- */
-
-struct lovsub_device;
-struct lovsub_object;
-struct lovsub_lock;
-
-enum lov_device_flags {
- LOV_DEV_INITIALIZED = 1 << 0
-};
-
-/*
- * Upper half.
- */
-
-struct lov_device {
- /*
- * XXX Locking of lov-private data is missing.
- */
- struct cl_device ld_cl;
- struct lov_obd *ld_lov;
- /** size of lov_device::ld_target[] array */
- __u32 ld_target_nr;
- struct lovsub_device **ld_target;
- __u32 ld_flags;
-};
-
-/**
- * Layout type.
- */
-enum lov_layout_type {
- LLT_EMPTY, /** empty file without body (mknod + truncate) */
- LLT_RAID0, /** striped file */
- LLT_RELEASED, /** file with no objects (data in HSM) */
- LLT_NR
-};
-
-static inline char *llt2str(enum lov_layout_type llt)
-{
- switch (llt) {
- case LLT_EMPTY:
- return "EMPTY";
- case LLT_RAID0:
- return "RAID0";
- case LLT_RELEASED:
- return "RELEASED";
- case LLT_NR:
- LBUG();
- }
- LBUG();
- return "";
-}
-
-/**
- * lov-specific file state.
- *
- * lov object has particular layout type, determining how top-object is built
- * on top of sub-objects. Layout type can change dynamically. When this
- * happens, lov_object::lo_type_guard semaphore is taken in exclusive mode,
- * all state pertaining to the old layout type is destroyed, and new state is
- * constructed. All object methods take said semaphore in the shared mode,
- * providing serialization against transition between layout types.
- *
- * To avoid multiple `if' or `switch' statements, selecting behavior for the
- * current layout type, object methods perform double-dispatch, invoking
- * function corresponding to the current layout type.
- */
-struct lov_object {
- struct cl_object lo_cl;
- /**
- * Serializes object operations with transitions between layout types.
- *
- * This semaphore is taken in shared mode by all object methods, and
- * is taken in exclusive mode when object type is changed.
- *
- * \see lov_object::lo_type
- */
- struct rw_semaphore lo_type_guard;
- /**
- * Type of an object. Protected by lov_object::lo_type_guard.
- */
- enum lov_layout_type lo_type;
- /**
- * True if layout is invalid. This bit is cleared when layout lock
- * is lost.
- */
- bool lo_layout_invalid;
- /**
- * How many IOs are on going on this object. Layout can be changed
- * only if there is no active IO.
- */
- atomic_t lo_active_ios;
- /**
- * Waitq - wait for no one else is using lo_lsm
- */
- wait_queue_head_t lo_waitq;
- /**
- * Layout metadata. NULL if empty layout.
- */
- struct lov_stripe_md *lo_lsm;
-
- union lov_layout_state {
- struct lov_layout_raid0 {
- unsigned int lo_nr;
- /**
- * When this is true, lov_object::lo_attr contains
- * valid up to date attributes for a top-level
- * object. This field is reset to 0 when attributes of
- * any sub-object change.
- */
- int lo_attr_valid;
- /**
- * Array of sub-objects. Allocated when top-object is
- * created (lov_init_raid0()).
- *
- * Top-object is a strict master of its sub-objects:
- * it is created before them, and outlives its
- * children (this later is necessary so that basic
- * functions like cl_object_top() always
- * work). Top-object keeps a reference on every
- * sub-object.
- *
- * When top-object is destroyed (lov_delete_raid0())
- * it releases its reference to a sub-object and waits
- * until the latter is finally destroyed.
- */
- struct lovsub_object **lo_sub;
- /**
- * protect lo_sub
- */
- spinlock_t lo_sub_lock;
- /**
- * Cached object attribute, built from sub-object
- * attributes.
- */
- struct cl_attr lo_attr;
- } raid0;
- struct lov_layout_state_empty {
- } empty;
- struct lov_layout_state_released {
- } released;
- } u;
- /**
- * Thread that acquired lov_object::lo_type_guard in an exclusive
- * mode.
- */
- struct task_struct *lo_owner;
-};
-
-/**
- * State lov_lock keeps for each sub-lock.
- */
-struct lov_lock_sub {
- /** sub-lock itself */
- struct cl_lock sub_lock;
- /** Set if the sublock has ever been enqueued, meaning it may
- * hold resources of underlying layers
- */
- unsigned int sub_is_enqueued:1,
- sub_initialized:1;
- int sub_stripe;
-};
-
-/**
- * lov-specific lock state.
- */
-struct lov_lock {
- struct cl_lock_slice lls_cl;
- /** Number of sub-locks in this lock */
- int lls_nr;
- /** sublock array */
- struct lov_lock_sub lls_sub[0];
-};
-
-struct lov_page {
- struct cl_page_slice lps_cl;
- unsigned int lps_stripe; /* stripe index */
-};
-
-/*
- * Bottom half.
- */
-
-struct lovsub_device {
- struct cl_device acid_cl;
- struct cl_device *acid_next;
-};
-
-struct lovsub_object {
- struct cl_object_header lso_header;
- struct cl_object lso_cl;
- struct lov_object *lso_super;
- int lso_index;
-};
-
-/**
- * Lock state at lovsub layer.
- */
-struct lovsub_lock {
- struct cl_lock_slice lss_cl;
-};
-
-/**
- * Describe the environment settings for sublocks.
- */
-struct lov_sublock_env {
- const struct lu_env *lse_env;
- struct cl_io *lse_io;
-};
-
-struct lovsub_page {
- struct cl_page_slice lsb_cl;
-};
-
-struct lov_thread_info {
- struct cl_object_conf lti_stripe_conf;
- struct lu_fid lti_fid;
- struct ost_lvb lti_lvb;
- struct cl_2queue lti_cl2q;
- struct cl_page_list lti_plist;
- wait_queue_entry_t lti_waiter;
-};
-
-/**
- * State that lov_io maintains for every sub-io.
- */
-struct lov_io_sub {
- u16 sub_stripe;
- /**
- * environment's refcheck.
- *
- * \see cl_env_get()
- */
- u16 sub_refcheck;
- /**
- * true, iff cl_io_init() was successfully executed against
- * lov_io_sub::sub_io.
- */
- u16 sub_io_initialized:1,
- /**
- * True, iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't
- * allocated, but borrowed from a per-device emergency pool.
- */
- sub_borrowed:1;
- /**
- * Linkage into a list (hanging off lov_io::lis_active) of all
- * sub-io's active for the current IO iteration.
- */
- struct list_head sub_linkage;
- /**
- * sub-io for a stripe. Ideally sub-io's can be stopped and resumed
- * independently, with lov acting as a scheduler to maximize overall
- * throughput.
- */
- struct cl_io *sub_io;
- /**
- * environment, in which sub-io executes.
- */
- struct lu_env *sub_env;
-};
-
-/**
- * IO state private for LOV.
- */
-struct lov_io {
- /** super-class */
- struct cl_io_slice lis_cl;
- /**
- * Pointer to the object slice. This is a duplicate of
- * lov_io::lis_cl::cis_object.
- */
- struct lov_object *lis_object;
- /**
- * Original end-of-io position for this IO, set by the upper layer as
- * cl_io::u::ci_rw::pos + cl_io::u::ci_rw::count. lov remembers this,
- * changes pos and count to fit IO into a single stripe and uses saved
- * value to determine when IO iterations have to stop.
- *
- * This is used only for CIT_READ and CIT_WRITE io's.
- */
- loff_t lis_io_endpos;
-
- /**
- * starting position within a file, for the current io loop iteration
- * (stripe), used by ci_io_loop().
- */
- u64 lis_pos;
- /**
- * end position with in a file, for the current stripe io. This is
- * exclusive (i.e., next offset after last byte affected by io).
- */
- u64 lis_endpos;
-
- int lis_stripe_count;
- int lis_active_subios;
-
- /**
- * the index of ls_single_subio in ls_subios array
- */
- int lis_single_subio_index;
- struct cl_io lis_single_subio;
-
- /**
- * size of ls_subios array, actually the highest stripe #
- */
- int lis_nr_subios;
- struct lov_io_sub *lis_subs;
- /**
- * List of active sub-io's.
- */
- struct list_head lis_active;
-};
-
-struct lov_session {
- struct lov_io ls_io;
- struct lov_sublock_env ls_subenv;
-};
-
-extern struct lu_device_type lov_device_type;
-extern struct lu_device_type lovsub_device_type;
-
-extern struct lu_context_key lov_key;
-extern struct lu_context_key lov_session_key;
-
-extern struct kmem_cache *lov_lock_kmem;
-extern struct kmem_cache *lov_object_kmem;
-extern struct kmem_cache *lov_thread_kmem;
-extern struct kmem_cache *lov_session_kmem;
-
-extern struct kmem_cache *lovsub_lock_kmem;
-extern struct kmem_cache *lovsub_object_kmem;
-
-int lov_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-
-int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-
-struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
- int stripe);
-
-int lov_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, pgoff_t index);
-int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, pgoff_t index);
-int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index);
-int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index);
-struct lu_object *lov_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev);
-struct lu_object *lovsub_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev);
-
-struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
-int lov_page_stripe(const struct cl_page *page);
-
-#define lov_foreach_target(lov, var) \
- for (var = 0; var < lov_targets_nr(lov); ++var)
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- * Accessors.
- *
- */
-
-static inline struct lov_session *lov_env_session(const struct lu_env *env)
-{
- struct lov_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &lov_session_key);
- LASSERT(ses);
- return ses;
-}
-
-static inline struct lov_io *lov_env_io(const struct lu_env *env)
-{
- return &lov_env_session(env)->ls_io;
-}
-
-static inline int lov_is_object(const struct lu_object *obj)
-{
- return obj->lo_dev->ld_type == &lov_device_type;
-}
-
-static inline int lovsub_is_object(const struct lu_object *obj)
-{
- return obj->lo_dev->ld_type == &lovsub_device_type;
-}
-
-static inline struct lu_device *lov2lu_dev(struct lov_device *lov)
-{
- return &lov->ld_cl.cd_lu_dev;
-}
-
-static inline struct lov_device *lu2lov_dev(const struct lu_device *d)
-{
- LINVRNT(d->ld_type == &lov_device_type);
- return container_of0(d, struct lov_device, ld_cl.cd_lu_dev);
-}
-
-static inline struct cl_device *lovsub2cl_dev(struct lovsub_device *lovsub)
-{
- return &lovsub->acid_cl;
-}
-
-static inline struct lu_device *lovsub2lu_dev(struct lovsub_device *lovsub)
-{
- return &lovsub2cl_dev(lovsub)->cd_lu_dev;
-}
-
-static inline struct lovsub_device *lu2lovsub_dev(const struct lu_device *d)
-{
- LINVRNT(d->ld_type == &lovsub_device_type);
- return container_of0(d, struct lovsub_device, acid_cl.cd_lu_dev);
-}
-
-static inline struct lovsub_device *cl2lovsub_dev(const struct cl_device *d)
-{
- LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
- return container_of0(d, struct lovsub_device, acid_cl);
-}
-
-static inline struct lu_object *lov2lu(struct lov_object *lov)
-{
- return &lov->lo_cl.co_lu;
-}
-
-static inline struct cl_object *lov2cl(struct lov_object *lov)
-{
- return &lov->lo_cl;
-}
-
-static inline struct lov_object *lu2lov(const struct lu_object *obj)
-{
- LINVRNT(lov_is_object(obj));
- return container_of0(obj, struct lov_object, lo_cl.co_lu);
-}
-
-static inline struct lov_object *cl2lov(const struct cl_object *obj)
-{
- LINVRNT(lov_is_object(&obj->co_lu));
- return container_of0(obj, struct lov_object, lo_cl);
-}
-
-static inline struct lu_object *lovsub2lu(struct lovsub_object *los)
-{
- return &los->lso_cl.co_lu;
-}
-
-static inline struct cl_object *lovsub2cl(struct lovsub_object *los)
-{
- return &los->lso_cl;
-}
-
-static inline struct lovsub_object *cl2lovsub(const struct cl_object *obj)
-{
- LINVRNT(lovsub_is_object(&obj->co_lu));
- return container_of0(obj, struct lovsub_object, lso_cl);
-}
-
-static inline struct lovsub_object *lu2lovsub(const struct lu_object *obj)
-{
- LINVRNT(lovsub_is_object(obj));
- return container_of0(obj, struct lovsub_object, lso_cl.co_lu);
-}
-
-static inline struct lovsub_lock *
-cl2lovsub_lock(const struct cl_lock_slice *slice)
-{
- LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu));
- return container_of(slice, struct lovsub_lock, lss_cl);
-}
-
-static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
-
- slice = cl_lock_at(lock, &lovsub_device_type);
- LASSERT(slice);
- return cl2lovsub_lock(slice);
-}
-
-static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice)
-{
- LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
- return container_of(slice, struct lov_lock, lls_cl);
-}
-
-static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
-{
- LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
- return container_of0(slice, struct lov_page, lps_cl);
-}
-
-static inline struct lovsub_page *
-cl2lovsub_page(const struct cl_page_slice *slice)
-{
- LINVRNT(lovsub_is_object(&slice->cpl_obj->co_lu));
- return container_of0(slice, struct lovsub_page, lsb_cl);
-}
-
-static inline struct lov_io *cl2lov_io(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio;
-
- lio = container_of(ios, struct lov_io, lis_cl);
- LASSERT(lio == lov_env_io(env));
- return lio;
-}
-
-static inline int lov_targets_nr(const struct lov_device *lov)
-{
- return lov->ld_lov->desc.ld_tgt_count;
-}
-
-static inline struct lov_thread_info *lov_env_info(const struct lu_env *env)
-{
- struct lov_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &lov_key);
- LASSERT(info);
- return info;
-}
-
-static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
-{
- LASSERT(lov->lo_type == LLT_RAID0);
- LASSERT(lov->lo_lsm->lsm_magic == LOV_MAGIC ||
- lov->lo_lsm->lsm_magic == LOV_MAGIC_V3);
- return &lov->u.raid0;
-}
-
-/* lov_pack.c */
-int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
- struct lov_user_md __user *lump);
-
-/** @} lov */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
deleted file mode 100644
index c7db23472346..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ /dev/null
@@ -1,384 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_device and cl_device_type for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-/* class_name2obd() */
-#include <obd_class.h>
-
-#include "lov_cl_internal.h"
-#include "lov_internal.h"
-
-struct kmem_cache *lov_lock_kmem;
-struct kmem_cache *lov_object_kmem;
-struct kmem_cache *lov_thread_kmem;
-struct kmem_cache *lov_session_kmem;
-
-struct kmem_cache *lovsub_lock_kmem;
-struct kmem_cache *lovsub_object_kmem;
-
-struct lu_kmem_descr lov_caches[] = {
- {
- .ckd_cache = &lov_lock_kmem,
- .ckd_name = "lov_lock_kmem",
- .ckd_size = sizeof(struct lov_lock)
- },
- {
- .ckd_cache = &lov_object_kmem,
- .ckd_name = "lov_object_kmem",
- .ckd_size = sizeof(struct lov_object)
- },
- {
- .ckd_cache = &lov_thread_kmem,
- .ckd_name = "lov_thread_kmem",
- .ckd_size = sizeof(struct lov_thread_info)
- },
- {
- .ckd_cache = &lov_session_kmem,
- .ckd_name = "lov_session_kmem",
- .ckd_size = sizeof(struct lov_session)
- },
- {
- .ckd_cache = &lovsub_lock_kmem,
- .ckd_name = "lovsub_lock_kmem",
- .ckd_size = sizeof(struct lovsub_lock)
- },
- {
- .ckd_cache = &lovsub_object_kmem,
- .ckd_name = "lovsub_object_kmem",
- .ckd_size = sizeof(struct lovsub_object)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/*****************************************************************************
- *
- * Lov device and device type functions.
- *
- */
-
-static void *lov_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct lov_thread_info *info;
-
- info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS);
- if (!info)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void lov_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct lov_thread_info *info = data;
-
- kmem_cache_free(lov_thread_kmem, info);
-}
-
-struct lu_context_key lov_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = lov_key_init,
- .lct_fini = lov_key_fini
-};
-
-static void *lov_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct lov_session *info;
-
- info = kmem_cache_zalloc(lov_session_kmem, GFP_NOFS);
- if (!info)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void lov_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct lov_session *info = data;
-
- kmem_cache_free(lov_session_kmem, info);
-}
-
-struct lu_context_key lov_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = lov_session_key_init,
- .lct_fini = lov_session_key_fini
-};
-
-/* type constructor/destructor: lov_type_{init,fini,start,stop}() */
-LU_TYPE_INIT_FINI(lov, &lov_key, &lov_session_key);
-
-static struct lu_device *lov_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- int i;
- struct lov_device *ld = lu2lov_dev(d);
-
- LASSERT(ld->ld_lov);
- if (!ld->ld_target)
- return NULL;
-
- lov_foreach_target(ld, i) {
- struct lovsub_device *lsd;
-
- lsd = ld->ld_target[i];
- if (lsd) {
- cl_stack_fini(env, lovsub2cl_dev(lsd));
- ld->ld_target[i] = NULL;
- }
- }
- return NULL;
-}
-
-static int lov_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- struct lov_device *ld = lu2lov_dev(d);
- int i;
- int rc = 0;
-
- LASSERT(d->ld_site);
- if (!ld->ld_target)
- return rc;
-
- lov_foreach_target(ld, i) {
- struct lovsub_device *lsd;
- struct cl_device *cl;
- struct lov_tgt_desc *desc;
-
- desc = ld->ld_lov->lov_tgts[i];
- if (!desc)
- continue;
-
- cl = cl_type_setup(env, d->ld_site, &lovsub_device_type,
- desc->ltd_obd->obd_lu_dev);
- if (IS_ERR(cl)) {
- rc = PTR_ERR(cl);
- break;
- }
- lsd = cl2lovsub_dev(cl);
- ld->ld_target[i] = lsd;
- }
-
- if (rc)
- lov_device_fini(env, d);
- else
- ld->ld_flags |= LOV_DEV_INITIALIZED;
-
- return rc;
-}
-
-static struct lu_device *lov_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct lov_device *ld = lu2lov_dev(d);
-
- cl_device_fini(lu2cl_dev(d));
- kfree(ld->ld_target);
- kfree(ld);
- return NULL;
-}
-
-static void lov_cl_del_target(const struct lu_env *env, struct lu_device *dev,
- __u32 index)
-{
- struct lov_device *ld = lu2lov_dev(dev);
-
- if (ld->ld_target[index]) {
- cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index]));
- ld->ld_target[index] = NULL;
- }
-}
-
-static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
-{
- int result;
- __u32 tgt_size;
- __u32 sub_size;
-
- result = 0;
- tgt_size = dev->ld_lov->lov_tgt_size;
- sub_size = dev->ld_target_nr;
- if (sub_size < tgt_size) {
- struct lovsub_device **newd;
- const size_t sz = sizeof(newd[0]);
-
- newd = kcalloc(tgt_size, sz, GFP_NOFS);
- if (newd) {
- if (sub_size > 0) {
- memcpy(newd, dev->ld_target, sub_size * sz);
- kfree(dev->ld_target);
- }
- dev->ld_target = newd;
- dev->ld_target_nr = tgt_size;
- } else {
- result = -ENOMEM;
- }
- }
- return result;
-}
-
-static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev,
- __u32 index)
-{
- struct obd_device *obd = dev->ld_obd;
- struct lov_device *ld = lu2lov_dev(dev);
- struct lov_tgt_desc *tgt;
- struct lovsub_device *lsd;
- struct cl_device *cl;
- int rc;
-
- obd_getref(obd);
-
- tgt = obd->u.lov.lov_tgts[index];
-
- if (!tgt->ltd_obd->obd_set_up) {
- CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid));
- return -EINVAL;
- }
-
- rc = lov_expand_targets(env, ld);
- if (rc == 0 && ld->ld_flags & LOV_DEV_INITIALIZED) {
- LASSERT(dev->ld_site);
-
- cl = cl_type_setup(env, dev->ld_site, &lovsub_device_type,
- tgt->ltd_obd->obd_lu_dev);
- if (!IS_ERR(cl)) {
- lsd = cl2lovsub_dev(cl);
- ld->ld_target[index] = lsd;
- } else {
- CERROR("add failed (%d), deleting %s\n", rc,
- obd_uuid2str(&tgt->ltd_uuid));
- lov_cl_del_target(env, dev, index);
- rc = PTR_ERR(cl);
- }
- }
- obd_putref(obd);
- return rc;
-}
-
-static int lov_process_config(const struct lu_env *env,
- struct lu_device *d, struct lustre_cfg *cfg)
-{
- struct obd_device *obd = d->ld_obd;
- int cmd;
- int rc;
- int gen;
- __u32 index;
-
- obd_getref(obd);
-
- cmd = cfg->lcfg_command;
- rc = lov_process_config_base(d->ld_obd, cfg, &index, &gen);
- if (rc == 0) {
- switch (cmd) {
- case LCFG_LOV_ADD_OBD:
- case LCFG_LOV_ADD_INA:
- rc = lov_cl_add_target(env, d, index);
- if (rc != 0)
- lov_del_target(d->ld_obd, index, NULL, 0);
- break;
- case LCFG_LOV_DEL_OBD:
- lov_cl_del_target(env, d, index);
- break;
- }
- }
- obd_putref(obd);
- return rc;
-}
-
-static const struct lu_device_operations lov_lu_ops = {
- .ldo_object_alloc = lov_object_alloc,
- .ldo_process_config = lov_process_config,
-};
-
-static struct lu_device *lov_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- struct lu_device *d;
- struct lov_device *ld;
- struct obd_device *obd;
- int rc;
-
- ld = kzalloc(sizeof(*ld), GFP_NOFS);
- if (!ld)
- return ERR_PTR(-ENOMEM);
-
- cl_device_init(&ld->ld_cl, t);
- d = lov2lu_dev(ld);
- d->ld_ops = &lov_lu_ops;
-
- /* setup the LOV OBD */
- obd = class_name2obd(lustre_cfg_string(cfg, 0));
- LASSERT(obd);
- rc = lov_setup(obd, cfg);
- if (rc) {
- lov_device_free(env, d);
- return ERR_PTR(rc);
- }
-
- ld->ld_lov = &obd->u.lov;
- return d;
-}
-
-static const struct lu_device_type_operations lov_device_type_ops = {
- .ldto_init = lov_type_init,
- .ldto_fini = lov_type_fini,
-
- .ldto_start = lov_type_start,
- .ldto_stop = lov_type_stop,
-
- .ldto_device_alloc = lov_device_alloc,
- .ldto_device_free = lov_device_free,
-
- .ldto_device_init = lov_device_init,
- .ldto_device_fini = lov_device_fini
-};
-
-struct lu_device_type lov_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_LOV_NAME,
- .ldt_ops = &lov_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD
-};
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
deleted file mode 100644
index c56a971745e8..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ /dev/null
@@ -1,332 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lov/lov_ea.c
- *
- * Author: Wang Di <wangdi@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include <asm/div64.h>
-#include <linux/libcfs/libcfs.h>
-
-#include <obd_class.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-
-#include "lov_internal.h"
-
-static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
- __u16 stripe_count)
-{
- if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
- CERROR("bad stripe count %d\n", stripe_count);
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- if (lmm_oi_id(&lmm->lmm_oi) == 0) {
- CERROR("zero object id\n");
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- if (lov_pattern(le32_to_cpu(lmm->lmm_pattern)) != LOV_PATTERN_RAID0) {
- CERROR("bad striping pattern\n");
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- if (lmm->lmm_stripe_size == 0 ||
- (le32_to_cpu(lmm->lmm_stripe_size) &
- (LOV_MIN_STRIPE_SIZE - 1)) != 0) {
- CERROR("bad stripe size %u\n",
- le32_to_cpu(lmm->lmm_stripe_size));
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
- return 0;
-}
-
-struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count)
-{
- size_t oinfo_ptrs_size, lsm_size;
- struct lov_stripe_md *lsm;
- struct lov_oinfo *loi;
- int i;
-
- LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);
-
- oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
- lsm_size = sizeof(*lsm) + oinfo_ptrs_size;
-
- lsm = kvzalloc(lsm_size, GFP_NOFS);
- if (!lsm)
- return NULL;
-
- for (i = 0; i < stripe_count; i++) {
- loi = kmem_cache_zalloc(lov_oinfo_slab, GFP_NOFS);
- if (!loi)
- goto err;
- lsm->lsm_oinfo[i] = loi;
- }
- lsm->lsm_stripe_count = stripe_count;
- return lsm;
-
-err:
- while (--i >= 0)
- kmem_cache_free(lov_oinfo_slab, lsm->lsm_oinfo[i]);
- kvfree(lsm);
- return NULL;
-}
-
-void lsm_free_plain(struct lov_stripe_md *lsm)
-{
- __u16 stripe_count = lsm->lsm_stripe_count;
- int i;
-
- for (i = 0; i < stripe_count; i++)
- kmem_cache_free(lov_oinfo_slab, lsm->lsm_oinfo[i]);
- kvfree(lsm);
-}
-
-/*
- * Find minimum stripe maxbytes value. For inactive or
- * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
- */
-static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt)
-{
- loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
- struct obd_import *imp;
-
- if (!tgt->ltd_active)
- return maxbytes;
-
- imp = tgt->ltd_obd->u.cli.cl_import;
- if (!imp)
- return maxbytes;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL &&
- (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
- imp->imp_connect_data.ocd_maxbytes > 0)
- maxbytes = imp->imp_connect_data.ocd_maxbytes;
-
- spin_unlock(&imp->imp_lock);
-
- return maxbytes;
-}
-
-static int lsm_unpackmd_common(struct lov_obd *lov,
- struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm,
- struct lov_ost_data_v1 *objects)
-{
- loff_t min_stripe_maxbytes = 0;
- unsigned int stripe_count;
- struct lov_oinfo *loi;
- loff_t lov_bytes;
- unsigned int i;
-
- /*
- * This supposes lov_mds_md_v1/v3 first fields are
- * are the same
- */
- lmm_oi_le_to_cpu(&lsm->lsm_oi, &lmm->lmm_oi);
- lsm->lsm_stripe_size = le32_to_cpu(lmm->lmm_stripe_size);
- lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern);
- lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
- lsm->lsm_pool_name[0] = '\0';
-
- stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
-
- for (i = 0; i < stripe_count; i++) {
- loi = lsm->lsm_oinfo[i];
- ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi);
- loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx);
- loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen);
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ost_idx >= lov->desc.ld_tgt_count &&
- !lov2obd(lov)->obd_process_conf) {
- CERROR("%s: OST index %d more than OST count %d\n",
- (char *)lov->desc.ld_uuid.uuid,
- loi->loi_ost_idx, lov->desc.ld_tgt_count);
- lov_dump_lmm_v1(D_WARNING, lmm);
- return -EINVAL;
- }
-
- if (!lov->lov_tgts[loi->loi_ost_idx]) {
- CERROR("%s: OST index %d missing\n",
- (char *)lov->desc.ld_uuid.uuid,
- loi->loi_ost_idx);
- lov_dump_lmm_v1(D_WARNING, lmm);
- continue;
- }
-
- lov_bytes = lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx]);
- if (min_stripe_maxbytes == 0 || lov_bytes < min_stripe_maxbytes)
- min_stripe_maxbytes = lov_bytes;
- }
-
- if (min_stripe_maxbytes == 0)
- min_stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
-
- stripe_count = lsm->lsm_stripe_count ?: lov->desc.ld_tgt_count;
- lov_bytes = min_stripe_maxbytes * stripe_count;
-
- if (lov_bytes < min_stripe_maxbytes) /* handle overflow */
- lsm->lsm_maxbytes = MAX_LFS_FILESIZE;
- else
- lsm->lsm_maxbytes = lov_bytes;
-
- return 0;
-}
-
-static void
-lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
- loff_t *lov_off, loff_t *swidth)
-{
- if (swidth)
- *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
-}
-
-static void
-lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno,
- loff_t *lov_off, loff_t *swidth)
-{
- if (swidth)
- *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
-}
-
-static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
- __u16 *stripe_count)
-{
- if (lmm_bytes < sizeof(*lmm)) {
- CERROR("lov_mds_md_v1 too small: %d, need at least %d\n",
- lmm_bytes, (int)sizeof(*lmm));
- return -EINVAL;
- }
-
- *stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
- if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
- *stripe_count = 0;
-
- if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V1)) {
- CERROR("LOV EA V1 too small: %d, need %d\n",
- lmm_bytes, lov_mds_md_size(*stripe_count, LOV_MAGIC_V1));
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- return lsm_lmm_verify_common(lmm, lmm_bytes, *stripe_count);
-}
-
-static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md_v1 *lmm)
-{
- return lsm_unpackmd_common(lov, lsm, lmm, lmm->lmm_objects);
-}
-
-const struct lsm_operations lsm_v1_ops = {
- .lsm_free = lsm_free_plain,
- .lsm_stripe_by_index = lsm_stripe_by_index_plain,
- .lsm_stripe_by_offset = lsm_stripe_by_offset_plain,
- .lsm_lmm_verify = lsm_lmm_verify_v1,
- .lsm_unpackmd = lsm_unpackmd_v1,
-};
-
-static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes,
- __u16 *stripe_count)
-{
- struct lov_mds_md_v3 *lmm;
-
- lmm = (struct lov_mds_md_v3 *)lmmv1;
-
- if (lmm_bytes < sizeof(*lmm)) {
- CERROR("lov_mds_md_v3 too small: %d, need at least %d\n",
- lmm_bytes, (int)sizeof(*lmm));
- return -EINVAL;
- }
-
- *stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
- if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
- *stripe_count = 0;
-
- if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V3)) {
- CERROR("LOV EA V3 too small: %d, need %d\n",
- lmm_bytes, lov_mds_md_size(*stripe_count, LOV_MAGIC_V3));
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- return lsm_lmm_verify_common((struct lov_mds_md_v1 *)lmm, lmm_bytes,
- *stripe_count);
-}
-
-static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm)
-{
- struct lov_mds_md_v3 *lmm_v3 = (struct lov_mds_md_v3 *)lmm;
- size_t cplen = 0;
- int rc;
-
- rc = lsm_unpackmd_common(lov, lsm, lmm, lmm_v3->lmm_objects);
- if (rc)
- return rc;
-
- cplen = strlcpy(lsm->lsm_pool_name, lmm_v3->lmm_pool_name,
- sizeof(lsm->lsm_pool_name));
- if (cplen >= sizeof(lsm->lsm_pool_name))
- return -E2BIG;
-
- return 0;
-}
-
-const struct lsm_operations lsm_v3_ops = {
- .lsm_free = lsm_free_plain,
- .lsm_stripe_by_index = lsm_stripe_by_index_plain,
- .lsm_stripe_by_offset = lsm_stripe_by_offset_plain,
- .lsm_lmm_verify = lsm_lmm_verify_v3,
- .lsm_unpackmd = lsm_unpackmd_v3,
-};
-
-void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm)
-{
- CDEBUG(level, "lsm %p, objid " DOSTID ", maxbytes %#llx, magic 0x%08X, stripe_size %u, stripe_count %u, refc: %d, layout_gen %u, pool [" LOV_POOLNAMEF "]\n",
- lsm,
- POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic,
- lsm->lsm_stripe_size, lsm->lsm_stripe_count,
- atomic_read(&lsm->lsm_refc), lsm->lsm_layout_gen,
- lsm->lsm_pool_name);
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
deleted file mode 100644
index a56d71c2dda2..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ /dev/null
@@ -1,283 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef LOV_INTERNAL_H
-#define LOV_INTERNAL_H
-
-#include <obd_class.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-
-/*
- * If we are unable to get the maximum object size from the OST in
- * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
- * the old maximum object size from ext3.
- */
-#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
-
-struct lov_stripe_md {
- atomic_t lsm_refc;
- spinlock_t lsm_lock;
- pid_t lsm_lock_owner; /* debugging */
-
- /*
- * maximum possible file size, might change as OSTs status changes,
- * e.g. disconnected, deactivated
- */
- loff_t lsm_maxbytes;
- struct ost_id lsm_oi;
- u32 lsm_magic;
- u32 lsm_stripe_size;
- u32 lsm_pattern; /* RAID0, RAID1, released, ... */
- u16 lsm_stripe_count;
- u16 lsm_layout_gen;
- char lsm_pool_name[LOV_MAXPOOLNAME + 1];
- struct lov_oinfo *lsm_oinfo[0];
-};
-
-static inline bool lsm_is_released(struct lov_stripe_md *lsm)
-{
- return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
-}
-
-static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
-{
- if (!lsm)
- return false;
-
- if (lsm_is_released(lsm))
- return false;
-
- return true;
-}
-
-struct lsm_operations {
- void (*lsm_free)(struct lov_stripe_md *);
- void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, loff_t *,
- loff_t *);
- void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, loff_t *,
- loff_t *);
- int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
- u16 *stripe_count);
- int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm);
-};
-
-extern const struct lsm_operations lsm_v1_ops;
-extern const struct lsm_operations lsm_v3_ops;
-
-static inline const struct lsm_operations *lsm_op_find(int magic)
-{
- switch (magic) {
- case LOV_MAGIC_V1:
- return &lsm_v1_ops;
- case LOV_MAGIC_V3:
- return &lsm_v3_ops;
- default:
- CERROR("unrecognized lsm_magic %08x\n", magic);
- return NULL;
- }
-}
-
-/* lov_do_div64(a, b) returns a % b, and a = a / b.
- * The 32-bit code is LOV-specific due to knowing about stripe limits in
- * order to reduce the divisor to a 32-bit number. If the divisor is
- * already a 32-bit value the compiler handles this directly.
- */
-#if BITS_PER_LONG == 64
-# define lov_do_div64(n, base) ({ \
- u64 __base = (base); \
- u64 __rem; \
- __rem = ((u64)(n)) % __base; \
- (n) = ((u64)(n)) / __base; \
- __rem; \
-})
-#elif BITS_PER_LONG == 32
-# define lov_do_div64(n, base) ({ \
- u64 __rem; \
- if ((sizeof(base) > 4) && (((base) & 0xffffffff00000000ULL) != 0)) { \
- int __remainder; \
- LASSERTF(!((base) & (LOV_MIN_STRIPE_SIZE - 1)), "64 bit lov " \
- "division %llu / %llu\n", (n), (u64)(base)); \
- __remainder = (n) & (LOV_MIN_STRIPE_SIZE - 1); \
- (n) >>= LOV_MIN_STRIPE_BITS; \
- __rem = do_div(n, (base) >> LOV_MIN_STRIPE_BITS); \
- __rem <<= LOV_MIN_STRIPE_BITS; \
- __rem += __remainder; \
- } else { \
- __rem = do_div(n, base); \
- } \
- __rem; \
-})
-#endif
-
-#define pool_tgt_size(p) ((p)->pool_obds.op_size)
-#define pool_tgt_count(p) ((p)->pool_obds.op_count)
-#define pool_tgt_array(p) ((p)->pool_obds.op_array)
-#define pool_tgt_rw_sem(p) ((p)->pool_obds.op_rw_sem)
-
-struct pool_desc {
- char pool_name[LOV_MAXPOOLNAME + 1];
- struct ost_pool pool_obds;
- atomic_t pool_refcount;
- struct hlist_node pool_hash; /* access by poolname */
- struct list_head pool_list; /* serial access */
- struct dentry *pool_debugfs_entry; /* file in debugfs */
- struct obd_device *pool_lobd; /* owner */
-};
-
-struct lov_request {
- struct obd_info rq_oi;
- struct lov_request_set *rq_rqset;
-
- struct list_head rq_link;
-
- int rq_idx; /* index in lov->tgts array */
-};
-
-struct lov_request_set {
- struct obd_info *set_oi;
- struct obd_device *set_obd;
- int set_count;
- atomic_t set_completes;
- atomic_t set_success;
- struct list_head set_list;
-};
-
-extern struct kmem_cache *lov_oinfo_slab;
-
-extern struct lu_kmem_descr lov_caches[];
-
-#define lov_uuid2str(lv, index) \
- (char *)((lv)->lov_tgts[index]->ltd_uuid.uuid)
-
-/* lov_merge.c */
-int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
- struct ost_lvb *lvb, __u64 *kms_place);
-
-/* lov_offset.c */
-u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno);
-int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
- int stripeno, u64 *u64);
-u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, int stripeno);
-int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
- u64 start, u64 end,
- u64 *obd_start, u64 *obd_end);
-int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off);
-pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
- int stripe);
-
-/* lov_request.c */
-int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
- struct lov_request_set **reqset);
-int lov_fini_statfs_set(struct lov_request_set *set);
-
-/* lov_obd.c */
-void lov_stripe_lock(struct lov_stripe_md *md);
-void lov_stripe_unlock(struct lov_stripe_md *md);
-void lov_fix_desc(struct lov_desc *desc);
-void lov_fix_desc_stripe_size(__u64 *val);
-void lov_fix_desc_stripe_count(__u32 *val);
-void lov_fix_desc_pattern(__u32 *val);
-void lov_fix_desc_qos_maxage(__u32 *val);
-__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count);
-int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
- struct obd_connect_data *data);
-int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
-int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
- __u32 *indexp, int *genp);
-int lov_del_target(struct obd_device *obd, __u32 index,
- struct obd_uuid *uuidp, int gen);
-
-/* lov_pack.c */
-ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
- size_t buf_size);
-struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
- size_t lmm_size);
-int lov_free_memmd(struct lov_stripe_md **lsmp);
-
-void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm);
-void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm);
-void lov_dump_lmm_common(int level, void *lmmp);
-
-/* lov_ea.c */
-struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count);
-void lsm_free_plain(struct lov_stripe_md *lsm);
-void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm);
-
-/* lproc_lov.c */
-extern const struct file_operations lov_proc_target_fops;
-void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars);
-
-/* lov_cl.c */
-extern struct lu_device_type lov_device_type;
-
-/* pools */
-extern struct cfs_hash_ops pool_hash_operations;
-/* ost_pool methods */
-int lov_ost_pool_init(struct ost_pool *op, unsigned int count);
-int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count);
-int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count);
-int lov_ost_pool_remove(struct ost_pool *op, __u32 idx);
-int lov_ost_pool_free(struct ost_pool *op);
-
-/* high level pool methods */
-int lov_pool_new(struct obd_device *obd, char *poolname);
-int lov_pool_del(struct obd_device *obd, char *poolname);
-int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname);
-int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname);
-void lov_pool_putref(struct pool_desc *pool);
-
-static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm)
-{
- LASSERT(atomic_read(&lsm->lsm_refc) > 0);
- atomic_inc(&lsm->lsm_refc);
- return lsm;
-}
-
-static inline bool lov_oinfo_is_dummy(const struct lov_oinfo *loi)
-{
- if (unlikely(loi->loi_oi.oi.oi_id == 0 &&
- loi->loi_oi.oi.oi_seq == 0 &&
- loi->loi_ost_idx == 0 &&
- loi->loi_ost_gen == 0))
- return true;
-
- return false;
-}
-
-static inline struct obd_device *lov2obd(const struct lov_obd *lov)
-{
- return container_of0(lov, struct obd_device, u.lov);
-}
-
-#endif
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
deleted file mode 100644
index b823f8a21856..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ /dev/null
@@ -1,1023 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_io for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
- struct lov_io_sub *sub)
-{
- if (sub->sub_io) {
- if (sub->sub_io_initialized) {
- cl_io_fini(sub->sub_env, sub->sub_io);
- sub->sub_io_initialized = 0;
- lio->lis_active_subios--;
- }
- if (sub->sub_stripe == lio->lis_single_subio_index)
- lio->lis_single_subio_index = -1;
- else if (!sub->sub_borrowed)
- kfree(sub->sub_io);
- sub->sub_io = NULL;
- }
- if (!IS_ERR_OR_NULL(sub->sub_env)) {
- if (!sub->sub_borrowed)
- cl_env_put(sub->sub_env, &sub->sub_refcheck);
- sub->sub_env = NULL;
- }
-}
-
-static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
- int stripe, loff_t start, loff_t end)
-{
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- struct cl_io *parent = lio->lis_cl.cis_io;
-
- switch (io->ci_type) {
- case CIT_SETATTR: {
- io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
- io->u.ci_setattr.sa_attr_flags =
- parent->u.ci_setattr.sa_attr_flags;
- io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
- io->u.ci_setattr.sa_stripe_index = stripe;
- io->u.ci_setattr.sa_parent_fid =
- parent->u.ci_setattr.sa_parent_fid;
- if (cl_io_is_trunc(io)) {
- loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;
-
- new_size = lov_size_to_stripe(lsm, new_size, stripe);
- io->u.ci_setattr.sa_attr.lvb_size = new_size;
- }
- break;
- }
- case CIT_DATA_VERSION: {
- io->u.ci_data_version.dv_data_version = 0;
- io->u.ci_data_version.dv_flags =
- parent->u.ci_data_version.dv_flags;
- break;
- }
- case CIT_FAULT: {
- struct cl_object *obj = parent->ci_obj;
- loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);
-
- io->u.ci_fault = parent->u.ci_fault;
- off = lov_size_to_stripe(lsm, off, stripe);
- io->u.ci_fault.ft_index = cl_index(obj, off);
- break;
- }
- case CIT_FSYNC: {
- io->u.ci_fsync.fi_start = start;
- io->u.ci_fsync.fi_end = end;
- io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid;
- io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode;
- break;
- }
- case CIT_READ:
- case CIT_WRITE: {
- io->u.ci_wr.wr_sync = cl_io_is_sync_write(parent);
- if (cl_io_is_append(parent)) {
- io->u.ci_wr.wr_append = 1;
- } else {
- io->u.ci_rw.crw_pos = start;
- io->u.ci_rw.crw_count = end - start;
- }
- break;
- }
- default:
- break;
- }
-}
-
-static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
- struct lov_io_sub *sub)
-{
- struct lov_object *lov = lio->lis_object;
- struct cl_io *sub_io;
- struct cl_object *sub_obj;
- struct cl_io *io = lio->lis_cl.cis_io;
- int stripe = sub->sub_stripe;
- int rc;
-
- LASSERT(!sub->sub_io);
- LASSERT(!sub->sub_env);
- LASSERT(sub->sub_stripe < lio->lis_stripe_count);
-
- if (unlikely(!lov_r0(lov)->lo_sub[stripe]))
- return -EIO;
-
- sub->sub_io_initialized = 0;
- sub->sub_borrowed = 0;
-
- /* obtain new environment */
- sub->sub_env = cl_env_get(&sub->sub_refcheck);
- if (IS_ERR(sub->sub_env)) {
- rc = PTR_ERR(sub->sub_env);
- goto fini_lov_io;
- }
-
- /*
- * First sub-io. Use ->lis_single_subio to
- * avoid dynamic allocation.
- */
- if (lio->lis_active_subios == 0) {
- sub->sub_io = &lio->lis_single_subio;
- lio->lis_single_subio_index = stripe;
- } else {
- sub->sub_io = kzalloc(sizeof(*sub->sub_io),
- GFP_NOFS);
- if (!sub->sub_io) {
- rc = -ENOMEM;
- goto fini_lov_io;
- }
- }
-
- sub_obj = lovsub2cl(lov_r0(lov)->lo_sub[stripe]);
- sub_io = sub->sub_io;
-
- sub_io->ci_obj = sub_obj;
- sub_io->ci_result = 0;
- sub_io->ci_parent = io;
- sub_io->ci_lockreq = io->ci_lockreq;
- sub_io->ci_type = io->ci_type;
- sub_io->ci_no_srvlock = io->ci_no_srvlock;
- sub_io->ci_noatime = io->ci_noatime;
-
- rc = cl_io_sub_init(sub->sub_env, sub_io, io->ci_type, sub_obj);
- if (rc >= 0) {
- lio->lis_active_subios++;
- sub->sub_io_initialized = 1;
- rc = 0;
- }
-fini_lov_io:
- if (rc)
- lov_io_sub_fini(env, lio, sub);
- return rc;
-}
-
-struct lov_io_sub *lov_sub_get(const struct lu_env *env,
- struct lov_io *lio, int stripe)
-{
- int rc;
- struct lov_io_sub *sub = &lio->lis_subs[stripe];
-
- LASSERT(stripe < lio->lis_stripe_count);
-
- if (!sub->sub_io_initialized) {
- sub->sub_stripe = stripe;
- rc = lov_io_sub_init(env, lio, sub);
- } else {
- rc = 0;
- }
- if (rc < 0)
- sub = ERR_PTR(rc);
-
- return sub;
-}
-
-/*****************************************************************************
- *
- * Lov io operations.
- *
- */
-
-int lov_page_stripe(const struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- slice = cl_page_at(page, &lov_device_type);
- LASSERT(slice->cpl_obj);
-
- return cl2lov_page(slice)->lps_stripe;
-}
-
-static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
- struct cl_io *io)
-{
- struct lov_stripe_md *lsm;
- int result;
-
- LASSERT(lio->lis_object);
- lsm = lio->lis_object->lo_lsm;
-
- /*
- * Need to be optimized, we can't afford to allocate a piece of memory
- * when writing a page. -jay
- */
- lio->lis_subs =
- kvzalloc(lsm->lsm_stripe_count *
- sizeof(lio->lis_subs[0]),
- GFP_NOFS);
- if (lio->lis_subs) {
- lio->lis_nr_subios = lio->lis_stripe_count;
- lio->lis_single_subio_index = -1;
- lio->lis_active_subios = 0;
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
-
-static int lov_io_slice_init(struct lov_io *lio, struct lov_object *obj,
- struct cl_io *io)
-{
- io->ci_result = 0;
- lio->lis_object = obj;
-
- lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count;
-
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- lio->lis_pos = io->u.ci_rw.crw_pos;
- lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
- lio->lis_io_endpos = lio->lis_endpos;
- if (cl_io_is_append(io)) {
- LASSERT(io->ci_type == CIT_WRITE);
-
- /*
- * If there is LOV EA hole, then we may cannot locate
- * the current file-tail exactly.
- */
- if (unlikely(obj->lo_lsm->lsm_pattern &
- LOV_PATTERN_F_HOLE))
- return -EIO;
-
- lio->lis_pos = 0;
- lio->lis_endpos = OBD_OBJECT_EOF;
- }
- break;
-
- case CIT_SETATTR:
- if (cl_io_is_trunc(io))
- lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size;
- else
- lio->lis_pos = 0;
- lio->lis_endpos = OBD_OBJECT_EOF;
- break;
-
- case CIT_DATA_VERSION:
- lio->lis_pos = 0;
- lio->lis_endpos = OBD_OBJECT_EOF;
- break;
-
- case CIT_FAULT: {
- pgoff_t index = io->u.ci_fault.ft_index;
-
- lio->lis_pos = cl_offset(io->ci_obj, index);
- lio->lis_endpos = cl_offset(io->ci_obj, index + 1);
- break;
- }
-
- case CIT_FSYNC: {
- lio->lis_pos = io->u.ci_fsync.fi_start;
- lio->lis_endpos = io->u.ci_fsync.fi_end;
- break;
- }
-
- case CIT_MISC:
- lio->lis_pos = 0;
- lio->lis_endpos = OBD_OBJECT_EOF;
- break;
-
- default:
- LBUG();
- }
- return 0;
-}
-
-static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_object *lov = cl2lov(ios->cis_obj);
- int i;
-
- if (lio->lis_subs) {
- for (i = 0; i < lio->lis_nr_subios; i++)
- lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
- kvfree(lio->lis_subs);
- lio->lis_nr_subios = 0;
- }
-
- LASSERT(atomic_read(&lov->lo_active_ios) > 0);
- if (atomic_dec_and_test(&lov->lo_active_ios))
- wake_up_all(&lov->lo_waitq);
-}
-
-static u64 lov_offset_mod(u64 val, int delta)
-{
- if (val != OBD_OBJECT_EOF)
- val += delta;
- return val;
-}
-
-static int lov_io_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- struct lov_io_sub *sub;
- u64 endpos;
- u64 start;
- u64 end;
- int stripe;
- int rc = 0;
-
- endpos = lov_offset_mod(lio->lis_endpos, -1);
- for (stripe = 0; stripe < lio->lis_stripe_count; stripe++) {
- if (!lov_stripe_intersects(lsm, stripe, lio->lis_pos,
- endpos, &start, &end))
- continue;
-
- if (unlikely(!lov_r0(lio->lis_object)->lo_sub[stripe])) {
- if (ios->cis_io->ci_type == CIT_READ ||
- ios->cis_io->ci_type == CIT_WRITE ||
- ios->cis_io->ci_type == CIT_FAULT)
- return -EIO;
-
- continue;
- }
-
- end = lov_offset_mod(end, 1);
- sub = lov_sub_get(env, lio, stripe);
- if (IS_ERR(sub)) {
- rc = PTR_ERR(sub);
- break;
- }
-
- lov_io_sub_inherit(sub->sub_io, lio, stripe, start, end);
- rc = cl_io_iter_init(sub->sub_env, sub->sub_io);
- if (rc) {
- cl_io_iter_fini(sub->sub_env, sub->sub_io);
- break;
- }
- CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
- stripe, start, end);
-
- list_add_tail(&sub->sub_linkage, &lio->lis_active);
- }
- return rc;
-}
-
-static int lov_io_rw_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- __u64 start = io->u.ci_rw.crw_pos;
- loff_t next;
- unsigned long ssize = lsm->lsm_stripe_size;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- /* fast path for common case. */
- if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
- lov_do_div64(start, ssize);
- next = (start + 1) * ssize;
- if (next <= start * ssize)
- next = ~0ull;
-
- io->ci_continue = next < lio->lis_io_endpos;
- io->u.ci_rw.crw_count = min_t(loff_t, lio->lis_io_endpos,
- next) - io->u.ci_rw.crw_pos;
- lio->lis_pos = io->u.ci_rw.crw_pos;
- lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
- CDEBUG(D_VFSTRACE, "stripe: %llu chunk: [%llu, %llu) %llu\n",
- (__u64)start, lio->lis_pos, lio->lis_endpos,
- (__u64)lio->lis_io_endpos);
- }
- /*
- * XXX The following call should be optimized: we know, that
- * [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe.
- */
- return lov_io_iter_init(env, ios);
-}
-
-static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
- int (*iofunc)(const struct lu_env *, struct cl_io *))
-{
- struct cl_io *parent = lio->lis_cl.cis_io;
- struct lov_io_sub *sub;
- int rc = 0;
-
- list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
- rc = iofunc(sub->sub_env, sub->sub_io);
- if (rc)
- break;
-
- if (parent->ci_result == 0)
- parent->ci_result = sub->sub_io->ci_result;
- }
- return rc;
-}
-
-static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- return lov_io_call(env, cl2lov_io(env, ios), cl_io_lock);
-}
-
-static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- return lov_io_call(env, cl2lov_io(env, ios), cl_io_start);
-}
-
-static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
-{
- /*
- * It's possible that lov_io_start() wasn't called against this
- * sub-io, either because previous sub-io failed, or upper layer
- * completed IO.
- */
- if (io->ci_state == CIS_IO_GOING)
- cl_io_end(env, io);
- else
- io->ci_state = CIS_IO_FINISHED;
- return 0;
-}
-
-static void
-lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_io *parent = lio->lis_cl.cis_io;
- struct lov_io_sub *sub;
-
- list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
- lov_io_end_wrapper(sub->sub_env, sub->sub_io);
-
- parent->u.ci_data_version.dv_data_version +=
- sub->sub_io->u.ci_data_version.dv_data_version;
-
- if (!parent->ci_result)
- parent->ci_result = sub->sub_io->ci_result;
- }
-}
-
-static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
-{
- cl_io_iter_fini(env, io);
- return 0;
-}
-
-static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
-{
- cl_io_unlock(env, io);
- return 0;
-}
-
-static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- int rc;
-
- rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
- LASSERT(rc == 0);
-}
-
-static void lov_io_iter_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- int rc;
-
- rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
- LASSERT(rc == 0);
- while (!list_empty(&lio->lis_active))
- list_del_init(lio->lis_active.next);
-}
-
-static void lov_io_unlock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- int rc;
-
- rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
- LASSERT(rc == 0);
-}
-
-static int lov_io_read_ahead(const struct lu_env *env,
- const struct cl_io_slice *ios,
- pgoff_t start, struct cl_read_ahead *ra)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_object *loo = lio->lis_object;
- struct cl_object *obj = lov2cl(loo);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- unsigned int pps; /* pages per stripe */
- struct lov_io_sub *sub;
- pgoff_t ra_end;
- loff_t suboff;
- int stripe;
- int rc;
-
- stripe = lov_stripe_number(loo->lo_lsm, cl_offset(obj, start));
- if (unlikely(!r0->lo_sub[stripe]))
- return -EIO;
-
- sub = lov_sub_get(env, lio, stripe);
- if (IS_ERR(sub))
- return PTR_ERR(sub);
-
- lov_stripe_offset(loo->lo_lsm, cl_offset(obj, start), stripe, &suboff);
- rc = cl_io_read_ahead(sub->sub_env, sub->sub_io,
- cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
- ra);
-
- CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
- PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);
- if (rc)
- return rc;
-
- /**
- * Adjust the stripe index by layout of raid0. ra->cra_end is
- * the maximum page index covered by an underlying DLM lock.
- * This function converts cra_end from stripe level to file
- * level, and make sure it's not beyond stripe boundary.
- */
- if (r0->lo_nr == 1) /* single stripe file */
- return 0;
-
- /* cra_end is stripe level, convert it into file level */
- ra_end = ra->cra_end;
- if (ra_end != CL_PAGE_EOF)
- ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe);
-
- pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
-
- CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, stripe_size = %u, stripe no = %u, start index = %lu\n",
- PFID(lu_object_fid(lov2lu(loo))), ra_end, pps,
- loo->lo_lsm->lsm_stripe_size, stripe, start);
-
- /* never exceed the end of the stripe */
- ra->cra_end = min_t(pgoff_t, ra_end, start + pps - start % pps - 1);
- return 0;
-}
-
-/**
- * lov implementation of cl_operations::cio_submit() method. It takes a list
- * of pages in \a queue, splits it into per-stripe sub-lists, invokes
- * cl_io_submit() on underlying devices to submit sub-lists, and then splices
- * everything back.
- *
- * Major complication of this function is a need to handle memory cleansing:
- * cl_io_submit() is called to write out pages as a part of VM memory
- * reclamation, and hence it may not fail due to memory shortages (system
- * dead-locks otherwise). To deal with this, some resources (sub-lists,
- * sub-environment, etc.) are allocated per-device on "startup" (i.e., in a
- * not-memory cleansing context), and in case of memory shortage, these
- * pre-allocated resources are used by lov_io_submit() under
- * lov_device::ld_mutex mutex.
- */
-static int lov_io_submit(const struct lu_env *env,
- const struct cl_io_slice *ios,
- enum cl_req_type crt, struct cl_2queue *queue)
-{
- struct cl_page_list *qin = &queue->c2_qin;
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_io_sub *sub;
- struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
- struct cl_page *page;
- int stripe;
-
- int rc = 0;
-
- if (lio->lis_active_subios == 1) {
- int idx = lio->lis_single_subio_index;
-
- LASSERT(idx < lio->lis_nr_subios);
- sub = lov_sub_get(env, lio, idx);
- LASSERT(!IS_ERR(sub));
- LASSERT(sub->sub_io == &lio->lis_single_subio);
- rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
- crt, queue);
- return rc;
- }
-
- LASSERT(lio->lis_subs);
-
- cl_page_list_init(plist);
- while (qin->pl_nr > 0) {
- struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
-
- cl_2queue_init(cl2q);
-
- page = cl_page_list_first(qin);
- cl_page_list_move(&cl2q->c2_qin, qin, page);
-
- stripe = lov_page_stripe(page);
- while (qin->pl_nr > 0) {
- page = cl_page_list_first(qin);
- if (stripe != lov_page_stripe(page))
- break;
-
- cl_page_list_move(&cl2q->c2_qin, qin, page);
- }
-
- sub = lov_sub_get(env, lio, stripe);
- if (!IS_ERR(sub)) {
- rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
- crt, cl2q);
- } else {
- rc = PTR_ERR(sub);
- }
-
- cl_page_list_splice(&cl2q->c2_qin, plist);
- cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
- cl_2queue_fini(env, cl2q);
-
- if (rc != 0)
- break;
- }
-
- cl_page_list_splice(plist, qin);
- cl_page_list_fini(env, plist);
-
- return rc;
-}
-
-static int lov_io_commit_async(const struct lu_env *env,
- const struct cl_io_slice *ios,
- struct cl_page_list *queue, int from, int to,
- cl_commit_cbt cb)
-{
- struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_io_sub *sub;
- struct cl_page *page;
- int rc = 0;
-
- if (lio->lis_active_subios == 1) {
- int idx = lio->lis_single_subio_index;
-
- LASSERT(idx < lio->lis_nr_subios);
- sub = lov_sub_get(env, lio, idx);
- LASSERT(!IS_ERR(sub));
- LASSERT(sub->sub_io == &lio->lis_single_subio);
- rc = cl_io_commit_async(sub->sub_env, sub->sub_io, queue,
- from, to, cb);
- return rc;
- }
-
- LASSERT(lio->lis_subs);
-
- cl_page_list_init(plist);
- while (queue->pl_nr > 0) {
- int stripe_to = to;
- int stripe;
-
- LASSERT(plist->pl_nr == 0);
- page = cl_page_list_first(queue);
- cl_page_list_move(plist, queue, page);
-
- stripe = lov_page_stripe(page);
- while (queue->pl_nr > 0) {
- page = cl_page_list_first(queue);
- if (stripe != lov_page_stripe(page))
- break;
-
- cl_page_list_move(plist, queue, page);
- }
-
- if (queue->pl_nr > 0) /* still has more pages */
- stripe_to = PAGE_SIZE;
-
- sub = lov_sub_get(env, lio, stripe);
- if (!IS_ERR(sub)) {
- rc = cl_io_commit_async(sub->sub_env, sub->sub_io,
- plist, from, stripe_to, cb);
- } else {
- rc = PTR_ERR(sub);
- break;
- }
-
- if (plist->pl_nr > 0) /* short write */
- break;
-
- from = 0;
- }
-
- /* for error case, add the page back into the qin list */
- LASSERT(ergo(rc == 0, plist->pl_nr == 0));
- while (plist->pl_nr > 0) {
- /* error occurred, add the uncommitted pages back into queue */
- page = cl_page_list_last(plist);
- cl_page_list_move_head(queue, plist, page);
- }
-
- return rc;
-}
-
-static int lov_io_fault_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_fault_io *fio;
- struct lov_io *lio;
- struct lov_io_sub *sub;
-
- fio = &ios->cis_io->u.ci_fault;
- lio = cl2lov_io(env, ios);
- sub = lov_sub_get(env, lio, lov_page_stripe(fio->ft_page));
- if (IS_ERR(sub))
- return PTR_ERR(sub);
- sub->sub_io->u.ci_fault.ft_nob = fio->ft_nob;
- return lov_io_start(env, ios);
-}
-
-static void lov_io_fsync_end(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_io_sub *sub;
- unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;
-
- *written = 0;
- list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
- struct cl_io *subio = sub->sub_io;
-
- lov_io_end_wrapper(sub->sub_env, subio);
-
- if (subio->ci_result == 0)
- *written += subio->u.ci_fsync.fi_nr_written;
- }
-}
-
-static const struct cl_io_operations lov_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_rw_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
- .cio_end = lov_io_end
- },
- [CIT_WRITE] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_rw_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
- .cio_end = lov_io_end
- },
- [CIT_SETATTR] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
- .cio_end = lov_io_end
- },
- [CIT_DATA_VERSION] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
- .cio_end = lov_io_data_version_end,
- },
- [CIT_FAULT] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_fault_start,
- .cio_end = lov_io_end
- },
- [CIT_FSYNC] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
- .cio_end = lov_io_fsync_end
- },
- [CIT_MISC] = {
- .cio_fini = lov_io_fini
- }
- },
- .cio_read_ahead = lov_io_read_ahead,
- .cio_submit = lov_io_submit,
- .cio_commit_async = lov_io_commit_async,
-};
-
-/*****************************************************************************
- *
- * Empty lov io operations.
- *
- */
-
-static void lov_empty_io_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_object *lov = cl2lov(ios->cis_obj);
-
- if (atomic_dec_and_test(&lov->lo_active_ios))
- wake_up_all(&lov->lo_waitq);
-}
-
-static int lov_empty_io_submit(const struct lu_env *env,
- const struct cl_io_slice *ios,
- enum cl_req_type crt, struct cl_2queue *queue)
-{
- return -EBADF;
-}
-
-static void lov_empty_impossible(const struct lu_env *env,
- struct cl_io_slice *ios)
-{
- LBUG();
-}
-
-#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)
-
-/**
- * An io operation vector for files without stripes.
- */
-static const struct cl_io_operations lov_empty_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_fini = lov_empty_io_fini,
- },
- [CIT_WRITE] = {
- .cio_fini = lov_empty_io_fini,
- .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
- .cio_lock = LOV_EMPTY_IMPOSSIBLE,
- .cio_start = LOV_EMPTY_IMPOSSIBLE,
- .cio_end = LOV_EMPTY_IMPOSSIBLE
- },
- [CIT_SETATTR] = {
- .cio_fini = lov_empty_io_fini,
- .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
- .cio_lock = LOV_EMPTY_IMPOSSIBLE,
- .cio_start = LOV_EMPTY_IMPOSSIBLE,
- .cio_end = LOV_EMPTY_IMPOSSIBLE
- },
- [CIT_FAULT] = {
- .cio_fini = lov_empty_io_fini,
- .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
- .cio_lock = LOV_EMPTY_IMPOSSIBLE,
- .cio_start = LOV_EMPTY_IMPOSSIBLE,
- .cio_end = LOV_EMPTY_IMPOSSIBLE
- },
- [CIT_FSYNC] = {
- .cio_fini = lov_empty_io_fini
- },
- [CIT_MISC] = {
- .cio_fini = lov_empty_io_fini
- }
- },
- .cio_submit = lov_empty_io_submit,
- .cio_commit_async = LOV_EMPTY_IMPOSSIBLE
-};
-
-int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_object *lov = cl2lov(obj);
-
- INIT_LIST_HEAD(&lio->lis_active);
- io->ci_result = lov_io_slice_init(lio, lov, io);
- if (io->ci_result == 0) {
- io->ci_result = lov_io_subio_init(env, lio, io);
- if (io->ci_result == 0) {
- cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
- atomic_inc(&lov->lo_active_ios);
- }
- }
- return io->ci_result;
-}
-
-int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- struct lov_object *lov = cl2lov(obj);
- struct lov_io *lio = lov_env_io(env);
- int result;
-
- lio->lis_object = lov;
- switch (io->ci_type) {
- default:
- LBUG();
- case CIT_MISC:
- case CIT_READ:
- result = 0;
- break;
- case CIT_FSYNC:
- case CIT_SETATTR:
- case CIT_DATA_VERSION:
- result = 1;
- break;
- case CIT_WRITE:
- result = -EBADF;
- break;
- case CIT_FAULT:
- result = -EFAULT;
- CERROR("Page fault on a file without stripes: " DFID "\n",
- PFID(lu_object_fid(&obj->co_lu)));
- break;
- }
- if (result == 0) {
- cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
- atomic_inc(&lov->lo_active_ios);
- }
-
- io->ci_result = result < 0 ? result : 0;
- return result;
-}
-
-int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- struct lov_object *lov = cl2lov(obj);
- struct lov_io *lio = lov_env_io(env);
- int result;
-
- LASSERT(lov->lo_lsm);
- lio->lis_object = lov;
-
- switch (io->ci_type) {
- default:
- LASSERTF(0, "invalid type %d\n", io->ci_type);
- result = -EOPNOTSUPP;
- break;
- case CIT_MISC:
- case CIT_FSYNC:
- case CIT_DATA_VERSION:
- result = 1;
- break;
- case CIT_SETATTR:
- /* the truncate to 0 is managed by MDT:
- * - in open, for open O_TRUNC
- * - in setattr, for truncate
- */
- /* the truncate is for size > 0 so triggers a restore */
- if (cl_io_is_trunc(io)) {
- io->ci_restore_needed = 1;
- result = -ENODATA;
- } else {
- result = 1;
- }
- break;
- case CIT_READ:
- case CIT_WRITE:
- case CIT_FAULT:
- io->ci_restore_needed = 1;
- result = -ENODATA;
- break;
- }
- if (result == 0) {
- cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
- atomic_inc(&lov->lo_active_ios);
- }
-
- io->ci_result = result < 0 ? result : 0;
- return result;
-}
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
deleted file mode 100644
index b0292100bf26..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ /dev/null
@@ -1,348 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lov lock operations.
- *
- */
-
-static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
- const struct cl_lock *parent,
- struct lov_lock_sub *lls)
-{
- struct lov_sublock_env *subenv;
- struct lov_io *lio = lov_env_io(env);
- struct cl_io *io = lio->lis_cl.cis_io;
- struct lov_io_sub *sub;
-
- subenv = &lov_env_session(env)->ls_subenv;
-
- /*
- * FIXME: We tend to use the subio's env & io to call the sublock
- * lock operations because osc lock sometimes stores some control
- * variables in thread's IO information(Now only lockless information).
- * However, if the lock's host(object) is different from the object
- * for current IO, we have no way to get the subenv and subio because
- * they are not initialized at all. As a temp fix, in this case,
- * we still borrow the parent's env to call sublock operations.
- */
- if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
- subenv->lse_env = env;
- subenv->lse_io = io;
- } else {
- sub = lov_sub_get(env, lio, lls->sub_stripe);
- if (!IS_ERR(sub)) {
- subenv->lse_env = sub->sub_env;
- subenv->lse_io = sub->sub_io;
- } else {
- subenv = (void *)sub;
- }
- }
- return subenv;
-}
-
-static int lov_sublock_init(const struct lu_env *env,
- const struct cl_lock *parent,
- struct lov_lock_sub *lls)
-{
- struct lov_sublock_env *subenv;
- int result;
-
- subenv = lov_sublock_env_get(env, parent, lls);
- if (!IS_ERR(subenv)) {
- result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
- subenv->lse_io);
- } else {
- /* error occurs. */
- result = PTR_ERR(subenv);
- }
- return result;
-}
-
-/**
- * Creates sub-locks for a given lov_lock for the first time.
- *
- * Goes through all sub-objects of top-object, and creates sub-locks on every
- * sub-object intersecting with top-lock extent. This is complicated by the
- * fact that top-lock (that is being created) can be accessed concurrently
- * through already created sub-locks (possibly shared with other top-locks).
- */
-static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
- const struct cl_object *obj,
- struct cl_lock *lock)
-{
- int result = 0;
- int i;
- int nr;
- u64 start;
- u64 end;
- u64 file_start;
- u64 file_end;
-
- struct lov_object *loo = cl2lov(obj);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct lov_lock *lovlck;
-
- CDEBUG(D_INODE, "%p: lock/io FID " DFID "/" DFID ", lock/io clobj %p/%p\n",
- loo, PFID(lu_object_fid(lov2lu(loo))),
- PFID(lu_object_fid(&obj->co_lu)),
- lov2cl(loo), obj);
-
- file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
- file_end = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;
-
- for (i = 0, nr = 0; i < r0->lo_nr; i++) {
- /*
- * XXX for wide striping smarter algorithm is desirable,
- * breaking out of the loop, early.
- */
- if (likely(r0->lo_sub[i]) && /* spare layout */
- lov_stripe_intersects(loo->lo_lsm, i,
- file_start, file_end, &start, &end))
- nr++;
- }
- LASSERT(nr > 0);
- lovlck = kvzalloc(offsetof(struct lov_lock, lls_sub[nr]),
- GFP_NOFS);
- if (!lovlck)
- return ERR_PTR(-ENOMEM);
-
- lovlck->lls_nr = nr;
- for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
- if (likely(r0->lo_sub[i]) &&
- lov_stripe_intersects(loo->lo_lsm, i,
- file_start, file_end, &start, &end)) {
- struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
- struct cl_lock_descr *descr;
-
- descr = &lls->sub_lock.cll_descr;
-
- LASSERT(!descr->cld_obj);
- descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
- descr->cld_start = cl_index(descr->cld_obj, start);
- descr->cld_end = cl_index(descr->cld_obj, end);
- descr->cld_mode = lock->cll_descr.cld_mode;
- descr->cld_gid = lock->cll_descr.cld_gid;
- descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
- lls->sub_stripe = i;
-
- /* initialize sub lock */
- result = lov_sublock_init(env, lock, lls);
- if (result < 0)
- break;
-
- lls->sub_initialized = 1;
- nr++;
- }
- }
- LASSERT(ergo(result == 0, nr == lovlck->lls_nr));
-
- if (result != 0) {
- for (i = 0; i < nr; ++i) {
- if (!lovlck->lls_sub[i].sub_initialized)
- break;
-
- cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
- }
- kvfree(lovlck);
- lovlck = ERR_PTR(result);
- }
-
- return lovlck;
-}
-
-static void lov_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct lov_lock *lovlck;
- int i;
-
- lovlck = cl2lov_lock(slice);
- for (i = 0; i < lovlck->lls_nr; ++i) {
- LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
- if (lovlck->lls_sub[i].sub_initialized)
- cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
- }
- kvfree(lovlck);
-}
-
-/**
- * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
- * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
- * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
- * state machines in the face of sub-locks sharing (by multiple top-locks),
- * and concurrent sub-lock cancellations.
- */
-static int lov_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *io, struct cl_sync_io *anchor)
-{
- struct cl_lock *lock = slice->cls_lock;
- struct lov_lock *lovlck = cl2lov_lock(slice);
- int i;
- int rc = 0;
-
- for (i = 0; i < lovlck->lls_nr; ++i) {
- struct lov_lock_sub *lls = &lovlck->lls_sub[i];
- struct lov_sublock_env *subenv;
-
- subenv = lov_sublock_env_get(env, lock, lls);
- if (IS_ERR(subenv)) {
- rc = PTR_ERR(subenv);
- break;
- }
- rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
- &lls->sub_lock, anchor);
- if (rc != 0)
- break;
-
- lls->sub_is_enqueued = 1;
- }
- return rc;
-}
-
-static void lov_lock_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct cl_lock *lock = slice->cls_lock;
- struct lov_lock *lovlck = cl2lov_lock(slice);
- int i;
-
- for (i = 0; i < lovlck->lls_nr; ++i) {
- struct lov_lock_sub *lls = &lovlck->lls_sub[i];
- struct cl_lock *sublock = &lls->sub_lock;
- struct lov_sublock_env *subenv;
-
- if (!lls->sub_is_enqueued)
- continue;
-
- lls->sub_is_enqueued = 0;
- subenv = lov_sublock_env_get(env, lock, lls);
- if (!IS_ERR(subenv)) {
- cl_lock_cancel(subenv->lse_env, sublock);
- } else {
- CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
- "%s fails with %ld.\n",
- __func__, PTR_ERR(subenv));
- }
- }
-}
-
-static int lov_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- int i;
-
- (*p)(env, cookie, "%d\n", lck->lls_nr);
- for (i = 0; i < lck->lls_nr; ++i) {
- struct lov_lock_sub *sub;
-
- sub = &lck->lls_sub[i];
- (*p)(env, cookie, " %d %x: ", i, sub->sub_is_enqueued);
- cl_lock_print(env, cookie, p, &sub->sub_lock);
- }
- return 0;
-}
-
-static const struct cl_lock_operations lov_lock_ops = {
- .clo_fini = lov_lock_fini,
- .clo_enqueue = lov_lock_enqueue,
- .clo_cancel = lov_lock_cancel,
- .clo_print = lov_lock_print
-};
-
-int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- struct lov_lock *lck;
- int result = 0;
-
- lck = lov_lock_sub_init(env, obj, lock);
- if (!IS_ERR(lck))
- cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
- else
- result = PTR_ERR(lck);
- return result;
-}
-
-static void lov_empty_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
-
- kmem_cache_free(lov_lock_kmem, lck);
-}
-
-static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p,
- const struct cl_lock_slice *slice)
-{
- (*p)(env, cookie, "empty\n");
- return 0;
-}
-
-/* XXX: more methods will be added later. */
-static const struct cl_lock_operations lov_empty_lock_ops = {
- .clo_fini = lov_empty_lock_fini,
- .clo_print = lov_empty_lock_print
-};
-
-int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- struct lov_lock *lck;
- int result = -ENOMEM;
-
- lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
- if (lck) {
- cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
- result = 0;
- }
- return result;
-}
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
deleted file mode 100644
index 3796bbb25305..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd_class.h>
-#include "lov_internal.h"
-
-/** Merge the lock value block(&lvb) attributes and KMS from each of the
- * stripes in a file into a single lvb. It is expected that the caller
- * initializes the current atime, mtime, ctime to avoid regressing a more
- * uptodate time on the local client.
- */
-int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
- struct ost_lvb *lvb, __u64 *kms_place)
-{
- __u64 size = 0;
- __u64 kms = 0;
- __u64 blocks = 0;
- s64 current_mtime = lvb->lvb_mtime;
- s64 current_atime = lvb->lvb_atime;
- s64 current_ctime = lvb->lvb_ctime;
- int i;
- int rc = 0;
-
- assert_spin_locked(&lsm->lsm_lock);
- LASSERT(lsm->lsm_lock_owner == current_pid());
-
- CDEBUG(D_INODE, "MDT ID " DOSTID " initial value: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
- POSTID(&lsm->lsm_oi), lvb->lvb_size, lvb->lvb_mtime,
- lvb->lvb_atime, lvb->lvb_ctime, lvb->lvb_blocks);
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
- u64 lov_size, tmpsize;
-
- if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks)) {
- rc = OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
- continue;
- }
-
- tmpsize = loi->loi_kms;
- lov_size = lov_stripe_size(lsm, tmpsize, i);
- if (lov_size > kms)
- kms = lov_size;
-
- if (loi->loi_lvb.lvb_size > tmpsize)
- tmpsize = loi->loi_lvb.lvb_size;
-
- lov_size = lov_stripe_size(lsm, tmpsize, i);
- if (lov_size > size)
- size = lov_size;
- /* merge blocks, mtime, atime */
- blocks += loi->loi_lvb.lvb_blocks;
- if (loi->loi_lvb.lvb_mtime > current_mtime)
- current_mtime = loi->loi_lvb.lvb_mtime;
- if (loi->loi_lvb.lvb_atime > current_atime)
- current_atime = loi->loi_lvb.lvb_atime;
- if (loi->loi_lvb.lvb_ctime > current_ctime)
- current_ctime = loi->loi_lvb.lvb_ctime;
-
- CDEBUG(D_INODE, "MDT ID " DOSTID " on OST[%u]: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
- POSTID(&lsm->lsm_oi), loi->loi_ost_idx,
- loi->loi_lvb.lvb_size, loi->loi_lvb.lvb_mtime,
- loi->loi_lvb.lvb_atime, loi->loi_lvb.lvb_ctime,
- loi->loi_lvb.lvb_blocks);
- }
-
- *kms_place = kms;
- lvb->lvb_size = size;
- lvb->lvb_blocks = blocks;
- lvb->lvb_mtime = current_mtime;
- lvb->lvb_atime = current_atime;
- lvb->lvb_ctime = current_ctime;
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
deleted file mode 100644
index ec70c12e5b40..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ /dev/null
@@ -1,1448 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lov/lov_obd.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Mike Shaver <shaver@clusterfs.com>
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-#include <linux/libcfs/libcfs.h>
-
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <uapi/linux/lustre/lustre_ioctl.h>
-
-#include <cl_object.h>
-#include <lustre_dlm.h>
-#include <lustre_fid.h>
-#include <lustre_lib.h>
-#include <lustre_mds.h>
-#include <lustre_net.h>
-#include <uapi/linux/lustre/lustre_param.h>
-#include <lustre_swab.h>
-#include <lprocfs_status.h>
-#include <obd_class.h>
-#include <obd_support.h>
-
-#include "lov_internal.h"
-
-/* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion.
- * Any function that expects lov_tgts to remain stationary must take a ref.
- */
-static void lov_getref(struct obd_device *obd)
-{
- struct lov_obd *lov = &obd->u.lov;
-
- /* nobody gets through here until lov_putref is done */
- mutex_lock(&lov->lov_lock);
- atomic_inc(&lov->lov_refcount);
- mutex_unlock(&lov->lov_lock);
-}
-
-static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt);
-
-static void lov_putref(struct obd_device *obd)
-{
- struct lov_obd *lov = &obd->u.lov;
-
- mutex_lock(&lov->lov_lock);
- /* ok to dec to 0 more than once -- ltd_exp's will be null */
- if (atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
- LIST_HEAD(kill);
- int i;
- struct lov_tgt_desc *tgt, *n;
-
- CDEBUG(D_CONFIG, "destroying %d lov targets\n",
- lov->lov_death_row);
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- tgt = lov->lov_tgts[i];
-
- if (!tgt || !tgt->ltd_reap)
- continue;
- list_add(&tgt->ltd_kill, &kill);
- /* XXX - right now there is a dependency on ld_tgt_count
- * being the maximum tgt index for computing the
- * mds_max_easize. So we can't shrink it.
- */
- lov_ost_pool_remove(&lov->lov_packed, i);
- lov->lov_tgts[i] = NULL;
- lov->lov_death_row--;
- }
- mutex_unlock(&lov->lov_lock);
-
- list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
- list_del(&tgt->ltd_kill);
- /* Disconnect */
- __lov_del_obd(obd, tgt);
- }
-
- if (lov->lov_tgts_kobj)
- kobject_put(lov->lov_tgts_kobj);
-
- } else {
- mutex_unlock(&lov->lov_lock);
- }
-}
-
-static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
- enum obd_notify_event ev);
-static int lov_notify(struct obd_device *obd, struct obd_device *watched,
- enum obd_notify_event ev, void *data);
-
-int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
- struct obd_connect_data *data)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct obd_uuid *tgt_uuid;
- struct obd_device *tgt_obd;
- static struct obd_uuid lov_osc_uuid = { "LOV_OSC_UUID" };
- struct obd_import *imp;
- int rc;
-
- if (!lov->lov_tgts[index])
- return -EINVAL;
-
- tgt_uuid = &lov->lov_tgts[index]->ltd_uuid;
- tgt_obd = lov->lov_tgts[index]->ltd_obd;
-
- if (!tgt_obd->obd_set_up) {
- CERROR("Target %s not set up\n", obd_uuid2str(tgt_uuid));
- return -EINVAL;
- }
-
- /* override the sp_me from lov */
- tgt_obd->u.cli.cl_sp_me = lov->lov_sp_me;
-
- if (data && (data->ocd_connect_flags & OBD_CONNECT_INDEX))
- data->ocd_index = index;
-
- /*
- * Divine LOV knows that OBDs under it are OSCs.
- */
- imp = tgt_obd->u.cli.cl_import;
-
- if (activate) {
- tgt_obd->obd_no_recov = 0;
- /* FIXME this is probably supposed to be
- * ptlrpc_set_import_active. Horrible naming.
- */
- ptlrpc_activate_import(imp);
- }
-
- rc = obd_register_observer(tgt_obd, obd);
- if (rc) {
- CERROR("Target %s register_observer error %d\n",
- obd_uuid2str(tgt_uuid), rc);
- return rc;
- }
-
- if (imp->imp_invalid) {
- CDEBUG(D_CONFIG, "not connecting OSC %s; administratively disabled\n",
- obd_uuid2str(tgt_uuid));
- return 0;
- }
-
- rc = obd_connect(NULL, &lov->lov_tgts[index]->ltd_exp, tgt_obd,
- &lov_osc_uuid, data, NULL);
- if (rc || !lov->lov_tgts[index]->ltd_exp) {
- CERROR("Target %s connect error %d\n",
- obd_uuid2str(tgt_uuid), rc);
- return -ENODEV;
- }
-
- lov->lov_tgts[index]->ltd_reap = 0;
-
- CDEBUG(D_CONFIG, "Connected tgt idx %d %s (%s) %sactive\n", index,
- obd_uuid2str(tgt_uuid), tgt_obd->obd_name, activate ? "":"in");
-
- if (lov->lov_tgts_kobj)
- /* Even if we failed, that's ok */
- rc = sysfs_create_link(lov->lov_tgts_kobj, &tgt_obd->obd_kobj,
- tgt_obd->obd_name);
-
- return 0;
-}
-
-static int lov_connect(const struct lu_env *env,
- struct obd_export **exp, struct obd_device *obd,
- struct obd_uuid *cluuid, struct obd_connect_data *data,
- void *localdata)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct lov_tgt_desc *tgt;
- struct lustre_handle conn;
- int i, rc;
-
- CDEBUG(D_CONFIG, "connect #%d\n", lov->lov_connects);
-
- rc = class_connect(&conn, obd, cluuid);
- if (rc)
- return rc;
-
- *exp = class_conn2export(&conn);
-
- /* Why should there ever be more than 1 connect? */
- lov->lov_connects++;
- LASSERT(lov->lov_connects == 1);
-
- memset(&lov->lov_ocd, 0, sizeof(lov->lov_ocd));
- if (data)
- lov->lov_ocd = *data;
-
- obd_getref(obd);
-
- lov->lov_tgts_kobj = kobject_create_and_add("target_obds",
- &obd->obd_kobj);
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- tgt = lov->lov_tgts[i];
- if (!tgt || obd_uuid_empty(&tgt->ltd_uuid))
- continue;
- /* Flags will be lowest common denominator */
- rc = lov_connect_obd(obd, i, tgt->ltd_activate, &lov->lov_ocd);
- if (rc) {
- CERROR("%s: lov connect tgt %d failed: %d\n",
- obd->obd_name, i, rc);
- continue;
- }
- /* connect to administrative disabled ost */
- if (!lov->lov_tgts[i]->ltd_exp)
- continue;
-
- rc = lov_notify(obd, lov->lov_tgts[i]->ltd_exp->exp_obd,
- OBD_NOTIFY_CONNECT, (void *)&i);
- if (rc) {
- CERROR("%s error sending notify %d\n",
- obd->obd_name, rc);
- }
- }
- obd_putref(obd);
-
- return 0;
-}
-
-static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct obd_device *osc_obd;
- int rc;
-
- osc_obd = class_exp2obd(tgt->ltd_exp);
- CDEBUG(D_CONFIG, "%s: disconnecting target %s\n",
- obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL");
-
- if (tgt->ltd_active) {
- tgt->ltd_active = 0;
- lov->desc.ld_active_tgt_count--;
- tgt->ltd_exp->exp_obd->obd_inactive = 1;
- }
-
- if (osc_obd) {
- if (lov->lov_tgts_kobj)
- sysfs_remove_link(lov->lov_tgts_kobj,
- osc_obd->obd_name);
-
- /* Pass it on to our clients.
- * XXX This should be an argument to disconnect,
- * XXX not a back-door flag on the OBD. Ah well.
- */
- osc_obd->obd_force = obd->obd_force;
- osc_obd->obd_fail = obd->obd_fail;
- osc_obd->obd_no_recov = obd->obd_no_recov;
- }
-
- obd_register_observer(osc_obd, NULL);
-
- rc = obd_disconnect(tgt->ltd_exp);
- if (rc) {
- CERROR("Target %s disconnect error %d\n",
- tgt->ltd_uuid.uuid, rc);
- rc = 0;
- }
-
- tgt->ltd_exp = NULL;
- return 0;
-}
-
-static int lov_disconnect(struct obd_export *exp)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_obd *lov = &obd->u.lov;
- int i, rc;
-
- if (!lov->lov_tgts)
- goto out;
-
- /* Only disconnect the underlying layers on the final disconnect. */
- lov->lov_connects--;
- if (lov->lov_connects != 0) {
- /* why should there be more than 1 connect? */
- CERROR("disconnect #%d\n", lov->lov_connects);
- goto out;
- }
-
- /* Let's hold another reference so lov_del_obd doesn't spin through
- * putref every time
- */
- obd_getref(obd);
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (lov->lov_tgts[i] && lov->lov_tgts[i]->ltd_exp) {
- /* Disconnection is the last we know about an obd */
- lov_del_target(obd, i, NULL, lov->lov_tgts[i]->ltd_gen);
- }
- }
-
- obd_putref(obd);
-
-out:
- rc = class_disconnect(exp); /* bz 9811 */
- return rc;
-}
-
-/* Error codes:
- *
- * -EINVAL : UUID can't be found in the LOV's target list
- * -ENOTCONN: The UUID is found, but the target connection is bad (!)
- * -EBADF : The UUID is found, but the OBD is the wrong type (!)
- * any >= 0 : is log target index
- */
-static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
- enum obd_notify_event ev)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct lov_tgt_desc *tgt;
- int index, activate, active;
-
- CDEBUG(D_INFO, "Searching in lov %p for uuid %s event(%d)\n",
- lov, uuid->uuid, ev);
-
- obd_getref(obd);
- for (index = 0; index < lov->desc.ld_tgt_count; index++) {
- tgt = lov->lov_tgts[index];
- if (!tgt)
- continue;
- /*
- * LU-642, initially inactive OSC could miss the obd_connect,
- * we make up for it here.
- */
- if (ev == OBD_NOTIFY_ACTIVATE && !tgt->ltd_exp &&
- obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
- struct obd_uuid lov_osc_uuid = {"LOV_OSC_UUID"};
-
- obd_connect(NULL, &tgt->ltd_exp, tgt->ltd_obd,
- &lov_osc_uuid, &lov->lov_ocd, NULL);
- }
- if (!tgt->ltd_exp)
- continue;
-
- CDEBUG(D_INFO, "lov idx %d is %s conn %#llx\n",
- index, obd_uuid2str(&tgt->ltd_uuid),
- tgt->ltd_exp->exp_handle.h_cookie);
- if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
- break;
- }
-
- if (index == lov->desc.ld_tgt_count) {
- index = -EINVAL;
- goto out;
- }
-
- if (ev == OBD_NOTIFY_DEACTIVATE || ev == OBD_NOTIFY_ACTIVATE) {
- activate = (ev == OBD_NOTIFY_ACTIVATE) ? 1 : 0;
-
- if (lov->lov_tgts[index]->ltd_activate == activate) {
- CDEBUG(D_INFO, "OSC %s already %sactivate!\n",
- uuid->uuid, activate ? "" : "de");
- } else {
- lov->lov_tgts[index]->ltd_activate = activate;
- CDEBUG(D_CONFIG, "%sactivate OSC %s\n",
- activate ? "" : "de", obd_uuid2str(uuid));
- }
-
- } else if (ev == OBD_NOTIFY_INACTIVE || ev == OBD_NOTIFY_ACTIVE) {
- active = (ev == OBD_NOTIFY_ACTIVE) ? 1 : 0;
-
- if (lov->lov_tgts[index]->ltd_active == active) {
- CDEBUG(D_INFO, "OSC %s already %sactive!\n",
- uuid->uuid, active ? "" : "in");
- goto out;
- }
- CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n",
- obd_uuid2str(uuid), active ? "" : "in");
-
- lov->lov_tgts[index]->ltd_active = active;
- if (active) {
- lov->desc.ld_active_tgt_count++;
- lov->lov_tgts[index]->ltd_exp->exp_obd->obd_inactive = 0;
- } else {
- lov->desc.ld_active_tgt_count--;
- lov->lov_tgts[index]->ltd_exp->exp_obd->obd_inactive = 1;
- }
- } else {
- CERROR("Unknown event(%d) for uuid %s", ev, uuid->uuid);
- }
-
- out:
- obd_putref(obd);
- return index;
-}
-
-static int lov_notify(struct obd_device *obd, struct obd_device *watched,
- enum obd_notify_event ev, void *data)
-{
- int rc = 0;
- struct lov_obd *lov = &obd->u.lov;
-
- down_read(&lov->lov_notify_lock);
- if (!lov->lov_connects) {
- up_read(&lov->lov_notify_lock);
- return rc;
- }
-
- if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE ||
- ev == OBD_NOTIFY_ACTIVATE || ev == OBD_NOTIFY_DEACTIVATE) {
- struct obd_uuid *uuid;
-
- LASSERT(watched);
-
- if (strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
- up_read(&lov->lov_notify_lock);
- CERROR("unexpected notification of %s %s!\n",
- watched->obd_type->typ_name,
- watched->obd_name);
- return -EINVAL;
- }
- uuid = &watched->u.cli.cl_target_uuid;
-
- /* Set OSC as active before notifying the observer, so the
- * observer can use the OSC normally.
- */
- rc = lov_set_osc_active(obd, uuid, ev);
- if (rc < 0) {
- up_read(&lov->lov_notify_lock);
- CERROR("event(%d) of %s failed: %d\n", ev,
- obd_uuid2str(uuid), rc);
- return rc;
- }
- /* active event should be pass lov target index as data */
- data = &rc;
- }
-
- /* Pass the notification up the chain. */
- if (watched) {
- rc = obd_notify_observer(obd, watched, ev, data);
- } else {
- /* NULL watched means all osc's in the lov (only for syncs) */
- /* sync event should be send lov idx as data */
- struct lov_obd *lov = &obd->u.lov;
- int i, is_sync;
-
- data = &i;
- is_sync = (ev == OBD_NOTIFY_SYNC) ||
- (ev == OBD_NOTIFY_SYNC_NONBLOCK);
-
- obd_getref(obd);
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i])
- continue;
-
- /* don't send sync event if target not
- * connected/activated
- */
- if (is_sync && !lov->lov_tgts[i]->ltd_active)
- continue;
-
- rc = obd_notify_observer(obd, lov->lov_tgts[i]->ltd_obd,
- ev, data);
- if (rc) {
- CERROR("%s: notify %s of %s failed %d\n",
- obd->obd_name,
- obd->obd_observer->obd_name,
- lov->lov_tgts[i]->ltd_obd->obd_name,
- rc);
- }
- }
- obd_putref(obd);
- }
-
- up_read(&lov->lov_notify_lock);
- return rc;
-}
-
-static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
- __u32 index, int gen, int active)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct lov_tgt_desc *tgt;
- struct obd_device *tgt_obd;
- int rc;
-
- CDEBUG(D_CONFIG, "uuid:%s idx:%d gen:%d active:%d\n",
- uuidp->uuid, index, gen, active);
-
- if (gen <= 0) {
- CERROR("request to add OBD %s with invalid generation: %d\n",
- uuidp->uuid, gen);
- return -EINVAL;
- }
-
- tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME,
- &obd->obd_uuid);
- if (!tgt_obd)
- return -EINVAL;
-
- mutex_lock(&lov->lov_lock);
-
- if ((index < lov->lov_tgt_size) && lov->lov_tgts[index]) {
- tgt = lov->lov_tgts[index];
- CERROR("UUID %s already assigned at LOV target index %d\n",
- obd_uuid2str(&tgt->ltd_uuid), index);
- mutex_unlock(&lov->lov_lock);
- return -EEXIST;
- }
-
- if (index >= lov->lov_tgt_size) {
- /* We need to reallocate the lov target array. */
- struct lov_tgt_desc **newtgts, **old = NULL;
- __u32 newsize, oldsize = 0;
-
- newsize = max_t(__u32, lov->lov_tgt_size, 2);
- while (newsize < index + 1)
- newsize <<= 1;
- newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
- if (!newtgts) {
- mutex_unlock(&lov->lov_lock);
- return -ENOMEM;
- }
-
- if (lov->lov_tgt_size) {
- memcpy(newtgts, lov->lov_tgts, sizeof(*newtgts) *
- lov->lov_tgt_size);
- old = lov->lov_tgts;
- oldsize = lov->lov_tgt_size;
- }
-
- lov->lov_tgts = newtgts;
- lov->lov_tgt_size = newsize;
- smp_rmb();
- kfree(old);
-
- CDEBUG(D_CONFIG, "tgts: %p size: %d\n",
- lov->lov_tgts, lov->lov_tgt_size);
- }
-
- tgt = kzalloc(sizeof(*tgt), GFP_NOFS);
- if (!tgt) {
- mutex_unlock(&lov->lov_lock);
- return -ENOMEM;
- }
-
- rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size);
- if (rc) {
- mutex_unlock(&lov->lov_lock);
- kfree(tgt);
- return rc;
- }
-
- tgt->ltd_uuid = *uuidp;
- tgt->ltd_obd = tgt_obd;
- /* XXX - add a sanity check on the generation number. */
- tgt->ltd_gen = gen;
- tgt->ltd_index = index;
- tgt->ltd_activate = active;
- lov->lov_tgts[index] = tgt;
- if (index >= lov->desc.ld_tgt_count)
- lov->desc.ld_tgt_count = index + 1;
-
- mutex_unlock(&lov->lov_lock);
-
- CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
- index, tgt->ltd_gen, lov->desc.ld_tgt_count);
-
- if (lov->lov_connects == 0) {
- /* lov_connect hasn't been called yet. We'll do the
- * lov_connect_obd on this target when that fn first runs,
- * because we don't know the connect flags yet.
- */
- return 0;
- }
-
- obd_getref(obd);
-
- rc = lov_connect_obd(obd, index, active, &lov->lov_ocd);
- if (rc)
- goto out;
-
- /* connect to administrative disabled ost */
- if (!tgt->ltd_exp) {
- rc = 0;
- goto out;
- }
-
- if (lov->lov_cache) {
- rc = obd_set_info_async(NULL, tgt->ltd_exp,
- sizeof(KEY_CACHE_SET), KEY_CACHE_SET,
- sizeof(struct cl_client_cache),
- lov->lov_cache, NULL);
- if (rc < 0)
- goto out;
- }
-
- rc = lov_notify(obd, tgt->ltd_exp->exp_obd,
- active ? OBD_NOTIFY_CONNECT : OBD_NOTIFY_INACTIVE,
- (void *)&index);
-
-out:
- if (rc) {
- CERROR("add failed (%d), deleting %s\n", rc,
- obd_uuid2str(&tgt->ltd_uuid));
- lov_del_target(obd, index, NULL, 0);
- }
- obd_putref(obd);
- return rc;
-}
-
-/* Schedule a target for deletion */
-int lov_del_target(struct obd_device *obd, __u32 index,
- struct obd_uuid *uuidp, int gen)
-{
- struct lov_obd *lov = &obd->u.lov;
- int count = lov->desc.ld_tgt_count;
- int rc = 0;
-
- if (index >= count) {
- CERROR("LOV target index %d >= number of LOV OBDs %d.\n",
- index, count);
- return -EINVAL;
- }
-
- /* to make sure there's no ongoing lov_notify() now */
- down_write(&lov->lov_notify_lock);
- obd_getref(obd);
-
- if (!lov->lov_tgts[index]) {
- CERROR("LOV target at index %d is not setup.\n", index);
- rc = -EINVAL;
- goto out;
- }
-
- if (uuidp && !obd_uuid_equals(uuidp, &lov->lov_tgts[index]->ltd_uuid)) {
- CERROR("LOV target UUID %s at index %d doesn't match %s.\n",
- lov_uuid2str(lov, index), index,
- obd_uuid2str(uuidp));
- rc = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_CONFIG, "uuid: %s idx: %d gen: %d exp: %p active: %d\n",
- lov_uuid2str(lov, index), index,
- lov->lov_tgts[index]->ltd_gen, lov->lov_tgts[index]->ltd_exp,
- lov->lov_tgts[index]->ltd_active);
-
- lov->lov_tgts[index]->ltd_reap = 1;
- lov->lov_death_row++;
- /* we really delete it from obd_putref */
-out:
- obd_putref(obd);
- up_write(&lov->lov_notify_lock);
-
- return rc;
-}
-
-static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
-{
- struct obd_device *osc_obd;
-
- LASSERT(tgt);
- LASSERT(tgt->ltd_reap);
-
- osc_obd = class_exp2obd(tgt->ltd_exp);
-
- CDEBUG(D_CONFIG, "Removing tgt %s : %s\n",
- tgt->ltd_uuid.uuid,
- osc_obd ? osc_obd->obd_name : "<no obd>");
-
- if (tgt->ltd_exp)
- lov_disconnect_obd(obd, tgt);
-
- kfree(tgt);
-
- /* Manual cleanup - no cleanup logs to clean up the osc's. We must
- * do it ourselves. And we can't do it from lov_cleanup,
- * because we just lost our only reference to it.
- */
- if (osc_obd)
- class_manual_cleanup(osc_obd);
-}
-
-void lov_fix_desc_stripe_size(__u64 *val)
-{
- if (*val < LOV_MIN_STRIPE_SIZE) {
- if (*val != 0)
- LCONSOLE_INFO("Increasing default stripe size to minimum %u\n",
- LOV_DESC_STRIPE_SIZE_DEFAULT);
- *val = LOV_DESC_STRIPE_SIZE_DEFAULT;
- } else if (*val & (LOV_MIN_STRIPE_SIZE - 1)) {
- *val &= ~(LOV_MIN_STRIPE_SIZE - 1);
- LCONSOLE_WARN("Changing default stripe size to %llu (a multiple of %u)\n",
- *val, LOV_MIN_STRIPE_SIZE);
- }
-}
-
-void lov_fix_desc_stripe_count(__u32 *val)
-{
- if (*val == 0)
- *val = 1;
-}
-
-void lov_fix_desc_pattern(__u32 *val)
-{
- /* from lov_setstripe */
- if ((*val != 0) && (*val != LOV_PATTERN_RAID0)) {
- LCONSOLE_WARN("Unknown stripe pattern: %#x\n", *val);
- *val = 0;
- }
-}
-
-void lov_fix_desc_qos_maxage(__u32 *val)
-{
- if (*val == 0)
- *val = LOV_DESC_QOS_MAXAGE_DEFAULT;
-}
-
-void lov_fix_desc(struct lov_desc *desc)
-{
- lov_fix_desc_stripe_size(&desc->ld_default_stripe_size);
- lov_fix_desc_stripe_count(&desc->ld_default_stripe_count);
- lov_fix_desc_pattern(&desc->ld_pattern);
- lov_fix_desc_qos_maxage(&desc->ld_qos_maxage);
-}
-
-int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lprocfs_static_vars lvars = { NULL };
- struct lov_desc *desc;
- struct lov_obd *lov = &obd->u.lov;
- int rc;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
- CERROR("LOV setup requires a descriptor\n");
- return -EINVAL;
- }
-
- desc = (struct lov_desc *)lustre_cfg_buf(lcfg, 1);
-
- if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
- CERROR("descriptor size wrong: %d > %d\n",
- (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
- return -EINVAL;
- }
-
- if (desc->ld_magic != LOV_DESC_MAGIC) {
- if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) {
- CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n",
- obd->obd_name, desc);
- lustre_swab_lov_desc(desc);
- } else {
- CERROR("%s: Bad lov desc magic: %#x\n",
- obd->obd_name, desc->ld_magic);
- return -EINVAL;
- }
- }
-
- lov_fix_desc(desc);
-
- desc->ld_active_tgt_count = 0;
- lov->desc = *desc;
- lov->lov_tgt_size = 0;
-
- mutex_init(&lov->lov_lock);
- atomic_set(&lov->lov_refcount, 0);
- lov->lov_sp_me = LUSTRE_SP_CLI;
-
- init_rwsem(&lov->lov_notify_lock);
-
- lov->lov_pools_hash_body = cfs_hash_create("POOLS", HASH_POOLS_CUR_BITS,
- HASH_POOLS_MAX_BITS,
- HASH_POOLS_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &pool_hash_operations,
- CFS_HASH_DEFAULT);
- INIT_LIST_HEAD(&lov->lov_pool_list);
- lov->lov_pool_count = 0;
- rc = lov_ost_pool_init(&lov->lov_packed, 0);
- if (rc)
- goto out;
-
- lprocfs_lov_init_vars(&lvars);
- lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
-
- rc = ldebugfs_seq_create(obd->obd_debugfs_entry, "target_obd",
- 0444, &lov_proc_target_fops, obd);
- if (rc)
- CWARN("Error adding the target_obd file\n");
-
- lov->lov_pool_debugfs_entry = ldebugfs_register("pools",
- obd->obd_debugfs_entry,
- NULL, NULL);
- return 0;
-
-out:
- return rc;
-}
-
-static int lov_cleanup(struct obd_device *obd)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct pool_desc *pool, *tmp;
-
- list_for_each_entry_safe(pool, tmp, &lov->lov_pool_list, pool_list) {
- /* free pool structs */
- CDEBUG(D_INFO, "delete pool %p\n", pool);
- /* In the function below, .hs_keycmp resolves to
- * pool_hashkey_keycmp()
- */
- /* coverity[overrun-buffer-val] */
- lov_pool_del(obd, pool->pool_name);
- }
- cfs_hash_putref(lov->lov_pools_hash_body);
- lov_ost_pool_free(&lov->lov_packed);
-
- lprocfs_obd_cleanup(obd);
- if (lov->lov_tgts) {
- int i;
-
- obd_getref(obd);
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i])
- continue;
-
- /* Inactive targets may never have connected */
- if (lov->lov_tgts[i]->ltd_active ||
- atomic_read(&lov->lov_refcount))
- /* We should never get here - these
- * should have been removed in the
- * disconnect.
- */
- CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n",
- i, lov->lov_death_row,
- atomic_read(&lov->lov_refcount));
- lov_del_target(obd, i, NULL, 0);
- }
- obd_putref(obd);
- kfree(lov->lov_tgts);
- lov->lov_tgt_size = 0;
- }
-
- if (lov->lov_cache) {
- cl_cache_decref(lov->lov_cache);
- lov->lov_cache = NULL;
- }
-
- return 0;
-}
-
-int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
- __u32 *indexp, int *genp)
-{
- struct obd_uuid obd_uuid;
- int cmd;
- int rc = 0;
-
- switch (cmd = lcfg->lcfg_command) {
- case LCFG_LOV_ADD_OBD:
- case LCFG_LOV_ADD_INA:
- case LCFG_LOV_DEL_OBD: {
- __u32 index;
- int gen;
- /* lov_modify_tgts add 0:lov_mdsA 1:ost1_UUID 2:0 3:1 */
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) {
- rc = -EINVAL;
- goto out;
- }
-
- obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1));
-
- rc = kstrtoint(lustre_cfg_buf(lcfg, 2), 10, indexp);
- if (rc < 0)
- goto out;
- rc = kstrtoint(lustre_cfg_buf(lcfg, 3), 10, genp);
- if (rc < 0)
- goto out;
- index = *indexp;
- gen = *genp;
- if (cmd == LCFG_LOV_ADD_OBD)
- rc = lov_add_target(obd, &obd_uuid, index, gen, 1);
- else if (cmd == LCFG_LOV_ADD_INA)
- rc = lov_add_target(obd, &obd_uuid, index, gen, 0);
- else
- rc = lov_del_target(obd, index, &obd_uuid, gen);
- goto out;
- }
- case LCFG_PARAM: {
- struct lprocfs_static_vars lvars = { NULL };
- struct lov_desc *desc = &obd->u.lov.desc;
-
- if (!desc) {
- rc = -EINVAL;
- goto out;
- }
-
- lprocfs_lov_init_vars(&lvars);
-
- rc = class_process_proc_param(PARAM_LOV, lvars.obd_vars,
- lcfg, obd);
- if (rc > 0)
- rc = 0;
- goto out;
- }
- case LCFG_POOL_NEW:
- case LCFG_POOL_ADD:
- case LCFG_POOL_DEL:
- case LCFG_POOL_REM:
- goto out;
-
- default: {
- CERROR("Unknown command: %d\n", lcfg->lcfg_command);
- rc = -EINVAL;
- goto out;
- }
- }
-out:
- return rc;
-}
-
-static int
-lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
-{
- struct lov_request_set *lovset = (struct lov_request_set *)data;
- int err;
-
- if (rc)
- atomic_set(&lovset->set_completes, 0);
-
- err = lov_fini_statfs_set(lovset);
- return rc ? rc : err;
-}
-
-static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
- __u64 max_age, struct ptlrpc_request_set *rqset)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_request_set *set;
- struct lov_request *req;
- struct lov_obd *lov;
- int rc = 0;
-
- LASSERT(oinfo->oi_osfs);
-
- lov = &obd->u.lov;
- rc = lov_prep_statfs_set(obd, oinfo, &set);
- if (rc)
- return rc;
-
- list_for_each_entry(req, &set->set_list, rq_link) {
- rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
- &req->rq_oi, max_age, rqset);
- if (rc)
- break;
- }
-
- if (rc || list_empty(&rqset->set_requests)) {
- int err;
-
- if (rc)
- atomic_set(&set->set_completes, 0);
- err = lov_fini_statfs_set(set);
- return rc ? rc : err;
- }
-
- LASSERT(!rqset->set_interpret);
- rqset->set_interpret = lov_statfs_interpret;
- rqset->set_arg = (void *)set;
- return 0;
-}
-
-static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age, __u32 flags)
-{
- struct ptlrpc_request_set *set = NULL;
- struct obd_info oinfo = {
- .oi_osfs = osfs,
- .oi_flags = flags,
- };
- int rc = 0;
-
- /* for obdclass we forbid using obd_statfs_rqset, but prefer using async
- * statfs requests
- */
- set = ptlrpc_prep_set();
- if (!set)
- return -ENOMEM;
-
- rc = lov_statfs_async(exp, &oinfo, max_age, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
-
- return rc;
-}
-
-static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void __user *uarg)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lov_obd *lov = &obddev->u.lov;
- int i = 0, rc = 0, count = lov->desc.ld_tgt_count;
- struct obd_uuid *uuidp;
-
- switch (cmd) {
- case IOC_OBD_STATFS: {
- struct obd_ioctl_data *data = karg;
- struct obd_device *osc_obd;
- struct obd_statfs stat_buf = {0};
- __u32 index;
- __u32 flags;
-
- memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
- if (index >= count)
- return -ENODEV;
-
- if (!lov->lov_tgts[index])
- /* Try again with the next index */
- return -EAGAIN;
- if (!lov->lov_tgts[index]->ltd_active)
- return -ENODATA;
-
- osc_obd = class_exp2obd(lov->lov_tgts[index]->ltd_exp);
- if (!osc_obd)
- return -EINVAL;
-
- /* copy UUID */
- if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
- min_t(unsigned long, data->ioc_plen2,
- sizeof(struct obd_uuid))))
- return -EFAULT;
-
- memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32));
- flags = flags & LL_STATFS_NODELAY ? OBD_STATFS_NODELAY : 0;
-
- /* got statfs data */
- rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- flags);
- if (rc)
- return rc;
- if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min_t(unsigned long, data->ioc_plen1,
- sizeof(stat_buf))))
- return -EFAULT;
- break;
- }
- case OBD_IOC_LOV_GET_CONFIG: {
- struct obd_ioctl_data *data;
- struct lov_desc *desc;
- char *buf = NULL;
- __u32 *genp;
-
- len = 0;
- if (obd_ioctl_getdata(&buf, &len, uarg))
- return -EINVAL;
-
- data = (struct obd_ioctl_data *)buf;
-
- if (sizeof(*desc) > data->ioc_inllen1) {
- kvfree(buf);
- return -EINVAL;
- }
-
- if (sizeof(uuidp->uuid) * count > data->ioc_inllen2) {
- kvfree(buf);
- return -EINVAL;
- }
-
- if (sizeof(__u32) * count > data->ioc_inllen3) {
- kvfree(buf);
- return -EINVAL;
- }
-
- desc = (struct lov_desc *)data->ioc_inlbuf1;
- memcpy(desc, &lov->desc, sizeof(*desc));
-
- uuidp = (struct obd_uuid *)data->ioc_inlbuf2;
- genp = (__u32 *)data->ioc_inlbuf3;
- /* the uuid will be empty for deleted OSTs */
- for (i = 0; i < count; i++, uuidp++, genp++) {
- if (!lov->lov_tgts[i])
- continue;
- *uuidp = lov->lov_tgts[i]->ltd_uuid;
- *genp = lov->lov_tgts[i]->ltd_gen;
- }
-
- if (copy_to_user(uarg, buf, len))
- rc = -EFAULT;
- kvfree(buf);
- break;
- }
- case OBD_IOC_QUOTACTL: {
- struct if_quotactl *qctl = karg;
- struct lov_tgt_desc *tgt = NULL;
- struct obd_quotactl *oqctl;
-
- if (qctl->qc_valid == QC_OSTIDX) {
- if (count <= qctl->qc_idx)
- return -EINVAL;
-
- tgt = lov->lov_tgts[qctl->qc_idx];
- if (!tgt || !tgt->ltd_exp)
- return -EINVAL;
- } else if (qctl->qc_valid == QC_UUID) {
- for (i = 0; i < count; i++) {
- tgt = lov->lov_tgts[i];
- if (!tgt ||
- !obd_uuid_equals(&tgt->ltd_uuid,
- &qctl->obd_uuid))
- continue;
-
- if (!tgt->ltd_exp)
- return -EINVAL;
-
- break;
- }
- } else {
- return -EINVAL;
- }
-
- if (i >= count)
- return -EAGAIN;
-
- LASSERT(tgt && tgt->ltd_exp);
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl)
- return -ENOMEM;
-
- QCTL_COPY(oqctl, qctl);
- rc = obd_quotactl(tgt->ltd_exp, oqctl);
- if (rc == 0) {
- QCTL_COPY(qctl, oqctl);
- qctl->qc_valid = QC_OSTIDX;
- qctl->obd_uuid = tgt->ltd_uuid;
- }
- kfree(oqctl);
- break;
- }
- default: {
- int set = 0;
-
- if (count == 0)
- return -ENOTTY;
-
- for (i = 0; i < count; i++) {
- int err;
- struct obd_device *osc_obd;
-
- /* OST was disconnected */
- if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_exp)
- continue;
-
- /* ll_umount_begin() sets force flag but for lov, not
- * osc. Let's pass it through
- */
- osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp);
- osc_obd->obd_force = obddev->obd_force;
- err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
- len, karg, uarg);
- if (err) {
- if (lov->lov_tgts[i]->ltd_active) {
- CDEBUG(err == -ENOTTY ?
- D_IOCTL : D_WARNING,
- "iocontrol OSC %s on OST idx %d cmd %x: err = %d\n",
- lov_uuid2str(lov, i),
- i, cmd, err);
- if (!rc)
- rc = err;
- }
- } else {
- set = 1;
- }
- }
- if (!set && !rc)
- rc = -EIO;
- }
- }
-
- return rc;
-}
-
-static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lov_obd *lov = &obddev->u.lov;
- struct lov_desc *ld = &lov->desc;
- int rc = 0;
-
- if (!vallen || !val)
- return -EFAULT;
-
- obd_getref(obddev);
-
- if (KEY_IS(KEY_MAX_EASIZE)) {
- u32 max_stripe_count = min_t(u32, ld->ld_active_tgt_count,
- LOV_MAX_STRIPE_COUNT);
-
- *((u32 *)val) = lov_mds_md_size(max_stripe_count, LOV_MAGIC_V3);
- } else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
- u32 def_stripe_count = min_t(u32, ld->ld_default_stripe_count,
- LOV_MAX_STRIPE_COUNT);
-
- *((u32 *)val) = lov_mds_md_size(def_stripe_count, LOV_MAGIC_V3);
- } else if (KEY_IS(KEY_TGT_COUNT)) {
- *((int *)val) = lov->desc.ld_tgt_count;
- } else {
- rc = -EINVAL;
- }
-
- obd_putref(obddev);
- return rc;
-}
-
-static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lov_obd *lov = &obddev->u.lov;
- u32 count;
- int i, rc = 0, err;
- struct lov_tgt_desc *tgt;
- int do_inactive = 0, no_set = 0;
-
- if (!set) {
- no_set = 1;
- set = ptlrpc_prep_set();
- if (!set)
- return -ENOMEM;
- }
-
- obd_getref(obddev);
- count = lov->desc.ld_tgt_count;
-
- if (KEY_IS(KEY_CHECKSUM)) {
- do_inactive = 1;
- } else if (KEY_IS(KEY_CACHE_SET)) {
- LASSERT(!lov->lov_cache);
- lov->lov_cache = val;
- do_inactive = 1;
- cl_cache_incref(lov->lov_cache);
- }
-
- for (i = 0; i < count; i++) {
- tgt = lov->lov_tgts[i];
-
- /* OST was disconnected */
- if (!tgt || !tgt->ltd_exp)
- continue;
-
- /* OST is inactive and we don't want inactive OSCs */
- if (!tgt->ltd_active && !do_inactive)
- continue;
-
- err = obd_set_info_async(env, tgt->ltd_exp, keylen, key,
- vallen, val, set);
- if (!rc)
- rc = err;
- }
-
- obd_putref(obddev);
- if (no_set) {
- err = ptlrpc_set_wait(set);
- if (!rc)
- rc = err;
- ptlrpc_set_destroy(set);
- }
- return rc;
-}
-
-void lov_stripe_lock(struct lov_stripe_md *md)
- __acquires(&md->lsm_lock)
-{
- LASSERT(md->lsm_lock_owner != current_pid());
- spin_lock(&md->lsm_lock);
- LASSERT(md->lsm_lock_owner == 0);
- md->lsm_lock_owner = current_pid();
-}
-
-void lov_stripe_unlock(struct lov_stripe_md *md)
- __releases(&md->lsm_lock)
-{
- LASSERT(md->lsm_lock_owner == current_pid());
- md->lsm_lock_owner = 0;
- spin_unlock(&md->lsm_lock);
-}
-
-static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct lov_tgt_desc *tgt;
- __u64 curspace = 0;
- __u64 bhardlimit = 0;
- int i, rc = 0;
-
- if (oqctl->qc_cmd != Q_GETOQUOTA &&
- oqctl->qc_cmd != LUSTRE_Q_SETQUOTA) {
- CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd);
- return -EFAULT;
- }
-
- /* for lov tgt */
- obd_getref(obd);
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- int err;
-
- tgt = lov->lov_tgts[i];
-
- if (!tgt)
- continue;
-
- if (!tgt->ltd_active || tgt->ltd_reap) {
- if (oqctl->qc_cmd == Q_GETOQUOTA &&
- lov->lov_tgts[i]->ltd_activate) {
- rc = -EREMOTEIO;
- CERROR("ost %d is inactive\n", i);
- } else {
- CDEBUG(D_HA, "ost %d is inactive\n", i);
- }
- continue;
- }
-
- err = obd_quotactl(tgt->ltd_exp, oqctl);
- if (err) {
- if (tgt->ltd_active && !rc)
- rc = err;
- continue;
- }
-
- if (oqctl->qc_cmd == Q_GETOQUOTA) {
- curspace += oqctl->qc_dqblk.dqb_curspace;
- bhardlimit += oqctl->qc_dqblk.dqb_bhardlimit;
- }
- }
- obd_putref(obd);
-
- if (oqctl->qc_cmd == Q_GETOQUOTA) {
- oqctl->qc_dqblk.dqb_curspace = curspace;
- oqctl->qc_dqblk.dqb_bhardlimit = bhardlimit;
- }
- return rc;
-}
-
-static struct obd_ops lov_obd_ops = {
- .owner = THIS_MODULE,
- .setup = lov_setup,
- .cleanup = lov_cleanup,
- /*.process_config = lov_process_config,*/
- .connect = lov_connect,
- .disconnect = lov_disconnect,
- .statfs = lov_statfs,
- .statfs_async = lov_statfs_async,
- .iocontrol = lov_iocontrol,
- .get_info = lov_get_info,
- .set_info_async = lov_set_info_async,
- .notify = lov_notify,
- .pool_new = lov_pool_new,
- .pool_rem = lov_pool_remove,
- .pool_add = lov_pool_add,
- .pool_del = lov_pool_del,
- .getref = lov_getref,
- .putref = lov_putref,
- .quotactl = lov_quotactl,
-};
-
-struct kmem_cache *lov_oinfo_slab;
-
-static int __init lov_init(void)
-{
- struct lprocfs_static_vars lvars = { NULL };
- int rc;
-
- /* print an address of _any_ initialized kernel symbol from this
- * module, to allow debugging with gdb that doesn't support data
- * symbols from modules.
- */
- CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches);
-
- rc = lu_kmem_init(lov_caches);
- if (rc)
- return rc;
-
- lov_oinfo_slab = kmem_cache_create("lov_oinfo",
- sizeof(struct lov_oinfo),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!lov_oinfo_slab) {
- lu_kmem_fini(lov_caches);
- return -ENOMEM;
- }
- lprocfs_lov_init_vars(&lvars);
-
- rc = class_register_type(&lov_obd_ops, NULL,
- LUSTRE_LOV_NAME, &lov_device_type);
-
- if (rc) {
- kmem_cache_destroy(lov_oinfo_slab);
- lu_kmem_fini(lov_caches);
- }
-
- return rc;
-}
-
-static void /*__exit*/ lov_exit(void)
-{
- class_unregister_type(LUSTRE_LOV_NAME);
- kmem_cache_destroy(lov_oinfo_slab);
-
- lu_kmem_fini(lov_caches);
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Logical Object Volume");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-
-module_init(lov_init);
-module_exit(lov_exit);
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
deleted file mode 100644
index f7c69680cb7d..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ /dev/null
@@ -1,1625 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_object for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-static inline struct lov_device *lov_object_dev(struct lov_object *obj)
-{
- return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
-}
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Layout operations.
- *
- */
-
-struct lov_layout_operations {
- int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
- struct lov_object *lov, struct lov_stripe_md *lsm,
- const struct cl_object_conf *conf,
- union lov_layout_state *state);
- int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state);
- void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state);
- void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state);
- int (*llo_print)(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o);
- int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index);
- int (*llo_lock_init)(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
- int (*llo_io_init)(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
- int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr);
-};
-
-static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
-
-static void lov_lsm_put(struct lov_stripe_md *lsm)
-{
- if (lsm)
- lov_free_memmd(&lsm);
-}
-
-/*****************************************************************************
- *
- * Lov object layout operations.
- *
- */
-
-static void lov_install_empty(const struct lu_env *env,
- struct lov_object *lov,
- union lov_layout_state *state)
-{
- /*
- * File without objects.
- */
-}
-
-static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
- struct lov_object *lov, struct lov_stripe_md *lsm,
- const struct cl_object_conf *conf,
- union lov_layout_state *state)
-{
- return 0;
-}
-
-static void lov_install_raid0(const struct lu_env *env,
- struct lov_object *lov,
- union lov_layout_state *state)
-{
-}
-
-static struct cl_object *lov_sub_find(const struct lu_env *env,
- struct cl_device *dev,
- const struct lu_fid *fid,
- const struct cl_object_conf *conf)
-{
- struct lu_object *o;
-
- o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
- LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
- return lu2cl(o);
-}
-
-static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
- struct cl_object *stripe, struct lov_layout_raid0 *r0,
- int idx)
-{
- struct cl_object_header *hdr;
- struct cl_object_header *subhdr;
- struct cl_object_header *parent;
- struct lov_oinfo *oinfo;
- int result;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
- /* For sanity:test_206.
- * Do not leave the object in cache to avoid accessing
- * freed memory. This is because osc_object is referring to
- * lov_oinfo of lsm_stripe_data which will be freed due to
- * this failure.
- */
- cl_object_kill(env, stripe);
- cl_object_put(env, stripe);
- return -EIO;
- }
-
- hdr = cl_object_header(lov2cl(lov));
- subhdr = cl_object_header(stripe);
-
- oinfo = lov->lo_lsm->lsm_oinfo[idx];
- CDEBUG(D_INODE, DFID "@%p[%d] -> " DFID "@%p: ostid: " DOSTID " idx: %d gen: %d\n",
- PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
- PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
- oinfo->loi_ost_idx, oinfo->loi_ost_gen);
-
- /* reuse ->coh_attr_guard to protect coh_parent change */
- spin_lock(&subhdr->coh_attr_guard);
- parent = subhdr->coh_parent;
- if (!parent) {
- subhdr->coh_parent = hdr;
- spin_unlock(&subhdr->coh_attr_guard);
- subhdr->coh_nesting = hdr->coh_nesting + 1;
- lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
- r0->lo_sub[idx] = cl2lovsub(stripe);
- r0->lo_sub[idx]->lso_super = lov;
- r0->lo_sub[idx]->lso_index = idx;
- result = 0;
- } else {
- struct lu_object *old_obj;
- struct lov_object *old_lov;
- unsigned int mask = D_INODE;
-
- spin_unlock(&subhdr->coh_attr_guard);
- old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
- LASSERT(old_obj);
- old_lov = cl2lov(lu2cl(old_obj));
- if (old_lov->lo_layout_invalid) {
- /* the object's layout has already changed but isn't
- * refreshed
- */
- lu_object_unhash(env, &stripe->co_lu);
- result = -EAGAIN;
- } else {
- mask = D_ERROR;
- result = -EIO;
- }
-
- LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
- "stripe %d is already owned.", idx);
- LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
- LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
- cl_object_put(env, stripe);
- }
- return result;
-}
-
-static int lov_page_slice_fixup(struct lov_object *lov,
- struct cl_object *stripe)
-{
- struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
- struct cl_object *o;
-
- if (!stripe)
- return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
- cfs_size_round(sizeof(struct lov_page));
-
- cl_object_for_each(o, stripe)
- o->co_slice_off += hdr->coh_page_bufsize;
-
- return cl_object_header(stripe)->coh_page_bufsize;
-}
-
-static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
- struct lov_object *lov, struct lov_stripe_md *lsm,
- const struct cl_object_conf *conf,
- union lov_layout_state *state)
-{
- int result;
- int i;
-
- struct cl_object *stripe;
- struct lov_thread_info *lti = lov_env_info(env);
- struct cl_object_conf *subconf = &lti->lti_stripe_conf;
- struct lu_fid *ofid = &lti->lti_fid;
- struct lov_layout_raid0 *r0 = &state->raid0;
-
- if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
- dump_lsm(D_ERROR, lsm);
- LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
- LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
- }
-
- LASSERT(!lov->lo_lsm);
- lov->lo_lsm = lsm_addref(lsm);
- lov->lo_layout_invalid = true;
- r0->lo_nr = lsm->lsm_stripe_count;
- LASSERT(r0->lo_nr <= lov_targets_nr(dev));
-
- r0->lo_sub = kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
- GFP_NOFS);
- if (r0->lo_sub) {
- int psz = 0;
-
- result = 0;
- subconf->coc_inode = conf->coc_inode;
- spin_lock_init(&r0->lo_sub_lock);
- /*
- * Create stripe cl_objects.
- */
- for (i = 0; i < r0->lo_nr && result == 0; ++i) {
- struct cl_device *subdev;
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
- int ost_idx = oinfo->loi_ost_idx;
-
- if (lov_oinfo_is_dummy(oinfo))
- continue;
-
- result = ostid_to_fid(ofid, &oinfo->loi_oi,
- oinfo->loi_ost_idx);
- if (result != 0)
- goto out;
-
- if (!dev->ld_target[ost_idx]) {
- CERROR("%s: OST %04x is not initialized\n",
- lov2obd(dev->ld_lov)->obd_name, ost_idx);
- result = -EIO;
- goto out;
- }
-
- subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
- subconf->u.coc_oinfo = oinfo;
- LASSERTF(subdev, "not init ost %d\n", ost_idx);
- /* In the function below, .hs_keycmp resolves to
- * lu_obj_hop_keycmp()
- */
- /* coverity[overrun-buffer-val] */
- stripe = lov_sub_find(env, subdev, ofid, subconf);
- if (!IS_ERR(stripe)) {
- result = lov_init_sub(env, lov, stripe, r0, i);
- if (result == -EAGAIN) { /* try again */
- --i;
- result = 0;
- continue;
- }
- } else {
- result = PTR_ERR(stripe);
- }
-
- if (result == 0) {
- int sz = lov_page_slice_fixup(lov, stripe);
-
- LASSERT(ergo(psz > 0, psz == sz));
- psz = sz;
- }
- }
- if (result == 0)
- cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
- } else {
- result = -ENOMEM;
- }
-out:
- return result;
-}
-
-static int lov_init_released(const struct lu_env *env, struct lov_device *dev,
- struct lov_object *lov, struct lov_stripe_md *lsm,
- const struct cl_object_conf *conf,
- union lov_layout_state *state)
-{
- LASSERT(lsm);
- LASSERT(lsm_is_released(lsm));
- LASSERT(!lov->lo_lsm);
-
- lov->lo_lsm = lsm_addref(lsm);
- return 0;
-}
-
-static struct cl_object *lov_find_subobj(const struct lu_env *env,
- struct lov_object *lov,
- struct lov_stripe_md *lsm,
- int stripe_idx)
-{
- struct lov_device *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[stripe_idx];
- struct lov_thread_info *lti = lov_env_info(env);
- struct lu_fid *ofid = &lti->lti_fid;
- struct cl_device *subdev;
- struct cl_object *result;
- int ost_idx;
- int rc;
-
- if (lov->lo_type != LLT_RAID0) {
- result = NULL;
- goto out;
- }
-
- ost_idx = oinfo->loi_ost_idx;
- rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
- if (rc) {
- result = NULL;
- goto out;
- }
-
- subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
- result = lov_sub_find(env, subdev, ofid, NULL);
-out:
- if (!result)
- result = ERR_PTR(-EINVAL);
- return result;
-}
-
-static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
-
- lov_layout_wait(env, lov);
- return 0;
-}
-
-static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
- struct lovsub_object *los, int idx)
-{
- struct cl_object *sub;
- struct lov_layout_raid0 *r0;
- struct lu_site *site;
- struct lu_site_bkt_data *bkt;
- wait_queue_entry_t *waiter;
-
- r0 = &lov->u.raid0;
- LASSERT(r0->lo_sub[idx] == los);
-
- sub = lovsub2cl(los);
- site = sub->co_lu.lo_dev->ld_site;
- bkt = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
-
- cl_object_kill(env, sub);
- /* release a reference to the sub-object and ... */
- lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
- cl_object_put(env, sub);
-
- /* ... wait until it is actually destroyed---sub-object clears its
- * ->lo_sub[] slot in lovsub_object_fini()
- */
- if (r0->lo_sub[idx] == los) {
- waiter = &lov_env_info(env)->lti_waiter;
- init_waitqueue_entry(waiter, current);
- add_wait_queue(&bkt->lsb_marche_funebre, waiter);
- set_current_state(TASK_UNINTERRUPTIBLE);
- while (1) {
- /* this wait-queue is signaled at the end of
- * lu_object_free().
- */
- set_current_state(TASK_UNINTERRUPTIBLE);
- spin_lock(&r0->lo_sub_lock);
- if (r0->lo_sub[idx] == los) {
- spin_unlock(&r0->lo_sub_lock);
- schedule();
- } else {
- spin_unlock(&r0->lo_sub_lock);
- set_current_state(TASK_RUNNING);
- break;
- }
- }
- remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
- }
- LASSERT(!r0->lo_sub[idx]);
-}
-
-static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- struct lov_layout_raid0 *r0 = &state->raid0;
- struct lov_stripe_md *lsm = lov->lo_lsm;
- int i;
-
- dump_lsm(D_INODE, lsm);
-
- lov_layout_wait(env, lov);
- if (r0->lo_sub) {
- for (i = 0; i < r0->lo_nr; ++i) {
- struct lovsub_object *los = r0->lo_sub[i];
-
- if (los) {
- cl_object_prune(env, &los->lso_cl);
- /*
- * If top-level object is to be evicted from
- * the cache, so are its sub-objects.
- */
- lov_subobject_kill(env, lov, los, i);
- }
- }
- }
- return 0;
-}
-
-static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
-}
-
-static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- struct lov_layout_raid0 *r0 = &state->raid0;
-
- if (r0->lo_sub) {
- kvfree(r0->lo_sub);
- r0->lo_sub = NULL;
- }
-
- dump_lsm(D_INODE, lov->lo_lsm);
- lov_free_memmd(&lov->lo_lsm);
-}
-
-static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- dump_lsm(D_INODE, lov->lo_lsm);
- lov_free_memmd(&lov->lo_lsm);
-}
-
-static int lov_print_empty(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
- return 0;
-}
-
-static int lov_print_raid0(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- struct lov_object *lov = lu2lov(o);
- struct lov_layout_raid0 *r0 = lov_r0(lov);
- struct lov_stripe_md *lsm = lov->lo_lsm;
- int i;
-
- (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
- r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
- lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
- lsm->lsm_stripe_count, lsm->lsm_layout_gen);
- for (i = 0; i < r0->lo_nr; ++i) {
- struct lu_object *sub;
-
- if (r0->lo_sub[i]) {
- sub = lovsub2lu(r0->lo_sub[i]);
- lu_object_print(env, cookie, p, sub);
- } else {
- (*p)(env, cookie, "sub %d absent\n", i);
- }
- }
- return 0;
-}
-
-static int lov_print_released(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- struct lov_object *lov = lu2lov(o);
- struct lov_stripe_md *lsm = lov->lo_lsm;
-
- (*p)(env, cookie,
- "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
- lov->lo_layout_invalid ? "invalid" : "valid", lsm,
- lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
- lsm->lsm_stripe_count, lsm->lsm_layout_gen);
- return 0;
-}
-
-/**
- * Implements cl_object_operations::coo_attr_get() method for an object
- * without stripes (LLT_EMPTY layout type).
- *
- * The only attributes this layer is authoritative in this case is
- * cl_attr::cat_blocks---it's 0.
- */
-static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- attr->cat_blocks = 0;
- return 0;
-}
-
-static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- struct lov_object *lov = cl2lov(obj);
- struct lov_layout_raid0 *r0 = lov_r0(lov);
- struct cl_attr *lov_attr = &r0->lo_attr;
- int result = 0;
-
- /* this is called w/o holding type guard mutex, so it must be inside
- * an on going IO otherwise lsm may be replaced.
- * LU-2117: it turns out there exists one exception. For mmaped files,
- * the lock of those files may be requested in the other file's IO
- * context, and this function is called in ccc_lock_state(), it will
- * hit this assertion.
- * Anyway, it's still okay to call attr_get w/o type guard as layout
- * can't go if locks exist.
- */
- /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
-
- if (!r0->lo_attr_valid) {
- struct lov_stripe_md *lsm = lov->lo_lsm;
- struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
- __u64 kms = 0;
-
- memset(lvb, 0, sizeof(*lvb));
- /* XXX: timestamps can be negative by sanity:test_39m,
- * how can it be?
- */
- lvb->lvb_atime = LLONG_MIN;
- lvb->lvb_ctime = LLONG_MIN;
- lvb->lvb_mtime = LLONG_MIN;
-
- /*
- * XXX that should be replaced with a loop over sub-objects,
- * doing cl_object_attr_get() on them. But for now, let's
- * reuse old lov code.
- */
-
- /*
- * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
- * happy. It's not needed, because new code uses
- * ->coh_attr_guard spin-lock to protect consistency of
- * sub-object attributes.
- */
- lov_stripe_lock(lsm);
- result = lov_merge_lvb_kms(lsm, lvb, &kms);
- lov_stripe_unlock(lsm);
- if (result == 0) {
- cl_lvb2attr(lov_attr, lvb);
- lov_attr->cat_kms = kms;
- r0->lo_attr_valid = 1;
- }
- }
- if (result == 0) { /* merge results */
- attr->cat_blocks = lov_attr->cat_blocks;
- attr->cat_size = lov_attr->cat_size;
- attr->cat_kms = lov_attr->cat_kms;
- if (attr->cat_atime < lov_attr->cat_atime)
- attr->cat_atime = lov_attr->cat_atime;
- if (attr->cat_ctime < lov_attr->cat_ctime)
- attr->cat_ctime = lov_attr->cat_ctime;
- if (attr->cat_mtime < lov_attr->cat_mtime)
- attr->cat_mtime = lov_attr->cat_mtime;
- }
- return result;
-}
-
-static const struct lov_layout_operations lov_dispatch[] = {
- [LLT_EMPTY] = {
- .llo_init = lov_init_empty,
- .llo_delete = lov_delete_empty,
- .llo_fini = lov_fini_empty,
- .llo_install = lov_install_empty,
- .llo_print = lov_print_empty,
- .llo_page_init = lov_page_init_empty,
- .llo_lock_init = lov_lock_init_empty,
- .llo_io_init = lov_io_init_empty,
- .llo_getattr = lov_attr_get_empty
- },
- [LLT_RAID0] = {
- .llo_init = lov_init_raid0,
- .llo_delete = lov_delete_raid0,
- .llo_fini = lov_fini_raid0,
- .llo_install = lov_install_raid0,
- .llo_print = lov_print_raid0,
- .llo_page_init = lov_page_init_raid0,
- .llo_lock_init = lov_lock_init_raid0,
- .llo_io_init = lov_io_init_raid0,
- .llo_getattr = lov_attr_get_raid0
- },
- [LLT_RELEASED] = {
- .llo_init = lov_init_released,
- .llo_delete = lov_delete_empty,
- .llo_fini = lov_fini_released,
- .llo_install = lov_install_empty,
- .llo_print = lov_print_released,
- .llo_page_init = lov_page_init_empty,
- .llo_lock_init = lov_lock_init_empty,
- .llo_io_init = lov_io_init_released,
- .llo_getattr = lov_attr_get_empty
- }
-};
-
-/**
- * Performs a double-dispatch based on the layout type of an object.
- */
-#define LOV_2DISPATCH_NOLOCK(obj, op, ...) \
-({ \
- struct lov_object *__obj = (obj); \
- enum lov_layout_type __llt; \
- \
- __llt = __obj->lo_type; \
- LASSERT(__llt < ARRAY_SIZE(lov_dispatch)); \
- lov_dispatch[__llt].op(__VA_ARGS__); \
-})
-
-/**
- * Return lov_layout_type associated with a given lsm
- */
-static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
-{
- if (!lsm)
- return LLT_EMPTY;
- if (lsm_is_released(lsm))
- return LLT_RELEASED;
- return LLT_RAID0;
-}
-
-static inline void lov_conf_freeze(struct lov_object *lov)
-{
- CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
- lov, lov->lo_owner, current);
- if (lov->lo_owner != current)
- down_read(&lov->lo_type_guard);
-}
-
-static inline void lov_conf_thaw(struct lov_object *lov)
-{
- CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
- lov, lov->lo_owner, current);
- if (lov->lo_owner != current)
- up_read(&lov->lo_type_guard);
-}
-
-#define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) \
-({ \
- struct lov_object *__obj = (obj); \
- int __lock = !!(lock); \
- typeof(lov_dispatch[0].op(__VA_ARGS__)) __result; \
- \
- if (__lock) \
- lov_conf_freeze(__obj); \
- __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__); \
- if (__lock) \
- lov_conf_thaw(__obj); \
- __result; \
-})
-
-/**
- * Performs a locked double-dispatch based on the layout type of an object.
- */
-#define LOV_2DISPATCH(obj, op, ...) \
- LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
-
-#define LOV_2DISPATCH_VOID(obj, op, ...) \
-do { \
- struct lov_object *__obj = (obj); \
- enum lov_layout_type __llt; \
- \
- lov_conf_freeze(__obj); \
- __llt = __obj->lo_type; \
- LASSERT(__llt < ARRAY_SIZE(lov_dispatch)); \
- lov_dispatch[__llt].op(__VA_ARGS__); \
- lov_conf_thaw(__obj); \
-} while (0)
-
-static void lov_conf_lock(struct lov_object *lov)
-{
- LASSERT(lov->lo_owner != current);
- down_write(&lov->lo_type_guard);
- LASSERT(!lov->lo_owner);
- lov->lo_owner = current;
- CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
- lov, lov->lo_owner);
-}
-
-static void lov_conf_unlock(struct lov_object *lov)
-{
- CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
- lov, lov->lo_owner);
- lov->lo_owner = NULL;
- up_write(&lov->lo_type_guard);
-}
-
-static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
-{
- while (atomic_read(&lov->lo_active_ios) > 0) {
- CDEBUG(D_INODE, "file:" DFID " wait for active IO, now: %d.\n",
- PFID(lu_object_fid(lov2lu(lov))),
- atomic_read(&lov->lo_active_ios));
-
- wait_event_idle(lov->lo_waitq,
- atomic_read(&lov->lo_active_ios) == 0);
- }
- return 0;
-}
-
-static int lov_layout_change(const struct lu_env *unused,
- struct lov_object *lov, struct lov_stripe_md *lsm,
- const struct cl_object_conf *conf)
-{
- struct lov_device *lov_dev = lov_object_dev(lov);
- enum lov_layout_type llt = lov_type(lsm);
- union lov_layout_state *state = &lov->u;
- const struct lov_layout_operations *old_ops;
- const struct lov_layout_operations *new_ops;
- struct lu_env *env;
- u16 refcheck;
- int rc;
-
- LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- LASSERT(llt < ARRAY_SIZE(lov_dispatch));
-
- CDEBUG(D_INODE, DFID " from %s to %s\n",
- PFID(lu_object_fid(lov2lu(lov))),
- llt2str(lov->lo_type), llt2str(llt));
-
- old_ops = &lov_dispatch[lov->lo_type];
- new_ops = &lov_dispatch[llt];
-
- rc = cl_object_prune(env, &lov->lo_cl);
- if (rc)
- goto out;
-
- rc = old_ops->llo_delete(env, lov, &lov->u);
- if (rc)
- goto out;
-
- old_ops->llo_fini(env, lov, &lov->u);
-
- LASSERT(!atomic_read(&lov->lo_active_ios));
-
- CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
- PFID(lu_object_fid(lov2lu(lov))), lov, llt);
-
- lov->lo_type = LLT_EMPTY;
-
- /* page bufsize fixup */
- cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
- lov_page_slice_fixup(lov, NULL);
-
- rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
- if (rc) {
- struct obd_device *obd = lov2obd(lov_dev->ld_lov);
-
- CERROR("%s: cannot apply new layout on " DFID " : rc = %d\n",
- obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
- new_ops->llo_delete(env, lov, state);
- new_ops->llo_fini(env, lov, state);
- /* this file becomes an EMPTY file. */
- goto out;
- }
-
- new_ops->llo_install(env, lov, state);
- lov->lo_type = llt;
-out:
- cl_env_put(env, &refcheck);
- return rc;
-}
-
-/*****************************************************************************
- *
- * Lov object operations.
- *
- */
-int lov_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct lov_object *lov = lu2lov(obj);
- struct lov_device *dev = lov_object_dev(lov);
- const struct cl_object_conf *cconf = lu2cl_conf(conf);
- union lov_layout_state *set = &lov->u;
- const struct lov_layout_operations *ops;
- struct lov_stripe_md *lsm = NULL;
- int rc;
-
- init_rwsem(&lov->lo_type_guard);
- atomic_set(&lov->lo_active_ios, 0);
- init_waitqueue_head(&lov->lo_waitq);
- cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
-
- lov->lo_type = LLT_EMPTY;
- if (cconf->u.coc_layout.lb_buf) {
- lsm = lov_unpackmd(dev->ld_lov,
- cconf->u.coc_layout.lb_buf,
- cconf->u.coc_layout.lb_len);
- if (IS_ERR(lsm))
- return PTR_ERR(lsm);
- }
-
- /* no locking is necessary, as object is being created */
- lov->lo_type = lov_type(lsm);
- ops = &lov_dispatch[lov->lo_type];
- rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
- if (!rc)
- ops->llo_install(env, lov, set);
-
- lov_lsm_put(lsm);
-
- return rc;
-}
-
-static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- struct lov_stripe_md *lsm = NULL;
- struct lov_object *lov = cl2lov(obj);
- int result = 0;
-
- if (conf->coc_opc == OBJECT_CONF_SET &&
- conf->u.coc_layout.lb_buf) {
- lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
- conf->u.coc_layout.lb_buf,
- conf->u.coc_layout.lb_len);
- if (IS_ERR(lsm))
- return PTR_ERR(lsm);
- }
-
- lov_conf_lock(lov);
- if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
- lov->lo_layout_invalid = true;
- result = 0;
- goto out;
- }
-
- if (conf->coc_opc == OBJECT_CONF_WAIT) {
- if (lov->lo_layout_invalid &&
- atomic_read(&lov->lo_active_ios) > 0) {
- lov_conf_unlock(lov);
- result = lov_layout_wait(env, lov);
- lov_conf_lock(lov);
- }
- goto out;
- }
-
- LASSERT(conf->coc_opc == OBJECT_CONF_SET);
-
- if ((!lsm && !lov->lo_lsm) ||
- ((lsm && lov->lo_lsm) &&
- (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
- (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
- /* same version of layout */
- lov->lo_layout_invalid = false;
- result = 0;
- goto out;
- }
-
- /* will change layout - check if there still exists active IO. */
- if (atomic_read(&lov->lo_active_ios) > 0) {
- lov->lo_layout_invalid = true;
- result = -EBUSY;
- goto out;
- }
-
- result = lov_layout_change(env, lov, lsm, conf);
- lov->lo_layout_invalid = result != 0;
-
-out:
- lov_conf_unlock(lov);
- lov_lsm_put(lsm);
- CDEBUG(D_INODE, DFID " lo_layout_invalid=%d\n",
- PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
- return result;
-}
-
-static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
-{
- struct lov_object *lov = lu2lov(obj);
-
- LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
-}
-
-static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct lov_object *lov = lu2lov(obj);
-
- LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
- lu_object_fini(obj);
- kmem_cache_free(lov_object_kmem, lov);
-}
-
-static int lov_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
-}
-
-int lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
-{
- return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
- index);
-}
-
-/**
- * Implements cl_object_operations::clo_io_init() method for lov
- * layer. Dispatches to the appropriate layout io initialization method.
- */
-int lov_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
-
- CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
- PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
- io->ci_ignore_layout, io->ci_verify_layout);
-
- return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
- !io->ci_ignore_layout, env, obj, io);
-}
-
-/**
- * An implementation of cl_object_operations::clo_attr_get() method for lov
- * layer. For raid0 layout this collects and merges attributes of all
- * sub-objects.
- */
-static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- /* do not take lock, as this function is called under a
- * spin-lock. Layout is protected from changing by ongoing IO.
- */
- return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
-}
-
-static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned int valid)
-{
- /*
- * No dispatch is required here, as no layout implements this.
- */
- return 0;
-}
-
-int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- /* No need to lock because we've taken one refcount of layout. */
- return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
- io);
-}
-
-/**
- * We calculate on which OST the mapping will end. If the length of mapping
- * is greater than (stripe_size * stripe_count) then the last_stripe will
- * will be one just before start_stripe. Else we check if the mapping
- * intersects each OST and find last_stripe.
- * This function returns the last_stripe and also sets the stripe_count
- * over which the mapping is spread
- *
- * \param lsm [in] striping information for the file
- * \param fm_start [in] logical start of mapping
- * \param fm_end [in] logical end of mapping
- * \param start_stripe [in] starting stripe of the mapping
- * \param stripe_count [out] the number of stripes across which to map is
- * returned
- *
- * \retval last_stripe return the last stripe of the mapping
- */
-static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm,
- u64 fm_start, u64 fm_end,
- int start_stripe, int *stripe_count)
-{
- int last_stripe;
- u64 obd_start;
- u64 obd_end;
- int i, j;
-
- if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
- last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
- start_stripe - 1);
- *stripe_count = lsm->lsm_stripe_count;
- } else {
- for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
- i = (i + 1) % lsm->lsm_stripe_count, j++) {
- if (!(lov_stripe_intersects(lsm, i, fm_start, fm_end,
- &obd_start, &obd_end)))
- break;
- }
- *stripe_count = j;
- last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
- }
-
- return last_stripe;
-}
-
-/**
- * Set fe_device and copy extents from local buffer into main return buffer.
- *
- * \param fiemap [out] fiemap to hold all extents
- * \param lcl_fm_ext [in] array of fiemap extents get from OSC layer
- * \param ost_index [in] OST index to be written into the fm_device
- * field for each extent
- * \param ext_count [in] number of extents to be copied
- * \param current_extent [in] where to start copying in the extent array
- */
-static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
- struct fiemap_extent *lcl_fm_ext,
- int ost_index, unsigned int ext_count,
- int current_extent)
-{
- unsigned int ext;
- char *to;
-
- for (ext = 0; ext < ext_count; ext++) {
- lcl_fm_ext[ext].fe_device = ost_index;
- lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
- }
-
- /* Copy fm_extent's from fm_local to return buffer */
- to = (char *)fiemap + fiemap_count_to_size(current_extent);
- memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
-}
-
-#define FIEMAP_BUFFER_SIZE 4096
-
-/**
- * Non-zero fe_logical indicates that this is a continuation FIEMAP
- * call. The local end offset and the device are sent in the first
- * fm_extent. This function calculates the stripe number from the index.
- * This function returns a stripe_no on which mapping is to be restarted.
- *
- * This function returns fm_end_offset which is the in-OST offset at which
- * mapping should be restarted. If fm_end_offset=0 is returned then caller
- * will re-calculate proper offset in next stripe.
- * Note that the first extent is passed to lov_get_info via the value field.
- *
- * \param fiemap [in] fiemap request header
- * \param lsm [in] striping information for the file
- * \param fm_start [in] logical start of mapping
- * \param fm_end [in] logical end of mapping
- * \param start_stripe [out] starting stripe will be returned in this
- */
-static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
- struct lov_stripe_md *lsm,
- u64 fm_start, u64 fm_end,
- int *start_stripe)
-{
- u64 local_end = fiemap->fm_extents[0].fe_logical;
- u64 lun_start, lun_end;
- u64 fm_end_offset;
- int stripe_no = -1;
- int i;
-
- if (!fiemap->fm_extent_count || !fiemap->fm_extents[0].fe_logical)
- return 0;
-
- /* Find out stripe_no from ost_index saved in the fe_device */
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(oinfo))
- continue;
-
- if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
- stripe_no = i;
- break;
- }
- }
-
- if (stripe_no == -1)
- return -EINVAL;
-
- /*
- * If we have finished mapping on previous device, shift logical
- * offset to start of next device
- */
- if (lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
- &lun_start, &lun_end) &&
- local_end < lun_end) {
- fm_end_offset = local_end;
- *start_stripe = stripe_no;
- } else {
- /* This is a special value to indicate that caller should
- * calculate offset in next stripe.
- */
- fm_end_offset = 0;
- *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
- }
-
- return fm_end_offset;
-}
-
-struct fiemap_state {
- struct fiemap *fs_fm;
- u64 fs_start;
- u64 fs_length;
- u64 fs_end;
- u64 fs_end_offset;
- int fs_cur_extent;
- int fs_cnt_need;
- int fs_start_stripe;
- int fs_last_stripe;
- bool fs_device_done;
- bool fs_finish;
- bool fs_enough;
-};
-
-static int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
- struct lov_stripe_md *lsm,
- struct fiemap *fiemap, size_t *buflen,
- struct ll_fiemap_info_key *fmkey, int stripeno,
- struct fiemap_state *fs)
-{
- struct cl_object *subobj;
- struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
- struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
- u64 req_fm_len; /* Stores length of required mapping */
- u64 len_mapped_single_call;
- u64 lun_start;
- u64 lun_end;
- u64 obd_object_end;
- unsigned int ext_count;
- /* EOF for object */
- bool ost_eof = false;
- /* done with required mapping for this OST? */
- bool ost_done = false;
- int ost_index;
- int rc = 0;
-
- fs->fs_device_done = false;
- /* Find out range of mapping on this stripe */
- if ((lov_stripe_intersects(lsm, stripeno, fs->fs_start, fs->fs_end,
- &lun_start, &obd_object_end)) == 0)
- return 0;
-
- if (lov_oinfo_is_dummy(lsm->lsm_oinfo[stripeno]))
- return -EIO;
-
- /* If this is a continuation FIEMAP call and we are on
- * starting stripe then lun_start needs to be set to
- * end_offset
- */
- if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
- lun_start = fs->fs_end_offset;
-
- lun_end = fs->fs_length;
- if (lun_end != ~0ULL) {
- /* Handle fs->fs_start + fs->fs_length overflow */
- if (fs->fs_start + fs->fs_length < fs->fs_start)
- fs->fs_length = ~0ULL - fs->fs_start;
- lun_end = lov_size_to_stripe(lsm, fs->fs_start + fs->fs_length,
- stripeno);
- }
-
- if (lun_start == lun_end)
- return 0;
-
- req_fm_len = obd_object_end - lun_start;
- fs->fs_fm->fm_length = 0;
- len_mapped_single_call = 0;
-
- /* find lobsub object */
- subobj = lov_find_subobj(env, cl2lov(obj), lsm, stripeno);
- if (IS_ERR(subobj))
- return PTR_ERR(subobj);
- /* If the output buffer is very large and the objects have many
- * extents we may need to loop on a single OST repeatedly
- */
- do {
- if (fiemap->fm_extent_count > 0) {
- /* Don't get too many extents. */
- if (fs->fs_cur_extent + fs->fs_cnt_need >
- fiemap->fm_extent_count)
- fs->fs_cnt_need = fiemap->fm_extent_count -
- fs->fs_cur_extent;
- }
-
- lun_start += len_mapped_single_call;
- fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
- req_fm_len = fs->fs_fm->fm_length;
- fs->fs_fm->fm_extent_count = fs->fs_enough ?
- 1 : fs->fs_cnt_need;
- fs->fs_fm->fm_mapped_extents = 0;
- fs->fs_fm->fm_flags = fiemap->fm_flags;
-
- ost_index = lsm->lsm_oinfo[stripeno]->loi_ost_idx;
-
- if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto obj_put;
- }
- /* If OST is inactive, return extent with UNKNOWN flag. */
- if (!lov->lov_tgts[ost_index]->ltd_active) {
- fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
- fs->fs_fm->fm_mapped_extents = 1;
-
- fm_ext[0].fe_logical = lun_start;
- fm_ext[0].fe_length = obd_object_end - lun_start;
- fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
-
- goto inactive_tgt;
- }
-
- fs->fs_fm->fm_start = lun_start;
- fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
- memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
- *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
-
- rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
- if (rc)
- goto obj_put;
-inactive_tgt:
- ext_count = fs->fs_fm->fm_mapped_extents;
- if (ext_count == 0) {
- ost_done = true;
- fs->fs_device_done = true;
- /* If last stripe has hold at the end,
- * we need to return
- */
- if (stripeno == fs->fs_last_stripe) {
- fiemap->fm_mapped_extents = 0;
- fs->fs_finish = true;
- goto obj_put;
- }
- break;
- } else if (fs->fs_enough) {
- /*
- * We've collected enough extents and there are
- * more extents after it.
- */
- fs->fs_finish = true;
- goto obj_put;
- }
-
- /* If we just need num of extents, got to next device */
- if (fiemap->fm_extent_count == 0) {
- fs->fs_cur_extent += ext_count;
- break;
- }
-
- /* prepare to copy retrived map extents */
- len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
- fm_ext[ext_count - 1].fe_length -
- lun_start;
-
- /* Have we finished mapping on this device? */
- if (req_fm_len <= len_mapped_single_call) {
- ost_done = true;
- fs->fs_device_done = true;
- }
-
- /* Clear the EXTENT_LAST flag which can be present on
- * the last extent
- */
- if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
- fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
- if (lov_stripe_size(lsm, fm_ext[ext_count - 1].fe_logical +
- fm_ext[ext_count - 1].fe_length,
- stripeno) >= fmkey->lfik_oa.o_size) {
- ost_eof = true;
- fs->fs_device_done = true;
- }
-
- fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
- ext_count, fs->fs_cur_extent);
- fs->fs_cur_extent += ext_count;
-
- /* Ran out of available extents? */
- if (fs->fs_cur_extent >= fiemap->fm_extent_count)
- fs->fs_enough = true;
- } while (!ost_done && !ost_eof);
-
- if (stripeno == fs->fs_last_stripe)
- fs->fs_finish = true;
-obj_put:
- cl_object_put(env, subobj);
-
- return rc;
-}
-
-/**
- * Break down the FIEMAP request and send appropriate calls to individual OSTs.
- * This also handles the restarting of FIEMAP calls in case mapping overflows
- * the available number of extents in single call.
- *
- * \param env [in] lustre environment
- * \param obj [in] file object
- * \param fmkey [in] fiemap request header and other info
- * \param fiemap [out] fiemap buffer holding retrived map extents
- * \param buflen [in/out] max buffer length of @fiemap, when iterate
- * each OST, it is used to limit max map needed
- * \retval 0 success
- * \retval < 0 error
- */
-static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
- struct ll_fiemap_info_key *fmkey,
- struct fiemap *fiemap, size_t *buflen)
-{
- unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
- struct fiemap *fm_local = NULL;
- struct lov_stripe_md *lsm;
- int rc = 0;
- int cur_stripe;
- int stripe_count;
- struct fiemap_state fs = { NULL };
-
- lsm = lov_lsm_addref(cl2lov(obj));
- if (!lsm)
- return -ENODATA;
-
- /**
- * If the stripe_count > 1 and the application does not understand
- * DEVICE_ORDER flag, it cannot interpret the extents correctly.
- */
- if (lsm->lsm_stripe_count > 1 &&
- !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
- rc = -ENOTSUPP;
- goto out;
- }
-
- if (lsm_is_released(lsm)) {
- if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
- /**
- * released file, return a minimal FIEMAP if
- * request fits in file-size.
- */
- fiemap->fm_mapped_extents = 1;
- fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
- if (fiemap->fm_start + fiemap->fm_length <
- fmkey->lfik_oa.o_size)
- fiemap->fm_extents[0].fe_length =
- fiemap->fm_length;
- else
- fiemap->fm_extents[0].fe_length =
- fmkey->lfik_oa.o_size -
- fiemap->fm_start;
- fiemap->fm_extents[0].fe_flags |=
- FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
- }
- rc = 0;
- goto out;
- }
-
- if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
- buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
-
- fm_local = kvzalloc(buffer_size, GFP_NOFS);
- if (!fm_local) {
- rc = -ENOMEM;
- goto out;
- }
- fs.fs_fm = fm_local;
- fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
-
- fs.fs_start = fiemap->fm_start;
- /* fs_start is beyond the end of the file */
- if (fs.fs_start > fmkey->lfik_oa.o_size) {
- rc = -EINVAL;
- goto out;
- }
- /* Calculate start stripe, last stripe and length of mapping */
- fs.fs_start_stripe = lov_stripe_number(lsm, fs.fs_start);
- fs.fs_end = (fs.fs_length == ~0ULL) ? fmkey->lfik_oa.o_size :
- fs.fs_start + fs.fs_length - 1;
- /* If fs_length != ~0ULL but fs_start+fs_length-1 exceeds file size */
- if (fs.fs_end > fmkey->lfik_oa.o_size) {
- fs.fs_end = fmkey->lfik_oa.o_size;
- fs.fs_length = fs.fs_end - fs.fs_start;
- }
-
- fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, fs.fs_start, fs.fs_end,
- fs.fs_start_stripe,
- &stripe_count);
- fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fs.fs_start,
- fs.fs_end,
- &fs.fs_start_stripe);
- if (fs.fs_end_offset == -EINVAL) {
- rc = -EINVAL;
- goto out;
- }
-
-
- /**
- * Requested extent count exceeds the fiemap buffer size, shrink our
- * ambition.
- */
- if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
- fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
- if (!fiemap->fm_extent_count)
- fs.fs_cnt_need = 0;
-
- fs.fs_finish = false;
- fs.fs_enough = false;
- fs.fs_cur_extent = 0;
-
- /* Check each stripe */
- for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
- --stripe_count,
- cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
- rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen, fmkey,
- cur_stripe, &fs);
- if (rc < 0)
- goto out;
- if (fs.fs_finish)
- break;
- } /* for each stripe */
- /*
- * Indicate that we are returning device offsets unless file just has
- * single stripe
- */
- if (lsm->lsm_stripe_count > 1)
- fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
-
- if (!fiemap->fm_extent_count)
- goto skip_last_device_calc;
-
- /*
- * Check if we have reached the last stripe and whether mapping for that
- * stripe is done.
- */
- if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done)
- fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |=
- FIEMAP_EXTENT_LAST;
-skip_last_device_calc:
- fiemap->fm_mapped_extents = fs.fs_cur_extent;
-out:
- kvfree(fm_local);
- lov_lsm_put(lsm);
- return rc;
-}
-
-static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
- struct lov_user_md __user *lum)
-{
- struct lov_object *lov = cl2lov(obj);
- struct lov_stripe_md *lsm;
- int rc = 0;
-
- lsm = lov_lsm_addref(lov);
- if (!lsm)
- return -ENODATA;
-
- rc = lov_getstripe(cl2lov(obj), lsm, lum);
- lov_lsm_put(lsm);
- return rc;
-}
-
-static int lov_object_layout_get(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_layout *cl)
-{
- struct lov_object *lov = cl2lov(obj);
- struct lov_stripe_md *lsm = lov_lsm_addref(lov);
- struct lu_buf *buf = &cl->cl_buf;
- ssize_t rc;
-
- if (!lsm) {
- cl->cl_size = 0;
- cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
- return 0;
- }
-
- cl->cl_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
- cl->cl_layout_gen = lsm->lsm_layout_gen;
-
- rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
- lov_lsm_put(lsm);
-
- return rc < 0 ? rc : 0;
-}
-
-static loff_t lov_object_maxbytes(struct cl_object *obj)
-{
- struct lov_object *lov = cl2lov(obj);
- struct lov_stripe_md *lsm = lov_lsm_addref(lov);
- loff_t maxbytes;
-
- if (!lsm)
- return LLONG_MAX;
-
- maxbytes = lsm->lsm_maxbytes;
-
- lov_lsm_put(lsm);
-
- return maxbytes;
-}
-
-static const struct cl_object_operations lov_ops = {
- .coo_page_init = lov_page_init,
- .coo_lock_init = lov_lock_init,
- .coo_io_init = lov_io_init,
- .coo_attr_get = lov_attr_get,
- .coo_attr_update = lov_attr_update,
- .coo_conf_set = lov_conf_set,
- .coo_getstripe = lov_object_getstripe,
- .coo_layout_get = lov_object_layout_get,
- .coo_maxbytes = lov_object_maxbytes,
- .coo_fiemap = lov_object_fiemap,
-};
-
-static const struct lu_object_operations lov_lu_obj_ops = {
- .loo_object_init = lov_object_init,
- .loo_object_delete = lov_object_delete,
- .loo_object_release = NULL,
- .loo_object_free = lov_object_free,
- .loo_object_print = lov_object_print,
- .loo_object_invariant = NULL
-};
-
-struct lu_object *lov_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
-{
- struct lov_object *lov;
- struct lu_object *obj;
-
- lov = kmem_cache_zalloc(lov_object_kmem, GFP_NOFS);
- if (lov) {
- obj = lov2lu(lov);
- lu_object_init(obj, NULL, dev);
- lov->lo_cl.co_ops = &lov_ops;
- lov->lo_type = -1; /* invalid, to catch uninitialized type */
- /*
- * object io operation vector (cl_object::co_iop) is installed
- * later in lov_object_init(), as different vectors are used
- * for object with different layouts.
- */
- obj->lo_ops = &lov_lu_obj_ops;
- } else {
- obj = NULL;
- }
- return obj;
-}
-
-struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
-{
- struct lov_stripe_md *lsm = NULL;
-
- lov_conf_freeze(lov);
- if (lov->lo_lsm) {
- lsm = lsm_addref(lov->lo_lsm);
- CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
- lsm, atomic_read(&lsm->lsm_refc),
- lov->lo_layout_invalid, current);
- }
- lov_conf_thaw(lov);
- return lsm;
-}
-
-int lov_read_and_clear_async_rc(struct cl_object *clob)
-{
- struct lu_object *luobj;
- int rc = 0;
-
- luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
- &lov_device_type);
- if (luobj) {
- struct lov_object *lov = lu2lov(luobj);
-
- lov_conf_freeze(lov);
- switch (lov->lo_type) {
- case LLT_RAID0: {
- struct lov_stripe_md *lsm;
- int i;
-
- lsm = lov->lo_lsm;
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ar.ar_rc && !rc)
- rc = loi->loi_ar.ar_rc;
- loi->loi_ar.ar_rc = 0;
- }
- }
- case LLT_RELEASED:
- case LLT_EMPTY:
- break;
- default:
- LBUG();
- }
- lov_conf_thaw(lov);
- }
- return rc;
-}
-EXPORT_SYMBOL(lov_read_and_clear_async_rc);
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
deleted file mode 100644
index 3e16e647b334..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ /dev/null
@@ -1,271 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd_class.h>
-
-#include "lov_internal.h"
-
-/* compute object size given "stripeno" and the ost size */
-u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno)
-{
- unsigned long ssize = lsm->lsm_stripe_size;
- unsigned long stripe_size;
- u64 swidth;
- u64 lov_size;
- int magic = lsm->lsm_magic;
-
- if (ost_size == 0)
- return 0;
-
- lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth);
-
- /* lov_do_div64(a, b) returns a % b, and a = a / b */
- stripe_size = lov_do_div64(ost_size, ssize);
- if (stripe_size)
- lov_size = ost_size * swidth + stripeno * ssize + stripe_size;
- else
- lov_size = (ost_size - 1) * swidth + (stripeno + 1) * ssize;
-
- return lov_size;
-}
-
-/**
- * Compute file level page index by stripe level page offset
- */
-pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
- int stripe)
-{
- loff_t offset;
-
- offset = lov_stripe_size(lsm, (stripe_index << PAGE_SHIFT) + 1, stripe);
- return offset >> PAGE_SHIFT;
-}
-
-/* we have an offset in file backed by an lov and want to find out where
- * that offset lands in our given stripe of the file. for the easy
- * case where the offset is within the stripe, we just have to scale the
- * offset down to make it relative to the stripe instead of the lov.
- *
- * the harder case is what to do when the offset doesn't intersect the
- * stripe. callers will want start offsets clamped ahead to the start
- * of the nearest stripe in the file. end offsets similarly clamped to the
- * nearest ending byte of a stripe in the file:
- *
- * all this function does is move offsets to the nearest region of the
- * stripe, and it does its work "mod" the full length of all the stripes.
- * consider a file with 3 stripes:
- *
- * S E
- * ---------------------------------------------------------------------
- * | 0 | 1 | 2 | 0 | 1 | 2 |
- * ---------------------------------------------------------------------
- *
- * to find stripe 1's offsets for S and E, it divides by the full stripe
- * width and does its math in the context of a single set of stripes:
- *
- * S E
- * -----------------------------------
- * | 0 | 1 | 2 |
- * -----------------------------------
- *
- * it'll notice that E is outside stripe 1 and clamp it to the end of the
- * stripe, then multiply it back out by lov_off to give the real offsets in
- * the stripe:
- *
- * S E
- * ---------------------------------------------------------------------
- * | 1 | 1 | 1 | 1 | 1 | 1 |
- * ---------------------------------------------------------------------
- *
- * it would have done similarly and pulled S forward to the start of a 1
- * stripe if, say, S had landed in a 0 stripe.
- *
- * this rounding isn't always correct. consider an E lov offset that lands
- * on a 0 stripe, the "mod stripe width" math will pull it forward to the
- * start of a 1 stripe, when in fact it wanted to be rounded back to the end
- * of a previous 1 stripe. this logic is handled by callers and this is why:
- *
- * this function returns < 0 when the offset was "before" the stripe and
- * was moved forward to the start of the stripe in question; 0 when it
- * falls in the stripe and no shifting was done; > 0 when the offset
- * was outside the stripe and was pulled back to its final byte.
- */
-int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
- int stripeno, u64 *obdoff)
-{
- unsigned long ssize = lsm->lsm_stripe_size;
- u64 stripe_off, this_stripe, swidth;
- int magic = lsm->lsm_magic;
- int ret = 0;
-
- if (lov_off == OBD_OBJECT_EOF) {
- *obdoff = OBD_OBJECT_EOF;
- return 0;
- }
-
- lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off,
- &swidth);
-
- /* lov_do_div64(a, b) returns a % b, and a = a / b */
- stripe_off = lov_do_div64(lov_off, swidth);
-
- this_stripe = (u64)stripeno * ssize;
- if (stripe_off < this_stripe) {
- stripe_off = 0;
- ret = -1;
- } else {
- stripe_off -= this_stripe;
-
- if (stripe_off >= ssize) {
- stripe_off = ssize;
- ret = 1;
- }
- }
-
- *obdoff = lov_off * ssize + stripe_off;
- return ret;
-}
-
-/* Given a whole-file size and a stripe number, give the file size which
- * corresponds to the individual object of that stripe.
- *
- * This behaves basically in the same was as lov_stripe_offset, except that
- * file sizes falling before the beginning of a stripe are clamped to the end
- * of the previous stripe, not the beginning of the next:
- *
- * S
- * ---------------------------------------------------------------------
- * | 0 | 1 | 2 | 0 | 1 | 2 |
- * ---------------------------------------------------------------------
- *
- * if clamped to stripe 2 becomes:
- *
- * S
- * ---------------------------------------------------------------------
- * | 0 | 1 | 2 | 0 | 1 | 2 |
- * ---------------------------------------------------------------------
- */
-u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size,
- int stripeno)
-{
- unsigned long ssize = lsm->lsm_stripe_size;
- u64 stripe_off, this_stripe, swidth;
- int magic = lsm->lsm_magic;
-
- if (file_size == OBD_OBJECT_EOF)
- return OBD_OBJECT_EOF;
-
- lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size,
- &swidth);
-
- /* lov_do_div64(a, b) returns a % b, and a = a / b */
- stripe_off = lov_do_div64(file_size, swidth);
-
- this_stripe = (u64)stripeno * ssize;
- if (stripe_off < this_stripe) {
- /* Move to end of previous stripe, or zero */
- if (file_size > 0) {
- file_size--;
- stripe_off = ssize;
- } else {
- stripe_off = 0;
- }
- } else {
- stripe_off -= this_stripe;
-
- if (stripe_off >= ssize) {
- /* Clamp to end of this stripe */
- stripe_off = ssize;
- }
- }
-
- return (file_size * ssize + stripe_off);
-}
-
-/* given an extent in an lov and a stripe, calculate the extent of the stripe
- * that is contained within the lov extent. this returns true if the given
- * stripe does intersect with the lov extent.
- */
-int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
- u64 start, u64 end, u64 *obd_start, u64 *obd_end)
-{
- int start_side, end_side;
-
- start_side = lov_stripe_offset(lsm, start, stripeno, obd_start);
- end_side = lov_stripe_offset(lsm, end, stripeno, obd_end);
-
- CDEBUG(D_INODE, "[%llu->%llu] -> [(%d) %llu->%llu (%d)]\n",
- start, end, start_side, *obd_start, *obd_end, end_side);
-
- /* this stripe doesn't intersect the file extent when neither
- * start or the end intersected the stripe and obd_start and
- * obd_end got rounded up to the save value.
- */
- if (start_side != 0 && end_side != 0 && *obd_start == *obd_end)
- return 0;
-
- /* as mentioned in the lov_stripe_offset commentary, end
- * might have been shifted in the wrong direction. This
- * happens when an end offset is before the stripe when viewed
- * through the "mod stripe size" math. we detect it being shifted
- * in the wrong direction and touch it up.
- * interestingly, this can't underflow since end must be > start
- * if we passed through the previous check.
- * (should we assert for that somewhere?)
- */
- if (end_side != 0)
- (*obd_end)--;
-
- return 1;
-}
-
-/* compute which stripe number "lov_off" will be written into */
-int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off)
-{
- unsigned long ssize = lsm->lsm_stripe_size;
- u64 stripe_off, swidth;
- int magic = lsm->lsm_magic;
-
- lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth);
-
- stripe_off = lov_do_div64(lov_off, swidth);
-
- /* Puts stripe_off/ssize result into stripe_off */
- lov_do_div64(stripe_off, ssize);
-
- return stripe_off;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
deleted file mode 100644
index b1060d02a164..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ /dev/null
@@ -1,400 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lov/lov_pack.c
- *
- * (Un)packing of OST/MDS requests
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include <lustre_net.h>
-#include <lustre_swab.h>
-#include <obd.h>
-#include <obd_class.h>
-#include <obd_support.h>
-
-#include "lov_cl_internal.h"
-#include "lov_internal.h"
-
-void lov_dump_lmm_common(int level, void *lmmp)
-{
- struct lov_mds_md *lmm = lmmp;
- struct ost_id oi;
-
- lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
- CDEBUG(level, "objid " DOSTID ", magic 0x%08x, pattern %#x\n",
- POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
- le32_to_cpu(lmm->lmm_pattern));
- CDEBUG(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
- le32_to_cpu(lmm->lmm_stripe_size),
- le16_to_cpu(lmm->lmm_stripe_count),
- le16_to_cpu(lmm->lmm_layout_gen));
-}
-
-static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
- int stripe_count)
-{
- int i;
-
- if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
- CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
- stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
- return;
- }
-
- for (i = 0; i < stripe_count; ++i, ++lod) {
- struct ost_id oi;
-
- ostid_le_to_cpu(&lod->l_ost_oi, &oi);
- CDEBUG(level, "stripe %u idx %u subobj " DOSTID "\n", i,
- le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
- }
-}
-
-void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
-{
- lov_dump_lmm_common(level, lmm);
- lov_dump_lmm_objects(level, lmm->lmm_objects,
- le16_to_cpu(lmm->lmm_stripe_count));
-}
-
-void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
-{
- lov_dump_lmm_common(level, lmm);
- CDEBUG(level, "pool_name " LOV_POOLNAMEF "\n", lmm->lmm_pool_name);
- lov_dump_lmm_objects(level, lmm->lmm_objects,
- le16_to_cpu(lmm->lmm_stripe_count));
-}
-
-/**
- * Pack LOV striping metadata for disk storage format (in little
- * endian byte order).
- *
- * This follows the getxattr() conventions. If \a buf_size is zero
- * then return the size needed. If \a buf_size is too small then
- * return -ERANGE. Otherwise return the size of the result.
- */
-ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
- size_t buf_size)
-{
- struct lov_ost_data_v1 *lmm_objects;
- struct lov_mds_md_v1 *lmmv1 = buf;
- struct lov_mds_md_v3 *lmmv3 = buf;
- size_t lmm_size;
- unsigned int i;
-
- lmm_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
- if (!buf_size)
- return lmm_size;
-
- if (buf_size < lmm_size)
- return -ERANGE;
-
- /*
- * lmmv1 and lmmv3 point to the same struct and have the
- * same first fields
- */
- lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
- lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
- lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
- lmmv1->lmm_stripe_count = cpu_to_le16(lsm->lsm_stripe_count);
- lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
- lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
-
- if (lsm->lsm_magic == LOV_MAGIC_V3) {
- BUILD_BUG_ON(sizeof(lsm->lsm_pool_name) !=
- sizeof(lmmv3->lmm_pool_name));
- strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
- sizeof(lmmv3->lmm_pool_name));
- lmm_objects = lmmv3->lmm_objects;
- } else {
- lmm_objects = lmmv1->lmm_objects;
- }
-
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
-
- ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
- lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
- lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
- }
-
- return lmm_size;
-}
-
-/* Find the max stripecount we should use */
-__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
-{
- __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
-
- if (!stripe_count)
- stripe_count = lov->desc.ld_default_stripe_count;
- if (stripe_count > lov->desc.ld_active_tgt_count)
- stripe_count = lov->desc.ld_active_tgt_count;
- if (!stripe_count)
- stripe_count = 1;
-
- /* stripe count is based on whether ldiskfs can handle
- * larger EA sizes
- */
- if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
- lov->lov_ocd.ocd_max_easize)
- max_stripes = lov_mds_md_max_stripe_count(
- lov->lov_ocd.ocd_max_easize, magic);
-
- if (stripe_count > max_stripes)
- stripe_count = max_stripes;
-
- return stripe_count;
-}
-
-static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
-{
- int rc;
-
- if (!lsm_op_find(le32_to_cpu(*(__u32 *)lmm))) {
- CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
- le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
- CERROR("%*phN\n", lmm_bytes, lmm);
- return -EINVAL;
- }
- rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm,
- lmm_bytes,
- stripe_count);
- return rc;
-}
-
-static struct lov_stripe_md *lov_lsm_alloc(u16 stripe_count, u32 pattern,
- u32 magic)
-{
- struct lov_stripe_md *lsm;
- unsigned int i;
-
- CDEBUG(D_INFO, "alloc lsm, stripe_count %u\n", stripe_count);
-
- lsm = lsm_alloc_plain(stripe_count);
- if (!lsm) {
- CERROR("cannot allocate LSM stripe_count %u\n", stripe_count);
- return ERR_PTR(-ENOMEM);
- }
-
- atomic_set(&lsm->lsm_refc, 1);
- spin_lock_init(&lsm->lsm_lock);
- lsm->lsm_magic = magic;
- lsm->lsm_stripe_count = stripe_count;
- lsm->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
- lsm->lsm_pattern = pattern;
- lsm->lsm_pool_name[0] = '\0';
- lsm->lsm_layout_gen = 0;
- if (stripe_count > 0)
- lsm->lsm_oinfo[0]->loi_ost_idx = ~0;
-
- for (i = 0; i < stripe_count; i++)
- loi_init(lsm->lsm_oinfo[i]);
-
- return lsm;
-}
-
-int lov_free_memmd(struct lov_stripe_md **lsmp)
-{
- struct lov_stripe_md *lsm = *lsmp;
- int refc;
-
- *lsmp = NULL;
- LASSERT(atomic_read(&lsm->lsm_refc) > 0);
- refc = atomic_dec_return(&lsm->lsm_refc);
- if (refc == 0)
- lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
-
- return refc;
-}
-
-/* Unpack LOV object metadata from disk storage. It is packed in LE byte
- * order and is opaque to the networking layer.
- */
-struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
- size_t lmm_size)
-{
- struct lov_stripe_md *lsm;
- u16 stripe_count;
- u32 pattern;
- u32 magic;
- int rc;
-
- rc = lov_verify_lmm(lmm, lmm_size, &stripe_count);
- if (rc)
- return ERR_PTR(rc);
-
- magic = le32_to_cpu(lmm->lmm_magic);
- pattern = le32_to_cpu(lmm->lmm_pattern);
-
- lsm = lov_lsm_alloc(stripe_count, pattern, magic);
- if (IS_ERR(lsm))
- return lsm;
-
- LASSERT(lsm_op_find(magic));
- rc = lsm_op_find(magic)->lsm_unpackmd(lov, lsm, lmm);
- if (rc) {
- lov_free_memmd(&lsm);
- return ERR_PTR(rc);
- }
-
- return lsm;
-}
-
-/* Retrieve object striping information.
- *
- * @lump is a pointer to an in-core struct with lmm_ost_count indicating
- * the maximum number of OST indices which will fit in the user buffer.
- * lmm_magic must be LOV_USER_MAGIC.
- */
-int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
- struct lov_user_md __user *lump)
-{
- /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
- struct lov_user_md_v3 lum;
- struct lov_mds_md *lmmk;
- u32 stripe_count;
- ssize_t lmm_size;
- size_t lmmk_size;
- size_t lum_size;
- int rc;
-
- if (!lsm)
- return -ENODATA;
-
- if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
- CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
- lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
- rc = -EIO;
- goto out;
- }
-
- if (!lsm_is_released(lsm))
- stripe_count = lsm->lsm_stripe_count;
- else
- stripe_count = 0;
-
- /* we only need the header part from user space to get lmm_magic and
- * lmm_stripe_count, (the header part is common to v1 and v3)
- */
- lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(&lum, lump, lum_size)) {
- rc = -EFAULT;
- goto out;
- }
- if (lum.lmm_magic != LOV_USER_MAGIC_V1 &&
- lum.lmm_magic != LOV_USER_MAGIC_V3 &&
- lum.lmm_magic != LOV_USER_MAGIC_SPECIFIC) {
- rc = -EINVAL;
- goto out;
- }
-
- if (lum.lmm_stripe_count &&
- (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
- /* Return right size of stripe to user */
- lum.lmm_stripe_count = stripe_count;
- rc = copy_to_user(lump, &lum, lum_size);
- rc = -EOVERFLOW;
- goto out;
- }
- lmmk_size = lov_mds_md_size(stripe_count, lsm->lsm_magic);
-
-
- lmmk = kvzalloc(lmmk_size, GFP_NOFS);
- if (!lmmk) {
- rc = -ENOMEM;
- goto out;
- }
-
- lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
- if (lmm_size < 0) {
- rc = lmm_size;
- goto out_free;
- }
-
- /* FIXME: Bug 1185 - copy fields properly when structs change */
- /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
- BUILD_BUG_ON(sizeof(lum) != sizeof(struct lov_mds_md_v3));
- BUILD_BUG_ON(sizeof(lum.lmm_objects[0]) != sizeof(lmmk->lmm_objects[0]));
-
- if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC &&
- (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
- lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))) {
- lustre_swab_lov_mds_md(lmmk);
- lustre_swab_lov_user_md_objects(
- (struct lov_user_ost_data *)lmmk->lmm_objects,
- lmmk->lmm_stripe_count);
- }
-
- if (lum.lmm_magic == LOV_USER_MAGIC) {
- /* User request for v1, we need skip lmm_pool_name */
- if (lmmk->lmm_magic == LOV_MAGIC_V3) {
- memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects,
- ((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
- lmmk->lmm_stripe_count *
- sizeof(struct lov_ost_data_v1));
- lmm_size -= LOV_MAXPOOLNAME;
- }
- } else {
- /* if v3 we just have to update the lum_size */
- lum_size = sizeof(struct lov_user_md_v3);
- }
-
- /* User wasn't expecting this many OST entries */
- if (lum.lmm_stripe_count == 0) {
- lmm_size = lum_size;
- } else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
- rc = -EOVERFLOW;
- goto out_free;
- }
- /*
- * Have a difference between lov_mds_md & lov_user_md.
- * So we have to re-order the data before copy to user.
- */
- lum.lmm_stripe_count = lmmk->lmm_stripe_count;
- lum.lmm_layout_gen = lmmk->lmm_layout_gen;
- ((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen;
- ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
- if (copy_to_user(lump, lmmk, lmm_size))
- rc = -EFAULT;
- else
- rc = 0;
-
-out_free:
- kvfree(lmmk);
-out:
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
deleted file mode 100644
index cfae1294d77a..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ /dev/null
@@ -1,136 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_page for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lov page operations.
- *
- */
-
-static int lov_raid0_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct lov_page *lp = cl2lov_page(slice);
-
- return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, raid0\n", lp);
-}
-
-static const struct cl_page_operations lov_raid0_page_ops = {
- .cpo_print = lov_raid0_page_print
-};
-
-int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
-{
- struct lov_object *loo = cl2lov(obj);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct lov_io *lio = lov_env_io(env);
- struct cl_object *subobj;
- struct cl_object *o;
- struct lov_io_sub *sub;
- struct lov_page *lpg = cl_object_page_slice(obj, page);
- loff_t offset;
- u64 suboff;
- int stripe;
- int rc;
-
- offset = cl_offset(obj, index);
- stripe = lov_stripe_number(loo->lo_lsm, offset);
- LASSERT(stripe < r0->lo_nr);
- rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
- LASSERT(rc == 0);
-
- lpg->lps_stripe = stripe;
- cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_raid0_page_ops);
-
- sub = lov_sub_get(env, lio, stripe);
- if (IS_ERR(sub))
- return PTR_ERR(sub);
-
- subobj = lovsub2cl(r0->lo_sub[stripe]);
- list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
- co_lu.lo_linkage) {
- if (o->co_ops->coo_page_init) {
- rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
- cl_index(subobj, suboff));
- if (rc != 0)
- break;
- }
- }
-
- return rc;
-}
-
-static int lov_empty_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct lov_page *lp = cl2lov_page(slice);
-
- return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, empty.\n",
- lp);
-}
-
-static const struct cl_page_operations lov_empty_page_ops = {
- .cpo_print = lov_empty_page_print
-};
-
-int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
-{
- struct lov_page *lpg = cl_object_page_slice(obj, page);
- void *addr;
-
- cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
- addr = kmap(page->cp_vmpage);
- memset(addr, 0, cl_page_size(obj));
- kunmap(page->cp_vmpage);
- cl_page_export(env, page, 1);
- return 0;
-}
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
deleted file mode 100644
index ecd9329cd073..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ /dev/null
@@ -1,586 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lov/lov_pool.c
- *
- * OST pool methods
- *
- * Author: Jacques-Charles LAFOUCRIERE <jc.lafoucriere@cea.fr>
- * Author: Alex Lyashkov <Alexey.Lyashkov@Sun.COM>
- * Author: Nathaniel Rutman <Nathan.Rutman@Sun.COM>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd.h>
-#include "lov_internal.h"
-
-#define pool_tgt(_p, _i) \
- _p->pool_lobd->u.lov.lov_tgts[_p->pool_obds.op_array[_i]]
-
-static void lov_pool_getref(struct pool_desc *pool)
-{
- CDEBUG(D_INFO, "pool %p\n", pool);
- atomic_inc(&pool->pool_refcount);
-}
-
-void lov_pool_putref(struct pool_desc *pool)
-{
- CDEBUG(D_INFO, "pool %p\n", pool);
- if (atomic_dec_and_test(&pool->pool_refcount)) {
- LASSERT(hlist_unhashed(&pool->pool_hash));
- LASSERT(list_empty(&pool->pool_list));
- LASSERT(!pool->pool_debugfs_entry);
- lov_ost_pool_free(&pool->pool_obds);
- kfree(pool);
- }
-}
-
-static void lov_pool_putref_locked(struct pool_desc *pool)
-{
- CDEBUG(D_INFO, "pool %p\n", pool);
- LASSERT(atomic_read(&pool->pool_refcount) > 1);
-
- atomic_dec(&pool->pool_refcount);
-}
-
-/*
- * hash function using a Rotating Hash algorithm
- * Knuth, D. The Art of Computer Programming,
- * Volume 3: Sorting and Searching,
- * Chapter 6.4.
- * Addison Wesley, 1973
- */
-static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key,
- unsigned int mask)
-{
- int i;
- __u32 result;
- char *poolname;
-
- result = 0;
- poolname = (char *)key;
- for (i = 0; i < LOV_MAXPOOLNAME; i++) {
- if (poolname[i] == '\0')
- break;
- result = (result << 4) ^ (result >> 28) ^ poolname[i];
- }
- return (result % mask);
-}
-
-static void *pool_key(struct hlist_node *hnode)
-{
- struct pool_desc *pool;
-
- pool = hlist_entry(hnode, struct pool_desc, pool_hash);
- return pool->pool_name;
-}
-
-static int pool_hashkey_keycmp(const void *key, struct hlist_node *compared_hnode)
-{
- char *pool_name;
- struct pool_desc *pool;
-
- pool_name = (char *)key;
- pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash);
- return !strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME);
-}
-
-static void *pool_hashobject(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct pool_desc, pool_hash);
-}
-
-static void pool_hashrefcount_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct pool_desc *pool;
-
- pool = hlist_entry(hnode, struct pool_desc, pool_hash);
- lov_pool_getref(pool);
-}
-
-static void pool_hashrefcount_put_locked(struct cfs_hash *hs,
- struct hlist_node *hnode)
-{
- struct pool_desc *pool;
-
- pool = hlist_entry(hnode, struct pool_desc, pool_hash);
- lov_pool_putref_locked(pool);
-}
-
-struct cfs_hash_ops pool_hash_operations = {
- .hs_hash = pool_hashfn,
- .hs_key = pool_key,
- .hs_keycmp = pool_hashkey_keycmp,
- .hs_object = pool_hashobject,
- .hs_get = pool_hashrefcount_get,
- .hs_put_locked = pool_hashrefcount_put_locked,
-
-};
-
-/*
- * pool debugfs seq_file methods
- */
-/*
- * iterator is used to go through the target pool entries
- * index is the current entry index in the lp_array[] array
- * index >= pos returned to the seq_file interface
- * pos is from 0 to (pool->pool_obds.op_count - 1)
- */
-#define POOL_IT_MAGIC 0xB001CEA0
-struct pool_iterator {
- int magic;
- struct pool_desc *pool;
- int idx; /* from 0 to pool_tgt_size - 1 */
-};
-
-static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct pool_iterator *iter = (struct pool_iterator *)s->private;
- int prev_idx;
-
- LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic);
-
- /* test if end of file */
- if (*pos >= pool_tgt_count(iter->pool))
- return NULL;
-
- /* iterate to find a non empty entry */
- prev_idx = iter->idx;
- down_read(&pool_tgt_rw_sem(iter->pool));
- iter->idx++;
- if (iter->idx == pool_tgt_count(iter->pool)) {
- iter->idx = prev_idx; /* we stay on the last entry */
- up_read(&pool_tgt_rw_sem(iter->pool));
- return NULL;
- }
- up_read(&pool_tgt_rw_sem(iter->pool));
- (*pos)++;
- /* return != NULL to continue */
- return iter;
-}
-
-static void *pool_proc_start(struct seq_file *s, loff_t *pos)
-{
- struct pool_desc *pool = (struct pool_desc *)s->private;
- struct pool_iterator *iter;
-
- lov_pool_getref(pool);
- if ((pool_tgt_count(pool) == 0) ||
- (*pos >= pool_tgt_count(pool))) {
- /* iter is not created, so stop() has no way to
- * find pool to dec ref
- */
- lov_pool_putref(pool);
- return NULL;
- }
-
- iter = kzalloc(sizeof(*iter), GFP_NOFS);
- if (!iter)
- return ERR_PTR(-ENOMEM);
- iter->magic = POOL_IT_MAGIC;
- iter->pool = pool;
- iter->idx = 0;
-
- /* we use seq_file private field to memorized iterator so
- * we can free it at stop()
- */
- /* /!\ do not forget to restore it to pool before freeing it */
- s->private = iter;
- if (*pos > 0) {
- loff_t i;
- void *ptr;
-
- i = 0;
- do {
- ptr = pool_proc_next(s, &iter, &i);
- } while ((i < *pos) && ptr);
- return ptr;
- }
- return iter;
-}
-
-static void pool_proc_stop(struct seq_file *s, void *v)
-{
- struct pool_iterator *iter = (struct pool_iterator *)s->private;
-
- /* in some cases stop() method is called 2 times, without
- * calling start() method (see seq_read() from fs/seq_file.c)
- * we have to free only if s->private is an iterator
- */
- if ((iter) && (iter->magic == POOL_IT_MAGIC)) {
- /* we restore s->private so next call to pool_proc_start()
- * will work
- */
- s->private = iter->pool;
- lov_pool_putref(iter->pool);
- kfree(iter);
- }
-}
-
-static int pool_proc_show(struct seq_file *s, void *v)
-{
- struct pool_iterator *iter = (struct pool_iterator *)v;
- struct lov_tgt_desc *tgt;
-
- LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic);
- LASSERT(iter->pool);
- LASSERT(iter->idx <= pool_tgt_count(iter->pool));
-
- down_read(&pool_tgt_rw_sem(iter->pool));
- tgt = pool_tgt(iter->pool, iter->idx);
- up_read(&pool_tgt_rw_sem(iter->pool));
- if (tgt)
- seq_printf(s, "%s\n", obd_uuid2str(&tgt->ltd_uuid));
-
- return 0;
-}
-
-static const struct seq_operations pool_proc_ops = {
- .start = pool_proc_start,
- .next = pool_proc_next,
- .stop = pool_proc_stop,
- .show = pool_proc_show,
-};
-
-static int pool_proc_open(struct inode *inode, struct file *file)
-{
- int rc;
-
- rc = seq_open(file, &pool_proc_ops);
- if (!rc) {
- struct seq_file *s = file->private_data;
-
- s->private = inode->i_private;
- }
- return rc;
-}
-
-static const struct file_operations pool_proc_operations = {
- .open = pool_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-#define LOV_POOL_INIT_COUNT 2
-int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
-{
- if (count == 0)
- count = LOV_POOL_INIT_COUNT;
- op->op_array = NULL;
- op->op_count = 0;
- init_rwsem(&op->op_rw_sem);
- op->op_size = count;
- op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS);
- if (!op->op_array) {
- op->op_size = 0;
- return -ENOMEM;
- }
- return 0;
-}
-
-/* Caller must hold write op_rwlock */
-int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count)
-{
- __u32 *new;
- int new_size;
-
- LASSERT(min_count != 0);
-
- if (op->op_count < op->op_size)
- return 0;
-
- new_size = max(min_count, 2 * op->op_size);
- new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS);
- if (!new)
- return -ENOMEM;
-
- /* copy old array to new one */
- memcpy(new, op->op_array, op->op_size * sizeof(op->op_array[0]));
- kfree(op->op_array);
- op->op_array = new;
- op->op_size = new_size;
- return 0;
-}
-
-int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
-{
- int rc = 0, i;
-
- down_write(&op->op_rw_sem);
-
- rc = lov_ost_pool_extend(op, min_count);
- if (rc)
- goto out;
-
- /* search ost in pool array */
- for (i = 0; i < op->op_count; i++) {
- if (op->op_array[i] == idx) {
- rc = -EEXIST;
- goto out;
- }
- }
- /* ost not found we add it */
- op->op_array[op->op_count] = idx;
- op->op_count++;
-out:
- up_write(&op->op_rw_sem);
- return rc;
-}
-
-int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
-{
- int i;
-
- down_write(&op->op_rw_sem);
-
- for (i = 0; i < op->op_count; i++) {
- if (op->op_array[i] == idx) {
- memmove(&op->op_array[i], &op->op_array[i + 1],
- (op->op_count - i - 1) * sizeof(op->op_array[0]));
- op->op_count--;
- up_write(&op->op_rw_sem);
- return 0;
- }
- }
-
- up_write(&op->op_rw_sem);
- return -EINVAL;
-}
-
-int lov_ost_pool_free(struct ost_pool *op)
-{
- if (op->op_size == 0)
- return 0;
-
- down_write(&op->op_rw_sem);
-
- kfree(op->op_array);
- op->op_array = NULL;
- op->op_count = 0;
- op->op_size = 0;
-
- up_write(&op->op_rw_sem);
- return 0;
-}
-
-int lov_pool_new(struct obd_device *obd, char *poolname)
-{
- struct lov_obd *lov;
- struct pool_desc *new_pool;
- int rc;
-
- lov = &obd->u.lov;
-
- if (strlen(poolname) > LOV_MAXPOOLNAME)
- return -ENAMETOOLONG;
-
- new_pool = kzalloc(sizeof(*new_pool), GFP_NOFS);
- if (!new_pool)
- return -ENOMEM;
-
- strlcpy(new_pool->pool_name, poolname, sizeof(new_pool->pool_name));
- new_pool->pool_lobd = obd;
- /* ref count init to 1 because when created a pool is always used
- * up to deletion
- */
- atomic_set(&new_pool->pool_refcount, 1);
- rc = lov_ost_pool_init(&new_pool->pool_obds, 0);
- if (rc)
- goto out_err;
-
- INIT_HLIST_NODE(&new_pool->pool_hash);
-
- /* get ref for debugfs file */
- lov_pool_getref(new_pool);
- new_pool->pool_debugfs_entry = ldebugfs_add_simple(
- lov->lov_pool_debugfs_entry,
- poolname, new_pool,
- &pool_proc_operations);
- if (IS_ERR_OR_NULL(new_pool->pool_debugfs_entry)) {
- CWARN("Cannot add debugfs pool entry " LOV_POOLNAMEF "\n",
- poolname);
- new_pool->pool_debugfs_entry = NULL;
- lov_pool_putref(new_pool);
- }
- CDEBUG(D_INFO, "pool %p - proc %p\n",
- new_pool, new_pool->pool_debugfs_entry);
-
- spin_lock(&obd->obd_dev_lock);
- list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
- lov->lov_pool_count++;
- spin_unlock(&obd->obd_dev_lock);
-
- /* add to find only when it fully ready */
- rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
- &new_pool->pool_hash);
- if (rc) {
- rc = -EEXIST;
- goto out_err;
- }
-
- CDEBUG(D_CONFIG, LOV_POOLNAMEF " is pool #%d\n",
- poolname, lov->lov_pool_count);
-
- return 0;
-
-out_err:
- spin_lock(&obd->obd_dev_lock);
- list_del_init(&new_pool->pool_list);
- lov->lov_pool_count--;
- spin_unlock(&obd->obd_dev_lock);
- ldebugfs_remove(&new_pool->pool_debugfs_entry);
- lov_ost_pool_free(&new_pool->pool_obds);
- kfree(new_pool);
-
- return rc;
-}
-
-int lov_pool_del(struct obd_device *obd, char *poolname)
-{
- struct lov_obd *lov;
- struct pool_desc *pool;
-
- lov = &obd->u.lov;
-
- /* lookup and kill hash reference */
- pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
- if (!pool)
- return -ENOENT;
-
- if (!IS_ERR_OR_NULL(pool->pool_debugfs_entry)) {
- CDEBUG(D_INFO, "proc entry %p\n", pool->pool_debugfs_entry);
- ldebugfs_remove(&pool->pool_debugfs_entry);
- lov_pool_putref(pool);
- }
-
- spin_lock(&obd->obd_dev_lock);
- list_del_init(&pool->pool_list);
- lov->lov_pool_count--;
- spin_unlock(&obd->obd_dev_lock);
-
- /* release last reference */
- lov_pool_putref(pool);
-
- return 0;
-}
-
-int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
-{
- struct obd_uuid ost_uuid;
- struct lov_obd *lov;
- struct pool_desc *pool;
- unsigned int lov_idx;
- int rc;
-
- lov = &obd->u.lov;
-
- pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (!pool)
- return -ENOENT;
-
- obd_str2uuid(&ost_uuid, ostname);
-
- /* search ost in lov array */
- obd_getref(obd);
- for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
- if (!lov->lov_tgts[lov_idx])
- continue;
- if (obd_uuid_equals(&ost_uuid,
- &lov->lov_tgts[lov_idx]->ltd_uuid))
- break;
- }
- /* test if ost found in lov */
- if (lov_idx == lov->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = lov_ost_pool_add(&pool->pool_obds, lov_idx, lov->lov_tgt_size);
- if (rc)
- goto out;
-
- CDEBUG(D_CONFIG, "Added %s to " LOV_POOLNAMEF " as member %d\n",
- ostname, poolname, pool_tgt_count(pool));
-
-out:
- obd_putref(obd);
- lov_pool_putref(pool);
- return rc;
-}
-
-int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
-{
- struct obd_uuid ost_uuid;
- struct lov_obd *lov;
- struct pool_desc *pool;
- unsigned int lov_idx;
- int rc = 0;
-
- lov = &obd->u.lov;
-
- pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (!pool)
- return -ENOENT;
-
- obd_str2uuid(&ost_uuid, ostname);
-
- obd_getref(obd);
- /* search ost in lov array, to get index */
- for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
- if (!lov->lov_tgts[lov_idx])
- continue;
-
- if (obd_uuid_equals(&ost_uuid,
- &lov->lov_tgts[lov_idx]->ltd_uuid))
- break;
- }
-
- /* test if ost found in lov */
- if (lov_idx == lov->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto out;
- }
-
- lov_ost_pool_remove(&pool->pool_obds, lov_idx);
-
- CDEBUG(D_CONFIG, "%s removed from " LOV_POOLNAMEF "\n", ostname,
- poolname);
-
-out:
- obd_putref(obd);
- lov_pool_putref(pool);
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
deleted file mode 100644
index 051450d67524..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ /dev/null
@@ -1,356 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd_class.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include "lov_internal.h"
-
-static void lov_init_set(struct lov_request_set *set)
-{
- set->set_count = 0;
- atomic_set(&set->set_completes, 0);
- atomic_set(&set->set_success, 0);
- INIT_LIST_HEAD(&set->set_list);
-}
-
-static void lov_finish_set(struct lov_request_set *set)
-{
- struct lov_request *req;
-
- LASSERT(set);
- while ((req = list_first_entry_or_null(&set->set_list,
- struct lov_request,
- rq_link)) != NULL) {
- list_del_init(&req->rq_link);
- kfree(req->rq_oi.oi_osfs);
- kfree(req);
- }
- kfree(set);
-}
-
-static void lov_update_set(struct lov_request_set *set,
- struct lov_request *req, int rc)
-{
- atomic_inc(&set->set_completes);
- if (rc == 0)
- atomic_inc(&set->set_success);
-}
-
-static void lov_set_add_req(struct lov_request *req,
- struct lov_request_set *set)
-{
- list_add_tail(&req->rq_link, &set->set_list);
- set->set_count++;
- req->rq_rqset = set;
-}
-
-static int lov_check_set(struct lov_obd *lov, int idx)
-{
- int rc;
- struct lov_tgt_desc *tgt;
-
- mutex_lock(&lov->lov_lock);
- tgt = lov->lov_tgts[idx];
- rc = !tgt || tgt->ltd_active ||
- (tgt->ltd_exp &&
- class_exp2cliimp(tgt->ltd_exp)->imp_connect_tried);
- mutex_unlock(&lov->lov_lock);
-
- return rc;
-}
-
-/* Check if the OSC connection exists and is active.
- * If the OSC has not yet had a chance to connect to the OST the first time,
- * wait once for it to connect instead of returning an error.
- */
-static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
-{
- int cnt = 0;
- struct lov_tgt_desc *tgt;
- int rc = 0;
-
- mutex_lock(&lov->lov_lock);
-
- tgt = lov->lov_tgts[ost_idx];
-
- if (unlikely(!tgt)) {
- rc = 0;
- goto out;
- }
-
- if (likely(tgt->ltd_active)) {
- rc = 1;
- goto out;
- }
-
- if (tgt->ltd_exp && class_exp2cliimp(tgt->ltd_exp)->imp_connect_tried) {
- rc = 0;
- goto out;
- }
-
- mutex_unlock(&lov->lov_lock);
-
- while (cnt < obd_timeout && !lov_check_set(lov, ost_idx)) {
- schedule_timeout_uninterruptible(HZ);
- cnt++;
- }
- if (tgt->ltd_active)
- return 1;
-
- return 0;
-
-out:
- mutex_unlock(&lov->lov_lock);
- return rc;
-}
-
-#define LOV_U64_MAX ((__u64)~0ULL)
-#define LOV_SUM_MAX(tot, add) \
- do { \
- if ((tot) + (add) < (tot)) \
- (tot) = LOV_U64_MAX; \
- else \
- (tot) += (add); \
- } while (0)
-
-static int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,
- int success)
-{
- if (success) {
- __u32 expected_stripes = lov_get_stripecnt(&obd->u.lov,
- LOV_MAGIC, 0);
- if (osfs->os_files != LOV_U64_MAX)
- lov_do_div64(osfs->os_files, expected_stripes);
- if (osfs->os_ffree != LOV_U64_MAX)
- lov_do_div64(osfs->os_ffree, expected_stripes);
-
- spin_lock(&obd->obd_osfs_lock);
- memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
- obd->obd_osfs_age = cfs_time_current_64();
- spin_unlock(&obd->obd_osfs_lock);
- return 0;
- }
-
- return -EIO;
-}
-
-int lov_fini_statfs_set(struct lov_request_set *set)
-{
- int rc = 0;
-
- if (!set)
- return 0;
-
- if (atomic_read(&set->set_completes)) {
- rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs,
- atomic_read(&set->set_success));
- }
-
- lov_finish_set(set);
-
- return rc;
-}
-
-static void lov_update_statfs(struct obd_statfs *osfs,
- struct obd_statfs *lov_sfs,
- int success)
-{
- int shift = 0, quit = 0;
- __u64 tmp;
-
- if (success == 0) {
- memcpy(osfs, lov_sfs, sizeof(*lov_sfs));
- } else {
- if (osfs->os_bsize != lov_sfs->os_bsize) {
- /* assume all block sizes are always powers of 2 */
- /* get the bits difference */
- tmp = osfs->os_bsize | lov_sfs->os_bsize;
- for (shift = 0; shift <= 64; ++shift) {
- if (tmp & 1) {
- if (quit)
- break;
- quit = 1;
- shift = 0;
- }
- tmp >>= 1;
- }
- }
-
- if (osfs->os_bsize < lov_sfs->os_bsize) {
- osfs->os_bsize = lov_sfs->os_bsize;
-
- osfs->os_bfree >>= shift;
- osfs->os_bavail >>= shift;
- osfs->os_blocks >>= shift;
- } else if (shift != 0) {
- lov_sfs->os_bfree >>= shift;
- lov_sfs->os_bavail >>= shift;
- lov_sfs->os_blocks >>= shift;
- }
- osfs->os_bfree += lov_sfs->os_bfree;
- osfs->os_bavail += lov_sfs->os_bavail;
- osfs->os_blocks += lov_sfs->os_blocks;
- /* XXX not sure about this one - depends on policy.
- * - could be minimum if we always stripe on all OBDs
- * (but that would be wrong for any other policy,
- * if one of the OBDs has no more objects left)
- * - could be sum if we stripe whole objects
- * - could be average, just to give a nice number
- *
- * To give a "reasonable" (if not wholly accurate)
- * number, we divide the total number of free objects
- * by expected stripe count (watch out for overflow).
- */
- LOV_SUM_MAX(osfs->os_files, lov_sfs->os_files);
- LOV_SUM_MAX(osfs->os_ffree, lov_sfs->os_ffree);
- }
-}
-
-/* The callback for osc_statfs_async that finalizes a request info when a
- * response is received.
- */
-static int cb_statfs_update(void *cookie, int rc)
-{
- struct obd_info *oinfo = cookie;
- struct lov_request *lovreq;
- struct lov_request_set *set;
- struct obd_statfs *osfs, *lov_sfs;
- struct lov_obd *lov;
- struct lov_tgt_desc *tgt;
- struct obd_device *lovobd, *tgtobd;
- int success;
-
- lovreq = container_of(oinfo, struct lov_request, rq_oi);
- set = lovreq->rq_rqset;
- lovobd = set->set_obd;
- lov = &lovobd->u.lov;
- osfs = set->set_oi->oi_osfs;
- lov_sfs = oinfo->oi_osfs;
- success = atomic_read(&set->set_success);
- /* XXX: the same is done in lov_update_common_set, however
- * lovset->set_exp is not initialized.
- */
- lov_update_set(set, lovreq, rc);
- if (rc)
- goto out;
-
- obd_getref(lovobd);
- tgt = lov->lov_tgts[lovreq->rq_idx];
- if (!tgt || !tgt->ltd_active)
- goto out_update;
-
- tgtobd = class_exp2obd(tgt->ltd_exp);
- spin_lock(&tgtobd->obd_osfs_lock);
- memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
- if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
- tgtobd->obd_osfs_age = cfs_time_current_64();
- spin_unlock(&tgtobd->obd_osfs_lock);
-
-out_update:
- lov_update_statfs(osfs, lov_sfs, success);
- obd_putref(lovobd);
-out:
- return 0;
-}
-
-int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_obd = obd;
- set->set_oi = oinfo;
-
- /* We only get block data from the OBD */
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- struct lov_request *req;
-
- if (!lov->lov_tgts[i] ||
- (oinfo->oi_flags & OBD_STATFS_NODELAY &&
- !lov->lov_tgts[i]->ltd_active)) {
- CDEBUG(D_HA, "lov idx %d inactive\n", i);
- continue;
- }
-
- /* skip targets that have been explicitly disabled by the
- * administrator
- */
- if (!lov->lov_tgts[i]->ltd_exp) {
- CDEBUG(D_HA, "lov idx %d administratively disabled\n", i);
- continue;
- }
-
- if (!lov->lov_tgts[i]->ltd_active)
- lov_check_and_wait_active(lov, i);
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
-
- req->rq_oi.oi_osfs = kzalloc(sizeof(*req->rq_oi.oi_osfs),
- GFP_NOFS);
- if (!req->rq_oi.oi_osfs) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
-
- req->rq_idx = i;
- req->rq_oi.oi_cb_up = cb_statfs_update;
- req->rq_oi.oi_flags = oinfo->oi_flags;
-
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_statfs_set(set);
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
deleted file mode 100644
index 7e89a2e485fc..000000000000
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2013, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_device and cl_device_type for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lov-sub device and device type functions.
- *
- */
-
-static int lovsub_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- struct lovsub_device *lsd = lu2lovsub_dev(d);
- struct lu_device_type *ldt;
- int rc;
-
- next->ld_site = d->ld_site;
- ldt = next->ld_type;
- rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL);
- if (rc) {
- next->ld_site = NULL;
- return rc;
- }
-
- lu_device_get(next);
- lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
- lsd->acid_next = lu2cl_dev(next);
- return rc;
-}
-
-static struct lu_device *lovsub_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- struct lu_device *next;
- struct lovsub_device *lsd;
-
- lsd = lu2lovsub_dev(d);
- next = cl2lu_dev(lsd->acid_next);
- lsd->acid_next = NULL;
- return next;
-}
-
-static struct lu_device *lovsub_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct lovsub_device *lsd = lu2lovsub_dev(d);
- struct lu_device *next = cl2lu_dev(lsd->acid_next);
-
- if (atomic_read(&d->ld_ref) && d->ld_site) {
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
- lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
- }
- cl_device_fini(lu2cl_dev(d));
- kfree(lsd);
- return next;
-}
-
-static const struct lu_device_operations lovsub_lu_ops = {
- .ldo_object_alloc = lovsub_object_alloc,
- .ldo_process_config = NULL,
- .ldo_recovery_complete = NULL
-};
-
-static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- struct lu_device *d;
- struct lovsub_device *lsd;
-
- lsd = kzalloc(sizeof(*lsd), GFP_NOFS);
- if (lsd) {
- int result;
-
- result = cl_device_init(&lsd->acid_cl, t);
- if (result == 0) {
- d = lovsub2lu_dev(lsd);
- d->ld_ops = &lovsub_lu_ops;
- } else {
- d = ERR_PTR(result);
- }
- } else {
- d = ERR_PTR(-ENOMEM);
- }
- return d;
-}
-
-static const struct lu_device_type_operations lovsub_device_type_ops = {
- .ldto_device_alloc = lovsub_device_alloc,
- .ldto_device_free = lovsub_device_free,
-
- .ldto_device_init = lovsub_device_init,
- .ldto_device_fini = lovsub_device_fini
-};
-
-#define LUSTRE_LOVSUB_NAME "lovsub"
-
-struct lu_device_type lovsub_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_LOVSUB_NAME,
- .ldt_ops = &lovsub_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD
-};
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
deleted file mode 100644
index ea492be2eef3..000000000000
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lovsub lock operations.
- *
- */
-
-static void lovsub_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct lovsub_lock *lsl;
-
- lsl = cl2lovsub_lock(slice);
- kmem_cache_free(lovsub_lock_kmem, lsl);
-}
-
-static const struct cl_lock_operations lovsub_lock_ops = {
- .clo_fini = lovsub_lock_fini,
-};
-
-int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- struct lovsub_lock *lsk;
- int result;
-
- lsk = kmem_cache_zalloc(lovsub_lock_kmem, GFP_NOFS);
- if (lsk) {
- cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
deleted file mode 100644
index 13d452086b61..000000000000
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_object for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lovsub object operations.
- *
- */
-
-int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct lovsub_device *dev = lu2lovsub_dev(obj->lo_dev);
- struct lu_object *below;
- struct lu_device *under;
-
- int result;
-
- under = &dev->acid_next->cd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
- if (below) {
- lu_object_add(obj, below);
- cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
-
-static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct lovsub_object *los = lu2lovsub(obj);
- struct lov_object *lov = los->lso_super;
-
- /* We can't assume lov was assigned here, because of the shadow
- * object handling in lu_object_find.
- */
- if (lov) {
- LASSERT(lov->lo_type == LLT_RAID0);
- LASSERT(lov->u.raid0.lo_sub[los->lso_index] == los);
- spin_lock(&lov->u.raid0.lo_sub_lock);
- lov->u.raid0.lo_sub[los->lso_index] = NULL;
- spin_unlock(&lov->u.raid0.lo_sub_lock);
- }
-
- lu_object_fini(obj);
- lu_object_header_fini(&los->lso_header.coh_lu);
- kmem_cache_free(lovsub_object_kmem, los);
-}
-
-static int lovsub_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *obj)
-{
- struct lovsub_object *los = lu2lovsub(obj);
-
- return (*p)(env, cookie, "[%d]", los->lso_index);
-}
-
-static int lovsub_attr_update(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned int valid)
-{
- struct lov_object *lov = cl2lovsub(obj)->lso_super;
-
- lov_r0(lov)->lo_attr_valid = 0;
- return 0;
-}
-
-static int lovsub_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj,
- struct ost_lvb *lvb)
-{
- struct lovsub_object *los = cl2lovsub(obj);
-
- return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb);
-}
-
-/**
- * Implementation of struct cl_object_operations::coo_req_attr_set() for lovsub
- * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
- * field, which is filled there.
- */
-static void lovsub_req_attr_set(const struct lu_env *env, struct cl_object *obj,
- struct cl_req_attr *attr)
-{
- struct lovsub_object *subobj = cl2lovsub(obj);
-
- cl_req_attr_set(env, &subobj->lso_super->lo_cl, attr);
-
- /*
- * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
- * unconditionally. It never changes anyway.
- */
- attr->cra_oa->o_stripe_idx = subobj->lso_index;
-}
-
-static const struct cl_object_operations lovsub_ops = {
- .coo_page_init = lovsub_page_init,
- .coo_lock_init = lovsub_lock_init,
- .coo_attr_update = lovsub_attr_update,
- .coo_glimpse = lovsub_object_glimpse,
- .coo_req_attr_set = lovsub_req_attr_set
-};
-
-static const struct lu_object_operations lovsub_lu_obj_ops = {
- .loo_object_init = lovsub_object_init,
- .loo_object_delete = NULL,
- .loo_object_release = NULL,
- .loo_object_free = lovsub_object_free,
- .loo_object_print = lovsub_object_print,
- .loo_object_invariant = NULL
-};
-
-struct lu_object *lovsub_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
-{
- struct lovsub_object *los;
- struct lu_object *obj;
-
- los = kmem_cache_zalloc(lovsub_object_kmem, GFP_NOFS);
- if (los) {
- struct cl_object_header *hdr;
-
- obj = lovsub2lu(los);
- hdr = &los->lso_header;
- cl_object_header_init(hdr);
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
- los->lso_cl.co_ops = &lovsub_ops;
- obj->lo_ops = &lovsub_lu_obj_ops;
- } else {
- obj = NULL;
- }
- return obj;
-}
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
deleted file mode 100644
index 915520bcdd60..000000000000
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_page for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lovsub page operations.
- *
- */
-
-static void lovsub_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
-}
-
-static const struct cl_page_operations lovsub_page_ops = {
- .cpo_fini = lovsub_page_fini
-};
-
-int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
-{
- struct lovsub_page *lsb = cl_object_page_slice(obj, page);
-
- cl_page_slice_add(page, &lsb->lsb_cl, obj, index, &lovsub_page_ops);
- return 0;
-}
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
deleted file mode 100644
index 721440feef72..000000000000
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ /dev/null
@@ -1,299 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/statfs.h>
-#include <lprocfs_status.h>
-#include <obd_class.h>
-#include <linux/seq_file.h>
-#include "lov_internal.h"
-
-static int lov_stripesize_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_desc *desc;
-
- LASSERT(dev);
- desc = &dev->u.lov.desc;
- seq_printf(m, "%llu\n", desc->ld_default_stripe_size);
- return 0;
-}
-
-static ssize_t lov_stripesize_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct lov_desc *desc;
- __u64 val;
- int rc;
-
- LASSERT(dev);
- desc = &dev->u.lov.desc;
- rc = lprocfs_write_u64_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- lov_fix_desc_stripe_size(&val);
- desc->ld_default_stripe_size = val;
- return count;
-}
-
-LPROC_SEQ_FOPS(lov_stripesize);
-
-static int lov_stripeoffset_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_desc *desc;
-
- LASSERT(dev);
- desc = &dev->u.lov.desc;
- seq_printf(m, "%llu\n", desc->ld_default_stripe_offset);
- return 0;
-}
-
-static ssize_t lov_stripeoffset_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct lov_desc *desc;
- __u64 val;
- int rc;
-
- LASSERT(dev);
- desc = &dev->u.lov.desc;
- rc = lprocfs_write_u64_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- desc->ld_default_stripe_offset = val;
- return count;
-}
-
-LPROC_SEQ_FOPS(lov_stripeoffset);
-
-static int lov_stripetype_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_desc *desc;
-
- LASSERT(dev);
- desc = &dev->u.lov.desc;
- seq_printf(m, "%u\n", desc->ld_pattern);
- return 0;
-}
-
-static ssize_t lov_stripetype_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct lov_desc *desc;
- int val, rc;
-
- LASSERT(dev);
- desc = &dev->u.lov.desc;
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- lov_fix_desc_pattern(&val);
- desc->ld_pattern = val;
- return count;
-}
-
-LPROC_SEQ_FOPS(lov_stripetype);
-
-static int lov_stripecount_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_desc *desc;
-
- LASSERT(dev);
- desc = &dev->u.lov.desc;
- seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1);
- return 0;
-}
-
-static ssize_t lov_stripecount_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct lov_desc *desc;
- int val, rc;
-
- LASSERT(dev);
- desc = &dev->u.lov.desc;
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- lov_fix_desc_stripe_count(&val);
- desc->ld_default_stripe_count = val;
- return count;
-}
-
-LPROC_SEQ_FOPS(lov_stripecount);
-
-static ssize_t numobd_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct lov_desc *desc;
-
- desc = &dev->u.lov.desc;
- return sprintf(buf, "%u\n", desc->ld_tgt_count);
-}
-LUSTRE_RO_ATTR(numobd);
-
-static ssize_t activeobd_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct lov_desc *desc;
-
- desc = &dev->u.lov.desc;
- return sprintf(buf, "%u\n", desc->ld_active_tgt_count);
-}
-LUSTRE_RO_ATTR(activeobd);
-
-static int lov_desc_uuid_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_obd *lov;
-
- LASSERT(dev);
- lov = &dev->u.lov;
- seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid);
- return 0;
-}
-
-LPROC_SEQ_FOPS_RO(lov_desc_uuid);
-
-static void *lov_tgt_seq_start(struct seq_file *p, loff_t *pos)
-{
- struct obd_device *dev = p->private;
- struct lov_obd *lov = &dev->u.lov;
-
- while (*pos < lov->desc.ld_tgt_count) {
- if (lov->lov_tgts[*pos])
- return lov->lov_tgts[*pos];
- ++*pos;
- }
- return NULL;
-}
-
-static void lov_tgt_seq_stop(struct seq_file *p, void *v)
-{
-}
-
-static void *lov_tgt_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- struct obd_device *dev = p->private;
- struct lov_obd *lov = &dev->u.lov;
-
- while (++*pos < lov->desc.ld_tgt_count) {
- if (lov->lov_tgts[*pos])
- return lov->lov_tgts[*pos];
- }
- return NULL;
-}
-
-static int lov_tgt_seq_show(struct seq_file *p, void *v)
-{
- struct lov_tgt_desc *tgt = v;
-
- seq_printf(p, "%d: %s %sACTIVE\n",
- tgt->ltd_index, obd_uuid2str(&tgt->ltd_uuid),
- tgt->ltd_active ? "" : "IN");
- return 0;
-}
-
-static const struct seq_operations lov_tgt_sops = {
- .start = lov_tgt_seq_start,
- .stop = lov_tgt_seq_stop,
- .next = lov_tgt_seq_next,
- .show = lov_tgt_seq_show,
-};
-
-static int lov_target_seq_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int rc;
-
- rc = seq_open(file, &lov_tgt_sops);
- if (rc)
- return rc;
-
- seq = file->private_data;
- seq->private = inode->i_private;
- return 0;
-}
-
-static struct lprocfs_vars lprocfs_lov_obd_vars[] = {
- { "stripesize", &lov_stripesize_fops, NULL },
- { "stripeoffset", &lov_stripeoffset_fops, NULL },
- { "stripecount", &lov_stripecount_fops, NULL },
- { "stripetype", &lov_stripetype_fops, NULL },
- /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/
- { "desc_uuid", &lov_desc_uuid_fops, NULL, 0 },
- { NULL }
-};
-
-static struct attribute *lov_attrs[] = {
- &lustre_attr_activeobd.attr,
- &lustre_attr_numobd.attr,
- NULL,
-};
-
-static const struct attribute_group lov_attr_group = {
- .attrs = lov_attrs,
-};
-
-void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->sysfs_vars = &lov_attr_group;
- lvars->obd_vars = lprocfs_lov_obd_vars;
-}
-
-const struct file_operations lov_proc_target_fops = {
- .owner = THIS_MODULE,
- .open = lov_target_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = lprocfs_seq_release,
-};
diff --git a/drivers/staging/lustre/lustre/mdc/Makefile b/drivers/staging/lustre/lustre/mdc/Makefile
deleted file mode 100644
index c7bc3351ccb0..000000000000
--- a/drivers/staging/lustre/lustre/mdc/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LUSTRE_FS) += mdc.o
-mdc-y := mdc_request.o mdc_reint.o mdc_lib.o mdc_locks.o lproc_mdc.o
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
deleted file mode 100644
index 6cce32491eb5..000000000000
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ /dev/null
@@ -1,231 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/vfs.h>
-#include <obd_class.h>
-#include <lprocfs_status.h>
-#include "mdc_internal.h"
-
-static ssize_t active_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%u\n", !dev->u.cli.cl_import->imp_deactive);
-}
-
-static ssize_t active_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- unsigned long val;
- int rc;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val > 1)
- return -ERANGE;
-
- /* opposite senses */
- if (dev->u.cli.cl_import->imp_deactive == val) {
- rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val);
- if (rc)
- count = rc;
- } else {
- CDEBUG(D_CONFIG, "activate %lu: ignoring repeat request\n", val);
- }
- return count;
-}
-LUSTRE_RW_ATTR(active);
-
-static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- int len;
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- __u32 max;
-
- max = obd_get_max_rpcs_in_flight(&dev->u.cli);
- len = sprintf(buf, "%u\n", max);
-
- return len;
-}
-
-static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- rc = obd_set_max_rpcs_in_flight(&dev->u.cli, val);
- if (rc)
- count = rc;
-
- return count;
-}
-LUSTRE_RW_ATTR(max_rpcs_in_flight);
-
-static ssize_t max_mod_rpcs_in_flight_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- u16 max;
- int len;
-
- max = dev->u.cli.cl_max_mod_rpcs_in_flight;
- len = sprintf(buf, "%hu\n", max);
-
- return len;
-}
-
-static ssize_t max_mod_rpcs_in_flight_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- u16 val;
- int rc;
-
- rc = kstrtou16(buffer, 10, &val);
- if (rc)
- return rc;
-
- rc = obd_set_max_mod_rpcs_in_flight(&dev->u.cli, val);
- if (rc)
- count = rc;
-
- return count;
-}
-LUSTRE_RW_ATTR(max_mod_rpcs_in_flight);
-
-static int mdc_rpc_stats_seq_show(struct seq_file *seq, void *v)
-{
- struct obd_device *dev = seq->private;
-
- return obd_mod_rpc_stats_seq_show(&dev->u.cli, seq);
-}
-
-static ssize_t mdc_rpc_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
-
- lprocfs_oh_clear(&cli->cl_mod_rpcs_hist);
-
- return len;
-}
-LPROC_SEQ_FOPS(mdc_rpc_stats);
-
-LPROC_SEQ_FOPS_WR_ONLY(mdc, ping);
-
-LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags);
-LPROC_SEQ_FOPS_RO_TYPE(mdc, server_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(mdc, conn_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(mdc, timeouts);
-LPROC_SEQ_FOPS_RO_TYPE(mdc, state);
-
-/*
- * Note: below sysfs entry is provided, but not currently in use, instead
- * sbi->sb_md_brw_size is used, the per obd variable should be used
- * when DNE is enabled, and dir pages are managed in MDC layer.
- * Don't forget to enable sysfs store function then.
- */
-static ssize_t max_pages_per_rpc_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
-
- return sprintf(buf, "%d\n", cli->cl_max_pages_per_rpc);
-}
-LUSTRE_RO_ATTR(max_pages_per_rpc);
-
-LPROC_SEQ_FOPS_RW_TYPE(mdc, import);
-LPROC_SEQ_FOPS_RW_TYPE(mdc, pinger_recov);
-
-static struct lprocfs_vars lprocfs_mdc_obd_vars[] = {
- { "ping", &mdc_ping_fops, NULL, 0222 },
- { "connect_flags", &mdc_connect_flags_fops, NULL, 0 },
- /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/
- { "mds_server_uuid", &mdc_server_uuid_fops, NULL, 0 },
- { "mds_conn_uuid", &mdc_conn_uuid_fops, NULL, 0 },
- { "timeouts", &mdc_timeouts_fops, NULL, 0 },
- { "import", &mdc_import_fops, NULL, 0 },
- { "state", &mdc_state_fops, NULL, 0 },
- { "pinger_recov", &mdc_pinger_recov_fops, NULL, 0 },
- { .name = "rpc_stats",
- .fops = &mdc_rpc_stats_fops },
- { NULL }
-};
-
-static struct attribute *mdc_attrs[] = {
- &lustre_attr_active.attr,
- &lustre_attr_max_rpcs_in_flight.attr,
- &lustre_attr_max_mod_rpcs_in_flight.attr,
- &lustre_attr_max_pages_per_rpc.attr,
- NULL,
-};
-
-static const struct attribute_group mdc_attr_group = {
- .attrs = mdc_attrs,
-};
-
-void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->sysfs_vars = &mdc_attr_group;
- lvars->obd_vars = lprocfs_mdc_obd_vars;
-}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
deleted file mode 100644
index e0300c34ca3a..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ /dev/null
@@ -1,144 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _MDC_INTERNAL_H
-#define _MDC_INTERNAL_H
-
-#include <lustre_mdc.h>
-
-void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars);
-
-void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
- __u64 valid, size_t ea_size, __u32 suppgid, u32 flags);
-void mdc_swap_layouts_pack(struct ptlrpc_request *req,
- struct md_op_data *op_data);
-void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size,
- const struct lu_fid *fid);
-void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
- struct md_op_data *data, size_t ea_size);
-void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, size_t ealen);
-void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const void *data, size_t datalen, umode_t mode, uid_t uid,
- gid_t gid, cfs_cap_t capability, __u64 rdev);
-void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- umode_t mode, __u64 rdev, __u64 flags, const void *data,
- size_t datalen);
-void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
-void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
-void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const char *old, size_t oldlen,
- const char *new, size_t newlen);
-void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
-
-/* mdc/mdc_locks.c */
-int mdc_set_lock_data(struct obd_export *exp,
- const struct lustre_handle *lockh,
- void *data, __u64 *bits);
-
-int mdc_null_inode(struct obd_export *exp, const struct lu_fid *fid);
-
-int mdc_intent_lock(struct obd_export *exp,
- struct md_op_data *op_data,
- struct lookup_intent *it,
- struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags);
-
-int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- const union ldlm_policy_data *policy,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, __u64 extra_lock_flags);
-
-int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
- struct list_head *cancels, enum ldlm_mode mode,
- __u64 bits);
-/* mdc/mdc_request.c */
-int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
- struct lu_fid *fid, struct md_op_data *op_data);
-struct obd_client_handle;
-
-int mdc_set_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och,
- struct lookup_intent *it);
-
-void mdc_commit_open(struct ptlrpc_request *req);
-void mdc_replay_open(struct ptlrpc_request *req);
-
-int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, size_t datalen, umode_t mode, uid_t uid,
- gid_t gid, cfs_cap_t capability, __u64 rdev,
- struct ptlrpc_request **request);
-int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request);
-int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, size_t oldlen,
- const char *new, size_t newlen,
- struct ptlrpc_request **request);
-int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, struct ptlrpc_request **request);
-int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request);
-int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- union ldlm_policy_data *policy, enum ldlm_mode mode,
- enum ldlm_cancel_flags flags, void *opaque);
-
-int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits);
-
-int mdc_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo);
-
-enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, enum ldlm_type type,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode,
- struct lustre_handle *lockh);
-
-static inline int mdc_prep_elc_req(struct obd_export *exp,
- struct ptlrpc_request *req, int opc,
- struct list_head *cancels, int count)
-{
- return ldlm_prep_elc_req(exp, req, LUSTRE_MDS_VERSION, opc, 0, cancels,
- count);
-}
-
-static inline unsigned long hash_x_index(__u64 hash, int hash64)
-{
- if (BITS_PER_LONG == 32 && hash64)
- hash >>= 32;
- /* save hash 0 with hash 1 */
- return ~0UL - (hash + !hash);
-}
-
-#endif
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
deleted file mode 100644
index 46eefdc09e3a..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ /dev/null
@@ -1,497 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_MDC
-#include <lustre_net.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include "mdc_internal.h"
-
-static void set_mrc_cr_flags(struct mdt_rec_create *mrc, u64 flags)
-{
- mrc->cr_flags_l = (u32)(flags & 0xFFFFFFFFUll);
- mrc->cr_flags_h = (u32)(flags >> 32);
-}
-
-static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
-{
- b->mbo_suppgid = suppgid;
- b->mbo_uid = from_kuid(&init_user_ns, current_uid());
- b->mbo_gid = from_kgid(&init_user_ns, current_gid());
- b->mbo_fsuid = from_kuid(&init_user_ns, current_fsuid());
- b->mbo_fsgid = from_kgid(&init_user_ns, current_fsgid());
- b->mbo_capability = cfs_curproc_cap_pack();
-}
-
-void mdc_swap_layouts_pack(struct ptlrpc_request *req,
- struct md_op_data *op_data)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
-
- __mdc_pack_body(b, op_data->op_suppgids[0]);
- b->mbo_fid1 = op_data->op_fid1;
- b->mbo_fid2 = op_data->op_fid2;
- b->mbo_valid |= OBD_MD_FLID;
-}
-
-void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
- __u64 valid, size_t ea_size, __u32 suppgid, u32 flags)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
- b->mbo_valid = valid;
- b->mbo_eadatasize = ea_size;
- b->mbo_flags = flags;
- __mdc_pack_body(b, suppgid);
- if (fid) {
- b->mbo_fid1 = *fid;
- b->mbo_valid |= OBD_MD_FLID;
- }
-}
-
-/**
- * Pack a name (path component) into a request
- *
- * \param[in] req request
- * \param[in] field request field (usually RMF_NAME)
- * \param[in] name path component
- * \param[in] name_len length of path component
- *
- * \a field must be present in \a req and of size \a name_len + 1.
- *
- * \a name must be '\0' terminated of length \a name_len and represent
- * a single path component (not contain '/').
- */
-static void mdc_pack_name(struct ptlrpc_request *req,
- const struct req_msg_field *field,
- const char *name, size_t name_len)
-{
- size_t buf_size;
- size_t cpy_len;
- char *buf;
-
- buf = req_capsule_client_get(&req->rq_pill, field);
- buf_size = req_capsule_get_size(&req->rq_pill, field, RCL_CLIENT);
-
- LASSERT(name && name_len && buf && buf_size == name_len + 1);
-
- cpy_len = strlcpy(buf, name, buf_size);
-
- LASSERT(cpy_len == name_len && lu_name_is_valid_2(buf, cpy_len));
-}
-
-void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size,
- const struct lu_fid *fid)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
- b->mbo_fid1 = *fid;
- b->mbo_valid |= OBD_MD_FLID;
- b->mbo_size = pgoff; /* !! */
- b->mbo_nlink = size; /* !! */
- __mdc_pack_body(b, -1);
- b->mbo_mode = LUDA_FID | LUDA_TYPE;
-}
-
-/* packing of MDS records */
-void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const void *data, size_t datalen, umode_t mode,
- uid_t uid, gid_t gid, cfs_cap_t cap_effective, __u64 rdev)
-{
- struct mdt_rec_create *rec;
- char *tmp;
- __u64 flags;
-
- BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_create));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
- rec->cr_opcode = REINT_CREATE;
- rec->cr_fsuid = uid;
- rec->cr_fsgid = gid;
- rec->cr_cap = cap_effective;
- rec->cr_fid1 = op_data->op_fid1;
- rec->cr_fid2 = op_data->op_fid2;
- rec->cr_mode = mode;
- rec->cr_rdev = rdev;
- rec->cr_time = op_data->op_mod_time;
- rec->cr_suppgid1 = op_data->op_suppgids[0];
- rec->cr_suppgid2 = op_data->op_suppgids[1];
- flags = 0;
- if (op_data->op_bias & MDS_CREATE_VOLATILE)
- flags |= MDS_OPEN_VOLATILE;
- set_mrc_cr_flags(rec, flags);
- rec->cr_bias = op_data->op_bias;
- rec->cr_umask = current_umask();
-
- mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
- if (data) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
- memcpy(tmp, data, datalen);
- }
-}
-
-static inline __u64 mds_pack_open_flags(__u64 flags)
-{
- __u64 cr_flags = (flags & (FMODE_READ | FMODE_WRITE |
- MDS_OPEN_FL_INTERNAL));
- if (flags & O_CREAT)
- cr_flags |= MDS_OPEN_CREAT;
- if (flags & O_EXCL)
- cr_flags |= MDS_OPEN_EXCL;
- if (flags & O_TRUNC)
- cr_flags |= MDS_OPEN_TRUNC;
- if (flags & O_APPEND)
- cr_flags |= MDS_OPEN_APPEND;
- if (flags & O_SYNC)
- cr_flags |= MDS_OPEN_SYNC;
- if (flags & O_DIRECTORY)
- cr_flags |= MDS_OPEN_DIRECTORY;
- if (flags & __FMODE_EXEC)
- cr_flags |= MDS_FMODE_EXEC;
- if (cl_is_lov_delay_create(flags))
- cr_flags |= MDS_OPEN_DELAY_CREATE;
-
- if (flags & O_NONBLOCK)
- cr_flags |= MDS_OPEN_NORESTORE;
-
- return cr_flags;
-}
-
-/* packing of MDS records */
-void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- umode_t mode, __u64 rdev, __u64 flags, const void *lmm,
- size_t lmmlen)
-{
- struct mdt_rec_create *rec;
- char *tmp;
- __u64 cr_flags;
-
- BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_create));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
- /* XXX do something about time, uid, gid */
- rec->cr_opcode = REINT_OPEN;
- rec->cr_fsuid = from_kuid(&init_user_ns, current_fsuid());
- rec->cr_fsgid = from_kgid(&init_user_ns, current_fsgid());
- rec->cr_cap = cfs_curproc_cap_pack();
- rec->cr_fid1 = op_data->op_fid1;
- rec->cr_fid2 = op_data->op_fid2;
-
- rec->cr_mode = mode;
- cr_flags = mds_pack_open_flags(flags);
- rec->cr_rdev = rdev;
- rec->cr_time = op_data->op_mod_time;
- rec->cr_suppgid1 = op_data->op_suppgids[0];
- rec->cr_suppgid2 = op_data->op_suppgids[1];
- rec->cr_bias = op_data->op_bias;
- rec->cr_umask = current_umask();
- rec->cr_old_handle = op_data->op_handle;
-
- if (op_data->op_name) {
- mdc_pack_name(req, &RMF_NAME, op_data->op_name,
- op_data->op_namelen);
-
- if (op_data->op_bias & MDS_CREATE_VOLATILE)
- cr_flags |= MDS_OPEN_VOLATILE;
- }
-
- if (lmm) {
- cr_flags |= MDS_OPEN_HAS_EA;
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
- memcpy(tmp, lmm, lmmlen);
- }
- set_mrc_cr_flags(rec, cr_flags);
-}
-
-static inline __u64 attr_pack(unsigned int ia_valid)
-{
- __u64 sa_valid = 0;
-
- if (ia_valid & ATTR_MODE)
- sa_valid |= MDS_ATTR_MODE;
- if (ia_valid & ATTR_UID)
- sa_valid |= MDS_ATTR_UID;
- if (ia_valid & ATTR_GID)
- sa_valid |= MDS_ATTR_GID;
- if (ia_valid & ATTR_SIZE)
- sa_valid |= MDS_ATTR_SIZE;
- if (ia_valid & ATTR_ATIME)
- sa_valid |= MDS_ATTR_ATIME;
- if (ia_valid & ATTR_MTIME)
- sa_valid |= MDS_ATTR_MTIME;
- if (ia_valid & ATTR_CTIME)
- sa_valid |= MDS_ATTR_CTIME;
- if (ia_valid & ATTR_ATIME_SET)
- sa_valid |= MDS_ATTR_ATIME_SET;
- if (ia_valid & ATTR_MTIME_SET)
- sa_valid |= MDS_ATTR_MTIME_SET;
- if (ia_valid & ATTR_FORCE)
- sa_valid |= MDS_ATTR_FORCE;
- if (ia_valid & ATTR_ATTR_FLAG)
- sa_valid |= MDS_ATTR_ATTR_FLAG;
- if (ia_valid & ATTR_KILL_SUID)
- sa_valid |= MDS_ATTR_KILL_SUID;
- if (ia_valid & ATTR_KILL_SGID)
- sa_valid |= MDS_ATTR_KILL_SGID;
- if (ia_valid & ATTR_CTIME_SET)
- sa_valid |= MDS_ATTR_CTIME_SET;
- if (ia_valid & ATTR_OPEN)
- sa_valid |= MDS_ATTR_FROM_OPEN;
- if (ia_valid & ATTR_BLOCKS)
- sa_valid |= MDS_ATTR_BLOCKS;
- if (ia_valid & MDS_OPEN_OWNEROVERRIDE)
- /* NFSD hack (see bug 5781) */
- sa_valid |= MDS_OPEN_OWNEROVERRIDE;
- return sa_valid;
-}
-
-static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
- struct md_op_data *op_data)
-{
- rec->sa_opcode = REINT_SETATTR;
- rec->sa_fsuid = from_kuid(&init_user_ns, current_fsuid());
- rec->sa_fsgid = from_kgid(&init_user_ns, current_fsgid());
- rec->sa_cap = cfs_curproc_cap_pack();
- rec->sa_suppgid = -1;
-
- rec->sa_fid = op_data->op_fid1;
- rec->sa_valid = attr_pack(op_data->op_attr.ia_valid);
- rec->sa_mode = op_data->op_attr.ia_mode;
- rec->sa_uid = from_kuid(&init_user_ns, op_data->op_attr.ia_uid);
- rec->sa_gid = from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
- rec->sa_size = op_data->op_attr.ia_size;
- rec->sa_blocks = op_data->op_attr_blocks;
- rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime);
- rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime);
- rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
- rec->sa_attr_flags = op_data->op_attr_flags;
- if ((op_data->op_attr.ia_valid & ATTR_GID) &&
- in_group_p(op_data->op_attr.ia_gid))
- rec->sa_suppgid =
- from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
- else
- rec->sa_suppgid = op_data->op_suppgids[0];
-
- rec->sa_bias = op_data->op_bias;
-}
-
-static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch,
- struct md_op_data *op_data)
-{
- epoch->mio_handle = op_data->op_handle;
- epoch->mio_unused1 = 0;
- epoch->mio_unused2 = 0;
- epoch->mio_padding = 0;
-}
-
-void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, size_t ealen)
-{
- struct mdt_rec_setattr *rec;
- struct lov_user_md *lum = NULL;
-
- BUILD_BUG_ON(sizeof(struct mdt_rec_reint) !=
- sizeof(struct mdt_rec_setattr));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- mdc_setattr_pack_rec(rec, op_data);
-
- if (ealen == 0)
- return;
-
- lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
- if (!ea) { /* Remove LOV EA */
- lum->lmm_magic = cpu_to_le32(LOV_USER_MAGIC_V1);
- lum->lmm_stripe_size = 0;
- lum->lmm_stripe_count = 0;
- lum->lmm_stripe_offset = (typeof(lum->lmm_stripe_offset))(-1);
- } else {
- memcpy(lum, ea, ealen);
- }
-}
-
-void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
-{
- struct mdt_rec_unlink *rec;
-
- BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_unlink));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
- rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ?
- REINT_RMENTRY : REINT_UNLINK;
- rec->ul_fsuid = op_data->op_fsuid;
- rec->ul_fsgid = op_data->op_fsgid;
- rec->ul_cap = op_data->op_cap;
- rec->ul_mode = op_data->op_mode;
- rec->ul_suppgid1 = op_data->op_suppgids[0];
- rec->ul_suppgid2 = -1;
- rec->ul_fid1 = op_data->op_fid1;
- rec->ul_fid2 = op_data->op_fid2;
- rec->ul_time = op_data->op_mod_time;
- rec->ul_bias = op_data->op_bias;
-
- mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
-}
-
-void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
-{
- struct mdt_rec_link *rec;
-
- BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_link));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
- rec->lk_opcode = REINT_LINK;
- rec->lk_fsuid = op_data->op_fsuid; /* current->fsuid; */
- rec->lk_fsgid = op_data->op_fsgid; /* current->fsgid; */
- rec->lk_cap = op_data->op_cap; /* current->cap_effective; */
- rec->lk_suppgid1 = op_data->op_suppgids[0];
- rec->lk_suppgid2 = op_data->op_suppgids[1];
- rec->lk_fid1 = op_data->op_fid1;
- rec->lk_fid2 = op_data->op_fid2;
- rec->lk_time = op_data->op_mod_time;
- rec->lk_bias = op_data->op_bias;
-
- mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
-}
-
-static void mdc_intent_close_pack(struct ptlrpc_request *req,
- struct md_op_data *op_data)
-{
- enum mds_op_bias bias = op_data->op_bias;
- struct close_data *data;
- struct ldlm_lock *lock;
-
- if (!(bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP |
- MDS_RENAME_MIGRATE)))
- return;
-
- data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
- LASSERT(data);
-
- lock = ldlm_handle2lock(&op_data->op_lease_handle);
- if (lock) {
- data->cd_handle = lock->l_remote_handle;
- LDLM_LOCK_PUT(lock);
- }
- ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
-
- data->cd_data_version = op_data->op_data_version;
- data->cd_fid = op_data->op_fid2;
-}
-
-void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const char *old, size_t oldlen,
- const char *new, size_t newlen)
-{
- struct mdt_rec_rename *rec;
-
- BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_rename));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
- /* XXX do something about time, uid, gid */
- rec->rn_opcode = op_data->op_cli_flags & CLI_MIGRATE ?
- REINT_MIGRATE : REINT_RENAME;
- rec->rn_opcode = REINT_RENAME;
- rec->rn_fsuid = op_data->op_fsuid;
- rec->rn_fsgid = op_data->op_fsgid;
- rec->rn_cap = op_data->op_cap;
- rec->rn_suppgid1 = op_data->op_suppgids[0];
- rec->rn_suppgid2 = op_data->op_suppgids[1];
- rec->rn_fid1 = op_data->op_fid1;
- rec->rn_fid2 = op_data->op_fid2;
- rec->rn_time = op_data->op_mod_time;
- rec->rn_mode = op_data->op_mode;
- rec->rn_bias = op_data->op_bias;
-
- mdc_pack_name(req, &RMF_NAME, old, oldlen);
-
- if (new)
- mdc_pack_name(req, &RMF_SYMTGT, new, newlen);
-
- if (op_data->op_cli_flags & CLI_MIGRATE &&
- op_data->op_bias & MDS_RENAME_MIGRATE) {
- struct mdt_ioepoch *epoch;
-
- mdc_intent_close_pack(req, op_data);
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
- mdc_ioepoch_pack(epoch, op_data);
- }
-}
-
-void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
- struct md_op_data *op_data, size_t ea_size)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
-
- b->mbo_valid = valid;
- if (op_data->op_bias & MDS_CHECK_SPLIT)
- b->mbo_valid |= OBD_MD_FLCKSPLIT;
- if (op_data->op_bias & MDS_CROSS_REF)
- b->mbo_valid |= OBD_MD_FLCROSSREF;
- b->mbo_eadatasize = ea_size;
- b->mbo_flags = flags;
- __mdc_pack_body(b, op_data->op_suppgids[0]);
-
- b->mbo_fid1 = op_data->op_fid1;
- b->mbo_fid2 = op_data->op_fid2;
- b->mbo_valid |= OBD_MD_FLID;
-
- if (op_data->op_name)
- mdc_pack_name(req, &RMF_NAME, op_data->op_name,
- op_data->op_namelen);
-}
-
-void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
-{
- struct mdt_ioepoch *epoch;
- struct mdt_rec_setattr *rec;
-
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
- mdc_setattr_pack_rec(rec, op_data);
- /*
- * The client will zero out local timestamps when losing the IBITS lock
- * so any new RPC timestamps will update the client inode's timestamps.
- * There was a defect on the server side which allowed the atime to be
- * overwritten by a zeroed-out atime packed into the close RPC.
- *
- * Proactively clear the MDS_ATTR_ATIME flag in the RPC in this case
- * to avoid zeroing the atime on old unpatched servers. See LU-8041.
- */
- if (rec->sa_atime == 0)
- rec->sa_valid &= ~MDS_ATTR_ATIME;
-
- mdc_ioepoch_pack(epoch, op_data);
- mdc_intent_close_pack(req, op_data);
-}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
deleted file mode 100644
index 695ef44532cf..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ /dev/null
@@ -1,1202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_MDC
-
-#include <linux/module.h>
-
-#include <lustre_intent.h>
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_dlm.h>
-#include <lustre_fid.h>
-#include <lustre_mdc.h>
-#include <lustre_net.h>
-#include <lustre_req_layout.h>
-#include <lustre_swab.h>
-
-#include "mdc_internal.h"
-
-struct mdc_getattr_args {
- struct obd_export *ga_exp;
- struct md_enqueue_info *ga_minfo;
-};
-
-int it_open_error(int phase, struct lookup_intent *it)
-{
- if (it_disposition(it, DISP_OPEN_LEASE)) {
- if (phase >= DISP_OPEN_LEASE)
- return it->it_status;
- else
- return 0;
- }
- if (it_disposition(it, DISP_OPEN_OPEN)) {
- if (phase >= DISP_OPEN_OPEN)
- return it->it_status;
- else
- return 0;
- }
-
- if (it_disposition(it, DISP_OPEN_CREATE)) {
- if (phase >= DISP_OPEN_CREATE)
- return it->it_status;
- else
- return 0;
- }
-
- if (it_disposition(it, DISP_LOOKUP_EXECD)) {
- if (phase >= DISP_LOOKUP_EXECD)
- return it->it_status;
- else
- return 0;
- }
-
- if (it_disposition(it, DISP_IT_EXECD)) {
- if (phase >= DISP_IT_EXECD)
- return it->it_status;
- else
- return 0;
- }
- CERROR("it disp: %X, status: %d\n", it->it_disposition,
- it->it_status);
- LBUG();
- return 0;
-}
-EXPORT_SYMBOL(it_open_error);
-
-/* this must be called on a lockh that is known to have a referenced lock */
-int mdc_set_lock_data(struct obd_export *exp, const struct lustre_handle *lockh,
- void *data, __u64 *bits)
-{
- struct ldlm_lock *lock;
- struct inode *new_inode = data;
-
- if (bits)
- *bits = 0;
-
- if (!lustre_handle_is_used(lockh))
- return 0;
-
- lock = ldlm_handle2lock(lockh);
-
- LASSERT(lock);
- lock_res_and_lock(lock);
- if (lock->l_resource->lr_lvb_inode &&
- lock->l_resource->lr_lvb_inode != data) {
- struct inode *old_inode = lock->l_resource->lr_lvb_inode;
-
- LASSERTF(old_inode->i_state & I_FREEING,
- "Found existing inode %p/%lu/%u state %lu in lock: setting data to %p/%lu/%u\n",
- old_inode, old_inode->i_ino, old_inode->i_generation,
- old_inode->i_state, new_inode, new_inode->i_ino,
- new_inode->i_generation);
- }
- lock->l_resource->lr_lvb_inode = new_inode;
- if (bits)
- *bits = lock->l_policy_data.l_inodebits.bits;
-
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
-
- return 0;
-}
-
-enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, enum ldlm_type type,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode,
- struct lustre_handle *lockh)
-{
- struct ldlm_res_id res_id;
- enum ldlm_mode rc;
-
- fid_build_reg_res_name(fid, &res_id);
- /* LU-4405: Clear bits not supported by server */
- policy->l_inodebits.bits &= exp_connect_ibits(exp);
- rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
- &res_id, type, policy, mode, lockh, 0);
- return rc;
-}
-
-int mdc_cancel_unused(struct obd_export *exp,
- const struct lu_fid *fid,
- union ldlm_policy_data *policy,
- enum ldlm_mode mode,
- enum ldlm_cancel_flags flags,
- void *opaque)
-{
- struct ldlm_res_id res_id;
- struct obd_device *obd = class_exp2obd(exp);
- int rc;
-
- fid_build_reg_res_name(fid, &res_id);
- rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
- policy, mode, flags, opaque);
- return rc;
-}
-
-int mdc_null_inode(struct obd_export *exp,
- const struct lu_fid *fid)
-{
- struct ldlm_res_id res_id;
- struct ldlm_resource *res;
- struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace;
-
- LASSERTF(ns, "no namespace passed\n");
-
- fid_build_reg_res_name(fid, &res_id);
-
- res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if (IS_ERR(res))
- return 0;
-
- lock_res(res);
- res->lr_lvb_inode = NULL;
- unlock_res(res);
-
- ldlm_resource_putref(res);
- return 0;
-}
-
-static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
-{
- /* Don't hold error requests for replay. */
- if (req->rq_replay) {
- spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
- }
- if (rc && req->rq_transno != 0) {
- DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
- LBUG();
- }
-}
-
-/* Save a large LOV EA into the request buffer so that it is available
- * for replay. We don't do this in the initial request because the
- * original request doesn't need this buffer (at most it sends just the
- * lov_mds_md) and it is a waste of RAM/bandwidth to send the empty
- * buffer and may also be difficult to allocate and save a very large
- * request buffer for each open. (bug 5707)
- *
- * OOM here may cause recovery failure if lmm is needed (only for the
- * original open if the MDS crashed just when this client also OOM'd)
- * but this is incredibly unlikely, and questionable whether the client
- * could do MDS recovery under OOM anyways...
- */
-static void mdc_realloc_openmsg(struct ptlrpc_request *req,
- struct mdt_body *body)
-{
- int rc;
-
- /* FIXME: remove this explicit offset. */
- rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4,
- body->mbo_eadatasize);
- if (rc) {
- CERROR("Can't enlarge segment %d size to %d\n",
- DLM_INTENT_REC_OFF + 4, body->mbo_eadatasize);
- body->mbo_valid &= ~OBD_MD_FLEASIZE;
- body->mbo_eadatasize = 0;
- }
-}
-
-static struct ptlrpc_request *
-mdc_intent_open_pack(struct obd_export *exp, struct lookup_intent *it,
- struct md_op_data *op_data)
-{
- struct ptlrpc_request *req;
- struct obd_device *obddev = class_exp2obd(exp);
- struct ldlm_intent *lit;
- const void *lmm = op_data->op_data;
- u32 lmmsize = op_data->op_data_size;
- LIST_HEAD(cancels);
- int count = 0;
- int mode;
- int rc;
-
- it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
-
- /* XXX: openlock is not cancelled for cross-refs. */
- /* If inode is known, cancel conflicting OPEN locks. */
- if (fid_is_sane(&op_data->op_fid2)) {
- if (it->it_flags & MDS_OPEN_LEASE) { /* try to get lease */
- if (it->it_flags & FMODE_WRITE)
- mode = LCK_EX;
- else
- mode = LCK_PR;
- } else {
- if (it->it_flags & (FMODE_WRITE | MDS_OPEN_TRUNC))
- mode = LCK_CW;
- else if (it->it_flags & __FMODE_EXEC)
- mode = LCK_PR;
- else
- mode = LCK_CR;
- }
- count = mdc_resource_get_unused(exp, &op_data->op_fid2,
- &cancels, mode,
- MDS_INODELOCK_OPEN);
- }
-
- /* If CREATE, cancel parent's UPDATE lock. */
- if (it->it_op & IT_CREAT)
- mode = LCK_EX;
- else
- mode = LCK_CR;
- count += mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, mode,
- MDS_INODELOCK_UPDATE);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_OPEN);
- if (!req) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return ERR_PTR(-ENOMEM);
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
- max(lmmsize, obddev->u.cli.cl_default_mds_easize));
-
- rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
- if (rc < 0) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- spin_lock(&req->rq_lock);
- req->rq_replay = req->rq_import->imp_replayable;
- spin_unlock(&req->rq_lock);
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = (__u64)it->it_op;
-
- /* pack the intended request */
- mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm,
- lmmsize);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- obddev->u.cli.cl_max_mds_easize);
-
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static struct ptlrpc_request *
-mdc_intent_getxattr_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *op_data)
-{
- struct ptlrpc_request *req;
- struct ldlm_intent *lit;
- int rc, count = 0;
- u32 maxdata;
- LIST_HEAD(cancels);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_GETXATTR);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = IT_GETXATTR;
-
- maxdata = class_exp2cliimp(exp)->imp_connect_data.ocd_max_easize;
-
- /* pack the intended request */
- mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid, maxdata, -1,
- 0);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, maxdata);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, RCL_SERVER, maxdata);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS,
- RCL_SERVER, maxdata);
-
- ptlrpc_request_set_replen(req);
-
- return req;
-}
-
-static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *op_data)
-{
- struct ptlrpc_request *req;
- struct obd_device *obddev = class_exp2obd(exp);
- struct ldlm_intent *lit;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_UNLINK);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = (__u64)it->it_op;
-
- /* pack the intended request */
- mdc_unlink_pack(req, op_data);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- obddev->u.cli.cl_default_mds_easize);
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *op_data)
-{
- struct ptlrpc_request *req;
- struct obd_device *obddev = class_exp2obd(exp);
- u64 valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
- OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
- OBD_MD_MEA | OBD_MD_FLACL;
- struct ldlm_intent *lit;
- int rc;
- u32 easize;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_GETATTR);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = (__u64)it->it_op;
-
- if (obddev->u.cli.cl_default_mds_easize > 0)
- easize = obddev->u.cli.cl_default_mds_easize;
- else
- easize = obddev->u.cli.cl_max_mds_easize;
-
- /* pack the intended request */
- mdc_getattr_pack(req, valid, it->it_flags, op_data, easize);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize);
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *unused)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- struct ldlm_intent *lit;
- struct layout_intent *layout;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_LAYOUT);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0);
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = (__u64)it->it_op;
-
- /* pack the layout intent request */
- layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT);
- /* LAYOUT_INTENT_ACCESS is generic, specific operation will be
- * set for replication
- */
- layout->li_opc = LAYOUT_INTENT_ACCESS;
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- obd->u.cli.cl_default_mds_easize);
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static struct ptlrpc_request *
-mdc_enqueue_pack(struct obd_export *exp, int lvb_len)
-{
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static int mdc_finish_enqueue(struct obd_export *exp,
- struct ptlrpc_request *req,
- struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it,
- struct lustre_handle *lockh,
- int rc)
-{
- struct req_capsule *pill = &req->rq_pill;
- struct ldlm_request *lockreq;
- struct ldlm_reply *lockrep;
- struct ldlm_lock *lock;
- void *lvb_data = NULL;
- u32 lvb_len = 0;
-
- LASSERT(rc >= 0);
- /* Similarly, if we're going to replay this request, we don't want to
- * actually get a lock, just perform the intent.
- */
- if (req->rq_transno || req->rq_replay) {
- lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ);
- lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY);
- }
-
- if (rc == ELDLM_LOCK_ABORTED) {
- einfo->ei_mode = 0;
- memset(lockh, 0, sizeof(*lockh));
- rc = 0;
- } else { /* rc = 0 */
- lock = ldlm_handle2lock(lockh);
-
- /* If the server gave us back a different lock mode, we should
- * fix up our variables.
- */
- if (lock->l_req_mode != einfo->ei_mode) {
- ldlm_lock_addref(lockh, lock->l_req_mode);
- ldlm_lock_decref(lockh, einfo->ei_mode);
- einfo->ei_mode = lock->l_req_mode;
- }
- LDLM_LOCK_PUT(lock);
- }
-
- lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
-
- it->it_disposition = (int)lockrep->lock_policy_res1;
- it->it_status = (int)lockrep->lock_policy_res2;
- it->it_lock_mode = einfo->ei_mode;
- it->it_lock_handle = lockh->cookie;
- it->it_request = req;
-
- /* Technically speaking rq_transno must already be zero if
- * it_status is in error, so the check is a bit redundant
- */
- if ((!req->rq_transno || it->it_status < 0) && req->rq_replay)
- mdc_clear_replay_flag(req, it->it_status);
-
- /* If we're doing an IT_OPEN which did not result in an actual
- * successful open, then we need to remove the bit which saves
- * this request for unconditional replay.
- *
- * It's important that we do this first! Otherwise we might exit the
- * function without doing so, and try to replay a failed create
- * (bug 3440)
- */
- if (it->it_op & IT_OPEN && req->rq_replay &&
- (!it_disposition(it, DISP_OPEN_OPEN) || it->it_status != 0))
- mdc_clear_replay_flag(req, it->it_status);
-
- DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
- it->it_op, it->it_disposition, it->it_status);
-
- /* We know what to expect, so we do any byte flipping required here */
- if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) {
- struct mdt_body *body;
-
- body = req_capsule_server_get(pill, &RMF_MDT_BODY);
- if (!body) {
- CERROR("Can't swab mdt_body\n");
- return -EPROTO;
- }
-
- if (it_disposition(it, DISP_OPEN_OPEN) &&
- !it_open_error(DISP_OPEN_OPEN, it)) {
- /*
- * If this is a successful OPEN request, we need to set
- * replay handler and data early, so that if replay
- * happens immediately after swabbing below, new reply
- * is swabbed by that handler correctly.
- */
- mdc_set_open_replay_data(NULL, NULL, it);
- }
-
- if ((body->mbo_valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
- void *eadata;
-
- mdc_update_max_ea_from_body(exp, body);
-
- /*
- * The eadata is opaque; just check that it is there.
- * Eventually, obd_unpackmd() will check the contents.
- */
- eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- body->mbo_eadatasize);
- if (!eadata)
- return -EPROTO;
-
- /* save lvb data and length in case this is for layout
- * lock
- */
- lvb_data = eadata;
- lvb_len = body->mbo_eadatasize;
-
- /*
- * We save the reply LOV EA in case we have to replay a
- * create for recovery. If we didn't allocate a large
- * enough request buffer above we need to reallocate it
- * here to hold the actual LOV EA.
- *
- * To not save LOV EA if request is not going to replay
- * (for example error one).
- */
- if ((it->it_op & IT_OPEN) && req->rq_replay) {
- void *lmm;
-
- if (req_capsule_get_size(pill, &RMF_EADATA,
- RCL_CLIENT) <
- body->mbo_eadatasize)
- mdc_realloc_openmsg(req, body);
- else
- req_capsule_shrink(pill, &RMF_EADATA,
- body->mbo_eadatasize,
- RCL_CLIENT);
-
- req_capsule_set_size(pill, &RMF_EADATA,
- RCL_CLIENT,
- body->mbo_eadatasize);
-
- lmm = req_capsule_client_get(pill, &RMF_EADATA);
- if (lmm)
- memcpy(lmm, eadata, body->mbo_eadatasize);
- }
- }
- } else if (it->it_op & IT_LAYOUT) {
- /* maybe the lock was granted right away and layout
- * is packed into RMF_DLM_LVB of req
- */
- lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER);
- if (lvb_len > 0) {
- lvb_data = req_capsule_server_sized_get(pill,
- &RMF_DLM_LVB,
- lvb_len);
- if (!lvb_data)
- return -EPROTO;
- }
- }
-
- /* fill in stripe data for layout lock */
- lock = ldlm_handle2lock(lockh);
- if (lock && ldlm_has_layout(lock) && lvb_data) {
- void *lmm;
-
- LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d",
- ldlm_it2str(it->it_op), lvb_len);
-
- lmm = kvzalloc(lvb_len, GFP_NOFS);
- if (!lmm) {
- LDLM_LOCK_PUT(lock);
- return -ENOMEM;
- }
- memcpy(lmm, lvb_data, lvb_len);
-
- /* install lvb_data */
- lock_res_and_lock(lock);
- if (!lock->l_lvb_data) {
- lock->l_lvb_type = LVB_T_LAYOUT;
- lock->l_lvb_data = lmm;
- lock->l_lvb_len = lvb_len;
- lmm = NULL;
- }
- unlock_res_and_lock(lock);
- if (lmm)
- kvfree(lmm);
- }
- if (lock)
- LDLM_LOCK_PUT(lock);
-
- return rc;
-}
-
-/* We always reserve enough space in the reply packet for a stripe MD, because
- * we don't know in advance the file type.
- */
-int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- const union ldlm_policy_data *policy,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, u64 extra_lock_flags)
-{
- static const union ldlm_policy_data lookup_policy = {
- .l_inodebits = { MDS_INODELOCK_LOOKUP }
- };
- static const union ldlm_policy_data update_policy = {
- .l_inodebits = { MDS_INODELOCK_UPDATE }
- };
- static const union ldlm_policy_data layout_policy = {
- .l_inodebits = { MDS_INODELOCK_LAYOUT }
- };
- static const union ldlm_policy_data getxattr_policy = {
- .l_inodebits = { MDS_INODELOCK_XATTR }
- };
- struct obd_device *obddev = class_exp2obd(exp);
- struct ptlrpc_request *req = NULL;
- u64 flags, saved_flags = extra_lock_flags;
- struct ldlm_res_id res_id;
- int generation, resends = 0;
- struct ldlm_reply *lockrep;
- enum lvb_type lvb_type = LVB_T_NONE;
- int rc;
-
- LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n",
- einfo->ei_type);
- fid_build_reg_res_name(&op_data->op_fid1, &res_id);
-
- if (it) {
- LASSERT(!policy);
-
- saved_flags |= LDLM_FL_HAS_INTENT;
- if (it->it_op & (IT_UNLINK | IT_GETATTR | IT_READDIR))
- policy = &update_policy;
- else if (it->it_op & IT_LAYOUT)
- policy = &layout_policy;
- else if (it->it_op & (IT_GETXATTR | IT_SETXATTR))
- policy = &getxattr_policy;
- else
- policy = &lookup_policy;
- }
-
- generation = obddev->u.cli.cl_import->imp_generation;
-resend:
- flags = saved_flags;
- if (!it) {
- /* The only way right now is FLOCK. */
- LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n",
- einfo->ei_type);
- res_id.name[3] = LDLM_FLOCK;
- } else if (it->it_op & IT_OPEN) {
- req = mdc_intent_open_pack(exp, it, op_data);
- } else if (it->it_op & IT_UNLINK) {
- req = mdc_intent_unlink_pack(exp, it, op_data);
- } else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) {
- req = mdc_intent_getattr_pack(exp, it, op_data);
- } else if (it->it_op & IT_READDIR) {
- req = mdc_enqueue_pack(exp, 0);
- } else if (it->it_op & IT_LAYOUT) {
- if (!imp_connect_lvb_type(class_exp2cliimp(exp)))
- return -EOPNOTSUPP;
- req = mdc_intent_layout_pack(exp, it, op_data);
- lvb_type = LVB_T_LAYOUT;
- } else if (it->it_op & IT_GETXATTR) {
- req = mdc_intent_getxattr_pack(exp, it, op_data);
- } else {
- LBUG();
- return -EINVAL;
- }
-
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- if (resends) {
- req->rq_generation_set = 1;
- req->rq_import_generation = generation;
- req->rq_sent = ktime_get_real_seconds() + resends;
- }
-
- /* It is important to obtain modify RPC slot first (if applicable), so
- * that threads that are waiting for a modify RPC slot are not polluting
- * our rpcs in flight counter.
- * We do not do flock request limiting, though
- */
- if (it) {
- mdc_get_mod_rpc_slot(req, it);
- rc = obd_get_request_slot(&obddev->u.cli);
- if (rc != 0) {
- mdc_put_mod_rpc_slot(req, it);
- mdc_clear_replay_flag(req, 0);
- ptlrpc_req_finished(req);
- return rc;
- }
- }
-
- rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, policy, &flags, NULL,
- 0, lvb_type, lockh, 0);
- if (!it) {
- /* For flock requests we immediately return without further
- * delay and let caller deal with the rest, since rest of
- * this function metadata processing makes no sense for flock
- * requests anyway. But in case of problem during comms with
- * Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we
- * can not rely on caller and this mainly for F_UNLCKs
- * (explicits or automatically generated by Kernel to clean
- * current FLocks upon exit) that can't be trashed
- */
- if (((rc == -EINTR) || (rc == -ETIMEDOUT)) &&
- (einfo->ei_type == LDLM_FLOCK) &&
- (einfo->ei_mode == LCK_NL))
- goto resend;
- return rc;
- }
-
- obd_put_request_slot(&obddev->u.cli);
- mdc_put_mod_rpc_slot(req, it);
-
- if (rc < 0) {
- CDEBUG(D_INFO, "%s: ldlm_cli_enqueue failed: rc = %d\n",
- obddev->obd_name, rc);
-
- mdc_clear_replay_flag(req, rc);
- ptlrpc_req_finished(req);
- return rc;
- }
-
- lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-
- lockrep->lock_policy_res2 =
- ptlrpc_status_ntoh(lockrep->lock_policy_res2);
-
- /*
- * Retry infinitely when the server returns -EINPROGRESS for the
- * intent operation, when server returns -EINPROGRESS for acquiring
- * intent lock, we'll retry in after_reply().
- */
- if (it->it_op && (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
- mdc_clear_replay_flag(req, rc);
- ptlrpc_req_finished(req);
- resends++;
-
- CDEBUG(D_HA, "%s: resend:%d op:%d " DFID "/" DFID "\n",
- obddev->obd_name, resends, it->it_op,
- PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));
-
- if (generation == obddev->u.cli.cl_import->imp_generation) {
- goto resend;
- } else {
- CDEBUG(D_HA, "resend cross eviction\n");
- return -EIO;
- }
- }
-
- rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc);
- if (rc < 0) {
- if (lustre_handle_is_used(lockh)) {
- ldlm_lock_decref(lockh, einfo->ei_mode);
- memset(lockh, 0, sizeof(*lockh));
- }
- ptlrpc_req_finished(req);
-
- it->it_lock_handle = 0;
- it->it_lock_mode = 0;
- it->it_request = NULL;
- }
-
- return rc;
-}
-
-static int mdc_finish_intent_lock(struct obd_export *exp,
- struct ptlrpc_request *request,
- struct md_op_data *op_data,
- struct lookup_intent *it,
- struct lustre_handle *lockh)
-{
- struct lustre_handle old_lock;
- struct mdt_body *mdt_body;
- struct ldlm_lock *lock;
- int rc;
-
- LASSERT(request != LP_POISON);
- LASSERT(request->rq_repmsg != LP_POISON);
-
- if (it->it_op & IT_READDIR)
- return 0;
-
- if (!it_disposition(it, DISP_IT_EXECD)) {
- /* The server failed before it even started executing the
- * intent, i.e. because it couldn't unpack the request.
- */
- LASSERT(it->it_status != 0);
- return it->it_status;
- }
- rc = it_open_error(DISP_IT_EXECD, it);
- if (rc)
- return rc;
-
- mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- LASSERT(mdt_body); /* mdc_enqueue checked */
-
- rc = it_open_error(DISP_LOOKUP_EXECD, it);
- if (rc)
- return rc;
-
- /* keep requests around for the multiple phases of the call
- * this shows the DISP_XX must guarantee we make it into the call
- */
- if (!it_disposition(it, DISP_ENQ_CREATE_REF) &&
- it_disposition(it, DISP_OPEN_CREATE) &&
- !it_open_error(DISP_OPEN_CREATE, it)) {
- it_set_disposition(it, DISP_ENQ_CREATE_REF);
- ptlrpc_request_addref(request); /* balanced in ll_create_node */
- }
- if (!it_disposition(it, DISP_ENQ_OPEN_REF) &&
- it_disposition(it, DISP_OPEN_OPEN) &&
- !it_open_error(DISP_OPEN_OPEN, it)) {
- it_set_disposition(it, DISP_ENQ_OPEN_REF);
- ptlrpc_request_addref(request); /* balanced in ll_file_open */
- /* BUG 11546 - eviction in the middle of open rpc processing */
- OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout);
- }
-
- if (it->it_op & IT_CREAT)
- /* XXX this belongs in ll_create_it */
- ;
- else if (it->it_op == IT_OPEN)
- LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
- else
- LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP | IT_LAYOUT));
-
- /* If we already have a matching lock, then cancel the new
- * one. We have to set the data here instead of in
- * mdc_enqueue, because we need to use the child's inode as
- * the l_ast_data to match, and that's not available until
- * intent_finish has performed the iget().)
- */
- lock = ldlm_handle2lock(lockh);
- if (lock) {
- union ldlm_policy_data policy = lock->l_policy_data;
-
- LDLM_DEBUG(lock, "matching against this");
-
- LASSERTF(fid_res_name_eq(&mdt_body->mbo_fid1,
- &lock->l_resource->lr_name),
- "Lock res_id: " DLDLMRES ", fid: " DFID "\n",
- PLDLMRES(lock->l_resource), PFID(&mdt_body->mbo_fid1));
- LDLM_LOCK_PUT(lock);
-
- memcpy(&old_lock, lockh, sizeof(*lockh));
- if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
- LDLM_IBITS, &policy, LCK_NL,
- &old_lock, 0)) {
- ldlm_lock_decref_and_cancel(lockh,
- it->it_lock_mode);
- memcpy(lockh, &old_lock, sizeof(old_lock));
- it->it_lock_handle = lockh->cookie;
- }
- }
- CDEBUG(D_DENTRY,
- "D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
- (int)op_data->op_namelen, op_data->op_name,
- ldlm_it2str(it->it_op), it->it_status, it->it_disposition, rc);
- return rc;
-}
-
-int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits)
-{
- /* We could just return 1 immediately, but since we should only
- * be called in revalidate_it if we already have a lock, let's
- * verify that.
- */
- struct ldlm_res_id res_id;
- struct lustre_handle lockh;
- union ldlm_policy_data policy;
- enum ldlm_mode mode;
-
- if (it->it_lock_handle) {
- lockh.cookie = it->it_lock_handle;
- mode = ldlm_revalidate_lock_handle(&lockh, bits);
- } else {
- fid_build_reg_res_name(fid, &res_id);
- switch (it->it_op) {
- case IT_GETATTR:
- /* File attributes are held under multiple bits:
- * nlink is under lookup lock, size and times are
- * under UPDATE lock and recently we've also got
- * a separate permissions lock for owner/group/acl that
- * were protected by lookup lock before.
- * Getattr must provide all of that information,
- * so we need to ensure we have all of those locks.
- * Unfortunately, if the bits are split across multiple
- * locks, there's no easy way to match all of them here,
- * so an extra RPC would be performed to fetch all
- * of those bits at once for now.
- */
- /* For new MDTs(> 2.4), UPDATE|PERM should be enough,
- * but for old MDTs (< 2.4), permission is covered
- * by LOOKUP lock, so it needs to match all bits here.
- */
- policy.l_inodebits.bits = MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LOOKUP |
- MDS_INODELOCK_PERM;
- break;
- case IT_READDIR:
- policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
- break;
- case IT_LAYOUT:
- policy.l_inodebits.bits = MDS_INODELOCK_LAYOUT;
- break;
- default:
- policy.l_inodebits.bits = MDS_INODELOCK_LOOKUP;
- break;
- }
-
- mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid,
- LDLM_IBITS, &policy,
- LCK_CR | LCK_CW | LCK_PR | LCK_PW,
- &lockh);
- }
-
- if (mode) {
- it->it_lock_handle = lockh.cookie;
- it->it_lock_mode = mode;
- } else {
- it->it_lock_handle = 0;
- it->it_lock_mode = 0;
- }
-
- return !!mode;
-}
-
-/*
- * This long block is all about fixing up the lock and request state
- * so that it is correct as of the moment _before_ the operation was
- * applied; that way, the VFS will think that everything is normal and
- * call Lustre's regular VFS methods.
- *
- * If we're performing a creation, that means that unless the creation
- * failed with EEXIST, we should fake up a negative dentry.
- *
- * For everything else, we want the lookup to succeed.
- *
- * One additional note: if CREATE or OPEN succeeded, we add an extra
- * reference to the request because we need to keep it around until
- * ll_create/ll_open gets called.
- *
- * The server will return to us, in it_disposition, an indication of
- * exactly what it_status refers to.
- *
- * If DISP_OPEN_OPEN is set, then it_status refers to the open() call,
- * otherwise if DISP_OPEN_CREATE is set, then it_status is the
- * creation failure mode. In either case, one of DISP_LOOKUP_NEG or
- * DISP_LOOKUP_POS will be set, indicating whether the child lookup
- * was successful.
- *
- * Else, if DISP_LOOKUP_EXECD then it_status is the rc of the
- * child lookup.
- */
-int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
- struct lookup_intent *it, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking, __u64 extra_lock_flags)
-{
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = it_to_lock_mode(it),
- .ei_cb_bl = cb_blocking,
- .ei_cb_cp = ldlm_completion_ast,
- };
- struct lustre_handle lockh;
- int rc = 0;
-
- LASSERT(it);
-
- CDEBUG(D_DLMTRACE, "(name: %.*s," DFID ") in obj " DFID
- ", intent: %s flags %#Lo\n", (int)op_data->op_namelen,
- op_data->op_name, PFID(&op_data->op_fid2),
- PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
- it->it_flags);
-
- lockh.cookie = 0;
- if (fid_is_sane(&op_data->op_fid2) &&
- (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_READDIR))) {
- /* We could just return 1 immediately, but since we should only
- * be called in revalidate_it if we already have a lock, let's
- * verify that.
- */
- it->it_lock_handle = 0;
- rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL);
- /* Only return failure if it was not GETATTR by cfid
- * (from inode_revalidate)
- */
- if (rc || op_data->op_namelen != 0)
- return rc;
- }
-
- /* For case if upper layer did not alloc fid, do it now. */
- if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
- rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
- if (rc < 0) {
- CERROR("Can't alloc new fid, rc %d\n", rc);
- return rc;
- }
- }
- rc = mdc_enqueue(exp, &einfo, NULL, it, op_data, &lockh,
- extra_lock_flags);
- if (rc < 0)
- return rc;
-
- *reqp = it->it_request;
- rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
- return rc;
-}
-
-static int mdc_intent_getattr_async_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *args, int rc)
-{
- struct mdc_getattr_args *ga = args;
- struct obd_export *exp = ga->ga_exp;
- struct md_enqueue_info *minfo = ga->ga_minfo;
- struct ldlm_enqueue_info *einfo = &minfo->mi_einfo;
- struct lookup_intent *it;
- struct lustre_handle *lockh;
- struct obd_device *obddev;
- struct ldlm_reply *lockrep;
- __u64 flags = LDLM_FL_HAS_INTENT;
-
- it = &minfo->mi_it;
- lockh = &minfo->mi_lockh;
-
- obddev = class_exp2obd(exp);
-
- obd_put_request_slot(&obddev->u.cli);
- if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GETATTR_ENQUEUE))
- rc = -ETIMEDOUT;
-
- rc = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, 1, einfo->ei_mode,
- &flags, NULL, 0, lockh, rc);
- if (rc < 0) {
- CERROR("ldlm_cli_enqueue_fini: %d\n", rc);
- mdc_clear_replay_flag(req, rc);
- goto out;
- }
-
- lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-
- lockrep->lock_policy_res2 =
- ptlrpc_status_ntoh(lockrep->lock_policy_res2);
-
- rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc);
- if (rc)
- goto out;
-
- rc = mdc_finish_intent_lock(exp, req, &minfo->mi_data, it, lockh);
-
-out:
- minfo->mi_cb(req, minfo, rc);
- return 0;
-}
-
-int mdc_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo)
-{
- struct md_op_data *op_data = &minfo->mi_data;
- struct lookup_intent *it = &minfo->mi_it;
- struct ptlrpc_request *req;
- struct mdc_getattr_args *ga;
- struct obd_device *obddev = class_exp2obd(exp);
- struct ldlm_res_id res_id;
- union ldlm_policy_data policy = {
- .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE }
- };
- int rc = 0;
- __u64 flags = LDLM_FL_HAS_INTENT;
-
- CDEBUG(D_DLMTRACE,
- "name: %.*s in inode " DFID ", intent: %s flags %#Lo\n",
- (int)op_data->op_namelen, op_data->op_name,
- PFID(&op_data->op_fid1), ldlm_it2str(it->it_op), it->it_flags);
-
- fid_build_reg_res_name(&op_data->op_fid1, &res_id);
- req = mdc_intent_getattr_pack(exp, it, op_data);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- rc = obd_get_request_slot(&obddev->u.cli);
- if (rc != 0) {
- ptlrpc_req_finished(req);
- return rc;
- }
-
- rc = ldlm_cli_enqueue(exp, &req, &minfo->mi_einfo, &res_id, &policy,
- &flags, NULL, 0, LVB_T_NONE, &minfo->mi_lockh, 1);
- if (rc < 0) {
- obd_put_request_slot(&obddev->u.cli);
- ptlrpc_req_finished(req);
- return rc;
- }
-
- BUILD_BUG_ON(sizeof(*ga) > sizeof(req->rq_async_args));
- ga = ptlrpc_req_async_args(req);
- ga->ga_exp = exp;
- ga->ga_minfo = minfo;
-
- req->rq_interpret_reply = mdc_intent_getattr_async_interpret;
- ptlrpcd_add_req(req);
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
deleted file mode 100644
index 488b98007558..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ /dev/null
@@ -1,419 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_MDC
-
-# include <linux/module.h>
-# include <linux/kernel.h>
-
-#include <obd_class.h>
-#include "mdc_internal.h"
-#include <lustre_fid.h>
-
-/* mdc_setattr does its own semaphore handling */
-static int mdc_reint(struct ptlrpc_request *request, int level)
-{
- int rc;
-
- request->rq_send_state = level;
-
- mdc_get_mod_rpc_slot(request, NULL);
- rc = ptlrpc_queue_wait(request);
- mdc_put_mod_rpc_slot(request, NULL);
- if (rc)
- CDEBUG(D_INFO, "error in handling %d\n", rc);
- else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY))
- rc = -EPROTO;
-
- return rc;
-}
-
-/* Find and cancel locally locks matched by inode @bits & @mode in the resource
- * found by @fid. Found locks are added into @cancel list. Returns the amount of
- * locks added to @cancels list.
- */
-int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
- struct list_head *cancels, enum ldlm_mode mode,
- __u64 bits)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- union ldlm_policy_data policy = {};
- struct ldlm_res_id res_id;
- struct ldlm_resource *res;
- int count;
-
- /* Return, i.e. cancel nothing, only if ELC is supported (flag in
- * export) but disabled through procfs (flag in NS).
- *
- * This distinguishes from a case when ELC is not supported originally,
- * when we still want to cancel locks in advance and just cancel them
- * locally, without sending any RPC.
- */
- if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
- return 0;
-
- fid_build_reg_res_name(fid, &res_id);
- res = ldlm_resource_get(exp->exp_obd->obd_namespace,
- NULL, &res_id, 0, 0);
- if (IS_ERR(res))
- return 0;
- LDLM_RESOURCE_ADDREF(res);
- /* Initialize ibits lock policy. */
- policy.l_inodebits.bits = bits;
- count = ldlm_cancel_resource_local(res, cancels, &policy,
- mode, 0, 0, NULL);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- return count;
-}
-
-int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, struct ptlrpc_request **request)
-{
- LIST_HEAD(cancels);
- struct ptlrpc_request *req;
- int count = 0, rc;
- __u64 bits;
-
- bits = MDS_INODELOCK_UPDATE;
- if (op_data->op_attr.ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
- bits |= MDS_INODELOCK_LOOKUP;
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)))
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX, bits);
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_SETATTR);
- if (!req) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0);
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
- CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n",
- LTIME_S(op_data->op_attr.ia_mtime),
- LTIME_S(op_data->op_attr.ia_ctime));
- mdc_setattr_pack(req, op_data, ea, ealen);
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_reint(req, LUSTRE_IMP_FULL);
-
- if (rc == -ERESTARTSYS)
- rc = 0;
-
- *request = req;
-
- return rc;
-}
-
-int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, size_t datalen, umode_t mode,
- uid_t uid, gid_t gid, cfs_cap_t cap_effective,
- __u64 rdev, struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int level, rc;
- int count, resends = 0;
- struct obd_import *import = exp->exp_obd->u.cli.cl_import;
- int generation = import->imp_generation;
- LIST_HEAD(cancels);
-
- /* For case if upper layer did not alloc fid, do it now. */
- if (!fid_is_sane(&op_data->op_fid2)) {
- /*
- * mdc_fid_alloc() may return errno 1 in case of switch to new
- * sequence, handle this.
- */
- rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
- if (rc < 0)
- return rc;
- }
-
-rebuild:
- count = 0;
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)))
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_CREATE_ACL);
- if (!req) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
- data && datalen ? datalen : 0);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- /*
- * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with
- * tgt, for symlinks or lov MD data.
- */
- mdc_create_pack(req, op_data, data, datalen, mode, uid,
- gid, cap_effective, rdev);
-
- ptlrpc_request_set_replen(req);
-
- /* ask ptlrpc not to resend on EINPROGRESS since we have our own retry
- * logic here
- */
- req->rq_no_retry_einprogress = 1;
-
- if (resends) {
- req->rq_generation_set = 1;
- req->rq_import_generation = generation;
- req->rq_sent = ktime_get_real_seconds() + resends;
- }
- level = LUSTRE_IMP_FULL;
- resend:
- rc = mdc_reint(req, level);
-
- /* Resend if we were told to. */
- if (rc == -ERESTARTSYS) {
- level = LUSTRE_IMP_RECOVER;
- goto resend;
- } else if (rc == -EINPROGRESS) {
- /* Retry create infinitely until succeed or get other
- * error code.
- */
- ptlrpc_req_finished(req);
- resends++;
-
- CDEBUG(D_HA, "%s: resend:%d create on " DFID "/" DFID "\n",
- exp->exp_obd->obd_name, resends,
- PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));
-
- if (generation == import->imp_generation) {
- goto rebuild;
- } else {
- CDEBUG(D_HA, "resend cross eviction\n");
- return -EIO;
- }
- }
-
- *request = req;
- return rc;
-}
-
-int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- LIST_HEAD(cancels);
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req = *request;
- int count = 0, rc;
-
- LASSERT(!req);
-
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)))
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
- (fid_is_sane(&op_data->op_fid3)))
- count += mdc_resource_get_unused(exp, &op_data->op_fid3,
- &cancels, LCK_EX,
- MDS_INODELOCK_FULL);
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_UNLINK);
- if (!req) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_unlink_pack(req, op_data);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- obd->u.cli.cl_default_mds_easize);
- ptlrpc_request_set_replen(req);
-
- *request = req;
-
- rc = mdc_reint(req, LUSTRE_IMP_FULL);
- if (rc == -ERESTARTSYS)
- rc = 0;
- return rc;
-}
-
-int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- LIST_HEAD(cancels);
- struct ptlrpc_request *req;
- int count = 0, rc;
-
- if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
- (fid_is_sane(&op_data->op_fid2)))
- count = mdc_resource_get_unused(exp, &op_data->op_fid2,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)))
- count += mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
- if (!req) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_link_pack(req, op_data);
- ptlrpc_request_set_replen(req);
-
- rc = mdc_reint(req, LUSTRE_IMP_FULL);
- *request = req;
- if (rc == -ERESTARTSYS)
- rc = 0;
-
- return rc;
-}
-
-int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, size_t oldlen, const char *new, size_t newlen,
- struct ptlrpc_request **request)
-{
- LIST_HEAD(cancels);
- struct obd_device *obd = exp->exp_obd;
- struct ptlrpc_request *req;
- int count = 0, rc;
-
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)))
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
- (fid_is_sane(&op_data->op_fid2)))
- count += mdc_resource_get_unused(exp, &op_data->op_fid2,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
- (fid_is_sane(&op_data->op_fid3)))
- count += mdc_resource_get_unused(exp, &op_data->op_fid3,
- &cancels, LCK_EX,
- MDS_INODELOCK_LOOKUP);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID4) &&
- (fid_is_sane(&op_data->op_fid4)))
- count += mdc_resource_get_unused(exp, &op_data->op_fid4,
- &cancels, LCK_EX,
- MDS_INODELOCK_FULL);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- op_data->op_cli_flags & CLI_MIGRATE ?
- &RQF_MDS_REINT_MIGRATE : &RQF_MDS_REINT_RENAME);
- if (!req) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, oldlen + 1);
- req_capsule_set_size(&req->rq_pill, &RMF_SYMTGT, RCL_CLIENT,
- newlen + 1);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- if (op_data->op_cli_flags & CLI_MIGRATE && op_data->op_data) {
- struct md_open_data *mod = op_data->op_data;
-
- LASSERTF(mod->mod_open_req &&
- mod->mod_open_req->rq_type != LI_POISON,
- "POISONED open %p!\n", mod->mod_open_req);
-
- DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
- /*
- * We no longer want to preserve this open for replay even
- * though the open was committed. b=3632, b=3633
- */
- spin_lock(&mod->mod_open_req->rq_lock);
- mod->mod_open_req->rq_replay = 0;
- spin_unlock(&mod->mod_open_req->rq_lock);
- }
-
- if (exp_connect_cancelset(exp) && req)
- ldlm_cli_cancel_list(&cancels, count, req, 0);
-
- mdc_rename_pack(req, op_data, old, oldlen, new, newlen);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- obd->u.cli.cl_default_mds_easize);
- ptlrpc_request_set_replen(req);
-
- rc = mdc_reint(req, LUSTRE_IMP_FULL);
- *request = req;
- if (rc == -ERESTARTSYS)
- rc = 0;
-
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
deleted file mode 100644
index 8ee7b4d273b2..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ /dev/null
@@ -1,2754 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_MDC
-
-# include <linux/module.h>
-# include <linux/pagemap.h>
-# include <linux/miscdevice.h>
-# include <linux/init.h>
-# include <linux/utsname.h>
-
-#include <lustre_errno.h>
-#include <cl_object.h>
-#include <llog_swab.h>
-#include <lprocfs_status.h>
-#include <lustre_acl.h>
-#include <lustre_fid.h>
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include <lustre_kernelcomm.h>
-#include <lustre_lmv.h>
-#include <lustre_log.h>
-#include <uapi/linux/lustre/lustre_param.h>
-#include <lustre_swab.h>
-#include <obd_class.h>
-
-#include "mdc_internal.h"
-
-#define REQUEST_MINOR 244
-
-static int mdc_cleanup(struct obd_device *obd);
-
-static inline int mdc_queue_wait(struct ptlrpc_request *req)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- int rc;
-
- /* obd_get_request_slot() ensures that this client has no more
- * than cl_max_rpcs_in_flight RPCs simultaneously inf light
- * against an MDT.
- */
- rc = obd_get_request_slot(cli);
- if (rc != 0)
- return rc;
-
- rc = ptlrpc_queue_wait(req);
- obd_put_request_slot(cli);
-
- return rc;
-}
-
-static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid)
-{
- struct ptlrpc_request *req;
- struct mdt_body *body;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MDS_GETSTATUS,
- LUSTRE_MDS_VERSION, MDS_GETSTATUS);
- if (!req)
- return -ENOMEM;
-
- mdc_pack_body(req, NULL, 0, 0, -1, 0);
- req->rq_send_state = LUSTRE_IMP_FULL;
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!body) {
- rc = -EPROTO;
- goto out;
- }
-
- *rootfid = body->mbo_fid1;
- CDEBUG(D_NET,
- "root fid=" DFID ", last_committed=%llu\n",
- PFID(rootfid),
- lustre_msg_get_last_committed(req->rq_repmsg));
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-/*
- * This function now is known to always saying that it will receive 4 buffers
- * from server. Even for cases when acl_size and md_size is zero, RPC header
- * will contain 4 fields and RPC itself will contain zero size fields. This is
- * because mdt_getattr*() _always_ returns 4 fields, but if acl is not needed
- * and thus zero, it shrinks it, making zero size. The same story about
- * md_size. And this is course of problem when client waits for smaller number
- * of fields. This issue will be fixed later when client gets aware of RPC
- * layouts. --umka
- */
-static int mdc_getattr_common(struct obd_export *exp,
- struct ptlrpc_request *req)
-{
- struct req_capsule *pill = &req->rq_pill;
- struct mdt_body *body;
- void *eadata;
- int rc;
-
- /* Request message already built. */
- rc = ptlrpc_queue_wait(req);
- if (rc != 0)
- return rc;
-
- /* sanity check for the reply */
- body = req_capsule_server_get(pill, &RMF_MDT_BODY);
- if (!body)
- return -EPROTO;
-
- CDEBUG(D_NET, "mode: %o\n", body->mbo_mode);
-
- mdc_update_max_ea_from_body(exp, body);
- if (body->mbo_eadatasize != 0) {
- eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- body->mbo_eadatasize);
- if (!eadata)
- return -EPROTO;
- }
-
- return 0;
-}
-
-static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- /* Single MDS without an LMV case */
- if (op_data->op_flags & MF_GET_MDT_IDX) {
- op_data->op_mds = 0;
- return 0;
- }
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid,
- op_data->op_mode, -1, 0);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- op_data->op_mode);
- ptlrpc_request_set_replen(req);
-
- rc = mdc_getattr_common(exp, req);
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_GETATTR_NAME);
- if (!req)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid,
- op_data->op_mode, op_data->op_suppgids[0], 0);
-
- if (op_data->op_name) {
- char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
-
- LASSERT(strnlen(op_data->op_name, op_data->op_namelen) ==
- op_data->op_namelen);
- memcpy(name, op_data->op_name, op_data->op_namelen);
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- op_data->op_mode);
- ptlrpc_request_set_replen(req);
-
- rc = mdc_getattr_common(exp, req);
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static int mdc_xattr_common(struct obd_export *exp,
- const struct req_format *fmt,
- const struct lu_fid *fid,
- int opcode, u64 valid,
- const char *xattr_name, const char *input,
- int input_size, int output_size, int flags,
- __u32 suppgid, struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int xattr_namelen = 0;
- char *tmp;
- int rc;
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
- if (!req)
- return -ENOMEM;
-
- if (xattr_name) {
- xattr_namelen = strlen(xattr_name) + 1;
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- xattr_namelen);
- }
- if (input_size) {
- LASSERT(input);
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
- input_size);
- }
-
- /* Flush local XATTR locks to get rid of a possible cancel RPC */
- if (opcode == MDS_REINT && fid_is_sane(fid) &&
- exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {
- LIST_HEAD(cancels);
- int count;
-
- /* Without that packing would fail */
- if (input_size == 0)
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
- RCL_CLIENT, 0);
-
- count = mdc_resource_get_unused(exp, fid,
- &cancels, LCK_EX,
- MDS_INODELOCK_XATTR);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- } else {
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- }
-
- if (opcode == MDS_REINT) {
- struct mdt_rec_setxattr *rec;
-
- BUILD_BUG_ON(sizeof(struct mdt_rec_setxattr) !=
- sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- rec->sx_opcode = REINT_SETXATTR;
- rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid());
- rec->sx_fsgid = from_kgid(&init_user_ns, current_fsgid());
- rec->sx_cap = cfs_curproc_cap_pack();
- rec->sx_suppgid1 = suppgid;
- rec->sx_suppgid2 = -1;
- rec->sx_fid = *fid;
- rec->sx_valid = valid | OBD_MD_FLCTIME;
- rec->sx_time = ktime_get_real_seconds();
- rec->sx_size = output_size;
- rec->sx_flags = flags;
-
- } else {
- mdc_pack_body(req, fid, valid, output_size, suppgid, flags);
- }
-
- if (xattr_name) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- memcpy(tmp, xattr_name, xattr_namelen);
- }
- if (input_size) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
- memcpy(tmp, input, input_size);
- }
-
- if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER))
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
- RCL_SERVER, output_size);
- ptlrpc_request_set_replen(req);
-
- /* make rpc */
- if (opcode == MDS_REINT)
- mdc_get_mod_rpc_slot(req, NULL);
-
- rc = ptlrpc_queue_wait(req);
-
- if (opcode == MDS_REINT)
- mdc_put_mod_rpc_slot(req, NULL);
-
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *xattr_name,
- const char *input, int input_size, int output_size,
- int flags, __u32 suppgid,
- struct ptlrpc_request **request)
-{
- return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR,
- fid, MDS_REINT, valid, xattr_name,
- input, input_size, output_size, flags,
- suppgid, request);
-}
-
-static int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *xattr_name,
- const char *input, int input_size, int output_size,
- int flags, struct ptlrpc_request **request)
-{
- return mdc_xattr_common(exp, &RQF_MDS_GETXATTR,
- fid, MDS_GETXATTR, valid, xattr_name,
- input, input_size, output_size, flags,
- -1, request);
-}
-
-#ifdef CONFIG_FS_POSIX_ACL
-static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
-{
- struct req_capsule *pill = &req->rq_pill;
- struct mdt_body *body = md->body;
- struct posix_acl *acl;
- void *buf;
- int rc;
-
- if (!body->mbo_aclsize)
- return 0;
-
- buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->mbo_aclsize);
-
- if (!buf)
- return -EPROTO;
-
- acl = posix_acl_from_xattr(&init_user_ns, buf, body->mbo_aclsize);
- if (!acl)
- return 0;
-
- if (IS_ERR(acl)) {
- rc = PTR_ERR(acl);
- CERROR("convert xattr to acl: %d\n", rc);
- return rc;
- }
-
- rc = posix_acl_valid(&init_user_ns, acl);
- if (rc) {
- CERROR("validate acl: %d\n", rc);
- posix_acl_release(acl);
- return rc;
- }
-
- md->posix_acl = acl;
- return 0;
-}
-#else
-#define mdc_unpack_acl(req, md) 0
-#endif
-
-static int mdc_get_lustre_md(struct obd_export *exp,
- struct ptlrpc_request *req,
- struct obd_export *dt_exp,
- struct obd_export *md_exp,
- struct lustre_md *md)
-{
- struct req_capsule *pill = &req->rq_pill;
- int rc;
-
- LASSERT(md);
- memset(md, 0, sizeof(*md));
-
- md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
-
- if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
- if (!S_ISREG(md->body->mbo_mode)) {
- CDEBUG(D_INFO,
- "OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
- rc = -EPROTO;
- goto out;
- }
-
- if (md->body->mbo_eadatasize == 0) {
- CDEBUG(D_INFO,
- "OBD_MD_FLEASIZE set, but eadatasize 0\n");
- rc = -EPROTO;
- goto out;
- }
-
- md->layout.lb_len = md->body->mbo_eadatasize;
- md->layout.lb_buf = req_capsule_server_sized_get(pill,
- &RMF_MDT_MD,
- md->layout.lb_len);
- if (!md->layout.lb_buf) {
- rc = -EPROTO;
- goto out;
- }
- } else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
- const union lmv_mds_md *lmv;
- size_t lmv_size;
-
- if (!S_ISDIR(md->body->mbo_mode)) {
- CDEBUG(D_INFO,
- "OBD_MD_FLDIREA set, should be a directory, but is not\n");
- rc = -EPROTO;
- goto out;
- }
-
- lmv_size = md->body->mbo_eadatasize;
- if (!lmv_size) {
- CDEBUG(D_INFO,
- "OBD_MD_FLDIREA is set, but eadatasize 0\n");
- return -EPROTO;
- }
- if (md->body->mbo_valid & OBD_MD_MEA) {
- lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- lmv_size);
- if (!lmv) {
- rc = -EPROTO;
- goto out;
- }
-
- rc = md_unpackmd(md_exp, &md->lmv, lmv, lmv_size);
- if (rc < 0)
- goto out;
-
- if (rc < (typeof(rc))sizeof(*md->lmv)) {
- CDEBUG(D_INFO,
- "size too small: rc < sizeof(*md->lmv) (%d < %d)\n",
- rc, (int)sizeof(*md->lmv));
- rc = -EPROTO;
- goto out;
- }
- }
- }
- rc = 0;
-
- if (md->body->mbo_valid & OBD_MD_FLACL) {
- /* for ACL, it's possible that FLACL is set but aclsize is zero.
- * only when aclsize != 0 there's an actual segment for ACL
- * in reply buffer.
- */
- if (md->body->mbo_aclsize) {
- rc = mdc_unpack_acl(req, md);
- if (rc)
- goto out;
-#ifdef CONFIG_FS_POSIX_ACL
- } else {
- md->posix_acl = NULL;
-#endif
- }
- }
-
-out:
- if (rc) {
-#ifdef CONFIG_FS_POSIX_ACL
- posix_acl_release(md->posix_acl);
-#endif
- }
- return rc;
-}
-
-static int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
-{
- return 0;
-}
-
-void mdc_replay_open(struct ptlrpc_request *req)
-{
- struct md_open_data *mod = req->rq_cb_data;
- struct ptlrpc_request *close_req;
- struct obd_client_handle *och;
- struct lustre_handle old;
- struct mdt_body *body;
-
- if (!mod) {
- DEBUG_REQ(D_ERROR, req,
- "Can't properly replay without open data.");
- return;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- och = mod->mod_och;
- if (och) {
- struct lustre_handle *file_fh;
-
- LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);
-
- file_fh = &och->och_fh;
- CDEBUG(D_HA, "updating handle from %#llx to %#llx\n",
- file_fh->cookie, body->mbo_handle.cookie);
- old = *file_fh;
- *file_fh = body->mbo_handle;
- }
- close_req = mod->mod_close_req;
- if (close_req) {
- __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
- struct mdt_ioepoch *epoch;
-
- LASSERT(opc == MDS_CLOSE);
- epoch = req_capsule_client_get(&close_req->rq_pill,
- &RMF_MDT_EPOCH);
- LASSERT(epoch);
-
- if (och)
- LASSERT(!memcmp(&old, &epoch->mio_handle, sizeof(old)));
- DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
- epoch->mio_handle = body->mbo_handle;
- }
-}
-
-void mdc_commit_open(struct ptlrpc_request *req)
-{
- struct md_open_data *mod = req->rq_cb_data;
-
- if (!mod)
- return;
-
- /**
- * No need to touch md_open_data::mod_och, it holds a reference on
- * \var mod and will zero references to each other, \var mod will be
- * freed after that when md_open_data::mod_och will put the reference.
- */
-
- /**
- * Do not let open request to disappear as it still may be needed
- * for close rpc to happen (it may happen on evict only, otherwise
- * ptlrpc_request::rq_replay does not let mdc_commit_open() to be
- * called), just mark this rpc as committed to distinguish these 2
- * cases, see mdc_close() for details. The open request reference will
- * be put along with freeing \var mod.
- */
- ptlrpc_request_addref(req);
- spin_lock(&req->rq_lock);
- req->rq_committed = 1;
- spin_unlock(&req->rq_lock);
- req->rq_cb_data = NULL;
- obd_mod_put(mod);
-}
-
-int mdc_set_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och,
- struct lookup_intent *it)
-{
- struct md_open_data *mod;
- struct mdt_rec_create *rec;
- struct mdt_body *body;
- struct ptlrpc_request *open_req = it->it_request;
- struct obd_import *imp = open_req->rq_import;
-
- if (!open_req->rq_replay)
- return 0;
-
- rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
- body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
- LASSERT(rec);
- /* Incoming message in my byte order (it's been swabbed). */
- /* Outgoing messages always in my byte order. */
- LASSERT(body);
-
- /* Only if the import is replayable, we set replay_open data */
- if (och && imp->imp_replayable) {
- mod = obd_mod_alloc();
- if (!mod) {
- DEBUG_REQ(D_ERROR, open_req,
- "Can't allocate md_open_data");
- return 0;
- }
-
- /**
- * Take a reference on \var mod, to be freed on mdc_close().
- * It protects \var mod from being freed on eviction (commit
- * callback is called despite rq_replay flag).
- * Another reference for \var och.
- */
- obd_mod_get(mod);
- obd_mod_get(mod);
-
- spin_lock(&open_req->rq_lock);
- och->och_mod = mod;
- mod->mod_och = och;
- mod->mod_is_create = it_disposition(it, DISP_OPEN_CREATE) ||
- it_disposition(it, DISP_OPEN_STRIPE);
- mod->mod_open_req = open_req;
- open_req->rq_cb_data = mod;
- open_req->rq_commit_cb = mdc_commit_open;
- spin_unlock(&open_req->rq_lock);
- }
-
- rec->cr_fid2 = body->mbo_fid1;
- rec->cr_ioepoch = body->mbo_ioepoch;
- rec->cr_old_handle.cookie = body->mbo_handle.cookie;
- open_req->rq_replay_cb = mdc_replay_open;
- if (!fid_is_sane(&body->mbo_fid1)) {
- DEBUG_REQ(D_ERROR, open_req,
- "Saving replay request with insane fid");
- LBUG();
- }
-
- DEBUG_REQ(D_RPCTRACE, open_req, "Set up open replay data");
- return 0;
-}
-
-static void mdc_free_open(struct md_open_data *mod)
-{
- int committed = 0;
-
- if (mod->mod_is_create == 0 &&
- imp_connect_disp_stripe(mod->mod_open_req->rq_import))
- committed = 1;
-
- /*
- * No reason to asssert here if the open request has
- * rq_replay == 1. It means that mdc_close failed, and
- * close request wasn`t sent. It is not fatal to client.
- * The worst thing is eviction if the client gets open lock
- */
- DEBUG_REQ(D_RPCTRACE, mod->mod_open_req,
- "free open request rq_replay = %d\n",
- mod->mod_open_req->rq_replay);
-
- ptlrpc_request_committed(mod->mod_open_req, committed);
- if (mod->mod_close_req)
- ptlrpc_request_committed(mod->mod_close_req, committed);
-}
-
-static int mdc_clear_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och)
-{
- struct md_open_data *mod = och->och_mod;
-
- /**
- * It is possible to not have \var mod in a case of eviction between
- * lookup and ll_file_open().
- **/
- if (!mod)
- return 0;
-
- LASSERT(mod != LP_POISON);
- LASSERT(mod->mod_open_req);
- mdc_free_open(mod);
-
- mod->mod_och = NULL;
- och->och_mod = NULL;
- obd_mod_put(mod);
-
- return 0;
-}
-
-static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod, struct ptlrpc_request **request)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- struct req_format *req_fmt;
- int rc;
- int saved_rc = 0;
-
- if (op_data->op_bias & MDS_HSM_RELEASE) {
- req_fmt = &RQF_MDS_INTENT_CLOSE;
-
- /* allocate a FID for volatile file */
- rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
- if (rc < 0) {
- CERROR("%s: " DFID " failed to allocate FID: %d\n",
- obd->obd_name, PFID(&op_data->op_fid1), rc);
- /* save the errcode and proceed to close */
- saved_rc = rc;
- }
- } else if (op_data->op_bias & MDS_CLOSE_LAYOUT_SWAP) {
- req_fmt = &RQF_MDS_INTENT_CLOSE;
- } else {
- req_fmt = &RQF_MDS_CLOSE;
- }
-
- *request = NULL;
- if (OBD_FAIL_CHECK(OBD_FAIL_MDC_CLOSE))
- req = NULL;
- else
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
-
- /* Ensure that this close's handle is fixed up during replay. */
- if (likely(mod)) {
- LASSERTF(mod->mod_open_req &&
- mod->mod_open_req->rq_type != LI_POISON,
- "POISONED open %p!\n", mod->mod_open_req);
-
- mod->mod_close_req = req;
-
- DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
- /* We no longer want to preserve this open for replay even
- * though the open was committed. b=3632, b=3633
- */
- spin_lock(&mod->mod_open_req->rq_lock);
- mod->mod_open_req->rq_replay = 0;
- spin_unlock(&mod->mod_open_req->rq_lock);
- } else {
- CDEBUG(D_HA,
- "couldn't find open req; expecting close error\n");
- }
- if (!req) {
- /*
- * TODO: repeat close after errors
- */
- CWARN("%s: close of FID " DFID " failed, file reference will be dropped when this client unmounts or is evicted\n",
- obd->obd_name, PFID(&op_data->op_fid1));
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
- if (rc) {
- ptlrpc_request_free(req);
- req = NULL;
- goto out;
- }
-
- /*
- * To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
- * portal whose threads are not taking any DLM locks and are therefore
- * always progressing
- */
- req->rq_request_portal = MDS_READPAGE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- mdc_close_pack(req, op_data);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- obd->u.cli.cl_default_mds_easize);
-
- ptlrpc_request_set_replen(req);
-
- mdc_get_mod_rpc_slot(req, NULL);
- rc = ptlrpc_queue_wait(req);
- mdc_put_mod_rpc_slot(req, NULL);
-
- if (!req->rq_repmsg) {
- CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
- req->rq_status);
- if (rc == 0)
- rc = req->rq_status ?: -EIO;
- } else if (rc == 0 || rc == -EAGAIN) {
- struct mdt_body *body;
-
- rc = lustre_msg_get_status(req->rq_repmsg);
- if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
- DEBUG_REQ(D_ERROR, req,
- "type == PTL_RPC_MSG_ERR, err = %d", rc);
- if (rc > 0)
- rc = -rc;
- }
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!body)
- rc = -EPROTO;
- } else if (rc == -ESTALE) {
- /**
- * it can be allowed error after 3633 if open was committed and
- * server failed before close was sent. Let's check if mod
- * exists and return no error in that case
- */
- if (mod) {
- DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
- if (mod->mod_open_req->rq_committed)
- rc = 0;
- }
- }
-
-out:
- if (mod) {
- if (rc != 0)
- mod->mod_close_req = NULL;
- /* Since now, mod is accessed through open_req only,
- * thus close req does not keep a reference on mod anymore.
- */
- obd_mod_put(mod);
- }
- *request = req;
- return rc < 0 ? rc : saved_rc;
-}
-
-static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
- u64 offset, struct page **pages, int npages,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_bulk_desc *desc;
- struct ptlrpc_request *req;
- wait_queue_head_t waitq;
- int resends = 0;
- int rc;
- int i;
-
- *request = NULL;
- init_waitqueue_head(&waitq);
-
-restart_bulk:
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- req->rq_request_portal = MDS_READPAGE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- desc = ptlrpc_prep_bulk_imp(req, npages, 1,
- PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
- MDS_BULK_PORTAL,
- &ptlrpc_bulk_kiov_pin_ops);
- if (!desc) {
- ptlrpc_request_free(req);
- return -ENOMEM;
- }
-
- /* NB req now owns desc and will free it when it gets freed */
- for (i = 0; i < npages; i++)
- desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
-
- mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc) {
- ptlrpc_req_finished(req);
- if (rc != -ETIMEDOUT)
- return rc;
-
- resends++;
- if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
- CERROR("%s: too many resend retries: rc = %d\n",
- exp->exp_obd->obd_name, -EIO);
- return -EIO;
- }
- wait_event_idle_timeout(waitq, 0, resends * HZ);
-
- goto restart_bulk;
- }
-
- rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
- req->rq_bulk->bd_nob_transferred);
- if (rc < 0) {
- ptlrpc_req_finished(req);
- return rc;
- }
-
- if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
- CERROR("%s: unexpected bytes transferred: %d (%ld expected)\n",
- exp->exp_obd->obd_name, req->rq_bulk->bd_nob_transferred,
- PAGE_SIZE * npages);
- ptlrpc_req_finished(req);
- return -EPROTO;
- }
-
- *request = req;
- return 0;
-}
-
-static void mdc_release_page(struct page *page, int remove)
-{
- if (remove) {
- lock_page(page);
- if (likely(page->mapping))
- truncate_complete_page(page->mapping, page);
- unlock_page(page);
- }
- put_page(page);
-}
-
-static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
- __u64 *start, __u64 *end, int hash64)
-{
- /*
- * Complement of hash is used as an index so that
- * radix_tree_gang_lookup() can be used to find a page with starting
- * hash _smaller_ than one we are looking for.
- */
- unsigned long offset = hash_x_index(*hash, hash64);
- struct page *page;
- int found;
-
- xa_lock_irq(&mapping->i_pages);
- found = radix_tree_gang_lookup(&mapping->i_pages,
- (void **)&page, offset, 1);
- if (found > 0 && !radix_tree_exceptional_entry(page)) {
- struct lu_dirpage *dp;
-
- get_page(page);
- xa_unlock_irq(&mapping->i_pages);
- /*
- * In contrast to find_lock_page() we are sure that directory
- * page cannot be truncated (while DLM lock is held) and,
- * hence, can avoid restart.
- *
- * In fact, page cannot be locked here at all, because
- * mdc_read_page_remote does synchronous io.
- */
- wait_on_page_locked(page);
- if (PageUptodate(page)) {
- dp = kmap(page);
- if (BITS_PER_LONG == 32 && hash64) {
- *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
- *end = le64_to_cpu(dp->ldp_hash_end) >> 32;
- *hash = *hash >> 32;
- } else {
- *start = le64_to_cpu(dp->ldp_hash_start);
- *end = le64_to_cpu(dp->ldp_hash_end);
- }
- if (unlikely(*start == 1 && *hash == 0))
- *hash = *start;
- else
- LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n",
- *start, *end, *hash);
- CDEBUG(D_VFSTRACE, "offset %lx [%#llx %#llx], hash %#llx\n",
- offset, *start, *end, *hash);
- if (*hash > *end) {
- kunmap(page);
- mdc_release_page(page, 0);
- page = NULL;
- } else if (*end != *start && *hash == *end) {
- /*
- * upon hash collision, remove this page,
- * otherwise put page reference, and
- * mdc_read_page_remote() will issue RPC to
- * fetch the page we want.
- */
- kunmap(page);
- mdc_release_page(page,
- le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
- page = NULL;
- }
- } else {
- put_page(page);
- page = ERR_PTR(-EIO);
- }
- } else {
- xa_unlock_irq(&mapping->i_pages);
- page = NULL;
- }
- return page;
-}
-
-/*
- * Adjust a set of pages, each page containing an array of lu_dirpages,
- * so that each page can be used as a single logical lu_dirpage.
- *
- * A lu_dirpage is laid out as follows, where s = ldp_hash_start,
- * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a
- * struct lu_dirent. It has size up to LU_PAGE_SIZE. The ldp_hash_end
- * value is used as a cookie to request the next lu_dirpage in a
- * directory listing that spans multiple pages (two in this example):
- * ________
- * | |
- * .|--------v------- -----.
- * |s|e|f|p|ent|ent| ... |ent|
- * '--|-------------- -----' Each PAGE contains a single
- * '------. lu_dirpage.
- * .---------v------- -----.
- * |s|e|f|p|ent| 0 | ... | 0 |
- * '----------------- -----'
- *
- * However, on hosts where the native VM page size (PAGE_SIZE) is
- * larger than LU_PAGE_SIZE, a single host page may contain multiple
- * lu_dirpages. After reading the lu_dirpages from the MDS, the
- * ldp_hash_end of the first lu_dirpage refers to the one immediately
- * after it in the same PAGE (arrows simplified for brevity, but
- * in general e0==s1, e1==s2, etc.):
- *
- * .-------------------- -----.
- * |s0|e0|f0|p|ent|ent| ... |ent|
- * |---v---------------- -----|
- * |s1|e1|f1|p|ent|ent| ... |ent|
- * |---v---------------- -----| Here, each PAGE contains
- * ... multiple lu_dirpages.
- * |---v---------------- -----|
- * |s'|e'|f'|p|ent|ent| ... |ent|
- * '---|---------------- -----'
- * v
- * .----------------------------.
- * | next PAGE |
- *
- * This structure is transformed into a single logical lu_dirpage as follows:
- *
- * - Replace e0 with e' so the request for the next lu_dirpage gets the page
- * labeled 'next PAGE'.
- *
- * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
- * a hash collision with the next page exists.
- *
- * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
- * to the first entry of the next lu_dirpage.
- */
-#if PAGE_SIZE > LU_PAGE_SIZE
-static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs)
-{
- int i;
-
- for (i = 0; i < cfs_pgs; i++) {
- struct lu_dirpage *dp = kmap(pages[i]);
- __u64 hash_end = le64_to_cpu(dp->ldp_hash_end);
- __u32 flags = le32_to_cpu(dp->ldp_flags);
- struct lu_dirpage *first = dp;
-
- while (--lu_pgs > 0) {
- struct lu_dirent *end_dirent = NULL;
- struct lu_dirent *ent;
-
- for (ent = lu_dirent_start(dp); ent;
- ent = lu_dirent_next(ent))
- end_dirent = ent;
-
- /* Advance dp to next lu_dirpage. */
- dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
-
- /* Check if we've reached the end of the CFS_PAGE. */
- if (!((unsigned long)dp & ~PAGE_MASK))
- break;
-
- /* Save the hash and flags of this lu_dirpage. */
- hash_end = le64_to_cpu(dp->ldp_hash_end);
- flags = le32_to_cpu(dp->ldp_flags);
-
- /* Check if lu_dirpage contains no entries. */
- if (!end_dirent)
- break;
-
- /*
- * Enlarge the end entry lde_reclen from 0 to
- * first entry of next lu_dirpage.
- */
- LASSERT(!le16_to_cpu(end_dirent->lde_reclen));
- end_dirent->lde_reclen =
- cpu_to_le16((char *)(dp->ldp_entries) -
- (char *)end_dirent);
- }
-
- first->ldp_hash_end = hash_end;
- first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
- first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
-
- kunmap(pages[i]);
- }
- LASSERTF(lu_pgs == 0, "left = %d", lu_pgs);
-}
-#else
-#define mdc_adjust_dirpages(pages, cfs_pgs, lu_pgs) do {} while (0)
-#endif /* PAGE_SIZE > LU_PAGE_SIZE */
-
-/* parameters for readdir page */
-struct readpage_param {
- struct md_op_data *rp_mod;
- __u64 rp_off;
- int rp_hash64;
- struct obd_export *rp_exp;
- struct md_callback *rp_cb;
-};
-
-/**
- * Read pages from server.
- *
- * Page in MDS_READPAGE RPC is packed in LU_PAGE_SIZE, and each page contains
- * a header lu_dirpage which describes the start/end hash, and whether this
- * page is empty (contains no dir entry) or hash collide with next page.
- * After client receives reply, several pages will be integrated into dir page
- * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted.
- **/
-static int mdc_read_page_remote(void *data, struct page *page0)
-{
- struct readpage_param *rp = data;
- struct page **page_pool;
- struct page *page;
- struct lu_dirpage *dp;
- int rd_pgs = 0; /* number of pages read actually */
- int npages;
- struct md_op_data *op_data = rp->rp_mod;
- struct ptlrpc_request *req;
- int max_pages = op_data->op_max_pages;
- struct inode *inode;
- struct lu_fid *fid;
- int i;
- int rc;
-
- LASSERT(max_pages > 0 && max_pages <= PTLRPC_MAX_BRW_PAGES);
- inode = op_data->op_data;
- fid = &op_data->op_fid1;
- LASSERT(inode);
-
- page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
- if (page_pool) {
- page_pool[0] = page0;
- } else {
- page_pool = &page0;
- max_pages = 1;
- }
-
- for (npages = 1; npages < max_pages; npages++) {
- page = page_cache_alloc(inode->i_mapping);
- if (!page)
- break;
- page_pool[npages] = page;
- }
-
- rc = mdc_getpage(rp->rp_exp, fid, rp->rp_off, page_pool, npages, &req);
- if (!rc) {
- int lu_pgs = req->rq_bulk->bd_nob_transferred;
-
- rd_pgs = (req->rq_bulk->bd_nob_transferred +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
- lu_pgs >>= LU_PAGE_SHIFT;
- LASSERT(!(req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
-
- CDEBUG(D_INODE, "read %d(%d) pages\n", rd_pgs, lu_pgs);
-
- mdc_adjust_dirpages(page_pool, rd_pgs, lu_pgs);
-
- SetPageUptodate(page0);
- }
-
- unlock_page(page0);
- ptlrpc_req_finished(req);
- CDEBUG(D_CACHE, "read %d/%d pages\n", rd_pgs, npages);
- for (i = 1; i < npages; i++) {
- unsigned long offset;
- __u64 hash;
- int ret;
-
- page = page_pool[i];
-
- if (rc < 0 || i >= rd_pgs) {
- put_page(page);
- continue;
- }
-
- SetPageUptodate(page);
-
- dp = kmap(page);
- hash = le64_to_cpu(dp->ldp_hash_start);
- kunmap(page);
-
- offset = hash_x_index(hash, rp->rp_hash64);
-
- prefetchw(&page->flags);
- ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
- GFP_KERNEL);
- if (!ret)
- unlock_page(page);
- else
- CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: rc = %d\n",
- offset, ret);
- put_page(page);
- }
-
- if (page_pool != &page0)
- kfree(page_pool);
-
- return rc;
-}
-
-/**
- * Read dir page from cache first, if it can not find it, read it from
- * server and add into the cache.
- *
- * \param[in] exp MDC export
- * \param[in] op_data client MD stack parameters, transferring parameters
- * between different layers on client MD stack.
- * \param[in] cb_op callback required for ldlm lock enqueue during
- * read page
- * \param[in] hash_offset the hash offset of the page to be read
- * \param[in] ppage the page to be read
- *
- * retval = 0 get the page successfully
- * errno(<0) get the page failed
- */
-static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
- struct md_callback *cb_op, __u64 hash_offset,
- struct page **ppage)
-{
- struct lookup_intent it = { .it_op = IT_READDIR };
- struct page *page;
- struct inode *dir = op_data->op_data;
- struct address_space *mapping;
- struct lu_dirpage *dp;
- __u64 start = 0;
- __u64 end = 0;
- struct lustre_handle lockh;
- struct ptlrpc_request *enq_req = NULL;
- struct readpage_param rp_param;
- int rc;
-
- *ppage = NULL;
-
- LASSERT(dir);
- mapping = dir->i_mapping;
-
- rc = mdc_intent_lock(exp, op_data, &it, &enq_req,
- cb_op->md_blocking_ast, 0);
- if (enq_req)
- ptlrpc_req_finished(enq_req);
-
- if (rc < 0) {
- CERROR("%s: " DFID " lock enqueue fails: rc = %d\n",
- exp->exp_obd->obd_name, PFID(&op_data->op_fid1), rc);
- return rc;
- }
-
- rc = 0;
- lockh.cookie = it.it_lock_handle;
- mdc_set_lock_data(exp, &lockh, dir, NULL);
-
- rp_param.rp_off = hash_offset;
- rp_param.rp_hash64 = op_data->op_cli_flags & CLI_HASH64;
- page = mdc_page_locate(mapping, &rp_param.rp_off, &start, &end,
- rp_param.rp_hash64);
- if (IS_ERR(page)) {
- CDEBUG(D_INFO, "%s: dir page locate: " DFID " at %llu: rc %ld\n",
- exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
- rp_param.rp_off, PTR_ERR(page));
- rc = PTR_ERR(page);
- goto out_unlock;
- } else if (page) {
- /*
- * XXX nikita: not entirely correct handling of a corner case:
- * suppose hash chain of entries with hash value HASH crosses
- * border between pages P0 and P1. First both P0 and P1 are
- * cached, seekdir() is called for some entry from the P0 part
- * of the chain. Later P0 goes out of cache. telldir(HASH)
- * happens and finds P1, as it starts with matching hash
- * value. Remaining entries from P0 part of the chain are
- * skipped. (Is that really a bug?)
- *
- * Possible solutions: 0. don't cache P1 is such case, handle
- * it as an "overflow" page. 1. invalidate all pages at
- * once. 2. use HASH|1 as an index for P1.
- */
- goto hash_collision;
- }
-
- rp_param.rp_exp = exp;
- rp_param.rp_mod = op_data;
- page = read_cache_page(mapping,
- hash_x_index(rp_param.rp_off,
- rp_param.rp_hash64),
- mdc_read_page_remote, &rp_param);
- if (IS_ERR(page)) {
- CERROR("%s: read cache page: " DFID " at %llu: rc %ld\n",
- exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
- rp_param.rp_off, PTR_ERR(page));
- rc = PTR_ERR(page);
- goto out_unlock;
- }
-
- wait_on_page_locked(page);
- (void)kmap(page);
- if (!PageUptodate(page)) {
- CERROR("%s: page not updated: " DFID " at %llu: rc %d\n",
- exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
- rp_param.rp_off, -5);
- goto fail;
- }
- if (!PageChecked(page))
- SetPageChecked(page);
- if (PageError(page)) {
- CERROR("%s: page error: " DFID " at %llu: rc %d\n",
- exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
- rp_param.rp_off, -5);
- goto fail;
- }
-
-hash_collision:
- dp = page_address(page);
- if (BITS_PER_LONG == 32 && rp_param.rp_hash64) {
- start = le64_to_cpu(dp->ldp_hash_start) >> 32;
- end = le64_to_cpu(dp->ldp_hash_end) >> 32;
- rp_param.rp_off = hash_offset >> 32;
- } else {
- start = le64_to_cpu(dp->ldp_hash_start);
- end = le64_to_cpu(dp->ldp_hash_end);
- rp_param.rp_off = hash_offset;
- }
- if (end == start) {
- LASSERT(start == rp_param.rp_off);
- CWARN("Page-wide hash collision: %#lx\n", (unsigned long)end);
-#if BITS_PER_LONG == 32
- CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
- le64_to_cpu(dp->ldp_hash_start),
- le64_to_cpu(dp->ldp_hash_end), hash_offset);
-#endif
- /*
- * Fetch whole overflow chain...
- *
- * XXX not yet.
- */
- goto fail;
- }
- *ppage = page;
-out_unlock:
- ldlm_lock_decref(&lockh, it.it_lock_mode);
- return rc;
-fail:
- kunmap(page);
- mdc_release_page(page, 1);
- rc = -EIO;
- goto out_unlock;
-}
-
-static int mdc_statfs(const struct lu_env *env,
- struct obd_export *exp, struct obd_statfs *osfs,
- __u64 max_age, __u32 flags)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- struct obd_statfs *msfs;
- struct obd_import *imp = NULL;
- int rc;
-
- /*
- * Since the request might also come from lprocfs, so we need
- * sync this with client_disconnect_export Bug15684
- */
- down_read(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import)
- imp = class_import_get(obd->u.cli.cl_import);
- up_read(&obd->u.cli.cl_sem);
- if (!imp)
- return -ENODEV;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS,
- LUSTRE_MDS_VERSION, MDS_STATFS);
- if (!req) {
- rc = -ENOMEM;
- goto output;
- }
-
- ptlrpc_request_set_replen(req);
-
- if (flags & OBD_STATFS_NODELAY) {
- /* procfs requests not want stay in wait for avoid deadlock */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
-
- rc = ptlrpc_queue_wait(req);
- if (rc) {
- /* check connection error first */
- if (imp->imp_connect_error)
- rc = imp->imp_connect_error;
- goto out;
- }
-
- msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (!msfs) {
- rc = -EPROTO;
- goto out;
- }
-
- *osfs = *msfs;
-out:
- ptlrpc_req_finished(req);
-output:
- class_import_put(imp);
- return rc;
-}
-
-static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
-{
- __u32 keylen, vallen;
- void *key;
- int rc;
-
- if (gf->gf_pathlen > PATH_MAX)
- return -ENAMETOOLONG;
- if (gf->gf_pathlen < 2)
- return -EOVERFLOW;
-
- /* Key is KEY_FID2PATH + getinfo_fid2path description */
- keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
- key = kzalloc(keylen, GFP_NOFS);
- if (!key)
- return -ENOMEM;
- memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
- memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
-
- CDEBUG(D_IOCTL, "path get " DFID " from %llu #%d\n",
- PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);
-
- if (!fid_is_sane(&gf->gf_fid)) {
- rc = -EINVAL;
- goto out;
- }
-
- /* Val is struct getinfo_fid2path result plus path */
- vallen = sizeof(*gf) + gf->gf_pathlen;
-
- rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf);
- if (rc != 0 && rc != -EREMOTE)
- goto out;
-
- if (vallen <= sizeof(*gf)) {
- rc = -EPROTO;
- goto out;
- } else if (vallen > sizeof(*gf) + gf->gf_pathlen) {
- rc = -EOVERFLOW;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "path got " DFID " from %llu #%d: %s\n",
- PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno,
- gf->gf_pathlen < 512 ? gf->gf_path :
- /* only log the last 512 characters of the path */
- gf->gf_path + gf->gf_pathlen - 512);
-
-out:
- kfree(key);
- return rc;
-}
-
-static int mdc_ioc_hsm_progress(struct obd_export *exp,
- struct hsm_progress_kernel *hpk)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- struct hsm_progress_kernel *req_hpk;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
- LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
- if (!req) {
- rc = -ENOMEM;
- goto out;
- }
-
- mdc_pack_body(req, NULL, 0, 0, -1, 0);
-
- /* Copy hsm_progress struct */
- req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
- if (!req_hpk) {
- rc = -EPROTO;
- goto out;
- }
-
- *req_hpk = *hpk;
- req_hpk->hpk_errval = lustre_errno_hton(hpk->hpk_errval);
-
- ptlrpc_request_set_replen(req);
-
- mdc_get_mod_rpc_slot(req, NULL);
- rc = ptlrpc_queue_wait(req);
- mdc_put_mod_rpc_slot(req, NULL);
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
-{
- __u32 *archive_mask;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER,
- LUSTRE_MDS_VERSION,
- MDS_HSM_CT_REGISTER);
- if (!req) {
- rc = -ENOMEM;
- goto out;
- }
-
- mdc_pack_body(req, NULL, 0, 0, -1, 0);
-
- /* Copy hsm_progress struct */
- archive_mask = req_capsule_client_get(&req->rq_pill,
- &RMF_MDS_HSM_ARCHIVE);
- if (!archive_mask) {
- rc = -EPROTO;
- goto out;
- }
-
- *archive_mask = archives;
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_current_action(struct obd_export *exp,
- struct md_op_data *op_data)
-{
- struct hsm_current_action *hca = op_data->op_data;
- struct hsm_current_action *req_hca;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_HSM_ACTION);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, 0, 0,
- op_data->op_suppgids[0], 0);
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
- if (rc)
- goto out;
-
- req_hca = req_capsule_server_get(&req->rq_pill,
- &RMF_MDS_HSM_CURRENT_ACTION);
- if (!req_hca) {
- rc = -EPROTO;
- goto out;
- }
-
- *hca = *req_hca;
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
- LUSTRE_MDS_VERSION,
- MDS_HSM_CT_UNREGISTER);
- if (!req) {
- rc = -ENOMEM;
- goto out;
- }
-
- mdc_pack_body(req, NULL, 0, 0, -1, 0);
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_state_get(struct obd_export *exp,
- struct md_op_data *op_data)
-{
- struct hsm_user_state *hus = op_data->op_data;
- struct hsm_user_state *req_hus;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_HSM_STATE_GET);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
- if (rc != 0) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, 0, 0,
- op_data->op_suppgids[0], 0);
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
- if (rc)
- goto out;
-
- req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE);
- if (!req_hus) {
- rc = -EPROTO;
- goto out;
- }
-
- *hus = *req_hus;
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_state_set(struct obd_export *exp,
- struct md_op_data *op_data)
-{
- struct hsm_state_set *hss = op_data->op_data;
- struct hsm_state_set *req_hss;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_HSM_STATE_SET);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, 0, 0,
- op_data->op_suppgids[0], 0);
-
- /* Copy states */
- req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET);
- if (!req_hss) {
- rc = -EPROTO;
- goto out;
- }
- *req_hss = *hss;
-
- ptlrpc_request_set_replen(req);
-
- mdc_get_mod_rpc_slot(req, NULL);
- rc = ptlrpc_queue_wait(req);
- mdc_put_mod_rpc_slot(req, NULL);
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_request(struct obd_export *exp,
- struct hsm_user_request *hur)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- struct ptlrpc_request *req;
- struct hsm_request *req_hr;
- struct hsm_user_item *req_hui;
- char *req_opaque;
- int rc;
-
- req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
- if (!req) {
- rc = -ENOMEM;
- goto out;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM, RCL_CLIENT,
- hur->hur_request.hr_itemcount
- * sizeof(struct hsm_user_item));
- req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, RCL_CLIENT,
- hur->hur_request.hr_data_len);
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, NULL, 0, 0, -1, 0);
-
- /* Copy hsm_request struct */
- req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
- if (!req_hr) {
- rc = -EPROTO;
- goto out;
- }
- *req_hr = hur->hur_request;
-
- /* Copy hsm_user_item structs */
- req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM);
- if (!req_hui) {
- rc = -EPROTO;
- goto out;
- }
- memcpy(req_hui, hur->hur_user_item,
- hur->hur_request.hr_itemcount * sizeof(struct hsm_user_item));
-
- /* Copy opaque field */
- req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA);
- if (!req_opaque) {
- rc = -EPROTO;
- goto out;
- }
- memcpy(req_opaque, hur_data(hur), hur->hur_request.hr_data_len);
-
- ptlrpc_request_set_replen(req);
-
- mdc_get_mod_rpc_slot(req, NULL);
- rc = ptlrpc_queue_wait(req);
- mdc_put_mod_rpc_slot(req, NULL);
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static struct kuc_hdr *changelog_kuc_hdr(char *buf, size_t len, u32 flags)
-{
- struct kuc_hdr *lh = (struct kuc_hdr *)buf;
-
- LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
-
- lh->kuc_magic = KUC_MAGIC;
- lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
- lh->kuc_flags = flags;
- lh->kuc_msgtype = CL_RECORD;
- lh->kuc_msglen = len;
- return lh;
-}
-
-struct changelog_show {
- __u64 cs_startrec;
- enum changelog_send_flag cs_flags;
- struct file *cs_fp;
- char *cs_buf;
- struct obd_device *cs_obd;
-};
-
-static inline char *cs_obd_name(struct changelog_show *cs)
-{
- return cs->cs_obd->obd_name;
-}
-
-static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh,
- struct llog_rec_hdr *hdr, void *data)
-{
- struct changelog_show *cs = data;
- struct llog_changelog_rec *rec = (struct llog_changelog_rec *)hdr;
- struct kuc_hdr *lh;
- size_t len;
- int rc;
-
- if (rec->cr_hdr.lrh_type != CHANGELOG_REC) {
- rc = -EINVAL;
- CERROR("%s: not a changelog rec %x/%d: rc = %d\n",
- cs_obd_name(cs), rec->cr_hdr.lrh_type,
- rec->cr.cr_type, rc);
- return rc;
- }
-
- if (rec->cr.cr_index < cs->cs_startrec) {
- /* Skip entries earlier than what we are interested in */
- CDEBUG(D_HSM, "rec=%llu start=%llu\n",
- rec->cr.cr_index, cs->cs_startrec);
- return 0;
- }
-
- CDEBUG(D_HSM, "%llu %02d%-5s %llu 0x%x t=" DFID " p=" DFID
- " %.*s\n", rec->cr.cr_index, rec->cr.cr_type,
- changelog_type2str(rec->cr.cr_type), rec->cr.cr_time,
- rec->cr.cr_flags & CLF_FLAGMASK,
- PFID(&rec->cr.cr_tfid), PFID(&rec->cr.cr_pfid),
- rec->cr.cr_namelen, changelog_rec_name(&rec->cr));
-
- len = sizeof(*lh) + changelog_rec_size(&rec->cr) + rec->cr.cr_namelen;
-
- /* Set up the message */
- lh = changelog_kuc_hdr(cs->cs_buf, len, cs->cs_flags);
- memcpy(lh + 1, &rec->cr, len - sizeof(*lh));
-
- rc = libcfs_kkuc_msg_put(cs->cs_fp, lh);
- CDEBUG(D_HSM, "kucmsg fp %p len %zu rc %d\n", cs->cs_fp, len, rc);
-
- return rc;
-}
-
-static int mdc_changelog_send_thread(void *csdata)
-{
- enum llog_flag flags = LLOG_F_IS_CAT;
- struct changelog_show *cs = csdata;
- struct llog_ctxt *ctxt = NULL;
- struct llog_handle *llh = NULL;
- struct kuc_hdr *kuch;
- int rc;
-
- CDEBUG(D_HSM, "changelog to fp=%p start %llu\n",
- cs->cs_fp, cs->cs_startrec);
-
- cs->cs_buf = kzalloc(KUC_CHANGELOG_MSG_MAXSIZE, GFP_NOFS);
- if (!cs->cs_buf) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* Set up the remote catalog handle */
- ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT);
- if (!ctxt) {
- rc = -ENOENT;
- goto out;
- }
- rc = llog_open(NULL, ctxt, &llh, NULL, CHANGELOG_CATALOG,
- LLOG_OPEN_EXISTS);
- if (rc) {
- CERROR("%s: fail to open changelog catalog: rc = %d\n",
- cs_obd_name(cs), rc);
- goto out;
- }
-
- if (cs->cs_flags & CHANGELOG_FLAG_JOBID)
- flags |= LLOG_F_EXT_JOBID;
-
- rc = llog_init_handle(NULL, llh, flags, NULL);
- if (rc) {
- CERROR("llog_init_handle failed %d\n", rc);
- goto out;
- }
-
- rc = llog_cat_process(NULL, llh, changelog_kkuc_cb, cs, 0, 0);
-
- /* Send EOF no matter what our result */
- kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
- kuch->kuc_msgtype = CL_EOF;
- libcfs_kkuc_msg_put(cs->cs_fp, kuch);
-
-out:
- fput(cs->cs_fp);
- if (llh)
- llog_cat_close(NULL, llh);
- if (ctxt)
- llog_ctxt_put(ctxt);
- kfree(cs->cs_buf);
- kfree(cs);
- return rc;
-}
-
-static int mdc_ioc_changelog_send(struct obd_device *obd,
- struct ioc_changelog *icc)
-{
- struct changelog_show *cs;
- struct task_struct *task;
- int rc;
-
- /* Freed in mdc_changelog_send_thread */
- cs = kzalloc(sizeof(*cs), GFP_NOFS);
- if (!cs)
- return -ENOMEM;
-
- cs->cs_obd = obd;
- cs->cs_startrec = icc->icc_recno;
- /* matching fput in mdc_changelog_send_thread */
- cs->cs_fp = fget(icc->icc_id);
- cs->cs_flags = icc->icc_flags;
-
- /*
- * New thread because we should return to user app before
- * writing into our pipe
- */
- task = kthread_run(mdc_changelog_send_thread, cs,
- "mdc_clg_send_thread");
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- CERROR("%s: can't start changelog thread: rc = %d\n",
- cs_obd_name(cs), rc);
- kfree(cs);
- } else {
- rc = 0;
- CDEBUG(D_HSM, "%s: started changelog thread\n",
- cs_obd_name(cs));
- }
-
- CERROR("Failed to start changelog thread: %d\n", rc);
- return rc;
-}
-
-static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
- struct lustre_kernelcomm *lk);
-
-static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct ptlrpc_request *req;
- struct obd_quotactl *oqc;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
- MDS_QUOTACTL);
- if (!req)
- return -ENOMEM;
-
- oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *oqc = *oqctl;
-
- ptlrpc_request_set_replen(req);
- ptlrpc_at_set_req_timeout(req);
- req->rq_no_resend = 1;
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
-
- if (req->rq_repmsg) {
- oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- if (oqc) {
- *oqctl = *oqc;
- } else if (!rc) {
- CERROR("Can't unpack obd_quotactl\n");
- rc = -EPROTO;
- }
- } else if (!rc) {
- CERROR("Can't unpack obd_quotactl\n");
- rc = -EPROTO;
- }
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-static int mdc_ioc_swap_layouts(struct obd_export *exp,
- struct md_op_data *op_data)
-{
- LIST_HEAD(cancels);
- struct ptlrpc_request *req;
- int rc, count;
- struct mdc_swap_layouts *msl, *payload;
-
- msl = op_data->op_data;
-
- /* When the MDT will get the MDS_SWAP_LAYOUTS RPC the
- * first thing it will do is to cancel the 2 layout
- * locks hold by this client.
- * So the client must cancel its layout locks on the 2 fids
- * with the request RPC to avoid extra RPC round trips
- */
- count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
- LCK_CR, MDS_INODELOCK_LAYOUT |
- MDS_INODELOCK_XATTR);
- count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels,
- LCK_CR, MDS_INODELOCK_LAYOUT |
- MDS_INODELOCK_XATTR);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_SWAP_LAYOUTS);
- if (!req) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
-
- rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_swap_layouts_pack(req, op_data);
-
- payload = req_capsule_client_get(&req->rq_pill, &RMF_SWAP_LAYOUTS);
- LASSERT(payload);
-
- *payload = *msl;
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
-
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void __user *uarg)
-{
- struct obd_device *obd = exp->exp_obd;
- struct obd_ioctl_data *data = karg;
- struct obd_import *imp = obd->u.cli.cl_import;
- int rc;
-
- if (!try_module_get(THIS_MODULE)) {
- CERROR("%s: cannot get module '%s'\n", obd->obd_name,
- module_name(THIS_MODULE));
- return -EINVAL;
- }
- switch (cmd) {
- case OBD_IOC_CHANGELOG_SEND:
- rc = mdc_ioc_changelog_send(obd, karg);
- goto out;
- case OBD_IOC_CHANGELOG_CLEAR: {
- struct ioc_changelog *icc = karg;
- struct changelog_setinfo cs = {
- .cs_recno = icc->icc_recno,
- .cs_id = icc->icc_id
- };
-
- rc = obd_set_info_async(NULL, exp, strlen(KEY_CHANGELOG_CLEAR),
- KEY_CHANGELOG_CLEAR, sizeof(cs), &cs,
- NULL);
- goto out;
- }
- case OBD_IOC_FID2PATH:
- rc = mdc_ioc_fid2path(exp, karg);
- goto out;
- case LL_IOC_HSM_CT_START:
- rc = mdc_ioc_hsm_ct_start(exp, karg);
- /* ignore if it was already registered on this MDS. */
- if (rc == -EEXIST)
- rc = 0;
- goto out;
- case LL_IOC_HSM_PROGRESS:
- rc = mdc_ioc_hsm_progress(exp, karg);
- goto out;
- case LL_IOC_HSM_STATE_GET:
- rc = mdc_ioc_hsm_state_get(exp, karg);
- goto out;
- case LL_IOC_HSM_STATE_SET:
- rc = mdc_ioc_hsm_state_set(exp, karg);
- goto out;
- case LL_IOC_HSM_ACTION:
- rc = mdc_ioc_hsm_current_action(exp, karg);
- goto out;
- case LL_IOC_HSM_REQUEST:
- rc = mdc_ioc_hsm_request(exp, karg);
- goto out;
- case OBD_IOC_CLIENT_RECOVER:
- rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1, 0);
- if (rc < 0)
- goto out;
- rc = 0;
- goto out;
- case IOC_OSC_SET_ACTIVE:
- rc = ptlrpc_set_import_active(imp, data->ioc_offset);
- goto out;
- case OBD_IOC_PING_TARGET:
- rc = ptlrpc_obd_ping(obd);
- goto out;
- /*
- * Normally IOC_OBD_STATFS, OBD_IOC_QUOTACTL iocontrol are handled by
- * LMV instead of MDC. But when the cluster is upgraded from 1.8,
- * there'd be no LMV layer thus we might be called here. Eventually
- * this code should be removed.
- * bz20731, LU-592.
- */
- case IOC_OBD_STATFS: {
- struct obd_statfs stat_buf = {0};
-
- if (*((__u32 *)data->ioc_inlbuf2) != 0) {
- rc = -ENODEV;
- goto out;
- }
-
- /* copy UUID */
- if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
- min_t(size_t, data->ioc_plen2,
- sizeof(struct obd_uuid)))) {
- rc = -EFAULT;
- goto out;
- }
-
- rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- 0);
- if (rc != 0)
- goto out;
-
- if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min_t(size_t, data->ioc_plen1,
- sizeof(stat_buf)))) {
- rc = -EFAULT;
- goto out;
- }
-
- rc = 0;
- goto out;
- }
- case OBD_IOC_QUOTACTL: {
- struct if_quotactl *qctl = karg;
- struct obd_quotactl *oqctl;
-
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl) {
- rc = -ENOMEM;
- goto out;
- }
-
- QCTL_COPY(oqctl, qctl);
- rc = obd_quotactl(exp, oqctl);
- if (rc == 0) {
- QCTL_COPY(qctl, oqctl);
- qctl->qc_valid = QC_MDTIDX;
- qctl->obd_uuid = obd->u.cli.cl_target_uuid;
- }
-
- kfree(oqctl);
- goto out;
- }
- case LL_IOC_GET_CONNECT_FLAGS:
- if (copy_to_user(uarg, exp_connect_flags_ptr(exp),
- sizeof(*exp_connect_flags_ptr(exp)))) {
- rc = -EFAULT;
- goto out;
- }
-
- rc = 0;
- goto out;
- case LL_IOC_LOV_SWAP_LAYOUTS:
- rc = mdc_ioc_swap_layouts(exp, karg);
- goto out;
- default:
- CERROR("unrecognised ioctl: cmd = %#x\n", cmd);
- rc = -ENOTTY;
- goto out;
- }
-out:
- module_put(THIS_MODULE);
-
- return rc;
-}
-
-static int mdc_get_info_rpc(struct obd_export *exp,
- u32 keylen, void *key,
- int vallen, void *val)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- struct ptlrpc_request *req;
- char *tmp;
- int rc = -EINVAL;
-
- req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
- if (!req)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
- RCL_CLIENT, keylen);
- req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VALLEN,
- RCL_CLIENT, sizeof(__u32));
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_VALLEN);
- memcpy(tmp, &vallen, sizeof(__u32));
-
- req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VAL,
- RCL_SERVER, vallen);
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- /* -EREMOTE means the get_info result is partial, and it needs to
- * continue on another MDT, see fid2path part in lmv_iocontrol
- */
- if (rc == 0 || rc == -EREMOTE) {
- tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL);
- memcpy(val, tmp, vallen);
- if (ptlrpc_rep_need_swab(req)) {
- if (KEY_IS(KEY_FID2PATH))
- lustre_swab_fid2path(val);
- }
- }
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-static void lustre_swab_hai(struct hsm_action_item *h)
-{
- __swab32s(&h->hai_len);
- __swab32s(&h->hai_action);
- lustre_swab_lu_fid(&h->hai_fid);
- lustre_swab_lu_fid(&h->hai_dfid);
- __swab64s(&h->hai_cookie);
- __swab64s(&h->hai_extent.offset);
- __swab64s(&h->hai_extent.length);
- __swab64s(&h->hai_gid);
-}
-
-static void lustre_swab_hal(struct hsm_action_list *h)
-{
- struct hsm_action_item *hai;
- u32 i;
-
- __swab32s(&h->hal_version);
- __swab32s(&h->hal_count);
- __swab32s(&h->hal_archive_id);
- __swab64s(&h->hal_flags);
- hai = hai_first(h);
- for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
- lustre_swab_hai(hai);
-}
-
-static void lustre_swab_kuch(struct kuc_hdr *l)
-{
- __swab16s(&l->kuc_magic);
- /* __u8 l->kuc_transport */
- __swab16s(&l->kuc_msgtype);
- __swab16s(&l->kuc_msglen);
-}
-
-static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
- struct lustre_kernelcomm *lk)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- __u32 archive = lk->lk_data;
- int rc = 0;
-
- if (lk->lk_group != KUC_GRP_HSM) {
- CERROR("Bad copytool group %d\n", lk->lk_group);
- return -EINVAL;
- }
-
- CDEBUG(D_HSM, "CT start r%d w%d u%d g%d f%#x\n", lk->lk_rfd, lk->lk_wfd,
- lk->lk_uid, lk->lk_group, lk->lk_flags);
-
- if (lk->lk_flags & LK_FLG_STOP) {
- /* Unregister with the coordinator */
- rc = mdc_ioc_hsm_ct_unregister(imp);
- } else {
- rc = mdc_ioc_hsm_ct_register(imp, archive);
- }
-
- return rc;
-}
-
-/**
- * Send a message to any listening copytools
- * @param val KUC message (kuc_hdr + hsm_action_list)
- * @param len total length of message
- */
-static int mdc_hsm_copytool_send(size_t len, void *val)
-{
- struct kuc_hdr *lh = (struct kuc_hdr *)val;
- struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);
-
- if (len < sizeof(*lh) + sizeof(*hal)) {
- CERROR("Short HSM message %zu < %zu\n", len,
- sizeof(*lh) + sizeof(*hal));
- return -EPROTO;
- }
- if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
- lustre_swab_kuch(lh);
- lustre_swab_hal(hal);
- } else if (lh->kuc_magic != KUC_MAGIC) {
- CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC);
- return -EPROTO;
- }
-
- CDEBUG(D_HSM,
- "Received message mg=%x t=%d m=%d l=%d actions=%d on %s\n",
- lh->kuc_magic, lh->kuc_transport, lh->kuc_msgtype,
- lh->kuc_msglen, hal->hal_count, hal->hal_fsname);
-
- /* Broadcast to HSM listeners */
- return libcfs_kkuc_group_put(KUC_GRP_HSM, lh);
-}
-
-/**
- * callback function passed to kuc for re-registering each HSM copytool
- * running on MDC, after MDT shutdown/recovery.
- * @param data copytool registration data
- * @param cb_arg callback argument (obd_import)
- */
-static int mdc_hsm_ct_reregister(void *data, void *cb_arg)
-{
- struct kkuc_ct_data *kcd = data;
- struct obd_import *imp = (struct obd_import *)cb_arg;
- int rc;
-
- if (!kcd || kcd->kcd_magic != KKUC_CT_DATA_MAGIC)
- return -EPROTO;
-
- if (!obd_uuid_equals(&kcd->kcd_uuid, &imp->imp_obd->obd_uuid))
- return 0;
-
- CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive=%#x)\n",
- imp->imp_obd->obd_name, kcd->kcd_archive);
- rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_archive);
-
- /* ignore error if the copytool is already registered */
- return (rc == -EEXIST) ? 0 : rc;
-}
-
-static int mdc_set_info_async(const struct lu_env *env,
- struct obd_export *exp,
- u32 keylen, void *key,
- u32 vallen, void *val,
- struct ptlrpc_request_set *set)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- int rc;
-
- if (KEY_IS(KEY_READ_ONLY)) {
- if (vallen != sizeof(int))
- return -EINVAL;
-
- spin_lock(&imp->imp_lock);
- if (*((int *)val)) {
- imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
- imp->imp_connect_data.ocd_connect_flags |=
- OBD_CONNECT_RDONLY;
- } else {
- imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
- imp->imp_connect_data.ocd_connect_flags &=
- ~OBD_CONNECT_RDONLY;
- }
- spin_unlock(&imp->imp_lock);
-
- return do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
- keylen, key, vallen, val, set);
- }
- if (KEY_IS(KEY_SPTLRPC_CONF)) {
- sptlrpc_conf_client_adapt(exp->exp_obd);
- return 0;
- }
- if (KEY_IS(KEY_FLUSH_CTX)) {
- sptlrpc_import_flush_my_ctx(imp);
- return 0;
- }
- if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
- rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
- keylen, key, vallen, val, set);
- return rc;
- }
- if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) {
- rc = mdc_hsm_copytool_send(vallen, val);
- return rc;
- }
- if (KEY_IS(KEY_DEFAULT_EASIZE)) {
- u32 *default_easize = val;
-
- exp->exp_obd->u.cli.cl_default_mds_easize = *default_easize;
- return 0;
- }
-
- CERROR("Unknown key %s\n", (char *)key);
- return -EINVAL;
-}
-
-static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val)
-{
- int rc = -EINVAL;
-
- if (KEY_IS(KEY_MAX_EASIZE)) {
- u32 mdsize, *max_easize;
-
- if (*vallen != sizeof(int))
- return -EINVAL;
- mdsize = *(u32 *)val;
- if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize)
- exp->exp_obd->u.cli.cl_max_mds_easize = mdsize;
- max_easize = val;
- *max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
- return 0;
- } else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
- u32 *default_easize;
-
- if (*vallen != sizeof(int))
- return -EINVAL;
- default_easize = val;
- *default_easize = exp->exp_obd->u.cli.cl_default_mds_easize;
- return 0;
- } else if (KEY_IS(KEY_CONN_DATA)) {
- struct obd_import *imp = class_exp2cliimp(exp);
- struct obd_connect_data *data = val;
-
- if (*vallen != sizeof(*data))
- return -EINVAL;
-
- *data = imp->imp_connect_data;
- return 0;
- } else if (KEY_IS(KEY_TGT_COUNT)) {
- *((u32 *)val) = 1;
- return 0;
- }
-
- rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val);
-
- return rc;
-}
-
-static int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, fid, 0, 0, -1, 0);
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
- enum obd_import_event event)
-{
- int rc = 0;
-
- LASSERT(imp->imp_obd == obd);
-
- switch (event) {
- case IMP_EVENT_INACTIVE: {
- struct client_obd *cli = &obd->u.cli;
- /*
- * Flush current sequence to make client obtain new one
- * from server in case of disconnect/reconnect.
- */
- if (cli->cl_seq)
- seq_client_flush(cli->cl_seq);
-
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
- break;
- }
- case IMP_EVENT_INVALIDATE: {
- struct ldlm_namespace *ns = obd->obd_namespace;
-
- ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
-
- break;
- }
- case IMP_EVENT_ACTIVE:
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
- /* redo the kuc registration after reconnecting */
- if (rc == 0)
- /* re-register HSM agents */
- rc = libcfs_kkuc_group_foreach(KUC_GRP_HSM,
- mdc_hsm_ct_reregister,
- (void *)imp);
- break;
- case IMP_EVENT_OCD:
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
- break;
- case IMP_EVENT_DISCON:
- case IMP_EVENT_DEACTIVATE:
- case IMP_EVENT_ACTIVATE:
- break;
- default:
- CERROR("Unknown import event %x\n", event);
- LBUG();
- }
- return rc;
-}
-
-int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
- struct lu_fid *fid, struct md_op_data *op_data)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct lu_client_seq *seq = cli->cl_seq;
-
- return seq_client_alloc_fid(env, seq, fid);
-}
-
-static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
-
- return &cli->cl_target_uuid;
-}
-
-/**
- * Determine whether the lock can be canceled before replaying it during
- * recovery, non zero value will be return if the lock can be canceled,
- * or zero returned for not
- */
-static int mdc_cancel_weight(struct ldlm_lock *lock)
-{
- if (lock->l_resource->lr_type != LDLM_IBITS)
- return 0;
-
- /* FIXME: if we ever get into a situation where there are too many
- * opened files with open locks on a single node, then we really
- * should replay these open locks to reget it
- */
- if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
- return 0;
-
- return 1;
-}
-
-static int mdc_resource_inode_free(struct ldlm_resource *res)
-{
- if (res->lr_lvb_inode)
- res->lr_lvb_inode = NULL;
-
- return 0;
-}
-
-static struct ldlm_valblock_ops inode_lvbo = {
- .lvbo_free = mdc_resource_inode_free,
-};
-
-static int mdc_llog_init(struct obd_device *obd)
-{
- struct obd_llog_group *olg = &obd->obd_olg;
- struct llog_ctxt *ctxt;
- int rc;
-
- rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, obd,
- &llog_client_ops);
- if (rc)
- return rc;
-
- ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT);
- llog_initiator_connect(ctxt);
- llog_ctxt_put(ctxt);
-
- return 0;
-}
-
-static void mdc_llog_finish(struct obd_device *obd)
-{
- struct llog_ctxt *ctxt;
-
- ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT);
- if (ctxt)
- llog_cleanup(NULL, ctxt);
-}
-
-static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
-{
- struct lprocfs_static_vars lvars = { NULL };
- int rc;
-
- rc = ptlrpcd_addref();
- if (rc < 0)
- return rc;
-
- rc = client_obd_setup(obd, cfg);
- if (rc)
- goto err_ptlrpcd_decref;
-
- lprocfs_mdc_init_vars(&lvars);
- lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
- sptlrpc_lprocfs_cliobd_attach(obd);
- ptlrpc_lprocfs_register_obd(obd);
-
- ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);
-
- obd->obd_namespace->ns_lvbo = &inode_lvbo;
-
- rc = mdc_llog_init(obd);
- if (rc) {
- mdc_cleanup(obd);
- CERROR("failed to setup llogging subsystems\n");
- return rc;
- }
-
- return rc;
-
-err_ptlrpcd_decref:
- ptlrpcd_decref();
- return rc;
-}
-
-/* Initialize the default and maximum LOV EA sizes. This allows
- * us to make MDS RPCs with large enough reply buffers to hold a default
- * sized EA without having to calculate this (via a call into the
- * LOV + OSCs) each time we make an RPC. The maximum size is also tracked
- * but not used to avoid wastefully vmalloc()'ing large reply buffers when
- * a large number of stripes is possible. If a larger reply buffer is
- * required it will be reallocated in the ptlrpc layer due to overflow.
- */
-static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
-{
- struct obd_device *obd = exp->exp_obd;
- struct client_obd *cli = &obd->u.cli;
-
- if (cli->cl_max_mds_easize < easize)
- cli->cl_max_mds_easize = easize;
-
- if (cli->cl_default_mds_easize < def_easize)
- cli->cl_default_mds_easize = def_easize;
-
- return 0;
-}
-
-static int mdc_precleanup(struct obd_device *obd)
-{
- /* Failsafe, ok if racy */
- if (obd->obd_type->typ_refcnt <= 1)
- libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
-
- obd_cleanup_client_import(obd);
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
- mdc_llog_finish(obd);
- return 0;
-}
-
-static int mdc_cleanup(struct obd_device *obd)
-{
- ptlrpcd_decref();
-
- return client_obd_cleanup(obd);
-}
-
-static int mdc_process_config(struct obd_device *obd, u32 len, void *buf)
-{
- struct lustre_cfg *lcfg = buf;
- struct lprocfs_static_vars lvars = { NULL };
- int rc = 0;
-
- lprocfs_mdc_init_vars(&lvars);
- switch (lcfg->lcfg_command) {
- default:
- rc = class_process_proc_param(PARAM_MDC, lvars.obd_vars,
- lcfg, obd);
- if (rc > 0)
- rc = 0;
- break;
- }
- return rc;
-}
-
-static struct obd_ops mdc_obd_ops = {
- .owner = THIS_MODULE,
- .setup = mdc_setup,
- .precleanup = mdc_precleanup,
- .cleanup = mdc_cleanup,
- .add_conn = client_import_add_conn,
- .del_conn = client_import_del_conn,
- .connect = client_connect_import,
- .disconnect = client_disconnect_export,
- .iocontrol = mdc_iocontrol,
- .set_info_async = mdc_set_info_async,
- .statfs = mdc_statfs,
- .fid_init = client_fid_init,
- .fid_fini = client_fid_fini,
- .fid_alloc = mdc_fid_alloc,
- .import_event = mdc_import_event,
- .get_info = mdc_get_info,
- .process_config = mdc_process_config,
- .get_uuid = mdc_get_uuid,
- .quotactl = mdc_quotactl,
-};
-
-static struct md_ops mdc_md_ops = {
- .getstatus = mdc_getstatus,
- .null_inode = mdc_null_inode,
- .close = mdc_close,
- .create = mdc_create,
- .enqueue = mdc_enqueue,
- .getattr = mdc_getattr,
- .getattr_name = mdc_getattr_name,
- .intent_lock = mdc_intent_lock,
- .link = mdc_link,
- .rename = mdc_rename,
- .setattr = mdc_setattr,
- .setxattr = mdc_setxattr,
- .getxattr = mdc_getxattr,
- .sync = mdc_sync,
- .read_page = mdc_read_page,
- .unlink = mdc_unlink,
- .cancel_unused = mdc_cancel_unused,
- .init_ea_size = mdc_init_ea_size,
- .set_lock_data = mdc_set_lock_data,
- .lock_match = mdc_lock_match,
- .get_lustre_md = mdc_get_lustre_md,
- .free_lustre_md = mdc_free_lustre_md,
- .set_open_replay_data = mdc_set_open_replay_data,
- .clear_open_replay_data = mdc_clear_open_replay_data,
- .intent_getattr_async = mdc_intent_getattr_async,
- .revalidate_lock = mdc_revalidate_lock
-};
-
-static int __init mdc_init(void)
-{
- struct lprocfs_static_vars lvars = { NULL };
-
- lprocfs_mdc_init_vars(&lvars);
-
- return class_register_type(&mdc_obd_ops, &mdc_md_ops,
- LUSTRE_MDC_NAME, NULL);
-}
-
-static void /*__exit*/ mdc_exit(void)
-{
- class_unregister_type(LUSTRE_MDC_NAME);
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Metadata Client");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(mdc_init);
-module_exit(mdc_exit);
diff --git a/drivers/staging/lustre/lustre/mgc/Makefile b/drivers/staging/lustre/lustre/mgc/Makefile
deleted file mode 100644
index 8abf108dbcf7..000000000000
--- a/drivers/staging/lustre/lustre/mgc/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LUSTRE_FS) += mgc.o
-mgc-y := mgc_request.o lproc_mgc.o
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
deleted file mode 100644
index 636770624e8f..000000000000
--- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/vfs.h>
-#include <obd_class.h>
-#include <lprocfs_status.h>
-#include "mgc_internal.h"
-
-LPROC_SEQ_FOPS_RO_TYPE(mgc, connect_flags);
-LPROC_SEQ_FOPS_RO_TYPE(mgc, server_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(mgc, conn_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(mgc, import);
-LPROC_SEQ_FOPS_RO_TYPE(mgc, state);
-
-LPROC_SEQ_FOPS_WR_ONLY(mgc, ping);
-
-static int mgc_ir_state_seq_show(struct seq_file *m, void *v)
-{
- return lprocfs_mgc_rd_ir_state(m, m->private);
-}
-
-LPROC_SEQ_FOPS_RO(mgc_ir_state);
-
-static struct lprocfs_vars lprocfs_mgc_obd_vars[] = {
- { "ping", &mgc_ping_fops, NULL, 0222 },
- { "connect_flags", &mgc_connect_flags_fops, NULL, 0 },
- { "mgs_server_uuid", &mgc_server_uuid_fops, NULL, 0 },
- { "mgs_conn_uuid", &mgc_conn_uuid_fops, NULL, 0 },
- { "import", &mgc_import_fops, NULL, 0 },
- { "state", &mgc_state_fops, NULL, 0 },
- { "ir_state", &mgc_ir_state_fops, NULL, 0 },
- { NULL }
-};
-
-void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->obd_vars = lprocfs_mgc_obd_vars;
-}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
deleted file mode 100644
index 2c571c180578..000000000000
--- a/drivers/staging/lustre/lustre/mgc/mgc_internal.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _MGC_INTERNAL_H
-#define _MGC_INTERNAL_H
-
-#include <linux/libcfs/libcfs.h>
-#include <lustre_lib.h>
-#include <lustre_dlm.h>
-#include <lustre_log.h>
-#include <lustre_export.h>
-
-void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars);
-int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data);
-
-int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld);
-
-static inline int cld_is_sptlrpc(struct config_llog_data *cld)
-{
- return cld->cld_type == CONFIG_T_SPTLRPC;
-}
-
-static inline int cld_is_recover(struct config_llog_data *cld)
-{
- return cld->cld_type == CONFIG_T_RECOVER;
-}
-
-#endif /* _MGC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
deleted file mode 100644
index c61cd23a96df..000000000000
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ /dev/null
@@ -1,1844 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/mgc/mgc_request.c
- *
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_MGC
-#define D_MGC D_CONFIG /*|D_WARNING*/
-
-#include <linux/module.h>
-
-#include <lprocfs_status.h>
-#include <lustre_dlm.h>
-#include <lustre_disk.h>
-#include <lustre_log.h>
-#include <lustre_swab.h>
-#include <obd_class.h>
-
-#include "mgc_internal.h"
-
-static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
- int type)
-{
- __u64 resname = 0;
-
- if (len > sizeof(resname)) {
- CERROR("name too long: %s\n", name);
- return -EINVAL;
- }
- if (len <= 0) {
- CERROR("missing name: %s\n", name);
- return -EINVAL;
- }
- memcpy(&resname, name, len);
-
- /* Always use the same endianness for the resid */
- memset(res_id, 0, sizeof(*res_id));
- res_id->name[0] = cpu_to_le64(resname);
- /* XXX: unfortunately, sptlprc and config llog share one lock */
- switch (type) {
- case CONFIG_T_CONFIG:
- case CONFIG_T_SPTLRPC:
- resname = 0;
- break;
- case CONFIG_T_RECOVER:
- case CONFIG_T_PARAMS:
- resname = type;
- break;
- default:
- LBUG();
- }
- res_id->name[1] = cpu_to_le64(resname);
- CDEBUG(D_MGC, "log %s to resid %#llx/%#llx (%.8s)\n", name,
- res_id->name[0], res_id->name[1], (char *)&res_id->name[0]);
- return 0;
-}
-
-int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type)
-{
- /* fsname is at most 8 chars long, maybe contain "-".
- * e.g. "lustre", "SUN-000"
- */
- return mgc_name2resid(fsname, strlen(fsname), res_id, type);
-}
-EXPORT_SYMBOL(mgc_fsname2resid);
-
-static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type)
-{
- char *name_end;
- int len;
-
- /* logname consists of "fsname-nodetype".
- * e.g. "lustre-MDT0001", "SUN-000-client"
- * there is an exception: llog "params"
- */
- name_end = strrchr(logname, '-');
- if (!name_end)
- len = strlen(logname);
- else
- len = name_end - logname;
- return mgc_name2resid(logname, len, res_id, type);
-}
-
-/********************** config llog list **********************/
-static LIST_HEAD(config_llog_list);
-static DEFINE_SPINLOCK(config_list_lock);
-
-/* Take a reference to a config log */
-static int config_log_get(struct config_llog_data *cld)
-{
- atomic_inc(&cld->cld_refcount);
- CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
- atomic_read(&cld->cld_refcount));
- return 0;
-}
-
-/* Drop a reference to a config log. When no longer referenced,
- * we can free the config log data
- */
-static void config_log_put(struct config_llog_data *cld)
-{
- CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
- atomic_read(&cld->cld_refcount));
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
-
- /* spinlock to make sure no item with 0 refcount in the list */
- if (atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
- list_del(&cld->cld_list_chain);
- spin_unlock(&config_list_lock);
-
- CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
-
- if (cld->cld_recover)
- config_log_put(cld->cld_recover);
- if (cld->cld_params)
- config_log_put(cld->cld_params);
- if (cld->cld_sptlrpc)
- config_log_put(cld->cld_sptlrpc);
- if (cld_is_sptlrpc(cld))
- sptlrpc_conf_log_stop(cld->cld_logname);
-
- class_export_put(cld->cld_mgcexp);
- kfree(cld);
- }
-}
-
-/* Find a config log by name */
-static
-struct config_llog_data *config_log_find(char *logname,
- struct config_llog_instance *cfg)
-{
- struct config_llog_data *cld;
- struct config_llog_data *found = NULL;
- void *instance;
-
- LASSERT(logname);
-
- instance = cfg ? cfg->cfg_instance : NULL;
- spin_lock(&config_list_lock);
- list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
- /* check if instance equals */
- if (instance != cld->cld_cfg.cfg_instance)
- continue;
-
- /* instance may be NULL, should check name */
- if (strcmp(logname, cld->cld_logname) == 0) {
- found = cld;
- config_log_get(found);
- break;
- }
- }
- spin_unlock(&config_list_lock);
- return found;
-}
-
-static
-struct config_llog_data *do_config_log_add(struct obd_device *obd,
- char *logname,
- int type,
- struct config_llog_instance *cfg,
- struct super_block *sb)
-{
- struct config_llog_data *cld;
- int rc;
-
- CDEBUG(D_MGC, "do adding config log %s:%p\n", logname,
- cfg ? cfg->cfg_instance : NULL);
-
- cld = kzalloc(sizeof(*cld) + strlen(logname) + 1, GFP_NOFS);
- if (!cld)
- return ERR_PTR(-ENOMEM);
-
- rc = mgc_logname2resid(logname, &cld->cld_resid, type);
- if (rc) {
- kfree(cld);
- return ERR_PTR(rc);
- }
-
- strcpy(cld->cld_logname, logname);
- if (cfg)
- cld->cld_cfg = *cfg;
- else
- cld->cld_cfg.cfg_callback = class_config_llog_handler;
- mutex_init(&cld->cld_lock);
- cld->cld_cfg.cfg_last_idx = 0;
- cld->cld_cfg.cfg_flags = 0;
- cld->cld_cfg.cfg_sb = sb;
- cld->cld_type = type;
- atomic_set(&cld->cld_refcount, 1);
-
- /* Keep the mgc around until we are done */
- cld->cld_mgcexp = class_export_get(obd->obd_self_export);
-
- if (cld_is_sptlrpc(cld)) {
- sptlrpc_conf_log_start(logname);
- cld->cld_cfg.cfg_obdname = obd->obd_name;
- }
-
- spin_lock(&config_list_lock);
- list_add(&cld->cld_list_chain, &config_llog_list);
- spin_unlock(&config_list_lock);
-
- if (cld_is_sptlrpc(cld)) {
- rc = mgc_process_log(obd, cld);
- if (rc && rc != -ENOENT)
- CERROR("failed processing sptlrpc log: %d\n", rc);
- }
-
- return cld;
-}
-
-static struct config_llog_data *
-config_recover_log_add(struct obd_device *obd, char *fsname,
- struct config_llog_instance *cfg,
- struct super_block *sb)
-{
- struct config_llog_instance lcfg = *cfg;
- struct config_llog_data *cld;
- char logname[32];
-
- /* we have to use different llog for clients and mdts for cmd
- * where only clients are notified if one of cmd server restarts
- */
- LASSERT(strlen(fsname) < sizeof(logname) / 2);
- strcpy(logname, fsname);
- LASSERT(lcfg.cfg_instance);
- strcat(logname, "-cliir");
-
- cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb);
- return cld;
-}
-
-static struct config_llog_data *
-config_params_log_add(struct obd_device *obd,
- struct config_llog_instance *cfg, struct super_block *sb)
-{
- struct config_llog_instance lcfg = *cfg;
- struct config_llog_data *cld;
-
- lcfg.cfg_instance = sb;
-
- cld = do_config_log_add(obd, PARAMS_FILENAME, CONFIG_T_PARAMS,
- &lcfg, sb);
-
- return cld;
-}
-
-/** Add this log to the list of active logs watched by an MGC.
- * Active means we're watching for updates.
- * We have one active log per "mount" - client instance or servername.
- * Each instance may be at a different point in the log.
- */
-static struct config_llog_data *
-config_log_add(struct obd_device *obd, char *logname,
- struct config_llog_instance *cfg, struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct config_llog_data *cld;
- struct config_llog_data *sptlrpc_cld;
- struct config_llog_data *params_cld;
- struct config_llog_data *recover_cld = NULL;
- char seclogname[32];
- char *ptr;
- int rc;
-
- CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
-
- /*
- * for each regular log, the depended sptlrpc log name is
- * <fsname>-sptlrpc. multiple regular logs may share one sptlrpc log.
- */
- ptr = strrchr(logname, '-');
- if (!ptr || ptr - logname > 8) {
- CERROR("logname %s is too long\n", logname);
- return ERR_PTR(-EINVAL);
- }
-
- memcpy(seclogname, logname, ptr - logname);
- strcpy(seclogname + (ptr - logname), "-sptlrpc");
-
- sptlrpc_cld = config_log_find(seclogname, NULL);
- if (!sptlrpc_cld) {
- sptlrpc_cld = do_config_log_add(obd, seclogname,
- CONFIG_T_SPTLRPC, NULL, NULL);
- if (IS_ERR(sptlrpc_cld)) {
- CERROR("can't create sptlrpc log: %s\n", seclogname);
- rc = PTR_ERR(sptlrpc_cld);
- goto out_err;
- }
- }
- params_cld = config_params_log_add(obd, cfg, sb);
- if (IS_ERR(params_cld)) {
- rc = PTR_ERR(params_cld);
- CERROR("%s: can't create params log: rc = %d\n",
- obd->obd_name, rc);
- goto out_sptlrpc;
- }
-
- cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb);
- if (IS_ERR(cld)) {
- CERROR("can't create log: %s\n", logname);
- rc = PTR_ERR(cld);
- goto out_params;
- }
-
- LASSERT(lsi->lsi_lmd);
- if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) {
- ptr = strrchr(seclogname, '-');
- if (ptr) {
- *ptr = 0;
- } else {
- CERROR("%s: sptlrpc log name not correct, %s: rc = %d\n",
- obd->obd_name, seclogname, -EINVAL);
- rc = -EINVAL;
- goto out_cld;
- }
- recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
- if (IS_ERR(recover_cld)) {
- rc = PTR_ERR(recover_cld);
- goto out_cld;
- }
- }
-
- mutex_lock(&cld->cld_lock);
- cld->cld_recover = recover_cld;
- cld->cld_params = params_cld;
- cld->cld_sptlrpc = sptlrpc_cld;
- mutex_unlock(&cld->cld_lock);
-
- return cld;
-
-out_cld:
- config_log_put(cld);
-
-out_params:
- config_log_put(params_cld);
-
-out_sptlrpc:
- config_log_put(sptlrpc_cld);
-
-out_err:
- return ERR_PTR(rc);
-}
-
-static DEFINE_MUTEX(llog_process_lock);
-
-static inline void config_mark_cld_stop(struct config_llog_data *cld)
-{
- mutex_lock(&cld->cld_lock);
- spin_lock(&config_list_lock);
- cld->cld_stopping = 1;
- spin_unlock(&config_list_lock);
- mutex_unlock(&cld->cld_lock);
-}
-
-/** Stop watching for updates on this log.
- */
-static int config_log_end(char *logname, struct config_llog_instance *cfg)
-{
- struct config_llog_data *cld;
- struct config_llog_data *cld_sptlrpc = NULL;
- struct config_llog_data *cld_params = NULL;
- struct config_llog_data *cld_recover = NULL;
- int rc = 0;
-
- cld = config_log_find(logname, cfg);
- if (!cld)
- return -ENOENT;
-
- mutex_lock(&cld->cld_lock);
- /*
- * if cld_stopping is set, it means we didn't start the log thus
- * not owning the start ref. this can happen after previous umount:
- * the cld still hanging there waiting for lock cancel, and we
- * remount again but failed in the middle and call log_end without
- * calling start_log.
- */
- if (unlikely(cld->cld_stopping)) {
- mutex_unlock(&cld->cld_lock);
- /* drop the ref from the find */
- config_log_put(cld);
- return rc;
- }
-
- spin_lock(&config_list_lock);
- cld->cld_stopping = 1;
- spin_unlock(&config_list_lock);
-
- cld_recover = cld->cld_recover;
- cld->cld_recover = NULL;
-
- cld_params = cld->cld_params;
- cld->cld_params = NULL;
- cld_sptlrpc = cld->cld_sptlrpc;
- cld->cld_sptlrpc = NULL;
- mutex_unlock(&cld->cld_lock);
-
- if (cld_recover) {
- config_mark_cld_stop(cld_recover);
- config_log_put(cld_recover);
- }
-
- if (cld_params) {
- config_mark_cld_stop(cld_params);
- config_log_put(cld_params);
- }
-
- if (cld_sptlrpc)
- config_log_put(cld_sptlrpc);
-
- /* drop the ref from the find */
- config_log_put(cld);
- /* drop the start ref */
- config_log_put(cld);
-
- CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? logname : "client",
- rc);
- return rc;
-}
-
-int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- struct obd_import *imp;
- struct obd_connect_data *ocd;
- struct config_llog_data *cld;
- int rc;
-
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- imp = obd->u.cli.cl_import;
- ocd = &imp->imp_connect_data;
-
- seq_printf(m, "imperative_recovery: %s\n",
- OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
- seq_printf(m, "client_state:\n");
-
- spin_lock(&config_list_lock);
- list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
- if (!cld->cld_recover)
- continue;
- seq_printf(m, " - { client: %s, nidtbl_version: %u }\n",
- cld->cld_logname,
- cld->cld_recover->cld_cfg.cfg_last_idx);
- }
- spin_unlock(&config_list_lock);
-
- up_read(&obd->u.cli.cl_sem);
- return 0;
-}
-
-/* reenqueue any lost locks */
-#define RQ_RUNNING 0x1
-#define RQ_NOW 0x2
-#define RQ_LATER 0x4
-#define RQ_STOP 0x8
-#define RQ_PRECLEANUP 0x10
-static int rq_state;
-static wait_queue_head_t rq_waitq;
-static DECLARE_COMPLETION(rq_exit);
-static DECLARE_COMPLETION(rq_start);
-
-static void do_requeue(struct config_llog_data *cld)
-{
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
-
- /* Do not run mgc_process_log on a disconnected export or an
- * export which is being disconnected. Take the client
- * semaphore to make the check non-racy.
- */
- down_read_nested(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem,
- OBD_CLI_SEM_MGC);
-
- if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
- int rc;
-
- CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
- rc = mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
- if (rc && rc != -ENOENT)
- CERROR("failed processing log: %d\n", rc);
- } else {
- CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
- cld->cld_logname);
- }
- up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
-}
-
-/* this timeout represents how many seconds MGC should wait before
- * requeue config and recover lock to the MGS. We need to randomize this
- * in order to not flood the MGS.
- */
-#define MGC_TIMEOUT_MIN_SECONDS 5
-#define MGC_TIMEOUT_RAND_CENTISEC 500
-
-static int mgc_requeue_thread(void *data)
-{
- bool first = true;
-
- CDEBUG(D_MGC, "Starting requeue thread\n");
-
- /* Keep trying failed locks periodically */
- spin_lock(&config_list_lock);
- rq_state |= RQ_RUNNING;
- while (!(rq_state & RQ_STOP)) {
- struct config_llog_data *cld, *cld_prev;
- int rand = prandom_u32_max(MGC_TIMEOUT_RAND_CENTISEC);
- int to;
-
- /* Any new or requeued lostlocks will change the state */
- rq_state &= ~(RQ_NOW | RQ_LATER);
- spin_unlock(&config_list_lock);
-
- if (first) {
- first = false;
- complete(&rq_start);
- }
-
- /* Always wait a few seconds to allow the server who
- * caused the lock revocation to finish its setup, plus some
- * random so everyone doesn't try to reconnect at once.
- */
- to = msecs_to_jiffies(MGC_TIMEOUT_MIN_SECONDS * MSEC_PER_SEC);
- /* rand is centi-seconds */
- to += msecs_to_jiffies(rand * MSEC_PER_SEC / 100);
- wait_event_idle_timeout(rq_waitq,
- rq_state & (RQ_STOP | RQ_PRECLEANUP),
- to);
-
- /*
- * iterate & processing through the list. for each cld, process
- * its depending sptlrpc cld firstly (if any) and then itself.
- *
- * it's guaranteed any item in the list must have
- * reference > 0; and if cld_lostlock is set, at
- * least one reference is taken by the previous enqueue.
- */
- cld_prev = NULL;
-
- spin_lock(&config_list_lock);
- rq_state &= ~RQ_PRECLEANUP;
- list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
- if (!cld->cld_lostlock || cld->cld_stopping)
- continue;
-
- /*
- * hold reference to avoid being freed during
- * subsequent processing.
- */
- config_log_get(cld);
- cld->cld_lostlock = 0;
- spin_unlock(&config_list_lock);
-
- if (cld_prev)
- config_log_put(cld_prev);
- cld_prev = cld;
-
- if (likely(!(rq_state & RQ_STOP))) {
- do_requeue(cld);
- spin_lock(&config_list_lock);
- } else {
- spin_lock(&config_list_lock);
- break;
- }
- }
- spin_unlock(&config_list_lock);
- if (cld_prev)
- config_log_put(cld_prev);
-
- /* Wait a bit to see if anyone else needs a requeue */
- wait_event_idle(rq_waitq, rq_state & (RQ_NOW | RQ_STOP));
- spin_lock(&config_list_lock);
- }
-
- /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
- rq_state &= ~RQ_RUNNING;
- spin_unlock(&config_list_lock);
-
- complete(&rq_exit);
-
- CDEBUG(D_MGC, "Ending requeue thread\n");
- return 0;
-}
-
-/* Add a cld to the list to requeue. Start the requeue thread if needed.
- * We are responsible for dropping the config log reference from here on out.
- */
-static void mgc_requeue_add(struct config_llog_data *cld)
-{
- bool wakeup = false;
-
- CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
- cld->cld_logname, atomic_read(&cld->cld_refcount),
- cld->cld_stopping, rq_state);
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
-
- mutex_lock(&cld->cld_lock);
- spin_lock(&config_list_lock);
- if (!(rq_state & RQ_STOP) && !cld->cld_stopping && !cld->cld_lostlock) {
- cld->cld_lostlock = 1;
- rq_state |= RQ_NOW;
- wakeup = true;
- }
- spin_unlock(&config_list_lock);
- mutex_unlock(&cld->cld_lock);
- if (wakeup)
- wake_up(&rq_waitq);
-}
-
-static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd)
-{
- struct llog_ctxt *ctxt;
- int rc;
-
- /* setup only remote ctxt, the local disk context is switched per each
- * filesystem during mgc_fs_setup()
- */
- rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd,
- &llog_client_ops);
- if (rc)
- return rc;
-
- ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
- LASSERT(ctxt);
-
- llog_initiator_connect(ctxt);
- llog_ctxt_put(ctxt);
-
- return 0;
-}
-
-static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd)
-{
- struct llog_ctxt *ctxt;
-
- ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
- if (ctxt)
- llog_cleanup(env, ctxt);
-
- return 0;
-}
-
-static atomic_t mgc_count = ATOMIC_INIT(0);
-static int mgc_precleanup(struct obd_device *obd)
-{
- int rc = 0;
- int temp;
-
- if (atomic_dec_and_test(&mgc_count)) {
- LASSERT(rq_state & RQ_RUNNING);
- /* stop requeue thread */
- temp = RQ_STOP;
- } else {
- /* wakeup requeue thread to clean our cld */
- temp = RQ_NOW | RQ_PRECLEANUP;
- }
-
- spin_lock(&config_list_lock);
- rq_state |= temp;
- spin_unlock(&config_list_lock);
- wake_up(&rq_waitq);
-
- if (temp & RQ_STOP)
- wait_for_completion(&rq_exit);
- obd_cleanup_client_import(obd);
-
- rc = mgc_llog_fini(NULL, obd);
- if (rc)
- CERROR("failed to cleanup llogging subsystems\n");
-
- return rc;
-}
-
-static int mgc_cleanup(struct obd_device *obd)
-{
- /* COMPAT_146 - old config logs may have added profiles we don't
- * know about
- */
- if (obd->obd_type->typ_refcnt <= 1)
- /* Only for the last mgc */
- class_del_profiles();
-
- lprocfs_obd_cleanup(obd);
- ptlrpcd_decref();
-
- return client_obd_cleanup(obd);
-}
-
-static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lprocfs_static_vars lvars = { NULL };
- struct task_struct *task;
- int rc;
-
- rc = ptlrpcd_addref();
- if (rc < 0)
- goto err_noref;
-
- rc = client_obd_setup(obd, lcfg);
- if (rc)
- goto err_decref;
-
- rc = mgc_llog_init(NULL, obd);
- if (rc) {
- CERROR("failed to setup llogging subsystems\n");
- goto err_cleanup;
- }
-
- lprocfs_mgc_init_vars(&lvars);
- lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
- sptlrpc_lprocfs_cliobd_attach(obd);
-
- if (atomic_inc_return(&mgc_count) == 1) {
- rq_state = 0;
- init_waitqueue_head(&rq_waitq);
-
- /* start requeue thread */
- task = kthread_run(mgc_requeue_thread, NULL, "ll_cfg_requeue");
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- CERROR("%s: cannot start requeue thread: rc = %d; no more log updates\n",
- obd->obd_name, rc);
- goto err_cleanup;
- }
- /* rc is the task_struct pointer of mgc_requeue_thread. */
- rc = 0;
- wait_for_completion(&rq_start);
- }
-
- return rc;
-
-err_cleanup:
- client_obd_cleanup(obd);
-err_decref:
- ptlrpcd_decref();
-err_noref:
- return rc;
-}
-
-/* based on ll_mdc_blocking_ast */
-static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
-{
- struct lustre_handle lockh;
- struct config_llog_data *cld = data;
- int rc = 0;
-
- switch (flag) {
- case LDLM_CB_BLOCKING:
- /* mgs wants the lock, give it up... */
- LDLM_DEBUG(lock, "MGC blocking CB");
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- break;
- case LDLM_CB_CANCELING:
- /* We've given up the lock, prepare ourselves to update. */
- LDLM_DEBUG(lock, "MGC cancel CB");
-
- CDEBUG(D_MGC, "Lock res " DLDLMRES " (%.8s)\n",
- PLDLMRES(lock->l_resource),
- (char *)&lock->l_resource->lr_name.name[0]);
-
- if (!cld) {
- CDEBUG(D_INFO, "missing data, won't requeue\n");
- break;
- }
-
- /* held at mgc_process_log(). */
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
-
- lock->l_ast_data = NULL;
- /* Are we done with this log? */
- if (cld->cld_stopping) {
- CDEBUG(D_MGC, "log %s: stopping, won't requeue\n",
- cld->cld_logname);
- config_log_put(cld);
- break;
- }
- /* Make sure not to re-enqueue when the mgc is stopping
- * (we get called from client_disconnect_export)
- */
- if (!lock->l_conn_export ||
- !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) {
- CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n",
- cld->cld_logname);
- config_log_put(cld);
- break;
- }
-
- /* Re-enqueue now */
- mgc_requeue_add(cld);
- config_log_put(cld);
- break;
- default:
- LBUG();
- }
-
- return rc;
-}
-
-/* Not sure where this should go... */
-/* This is the timeout value for MGS_CONNECT request plus a ping interval, such
- * that we can have a chance to try the secondary MGS if any.
- */
-#define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \
- + PING_INTERVAL)
-#define MGC_TARGET_REG_LIMIT 10
-#define MGC_SEND_PARAM_LIMIT 10
-
-/* Send parameter to MGS*/
-static int mgc_set_mgs_param(struct obd_export *exp,
- struct mgs_send_param *msp)
-{
- struct ptlrpc_request *req;
- struct mgs_send_param *req_msp, *rep_msp;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MGS_SET_INFO, LUSTRE_MGS_VERSION,
- MGS_SET_INFO);
- if (!req)
- return -ENOMEM;
-
- req_msp = req_capsule_client_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
- if (!req_msp) {
- ptlrpc_req_finished(req);
- return -ENOMEM;
- }
-
- memcpy(req_msp, msp, sizeof(*req_msp));
- ptlrpc_request_set_replen(req);
-
- /* Limit how long we will wait for the enqueue to complete */
- req->rq_delay_limit = MGC_SEND_PARAM_LIMIT;
- rc = ptlrpc_queue_wait(req);
- if (!rc) {
- rep_msp = req_capsule_server_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
- memcpy(msp, rep_msp, sizeof(*rep_msp));
- }
-
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-/* Take a config lock so we can get cancel notifications */
-static int mgc_enqueue(struct obd_export *exp, __u32 type,
- union ldlm_policy_data *policy, __u32 mode,
- __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
- void *data, __u32 lvb_len, void *lvb_swabber,
- struct lustre_handle *lockh)
-{
- struct config_llog_data *cld = data;
- struct ldlm_enqueue_info einfo = {
- .ei_type = type,
- .ei_mode = mode,
- .ei_cb_bl = mgc_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast,
- };
- struct ptlrpc_request *req;
- int short_limit = cld_is_sptlrpc(cld);
- int rc;
-
- CDEBUG(D_MGC, "Enqueue for %s (res %#llx)\n", cld->cld_logname,
- cld->cld_resid.name[0]);
-
- /* We need a callback for every lockholder, so don't try to
- * ldlm_lock_match (see rev 1.1.2.11.2.47)
- */
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION,
- LDLM_ENQUEUE);
- if (!req)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0);
- ptlrpc_request_set_replen(req);
-
- /* Limit how long we will wait for the enqueue to complete */
- req->rq_delay_limit = short_limit ? 5 : MGC_ENQUEUE_LIMIT;
- rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags,
- NULL, 0, LVB_T_NONE, lockh, 0);
- /* A failed enqueue should still call the mgc_blocking_ast,
- * where it will be requeued if needed ("grant failed").
- */
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static void mgc_notify_active(struct obd_device *unused)
-{
- /* wakeup mgc_requeue_thread to requeue mgc lock */
- spin_lock(&config_list_lock);
- rq_state |= RQ_NOW;
- spin_unlock(&config_list_lock);
- wake_up(&rq_waitq);
-
- /* TODO: Help the MGS rebuild nidtbl. -jay */
-}
-
-/* Send target_reg message to MGS */
-static int mgc_target_register(struct obd_export *exp,
- struct mgs_target_info *mti)
-{
- struct ptlrpc_request *req;
- struct mgs_target_info *req_mti, *rep_mti;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION,
- MGS_TARGET_REG);
- if (!req)
- return -ENOMEM;
-
- req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO);
- if (!req_mti) {
- ptlrpc_req_finished(req);
- return -ENOMEM;
- }
-
- memcpy(req_mti, mti, sizeof(*req_mti));
- ptlrpc_request_set_replen(req);
- CDEBUG(D_MGC, "register %s\n", mti->mti_svname);
- /* Limit how long we will wait for the enqueue to complete */
- req->rq_delay_limit = MGC_TARGET_REG_LIMIT;
-
- rc = ptlrpc_queue_wait(req);
- if (!rc) {
- rep_mti = req_capsule_server_get(&req->rq_pill,
- &RMF_MGS_TARGET_INFO);
- memcpy(mti, rep_mti, sizeof(*rep_mti));
- CDEBUG(D_MGC, "register %s got index = %d\n",
- mti->mti_svname, mti->mti_stripe_index);
- }
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
-{
- int rc = -EINVAL;
-
- /* Turn off initial_recov after we try all backup servers once */
- if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
- struct obd_import *imp = class_exp2cliimp(exp);
- int value;
-
- if (vallen != sizeof(int))
- return -EINVAL;
- value = *(int *)val;
- CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
- imp->imp_obd->obd_name, value,
- imp->imp_deactive, imp->imp_invalid,
- imp->imp_replayable, imp->imp_obd->obd_replayable,
- ptlrpc_import_state_name(imp->imp_state));
- /* Resurrect if we previously died */
- if ((imp->imp_state != LUSTRE_IMP_FULL &&
- imp->imp_state != LUSTRE_IMP_NEW) || value > 1)
- ptlrpc_reconnect_import(imp);
- return 0;
- }
- if (KEY_IS(KEY_SET_INFO)) {
- struct mgs_send_param *msp;
-
- msp = val;
- rc = mgc_set_mgs_param(exp, msp);
- return rc;
- }
- if (KEY_IS(KEY_MGSSEC)) {
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct sptlrpc_flavor flvr;
-
- /*
- * empty string means using current flavor, if which haven't
- * been set yet, set it as null.
- *
- * if flavor has been set previously, check the asking flavor
- * must match the existing one.
- */
- if (vallen == 0) {
- if (cli->cl_flvr_mgc.sf_rpc != SPTLRPC_FLVR_INVALID)
- return 0;
- val = "null";
- vallen = 4;
- }
-
- rc = sptlrpc_parse_flavor(val, &flvr);
- if (rc) {
- CERROR("invalid sptlrpc flavor %s to MGS\n",
- (char *)val);
- return rc;
- }
-
- /*
- * caller already hold a mutex
- */
- if (cli->cl_flvr_mgc.sf_rpc == SPTLRPC_FLVR_INVALID) {
- cli->cl_flvr_mgc = flvr;
- } else if (memcmp(&cli->cl_flvr_mgc, &flvr,
- sizeof(flvr)) != 0) {
- char str[20];
-
- sptlrpc_flavor2name(&cli->cl_flvr_mgc,
- str, sizeof(str));
- LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n",
- (char *)val, str);
- rc = -EPERM;
- }
- return rc;
- }
-
- return rc;
-}
-
-static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val)
-{
- int rc = -EINVAL;
-
- if (KEY_IS(KEY_CONN_DATA)) {
- struct obd_import *imp = class_exp2cliimp(exp);
- struct obd_connect_data *data = val;
-
- if (*vallen == sizeof(*data)) {
- *data = imp->imp_connect_data;
- rc = 0;
- }
- }
-
- return rc;
-}
-
-static int mgc_import_event(struct obd_device *obd,
- struct obd_import *imp,
- enum obd_import_event event)
-{
- LASSERT(imp->imp_obd == obd);
- CDEBUG(D_MGC, "import event %#x\n", event);
-
- switch (event) {
- case IMP_EVENT_DISCON:
- /* MGC imports should not wait for recovery */
- if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
- ptlrpc_pinger_ir_down();
- break;
- case IMP_EVENT_INACTIVE:
- break;
- case IMP_EVENT_INVALIDATE: {
- struct ldlm_namespace *ns = obd->obd_namespace;
-
- ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
- break;
- }
- case IMP_EVENT_ACTIVE:
- CDEBUG(D_INFO, "%s: Reactivating import\n", obd->obd_name);
- /* Clearing obd_no_recov allows us to continue pinging */
- obd->obd_no_recov = 0;
- mgc_notify_active(obd);
- if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
- ptlrpc_pinger_ir_up();
- break;
- case IMP_EVENT_OCD:
- break;
- case IMP_EVENT_DEACTIVATE:
- case IMP_EVENT_ACTIVATE:
- break;
- default:
- CERROR("Unknown import event %#x\n", event);
- LBUG();
- }
- return 0;
-}
-
-enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
- CONFIG_READ_NRPAGES = 4
-};
-
-static int mgc_apply_recover_logs(struct obd_device *mgc,
- struct config_llog_data *cld,
- __u64 max_version,
- void *data, int datalen, bool mne_swab)
-{
- struct config_llog_instance *cfg = &cld->cld_cfg;
- struct mgs_nidtbl_entry *entry;
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs bufs;
- u64 prev_version = 0;
- char *inst;
- char *buf;
- int bufsz;
- int pos;
- int rc = 0;
- int off = 0;
-
- LASSERT(cfg->cfg_instance);
- LASSERT(cfg->cfg_sb == cfg->cfg_instance);
-
- inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
-
- pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
- if (pos >= PAGE_SIZE) {
- kfree(inst);
- return -E2BIG;
- }
-
- ++pos;
- buf = inst + pos;
- bufsz = PAGE_SIZE - pos;
-
- while (datalen > 0) {
- int entry_len = sizeof(*entry);
- int is_ost, i;
- struct obd_device *obd;
- char *obdname;
- char *cname;
- char *params;
- char *uuid;
- size_t len;
-
- rc = -EINVAL;
- if (datalen < sizeof(*entry))
- break;
-
- entry = (typeof(entry))(data + off);
-
- /* sanity check */
- if (entry->mne_nid_type != 0) /* only support type 0 for ipv4 */
- break;
- if (entry->mne_nid_count == 0) /* at least one nid entry */
- break;
- if (entry->mne_nid_size != sizeof(lnet_nid_t))
- break;
-
- entry_len += entry->mne_nid_count * entry->mne_nid_size;
- if (datalen < entry_len) /* must have entry_len at least */
- break;
-
- /* Keep this swab for normal mixed endian handling. LU-1644 */
- if (mne_swab)
- lustre_swab_mgs_nidtbl_entry(entry);
- if (entry->mne_length > PAGE_SIZE) {
- CERROR("MNE too large (%u)\n", entry->mne_length);
- break;
- }
-
- if (entry->mne_length < entry_len)
- break;
-
- off += entry->mne_length;
- datalen -= entry->mne_length;
- if (datalen < 0)
- break;
-
- if (entry->mne_version > max_version) {
- CERROR("entry index(%lld) is over max_index(%lld)\n",
- entry->mne_version, max_version);
- break;
- }
-
- if (prev_version >= entry->mne_version) {
- CERROR("index unsorted, prev %lld, now %lld\n",
- prev_version, entry->mne_version);
- break;
- }
- prev_version = entry->mne_version;
-
- /*
- * Write a string with format "nid::instance" to
- * lustre/<osc|mdc>/<target>-<osc|mdc>-<instance>/import.
- */
-
- is_ost = entry->mne_type == LDD_F_SV_TYPE_OST;
- memset(buf, 0, bufsz);
- obdname = buf;
- pos = 0;
-
- /* lustre-OST0001-osc-<instance #> */
- strcpy(obdname, cld->cld_logname);
- cname = strrchr(obdname, '-');
- if (!cname) {
- CERROR("mgc %s: invalid logname %s\n",
- mgc->obd_name, obdname);
- break;
- }
-
- pos = cname - obdname;
- obdname[pos] = 0;
- pos += sprintf(obdname + pos, "-%s%04x",
- is_ost ? "OST" : "MDT", entry->mne_index);
-
- cname = is_ost ? "osc" : "mdc";
- pos += sprintf(obdname + pos, "-%s-%s", cname, inst);
- lustre_cfg_bufs_reset(&bufs, obdname);
-
- /* find the obd by obdname */
- obd = class_name2obd(obdname);
- if (!obd) {
- CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n",
- mgc->obd_name, obdname);
- rc = 0;
- /* this is a safe race, when the ost is starting up...*/
- continue;
- }
-
- /* osc.import = "connection=<Conn UUID>::<target instance>" */
- ++pos;
- params = buf + pos;
- pos += sprintf(params, "%s.import=%s", cname, "connection=");
- uuid = buf + pos;
-
- down_read(&obd->u.cli.cl_sem);
- if (!obd->u.cli.cl_import) {
- /* client does not connect to the OST yet */
- up_read(&obd->u.cli.cl_sem);
- rc = 0;
- continue;
- }
-
- /* iterate all nids to find one */
- /* find uuid by nid */
- rc = -ENOENT;
- for (i = 0; i < entry->mne_nid_count; i++) {
- rc = client_import_find_conn(obd->u.cli.cl_import,
- entry->u.nids[0],
- (struct obd_uuid *)uuid);
- if (!rc)
- break;
- }
-
- up_read(&obd->u.cli.cl_sem);
- if (rc < 0) {
- CERROR("mgc: cannot find uuid by nid %s\n",
- libcfs_nid2str(entry->u.nids[0]));
- break;
- }
-
- CDEBUG(D_INFO, "Find uuid %s by nid %s\n",
- uuid, libcfs_nid2str(entry->u.nids[0]));
-
- pos += strlen(uuid);
- pos += sprintf(buf + pos, "::%u", entry->mne_instance);
- LASSERT(pos < bufsz);
-
- lustre_cfg_bufs_set_string(&bufs, 1, params);
-
- rc = -ENOMEM;
- len = lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen);
- lcfg = kzalloc(len, GFP_NOFS);
- if (!lcfg) {
- rc = -ENOMEM;
- break;
- }
- lustre_cfg_init(lcfg, LCFG_PARAM, &bufs);
-
- CDEBUG(D_INFO, "ir apply logs %lld/%lld for %s -> %s\n",
- prev_version, max_version, obdname, params);
-
- rc = class_process_config(lcfg);
- kfree(lcfg);
- if (rc)
- CDEBUG(D_INFO, "process config for %s error %d\n",
- obdname, rc);
-
- /* continue, even one with error */
- }
-
- kfree(inst);
- return rc;
-}
-
-/**
- * This function is called if this client was notified for target restarting
- * by the MGS. A CONFIG_READ RPC is going to send to fetch recovery logs.
- */
-static int mgc_process_recover_log(struct obd_device *obd,
- struct config_llog_data *cld)
-{
- struct ptlrpc_request *req = NULL;
- struct config_llog_instance *cfg = &cld->cld_cfg;
- struct mgs_config_body *body;
- struct mgs_config_res *res;
- struct ptlrpc_bulk_desc *desc;
- struct page **pages;
- int nrpages;
- bool eof = true;
- bool mne_swab;
- int i;
- int ealen;
- int rc;
-
- /* allocate buffer for bulk transfer.
- * if this is the first time for this mgs to read logs,
- * CONFIG_READ_NRPAGES_INIT will be used since it will read all logs
- * once; otherwise, it only reads increment of logs, this should be
- * small and CONFIG_READ_NRPAGES will be used.
- */
- nrpages = CONFIG_READ_NRPAGES;
- if (cfg->cfg_last_idx == 0) /* the first time */
- nrpages = CONFIG_READ_NRPAGES_INIT;
-
- pages = kcalloc(nrpages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < nrpages; i++) {
- pages[i] = alloc_page(GFP_KERNEL);
- if (!pages[i]) {
- rc = -ENOMEM;
- goto out;
- }
- }
-
-again:
- LASSERT(cld_is_recover(cld));
- LASSERT(mutex_is_locked(&cld->cld_lock));
- req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
- &RQF_MGS_CONFIG_READ);
- if (!req) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ptlrpc_request_pack(req, LUSTRE_MGS_VERSION, MGS_CONFIG_READ);
- if (rc)
- goto out;
-
- /* pack request */
- body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
- LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
- if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name))
- >= sizeof(body->mcb_name)) {
- rc = -E2BIG;
- goto out;
- }
- body->mcb_offset = cfg->cfg_last_idx + 1;
- body->mcb_type = cld->cld_type;
- body->mcb_bits = PAGE_SHIFT;
- body->mcb_units = nrpages;
-
- /* allocate bulk transfer descriptor */
- desc = ptlrpc_prep_bulk_imp(req, nrpages, 1,
- PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
- MGS_BULK_PORTAL,
- &ptlrpc_bulk_kiov_pin_ops);
- if (!desc) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < nrpages; i++)
- desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES);
- if (res->mcr_size < res->mcr_offset) {
- rc = -EINVAL;
- goto out;
- }
-
- /* always update the index even though it might have errors with
- * handling the recover logs
- */
- cfg->cfg_last_idx = res->mcr_offset;
- eof = res->mcr_offset == res->mcr_size;
-
- CDEBUG(D_INFO, "Latest version %lld, more %d.\n",
- res->mcr_offset, eof == false);
-
- ealen = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, 0);
- if (ealen < 0) {
- rc = ealen;
- goto out;
- }
-
- if (ealen > nrpages << PAGE_SHIFT) {
- rc = -EINVAL;
- goto out;
- }
-
- if (ealen == 0) { /* no logs transferred */
- if (!eof)
- rc = -EINVAL;
- goto out;
- }
-
- mne_swab = !!ptlrpc_rep_need_swab(req);
-#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
- /* This import flag means the server did an extra swab of IR MNE
- * records (fixed in LU-1252), reverse it here if needed. LU-1644
- */
- if (unlikely(req->rq_import->imp_need_mne_swab))
- mne_swab = !mne_swab;
-#endif
-
- for (i = 0; i < nrpages && ealen > 0; i++) {
- int rc2;
- void *ptr;
-
- ptr = kmap(pages[i]);
- rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
- min_t(int, ealen, PAGE_SIZE),
- mne_swab);
- kunmap(pages[i]);
- if (rc2 < 0) {
- CWARN("Process recover log %s error %d\n",
- cld->cld_logname, rc2);
- break;
- }
-
- ealen -= PAGE_SIZE;
- }
-
-out:
- if (req)
- ptlrpc_req_finished(req);
-
- if (rc == 0 && !eof)
- goto again;
-
- if (pages) {
- for (i = 0; i < nrpages; i++) {
- if (!pages[i])
- break;
- __free_page(pages[i]);
- }
- kfree(pages);
- }
- return rc;
-}
-
-/* local_only means it cannot get remote llogs */
-static int mgc_process_cfg_log(struct obd_device *mgc,
- struct config_llog_data *cld, int local_only)
-{
- struct llog_ctxt *ctxt;
- struct lustre_sb_info *lsi = NULL;
- int rc = 0;
- bool sptlrpc_started = false;
- struct lu_env *env;
-
- LASSERT(cld);
- LASSERT(mutex_is_locked(&cld->cld_lock));
-
- /*
- * local copy of sptlrpc log is controlled elsewhere, don't try to
- * read it up here.
- */
- if (cld_is_sptlrpc(cld) && local_only)
- return 0;
-
- if (cld->cld_cfg.cfg_sb)
- lsi = s2lsi(cld->cld_cfg.cfg_sb);
-
- env = kzalloc(sizeof(*env), GFP_KERNEL);
- if (!env)
- return -ENOMEM;
-
- rc = lu_env_init(env, LCT_MG_THREAD);
- if (rc)
- goto out_free;
-
- ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
- LASSERT(ctxt);
-
- if (local_only) /* no local log at client side */ {
- rc = -EIO;
- goto out_pop;
- }
-
- if (cld_is_sptlrpc(cld)) {
- sptlrpc_conf_log_update_begin(cld->cld_logname);
- sptlrpc_started = true;
- }
-
- /* logname and instance info should be the same, so use our
- * copy of the instance for the update. The cfg_last_idx will
- * be updated here.
- */
- rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
- &cld->cld_cfg);
-
-out_pop:
- __llog_ctxt_put(env, ctxt);
-
- /*
- * update settings on existing OBDs. doing it inside
- * of llog_process_lock so no device is attaching/detaching
- * in parallel.
- * the logname must be <fsname>-sptlrpc
- */
- if (sptlrpc_started) {
- LASSERT(cld_is_sptlrpc(cld));
- sptlrpc_conf_log_update_end(cld->cld_logname);
- class_notify_sptlrpc_conf(cld->cld_logname,
- strlen(cld->cld_logname) -
- strlen("-sptlrpc"));
- }
-
- lu_env_fini(env);
-out_free:
- kfree(env);
- return rc;
-}
-
-static bool mgc_import_in_recovery(struct obd_import *imp)
-{
- bool in_recovery = true;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL ||
- imp->imp_state == LUSTRE_IMP_CLOSED)
- in_recovery = false;
- spin_unlock(&imp->imp_lock);
-
- return in_recovery;
-}
-
-/**
- * Get a configuration log from the MGS and process it.
- *
- * This function is called for both clients and servers to process the
- * configuration log from the MGS. The MGC enqueues a DLM lock on the
- * log from the MGS, and if the lock gets revoked the MGC will be notified
- * by the lock cancellation callback that the config log has changed,
- * and will enqueue another MGS lock on it, and then continue processing
- * the new additions to the end of the log.
- *
- * Since the MGC import is not replayable, if the import is being evicted
- * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), retry to process
- * the log until recovery is finished or the import is closed.
- *
- * Make a local copy of the log before parsing it if appropriate (non-MGS
- * server) so that the server can start even when the MGS is down.
- *
- * There shouldn't be multiple processes running process_log at once --
- * sounds like badness. It actually might be fine, as long as they're not
- * trying to update from the same log simultaneously, in which case we
- * should use a per-log semaphore instead of cld_lock.
- *
- * \param[in] mgc MGC device by which to fetch the configuration log
- * \param[in] cld log processing state (stored in lock callback data)
- *
- * \retval 0 on success
- * \retval negative errno on failure
- */
-int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
-{
- struct lustre_handle lockh = { 0 };
- __u64 flags = LDLM_FL_NO_LRU;
- bool retry = false;
- int rc = 0, rcl;
-
- LASSERT(cld);
-
- /* I don't want multiple processes running process_log at once --
- * sounds like badness. It actually might be fine, as long as
- * we're not trying to update from the same log
- * simultaneously (in which case we should use a per-log sem.)
- */
-restart:
- mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping) {
- mutex_unlock(&cld->cld_lock);
- return 0;
- }
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
-
- CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname,
- cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
-
- /* Get the cfg lock on the llog */
- rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL,
- LCK_CR, &flags, NULL, NULL, NULL,
- cld, 0, NULL, &lockh);
- if (rcl == 0) {
- /* Get the cld, it will be released in mgc_blocking_ast. */
- config_log_get(cld);
- rc = ldlm_lock_set_data(&lockh, (void *)cld);
- LASSERT(rc == 0);
- } else {
- CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
-
- if (rcl == -ESHUTDOWN &&
- atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
- struct obd_import *imp;
-
- mutex_unlock(&cld->cld_lock);
- imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp);
-
- /*
- * Let's force the pinger, and wait the import to be
- * connected, note: since mgc import is non-replayable,
- * and even the import state is disconnected, it does
- * not mean the "recovery" is stopped, so we will keep
- * waitting until timeout or the import state is
- * FULL or closed
- */
- ptlrpc_pinger_force(imp);
-
- wait_event_idle_timeout(imp->imp_recovery_waitq,
- !mgc_import_in_recovery(imp),
- obd_timeout * HZ);
-
- if (imp->imp_state == LUSTRE_IMP_FULL) {
- retry = true;
- goto restart;
- } else {
- mutex_lock(&cld->cld_lock);
- spin_lock(&config_list_lock);
- cld->cld_lostlock = 1;
- spin_unlock(&config_list_lock);
- }
- } else {
- /* mark cld_lostlock so that it will requeue
- * after MGC becomes available.
- */
- spin_lock(&config_list_lock);
- cld->cld_lostlock = 1;
- spin_unlock(&config_list_lock);
- }
- }
-
- if (cld_is_recover(cld)) {
- rc = 0; /* this is not a fatal error for recover log */
- if (!rcl) {
- rc = mgc_process_recover_log(mgc, cld);
- if (rc) {
- CERROR("%s: recover log %s failed: rc = %d not fatal.\n",
- mgc->obd_name, cld->cld_logname, rc);
- rc = 0;
- spin_lock(&config_list_lock);
- cld->cld_lostlock = 1;
- spin_unlock(&config_list_lock);
- }
- }
- } else {
- rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
- }
-
- CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
- mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
-
- mutex_unlock(&cld->cld_lock);
-
- /* Now drop the lock so MGS can revoke it */
- if (!rcl)
- ldlm_lock_decref(&lockh, LCK_CR);
-
- return rc;
-}
-
-/** Called from lustre_process_log.
- * LCFG_LOG_START gets the config log from the MGS, processes it to start
- * any services, and adds it to the list logs to watch (follow).
- */
-static int mgc_process_config(struct obd_device *obd, u32 len, void *buf)
-{
- struct lustre_cfg *lcfg = buf;
- struct config_llog_instance *cfg = NULL;
- char *logname;
- int rc = 0;
-
- switch (lcfg->lcfg_command) {
- case LCFG_LOV_ADD_OBD: {
- /* Overloading this cfg command: register a new target */
- struct mgs_target_info *mti;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) !=
- sizeof(struct mgs_target_info)) {
- rc = -EINVAL;
- goto out;
- }
-
- mti = (struct mgs_target_info *)lustre_cfg_buf(lcfg, 1);
- CDEBUG(D_MGC, "add_target %s %#x\n",
- mti->mti_svname, mti->mti_flags);
- rc = mgc_target_register(obd->u.cli.cl_mgc_mgsexp, mti);
- break;
- }
- case LCFG_LOV_DEL_OBD:
- /* Unregister has no meaning at the moment. */
- CERROR("lov_del_obd unimplemented\n");
- rc = -ENOSYS;
- break;
- case LCFG_SPTLRPC_CONF: {
- rc = sptlrpc_process_config(lcfg);
- break;
- }
- case LCFG_LOG_START: {
- struct config_llog_data *cld;
- struct super_block *sb;
-
- logname = lustre_cfg_string(lcfg, 1);
- cfg = (struct config_llog_instance *)lustre_cfg_buf(lcfg, 2);
- sb = *(struct super_block **)lustre_cfg_buf(lcfg, 3);
-
- CDEBUG(D_MGC, "parse_log %s from %d\n", logname,
- cfg->cfg_last_idx);
-
- /* We're only called through here on the initial mount */
- cld = config_log_add(obd, logname, cfg, sb);
- if (IS_ERR(cld)) {
- rc = PTR_ERR(cld);
- break;
- }
-
- /* COMPAT_146 */
- /* FIXME only set this for old logs! Right now this forces
- * us to always skip the "inside markers" check
- */
- cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146;
-
- rc = mgc_process_log(obd, cld);
- if (rc == 0 && cld->cld_recover) {
- if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
- imp_connect_data, IMP_RECOV)) {
- rc = mgc_process_log(obd, cld->cld_recover);
- } else {
- struct config_llog_data *cir;
-
- mutex_lock(&cld->cld_lock);
- cir = cld->cld_recover;
- cld->cld_recover = NULL;
- mutex_unlock(&cld->cld_lock);
- config_log_put(cir);
- }
-
- if (rc)
- CERROR("Cannot process recover llog %d\n", rc);
- }
-
- if (rc == 0 && cld->cld_params) {
- rc = mgc_process_log(obd, cld->cld_params);
- if (rc == -ENOENT) {
- CDEBUG(D_MGC,
- "There is no params config file yet\n");
- rc = 0;
- }
- /* params log is optional */
- if (rc)
- CERROR(
- "%s: can't process params llog: rc = %d\n",
- obd->obd_name, rc);
- }
-
- break;
- }
- case LCFG_LOG_END: {
- logname = lustre_cfg_string(lcfg, 1);
-
- if (lcfg->lcfg_bufcount >= 2)
- cfg = (struct config_llog_instance *)lustre_cfg_buf(
- lcfg, 2);
- rc = config_log_end(logname, cfg);
- break;
- }
- default: {
- CERROR("Unknown command: %d\n", lcfg->lcfg_command);
- rc = -EINVAL;
- goto out;
- }
- }
-out:
- return rc;
-}
-
-static struct obd_ops mgc_obd_ops = {
- .owner = THIS_MODULE,
- .setup = mgc_setup,
- .precleanup = mgc_precleanup,
- .cleanup = mgc_cleanup,
- .add_conn = client_import_add_conn,
- .del_conn = client_import_del_conn,
- .connect = client_connect_import,
- .disconnect = client_disconnect_export,
- .set_info_async = mgc_set_info_async,
- .get_info = mgc_get_info,
- .import_event = mgc_import_event,
- .process_config = mgc_process_config,
-};
-
-static int __init mgc_init(void)
-{
- return class_register_type(&mgc_obd_ops, NULL,
- LUSTRE_MGC_NAME, NULL);
-}
-
-static void /*__exit*/ mgc_exit(void)
-{
- class_unregister_type(LUSTRE_MGC_NAME);
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Management Client");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(mgc_init);
-module_exit(mgc_exit);
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
deleted file mode 100644
index e3fa9acff4c4..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LUSTRE_FS) += obdclass.o
-
-obdclass-y := linux/linux-module.o linux/linux-sysctl.o \
- llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
- genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
- lustre_handles.o lustre_peer.o statfs_pack.o linkea.o \
- obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \
- cl_object.o cl_page.o cl_lock.o cl_io.o kernelcomm.o
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
deleted file mode 100644
index a0db830ca841..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Internal cl interfaces.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-#ifndef _CL_INTERNAL_H
-#define _CL_INTERNAL_H
-
-#define CLT_PVEC_SIZE (14)
-
-/**
- * Possible levels of the nesting. Currently this is 2: there are "top"
- * entities (files, extent locks), and "sub" entities (stripes and stripe
- * locks). This is used only for debugging counters right now.
- */
-enum clt_nesting_level {
- CNL_TOP,
- CNL_SUB,
- CNL_NR
-};
-
-/**
- * Thread local state internal for generic cl-code.
- */
-struct cl_thread_info {
- /*
- * Common fields.
- */
- struct cl_io clt_io;
- struct cl_2queue clt_queue;
-
- /*
- * Fields used by cl_lock.c
- */
- struct cl_lock_descr clt_descr;
- struct cl_page_list clt_list;
- /** @} debugging */
-
- /*
- * Fields used by cl_page.c
- */
- struct cl_page *clt_pvec[CLT_PVEC_SIZE];
-
- /*
- * Fields used by cl_io.c
- */
- /**
- * Pointer to the topmost ongoing IO in this thread.
- */
- struct cl_io *clt_current_io;
- /**
- * Used for submitting a sync io.
- */
- struct cl_sync_io clt_anchor;
- /**
- * Fields used by cl_lock_discard_pages().
- */
- pgoff_t clt_next_index;
- pgoff_t clt_fn_index; /* first non-overlapped index */
-};
-
-struct cl_thread_info *cl_env_info(const struct lu_env *env);
-
-#endif /* _CL_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
deleted file mode 100644
index ab84e011b560..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ /dev/null
@@ -1,1152 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Client IO.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_fid.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <cl_object.h>
-#include "cl_internal.h"
-
-/*****************************************************************************
- *
- * cl_io interface.
- *
- */
-
-#define cl_io_for_each(slice, io) \
- list_for_each_entry((slice), &io->ci_layers, cis_linkage)
-#define cl_io_for_each_reverse(slice, io) \
- list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
-
-static inline int cl_io_type_is_valid(enum cl_io_type type)
-{
- return CIT_READ <= type && type < CIT_OP_NR;
-}
-
-static inline int cl_io_is_loopable(const struct cl_io *io)
-{
- return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
-}
-
-/**
- * Returns true iff there is an IO ongoing in the given environment.
- */
-int cl_io_is_going(const struct lu_env *env)
-{
- return cl_env_info(env)->clt_current_io != NULL;
-}
-
-/**
- * cl_io invariant that holds at all times when exported cl_io_*() functions
- * are entered and left.
- */
-static int cl_io_invariant(const struct cl_io *io)
-{
- struct cl_io *up;
-
- up = io->ci_parent;
- return
- /*
- * io can own pages only when it is ongoing. Sub-io might
- * still be in CIS_LOCKED state when top-io is in
- * CIS_IO_GOING.
- */
- ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
- (io->ci_state == CIS_LOCKED && up));
-}
-
-/**
- * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
- */
-void cl_io_fini(const struct lu_env *env, struct cl_io *io)
-{
- struct cl_io_slice *slice;
- struct cl_thread_info *info;
-
- LINVRNT(cl_io_type_is_valid(io->ci_type));
- LINVRNT(cl_io_invariant(io));
-
- while (!list_empty(&io->ci_layers)) {
- slice = container_of(io->ci_layers.prev, struct cl_io_slice,
- cis_linkage);
- list_del_init(&slice->cis_linkage);
- if (slice->cis_iop->op[io->ci_type].cio_fini)
- slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
- /*
- * Invalidate slice to catch use after free. This assumes that
- * slices are allocated within session and can be touched
- * after ->cio_fini() returns.
- */
- slice->cis_io = NULL;
- }
- io->ci_state = CIS_FINI;
- info = cl_env_info(env);
- if (info->clt_current_io == io)
- info->clt_current_io = NULL;
-
- /* sanity check for layout change */
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- case CIT_DATA_VERSION:
- break;
- case CIT_FAULT:
- break;
- case CIT_FSYNC:
- LASSERT(!io->ci_need_restart);
- break;
- case CIT_SETATTR:
- case CIT_MISC:
- /* Check ignore layout change conf */
- LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
- !io->ci_need_restart));
- break;
- default:
- LBUG();
- }
-}
-EXPORT_SYMBOL(cl_io_fini);
-
-static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj)
-{
- struct cl_object *scan;
- int result;
-
- LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
- LINVRNT(cl_io_type_is_valid(iot));
- LINVRNT(cl_io_invariant(io));
-
- io->ci_type = iot;
- INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
- INIT_LIST_HEAD(&io->ci_lockset.cls_done);
- INIT_LIST_HEAD(&io->ci_layers);
-
- result = 0;
- cl_object_for_each(scan, obj) {
- if (scan->co_ops->coo_io_init) {
- result = scan->co_ops->coo_io_init(env, scan, io);
- if (result != 0)
- break;
- }
- }
- if (result == 0)
- io->ci_state = CIS_INIT;
- return result;
-}
-
-/**
- * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
- *
- * \pre obj != cl_object_top(obj)
- */
-int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj)
-{
- struct cl_thread_info *info = cl_env_info(env);
-
- LASSERT(obj != cl_object_top(obj));
- if (!info->clt_current_io)
- info->clt_current_io = io;
- return cl_io_init0(env, io, iot, obj);
-}
-EXPORT_SYMBOL(cl_io_sub_init);
-
-/**
- * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
- *
- * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
- * what the latter returned.
- *
- * \pre obj == cl_object_top(obj)
- * \pre cl_io_type_is_valid(iot)
- * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
- */
-int cl_io_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj)
-{
- struct cl_thread_info *info = cl_env_info(env);
-
- LASSERT(obj == cl_object_top(obj));
- LASSERT(!info->clt_current_io);
-
- info->clt_current_io = io;
- return cl_io_init0(env, io, iot, obj);
-}
-EXPORT_SYMBOL(cl_io_init);
-
-/**
- * Initialize read or write io.
- *
- * \pre iot == CIT_READ || iot == CIT_WRITE
- */
-int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, loff_t pos, size_t count)
-{
- LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
- LINVRNT(io->ci_obj);
-
- LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
- "io range: %u [%llu, %llu) %u %u\n",
- iot, (__u64)pos, (__u64)pos + count,
- io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
- io->u.ci_rw.crw_pos = pos;
- io->u.ci_rw.crw_count = count;
- return cl_io_init(env, io, iot, io->ci_obj);
-}
-EXPORT_SYMBOL(cl_io_rw_init);
-
-static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
- lu_object_fid(&d1->cld_obj->co_lu));
-}
-
-/*
- * Sort locks in lexicographical order of their (fid, start-offset) pairs.
- */
-static void cl_io_locks_sort(struct cl_io *io)
-{
- int done = 0;
-
- /* hidden treasure: bubble sort for now. */
- do {
- struct cl_io_lock_link *curr;
- struct cl_io_lock_link *prev;
- struct cl_io_lock_link *temp;
-
- done = 1;
- prev = NULL;
-
- list_for_each_entry_safe(curr, temp,
- &io->ci_lockset.cls_todo,
- cill_linkage) {
- if (prev) {
- switch (cl_lock_descr_sort(&prev->cill_descr,
- &curr->cill_descr)) {
- case 0:
- /*
- * IMPOSSIBLE: Identical locks are
- * already removed at
- * this point.
- */
- default:
- LBUG();
- case 1:
- list_move_tail(&curr->cill_linkage,
- &prev->cill_linkage);
- done = 0;
- continue; /* don't change prev: it's
- * still "previous"
- */
- case -1: /* already in order */
- break;
- }
- }
- prev = curr;
- }
- } while (!done);
-}
-
-static void cl_lock_descr_merge(struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- d0->cld_start = min(d0->cld_start, d1->cld_start);
- d0->cld_end = max(d0->cld_end, d1->cld_end);
-
- if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
- d0->cld_mode = CLM_WRITE;
-
- if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
- d0->cld_mode = CLM_GROUP;
-}
-
-static int cl_lockset_merge(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- struct cl_io_lock_link *scan;
-
- list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
- if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
- continue;
-
- /* Merge locks for the same object because ldlm lock server
- * may expand the lock extent, otherwise there is a deadlock
- * case if two conflicted locks are queueud for the same object
- * and lock server expands one lock to overlap the another.
- * The side effect is that it can generate a multi-stripe lock
- * that may cause casacading problem
- */
- cl_lock_descr_merge(&scan->cill_descr, need);
- CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
- scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
- scan->cill_descr.cld_end);
- return 1;
- }
- return 0;
-}
-
-static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_lockset *set)
-{
- struct cl_io_lock_link *link;
- struct cl_io_lock_link *temp;
- int result;
-
- result = 0;
- list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
- result = cl_lock_request(env, io, &link->cill_lock);
- if (result < 0)
- break;
-
- list_move(&link->cill_linkage, &set->cls_done);
- }
- return result;
-}
-
-/**
- * Takes locks necessary for the current iteration of io.
- *
- * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
- * by layers for the current iteration. Then sort locks (to avoid dead-locks),
- * and acquire them.
- */
-int cl_io_lock(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_IT_STARTED);
- LINVRNT(cl_io_invariant(io));
-
- cl_io_for_each(scan, io) {
- if (!scan->cis_iop->op[io->ci_type].cio_lock)
- continue;
- result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
- if (result != 0)
- break;
- }
- if (result == 0) {
- cl_io_locks_sort(io);
- result = cl_lockset_lock(env, io, &io->ci_lockset);
- }
- if (result != 0)
- cl_io_unlock(env, io);
- else
- io->ci_state = CIS_LOCKED;
- return result;
-}
-EXPORT_SYMBOL(cl_io_lock);
-
-/**
- * Release locks takes by io.
- */
-void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
-{
- struct cl_lockset *set;
- struct cl_io_lock_link *link;
- struct cl_io_lock_link *temp;
- const struct cl_io_slice *scan;
-
- LASSERT(cl_io_is_loopable(io));
- LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
- LINVRNT(cl_io_invariant(io));
-
- set = &io->ci_lockset;
-
- list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
- list_del_init(&link->cill_linkage);
- if (link->cill_fini)
- link->cill_fini(env, link);
- }
-
- list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
- list_del_init(&link->cill_linkage);
- cl_lock_release(env, &link->cill_lock);
- if (link->cill_fini)
- link->cill_fini(env, link);
- }
-
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_unlock)
- scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
- }
- io->ci_state = CIS_UNLOCKED;
-}
-EXPORT_SYMBOL(cl_io_unlock);
-
-/**
- * Prepares next iteration of io.
- *
- * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
- * layers a chance to modify io parameters, e.g., so that lov can restrict io
- * to a single stripe.
- */
-int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
- int result;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
- LINVRNT(cl_io_invariant(io));
-
- result = 0;
- cl_io_for_each(scan, io) {
- if (!scan->cis_iop->op[io->ci_type].cio_iter_init)
- continue;
- result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
- scan);
- if (result != 0)
- break;
- }
- if (result == 0)
- io->ci_state = CIS_IT_STARTED;
- return result;
-}
-EXPORT_SYMBOL(cl_io_iter_init);
-
-/**
- * Finalizes io iteration.
- *
- * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
- */
-void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_UNLOCKED);
- LINVRNT(cl_io_invariant(io));
-
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_iter_fini)
- scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
- }
- io->ci_state = CIS_IT_ENDED;
-}
-EXPORT_SYMBOL(cl_io_iter_fini);
-
-/**
- * Records that read or write io progressed \a nob bytes forward.
- */
-static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io,
- size_t nob)
-{
- const struct cl_io_slice *scan;
-
- LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- nob == 0);
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(cl_io_invariant(io));
-
- io->u.ci_rw.crw_pos += nob;
- io->u.ci_rw.crw_count -= nob;
-
- /* layers have to be notified. */
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_advance)
- scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
- nob);
- }
-}
-
-/**
- * Adds a lock to a lockset.
- */
-int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link)
-{
- int result;
-
- if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) {
- result = 1;
- } else {
- list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
- result = 0;
- }
- return result;
-}
-EXPORT_SYMBOL(cl_io_lock_add);
-
-static void cl_free_io_lock_link(const struct lu_env *env,
- struct cl_io_lock_link *link)
-{
- kfree(link);
-}
-
-/**
- * Allocates new lock link, and uses it to add a lock to a lockset.
- */
-int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr)
-{
- struct cl_io_lock_link *link;
- int result;
-
- link = kzalloc(sizeof(*link), GFP_NOFS);
- if (link) {
- link->cill_descr = *descr;
- link->cill_fini = cl_free_io_lock_link;
- result = cl_io_lock_add(env, io, link);
- if (result) /* lock match */
- link->cill_fini(env, link);
- } else {
- result = -ENOMEM;
- }
-
- return result;
-}
-EXPORT_SYMBOL(cl_io_lock_alloc_add);
-
-/**
- * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
- */
-int cl_io_start(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
-
- io->ci_state = CIS_IO_GOING;
- cl_io_for_each(scan, io) {
- if (!scan->cis_iop->op[io->ci_type].cio_start)
- continue;
- result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
- if (result != 0)
- break;
- }
- if (result >= 0)
- result = 0;
- return result;
-}
-EXPORT_SYMBOL(cl_io_start);
-
-/**
- * Wait until current io iteration is finished by calling
- * cl_io_operations::cio_end() bottom-to-top.
- */
-void cl_io_end(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_IO_GOING);
- LINVRNT(cl_io_invariant(io));
-
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_end)
- scan->cis_iop->op[io->ci_type].cio_end(env, scan);
- /* TODO: error handling. */
- }
- io->ci_state = CIS_IO_FINISHED;
-}
-EXPORT_SYMBOL(cl_io_end);
-
-/**
- * Called by read io, to decide the readahead extent
- *
- * \see cl_io_operations::cio_read_ahead()
- */
-int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
- pgoff_t start, struct cl_read_ahead *ra)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
-
- cl_io_for_each(scan, io) {
- if (!scan->cis_iop->cio_read_ahead)
- continue;
-
- result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
- if (result)
- break;
- }
- return result > 0 ? 0 : result;
-}
-EXPORT_SYMBOL(cl_io_read_ahead);
-
-/**
- * Commit a list of contiguous pages into writeback cache.
- *
- * \returns 0 if all pages committed, or errcode if error occurred.
- * \see cl_io_operations::cio_commit_async()
- */
-int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, int from, int to,
- cl_commit_cbt cb)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- cl_io_for_each(scan, io) {
- if (!scan->cis_iop->cio_commit_async)
- continue;
- result = scan->cis_iop->cio_commit_async(env, scan, queue,
- from, to, cb);
- if (result != 0)
- break;
- }
- return result;
-}
-EXPORT_SYMBOL(cl_io_commit_async);
-
-/**
- * Submits a list of pages for immediate io.
- *
- * After the function gets returned, The submitted pages are moved to
- * queue->c2_qout queue, and queue->c2_qin contain both the pages don't need
- * to be submitted, and the pages are errant to submit.
- *
- * \returns 0 if at least one page was submitted, error code otherwise.
- * \see cl_io_operations::cio_submit()
- */
-int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type crt, struct cl_2queue *queue)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- cl_io_for_each(scan, io) {
- if (!scan->cis_iop->cio_submit)
- continue;
- result = scan->cis_iop->cio_submit(env, scan, crt, queue);
- if (result != 0)
- break;
- }
- /*
- * If ->cio_submit() failed, no pages were sent.
- */
- LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
- return result;
-}
-EXPORT_SYMBOL(cl_io_submit_rw);
-
-static void cl_page_list_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
-
-/**
- * Submit a sync_io and wait for the IO to be finished, or error happens.
- * If \a timeout is zero, it means to wait for the IO unconditionally.
- */
-int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue,
- long timeout)
-{
- struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
- struct cl_page *pg;
- int rc;
-
- cl_page_list_for_each(pg, &queue->c2_qin) {
- LASSERT(!pg->cp_sync_io);
- pg->cp_sync_io = anchor;
- }
-
- cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
- rc = cl_io_submit_rw(env, io, iot, queue);
- if (rc == 0) {
- /*
- * If some pages weren't sent for any reason (e.g.,
- * read found up-to-date pages in the cache, or write found
- * clean pages), count them as completed to avoid infinite
- * wait.
- */
- cl_page_list_for_each(pg, &queue->c2_qin) {
- pg->cp_sync_io = NULL;
- cl_sync_io_note(env, anchor, 1);
- }
-
- /* wait for the IO to be finished. */
- rc = cl_sync_io_wait(env, anchor, timeout);
- cl_page_list_assume(env, io, &queue->c2_qout);
- } else {
- LASSERT(list_empty(&queue->c2_qout.pl_pages));
- cl_page_list_for_each(pg, &queue->c2_qin)
- pg->cp_sync_io = NULL;
- }
- return rc;
-}
-EXPORT_SYMBOL(cl_io_submit_sync);
-
-/**
- * Main io loop.
- *
- * Pumps io through iterations calling
- *
- * - cl_io_iter_init()
- *
- * - cl_io_lock()
- *
- * - cl_io_start()
- *
- * - cl_io_end()
- *
- * - cl_io_unlock()
- *
- * - cl_io_iter_fini()
- *
- * repeatedly until there is no more io to do.
- */
-int cl_io_loop(const struct lu_env *env, struct cl_io *io)
-{
- int result = 0;
-
- LINVRNT(cl_io_is_loopable(io));
-
- do {
- size_t nob;
-
- io->ci_continue = 0;
- result = cl_io_iter_init(env, io);
- if (result == 0) {
- nob = io->ci_nob;
- result = cl_io_lock(env, io);
- if (result == 0) {
- /*
- * Notify layers that locks has been taken,
- * and do actual i/o.
- *
- * - llite: kms, short read;
- * - llite: generic_file_read();
- */
- result = cl_io_start(env, io);
- /*
- * Send any remaining pending
- * io, etc.
- *
- * - llite: ll_rw_stats_tally.
- */
- cl_io_end(env, io);
- cl_io_unlock(env, io);
- cl_io_rw_advance(env, io, io->ci_nob - nob);
- }
- }
- cl_io_iter_fini(env, io);
- } while (result == 0 && io->ci_continue);
- if (result == 0)
- result = io->ci_result;
- return result < 0 ? result : 0;
-}
-EXPORT_SYMBOL(cl_io_loop);
-
-/**
- * Adds io slice to the cl_io.
- *
- * This is called by cl_object_operations::coo_io_init() methods to add a
- * per-layer state to the io. New state is added at the end of
- * cl_io::ci_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
- */
-void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
- struct cl_object *obj,
- const struct cl_io_operations *ops)
-{
- struct list_head *linkage = &slice->cis_linkage;
-
- LASSERT((!linkage->prev && !linkage->next) ||
- list_empty(linkage));
-
- list_add_tail(linkage, &io->ci_layers);
- slice->cis_io = io;
- slice->cis_obj = obj;
- slice->cis_iop = ops;
-}
-EXPORT_SYMBOL(cl_io_slice_add);
-
-/**
- * Initializes page list.
- */
-void cl_page_list_init(struct cl_page_list *plist)
-{
- plist->pl_nr = 0;
- INIT_LIST_HEAD(&plist->pl_pages);
- plist->pl_owner = current;
-}
-EXPORT_SYMBOL(cl_page_list_init);
-
-/**
- * Adds a page to a page list.
- */
-void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
-{
- /* it would be better to check that page is owned by "current" io, but
- * it is not passed here.
- */
- LASSERT(page->cp_owner);
- LINVRNT(plist->pl_owner == current);
-
- LASSERT(list_empty(&page->cp_batch));
- list_add_tail(&page->cp_batch, &plist->pl_pages);
- ++plist->pl_nr;
- lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
- cl_page_get(page);
-}
-EXPORT_SYMBOL(cl_page_list_add);
-
-/**
- * Removes a page from a page list.
- */
-void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
- struct cl_page *page)
-{
- LASSERT(plist->pl_nr > 0);
- LASSERT(cl_page_is_vmlocked(env, page));
- LINVRNT(plist->pl_owner == current);
-
- list_del_init(&page->cp_batch);
- --plist->pl_nr;
- lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
- cl_page_put(env, page);
-}
-EXPORT_SYMBOL(cl_page_list_del);
-
-/**
- * Moves a page from one page list to another.
- */
-void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page)
-{
- LASSERT(src->pl_nr > 0);
- LINVRNT(dst->pl_owner == current);
- LINVRNT(src->pl_owner == current);
-
- list_move_tail(&page->cp_batch, &dst->pl_pages);
- --src->pl_nr;
- ++dst->pl_nr;
- lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
- src, dst);
-}
-EXPORT_SYMBOL(cl_page_list_move);
-
-/**
- * Moves a page from one page list to the head of another list.
- */
-void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page)
-{
- LASSERT(src->pl_nr > 0);
- LINVRNT(dst->pl_owner == current);
- LINVRNT(src->pl_owner == current);
-
- list_move(&page->cp_batch, &dst->pl_pages);
- --src->pl_nr;
- ++dst->pl_nr;
- lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
- src, dst);
-}
-EXPORT_SYMBOL(cl_page_list_move_head);
-
-/**
- * splice the cl_page_list, just as list head does
- */
-void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
-{
- struct cl_page *page;
- struct cl_page *tmp;
-
- LINVRNT(list->pl_owner == current);
- LINVRNT(head->pl_owner == current);
-
- cl_page_list_for_each_safe(page, tmp, list)
- cl_page_list_move(head, list, page);
-}
-EXPORT_SYMBOL(cl_page_list_splice);
-
-
-/**
- * Disowns pages in a queue.
- */
-void cl_page_list_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
-{
- struct cl_page *page;
- struct cl_page *temp;
-
- LINVRNT(plist->pl_owner == current);
-
- cl_page_list_for_each_safe(page, temp, plist) {
- LASSERT(plist->pl_nr > 0);
-
- list_del_init(&page->cp_batch);
- --plist->pl_nr;
- /*
- * cl_page_disown0 rather than usual cl_page_disown() is used,
- * because pages are possibly in CPS_FREEING state already due
- * to the call to cl_page_list_discard().
- */
- /*
- * XXX cl_page_disown0() will fail if page is not locked.
- */
- cl_page_disown0(env, io, page);
- lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
- plist);
- cl_page_put(env, page);
- }
-}
-EXPORT_SYMBOL(cl_page_list_disown);
-
-/**
- * Releases pages from queue.
- */
-void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
-{
- struct cl_page *page;
- struct cl_page *temp;
-
- LINVRNT(plist->pl_owner == current);
-
- cl_page_list_for_each_safe(page, temp, plist)
- cl_page_list_del(env, plist, page);
- LASSERT(plist->pl_nr == 0);
-}
-EXPORT_SYMBOL(cl_page_list_fini);
-
-/**
- * Assumes all pages in a queue.
- */
-static void cl_page_list_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
-{
- struct cl_page *page;
-
- LINVRNT(plist->pl_owner == current);
-
- cl_page_list_for_each(page, plist)
- cl_page_assume(env, io, page);
-}
-
-/**
- * Discards all pages in a queue.
- */
-static void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *plist)
-{
- struct cl_page *page;
-
- LINVRNT(plist->pl_owner == current);
- cl_page_list_for_each(page, plist)
- cl_page_discard(env, io, page);
-}
-
-/**
- * Initialize dual page queue.
- */
-void cl_2queue_init(struct cl_2queue *queue)
-{
- cl_page_list_init(&queue->c2_qin);
- cl_page_list_init(&queue->c2_qout);
-}
-EXPORT_SYMBOL(cl_2queue_init);
-
-/**
- * Disown pages in both lists of a 2-queue.
- */
-void cl_2queue_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue)
-{
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_page_list_disown(env, io, &queue->c2_qout);
-}
-EXPORT_SYMBOL(cl_2queue_disown);
-
-/**
- * Discard (truncate) pages in both lists of a 2-queue.
- */
-void cl_2queue_discard(const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue)
-{
- cl_page_list_discard(env, io, &queue->c2_qin);
- cl_page_list_discard(env, io, &queue->c2_qout);
-}
-EXPORT_SYMBOL(cl_2queue_discard);
-
-/**
- * Finalize both page lists of a 2-queue.
- */
-void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
-{
- cl_page_list_fini(env, &queue->c2_qout);
- cl_page_list_fini(env, &queue->c2_qin);
-}
-EXPORT_SYMBOL(cl_2queue_fini);
-
-/**
- * Initialize a 2-queue to contain \a page in its incoming page list.
- */
-void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
-{
- cl_2queue_init(queue);
- /*
- * Add a page to the incoming page list of 2-queue.
- */
- cl_page_list_add(&queue->c2_qin, page);
-}
-EXPORT_SYMBOL(cl_2queue_init_page);
-
-/**
- * Returns top-level io.
- *
- * \see cl_object_top()
- */
-struct cl_io *cl_io_top(struct cl_io *io)
-{
- while (io->ci_parent)
- io = io->ci_parent;
- return io;
-}
-EXPORT_SYMBOL(cl_io_top);
-
-/**
- * Fills in attributes that are passed to server together with transfer. Only
- * attributes from \a flags may be touched. This can be called multiple times
- * for the same request.
- */
-void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
- struct cl_req_attr *attr)
-{
- struct cl_object *scan;
-
- cl_object_for_each(scan, obj) {
- if (scan->co_ops->coo_req_attr_set)
- scan->co_ops->coo_req_attr_set(env, scan, attr);
- }
-}
-EXPORT_SYMBOL(cl_req_attr_set);
-
-/* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to
- * wait for the IO to finish.
- */
-void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
-{
- wake_up_all(&anchor->csi_waitq);
-
- /* it's safe to nuke or reuse anchor now */
- atomic_set(&anchor->csi_barrier, 0);
-}
-EXPORT_SYMBOL(cl_sync_io_end);
-
-/**
- * Initialize synchronous io wait anchor
- */
-void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
- void (*end)(const struct lu_env *, struct cl_sync_io *))
-{
- memset(anchor, 0, sizeof(*anchor));
- init_waitqueue_head(&anchor->csi_waitq);
- atomic_set(&anchor->csi_sync_nr, nr);
- atomic_set(&anchor->csi_barrier, nr > 0);
- anchor->csi_sync_rc = 0;
- anchor->csi_end_io = end;
- LASSERT(end);
-}
-EXPORT_SYMBOL(cl_sync_io_init);
-
-/**
- * Wait until all IO completes. Transfer completion routine has to call
- * cl_sync_io_note() for every entity.
- */
-int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
- long timeout)
-{
- int rc = 1;
-
- LASSERT(timeout >= 0);
-
- if (timeout == 0)
- wait_event_idle(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0);
- else
- rc = wait_event_idle_timeout(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0,
- timeout * HZ);
- if (rc == 0) {
- rc = -ETIMEDOUT;
- CERROR("IO failed: %d, still wait for %d remaining entries\n",
- rc, atomic_read(&anchor->csi_sync_nr));
-
- wait_event_idle(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0);
- } else {
- rc = anchor->csi_sync_rc;
- }
- LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
-
- /* wait until cl_sync_io_note() has done wakeup */
- while (unlikely(atomic_read(&anchor->csi_barrier) != 0))
- cpu_relax();
-
-
- return rc;
-}
-EXPORT_SYMBOL(cl_sync_io_wait);
-
-/**
- * Indicate that transfer of a single page completed.
- */
-void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
- int ioret)
-{
- if (anchor->csi_sync_rc == 0 && ioret < 0)
- anchor->csi_sync_rc = ioret;
- /*
- * Synchronous IO done without releasing page lock (e.g., as a part of
- * ->{prepare,commit}_write(). Completion is used to signal the end of
- * IO.
- */
- LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
- if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
- LASSERT(anchor->csi_end_io);
- anchor->csi_end_io(env, anchor);
- /* Can't access anchor any more */
- }
-}
-EXPORT_SYMBOL(cl_sync_io_note);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
deleted file mode 100644
index 9ca29a26a38b..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ /dev/null
@@ -1,275 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Client Extent Lock.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_fid.h>
-#include <linux/list.h>
-#include <cl_object.h>
-#include "cl_internal.h"
-
-static void cl_lock_trace0(int level, const struct lu_env *env,
- const char *prefix, const struct cl_lock *lock,
- const char *func, const int line)
-{
- struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
-
- CDEBUG(level, "%s: %p (%p/%d) at %s():%d\n",
- prefix, lock, env, h->coh_nesting, func, line);
-}
-#define cl_lock_trace(level, env, prefix, lock) \
- cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
-
-/**
- * Adds lock slice to the compound lock.
- *
- * This is called by cl_object_operations::coo_lock_init() methods to add a
- * per-layer state to the lock. New state is added at the end of
- * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
- */
-void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
- struct cl_object *obj,
- const struct cl_lock_operations *ops)
-{
- slice->cls_lock = lock;
- list_add_tail(&slice->cls_linkage, &lock->cll_layers);
- slice->cls_obj = obj;
- slice->cls_ops = ops;
-}
-EXPORT_SYMBOL(cl_lock_slice_add);
-
-void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_lock_slice *slice;
- cl_lock_trace(D_DLMTRACE, env, "destroy lock", lock);
-
- while ((slice = list_first_entry_or_null(&lock->cll_layers,
- struct cl_lock_slice,
- cls_linkage)) != NULL) {
- list_del_init(lock->cll_layers.next);
- slice->cls_ops->clo_fini(env, slice);
- }
- POISON(lock, 0x5a, sizeof(*lock));
-}
-EXPORT_SYMBOL(cl_lock_fini);
-
-int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
- const struct cl_io *io)
-{
- struct cl_object *obj = lock->cll_descr.cld_obj;
- struct cl_object *scan;
- int result = 0;
-
- /* Make sure cl_lock::cll_descr is initialized. */
- LASSERT(obj);
-
- INIT_LIST_HEAD(&lock->cll_layers);
- list_for_each_entry(scan, &obj->co_lu.lo_header->loh_layers,
- co_lu.lo_linkage) {
- result = scan->co_ops->coo_lock_init(env, scan, lock, io);
- if (result != 0) {
- cl_lock_fini(env, lock);
- break;
- }
- }
-
- return result;
-}
-EXPORT_SYMBOL(cl_lock_init);
-
-/**
- * Returns a slice with a lock, corresponding to the given layer in the
- * device stack.
- *
- * \see cl_page_at()
- */
-const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
- const struct lu_device_type *dtype)
-{
- const struct cl_lock_slice *slice;
-
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
- return slice;
- }
- return NULL;
-}
-EXPORT_SYMBOL(cl_lock_at);
-
-void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
-
- cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_cancel)
- slice->cls_ops->clo_cancel(env, slice);
- }
-}
-EXPORT_SYMBOL(cl_lock_cancel);
-
-/**
- * Enqueue a lock.
- * \param anchor: if we need to wait for resources before getting the lock,
- * use @anchor for the purpose.
- * \retval 0 enqueue successfully
- * \retval <0 error code
- */
-int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
- struct cl_lock *lock, struct cl_sync_io *anchor)
-{
- const struct cl_lock_slice *slice;
- int rc = -ENOSYS;
-
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (!slice->cls_ops->clo_enqueue)
- continue;
-
- rc = slice->cls_ops->clo_enqueue(env, slice, io, anchor);
- if (rc != 0)
- break;
- }
- return rc;
-}
-EXPORT_SYMBOL(cl_lock_enqueue);
-
-/**
- * Main high-level entry point of cl_lock interface that finds existing or
- * enqueues new lock matching given description.
- */
-int cl_lock_request(const struct lu_env *env, struct cl_io *io,
- struct cl_lock *lock)
-{
- struct cl_sync_io *anchor = NULL;
- __u32 enq_flags = lock->cll_descr.cld_enq_flags;
- int rc;
-
- rc = cl_lock_init(env, lock, io);
- if (rc < 0)
- return rc;
-
- if ((enq_flags & CEF_ASYNC) && !(enq_flags & CEF_AGL)) {
- anchor = &cl_env_info(env)->clt_anchor;
- cl_sync_io_init(anchor, 1, cl_sync_io_end);
- }
-
- rc = cl_lock_enqueue(env, io, lock, anchor);
-
- if (anchor) {
- int rc2;
-
- /* drop the reference count held at initialization time */
- cl_sync_io_note(env, anchor, 0);
- rc2 = cl_sync_io_wait(env, anchor, 0);
- if (rc2 < 0 && rc == 0)
- rc = rc2;
- }
-
- if (rc < 0)
- cl_lock_release(env, lock);
-
- return rc;
-}
-EXPORT_SYMBOL(cl_lock_request);
-
-/**
- * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
- */
-void cl_lock_release(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
- cl_lock_cancel(env, lock);
- cl_lock_fini(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_release);
-
-const char *cl_lock_mode_name(const enum cl_lock_mode mode)
-{
- static const char * const names[] = {
- [CLM_READ] = "R",
- [CLM_WRITE] = "W",
- [CLM_GROUP] = "G"
- };
- if (0 <= mode && mode < ARRAY_SIZE(names))
- return names[mode];
- else
- return "U";
-}
-EXPORT_SYMBOL(cl_lock_mode_name);
-
-/**
- * Prints human readable representation of a lock description.
- */
-void cl_lock_descr_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_lock_descr *descr)
-{
- const struct lu_fid *fid;
-
- fid = lu_object_fid(&descr->cld_obj->co_lu);
- (*printer)(env, cookie, DDESCR "@" DFID, PDESCR(descr), PFID(fid));
-}
-EXPORT_SYMBOL(cl_lock_descr_print);
-
-/**
- * Prints human readable representation of \a lock to the \a f.
- */
-void cl_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
-
- (*printer)(env, cookie, "lock@%p", lock);
- cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
- (*printer)(env, cookie, " {\n");
-
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- (*printer)(env, cookie, " %s@%p: ",
- slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
- slice);
- if (slice->cls_ops->clo_print)
- slice->cls_ops->clo_print(env, cookie, printer, slice);
- (*printer)(env, cookie, "\n");
- }
- (*printer)(env, cookie, "} lock@%p\n", lock);
-}
-EXPORT_SYMBOL(cl_lock_print);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
deleted file mode 100644
index 7809f6ae1809..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ /dev/null
@@ -1,1061 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Client Lustre Object.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-/*
- * Locking.
- *
- * i_mutex
- * PG_locked
- * ->coh_attr_guard
- * ->ls_guard
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/libcfs/libcfs.h>
-/* class_put_type() */
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_fid.h>
-#include <linux/list.h>
-#include <linux/libcfs/libcfs_hash.h> /* for cfs_hash stuff */
-#include <cl_object.h>
-#include <lu_object.h>
-#include "cl_internal.h"
-
-static struct kmem_cache *cl_env_kmem;
-
-/** Lock class of cl_object_header::coh_attr_guard */
-static struct lock_class_key cl_attr_guard_class;
-
-/**
- * Initialize cl_object_header.
- */
-int cl_object_header_init(struct cl_object_header *h)
-{
- int result;
-
- result = lu_object_header_init(&h->coh_lu);
- if (result == 0) {
- spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
- h->coh_page_bufsize = 0;
- }
- return result;
-}
-EXPORT_SYMBOL(cl_object_header_init);
-
-/**
- * Returns a cl_object with a given \a fid.
- *
- * Returns either cached or newly created object. Additional reference on the
- * returned object is acquired.
- *
- * \see lu_object_find(), cl_page_find(), cl_lock_find()
- */
-struct cl_object *cl_object_find(const struct lu_env *env,
- struct cl_device *cd, const struct lu_fid *fid,
- const struct cl_object_conf *c)
-{
- might_sleep();
- return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
-}
-EXPORT_SYMBOL(cl_object_find);
-
-/**
- * Releases a reference on \a o.
- *
- * When last reference is released object is returned to the cache, unless
- * lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in its header.
- *
- * \see cl_page_put(), cl_lock_put().
- */
-void cl_object_put(const struct lu_env *env, struct cl_object *o)
-{
- lu_object_put(env, &o->co_lu);
-}
-EXPORT_SYMBOL(cl_object_put);
-
-/**
- * Acquire an additional reference to the object \a o.
- *
- * This can only be used to acquire _additional_ reference, i.e., caller
- * already has to possess at least one reference to \a o before calling this.
- *
- * \see cl_page_get(), cl_lock_get().
- */
-void cl_object_get(struct cl_object *o)
-{
- lu_object_get(&o->co_lu);
-}
-EXPORT_SYMBOL(cl_object_get);
-
-/**
- * Returns the top-object for a given \a o.
- *
- * \see cl_io_top()
- */
-struct cl_object *cl_object_top(struct cl_object *o)
-{
- struct cl_object_header *hdr = cl_object_header(o);
- struct cl_object *top;
-
- while (hdr->coh_parent)
- hdr = hdr->coh_parent;
-
- top = lu2cl(lu_object_top(&hdr->coh_lu));
- CDEBUG(D_TRACE, "%p -> %p\n", o, top);
- return top;
-}
-EXPORT_SYMBOL(cl_object_top);
-
-/**
- * Returns pointer to the lock protecting data-attributes for the given object
- * \a o.
- *
- * Data-attributes are protected by the cl_object_header::coh_attr_guard
- * spin-lock in the top-object.
- *
- * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
- */
-static spinlock_t *cl_object_attr_guard(struct cl_object *o)
-{
- return &cl_object_header(cl_object_top(o))->coh_attr_guard;
-}
-
-/**
- * Locks data-attributes.
- *
- * Prevents data-attributes from changing, until lock is released by
- * cl_object_attr_unlock(). This has to be called before calls to
- * cl_object_attr_get(), cl_object_attr_update().
- */
-void cl_object_attr_lock(struct cl_object *o)
- __acquires(cl_object_attr_guard(o))
-{
- spin_lock(cl_object_attr_guard(o));
-}
-EXPORT_SYMBOL(cl_object_attr_lock);
-
-/**
- * Releases data-attributes lock, acquired by cl_object_attr_lock().
- */
-void cl_object_attr_unlock(struct cl_object *o)
- __releases(cl_object_attr_guard(o))
-{
- spin_unlock(cl_object_attr_guard(o));
-}
-EXPORT_SYMBOL(cl_object_attr_unlock);
-
-/**
- * Returns data-attributes of an object \a obj.
- *
- * Every layer is asked (by calling cl_object_operations::coo_attr_get())
- * top-to-bottom to fill in parts of \a attr that this layer is responsible
- * for.
- */
-int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- struct lu_object_header *top;
- int result;
-
- assert_spin_locked(cl_object_attr_guard(obj));
-
- top = obj->co_lu.lo_header;
- result = 0;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_get) {
- result = obj->co_ops->coo_attr_get(env, obj, attr);
- if (result != 0) {
- if (result > 0)
- result = 0;
- break;
- }
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_object_attr_get);
-
-/**
- * Updates data-attributes of an object \a obj.
- *
- * Only attributes, mentioned in a validness bit-mask \a v are
- * updated. Calls cl_object_operations::coo_attr_update() on every layer,
- * bottom to top.
- */
-int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned int v)
-{
- struct lu_object_header *top;
- int result;
-
- assert_spin_locked(cl_object_attr_guard(obj));
-
- top = obj->co_lu.lo_header;
- result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_update) {
- result = obj->co_ops->coo_attr_update(env, obj, attr,
- v);
- if (result != 0) {
- if (result > 0)
- result = 0;
- break;
- }
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_object_attr_update);
-
-/**
- * Notifies layers (bottom-to-top) that glimpse AST was received.
- *
- * Layers have to fill \a lvb fields with information that will be shipped
- * back to glimpse issuer.
- *
- * \see cl_lock_operations::clo_glimpse()
- */
-int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
- struct ost_lvb *lvb)
-{
- struct lu_object_header *top;
- int result;
-
- top = obj->co_lu.lo_header;
- result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_glimpse) {
- result = obj->co_ops->coo_glimpse(env, obj, lvb);
- if (result != 0)
- break;
- }
- }
- LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
- "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu\n",
- lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
- lvb->lvb_ctime, lvb->lvb_blocks);
- return result;
-}
-EXPORT_SYMBOL(cl_object_glimpse);
-
-/**
- * Updates a configuration of an object \a obj.
- */
-int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- struct lu_object_header *top;
- int result;
-
- top = obj->co_lu.lo_header;
- result = 0;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_conf_set) {
- result = obj->co_ops->coo_conf_set(env, obj, conf);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_conf_set);
-
-/**
- * Prunes caches of pages and locks for this object.
- */
-int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
-{
- struct lu_object_header *top;
- struct cl_object *o;
- int result;
-
- top = obj->co_lu.lo_header;
- result = 0;
- list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
- if (o->co_ops->coo_prune) {
- result = o->co_ops->coo_prune(env, o);
- if (result != 0)
- break;
- }
- }
-
- return result;
-}
-EXPORT_SYMBOL(cl_object_prune);
-
-/**
- * Get stripe information of this object.
- */
-int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
- struct lov_user_md __user *uarg)
-{
- struct lu_object_header *top;
- int result = 0;
-
- top = obj->co_lu.lo_header;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_getstripe) {
- result = obj->co_ops->coo_getstripe(env, obj, uarg);
- if (result)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_object_getstripe);
-
-/**
- * Get fiemap extents from file object.
- *
- * \param env [in] lustre environment
- * \param obj [in] file object
- * \param key [in] fiemap request argument
- * \param fiemap [out] fiemap extents mapping retrived
- * \param buflen [in] max buffer length of @fiemap
- *
- * \retval 0 success
- * \retval < 0 error
- */
-int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
- struct ll_fiemap_info_key *key,
- struct fiemap *fiemap, size_t *buflen)
-{
- struct lu_object_header *top;
- int result = 0;
-
- top = obj->co_lu.lo_header;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_fiemap) {
- result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
- buflen);
- if (result)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_object_fiemap);
-
-int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_layout *cl)
-{
- struct lu_object_header *top = obj->co_lu.lo_header;
-
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_layout_get)
- return obj->co_ops->coo_layout_get(env, obj, cl);
- }
-
- return -EOPNOTSUPP;
-}
-EXPORT_SYMBOL(cl_object_layout_get);
-
-loff_t cl_object_maxbytes(struct cl_object *obj)
-{
- struct lu_object_header *top = obj->co_lu.lo_header;
- loff_t maxbytes = LLONG_MAX;
-
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_maxbytes)
- maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
- maxbytes);
- }
-
- return maxbytes;
-}
-EXPORT_SYMBOL(cl_object_maxbytes);
-
-/**
- * Helper function removing all object locks, and marking object for
- * deletion. All object pages must have been deleted at this point.
- *
- * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
- * and sub- objects respectively.
- */
-void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
-{
- struct cl_object_header *hdr = cl_object_header(obj);
-
- set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
-}
-EXPORT_SYMBOL(cl_object_kill);
-
-void cache_stats_init(struct cache_stats *cs, const char *name)
-{
- int i;
-
- cs->cs_name = name;
- for (i = 0; i < CS_NR; i++)
- atomic_set(&cs->cs_stats[i], 0);
-}
-
-static int cache_stats_print(const struct cache_stats *cs,
- struct seq_file *m, int h)
-{
- int i;
- /*
- * lookup hit total cached create
- * env: ...... ...... ...... ...... ......
- */
- if (h) {
- const char *names[CS_NR] = CS_NAMES;
-
- seq_printf(m, "%6s", " ");
- for (i = 0; i < CS_NR; i++)
- seq_printf(m, "%8s", names[i]);
- seq_printf(m, "\n");
- }
-
- seq_printf(m, "%5.5s:", cs->cs_name);
- for (i = 0; i < CS_NR; i++)
- seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
- return 0;
-}
-
-static void cl_env_percpu_refill(void);
-
-/**
- * Initialize client site.
- *
- * Perform common initialization (lu_site_init()), and initialize statistical
- * counters. Also perform global initializations on the first call.
- */
-int cl_site_init(struct cl_site *s, struct cl_device *d)
-{
- size_t i;
- int result;
-
- result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
- if (result == 0) {
- cache_stats_init(&s->cs_pages, "pages");
- for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
- atomic_set(&s->cs_pages_state[0], 0);
- cl_env_percpu_refill();
- }
- return result;
-}
-EXPORT_SYMBOL(cl_site_init);
-
-/**
- * Finalize client site. Dual to cl_site_init().
- */
-void cl_site_fini(struct cl_site *s)
-{
- lu_site_fini(&s->cs_lu);
-}
-EXPORT_SYMBOL(cl_site_fini);
-
-static struct cache_stats cl_env_stats = {
- .cs_name = "envs",
- .cs_stats = { ATOMIC_INIT(0), }
-};
-
-/**
- * Outputs client site statistical counters into a buffer. Suitable for
- * ll_rd_*()-style functions.
- */
-int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
-{
- size_t i;
- static const char * const pstate[] = {
- [CPS_CACHED] = "c",
- [CPS_OWNED] = "o",
- [CPS_PAGEOUT] = "w",
- [CPS_PAGEIN] = "r",
- [CPS_FREEING] = "f"
- };
-/*
- lookup hit total busy create
-pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
-locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
- env: ...... ...... ...... ...... ......
- */
- lu_site_stats_print(&site->cs_lu, m);
- cache_stats_print(&site->cs_pages, m, 1);
- seq_puts(m, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
- seq_printf(m, "%s: %u ", pstate[i],
- atomic_read(&site->cs_pages_state[i]));
- seq_puts(m, "]\n");
- cache_stats_print(&cl_env_stats, m, 0);
- seq_puts(m, "\n");
- return 0;
-}
-EXPORT_SYMBOL(cl_site_stats_print);
-
-/*****************************************************************************
- *
- * lu_env handling on client.
- *
- */
-
-/**
- * The most efficient way is to store cl_env pointer in task specific
- * structures. On Linux, it wont' be easy to use task_struct->journal_info
- * because Lustre code may call into other fs which has certain assumptions
- * about journal_info. Currently following fields in task_struct are identified
- * can be used for this purpose:
- * - tux_info: only on RedHat kernel.
- * - ...
- * \note As long as we use task_struct to store cl_env, we assume that once
- * called into Lustre, we'll never call into the other part of the kernel
- * which will use those fields in task_struct without explicitly exiting
- * Lustre.
- *
- * If there's no space in task_struct is available, hash will be used.
- * bz20044, bz22683.
- */
-
-static unsigned int cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit
- * for now.
- */
-static struct cl_env_cache {
- rwlock_t cec_guard;
- unsigned int cec_count;
- struct list_head cec_envs;
-} *cl_envs = NULL;
-
-struct cl_env {
- void *ce_magic;
- struct lu_env ce_lu;
- struct lu_context ce_ses;
-
- /*
- * Linkage into global list of all client environments. Used for
- * garbage collection.
- */
- struct list_head ce_linkage;
- /*
- *
- */
- int ce_ref;
- /*
- * Debugging field: address of the caller who made original
- * allocation.
- */
- void *ce_debug;
-};
-
-#define CL_ENV_INC(counter)
-#define CL_ENV_DEC(counter)
-
-static void cl_env_init0(struct cl_env *cle, void *debug)
-{
- LASSERT(cle->ce_ref == 0);
- LASSERT(cle->ce_magic == &cl_env_init0);
- LASSERT(!cle->ce_debug);
-
- cle->ce_ref = 1;
- cle->ce_debug = debug;
- CL_ENV_INC(busy);
-}
-
-static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
-{
- struct lu_env *env;
- struct cl_env *cle;
-
- cle = kmem_cache_zalloc(cl_env_kmem, GFP_NOFS);
- if (cle) {
- int rc;
-
- INIT_LIST_HEAD(&cle->ce_linkage);
- cle->ce_magic = &cl_env_init0;
- env = &cle->ce_lu;
- rc = lu_env_init(env, ctx_tags | LCT_CL_THREAD);
- if (rc == 0) {
- rc = lu_context_init(&cle->ce_ses,
- ses_tags | LCT_SESSION);
- if (rc == 0) {
- lu_context_enter(&cle->ce_ses);
- env->le_ses = &cle->ce_ses;
- cl_env_init0(cle, debug);
- } else {
- lu_env_fini(env);
- }
- }
- if (rc != 0) {
- kmem_cache_free(cl_env_kmem, cle);
- env = ERR_PTR(rc);
- } else {
- CL_ENV_INC(create);
- CL_ENV_INC(total);
- }
- } else {
- env = ERR_PTR(-ENOMEM);
- }
- return env;
-}
-
-static void cl_env_fini(struct cl_env *cle)
-{
- CL_ENV_DEC(total);
- lu_context_fini(&cle->ce_lu.le_ctx);
- lu_context_fini(&cle->ce_ses);
- kmem_cache_free(cl_env_kmem, cle);
-}
-
-static struct lu_env *cl_env_obtain(void *debug)
-{
- struct cl_env *cle;
- struct lu_env *env;
- int cpu = get_cpu();
-
- read_lock(&cl_envs[cpu].cec_guard);
- LASSERT(equi(cl_envs[cpu].cec_count == 0,
- list_empty(&cl_envs[cpu].cec_envs)));
- if (cl_envs[cpu].cec_count > 0) {
- int rc;
-
- cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env,
- ce_linkage);
- list_del_init(&cle->ce_linkage);
- cl_envs[cpu].cec_count--;
- read_unlock(&cl_envs[cpu].cec_guard);
- put_cpu();
-
- env = &cle->ce_lu;
- rc = lu_env_refill(env);
- if (rc == 0) {
- cl_env_init0(cle, debug);
- lu_context_enter(&env->le_ctx);
- lu_context_enter(&cle->ce_ses);
- } else {
- cl_env_fini(cle);
- env = ERR_PTR(rc);
- }
- } else {
- read_unlock(&cl_envs[cpu].cec_guard);
- put_cpu();
- env = cl_env_new(lu_context_tags_default,
- lu_session_tags_default, debug);
- }
- return env;
-}
-
-static inline struct cl_env *cl_env_container(struct lu_env *env)
-{
- return container_of(env, struct cl_env, ce_lu);
-}
-
-/**
- * Returns lu_env: if there already is an environment associated with the
- * current thread, it is returned, otherwise, new environment is allocated.
- *
- * Allocations are amortized through the global cache of environments.
- *
- * \param refcheck pointer to a counter used to detect environment leaks. In
- * the usual case cl_env_get() and cl_env_put() are called in the same lexical
- * scope and pointer to the same integer is passed as \a refcheck. This is
- * used to detect missed cl_env_put().
- *
- * \see cl_env_put()
- */
-struct lu_env *cl_env_get(u16 *refcheck)
-{
- struct lu_env *env;
-
- env = cl_env_obtain(__builtin_return_address(0));
- if (!IS_ERR(env)) {
- struct cl_env *cle;
-
- cle = cl_env_container(env);
- *refcheck = cle->ce_ref;
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
- }
- return env;
-}
-EXPORT_SYMBOL(cl_env_get);
-
-/**
- * Forces an allocation of a fresh environment with given tags.
- *
- * \see cl_env_get()
- */
-struct lu_env *cl_env_alloc(u16 *refcheck, u32 tags)
-{
- struct lu_env *env;
-
- env = cl_env_new(tags, tags, __builtin_return_address(0));
- if (!IS_ERR(env)) {
- struct cl_env *cle;
-
- cle = cl_env_container(env);
- *refcheck = cle->ce_ref;
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
- }
- return env;
-}
-EXPORT_SYMBOL(cl_env_alloc);
-
-static void cl_env_exit(struct cl_env *cle)
-{
- lu_context_exit(&cle->ce_lu.le_ctx);
- lu_context_exit(&cle->ce_ses);
-}
-
-/**
- * Finalizes and frees a given number of cached environments. This is done to
- * (1) free some memory (not currently hooked into VM), or (2) release
- * references to modules.
- */
-unsigned int cl_env_cache_purge(unsigned int nr)
-{
- struct cl_env *cle;
- unsigned int i;
-
- for_each_possible_cpu(i) {
- write_lock(&cl_envs[i].cec_guard);
- for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) {
- cle = container_of(cl_envs[i].cec_envs.next,
- struct cl_env, ce_linkage);
- list_del_init(&cle->ce_linkage);
- LASSERT(cl_envs[i].cec_count > 0);
- cl_envs[i].cec_count--;
- write_unlock(&cl_envs[i].cec_guard);
-
- cl_env_fini(cle);
- write_lock(&cl_envs[i].cec_guard);
- }
- LASSERT(equi(cl_envs[i].cec_count == 0,
- list_empty(&cl_envs[i].cec_envs)));
- write_unlock(&cl_envs[i].cec_guard);
- }
- return nr;
-}
-EXPORT_SYMBOL(cl_env_cache_purge);
-
-/**
- * Release an environment.
- *
- * Decrement \a env reference counter. When counter drops to 0, nothing in
- * this thread is using environment and it is returned to the allocation
- * cache, or freed straight away, if cache is large enough.
- */
-void cl_env_put(struct lu_env *env, u16 *refcheck)
-{
- struct cl_env *cle;
-
- cle = cl_env_container(env);
-
- LASSERT(cle->ce_ref > 0);
- LASSERT(ergo(refcheck, cle->ce_ref == *refcheck));
-
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
- if (--cle->ce_ref == 0) {
- int cpu = get_cpu();
-
- CL_ENV_DEC(busy);
- cle->ce_debug = NULL;
- cl_env_exit(cle);
- /*
- * Don't bother to take a lock here.
- *
- * Return environment to the cache only when it was allocated
- * with the standard tags.
- */
- if (cl_envs[cpu].cec_count < cl_envs_cached_max &&
- (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
- (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
- read_lock(&cl_envs[cpu].cec_guard);
- list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs);
- cl_envs[cpu].cec_count++;
- read_unlock(&cl_envs[cpu].cec_guard);
- } else {
- cl_env_fini(cle);
- }
- put_cpu();
- }
-}
-EXPORT_SYMBOL(cl_env_put);
-
-/**
- * Converts struct ost_lvb to struct cl_attr.
- *
- * \see cl_attr2lvb
- */
-void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
-{
- attr->cat_size = lvb->lvb_size;
- attr->cat_mtime = lvb->lvb_mtime;
- attr->cat_atime = lvb->lvb_atime;
- attr->cat_ctime = lvb->lvb_ctime;
- attr->cat_blocks = lvb->lvb_blocks;
-}
-EXPORT_SYMBOL(cl_lvb2attr);
-
-static struct cl_env cl_env_percpu[NR_CPUS];
-
-static int cl_env_percpu_init(void)
-{
- struct cl_env *cle;
- int tags = LCT_REMEMBER | LCT_NOREF;
- int i, j;
- int rc = 0;
-
- for_each_possible_cpu(i) {
- struct lu_env *env;
-
- rwlock_init(&cl_envs[i].cec_guard);
- INIT_LIST_HEAD(&cl_envs[i].cec_envs);
- cl_envs[i].cec_count = 0;
-
- cle = &cl_env_percpu[i];
- env = &cle->ce_lu;
-
- INIT_LIST_HEAD(&cle->ce_linkage);
- cle->ce_magic = &cl_env_init0;
- rc = lu_env_init(env, LCT_CL_THREAD | tags);
- if (rc == 0) {
- rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
- if (rc == 0) {
- lu_context_enter(&cle->ce_ses);
- env->le_ses = &cle->ce_ses;
- } else {
- lu_env_fini(env);
- }
- }
- if (rc != 0)
- break;
- }
- if (rc != 0) {
- /* Indices 0 to i (excluding i) were correctly initialized,
- * thus we must uninitialize up to i, the rest are undefined.
- */
- for (j = 0; j < i; j++) {
- cle = &cl_env_percpu[j];
- lu_context_exit(&cle->ce_ses);
- lu_context_fini(&cle->ce_ses);
- lu_env_fini(&cle->ce_lu);
- }
- }
-
- return rc;
-}
-
-static void cl_env_percpu_fini(void)
-{
- int i;
-
- for_each_possible_cpu(i) {
- struct cl_env *cle = &cl_env_percpu[i];
-
- lu_context_exit(&cle->ce_ses);
- lu_context_fini(&cle->ce_ses);
- lu_env_fini(&cle->ce_lu);
- }
-}
-
-static void cl_env_percpu_refill(void)
-{
- int i;
-
- for_each_possible_cpu(i)
- lu_env_refill(&cl_env_percpu[i].ce_lu);
-}
-
-void cl_env_percpu_put(struct lu_env *env)
-{
- struct cl_env *cle;
- int cpu;
-
- cpu = smp_processor_id();
- cle = cl_env_container(env);
- LASSERT(cle == &cl_env_percpu[cpu]);
-
- cle->ce_ref--;
- LASSERT(cle->ce_ref == 0);
-
- CL_ENV_DEC(busy);
- cle->ce_debug = NULL;
-
- put_cpu();
-}
-EXPORT_SYMBOL(cl_env_percpu_put);
-
-struct lu_env *cl_env_percpu_get(void)
-{
- struct cl_env *cle;
-
- cle = &cl_env_percpu[get_cpu()];
- cl_env_init0(cle, __builtin_return_address(0));
-
- return &cle->ce_lu;
-}
-EXPORT_SYMBOL(cl_env_percpu_get);
-
-/*****************************************************************************
- *
- * Temporary prototype thing: mirror obd-devices into cl devices.
- *
- */
-
-struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
- struct lu_device_type *ldt,
- struct lu_device *next)
-{
- const char *typename;
- struct lu_device *d;
-
- typename = ldt->ldt_name;
- d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
- if (!IS_ERR(d)) {
- int rc;
-
- if (site)
- d->ld_site = site;
- rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
- if (rc == 0) {
- lu_device_get(d);
- lu_ref_add(&d->ld_reference,
- "lu-stack", &lu_site_init);
- } else {
- ldt->ldt_ops->ldto_device_free(env, d);
- CERROR("can't init device '%s', %d\n", typename, rc);
- d = ERR_PTR(rc);
- }
- } else {
- CERROR("Cannot allocate device: '%s'\n", typename);
- }
- return lu2cl_dev(d);
-}
-EXPORT_SYMBOL(cl_type_setup);
-
-/**
- * Finalize device stack by calling lu_stack_fini().
- */
-void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
-{
- lu_stack_fini(env, cl2lu_dev(cl));
-}
-EXPORT_SYMBOL(cl_stack_fini);
-
-static struct lu_context_key cl_key;
-
-struct cl_thread_info *cl_env_info(const struct lu_env *env)
-{
- return lu_context_key_get(&env->le_ctx, &cl_key);
-}
-
-/* defines cl0_key_{init,fini}() */
-LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
-
-static void *cl_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- return cl0_key_init(ctx, key);
-}
-
-static void cl_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- cl0_key_fini(ctx, key, data);
-}
-
-static struct lu_context_key cl_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = cl_key_init,
- .lct_fini = cl_key_fini,
-};
-
-static struct lu_kmem_descr cl_object_caches[] = {
- {
- .ckd_cache = &cl_env_kmem,
- .ckd_name = "cl_env_kmem",
- .ckd_size = sizeof(struct cl_env)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/**
- * Global initialization of cl-data. Create kmem caches, register
- * lu_context_key's, etc.
- *
- * \see cl_global_fini()
- */
-int cl_global_init(void)
-{
- int result;
-
- cl_envs = kcalloc(num_possible_cpus(), sizeof(*cl_envs), GFP_KERNEL);
- if (!cl_envs) {
- result = -ENOMEM;
- goto out;
- }
-
- result = lu_kmem_init(cl_object_caches);
- if (result)
- goto out_envs;
-
- LU_CONTEXT_KEY_INIT(&cl_key);
- result = lu_context_key_register(&cl_key);
- if (result)
- goto out_kmem;
-
- result = cl_env_percpu_init();
- if (result)
- /* no cl_env_percpu_fini on error */
- goto out_keys;
-
- return 0;
-
-out_keys:
- lu_context_key_degister(&cl_key);
-out_kmem:
- lu_kmem_fini(cl_object_caches);
-out_envs:
- kfree(cl_envs);
-out:
- return result;
-}
-
-/**
- * Finalization of global cl-data. Dual to cl_global_init().
- */
-void cl_global_fini(void)
-{
- cl_env_percpu_fini();
- lu_context_key_degister(&cl_key);
- lu_kmem_fini(cl_object_caches);
- kfree(cl_envs);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
deleted file mode 100644
index d3b25667bc3a..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ /dev/null
@@ -1,1046 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Client Lustre Page.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/libcfs/libcfs.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <linux/list.h>
-
-#include <cl_object.h>
-#include "cl_internal.h"
-
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
-
-# define PASSERT(env, page, expr) \
- do { \
- if (unlikely(!(expr))) { \
- CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
- LASSERT(0); \
- } \
- } while (0)
-
-# define PINVRNT(env, page, exp) \
- ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
-
-/**
- * Internal version of cl_page_get().
- *
- * This function can be used to obtain initial reference to previously
- * unreferenced cached object. It can be called only if concurrent page
- * reclamation is somehow prevented, e.g., by keeping a lock on a VM page,
- * associated with \a page.
- *
- * Use with care! Not exported.
- */
-static void cl_page_get_trust(struct cl_page *page)
-{
- LASSERT(atomic_read(&page->cp_ref) > 0);
- atomic_inc(&page->cp_ref);
-}
-
-/**
- * Returns a slice within a page, corresponding to the given layer in the
- * device stack.
- *
- * \see cl_lock_at()
- */
-static const struct cl_page_slice *
-cl_page_at_trusted(const struct cl_page *page,
- const struct lu_device_type *dtype)
-{
- const struct cl_page_slice *slice;
-
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
- return slice;
- }
- return NULL;
-}
-
-static void cl_page_free(const struct lu_env *env, struct cl_page *page)
-{
- struct cl_object *obj = page->cp_obj;
-
- PASSERT(env, page, list_empty(&page->cp_batch));
- PASSERT(env, page, !page->cp_owner);
- PASSERT(env, page, page->cp_state == CPS_FREEING);
-
- while (!list_empty(&page->cp_layers)) {
- struct cl_page_slice *slice;
-
- slice = list_entry(page->cp_layers.next,
- struct cl_page_slice, cpl_linkage);
- list_del_init(page->cp_layers.next);
- if (unlikely(slice->cpl_ops->cpo_fini))
- slice->cpl_ops->cpo_fini(env, slice);
- }
- lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
- cl_object_put(env, obj);
- lu_ref_fini(&page->cp_reference);
- kfree(page);
-}
-
-/**
- * Helper function updating page state. This is the only place in the code
- * where cl_page::cp_state field is mutated.
- */
-static inline void cl_page_state_set_trust(struct cl_page *page,
- enum cl_page_state state)
-{
- /* bypass const. */
- *(enum cl_page_state *)&page->cp_state = state;
-}
-
-struct cl_page *cl_page_alloc(const struct lu_env *env,
- struct cl_object *o, pgoff_t ind,
- struct page *vmpage,
- enum cl_page_type type)
-{
- struct cl_page *page;
- struct lu_object_header *head;
-
- page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS);
- if (page) {
- int result = 0;
-
- atomic_set(&page->cp_ref, 1);
- page->cp_obj = o;
- cl_object_get(o);
- lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
- page);
- page->cp_vmpage = vmpage;
- cl_page_state_set_trust(page, CPS_CACHED);
- page->cp_type = type;
- INIT_LIST_HEAD(&page->cp_layers);
- INIT_LIST_HEAD(&page->cp_batch);
- lu_ref_init(&page->cp_reference);
- head = o->co_lu.lo_header;
- list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
- if (o->co_ops->coo_page_init) {
- result = o->co_ops->coo_page_init(env, o, page,
- ind);
- if (result != 0) {
- cl_page_delete0(env, page);
- cl_page_free(env, page);
- page = ERR_PTR(result);
- break;
- }
- }
- }
- } else {
- page = ERR_PTR(-ENOMEM);
- }
- return page;
-}
-
-/**
- * Returns a cl_page with index \a idx at the object \a o, and associated with
- * the VM page \a vmpage.
- *
- * This is the main entry point into the cl_page caching interface. First, a
- * cache (implemented as a per-object radix tree) is consulted. If page is
- * found there, it is returned immediately. Otherwise new page is allocated
- * and returned. In any case, additional reference to page is acquired.
- *
- * \see cl_object_find(), cl_lock_find()
- */
-struct cl_page *cl_page_find(const struct lu_env *env,
- struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type)
-{
- struct cl_page *page = NULL;
- struct cl_object_header *hdr;
-
- LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
- might_sleep();
-
- hdr = cl_object_header(o);
-
- CDEBUG(D_PAGE, "%lu@" DFID " %p %lx %d\n",
- idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
- /* fast path. */
- if (type == CPT_CACHEABLE) {
- /*
- * vmpage lock is used to protect the child/parent
- * relationship
- */
- LASSERT(PageLocked(vmpage));
- /*
- * cl_vmpage_page() can be called here without any locks as
- *
- * - "vmpage" is locked (which prevents ->private from
- * concurrent updates), and
- *
- * - "o" cannot be destroyed while current thread holds a
- * reference on it.
- */
- page = cl_vmpage_page(vmpage, o);
-
- if (page)
- return page;
- }
-
- /* allocate and initialize cl_page */
- page = cl_page_alloc(env, o, idx, vmpage, type);
- return page;
-}
-EXPORT_SYMBOL(cl_page_find);
-
-static inline int cl_page_invariant(const struct cl_page *pg)
-{
- return cl_page_in_use_noref(pg);
-}
-
-static void cl_page_state_set0(const struct lu_env *env,
- struct cl_page *page, enum cl_page_state state)
-{
- enum cl_page_state old;
-
- /*
- * Matrix of allowed state transitions [old][new], for sanity
- * checking.
- */
- static const int allowed_transitions[CPS_NR][CPS_NR] = {
- [CPS_CACHED] = {
- [CPS_CACHED] = 0,
- [CPS_OWNED] = 1, /* io finds existing cached page */
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 1, /* write-out from the cache */
- [CPS_FREEING] = 1, /* eviction on the memory pressure */
- },
- [CPS_OWNED] = {
- [CPS_CACHED] = 1, /* release to the cache */
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 1, /* start read immediately */
- [CPS_PAGEOUT] = 1, /* start write immediately */
- [CPS_FREEING] = 1, /* lock invalidation or truncate */
- },
- [CPS_PAGEIN] = {
- [CPS_CACHED] = 1, /* io completion */
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 0,
- [CPS_FREEING] = 0,
- },
- [CPS_PAGEOUT] = {
- [CPS_CACHED] = 1, /* io completion */
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 0,
- [CPS_FREEING] = 0,
- },
- [CPS_FREEING] = {
- [CPS_CACHED] = 0,
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 0,
- [CPS_FREEING] = 0,
- }
- };
-
- old = page->cp_state;
- PASSERT(env, page, allowed_transitions[old][state]);
- CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
- PASSERT(env, page, page->cp_state == old);
- PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner));
- cl_page_state_set_trust(page, state);
-}
-
-static void cl_page_state_set(const struct lu_env *env,
- struct cl_page *page, enum cl_page_state state)
-{
- cl_page_state_set0(env, page, state);
-}
-
-/**
- * Acquires an additional reference to a page.
- *
- * This can be called only by caller already possessing a reference to \a
- * page.
- *
- * \see cl_object_get(), cl_lock_get().
- */
-void cl_page_get(struct cl_page *page)
-{
- cl_page_get_trust(page);
-}
-EXPORT_SYMBOL(cl_page_get);
-
-/**
- * Releases a reference to a page.
- *
- * When last reference is released, page is returned to the cache, unless it
- * is in cl_page_state::CPS_FREEING state, in which case it is immediately
- * destroyed.
- *
- * \see cl_object_put(), cl_lock_put().
- */
-void cl_page_put(const struct lu_env *env, struct cl_page *page)
-{
- CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
- atomic_read(&page->cp_ref));
-
- if (atomic_dec_and_test(&page->cp_ref)) {
- LASSERT(page->cp_state == CPS_FREEING);
-
- LASSERT(atomic_read(&page->cp_ref) == 0);
- PASSERT(env, page, !page->cp_owner);
- PASSERT(env, page, list_empty(&page->cp_batch));
- /*
- * Page is no longer reachable by other threads. Tear
- * it down.
- */
- cl_page_free(env, page);
- }
-}
-EXPORT_SYMBOL(cl_page_put);
-
-/**
- * Returns a cl_page associated with a VM page, and given cl_object.
- */
-struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
-{
- struct cl_page *page;
-
- LASSERT(PageLocked(vmpage));
-
- /*
- * NOTE: absence of races and liveness of data are guaranteed by page
- * lock on a "vmpage". That works because object destruction has
- * bottom-to-top pass.
- */
-
- page = (struct cl_page *)vmpage->private;
- if (page) {
- cl_page_get_trust(page);
- LASSERT(page->cp_type == CPT_CACHEABLE);
- }
- return page;
-}
-EXPORT_SYMBOL(cl_vmpage_page);
-
-const struct cl_page_slice *cl_page_at(const struct cl_page *page,
- const struct lu_device_type *dtype)
-{
- return cl_page_at_trusted(page, dtype);
-}
-EXPORT_SYMBOL(cl_page_at);
-
-#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
-
-#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
-({ \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- int __result; \
- ptrdiff_t __op = (_op); \
- int (*__method)_proto; \
- \
- __result = 0; \
- list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method) { \
- __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- if (__result > 0) \
- __result = 0; \
- __result; \
-})
-
-#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
-do { \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- ptrdiff_t __op = (_op); \
- void (*__method)_proto; \
- \
- list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method) \
- (*__method)(__env, __scan, ## __VA_ARGS__); \
- } \
-} while (0)
-
-#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
-do { \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- ptrdiff_t __op = (_op); \
- void (*__method)_proto; \
- \
- list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method) \
- (*__method)(__env, __scan, ## __VA_ARGS__); \
- } \
-} while (0)
-
-static int cl_page_invoke(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page, ptrdiff_t op)
-
-{
- PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- return CL_PAGE_INVOKE(env, page, op,
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
-}
-
-static void cl_page_invoid(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page, ptrdiff_t op)
-
-{
- PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- CL_PAGE_INVOID(env, page, op,
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *), io);
-}
-
-static void cl_page_owner_clear(struct cl_page *page)
-{
- if (page->cp_owner) {
- LASSERT(page->cp_owner->ci_owned_nr > 0);
- page->cp_owner->ci_owned_nr--;
- page->cp_owner = NULL;
- }
-}
-
-static void cl_page_owner_set(struct cl_page *page)
-{
- page->cp_owner->ci_owned_nr++;
-}
-
-void cl_page_disown0(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- enum cl_page_state state;
-
- state = pg->cp_state;
- PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
- PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
- cl_page_owner_clear(pg);
-
- if (state == CPS_OWNED)
- cl_page_state_set(env, pg, CPS_CACHED);
- /*
- * Completion call-backs are executed in the bottom-up order, so that
- * uppermost layer (llite), responsible for VFS/VM interaction runs
- * last and can release locks safely.
- */
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
-}
-
-/**
- * returns true, iff page is owned by the given io.
- */
-int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
-{
- struct cl_io *top = cl_io_top((struct cl_io *)io);
-
- LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
- return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
-}
-EXPORT_SYMBOL(cl_page_is_owned);
-
-/**
- * Try to own a page by IO.
- *
- * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
- * into cl_page_state::CPS_OWNED state.
- *
- * \pre !cl_page_is_owned(pg, io)
- * \post result == 0 iff cl_page_is_owned(pg, io)
- *
- * \retval 0 success
- *
- * \retval -ve failure, e.g., page was destroyed (and landed in
- * cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED).
- * or, page was owned by another thread, or in IO.
- *
- * \see cl_page_disown()
- * \see cl_page_operations::cpo_own()
- * \see cl_page_own_try()
- * \see cl_page_own
- */
-static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, int nonblock)
-{
- int result;
-
- PINVRNT(env, pg, !cl_page_is_owned(pg, io));
-
- io = cl_io_top(io);
-
- if (pg->cp_state == CPS_FREEING) {
- result = -ENOENT;
- } else {
- result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
- (const struct lu_env *,
- const struct cl_page_slice *,
- struct cl_io *, int),
- io, nonblock);
- if (result == 0) {
- PASSERT(env, pg, !pg->cp_owner);
- pg->cp_owner = cl_io_top(io);
- cl_page_owner_set(pg);
- if (pg->cp_state != CPS_FREEING) {
- cl_page_state_set(env, pg, CPS_OWNED);
- } else {
- cl_page_disown0(env, io, pg);
- result = -ENOENT;
- }
- }
- }
- PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
- return result;
-}
-
-/**
- * Own a page, might be blocked.
- *
- * \see cl_page_own0()
- */
-int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
-{
- return cl_page_own0(env, io, pg, 0);
-}
-EXPORT_SYMBOL(cl_page_own);
-
-/**
- * Nonblock version of cl_page_own().
- *
- * \see cl_page_own0()
- */
-int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg)
-{
- return cl_page_own0(env, io, pg, 1);
-}
-EXPORT_SYMBOL(cl_page_own_try);
-
-/**
- * Assume page ownership.
- *
- * Called when page is already locked by the hosting VM.
- *
- * \pre !cl_page_is_owned(pg, io)
- * \post cl_page_is_owned(pg, io)
- *
- * \see cl_page_operations::cpo_assume()
- */
-void cl_page_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
-
- io = cl_io_top(io);
-
- cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
- PASSERT(env, pg, !pg->cp_owner);
- pg->cp_owner = cl_io_top(io);
- cl_page_owner_set(pg);
- cl_page_state_set(env, pg, CPS_OWNED);
-}
-EXPORT_SYMBOL(cl_page_assume);
-
-/**
- * Releases page ownership without unlocking the page.
- *
- * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
- * underlying VM page (as VM is supposed to do this itself).
- *
- * \pre cl_page_is_owned(pg, io)
- * \post !cl_page_is_owned(pg, io)
- *
- * \see cl_page_assume()
- */
-void cl_page_unassume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- io = cl_io_top(io);
- cl_page_owner_clear(pg);
- cl_page_state_set(env, pg, CPS_CACHED);
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
-}
-EXPORT_SYMBOL(cl_page_unassume);
-
-/**
- * Releases page ownership.
- *
- * Moves page into cl_page_state::CPS_CACHED.
- *
- * \pre cl_page_is_owned(pg, io)
- * \post !cl_page_is_owned(pg, io)
- *
- * \see cl_page_own()
- * \see cl_page_operations::cpo_disown()
- */
-void cl_page_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
- pg->cp_state == CPS_FREEING);
-
- io = cl_io_top(io);
- cl_page_disown0(env, io, pg);
-}
-EXPORT_SYMBOL(cl_page_disown);
-
-/**
- * Called when page is to be removed from the object, e.g., as a result of
- * truncate.
- *
- * Calls cl_page_operations::cpo_discard() top-to-bottom.
- *
- * \pre cl_page_is_owned(pg, io)
- *
- * \see cl_page_operations::cpo_discard()
- */
-void cl_page_discard(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
-}
-EXPORT_SYMBOL(cl_page_discard);
-
-/**
- * Version of cl_page_delete() that can be called for not fully constructed
- * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0()
- * path. Doesn't check page invariant.
- */
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
-{
- PASSERT(env, pg, pg->cp_state != CPS_FREEING);
-
- /*
- * Sever all ways to obtain new pointers to @pg.
- */
- cl_page_owner_clear(pg);
-
- cl_page_state_set0(env, pg, CPS_FREEING);
-
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
- (const struct lu_env *,
- const struct cl_page_slice *));
-}
-
-/**
- * Called when a decision is made to throw page out of memory.
- *
- * Notifies all layers about page destruction by calling
- * cl_page_operations::cpo_delete() method top-to-bottom.
- *
- * Moves page into cl_page_state::CPS_FREEING state (this is the only place
- * where transition to this state happens).
- *
- * Eliminates all venues through which new references to the page can be
- * obtained:
- *
- * - removes page from the radix trees,
- *
- * - breaks linkage from VM page to cl_page.
- *
- * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
- * drain after some time, at which point page will be recycled.
- *
- * \pre VM page is locked
- * \post pg->cp_state == CPS_FREEING
- *
- * \see cl_page_operations::cpo_delete()
- */
-void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_invariant(pg));
- cl_page_delete0(env, pg);
-}
-EXPORT_SYMBOL(cl_page_delete);
-
-/**
- * Marks page up-to-date.
- *
- * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to mark/clear page as up-to-date
- * by the \a uptodate argument.
- *
- * \see cl_page_operations::cpo_export()
- */
-void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
-{
- PINVRNT(env, pg, cl_page_invariant(pg));
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
- (const struct lu_env *,
- const struct cl_page_slice *, int), uptodate);
-}
-EXPORT_SYMBOL(cl_page_export);
-
-/**
- * Returns true, iff \a pg is VM locked in a suitable sense by the calling
- * thread.
- */
-int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
-{
- int result;
- const struct cl_page_slice *slice;
-
- slice = container_of(pg->cp_layers.next,
- const struct cl_page_slice, cpl_linkage);
- PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
- /*
- * Call ->cpo_is_vmlocked() directly instead of going through
- * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
- * cl_page_invariant().
- */
- result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
- PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
- return result == -EBUSY;
-}
-EXPORT_SYMBOL(cl_page_is_vmlocked);
-
-static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
-{
- return crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN;
-}
-
-static void cl_page_io_start(const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt)
-{
- /*
- * Page is queued for IO, change its state.
- */
- cl_page_owner_clear(pg);
- cl_page_state_set(env, pg, cl_req_type_state(crt));
-}
-
-/**
- * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
- * called top-to-bottom. Every layer either agrees to submit this page (by
- * returning 0), or requests to omit this page (by returning -EALREADY). Layer
- * handling interactions with the VM also has to inform VM that page is under
- * transfer now.
- */
-int cl_page_prep(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt)
-{
- int result;
-
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
- PINVRNT(env, pg, crt < CRT_NR);
-
- /*
- * XXX this has to be called bottom-to-top, so that llite can set up
- * PG_writeback without risking other layers deciding to skip this
- * page.
- */
- if (crt >= CRT_NR)
- return -EINVAL;
- result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
- if (result == 0)
- cl_page_io_start(env, pg, crt);
-
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_prep);
-
-/**
- * Notify layers about transfer completion.
- *
- * Invoked by transfer sub-system (which is a part of osc) to notify layers
- * that a transfer, of which this page is a part of has completed.
- *
- * Completion call-backs are executed in the bottom-up order, so that
- * uppermost layer (llite), responsible for the VFS/VM interaction runs last
- * and can release locks safely.
- *
- * \pre pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
- * \post pg->cp_state == CPS_CACHED
- *
- * \see cl_page_operations::cpo_completion()
- */
-void cl_page_completion(const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt, int ioret)
-{
- struct cl_sync_io *anchor = pg->cp_sync_io;
-
- PASSERT(env, pg, crt < CRT_NR);
- PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
-
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
-
- cl_page_state_set(env, pg, CPS_CACHED);
- if (crt >= CRT_NR)
- return;
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
- (const struct lu_env *,
- const struct cl_page_slice *, int), ioret);
- if (anchor) {
- LASSERT(pg->cp_sync_io == anchor);
- pg->cp_sync_io = NULL;
- cl_sync_io_note(env, anchor, ioret);
- }
-}
-EXPORT_SYMBOL(cl_page_completion);
-
-/**
- * Notify layers that transfer formation engine decided to yank this page from
- * the cache and to make it a part of a transfer.
- *
- * \pre pg->cp_state == CPS_CACHED
- * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
- *
- * \see cl_page_operations::cpo_make_ready()
- */
-int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
- enum cl_req_type crt)
-{
- int result;
-
- PINVRNT(env, pg, crt < CRT_NR);
-
- if (crt >= CRT_NR)
- return -EINVAL;
- result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
- (const struct lu_env *,
- const struct cl_page_slice *));
- if (result == 0) {
- PASSERT(env, pg, pg->cp_state == CPS_CACHED);
- cl_page_io_start(env, pg, crt);
- }
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_make_ready);
-
-/**
- * Called if a pge is being written back by kernel's intention.
- *
- * \pre cl_page_is_owned(pg, io)
- * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
- *
- * \see cl_page_operations::cpo_flush()
- */
-int cl_page_flush(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg)
-{
- int result;
-
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
-
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_flush);
-
-/**
- * Tells transfer engine that only part of a page is to be transmitted.
- *
- * \see cl_page_operations::cpo_clip()
- */
-void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
- int from, int to)
-{
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
- (const struct lu_env *,
- const struct cl_page_slice *, int, int),
- from, to);
-}
-EXPORT_SYMBOL(cl_page_clip);
-
-/**
- * Prints human readable representation of \a pg to the \a f.
- */
-void cl_page_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_page *pg)
-{
- (*printer)(env, cookie,
- "page@%p[%d %p %d %d %p]\n",
- pg, atomic_read(&pg->cp_ref), pg->cp_obj,
- pg->cp_state, pg->cp_type,
- pg->cp_owner);
-}
-EXPORT_SYMBOL(cl_page_header_print);
-
-/**
- * Prints human readable representation of \a pg to the \a f.
- */
-void cl_page_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_page *pg)
-{
- cl_page_header_print(env, cookie, printer, pg);
- CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
- (const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t p), cookie, printer);
- (*printer)(env, cookie, "end page@%p\n", pg);
-}
-EXPORT_SYMBOL(cl_page_print);
-
-/**
- * Cancel a page which is still in a transfer.
- */
-int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
-{
- return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
- (const struct lu_env *,
- const struct cl_page_slice *));
-}
-
-/**
- * Converts a byte offset within object \a obj into a page index.
- */
-loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
-{
- /*
- * XXX for now.
- */
- return (loff_t)idx << PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_offset);
-
-/**
- * Converts a page index into a byte offset within object \a obj.
- */
-pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
-{
- /*
- * XXX for now.
- */
- return offset >> PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_index);
-
-size_t cl_page_size(const struct cl_object *obj)
-{
- return 1UL << PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_page_size);
-
-/**
- * Adds page slice to the compound page.
- *
- * This is called by cl_object_operations::coo_page_init() methods to add a
- * per-layer state to the page. New state is added at the end of
- * cl_page::cp_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
- */
-void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj, pgoff_t index,
- const struct cl_page_operations *ops)
-{
- list_add_tail(&slice->cpl_linkage, &page->cp_layers);
- slice->cpl_obj = obj;
- slice->cpl_index = index;
- slice->cpl_ops = ops;
- slice->cpl_page = page;
-}
-EXPORT_SYMBOL(cl_page_slice_add);
-
-/**
- * Allocate and initialize cl_cache, called by ll_init_sbi().
- */
-struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
-{
- struct cl_client_cache *cache = NULL;
-
- cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (!cache)
- return NULL;
-
- /* Initialize cache data */
- atomic_set(&cache->ccc_users, 1);
- cache->ccc_lru_max = lru_page_max;
- atomic_long_set(&cache->ccc_lru_left, lru_page_max);
- spin_lock_init(&cache->ccc_lru_lock);
- INIT_LIST_HEAD(&cache->ccc_lru);
-
- atomic_long_set(&cache->ccc_unstable_nr, 0);
- init_waitqueue_head(&cache->ccc_unstable_waitq);
-
- return cache;
-}
-EXPORT_SYMBOL(cl_cache_init);
-
-/**
- * Increase cl_cache refcount
- */
-void cl_cache_incref(struct cl_client_cache *cache)
-{
- atomic_inc(&cache->ccc_users);
-}
-EXPORT_SYMBOL(cl_cache_incref);
-
-/**
- * Decrease cl_cache refcount and free the cache if refcount=0.
- * Since llite, lov and osc all hold cl_cache refcount,
- * the free will not cause race. (LU-6173)
- */
-void cl_cache_decref(struct cl_client_cache *cache)
-{
- if (atomic_dec_and_test(&cache->ccc_users))
- kfree(cache);
-}
-EXPORT_SYMBOL(cl_cache_decref);
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
deleted file mode 100644
index 3e24b76f6301..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ /dev/null
@@ -1,535 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-# include <linux/atomic.h>
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <uapi/linux/lnet/lnetctl.h>
-#include <lustre_debug.h>
-#include <lprocfs_status.h>
-#include <linux/list.h>
-#include <cl_object.h>
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include "llog_internal.h"
-
-struct obd_device *obd_devs[MAX_OBD_DEVICES];
-struct list_head obd_types;
-DEFINE_RWLOCK(obd_dev_lock);
-
-/* The following are visible and mutable through /sys/fs/lustre. */
-unsigned int obd_debug_peer_on_timeout;
-EXPORT_SYMBOL(obd_debug_peer_on_timeout);
-unsigned int obd_dump_on_timeout;
-EXPORT_SYMBOL(obd_dump_on_timeout);
-unsigned int obd_dump_on_eviction;
-EXPORT_SYMBOL(obd_dump_on_eviction);
-unsigned long obd_max_dirty_pages;
-EXPORT_SYMBOL(obd_max_dirty_pages);
-atomic_long_t obd_dirty_pages;
-EXPORT_SYMBOL(obd_dirty_pages);
-unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
-EXPORT_SYMBOL(obd_timeout);
-unsigned int obd_timeout_set;
-EXPORT_SYMBOL(obd_timeout_set);
-/* Adaptive timeout defs here instead of ptlrpc module for /sys/fs/ access */
-unsigned int at_min;
-EXPORT_SYMBOL(at_min);
-unsigned int at_max = 600;
-EXPORT_SYMBOL(at_max);
-unsigned int at_history = 600;
-EXPORT_SYMBOL(at_history);
-int at_early_margin = 5;
-EXPORT_SYMBOL(at_early_margin);
-int at_extra = 30;
-EXPORT_SYMBOL(at_extra);
-
-atomic_long_t obd_dirty_transit_pages;
-EXPORT_SYMBOL(obd_dirty_transit_pages);
-
-char obd_jobid_var[JOBSTATS_JOBID_VAR_MAX_LEN + 1] = JOBSTATS_DISABLE;
-char obd_jobid_node[LUSTRE_JOBID_SIZE + 1];
-
-/* Get jobid of current process from stored variable or calculate
- * it from pid and user_id.
- *
- * Historically this was also done by reading the environment variable
- * stored in between the "env_start" & "env_end" of task struct.
- * This is now deprecated.
- */
-int lustre_get_jobid(char *jobid)
-{
- memset(jobid, 0, LUSTRE_JOBID_SIZE);
- /* Jobstats isn't enabled */
- if (strcmp(obd_jobid_var, JOBSTATS_DISABLE) == 0)
- return 0;
-
- /* Use process name + fsuid as jobid */
- if (strcmp(obd_jobid_var, JOBSTATS_PROCNAME_UID) == 0) {
- snprintf(jobid, LUSTRE_JOBID_SIZE, "%s.%u",
- current_comm(),
- from_kuid(&init_user_ns, current_fsuid()));
- return 0;
- }
-
- /* Whole node dedicated to single job */
- if (strcmp(obd_jobid_var, JOBSTATS_NODELOCAL) == 0) {
- strcpy(jobid, obd_jobid_node);
- return 0;
- }
-
- return -ENOENT;
-}
-EXPORT_SYMBOL(lustre_get_jobid);
-
-static int class_resolve_dev_name(__u32 len, const char *name)
-{
- int rc;
- int dev;
-
- if (!len || !name) {
- CERROR("No name passed,!\n");
- rc = -EINVAL;
- goto out;
- }
- if (name[len - 1] != 0) {
- CERROR("Name not nul terminated!\n");
- rc = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "device name %s\n", name);
- dev = class_name2dev(name);
- if (dev == -1) {
- CDEBUG(D_IOCTL, "No device for name %s!\n", name);
- rc = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "device name %s, dev %d\n", name, dev);
- rc = dev;
-
-out:
- return rc;
-}
-
-int class_handle_ioctl(unsigned int cmd, unsigned long arg)
-{
- char *buf = NULL;
- struct obd_ioctl_data *data;
- struct libcfs_debug_ioctl_data *debug_data;
- struct obd_device *obd = NULL;
- int err = 0, len = 0;
-
- /* only for debugging */
- if (cmd == LIBCFS_IOC_DEBUG_MASK) {
- debug_data = (struct libcfs_debug_ioctl_data *)arg;
- libcfs_subsystem_debug = debug_data->subs;
- libcfs_debug = debug_data->debug;
- return 0;
- }
-
- CDEBUG(D_IOCTL, "cmd = %x\n", cmd);
- if (obd_ioctl_getdata(&buf, &len, (void __user *)arg)) {
- CERROR("OBD ioctl: data error\n");
- return -EINVAL;
- }
- data = (struct obd_ioctl_data *)buf;
-
- switch (cmd) {
- case OBD_IOC_PROCESS_CFG: {
- struct lustre_cfg *lcfg;
-
- if (!data->ioc_plen1 || !data->ioc_pbuf1) {
- CERROR("No config buffer passed!\n");
- err = -EINVAL;
- goto out;
- }
- lcfg = kzalloc(data->ioc_plen1, GFP_NOFS);
- if (!lcfg) {
- err = -ENOMEM;
- goto out;
- }
- if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
- err = -EFAULT;
- if (!err)
- err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1);
- if (!err)
- err = class_process_config(lcfg);
-
- kfree(lcfg);
- goto out;
- }
-
- case OBD_GET_VERSION:
- if (!data->ioc_inlbuf1) {
- CERROR("No buffer passed in ioctl\n");
- err = -EINVAL;
- goto out;
- }
-
- if (strlen(LUSTRE_VERSION_STRING) + 1 > data->ioc_inllen1) {
- CERROR("ioctl buffer too small to hold version\n");
- err = -EINVAL;
- goto out;
- }
-
- memcpy(data->ioc_bulk, LUSTRE_VERSION_STRING,
- strlen(LUSTRE_VERSION_STRING) + 1);
-
- if (copy_to_user((void __user *)arg, data, len))
- err = -EFAULT;
- goto out;
-
- case OBD_IOC_NAME2DEV: {
- /* Resolve a device name. This does not change the
- * currently selected device.
- */
- int dev;
-
- dev = class_resolve_dev_name(data->ioc_inllen1,
- data->ioc_inlbuf1);
- data->ioc_dev = dev;
- if (dev < 0) {
- err = -EINVAL;
- goto out;
- }
-
- if (copy_to_user((void __user *)arg, data, sizeof(*data)))
- err = -EFAULT;
- goto out;
- }
-
- case OBD_IOC_UUID2DEV: {
- /* Resolve a device uuid. This does not change the
- * currently selected device.
- */
- int dev;
- struct obd_uuid uuid;
-
- if (!data->ioc_inllen1 || !data->ioc_inlbuf1) {
- CERROR("No UUID passed!\n");
- err = -EINVAL;
- goto out;
- }
- if (data->ioc_inlbuf1[data->ioc_inllen1 - 1] != 0) {
- CERROR("UUID not NUL terminated!\n");
- err = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "device name %s\n", data->ioc_inlbuf1);
- obd_str2uuid(&uuid, data->ioc_inlbuf1);
- dev = class_uuid2dev(&uuid);
- data->ioc_dev = dev;
- if (dev == -1) {
- CDEBUG(D_IOCTL, "No device for UUID %s!\n",
- data->ioc_inlbuf1);
- err = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "device name %s, dev %d\n", data->ioc_inlbuf1,
- dev);
-
- if (copy_to_user((void __user *)arg, data, sizeof(*data)))
- err = -EFAULT;
- goto out;
- }
-
- case OBD_IOC_GETDEVICE: {
- int index = data->ioc_count;
- char *status, *str;
-
- if (!data->ioc_inlbuf1) {
- CERROR("No buffer passed in ioctl\n");
- err = -EINVAL;
- goto out;
- }
- if (data->ioc_inllen1 < 128) {
- CERROR("ioctl buffer too small to hold version\n");
- err = -EINVAL;
- goto out;
- }
-
- obd = class_num2obd(index);
- if (!obd) {
- err = -ENOENT;
- goto out;
- }
-
- if (obd->obd_stopping)
- status = "ST";
- else if (obd->obd_set_up)
- status = "UP";
- else if (obd->obd_attached)
- status = "AT";
- else
- status = "--";
- str = (char *)data->ioc_bulk;
- snprintf(str, len - sizeof(*data), "%3d %s %s %s %s %d",
- (int)index, status, obd->obd_type->typ_name,
- obd->obd_name, obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
-
- if (copy_to_user((void __user *)arg, data, len))
- err = -EFAULT;
- goto out;
- }
- }
-
- if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
- if (data->ioc_inllen4 <= 0 || !data->ioc_inlbuf4) {
- err = -EINVAL;
- goto out;
- }
- if (strnlen(data->ioc_inlbuf4, MAX_OBD_NAME) >= MAX_OBD_NAME) {
- err = -EINVAL;
- goto out;
- }
- obd = class_name2obd(data->ioc_inlbuf4);
- } else if (data->ioc_dev < class_devno_max()) {
- obd = class_num2obd(data->ioc_dev);
- } else {
- CERROR("OBD ioctl: No device\n");
- err = -EINVAL;
- goto out;
- }
-
- if (!obd) {
- CERROR("OBD ioctl : No Device %d\n", data->ioc_dev);
- err = -EINVAL;
- goto out;
- }
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
-
- if (!obd->obd_set_up || obd->obd_stopping) {
- CERROR("OBD ioctl: device not setup %d\n", data->ioc_dev);
- err = -EINVAL;
- goto out;
- }
-
- switch (cmd) {
- case OBD_IOC_NO_TRANSNO: {
- if (!obd->obd_attached) {
- CERROR("Device %d not attached\n", obd->obd_minor);
- err = -ENODEV;
- goto out;
- }
- CDEBUG(D_HA, "%s: disabling committed-transno notification\n",
- obd->obd_name);
- obd->obd_no_transno = 1;
- err = 0;
- goto out;
- }
-
- default: {
- err = obd_iocontrol(cmd, obd->obd_self_export, len, data, NULL);
- if (err)
- goto out;
-
- if (copy_to_user((void __user *)arg, data, len))
- err = -EFAULT;
- goto out;
- }
- }
-
- out:
- kvfree(buf);
- return err;
-} /* class_handle_ioctl */
-
-#define OBD_INIT_CHECK
-static int obd_init_checks(void)
-{
- __u64 u64val, div64val;
- char buf[64];
- int len, ret = 0;
-
- CDEBUG(D_INFO, "LPU64=%s, LPD64=%s, LPX64=%s\n", "%llu", "%lld",
- "%#llx");
-
- CDEBUG(D_INFO, "OBD_OBJECT_EOF = %#llx\n", (__u64)OBD_OBJECT_EOF);
-
- u64val = OBD_OBJECT_EOF;
- CDEBUG(D_INFO, "u64val OBD_OBJECT_EOF = %#llx\n", u64val);
- if (u64val != OBD_OBJECT_EOF) {
- CERROR("__u64 %#llx(%d) != 0xffffffffffffffff\n",
- u64val, (int)sizeof(u64val));
- ret = -EINVAL;
- }
- len = snprintf(buf, sizeof(buf), "%#llx", u64val);
- if (len != 18) {
- CWARN("LPX64 wrong length! strlen(%s)=%d != 18\n", buf, len);
- ret = -EINVAL;
- }
-
- div64val = OBD_OBJECT_EOF;
- CDEBUG(D_INFO, "u64val OBD_OBJECT_EOF = %#llx\n", u64val);
- if (u64val != OBD_OBJECT_EOF) {
- CERROR("__u64 %#llx(%d) != 0xffffffffffffffff\n",
- u64val, (int)sizeof(u64val));
- ret = -EOVERFLOW;
- }
- if (u64val >> 8 != OBD_OBJECT_EOF >> 8) {
- CERROR("__u64 %#llx(%d) != 0xffffffffffffffff\n",
- u64val, (int)sizeof(u64val));
- return -EOVERFLOW;
- }
- if (do_div(div64val, 256) != (u64val & 255)) {
- CERROR("do_div(%#llx,256) != %llu\n", u64val, u64val & 255);
- return -EOVERFLOW;
- }
- if (u64val >> 8 != div64val) {
- CERROR("do_div(%#llx,256) %llu != %llu\n",
- u64val, div64val, u64val >> 8);
- return -EOVERFLOW;
- }
- len = snprintf(buf, sizeof(buf), "%#llx", u64val);
- if (len != 18) {
- CWARN("LPX64 wrong length! strlen(%s)=%d != 18\n", buf, len);
- ret = -EINVAL;
- }
- len = snprintf(buf, sizeof(buf), "%llu", u64val);
- if (len != 20) {
- CWARN("LPU64 wrong length! strlen(%s)=%d != 20\n", buf, len);
- ret = -EINVAL;
- }
- len = snprintf(buf, sizeof(buf), "%lld", u64val);
- if (len != 2) {
- CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
- ret = -EINVAL;
- }
- if ((u64val & ~PAGE_MASK) >= PAGE_SIZE) {
- CWARN("mask failed: u64val %llu >= %llu\n", u64val,
- (__u64)PAGE_SIZE);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int __init obdclass_init(void)
-{
- int i, err;
-
- LCONSOLE_INFO("Lustre: Build Version: " LUSTRE_VERSION_STRING "\n");
-
- spin_lock_init(&obd_types_lock);
- obd_zombie_impexp_init();
-
- err = obd_init_checks();
- if (err)
- return err;
-
- class_init_uuidlist();
- err = class_handle_init();
- if (err)
- return err;
-
- INIT_LIST_HEAD(&obd_types);
-
- err = misc_register(&obd_psdev);
- if (err) {
- CERROR("cannot register %d err %d\n", OBD_DEV_MINOR, err);
- return err;
- }
-
- /* This struct is already zeroed for us (static global) */
- for (i = 0; i < class_devno_max(); i++)
- obd_devs[i] = NULL;
-
- /* Default the dirty page cache cap to 1/2 of system memory.
- * For clients with less memory, a larger fraction is needed
- * for other purposes (mostly for BGL).
- */
- if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
- obd_max_dirty_pages = totalram_pages / 4;
- else
- obd_max_dirty_pages = totalram_pages / 2;
-
- err = obd_init_caches();
- if (err)
- return err;
-
- err = class_procfs_init();
- if (err)
- return err;
-
- err = obd_sysctl_init();
- if (err)
- return err;
-
- err = lu_global_init();
- if (err)
- return err;
-
- err = cl_global_init();
- if (err != 0)
- return err;
-
- err = llog_info_init();
- if (err)
- return err;
-
- err = lustre_register_fs();
-
- return err;
-}
-
-static void obdclass_exit(void)
-{
- lustre_unregister_fs();
-
- misc_deregister(&obd_psdev);
- llog_info_fini();
- cl_global_fini();
- lu_global_fini();
-
- obd_cleanup_caches();
-
- class_procfs_clean();
-
- class_handle_cleanup();
- class_exit_uuidlist();
- obd_zombie_impexp_stop();
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Class Driver");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(obdclass_init);
-module_exit(obdclass_exit);
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
deleted file mode 100644
index 2156a82a613a..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/debug.c
- *
- * Helper routines for dumping data structs for debugging.
- */
-
-#define DEBUG_SUBSYSTEM D_OTHER
-
-#include <asm/unaligned.h>
-
-#include <obd_support.h>
-#include <lustre_debug.h>
-#include <lustre_net.h>
-
-#define LPDS sizeof(__u64)
-int block_debug_setup(void *addr, int len, __u64 off, __u64 id)
-{
- LASSERT(addr);
-
- put_unaligned_le64(off, addr);
- put_unaligned_le64(id, addr + LPDS);
- addr += len - LPDS - LPDS;
- put_unaligned_le64(off, addr);
- put_unaligned_le64(id, addr + LPDS);
-
- return 0;
-}
-EXPORT_SYMBOL(block_debug_setup);
-
-int block_debug_check(char *who, void *addr, int end, __u64 off, __u64 id)
-{
- __u64 ne_off;
- int err = 0;
-
- LASSERT(addr);
-
- ne_off = le64_to_cpu(off);
- id = le64_to_cpu(id);
- if (memcmp(addr, (char *)&ne_off, LPDS)) {
- CDEBUG(D_ERROR, "%s: id %#llx offset %llu off: %#llx != %#llx\n",
- who, id, off, *(__u64 *)addr, ne_off);
- err = -EINVAL;
- }
- if (memcmp(addr + LPDS, (char *)&id, LPDS)) {
- CDEBUG(D_ERROR, "%s: id %#llx offset %llu id: %#llx != %#llx\n",
- who, id, off, *(__u64 *)(addr + LPDS), id);
- err = -EINVAL;
- }
-
- addr += end - LPDS - LPDS;
- if (memcmp(addr, (char *)&ne_off, LPDS)) {
- CDEBUG(D_ERROR, "%s: id %#llx offset %llu end off: %#llx != %#llx\n",
- who, id, off, *(__u64 *)addr, ne_off);
- err = -EINVAL;
- }
- if (memcmp(addr + LPDS, (char *)&id, LPDS)) {
- CDEBUG(D_ERROR, "%s: id %#llx offset %llu end id: %#llx != %#llx\n",
- who, id, off, *(__u64 *)(addr + LPDS), id);
- err = -EINVAL;
- }
-
- return err;
-}
-EXPORT_SYMBOL(block_debug_check);
-#undef LPDS
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
deleted file mode 100644
index 63ccbabb4c5a..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ /dev/null
@@ -1,1514 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/genops.c
- *
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-#include <obd_class.h>
-#include <lprocfs_status.h>
-#include <lustre_kernelcomm.h>
-
-spinlock_t obd_types_lock;
-
-static struct kmem_cache *obd_device_cachep;
-struct kmem_cache *obdo_cachep;
-EXPORT_SYMBOL(obdo_cachep);
-static struct kmem_cache *import_cachep;
-
-static struct workqueue_struct *zombie_wq;
-static void obd_zombie_export_add(struct obd_export *exp);
-static void obd_zombie_import_add(struct obd_import *imp);
-
-int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
-EXPORT_SYMBOL(ptlrpc_put_connection_superhack);
-
-/*
- * support functions: we could use inter-module communication, but this
- * is more portable to other OS's
- */
-static struct obd_device *obd_device_alloc(void)
-{
- struct obd_device *obd;
-
- obd = kmem_cache_zalloc(obd_device_cachep, GFP_NOFS);
- if (obd)
- obd->obd_magic = OBD_DEVICE_MAGIC;
- return obd;
-}
-
-static void obd_device_free(struct obd_device *obd)
-{
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
- if (obd->obd_namespace) {
- CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n",
- obd, obd->obd_namespace, obd->obd_force);
- LBUG();
- }
- lu_ref_fini(&obd->obd_reference);
- kmem_cache_free(obd_device_cachep, obd);
-}
-
-static struct obd_type *class_search_type(const char *name)
-{
- struct list_head *tmp;
- struct obd_type *type;
-
- spin_lock(&obd_types_lock);
- list_for_each(tmp, &obd_types) {
- type = list_entry(tmp, struct obd_type, typ_chain);
- if (strcmp(type->typ_name, name) == 0) {
- spin_unlock(&obd_types_lock);
- return type;
- }
- }
- spin_unlock(&obd_types_lock);
- return NULL;
-}
-
-static struct obd_type *class_get_type(const char *name)
-{
- struct obd_type *type = class_search_type(name);
-
- if (!type) {
- const char *modname = name;
-
- if (!request_module("%s", modname)) {
- CDEBUG(D_INFO, "Loaded module '%s'\n", modname);
- type = class_search_type(name);
- } else {
- LCONSOLE_ERROR_MSG(0x158, "Can't load module '%s'\n",
- modname);
- }
- }
- if (type) {
- spin_lock(&type->obd_type_lock);
- type->typ_refcnt++;
- try_module_get(type->typ_dt_ops->owner);
- spin_unlock(&type->obd_type_lock);
- }
- return type;
-}
-
-void class_put_type(struct obd_type *type)
-{
- LASSERT(type);
- spin_lock(&type->obd_type_lock);
- type->typ_refcnt--;
- module_put(type->typ_dt_ops->owner);
- spin_unlock(&type->obd_type_lock);
-}
-
-#define CLASS_MAX_NAME 1024
-
-int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
- const char *name,
- struct lu_device_type *ldt)
-{
- struct obd_type *type;
- int rc;
-
- /* sanity check */
- LASSERT(strnlen(name, CLASS_MAX_NAME) < CLASS_MAX_NAME);
-
- if (class_search_type(name)) {
- CDEBUG(D_IOCTL, "Type %s already registered\n", name);
- return -EEXIST;
- }
-
- rc = -ENOMEM;
- type = kzalloc(sizeof(*type), GFP_NOFS);
- if (!type)
- return rc;
-
- type->typ_dt_ops = kzalloc(sizeof(*type->typ_dt_ops), GFP_NOFS);
- type->typ_md_ops = kzalloc(sizeof(*type->typ_md_ops), GFP_NOFS);
- type->typ_name = kzalloc(strlen(name) + 1, GFP_NOFS);
-
- if (!type->typ_dt_ops ||
- !type->typ_md_ops ||
- !type->typ_name)
- goto failed;
-
- *type->typ_dt_ops = *dt_ops;
- /* md_ops is optional */
- if (md_ops)
- *type->typ_md_ops = *md_ops;
- strcpy(type->typ_name, name);
- spin_lock_init(&type->obd_type_lock);
-
- type->typ_debugfs_entry = ldebugfs_register(type->typ_name,
- debugfs_lustre_root,
- NULL, type);
- if (IS_ERR_OR_NULL(type->typ_debugfs_entry)) {
- rc = type->typ_debugfs_entry ? PTR_ERR(type->typ_debugfs_entry)
- : -ENOMEM;
- type->typ_debugfs_entry = NULL;
- goto failed;
- }
-
- type->typ_kobj = kobject_create_and_add(type->typ_name, lustre_kobj);
- if (!type->typ_kobj) {
- rc = -ENOMEM;
- goto failed;
- }
-
- if (ldt) {
- type->typ_lu = ldt;
- rc = lu_device_type_init(ldt);
- if (rc != 0)
- goto failed;
- }
-
- spin_lock(&obd_types_lock);
- list_add(&type->typ_chain, &obd_types);
- spin_unlock(&obd_types_lock);
-
- return 0;
-
- failed:
- if (type->typ_kobj)
- kobject_put(type->typ_kobj);
- kfree(type->typ_name);
- kfree(type->typ_md_ops);
- kfree(type->typ_dt_ops);
- kfree(type);
- return rc;
-}
-EXPORT_SYMBOL(class_register_type);
-
-int class_unregister_type(const char *name)
-{
- struct obd_type *type = class_search_type(name);
-
- if (!type) {
- CERROR("unknown obd type\n");
- return -EINVAL;
- }
-
- if (type->typ_refcnt) {
- CERROR("type %s has refcount (%d)\n", name, type->typ_refcnt);
- /* This is a bad situation, let's make the best of it */
- /* Remove ops, but leave the name for debugging */
- kfree(type->typ_dt_ops);
- kfree(type->typ_md_ops);
- return -EBUSY;
- }
-
- if (type->typ_kobj)
- kobject_put(type->typ_kobj);
-
- if (!IS_ERR_OR_NULL(type->typ_debugfs_entry))
- ldebugfs_remove(&type->typ_debugfs_entry);
-
- if (type->typ_lu)
- lu_device_type_fini(type->typ_lu);
-
- spin_lock(&obd_types_lock);
- list_del(&type->typ_chain);
- spin_unlock(&obd_types_lock);
- kfree(type->typ_name);
- kfree(type->typ_dt_ops);
- kfree(type->typ_md_ops);
- kfree(type);
- return 0;
-} /* class_unregister_type */
-EXPORT_SYMBOL(class_unregister_type);
-
-/**
- * Create a new obd device.
- *
- * Find an empty slot in ::obd_devs[], create a new obd device in it.
- *
- * \param[in] type_name obd device type string.
- * \param[in] name obd device name.
- *
- * \retval NULL if create fails, otherwise return the obd device
- * pointer created.
- */
-struct obd_device *class_newdev(const char *type_name, const char *name)
-{
- struct obd_device *result = NULL;
- struct obd_device *newdev;
- struct obd_type *type = NULL;
- int i;
- int new_obd_minor = 0;
-
- if (strlen(name) >= MAX_OBD_NAME) {
- CERROR("name/uuid must be < %u bytes long\n", MAX_OBD_NAME);
- return ERR_PTR(-EINVAL);
- }
-
- type = class_get_type(type_name);
- if (!type) {
- CERROR("OBD: unknown type: %s\n", type_name);
- return ERR_PTR(-ENODEV);
- }
-
- newdev = obd_device_alloc();
- if (!newdev) {
- result = ERR_PTR(-ENOMEM);
- goto out_type;
- }
-
- LASSERT(newdev->obd_magic == OBD_DEVICE_MAGIC);
-
- write_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (obd && (strcmp(name, obd->obd_name) == 0)) {
- CERROR("Device %s already exists at %d, won't add\n",
- name, i);
- if (result) {
- LASSERTF(result->obd_magic == OBD_DEVICE_MAGIC,
- "%p obd_magic %08x != %08x\n", result,
- result->obd_magic, OBD_DEVICE_MAGIC);
- LASSERTF(result->obd_minor == new_obd_minor,
- "%p obd_minor %d != %d\n", result,
- result->obd_minor, new_obd_minor);
-
- obd_devs[result->obd_minor] = NULL;
- result->obd_name[0] = '\0';
- }
- result = ERR_PTR(-EEXIST);
- break;
- }
- if (!result && !obd) {
- result = newdev;
- result->obd_minor = i;
- new_obd_minor = i;
- result->obd_type = type;
- strncpy(result->obd_name, name,
- sizeof(result->obd_name) - 1);
- obd_devs[i] = result;
- }
- }
- write_unlock(&obd_dev_lock);
-
- if (!result && i >= class_devno_max()) {
- CERROR("all %u OBD devices used, increase MAX_OBD_DEVICES\n",
- class_devno_max());
- result = ERR_PTR(-EOVERFLOW);
- goto out;
- }
-
- if (IS_ERR(result))
- goto out;
-
- CDEBUG(D_IOCTL, "Adding new device %s (%p)\n",
- result->obd_name, result);
-
- return result;
-out:
- obd_device_free(newdev);
-out_type:
- class_put_type(type);
- return result;
-}
-
-void class_release_dev(struct obd_device *obd)
-{
- struct obd_type *obd_type = obd->obd_type;
-
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "%p obd_magic %08x != %08x\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
- LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n",
- obd, obd->obd_minor, obd_devs[obd->obd_minor]);
- LASSERT(obd_type);
-
- CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n",
- obd->obd_name, obd->obd_minor, obd->obd_type->typ_name);
-
- write_lock(&obd_dev_lock);
- obd_devs[obd->obd_minor] = NULL;
- write_unlock(&obd_dev_lock);
- obd_device_free(obd);
-
- class_put_type(obd_type);
-}
-
-int class_name2dev(const char *name)
-{
- int i;
-
- if (!name)
- return -1;
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (obd && strcmp(name, obd->obd_name) == 0) {
- /* Make sure we finished attaching before we give
- * out any references
- */
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- if (obd->obd_attached) {
- read_unlock(&obd_dev_lock);
- return i;
- }
- break;
- }
- }
- read_unlock(&obd_dev_lock);
-
- return -1;
-}
-
-struct obd_device *class_name2obd(const char *name)
-{
- int dev = class_name2dev(name);
-
- if (dev < 0 || dev > class_devno_max())
- return NULL;
- return class_num2obd(dev);
-}
-EXPORT_SYMBOL(class_name2obd);
-
-int class_uuid2dev(struct obd_uuid *uuid)
-{
- int i;
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (obd && obd_uuid_equals(uuid, &obd->obd_uuid)) {
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- read_unlock(&obd_dev_lock);
- return i;
- }
- }
- read_unlock(&obd_dev_lock);
-
- return -1;
-}
-
-/**
- * Get obd device from ::obd_devs[]
- *
- * \param num [in] array index
- *
- * \retval NULL if ::obd_devs[\a num] does not contains an obd device
- * otherwise return the obd device there.
- */
-struct obd_device *class_num2obd(int num)
-{
- struct obd_device *obd = NULL;
-
- if (num < class_devno_max()) {
- obd = obd_devs[num];
- if (!obd)
- return NULL;
-
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
- "%p obd_magic %08x != %08x\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
- LASSERTF(obd->obd_minor == num,
- "%p obd_minor %0d != %0d\n",
- obd, obd->obd_minor, num);
- }
-
- return obd;
-}
-
-/* Search for a client OBD connected to tgt_uuid. If grp_uuid is
- * specified, then only the client with that uuid is returned,
- * otherwise any client connected to the tgt is returned.
- */
-struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
- const char *typ_name,
- struct obd_uuid *grp_uuid)
-{
- int i;
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (!obd)
- continue;
- if ((strncmp(obd->obd_type->typ_name, typ_name,
- strlen(typ_name)) == 0)) {
- if (obd_uuid_equals(tgt_uuid,
- &obd->u.cli.cl_target_uuid) &&
- ((grp_uuid) ? obd_uuid_equals(grp_uuid,
- &obd->obd_uuid) : 1)) {
- read_unlock(&obd_dev_lock);
- return obd;
- }
- }
- }
- read_unlock(&obd_dev_lock);
-
- return NULL;
-}
-EXPORT_SYMBOL(class_find_client_obd);
-
-/* Iterate the obd_device list looking devices have grp_uuid. Start
- * searching at *next, and if a device is found, the next index to look
- * at is saved in *next. If next is NULL, then the first matching device
- * will always be returned.
- */
-struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
-{
- int i;
-
- if (!next)
- i = 0;
- else if (*next >= 0 && *next < class_devno_max())
- i = *next;
- else
- return NULL;
-
- read_lock(&obd_dev_lock);
- for (; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (!obd)
- continue;
- if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) {
- if (next)
- *next = i + 1;
- read_unlock(&obd_dev_lock);
- return obd;
- }
- }
- read_unlock(&obd_dev_lock);
-
- return NULL;
-}
-EXPORT_SYMBOL(class_devices_in_group);
-
-/**
- * to notify sptlrpc log for \a fsname has changed, let every relevant OBD
- * adjust sptlrpc settings accordingly.
- */
-int class_notify_sptlrpc_conf(const char *fsname, int namelen)
-{
- struct obd_device *obd;
- const char *type;
- int i, rc = 0, rc2;
-
- LASSERT(namelen > 0);
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- obd = class_num2obd(i);
-
- if (!obd || obd->obd_set_up == 0 || obd->obd_stopping)
- continue;
-
- /* only notify mdc, osc, mdt, ost */
- type = obd->obd_type->typ_name;
- if (strcmp(type, LUSTRE_MDC_NAME) != 0 &&
- strcmp(type, LUSTRE_OSC_NAME) != 0 &&
- strcmp(type, LUSTRE_MDT_NAME) != 0 &&
- strcmp(type, LUSTRE_OST_NAME) != 0)
- continue;
-
- if (strncmp(obd->obd_name, fsname, namelen))
- continue;
-
- class_incref(obd, __func__, obd);
- read_unlock(&obd_dev_lock);
- rc2 = obd_set_info_async(NULL, obd->obd_self_export,
- sizeof(KEY_SPTLRPC_CONF),
- KEY_SPTLRPC_CONF, 0, NULL, NULL);
- rc = rc ? rc : rc2;
- class_decref(obd, __func__, obd);
- read_lock(&obd_dev_lock);
- }
- read_unlock(&obd_dev_lock);
- return rc;
-}
-EXPORT_SYMBOL(class_notify_sptlrpc_conf);
-
-void obd_cleanup_caches(void)
-{
- kmem_cache_destroy(obd_device_cachep);
- obd_device_cachep = NULL;
- kmem_cache_destroy(obdo_cachep);
- obdo_cachep = NULL;
- kmem_cache_destroy(import_cachep);
- import_cachep = NULL;
-}
-
-int obd_init_caches(void)
-{
- LASSERT(!obd_device_cachep);
- obd_device_cachep = kmem_cache_create("ll_obd_dev_cache",
- sizeof(struct obd_device),
- 0, 0, NULL);
- if (!obd_device_cachep)
- goto out;
-
- LASSERT(!obdo_cachep);
- obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo),
- 0, 0, NULL);
- if (!obdo_cachep)
- goto out;
-
- LASSERT(!import_cachep);
- import_cachep = kmem_cache_create("ll_import_cache",
- sizeof(struct obd_import),
- 0, 0, NULL);
- if (!import_cachep)
- goto out;
-
- return 0;
- out:
- obd_cleanup_caches();
- return -ENOMEM;
-}
-
-/* map connection to client */
-struct obd_export *class_conn2export(struct lustre_handle *conn)
-{
- struct obd_export *export;
-
- if (!conn) {
- CDEBUG(D_CACHE, "looking for null handle\n");
- return NULL;
- }
-
- if (conn->cookie == -1) { /* this means assign a new connection */
- CDEBUG(D_CACHE, "want a new connection\n");
- return NULL;
- }
-
- CDEBUG(D_INFO, "looking for export cookie %#llx\n", conn->cookie);
- export = class_handle2object(conn->cookie, NULL);
- return export;
-}
-EXPORT_SYMBOL(class_conn2export);
-
-struct obd_device *class_exp2obd(struct obd_export *exp)
-{
- if (exp)
- return exp->exp_obd;
- return NULL;
-}
-EXPORT_SYMBOL(class_exp2obd);
-
-struct obd_import *class_exp2cliimp(struct obd_export *exp)
-{
- struct obd_device *obd = exp->exp_obd;
-
- if (!obd)
- return NULL;
- return obd->u.cli.cl_import;
-}
-EXPORT_SYMBOL(class_exp2cliimp);
-
-/* Export management functions */
-static void class_export_destroy(struct obd_export *exp)
-{
- struct obd_device *obd = exp->exp_obd;
-
- LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
- LASSERT(obd);
-
- CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
- exp->exp_client_uuid.uuid, obd->obd_name);
-
- /* "Local" exports (lctl, LOV->{mdc,osc}) have no connection. */
- if (exp->exp_connection)
- ptlrpc_put_connection_superhack(exp->exp_connection);
-
- LASSERT(list_empty(&exp->exp_outstanding_replies));
- LASSERT(list_empty(&exp->exp_uncommitted_replies));
- LASSERT(list_empty(&exp->exp_req_replay_queue));
- LASSERT(list_empty(&exp->exp_hp_rpcs));
- obd_destroy_export(exp);
- class_decref(obd, "export", exp);
-
- OBD_FREE_RCU(exp, sizeof(*exp), &exp->exp_handle);
-}
-
-static void export_handle_addref(void *export)
-{
- class_export_get(export);
-}
-
-static struct portals_handle_ops export_handle_ops = {
- .hop_addref = export_handle_addref,
- .hop_free = NULL,
-};
-
-struct obd_export *class_export_get(struct obd_export *exp)
-{
- atomic_inc(&exp->exp_refcount);
- CDEBUG(D_INFO, "GETting export %p : new refcount %d\n", exp,
- atomic_read(&exp->exp_refcount));
- return exp;
-}
-EXPORT_SYMBOL(class_export_get);
-
-void class_export_put(struct obd_export *exp)
-{
- LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON);
- CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
- atomic_read(&exp->exp_refcount) - 1);
-
- if (atomic_dec_and_test(&exp->exp_refcount)) {
- LASSERT(!list_empty(&exp->exp_obd_chain));
- CDEBUG(D_IOCTL, "final put %p/%s\n",
- exp, exp->exp_client_uuid.uuid);
-
- /* release nid stat refererence */
- lprocfs_exp_cleanup(exp);
-
- obd_zombie_export_add(exp);
- }
-}
-EXPORT_SYMBOL(class_export_put);
-
-static void obd_zombie_exp_cull(struct work_struct *ws)
-{
- struct obd_export *export = container_of(ws, struct obd_export, exp_zombie_work);
-
- class_export_destroy(export);
-}
-
-/* Creates a new export, adds it to the hash table, and returns a
- * pointer to it. The refcount is 2: one for the hash reference, and
- * one for the pointer returned by this function.
- */
-struct obd_export *class_new_export(struct obd_device *obd,
- struct obd_uuid *cluuid)
-{
- struct obd_export *export;
- struct cfs_hash *hash = NULL;
- int rc = 0;
-
- export = kzalloc(sizeof(*export), GFP_NOFS);
- if (!export)
- return ERR_PTR(-ENOMEM);
-
- export->exp_conn_cnt = 0;
- export->exp_lock_hash = NULL;
- export->exp_flock_hash = NULL;
- atomic_set(&export->exp_refcount, 2);
- atomic_set(&export->exp_rpc_count, 0);
- atomic_set(&export->exp_cb_count, 0);
- atomic_set(&export->exp_locks_count, 0);
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- INIT_LIST_HEAD(&export->exp_locks_list);
- spin_lock_init(&export->exp_locks_list_guard);
-#endif
- atomic_set(&export->exp_replay_count, 0);
- export->exp_obd = obd;
- INIT_LIST_HEAD(&export->exp_outstanding_replies);
- spin_lock_init(&export->exp_uncommitted_replies_lock);
- INIT_LIST_HEAD(&export->exp_uncommitted_replies);
- INIT_LIST_HEAD(&export->exp_req_replay_queue);
- INIT_LIST_HEAD(&export->exp_handle.h_link);
- INIT_LIST_HEAD(&export->exp_hp_rpcs);
- class_handle_hash(&export->exp_handle, &export_handle_ops);
- spin_lock_init(&export->exp_lock);
- spin_lock_init(&export->exp_rpc_lock);
- INIT_HLIST_NODE(&export->exp_uuid_hash);
- spin_lock_init(&export->exp_bl_list_lock);
- INIT_LIST_HEAD(&export->exp_bl_list);
- INIT_WORK(&export->exp_zombie_work, obd_zombie_exp_cull);
-
- export->exp_sp_peer = LUSTRE_SP_ANY;
- export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
- export->exp_client_uuid = *cluuid;
- obd_init_export(export);
-
- spin_lock(&obd->obd_dev_lock);
- /* shouldn't happen, but might race */
- if (obd->obd_stopping) {
- rc = -ENODEV;
- goto exit_unlock;
- }
-
- hash = cfs_hash_getref(obd->obd_uuid_hash);
- if (!hash) {
- rc = -ENODEV;
- goto exit_unlock;
- }
- spin_unlock(&obd->obd_dev_lock);
-
- if (!obd_uuid_equals(cluuid, &obd->obd_uuid)) {
- rc = cfs_hash_add_unique(hash, cluuid, &export->exp_uuid_hash);
- if (rc != 0) {
- LCONSOLE_WARN("%s: denying duplicate export for %s, %d\n",
- obd->obd_name, cluuid->uuid, rc);
- rc = -EALREADY;
- goto exit_err;
- }
- }
-
- spin_lock(&obd->obd_dev_lock);
- if (obd->obd_stopping) {
- cfs_hash_del(hash, cluuid, &export->exp_uuid_hash);
- rc = -ENODEV;
- goto exit_unlock;
- }
-
- class_incref(obd, "export", export);
- list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports);
- export->exp_obd->obd_num_exports++;
- spin_unlock(&obd->obd_dev_lock);
- cfs_hash_putref(hash);
- return export;
-
-exit_unlock:
- spin_unlock(&obd->obd_dev_lock);
-exit_err:
- if (hash)
- cfs_hash_putref(hash);
- class_handle_unhash(&export->exp_handle);
- LASSERT(hlist_unhashed(&export->exp_uuid_hash));
- obd_destroy_export(export);
- kfree(export);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL(class_new_export);
-
-void class_unlink_export(struct obd_export *exp)
-{
- class_handle_unhash(&exp->exp_handle);
-
- spin_lock(&exp->exp_obd->obd_dev_lock);
- /* delete an uuid-export hashitem from hashtables */
- if (!hlist_unhashed(&exp->exp_uuid_hash))
- cfs_hash_del(exp->exp_obd->obd_uuid_hash,
- &exp->exp_client_uuid,
- &exp->exp_uuid_hash);
-
- list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
- exp->exp_obd->obd_num_exports--;
- spin_unlock(&exp->exp_obd->obd_dev_lock);
- class_export_put(exp);
-}
-
-/* Import management functions */
-static void class_import_destroy(struct obd_import *imp)
-{
- CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
- imp->imp_obd->obd_name);
-
- LASSERT_ATOMIC_ZERO(&imp->imp_refcount);
-
- ptlrpc_put_connection_superhack(imp->imp_connection);
-
- while (!list_empty(&imp->imp_conn_list)) {
- struct obd_import_conn *imp_conn;
-
- imp_conn = list_entry(imp->imp_conn_list.next,
- struct obd_import_conn, oic_item);
- list_del_init(&imp_conn->oic_item);
- ptlrpc_put_connection_superhack(imp_conn->oic_conn);
- kfree(imp_conn);
- }
-
- LASSERT(!imp->imp_sec);
- class_decref(imp->imp_obd, "import", imp);
- OBD_FREE_RCU(imp, sizeof(*imp), &imp->imp_handle);
-}
-
-static void import_handle_addref(void *import)
-{
- class_import_get(import);
-}
-
-static struct portals_handle_ops import_handle_ops = {
- .hop_addref = import_handle_addref,
- .hop_free = NULL,
-};
-
-struct obd_import *class_import_get(struct obd_import *import)
-{
- atomic_inc(&import->imp_refcount);
- CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", import,
- atomic_read(&import->imp_refcount),
- import->imp_obd->obd_name);
- return import;
-}
-EXPORT_SYMBOL(class_import_get);
-
-void class_import_put(struct obd_import *imp)
-{
- LASSERT_ATOMIC_GT_LT(&imp->imp_refcount, 0, LI_POISON);
-
- CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp,
- atomic_read(&imp->imp_refcount) - 1,
- imp->imp_obd->obd_name);
-
- if (atomic_dec_and_test(&imp->imp_refcount)) {
- CDEBUG(D_INFO, "final put import %p\n", imp);
- obd_zombie_import_add(imp);
- }
-
- /* catch possible import put race */
- LASSERT_ATOMIC_GE_LT(&imp->imp_refcount, 0, LI_POISON);
-}
-EXPORT_SYMBOL(class_import_put);
-
-static void init_imp_at(struct imp_at *at)
-{
- int i;
-
- at_init(&at->iat_net_latency, 0, 0);
- for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
- /* max service estimates are tracked on the server side, so
- * don't use the AT history here, just use the last reported
- * val. (But keep hist for proc histogram, worst_ever)
- */
- at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT,
- AT_FLG_NOHIST);
- }
-}
-
-static void obd_zombie_imp_cull(struct work_struct *ws)
-{
- struct obd_import *import = container_of(ws, struct obd_import, imp_zombie_work);
-
- class_import_destroy(import);
-}
-
-struct obd_import *class_new_import(struct obd_device *obd)
-{
- struct obd_import *imp;
-
- imp = kzalloc(sizeof(*imp), GFP_NOFS);
- if (!imp)
- return NULL;
-
- INIT_LIST_HEAD(&imp->imp_pinger_chain);
- INIT_LIST_HEAD(&imp->imp_replay_list);
- INIT_LIST_HEAD(&imp->imp_sending_list);
- INIT_LIST_HEAD(&imp->imp_delayed_list);
- INIT_LIST_HEAD(&imp->imp_committed_list);
- INIT_LIST_HEAD(&imp->imp_unreplied_list);
- imp->imp_known_replied_xid = 0;
- imp->imp_replay_cursor = &imp->imp_committed_list;
- spin_lock_init(&imp->imp_lock);
- imp->imp_last_success_conn = 0;
- imp->imp_state = LUSTRE_IMP_NEW;
- imp->imp_obd = class_incref(obd, "import", imp);
- mutex_init(&imp->imp_sec_mutex);
- init_waitqueue_head(&imp->imp_recovery_waitq);
- INIT_WORK(&imp->imp_zombie_work, obd_zombie_imp_cull);
-
- atomic_set(&imp->imp_refcount, 2);
- atomic_set(&imp->imp_unregistering, 0);
- atomic_set(&imp->imp_inflight, 0);
- atomic_set(&imp->imp_replay_inflight, 0);
- atomic_set(&imp->imp_inval_count, 0);
- INIT_LIST_HEAD(&imp->imp_conn_list);
- INIT_LIST_HEAD(&imp->imp_handle.h_link);
- class_handle_hash(&imp->imp_handle, &import_handle_ops);
- init_imp_at(&imp->imp_at);
-
- /* the default magic is V2, will be used in connect RPC, and
- * then adjusted according to the flags in request/reply.
- */
- imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
-
- return imp;
-}
-EXPORT_SYMBOL(class_new_import);
-
-void class_destroy_import(struct obd_import *import)
-{
- LASSERT(import);
- LASSERT(import != LP_POISON);
-
- class_handle_unhash(&import->imp_handle);
-
- spin_lock(&import->imp_lock);
- import->imp_generation++;
- spin_unlock(&import->imp_lock);
- class_import_put(import);
-}
-EXPORT_SYMBOL(class_destroy_import);
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-
-void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
-{
- spin_lock(&exp->exp_locks_list_guard);
-
- LASSERT(lock->l_exp_refs_nr >= 0);
-
- if (lock->l_exp_refs_target && lock->l_exp_refs_target != exp) {
- LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n",
- exp, lock, lock->l_exp_refs_target);
- }
- if ((lock->l_exp_refs_nr++) == 0) {
- list_add(&lock->l_exp_refs_link, &exp->exp_locks_list);
- lock->l_exp_refs_target = exp;
- }
- CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
- lock, exp, lock->l_exp_refs_nr);
- spin_unlock(&exp->exp_locks_list_guard);
-}
-
-void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
-{
- spin_lock(&exp->exp_locks_list_guard);
- LASSERT(lock->l_exp_refs_nr > 0);
- if (lock->l_exp_refs_target != exp) {
- LCONSOLE_WARN("lock %p, mismatching export pointers: %p, %p\n",
- lock, lock->l_exp_refs_target, exp);
- }
- if (-- lock->l_exp_refs_nr == 0) {
- list_del_init(&lock->l_exp_refs_link);
- lock->l_exp_refs_target = NULL;
- }
- CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
- lock, exp, lock->l_exp_refs_nr);
- spin_unlock(&exp->exp_locks_list_guard);
-}
-#endif
-
-/* A connection defines an export context in which preallocation can
- * be managed. This releases the export pointer reference, and returns
- * the export handle, so the export refcount is 1 when this function
- * returns.
- */
-int class_connect(struct lustre_handle *conn, struct obd_device *obd,
- struct obd_uuid *cluuid)
-{
- struct obd_export *export;
-
- LASSERT(conn);
- LASSERT(obd);
- LASSERT(cluuid);
-
- export = class_new_export(obd, cluuid);
- if (IS_ERR(export))
- return PTR_ERR(export);
-
- conn->cookie = export->exp_handle.h_cookie;
- class_export_put(export);
-
- CDEBUG(D_IOCTL, "connect: client %s, cookie %#llx\n",
- cluuid->uuid, conn->cookie);
- return 0;
-}
-EXPORT_SYMBOL(class_connect);
-
-/* This function removes 1-3 references from the export:
- * 1 - for export pointer passed
- * and if disconnect really need
- * 2 - removing from hash
- * 3 - in client_unlink_export
- * The export pointer passed to this function can destroyed
- */
-int class_disconnect(struct obd_export *export)
-{
- int already_disconnected;
-
- if (!export) {
- CWARN("attempting to free NULL export %p\n", export);
- return -EINVAL;
- }
-
- spin_lock(&export->exp_lock);
- already_disconnected = export->exp_disconnected;
- export->exp_disconnected = 1;
- spin_unlock(&export->exp_lock);
-
- /* class_cleanup(), abort_recovery(), and class_fail_export()
- * all end up in here, and if any of them race we shouldn't
- * call extra class_export_puts().
- */
- if (already_disconnected)
- goto no_disconn;
-
- CDEBUG(D_IOCTL, "disconnect: cookie %#llx\n",
- export->exp_handle.h_cookie);
-
- class_unlink_export(export);
-no_disconn:
- class_export_put(export);
- return 0;
-}
-EXPORT_SYMBOL(class_disconnect);
-
-void class_fail_export(struct obd_export *exp)
-{
- int rc, already_failed;
-
- spin_lock(&exp->exp_lock);
- already_failed = exp->exp_failed;
- exp->exp_failed = 1;
- spin_unlock(&exp->exp_lock);
-
- if (already_failed) {
- CDEBUG(D_HA, "disconnecting dead export %p/%s; skipping\n",
- exp, exp->exp_client_uuid.uuid);
- return;
- }
-
- CDEBUG(D_HA, "disconnecting export %p/%s\n",
- exp, exp->exp_client_uuid.uuid);
-
- if (obd_dump_on_timeout)
- libcfs_debug_dumplog();
-
- /* need for safe call CDEBUG after obd_disconnect */
- class_export_get(exp);
-
- /* Most callers into obd_disconnect are removing their own reference
- * (request, for example) in addition to the one from the hash table.
- * We don't have such a reference here, so make one.
- */
- class_export_get(exp);
- rc = obd_disconnect(exp);
- if (rc)
- CERROR("disconnecting export %p failed: %d\n", exp, rc);
- else
- CDEBUG(D_HA, "disconnected export %p/%s\n",
- exp, exp->exp_client_uuid.uuid);
- class_export_put(exp);
-}
-EXPORT_SYMBOL(class_fail_export);
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-void (*class_export_dump_hook)(struct obd_export *) = NULL;
-#endif
-
-/**
- * Add export to the obd_zombie thread and notify it.
- */
-static void obd_zombie_export_add(struct obd_export *exp)
-{
- spin_lock(&exp->exp_obd->obd_dev_lock);
- LASSERT(!list_empty(&exp->exp_obd_chain));
- list_del_init(&exp->exp_obd_chain);
- spin_unlock(&exp->exp_obd->obd_dev_lock);
- queue_work(zombie_wq, &exp->exp_zombie_work);
-}
-
-/**
- * Add import to the obd_zombie thread and notify it.
- */
-static void obd_zombie_import_add(struct obd_import *imp)
-{
- LASSERT(!imp->imp_sec);
- queue_work(zombie_wq, &imp->imp_zombie_work);
-}
-
-/**
- * wait when obd_zombie import/export queues become empty
- */
-void obd_zombie_barrier(void)
-{
- flush_workqueue(zombie_wq);
-}
-EXPORT_SYMBOL(obd_zombie_barrier);
-
-/**
- * start destroy zombie import/export thread
- */
-int obd_zombie_impexp_init(void)
-{
- zombie_wq = alloc_workqueue("obd_zombid", 0, 0);
- if (!zombie_wq)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * stop destroy zombie import/export thread
- */
-void obd_zombie_impexp_stop(void)
-{
- destroy_workqueue(zombie_wq);
-}
-
-struct obd_request_slot_waiter {
- struct list_head orsw_entry;
- wait_queue_head_t orsw_waitq;
- bool orsw_signaled;
-};
-
-static bool obd_request_slot_avail(struct client_obd *cli,
- struct obd_request_slot_waiter *orsw)
-{
- bool avail;
-
- spin_lock(&cli->cl_loi_list_lock);
- avail = !!list_empty(&orsw->orsw_entry);
- spin_unlock(&cli->cl_loi_list_lock);
-
- return avail;
-};
-
-/*
- * For network flow control, the RPC sponsor needs to acquire a credit
- * before sending the RPC. The credits count for a connection is defined
- * by the "cl_max_rpcs_in_flight". If all the credits are occpuied, then
- * the subsequent RPC sponsors need to wait until others released their
- * credits, or the administrator increased the "cl_max_rpcs_in_flight".
- */
-int obd_get_request_slot(struct client_obd *cli)
-{
- struct obd_request_slot_waiter orsw;
- int rc;
-
- spin_lock(&cli->cl_loi_list_lock);
- if (cli->cl_r_in_flight < cli->cl_max_rpcs_in_flight) {
- cli->cl_r_in_flight++;
- spin_unlock(&cli->cl_loi_list_lock);
- return 0;
- }
-
- init_waitqueue_head(&orsw.orsw_waitq);
- list_add_tail(&orsw.orsw_entry, &cli->cl_loi_read_list);
- orsw.orsw_signaled = false;
- spin_unlock(&cli->cl_loi_list_lock);
-
- rc = l_wait_event_abortable(orsw.orsw_waitq,
- obd_request_slot_avail(cli, &orsw) ||
- orsw.orsw_signaled);
-
- /*
- * Here, we must take the lock to avoid the on-stack 'orsw' to be
- * freed but other (such as obd_put_request_slot) is using it.
- */
- spin_lock(&cli->cl_loi_list_lock);
- if (rc) {
- if (!orsw.orsw_signaled) {
- if (list_empty(&orsw.orsw_entry))
- cli->cl_r_in_flight--;
- else
- list_del(&orsw.orsw_entry);
- }
- }
-
- if (orsw.orsw_signaled) {
- LASSERT(list_empty(&orsw.orsw_entry));
-
- rc = -EINTR;
- }
- spin_unlock(&cli->cl_loi_list_lock);
-
- return rc;
-}
-EXPORT_SYMBOL(obd_get_request_slot);
-
-void obd_put_request_slot(struct client_obd *cli)
-{
- struct obd_request_slot_waiter *orsw;
-
- spin_lock(&cli->cl_loi_list_lock);
- cli->cl_r_in_flight--;
-
- /* If there is free slot, wakeup the first waiter. */
- if (!list_empty(&cli->cl_loi_read_list) &&
- likely(cli->cl_r_in_flight < cli->cl_max_rpcs_in_flight)) {
- orsw = list_entry(cli->cl_loi_read_list.next,
- struct obd_request_slot_waiter, orsw_entry);
- list_del_init(&orsw->orsw_entry);
- cli->cl_r_in_flight++;
- wake_up(&orsw->orsw_waitq);
- }
- spin_unlock(&cli->cl_loi_list_lock);
-}
-EXPORT_SYMBOL(obd_put_request_slot);
-
-__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli)
-{
- return cli->cl_max_rpcs_in_flight;
-}
-EXPORT_SYMBOL(obd_get_max_rpcs_in_flight);
-
-int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
-{
- struct obd_request_slot_waiter *orsw;
- const char *typ_name;
- __u32 old;
- int diff;
- int rc;
- int i;
-
- if (max > OBD_MAX_RIF_MAX || max < 1)
- return -ERANGE;
-
- typ_name = cli->cl_import->imp_obd->obd_type->typ_name;
- if (!strcmp(typ_name, LUSTRE_MDC_NAME)) {
- /*
- * adjust max_mod_rpcs_in_flight to ensure it is always
- * strictly lower that max_rpcs_in_flight
- */
- if (max < 2) {
- CERROR("%s: cannot set max_rpcs_in_flight to 1 because it must be higher than max_mod_rpcs_in_flight value\n",
- cli->cl_import->imp_obd->obd_name);
- return -ERANGE;
- }
- if (max <= cli->cl_max_mod_rpcs_in_flight) {
- rc = obd_set_max_mod_rpcs_in_flight(cli, max - 1);
- if (rc)
- return rc;
- }
- }
-
- spin_lock(&cli->cl_loi_list_lock);
- old = cli->cl_max_rpcs_in_flight;
- cli->cl_max_rpcs_in_flight = max;
- diff = max - old;
-
- /* We increase the max_rpcs_in_flight, then wakeup some waiters. */
- for (i = 0; i < diff; i++) {
- if (list_empty(&cli->cl_loi_read_list))
- break;
-
- orsw = list_entry(cli->cl_loi_read_list.next,
- struct obd_request_slot_waiter, orsw_entry);
- list_del_init(&orsw->orsw_entry);
- cli->cl_r_in_flight++;
- wake_up(&orsw->orsw_waitq);
- }
- spin_unlock(&cli->cl_loi_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(obd_set_max_rpcs_in_flight);
-
-int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, __u16 max)
-{
- struct obd_connect_data *ocd;
- u16 maxmodrpcs;
- u16 prev;
-
- if (max > OBD_MAX_RIF_MAX || max < 1)
- return -ERANGE;
-
- /* cannot exceed or equal max_rpcs_in_flight */
- if (max >= cli->cl_max_rpcs_in_flight) {
- CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher or equal to max_rpcs_in_flight value (%u)\n",
- cli->cl_import->imp_obd->obd_name,
- max, cli->cl_max_rpcs_in_flight);
- return -ERANGE;
- }
-
- /* cannot exceed max modify RPCs in flight supported by the server */
- ocd = &cli->cl_import->imp_connect_data;
- if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
- maxmodrpcs = ocd->ocd_maxmodrpcs;
- else
- maxmodrpcs = 1;
- if (max > maxmodrpcs) {
- CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher than max_mod_rpcs_per_client value (%hu) returned by the server at connection\n",
- cli->cl_import->imp_obd->obd_name,
- max, maxmodrpcs);
- return -ERANGE;
- }
-
- spin_lock(&cli->cl_mod_rpcs_lock);
-
- prev = cli->cl_max_mod_rpcs_in_flight;
- cli->cl_max_mod_rpcs_in_flight = max;
-
- /* wakeup waiters if limit has been increased */
- if (cli->cl_max_mod_rpcs_in_flight > prev)
- wake_up(&cli->cl_mod_rpcs_waitq);
-
- spin_unlock(&cli->cl_mod_rpcs_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(obd_set_max_mod_rpcs_in_flight);
-
-#define pct(a, b) (b ? (a * 100) / b : 0)
-
-int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq)
-{
- unsigned long mod_tot = 0, mod_cum;
- struct timespec64 now;
- int i;
-
- ktime_get_real_ts64(&now);
-
- spin_lock(&cli->cl_mod_rpcs_lock);
-
- seq_printf(seq, "snapshot_time: %llu.%9lu (secs.nsecs)\n",
- (s64)now.tv_sec, (unsigned long)now.tv_nsec);
- seq_printf(seq, "modify_RPCs_in_flight: %hu\n",
- cli->cl_mod_rpcs_in_flight);
-
- seq_puts(seq, "\n\t\t\tmodify\n");
- seq_puts(seq, "rpcs in flight rpcs %% cum %%\n");
-
- mod_tot = lprocfs_oh_sum(&cli->cl_mod_rpcs_hist);
-
- mod_cum = 0;
- for (i = 0; i < OBD_HIST_MAX; i++) {
- unsigned long mod = cli->cl_mod_rpcs_hist.oh_buckets[i];
-
- mod_cum += mod;
- seq_printf(seq, "%d:\t\t%10lu %3lu %3lu\n",
- i, mod, pct(mod, mod_tot),
- pct(mod_cum, mod_tot));
- if (mod_cum == mod_tot)
- break;
- }
-
- spin_unlock(&cli->cl_mod_rpcs_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(obd_mod_rpc_stats_seq_show);
-#undef pct
-
-/*
- * The number of modify RPCs sent in parallel is limited
- * because the server has a finite number of slots per client to
- * store request result and ensure reply reconstruction when needed.
- * On the client, this limit is stored in cl_max_mod_rpcs_in_flight
- * that takes into account server limit and cl_max_rpcs_in_flight
- * value.
- * On the MDC client, to avoid a potential deadlock (see Bugzilla 3462),
- * one close request is allowed above the maximum.
- */
-static inline bool obd_mod_rpc_slot_avail_locked(struct client_obd *cli,
- bool close_req)
-{
- bool avail;
-
- /* A slot is available if
- * - number of modify RPCs in flight is less than the max
- * - it's a close RPC and no other close request is in flight
- */
- avail = cli->cl_mod_rpcs_in_flight < cli->cl_max_mod_rpcs_in_flight ||
- (close_req && !cli->cl_close_rpcs_in_flight);
-
- return avail;
-}
-
-static inline bool obd_mod_rpc_slot_avail(struct client_obd *cli,
- bool close_req)
-{
- bool avail;
-
- spin_lock(&cli->cl_mod_rpcs_lock);
- avail = obd_mod_rpc_slot_avail_locked(cli, close_req);
- spin_unlock(&cli->cl_mod_rpcs_lock);
- return avail;
-}
-
-/* Get a modify RPC slot from the obd client @cli according
- * to the kind of operation @opc that is going to be sent
- * and the intent @it of the operation if it applies.
- * If the maximum number of modify RPCs in flight is reached
- * the thread is put to sleep.
- * Returns the tag to be set in the request message. Tag 0
- * is reserved for non-modifying requests.
- */
-u16 obd_get_mod_rpc_slot(struct client_obd *cli, __u32 opc,
- struct lookup_intent *it)
-{
- bool close_req = false;
- u16 i, max;
-
- /* read-only metadata RPCs don't consume a slot on MDT
- * for reply reconstruction
- */
- if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
- return 0;
-
- if (opc == MDS_CLOSE)
- close_req = true;
-
- do {
- spin_lock(&cli->cl_mod_rpcs_lock);
- max = cli->cl_max_mod_rpcs_in_flight;
- if (obd_mod_rpc_slot_avail_locked(cli, close_req)) {
- /* there is a slot available */
- cli->cl_mod_rpcs_in_flight++;
- if (close_req)
- cli->cl_close_rpcs_in_flight++;
- lprocfs_oh_tally(&cli->cl_mod_rpcs_hist,
- cli->cl_mod_rpcs_in_flight);
- /* find a free tag */
- i = find_first_zero_bit(cli->cl_mod_tag_bitmap,
- max + 1);
- LASSERT(i < OBD_MAX_RIF_MAX);
- LASSERT(!test_and_set_bit(i, cli->cl_mod_tag_bitmap));
- spin_unlock(&cli->cl_mod_rpcs_lock);
- /* tag 0 is reserved for non-modify RPCs */
- return i + 1;
- }
- spin_unlock(&cli->cl_mod_rpcs_lock);
-
- CDEBUG(D_RPCTRACE, "%s: sleeping for a modify RPC slot opc %u, max %hu\n",
- cli->cl_import->imp_obd->obd_name, opc, max);
-
- wait_event_idle(cli->cl_mod_rpcs_waitq,
- obd_mod_rpc_slot_avail(cli, close_req));
- } while (true);
-}
-EXPORT_SYMBOL(obd_get_mod_rpc_slot);
-
-/*
- * Put a modify RPC slot from the obd client @cli according
- * to the kind of operation @opc that has been sent and the
- * intent @it of the operation if it applies.
- */
-void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
- struct lookup_intent *it, u16 tag)
-{
- bool close_req = false;
-
- if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
- return;
-
- if (opc == MDS_CLOSE)
- close_req = true;
-
- spin_lock(&cli->cl_mod_rpcs_lock);
- cli->cl_mod_rpcs_in_flight--;
- if (close_req)
- cli->cl_close_rpcs_in_flight--;
- /* release the tag in the bitmap */
- LASSERT(tag - 1 < OBD_MAX_RIF_MAX);
- LASSERT(test_and_clear_bit(tag - 1, cli->cl_mod_tag_bitmap) != 0);
- spin_unlock(&cli->cl_mod_rpcs_lock);
- wake_up(&cli->cl_mod_rpcs_waitq);
-}
-EXPORT_SYMBOL(obd_put_mod_rpc_slot);
diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
deleted file mode 100644
index b9bf81607bbf..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- *
- * Kernel <-> userspace communication routines.
- * Using pipes for all arches.
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-#define D_KUC D_OTHER
-
-#include <obd_support.h>
-#include <lustre_kernelcomm.h>
-
-/**
- * libcfs_kkuc_msg_put - send an message from kernel to userspace
- * @param fp to send the message to
- * @param payload Payload data. First field of payload is always
- * struct kuc_hdr
- */
-int libcfs_kkuc_msg_put(struct file *filp, void *payload)
-{
- struct kuc_hdr *kuch = (struct kuc_hdr *)payload;
- ssize_t count = kuch->kuc_msglen;
- loff_t offset = 0;
- int rc = -ENXIO;
-
- if (IS_ERR_OR_NULL(filp))
- return -EBADF;
-
- if (kuch->kuc_magic != KUC_MAGIC) {
- CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic);
- return rc;
- }
-
- while (count > 0) {
- rc = kernel_write(filp, payload, count, &offset);
- if (rc < 0)
- break;
- count -= rc;
- payload += rc;
- rc = 0;
- }
-
- if (rc < 0)
- CWARN("message send failed (%d)\n", rc);
- else
- CDEBUG(D_KUC, "Sent message rc=%d, fp=%p\n", rc, filp);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_kkuc_msg_put);
-
-/*
- * Broadcast groups are global across all mounted filesystems;
- * i.e. registering for a group on 1 fs will get messages for that
- * group from any fs
- */
-/** A single group registration has a uid and a file pointer */
-struct kkuc_reg {
- struct list_head kr_chain;
- int kr_uid;
- struct file *kr_fp;
- char kr_data[0];
-};
-
-static struct list_head kkuc_groups[KUC_GRP_MAX + 1] = {};
-/* Protect message sending against remove and adds */
-static DECLARE_RWSEM(kg_sem);
-
-/** Add a receiver to a broadcast group
- * @param filp pipe to write into
- * @param uid identifier for this receiver
- * @param group group number
- * @param data user data
- */
-int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group,
- void *data, size_t data_len)
-{
- struct kkuc_reg *reg;
-
- if (group > KUC_GRP_MAX) {
- CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
- return -EINVAL;
- }
-
- /* fput in group_rem */
- if (!filp)
- return -EBADF;
-
- /* freed in group_rem */
- reg = kmalloc(sizeof(*reg) + data_len, 0);
- if (!reg)
- return -ENOMEM;
-
- reg->kr_fp = filp;
- reg->kr_uid = uid;
- memcpy(reg->kr_data, data, data_len);
-
- down_write(&kg_sem);
- if (!kkuc_groups[group].next)
- INIT_LIST_HEAD(&kkuc_groups[group]);
- list_add(&reg->kr_chain, &kkuc_groups[group]);
- up_write(&kg_sem);
-
- CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group);
-
- return 0;
-}
-EXPORT_SYMBOL(libcfs_kkuc_group_add);
-
-int libcfs_kkuc_group_rem(int uid, unsigned int group)
-{
- struct kkuc_reg *reg, *next;
-
- if (!kkuc_groups[group].next)
- return 0;
-
- if (!uid) {
- /* Broadcast a shutdown message */
- struct kuc_hdr lh;
-
- lh.kuc_magic = KUC_MAGIC;
- lh.kuc_transport = KUC_TRANSPORT_GENERIC;
- lh.kuc_msgtype = KUC_MSG_SHUTDOWN;
- lh.kuc_msglen = sizeof(lh);
- libcfs_kkuc_group_put(group, &lh);
- }
-
- down_write(&kg_sem);
- list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
- if (!uid || (uid == reg->kr_uid)) {
- list_del(&reg->kr_chain);
- CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n",
- reg->kr_uid, reg->kr_fp, group);
- if (reg->kr_fp)
- fput(reg->kr_fp);
- kfree(reg);
- }
- }
- up_write(&kg_sem);
-
- return 0;
-}
-EXPORT_SYMBOL(libcfs_kkuc_group_rem);
-
-int libcfs_kkuc_group_put(unsigned int group, void *payload)
-{
- struct kkuc_reg *reg;
- int rc = 0;
- int one_success = 0;
-
- down_write(&kg_sem);
- list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
- if (reg->kr_fp) {
- rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
- if (!rc) {
- one_success = 1;
- } else if (rc == -EPIPE) {
- fput(reg->kr_fp);
- reg->kr_fp = NULL;
- }
- }
- }
- up_write(&kg_sem);
-
- /*
- * don't return an error if the message has been delivered
- * at least to one agent
- */
- if (one_success)
- rc = 0;
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_kkuc_group_put);
-
-/**
- * Calls a callback function for each link of the given kuc group.
- * @param group the group to call the function on.
- * @param cb_func the function to be called.
- * @param cb_arg extra argument to be passed to the callback function.
- */
-int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func,
- void *cb_arg)
-{
- struct kkuc_reg *reg;
- int rc = 0;
-
- if (group > KUC_GRP_MAX) {
- CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
- return -EINVAL;
- }
-
- /* no link for this group */
- if (!kkuc_groups[group].next)
- return 0;
-
- down_read(&kg_sem);
- list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
- if (reg->kr_fp)
- rc = cb_func(reg->kr_data, cb_arg);
- }
- up_read(&kg_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_kkuc_group_foreach);
diff --git a/drivers/staging/lustre/lustre/obdclass/linkea.c b/drivers/staging/lustre/lustre/obdclass/linkea.c
deleted file mode 100644
index 74c99ee216bb..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/linkea.c
+++ /dev/null
@@ -1,249 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2013, 2014, Intel Corporation.
- * Use is subject to license terms.
- *
- * Author: Di Wang <di.wang@intel.com>
- */
-
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <obd.h>
-#include <lustre_linkea.h>
-
-int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf)
-{
- buf->lb_buf = kzalloc(PAGE_SIZE, GFP_NOFS);
- if (!buf->lb_buf)
- return -ENOMEM;
- buf->lb_len = PAGE_SIZE;
- ldata->ld_buf = buf;
- ldata->ld_leh = ldata->ld_buf->lb_buf;
- ldata->ld_leh->leh_magic = LINK_EA_MAGIC;
- ldata->ld_leh->leh_len = sizeof(struct link_ea_header);
- ldata->ld_leh->leh_reccount = 0;
- ldata->ld_leh->leh_overflow_time = 0;
- ldata->ld_leh->leh_padding = 0;
- return 0;
-}
-EXPORT_SYMBOL(linkea_data_new);
-
-int linkea_init(struct linkea_data *ldata)
-{
- struct link_ea_header *leh;
-
- LASSERT(ldata->ld_buf);
- leh = ldata->ld_buf->lb_buf;
- if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
- leh->leh_magic = LINK_EA_MAGIC;
- leh->leh_reccount = __swab32(leh->leh_reccount);
- leh->leh_len = __swab64(leh->leh_len);
- leh->leh_overflow_time = __swab32(leh->leh_overflow_time);
- leh->leh_padding = __swab32(leh->leh_padding);
- /* individual entries are swabbed by linkea_entry_unpack() */
- }
-
- if (leh->leh_magic != LINK_EA_MAGIC)
- return -EINVAL;
-
- if (leh->leh_reccount == 0 && leh->leh_overflow_time == 0)
- return -ENODATA;
-
- ldata->ld_leh = leh;
- return 0;
-}
-EXPORT_SYMBOL(linkea_init);
-
-int linkea_init_with_rec(struct linkea_data *ldata)
-{
- int rc;
-
- rc = linkea_init(ldata);
- if (!rc && ldata->ld_leh->leh_reccount == 0)
- rc = -ENODATA;
-
- return rc;
-}
-EXPORT_SYMBOL(linkea_init_with_rec);
-
-/**
- * Pack a link_ea_entry.
- * All elements are stored as chars to avoid alignment issues.
- * Numbers are always big-endian
- * \retval record length
- */
-int linkea_entry_pack(struct link_ea_entry *lee, const struct lu_name *lname,
- const struct lu_fid *pfid)
-{
- struct lu_fid tmpfid;
- int reclen;
-
- tmpfid = *pfid;
- if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LINKEA_CRASH))
- tmpfid.f_ver = ~0;
- fid_cpu_to_be(&tmpfid, &tmpfid);
- memcpy(&lee->lee_parent_fid, &tmpfid, sizeof(tmpfid));
- memcpy(lee->lee_name, lname->ln_name, lname->ln_namelen);
- reclen = sizeof(struct link_ea_entry) + lname->ln_namelen;
-
- lee->lee_reclen[0] = (reclen >> 8) & 0xff;
- lee->lee_reclen[1] = reclen & 0xff;
- return reclen;
-}
-EXPORT_SYMBOL(linkea_entry_pack);
-
-void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen,
- struct lu_name *lname, struct lu_fid *pfid)
-{
- LASSERT(lee);
-
- *reclen = (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
- memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
- fid_be_to_cpu(pfid, pfid);
- if (lname) {
- lname->ln_name = lee->lee_name;
- lname->ln_namelen = *reclen - sizeof(struct link_ea_entry);
- }
-}
-EXPORT_SYMBOL(linkea_entry_unpack);
-
-/**
- * Add a record to the end of link ea buf
- **/
-int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname,
- const struct lu_fid *pfid)
-{
- struct link_ea_header *leh = ldata->ld_leh;
- int reclen;
-
- LASSERT(leh);
-
- if (!lname || !pfid)
- return -EINVAL;
-
- reclen = lname->ln_namelen + sizeof(struct link_ea_entry);
- if (unlikely(leh->leh_len + reclen > MAX_LINKEA_SIZE)) {
- /*
- * Use 32-bits to save the overflow time, although it will
- * shrink the ktime_get_real_seconds() returned 64-bits value
- * to 32-bits value, it is still quite large and can be used
- * for about 140 years. That is enough.
- */
- leh->leh_overflow_time = ktime_get_real_seconds();
- if (unlikely(leh->leh_overflow_time == 0))
- leh->leh_overflow_time++;
-
- CDEBUG(D_INODE, "No enough space to hold linkea entry '" DFID ": %.*s' at %u\n",
- PFID(pfid), lname->ln_namelen,
- lname->ln_name, leh->leh_overflow_time);
- return 0;
- }
-
- if (leh->leh_len + reclen > ldata->ld_buf->lb_len) {
- /* Note: this never happens as MAX_LINKEA_SIZE is 4096, while
- * the initial allocation is PAGE_SIZE.
- */
- void *b = krealloc(ldata->ld_buf->lb_buf, leh->leh_len + reclen, GFP_NOFS);
- if (!b)
- return -ENOMEM;
-
- ldata->ld_buf->lb_len = leh->leh_len + reclen;
- leh = ldata->ld_leh = ldata->ld_buf->lb_buf = b;
- }
-
- ldata->ld_lee = ldata->ld_buf->lb_buf + leh->leh_len;
- ldata->ld_reclen = linkea_entry_pack(ldata->ld_lee, lname, pfid);
- leh->leh_len += ldata->ld_reclen;
- leh->leh_reccount++;
- CDEBUG(D_INODE, "New link_ea name '" DFID ":%.*s' is added\n",
- PFID(pfid), lname->ln_namelen, lname->ln_name);
- return 0;
-}
-EXPORT_SYMBOL(linkea_add_buf);
-
-/** Del the current record from the link ea buf */
-void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname)
-{
- LASSERT(ldata->ld_leh && ldata->ld_lee);
- LASSERT(ldata->ld_leh->leh_reccount > 0);
-
- ldata->ld_leh->leh_reccount--;
- ldata->ld_leh->leh_len -= ldata->ld_reclen;
- memmove(ldata->ld_lee, (char *)ldata->ld_lee + ldata->ld_reclen,
- (char *)ldata->ld_leh + ldata->ld_leh->leh_len -
- (char *)ldata->ld_lee);
- CDEBUG(D_INODE, "Old link_ea name '%.*s' is removed\n",
- lname->ln_namelen, lname->ln_name);
-
- if ((char *)ldata->ld_lee >= ((char *)ldata->ld_leh +
- ldata->ld_leh->leh_len))
- ldata->ld_lee = NULL;
-}
-EXPORT_SYMBOL(linkea_del_buf);
-
-/**
- * Check if such a link exists in linkEA.
- *
- * \param ldata link data the search to be done on
- * \param lname name in the parent's directory entry pointing to this object
- * \param pfid parent fid the link to be found for
- *
- * \retval 0 success
- * \retval -ENOENT link does not exist
- * \retval -ve on error
- */
-int linkea_links_find(struct linkea_data *ldata, const struct lu_name *lname,
- const struct lu_fid *pfid)
-{
- struct lu_name tmpname;
- struct lu_fid tmpfid;
- int count;
-
- LASSERT(ldata->ld_leh);
-
- /* link #0, if leh_reccount == 0 we skip the loop and return -ENOENT */
- if (likely(ldata->ld_leh->leh_reccount > 0))
- ldata->ld_lee = (struct link_ea_entry *)(ldata->ld_leh + 1);
-
- for (count = 0; count < ldata->ld_leh->leh_reccount; count++) {
- linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen,
- &tmpname, &tmpfid);
- if (tmpname.ln_namelen == lname->ln_namelen &&
- lu_fid_eq(&tmpfid, pfid) &&
- (strncmp(tmpname.ln_name, lname->ln_name,
- tmpname.ln_namelen) == 0))
- break;
- ldata->ld_lee = (struct link_ea_entry *)((char *)ldata->ld_lee +
- ldata->ld_reclen);
- }
-
- if (count == ldata->ld_leh->leh_reccount) {
- CDEBUG(D_INODE, "Old link_ea name '%.*s' not found\n",
- lname->ln_namelen, lname->ln_name);
- ldata->ld_lee = NULL;
- ldata->ld_reclen = 0;
- return -ENOENT;
- }
- return 0;
-}
-EXPORT_SYMBOL(linkea_links_find);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
deleted file mode 100644
index 7bceee7f121e..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ /dev/null
@@ -1,531 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/linux/linux-module.c
- *
- * Object Devices Class Driver
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/sched.h>
-#include <linux/lp.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/fcntl.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <asm/ioctls.h>
-#include <linux/uaccess.h>
-#include <linux/miscdevice.h>
-#include <linux/seq_file.h>
-#include <linux/kobject.h>
-
-#include <linux/libcfs/libcfs.h>
-#include <uapi/linux/lnet/lnetctl.h>
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lprocfs_status.h>
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include <uapi/linux/lustre/lustre_ver.h>
-
-#define OBD_MAX_IOCTL_BUFFER 8192
-
-static int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
-{
- if (data->ioc_len > BIT(30)) {
- CERROR("OBD ioctl: ioc_len larger than 1<<30\n");
- return 1;
- }
-
- if (data->ioc_inllen1 > BIT(30)) {
- CERROR("OBD ioctl: ioc_inllen1 larger than 1<<30\n");
- return 1;
- }
-
- if (data->ioc_inllen2 > BIT(30)) {
- CERROR("OBD ioctl: ioc_inllen2 larger than 1<<30\n");
- return 1;
- }
-
- if (data->ioc_inllen3 > BIT(30)) {
- CERROR("OBD ioctl: ioc_inllen3 larger than 1<<30\n");
- return 1;
- }
-
- if (data->ioc_inllen4 > BIT(30)) {
- CERROR("OBD ioctl: ioc_inllen4 larger than 1<<30\n");
- return 1;
- }
-
- if (data->ioc_inlbuf1 && data->ioc_inllen1 == 0) {
- CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n");
- return 1;
- }
-
- if (data->ioc_inlbuf2 && data->ioc_inllen2 == 0) {
- CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n");
- return 1;
- }
-
- if (data->ioc_inlbuf3 && data->ioc_inllen3 == 0) {
- CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n");
- return 1;
- }
-
- if (data->ioc_inlbuf4 && data->ioc_inllen4 == 0) {
- CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n");
- return 1;
- }
-
- if (data->ioc_pbuf1 && data->ioc_plen1 == 0) {
- CERROR("OBD ioctl: pbuf1 pointer but 0 length\n");
- return 1;
- }
-
- if (data->ioc_pbuf2 && data->ioc_plen2 == 0) {
- CERROR("OBD ioctl: pbuf2 pointer but 0 length\n");
- return 1;
- }
-
- if (!data->ioc_pbuf1 && data->ioc_plen1 != 0) {
- CERROR("OBD ioctl: plen1 set but NULL pointer\n");
- return 1;
- }
-
- if (!data->ioc_pbuf2 && data->ioc_plen2 != 0) {
- CERROR("OBD ioctl: plen2 set but NULL pointer\n");
- return 1;
- }
-
- if (obd_ioctl_packlen(data) > data->ioc_len) {
- CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n",
- obd_ioctl_packlen(data), data->ioc_len);
- return 1;
- }
-
- return 0;
-}
-
-/* buffer MUST be at least the size of obd_ioctl_hdr */
-int obd_ioctl_getdata(char **buf, int *len, void __user *arg)
-{
- struct obd_ioctl_hdr hdr;
- struct obd_ioctl_data *data;
- int err;
- int offset = 0;
-
- if (copy_from_user(&hdr, arg, sizeof(hdr)))
- return -EFAULT;
-
- if (hdr.ioc_version != OBD_IOCTL_VERSION) {
- CERROR("Version mismatch kernel (%x) vs application (%x)\n",
- OBD_IOCTL_VERSION, hdr.ioc_version);
- return -EINVAL;
- }
-
- if (hdr.ioc_len > OBD_MAX_IOCTL_BUFFER) {
- CERROR("User buffer len %d exceeds %d max buffer\n",
- hdr.ioc_len, OBD_MAX_IOCTL_BUFFER);
- return -EINVAL;
- }
-
- if (hdr.ioc_len < sizeof(struct obd_ioctl_data)) {
- CERROR("User buffer too small for ioctl (%d)\n", hdr.ioc_len);
- return -EINVAL;
- }
-
- /* When there are lots of processes calling vmalloc on multi-core
- * system, the high lock contention will hurt performance badly,
- * obdfilter-survey is an example, which relies on ioctl. So we'd
- * better avoid vmalloc on ioctl path. LU-66
- */
- *buf = kvzalloc(hdr.ioc_len, GFP_KERNEL);
- if (!*buf) {
- CERROR("Cannot allocate control buffer of len %d\n",
- hdr.ioc_len);
- return -EINVAL;
- }
- *len = hdr.ioc_len;
- data = (struct obd_ioctl_data *)*buf;
-
- if (copy_from_user(*buf, arg, hdr.ioc_len)) {
- err = -EFAULT;
- goto free_buf;
- }
- if (hdr.ioc_len != data->ioc_len) {
- err = -EINVAL;
- goto free_buf;
- }
-
- if (obd_ioctl_is_invalid(data)) {
- CERROR("ioctl not correctly formatted\n");
- err = -EINVAL;
- goto free_buf;
- }
-
- if (data->ioc_inllen1) {
- data->ioc_inlbuf1 = &data->ioc_bulk[0];
- offset += cfs_size_round(data->ioc_inllen1);
- }
-
- if (data->ioc_inllen2) {
- data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
- offset += cfs_size_round(data->ioc_inllen2);
- }
-
- if (data->ioc_inllen3) {
- data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
- offset += cfs_size_round(data->ioc_inllen3);
- }
-
- if (data->ioc_inllen4)
- data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
-
- return 0;
-
-free_buf:
- kvfree(*buf);
- return err;
-}
-EXPORT_SYMBOL(obd_ioctl_getdata);
-
-/* opening /dev/obd */
-static int obd_class_open(struct inode *inode, struct file *file)
-{
- try_module_get(THIS_MODULE);
- return 0;
-}
-
-/* closing /dev/obd */
-static int obd_class_release(struct inode *inode, struct file *file)
-{
- module_put(THIS_MODULE);
- return 0;
-}
-
-/* to control /dev/obd */
-static long obd_class_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- int err = 0;
-
- /* Allow non-root access for OBD_IOC_PING_TARGET - used by lfs check */
- if (!capable(CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET))
- return err = -EACCES;
- if ((cmd & 0xffffff00) == ((int)'T') << 8) /* ignore all tty ioctls */
- return err = -ENOTTY;
-
- err = class_handle_ioctl(cmd, (unsigned long)arg);
-
- return err;
-}
-
-/* declare character device */
-static const struct file_operations obd_psdev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = obd_class_ioctl, /* unlocked_ioctl */
- .open = obd_class_open, /* open */
- .release = obd_class_release, /* release */
-};
-
-/* modules setup */
-struct miscdevice obd_psdev = {
- .minor = OBD_DEV_MINOR,
- .name = OBD_DEV_NAME,
- .fops = &obd_psdev_fops,
-};
-
-static ssize_t version_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n", LUSTRE_VERSION_STRING);
-}
-
-static ssize_t pinger_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n", "on");
-}
-
-static ssize_t
-health_check_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
- bool healthy = true;
- int i;
- size_t len = 0;
-
- if (libcfs_catastrophe)
- return sprintf(buf, "LBUG\n");
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd;
-
- obd = class_num2obd(i);
- if (!obd || !obd->obd_attached || !obd->obd_set_up)
- continue;
-
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- if (obd->obd_stopping)
- continue;
-
- class_incref(obd, __func__, current);
- read_unlock(&obd_dev_lock);
-
- if (obd_health_check(NULL, obd))
- healthy = false;
- class_decref(obd, __func__, current);
- read_lock(&obd_dev_lock);
- }
- read_unlock(&obd_dev_lock);
-
- if (healthy)
- len = sprintf(buf, "healthy\n");
- else
- len = sprintf(buf, "NOT HEALTHY\n");
-
- return len;
-}
-
-static ssize_t jobid_var_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", obd_jobid_var);
-}
-
-static ssize_t jobid_var_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- if (!count || count > JOBSTATS_JOBID_VAR_MAX_LEN)
- return -EINVAL;
-
- memset(obd_jobid_var, 0, JOBSTATS_JOBID_VAR_MAX_LEN + 1);
-
- memcpy(obd_jobid_var, buffer, count);
-
- /* Trim the trailing '\n' if any */
- if (obd_jobid_var[count - 1] == '\n')
- obd_jobid_var[count - 1] = 0;
-
- return count;
-}
-
-static ssize_t jobid_name_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", obd_jobid_node);
-}
-
-static ssize_t jobid_name_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- if (!count || count > LUSTRE_JOBID_SIZE)
- return -EINVAL;
-
- memcpy(obd_jobid_node, buffer, count);
-
- obd_jobid_node[count] = 0;
-
- /* Trim the trailing '\n' if any */
- if (obd_jobid_node[count - 1] == '\n')
- obd_jobid_node[count - 1] = 0;
-
- return count;
-}
-
-/* Root for /sys/kernel/debug/lustre */
-struct dentry *debugfs_lustre_root;
-EXPORT_SYMBOL_GPL(debugfs_lustre_root);
-
-LUSTRE_RO_ATTR(version);
-LUSTRE_RO_ATTR(pinger);
-LUSTRE_RO_ATTR(health_check);
-LUSTRE_RW_ATTR(jobid_var);
-LUSTRE_RW_ATTR(jobid_name);
-
-static struct attribute *lustre_attrs[] = {
- &lustre_attr_version.attr,
- &lustre_attr_pinger.attr,
- &lustre_attr_health_check.attr,
- &lustre_attr_jobid_name.attr,
- &lustre_attr_jobid_var.attr,
- NULL,
-};
-
-static void *obd_device_list_seq_start(struct seq_file *p, loff_t *pos)
-{
- if (*pos >= class_devno_max())
- return NULL;
-
- return pos;
-}
-
-static void obd_device_list_seq_stop(struct seq_file *p, void *v)
-{
-}
-
-static void *obd_device_list_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- ++*pos;
- if (*pos >= class_devno_max())
- return NULL;
-
- return pos;
-}
-
-static int obd_device_list_seq_show(struct seq_file *p, void *v)
-{
- loff_t index = *(loff_t *)v;
- struct obd_device *obd = class_num2obd((int)index);
- char *status;
-
- if (!obd)
- return 0;
-
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- if (obd->obd_stopping)
- status = "ST";
- else if (obd->obd_inactive)
- status = "IN";
- else if (obd->obd_set_up)
- status = "UP";
- else if (obd->obd_attached)
- status = "AT";
- else
- status = "--";
-
- seq_printf(p, "%3d %s %s %s %s %d\n",
- (int)index, status, obd->obd_type->typ_name,
- obd->obd_name, obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
- return 0;
-}
-
-static const struct seq_operations obd_device_list_sops = {
- .start = obd_device_list_seq_start,
- .stop = obd_device_list_seq_stop,
- .next = obd_device_list_seq_next,
- .show = obd_device_list_seq_show,
-};
-
-static int obd_device_list_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int rc = seq_open(file, &obd_device_list_sops);
-
- if (rc)
- return rc;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-static const struct file_operations obd_device_list_fops = {
- .owner = THIS_MODULE,
- .open = obd_device_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-struct kobject *lustre_kobj;
-EXPORT_SYMBOL_GPL(lustre_kobj);
-
-static const struct attribute_group lustre_attr_group = {
- .attrs = lustre_attrs,
-};
-
-int class_procfs_init(void)
-{
- int rc = -ENOMEM;
- struct dentry *file;
-
- lustre_kobj = kobject_create_and_add("lustre", fs_kobj);
- if (!lustre_kobj)
- goto out;
-
- /* Create the files associated with this kobject */
- rc = sysfs_create_group(lustre_kobj, &lustre_attr_group);
- if (rc) {
- kobject_put(lustre_kobj);
- goto out;
- }
-
- debugfs_lustre_root = debugfs_create_dir("lustre", NULL);
- if (IS_ERR_OR_NULL(debugfs_lustre_root)) {
- rc = debugfs_lustre_root ? PTR_ERR(debugfs_lustre_root)
- : -ENOMEM;
- debugfs_lustre_root = NULL;
- sysfs_remove_group(lustre_kobj, &lustre_attr_group);
- kobject_put(lustre_kobj);
- goto out;
- }
-
- file = debugfs_create_file("devices", 0444, debugfs_lustre_root, NULL,
- &obd_device_list_fops);
- if (IS_ERR_OR_NULL(file)) {
- rc = file ? PTR_ERR(file) : -ENOMEM;
- sysfs_remove_group(lustre_kobj, &lustre_attr_group);
- kobject_put(lustre_kobj);
- goto out;
- }
-out:
- return rc;
-}
-
-int class_procfs_clean(void)
-{
- debugfs_remove_recursive(debugfs_lustre_root);
-
- debugfs_lustre_root = NULL;
-
- sysfs_remove_group(lustre_kobj, &lustre_attr_group);
- kobject_put(lustre_kobj);
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
deleted file mode 100644
index e5e8687784ee..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/sysctl.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/ctype.h>
-#include <linux/bitops.h>
-#include <linux/uaccess.h>
-#include <linux/utsname.h>
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <obd_support.h>
-#include <lprocfs_status.h>
-#include <obd_class.h>
-
-struct static_lustre_uintvalue_attr {
- struct {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
- char *buf);
- ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len);
- } u;
- int *value;
-};
-
-static ssize_t static_uintvalue_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct static_lustre_uintvalue_attr *lattr = (void *)attr;
-
- return sprintf(buf, "%d\n", *lattr->value);
-}
-
-static ssize_t static_uintvalue_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct static_lustre_uintvalue_attr *lattr = (void *)attr;
- int rc;
- unsigned int val;
-
- rc = kstrtouint(buffer, 10, &val);
- if (rc)
- return rc;
-
- *lattr->value = val;
-
- return count;
-}
-
-#define LUSTRE_STATIC_UINT_ATTR(name, value) \
-static struct static_lustre_uintvalue_attr lustre_sattr_##name = \
- {__ATTR(name, 0644, \
- static_uintvalue_show, \
- static_uintvalue_store),\
- value }
-
-LUSTRE_STATIC_UINT_ATTR(timeout, &obd_timeout);
-
-static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%lu\n",
- obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
-}
-
-static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
-
- if (val > ((totalram_pages / 10) * 9)) {
- /* Somebody wants to assign too much memory to dirty pages */
- return -EINVAL;
- }
-
- if (val < 4 << (20 - PAGE_SHIFT)) {
- /* Less than 4 Mb for dirty cache is also bad */
- return -EINVAL;
- }
-
- obd_max_dirty_pages = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(max_dirty_mb);
-
-LUSTRE_STATIC_UINT_ATTR(debug_peer_on_timeout, &obd_debug_peer_on_timeout);
-LUSTRE_STATIC_UINT_ATTR(dump_on_timeout, &obd_dump_on_timeout);
-LUSTRE_STATIC_UINT_ATTR(dump_on_eviction, &obd_dump_on_eviction);
-LUSTRE_STATIC_UINT_ATTR(at_min, &at_min);
-LUSTRE_STATIC_UINT_ATTR(at_max, &at_max);
-LUSTRE_STATIC_UINT_ATTR(at_extra, &at_extra);
-LUSTRE_STATIC_UINT_ATTR(at_early_margin, &at_early_margin);
-LUSTRE_STATIC_UINT_ATTR(at_history, &at_history);
-
-static struct attribute *lustre_attrs[] = {
- &lustre_sattr_timeout.u.attr,
- &lustre_attr_max_dirty_mb.attr,
- &lustre_sattr_debug_peer_on_timeout.u.attr,
- &lustre_sattr_dump_on_timeout.u.attr,
- &lustre_sattr_dump_on_eviction.u.attr,
- &lustre_sattr_at_min.u.attr,
- &lustre_sattr_at_max.u.attr,
- &lustre_sattr_at_extra.u.attr,
- &lustre_sattr_at_early_margin.u.attr,
- &lustre_sattr_at_history.u.attr,
- NULL,
-};
-
-static const struct attribute_group lustre_attr_group = {
- .attrs = lustre_attrs,
-};
-
-int obd_sysctl_init(void)
-{
- return sysfs_create_group(lustre_kobj, &lustre_attr_group);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
deleted file mode 100644
index 693e1129f1f9..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/llog.c
- *
- * OST<->MDS recovery logging infrastructure.
- * Invariants in implementation:
- * - we do not share logs among different OST<->MDS connections, so that
- * if an OST or MDS fails it need only look at log(s) relevant to itself
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- * Author: Alex Zhuravlev <bzzz@whamcloud.com>
- * Author: Mikhail Pershin <tappro@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-#include <llog_swab.h>
-#include <lustre_log.h>
-#include <obd_class.h>
-#include "llog_internal.h"
-
-/*
- * Allocate a new log or catalog handle
- * Used inside llog_open().
- */
-static struct llog_handle *llog_alloc_handle(void)
-{
- struct llog_handle *loghandle;
-
- loghandle = kzalloc(sizeof(*loghandle), GFP_NOFS);
- if (!loghandle)
- return NULL;
-
- init_rwsem(&loghandle->lgh_lock);
- spin_lock_init(&loghandle->lgh_hdr_lock);
- INIT_LIST_HEAD(&loghandle->u.phd.phd_entry);
- atomic_set(&loghandle->lgh_refcount, 1);
-
- return loghandle;
-}
-
-/*
- * Free llog handle and header data if exists. Used in llog_close() only
- */
-static void llog_free_handle(struct llog_handle *loghandle)
-{
- /* failed llog_init_handle */
- if (!loghandle->lgh_hdr)
- goto out;
-
- if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)
- LASSERT(list_empty(&loghandle->u.phd.phd_entry));
- else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
- LASSERT(list_empty(&loghandle->u.chd.chd_head));
- kvfree(loghandle->lgh_hdr);
-out:
- kfree(loghandle);
-}
-
-void llog_handle_get(struct llog_handle *loghandle)
-{
- atomic_inc(&loghandle->lgh_refcount);
-}
-
-void llog_handle_put(struct llog_handle *loghandle)
-{
- LASSERT(atomic_read(&loghandle->lgh_refcount) > 0);
- if (atomic_dec_and_test(&loghandle->lgh_refcount))
- llog_free_handle(loghandle);
-}
-
-static int llog_read_header(const struct lu_env *env,
- struct llog_handle *handle,
- struct obd_uuid *uuid)
-{
- struct llog_operations *lop;
- int rc;
-
- rc = llog_handle2ops(handle, &lop);
- if (rc)
- return rc;
-
- if (!lop->lop_read_header)
- return -EOPNOTSUPP;
-
- rc = lop->lop_read_header(env, handle);
- if (rc == LLOG_EEMPTY) {
- struct llog_log_hdr *llh = handle->lgh_hdr;
- size_t len;
-
- /* lrh_len should be initialized in llog_init_handle */
- handle->lgh_last_idx = 0; /* header is record with index 0 */
- llh->llh_count = 1; /* for the header record */
- llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
- LASSERT(handle->lgh_ctxt->loc_chunk_size >= LLOG_MIN_CHUNK_SIZE);
- llh->llh_hdr.lrh_len = handle->lgh_ctxt->loc_chunk_size;
- llh->llh_hdr.lrh_index = 0;
- llh->llh_timestamp = ktime_get_real_seconds();
- if (uuid)
- memcpy(&llh->llh_tgtuuid, uuid,
- sizeof(llh->llh_tgtuuid));
- llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
- /*
- * Since update llog header might also call this function,
- * let's reset the bitmap to 0 here
- */
- len = llh->llh_hdr.lrh_len - llh->llh_bitmap_offset;
- memset(LLOG_HDR_BITMAP(llh), 0, len - sizeof(llh->llh_tail));
- ext2_set_bit(0, LLOG_HDR_BITMAP(llh));
- LLOG_HDR_TAIL(llh)->lrt_len = llh->llh_hdr.lrh_len;
- LLOG_HDR_TAIL(llh)->lrt_index = llh->llh_hdr.lrh_index;
- rc = 0;
- }
- return rc;
-}
-
-int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
- int flags, struct obd_uuid *uuid)
-{
- int chunk_size = handle->lgh_ctxt->loc_chunk_size;
- enum llog_flag fmt = flags & LLOG_F_EXT_MASK;
- struct llog_log_hdr *llh;
- int rc;
-
- LASSERT(!handle->lgh_hdr);
-
- LASSERT(chunk_size >= LLOG_MIN_CHUNK_SIZE);
- llh = kvzalloc(sizeof(*llh), GFP_KERNEL);
- if (!llh)
- return -ENOMEM;
- handle->lgh_hdr = llh;
- handle->lgh_hdr_size = chunk_size;
- /* first assign flags to use llog_client_ops */
- llh->llh_flags = flags;
- rc = llog_read_header(env, handle, uuid);
- if (rc == 0) {
- if (unlikely((llh->llh_flags & LLOG_F_IS_PLAIN &&
- flags & LLOG_F_IS_CAT) ||
- (llh->llh_flags & LLOG_F_IS_CAT &&
- flags & LLOG_F_IS_PLAIN))) {
- CERROR("%s: llog type is %s but initializing %s\n",
- handle->lgh_ctxt->loc_obd->obd_name,
- llh->llh_flags & LLOG_F_IS_CAT ?
- "catalog" : "plain",
- flags & LLOG_F_IS_CAT ? "catalog" : "plain");
- rc = -EINVAL;
- goto out;
- } else if (llh->llh_flags &
- (LLOG_F_IS_PLAIN | LLOG_F_IS_CAT)) {
- /*
- * it is possible to open llog without specifying llog
- * type so it is taken from llh_flags
- */
- flags = llh->llh_flags;
- } else {
- /* for some reason the llh_flags has no type set */
- CERROR("llog type is not specified!\n");
- rc = -EINVAL;
- goto out;
- }
- if (unlikely(uuid &&
- !obd_uuid_equals(uuid, &llh->llh_tgtuuid))) {
- CERROR("%s: llog uuid mismatch: %s/%s\n",
- handle->lgh_ctxt->loc_obd->obd_name,
- (char *)uuid->uuid,
- (char *)llh->llh_tgtuuid.uuid);
- rc = -EEXIST;
- goto out;
- }
- }
- if (flags & LLOG_F_IS_CAT) {
- LASSERT(list_empty(&handle->u.chd.chd_head));
- INIT_LIST_HEAD(&handle->u.chd.chd_head);
- llh->llh_size = sizeof(struct llog_logid_rec);
- llh->llh_flags |= LLOG_F_IS_FIXSIZE;
- } else if (!(flags & LLOG_F_IS_PLAIN)) {
- CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
- handle->lgh_ctxt->loc_obd->obd_name,
- flags, LLOG_F_IS_CAT, LLOG_F_IS_PLAIN);
- rc = -EINVAL;
- }
- llh->llh_flags |= fmt;
-out:
- if (rc) {
- kvfree(llh);
- handle->lgh_hdr = NULL;
- }
- return rc;
-}
-EXPORT_SYMBOL(llog_init_handle);
-
-static int llog_process_thread(void *arg)
-{
- struct llog_process_info *lpi = arg;
- struct llog_handle *loghandle = lpi->lpi_loghandle;
- struct llog_log_hdr *llh = loghandle->lgh_hdr;
- struct llog_process_cat_data *cd = lpi->lpi_catdata;
- char *buf;
- u64 cur_offset, tmp_offset;
- int chunk_size;
- int rc = 0, index = 1, last_index;
- int saved_index = 0;
- int last_called_index = 0;
-
- if (!llh)
- return -EINVAL;
-
- cur_offset = llh->llh_hdr.lrh_len;
- chunk_size = llh->llh_hdr.lrh_len;
- /* expect chunk_size to be power of two */
- LASSERT(is_power_of_2(chunk_size));
-
- buf = kvzalloc(chunk_size, GFP_NOFS);
- if (!buf) {
- lpi->lpi_rc = -ENOMEM;
- return 0;
- }
-
- if (cd) {
- last_called_index = cd->lpcd_first_idx;
- index = cd->lpcd_first_idx + 1;
- }
- if (cd && cd->lpcd_last_idx)
- last_index = cd->lpcd_last_idx;
- else
- last_index = LLOG_HDR_BITMAP_SIZE(llh) - 1;
-
- while (rc == 0) {
- unsigned int buf_offset = 0;
- struct llog_rec_hdr *rec;
- bool partial_chunk;
- off_t chunk_offset;
-
- /* skip records not set in bitmap */
- while (index <= last_index &&
- !ext2_test_bit(index, LLOG_HDR_BITMAP(llh)))
- ++index;
-
- if (index > last_index)
- break;
-
- CDEBUG(D_OTHER, "index: %d last_index %d\n",
- index, last_index);
-repeat:
- /* get the buf with our target record; avoid old garbage */
- memset(buf, 0, chunk_size);
- rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index,
- index, &cur_offset, buf, chunk_size);
- if (rc)
- goto out;
-
- /*
- * NB: after llog_next_block() call the cur_offset is the
- * offset of the next block after read one.
- * The absolute offset of the current chunk is calculated
- * from cur_offset value and stored in chunk_offset variable.
- */
- tmp_offset = cur_offset;
- if (do_div(tmp_offset, chunk_size)) {
- partial_chunk = true;
- chunk_offset = cur_offset & ~(chunk_size - 1);
- } else {
- partial_chunk = false;
- chunk_offset = cur_offset - chunk_size;
- }
-
- /* NB: when rec->lrh_len is accessed it is already swabbed
- * since it is used at the "end" of the loop and the rec
- * swabbing is done at the beginning of the loop.
- */
- for (rec = (struct llog_rec_hdr *)(buf + buf_offset);
- (char *)rec < buf + chunk_size;
- rec = llog_rec_hdr_next(rec)) {
- CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
- rec, rec->lrh_type);
-
- if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
- lustre_swab_llog_rec(rec);
-
- CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n",
- rec->lrh_type, rec->lrh_index);
-
- /*
- * for partial chunk the end of it is zeroed, check
- * for index 0 to distinguish it.
- */
- if (partial_chunk && !rec->lrh_index) {
- /* concurrent llog_add() might add new records
- * while llog_processing, check this is not
- * the case and re-read the current chunk
- * otherwise.
- */
- if (index > loghandle->lgh_last_idx) {
- rc = 0;
- goto out;
- }
- CDEBUG(D_OTHER, "Re-read last llog buffer for new records, index %u, last %u\n",
- index, loghandle->lgh_last_idx);
- /* save offset inside buffer for the re-read */
- buf_offset = (char *)rec - (char *)buf;
- cur_offset = chunk_offset;
- goto repeat;
- }
-
- if (!rec->lrh_len || rec->lrh_len > chunk_size) {
- CWARN("invalid length %d in llog record for index %d/%d\n",
- rec->lrh_len,
- rec->lrh_index, index);
- rc = -EINVAL;
- goto out;
- }
-
- if (rec->lrh_index < index) {
- CDEBUG(D_OTHER, "skipping lrh_index %d\n",
- rec->lrh_index);
- continue;
- }
-
- if (rec->lrh_index != index) {
- CERROR("%s: Invalid record: index %u but expected %u\n",
- loghandle->lgh_ctxt->loc_obd->obd_name,
- rec->lrh_index, index);
- rc = -ERANGE;
- goto out;
- }
-
- CDEBUG(D_OTHER,
- "lrh_index: %d lrh_len: %d (%d remains)\n",
- rec->lrh_index, rec->lrh_len,
- (int)(buf + chunk_size - (char *)rec));
-
- loghandle->lgh_cur_idx = rec->lrh_index;
- loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
- chunk_offset;
-
- /* if set, process the callback on this record */
- if (ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) {
- rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec,
- lpi->lpi_cbdata);
- last_called_index = index;
- if (rc)
- goto out;
- }
-
- /* exit if the last index is reached */
- if (index >= last_index) {
- rc = 0;
- goto out;
- }
- index++;
- }
- }
-
-out:
- if (cd)
- cd->lpcd_last_idx = last_called_index;
-
- kfree(buf);
- lpi->lpi_rc = rc;
- return 0;
-}
-
-static int llog_process_thread_daemonize(void *arg)
-{
- struct llog_process_info *lpi = arg;
- struct lu_env env;
- int rc;
-
- unshare_fs_struct();
-
- /* client env has no keys, tags is just 0 */
- rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD);
- if (rc)
- goto out;
- lpi->lpi_env = &env;
-
- rc = llog_process_thread(arg);
-
- lu_env_fini(&env);
-out:
- complete(&lpi->lpi_completion);
- return rc;
-}
-
-int llog_process_or_fork(const struct lu_env *env,
- struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata, bool fork)
-{
- struct llog_process_info *lpi;
- int rc;
-
- lpi = kzalloc(sizeof(*lpi), GFP_NOFS);
- if (!lpi)
- return -ENOMEM;
- lpi->lpi_loghandle = loghandle;
- lpi->lpi_cb = cb;
- lpi->lpi_cbdata = data;
- lpi->lpi_catdata = catdata;
-
- if (fork) {
- struct task_struct *task;
-
- /* The new thread can't use parent env,
- * init the new one in llog_process_thread_daemonize.
- */
- lpi->lpi_env = NULL;
- init_completion(&lpi->lpi_completion);
- task = kthread_run(llog_process_thread_daemonize, lpi,
- "llog_process_thread");
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- CERROR("%s: cannot start thread: rc = %d\n",
- loghandle->lgh_ctxt->loc_obd->obd_name, rc);
- goto out_lpi;
- }
- wait_for_completion(&lpi->lpi_completion);
- } else {
- lpi->lpi_env = env;
- llog_process_thread(lpi);
- }
- rc = lpi->lpi_rc;
-out_lpi:
- kfree(lpi);
- return rc;
-}
-EXPORT_SYMBOL(llog_process_or_fork);
-
-int llog_process(const struct lu_env *env, struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata)
-{
- return llog_process_or_fork(env, loghandle, cb, data, catdata, true);
-}
-EXPORT_SYMBOL(llog_process);
-
-int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
- struct llog_handle **lgh, struct llog_logid *logid,
- char *name, enum llog_open_param open_param)
-{
- const struct cred *old_cred = NULL;
- int rc;
-
- LASSERT(ctxt);
- LASSERT(ctxt->loc_logops);
-
- if (!ctxt->loc_logops->lop_open) {
- *lgh = NULL;
- return -EOPNOTSUPP;
- }
-
- *lgh = llog_alloc_handle();
- if (!*lgh)
- return -ENOMEM;
- (*lgh)->lgh_ctxt = ctxt;
- (*lgh)->lgh_logops = ctxt->loc_logops;
-
- if (cap_raised(current_cap(), CAP_SYS_RESOURCE)) {
- struct cred *cred = prepare_creds();
-
- if (cred) {
- cap_raise(cred->cap_effective, CAP_SYS_RESOURCE);
- old_cred = override_creds(cred);
- }
- }
- rc = ctxt->loc_logops->lop_open(env, *lgh, logid, name, open_param);
- if (old_cred)
- revert_creds(old_cred);
-
- if (rc) {
- llog_free_handle(*lgh);
- *lgh = NULL;
- }
- return rc;
-}
-EXPORT_SYMBOL(llog_open);
-
-int llog_close(const struct lu_env *env, struct llog_handle *loghandle)
-{
- struct llog_operations *lop;
- int rc;
-
- rc = llog_handle2ops(loghandle, &lop);
- if (rc)
- goto out;
- if (!lop->lop_close) {
- rc = -EOPNOTSUPP;
- goto out;
- }
- rc = lop->lop_close(env, loghandle);
-out:
- llog_handle_put(loghandle);
- return rc;
-}
-EXPORT_SYMBOL(llog_close);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
deleted file mode 100644
index d9c63adff206..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/llog_cat.c
- *
- * OST<->MDS recovery logging infrastructure.
- *
- * Invariants in implementation:
- * - we do not share logs among different OST<->MDS connections, so that
- * if an OST or MDS fails it need only look at log(s) relevant to itself
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
- * Author: Mikhail Pershin <mike.pershin@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-#include <obd_class.h>
-
-#include "llog_internal.h"
-
-/* Open an existent log handle and add it to the open list.
- * This log handle will be closed when all of the records in it are removed.
- *
- * Assumes caller has already pushed us into the kernel context and is locking.
- * We return a lock on the handle to ensure nobody yanks it from us.
- *
- * This takes extra reference on llog_handle via llog_handle_get() and require
- * this reference to be put by caller using llog_handle_put()
- */
-static int llog_cat_id2handle(const struct lu_env *env,
- struct llog_handle *cathandle,
- struct llog_handle **res,
- struct llog_logid *logid)
-{
- struct llog_handle *loghandle;
- enum llog_flag fmt;
- int rc = 0;
-
- if (!cathandle)
- return -EBADF;
-
- fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK;
- down_write(&cathandle->lgh_lock);
- list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
- struct llog_logid *cgl = &loghandle->lgh_id;
-
- if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
- ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
- if (cgl->lgl_ogen != logid->lgl_ogen) {
- CERROR("%s: log " DOSTID " generation %x != %x\n",
- loghandle->lgh_ctxt->loc_obd->obd_name,
- POSTID(&logid->lgl_oi), cgl->lgl_ogen,
- logid->lgl_ogen);
- continue;
- }
- loghandle->u.phd.phd_cat_handle = cathandle;
- up_write(&cathandle->lgh_lock);
- rc = 0;
- goto out;
- }
- }
- up_write(&cathandle->lgh_lock);
-
- rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
- LLOG_OPEN_EXISTS);
- if (rc < 0) {
- CERROR("%s: error opening log id " DOSTID ":%x: rc = %d\n",
- cathandle->lgh_ctxt->loc_obd->obd_name,
- POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
- return rc;
- }
-
- rc = llog_init_handle(env, loghandle, fmt | LLOG_F_IS_PLAIN, NULL);
- if (rc < 0) {
- llog_close(env, loghandle);
- loghandle = NULL;
- return rc;
- }
-
- down_write(&cathandle->lgh_lock);
- list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
- up_write(&cathandle->lgh_lock);
-
- loghandle->u.phd.phd_cat_handle = cathandle;
- loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
- loghandle->u.phd.phd_cookie.lgc_index =
- loghandle->lgh_hdr->llh_cat_idx;
-out:
- llog_handle_get(loghandle);
- *res = loghandle;
- return 0;
-}
-
-int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
-{
- struct llog_handle *loghandle, *n;
-
- list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
- /* unlink open-not-created llogs */
- list_del_init(&loghandle->u.phd.phd_entry);
- llog_close(env, loghandle);
- }
- /* if handle was stored in ctxt, remove it too */
- if (cathandle->lgh_ctxt->loc_handle == cathandle)
- cathandle->lgh_ctxt->loc_handle = NULL;
- return llog_close(env, cathandle);
-}
-EXPORT_SYMBOL(llog_cat_close);
-
-static int llog_cat_process_cb(const struct lu_env *env,
- struct llog_handle *cat_llh,
- struct llog_rec_hdr *rec, void *data)
-{
- struct llog_process_data *d = data;
- struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
- struct llog_handle *llh;
- int rc;
-
- if (rec->lrh_type != LLOG_LOGID_MAGIC) {
- CERROR("invalid record in catalog\n");
- return -EINVAL;
- }
- CDEBUG(D_HA, "processing log " DOSTID ":%x at index %u of catalog "
- DOSTID "\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
- rec->lrh_index, POSTID(&cat_llh->lgh_id.lgl_oi));
-
- rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
- if (rc) {
- CERROR("%s: cannot find handle for llog " DOSTID ": %d\n",
- cat_llh->lgh_ctxt->loc_obd->obd_name,
- POSTID(&lir->lid_id.lgl_oi), rc);
- return rc;
- }
-
- if (rec->lrh_index < d->lpd_startcat)
- /* Skip processing of the logs until startcat */
- rc = 0;
- else if (d->lpd_startidx > 0) {
- struct llog_process_cat_data cd;
-
- cd.lpcd_first_idx = d->lpd_startidx;
- cd.lpcd_last_idx = 0;
- rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
- &cd, false);
- /* Continue processing the next log from idx 0 */
- d->lpd_startidx = 0;
- } else {
- rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
- NULL, false);
- }
-
- llog_handle_put(llh);
-
- return rc;
-}
-
-static int llog_cat_process_or_fork(const struct lu_env *env,
- struct llog_handle *cat_llh,
- llog_cb_t cb, void *data, int startcat,
- int startidx, bool fork)
-{
- struct llog_process_data d;
- struct llog_log_hdr *llh = cat_llh->lgh_hdr;
- int rc;
-
- LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
- d.lpd_data = data;
- d.lpd_cb = cb;
- d.lpd_startcat = startcat;
- d.lpd_startidx = startidx;
-
- if (llh->llh_cat_idx > cat_llh->lgh_last_idx) {
- struct llog_process_cat_data cd;
-
- CWARN("catlog " DOSTID " crosses index zero\n",
- POSTID(&cat_llh->lgh_id.lgl_oi));
-
- cd.lpcd_first_idx = llh->llh_cat_idx;
- cd.lpcd_last_idx = 0;
- rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
- &d, &cd, fork);
- if (rc != 0)
- return rc;
-
- cd.lpcd_first_idx = 0;
- cd.lpcd_last_idx = cat_llh->lgh_last_idx;
- rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
- &d, &cd, fork);
- } else {
- rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
- &d, NULL, fork);
- }
-
- return rc;
-}
-
-int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
- llog_cb_t cb, void *data, int startcat, int startidx)
-{
- return llog_cat_process_or_fork(env, cat_llh, cb, data, startcat,
- startidx, false);
-}
-EXPORT_SYMBOL(llog_cat_process);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_internal.h b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
deleted file mode 100644
index 4991d4e589dc..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog_internal.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LLOG_INTERNAL_H__
-#define __LLOG_INTERNAL_H__
-
-#include <lustre_log.h>
-
-struct llog_process_info {
- struct llog_handle *lpi_loghandle;
- llog_cb_t lpi_cb;
- void *lpi_cbdata;
- void *lpi_catdata;
- int lpi_rc;
- struct completion lpi_completion;
- const struct lu_env *lpi_env;
-
-};
-
-struct llog_thread_info {
- struct lu_attr lgi_attr;
- struct lu_fid lgi_fid;
- struct lu_buf lgi_buf;
- loff_t lgi_off;
- struct llog_rec_hdr lgi_lrh;
- struct llog_rec_tail lgi_tail;
-};
-
-extern struct lu_context_key llog_thread_key;
-
-int llog_info_init(void);
-void llog_info_fini(void);
-
-void llog_handle_get(struct llog_handle *loghandle);
-void llog_handle_put(struct llog_handle *loghandle);
-int class_config_dump_handler(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, void *data);
-int llog_process_or_fork(const struct lu_env *env,
- struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata, bool fork);
-int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
- struct llog_handle *loghandle, int index);
-
-static inline struct llog_rec_hdr *llog_rec_hdr_next(struct llog_rec_hdr *rec)
-{
- return (struct llog_rec_hdr *)((char *)rec + rec->lrh_len);
-}
-#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
deleted file mode 100644
index 26aea114a29b..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ /dev/null
@@ -1,225 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-#include <obd_class.h>
-#include <lustre_log.h>
-#include "llog_internal.h"
-
-/* helper functions for calling the llog obd methods */
-static struct llog_ctxt *llog_new_ctxt(struct obd_device *obd)
-{
- struct llog_ctxt *ctxt;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_NOFS);
- if (!ctxt)
- return NULL;
-
- ctxt->loc_obd = obd;
- atomic_set(&ctxt->loc_refcount, 1);
-
- return ctxt;
-}
-
-static void llog_ctxt_destroy(struct llog_ctxt *ctxt)
-{
- if (ctxt->loc_exp) {
- class_export_put(ctxt->loc_exp);
- ctxt->loc_exp = NULL;
- }
- if (ctxt->loc_imp) {
- class_import_put(ctxt->loc_imp);
- ctxt->loc_imp = NULL;
- }
- kfree(ctxt);
-}
-
-int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt)
-{
- struct obd_llog_group *olg = ctxt->loc_olg;
- struct obd_device *obd;
- int rc = 0;
-
- spin_lock(&olg->olg_lock);
- if (!atomic_dec_and_test(&ctxt->loc_refcount)) {
- spin_unlock(&olg->olg_lock);
- return rc;
- }
- olg->olg_ctxts[ctxt->loc_idx] = NULL;
- spin_unlock(&olg->olg_lock);
-
- obd = ctxt->loc_obd;
- spin_lock(&obd->obd_dev_lock);
- /* sync with llog ctxt user thread */
- spin_unlock(&obd->obd_dev_lock);
-
- /* obd->obd_starting is needed for the case of cleanup
- * in error case while obd is starting up.
- */
- LASSERTF(obd->obd_starting == 1 ||
- obd->obd_stopping == 1 || obd->obd_set_up == 0,
- "wrong obd state: %d/%d/%d\n", !!obd->obd_starting,
- !!obd->obd_stopping, !!obd->obd_set_up);
-
- /* cleanup the llog ctxt here */
- if (CTXTP(ctxt, cleanup))
- rc = CTXTP(ctxt, cleanup)(env, ctxt);
-
- llog_ctxt_destroy(ctxt);
- wake_up(&olg->olg_waitq);
- return rc;
-}
-EXPORT_SYMBOL(__llog_ctxt_put);
-
-int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
-{
- struct obd_llog_group *olg;
- int rc, idx;
-
- olg = ctxt->loc_olg;
- LASSERT(olg);
- LASSERT(olg != LP_POISON);
-
- idx = ctxt->loc_idx;
-
- /*
- * Banlance the ctxt get when calling llog_cleanup()
- */
- LASSERT(atomic_read(&ctxt->loc_refcount) < LI_POISON);
- LASSERT(atomic_read(&ctxt->loc_refcount) > 1);
- llog_ctxt_put(ctxt);
-
- /*
- * Try to free the ctxt.
- */
- rc = __llog_ctxt_put(env, ctxt);
- if (rc)
- CERROR("Error %d while cleaning up ctxt %p\n",
- rc, ctxt);
-
- l_wait_event_abortable(olg->olg_waitq,
- llog_group_ctxt_null(olg, idx));
-
- return rc;
-}
-EXPORT_SYMBOL(llog_cleanup);
-
-int llog_setup(const struct lu_env *env, struct obd_device *obd,
- struct obd_llog_group *olg, int index,
- struct obd_device *disk_obd, struct llog_operations *op)
-{
- struct llog_ctxt *ctxt;
- int rc = 0;
-
- if (index < 0 || index >= LLOG_MAX_CTXTS)
- return -EINVAL;
-
- LASSERT(olg);
-
- ctxt = llog_new_ctxt(obd);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->loc_obd = obd;
- ctxt->loc_olg = olg;
- ctxt->loc_idx = index;
- ctxt->loc_logops = op;
- mutex_init(&ctxt->loc_mutex);
- ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
- ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
- ctxt->loc_chunk_size = LLOG_MIN_CHUNK_SIZE;
-
- rc = llog_group_set_ctxt(olg, ctxt, index);
- if (rc) {
- llog_ctxt_destroy(ctxt);
- if (rc == -EEXIST) {
- ctxt = llog_group_get_ctxt(olg, index);
- if (ctxt) {
- /*
- * mds_lov_update_desc() might call here multiple
- * times. So if the llog is already set up then
- * don't to do it again.
- */
- CDEBUG(D_CONFIG, "obd %s ctxt %d already set up\n",
- obd->obd_name, index);
- LASSERT(ctxt->loc_olg == olg);
- LASSERT(ctxt->loc_obd == obd);
- LASSERT(ctxt->loc_exp == disk_obd->obd_self_export);
- LASSERT(ctxt->loc_logops == op);
- llog_ctxt_put(ctxt);
- }
- rc = 0;
- }
- return rc;
- }
-
- if (op->lop_setup) {
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LLOG_SETUP))
- rc = -EOPNOTSUPP;
- else
- rc = op->lop_setup(env, obd, olg, index, disk_obd);
- }
-
- if (rc) {
- CERROR("%s: ctxt %d lop_setup=%p failed: rc = %d\n",
- obd->obd_name, index, op->lop_setup, rc);
- llog_group_clear_ctxt(olg, index);
- llog_ctxt_destroy(ctxt);
- } else {
- CDEBUG(D_CONFIG, "obd %s ctxt %d is initialized\n",
- obd->obd_name, index);
- ctxt->loc_flags &= ~LLOG_CTXT_FLAG_UNINITIALIZED;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(llog_setup);
-
-/* context key constructor/destructor: llog_key_init, llog_key_fini */
-LU_KEY_INIT_FINI(llog, struct llog_thread_info);
-/* context key: llog_thread_key */
-LU_CONTEXT_KEY_DEFINE(llog, LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL);
-LU_KEY_INIT_GENERIC(llog);
-
-int llog_info_init(void)
-{
- llog_key_init_generic(&llog_thread_key, NULL);
- lu_context_key_register(&llog_thread_key);
- return 0;
-}
-
-void llog_info_fini(void)
-{
- lu_context_key_degister(&llog_thread_key);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
deleted file mode 100644
index b431c3408fe4..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ /dev/null
@@ -1,412 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/llog_swab.c
- *
- * Swabbing of llog datatypes (from disk or over the wire).
- *
- * Author: jacob berkman <jacob@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-#include <llog_swab.h>
-#include <lustre_log.h>
-
-static void print_llogd_body(struct llogd_body *d)
-{
- CDEBUG(D_OTHER, "llogd body: %p\n", d);
- CDEBUG(D_OTHER, "\tlgd_logid.lgl_oi: " DOSTID "\n",
- POSTID(&d->lgd_logid.lgl_oi));
- CDEBUG(D_OTHER, "\tlgd_logid.lgl_ogen: %#x\n", d->lgd_logid.lgl_ogen);
- CDEBUG(D_OTHER, "\tlgd_ctxt_idx: %#x\n", d->lgd_ctxt_idx);
- CDEBUG(D_OTHER, "\tlgd_llh_flags: %#x\n", d->lgd_llh_flags);
- CDEBUG(D_OTHER, "\tlgd_index: %#x\n", d->lgd_index);
- CDEBUG(D_OTHER, "\tlgd_saved_index: %#x\n", d->lgd_saved_index);
- CDEBUG(D_OTHER, "\tlgd_len: %#x\n", d->lgd_len);
- CDEBUG(D_OTHER, "\tlgd_cur_offset: %#llx\n", d->lgd_cur_offset);
-}
-
-void lustre_swab_lu_fid(struct lu_fid *fid)
-{
- __swab64s(&fid->f_seq);
- __swab32s(&fid->f_oid);
- __swab32s(&fid->f_ver);
-}
-EXPORT_SYMBOL(lustre_swab_lu_fid);
-
-void lustre_swab_ost_id(struct ost_id *oid)
-{
- if (fid_seq_is_mdt0(oid->oi.oi_seq)) {
- __swab64s(&oid->oi.oi_id);
- __swab64s(&oid->oi.oi_seq);
- } else {
- lustre_swab_lu_fid(&oid->oi_fid);
- }
-}
-EXPORT_SYMBOL(lustre_swab_ost_id);
-
-static void lustre_swab_llog_id(struct llog_logid *log_id)
-{
- __swab64s(&log_id->lgl_oi.oi.oi_id);
- __swab64s(&log_id->lgl_oi.oi.oi_seq);
- __swab32s(&log_id->lgl_ogen);
-}
-
-void lustre_swab_llogd_body(struct llogd_body *d)
-{
- print_llogd_body(d);
- lustre_swab_llog_id(&d->lgd_logid);
- __swab32s(&d->lgd_ctxt_idx);
- __swab32s(&d->lgd_llh_flags);
- __swab32s(&d->lgd_index);
- __swab32s(&d->lgd_saved_index);
- __swab32s(&d->lgd_len);
- __swab64s(&d->lgd_cur_offset);
- print_llogd_body(d);
-}
-EXPORT_SYMBOL(lustre_swab_llogd_body);
-
-void lustre_swab_llogd_conn_body(struct llogd_conn_body *d)
-{
- __swab64s(&d->lgdc_gen.mnt_cnt);
- __swab64s(&d->lgdc_gen.conn_cnt);
- lustre_swab_llog_id(&d->lgdc_logid);
- __swab32s(&d->lgdc_ctxt_idx);
-}
-EXPORT_SYMBOL(lustre_swab_llogd_conn_body);
-
-static void lustre_swab_ll_fid(struct ll_fid *fid)
-{
- __swab64s(&fid->id);
- __swab32s(&fid->generation);
- __swab32s(&fid->f_type);
-}
-
-void lustre_swab_lu_seq_range(struct lu_seq_range *range)
-{
- __swab64s(&range->lsr_start);
- __swab64s(&range->lsr_end);
- __swab32s(&range->lsr_index);
- __swab32s(&range->lsr_flags);
-}
-EXPORT_SYMBOL(lustre_swab_lu_seq_range);
-
-void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
-{
- struct llog_rec_tail *tail = NULL;
-
- __swab32s(&rec->lrh_len);
- __swab32s(&rec->lrh_index);
- __swab32s(&rec->lrh_type);
- __swab32s(&rec->lrh_id);
-
- switch (rec->lrh_type) {
- case OST_SZ_REC:
- {
- struct llog_size_change_rec *lsc =
- (struct llog_size_change_rec *)rec;
-
- lustre_swab_ll_fid(&lsc->lsc_fid);
- __swab32s(&lsc->lsc_ioepoch);
- tail = &lsc->lsc_tail;
- break;
- }
- case MDS_UNLINK_REC:
- {
- struct llog_unlink_rec *lur = (struct llog_unlink_rec *)rec;
-
- __swab64s(&lur->lur_oid);
- __swab32s(&lur->lur_oseq);
- __swab32s(&lur->lur_count);
- tail = &lur->lur_tail;
- break;
- }
- case MDS_UNLINK64_REC:
- {
- struct llog_unlink64_rec *lur =
- (struct llog_unlink64_rec *)rec;
-
- lustre_swab_lu_fid(&lur->lur_fid);
- __swab32s(&lur->lur_count);
- tail = &lur->lur_tail;
- break;
- }
- case CHANGELOG_REC:
- {
- struct llog_changelog_rec *cr =
- (struct llog_changelog_rec *)rec;
-
- __swab16s(&cr->cr.cr_namelen);
- __swab16s(&cr->cr.cr_flags);
- __swab32s(&cr->cr.cr_type);
- __swab64s(&cr->cr.cr_index);
- __swab64s(&cr->cr.cr_prev);
- __swab64s(&cr->cr.cr_time);
- lustre_swab_lu_fid(&cr->cr.cr_tfid);
- lustre_swab_lu_fid(&cr->cr.cr_pfid);
- if (cr->cr.cr_flags & CLF_RENAME) {
- struct changelog_ext_rename *rnm =
- changelog_rec_rename(&cr->cr);
-
- lustre_swab_lu_fid(&rnm->cr_sfid);
- lustre_swab_lu_fid(&rnm->cr_spfid);
- }
- /*
- * Because the tail follows a variable-length structure we need
- * to compute its location at runtime
- */
- tail = (struct llog_rec_tail *)((char *)&cr->cr +
- changelog_rec_size(&cr->cr) +
- cr->cr.cr_namelen);
- break;
- }
-
- case CHANGELOG_USER_REC:
- {
- struct llog_changelog_user_rec *cur =
- (struct llog_changelog_user_rec *)rec;
-
- __swab32s(&cur->cur_id);
- __swab64s(&cur->cur_endrec);
- tail = &cur->cur_tail;
- break;
- }
-
- case HSM_AGENT_REC: {
- struct llog_agent_req_rec *arr =
- (struct llog_agent_req_rec *)rec;
-
- __swab32s(&arr->arr_hai.hai_len);
- __swab32s(&arr->arr_hai.hai_action);
- lustre_swab_lu_fid(&arr->arr_hai.hai_fid);
- lustre_swab_lu_fid(&arr->arr_hai.hai_dfid);
- __swab64s(&arr->arr_hai.hai_cookie);
- __swab64s(&arr->arr_hai.hai_extent.offset);
- __swab64s(&arr->arr_hai.hai_extent.length);
- __swab64s(&arr->arr_hai.hai_gid);
- /* no swabing for opaque data */
- /* hai_data[0]; */
- break;
- }
-
- case MDS_SETATTR64_REC:
- {
- struct llog_setattr64_rec *lsr =
- (struct llog_setattr64_rec *)rec;
-
- lustre_swab_ost_id(&lsr->lsr_oi);
- __swab32s(&lsr->lsr_uid);
- __swab32s(&lsr->lsr_uid_h);
- __swab32s(&lsr->lsr_gid);
- __swab32s(&lsr->lsr_gid_h);
- __swab64s(&lsr->lsr_valid);
- tail = &lsr->lsr_tail;
- break;
- }
- case OBD_CFG_REC:
- /* these are swabbed as they are consumed */
- break;
- case LLOG_HDR_MAGIC:
- {
- struct llog_log_hdr *llh = (struct llog_log_hdr *)rec;
-
- __swab64s(&llh->llh_timestamp);
- __swab32s(&llh->llh_count);
- __swab32s(&llh->llh_bitmap_offset);
- __swab32s(&llh->llh_flags);
- __swab32s(&llh->llh_size);
- __swab32s(&llh->llh_cat_idx);
- tail = LLOG_HDR_TAIL(llh);
- break;
- }
- case LLOG_LOGID_MAGIC:
- {
- struct llog_logid_rec *lid = (struct llog_logid_rec *)rec;
-
- lustre_swab_llog_id(&lid->lid_id);
- tail = &lid->lid_tail;
- break;
- }
- case LLOG_GEN_REC:
- {
- struct llog_gen_rec *lgr = (struct llog_gen_rec *)rec;
-
- __swab64s(&lgr->lgr_gen.mnt_cnt);
- __swab64s(&lgr->lgr_gen.conn_cnt);
- tail = &lgr->lgr_tail;
- break;
- }
- case LLOG_PAD_MAGIC:
- break;
- default:
- CERROR("Unknown llog rec type %#x swabbing rec %p\n",
- rec->lrh_type, rec);
- }
-
- if (tail) {
- __swab32s(&tail->lrt_len);
- __swab32s(&tail->lrt_index);
- }
-}
-EXPORT_SYMBOL(lustre_swab_llog_rec);
-
-static void print_llog_hdr(struct llog_log_hdr *h)
-{
- CDEBUG(D_OTHER, "llog header: %p\n", h);
- CDEBUG(D_OTHER, "\tllh_hdr.lrh_index: %#x\n", h->llh_hdr.lrh_index);
- CDEBUG(D_OTHER, "\tllh_hdr.lrh_len: %#x\n", h->llh_hdr.lrh_len);
- CDEBUG(D_OTHER, "\tllh_hdr.lrh_type: %#x\n", h->llh_hdr.lrh_type);
- CDEBUG(D_OTHER, "\tllh_timestamp: %#llx\n", h->llh_timestamp);
- CDEBUG(D_OTHER, "\tllh_count: %#x\n", h->llh_count);
- CDEBUG(D_OTHER, "\tllh_bitmap_offset: %#x\n", h->llh_bitmap_offset);
- CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags);
- CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size);
- CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx);
- CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n",
- LLOG_HDR_TAIL(h)->lrt_index);
- CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n",
- LLOG_HDR_TAIL(h)->lrt_len);
-}
-
-void lustre_swab_llog_hdr(struct llog_log_hdr *h)
-{
- print_llog_hdr(h);
-
- lustre_swab_llog_rec(&h->llh_hdr);
-
- print_llog_hdr(h);
-}
-EXPORT_SYMBOL(lustre_swab_llog_hdr);
-
-static void print_lustre_cfg(struct lustre_cfg *lcfg)
-{
- int i;
-
- if (!(libcfs_debug & D_OTHER)) /* don't loop on nothing */
- return;
- CDEBUG(D_OTHER, "lustre_cfg: %p\n", lcfg);
- CDEBUG(D_OTHER, "\tlcfg->lcfg_version: %#x\n", lcfg->lcfg_version);
-
- CDEBUG(D_OTHER, "\tlcfg->lcfg_command: %#x\n", lcfg->lcfg_command);
- CDEBUG(D_OTHER, "\tlcfg->lcfg_num: %#x\n", lcfg->lcfg_num);
- CDEBUG(D_OTHER, "\tlcfg->lcfg_flags: %#x\n", lcfg->lcfg_flags);
- CDEBUG(D_OTHER, "\tlcfg->lcfg_nid: %s\n", libcfs_nid2str(lcfg->lcfg_nid));
-
- CDEBUG(D_OTHER, "\tlcfg->lcfg_bufcount: %d\n", lcfg->lcfg_bufcount);
- if (lcfg->lcfg_bufcount < LUSTRE_CFG_MAX_BUFCOUNT)
- for (i = 0; i < lcfg->lcfg_bufcount; i++)
- CDEBUG(D_OTHER, "\tlcfg->lcfg_buflens[%d]: %d\n",
- i, lcfg->lcfg_buflens[i]);
-}
-
-void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg)
-{
- int i;
-
- __swab32s(&lcfg->lcfg_version);
-
- if (lcfg->lcfg_version != LUSTRE_CFG_VERSION) {
- CERROR("not swabbing lustre_cfg version %#x (expecting %#x)\n",
- lcfg->lcfg_version, LUSTRE_CFG_VERSION);
- return;
- }
-
- __swab32s(&lcfg->lcfg_command);
- __swab32s(&lcfg->lcfg_num);
- __swab32s(&lcfg->lcfg_flags);
- __swab64s(&lcfg->lcfg_nid);
- __swab32s(&lcfg->lcfg_bufcount);
- for (i = 0; i < lcfg->lcfg_bufcount && i < LUSTRE_CFG_MAX_BUFCOUNT; i++)
- __swab32s(&lcfg->lcfg_buflens[i]);
-
- print_lustre_cfg(lcfg);
-}
-
-/* used only for compatibility with old on-disk cfg_marker data */
-struct cfg_marker32 {
- __u32 cm_step;
- __u32 cm_flags;
- __u32 cm_vers;
- __u32 padding;
- __u32 cm_createtime;
- __u32 cm_canceltime;
- char cm_tgtname[MTI_NAME_MAXLEN];
- char cm_comment[MTI_NAME_MAXLEN];
-};
-
-#define MTI_NAMELEN32 (MTI_NAME_MAXLEN - \
- (sizeof(struct cfg_marker) - sizeof(struct cfg_marker32)))
-
-void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
-{
- struct cfg_marker32 *cm32 = (struct cfg_marker32 *)marker;
-
- if (swab) {
- __swab32s(&marker->cm_step);
- __swab32s(&marker->cm_flags);
- __swab32s(&marker->cm_vers);
- }
- if (size == sizeof(*cm32)) {
- __u32 createtime, canceltime;
- /* There was a problem with the original declaration of
- * cfg_marker on 32-bit systems because it used time_t as
- * a wire protocol structure, and didn't verify this in
- * wirecheck. We now have to convert the offsets of the
- * later fields in order to work on 32- and 64-bit systems.
- *
- * Fortunately, the cm_comment field has no functional use
- * so can be sacrificed when converting the timestamp size.
- *
- * Overwrite fields from the end first, so they are not
- * clobbered, and use memmove() instead of memcpy() because
- * the source and target buffers overlap. bug 16771
- */
- createtime = cm32->cm_createtime;
- canceltime = cm32->cm_canceltime;
- memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32);
- marker->cm_comment[MTI_NAMELEN32 - 1] = '\0';
- memmove(marker->cm_tgtname, cm32->cm_tgtname,
- sizeof(marker->cm_tgtname));
- if (swab) {
- __swab32s(&createtime);
- __swab32s(&canceltime);
- }
- marker->cm_createtime = createtime;
- marker->cm_canceltime = canceltime;
- CDEBUG(D_CONFIG, "Find old cfg_marker(Srv32b,Clt64b) for target %s, converting\n",
- marker->cm_tgtname);
- } else if (swab) {
- __swab64s(&marker->cm_createtime);
- __swab64s(&marker->cm_canceltime);
- }
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
deleted file mode 100644
index c83b7d7f8e72..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- *
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lprocfs_counters.c
- *
- * Lustre lprocfs counter routines
- *
- * Author: Andreas Dilger <andreas.dilger@intel.com>
- */
-
-#include <linux/module.h>
-#include <lprocfs_status.h>
-#include <obd_support.h>
-
-void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
-{
- struct lprocfs_counter *percpu_cntr;
- struct lprocfs_counter_header *header;
- int smp_id;
- unsigned long flags = 0;
-
- if (!stats)
- return;
-
- LASSERTF(0 <= idx && idx < stats->ls_num,
- "idx %d, ls_num %hu\n", idx, stats->ls_num);
-
- /* With per-client stats, statistics are allocated only for
- * single CPU area, so the smp_id should be 0 always.
- */
- smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
- if (smp_id < 0)
- return;
-
- header = &stats->ls_cnt_header[idx];
- percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
- percpu_cntr->lc_count++;
-
- if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
- /*
- * lprocfs_counter_add() can be called in interrupt context,
- * as memory allocation could trigger memory shrinker call
- * ldlm_pool_shrink(), which calls lprocfs_counter_add().
- * LU-1727.
- *
- */
- if (in_interrupt() &&
- (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- percpu_cntr->lc_sum_irq += amount;
- else
- percpu_cntr->lc_sum += amount;
-
- if (header->lc_config & LPROCFS_CNTR_STDDEV)
- percpu_cntr->lc_sumsquare += (__s64)amount * amount;
- if (amount < percpu_cntr->lc_min)
- percpu_cntr->lc_min = amount;
- if (amount > percpu_cntr->lc_max)
- percpu_cntr->lc_max = amount;
- }
- lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
-}
-EXPORT_SYMBOL(lprocfs_counter_add);
-
-void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount)
-{
- struct lprocfs_counter *percpu_cntr;
- struct lprocfs_counter_header *header;
- int smp_id;
- unsigned long flags = 0;
-
- if (!stats)
- return;
-
- LASSERTF(0 <= idx && idx < stats->ls_num,
- "idx %d, ls_num %hu\n", idx, stats->ls_num);
-
- /* With per-client stats, statistics are allocated only for
- * single CPU area, so the smp_id should be 0 always.
- */
- smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
- if (smp_id < 0)
- return;
-
- header = &stats->ls_cnt_header[idx];
- percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
- if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
- /*
- * Sometimes we use RCU callbacks to free memory which calls
- * lprocfs_counter_sub(), and RCU callbacks may execute in
- * softirq context - right now that's the only case we're in
- * softirq context here, use separate counter for that.
- * bz20650.
- *
- */
- if (in_interrupt() &&
- (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- percpu_cntr->lc_sum_irq -= amount;
- else
- percpu_cntr->lc_sum -= amount;
- }
- lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
-}
-EXPORT_SYMBOL(lprocfs_counter_sub);
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
deleted file mode 100644
index 2ed350527398..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ /dev/null
@@ -1,1810 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lprocfs_status.c
- *
- * Author: Hariharan Thantry <thantry@users.sourceforge.net>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <obd_class.h>
-#include <lprocfs_status.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <linux/seq_file.h>
-#include <linux/ctype.h>
-
-static const char * const obd_connect_names[] = {
- "read_only",
- "lov_index",
- "connect_from_mds",
- "write_grant",
- "server_lock",
- "version",
- "request_portal",
- "acl",
- "xattr",
- "create_on_write",
- "truncate_lock",
- "initial_transno",
- "inode_bit_locks",
- "join_file(obsolete)",
- "getattr_by_fid",
- "no_oh_for_devices",
- "remote_client",
- "remote_client_by_force",
- "max_byte_per_rpc",
- "64bit_qdata",
- "mds_capability",
- "oss_capability",
- "early_lock_cancel",
- "som",
- "adaptive_timeouts",
- "lru_resize",
- "mds_mds_connection",
- "real_conn",
- "change_qunit_size",
- "alt_checksum_algorithm",
- "fid_is_enabled",
- "version_recovery",
- "pools",
- "grant_shrink",
- "skip_orphan",
- "large_ea",
- "full20",
- "layout_lock",
- "64bithash",
- "object_max_bytes",
- "imp_recov",
- "jobstats",
- "umask",
- "einprogress",
- "grant_param",
- "flock_owner",
- "lvb_type",
- "nanoseconds_times",
- "lightweight_conn",
- "short_io",
- "pingless",
- "flock_deadlock",
- "disp_stripe",
- "open_by_fid",
- "lfsck",
- "unknown",
- "unlink_close",
- "multi_mod_rpcs",
- "dir_stripe",
- "subtree",
- "lock_ahead",
- "bulk_mbits",
- "compact_obdo",
- "second_flags",
- NULL
-};
-
-int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
-{
- __u64 mask = 1;
- int i, ret = 0;
-
- for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
- if (flags & mask)
- ret += snprintf(page + ret, count - ret, "%s%s",
- ret ? sep : "", obd_connect_names[i]);
- }
- if (flags & ~(mask - 1))
- ret += snprintf(page + ret, count - ret,
- "%sunknown flags %#llx",
- ret ? sep : "", flags & ~(mask - 1));
- return ret;
-}
-EXPORT_SYMBOL(obd_connect_flags2str);
-
-static void obd_connect_data_seqprint(struct seq_file *m,
- struct obd_connect_data *ocd)
-{
- u64 flags;
-
- LASSERT(ocd);
- flags = ocd->ocd_connect_flags;
-
- seq_printf(m, " connect_data:\n"
- " flags: %llx\n"
- " instance: %u\n",
- ocd->ocd_connect_flags,
- ocd->ocd_instance);
- if (flags & OBD_CONNECT_VERSION)
- seq_printf(m, " target_version: %u.%u.%u.%u\n",
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
- OBD_OCD_VERSION_MINOR(ocd->ocd_version),
- OBD_OCD_VERSION_PATCH(ocd->ocd_version),
- OBD_OCD_VERSION_FIX(ocd->ocd_version));
- if (flags & OBD_CONNECT_MDS)
- seq_printf(m, " mdt_index: %d\n", ocd->ocd_group);
- if (flags & OBD_CONNECT_GRANT)
- seq_printf(m, " initial_grant: %d\n", ocd->ocd_grant);
- if (flags & OBD_CONNECT_INDEX)
- seq_printf(m, " target_index: %u\n", ocd->ocd_index);
- if (flags & OBD_CONNECT_BRW_SIZE)
- seq_printf(m, " max_brw_size: %d\n", ocd->ocd_brw_size);
- if (flags & OBD_CONNECT_IBITS)
- seq_printf(m, " ibits_known: %llx\n",
- ocd->ocd_ibits_known);
- if (flags & OBD_CONNECT_GRANT_PARAM)
- seq_printf(m, " grant_block_size: %d\n"
- " grant_inode_size: %d\n"
- " grant_extent_overhead: %d\n",
- ocd->ocd_blocksize,
- ocd->ocd_inodespace,
- ocd->ocd_grant_extent);
- if (flags & OBD_CONNECT_TRANSNO)
- seq_printf(m, " first_transno: %llx\n",
- ocd->ocd_transno);
- if (flags & OBD_CONNECT_CKSUM)
- seq_printf(m, " cksum_types: %#x\n",
- ocd->ocd_cksum_types);
- if (flags & OBD_CONNECT_MAX_EASIZE)
- seq_printf(m, " max_easize: %d\n", ocd->ocd_max_easize);
- if (flags & OBD_CONNECT_MAXBYTES)
- seq_printf(m, " max_object_bytes: %llx\n",
- ocd->ocd_maxbytes);
- if (flags & OBD_CONNECT_MULTIMODRPCS)
- seq_printf(m, " max_mod_rpcs: %hu\n",
- ocd->ocd_maxmodrpcs);
-}
-
-int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
- int mult)
-{
- long decimal_val, frac_val;
- int prtn;
-
- if (count < 10)
- return -EINVAL;
-
- decimal_val = val / mult;
- prtn = snprintf(buffer, count, "%ld", decimal_val);
- frac_val = val % mult;
-
- if (prtn < (count - 4) && frac_val > 0) {
- long temp_frac;
- int i, temp_mult = 1, frac_bits = 0;
-
- temp_frac = frac_val * 10;
- buffer[prtn++] = '.';
- while (frac_bits < 2 && (temp_frac / mult) < 1) {
- /* only reserved 2 bits fraction */
- buffer[prtn++] = '0';
- temp_frac *= 10;
- frac_bits++;
- }
- /*
- * Need to think these cases :
- * 1. #echo x.00 > /sys/xxx output result : x
- * 2. #echo x.0x > /sys/xxx output result : x.0x
- * 3. #echo x.x0 > /sys/xxx output result : x.x
- * 4. #echo x.xx > /sys/xxx output result : x.xx
- * Only reserved 2 bits fraction.
- */
- for (i = 0; i < (5 - prtn); i++)
- temp_mult *= 10;
-
- frac_bits = min((int)count - prtn, 3 - frac_bits);
- prtn += snprintf(buffer + prtn, frac_bits, "%ld",
- frac_val * temp_mult / mult);
-
- prtn--;
- while (buffer[prtn] < '1' || buffer[prtn] > '9') {
- prtn--;
- if (buffer[prtn] == '.') {
- prtn--;
- break;
- }
- }
- prtn++;
- }
- buffer[prtn++] = '\n';
- return prtn;
-}
-EXPORT_SYMBOL(lprocfs_read_frac_helper);
-
-int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count,
- int *val, int mult)
-{
- char kernbuf[20], *end, *pbuf;
-
- if (count > (sizeof(kernbuf) - 1))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
-
- kernbuf[count] = '\0';
- pbuf = kernbuf;
- if (*pbuf == '-') {
- mult = -mult;
- pbuf++;
- }
-
- *val = (int)simple_strtoul(pbuf, &end, 10) * mult;
- if (pbuf == end)
- return -EINVAL;
-
- if (end && *end == '.') {
- int temp_val, pow = 1;
- int i;
-
- pbuf = end + 1;
- if (strlen(pbuf) > 5)
- pbuf[5] = '\0'; /*only allow 5bits fractional*/
-
- temp_val = (int)simple_strtoul(pbuf, &end, 10) * mult;
-
- if (pbuf < end) {
- for (i = 0; i < (end - pbuf); i++)
- pow *= 10;
-
- *val += temp_val / pow;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_write_frac_helper);
-
-static int lprocfs_no_percpu_stats;
-module_param(lprocfs_no_percpu_stats, int, 0644);
-MODULE_PARM_DESC(lprocfs_no_percpu_stats, "Do not alloc percpu data for lprocfs stats");
-
-#define MAX_STRING_SIZE 128
-
-int lprocfs_single_release(struct inode *inode, struct file *file)
-{
- return single_release(inode, file);
-}
-EXPORT_SYMBOL(lprocfs_single_release);
-
-int lprocfs_seq_release(struct inode *inode, struct file *file)
-{
- return seq_release(inode, file);
-}
-EXPORT_SYMBOL(lprocfs_seq_release);
-
-/* lprocfs API calls */
-
-struct dentry *ldebugfs_add_simple(struct dentry *root,
- char *name, void *data,
- const struct file_operations *fops)
-{
- struct dentry *entry;
- umode_t mode = 0;
-
- if (!root || !name || !fops)
- return ERR_PTR(-EINVAL);
-
- if (fops->read)
- mode = 0444;
- if (fops->write)
- mode |= 0200;
- entry = debugfs_create_file(name, mode, root, data, fops);
- if (IS_ERR_OR_NULL(entry)) {
- CERROR("LprocFS: No memory to create <debugfs> entry %s\n", name);
- return entry ?: ERR_PTR(-ENOMEM);
- }
- return entry;
-}
-EXPORT_SYMBOL_GPL(ldebugfs_add_simple);
-
-static const struct file_operations lprocfs_generic_fops = { };
-
-int ldebugfs_add_vars(struct dentry *parent,
- struct lprocfs_vars *list,
- void *data)
-{
- if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list))
- return -EINVAL;
-
- while (list->name) {
- struct dentry *entry;
- umode_t mode = 0;
-
- if (list->proc_mode != 0000) {
- mode = list->proc_mode;
- } else if (list->fops) {
- if (list->fops->read)
- mode = 0444;
- if (list->fops->write)
- mode |= 0200;
- }
- entry = debugfs_create_file(list->name, mode, parent,
- list->data ?: data,
- list->fops ?: &lprocfs_generic_fops
- );
- if (IS_ERR_OR_NULL(entry))
- return entry ? PTR_ERR(entry) : -ENOMEM;
- list++;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(ldebugfs_add_vars);
-
-void ldebugfs_remove(struct dentry **entryp)
-{
- debugfs_remove_recursive(*entryp);
- *entryp = NULL;
-}
-EXPORT_SYMBOL_GPL(ldebugfs_remove);
-
-struct dentry *ldebugfs_register(const char *name,
- struct dentry *parent,
- struct lprocfs_vars *list, void *data)
-{
- struct dentry *entry;
-
- entry = debugfs_create_dir(name, parent);
- if (IS_ERR_OR_NULL(entry)) {
- entry = entry ?: ERR_PTR(-ENOMEM);
- goto out;
- }
-
- if (!IS_ERR_OR_NULL(list)) {
- int rc;
-
- rc = ldebugfs_add_vars(entry, list, data);
- if (rc != 0) {
- debugfs_remove(entry);
- entry = ERR_PTR(rc);
- }
- }
-out:
- return entry;
-}
-EXPORT_SYMBOL_GPL(ldebugfs_register);
-
-/* Generic callbacks */
-static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%s\n", obd->obd_uuid.uuid);
-}
-LUSTRE_RO_ATTR(uuid);
-
-static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%u\n", osfs.os_bsize);
-
- return rc;
-}
-LUSTRE_RO_ATTR(blocksize);
-
-static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_blocks;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- return sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytestotal);
-
-static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_bfree;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- return sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytesfree);
-
-static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_bavail;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- return sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytesavail);
-
-static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%llu\n", osfs.os_files);
-
- return rc;
-}
-LUSTRE_RO_ATTR(filestotal);
-
-static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%llu\n", osfs.os_ffree);
-
- return rc;
-}
-LUSTRE_RO_ATTR(filesfree);
-
-int lprocfs_rd_server_uuid(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- struct obd_import *imp;
- char *imp_state_name = NULL;
- int rc;
-
- LASSERT(obd);
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- imp = obd->u.cli.cl_import;
- imp_state_name = ptlrpc_import_state_name(imp->imp_state);
- seq_printf(m, "%s\t%s%s\n",
- obd2cli_tgt(obd), imp_state_name,
- imp->imp_deactive ? "\tDEACTIVATED" : "");
-
- up_read(&obd->u.cli.cl_sem);
-
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_server_uuid);
-
-int lprocfs_rd_conn_uuid(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- struct ptlrpc_connection *conn;
- int rc;
-
- LASSERT(obd);
-
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- conn = obd->u.cli.cl_import->imp_connection;
- if (conn && obd->u.cli.cl_import)
- seq_printf(m, "%s\n", conn->c_remote_uuid.uuid);
- else
- seq_puts(m, "<none>\n");
-
- up_read(&obd->u.cli.cl_sem);
-
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_conn_uuid);
-
-/**
- * Lock statistics structure for access, possibly only on this CPU.
- *
- * The statistics struct may be allocated with per-CPU structures for
- * efficient concurrent update (usually only on server-wide stats), or
- * as a single global struct (e.g. for per-client or per-job statistics),
- * so the required locking depends on the type of structure allocated.
- *
- * For per-CPU statistics, pin the thread to the current cpuid so that
- * will only access the statistics for that CPU. If the stats structure
- * for the current CPU has not been allocated (or previously freed),
- * allocate it now. The per-CPU statistics do not need locking since
- * the thread is pinned to the CPU during update.
- *
- * For global statistics, lock the stats structure to prevent concurrent update.
- *
- * \param[in] stats statistics structure to lock
- * \param[in] opc type of operation:
- * LPROCFS_GET_SMP_ID: "lock" and return current CPU index
- * for incrementing statistics for that CPU
- * LPROCFS_GET_NUM_CPU: "lock" and return number of used
- * CPU indices to iterate over all indices
- * \param[out] flags CPU interrupt saved state for IRQ-safe locking
- *
- * \retval cpuid of current thread or number of allocated structs
- * \retval negative on error (only for opc LPROCFS_GET_SMP_ID + per-CPU stats)
- */
-int lprocfs_stats_lock(struct lprocfs_stats *stats,
- enum lprocfs_stats_lock_ops opc,
- unsigned long *flags)
-{
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_lock_irqsave(&stats->ls_lock, *flags);
- else
- spin_lock(&stats->ls_lock);
- return opc == LPROCFS_GET_NUM_CPU ? 1 : 0;
- }
-
- switch (opc) {
- case LPROCFS_GET_SMP_ID: {
- unsigned int cpuid = get_cpu();
-
- if (unlikely(!stats->ls_percpu[cpuid])) {
- int rc = lprocfs_stats_alloc_one(stats, cpuid);
-
- if (rc < 0) {
- put_cpu();
- return rc;
- }
- }
- return cpuid;
- }
- case LPROCFS_GET_NUM_CPU:
- return stats->ls_biggest_alloc_num;
- default:
- LBUG();
- }
-}
-
-/**
- * Unlock statistics structure after access.
- *
- * Unlock the lock acquired via lprocfs_stats_lock() for global statistics,
- * or unpin this thread from the current cpuid for per-CPU statistics.
- *
- * This function must be called using the same arguments as used when calling
- * lprocfs_stats_lock() so that the correct operation can be performed.
- *
- * \param[in] stats statistics structure to unlock
- * \param[in] opc type of operation (current cpuid or number of structs)
- * \param[in] flags CPU interrupt saved state for IRQ-safe locking
- */
-void lprocfs_stats_unlock(struct lprocfs_stats *stats,
- enum lprocfs_stats_lock_ops opc,
- unsigned long *flags)
-{
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_unlock_irqrestore(&stats->ls_lock, *flags);
- else
- spin_unlock(&stats->ls_lock);
- } else if (opc == LPROCFS_GET_SMP_ID) {
- put_cpu();
- }
-}
-
-/** add up per-cpu counters */
-void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
- struct lprocfs_counter *cnt)
-{
- unsigned int num_entry;
- struct lprocfs_counter *percpu_cntr;
- int i;
- unsigned long flags = 0;
-
- memset(cnt, 0, sizeof(*cnt));
-
- if (!stats) {
- /* set count to 1 to avoid divide-by-zero errs in callers */
- cnt->lc_count = 1;
- return;
- }
-
- cnt->lc_min = LC_MIN_INIT;
-
- num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
-
- for (i = 0; i < num_entry; i++) {
- if (!stats->ls_percpu[i])
- continue;
- percpu_cntr = lprocfs_stats_counter_get(stats, i, idx);
-
- cnt->lc_count += percpu_cntr->lc_count;
- cnt->lc_sum += percpu_cntr->lc_sum;
- if (percpu_cntr->lc_min < cnt->lc_min)
- cnt->lc_min = percpu_cntr->lc_min;
- if (percpu_cntr->lc_max > cnt->lc_max)
- cnt->lc_max = percpu_cntr->lc_max;
- cnt->lc_sumsquare += percpu_cntr->lc_sumsquare;
- }
-
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
-}
-
-/**
- * Append a space separated list of current set flags to str.
- */
-#define flag2str(flag, first) \
- do { \
- if (imp->imp_##flag) \
- seq_printf(m, "%s" #flag, first ? "" : ", "); \
- } while (0)
-static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m)
-{
- bool first = true;
-
- if (imp->imp_obd->obd_no_recov) {
- seq_puts(m, "no_recov");
- first = false;
- }
-
- flag2str(invalid, first);
- first = false;
- flag2str(deactive, first);
- flag2str(replayable, first);
- flag2str(pingable, first);
- return 0;
-}
-
-#undef flags2str
-
-static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep)
-{
- __u64 mask = 1;
- int i;
- bool first = true;
-
- for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
- if (flags & mask) {
- seq_printf(m, "%s%s",
- first ? sep : "", obd_connect_names[i]);
- first = false;
- }
- }
- if (flags & ~(mask - 1))
- seq_printf(m, "%sunknown flags %#llx",
- first ? sep : "", flags & ~(mask - 1));
-}
-
-int lprocfs_rd_import(struct seq_file *m, void *data)
-{
- char nidstr[LNET_NIDSTR_SIZE];
- struct lprocfs_counter ret;
- struct lprocfs_counter_header *header;
- struct obd_device *obd = data;
- struct obd_import *imp;
- struct obd_import_conn *conn;
- struct obd_connect_data *ocd;
- int j;
- int k;
- int rw = 0;
- int rc;
-
- LASSERT(obd);
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- imp = obd->u.cli.cl_import;
- ocd = &imp->imp_connect_data;
-
- seq_printf(m, "import:\n"
- " name: %s\n"
- " target: %s\n"
- " state: %s\n"
- " instance: %u\n"
- " connect_flags: [ ",
- obd->obd_name,
- obd2cli_tgt(obd),
- ptlrpc_import_state_name(imp->imp_state),
- imp->imp_connect_data.ocd_instance);
- obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags,
- ", ");
- seq_puts(m, " ]\n");
- obd_connect_data_seqprint(m, ocd);
- seq_puts(m, " import_flags: [ ");
- obd_import_flags2str(imp, m);
-
- seq_puts(m,
- " ]\n"
- " connection:\n"
- " failover_nids: [ ");
- spin_lock(&imp->imp_lock);
- j = 0;
- list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
- libcfs_nid2str_r(conn->oic_conn->c_peer.nid,
- nidstr, sizeof(nidstr));
- seq_printf(m, "%s%s", j ? ", " : "", nidstr);
- j++;
- }
- if (imp->imp_connection)
- libcfs_nid2str_r(imp->imp_connection->c_peer.nid,
- nidstr, sizeof(nidstr));
- else
- strncpy(nidstr, "<none>", sizeof(nidstr));
- seq_printf(m,
- " ]\n"
- " current_connection: %s\n"
- " connection_attempts: %u\n"
- " generation: %u\n"
- " in-progress_invalidations: %u\n",
- nidstr,
- imp->imp_conn_cnt,
- imp->imp_generation,
- atomic_read(&imp->imp_inval_count));
- spin_unlock(&imp->imp_lock);
-
- if (!obd->obd_svc_stats)
- goto out_climp;
-
- header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR];
- lprocfs_stats_collect(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, &ret);
- if (ret.lc_count != 0) {
- /* first argument to do_div MUST be __u64 */
- __u64 sum = ret.lc_sum;
-
- do_div(sum, ret.lc_count);
- ret.lc_sum = sum;
- } else {
- ret.lc_sum = 0;
- }
- seq_printf(m,
- " rpcs:\n"
- " inflight: %u\n"
- " unregistering: %u\n"
- " timeouts: %u\n"
- " avg_waittime: %llu %s\n",
- atomic_read(&imp->imp_inflight),
- atomic_read(&imp->imp_unregistering),
- atomic_read(&imp->imp_timeouts),
- ret.lc_sum, header->lc_units);
-
- k = 0;
- for (j = 0; j < IMP_AT_MAX_PORTALS; j++) {
- if (imp->imp_at.iat_portal[j] == 0)
- break;
- k = max_t(unsigned int, k,
- at_get(&imp->imp_at.iat_service_estimate[j]));
- }
- seq_printf(m,
- " service_estimates:\n"
- " services: %u sec\n"
- " network: %u sec\n",
- k,
- at_get(&imp->imp_at.iat_net_latency));
-
- seq_printf(m,
- " transactions:\n"
- " last_replay: %llu\n"
- " peer_committed: %llu\n"
- " last_checked: %llu\n",
- imp->imp_last_replay_transno,
- imp->imp_peer_committed_transno,
- imp->imp_last_transno_checked);
-
- /* avg data rates */
- for (rw = 0; rw <= 1; rw++) {
- lprocfs_stats_collect(obd->obd_svc_stats,
- PTLRPC_LAST_CNTR + BRW_READ_BYTES + rw,
- &ret);
- if (ret.lc_sum > 0 && ret.lc_count > 0) {
- /* first argument to do_div MUST be __u64 */
- __u64 sum = ret.lc_sum;
-
- do_div(sum, ret.lc_count);
- ret.lc_sum = sum;
- seq_printf(m,
- " %s_data_averages:\n"
- " bytes_per_rpc: %llu\n",
- rw ? "write" : "read",
- ret.lc_sum);
- }
- k = (int)ret.lc_sum;
- j = opcode_offset(OST_READ + rw) + EXTRA_MAX_OPCODES;
- header = &obd->obd_svc_stats->ls_cnt_header[j];
- lprocfs_stats_collect(obd->obd_svc_stats, j, &ret);
- if (ret.lc_sum > 0 && ret.lc_count != 0) {
- /* first argument to do_div MUST be __u64 */
- __u64 sum = ret.lc_sum;
-
- do_div(sum, ret.lc_count);
- ret.lc_sum = sum;
- seq_printf(m,
- " %s_per_rpc: %llu\n",
- header->lc_units, ret.lc_sum);
- j = (int)ret.lc_sum;
- if (j > 0)
- seq_printf(m,
- " MB_per_sec: %u.%.02u\n",
- k / j, (100 * k / j) % 100);
- }
- }
-
-out_climp:
- up_read(&obd->u.cli.cl_sem);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_import);
-
-int lprocfs_rd_state(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- struct obd_import *imp;
- int j, k, rc;
-
- LASSERT(obd);
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- imp = obd->u.cli.cl_import;
-
- seq_printf(m, "current_state: %s\n",
- ptlrpc_import_state_name(imp->imp_state));
- seq_puts(m, "state_history:\n");
- k = imp->imp_state_hist_idx;
- for (j = 0; j < IMP_STATE_HIST_LEN; j++) {
- struct import_state_hist *ish =
- &imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN];
- if (ish->ish_state == 0)
- continue;
- seq_printf(m, " - [ %lld, %s ]\n", (s64)ish->ish_time,
- ptlrpc_import_state_name(ish->ish_state));
- }
-
- up_read(&obd->u.cli.cl_sem);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_state);
-
-int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at)
-{
- int i;
-
- for (i = 0; i < AT_BINS; i++)
- seq_printf(m, "%3u ", at->at_hist[i]);
- seq_puts(m, "\n");
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_at_hist_helper);
-
-/* See also ptlrpc_lprocfs_rd_timeouts */
-int lprocfs_rd_timeouts(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- struct obd_import *imp;
- unsigned int cur, worst;
- time64_t now, worstt;
- struct dhms ts;
- int i, rc;
-
- LASSERT(obd);
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- imp = obd->u.cli.cl_import;
-
- now = ktime_get_real_seconds();
-
- /* Some network health info for kicks */
- s2dhms(&ts, now - imp->imp_last_reply_time);
- seq_printf(m, "%-10s : %lld, " DHMS_FMT " ago\n",
- "last reply", (s64)imp->imp_last_reply_time, DHMS_VARS(&ts));
-
- cur = at_get(&imp->imp_at.iat_net_latency);
- worst = imp->imp_at.iat_net_latency.at_worst_ever;
- worstt = imp->imp_at.iat_net_latency.at_worst_time;
- s2dhms(&ts, now - worstt);
- seq_printf(m, "%-10s : cur %3u worst %3u (at %lld, " DHMS_FMT " ago) ",
- "network", cur, worst, (s64)worstt, DHMS_VARS(&ts));
- lprocfs_at_hist_helper(m, &imp->imp_at.iat_net_latency);
-
- for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
- if (imp->imp_at.iat_portal[i] == 0)
- break;
- cur = at_get(&imp->imp_at.iat_service_estimate[i]);
- worst = imp->imp_at.iat_service_estimate[i].at_worst_ever;
- worstt = imp->imp_at.iat_service_estimate[i].at_worst_time;
- s2dhms(&ts, now - worstt);
- seq_printf(m, "portal %-2d : cur %3u worst %3u (at %lld, "
- DHMS_FMT " ago) ", imp->imp_at.iat_portal[i],
- cur, worst, (s64)worstt, DHMS_VARS(&ts));
- lprocfs_at_hist_helper(m, &imp->imp_at.iat_service_estimate[i]);
- }
-
- up_read(&obd->u.cli.cl_sem);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_timeouts);
-
-int lprocfs_rd_connect_flags(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- __u64 flags;
- int rc;
-
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- flags = obd->u.cli.cl_import->imp_connect_data.ocd_connect_flags;
- seq_printf(m, "flags=%#llx\n", flags);
- obd_connect_seq_flags2str(m, flags, "\n");
- seq_puts(m, "\n");
- up_read(&obd->u.cli.cl_sem);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_connect_flags);
-
-static struct attribute *obd_def_attrs[] = {
- &lustre_attr_blocksize.attr,
- &lustre_attr_kbytestotal.attr,
- &lustre_attr_kbytesfree.attr,
- &lustre_attr_kbytesavail.attr,
- &lustre_attr_filestotal.attr,
- &lustre_attr_filesfree.attr,
- &lustre_attr_uuid.attr,
- NULL,
-};
-
-static void obd_sysfs_release(struct kobject *kobj)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- complete(&obd->obd_kobj_unregister);
-}
-
-static struct kobj_type obd_ktype = {
- .default_attrs = obd_def_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = obd_sysfs_release,
-};
-
-int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
- const struct attribute_group *attrs)
-{
- int rc = 0;
-
- init_completion(&obd->obd_kobj_unregister);
- rc = kobject_init_and_add(&obd->obd_kobj, &obd_ktype,
- obd->obd_type->typ_kobj,
- "%s", obd->obd_name);
- if (rc)
- return rc;
-
- if (attrs) {
- rc = sysfs_create_group(&obd->obd_kobj, attrs);
- if (rc) {
- kobject_put(&obd->obd_kobj);
- return rc;
- }
- }
-
- obd->obd_debugfs_entry = ldebugfs_register(obd->obd_name,
- obd->obd_type->typ_debugfs_entry,
- list, obd);
- if (IS_ERR_OR_NULL(obd->obd_debugfs_entry)) {
- rc = obd->obd_debugfs_entry ? PTR_ERR(obd->obd_debugfs_entry)
- : -ENOMEM;
- CERROR("error %d setting up lprocfs for %s\n",
- rc, obd->obd_name);
- obd->obd_debugfs_entry = NULL;
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(lprocfs_obd_setup);
-
-int lprocfs_obd_cleanup(struct obd_device *obd)
-{
- if (!obd)
- return -EINVAL;
-
- if (!IS_ERR_OR_NULL(obd->obd_debugfs_entry))
- ldebugfs_remove(&obd->obd_debugfs_entry);
-
- kobject_put(&obd->obd_kobj);
- wait_for_completion(&obd->obd_kobj_unregister);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(lprocfs_obd_cleanup);
-
-int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
-{
- struct lprocfs_counter *cntr;
- unsigned int percpusize;
- int rc = -ENOMEM;
- unsigned long flags = 0;
- int i;
-
- LASSERT(!stats->ls_percpu[cpuid]);
- LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
-
- percpusize = lprocfs_stats_counter_size(stats);
- stats->ls_percpu[cpuid] = kzalloc(percpusize, GFP_ATOMIC);
- if (stats->ls_percpu[cpuid]) {
- rc = 0;
- if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_lock_irqsave(&stats->ls_lock, flags);
- else
- spin_lock(&stats->ls_lock);
- if (stats->ls_biggest_alloc_num <= cpuid)
- stats->ls_biggest_alloc_num = cpuid + 1;
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_unlock_irqrestore(&stats->ls_lock, flags);
- else
- spin_unlock(&stats->ls_lock);
- }
- /* initialize the ls_percpu[cpuid] non-zero counter */
- for (i = 0; i < stats->ls_num; ++i) {
- cntr = lprocfs_stats_counter_get(stats, cpuid, i);
- cntr->lc_min = LC_MIN_INIT;
- }
- }
- return rc;
-}
-
-struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
- enum lprocfs_stats_flags flags)
-{
- struct lprocfs_stats *stats;
- unsigned int num_entry;
- unsigned int percpusize = 0;
- int i;
-
- if (num == 0)
- return NULL;
-
- if (lprocfs_no_percpu_stats != 0)
- flags |= LPROCFS_STATS_FLAG_NOPERCPU;
-
- if (flags & LPROCFS_STATS_FLAG_NOPERCPU)
- num_entry = 1;
- else
- num_entry = num_possible_cpus();
-
- /* alloc percpu pointers for all possible cpu slots */
- stats = kvzalloc(offsetof(typeof(*stats), ls_percpu[num_entry]),
- GFP_KERNEL);
- if (!stats)
- return NULL;
-
- stats->ls_num = num;
- stats->ls_flags = flags;
- spin_lock_init(&stats->ls_lock);
-
- /* alloc num of counter headers */
- stats->ls_cnt_header = kvmalloc_array(stats->ls_num,
- sizeof(struct lprocfs_counter_header),
- GFP_KERNEL | __GFP_ZERO);
- if (!stats->ls_cnt_header)
- goto fail;
-
- if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) {
- /* contains only one set counters */
- percpusize = lprocfs_stats_counter_size(stats);
- stats->ls_percpu[0] = kzalloc(percpusize, GFP_ATOMIC);
- if (!stats->ls_percpu[0])
- goto fail;
- stats->ls_biggest_alloc_num = 1;
- } else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) {
- /* alloc all percpu data */
- for (i = 0; i < num_entry; ++i)
- if (lprocfs_stats_alloc_one(stats, i) < 0)
- goto fail;
- }
-
- return stats;
-
-fail:
- lprocfs_free_stats(&stats);
- return NULL;
-}
-EXPORT_SYMBOL(lprocfs_alloc_stats);
-
-void lprocfs_free_stats(struct lprocfs_stats **statsh)
-{
- struct lprocfs_stats *stats = *statsh;
- unsigned int num_entry;
- unsigned int percpusize;
- unsigned int i;
-
- if (!stats || stats->ls_num == 0)
- return;
- *statsh = NULL;
-
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
- num_entry = 1;
- else
- num_entry = num_possible_cpus();
-
- percpusize = lprocfs_stats_counter_size(stats);
- for (i = 0; i < num_entry; i++)
- kfree(stats->ls_percpu[i]);
- kvfree(stats->ls_cnt_header);
- kvfree(stats);
-}
-EXPORT_SYMBOL(lprocfs_free_stats);
-
-__u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx,
- enum lprocfs_fields_flags field)
-{
- unsigned int i;
- unsigned int num_cpu;
- unsigned long flags = 0;
- __u64 ret = 0;
-
- LASSERT(stats);
-
- num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
- for (i = 0; i < num_cpu; i++) {
- if (!stats->ls_percpu[i])
- continue;
- ret += lprocfs_read_helper(
- lprocfs_stats_counter_get(stats, i, idx),
- &stats->ls_cnt_header[idx], stats->ls_flags,
- field);
- }
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
- return ret;
-}
-EXPORT_SYMBOL(lprocfs_stats_collector);
-
-void lprocfs_clear_stats(struct lprocfs_stats *stats)
-{
- struct lprocfs_counter *percpu_cntr;
- int i;
- int j;
- unsigned int num_entry;
- unsigned long flags = 0;
-
- num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
-
- for (i = 0; i < num_entry; i++) {
- if (!stats->ls_percpu[i])
- continue;
- for (j = 0; j < stats->ls_num; j++) {
- percpu_cntr = lprocfs_stats_counter_get(stats, i, j);
- percpu_cntr->lc_count = 0;
- percpu_cntr->lc_min = LC_MIN_INIT;
- percpu_cntr->lc_max = 0;
- percpu_cntr->lc_sumsquare = 0;
- percpu_cntr->lc_sum = 0;
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- percpu_cntr->lc_sum_irq = 0;
- }
- }
-
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
-}
-EXPORT_SYMBOL(lprocfs_clear_stats);
-
-static ssize_t lprocfs_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct lprocfs_stats *stats = seq->private;
-
- lprocfs_clear_stats(stats);
-
- return len;
-}
-
-static void *lprocfs_stats_seq_start(struct seq_file *p, loff_t *pos)
-{
- struct lprocfs_stats *stats = p->private;
-
- return (*pos < stats->ls_num) ? pos : NULL;
-}
-
-static void lprocfs_stats_seq_stop(struct seq_file *p, void *v)
-{
-}
-
-static void *lprocfs_stats_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- (*pos)++;
- return lprocfs_stats_seq_start(p, pos);
-}
-
-/* seq file export of one lprocfs counter */
-static int lprocfs_stats_seq_show(struct seq_file *p, void *v)
-{
- struct lprocfs_stats *stats = p->private;
- struct lprocfs_counter_header *hdr;
- struct lprocfs_counter ctr;
- int idx = *(loff_t *)v;
-
- if (idx == 0) {
- struct timespec64 now;
-
- ktime_get_real_ts64(&now);
- seq_printf(p, "%-25s %llu.%9lu secs.usecs\n",
- "snapshot_time",
- (s64)now.tv_sec, (unsigned long)now.tv_nsec);
- }
-
- hdr = &stats->ls_cnt_header[idx];
- lprocfs_stats_collect(stats, idx, &ctr);
-
- if (ctr.lc_count != 0) {
- seq_printf(p, "%-25s %lld samples [%s]",
- hdr->lc_name, ctr.lc_count, hdr->lc_units);
-
- if ((hdr->lc_config & LPROCFS_CNTR_AVGMINMAX) &&
- (ctr.lc_count > 0)) {
- seq_printf(p, " %lld %lld %lld",
- ctr.lc_min, ctr.lc_max, ctr.lc_sum);
- if (hdr->lc_config & LPROCFS_CNTR_STDDEV)
- seq_printf(p, " %lld", ctr.lc_sumsquare);
- }
- seq_putc(p, '\n');
- }
-
- return 0;
-}
-
-static const struct seq_operations lprocfs_stats_seq_sops = {
- .start = lprocfs_stats_seq_start,
- .stop = lprocfs_stats_seq_stop,
- .next = lprocfs_stats_seq_next,
- .show = lprocfs_stats_seq_show,
-};
-
-static int lprocfs_stats_seq_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int rc;
-
- rc = seq_open(file, &lprocfs_stats_seq_sops);
- if (rc)
- return rc;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-static const struct file_operations lprocfs_stats_seq_fops = {
- .owner = THIS_MODULE,
- .open = lprocfs_stats_seq_open,
- .read = seq_read,
- .write = lprocfs_stats_seq_write,
- .llseek = seq_lseek,
- .release = lprocfs_seq_release,
-};
-
-int ldebugfs_register_stats(struct dentry *parent, const char *name,
- struct lprocfs_stats *stats)
-{
- struct dentry *entry;
-
- LASSERT(!IS_ERR_OR_NULL(parent));
-
- entry = debugfs_create_file(name, 0644, parent, stats,
- &lprocfs_stats_seq_fops);
- if (IS_ERR_OR_NULL(entry))
- return entry ? PTR_ERR(entry) : -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ldebugfs_register_stats);
-
-void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
- unsigned int conf, const char *name,
- const char *units)
-{
- struct lprocfs_counter_header *header;
- struct lprocfs_counter *percpu_cntr;
- unsigned long flags = 0;
- unsigned int i;
- unsigned int num_cpu;
-
- header = &stats->ls_cnt_header[index];
- LASSERTF(header, "Failed to allocate stats header:[%d]%s/%s\n",
- index, name, units);
-
- header->lc_config = conf;
- header->lc_name = name;
- header->lc_units = units;
-
- num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
- for (i = 0; i < num_cpu; ++i) {
- if (!stats->ls_percpu[i])
- continue;
- percpu_cntr = lprocfs_stats_counter_get(stats, i, index);
- percpu_cntr->lc_count = 0;
- percpu_cntr->lc_min = LC_MIN_INIT;
- percpu_cntr->lc_max = 0;
- percpu_cntr->lc_sumsquare = 0;
- percpu_cntr->lc_sum = 0;
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- percpu_cntr->lc_sum_irq = 0;
- }
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
-}
-EXPORT_SYMBOL(lprocfs_counter_init);
-
-int lprocfs_exp_cleanup(struct obd_export *exp)
-{
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_exp_cleanup);
-
-__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
- struct lprocfs_counter_header *header,
- enum lprocfs_stats_flags flags,
- enum lprocfs_fields_flags field)
-{
- __s64 ret = 0;
-
- if (!lc || !header)
- return 0;
-
- switch (field) {
- case LPROCFS_FIELDS_FLAGS_CONFIG:
- ret = header->lc_config;
- break;
- case LPROCFS_FIELDS_FLAGS_SUM:
- ret = lc->lc_sum;
- if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- ret += lc->lc_sum_irq;
- break;
- case LPROCFS_FIELDS_FLAGS_MIN:
- ret = lc->lc_min;
- break;
- case LPROCFS_FIELDS_FLAGS_MAX:
- ret = lc->lc_max;
- break;
- case LPROCFS_FIELDS_FLAGS_AVG:
- ret = (lc->lc_max - lc->lc_min) / 2;
- break;
- case LPROCFS_FIELDS_FLAGS_SUMSQUARE:
- ret = lc->lc_sumsquare;
- break;
- case LPROCFS_FIELDS_FLAGS_COUNT:
- ret = lc->lc_count;
- break;
- default:
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_read_helper);
-
-int lprocfs_write_helper(const char __user *buffer, unsigned long count,
- int *val)
-{
- return lprocfs_write_frac_helper(buffer, count, val, 1);
-}
-EXPORT_SYMBOL(lprocfs_write_helper);
-
-int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count,
- __u64 *val)
-{
- return lprocfs_write_frac_u64_helper(buffer, count, val, 1);
-}
-EXPORT_SYMBOL(lprocfs_write_u64_helper);
-
-int lprocfs_write_frac_u64_helper(const char __user *buffer,
- unsigned long count, __u64 *val, int mult)
-{
- char kernbuf[22], *end, *pbuf;
- __u64 whole, frac = 0, units;
- unsigned int frac_d = 1;
- int sign = 1;
-
- if (count > (sizeof(kernbuf) - 1))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
-
- kernbuf[count] = '\0';
- pbuf = kernbuf;
- if (*pbuf == '-') {
- sign = -1;
- pbuf++;
- }
-
- whole = simple_strtoull(pbuf, &end, 10);
- if (pbuf == end)
- return -EINVAL;
-
- if (*end == '.') {
- int i;
-
- pbuf = end + 1;
-
- /* need to limit frac_d to a __u32 */
- if (strlen(pbuf) > 10)
- pbuf[10] = '\0';
-
- frac = simple_strtoull(pbuf, &end, 10);
- /* count decimal places */
- for (i = 0; i < (end - pbuf); i++)
- frac_d *= 10;
- }
-
- units = 1;
- if (end) {
- switch (tolower(*end)) {
- case 'p':
- units <<= 10;
- /* fall through */
- case 't':
- units <<= 10;
- /* fall through */
- case 'g':
- units <<= 10;
- /* fall through */
- case 'm':
- units <<= 10;
- /* fall through */
- case 'k':
- units <<= 10;
- }
- }
- /* Specified units override the multiplier */
- if (units > 1)
- mult = units;
-
- frac *= mult;
- do_div(frac, frac_d);
- *val = sign * (whole * mult + frac);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_write_frac_u64_helper);
-
-static char *lprocfs_strnstr(const char *s1, const char *s2, size_t len)
-{
- size_t l2;
-
- l2 = strlen(s2);
- if (!l2)
- return (char *)s1;
- while (len >= l2) {
- len--;
- if (!memcmp(s1, s2, l2))
- return (char *)s1;
- s1++;
- }
- return NULL;
-}
-
-/**
- * Find the string \a name in the input \a buffer, and return a pointer to the
- * value immediately following \a name, reducing \a count appropriately.
- * If \a name is not found the original \a buffer is returned.
- */
-char *lprocfs_find_named_value(const char *buffer, const char *name,
- size_t *count)
-{
- char *val;
- size_t buflen = *count;
-
- /* there is no strnstr() in rhel5 and ubuntu kernels */
- val = lprocfs_strnstr(buffer, name, buflen);
- if (!val)
- return (char *)buffer;
-
- val += strlen(name); /* skip prefix */
- while (val < buffer + buflen && isspace(*val)) /* skip separator */
- val++;
-
- *count = 0;
- while (val < buffer + buflen && isalnum(*val)) {
- ++*count;
- ++val;
- }
-
- return val - *count;
-}
-EXPORT_SYMBOL(lprocfs_find_named_value);
-
-int ldebugfs_seq_create(struct dentry *parent, const char *name,
- umode_t mode, const struct file_operations *seq_fops,
- void *data)
-{
- struct dentry *entry;
-
- /* Disallow secretly (un)writable entries. */
- LASSERT((!seq_fops->write) == ((mode & 0222) == 0));
-
- entry = debugfs_create_file(name, mode, parent, data, seq_fops);
- if (IS_ERR_OR_NULL(entry))
- return entry ? PTR_ERR(entry) : -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ldebugfs_seq_create);
-
-int ldebugfs_obd_seq_create(struct obd_device *dev,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data)
-{
- return ldebugfs_seq_create(dev->obd_debugfs_entry, name,
- mode, seq_fops, data);
-}
-EXPORT_SYMBOL_GPL(ldebugfs_obd_seq_create);
-
-void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
-{
- if (value >= OBD_HIST_MAX)
- value = OBD_HIST_MAX - 1;
-
- spin_lock(&oh->oh_lock);
- oh->oh_buckets[value]++;
- spin_unlock(&oh->oh_lock);
-}
-EXPORT_SYMBOL(lprocfs_oh_tally);
-
-void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value)
-{
- unsigned int val = 0;
-
- if (likely(value != 0))
- val = min(fls(value - 1), OBD_HIST_MAX);
-
- lprocfs_oh_tally(oh, val);
-}
-EXPORT_SYMBOL(lprocfs_oh_tally_log2);
-
-unsigned long lprocfs_oh_sum(struct obd_histogram *oh)
-{
- unsigned long ret = 0;
- int i;
-
- for (i = 0; i < OBD_HIST_MAX; i++)
- ret += oh->oh_buckets[i];
- return ret;
-}
-EXPORT_SYMBOL(lprocfs_oh_sum);
-
-void lprocfs_oh_clear(struct obd_histogram *oh)
-{
- spin_lock(&oh->oh_lock);
- memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
- spin_unlock(&oh->oh_lock);
-}
-EXPORT_SYMBOL(lprocfs_oh_clear);
-
-int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count,
- struct root_squash_info *squash, char *name)
-{
- char kernbuf[64], *tmp, *errmsg;
- unsigned long uid, gid;
- int rc;
-
- if (count >= sizeof(kernbuf)) {
- errmsg = "string too long";
- rc = -EINVAL;
- goto failed_noprint;
- }
- if (copy_from_user(kernbuf, buffer, count)) {
- errmsg = "bad address";
- rc = -EFAULT;
- goto failed_noprint;
- }
- kernbuf[count] = '\0';
-
- /* look for uid gid separator */
- tmp = strchr(kernbuf, ':');
- if (!tmp) {
- errmsg = "needs uid:gid format";
- rc = -EINVAL;
- goto failed;
- }
- *tmp = '\0';
- tmp++;
-
- /* parse uid */
- if (kstrtoul(kernbuf, 0, &uid) != 0) {
- errmsg = "bad uid";
- rc = -EINVAL;
- goto failed;
- }
- /* parse gid */
- if (kstrtoul(tmp, 0, &gid) != 0) {
- errmsg = "bad gid";
- rc = -EINVAL;
- goto failed;
- }
-
- squash->rsi_uid = uid;
- squash->rsi_gid = gid;
-
- LCONSOLE_INFO("%s: root_squash is set to %u:%u\n",
- name, squash->rsi_uid, squash->rsi_gid);
- return count;
-
-failed:
- if (tmp) {
- tmp--;
- *tmp = ':';
- }
- CWARN("%s: failed to set root_squash to \"%s\", %s, rc = %d\n",
- name, kernbuf, errmsg, rc);
- return rc;
-failed_noprint:
- CWARN("%s: failed to set root_squash due to %s, rc = %d\n",
- name, errmsg, rc);
- return rc;
-}
-EXPORT_SYMBOL(lprocfs_wr_root_squash);
-
-int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count,
- struct root_squash_info *squash, char *name)
-{
- char *kernbuf = NULL, *errmsg;
- struct list_head tmp;
- int len = count;
- int rc;
-
- if (count > 4096) {
- errmsg = "string too long";
- rc = -EINVAL;
- goto failed;
- }
-
- kernbuf = kzalloc(count + 1, GFP_NOFS);
- if (!kernbuf) {
- errmsg = "no memory";
- rc = -ENOMEM;
- goto failed;
- }
-
- if (copy_from_user(kernbuf, buffer, count)) {
- errmsg = "bad address";
- rc = -EFAULT;
- goto failed;
- }
- kernbuf[count] = '\0';
-
- if (count > 0 && kernbuf[count - 1] == '\n')
- len = count - 1;
-
- if ((len == 4 && !strncmp(kernbuf, "NONE", len)) ||
- (len == 5 && !strncmp(kernbuf, "clear", len))) {
- /* empty string is special case */
- down_write(&squash->rsi_sem);
- if (!list_empty(&squash->rsi_nosquash_nids))
- cfs_free_nidlist(&squash->rsi_nosquash_nids);
- up_write(&squash->rsi_sem);
- LCONSOLE_INFO("%s: nosquash_nids is cleared\n", name);
- kfree(kernbuf);
- return count;
- }
-
- INIT_LIST_HEAD(&tmp);
- if (cfs_parse_nidlist(kernbuf, count, &tmp) <= 0) {
- errmsg = "can't parse";
- rc = -EINVAL;
- goto failed;
- }
- LCONSOLE_INFO("%s: nosquash_nids set to %s\n",
- name, kernbuf);
- kfree(kernbuf);
- kernbuf = NULL;
-
- down_write(&squash->rsi_sem);
- if (!list_empty(&squash->rsi_nosquash_nids))
- cfs_free_nidlist(&squash->rsi_nosquash_nids);
- list_splice(&tmp, &squash->rsi_nosquash_nids);
- up_write(&squash->rsi_sem);
-
- return count;
-
-failed:
- if (kernbuf) {
- CWARN("%s: failed to set nosquash_nids to \"%s\", %s rc = %d\n",
- name, kernbuf, errmsg, rc);
- kfree(kernbuf);
- kernbuf = NULL;
- } else {
- CWARN("%s: failed to set nosquash_nids due to %s rc = %d\n",
- name, errmsg, rc);
- }
- return rc;
-}
-EXPORT_SYMBOL(lprocfs_wr_nosquash_nids);
-
-static ssize_t lustre_attr_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct lustre_attr *a = container_of(attr, struct lustre_attr, attr);
-
- return a->show ? a->show(kobj, attr, buf) : 0;
-}
-
-static ssize_t lustre_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len)
-{
- struct lustre_attr *a = container_of(attr, struct lustre_attr, attr);
-
- return a->store ? a->store(kobj, attr, buf, len) : len;
-}
-
-const struct sysfs_ops lustre_sysfs_ops = {
- .show = lustre_attr_show,
- .store = lustre_attr_store,
-};
-EXPORT_SYMBOL_GPL(lustre_sysfs_ops);
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
deleted file mode 100644
index 3ae16e8501c2..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ /dev/null
@@ -1,2058 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lu_object.c
- *
- * Lustre Object.
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/libcfs/libcfs.h>
-
-#include <linux/module.h>
-
-/* hash_long() */
-#include <linux/libcfs/libcfs_hash.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_disk.h>
-#include <lustre_fid.h>
-#include <lu_object.h>
-#include <cl_object.h>
-#include <lu_ref.h>
-#include <linux/list.h>
-
-enum {
- LU_CACHE_PERCENT_MAX = 50,
- LU_CACHE_PERCENT_DEFAULT = 20
-};
-
-#define LU_CACHE_NR_MAX_ADJUST 512
-#define LU_CACHE_NR_UNLIMITED -1
-#define LU_CACHE_NR_DEFAULT LU_CACHE_NR_UNLIMITED
-#define LU_CACHE_NR_LDISKFS_LIMIT LU_CACHE_NR_UNLIMITED
-#define LU_CACHE_NR_ZFS_LIMIT 256
-
-#define LU_SITE_BITS_MIN 12
-#define LU_SITE_BITS_MAX 24
-#define LU_SITE_BITS_MAX_CL 19
-/**
- * total 256 buckets, we don't want too many buckets because:
- * - consume too much memory
- * - avoid unbalanced LRU list
- */
-#define LU_SITE_BKT_BITS 8
-
-static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
-module_param(lu_cache_percent, int, 0644);
-MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
-
-static long lu_cache_nr = LU_CACHE_NR_DEFAULT;
-module_param(lu_cache_nr, long, 0644);
-MODULE_PARM_DESC(lu_cache_nr, "Maximum number of objects in lu_object cache");
-
-static void lu_object_free(const struct lu_env *env, struct lu_object *o);
-static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
-
-/**
- * Decrease reference counter on object. If last reference is freed, return
- * object to the cache, unless lu_object_is_dying(o) holds. In the latter
- * case, free object immediately.
- */
-void lu_object_put(const struct lu_env *env, struct lu_object *o)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_object_header *top;
- struct lu_site *site;
- struct lu_object *orig;
- struct cfs_hash_bd bd;
- const struct lu_fid *fid;
-
- top = o->lo_header;
- site = o->lo_dev->ld_site;
- orig = o;
-
- /*
- * till we have full fids-on-OST implemented anonymous objects
- * are possible in OSP. such an object isn't listed in the site
- * so we should not remove it from the site.
- */
- fid = lu_object_fid(o);
- if (fid_is_zero(fid)) {
- LASSERT(!top->loh_hash.next && !top->loh_hash.pprev);
- LASSERT(list_empty(&top->loh_lru));
- if (!atomic_dec_and_test(&top->loh_ref))
- return;
- list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
- if (o->lo_ops->loo_object_release)
- o->lo_ops->loo_object_release(env, o);
- }
- lu_object_free(env, orig);
- return;
- }
-
- cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
- bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
-
- if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
- if (lu_object_is_dying(top)) {
- /*
- * somebody may be waiting for this, currently only
- * used for cl_object, see cl_object_put_last().
- */
- wake_up_all(&bkt->lsb_marche_funebre);
- }
- return;
- }
-
- /*
- * When last reference is released, iterate over object
- * layers, and notify them that object is no longer busy.
- */
- list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
- if (o->lo_ops->loo_object_release)
- o->lo_ops->loo_object_release(env, o);
- }
-
- if (!lu_object_is_dying(top)) {
- LASSERT(list_empty(&top->loh_lru));
- list_add_tail(&top->loh_lru, &bkt->lsb_lru);
- bkt->lsb_lru_len++;
- percpu_counter_inc(&site->ls_lru_len_counter);
- CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p, lru_len: %ld\n",
- o, site->ls_obj_hash, bkt, bkt->lsb_lru_len);
- cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
- return;
- }
-
- /*
- * If object is dying (will not be cached), then removed it
- * from hash table and LRU.
- *
- * This is done with hash table and LRU lists locked. As the only
- * way to acquire first reference to previously unreferenced
- * object is through hash-table lookup (lu_object_find()),
- * or LRU scanning (lu_site_purge()), that are done under hash-table
- * and LRU lock, no race with concurrent object lookup is possible
- * and we can safely destroy object below.
- */
- if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
- cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
- cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
- /*
- * Object was already removed from hash and lru above, can
- * kill it.
- */
- lu_object_free(env, orig);
-}
-EXPORT_SYMBOL(lu_object_put);
-
-/**
- * Kill the object and take it out of LRU cache.
- * Currently used by client code for layout change.
- */
-void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
-{
- struct lu_object_header *top;
-
- top = o->lo_header;
- set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
- if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
- struct lu_site *site = o->lo_dev->ld_site;
- struct cfs_hash *obj_hash = site->ls_obj_hash;
- struct cfs_hash_bd bd;
-
- cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
- if (!list_empty(&top->loh_lru)) {
- struct lu_site_bkt_data *bkt;
-
- list_del_init(&top->loh_lru);
- bkt = cfs_hash_bd_extra_get(obj_hash, &bd);
- bkt->lsb_lru_len--;
- percpu_counter_dec(&site->ls_lru_len_counter);
- }
- cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
- cfs_hash_bd_unlock(obj_hash, &bd, 1);
- }
-}
-EXPORT_SYMBOL(lu_object_unhash);
-
-/**
- * Allocate new object.
- *
- * This follows object creation protocol, described in the comment within
- * struct lu_device_operations definition.
- */
-static struct lu_object *lu_object_alloc(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_object *scan;
- struct lu_object *top;
- struct list_head *layers;
- unsigned int init_mask = 0;
- unsigned int init_flag;
- int clean;
- int result;
-
- /*
- * Create top-level object slice. This will also create
- * lu_object_header.
- */
- top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
- if (!top)
- return ERR_PTR(-ENOMEM);
- if (IS_ERR(top))
- return top;
- /*
- * This is the only place where object fid is assigned. It's constant
- * after this point.
- */
- top->lo_header->loh_fid = *f;
- layers = &top->lo_header->loh_layers;
-
- do {
- /*
- * Call ->loo_object_init() repeatedly, until no more new
- * object slices are created.
- */
- clean = 1;
- init_flag = 1;
- list_for_each_entry(scan, layers, lo_linkage) {
- if (init_mask & init_flag)
- goto next;
- clean = 0;
- scan->lo_header = top->lo_header;
- result = scan->lo_ops->loo_object_init(env, scan, conf);
- if (result != 0) {
- lu_object_free(env, top);
- return ERR_PTR(result);
- }
- init_mask |= init_flag;
-next:
- init_flag <<= 1;
- }
- } while (!clean);
-
- list_for_each_entry_reverse(scan, layers, lo_linkage) {
- if (scan->lo_ops->loo_object_start) {
- result = scan->lo_ops->loo_object_start(env, scan);
- if (result != 0) {
- lu_object_free(env, top);
- return ERR_PTR(result);
- }
- }
- }
-
- lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
- return top;
-}
-
-/**
- * Free an object.
- */
-static void lu_object_free(const struct lu_env *env, struct lu_object *o)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_site *site;
- struct lu_object *scan;
- struct list_head *layers;
- struct list_head splice;
-
- site = o->lo_dev->ld_site;
- layers = &o->lo_header->loh_layers;
- bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
- /*
- * First call ->loo_object_delete() method to release all resources.
- */
- list_for_each_entry_reverse(scan, layers, lo_linkage) {
- if (scan->lo_ops->loo_object_delete)
- scan->lo_ops->loo_object_delete(env, scan);
- }
-
- /*
- * Then, splice object layers into stand-alone list, and call
- * ->loo_object_free() on all layers to free memory. Splice is
- * necessary, because lu_object_header is freed together with the
- * top-level slice.
- */
- INIT_LIST_HEAD(&splice);
- list_splice_init(layers, &splice);
- while (!list_empty(&splice)) {
- /*
- * Free layers in bottom-to-top order, so that object header
- * lives as long as possible and ->loo_object_free() methods
- * can look at its contents.
- */
- o = container_of0(splice.prev, struct lu_object, lo_linkage);
- list_del_init(&o->lo_linkage);
- o->lo_ops->loo_object_free(env, o);
- }
-
- if (waitqueue_active(&bkt->lsb_marche_funebre))
- wake_up_all(&bkt->lsb_marche_funebre);
-}
-
-/**
- * Free \a nr objects from the cold end of the site LRU list.
- * if canblock is false, then don't block awaiting for another
- * instance of lu_site_purge() to complete
- */
-int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s,
- int nr, bool canblock)
-{
- struct lu_object_header *h;
- struct lu_object_header *temp;
- struct lu_site_bkt_data *bkt;
- struct cfs_hash_bd bd;
- struct cfs_hash_bd bd2;
- struct list_head dispose;
- int did_sth;
- unsigned int start = 0;
- int count;
- int bnr;
- unsigned int i;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
- return 0;
-
- INIT_LIST_HEAD(&dispose);
- /*
- * Under LRU list lock, scan LRU list and move unreferenced objects to
- * the dispose list, removing them from LRU and hash table.
- */
- if (nr != ~0)
- start = s->ls_purge_start;
- bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
- again:
- /*
- * It doesn't make any sense to make purge threads parallel, that can
- * only bring troubles to us. See LU-5331.
- */
- if (canblock)
- mutex_lock(&s->ls_purge_mutex);
- else if (!mutex_trylock(&s->ls_purge_mutex))
- goto out;
-
- did_sth = 0;
- cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
- if (i < start)
- continue;
- count = bnr;
- cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
- bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
-
- list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
- LASSERT(atomic_read(&h->loh_ref) == 0);
-
- cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
- LASSERT(bd.bd_bucket == bd2.bd_bucket);
-
- cfs_hash_bd_del_locked(s->ls_obj_hash,
- &bd2, &h->loh_hash);
- list_move(&h->loh_lru, &dispose);
- bkt->lsb_lru_len--;
- percpu_counter_dec(&s->ls_lru_len_counter);
- if (did_sth == 0)
- did_sth = 1;
-
- if (nr != ~0 && --nr == 0)
- break;
-
- if (count > 0 && --count == 0)
- break;
- }
- cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
- cond_resched();
- /*
- * Free everything on the dispose list. This is safe against
- * races due to the reasons described in lu_object_put().
- */
- while (!list_empty(&dispose)) {
- h = container_of0(dispose.next,
- struct lu_object_header, loh_lru);
- list_del_init(&h->loh_lru);
- lu_object_free(env, lu_object_top(h));
- lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
- }
-
- if (nr == 0)
- break;
- }
- mutex_unlock(&s->ls_purge_mutex);
-
- if (nr != 0 && did_sth && start != 0) {
- start = 0; /* restart from the first bucket */
- goto again;
- }
- /* race on s->ls_purge_start, but nobody cares */
- s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
-out:
- return nr;
-}
-EXPORT_SYMBOL(lu_site_purge_objects);
-
-/*
- * Object printing.
- *
- * Code below has to jump through certain loops to output object description
- * into libcfs_debug_msg-based log. The problem is that lu_object_print()
- * composes object description from strings that are parts of _lines_ of
- * output (i.e., strings that are not terminated by newline). This doesn't fit
- * very well into libcfs_debug_msg() interface that assumes that each message
- * supplied to it is a self-contained output line.
- *
- * To work around this, strings are collected in a temporary buffer
- * (implemented as a value of lu_cdebug_key key), until terminating newline
- * character is detected.
- *
- */
-
-enum {
- /**
- * Maximal line size.
- *
- * XXX overflow is not handled correctly.
- */
- LU_CDEBUG_LINE = 512
-};
-
-struct lu_cdebug_data {
- /**
- * Temporary buffer.
- */
- char lck_area[LU_CDEBUG_LINE];
-};
-
-/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
-LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
-
-/**
- * Key, holding temporary buffer. This key is registered very early by
- * lu_global_init().
- */
-static struct lu_context_key lu_global_key = {
- .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
- LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
- .lct_init = lu_global_key_init,
- .lct_fini = lu_global_key_fini
-};
-
-/**
- * Printer function emitting messages through libcfs_debug_msg().
- */
-int lu_cdebug_printer(const struct lu_env *env,
- void *cookie, const char *format, ...)
-{
- struct libcfs_debug_msg_data *msgdata = cookie;
- struct lu_cdebug_data *key;
- int used;
- int complete;
- va_list args;
-
- va_start(args, format);
-
- key = lu_context_key_get(&env->le_ctx, &lu_global_key);
-
- used = strlen(key->lck_area);
- complete = format[strlen(format) - 1] == '\n';
- /*
- * Append new chunk to the buffer.
- */
- vsnprintf(key->lck_area + used,
- ARRAY_SIZE(key->lck_area) - used, format, args);
- if (complete) {
- if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
- libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
- key->lck_area[0] = 0;
- }
- va_end(args);
- return 0;
-}
-EXPORT_SYMBOL(lu_cdebug_printer);
-
-/**
- * Print object header.
- */
-void lu_object_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct lu_object_header *hdr)
-{
- (*printer)(env, cookie, "header@%p[%#lx, %d, " DFID "%s%s%s]",
- hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
- PFID(&hdr->loh_fid),
- hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
- list_empty((struct list_head *)&hdr->loh_lru) ? \
- "" : " lru",
- hdr->loh_attr & LOHA_EXISTS ? " exist":"");
-}
-EXPORT_SYMBOL(lu_object_header_print);
-
-/**
- * Print human readable representation of the \a o to the \a printer.
- */
-void lu_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct lu_object *o)
-{
- static const char ruler[] = "........................................";
- struct lu_object_header *top;
- int depth = 4;
-
- top = o->lo_header;
- lu_object_header_print(env, cookie, printer, top);
- (*printer)(env, cookie, "{\n");
-
- list_for_each_entry(o, &top->loh_layers, lo_linkage) {
- /*
- * print `.' \a depth times followed by type name and address
- */
- (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
- o->lo_dev->ld_type->ldt_name, o);
-
- if (o->lo_ops->loo_object_print)
- (*o->lo_ops->loo_object_print)(env, cookie, printer, o);
-
- (*printer)(env, cookie, "\n");
- }
-
- (*printer)(env, cookie, "} header@%p\n", top);
-}
-EXPORT_SYMBOL(lu_object_print);
-
-static struct lu_object *htable_lookup(struct lu_site *s,
- struct cfs_hash_bd *bd,
- const struct lu_fid *f,
- wait_queue_entry_t *waiter,
- __u64 *version)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_object_header *h;
- struct hlist_node *hnode;
- __u64 ver = cfs_hash_bd_version_get(bd);
-
- if (*version == ver)
- return ERR_PTR(-ENOENT);
-
- *version = ver;
- bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
- /* cfs_hash_bd_peek_locked is a somehow "internal" function
- * of cfs_hash, it doesn't add refcount on object.
- */
- hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
- if (!hnode) {
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
- return ERR_PTR(-ENOENT);
- }
-
- h = container_of0(hnode, struct lu_object_header, loh_hash);
- if (likely(!lu_object_is_dying(h))) {
- cfs_hash_get(s->ls_obj_hash, hnode);
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
- if (!list_empty(&h->loh_lru)) {
- list_del_init(&h->loh_lru);
- bkt->lsb_lru_len--;
- percpu_counter_dec(&s->ls_lru_len_counter);
- }
- return lu_object_top(h);
- }
-
- /*
- * Lookup found an object being destroyed this object cannot be
- * returned (to assure that references to dying objects are eventually
- * drained), and moreover, lookup has to wait until object is freed.
- */
-
- init_waitqueue_entry(waiter, current);
- add_wait_queue(&bkt->lsb_marche_funebre, waiter);
- set_current_state(TASK_UNINTERRUPTIBLE);
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
- return ERR_PTR(-EAGAIN);
-}
-
-/**
- * Search cache for an object with the fid \a f. If such object is found,
- * return it. Otherwise, create new object, insert it into cache and return
- * it. In any case, additional reference is acquired on the returned object.
- */
-static struct lu_object *lu_object_find(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
-}
-
-/*
- * Limit the lu_object cache to a maximum of lu_cache_nr objects. Because
- * the calculation for the number of objects to reclaim is not covered by
- * a lock the maximum number of objects is capped by LU_CACHE_MAX_ADJUST.
- * This ensures that many concurrent threads will not accidentally purge
- * the entire cache.
- */
-static void lu_object_limit(const struct lu_env *env, struct lu_device *dev)
-{
- __u64 size, nr;
-
- if (lu_cache_nr == LU_CACHE_NR_UNLIMITED)
- return;
-
- size = cfs_hash_size_get(dev->ld_site->ls_obj_hash);
- nr = (__u64)lu_cache_nr;
- if (size <= nr)
- return;
-
- lu_site_purge_objects(env, dev->ld_site,
- min_t(__u64, size - nr, LU_CACHE_NR_MAX_ADJUST),
- false);
-}
-
-static struct lu_object *lu_object_new(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_object *o;
- struct cfs_hash *hs;
- struct cfs_hash_bd bd;
-
- o = lu_object_alloc(env, dev, f, conf);
- if (IS_ERR(o))
- return o;
-
- hs = dev->ld_site->ls_obj_hash;
- cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
- cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
- cfs_hash_bd_unlock(hs, &bd, 1);
-
- lu_object_limit(env, dev);
-
- return o;
-}
-
-/**
- * Core logic of lu_object_find*() functions.
- */
-static struct lu_object *lu_object_find_try(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf,
- wait_queue_entry_t *waiter)
-{
- struct lu_object *o;
- struct lu_object *shadow;
- struct lu_site *s;
- struct cfs_hash *hs;
- struct cfs_hash_bd bd;
- __u64 version = 0;
-
- /*
- * This uses standard index maintenance protocol:
- *
- * - search index under lock, and return object if found;
- * - otherwise, unlock index, allocate new object;
- * - lock index and search again;
- * - if nothing is found (usual case), insert newly created
- * object into index;
- * - otherwise (race: other thread inserted object), free
- * object just allocated.
- * - unlock index;
- * - return object.
- *
- * For "LOC_F_NEW" case, we are sure the object is new established.
- * It is unnecessary to perform lookup-alloc-lookup-insert, instead,
- * just alloc and insert directly.
- *
- * If dying object is found during index search, add @waiter to the
- * site wait-queue and return ERR_PTR(-EAGAIN).
- */
- if (conf && conf->loc_flags & LOC_F_NEW)
- return lu_object_new(env, dev, f, conf);
-
- s = dev->ld_site;
- hs = s->ls_obj_hash;
- cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
- o = htable_lookup(s, &bd, f, waiter, &version);
- cfs_hash_bd_unlock(hs, &bd, 1);
- if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
- return o;
-
- /*
- * Allocate new object. This may result in rather complicated
- * operations, including fld queries, inode loading, etc.
- */
- o = lu_object_alloc(env, dev, f, conf);
- if (IS_ERR(o))
- return o;
-
- LASSERT(lu_fid_eq(lu_object_fid(o), f));
-
- cfs_hash_bd_lock(hs, &bd, 1);
-
- shadow = htable_lookup(s, &bd, f, waiter, &version);
- if (likely(PTR_ERR(shadow) == -ENOENT)) {
- cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
- cfs_hash_bd_unlock(hs, &bd, 1);
-
- lu_object_limit(env, dev);
-
- return o;
- }
-
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
- cfs_hash_bd_unlock(hs, &bd, 1);
- lu_object_free(env, o);
- return shadow;
-}
-
-/**
- * Much like lu_object_find(), but top level device of object is specifically
- * \a dev rather than top level device of the site. This interface allows
- * objects of different "stacking" to be created within the same site.
- */
-struct lu_object *lu_object_find_at(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_object *obj;
- wait_queue_entry_t wait;
-
- while (1) {
- obj = lu_object_find_try(env, dev, f, conf, &wait);
- if (obj != ERR_PTR(-EAGAIN))
- return obj;
- /*
- * lu_object_find_try() already added waiter into the
- * wait queue.
- */
- schedule();
- bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
- remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
- }
-}
-EXPORT_SYMBOL(lu_object_find_at);
-
-/**
- * Find object with given fid, and return its slice belonging to given device.
- */
-struct lu_object *lu_object_find_slice(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_object *top;
- struct lu_object *obj;
-
- top = lu_object_find(env, dev, f, conf);
- if (IS_ERR(top))
- return top;
-
- obj = lu_object_locate(top->lo_header, dev->ld_type);
- if (unlikely(!obj)) {
- lu_object_put(env, top);
- obj = ERR_PTR(-ENOENT);
- }
-
- return obj;
-}
-EXPORT_SYMBOL(lu_object_find_slice);
-
-/**
- * Global list of all device types.
- */
-static LIST_HEAD(lu_device_types);
-
-int lu_device_type_init(struct lu_device_type *ldt)
-{
- int result = 0;
-
- atomic_set(&ldt->ldt_device_nr, 0);
- INIT_LIST_HEAD(&ldt->ldt_linkage);
- if (ldt->ldt_ops->ldto_init)
- result = ldt->ldt_ops->ldto_init(ldt);
-
- if (!result) {
- spin_lock(&obd_types_lock);
- list_add(&ldt->ldt_linkage, &lu_device_types);
- spin_unlock(&obd_types_lock);
- }
-
- return result;
-}
-EXPORT_SYMBOL(lu_device_type_init);
-
-void lu_device_type_fini(struct lu_device_type *ldt)
-{
- spin_lock(&obd_types_lock);
- list_del_init(&ldt->ldt_linkage);
- spin_unlock(&obd_types_lock);
- if (ldt->ldt_ops->ldto_fini)
- ldt->ldt_ops->ldto_fini(ldt);
-}
-EXPORT_SYMBOL(lu_device_type_fini);
-
-/**
- * Global list of all sites on this node
- */
-static LIST_HEAD(lu_sites);
-static DECLARE_RWSEM(lu_sites_guard);
-
-/**
- * Global environment used by site shrinker.
- */
-static struct lu_env lu_shrink_env;
-
-struct lu_site_print_arg {
- struct lu_env *lsp_env;
- void *lsp_cookie;
- lu_printer_t lsp_printer;
-};
-
-static int
-lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *data)
-{
- struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
- struct lu_object_header *h;
-
- h = hlist_entry(hnode, struct lu_object_header, loh_hash);
- if (!list_empty(&h->loh_layers)) {
- const struct lu_object *o;
-
- o = lu_object_top(h);
- lu_object_print(arg->lsp_env, arg->lsp_cookie,
- arg->lsp_printer, o);
- } else {
- lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
- arg->lsp_printer, h);
- }
- return 0;
-}
-
-/**
- * Print all objects in \a s.
- */
-void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
- lu_printer_t printer)
-{
- struct lu_site_print_arg arg = {
- .lsp_env = (struct lu_env *)env,
- .lsp_cookie = cookie,
- .lsp_printer = printer,
- };
-
- cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
-}
-EXPORT_SYMBOL(lu_site_print);
-
-/**
- * Return desired hash table order.
- */
-static unsigned long lu_htable_order(struct lu_device *top)
-{
- unsigned long bits_max = LU_SITE_BITS_MAX;
- unsigned long cache_size;
- unsigned long bits;
-
- if (!strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME))
- bits_max = LU_SITE_BITS_MAX_CL;
-
- /*
- * Calculate hash table size, assuming that we want reasonable
- * performance when 20% of total memory is occupied by cache of
- * lu_objects.
- *
- * Size of lu_object is (arbitrary) taken as 1K (together with inode).
- */
- cache_size = totalram_pages;
-
-#if BITS_PER_LONG == 32
- /* limit hashtable size for lowmem systems to low RAM */
- if (cache_size > 1 << (30 - PAGE_SHIFT))
- cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
-#endif
-
- /* clear off unreasonable cache setting. */
- if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
- CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
- lu_cache_percent, LU_CACHE_PERCENT_MAX,
- LU_CACHE_PERCENT_DEFAULT);
-
- lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
- }
- cache_size = cache_size / 100 * lu_cache_percent *
- (PAGE_SIZE / 1024);
-
- for (bits = 1; (1 << bits) < cache_size; ++bits)
- ;
- return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
-}
-
-static unsigned int lu_obj_hop_hash(struct cfs_hash *hs,
- const void *key, unsigned int mask)
-{
- struct lu_fid *fid = (struct lu_fid *)key;
- __u32 hash;
-
- hash = fid_flatten32(fid);
- hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
- hash = hash_long(hash, hs->hs_bkt_bits);
-
- /* give me another random factor */
- hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
-
- hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
- hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
-
- return hash & mask;
-}
-
-static void *lu_obj_hop_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct lu_object_header, loh_hash);
-}
-
-static void *lu_obj_hop_key(struct hlist_node *hnode)
-{
- struct lu_object_header *h;
-
- h = hlist_entry(hnode, struct lu_object_header, loh_hash);
- return &h->loh_fid;
-}
-
-static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct lu_object_header *h;
-
- h = hlist_entry(hnode, struct lu_object_header, loh_hash);
- return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
-}
-
-static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct lu_object_header *h;
-
- h = hlist_entry(hnode, struct lu_object_header, loh_hash);
- atomic_inc(&h->loh_ref);
-}
-
-static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- LBUG(); /* we should never called it */
-}
-
-static struct cfs_hash_ops lu_site_hash_ops = {
- .hs_hash = lu_obj_hop_hash,
- .hs_key = lu_obj_hop_key,
- .hs_keycmp = lu_obj_hop_keycmp,
- .hs_object = lu_obj_hop_object,
- .hs_get = lu_obj_hop_get,
- .hs_put_locked = lu_obj_hop_put_locked,
-};
-
-static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
-{
- spin_lock(&s->ls_ld_lock);
- if (list_empty(&d->ld_linkage))
- list_add(&d->ld_linkage, &s->ls_ld_linkage);
- spin_unlock(&s->ls_ld_lock);
-}
-
-/**
- * Initialize site \a s, with \a d as the top level device.
- */
-int lu_site_init(struct lu_site *s, struct lu_device *top)
-{
- struct lu_site_bkt_data *bkt;
- struct cfs_hash_bd bd;
- unsigned long bits;
- unsigned long i;
- char name[16];
- int rc;
-
- memset(s, 0, sizeof(*s));
- mutex_init(&s->ls_purge_mutex);
-
- rc = percpu_counter_init(&s->ls_lru_len_counter, 0, GFP_NOFS);
- if (rc)
- return -ENOMEM;
-
- snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
- for (bits = lu_htable_order(top); bits >= LU_SITE_BITS_MIN; bits--) {
- s->ls_obj_hash = cfs_hash_create(name, bits, bits,
- bits - LU_SITE_BKT_BITS,
- sizeof(*bkt), 0, 0,
- &lu_site_hash_ops,
- CFS_HASH_SPIN_BKTLOCK |
- CFS_HASH_NO_ITEMREF |
- CFS_HASH_DEPTH |
- CFS_HASH_ASSERT_EMPTY |
- CFS_HASH_COUNTER);
- if (s->ls_obj_hash)
- break;
- }
-
- if (!s->ls_obj_hash) {
- CERROR("failed to create lu_site hash with bits: %lu\n", bits);
- return -ENOMEM;
- }
-
- cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
- bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
- INIT_LIST_HEAD(&bkt->lsb_lru);
- init_waitqueue_head(&bkt->lsb_marche_funebre);
- }
-
- s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
- if (!s->ls_stats) {
- cfs_hash_putref(s->ls_obj_hash);
- s->ls_obj_hash = NULL;
- return -ENOMEM;
- }
-
- lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
- 0, "created", "created");
- lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
- 0, "cache_hit", "cache_hit");
- lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
- 0, "cache_miss", "cache_miss");
- lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
- 0, "cache_race", "cache_race");
- lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
- 0, "cache_death_race", "cache_death_race");
- lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
- 0, "lru_purged", "lru_purged");
-
- INIT_LIST_HEAD(&s->ls_linkage);
- s->ls_top_dev = top;
- top->ld_site = s;
- lu_device_get(top);
- lu_ref_add(&top->ld_reference, "site-top", s);
-
- INIT_LIST_HEAD(&s->ls_ld_linkage);
- spin_lock_init(&s->ls_ld_lock);
-
- lu_dev_add_linkage(s, top);
-
- return 0;
-}
-EXPORT_SYMBOL(lu_site_init);
-
-/**
- * Finalize \a s and release its resources.
- */
-void lu_site_fini(struct lu_site *s)
-{
- down_write(&lu_sites_guard);
- list_del_init(&s->ls_linkage);
- up_write(&lu_sites_guard);
-
- percpu_counter_destroy(&s->ls_lru_len_counter);
-
- if (s->ls_obj_hash) {
- cfs_hash_putref(s->ls_obj_hash);
- s->ls_obj_hash = NULL;
- }
-
- if (s->ls_top_dev) {
- s->ls_top_dev->ld_site = NULL;
- lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
- lu_device_put(s->ls_top_dev);
- s->ls_top_dev = NULL;
- }
-
- if (s->ls_stats)
- lprocfs_free_stats(&s->ls_stats);
-}
-EXPORT_SYMBOL(lu_site_fini);
-
-/**
- * Called when initialization of stack for this site is completed.
- */
-int lu_site_init_finish(struct lu_site *s)
-{
- int result;
-
- down_write(&lu_sites_guard);
- result = lu_context_refill(&lu_shrink_env.le_ctx);
- if (result == 0)
- list_add(&s->ls_linkage, &lu_sites);
- up_write(&lu_sites_guard);
- return result;
-}
-EXPORT_SYMBOL(lu_site_init_finish);
-
-/**
- * Acquire additional reference on device \a d
- */
-void lu_device_get(struct lu_device *d)
-{
- atomic_inc(&d->ld_ref);
-}
-EXPORT_SYMBOL(lu_device_get);
-
-/**
- * Release reference on device \a d.
- */
-void lu_device_put(struct lu_device *d)
-{
- LASSERT(atomic_read(&d->ld_ref) > 0);
- atomic_dec(&d->ld_ref);
-}
-EXPORT_SYMBOL(lu_device_put);
-
-/**
- * Initialize device \a d of type \a t.
- */
-int lu_device_init(struct lu_device *d, struct lu_device_type *t)
-{
- if (atomic_inc_return(&t->ldt_device_nr) == 1 &&
- t->ldt_ops->ldto_start)
- t->ldt_ops->ldto_start(t);
-
- memset(d, 0, sizeof(*d));
- atomic_set(&d->ld_ref, 0);
- d->ld_type = t;
- lu_ref_init(&d->ld_reference);
- INIT_LIST_HEAD(&d->ld_linkage);
- return 0;
-}
-EXPORT_SYMBOL(lu_device_init);
-
-/**
- * Finalize device \a d.
- */
-void lu_device_fini(struct lu_device *d)
-{
- struct lu_device_type *t = d->ld_type;
-
- if (d->ld_obd) {
- d->ld_obd->obd_lu_dev = NULL;
- d->ld_obd = NULL;
- }
-
- lu_ref_fini(&d->ld_reference);
- LASSERTF(atomic_read(&d->ld_ref) == 0,
- "Refcount is %u\n", atomic_read(&d->ld_ref));
- LASSERT(atomic_read(&t->ldt_device_nr) > 0);
-
- if (atomic_dec_and_test(&t->ldt_device_nr) &&
- t->ldt_ops->ldto_stop)
- t->ldt_ops->ldto_stop(t);
-}
-EXPORT_SYMBOL(lu_device_fini);
-
-/**
- * Initialize object \a o that is part of compound object \a h and was created
- * by device \a d.
- */
-int lu_object_init(struct lu_object *o, struct lu_object_header *h,
- struct lu_device *d)
-{
- memset(o, 0, sizeof(*o));
- o->lo_header = h;
- o->lo_dev = d;
- lu_device_get(d);
- lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
- INIT_LIST_HEAD(&o->lo_linkage);
-
- return 0;
-}
-EXPORT_SYMBOL(lu_object_init);
-
-/**
- * Finalize object and release its resources.
- */
-void lu_object_fini(struct lu_object *o)
-{
- struct lu_device *dev = o->lo_dev;
-
- LASSERT(list_empty(&o->lo_linkage));
-
- if (dev) {
- lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
- "lu_object", o);
- lu_device_put(dev);
- o->lo_dev = NULL;
- }
-}
-EXPORT_SYMBOL(lu_object_fini);
-
-/**
- * Add object \a o as first layer of compound object \a h
- *
- * This is typically called by the ->ldo_object_alloc() method of top-level
- * device.
- */
-void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
-{
- list_move(&o->lo_linkage, &h->loh_layers);
-}
-EXPORT_SYMBOL(lu_object_add_top);
-
-/**
- * Add object \a o as a layer of compound object, going after \a before.
- *
- * This is typically called by the ->ldo_object_alloc() method of \a
- * before->lo_dev.
- */
-void lu_object_add(struct lu_object *before, struct lu_object *o)
-{
- list_move(&o->lo_linkage, &before->lo_linkage);
-}
-EXPORT_SYMBOL(lu_object_add);
-
-/**
- * Initialize compound object.
- */
-int lu_object_header_init(struct lu_object_header *h)
-{
- memset(h, 0, sizeof(*h));
- atomic_set(&h->loh_ref, 1);
- INIT_HLIST_NODE(&h->loh_hash);
- INIT_LIST_HEAD(&h->loh_lru);
- INIT_LIST_HEAD(&h->loh_layers);
- lu_ref_init(&h->loh_reference);
- return 0;
-}
-EXPORT_SYMBOL(lu_object_header_init);
-
-/**
- * Finalize compound object.
- */
-void lu_object_header_fini(struct lu_object_header *h)
-{
- LASSERT(list_empty(&h->loh_layers));
- LASSERT(list_empty(&h->loh_lru));
- LASSERT(hlist_unhashed(&h->loh_hash));
- lu_ref_fini(&h->loh_reference);
-}
-EXPORT_SYMBOL(lu_object_header_fini);
-
-/**
- * Given a compound object, find its slice, corresponding to the device type
- * \a dtype.
- */
-struct lu_object *lu_object_locate(struct lu_object_header *h,
- const struct lu_device_type *dtype)
-{
- struct lu_object *o;
-
- list_for_each_entry(o, &h->loh_layers, lo_linkage) {
- if (o->lo_dev->ld_type == dtype)
- return o;
- }
- return NULL;
-}
-EXPORT_SYMBOL(lu_object_locate);
-
-/**
- * Finalize and free devices in the device stack.
- *
- * Finalize device stack by purging object cache, and calling
- * lu_device_type_operations::ldto_device_fini() and
- * lu_device_type_operations::ldto_device_free() on all devices in the stack.
- */
-void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
-{
- struct lu_site *site = top->ld_site;
- struct lu_device *scan;
- struct lu_device *next;
-
- lu_site_purge(env, site, ~0);
- for (scan = top; scan; scan = next) {
- next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
- lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
- lu_device_put(scan);
- }
-
- /* purge again. */
- lu_site_purge(env, site, ~0);
-
- for (scan = top; scan; scan = next) {
- const struct lu_device_type *ldt = scan->ld_type;
- struct obd_type *type;
-
- next = ldt->ldt_ops->ldto_device_free(env, scan);
- type = ldt->ldt_obd_type;
- if (type) {
- type->typ_refcnt--;
- class_put_type(type);
- }
- }
-}
-
-enum {
- /**
- * Maximal number of tld slots.
- */
- LU_CONTEXT_KEY_NR = 40
-};
-
-static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
-
-static DEFINE_SPINLOCK(lu_keys_guard);
-static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
-
-/**
- * Global counter incremented whenever key is registered, unregistered,
- * revived or quiesced. This is used to void unnecessary calls to
- * lu_context_refill(). No locking is provided, as initialization and shutdown
- * are supposed to be externally serialized.
- */
-static unsigned int key_set_version;
-
-/**
- * Register new key.
- */
-int lu_context_key_register(struct lu_context_key *key)
-{
- int result;
- unsigned int i;
-
- LASSERT(key->lct_init);
- LASSERT(key->lct_fini);
- LASSERT(key->lct_tags != 0);
-
- result = -ENFILE;
- spin_lock(&lu_keys_guard);
- for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
- if (!lu_keys[i]) {
- key->lct_index = i;
- atomic_set(&key->lct_used, 1);
- lu_keys[i] = key;
- lu_ref_init(&key->lct_reference);
- result = 0;
- ++key_set_version;
- break;
- }
- }
- spin_unlock(&lu_keys_guard);
- return result;
-}
-EXPORT_SYMBOL(lu_context_key_register);
-
-static void key_fini(struct lu_context *ctx, int index)
-{
- if (ctx->lc_value && ctx->lc_value[index]) {
- struct lu_context_key *key;
-
- key = lu_keys[index];
- LASSERT(atomic_read(&key->lct_used) > 1);
-
- key->lct_fini(ctx, key, ctx->lc_value[index]);
- lu_ref_del(&key->lct_reference, "ctx", ctx);
- atomic_dec(&key->lct_used);
-
- if ((ctx->lc_tags & LCT_NOREF) == 0)
- module_put(key->lct_owner);
- ctx->lc_value[index] = NULL;
- }
-}
-
-/**
- * Deregister key.
- */
-void lu_context_key_degister(struct lu_context_key *key)
-{
- LASSERT(atomic_read(&key->lct_used) >= 1);
- LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
-
- lu_context_key_quiesce(key);
-
- ++key_set_version;
- spin_lock(&lu_keys_guard);
- key_fini(&lu_shrink_env.le_ctx, key->lct_index);
-
- /**
- * Wait until all transient contexts referencing this key have
- * run lu_context_key::lct_fini() method.
- */
- while (atomic_read(&key->lct_used) > 1) {
- spin_unlock(&lu_keys_guard);
- CDEBUG(D_INFO, "%s: \"%s\" %p, %d\n",
- __func__, module_name(key->lct_owner),
- key, atomic_read(&key->lct_used));
- schedule();
- spin_lock(&lu_keys_guard);
- }
- if (lu_keys[key->lct_index]) {
- lu_keys[key->lct_index] = NULL;
- lu_ref_fini(&key->lct_reference);
- }
- spin_unlock(&lu_keys_guard);
-
- LASSERTF(atomic_read(&key->lct_used) == 1,
- "key has instances: %d\n",
- atomic_read(&key->lct_used));
-}
-EXPORT_SYMBOL(lu_context_key_degister);
-
-/**
- * Register a number of keys. This has to be called after all keys have been
- * initialized by a call to LU_CONTEXT_KEY_INIT().
- */
-int lu_context_key_register_many(struct lu_context_key *k, ...)
-{
- struct lu_context_key *key = k;
- va_list args;
- int result;
-
- va_start(args, k);
- do {
- result = lu_context_key_register(key);
- if (result)
- break;
- key = va_arg(args, struct lu_context_key *);
- } while (key);
- va_end(args);
-
- if (result != 0) {
- va_start(args, k);
- while (k != key) {
- lu_context_key_degister(k);
- k = va_arg(args, struct lu_context_key *);
- }
- va_end(args);
- }
-
- return result;
-}
-EXPORT_SYMBOL(lu_context_key_register_many);
-
-/**
- * De-register a number of keys. This is a dual to
- * lu_context_key_register_many().
- */
-void lu_context_key_degister_many(struct lu_context_key *k, ...)
-{
- va_list args;
-
- va_start(args, k);
- do {
- lu_context_key_degister(k);
- k = va_arg(args, struct lu_context_key*);
- } while (k);
- va_end(args);
-}
-EXPORT_SYMBOL(lu_context_key_degister_many);
-
-/**
- * Revive a number of keys.
- */
-void lu_context_key_revive_many(struct lu_context_key *k, ...)
-{
- va_list args;
-
- va_start(args, k);
- do {
- lu_context_key_revive(k);
- k = va_arg(args, struct lu_context_key*);
- } while (k);
- va_end(args);
-}
-EXPORT_SYMBOL(lu_context_key_revive_many);
-
-/**
- * Quiescent a number of keys.
- */
-void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
-{
- va_list args;
-
- va_start(args, k);
- do {
- lu_context_key_quiesce(k);
- k = va_arg(args, struct lu_context_key*);
- } while (k);
- va_end(args);
-}
-EXPORT_SYMBOL(lu_context_key_quiesce_many);
-
-/**
- * Return value associated with key \a key in context \a ctx.
- */
-void *lu_context_key_get(const struct lu_context *ctx,
- const struct lu_context_key *key)
-{
- LINVRNT(ctx->lc_state == LCS_ENTERED);
- LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
- LASSERT(lu_keys[key->lct_index] == key);
- return ctx->lc_value[key->lct_index];
-}
-EXPORT_SYMBOL(lu_context_key_get);
-
-/**
- * List of remembered contexts. XXX document me.
- */
-static LIST_HEAD(lu_context_remembered);
-
-/**
- * Destroy \a key in all remembered contexts. This is used to destroy key
- * values in "shared" contexts (like service threads), when a module owning
- * the key is about to be unloaded.
- */
-void lu_context_key_quiesce(struct lu_context_key *key)
-{
- struct lu_context *ctx;
-
- if (!(key->lct_tags & LCT_QUIESCENT)) {
- /*
- * XXX memory barrier has to go here.
- */
- spin_lock(&lu_keys_guard);
- key->lct_tags |= LCT_QUIESCENT;
-
- /**
- * Wait until all lu_context_key::lct_init() methods
- * have completed.
- */
- while (atomic_read(&lu_key_initing_cnt) > 0) {
- spin_unlock(&lu_keys_guard);
- CDEBUG(D_INFO, "%s: \"%s\" %p, %d (%d)\n",
- __func__,
- module_name(key->lct_owner),
- key, atomic_read(&key->lct_used),
- atomic_read(&lu_key_initing_cnt));
- schedule();
- spin_lock(&lu_keys_guard);
- }
-
- list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
- key_fini(ctx, key->lct_index);
- spin_unlock(&lu_keys_guard);
- ++key_set_version;
- }
-}
-
-void lu_context_key_revive(struct lu_context_key *key)
-{
- key->lct_tags &= ~LCT_QUIESCENT;
- ++key_set_version;
-}
-
-static void keys_fini(struct lu_context *ctx)
-{
- unsigned int i;
-
- if (!ctx->lc_value)
- return;
-
- for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
- key_fini(ctx, i);
-
- kfree(ctx->lc_value);
- ctx->lc_value = NULL;
-}
-
-static int keys_fill(struct lu_context *ctx)
-{
- unsigned int i;
-
- /*
- * A serialisation with lu_context_key_quiesce() is needed, but some
- * "key->lct_init()" are calling kernel memory allocation routine and
- * can't be called while holding a spin_lock.
- * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
- * to ensure the start of the serialisation.
- * An atomic_t variable is still used, in order not to reacquire the
- * lock when decrementing the counter.
- */
- spin_lock(&lu_keys_guard);
- atomic_inc(&lu_key_initing_cnt);
- spin_unlock(&lu_keys_guard);
-
- LINVRNT(ctx->lc_value);
- for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
- struct lu_context_key *key;
-
- key = lu_keys[i];
- if (!ctx->lc_value[i] && key &&
- (key->lct_tags & ctx->lc_tags) &&
- /*
- * Don't create values for a LCT_QUIESCENT key, as this
- * will pin module owning a key.
- */
- !(key->lct_tags & LCT_QUIESCENT)) {
- void *value;
-
- LINVRNT(key->lct_init);
- LINVRNT(key->lct_index == i);
-
- if (!(ctx->lc_tags & LCT_NOREF) &&
- !try_module_get(key->lct_owner)) {
- /* module is unloading, skip this key */
- continue;
- }
-
- value = key->lct_init(ctx, key);
- if (unlikely(IS_ERR(value))) {
- atomic_dec(&lu_key_initing_cnt);
- return PTR_ERR(value);
- }
-
- lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
- atomic_inc(&key->lct_used);
- /*
- * This is the only place in the code, where an
- * element of ctx->lc_value[] array is set to non-NULL
- * value.
- */
- ctx->lc_value[i] = value;
- if (key->lct_exit)
- ctx->lc_tags |= LCT_HAS_EXIT;
- }
- ctx->lc_version = key_set_version;
- }
- atomic_dec(&lu_key_initing_cnt);
- return 0;
-}
-
-static int keys_init(struct lu_context *ctx)
-{
- ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]),
- GFP_NOFS);
- if (likely(ctx->lc_value))
- return keys_fill(ctx);
-
- return -ENOMEM;
-}
-
-/**
- * Initialize context data-structure. Create values for all keys.
- */
-int lu_context_init(struct lu_context *ctx, __u32 tags)
-{
- int rc;
-
- memset(ctx, 0, sizeof(*ctx));
- ctx->lc_state = LCS_INITIALIZED;
- ctx->lc_tags = tags;
- if (tags & LCT_REMEMBER) {
- spin_lock(&lu_keys_guard);
- list_add(&ctx->lc_remember, &lu_context_remembered);
- spin_unlock(&lu_keys_guard);
- } else {
- INIT_LIST_HEAD(&ctx->lc_remember);
- }
-
- rc = keys_init(ctx);
- if (rc != 0)
- lu_context_fini(ctx);
-
- return rc;
-}
-EXPORT_SYMBOL(lu_context_init);
-
-/**
- * Finalize context data-structure. Destroy key values.
- */
-void lu_context_fini(struct lu_context *ctx)
-{
- LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
- ctx->lc_state = LCS_FINALIZED;
-
- if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
- LASSERT(list_empty(&ctx->lc_remember));
- keys_fini(ctx);
-
- } else { /* could race with key degister */
- spin_lock(&lu_keys_guard);
- keys_fini(ctx);
- list_del_init(&ctx->lc_remember);
- spin_unlock(&lu_keys_guard);
- }
-}
-EXPORT_SYMBOL(lu_context_fini);
-
-/**
- * Called before entering context.
- */
-void lu_context_enter(struct lu_context *ctx)
-{
- LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
- ctx->lc_state = LCS_ENTERED;
-}
-EXPORT_SYMBOL(lu_context_enter);
-
-/**
- * Called after exiting from \a ctx
- */
-void lu_context_exit(struct lu_context *ctx)
-{
- unsigned int i;
-
- LINVRNT(ctx->lc_state == LCS_ENTERED);
- ctx->lc_state = LCS_LEFT;
- if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
- for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
- /* could race with key quiescency */
- if (ctx->lc_tags & LCT_REMEMBER)
- spin_lock(&lu_keys_guard);
- if (ctx->lc_value[i]) {
- struct lu_context_key *key;
-
- key = lu_keys[i];
- if (key->lct_exit)
- key->lct_exit(ctx,
- key, ctx->lc_value[i]);
- }
- if (ctx->lc_tags & LCT_REMEMBER)
- spin_unlock(&lu_keys_guard);
- }
- }
-}
-EXPORT_SYMBOL(lu_context_exit);
-
-/**
- * Allocate for context all missing keys that were registered after context
- * creation. key_set_version is only changed in rare cases when modules
- * are loaded and removed.
- */
-int lu_context_refill(struct lu_context *ctx)
-{
- return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
-}
-
-/**
- * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
- * obd being added. Currently, this is only used on client side, specifically
- * for echo device client, for other stack (like ptlrpc threads), context are
- * predefined when the lu_device type are registered, during the module probe
- * phase.
- */
-__u32 lu_context_tags_default;
-__u32 lu_session_tags_default;
-
-int lu_env_init(struct lu_env *env, __u32 tags)
-{
- int result;
-
- env->le_ses = NULL;
- result = lu_context_init(&env->le_ctx, tags);
- if (likely(result == 0))
- lu_context_enter(&env->le_ctx);
- return result;
-}
-EXPORT_SYMBOL(lu_env_init);
-
-void lu_env_fini(struct lu_env *env)
-{
- lu_context_exit(&env->le_ctx);
- lu_context_fini(&env->le_ctx);
- env->le_ses = NULL;
-}
-EXPORT_SYMBOL(lu_env_fini);
-
-int lu_env_refill(struct lu_env *env)
-{
- int result;
-
- result = lu_context_refill(&env->le_ctx);
- if (result == 0 && env->le_ses)
- result = lu_context_refill(env->le_ses);
- return result;
-}
-EXPORT_SYMBOL(lu_env_refill);
-
-struct lu_site_stats {
- unsigned int lss_populated;
- unsigned int lss_max_search;
- unsigned int lss_total;
- unsigned int lss_busy;
-};
-
-static void lu_site_stats_get(struct cfs_hash *hs,
- struct lu_site_stats *stats, int populated)
-{
- struct cfs_hash_bd bd;
- unsigned int i;
-
- cfs_hash_for_each_bucket(hs, &bd, i) {
- struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
- struct hlist_head *hhead;
-
- cfs_hash_bd_lock(hs, &bd, 1);
- stats->lss_busy +=
- cfs_hash_bd_count_get(&bd) - bkt->lsb_lru_len;
- stats->lss_total += cfs_hash_bd_count_get(&bd);
- stats->lss_max_search = max((int)stats->lss_max_search,
- cfs_hash_bd_depmax_get(&bd));
- if (!populated) {
- cfs_hash_bd_unlock(hs, &bd, 1);
- continue;
- }
-
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- if (!hlist_empty(hhead))
- stats->lss_populated++;
- }
- cfs_hash_bd_unlock(hs, &bd, 1);
- }
-}
-
-/*
- * lu_cache_shrink_count() returns an approximate number of cached objects
- * that can be freed by shrink_slab(). A counter, which tracks the
- * number of items in the site's lru, is maintained in a percpu_counter
- * for each site. The percpu values are incremented and decremented as
- * objects are added or removed from the lru. The percpu values are summed
- * and saved whenever a percpu value exceeds a threshold. Thus the saved,
- * summed value at any given time may not accurately reflect the current
- * lru length. But this value is sufficiently accurate for the needs of
- * a shrinker.
- *
- * Using a per cpu counter is a compromise solution to concurrent access:
- * lu_object_put() can update the counter without locking the site and
- * lu_cache_shrink_count can sum the counters without locking each
- * ls_obj_hash bucket.
- */
-static unsigned long lu_cache_shrink_count(struct shrinker *sk,
- struct shrink_control *sc)
-{
- struct lu_site *s;
- struct lu_site *tmp;
- unsigned long cached = 0;
-
- if (!(sc->gfp_mask & __GFP_FS))
- return 0;
-
- down_read(&lu_sites_guard);
- list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage)
- cached += percpu_counter_read_positive(&s->ls_lru_len_counter);
- up_read(&lu_sites_guard);
-
- cached = (cached / 100) * sysctl_vfs_cache_pressure;
- CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
- cached, sysctl_vfs_cache_pressure);
-
- return cached;
-}
-
-static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
- struct shrink_control *sc)
-{
- struct lu_site *s;
- struct lu_site *tmp;
- unsigned long remain = sc->nr_to_scan, freed = 0;
- LIST_HEAD(splice);
-
- if (!(sc->gfp_mask & __GFP_FS))
- /* We must not take the lu_sites_guard lock when
- * __GFP_FS is *not* set because of the deadlock
- * possibility detailed above. Additionally,
- * since we cannot determine the number of
- * objects in the cache without taking this
- * lock, we're in a particularly tough spot. As
- * a result, we'll just lie and say our cache is
- * empty. This _should_ be ok, as we can't
- * reclaim objects when __GFP_FS is *not* set
- * anyways.
- */
- return SHRINK_STOP;
-
- down_write(&lu_sites_guard);
- list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
- freed = lu_site_purge(&lu_shrink_env, s, remain);
- remain -= freed;
- /*
- * Move just shrunk site to the tail of site list to
- * assure shrinking fairness.
- */
- list_move_tail(&s->ls_linkage, &splice);
- }
- list_splice(&splice, lu_sites.prev);
- up_write(&lu_sites_guard);
-
- return sc->nr_to_scan - remain;
-}
-
-/**
- * Debugging printer function using printk().
- */
-static struct shrinker lu_site_shrinker = {
- .count_objects = lu_cache_shrink_count,
- .scan_objects = lu_cache_shrink_scan,
- .seeks = DEFAULT_SEEKS,
-};
-
-/**
- * Initialization of global lu_* data.
- */
-int lu_global_init(void)
-{
- int result;
-
- CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
-
- result = lu_ref_global_init();
- if (result != 0)
- return result;
-
- LU_CONTEXT_KEY_INIT(&lu_global_key);
- result = lu_context_key_register(&lu_global_key);
- if (result != 0) {
- lu_ref_global_fini();
- return result;
- }
-
- /*
- * At this level, we don't know what tags are needed, so allocate them
- * conservatively. This should not be too bad, because this
- * environment is global.
- */
- down_write(&lu_sites_guard);
- result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
- up_write(&lu_sites_guard);
- if (result != 0) {
- lu_context_key_degister(&lu_global_key);
- lu_ref_global_fini();
- return result;
- }
-
- /*
- * seeks estimation: 3 seeks to read a record from oi, one to read
- * inode, one for ea. Unfortunately setting this high value results in
- * lu_object/inode cache consuming all the memory.
- */
- result = register_shrinker(&lu_site_shrinker);
- if (result != 0) {
- /* Order explained in lu_global_fini(). */
- lu_context_key_degister(&lu_global_key);
-
- down_write(&lu_sites_guard);
- lu_env_fini(&lu_shrink_env);
- up_write(&lu_sites_guard);
-
- lu_ref_global_fini();
- return result;
- }
-
- return 0;
-}
-
-/**
- * Dual to lu_global_init().
- */
-void lu_global_fini(void)
-{
- unregister_shrinker(&lu_site_shrinker);
- lu_context_key_degister(&lu_global_key);
-
- /*
- * Tear shrinker environment down _after_ de-registering
- * lu_global_key, because the latter has a value in the former.
- */
- down_write(&lu_sites_guard);
- lu_env_fini(&lu_shrink_env);
- up_write(&lu_sites_guard);
-
- lu_ref_global_fini();
-}
-
-static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
-{
- struct lprocfs_counter ret;
-
- lprocfs_stats_collect(stats, idx, &ret);
- return (__u32)ret.lc_count;
-}
-
-/**
- * Output site statistical counters into a buffer. Suitable for
- * lprocfs_rd_*()-style functions.
- */
-int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
-{
- struct lu_site_stats stats;
-
- memset(&stats, 0, sizeof(stats));
- lu_site_stats_get(s->ls_obj_hash, &stats, 1);
-
- seq_printf(m, "%d/%d %d/%ld %d %d %d %d %d %d %d\n",
- stats.lss_busy,
- stats.lss_total,
- stats.lss_populated,
- CFS_HASH_NHLIST(s->ls_obj_hash),
- stats.lss_max_search,
- ls_stats_read(s->ls_stats, LU_SS_CREATED),
- ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
- ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
- ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
- ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
- ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
- return 0;
-}
-EXPORT_SYMBOL(lu_site_stats_print);
-
-/**
- * Helper function to initialize a number of kmem slab caches at once.
- */
-int lu_kmem_init(struct lu_kmem_descr *caches)
-{
- int result;
- struct lu_kmem_descr *iter = caches;
-
- for (result = 0; iter->ckd_cache; ++iter) {
- *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
- iter->ckd_size,
- 0, 0, NULL);
- if (!*iter->ckd_cache) {
- result = -ENOMEM;
- /* free all previously allocated caches */
- lu_kmem_fini(caches);
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(lu_kmem_init);
-
-/**
- * Helper function to finalize a number of kmem slab cached at once. Dual to
- * lu_kmem_init().
- */
-void lu_kmem_fini(struct lu_kmem_descr *caches)
-{
- for (; caches->ckd_cache; ++caches) {
- kmem_cache_destroy(*caches->ckd_cache);
- *caches->ckd_cache = NULL;
- }
-}
-EXPORT_SYMBOL(lu_kmem_fini);
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ref.c b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
deleted file mode 100644
index 54fc88206534..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lu_ref.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lu_ref.c
- *
- * Lustre reference.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lu_ref.h>
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
deleted file mode 100644
index f53b1a3c342e..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lustre_handles.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <obd_support.h>
-#include <lustre_handles.h>
-#include <lustre_lib.h>
-
-static __u64 handle_base;
-#define HANDLE_INCR 7
-static spinlock_t handle_base_lock;
-
-static struct handle_bucket {
- spinlock_t lock;
- struct list_head head;
-} *handle_hash;
-
-#define HANDLE_HASH_SIZE (1 << 16)
-#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
-
-/*
- * Generate a unique 64bit cookie (hash) for a handle and insert it into
- * global (per-node) hash-table.
- */
-void class_handle_hash(struct portals_handle *h,
- struct portals_handle_ops *ops)
-{
- struct handle_bucket *bucket;
-
- LASSERT(h);
- LASSERT(list_empty(&h->h_link));
-
- /*
- * This is fast, but simplistic cookie generation algorithm, it will
- * need a re-do at some point in the future for security.
- */
- spin_lock(&handle_base_lock);
- handle_base += HANDLE_INCR;
-
- if (unlikely(handle_base == 0)) {
- /*
- * Cookie of zero is "dangerous", because in many places it's
- * assumed that 0 means "unassigned" handle, not bound to any
- * object.
- */
- CWARN("The universe has been exhausted: cookie wrap-around.\n");
- handle_base += HANDLE_INCR;
- }
- h->h_cookie = handle_base;
- spin_unlock(&handle_base_lock);
-
- h->h_ops = ops;
- spin_lock_init(&h->h_lock);
-
- bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
- spin_lock(&bucket->lock);
- list_add_rcu(&h->h_link, &bucket->head);
- h->h_in = 1;
- spin_unlock(&bucket->lock);
-
- CDEBUG(D_INFO, "added object %p with handle %#llx to hash\n",
- h, h->h_cookie);
-}
-EXPORT_SYMBOL(class_handle_hash);
-
-static void class_handle_unhash_nolock(struct portals_handle *h)
-{
- if (list_empty(&h->h_link)) {
- CERROR("removing an already-removed handle (%#llx)\n",
- h->h_cookie);
- return;
- }
-
- CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n",
- h, h->h_cookie);
-
- spin_lock(&h->h_lock);
- if (h->h_in == 0) {
- spin_unlock(&h->h_lock);
- return;
- }
- h->h_in = 0;
- spin_unlock(&h->h_lock);
- list_del_rcu(&h->h_link);
-}
-
-void class_handle_unhash(struct portals_handle *h)
-{
- struct handle_bucket *bucket;
-
- bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
-
- spin_lock(&bucket->lock);
- class_handle_unhash_nolock(h);
- spin_unlock(&bucket->lock);
-}
-EXPORT_SYMBOL(class_handle_unhash);
-
-void *class_handle2object(__u64 cookie, const void *owner)
-{
- struct handle_bucket *bucket;
- struct portals_handle *h;
- void *retval = NULL;
-
- LASSERT(handle_hash);
-
- /* Be careful when you want to change this code. See the
- * rcu_read_lock() definition on top this file. - jxiong
- */
- bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
-
- rcu_read_lock();
- list_for_each_entry_rcu(h, &bucket->head, h_link) {
- if (h->h_cookie != cookie || h->h_owner != owner)
- continue;
-
- spin_lock(&h->h_lock);
- if (likely(h->h_in != 0)) {
- h->h_ops->hop_addref(h);
- retval = h;
- }
- spin_unlock(&h->h_lock);
- break;
- }
- rcu_read_unlock();
-
- return retval;
-}
-EXPORT_SYMBOL(class_handle2object);
-
-void class_handle_free_cb(struct rcu_head *rcu)
-{
- struct portals_handle *h;
- void *ptr;
-
- h = container_of(rcu, struct portals_handle, h_rcu);
- ptr = (void *)(unsigned long)h->h_cookie;
-
- if (h->h_ops->hop_free)
- h->h_ops->hop_free(ptr, h->h_size);
- else
- kfree(ptr);
-}
-EXPORT_SYMBOL(class_handle_free_cb);
-
-int class_handle_init(void)
-{
- struct handle_bucket *bucket;
-
- LASSERT(!handle_hash);
-
- handle_hash = kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE,
- GFP_KERNEL);
- if (!handle_hash)
- return -ENOMEM;
-
- spin_lock_init(&handle_base_lock);
- for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
- bucket--) {
- INIT_LIST_HEAD(&bucket->head);
- spin_lock_init(&bucket->lock);
- }
-
- get_random_bytes(&handle_base, sizeof(handle_base));
- LASSERT(handle_base != 0ULL);
-
- return 0;
-}
-
-static int cleanup_all_handles(void)
-{
- int rc;
- int i;
-
- for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
- struct portals_handle *h;
-
- spin_lock(&handle_hash[i].lock);
- list_for_each_entry_rcu(h, &handle_hash[i].head, h_link) {
- CERROR("force clean handle %#llx addr %p ops %p\n",
- h->h_cookie, h, h->h_ops);
-
- class_handle_unhash_nolock(h);
- rc++;
- }
- spin_unlock(&handle_hash[i].lock);
- }
-
- return rc;
-}
-
-void class_handle_cleanup(void)
-{
- int count;
-
- LASSERT(handle_hash);
-
- count = cleanup_all_handles();
-
- kvfree(handle_hash);
- handle_hash = NULL;
-
- if (count != 0)
- CERROR("handle_count at cleanup: %d\n", count);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
deleted file mode 100644
index e286a2665423..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ /dev/null
@@ -1,214 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <obd.h>
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_lib.h>
-#include <lustre_ha.h>
-#include <lustre_net.h>
-#include <lprocfs_status.h>
-
-#define NIDS_MAX 32
-
-struct uuid_nid_data {
- struct list_head un_list;
- struct obd_uuid un_uuid;
- int un_nid_count;
- lnet_nid_t un_nids[NIDS_MAX];
-};
-
-/* FIXME: This should probably become more elegant than a global linked list */
-static struct list_head g_uuid_list;
-static spinlock_t g_uuid_lock;
-
-void class_init_uuidlist(void)
-{
- INIT_LIST_HEAD(&g_uuid_list);
- spin_lock_init(&g_uuid_lock);
-}
-
-void class_exit_uuidlist(void)
-{
- /* delete all */
- class_del_uuid(NULL);
-}
-
-int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index)
-{
- struct uuid_nid_data *data;
- struct obd_uuid tmp;
- int rc = -ENOENT;
-
- obd_str2uuid(&tmp, uuid);
- spin_lock(&g_uuid_lock);
- list_for_each_entry(data, &g_uuid_list, un_list) {
- if (obd_uuid_equals(&data->un_uuid, &tmp)) {
- if (index >= data->un_nid_count)
- break;
-
- rc = 0;
- *peer_nid = data->un_nids[index];
- break;
- }
- }
- spin_unlock(&g_uuid_lock);
- return rc;
-}
-EXPORT_SYMBOL(lustre_uuid_to_peer);
-
-/* Add a nid to a niduuid. Multiple nids can be added to a single uuid;
- * LNET will choose the best one.
- */
-int class_add_uuid(const char *uuid, __u64 nid)
-{
- struct uuid_nid_data *data, *entry;
- int found = 0;
-
- LASSERT(nid != 0); /* valid newconfig NID is never zero */
-
- if (strlen(uuid) > UUID_MAX - 1)
- return -EOVERFLOW;
-
- data = kzalloc(sizeof(*data), GFP_NOFS);
- if (!data)
- return -ENOMEM;
-
- obd_str2uuid(&data->un_uuid, uuid);
- data->un_nids[0] = nid;
- data->un_nid_count = 1;
-
- spin_lock(&g_uuid_lock);
- list_for_each_entry(entry, &g_uuid_list, un_list) {
- if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) {
- int i;
-
- found = 1;
- for (i = 0; i < entry->un_nid_count; i++)
- if (nid == entry->un_nids[i])
- break;
-
- if (i == entry->un_nid_count) {
- LASSERT(entry->un_nid_count < NIDS_MAX);
- entry->un_nids[entry->un_nid_count++] = nid;
- }
- break;
- }
- }
- if (!found)
- list_add(&data->un_list, &g_uuid_list);
- spin_unlock(&g_uuid_lock);
-
- if (found) {
- CDEBUG(D_INFO, "found uuid %s %s cnt=%d\n", uuid,
- libcfs_nid2str(nid), entry->un_nid_count);
- kfree(data);
- } else {
- CDEBUG(D_INFO, "add uuid %s %s\n", uuid, libcfs_nid2str(nid));
- }
- return 0;
-}
-
-/* Delete the nids for one uuid if specified, otherwise delete all */
-int class_del_uuid(const char *uuid)
-{
- LIST_HEAD(deathrow);
- struct uuid_nid_data *data;
- struct uuid_nid_data *temp;
-
- spin_lock(&g_uuid_lock);
- if (uuid) {
- struct obd_uuid tmp;
-
- obd_str2uuid(&tmp, uuid);
- list_for_each_entry(data, &g_uuid_list, un_list) {
- if (obd_uuid_equals(&data->un_uuid, &tmp)) {
- list_move(&data->un_list, &deathrow);
- break;
- }
- }
- } else {
- list_splice_init(&g_uuid_list, &deathrow);
- }
- spin_unlock(&g_uuid_lock);
-
- if (uuid && list_empty(&deathrow)) {
- CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
- return -EINVAL;
- }
-
- list_for_each_entry_safe(data, temp, &deathrow, un_list) {
- list_del(&data->un_list);
-
- CDEBUG(D_INFO, "del uuid %s %s/%d\n",
- obd_uuid2str(&data->un_uuid),
- libcfs_nid2str(data->un_nids[0]),
- data->un_nid_count);
-
- kfree(data);
- }
-
- return 0;
-}
-
-/* check if @nid exists in nid list of @uuid */
-int class_check_uuid(struct obd_uuid *uuid, __u64 nid)
-{
- struct uuid_nid_data *entry;
- int found = 0;
-
- CDEBUG(D_INFO, "check if uuid %s has %s.\n",
- obd_uuid2str(uuid), libcfs_nid2str(nid));
-
- spin_lock(&g_uuid_lock);
- list_for_each_entry(entry, &g_uuid_list, un_list) {
- int i;
-
- if (!obd_uuid_equals(&entry->un_uuid, uuid))
- continue;
-
- /* found the uuid, check if it has @nid */
- for (i = 0; i < entry->un_nid_count; i++) {
- if (entry->un_nids[i] == nid) {
- found = 1;
- break;
- }
- }
- break;
- }
- spin_unlock(&g_uuid_lock);
- return found;
-}
-EXPORT_SYMBOL(class_check_uuid);
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
deleted file mode 100644
index 277576b586db..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ /dev/null
@@ -1,1559 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/obd_config.c
- *
- * Config API
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/string.h>
-
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include <llog_swab.h>
-#include <lprocfs_status.h>
-#include <lustre_log.h>
-#include <uapi/linux/lustre/lustre_param.h>
-#include <obd_class.h>
-
-#include "llog_internal.h"
-
-static struct cfs_hash_ops uuid_hash_ops;
-
-/*********** string parsing utils *********/
-
-/* returns 0 if we find this key in the buffer, else 1 */
-int class_find_param(char *buf, char *key, char **valp)
-{
- char *ptr;
-
- if (!buf)
- return 1;
-
- ptr = strstr(buf, key);
- if (!ptr)
- return 1;
-
- if (valp)
- *valp = ptr + strlen(key);
-
- return 0;
-}
-EXPORT_SYMBOL(class_find_param);
-
-/* returns 0 if this is the first key in the buffer, else 1.
- * valp points to first char after key.
- */
-static int class_match_param(char *buf, const char *key, char **valp)
-{
- if (!buf)
- return 1;
-
- if (memcmp(buf, key, strlen(key)) != 0)
- return 1;
-
- if (valp)
- *valp = buf + strlen(key);
-
- return 0;
-}
-
-static int parse_nid(char *buf, void *value, int quiet)
-{
- lnet_nid_t *nid = value;
-
- *nid = libcfs_str2nid(buf);
- if (*nid != LNET_NID_ANY)
- return 0;
-
- if (!quiet)
- LCONSOLE_ERROR_MSG(0x159, "Can't parse NID '%s'\n", buf);
- return -EINVAL;
-}
-
-static int parse_net(char *buf, void *value)
-{
- __u32 *net = value;
-
- *net = libcfs_str2net(buf);
- CDEBUG(D_INFO, "Net %s\n", libcfs_net2str(*net));
- return 0;
-}
-
-enum {
- CLASS_PARSE_NID = 1,
- CLASS_PARSE_NET,
-};
-
-/* 0 is good nid,
- * 1 not found
- * < 0 error
- * endh is set to next separator
- */
-static int class_parse_value(char *buf, int opc, void *value, char **endh,
- int quiet)
-{
- char *endp;
- char tmp;
- int rc = 0;
-
- if (!buf)
- return 1;
- while (*buf == ',' || *buf == ':')
- buf++;
- if (*buf == ' ' || *buf == '/' || *buf == '\0')
- return 1;
-
- /* nid separators or end of nids */
- endp = strpbrk(buf, ",: /");
- if (!endp)
- endp = buf + strlen(buf);
-
- tmp = *endp;
- *endp = '\0';
- switch (opc) {
- default:
- LBUG();
- case CLASS_PARSE_NID:
- rc = parse_nid(buf, value, quiet);
- break;
- case CLASS_PARSE_NET:
- rc = parse_net(buf, value);
- break;
- }
- *endp = tmp;
- if (rc != 0)
- return rc;
- if (endh)
- *endh = endp;
- return 0;
-}
-
-int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh)
-{
- return class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 0);
-}
-EXPORT_SYMBOL(class_parse_nid);
-
-int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh)
-{
- return class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 1);
-}
-EXPORT_SYMBOL(class_parse_nid_quiet);
-
-char *lustre_cfg_string(struct lustre_cfg *lcfg, u32 index)
-{
- char *s;
-
- if (!lcfg->lcfg_buflens[index])
- return NULL;
-
- s = lustre_cfg_buf(lcfg, index);
- if (!s)
- return NULL;
-
- /*
- * make sure it's NULL terminated, even if this kills a char
- * of data. Try to use the padding first though.
- */
- if (s[lcfg->lcfg_buflens[index] - 1] != '\0') {
- size_t last = ALIGN(lcfg->lcfg_buflens[index], 8) - 1;
- char lost;
-
- /* Use the smaller value */
- if (last > lcfg->lcfg_buflens[index])
- last = lcfg->lcfg_buflens[index];
-
- lost = s[last];
- s[last] = '\0';
- if (lost != '\0') {
- CWARN("Truncated buf %d to '%s' (lost '%c'...)\n",
- index, s, lost);
- }
- }
- return s;
-}
-EXPORT_SYMBOL(lustre_cfg_string);
-
-/********************** class fns **********************/
-
-/**
- * Create a new obd device and set the type, name and uuid. If successful,
- * the new device can be accessed by either name or uuid.
- */
-static int class_attach(struct lustre_cfg *lcfg)
-{
- struct obd_device *obd = NULL;
- char *typename, *name, *uuid;
- int rc, len;
-
- if (!LUSTRE_CFG_BUFLEN(lcfg, 1)) {
- CERROR("No type passed!\n");
- return -EINVAL;
- }
- typename = lustre_cfg_string(lcfg, 1);
-
- if (!LUSTRE_CFG_BUFLEN(lcfg, 0)) {
- CERROR("No name passed!\n");
- return -EINVAL;
- }
- name = lustre_cfg_string(lcfg, 0);
-
- if (!LUSTRE_CFG_BUFLEN(lcfg, 2)) {
- CERROR("No UUID passed!\n");
- return -EINVAL;
- }
- uuid = lustre_cfg_string(lcfg, 2);
-
- CDEBUG(D_IOCTL, "attach type %s name: %s uuid: %s\n",
- typename, name, uuid);
-
- obd = class_newdev(typename, name);
- if (IS_ERR(obd)) {
- /* Already exists or out of obds */
- rc = PTR_ERR(obd);
- obd = NULL;
- CERROR("Cannot create device %s of type %s : %d\n",
- name, typename, rc);
- goto out;
- }
- LASSERTF(obd, "Cannot get obd device %s of type %s\n",
- name, typename);
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
- "obd %p obd_magic %08X != %08X\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
- LASSERTF(strncmp(obd->obd_name, name, strlen(name)) == 0,
- "%p obd_name %s != %s\n", obd, obd->obd_name, name);
-
- rwlock_init(&obd->obd_pool_lock);
- obd->obd_pool_limit = 0;
- obd->obd_pool_slv = 0;
-
- INIT_LIST_HEAD(&obd->obd_exports);
- INIT_LIST_HEAD(&obd->obd_unlinked_exports);
- INIT_LIST_HEAD(&obd->obd_delayed_exports);
- spin_lock_init(&obd->obd_nid_lock);
- spin_lock_init(&obd->obd_dev_lock);
- mutex_init(&obd->obd_dev_mutex);
- spin_lock_init(&obd->obd_osfs_lock);
- /* obd->obd_osfs_age must be set to a value in the distant
- * past to guarantee a fresh statfs is fetched on mount.
- */
- obd->obd_osfs_age = cfs_time_shift_64(-1000);
-
- /* XXX belongs in setup not attach */
- init_rwsem(&obd->obd_observer_link_sem);
- /* recovery data */
- init_waitqueue_head(&obd->obd_evict_inprogress_waitq);
-
- llog_group_init(&obd->obd_olg);
-
- obd->obd_conn_inprogress = 0;
-
- len = strlen(uuid);
- if (len >= sizeof(obd->obd_uuid)) {
- CERROR("uuid must be < %d bytes long\n",
- (int)sizeof(obd->obd_uuid));
- rc = -EINVAL;
- goto out;
- }
- memcpy(obd->obd_uuid.uuid, uuid, len);
-
- /* Detach drops this */
- spin_lock(&obd->obd_dev_lock);
- atomic_set(&obd->obd_refcount, 1);
- spin_unlock(&obd->obd_dev_lock);
- lu_ref_init(&obd->obd_reference);
- lu_ref_add(&obd->obd_reference, "attach", obd);
-
- obd->obd_attached = 1;
- CDEBUG(D_IOCTL, "OBD: dev %d attached type %s with refcount %d\n",
- obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
- return 0;
- out:
- if (obd)
- class_release_dev(obd);
-
- return rc;
-}
-
-/** Create hashes, self-export, and call type-specific setup.
- * Setup is effectively the "start this obd" call.
- */
-static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- int err = 0;
- struct obd_export *exp;
-
- LASSERT(obd);
- LASSERTF(obd == class_num2obd(obd->obd_minor),
- "obd %p != obd_devs[%d] %p\n",
- obd, obd->obd_minor, class_num2obd(obd->obd_minor));
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
- "obd %p obd_magic %08x != %08x\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
-
- /* have we attached a type to this device? */
- if (!obd->obd_attached) {
- CERROR("Device %d not attached\n", obd->obd_minor);
- return -ENODEV;
- }
-
- if (obd->obd_set_up) {
- CERROR("Device %d already setup (type %s)\n",
- obd->obd_minor, obd->obd_type->typ_name);
- return -EEXIST;
- }
-
- /* is someone else setting us up right now? (attach inits spinlock) */
- spin_lock(&obd->obd_dev_lock);
- if (obd->obd_starting) {
- spin_unlock(&obd->obd_dev_lock);
- CERROR("Device %d setup in progress (type %s)\n",
- obd->obd_minor, obd->obd_type->typ_name);
- return -EEXIST;
- }
- /* just leave this on forever. I can't use obd_set_up here because
- * other fns check that status, and we're not actually set up yet.
- */
- obd->obd_starting = 1;
- obd->obd_uuid_hash = NULL;
- spin_unlock(&obd->obd_dev_lock);
-
- /* create an uuid-export lustre hash */
- obd->obd_uuid_hash = cfs_hash_create("UUID_HASH",
- HASH_UUID_CUR_BITS,
- HASH_UUID_MAX_BITS,
- HASH_UUID_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &uuid_hash_ops, CFS_HASH_DEFAULT);
- if (!obd->obd_uuid_hash) {
- err = -ENOMEM;
- goto err_hash;
- }
-
- exp = class_new_export(obd, &obd->obd_uuid);
- if (IS_ERR(exp)) {
- err = PTR_ERR(exp);
- goto err_hash;
- }
-
- obd->obd_self_export = exp;
- class_export_put(exp);
-
- err = obd_setup(obd, lcfg);
- if (err)
- goto err_exp;
-
- obd->obd_set_up = 1;
-
- spin_lock(&obd->obd_dev_lock);
- /* cleanup drops this */
- class_incref(obd, "setup", obd);
- spin_unlock(&obd->obd_dev_lock);
-
- CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n",
- obd->obd_name, obd->obd_uuid.uuid);
-
- return 0;
-err_exp:
- if (obd->obd_self_export) {
- class_unlink_export(obd->obd_self_export);
- obd->obd_self_export = NULL;
- }
-err_hash:
- if (obd->obd_uuid_hash) {
- cfs_hash_putref(obd->obd_uuid_hash);
- obd->obd_uuid_hash = NULL;
- }
- obd->obd_starting = 0;
- CERROR("setup %s failed (%d)\n", obd->obd_name, err);
- return err;
-}
-
-/** We have finished using this obd and are ready to destroy it.
- * There can be no more references to this obd.
- */
-static int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- if (obd->obd_set_up) {
- CERROR("OBD device %d still set up\n", obd->obd_minor);
- return -EBUSY;
- }
-
- spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_attached) {
- spin_unlock(&obd->obd_dev_lock);
- CERROR("OBD device %d not attached\n", obd->obd_minor);
- return -ENODEV;
- }
- obd->obd_attached = 0;
- spin_unlock(&obd->obd_dev_lock);
-
- CDEBUG(D_IOCTL, "detach on obd %s (uuid %s)\n",
- obd->obd_name, obd->obd_uuid.uuid);
-
- class_decref(obd, "attach", obd);
- return 0;
-}
-
-/** Start shutting down the obd. There may be in-progress ops when
- * this is called. We tell them to start shutting down with a call
- * to class_disconnect_exports().
- */
-static int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- int err = 0;
- char *flag;
-
- OBD_RACE(OBD_FAIL_LDLM_RECOV_CLIENTS);
-
- if (!obd->obd_set_up) {
- CERROR("Device %d not setup\n", obd->obd_minor);
- return -ENODEV;
- }
-
- spin_lock(&obd->obd_dev_lock);
- if (obd->obd_stopping) {
- spin_unlock(&obd->obd_dev_lock);
- CERROR("OBD %d already stopping\n", obd->obd_minor);
- return -ENODEV;
- }
- /* Leave this on forever */
- obd->obd_stopping = 1;
- spin_unlock(&obd->obd_dev_lock);
-
- while (obd->obd_conn_inprogress > 0)
- cond_resched();
- smp_rmb();
-
- if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) {
- for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++)
- switch (*flag) {
- case 'F':
- obd->obd_force = 1;
- break;
- case 'A':
- LCONSOLE_WARN("Failing over %s\n",
- obd->obd_name);
- obd->obd_fail = 1;
- obd->obd_no_transno = 1;
- obd->obd_no_recov = 1;
- if (OBP(obd, iocontrol)) {
- obd_iocontrol(OBD_IOC_SYNC,
- obd->obd_self_export,
- 0, NULL, NULL);
- }
- break;
- default:
- CERROR("Unrecognised flag '%c'\n", *flag);
- }
- }
-
- LASSERT(obd->obd_self_export);
-
- /* Precleanup, we must make sure all exports get destroyed. */
- err = obd_precleanup(obd);
- if (err)
- CERROR("Precleanup %s returned %d\n",
- obd->obd_name, err);
-
- /* destroy an uuid-export hash body */
- if (obd->obd_uuid_hash) {
- cfs_hash_putref(obd->obd_uuid_hash);
- obd->obd_uuid_hash = NULL;
- }
-
- class_decref(obd, "setup", obd);
- obd->obd_set_up = 0;
-
- return 0;
-}
-
-struct obd_device *class_incref(struct obd_device *obd,
- const char *scope, const void *source)
-{
- lu_ref_add_atomic(&obd->obd_reference, scope, source);
- atomic_inc(&obd->obd_refcount);
- CDEBUG(D_INFO, "incref %s (%p) now %d\n", obd->obd_name, obd,
- atomic_read(&obd->obd_refcount));
-
- return obd;
-}
-EXPORT_SYMBOL(class_incref);
-
-void class_decref(struct obd_device *obd, const char *scope, const void *source)
-{
- int err;
- int refs;
-
- spin_lock(&obd->obd_dev_lock);
- atomic_dec(&obd->obd_refcount);
- refs = atomic_read(&obd->obd_refcount);
- spin_unlock(&obd->obd_dev_lock);
- lu_ref_del(&obd->obd_reference, scope, source);
-
- CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs);
-
- if ((refs == 1) && obd->obd_stopping) {
- /* All exports have been destroyed; there should
- * be no more in-progress ops by this point.
- */
-
- spin_lock(&obd->obd_self_export->exp_lock);
- obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
- spin_unlock(&obd->obd_self_export->exp_lock);
-
- /* note that we'll recurse into class_decref again */
- class_unlink_export(obd->obd_self_export);
- return;
- }
-
- if (refs == 0) {
- CDEBUG(D_CONFIG, "finishing cleanup of obd %s (%s)\n",
- obd->obd_name, obd->obd_uuid.uuid);
- LASSERT(!obd->obd_attached);
- if (obd->obd_stopping) {
- /* If we're not stopping, we were never set up */
- err = obd_cleanup(obd);
- if (err)
- CERROR("Cleanup %s returned %d\n",
- obd->obd_name, err);
- }
- class_release_dev(obd);
- }
-}
-EXPORT_SYMBOL(class_decref);
-
-/** Add a failover nid location.
- * Client obd types contact server obd types using this nid list.
- */
-static int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct obd_import *imp;
- struct obd_uuid uuid;
- int rc;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
- LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
- CERROR("invalid conn_uuid\n");
- return -EINVAL;
- }
- if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_OSP_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
- CERROR("can't add connection on non-client dev\n");
- return -EINVAL;
- }
-
- imp = obd->u.cli.cl_import;
- if (!imp) {
- CERROR("try to add conn on immature client dev\n");
- return -EINVAL;
- }
-
- obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1));
- rc = obd_add_conn(imp, &uuid, lcfg->lcfg_num);
-
- return rc;
-}
-
-/** Remove a failover nid location.
- */
-static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct obd_import *imp;
- struct obd_uuid uuid;
- int rc;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
- LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
- CERROR("invalid conn_uuid\n");
- return -EINVAL;
- }
- if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME)) {
- CERROR("can't del connection on non-client dev\n");
- return -EINVAL;
- }
-
- imp = obd->u.cli.cl_import;
- if (!imp) {
- CERROR("try to del conn on immature client dev\n");
- return -EINVAL;
- }
-
- obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1));
- rc = obd_del_conn(imp, &uuid);
-
- return rc;
-}
-
-static LIST_HEAD(lustre_profile_list);
-static DEFINE_SPINLOCK(lustre_profile_list_lock);
-
-struct lustre_profile *class_get_profile(const char *prof)
-{
- struct lustre_profile *lprof;
-
- spin_lock(&lustre_profile_list_lock);
- list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
- if (!strcmp(lprof->lp_profile, prof)) {
- lprof->lp_refs++;
- spin_unlock(&lustre_profile_list_lock);
- return lprof;
- }
- }
- spin_unlock(&lustre_profile_list_lock);
- return NULL;
-}
-EXPORT_SYMBOL(class_get_profile);
-
-/** Create a named "profile".
- * This defines the mdc and osc names to use for a client.
- * This also is used to define the lov to be used by a mdt.
- */
-static int class_add_profile(int proflen, char *prof, int osclen, char *osc,
- int mdclen, char *mdc)
-{
- struct lustre_profile *lprof;
- int err = 0;
-
- CDEBUG(D_CONFIG, "Add profile %s\n", prof);
-
- lprof = kzalloc(sizeof(*lprof), GFP_NOFS);
- if (!lprof)
- return -ENOMEM;
- INIT_LIST_HEAD(&lprof->lp_list);
-
- LASSERT(proflen == (strlen(prof) + 1));
- lprof->lp_profile = kmemdup(prof, proflen, GFP_NOFS);
- if (!lprof->lp_profile) {
- err = -ENOMEM;
- goto free_lprof;
- }
-
- LASSERT(osclen == (strlen(osc) + 1));
- lprof->lp_dt = kmemdup(osc, osclen, GFP_NOFS);
- if (!lprof->lp_dt) {
- err = -ENOMEM;
- goto free_lp_profile;
- }
-
- if (mdclen > 0) {
- LASSERT(mdclen == (strlen(mdc) + 1));
- lprof->lp_md = kmemdup(mdc, mdclen, GFP_NOFS);
- if (!lprof->lp_md) {
- err = -ENOMEM;
- goto free_lp_dt;
- }
- }
-
- spin_lock(&lustre_profile_list_lock);
- lprof->lp_refs = 1;
- lprof->lp_list_deleted = false;
- list_add(&lprof->lp_list, &lustre_profile_list);
- spin_unlock(&lustre_profile_list_lock);
- return err;
-
-free_lp_dt:
- kfree(lprof->lp_dt);
-free_lp_profile:
- kfree(lprof->lp_profile);
-free_lprof:
- kfree(lprof);
- return err;
-}
-
-void class_del_profile(const char *prof)
-{
- struct lustre_profile *lprof;
-
- CDEBUG(D_CONFIG, "Del profile %s\n", prof);
-
- lprof = class_get_profile(prof);
- if (lprof) {
- spin_lock(&lustre_profile_list_lock);
- /* because get profile increments the ref counter */
- lprof->lp_refs--;
- list_del(&lprof->lp_list);
- lprof->lp_list_deleted = true;
- spin_unlock(&lustre_profile_list_lock);
-
- class_put_profile(lprof);
- }
-}
-EXPORT_SYMBOL(class_del_profile);
-
-void class_put_profile(struct lustre_profile *lprof)
-{
- spin_lock(&lustre_profile_list_lock);
- if (--lprof->lp_refs > 0) {
- LASSERT(lprof->lp_refs > 0);
- spin_unlock(&lustre_profile_list_lock);
- return;
- }
- spin_unlock(&lustre_profile_list_lock);
-
- /* confirm not a negative number */
- LASSERT(!lprof->lp_refs);
-
- /*
- * At least one class_del_profile/profiles must be called
- * on the target profile or lustre_profile_list will corrupt
- */
- LASSERT(lprof->lp_list_deleted);
- kfree(lprof->lp_profile);
- kfree(lprof->lp_dt);
- kfree(lprof->lp_md);
- kfree(lprof);
-}
-EXPORT_SYMBOL(class_put_profile);
-
-/* COMPAT_146 */
-void class_del_profiles(void)
-{
- struct lustre_profile *lprof, *n;
-
- spin_lock(&lustre_profile_list_lock);
- list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
- list_del(&lprof->lp_list);
- lprof->lp_list_deleted = true;
- spin_unlock(&lustre_profile_list_lock);
-
- class_put_profile(lprof);
-
- spin_lock(&lustre_profile_list_lock);
- }
- spin_unlock(&lustre_profile_list_lock);
-}
-EXPORT_SYMBOL(class_del_profiles);
-
-static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg)
-{
- if (class_match_param(ptr, PARAM_AT_MIN, NULL) == 0)
- at_min = val;
- else if (class_match_param(ptr, PARAM_AT_MAX, NULL) == 0)
- at_max = val;
- else if (class_match_param(ptr, PARAM_AT_EXTRA, NULL) == 0)
- at_extra = val;
- else if (class_match_param(ptr, PARAM_AT_EARLY_MARGIN, NULL) == 0)
- at_early_margin = val;
- else if (class_match_param(ptr, PARAM_AT_HISTORY, NULL) == 0)
- at_history = val;
- else if (class_match_param(ptr, PARAM_JOBID_VAR, NULL) == 0)
- strlcpy(obd_jobid_var, lustre_cfg_string(lcfg, 2),
- JOBSTATS_JOBID_VAR_MAX_LEN + 1);
- else
- return -EINVAL;
-
- CDEBUG(D_IOCTL, "global %s = %d\n", ptr, val);
- return 0;
-}
-
-/* We can't call ll_process_config or lquota_process_config directly because
- * it lives in a module that must be loaded after this one.
- */
-static int (*client_process_config)(struct lustre_cfg *lcfg);
-static int (*quota_process_config)(struct lustre_cfg *lcfg);
-
-void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg))
-{
- client_process_config = cpc;
-}
-EXPORT_SYMBOL(lustre_register_client_process_config);
-
-static int process_param2_config(struct lustre_cfg *lcfg)
-{
- char *param = lustre_cfg_string(lcfg, 1);
- char *upcall = lustre_cfg_string(lcfg, 2);
- char *argv[] = {
- [0] = "/usr/sbin/lctl",
- [1] = "set_param",
- [2] = param,
- [3] = NULL
- };
- ktime_t start;
- ktime_t end;
- int rc;
-
- /* Add upcall processing here. Now only lctl is supported */
- if (strcmp(upcall, LCTL_UPCALL) != 0) {
- CERROR("Unsupported upcall %s\n", upcall);
- return -EINVAL;
- }
-
- start = ktime_get();
- rc = call_usermodehelper(argv[0], argv, NULL, UMH_WAIT_PROC);
- end = ktime_get();
-
- if (rc < 0) {
- CERROR(
- "lctl: error invoking upcall %s %s %s: rc = %d; time %ldus\n",
- argv[0], argv[1], argv[2], rc,
- (long)ktime_us_delta(end, start));
- } else {
- CDEBUG(D_HA, "lctl: invoked upcall %s %s %s, time %ldus\n",
- argv[0], argv[1], argv[2],
- (long)ktime_us_delta(end, start));
- rc = 0;
- }
-
- return rc;
-}
-
-/** Process configuration commands given in lustre_cfg form.
- * These may come from direct calls (e.g. class_manual_cleanup)
- * or processing the config llog, or ioctl from lctl.
- */
-int class_process_config(struct lustre_cfg *lcfg)
-{
- struct obd_device *obd;
- int err;
-
- LASSERT(lcfg && !IS_ERR(lcfg));
- CDEBUG(D_IOCTL, "processing cmd: %x\n", lcfg->lcfg_command);
-
- /* Commands that don't need a device */
- switch (lcfg->lcfg_command) {
- case LCFG_ATTACH: {
- err = class_attach(lcfg);
- goto out;
- }
- case LCFG_ADD_UUID: {
- CDEBUG(D_IOCTL, "adding mapping from uuid %s to nid %#llx (%s)\n",
- lustre_cfg_string(lcfg, 1), lcfg->lcfg_nid,
- libcfs_nid2str(lcfg->lcfg_nid));
-
- err = class_add_uuid(lustre_cfg_string(lcfg, 1), lcfg->lcfg_nid);
- goto out;
- }
- case LCFG_DEL_UUID: {
- CDEBUG(D_IOCTL, "removing mappings for uuid %s\n",
- (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) == 0)
- ? "<all uuids>" : lustre_cfg_string(lcfg, 1));
-
- err = class_del_uuid(lustre_cfg_string(lcfg, 1));
- goto out;
- }
- case LCFG_MOUNTOPT: {
- CDEBUG(D_IOCTL, "mountopt: profile %s osc %s mdc %s\n",
- lustre_cfg_string(lcfg, 1),
- lustre_cfg_string(lcfg, 2),
- lustre_cfg_string(lcfg, 3));
- /* set these mount options somewhere, so ll_fill_super
- * can find them.
- */
- err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1),
- lustre_cfg_string(lcfg, 1),
- LUSTRE_CFG_BUFLEN(lcfg, 2),
- lustre_cfg_string(lcfg, 2),
- LUSTRE_CFG_BUFLEN(lcfg, 3),
- lustre_cfg_string(lcfg, 3));
- goto out;
- }
- case LCFG_DEL_MOUNTOPT: {
- CDEBUG(D_IOCTL, "mountopt: profile %s\n",
- lustre_cfg_string(lcfg, 1));
- class_del_profile(lustre_cfg_string(lcfg, 1));
- err = 0;
- goto out;
- }
- case LCFG_SET_TIMEOUT: {
- CDEBUG(D_IOCTL, "changing lustre timeout from %d to %d\n",
- obd_timeout, lcfg->lcfg_num);
- obd_timeout = max(lcfg->lcfg_num, 1U);
- obd_timeout_set = 1;
- err = 0;
- goto out;
- }
- case LCFG_SET_LDLM_TIMEOUT: {
- /* ldlm_timeout is not used on the client */
- err = 0;
- goto out;
- }
- case LCFG_SET_UPCALL: {
- LCONSOLE_ERROR_MSG(0x15a, "recovery upcall is deprecated\n");
- /* COMPAT_146 Don't fail on old configs */
- err = 0;
- goto out;
- }
- case LCFG_MARKER: {
- struct cfg_marker *marker;
-
- marker = lustre_cfg_buf(lcfg, 1);
- CDEBUG(D_IOCTL, "marker %d (%#x) %.16s %s\n", marker->cm_step,
- marker->cm_flags, marker->cm_tgtname, marker->cm_comment);
- err = 0;
- goto out;
- }
- case LCFG_PARAM: {
- char *tmp;
- /* llite has no obd */
- if ((class_match_param(lustre_cfg_string(lcfg, 1),
- PARAM_LLITE, NULL) == 0) &&
- client_process_config) {
- err = (*client_process_config)(lcfg);
- goto out;
- } else if ((class_match_param(lustre_cfg_string(lcfg, 1),
- PARAM_SYS, &tmp) == 0)) {
- /* Global param settings */
- err = class_set_global(tmp, lcfg->lcfg_num, lcfg);
- /*
- * Client or server should not fail to mount if
- * it hits an unknown configuration parameter.
- */
- if (err != 0)
- CWARN("Ignoring unknown param %s\n", tmp);
-
- err = 0;
- goto out;
- } else if ((class_match_param(lustre_cfg_string(lcfg, 1),
- PARAM_QUOTA, &tmp) == 0) &&
- quota_process_config) {
- err = (*quota_process_config)(lcfg);
- goto out;
- }
-
- break;
- }
- case LCFG_SET_PARAM: {
- err = process_param2_config(lcfg);
- goto out;
- }
- }
- /* Commands that require a device */
- obd = class_name2obd(lustre_cfg_string(lcfg, 0));
- if (!obd) {
- if (!LUSTRE_CFG_BUFLEN(lcfg, 0))
- CERROR("this lcfg command requires a device name\n");
- else
- CERROR("no device for: %s\n",
- lustre_cfg_string(lcfg, 0));
-
- err = -EINVAL;
- goto out;
- }
-
- switch (lcfg->lcfg_command) {
- case LCFG_SETUP: {
- err = class_setup(obd, lcfg);
- goto out;
- }
- case LCFG_DETACH: {
- err = class_detach(obd, lcfg);
- err = 0;
- goto out;
- }
- case LCFG_CLEANUP: {
- err = class_cleanup(obd, lcfg);
- err = 0;
- goto out;
- }
- case LCFG_ADD_CONN: {
- err = class_add_conn(obd, lcfg);
- err = 0;
- goto out;
- }
- case LCFG_DEL_CONN: {
- err = class_del_conn(obd, lcfg);
- err = 0;
- goto out;
- }
- case LCFG_POOL_NEW: {
- err = obd_pool_new(obd, lustre_cfg_string(lcfg, 2));
- err = 0;
- goto out;
- }
- case LCFG_POOL_ADD: {
- err = obd_pool_add(obd, lustre_cfg_string(lcfg, 2),
- lustre_cfg_string(lcfg, 3));
- err = 0;
- goto out;
- }
- case LCFG_POOL_REM: {
- err = obd_pool_rem(obd, lustre_cfg_string(lcfg, 2),
- lustre_cfg_string(lcfg, 3));
- err = 0;
- goto out;
- }
- case LCFG_POOL_DEL: {
- err = obd_pool_del(obd, lustre_cfg_string(lcfg, 2));
- err = 0;
- goto out;
- }
- default: {
- err = obd_process_config(obd, sizeof(*lcfg), lcfg);
- goto out;
- }
- }
-out:
- if ((err < 0) && !(lcfg->lcfg_command & LCFG_REQUIRED)) {
- CWARN("Ignoring error %d on optional command %#x\n", err,
- lcfg->lcfg_command);
- err = 0;
- }
- return err;
-}
-EXPORT_SYMBOL(class_process_config);
-
-int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
- struct lustre_cfg *lcfg, void *data)
-{
- struct lprocfs_vars *var;
- struct file fakefile;
- struct seq_file fake_seqfile;
- char *key, *sval;
- int i, keylen, vallen;
- int matched = 0, j = 0;
- int rc = 0;
- int skip = 0;
-
- if (lcfg->lcfg_command != LCFG_PARAM) {
- CERROR("Unknown command: %d\n", lcfg->lcfg_command);
- return -EINVAL;
- }
-
- /* fake a seq file so that var->fops->write can work... */
- fakefile.private_data = &fake_seqfile;
- fake_seqfile.private = data;
- /* e.g. tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt
- * or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar
- * or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36
- */
- for (i = 1; i < lcfg->lcfg_bufcount; i++) {
- key = lustre_cfg_buf(lcfg, i);
- /* Strip off prefix */
- if (class_match_param(key, prefix, &key)) {
- /*
- * If the prefix doesn't match, return error so we
- * can pass it down the stack
- */
- return -ENOSYS;
- }
- sval = strchr(key, '=');
- if (!sval || (*(sval + 1) == 0)) {
- CERROR("Can't parse param %s (missing '=')\n", key);
- /* rc = -EINVAL; continue parsing other params */
- continue;
- }
- keylen = sval - key;
- sval++;
- vallen = strlen(sval);
- matched = 0;
- j = 0;
- /* Search proc entries */
- while (lvars[j].name) {
- var = &lvars[j];
- if (!class_match_param(key, var->name, NULL) &&
- keylen == strlen(var->name)) {
- matched++;
- rc = -EROFS;
- if (var->fops && var->fops->write) {
- mm_segment_t oldfs;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- rc = var->fops->write(&fakefile,
- (const char __user *)sval,
- vallen, NULL);
- set_fs(oldfs);
- }
- break;
- }
- j++;
- }
- if (!matched) {
- CERROR("%.*s: %s unknown param %s\n",
- (int)strlen(prefix) - 1, prefix,
- (char *)lustre_cfg_string(lcfg, 0), key);
- /* rc = -EINVAL; continue parsing other params */
- skip++;
- } else if (rc < 0) {
- CERROR("%s: error writing proc entry '%s': rc = %d\n",
- prefix, var->name, rc);
- rc = 0;
- } else {
- CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n",
- lustre_cfg_string(lcfg, 0),
- (int)strlen(prefix) - 1, prefix,
- (int)(sval - key - 1), key, sval);
- }
- }
-
- if (rc > 0)
- rc = 0;
- if (!rc && skip)
- rc = skip;
- return rc;
-}
-EXPORT_SYMBOL(class_process_proc_param);
-
-/** Parse a configuration llog, doing various manipulations on them
- * for various reasons, (modifications for compatibility, skip obsolete
- * records, change uuids, etc), then class_process_config() resulting
- * net records.
- */
-int class_config_llog_handler(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, void *data)
-{
- struct config_llog_instance *clli = data;
- int cfg_len = rec->lrh_len;
- char *cfg_buf = (char *)(rec + 1);
- int rc = 0;
-
- switch (rec->lrh_type) {
- case OBD_CFG_REC: {
- struct lustre_cfg *lcfg, *lcfg_new;
- struct lustre_cfg_bufs bufs;
- char *inst_name = NULL;
- int inst_len = 0;
- size_t lcfg_len;
- int swab = 0;
-
- lcfg = (struct lustre_cfg *)cfg_buf;
- if (lcfg->lcfg_version == __swab32(LUSTRE_CFG_VERSION)) {
- lustre_swab_lustre_cfg(lcfg);
- swab = 1;
- }
-
- rc = lustre_cfg_sanity_check(cfg_buf, cfg_len);
- if (rc)
- goto out;
-
- /* Figure out config state info */
- if (lcfg->lcfg_command == LCFG_MARKER) {
- struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1);
-
- lustre_swab_cfg_marker(marker, swab,
- LUSTRE_CFG_BUFLEN(lcfg, 1));
- CDEBUG(D_CONFIG, "Marker, inst_flg=%#x mark_flg=%#x\n",
- clli->cfg_flags, marker->cm_flags);
- if (marker->cm_flags & CM_START) {
- /* all previous flags off */
- clli->cfg_flags = CFG_F_MARKER;
- if (marker->cm_flags & CM_SKIP) {
- clli->cfg_flags |= CFG_F_SKIP;
- CDEBUG(D_CONFIG, "SKIP #%d\n",
- marker->cm_step);
- } else if ((marker->cm_flags & CM_EXCLUDE) ||
- (clli->cfg_sb &&
- lustre_check_exclusion(clli->cfg_sb,
- marker->cm_tgtname))) {
- clli->cfg_flags |= CFG_F_EXCLUDE;
- CDEBUG(D_CONFIG, "EXCLUDE %d\n",
- marker->cm_step);
- }
- } else if (marker->cm_flags & CM_END) {
- clli->cfg_flags = 0;
- }
- }
- /* A config command without a start marker before it is
- * illegal (post 146)
- */
- if (!(clli->cfg_flags & CFG_F_COMPAT146) &&
- !(clli->cfg_flags & CFG_F_MARKER) &&
- (lcfg->lcfg_command != LCFG_MARKER)) {
- CWARN("Config not inside markers, ignoring! (inst: %p, uuid: %s, flags: %#x)\n",
- clli->cfg_instance,
- clli->cfg_uuid.uuid, clli->cfg_flags);
- clli->cfg_flags |= CFG_F_SKIP;
- }
- if (clli->cfg_flags & CFG_F_SKIP) {
- CDEBUG(D_CONFIG, "skipping %#x\n",
- clli->cfg_flags);
- rc = 0;
- /* No processing! */
- break;
- }
-
- /*
- * For interoperability between 1.8 and 2.0,
- * rename "mds" obd device type to "mdt".
- */
- {
- char *typename = lustre_cfg_string(lcfg, 1);
- char *index = lustre_cfg_string(lcfg, 2);
-
- if ((lcfg->lcfg_command == LCFG_ATTACH && typename &&
- strcmp(typename, "mds") == 0)) {
- CWARN("For 1.8 interoperability, rename obd type from mds to mdt\n");
- typename[2] = 't';
- }
- if ((lcfg->lcfg_command == LCFG_SETUP && index &&
- strcmp(index, "type") == 0)) {
- CDEBUG(D_INFO, "For 1.8 interoperability, set this index to '0'\n");
- index[0] = '0';
- index[1] = 0;
- }
- }
-
- if (clli->cfg_flags & CFG_F_EXCLUDE) {
- CDEBUG(D_CONFIG, "cmd: %x marked EXCLUDED\n",
- lcfg->lcfg_command);
- if (lcfg->lcfg_command == LCFG_LOV_ADD_OBD)
- /* Add inactive instead */
- lcfg->lcfg_command = LCFG_LOV_ADD_INA;
- }
-
- lustre_cfg_bufs_init(&bufs, lcfg);
-
- if (clli && clli->cfg_instance &&
- LUSTRE_CFG_BUFLEN(lcfg, 0) > 0) {
- inst_len = LUSTRE_CFG_BUFLEN(lcfg, 0) +
- sizeof(clli->cfg_instance) * 2 + 4;
- inst_name = kasprintf(GFP_NOFS, "%s-%p",
- lustre_cfg_string(lcfg, 0),
- clli->cfg_instance);
- if (!inst_name) {
- rc = -ENOMEM;
- goto out;
- }
- lustre_cfg_bufs_set_string(&bufs, 0, inst_name);
- CDEBUG(D_CONFIG, "cmd %x, instance name: %s\n",
- lcfg->lcfg_command, inst_name);
- }
-
- /* we override the llog's uuid for clients, to insure they
- * are unique
- */
- if (clli && clli->cfg_instance &&
- lcfg->lcfg_command == LCFG_ATTACH) {
- lustre_cfg_bufs_set_string(&bufs, 2,
- clli->cfg_uuid.uuid);
- }
- /*
- * sptlrpc config record, we expect 2 data segments:
- * [0]: fs_name/target_name,
- * [1]: rule string
- * moving them to index [1] and [2], and insert MGC's
- * obdname at index [0].
- */
- if (clli && !clli->cfg_instance &&
- lcfg->lcfg_command == LCFG_SPTLRPC_CONF) {
- lustre_cfg_bufs_set(&bufs, 2, bufs.lcfg_buf[1],
- bufs.lcfg_buflen[1]);
- lustre_cfg_bufs_set(&bufs, 1, bufs.lcfg_buf[0],
- bufs.lcfg_buflen[0]);
- lustre_cfg_bufs_set_string(&bufs, 0,
- clli->cfg_obdname);
- }
-
- lcfg_len = lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen);
- lcfg_new = kzalloc(lcfg_len, GFP_NOFS);
- if (!lcfg_new) {
- rc = -ENOMEM;
- goto out;
- }
-
- lustre_cfg_init(lcfg_new, lcfg->lcfg_command, &bufs);
- lcfg_new->lcfg_num = lcfg->lcfg_num;
- lcfg_new->lcfg_flags = lcfg->lcfg_flags;
-
- /* XXX Hack to try to remain binary compatible with
- * pre-newconfig logs
- */
- if (lcfg->lcfg_nal != 0 && /* pre-newconfig log? */
- (lcfg->lcfg_nid >> 32) == 0) {
- __u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff);
-
- lcfg_new->lcfg_nid =
- LNET_MKNID(LNET_MKNET(lcfg->lcfg_nal, 0), addr);
- CWARN("Converted pre-newconfig NAL %d NID %x to %s\n",
- lcfg->lcfg_nal, addr,
- libcfs_nid2str(lcfg_new->lcfg_nid));
- } else {
- lcfg_new->lcfg_nid = lcfg->lcfg_nid;
- }
-
- lcfg_new->lcfg_nal = 0; /* illegal value for obsolete field */
-
- rc = class_process_config(lcfg_new);
- kfree(lcfg_new);
- kfree(inst_name);
- break;
- }
- default:
- CERROR("Unknown llog record type %#x encountered\n",
- rec->lrh_type);
- break;
- }
-out:
- if (rc) {
- CERROR("%s: cfg command failed: rc = %d\n",
- handle->lgh_ctxt->loc_obd->obd_name, rc);
- class_config_dump_handler(NULL, handle, rec, data);
- }
- return rc;
-}
-EXPORT_SYMBOL(class_config_llog_handler);
-
-int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
- char *name, struct config_llog_instance *cfg)
-{
- struct llog_process_cat_data cd = {0, 0};
- struct llog_handle *llh;
- llog_cb_t callback;
- int rc;
-
- CDEBUG(D_INFO, "looking up llog %s\n", name);
- rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
- if (rc)
- return rc;
-
- rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
- if (rc)
- goto parse_out;
-
- /* continue processing from where we last stopped to end-of-log */
- if (cfg) {
- cd.lpcd_first_idx = cfg->cfg_last_idx;
- callback = cfg->cfg_callback;
- LASSERT(callback);
- } else {
- callback = class_config_llog_handler;
- }
-
- cd.lpcd_last_idx = 0;
-
- rc = llog_process(env, llh, callback, cfg, &cd);
-
- CDEBUG(D_CONFIG, "Processed log %s gen %d-%d (rc=%d)\n", name,
- cd.lpcd_first_idx + 1, cd.lpcd_last_idx, rc);
- if (cfg)
- cfg->cfg_last_idx = cd.lpcd_last_idx;
-
-parse_out:
- llog_close(env, llh);
- return rc;
-}
-EXPORT_SYMBOL(class_config_parse_llog);
-
-/**
- * parse config record and output dump in supplied buffer.
- * This is separated from class_config_dump_handler() to use
- * for ioctl needs as well
- */
-static int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf,
- int size)
-{
- struct lustre_cfg *lcfg = (struct lustre_cfg *)(rec + 1);
- char *ptr = buf;
- char *end = buf + size;
- int rc = 0;
-
- LASSERT(rec->lrh_type == OBD_CFG_REC);
- rc = lustre_cfg_sanity_check(lcfg, rec->lrh_len);
- if (rc < 0)
- return rc;
-
- ptr += snprintf(ptr, end - ptr, "cmd=%05x ", lcfg->lcfg_command);
- if (lcfg->lcfg_flags)
- ptr += snprintf(ptr, end - ptr, "flags=%#08x ",
- lcfg->lcfg_flags);
-
- if (lcfg->lcfg_num)
- ptr += snprintf(ptr, end - ptr, "num=%#08x ", lcfg->lcfg_num);
-
- if (lcfg->lcfg_nid) {
- char nidstr[LNET_NIDSTR_SIZE];
-
- libcfs_nid2str_r(lcfg->lcfg_nid, nidstr, sizeof(nidstr));
- ptr += snprintf(ptr, end - ptr, "nid=%s(%#llx)\n ",
- nidstr, lcfg->lcfg_nid);
- }
-
- if (lcfg->lcfg_command == LCFG_MARKER) {
- struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1);
-
- ptr += snprintf(ptr, end - ptr, "marker=%d(%#x)%s '%s'",
- marker->cm_step, marker->cm_flags,
- marker->cm_tgtname, marker->cm_comment);
- } else {
- int i;
-
- for (i = 0; i < lcfg->lcfg_bufcount; i++) {
- ptr += snprintf(ptr, end - ptr, "%d:%s ", i,
- lustre_cfg_string(lcfg, i));
- }
- }
- ptr += snprintf(ptr, end - ptr, "\n");
- /* return consumed bytes */
- rc = ptr - buf;
- return rc;
-}
-
-int class_config_dump_handler(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, void *data)
-{
- char *outstr;
- int rc = 0;
-
- outstr = kzalloc(256, GFP_NOFS);
- if (!outstr)
- return -ENOMEM;
-
- if (rec->lrh_type == OBD_CFG_REC) {
- class_config_parse_rec(rec, outstr, 256);
- LCONSOLE(D_WARNING, " %s", outstr);
- } else {
- LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type);
- rc = -EINVAL;
- }
-
- kfree(outstr);
- return rc;
-}
-
-/** Call class_cleanup and class_detach.
- * "Manual" only in the sense that we're faking lcfg commands.
- */
-int class_manual_cleanup(struct obd_device *obd)
-{
- char flags[3] = "";
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs bufs;
- int rc;
-
- if (!obd) {
- CERROR("empty cleanup\n");
- return -EALREADY;
- }
-
- if (obd->obd_force)
- strcat(flags, "F");
- if (obd->obd_fail)
- strcat(flags, "A");
-
- CDEBUG(D_CONFIG, "Manual cleanup of %s (flags='%s')\n",
- obd->obd_name, flags);
-
- lustre_cfg_bufs_reset(&bufs, obd->obd_name);
- lustre_cfg_bufs_set_string(&bufs, 1, flags);
- lcfg = kzalloc(lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen),
- GFP_NOFS);
- if (!lcfg)
- return -ENOMEM;
- lustre_cfg_init(lcfg, LCFG_CLEANUP, &bufs);
-
- rc = class_process_config(lcfg);
- if (rc) {
- CERROR("cleanup failed %d: %s\n", rc, obd->obd_name);
- goto out;
- }
-
- /* the lcfg is almost the same for both ops */
- lcfg->lcfg_command = LCFG_DETACH;
- rc = class_process_config(lcfg);
- if (rc)
- CERROR("detach failed %d: %s\n", rc, obd->obd_name);
-out:
- kfree(lcfg);
- return rc;
-}
-EXPORT_SYMBOL(class_manual_cleanup);
-
-/*
- * uuid<->export lustre hash operations
- */
-
-static unsigned int
-uuid_hash(struct cfs_hash *hs, const void *key, unsigned int mask)
-{
- return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
- sizeof(((struct obd_uuid *)key)->uuid), mask);
-}
-
-static void *
-uuid_key(struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
-
- return &exp->exp_client_uuid;
-}
-
-/*
- * NOTE: It is impossible to find an export that is in failed
- * state with this function
- */
-static int
-uuid_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- LASSERT(key);
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
-
- return obd_uuid_equals(key, &exp->exp_client_uuid) &&
- !exp->exp_failed;
-}
-
-static void *
-uuid_export_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct obd_export, exp_uuid_hash);
-}
-
-static void
-uuid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
- class_export_get(exp);
-}
-
-static void
-uuid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
- class_export_put(exp);
-}
-
-static struct cfs_hash_ops uuid_hash_ops = {
- .hs_hash = uuid_hash,
- .hs_key = uuid_key,
- .hs_keycmp = uuid_keycmp,
- .hs_object = uuid_export_object,
- .hs_get = uuid_export_get,
- .hs_put_locked = uuid_export_put_locked,
-};
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
deleted file mode 100644
index f5e8214ac37b..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ /dev/null
@@ -1,1244 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/obd_mount.c
- *
- * Client mount routines
- *
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-#define D_MOUNT (D_SUPER | D_CONFIG/*|D_WARNING */)
-#define PRINT_CMD CDEBUG
-
-#include <obd.h>
-#include <lustre_compat.h>
-#include <obd_class.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <lustre_log.h>
-#include <lustre_disk.h>
-#include <uapi/linux/lustre/lustre_param.h>
-
-static DEFINE_SPINLOCK(client_lock);
-static struct module *client_mod;
-static int (*client_fill_super)(struct super_block *sb);
-static void (*kill_super_cb)(struct super_block *sb);
-
-/**************** config llog ********************/
-
-/** Get a config log from the MGS and process it.
- * This func is called for both clients and servers.
- * Continue to process new statements appended to the logs
- * (whenever the config lock is revoked) until lustre_end_log
- * is called.
- * @param sb The superblock is used by the MGC to write to the local copy of
- * the config log
- * @param logname The name of the llog to replicate from the MGS
- * @param cfg Since the same mgc may be used to follow multiple config logs
- * (e.g. ost1, ost2, client), the config_llog_instance keeps the state for
- * this log, and is added to the mgc's list of logs to follow.
- */
-int lustre_process_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg)
-{
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs *bufs;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct obd_device *mgc = lsi->lsi_mgc;
- int rc;
-
- LASSERT(mgc);
- LASSERT(cfg);
-
- bufs = kzalloc(sizeof(*bufs), GFP_NOFS);
- if (!bufs)
- return -ENOMEM;
-
- /* mgc_process_config */
- lustre_cfg_bufs_reset(bufs, mgc->obd_name);
- lustre_cfg_bufs_set_string(bufs, 1, logname);
- lustre_cfg_bufs_set(bufs, 2, cfg, sizeof(*cfg));
- lustre_cfg_bufs_set(bufs, 3, &sb, sizeof(sb));
- lcfg = kzalloc(lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen),
- GFP_NOFS);
- if (!lcfg) {
- rc = -ENOMEM;
- goto out;
- }
- lustre_cfg_init(lcfg, LCFG_LOG_START, bufs);
-
- rc = obd_process_config(mgc, sizeof(*lcfg), lcfg);
- kfree(lcfg);
-out:
- kfree(bufs);
-
- if (rc == -EINVAL)
- LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n",
- mgc->obd_name, logname, rc);
-
- else if (rc)
- LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.\n",
- mgc->obd_name, logname,
- rc);
-
- /* class_obd_list(); */
- return rc;
-}
-EXPORT_SYMBOL(lustre_process_log);
-
-/* Stop watching this config log for updates */
-int lustre_end_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg)
-{
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs bufs;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct obd_device *mgc = lsi->lsi_mgc;
- int rc;
-
- if (!mgc)
- return -ENOENT;
-
- /* mgc_process_config */
- lustre_cfg_bufs_reset(&bufs, mgc->obd_name);
- lustre_cfg_bufs_set_string(&bufs, 1, logname);
- if (cfg)
- lustre_cfg_bufs_set(&bufs, 2, cfg, sizeof(*cfg));
- lcfg = kzalloc(lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen),
- GFP_NOFS);
- if (!lcfg)
- return -ENOMEM;
- lustre_cfg_init(lcfg, LCFG_LOG_END, &bufs);
-
- rc = obd_process_config(mgc, sizeof(*lcfg), lcfg);
- kfree(lcfg);
- return rc;
-}
-EXPORT_SYMBOL(lustre_end_log);
-
-/**************** obd start *******************/
-
-/** lustre_cfg_bufs are a holdover from 1.4; we can still set these up from
- * lctl (and do for echo cli/srv.
- */
-static int do_lcfg(char *cfgname, lnet_nid_t nid, int cmd,
- char *s1, char *s2, char *s3, char *s4)
-{
- struct lustre_cfg_bufs bufs;
- struct lustre_cfg *lcfg = NULL;
- int rc;
-
- CDEBUG(D_TRACE, "lcfg %s %#x %s %s %s %s\n", cfgname,
- cmd, s1, s2, s3, s4);
-
- lustre_cfg_bufs_reset(&bufs, cfgname);
- if (s1)
- lustre_cfg_bufs_set_string(&bufs, 1, s1);
- if (s2)
- lustre_cfg_bufs_set_string(&bufs, 2, s2);
- if (s3)
- lustre_cfg_bufs_set_string(&bufs, 3, s3);
- if (s4)
- lustre_cfg_bufs_set_string(&bufs, 4, s4);
-
- lcfg = kzalloc(lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen),
- GFP_NOFS);
- if (!lcfg)
- return -ENOMEM;
- lustre_cfg_init(lcfg, cmd, &bufs);
- lcfg->lcfg_nid = nid;
- rc = class_process_config(lcfg);
- kfree(lcfg);
- return rc;
-}
-
-/** Call class_attach and class_setup. These methods in turn call
- * obd type-specific methods.
- */
-static int lustre_start_simple(char *obdname, char *type, char *uuid,
- char *s1, char *s2, char *s3, char *s4)
-{
- int rc;
-
- CDEBUG(D_MOUNT, "Starting obd %s (typ=%s)\n", obdname, type);
-
- rc = do_lcfg(obdname, 0, LCFG_ATTACH, type, uuid, NULL, NULL);
- if (rc) {
- CERROR("%s attach error %d\n", obdname, rc);
- return rc;
- }
- rc = do_lcfg(obdname, 0, LCFG_SETUP, s1, s2, s3, s4);
- if (rc) {
- CERROR("%s setup error %d\n", obdname, rc);
- do_lcfg(obdname, 0, LCFG_DETACH, NULL, NULL, NULL, NULL);
- }
- return rc;
-}
-
-static DEFINE_MUTEX(mgc_start_lock);
-
-/** Set up a mgc obd to process startup logs
- *
- * \param sb [in] super block of the mgc obd
- *
- * \retval 0 success, otherwise error code
- */
-int lustre_start_mgc(struct super_block *sb)
-{
- struct obd_connect_data *data = NULL;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct obd_device *obd;
- struct obd_export *exp;
- struct obd_uuid *uuid;
- class_uuid_t uuidc;
- lnet_nid_t nid;
- char nidstr[LNET_NIDSTR_SIZE];
- char *mgcname = NULL, *niduuid = NULL, *mgssec = NULL;
- char *ptr;
- int rc = 0, i = 0, j;
-
- LASSERT(lsi->lsi_lmd);
-
- /* Use nids from mount line: uml1,1@elan:uml2,2@elan:/lustre */
- ptr = lsi->lsi_lmd->lmd_dev;
- if (class_parse_nid(ptr, &nid, &ptr) == 0)
- i++;
- if (i == 0) {
- CERROR("No valid MGS nids found.\n");
- return -EINVAL;
- }
-
- mutex_lock(&mgc_start_lock);
-
- libcfs_nid2str_r(nid, nidstr, sizeof(nidstr));
- mgcname = kasprintf(GFP_NOFS,
- "%s%s", LUSTRE_MGC_OBDNAME, nidstr);
- niduuid = kasprintf(GFP_NOFS, "%s_%x", mgcname, 0);
- if (!mgcname || !niduuid) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- mgssec = lsi->lsi_lmd->lmd_mgssec ? lsi->lsi_lmd->lmd_mgssec : "";
-
- data = kzalloc(sizeof(*data), GFP_NOFS);
- if (!data) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- obd = class_name2obd(mgcname);
- if (obd && !obd->obd_stopping) {
- int recov_bk;
-
- rc = obd_set_info_async(NULL, obd->obd_self_export,
- strlen(KEY_MGSSEC), KEY_MGSSEC,
- strlen(mgssec), mgssec, NULL);
- if (rc)
- goto out_free;
-
- /* Re-using an existing MGC */
- atomic_inc(&obd->u.cli.cl_mgc_refcount);
-
- /* IR compatibility check, only for clients */
- if (lmd_is_client(lsi->lsi_lmd)) {
- int has_ir;
- int vallen = sizeof(*data);
- __u32 *flags = &lsi->lsi_lmd->lmd_flags;
-
- rc = obd_get_info(NULL, obd->obd_self_export,
- strlen(KEY_CONN_DATA), KEY_CONN_DATA,
- &vallen, data);
- LASSERT(rc == 0);
- has_ir = OCD_HAS_FLAG(data, IMP_RECOV);
- if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
- /* LMD_FLG_NOIR is for test purpose only */
- LCONSOLE_WARN(
- "Trying to mount a client with IR setting not compatible with current mgc. Force to use current mgc setting that is IR %s.\n",
- has_ir ? "enabled" : "disabled");
- if (has_ir)
- *flags &= ~LMD_FLG_NOIR;
- else
- *flags |= LMD_FLG_NOIR;
- }
- }
-
- recov_bk = 0;
-
- /* Try all connections, but only once (again).
- * We don't want to block another target from starting
- * (using its local copy of the log), but we do want to connect
- * if at all possible.
- */
- recov_bk++;
- CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname,
- recov_bk);
- rc = obd_set_info_async(NULL, obd->obd_self_export,
- sizeof(KEY_INIT_RECOV_BACKUP),
- KEY_INIT_RECOV_BACKUP,
- sizeof(recov_bk), &recov_bk, NULL);
- rc = 0;
- goto out;
- }
-
- CDEBUG(D_MOUNT, "Start MGC '%s'\n", mgcname);
-
- /* Add the primary nids for the MGS */
- i = 0;
- /* Use nids from mount line: uml1,1@elan:uml2,2@elan:/lustre */
- ptr = lsi->lsi_lmd->lmd_dev;
- while (class_parse_nid(ptr, &nid, &ptr) == 0) {
- rc = do_lcfg(mgcname, nid,
- LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
- if (!rc)
- i++;
- /* Stop at the first failover nid */
- if (*ptr == ':')
- break;
- }
- if (i == 0) {
- CERROR("No valid MGS nids found.\n");
- rc = -EINVAL;
- goto out_free;
- }
- lsi->lsi_lmd->lmd_mgs_failnodes = 1;
-
- /* Random uuid for MGC allows easier reconnects */
- uuid = kzalloc(sizeof(*uuid), GFP_NOFS);
- if (!uuid) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- ll_generate_random_uuid(uuidc);
- class_uuid_unparse(uuidc, uuid);
-
- /* Start the MGC */
- rc = lustre_start_simple(mgcname, LUSTRE_MGC_NAME,
- (char *)uuid->uuid, LUSTRE_MGS_OBDNAME,
- niduuid, NULL, NULL);
- kfree(uuid);
- if (rc)
- goto out_free;
-
- /* Add any failover MGS nids */
- i = 1;
- while (ptr && ((*ptr == ':' ||
- class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) {
- /* New failover node */
- sprintf(niduuid, "%s_%x", mgcname, i);
- j = 0;
- while (class_parse_nid_quiet(ptr, &nid, &ptr) == 0) {
- rc = do_lcfg(mgcname, nid, LCFG_ADD_UUID, niduuid,
- NULL, NULL, NULL);
- if (!rc)
- ++j;
- if (*ptr == ':')
- break;
- }
- if (j > 0) {
- rc = do_lcfg(mgcname, 0, LCFG_ADD_CONN,
- niduuid, NULL, NULL, NULL);
- if (!rc)
- i++;
- } else {
- /* at ":/fsname" */
- break;
- }
- }
- lsi->lsi_lmd->lmd_mgs_failnodes = i;
-
- obd = class_name2obd(mgcname);
- if (!obd) {
- CERROR("Can't find mgcobd %s\n", mgcname);
- rc = -ENOTCONN;
- goto out_free;
- }
-
- rc = obd_set_info_async(NULL, obd->obd_self_export,
- strlen(KEY_MGSSEC), KEY_MGSSEC,
- strlen(mgssec), mgssec, NULL);
- if (rc)
- goto out_free;
-
- /* Keep a refcount of servers/clients who started with "mount",
- * so we know when we can get rid of the mgc.
- */
- atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
-
- /* We connect to the MGS at setup, and don't disconnect until cleanup */
- data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT |
- OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV |
- OBD_CONNECT_LVB_TYPE | OBD_CONNECT_BULK_MBITS;
-
-#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
- data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB;
-#endif
-
- if (lmd_is_client(lsi->lsi_lmd) &&
- lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)
- data->ocd_connect_flags &= ~OBD_CONNECT_IMP_RECOV;
- data->ocd_version = LUSTRE_VERSION_CODE;
- rc = obd_connect(NULL, &exp, obd, &obd->obd_uuid, data, NULL);
- if (rc) {
- CERROR("connect failed %d\n", rc);
- goto out;
- }
-
- obd->u.cli.cl_mgc_mgsexp = exp;
-
-out:
- /* Keep the mgc info in the sb. Note that many lsi's can point
- * to the same mgc.
- */
- lsi->lsi_mgc = obd;
-out_free:
- mutex_unlock(&mgc_start_lock);
-
- kfree(data);
- kfree(mgcname);
- kfree(niduuid);
- return rc;
-}
-
-static int lustre_stop_mgc(struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct obd_device *obd;
- char *niduuid = NULL, *ptr = NULL;
- int i, rc = 0, len = 0;
-
- if (!lsi)
- return -ENOENT;
- obd = lsi->lsi_mgc;
- if (!obd)
- return -ENOENT;
- lsi->lsi_mgc = NULL;
-
- mutex_lock(&mgc_start_lock);
- LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
- if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
- /* This is not fatal, every client that stops
- * will call in here.
- */
- CDEBUG(D_MOUNT, "mgc still has %d references.\n",
- atomic_read(&obd->u.cli.cl_mgc_refcount));
- rc = -EBUSY;
- goto out;
- }
-
- /* The MGC has no recoverable data in any case.
- * force shutdown set in umount_begin
- */
- obd->obd_no_recov = 1;
-
- if (obd->u.cli.cl_mgc_mgsexp) {
- /* An error is not fatal, if we are unable to send the
- * disconnect mgs ping evictor cleans up the export
- */
- rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp);
- if (rc)
- CDEBUG(D_MOUNT, "disconnect failed %d\n", rc);
- }
-
- /* Save the obdname for cleaning the nid uuids, which are obdname_XX */
- len = strlen(obd->obd_name) + 6;
- niduuid = kzalloc(len, GFP_NOFS);
- if (niduuid) {
- strcpy(niduuid, obd->obd_name);
- ptr = niduuid + strlen(niduuid);
- }
-
- rc = class_manual_cleanup(obd);
- if (rc)
- goto out;
-
- /* Clean the nid uuids */
- if (!niduuid) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < lsi->lsi_lmd->lmd_mgs_failnodes; i++) {
- sprintf(ptr, "_%x", i);
- rc = do_lcfg(LUSTRE_MGC_OBDNAME, 0, LCFG_DEL_UUID,
- niduuid, NULL, NULL, NULL);
- if (rc)
- CERROR("del MDC UUID %s failed: rc = %d\n",
- niduuid, rc);
- }
-out:
- kfree(niduuid);
-
- /* class_import_put will get rid of the additional connections */
- mutex_unlock(&mgc_start_lock);
- return rc;
-}
-
-/***************** lustre superblock **************/
-
-static struct lustre_sb_info *lustre_init_lsi(struct super_block *sb)
-{
- struct lustre_sb_info *lsi;
-
- lsi = kzalloc(sizeof(*lsi), GFP_NOFS);
- if (!lsi)
- return NULL;
- lsi->lsi_lmd = kzalloc(sizeof(*lsi->lsi_lmd), GFP_NOFS);
- if (!lsi->lsi_lmd) {
- kfree(lsi);
- return NULL;
- }
-
- lsi->lsi_lmd->lmd_exclude_count = 0;
- lsi->lsi_lmd->lmd_recovery_time_soft = 0;
- lsi->lsi_lmd->lmd_recovery_time_hard = 0;
- s2lsi_nocast(sb) = lsi;
- /* we take 1 extra ref for our setup */
- atomic_set(&lsi->lsi_mounts, 1);
-
- /* Default umount style */
- lsi->lsi_flags = LSI_UMOUNT_FAILOVER;
-
- return lsi;
-}
-
-static int lustre_free_lsi(struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
-
- CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
-
- /* someone didn't call server_put_mount. */
- LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
-
- if (lsi->lsi_lmd) {
- kfree(lsi->lsi_lmd->lmd_dev);
- kfree(lsi->lsi_lmd->lmd_profile);
- kfree(lsi->lsi_lmd->lmd_mgssec);
- kfree(lsi->lsi_lmd->lmd_opts);
- if (lsi->lsi_lmd->lmd_exclude_count)
- kfree(lsi->lsi_lmd->lmd_exclude);
- kfree(lsi->lsi_lmd->lmd_mgs);
- kfree(lsi->lsi_lmd->lmd_osd_type);
- kfree(lsi->lsi_lmd->lmd_params);
-
- kfree(lsi->lsi_lmd);
- }
-
- LASSERT(!lsi->lsi_llsbi);
- kfree(lsi);
- s2lsi_nocast(sb) = NULL;
-
- return 0;
-}
-
-/* The lsi has one reference for every server that is using the disk -
- * e.g. MDT, MGS, and potentially MGC
- */
-static int lustre_put_lsi(struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
-
- CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
- if (atomic_dec_and_test(&lsi->lsi_mounts)) {
- lustre_free_lsi(sb);
- return 1;
- }
- return 0;
-}
-
-/*** SERVER NAME ***
- * <FSNAME><SEPARATOR><TYPE><INDEX>
- * FSNAME is between 1 and 8 characters (inclusive).
- * Excluded characters are '/' and ':'
- * SEPARATOR is either ':' or '-'
- * TYPE: "OST", "MDT", etc.
- * INDEX: Hex representation of the index
- */
-
-/** Get the fsname ("lustre") from the server name ("lustre-OST003F").
- * @param [in] svname server name including type and index
- * @param [out] fsname Buffer to copy filesystem name prefix into.
- * Must have at least 'strlen(fsname) + 1' chars.
- * @param [out] endptr if endptr isn't NULL it is set to end of fsname
- * rc < 0 on error
- */
-static int server_name2fsname(const char *svname, char *fsname,
- const char **endptr)
-{
- const char *dash;
-
- dash = svname + strnlen(svname, 8); /* max fsname length is 8 */
- for (; dash > svname && *dash != '-' && *dash != ':'; dash--)
- ;
- if (dash == svname)
- return -EINVAL;
-
- if (fsname) {
- strncpy(fsname, svname, dash - svname);
- fsname[dash - svname] = '\0';
- }
-
- if (endptr)
- *endptr = dash;
-
- return 0;
-}
-
-/* Get the index from the obd name.
- * rc = server type, or
- * rc < 0 on error
- * if endptr isn't NULL it is set to end of name
- */
-static int server_name2index(const char *svname, __u32 *idx,
- const char **endptr)
-{
- unsigned long index;
- int rc;
- const char *dash;
-
- /* We use server_name2fsname() just for parsing */
- rc = server_name2fsname(svname, NULL, &dash);
- if (rc != 0)
- return rc;
-
- dash++;
-
- if (strncmp(dash, "MDT", 3) == 0)
- rc = LDD_F_SV_TYPE_MDT;
- else if (strncmp(dash, "OST", 3) == 0)
- rc = LDD_F_SV_TYPE_OST;
- else
- return -EINVAL;
-
- dash += 3;
-
- if (strncmp(dash, "all", 3) == 0) {
- if (endptr)
- *endptr = dash + 3;
- return rc | LDD_F_SV_ALL;
- }
-
- index = simple_strtoul(dash, (char **)endptr, 16);
- if (idx)
- *idx = index;
-
- /* Account for -mdc after index that is possible when specifying mdt */
- if (endptr && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
- sizeof(LUSTRE_MDC_NAME) - 1) == 0)
- *endptr += sizeof(LUSTRE_MDC_NAME);
-
- return rc;
-}
-
-/*************** mount common between server and client ***************/
-
-/* Common umount */
-int lustre_common_put_super(struct super_block *sb)
-{
- int rc;
-
- CDEBUG(D_MOUNT, "dropping sb %p\n", sb);
-
- /* Drop a ref to the MGC */
- rc = lustre_stop_mgc(sb);
- if (rc && (rc != -ENOENT)) {
- if (rc != -EBUSY) {
- CERROR("Can't stop MGC: %d\n", rc);
- return rc;
- }
- /* BUSY just means that there's some other obd that
- * needs the mgc. Let him clean it up.
- */
- CDEBUG(D_MOUNT, "MGC still in use\n");
- }
- /* Drop a ref to the mounted disk */
- lustre_put_lsi(sb);
- return rc;
-}
-EXPORT_SYMBOL(lustre_common_put_super);
-
-static void lmd_print(struct lustre_mount_data *lmd)
-{
- int i;
-
- PRINT_CMD(D_MOUNT, " mount data:\n");
- if (lmd_is_client(lmd))
- PRINT_CMD(D_MOUNT, "profile: %s\n", lmd->lmd_profile);
- PRINT_CMD(D_MOUNT, "device: %s\n", lmd->lmd_dev);
- PRINT_CMD(D_MOUNT, "flags: %x\n", lmd->lmd_flags);
-
- if (lmd->lmd_opts)
- PRINT_CMD(D_MOUNT, "options: %s\n", lmd->lmd_opts);
-
- if (lmd->lmd_recovery_time_soft)
- PRINT_CMD(D_MOUNT, "recovery time soft: %d\n",
- lmd->lmd_recovery_time_soft);
-
- if (lmd->lmd_recovery_time_hard)
- PRINT_CMD(D_MOUNT, "recovery time hard: %d\n",
- lmd->lmd_recovery_time_hard);
-
- for (i = 0; i < lmd->lmd_exclude_count; i++) {
- PRINT_CMD(D_MOUNT, "exclude %d: OST%04x\n", i,
- lmd->lmd_exclude[i]);
- }
-}
-
-/* Is this server on the exclusion list */
-int lustre_check_exclusion(struct super_block *sb, char *svname)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct lustre_mount_data *lmd = lsi->lsi_lmd;
- __u32 index;
- int i, rc;
-
- rc = server_name2index(svname, &index, NULL);
- if (rc != LDD_F_SV_TYPE_OST)
- /* Only exclude OSTs */
- return 0;
-
- CDEBUG(D_MOUNT, "Check exclusion %s (%d) in %d of %s\n", svname,
- index, lmd->lmd_exclude_count, lmd->lmd_dev);
-
- for (i = 0; i < lmd->lmd_exclude_count; i++) {
- if (index == lmd->lmd_exclude[i]) {
- CWARN("Excluding %s (on exclusion list)\n", svname);
- return 1;
- }
- }
- return 0;
-}
-
-/* mount -v -o exclude=lustre-OST0001:lustre-OST0002 -t lustre ... */
-static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
-{
- const char *s1 = ptr, *s2;
- __u32 index = 0, *exclude_list;
- int rc = 0, devmax;
-
- /* The shortest an ost name can be is 8 chars: -OST0000.
- * We don't actually know the fsname at this time, so in fact
- * a user could specify any fsname.
- */
- devmax = strlen(ptr) / 8 + 1;
-
- /* temp storage until we figure out how many we have */
- exclude_list = kcalloc(devmax, sizeof(index), GFP_NOFS);
- if (!exclude_list)
- return -ENOMEM;
-
- /* we enter this fn pointing at the '=' */
- while (*s1 && *s1 != ' ' && *s1 != ',') {
- s1++;
- rc = server_name2index(s1, &index, &s2);
- if (rc < 0) {
- CERROR("Can't parse server name '%s': rc = %d\n",
- s1, rc);
- break;
- }
- if (rc == LDD_F_SV_TYPE_OST)
- exclude_list[lmd->lmd_exclude_count++] = index;
- else
- CDEBUG(D_MOUNT, "ignoring exclude %.*s: type = %#x\n",
- (uint)(s2 - s1), s1, rc);
- s1 = s2;
- /* now we are pointing at ':' (next exclude)
- * or ',' (end of excludes)
- */
- if (lmd->lmd_exclude_count >= devmax)
- break;
- }
- if (rc >= 0) /* non-err */
- rc = 0;
-
- if (lmd->lmd_exclude_count) {
- /* permanent, freed in lustre_free_lsi */
- lmd->lmd_exclude = kcalloc(lmd->lmd_exclude_count,
- sizeof(index), GFP_NOFS);
- if (lmd->lmd_exclude) {
- memcpy(lmd->lmd_exclude, exclude_list,
- sizeof(index) * lmd->lmd_exclude_count);
- } else {
- rc = -ENOMEM;
- lmd->lmd_exclude_count = 0;
- }
- }
- kfree(exclude_list);
- return rc;
-}
-
-static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr)
-{
- char *tail;
- int length;
-
- kfree(lmd->lmd_mgssec);
- lmd->lmd_mgssec = NULL;
-
- tail = strchr(ptr, ',');
- if (!tail)
- length = strlen(ptr);
- else
- length = tail - ptr;
-
- lmd->lmd_mgssec = kzalloc(length + 1, GFP_NOFS);
- if (!lmd->lmd_mgssec)
- return -ENOMEM;
-
- memcpy(lmd->lmd_mgssec, ptr, length);
- lmd->lmd_mgssec[length] = '\0';
- return 0;
-}
-
-static int lmd_parse_string(char **handle, char *ptr)
-{
- char *tail;
- int length;
-
- if (!handle || !ptr)
- return -EINVAL;
-
- kfree(*handle);
- *handle = NULL;
-
- tail = strchr(ptr, ',');
- if (!tail)
- length = strlen(ptr);
- else
- length = tail - ptr;
-
- *handle = kzalloc(length + 1, GFP_NOFS);
- if (!*handle)
- return -ENOMEM;
-
- memcpy(*handle, ptr, length);
- (*handle)[length] = '\0';
-
- return 0;
-}
-
-/* Collect multiple values for mgsnid specifiers */
-static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr)
-{
- lnet_nid_t nid;
- char *tail = *ptr;
- char *mgsnid;
- int length;
- int oldlen = 0;
-
- /* Find end of nidlist */
- while (class_parse_nid_quiet(tail, &nid, &tail) == 0)
- ;
- length = tail - *ptr;
- if (length == 0) {
- LCONSOLE_ERROR_MSG(0x159, "Can't parse NID '%s'\n", *ptr);
- return -EINVAL;
- }
-
- if (lmd->lmd_mgs)
- oldlen = strlen(lmd->lmd_mgs) + 1;
-
- mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS);
- if (!mgsnid)
- return -ENOMEM;
-
- if (lmd->lmd_mgs) {
- /* Multiple mgsnid= are taken to mean failover locations */
- memcpy(mgsnid, lmd->lmd_mgs, oldlen);
- mgsnid[oldlen - 1] = ':';
- kfree(lmd->lmd_mgs);
- }
- memcpy(mgsnid + oldlen, *ptr, length);
- mgsnid[oldlen + length] = '\0';
- lmd->lmd_mgs = mgsnid;
- *ptr = tail;
-
- return 0;
-}
-
-/** Parse mount line options
- * e.g. mount -v -t lustre -o abort_recov uml1:uml2:/lustre-client /mnt/lustre
- * dev is passed as device=uml1:/lustre by mount.lustre
- */
-static int lmd_parse(char *options, struct lustre_mount_data *lmd)
-{
- char *s1, *s2, *devname = NULL;
- struct lustre_mount_data *raw = (struct lustre_mount_data *)options;
- int rc = 0;
-
- LASSERT(lmd);
- if (!options) {
- LCONSOLE_ERROR_MSG(0x162, "Missing mount data: check that /sbin/mount.lustre is installed.\n");
- return -EINVAL;
- }
-
- /* Options should be a string - try to detect old lmd data */
- if ((raw->lmd_magic & 0xffffff00) == (LMD_MAGIC & 0xffffff00)) {
- LCONSOLE_ERROR_MSG(0x163, "You're using an old version of /sbin/mount.lustre. Please install version %s\n",
- LUSTRE_VERSION_STRING);
- return -EINVAL;
- }
- lmd->lmd_magic = LMD_MAGIC;
-
- lmd->lmd_params = kzalloc(LMD_PARAMS_MAXLEN, GFP_NOFS);
- if (!lmd->lmd_params)
- return -ENOMEM;
- lmd->lmd_params[0] = '\0';
-
- /* Set default flags here */
-
- s1 = options;
- while (*s1) {
- int clear = 0;
- int time_min = OBD_RECOVERY_TIME_MIN;
- char *s3;
-
- /* Skip whitespace and extra commas */
- while (*s1 == ' ' || *s1 == ',')
- s1++;
- s3 = s1;
-
- /* Client options are parsed in ll_options: eg. flock,
- * user_xattr, acl
- */
-
- /* Parse non-ldiskfs options here. Rather than modifying
- * ldiskfs, we just zero these out here
- */
- if (strncmp(s1, "abort_recov", 11) == 0) {
- lmd->lmd_flags |= LMD_FLG_ABORT_RECOV;
- clear++;
- } else if (strncmp(s1, "recovery_time_soft=", 19) == 0) {
- lmd->lmd_recovery_time_soft = max_t(int,
- simple_strtoul(s1 + 19, NULL, 10), time_min);
- clear++;
- } else if (strncmp(s1, "recovery_time_hard=", 19) == 0) {
- lmd->lmd_recovery_time_hard = max_t(int,
- simple_strtoul(s1 + 19, NULL, 10), time_min);
- clear++;
- } else if (strncmp(s1, "noir", 4) == 0) {
- lmd->lmd_flags |= LMD_FLG_NOIR; /* test purpose only. */
- clear++;
- } else if (strncmp(s1, "nosvc", 5) == 0) {
- lmd->lmd_flags |= LMD_FLG_NOSVC;
- clear++;
- } else if (strncmp(s1, "nomgs", 5) == 0) {
- lmd->lmd_flags |= LMD_FLG_NOMGS;
- clear++;
- } else if (strncmp(s1, "noscrub", 7) == 0) {
- lmd->lmd_flags |= LMD_FLG_NOSCRUB;
- clear++;
- } else if (strncmp(s1, PARAM_MGSNODE,
- sizeof(PARAM_MGSNODE) - 1) == 0) {
- s2 = s1 + sizeof(PARAM_MGSNODE) - 1;
- /* Assume the next mount opt is the first
- * invalid nid we get to.
- */
- rc = lmd_parse_mgs(lmd, &s2);
- if (rc)
- goto invalid;
- clear++;
- } else if (strncmp(s1, "writeconf", 9) == 0) {
- lmd->lmd_flags |= LMD_FLG_WRITECONF;
- clear++;
- } else if (strncmp(s1, "update", 6) == 0) {
- lmd->lmd_flags |= LMD_FLG_UPDATE;
- clear++;
- } else if (strncmp(s1, "virgin", 6) == 0) {
- lmd->lmd_flags |= LMD_FLG_VIRGIN;
- clear++;
- } else if (strncmp(s1, "noprimnode", 10) == 0) {
- lmd->lmd_flags |= LMD_FLG_NO_PRIMNODE;
- clear++;
- } else if (strncmp(s1, "mgssec=", 7) == 0) {
- rc = lmd_parse_mgssec(lmd, s1 + 7);
- if (rc)
- goto invalid;
- s3 = s2;
- clear++;
- /* ost exclusion list */
- } else if (strncmp(s1, "exclude=", 8) == 0) {
- rc = lmd_make_exclusion(lmd, s1 + 7);
- if (rc)
- goto invalid;
- clear++;
- } else if (strncmp(s1, "mgs", 3) == 0) {
- /* We are an MGS */
- lmd->lmd_flags |= LMD_FLG_MGS;
- clear++;
- } else if (strncmp(s1, "svname=", 7) == 0) {
- rc = lmd_parse_string(&lmd->lmd_profile, s1 + 7);
- if (rc)
- goto invalid;
- clear++;
- } else if (strncmp(s1, "param=", 6) == 0) {
- size_t length, params_length;
- char *tail = strchr(s1 + 6, ',');
-
- if (!tail) {
- length = strlen(s1);
- } else {
- lnet_nid_t nid;
- char *param_str = tail + 1;
- int supplementary = 1;
-
- while (!class_parse_nid_quiet(param_str, &nid,
- &param_str)) {
- supplementary = 0;
- }
- length = param_str - s1 - supplementary;
- }
- length -= 6;
- params_length = strlen(lmd->lmd_params);
- if (params_length + length + 1 >= LMD_PARAMS_MAXLEN)
- return -E2BIG;
- strncat(lmd->lmd_params, s1 + 6, length);
- lmd->lmd_params[params_length + length] = '\0';
- strlcat(lmd->lmd_params, " ", LMD_PARAMS_MAXLEN);
- s3 = s1 + 6 + length;
- clear++;
- } else if (strncmp(s1, "osd=", 4) == 0) {
- rc = lmd_parse_string(&lmd->lmd_osd_type, s1 + 4);
- if (rc)
- goto invalid;
- clear++;
- }
- /* Linux 2.4 doesn't pass the device, so we stuck it at the
- * end of the options.
- */
- else if (strncmp(s1, "device=", 7) == 0) {
- devname = s1 + 7;
- /* terminate options right before device. device
- * must be the last one.
- */
- *s1 = '\0';
- break;
- }
-
- /* Find next opt */
- s2 = strchr(s1, ',');
- if (!s2) {
- if (clear)
- *s1 = '\0';
- break;
- }
- s2++;
- if (clear)
- memmove(s1, s2, strlen(s2) + 1);
- else
- s1 = s2;
- }
-
- if (!devname) {
- LCONSOLE_ERROR_MSG(0x164, "Can't find the device name (need mount option 'device=...')\n");
- goto invalid;
- }
-
- s1 = strstr(devname, ":/");
- if (s1) {
- ++s1;
- lmd->lmd_flags |= LMD_FLG_CLIENT;
- /* Remove leading /s from fsname */
- while (*++s1 == '/')
- ;
- /* Freed in lustre_free_lsi */
- lmd->lmd_profile = kasprintf(GFP_NOFS, "%s-client", s1);
- if (!lmd->lmd_profile)
- return -ENOMEM;
- }
-
- /* Freed in lustre_free_lsi */
- lmd->lmd_dev = kzalloc(strlen(devname) + 1, GFP_NOFS);
- if (!lmd->lmd_dev)
- return -ENOMEM;
- strcpy(lmd->lmd_dev, devname);
-
- /* Save mount options */
- s1 = options + strlen(options) - 1;
- while (s1 >= options && (*s1 == ',' || *s1 == ' '))
- *s1-- = 0;
- if (*options != 0) {
- /* Freed in lustre_free_lsi */
- lmd->lmd_opts = kzalloc(strlen(options) + 1, GFP_NOFS);
- if (!lmd->lmd_opts)
- return -ENOMEM;
- strcpy(lmd->lmd_opts, options);
- }
-
- lmd_print(lmd);
- lmd->lmd_magic = LMD_MAGIC;
-
- return rc;
-
-invalid:
- CERROR("Bad mount options %s\n", options);
- return -EINVAL;
-}
-
-/** This is the entry point for the mount call into Lustre.
- * This is called when a server or client is mounted,
- * and this is where we start setting things up.
- * @param data Mount options (e.g. -o flock,abort_recov)
- */
-static int lustre_fill_super(struct super_block *sb, void *lmd2_data, int silent)
-{
- struct lustre_mount_data *lmd;
- struct lustre_sb_info *lsi;
- int rc;
-
- CDEBUG(D_MOUNT | D_VFSTRACE, "VFS Op: sb %p\n", sb);
-
- lsi = lustre_init_lsi(sb);
- if (!lsi)
- return -ENOMEM;
- lmd = lsi->lsi_lmd;
-
- /*
- * Disable lockdep during mount, because mount locking patterns are
- * `special'.
- */
- lockdep_off();
-
- /*
- * LU-639: the obd cleanup of last mount may not finish yet, wait here.
- */
- obd_zombie_barrier();
-
- /* Figure out the lmd from the mount options */
- if (lmd_parse(lmd2_data, lmd)) {
- lustre_put_lsi(sb);
- rc = -EINVAL;
- goto out;
- }
-
- if (lmd_is_client(lmd)) {
- bool have_client = false;
- CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile);
- if (!client_fill_super)
- request_module("lustre");
- spin_lock(&client_lock);
- if (client_fill_super && try_module_get(client_mod))
- have_client = true;
- spin_unlock(&client_lock);
- if (!have_client) {
- LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n");
- lustre_put_lsi(sb);
- rc = -ENODEV;
- } else {
- rc = lustre_start_mgc(sb);
- if (rc) {
- lustre_common_put_super(sb);
- goto out;
- }
- /* Connect and start */
- /* (should always be ll_fill_super) */
- rc = (*client_fill_super)(sb);
- /* c_f_s will call lustre_common_put_super on failure, otherwise
- * c_f_s will have taken another reference to the module */
- module_put(client_mod);
- }
- } else {
- CERROR("This is client-side-only module, cannot handle server mount.\n");
- rc = -EINVAL;
- }
-
- /* If error happens in fill_super() call, @lsi will be killed there.
- * This is why we do not put it here.
- */
- goto out;
-out:
- if (rc) {
- CERROR("Unable to mount %s (%d)\n",
- s2lsi(sb) ? lmd->lmd_dev : "", rc);
- } else {
- CDEBUG(D_SUPER, "Mount %s complete\n",
- lmd->lmd_dev);
- }
- lockdep_on();
- return rc;
-}
-
-/* We can't call ll_fill_super by name because it lives in a module that
- * must be loaded after this one.
- */
-void lustre_register_super_ops(struct module *mod,
- int (*cfs)(struct super_block *sb),
- void (*ksc)(struct super_block *sb))
-{
- spin_lock(&client_lock);
- client_mod = mod;
- client_fill_super = cfs;
- kill_super_cb = ksc;
- spin_unlock(&client_lock);
-}
-EXPORT_SYMBOL(lustre_register_super_ops);
-
-/***************** FS registration ******************/
-static struct dentry *lustre_mount(struct file_system_type *fs_type, int flags,
- const char *devname, void *data)
-{
- return mount_nodev(fs_type, flags, data, lustre_fill_super);
-}
-
-static void lustre_kill_super(struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
-
- if (kill_super_cb && lsi)
- (*kill_super_cb)(sb);
-
- kill_anon_super(sb);
-}
-
-/** Register the "lustre" fs type
- */
-static struct file_system_type lustre_fs_type = {
- .owner = THIS_MODULE,
- .name = "lustre",
- .mount = lustre_mount,
- .kill_sb = lustre_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE,
-};
-MODULE_ALIAS_FS("lustre");
-
-int lustre_register_fs(void)
-{
- return register_filesystem(&lustre_fs_type);
-}
-
-int lustre_unregister_fs(void)
-{
- return unregister_filesystem(&lustre_fs_type);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
deleted file mode 100644
index c4503bc36591..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ /dev/null
@@ -1,181 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/obdo.c
- *
- * Object Devices Class Driver
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <obd_class.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <lustre_obdo.h>
-
-void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent)
-{
- dst->o_parent_oid = fid_oid(parent);
- dst->o_parent_seq = fid_seq(parent);
- dst->o_parent_ver = fid_ver(parent);
- dst->o_valid |= OBD_MD_FLGENER | OBD_MD_FLFID;
-}
-EXPORT_SYMBOL(obdo_set_parent_fid);
-
-/* WARNING: the file systems must take care not to tinker with
- * attributes they don't manage (such as blocks).
- */
-void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid)
-{
- u32 newvalid = 0;
-
- if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
- CDEBUG(D_INODE, "valid %x, new time %lu/%lu\n",
- valid, LTIME_S(src->i_mtime),
- LTIME_S(src->i_ctime));
-
- if (valid & OBD_MD_FLATIME) {
- dst->o_atime = LTIME_S(src->i_atime);
- newvalid |= OBD_MD_FLATIME;
- }
- if (valid & OBD_MD_FLMTIME) {
- dst->o_mtime = LTIME_S(src->i_mtime);
- newvalid |= OBD_MD_FLMTIME;
- }
- if (valid & OBD_MD_FLCTIME) {
- dst->o_ctime = LTIME_S(src->i_ctime);
- newvalid |= OBD_MD_FLCTIME;
- }
- if (valid & OBD_MD_FLSIZE) {
- dst->o_size = i_size_read(src);
- newvalid |= OBD_MD_FLSIZE;
- }
- if (valid & OBD_MD_FLBLOCKS) { /* allocation of space (x512 bytes) */
- dst->o_blocks = src->i_blocks;
- newvalid |= OBD_MD_FLBLOCKS;
- }
- if (valid & OBD_MD_FLBLKSZ) { /* optimal block size */
- dst->o_blksize = 1 << src->i_blkbits;
- newvalid |= OBD_MD_FLBLKSZ;
- }
- if (valid & OBD_MD_FLTYPE) {
- dst->o_mode = (dst->o_mode & S_IALLUGO) |
- (src->i_mode & S_IFMT);
- newvalid |= OBD_MD_FLTYPE;
- }
- if (valid & OBD_MD_FLMODE) {
- dst->o_mode = (dst->o_mode & S_IFMT) |
- (src->i_mode & S_IALLUGO);
- newvalid |= OBD_MD_FLMODE;
- }
- if (valid & OBD_MD_FLUID) {
- dst->o_uid = from_kuid(&init_user_ns, src->i_uid);
- newvalid |= OBD_MD_FLUID;
- }
- if (valid & OBD_MD_FLGID) {
- dst->o_gid = from_kgid(&init_user_ns, src->i_gid);
- newvalid |= OBD_MD_FLGID;
- }
- if (valid & OBD_MD_FLFLAGS) {
- dst->o_flags = src->i_flags;
- newvalid |= OBD_MD_FLFLAGS;
- }
- dst->o_valid |= newvalid;
-}
-EXPORT_SYMBOL(obdo_from_inode);
-
-void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj)
-{
- ioobj->ioo_oid = oa->o_oi;
- if (unlikely(!(oa->o_valid & OBD_MD_FLGROUP)))
- ostid_set_seq_mdt0(&ioobj->ioo_oid);
-
- /* Since 2.4 this does not contain o_mode in the low 16 bits.
- * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs
- */
- ioobj->ioo_max_brw = 0;
-}
-EXPORT_SYMBOL(obdo_to_ioobj);
-
-/**
- * Create an obdo to send over the wire
- */
-void lustre_set_wire_obdo(const struct obd_connect_data *ocd,
- struct obdo *wobdo, const struct obdo *lobdo)
-{
- *wobdo = *lobdo;
- wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
- if (!ocd)
- return;
-
- if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
- fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
- /*
- * Currently OBD_FL_OSTID will only be used when 2.4 echo
- * client communicate with pre-2.4 server
- */
- wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
- wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
- }
-}
-EXPORT_SYMBOL(lustre_set_wire_obdo);
-
-/**
- * Create a local obdo from a wire based odbo
- */
-void lustre_get_wire_obdo(const struct obd_connect_data *ocd,
- struct obdo *lobdo, const struct obdo *wobdo)
-{
- u32 local_flags = 0;
-
- if (lobdo->o_valid & OBD_MD_FLFLAGS)
- local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
-
- *lobdo = *wobdo;
- if (local_flags) {
- lobdo->o_valid |= OBD_MD_FLFLAGS;
- lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
- lobdo->o_flags |= local_flags;
- }
- if (!ocd)
- return;
-
- if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
- fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
- /* see above */
- lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
- lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
- lobdo->o_oi.oi_fid.f_ver = 0;
- }
-}
-EXPORT_SYMBOL(lustre_get_wire_obdo);
diff --git a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
deleted file mode 100644
index 355e888885f4..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
+++ /dev/null
@@ -1,58 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/statfs_pack.c
- *
- * (Un)packing of OST/MDS requests
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/statfs.h>
-#include <lustre_export.h>
-#include <lustre_net.h>
-#include <obd_support.h>
-#include <obd_class.h>
-
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs)
-{
- memset(sfs, 0, sizeof(*sfs));
- sfs->f_type = osfs->os_type;
- sfs->f_blocks = osfs->os_blocks;
- sfs->f_bfree = osfs->os_bfree;
- sfs->f_bavail = osfs->os_bavail;
- sfs->f_files = osfs->os_files;
- sfs->f_ffree = osfs->os_ffree;
- sfs->f_bsize = osfs->os_bsize;
- sfs->f_namelen = osfs->os_namelen;
-}
-EXPORT_SYMBOL(statfs_unpack);
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
deleted file mode 100644
index 6cf7a03f048f..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/uuid.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/uuid.c
- *
- * Public include file for the UUID library
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd_support.h>
-#include <obd_class.h>
-
-void class_uuid_unparse(class_uuid_t uu, struct obd_uuid *out)
-{
- sprintf(out->uuid, "%pU", uu);
-}
-EXPORT_SYMBOL(class_uuid_unparse);
diff --git a/drivers/staging/lustre/lustre/obdecho/Makefile b/drivers/staging/lustre/lustre/obdecho/Makefile
deleted file mode 100644
index 6be66fbab872..000000000000
--- a/drivers/staging/lustre/lustre/obdecho/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LUSTRE_FS) += obdecho.o
-obdecho-y := echo_client.o
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
deleted file mode 100644
index 99a76db51ae0..000000000000
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ /dev/null
@@ -1,1724 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_ECHO
-#include <linux/libcfs/libcfs.h>
-
-#include <obd.h>
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_debug.h>
-#include <lprocfs_status.h>
-#include <cl_object.h>
-#include <lustre_fid.h>
-#include <lustre_acl.h>
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include <lustre_net.h>
-
-#include "echo_internal.h"
-
-/** \defgroup echo_client Echo Client
- * @{
- */
-
-struct echo_device {
- struct cl_device ed_cl;
- struct echo_client_obd *ed_ec;
-
- struct cl_site ed_site_myself;
- struct lu_site *ed_site;
- struct lu_device *ed_next;
-};
-
-struct echo_object {
- struct cl_object eo_cl;
- struct cl_object_header eo_hdr;
-
- struct echo_device *eo_dev;
- struct list_head eo_obj_chain;
- struct lov_oinfo *eo_oinfo;
- atomic_t eo_npages;
- int eo_deleted;
-};
-
-struct echo_object_conf {
- struct cl_object_conf eoc_cl;
- struct lov_oinfo **eoc_oinfo;
-};
-
-struct echo_page {
- struct cl_page_slice ep_cl;
- struct mutex ep_lock;
-};
-
-struct echo_lock {
- struct cl_lock_slice el_cl;
- struct list_head el_chain;
- struct echo_object *el_object;
- __u64 el_cookie;
- atomic_t el_refcount;
-};
-
-static int echo_client_setup(const struct lu_env *env,
- struct obd_device *obddev,
- struct lustre_cfg *lcfg);
-static int echo_client_cleanup(struct obd_device *obddev);
-
-/** \defgroup echo_helpers Helper functions
- * @{
- */
-static inline struct echo_device *cl2echo_dev(const struct cl_device *dev)
-{
- return container_of0(dev, struct echo_device, ed_cl);
-}
-
-static inline struct cl_device *echo_dev2cl(struct echo_device *d)
-{
- return &d->ed_cl;
-}
-
-static inline struct echo_device *obd2echo_dev(const struct obd_device *obd)
-{
- return cl2echo_dev(lu2cl_dev(obd->obd_lu_dev));
-}
-
-static inline struct cl_object *echo_obj2cl(struct echo_object *eco)
-{
- return &eco->eo_cl;
-}
-
-static inline struct echo_object *cl2echo_obj(const struct cl_object *o)
-{
- return container_of(o, struct echo_object, eo_cl);
-}
-
-static inline struct echo_page *cl2echo_page(const struct cl_page_slice *s)
-{
- return container_of(s, struct echo_page, ep_cl);
-}
-
-static inline struct echo_lock *cl2echo_lock(const struct cl_lock_slice *s)
-{
- return container_of(s, struct echo_lock, el_cl);
-}
-
-static inline struct cl_lock *echo_lock2cl(const struct echo_lock *ecl)
-{
- return ecl->el_cl.cls_lock;
-}
-
-static struct lu_context_key echo_thread_key;
-static inline struct echo_thread_info *echo_env_info(const struct lu_env *env)
-{
- struct echo_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &echo_thread_key);
- LASSERT(info);
- return info;
-}
-
-static inline
-struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
-{
- return container_of(c, struct echo_object_conf, eoc_cl);
-}
-
-/** @} echo_helpers */
-static int cl_echo_object_put(struct echo_object *eco);
-static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
- struct page **pages, int npages, int async);
-
-struct echo_thread_info {
- struct echo_object_conf eti_conf;
- struct lustre_md eti_md;
-
- struct cl_2queue eti_queue;
- struct cl_io eti_io;
- struct cl_lock eti_lock;
- struct lu_fid eti_fid;
- struct lu_fid eti_fid2;
-};
-
-/* No session used right now */
-struct echo_session_info {
- unsigned long dummy;
-};
-
-static struct kmem_cache *echo_lock_kmem;
-static struct kmem_cache *echo_object_kmem;
-static struct kmem_cache *echo_thread_kmem;
-static struct kmem_cache *echo_session_kmem;
-
-static struct lu_kmem_descr echo_caches[] = {
- {
- .ckd_cache = &echo_lock_kmem,
- .ckd_name = "echo_lock_kmem",
- .ckd_size = sizeof(struct echo_lock)
- },
- {
- .ckd_cache = &echo_object_kmem,
- .ckd_name = "echo_object_kmem",
- .ckd_size = sizeof(struct echo_object)
- },
- {
- .ckd_cache = &echo_thread_kmem,
- .ckd_name = "echo_thread_kmem",
- .ckd_size = sizeof(struct echo_thread_info)
- },
- {
- .ckd_cache = &echo_session_kmem,
- .ckd_name = "echo_session_kmem",
- .ckd_size = sizeof(struct echo_session_info)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/** \defgroup echo_page Page operations
- *
- * Echo page operations.
- *
- * @{
- */
-static int echo_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, int nonblock)
-{
- struct echo_page *ep = cl2echo_page(slice);
-
- if (!nonblock)
- mutex_lock(&ep->ep_lock);
- else if (!mutex_trylock(&ep->ep_lock))
- return -EAGAIN;
- return 0;
-}
-
-static void echo_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct echo_page *ep = cl2echo_page(slice);
-
- LASSERT(mutex_is_locked(&ep->ep_lock));
- mutex_unlock(&ep->ep_lock);
-}
-
-static void echo_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- cl_page_delete(env, slice->cpl_page);
-}
-
-static int echo_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- if (mutex_is_locked(&cl2echo_page(slice)->ep_lock))
- return -EBUSY;
- return -ENODATA;
-}
-
-static void echo_page_completion(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- LASSERT(slice->cpl_page->cp_sync_io);
-}
-
-static void echo_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
-
- atomic_dec(&eco->eo_npages);
- put_page(slice->cpl_page->cp_vmpage);
-}
-
-static int echo_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- return 0;
-}
-
-static int echo_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct echo_page *ep = cl2echo_page(slice);
-
- (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME "-page@%p %d vm@%p\n",
- ep, mutex_is_locked(&ep->ep_lock),
- slice->cpl_page->cp_vmpage);
- return 0;
-}
-
-static const struct cl_page_operations echo_page_ops = {
- .cpo_own = echo_page_own,
- .cpo_disown = echo_page_disown,
- .cpo_discard = echo_page_discard,
- .cpo_fini = echo_page_fini,
- .cpo_print = echo_page_print,
- .cpo_is_vmlocked = echo_page_is_vmlocked,
- .io = {
- [CRT_READ] = {
- .cpo_prep = echo_page_prep,
- .cpo_completion = echo_page_completion,
- },
- [CRT_WRITE] = {
- .cpo_prep = echo_page_prep,
- .cpo_completion = echo_page_completion,
- }
- }
-};
-
-/** @} echo_page */
-
-/** \defgroup echo_lock Locking
- *
- * echo lock operations
- *
- * @{
- */
-static void echo_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct echo_lock *ecl = cl2echo_lock(slice);
-
- LASSERT(list_empty(&ecl->el_chain));
- kmem_cache_free(echo_lock_kmem, ecl);
-}
-
-static const struct cl_lock_operations echo_lock_ops = {
- .clo_fini = echo_lock_fini,
-};
-
-/** @} echo_lock */
-
-/** \defgroup echo_cl_ops cl_object operations
- *
- * operations for cl_object
- *
- * @{
- */
-static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
-{
- struct echo_page *ep = cl_object_page_slice(obj, page);
- struct echo_object *eco = cl2echo_obj(obj);
-
- get_page(page->cp_vmpage);
- mutex_init(&ep->ep_lock);
- cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
- atomic_inc(&eco->eo_npages);
- return 0;
-}
-
-static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- return 0;
-}
-
-static int echo_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused)
-{
- struct echo_lock *el;
-
- el = kmem_cache_zalloc(echo_lock_kmem, GFP_NOFS);
- if (el) {
- cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
- el->el_object = cl2echo_obj(obj);
- INIT_LIST_HEAD(&el->el_chain);
- atomic_set(&el->el_refcount, 0);
- }
- return !el ? -ENOMEM : 0;
-}
-
-static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- return 0;
-}
-
-static const struct cl_object_operations echo_cl_obj_ops = {
- .coo_page_init = echo_page_init,
- .coo_lock_init = echo_lock_init,
- .coo_io_init = echo_io_init,
- .coo_conf_set = echo_conf_set
-};
-
-/** @} echo_cl_ops */
-
-/** \defgroup echo_lu_ops lu_object operations
- *
- * operations for echo lu object.
- *
- * @{
- */
-static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev));
- struct echo_client_obd *ec = ed->ed_ec;
- struct echo_object *eco = cl2echo_obj(lu2cl(obj));
- const struct cl_object_conf *cconf;
- struct echo_object_conf *econf;
-
- if (ed->ed_next) {
- struct lu_object *below;
- struct lu_device *under;
-
- under = ed->ed_next;
- below = under->ld_ops->ldo_object_alloc(env, obj->lo_header,
- under);
- if (!below)
- return -ENOMEM;
- lu_object_add(obj, below);
- }
-
- cconf = lu2cl_conf(conf);
- econf = cl2echo_conf(cconf);
-
- LASSERT(econf->eoc_oinfo);
- /*
- * Transfer the oinfo pointer to eco that it won't be
- * freed.
- */
- eco->eo_oinfo = *econf->eoc_oinfo;
- *econf->eoc_oinfo = NULL;
-
- eco->eo_dev = ed;
- atomic_set(&eco->eo_npages, 0);
- cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
-
- spin_lock(&ec->ec_lock);
- list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
- spin_unlock(&ec->ec_lock);
-
- return 0;
-}
-
-static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct echo_object *eco = cl2echo_obj(lu2cl(obj));
- struct echo_client_obd *ec = eco->eo_dev->ed_ec;
-
- LASSERT(atomic_read(&eco->eo_npages) == 0);
-
- spin_lock(&ec->ec_lock);
- list_del_init(&eco->eo_obj_chain);
- spin_unlock(&ec->ec_lock);
-
- lu_object_fini(obj);
- lu_object_header_fini(obj->lo_header);
-
- kfree(eco->eo_oinfo);
- kmem_cache_free(echo_object_kmem, eco);
-}
-
-static int echo_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- struct echo_object *obj = cl2echo_obj(lu2cl(o));
-
- return (*p)(env, cookie, "echoclient-object@%p", obj);
-}
-
-static const struct lu_object_operations echo_lu_obj_ops = {
- .loo_object_init = echo_object_init,
- .loo_object_delete = NULL,
- .loo_object_release = NULL,
- .loo_object_free = echo_object_free,
- .loo_object_print = echo_object_print,
- .loo_object_invariant = NULL
-};
-
-/** @} echo_lu_ops */
-
-/** \defgroup echo_lu_dev_ops lu_device operations
- *
- * Operations for echo lu device.
- *
- * @{
- */
-static struct lu_object *echo_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev)
-{
- struct echo_object *eco;
- struct lu_object *obj = NULL;
-
- /* we're the top dev. */
- LASSERT(!hdr);
- eco = kmem_cache_zalloc(echo_object_kmem, GFP_NOFS);
- if (eco) {
- struct cl_object_header *hdr = &eco->eo_hdr;
-
- obj = &echo_obj2cl(eco)->co_lu;
- cl_object_header_init(hdr);
- hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
-
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
-
- eco->eo_cl.co_ops = &echo_cl_obj_ops;
- obj->lo_ops = &echo_lu_obj_ops;
- }
- return obj;
-}
-
-static const struct lu_device_operations echo_device_lu_ops = {
- .ldo_object_alloc = echo_object_alloc,
-};
-
-/** @} echo_lu_dev_ops */
-
-/** \defgroup echo_init Setup and teardown
- *
- * Init and fini functions for echo client.
- *
- * @{
- */
-static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
-{
- struct cl_site *site = &ed->ed_site_myself;
- int rc;
-
- /* initialize site */
- rc = cl_site_init(site, &ed->ed_cl);
- if (rc) {
- CERROR("Cannot initialize site for echo client(%d)\n", rc);
- return rc;
- }
-
- rc = lu_site_init_finish(&site->cs_lu);
- if (rc) {
- cl_site_fini(site);
- return rc;
- }
-
- ed->ed_site = &site->cs_lu;
- return 0;
-}
-
-static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
-{
- if (ed->ed_site) {
- lu_site_fini(ed->ed_site);
- ed->ed_site = NULL;
- }
-}
-
-static void *echo_thread_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct echo_thread_info *info;
-
- info = kmem_cache_zalloc(echo_thread_kmem, GFP_NOFS);
- if (!info)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void echo_thread_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct echo_thread_info *info = data;
-
- kmem_cache_free(echo_thread_kmem, info);
-}
-
-static struct lu_context_key echo_thread_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = echo_thread_key_init,
- .lct_fini = echo_thread_key_fini,
-};
-
-static void *echo_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct echo_session_info *session;
-
- session = kmem_cache_zalloc(echo_session_kmem, GFP_NOFS);
- if (!session)
- session = ERR_PTR(-ENOMEM);
- return session;
-}
-
-static void echo_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct echo_session_info *session = data;
-
- kmem_cache_free(echo_session_kmem, session);
-}
-
-static struct lu_context_key echo_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = echo_session_key_init,
- .lct_fini = echo_session_key_fini,
-};
-
-LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
-
-static struct lu_device *echo_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- struct lu_device *next;
- struct echo_device *ed;
- struct cl_device *cd;
- struct obd_device *obd = NULL; /* to keep compiler happy */
- struct obd_device *tgt;
- const char *tgt_type_name;
- int rc, err;
-
- ed = kzalloc(sizeof(*ed), GFP_NOFS);
- if (!ed) {
- rc = -ENOMEM;
- goto out;
- }
-
- cd = &ed->ed_cl;
- rc = cl_device_init(cd, t);
- if (rc)
- goto out_free;
-
- cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
-
- obd = class_name2obd(lustre_cfg_string(cfg, 0));
- LASSERT(obd);
- LASSERT(env);
-
- tgt = class_name2obd(lustre_cfg_string(cfg, 1));
- if (!tgt) {
- CERROR("Can not find tgt device %s\n",
- lustre_cfg_string(cfg, 1));
- rc = -ENODEV;
- goto out_device_fini;
- }
-
- next = tgt->obd_lu_dev;
- if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
- CERROR("echo MDT client must be run on server\n");
- rc = -EOPNOTSUPP;
- goto out_device_fini;
- }
-
- rc = echo_site_init(env, ed);
- if (rc)
- goto out_device_fini;
-
- rc = echo_client_setup(env, obd, cfg);
- if (rc)
- goto out_site_fini;
-
- ed->ed_ec = &obd->u.echo_client;
-
- /* if echo client is to be stacked upon ost device, the next is
- * NULL since ost is not a clio device so far
- */
- if (next && !lu_device_is_cl(next))
- next = NULL;
-
- tgt_type_name = tgt->obd_type->typ_name;
- if (next) {
- if (next->ld_site) {
- rc = -EBUSY;
- goto out_cleanup;
- }
-
- next->ld_site = ed->ed_site;
- rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
- next->ld_type->ldt_name,
- NULL);
- if (rc)
- goto out_cleanup;
-
- } else {
- LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
- }
-
- ed->ed_next = next;
- return &cd->cd_lu_dev;
-
-out_cleanup:
- err = echo_client_cleanup(obd);
- if (err)
- CERROR("Cleanup obd device %s error(%d)\n",
- obd->obd_name, err);
-out_site_fini:
- echo_site_fini(env, ed);
-out_device_fini:
- cl_device_fini(&ed->ed_cl);
-out_free:
- kfree(ed);
-out:
- return ERR_PTR(rc);
-}
-
-static int echo_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- LBUG();
- return 0;
-}
-
-static struct lu_device *echo_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
- struct lu_device *next = ed->ed_next;
-
- while (next)
- next = next->ld_type->ldt_ops->ldto_device_fini(env, next);
- return NULL;
-}
-
-static void echo_lock_release(const struct lu_env *env,
- struct echo_lock *ecl,
- int still_used)
-{
- struct cl_lock *clk = echo_lock2cl(ecl);
-
- cl_lock_release(env, clk);
-}
-
-static struct lu_device *echo_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
- struct echo_client_obd *ec = ed->ed_ec;
- struct echo_object *eco;
- struct lu_device *next = ed->ed_next;
-
- CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
- ed, next);
-
- lu_site_purge(env, ed->ed_site, -1);
-
- /* check if there are objects still alive.
- * It shouldn't have any object because lu_site_purge would cleanup
- * all of cached objects. Anyway, probably the echo device is being
- * parallelly accessed.
- */
- spin_lock(&ec->ec_lock);
- list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
- eco->eo_deleted = 1;
- spin_unlock(&ec->ec_lock);
-
- /* purge again */
- lu_site_purge(env, ed->ed_site, -1);
-
- CDEBUG(D_INFO,
- "Waiting for the reference of echo object to be dropped\n");
-
- /* Wait for the last reference to be dropped. */
- spin_lock(&ec->ec_lock);
- while (!list_empty(&ec->ec_objects)) {
- spin_unlock(&ec->ec_lock);
- CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- lu_site_purge(env, ed->ed_site, -1);
- spin_lock(&ec->ec_lock);
- }
- spin_unlock(&ec->ec_lock);
-
- LASSERT(list_empty(&ec->ec_locks));
-
- CDEBUG(D_INFO, "No object exists, exiting...\n");
-
- echo_client_cleanup(d->ld_obd);
-
- while (next)
- next = next->ld_type->ldt_ops->ldto_device_free(env, next);
-
- LASSERT(ed->ed_site == d->ld_site);
- echo_site_fini(env, ed);
- cl_device_fini(&ed->ed_cl);
- kfree(ed);
-
- cl_env_cache_purge(~0);
-
- return NULL;
-}
-
-static const struct lu_device_type_operations echo_device_type_ops = {
- .ldto_init = echo_type_init,
- .ldto_fini = echo_type_fini,
-
- .ldto_start = echo_type_start,
- .ldto_stop = echo_type_stop,
-
- .ldto_device_alloc = echo_device_alloc,
- .ldto_device_free = echo_device_free,
- .ldto_device_init = echo_device_init,
- .ldto_device_fini = echo_device_fini
-};
-
-static struct lu_device_type echo_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_ECHO_CLIENT_NAME,
- .ldt_ops = &echo_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD,
-};
-
-/** @} echo_init */
-
-/** \defgroup echo_exports Exported operations
- *
- * exporting functions to echo client
- *
- * @{
- */
-
-/* Interfaces to echo client obd device */
-static struct echo_object *
-cl_echo_object_find(struct echo_device *d, const struct ost_id *oi)
-{
- struct lu_env *env;
- struct echo_thread_info *info;
- struct echo_object_conf *conf;
- struct lov_oinfo *oinfo = NULL;
- struct echo_object *eco;
- struct cl_object *obj;
- struct lu_fid *fid;
- u16 refcheck;
- int rc;
-
- LASSERTF(ostid_id(oi), DOSTID "\n", POSTID(oi));
- LASSERTF(ostid_seq(oi) == FID_SEQ_ECHO, DOSTID "\n", POSTID(oi));
-
- /* Never return an object if the obd is to be freed. */
- if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping)
- return ERR_PTR(-ENODEV);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return (void *)env;
-
- info = echo_env_info(env);
- conf = &info->eti_conf;
- if (d->ed_next) {
- oinfo = kzalloc(sizeof(*oinfo), GFP_NOFS);
- if (!oinfo) {
- eco = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- oinfo->loi_oi = *oi;
- conf->eoc_cl.u.coc_oinfo = oinfo;
- }
-
- /*
- * If echo_object_init() is successful then ownership of oinfo
- * is transferred to the object.
- */
- conf->eoc_oinfo = &oinfo;
-
- fid = &info->eti_fid;
- rc = ostid_to_fid(fid, (struct ost_id *)oi, 0);
- if (rc != 0) {
- eco = ERR_PTR(rc);
- goto out;
- }
-
- /* In the function below, .hs_keycmp resolves to
- * lu_obj_hop_keycmp()
- */
- /* coverity[overrun-buffer-val] */
- obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
- if (IS_ERR(obj)) {
- eco = (void *)obj;
- goto out;
- }
-
- eco = cl2echo_obj(obj);
- if (eco->eo_deleted) {
- cl_object_put(env, obj);
- eco = ERR_PTR(-EAGAIN);
- }
-
-out:
- kfree(oinfo);
- cl_env_put(env, &refcheck);
- return eco;
-}
-
-static int cl_echo_object_put(struct echo_object *eco)
-{
- struct lu_env *env;
- struct cl_object *obj = echo_obj2cl(eco);
- u16 refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- /* an external function to kill an object? */
- if (eco->eo_deleted) {
- struct lu_object_header *loh = obj->co_lu.lo_header;
-
- LASSERT(&eco->eo_hdr == luh2coh(loh));
- set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
- }
-
- cl_object_put(env, obj);
- cl_env_put(env, &refcheck);
- return 0;
-}
-
-static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
- u64 start, u64 end, int mode,
- __u64 *cookie, __u32 enqflags)
-{
- struct cl_io *io;
- struct cl_lock *lck;
- struct cl_object *obj;
- struct cl_lock_descr *descr;
- struct echo_thread_info *info;
- int rc = -ENOMEM;
-
- info = echo_env_info(env);
- io = &info->eti_io;
- lck = &info->eti_lock;
- obj = echo_obj2cl(eco);
-
- memset(lck, 0, sizeof(*lck));
- descr = &lck->cll_descr;
- descr->cld_obj = obj;
- descr->cld_start = cl_index(obj, start);
- descr->cld_end = cl_index(obj, end);
- descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ;
- descr->cld_enq_flags = enqflags;
- io->ci_obj = obj;
-
- rc = cl_lock_request(env, io, lck);
- if (rc == 0) {
- struct echo_client_obd *ec = eco->eo_dev->ed_ec;
- struct echo_lock *el;
-
- el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- spin_lock(&ec->ec_lock);
- if (list_empty(&el->el_chain)) {
- list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- spin_unlock(&ec->ec_lock);
- }
- return rc;
-}
-
-static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
- __u64 cookie)
-{
- struct echo_client_obd *ec = ed->ed_ec;
- struct echo_lock *ecl = NULL;
- struct list_head *el;
- int found = 0, still_used = 0;
-
- spin_lock(&ec->ec_lock);
- list_for_each(el, &ec->ec_locks) {
- ecl = list_entry(el, struct echo_lock, el_chain);
- CDEBUG(D_INFO, "ecl: %p, cookie: %#llx\n", ecl, ecl->el_cookie);
- found = (ecl->el_cookie == cookie);
- if (found) {
- if (atomic_dec_and_test(&ecl->el_refcount))
- list_del_init(&ecl->el_chain);
- else
- still_used = 1;
- break;
- }
- }
- spin_unlock(&ec->ec_lock);
-
- if (!found)
- return -ENOENT;
-
- echo_lock_release(env, ecl, still_used);
- return 0;
-}
-
-static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
-{
- struct echo_thread_info *info;
- struct cl_2queue *queue;
-
- info = echo_env_info(env);
- LASSERT(io == &info->eti_io);
-
- queue = &info->eti_queue;
- cl_page_list_add(&queue->c2_qout, page);
-}
-
-static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
- struct page **pages, int npages, int async)
-{
- struct lu_env *env;
- struct echo_thread_info *info;
- struct cl_object *obj = echo_obj2cl(eco);
- struct echo_device *ed = eco->eo_dev;
- struct cl_2queue *queue;
- struct cl_io *io;
- struct cl_page *clp;
- struct lustre_handle lh = { 0 };
- size_t page_size = cl_page_size(obj);
- u16 refcheck;
- int rc;
- int i;
-
- LASSERT((offset & ~PAGE_MASK) == 0);
- LASSERT(ed->ed_next);
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- info = echo_env_info(env);
- io = &info->eti_io;
- queue = &info->eti_queue;
-
- cl_2queue_init(queue);
-
- io->ci_ignore_layout = 1;
- rc = cl_io_init(env, io, CIT_MISC, obj);
- if (rc < 0)
- goto out;
- LASSERT(rc == 0);
-
- rc = cl_echo_enqueue0(env, eco, offset,
- offset + npages * PAGE_SIZE - 1,
- rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
- CEF_NEVER);
- if (rc < 0)
- goto error_lock;
-
- for (i = 0; i < npages; i++) {
- LASSERT(pages[i]);
- clp = cl_page_find(env, obj, cl_index(obj, offset),
- pages[i], CPT_TRANSIENT);
- if (IS_ERR(clp)) {
- rc = PTR_ERR(clp);
- break;
- }
- LASSERT(clp->cp_type == CPT_TRANSIENT);
-
- rc = cl_page_own(env, io, clp);
- if (rc) {
- LASSERT(clp->cp_state == CPS_FREEING);
- cl_page_put(env, clp);
- break;
- }
- /*
- * Add a page to the incoming page list of 2-queue.
- */
- cl_page_list_add(&queue->c2_qin, clp);
-
- /* drop the reference count for cl_page_find, so that the page
- * will be freed in cl_2queue_fini.
- */
- cl_page_put(env, clp);
- cl_page_clip(env, clp, 0, page_size);
-
- offset += page_size;
- }
-
- if (rc == 0) {
- enum cl_req_type typ = rw == READ ? CRT_READ : CRT_WRITE;
-
- async = async && (typ == CRT_WRITE);
- if (async)
- rc = cl_io_commit_async(env, io, &queue->c2_qin,
- 0, PAGE_SIZE,
- echo_commit_callback);
- else
- rc = cl_io_submit_sync(env, io, typ, queue, 0);
- CDEBUG(D_INFO, "echo_client %s write returns %d\n",
- async ? "async" : "sync", rc);
- }
-
- cl_echo_cancel0(env, ed, lh.cookie);
-error_lock:
- cl_2queue_discard(env, io, queue);
- cl_2queue_disown(env, io, queue);
- cl_2queue_fini(env, queue);
- cl_io_fini(env, io);
-out:
- cl_env_put(env, &refcheck);
- return rc;
-}
-
-/** @} echo_exports */
-
-static u64 last_object_id;
-
-static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
- struct obdo *oa)
-{
- struct echo_object *eco;
- struct echo_client_obd *ec = ed->ed_ec;
- int rc;
- int created = 0;
-
- if (!(oa->o_valid & OBD_MD_FLID) ||
- !(oa->o_valid & OBD_MD_FLGROUP) ||
- !fid_seq_is_echo(ostid_seq(&oa->o_oi))) {
- CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi));
- return -EINVAL;
- }
-
- if (!ostid_id(&oa->o_oi)) {
- rc = ostid_set_id(&oa->o_oi, ++last_object_id);
- if (rc)
- goto failed;
- }
-
- rc = obd_create(env, ec->ec_exp, oa);
- if (rc != 0) {
- CERROR("Cannot create objects: rc = %d\n", rc);
- goto failed;
- }
- created = 1;
-
- oa->o_valid |= OBD_MD_FLID;
-
- eco = cl_echo_object_find(ed, &oa->o_oi);
- if (IS_ERR(eco)) {
- rc = PTR_ERR(eco);
- goto failed;
- }
- cl_echo_object_put(eco);
-
- CDEBUG(D_INFO, "oa oid " DOSTID "\n", POSTID(&oa->o_oi));
-
- failed:
- if (created && rc)
- obd_destroy(env, ec->ec_exp, oa);
- if (rc)
- CERROR("create object failed with: rc = %d\n", rc);
- return rc;
-}
-
-static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
- struct obdo *oa)
-{
- struct echo_object *eco;
- int rc;
-
- if (!(oa->o_valid & OBD_MD_FLID) || !(oa->o_valid & OBD_MD_FLGROUP) ||
- !ostid_id(&oa->o_oi)) {
- CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi));
- return -EINVAL;
- }
-
- rc = 0;
- eco = cl_echo_object_find(ed, &oa->o_oi);
- if (!IS_ERR(eco))
- *ecop = eco;
- else
- rc = PTR_ERR(eco);
- return rc;
-}
-
-static void echo_put_object(struct echo_object *eco)
-{
- int rc;
-
- rc = cl_echo_object_put(eco);
- if (rc)
- CERROR("%s: echo client drop an object failed: rc = %d\n",
- eco->eo_dev->ed_ec->ec_exp->exp_obd->obd_name, rc);
-}
-
-static void
-echo_client_page_debug_setup(struct page *page, int rw, u64 id,
- u64 offset, u64 count)
-{
- char *addr;
- u64 stripe_off;
- u64 stripe_id;
- int delta;
-
- /* no partial pages on the client */
- LASSERT(count == PAGE_SIZE);
-
- addr = kmap(page);
-
- for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
- if (rw == OBD_BRW_WRITE) {
- stripe_off = offset + delta;
- stripe_id = id;
- } else {
- stripe_off = 0xdeadbeef00c0ffeeULL;
- stripe_id = 0xdeadbeef00c0ffeeULL;
- }
- block_debug_setup(addr + delta, OBD_ECHO_BLOCK_SIZE,
- stripe_off, stripe_id);
- }
-
- kunmap(page);
-}
-
-static int echo_client_page_debug_check(struct page *page, u64 id,
- u64 offset, u64 count)
-{
- u64 stripe_off;
- u64 stripe_id;
- char *addr;
- int delta;
- int rc;
- int rc2;
-
- /* no partial pages on the client */
- LASSERT(count == PAGE_SIZE);
-
- addr = kmap(page);
-
- for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
- stripe_off = offset + delta;
- stripe_id = id;
-
- rc2 = block_debug_check("test_brw",
- addr + delta, OBD_ECHO_BLOCK_SIZE,
- stripe_off, stripe_id);
- if (rc2 != 0) {
- CERROR("Error in echo object %#llx\n", id);
- rc = rc2;
- }
- }
-
- kunmap(page);
- return rc;
-}
-
-static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
- struct echo_object *eco, u64 offset,
- u64 count, int async)
-{
- u32 npages;
- struct brw_page *pga;
- struct brw_page *pgp;
- struct page **pages;
- u64 off;
- int i;
- int rc;
- int verify;
- gfp_t gfp_mask;
- int brw_flags = 0;
-
- verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
- (oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
- (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
-
- gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER;
-
- LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
-
- if (count <= 0 ||
- (count & (~PAGE_MASK)) != 0)
- return -EINVAL;
-
- /* XXX think again with misaligned I/O */
- npages = count >> PAGE_SHIFT;
-
- if (rw == OBD_BRW_WRITE)
- brw_flags = OBD_BRW_ASYNC;
-
- pga = kcalloc(npages, sizeof(*pga), GFP_NOFS);
- if (!pga)
- return -ENOMEM;
-
- pages = kcalloc(npages, sizeof(*pages), GFP_NOFS);
- if (!pages) {
- kfree(pga);
- return -ENOMEM;
- }
-
- for (i = 0, pgp = pga, off = offset;
- i < npages;
- i++, pgp++, off += PAGE_SIZE) {
- LASSERT(!pgp->pg); /* for cleanup */
-
- rc = -ENOMEM;
- pgp->pg = alloc_page(gfp_mask);
- if (!pgp->pg)
- goto out;
-
- pages[i] = pgp->pg;
- pgp->count = PAGE_SIZE;
- pgp->off = off;
- pgp->flag = brw_flags;
-
- if (verify)
- echo_client_page_debug_setup(pgp->pg, rw,
- ostid_id(&oa->o_oi), off,
- pgp->count);
- }
-
- /* brw mode can only be used at client */
- LASSERT(ed->ed_next);
- rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
-
- out:
- if (rc != 0 || rw != OBD_BRW_READ)
- verify = 0;
-
- for (i = 0, pgp = pga; i < npages; i++, pgp++) {
- if (!pgp->pg)
- continue;
-
- if (verify) {
- int vrc;
-
- vrc = echo_client_page_debug_check(pgp->pg,
- ostid_id(&oa->o_oi),
- pgp->off, pgp->count);
- if (vrc != 0 && rc == 0)
- rc = vrc;
- }
- __free_page(pgp->pg);
- }
- kfree(pga);
- kfree(pages);
- return rc;
-}
-
-static int echo_client_prep_commit(const struct lu_env *env,
- struct obd_export *exp, int rw,
- struct obdo *oa, struct echo_object *eco,
- u64 offset, u64 count,
- u64 batch, int async)
-{
- struct obd_ioobj ioo;
- struct niobuf_local *lnb;
- struct niobuf_remote rnb;
- u64 off;
- u64 npages, tot_pages;
- int i, ret = 0, brw_flags = 0;
-
- if (count <= 0 || (count & (~PAGE_MASK)) != 0)
- return -EINVAL;
-
- npages = batch >> PAGE_SHIFT;
- tot_pages = count >> PAGE_SHIFT;
-
- lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
- if (!lnb) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (rw == OBD_BRW_WRITE && async)
- brw_flags |= OBD_BRW_ASYNC;
-
- obdo_to_ioobj(oa, &ioo);
-
- off = offset;
-
- for (; tot_pages > 0; tot_pages -= npages) {
- int lpages;
-
- if (tot_pages < npages)
- npages = tot_pages;
-
- rnb.rnb_offset = off;
- rnb.rnb_len = npages * PAGE_SIZE;
- rnb.rnb_flags = brw_flags;
- ioo.ioo_bufcnt = 1;
- off += npages * PAGE_SIZE;
-
- lpages = npages;
- ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb);
- if (ret != 0)
- goto out;
-
- for (i = 0; i < lpages; i++) {
- struct page *page = lnb[i].lnb_page;
-
- /* read past eof? */
- if (!page && lnb[i].lnb_rc == 0)
- continue;
-
- if (async)
- lnb[i].lnb_flags |= OBD_BRW_ASYNC;
-
- if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID ||
- (oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
- (oa->o_flags & OBD_FL_DEBUG_CHECK) == 0)
- continue;
-
- if (rw == OBD_BRW_WRITE)
- echo_client_page_debug_setup(page, rw,
- ostid_id(&oa->o_oi),
- lnb[i].lnb_file_offset,
- lnb[i].lnb_len);
- else
- echo_client_page_debug_check(page,
- ostid_id(&oa->o_oi),
- lnb[i].lnb_file_offset,
- lnb[i].lnb_len);
- }
-
- ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, &rnb, npages, lnb,
- ret);
- if (ret != 0)
- goto out;
-
- /* Reuse env context. */
- lu_context_exit((struct lu_context *)&env->le_ctx);
- lu_context_enter((struct lu_context *)&env->le_ctx);
- }
-
-out:
- kfree(lnb);
- return ret;
-}
-
-static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
- struct obd_export *exp,
- struct obd_ioctl_data *data)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct echo_device *ed = obd2echo_dev(obd);
- struct echo_client_obd *ec = ed->ed_ec;
- struct obdo *oa = &data->ioc_obdo1;
- struct echo_object *eco;
- int rc;
- int async = 1;
- long test_mode;
-
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
-
- rc = echo_get_object(&eco, ed, oa);
- if (rc)
- return rc;
-
- oa->o_valid &= ~OBD_MD_FLHANDLE;
-
- /* OFD/obdfilter works only via prep/commit */
- test_mode = (long)data->ioc_pbuf1;
- if (test_mode == 1)
- async = 0;
-
- if (!ed->ed_next && test_mode != 3) {
- test_mode = 3;
- data->ioc_plen1 = data->ioc_count;
- }
-
- /* Truncate batch size to maximum */
- if (data->ioc_plen1 > PTLRPC_MAX_BRW_SIZE)
- data->ioc_plen1 = PTLRPC_MAX_BRW_SIZE;
-
- switch (test_mode) {
- case 1:
- /* fall through */
- case 2:
- rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset,
- data->ioc_count, async);
- break;
- case 3:
- rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco,
- data->ioc_offset, data->ioc_count,
- data->ioc_plen1, async);
- break;
- default:
- rc = -EINVAL;
- }
- echo_put_object(eco);
- return rc;
-}
-
-static int
-echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void __user *uarg)
-{
- struct obd_device *obd = exp->exp_obd;
- struct echo_device *ed = obd2echo_dev(obd);
- struct echo_client_obd *ec = ed->ed_ec;
- struct echo_object *eco;
- struct obd_ioctl_data *data = karg;
- struct lu_env *env;
- struct obdo *oa;
- struct lu_fid fid;
- int rw = OBD_BRW_READ;
- int rc = 0;
-
- oa = &data->ioc_obdo1;
- if (!(oa->o_valid & OBD_MD_FLGROUP)) {
- oa->o_valid |= OBD_MD_FLGROUP;
- ostid_set_seq_echo(&oa->o_oi);
- }
-
- /* This FID is unpacked just for validation at this point */
- rc = ostid_to_fid(&fid, &oa->o_oi, 0);
- if (rc < 0)
- return rc;
-
- env = kzalloc(sizeof(*env), GFP_NOFS);
- if (!env)
- return -ENOMEM;
-
- rc = lu_env_init(env, LCT_DT_THREAD);
- if (rc) {
- rc = -ENOMEM;
- goto out;
- }
-
- switch (cmd) {
- case OBD_IOC_CREATE: /* may create echo object */
- if (!capable(CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rc = echo_create_object(env, ed, oa);
- goto out;
-
- case OBD_IOC_DESTROY:
- if (!capable(CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- rc = obd_destroy(env, ec->ec_exp, oa);
- if (rc == 0)
- eco->eo_deleted = 1;
- echo_put_object(eco);
- }
- goto out;
-
- case OBD_IOC_GETATTR:
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- rc = obd_getattr(env, ec->ec_exp, oa);
- echo_put_object(eco);
- }
- goto out;
-
- case OBD_IOC_SETATTR:
- if (!capable(CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- rc = obd_setattr(env, ec->ec_exp, oa);
- echo_put_object(eco);
- }
- goto out;
-
- case OBD_IOC_BRW_WRITE:
- if (!capable(CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rw = OBD_BRW_WRITE;
- /* fall through */
- case OBD_IOC_BRW_READ:
- rc = echo_client_brw_ioctl(env, rw, exp, data);
- goto out;
-
- default:
- CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
- rc = -ENOTTY;
- goto out;
- }
-
-out:
- lu_env_fini(env);
- kfree(env);
-
- return rc;
-}
-
-static int echo_client_setup(const struct lu_env *env,
- struct obd_device *obddev, struct lustre_cfg *lcfg)
-{
- struct echo_client_obd *ec = &obddev->u.echo_client;
- struct obd_device *tgt;
- struct obd_uuid echo_uuid = { "ECHO_UUID" };
- struct obd_connect_data *ocd = NULL;
- int rc;
-
- if (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
- CERROR("requires a TARGET OBD name\n");
- return -EINVAL;
- }
-
- tgt = class_name2obd(lustre_cfg_string(lcfg, 1));
- if (!tgt || !tgt->obd_attached || !tgt->obd_set_up) {
- CERROR("device not attached or not set up (%s)\n",
- lustre_cfg_string(lcfg, 1));
- return -EINVAL;
- }
-
- spin_lock_init(&ec->ec_lock);
- INIT_LIST_HEAD(&ec->ec_objects);
- INIT_LIST_HEAD(&ec->ec_locks);
- ec->ec_unique = 0;
-
- ocd = kzalloc(sizeof(*ocd), GFP_NOFS);
- if (!ocd)
- return -ENOMEM;
-
- ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
- OBD_CONNECT_BRW_SIZE |
- OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
- OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_FID;
- ocd->ocd_brw_size = DT_MAX_BRW_SIZE;
- ocd->ocd_version = LUSTRE_VERSION_CODE;
- ocd->ocd_group = FID_SEQ_ECHO;
-
- rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
-
- kfree(ocd);
-
- if (rc != 0) {
- CERROR("fail to connect to device %s\n",
- lustre_cfg_string(lcfg, 1));
- return rc;
- }
-
- return rc;
-}
-
-static int echo_client_cleanup(struct obd_device *obddev)
-{
- struct echo_client_obd *ec = &obddev->u.echo_client;
- int rc;
-
- if (!list_empty(&obddev->obd_exports)) {
- CERROR("still has clients!\n");
- return -EBUSY;
- }
-
- LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0);
- rc = obd_disconnect(ec->ec_exp);
- if (rc != 0)
- CERROR("fail to disconnect device: %d\n", rc);
-
- return rc;
-}
-
-static int echo_client_connect(const struct lu_env *env,
- struct obd_export **exp,
- struct obd_device *src, struct obd_uuid *cluuid,
- struct obd_connect_data *data, void *localdata)
-{
- int rc;
- struct lustre_handle conn = { 0 };
-
- rc = class_connect(&conn, src, cluuid);
- if (rc == 0)
- *exp = class_conn2export(&conn);
-
- return rc;
-}
-
-static int echo_client_disconnect(struct obd_export *exp)
-{
- int rc;
-
- if (!exp) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = class_disconnect(exp);
- goto out;
- out:
- return rc;
-}
-
-static struct obd_ops echo_client_obd_ops = {
- .owner = THIS_MODULE,
- .iocontrol = echo_client_iocontrol,
- .connect = echo_client_connect,
- .disconnect = echo_client_disconnect
-};
-
-static int echo_client_init(void)
-{
- int rc;
-
- rc = lu_kmem_init(echo_caches);
- if (rc == 0) {
- rc = class_register_type(&echo_client_obd_ops, NULL,
- LUSTRE_ECHO_CLIENT_NAME,
- &echo_device_type);
- if (rc)
- lu_kmem_fini(echo_caches);
- }
- return rc;
-}
-
-static void echo_client_exit(void)
-{
- class_unregister_type(LUSTRE_ECHO_CLIENT_NAME);
- lu_kmem_fini(echo_caches);
-}
-
-static int __init obdecho_init(void)
-{
- LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
-
- LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
-
- return echo_client_init();
-}
-
-static void /*__exit*/ obdecho_exit(void)
-{
- echo_client_exit();
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Echo Client test driver");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(obdecho_init);
-module_exit(obdecho_exit);
-
-/** @} echo_client */
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
deleted file mode 100644
index 42faa164fabb..000000000000
--- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Whamcloud, Inc.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdecho/echo_internal.h
- */
-
-#ifndef _ECHO_INTERNAL_H
-#define _ECHO_INTERNAL_H
-
-/* The persistent object (i.e. actually stores stuff!) */
-#define ECHO_PERSISTENT_OBJID 1ULL
-#define ECHO_PERSISTENT_SIZE ((__u64)(1 << 20))
-
-/* block size to use for data verification */
-#define OBD_ECHO_BLOCK_SIZE (4 << 10)
-
-#endif
diff --git a/drivers/staging/lustre/lustre/osc/Makefile b/drivers/staging/lustre/lustre/osc/Makefile
deleted file mode 100644
index 30dec90e64e8..000000000000
--- a/drivers/staging/lustre/lustre/osc/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LUSTRE_FS) += osc.o
-osc-y := osc_request.o osc_dev.o osc_object.o \
- osc_page.o osc_lock.o osc_io.o osc_quota.o osc_cache.o lproc_osc.o
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
deleted file mode 100644
index dc76c35ae801..000000000000
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ /dev/null
@@ -1,843 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/statfs.h>
-#include <obd_cksum.h>
-#include <obd_class.h>
-#include <lprocfs_status.h>
-#include <linux/seq_file.h>
-#include "osc_internal.h"
-
-static ssize_t active_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%d\n", !dev->u.cli.cl_import->imp_deactive);
-}
-
-static ssize_t active_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
- if (val > 1)
- return -ERANGE;
-
- /* opposite senses */
- if (dev->u.cli.cl_import->imp_deactive == val)
- rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val);
- else
- CDEBUG(D_CONFIG, "activate %ld: ignoring repeat request\n",
- val);
-
- return count;
-}
-LUSTRE_RW_ATTR(active);
-
-static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
-
- return sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
-}
-
-static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int rc;
- unsigned long val;
- int adding, added, req_count;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val < 1 || val > OSC_MAX_RIF_MAX)
- return -ERANGE;
-
- adding = val - cli->cl_max_rpcs_in_flight;
- req_count = atomic_read(&osc_pool_req_count);
- if (adding > 0 && req_count < osc_reqpool_maxreqcount) {
- /*
- * There might be some race which will cause over-limit
- * allocation, but it is fine.
- */
- if (req_count + adding > osc_reqpool_maxreqcount)
- adding = osc_reqpool_maxreqcount - req_count;
-
- added = osc_rq_pool->prp_populate(osc_rq_pool, adding);
- atomic_add(added, &osc_pool_req_count);
- }
-
- spin_lock(&cli->cl_loi_list_lock);
- cli->cl_max_rpcs_in_flight = val;
- client_adjust_max_dirty(cli);
- spin_unlock(&cli->cl_loi_list_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_rpcs_in_flight);
-
-static ssize_t max_dirty_mb_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- long val;
- int mult;
-
- spin_lock(&cli->cl_loi_list_lock);
- val = cli->cl_dirty_max_pages;
- spin_unlock(&cli->cl_loi_list_lock);
-
- mult = 1 << (20 - PAGE_SHIFT);
- return lprocfs_read_frac_helper(buf, PAGE_SIZE, val, mult);
-}
-
-static ssize_t max_dirty_mb_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int rc;
- unsigned long pages_number;
-
- rc = kstrtoul(buffer, 10, &pages_number);
- if (rc)
- return rc;
-
- pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
-
- if (pages_number <= 0 ||
- pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
- pages_number > totalram_pages / 4) /* 1/4 of RAM */
- return -ERANGE;
-
- spin_lock(&cli->cl_loi_list_lock);
- cli->cl_dirty_max_pages = pages_number;
- osc_wake_cache_waiters(cli);
- spin_unlock(&cli->cl_loi_list_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_dirty_mb);
-
-static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = m->private;
- struct client_obd *cli = &dev->u.cli;
- int shift = 20 - PAGE_SHIFT;
-
- seq_printf(m,
- "used_mb: %ld\n"
- "busy_cnt: %ld\n"
- "reclaim: %llu\n",
- (atomic_long_read(&cli->cl_lru_in_list) +
- atomic_long_read(&cli->cl_lru_busy)) >> shift,
- atomic_long_read(&cli->cl_lru_busy),
- cli->cl_lru_reclaim);
-
- return 0;
-}
-
-/* shrink the number of caching pages to a specific number */
-static ssize_t osc_cached_mb_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct client_obd *cli = &dev->u.cli;
- long pages_number, rc;
- char kernbuf[128];
- int mult;
- u64 val;
-
- if (count >= sizeof(kernbuf))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
- kernbuf[count] = 0;
-
- mult = 1 << (20 - PAGE_SHIFT);
- buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
- kernbuf;
- rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult);
- if (rc)
- return rc;
-
- if (val > LONG_MAX)
- return -ERANGE;
- pages_number = (long)val;
-
- if (pages_number < 0)
- return -ERANGE;
-
- rc = atomic_long_read(&cli->cl_lru_in_list) - pages_number;
- if (rc > 0) {
- struct lu_env *env;
- u16 refcheck;
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- (void)osc_lru_shrink(env, cli, rc, true);
- cl_env_put(env, &refcheck);
- }
- }
-
- return count;
-}
-
-LPROC_SEQ_FOPS(osc_cached_mb);
-
-static ssize_t cur_dirty_bytes_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int len;
-
- spin_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%lu\n", cli->cl_dirty_pages << PAGE_SHIFT);
- spin_unlock(&cli->cl_loi_list_lock);
-
- return len;
-}
-LUSTRE_RO_ATTR(cur_dirty_bytes);
-
-static ssize_t cur_grant_bytes_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int len;
-
- spin_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%lu\n", cli->cl_avail_grant);
- spin_unlock(&cli->cl_loi_list_lock);
-
- return len;
-}
-
-static ssize_t cur_grant_bytes_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &obd->u.cli;
- int rc;
- unsigned long long val;
-
- rc = kstrtoull(buffer, 10, &val);
- if (rc)
- return rc;
-
- /* this is only for shrinking grant */
- spin_lock(&cli->cl_loi_list_lock);
- if (val >= cli->cl_avail_grant) {
- spin_unlock(&cli->cl_loi_list_lock);
- return -EINVAL;
- }
- spin_unlock(&cli->cl_loi_list_lock);
-
- if (cli->cl_import->imp_state == LUSTRE_IMP_FULL)
- rc = osc_shrink_grant_to_target(cli, val);
- if (rc)
- return rc;
- return count;
-}
-LUSTRE_RW_ATTR(cur_grant_bytes);
-
-static ssize_t cur_lost_grant_bytes_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int len;
-
- spin_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%lu\n", cli->cl_lost_grant);
- spin_unlock(&cli->cl_loi_list_lock);
-
- return len;
-}
-LUSTRE_RO_ATTR(cur_lost_grant_bytes);
-
-static ssize_t grant_shrink_interval_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%d\n", obd->u.cli.cl_grant_shrink_interval);
-}
-
-static ssize_t grant_shrink_interval_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val <= 0)
- return -ERANGE;
-
- obd->u.cli.cl_grant_shrink_interval = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(grant_shrink_interval);
-
-static ssize_t checksums_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%d\n", obd->u.cli.cl_checksum ? 1 : 0);
-}
-
-static ssize_t checksums_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- obd->u.cli.cl_checksum = (val ? 1 : 0);
-
- return count;
-}
-LUSTRE_RW_ATTR(checksums);
-
-static int osc_checksum_type_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *obd = m->private;
- int i;
-
- DECLARE_CKSUM_NAME;
-
- if (!obd)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
- if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0)
- continue;
- if (obd->u.cli.cl_cksum_type == (1 << i))
- seq_printf(m, "[%s] ", cksum_name[i]);
- else
- seq_printf(m, "%s ", cksum_name[i]);
- }
- seq_putc(m, '\n');
- return 0;
-}
-
-static ssize_t osc_checksum_type_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
- int i;
-
- DECLARE_CKSUM_NAME;
- char kernbuf[10];
-
- if (!obd)
- return 0;
-
- if (count > sizeof(kernbuf) - 1)
- return -EINVAL;
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
- if (count > 0 && kernbuf[count - 1] == '\n')
- kernbuf[count - 1] = '\0';
- else
- kernbuf[count] = '\0';
-
- for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
- if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0)
- continue;
- if (!strcmp(kernbuf, cksum_name[i])) {
- obd->u.cli.cl_cksum_type = 1 << i;
- return count;
- }
- }
- return -EINVAL;
-}
-
-LPROC_SEQ_FOPS(osc_checksum_type);
-
-static ssize_t resend_count_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%u\n", atomic_read(&obd->u.cli.cl_resends));
-}
-
-static ssize_t resend_count_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- atomic_set(&obd->u.cli.cl_resends, val);
-
- return count;
-}
-LUSTRE_RW_ATTR(resend_count);
-
-static ssize_t contention_seconds_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct osc_device *od = obd2osc_dev(obd);
-
- return sprintf(buf, "%u\n", od->od_contention_time);
-}
-
-static ssize_t contention_seconds_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct osc_device *od = obd2osc_dev(obd);
- int rc;
- int val;
-
- rc = kstrtoint(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val < 0)
- return -EINVAL;
-
- od->od_contention_time = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(contention_seconds);
-
-static ssize_t lockless_truncate_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct osc_device *od = obd2osc_dev(obd);
-
- return sprintf(buf, "%u\n", od->od_lockless_truncate);
-}
-
-static ssize_t lockless_truncate_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct osc_device *od = obd2osc_dev(obd);
- int rc;
- unsigned int val;
-
- rc = kstrtouint(buffer, 10, &val);
- if (rc)
- return rc;
-
- od->od_lockless_truncate = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(lockless_truncate);
-
-static ssize_t destroys_in_flight_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%u\n",
- atomic_read(&obd->u.cli.cl_destroy_in_flight));
-}
-LUSTRE_RO_ATTR(destroys_in_flight);
-
-static ssize_t max_pages_per_rpc_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
-
- return sprintf(buf, "%d\n", cli->cl_max_pages_per_rpc);
-}
-
-static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- struct obd_connect_data *ocd = &cli->cl_import->imp_connect_data;
- int chunk_mask, rc;
- unsigned long long val;
-
- rc = kstrtoull(buffer, 10, &val);
- if (rc)
- return rc;
-
- /* if the max_pages is specified in bytes, convert to pages */
- if (val >= ONE_MB_BRW_SIZE)
- val >>= PAGE_SHIFT;
-
- chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
- /* max_pages_per_rpc must be chunk aligned */
- val = (val + ~chunk_mask) & chunk_mask;
- if (!val || (ocd->ocd_brw_size &&
- val > ocd->ocd_brw_size >> PAGE_SHIFT)) {
- return -ERANGE;
- }
- spin_lock(&cli->cl_loi_list_lock);
- cli->cl_max_pages_per_rpc = val;
- client_adjust_max_dirty(cli);
- spin_unlock(&cli->cl_loi_list_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_pages_per_rpc);
-
-static ssize_t unstable_stats_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- long pages;
- int mb;
-
- pages = atomic_long_read(&cli->cl_unstable_count);
- mb = (pages * PAGE_SIZE) >> 20;
-
- return sprintf(buf, "unstable_pages: %20ld\n"
- "unstable_mb: %10d\n", pages, mb);
-}
-LUSTRE_RO_ATTR(unstable_stats);
-
-LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
-LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(osc, timeouts);
-LPROC_SEQ_FOPS_RO_TYPE(osc, state);
-
-LPROC_SEQ_FOPS_WR_ONLY(osc, ping);
-
-LPROC_SEQ_FOPS_RW_TYPE(osc, import);
-LPROC_SEQ_FOPS_RW_TYPE(osc, pinger_recov);
-
-static struct lprocfs_vars lprocfs_osc_obd_vars[] = {
- { "ping", &osc_ping_fops, NULL, 0222 },
- { "connect_flags", &osc_connect_flags_fops, NULL, 0 },
- /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/
- { "ost_server_uuid", &osc_server_uuid_fops, NULL, 0 },
- { "ost_conn_uuid", &osc_conn_uuid_fops, NULL, 0 },
- { "osc_cached_mb", &osc_cached_mb_fops, NULL },
- { "checksum_type", &osc_checksum_type_fops, NULL },
- { "timeouts", &osc_timeouts_fops, NULL, 0 },
- { "import", &osc_import_fops, NULL },
- { "state", &osc_state_fops, NULL, 0 },
- { "pinger_recov", &osc_pinger_recov_fops, NULL },
- { NULL }
-};
-
-#define pct(a, b) (b ? a * 100 / b : 0)
-
-static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
- unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
- int i;
-
- ktime_get_real_ts64(&now);
-
- spin_lock(&cli->cl_loi_list_lock);
-
- seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n",
- (s64)now.tv_sec, (unsigned long)now.tv_nsec);
- seq_printf(seq, "read RPCs in flight: %d\n",
- cli->cl_r_in_flight);
- seq_printf(seq, "write RPCs in flight: %d\n",
- cli->cl_w_in_flight);
- seq_printf(seq, "pending write pages: %d\n",
- atomic_read(&cli->cl_pending_w_pages));
- seq_printf(seq, "pending read pages: %d\n",
- atomic_read(&cli->cl_pending_r_pages));
-
- seq_puts(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_puts(seq, "pages per rpc rpcs % cum % |");
- seq_puts(seq, " rpcs % cum %\n");
-
- read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
- write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
-
- read_cum = 0;
- write_cum = 0;
- for (i = 0; i < OBD_HIST_MAX; i++) {
- unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
- unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
-
- read_cum += r;
- write_cum += w;
- seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- 1 << i, r, pct(r, read_tot),
- pct(read_cum, read_tot), w,
- pct(w, write_tot),
- pct(write_cum, write_tot));
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
-
- seq_puts(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_puts(seq, "rpcs in flight rpcs % cum % |");
- seq_puts(seq, " rpcs % cum %\n");
-
- read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
- write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
-
- read_cum = 0;
- write_cum = 0;
- for (i = 0; i < OBD_HIST_MAX; i++) {
- unsigned long r = cli->cl_read_rpc_hist.oh_buckets[i];
- unsigned long w = cli->cl_write_rpc_hist.oh_buckets[i];
-
- read_cum += r;
- write_cum += w;
- seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- i, r, pct(r, read_tot),
- pct(read_cum, read_tot), w,
- pct(w, write_tot),
- pct(write_cum, write_tot));
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
-
- seq_puts(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_puts(seq, "offset rpcs % cum % |");
- seq_puts(seq, " rpcs % cum %\n");
-
- read_tot = lprocfs_oh_sum(&cli->cl_read_offset_hist);
- write_tot = lprocfs_oh_sum(&cli->cl_write_offset_hist);
-
- read_cum = 0;
- write_cum = 0;
- for (i = 0; i < OBD_HIST_MAX; i++) {
- unsigned long r = cli->cl_read_offset_hist.oh_buckets[i];
- unsigned long w = cli->cl_write_offset_hist.oh_buckets[i];
-
- read_cum += r;
- write_cum += w;
- seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- (i == 0) ? 0 : 1 << (i - 1),
- r, pct(r, read_tot), pct(read_cum, read_tot),
- w, pct(w, write_tot), pct(write_cum, write_tot));
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
-
- spin_unlock(&cli->cl_loi_list_lock);
-
- return 0;
-}
-
-#undef pct
-
-static ssize_t osc_rpc_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
-
- lprocfs_oh_clear(&cli->cl_read_rpc_hist);
- lprocfs_oh_clear(&cli->cl_write_rpc_hist);
- lprocfs_oh_clear(&cli->cl_read_page_hist);
- lprocfs_oh_clear(&cli->cl_write_page_hist);
- lprocfs_oh_clear(&cli->cl_read_offset_hist);
- lprocfs_oh_clear(&cli->cl_write_offset_hist);
-
- return len;
-}
-
-LPROC_SEQ_FOPS(osc_rpc_stats);
-
-static int osc_stats_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct obd_device *dev = seq->private;
- struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
-
- ktime_get_real_ts64(&now);
-
- seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n",
- (s64)now.tv_sec, (unsigned long)now.tv_nsec);
- seq_printf(seq, "lockless_write_bytes\t\t%llu\n",
- stats->os_lockless_writes);
- seq_printf(seq, "lockless_read_bytes\t\t%llu\n",
- stats->os_lockless_reads);
- seq_printf(seq, "lockless_truncate\t\t%llu\n",
- stats->os_lockless_truncates);
- return 0;
-}
-
-static ssize_t osc_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct obd_device *dev = seq->private;
- struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
-
- memset(stats, 0, sizeof(*stats));
- return len;
-}
-
-LPROC_SEQ_FOPS(osc_stats);
-
-int lproc_osc_attach_seqstat(struct obd_device *dev)
-{
- int rc;
-
- rc = ldebugfs_seq_create(dev->obd_debugfs_entry, "osc_stats", 0644,
- &osc_stats_fops, dev);
- if (rc == 0)
- rc = ldebugfs_obd_seq_create(dev, "rpc_stats", 0644,
- &osc_rpc_stats_fops, dev);
-
- return rc;
-}
-
-static struct attribute *osc_attrs[] = {
- &lustre_attr_active.attr,
- &lustre_attr_checksums.attr,
- &lustre_attr_contention_seconds.attr,
- &lustre_attr_cur_dirty_bytes.attr,
- &lustre_attr_cur_grant_bytes.attr,
- &lustre_attr_cur_lost_grant_bytes.attr,
- &lustre_attr_destroys_in_flight.attr,
- &lustre_attr_grant_shrink_interval.attr,
- &lustre_attr_lockless_truncate.attr,
- &lustre_attr_max_dirty_mb.attr,
- &lustre_attr_max_pages_per_rpc.attr,
- &lustre_attr_max_rpcs_in_flight.attr,
- &lustre_attr_resend_count.attr,
- &lustre_attr_unstable_stats.attr,
- NULL,
-};
-
-static const struct attribute_group osc_attr_group = {
- .attrs = osc_attrs,
-};
-
-void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->sysfs_vars = &osc_attr_group;
- lvars->obd_vars = lprocfs_osc_obd_vars;
-}
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
deleted file mode 100644
index 459503727ce3..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ /dev/null
@@ -1,3306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- *
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * osc cache management.
- *
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include "osc_cl_internal.h"
-#include "osc_internal.h"
-
-static int extent_debug; /* set it to be true for more debug */
-
-static void osc_update_pending(struct osc_object *obj, int cmd, int delta);
-static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
- enum osc_extent_state state);
-static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
- struct osc_async_page *oap, int sent, int rc);
-static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
- int cmd);
-static int osc_refresh_count(const struct lu_env *env,
- struct osc_async_page *oap, int cmd);
-static int osc_io_unplug_async(const struct lu_env *env,
- struct client_obd *cli, struct osc_object *osc);
-static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
- unsigned int lost_grant);
-
-static void osc_extent_tree_dump0(int level, struct osc_object *obj,
- const char *func, int line);
-#define osc_extent_tree_dump(lvl, obj) \
- osc_extent_tree_dump0(lvl, obj, __func__, __LINE__)
-
-/** \addtogroup osc
- * @{
- */
-
-/* ------------------ osc extent ------------------ */
-static inline char *ext_flags(struct osc_extent *ext, char *flags)
-{
- char *buf = flags;
- *buf++ = ext->oe_rw ? 'r' : 'w';
- if (ext->oe_intree)
- *buf++ = 'i';
- if (ext->oe_sync)
- *buf++ = 'S';
- if (ext->oe_srvlock)
- *buf++ = 's';
- if (ext->oe_hp)
- *buf++ = 'h';
- if (ext->oe_urgent)
- *buf++ = 'u';
- if (ext->oe_memalloc)
- *buf++ = 'm';
- if (ext->oe_trunc_pending)
- *buf++ = 't';
- if (ext->oe_fsync_wait)
- *buf++ = 'Y';
- *buf = 0;
- return flags;
-}
-
-static inline char list_empty_marker(struct list_head *list)
-{
- return list_empty(list) ? '-' : '+';
-}
-
-#define EXTSTR "[%lu -> %lu/%lu]"
-#define EXTPARA(ext) (ext)->oe_start, (ext)->oe_end, (ext)->oe_max_end
-static const char *oes_strings[] = {
- "inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };
-
-#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \
- struct osc_extent *__ext = (extent); \
- char __buf[16]; \
- \
- CDEBUG(lvl, \
- "extent %p@{" EXTSTR ", " \
- "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \
- /* ----- extent part 0 ----- */ \
- __ext, EXTPARA(__ext), \
- /* ----- part 1 ----- */ \
- atomic_read(&__ext->oe_refc), \
- atomic_read(&__ext->oe_users), \
- list_empty_marker(&__ext->oe_link), \
- oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \
- __ext->oe_obj, \
- /* ----- part 2 ----- */ \
- __ext->oe_grants, __ext->oe_nr_pages, \
- list_empty_marker(&__ext->oe_pages), \
- waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
- __ext->oe_dlmlock, __ext->oe_mppr, __ext->oe_owner, \
- /* ----- part 4 ----- */ \
- ## __VA_ARGS__); \
- if (lvl == D_ERROR && __ext->oe_dlmlock) \
- LDLM_ERROR(__ext->oe_dlmlock, "extent: %p", __ext); \
- else \
- LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p", __ext); \
-} while (0)
-
-#undef EASSERTF
-#define EASSERTF(expr, ext, fmt, args...) do { \
- if (!(expr)) { \
- OSC_EXTENT_DUMP(D_ERROR, (ext), fmt, ##args); \
- osc_extent_tree_dump(D_ERROR, (ext)->oe_obj); \
- LASSERT(expr); \
- } \
-} while (0)
-
-#undef EASSERT
-#define EASSERT(expr, ext) EASSERTF(expr, ext, "\n")
-
-static inline struct osc_extent *rb_extent(struct rb_node *n)
-{
- return rb_entry_safe(n, struct osc_extent, oe_node);
-}
-
-static inline struct osc_extent *next_extent(struct osc_extent *ext)
-{
- if (!ext)
- return NULL;
-
- LASSERT(ext->oe_intree);
- return rb_extent(rb_next(&ext->oe_node));
-}
-
-static inline struct osc_extent *prev_extent(struct osc_extent *ext)
-{
- if (!ext)
- return NULL;
-
- LASSERT(ext->oe_intree);
- return rb_extent(rb_prev(&ext->oe_node));
-}
-
-static inline struct osc_extent *first_extent(struct osc_object *obj)
-{
- return rb_extent(rb_first(&obj->oo_root));
-}
-
-/* object must be locked by caller. */
-static int osc_extent_sanity_check0(struct osc_extent *ext,
- const char *func, const int line)
-{
- struct osc_object *obj = ext->oe_obj;
- struct osc_async_page *oap;
- size_t page_count;
- int rc = 0;
-
- if (!osc_object_is_locked(obj)) {
- rc = 9;
- goto out;
- }
-
- if (ext->oe_state >= OES_STATE_MAX) {
- rc = 10;
- goto out;
- }
-
- if (atomic_read(&ext->oe_refc) <= 0) {
- rc = 20;
- goto out;
- }
-
- if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users)) {
- rc = 30;
- goto out;
- }
-
- switch (ext->oe_state) {
- case OES_INV:
- if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
- rc = 35;
- else
- rc = 0;
- goto out;
- case OES_ACTIVE:
- if (atomic_read(&ext->oe_users) == 0) {
- rc = 40;
- goto out;
- }
- if (ext->oe_hp) {
- rc = 50;
- goto out;
- }
- if (ext->oe_fsync_wait && !ext->oe_urgent) {
- rc = 55;
- goto out;
- }
- break;
- case OES_CACHE:
- if (ext->oe_grants == 0) {
- rc = 60;
- goto out;
- }
- if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp) {
- rc = 65;
- goto out;
- }
- /* fall through */
- default:
- if (atomic_read(&ext->oe_users) > 0) {
- rc = 70;
- goto out;
- }
- }
-
- if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start) {
- rc = 80;
- goto out;
- }
-
- if (ext->oe_sync && ext->oe_grants > 0) {
- rc = 90;
- goto out;
- }
-
- if (ext->oe_dlmlock && !ldlm_is_failed(ext->oe_dlmlock)) {
- struct ldlm_extent *extent;
-
- extent = &ext->oe_dlmlock->l_policy_data.l_extent;
- if (!(extent->start <= cl_offset(osc2cl(obj), ext->oe_start) &&
- extent->end >= cl_offset(osc2cl(obj), ext->oe_max_end))) {
- rc = 100;
- goto out;
- }
-
- if (!(ext->oe_dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))) {
- rc = 102;
- goto out;
- }
- }
-
- if (ext->oe_nr_pages > ext->oe_mppr) {
- rc = 105;
- goto out;
- }
-
- /* Do not verify page list if extent is in RPC. This is because an
- * in-RPC extent is supposed to be exclusively accessible w/o lock.
- */
- if (ext->oe_state > OES_CACHE) {
- rc = 0;
- goto out;
- }
-
- if (!extent_debug) {
- rc = 0;
- goto out;
- }
-
- page_count = 0;
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- pgoff_t index = osc_index(oap2osc(oap));
- ++page_count;
- if (index > ext->oe_end || index < ext->oe_start) {
- rc = 110;
- goto out;
- }
- }
- if (page_count != ext->oe_nr_pages) {
- rc = 120;
- goto out;
- }
-
-out:
- if (rc != 0)
- OSC_EXTENT_DUMP(D_ERROR, ext,
- "%s:%d sanity check %p failed with rc = %d\n",
- func, line, ext, rc);
- return rc;
-}
-
-#define sanity_check_nolock(ext) \
- osc_extent_sanity_check0(ext, __func__, __LINE__)
-
-#define sanity_check(ext) ({ \
- int __res; \
- osc_object_lock((ext)->oe_obj); \
- __res = sanity_check_nolock(ext); \
- osc_object_unlock((ext)->oe_obj); \
- __res; \
-})
-
-/**
- * sanity check - to make sure there is no overlapped extent in the tree.
- */
-static int osc_extent_is_overlapped(struct osc_object *obj,
- struct osc_extent *ext)
-{
- struct osc_extent *tmp;
-
- LASSERT(osc_object_is_locked(obj));
-
- if (!extent_debug)
- return 0;
-
- for (tmp = first_extent(obj); tmp; tmp = next_extent(tmp)) {
- if (tmp == ext)
- continue;
- if (tmp->oe_end >= ext->oe_start &&
- tmp->oe_start <= ext->oe_end)
- return 1;
- }
- return 0;
-}
-
-static void osc_extent_state_set(struct osc_extent *ext, int state)
-{
- LASSERT(osc_object_is_locked(ext->oe_obj));
- LASSERT(state >= OES_INV && state < OES_STATE_MAX);
-
- /* Never try to sanity check a state changing extent :-) */
- /* LASSERT(sanity_check_nolock(ext) == 0); */
-
- /* TODO: validate the state machine */
- ext->oe_state = state;
- wake_up_all(&ext->oe_waitq);
-}
-
-static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
-{
- struct osc_extent *ext;
-
- ext = kmem_cache_zalloc(osc_extent_kmem, GFP_NOFS);
- if (!ext)
- return NULL;
-
- RB_CLEAR_NODE(&ext->oe_node);
- ext->oe_obj = obj;
- cl_object_get(osc2cl(obj));
- atomic_set(&ext->oe_refc, 1);
- atomic_set(&ext->oe_users, 0);
- INIT_LIST_HEAD(&ext->oe_link);
- ext->oe_state = OES_INV;
- INIT_LIST_HEAD(&ext->oe_pages);
- init_waitqueue_head(&ext->oe_waitq);
- ext->oe_dlmlock = NULL;
-
- return ext;
-}
-
-static void osc_extent_free(struct osc_extent *ext)
-{
- kmem_cache_free(osc_extent_kmem, ext);
-}
-
-static struct osc_extent *osc_extent_get(struct osc_extent *ext)
-{
- LASSERT(atomic_read(&ext->oe_refc) >= 0);
- atomic_inc(&ext->oe_refc);
- return ext;
-}
-
-static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
-{
- LASSERT(atomic_read(&ext->oe_refc) > 0);
- if (atomic_dec_and_test(&ext->oe_refc)) {
- LASSERT(list_empty(&ext->oe_link));
- LASSERT(atomic_read(&ext->oe_users) == 0);
- LASSERT(ext->oe_state == OES_INV);
- LASSERT(!ext->oe_intree);
-
- if (ext->oe_dlmlock) {
- lu_ref_add(&ext->oe_dlmlock->l_reference,
- "osc_extent", ext);
- LDLM_LOCK_PUT(ext->oe_dlmlock);
- ext->oe_dlmlock = NULL;
- }
- cl_object_put(env, osc2cl(ext->oe_obj));
- osc_extent_free(ext);
- }
-}
-
-/**
- * osc_extent_put_trust() is a special version of osc_extent_put() when
- * it's known that the caller is not the last user. This is to address the
- * problem of lacking of lu_env ;-).
- */
-static void osc_extent_put_trust(struct osc_extent *ext)
-{
- LASSERT(atomic_read(&ext->oe_refc) > 1);
- LASSERT(osc_object_is_locked(ext->oe_obj));
- atomic_dec(&ext->oe_refc);
-}
-
-/**
- * Return the extent which includes pgoff @index, or return the greatest
- * previous extent in the tree.
- */
-static struct osc_extent *osc_extent_search(struct osc_object *obj,
- pgoff_t index)
-{
- struct rb_node *n = obj->oo_root.rb_node;
- struct osc_extent *tmp, *p = NULL;
-
- LASSERT(osc_object_is_locked(obj));
- while (n) {
- tmp = rb_extent(n);
- if (index < tmp->oe_start) {
- n = n->rb_left;
- } else if (index > tmp->oe_end) {
- p = rb_extent(n);
- n = n->rb_right;
- } else {
- return tmp;
- }
- }
- return p;
-}
-
-/*
- * Return the extent covering @index, otherwise return NULL.
- * caller must have held object lock.
- */
-static struct osc_extent *osc_extent_lookup(struct osc_object *obj,
- pgoff_t index)
-{
- struct osc_extent *ext;
-
- ext = osc_extent_search(obj, index);
- if (ext && ext->oe_start <= index && index <= ext->oe_end)
- return osc_extent_get(ext);
- return NULL;
-}
-
-/* caller must have held object lock. */
-static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
-{
- struct rb_node **n = &obj->oo_root.rb_node;
- struct rb_node *parent = NULL;
- struct osc_extent *tmp;
-
- LASSERT(ext->oe_intree == 0);
- LASSERT(ext->oe_obj == obj);
- LASSERT(osc_object_is_locked(obj));
- while (*n) {
- tmp = rb_extent(*n);
- parent = *n;
-
- if (ext->oe_end < tmp->oe_start)
- n = &(*n)->rb_left;
- else if (ext->oe_start > tmp->oe_end)
- n = &(*n)->rb_right;
- else
- EASSERTF(0, tmp, EXTSTR "\n", EXTPARA(ext));
- }
- rb_link_node(&ext->oe_node, parent, n);
- rb_insert_color(&ext->oe_node, &obj->oo_root);
- osc_extent_get(ext);
- ext->oe_intree = 1;
-}
-
-/* caller must have held object lock. */
-static void osc_extent_erase(struct osc_extent *ext)
-{
- struct osc_object *obj = ext->oe_obj;
-
- LASSERT(osc_object_is_locked(obj));
- if (ext->oe_intree) {
- rb_erase(&ext->oe_node, &obj->oo_root);
- ext->oe_intree = 0;
- /* rbtree held a refcount */
- osc_extent_put_trust(ext);
- }
-}
-
-static struct osc_extent *osc_extent_hold(struct osc_extent *ext)
-{
- struct osc_object *obj = ext->oe_obj;
-
- LASSERT(osc_object_is_locked(obj));
- LASSERT(ext->oe_state == OES_ACTIVE || ext->oe_state == OES_CACHE);
- if (ext->oe_state == OES_CACHE) {
- osc_extent_state_set(ext, OES_ACTIVE);
- osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
- }
- atomic_inc(&ext->oe_users);
- list_del_init(&ext->oe_link);
- return osc_extent_get(ext);
-}
-
-static void __osc_extent_remove(struct osc_extent *ext)
-{
- LASSERT(osc_object_is_locked(ext->oe_obj));
- LASSERT(list_empty(&ext->oe_pages));
- osc_extent_erase(ext);
- list_del_init(&ext->oe_link);
- osc_extent_state_set(ext, OES_INV);
- OSC_EXTENT_DUMP(D_CACHE, ext, "destroyed.\n");
-}
-
-static void osc_extent_remove(struct osc_extent *ext)
-{
- struct osc_object *obj = ext->oe_obj;
-
- osc_object_lock(obj);
- __osc_extent_remove(ext);
- osc_object_unlock(obj);
-}
-
-/**
- * This function is used to merge extents to get better performance. It checks
- * if @cur and @victim are contiguous at chunk level.
- */
-static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
- struct osc_extent *victim)
-{
- struct osc_object *obj = cur->oe_obj;
- pgoff_t chunk_start;
- pgoff_t chunk_end;
- int ppc_bits;
-
- LASSERT(cur->oe_state == OES_CACHE);
- LASSERT(osc_object_is_locked(obj));
- if (!victim)
- return -EINVAL;
-
- if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait)
- return -EBUSY;
-
- if (cur->oe_max_end != victim->oe_max_end)
- return -ERANGE;
-
- LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
- ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
- chunk_start = cur->oe_start >> ppc_bits;
- chunk_end = cur->oe_end >> ppc_bits;
- if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
- chunk_end + 1 != victim->oe_start >> ppc_bits)
- return -ERANGE;
-
- OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur);
-
- cur->oe_start = min(cur->oe_start, victim->oe_start);
- cur->oe_end = max(cur->oe_end, victim->oe_end);
- cur->oe_grants += victim->oe_grants;
- cur->oe_nr_pages += victim->oe_nr_pages;
- /* only the following bits are needed to merge */
- cur->oe_urgent |= victim->oe_urgent;
- cur->oe_memalloc |= victim->oe_memalloc;
- list_splice_init(&victim->oe_pages, &cur->oe_pages);
- list_del_init(&victim->oe_link);
- victim->oe_nr_pages = 0;
-
- osc_extent_get(victim);
- __osc_extent_remove(victim);
- osc_extent_put(env, victim);
-
- OSC_EXTENT_DUMP(D_CACHE, cur, "after merging %p.\n", victim);
- return 0;
-}
-
-/**
- * Drop user count of osc_extent, and unplug IO asynchronously.
- */
-void osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
-{
- struct osc_object *obj = ext->oe_obj;
-
- LASSERT(atomic_read(&ext->oe_users) > 0);
- LASSERT(sanity_check(ext) == 0);
- LASSERT(ext->oe_grants > 0);
-
- if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
- LASSERT(ext->oe_state == OES_ACTIVE);
- if (ext->oe_trunc_pending) {
- /* a truncate process is waiting for this extent.
- * This may happen due to a race, check
- * osc_cache_truncate_start().
- */
- osc_extent_state_set(ext, OES_TRUNC);
- ext->oe_trunc_pending = 0;
- } else {
- osc_extent_state_set(ext, OES_CACHE);
- osc_update_pending(obj, OBD_BRW_WRITE,
- ext->oe_nr_pages);
-
- /* try to merge the previous and next extent. */
- osc_extent_merge(env, ext, prev_extent(ext));
- osc_extent_merge(env, ext, next_extent(ext));
-
- if (ext->oe_urgent)
- list_move_tail(&ext->oe_link,
- &obj->oo_urgent_exts);
- }
- osc_object_unlock(obj);
-
- osc_io_unplug_async(env, osc_cli(obj), obj);
- }
- osc_extent_put(env, ext);
-}
-
-static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
-{
- return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start);
-}
-
-/**
- * Find or create an extent which includes @index, core function to manage
- * extent tree.
- */
-static struct osc_extent *osc_extent_find(const struct lu_env *env,
- struct osc_object *obj, pgoff_t index,
- unsigned int *grants)
-{
- struct client_obd *cli = osc_cli(obj);
- struct osc_lock *olck;
- struct cl_lock_descr *descr;
- struct osc_extent *cur;
- struct osc_extent *ext;
- struct osc_extent *conflict = NULL;
- struct osc_extent *found = NULL;
- pgoff_t chunk;
- pgoff_t max_end;
- unsigned int max_pages; /* max_pages_per_rpc */
- unsigned int chunksize;
- int ppc_bits; /* pages per chunk bits */
- pgoff_t chunk_mask;
- int rc;
-
- cur = osc_extent_alloc(obj);
- if (!cur)
- return ERR_PTR(-ENOMEM);
-
- olck = osc_env_io(env)->oi_write_osclock;
- LASSERTF(olck, "page %lu is not covered by lock\n", index);
- LASSERT(olck->ols_state == OLS_GRANTED);
-
- descr = &olck->ols_cl.cls_lock->cll_descr;
- LASSERT(descr->cld_mode >= CLM_WRITE);
-
- LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
- ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
- chunk_mask = ~((1 << ppc_bits) - 1);
- chunksize = 1 << cli->cl_chunkbits;
- chunk = index >> ppc_bits;
-
- /* align end to rpc edge, rpc size may not be a power 2 integer. */
- max_pages = cli->cl_max_pages_per_rpc;
- LASSERT((max_pages & ~chunk_mask) == 0);
- max_end = index - (index % max_pages) + max_pages - 1;
- max_end = min_t(pgoff_t, max_end, descr->cld_end);
-
- /* initialize new extent by parameters so far */
- cur->oe_max_end = max_end;
- cur->oe_start = index & chunk_mask;
- cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
- if (cur->oe_start < descr->cld_start)
- cur->oe_start = descr->cld_start;
- if (cur->oe_end > max_end)
- cur->oe_end = max_end;
- cur->oe_grants = 0;
- cur->oe_mppr = max_pages;
- if (olck->ols_dlmlock) {
- LASSERT(olck->ols_hold);
- cur->oe_dlmlock = LDLM_LOCK_GET(olck->ols_dlmlock);
- lu_ref_add(&olck->ols_dlmlock->l_reference, "osc_extent", cur);
- }
-
- /* grants has been allocated by caller */
- LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
- "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
- LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR "\n",
- EXTPARA(cur));
-
-restart:
- osc_object_lock(obj);
- ext = osc_extent_search(obj, cur->oe_start);
- if (!ext)
- ext = first_extent(obj);
- while (ext) {
- pgoff_t ext_chk_start = ext->oe_start >> ppc_bits;
- pgoff_t ext_chk_end = ext->oe_end >> ppc_bits;
-
- LASSERT(sanity_check_nolock(ext) == 0);
- if (chunk > ext_chk_end + 1)
- break;
-
- /* if covering by different locks, no chance to match */
- if (olck->ols_dlmlock != ext->oe_dlmlock) {
- EASSERTF(!overlapped(ext, cur), ext,
- EXTSTR "\n", EXTPARA(cur));
-
- ext = next_extent(ext);
- continue;
- }
-
- /* discontiguous chunks? */
- if (chunk + 1 < ext_chk_start) {
- ext = next_extent(ext);
- continue;
- }
-
- /* ok, from now on, ext and cur have these attrs:
- * 1. covered by the same lock
- * 2. contiguous at chunk level or overlapping.
- */
-
- if (overlapped(ext, cur)) {
- /* cur is the minimum unit, so overlapping means
- * full contain.
- */
- EASSERTF((ext->oe_start <= cur->oe_start &&
- ext->oe_end >= cur->oe_end),
- ext, EXTSTR "\n", EXTPARA(cur));
-
- if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
- /* for simplicity, we wait for this extent to
- * finish before going forward.
- */
- conflict = osc_extent_get(ext);
- break;
- }
-
- found = osc_extent_hold(ext);
- break;
- }
-
- /* non-overlapped extent */
- if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
- /* we can't do anything for a non OES_CACHE extent, or
- * if there is someone waiting for this extent to be
- * flushed, try next one.
- */
- ext = next_extent(ext);
- continue;
- }
-
- /* check if they belong to the same rpc slot before trying to
- * merge. the extents are not overlapped and contiguous at
- * chunk level to get here.
- */
- if (ext->oe_max_end != max_end) {
- /* if they don't belong to the same RPC slot or
- * max_pages_per_rpc has ever changed, do not merge.
- */
- ext = next_extent(ext);
- continue;
- }
-
- /* it's required that an extent must be contiguous at chunk
- * level so that we know the whole extent is covered by grant
- * (the pages in the extent are NOT required to be contiguous).
- * Otherwise, it will be too much difficult to know which
- * chunks have grants allocated.
- */
-
- /* try to do front merge - extend ext's start */
- if (chunk + 1 == ext_chk_start) {
- /* ext must be chunk size aligned */
- EASSERT((ext->oe_start & ~chunk_mask) == 0, ext);
-
- /* pull ext's start back to cover cur */
- ext->oe_start = cur->oe_start;
- ext->oe_grants += chunksize;
- LASSERT(*grants >= chunksize);
- *grants -= chunksize;
-
- found = osc_extent_hold(ext);
- } else if (chunk == ext_chk_end + 1) {
- /* rear merge */
- ext->oe_end = cur->oe_end;
- ext->oe_grants += chunksize;
- LASSERT(*grants >= chunksize);
- *grants -= chunksize;
-
- /* try to merge with the next one because we just fill
- * in a gap
- */
- if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
- /* we can save extent tax from next extent */
- *grants += cli->cl_extent_tax;
-
- found = osc_extent_hold(ext);
- }
- if (found)
- break;
-
- ext = next_extent(ext);
- }
-
- osc_extent_tree_dump(D_CACHE, obj);
- if (found) {
- LASSERT(!conflict);
- if (!IS_ERR(found)) {
- LASSERT(found->oe_dlmlock == cur->oe_dlmlock);
- OSC_EXTENT_DUMP(D_CACHE, found,
- "found caching ext for %lu.\n", index);
- }
- } else if (!conflict) {
- /* create a new extent */
- EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
- cur->oe_grants = chunksize + cli->cl_extent_tax;
- LASSERT(*grants >= cur->oe_grants);
- *grants -= cur->oe_grants;
-
- cur->oe_state = OES_CACHE;
- found = osc_extent_hold(cur);
- osc_extent_insert(obj, cur);
- OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
- index, descr->cld_end);
- }
- osc_object_unlock(obj);
-
- if (conflict) {
- LASSERT(!found);
-
- /* waiting for IO to finish. Please notice that it's impossible
- * to be an OES_TRUNC extent.
- */
- rc = osc_extent_wait(env, conflict, OES_INV);
- osc_extent_put(env, conflict);
- conflict = NULL;
- if (rc < 0) {
- found = ERR_PTR(rc);
- goto out;
- }
-
- goto restart;
- }
-
-out:
- osc_extent_put(env, cur);
- return found;
-}
-
-/**
- * Called when IO is finished to an extent.
- */
-int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
- int sent, int rc)
-{
- struct client_obd *cli = osc_cli(ext->oe_obj);
- struct osc_async_page *oap;
- struct osc_async_page *tmp;
- int nr_pages = ext->oe_nr_pages;
- int lost_grant = 0;
- int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
- __u64 last_off = 0;
- int last_count = -1;
-
- OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");
-
- ext->oe_rc = rc ?: ext->oe_nr_pages;
- EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
-
- osc_lru_add_batch(cli, &ext->oe_pages);
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
- list_del_init(&oap->oap_rpc_item);
- list_del_init(&oap->oap_pending_item);
- if (last_off <= oap->oap_obj_off) {
- last_off = oap->oap_obj_off;
- last_count = oap->oap_count;
- }
-
- --ext->oe_nr_pages;
- osc_ap_completion(env, cli, oap, sent, rc);
- }
- EASSERT(ext->oe_nr_pages == 0, ext);
-
- if (!sent) {
- lost_grant = ext->oe_grants;
- } else if (blocksize < PAGE_SIZE &&
- last_count != PAGE_SIZE) {
- /* For short writes we shouldn't count parts of pages that
- * span a whole chunk on the OST side, or our accounting goes
- * wrong. Should match the code in filter_grant_check.
- */
- int offset = last_off & ~PAGE_MASK;
- int count = last_count + (offset & (blocksize - 1));
- int end = (offset + last_count) & (blocksize - 1);
-
- if (end)
- count += blocksize - end;
-
- lost_grant = PAGE_SIZE - count;
- }
- if (ext->oe_grants > 0)
- osc_free_grant(cli, nr_pages, lost_grant);
-
- osc_extent_remove(ext);
- /* put the refcount for RPC */
- osc_extent_put(env, ext);
- return 0;
-}
-
-static int extent_wait_cb(struct osc_extent *ext, enum osc_extent_state state)
-{
- int ret;
-
- osc_object_lock(ext->oe_obj);
- ret = ext->oe_state == state;
- osc_object_unlock(ext->oe_obj);
-
- return ret;
-}
-
-/**
- * Wait for the extent's state to become @state.
- */
-static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
- enum osc_extent_state state)
-{
- struct osc_object *obj = ext->oe_obj;
- int rc = 0;
-
- osc_object_lock(obj);
- LASSERT(sanity_check_nolock(ext) == 0);
- /* `Kick' this extent only if the caller is waiting for it to be
- * written out.
- */
- if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp &&
- !ext->oe_trunc_pending) {
- if (ext->oe_state == OES_ACTIVE) {
- ext->oe_urgent = 1;
- } else if (ext->oe_state == OES_CACHE) {
- ext->oe_urgent = 1;
- osc_extent_hold(ext);
- rc = 1;
- }
- }
- osc_object_unlock(obj);
- if (rc == 1)
- osc_extent_release(env, ext);
-
- /* wait for the extent until its state becomes @state */
- rc = wait_event_idle_timeout(ext->oe_waitq,
- extent_wait_cb(ext, state), 600 * HZ);
- if (rc == 0) {
- OSC_EXTENT_DUMP(D_ERROR, ext,
- "%s: wait ext to %u timedout, recovery in progress?\n",
- cli_name(osc_cli(obj)), state);
-
- wait_event_idle(ext->oe_waitq, extent_wait_cb(ext, state));
- }
- if (ext->oe_rc < 0)
- rc = ext->oe_rc;
- else
- rc = 0;
- return rc;
-}
-
-/**
- * Discard pages with index greater than @size. If @ext is overlapped with
- * @size, then partial truncate happens.
- */
-static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
- bool partial)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct osc_object *obj = ext->oe_obj;
- struct client_obd *cli = osc_cli(obj);
- struct osc_async_page *oap;
- struct osc_async_page *tmp;
- int pages_in_chunk = 0;
- int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
- __u64 trunc_chunk = trunc_index >> ppc_bits;
- int grants = 0;
- int nr_pages = 0;
- int rc = 0;
- u16 refcheck;
-
- LASSERT(sanity_check(ext) == 0);
- EASSERT(ext->oe_state == OES_TRUNC, ext);
- EASSERT(!ext->oe_urgent, ext);
-
- /* Request new lu_env.
- * We can't use that env from osc_cache_truncate_start() because
- * it's from lov_io_sub and not fully initialized.
- */
- env = cl_env_get(&refcheck);
- io = &osc_env_info(env)->oti_io;
- io->ci_obj = cl_object_top(osc2cl(obj));
- io->ci_ignore_layout = 1;
- rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (rc < 0)
- goto out;
-
- /* discard all pages with index greater then trunc_index */
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
- pgoff_t index = osc_index(oap2osc(oap));
- struct cl_page *page = oap2cl_page(oap);
-
- LASSERT(list_empty(&oap->oap_rpc_item));
-
- /* only discard the pages with their index greater than
- * trunc_index, and ...
- */
- if (index < trunc_index ||
- (index == trunc_index && partial)) {
- /* accounting how many pages remaining in the chunk
- * so that we can calculate grants correctly. */
- if (index >> ppc_bits == trunc_chunk)
- ++pages_in_chunk;
- continue;
- }
-
- list_del_init(&oap->oap_pending_item);
-
- cl_page_get(page);
- lu_ref_add(&page->cp_reference, "truncate", current);
-
- if (cl_page_own(env, io, page) == 0) {
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- LASSERT(0);
- }
-
- lu_ref_del(&page->cp_reference, "truncate", current);
- cl_page_put(env, page);
-
- --ext->oe_nr_pages;
- ++nr_pages;
- }
- EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
- ext->oe_nr_pages == 0),
- ext, "trunc_index %lu, partial %d\n", trunc_index, partial);
-
- osc_object_lock(obj);
- if (ext->oe_nr_pages == 0) {
- LASSERT(pages_in_chunk == 0);
- grants = ext->oe_grants;
- ext->oe_grants = 0;
- } else { /* calculate how many grants we can free */
- int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk;
- pgoff_t last_index;
-
- /* if there is no pages in this chunk, we can also free grants
- * for the last chunk
- */
- if (pages_in_chunk == 0) {
- /* if this is the 1st chunk and no pages in this chunk,
- * ext->oe_nr_pages must be zero, so we should be in
- * the other if-clause.
- */
- LASSERT(trunc_chunk > 0);
- --trunc_chunk;
- ++chunks;
- }
-
- /* this is what we can free from this extent */
- grants = chunks << cli->cl_chunkbits;
- ext->oe_grants -= grants;
- last_index = ((trunc_chunk + 1) << ppc_bits) - 1;
- ext->oe_end = min(last_index, ext->oe_max_end);
- LASSERT(ext->oe_end >= ext->oe_start);
- LASSERT(ext->oe_grants > 0);
- }
- osc_object_unlock(obj);
-
- if (grants > 0 || nr_pages > 0)
- osc_free_grant(cli, nr_pages, grants);
-
-out:
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- return rc;
-}
-
-/**
- * This function is used to make the extent prepared for transfer.
- * A race with flushing page - ll_writepage() has to be handled cautiously.
- */
-static int osc_extent_make_ready(const struct lu_env *env,
- struct osc_extent *ext)
-{
- struct osc_async_page *oap;
- struct osc_async_page *last = NULL;
- struct osc_object *obj = ext->oe_obj;
- unsigned int page_count = 0;
- int rc;
-
- /* we're going to grab page lock, so object lock must not be taken. */
- LASSERT(sanity_check(ext) == 0);
- /* in locking state, any process should not touch this extent. */
- EASSERT(ext->oe_state == OES_LOCKING, ext);
- EASSERT(ext->oe_owner, ext);
-
- OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");
-
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- ++page_count;
- if (!last || last->oap_obj_off < oap->oap_obj_off)
- last = oap;
-
- /* checking ASYNC_READY is race safe */
- if ((oap->oap_async_flags & ASYNC_READY) != 0)
- continue;
-
- rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
- switch (rc) {
- case 0:
- spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_READY;
- spin_unlock(&oap->oap_lock);
- break;
- case -EALREADY:
- LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
- break;
- default:
- LASSERTF(0, "unknown return code: %d\n", rc);
- }
- }
-
- LASSERT(page_count == ext->oe_nr_pages);
- LASSERT(last);
- /* the last page is the only one we need to refresh its count by
- * the size of file.
- */
- if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
- int last_oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
-
- LASSERT(last_oap_count > 0);
- LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE);
- last->oap_count = last_oap_count;
- spin_lock(&last->oap_lock);
- last->oap_async_flags |= ASYNC_COUNT_STABLE;
- spin_unlock(&last->oap_lock);
- }
-
- /* for the rest of pages, we don't need to call osf_refresh_count()
- * because it's known they are not the last page
- */
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
- oap->oap_count = PAGE_SIZE - oap->oap_page_off;
- spin_lock(&last->oap_lock);
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- spin_unlock(&last->oap_lock);
- }
- }
-
- osc_object_lock(obj);
- osc_extent_state_set(ext, OES_RPC);
- osc_object_unlock(obj);
- /* get a refcount for RPC. */
- osc_extent_get(ext);
-
- return 0;
-}
-
-/**
- * Quick and simple version of osc_extent_find(). This function is frequently
- * called to expand the extent for the same IO. To expand the extent, the
- * page index must be in the same or next chunk of ext->oe_end.
- */
-static int osc_extent_expand(struct osc_extent *ext, pgoff_t index,
- unsigned int *grants)
-{
- struct osc_object *obj = ext->oe_obj;
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *next;
- int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
- pgoff_t chunk = index >> ppc_bits;
- pgoff_t end_chunk;
- pgoff_t end_index;
- unsigned int chunksize = 1 << cli->cl_chunkbits;
- int rc = 0;
-
- LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
- osc_object_lock(obj);
- LASSERT(sanity_check_nolock(ext) == 0);
- end_chunk = ext->oe_end >> ppc_bits;
- if (chunk > end_chunk + 1) {
- rc = -ERANGE;
- goto out;
- }
-
- if (end_chunk >= chunk) {
- rc = 0;
- goto out;
- }
-
- LASSERT(end_chunk + 1 == chunk);
- /* try to expand this extent to cover @index */
- end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);
-
- next = next_extent(ext);
- if (next && next->oe_start <= end_index) {
- /* complex mode - overlapped with the next extent,
- * this case will be handled by osc_extent_find()
- */
- rc = -EAGAIN;
- goto out;
- }
-
- ext->oe_end = end_index;
- ext->oe_grants += chunksize;
- LASSERT(*grants >= chunksize);
- *grants -= chunksize;
- EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext,
- "overlapped after expanding for %lu.\n", index);
-
-out:
- osc_object_unlock(obj);
- return rc;
-}
-
-static void osc_extent_tree_dump0(int level, struct osc_object *obj,
- const char *func, int line)
-{
- struct osc_extent *ext;
- int cnt;
-
- CDEBUG(level, "Dump object %p extents at %s:%d, mppr: %u.\n",
- obj, func, line, osc_cli(obj)->cl_max_pages_per_rpc);
-
- /* osc_object_lock(obj); */
- cnt = 1;
- for (ext = first_extent(obj); ext; ext = next_extent(ext))
- OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);
-
- cnt = 1;
- list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
- OSC_EXTENT_DUMP(level, ext, "hp %d.\n", cnt++);
-
- cnt = 1;
- list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
- OSC_EXTENT_DUMP(level, ext, "urgent %d.\n", cnt++);
-
- cnt = 1;
- list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
- OSC_EXTENT_DUMP(level, ext, "reading %d.\n", cnt++);
- /* osc_object_unlock(obj); */
-}
-
-/* ------------------ osc extent end ------------------ */
-
-static inline int osc_is_ready(struct osc_object *osc)
-{
- return !list_empty(&osc->oo_ready_item) ||
- !list_empty(&osc->oo_hp_ready_item);
-}
-
-#define OSC_IO_DEBUG(OSC, STR, args...) \
- CDEBUG(D_CACHE, "obj %p ready %d|%c|%c wr %d|%c|%c rd %d|%c " STR, \
- (OSC), osc_is_ready(OSC), \
- list_empty_marker(&(OSC)->oo_hp_ready_item), \
- list_empty_marker(&(OSC)->oo_ready_item), \
- atomic_read(&(OSC)->oo_nr_writes), \
- list_empty_marker(&(OSC)->oo_hp_exts), \
- list_empty_marker(&(OSC)->oo_urgent_exts), \
- atomic_read(&(OSC)->oo_nr_reads), \
- list_empty_marker(&(OSC)->oo_reading_exts), \
- ##args)
-
-static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
- int cmd)
-{
- struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = oap2cl_page(oap);
- int result;
-
- LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
-
- result = cl_page_make_ready(env, page, CRT_WRITE);
- if (result == 0)
- opg->ops_submit_time = cfs_time_current();
- return result;
-}
-
-static int osc_refresh_count(const struct lu_env *env,
- struct osc_async_page *oap, int cmd)
-{
- struct osc_page *opg = oap2osc_page(oap);
- pgoff_t index = osc_index(oap2osc(oap));
- struct cl_object *obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
-
- int result;
- loff_t kms;
-
- /* readpage queues with _COUNT_STABLE, shouldn't get here. */
- LASSERT(!(cmd & OBD_BRW_READ));
- obj = opg->ops_cl.cpl_obj;
-
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
- if (result < 0)
- return result;
- kms = attr->cat_kms;
- if (cl_offset(obj, index) >= kms)
- /* catch race with truncate */
- return 0;
- else if (cl_offset(obj, index + 1) > kms)
- /* catch sub-page write at end of file */
- return kms % PAGE_SIZE;
- else
- return PAGE_SIZE;
-}
-
-static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
- int cmd, int rc)
-{
- struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = oap2cl_page(oap);
- enum cl_req_type crt;
- int srvlock;
-
- cmd &= ~OBD_BRW_NOQUOTA;
- LASSERTF(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ),
- "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
- LASSERTF(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE),
- "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
- LASSERT(opg->ops_transfer_pinned);
-
- crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
- /* Clear opg->ops_transfer_pinned before VM lock is released. */
- opg->ops_transfer_pinned = 0;
-
- opg->ops_submit_time = 0;
- srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
-
- /* statistic */
- if (rc == 0 && srvlock) {
- struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
- struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
- size_t bytes = oap->oap_count;
-
- if (crt == CRT_READ)
- stats->os_lockless_reads += bytes;
- else
- stats->os_lockless_writes += bytes;
- }
-
- /*
- * This has to be the last operation with the page, as locks are
- * released in cl_page_completion() and nothing except for the
- * reference counter protects page from concurrent reclaim.
- */
- lu_ref_del(&page->cp_reference, "transfer", page);
-
- cl_page_completion(env, page, crt, rc);
- cl_page_put(env, page);
-
- return 0;
-}
-
-#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
- struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %lu/%lu dirty_pages: %ld/%lu " \
- "dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
- "lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n", \
- cli_name(__tmp), \
- __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
- atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
- __tmp->cl_lost_grant, __tmp->cl_avail_grant, \
- __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
- atomic_long_read(&__tmp->cl_lru_in_list), \
- atomic_long_read(&__tmp->cl_lru_busy), \
- atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
-} while (0)
-
-/* caller must hold loi_list_lock */
-static void osc_consume_write_grant(struct client_obd *cli,
- struct brw_page *pga)
-{
- assert_spin_locked(&cli->cl_loi_list_lock);
- LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
- atomic_long_inc(&obd_dirty_pages);
- cli->cl_dirty_pages++;
- pga->flag |= OBD_BRW_FROM_GRANT;
- CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
- PAGE_SIZE, pga, pga->pg);
- osc_update_next_shrink(cli);
-}
-
-/* the companion to osc_consume_write_grant, called when a brw has completed.
- * must be called with the loi lock held.
- */
-static void osc_release_write_grant(struct client_obd *cli,
- struct brw_page *pga)
-{
- assert_spin_locked(&cli->cl_loi_list_lock);
- if (!(pga->flag & OBD_BRW_FROM_GRANT))
- return;
-
- pga->flag &= ~OBD_BRW_FROM_GRANT;
- atomic_long_dec(&obd_dirty_pages);
- cli->cl_dirty_pages--;
- if (pga->flag & OBD_BRW_NOCACHE) {
- pga->flag &= ~OBD_BRW_NOCACHE;
- atomic_long_dec(&obd_dirty_transit_pages);
- cli->cl_dirty_transit--;
- }
-}
-
-/**
- * To avoid sleeping with object lock held, it's good for us allocate enough
- * grants before entering into critical section.
- *
- * spin_lock held by caller
- */
-static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
-{
- int rc = -EDQUOT;
-
- if (cli->cl_avail_grant >= bytes) {
- cli->cl_avail_grant -= bytes;
- cli->cl_reserved_grant += bytes;
- rc = 0;
- }
- return rc;
-}
-
-static void __osc_unreserve_grant(struct client_obd *cli,
- unsigned int reserved, unsigned int unused)
-{
- /* it's quite normal for us to get more grant than reserved.
- * Thinking about a case that two extents merged by adding a new
- * chunk, we can save one extent tax. If extent tax is greater than
- * one chunk, we can save more grant by adding a new chunk
- */
- cli->cl_reserved_grant -= reserved;
- if (unused > reserved) {
- cli->cl_avail_grant += reserved;
- cli->cl_lost_grant += unused - reserved;
- } else {
- cli->cl_avail_grant += unused;
- }
-}
-
-static void osc_unreserve_grant(struct client_obd *cli,
- unsigned int reserved, unsigned int unused)
-{
- spin_lock(&cli->cl_loi_list_lock);
- __osc_unreserve_grant(cli, reserved, unused);
- if (unused > 0)
- osc_wake_cache_waiters(cli);
- spin_unlock(&cli->cl_loi_list_lock);
-}
-
-/**
- * Free grant after IO is finished or canceled.
- *
- * @lost_grant is used to remember how many grants we have allocated but not
- * used, we should return these grants to OST. There're two cases where grants
- * can be lost:
- * 1. truncate;
- * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
- * written. In this case OST may use less chunks to serve this partial
- * write. OSTs don't actually know the page size on the client side. so
- * clients have to calculate lost grant by the blocksize on the OST.
- * See filter_grant_check() for details.
- */
-static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
- unsigned int lost_grant)
-{
- unsigned long grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
-
- spin_lock(&cli->cl_loi_list_lock);
- atomic_long_sub(nr_pages, &obd_dirty_pages);
- cli->cl_dirty_pages -= nr_pages;
- cli->cl_lost_grant += lost_grant;
- if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
- /* borrow some grant from truncate to avoid the case that
- * truncate uses up all avail grant
- */
- cli->cl_lost_grant -= grant;
- cli->cl_avail_grant += grant;
- }
- osc_wake_cache_waiters(cli);
- spin_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
- lost_grant, cli->cl_lost_grant,
- cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_SHIFT);
-}
-
-/**
- * The companion to osc_enter_cache(), called when @oap is no longer part of
- * the dirty accounting due to error.
- */
-static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
-{
- spin_lock(&cli->cl_loi_list_lock);
- osc_release_write_grant(cli, &oap->oap_brw_page);
- spin_unlock(&cli->cl_loi_list_lock);
-}
-
-/**
- * Non-blocking version of osc_enter_cache() that consumes grant only when it
- * is available.
- */
-static int osc_enter_cache_try(struct client_obd *cli,
- struct osc_async_page *oap,
- int bytes, int transient)
-{
- int rc;
-
- OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
-
- rc = osc_reserve_grant(cli, bytes);
- if (rc < 0)
- return 0;
-
- if (cli->cl_dirty_pages < cli->cl_dirty_max_pages &&
- atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
- osc_consume_write_grant(cli, &oap->oap_brw_page);
- if (transient) {
- cli->cl_dirty_transit++;
- atomic_long_inc(&obd_dirty_transit_pages);
- oap->oap_brw_flags |= OBD_BRW_NOCACHE;
- }
- rc = 1;
- } else {
- __osc_unreserve_grant(cli, bytes, bytes);
- rc = 0;
- }
- return rc;
-}
-
-static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
-{
- int rc;
-
- spin_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&ocw->ocw_entry);
- spin_unlock(&cli->cl_loi_list_lock);
- return rc;
-}
-
-/**
- * The main entry to reserve dirty page accounting. Usually the grant reserved
- * in this function will be freed in bulk in osc_free_grant() unless it fails
- * to add osc cache, in that case, it will be freed in osc_exit_cache().
- *
- * The process will be put into sleep if it's already run out of grant.
- */
-static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
- struct osc_async_page *oap, int bytes)
-{
- struct osc_object *osc = oap->oap_obj;
- struct lov_oinfo *loi = osc->oo_oinfo;
- struct osc_cache_waiter ocw;
- unsigned long timeout = (AT_OFF ? obd_timeout : at_max) * HZ;
- int rc = -EDQUOT;
-
- OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
-
- spin_lock(&cli->cl_loi_list_lock);
-
- /* force the caller to try sync io. this can jump the list
- * of queued writes and create a discontiguous rpc stream
- */
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
- !cli->cl_dirty_max_pages || cli->cl_ar.ar_force_sync ||
- loi->loi_ar.ar_force_sync) {
- OSC_DUMP_GRANT(D_CACHE, cli, "forced sync i/o\n");
- rc = -EDQUOT;
- goto out;
- }
-
- /* Hopefully normal case - cache space and write credits available */
- if (osc_enter_cache_try(cli, oap, bytes, 0)) {
- OSC_DUMP_GRANT(D_CACHE, cli, "granted from cache\n");
- rc = 0;
- goto out;
- }
-
- /* We can get here for two reasons: too many dirty pages in cache, or
- * run out of grants. In both cases we should write dirty pages out.
- * Adding a cache waiter will trigger urgent write-out no matter what
- * RPC size will be.
- * The exiting condition is no avail grants and no dirty pages caching,
- * that really means there is no space on the OST.
- */
- init_waitqueue_head(&ocw.ocw_waitq);
- ocw.ocw_oap = oap;
- ocw.ocw_grant = bytes;
- while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) {
- list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
- ocw.ocw_rc = 0;
- spin_unlock(&cli->cl_loi_list_lock);
-
- osc_io_unplug_async(env, cli, NULL);
-
- CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
- cli_name(cli), &ocw, oap);
-
- rc = wait_event_idle_timeout(ocw.ocw_waitq,
- ocw_granted(cli, &ocw), timeout);
-
- spin_lock(&cli->cl_loi_list_lock);
-
- if (rc == 0) {
- /* wait_event is interrupted by signal, or timed out */
- list_del_init(&ocw.ocw_entry);
- rc = -ETIMEDOUT;
- break;
- }
- LASSERT(list_empty(&ocw.ocw_entry));
- rc = ocw.ocw_rc;
-
- if (rc != -EDQUOT)
- break;
- if (osc_enter_cache_try(cli, oap, bytes, 0)) {
- rc = 0;
- break;
- }
- }
-
- switch (rc) {
- case 0:
- OSC_DUMP_GRANT(D_CACHE, cli, "finally got grant space\n");
- break;
- case -ETIMEDOUT:
- OSC_DUMP_GRANT(D_CACHE, cli,
- "timeout, fall back to sync i/o\n");
- osc_extent_tree_dump(D_CACHE, osc);
- /* fall back to synchronous I/O */
- rc = -EDQUOT;
- break;
- case -EINTR:
- /* Ensures restartability - LU-3581 */
- OSC_DUMP_GRANT(D_CACHE, cli, "interrupted\n");
- rc = -ERESTARTSYS;
- break;
- case -EDQUOT:
- OSC_DUMP_GRANT(D_CACHE, cli,
- "no grant space, fall back to sync i/o\n");
- break;
- default:
- CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n",
- cli_name(cli), &ocw, rc);
- break;
- }
-out:
- spin_unlock(&cli->cl_loi_list_lock);
- return rc;
-}
-
-/* caller must hold loi_list_lock */
-void osc_wake_cache_waiters(struct client_obd *cli)
-{
- struct list_head *l, *tmp;
- struct osc_cache_waiter *ocw;
-
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
- ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
- list_del_init(&ocw->ocw_entry);
-
- ocw->ocw_rc = -EDQUOT;
- /* we can't dirty more */
- if ((cli->cl_dirty_pages > cli->cl_dirty_max_pages) ||
- (atomic_long_read(&obd_dirty_pages) + 1 >
- obd_max_dirty_pages)) {
- CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %ld\n",
- cli->cl_dirty_pages, cli->cl_dirty_max_pages,
- obd_max_dirty_pages);
- goto wakeup;
- }
-
- if (osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
- ocw->ocw_rc = 0;
-wakeup:
- CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
- ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);
-
- wake_up(&ocw->ocw_waitq);
- }
-}
-
-static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
-{
- int hprpc = !!list_empty(&osc->oo_hp_exts);
-
- return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
-}
-
-/* This maintains the lists of pending pages to read/write for a given object
- * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint()
- * to quickly find objects that are ready to send an RPC.
- */
-static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
- int cmd)
-{
- int invalid_import = 0;
-
- /* if we have an invalid import we want to drain the queued pages
- * by forcing them through rpcs that immediately fail and complete
- * the pages. recovery relies on this to empty the queued pages
- * before canceling the locks and evicting down the llite pages
- */
- if (!cli->cl_import || cli->cl_import->imp_invalid)
- invalid_import = 1;
-
- if (cmd & OBD_BRW_WRITE) {
- if (atomic_read(&osc->oo_nr_writes) == 0)
- return 0;
- if (invalid_import) {
- CDEBUG(D_CACHE, "invalid import forcing RPC\n");
- return 1;
- }
- if (!list_empty(&osc->oo_hp_exts)) {
- CDEBUG(D_CACHE, "high prio request forcing RPC\n");
- return 1;
- }
- if (!list_empty(&osc->oo_urgent_exts)) {
- CDEBUG(D_CACHE, "urgent request forcing RPC\n");
- return 1;
- }
- /* trigger a write rpc stream as long as there are dirtiers
- * waiting for space. as they're waiting, they're not going to
- * create more pages to coalesce with what's waiting..
- */
- if (!list_empty(&cli->cl_cache_waiters)) {
- CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
- return 1;
- }
- if (atomic_read(&osc->oo_nr_writes) >=
- cli->cl_max_pages_per_rpc)
- return 1;
- } else {
- if (atomic_read(&osc->oo_nr_reads) == 0)
- return 0;
- if (invalid_import) {
- CDEBUG(D_CACHE, "invalid import forcing RPC\n");
- return 1;
- }
- /* all read are urgent. */
- if (!list_empty(&osc->oo_reading_exts))
- return 1;
- }
-
- return 0;
-}
-
-static void osc_update_pending(struct osc_object *obj, int cmd, int delta)
-{
- struct client_obd *cli = osc_cli(obj);
-
- if (cmd & OBD_BRW_WRITE) {
- atomic_add(delta, &obj->oo_nr_writes);
- atomic_add(delta, &cli->cl_pending_w_pages);
- LASSERT(atomic_read(&obj->oo_nr_writes) >= 0);
- } else {
- atomic_add(delta, &obj->oo_nr_reads);
- atomic_add(delta, &cli->cl_pending_r_pages);
- LASSERT(atomic_read(&obj->oo_nr_reads) >= 0);
- }
- OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta);
-}
-
-static int osc_makes_hprpc(struct osc_object *obj)
-{
- return !list_empty(&obj->oo_hp_exts);
-}
-
-static void on_list(struct list_head *item, struct list_head *list, int should_be_on)
-{
- if (list_empty(item) && should_be_on)
- list_add_tail(item, list);
- else if (!list_empty(item) && !should_be_on)
- list_del_init(item);
-}
-
-/* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
- * can find pages to build into rpcs quickly
- */
-static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
-{
- if (osc_makes_hprpc(osc)) {
- /* HP rpc */
- on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list, 0);
- on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
- } else {
- on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
- on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list,
- osc_makes_rpc(cli, osc, OBD_BRW_WRITE) ||
- osc_makes_rpc(cli, osc, OBD_BRW_READ));
- }
-
- on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
- atomic_read(&osc->oo_nr_writes) > 0);
-
- on_list(&osc->oo_read_item, &cli->cl_loi_read_list,
- atomic_read(&osc->oo_nr_reads) > 0);
-
- return osc_is_ready(osc);
-}
-
-static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
-{
- int is_ready;
-
- spin_lock(&cli->cl_loi_list_lock);
- is_ready = __osc_list_maint(cli, osc);
- spin_unlock(&cli->cl_loi_list_lock);
-
- return is_ready;
-}
-
-/* this is trying to propagate async writeback errors back up to the
- * application. As an async write fails we record the error code for later if
- * the app does an fsync. As long as errors persist we force future rpcs to be
- * sync so that the app can get a sync error and break the cycle of queueing
- * pages for which writeback will fail.
- */
-static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
- int rc)
-{
- if (rc) {
- if (!ar->ar_rc)
- ar->ar_rc = rc;
-
- ar->ar_force_sync = 1;
- ar->ar_min_xid = ptlrpc_sample_next_xid();
- return;
- }
-
- if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
- ar->ar_force_sync = 0;
-}
-
-/* this must be called holding the loi list lock to give coverage to exit_cache,
- * async_flag maintenance, and oap_request
- */
-static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
- struct osc_async_page *oap, int sent, int rc)
-{
- struct osc_object *osc = oap->oap_obj;
- struct lov_oinfo *loi = osc->oo_oinfo;
- __u64 xid = 0;
-
- if (oap->oap_request) {
- xid = ptlrpc_req_xid(oap->oap_request);
- ptlrpc_req_finished(oap->oap_request);
- oap->oap_request = NULL;
- }
-
- /* As the transfer for this page is being done, clear the flags */
- spin_lock(&oap->oap_lock);
- oap->oap_async_flags = 0;
- spin_unlock(&oap->oap_lock);
- oap->oap_interrupted = 0;
-
- if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
- spin_lock(&cli->cl_loi_list_lock);
- osc_process_ar(&cli->cl_ar, xid, rc);
- osc_process_ar(&loi->loi_ar, xid, rc);
- spin_unlock(&cli->cl_loi_list_lock);
- }
-
- rc = osc_completion(env, oap, oap->oap_cmd, rc);
- if (rc)
- CERROR("completion on oap %p obj %p returns %d.\n",
- oap, osc, rc);
-}
-
-struct extent_rpc_data {
- struct list_head *erd_rpc_list;
- unsigned int erd_page_count;
- unsigned int erd_max_pages;
- unsigned int erd_max_chunks;
- unsigned int erd_max_extents;
-};
-
-static inline unsigned int osc_extent_chunks(const struct osc_extent *ext)
-{
- struct client_obd *cli = osc_cli(ext->oe_obj);
- unsigned int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
-
- return (ext->oe_end >> ppc_bits) - (ext->oe_start >> ppc_bits) + 1;
-}
-
-/**
- * Try to add extent to one RPC. We need to think about the following things:
- * - # of pages must not be over max_pages_per_rpc
- * - extent must be compatible with previous ones
- */
-static int try_to_add_extent_for_io(struct client_obd *cli,
- struct osc_extent *ext,
- struct extent_rpc_data *data)
-{
- struct osc_extent *tmp;
- unsigned int chunk_count;
- struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
- struct osc_async_page,
- oap_pending_item);
-
- EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
- ext);
-
- if (!data->erd_max_extents)
- return 0;
-
- chunk_count = osc_extent_chunks(ext);
- EASSERTF(data->erd_page_count != 0 ||
- chunk_count <= data->erd_max_chunks, ext,
- "The first extent to be fit in a RPC contains %u chunks, which is over the limit %u.\n",
- chunk_count, data->erd_max_chunks);
-
- if (chunk_count > data->erd_max_chunks)
- return 0;
-
- data->erd_max_pages = max(ext->oe_mppr, data->erd_max_pages);
- EASSERTF(data->erd_page_count != 0 ||
- ext->oe_nr_pages <= data->erd_max_pages, ext,
- "The first extent to be fit in a RPC contains %u pages, which is over the limit %u.\n",
- ext->oe_nr_pages, data->erd_max_pages);
- if (data->erd_page_count + ext->oe_nr_pages > data->erd_max_pages)
- return 0;
-
- list_for_each_entry(tmp, data->erd_rpc_list, oe_link) {
- struct osc_async_page *oap2;
-
- oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
- oap_pending_item);
- EASSERT(tmp->oe_owner == current, tmp);
- if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
- CDEBUG(D_CACHE, "Do not permit different type of IO in one RPC\n");
- return 0;
- }
-
- if (tmp->oe_srvlock != ext->oe_srvlock ||
- !tmp->oe_grants != !ext->oe_grants ||
- tmp->oe_no_merge || ext->oe_no_merge)
- return 0;
-
- /* remove break for strict check */
- break;
- }
-
- data->erd_max_extents--;
- data->erd_max_chunks -= chunk_count;
- data->erd_page_count += ext->oe_nr_pages;
- list_move_tail(&ext->oe_link, data->erd_rpc_list);
- ext->oe_owner = current;
- return 1;
-}
-
-static inline unsigned int osc_max_write_chunks(const struct client_obd *cli)
-{
- /*
- * LU-8135:
- *
- * The maximum size of a single transaction is about 64MB in ZFS.
- * #define DMU_MAX_ACCESS (64 * 1024 * 1024)
- *
- * Since ZFS is a copy-on-write file system, a single dirty page in
- * a chunk will result in the rewrite of the whole chunk, therefore
- * an RPC shouldn't be allowed to contain too many chunks otherwise
- * it will make transaction size much bigger than 64MB, especially
- * with big block size for ZFS.
- *
- * This piece of code is to make sure that OSC won't send write RPCs
- * with too many chunks. The maximum chunk size that an RPC can cover
- * is set to PTLRPC_MAX_BRW_SIZE, which is defined to 16MB. Ideally
- * OST should tell the client what the biggest transaction size is,
- * but it's good enough for now.
- *
- * This limitation doesn't apply to ldiskfs, which allows as many
- * chunks in one RPC as we want. However, it won't have any benefits
- * to have too many discontiguous pages in one RPC.
- *
- * An osc_extent won't cover over a RPC size, so the chunks in an
- * osc_extent won't bigger than PTLRPC_MAX_BRW_SIZE >> chunkbits.
- */
- return PTLRPC_MAX_BRW_SIZE >> cli->cl_chunkbits;
-}
-
-/**
- * In order to prevent multiple ptlrpcd from breaking contiguous extents,
- * get_write_extent() takes all appropriate extents in atomic.
- *
- * The following policy is used to collect extents for IO:
- * 1. Add as many HP extents as possible;
- * 2. Add the first urgent extent in urgent extent list and take it out of
- * urgent list;
- * 3. Add subsequent extents of this urgent extent;
- * 4. If urgent list is not empty, goto 2;
- * 5. Traverse the extent tree from the 1st extent;
- * 6. Above steps exit if there is no space in this RPC.
- */
-static unsigned int get_write_extents(struct osc_object *obj,
- struct list_head *rpclist)
-{
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *ext;
- struct osc_extent *temp;
- struct extent_rpc_data data = {
- .erd_rpc_list = rpclist,
- .erd_page_count = 0,
- .erd_max_pages = cli->cl_max_pages_per_rpc,
- .erd_max_chunks = osc_max_write_chunks(cli),
- .erd_max_extents = 256,
- };
-
- LASSERT(osc_object_is_locked(obj));
- list_for_each_entry_safe(ext, temp, &obj->oo_hp_exts, oe_link) {
- LASSERT(ext->oe_state == OES_CACHE);
- if (!try_to_add_extent_for_io(cli, ext, &data))
- return data.erd_page_count;
- EASSERT(ext->oe_nr_pages <= data.erd_max_pages, ext);
- }
- if (data.erd_page_count == data.erd_max_pages)
- return data.erd_page_count;
-
- while (!list_empty(&obj->oo_urgent_exts)) {
- ext = list_entry(obj->oo_urgent_exts.next,
- struct osc_extent, oe_link);
- if (!try_to_add_extent_for_io(cli, ext, &data))
- return data.erd_page_count;
-
- if (!ext->oe_intree)
- continue;
-
- while ((ext = next_extent(ext)) != NULL) {
- if ((ext->oe_state != OES_CACHE) ||
- (!list_empty(&ext->oe_link) &&
- ext->oe_owner))
- continue;
-
- if (!try_to_add_extent_for_io(cli, ext, &data))
- return data.erd_page_count;
- }
- }
- if (data.erd_page_count == data.erd_max_pages)
- return data.erd_page_count;
-
- ext = first_extent(obj);
- while (ext) {
- if ((ext->oe_state != OES_CACHE) ||
- /* this extent may be already in current rpclist */
- (!list_empty(&ext->oe_link) && ext->oe_owner)) {
- ext = next_extent(ext);
- continue;
- }
-
- if (!try_to_add_extent_for_io(cli, ext, &data))
- return data.erd_page_count;
-
- ext = next_extent(ext);
- }
- return data.erd_page_count;
-}
-
-static int
-osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc)
- __must_hold(osc)
-{
- LIST_HEAD(rpclist);
- struct osc_extent *ext;
- struct osc_extent *tmp;
- struct osc_extent *first = NULL;
- u32 page_count = 0;
- int srvlock = 0;
- int rc = 0;
-
- LASSERT(osc_object_is_locked(osc));
-
- page_count = get_write_extents(osc, &rpclist);
- LASSERT(equi(page_count == 0, list_empty(&rpclist)));
-
- if (list_empty(&rpclist))
- return 0;
-
- osc_update_pending(osc, OBD_BRW_WRITE, -page_count);
-
- list_for_each_entry(ext, &rpclist, oe_link) {
- LASSERT(ext->oe_state == OES_CACHE ||
- ext->oe_state == OES_LOCK_DONE);
- if (ext->oe_state == OES_CACHE)
- osc_extent_state_set(ext, OES_LOCKING);
- else
- osc_extent_state_set(ext, OES_RPC);
- }
-
- /* we're going to grab page lock, so release object lock because
- * lock order is page lock -> object lock.
- */
- osc_object_unlock(osc);
-
- list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
- if (ext->oe_state == OES_LOCKING) {
- rc = osc_extent_make_ready(env, ext);
- if (unlikely(rc < 0)) {
- list_del_init(&ext->oe_link);
- osc_extent_finish(env, ext, 0, rc);
- continue;
- }
- }
- if (!first) {
- first = ext;
- srvlock = ext->oe_srvlock;
- } else {
- LASSERT(srvlock == ext->oe_srvlock);
- }
- }
-
- if (!list_empty(&rpclist)) {
- LASSERT(page_count > 0);
- rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE);
- LASSERT(list_empty(&rpclist));
- }
-
- osc_object_lock(osc);
- return rc;
-}
-
-/**
- * prepare pages for ASYNC io and put pages in send queue.
- *
- * \param cmd OBD_BRW_* macroses
- * \param lop pending pages
- *
- * \return zero if no page added to send queue.
- * \return 1 if pages successfully added to send queue.
- * \return negative on errors.
- */
-static int
-osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc)
- __must_hold(osc)
-{
- struct osc_extent *ext;
- struct osc_extent *next;
- LIST_HEAD(rpclist);
- struct extent_rpc_data data = {
- .erd_rpc_list = &rpclist,
- .erd_page_count = 0,
- .erd_max_pages = cli->cl_max_pages_per_rpc,
- .erd_max_chunks = UINT_MAX,
- .erd_max_extents = UINT_MAX,
- };
- int rc = 0;
-
- LASSERT(osc_object_is_locked(osc));
- list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) {
- EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
- if (!try_to_add_extent_for_io(cli, ext, &data))
- break;
- osc_extent_state_set(ext, OES_RPC);
- EASSERT(ext->oe_nr_pages <= data.erd_max_pages, ext);
- }
- LASSERT(data.erd_page_count <= data.erd_max_pages);
-
- osc_update_pending(osc, OBD_BRW_READ, -data.erd_page_count);
-
- if (!list_empty(&rpclist)) {
- osc_object_unlock(osc);
-
- rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ);
- LASSERT(list_empty(&rpclist));
-
- osc_object_lock(osc);
- }
- return rc;
-}
-
-#define list_to_obj(list, item) ({ \
- struct list_head *__tmp = (list)->next; \
- list_del_init(__tmp); \
- list_entry(__tmp, struct osc_object, oo_##item); \
-})
-
-/* This is called by osc_check_rpcs() to find which objects have pages that
- * we could be sending. These lists are maintained by osc_makes_rpc().
- */
-static struct osc_object *osc_next_obj(struct client_obd *cli)
-{
- /* First return objects that have blocked locks so that they
- * will be flushed quickly and other clients can get the lock,
- * then objects which have pages ready to be stuffed into RPCs
- */
- if (!list_empty(&cli->cl_loi_hp_ready_list))
- return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item);
- if (!list_empty(&cli->cl_loi_ready_list))
- return list_to_obj(&cli->cl_loi_ready_list, ready_item);
-
- /* then if we have cache waiters, return all objects with queued
- * writes. This is especially important when many small files
- * have filled up the cache and not been fired into rpcs because
- * they don't pass the nr_pending/object threshold
- */
- if (!list_empty(&cli->cl_cache_waiters) &&
- !list_empty(&cli->cl_loi_write_list))
- return list_to_obj(&cli->cl_loi_write_list, write_item);
-
- /* then return all queued objects when we have an invalid import
- * so that they get flushed
- */
- if (!cli->cl_import || cli->cl_import->imp_invalid) {
- if (!list_empty(&cli->cl_loi_write_list))
- return list_to_obj(&cli->cl_loi_write_list, write_item);
- if (!list_empty(&cli->cl_loi_read_list))
- return list_to_obj(&cli->cl_loi_read_list, read_item);
- }
- return NULL;
-}
-
-/* called with the loi list lock held */
-static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
- __must_hold(&cli->cl_loi_list_lock)
-{
- struct osc_object *osc;
- int rc = 0;
-
- while ((osc = osc_next_obj(cli)) != NULL) {
- struct cl_object *obj = osc2cl(osc);
- struct lu_ref_link link;
-
- OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli));
-
- if (osc_max_rpc_in_flight(cli, osc)) {
- __osc_list_maint(cli, osc);
- break;
- }
-
- cl_object_get(obj);
- spin_unlock(&cli->cl_loi_list_lock);
- lu_object_ref_add_at(&obj->co_lu, &link, "check", current);
-
- /* attempt some read/write balancing by alternating between
- * reads and writes in an object. The makes_rpc checks here
- * would be redundant if we were getting read/write work items
- * instead of objects. we don't want send_oap_rpc to drain a
- * partial read pending queue when we're given this object to
- * do io on writes while there are cache waiters
- */
- osc_object_lock(osc);
- if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
- rc = osc_send_write_rpc(env, cli, osc);
- if (rc < 0) {
- CERROR("Write request failed with %d\n", rc);
-
- /* osc_send_write_rpc failed, mostly because of
- * memory pressure.
- *
- * It can't break here, because if:
- * - a page was submitted by osc_io_submit, so
- * page locked;
- * - no request in flight
- * - no subsequent request
- * The system will be in live-lock state,
- * because there is no chance to call
- * osc_io_unplug() and osc_check_rpcs() any
- * more. pdflush can't help in this case,
- * because it might be blocked at grabbing
- * the page lock as we mentioned.
- *
- * Anyway, continue to drain pages.
- */
- /* break; */
- }
- }
- if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) {
- rc = osc_send_read_rpc(env, cli, osc);
- if (rc < 0)
- CERROR("Read request failed with %d\n", rc);
- }
- osc_object_unlock(osc);
-
- osc_list_maint(cli, osc);
- lu_object_ref_del_at(&obj->co_lu, &link, "check", current);
- cl_object_put(env, obj);
-
- spin_lock(&cli->cl_loi_list_lock);
- }
-}
-
-static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, int async)
-{
- int rc = 0;
-
- if (osc && osc_list_maint(cli, osc) == 0)
- return 0;
-
- if (!async) {
- spin_lock(&cli->cl_loi_list_lock);
- osc_check_rpcs(env, cli);
- spin_unlock(&cli->cl_loi_list_lock);
- } else {
- CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
- LASSERT(cli->cl_writeback_work);
- rc = ptlrpcd_queue_work(cli->cl_writeback_work);
- }
- return rc;
-}
-
-static int osc_io_unplug_async(const struct lu_env *env,
- struct client_obd *cli, struct osc_object *osc)
-{
- return osc_io_unplug0(env, cli, osc, 1);
-}
-
-void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc)
-{
- (void)osc_io_unplug0(env, cli, osc, 0);
-}
-
-int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
- struct page *page, loff_t offset)
-{
- struct obd_export *exp = osc_export(osc);
- struct osc_async_page *oap = &ops->ops_oap;
-
- if (!page)
- return cfs_size_round(sizeof(*oap));
-
- oap->oap_magic = OAP_MAGIC;
- oap->oap_cli = &exp->exp_obd->u.cli;
- oap->oap_obj = osc;
-
- oap->oap_page = page;
- oap->oap_obj_off = offset;
- LASSERT(!(offset & ~PAGE_MASK));
-
- if (capable(CAP_SYS_RESOURCE))
- oap->oap_brw_flags = OBD_BRW_NOQUOTA;
-
- INIT_LIST_HEAD(&oap->oap_pending_item);
- INIT_LIST_HEAD(&oap->oap_rpc_item);
-
- spin_lock_init(&oap->oap_lock);
- CDEBUG(D_INFO, "oap %p page %p obj off %llu\n",
- oap, page, oap->oap_obj_off);
- return 0;
-}
-
-int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops)
-{
- struct osc_io *oio = osc_env_io(env);
- struct osc_extent *ext = NULL;
- struct osc_async_page *oap = &ops->ops_oap;
- struct client_obd *cli = oap->oap_cli;
- struct osc_object *osc = oap->oap_obj;
- pgoff_t index;
- unsigned int grants = 0, tmp;
- int brw_flags = OBD_BRW_ASYNC;
- int cmd = OBD_BRW_WRITE;
- int need_release = 0;
- int rc = 0;
-
- if (oap->oap_magic != OAP_MAGIC)
- return -EINVAL;
-
- if (!cli->cl_import || cli->cl_import->imp_invalid)
- return -EIO;
-
- if (!list_empty(&oap->oap_pending_item) ||
- !list_empty(&oap->oap_rpc_item))
- return -EBUSY;
-
- /* Set the OBD_BRW_SRVLOCK before the page is queued. */
- brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
- if (capable(CAP_SYS_RESOURCE)) {
- brw_flags |= OBD_BRW_NOQUOTA;
- cmd |= OBD_BRW_NOQUOTA;
- }
-
- /* check if the file's owner/group is over quota */
- if (!(cmd & OBD_BRW_NOQUOTA)) {
- struct cl_object *obj;
- struct cl_attr *attr;
- unsigned int qid[MAXQUOTAS];
-
- obj = cl_object_top(&osc->oo_cl);
- attr = &osc_env_info(env)->oti_attr;
-
- cl_object_attr_lock(obj);
- rc = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
-
- qid[USRQUOTA] = attr->cat_uid;
- qid[GRPQUOTA] = attr->cat_gid;
- if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA)
- rc = -EDQUOT;
- if (rc)
- return rc;
- }
-
- oap->oap_cmd = cmd;
- oap->oap_page_off = ops->ops_from;
- oap->oap_count = ops->ops_to - ops->ops_from;
- /*
- * No need to hold a lock here,
- * since this page is not in any list yet.
- */
- oap->oap_async_flags = 0;
- oap->oap_brw_flags = brw_flags;
-
- OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
- oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
-
- index = osc_index(oap2osc(oap));
-
- /* Add this page into extent by the following steps:
- * 1. if there exists an active extent for this IO, mostly this page
- * can be added to the active extent and sometimes we need to
- * expand extent to accommodate this page;
- * 2. otherwise, a new extent will be allocated.
- */
-
- ext = oio->oi_active;
- if (ext && ext->oe_start <= index && ext->oe_max_end >= index) {
- /* one chunk plus extent overhead must be enough to write this
- * page
- */
- grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
- if (ext->oe_end >= index)
- grants = 0;
-
- /* it doesn't need any grant to dirty this page */
- spin_lock(&cli->cl_loi_list_lock);
- rc = osc_enter_cache_try(cli, oap, grants, 0);
- spin_unlock(&cli->cl_loi_list_lock);
- if (rc == 0) { /* try failed */
- grants = 0;
- need_release = 1;
- } else if (ext->oe_end < index) {
- tmp = grants;
- /* try to expand this extent */
- rc = osc_extent_expand(ext, index, &tmp);
- if (rc < 0) {
- need_release = 1;
- /* don't free reserved grant */
- } else {
- OSC_EXTENT_DUMP(D_CACHE, ext,
- "expanded for %lu.\n", index);
- osc_unreserve_grant(cli, grants, tmp);
- grants = 0;
- }
- }
- rc = 0;
- } else if (ext) {
- /* index is located outside of active extent */
- need_release = 1;
- }
- if (need_release) {
- osc_extent_release(env, ext);
- oio->oi_active = NULL;
- ext = NULL;
- }
-
- if (!ext) {
- tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
-
- /* try to find new extent to cover this page */
- LASSERT(!oio->oi_active);
- /* we may have allocated grant for this page if we failed
- * to expand the previous active extent.
- */
- LASSERT(ergo(grants > 0, grants >= tmp));
-
- rc = 0;
- if (grants == 0) {
- /* we haven't allocated grant for this page. */
- rc = osc_enter_cache(env, cli, oap, tmp);
- if (rc == 0)
- grants = tmp;
- }
-
- tmp = grants;
- if (rc == 0) {
- ext = osc_extent_find(env, osc, index, &tmp);
- if (IS_ERR(ext)) {
- LASSERT(tmp == grants);
- osc_exit_cache(cli, oap);
- rc = PTR_ERR(ext);
- ext = NULL;
- } else {
- oio->oi_active = ext;
- }
- }
- if (grants > 0)
- osc_unreserve_grant(cli, grants, tmp);
- }
-
- LASSERT(ergo(rc == 0, ext));
- if (ext) {
- EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
- ext, "index = %lu.\n", index);
- LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
-
- osc_object_lock(osc);
- if (ext->oe_nr_pages == 0)
- ext->oe_srvlock = ops->ops_srvlock;
- else
- LASSERT(ext->oe_srvlock == ops->ops_srvlock);
- ++ext->oe_nr_pages;
- list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
- osc_object_unlock(osc);
- }
- return rc;
-}
-
-int osc_teardown_async_page(const struct lu_env *env,
- struct osc_object *obj, struct osc_page *ops)
-{
- struct osc_async_page *oap = &ops->ops_oap;
- int rc = 0;
-
- LASSERT(oap->oap_magic == OAP_MAGIC);
-
- CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
- oap, ops, osc_index(oap2osc(oap)));
-
- if (!list_empty(&oap->oap_rpc_item)) {
- CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
- rc = -EBUSY;
- } else if (!list_empty(&oap->oap_pending_item)) {
- struct osc_extent *ext = NULL;
-
- osc_object_lock(obj);
- ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
- osc_object_unlock(obj);
- /* only truncated pages are allowed to be taken out.
- * See osc_extent_truncate() and osc_cache_truncate_start()
- * for details.
- */
- if (ext && ext->oe_state != OES_TRUNC) {
- OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
- osc_index(oap2osc(oap)));
- rc = -EBUSY;
- }
- if (ext)
- osc_extent_put(env, ext);
- }
- return rc;
-}
-
-/**
- * This is called when a page is picked up by kernel to write out.
- *
- * We should find out the corresponding extent and add the whole extent
- * into urgent list. The extent may be being truncated or used, handle it
- * carefully.
- */
-int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops)
-{
- struct osc_extent *ext = NULL;
- struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
- struct cl_page *cp = ops->ops_cl.cpl_page;
- pgoff_t index = osc_index(ops);
- struct osc_async_page *oap = &ops->ops_oap;
- bool unplug = false;
- int rc = 0;
-
- osc_object_lock(obj);
- ext = osc_extent_lookup(obj, index);
- if (!ext) {
- osc_extent_tree_dump(D_ERROR, obj);
- LASSERTF(0, "page index %lu is NOT covered.\n", index);
- }
-
- switch (ext->oe_state) {
- case OES_RPC:
- case OES_LOCK_DONE:
- CL_PAGE_DEBUG(D_ERROR, env, cp, "flush an in-rpc page?\n");
- LASSERT(0);
- break;
- case OES_LOCKING:
- /* If we know this extent is being written out, we should abort
- * so that the writer can make this page ready. Otherwise, there
- * exists a deadlock problem because other process can wait for
- * page writeback bit holding page lock; and meanwhile in
- * vvp_page_make_ready(), we need to grab page lock before
- * really sending the RPC.
- */
- case OES_TRUNC:
- /* race with truncate, page will be redirtied */
- case OES_ACTIVE:
- /* The extent is active so we need to abort and let the caller
- * re-dirty the page. If we continued on here, and we were the
- * one making the extent active, we could deadlock waiting for
- * the page writeback to clear but it won't because the extent
- * is active and won't be written out.
- */
- rc = -EAGAIN;
- goto out;
- default:
- break;
- }
-
- rc = cl_page_prep(env, io, cp, CRT_WRITE);
- if (rc)
- goto out;
-
- spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_READY | ASYNC_URGENT;
- spin_unlock(&oap->oap_lock);
-
- if (memory_pressure_get())
- ext->oe_memalloc = 1;
-
- ext->oe_urgent = 1;
- if (ext->oe_state == OES_CACHE) {
- OSC_EXTENT_DUMP(D_CACHE, ext,
- "flush page %p make it urgent.\n", oap);
- if (list_empty(&ext->oe_link))
- list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
- unplug = true;
- }
- rc = 0;
-
-out:
- osc_object_unlock(obj);
- osc_extent_put(env, ext);
- if (unplug)
- osc_io_unplug_async(env, osc_cli(obj), obj);
- return rc;
-}
-
-/**
- * this is called when a sync waiter receives an interruption. Its job is to
- * get the caller woken as soon as possible. If its page hasn't been put in an
- * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
- * desiring interruption which will forcefully complete the rpc once the rpc
- * has timed out.
- */
-int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
-{
- struct osc_async_page *oap = &ops->ops_oap;
- struct osc_object *obj = oap->oap_obj;
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *ext;
- struct osc_extent *found = NULL;
- struct list_head *plist;
- pgoff_t index = osc_index(ops);
- int rc = -EBUSY;
- int cmd;
-
- LASSERT(!oap->oap_interrupted);
- oap->oap_interrupted = 1;
-
- /* Find out the caching extent */
- osc_object_lock(obj);
- if (oap->oap_cmd & OBD_BRW_WRITE) {
- plist = &obj->oo_urgent_exts;
- cmd = OBD_BRW_WRITE;
- } else {
- plist = &obj->oo_reading_exts;
- cmd = OBD_BRW_READ;
- }
- list_for_each_entry(ext, plist, oe_link) {
- if (ext->oe_start <= index && ext->oe_end >= index) {
- LASSERT(ext->oe_state == OES_LOCK_DONE);
- /* For OES_LOCK_DONE state extent, it has already held
- * a refcount for RPC.
- */
- found = osc_extent_get(ext);
- break;
- }
- }
- if (found) {
- list_del_init(&found->oe_link);
- osc_update_pending(obj, cmd, -found->oe_nr_pages);
- osc_object_unlock(obj);
-
- osc_extent_finish(env, found, 0, -EINTR);
- osc_extent_put(env, found);
- rc = 0;
- } else {
- osc_object_unlock(obj);
- /* ok, it's been put in an rpc. only one oap gets a request
- * reference
- */
- if (oap->oap_request) {
- ptlrpc_mark_interrupted(oap->oap_request);
- ptlrpcd_wake(oap->oap_request);
- ptlrpc_req_finished(oap->oap_request);
- oap->oap_request = NULL;
- }
- }
-
- osc_list_maint(cli, obj);
- return rc;
-}
-
-int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
- struct list_head *list, int cmd, int brw_flags)
-{
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *ext;
- struct osc_async_page *oap, *tmp;
- int page_count = 0;
- int mppr = cli->cl_max_pages_per_rpc;
- bool can_merge = true;
- pgoff_t start = CL_PAGE_EOF;
- pgoff_t end = 0;
-
- list_for_each_entry(oap, list, oap_pending_item) {
- struct osc_page *opg = oap2osc_page(oap);
- pgoff_t index = osc_index(opg);
-
- if (index > end)
- end = index;
- if (index < start)
- start = index;
- ++page_count;
- mppr <<= (page_count > mppr);
-
- if (unlikely(opg->ops_from > 0 || opg->ops_to < PAGE_SIZE))
- can_merge = false;
- }
-
- ext = osc_extent_alloc(obj);
- if (!ext) {
- list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
- list_del_init(&oap->oap_pending_item);
- osc_ap_completion(env, cli, oap, 0, -ENOMEM);
- }
- return -ENOMEM;
- }
-
- ext->oe_rw = !!(cmd & OBD_BRW_READ);
- ext->oe_sync = 1;
- ext->oe_no_merge = !can_merge;
- ext->oe_urgent = 1;
- ext->oe_start = start;
- ext->oe_end = end;
- ext->oe_max_end = end;
- ext->oe_obj = obj;
- ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
- ext->oe_nr_pages = page_count;
- ext->oe_mppr = mppr;
- list_splice_init(list, &ext->oe_pages);
-
- osc_object_lock(obj);
- /* Reuse the initial refcount for RPC, don't drop it */
- osc_extent_state_set(ext, OES_LOCK_DONE);
- if (cmd & OBD_BRW_WRITE) {
- list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
- osc_update_pending(obj, OBD_BRW_WRITE, page_count);
- } else {
- list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
- osc_update_pending(obj, OBD_BRW_READ, page_count);
- }
- osc_object_unlock(obj);
-
- osc_io_unplug_async(env, cli, obj);
- return 0;
-}
-
-/**
- * Called by osc_io_setattr_start() to freeze and destroy covering extents.
- */
-int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
- u64 size, struct osc_extent **extp)
-{
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *ext;
- struct osc_extent *temp;
- struct osc_extent *waiting = NULL;
- pgoff_t index;
- LIST_HEAD(list);
- int result = 0;
- bool partial;
-
- /* pages with index greater or equal to index will be truncated. */
- index = cl_index(osc2cl(obj), size);
- partial = size > cl_offset(osc2cl(obj), index);
-
-again:
- osc_object_lock(obj);
- ext = osc_extent_search(obj, index);
- if (!ext)
- ext = first_extent(obj);
- else if (ext->oe_end < index)
- ext = next_extent(ext);
- while (ext) {
- EASSERT(ext->oe_state != OES_TRUNC, ext);
-
- if (ext->oe_state > OES_CACHE || ext->oe_urgent) {
- /* if ext is in urgent state, it means there must exist
- * a page already having been flushed by write_page().
- * We have to wait for this extent because we can't
- * truncate that page.
- */
- OSC_EXTENT_DUMP(D_CACHE, ext,
- "waiting for busy extent\n");
- waiting = osc_extent_get(ext);
- break;
- }
-
- OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:%llu.\n", size);
-
- osc_extent_get(ext);
- if (ext->oe_state == OES_ACTIVE) {
- /* though we grab inode mutex for write path, but we
- * release it before releasing extent(in osc_io_end()),
- * so there is a race window that an extent is still
- * in OES_ACTIVE when truncate starts.
- */
- LASSERT(!ext->oe_trunc_pending);
- ext->oe_trunc_pending = 1;
- } else {
- EASSERT(ext->oe_state == OES_CACHE, ext);
- osc_extent_state_set(ext, OES_TRUNC);
- osc_update_pending(obj, OBD_BRW_WRITE,
- -ext->oe_nr_pages);
- }
- EASSERT(list_empty(&ext->oe_link), ext);
- list_add_tail(&ext->oe_link, &list);
-
- ext = next_extent(ext);
- }
- osc_object_unlock(obj);
-
- osc_list_maint(cli, obj);
-
- list_for_each_entry_safe(ext, temp, &list, oe_link) {
- int rc;
-
- list_del_init(&ext->oe_link);
-
- /* extent may be in OES_ACTIVE state because inode mutex
- * is released before osc_io_end() in file write case
- */
- if (ext->oe_state != OES_TRUNC)
- osc_extent_wait(env, ext, OES_TRUNC);
-
- rc = osc_extent_truncate(ext, index, partial);
- if (rc < 0) {
- if (result == 0)
- result = rc;
-
- OSC_EXTENT_DUMP(D_ERROR, ext,
- "truncate error %d\n", rc);
- } else if (ext->oe_nr_pages == 0) {
- osc_extent_remove(ext);
- } else {
- /* this must be an overlapped extent which means only
- * part of pages in this extent have been truncated.
- */
- EASSERTF(ext->oe_start <= index, ext,
- "trunc index = %lu/%d.\n", index, partial);
- /* fix index to skip this partially truncated extent */
- index = ext->oe_end + 1;
- partial = false;
-
- /* we need to hold this extent in OES_TRUNC state so
- * that no writeback will happen. This is to avoid
- * BUG 17397.
- * Only partial truncate can reach here, if @size is
- * not zero, the caller should provide a valid @extp.
- */
- LASSERT(!*extp);
- *extp = osc_extent_get(ext);
- OSC_EXTENT_DUMP(D_CACHE, ext,
- "trunc at %llu\n", size);
- }
- osc_extent_put(env, ext);
- }
- if (waiting) {
- int rc;
-
- /* ignore the result of osc_extent_wait the write initiator
- * should take care of it.
- */
- rc = osc_extent_wait(env, waiting, OES_INV);
- if (rc < 0)
- OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc);
-
- osc_extent_put(env, waiting);
- waiting = NULL;
- goto again;
- }
- return result;
-}
-
-/**
- * Called after osc_io_setattr_end to add oio->oi_trunc back to cache.
- */
-void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext)
-{
- if (ext) {
- struct osc_object *obj = ext->oe_obj;
- bool unplug = false;
-
- EASSERT(ext->oe_nr_pages > 0, ext);
- EASSERT(ext->oe_state == OES_TRUNC, ext);
- EASSERT(!ext->oe_urgent, ext);
-
- OSC_EXTENT_DUMP(D_CACHE, ext, "trunc -> cache.\n");
- osc_object_lock(obj);
- osc_extent_state_set(ext, OES_CACHE);
- if (ext->oe_fsync_wait && !ext->oe_urgent) {
- ext->oe_urgent = 1;
- list_move_tail(&ext->oe_link, &obj->oo_urgent_exts);
- unplug = true;
- }
- osc_update_pending(obj, OBD_BRW_WRITE, ext->oe_nr_pages);
- osc_object_unlock(obj);
- osc_extent_put(env, ext);
-
- if (unplug)
- osc_io_unplug_async(env, osc_cli(obj), obj);
- }
-}
-
-/**
- * Wait for extents in a specific range to be written out.
- * The caller must have called osc_cache_writeback_range() to issue IO
- * otherwise it will take a long time for this function to finish.
- *
- * Caller must hold inode_mutex , or cancel exclusive dlm lock so that
- * nobody else can dirty this range of file while we're waiting for
- * extents to be written.
- */
-int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
- pgoff_t start, pgoff_t end)
-{
- struct osc_extent *ext;
- pgoff_t index = start;
- int result = 0;
-
-again:
- osc_object_lock(obj);
- ext = osc_extent_search(obj, index);
- if (!ext)
- ext = first_extent(obj);
- else if (ext->oe_end < index)
- ext = next_extent(ext);
- while (ext) {
- int rc;
-
- if (ext->oe_start > end)
- break;
-
- if (!ext->oe_fsync_wait) {
- ext = next_extent(ext);
- continue;
- }
-
- EASSERT(ergo(ext->oe_state == OES_CACHE,
- ext->oe_hp || ext->oe_urgent), ext);
- EASSERT(ergo(ext->oe_state == OES_ACTIVE,
- !ext->oe_hp && ext->oe_urgent), ext);
-
- index = ext->oe_end + 1;
- osc_extent_get(ext);
- osc_object_unlock(obj);
-
- rc = osc_extent_wait(env, ext, OES_INV);
- if (result == 0)
- result = rc;
- osc_extent_put(env, ext);
- goto again;
- }
- osc_object_unlock(obj);
-
- OSC_IO_DEBUG(obj, "sync file range.\n");
- return result;
-}
-
-/**
- * Called to write out a range of osc object.
- *
- * @hp : should be set this is caused by lock cancel;
- * @discard: is set if dirty pages should be dropped - file will be deleted or
- * truncated, this implies there is no partially discarding extents.
- *
- * Return how many pages will be issued, or error code if error occurred.
- */
-int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
- pgoff_t start, pgoff_t end, int hp, int discard)
-{
- struct osc_extent *ext;
- LIST_HEAD(discard_list);
- bool unplug = false;
- int result = 0;
-
- osc_object_lock(obj);
- ext = osc_extent_search(obj, start);
- if (!ext)
- ext = first_extent(obj);
- else if (ext->oe_end < start)
- ext = next_extent(ext);
- while (ext) {
- if (ext->oe_start > end)
- break;
-
- ext->oe_fsync_wait = 1;
- switch (ext->oe_state) {
- case OES_CACHE:
- result += ext->oe_nr_pages;
- if (!discard) {
- struct list_head *list = NULL;
-
- if (hp) {
- EASSERT(!ext->oe_hp, ext);
- ext->oe_hp = 1;
- list = &obj->oo_hp_exts;
- } else if (!ext->oe_urgent) {
- ext->oe_urgent = 1;
- list = &obj->oo_urgent_exts;
- }
- if (list)
- list_move_tail(&ext->oe_link, list);
- unplug = true;
- } else {
- /* the only discarder is lock cancelling, so
- * [start, end] must contain this extent
- */
- EASSERT(ext->oe_start >= start &&
- ext->oe_max_end <= end, ext);
- osc_extent_state_set(ext, OES_LOCKING);
- ext->oe_owner = current;
- list_move_tail(&ext->oe_link, &discard_list);
- osc_update_pending(obj, OBD_BRW_WRITE,
- -ext->oe_nr_pages);
- }
- break;
- case OES_ACTIVE:
- /* It's pretty bad to wait for ACTIVE extents, because
- * we don't know how long we will wait for it to be
- * flushed since it may be blocked at awaiting more
- * grants. We do this for the correctness of fsync.
- */
- LASSERT(hp == 0 && discard == 0);
- ext->oe_urgent = 1;
- break;
- case OES_TRUNC:
- /* this extent is being truncated, can't do anything
- * for it now. it will be set to urgent after truncate
- * is finished in osc_cache_truncate_end().
- */
- default:
- break;
- }
- ext = next_extent(ext);
- }
- osc_object_unlock(obj);
-
- LASSERT(ergo(!discard, list_empty(&discard_list)));
- if (!list_empty(&discard_list)) {
- struct osc_extent *tmp;
- int rc;
-
- osc_list_maint(osc_cli(obj), obj);
- list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) {
- list_del_init(&ext->oe_link);
- EASSERT(ext->oe_state == OES_LOCKING, ext);
-
- /* Discard caching pages. We don't actually write this
- * extent out but we complete it as if we did.
- */
- rc = osc_extent_make_ready(env, ext);
- if (unlikely(rc < 0)) {
- OSC_EXTENT_DUMP(D_ERROR, ext,
- "make_ready returned %d\n", rc);
- if (result >= 0)
- result = rc;
- }
-
- /* finish the extent as if the pages were sent */
- osc_extent_finish(env, ext, 0, 0);
- }
- }
-
- if (unplug)
- osc_io_unplug(env, osc_cli(obj), obj);
-
- if (hp || discard) {
- int rc;
-
- rc = osc_cache_wait_range(env, obj, start, end);
- if (result >= 0 && rc < 0)
- result = rc;
- }
-
- OSC_IO_DEBUG(obj, "pageout [%lu, %lu], %d.\n", start, end, result);
- return result;
-}
-
-/**
- * Returns a list of pages by a given [start, end] of \a obj.
- *
- * \param resched If not NULL, then we give up before hogging CPU for too
- * long and set *resched = 1, in that case caller should implement a retry
- * logic.
- *
- * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
- * crucial in the face of [offset, EOF] locks.
- *
- * Return at least one page in @queue unless there is no covered page.
- */
-int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
- struct osc_object *osc, pgoff_t start, pgoff_t end,
- osc_page_gang_cbt cb, void *cbdata)
-{
- struct osc_page *ops;
- void **pvec;
- pgoff_t idx;
- unsigned int nr;
- unsigned int i;
- unsigned int j;
- int res = CLP_GANG_OKAY;
- bool tree_lock = true;
-
- idx = start;
- pvec = osc_env_info(env)->oti_pvec;
- spin_lock(&osc->oo_tree_lock);
- while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
- idx, OTI_PVEC_SIZE)) > 0) {
- struct cl_page *page;
- bool end_of_region = false;
-
- for (i = 0, j = 0; i < nr; ++i) {
- ops = pvec[i];
- pvec[i] = NULL;
-
- idx = osc_index(ops);
- if (idx > end) {
- end_of_region = true;
- break;
- }
-
- page = ops->ops_cl.cpl_page;
- LASSERT(page->cp_type == CPT_CACHEABLE);
- if (page->cp_state == CPS_FREEING)
- continue;
-
- cl_page_get(page);
- lu_ref_add_atomic(&page->cp_reference,
- "gang_lookup", current);
- pvec[j++] = ops;
- }
- ++idx;
-
- /*
- * Here a delicate locking dance is performed. Current thread
- * holds a reference to a page, but has to own it before it
- * can be placed into queue. Owning implies waiting, so
- * radix-tree lock is to be released. After a wait one has to
- * check that pages weren't truncated (cl_page_own() returns
- * error in the latter case).
- */
- spin_unlock(&osc->oo_tree_lock);
- tree_lock = false;
-
- for (i = 0; i < j; ++i) {
- ops = pvec[i];
- if (res == CLP_GANG_OKAY)
- res = (*cb)(env, io, ops, cbdata);
-
- page = ops->ops_cl.cpl_page;
- lu_ref_del(&page->cp_reference, "gang_lookup", current);
- cl_page_put(env, page);
- }
- if (nr < OTI_PVEC_SIZE || end_of_region)
- break;
-
- if (res == CLP_GANG_OKAY && need_resched())
- res = CLP_GANG_RESCHED;
- if (res != CLP_GANG_OKAY)
- break;
-
- spin_lock(&osc->oo_tree_lock);
- tree_lock = true;
- }
- if (tree_lock)
- spin_unlock(&osc->oo_tree_lock);
- return res;
-}
-
-/**
- * Check if page @page is covered by an extra lock or discard it.
- */
-static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops, void *cbdata)
-{
- struct osc_thread_info *info = osc_env_info(env);
- struct osc_object *osc = cbdata;
- pgoff_t index;
-
- index = osc_index(ops);
- if (index >= info->oti_fn_index) {
- struct ldlm_lock *tmp;
- struct cl_page *page = ops->ops_cl.cpl_page;
-
- /* refresh non-overlapped index */
- tmp = osc_dlmlock_at_pgoff(env, osc, index,
- OSC_DAP_FL_TEST_LOCK);
- if (tmp) {
- __u64 end = tmp->l_policy_data.l_extent.end;
- /* Cache the first-non-overlapped index so as to skip
- * all pages within [index, oti_fn_index). This is safe
- * because if tmp lock is canceled, it will discard
- * these pages.
- */
- info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
- if (end == OBD_OBJECT_EOF)
- info->oti_fn_index = CL_PAGE_EOF;
- LDLM_LOCK_PUT(tmp);
- } else if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- }
- }
-
- info->oti_next_index = index + 1;
- return CLP_GANG_OKAY;
-}
-
-static int discard_cb(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops, void *cbdata)
-{
- struct osc_thread_info *info = osc_env_info(env);
- struct cl_page *page = ops->ops_cl.cpl_page;
-
- /* page is top page. */
- info->oti_next_index = osc_index(ops) + 1;
- if (cl_page_own(env, io, page) == 0) {
- if (page->cp_type == CPT_CACHEABLE &&
- PageDirty(cl_page_vmpage(page)))
- CL_PAGE_DEBUG(D_ERROR, env, page,
- "discard dirty page?\n");
-
- /* discard the page */
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- }
-
- return CLP_GANG_OKAY;
-}
-
-/**
- * Discard pages protected by the given lock. This function traverses radix
- * tree to find all covering pages and discard them. If a page is being covered
- * by other locks, it should remain in cache.
- *
- * If error happens on any step, the process continues anyway (the reasoning
- * behind this being that lock cancellation cannot be delayed indefinitely).
- */
-int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
- pgoff_t start, pgoff_t end, enum cl_lock_mode mode)
-{
- struct osc_thread_info *info = osc_env_info(env);
- struct cl_io *io = &info->oti_io;
- osc_page_gang_cbt cb;
- int res;
- int result;
-
- io->ci_obj = cl_object_top(osc2cl(osc));
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result != 0)
- goto out;
-
- cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
- info->oti_fn_index = start;
- info->oti_next_index = start;
- do {
- res = osc_page_gang_lookup(env, io, osc,
- info->oti_next_index, end, cb, osc);
- if (info->oti_next_index > end)
- break;
-
- if (res == CLP_GANG_RESCHED)
- cond_resched();
- } while (res != CLP_GANG_OKAY);
-out:
- cl_io_fini(env, io);
- return result;
-}
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
deleted file mode 100644
index 1449013722f6..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ /dev/null
@@ -1,683 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Internal interfaces of OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#ifndef OSC_CL_INTERNAL_H
-#define OSC_CL_INTERNAL_H
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd.h>
-/* osc_build_res_name() */
-#include <cl_object.h>
-#include "osc_internal.h"
-
-/** \defgroup osc osc
- * @{
- */
-
-struct osc_extent;
-
-/**
- * State maintained by osc layer for each IO context.
- */
-struct osc_io {
- /** super class */
- struct cl_io_slice oi_cl;
- /** true if this io is lockless. */
- unsigned int oi_lockless:1,
- /** true if this io is counted as active IO */
- oi_is_active:1;
- /** how many LRU pages are reserved for this IO */
- unsigned long oi_lru_reserved;
-
- /** active extents, we know how many bytes is going to be written,
- * so having an active extent will prevent it from being fragmented
- */
- struct osc_extent *oi_active;
- /** partially truncated extent, we need to hold this extent to prevent
- * page writeback from happening.
- */
- struct osc_extent *oi_trunc;
-
- /** write osc_lock for this IO, used by osc_extent_find(). */
- struct osc_lock *oi_write_osclock;
- struct obdo oi_oa;
- struct osc_async_cbargs {
- bool opc_rpc_sent;
- int opc_rc;
- struct completion opc_sync;
- } oi_cbarg;
-};
-
-/**
- * State maintained by osc layer for the duration of a system call.
- */
-struct osc_session {
- struct osc_io os_io;
-};
-
-#define OTI_PVEC_SIZE 256
-struct osc_thread_info {
- struct ldlm_res_id oti_resname;
- union ldlm_policy_data oti_policy;
- struct cl_lock_descr oti_descr;
- struct cl_attr oti_attr;
- struct lustre_handle oti_handle;
- struct cl_page_list oti_plist;
- struct cl_io oti_io;
- void *oti_pvec[OTI_PVEC_SIZE];
- /**
- * Fields used by cl_lock_discard_pages().
- */
- pgoff_t oti_next_index;
- pgoff_t oti_fn_index; /* first non-overlapped index */
- struct cl_sync_io oti_anchor;
- struct cl_req_attr oti_req_attr;
-};
-
-struct osc_object {
- struct cl_object oo_cl;
- struct lov_oinfo *oo_oinfo;
- /**
- * True if locking against this stripe got -EUSERS.
- */
- int oo_contended;
- unsigned long oo_contention_time;
- /**
- * used by the osc to keep track of what objects to build into rpcs.
- * Protected by client_obd->cli_loi_list_lock.
- */
- struct list_head oo_ready_item;
- struct list_head oo_hp_ready_item;
- struct list_head oo_write_item;
- struct list_head oo_read_item;
-
- /**
- * extent is a red black tree to manage (async) dirty pages.
- */
- struct rb_root oo_root;
- /**
- * Manage write(dirty) extents.
- */
- struct list_head oo_hp_exts; /* list of hp extents */
- struct list_head oo_urgent_exts; /* list of writeback extents */
- struct list_head oo_rpc_exts;
-
- struct list_head oo_reading_exts;
-
- atomic_t oo_nr_reads;
- atomic_t oo_nr_writes;
-
- /** Protect extent tree. Will be used to protect
- * oo_{read|write}_pages soon.
- */
- spinlock_t oo_lock;
-
- /**
- * Radix tree for caching pages
- */
- struct radix_tree_root oo_tree;
- spinlock_t oo_tree_lock;
- unsigned long oo_npages;
-
- /* Protect osc_lock this osc_object has */
- spinlock_t oo_ol_spin;
- struct list_head oo_ol_list;
-
- /** number of active IOs of this object */
- atomic_t oo_nr_ios;
- wait_queue_head_t oo_io_waitq;
-};
-
-static inline void osc_object_lock(struct osc_object *obj)
-{
- spin_lock(&obj->oo_lock);
-}
-
-static inline int osc_object_trylock(struct osc_object *obj)
-{
- return spin_trylock(&obj->oo_lock);
-}
-
-static inline void osc_object_unlock(struct osc_object *obj)
-{
- spin_unlock(&obj->oo_lock);
-}
-
-static inline int osc_object_is_locked(struct osc_object *obj)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- return spin_is_locked(&obj->oo_lock);
-#else
- /*
- * It is not perfect to return true all the time.
- * But since this function is only used for assertion
- * and checking, it seems OK.
- */
- return 1;
-#endif
-}
-
-/*
- * Lock "micro-states" for osc layer.
- */
-enum osc_lock_state {
- OLS_NEW,
- OLS_ENQUEUED,
- OLS_UPCALL_RECEIVED,
- OLS_GRANTED,
- OLS_CANCELLED
-};
-
-/**
- * osc-private state of cl_lock.
- *
- * Interaction with DLM.
- *
- * Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in
- * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_dlmlock.
- *
- * This pointer is protected through a reference, acquired by
- * osc_lock_upcall0(). Also, an additional reference is acquired by
- * ldlm_lock_addref() call protecting the lock from cancellation, until
- * osc_lock_unuse() releases it.
- *
- * Below is a description of how lock references are acquired and released
- * inside of DLM.
- *
- * - When new lock is created and enqueued to the server (ldlm_cli_enqueue())
- * - ldlm_lock_create()
- * - ldlm_lock_new(): initializes a lock with 2 references. One for
- * the caller (released when reply from the server is received, or on
- * error), and another for the hash table.
- * - ldlm_lock_addref_internal(): protects the lock from cancellation.
- *
- * - When reply is received from the server (osc_enqueue_interpret())
- * - ldlm_cli_enqueue_fini()
- * - LDLM_LOCK_PUT(): releases caller reference acquired by
- * ldlm_lock_new().
- * - if (rc != 0)
- * ldlm_lock_decref(): error case: matches ldlm_cli_enqueue().
- * - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue().
- *
- * - When lock is being cancelled (ldlm_lock_cancel())
- * - ldlm_lock_destroy()
- * - LDLM_LOCK_PUT(): releases hash-table reference acquired by
- * ldlm_lock_new().
- *
- * osc_lock is detached from ldlm_lock by osc_lock_detach() that is called
- * either when lock is cancelled (osc_lock_blocking()), or when locks is
- * deleted without cancellation (e.g., from cl_locks_prune()). In the latter
- * case ldlm lock remains in memory, and can be re-attached to osc_lock in the
- * future.
- */
-struct osc_lock {
- struct cl_lock_slice ols_cl;
- /** Internal lock to protect states, etc. */
- spinlock_t ols_lock;
- /** Owner sleeps on this channel for state change */
- struct cl_sync_io *ols_owner;
- /** waiting list for this lock to be cancelled */
- struct list_head ols_waiting_list;
- /** wait entry of ols_waiting_list */
- struct list_head ols_wait_entry;
- /** list entry for osc_object::oo_ol_list */
- struct list_head ols_nextlock_oscobj;
-
- /** underlying DLM lock */
- struct ldlm_lock *ols_dlmlock;
- /** DLM flags with which osc_lock::ols_lock was enqueued */
- __u64 ols_flags;
- /** osc_lock::ols_lock handle */
- struct lustre_handle ols_handle;
- struct ldlm_enqueue_info ols_einfo;
- enum osc_lock_state ols_state;
- /** lock value block */
- struct ost_lvb ols_lvb;
-
- /**
- * true, if ldlm_lock_addref() was called against
- * osc_lock::ols_lock. This is used for sanity checking.
- *
- * \see osc_lock::ols_has_ref
- */
- unsigned ols_hold :1,
- /**
- * this is much like osc_lock::ols_hold, except that this bit is
- * cleared _after_ reference in released in osc_lock_unuse(). This
- * fine distinction is needed because:
- *
- * - if ldlm lock still has a reference, osc_ast_data_get() needs
- * to return associated cl_lock (so that a flag is needed that is
- * cleared after ldlm_lock_decref() returned), and
- *
- * - ldlm_lock_decref() can invoke blocking ast (for a
- * LDLM_FL_CBPENDING lock), and osc_lock functions like
- * osc_lock_cancel() called from there need to know whether to
- * release lock reference (so that a flag is needed that is
- * cleared before ldlm_lock_decref() is called).
- */
- ols_has_ref:1,
- /**
- * inherit the lockless attribute from top level cl_io.
- * If true, osc_lock_enqueue is able to tolerate the -EUSERS error.
- */
- ols_locklessable:1,
- /**
- * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
- * the EVAVAIL error as tolerable, this will make upper logic happy
- * to wait all glimpse locks to each OSTs to be completed.
- * Glimpse lock converts to normal lock if the server lock is
- * granted.
- * Glimpse lock should be destroyed immediately after use.
- */
- ols_glimpse:1,
- /**
- * For async glimpse lock.
- */
- ols_agl:1;
-};
-
-/**
- * Page state private for osc layer.
- */
-struct osc_page {
- struct cl_page_slice ops_cl;
- /**
- * Page queues used by osc to detect when RPC can be formed.
- */
- struct osc_async_page ops_oap;
- /**
- * An offset within page from which next transfer starts. This is used
- * by cl_page_clip() to submit partial page transfers.
- */
- int ops_from;
- /**
- * An offset within page at which next transfer ends.
- *
- * \see osc_page::ops_from.
- */
- int ops_to;
- /**
- * Boolean, true iff page is under transfer. Used for sanity checking.
- */
- unsigned ops_transfer_pinned:1,
- /**
- * in LRU?
- */
- ops_in_lru:1,
- /**
- * Set if the page must be transferred with OBD_BRW_SRVLOCK.
- */
- ops_srvlock:1;
- /**
- * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
- */
- struct list_head ops_lru;
- /**
- * Submit time - the time when the page is starting RPC. For debugging.
- */
- unsigned long ops_submit_time;
-};
-
-extern struct kmem_cache *osc_lock_kmem;
-extern struct kmem_cache *osc_object_kmem;
-extern struct kmem_cache *osc_thread_kmem;
-extern struct kmem_cache *osc_session_kmem;
-extern struct kmem_cache *osc_extent_kmem;
-
-extern struct lu_device_type osc_device_type;
-extern struct lu_context_key osc_key;
-extern struct lu_context_key osc_session_key;
-
-#define OSC_FLAGS (ASYNC_URGENT | ASYNC_READY)
-
-int osc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
-int osc_io_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-struct lu_object *osc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev);
-int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t ind);
-
-void osc_index2policy(union ldlm_policy_data *policy,
- const struct cl_object *obj,
- pgoff_t start, pgoff_t end);
-int osc_lvb_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct ost_lvb *lvb);
-
-void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
-void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
- enum cl_req_type crt, int brw_flags);
-int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
-int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
- u32 async_flags);
-int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
- struct page *page, loff_t offset);
-int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops);
-int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
-int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
- struct osc_page *ops);
-int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops);
-int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
- struct list_head *list, int cmd, int brw_flags);
-int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
- u64 size, struct osc_extent **extp);
-void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext);
-int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
- pgoff_t start, pgoff_t end, int hp, int discard);
-int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
- pgoff_t start, pgoff_t end);
-void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc);
-int lru_queue_work(const struct lu_env *env, void *data);
-
-void osc_object_set_contended(struct osc_object *obj);
-void osc_object_clear_contended(struct osc_object *obj);
-int osc_object_is_contended(struct osc_object *obj);
-
-int osc_lock_is_lockless(const struct osc_lock *olck);
-
-/*****************************************************************************
- *
- * Accessors.
- *
- */
-
-static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
-{
- struct osc_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &osc_key);
- LASSERT(info);
- return info;
-}
-
-static inline struct osc_session *osc_env_session(const struct lu_env *env)
-{
- struct osc_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &osc_session_key);
- LASSERT(ses);
- return ses;
-}
-
-static inline struct osc_io *osc_env_io(const struct lu_env *env)
-{
- return &osc_env_session(env)->os_io;
-}
-
-static inline int osc_is_object(const struct lu_object *obj)
-{
- return obj->lo_dev->ld_type == &osc_device_type;
-}
-
-static inline struct osc_device *lu2osc_dev(const struct lu_device *d)
-{
- LINVRNT(d->ld_type == &osc_device_type);
- return container_of0(d, struct osc_device, od_cl.cd_lu_dev);
-}
-
-static inline struct obd_export *osc_export(const struct osc_object *obj)
-{
- return lu2osc_dev(obj->oo_cl.co_lu.lo_dev)->od_exp;
-}
-
-static inline struct client_obd *osc_cli(const struct osc_object *obj)
-{
- return &osc_export(obj)->exp_obd->u.cli;
-}
-
-static inline struct osc_object *cl2osc(const struct cl_object *obj)
-{
- LINVRNT(osc_is_object(&obj->co_lu));
- return container_of0(obj, struct osc_object, oo_cl);
-}
-
-static inline struct cl_object *osc2cl(const struct osc_object *obj)
-{
- return (struct cl_object *)&obj->oo_cl;
-}
-
-static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode)
-{
- LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
- if (mode == CLM_READ)
- return LCK_PR;
- else if (mode == CLM_WRITE)
- return LCK_PW;
- else
- return LCK_GROUP;
-}
-
-static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
-{
- LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
- if (mode == LCK_PR)
- return CLM_READ;
- else if (mode == LCK_PW)
- return CLM_WRITE;
- else
- return CLM_GROUP;
-}
-
-static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
-{
- LINVRNT(osc_is_object(&slice->cpl_obj->co_lu));
- return container_of0(slice, struct osc_page, ops_cl);
-}
-
-static inline struct osc_page *oap2osc(struct osc_async_page *oap)
-{
- return container_of0(oap, struct osc_page, ops_oap);
-}
-
-static inline pgoff_t osc_index(struct osc_page *opg)
-{
- return opg->ops_cl.cpl_index;
-}
-
-static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
-{
- return oap2osc(oap)->ops_cl.cpl_page;
-}
-
-static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
-{
- return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
-}
-
-static inline struct osc_page *
-osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
-{
- const struct cl_page_slice *slice;
-
- LASSERT(osc);
- slice = cl_object_page_slice(&osc->oo_cl, page);
- return cl2osc_page(slice);
-}
-
-static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
-{
- LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
- return container_of0(slice, struct osc_lock, ols_cl);
-}
-
-static inline struct osc_lock *osc_lock_at(const struct cl_lock *lock)
-{
- return cl2osc_lock(cl_lock_at(lock, &osc_device_type));
-}
-
-static inline int osc_io_srvlock(struct osc_io *oio)
-{
- return (oio->oi_lockless && !oio->oi_cl.cis_io->ci_no_srvlock);
-}
-
-enum osc_extent_state {
- OES_INV = 0, /** extent is just initialized or destroyed */
- OES_ACTIVE = 1, /** process is using this extent */
- OES_CACHE = 2, /** extent is ready for IO */
- OES_LOCKING = 3, /** locking page to prepare IO */
- OES_LOCK_DONE = 4, /** locking finished, ready to send */
- OES_RPC = 5, /** in RPC */
- OES_TRUNC = 6, /** being truncated */
- OES_STATE_MAX
-};
-
-/**
- * osc_extent data to manage dirty pages.
- * osc_extent has the following attributes:
- * 1. all pages in the same must be in one RPC in write back;
- * 2. # of pages must be less than max_pages_per_rpc - implied by 1;
- * 3. must be covered by only 1 osc_lock;
- * 4. exclusive. It's impossible to have overlapped osc_extent.
- *
- * The lifetime of an extent is from when the 1st page is dirtied to when
- * all pages inside it are written out.
- *
- * LOCKING ORDER
- * =============
- * page lock -> cl_loi_list_lock -> object lock(osc_object::oo_lock)
- */
-struct osc_extent {
- /** red-black tree node */
- struct rb_node oe_node;
- /** osc_object of this extent */
- struct osc_object *oe_obj;
- /** refcount, removed from red-black tree if reaches zero. */
- atomic_t oe_refc;
- /** busy if non-zero */
- atomic_t oe_users;
- /** link list of osc_object's oo_{hp|urgent|locking}_exts. */
- struct list_head oe_link;
- /** state of this extent */
- enum osc_extent_state oe_state;
- /** flags for this extent. */
- unsigned int oe_intree:1,
- /** 0 is write, 1 is read */
- oe_rw:1,
- /** sync extent, queued by osc_queue_sync_pages() */
- oe_sync:1,
- /** set if this extent has partial, sync pages.
- * Extents with partial page(s) can't merge with others in RPC
- */
- oe_no_merge:1,
- oe_srvlock:1,
- oe_memalloc:1,
- /** an ACTIVE extent is going to be truncated, so when this extent
- * is released, it will turn into TRUNC state instead of CACHE.
- */
- oe_trunc_pending:1,
- /** this extent should be written asap and someone may wait for the
- * write to finish. This bit is usually set along with urgent if
- * the extent was CACHE state.
- * fsync_wait extent can't be merged because new extent region may
- * exceed fsync range.
- */
- oe_fsync_wait:1,
- /** covering lock is being canceled */
- oe_hp:1,
- /** this extent should be written back asap. set if one of pages is
- * called by page WB daemon, or sync write or reading requests.
- */
- oe_urgent:1;
- /** how many grants allocated for this extent.
- * Grant allocated for this extent. There is no grant allocated
- * for reading extents and sync write extents.
- */
- unsigned int oe_grants;
- /** # of dirty pages in this extent */
- unsigned int oe_nr_pages;
- /** list of pending oap pages. Pages in this list are NOT sorted. */
- struct list_head oe_pages;
- /** Since an extent has to be written out in atomic, this is used to
- * remember the next page need to be locked to write this extent out.
- * Not used right now.
- */
- struct osc_page *oe_next_page;
- /** start and end index of this extent, include start and end
- * themselves. Page offset here is the page index of osc_pages.
- * oe_start is used as keyword for red-black tree.
- */
- pgoff_t oe_start;
- pgoff_t oe_end;
- /** maximum ending index of this extent, this is limited by
- * max_pages_per_rpc, lock extent and chunk size.
- */
- pgoff_t oe_max_end;
- /** waitqueue - for those who want to be notified if this extent's
- * state has changed.
- */
- wait_queue_head_t oe_waitq;
- /** lock covering this extent */
- struct ldlm_lock *oe_dlmlock;
- /** terminator of this extent. Must be true if this extent is in IO. */
- struct task_struct *oe_owner;
- /** return value of writeback. If somebody is waiting for this extent,
- * this value can be known by outside world.
- */
- int oe_rc;
- /** max pages per rpc when this extent was created */
- unsigned int oe_mppr;
-};
-
-int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
- int sent, int rc);
-void osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
-
-int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
- pgoff_t start, pgoff_t end, enum cl_lock_mode mode);
-
-typedef int (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
- struct osc_page *, void *);
-int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
- struct osc_object *osc, pgoff_t start, pgoff_t end,
- osc_page_gang_cbt cb, void *cbdata);
-/** @} osc */
-
-#endif /* OSC_CL_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
deleted file mode 100644
index 2b5f324743e2..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ /dev/null
@@ -1,246 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_device, for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-/* class_name2obd() */
-#include <obd_class.h>
-
-#include "osc_cl_internal.h"
-
-/** \addtogroup osc
- * @{
- */
-
-struct kmem_cache *osc_lock_kmem;
-struct kmem_cache *osc_object_kmem;
-struct kmem_cache *osc_thread_kmem;
-struct kmem_cache *osc_session_kmem;
-struct kmem_cache *osc_extent_kmem;
-struct kmem_cache *osc_quota_kmem;
-
-struct lu_kmem_descr osc_caches[] = {
- {
- .ckd_cache = &osc_lock_kmem,
- .ckd_name = "osc_lock_kmem",
- .ckd_size = sizeof(struct osc_lock)
- },
- {
- .ckd_cache = &osc_object_kmem,
- .ckd_name = "osc_object_kmem",
- .ckd_size = sizeof(struct osc_object)
- },
- {
- .ckd_cache = &osc_thread_kmem,
- .ckd_name = "osc_thread_kmem",
- .ckd_size = sizeof(struct osc_thread_info)
- },
- {
- .ckd_cache = &osc_session_kmem,
- .ckd_name = "osc_session_kmem",
- .ckd_size = sizeof(struct osc_session)
- },
- {
- .ckd_cache = &osc_extent_kmem,
- .ckd_name = "osc_extent_kmem",
- .ckd_size = sizeof(struct osc_extent)
- },
- {
- .ckd_cache = &osc_quota_kmem,
- .ckd_name = "osc_quota_kmem",
- .ckd_size = sizeof(struct osc_quota_info)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static struct lu_device *osc2lu_dev(struct osc_device *osc)
-{
- return &osc->od_cl.cd_lu_dev;
-}
-
-/*****************************************************************************
- *
- * Osc device and device type functions.
- *
- */
-
-static void *osc_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct osc_thread_info *info;
-
- info = kmem_cache_zalloc(osc_thread_kmem, GFP_NOFS);
- if (!info)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void osc_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct osc_thread_info *info = data;
-
- kmem_cache_free(osc_thread_kmem, info);
-}
-
-struct lu_context_key osc_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = osc_key_init,
- .lct_fini = osc_key_fini
-};
-
-static void *osc_session_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct osc_session *info;
-
- info = kmem_cache_zalloc(osc_session_kmem, GFP_NOFS);
- if (!info)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void osc_session_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct osc_session *info = data;
-
- kmem_cache_free(osc_session_kmem, info);
-}
-
-struct lu_context_key osc_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = osc_session_init,
- .lct_fini = osc_session_fini
-};
-
-/* type constructor/destructor: osc_type_{init,fini,start,stop}(). */
-LU_TYPE_INIT_FINI(osc, &osc_key, &osc_session_key);
-
-static int osc_cl_process_config(const struct lu_env *env,
- struct lu_device *d, struct lustre_cfg *cfg)
-{
- return osc_process_config_base(d->ld_obd, cfg);
-}
-
-static const struct lu_device_operations osc_lu_ops = {
- .ldo_object_alloc = osc_object_alloc,
- .ldo_process_config = osc_cl_process_config,
- .ldo_recovery_complete = NULL
-};
-
-static int osc_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- return 0;
-}
-
-static struct lu_device *osc_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- return NULL;
-}
-
-static struct lu_device *osc_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct osc_device *od = lu2osc_dev(d);
-
- cl_device_fini(lu2cl_dev(d));
- kfree(od);
- return NULL;
-}
-
-static struct lu_device *osc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- struct lu_device *d;
- struct osc_device *od;
- struct obd_device *obd;
- int rc;
-
- od = kzalloc(sizeof(*od), GFP_NOFS);
- if (!od)
- return ERR_PTR(-ENOMEM);
-
- cl_device_init(&od->od_cl, t);
- d = osc2lu_dev(od);
- d->ld_ops = &osc_lu_ops;
-
- /* Setup OSC OBD */
- obd = class_name2obd(lustre_cfg_string(cfg, 0));
- LASSERT(obd);
- rc = osc_setup(obd, cfg);
- if (rc) {
- osc_device_free(env, d);
- return ERR_PTR(rc);
- }
- od->od_exp = obd->obd_self_export;
- return d;
-}
-
-static const struct lu_device_type_operations osc_device_type_ops = {
- .ldto_init = osc_type_init,
- .ldto_fini = osc_type_fini,
-
- .ldto_start = osc_type_start,
- .ldto_stop = osc_type_stop,
-
- .ldto_device_alloc = osc_device_alloc,
- .ldto_device_free = osc_device_free,
-
- .ldto_device_init = osc_device_init,
- .ldto_device_fini = osc_device_fini
-};
-
-struct lu_device_type osc_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_OSC_NAME,
- .ldt_ops = &osc_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD
-};
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
deleted file mode 100644
index 32db150fd42e..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ /dev/null
@@ -1,236 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef OSC_INTERNAL_H
-#define OSC_INTERNAL_H
-
-#define OAP_MAGIC 8675309
-
-extern atomic_t osc_pool_req_count;
-extern unsigned int osc_reqpool_maxreqcount;
-extern struct ptlrpc_request_pool *osc_rq_pool;
-
-struct lu_env;
-
-enum async_flags {
- ASYNC_READY = 0x1, /* ap_make_ready will not be called before this
- * page is added to an rpc
- */
- ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */
- ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
- * to give the caller a chance to update
- * or cancel the size of the io
- */
- ASYNC_HP = 0x10,
-};
-
-struct osc_async_page {
- int oap_magic;
- unsigned short oap_cmd;
- unsigned short oap_interrupted:1;
-
- struct list_head oap_pending_item;
- struct list_head oap_rpc_item;
-
- u64 oap_obj_off;
- unsigned int oap_page_off;
- enum async_flags oap_async_flags;
-
- struct brw_page oap_brw_page;
-
- struct ptlrpc_request *oap_request;
- struct client_obd *oap_cli;
- struct osc_object *oap_obj;
-
- spinlock_t oap_lock;
-};
-
-#define oap_page oap_brw_page.pg
-#define oap_count oap_brw_page.count
-#define oap_brw_flags oap_brw_page.flag
-
-static inline struct osc_async_page *brw_page2oap(struct brw_page *pga)
-{
- return (struct osc_async_page *)container_of(pga, struct osc_async_page,
- oap_brw_page);
-}
-
-struct osc_cache_waiter {
- struct list_head ocw_entry;
- wait_queue_head_t ocw_waitq;
- struct osc_async_page *ocw_oap;
- int ocw_grant;
- int ocw_rc;
-};
-
-void osc_wake_cache_waiters(struct client_obd *cli);
-int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes);
-void osc_update_next_shrink(struct client_obd *cli);
-
-/*
- * cl integration.
- */
-#include <cl_object.h>
-
-extern struct ptlrpc_request_set *PTLRPCD_SET;
-
-typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
- int rc);
-
-int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, union ldlm_policy_data *policy,
- struct ost_lvb *lvb, int kms_valid,
- osc_enqueue_upcall_f upcall,
- void *cookie, struct ldlm_enqueue_info *einfo,
- struct ptlrpc_request_set *rqset, int async, int agl);
-
-int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- enum ldlm_type type, union ldlm_policy_data *policy,
- enum ldlm_mode mode, __u64 *flags, void *data,
- struct lustre_handle *lockh, int unref);
-
-int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset);
-int osc_punch_base(struct obd_export *exp, struct obdo *oa,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset);
-int osc_sync_base(struct osc_object *exp, struct obdo *oa,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset);
-
-int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
-int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
- struct list_head *ext_list, int cmd);
-long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
- long target, bool force);
-unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages);
-void osc_lru_unreserve(struct client_obd *cli, unsigned long npages);
-
-unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
-
-int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
-
-int lproc_osc_attach_seqstat(struct obd_device *dev);
-void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars);
-
-extern struct lu_device_type osc_device_type;
-
-static inline int osc_recoverable_error(int rc)
-{
- return (rc == -EIO || rc == -EROFS || rc == -ENOMEM ||
- rc == -EAGAIN || rc == -EINPROGRESS);
-}
-
-static inline unsigned long rpcs_in_flight(struct client_obd *cli)
-{
- return cli->cl_r_in_flight + cli->cl_w_in_flight;
-}
-
-static inline char *cli_name(struct client_obd *cli)
-{
- return cli->cl_import->imp_obd->obd_name;
-}
-
-struct osc_device {
- struct cl_device od_cl;
- struct obd_export *od_exp;
-
- /* Write stats is actually protected by client_obd's lock. */
- struct osc_stats {
- u64 os_lockless_writes; /* by bytes */
- u64 os_lockless_reads; /* by bytes */
- u64 os_lockless_truncates; /* by times */
- } od_stats;
-
- /* configuration item(s) */
- int od_contention_time;
- int od_lockless_truncate;
-};
-
-static inline struct osc_device *obd2osc_dev(const struct obd_device *d)
-{
- return container_of0(d->obd_lu_dev, struct osc_device, od_cl.cd_lu_dev);
-}
-
-extern struct lu_kmem_descr osc_caches[];
-
-extern struct kmem_cache *osc_quota_kmem;
-struct osc_quota_info {
- /** linkage for quota hash table */
- struct hlist_node oqi_hash;
- u32 oqi_id;
-};
-
-int osc_quota_setup(struct obd_device *obd);
-int osc_quota_cleanup(struct obd_device *obd);
-int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
- u32 valid, u32 flags);
-int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]);
-int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl);
-void osc_inc_unstable_pages(struct ptlrpc_request *req);
-void osc_dec_unstable_pages(struct ptlrpc_request *req);
-bool osc_over_unstable_soft_limit(struct client_obd *cli);
-
-/**
- * Bit flags for osc_dlm_lock_at_pageoff().
- */
-enum osc_dap_flags {
- /**
- * Just check if the desired lock exists, it won't hold reference
- * count on lock.
- */
- OSC_DAP_FL_TEST_LOCK = BIT(0),
- /**
- * Return the lock even if it is being canceled.
- */
- OSC_DAP_FL_CANCELING = BIT(1),
-};
-
-struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
- struct osc_object *obj, pgoff_t index,
- enum osc_dap_flags flags);
-
-int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc);
-
-/** osc shrink list to link all osc client obd */
-extern struct list_head osc_shrink_list;
-/** spin lock to protect osc_shrink_list */
-extern spinlock_t osc_shrink_lock;
-unsigned long osc_cache_shrink_count(struct shrinker *sk,
- struct shrink_control *sc);
-unsigned long osc_cache_shrink_scan(struct shrinker *sk,
- struct shrink_control *sc);
-
-#endif /* OSC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
deleted file mode 100644
index 76743faf3e6d..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ /dev/null
@@ -1,918 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_io for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include <lustre_obdo.h>
-
-#include "osc_cl_internal.h"
-
-/** \addtogroup osc
- * @{
- */
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static struct osc_io *cl2osc_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);
-
- LINVRNT(oio == osc_env_io(env));
- return oio;
-}
-
-/*****************************************************************************
- *
- * io operations.
- *
- */
-
-static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
-{
-}
-
-static void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
-{
- struct ldlm_lock *dlmlock = cbdata;
- struct lustre_handle lockh;
-
- ldlm_lock2handle(dlmlock, &lockh);
- ldlm_lock_decref(&lockh, LCK_PR);
- LDLM_LOCK_PUT(dlmlock);
-}
-
-static int osc_io_read_ahead(const struct lu_env *env,
- const struct cl_io_slice *ios,
- pgoff_t start, struct cl_read_ahead *ra)
-{
- struct osc_object *osc = cl2osc(ios->cis_obj);
- struct ldlm_lock *dlmlock;
- int result = -ENODATA;
-
- dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
- if (dlmlock) {
- LASSERT(dlmlock->l_ast_data == osc);
- if (dlmlock->l_req_mode != LCK_PR) {
- struct lustre_handle lockh;
-
- ldlm_lock2handle(dlmlock, &lockh);
- ldlm_lock_addref(&lockh, LCK_PR);
- ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
- }
-
- ra->cra_rpc_size = osc_cli(osc)->cl_max_pages_per_rpc;
- ra->cra_end = cl_index(osc2cl(osc),
- dlmlock->l_policy_data.l_extent.end);
- ra->cra_release = osc_read_ahead_release;
- ra->cra_cbdata = dlmlock;
- result = 0;
- }
-
- return result;
-}
-
-/**
- * An implementation of cl_io_operations::cio_io_submit() method for osc
- * layer. Iterates over pages in the in-queue, prepares each for io by calling
- * cl_page_prep() and then either submits them through osc_io_submit_page()
- * or, if page is already submitted, changes osc flags through
- * osc_set_async_flags().
- */
-static int osc_io_submit(const struct lu_env *env,
- const struct cl_io_slice *ios,
- enum cl_req_type crt, struct cl_2queue *queue)
-{
- struct cl_page *page;
- struct cl_page *tmp;
- struct client_obd *cli = NULL;
- struct osc_object *osc = NULL; /* to keep gcc happy */
- struct osc_page *opg;
- struct cl_io *io;
- LIST_HEAD(list);
-
- struct cl_page_list *qin = &queue->c2_qin;
- struct cl_page_list *qout = &queue->c2_qout;
- unsigned int queued = 0;
- int result = 0;
- int cmd;
- int brw_flags;
- unsigned int max_pages;
-
- LASSERT(qin->pl_nr > 0);
-
- CDEBUG(D_CACHE | D_READA, "%d %d\n", qin->pl_nr, crt);
-
- osc = cl2osc(ios->cis_obj);
- cli = osc_cli(osc);
- max_pages = cli->cl_max_pages_per_rpc;
-
- cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
- brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;
-
- /*
- * NOTE: here @page is a top-level page. This is done to avoid
- * creation of sub-page-list.
- */
- cl_page_list_for_each_safe(page, tmp, qin) {
- struct osc_async_page *oap;
-
- /* Top level IO. */
- io = page->cp_owner;
- LASSERT(io);
-
- opg = osc_cl_page_osc(page, osc);
- oap = &opg->ops_oap;
- LASSERT(osc == oap->oap_obj);
-
- if (!list_empty(&oap->oap_pending_item) ||
- !list_empty(&oap->oap_rpc_item)) {
- CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
- oap, opg);
- result = -EBUSY;
- break;
- }
-
- result = cl_page_prep(env, io, page, crt);
- if (result != 0) {
- LASSERT(result < 0);
- if (result != -EALREADY)
- break;
- /*
- * Handle -EALREADY error: for read case, the page is
- * already in UPTODATE state; for write, the page
- * is not dirty.
- */
- result = 0;
- continue;
- }
-
- spin_lock(&oap->oap_lock);
- oap->oap_async_flags = ASYNC_URGENT | ASYNC_READY;
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- spin_unlock(&oap->oap_lock);
-
- osc_page_submit(env, opg, crt, brw_flags);
- list_add_tail(&oap->oap_pending_item, &list);
-
- if (page->cp_sync_io)
- cl_page_list_move(qout, qin, page);
- else /* async IO */
- cl_page_list_del(env, qin, page);
-
- if (++queued == max_pages) {
- queued = 0;
- result = osc_queue_sync_pages(env, osc, &list, cmd,
- brw_flags);
- if (result < 0)
- break;
- }
- }
-
- if (queued > 0)
- result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);
-
- /* Update c/mtime for sync write. LU-7310 */
- if (qout->pl_nr > 0 && !result) {
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- struct cl_object *obj = ios->cis_obj;
-
- cl_object_attr_lock(obj);
- attr->cat_mtime = ktime_get_real_seconds();
- attr->cat_ctime = attr->cat_mtime;
- cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
- cl_object_attr_unlock(obj);
- }
-
- CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
- return qout->pl_nr > 0 ? 0 : result;
-}
-
-/**
- * This is called when a page is accessed within file in a way that creates
- * new page, if one were missing (i.e., if there were a hole at that place in
- * the file, or accessed page is beyond the current file size).
- *
- * Expand stripe KMS if necessary.
- */
-static void osc_page_touch_at(const struct lu_env *env,
- struct cl_object *obj, pgoff_t idx, size_t to)
-{
- struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- int valid;
- __u64 kms;
-
- /* offset within stripe */
- kms = cl_offset(obj, idx) + to;
-
- cl_object_attr_lock(obj);
- /*
- * XXX old code used
- *
- * ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
- *
- * here
- */
- CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
- kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
- loi->loi_lvb.lvb_size);
-
- attr->cat_ctime = ktime_get_real_seconds();
- attr->cat_mtime = attr->cat_ctime;
- valid = CAT_MTIME | CAT_CTIME;
- if (kms > loi->loi_kms) {
- attr->cat_kms = kms;
- valid |= CAT_KMS;
- }
- if (kms > loi->loi_lvb.lvb_size) {
- attr->cat_size = kms;
- valid |= CAT_SIZE;
- }
- cl_object_attr_update(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
-}
-
-static int osc_io_commit_async(const struct lu_env *env,
- const struct cl_io_slice *ios,
- struct cl_page_list *qin, int from, int to,
- cl_commit_cbt cb)
-{
- struct cl_io *io = ios->cis_io;
- struct osc_io *oio = cl2osc_io(env, ios);
- struct osc_object *osc = cl2osc(ios->cis_obj);
- struct cl_page *page;
- struct cl_page *last_page;
- struct osc_page *opg;
- int result = 0;
-
- LASSERT(qin->pl_nr > 0);
-
- /* Handle partial page cases */
- last_page = cl_page_list_last(qin);
- if (oio->oi_lockless) {
- page = cl_page_list_first(qin);
- if (page == last_page) {
- cl_page_clip(env, page, from, to);
- } else {
- if (from != 0)
- cl_page_clip(env, page, from, PAGE_SIZE);
- if (to != PAGE_SIZE)
- cl_page_clip(env, last_page, 0, to);
- }
- }
-
- while (qin->pl_nr > 0) {
- struct osc_async_page *oap;
-
- page = cl_page_list_first(qin);
- opg = osc_cl_page_osc(page, osc);
- oap = &opg->ops_oap;
-
- if (!list_empty(&oap->oap_rpc_item)) {
- CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
- oap, opg);
- result = -EBUSY;
- break;
- }
-
- /* The page may be already in dirty cache. */
- if (list_empty(&oap->oap_pending_item)) {
- result = osc_page_cache_add(env, &opg->ops_cl, io);
- if (result != 0)
- break;
- }
-
- osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
- page == last_page ? to : PAGE_SIZE);
-
- cl_page_list_del(env, qin, page);
-
- (*cb)(env, io, page);
- /* Can't access page any more. Page can be in transfer and
- * complete at any time.
- */
- }
-
- /* for sync write, kernel will wait for this page to be flushed before
- * osc_io_end() is called, so release it earlier.
- * for mkwrite(), it's known there is no further pages.
- */
- if (cl_io_is_sync_write(io) && oio->oi_active) {
- osc_extent_release(env, oio->oi_active);
- oio->oi_active = NULL;
- }
-
- CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
- return result;
-}
-
-static int osc_io_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct osc_object *osc = cl2osc(ios->cis_obj);
- struct obd_import *imp = osc_cli(osc)->cl_import;
- int rc = -EIO;
-
- spin_lock(&imp->imp_lock);
- if (likely(!imp->imp_invalid)) {
- struct osc_io *oio = osc_env_io(env);
-
- atomic_inc(&osc->oo_nr_ios);
- oio->oi_is_active = 1;
- rc = 0;
- }
- spin_unlock(&imp->imp_lock);
-
- return rc;
-}
-
-static int osc_io_write_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct osc_io *oio = osc_env_io(env);
- struct osc_object *osc = cl2osc(ios->cis_obj);
- unsigned long npages;
-
- if (cl_io_is_append(io))
- return osc_io_iter_init(env, ios);
-
- npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
- if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
- ++npages;
-
- oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages);
-
- return osc_io_iter_init(env, ios);
-}
-
-static void osc_io_iter_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct osc_io *oio = osc_env_io(env);
-
- if (oio->oi_is_active) {
- struct osc_object *osc = cl2osc(ios->cis_obj);
-
- oio->oi_is_active = 0;
- LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
- if (atomic_dec_and_test(&osc->oo_nr_ios))
- wake_up_all(&osc->oo_io_waitq);
- }
-}
-
-static void osc_io_write_iter_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct osc_io *oio = osc_env_io(env);
- struct osc_object *osc = cl2osc(ios->cis_obj);
-
- if (oio->oi_lru_reserved > 0) {
- osc_lru_unreserve(osc_cli(osc), oio->oi_lru_reserved);
- oio->oi_lru_reserved = 0;
- }
- oio->oi_write_osclock = NULL;
-
- osc_io_iter_fini(env, ios);
-}
-
-static int osc_io_fault_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io;
- struct cl_fault_io *fio;
-
- io = ios->cis_io;
- fio = &io->u.ci_fault;
- CDEBUG(D_INFO, "%lu %d %zu\n",
- fio->ft_index, fio->ft_writable, fio->ft_nob);
- /*
- * If mapping is writeable, adjust kms to cover this page,
- * but do not extend kms beyond actual file size.
- * See bug 10919.
- */
- if (fio->ft_writable)
- osc_page_touch_at(env, ios->cis_obj,
- fio->ft_index, fio->ft_nob);
- return 0;
-}
-
-static int osc_async_upcall(void *a, int rc)
-{
- struct osc_async_cbargs *args = a;
-
- args->opc_rc = rc;
- complete(&args->opc_sync);
- return 0;
-}
-
-/**
- * Checks that there are no pages being written in the extent being truncated.
- */
-static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops, void *cbdata)
-{
- struct cl_page *page = ops->ops_cl.cpl_page;
- struct osc_async_page *oap;
- __u64 start = *(__u64 *)cbdata;
-
- oap = &ops->ops_oap;
- if (oap->oap_cmd & OBD_BRW_WRITE &&
- !list_empty(&oap->oap_pending_item))
- CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
- start, current->comm);
-
- if (PageLocked(page->cp_vmpage))
- CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
- ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);
-
- return CLP_GANG_OKAY;
-}
-
-static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
- struct osc_io *oio, __u64 size)
-{
- struct cl_object *clob;
- int partial;
- pgoff_t start;
-
- clob = oio->oi_cl.cis_obj;
- start = cl_index(clob, size);
- partial = cl_offset(clob, start) < size;
-
- /*
- * Complain if there are pages in the truncated region.
- */
- osc_page_gang_lookup(env, io, cl2osc(clob),
- start + partial, CL_PAGE_EOF,
- trunc_check_cb, (void *)&size);
-}
-
-static int osc_io_setattr_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_io *io = slice->cis_io;
- struct osc_io *oio = cl2osc_io(env, slice);
- struct cl_object *obj = slice->cis_obj;
- struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- struct obdo *oa = &oio->oi_oa;
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
- __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
- unsigned int ia_valid = io->u.ci_setattr.sa_valid;
- int result = 0;
-
- /* truncate cache dirty pages first */
- if (cl_io_is_trunc(io))
- result = osc_cache_truncate_start(env, cl2osc(obj), size,
- &oio->oi_trunc);
-
- if (result == 0 && oio->oi_lockless == 0) {
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- if (result == 0) {
- struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
- unsigned int cl_valid = 0;
-
- if (ia_valid & ATTR_SIZE) {
- attr->cat_size = size;
- attr->cat_kms = size;
- cl_valid = CAT_SIZE | CAT_KMS;
- }
- if (ia_valid & ATTR_MTIME_SET) {
- attr->cat_mtime = lvb->lvb_mtime;
- cl_valid |= CAT_MTIME;
- }
- if (ia_valid & ATTR_ATIME_SET) {
- attr->cat_atime = lvb->lvb_atime;
- cl_valid |= CAT_ATIME;
- }
- if (ia_valid & ATTR_CTIME_SET) {
- attr->cat_ctime = lvb->lvb_ctime;
- cl_valid |= CAT_CTIME;
- }
- result = cl_object_attr_update(env, obj, attr,
- cl_valid);
- }
- cl_object_attr_unlock(obj);
- }
- memset(oa, 0, sizeof(*oa));
- if (result == 0) {
- oa->o_oi = loi->loi_oi;
- obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
- oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
- oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
- if (ia_valid & ATTR_CTIME) {
- oa->o_valid |= OBD_MD_FLCTIME;
- oa->o_ctime = attr->cat_ctime;
- }
- if (ia_valid & ATTR_ATIME) {
- oa->o_valid |= OBD_MD_FLATIME;
- oa->o_atime = attr->cat_atime;
- }
- if (ia_valid & ATTR_MTIME) {
- oa->o_valid |= OBD_MD_FLMTIME;
- oa->o_mtime = attr->cat_mtime;
- }
- if (ia_valid & ATTR_SIZE) {
- oa->o_size = size;
- oa->o_blocks = OBD_OBJECT_EOF;
- oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
-
- if (oio->oi_lockless) {
- oa->o_flags = OBD_FL_SRVLOCK;
- oa->o_valid |= OBD_MD_FLFLAGS;
- }
- } else {
- LASSERT(oio->oi_lockless == 0);
- }
- if (ia_valid & ATTR_ATTR_FLAG) {
- oa->o_flags = io->u.ci_setattr.sa_attr_flags;
- oa->o_valid |= OBD_MD_FLFLAGS;
- }
-
- init_completion(&cbargs->opc_sync);
-
- if (ia_valid & ATTR_SIZE)
- result = osc_punch_base(osc_export(cl2osc(obj)),
- oa, osc_async_upcall,
- cbargs, PTLRPCD_SET);
- else
- result = osc_setattr_async(osc_export(cl2osc(obj)),
- oa, osc_async_upcall,
- cbargs, PTLRPCD_SET);
- cbargs->opc_rpc_sent = result == 0;
- }
- return result;
-}
-
-static void osc_io_setattr_end(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_io *io = slice->cis_io;
- struct osc_io *oio = cl2osc_io(env, slice);
- struct cl_object *obj = slice->cis_obj;
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
- int result = 0;
-
- if (cbargs->opc_rpc_sent) {
- wait_for_completion(&cbargs->opc_sync);
- result = cbargs->opc_rc;
- io->ci_result = cbargs->opc_rc;
- }
- if (result == 0) {
- if (oio->oi_lockless) {
- /* lockless truncate */
- struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
-
- LASSERT(cl_io_is_trunc(io));
- /* XXX: Need a lock. */
- osd->od_stats.os_lockless_truncates++;
- }
- }
-
- if (cl_io_is_trunc(io)) {
- __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
-
- osc_trunc_check(env, io, oio, size);
- osc_cache_truncate_end(env, oio->oi_trunc);
- oio->oi_trunc = NULL;
- }
-}
-
-struct osc_data_version_args {
- struct osc_io *dva_oio;
-};
-
-static int
-osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
- void *arg, int rc)
-{
- struct osc_data_version_args *dva = arg;
- struct osc_io *oio = dva->dva_oio;
- const struct ost_body *body;
-
- if (rc < 0)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (!body) {
- rc = -EPROTO;
- goto out;
- }
-
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
- &body->oa);
-out:
- oio->oi_cbarg.opc_rc = rc;
- complete(&oio->oi_cbarg.opc_sync);
-
- return 0;
-}
-
-static int osc_io_data_version_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
- struct osc_io *oio = cl2osc_io(env, slice);
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
- struct osc_object *obj = cl2osc(slice->cis_obj);
- struct obd_export *exp = osc_export(obj);
- struct lov_oinfo *loi = obj->oo_oinfo;
- struct osc_data_version_args *dva;
- struct obdo *oa = &oio->oi_oa;
- struct ptlrpc_request *req;
- struct ost_body *body;
- int rc;
-
- memset(oa, 0, sizeof(*oa));
- oa->o_oi = loi->loi_oi;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
-
- if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
- oa->o_valid |= OBD_MD_FLFLAGS;
- oa->o_flags |= OBD_FL_SRVLOCK;
- if (dv->dv_flags & LL_DV_WR_FLUSH)
- oa->o_flags |= OBD_FL_FLUSH;
- }
-
- init_completion(&cbargs->opc_sync);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
- if (rc < 0) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
-
- ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = osc_data_version_interpret;
- BUILD_BUG_ON(sizeof(*dva) > sizeof(req->rq_async_args));
- dva = ptlrpc_req_async_args(req);
- dva->dva_oio = oio;
-
- ptlrpcd_add_req(req);
-
- return 0;
-}
-
-static void osc_io_data_version_end(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
- struct osc_io *oio = cl2osc_io(env, slice);
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
-
- wait_for_completion(&cbargs->opc_sync);
-
- if (cbargs->opc_rc) {
- slice->cis_io->ci_result = cbargs->opc_rc;
- } else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) {
- slice->cis_io->ci_result = -EOPNOTSUPP;
- } else {
- dv->dv_data_version = oio->oi_oa.o_data_version;
- slice->cis_io->ci_result = 0;
- }
-}
-
-static int osc_io_read_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_object *obj = slice->cis_obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- int rc = 0;
-
- if (!slice->cis_io->ci_noatime) {
- cl_object_attr_lock(obj);
- attr->cat_atime = ktime_get_real_seconds();
- rc = cl_object_attr_update(env, obj, attr, CAT_ATIME);
- cl_object_attr_unlock(obj);
- }
- return rc;
-}
-
-static int osc_io_write_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_object *obj = slice->cis_obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- int rc = 0;
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
- cl_object_attr_lock(obj);
- attr->cat_ctime = ktime_get_real_seconds();
- attr->cat_mtime = attr->cat_ctime;
- rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
- cl_object_attr_unlock(obj);
-
- return rc;
-}
-
-static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
- struct cl_fsync_io *fio)
-{
- struct osc_io *oio = osc_env_io(env);
- struct obdo *oa = &oio->oi_oa;
- struct lov_oinfo *loi = obj->oo_oinfo;
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
- int rc = 0;
-
- memset(oa, 0, sizeof(*oa));
- oa->o_oi = loi->loi_oi;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
-
- /* reload size abd blocks for start and end of sync range */
- oa->o_size = fio->fi_start;
- oa->o_blocks = fio->fi_end;
- oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
-
- obdo_set_parent_fid(oa, fio->fi_fid);
-
- init_completion(&cbargs->opc_sync);
-
- rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
- return rc;
-}
-
-static int osc_io_fsync_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_io *io = slice->cis_io;
- struct cl_fsync_io *fio = &io->u.ci_fsync;
- struct cl_object *obj = slice->cis_obj;
- struct osc_object *osc = cl2osc(obj);
- pgoff_t start = cl_index(obj, fio->fi_start);
- pgoff_t end = cl_index(obj, fio->fi_end);
- int result = 0;
-
- if (fio->fi_end == OBD_OBJECT_EOF)
- end = CL_PAGE_EOF;
-
- result = osc_cache_writeback_range(env, osc, start, end, 0,
- fio->fi_mode == CL_FSYNC_DISCARD);
- if (result > 0) {
- fio->fi_nr_written += result;
- result = 0;
- }
- if (fio->fi_mode == CL_FSYNC_ALL) {
- int rc;
-
- /* we have to wait for writeback to finish before we can
- * send OST_SYNC RPC. This is bad because it causes extents
- * to be written osc by osc. However, we usually start
- * writeback before CL_FSYNC_ALL so this won't have any real
- * problem.
- */
- rc = osc_cache_wait_range(env, osc, start, end);
- if (result == 0)
- result = rc;
- rc = osc_fsync_ost(env, osc, fio);
- if (result == 0)
- result = rc;
- }
-
- return result;
-}
-
-static void osc_io_fsync_end(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
- struct cl_object *obj = slice->cis_obj;
- pgoff_t start = cl_index(obj, fio->fi_start);
- pgoff_t end = cl_index(obj, fio->fi_end);
- int result = 0;
-
- if (fio->fi_mode == CL_FSYNC_LOCAL) {
- result = osc_cache_wait_range(env, cl2osc(obj), start, end);
- } else if (fio->fi_mode == CL_FSYNC_ALL) {
- struct osc_io *oio = cl2osc_io(env, slice);
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
-
- wait_for_completion(&cbargs->opc_sync);
- if (result == 0)
- result = cbargs->opc_rc;
- }
- slice->cis_io->ci_result = result;
-}
-
-static void osc_io_end(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct osc_io *oio = cl2osc_io(env, slice);
-
- if (oio->oi_active) {
- osc_extent_release(env, oio->oi_active);
- oio->oi_active = NULL;
- }
-}
-
-static const struct cl_io_operations osc_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_iter_init = osc_io_iter_init,
- .cio_iter_fini = osc_io_iter_fini,
- .cio_start = osc_io_read_start,
- .cio_fini = osc_io_fini
- },
- [CIT_WRITE] = {
- .cio_iter_init = osc_io_write_iter_init,
- .cio_iter_fini = osc_io_write_iter_fini,
- .cio_start = osc_io_write_start,
- .cio_end = osc_io_end,
- .cio_fini = osc_io_fini
- },
- [CIT_SETATTR] = {
- .cio_iter_init = osc_io_iter_init,
- .cio_iter_fini = osc_io_iter_fini,
- .cio_start = osc_io_setattr_start,
- .cio_end = osc_io_setattr_end
- },
- [CIT_DATA_VERSION] = {
- .cio_start = osc_io_data_version_start,
- .cio_end = osc_io_data_version_end,
- },
- [CIT_FAULT] = {
- .cio_iter_init = osc_io_iter_init,
- .cio_iter_fini = osc_io_iter_fini,
- .cio_start = osc_io_fault_start,
- .cio_end = osc_io_end,
- .cio_fini = osc_io_fini
- },
- [CIT_FSYNC] = {
- .cio_start = osc_io_fsync_start,
- .cio_end = osc_io_fsync_end,
- .cio_fini = osc_io_fini
- },
- [CIT_MISC] = {
- .cio_fini = osc_io_fini
- }
- },
- .cio_read_ahead = osc_io_read_ahead,
- .cio_submit = osc_io_submit,
- .cio_commit_async = osc_io_commit_async
-};
-
-/*****************************************************************************
- *
- * Transfer operations.
- *
- */
-
-int osc_io_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io)
-{
- struct osc_io *oio = osc_env_io(env);
-
- CL_IO_SLICE_CLEAN(oio, oi_cl);
- cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
- return 0;
-}
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
deleted file mode 100644
index fe8ed0d0497a..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ /dev/null
@@ -1,1231 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include <linux/libcfs/libcfs.h>
-/* fid_build_reg_res_name() */
-#include <lustre_fid.h>
-
-#include "osc_cl_internal.h"
-
-/** \addtogroup osc
- * @{
- */
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static const struct cl_lock_operations osc_lock_ops;
-static const struct cl_lock_operations osc_lock_lockless_ops;
-static void osc_lock_to_lockless(const struct lu_env *env,
- struct osc_lock *ols, int force);
-
-int osc_lock_is_lockless(const struct osc_lock *olck)
-{
- return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
-}
-
-/**
- * Returns a weak pointer to the ldlm lock identified by a handle. Returned
- * pointer cannot be dereferenced, as lock is not protected from concurrent
- * reclaim. This function is a helper for osc_lock_invariant().
- */
-static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
-{
- struct ldlm_lock *lock;
-
- lock = ldlm_handle2lock(handle);
- if (lock)
- LDLM_LOCK_PUT(lock);
- return lock;
-}
-
-/**
- * Invariant that has to be true all of the time.
- */
-static int osc_lock_invariant(struct osc_lock *ols)
-{
- struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
- struct ldlm_lock *olock = ols->ols_dlmlock;
- int handle_used = lustre_handle_is_used(&ols->ols_handle);
-
- if (ergo(osc_lock_is_lockless(ols),
- ols->ols_locklessable && !ols->ols_dlmlock))
- return 1;
-
- /*
- * If all the following "ergo"s are true, return 1, otherwise 0
- */
- if (!ergo(olock, handle_used))
- return 0;
-
- if (!ergo(olock, olock->l_handle.h_cookie == ols->ols_handle.cookie))
- return 0;
-
- if (!ergo(handle_used,
- ergo(lock && olock, lock == olock) &&
- ergo(!lock, !olock)))
- return 0;
- /*
- * Check that ->ols_handle and ->ols_dlmlock are consistent, but
- * take into account that they are set at the different time.
- */
- if (!ergo(ols->ols_state == OLS_CANCELLED,
- !olock && !handle_used))
- return 0;
- /*
- * DLM lock is destroyed only after we have seen cancellation
- * ast.
- */
- if (!ergo(olock && ols->ols_state < OLS_CANCELLED,
- !ldlm_is_destroyed(olock)))
- return 0;
-
- if (!ergo(ols->ols_state == OLS_GRANTED,
- olock && olock->l_req_mode == olock->l_granted_mode &&
- ols->ols_hold))
- return 0;
- return 1;
-}
-
-/*****************************************************************************
- *
- * Lock operations.
- *
- */
-
-static void osc_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- LINVRNT(osc_lock_invariant(ols));
- LASSERT(!ols->ols_dlmlock);
-
- kmem_cache_free(osc_lock_kmem, ols);
-}
-
-static void osc_lock_build_policy(const struct lu_env *env,
- const struct cl_lock *lock,
- union ldlm_policy_data *policy)
-{
- const struct cl_lock_descr *d = &lock->cll_descr;
-
- osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
- policy->l_extent.gid = d->cld_gid;
-}
-
-static __u64 osc_enq2ldlm_flags(__u32 enqflags)
-{
- __u64 result = 0;
-
- LASSERT((enqflags & ~CEF_MASK) == 0);
-
- if (enqflags & CEF_NONBLOCK)
- result |= LDLM_FL_BLOCK_NOWAIT;
- if (enqflags & CEF_ASYNC)
- result |= LDLM_FL_HAS_INTENT;
- if (enqflags & CEF_DISCARD_DATA)
- result |= LDLM_FL_AST_DISCARD_DATA;
- if (enqflags & CEF_PEEK)
- result |= LDLM_FL_TEST_LOCK;
- if (enqflags & CEF_LOCK_MATCH)
- result |= LDLM_FL_MATCH_LOCK;
- return result;
-}
-
-/**
- * Updates object attributes from a lock value block (lvb) received together
- * with the DLM lock reply from the server. Copy of osc_update_enqueue()
- * logic.
- *
- * This can be optimized to not update attributes when lock is a result of a
- * local match.
- *
- * Called under lock and resource spin-locks.
- */
-static void osc_lock_lvb_update(const struct lu_env *env,
- struct osc_object *osc,
- struct ldlm_lock *dlmlock,
- struct ost_lvb *lvb)
-{
- struct cl_object *obj = osc2cl(osc);
- struct lov_oinfo *oinfo = osc->oo_oinfo;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- unsigned int valid;
-
- valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
- if (!lvb)
- lvb = dlmlock->l_lvb_data;
-
- cl_lvb2attr(attr, lvb);
-
- cl_object_attr_lock(obj);
- if (dlmlock) {
- __u64 size;
-
- check_res_locked(dlmlock->l_resource);
- LASSERT(lvb == dlmlock->l_lvb_data);
- size = lvb->lvb_size;
-
- /* Extend KMS up to the end of this lock and no further
- * A lock on [x,y] means a KMS of up to y + 1 bytes!
- */
- if (size > dlmlock->l_policy_data.l_extent.end)
- size = dlmlock->l_policy_data.l_extent.end + 1;
- if (size >= oinfo->loi_kms) {
- LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu, kms=%llu",
- lvb->lvb_size, size);
- valid |= CAT_KMS;
- attr->cat_kms = size;
- } else {
- LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu; leaving kms=%llu, end=%llu",
- lvb->lvb_size, oinfo->loi_kms,
- dlmlock->l_policy_data.l_extent.end);
- }
- ldlm_lock_allow_match_locked(dlmlock);
- }
-
- cl_object_attr_update(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
-}
-
-static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
- struct lustre_handle *lockh, bool lvb_update)
-{
- struct ldlm_lock *dlmlock;
-
- dlmlock = ldlm_handle2lock_long(lockh, 0);
- LASSERT(dlmlock);
-
- /* lock reference taken by ldlm_handle2lock_long() is
- * owned by osc_lock and released in osc_lock_detach()
- */
- lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
- oscl->ols_has_ref = 1;
-
- LASSERT(!oscl->ols_dlmlock);
- oscl->ols_dlmlock = dlmlock;
-
- /* This may be a matched lock for glimpse request, do not hold
- * lock reference in that case.
- */
- if (!oscl->ols_glimpse) {
- /* hold a refc for non glimpse lock which will
- * be released in osc_lock_cancel()
- */
- lustre_handle_copy(&oscl->ols_handle, lockh);
- ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
- oscl->ols_hold = 1;
- }
-
- /* Lock must have been granted. */
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
- struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
- struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
-
- /* extend the lock extent, otherwise it will have problem when
- * we decide whether to grant a lockless lock.
- */
- descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, ext->start);
- descr->cld_end = cl_index(descr->cld_obj, ext->end);
- descr->cld_gid = ext->gid;
-
- /* no lvb update for matched lock */
- if (lvb_update) {
- LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
- osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
- dlmlock, NULL);
- }
- LINVRNT(osc_lock_invariant(oscl));
- }
- unlock_res_and_lock(dlmlock);
-
- LASSERT(oscl->ols_state != OLS_GRANTED);
- oscl->ols_state = OLS_GRANTED;
-}
-
-/**
- * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
- * received from a server, or after osc_enqueue_base() matched a local DLM
- * lock.
- */
-static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
- int errcode)
-{
- struct osc_lock *oscl = cookie;
- struct cl_lock_slice *slice = &oscl->ols_cl;
- struct lu_env *env;
- int rc;
- u16 refcheck;
-
- env = cl_env_get(&refcheck);
- /* should never happen, similar to osc_ldlm_blocking_ast(). */
- LASSERT(!IS_ERR(env));
-
- rc = ldlm_error2errno(errcode);
- if (oscl->ols_state == OLS_ENQUEUED) {
- oscl->ols_state = OLS_UPCALL_RECEIVED;
- } else if (oscl->ols_state == OLS_CANCELLED) {
- rc = -EIO;
- } else {
- CERROR("Impossible state: %d\n", oscl->ols_state);
- LBUG();
- }
-
- if (rc == 0)
- osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);
-
- /* Error handling, some errors are tolerable. */
- if (oscl->ols_locklessable && rc == -EUSERS) {
- /* This is a tolerable error, turn this lock into
- * lockless lock.
- */
- osc_object_set_contended(cl2osc(slice->cls_obj));
- LASSERT(slice->cls_ops == &osc_lock_ops);
-
- /* Change this lock to ldlmlock-less lock. */
- osc_lock_to_lockless(env, oscl, 1);
- oscl->ols_state = OLS_GRANTED;
- rc = 0;
- } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
- LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
- osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
- NULL, &oscl->ols_lvb);
- /* Hide the error. */
- rc = 0;
- }
-
- if (oscl->ols_owner)
- cl_sync_io_note(env, oscl->ols_owner, rc);
- cl_env_put(env, &refcheck);
-
- return rc;
-}
-
-static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
- int errcode)
-{
- struct osc_object *osc = cookie;
- struct ldlm_lock *dlmlock;
- struct lu_env *env;
- u16 refcheck;
-
- env = cl_env_get(&refcheck);
- LASSERT(!IS_ERR(env));
-
- if (errcode == ELDLM_LOCK_MATCHED) {
- errcode = ELDLM_OK;
- goto out;
- }
-
- if (errcode != ELDLM_OK)
- goto out;
-
- dlmlock = ldlm_handle2lock(lockh);
- LASSERT(dlmlock);
-
- lock_res_and_lock(dlmlock);
- LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
-
- /* there is no osc_lock associated with AGL lock */
- osc_lock_lvb_update(env, osc, dlmlock, NULL);
-
- unlock_res_and_lock(dlmlock);
- LDLM_LOCK_PUT(dlmlock);
-
-out:
- cl_object_put(env, osc2cl(osc));
- cl_env_put(env, &refcheck);
- return ldlm_error2errno(errcode);
-}
-
-static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
- enum cl_lock_mode mode, int discard)
-{
- struct lu_env *env;
- u16 refcheck;
- int rc = 0;
- int rc2 = 0;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- if (mode == CLM_WRITE) {
- rc = osc_cache_writeback_range(env, obj, start, end, 1,
- discard);
- CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
- obj, start, end, rc,
- discard ? "discarded" : "written back");
- if (rc > 0)
- rc = 0;
- }
-
- rc2 = osc_lock_discard_pages(env, obj, start, end, mode);
- if (rc == 0 && rc2 < 0)
- rc = rc2;
-
- cl_env_put(env, &refcheck);
- return rc;
-}
-
-/**
- * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
- * and ldlm_lock caches.
- */
-static int osc_dlm_blocking_ast0(const struct lu_env *env,
- struct ldlm_lock *dlmlock,
- void *data, int flag)
-{
- struct cl_object *obj = NULL;
- int result = 0;
- int discard;
- enum cl_lock_mode mode = CLM_READ;
-
- LASSERT(flag == LDLM_CB_CANCELING);
-
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
- dlmlock->l_ast_data = NULL;
- unlock_res_and_lock(dlmlock);
- return 0;
- }
-
- discard = ldlm_is_discard_data(dlmlock);
- if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
- mode = CLM_WRITE;
-
- if (dlmlock->l_ast_data) {
- obj = osc2cl(dlmlock->l_ast_data);
- dlmlock->l_ast_data = NULL;
-
- cl_object_get(obj);
- }
-
- unlock_res_and_lock(dlmlock);
-
- /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
- * the object has been destroyed.
- */
- if (obj) {
- struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- __u64 old_kms;
-
- /* Destroy pages covered by the extent of the DLM lock */
- result = osc_lock_flush(cl2osc(obj),
- cl_index(obj, extent->start),
- cl_index(obj, extent->end),
- mode, discard);
-
- /* losing a lock, update kms */
- lock_res_and_lock(dlmlock);
- cl_object_attr_lock(obj);
- /* Must get the value under the lock to avoid race. */
- old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
- /* Update the kms. Need to loop all granted locks.
- * Not a problem for the client
- */
- attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
-
- cl_object_attr_update(env, obj, attr, CAT_KMS);
- cl_object_attr_unlock(obj);
- unlock_res_and_lock(dlmlock);
-
- cl_object_put(env, obj);
- }
- return result;
-}
-
-/**
- * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
- * some other lock, or is canceled. This function is installed as a
- * ldlm_lock::l_blocking_ast() for client extent locks.
- *
- * Control flow is tricky, because ldlm uses the same call-back
- * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
- *
- * \param dlmlock lock for which ast occurred.
- *
- * \param new description of a conflicting lock in case of blocking ast.
- *
- * \param data value of dlmlock->l_ast_data
- *
- * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
- * cancellation and blocking ast's.
- *
- * Possible use cases:
- *
- * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
- * lock due to lock lru pressure, or explicit user request to purge
- * locks.
- *
- * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
- * us that dlmlock conflicts with another lock that some client is
- * enqueing. Lock is canceled.
- *
- * - cl_lock_cancel() is called. osc_lock_cancel() calls
- * ldlm_cli_cancel() that calls
- *
- * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
- *
- * recursively entering osc_ldlm_blocking_ast().
- *
- * - client cancels lock voluntary (e.g., as a part of early cancellation):
- *
- * cl_lock_cancel()->
- * osc_lock_cancel()->
- * ldlm_cli_cancel()->
- * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
- *
- */
-static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
- struct ldlm_lock_desc *new, void *data,
- int flag)
-{
- int result = 0;
-
- switch (flag) {
- case LDLM_CB_BLOCKING: {
- struct lustre_handle lockh;
-
- ldlm_lock2handle(dlmlock, &lockh);
- result = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- if (result == -ENODATA)
- result = 0;
- break;
- }
- case LDLM_CB_CANCELING: {
- struct lu_env *env;
- u16 refcheck;
-
- /*
- * This can be called in the context of outer IO, e.g.,
- *
- * osc_enqueue_base()->...
- * ->ldlm_prep_elc_req()->...
- * ->ldlm_cancel_callback()->...
- * ->osc_ldlm_blocking_ast()
- *
- * new environment has to be created to not corrupt outer
- * context.
- */
- env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- result = PTR_ERR(env);
- break;
- }
-
- result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
- cl_env_put(env, &refcheck);
- break;
- }
- default:
- LBUG();
- }
- return result;
-}
-
-static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
-{
- struct ptlrpc_request *req = data;
- struct lu_env *env;
- struct ost_lvb *lvb;
- struct req_capsule *cap;
- struct cl_object *obj = NULL;
- int result;
- u16 refcheck;
-
- LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- result = PTR_ERR(env);
- goto out;
- }
-
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_ast_data) {
- obj = osc2cl(dlmlock->l_ast_data);
- cl_object_get(obj);
- }
- unlock_res_and_lock(dlmlock);
-
- if (obj) {
- /* Do not grab the mutex of cl_lock for glimpse.
- * See LU-1274 for details.
- * BTW, it's okay for cl_lock to be cancelled during
- * this period because server can handle this race.
- * See ldlm_server_glimpse_ast() for details.
- * cl_lock_mutex_get(env, lock);
- */
- cap = &req->rq_pill;
- req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
- req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
- sizeof(*lvb));
- result = req_capsule_server_pack(cap);
- if (result == 0) {
- lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
- result = cl_object_glimpse(env, obj, lvb);
- }
- if (!exp_connect_lvb_type(req->rq_export)) {
- req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
- sizeof(struct ost_lvb_v1),
- RCL_SERVER);
- }
- cl_object_put(env, obj);
- } else {
- /*
- * These errors are normal races, so we don't want to
- * fill the console with messages by calling
- * ptlrpc_error()
- */
- lustre_pack_reply(req, 1, NULL, NULL);
- result = -ELDLM_NO_LOCK_DATA;
- }
- cl_env_put(env, &refcheck);
-
-out:
- req->rq_status = result;
- return result;
-}
-
-static int weigh_cb(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops, void *cbdata)
-{
- struct cl_page *page = ops->ops_cl.cpl_page;
-
- if (cl_page_is_vmlocked(env, page) ||
- PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
- )
- return CLP_GANG_ABORT;
-
- *(pgoff_t *)cbdata = osc_index(ops) + 1;
- return CLP_GANG_OKAY;
-}
-
-static unsigned long osc_lock_weight(const struct lu_env *env,
- struct osc_object *oscobj,
- struct ldlm_extent *extent)
-{
- struct cl_io *io = &osc_env_info(env)->oti_io;
- struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
- pgoff_t page_index;
- int result;
-
- io->ci_obj = obj;
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result != 0)
- return result;
-
- page_index = cl_index(obj, extent->start);
- do {
- result = osc_page_gang_lookup(env, io, oscobj,
- page_index,
- cl_index(obj, extent->end),
- weigh_cb, (void *)&page_index);
- if (result == CLP_GANG_ABORT)
- break;
- if (result == CLP_GANG_RESCHED)
- cond_resched();
- } while (result != CLP_GANG_OKAY);
- cl_io_fini(env, io);
-
- return result == CLP_GANG_ABORT ? 1 : 0;
-}
-
-/**
- * Get the weight of dlm lock for early cancellation.
- */
-unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
-{
- struct lu_env *env;
- struct osc_object *obj;
- struct osc_lock *oscl;
- unsigned long weight;
- bool found = false;
- u16 refcheck;
-
- might_sleep();
- /*
- * osc_ldlm_weigh_ast has a complex context since it might be called
- * because of lock canceling, or from user's input. We have to make
- * a new environment for it. Probably it is implementation safe to use
- * the upper context because cl_lock_put don't modify environment
- * variables. But just in case ..
- */
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- /* Mostly because lack of memory, do not eliminate this lock */
- return 1;
-
- LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
- obj = dlmlock->l_ast_data;
- if (!obj) {
- weight = 1;
- goto out;
- }
-
- spin_lock(&obj->oo_ol_spin);
- list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
- if (oscl->ols_dlmlock && oscl->ols_dlmlock != dlmlock)
- continue;
- found = true;
- }
- spin_unlock(&obj->oo_ol_spin);
- if (found) {
- /*
- * If the lock is being used by an IO, definitely not cancel it.
- */
- weight = 1;
- goto out;
- }
-
- weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
-
-out:
- cl_env_put(env, &refcheck);
- return weight;
-}
-
-static void osc_lock_build_einfo(const struct lu_env *env,
- const struct cl_lock *lock,
- struct osc_object *osc,
- struct ldlm_enqueue_info *einfo)
-{
- einfo->ei_type = LDLM_EXTENT;
- einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
- einfo->ei_cb_bl = osc_ldlm_blocking_ast;
- einfo->ei_cb_cp = ldlm_completion_ast;
- einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
- einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
-}
-
-/**
- * Determine if the lock should be converted into a lockless lock.
- *
- * Steps to check:
- * - if the lock has an explicit requirement for a non-lockless lock;
- * - if the io lock request type ci_lockreq;
- * - send the enqueue rpc to ost to make the further decision;
- * - special treat to truncate lockless lock
- *
- * Additional policy can be implemented here, e.g., never do lockless-io
- * for large extents.
- */
-static void osc_lock_to_lockless(const struct lu_env *env,
- struct osc_lock *ols, int force)
-{
- struct cl_lock_slice *slice = &ols->ols_cl;
-
- LASSERT(ols->ols_state == OLS_NEW ||
- ols->ols_state == OLS_UPCALL_RECEIVED);
-
- if (force) {
- ols->ols_locklessable = 1;
- slice->cls_ops = &osc_lock_lockless_ops;
- } else {
- struct osc_io *oio = osc_env_io(env);
- struct cl_io *io = oio->oi_cl.cis_io;
- struct cl_object *obj = slice->cls_obj;
- struct osc_object *oob = cl2osc(obj);
- const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
- struct obd_connect_data *ocd;
-
- LASSERT(io->ci_lockreq == CILR_MANDATORY ||
- io->ci_lockreq == CILR_MAYBE ||
- io->ci_lockreq == CILR_NEVER);
-
- ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
- ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
- (io->ci_lockreq == CILR_MAYBE) &&
- (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
- if (io->ci_lockreq == CILR_NEVER ||
- /* lockless IO */
- (ols->ols_locklessable && osc_object_is_contended(oob)) ||
- /* lockless truncate */
- (cl_io_is_trunc(io) &&
- (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
- osd->od_lockless_truncate)) {
- ols->ols_locklessable = 1;
- slice->cls_ops = &osc_lock_lockless_ops;
- }
- }
- LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
-}
-
-static bool osc_lock_compatible(const struct osc_lock *qing,
- const struct osc_lock *qed)
-{
- struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr;
- struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr;
-
- if (qed->ols_glimpse)
- return true;
-
- if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ)
- return true;
-
- if (qed->ols_state < OLS_GRANTED)
- return true;
-
- if (qed_descr->cld_mode >= qing_descr->cld_mode &&
- qed_descr->cld_start <= qing_descr->cld_start &&
- qed_descr->cld_end >= qing_descr->cld_end)
- return true;
-
- return false;
-}
-
-static void osc_lock_wake_waiters(const struct lu_env *env,
- struct osc_object *osc,
- struct osc_lock *oscl)
-{
- spin_lock(&osc->oo_ol_spin);
- list_del_init(&oscl->ols_nextlock_oscobj);
- spin_unlock(&osc->oo_ol_spin);
-
- spin_lock(&oscl->ols_lock);
- while (!list_empty(&oscl->ols_waiting_list)) {
- struct osc_lock *scan;
-
- scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock,
- ols_wait_entry);
- list_del_init(&scan->ols_wait_entry);
-
- cl_sync_io_note(env, scan->ols_owner, 0);
- }
- spin_unlock(&oscl->ols_lock);
-}
-
-static int osc_lock_enqueue_wait(const struct lu_env *env,
- struct osc_object *obj,
- struct osc_lock *oscl)
-{
- struct osc_lock *tmp_oscl;
- struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
- struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor;
- int rc = 0;
-
- spin_lock(&obj->oo_ol_spin);
- list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list);
-
-restart:
- list_for_each_entry(tmp_oscl, &obj->oo_ol_list,
- ols_nextlock_oscobj) {
- struct cl_lock_descr *descr;
-
- if (tmp_oscl == oscl)
- break;
-
- descr = &tmp_oscl->ols_cl.cls_lock->cll_descr;
- if (descr->cld_start > need->cld_end ||
- descr->cld_end < need->cld_start)
- continue;
-
- /* We're not supposed to give up group lock */
- if (descr->cld_mode == CLM_GROUP)
- break;
-
- if (!osc_lock_is_lockless(oscl) &&
- osc_lock_compatible(oscl, tmp_oscl))
- continue;
-
- /* wait for conflicting lock to be canceled */
- cl_sync_io_init(waiter, 1, cl_sync_io_end);
- oscl->ols_owner = waiter;
-
- spin_lock(&tmp_oscl->ols_lock);
- /* add oscl into tmp's ols_waiting list */
- list_add_tail(&oscl->ols_wait_entry,
- &tmp_oscl->ols_waiting_list);
- spin_unlock(&tmp_oscl->ols_lock);
-
- spin_unlock(&obj->oo_ol_spin);
- rc = cl_sync_io_wait(env, waiter, 0);
- spin_lock(&obj->oo_ol_spin);
- if (rc < 0)
- break;
-
- oscl->ols_owner = NULL;
- goto restart;
- }
- spin_unlock(&obj->oo_ol_spin);
-
- return rc;
-}
-
-/**
- * Implementation of cl_lock_operations::clo_enqueue() method for osc
- * layer. This initiates ldlm enqueue:
- *
- * - cancels conflicting locks early (osc_lock_enqueue_wait());
- *
- * - calls osc_enqueue_base() to do actual enqueue.
- *
- * osc_enqueue_base() is supplied with an upcall function that is executed
- * when lock is received either after a local cached ldlm lock is matched, or
- * when a reply from the server is received.
- *
- * This function does not wait for the network communication to complete.
- */
-static int osc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, struct cl_sync_io *anchor)
-{
- struct osc_thread_info *info = osc_env_info(env);
- struct osc_io *oio = osc_env_io(env);
- struct osc_object *osc = cl2osc(slice->cls_obj);
- struct osc_lock *oscl = cl2osc_lock(slice);
- struct cl_lock *lock = slice->cls_lock;
- struct ldlm_res_id *resname = &info->oti_resname;
- union ldlm_policy_data *policy = &info->oti_policy;
- osc_enqueue_upcall_f upcall = osc_lock_upcall;
- void *cookie = oscl;
- bool async = false;
- int result;
-
- LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
- "lock = %p, ols = %p\n", lock, oscl);
-
- if (oscl->ols_state == OLS_GRANTED)
- return 0;
-
- if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
- goto enqueue_base;
-
- if (oscl->ols_glimpse) {
- LASSERT(equi(oscl->ols_agl, !anchor));
- async = true;
- goto enqueue_base;
- }
-
- result = osc_lock_enqueue_wait(env, osc, oscl);
- if (result < 0)
- goto out;
-
- /* we can grant lockless lock right after all conflicting locks
- * are canceled.
- */
- if (osc_lock_is_lockless(oscl)) {
- oscl->ols_state = OLS_GRANTED;
- oio->oi_lockless = 1;
- return 0;
- }
-
-enqueue_base:
- oscl->ols_state = OLS_ENQUEUED;
- if (anchor) {
- atomic_inc(&anchor->csi_sync_nr);
- oscl->ols_owner = anchor;
- }
-
- /**
- * DLM lock's ast data must be osc_object;
- * if glimpse or AGL lock, async of osc_enqueue_base() must be true,
- * DLM's enqueue callback set to osc_lock_upcall() with cookie as
- * osc_lock.
- */
- ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
- osc_lock_build_policy(env, lock, policy);
- if (oscl->ols_agl) {
- oscl->ols_einfo.ei_cbdata = NULL;
- /* hold a reference for callback */
- cl_object_get(osc2cl(osc));
- upcall = osc_lock_upcall_agl;
- cookie = osc;
- }
- result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags,
- policy, &oscl->ols_lvb,
- osc->oo_oinfo->loi_kms_valid,
- upcall, cookie,
- &oscl->ols_einfo, PTLRPCD_SET, async,
- oscl->ols_agl);
- if (!result) {
- if (osc_lock_is_lockless(oscl)) {
- oio->oi_lockless = 1;
- } else if (!async) {
- LASSERT(oscl->ols_state == OLS_GRANTED);
- LASSERT(oscl->ols_hold);
- LASSERT(oscl->ols_dlmlock);
- }
- } else if (oscl->ols_agl) {
- cl_object_put(env, osc2cl(osc));
- result = 0;
- }
-
-out:
- if (result < 0) {
- oscl->ols_state = OLS_CANCELLED;
- osc_lock_wake_waiters(env, osc, oscl);
-
- if (anchor)
- cl_sync_io_note(env, anchor, result);
- }
- return result;
-}
-
-/**
- * Breaks a link between osc_lock and dlm_lock.
- */
-static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
-{
- struct ldlm_lock *dlmlock;
-
- dlmlock = olck->ols_dlmlock;
- if (!dlmlock)
- return;
-
- if (olck->ols_hold) {
- olck->ols_hold = 0;
- ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode);
- olck->ols_handle.cookie = 0ULL;
- }
-
- olck->ols_dlmlock = NULL;
-
- /* release a reference taken in osc_lock_upcall(). */
- LASSERT(olck->ols_has_ref);
- lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
- LDLM_LOCK_RELEASE(dlmlock);
- olck->ols_has_ref = 0;
-}
-
-/**
- * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
- * called (as part of cl_lock_cancel()) when lock is canceled either voluntary
- * (LRU pressure, early cancellation, umount, etc.) or due to the conflict
- * with some other lock some where in the cluster. This function does the
- * following:
- *
- * - invalidates all pages protected by this lock (after sending dirty
- * ones to the server, as necessary);
- *
- * - decref's underlying ldlm lock;
- *
- * - cancels ldlm lock (ldlm_cli_cancel()).
- */
-static void osc_lock_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_object *obj = cl2osc(slice->cls_obj);
- struct osc_lock *oscl = cl2osc_lock(slice);
-
- LINVRNT(osc_lock_invariant(oscl));
-
- osc_lock_detach(env, oscl);
- oscl->ols_state = OLS_CANCELLED;
- oscl->ols_flags &= ~LDLM_FL_LVB_READY;
-
- osc_lock_wake_waiters(env, obj, oscl);
-}
-
-static int osc_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- (*p)(env, cookie, "%p %#16llx %#llx %d %p ",
- lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie,
- lock->ols_state, lock->ols_owner);
- osc_lvb_print(env, cookie, p, &lock->ols_lvb);
- return 0;
-}
-
-static const struct cl_lock_operations osc_lock_ops = {
- .clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_enqueue,
- .clo_cancel = osc_lock_cancel,
- .clo_print = osc_lock_print,
-};
-
-static void osc_lock_lockless_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
- struct osc_object *osc = cl2osc(slice->cls_obj);
- struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
- int result;
-
- LASSERT(!ols->ols_dlmlock);
- result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
- descr->cld_mode, 0);
- if (result)
- CERROR("Pages for lockless lock %p were not purged(%d)\n",
- ols, result);
-
- osc_lock_wake_waiters(env, osc, ols);
-}
-
-static const struct cl_lock_operations osc_lock_lockless_ops = {
- .clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_enqueue,
- .clo_cancel = osc_lock_lockless_cancel,
- .clo_print = osc_lock_print
-};
-
-static void osc_lock_set_writer(const struct lu_env *env,
- const struct cl_io *io,
- struct cl_object *obj, struct osc_lock *oscl)
-{
- struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
- pgoff_t io_start;
- pgoff_t io_end;
-
- if (!cl_object_same(io->ci_obj, obj))
- return;
-
- if (likely(io->ci_type == CIT_WRITE)) {
- io_start = cl_index(obj, io->u.ci_rw.crw_pos);
- io_end = cl_index(obj, io->u.ci_rw.crw_pos +
- io->u.ci_rw.crw_count - 1);
- if (cl_io_is_append(io)) {
- io_start = 0;
- io_end = CL_PAGE_EOF;
- }
- } else {
- LASSERT(cl_io_is_mkwrite(io));
- io_start = io->u.ci_fault.ft_index;
- io_end = io->u.ci_fault.ft_index;
- }
-
- if (descr->cld_mode >= CLM_WRITE &&
- descr->cld_start <= io_start && descr->cld_end >= io_end) {
- struct osc_io *oio = osc_env_io(env);
-
- /* There must be only one lock to match the write region */
- LASSERT(!oio->oi_write_osclock);
- oio->oi_write_osclock = oscl;
- }
-}
-
-int osc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io)
-{
- struct osc_lock *oscl;
- __u32 enqflags = lock->cll_descr.cld_enq_flags;
-
- oscl = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS);
- if (!oscl)
- return -ENOMEM;
-
- oscl->ols_state = OLS_NEW;
- spin_lock_init(&oscl->ols_lock);
- INIT_LIST_HEAD(&oscl->ols_waiting_list);
- INIT_LIST_HEAD(&oscl->ols_wait_entry);
- INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj);
-
- oscl->ols_flags = osc_enq2ldlm_flags(enqflags);
- oscl->ols_agl = !!(enqflags & CEF_AGL);
- if (oscl->ols_agl)
- oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
- if (oscl->ols_flags & LDLM_FL_HAS_INTENT) {
- oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
- oscl->ols_glimpse = 1;
- }
- osc_lock_build_einfo(env, lock, cl2osc(obj), &oscl->ols_einfo);
-
- cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);
-
- if (!(enqflags & CEF_MUST))
- /* try to convert this lock to a lockless lock */
- osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER));
- if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
- oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
-
- if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
- osc_lock_set_writer(env, io, obj, oscl);
-
-
- LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx",
- lock, oscl, oscl->ols_flags);
-
- return 0;
-}
-
-/**
- * Finds an existing lock covering given index and optionally different from a
- * given \a except lock.
- */
-struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
- struct osc_object *obj, pgoff_t index,
- enum osc_dap_flags dap_flags)
-{
- struct osc_thread_info *info = osc_env_info(env);
- struct ldlm_res_id *resname = &info->oti_resname;
- union ldlm_policy_data *policy = &info->oti_policy;
- struct lustre_handle lockh;
- struct ldlm_lock *lock = NULL;
- enum ldlm_mode mode;
- __u64 flags;
-
- ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
- osc_index2policy(policy, osc2cl(obj), index, index);
- policy->l_extent.gid = LDLM_GID_ANY;
-
- flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
- if (dap_flags & OSC_DAP_FL_TEST_LOCK)
- flags |= LDLM_FL_TEST_LOCK;
-
- /*
- * It is fine to match any group lock since there could be only one
- * with a uniq gid and it conflicts with all other lock modes too
- */
-again:
- mode = osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
- LCK_PR | LCK_PW | LCK_GROUP, &flags, obj, &lockh,
- dap_flags & OSC_DAP_FL_CANCELING);
- if (mode != 0) {
- lock = ldlm_handle2lock(&lockh);
- /* RACE: the lock is cancelled so let's try again */
- if (unlikely(!lock))
- goto again;
- }
- return lock;
-}
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
deleted file mode 100644
index 6baa8e2e00c9..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_object for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include "osc_cl_internal.h"
-
-/** \addtogroup osc
- * @{
- */
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static struct lu_object *osc2lu(struct osc_object *osc)
-{
- return &osc->oo_cl.co_lu;
-}
-
-static struct osc_object *lu2osc(const struct lu_object *obj)
-{
- LINVRNT(osc_is_object(obj));
- return container_of0(obj, struct osc_object, oo_cl.co_lu);
-}
-
-/*****************************************************************************
- *
- * Object operations.
- *
- */
-
-static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct osc_object *osc = lu2osc(obj);
- const struct cl_object_conf *cconf = lu2cl_conf(conf);
-
- osc->oo_oinfo = cconf->u.coc_oinfo;
- INIT_LIST_HEAD(&osc->oo_ready_item);
- INIT_LIST_HEAD(&osc->oo_hp_ready_item);
- INIT_LIST_HEAD(&osc->oo_write_item);
- INIT_LIST_HEAD(&osc->oo_read_item);
-
- atomic_set(&osc->oo_nr_ios, 0);
- init_waitqueue_head(&osc->oo_io_waitq);
-
- osc->oo_root.rb_node = NULL;
- INIT_LIST_HEAD(&osc->oo_hp_exts);
- INIT_LIST_HEAD(&osc->oo_urgent_exts);
- INIT_LIST_HEAD(&osc->oo_rpc_exts);
- INIT_LIST_HEAD(&osc->oo_reading_exts);
- atomic_set(&osc->oo_nr_reads, 0);
- atomic_set(&osc->oo_nr_writes, 0);
- spin_lock_init(&osc->oo_lock);
- spin_lock_init(&osc->oo_tree_lock);
- spin_lock_init(&osc->oo_ol_spin);
- INIT_LIST_HEAD(&osc->oo_ol_list);
-
- cl_object_page_init(lu2cl(obj), sizeof(struct osc_page));
-
- return 0;
-}
-
-static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct osc_object *osc = lu2osc(obj);
-
- LASSERT(list_empty(&osc->oo_ready_item));
- LASSERT(list_empty(&osc->oo_hp_ready_item));
- LASSERT(list_empty(&osc->oo_write_item));
- LASSERT(list_empty(&osc->oo_read_item));
-
- LASSERT(!osc->oo_root.rb_node);
- LASSERT(list_empty(&osc->oo_hp_exts));
- LASSERT(list_empty(&osc->oo_urgent_exts));
- LASSERT(list_empty(&osc->oo_rpc_exts));
- LASSERT(list_empty(&osc->oo_reading_exts));
- LASSERT(atomic_read(&osc->oo_nr_reads) == 0);
- LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
- LASSERT(list_empty(&osc->oo_ol_list));
- LASSERT(!atomic_read(&osc->oo_nr_ios));
-
- lu_object_fini(obj);
- kmem_cache_free(osc_object_kmem, osc);
-}
-
-int osc_lvb_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct ost_lvb *lvb)
-{
- return (*p)(env, cookie, "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu",
- lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
- lvb->lvb_ctime, lvb->lvb_blocks);
-}
-
-static int osc_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *obj)
-{
- struct osc_object *osc = lu2osc(obj);
- struct lov_oinfo *oinfo = osc->oo_oinfo;
- struct osc_async_rc *ar = &oinfo->loi_ar;
-
- (*p)(env, cookie, "id: " DOSTID " idx: %d gen: %d kms_valid: %u kms %llu rc: %d force_sync: %d min_xid: %llu ",
- POSTID(&oinfo->loi_oi), oinfo->loi_ost_idx,
- oinfo->loi_ost_gen, oinfo->loi_kms_valid, oinfo->loi_kms,
- ar->ar_rc, ar->ar_force_sync, ar->ar_min_xid);
- osc_lvb_print(env, cookie, p, &oinfo->loi_lvb);
- return 0;
-}
-
-static int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
-
- cl_lvb2attr(attr, &oinfo->loi_lvb);
- attr->cat_kms = oinfo->loi_kms_valid ? oinfo->loi_kms : 0;
- return 0;
-}
-
-static int osc_attr_update(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned int valid)
-{
- struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
- struct ost_lvb *lvb = &oinfo->loi_lvb;
-
- if (valid & CAT_SIZE)
- lvb->lvb_size = attr->cat_size;
- if (valid & CAT_MTIME)
- lvb->lvb_mtime = attr->cat_mtime;
- if (valid & CAT_ATIME)
- lvb->lvb_atime = attr->cat_atime;
- if (valid & CAT_CTIME)
- lvb->lvb_ctime = attr->cat_ctime;
- if (valid & CAT_BLOCKS)
- lvb->lvb_blocks = attr->cat_blocks;
- if (valid & CAT_KMS) {
- CDEBUG(D_CACHE, "set kms from %llu to %llu\n",
- oinfo->loi_kms, (__u64)attr->cat_kms);
- loi_kms_set(oinfo, attr->cat_kms);
- }
- return 0;
-}
-
-static int osc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb)
-{
- struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
-
- lvb->lvb_size = oinfo->loi_kms;
- lvb->lvb_blocks = oinfo->loi_lvb.lvb_blocks;
- return 0;
-}
-
-static int osc_object_ast_clear(struct ldlm_lock *lock, void *data)
-{
- if (lock->l_ast_data == data)
- lock->l_ast_data = NULL;
- return LDLM_ITER_CONTINUE;
-}
-
-static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
-{
- struct osc_object *osc = cl2osc(obj);
- struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
-
- /* DLM locks don't hold a reference of osc_object so we have to
- * clear it before the object is being destroyed.
- */
- ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
- ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
- osc_object_ast_clear, osc);
- return 0;
-}
-
-static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
- struct ll_fiemap_info_key *fmkey,
- struct fiemap *fiemap, size_t *buflen)
-{
- struct obd_export *exp = osc_export(cl2osc(obj));
- union ldlm_policy_data policy;
- struct ptlrpc_request *req;
- struct lustre_handle lockh;
- struct ldlm_res_id resid;
- enum ldlm_mode mode = 0;
- struct fiemap *reply;
- char *tmp;
- int rc;
-
- fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
- if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
- goto skip_locking;
-
- policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;
-
- if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
- fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
- policy.l_extent.end = OBD_OBJECT_EOF;
- else
- policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
- fmkey->lfik_fiemap.fm_length +
- PAGE_SIZE - 1) & PAGE_MASK;
-
- ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
- mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
- LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
- &resid, LDLM_EXTENT, &policy,
- LCK_PR | LCK_PW, &lockh, 0);
- if (mode) { /* lock is cached on client */
- if (mode != LCK_PR) {
- ldlm_lock_addref(&lockh, LCK_PR);
- ldlm_lock_decref(&lockh, LCK_PW);
- }
- } else { /* no cached lock, needs acquire lock on server side */
- fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
- fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
- }
-
-skip_locking:
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_OST_GET_INFO_FIEMAP);
- if (!req) {
- rc = -ENOMEM;
- goto drop_lock;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
- sizeof(*fmkey));
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
- *buflen);
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
- *buflen);
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- goto drop_lock;
- }
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
- memcpy(tmp, fmkey, sizeof(*fmkey));
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
- memcpy(tmp, fiemap, *buflen);
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto fini_req;
-
- reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
- if (!reply) {
- rc = -EPROTO;
- goto fini_req;
- }
-
- memcpy(fiemap, reply, *buflen);
-fini_req:
- ptlrpc_req_finished(req);
-drop_lock:
- if (mode)
- ldlm_lock_decref(&lockh, LCK_PR);
- return rc;
-}
-
-void osc_object_set_contended(struct osc_object *obj)
-{
- obj->oo_contention_time = cfs_time_current();
- /* mb(); */
- obj->oo_contended = 1;
-}
-
-void osc_object_clear_contended(struct osc_object *obj)
-{
- obj->oo_contended = 0;
-}
-
-int osc_object_is_contended(struct osc_object *obj)
-{
- struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev);
- int osc_contention_time = dev->od_contention_time;
- unsigned long cur_time = cfs_time_current();
- unsigned long retry_time;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION))
- return 1;
-
- if (!obj->oo_contended)
- return 0;
-
- /*
- * I like copy-paste. the code is copied from
- * ll_file_is_contended.
- */
- retry_time = cfs_time_add(obj->oo_contention_time,
- osc_contention_time * HZ);
- if (cfs_time_after(cur_time, retry_time)) {
- osc_object_clear_contended(obj);
- return 0;
- }
- return 1;
-}
-
-/**
- * Implementation of struct cl_object_operations::coo_req_attr_set() for osc
- * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
- * fields.
- */
-static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
- struct cl_req_attr *attr)
-{
- u64 flags = attr->cra_flags;
- struct lov_oinfo *oinfo;
- struct ost_lvb *lvb;
- struct obdo *oa;
-
- oinfo = cl2osc(obj)->oo_oinfo;
- lvb = &oinfo->loi_lvb;
- oa = attr->cra_oa;
-
- if (flags & OBD_MD_FLMTIME) {
- oa->o_mtime = lvb->lvb_mtime;
- oa->o_valid |= OBD_MD_FLMTIME;
- }
- if (flags & OBD_MD_FLATIME) {
- oa->o_atime = lvb->lvb_atime;
- oa->o_valid |= OBD_MD_FLATIME;
- }
- if (flags & OBD_MD_FLCTIME) {
- oa->o_ctime = lvb->lvb_ctime;
- oa->o_valid |= OBD_MD_FLCTIME;
- }
- if (flags & OBD_MD_FLGROUP) {
- ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
- oa->o_valid |= OBD_MD_FLGROUP;
- }
- if (flags & OBD_MD_FLID) {
- int rc;
-
- rc = ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
- if (rc) {
- CERROR("Bad %llu to set " DOSTID " : rc %d\n",
- (unsigned long long)ostid_id(&oinfo->loi_oi),
- POSTID(&oa->o_oi), rc);
- }
- oa->o_valid |= OBD_MD_FLID;
- }
- if (flags & OBD_MD_FLHANDLE) {
- struct ldlm_lock *lock;
- struct osc_page *opg;
-
- opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
- lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
- OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
- if (!lock && !opg->ops_srvlock) {
- struct ldlm_resource *res;
- struct ldlm_res_id *resname;
-
- CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
- "uncovered page!\n");
-
- resname = &osc_env_info(env)->oti_resname;
- ostid_build_res_name(&oinfo->loi_oi, resname);
- res = ldlm_resource_get(
- osc_export(cl2osc(obj))->exp_obd->obd_namespace,
- NULL, resname, LDLM_EXTENT, 0);
- ldlm_resource_dump(D_ERROR, res);
-
- LBUG();
- }
-
- /* check for lockless io. */
- if (lock) {
- oa->o_handle = lock->l_remote_handle;
- oa->o_valid |= OBD_MD_FLHANDLE;
- LDLM_LOCK_PUT(lock);
- }
- }
-}
-
-static const struct cl_object_operations osc_ops = {
- .coo_page_init = osc_page_init,
- .coo_lock_init = osc_lock_init,
- .coo_io_init = osc_io_init,
- .coo_attr_get = osc_attr_get,
- .coo_attr_update = osc_attr_update,
- .coo_glimpse = osc_object_glimpse,
- .coo_prune = osc_object_prune,
- .coo_fiemap = osc_object_fiemap,
- .coo_req_attr_set = osc_req_attr_set
-};
-
-static const struct lu_object_operations osc_lu_obj_ops = {
- .loo_object_init = osc_object_init,
- .loo_object_release = NULL,
- .loo_object_free = osc_object_free,
- .loo_object_print = osc_object_print,
- .loo_object_invariant = NULL
-};
-
-struct lu_object *osc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
-{
- struct osc_object *osc;
- struct lu_object *obj;
-
- osc = kmem_cache_zalloc(osc_object_kmem, GFP_NOFS);
- if (osc) {
- obj = osc2lu(osc);
- lu_object_init(obj, NULL, dev);
- osc->oo_cl.co_ops = &osc_ops;
- obj->lo_ops = &osc_lu_obj_ops;
- } else {
- obj = NULL;
- }
- return obj;
-}
-
-int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc)
-{
- CDEBUG(D_INODE, "Invalidate osc object: %p, # of active IOs: %d\n",
- osc, atomic_read(&osc->oo_nr_ios));
-
- wait_event_idle(osc->oo_io_waitq, !atomic_read(&osc->oo_nr_ios));
-
- /* Discard all dirty pages of this object. */
- osc_cache_truncate_start(env, osc, 0, NULL);
-
- /* Discard all caching pages */
- osc_lock_discard_pages(env, osc, 0, CL_PAGE_EOF, CLM_WRITE);
-
- /* Clear ast data of dlm lock. Do this after discarding all pages */
- osc_object_prune(env, osc2cl(osc));
-
- return 0;
-}
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
deleted file mode 100644
index 01a930dbbf64..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ /dev/null
@@ -1,1094 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_page for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include <linux/math64.h>
-#include "osc_cl_internal.h"
-
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
-static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
-static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
- struct osc_page *opg);
-
-/** \addtogroup osc
- * @{
- */
-
-/*****************************************************************************
- *
- * Page operations.
- *
- */
-static void osc_page_transfer_get(struct osc_page *opg, const char *label)
-{
- struct cl_page *page = opg->ops_cl.cpl_page;
-
- LASSERT(!opg->ops_transfer_pinned);
- cl_page_get(page);
- lu_ref_add_atomic(&page->cp_reference, label, page);
- opg->ops_transfer_pinned = 1;
-}
-
-static void osc_page_transfer_put(const struct lu_env *env,
- struct osc_page *opg)
-{
- struct cl_page *page = opg->ops_cl.cpl_page;
-
- if (opg->ops_transfer_pinned) {
- opg->ops_transfer_pinned = 0;
- lu_ref_del(&page->cp_reference, "transfer", page);
- cl_page_put(env, page);
- }
-}
-
-/**
- * This is called once for every page when it is submitted for a transfer
- * either opportunistic (osc_page_cache_add()), or immediate
- * (osc_page_submit()).
- */
-static void osc_page_transfer_add(const struct lu_env *env,
- struct osc_page *opg, enum cl_req_type crt)
-{
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
-
- osc_lru_use(osc_cli(obj), opg);
-}
-
-int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
-{
- struct osc_page *opg = cl2osc_page(slice);
- int result;
-
- osc_page_transfer_get(opg, "transfer\0cache");
- result = osc_queue_async_io(env, io, opg);
- if (result != 0)
- osc_page_transfer_put(env, opg);
- else
- osc_page_transfer_add(env, opg, CRT_WRITE);
-
- return result;
-}
-
-void osc_index2policy(union ldlm_policy_data *policy,
- const struct cl_object *obj,
- pgoff_t start, pgoff_t end)
-{
- memset(policy, 0, sizeof(*policy));
- policy->l_extent.start = cl_offset(obj, start);
- policy->l_extent.end = cl_offset(obj, end + 1) - 1;
-}
-
-static const char *osc_list(struct list_head *head)
-{
- return list_empty(head) ? "-" : "+";
-}
-
-static inline unsigned long osc_submit_duration(struct osc_page *opg)
-{
- if (opg->ops_submit_time == 0)
- return 0;
-
- return (cfs_time_current() - opg->ops_submit_time);
-}
-
-static int osc_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
- struct osc_object *obj = cl2osc(slice->cpl_obj);
- struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
-
- return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
- opg, osc_index(opg),
- /* 1 */
- oap->oap_magic, oap->oap_cmd,
- oap->oap_interrupted,
- osc_list(&oap->oap_pending_item),
- osc_list(&oap->oap_rpc_item),
- /* 2 */
- oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
- oap->oap_async_flags, oap->oap_brw_flags,
- oap->oap_request, oap->oap_cli, obj,
- /* 3 */
- opg->ops_transfer_pinned,
- osc_submit_duration(opg), opg->ops_srvlock,
- /* 4 */
- cli->cl_r_in_flight, cli->cl_w_in_flight,
- cli->cl_max_rpcs_in_flight,
- cli->cl_avail_grant,
- osc_list(&cli->cl_cache_waiters),
- osc_list(&cli->cl_loi_ready_list),
- osc_list(&cli->cl_loi_hp_ready_list),
- osc_list(&cli->cl_loi_write_list),
- osc_list(&cli->cl_loi_read_list),
- /* 5 */
- osc_list(&obj->oo_ready_item),
- osc_list(&obj->oo_hp_ready_item),
- osc_list(&obj->oo_write_item),
- osc_list(&obj->oo_read_item),
- atomic_read(&obj->oo_nr_reads),
- osc_list(&obj->oo_reading_exts),
- atomic_read(&obj->oo_nr_writes),
- osc_list(&obj->oo_hp_exts),
- osc_list(&obj->oo_urgent_exts));
-}
-
-static void osc_page_delete(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- int rc;
-
- CDEBUG(D_TRACE, "%p\n", opg);
- osc_page_transfer_put(env, opg);
- rc = osc_teardown_async_page(env, obj, opg);
- if (rc) {
- CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
- "Trying to teardown failed: %d\n", rc);
- LASSERT(0);
- }
-
- osc_lru_del(osc_cli(obj), opg);
-
- if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
- void *value;
-
- spin_lock(&obj->oo_tree_lock);
- value = radix_tree_delete(&obj->oo_tree, osc_index(opg));
- if (value)
- --obj->oo_npages;
- spin_unlock(&obj->oo_tree_lock);
-
- LASSERT(ergo(value, value == opg));
- }
-}
-
-static void osc_page_clip(const struct lu_env *env,
- const struct cl_page_slice *slice, int from, int to)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
-
- opg->ops_from = from;
- opg->ops_to = to;
- spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- spin_unlock(&oap->oap_lock);
-}
-
-static int osc_page_cancel(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct osc_page *opg = cl2osc_page(slice);
- int rc = 0;
-
- /* Check if the transferring against this page
- * is completed, or not even queued.
- */
- if (opg->ops_transfer_pinned)
- /* FIXME: may not be interrupted.. */
- rc = osc_cancel_async_page(env, opg);
- LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
- return rc;
-}
-
-static int osc_page_flush(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct osc_page *opg = cl2osc_page(slice);
- int rc;
-
- rc = osc_flush_async_page(env, io, opg);
- return rc;
-}
-
-static const struct cl_page_operations osc_page_ops = {
- .cpo_print = osc_page_print,
- .cpo_delete = osc_page_delete,
- .cpo_clip = osc_page_clip,
- .cpo_cancel = osc_page_cancel,
- .cpo_flush = osc_page_flush
-};
-
-int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
-{
- struct osc_object *osc = cl2osc(obj);
- struct osc_page *opg = cl_object_page_slice(obj, page);
- int result;
-
- opg->ops_from = 0;
- opg->ops_to = PAGE_SIZE;
-
- result = osc_prep_async_page(osc, opg, page->cp_vmpage,
- cl_offset(obj, index));
- if (result == 0) {
- struct osc_io *oio = osc_env_io(env);
-
- opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj, index,
- &osc_page_ops);
- }
- INIT_LIST_HEAD(&opg->ops_lru);
-
- /* reserve an LRU space for this page */
- if (page->cp_type == CPT_CACHEABLE && result == 0) {
- result = osc_lru_alloc(env, osc_cli(osc), opg);
- if (result == 0) {
- spin_lock(&osc->oo_tree_lock);
- result = radix_tree_insert(&osc->oo_tree, index, opg);
- if (result == 0)
- ++osc->oo_npages;
- spin_unlock(&osc->oo_tree_lock);
- LASSERT(result == 0);
- }
- }
-
- return result;
-}
-
-/**
- * Helper function called by osc_io_submit() for every page in an immediate
- * transfer (i.e., transferred synchronously).
- */
-void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
- enum cl_req_type crt, int brw_flags)
-{
- struct osc_async_page *oap = &opg->ops_oap;
-
- LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n",
- oap, oap->oap_magic);
- LASSERT(oap->oap_async_flags & ASYNC_READY);
- LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);
-
- oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
- oap->oap_page_off = opg->ops_from;
- oap->oap_count = opg->ops_to - opg->ops_from;
- oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
-
- if (capable(CAP_SYS_RESOURCE)) {
- oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
- oap->oap_cmd |= OBD_BRW_NOQUOTA;
- }
-
- opg->ops_submit_time = cfs_time_current();
- osc_page_transfer_get(opg, "transfer\0imm");
- osc_page_transfer_add(env, opg, crt);
-}
-
-/* --------------- LRU page management ------------------ */
-
-/* OSC is a natural place to manage LRU pages as applications are specialized
- * to write OSC by OSC. Ideally, if one OSC is used more frequently it should
- * occupy more LRU slots. On the other hand, we should avoid using up all LRU
- * slots (client_obd::cl_lru_left) otherwise process has to be put into sleep
- * for free LRU slots - this will be very bad so the algorithm requires each
- * OSC to free slots voluntarily to maintain a reasonable number of free slots
- * at any time.
- */
-static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
-
-/**
- * LRU pages are freed in batch mode. OSC should at least free this
- * number of pages to avoid running out of LRU slots.
- */
-static inline int lru_shrink_min(struct client_obd *cli)
-{
- return cli->cl_max_pages_per_rpc * 2;
-}
-
-/**
- * free this number at most otherwise it will take too long time to finish.
- */
-static inline int lru_shrink_max(struct client_obd *cli)
-{
- return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
-}
-
-/**
- * Check if we can free LRU slots from this OSC. If there exists LRU waiters,
- * we should free slots aggressively. In this way, slots are freed in a steady
- * step to maintain fairness among OSCs.
- *
- * Return how many LRU pages should be freed.
- */
-static int osc_cache_too_much(struct client_obd *cli)
-{
- struct cl_client_cache *cache = cli->cl_cache;
- long pages = atomic_long_read(&cli->cl_lru_in_list);
- unsigned long budget;
-
- budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
-
- /* if it's going to run out LRU slots, we should free some, but not
- * too much to maintain fairness among OSCs.
- */
- if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) {
- if (pages >= budget)
- return lru_shrink_max(cli);
- else if (pages >= budget / 2)
- return lru_shrink_min(cli);
- } else {
- time64_t duration = ktime_get_real_seconds();
- long timediff;
-
- /* knock out pages by duration of no IO activity */
- duration -= cli->cl_lru_last_used;
- /*
- * The difference shouldn't be more than 70 years
- * so we can safely case to a long. Round to
- * approximately 1 minute.
- */
- timediff = (long)(duration >> 6);
- if (timediff > 0 && pages >= budget / timediff)
- return lru_shrink_min(cli);
- }
- return 0;
-}
-
-int lru_queue_work(const struct lu_env *env, void *data)
-{
- struct client_obd *cli = data;
- int count;
-
- CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli));
-
- count = osc_cache_too_much(cli);
- if (count > 0) {
- int rc = osc_lru_shrink(env, cli, count, false);
-
- CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n",
- cli_name(cli), rc, count);
- if (rc >= count) {
- CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli));
- ptlrpcd_queue_work(cli->cl_lru_work);
- }
- }
-
- return 0;
-}
-
-void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
-{
- LIST_HEAD(lru);
- struct osc_async_page *oap;
- long npages = 0;
-
- list_for_each_entry(oap, plist, oap_pending_item) {
- struct osc_page *opg = oap2osc_page(oap);
-
- if (!opg->ops_in_lru)
- continue;
-
- ++npages;
- LASSERT(list_empty(&opg->ops_lru));
- list_add(&opg->ops_lru, &lru);
- }
-
- if (npages > 0) {
- spin_lock(&cli->cl_lru_list_lock);
- list_splice_tail(&lru, &cli->cl_lru_list);
- atomic_long_sub(npages, &cli->cl_lru_busy);
- atomic_long_add(npages, &cli->cl_lru_in_list);
- cli->cl_lru_last_used = ktime_get_real_seconds();
- spin_unlock(&cli->cl_lru_list_lock);
-
- if (waitqueue_active(&osc_lru_waitq))
- (void)ptlrpcd_queue_work(cli->cl_lru_work);
- }
-}
-
-static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
-{
- LASSERT(atomic_long_read(&cli->cl_lru_in_list) > 0);
- list_del_init(&opg->ops_lru);
- atomic_long_dec(&cli->cl_lru_in_list);
-}
-
-/**
- * Page is being destroyed. The page may be not in LRU list, if the transfer
- * has never finished(error occurred).
- */
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
-{
- if (opg->ops_in_lru) {
- spin_lock(&cli->cl_lru_list_lock);
- if (!list_empty(&opg->ops_lru)) {
- __osc_lru_del(cli, opg);
- } else {
- LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0);
- atomic_long_dec(&cli->cl_lru_busy);
- }
- spin_unlock(&cli->cl_lru_list_lock);
-
- atomic_long_inc(cli->cl_lru_left);
- /* this is a great place to release more LRU pages if
- * this osc occupies too many LRU pages and kernel is
- * stealing one of them.
- */
- if (osc_cache_too_much(cli)) {
- CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli));
- (void)ptlrpcd_queue_work(cli->cl_lru_work);
- }
- wake_up(&osc_lru_waitq);
- } else {
- LASSERT(list_empty(&opg->ops_lru));
- }
-}
-
-/**
- * Delete page from LRUlist for redirty.
- */
-static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
-{
- /* If page is being transferred for the first time,
- * ops_lru should be empty
- */
- if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
- spin_lock(&cli->cl_lru_list_lock);
- __osc_lru_del(cli, opg);
- spin_unlock(&cli->cl_lru_list_lock);
- atomic_long_inc(&cli->cl_lru_busy);
- }
-}
-
-static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
- struct cl_page **pvec, int max_index)
-{
- int i;
-
- for (i = 0; i < max_index; i++) {
- struct cl_page *page = pvec[i];
-
- LASSERT(cl_page_is_owned(page, io));
- cl_page_delete(env, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- cl_page_put(env, page);
-
- pvec[i] = NULL;
- }
-}
-
-/**
- * Check if a cl_page can be released, i.e, it's not being used.
- *
- * If unstable account is turned on, bulk transfer may hold one refcount
- * for recovery so we need to check vmpage refcount as well; otherwise,
- * even we can destroy cl_page but the corresponding vmpage can't be reused.
- */
-static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page)
-{
- if (cl_page_in_use_noref(page))
- return true;
-
- if (cli->cl_cache->ccc_unstable_check) {
- struct page *vmpage = cl_page_vmpage(page);
-
- /* vmpage have two known users: cl_page and VM page cache */
- if (page_count(vmpage) - page_mapcount(vmpage) > 2)
- return true;
- }
- return false;
-}
-
-/**
- * Drop @target of pages from LRU at most.
- */
-long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
- long target, bool force)
-{
- struct cl_io *io;
- struct cl_object *clobj = NULL;
- struct cl_page **pvec;
- struct osc_page *opg;
- int maxscan = 0;
- long count = 0;
- int index = 0;
- int rc = 0;
-
- LASSERT(atomic_long_read(&cli->cl_lru_in_list) >= 0);
- if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
- return 0;
-
- CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n",
- cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force);
- if (!force) {
- if (atomic_read(&cli->cl_lru_shrinkers) > 0)
- return -EBUSY;
-
- if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
- atomic_dec(&cli->cl_lru_shrinkers);
- return -EBUSY;
- }
- } else {
- atomic_inc(&cli->cl_lru_shrinkers);
- }
-
- pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
- io = &osc_env_info(env)->oti_io;
-
- spin_lock(&cli->cl_lru_list_lock);
- if (force)
- cli->cl_lru_reclaim++;
- maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
- while (!list_empty(&cli->cl_lru_list)) {
- struct cl_page *page;
- bool will_free = false;
-
- if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1)
- break;
-
- if (--maxscan < 0)
- break;
-
- opg = list_entry(cli->cl_lru_list.next, struct osc_page,
- ops_lru);
- page = opg->ops_cl.cpl_page;
- if (lru_page_busy(cli, page)) {
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
- continue;
- }
-
- LASSERT(page->cp_obj);
- if (clobj != page->cp_obj) {
- struct cl_object *tmp = page->cp_obj;
-
- cl_object_get(tmp);
- spin_unlock(&cli->cl_lru_list_lock);
-
- if (clobj) {
- discard_pagevec(env, io, pvec, index);
- index = 0;
-
- cl_io_fini(env, io);
- cl_object_put(env, clobj);
- clobj = NULL;
- }
-
- clobj = tmp;
- io->ci_obj = clobj;
- io->ci_ignore_layout = 1;
- rc = cl_io_init(env, io, CIT_MISC, clobj);
-
- spin_lock(&cli->cl_lru_list_lock);
-
- if (rc != 0)
- break;
-
- ++maxscan;
- continue;
- }
-
- if (cl_page_own_try(env, io, page) == 0) {
- if (!lru_page_busy(cli, page)) {
- /* remove it from lru list earlier to avoid
- * lock contention
- */
- __osc_lru_del(cli, opg);
- opg->ops_in_lru = 0; /* will be discarded */
-
- cl_page_get(page);
- will_free = true;
- } else {
- cl_page_disown(env, io, page);
- }
- }
-
- if (!will_free) {
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
- continue;
- }
-
- /* Don't discard and free the page with cl_lru_list held */
- pvec[index++] = page;
- if (unlikely(index == OTI_PVEC_SIZE)) {
- spin_unlock(&cli->cl_lru_list_lock);
- discard_pagevec(env, io, pvec, index);
- index = 0;
-
- spin_lock(&cli->cl_lru_list_lock);
- }
-
- if (++count >= target)
- break;
- }
- spin_unlock(&cli->cl_lru_list_lock);
-
- if (clobj) {
- discard_pagevec(env, io, pvec, index);
-
- cl_io_fini(env, io);
- cl_object_put(env, clobj);
- }
-
- atomic_dec(&cli->cl_lru_shrinkers);
- if (count > 0) {
- atomic_long_add(count, cli->cl_lru_left);
- wake_up_all(&osc_lru_waitq);
- }
- return count > 0 ? count : rc;
-}
-
-/**
- * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least
- * \@npages of LRU slots. For performance consideration, it's better to drop
- * LRU pages in batch. Therefore, the actual number is adjusted at least
- * max_pages_per_rpc.
- */
-static long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
-{
- struct lu_env *env;
- struct cl_client_cache *cache = cli->cl_cache;
- int max_scans;
- u16 refcheck;
- long rc = 0;
-
- LASSERT(cache);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return 0;
-
- npages = max_t(int, npages, cli->cl_max_pages_per_rpc);
- CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n",
- cli_name(cli), npages);
- rc = osc_lru_shrink(env, cli, npages, true);
- if (rc >= npages) {
- CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n",
- cli_name(cli), rc, npages);
- if (osc_cache_too_much(cli) > 0)
- ptlrpcd_queue_work(cli->cl_lru_work);
- goto out;
- } else if (rc > 0) {
- npages -= rc;
- }
-
- CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n",
- cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list),
- atomic_long_read(&cli->cl_lru_busy), npages);
-
- /* Reclaim LRU slots from other client_obd as it can't free enough
- * from its own. This should rarely happen.
- */
- spin_lock(&cache->ccc_lru_lock);
- LASSERT(!list_empty(&cache->ccc_lru));
-
- cache->ccc_lru_shrinkers++;
- list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
-
- max_scans = atomic_read(&cache->ccc_users) - 2;
- while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
- cli = list_entry(cache->ccc_lru.next, struct client_obd,
- cl_lru_osc);
-
- CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
- cli_name(cli), cli,
- atomic_long_read(&cli->cl_lru_in_list),
- atomic_long_read(&cli->cl_lru_busy));
-
- list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- if (osc_cache_too_much(cli) > 0) {
- spin_unlock(&cache->ccc_lru_lock);
-
- rc = osc_lru_shrink(env, cli, npages, true);
- spin_lock(&cache->ccc_lru_lock);
- if (rc >= npages)
- break;
- if (rc > 0)
- npages -= rc;
- }
- }
- spin_unlock(&cache->ccc_lru_lock);
-
-out:
- cl_env_put(env, &refcheck);
- CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
- cli_name(cli), cli, rc);
- return rc;
-}
-
-/**
- * osc_lru_alloc() is called to reserve an LRU slot for a cl_page.
- *
- * Usually the LRU slots are reserved in osc_io_iter_rw_init().
- * Only in the case that the LRU slots are in extreme shortage, it should
- * have reserved enough slots for an IO.
- */
-static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
- struct osc_page *opg)
-{
- struct osc_io *oio = osc_env_io(env);
- int rc = 0;
-
- if (!cli->cl_cache) /* shall not be in LRU */
- return 0;
-
- if (oio->oi_lru_reserved > 0) {
- --oio->oi_lru_reserved;
- goto out;
- }
-
- LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
- while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
- /* run out of LRU spaces, try to drop some by itself */
- rc = osc_lru_reclaim(cli, 1);
- if (rc < 0)
- break;
- if (rc > 0)
- continue;
-
- cond_resched();
-
- rc = l_wait_event_abortable(osc_lru_waitq,
- atomic_long_read(cli->cl_lru_left) > 0);
-
- if (rc < 0)
- break;
- }
-
-out:
- if (rc >= 0) {
- atomic_long_inc(&cli->cl_lru_busy);
- opg->ops_in_lru = 1;
- rc = 0;
- }
-
- return rc;
-}
-
-/**
- * osc_lru_reserve() is called to reserve enough LRU slots for I/O.
- *
- * The benefit of doing this is to reduce contention against atomic counter
- * cl_lru_left by changing it from per-page access to per-IO access.
- */
-unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages)
-{
- unsigned long reserved = 0;
- unsigned long max_pages;
- unsigned long c;
-
- /*
- * reserve a full RPC window at most to avoid that a thread accidentally
- * consumes too many LRU slots
- */
- max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
- if (npages > max_pages)
- npages = max_pages;
-
- c = atomic_long_read(cli->cl_lru_left);
- if (c < npages && osc_lru_reclaim(cli, npages) > 0)
- c = atomic_long_read(cli->cl_lru_left);
- while (c >= npages) {
- if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
- reserved = npages;
- break;
- }
- c = atomic_long_read(cli->cl_lru_left);
- }
- if (atomic_long_read(cli->cl_lru_left) < max_pages) {
- /*
- * If there aren't enough pages in the per-OSC LRU then
- * wake up the LRU thread to try and clear out space, so
- * we don't block if pages are being dirtied quickly.
- */
- CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
- cli_name(cli), atomic_long_read(cli->cl_lru_left),
- max_pages);
- (void)ptlrpcd_queue_work(cli->cl_lru_work);
- }
-
- return reserved;
-}
-
-/**
- * osc_lru_unreserve() is called to unreserve LRU slots.
- *
- * LRU slots reserved by osc_lru_reserve() may have entries left due to several
- * reasons such as page already existing or I/O error. Those reserved slots
- * should be freed by calling this function.
- */
-void osc_lru_unreserve(struct client_obd *cli, unsigned long npages)
-{
- atomic_long_add(npages, cli->cl_lru_left);
- wake_up_all(&osc_lru_waitq);
-}
-
-/**
- * Atomic operations are expensive. We accumulate the accounting for the
- * same page pgdat to get better performance.
- * In practice this can work pretty good because the pages in the same RPC
- * are likely from the same page zone.
- */
-static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
- int factor)
-{
- int page_count = desc->bd_iov_count;
- pg_data_t *last = NULL;
- int count = 0;
- int i;
-
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-
- for (i = 0; i < page_count; i++) {
- pg_data_t *pgdat = page_pgdat(BD_GET_KIOV(desc, i).bv_page);
-
- if (likely(pgdat == last)) {
- ++count;
- continue;
- }
-
- if (count > 0) {
- mod_node_page_state(pgdat, NR_UNSTABLE_NFS,
- factor * count);
- count = 0;
- }
- last = pgdat;
- ++count;
- }
- if (count > 0)
- mod_node_page_state(last, NR_UNSTABLE_NFS, factor * count);
-}
-
-static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
-{
- unstable_page_accounting(desc, 1);
-}
-
-static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
-{
- unstable_page_accounting(desc, -1);
-}
-
-/**
- * Performs "unstable" page accounting. This function balances the
- * increment operations performed in osc_inc_unstable_pages. It is
- * registered as the RPC request callback, and is executed when the
- * bulk RPC is committed on the server. Thus at this point, the pages
- * involved in the bulk transfer are no longer considered unstable.
- *
- * If this function is called, the request should have been committed
- * or req:rq_unstable must have been set; it implies that the unstable
- * statistic have been added.
- */
-void osc_dec_unstable_pages(struct ptlrpc_request *req)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- int page_count = desc->bd_iov_count;
- long unstable_count;
-
- LASSERT(page_count >= 0);
- dec_unstable_page_accounting(desc);
-
- unstable_count = atomic_long_sub_return(page_count,
- &cli->cl_unstable_count);
- LASSERT(unstable_count >= 0);
-
- unstable_count = atomic_long_sub_return(page_count,
- &cli->cl_cache->ccc_unstable_nr);
- LASSERT(unstable_count >= 0);
- if (!unstable_count)
- wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
-
- if (waitqueue_active(&osc_lru_waitq))
- (void)ptlrpcd_queue_work(cli->cl_lru_work);
-}
-
-/**
- * "unstable" page accounting. See: osc_dec_unstable_pages.
- */
-void osc_inc_unstable_pages(struct ptlrpc_request *req)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- long page_count = desc->bd_iov_count;
-
- /* No unstable page tracking */
- if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check)
- return;
-
- add_unstable_page_accounting(desc);
- atomic_long_add(page_count, &cli->cl_unstable_count);
- atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);
-
- /*
- * If the request has already been committed (i.e. brw_commit
- * called via rq_commit_cb), we need to undo the unstable page
- * increments we just performed because rq_commit_cb wont be
- * called again.
- */
- spin_lock(&req->rq_lock);
- if (unlikely(req->rq_committed)) {
- spin_unlock(&req->rq_lock);
-
- osc_dec_unstable_pages(req);
- } else {
- req->rq_unstable = 1;
- spin_unlock(&req->rq_lock);
- }
-}
-
-/**
- * Check if it piggybacks SOFT_SYNC flag to OST from this OSC.
- * This function will be called by every BRW RPC so it's critical
- * to make this function fast.
- */
-bool osc_over_unstable_soft_limit(struct client_obd *cli)
-{
- long unstable_nr, osc_unstable_count;
-
- /* Can't check cli->cl_unstable_count, therefore, no soft limit */
- if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check)
- return false;
-
- osc_unstable_count = atomic_long_read(&cli->cl_unstable_count);
- unstable_nr = atomic_long_read(&cli->cl_cache->ccc_unstable_nr);
-
- CDEBUG(D_CACHE,
- "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
- cli_name(cli), cli, unstable_nr, osc_unstable_count);
-
- /*
- * If the LRU slots are in shortage - 25% remaining AND this OSC
- * has one full RPC window of unstable pages, it's a good chance
- * to piggyback a SOFT_SYNC flag.
- * Please notice that the OST won't take immediate response for the
- * SOFT_SYNC request so active OSCs will have more chance to carry
- * the flag, this is reasonable.
- */
- return unstable_nr > cli->cl_cache->ccc_lru_max >> 2 &&
- osc_unstable_count > cli->cl_max_pages_per_rpc *
- cli->cl_max_rpcs_in_flight;
-}
-
-/**
- * Return how many LRU pages in the cache of all OSC devices
- *
- * Return: return # of cached LRU pages times reclaimation tendency
- * SHRINK_STOP if it cannot do any scanning in this time
- */
-unsigned long osc_cache_shrink_count(struct shrinker *sk,
- struct shrink_control *sc)
-{
- struct client_obd *cli;
- unsigned long cached = 0;
-
- spin_lock(&osc_shrink_lock);
- list_for_each_entry(cli, &osc_shrink_list, cl_shrink_list)
- cached += atomic_long_read(&cli->cl_lru_in_list);
- spin_unlock(&osc_shrink_lock);
-
- return (cached * sysctl_vfs_cache_pressure) / 100;
-}
-
-/**
- * Scan and try to reclaim sc->nr_to_scan cached LRU pages
- *
- * Return: number of cached LRU pages reclaimed
- * SHRINK_STOP if it cannot do any scanning in this time
- *
- * Linux kernel will loop calling this shrinker scan routine with
- * sc->nr_to_scan = SHRINK_BATCH(128 for now) until kernel got enough memory.
- *
- * If sc->nr_to_scan is 0, the VM is querying the cache size, we don't need
- * to scan and try to reclaim LRU pages, just return 0 and
- * osc_cache_shrink_count() will report the LRU page number.
- */
-unsigned long osc_cache_shrink_scan(struct shrinker *sk,
- struct shrink_control *sc)
-{
- struct client_obd *stop_anchor = NULL;
- struct client_obd *cli;
- struct lu_env *env;
- long shrank = 0;
- u16 refcheck;
- int rc;
-
- if (!sc->nr_to_scan)
- return 0;
-
- if (!(sc->gfp_mask & __GFP_FS))
- return SHRINK_STOP;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return SHRINK_STOP;
-
- spin_lock(&osc_shrink_lock);
- while (!list_empty(&osc_shrink_list)) {
- cli = list_entry(osc_shrink_list.next, struct client_obd,
- cl_shrink_list);
-
- if (!stop_anchor)
- stop_anchor = cli;
- else if (cli == stop_anchor)
- break;
-
- list_move_tail(&cli->cl_shrink_list, &osc_shrink_list);
- spin_unlock(&osc_shrink_lock);
-
- /* shrink no more than max_pages_per_rpc for an OSC */
- rc = osc_lru_shrink(env, cli, (sc->nr_to_scan - shrank) >
- cli->cl_max_pages_per_rpc ?
- cli->cl_max_pages_per_rpc :
- sc->nr_to_scan - shrank, true);
- if (rc > 0)
- shrank += rc;
-
- if (shrank >= sc->nr_to_scan)
- goto out;
-
- spin_lock(&osc_shrink_lock);
- }
- spin_unlock(&osc_shrink_lock);
-
-out:
- cl_env_put(env, &refcheck);
-
- return shrank;
-}
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
deleted file mode 100644
index ce1731dc604f..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ /dev/null
@@ -1,284 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- *
- * Code originally extracted from quota directory
- */
-
-#include <obd_class.h>
-#include "osc_internal.h"
-
-static inline struct osc_quota_info *osc_oqi_alloc(u32 id)
-{
- struct osc_quota_info *oqi;
-
- oqi = kmem_cache_zalloc(osc_quota_kmem, GFP_NOFS);
- if (oqi)
- oqi->oqi_id = id;
-
- return oqi;
-}
-
-int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
-{
- int type;
-
- for (type = 0; type < MAXQUOTAS; type++) {
- struct osc_quota_info *oqi;
-
- oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
- if (oqi) {
- /* do not try to access oqi here, it could have been
- * freed by osc_quota_setdq()
- */
-
- /* the slot is busy, the user is about to run out of
- * quota space on this OST
- */
- CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
- type == USRQUOTA ? "user" : "grout", qid[type]);
- return NO_QUOTA;
- }
- }
-
- return QUOTA_OK;
-}
-
-#define MD_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_MD_FLUSRQUOTA \
- : OBD_MD_FLGRPQUOTA)
-#define FL_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_FL_NO_USRQUOTA \
- : OBD_FL_NO_GRPQUOTA)
-
-int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
- u32 valid, u32 flags)
-{
- int type;
- int rc = 0;
-
- if ((valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) == 0)
- return 0;
-
- for (type = 0; type < MAXQUOTAS; type++) {
- struct osc_quota_info *oqi;
-
- if ((valid & MD_QUOTA_FLAG(type)) == 0)
- continue;
-
- /* lookup the ID in the per-type hash table */
- oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
- if ((flags & FL_QUOTA_FLAG(type)) != 0) {
- /* This ID is getting close to its quota limit, let's
- * switch to sync I/O
- */
- if (oqi)
- continue;
-
- oqi = osc_oqi_alloc(qid[type]);
- if (!oqi) {
- rc = -ENOMEM;
- break;
- }
-
- rc = cfs_hash_add_unique(cli->cl_quota_hash[type],
- &qid[type], &oqi->oqi_hash);
- /* race with others? */
- if (rc == -EALREADY) {
- rc = 0;
- kmem_cache_free(osc_quota_kmem, oqi);
- }
-
- CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n",
- cli_name(cli),
- type == USRQUOTA ? "user" : "group",
- qid[type], rc);
- } else {
- /* This ID is now off the hook, let's remove it from
- * the hash table
- */
- if (!oqi)
- continue;
-
- oqi = cfs_hash_del_key(cli->cl_quota_hash[type],
- &qid[type]);
- if (oqi)
- kmem_cache_free(osc_quota_kmem, oqi);
-
- CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n",
- cli_name(cli),
- type == USRQUOTA ? "user" : "group",
- qid[type], oqi);
- }
- }
-
- return rc;
-}
-
-/*
- * Hash operations for uid/gid <-> osc_quota_info
- */
-static unsigned int
-oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
-{
- return cfs_hash_u32_hash(*((__u32 *)key), mask);
-}
-
-static int
-oqi_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct osc_quota_info *oqi;
- u32 uid;
-
- LASSERT(key);
- uid = *((u32 *)key);
- oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
-
- return uid == oqi->oqi_id;
-}
-
-static void *
-oqi_key(struct hlist_node *hnode)
-{
- struct osc_quota_info *oqi;
-
- oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
- return &oqi->oqi_id;
-}
-
-static void *
-oqi_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct osc_quota_info, oqi_hash);
-}
-
-static void
-oqi_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
-}
-
-static void
-oqi_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
-}
-
-static void
-oqi_exit(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct osc_quota_info *oqi;
-
- oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
-
- kmem_cache_free(osc_quota_kmem, oqi);
-}
-
-#define HASH_QUOTA_BKT_BITS 5
-#define HASH_QUOTA_CUR_BITS 5
-#define HASH_QUOTA_MAX_BITS 15
-
-static struct cfs_hash_ops quota_hash_ops = {
- .hs_hash = oqi_hashfn,
- .hs_keycmp = oqi_keycmp,
- .hs_key = oqi_key,
- .hs_object = oqi_object,
- .hs_get = oqi_get,
- .hs_put_locked = oqi_put_locked,
- .hs_exit = oqi_exit,
-};
-
-int osc_quota_setup(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
- int i, type;
-
- for (type = 0; type < MAXQUOTAS; type++) {
- cli->cl_quota_hash[type] = cfs_hash_create("QUOTA_HASH",
- HASH_QUOTA_CUR_BITS,
- HASH_QUOTA_MAX_BITS,
- HASH_QUOTA_BKT_BITS,
- 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &quota_hash_ops,
- CFS_HASH_DEFAULT);
- if (!cli->cl_quota_hash[type])
- break;
- }
-
- if (type == MAXQUOTAS)
- return 0;
-
- for (i = 0; i < type; i++)
- cfs_hash_putref(cli->cl_quota_hash[i]);
-
- return -ENOMEM;
-}
-
-int osc_quota_cleanup(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
- int type;
-
- for (type = 0; type < MAXQUOTAS; type++)
- cfs_hash_putref(cli->cl_quota_hash[type]);
-
- return 0;
-}
-
-int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct ptlrpc_request *req;
- struct obd_quotactl *oqc;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_OST_QUOTACTL, LUSTRE_OST_VERSION,
- OST_QUOTACTL);
- if (!req)
- return -ENOMEM;
-
- oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *oqc = *oqctl;
-
- ptlrpc_request_set_replen(req);
- ptlrpc_at_set_req_timeout(req);
- req->rq_no_resend = 1;
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
-
- if (req->rq_repmsg) {
- oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- if (oqc) {
- *oqctl = *oqc;
- } else if (!rc) {
- CERROR("Can't unpack obd_quotactl\n");
- rc = -EPROTO;
- }
- } else if (!rc) {
- CERROR("Can't unpack obd_quotactl\n");
- rc = -EPROTO;
- }
- ptlrpc_req_finished(req);
-
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
deleted file mode 100644
index 1c2bbbf5d864..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ /dev/null
@@ -1,2899 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include <linux/libcfs/libcfs.h>
-
-#include <lustre_dlm.h>
-#include <lustre_net.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <obd_cksum.h>
-
-#include <lustre_ha.h>
-#include <lprocfs_status.h>
-#include <uapi/linux/lustre/lustre_ioctl.h>
-#include <lustre_debug.h>
-#include <lustre_obdo.h>
-#include <uapi/linux/lustre/lustre_param.h>
-#include <lustre_fid.h>
-#include <obd_class.h>
-#include <obd.h>
-#include "osc_internal.h"
-#include "osc_cl_internal.h"
-
-atomic_t osc_pool_req_count;
-unsigned int osc_reqpool_maxreqcount;
-struct ptlrpc_request_pool *osc_rq_pool;
-
-/* max memory used for request pool, unit is MB */
-static unsigned int osc_reqpool_mem_max = 5;
-module_param(osc_reqpool_mem_max, uint, 0444);
-
-struct osc_brw_async_args {
- struct obdo *aa_oa;
- int aa_requested_nob;
- int aa_nio_count;
- u32 aa_page_count;
- int aa_resends;
- struct brw_page **aa_ppga;
- struct client_obd *aa_cli;
- struct list_head aa_oaps;
- struct list_head aa_exts;
-};
-
-struct osc_async_args {
- struct obd_info *aa_oi;
-};
-
-struct osc_setattr_args {
- struct obdo *sa_oa;
- obd_enqueue_update_f sa_upcall;
- void *sa_cookie;
-};
-
-struct osc_fsync_args {
- struct osc_object *fa_obj;
- struct obdo *fa_oa;
- obd_enqueue_update_f fa_upcall;
- void *fa_cookie;
-};
-
-struct osc_enqueue_args {
- struct obd_export *oa_exp;
- enum ldlm_type oa_type;
- enum ldlm_mode oa_mode;
- __u64 *oa_flags;
- osc_enqueue_upcall_f oa_upcall;
- void *oa_cookie;
- struct ost_lvb *oa_lvb;
- struct lustre_handle oa_lockh;
- unsigned int oa_agl:1;
-};
-
-static void osc_release_ppga(struct brw_page **ppga, u32 count);
-static int brw_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc);
-
-static inline void osc_pack_req_body(struct ptlrpc_request *req,
- struct obdo *oa)
-{
- struct ost_body *body;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
-
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
-}
-
-static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa)
-{
- struct ptlrpc_request *req;
- struct ost_body *body;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- osc_pack_req_body(req, oa);
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (!body) {
- rc = -EPROTO;
- goto out;
- }
-
- CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
- &body->oa);
-
- oa->o_blksize = cli_brw_size(exp->exp_obd);
- oa->o_valid |= OBD_MD_FLBLKSZ;
-
- out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa)
-{
- struct ptlrpc_request *req;
- struct ost_body *body;
- int rc;
-
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- osc_pack_req_body(req, oa);
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (!body) {
- rc = -EPROTO;
- goto out;
- }
-
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
- &body->oa);
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int osc_setattr_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_setattr_args *sa, int rc)
-{
- struct ost_body *body;
-
- if (rc != 0)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (!body) {
- rc = -EPROTO;
- goto out;
- }
-
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
- &body->oa);
-out:
- rc = sa->sa_upcall(sa->sa_cookie, rc);
- return rc;
-}
-
-int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset)
-{
- struct ptlrpc_request *req;
- struct osc_setattr_args *sa;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- osc_pack_req_body(req, oa);
-
- ptlrpc_request_set_replen(req);
-
- /* do mds to ost setattr asynchronously */
- if (!rqset) {
- /* Do not wait for response. */
- ptlrpcd_add_req(req);
- } else {
- req->rq_interpret_reply =
- (ptlrpc_interpterer_t)osc_setattr_interpret;
-
- BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
- sa = ptlrpc_req_async_args(req);
- sa->sa_oa = oa;
- sa->sa_upcall = upcall;
- sa->sa_cookie = cookie;
-
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
- }
-
- return 0;
-}
-
-static int osc_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa)
-{
- struct ptlrpc_request *req;
- struct ost_body *body;
- int rc;
-
- LASSERT(oa);
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
- LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
- if (!req) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
- if (rc) {
- ptlrpc_request_free(req);
- goto out;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
-
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out_req;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (!body) {
- rc = -EPROTO;
- goto out_req;
- }
-
- CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
-
- oa->o_blksize = cli_brw_size(exp->exp_obd);
- oa->o_valid |= OBD_MD_FLBLKSZ;
-
- CDEBUG(D_HA, "transno: %lld\n",
- lustre_msg_get_transno(req->rq_repmsg));
-out_req:
- ptlrpc_req_finished(req);
-out:
- return rc;
-}
-
-int osc_punch_base(struct obd_export *exp, struct obdo *oa,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset)
-{
- struct ptlrpc_request *req;
- struct osc_setattr_args *sa;
- struct ost_body *body;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
- ptlrpc_at_set_req_timeout(req);
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oa);
-
- ptlrpc_request_set_replen(req);
-
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
- BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
- sa = ptlrpc_req_async_args(req);
- sa->sa_oa = oa;
- sa->sa_upcall = upcall;
- sa->sa_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
-
- return 0;
-}
-
-static int osc_sync_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *arg, int rc)
-{
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- struct osc_fsync_args *fa = arg;
- unsigned long valid = 0;
- struct ost_body *body;
- struct cl_object *obj;
-
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (!body) {
- CERROR("can't unpack ost_body\n");
- rc = -EPROTO;
- goto out;
- }
-
- *fa->fa_oa = body->oa;
- obj = osc2cl(fa->fa_obj);
-
- /* Update osc object's blocks attribute */
- cl_object_attr_lock(obj);
- if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
- attr->cat_blocks = body->oa.o_blocks;
- valid |= CAT_BLOCKS;
- }
-
- if (valid)
- cl_object_attr_update(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
-
-out:
- rc = fa->fa_upcall(fa->fa_cookie, rc);
- return rc;
-}
-
-int osc_sync_base(struct osc_object *obj, struct obdo *oa,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset)
-{
- struct obd_export *exp = osc_export(obj);
- struct ptlrpc_request *req;
- struct ost_body *body;
- struct osc_fsync_args *fa;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- /* overload the size and blocks fields in the oa with start/end */
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oa);
-
- ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = osc_sync_interpret;
-
- BUILD_BUG_ON(sizeof(*fa) > sizeof(req->rq_async_args));
- fa = ptlrpc_req_async_args(req);
- fa->fa_obj = obj;
- fa->fa_oa = oa;
- fa->fa_upcall = upcall;
- fa->fa_cookie = cookie;
-
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
-
- return 0;
-}
-
-/* Find and cancel locally locks matched by @mode in the resource found by
- * @objid. Found locks are added into @cancel list. Returns the amount of
- * locks added to @cancels list.
- */
-static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
- struct list_head *cancels,
- enum ldlm_mode mode, __u64 lock_flags)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct ldlm_res_id res_id;
- struct ldlm_resource *res;
- int count;
-
- /* Return, i.e. cancel nothing, only if ELC is supported (flag in
- * export) but disabled through procfs (flag in NS).
- *
- * This distinguishes from a case when ELC is not supported originally,
- * when we still want to cancel locks in advance and just cancel them
- * locally, without sending any RPC.
- */
- if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
- return 0;
-
- ostid_build_res_name(&oa->o_oi, &res_id);
- res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if (IS_ERR(res))
- return 0;
-
- LDLM_RESOURCE_ADDREF(res);
- count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
- lock_flags, 0, NULL);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- return count;
-}
-
-static int osc_destroy_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data,
- int rc)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
-
- atomic_dec(&cli->cl_destroy_in_flight);
- wake_up(&cli->cl_destroy_waitq);
- return 0;
-}
-
-static int osc_can_send_destroy(struct client_obd *cli)
-{
- if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
- cli->cl_max_rpcs_in_flight) {
- /* The destroy request can be sent */
- return 1;
- }
- if (atomic_dec_return(&cli->cl_destroy_in_flight) <
- cli->cl_max_rpcs_in_flight) {
- /*
- * The counter has been modified between the two atomic
- * operations.
- */
- wake_up(&cli->cl_destroy_waitq);
- }
- return 0;
-}
-
-static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct ptlrpc_request *req;
- struct ost_body *body;
- LIST_HEAD(cancels);
- int rc, count;
-
- if (!oa) {
- CDEBUG(D_INFO, "oa NULL\n");
- return -EINVAL;
- }
-
- count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
- LDLM_FL_DISCARD_DATA);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
- if (!req) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
-
- rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
- 0, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
- ptlrpc_at_set_req_timeout(req);
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
-
- ptlrpc_request_set_replen(req);
-
- req->rq_interpret_reply = osc_destroy_interpret;
- if (!osc_can_send_destroy(cli)) {
- /*
- * Wait until the number of on-going destroy RPCs drops
- * under max_rpc_in_flight
- */
- l_wait_event_abortable_exclusive(cli->cl_destroy_waitq,
- osc_can_send_destroy(cli));
- }
-
- /* Do not wait for response */
- ptlrpcd_add_req(req);
- return 0;
-}
-
-static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
- long writing_bytes)
-{
- u32 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;
-
- LASSERT(!(oa->o_valid & bits));
-
- oa->o_valid |= bits;
- spin_lock(&cli->cl_loi_list_lock);
- oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
- if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
- cli->cl_dirty_max_pages)) {
- CERROR("dirty %lu - %lu > dirty_max %lu\n",
- cli->cl_dirty_pages, cli->cl_dirty_transit,
- cli->cl_dirty_max_pages);
- oa->o_undirty = 0;
- } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
- atomic_long_read(&obd_dirty_transit_pages) >
- (long)(obd_max_dirty_pages + 1))) {
- /* The atomic_read() allowing the atomic_inc() are
- * not covered by a lock thus they may safely race and trip
- * this CERROR() unless we add in a small fudge factor (+1).
- */
- CERROR("%s: dirty %ld + %ld > system dirty_max %ld\n",
- cli_name(cli), atomic_long_read(&obd_dirty_pages),
- atomic_long_read(&obd_dirty_transit_pages),
- obd_max_dirty_pages);
- oa->o_undirty = 0;
- } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
- 0x7fffffff)) {
- CERROR("dirty %lu - dirty_max %lu too big???\n",
- cli->cl_dirty_pages, cli->cl_dirty_max_pages);
- oa->o_undirty = 0;
- } else {
- unsigned long max_in_flight;
-
- max_in_flight = (cli->cl_max_pages_per_rpc << PAGE_SHIFT) *
- (cli->cl_max_rpcs_in_flight + 1);
- oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_SHIFT,
- max_in_flight);
- }
- oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
- oa->o_dropped = cli->cl_lost_grant;
- cli->cl_lost_grant = 0;
- spin_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
- oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
-}
-
-void osc_update_next_shrink(struct client_obd *cli)
-{
- cli->cl_next_shrink_grant =
- cfs_time_shift(cli->cl_grant_shrink_interval);
- CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
- cli->cl_next_shrink_grant);
-}
-
-static void __osc_update_grant(struct client_obd *cli, u64 grant)
-{
- spin_lock(&cli->cl_loi_list_lock);
- cli->cl_avail_grant += grant;
- spin_unlock(&cli->cl_loi_list_lock);
-}
-
-static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
-{
- if (body->oa.o_valid & OBD_MD_FLGRANT) {
- CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
- __osc_update_grant(cli, body->oa.o_grant);
- }
-}
-
-static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set);
-
-static int osc_shrink_grant_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *aa, int rc)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct obdo *oa = ((struct osc_brw_async_args *)aa)->aa_oa;
- struct ost_body *body;
-
- if (rc != 0) {
- __osc_update_grant(cli, oa->o_grant);
- goto out;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- osc_update_grant(cli, body);
-out:
- kmem_cache_free(obdo_cachep, oa);
- return rc;
-}
-
-static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
-{
- spin_lock(&cli->cl_loi_list_lock);
- oa->o_grant = cli->cl_avail_grant / 4;
- cli->cl_avail_grant -= oa->o_grant;
- spin_unlock(&cli->cl_loi_list_lock);
- if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
- oa->o_valid |= OBD_MD_FLFLAGS;
- oa->o_flags = 0;
- }
- oa->o_flags |= OBD_FL_SHRINK_GRANT;
- osc_update_next_shrink(cli);
-}
-
-/* Shrink the current grant, either from some large amount to enough for a
- * full set of in-flight RPCs, or if we have already shrunk to that limit
- * then to enough for a single RPC. This avoids keeping more grant than
- * needed, and avoids shrinking the grant piecemeal.
- */
-static int osc_shrink_grant(struct client_obd *cli)
-{
- __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
- (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
-
- spin_lock(&cli->cl_loi_list_lock);
- if (cli->cl_avail_grant <= target_bytes)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
- spin_unlock(&cli->cl_loi_list_lock);
-
- return osc_shrink_grant_to_target(cli, target_bytes);
-}
-
-int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
-{
- int rc = 0;
- struct ost_body *body;
-
- spin_lock(&cli->cl_loi_list_lock);
- /* Don't shrink if we are already above or below the desired limit
- * We don't want to shrink below a single RPC, as that will negatively
- * impact block allocation and long-term performance.
- */
- if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
-
- if (target_bytes >= cli->cl_avail_grant) {
- spin_unlock(&cli->cl_loi_list_lock);
- return 0;
- }
- spin_unlock(&cli->cl_loi_list_lock);
-
- body = kzalloc(sizeof(*body), GFP_NOFS);
- if (!body)
- return -ENOMEM;
-
- osc_announce_cached(cli, &body->oa, 0);
-
- spin_lock(&cli->cl_loi_list_lock);
- body->oa.o_grant = cli->cl_avail_grant - target_bytes;
- cli->cl_avail_grant = target_bytes;
- spin_unlock(&cli->cl_loi_list_lock);
- if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
- body->oa.o_valid |= OBD_MD_FLFLAGS;
- body->oa.o_flags = 0;
- }
- body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
- osc_update_next_shrink(cli);
-
- rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
- sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
- sizeof(*body), body, NULL);
- if (rc != 0)
- __osc_update_grant(cli, body->oa.o_grant);
- kfree(body);
- return rc;
-}
-
-static int osc_should_shrink_grant(struct client_obd *client)
-{
- unsigned long time = cfs_time_current();
- unsigned long next_shrink = client->cl_next_shrink_grant;
-
- if ((client->cl_import->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_GRANT_SHRINK) == 0)
- return 0;
-
- if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
- /* Get the current RPC size directly, instead of going via:
- * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
- * Keep comment here so that it can be found by searching.
- */
- int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
-
- if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
- client->cl_avail_grant > brw_size)
- return 1;
-
- osc_update_next_shrink(client);
- }
- return 0;
-}
-
-static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
-{
- struct client_obd *client;
-
- list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
- if (osc_should_shrink_grant(client))
- osc_shrink_grant(client);
- }
- return 0;
-}
-
-static int osc_add_shrink_grant(struct client_obd *client)
-{
- int rc;
-
- rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
- TIMEOUT_GRANT,
- osc_grant_shrink_grant_cb, NULL,
- &client->cl_grant_shrink_list);
- if (rc) {
- CERROR("add grant client %s error %d\n", cli_name(client), rc);
- return rc;
- }
- CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client));
- osc_update_next_shrink(client);
- return 0;
-}
-
-static int osc_del_shrink_grant(struct client_obd *client)
-{
- return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
- TIMEOUT_GRANT);
-}
-
-static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
-{
- /*
- * ocd_grant is the total grant amount we're expect to hold: if we've
- * been evicted, it's the new avail_grant amount, cl_dirty_pages will
- * drop to 0 as inflight RPCs fail out; otherwise, it's avail_grant +
- * dirty.
- *
- * race is tolerable here: if we're evicted, but imp_state already
- * left EVICTED state, then cl_dirty_pages must be 0 already.
- */
- spin_lock(&cli->cl_loi_list_lock);
- if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
- cli->cl_avail_grant = ocd->ocd_grant;
- else
- cli->cl_avail_grant = ocd->ocd_grant -
- (cli->cl_dirty_pages << PAGE_SHIFT);
-
- /* determine the appropriate chunk size used by osc_extent. */
- cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
- spin_unlock(&cli->cl_loi_list_lock);
-
- CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
- cli_name(cli), cli->cl_avail_grant, cli->cl_lost_grant,
- cli->cl_chunkbits);
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
- list_empty(&cli->cl_grant_shrink_list))
- osc_add_shrink_grant(cli);
-}
-
-/* We assume that the reason this OSC got a short read is because it read
- * beyond the end of a stripe file; i.e. lustre is reading a sparse file
- * via the LOV, and it _knows_ it's reading inside the file, it's just that
- * this stripe never got written at or beyond this stripe offset yet.
- */
-static void handle_short_read(int nob_read, u32 page_count,
- struct brw_page **pga)
-{
- char *ptr;
- int i = 0;
-
- /* skip bytes read OK */
- while (nob_read > 0) {
- LASSERT(page_count > 0);
-
- if (pga[i]->count > nob_read) {
- /* EOF inside this page */
- ptr = kmap(pga[i]->pg) +
- (pga[i]->off & ~PAGE_MASK);
- memset(ptr + nob_read, 0, pga[i]->count - nob_read);
- kunmap(pga[i]->pg);
- page_count--;
- i++;
- break;
- }
-
- nob_read -= pga[i]->count;
- page_count--;
- i++;
- }
-
- /* zero remaining pages */
- while (page_count-- > 0) {
- ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
- memset(ptr, 0, pga[i]->count);
- kunmap(pga[i]->pg);
- i++;
- }
-}
-
-static int check_write_rcs(struct ptlrpc_request *req,
- int requested_nob, int niocount,
- u32 page_count, struct brw_page **pga)
-{
- int i;
- __u32 *remote_rcs;
-
- remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
- sizeof(*remote_rcs) *
- niocount);
- if (!remote_rcs) {
- CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
- return -EPROTO;
- }
-
- /* return error if any niobuf was in error */
- for (i = 0; i < niocount; i++) {
- if ((int)remote_rcs[i] < 0)
- return remote_rcs[i];
-
- if (remote_rcs[i] != 0) {
- CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
- i, remote_rcs[i], req);
- return -EPROTO;
- }
- }
-
- if (req->rq_bulk->bd_nob_transferred != requested_nob) {
- CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
- req->rq_bulk->bd_nob_transferred, requested_nob);
- return -EPROTO;
- }
-
- return 0;
-}
-
-static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
-{
- if (p1->flag != p2->flag) {
- unsigned int mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
- OBD_BRW_SYNC | OBD_BRW_ASYNC |
- OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
-
- /* warn if we try to combine flags that we don't know to be
- * safe to combine
- */
- if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
- CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n",
- p1->flag, p2->flag);
- }
- return 0;
- }
-
- return (p1->off + p1->count == p2->off);
-}
-
-static u32 osc_checksum_bulk(int nob, u32 pg_count,
- struct brw_page **pga, int opc,
- enum cksum_type cksum_type)
-{
- __u32 cksum;
- int i = 0;
- struct ahash_request *hdesc;
- unsigned int bufsize;
- unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
-
- LASSERT(pg_count > 0);
-
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
- CERROR("Unable to initialize checksum hash %s\n",
- cfs_crypto_hash_name(cfs_alg));
- return PTR_ERR(hdesc);
- }
-
- while (nob > 0 && pg_count > 0) {
- unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;
-
- /* corrupt the data before we compute the checksum, to
- * simulate an OST->client data error
- */
- if (i == 0 && opc == OST_READ &&
- OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
- unsigned char *ptr = kmap(pga[i]->pg);
- int off = pga[i]->off & ~PAGE_MASK;
-
- memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
- kunmap(pga[i]->pg);
- }
- cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
- pga[i]->off & ~PAGE_MASK,
- count);
- CDEBUG(D_PAGE,
- "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
- pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
- (long)pga[i]->pg->flags, page_count(pga[i]->pg),
- page_private(pga[i]->pg),
- (int)(pga[i]->off & ~PAGE_MASK));
-
- nob -= pga[i]->count;
- pg_count--;
- i++;
- }
-
- bufsize = sizeof(cksum);
- cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
-
- /* For sending we only compute the wrong checksum instead
- * of corrupting the data so it is still correct on a redo
- */
- if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
- cksum++;
-
- return cksum;
-}
-
-static int osc_brw_prep_request(int cmd, struct client_obd *cli,
- struct obdo *oa, u32 page_count,
- struct brw_page **pga,
- struct ptlrpc_request **reqp,
- int reserve,
- int resend)
-{
- struct ptlrpc_request *req;
- struct ptlrpc_bulk_desc *desc;
- struct ost_body *body;
- struct obd_ioobj *ioobj;
- struct niobuf_remote *niobuf;
- int niocount, i, requested_nob, opc, rc;
- struct osc_brw_async_args *aa;
- struct req_capsule *pill;
- struct brw_page *pg_prev;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
- return -ENOMEM; /* Recoverable */
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
- return -EINVAL; /* Fatal */
-
- if ((cmd & OBD_BRW_WRITE) != 0) {
- opc = OST_WRITE;
- req = ptlrpc_request_alloc_pool(cli->cl_import,
- osc_rq_pool,
- &RQF_OST_BRW_WRITE);
- } else {
- opc = OST_READ;
- req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
- }
- if (!req)
- return -ENOMEM;
-
- for (niocount = i = 1; i < page_count; i++) {
- if (!can_merge_pages(pga[i - 1], pga[i]))
- niocount++;
- }
-
- pill = &req->rq_pill;
- req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
- sizeof(*ioobj));
- req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
- niocount * sizeof(*niobuf));
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
- ptlrpc_at_set_req_timeout(req);
- /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
- * retry logic
- */
- req->rq_no_retry_einprogress = 1;
-
- desc = ptlrpc_prep_bulk_imp(req, page_count,
- cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
- (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
- PTLRPC_BULK_PUT_SINK) | PTLRPC_BULK_BUF_KIOV, OST_BULK_PORTAL,
- &ptlrpc_bulk_kiov_pin_ops);
-
- if (!desc) {
- rc = -ENOMEM;
- goto out;
- }
- /* NB request now owns desc and will free it when it gets freed */
-
- body = req_capsule_client_get(pill, &RMF_OST_BODY);
- ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
- niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
- LASSERT(body && ioobj && niobuf);
-
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
-
- obdo_to_ioobj(oa, ioobj);
- ioobj->ioo_bufcnt = niocount;
- /* The high bits of ioo_max_brw tells server _maximum_ number of bulks
- * that might be send for this request. The actual number is decided
- * when the RPC is finally sent in ptlrpc_register_bulk(). It sends
- * "max - 1" for old client compatibility sending "0", and also so the
- * the actual maximum is a power-of-two number, not one less. LU-1431
- */
- ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
- LASSERT(page_count > 0);
- pg_prev = pga[0];
- for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
- struct brw_page *pg = pga[i];
- int poff = pg->off & ~PAGE_MASK;
-
- LASSERT(pg->count > 0);
- /* make sure there is no gap in the middle of page array */
- LASSERTF(page_count == 1 ||
- (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
- ergo(i > 0 && i < page_count - 1,
- poff == 0 && pg->count == PAGE_SIZE) &&
- ergo(i == page_count - 1, poff == 0)),
- "i: %d/%d pg: %p off: %llu, count: %u\n",
- i, page_count, pg, pg->off, pg->count);
- LASSERTF(i == 0 || pg->off > pg_prev->off,
- "i %d p_c %u pg %p [pri %lu ind %lu] off %llu prev_pg %p [pri %lu ind %lu] off %llu\n",
- i, page_count,
- pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
- pg_prev->pg, page_private(pg_prev->pg),
- pg_prev->pg->index, pg_prev->off);
- LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
- (pg->flag & OBD_BRW_SRVLOCK));
-
- desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff, pg->count);
- requested_nob += pg->count;
-
- if (i > 0 && can_merge_pages(pg_prev, pg)) {
- niobuf--;
- niobuf->rnb_len += pg->count;
- } else {
- niobuf->rnb_offset = pg->off;
- niobuf->rnb_len = pg->count;
- niobuf->rnb_flags = pg->flag;
- }
- pg_prev = pg;
- }
-
- LASSERTF((void *)(niobuf - niocount) ==
- req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
- "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
- &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
-
- osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
- if (resend) {
- if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
- body->oa.o_valid |= OBD_MD_FLFLAGS;
- body->oa.o_flags = 0;
- }
- body->oa.o_flags |= OBD_FL_RECOV_RESEND;
- }
-
- if (osc_should_shrink_grant(cli))
- osc_shrink_grant_local(cli, &body->oa);
-
- /* size[REQ_REC_OFF] still sizeof (*body) */
- if (opc == OST_WRITE) {
- if (cli->cl_checksum &&
- !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
- /* store cl_cksum_type in a local variable since
- * it can be changed via lprocfs
- */
- enum cksum_type cksum_type = cli->cl_cksum_type;
-
- if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
- oa->o_flags &= OBD_FL_LOCAL_MASK;
- body->oa.o_flags = 0;
- }
- body->oa.o_flags |= cksum_type_pack(cksum_type);
- body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
- body->oa.o_cksum = osc_checksum_bulk(requested_nob,
- page_count, pga,
- OST_WRITE,
- cksum_type);
- CDEBUG(D_PAGE, "checksum at write origin: %x\n",
- body->oa.o_cksum);
- /* save this in 'oa', too, for later checking */
- oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
- oa->o_flags |= cksum_type_pack(cksum_type);
- } else {
- /* clear out the checksum flag, in case this is a
- * resend but cl_checksum is no longer set. b=11238
- */
- oa->o_valid &= ~OBD_MD_FLCKSUM;
- }
- oa->o_cksum = body->oa.o_cksum;
- /* 1 RC per niobuf */
- req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
- sizeof(__u32) * niocount);
- } else {
- if (cli->cl_checksum &&
- !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
- if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
- body->oa.o_flags = 0;
- body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
- body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
- }
- }
- ptlrpc_request_set_replen(req);
-
- BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oa = oa;
- aa->aa_requested_nob = requested_nob;
- aa->aa_nio_count = niocount;
- aa->aa_page_count = page_count;
- aa->aa_resends = 0;
- aa->aa_ppga = pga;
- aa->aa_cli = cli;
- INIT_LIST_HEAD(&aa->aa_oaps);
-
- *reqp = req;
- niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
- CDEBUG(D_RPCTRACE, "brw rpc %p - object " DOSTID " offset %lld<>%lld\n",
- req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
- niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
-
- return 0;
-
- out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int check_write_checksum(struct obdo *oa,
- const struct lnet_process_id *peer,
- __u32 client_cksum, __u32 server_cksum, int nob,
- u32 page_count, struct brw_page **pga,
- enum cksum_type client_cksum_type)
-{
- __u32 new_cksum;
- char *msg;
- enum cksum_type cksum_type;
-
- if (server_cksum == client_cksum) {
- CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
- return 0;
- }
-
- cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
- oa->o_flags : 0);
- new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
- cksum_type);
-
- if (cksum_type != client_cksum_type)
- msg = "the server did not use the checksum type specified in the original request - likely a protocol problem"
- ;
- else if (new_cksum == server_cksum)
- msg = "changed on the client after we checksummed it - likely false positive due to mmap IO (bug 11742)"
- ;
- else if (new_cksum == client_cksum)
- msg = "changed in transit before arrival at OST";
- else
- msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)"
- ;
-
- LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode " DFID " object " DOSTID " extent [%llu-%llu]\n",
- msg, libcfs_nid2str(peer->nid),
- oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
- oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
- oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
- POSTID(&oa->o_oi), pga[0]->off,
- pga[page_count - 1]->off +
- pga[page_count - 1]->count - 1);
- CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
- client_cksum, client_cksum_type,
- server_cksum, cksum_type, new_cksum);
- return 1;
-}
-
-/* Note rc enters this function as number of bytes transferred */
-static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
-{
- struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
- const struct lnet_process_id *peer =
- &req->rq_import->imp_connection->c_peer;
- struct client_obd *cli = aa->aa_cli;
- struct ost_body *body;
- __u32 client_cksum = 0;
-
- if (rc < 0 && rc != -EDQUOT) {
- DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
- return rc;
- }
-
- LASSERTF(req->rq_repmsg, "rc = %d\n", rc);
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (!body) {
- DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
- return -EPROTO;
- }
-
- /* set/clear over quota flag for a uid/gid */
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
- body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
- unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
-
- CDEBUG(D_QUOTA, "setdq for [%u %u] with valid %#llx, flags %x\n",
- body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
- body->oa.o_flags);
- osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
- }
-
- osc_update_grant(cli, body);
-
- if (rc < 0)
- return rc;
-
- if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
- client_cksum = aa->aa_oa->o_cksum; /* save for later */
-
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
- if (rc > 0) {
- CERROR("Unexpected +ve rc %d\n", rc);
- return -EPROTO;
- }
- LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
-
- if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
- return -EAGAIN;
-
- if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
- check_write_checksum(&body->oa, peer, client_cksum,
- body->oa.o_cksum, aa->aa_requested_nob,
- aa->aa_page_count, aa->aa_ppga,
- cksum_type_unpack(aa->aa_oa->o_flags)))
- return -EAGAIN;
-
- rc = check_write_rcs(req, aa->aa_requested_nob,
- aa->aa_nio_count,
- aa->aa_page_count, aa->aa_ppga);
- goto out;
- }
-
- /* The rest of this function executes only for OST_READs */
-
- /* if unwrap_bulk failed, return -EAGAIN to retry */
- rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
- if (rc < 0) {
- rc = -EAGAIN;
- goto out;
- }
-
- if (rc > aa->aa_requested_nob) {
- CERROR("Unexpected rc %d (%d requested)\n", rc,
- aa->aa_requested_nob);
- return -EPROTO;
- }
-
- if (rc != req->rq_bulk->bd_nob_transferred) {
- CERROR("Unexpected rc %d (%d transferred)\n",
- rc, req->rq_bulk->bd_nob_transferred);
- return -EPROTO;
- }
-
- if (rc < aa->aa_requested_nob)
- handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
-
- if (body->oa.o_valid & OBD_MD_FLCKSUM) {
- static int cksum_counter;
- __u32 server_cksum = body->oa.o_cksum;
- char *via = "";
- char *router = "";
- enum cksum_type cksum_type;
-
- cksum_type = cksum_type_unpack(body->oa.o_valid &
- OBD_MD_FLFLAGS ?
- body->oa.o_flags : 0);
- client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
- aa->aa_ppga, OST_READ,
- cksum_type);
-
- if (peer->nid != req->rq_bulk->bd_sender) {
- via = " via ";
- router = libcfs_nid2str(req->rq_bulk->bd_sender);
- }
-
- if (server_cksum != client_cksum) {
- LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from %s%s%s inode " DFID " object " DOSTID " extent [%llu-%llu]\n",
- req->rq_import->imp_obd->obd_name,
- libcfs_nid2str(peer->nid),
- via, router,
- body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_seq : (__u64)0,
- body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_oid : 0,
- body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_ver : 0,
- POSTID(&body->oa.o_oi),
- aa->aa_ppga[0]->off,
- aa->aa_ppga[aa->aa_page_count-1]->off +
- aa->aa_ppga[aa->aa_page_count-1]->count -
- 1);
- CERROR("client %x, server %x, cksum_type %x\n",
- client_cksum, server_cksum, cksum_type);
- cksum_counter = 0;
- aa->aa_oa->o_cksum = client_cksum;
- rc = -EAGAIN;
- } else {
- cksum_counter++;
- CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
- rc = 0;
- }
- } else if (unlikely(client_cksum)) {
- static int cksum_missed;
-
- cksum_missed++;
- if ((cksum_missed & (-cksum_missed)) == cksum_missed)
- CERROR("Checksum %u requested from %s but not sent\n",
- cksum_missed, libcfs_nid2str(peer->nid));
- } else {
- rc = 0;
- }
-out:
- if (rc >= 0)
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
- aa->aa_oa, &body->oa);
-
- return rc;
-}
-
-static int osc_brw_redo_request(struct ptlrpc_request *request,
- struct osc_brw_async_args *aa, int rc)
-{
- struct ptlrpc_request *new_req;
- struct osc_brw_async_args *new_aa;
- struct osc_async_page *oap;
-
- DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
- "redo for recoverable error %d", rc);
-
- rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
- OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
- aa->aa_cli, aa->aa_oa,
- aa->aa_page_count, aa->aa_ppga,
- &new_req, 0, 1);
- if (rc)
- return rc;
-
- list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- if (oap->oap_request) {
- LASSERTF(request == oap->oap_request,
- "request %p != oap_request %p\n",
- request, oap->oap_request);
- if (oap->oap_interrupted) {
- ptlrpc_req_finished(new_req);
- return -EINTR;
- }
- }
- }
- /* New request takes over pga and oaps from old request.
- * Note that copying a list_head doesn't work, need to move it...
- */
- aa->aa_resends++;
- new_req->rq_interpret_reply = request->rq_interpret_reply;
- new_req->rq_async_args = request->rq_async_args;
- new_req->rq_commit_cb = request->rq_commit_cb;
- /* cap resend delay to the current request timeout, this is similar to
- * what ptlrpc does (see after_reply())
- */
- if (aa->aa_resends > new_req->rq_timeout)
- new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
- else
- new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
- new_req->rq_generation_set = 1;
- new_req->rq_import_generation = request->rq_import_generation;
-
- new_aa = ptlrpc_req_async_args(new_req);
-
- INIT_LIST_HEAD(&new_aa->aa_oaps);
- list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
- INIT_LIST_HEAD(&new_aa->aa_exts);
- list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
- new_aa->aa_resends = aa->aa_resends;
-
- list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
- if (oap->oap_request) {
- ptlrpc_req_finished(oap->oap_request);
- oap->oap_request = ptlrpc_request_addref(new_req);
- }
- }
-
- /* XXX: This code will run into problem if we're going to support
- * to add a series of BRW RPCs into a self-defined ptlrpc_request_set
- * and wait for all of them to be finished. We should inherit request
- * set from old request.
- */
- ptlrpcd_add_req(new_req);
-
- DEBUG_REQ(D_INFO, new_req, "new request");
- return 0;
-}
-
-/*
- * ugh, we want disk allocation on the target to happen in offset order. we'll
- * follow sedgewicks advice and stick to the dead simple shellsort -- it'll do
- * fine for our small page arrays and doesn't require allocation. its an
- * insertion sort that swaps elements that are strides apart, shrinking the
- * stride down until its '1' and the array is sorted.
- */
-static void sort_brw_pages(struct brw_page **array, int num)
-{
- int stride, i, j;
- struct brw_page *tmp;
-
- if (num == 1)
- return;
- for (stride = 1; stride < num ; stride = (stride * 3) + 1)
- ;
-
- do {
- stride /= 3;
- for (i = stride ; i < num ; i++) {
- tmp = array[i];
- j = i;
- while (j >= stride && array[j - stride]->off > tmp->off) {
- array[j] = array[j - stride];
- j -= stride;
- }
- array[j] = tmp;
- }
- } while (stride > 1);
-}
-
-static void osc_release_ppga(struct brw_page **ppga, u32 count)
-{
- LASSERT(ppga);
- kfree(ppga);
-}
-
-static int brw_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
-{
- struct osc_brw_async_args *aa = data;
- struct osc_extent *ext;
- struct osc_extent *tmp;
- struct client_obd *cli = aa->aa_cli;
-
- rc = osc_brw_fini_request(req, rc);
- CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
- /* When server return -EINPROGRESS, client should always retry
- * regardless of the number of times the bulk was resent already.
- */
- if (osc_recoverable_error(rc)) {
- if (req->rq_import_generation !=
- req->rq_import->imp_generation) {
- CDEBUG(D_HA, "%s: resend cross eviction for object: " DOSTID ", rc = %d.\n",
- req->rq_import->imp_obd->obd_name,
- POSTID(&aa->aa_oa->o_oi), rc);
- } else if (rc == -EINPROGRESS ||
- client_should_resend(aa->aa_resends, aa->aa_cli)) {
- rc = osc_brw_redo_request(req, aa, rc);
- } else {
- CERROR("%s: too many resent retries for object: %llu:%llu, rc = %d.\n",
- req->rq_import->imp_obd->obd_name,
- POSTID(&aa->aa_oa->o_oi), rc);
- }
-
- if (rc == 0)
- return 0;
- else if (rc == -EAGAIN || rc == -EINPROGRESS)
- rc = -EIO;
- }
-
- if (rc == 0) {
- struct obdo *oa = aa->aa_oa;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- unsigned long valid = 0;
- struct cl_object *obj;
- struct osc_async_page *last;
-
- last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
- obj = osc2cl(last->oap_obj);
-
- cl_object_attr_lock(obj);
- if (oa->o_valid & OBD_MD_FLBLOCKS) {
- attr->cat_blocks = oa->o_blocks;
- valid |= CAT_BLOCKS;
- }
- if (oa->o_valid & OBD_MD_FLMTIME) {
- attr->cat_mtime = oa->o_mtime;
- valid |= CAT_MTIME;
- }
- if (oa->o_valid & OBD_MD_FLATIME) {
- attr->cat_atime = oa->o_atime;
- valid |= CAT_ATIME;
- }
- if (oa->o_valid & OBD_MD_FLCTIME) {
- attr->cat_ctime = oa->o_ctime;
- valid |= CAT_CTIME;
- }
-
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
- struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
- loff_t last_off = last->oap_count + last->oap_obj_off +
- last->oap_page_off;
-
- /* Change file size if this is an out of quota or
- * direct IO write and it extends the file size
- */
- if (loi->loi_lvb.lvb_size < last_off) {
- attr->cat_size = last_off;
- valid |= CAT_SIZE;
- }
- /* Extend KMS if it's not a lockless write */
- if (loi->loi_kms < last_off &&
- oap2osc_page(last)->ops_srvlock == 0) {
- attr->cat_kms = last_off;
- valid |= CAT_KMS;
- }
- }
-
- if (valid != 0)
- cl_object_attr_update(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
- }
- kmem_cache_free(obdo_cachep, aa->aa_oa);
-
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
- osc_inc_unstable_pages(req);
-
- list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
- list_del_init(&ext->oe_link);
- osc_extent_finish(env, ext, 1, rc);
- }
- LASSERT(list_empty(&aa->aa_exts));
- LASSERT(list_empty(&aa->aa_oaps));
-
- osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
- ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
-
- spin_lock(&cli->cl_loi_list_lock);
- /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
- * is called so we know whether to go to sync BRWs or wait for more
- * RPCs to complete
- */
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
- cli->cl_w_in_flight--;
- else
- cli->cl_r_in_flight--;
- osc_wake_cache_waiters(cli);
- spin_unlock(&cli->cl_loi_list_lock);
-
- osc_io_unplug(env, cli, NULL);
- return rc;
-}
-
-static void brw_commit(struct ptlrpc_request *req)
-{
- /*
- * If osc_inc_unstable_pages (via osc_extent_finish) races with
- * this called via the rq_commit_cb, I need to ensure
- * osc_dec_unstable_pages is still called. Otherwise unstable
- * pages may be leaked.
- */
- spin_lock(&req->rq_lock);
- if (unlikely(req->rq_unstable)) {
- req->rq_unstable = 0;
- spin_unlock(&req->rq_lock);
- osc_dec_unstable_pages(req);
- } else {
- req->rq_committed = 1;
- spin_unlock(&req->rq_lock);
- }
-}
-
-/**
- * Build an RPC by the list of extent @ext_list. The caller must ensure
- * that the total pages in this list are NOT over max pages per RPC.
- * Extents in the list must be in OES_RPC state.
- */
-int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
- struct list_head *ext_list, int cmd)
-{
- struct ptlrpc_request *req = NULL;
- struct osc_extent *ext;
- struct brw_page **pga = NULL;
- struct osc_brw_async_args *aa = NULL;
- struct obdo *oa = NULL;
- struct osc_async_page *oap;
- struct osc_object *obj = NULL;
- struct cl_req_attr *crattr = NULL;
- u64 starting_offset = OBD_OBJECT_EOF;
- u64 ending_offset = 0;
- int mpflag = 0;
- int mem_tight = 0;
- int page_count = 0;
- bool soft_sync = false;
- bool interrupted = false;
- int i;
- int rc;
- struct ost_body *body;
- LIST_HEAD(rpc_list);
-
- LASSERT(!list_empty(ext_list));
-
- /* add pages into rpc_list to build BRW rpc */
- list_for_each_entry(ext, ext_list, oe_link) {
- LASSERT(ext->oe_state == OES_RPC);
- mem_tight |= ext->oe_memalloc;
- page_count += ext->oe_nr_pages;
- if (!obj)
- obj = ext->oe_obj;
- }
-
- soft_sync = osc_over_unstable_soft_limit(cli);
- if (mem_tight)
- mpflag = cfs_memory_pressure_get_and_set();
-
- pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
- if (!pga) {
- rc = -ENOMEM;
- goto out;
- }
-
- oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!oa) {
- rc = -ENOMEM;
- goto out;
- }
-
- i = 0;
- list_for_each_entry(ext, ext_list, oe_link) {
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- if (mem_tight)
- oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
- if (soft_sync)
- oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
- pga[i] = &oap->oap_brw_page;
- pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
- i++;
-
- list_add_tail(&oap->oap_rpc_item, &rpc_list);
- if (starting_offset == OBD_OBJECT_EOF ||
- starting_offset > oap->oap_obj_off)
- starting_offset = oap->oap_obj_off;
- else
- LASSERT(!oap->oap_page_off);
- if (ending_offset < oap->oap_obj_off + oap->oap_count)
- ending_offset = oap->oap_obj_off +
- oap->oap_count;
- else
- LASSERT(oap->oap_page_off + oap->oap_count ==
- PAGE_SIZE);
- if (oap->oap_interrupted)
- interrupted = true;
- }
- }
-
- /* first page in the list */
- oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
-
- crattr = &osc_env_info(env)->oti_req_attr;
- memset(crattr, 0, sizeof(*crattr));
- crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
- crattr->cra_flags = ~0ULL;
- crattr->cra_page = oap2cl_page(oap);
- crattr->cra_oa = oa;
- cl_req_attr_set(env, osc2cl(obj), crattr);
-
- sort_brw_pages(pga, page_count);
- rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 1, 0);
- if (rc != 0) {
- CERROR("prep_req failed: %d\n", rc);
- goto out;
- }
-
- req->rq_commit_cb = brw_commit;
- req->rq_interpret_reply = brw_interpret;
-
- req->rq_memalloc = mem_tight != 0;
- oap->oap_request = ptlrpc_request_addref(req);
- if (interrupted && !req->rq_intr)
- ptlrpc_mark_interrupted(req);
-
- /* Need to update the timestamps after the request is built in case
- * we race with setattr (locally or in queue at OST). If OST gets
- * later setattr before earlier BRW (as determined by the request xid),
- * the OST will not use BRW timestamps. Sadly, there is no obvious
- * way to do this in a single call. bug 10150
- */
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- crattr->cra_oa = &body->oa;
- crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
- cl_req_attr_set(env, osc2cl(obj), crattr);
- lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
-
- BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- INIT_LIST_HEAD(&aa->aa_oaps);
- list_splice_init(&rpc_list, &aa->aa_oaps);
- INIT_LIST_HEAD(&aa->aa_exts);
- list_splice_init(ext_list, &aa->aa_exts);
-
- spin_lock(&cli->cl_loi_list_lock);
- starting_offset >>= PAGE_SHIFT;
- if (cmd == OBD_BRW_READ) {
- cli->cl_r_in_flight++;
- lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
- lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
- lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
- starting_offset + 1);
- } else {
- cli->cl_w_in_flight++;
- lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
- lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
- lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
- starting_offset + 1);
- }
- spin_unlock(&cli->cl_loi_list_lock);
-
- DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight",
- page_count, aa, cli->cl_r_in_flight,
- cli->cl_w_in_flight);
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
-
- ptlrpcd_add_req(req);
- rc = 0;
-
-out:
- if (mem_tight != 0)
- cfs_memory_pressure_restore(mpflag);
-
- if (rc != 0) {
- LASSERT(!req);
-
- if (oa)
- kmem_cache_free(obdo_cachep, oa);
- kfree(pga);
- /* this should happen rarely and is pretty bad, it makes the
- * pending list not follow the dirty order
- */
- while (!list_empty(ext_list)) {
- ext = list_entry(ext_list->next, struct osc_extent,
- oe_link);
- list_del_init(&ext->oe_link);
- osc_extent_finish(env, ext, 0, rc);
- }
- }
- return rc;
-}
-
-static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
-{
- int set = 0;
-
- LASSERT(lock);
-
- lock_res_and_lock(lock);
-
- if (!lock->l_ast_data)
- lock->l_ast_data = data;
- if (lock->l_ast_data == data)
- set = 1;
-
- unlock_res_and_lock(lock);
-
- return set;
-}
-
-static int osc_enqueue_fini(struct ptlrpc_request *req,
- osc_enqueue_upcall_f upcall, void *cookie,
- struct lustre_handle *lockh, enum ldlm_mode mode,
- __u64 *flags, int agl, int errcode)
-{
- bool intent = *flags & LDLM_FL_HAS_INTENT;
- int rc;
-
- /* The request was created before ldlm_cli_enqueue call. */
- if (intent && errcode == ELDLM_LOCK_ABORTED) {
- struct ldlm_reply *rep;
-
- rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-
- rep->lock_policy_res1 =
- ptlrpc_status_ntoh(rep->lock_policy_res1);
- if (rep->lock_policy_res1)
- errcode = rep->lock_policy_res1;
- if (!agl)
- *flags |= LDLM_FL_LVB_READY;
- } else if (errcode == ELDLM_OK) {
- *flags |= LDLM_FL_LVB_READY;
- }
-
- /* Call the update callback. */
- rc = (*upcall)(cookie, lockh, errcode);
- /* release the reference taken in ldlm_cli_enqueue() */
- if (errcode == ELDLM_LOCK_MATCHED)
- errcode = ELDLM_OK;
- if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
- ldlm_lock_decref(lockh, mode);
-
- return rc;
-}
-
-static int osc_enqueue_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_enqueue_args *aa, int rc)
-{
- struct ldlm_lock *lock;
- struct lustre_handle *lockh = &aa->oa_lockh;
- enum ldlm_mode mode = aa->oa_mode;
- struct ost_lvb *lvb = aa->oa_lvb;
- __u32 lvb_len = sizeof(*lvb);
- __u64 flags = 0;
-
-
- /* ldlm_cli_enqueue is holding a reference on the lock, so it must
- * be valid.
- */
- lock = ldlm_handle2lock(lockh);
- LASSERTF(lock, "lockh %llx, req %p, aa %p - client evicted?\n",
- lockh->cookie, req, aa);
-
- /* Take an additional reference so that a blocking AST that
- * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
- * to arrive after an upcall has been executed by
- * osc_enqueue_fini().
- */
- ldlm_lock_addref(lockh, mode);
-
- /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
-
- /* Let CP AST to grant the lock first. */
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
-
- if (aa->oa_agl) {
- LASSERT(!aa->oa_lvb);
- LASSERT(!aa->oa_flags);
- aa->oa_flags = &flags;
- }
-
- /* Complete obtaining the lock procedure. */
- rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
- aa->oa_mode, aa->oa_flags, lvb, lvb_len,
- lockh, rc);
- /* Complete osc stuff. */
- rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
- aa->oa_flags, aa->oa_agl, rc);
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
-
- ldlm_lock_decref(lockh, mode);
- LDLM_LOCK_PUT(lock);
- return rc;
-}
-
-struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
-
-/* When enqueuing asynchronously, locks are not ordered, we can obtain a lock
- * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
- * other synchronous requests, however keeping some locks and trying to obtain
- * others may take a considerable amount of time in a case of ost failure; and
- * when other sync requests do not get released lock from a client, the client
- * is evicted from the cluster -- such scenaries make the life difficult, so
- * release locks just after they are obtained.
- */
-int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, union ldlm_policy_data *policy,
- struct ost_lvb *lvb, int kms_valid,
- osc_enqueue_upcall_f upcall, void *cookie,
- struct ldlm_enqueue_info *einfo,
- struct ptlrpc_request_set *rqset, int async, int agl)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lustre_handle lockh = { 0 };
- struct ptlrpc_request *req = NULL;
- int intent = *flags & LDLM_FL_HAS_INTENT;
- __u64 match_flags = *flags;
- enum ldlm_mode mode;
- int rc;
-
- /* Filesystem lock extents are extended to page boundaries so that
- * dealing with the page cache is a little smoother.
- */
- policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
- policy->l_extent.end |= ~PAGE_MASK;
-
- /*
- * kms is not valid when either object is completely fresh (so that no
- * locks are cached), or object was evicted. In the latter case cached
- * lock cannot be used, because it would prime inode state with
- * potentially stale LVB.
- */
- if (!kms_valid)
- goto no_match;
-
- /* Next, search for already existing extent locks that will cover us */
- /* If we're trying to read, we also search for an existing PW lock. The
- * VFS and page cache already protect us locally, so lots of readers/
- * writers can share a single PW lock.
- *
- * There are problems with conversion deadlocks, so instead of
- * converting a read lock to a write lock, we'll just enqueue a new
- * one.
- *
- * At some point we should cancel the read lock instead of making them
- * send us a blocking callback, but there are problems with canceling
- * locks out from other users right now, too.
- */
- mode = einfo->ei_mode;
- if (einfo->ei_mode == LCK_PR)
- mode |= LCK_PW;
- if (agl == 0)
- match_flags |= LDLM_FL_LVB_READY;
- if (intent != 0)
- match_flags |= LDLM_FL_BLOCK_GRANTED;
- mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
- einfo->ei_type, policy, mode, &lockh, 0);
- if (mode) {
- struct ldlm_lock *matched;
-
- if (*flags & LDLM_FL_TEST_LOCK)
- return ELDLM_OK;
-
- matched = ldlm_handle2lock(&lockh);
- if (agl) {
- /* AGL enqueues DLM locks speculatively. Therefore if
- * it already exists a DLM lock, it wll just inform the
- * caller to cancel the AGL process for this stripe.
- */
- ldlm_lock_decref(&lockh, mode);
- LDLM_LOCK_PUT(matched);
- return -ECANCELED;
- } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
- *flags |= LDLM_FL_LVB_READY;
- /* We already have a lock, and it's referenced. */
- (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
-
- ldlm_lock_decref(&lockh, mode);
- LDLM_LOCK_PUT(matched);
- return ELDLM_OK;
- } else {
- ldlm_lock_decref(&lockh, mode);
- LDLM_LOCK_PUT(matched);
- }
- }
-
-no_match:
- if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
- return -ENOLCK;
- if (intent) {
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_ENQUEUE_LVB);
- if (!req)
- return -ENOMEM;
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- sizeof(*lvb));
- ptlrpc_request_set_replen(req);
- }
-
- /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
- *flags &= ~LDLM_FL_BLOCK_GRANTED;
-
- rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
- sizeof(*lvb), LVB_T_OST, &lockh, async);
- if (async) {
- if (!rc) {
- struct osc_enqueue_args *aa;
-
- BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->oa_exp = exp;
- aa->oa_mode = einfo->ei_mode;
- aa->oa_type = einfo->ei_type;
- lustre_handle_copy(&aa->oa_lockh, &lockh);
- aa->oa_upcall = upcall;
- aa->oa_cookie = cookie;
- aa->oa_agl = !!agl;
- if (!agl) {
- aa->oa_flags = flags;
- aa->oa_lvb = lvb;
- } else {
- /* AGL is essentially to enqueue an DLM lock
- * in advance, so we don't care about the
- * result of AGL enqueue.
- */
- aa->oa_lvb = NULL;
- aa->oa_flags = NULL;
- }
-
- req->rq_interpret_reply =
- (ptlrpc_interpterer_t)osc_enqueue_interpret;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
- } else if (intent) {
- ptlrpc_req_finished(req);
- }
- return rc;
- }
-
- rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
- flags, agl, rc);
- if (intent)
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- enum ldlm_type type, union ldlm_policy_data *policy,
- enum ldlm_mode mode, __u64 *flags, void *data,
- struct lustre_handle *lockh, int unref)
-{
- struct obd_device *obd = exp->exp_obd;
- __u64 lflags = *flags;
- enum ldlm_mode rc;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
- return -EIO;
-
- /* Filesystem lock extents are extended to page boundaries so that
- * dealing with the page cache is a little smoother
- */
- policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
- policy->l_extent.end |= ~PAGE_MASK;
-
- /* Next, search for already existing extent locks that will cover us */
- /* If we're trying to read, we also search for an existing PW lock. The
- * VFS and page cache already protect us locally, so lots of readers/
- * writers can share a single PW lock.
- */
- rc = mode;
- if (mode == LCK_PR)
- rc |= LCK_PW;
- rc = ldlm_lock_match(obd->obd_namespace, lflags,
- res_id, type, policy, rc, lockh, unref);
- if (!rc || lflags & LDLM_FL_TEST_LOCK)
- return rc;
-
- if (data) {
- struct ldlm_lock *lock = ldlm_handle2lock(lockh);
-
- LASSERT(lock);
- if (!osc_set_lock_data(lock, data)) {
- ldlm_lock_decref(lockh, rc);
- rc = 0;
- }
- LDLM_LOCK_PUT(lock);
- }
- return rc;
-}
-
-static int osc_statfs_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_async_args *aa, int rc)
-{
- struct obd_statfs *msfs;
-
- if (rc == -EBADR)
- /* The request has in fact never been sent
- * due to issues at a higher level (LOV).
- * Exit immediately since the caller is
- * aware of the problem and takes care
- * of the clean up
- */
- return rc;
-
- if ((rc == -ENOTCONN || rc == -EAGAIN) &&
- (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) {
- rc = 0;
- goto out;
- }
-
- if (rc != 0)
- goto out;
-
- msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (!msfs) {
- rc = -EPROTO;
- goto out;
- }
-
- *aa->aa_oi->oi_osfs = *msfs;
-out:
- rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
- return rc;
-}
-
-static int osc_statfs_async(struct obd_export *exp,
- struct obd_info *oinfo, __u64 max_age,
- struct ptlrpc_request_set *rqset)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- struct osc_async_args *aa;
- int rc;
-
- /* We could possibly pass max_age in the request (as an absolute
- * timestamp or a "seconds.usec ago") so the target can avoid doing
- * extra calls into the filesystem if that isn't necessary (e.g.
- * during mount that would help a bit). Having relative timestamps
- * is not so great if request processing is slow, while absolute
- * timestamps are not ideal because they need time synchronization.
- */
- req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- ptlrpc_request_set_replen(req);
- req->rq_request_portal = OST_CREATE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
- /* procfs requests not want stat in wait for avoid deadlock */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
-
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
- BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oi = oinfo;
-
- ptlrpc_set_add_req(rqset, req);
- return 0;
-}
-
-static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age, __u32 flags)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct obd_statfs *msfs;
- struct ptlrpc_request *req;
- struct obd_import *imp = NULL;
- int rc;
-
- /* Since the request might also come from lprocfs, so we need
- * sync this with client_disconnect_export Bug15684
- */
- down_read(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import)
- imp = class_import_get(obd->u.cli.cl_import);
- up_read(&obd->u.cli.cl_sem);
- if (!imp)
- return -ENODEV;
-
- /* We could possibly pass max_age in the request (as an absolute
- * timestamp or a "seconds.usec ago") so the target can avoid doing
- * extra calls into the filesystem if that isn't necessary (e.g.
- * during mount that would help a bit). Having relative timestamps
- * is not so great if request processing is slow, while absolute
- * timestamps are not ideal because they need time synchronization.
- */
- req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
-
- class_import_put(imp);
-
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- ptlrpc_request_set_replen(req);
- req->rq_request_portal = OST_CREATE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- if (flags & OBD_STATFS_NODELAY) {
- /* procfs requests not want stat in wait for avoid deadlock */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (!msfs) {
- rc = -EPROTO;
- goto out;
- }
-
- *osfs = *msfs;
-
- out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void __user *uarg)
-{
- struct obd_device *obd = exp->exp_obd;
- struct obd_ioctl_data *data = karg;
- int err = 0;
-
- if (!try_module_get(THIS_MODULE)) {
- CERROR("%s: cannot get module '%s'\n", obd->obd_name,
- module_name(THIS_MODULE));
- return -EINVAL;
- }
- switch (cmd) {
- case OBD_IOC_CLIENT_RECOVER:
- err = ptlrpc_recover_import(obd->u.cli.cl_import,
- data->ioc_inlbuf1, 0);
- if (err > 0)
- err = 0;
- goto out;
- case IOC_OSC_SET_ACTIVE:
- err = ptlrpc_set_import_active(obd->u.cli.cl_import,
- data->ioc_offset);
- goto out;
- case OBD_IOC_PING_TARGET:
- err = ptlrpc_obd_ping(obd);
- goto out;
- default:
- CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
- cmd, current_comm());
- err = -ENOTTY;
- goto out;
- }
-out:
- module_put(THIS_MODULE);
- return err;
-}
-
-static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- struct obd_device *obd = exp->exp_obd;
- struct obd_import *imp = class_exp2cliimp(exp);
- char *tmp;
- int rc;
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
-
- if (KEY_IS(KEY_CHECKSUM)) {
- if (vallen != sizeof(int))
- return -EINVAL;
- exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
- return 0;
- }
-
- if (KEY_IS(KEY_SPTLRPC_CONF)) {
- sptlrpc_conf_client_adapt(obd);
- return 0;
- }
-
- if (KEY_IS(KEY_FLUSH_CTX)) {
- sptlrpc_import_flush_my_ctx(imp);
- return 0;
- }
-
- if (KEY_IS(KEY_CACHE_SET)) {
- struct client_obd *cli = &obd->u.cli;
-
- LASSERT(!cli->cl_cache); /* only once */
- cli->cl_cache = val;
- cl_cache_incref(cli->cl_cache);
- cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
-
- /* add this osc into entity list */
- LASSERT(list_empty(&cli->cl_lru_osc));
- spin_lock(&cli->cl_cache->ccc_lru_lock);
- list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
- spin_unlock(&cli->cl_cache->ccc_lru_lock);
-
- return 0;
- }
-
- if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
- struct client_obd *cli = &obd->u.cli;
- long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
- long target = *(long *)val;
-
- nr = osc_lru_shrink(env, cli, min(nr, target), true);
- *(long *)val -= nr;
- return 0;
- }
-
- if (!set && !KEY_IS(KEY_GRANT_SHRINK))
- return -EINVAL;
-
- /* We pass all other commands directly to OST. Since nobody calls osc
- * methods directly and everybody is supposed to go through LOV, we
- * assume lov checked invalid values for us.
- * The only recognised values so far are evict_by_nid and mds_conn.
- * Even if something bad goes through, we'd get a -EINVAL from OST
- * anyway.
- */
-
- req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
- &RQF_OST_SET_GRANT_INFO :
- &RQF_OBD_SET_INFO);
- if (!req)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT, keylen);
- if (!KEY_IS(KEY_GRANT_SHRINK))
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
- RCL_CLIENT, vallen);
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
- &RMF_OST_BODY :
- &RMF_SETINFO_VAL);
- memcpy(tmp, val, vallen);
-
- if (KEY_IS(KEY_GRANT_SHRINK)) {
- struct osc_brw_async_args *aa;
- struct obdo *oa;
-
- BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!oa) {
- ptlrpc_req_finished(req);
- return -ENOMEM;
- }
- *oa = ((struct ost_body *)val)->oa;
- aa->aa_oa = oa;
- req->rq_interpret_reply = osc_shrink_grant_interpret;
- }
-
- ptlrpc_request_set_replen(req);
- if (!KEY_IS(KEY_GRANT_SHRINK)) {
- LASSERT(set);
- ptlrpc_set_add_req(set, req);
- ptlrpc_check_set(NULL, set);
- } else {
- ptlrpcd_add_req(req);
- }
-
- return 0;
-}
-
-static int osc_reconnect(const struct lu_env *env,
- struct obd_export *exp, struct obd_device *obd,
- struct obd_uuid *cluuid,
- struct obd_connect_data *data,
- void *localdata)
-{
- struct client_obd *cli = &obd->u.cli;
-
- if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
- long lost_grant;
-
- spin_lock(&cli->cl_loi_list_lock);
- data->ocd_grant = (cli->cl_avail_grant +
- (cli->cl_dirty_pages << PAGE_SHIFT)) ?:
- 2 * cli_brw_size(obd);
- lost_grant = cli->cl_lost_grant;
- cli->cl_lost_grant = 0;
- spin_unlock(&cli->cl_loi_list_lock);
-
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
- data->ocd_connect_flags,
- data->ocd_version, data->ocd_grant, lost_grant);
- }
-
- return 0;
-}
-
-static int osc_disconnect(struct obd_export *exp)
-{
- struct obd_device *obd = class_exp2obd(exp);
- int rc;
-
- rc = client_disconnect_export(exp);
- /**
- * Initially we put del_shrink_grant before disconnect_export, but it
- * causes the following problem if setup (connect) and cleanup
- * (disconnect) are tangled together.
- * connect p1 disconnect p2
- * ptlrpc_connect_import
- * ............... class_manual_cleanup
- * osc_disconnect
- * del_shrink_grant
- * ptlrpc_connect_interrupt
- * init_grant_shrink
- * add this client to shrink list
- * cleanup_osc
- * Bang! pinger trigger the shrink.
- * So the osc should be disconnected from the shrink list, after we
- * are sure the import has been destroyed. BUG18662
- */
- if (!obd->u.cli.cl_import)
- osc_del_shrink_grant(&obd->u.cli);
- return rc;
-}
-
-static int osc_ldlm_resource_invalidate(struct cfs_hash *hs,
- struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- struct osc_object *osc = NULL;
- struct lu_env *env = arg;
- struct ldlm_lock *lock;
-
- lock_res(res);
- list_for_each_entry(lock, &res->lr_granted, l_res_link) {
- if (lock->l_ast_data && !osc) {
- osc = lock->l_ast_data;
- cl_object_get(osc2cl(osc));
- }
-
- /*
- * clear LDLM_FL_CLEANED flag to make sure it will be canceled
- * by the 2nd round of ldlm_namespace_clean() call in
- * osc_import_event().
- */
- ldlm_clear_cleaned(lock);
- }
- unlock_res(res);
-
- if (osc) {
- osc_object_invalidate(env, osc);
- cl_object_put(env, osc2cl(osc));
- }
-
- return 0;
-}
-
-static int osc_import_event(struct obd_device *obd,
- struct obd_import *imp,
- enum obd_import_event event)
-{
- struct client_obd *cli;
- int rc = 0;
-
- LASSERT(imp->imp_obd == obd);
-
- switch (event) {
- case IMP_EVENT_DISCON: {
- cli = &obd->u.cli;
- spin_lock(&cli->cl_loi_list_lock);
- cli->cl_avail_grant = 0;
- cli->cl_lost_grant = 0;
- spin_unlock(&cli->cl_loi_list_lock);
- break;
- }
- case IMP_EVENT_INACTIVE: {
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
- break;
- }
- case IMP_EVENT_INVALIDATE: {
- struct ldlm_namespace *ns = obd->obd_namespace;
- struct lu_env *env;
- u16 refcheck;
-
- ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- osc_io_unplug(env, &obd->u.cli, NULL);
-
- cfs_hash_for_each_nolock(ns->ns_rs_hash,
- osc_ldlm_resource_invalidate,
- env, 0);
- cl_env_put(env, &refcheck);
-
- ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
- } else {
- rc = PTR_ERR(env);
- }
- break;
- }
- case IMP_EVENT_ACTIVE: {
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
- break;
- }
- case IMP_EVENT_OCD: {
- struct obd_connect_data *ocd = &imp->imp_connect_data;
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
- osc_init_grant(&obd->u.cli, ocd);
-
- /* See bug 7198 */
- if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
- imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
-
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
- break;
- }
- case IMP_EVENT_DEACTIVATE: {
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
- break;
- }
- case IMP_EVENT_ACTIVATE: {
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
- break;
- }
- default:
- CERROR("Unknown import event %d\n", event);
- LBUG();
- }
- return rc;
-}
-
-/**
- * Determine whether the lock can be canceled before replaying the lock
- * during recovery, see bug16774 for detailed information.
- *
- * \retval zero the lock can't be canceled
- * \retval other ok to cancel
- */
-static int osc_cancel_weight(struct ldlm_lock *lock)
-{
- /*
- * Cancel all unused and granted extent lock.
- */
- if (lock->l_resource->lr_type == LDLM_EXTENT &&
- lock->l_granted_mode == lock->l_req_mode &&
- osc_ldlm_weigh_ast(lock) == 0)
- return 1;
-
- return 0;
-}
-
-static int brw_queue_work(const struct lu_env *env, void *data)
-{
- struct client_obd *cli = data;
-
- CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
-
- osc_io_unplug(env, cli, NULL);
- return 0;
-}
-
-int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lprocfs_static_vars lvars = { NULL };
- struct client_obd *cli = &obd->u.cli;
- void *handler;
- int rc;
- int adding;
- int added;
- int req_count;
-
- rc = ptlrpcd_addref();
- if (rc)
- return rc;
-
- rc = client_obd_setup(obd, lcfg);
- if (rc)
- goto out_ptlrpcd;
-
- handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
- if (IS_ERR(handler)) {
- rc = PTR_ERR(handler);
- goto out_client_setup;
- }
- cli->cl_writeback_work = handler;
-
- handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
- if (IS_ERR(handler)) {
- rc = PTR_ERR(handler);
- goto out_ptlrpcd_work;
- }
-
- cli->cl_lru_work = handler;
-
- rc = osc_quota_setup(obd);
- if (rc)
- goto out_ptlrpcd_work;
-
- cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
- lprocfs_osc_init_vars(&lvars);
- if (lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars) == 0) {
- lproc_osc_attach_seqstat(obd);
- sptlrpc_lprocfs_cliobd_attach(obd);
- ptlrpc_lprocfs_register_obd(obd);
- }
-
- /*
- * We try to control the total number of requests with a upper limit
- * osc_reqpool_maxreqcount. There might be some race which will cause
- * over-limit allocation, but it is fine.
- */
- req_count = atomic_read(&osc_pool_req_count);
- if (req_count < osc_reqpool_maxreqcount) {
- adding = cli->cl_max_rpcs_in_flight + 2;
- if (req_count + adding > osc_reqpool_maxreqcount)
- adding = osc_reqpool_maxreqcount - req_count;
-
- added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
- atomic_add(added, &osc_pool_req_count);
- }
-
- INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
- ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
-
- spin_lock(&osc_shrink_lock);
- list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
- spin_unlock(&osc_shrink_lock);
-
- return rc;
-
-out_ptlrpcd_work:
- if (cli->cl_writeback_work) {
- ptlrpcd_destroy_work(cli->cl_writeback_work);
- cli->cl_writeback_work = NULL;
- }
- if (cli->cl_lru_work) {
- ptlrpcd_destroy_work(cli->cl_lru_work);
- cli->cl_lru_work = NULL;
- }
-out_client_setup:
- client_obd_cleanup(obd);
-out_ptlrpcd:
- ptlrpcd_decref();
- return rc;
-}
-
-static int osc_precleanup(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
-
- /* LU-464
- * for echo client, export may be on zombie list, wait for
- * zombie thread to cull it, because cli.cl_import will be
- * cleared in client_disconnect_export():
- * class_export_destroy() -> obd_cleanup() ->
- * echo_device_free() -> echo_client_cleanup() ->
- * obd_disconnect() -> osc_disconnect() ->
- * client_disconnect_export()
- */
- obd_zombie_barrier();
- if (cli->cl_writeback_work) {
- ptlrpcd_destroy_work(cli->cl_writeback_work);
- cli->cl_writeback_work = NULL;
- }
-
- if (cli->cl_lru_work) {
- ptlrpcd_destroy_work(cli->cl_lru_work);
- cli->cl_lru_work = NULL;
- }
-
- obd_cleanup_client_import(obd);
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
- return 0;
-}
-
-static int osc_cleanup(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
- int rc;
-
- spin_lock(&osc_shrink_lock);
- list_del(&cli->cl_shrink_list);
- spin_unlock(&osc_shrink_lock);
-
- /* lru cleanup */
- if (cli->cl_cache) {
- LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
- spin_lock(&cli->cl_cache->ccc_lru_lock);
- list_del_init(&cli->cl_lru_osc);
- spin_unlock(&cli->cl_cache->ccc_lru_lock);
- cli->cl_lru_left = NULL;
- cl_cache_decref(cli->cl_cache);
- cli->cl_cache = NULL;
- }
-
- /* free memory of osc quota cache */
- osc_quota_cleanup(obd);
-
- rc = client_obd_cleanup(obd);
-
- ptlrpcd_decref();
- return rc;
-}
-
-int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lprocfs_static_vars lvars = { NULL };
- int rc = 0;
-
- lprocfs_osc_init_vars(&lvars);
-
- switch (lcfg->lcfg_command) {
- default:
- rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
- lcfg, obd);
- if (rc > 0)
- rc = 0;
- break;
- }
-
- return rc;
-}
-
-static int osc_process_config(struct obd_device *obd, u32 len, void *buf)
-{
- return osc_process_config_base(obd, buf);
-}
-
-static struct obd_ops osc_obd_ops = {
- .owner = THIS_MODULE,
- .setup = osc_setup,
- .precleanup = osc_precleanup,
- .cleanup = osc_cleanup,
- .add_conn = client_import_add_conn,
- .del_conn = client_import_del_conn,
- .connect = client_connect_import,
- .reconnect = osc_reconnect,
- .disconnect = osc_disconnect,
- .statfs = osc_statfs,
- .statfs_async = osc_statfs_async,
- .create = osc_create,
- .destroy = osc_destroy,
- .getattr = osc_getattr,
- .setattr = osc_setattr,
- .iocontrol = osc_iocontrol,
- .set_info_async = osc_set_info_async,
- .import_event = osc_import_event,
- .process_config = osc_process_config,
- .quotactl = osc_quotactl,
-};
-
-struct list_head osc_shrink_list = LIST_HEAD_INIT(osc_shrink_list);
-DEFINE_SPINLOCK(osc_shrink_lock);
-
-static struct shrinker osc_cache_shrinker = {
- .count_objects = osc_cache_shrink_count,
- .scan_objects = osc_cache_shrink_scan,
- .seeks = DEFAULT_SEEKS,
-};
-
-static int __init osc_init(void)
-{
- struct lprocfs_static_vars lvars = { NULL };
- unsigned int reqpool_size;
- unsigned int reqsize;
- int rc;
-
- /* print an address of _any_ initialized kernel symbol from this
- * module, to allow debugging with gdb that doesn't support data
- * symbols from modules.
- */
- CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
-
- rc = lu_kmem_init(osc_caches);
- if (rc)
- return rc;
-
- lprocfs_osc_init_vars(&lvars);
-
- rc = class_register_type(&osc_obd_ops, NULL,
- LUSTRE_OSC_NAME, &osc_device_type);
- if (rc)
- goto out_kmem;
-
- rc = register_shrinker(&osc_cache_shrinker);
- if (rc)
- goto out_type;
-
- /* This is obviously too much memory, only prevent overflow here */
- if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
- rc = -EINVAL;
- goto out_type;
- }
-
- reqpool_size = osc_reqpool_mem_max << 20;
-
- reqsize = 1;
- while (reqsize < OST_MAXREQSIZE)
- reqsize = reqsize << 1;
-
- /*
- * We don't enlarge the request count in OSC pool according to
- * cl_max_rpcs_in_flight. The allocation from the pool will only be
- * tried after normal allocation failed. So a small OSC pool won't
- * cause much performance degression in most of cases.
- */
- osc_reqpool_maxreqcount = reqpool_size / reqsize;
-
- atomic_set(&osc_pool_req_count, 0);
- osc_rq_pool = ptlrpc_init_rq_pool(0, OST_MAXREQSIZE,
- ptlrpc_add_rqs_to_pool);
-
- if (osc_rq_pool)
- return 0;
-
- rc = -ENOMEM;
-
-out_type:
- class_unregister_type(LUSTRE_OSC_NAME);
-out_kmem:
- lu_kmem_fini(osc_caches);
- return rc;
-}
-
-static void /*__exit*/ osc_exit(void)
-{
- unregister_shrinker(&osc_cache_shrinker);
- class_unregister_type(LUSTRE_OSC_NAME);
- lu_kmem_fini(osc_caches);
- ptlrpc_free_rq_pool(osc_rq_pool);
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-
-module_init(osc_init);
-module_exit(osc_exit);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/Makefile b/drivers/staging/lustre/lustre/ptlrpc/Makefile
deleted file mode 100644
index 1deb1971b39e..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
-subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
-
-obj-$(CONFIG_LUSTRE_FS) += ptlrpc.o
-LDLM := ../../lustre/ldlm/
-
-ldlm_objs := $(LDLM)l_lock.o $(LDLM)ldlm_lock.o
-ldlm_objs += $(LDLM)ldlm_resource.o $(LDLM)ldlm_lib.o
-ldlm_objs += $(LDLM)ldlm_plain.o $(LDLM)ldlm_extent.o
-ldlm_objs += $(LDLM)ldlm_request.o $(LDLM)ldlm_lockd.o
-ldlm_objs += $(LDLM)ldlm_flock.o $(LDLM)ldlm_inodebits.o
-ldlm_objs += $(LDLM)ldlm_pool.o
-ldlm_objs += $(LDLM)interval_tree.o
-ptlrpc_objs := client.o recover.o connection.o niobuf.o pack_generic.o
-ptlrpc_objs += events.o ptlrpc_module.o service.o pinger.o
-ptlrpc_objs += llog_net.o llog_client.o import.o ptlrpcd.o
-ptlrpc_objs += pers.o lproc_ptlrpc.o wiretest.o layout.o
-ptlrpc_objs += sec.o sec_bulk.o sec_gc.o sec_config.o
-ptlrpc_objs += sec_null.o sec_plain.o nrs.o nrs_fifo.o
-
-ptlrpc-y := $(ldlm_objs) $(ptlrpc_objs) sec_lproc.o
-ptlrpc-$(CONFIG_LUSTRE_TRANSLATE_ERRNOS) += errno.o
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
deleted file mode 100644
index ca096fadb9c0..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ /dev/null
@@ -1,3269 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/** Implementation of client-side PortalRPC interfaces */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_lib.h>
-#include <lustre_ha.h>
-#include <lustre_import.h>
-#include <lustre_req_layout.h>
-
-#include "ptlrpc_internal.h"
-
-const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
- .add_kiov_frag = ptlrpc_prep_bulk_page_pin,
- .release_frags = ptlrpc_release_bulk_page_pin,
-};
-EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
-
-const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
- .add_kiov_frag = ptlrpc_prep_bulk_page_nopin,
- .release_frags = NULL,
-};
-EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
-
-static int ptlrpc_send_new_req(struct ptlrpc_request *req);
-static int ptlrpcd_check_work(struct ptlrpc_request *req);
-static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
-
-/**
- * Initialize passed in client structure \a cl.
- */
-void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
- struct ptlrpc_client *cl)
-{
- cl->cli_request_portal = req_portal;
- cl->cli_reply_portal = rep_portal;
- cl->cli_name = name;
-}
-EXPORT_SYMBOL(ptlrpc_init_client);
-
-/**
- * Return PortalRPC connection for remote uud \a uuid
- */
-struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
-{
- struct ptlrpc_connection *c;
- lnet_nid_t self;
- struct lnet_process_id peer;
- int err;
-
- /*
- * ptlrpc_uuid_to_peer() initializes its 2nd parameter
- * before accessing its values.
- * coverity[uninit_use_in_call]
- */
- err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
- if (err != 0) {
- CNETERR("cannot find peer %s!\n", uuid->uuid);
- return NULL;
- }
-
- c = ptlrpc_connection_get(peer, self, uuid);
- if (c) {
- memcpy(c->c_remote_uuid.uuid,
- uuid->uuid, sizeof(c->c_remote_uuid.uuid));
- }
-
- CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
-
- return c;
-}
-
-/**
- * Allocate and initialize new bulk descriptor on the sender.
- * Returns pointer to the descriptor or NULL on error.
- */
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
- unsigned int max_brw,
- enum ptlrpc_bulk_op_type type,
- unsigned int portal,
- const struct ptlrpc_bulk_frag_ops *ops)
-{
- struct ptlrpc_bulk_desc *desc;
- int i;
-
- /* ensure that only one of KIOV or IOVEC is set but not both */
- LASSERT((ptlrpc_is_bulk_desc_kiov(type) && ops->add_kiov_frag) ||
- (ptlrpc_is_bulk_desc_kvec(type) && ops->add_iov_frag));
-
- desc = kzalloc(sizeof(*desc), GFP_NOFS);
- if (!desc)
- return NULL;
-
- if (type & PTLRPC_BULK_BUF_KIOV) {
- GET_KIOV(desc) = kcalloc(nfrags, sizeof(*GET_KIOV(desc)),
- GFP_NOFS);
- if (!GET_KIOV(desc))
- goto free_desc;
- } else {
- GET_KVEC(desc) = kcalloc(nfrags, sizeof(*GET_KVEC(desc)),
- GFP_NOFS);
- if (!GET_KVEC(desc))
- goto free_desc;
- }
-
- spin_lock_init(&desc->bd_lock);
- init_waitqueue_head(&desc->bd_waitq);
- desc->bd_max_iov = nfrags;
- desc->bd_iov_count = 0;
- desc->bd_portal = portal;
- desc->bd_type = type;
- desc->bd_md_count = 0;
- desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *)ops;
- LASSERT(max_brw > 0);
- desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
- /*
- * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
- * node. Negotiated ocd_brw_size will always be <= this number.
- */
- for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
- LNetInvalidateMDHandle(&desc->bd_mds[i]);
-
- return desc;
-free_desc:
- kfree(desc);
- return NULL;
-}
-
-/**
- * Prepare bulk descriptor for specified outgoing request \a req that
- * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
- * the bulk to be sent. Used on client-side.
- * Returns pointer to newly allocated initialized bulk descriptor or NULL on
- * error.
- */
-struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- unsigned int nfrags,
- unsigned int max_brw,
- unsigned int type,
- unsigned int portal,
- const struct ptlrpc_bulk_frag_ops *ops)
-{
- struct obd_import *imp = req->rq_import;
- struct ptlrpc_bulk_desc *desc;
-
- LASSERT(ptlrpc_is_bulk_op_passive(type));
-
- desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
- if (!desc)
- return NULL;
-
- desc->bd_import_generation = req->rq_import_generation;
- desc->bd_import = class_import_get(imp);
- desc->bd_req = req;
-
- desc->bd_cbid.cbid_fn = client_bulk_callback;
- desc->bd_cbid.cbid_arg = desc;
-
- /* This makes req own desc, and free it when she frees herself */
- req->rq_bulk = desc;
-
- return desc;
-}
-EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
-
-void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset, int len, int pin)
-{
- struct bio_vec *kiov;
-
- LASSERT(desc->bd_iov_count < desc->bd_max_iov);
- LASSERT(page);
- LASSERT(pageoffset >= 0);
- LASSERT(len > 0);
- LASSERT(pageoffset + len <= PAGE_SIZE);
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-
- kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
-
- desc->bd_nob += len;
-
- if (pin)
- get_page(page);
-
- kiov->bv_page = page;
- kiov->bv_offset = pageoffset;
- kiov->bv_len = len;
-
- desc->bd_iov_count++;
-}
-EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
-
-int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
- void *frag, int len)
-{
- struct kvec *iovec;
-
- LASSERT(desc->bd_iov_count < desc->bd_max_iov);
- LASSERT(frag);
- LASSERT(len > 0);
- LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
-
- iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
-
- desc->bd_nob += len;
-
- iovec->iov_base = frag;
- iovec->iov_len = len;
-
- desc->bd_iov_count++;
-
- return desc->bd_nob;
-}
-EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
-
-void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
-{
- LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
- LASSERT(desc->bd_md_count == 0); /* network hands off */
- LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
- LASSERT(desc->bd_frag_ops);
-
- if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
- sptlrpc_enc_pool_put_pages(desc);
-
- if (desc->bd_export)
- class_export_put(desc->bd_export);
- else
- class_import_put(desc->bd_import);
-
- if (desc->bd_frag_ops->release_frags)
- desc->bd_frag_ops->release_frags(desc);
-
- if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
- kfree(GET_KIOV(desc));
- else
- kfree(GET_KVEC(desc));
-
- kfree(desc);
-}
-EXPORT_SYMBOL(ptlrpc_free_bulk);
-
-/**
- * Set server timelimit for this req, i.e. how long are we willing to wait
- * for reply before timing out this request.
- */
-void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
-{
- __u32 serv_est;
- int idx;
- struct imp_at *at;
-
- LASSERT(req->rq_import);
-
- if (AT_OFF) {
- /*
- * non-AT settings
- *
- * \a imp_server_timeout means this is reverse import and
- * we send (currently only) ASTs to the client and cannot afford
- * to wait too long for the reply, otherwise the other client
- * (because of which we are sending this request) would
- * timeout waiting for us
- */
- req->rq_timeout = req->rq_import->imp_server_timeout ?
- obd_timeout / 2 : obd_timeout;
- } else {
- at = &req->rq_import->imp_at;
- idx = import_at_get_index(req->rq_import,
- req->rq_request_portal);
- serv_est = at_get(&at->iat_service_estimate[idx]);
- req->rq_timeout = at_est2timeout(serv_est);
- }
- /*
- * We could get even fancier here, using history to predict increased
- * loading...
- */
-
- /*
- * Let the server know what this RPC timeout is by putting it in the
- * reqmsg
- */
- lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
-}
-EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
-
-/* Adjust max service estimate based on server value */
-static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
- unsigned int serv_est)
-{
- int idx;
- unsigned int oldse;
- struct imp_at *at;
-
- LASSERT(req->rq_import);
- at = &req->rq_import->imp_at;
-
- idx = import_at_get_index(req->rq_import, req->rq_request_portal);
- /*
- * max service estimates are tracked on the server side,
- * so just keep minimal history here
- */
- oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
- if (oldse != 0)
- CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
- req->rq_import->imp_obd->obd_name, req->rq_request_portal,
- oldse, at_get(&at->iat_service_estimate[idx]));
-}
-
-/* Expected network latency per remote node (secs) */
-int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
-{
- return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
-}
-
-/* Adjust expected network latency */
-void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
- unsigned int service_time)
-{
- unsigned int nl, oldnl;
- struct imp_at *at;
- time64_t now = ktime_get_real_seconds();
-
- LASSERT(req->rq_import);
-
- if (service_time > now - req->rq_sent + 3) {
- /*
- * bz16408, however, this can also happen if early reply
- * is lost and client RPC is expired and resent, early reply
- * or reply of original RPC can still be fit in reply buffer
- * of resent RPC, now client is measuring time from the
- * resent time, but server sent back service time of original
- * RPC.
- */
- CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
- D_ADAPTTO : D_WARNING,
- "Reported service time %u > total measured time %lld\n",
- service_time, now - req->rq_sent);
- return;
- }
-
- /* Network latency is total time less server processing time */
- nl = max_t(int, now - req->rq_sent -
- service_time, 0) + 1; /* st rounding */
- at = &req->rq_import->imp_at;
-
- oldnl = at_measured(&at->iat_net_latency, nl);
- if (oldnl != 0)
- CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) has changed from %d to %d\n",
- req->rq_import->imp_obd->obd_name,
- obd_uuid2str(
- &req->rq_import->imp_connection->c_remote_uuid),
- oldnl, at_get(&at->iat_net_latency));
-}
-
-static int unpack_reply(struct ptlrpc_request *req)
-{
- int rc;
-
- if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
- rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
- if (rc) {
- DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
- return -EPROTO;
- }
- }
-
- rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
- if (rc) {
- DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
- return -EPROTO;
- }
- return 0;
-}
-
-/**
- * Handle an early reply message, called with the rq_lock held.
- * If anything goes wrong just ignore it - same as if it never happened
- */
-static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
- __must_hold(&req->rq_lock)
-{
- struct ptlrpc_request *early_req;
- time64_t olddl;
- int rc;
-
- req->rq_early = 0;
- spin_unlock(&req->rq_lock);
-
- rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
- if (rc) {
- spin_lock(&req->rq_lock);
- return rc;
- }
-
- rc = unpack_reply(early_req);
- if (rc) {
- sptlrpc_cli_finish_early_reply(early_req);
- spin_lock(&req->rq_lock);
- return rc;
- }
-
- /*
- * Use new timeout value just to adjust the local value for this
- * request, don't include it into at_history. It is unclear yet why
- * service time increased and should it be counted or skipped, e.g.
- * that can be recovery case or some error or server, the real reply
- * will add all new data if it is worth to add.
- */
- req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
- lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
-
- /* Network latency can be adjusted, it is pure network delays */
- ptlrpc_at_adj_net_latency(req,
- lustre_msg_get_service_time(early_req->rq_repmsg));
-
- sptlrpc_cli_finish_early_reply(early_req);
-
- spin_lock(&req->rq_lock);
- olddl = req->rq_deadline;
- /*
- * server assumes it now has rq_timeout from when the request
- * arrived, so the client should give it at least that long.
- * since we don't know the arrival time we'll use the original
- * sent time
- */
- req->rq_deadline = req->rq_sent + req->rq_timeout +
- ptlrpc_at_get_net_latency(req);
-
- DEBUG_REQ(D_ADAPTTO, req,
- "Early reply #%d, new deadline in %lds (%lds)",
- req->rq_early_count,
- (long)(req->rq_deadline - ktime_get_real_seconds()),
- (long)(req->rq_deadline - olddl));
-
- return rc;
-}
-
-static struct kmem_cache *request_cache;
-
-int ptlrpc_request_cache_init(void)
-{
- request_cache = kmem_cache_create("ptlrpc_cache",
- sizeof(struct ptlrpc_request),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- return !request_cache ? -ENOMEM : 0;
-}
-
-void ptlrpc_request_cache_fini(void)
-{
- kmem_cache_destroy(request_cache);
-}
-
-struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
-{
- struct ptlrpc_request *req;
-
- req = kmem_cache_zalloc(request_cache, flags);
- return req;
-}
-
-void ptlrpc_request_cache_free(struct ptlrpc_request *req)
-{
- kmem_cache_free(request_cache, req);
-}
-
-/**
- * Wind down request pool \a pool.
- * Frees all requests from the pool too
- */
-void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
-{
- struct ptlrpc_request *req;
-
- while ((req = list_first_entry_or_null(&pool->prp_req_list,
- struct ptlrpc_request, rq_list))) {
- list_del(&req->rq_list);
- LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
- kvfree(req->rq_reqbuf);
- ptlrpc_request_cache_free(req);
- }
- kfree(pool);
-}
-EXPORT_SYMBOL(ptlrpc_free_rq_pool);
-
-/**
- * Allocates, initializes and adds \a num_rq requests to the pool \a pool
- */
-int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
-{
- int i;
- int size = 1;
-
- while (size < pool->prp_rq_size)
- size <<= 1;
-
- LASSERTF(list_empty(&pool->prp_req_list) ||
- size == pool->prp_rq_size,
- "Trying to change pool size with nonempty pool from %d to %d bytes\n",
- pool->prp_rq_size, size);
-
- spin_lock(&pool->prp_lock);
- pool->prp_rq_size = size;
- for (i = 0; i < num_rq; i++) {
- struct ptlrpc_request *req;
- struct lustre_msg *msg;
-
- spin_unlock(&pool->prp_lock);
- req = ptlrpc_request_cache_alloc(GFP_KERNEL);
- if (!req)
- return i;
- msg = kvzalloc(size, GFP_KERNEL);
- if (!msg) {
- ptlrpc_request_cache_free(req);
- return i;
- }
- req->rq_reqbuf = msg;
- req->rq_reqbuf_len = size;
- req->rq_pool = pool;
- spin_lock(&pool->prp_lock);
- list_add_tail(&req->rq_list, &pool->prp_req_list);
- }
- spin_unlock(&pool->prp_lock);
- return num_rq;
-}
-EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
-
-/**
- * Create and initialize new request pool with given attributes:
- * \a num_rq - initial number of requests to create for the pool
- * \a msgsize - maximum message size possible for requests in thid pool
- * \a populate_pool - function to be called when more requests need to be added
- * to the pool
- * Returns pointer to newly created pool or NULL on error.
- */
-struct ptlrpc_request_pool *
-ptlrpc_init_rq_pool(int num_rq, int msgsize,
- int (*populate_pool)(struct ptlrpc_request_pool *, int))
-{
- struct ptlrpc_request_pool *pool;
-
- pool = kzalloc(sizeof(struct ptlrpc_request_pool), GFP_NOFS);
- if (!pool)
- return NULL;
-
- /*
- * Request next power of two for the allocation, because internally
- * kernel would do exactly this
- */
-
- spin_lock_init(&pool->prp_lock);
- INIT_LIST_HEAD(&pool->prp_req_list);
- pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
- pool->prp_populate = populate_pool;
-
- populate_pool(pool, num_rq);
-
- return pool;
-}
-EXPORT_SYMBOL(ptlrpc_init_rq_pool);
-
-/**
- * Fetches one request from pool \a pool
- */
-static struct ptlrpc_request *
-ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
-{
- struct ptlrpc_request *request;
- struct lustre_msg *reqbuf;
-
- if (!pool)
- return NULL;
-
- spin_lock(&pool->prp_lock);
-
- /*
- * See if we have anything in a pool, and bail out if nothing,
- * in writeout path, where this matters, this is safe to do, because
- * nothing is lost in this case, and when some in-flight requests
- * complete, this code will be called again.
- */
- if (unlikely(list_empty(&pool->prp_req_list))) {
- spin_unlock(&pool->prp_lock);
- return NULL;
- }
-
- request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
- list_del_init(&request->rq_list);
- spin_unlock(&pool->prp_lock);
-
- LASSERT(request->rq_reqbuf);
- LASSERT(request->rq_pool);
-
- reqbuf = request->rq_reqbuf;
- memset(request, 0, sizeof(*request));
- request->rq_reqbuf = reqbuf;
- request->rq_reqbuf_len = pool->prp_rq_size;
- request->rq_pool = pool;
-
- return request;
-}
-
-/**
- * Returns freed \a request to pool.
- */
-static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
-{
- struct ptlrpc_request_pool *pool = request->rq_pool;
-
- spin_lock(&pool->prp_lock);
- LASSERT(list_empty(&request->rq_list));
- LASSERT(!request->rq_receiving_reply);
- list_add_tail(&request->rq_list, &pool->prp_req_list);
- spin_unlock(&pool->prp_lock);
-}
-
-void ptlrpc_add_unreplied(struct ptlrpc_request *req)
-{
- struct obd_import *imp = req->rq_import;
- struct ptlrpc_request *iter;
-
- assert_spin_locked(&imp->imp_lock);
- LASSERT(list_empty(&req->rq_unreplied_list));
-
- /* unreplied list is sorted by xid in ascending order */
- list_for_each_entry_reverse(iter, &imp->imp_unreplied_list, rq_unreplied_list) {
-
- LASSERT(req->rq_xid != iter->rq_xid);
- if (req->rq_xid < iter->rq_xid)
- continue;
- list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
- return;
- }
- list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
-}
-
-void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
-{
- req->rq_xid = ptlrpc_next_xid();
- ptlrpc_add_unreplied(req);
-}
-
-static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
-{
- spin_lock(&req->rq_import->imp_lock);
- ptlrpc_assign_next_xid_nolock(req);
- spin_unlock(&req->rq_import->imp_lock);
-}
-
-int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode, char **bufs,
- struct ptlrpc_cli_ctx *ctx)
-{
- int count;
- struct obd_import *imp;
- __u32 *lengths;
- int rc;
-
- count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
- imp = request->rq_import;
- lengths = request->rq_pill.rc_area[RCL_CLIENT];
-
- if (unlikely(ctx)) {
- request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
- } else {
- rc = sptlrpc_req_get_ctx(request);
- if (rc)
- goto out_free;
- }
- sptlrpc_req_set_flavor(request, opcode);
-
- rc = lustre_pack_request(request, imp->imp_msg_magic, count,
- lengths, bufs);
- if (rc)
- goto out_ctx;
-
- lustre_msg_add_version(request->rq_reqmsg, version);
- request->rq_send_state = LUSTRE_IMP_FULL;
- request->rq_type = PTL_RPC_MSG_REQUEST;
-
- request->rq_req_cbid.cbid_fn = request_out_callback;
- request->rq_req_cbid.cbid_arg = request;
-
- request->rq_reply_cbid.cbid_fn = reply_in_callback;
- request->rq_reply_cbid.cbid_arg = request;
-
- request->rq_reply_deadline = 0;
- request->rq_bulk_deadline = 0;
- request->rq_req_deadline = 0;
- request->rq_phase = RQ_PHASE_NEW;
- request->rq_next_phase = RQ_PHASE_UNDEFINED;
-
- request->rq_request_portal = imp->imp_client->cli_request_portal;
- request->rq_reply_portal = imp->imp_client->cli_reply_portal;
-
- ptlrpc_at_set_req_timeout(request);
-
- lustre_msg_set_opc(request->rq_reqmsg, opcode);
- ptlrpc_assign_next_xid(request);
-
- /* Let's setup deadline for req/reply/bulk unlink for opcode. */
- if (cfs_fail_val == opcode) {
- time64_t *fail_t = NULL, *fail2_t = NULL;
-
- if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
- fail_t = &request->rq_bulk_deadline;
- } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
- fail_t = &request->rq_reply_deadline;
- } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
- fail_t = &request->rq_req_deadline;
- } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
- fail_t = &request->rq_reply_deadline;
- fail2_t = &request->rq_bulk_deadline;
- }
-
- if (fail_t) {
- *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
-
- if (fail2_t)
- *fail2_t = ktime_get_real_seconds() +
- LONG_UNLINK;
-
- /* The RPC is infected, let the test change the
- * fail_loc
- */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(2 * HZ);
- set_current_state(TASK_RUNNING);
- }
- }
-
- return 0;
-
-out_ctx:
- LASSERT(!request->rq_pool);
- sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
-out_free:
- class_import_put(imp);
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
-
-/**
- * Pack request buffers for network transfer, performing necessary encryption
- * steps if necessary.
- */
-int ptlrpc_request_pack(struct ptlrpc_request *request,
- __u32 version, int opcode)
-{
- int rc;
-
- rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
- if (rc)
- return rc;
-
- /*
- * For some old 1.8 clients (< 1.8.7), they will LASSERT the size of
- * ptlrpc_body sent from server equal to local ptlrpc_body size, so we
- * have to send old ptlrpc_body to keep interoperability with these
- * clients.
- *
- * Only three kinds of server->client RPCs so far:
- * - LDLM_BL_CALLBACK
- * - LDLM_CP_CALLBACK
- * - LDLM_GL_CALLBACK
- *
- * XXX This should be removed whenever we drop the interoperability with
- * the these old clients.
- */
- if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
- opcode == LDLM_GL_CALLBACK)
- req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
- sizeof(struct ptlrpc_body_v2), RCL_CLIENT);
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_request_pack);
-
-/**
- * Helper function to allocate new request on import \a imp
- * and possibly using existing request from pool \a pool if provided.
- * Returns allocated request structure with import field filled or
- * NULL on error.
- */
-static inline
-struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
- struct ptlrpc_request_pool *pool)
-{
- struct ptlrpc_request *request;
-
- request = ptlrpc_request_cache_alloc(GFP_NOFS);
-
- if (!request && pool)
- request = ptlrpc_prep_req_from_pool(pool);
-
- if (request) {
- ptlrpc_cli_req_init(request);
-
- LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
- LASSERT(imp != LP_POISON);
- LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
- imp->imp_client);
- LASSERT(imp->imp_client != LP_POISON);
-
- request->rq_import = class_import_get(imp);
- } else {
- CERROR("request allocation out of memory\n");
- }
-
- return request;
-}
-
-/**
- * Helper function for creating a request.
- * Calls __ptlrpc_request_alloc to allocate new request structure and inits
- * buffer structures according to capsule template \a format.
- * Returns allocated request structure pointer or NULL on error.
- */
-static struct ptlrpc_request *
-ptlrpc_request_alloc_internal(struct obd_import *imp,
- struct ptlrpc_request_pool *pool,
- const struct req_format *format)
-{
- struct ptlrpc_request *request;
-
- request = __ptlrpc_request_alloc(imp, pool);
- if (!request)
- return NULL;
-
- req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
- req_capsule_set(&request->rq_pill, format);
- return request;
-}
-
-/**
- * Allocate new request structure for import \a imp and initialize its
- * buffer structure according to capsule template \a format.
- */
-struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
- const struct req_format *format)
-{
- return ptlrpc_request_alloc_internal(imp, NULL, format);
-}
-EXPORT_SYMBOL(ptlrpc_request_alloc);
-
-/**
- * Allocate new request structure for import \a imp from pool \a pool and
- * initialize its buffer structure according to capsule template \a format.
- */
-struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
- struct ptlrpc_request_pool *pool,
- const struct req_format *format)
-{
- return ptlrpc_request_alloc_internal(imp, pool, format);
-}
-EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
-
-/**
- * For requests not from pool, free memory of the request structure.
- * For requests obtained from a pool earlier, return request back to pool.
- */
-void ptlrpc_request_free(struct ptlrpc_request *request)
-{
- if (request->rq_pool)
- __ptlrpc_free_req_to_pool(request);
- else
- ptlrpc_request_cache_free(request);
-}
-EXPORT_SYMBOL(ptlrpc_request_free);
-
-/**
- * Allocate new request for operation \a opcode and immediately pack it for
- * network transfer.
- * Only used for simple requests like OBD_PING where the only important
- * part of the request is operation itself.
- * Returns allocated request or NULL on error.
- */
-struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
- const struct req_format *format,
- __u32 version, int opcode)
-{
- struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
- int rc;
-
- if (req) {
- rc = ptlrpc_request_pack(req, version, opcode);
- if (rc) {
- ptlrpc_request_free(req);
- req = NULL;
- }
- }
- return req;
-}
-EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
-
-/**
- * Allocate and initialize new request set structure on the current CPT.
- * Returns a pointer to the newly allocated set structure or NULL on error.
- */
-struct ptlrpc_request_set *ptlrpc_prep_set(void)
-{
- struct ptlrpc_request_set *set;
- int cpt;
-
- cpt = cfs_cpt_current(cfs_cpt_table, 0);
- set = kzalloc_node(sizeof(*set), GFP_NOFS,
- cfs_cpt_spread_node(cfs_cpt_table, cpt));
- if (!set)
- return NULL;
- atomic_set(&set->set_refcount, 1);
- INIT_LIST_HEAD(&set->set_requests);
- init_waitqueue_head(&set->set_waitq);
- atomic_set(&set->set_new_count, 0);
- atomic_set(&set->set_remaining, 0);
- spin_lock_init(&set->set_new_req_lock);
- INIT_LIST_HEAD(&set->set_new_requests);
- INIT_LIST_HEAD(&set->set_cblist);
- set->set_max_inflight = UINT_MAX;
- set->set_producer = NULL;
- set->set_producer_arg = NULL;
- set->set_rc = 0;
-
- return set;
-}
-EXPORT_SYMBOL(ptlrpc_prep_set);
-
-/**
- * Allocate and initialize new request set structure with flow control
- * extension. This extension allows to control the number of requests in-flight
- * for the whole set. A callback function to generate requests must be provided
- * and the request set will keep the number of requests sent over the wire to
- * @max_inflight.
- * Returns a pointer to the newly allocated set structure or NULL on error.
- */
-struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
- void *arg)
-
-{
- struct ptlrpc_request_set *set;
-
- set = ptlrpc_prep_set();
- if (!set)
- return NULL;
-
- set->set_max_inflight = max;
- set->set_producer = func;
- set->set_producer_arg = arg;
-
- return set;
-}
-
-/**
- * Wind down and free request set structure previously allocated with
- * ptlrpc_prep_set.
- * Ensures that all requests on the set have completed and removes
- * all requests from the request list in a set.
- * If any unsent request happen to be on the list, pretends that they got
- * an error in flight and calls their completion handler.
- */
-void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- int expected_phase;
- int n = 0;
-
- /* Requests on the set should either all be completed, or all be new */
- expected_phase = (atomic_read(&set->set_remaining) == 0) ?
- RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
- list_for_each_entry(req, &set->set_requests, rq_set_chain) {
- LASSERT(req->rq_phase == expected_phase);
- n++;
- }
-
- LASSERTF(atomic_read(&set->set_remaining) == 0 ||
- atomic_read(&set->set_remaining) == n, "%d / %d\n",
- atomic_read(&set->set_remaining), n);
-
- while ((req = list_first_entry_or_null(&set->set_requests,
- struct ptlrpc_request,
- rq_set_chain))) {
- list_del_init(&req->rq_set_chain);
-
- LASSERT(req->rq_phase == expected_phase);
-
- if (req->rq_phase == RQ_PHASE_NEW) {
- ptlrpc_req_interpret(NULL, req, -EBADR);
- atomic_dec(&set->set_remaining);
- }
-
- spin_lock(&req->rq_lock);
- req->rq_set = NULL;
- req->rq_invalid_rqset = 0;
- spin_unlock(&req->rq_lock);
-
- ptlrpc_req_finished(req);
- }
-
- LASSERT(atomic_read(&set->set_remaining) == 0);
-
- ptlrpc_reqset_put(set);
-}
-EXPORT_SYMBOL(ptlrpc_set_destroy);
-
-/**
- * Add a new request to the general purpose request set.
- * Assumes request reference from the caller.
- */
-void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
- struct ptlrpc_request *req)
-{
- LASSERT(list_empty(&req->rq_set_chain));
-
- /* The set takes over the caller's request reference */
- list_add_tail(&req->rq_set_chain, &set->set_requests);
- req->rq_set = set;
- atomic_inc(&set->set_remaining);
- req->rq_queued_time = cfs_time_current();
-
- if (req->rq_reqmsg)
- lustre_msg_set_jobid(req->rq_reqmsg, NULL);
-
- if (set->set_producer)
- /*
- * If the request set has a producer callback, the RPC must be
- * sent straight away
- */
- ptlrpc_send_new_req(req);
-}
-EXPORT_SYMBOL(ptlrpc_set_add_req);
-
-/**
- * Add a request to a request with dedicated server thread
- * and wake the thread to make any necessary processing.
- * Currently only used for ptlrpcd.
- */
-void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
- struct ptlrpc_request *req)
-{
- struct ptlrpc_request_set *set = pc->pc_set;
- int count, i;
-
- LASSERT(!req->rq_set);
- LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
-
- spin_lock(&set->set_new_req_lock);
- /* The set takes over the caller's request reference. */
- req->rq_set = set;
- req->rq_queued_time = cfs_time_current();
- list_add_tail(&req->rq_set_chain, &set->set_new_requests);
- count = atomic_inc_return(&set->set_new_count);
- spin_unlock(&set->set_new_req_lock);
-
- /* Only need to call wakeup once for the first entry. */
- if (count == 1) {
- wake_up(&set->set_waitq);
-
- /*
- * XXX: It maybe unnecessary to wakeup all the partners. But to
- * guarantee the async RPC can be processed ASAP, we have
- * no other better choice. It maybe fixed in future.
- */
- for (i = 0; i < pc->pc_npartners; i++)
- wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
- }
-}
-
-/**
- * Based on the current state of the import, determine if the request
- * can be sent, is an error, or should be delayed.
- *
- * Returns true if this request should be delayed. If false, and
- * *status is set, then the request can not be sent and *status is the
- * error code. If false and status is 0, then request can be sent.
- *
- * The imp->imp_lock must be held.
- */
-static int ptlrpc_import_delay_req(struct obd_import *imp,
- struct ptlrpc_request *req, int *status)
-{
- int delay = 0;
-
- *status = 0;
-
- if (req->rq_ctx_init || req->rq_ctx_fini) {
- /* always allow ctx init/fini rpc go through */
- } else if (imp->imp_state == LUSTRE_IMP_NEW) {
- DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
- *status = -EIO;
- } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- /* pings may safely race with umount */
- DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
- D_HA : D_ERROR, req, "IMP_CLOSED ");
- *status = -EIO;
- } else if (ptlrpc_send_limit_expired(req)) {
- /* probably doesn't need to be a D_ERROR after initial testing */
- DEBUG_REQ(D_HA, req, "send limit expired ");
- *status = -ETIMEDOUT;
- } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
- imp->imp_state == LUSTRE_IMP_CONNECTING) {
- /* allow CONNECT even if import is invalid */
- if (atomic_read(&imp->imp_inval_count) != 0) {
- DEBUG_REQ(D_ERROR, req, "invalidate in flight");
- *status = -EIO;
- }
- } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
- if (!imp->imp_deactive)
- DEBUG_REQ(D_NET, req, "IMP_INVALID");
- *status = -ESHUTDOWN; /* bz 12940 */
- } else if (req->rq_import_generation != imp->imp_generation) {
- DEBUG_REQ(D_ERROR, req, "req wrong generation:");
- *status = -EIO;
- } else if (req->rq_send_state != imp->imp_state) {
- /* invalidate in progress - any requests should be drop */
- if (atomic_read(&imp->imp_inval_count) != 0) {
- DEBUG_REQ(D_ERROR, req, "invalidate in flight");
- *status = -EIO;
- } else if (req->rq_no_delay) {
- *status = -EWOULDBLOCK;
- } else if (req->rq_allow_replay &&
- (imp->imp_state == LUSTRE_IMP_REPLAY ||
- imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
- imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
- imp->imp_state == LUSTRE_IMP_RECOVER)) {
- DEBUG_REQ(D_HA, req, "allow during recovery.\n");
- } else {
- delay = 1;
- }
- }
-
- return delay;
-}
-
-/**
- * Decide if the error message should be printed to the console or not.
- * Makes its decision based on request type, status, and failure frequency.
- *
- * \param[in] req request that failed and may need a console message
- *
- * \retval false if no message should be printed
- * \retval true if console message should be printed
- */
-static bool ptlrpc_console_allow(struct ptlrpc_request *req)
-{
- __u32 opc;
-
- LASSERT(req->rq_reqmsg);
- opc = lustre_msg_get_opc(req->rq_reqmsg);
-
- /* Suppress particular reconnect errors which are to be expected. */
- if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) {
- int err;
-
- /* Suppress timed out reconnect requests */
- if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
- req->rq_timedout)
- return false;
-
- /*
- * Suppress most unavailable/again reconnect requests, but
- * print occasionally so it is clear client is trying to
- * connect to a server where no target is running.
- */
- err = lustre_msg_get_status(req->rq_repmsg);
- if ((err == -ENODEV || err == -EAGAIN) &&
- req->rq_import->imp_conn_cnt % 30 != 20)
- return false;
- }
-
- return true;
-}
-
-/**
- * Check request processing status.
- * Returns the status.
- */
-static int ptlrpc_check_status(struct ptlrpc_request *req)
-{
- int err;
-
- err = lustre_msg_get_status(req->rq_repmsg);
- if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
- struct obd_import *imp = req->rq_import;
- lnet_nid_t nid = imp->imp_connection->c_peer.nid;
- __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
-
- /* -EAGAIN is normal when using POSIX flocks */
- if (ptlrpc_console_allow(req) &&
- !(opc == LDLM_ENQUEUE && err == -EAGAIN))
- LCONSOLE_ERROR_MSG(0x011, "%s: operation %s to node %s failed: rc = %d\n",
- imp->imp_obd->obd_name,
- ll_opcode2str(opc),
- libcfs_nid2str(nid), err);
- return err < 0 ? err : -EINVAL;
- }
-
- if (err < 0)
- DEBUG_REQ(D_INFO, req, "status is %d", err);
- else if (err > 0)
- /* XXX: translate this error from net to host */
- DEBUG_REQ(D_INFO, req, "status is %d", err);
-
- return err;
-}
-
-/**
- * save pre-versions of objects into request for replay.
- * Versions are obtained from server reply.
- * used for VBR.
- */
-static void ptlrpc_save_versions(struct ptlrpc_request *req)
-{
- struct lustre_msg *repmsg = req->rq_repmsg;
- struct lustre_msg *reqmsg = req->rq_reqmsg;
- __u64 *versions = lustre_msg_get_versions(repmsg);
-
- if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
- return;
-
- LASSERT(versions);
- lustre_msg_set_versions(reqmsg, versions);
- CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
- versions[0], versions[1]);
-}
-
-__u64 ptlrpc_known_replied_xid(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
-
- assert_spin_locked(&imp->imp_lock);
- if (list_empty(&imp->imp_unreplied_list))
- return 0;
-
- req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
- rq_unreplied_list);
- LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
-
- if (imp->imp_known_replied_xid < req->rq_xid - 1)
- imp->imp_known_replied_xid = req->rq_xid - 1;
-
- return req->rq_xid - 1;
-}
-
-/**
- * Callback function called when client receives RPC reply for \a req.
- * Returns 0 on success or error code.
- * The return value would be assigned to req->rq_status by the caller
- * as request processing status.
- * This function also decides if the request needs to be saved for later replay.
- */
-static int after_reply(struct ptlrpc_request *req)
-{
- struct obd_import *imp = req->rq_import;
- struct obd_device *obd = req->rq_import->imp_obd;
- int rc;
- struct timespec64 work_start;
- long timediff;
- u64 committed;
-
- LASSERT(obd);
- /* repbuf must be unlinked */
- LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
-
- if (req->rq_reply_truncated) {
- if (ptlrpc_no_resend(req)) {
- DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d",
- req->rq_nob_received, req->rq_repbuf_len);
- return -EOVERFLOW;
- }
-
- sptlrpc_cli_free_repbuf(req);
- /*
- * Pass the required reply buffer size (include space for early
- * reply). NB: no need to round up because alloc_repbuf will
- * round it up
- */
- req->rq_replen = req->rq_nob_received;
- req->rq_nob_received = 0;
- spin_lock(&req->rq_lock);
- req->rq_resend = 1;
- spin_unlock(&req->rq_lock);
- return 0;
- }
-
- ktime_get_real_ts64(&work_start);
- timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
- (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
- NSEC_PER_USEC;
- /*
- * NB Until this point, the whole of the incoming message,
- * including buflens, status etc is in the sender's byte order.
- */
- rc = sptlrpc_cli_unwrap_reply(req);
- if (rc) {
- DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
- return rc;
- }
-
- /* Security layer unwrap might ask resend this request. */
- if (req->rq_resend)
- return 0;
-
- rc = unpack_reply(req);
- if (rc)
- return rc;
-
- /* retry indefinitely on EINPROGRESS */
- if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
- ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
- time64_t now = ktime_get_real_seconds();
-
- DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
- spin_lock(&req->rq_lock);
- req->rq_resend = 1;
- spin_unlock(&req->rq_lock);
- req->rq_nr_resend++;
-
- /* Readjust the timeout for current conditions */
- ptlrpc_at_set_req_timeout(req);
- /*
- * delay resend to give a chance to the server to get ready.
- * The delay is increased by 1s on every resend and is capped to
- * the current request timeout (i.e. obd_timeout if AT is off,
- * or AT service time x 125% + 5s, see at_est2timeout)
- */
- if (req->rq_nr_resend > req->rq_timeout)
- req->rq_sent = now + req->rq_timeout;
- else
- req->rq_sent = now + req->rq_nr_resend;
-
- /* Resend for EINPROGRESS will use a new XID */
- spin_lock(&imp->imp_lock);
- list_del_init(&req->rq_unreplied_list);
- spin_unlock(&imp->imp_lock);
-
- return 0;
- }
-
- if (obd->obd_svc_stats) {
- lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
- timediff);
- ptlrpc_lprocfs_rpc_sent(req, timediff);
- }
-
- if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
- lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
- DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
- lustre_msg_get_type(req->rq_repmsg));
- return -EPROTO;
- }
-
- if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
- CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
- ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
- ptlrpc_at_adj_net_latency(req,
- lustre_msg_get_service_time(req->rq_repmsg));
-
- rc = ptlrpc_check_status(req);
- imp->imp_connect_error = rc;
-
- if (rc) {
- /*
- * Either we've been evicted, or the server has failed for
- * some reason. Try to reconnect, and if that fails, punt to
- * the upcall.
- */
- if (ptlrpc_recoverable_error(rc)) {
- if (req->rq_send_state != LUSTRE_IMP_FULL ||
- imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
- return rc;
- }
- ptlrpc_request_handle_notconn(req);
- return rc;
- }
- } else {
- /*
- * Let's look if server sent slv. Do it only for RPC with
- * rc == 0.
- */
- ldlm_cli_update_pool(req);
- }
-
- /* Store transno in reqmsg for replay. */
- if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
- req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
- lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
- }
-
- if (imp->imp_replayable) {
- spin_lock(&imp->imp_lock);
- /*
- * No point in adding already-committed requests to the replay
- * list, we will just remove them immediately. b=9829
- */
- if (req->rq_transno != 0 &&
- (req->rq_transno >
- lustre_msg_get_last_committed(req->rq_repmsg) ||
- req->rq_replay)) {
- /* version recovery */
- ptlrpc_save_versions(req);
- ptlrpc_retain_replayable_request(req, imp);
- } else if (req->rq_commit_cb &&
- list_empty(&req->rq_replay_list)) {
- /*
- * NB: don't call rq_commit_cb if it's already on
- * rq_replay_list, ptlrpc_free_committed() will call
- * it later, see LU-3618 for details
- */
- spin_unlock(&imp->imp_lock);
- req->rq_commit_cb(req);
- spin_lock(&imp->imp_lock);
- }
-
- /* Replay-enabled imports return commit-status information. */
- committed = lustre_msg_get_last_committed(req->rq_repmsg);
- if (likely(committed > imp->imp_peer_committed_transno))
- imp->imp_peer_committed_transno = committed;
-
- ptlrpc_free_committed(imp);
-
- if (!list_empty(&imp->imp_replay_list)) {
- struct ptlrpc_request *last;
-
- last = list_entry(imp->imp_replay_list.prev,
- struct ptlrpc_request,
- rq_replay_list);
- /*
- * Requests with rq_replay stay on the list even if no
- * commit is expected.
- */
- if (last->rq_transno > imp->imp_peer_committed_transno)
- ptlrpc_pinger_commit_expected(imp);
- }
-
- spin_unlock(&imp->imp_lock);
- }
-
- return rc;
-}
-
-/**
- * Helper function to send request \a req over the network for the first time
- * Also adjusts request phase.
- * Returns 0 on success or error code.
- */
-static int ptlrpc_send_new_req(struct ptlrpc_request *req)
-{
- struct obd_import *imp = req->rq_import;
- u64 min_xid = 0;
- int rc;
-
- LASSERT(req->rq_phase == RQ_PHASE_NEW);
-
- /* do not try to go further if there is not enough memory in enc_pool */
- if (req->rq_sent && req->rq_bulk)
- if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
- pool_is_at_full_capacity())
- return -ENOMEM;
-
- if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
- (!req->rq_generation_set ||
- req->rq_import_generation == imp->imp_generation))
- return 0;
-
- ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
-
- spin_lock(&imp->imp_lock);
-
- LASSERT(req->rq_xid);
- LASSERT(!list_empty(&req->rq_unreplied_list));
-
- if (!req->rq_generation_set)
- req->rq_import_generation = imp->imp_generation;
-
- if (ptlrpc_import_delay_req(imp, req, &rc)) {
- spin_lock(&req->rq_lock);
- req->rq_waiting = 1;
- spin_unlock(&req->rq_lock);
-
- DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: (%s != %s)",
- lustre_msg_get_status(req->rq_reqmsg),
- ptlrpc_import_state_name(req->rq_send_state),
- ptlrpc_import_state_name(imp->imp_state));
- LASSERT(list_empty(&req->rq_list));
- list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- atomic_inc(&req->rq_import->imp_inflight);
- spin_unlock(&imp->imp_lock);
- return 0;
- }
-
- if (rc != 0) {
- spin_unlock(&imp->imp_lock);
- req->rq_status = rc;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- return rc;
- }
-
- LASSERT(list_empty(&req->rq_list));
- list_add_tail(&req->rq_list, &imp->imp_sending_list);
- atomic_inc(&req->rq_import->imp_inflight);
-
- /* find the known replied XID from the unreplied list, CONNECT
- * and DISCONNECT requests are skipped to make the sanity check
- * on server side happy. see process_req_last_xid().
- *
- * For CONNECT: Because replay requests have lower XID, it'll
- * break the sanity check if CONNECT bump the exp_last_xid on
- * server.
- *
- * For DISCONNECT: Since client will abort inflight RPC before
- * sending DISCONNECT, DISCONNECT may carry an XID which higher
- * than the inflight RPC.
- */
- if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
- min_xid = ptlrpc_known_replied_xid(imp);
- spin_unlock(&imp->imp_lock);
-
- lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
-
- lustre_msg_set_status(req->rq_reqmsg, current_pid());
-
- rc = sptlrpc_req_refresh_ctx(req, -1);
- if (rc) {
- if (req->rq_err) {
- req->rq_status = rc;
- return 1;
- }
- spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 1;
- spin_unlock(&req->rq_lock);
- return 0;
- }
-
- CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
- current_comm(),
- imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
-
- rc = ptl_send_rpc(req, 0);
- if (rc == -ENOMEM) {
- spin_lock(&imp->imp_lock);
- if (!list_empty(&req->rq_list)) {
- list_del_init(&req->rq_list);
- if (atomic_dec_and_test(&req->rq_import->imp_inflight))
- wake_up_all(&req->rq_import->imp_recovery_waitq);
- }
- spin_unlock(&imp->imp_lock);
- ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
- return rc;
- }
- if (rc) {
- DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
- spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- spin_unlock(&req->rq_lock);
- return rc;
- }
- return 0;
-}
-
-static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
-{
- int remaining, rc;
-
- LASSERT(set->set_producer);
-
- remaining = atomic_read(&set->set_remaining);
-
- /*
- * populate the ->set_requests list with requests until we
- * reach the maximum number of RPCs in flight for this set
- */
- while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
- rc = set->set_producer(set, set->set_producer_arg);
- if (rc == -ENOENT) {
- /* no more RPC to produce */
- set->set_producer = NULL;
- set->set_producer_arg = NULL;
- return 0;
- }
- }
-
- return (atomic_read(&set->set_remaining) - remaining);
-}
-
-/**
- * this sends any unsent RPCs in \a set and returns 1 if all are sent
- * and no more replies are expected.
- * (it is possible to get less replies than requests sent e.g. due to timed out
- * requests or requests that we had trouble to send out)
- *
- * NOTE: This function contains a potential schedule point (cond_resched()).
- */
-int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req, *next;
- struct list_head comp_reqs;
- int force_timer_recalc = 0;
-
- if (atomic_read(&set->set_remaining) == 0)
- return 1;
-
- INIT_LIST_HEAD(&comp_reqs);
- list_for_each_entry_safe(req, next, &set->set_requests, rq_set_chain) {
- struct obd_import *imp = req->rq_import;
- int unregistered = 0;
- int rc = 0;
-
- /*
- * This schedule point is mainly for the ptlrpcd caller of this
- * function. Most ptlrpc sets are not long-lived and unbounded
- * in length, but at the least the set used by the ptlrpcd is.
- * Since the processing time is unbounded, we need to insert an
- * explicit schedule point to make the thread well-behaved.
- */
- cond_resched();
-
- if (req->rq_phase == RQ_PHASE_NEW &&
- ptlrpc_send_new_req(req)) {
- force_timer_recalc = 1;
- }
-
- /* delayed send - skip */
- if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
- continue;
-
- /* delayed resend - skip */
- if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
- req->rq_sent > ktime_get_real_seconds())
- continue;
-
- if (!(req->rq_phase == RQ_PHASE_RPC ||
- req->rq_phase == RQ_PHASE_BULK ||
- req->rq_phase == RQ_PHASE_INTERPRET ||
- req->rq_phase == RQ_PHASE_UNREG_RPC ||
- req->rq_phase == RQ_PHASE_UNREG_BULK ||
- req->rq_phase == RQ_PHASE_COMPLETE)) {
- DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
- LBUG();
- }
-
- if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
- req->rq_phase == RQ_PHASE_UNREG_BULK) {
- LASSERT(req->rq_next_phase != req->rq_phase);
- LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
-
- if (req->rq_req_deadline &&
- !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
- req->rq_req_deadline = 0;
- if (req->rq_reply_deadline &&
- !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
- req->rq_reply_deadline = 0;
- if (req->rq_bulk_deadline &&
- !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
- req->rq_bulk_deadline = 0;
-
- /*
- * Skip processing until reply is unlinked. We
- * can't return to pool before that and we can't
- * call interpret before that. We need to make
- * sure that all rdma transfers finished and will
- * not corrupt any data.
- */
- if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
- ptlrpc_client_recv_or_unlink(req))
- continue;
- if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
- ptlrpc_client_bulk_active(req))
- continue;
-
- /*
- * Turn fail_loc off to prevent it from looping
- * forever.
- */
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
- OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
- OBD_FAIL_ONCE);
- }
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
- OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
- OBD_FAIL_ONCE);
- }
-
- /* Move to next phase if reply was successfully
- * unlinked.
- */
- ptlrpc_rqphase_move(req, req->rq_next_phase);
- }
-
- if (req->rq_phase == RQ_PHASE_COMPLETE) {
- list_move_tail(&req->rq_set_chain, &comp_reqs);
- continue;
- }
-
- if (req->rq_phase == RQ_PHASE_INTERPRET)
- goto interpret;
-
- /* Note that this also will start async reply unlink. */
- if (req->rq_net_err && !req->rq_timedout) {
- ptlrpc_expire_one_request(req, 1);
-
- /* Check if we still need to wait for unlink. */
- if (ptlrpc_client_recv_or_unlink(req) ||
- ptlrpc_client_bulk_active(req))
- continue;
- /* If there is no need to resend, fail it now. */
- if (req->rq_no_resend) {
- if (req->rq_status == 0)
- req->rq_status = -EIO;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- goto interpret;
- } else {
- continue;
- }
- }
-
- if (req->rq_err) {
- spin_lock(&req->rq_lock);
- req->rq_replied = 0;
- spin_unlock(&req->rq_lock);
- if (req->rq_status == 0)
- req->rq_status = -EIO;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- goto interpret;
- }
-
- /*
- * ptlrpc_set_wait allow signal to abort the timeout
- * so it sets rq_intr regardless of individual rpc
- * timeouts. The synchronous IO waiting path sets
- * rq_intr irrespective of whether ptlrpcd
- * has seen a timeout. Our policy is to only interpret
- * interrupted rpcs after they have timed out, so we
- * need to enforce that here.
- */
-
- if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
- req->rq_wait_ctx)) {
- req->rq_status = -EINTR;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- goto interpret;
- }
-
- if (req->rq_phase == RQ_PHASE_RPC) {
- if (req->rq_timedout || req->rq_resend ||
- req->rq_waiting || req->rq_wait_ctx) {
- int status;
-
- if (!ptlrpc_unregister_reply(req, 1)) {
- ptlrpc_unregister_bulk(req, 1);
- continue;
- }
-
- spin_lock(&imp->imp_lock);
- if (ptlrpc_import_delay_req(imp, req,
- &status)) {
- /*
- * put on delay list - only if we wait
- * recovery finished - before send
- */
- list_del_init(&req->rq_list);
- list_add_tail(&req->rq_list,
- &imp->imp_delayed_list);
- spin_unlock(&imp->imp_lock);
- continue;
- }
-
- if (status != 0) {
- req->rq_status = status;
- ptlrpc_rqphase_move(req,
- RQ_PHASE_INTERPRET);
- spin_unlock(&imp->imp_lock);
- goto interpret;
- }
- if (ptlrpc_no_resend(req) &&
- !req->rq_wait_ctx) {
- req->rq_status = -ENOTCONN;
- ptlrpc_rqphase_move(req,
- RQ_PHASE_INTERPRET);
- spin_unlock(&imp->imp_lock);
- goto interpret;
- }
-
- list_del_init(&req->rq_list);
- list_add_tail(&req->rq_list,
- &imp->imp_sending_list);
-
- spin_unlock(&imp->imp_lock);
-
- spin_lock(&req->rq_lock);
- req->rq_waiting = 0;
- spin_unlock(&req->rq_lock);
-
- if (req->rq_timedout || req->rq_resend) {
- /* This is re-sending anyway, let's mark req as resend. */
- spin_lock(&req->rq_lock);
- req->rq_resend = 1;
- spin_unlock(&req->rq_lock);
- if (req->rq_bulk &&
- !ptlrpc_unregister_bulk(req, 1))
- continue;
- }
- /*
- * rq_wait_ctx is only touched by ptlrpcd,
- * so no lock is needed here.
- */
- status = sptlrpc_req_refresh_ctx(req, -1);
- if (status) {
- if (req->rq_err) {
- req->rq_status = status;
- spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- spin_unlock(&req->rq_lock);
- force_timer_recalc = 1;
- } else {
- spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 1;
- spin_unlock(&req->rq_lock);
- }
-
- continue;
- } else {
- spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- spin_unlock(&req->rq_lock);
- }
-
- rc = ptl_send_rpc(req, 0);
- if (rc == -ENOMEM) {
- spin_lock(&imp->imp_lock);
- if (!list_empty(&req->rq_list))
- list_del_init(&req->rq_list);
- spin_unlock(&imp->imp_lock);
- ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
- continue;
- }
- if (rc) {
- DEBUG_REQ(D_HA, req,
- "send failed: rc = %d", rc);
- force_timer_recalc = 1;
- spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- spin_unlock(&req->rq_lock);
- continue;
- }
- /* need to reset the timeout */
- force_timer_recalc = 1;
- }
-
- spin_lock(&req->rq_lock);
-
- if (ptlrpc_client_early(req)) {
- ptlrpc_at_recv_early_reply(req);
- spin_unlock(&req->rq_lock);
- continue;
- }
-
- /* Still waiting for a reply? */
- if (ptlrpc_client_recv(req)) {
- spin_unlock(&req->rq_lock);
- continue;
- }
-
- /* Did we actually receive a reply? */
- if (!ptlrpc_client_replied(req)) {
- spin_unlock(&req->rq_lock);
- continue;
- }
-
- spin_unlock(&req->rq_lock);
-
- /*
- * unlink from net because we are going to
- * swab in-place of reply buffer
- */
- unregistered = ptlrpc_unregister_reply(req, 1);
- if (!unregistered)
- continue;
-
- req->rq_status = after_reply(req);
- if (req->rq_resend)
- continue;
-
- /*
- * If there is no bulk associated with this request,
- * then we're done and should let the interpreter
- * process the reply. Similarly if the RPC returned
- * an error, and therefore the bulk will never arrive.
- */
- if (!req->rq_bulk || req->rq_status < 0) {
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- goto interpret;
- }
-
- ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
- }
-
- LASSERT(req->rq_phase == RQ_PHASE_BULK);
- if (ptlrpc_client_bulk_active(req))
- continue;
-
- if (req->rq_bulk->bd_failure) {
- /*
- * The RPC reply arrived OK, but the bulk screwed
- * up! Dead weird since the server told us the RPC
- * was good after getting the REPLY for her GET or
- * the ACK for her PUT.
- */
- DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
- req->rq_status = -EIO;
- }
-
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
-
-interpret:
- LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
-
- /*
- * This moves to "unregistering" phase we need to wait for
- * reply unlink.
- */
- if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
- /* start async bulk unlink too */
- ptlrpc_unregister_bulk(req, 1);
- continue;
- }
-
- if (!ptlrpc_unregister_bulk(req, 1))
- continue;
-
- /* When calling interpret receive should already be finished. */
- LASSERT(!req->rq_receiving_reply);
-
- ptlrpc_req_interpret(env, req, req->rq_status);
-
- if (ptlrpcd_check_work(req)) {
- atomic_dec(&set->set_remaining);
- continue;
- }
- ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
-
- CDEBUG(req->rq_reqmsg ? D_RPCTRACE : 0,
- "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
- current_comm(), imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
-
- spin_lock(&imp->imp_lock);
- /*
- * Request already may be not on sending or delaying list. This
- * may happen in the case of marking it erroneous for the case
- * ptlrpc_import_delay_req(req, status) find it impossible to
- * allow sending this rpc and returns *status != 0.
- */
- if (!list_empty(&req->rq_list)) {
- list_del_init(&req->rq_list);
- atomic_dec(&imp->imp_inflight);
- }
- list_del_init(&req->rq_unreplied_list);
- spin_unlock(&imp->imp_lock);
-
- atomic_dec(&set->set_remaining);
- wake_up_all(&imp->imp_recovery_waitq);
-
- if (set->set_producer) {
- /* produce a new request if possible */
- if (ptlrpc_set_producer(set) > 0)
- force_timer_recalc = 1;
-
- /*
- * free the request that has just been completed
- * in order not to pollute set->set_requests
- */
- list_del_init(&req->rq_set_chain);
- spin_lock(&req->rq_lock);
- req->rq_set = NULL;
- req->rq_invalid_rqset = 0;
- spin_unlock(&req->rq_lock);
-
- /* record rq_status to compute the final status later */
- if (req->rq_status != 0)
- set->set_rc = req->rq_status;
- ptlrpc_req_finished(req);
- } else {
- list_move_tail(&req->rq_set_chain, &comp_reqs);
- }
- }
-
- /*
- * move completed request at the head of list so it's easier for
- * caller to find them
- */
- list_splice(&comp_reqs, &set->set_requests);
-
- /* If we hit an error, we want to recover promptly. */
- return atomic_read(&set->set_remaining) == 0 || force_timer_recalc;
-}
-EXPORT_SYMBOL(ptlrpc_check_set);
-
-/**
- * Time out request \a req. is \a async_unlink is set, that means do not wait
- * until LNet actually confirms network buffer unlinking.
- * Return 1 if we should give up further retrying attempts or 0 otherwise.
- */
-int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
-{
- struct obd_import *imp = req->rq_import;
- int rc = 0;
-
- spin_lock(&req->rq_lock);
- req->rq_timedout = 1;
- spin_unlock(&req->rq_lock);
-
- DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
- req->rq_net_err ? "failed due to network error" :
- ((req->rq_real_sent == 0 ||
- req->rq_real_sent < req->rq_sent ||
- req->rq_real_sent >= req->rq_deadline) ?
- "timed out for sent delay" : "timed out for slow reply"),
- (s64)req->rq_sent, (s64)req->rq_real_sent);
-
- if (imp && obd_debug_peer_on_timeout)
- LNetDebugPeer(imp->imp_connection->c_peer);
-
- ptlrpc_unregister_reply(req, async_unlink);
- ptlrpc_unregister_bulk(req, async_unlink);
-
- if (obd_dump_on_timeout)
- libcfs_debug_dumplog();
-
- if (!imp) {
- DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
- return 1;
- }
-
- atomic_inc(&imp->imp_timeouts);
-
- /* The DLM server doesn't want recovery run on its imports. */
- if (imp->imp_dlm_fake)
- return 1;
-
- /*
- * If this request is for recovery or other primordial tasks,
- * then error it out here.
- */
- if (req->rq_ctx_init || req->rq_ctx_fini ||
- req->rq_send_state != LUSTRE_IMP_FULL ||
- imp->imp_obd->obd_no_recov) {
- DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
- ptlrpc_import_state_name(req->rq_send_state),
- ptlrpc_import_state_name(imp->imp_state));
- spin_lock(&req->rq_lock);
- req->rq_status = -ETIMEDOUT;
- req->rq_err = 1;
- spin_unlock(&req->rq_lock);
- return 1;
- }
-
- /*
- * if a request can't be resent we can't wait for an answer after
- * the timeout
- */
- if (ptlrpc_no_resend(req)) {
- DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
- rc = 1;
- }
-
- ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
-
- return rc;
-}
-
-/**
- * Time out all uncompleted requests in request set pointed by \a data
- * Called when wait_event_idle_timeout times out.
- */
-void ptlrpc_expired_set(struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- time64_t now = ktime_get_real_seconds();
-
- /* A timeout expired. See which reqs it applies to... */
- list_for_each_entry(req, &set->set_requests, rq_set_chain) {
-
- /* don't expire request waiting for context */
- if (req->rq_wait_ctx)
- continue;
-
- /* Request in-flight? */
- if (!((req->rq_phase == RQ_PHASE_RPC &&
- !req->rq_waiting && !req->rq_resend) ||
- (req->rq_phase == RQ_PHASE_BULK)))
- continue;
-
- if (req->rq_timedout || /* already dealt with */
- req->rq_deadline > now) /* not expired */
- continue;
-
- /*
- * Deal with this guy. Do it asynchronously to not block
- * ptlrpcd thread.
- */
- ptlrpc_expire_one_request(req, 1);
- }
-}
-
-/**
- * Sets rq_intr flag in \a req under spinlock.
- */
-void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
-{
- spin_lock(&req->rq_lock);
- req->rq_intr = 1;
- spin_unlock(&req->rq_lock);
-}
-EXPORT_SYMBOL(ptlrpc_mark_interrupted);
-
-/**
- * Interrupts (sets interrupted flag) all uncompleted requests in
- * a set \a data. Called when l_wait_event_abortable_timeout receives signal.
- */
-static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
-
- list_for_each_entry(req, &set->set_requests, rq_set_chain) {
-
- if (req->rq_phase != RQ_PHASE_RPC &&
- req->rq_phase != RQ_PHASE_UNREG_RPC)
- continue;
-
- ptlrpc_mark_interrupted(req);
- }
-}
-
-/**
- * Get the smallest timeout in the set; this does NOT set a timeout.
- */
-int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
-{
- time64_t now = ktime_get_real_seconds();
- int timeout = 0;
- struct ptlrpc_request *req;
- time64_t deadline;
-
- list_for_each_entry(req, &set->set_requests, rq_set_chain) {
-
- /* Request in-flight? */
- if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
- (req->rq_phase == RQ_PHASE_BULK) ||
- (req->rq_phase == RQ_PHASE_NEW)))
- continue;
-
- /* Already timed out. */
- if (req->rq_timedout)
- continue;
-
- /* Waiting for ctx. */
- if (req->rq_wait_ctx)
- continue;
-
- if (req->rq_phase == RQ_PHASE_NEW)
- deadline = req->rq_sent;
- else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
- deadline = req->rq_sent;
- else
- deadline = req->rq_sent + req->rq_timeout;
-
- if (deadline <= now) /* actually expired already */
- timeout = 1; /* ASAP */
- else if (timeout == 0 || timeout > deadline - now)
- timeout = deadline - now;
- }
- return timeout;
-}
-
-/**
- * Send all unset request from the set and then wait until all
- * requests in the set complete (either get a reply, timeout, get an
- * error or otherwise be interrupted).
- * Returns 0 on success or error code otherwise.
- */
-int ptlrpc_set_wait(struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- int rc, timeout;
-
- if (set->set_producer)
- (void)ptlrpc_set_producer(set);
- else
- list_for_each_entry(req, &set->set_requests, rq_set_chain) {
- if (req->rq_phase == RQ_PHASE_NEW)
- (void)ptlrpc_send_new_req(req);
- }
-
- if (list_empty(&set->set_requests))
- return 0;
-
- do {
- timeout = ptlrpc_set_next_timeout(set);
-
- /*
- * wait until all complete, interrupted, or an in-flight
- * req times out
- */
- CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
- set, timeout);
-
- if (timeout == 0 && !signal_pending(current)) {
- /*
- * No requests are in-flight (ether timed out
- * or delayed), so we can allow interrupts.
- * We still want to block for a limited time,
- * so we allow interrupts during the timeout.
- */
- rc = l_wait_event_abortable_timeout(set->set_waitq,
- ptlrpc_check_set(NULL, set),
- HZ);
- if (rc == 0) {
- rc = -ETIMEDOUT;
- ptlrpc_expired_set(set);
- } else if (rc < 0) {
- rc = -EINTR;
- ptlrpc_interrupted_set(set);
- } else
- rc = 0;
- } else {
- /*
- * At least one request is in flight, so no
- * interrupts are allowed. Wait until all
- * complete, or an in-flight req times out.
- */
- rc = wait_event_idle_timeout(set->set_waitq,
- ptlrpc_check_set(NULL, set),
- (timeout ? timeout : 1) * HZ);
- if (rc == 0) {
- ptlrpc_expired_set(set);
- rc = -ETIMEDOUT;
- /*
- * LU-769 - if we ignored the signal
- * because it was already pending when
- * we started, we need to handle it
- * now or we risk it being ignored
- * forever
- */
- if (l_fatal_signal_pending(current))
- ptlrpc_interrupted_set(set);
- } else
- rc = 0;
- }
-
- LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
-
- /*
- * -EINTR => all requests have been flagged rq_intr so next
- * check completes.
- * -ETIMEDOUT => someone timed out. When all reqs have
- * timed out, signals are enabled allowing completion with
- * EINTR.
- * I don't really care if we go once more round the loop in
- * the error cases -eeb.
- */
- if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
- list_for_each_entry(req, &set->set_requests, rq_set_chain) {
- spin_lock(&req->rq_lock);
- req->rq_invalid_rqset = 1;
- spin_unlock(&req->rq_lock);
- }
- }
- } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
-
- LASSERT(atomic_read(&set->set_remaining) == 0);
-
- rc = set->set_rc; /* rq_status of already freed requests if any */
- list_for_each_entry(req, &set->set_requests, rq_set_chain) {
- LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
- if (req->rq_status != 0)
- rc = req->rq_status;
- }
-
- if (set->set_interpret) {
- int (*interpreter)(struct ptlrpc_request_set *set, void *, int) =
- set->set_interpret;
- rc = interpreter(set, set->set_arg, rc);
- } else {
- struct ptlrpc_set_cbdata *cbdata, *n;
- int err;
-
- list_for_each_entry_safe(cbdata, n,
- &set->set_cblist, psc_item) {
- list_del_init(&cbdata->psc_item);
- err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
- if (err && !rc)
- rc = err;
- kfree(cbdata);
- }
- }
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_set_wait);
-
-/**
- * Helper function for request freeing.
- * Called when request count reached zero and request needs to be freed.
- * Removes request from all sorts of sending/replay lists it might be on,
- * frees network buffers if any are present.
- * If \a locked is set, that means caller is already holding import imp_lock
- * and so we no longer need to reobtain it (for certain lists manipulations)
- */
-static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
-{
- if (!request)
- return;
- LASSERT(!request->rq_srv_req);
- LASSERT(!request->rq_export);
- LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
- LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
- LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
- LASSERTF(!request->rq_replay, "req %p\n", request);
-
- req_capsule_fini(&request->rq_pill);
-
- /*
- * We must take it off the imp_replay_list first. Otherwise, we'll set
- * request->rq_reqmsg to NULL while osc_close is dereferencing it.
- */
- if (request->rq_import) {
- if (!locked)
- spin_lock(&request->rq_import->imp_lock);
- list_del_init(&request->rq_replay_list);
- list_del_init(&request->rq_unreplied_list);
- if (!locked)
- spin_unlock(&request->rq_import->imp_lock);
- }
- LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
-
- if (atomic_read(&request->rq_refcount) != 0) {
- DEBUG_REQ(D_ERROR, request,
- "freeing request with nonzero refcount");
- LBUG();
- }
-
- if (request->rq_repbuf)
- sptlrpc_cli_free_repbuf(request);
-
- if (request->rq_import) {
- class_import_put(request->rq_import);
- request->rq_import = NULL;
- }
- if (request->rq_bulk)
- ptlrpc_free_bulk(request->rq_bulk);
-
- if (request->rq_reqbuf || request->rq_clrbuf)
- sptlrpc_cli_free_reqbuf(request);
-
- if (request->rq_cli_ctx)
- sptlrpc_req_put_ctx(request, !locked);
-
- if (request->rq_pool)
- __ptlrpc_free_req_to_pool(request);
- else
- ptlrpc_request_cache_free(request);
-}
-
-/**
- * Helper function
- * Drops one reference count for request \a request.
- * \a locked set indicates that caller holds import imp_lock.
- * Frees the request when reference count reaches zero.
- */
-static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
-{
- if (!request)
- return 1;
-
- if (request == LP_POISON ||
- request->rq_reqmsg == LP_POISON) {
- CERROR("dereferencing freed request (bug 575)\n");
- LBUG();
- return 1;
- }
-
- DEBUG_REQ(D_INFO, request, "refcount now %u",
- atomic_read(&request->rq_refcount) - 1);
-
- if (atomic_dec_and_test(&request->rq_refcount)) {
- __ptlrpc_free_req(request, locked);
- return 1;
- }
-
- return 0;
-}
-
-/**
- * Drops one reference count for a request.
- */
-void ptlrpc_req_finished(struct ptlrpc_request *request)
-{
- __ptlrpc_req_finished(request, 0);
-}
-EXPORT_SYMBOL(ptlrpc_req_finished);
-
-/**
- * Returns xid of a \a request
- */
-__u64 ptlrpc_req_xid(struct ptlrpc_request *request)
-{
- return request->rq_xid;
-}
-EXPORT_SYMBOL(ptlrpc_req_xid);
-
-/**
- * Disengage the client's reply buffer from the network
- * NB does _NOT_ unregister any client-side bulk.
- * IDEMPOTENT, but _not_ safe against concurrent callers.
- * The request owner (i.e. the thread doing the I/O) must call...
- * Returns 0 on success or 1 if unregistering cannot be made.
- */
-static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
-{
- int rc;
- wait_queue_head_t *wq;
-
- /* Might sleep. */
- LASSERT(!in_interrupt());
-
- /* Let's setup deadline for reply unlink. */
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
- request->rq_reply_deadline =
- ktime_get_real_seconds() + LONG_UNLINK;
-
- /* Nothing left to do. */
- if (!ptlrpc_client_recv_or_unlink(request))
- return 1;
-
- LNetMDUnlink(request->rq_reply_md_h);
-
- /* Let's check it once again. */
- if (!ptlrpc_client_recv_or_unlink(request))
- return 1;
-
- /* Move to "Unregistering" phase as reply was not unlinked yet. */
- ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
-
- /* Do not wait for unlink to finish. */
- if (async)
- return 0;
-
- /*
- * We have to wait_event_idle_timeout() whatever the result, to give liblustre
- * a chance to run reply_in_callback(), and to make sure we've
- * unlinked before returning a req to the pool.
- */
- if (request->rq_set)
- wq = &request->rq_set->set_waitq;
- else
- wq = &request->rq_reply_waitq;
-
- for (;;) {
- /*
- * Network access will complete in finite time but the HUGE
- * timeout lets us CWARN for visibility of sluggish NALs
- */
- int cnt = 0;
- while (cnt < LONG_UNLINK &&
- (rc = wait_event_idle_timeout(*wq,
- !ptlrpc_client_recv_or_unlink(request),
- HZ)) == 0)
- cnt += 1;
- if (rc > 0) {
- ptlrpc_rqphase_move(request, request->rq_next_phase);
- return 1;
- }
-
- DEBUG_REQ(D_WARNING, request,
- "Unexpectedly long timeout receiving_reply=%d req_ulinked=%d reply_unlinked=%d",
- request->rq_receiving_reply,
- request->rq_req_unlinked,
- request->rq_reply_unlinked);
- }
- return 0;
-}
-
-static void ptlrpc_free_request(struct ptlrpc_request *req)
-{
- spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
-
- if (req->rq_commit_cb)
- req->rq_commit_cb(req);
- list_del_init(&req->rq_replay_list);
-
- __ptlrpc_req_finished(req, 1);
-}
-
-/**
- * the request is committed and dropped from the replay list of its import
- */
-void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
-{
- struct obd_import *imp = req->rq_import;
-
- spin_lock(&imp->imp_lock);
- if (list_empty(&req->rq_replay_list)) {
- spin_unlock(&imp->imp_lock);
- return;
- }
-
- if (force || req->rq_transno <= imp->imp_peer_committed_transno)
- ptlrpc_free_request(req);
-
- spin_unlock(&imp->imp_lock);
-}
-EXPORT_SYMBOL(ptlrpc_request_committed);
-
-/**
- * Iterates through replay_list on import and prunes
- * all requests have transno smaller than last_committed for the
- * import and don't have rq_replay set.
- * Since requests are sorted in transno order, stops when meeting first
- * transno bigger than last_committed.
- * caller must hold imp->imp_lock
- */
-void ptlrpc_free_committed(struct obd_import *imp)
-{
- struct ptlrpc_request *req, *saved;
- struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
- bool skip_committed_list = true;
-
- assert_spin_locked(&imp->imp_lock);
-
- if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
- imp->imp_generation == imp->imp_last_generation_checked) {
- CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
- imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
- return;
- }
- CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
- imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
- imp->imp_generation);
-
- if (imp->imp_generation != imp->imp_last_generation_checked ||
- !imp->imp_last_transno_checked)
- skip_committed_list = false;
-
- imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
- imp->imp_last_generation_checked = imp->imp_generation;
-
- list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
- rq_replay_list) {
- /* XXX ok to remove when 1357 resolved - rread 05/29/03 */
- LASSERT(req != last_req);
- last_req = req;
-
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_EMERG, req, "zero transno during replay");
- LBUG();
- }
- if (req->rq_import_generation < imp->imp_generation) {
- DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
- goto free_req;
- }
-
- /* not yet committed */
- if (req->rq_transno > imp->imp_peer_committed_transno) {
- DEBUG_REQ(D_RPCTRACE, req, "stopping search");
- break;
- }
-
- if (req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
- list_move_tail(&req->rq_replay_list,
- &imp->imp_committed_list);
- continue;
- }
-
- DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
- imp->imp_peer_committed_transno);
-free_req:
- ptlrpc_free_request(req);
- }
- if (skip_committed_list)
- return;
-
- list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
- rq_replay_list) {
- LASSERT(req->rq_transno != 0);
- if (req->rq_import_generation < imp->imp_generation ||
- !req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
- req->rq_import_generation <
- imp->imp_generation ? "stale" : "closed");
-
- if (imp->imp_replay_cursor == &req->rq_replay_list)
- imp->imp_replay_cursor =
- req->rq_replay_list.next;
-
- ptlrpc_free_request(req);
- }
- }
-}
-
-/**
- * Schedule previously sent request for resend.
- * For bulk requests we assign new xid (to avoid problems with
- * lost replies and therefore several transfers landing into same buffer
- * from different sending attempts).
- */
-void ptlrpc_resend_req(struct ptlrpc_request *req)
-{
- DEBUG_REQ(D_HA, req, "going to resend");
- spin_lock(&req->rq_lock);
-
- /*
- * Request got reply but linked to the import list still.
- * Let ptlrpc_check_set() to process it.
- */
- if (ptlrpc_client_replied(req)) {
- spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_HA, req, "it has reply, so skip it");
- return;
- }
-
- lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
- req->rq_status = -EAGAIN;
-
- req->rq_resend = 1;
- req->rq_net_err = 0;
- req->rq_timedout = 0;
- ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
-}
-
-/**
- * Grab additional reference on a request \a req
- */
-struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
-{
- atomic_inc(&req->rq_refcount);
- return req;
-}
-EXPORT_SYMBOL(ptlrpc_request_addref);
-
-/**
- * Add a request to import replay_list.
- * Must be called under imp_lock
- */
-void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
- struct obd_import *imp)
-{
- struct ptlrpc_request *iter;
- assert_spin_locked(&imp->imp_lock);
-
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
- LBUG();
- }
-
- /*
- * clear this for new requests that were resent as well
- * as resent replayed requests.
- */
- lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
-
- /* don't re-add requests that have been replayed */
- if (!list_empty(&req->rq_replay_list))
- return;
-
- lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
-
- spin_lock(&req->rq_lock);
- req->rq_resend = 0;
- spin_unlock(&req->rq_lock);
-
- LASSERT(imp->imp_replayable);
- /* Balanced in ptlrpc_free_committed, usually. */
- ptlrpc_request_addref(req);
- list_for_each_entry_reverse(iter, &imp->imp_replay_list, rq_replay_list) {
- /*
- * We may have duplicate transnos if we create and then
- * open a file, or for closes retained if to match creating
- * opens, so use req->rq_xid as a secondary key.
- * (See bugs 684, 685, and 428.)
- * XXX no longer needed, but all opens need transnos!
- */
- if (iter->rq_transno > req->rq_transno)
- continue;
-
- if (iter->rq_transno == req->rq_transno) {
- LASSERT(iter->rq_xid != req->rq_xid);
- if (iter->rq_xid > req->rq_xid)
- continue;
- }
-
- list_add(&req->rq_replay_list, &iter->rq_replay_list);
- return;
- }
-
- list_add(&req->rq_replay_list, &imp->imp_replay_list);
-}
-
-/**
- * Send request and wait until it completes.
- * Returns request processing status.
- */
-int ptlrpc_queue_wait(struct ptlrpc_request *req)
-{
- struct ptlrpc_request_set *set;
- int rc;
-
- LASSERT(!req->rq_set);
- LASSERT(!req->rq_receiving_reply);
-
- set = ptlrpc_prep_set();
- if (!set) {
- CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
- return -ENOMEM;
- }
-
- /* for distributed debugging */
- lustre_msg_set_status(req->rq_reqmsg, current_pid());
-
- /* add a ref for the set (see comment in ptlrpc_set_add_req) */
- ptlrpc_request_addref(req);
- ptlrpc_set_add_req(set, req);
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_queue_wait);
-
-/**
- * Callback used for replayed requests reply processing.
- * In case of successful reply calls registered request replay callback.
- * In case of error restart replay process.
- */
-static int ptlrpc_replay_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *data, int rc)
-{
- struct ptlrpc_replay_async_args *aa = data;
- struct obd_import *imp = req->rq_import;
-
- atomic_dec(&imp->imp_replay_inflight);
-
- /*
- * Note: if it is bulk replay (MDS-MDS replay), then even if
- * server got the request, but bulk transfer timeout, let's
- * replay the bulk req again
- */
- if (!ptlrpc_client_replied(req) ||
- (req->rq_bulk &&
- lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
- DEBUG_REQ(D_ERROR, req, "request replay timed out.\n");
- rc = -ETIMEDOUT;
- goto out;
- }
-
- if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
- (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
- lustre_msg_get_status(req->rq_repmsg) == -ENODEV)) {
- rc = lustre_msg_get_status(req->rq_repmsg);
- goto out;
- }
-
- /** VBR: check version failure */
- if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
- /** replay was failed due to version mismatch */
- DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
- spin_lock(&imp->imp_lock);
- imp->imp_vbr_failed = 1;
- imp->imp_no_lock_replay = 1;
- spin_unlock(&imp->imp_lock);
- lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
- } else {
- /** The transno had better not change over replay. */
- LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
- lustre_msg_get_transno(req->rq_repmsg) ||
- lustre_msg_get_transno(req->rq_repmsg) == 0,
- "%#llx/%#llx\n",
- lustre_msg_get_transno(req->rq_reqmsg),
- lustre_msg_get_transno(req->rq_repmsg));
- }
-
- spin_lock(&imp->imp_lock);
- /** if replays by version then gap occur on server, no trust to locks */
- if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
- imp->imp_no_lock_replay = 1;
- imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
- spin_unlock(&imp->imp_lock);
- LASSERT(imp->imp_last_replay_transno);
-
- /* transaction number shouldn't be bigger than the latest replayed */
- if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
- DEBUG_REQ(D_ERROR, req,
- "Reported transno %llu is bigger than the replayed one: %llu",
- req->rq_transno,
- lustre_msg_get_transno(req->rq_reqmsg));
- rc = -EINVAL;
- goto out;
- }
-
- DEBUG_REQ(D_HA, req, "got rep");
-
- /* let the callback do fixups, possibly including in the request */
- if (req->rq_replay_cb)
- req->rq_replay_cb(req);
-
- if (ptlrpc_client_replied(req) &&
- lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
- DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
- lustre_msg_get_status(req->rq_repmsg),
- aa->praa_old_status);
- } else {
- /* Put it back for re-replay. */
- lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
- }
-
- /*
- * Errors while replay can set transno to 0, but
- * imp_last_replay_transno shouldn't be set to 0 anyway
- */
- if (req->rq_transno == 0)
- CERROR("Transno is 0 during replay!\n");
-
- /* continue with recovery */
- rc = ptlrpc_import_recovery_state_machine(imp);
- out:
- req->rq_send_state = aa->praa_old_state;
-
- if (rc != 0)
- /* this replay failed, so restart recovery */
- ptlrpc_connect_import(imp);
-
- return rc;
-}
-
-/**
- * Prepares and queues request for replay.
- * Adds it to ptlrpcd queue for actual sending.
- * Returns 0 on success.
- */
-int ptlrpc_replay_req(struct ptlrpc_request *req)
-{
- struct ptlrpc_replay_async_args *aa;
-
- LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
-
- LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- memset(aa, 0, sizeof(*aa));
-
- /* Prepare request to be resent with ptlrpcd */
- aa->praa_old_state = req->rq_send_state;
- req->rq_send_state = LUSTRE_IMP_REPLAY;
- req->rq_phase = RQ_PHASE_NEW;
- req->rq_next_phase = RQ_PHASE_UNDEFINED;
- if (req->rq_repmsg)
- aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
- req->rq_status = 0;
- req->rq_interpret_reply = ptlrpc_replay_interpret;
- /* Readjust the timeout for current conditions */
- ptlrpc_at_set_req_timeout(req);
-
- /*
- * Tell server the net_latency, so the server can calculate how long
- * it should wait for next replay
- */
- lustre_msg_set_service_time(req->rq_reqmsg,
- ptlrpc_at_get_net_latency(req));
- DEBUG_REQ(D_HA, req, "REPLAY");
-
- atomic_inc(&req->rq_import->imp_replay_inflight);
- ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
-
- ptlrpcd_add_req(req);
- return 0;
-}
-
-/**
- * Aborts all in-flight request on import \a imp sending and delayed lists
- */
-void ptlrpc_abort_inflight(struct obd_import *imp)
-{
- struct ptlrpc_request *req, *n;
-
- /*
- * Make sure that no new requests get processed for this import.
- * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
- * this flag and then putting requests on sending_list or delayed_list.
- */
- spin_lock(&imp->imp_lock);
-
- /*
- * XXX locking? Maybe we should remove each request with the list
- * locked? Also, how do we know if the requests on the list are
- * being freed at this time?
- */
- list_for_each_entry_safe(req, n, &imp->imp_sending_list, rq_list) {
- DEBUG_REQ(D_RPCTRACE, req, "inflight");
-
- spin_lock(&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
- req->rq_status = -EIO;
- ptlrpc_client_wake_req(req);
- }
- spin_unlock(&req->rq_lock);
- }
-
- list_for_each_entry_safe(req, n, &imp->imp_delayed_list, rq_list) {
- DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
-
- spin_lock(&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
- req->rq_status = -EIO;
- ptlrpc_client_wake_req(req);
- }
- spin_unlock(&req->rq_lock);
- }
-
- /*
- * Last chance to free reqs left on the replay list, but we
- * will still leak reqs that haven't committed.
- */
- if (imp->imp_replayable)
- ptlrpc_free_committed(imp);
-
- spin_unlock(&imp->imp_lock);
-}
-
-/**
- * Abort all uncompleted requests in request set \a set
- */
-void ptlrpc_abort_set(struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req, *tmp;
-
- list_for_each_entry_safe(req, tmp, &set->set_requests, rq_set_chain) {
- spin_lock(&req->rq_lock);
- if (req->rq_phase != RQ_PHASE_RPC) {
- spin_unlock(&req->rq_lock);
- continue;
- }
-
- req->rq_err = 1;
- req->rq_status = -EINTR;
- ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
- }
-}
-
-static __u64 ptlrpc_last_xid;
-static spinlock_t ptlrpc_last_xid_lock;
-
-/**
- * Initialize the XID for the node. This is common among all requests on
- * this node, and only requires the property that it is monotonically
- * increasing. It does not need to be sequential. Since this is also used
- * as the RDMA match bits, it is important that a single client NOT have
- * the same match bits for two different in-flight requests, hence we do
- * NOT want to have an XID per target or similar.
- *
- * To avoid an unlikely collision between match bits after a client reboot
- * (which would deliver old data into the wrong RDMA buffer) initialize
- * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
- * If the time is clearly incorrect, we instead use a 62-bit random number.
- * In the worst case the random number will overflow 1M RPCs per second in
- * 9133 years, or permutations thereof.
- */
-#define YEAR_2004 (1ULL << 30)
-void ptlrpc_init_xid(void)
-{
- time64_t now = ktime_get_real_seconds();
-
- spin_lock_init(&ptlrpc_last_xid_lock);
- if (now < YEAR_2004) {
- get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
- ptlrpc_last_xid >>= 2;
- ptlrpc_last_xid |= (1ULL << 61);
- } else {
- ptlrpc_last_xid = (__u64)now << 20;
- }
-
- /* Always need to be aligned to a power-of-two for multi-bulk BRW */
- BUILD_BUG_ON(((PTLRPC_BULK_OPS_COUNT - 1) & PTLRPC_BULK_OPS_COUNT) != 0);
- ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
-}
-
-/**
- * Increase xid and returns resulting new value to the caller.
- *
- * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
- * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
- * itself uses the last bulk xid needed, so the server can determine the
- * the number of bulk transfers from the RPC XID and a bitmask. The starting
- * xid must align to a power-of-two value.
- *
- * This is assumed to be true due to the initial ptlrpc_last_xid
- * value also being initialized to a power-of-two value. LU-1431
- */
-__u64 ptlrpc_next_xid(void)
-{
- __u64 next;
-
- spin_lock(&ptlrpc_last_xid_lock);
- next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
- ptlrpc_last_xid = next;
- spin_unlock(&ptlrpc_last_xid_lock);
-
- return next;
-}
-
-/**
- * If request has a new allocated XID (new request or EINPROGRESS resend),
- * use this XID as matchbits of bulk, otherwise allocate a new matchbits for
- * request to ensure previous bulk fails and avoid problems with lost replies
- * and therefore several transfers landing into the same buffer from different
- * sending attempts.
- */
-void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
-{
- struct ptlrpc_bulk_desc *bd = req->rq_bulk;
-
- LASSERT(bd);
-
- /*
- * Generate new matchbits for all resend requests, including
- * resend replay.
- */
- if (req->rq_resend) {
- u64 old_mbits = req->rq_mbits;
-
- /*
- * First time resend on -EINPROGRESS will generate new xid,
- * so we can actually use the rq_xid as rq_mbits in such case,
- * however, it's bit hard to distinguish such resend with a
- * 'resend for the -EINPROGRESS resend'. To make it simple,
- * we opt to generate mbits for all resend cases.
- */
- if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_BULK_MBITS)) {
- req->rq_mbits = ptlrpc_next_xid();
- } else {
- /* old version transfers rq_xid to peer as matchbits */
- spin_lock(&req->rq_import->imp_lock);
- list_del_init(&req->rq_unreplied_list);
- ptlrpc_assign_next_xid_nolock(req);
- spin_unlock(&req->rq_import->imp_lock);
- req->rq_mbits = req->rq_xid;
- }
-
- CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
- old_mbits, req->rq_mbits);
- } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
- /* Request being sent first time, use xid as matchbits. */
- req->rq_mbits = req->rq_xid;
- } else {
- /*
- * Replay request, xid and matchbits have already been
- * correctly assigned.
- */
- return;
- }
-
- /*
- * For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
- * that server can infer the number of bulks that were prepared,
- * see LU-1431
- */
- req->rq_mbits += DIV_ROUND_UP(bd->bd_iov_count, LNET_MAX_IOV) - 1;
-}
-
-/**
- * Get a glimpse at what next xid value might have been.
- * Returns possible next xid.
- */
-__u64 ptlrpc_sample_next_xid(void)
-{
-#if BITS_PER_LONG == 32
- /* need to avoid possible word tearing on 32-bit systems */
- __u64 next;
-
- spin_lock(&ptlrpc_last_xid_lock);
- next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
- spin_unlock(&ptlrpc_last_xid_lock);
-
- return next;
-#else
- /* No need to lock, since returned value is racy anyways */
- return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
-#endif
-}
-EXPORT_SYMBOL(ptlrpc_sample_next_xid);
-
-/**
- * Functions for operating ptlrpc workers.
- *
- * A ptlrpc work is a function which will be running inside ptlrpc context.
- * The callback shouldn't sleep otherwise it will block that ptlrpcd thread.
- *
- * 1. after a work is created, it can be used many times, that is:
- * handler = ptlrpcd_alloc_work();
- * ptlrpcd_queue_work();
- *
- * queue it again when necessary:
- * ptlrpcd_queue_work();
- * ptlrpcd_destroy_work();
- * 2. ptlrpcd_queue_work() can be called by multiple processes meanwhile, but
- * it will only be queued once in any time. Also as its name implies, it may
- * have delay before it really runs by ptlrpcd thread.
- */
-struct ptlrpc_work_async_args {
- int (*cb)(const struct lu_env *, void *);
- void *cbdata;
-};
-
-static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
-{
- /* re-initialize the req */
- req->rq_timeout = obd_timeout;
- req->rq_sent = ktime_get_real_seconds();
- req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_phase = RQ_PHASE_INTERPRET;
- req->rq_next_phase = RQ_PHASE_COMPLETE;
- req->rq_xid = ptlrpc_next_xid();
- req->rq_import_generation = req->rq_import->imp_generation;
-
- ptlrpcd_add_req(req);
-}
-
-static int work_interpreter(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
-{
- struct ptlrpc_work_async_args *arg = data;
-
- LASSERT(ptlrpcd_check_work(req));
-
- rc = arg->cb(env, arg->cbdata);
-
- list_del_init(&req->rq_set_chain);
- req->rq_set = NULL;
-
- if (atomic_dec_return(&req->rq_refcount) > 1) {
- atomic_set(&req->rq_refcount, 2);
- ptlrpcd_add_work_req(req);
- }
- return rc;
-}
-
-static int worker_format;
-
-static int ptlrpcd_check_work(struct ptlrpc_request *req)
-{
- return req->rq_pill.rc_fmt == (void *)&worker_format;
-}
-
-/**
- * Create a work for ptlrpc.
- */
-void *ptlrpcd_alloc_work(struct obd_import *imp,
- int (*cb)(const struct lu_env *, void *), void *cbdata)
-{
- struct ptlrpc_request *req = NULL;
- struct ptlrpc_work_async_args *args;
-
- might_sleep();
-
- if (!cb)
- return ERR_PTR(-EINVAL);
-
- /* copy some code from deprecated fakereq. */
- req = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (!req) {
- CERROR("ptlrpc: run out of memory!\n");
- return ERR_PTR(-ENOMEM);
- }
-
- ptlrpc_cli_req_init(req);
-
- req->rq_send_state = LUSTRE_IMP_FULL;
- req->rq_type = PTL_RPC_MSG_REQUEST;
- req->rq_import = class_import_get(imp);
- req->rq_interpret_reply = work_interpreter;
- /* don't want reply */
- req->rq_no_delay = 1;
- req->rq_no_resend = 1;
- req->rq_pill.rc_fmt = (void *)&worker_format;
-
- BUILD_BUG_ON(sizeof(*args) > sizeof(req->rq_async_args));
- args = ptlrpc_req_async_args(req);
- args->cb = cb;
- args->cbdata = cbdata;
-
- return req;
-}
-EXPORT_SYMBOL(ptlrpcd_alloc_work);
-
-void ptlrpcd_destroy_work(void *handler)
-{
- struct ptlrpc_request *req = handler;
-
- if (req)
- ptlrpc_req_finished(req);
-}
-EXPORT_SYMBOL(ptlrpcd_destroy_work);
-
-int ptlrpcd_queue_work(void *handler)
-{
- struct ptlrpc_request *req = handler;
-
- /*
- * Check if the req is already being queued.
- *
- * Here comes a trick: it lacks a way of checking if a req is being
- * processed reliably in ptlrpc. Here I have to use refcount of req
- * for this purpose. This is okay because the caller should use this
- * req as opaque data. - Jinshan
- */
- LASSERT(atomic_read(&req->rq_refcount) > 0);
- if (atomic_inc_return(&req->rq_refcount) == 2)
- ptlrpcd_add_work_req(req);
- return 0;
-}
-EXPORT_SYMBOL(ptlrpcd_queue_work);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
deleted file mode 100644
index dfdb4587d49d..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ /dev/null
@@ -1,234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-
-#include "ptlrpc_internal.h"
-
-static struct cfs_hash *conn_hash;
-static struct cfs_hash_ops conn_hash_ops;
-
-struct ptlrpc_connection *
-ptlrpc_connection_get(struct lnet_process_id peer, lnet_nid_t self,
- struct obd_uuid *uuid)
-{
- struct ptlrpc_connection *conn, *conn2;
-
- conn = cfs_hash_lookup(conn_hash, &peer);
- if (conn)
- goto out;
-
- conn = kzalloc(sizeof(*conn), GFP_NOFS);
- if (!conn)
- return NULL;
-
- conn->c_peer = peer;
- conn->c_self = self;
- INIT_HLIST_NODE(&conn->c_hash);
- atomic_set(&conn->c_refcount, 1);
- if (uuid)
- obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);
-
- /*
- * Add the newly created conn to the hash, on key collision we
- * lost a racing addition and must destroy our newly allocated
- * connection. The object which exists in the has will be
- * returned and may be compared against out object.
- */
- /* In the function below, .hs_keycmp resolves to
- * conn_keycmp()
- */
- /* coverity[overrun-buffer-val] */
- conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
- if (conn != conn2) {
- kfree(conn);
- conn = conn2;
- }
-out:
- CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
- return conn;
-}
-
-int ptlrpc_connection_put(struct ptlrpc_connection *conn)
-{
- int rc = 0;
-
- if (!conn)
- return rc;
-
- LASSERT(atomic_read(&conn->c_refcount) > 1);
-
- /*
- * We do not remove connection from hashtable and
- * do not free it even if last caller released ref,
- * as we want to have it cached for the case it is
- * needed again.
- *
- * Deallocating it and later creating new connection
- * again would be wastful. This way we also avoid
- * expensive locking to protect things from get/put
- * race when found cached connection is freed by
- * ptlrpc_connection_put().
- *
- * It will be freed later in module unload time,
- * when ptlrpc_connection_fini()->lh_exit->conn_exit()
- * path is called.
- */
- if (atomic_dec_return(&conn->c_refcount) == 1)
- rc = 1;
-
- CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
- conn, atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
-
- return rc;
-}
-
-struct ptlrpc_connection *
-ptlrpc_connection_addref(struct ptlrpc_connection *conn)
-{
- atomic_inc(&conn->c_refcount);
- CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
-
- return conn;
-}
-
-int ptlrpc_connection_init(void)
-{
- conn_hash = cfs_hash_create("CONN_HASH",
- HASH_CONN_CUR_BITS,
- HASH_CONN_MAX_BITS,
- HASH_CONN_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &conn_hash_ops, CFS_HASH_DEFAULT);
- if (!conn_hash)
- return -ENOMEM;
-
- return 0;
-}
-
-void ptlrpc_connection_fini(void)
-{
- cfs_hash_putref(conn_hash);
-}
-
-/*
- * Hash operations for net_peer<->connection
- */
-static unsigned int
-conn_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
-{
- return cfs_hash_djb2_hash(key, sizeof(struct lnet_process_id), mask);
-}
-
-static int
-conn_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
- const struct lnet_process_id *conn_key;
-
- LASSERT(key);
- conn_key = key;
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
-
- return conn_key->nid == conn->c_peer.nid &&
- conn_key->pid == conn->c_peer.pid;
-}
-
-static void *
-conn_key(struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
-
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- return &conn->c_peer;
-}
-
-static void *
-conn_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct ptlrpc_connection, c_hash);
-}
-
-static void
-conn_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
-
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- atomic_inc(&conn->c_refcount);
-}
-
-static void
-conn_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
-
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- atomic_dec(&conn->c_refcount);
-}
-
-static void
-conn_exit(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
-
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- /*
- * Nothing should be left. Connection user put it and
- * connection also was deleted from table by this time
- * so we should have 0 refs.
- */
- LASSERTF(atomic_read(&conn->c_refcount) == 0,
- "Busy connection with %d refs\n",
- atomic_read(&conn->c_refcount));
- kfree(conn);
-}
-
-static struct cfs_hash_ops conn_hash_ops = {
- .hs_hash = conn_hashfn,
- .hs_keycmp = conn_keycmp,
- .hs_key = conn_key,
- .hs_object = conn_object,
- .hs_get = conn_get,
- .hs_put_locked = conn_put_locked,
- .hs_exit = conn_exit,
-};
diff --git a/drivers/staging/lustre/lustre/ptlrpc/errno.c b/drivers/staging/lustre/lustre/ptlrpc/errno.c
deleted file mode 100644
index 54f0c36dc2bd..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/errno.c
+++ /dev/null
@@ -1,381 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.txt
- *
- * GPL HEADER END
- */
-/*
- * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved.
- *
- * Copyright (c) 2013, Intel Corporation.
- */
-
-#include <linux/libcfs/libcfs.h>
-#include <lustre_errno.h>
-
-/*
- * The two translation tables below must define a one-to-one mapping between
- * host and network errnos.
- *
- * EWOULDBLOCK is equal to EAGAIN on all architectures except for parisc, which
- * appears irrelevant. Thus, existing references to EWOULDBLOCK are fine.
- *
- * EDEADLOCK is equal to EDEADLK on x86 but not on sparc, at least. A sparc
- * host has no context-free way to determine if a LUSTRE_EDEADLK represents an
- * EDEADLK or an EDEADLOCK. Therefore, all existing references to EDEADLOCK
- * that need to be transferred on wire have been replaced with EDEADLK.
- */
-static int lustre_errno_hton_mapping[] = {
- [EPERM] = LUSTRE_EPERM,
- [ENOENT] = LUSTRE_ENOENT,
- [ESRCH] = LUSTRE_ESRCH,
- [EINTR] = LUSTRE_EINTR,
- [EIO] = LUSTRE_EIO,
- [ENXIO] = LUSTRE_ENXIO,
- [E2BIG] = LUSTRE_E2BIG,
- [ENOEXEC] = LUSTRE_ENOEXEC,
- [EBADF] = LUSTRE_EBADF,
- [ECHILD] = LUSTRE_ECHILD,
- [EAGAIN] = LUSTRE_EAGAIN,
- [ENOMEM] = LUSTRE_ENOMEM,
- [EACCES] = LUSTRE_EACCES,
- [EFAULT] = LUSTRE_EFAULT,
- [ENOTBLK] = LUSTRE_ENOTBLK,
- [EBUSY] = LUSTRE_EBUSY,
- [EEXIST] = LUSTRE_EEXIST,
- [EXDEV] = LUSTRE_EXDEV,
- [ENODEV] = LUSTRE_ENODEV,
- [ENOTDIR] = LUSTRE_ENOTDIR,
- [EISDIR] = LUSTRE_EISDIR,
- [EINVAL] = LUSTRE_EINVAL,
- [ENFILE] = LUSTRE_ENFILE,
- [EMFILE] = LUSTRE_EMFILE,
- [ENOTTY] = LUSTRE_ENOTTY,
- [ETXTBSY] = LUSTRE_ETXTBSY,
- [EFBIG] = LUSTRE_EFBIG,
- [ENOSPC] = LUSTRE_ENOSPC,
- [ESPIPE] = LUSTRE_ESPIPE,
- [EROFS] = LUSTRE_EROFS,
- [EMLINK] = LUSTRE_EMLINK,
- [EPIPE] = LUSTRE_EPIPE,
- [EDOM] = LUSTRE_EDOM,
- [ERANGE] = LUSTRE_ERANGE,
- [EDEADLK] = LUSTRE_EDEADLK,
- [ENAMETOOLONG] = LUSTRE_ENAMETOOLONG,
- [ENOLCK] = LUSTRE_ENOLCK,
- [ENOSYS] = LUSTRE_ENOSYS,
- [ENOTEMPTY] = LUSTRE_ENOTEMPTY,
- [ELOOP] = LUSTRE_ELOOP,
- [ENOMSG] = LUSTRE_ENOMSG,
- [EIDRM] = LUSTRE_EIDRM,
- [ECHRNG] = LUSTRE_ECHRNG,
- [EL2NSYNC] = LUSTRE_EL2NSYNC,
- [EL3HLT] = LUSTRE_EL3HLT,
- [EL3RST] = LUSTRE_EL3RST,
- [ELNRNG] = LUSTRE_ELNRNG,
- [EUNATCH] = LUSTRE_EUNATCH,
- [ENOCSI] = LUSTRE_ENOCSI,
- [EL2HLT] = LUSTRE_EL2HLT,
- [EBADE] = LUSTRE_EBADE,
- [EBADR] = LUSTRE_EBADR,
- [EXFULL] = LUSTRE_EXFULL,
- [ENOANO] = LUSTRE_ENOANO,
- [EBADRQC] = LUSTRE_EBADRQC,
- [EBADSLT] = LUSTRE_EBADSLT,
- [EBFONT] = LUSTRE_EBFONT,
- [ENOSTR] = LUSTRE_ENOSTR,
- [ENODATA] = LUSTRE_ENODATA,
- [ETIME] = LUSTRE_ETIME,
- [ENOSR] = LUSTRE_ENOSR,
- [ENONET] = LUSTRE_ENONET,
- [ENOPKG] = LUSTRE_ENOPKG,
- [EREMOTE] = LUSTRE_EREMOTE,
- [ENOLINK] = LUSTRE_ENOLINK,
- [EADV] = LUSTRE_EADV,
- [ESRMNT] = LUSTRE_ESRMNT,
- [ECOMM] = LUSTRE_ECOMM,
- [EPROTO] = LUSTRE_EPROTO,
- [EMULTIHOP] = LUSTRE_EMULTIHOP,
- [EDOTDOT] = LUSTRE_EDOTDOT,
- [EBADMSG] = LUSTRE_EBADMSG,
- [EOVERFLOW] = LUSTRE_EOVERFLOW,
- [ENOTUNIQ] = LUSTRE_ENOTUNIQ,
- [EBADFD] = LUSTRE_EBADFD,
- [EREMCHG] = LUSTRE_EREMCHG,
- [ELIBACC] = LUSTRE_ELIBACC,
- [ELIBBAD] = LUSTRE_ELIBBAD,
- [ELIBSCN] = LUSTRE_ELIBSCN,
- [ELIBMAX] = LUSTRE_ELIBMAX,
- [ELIBEXEC] = LUSTRE_ELIBEXEC,
- [EILSEQ] = LUSTRE_EILSEQ,
- [ERESTART] = LUSTRE_ERESTART,
- [ESTRPIPE] = LUSTRE_ESTRPIPE,
- [EUSERS] = LUSTRE_EUSERS,
- [ENOTSOCK] = LUSTRE_ENOTSOCK,
- [EDESTADDRREQ] = LUSTRE_EDESTADDRREQ,
- [EMSGSIZE] = LUSTRE_EMSGSIZE,
- [EPROTOTYPE] = LUSTRE_EPROTOTYPE,
- [ENOPROTOOPT] = LUSTRE_ENOPROTOOPT,
- [EPROTONOSUPPORT] = LUSTRE_EPROTONOSUPPORT,
- [ESOCKTNOSUPPORT] = LUSTRE_ESOCKTNOSUPPORT,
- [EOPNOTSUPP] = LUSTRE_EOPNOTSUPP,
- [EPFNOSUPPORT] = LUSTRE_EPFNOSUPPORT,
- [EAFNOSUPPORT] = LUSTRE_EAFNOSUPPORT,
- [EADDRINUSE] = LUSTRE_EADDRINUSE,
- [EADDRNOTAVAIL] = LUSTRE_EADDRNOTAVAIL,
- [ENETDOWN] = LUSTRE_ENETDOWN,
- [ENETUNREACH] = LUSTRE_ENETUNREACH,
- [ENETRESET] = LUSTRE_ENETRESET,
- [ECONNABORTED] = LUSTRE_ECONNABORTED,
- [ECONNRESET] = LUSTRE_ECONNRESET,
- [ENOBUFS] = LUSTRE_ENOBUFS,
- [EISCONN] = LUSTRE_EISCONN,
- [ENOTCONN] = LUSTRE_ENOTCONN,
- [ESHUTDOWN] = LUSTRE_ESHUTDOWN,
- [ETOOMANYREFS] = LUSTRE_ETOOMANYREFS,
- [ETIMEDOUT] = LUSTRE_ETIMEDOUT,
- [ECONNREFUSED] = LUSTRE_ECONNREFUSED,
- [EHOSTDOWN] = LUSTRE_EHOSTDOWN,
- [EHOSTUNREACH] = LUSTRE_EHOSTUNREACH,
- [EALREADY] = LUSTRE_EALREADY,
- [EINPROGRESS] = LUSTRE_EINPROGRESS,
- [ESTALE] = LUSTRE_ESTALE,
- [EUCLEAN] = LUSTRE_EUCLEAN,
- [ENOTNAM] = LUSTRE_ENOTNAM,
- [ENAVAIL] = LUSTRE_ENAVAIL,
- [EISNAM] = LUSTRE_EISNAM,
- [EREMOTEIO] = LUSTRE_EREMOTEIO,
- [EDQUOT] = LUSTRE_EDQUOT,
- [ENOMEDIUM] = LUSTRE_ENOMEDIUM,
- [EMEDIUMTYPE] = LUSTRE_EMEDIUMTYPE,
- [ECANCELED] = LUSTRE_ECANCELED,
- [ENOKEY] = LUSTRE_ENOKEY,
- [EKEYEXPIRED] = LUSTRE_EKEYEXPIRED,
- [EKEYREVOKED] = LUSTRE_EKEYREVOKED,
- [EKEYREJECTED] = LUSTRE_EKEYREJECTED,
- [EOWNERDEAD] = LUSTRE_EOWNERDEAD,
- [ENOTRECOVERABLE] = LUSTRE_ENOTRECOVERABLE,
- [ERESTARTSYS] = LUSTRE_ERESTARTSYS,
- [ERESTARTNOINTR] = LUSTRE_ERESTARTNOINTR,
- [ERESTARTNOHAND] = LUSTRE_ERESTARTNOHAND,
- [ENOIOCTLCMD] = LUSTRE_ENOIOCTLCMD,
- [ERESTART_RESTARTBLOCK] = LUSTRE_ERESTART_RESTARTBLOCK,
- [EBADHANDLE] = LUSTRE_EBADHANDLE,
- [ENOTSYNC] = LUSTRE_ENOTSYNC,
- [EBADCOOKIE] = LUSTRE_EBADCOOKIE,
- [ENOTSUPP] = LUSTRE_ENOTSUPP,
- [ETOOSMALL] = LUSTRE_ETOOSMALL,
- [ESERVERFAULT] = LUSTRE_ESERVERFAULT,
- [EBADTYPE] = LUSTRE_EBADTYPE,
- [EJUKEBOX] = LUSTRE_EJUKEBOX,
- [EIOCBQUEUED] = LUSTRE_EIOCBQUEUED,
-};
-
-static int lustre_errno_ntoh_mapping[] = {
- [LUSTRE_EPERM] = EPERM,
- [LUSTRE_ENOENT] = ENOENT,
- [LUSTRE_ESRCH] = ESRCH,
- [LUSTRE_EINTR] = EINTR,
- [LUSTRE_EIO] = EIO,
- [LUSTRE_ENXIO] = ENXIO,
- [LUSTRE_E2BIG] = E2BIG,
- [LUSTRE_ENOEXEC] = ENOEXEC,
- [LUSTRE_EBADF] = EBADF,
- [LUSTRE_ECHILD] = ECHILD,
- [LUSTRE_EAGAIN] = EAGAIN,
- [LUSTRE_ENOMEM] = ENOMEM,
- [LUSTRE_EACCES] = EACCES,
- [LUSTRE_EFAULT] = EFAULT,
- [LUSTRE_ENOTBLK] = ENOTBLK,
- [LUSTRE_EBUSY] = EBUSY,
- [LUSTRE_EEXIST] = EEXIST,
- [LUSTRE_EXDEV] = EXDEV,
- [LUSTRE_ENODEV] = ENODEV,
- [LUSTRE_ENOTDIR] = ENOTDIR,
- [LUSTRE_EISDIR] = EISDIR,
- [LUSTRE_EINVAL] = EINVAL,
- [LUSTRE_ENFILE] = ENFILE,
- [LUSTRE_EMFILE] = EMFILE,
- [LUSTRE_ENOTTY] = ENOTTY,
- [LUSTRE_ETXTBSY] = ETXTBSY,
- [LUSTRE_EFBIG] = EFBIG,
- [LUSTRE_ENOSPC] = ENOSPC,
- [LUSTRE_ESPIPE] = ESPIPE,
- [LUSTRE_EROFS] = EROFS,
- [LUSTRE_EMLINK] = EMLINK,
- [LUSTRE_EPIPE] = EPIPE,
- [LUSTRE_EDOM] = EDOM,
- [LUSTRE_ERANGE] = ERANGE,
- [LUSTRE_EDEADLK] = EDEADLK,
- [LUSTRE_ENAMETOOLONG] = ENAMETOOLONG,
- [LUSTRE_ENOLCK] = ENOLCK,
- [LUSTRE_ENOSYS] = ENOSYS,
- [LUSTRE_ENOTEMPTY] = ENOTEMPTY,
- [LUSTRE_ELOOP] = ELOOP,
- [LUSTRE_ENOMSG] = ENOMSG,
- [LUSTRE_EIDRM] = EIDRM,
- [LUSTRE_ECHRNG] = ECHRNG,
- [LUSTRE_EL2NSYNC] = EL2NSYNC,
- [LUSTRE_EL3HLT] = EL3HLT,
- [LUSTRE_EL3RST] = EL3RST,
- [LUSTRE_ELNRNG] = ELNRNG,
- [LUSTRE_EUNATCH] = EUNATCH,
- [LUSTRE_ENOCSI] = ENOCSI,
- [LUSTRE_EL2HLT] = EL2HLT,
- [LUSTRE_EBADE] = EBADE,
- [LUSTRE_EBADR] = EBADR,
- [LUSTRE_EXFULL] = EXFULL,
- [LUSTRE_ENOANO] = ENOANO,
- [LUSTRE_EBADRQC] = EBADRQC,
- [LUSTRE_EBADSLT] = EBADSLT,
- [LUSTRE_EBFONT] = EBFONT,
- [LUSTRE_ENOSTR] = ENOSTR,
- [LUSTRE_ENODATA] = ENODATA,
- [LUSTRE_ETIME] = ETIME,
- [LUSTRE_ENOSR] = ENOSR,
- [LUSTRE_ENONET] = ENONET,
- [LUSTRE_ENOPKG] = ENOPKG,
- [LUSTRE_EREMOTE] = EREMOTE,
- [LUSTRE_ENOLINK] = ENOLINK,
- [LUSTRE_EADV] = EADV,
- [LUSTRE_ESRMNT] = ESRMNT,
- [LUSTRE_ECOMM] = ECOMM,
- [LUSTRE_EPROTO] = EPROTO,
- [LUSTRE_EMULTIHOP] = EMULTIHOP,
- [LUSTRE_EDOTDOT] = EDOTDOT,
- [LUSTRE_EBADMSG] = EBADMSG,
- [LUSTRE_EOVERFLOW] = EOVERFLOW,
- [LUSTRE_ENOTUNIQ] = ENOTUNIQ,
- [LUSTRE_EBADFD] = EBADFD,
- [LUSTRE_EREMCHG] = EREMCHG,
- [LUSTRE_ELIBACC] = ELIBACC,
- [LUSTRE_ELIBBAD] = ELIBBAD,
- [LUSTRE_ELIBSCN] = ELIBSCN,
- [LUSTRE_ELIBMAX] = ELIBMAX,
- [LUSTRE_ELIBEXEC] = ELIBEXEC,
- [LUSTRE_EILSEQ] = EILSEQ,
- [LUSTRE_ERESTART] = ERESTART,
- [LUSTRE_ESTRPIPE] = ESTRPIPE,
- [LUSTRE_EUSERS] = EUSERS,
- [LUSTRE_ENOTSOCK] = ENOTSOCK,
- [LUSTRE_EDESTADDRREQ] = EDESTADDRREQ,
- [LUSTRE_EMSGSIZE] = EMSGSIZE,
- [LUSTRE_EPROTOTYPE] = EPROTOTYPE,
- [LUSTRE_ENOPROTOOPT] = ENOPROTOOPT,
- [LUSTRE_EPROTONOSUPPORT] = EPROTONOSUPPORT,
- [LUSTRE_ESOCKTNOSUPPORT] = ESOCKTNOSUPPORT,
- [LUSTRE_EOPNOTSUPP] = EOPNOTSUPP,
- [LUSTRE_EPFNOSUPPORT] = EPFNOSUPPORT,
- [LUSTRE_EAFNOSUPPORT] = EAFNOSUPPORT,
- [LUSTRE_EADDRINUSE] = EADDRINUSE,
- [LUSTRE_EADDRNOTAVAIL] = EADDRNOTAVAIL,
- [LUSTRE_ENETDOWN] = ENETDOWN,
- [LUSTRE_ENETUNREACH] = ENETUNREACH,
- [LUSTRE_ENETRESET] = ENETRESET,
- [LUSTRE_ECONNABORTED] = ECONNABORTED,
- [LUSTRE_ECONNRESET] = ECONNRESET,
- [LUSTRE_ENOBUFS] = ENOBUFS,
- [LUSTRE_EISCONN] = EISCONN,
- [LUSTRE_ENOTCONN] = ENOTCONN,
- [LUSTRE_ESHUTDOWN] = ESHUTDOWN,
- [LUSTRE_ETOOMANYREFS] = ETOOMANYREFS,
- [LUSTRE_ETIMEDOUT] = ETIMEDOUT,
- [LUSTRE_ECONNREFUSED] = ECONNREFUSED,
- [LUSTRE_EHOSTDOWN] = EHOSTDOWN,
- [LUSTRE_EHOSTUNREACH] = EHOSTUNREACH,
- [LUSTRE_EALREADY] = EALREADY,
- [LUSTRE_EINPROGRESS] = EINPROGRESS,
- [LUSTRE_ESTALE] = ESTALE,
- [LUSTRE_EUCLEAN] = EUCLEAN,
- [LUSTRE_ENOTNAM] = ENOTNAM,
- [LUSTRE_ENAVAIL] = ENAVAIL,
- [LUSTRE_EISNAM] = EISNAM,
- [LUSTRE_EREMOTEIO] = EREMOTEIO,
- [LUSTRE_EDQUOT] = EDQUOT,
- [LUSTRE_ENOMEDIUM] = ENOMEDIUM,
- [LUSTRE_EMEDIUMTYPE] = EMEDIUMTYPE,
- [LUSTRE_ECANCELED] = ECANCELED,
- [LUSTRE_ENOKEY] = ENOKEY,
- [LUSTRE_EKEYEXPIRED] = EKEYEXPIRED,
- [LUSTRE_EKEYREVOKED] = EKEYREVOKED,
- [LUSTRE_EKEYREJECTED] = EKEYREJECTED,
- [LUSTRE_EOWNERDEAD] = EOWNERDEAD,
- [LUSTRE_ENOTRECOVERABLE] = ENOTRECOVERABLE,
- [LUSTRE_ERESTARTSYS] = ERESTARTSYS,
- [LUSTRE_ERESTARTNOINTR] = ERESTARTNOINTR,
- [LUSTRE_ERESTARTNOHAND] = ERESTARTNOHAND,
- [LUSTRE_ENOIOCTLCMD] = ENOIOCTLCMD,
- [LUSTRE_ERESTART_RESTARTBLOCK] = ERESTART_RESTARTBLOCK,
- [LUSTRE_EBADHANDLE] = EBADHANDLE,
- [LUSTRE_ENOTSYNC] = ENOTSYNC,
- [LUSTRE_EBADCOOKIE] = EBADCOOKIE,
- [LUSTRE_ENOTSUPP] = ENOTSUPP,
- [LUSTRE_ETOOSMALL] = ETOOSMALL,
- [LUSTRE_ESERVERFAULT] = ESERVERFAULT,
- [LUSTRE_EBADTYPE] = EBADTYPE,
- [LUSTRE_EJUKEBOX] = EJUKEBOX,
- [LUSTRE_EIOCBQUEUED] = EIOCBQUEUED,
-};
-
-unsigned int lustre_errno_hton(unsigned int h)
-{
- unsigned int n;
-
- if (h == 0) {
- n = 0;
- } else if (h < ARRAY_SIZE(lustre_errno_hton_mapping)) {
- n = lustre_errno_hton_mapping[h];
- if (n == 0)
- goto generic;
- } else {
-generic:
- /*
- * A generic errno is better than the unknown one that could
- * mean anything to a different host.
- */
- n = LUSTRE_EIO;
- }
-
- return n;
-}
-EXPORT_SYMBOL(lustre_errno_hton);
-
-unsigned int lustre_errno_ntoh(unsigned int n)
-{
- unsigned int h;
-
- if (n == 0) {
- h = 0;
- } else if (n < ARRAY_SIZE(lustre_errno_ntoh_mapping)) {
- h = lustre_errno_ntoh_mapping[n];
- if (h == 0)
- goto generic;
- } else {
-generic:
- /*
- * Similar to the situation in lustre_errno_hton(), an unknown
- * network errno could coincide with anything. Hence, it is
- * better to return a generic errno.
- */
- h = EIO;
- }
-
- return h;
-}
-EXPORT_SYMBOL(lustre_errno_ntoh);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
deleted file mode 100644
index 130bacc2c891..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ /dev/null
@@ -1,585 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <linux/libcfs/libcfs.h>
-# ifdef __mips64__
-# include <linux/kernel.h>
-# endif
-
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lustre_sec.h>
-#include "ptlrpc_internal.h"
-
-struct lnet_handle_eq ptlrpc_eq_h;
-
-/*
- * Client's outgoing request callback
- */
-void request_out_callback(struct lnet_event *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_request *req = cbid->cbid_arg;
- bool wakeup = false;
-
- LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
- LASSERT(ev->unlinked);
-
- DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
-
- sptlrpc_request_out_callback(req);
-
- spin_lock(&req->rq_lock);
- req->rq_real_sent = ktime_get_real_seconds();
- req->rq_req_unlinked = 1;
- /* reply_in_callback happened before request_out_callback? */
- if (req->rq_reply_unlinked)
- wakeup = true;
-
- if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
- /* Failed send: make it seem like the reply timed out, just
- * like failing sends in client.c does currently...
- */
- req->rq_net_err = 1;
- wakeup = true;
- }
-
- if (wakeup)
- ptlrpc_client_wake_req(req);
-
- spin_unlock(&req->rq_lock);
-
- ptlrpc_req_finished(req);
-}
-
-/*
- * Client's incoming reply callback
- */
-void reply_in_callback(struct lnet_event *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_request *req = cbid->cbid_arg;
-
- DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
-
- LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
- LASSERT(ev->md.start == req->rq_repbuf);
- LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
- /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
- * for adaptive timeouts' early reply.
- */
- LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
-
- spin_lock(&req->rq_lock);
-
- req->rq_receiving_reply = 0;
- req->rq_early = 0;
- if (ev->unlinked)
- req->rq_reply_unlinked = 1;
-
- if (ev->status)
- goto out_wake;
-
- if (ev->type == LNET_EVENT_UNLINK) {
- LASSERT(ev->unlinked);
- DEBUG_REQ(D_NET, req, "unlink");
- goto out_wake;
- }
-
- if (ev->mlength < ev->rlength) {
- CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
- req->rq_replen, ev->rlength, ev->offset);
- req->rq_reply_truncated = 1;
- req->rq_replied = 1;
- req->rq_status = -EOVERFLOW;
- req->rq_nob_received = ev->rlength + ev->offset;
- goto out_wake;
- }
-
- if ((ev->offset == 0) &&
- ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
- /* Early reply */
- DEBUG_REQ(D_ADAPTTO, req,
- "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
- ev->mlength, ev->offset,
- req->rq_replen, req->rq_replied, ev->unlinked);
-
- req->rq_early_count++; /* number received, client side */
-
- /* already got the real reply or buffers are already unlinked */
- if (req->rq_replied || req->rq_reply_unlinked == 1)
- goto out_wake;
-
- req->rq_early = 1;
- req->rq_reply_off = ev->offset;
- req->rq_nob_received = ev->mlength;
- /* And we're still receiving */
- req->rq_receiving_reply = 1;
- } else {
- /* Real reply */
- req->rq_rep_swab_mask = 0;
- req->rq_replied = 1;
- /* Got reply, no resend required */
- req->rq_resend = 0;
- req->rq_reply_off = ev->offset;
- req->rq_nob_received = ev->mlength;
- /* LNetMDUnlink can't be called under the LNET_LOCK,
- * so we must unlink in ptlrpc_unregister_reply
- */
- DEBUG_REQ(D_INFO, req,
- "reply in flags=%x mlen=%u offset=%d replen=%d",
- lustre_msg_get_flags(req->rq_reqmsg),
- ev->mlength, ev->offset, req->rq_replen);
- }
-
- req->rq_import->imp_last_reply_time = ktime_get_real_seconds();
-
-out_wake:
- /* NB don't unlock till after wakeup; req can disappear under us
- * since we don't have our own ref
- */
- ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
-}
-
-/*
- * Client's bulk has been written/read
- */
-void client_bulk_callback(struct lnet_event *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
- struct ptlrpc_request *req;
-
- LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
- ev->type == LNET_EVENT_PUT) ||
- (ptlrpc_is_bulk_get_source(desc->bd_type) &&
- ev->type == LNET_EVENT_GET) ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT(ev->unlinked);
-
- if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
- ev->status = -EIO;
-
- if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,
- CFS_FAIL_ONCE))
- ev->status = -EIO;
-
- CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
- "event type %d, status %d, desc %p\n",
- ev->type, ev->status, desc);
-
- spin_lock(&desc->bd_lock);
- req = desc->bd_req;
- LASSERT(desc->bd_md_count > 0);
- desc->bd_md_count--;
-
- if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
- desc->bd_nob_transferred += ev->mlength;
- desc->bd_sender = ev->sender;
- } else {
- /* start reconnect and resend if network error hit */
- spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- spin_unlock(&req->rq_lock);
- }
-
- if (ev->status != 0)
- desc->bd_failure = 1;
-
- /* NB don't unlock till after wakeup; desc can disappear under us
- * otherwise
- */
- if (desc->bd_md_count == 0)
- ptlrpc_client_wake_req(desc->bd_req);
-
- spin_unlock(&desc->bd_lock);
-}
-
-/*
- * We will have percpt request history list for ptlrpc service in upcoming
- * patches because we don't want to be serialized by current per-service
- * history operations. So we require history ID can (somehow) show arriving
- * order w/o grabbing global lock, and user can sort them in userspace.
- *
- * This is how we generate history ID for ptlrpc_request:
- * ----------------------------------------------------
- * | 32 bits | 16 bits | (16 - X)bits | X bits |
- * ----------------------------------------------------
- * | seconds | usec / 16 | sequence | CPT id |
- * ----------------------------------------------------
- *
- * it might not be precise but should be good enough.
- */
-
-#define REQS_CPT_BITS(svcpt) ((svcpt)->scp_service->srv_cpt_bits)
-
-#define REQS_SEC_SHIFT 32
-#define REQS_USEC_SHIFT 16
-#define REQS_SEQ_SHIFT(svcpt) REQS_CPT_BITS(svcpt)
-
-static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- __u64 sec = req->rq_arrival_time.tv_sec;
- __u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
- __u64 new_seq;
-
- /* set sequence ID for request and add it to history list,
- * it must be called with hold svcpt::scp_lock
- */
-
- new_seq = (sec << REQS_SEC_SHIFT) |
- (usec << REQS_USEC_SHIFT) |
- (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);
-
- if (new_seq > svcpt->scp_hist_seq) {
- /* This handles the initial case of scp_hist_seq == 0 or
- * we just jumped into a new time window
- */
- svcpt->scp_hist_seq = new_seq;
- } else {
- LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
- /* NB: increase sequence number in current usec bucket,
- * however, it's possible that we used up all bits for
- * sequence and jumped into the next usec bucket (future time),
- * then we hope there will be less RPCs per bucket at some
- * point, and sequence will catch up again
- */
- svcpt->scp_hist_seq += (1ULL << REQS_SEQ_SHIFT(svcpt));
- new_seq = svcpt->scp_hist_seq;
- }
-
- req->rq_history_seq = new_seq;
-
- list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
-}
-
-/*
- * Server's incoming request callback
- */
-void request_in_callback(struct lnet_event *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
- struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
- struct ptlrpc_service *service = svcpt->scp_service;
- struct ptlrpc_request *req;
-
- LASSERT(ev->type == LNET_EVENT_PUT ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
- LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
- rqbd->rqbd_buffer + service->srv_buf_size);
-
- CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
- "event type %d, status %d, service %s\n",
- ev->type, ev->status, service->srv_name);
-
- if (ev->unlinked) {
- /* If this is the last request message to fit in the
- * request buffer we can use the request object embedded in
- * rqbd. Note that if we failed to allocate a request,
- * we'd have to re-post the rqbd, which we can't do in this
- * context.
- */
- req = &rqbd->rqbd_req;
- memset(req, 0, sizeof(*req));
- } else {
- LASSERT(ev->type == LNET_EVENT_PUT);
- if (ev->status != 0) {
- /* We moaned above already... */
- return;
- }
- req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
- if (!req) {
- CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
- service->srv_name,
- libcfs_id2str(ev->initiator));
- return;
- }
- }
-
- ptlrpc_srv_req_init(req);
- /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
- * flags are reset and scalars are zero. We only set the message
- * size to non-zero if this was a successful receive.
- */
- req->rq_xid = ev->match_bits;
- req->rq_reqbuf = ev->md.start + ev->offset;
- if (ev->type == LNET_EVENT_PUT && ev->status == 0)
- req->rq_reqdata_len = ev->mlength;
- ktime_get_real_ts64(&req->rq_arrival_time);
- req->rq_peer = ev->initiator;
- req->rq_self = ev->target.nid;
- req->rq_rqbd = rqbd;
- req->rq_phase = RQ_PHASE_NEW;
- if (ev->type == LNET_EVENT_PUT)
- CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
- req, req->rq_xid, ev->mlength);
-
- CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
-
- spin_lock(&svcpt->scp_lock);
-
- ptlrpc_req_add_history(svcpt, req);
-
- if (ev->unlinked) {
- svcpt->scp_nrqbds_posted--;
- CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
- svcpt->scp_nrqbds_posted);
-
- /* Normally, don't complain about 0 buffers posted; LNET won't
- * drop incoming reqs since we set the portal lazy
- */
- if (test_req_buffer_pressure &&
- ev->type != LNET_EVENT_UNLINK &&
- svcpt->scp_nrqbds_posted == 0)
- CWARN("All %s request buffers busy\n",
- service->srv_name);
-
- /* req takes over the network's ref on rqbd */
- } else {
- /* req takes a ref on rqbd */
- rqbd->rqbd_refcount++;
- }
-
- list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
- svcpt->scp_nreqs_incoming++;
-
- /* NB everything can disappear under us once the request
- * has been queued and we unlock, so do the wake now...
- */
- wake_up(&svcpt->scp_waitq);
-
- spin_unlock(&svcpt->scp_lock);
-}
-
-/*
- * Server's outgoing reply callback
- */
-void reply_out_callback(struct lnet_event *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_reply_state *rs = cbid->cbid_arg;
- struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
-
- LASSERT(ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_ACK ||
- ev->type == LNET_EVENT_UNLINK);
-
- if (!rs->rs_difficult) {
- /* 'Easy' replies have no further processing so I drop the
- * net's ref on 'rs'
- */
- LASSERT(ev->unlinked);
- ptlrpc_rs_decref(rs);
- return;
- }
-
- LASSERT(rs->rs_on_net);
-
- if (ev->unlinked) {
- /* Last network callback. The net's ref on 'rs' stays put
- * until ptlrpc_handle_rs() is done with it
- */
- spin_lock(&svcpt->scp_rep_lock);
- spin_lock(&rs->rs_lock);
-
- rs->rs_on_net = 0;
- if (!rs->rs_no_ack ||
- rs->rs_transno <=
- rs->rs_export->exp_obd->obd_last_committed ||
- list_empty(&rs->rs_obd_list))
- ptlrpc_schedule_difficult_reply(rs);
-
- spin_unlock(&rs->rs_lock);
- spin_unlock(&svcpt->scp_rep_lock);
- }
-}
-
-static void ptlrpc_master_callback(struct lnet_event *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;
-
- /* Honestly, it's best to find out early. */
- LASSERT(cbid->cbid_arg != LP_POISON);
- LASSERT(callback == request_out_callback ||
- callback == reply_in_callback ||
- callback == client_bulk_callback ||
- callback == request_in_callback ||
- callback == reply_out_callback);
-
- callback(ev);
-}
-
-int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
- struct lnet_process_id *peer, lnet_nid_t *self)
-{
- int best_dist = 0;
- __u32 best_order = 0;
- int count = 0;
- int rc = -ENOENT;
- int dist;
- __u32 order;
- lnet_nid_t dst_nid;
- lnet_nid_t src_nid;
-
- peer->pid = LNET_PID_LUSTRE;
-
- /* Choose the matching UUID that's closest */
- while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
- dist = LNetDist(dst_nid, &src_nid, &order);
- if (dist < 0)
- continue;
-
- if (dist == 0) { /* local! use loopback LND */
- peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
- rc = 0;
- break;
- }
-
- if (rc < 0 ||
- dist < best_dist ||
- (dist == best_dist && order < best_order)) {
- best_dist = dist;
- best_order = order;
-
- peer->nid = dst_nid;
- *self = src_nid;
- rc = 0;
- }
- }
-
- CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
- return rc;
-}
-
-static void ptlrpc_ni_fini(void)
-{
- int rc;
- int retries;
-
- /* Wait for the event queue to become idle since there may still be
- * messages in flight with pending events (i.e. the fire-and-forget
- * messages == client requests and "non-difficult" server
- * replies
- */
-
- for (retries = 0;; retries++) {
- rc = LNetEQFree(ptlrpc_eq_h);
- switch (rc) {
- default:
- LBUG();
-
- case 0:
- LNetNIFini();
- return;
-
- case -EBUSY:
- if (retries != 0)
- CWARN("Event queue still busy\n");
-
- schedule_timeout_uninterruptible(2 * HZ);
- break;
- }
- }
- /* notreached */
-}
-
-static lnet_pid_t ptl_get_pid(void)
-{
- lnet_pid_t pid;
-
- pid = LNET_PID_LUSTRE;
- return pid;
-}
-
-static int ptlrpc_ni_init(void)
-{
- int rc;
- lnet_pid_t pid;
-
- pid = ptl_get_pid();
- CDEBUG(D_NET, "My pid is: %x\n", pid);
-
- /* We're not passing any limits yet... */
- rc = LNetNIInit(pid);
- if (rc < 0) {
- CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
- return rc;
- }
-
- /* CAVEAT EMPTOR: how we process portals events is _radically_
- * different depending on...
- */
- /* kernel LNet calls our master callback when there are new event,
- * because we are guaranteed to get every event via callback,
- * so we just set EQ size to 0 to avoid overhead of serializing
- * enqueue/dequeue operations in LNet.
- */
- rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
- if (rc == 0)
- return 0;
-
- CERROR("Failed to allocate event queue: %d\n", rc);
- LNetNIFini();
-
- return rc;
-}
-
-int ptlrpc_init_portals(void)
-{
- int rc = ptlrpc_ni_init();
-
- if (rc != 0) {
- CERROR("network initialisation failed\n");
- return rc;
- }
- rc = ptlrpcd_addref();
- if (rc == 0)
- return 0;
-
- CERROR("rpcd initialisation failed\n");
- ptlrpc_ni_fini();
- return rc;
-}
-
-void ptlrpc_exit_portals(void)
-{
- ptlrpcd_decref();
- ptlrpc_ni_fini();
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
deleted file mode 100644
index a2c4fc3488b1..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ /dev/null
@@ -1,1676 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/import.c
- *
- * Author: Mike Shaver <shaver@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <obd_support.h>
-#include <lustre_ha.h>
-#include <lustre_net.h>
-#include <lustre_import.h>
-#include <lustre_export.h>
-#include <obd.h>
-#include <obd_cksum.h>
-#include <obd_class.h>
-
-#include "ptlrpc_internal.h"
-
-struct ptlrpc_connect_async_args {
- __u64 pcaa_peer_committed;
- int pcaa_initial_connect;
-};
-
-/**
- * Updates import \a imp current state to provided \a state value
- * Helper function. Must be called under imp_lock.
- */
-static void __import_set_state(struct obd_import *imp,
- enum lustre_imp_state state)
-{
- switch (state) {
- case LUSTRE_IMP_CLOSED:
- case LUSTRE_IMP_NEW:
- case LUSTRE_IMP_DISCON:
- case LUSTRE_IMP_CONNECTING:
- break;
- case LUSTRE_IMP_REPLAY_WAIT:
- imp->imp_replay_state = LUSTRE_IMP_REPLAY_LOCKS;
- break;
- default:
- imp->imp_replay_state = LUSTRE_IMP_REPLAY;
- }
-
- imp->imp_state = state;
- imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
- imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
- ktime_get_real_seconds();
- imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
- IMP_STATE_HIST_LEN;
-}
-
-/* A CLOSED import should remain so. */
-#define IMPORT_SET_STATE_NOLOCK(imp, state) \
-do { \
- if (imp->imp_state != LUSTRE_IMP_CLOSED) { \
- CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n", \
- imp, obd2cli_tgt(imp->imp_obd), \
- ptlrpc_import_state_name(imp->imp_state), \
- ptlrpc_import_state_name(state)); \
- __import_set_state(imp, state); \
- } \
-} while (0)
-
-#define IMPORT_SET_STATE(imp, state) \
-do { \
- spin_lock(&imp->imp_lock); \
- IMPORT_SET_STATE_NOLOCK(imp, state); \
- spin_unlock(&imp->imp_lock); \
-} while (0)
-
-static int ptlrpc_connect_interpret(const struct lu_env *env,
- struct ptlrpc_request *request,
- void *data, int rc);
-int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
-
-/* Only this function is allowed to change the import state when it is
- * CLOSED. I would rather refcount the import and free it after
- * disconnection like we do with exports. To do that, the client_obd
- * will need to save the peer info somewhere other than in the import,
- * though.
- */
-int ptlrpc_init_import(struct obd_import *imp)
-{
- spin_lock(&imp->imp_lock);
-
- imp->imp_generation++;
- imp->imp_state = LUSTRE_IMP_NEW;
-
- spin_unlock(&imp->imp_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_init_import);
-
-#define UUID_STR "_UUID"
-static void deuuidify(char *uuid, const char *prefix, char **uuid_start,
- int *uuid_len)
-{
- *uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix))
- ? uuid : uuid + strlen(prefix);
-
- *uuid_len = strlen(*uuid_start);
-
- if (*uuid_len < strlen(UUID_STR))
- return;
-
- if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
- UUID_STR, strlen(UUID_STR)))
- *uuid_len -= strlen(UUID_STR);
-}
-
-/**
- * Returns true if import was FULL, false if import was already not
- * connected.
- * @imp - import to be disconnected
- * @conn_cnt - connection count (epoch) of the request that timed out
- * and caused the disconnection. In some cases, multiple
- * inflight requests can fail to a single target (e.g. OST
- * bulk requests) and if one has already caused a reconnection
- * (increasing the import->conn_cnt) the older failure should
- * not also cause a reconnection. If zero it forces a reconnect.
- */
-int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
-{
- int rc = 0;
-
- spin_lock(&imp->imp_lock);
-
- if (imp->imp_state == LUSTRE_IMP_FULL &&
- (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
- char *target_start;
- int target_len;
-
- deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
- &target_start, &target_len);
-
- if (imp->imp_replayable) {
- LCONSOLE_WARN("%s: Connection to %.*s (at %s) was lost; in progress operations using this service will wait for recovery to complete\n",
- imp->imp_obd->obd_name, target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
- } else {
- LCONSOLE_ERROR_MSG(0x166, "%s: Connection to %.*s (at %s) was lost; in progress operations using this service will fail\n",
- imp->imp_obd->obd_name,
- target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
- }
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
- spin_unlock(&imp->imp_lock);
-
- if (obd_dump_on_timeout)
- libcfs_debug_dumplog();
-
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
- rc = 1;
- } else {
- spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
- imp->imp_client->cli_name, imp,
- (imp->imp_state == LUSTRE_IMP_FULL &&
- imp->imp_conn_cnt > conn_cnt) ?
- "reconnected" : "not connected", imp->imp_conn_cnt,
- conn_cnt, ptlrpc_import_state_name(imp->imp_state));
- }
-
- return rc;
-}
-
-/*
- * This acts as a barrier; all existing requests are rejected, and
- * no new requests will be accepted until the import is valid again.
- */
-void ptlrpc_deactivate_import(struct obd_import *imp)
-{
- CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
-
- spin_lock(&imp->imp_lock);
- imp->imp_invalid = 1;
- imp->imp_generation++;
- spin_unlock(&imp->imp_lock);
-
- ptlrpc_abort_inflight(imp);
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
-}
-EXPORT_SYMBOL(ptlrpc_deactivate_import);
-
-static unsigned int
-ptlrpc_inflight_deadline(struct ptlrpc_request *req, time64_t now)
-{
- long dl;
-
- if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
- (req->rq_phase == RQ_PHASE_BULK) ||
- (req->rq_phase == RQ_PHASE_NEW)))
- return 0;
-
- if (req->rq_timedout)
- return 0;
-
- if (req->rq_phase == RQ_PHASE_NEW)
- dl = req->rq_sent;
- else
- dl = req->rq_deadline;
-
- if (dl <= now)
- return 0;
-
- return dl - now;
-}
-
-static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
-{
- time64_t now = ktime_get_real_seconds();
- struct ptlrpc_request *req, *n;
- unsigned int timeout = 0;
-
- spin_lock(&imp->imp_lock);
- list_for_each_entry_safe(req, n, &imp->imp_sending_list, rq_list)
- timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
-
- spin_unlock(&imp->imp_lock);
- return timeout;
-}
-
-/**
- * This function will invalidate the import, if necessary, then block
- * for all the RPC completions, and finally notify the obd to
- * invalidate its state (ie cancel locks, clear pending requests,
- * etc).
- */
-void ptlrpc_invalidate_import(struct obd_import *imp)
-{
- struct ptlrpc_request *req, *n;
- unsigned int timeout;
- int rc;
-
- atomic_inc(&imp->imp_inval_count);
-
- if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
- ptlrpc_deactivate_import(imp);
-
- CFS_FAIL_TIMEOUT(OBD_FAIL_MGS_CONNECT_NET, 3 * cfs_fail_val / 2);
- LASSERT(imp->imp_invalid);
-
- /* Wait forever until inflight == 0. We really can't do it another
- * way because in some cases we need to wait for very long reply
- * unlink. We can't do anything before that because there is really
- * no guarantee that some rdma transfer is not in progress right now.
- */
- do {
- /* Calculate max timeout for waiting on rpcs to error
- * out. Use obd_timeout if calculated value is smaller
- * than it.
- */
- if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
- timeout = ptlrpc_inflight_timeout(imp);
- timeout += timeout / 3;
-
- if (timeout == 0)
- timeout = obd_timeout;
- } else {
- /* decrease the interval to increase race condition */
- timeout = 1;
- }
-
- CDEBUG(D_RPCTRACE,
- "Sleeping %d sec for inflight to error out\n",
- timeout);
-
- /* Wait for all requests to error out and call completion
- * callbacks. Cap it at obd_timeout -- these should all
- * have been locally cancelled by ptlrpc_abort_inflight.
- */
- rc = wait_event_idle_timeout(imp->imp_recovery_waitq,
- atomic_read(&imp->imp_inflight) == 0,
- obd_timeout * HZ);
-
- if (rc == 0) {
- const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
-
- CERROR("%s: timeout waiting for callback (%d != 0)\n",
- cli_tgt,
- atomic_read(&imp->imp_inflight));
-
- spin_lock(&imp->imp_lock);
- if (atomic_read(&imp->imp_inflight) == 0) {
- int count = atomic_read(&imp->imp_unregistering);
-
- /* We know that "unregistering" rpcs only can
- * survive in sending or delaying lists (they
- * maybe waiting for long reply unlink in
- * sluggish nets). Let's check this. If there
- * is no inflight and unregistering != 0, this
- * is bug.
- */
- LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n",
- count);
-
- /* Let's save one loop as soon as inflight have
- * dropped to zero. No new inflights possible at
- * this point.
- */
- rc = 0;
- } else {
- list_for_each_entry_safe(req, n,
- &imp->imp_sending_list, rq_list) {
- DEBUG_REQ(D_ERROR, req,
- "still on sending list");
- }
- list_for_each_entry_safe(req, n,
- &imp->imp_delayed_list, rq_list) {
- DEBUG_REQ(D_ERROR, req,
- "still on delayed list");
- }
-
- CERROR("%s: Unregistering RPCs found (%d). Network is sluggish? Waiting them to error out.\n",
- cli_tgt,
- atomic_read(&imp->
- imp_unregistering));
- }
- spin_unlock(&imp->imp_lock);
- }
- } while (rc == 0);
-
- /*
- * Let's additionally check that no new rpcs added to import in
- * "invalidate" state.
- */
- LASSERT(atomic_read(&imp->imp_inflight) == 0);
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
- sptlrpc_import_flush_all_ctx(imp);
-
- atomic_dec(&imp->imp_inval_count);
- wake_up_all(&imp->imp_recovery_waitq);
-}
-EXPORT_SYMBOL(ptlrpc_invalidate_import);
-
-/* unset imp_invalid */
-void ptlrpc_activate_import(struct obd_import *imp)
-{
- struct obd_device *obd = imp->imp_obd;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_deactive != 0) {
- spin_unlock(&imp->imp_lock);
- return;
- }
-
- imp->imp_invalid = 0;
- spin_unlock(&imp->imp_lock);
- obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
-}
-EXPORT_SYMBOL(ptlrpc_activate_import);
-
-void ptlrpc_pinger_force(struct obd_import *imp)
-{
- CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd),
- ptlrpc_import_state_name(imp->imp_state));
-
- spin_lock(&imp->imp_lock);
- imp->imp_force_verify = 1;
- spin_unlock(&imp->imp_lock);
-
- if (imp->imp_state != LUSTRE_IMP_CONNECTING)
- ptlrpc_pinger_wake_up();
-}
-EXPORT_SYMBOL(ptlrpc_pinger_force);
-
-void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
-{
- LASSERT(!imp->imp_dlm_fake);
-
- if (ptlrpc_set_import_discon(imp, conn_cnt)) {
- if (!imp->imp_replayable) {
- CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_obd->obd_name);
- ptlrpc_deactivate_import(imp);
- }
-
- ptlrpc_pinger_force(imp);
- }
-}
-
-int ptlrpc_reconnect_import(struct obd_import *imp)
-{
- int rc;
-
- ptlrpc_pinger_force(imp);
-
- CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
- obd2cli_tgt(imp->imp_obd), obd_timeout);
-
- rc = wait_event_idle_timeout(imp->imp_recovery_waitq,
- !ptlrpc_import_in_recovery(imp),
- obd_timeout * HZ);
- CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd),
- ptlrpc_import_state_name(imp->imp_state));
- return rc == 0 ? -ETIMEDOUT : 0;
-}
-EXPORT_SYMBOL(ptlrpc_reconnect_import);
-
-/**
- * Connection on import \a imp is changed to another one (if more than one is
- * present). We typically chose connection that we have not tried to connect to
- * the longest
- */
-static int import_select_connection(struct obd_import *imp)
-{
- struct obd_import_conn *imp_conn = NULL, *conn;
- struct obd_export *dlmexp;
- char *target_start;
- int target_len, tried_all = 1;
-
- spin_lock(&imp->imp_lock);
-
- if (list_empty(&imp->imp_conn_list)) {
- CERROR("%s: no connections available\n",
- imp->imp_obd->obd_name);
- spin_unlock(&imp->imp_lock);
- return -EINVAL;
- }
-
- list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
- CDEBUG(D_HA, "%s: connect to NID %s last attempt %llu\n",
- imp->imp_obd->obd_name,
- libcfs_nid2str(conn->oic_conn->c_peer.nid),
- conn->oic_last_attempt);
-
- /* If we have not tried this connection since
- * the last successful attempt, go with this one
- */
- if ((conn->oic_last_attempt == 0) ||
- cfs_time_beforeq_64(conn->oic_last_attempt,
- imp->imp_last_success_conn)) {
- imp_conn = conn;
- tried_all = 0;
- break;
- }
-
- /* If all of the connections have already been tried
- * since the last successful connection; just choose the
- * least recently used
- */
- if (!imp_conn)
- imp_conn = conn;
- else if (cfs_time_before_64(conn->oic_last_attempt,
- imp_conn->oic_last_attempt))
- imp_conn = conn;
- }
-
- /* if not found, simply choose the current one */
- if (!imp_conn || imp->imp_force_reconnect) {
- LASSERT(imp->imp_conn_current);
- imp_conn = imp->imp_conn_current;
- tried_all = 0;
- }
- LASSERT(imp_conn->oic_conn);
-
- /* If we've tried everything, and we're back to the beginning of the
- * list, increase our timeout and try again. It will be reset when
- * we do finally connect. (FIXME: really we should wait for all network
- * state associated with the last connection attempt to drain before
- * trying to reconnect on it.)
- */
- if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
- struct adaptive_timeout *at = &imp->imp_at.iat_net_latency;
-
- if (at_get(at) < CONNECTION_SWITCH_MAX) {
- at_measured(at, at_get(at) + CONNECTION_SWITCH_INC);
- if (at_get(at) > CONNECTION_SWITCH_MAX)
- at_reset(at, CONNECTION_SWITCH_MAX);
- }
- LASSERT(imp_conn->oic_last_attempt);
- CDEBUG(D_HA, "%s: tried all connections, increasing latency to %ds\n",
- imp->imp_obd->obd_name, at_get(at));
- }
-
- imp_conn->oic_last_attempt = cfs_time_current_64();
-
- /* switch connection, don't mind if it's same as the current one */
- ptlrpc_connection_put(imp->imp_connection);
- imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
-
- dlmexp = class_conn2export(&imp->imp_dlm_handle);
- ptlrpc_connection_put(dlmexp->exp_connection);
- dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
- class_export_put(dlmexp);
-
- if (imp->imp_conn_current != imp_conn) {
- if (imp->imp_conn_current) {
- deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
- &target_start, &target_len);
-
- CDEBUG(D_HA, "%s: Connection changing to %.*s (at %s)\n",
- imp->imp_obd->obd_name,
- target_len, target_start,
- libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
- }
-
- imp->imp_conn_current = imp_conn;
- }
-
- CDEBUG(D_HA, "%s: import %p using connection %s/%s\n",
- imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
- libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
-
- spin_unlock(&imp->imp_lock);
-
- return 0;
-}
-
-/*
- * must be called under imp_lock
- */
-static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
-{
- struct ptlrpc_request *req;
-
- /* The requests in committed_list always have smaller transnos than
- * the requests in replay_list
- */
- if (!list_empty(&imp->imp_committed_list)) {
- req = list_first_entry(&imp->imp_committed_list,
- struct ptlrpc_request, rq_replay_list);
- *transno = req->rq_transno;
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_ERROR, req,
- "zero transno in committed_list");
- LBUG();
- }
- return 1;
- }
- if (!list_empty(&imp->imp_replay_list)) {
- req = list_first_entry(&imp->imp_replay_list,
- struct ptlrpc_request, rq_replay_list);
- *transno = req->rq_transno;
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_ERROR, req, "zero transno in replay_list");
- LBUG();
- }
- return 1;
- }
- return 0;
-}
-
-/**
- * Attempt to (re)connect import \a imp. This includes all preparations,
- * initializing CONNECT RPC request and passing it to ptlrpcd for
- * actual sending.
- * Returns 0 on success or error code.
- */
-int ptlrpc_connect_import(struct obd_import *imp)
-{
- struct obd_device *obd = imp->imp_obd;
- int initial_connect = 0;
- int set_transno = 0;
- __u64 committed_before_reconnect = 0;
- struct ptlrpc_request *request;
- char *bufs[] = { NULL,
- obd2cli_tgt(imp->imp_obd),
- obd->obd_uuid.uuid,
- (char *)&imp->imp_dlm_handle,
- (char *)&imp->imp_connect_data };
- struct ptlrpc_connect_async_args *aa;
- int rc;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- spin_unlock(&imp->imp_lock);
- CERROR("can't connect to a closed import\n");
- return -EINVAL;
- } else if (imp->imp_state == LUSTRE_IMP_FULL) {
- spin_unlock(&imp->imp_lock);
- CERROR("already connected\n");
- return 0;
- } else if (imp->imp_state == LUSTRE_IMP_CONNECTING ||
- imp->imp_connected) {
- spin_unlock(&imp->imp_lock);
- CERROR("already connecting\n");
- return -EALREADY;
- }
-
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
-
- imp->imp_conn_cnt++;
- imp->imp_resend_replay = 0;
-
- if (!lustre_handle_is_used(&imp->imp_remote_handle))
- initial_connect = 1;
- else
- committed_before_reconnect = imp->imp_peer_committed_transno;
-
- set_transno = ptlrpc_first_transno(imp,
- &imp->imp_connect_data.ocd_transno);
- spin_unlock(&imp->imp_lock);
-
- rc = import_select_connection(imp);
- if (rc)
- goto out;
-
- rc = sptlrpc_import_sec_adapt(imp, NULL, NULL);
- if (rc)
- goto out;
-
- /* Reset connect flags to the originally requested flags, in case
- * the server is updated on-the-fly we will get the new features.
- */
- imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
- /* Reset ocd_version each time so the server knows the exact versions */
- imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
- imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
- imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
-
- rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd,
- &obd->obd_uuid, &imp->imp_connect_data, NULL);
- if (rc)
- goto out;
-
- request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
- if (!request) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION,
- imp->imp_connect_op, bufs, NULL);
- if (rc) {
- ptlrpc_request_free(request);
- goto out;
- }
-
- /* Report the rpc service time to the server so that it knows how long
- * to wait for clients to join recovery
- */
- lustre_msg_set_service_time(request->rq_reqmsg,
- at_timeout2est(request->rq_timeout));
-
- /* The amount of time we give the server to process the connect req.
- * import_select_connection will increase the net latency on
- * repeated reconnect attempts to cover slow networks.
- * We override/ignore the server rpc completion estimate here,
- * which may be large if this is a reconnect attempt
- */
- request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
- lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
-
- request->rq_no_resend = 1;
- request->rq_no_delay = 1;
- request->rq_send_state = LUSTRE_IMP_CONNECTING;
- /* Allow a slightly larger reply for future growth compatibility */
- req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
- sizeof(struct obd_connect_data) +
- 16 * sizeof(__u64));
- ptlrpc_request_set_replen(request);
- request->rq_interpret_reply = ptlrpc_connect_interpret;
-
- BUILD_BUG_ON(sizeof(*aa) > sizeof(request->rq_async_args));
- aa = ptlrpc_req_async_args(request);
- memset(aa, 0, sizeof(*aa));
-
- aa->pcaa_peer_committed = committed_before_reconnect;
- aa->pcaa_initial_connect = initial_connect;
-
- if (aa->pcaa_initial_connect) {
- spin_lock(&imp->imp_lock);
- imp->imp_replayable = 1;
- spin_unlock(&imp->imp_lock);
- lustre_msg_add_op_flags(request->rq_reqmsg,
- MSG_CONNECT_INITIAL);
- }
-
- if (set_transno)
- lustre_msg_add_op_flags(request->rq_reqmsg,
- MSG_CONNECT_TRANSNO);
-
- DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)",
- request->rq_timeout);
- ptlrpcd_add_req(request);
- rc = 0;
-out:
- if (rc != 0)
- IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_connect_import);
-
-static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
-{
- int force_verify;
-
- spin_lock(&imp->imp_lock);
- force_verify = imp->imp_force_verify != 0;
- spin_unlock(&imp->imp_lock);
-
- if (force_verify)
- ptlrpc_pinger_wake_up();
-}
-
-static int ptlrpc_busy_reconnect(int rc)
-{
- return (rc == -EBUSY) || (rc == -EAGAIN);
-}
-
-static int ptlrpc_connect_set_flags(struct obd_import *imp,
- struct obd_connect_data *ocd,
- u64 old_connect_flags,
- struct obd_export *exp, int init_connect)
-{
- struct client_obd *cli = &imp->imp_obd->u.cli;
- static bool warned;
-
- if ((imp->imp_connect_flags_orig & OBD_CONNECT_IBITS) &&
- !(ocd->ocd_connect_flags & OBD_CONNECT_IBITS)) {
- LCONSOLE_WARN("%s: MDS %s does not support ibits lock, either very old or invalid: requested %#llx, replied %#llx\n",
- imp->imp_obd->obd_name,
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_connect_flags_orig,
- ocd->ocd_connect_flags);
- return -EPROTO;
- }
-
- spin_lock(&imp->imp_lock);
- list_del(&imp->imp_conn_current->oic_item);
- list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list);
- imp->imp_last_success_conn = imp->imp_conn_current->oic_last_attempt;
-
- spin_unlock(&imp->imp_lock);
-
- if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
- (ocd->ocd_version > LUSTRE_VERSION_CODE +
- LUSTRE_VERSION_OFFSET_WARN ||
- ocd->ocd_version < LUSTRE_VERSION_CODE -
- LUSTRE_VERSION_OFFSET_WARN)) {
- /*
- * Sigh, some compilers do not like #ifdef in the middle
- * of macro arguments
- */
- const char *older = "older than client. Consider upgrading server";
- const char *newer = "newer than client. Consider recompiling application";
-
- LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
- obd2cli_tgt(imp->imp_obd),
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
- OBD_OCD_VERSION_MINOR(ocd->ocd_version),
- OBD_OCD_VERSION_PATCH(ocd->ocd_version),
- OBD_OCD_VERSION_FIX(ocd->ocd_version),
- ocd->ocd_version > LUSTRE_VERSION_CODE ?
- newer : older, LUSTRE_VERSION_STRING);
- warned = true;
- }
-
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 53, 0)
- /*
- * Check if server has LU-1252 fix applied to not always swab
- * the IR MNE entries. Do this only once per connection. This
- * fixup is version-limited, because we don't want to carry the
- * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
- * need interop with unpatched 2.2 servers. For newer servers,
- * the client will do MNE swabbing only as needed. LU-1644
- */
- if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
- !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
- OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 &&
- OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 &&
- !strcmp(imp->imp_obd->obd_type->typ_name,
- LUSTRE_MGC_NAME)))
- imp->imp_need_mne_swab = 1;
- else /* clear if server was upgraded since last connect */
- imp->imp_need_mne_swab = 0;
-#endif
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
- /*
- * We sent to the server ocd_cksum_types with bits set
- * for algorithms we understand. The server masked off
- * the checksum types it doesn't support
- */
- if (!(ocd->ocd_cksum_types & cksum_types_supported_client())) {
- LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
- obd2cli_tgt(imp->imp_obd),
- ocd->ocd_cksum_types,
- cksum_types_supported_client());
- cli->cl_checksum = 0;
- cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
- } else {
- cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
- }
- } else {
- /*
- * The server does not support OBD_CONNECT_CKSUM.
- * Enforce ADLER for backward compatibility
- */
- cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
- }
- cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
- cli->cl_max_pages_per_rpc =
- min(ocd->ocd_brw_size >> PAGE_SHIFT,
- cli->cl_max_pages_per_rpc);
- else if (imp->imp_connect_op == MDS_CONNECT ||
- imp->imp_connect_op == MGS_CONNECT)
- cli->cl_max_pages_per_rpc = 1;
-
- LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
- (cli->cl_max_pages_per_rpc > 0));
-
- client_adjust_max_dirty(cli);
-
- /*
- * Update client max modify RPCs in flight with value returned
- * by the server
- */
- if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
- cli->cl_max_mod_rpcs_in_flight = min(
- cli->cl_max_mod_rpcs_in_flight,
- ocd->ocd_maxmodrpcs);
- else
- cli->cl_max_mod_rpcs_in_flight = 1;
-
- /*
- * Reset ns_connect_flags only for initial connect. It might be
- * changed in while using FS and if we reset it in reconnect
- * this leads to losing user settings done before such as
- * disable lru_resize, etc.
- */
- if (old_connect_flags != exp_connect_flags(exp) || init_connect) {
- CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n",
- imp->imp_obd->obd_name, ocd->ocd_connect_flags);
- imp->imp_obd->obd_namespace->ns_connect_flags =
- ocd->ocd_connect_flags;
- imp->imp_obd->obd_namespace->ns_orig_connect_flags =
- ocd->ocd_connect_flags;
- }
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_AT)
- /*
- * We need a per-message support flag, because
- * a. we don't know if the incoming connect reply
- * supports AT or not (in reply_in_callback)
- * until we unpack it.
- * b. failovered server means export and flags are gone
- * (in ptlrpc_send_reply).
- * Can only be set when we know AT is supported at
- * both ends
- */
- imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
- else
- imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
-
- imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
-
- return 0;
-}
-
-/**
- * Add all replay requests back to unreplied list before start replay,
- * so that we can make sure the known replied XID is always increased
- * only even if when replaying requests.
- */
-static void ptlrpc_prepare_replay(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
-
- if (imp->imp_state != LUSTRE_IMP_REPLAY ||
- imp->imp_resend_replay)
- return;
-
- /*
- * If the server was restart during repaly, the requests may
- * have been added to the unreplied list in former replay.
- */
- spin_lock(&imp->imp_lock);
-
- list_for_each_entry(req, &imp->imp_committed_list, rq_replay_list) {
- if (list_empty(&req->rq_unreplied_list))
- ptlrpc_add_unreplied(req);
- }
-
- list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list) {
- if (list_empty(&req->rq_unreplied_list))
- ptlrpc_add_unreplied(req);
- }
-
- imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
- spin_unlock(&imp->imp_lock);
-}
-
-/**
- * interpret_reply callback for connect RPCs.
- * Looks into returned status of connect operation and decides
- * what to do with the import - i.e enter recovery, promote it to
- * full state for normal operations of disconnect it due to an error.
- */
-static int ptlrpc_connect_interpret(const struct lu_env *env,
- struct ptlrpc_request *request,
- void *data, int rc)
-{
- struct ptlrpc_connect_async_args *aa = data;
- struct obd_import *imp = request->rq_import;
- struct lustre_handle old_hdl;
- __u64 old_connect_flags;
- int msg_flags;
- struct obd_connect_data *ocd;
- struct obd_export *exp;
- int ret;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- imp->imp_connect_tried = 1;
- spin_unlock(&imp->imp_lock);
- return 0;
- }
-
- if (rc) {
- /* if this reconnect to busy export - not need select new target
- * for connecting
- */
- imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
- spin_unlock(&imp->imp_lock);
- ptlrpc_maybe_ping_import_soon(imp);
- goto out;
- }
-
- /*
- * LU-7558: indicate that we are interpretting connect reply,
- * pltrpc_connect_import() will not try to reconnect until
- * interpret will finish.
- */
- imp->imp_connected = 1;
- spin_unlock(&imp->imp_lock);
-
- LASSERT(imp->imp_conn_current);
-
- msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
-
- ret = req_capsule_get_size(&request->rq_pill, &RMF_CONNECT_DATA,
- RCL_SERVER);
- /* server replied obd_connect_data is always bigger */
- ocd = req_capsule_server_sized_get(&request->rq_pill,
- &RMF_CONNECT_DATA, ret);
-
- if (!ocd) {
- CERROR("%s: no connect data from server\n",
- imp->imp_obd->obd_name);
- rc = -EPROTO;
- goto out;
- }
-
- spin_lock(&imp->imp_lock);
-
- /* All imports are pingable */
- imp->imp_pingable = 1;
- imp->imp_force_reconnect = 0;
- imp->imp_force_verify = 0;
-
- imp->imp_connect_data = *ocd;
-
- CDEBUG(D_HA, "%s: connect to target with instance %u\n",
- imp->imp_obd->obd_name, ocd->ocd_instance);
- exp = class_conn2export(&imp->imp_dlm_handle);
-
- spin_unlock(&imp->imp_lock);
-
- if (!exp) {
- /* This could happen if export is cleaned during the
- * connect attempt
- */
- CERROR("%s: missing export after connect\n",
- imp->imp_obd->obd_name);
- rc = -ENODEV;
- goto out;
- }
-
- /* check that server granted subset of flags we asked for. */
- if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
- ocd->ocd_connect_flags) {
- CERROR("%s: Server didn't grant the asked for subset of flags: asked=%#llx granted=%#llx\n",
- imp->imp_obd->obd_name, imp->imp_connect_flags_orig,
- ocd->ocd_connect_flags);
- rc = -EPROTO;
- goto out;
- }
-
- old_connect_flags = exp_connect_flags(exp);
- exp->exp_connect_data = *ocd;
- imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
-
- /*
- * The net statistics after (re-)connect is not valid anymore,
- * because may reflect other routing, etc.
- */
- at_init(&imp->imp_at.iat_net_latency, 0, 0);
- ptlrpc_at_adj_net_latency(request,
- lustre_msg_get_service_time(request->rq_repmsg));
-
- /* Import flags should be updated before waking import at FULL state */
- rc = ptlrpc_connect_set_flags(imp, ocd, old_connect_flags, exp,
- aa->pcaa_initial_connect);
- class_export_put(exp);
- if (rc)
- goto out;
-
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
-
- if (aa->pcaa_initial_connect) {
- spin_lock(&imp->imp_lock);
- if (msg_flags & MSG_CONNECT_REPLAYABLE) {
- imp->imp_replayable = 1;
- spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "connected to replayable target: %s\n",
- obd2cli_tgt(imp->imp_obd));
- } else {
- imp->imp_replayable = 0;
- spin_unlock(&imp->imp_lock);
- }
-
- /* if applies, adjust the imp->imp_msg_magic here
- * according to reply flags
- */
-
- imp->imp_remote_handle =
- *lustre_msg_get_handle(request->rq_repmsg);
-
- /* Initial connects are allowed for clients with non-random
- * uuids when servers are in recovery. Simply signal the
- * servers replay is complete and wait in REPLAY_WAIT.
- */
- if (msg_flags & MSG_CONNECT_RECOVERING) {
- CDEBUG(D_HA, "connect to %s during recovery\n",
- obd2cli_tgt(imp->imp_obd));
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
- } else {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
- ptlrpc_activate_import(imp);
- }
-
- rc = 0;
- goto finish;
- }
-
- /* Determine what recovery state to move the import to. */
- if (msg_flags & MSG_CONNECT_RECONNECT) {
- memset(&old_hdl, 0, sizeof(old_hdl));
- if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
- sizeof(old_hdl))) {
- LCONSOLE_WARN("Reconnect to %s (at @%s) failed due bad handle %#llx\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_dlm_handle.cookie);
- rc = -ENOTCONN;
- goto out;
- }
-
- if (memcmp(&imp->imp_remote_handle,
- lustre_msg_get_handle(request->rq_repmsg),
- sizeof(imp->imp_remote_handle))) {
- int level = msg_flags & MSG_CONNECT_RECOVERING ?
- D_HA : D_WARNING;
-
- /* Bug 16611/14775: if server handle have changed,
- * that means some sort of disconnection happened.
- * If the server is not in recovery, that also means it
- * already erased all of our state because of previous
- * eviction. If it is in recovery - we are safe to
- * participate since we can reestablish all of our state
- * with server again
- */
- if ((msg_flags & MSG_CONNECT_RECOVERING)) {
- CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_remote_handle.cookie,
- lustre_msg_get_handle(
- request->rq_repmsg)->cookie);
- } else {
- LCONSOLE_WARN("Evicted from %s (at %s) after server handle changed from %#llx to %#llx\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection-> \
- c_remote_uuid.uuid,
- imp->imp_remote_handle.cookie,
- lustre_msg_get_handle(
- request->rq_repmsg)->cookie);
- }
-
- imp->imp_remote_handle =
- *lustre_msg_get_handle(request->rq_repmsg);
-
- if (!(msg_flags & MSG_CONNECT_RECOVERING)) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
- rc = 0;
- goto finish;
- }
-
- } else {
- CDEBUG(D_HA, "reconnected to %s@%s after partition\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
- }
-
- if (imp->imp_invalid) {
- CDEBUG(D_HA, "%s: reconnected but import is invalid; marking evicted\n",
- imp->imp_obd->obd_name);
- IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
- } else if (msg_flags & MSG_CONNECT_RECOVERING) {
- CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
- imp->imp_obd->obd_name,
- obd2cli_tgt(imp->imp_obd));
-
- spin_lock(&imp->imp_lock);
- imp->imp_resend_replay = 1;
- spin_unlock(&imp->imp_lock);
-
- IMPORT_SET_STATE(imp, imp->imp_replay_state);
- } else {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
- }
- } else if ((msg_flags & MSG_CONNECT_RECOVERING) && !imp->imp_invalid) {
- LASSERT(imp->imp_replayable);
- imp->imp_remote_handle =
- *lustre_msg_get_handle(request->rq_repmsg);
- imp->imp_last_replay_transno = 0;
- imp->imp_replay_cursor = &imp->imp_committed_list;
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
- } else {
- DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)",
- imp->imp_obd->obd_name, msg_flags);
- imp->imp_remote_handle =
- *lustre_msg_get_handle(request->rq_repmsg);
- IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
- }
-
- /* Sanity checks for a reconnected import. */
- if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE))
- CERROR("imp_replayable flag does not match server after reconnect. We should LBUG right here.\n");
-
- if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
- lustre_msg_get_last_committed(request->rq_repmsg) <
- aa->pcaa_peer_committed)
- CERROR("%s went back in time (transno %lld was previously committed, server now claims %lld)! See https://bugzilla.lustre.org/show_bug.cgi?id=9646\n",
- obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
- lustre_msg_get_last_committed(request->rq_repmsg));
-
-finish:
- ptlrpc_prepare_replay(imp);
- rc = ptlrpc_import_recovery_state_machine(imp);
- if (rc == -ENOTCONN) {
- CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
- ptlrpc_connect_import(imp);
- spin_lock(&imp->imp_lock);
- imp->imp_connected = 0;
- imp->imp_connect_tried = 1;
- spin_unlock(&imp->imp_lock);
- return 0;
- }
-
-out:
- spin_lock(&imp->imp_lock);
- imp->imp_connected = 0;
- imp->imp_connect_tried = 1;
- spin_unlock(&imp->imp_lock);
-
- if (rc != 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
- if (rc == -EACCES) {
- /*
- * Give up trying to reconnect
- * EACCES means client has no permission for connection
- */
- imp->imp_obd->obd_no_recov = 1;
- ptlrpc_deactivate_import(imp);
- }
-
- if (rc == -EPROTO) {
- struct obd_connect_data *ocd;
-
- /* reply message might not be ready */
- if (!request->rq_repmsg)
- return -EPROTO;
-
- ocd = req_capsule_server_get(&request->rq_pill,
- &RMF_CONNECT_DATA);
- if (ocd &&
- (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
- (ocd->ocd_version != LUSTRE_VERSION_CODE)) {
- /*
- * Actually servers are only supposed to refuse
- * connection from liblustre clients, so we
- * should never see this from VFS context
- */
- LCONSOLE_ERROR_MSG(0x16a, "Server %s version (%d.%d.%d.%d) refused connection from this client with an incompatible version (%s). Client must be recompiled\n",
- obd2cli_tgt(imp->imp_obd),
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
- OBD_OCD_VERSION_MINOR(ocd->ocd_version),
- OBD_OCD_VERSION_PATCH(ocd->ocd_version),
- OBD_OCD_VERSION_FIX(ocd->ocd_version),
- LUSTRE_VERSION_STRING);
- ptlrpc_deactivate_import(imp);
- IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
- }
- return -EPROTO;
- }
-
- ptlrpc_maybe_ping_import_soon(imp);
-
- CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
- obd2cli_tgt(imp->imp_obd),
- (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
- }
-
- wake_up_all(&imp->imp_recovery_waitq);
- return rc;
-}
-
-/**
- * interpret callback for "completed replay" RPCs.
- * \see signal_completed_replay
- */
-static int completed_replay_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *data, int rc)
-{
- atomic_dec(&req->rq_import->imp_replay_inflight);
- if (req->rq_status == 0 &&
- !req->rq_import->imp_vbr_failed) {
- ptlrpc_import_recovery_state_machine(req->rq_import);
- } else {
- if (req->rq_import->imp_vbr_failed) {
- CDEBUG(D_WARNING,
- "%s: version recovery fails, reconnecting\n",
- req->rq_import->imp_obd->obd_name);
- } else {
- CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, reconnecting\n",
- req->rq_import->imp_obd->obd_name,
- req->rq_status);
- }
- ptlrpc_connect_import(req->rq_import);
- }
-
- return 0;
-}
-
-/**
- * Let server know that we have no requests to replay anymore.
- * Achieved by just sending a PING request
- */
-static int signal_completed_replay(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
-
- if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
- return 0;
-
- LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
- atomic_inc(&imp->imp_replay_inflight);
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
- OBD_PING);
- if (!req) {
- atomic_dec(&imp->imp_replay_inflight);
- return -ENOMEM;
- }
-
- ptlrpc_request_set_replen(req);
- req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
- lustre_msg_add_flags(req->rq_reqmsg,
- MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
- if (AT_OFF)
- req->rq_timeout *= 3;
- req->rq_interpret_reply = completed_replay_interpret;
-
- ptlrpcd_add_req(req);
- return 0;
-}
-
-/**
- * In kernel code all import invalidation happens in its own
- * separate thread, so that whatever application happened to encounter
- * a problem could still be killed or otherwise continue
- */
-static int ptlrpc_invalidate_import_thread(void *data)
-{
- struct obd_import *imp = data;
-
- unshare_fs_struct();
-
- CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
- imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
-
- ptlrpc_invalidate_import(imp);
-
- if (obd_dump_on_eviction) {
- CERROR("dump the log upon eviction\n");
- libcfs_debug_dumplog();
- }
-
- IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
- ptlrpc_import_recovery_state_machine(imp);
-
- class_import_put(imp);
- return 0;
-}
-
-/**
- * This is the state machine for client-side recovery on import.
- *
- * Typically we have two possibly paths. If we came to server and it is not
- * in recovery, we just enter IMP_EVICTED state, invalidate our import
- * state and reconnect from scratch.
- * If we came to server that is in recovery, we enter IMP_REPLAY import state.
- * We go through our list of requests to replay and send them to server one by
- * one.
- * After sending all request from the list we change import state to
- * IMP_REPLAY_LOCKS and re-request all the locks we believe we have from server
- * and also all the locks we don't yet have and wait for server to grant us.
- * After that we send a special "replay completed" request and change import
- * state to IMP_REPLAY_WAIT.
- * Upon receiving reply to that "replay completed" RPC we enter IMP_RECOVER
- * state and resend all requests from sending list.
- * After that we promote import to FULL state and send all delayed requests
- * and import is fully operational after that.
- *
- */
-int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
-{
- int rc = 0;
- int inflight;
- char *target_start;
- int target_len;
-
- if (imp->imp_state == LUSTRE_IMP_EVICTED) {
- deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
- &target_start, &target_len);
- /* Don't care about MGC eviction */
- if (strcmp(imp->imp_obd->obd_type->typ_name,
- LUSTRE_MGC_NAME) != 0) {
- LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted by %.*s; in progress operations using this service will fail.\n",
- imp->imp_obd->obd_name, target_len,
- target_start);
- }
- CDEBUG(D_HA, "evicted from %s@%s; invalidating\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
- /* reset vbr_failed flag upon eviction */
- spin_lock(&imp->imp_lock);
- imp->imp_vbr_failed = 0;
- spin_unlock(&imp->imp_lock);
-
- {
- struct task_struct *task;
- /* bug 17802: XXX client_disconnect_export vs connect request
- * race. if client is evicted at this time, we start
- * invalidate thread without reference to import and import can
- * be freed at same time.
- */
- class_import_get(imp);
- task = kthread_run(ptlrpc_invalidate_import_thread, imp,
- "ll_imp_inval");
- if (IS_ERR(task)) {
- class_import_put(imp);
- CERROR("error starting invalidate thread: %d\n", rc);
- rc = PTR_ERR(task);
- } else {
- rc = 0;
- }
- return rc;
- }
- }
-
- if (imp->imp_state == LUSTRE_IMP_REPLAY) {
- CDEBUG(D_HA, "replay requested by %s\n",
- obd2cli_tgt(imp->imp_obd));
- rc = ptlrpc_replay_next(imp, &inflight);
- if (inflight == 0 &&
- atomic_read(&imp->imp_replay_inflight) == 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
- rc = ldlm_replay_locks(imp);
- if (rc)
- goto out;
- }
- rc = 0;
- }
-
- if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS)
- if (atomic_read(&imp->imp_replay_inflight) == 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
- rc = signal_completed_replay(imp);
- if (rc)
- goto out;
- }
-
- if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT)
- if (atomic_read(&imp->imp_replay_inflight) == 0)
- IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
-
- if (imp->imp_state == LUSTRE_IMP_RECOVER) {
- CDEBUG(D_HA, "reconnected to %s@%s\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
-
- rc = ptlrpc_resend(imp);
- if (rc)
- goto out;
- IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
- ptlrpc_activate_import(imp);
-
- deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
- &target_start, &target_len);
- LCONSOLE_INFO("%s: Connection restored to %.*s (at %s)\n",
- imp->imp_obd->obd_name,
- target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
- }
-
- if (imp->imp_state == LUSTRE_IMP_FULL) {
- wake_up_all(&imp->imp_recovery_waitq);
- ptlrpc_wake_delayed(imp);
- }
-
-out:
- return rc;
-}
-
-int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
-{
- struct ptlrpc_request *req;
- int rq_opc, rc = 0;
-
- if (imp->imp_obd->obd_force)
- goto set_state;
-
- switch (imp->imp_connect_op) {
- case OST_CONNECT:
- rq_opc = OST_DISCONNECT;
- break;
- case MDS_CONNECT:
- rq_opc = MDS_DISCONNECT;
- break;
- case MGS_CONNECT:
- rq_opc = MGS_DISCONNECT;
- break;
- default:
- rc = -EINVAL;
- CERROR("%s: don't know how to disconnect from %s (connect_op %d): rc = %d\n",
- imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
- imp->imp_connect_op, rc);
- return rc;
- }
-
- if (ptlrpc_import_in_recovery(imp)) {
- long timeout;
-
- if (AT_OFF) {
- if (imp->imp_server_timeout)
- timeout = obd_timeout * HZ / 2;
- else
- timeout = obd_timeout * HZ;
- } else {
- int idx = import_at_get_index(imp,
- imp->imp_client->cli_request_portal);
- timeout = at_get(&imp->imp_at.iat_service_estimate[idx]) * HZ;
- }
-
- if (wait_event_idle_timeout(imp->imp_recovery_waitq,
- !ptlrpc_import_in_recovery(imp),
- cfs_timeout_cap(timeout)) == 0)
- l_wait_event_abortable(
- imp->imp_recovery_waitq,
- !ptlrpc_import_in_recovery(imp));
- }
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state != LUSTRE_IMP_FULL)
- goto out;
- spin_unlock(&imp->imp_lock);
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
- LUSTRE_OBD_VERSION, rq_opc);
- if (req) {
- /* We are disconnecting, do not retry a failed DISCONNECT rpc if
- * it fails. We can get through the above with a down server
- * if the client doesn't know the server is gone yet.
- */
- req->rq_no_resend = 1;
-
- /* We want client umounts to happen quickly, no matter the
- * server state...
- */
- req->rq_timeout = min_t(int, req->rq_timeout,
- INITIAL_CONNECT_TIMEOUT);
-
- IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING);
- req->rq_send_state = LUSTRE_IMP_CONNECTING;
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- ptlrpc_req_finished(req);
- }
-
-set_state:
- spin_lock(&imp->imp_lock);
-out:
- if (noclose)
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
- else
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
- memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
- spin_unlock(&imp->imp_lock);
-
- if (rc == -ETIMEDOUT || rc == -ENOTCONN || rc == -ESHUTDOWN)
- rc = 0;
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_disconnect_import);
-
-/* Adaptive Timeout utils */
-extern unsigned int at_min, at_max, at_history;
-
-/*
- *Update at_current with the specified value (bounded by at_min and at_max),
- * as well as the AT history "bins".
- * - Bin into timeslices using AT_BINS bins.
- * - This gives us a max of the last at_history seconds without the storage,
- * but still smoothing out a return to normalcy from a slow response.
- * - (E.g. remember the maximum latency in each minute of the last 4 minutes.)
- */
-int at_measured(struct adaptive_timeout *at, unsigned int val)
-{
- unsigned int old = at->at_current;
- time64_t now = ktime_get_real_seconds();
- long binlimit = max_t(long, at_history / AT_BINS, 1);
-
- LASSERT(at);
- CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
- val, at, (long)(now - at->at_binstart), at->at_current,
- at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]);
-
- if (val == 0)
- /* 0's don't count, because we never want our timeout to
- * drop to 0, and because 0 could mean an error
- */
- return 0;
-
- spin_lock(&at->at_lock);
-
- if (unlikely(at->at_binstart == 0)) {
- /* Special case to remove default from history */
- at->at_current = val;
- at->at_worst_ever = val;
- at->at_worst_time = now;
- at->at_hist[0] = val;
- at->at_binstart = now;
- } else if (now - at->at_binstart < binlimit) {
- /* in bin 0 */
- at->at_hist[0] = max(val, at->at_hist[0]);
- at->at_current = max(val, at->at_current);
- } else {
- int i, shift;
- unsigned int maxv = val;
- /* move bins over */
- shift = (u32)(now - at->at_binstart) / binlimit;
- LASSERT(shift > 0);
- for (i = AT_BINS - 1; i >= 0; i--) {
- if (i >= shift) {
- at->at_hist[i] = at->at_hist[i - shift];
- maxv = max(maxv, at->at_hist[i]);
- } else {
- at->at_hist[i] = 0;
- }
- }
- at->at_hist[0] = val;
- at->at_current = maxv;
- at->at_binstart += shift * binlimit;
- }
-
- if (at->at_current > at->at_worst_ever) {
- at->at_worst_ever = at->at_current;
- at->at_worst_time = now;
- }
-
- if (at->at_flags & AT_FLG_NOHIST)
- /* Only keep last reported val; keeping the rest of the history
- * for debugfs only
- */
- at->at_current = val;
-
- if (at_max > 0)
- at->at_current = min(at->at_current, at_max);
- at->at_current = max(at->at_current, at_min);
-
- if (at->at_current != old)
- CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d (val=%u) hist %u %u %u %u\n",
- at,
- old, at->at_current, at->at_current - old, val,
- at->at_hist[0], at->at_hist[1], at->at_hist[2],
- at->at_hist[3]);
-
- /* if we changed, report the old value */
- old = (at->at_current != old) ? old : 0;
-
- spin_unlock(&at->at_lock);
- return old;
-}
-
-/* Find the imp_at index for a given portal; assign if space available */
-int import_at_get_index(struct obd_import *imp, int portal)
-{
- struct imp_at *at = &imp->imp_at;
- int i;
-
- for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
- if (at->iat_portal[i] == portal)
- return i;
- if (at->iat_portal[i] == 0)
- /* unused */
- break;
- }
-
- /* Not found in list, add it under a lock */
- spin_lock(&imp->imp_lock);
-
- /* Check unused under lock */
- for (; i < IMP_AT_MAX_PORTALS; i++) {
- if (at->iat_portal[i] == portal)
- goto out;
- if (at->iat_portal[i] == 0)
- /* unused */
- break;
- }
-
- /* Not enough portals? */
- LASSERT(i < IMP_AT_MAX_PORTALS);
-
- at->iat_portal[i] = portal;
-out:
- spin_unlock(&imp->imp_lock);
- return i;
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
deleted file mode 100644
index 2855f38c8190..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ /dev/null
@@ -1,2234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/layout.c
- *
- * Lustre Metadata Target (mdt) request handler
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-/*
- * This file contains the "capsule/pill" abstraction layered above PTLRPC.
- *
- * Every struct ptlrpc_request contains a "pill", which points to a description
- * of the format that the request conforms to.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <linux/module.h>
-
-#include <uapi/linux/lustre/lustre_idl.h>
-
-#include <llog_swab.h>
-#include <lustre_debug.h>
-#include <lustre_swab.h>
-#include <uapi/linux/lustre/lustre_ver.h>
-#include <obd.h>
-#include <obd_support.h>
-
-/* struct ptlrpc_request, lustre_msg* */
-#include <lustre_req_layout.h>
-#include <lustre_acl.h>
-
-/*
- * RQFs (see below) refer to two struct req_msg_field arrays describing the
- * client request and server reply, respectively.
- */
-/* empty set of fields... for suitable definition of emptiness. */
-static const struct req_msg_field *empty[] = {
- &RMF_PTLRPC_BODY
-};
-
-static const struct req_msg_field *mgs_target_info_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MGS_TARGET_INFO
-};
-
-static const struct req_msg_field *mgs_set_info[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MGS_SEND_PARAM
-};
-
-static const struct req_msg_field *mgs_config_read_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MGS_CONFIG_BODY
-};
-
-static const struct req_msg_field *mgs_config_read_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MGS_CONFIG_RES
-};
-
-static const struct req_msg_field *log_cancel_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LOGCOOKIES
-};
-
-static const struct req_msg_field *mdt_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY
-};
-
-static const struct req_msg_field *mdt_body_capa[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *quotactl_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OBD_QUOTACTL
-};
-
-static const struct req_msg_field *mdt_close_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_EPOCH,
- &RMF_REC_REINT,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *mdt_intent_close_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_EPOCH,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_CLOSE_DATA
-};
-
-static const struct req_msg_field *obd_statfs_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OBD_STATFS
-};
-
-static const struct req_msg_field *seq_query_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_SEQ_OPC,
- &RMF_SEQ_RANGE
-};
-
-static const struct req_msg_field *seq_query_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_SEQ_RANGE
-};
-
-static const struct req_msg_field *fld_query_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FLD_OPC,
- &RMF_FLD_MDFLD
-};
-
-static const struct req_msg_field *fld_query_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FLD_MDFLD
-};
-
-static const struct req_msg_field *fld_read_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FLD_MDFLD
-};
-
-static const struct req_msg_field *fld_read_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_GENERIC_DATA
-};
-
-static const struct req_msg_field *mds_getattr_name_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_CAPA1,
- &RMF_NAME
-};
-
-static const struct req_msg_field *mds_reint_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT
-};
-
-static const struct req_msg_field *mds_reint_create_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME
-};
-
-static const struct req_msg_field *mds_reint_create_slave_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_create_acl_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_create_sym_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_SYMTGT,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_open_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_NAME,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *mds_reint_open_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *mds_reint_unlink_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_link_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_NAME,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_rename_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_NAME,
- &RMF_SYMTGT,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_migrate_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_NAME,
- &RMF_SYMTGT,
- &RMF_DLM_REQ,
- &RMF_MDT_EPOCH,
- &RMF_CLOSE_DATA
-};
-
-static const struct req_msg_field *mds_last_unlink_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_LOGCOOKIES,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *mds_reint_setattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_MDT_EPOCH,
- &RMF_EADATA,
- &RMF_LOGCOOKIES,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_setxattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mdt_swap_layouts[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_SWAP_LAYOUTS,
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *obd_connect_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_TGTUUID,
- &RMF_CLUUID,
- &RMF_CONN,
- &RMF_CONNECT_DATA
-};
-
-static const struct req_msg_field *obd_connect_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_CONNECT_DATA
-};
-
-static const struct req_msg_field *obd_set_info_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_SETINFO_KEY,
- &RMF_SETINFO_VAL
-};
-
-static const struct req_msg_field *ost_grant_shrink_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_SETINFO_KEY,
- &RMF_OST_BODY
-};
-
-static const struct req_msg_field *mds_getinfo_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_GETINFO_KEY,
- &RMF_GETINFO_VALLEN
-};
-
-static const struct req_msg_field *mds_getinfo_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_GETINFO_VAL,
-};
-
-static const struct req_msg_field *ldlm_enqueue_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *ldlm_enqueue_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP
-};
-
-static const struct req_msg_field *ldlm_enqueue_lvb_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_DLM_LVB
-};
-
-static const struct req_msg_field *ldlm_cp_callback_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_DLM_LVB
-};
-
-static const struct req_msg_field *ldlm_gl_callback_desc_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_DLM_GL_DESC
-};
-
-static const struct req_msg_field *ldlm_gl_callback_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_LVB
-};
-
-static const struct req_msg_field *ldlm_intent_basic_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
-};
-
-static const struct req_msg_field *ldlm_intent_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_REC_REINT
-};
-
-static const struct req_msg_field *ldlm_intent_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL
-};
-
-static const struct req_msg_field *ldlm_intent_layout_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_LAYOUT_INTENT,
- &RMF_EADATA /* for new layout to be set up */
-};
-
-static const struct req_msg_field *ldlm_intent_open_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *ldlm_intent_getattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_MDT_BODY, /* coincides with mds_getattr_name_client[] */
- &RMF_CAPA1,
- &RMF_NAME
-};
-
-static const struct req_msg_field *ldlm_intent_getattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *ldlm_intent_create_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_REC_REINT, /* coincides with mds_reint_create_client[] */
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *ldlm_intent_open_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_REC_REINT, /* coincides with mds_reint_open_client[] */
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_NAME,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *ldlm_intent_unlink_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_REC_REINT, /* coincides with mds_reint_unlink_client[] */
- &RMF_CAPA1,
- &RMF_NAME
-};
-
-static const struct req_msg_field *ldlm_intent_getxattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_MDT_BODY,
- &RMF_CAPA1,
-};
-
-static const struct req_msg_field *ldlm_intent_getxattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL, /* for req_capsule_extend/mdt_intent_policy */
- &RMF_EADATA,
- &RMF_EAVALS,
- &RMF_EAVALS_LENS
-};
-
-static const struct req_msg_field *mds_getxattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *mds_getxattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *mds_getattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *mds_setattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *llog_origin_handle_create_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOGD_BODY,
- &RMF_NAME
-};
-
-static const struct req_msg_field *llogd_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOGD_BODY
-};
-
-static const struct req_msg_field *llog_log_hdr_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOG_LOG_HDR
-};
-
-static const struct req_msg_field *llogd_conn_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOGD_CONN_BODY
-};
-
-static const struct req_msg_field *llog_origin_handle_next_block_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOGD_BODY,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *ost_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY
-};
-
-static const struct req_msg_field *ost_body_capa[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *ost_destroy_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY,
- &RMF_DLM_REQ,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *ost_brw_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY,
- &RMF_OBD_IOOBJ,
- &RMF_NIOBUF_REMOTE,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *ost_brw_read_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY
-};
-
-static const struct req_msg_field *ost_brw_write_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY,
- &RMF_RCS
-};
-
-static const struct req_msg_field *ost_get_info_generic_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_GENERIC_DATA,
-};
-
-static const struct req_msg_field *ost_get_info_generic_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_GETINFO_KEY
-};
-
-static const struct req_msg_field *ost_get_last_id_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OBD_ID
-};
-
-static const struct req_msg_field *ost_get_last_fid_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_GETINFO_KEY,
- &RMF_FID,
-};
-
-static const struct req_msg_field *ost_get_last_fid_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FID,
-};
-
-static const struct req_msg_field *ost_get_fiemap_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FIEMAP_KEY,
- &RMF_FIEMAP_VAL
-};
-
-static const struct req_msg_field *ost_get_fiemap_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FIEMAP_VAL
-};
-
-static const struct req_msg_field *mdt_hsm_progress[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDS_HSM_PROGRESS,
-};
-
-static const struct req_msg_field *mdt_hsm_ct_register[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDS_HSM_ARCHIVE,
-};
-
-static const struct req_msg_field *mdt_hsm_ct_unregister[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
-};
-
-static const struct req_msg_field *mdt_hsm_action_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDS_HSM_CURRENT_ACTION,
-};
-
-static const struct req_msg_field *mdt_hsm_state_get_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_HSM_USER_STATE,
-};
-
-static const struct req_msg_field *mdt_hsm_state_set[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_CAPA1,
- &RMF_HSM_STATE_SET,
-};
-
-static const struct req_msg_field *mdt_hsm_request[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDS_HSM_REQUEST,
- &RMF_MDS_HSM_USER_ITEM,
- &RMF_GENERIC_DATA,
-};
-
-static struct req_format *req_formats[] = {
- &RQF_OBD_PING,
- &RQF_OBD_SET_INFO,
- &RQF_SEC_CTX,
- &RQF_MGS_TARGET_REG,
- &RQF_MGS_SET_INFO,
- &RQF_MGS_CONFIG_READ,
- &RQF_SEQ_QUERY,
- &RQF_FLD_QUERY,
- &RQF_FLD_READ,
- &RQF_MDS_CONNECT,
- &RQF_MDS_DISCONNECT,
- &RQF_MDS_GET_INFO,
- &RQF_MDS_GETSTATUS,
- &RQF_MDS_STATFS,
- &RQF_MDS_GETATTR,
- &RQF_MDS_GETATTR_NAME,
- &RQF_MDS_GETXATTR,
- &RQF_MDS_SYNC,
- &RQF_MDS_CLOSE,
- &RQF_MDS_INTENT_CLOSE,
- &RQF_MDS_READPAGE,
- &RQF_MDS_WRITEPAGE,
- &RQF_MDS_REINT,
- &RQF_MDS_REINT_CREATE,
- &RQF_MDS_REINT_CREATE_ACL,
- &RQF_MDS_REINT_CREATE_SLAVE,
- &RQF_MDS_REINT_CREATE_SYM,
- &RQF_MDS_REINT_OPEN,
- &RQF_MDS_REINT_UNLINK,
- &RQF_MDS_REINT_LINK,
- &RQF_MDS_REINT_RENAME,
- &RQF_MDS_REINT_MIGRATE,
- &RQF_MDS_REINT_SETATTR,
- &RQF_MDS_REINT_SETXATTR,
- &RQF_MDS_QUOTACTL,
- &RQF_MDS_HSM_PROGRESS,
- &RQF_MDS_HSM_CT_REGISTER,
- &RQF_MDS_HSM_CT_UNREGISTER,
- &RQF_MDS_HSM_STATE_GET,
- &RQF_MDS_HSM_STATE_SET,
- &RQF_MDS_HSM_ACTION,
- &RQF_MDS_HSM_REQUEST,
- &RQF_MDS_SWAP_LAYOUTS,
- &RQF_OST_CONNECT,
- &RQF_OST_DISCONNECT,
- &RQF_OST_QUOTACTL,
- &RQF_OST_GETATTR,
- &RQF_OST_SETATTR,
- &RQF_OST_CREATE,
- &RQF_OST_PUNCH,
- &RQF_OST_SYNC,
- &RQF_OST_DESTROY,
- &RQF_OST_BRW_READ,
- &RQF_OST_BRW_WRITE,
- &RQF_OST_STATFS,
- &RQF_OST_SET_GRANT_INFO,
- &RQF_OST_GET_INFO,
- &RQF_OST_GET_INFO_LAST_ID,
- &RQF_OST_GET_INFO_LAST_FID,
- &RQF_OST_SET_INFO_LAST_FID,
- &RQF_OST_GET_INFO_FIEMAP,
- &RQF_LDLM_ENQUEUE,
- &RQF_LDLM_ENQUEUE_LVB,
- &RQF_LDLM_CONVERT,
- &RQF_LDLM_CANCEL,
- &RQF_LDLM_CALLBACK,
- &RQF_LDLM_CP_CALLBACK,
- &RQF_LDLM_BL_CALLBACK,
- &RQF_LDLM_GL_CALLBACK,
- &RQF_LDLM_GL_DESC_CALLBACK,
- &RQF_LDLM_INTENT,
- &RQF_LDLM_INTENT_BASIC,
- &RQF_LDLM_INTENT_LAYOUT,
- &RQF_LDLM_INTENT_GETATTR,
- &RQF_LDLM_INTENT_OPEN,
- &RQF_LDLM_INTENT_CREATE,
- &RQF_LDLM_INTENT_UNLINK,
- &RQF_LDLM_INTENT_GETXATTR,
- &RQF_LOG_CANCEL,
- &RQF_LLOG_ORIGIN_HANDLE_CREATE,
- &RQF_LLOG_ORIGIN_HANDLE_DESTROY,
- &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
- &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
- &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
- &RQF_LLOG_ORIGIN_CONNECT,
- &RQF_CONNECT,
-};
-
-struct req_msg_field {
- const __u32 rmf_flags;
- const char *rmf_name;
- /**
- * Field length. (-1) means "variable length". If the
- * \a RMF_F_STRUCT_ARRAY flag is set the field is also variable-length,
- * but the actual size must be a whole multiple of \a rmf_size.
- */
- const int rmf_size;
- void (*rmf_swabber)(void *);
- void (*rmf_dumper)(void *);
- int rmf_offset[ARRAY_SIZE(req_formats)][RCL_NR];
-};
-
-enum rmf_flags {
- /**
- * The field is a string, must be NUL-terminated.
- */
- RMF_F_STRING = BIT(0),
- /**
- * The field's buffer size need not match the declared \a rmf_size.
- */
- RMF_F_NO_SIZE_CHECK = BIT(1),
- /**
- * The field's buffer size must be a whole multiple of the declared \a
- * rmf_size and the \a rmf_swabber function must work on the declared \a
- * rmf_size worth of bytes.
- */
- RMF_F_STRUCT_ARRAY = BIT(2)
-};
-
-struct req_capsule;
-
-/*
- * Request fields.
- */
-#define DEFINE_MSGF(name, flags, size, swabber, dumper) { \
- .rmf_name = (name), \
- .rmf_flags = (flags), \
- .rmf_size = (size), \
- .rmf_swabber = (void (*)(void *))(swabber), \
- .rmf_dumper = (void (*)(void *))(dumper) \
-}
-
-struct req_msg_field RMF_GENERIC_DATA =
- DEFINE_MSGF("generic_data", 0,
- -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_GENERIC_DATA);
-
-struct req_msg_field RMF_MGS_TARGET_INFO =
- DEFINE_MSGF("mgs_target_info", 0,
- sizeof(struct mgs_target_info),
- lustre_swab_mgs_target_info, NULL);
-EXPORT_SYMBOL(RMF_MGS_TARGET_INFO);
-
-struct req_msg_field RMF_MGS_SEND_PARAM =
- DEFINE_MSGF("mgs_send_param", 0,
- sizeof(struct mgs_send_param),
- NULL, NULL);
-EXPORT_SYMBOL(RMF_MGS_SEND_PARAM);
-
-struct req_msg_field RMF_MGS_CONFIG_BODY =
- DEFINE_MSGF("mgs_config_read request", 0,
- sizeof(struct mgs_config_body),
- lustre_swab_mgs_config_body, NULL);
-EXPORT_SYMBOL(RMF_MGS_CONFIG_BODY);
-
-struct req_msg_field RMF_MGS_CONFIG_RES =
- DEFINE_MSGF("mgs_config_read reply ", 0,
- sizeof(struct mgs_config_res),
- lustre_swab_mgs_config_res, NULL);
-EXPORT_SYMBOL(RMF_MGS_CONFIG_RES);
-
-struct req_msg_field RMF_U32 =
- DEFINE_MSGF("generic u32", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_U32);
-
-struct req_msg_field RMF_SETINFO_VAL =
- DEFINE_MSGF("setinfo_val", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_SETINFO_VAL);
-
-struct req_msg_field RMF_GETINFO_KEY =
- DEFINE_MSGF("getinfo_key", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_GETINFO_KEY);
-
-struct req_msg_field RMF_GETINFO_VALLEN =
- DEFINE_MSGF("getinfo_vallen", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_GETINFO_VALLEN);
-
-struct req_msg_field RMF_GETINFO_VAL =
- DEFINE_MSGF("getinfo_val", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_GETINFO_VAL);
-
-struct req_msg_field RMF_SEQ_OPC =
- DEFINE_MSGF("seq_query_opc", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_SEQ_OPC);
-
-struct req_msg_field RMF_SEQ_RANGE =
- DEFINE_MSGF("seq_query_range", 0,
- sizeof(struct lu_seq_range),
- lustre_swab_lu_seq_range, NULL);
-EXPORT_SYMBOL(RMF_SEQ_RANGE);
-
-struct req_msg_field RMF_FLD_OPC =
- DEFINE_MSGF("fld_query_opc", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_FLD_OPC);
-
-struct req_msg_field RMF_FLD_MDFLD =
- DEFINE_MSGF("fld_query_mdfld", 0,
- sizeof(struct lu_seq_range),
- lustre_swab_lu_seq_range, NULL);
-EXPORT_SYMBOL(RMF_FLD_MDFLD);
-
-struct req_msg_field RMF_MDT_BODY =
- DEFINE_MSGF("mdt_body", 0,
- sizeof(struct mdt_body), lustre_swab_mdt_body, NULL);
-EXPORT_SYMBOL(RMF_MDT_BODY);
-
-struct req_msg_field RMF_OBD_QUOTACTL =
- DEFINE_MSGF("obd_quotactl", 0,
- sizeof(struct obd_quotactl),
- lustre_swab_obd_quotactl, NULL);
-EXPORT_SYMBOL(RMF_OBD_QUOTACTL);
-
-struct req_msg_field RMF_MDT_EPOCH =
- DEFINE_MSGF("mdt_ioepoch", 0,
- sizeof(struct mdt_ioepoch), lustre_swab_mdt_ioepoch, NULL);
-EXPORT_SYMBOL(RMF_MDT_EPOCH);
-
-struct req_msg_field RMF_PTLRPC_BODY =
- DEFINE_MSGF("ptlrpc_body", 0,
- sizeof(struct ptlrpc_body), lustre_swab_ptlrpc_body, NULL);
-EXPORT_SYMBOL(RMF_PTLRPC_BODY);
-
-struct req_msg_field RMF_CLOSE_DATA =
- DEFINE_MSGF("data_version", 0,
- sizeof(struct close_data), lustre_swab_close_data, NULL);
-EXPORT_SYMBOL(RMF_CLOSE_DATA);
-
-struct req_msg_field RMF_OBD_STATFS =
- DEFINE_MSGF("obd_statfs", 0,
- sizeof(struct obd_statfs), lustre_swab_obd_statfs, NULL);
-EXPORT_SYMBOL(RMF_OBD_STATFS);
-
-struct req_msg_field RMF_SETINFO_KEY =
- DEFINE_MSGF("setinfo_key", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_SETINFO_KEY);
-
-struct req_msg_field RMF_NAME =
- DEFINE_MSGF("name", RMF_F_STRING, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_NAME);
-
-struct req_msg_field RMF_SYMTGT =
- DEFINE_MSGF("symtgt", RMF_F_STRING, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_SYMTGT);
-
-struct req_msg_field RMF_TGTUUID =
- DEFINE_MSGF("tgtuuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
- NULL);
-EXPORT_SYMBOL(RMF_TGTUUID);
-
-struct req_msg_field RMF_CLUUID =
- DEFINE_MSGF("cluuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
- NULL);
-EXPORT_SYMBOL(RMF_CLUUID);
-
-struct req_msg_field RMF_STRING =
- DEFINE_MSGF("string", RMF_F_STRING, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_STRING);
-
-struct req_msg_field RMF_LLOGD_BODY =
- DEFINE_MSGF("llogd_body", 0,
- sizeof(struct llogd_body), lustre_swab_llogd_body, NULL);
-EXPORT_SYMBOL(RMF_LLOGD_BODY);
-
-struct req_msg_field RMF_LLOG_LOG_HDR =
- DEFINE_MSGF("llog_log_hdr", 0,
- sizeof(struct llog_log_hdr), lustre_swab_llog_hdr, NULL);
-EXPORT_SYMBOL(RMF_LLOG_LOG_HDR);
-
-struct req_msg_field RMF_LLOGD_CONN_BODY =
- DEFINE_MSGF("llogd_conn_body", 0,
- sizeof(struct llogd_conn_body),
- lustre_swab_llogd_conn_body, NULL);
-EXPORT_SYMBOL(RMF_LLOGD_CONN_BODY);
-
-/*
- * connection handle received in MDS_CONNECT request.
- *
- * No swabbing needed because struct lustre_handle contains only a 64-bit cookie
- * that the client does not interpret at all.
- */
-struct req_msg_field RMF_CONN =
- DEFINE_MSGF("conn", 0, sizeof(struct lustre_handle), NULL, NULL);
-EXPORT_SYMBOL(RMF_CONN);
-
-struct req_msg_field RMF_CONNECT_DATA =
- DEFINE_MSGF("cdata",
- RMF_F_NO_SIZE_CHECK /* we allow extra space for interop */,
- sizeof(struct obd_connect_data),
- lustre_swab_connect, NULL);
-EXPORT_SYMBOL(RMF_CONNECT_DATA);
-
-struct req_msg_field RMF_DLM_REQ =
- DEFINE_MSGF("dlm_req", RMF_F_NO_SIZE_CHECK /* ldlm_request_bufsize */,
- sizeof(struct ldlm_request),
- lustre_swab_ldlm_request, NULL);
-EXPORT_SYMBOL(RMF_DLM_REQ);
-
-struct req_msg_field RMF_DLM_REP =
- DEFINE_MSGF("dlm_rep", 0,
- sizeof(struct ldlm_reply), lustre_swab_ldlm_reply, NULL);
-EXPORT_SYMBOL(RMF_DLM_REP);
-
-struct req_msg_field RMF_LDLM_INTENT =
- DEFINE_MSGF("ldlm_intent", 0,
- sizeof(struct ldlm_intent), lustre_swab_ldlm_intent, NULL);
-EXPORT_SYMBOL(RMF_LDLM_INTENT);
-
-struct req_msg_field RMF_DLM_LVB =
- DEFINE_MSGF("dlm_lvb", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_DLM_LVB);
-
-struct req_msg_field RMF_DLM_GL_DESC =
- DEFINE_MSGF("dlm_gl_desc", 0, sizeof(union ldlm_gl_desc),
- lustre_swab_gl_desc, NULL);
-EXPORT_SYMBOL(RMF_DLM_GL_DESC);
-
-struct req_msg_field RMF_MDT_MD =
- DEFINE_MSGF("mdt_md", RMF_F_NO_SIZE_CHECK, MIN_MD_SIZE, NULL, NULL);
-EXPORT_SYMBOL(RMF_MDT_MD);
-
-struct req_msg_field RMF_REC_REINT =
- DEFINE_MSGF("rec_reint", 0, sizeof(struct mdt_rec_reint),
- lustre_swab_mdt_rec_reint, NULL);
-EXPORT_SYMBOL(RMF_REC_REINT);
-
-/* FIXME: this length should be defined as a macro */
-struct req_msg_field RMF_EADATA = DEFINE_MSGF("eadata", 0, -1,
- NULL, NULL);
-EXPORT_SYMBOL(RMF_EADATA);
-
-struct req_msg_field RMF_EAVALS = DEFINE_MSGF("eavals", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_EAVALS);
-
-struct req_msg_field RMF_ACL =
- DEFINE_MSGF("acl", RMF_F_NO_SIZE_CHECK,
- LUSTRE_POSIX_ACL_MAX_SIZE, NULL, NULL);
-EXPORT_SYMBOL(RMF_ACL);
-
-/* FIXME: this should be made to use RMF_F_STRUCT_ARRAY */
-struct req_msg_field RMF_LOGCOOKIES =
- DEFINE_MSGF("logcookies", RMF_F_NO_SIZE_CHECK /* multiple cookies */,
- sizeof(struct llog_cookie), NULL, NULL);
-EXPORT_SYMBOL(RMF_LOGCOOKIES);
-
-struct req_msg_field RMF_CAPA1 =
- DEFINE_MSGF("capa", 0, sizeof(struct lustre_capa),
- lustre_swab_lustre_capa, NULL);
-EXPORT_SYMBOL(RMF_CAPA1);
-
-struct req_msg_field RMF_CAPA2 =
- DEFINE_MSGF("capa", 0, sizeof(struct lustre_capa),
- lustre_swab_lustre_capa, NULL);
-EXPORT_SYMBOL(RMF_CAPA2);
-
-struct req_msg_field RMF_LAYOUT_INTENT =
- DEFINE_MSGF("layout_intent", 0,
- sizeof(struct layout_intent), lustre_swab_layout_intent,
- NULL);
-EXPORT_SYMBOL(RMF_LAYOUT_INTENT);
-
-/*
- * OST request field.
- */
-struct req_msg_field RMF_OST_BODY =
- DEFINE_MSGF("ost_body", 0,
- sizeof(struct ost_body), lustre_swab_ost_body, dump_ost_body);
-EXPORT_SYMBOL(RMF_OST_BODY);
-
-struct req_msg_field RMF_OBD_IOOBJ =
- DEFINE_MSGF("obd_ioobj", RMF_F_STRUCT_ARRAY,
- sizeof(struct obd_ioobj), lustre_swab_obd_ioobj, dump_ioo);
-EXPORT_SYMBOL(RMF_OBD_IOOBJ);
-
-struct req_msg_field RMF_NIOBUF_REMOTE =
- DEFINE_MSGF("niobuf_remote", RMF_F_STRUCT_ARRAY,
- sizeof(struct niobuf_remote), lustre_swab_niobuf_remote,
- dump_rniobuf);
-EXPORT_SYMBOL(RMF_NIOBUF_REMOTE);
-
-struct req_msg_field RMF_RCS =
- DEFINE_MSGF("niobuf_remote", RMF_F_STRUCT_ARRAY, sizeof(__u32),
- lustre_swab_generic_32s, dump_rcs);
-EXPORT_SYMBOL(RMF_RCS);
-
-struct req_msg_field RMF_EAVALS_LENS =
- DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32),
- lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_EAVALS_LENS);
-
-struct req_msg_field RMF_OBD_ID =
- DEFINE_MSGF("u64", 0,
- sizeof(u64), lustre_swab_ost_last_id, NULL);
-EXPORT_SYMBOL(RMF_OBD_ID);
-
-struct req_msg_field RMF_FID =
- DEFINE_MSGF("fid", 0,
- sizeof(struct lu_fid), lustre_swab_lu_fid, NULL);
-EXPORT_SYMBOL(RMF_FID);
-
-struct req_msg_field RMF_OST_ID =
- DEFINE_MSGF("ost_id", 0,
- sizeof(struct ost_id), lustre_swab_ost_id, NULL);
-EXPORT_SYMBOL(RMF_OST_ID);
-
-struct req_msg_field RMF_FIEMAP_KEY =
- DEFINE_MSGF("fiemap", 0, sizeof(struct ll_fiemap_info_key),
- lustre_swab_fiemap, NULL);
-EXPORT_SYMBOL(RMF_FIEMAP_KEY);
-
-struct req_msg_field RMF_FIEMAP_VAL =
- DEFINE_MSGF("fiemap", 0, -1, lustre_swab_fiemap, NULL);
-EXPORT_SYMBOL(RMF_FIEMAP_VAL);
-
-struct req_msg_field RMF_HSM_USER_STATE =
- DEFINE_MSGF("hsm_user_state", 0, sizeof(struct hsm_user_state),
- lustre_swab_hsm_user_state, NULL);
-EXPORT_SYMBOL(RMF_HSM_USER_STATE);
-
-struct req_msg_field RMF_HSM_STATE_SET =
- DEFINE_MSGF("hsm_state_set", 0, sizeof(struct hsm_state_set),
- lustre_swab_hsm_state_set, NULL);
-EXPORT_SYMBOL(RMF_HSM_STATE_SET);
-
-struct req_msg_field RMF_MDS_HSM_PROGRESS =
- DEFINE_MSGF("hsm_progress", 0, sizeof(struct hsm_progress_kernel),
- lustre_swab_hsm_progress_kernel, NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_PROGRESS);
-
-struct req_msg_field RMF_MDS_HSM_CURRENT_ACTION =
- DEFINE_MSGF("hsm_current_action", 0, sizeof(struct hsm_current_action),
- lustre_swab_hsm_current_action, NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_CURRENT_ACTION);
-
-struct req_msg_field RMF_MDS_HSM_USER_ITEM =
- DEFINE_MSGF("hsm_user_item", RMF_F_STRUCT_ARRAY,
- sizeof(struct hsm_user_item), lustre_swab_hsm_user_item,
- NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_USER_ITEM);
-
-struct req_msg_field RMF_MDS_HSM_ARCHIVE =
- DEFINE_MSGF("hsm_archive", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_ARCHIVE);
-
-struct req_msg_field RMF_MDS_HSM_REQUEST =
- DEFINE_MSGF("hsm_request", 0, sizeof(struct hsm_request),
- lustre_swab_hsm_request, NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_REQUEST);
-
-struct req_msg_field RMF_SWAP_LAYOUTS =
- DEFINE_MSGF("swap_layouts", 0, sizeof(struct mdc_swap_layouts),
- lustre_swab_swap_layouts, NULL);
-EXPORT_SYMBOL(RMF_SWAP_LAYOUTS);
-/*
- * Request formats.
- */
-
-struct req_format {
- const char *rf_name;
- size_t rf_idx;
- struct {
- size_t nr;
- const struct req_msg_field **d;
- } rf_fields[RCL_NR];
-};
-
-#define DEFINE_REQ_FMT(name, client, client_nr, server, server_nr) { \
- .rf_name = name, \
- .rf_fields = { \
- [RCL_CLIENT] = { \
- .nr = client_nr, \
- .d = client \
- }, \
- [RCL_SERVER] = { \
- .nr = server_nr, \
- .d = server \
- } \
- } \
-}
-
-#define DEFINE_REQ_FMT0(name, client, server) \
-DEFINE_REQ_FMT(name, client, ARRAY_SIZE(client), server, ARRAY_SIZE(server))
-
-struct req_format RQF_OBD_PING =
- DEFINE_REQ_FMT0("OBD_PING", empty, empty);
-EXPORT_SYMBOL(RQF_OBD_PING);
-
-struct req_format RQF_OBD_SET_INFO =
- DEFINE_REQ_FMT0("OBD_SET_INFO", obd_set_info_client, empty);
-EXPORT_SYMBOL(RQF_OBD_SET_INFO);
-
-struct req_format RQF_SEC_CTX =
- DEFINE_REQ_FMT0("SEC_CTX", empty, empty);
-EXPORT_SYMBOL(RQF_SEC_CTX);
-
-struct req_format RQF_MGS_TARGET_REG =
- DEFINE_REQ_FMT0("MGS_TARGET_REG", mgs_target_info_only,
- mgs_target_info_only);
-EXPORT_SYMBOL(RQF_MGS_TARGET_REG);
-
-struct req_format RQF_MGS_SET_INFO =
- DEFINE_REQ_FMT0("MGS_SET_INFO", mgs_set_info,
- mgs_set_info);
-EXPORT_SYMBOL(RQF_MGS_SET_INFO);
-
-struct req_format RQF_MGS_CONFIG_READ =
- DEFINE_REQ_FMT0("MGS_CONFIG_READ", mgs_config_read_client,
- mgs_config_read_server);
-EXPORT_SYMBOL(RQF_MGS_CONFIG_READ);
-
-struct req_format RQF_SEQ_QUERY =
- DEFINE_REQ_FMT0("SEQ_QUERY", seq_query_client, seq_query_server);
-EXPORT_SYMBOL(RQF_SEQ_QUERY);
-
-struct req_format RQF_FLD_QUERY =
- DEFINE_REQ_FMT0("FLD_QUERY", fld_query_client, fld_query_server);
-EXPORT_SYMBOL(RQF_FLD_QUERY);
-
-/*
- * The 'fld_read_server' uses 'RMF_GENERIC_DATA' to hold the 'FLD_QUERY'
- * RPC reply that is composed of 'struct lu_seq_range_array'. But there
- * is not registered swabber function for 'RMF_GENERIC_DATA'. So the RPC
- * peers need to handle the RPC reply with fixed little-endian format.
- *
- * In theory, we can define new structure with some swabber registered to
- * handle the 'FLD_QUERY' RPC reply result automatically. But from the
- * implementation view, it is not easy to be done within current "struct
- * req_msg_field" framework. Because the sequence range array in the RPC
- * reply is not fixed length, instead, its length depends on 'lu_seq_range'
- * count, that is unknown when prepare the RPC buffer. Generally, for such
- * flexible length RPC usage, there will be a field in the RPC layout to
- * indicate the data length. But for the 'FLD_READ' RPC, we have no way to
- * do that unless we add new length filed that will broken the on-wire RPC
- * protocol and cause interoperability trouble with old peer.
- */
-struct req_format RQF_FLD_READ =
- DEFINE_REQ_FMT0("FLD_READ", fld_read_client, fld_read_server);
-EXPORT_SYMBOL(RQF_FLD_READ);
-
-struct req_format RQF_LOG_CANCEL =
- DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
-EXPORT_SYMBOL(RQF_LOG_CANCEL);
-
-struct req_format RQF_MDS_QUOTACTL =
- DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only);
-EXPORT_SYMBOL(RQF_MDS_QUOTACTL);
-
-struct req_format RQF_OST_QUOTACTL =
- DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only);
-EXPORT_SYMBOL(RQF_OST_QUOTACTL);
-
-struct req_format RQF_MDS_GETSTATUS =
- DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_GETSTATUS);
-
-struct req_format RQF_MDS_STATFS =
- DEFINE_REQ_FMT0("MDS_STATFS", empty, obd_statfs_server);
-EXPORT_SYMBOL(RQF_MDS_STATFS);
-
-struct req_format RQF_MDS_SYNC =
- DEFINE_REQ_FMT0("MDS_SYNC", mdt_body_capa, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_SYNC);
-
-struct req_format RQF_MDS_GETATTR =
- DEFINE_REQ_FMT0("MDS_GETATTR", mdt_body_capa, mds_getattr_server);
-EXPORT_SYMBOL(RQF_MDS_GETATTR);
-
-struct req_format RQF_MDS_GETXATTR =
- DEFINE_REQ_FMT0("MDS_GETXATTR",
- mds_getxattr_client, mds_getxattr_server);
-EXPORT_SYMBOL(RQF_MDS_GETXATTR);
-
-struct req_format RQF_MDS_GETATTR_NAME =
- DEFINE_REQ_FMT0("MDS_GETATTR_NAME",
- mds_getattr_name_client, mds_getattr_server);
-EXPORT_SYMBOL(RQF_MDS_GETATTR_NAME);
-
-struct req_format RQF_MDS_REINT =
- DEFINE_REQ_FMT0("MDS_REINT", mds_reint_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_REINT);
-
-struct req_format RQF_MDS_REINT_CREATE =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE",
- mds_reint_create_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE);
-
-struct req_format RQF_MDS_REINT_CREATE_ACL =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE_ACL",
- mds_reint_create_acl_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_ACL);
-
-struct req_format RQF_MDS_REINT_CREATE_SLAVE =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE_EA",
- mds_reint_create_slave_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_SLAVE);
-
-struct req_format RQF_MDS_REINT_CREATE_SYM =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE_SYM",
- mds_reint_create_sym_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_SYM);
-
-struct req_format RQF_MDS_REINT_OPEN =
- DEFINE_REQ_FMT0("MDS_REINT_OPEN",
- mds_reint_open_client, mds_reint_open_server);
-EXPORT_SYMBOL(RQF_MDS_REINT_OPEN);
-
-struct req_format RQF_MDS_REINT_UNLINK =
- DEFINE_REQ_FMT0("MDS_REINT_UNLINK", mds_reint_unlink_client,
- mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_REINT_UNLINK);
-
-struct req_format RQF_MDS_REINT_LINK =
- DEFINE_REQ_FMT0("MDS_REINT_LINK",
- mds_reint_link_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_REINT_LINK);
-
-struct req_format RQF_MDS_REINT_RENAME =
- DEFINE_REQ_FMT0("MDS_REINT_RENAME", mds_reint_rename_client,
- mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_REINT_RENAME);
-
-struct req_format RQF_MDS_REINT_MIGRATE =
- DEFINE_REQ_FMT0("MDS_REINT_MIGRATE", mds_reint_migrate_client,
- mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_REINT_MIGRATE);
-
-struct req_format RQF_MDS_REINT_SETATTR =
- DEFINE_REQ_FMT0("MDS_REINT_SETATTR",
- mds_reint_setattr_client, mds_setattr_server);
-EXPORT_SYMBOL(RQF_MDS_REINT_SETATTR);
-
-struct req_format RQF_MDS_REINT_SETXATTR =
- DEFINE_REQ_FMT0("MDS_REINT_SETXATTR",
- mds_reint_setxattr_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_REINT_SETXATTR);
-
-struct req_format RQF_MDS_CONNECT =
- DEFINE_REQ_FMT0("MDS_CONNECT",
- obd_connect_client, obd_connect_server);
-EXPORT_SYMBOL(RQF_MDS_CONNECT);
-
-struct req_format RQF_MDS_DISCONNECT =
- DEFINE_REQ_FMT0("MDS_DISCONNECT", empty, empty);
-EXPORT_SYMBOL(RQF_MDS_DISCONNECT);
-
-struct req_format RQF_MDS_GET_INFO =
- DEFINE_REQ_FMT0("MDS_GET_INFO", mds_getinfo_client,
- mds_getinfo_server);
-EXPORT_SYMBOL(RQF_MDS_GET_INFO);
-
-struct req_format RQF_LDLM_ENQUEUE =
- DEFINE_REQ_FMT0("LDLM_ENQUEUE",
- ldlm_enqueue_client, ldlm_enqueue_lvb_server);
-EXPORT_SYMBOL(RQF_LDLM_ENQUEUE);
-
-struct req_format RQF_LDLM_ENQUEUE_LVB =
- DEFINE_REQ_FMT0("LDLM_ENQUEUE_LVB",
- ldlm_enqueue_client, ldlm_enqueue_lvb_server);
-EXPORT_SYMBOL(RQF_LDLM_ENQUEUE_LVB);
-
-struct req_format RQF_LDLM_CONVERT =
- DEFINE_REQ_FMT0("LDLM_CONVERT",
- ldlm_enqueue_client, ldlm_enqueue_server);
-EXPORT_SYMBOL(RQF_LDLM_CONVERT);
-
-struct req_format RQF_LDLM_CANCEL =
- DEFINE_REQ_FMT0("LDLM_CANCEL", ldlm_enqueue_client, empty);
-EXPORT_SYMBOL(RQF_LDLM_CANCEL);
-
-struct req_format RQF_LDLM_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_CALLBACK", ldlm_enqueue_client, empty);
-EXPORT_SYMBOL(RQF_LDLM_CALLBACK);
-
-struct req_format RQF_LDLM_CP_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_CP_CALLBACK", ldlm_cp_callback_client, empty);
-EXPORT_SYMBOL(RQF_LDLM_CP_CALLBACK);
-
-struct req_format RQF_LDLM_BL_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_BL_CALLBACK", ldlm_enqueue_client, empty);
-EXPORT_SYMBOL(RQF_LDLM_BL_CALLBACK);
-
-struct req_format RQF_LDLM_GL_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_GL_CALLBACK", ldlm_enqueue_client,
- ldlm_gl_callback_server);
-EXPORT_SYMBOL(RQF_LDLM_GL_CALLBACK);
-
-struct req_format RQF_LDLM_GL_DESC_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_GL_CALLBACK", ldlm_gl_callback_desc_client,
- ldlm_gl_callback_server);
-EXPORT_SYMBOL(RQF_LDLM_GL_DESC_CALLBACK);
-
-struct req_format RQF_LDLM_INTENT_BASIC =
- DEFINE_REQ_FMT0("LDLM_INTENT_BASIC",
- ldlm_intent_basic_client, ldlm_enqueue_lvb_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_BASIC);
-
-struct req_format RQF_LDLM_INTENT =
- DEFINE_REQ_FMT0("LDLM_INTENT",
- ldlm_intent_client, ldlm_intent_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT);
-
-struct req_format RQF_LDLM_INTENT_LAYOUT =
- DEFINE_REQ_FMT0("LDLM_INTENT_LAYOUT ",
- ldlm_intent_layout_client, ldlm_enqueue_lvb_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_LAYOUT);
-
-struct req_format RQF_LDLM_INTENT_GETATTR =
- DEFINE_REQ_FMT0("LDLM_INTENT_GETATTR",
- ldlm_intent_getattr_client, ldlm_intent_getattr_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_GETATTR);
-
-struct req_format RQF_LDLM_INTENT_OPEN =
- DEFINE_REQ_FMT0("LDLM_INTENT_OPEN",
- ldlm_intent_open_client, ldlm_intent_open_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_OPEN);
-
-struct req_format RQF_LDLM_INTENT_CREATE =
- DEFINE_REQ_FMT0("LDLM_INTENT_CREATE",
- ldlm_intent_create_client, ldlm_intent_getattr_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_CREATE);
-
-struct req_format RQF_LDLM_INTENT_UNLINK =
- DEFINE_REQ_FMT0("LDLM_INTENT_UNLINK",
- ldlm_intent_unlink_client, ldlm_intent_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_UNLINK);
-
-struct req_format RQF_LDLM_INTENT_GETXATTR =
- DEFINE_REQ_FMT0("LDLM_INTENT_GETXATTR",
- ldlm_intent_getxattr_client,
- ldlm_intent_getxattr_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_GETXATTR);
-
-struct req_format RQF_MDS_CLOSE =
- DEFINE_REQ_FMT0("MDS_CLOSE",
- mdt_close_client, mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_CLOSE);
-
-struct req_format RQF_MDS_INTENT_CLOSE =
- DEFINE_REQ_FMT0("MDS_CLOSE",
- mdt_intent_close_client, mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_INTENT_CLOSE);
-
-struct req_format RQF_MDS_READPAGE =
- DEFINE_REQ_FMT0("MDS_READPAGE",
- mdt_body_capa, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_READPAGE);
-
-struct req_format RQF_MDS_HSM_ACTION =
- DEFINE_REQ_FMT0("MDS_HSM_ACTION", mdt_body_capa, mdt_hsm_action_server);
-EXPORT_SYMBOL(RQF_MDS_HSM_ACTION);
-
-struct req_format RQF_MDS_HSM_PROGRESS =
- DEFINE_REQ_FMT0("MDS_HSM_PROGRESS", mdt_hsm_progress, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_PROGRESS);
-
-struct req_format RQF_MDS_HSM_CT_REGISTER =
- DEFINE_REQ_FMT0("MDS_HSM_CT_REGISTER", mdt_hsm_ct_register, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_CT_REGISTER);
-
-struct req_format RQF_MDS_HSM_CT_UNREGISTER =
- DEFINE_REQ_FMT0("MDS_HSM_CT_UNREGISTER", mdt_hsm_ct_unregister, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_CT_UNREGISTER);
-
-struct req_format RQF_MDS_HSM_STATE_GET =
- DEFINE_REQ_FMT0("MDS_HSM_STATE_GET",
- mdt_body_capa, mdt_hsm_state_get_server);
-EXPORT_SYMBOL(RQF_MDS_HSM_STATE_GET);
-
-struct req_format RQF_MDS_HSM_STATE_SET =
- DEFINE_REQ_FMT0("MDS_HSM_STATE_SET", mdt_hsm_state_set, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_STATE_SET);
-
-struct req_format RQF_MDS_HSM_REQUEST =
- DEFINE_REQ_FMT0("MDS_HSM_REQUEST", mdt_hsm_request, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_REQUEST);
-
-struct req_format RQF_MDS_SWAP_LAYOUTS =
- DEFINE_REQ_FMT0("MDS_SWAP_LAYOUTS",
- mdt_swap_layouts, empty);
-EXPORT_SYMBOL(RQF_MDS_SWAP_LAYOUTS);
-
-/* This is for split */
-struct req_format RQF_MDS_WRITEPAGE =
- DEFINE_REQ_FMT0("MDS_WRITEPAGE",
- mdt_body_capa, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_WRITEPAGE);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_CREATE =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_CREATE",
- llog_origin_handle_create_client, llogd_body_only);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_CREATE);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_DESTROY =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_DESTROY",
- llogd_body_only, llogd_body_only);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_DESTROY);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_NEXT_BLOCK",
- llogd_body_only, llog_origin_handle_next_block_server);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_PREV_BLOCK",
- llogd_body_only, llog_origin_handle_next_block_server);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_READ_HEADER",
- llogd_body_only, llog_log_hdr_only);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
-
-struct req_format RQF_LLOG_ORIGIN_CONNECT =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_CONNECT", llogd_conn_body_only, empty);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_CONNECT);
-
-struct req_format RQF_CONNECT =
- DEFINE_REQ_FMT0("CONNECT", obd_connect_client, obd_connect_server);
-EXPORT_SYMBOL(RQF_CONNECT);
-
-struct req_format RQF_OST_CONNECT =
- DEFINE_REQ_FMT0("OST_CONNECT",
- obd_connect_client, obd_connect_server);
-EXPORT_SYMBOL(RQF_OST_CONNECT);
-
-struct req_format RQF_OST_DISCONNECT =
- DEFINE_REQ_FMT0("OST_DISCONNECT", empty, empty);
-EXPORT_SYMBOL(RQF_OST_DISCONNECT);
-
-struct req_format RQF_OST_GETATTR =
- DEFINE_REQ_FMT0("OST_GETATTR", ost_body_capa, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_GETATTR);
-
-struct req_format RQF_OST_SETATTR =
- DEFINE_REQ_FMT0("OST_SETATTR", ost_body_capa, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_SETATTR);
-
-struct req_format RQF_OST_CREATE =
- DEFINE_REQ_FMT0("OST_CREATE", ost_body_only, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_CREATE);
-
-struct req_format RQF_OST_PUNCH =
- DEFINE_REQ_FMT0("OST_PUNCH", ost_body_capa, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_PUNCH);
-
-struct req_format RQF_OST_SYNC =
- DEFINE_REQ_FMT0("OST_SYNC", ost_body_capa, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_SYNC);
-
-struct req_format RQF_OST_DESTROY =
- DEFINE_REQ_FMT0("OST_DESTROY", ost_destroy_client, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_DESTROY);
-
-struct req_format RQF_OST_BRW_READ =
- DEFINE_REQ_FMT0("OST_BRW_READ", ost_brw_client, ost_brw_read_server);
-EXPORT_SYMBOL(RQF_OST_BRW_READ);
-
-struct req_format RQF_OST_BRW_WRITE =
- DEFINE_REQ_FMT0("OST_BRW_WRITE", ost_brw_client, ost_brw_write_server);
-EXPORT_SYMBOL(RQF_OST_BRW_WRITE);
-
-struct req_format RQF_OST_STATFS =
- DEFINE_REQ_FMT0("OST_STATFS", empty, obd_statfs_server);
-EXPORT_SYMBOL(RQF_OST_STATFS);
-
-struct req_format RQF_OST_SET_GRANT_INFO =
- DEFINE_REQ_FMT0("OST_SET_GRANT_INFO", ost_grant_shrink_client,
- ost_body_only);
-EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO);
-
-struct req_format RQF_OST_GET_INFO =
- DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client,
- ost_get_info_generic_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO);
-
-struct req_format RQF_OST_GET_INFO_LAST_ID =
- DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client,
- ost_get_last_id_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID);
-
-struct req_format RQF_OST_GET_INFO_LAST_FID =
- DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", ost_get_last_fid_client,
- ost_get_last_fid_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID);
-
-struct req_format RQF_OST_SET_INFO_LAST_FID =
- DEFINE_REQ_FMT0("OST_SET_INFO_LAST_FID", obd_set_info_client,
- empty);
-EXPORT_SYMBOL(RQF_OST_SET_INFO_LAST_FID);
-
-struct req_format RQF_OST_GET_INFO_FIEMAP =
- DEFINE_REQ_FMT0("OST_GET_INFO_FIEMAP", ost_get_fiemap_client,
- ost_get_fiemap_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP);
-
-/* Convenience macro */
-#define FMT_FIELD(fmt, i, j) ((fmt)->rf_fields[(i)].d[(j)])
-
-/**
- * Initializes the capsule abstraction by computing and setting the \a rf_idx
- * field of RQFs and the \a rmf_offset field of RMFs.
- */
-int req_layout_init(void)
-{
- size_t i;
- size_t j;
- size_t k;
- struct req_format *rf = NULL;
-
- for (i = 0; i < ARRAY_SIZE(req_formats); ++i) {
- rf = req_formats[i];
- rf->rf_idx = i;
- for (j = 0; j < RCL_NR; ++j) {
- LASSERT(rf->rf_fields[j].nr <= REQ_MAX_FIELD_NR);
- for (k = 0; k < rf->rf_fields[j].nr; ++k) {
- struct req_msg_field *field;
-
- field = (typeof(field))rf->rf_fields[j].d[k];
- LASSERT(!(field->rmf_flags & RMF_F_STRUCT_ARRAY)
- || field->rmf_size > 0);
- LASSERT(field->rmf_offset[i][j] == 0);
- /*
- * k + 1 to detect unused format/field
- * combinations.
- */
- field->rmf_offset[i][j] = k + 1;
- }
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(req_layout_init);
-
-void req_layout_fini(void)
-{
-}
-EXPORT_SYMBOL(req_layout_fini);
-
-/**
- * Initializes the expected sizes of each RMF in a \a pill (\a rc_area) to -1.
- *
- * Actual/expected field sizes are set elsewhere in functions in this file:
- * req_capsule_init(), req_capsule_server_pack(), req_capsule_set_size() and
- * req_capsule_msg_size(). The \a rc_area information is used by.
- * ptlrpc_request_set_replen().
- */
-static void req_capsule_init_area(struct req_capsule *pill)
-{
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(pill->rc_area[RCL_CLIENT]); i++) {
- pill->rc_area[RCL_CLIENT][i] = -1;
- pill->rc_area[RCL_SERVER][i] = -1;
- }
-}
-
-/**
- * Initialize a pill.
- *
- * The \a location indicates whether the caller is executing on the client side
- * (RCL_CLIENT) or server side (RCL_SERVER)..
- */
-void req_capsule_init(struct req_capsule *pill,
- struct ptlrpc_request *req,
- enum req_location location)
-{
- LASSERT(location == RCL_SERVER || location == RCL_CLIENT);
-
- /*
- * Today all capsules are embedded in ptlrpc_request structs,
- * but just in case that ever isn't the case, we don't reach
- * into req unless req != NULL and pill is the one embedded in
- * the req.
- *
- * The req->rq_pill_init flag makes it safe to initialize a pill
- * twice, which might happen in the OST paths as a result of the
- * high-priority RPC queue getting peeked at before ost_handle()
- * handles an OST RPC.
- */
- if (req && pill == &req->rq_pill && req->rq_pill_init)
- return;
-
- memset(pill, 0, sizeof(*pill));
- pill->rc_req = req;
- pill->rc_loc = location;
- req_capsule_init_area(pill);
-
- if (req && pill == &req->rq_pill)
- req->rq_pill_init = 1;
-}
-EXPORT_SYMBOL(req_capsule_init);
-
-void req_capsule_fini(struct req_capsule *pill)
-{
-}
-EXPORT_SYMBOL(req_capsule_fini);
-
-static int __req_format_is_sane(const struct req_format *fmt)
-{
- return fmt->rf_idx < ARRAY_SIZE(req_formats) &&
- req_formats[fmt->rf_idx] == fmt;
-}
-
-static struct lustre_msg *__req_msg(const struct req_capsule *pill,
- enum req_location loc)
-{
- struct ptlrpc_request *req;
-
- req = pill->rc_req;
- return loc == RCL_CLIENT ? req->rq_reqmsg : req->rq_repmsg;
-}
-
-/**
- * Set the format (\a fmt) of a \a pill; format changes are not allowed here
- * (see req_capsule_extend()).
- */
-void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt)
-{
- LASSERT(!pill->rc_fmt || pill->rc_fmt == fmt);
- LASSERT(__req_format_is_sane(fmt));
-
- pill->rc_fmt = fmt;
-}
-EXPORT_SYMBOL(req_capsule_set);
-
-/**
- * Fills in any parts of the \a rc_area of a \a pill that haven't been filled in
- * yet.
-
- * \a rc_area is an array of REQ_MAX_FIELD_NR elements, used to store sizes of
- * variable-sized fields. The field sizes come from the declared \a rmf_size
- * field of a \a pill's \a rc_fmt's RMF's.
- */
-size_t req_capsule_filled_sizes(struct req_capsule *pill,
- enum req_location loc)
-{
- const struct req_format *fmt = pill->rc_fmt;
- size_t i;
-
- for (i = 0; i < fmt->rf_fields[loc].nr; ++i) {
- if (pill->rc_area[loc][i] == -1) {
- pill->rc_area[loc][i] =
- fmt->rf_fields[loc].d[i]->rmf_size;
- if (pill->rc_area[loc][i] == -1) {
- /*
- * Skip the following fields.
- *
- * If this LASSERT() trips then you're missing a
- * call to req_capsule_set_size().
- */
- LASSERT(loc != RCL_SERVER);
- break;
- }
- }
- }
- return i;
-}
-EXPORT_SYMBOL(req_capsule_filled_sizes);
-
-/**
- * Capsule equivalent of lustre_pack_request() and lustre_pack_reply().
- *
- * This function uses the \a pill's \a rc_area as filled in by
- * req_capsule_set_size() or req_capsule_filled_sizes() (the latter is called by
- * this function).
- */
-int req_capsule_server_pack(struct req_capsule *pill)
-{
- const struct req_format *fmt;
- int count;
- int rc;
-
- LASSERT(pill->rc_loc == RCL_SERVER);
- fmt = pill->rc_fmt;
- LASSERT(fmt);
-
- count = req_capsule_filled_sizes(pill, RCL_SERVER);
- rc = lustre_pack_reply(pill->rc_req, count,
- pill->rc_area[RCL_SERVER], NULL);
- if (rc != 0) {
- DEBUG_REQ(D_ERROR, pill->rc_req,
- "Cannot pack %d fields in format `%s': ",
- count, fmt->rf_name);
- }
- return rc;
-}
-EXPORT_SYMBOL(req_capsule_server_pack);
-
-/**
- * Returns the PTLRPC request or reply (\a loc) buffer offset of a \a pill
- * corresponding to the given RMF (\a field).
- */
-static u32 __req_capsule_offset(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc)
-{
- u32 offset;
-
- offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc];
- LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", pill->rc_fmt->rf_name,
- field->rmf_name, offset, loc);
- offset--;
-
- LASSERT(offset < REQ_MAX_FIELD_NR);
- return offset;
-}
-
-/**
- * Helper for __req_capsule_get(); swabs value / array of values and/or dumps
- * them if desired.
- */
-static
-void
-swabber_dumper_helper(struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc,
- int offset,
- void *value, int len, int dump, void (*swabber)(void *))
-{
- void *p;
- int i;
- int n;
- int do_swab;
- int inout = loc == RCL_CLIENT;
-
- swabber = swabber ?: field->rmf_swabber;
-
- if (ptlrpc_buf_need_swab(pill->rc_req, inout, offset) &&
- swabber && value)
- do_swab = 1;
- else
- do_swab = 0;
-
- if (!field->rmf_dumper)
- dump = 0;
-
- if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY)) {
- if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of %sfield %s follows\n",
- do_swab ? "unswabbed " : "", field->rmf_name);
- field->rmf_dumper(value);
- }
- if (!do_swab)
- return;
- swabber(value);
- ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset);
- if (dump && field->rmf_dumper) {
- CDEBUG(D_RPCTRACE, "Dump of swabbed field %s follows\n",
- field->rmf_name);
- field->rmf_dumper(value);
- }
-
- return;
- }
-
- /*
- * We're swabbing an array; swabber() swabs a single array element, so
- * swab every element.
- */
- LASSERT((len % field->rmf_size) == 0);
- for (p = value, i = 0, n = len / field->rmf_size;
- i < n;
- i++, p += field->rmf_size) {
- if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, element %d follows\n",
- do_swab ? "unswabbed " : "", field->rmf_name, i);
- field->rmf_dumper(p);
- }
- if (!do_swab)
- continue;
- swabber(p);
- if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, element %d follows\n",
- field->rmf_name, i);
- field->rmf_dumper(value);
- }
- }
- if (do_swab)
- ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset);
-}
-
-/**
- * Returns the pointer to a PTLRPC request or reply (\a loc) buffer of a \a pill
- * corresponding to the given RMF (\a field).
- *
- * The buffer will be swabbed using the given \a swabber. If \a swabber == NULL
- * then the \a rmf_swabber from the RMF will be used. Soon there will be no
- * calls to __req_capsule_get() with a non-NULL \a swabber; \a swabber will then
- * be removed. Fields with the \a RMF_F_STRUCT_ARRAY flag set will have each
- * element of the array swabbed.
- */
-static void *__req_capsule_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc,
- void (*swabber)(void *),
- int dump)
-{
- const struct req_format *fmt;
- struct lustre_msg *msg;
- void *value;
- u32 len;
- u32 offset;
-
- void *(*getter)(struct lustre_msg *m, u32 n, u32 minlen);
-
- static const char *rcl_names[RCL_NR] = {
- [RCL_CLIENT] = "client",
- [RCL_SERVER] = "server"
- };
-
- fmt = pill->rc_fmt;
- LASSERT(fmt);
- LASSERT(fmt != LP_POISON);
- LASSERT(__req_format_is_sane(fmt));
-
- offset = __req_capsule_offset(pill, field, loc);
-
- msg = __req_msg(pill, loc);
- LASSERT(msg);
-
- getter = (field->rmf_flags & RMF_F_STRING) ?
- (typeof(getter))lustre_msg_string : lustre_msg_buf;
-
- if (field->rmf_flags & (RMF_F_STRUCT_ARRAY | RMF_F_NO_SIZE_CHECK)) {
- /*
- * We've already asserted that field->rmf_size > 0 in
- * req_layout_init().
- */
- len = lustre_msg_buflen(msg, offset);
- if (!(field->rmf_flags & RMF_F_NO_SIZE_CHECK) &&
- (len % field->rmf_size)) {
- CERROR("%s: array field size mismatch %d modulo %u != 0 (%d)\n",
- field->rmf_name, len, field->rmf_size, loc);
- return NULL;
- }
- } else if (pill->rc_area[loc][offset] != -1) {
- len = pill->rc_area[loc][offset];
- } else {
- len = max_t(typeof(field->rmf_size), field->rmf_size, 0);
- }
- value = getter(msg, offset, len);
-
- if (!value) {
- DEBUG_REQ(D_ERROR, pill->rc_req,
- "Wrong buffer for field `%s' (%u of %u) in format `%s': %u vs. %u (%s)\n",
- field->rmf_name, offset, lustre_msg_bufcount(msg),
- fmt->rf_name, lustre_msg_buflen(msg, offset), len,
- rcl_names[loc]);
- } else {
- swabber_dumper_helper(pill, field, loc, offset, value, len,
- dump, swabber);
- }
-
- return value;
-}
-
-/**
- * Trivial wrapper around __req_capsule_get(), that returns the PTLRPC request
- * buffer corresponding to the given RMF (\a field) of a \a pill.
- */
-void *req_capsule_client_get(struct req_capsule *pill,
- const struct req_msg_field *field)
-{
- return __req_capsule_get(pill, field, RCL_CLIENT, NULL, 0);
-}
-EXPORT_SYMBOL(req_capsule_client_get);
-
-/**
- * Same as req_capsule_client_get(), but with a \a swabber argument.
- *
- * Currently unused; will be removed when req_capsule_server_swab_get() is
- * unused too.
- */
-void *req_capsule_client_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- void *swabber)
-{
- return __req_capsule_get(pill, field, RCL_CLIENT, swabber, 0);
-}
-EXPORT_SYMBOL(req_capsule_client_swab_get);
-
-/**
- * Utility that combines req_capsule_set_size() and req_capsule_client_get().
- *
- * First the \a pill's request \a field's size is set (\a rc_area) using
- * req_capsule_set_size() with the given \a len. Then the actual buffer is
- * returned.
- */
-void *req_capsule_client_sized_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- u32 len)
-{
- req_capsule_set_size(pill, field, RCL_CLIENT, len);
- return __req_capsule_get(pill, field, RCL_CLIENT, NULL, 0);
-}
-EXPORT_SYMBOL(req_capsule_client_sized_get);
-
-/**
- * Trivial wrapper around __req_capsule_get(), that returns the PTLRPC reply
- * buffer corresponding to the given RMF (\a field) of a \a pill.
- */
-void *req_capsule_server_get(struct req_capsule *pill,
- const struct req_msg_field *field)
-{
- return __req_capsule_get(pill, field, RCL_SERVER, NULL, 0);
-}
-EXPORT_SYMBOL(req_capsule_server_get);
-
-/**
- * Same as req_capsule_server_get(), but with a \a swabber argument.
- *
- * Ideally all swabbing should be done pursuant to RMF definitions, with no
- * swabbing done outside this capsule abstraction.
- */
-void *req_capsule_server_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- void *swabber)
-{
- return __req_capsule_get(pill, field, RCL_SERVER, swabber, 0);
-}
-EXPORT_SYMBOL(req_capsule_server_swab_get);
-
-/**
- * Utility that combines req_capsule_set_size() and req_capsule_server_get().
- *
- * First the \a pill's request \a field's size is set (\a rc_area) using
- * req_capsule_set_size() with the given \a len. Then the actual buffer is
- * returned.
- */
-void *req_capsule_server_sized_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- u32 len)
-{
- req_capsule_set_size(pill, field, RCL_SERVER, len);
- return __req_capsule_get(pill, field, RCL_SERVER, NULL, 0);
-}
-EXPORT_SYMBOL(req_capsule_server_sized_get);
-
-void *req_capsule_server_sized_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- u32 len, void *swabber)
-{
- req_capsule_set_size(pill, field, RCL_SERVER, len);
- return __req_capsule_get(pill, field, RCL_SERVER, swabber, 0);
-}
-EXPORT_SYMBOL(req_capsule_server_sized_swab_get);
-
-/**
- * Set the size of the PTLRPC request/reply (\a loc) buffer for the given \a
- * field of the given \a pill.
- *
- * This function must be used when constructing variable sized fields of a
- * request or reply.
- */
-void req_capsule_set_size(struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc, u32 size)
-{
- LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
-
- if ((size != (u32)field->rmf_size) &&
- (field->rmf_size != -1) &&
- !(field->rmf_flags & RMF_F_NO_SIZE_CHECK) &&
- (size > 0)) {
- u32 rmf_size = (u32)field->rmf_size;
-
- if ((field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
- (size % rmf_size != 0)) {
- CERROR("%s: array field size mismatch %u %% %u != 0 (%d)\n",
- field->rmf_name, size, rmf_size, loc);
- LBUG();
- } else if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
- size < rmf_size) {
- CERROR("%s: field size mismatch %u != %u (%d)\n",
- field->rmf_name, size, rmf_size, loc);
- LBUG();
- }
- }
-
- pill->rc_area[loc][__req_capsule_offset(pill, field, loc)] = size;
-}
-EXPORT_SYMBOL(req_capsule_set_size);
-
-/**
- * Return the actual PTLRPC buffer length of a request or reply (\a loc)
- * for the given \a pill's given \a field.
- *
- * NB: this function doesn't correspond with req_capsule_set_size(), which
- * actually sets the size in pill.rc_area[loc][offset], but this function
- * returns the message buflen[offset], maybe we should use another name.
- */
-u32 req_capsule_get_size(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc)
-{
- LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
-
- return lustre_msg_buflen(__req_msg(pill, loc),
- __req_capsule_offset(pill, field, loc));
-}
-EXPORT_SYMBOL(req_capsule_get_size);
-
-/**
- * Wrapper around lustre_msg_size() that returns the PTLRPC size needed for the
- * given \a pill's request or reply (\a loc) given the field size recorded in
- * the \a pill's rc_area.
- *
- * See also req_capsule_set_size().
- */
-u32 req_capsule_msg_size(struct req_capsule *pill, enum req_location loc)
-{
- return lustre_msg_size(pill->rc_req->rq_import->imp_msg_magic,
- pill->rc_fmt->rf_fields[loc].nr,
- pill->rc_area[loc]);
-}
-
-/**
- * While req_capsule_msg_size() computes the size of a PTLRPC request or reply
- * (\a loc) given a \a pill's \a rc_area, this function computes the size of a
- * PTLRPC request or reply given only an RQF (\a fmt).
- *
- * This function should not be used for formats which contain variable size
- * fields.
- */
-u32 req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
- enum req_location loc)
-{
- size_t i = 0;
- u32 size;
-
- /*
- * This function should probably LASSERT() that fmt has no fields with
- * RMF_F_STRUCT_ARRAY in rmf_flags, since we can't know here how many
- * elements in the array there will ultimately be, but then, we could
- * assume that there will be at least one element, and that's just what
- * we do.
- */
- size = lustre_msg_hdr_size(magic, fmt->rf_fields[loc].nr);
- if (!size)
- return size;
-
- for (; i < fmt->rf_fields[loc].nr; ++i)
- if (fmt->rf_fields[loc].d[i]->rmf_size != -1)
- size += cfs_size_round(fmt->rf_fields[loc].d[i]->
- rmf_size);
- return size;
-}
-
-/**
- * Changes the format of an RPC.
- *
- * The pill must already have been initialized, which means that it already has
- * a request format. The new format \a fmt must be an extension of the pill's
- * old format. Specifically: the new format must have as many request and reply
- * fields as the old one, and all fields shared by the old and new format must
- * be at least as large in the new format.
- *
- * The new format's fields may be of different "type" than the old format, but
- * only for fields that are "opaque" blobs: fields which have a) have no
- * \a rmf_swabber, b) \a rmf_flags == 0 or RMF_F_NO_SIZE_CHECK, and c) \a
- * rmf_size == -1 or \a rmf_flags == RMF_F_NO_SIZE_CHECK. For example,
- * OBD_SET_INFO has a key field and an opaque value field that gets interpreted
- * according to the key field. When the value, according to the key, contains a
- * structure (or array thereof) to be swabbed, the format should be changed to
- * one where the value field has \a rmf_size/rmf_flags/rmf_swabber set
- * accordingly.
- */
-void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt)
-{
- int i;
- size_t j;
-
- const struct req_format *old;
-
- LASSERT(pill->rc_fmt);
- LASSERT(__req_format_is_sane(fmt));
-
- old = pill->rc_fmt;
- /*
- * Sanity checking...
- */
- for (i = 0; i < RCL_NR; ++i) {
- LASSERT(fmt->rf_fields[i].nr >= old->rf_fields[i].nr);
- for (j = 0; j < old->rf_fields[i].nr - 1; ++j) {
- const struct req_msg_field *ofield = FMT_FIELD(old, i, j);
-
- /* "opaque" fields can be transmogrified */
- if (!ofield->rmf_swabber &&
- (ofield->rmf_flags & ~RMF_F_NO_SIZE_CHECK) == 0 &&
- (ofield->rmf_size == -1 ||
- ofield->rmf_flags == RMF_F_NO_SIZE_CHECK))
- continue;
- LASSERT(FMT_FIELD(fmt, i, j) == FMT_FIELD(old, i, j));
- }
- /*
- * Last field in old format can be shorter than in new.
- */
- LASSERT(FMT_FIELD(fmt, i, j)->rmf_size >=
- FMT_FIELD(old, i, j)->rmf_size);
- }
-
- pill->rc_fmt = fmt;
-}
-EXPORT_SYMBOL(req_capsule_extend);
-
-/**
- * This function returns a non-zero value if the given \a field is present in
- * the format (\a rc_fmt) of \a pill's PTLRPC request or reply (\a loc), else it
- * returns 0.
- */
-int req_capsule_has_field(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc)
-{
- LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
-
- return field->rmf_offset[pill->rc_fmt->rf_idx][loc];
-}
-EXPORT_SYMBOL(req_capsule_has_field);
-
-/**
- * Returns a non-zero value if the given \a field is present in the given \a
- * pill's PTLRPC request or reply (\a loc), else it returns 0.
- */
-static int req_capsule_field_present(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc)
-{
- u32 offset;
-
- LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
- LASSERT(req_capsule_has_field(pill, field, loc));
-
- offset = __req_capsule_offset(pill, field, loc);
- return lustre_msg_bufcount(__req_msg(pill, loc)) > offset;
-}
-
-/**
- * This function shrinks the size of the _buffer_ of the \a pill's PTLRPC
- * request or reply (\a loc).
- *
- * This is not the opposite of req_capsule_extend().
- */
-void req_capsule_shrink(struct req_capsule *pill,
- const struct req_msg_field *field,
- u32 newlen, enum req_location loc)
-{
- const struct req_format *fmt;
- struct lustre_msg *msg;
- u32 len;
- int offset;
-
- fmt = pill->rc_fmt;
- LASSERT(fmt);
- LASSERT(__req_format_is_sane(fmt));
- LASSERT(req_capsule_has_field(pill, field, loc));
- LASSERT(req_capsule_field_present(pill, field, loc));
-
- offset = __req_capsule_offset(pill, field, loc);
-
- msg = __req_msg(pill, loc);
- len = lustre_msg_buflen(msg, offset);
- LASSERTF(newlen <= len, "%s:%s, oldlen=%u, newlen=%u\n",
- fmt->rf_name, field->rmf_name, len, newlen);
-
- if (loc == RCL_CLIENT)
- pill->rc_req->rq_reqlen = lustre_shrink_msg(msg, offset, newlen,
- 1);
- else
- pill->rc_req->rq_replen = lustre_shrink_msg(msg, offset, newlen,
- 1);
-}
-EXPORT_SYMBOL(req_capsule_shrink);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
deleted file mode 100644
index 254488be7093..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ /dev/null
@@ -1,340 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/llog_client.c
- *
- * remote api for llog - client side
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd_class.h>
-#include <lustre_log.h>
-#include <lustre_net.h>
-#include <linux/list.h>
-
-#define LLOG_CLIENT_ENTRY(ctxt, imp) do { \
- mutex_lock(&ctxt->loc_mutex); \
- if (ctxt->loc_imp) { \
- imp = class_import_get(ctxt->loc_imp); \
- } else { \
- CERROR("ctxt->loc_imp == NULL for context idx %d." \
- "Unable to complete MDS/OSS recovery," \
- "but I'll try again next time. Not fatal.\n", \
- ctxt->loc_idx); \
- imp = NULL; \
- mutex_unlock(&ctxt->loc_mutex); \
- return (-EINVAL); \
- } \
- mutex_unlock(&ctxt->loc_mutex); \
-} while (0)
-
-#define LLOG_CLIENT_EXIT(ctxt, imp) do { \
- mutex_lock(&ctxt->loc_mutex); \
- if (ctxt->loc_imp != imp) \
- CWARN("loc_imp has changed from %p to %p\n", \
- ctxt->loc_imp, imp); \
- class_import_put(imp); \
- mutex_unlock(&ctxt->loc_mutex); \
-} while (0)
-
-/* This is a callback from the llog_* functions.
- * Assumes caller has already pushed us into the kernel context.
- */
-static int llog_client_open(const struct lu_env *env,
- struct llog_handle *lgh, struct llog_logid *logid,
- char *name, enum llog_open_param open_param)
-{
- struct obd_import *imp;
- struct llogd_body *body;
- struct llog_ctxt *ctxt = lgh->lgh_ctxt;
- struct ptlrpc_request *req = NULL;
- int rc;
-
- LLOG_CLIENT_ENTRY(ctxt, imp);
-
- /* client cannot create llog */
- LASSERTF(open_param != LLOG_OPEN_NEW, "%#x\n", open_param);
- LASSERT(lgh);
-
- req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
- if (!req) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (name)
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- strlen(name) + 1);
-
- rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION,
- LLOG_ORIGIN_HANDLE_CREATE);
- if (rc) {
- ptlrpc_request_free(req);
- req = NULL;
- goto out;
- }
- ptlrpc_request_set_replen(req);
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (logid)
- body->lgd_logid = *logid;
- body->lgd_ctxt_idx = ctxt->loc_idx - 1;
-
- if (name) {
- char *tmp;
-
- tmp = req_capsule_client_sized_get(&req->rq_pill, &RMF_NAME,
- strlen(name) + 1);
- LASSERT(tmp);
- strcpy(tmp, name);
- }
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (!body) {
- rc = -EFAULT;
- goto out;
- }
-
- lgh->lgh_id = body->lgd_logid;
- lgh->lgh_ctxt = ctxt;
-out:
- LLOG_CLIENT_EXIT(ctxt, imp);
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int llog_client_next_block(const struct lu_env *env,
- struct llog_handle *loghandle,
- int *cur_idx, int next_idx,
- __u64 *cur_offset, void *buf, int len)
-{
- struct obd_import *imp;
- struct ptlrpc_request *req = NULL;
- struct llogd_body *body;
- void *ptr;
- int rc;
-
- LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
- LUSTRE_LOG_VERSION,
- LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
- if (!req) {
- rc = -ENOMEM;
- goto err_exit;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- body->lgd_logid = loghandle->lgh_id;
- body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1;
- body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;
- body->lgd_index = next_idx;
- body->lgd_saved_index = *cur_idx;
- body->lgd_len = len;
- body->lgd_cur_offset = *cur_offset;
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len);
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (!body) {
- rc = -EFAULT;
- goto out;
- }
-
- /* The log records are swabbed as they are processed */
- ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- if (!ptr) {
- rc = -EFAULT;
- goto out;
- }
-
- *cur_idx = body->lgd_saved_index;
- *cur_offset = body->lgd_cur_offset;
-
- memcpy(buf, ptr, len);
-out:
- ptlrpc_req_finished(req);
-err_exit:
- LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
- return rc;
-}
-
-static int llog_client_prev_block(const struct lu_env *env,
- struct llog_handle *loghandle,
- int prev_idx, void *buf, int len)
-{
- struct obd_import *imp;
- struct ptlrpc_request *req = NULL;
- struct llogd_body *body;
- void *ptr;
- int rc;
-
- LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
- LUSTRE_LOG_VERSION,
- LLOG_ORIGIN_HANDLE_PREV_BLOCK);
- if (!req) {
- rc = -ENOMEM;
- goto err_exit;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- body->lgd_logid = loghandle->lgh_id;
- body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1;
- body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;
- body->lgd_index = prev_idx;
- body->lgd_len = len;
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len);
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (!body) {
- rc = -EFAULT;
- goto out;
- }
-
- ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- if (!ptr) {
- rc = -EFAULT;
- goto out;
- }
-
- memcpy(buf, ptr, len);
-out:
- ptlrpc_req_finished(req);
-err_exit:
- LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
- return rc;
-}
-
-static int llog_client_read_header(const struct lu_env *env,
- struct llog_handle *handle)
-{
- struct obd_import *imp;
- struct ptlrpc_request *req = NULL;
- struct llogd_body *body;
- struct llog_log_hdr *hdr;
- struct llog_rec_hdr *llh_hdr;
- int rc;
-
- LLOG_CLIENT_ENTRY(handle->lgh_ctxt, imp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
- LUSTRE_LOG_VERSION,
- LLOG_ORIGIN_HANDLE_READ_HEADER);
- if (!req) {
- rc = -ENOMEM;
- goto err_exit;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- body->lgd_logid = handle->lgh_id;
- body->lgd_ctxt_idx = handle->lgh_ctxt->loc_idx - 1;
- body->lgd_llh_flags = handle->lgh_hdr->llh_flags;
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
- if (!hdr) {
- rc = -EFAULT;
- goto out;
- }
-
- if (handle->lgh_hdr_size < hdr->llh_hdr.lrh_len) {
- rc = -EFAULT;
- goto out;
- }
-
- memcpy(handle->lgh_hdr, hdr, hdr->llh_hdr.lrh_len);
- handle->lgh_last_idx = LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index;
-
- /* sanity checks */
- llh_hdr = &handle->lgh_hdr->llh_hdr;
- if (llh_hdr->lrh_type != LLOG_HDR_MAGIC) {
- CERROR("bad log header magic: %#x (expecting %#x)\n",
- llh_hdr->lrh_type, LLOG_HDR_MAGIC);
- rc = -EIO;
- } else if (llh_hdr->lrh_len !=
- LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len ||
- (llh_hdr->lrh_len & (llh_hdr->lrh_len - 1)) ||
- llh_hdr->lrh_len < LLOG_MIN_CHUNK_SIZE ||
- llh_hdr->lrh_len > handle->lgh_hdr_size) {
- CERROR("incorrectly sized log header: %#x (expecting %#x) (power of two > 8192)\n",
- llh_hdr->lrh_len,
- LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len);
- CERROR("you may need to re-run lconf --write_conf.\n");
- rc = -EIO;
- }
-out:
- ptlrpc_req_finished(req);
-err_exit:
- LLOG_CLIENT_EXIT(handle->lgh_ctxt, imp);
- return rc;
-}
-
-static int llog_client_close(const struct lu_env *env,
- struct llog_handle *handle)
-{
- /* this doesn't call LLOG_ORIGIN_HANDLE_CLOSE because
- * the servers all close the file at the end of every
- * other LLOG_ RPC.
- */
- return 0;
-}
-
-struct llog_operations llog_client_ops = {
- .lop_next_block = llog_client_next_block,
- .lop_prev_block = llog_client_prev_block,
- .lop_read_header = llog_client_read_header,
- .lop_open = llog_client_open,
- .lop_close = llog_client_close,
-};
-EXPORT_SYMBOL(llog_client_ops);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
deleted file mode 100644
index bc4398b9bd1d..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/llog_net.c
- *
- * OST<->MDS recovery logging infrastructure.
- *
- * Invariants in implementation:
- * - we do not share logs among different OST<->MDS connections, so that
- * if an OST or MDS fails it need only look at log(s) relevant to itself
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd_class.h>
-#include <lustre_log.h>
-#include <linux/list.h>
-
-int llog_initiator_connect(struct llog_ctxt *ctxt)
-{
- struct obd_import *new_imp;
-
- LASSERT(ctxt);
- new_imp = ctxt->loc_obd->u.cli.cl_import;
- LASSERTF(!ctxt->loc_imp || ctxt->loc_imp == new_imp,
- "%p - %p\n", ctxt->loc_imp, new_imp);
- mutex_lock(&ctxt->loc_mutex);
- if (ctxt->loc_imp != new_imp) {
- if (ctxt->loc_imp)
- class_import_put(ctxt->loc_imp);
- ctxt->loc_imp = class_import_get(new_imp);
- }
- mutex_unlock(&ctxt->loc_mutex);
- return 0;
-}
-EXPORT_SYMBOL(llog_initiator_connect);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
deleted file mode 100644
index 36eea50a77e7..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ /dev/null
@@ -1,1332 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <obd_support.h>
-#include <obd.h>
-#include <lprocfs_status.h>
-#include <uapi/linux/lustre/lustre_idl.h>
-#include <lustre_net.h>
-#include <obd_class.h>
-#include "ptlrpc_internal.h"
-
-static struct ll_rpc_opcode {
- __u32 opcode;
- const char *opname;
-} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
- { OST_REPLY, "ost_reply" },
- { OST_GETATTR, "ost_getattr" },
- { OST_SETATTR, "ost_setattr" },
- { OST_READ, "ost_read" },
- { OST_WRITE, "ost_write" },
- { OST_CREATE, "ost_create" },
- { OST_DESTROY, "ost_destroy" },
- { OST_GET_INFO, "ost_get_info" },
- { OST_CONNECT, "ost_connect" },
- { OST_DISCONNECT, "ost_disconnect" },
- { OST_PUNCH, "ost_punch" },
- { OST_OPEN, "ost_open" },
- { OST_CLOSE, "ost_close" },
- { OST_STATFS, "ost_statfs" },
- { 14, NULL }, /* formerly OST_SAN_READ */
- { 15, NULL }, /* formerly OST_SAN_WRITE */
- { OST_SYNC, "ost_sync" },
- { OST_SET_INFO, "ost_set_info" },
- { OST_QUOTACHECK, "ost_quotacheck" },
- { OST_QUOTACTL, "ost_quotactl" },
- { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },
- { MDS_GETATTR, "mds_getattr" },
- { MDS_GETATTR_NAME, "mds_getattr_lock" },
- { MDS_CLOSE, "mds_close" },
- { MDS_REINT, "mds_reint" },
- { MDS_READPAGE, "mds_readpage" },
- { MDS_CONNECT, "mds_connect" },
- { MDS_DISCONNECT, "mds_disconnect" },
- { MDS_GETSTATUS, "mds_getstatus" },
- { MDS_STATFS, "mds_statfs" },
- { MDS_PIN, "mds_pin" },
- { MDS_UNPIN, "mds_unpin" },
- { MDS_SYNC, "mds_sync" },
- { MDS_DONE_WRITING, "mds_done_writing" },
- { MDS_SET_INFO, "mds_set_info" },
- { MDS_QUOTACHECK, "mds_quotacheck" },
- { MDS_QUOTACTL, "mds_quotactl" },
- { MDS_GETXATTR, "mds_getxattr" },
- { MDS_SETXATTR, "mds_setxattr" },
- { MDS_WRITEPAGE, "mds_writepage" },
- { MDS_IS_SUBDIR, "mds_is_subdir" },
- { MDS_GET_INFO, "mds_get_info" },
- { MDS_HSM_STATE_GET, "mds_hsm_state_get" },
- { MDS_HSM_STATE_SET, "mds_hsm_state_set" },
- { MDS_HSM_ACTION, "mds_hsm_action" },
- { MDS_HSM_PROGRESS, "mds_hsm_progress" },
- { MDS_HSM_REQUEST, "mds_hsm_request" },
- { MDS_HSM_CT_REGISTER, "mds_hsm_ct_register" },
- { MDS_HSM_CT_UNREGISTER, "mds_hsm_ct_unregister" },
- { MDS_SWAP_LAYOUTS, "mds_swap_layouts" },
- { LDLM_ENQUEUE, "ldlm_enqueue" },
- { LDLM_CONVERT, "ldlm_convert" },
- { LDLM_CANCEL, "ldlm_cancel" },
- { LDLM_BL_CALLBACK, "ldlm_bl_callback" },
- { LDLM_CP_CALLBACK, "ldlm_cp_callback" },
- { LDLM_GL_CALLBACK, "ldlm_gl_callback" },
- { LDLM_SET_INFO, "ldlm_set_info" },
- { MGS_CONNECT, "mgs_connect" },
- { MGS_DISCONNECT, "mgs_disconnect" },
- { MGS_EXCEPTION, "mgs_exception" },
- { MGS_TARGET_REG, "mgs_target_reg" },
- { MGS_TARGET_DEL, "mgs_target_del" },
- { MGS_SET_INFO, "mgs_set_info" },
- { MGS_CONFIG_READ, "mgs_config_read" },
- { OBD_PING, "obd_ping" },
- { OBD_LOG_CANCEL, "llog_cancel" },
- { OBD_QC_CALLBACK, "obd_quota_callback" },
- { OBD_IDX_READ, "dt_index_read" },
- { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_open" },
- { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
- { LLOG_ORIGIN_HANDLE_READ_HEADER, "llog_origin_handle_read_header" },
- { LLOG_ORIGIN_HANDLE_WRITE_REC, "llog_origin_handle_write_rec" },
- { LLOG_ORIGIN_HANDLE_CLOSE, "llog_origin_handle_close" },
- { LLOG_ORIGIN_CONNECT, "llog_origin_connect" },
- { LLOG_CATINFO, "llog_catinfo" },
- { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" },
- { LLOG_ORIGIN_HANDLE_DESTROY, "llog_origin_handle_destroy" },
- { QUOTA_DQACQ, "quota_acquire" },
- { QUOTA_DQREL, "quota_release" },
- { SEQ_QUERY, "seq_query" },
- { SEC_CTX_INIT, "sec_ctx_init" },
- { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
- { SEC_CTX_FINI, "sec_ctx_fini" },
- { FLD_QUERY, "fld_query" },
- { FLD_READ, "fld_read" },
-};
-
-static struct ll_eopcode {
- __u32 opcode;
- const char *opname;
-} ll_eopcode_table[EXTRA_LAST_OPC] = {
- { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
- { LDLM_PLAIN_ENQUEUE, "ldlm_plain_enqueue" },
- { LDLM_EXTENT_ENQUEUE, "ldlm_extent_enqueue" },
- { LDLM_FLOCK_ENQUEUE, "ldlm_flock_enqueue" },
- { LDLM_IBITS_ENQUEUE, "ldlm_ibits_enqueue" },
- { MDS_REINT_SETATTR, "mds_reint_setattr" },
- { MDS_REINT_CREATE, "mds_reint_create" },
- { MDS_REINT_LINK, "mds_reint_link" },
- { MDS_REINT_UNLINK, "mds_reint_unlink" },
- { MDS_REINT_RENAME, "mds_reint_rename" },
- { MDS_REINT_OPEN, "mds_reint_open" },
- { MDS_REINT_SETXATTR, "mds_reint_setxattr" },
- { BRW_READ_BYTES, "read_bytes" },
- { BRW_WRITE_BYTES, "write_bytes" },
-};
-
-const char *ll_opcode2str(__u32 opcode)
-{
- /* When one of the assertions below fail, chances are that:
- * 1) A new opcode was added in include/lustre/lustre_idl.h,
- * but is missing from the table above.
- * or 2) The opcode space was renumbered or rearranged,
- * and the opcode_offset() function in
- * ptlrpc_internal.h needs to be modified.
- */
- __u32 offset = opcode_offset(opcode);
-
- LASSERTF(offset < LUSTRE_MAX_OPCODES,
- "offset %u >= LUSTRE_MAX_OPCODES %u\n",
- offset, LUSTRE_MAX_OPCODES);
- LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode,
- "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
- offset, ll_rpc_opcode_table[offset].opcode, opcode);
- return ll_rpc_opcode_table[offset].opname;
-}
-
-static const char *ll_eopcode2str(__u32 opcode)
-{
- LASSERT(ll_eopcode_table[opcode].opcode == opcode);
- return ll_eopcode_table[opcode].opname;
-}
-
-static void
-ptlrpc_ldebugfs_register(struct dentry *root, char *dir,
- char *name,
- struct dentry **debugfs_root_ret,
- struct lprocfs_stats **stats_ret)
-{
- struct dentry *svc_debugfs_entry;
- struct lprocfs_stats *svc_stats;
- int i, rc;
- unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
- LPROCFS_CNTR_STDDEV;
-
- LASSERT(!*debugfs_root_ret);
- LASSERT(!*stats_ret);
-
- svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES,
- 0);
- if (!svc_stats)
- return;
-
- if (dir) {
- svc_debugfs_entry = ldebugfs_register(dir, root, NULL, NULL);
- if (IS_ERR(svc_debugfs_entry)) {
- lprocfs_free_stats(&svc_stats);
- return;
- }
- } else {
- svc_debugfs_entry = root;
- }
-
- lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,
- svc_counter_config, "req_waittime", "usec");
- lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,
- svc_counter_config, "req_qdepth", "reqs");
- lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,
- svc_counter_config, "req_active", "reqs");
- lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT,
- svc_counter_config, "req_timeout", "sec");
- lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
- svc_counter_config, "reqbuf_avail", "bufs");
- for (i = 0; i < EXTRA_LAST_OPC; i++) {
- char *units;
-
- switch (i) {
- case BRW_WRITE_BYTES:
- case BRW_READ_BYTES:
- units = "bytes";
- break;
- default:
- units = "reqs";
- break;
- }
- lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
- svc_counter_config,
- ll_eopcode2str(i), units);
- }
- for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
- __u32 opcode = ll_rpc_opcode_table[i].opcode;
-
- lprocfs_counter_init(svc_stats,
- EXTRA_MAX_OPCODES + i, svc_counter_config,
- ll_opcode2str(opcode), "usec");
- }
-
- rc = ldebugfs_register_stats(svc_debugfs_entry, name, svc_stats);
- if (rc < 0) {
- if (dir)
- ldebugfs_remove(&svc_debugfs_entry);
- lprocfs_free_stats(&svc_stats);
- } else {
- if (dir)
- *debugfs_root_ret = svc_debugfs_entry;
- *stats_ret = svc_stats;
- }
-}
-
-static int
-ptlrpc_lprocfs_req_history_len_seq_show(struct seq_file *m, void *v)
-{
- struct ptlrpc_service *svc = m->private;
- struct ptlrpc_service_part *svcpt;
- int total = 0;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc)
- total += svcpt->scp_hist_nrqbds;
-
- seq_printf(m, "%d\n", total);
- return 0;
-}
-
-LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_req_history_len);
-
-static int
-ptlrpc_lprocfs_req_history_max_seq_show(struct seq_file *m, void *n)
-{
- struct ptlrpc_service *svc = m->private;
- struct ptlrpc_service_part *svcpt;
- int total = 0;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc)
- total += svc->srv_hist_nrqbds_cpt_max;
-
- seq_printf(m, "%d\n", total);
- return 0;
-}
-
-static ssize_t
-ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
- int bufpages;
- int val;
- int rc;
-
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc < 0)
- return rc;
-
- if (val < 0)
- return -ERANGE;
-
- /* This sanity check is more of an insanity check; we can still
- * hose a kernel by allowing the request history to grow too
- * far.
- */
- bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (val > totalram_pages / (2 * bufpages))
- return -ERANGE;
-
- spin_lock(&svc->srv_lock);
-
- if (val == 0)
- svc->srv_hist_nrqbds_cpt_max = 0;
- else
- svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts));
-
- spin_unlock(&svc->srv_lock);
-
- return count;
-}
-
-LPROC_SEQ_FOPS(ptlrpc_lprocfs_req_history_max);
-
-static ssize_t threads_min_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
-
- return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_init * svc->srv_ncpts);
-}
-
-static ssize_t threads_min_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- unsigned long val;
- int rc = kstrtoul(buffer, 10, &val);
-
- if (rc < 0)
- return rc;
-
- if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
- return -ERANGE;
-
- spin_lock(&svc->srv_lock);
- if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) {
- spin_unlock(&svc->srv_lock);
- return -ERANGE;
- }
-
- svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;
-
- spin_unlock(&svc->srv_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(threads_min);
-
-static ssize_t threads_started_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- struct ptlrpc_service_part *svcpt;
- int total = 0;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc)
- total += svcpt->scp_nthrs_running;
-
- return sprintf(buf, "%d\n", total);
-}
-LUSTRE_RO_ATTR(threads_started);
-
-static ssize_t threads_max_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
-
- return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_limit * svc->srv_ncpts);
-}
-
-static ssize_t threads_max_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- unsigned long val;
- int rc = kstrtoul(buffer, 10, &val);
-
- if (rc < 0)
- return rc;
-
- if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
- return -ERANGE;
-
- spin_lock(&svc->srv_lock);
- if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) {
- spin_unlock(&svc->srv_lock);
- return -ERANGE;
- }
-
- svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts;
-
- spin_unlock(&svc->srv_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(threads_max);
-
-/**
- * \addtogoup nrs
- * @{
- */
-
-/**
- * Translates \e ptlrpc_nrs_pol_state values to human-readable strings.
- *
- * \param[in] state The policy state
- */
-static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state)
-{
- switch (state) {
- default:
- LBUG();
- case NRS_POL_STATE_INVALID:
- return "invalid";
- case NRS_POL_STATE_STOPPED:
- return "stopped";
- case NRS_POL_STATE_STOPPING:
- return "stopping";
- case NRS_POL_STATE_STARTING:
- return "starting";
- case NRS_POL_STATE_STARTED:
- return "started";
- }
-}
-
-/**
- * Obtains status information for \a policy.
- *
- * Information is copied in \a info.
- *
- * \param[in] policy The policy
- * \param[out] info Holds returned status information
- */
-static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_pol_info *info)
-{
- assert_spin_locked(&policy->pol_nrs->nrs_lock);
-
- memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
-
- info->pi_fallback = !!(policy->pol_flags & PTLRPC_NRS_FL_FALLBACK);
- info->pi_state = policy->pol_state;
- /**
- * XXX: These are accessed without holding
- * ptlrpc_service_part::scp_req_lock.
- */
- info->pi_req_queued = policy->pol_req_queued;
- info->pi_req_started = policy->pol_req_started;
-}
-
-/**
- * Reads and prints policy status information for all policies of a PTLRPC
- * service.
- */
-static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
-{
- struct ptlrpc_service *svc = m->private;
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_nrs *nrs;
- struct ptlrpc_nrs_policy *policy;
- struct ptlrpc_nrs_pol_info *infos;
- struct ptlrpc_nrs_pol_info tmp;
- unsigned int num_pols;
- unsigned int pol_idx = 0;
- bool hp = false;
- int i;
- int rc = 0;
-
- /**
- * Serialize NRS core lprocfs operations with policy registration/
- * unregistration.
- */
- mutex_lock(&nrs_core.nrs_mutex);
-
- /**
- * Use the first service partition's regular NRS head in order to obtain
- * the number of policies registered with NRS heads of this service. All
- * service partitions will have the same number of policies.
- */
- nrs = nrs_svcpt2nrs(svc->srv_parts[0], false);
-
- spin_lock(&nrs->nrs_lock);
- num_pols = svc->srv_parts[0]->scp_nrs_reg.nrs_num_pols;
- spin_unlock(&nrs->nrs_lock);
-
- infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS);
- if (!infos) {
- rc = -ENOMEM;
- goto unlock;
- }
-again:
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- nrs = nrs_svcpt2nrs(svcpt, hp);
- spin_lock(&nrs->nrs_lock);
-
- pol_idx = 0;
-
- list_for_each_entry(policy, &nrs->nrs_policy_list, pol_list) {
- LASSERT(pol_idx < num_pols);
-
- nrs_policy_get_info_locked(policy, &tmp);
- /**
- * Copy values when handling the first service
- * partition.
- */
- if (i == 0) {
- memcpy(infos[pol_idx].pi_name, tmp.pi_name,
- NRS_POL_NAME_MAX);
- memcpy(&infos[pol_idx].pi_state, &tmp.pi_state,
- sizeof(tmp.pi_state));
- infos[pol_idx].pi_fallback = tmp.pi_fallback;
- /**
- * For the rest of the service partitions
- * sanity-check the values we get.
- */
- } else {
- LASSERT(strncmp(infos[pol_idx].pi_name,
- tmp.pi_name,
- NRS_POL_NAME_MAX) == 0);
- /**
- * Not asserting ptlrpc_nrs_pol_info::pi_state,
- * because it may be different between
- * instances of the same policy in different
- * service partitions.
- */
- LASSERT(infos[pol_idx].pi_fallback ==
- tmp.pi_fallback);
- }
-
- infos[pol_idx].pi_req_queued += tmp.pi_req_queued;
- infos[pol_idx].pi_req_started += tmp.pi_req_started;
-
- pol_idx++;
- }
- spin_unlock(&nrs->nrs_lock);
- }
-
- /**
- * Policy status information output is in YAML format.
- * For example:
- *
- * regular_requests:
- * - name: fifo
- * state: started
- * fallback: yes
- * queued: 0
- * active: 0
- *
- * - name: crrn
- * state: started
- * fallback: no
- * queued: 2015
- * active: 384
- *
- * high_priority_requests:
- * - name: fifo
- * state: started
- * fallback: yes
- * queued: 0
- * active: 2
- *
- * - name: crrn
- * state: stopped
- * fallback: no
- * queued: 0
- * active: 0
- */
- seq_printf(m, "%s\n",
- !hp ? "\nregular_requests:" : "high_priority_requests:");
-
- for (pol_idx = 0; pol_idx < num_pols; pol_idx++) {
- seq_printf(m, " - name: %s\n"
- " state: %s\n"
- " fallback: %s\n"
- " queued: %-20d\n"
- " active: %-20d\n\n",
- infos[pol_idx].pi_name,
- nrs_state2str(infos[pol_idx].pi_state),
- infos[pol_idx].pi_fallback ? "yes" : "no",
- (int)infos[pol_idx].pi_req_queued,
- (int)infos[pol_idx].pi_req_started);
- }
-
- if (!hp && nrs_svc_has_hp(svc)) {
- memset(infos, 0, num_pols * sizeof(*infos));
-
- /**
- * Redo the processing for the service's HP NRS heads' policies.
- */
- hp = true;
- goto again;
- }
-
- kfree(infos);
-unlock:
- mutex_unlock(&nrs_core.nrs_mutex);
-
- return rc;
-}
-
-/**
- * The longest valid command string is the maximum policy name size, plus the
- * length of the " reg" substring
- */
-#define LPROCFS_NRS_WR_MAX_CMD (NRS_POL_NAME_MAX + sizeof(" reg") - 1)
-
-/**
- * Starts and stops a given policy on a PTLRPC service.
- *
- * Commands consist of the policy name, followed by an optional [reg|hp] token;
- * if the optional token is omitted, the operation is performed on both the
- * regular and high-priority (if the service has one) NRS head.
- */
-static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
- enum ptlrpc_nrs_queue_type queue = PTLRPC_NRS_QUEUE_BOTH;
- char *cmd;
- char *cmd_copy = NULL;
- char *token;
- int rc = 0;
-
- if (count >= LPROCFS_NRS_WR_MAX_CMD)
- return -EINVAL;
-
- cmd = kzalloc(LPROCFS_NRS_WR_MAX_CMD, GFP_NOFS);
- if (!cmd)
- return -ENOMEM;
- /**
- * strsep() modifies its argument, so keep a copy
- */
- cmd_copy = cmd;
-
- if (copy_from_user(cmd, buffer, count)) {
- rc = -EFAULT;
- goto out;
- }
-
- cmd[count] = '\0';
-
- token = strsep(&cmd, " ");
-
- if (strlen(token) > NRS_POL_NAME_MAX - 1) {
- rc = -EINVAL;
- goto out;
- }
-
- /**
- * No [reg|hp] token has been specified
- */
- if (!cmd)
- goto default_queue;
-
- /**
- * The second token is either NULL, or an optional [reg|hp] string
- */
- if (strcmp(cmd, "reg") == 0) {
- queue = PTLRPC_NRS_QUEUE_REG;
- } else if (strcmp(cmd, "hp") == 0) {
- queue = PTLRPC_NRS_QUEUE_HP;
- } else {
- rc = -EINVAL;
- goto out;
- }
-
-default_queue:
-
- if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
- rc = -ENODEV;
- goto out;
- } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) {
- queue = PTLRPC_NRS_QUEUE_REG;
- }
-
- /**
- * Serialize NRS core lprocfs operations with policy registration/
- * unregistration.
- */
- mutex_lock(&nrs_core.nrs_mutex);
-
- rc = ptlrpc_nrs_policy_control(svc, queue, token, PTLRPC_NRS_CTL_START,
- false, NULL);
-
- mutex_unlock(&nrs_core.nrs_mutex);
-out:
- kfree(cmd_copy);
-
- return rc < 0 ? rc : count;
-}
-
-LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs);
-
-/** @} nrs */
-
-struct ptlrpc_srh_iterator {
- int srhi_idx;
- __u64 srhi_seq;
- struct ptlrpc_request *srhi_req;
-};
-
-static int
-ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_srh_iterator *srhi,
- __u64 seq)
-{
- struct list_head *e;
- struct ptlrpc_request *req;
-
- if (srhi->srhi_req && srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
- srhi->srhi_seq <= seq) {
- /* If srhi_req was set previously, hasn't been culled and
- * we're searching for a seq on or after it (i.e. more
- * recent), search from it onwards.
- * Since the service history is LRU (i.e. culled reqs will
- * be near the head), we shouldn't have to do long
- * re-scans
- */
- LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
- "%s:%d: seek seq %llu, request seq %llu\n",
- svcpt->scp_service->srv_name, svcpt->scp_cpt,
- srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
- LASSERTF(!list_empty(&svcpt->scp_hist_reqs),
- "%s:%d: seek offset %llu, request seq %llu, last culled %llu\n",
- svcpt->scp_service->srv_name, svcpt->scp_cpt,
- seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled);
- e = &srhi->srhi_req->rq_history_list;
- } else {
- /* search from start */
- e = svcpt->scp_hist_reqs.next;
- }
-
- while (e != &svcpt->scp_hist_reqs) {
- req = list_entry(e, struct ptlrpc_request, rq_history_list);
-
- if (req->rq_history_seq >= seq) {
- srhi->srhi_seq = req->rq_history_seq;
- srhi->srhi_req = req;
- return 0;
- }
- e = e->next;
- }
-
- return -ENOENT;
-}
-
-/*
- * ptlrpc history sequence is used as "position" of seq_file, in some case,
- * seq_read() will increase "position" to indicate reading the next
- * element, however, low bits of history sequence are reserved for CPT id
- * (check the details from comments before ptlrpc_req_add_history), which
- * means seq_read() might change CPT id of history sequence and never
- * finish reading of requests on a CPT. To make it work, we have to shift
- * CPT id to high bits and timestamp to low bits, so seq_read() will only
- * increase timestamp which can correctly indicate the next position.
- */
-
-/* convert seq_file pos to cpt */
-#define PTLRPC_REQ_POS2CPT(svc, pos) \
- ((svc)->srv_cpt_bits == 0 ? 0 : \
- (__u64)(pos) >> (64 - (svc)->srv_cpt_bits))
-
-/* make up seq_file pos from cpt */
-#define PTLRPC_REQ_CPT2POS(svc, cpt) \
- ((svc)->srv_cpt_bits == 0 ? 0 : \
- (cpt) << (64 - (svc)->srv_cpt_bits))
-
-/* convert sequence to position */
-#define PTLRPC_REQ_SEQ2POS(svc, seq) \
- ((svc)->srv_cpt_bits == 0 ? (seq) : \
- ((seq) >> (svc)->srv_cpt_bits) | \
- ((seq) << (64 - (svc)->srv_cpt_bits)))
-
-/* convert position to sequence */
-#define PTLRPC_REQ_POS2SEQ(svc, pos) \
- ((svc)->srv_cpt_bits == 0 ? (pos) : \
- ((__u64)(pos) << (svc)->srv_cpt_bits) | \
- ((__u64)(pos) >> (64 - (svc)->srv_cpt_bits)))
-
-static void *
-ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
-{
- struct ptlrpc_service *svc = s->private;
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_srh_iterator *srhi;
- unsigned int cpt;
- int rc;
- int i;
-
- if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */
- CWARN("Failed to read request history because size of loff_t %d can't match size of u64\n",
- (int)sizeof(loff_t));
- return NULL;
- }
-
- srhi = kzalloc(sizeof(*srhi), GFP_NOFS);
- if (!srhi)
- return NULL;
-
- srhi->srhi_seq = 0;
- srhi->srhi_req = NULL;
-
- cpt = PTLRPC_REQ_POS2CPT(svc, *pos);
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (i < cpt) /* skip */
- continue;
- if (i > cpt) /* make up the lowest position for this CPT */
- *pos = PTLRPC_REQ_CPT2POS(svc, i);
-
- spin_lock(&svcpt->scp_lock);
- rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi,
- PTLRPC_REQ_POS2SEQ(svc, *pos));
- spin_unlock(&svcpt->scp_lock);
- if (rc == 0) {
- *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
- srhi->srhi_idx = i;
- return srhi;
- }
- }
-
- kfree(srhi);
- return NULL;
-}
-
-static void
-ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
-{
- struct ptlrpc_srh_iterator *srhi = iter;
-
- kfree(srhi);
-}
-
-static void *
-ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
- void *iter, loff_t *pos)
-{
- struct ptlrpc_service *svc = s->private;
- struct ptlrpc_srh_iterator *srhi = iter;
- struct ptlrpc_service_part *svcpt;
- __u64 seq;
- int rc;
- int i;
-
- for (i = srhi->srhi_idx; i < svc->srv_ncpts; i++) {
- svcpt = svc->srv_parts[i];
-
- if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
- srhi->srhi_req = NULL;
- seq = 0;
- srhi->srhi_seq = 0;
- } else { /* the next sequence */
- seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
- }
-
- spin_lock(&svcpt->scp_lock);
- rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, seq);
- spin_unlock(&svcpt->scp_lock);
- if (rc == 0) {
- *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
- srhi->srhi_idx = i;
- return srhi;
- }
- }
-
- kfree(srhi);
- return NULL;
-}
-
-static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
-{
- struct ptlrpc_service *svc = s->private;
- struct ptlrpc_srh_iterator *srhi = iter;
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_request *req;
- int rc;
-
- LASSERT(srhi->srhi_idx < svc->srv_ncpts);
-
- svcpt = svc->srv_parts[srhi->srhi_idx];
-
- spin_lock(&svcpt->scp_lock);
-
- rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);
-
- if (rc == 0) {
- struct timespec64 arrival, sent, arrivaldiff;
- char nidstr[LNET_NIDSTR_SIZE];
-
- req = srhi->srhi_req;
-
- libcfs_nid2str_r(req->rq_self, nidstr, sizeof(nidstr));
- arrival.tv_sec = req->rq_arrival_time.tv_sec;
- arrival.tv_nsec = req->rq_arrival_time.tv_nsec;
- sent.tv_sec = req->rq_sent;
- sent.tv_nsec = 0;
- arrivaldiff = timespec64_sub(sent, arrival);
-
- /* Print common req fields.
- * CAVEAT EMPTOR: we're racing with the service handler
- * here. The request could contain any old crap, so you
- * must be just as careful as the service's request
- * parser. Currently I only print stuff here I know is OK
- * to look at coz it was set up in request_in_callback()!!!
- */
- seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld.%06lld:%lld.%06llds(%+lld.0s) ",
- req->rq_history_seq, nidstr,
- libcfs_id2str(req->rq_peer), req->rq_xid,
- req->rq_reqlen, ptlrpc_rqphase2str(req),
- (s64)req->rq_arrival_time.tv_sec,
- (s64)req->rq_arrival_time.tv_nsec / NSEC_PER_USEC,
- (s64)arrivaldiff.tv_sec,
- (s64)(arrivaldiff.tv_nsec / NSEC_PER_USEC),
- (s64)(req->rq_sent - req->rq_deadline));
- if (!svc->srv_ops.so_req_printer)
- seq_putc(s, '\n');
- else
- svc->srv_ops.so_req_printer(s, srhi->srhi_req);
- }
-
- spin_unlock(&svcpt->scp_lock);
- return rc;
-}
-
-static int
-ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
-{
- static const struct seq_operations sops = {
- .start = ptlrpc_lprocfs_svc_req_history_start,
- .stop = ptlrpc_lprocfs_svc_req_history_stop,
- .next = ptlrpc_lprocfs_svc_req_history_next,
- .show = ptlrpc_lprocfs_svc_req_history_show,
- };
- struct seq_file *seqf;
- int rc;
-
- rc = seq_open(file, &sops);
- if (rc)
- return rc;
-
- seqf = file->private_data;
- seqf->private = inode->i_private;
- return 0;
-}
-
-/* See also lprocfs_rd_timeouts */
-static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n)
-{
- struct ptlrpc_service *svc = m->private;
- struct ptlrpc_service_part *svcpt;
- struct dhms ts;
- time64_t worstt;
- unsigned int cur;
- unsigned int worst;
- int i;
-
- if (AT_OFF) {
- seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n",
- obd_timeout);
- return 0;
- }
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- cur = at_get(&svcpt->scp_at_estimate);
- worst = svcpt->scp_at_estimate.at_worst_ever;
- worstt = svcpt->scp_at_estimate.at_worst_time;
- s2dhms(&ts, ktime_get_real_seconds() - worstt);
-
- seq_printf(m, "%10s : cur %3u worst %3u (at %lld, "
- DHMS_FMT " ago) ", "service",
- cur, worst, (s64)worstt, DHMS_VARS(&ts));
-
- lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate);
- }
-
- return 0;
-}
-
-LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_timeouts);
-
-static ssize_t high_priority_ratio_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- return sprintf(buf, "%d\n", svc->srv_hpreq_ratio);
-}
-
-static ssize_t high_priority_ratio_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- int rc;
- int val;
-
- rc = kstrtoint(buffer, 10, &val);
- if (rc < 0)
- return rc;
-
- if (val < 0)
- return -ERANGE;
-
- spin_lock(&svc->srv_lock);
- svc->srv_hpreq_ratio = val;
- spin_unlock(&svc->srv_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(high_priority_ratio);
-
-static struct attribute *ptlrpc_svc_attrs[] = {
- &lustre_attr_threads_min.attr,
- &lustre_attr_threads_started.attr,
- &lustre_attr_threads_max.attr,
- &lustre_attr_high_priority_ratio.attr,
- NULL,
-};
-
-static void ptlrpc_sysfs_svc_release(struct kobject *kobj)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
-
- complete(&svc->srv_kobj_unregister);
-}
-
-static struct kobj_type ptlrpc_svc_ktype = {
- .default_attrs = ptlrpc_svc_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = ptlrpc_sysfs_svc_release,
-};
-
-void ptlrpc_sysfs_unregister_service(struct ptlrpc_service *svc)
-{
- /* Let's see if we had a chance at initialization first */
- if (svc->srv_kobj.kset) {
- kobject_put(&svc->srv_kobj);
- wait_for_completion(&svc->srv_kobj_unregister);
- }
-}
-
-int ptlrpc_sysfs_register_service(struct kset *parent,
- struct ptlrpc_service *svc)
-{
- int rc;
-
- svc->srv_kobj.kset = parent;
- init_completion(&svc->srv_kobj_unregister);
- rc = kobject_init_and_add(&svc->srv_kobj, &ptlrpc_svc_ktype, NULL,
- "%s", svc->srv_name);
-
- return rc;
-}
-
-void ptlrpc_ldebugfs_register_service(struct dentry *entry,
- struct ptlrpc_service *svc)
-{
- struct lprocfs_vars lproc_vars[] = {
- {.name = "req_buffer_history_len",
- .fops = &ptlrpc_lprocfs_req_history_len_fops,
- .data = svc},
- {.name = "req_buffer_history_max",
- .fops = &ptlrpc_lprocfs_req_history_max_fops,
- .data = svc},
- {.name = "timeouts",
- .fops = &ptlrpc_lprocfs_timeouts_fops,
- .data = svc},
- {.name = "nrs_policies",
- .fops = &ptlrpc_lprocfs_nrs_fops,
- .data = svc},
- {NULL}
- };
- static const struct file_operations req_history_fops = {
- .owner = THIS_MODULE,
- .open = ptlrpc_lprocfs_svc_req_history_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = lprocfs_seq_release,
- };
-
- int rc;
-
- ptlrpc_ldebugfs_register(entry, svc->srv_name,
- "stats", &svc->srv_debugfs_entry,
- &svc->srv_stats);
-
- if (IS_ERR_OR_NULL(svc->srv_debugfs_entry))
- return;
-
- ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL);
-
- rc = ldebugfs_seq_create(svc->srv_debugfs_entry, "req_history",
- 0400, &req_history_fops, svc);
- if (rc)
- CWARN("Error adding the req_history file\n");
-}
-
-void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
-{
- ptlrpc_ldebugfs_register(obddev->obd_debugfs_entry, NULL, "stats",
- &obddev->obd_svc_debugfs_entry,
- &obddev->obd_svc_stats);
-}
-EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);
-
-void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount)
-{
- struct lprocfs_stats *svc_stats;
- __u32 op = lustre_msg_get_opc(req->rq_reqmsg);
- int opc = opcode_offset(op);
-
- svc_stats = req->rq_import->imp_obd->obd_svc_stats;
- if (!svc_stats || opc <= 0)
- return;
- LASSERT(opc < LUSTRE_MAX_OPCODES);
- if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
- lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, amount);
-}
-
-void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
-{
- struct lprocfs_stats *svc_stats;
- int idx;
-
- if (!req->rq_import)
- return;
- svc_stats = req->rq_import->imp_obd->obd_svc_stats;
- if (!svc_stats)
- return;
- idx = lustre_msg_get_opc(req->rq_reqmsg);
- switch (idx) {
- case OST_READ:
- idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR;
- break;
- case OST_WRITE:
- idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR;
- break;
- default:
- LASSERTF(0, "unsupported opcode %u\n", idx);
- break;
- }
-
- lprocfs_counter_add(svc_stats, idx, bytes);
-}
-EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
-
-void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
-{
- if (!IS_ERR_OR_NULL(svc->srv_debugfs_entry))
- ldebugfs_remove(&svc->srv_debugfs_entry);
-
- if (svc->srv_stats)
- lprocfs_free_stats(&svc->srv_stats);
-}
-
-void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
-{
- if (!IS_ERR_OR_NULL(obd->obd_svc_debugfs_entry))
- ldebugfs_remove(&obd->obd_svc_debugfs_entry);
-
- if (obd->obd_svc_stats)
- lprocfs_free_stats(&obd->obd_svc_stats);
-}
-EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);
-
-#undef BUFLEN
-
-int lprocfs_wr_ping(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
- struct ptlrpc_request *req;
- int rc;
-
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- req = ptlrpc_prep_ping(obd->u.cli.cl_import);
- up_read(&obd->u.cli.cl_sem);
- if (!req)
- return -ENOMEM;
-
- req->rq_send_state = LUSTRE_IMP_FULL;
-
- rc = ptlrpc_queue_wait(req);
-
- ptlrpc_req_finished(req);
- if (rc >= 0)
- return count;
- return rc;
-}
-EXPORT_SYMBOL(lprocfs_wr_ping);
-
-/* Write the connection UUID to this file to attempt to connect to that node.
- * The connection UUID is a node's primary NID. For example,
- * "echo connection=192.168.0.1@tcp0::instance > .../import".
- */
-int lprocfs_wr_import(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
- struct obd_import *imp = obd->u.cli.cl_import;
- char *kbuf = NULL;
- char *uuid;
- char *ptr;
- int do_reconn = 1;
- const char prefix[] = "connection=";
- const int prefix_len = sizeof(prefix) - 1;
-
- if (count > PAGE_SIZE - 1 || count <= prefix_len)
- return -EINVAL;
-
- kbuf = kzalloc(count + 1, GFP_NOFS);
- if (!kbuf)
- return -ENOMEM;
-
- if (copy_from_user(kbuf, buffer, count)) {
- count = -EFAULT;
- goto out;
- }
-
- kbuf[count] = 0;
-
- /* only support connection=uuid::instance now */
- if (strncmp(prefix, kbuf, prefix_len) != 0) {
- count = -EINVAL;
- goto out;
- }
-
- uuid = kbuf + prefix_len;
- ptr = strstr(uuid, "::");
- if (ptr) {
- __u32 inst;
- char *endptr;
-
- *ptr = 0;
- do_reconn = 0;
- ptr += strlen("::");
- inst = simple_strtoul(ptr, &endptr, 10);
- if (*endptr) {
- CERROR("config: wrong instance # %s\n", ptr);
- } else if (inst != imp->imp_connect_data.ocd_instance) {
- CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted target(%u/%u), reconnecting...\n",
- imp->imp_obd->obd_name,
- imp->imp_connect_data.ocd_instance, inst);
- do_reconn = 1;
- } else {
- CDEBUG(D_INFO, "IR: %s has already been connecting to new target(%u)\n",
- imp->imp_obd->obd_name, inst);
- }
- }
-
- if (do_reconn)
- ptlrpc_recover_import(imp, uuid, 1);
-
-out:
- kfree(kbuf);
- return count;
-}
-EXPORT_SYMBOL(lprocfs_wr_import);
-
-int lprocfs_rd_pinger_recov(struct seq_file *m, void *n)
-{
- struct obd_device *obd = m->private;
- struct obd_import *imp = obd->u.cli.cl_import;
- int rc;
-
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- seq_printf(m, "%d\n", !imp->imp_no_pinger_recover);
- up_read(&obd->u.cli.cl_sem);
-
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_pinger_recov);
-
-int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
- struct client_obd *cli = &obd->u.cli;
- struct obd_import *imp = cli->cl_import;
- int rc, val;
-
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc < 0)
- return rc;
-
- if (val != 0 && val != 1)
- return -ERANGE;
-
- rc = lprocfs_climp_check(obd);
- if (rc)
- return rc;
-
- spin_lock(&imp->imp_lock);
- imp->imp_no_pinger_recover = !val;
- spin_unlock(&imp->imp_lock);
- up_read(&obd->u.cli.cl_sem);
-
- return count;
-}
-EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
deleted file mode 100644
index 86883abaad2c..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ /dev/null
@@ -1,770 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include <obd_support.h>
-#include <lustre_net.h>
-#include <lustre_lib.h>
-#include <obd.h>
-#include <obd_class.h>
-#include "ptlrpc_internal.h"
-
-/**
- * Helper function. Sends \a len bytes from \a base at offset \a offset
- * over \a conn connection to portal \a portal.
- * Returns 0 on success or error code.
- */
-static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len,
- enum lnet_ack_req ack, struct ptlrpc_cb_id *cbid,
- struct ptlrpc_connection *conn, int portal, __u64 xid,
- unsigned int offset)
-{
- int rc;
- struct lnet_md md;
-
- LASSERT(portal != 0);
- CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
- md.start = base;
- md.length = len;
- md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
- md.options = PTLRPC_MD_OPTIONS;
- md.user_ptr = cbid;
- md.eq_handle = ptlrpc_eq_h;
-
- if (unlikely(ack == LNET_ACK_REQ &&
- OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK,
- OBD_FAIL_ONCE))) {
- /* don't ask for the ack to simulate failing client */
- ack = LNET_NOACK_REQ;
- }
-
- rc = LNetMDBind(md, LNET_UNLINK, mdh);
- if (unlikely(rc != 0)) {
- CERROR("LNetMDBind failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- return -ENOMEM;
- }
-
- CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
- len, portal, xid, offset);
-
- rc = LNetPut(conn->c_self, *mdh, ack,
- conn->c_peer, portal, xid, offset, 0);
- if (unlikely(rc != 0)) {
- int rc2;
- /* We're going to get an UNLINK event when I unlink below,
- * which will complete just like any other failed send, so
- * I fall through and return success here!
- */
- CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
- libcfs_id2str(conn->c_peer), portal, xid, rc);
- rc2 = LNetMDUnlink(*mdh);
- LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
- }
-
- return 0;
-}
-
-static void mdunlink_iterate_helper(struct lnet_handle_md *bd_mds, int count)
-{
- int i;
-
- for (i = 0; i < count; i++)
- LNetMDUnlink(bd_mds[i]);
-}
-
-/**
- * Register bulk at the sender for later transfer.
- * Returns 0 on success or error code.
- */
-static int ptlrpc_register_bulk(struct ptlrpc_request *req)
-{
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- struct lnet_process_id peer;
- int rc = 0;
- int rc2;
- int posted_md;
- int total_md;
- u64 mbits;
- struct lnet_handle_me me_h;
- struct lnet_md md;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
- return 0;
-
- /* NB no locking required until desc is on the network */
- LASSERT(desc->bd_nob > 0);
- LASSERT(desc->bd_md_count == 0);
- LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
- LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
- LASSERT(desc->bd_req);
- LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));
-
- /* cleanup the state of the bulk for it will be reused */
- if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
- desc->bd_nob_transferred = 0;
- else
- LASSERT(desc->bd_nob_transferred == 0);
-
- desc->bd_failure = 0;
-
- peer = desc->bd_import->imp_connection->c_peer;
-
- LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
- LASSERT(desc->bd_cbid.cbid_arg == desc);
-
- total_md = DIV_ROUND_UP(desc->bd_iov_count, LNET_MAX_IOV);
- /* rq_mbits is matchbits of the final bulk */
- mbits = req->rq_mbits - total_md + 1;
-
- LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
- "first mbits = x%llu, last mbits = x%llu\n",
- mbits, req->rq_mbits);
- LASSERTF(!(desc->bd_registered &&
- req->rq_send_state != LUSTRE_IMP_REPLAY) ||
- mbits != desc->bd_last_mbits,
- "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n",
- desc->bd_registered, mbits, desc->bd_last_mbits);
-
- desc->bd_registered = 1;
- desc->bd_last_mbits = mbits;
- desc->bd_md_count = total_md;
- md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq_h;
- md.threshold = 1; /* PUT or GET */
-
- for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
- md.options = PTLRPC_MD_OPTIONS |
- (ptlrpc_is_bulk_op_get(desc->bd_type) ?
- LNET_MD_OP_GET : LNET_MD_OP_PUT);
- ptlrpc_fill_bulk_md(&md, desc, posted_md);
-
- rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
- LNET_UNLINK, LNET_INS_AFTER, &me_h);
- if (rc != 0) {
- CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, mbits,
- posted_md, rc);
- break;
- }
-
- /* About to let the network at it... */
- rc = LNetMDAttach(me_h, md, LNET_UNLINK,
- &desc->bd_mds[posted_md]);
- if (rc != 0) {
- CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, mbits,
- posted_md, rc);
- rc2 = LNetMEUnlink(me_h);
- LASSERT(rc2 == 0);
- break;
- }
- }
-
- if (rc != 0) {
- LASSERT(rc == -ENOMEM);
- spin_lock(&desc->bd_lock);
- desc->bd_md_count -= total_md - posted_md;
- spin_unlock(&desc->bd_lock);
- LASSERT(desc->bd_md_count >= 0);
- mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
- req->rq_status = -ENOMEM;
- return -ENOMEM;
- }
-
- spin_lock(&desc->bd_lock);
- /* Holler if peer manages to touch buffers before he knows the mbits */
- if (desc->bd_md_count != total_md)
- CWARN("%s: Peer %s touched %d buffers while I registered\n",
- desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
- total_md - desc->bd_md_count);
- spin_unlock(&desc->bd_lock);
-
- CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n",
- desc->bd_md_count,
- ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
- desc->bd_iov_count, desc->bd_nob,
- desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);
-
- return 0;
-}
-
-/**
- * Disconnect a bulk desc from the network. Idempotent. Not
- * thread-safe (i.e. only interlocks with completion callback).
- * Returns 1 on success or 0 if network unregistration failed for whatever
- * reason.
- */
-int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
-{
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- wait_queue_head_t *wq;
- int rc;
-
- LASSERT(!in_interrupt()); /* might sleep */
-
- /* Let's setup deadline for reply unlink. */
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
- req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
-
- if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
- return 1; /* never registered */
-
- LASSERT(desc->bd_req == req); /* bd_req NULL until registered */
-
- /* the unlink ensures the callback happens ASAP and is the last
- * one. If it fails, it must be because completion just happened,
- * but we must still wait_event() in this case to give liblustre
- * a chance to run client_bulk_callback()
- */
- mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
-
- if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
- return 1; /* never registered */
-
- /* Move to "Unregistering" phase as bulk was not unlinked yet. */
- ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);
-
- /* Do not wait for unlink to finish. */
- if (async)
- return 0;
-
- if (req->rq_set)
- wq = &req->rq_set->set_waitq;
- else
- wq = &req->rq_reply_waitq;
-
- for (;;) {
- /* Network access will complete in finite time but the HUGE
- * timeout lets us CWARN for visibility of sluggish LNDs
- */
- int cnt = 0;
- while (cnt < LONG_UNLINK &&
- (rc = wait_event_idle_timeout(*wq,
- !ptlrpc_client_bulk_active(req),
- HZ)) == 0)
- cnt += 1;
- if (rc > 0) {
- ptlrpc_rqphase_move(req, req->rq_next_phase);
- return 1;
- }
-
- DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
- desc);
- }
- return 0;
-}
-
-static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
-{
- struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- int service_time = max_t(int, ktime_get_real_seconds() -
- req->rq_arrival_time.tv_sec, 1);
-
- if (!(flags & PTLRPC_REPLY_EARLY) &&
- (req->rq_type != PTL_RPC_MSG_ERR) && req->rq_reqmsg &&
- !(lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_RESENT | MSG_REPLAY |
- MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
- /* early replies, errors and recovery requests don't count
- * toward our service time estimate
- */
- int oldse = at_measured(&svcpt->scp_at_estimate, service_time);
-
- if (oldse != 0) {
- DEBUG_REQ(D_ADAPTTO, req,
- "svc %s changed estimate from %d to %d",
- svc->srv_name, oldse,
- at_get(&svcpt->scp_at_estimate));
- }
- }
- /* Report actual service time for client latency calc */
- lustre_msg_set_service_time(req->rq_repmsg, service_time);
- /* Report service time estimate for future client reqs, but report 0
- * (to be ignored by client) if it's a error reply during recovery.
- * (bz15815)
- */
- if (req->rq_type == PTL_RPC_MSG_ERR && !req->rq_export)
- lustre_msg_set_timeout(req->rq_repmsg, 0);
- else
- lustre_msg_set_timeout(req->rq_repmsg,
- at_get(&svcpt->scp_at_estimate));
-
- if (req->rq_reqmsg &&
- !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x req_flags=%#x magic=%x/%x len=%d\n",
- flags, lustre_msg_get_flags(req->rq_reqmsg),
- lustre_msg_get_magic(req->rq_reqmsg),
- lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
- }
-}
-
-/**
- * Send request reply from request \a req reply buffer.
- * \a flags defines reply types
- * Returns 0 on success or error code
- */
-int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- struct ptlrpc_connection *conn;
- int rc;
-
- /* We must already have a reply buffer (only ptlrpc_error() may be
- * called without one). The reply generated by sptlrpc layer (e.g.
- * error notify, etc.) might have NULL rq->reqmsg; Otherwise we must
- * have a request buffer which is either the actual (swabbed) incoming
- * request, or a saved copy if this is a req saved in
- * target_queue_final_reply().
- */
- LASSERT(req->rq_no_reply == 0);
- LASSERT(req->rq_reqbuf);
- LASSERT(rs);
- LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
- LASSERT(req->rq_repmsg);
- LASSERT(req->rq_repmsg == rs->rs_msg);
- LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
- LASSERT(rs->rs_cb_id.cbid_arg == rs);
-
- /* There may be no rq_export during failover */
-
- if (unlikely(req->rq_export && req->rq_export->exp_obd &&
- req->rq_export->exp_obd->obd_fail)) {
- /* Failed obd's only send ENODEV */
- req->rq_type = PTL_RPC_MSG_ERR;
- req->rq_status = -ENODEV;
- CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
- req->rq_export->exp_obd->obd_minor);
- }
-
- /* In order to keep interoperability with the client (< 2.3) which
- * doesn't have pb_jobid in ptlrpc_body, We have to shrink the
- * ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the
- * reply buffer on client will be overflow.
- *
- * XXX Remove this whenever we drop the interoperability with
- * such client.
- */
- req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
- sizeof(struct ptlrpc_body_v2), 1);
-
- if (req->rq_type != PTL_RPC_MSG_ERR)
- req->rq_type = PTL_RPC_MSG_REPLY;
-
- lustre_msg_set_type(req->rq_repmsg, req->rq_type);
- lustre_msg_set_status(req->rq_repmsg,
- ptlrpc_status_hton(req->rq_status));
- lustre_msg_set_opc(req->rq_repmsg,
- req->rq_reqmsg ?
- lustre_msg_get_opc(req->rq_reqmsg) : 0);
-
- target_pack_pool_reply(req);
-
- ptlrpc_at_set_reply(req, flags);
-
- if (!req->rq_export || !req->rq_export->exp_connection)
- conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
- else
- conn = ptlrpc_connection_addref(req->rq_export->exp_connection);
-
- if (unlikely(!conn)) {
- CERROR("not replying on NULL connection\n"); /* bug 9635 */
- return -ENOTCONN;
- }
- ptlrpc_rs_addref(rs); /* +1 ref for the network */
-
- rc = sptlrpc_svc_wrap_reply(req);
- if (unlikely(rc))
- goto out;
-
- req->rq_sent = ktime_get_real_seconds();
-
- rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
- (rs->rs_difficult && !rs->rs_no_ack) ?
- LNET_ACK_REQ : LNET_NOACK_REQ,
- &rs->rs_cb_id, conn,
- ptlrpc_req2svc(req)->srv_rep_portal,
- req->rq_xid, req->rq_reply_off);
-out:
- if (unlikely(rc != 0))
- ptlrpc_req_drop_rs(req);
- ptlrpc_connection_put(conn);
- return rc;
-}
-
-int ptlrpc_reply(struct ptlrpc_request *req)
-{
- if (req->rq_no_reply)
- return 0;
- return ptlrpc_send_reply(req, 0);
-}
-
-/**
- * For request \a req send an error reply back. Create empty
- * reply buffers if necessary.
- */
-int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
-{
- int rc;
-
- if (req->rq_no_reply)
- return 0;
-
- if (!req->rq_repmsg) {
- rc = lustre_pack_reply(req, 1, NULL, NULL);
- if (rc)
- return rc;
- }
-
- if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
- req->rq_status != -EPERM && req->rq_status != -ENOENT &&
- req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT)
- req->rq_type = PTL_RPC_MSG_ERR;
-
- rc = ptlrpc_send_reply(req, may_be_difficult);
- return rc;
-}
-
-int ptlrpc_error(struct ptlrpc_request *req)
-{
- return ptlrpc_send_error(req, 0);
-}
-
-/**
- * Send request \a request.
- * if \a noreply is set, don't expect any reply back and don't set up
- * reply buffers.
- * Returns 0 on success or error code.
- */
-int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
-{
- int rc;
- int rc2;
- int mpflag = 0;
- struct ptlrpc_connection *connection;
- struct lnet_handle_me reply_me_h;
- struct lnet_md reply_md;
- struct obd_import *imp = request->rq_import;
- struct obd_device *obd = imp->imp_obd;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
- return 0;
-
- LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
- LASSERT(request->rq_wait_ctx == 0);
-
- /* If this is a re-transmit, we're required to have disengaged
- * cleanly from the previous attempt
- */
- LASSERT(!request->rq_receiving_reply);
- LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
- (imp->imp_state == LUSTRE_IMP_FULL)));
-
- if (unlikely(obd && obd->obd_fail)) {
- CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
- obd->obd_name);
- /* this prevents us from waiting in ptlrpc_queue_wait */
- spin_lock(&request->rq_lock);
- request->rq_err = 1;
- spin_unlock(&request->rq_lock);
- request->rq_status = -ENODEV;
- return -ENODEV;
- }
-
- connection = imp->imp_connection;
-
- lustre_msg_set_handle(request->rq_reqmsg,
- &imp->imp_remote_handle);
- lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
- lustre_msg_set_conn_cnt(request->rq_reqmsg, imp->imp_conn_cnt);
- lustre_msghdr_set_flags(request->rq_reqmsg, imp->imp_msghdr_flags);
-
- /*
- * If it's the first time to resend the request for EINPROGRESS,
- * we need to allocate a new XID (see after_reply()), it's different
- * from the resend for reply timeout.
- */
- if (request->rq_nr_resend && list_empty(&request->rq_unreplied_list)) {
- __u64 min_xid = 0;
- /*
- * resend for EINPROGRESS, allocate new xid to avoid reply
- * reconstruction
- */
- spin_lock(&imp->imp_lock);
- ptlrpc_assign_next_xid_nolock(request);
- min_xid = ptlrpc_known_replied_xid(imp);
- spin_unlock(&imp->imp_lock);
-
- lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
- DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for resend on EINPROGRESS");
- }
-
- if (request->rq_bulk) {
- ptlrpc_set_bulk_mbits(request);
- lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
- }
-
- if (list_empty(&request->rq_unreplied_list) ||
- request->rq_xid <= imp->imp_known_replied_xid) {
- DEBUG_REQ(D_ERROR, request,
- "xid: %llu, replied: %llu, list_empty:%d\n",
- request->rq_xid, imp->imp_known_replied_xid,
- list_empty(&request->rq_unreplied_list));
- LBUG();
- }
-
- /**
- * For enabled AT all request should have AT_SUPPORT in the
- * FULL import state when OBD_CONNECT_AT is set
- */
- LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
- (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
- !(imp->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_AT));
-
- if (request->rq_resend)
- lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
-
- if (request->rq_memalloc)
- mpflag = cfs_memory_pressure_get_and_set();
-
- rc = sptlrpc_cli_wrap_request(request);
- if (rc) {
- /*
- * set rq_sent so that this request is treated
- * as a delayed send in the upper layers
- */
- if (rc == -ENOMEM)
- request->rq_sent = ktime_get_seconds();
- goto out;
- }
-
- /* bulk register should be done after wrap_request() */
- if (request->rq_bulk) {
- rc = ptlrpc_register_bulk(request);
- if (rc != 0)
- goto out;
- }
-
- if (!noreply) {
- LASSERT(request->rq_replen != 0);
- if (!request->rq_repbuf) {
- LASSERT(!request->rq_repdata);
- LASSERT(!request->rq_repmsg);
- rc = sptlrpc_cli_alloc_repbuf(request,
- request->rq_replen);
- if (rc) {
- /* this prevents us from looping in
- * ptlrpc_queue_wait
- */
- spin_lock(&request->rq_lock);
- request->rq_err = 1;
- spin_unlock(&request->rq_lock);
- request->rq_status = rc;
- goto cleanup_bulk;
- }
- } else {
- request->rq_repdata = NULL;
- request->rq_repmsg = NULL;
- }
-
- rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
- connection->c_peer, request->rq_xid, 0,
- LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
- if (rc != 0) {
- CERROR("LNetMEAttach failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- rc = -ENOMEM;
- goto cleanup_bulk;
- }
- }
-
- spin_lock(&request->rq_lock);
- /* We are responsible for unlinking the reply buffer */
- request->rq_reply_unlinked = noreply;
- request->rq_receiving_reply = !noreply;
- /* Clear any flags that may be present from previous sends. */
- request->rq_req_unlinked = 0;
- request->rq_replied = 0;
- request->rq_err = 0;
- request->rq_timedout = 0;
- request->rq_net_err = 0;
- request->rq_resend = 0;
- request->rq_restart = 0;
- request->rq_reply_truncated = 0;
- spin_unlock(&request->rq_lock);
-
- if (!noreply) {
- reply_md.start = request->rq_repbuf;
- reply_md.length = request->rq_repbuf_len;
- /* Allow multiple early replies */
- reply_md.threshold = LNET_MD_THRESH_INF;
- /* Manage remote for early replies */
- reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
- LNET_MD_MANAGE_REMOTE |
- LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */
- reply_md.user_ptr = &request->rq_reply_cbid;
- reply_md.eq_handle = ptlrpc_eq_h;
-
- /* We must see the unlink callback to set rq_reply_unlinked,
- * so we can't auto-unlink
- */
- rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
- &request->rq_reply_md_h);
- if (rc != 0) {
- CERROR("LNetMDAttach failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- spin_lock(&request->rq_lock);
- /* ...but the MD attach didn't succeed... */
- request->rq_receiving_reply = 0;
- spin_unlock(&request->rq_lock);
- rc = -ENOMEM;
- goto cleanup_me;
- }
-
- CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %llu, portal %u\n",
- request->rq_repbuf_len, request->rq_xid,
- request->rq_reply_portal);
- }
-
- /* add references on request for request_out_callback */
- ptlrpc_request_addref(request);
- if (obd && obd->obd_svc_stats)
- lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
- atomic_read(&imp->imp_inflight));
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
-
- ktime_get_real_ts64(&request->rq_sent_tv);
- request->rq_sent = ktime_get_real_seconds();
- /* We give the server rq_timeout secs to process the req, and
- * add the network latency for our local timeout.
- */
- request->rq_deadline = request->rq_sent + request->rq_timeout +
- ptlrpc_at_get_net_latency(request);
-
- ptlrpc_pinger_sending_on_import(imp);
-
- DEBUG_REQ(D_INFO, request, "send flg=%x",
- lustre_msg_get_flags(request->rq_reqmsg));
- rc = ptl_send_buf(&request->rq_req_md_h,
- request->rq_reqbuf, request->rq_reqdata_len,
- LNET_NOACK_REQ, &request->rq_req_cbid,
- connection,
- request->rq_request_portal,
- request->rq_xid, 0);
- if (likely(rc == 0))
- goto out;
-
- request->rq_req_unlinked = 1;
- ptlrpc_req_finished(request);
- if (noreply)
- goto out;
-
- cleanup_me:
- /* MEUnlink is safe; the PUT didn't even get off the ground, and
- * nobody apart from the PUT's target has the right nid+XID to
- * access the reply buffer.
- */
- rc2 = LNetMEUnlink(reply_me_h);
- LASSERT(rc2 == 0);
- /* UNLINKED callback called synchronously */
- LASSERT(!request->rq_receiving_reply);
-
- cleanup_bulk:
- /* We do sync unlink here as there was no real transfer here so
- * the chance to have long unlink to sluggish net is smaller here.
- */
- ptlrpc_unregister_bulk(request, 0);
- out:
- if (request->rq_memalloc)
- cfs_memory_pressure_restore(mpflag);
- return rc;
-}
-EXPORT_SYMBOL(ptl_send_rpc);
-
-/**
- * Register request buffer descriptor for request receiving.
- */
-int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
-{
- struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
- static struct lnet_process_id match_id = {LNET_NID_ANY, LNET_PID_ANY};
- int rc;
- struct lnet_md md;
- struct lnet_handle_me me_h;
-
- CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
- service->srv_req_portal);
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
- return -ENOMEM;
-
- /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
- * which means buffer can only be attached on local CPT, and LND
- * threads can find it by grabbing a local lock
- */
- rc = LNetMEAttach(service->srv_req_portal,
- match_id, 0, ~0, LNET_UNLINK,
- rqbd->rqbd_svcpt->scp_cpt >= 0 ?
- LNET_INS_LOCAL : LNET_INS_AFTER, &me_h);
- if (rc != 0) {
- CERROR("LNetMEAttach failed: %d\n", rc);
- return -ENOMEM;
- }
-
- LASSERT(rqbd->rqbd_refcount == 0);
- rqbd->rqbd_refcount = 1;
-
- md.start = rqbd->rqbd_buffer;
- md.length = service->srv_buf_size;
- md.max_size = service->srv_max_req_size;
- md.threshold = LNET_MD_THRESH_INF;
- md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
- md.user_ptr = &rqbd->rqbd_cbid;
- md.eq_handle = ptlrpc_eq_h;
-
- rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
- if (rc == 0)
- return 0;
-
- CERROR("LNetMDAttach failed: %d;\n", rc);
- LASSERT(rc == -ENOMEM);
- rc = LNetMEUnlink(me_h);
- LASSERT(rc == 0);
- rqbd->rqbd_refcount = 0;
-
- return -ENOMEM;
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
deleted file mode 100644
index 4847f9a90cc9..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ /dev/null
@@ -1,1612 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details. A copy is
- * included in the COPYING file that accompanied this code.
-
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2011 Intel Corporation
- *
- * Copyright 2012 Xyratex Technology Limited
- */
-/*
- * lustre/ptlrpc/nrs.c
- *
- * Network Request Scheduler (NRS)
- *
- * Allows to reorder the handling of RPCs at servers.
- *
- * Author: Liang Zhen <liang@whamcloud.com>
- * Author: Nikitas Angelinas <nikitas_angelinas@xyratex.com>
- */
-/**
- * \addtogoup nrs
- * @{
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lprocfs_status.h>
-#include <linux/libcfs/libcfs.h>
-#include "ptlrpc_internal.h"
-
-/**
- * NRS core object.
- */
-struct nrs_core nrs_core;
-
-static int nrs_policy_init(struct ptlrpc_nrs_policy *policy)
-{
- return policy->pol_desc->pd_ops->op_policy_init ?
- policy->pol_desc->pd_ops->op_policy_init(policy) : 0;
-}
-
-static void nrs_policy_fini(struct ptlrpc_nrs_policy *policy)
-{
- LASSERT(policy->pol_ref == 0);
- LASSERT(policy->pol_req_queued == 0);
-
- if (policy->pol_desc->pd_ops->op_policy_fini)
- policy->pol_desc->pd_ops->op_policy_fini(policy);
-}
-
-static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy,
- enum ptlrpc_nrs_ctl opc, void *arg)
-{
- /**
- * The policy may be stopped, but the lprocfs files and
- * ptlrpc_nrs_policy instances remain present until unregistration time.
- * Do not perform the ctl operation if the policy is stopped, as
- * policy->pol_private will be NULL in such a case.
- */
- if (policy->pol_state == NRS_POL_STATE_STOPPED)
- return -ENODEV;
-
- return policy->pol_desc->pd_ops->op_policy_ctl ?
- policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) :
- -ENOSYS;
-}
-
-static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
-{
- if (policy->pol_desc->pd_ops->op_policy_stop)
- policy->pol_desc->pd_ops->op_policy_stop(policy);
-
- LASSERT(list_empty(&policy->pol_list_queued));
- LASSERT(policy->pol_req_queued == 0 &&
- policy->pol_req_started == 0);
-
- policy->pol_private = NULL;
-
- policy->pol_state = NRS_POL_STATE_STOPPED;
-
- if (atomic_dec_and_test(&policy->pol_desc->pd_refs))
- module_put(policy->pol_desc->pd_owner);
-}
-
-static int nrs_policy_stop_locked(struct ptlrpc_nrs_policy *policy)
-{
- struct ptlrpc_nrs *nrs = policy->pol_nrs;
-
- if (nrs->nrs_policy_fallback == policy && !nrs->nrs_stopping)
- return -EPERM;
-
- if (policy->pol_state == NRS_POL_STATE_STARTING)
- return -EAGAIN;
-
- /* In progress or already stopped */
- if (policy->pol_state != NRS_POL_STATE_STARTED)
- return 0;
-
- policy->pol_state = NRS_POL_STATE_STOPPING;
-
- /* Immediately make it invisible */
- if (nrs->nrs_policy_primary == policy) {
- nrs->nrs_policy_primary = NULL;
-
- } else {
- LASSERT(nrs->nrs_policy_fallback == policy);
- nrs->nrs_policy_fallback = NULL;
- }
-
- /* I have the only refcount */
- if (policy->pol_ref == 1)
- nrs_policy_stop0(policy);
-
- return 0;
-}
-
-/**
- * Transitions the \a nrs NRS head's primary policy to
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING and if the policy has no
- * pending usage references, to ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED.
- *
- * \param[in] nrs the NRS head to carry out this operation on
- */
-static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs)
-{
- struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary;
-
- if (!tmp)
- return;
-
- nrs->nrs_policy_primary = NULL;
-
- LASSERT(tmp->pol_state == NRS_POL_STATE_STARTED);
- tmp->pol_state = NRS_POL_STATE_STOPPING;
-
- if (tmp->pol_ref == 0)
- nrs_policy_stop0(tmp);
-}
-
-/**
- * Transitions a policy across the ptlrpc_nrs_pol_state range of values, in
- * response to an lprocfs command to start a policy.
- *
- * If a primary policy different to the current one is specified, this function
- * will transition the new policy to the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTING and then to
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED, and will then transition
- * the old primary policy (if there is one) to
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING, and if there are no outstanding
- * references on the policy to ptlrpc_nrs_pol_stae::NRS_POL_STATE_STOPPED.
- *
- * If the fallback policy is specified, this is taken to indicate an instruction
- * to stop the current primary policy, without substituting it with another
- * primary policy, so the primary policy (if any) is transitioned to
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING, and if there are no outstanding
- * references on the policy to ptlrpc_nrs_pol_stae::NRS_POL_STATE_STOPPED. In
- * this case, the fallback policy is only left active in the NRS head.
- */
-static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy)
-{
- struct ptlrpc_nrs *nrs = policy->pol_nrs;
- int rc = 0;
-
- /**
- * Don't allow multiple starting which is too complex, and has no real
- * benefit.
- */
- if (nrs->nrs_policy_starting)
- return -EAGAIN;
-
- LASSERT(policy->pol_state != NRS_POL_STATE_STARTING);
-
- if (policy->pol_state == NRS_POL_STATE_STOPPING)
- return -EAGAIN;
-
- if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) {
- /**
- * This is for cases in which the user sets the policy to the
- * fallback policy (currently fifo for all services); i.e. the
- * user is resetting the policy to the default; so we stop the
- * primary policy, if any.
- */
- if (policy == nrs->nrs_policy_fallback) {
- nrs_policy_stop_primary(nrs);
- return 0;
- }
-
- /**
- * If we reach here, we must be setting up the fallback policy
- * at service startup time, and only a single policy with the
- * nrs_policy_flags::PTLRPC_NRS_FL_FALLBACK flag set can
- * register with NRS core.
- */
- LASSERT(!nrs->nrs_policy_fallback);
- } else {
- /**
- * Shouldn't start primary policy if w/o fallback policy.
- */
- if (!nrs->nrs_policy_fallback)
- return -EPERM;
-
- if (policy->pol_state == NRS_POL_STATE_STARTED)
- return 0;
- }
-
- /**
- * Increase the module usage count for policies registering from other
- * modules.
- */
- if (atomic_inc_return(&policy->pol_desc->pd_refs) == 1 &&
- !try_module_get(policy->pol_desc->pd_owner)) {
- atomic_dec(&policy->pol_desc->pd_refs);
- CERROR("NRS: cannot get module for policy %s; is it alive?\n",
- policy->pol_desc->pd_name);
- return -ENODEV;
- }
-
- /**
- * Serialize policy starting across the NRS head
- */
- nrs->nrs_policy_starting = 1;
-
- policy->pol_state = NRS_POL_STATE_STARTING;
-
- if (policy->pol_desc->pd_ops->op_policy_start) {
- spin_unlock(&nrs->nrs_lock);
-
- rc = policy->pol_desc->pd_ops->op_policy_start(policy);
-
- spin_lock(&nrs->nrs_lock);
- if (rc != 0) {
- if (atomic_dec_and_test(&policy->pol_desc->pd_refs))
- module_put(policy->pol_desc->pd_owner);
-
- policy->pol_state = NRS_POL_STATE_STOPPED;
- goto out;
- }
- }
-
- policy->pol_state = NRS_POL_STATE_STARTED;
-
- if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) {
- /**
- * This path is only used at PTLRPC service setup time.
- */
- nrs->nrs_policy_fallback = policy;
- } else {
- /*
- * Try to stop the current primary policy if there is one.
- */
- nrs_policy_stop_primary(nrs);
-
- /**
- * And set the newly-started policy as the primary one.
- */
- nrs->nrs_policy_primary = policy;
- }
-
-out:
- nrs->nrs_policy_starting = 0;
-
- return rc;
-}
-
-/**
- * Increases the policy's usage reference count.
- */
-static inline void nrs_policy_get_locked(struct ptlrpc_nrs_policy *policy)
-{
- policy->pol_ref++;
-}
-
-/**
- * Decreases the policy's usage reference count, and stops the policy in case it
- * was already stopping and have no more outstanding usage references (which
- * indicates it has no more queued or started requests, and can be safely
- * stopped).
- */
-static void nrs_policy_put_locked(struct ptlrpc_nrs_policy *policy)
-{
- LASSERT(policy->pol_ref > 0);
-
- policy->pol_ref--;
- if (unlikely(policy->pol_ref == 0 &&
- policy->pol_state == NRS_POL_STATE_STOPPING))
- nrs_policy_stop0(policy);
-}
-
-static void nrs_policy_put(struct ptlrpc_nrs_policy *policy)
-{
- spin_lock(&policy->pol_nrs->nrs_lock);
- nrs_policy_put_locked(policy);
- spin_unlock(&policy->pol_nrs->nrs_lock);
-}
-
-/**
- * Find and return a policy by name.
- */
-static struct ptlrpc_nrs_policy *nrs_policy_find_locked(struct ptlrpc_nrs *nrs,
- char *name)
-{
- struct ptlrpc_nrs_policy *tmp;
-
- list_for_each_entry(tmp, &nrs->nrs_policy_list, pol_list) {
- if (strncmp(tmp->pol_desc->pd_name, name,
- NRS_POL_NAME_MAX) == 0) {
- nrs_policy_get_locked(tmp);
- return tmp;
- }
- }
- return NULL;
-}
-
-/**
- * Release references for the resource hierarchy moving upwards towards the
- * policy instance resource.
- */
-static void nrs_resource_put(struct ptlrpc_nrs_resource *res)
-{
- struct ptlrpc_nrs_policy *policy = res->res_policy;
-
- if (policy->pol_desc->pd_ops->op_res_put) {
- struct ptlrpc_nrs_resource *parent;
-
- for (; res; res = parent) {
- parent = res->res_parent;
- policy->pol_desc->pd_ops->op_res_put(policy, res);
- }
- }
-}
-
-/**
- * Obtains references for each resource in the resource hierarchy for request
- * \a nrq if it is to be handled by \a policy.
- *
- * \param[in] policy the policy
- * \param[in] nrq the request
- * \param[in] moving_req denotes whether this is a call to the function by
- * ldlm_lock_reorder_req(), in order to move \a nrq to
- * the high-priority NRS head; we should not sleep when
- * set.
- *
- * \retval NULL resource hierarchy references not obtained
- * \retval valid-pointer the bottom level of the resource hierarchy
- *
- * \see ptlrpc_nrs_pol_ops::op_res_get()
- */
-static
-struct ptlrpc_nrs_resource *nrs_resource_get(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- bool moving_req)
-{
- /**
- * Set to NULL to traverse the resource hierarchy from the top.
- */
- struct ptlrpc_nrs_resource *res = NULL;
- struct ptlrpc_nrs_resource *tmp = NULL;
- int rc;
-
- while (1) {
- rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res,
- &tmp, moving_req);
- if (rc < 0) {
- if (res)
- nrs_resource_put(res);
- return NULL;
- }
-
- tmp->res_parent = res;
- tmp->res_policy = policy;
- res = tmp;
- tmp = NULL;
- /**
- * Return once we have obtained a reference to the bottom level
- * of the resource hierarchy.
- */
- if (rc > 0)
- return res;
- }
-}
-
-/**
- * Obtains resources for the resource hierarchies and policy references for
- * the fallback and current primary policy (if any), that will later be used
- * to handle request \a nrq.
- *
- * \param[in] nrs the NRS head instance that will be handling request \a nrq.
- * \param[in] nrq the request that is being handled.
- * \param[out] resp the array where references to the resource hierarchy are
- * stored.
- * \param[in] moving_req is set when obtaining resources while moving a
- * request from a policy on the regular NRS head to a
- * policy on the HP NRS head (via
- * ldlm_lock_reorder_req()). It signifies that
- * allocations to get resources should be atomic; for
- * a full explanation, see comment in
- * ptlrpc_nrs_pol_ops::op_res_get().
- */
-static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs,
- struct ptlrpc_nrs_request *nrq,
- struct ptlrpc_nrs_resource **resp,
- bool moving_req)
-{
- struct ptlrpc_nrs_policy *primary = NULL;
- struct ptlrpc_nrs_policy *fallback = NULL;
-
- memset(resp, 0, sizeof(resp[0]) * NRS_RES_MAX);
-
- /**
- * Obtain policy references.
- */
- spin_lock(&nrs->nrs_lock);
-
- fallback = nrs->nrs_policy_fallback;
- nrs_policy_get_locked(fallback);
-
- primary = nrs->nrs_policy_primary;
- if (primary)
- nrs_policy_get_locked(primary);
-
- spin_unlock(&nrs->nrs_lock);
-
- /**
- * Obtain resource hierarchy references.
- */
- resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req);
- LASSERT(resp[NRS_RES_FALLBACK]);
-
- if (primary) {
- resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq,
- moving_req);
- /**
- * A primary policy may exist which may not wish to serve a
- * particular request for different reasons; release the
- * reference on the policy as it will not be used for this
- * request.
- */
- if (!resp[NRS_RES_PRIMARY])
- nrs_policy_put(primary);
- }
-}
-
-/**
- * Releases references to resource hierarchies and policies, because they are no
- * longer required; used when request handling has been completed, or the
- * request is moving to the high priority NRS head.
- *
- * \param resp the resource hierarchy that is being released
- *
- * \see ptlrpc_nrs_req_finalize()
- */
-static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp)
-{
- struct ptlrpc_nrs_policy *pols[NRS_RES_MAX];
- int i;
-
- for (i = 0; i < NRS_RES_MAX; i++) {
- if (resp[i]) {
- pols[i] = resp[i]->res_policy;
- nrs_resource_put(resp[i]);
- resp[i] = NULL;
- } else {
- pols[i] = NULL;
- }
- }
-
- for (i = 0; i < NRS_RES_MAX; i++) {
- if (pols[i])
- nrs_policy_put(pols[i]);
- }
-}
-
-/**
- * Obtains an NRS request from \a policy for handling or examination; the
- * request should be removed in the 'handling' case.
- *
- * Calling into this function implies we already know the policy has a request
- * waiting to be handled.
- *
- * \param[in] policy the policy from which a request
- * \param[in] peek when set, signifies that we just want to examine the
- * request, and not handle it, so the request is not removed
- * from the policy.
- * \param[in] force when set, it will force a policy to return a request if it
- * has one pending
- *
- * \retval the NRS request to be handled
- */
-static inline
-struct ptlrpc_nrs_request *nrs_request_get(struct ptlrpc_nrs_policy *policy,
- bool peek, bool force)
-{
- struct ptlrpc_nrs_request *nrq;
-
- LASSERT(policy->pol_req_queued > 0);
-
- nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force);
-
- LASSERT(ergo(nrq, nrs_request_policy(nrq) == policy));
-
- return nrq;
-}
-
-/**
- * Enqueues request \a nrq for later handling, via one one the policies for
- * which resources where earlier obtained via nrs_resource_get_safe(). The
- * function attempts to enqueue the request first on the primary policy
- * (if any), since this is the preferred choice.
- *
- * \param nrq the request being enqueued
- *
- * \see nrs_resource_get_safe()
- */
-static inline void nrs_request_enqueue(struct ptlrpc_nrs_request *nrq)
-{
- struct ptlrpc_nrs_policy *policy;
- int rc;
- int i;
-
- /**
- * Try in descending order, because the primary policy (if any) is
- * the preferred choice.
- */
- for (i = NRS_RES_MAX - 1; i >= 0; i--) {
- if (!nrq->nr_res_ptrs[i])
- continue;
-
- nrq->nr_res_idx = i;
- policy = nrq->nr_res_ptrs[i]->res_policy;
-
- rc = policy->pol_desc->pd_ops->op_req_enqueue(policy, nrq);
- if (rc == 0) {
- policy->pol_nrs->nrs_req_queued++;
- policy->pol_req_queued++;
- return;
- }
- }
- /**
- * Should never get here, as at least the primary policy's
- * ptlrpc_nrs_pol_ops::op_req_enqueue() implementation should always
- * succeed.
- */
- LBUG();
-}
-
-/**
- * Called when a request has been handled
- *
- * \param[in] nrs the request that has been handled; can be used for
- * job/resource control.
- *
- * \see ptlrpc_nrs_req_stop_nolock()
- */
-static inline void nrs_request_stop(struct ptlrpc_nrs_request *nrq)
-{
- struct ptlrpc_nrs_policy *policy = nrs_request_policy(nrq);
-
- if (policy->pol_desc->pd_ops->op_req_stop)
- policy->pol_desc->pd_ops->op_req_stop(policy, nrq);
-
- LASSERT(policy->pol_nrs->nrs_req_started > 0);
- LASSERT(policy->pol_req_started > 0);
-
- policy->pol_nrs->nrs_req_started--;
- policy->pol_req_started--;
-}
-
-/**
- * Handler for operations that can be carried out on policies.
- *
- * Handles opcodes that are common to all policy types within NRS core, and
- * passes any unknown opcodes to the policy-specific control function.
- *
- * \param[in] nrs the NRS head this policy belongs to.
- * \param[in] name the human-readable policy name; should be the same as
- * ptlrpc_nrs_pol_desc::pd_name.
- * \param[in] opc the opcode of the operation being carried out.
- * \param[in,out] arg can be used to pass information in and out between when
- * carrying an operation; usually data that is private to
- * the policy at some level, or generic policy status
- * information.
- *
- * \retval -ve error condition
- * \retval 0 operation was carried out successfully
- */
-static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
- enum ptlrpc_nrs_ctl opc, void *arg)
-{
- struct ptlrpc_nrs_policy *policy;
- int rc = 0;
-
- spin_lock(&nrs->nrs_lock);
-
- policy = nrs_policy_find_locked(nrs, name);
- if (!policy) {
- rc = -ENOENT;
- goto out;
- }
-
- if (policy->pol_state != NRS_POL_STATE_STARTED &&
- policy->pol_state != NRS_POL_STATE_STOPPED) {
- rc = -EAGAIN;
- goto out;
- }
-
- switch (opc) {
- /**
- * Unknown opcode, pass it down to the policy-specific control
- * function for handling.
- */
- default:
- rc = nrs_policy_ctl_locked(policy, opc, arg);
- break;
-
- /**
- * Start \e policy
- */
- case PTLRPC_NRS_CTL_START:
- rc = nrs_policy_start_locked(policy);
- break;
- }
-out:
- if (policy)
- nrs_policy_put_locked(policy);
-
- spin_unlock(&nrs->nrs_lock);
-
- return rc;
-}
-
-/**
- * Unregisters a policy by name.
- *
- * \param[in] nrs the NRS head this policy belongs to.
- * \param[in] name the human-readable policy name; should be the same as
- * ptlrpc_nrs_pol_desc::pd_name
- *
- * \retval -ve error
- * \retval 0 success
- */
-static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
-{
- struct ptlrpc_nrs_policy *policy = NULL;
-
- spin_lock(&nrs->nrs_lock);
-
- policy = nrs_policy_find_locked(nrs, name);
- if (!policy) {
- spin_unlock(&nrs->nrs_lock);
-
- CERROR("Can't find NRS policy %s\n", name);
- return -ENOENT;
- }
-
- if (policy->pol_ref > 1) {
- CERROR("Policy %s is busy with %d references\n", name,
- (int)policy->pol_ref);
- nrs_policy_put_locked(policy);
-
- spin_unlock(&nrs->nrs_lock);
- return -EBUSY;
- }
-
- LASSERT(policy->pol_req_queued == 0);
- LASSERT(policy->pol_req_started == 0);
-
- if (policy->pol_state != NRS_POL_STATE_STOPPED) {
- nrs_policy_stop_locked(policy);
- LASSERT(policy->pol_state == NRS_POL_STATE_STOPPED);
- }
-
- list_del(&policy->pol_list);
- nrs->nrs_num_pols--;
-
- nrs_policy_put_locked(policy);
-
- spin_unlock(&nrs->nrs_lock);
-
- nrs_policy_fini(policy);
-
- LASSERT(!policy->pol_private);
- kfree(policy);
-
- return 0;
-}
-
-/**
- * Register a policy from \policy descriptor \a desc with NRS head \a nrs.
- *
- * \param[in] nrs the NRS head on which the policy will be registered.
- * \param[in] desc the policy descriptor from which the information will be
- * obtained to register the policy.
- *
- * \retval -ve error
- * \retval 0 success
- */
-static int nrs_policy_register(struct ptlrpc_nrs *nrs,
- struct ptlrpc_nrs_pol_desc *desc)
-{
- struct ptlrpc_nrs_policy *policy;
- struct ptlrpc_nrs_policy *tmp;
- struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
- int rc;
-
- LASSERT(desc->pd_ops->op_res_get);
- LASSERT(desc->pd_ops->op_req_get);
- LASSERT(desc->pd_ops->op_req_enqueue);
- LASSERT(desc->pd_ops->op_req_dequeue);
- LASSERT(desc->pd_compat);
-
- policy = kzalloc_node(sizeof(*policy), GFP_NOFS,
- cfs_cpt_spread_node(svcpt->scp_service->srv_cptable,
- svcpt->scp_cpt));
- if (!policy)
- return -ENOMEM;
-
- policy->pol_nrs = nrs;
- policy->pol_desc = desc;
- policy->pol_state = NRS_POL_STATE_STOPPED;
- policy->pol_flags = desc->pd_flags;
-
- INIT_LIST_HEAD(&policy->pol_list);
- INIT_LIST_HEAD(&policy->pol_list_queued);
-
- rc = nrs_policy_init(policy);
- if (rc != 0) {
- kfree(policy);
- return rc;
- }
-
- spin_lock(&nrs->nrs_lock);
-
- tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name);
- if (tmp) {
- CERROR("NRS policy %s has been registered, can't register it for %s\n",
- policy->pol_desc->pd_name,
- svcpt->scp_service->srv_name);
- nrs_policy_put_locked(tmp);
-
- spin_unlock(&nrs->nrs_lock);
- nrs_policy_fini(policy);
- kfree(policy);
-
- return -EEXIST;
- }
-
- list_add_tail(&policy->pol_list, &nrs->nrs_policy_list);
- nrs->nrs_num_pols++;
-
- if (policy->pol_flags & PTLRPC_NRS_FL_REG_START)
- rc = nrs_policy_start_locked(policy);
-
- spin_unlock(&nrs->nrs_lock);
-
- if (rc != 0)
- (void)nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
-
- return rc;
-}
-
-/**
- * Enqueue request \a req using one of the policies its resources are referring
- * to.
- *
- * \param[in] req the request to enqueue.
- */
-static void ptlrpc_nrs_req_add_nolock(struct ptlrpc_request *req)
-{
- struct ptlrpc_nrs_policy *policy;
-
- LASSERT(req->rq_nrq.nr_initialized);
- LASSERT(!req->rq_nrq.nr_enqueued);
-
- nrs_request_enqueue(&req->rq_nrq);
- req->rq_nrq.nr_enqueued = 1;
-
- policy = nrs_request_policy(&req->rq_nrq);
- /**
- * Add the policy to the NRS head's list of policies with enqueued
- * requests, if it has not been added there.
- */
- if (unlikely(list_empty(&policy->pol_list_queued)))
- list_add_tail(&policy->pol_list_queued,
- &policy->pol_nrs->nrs_policy_queued);
-}
-
-/**
- * Enqueue a request on the high priority NRS head.
- *
- * \param req the request to enqueue.
- */
-static void ptlrpc_nrs_hpreq_add_nolock(struct ptlrpc_request *req)
-{
- int opc = lustre_msg_get_opc(req->rq_reqmsg);
-
- spin_lock(&req->rq_lock);
- req->rq_hp = 1;
- ptlrpc_nrs_req_add_nolock(req);
- if (opc != OBD_PING)
- DEBUG_REQ(D_NET, req, "high priority req");
- spin_unlock(&req->rq_lock);
-}
-
-/**
- * Returns a boolean predicate indicating whether the policy described by
- * \a desc is adequate for use with service \a svc.
- *
- * \param[in] svc the service
- * \param[in] desc the policy descriptor
- *
- * \retval false the policy is not compatible with the service
- * \retval true the policy is compatible with the service
- */
-static inline bool nrs_policy_compatible(const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
-{
- return desc->pd_compat(svc, desc);
-}
-
-/**
- * Registers all compatible policies in nrs_core.nrs_policies, for NRS head
- * \a nrs.
- *
- * \param[in] nrs the NRS head
- *
- * \retval -ve error
- * \retval 0 success
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- *
- * \see ptlrpc_service_nrs_setup()
- */
-static int nrs_register_policies_locked(struct ptlrpc_nrs *nrs)
-{
- struct ptlrpc_nrs_pol_desc *desc;
- /* for convenience */
- struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- int rc = -EINVAL;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
-
- list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
- if (nrs_policy_compatible(svc, desc)) {
- rc = nrs_policy_register(nrs, desc);
- if (rc != 0) {
- CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n",
- desc->pd_name, svcpt->scp_cpt,
- svc->srv_name, rc);
- /**
- * Fail registration if any of the policies'
- * registration fails.
- */
- break;
- }
- }
- }
-
- return rc;
-}
-
-/**
- * Initializes NRS head \a nrs of service partition \a svcpt, and registers all
- * compatible policies in NRS core, with the NRS head.
- *
- * \param[in] nrs the NRS head
- * \param[in] svcpt the PTLRPC service partition to setup
- *
- * \retval -ve error
- * \retval 0 success
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- */
-static int nrs_svcpt_setup_locked0(struct ptlrpc_nrs *nrs,
- struct ptlrpc_service_part *svcpt)
-{
- enum ptlrpc_nrs_queue_type queue;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
-
- if (nrs == &svcpt->scp_nrs_reg)
- queue = PTLRPC_NRS_QUEUE_REG;
- else if (nrs == svcpt->scp_nrs_hp)
- queue = PTLRPC_NRS_QUEUE_HP;
- else
- LBUG();
-
- nrs->nrs_svcpt = svcpt;
- nrs->nrs_queue_type = queue;
- spin_lock_init(&nrs->nrs_lock);
- INIT_LIST_HEAD(&nrs->nrs_policy_list);
- INIT_LIST_HEAD(&nrs->nrs_policy_queued);
-
- return nrs_register_policies_locked(nrs);
-}
-
-/**
- * Allocates a regular and optionally a high-priority NRS head (if the service
- * handles high-priority RPCs), and then registers all available compatible
- * policies on those NRS heads.
- *
- * \param[in,out] svcpt the PTLRPC service partition to setup
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- */
-static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_nrs *nrs;
- int rc;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
-
- /**
- * Initialize the regular NRS head.
- */
- nrs = nrs_svcpt2nrs(svcpt, false);
- rc = nrs_svcpt_setup_locked0(nrs, svcpt);
- if (rc < 0)
- goto out;
-
- /**
- * Optionally allocate a high-priority NRS head.
- */
- if (!svcpt->scp_service->srv_ops.so_hpreq_handler)
- goto out;
-
- svcpt->scp_nrs_hp =
- kzalloc_node(sizeof(*svcpt->scp_nrs_hp), GFP_NOFS,
- cfs_cpt_spread_node(svcpt->scp_service->srv_cptable,
- svcpt->scp_cpt));
- if (!svcpt->scp_nrs_hp) {
- rc = -ENOMEM;
- goto out;
- }
-
- nrs = nrs_svcpt2nrs(svcpt, true);
- rc = nrs_svcpt_setup_locked0(nrs, svcpt);
-
-out:
- return rc;
-}
-
-/**
- * Unregisters all policies on all available NRS heads in a service partition;
- * called at PTLRPC service unregistration time.
- *
- * \param[in] svcpt the PTLRPC service partition
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- */
-static void nrs_svcpt_cleanup_locked(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_nrs *nrs;
- struct ptlrpc_nrs_policy *policy;
- struct ptlrpc_nrs_policy *tmp;
- int rc;
- bool hp = false;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
-
-again:
- /* scp_nrs_hp could be NULL due to short of memory. */
- nrs = hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg;
- /* check the nrs_svcpt to see if nrs is initialized. */
- if (!nrs || !nrs->nrs_svcpt)
- return;
- nrs->nrs_stopping = 1;
-
- list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) {
- rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
- LASSERT(rc == 0);
- }
-
- /**
- * If the service partition has an HP NRS head, clean that up as well.
- */
- if (!hp && nrs_svcpt_has_hp(svcpt)) {
- hp = true;
- goto again;
- }
-
- if (hp)
- kfree(nrs);
-}
-
-/**
- * Returns the descriptor for a policy as identified by by \a name.
- *
- * \param[in] name the policy name
- *
- * \retval the policy descriptor
- * \retval NULL
- */
-static struct ptlrpc_nrs_pol_desc *nrs_policy_find_desc_locked(const char *name)
-{
- struct ptlrpc_nrs_pol_desc *tmp;
-
- list_for_each_entry(tmp, &nrs_core.nrs_policies, pd_list) {
- if (strncmp(tmp->pd_name, name, NRS_POL_NAME_MAX) == 0)
- return tmp;
- }
- return NULL;
-}
-
-/**
- * Removes the policy from all supported NRS heads of all partitions of all
- * PTLRPC services.
- *
- * \param[in] desc the policy descriptor to unregister
- *
- * \retval -ve error
- * \retval 0 successfully unregistered policy on all supported NRS heads
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- * \pre mutex_is_locked(&ptlrpc_all_services_mutex)
- */
-static int nrs_policy_unregister_locked(struct ptlrpc_nrs_pol_desc *desc)
-{
- struct ptlrpc_nrs *nrs;
- struct ptlrpc_service *svc;
- struct ptlrpc_service_part *svcpt;
- int i;
- int rc = 0;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
- LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
-
- list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
- if (!nrs_policy_compatible(svc, desc) ||
- unlikely(svc->srv_is_stopping))
- continue;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- bool hp = false;
-
-again:
- nrs = nrs_svcpt2nrs(svcpt, hp);
- rc = nrs_policy_unregister(nrs, desc->pd_name);
- /**
- * Ignore -ENOENT as the policy may not have registered
- * successfully on all service partitions.
- */
- if (rc == -ENOENT) {
- rc = 0;
- } else if (rc != 0) {
- CERROR("Failed to unregister NRS policy %s for partition %d of service %s: %d\n",
- desc->pd_name, svcpt->scp_cpt,
- svcpt->scp_service->srv_name, rc);
- return rc;
- }
-
- if (!hp && nrs_svc_has_hp(svc)) {
- hp = true;
- goto again;
- }
- }
-
- if (desc->pd_ops->op_lprocfs_fini)
- desc->pd_ops->op_lprocfs_fini(svc);
- }
-
- return rc;
-}
-
-/**
- * Registers a new policy with NRS core.
- *
- * The function will only succeed if policy registration with all compatible
- * service partitions (if any) is successful.
- *
- * N.B. This function should be called either at ptlrpc module initialization
- * time when registering a policy that ships with NRS core, or in a
- * module's init() function for policies registering from other modules.
- *
- * \param[in] conf configuration information for the new policy to register
- *
- * \retval -ve error
- * \retval 0 success
- */
-static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
-{
- struct ptlrpc_service *svc;
- struct ptlrpc_nrs_pol_desc *desc;
- size_t len;
- int rc = 0;
-
- LASSERT(conf->nc_ops);
- LASSERT(conf->nc_compat);
- LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one,
- conf->nc_compat_svc_name));
- LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0,
- conf->nc_owner));
-
- conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0';
-
- /**
- * External policies are not allowed to start immediately upon
- * registration, as there is a relatively higher chance that their
- * registration might fail. In such a case, some policy instances may
- * already have requests queued wen unregistration needs to happen as
- * part o cleanup; since there is currently no way to drain requests
- * from a policy unless the service is unregistering, we just disallow
- * this.
- */
- if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) &&
- (conf->nc_flags & (PTLRPC_NRS_FL_FALLBACK |
- PTLRPC_NRS_FL_REG_START))) {
- CERROR("NRS: failing to register policy %s. Please check policy flags; external policies cannot act as fallback policies, or be started immediately upon registration without interaction with lprocfs\n",
- conf->nc_name);
- return -EINVAL;
- }
-
- mutex_lock(&nrs_core.nrs_mutex);
-
- if (nrs_policy_find_desc_locked(conf->nc_name)) {
- CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n",
- conf->nc_name);
- rc = -EEXIST;
- goto fail;
- }
-
- desc = kzalloc(sizeof(*desc), GFP_NOFS);
- if (!desc) {
- rc = -ENOMEM;
- goto fail;
- }
-
- len = strlcpy(desc->pd_name, conf->nc_name, sizeof(desc->pd_name));
- if (len >= sizeof(desc->pd_name)) {
- kfree(desc);
- rc = -E2BIG;
- goto fail;
- }
- desc->pd_ops = conf->nc_ops;
- desc->pd_compat = conf->nc_compat;
- desc->pd_compat_svc_name = conf->nc_compat_svc_name;
- if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0)
- desc->pd_owner = conf->nc_owner;
- desc->pd_flags = conf->nc_flags;
- atomic_set(&desc->pd_refs, 0);
-
- /**
- * For policies that are held in the same module as NRS (currently
- * ptlrpc), do not register the policy with all compatible services,
- * as the services will not have started at this point, since we are
- * calling from ptlrpc module initialization code. In such cases each
- * service will register all compatible policies later, via
- * ptlrpc_service_nrs_setup().
- */
- if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) == 0)
- goto internal;
-
- /**
- * Register the new policy on all compatible services
- */
- mutex_lock(&ptlrpc_all_services_mutex);
-
- list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
- struct ptlrpc_service_part *svcpt;
- int i;
- int rc2;
-
- if (!nrs_policy_compatible(svc, desc) ||
- unlikely(svc->srv_is_stopping))
- continue;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- struct ptlrpc_nrs *nrs;
- bool hp = false;
-again:
- nrs = nrs_svcpt2nrs(svcpt, hp);
- rc = nrs_policy_register(nrs, desc);
- if (rc != 0) {
- CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n",
- desc->pd_name, svcpt->scp_cpt,
- svcpt->scp_service->srv_name, rc);
-
- rc2 = nrs_policy_unregister_locked(desc);
- /**
- * Should not fail at this point
- */
- LASSERT(rc2 == 0);
- mutex_unlock(&ptlrpc_all_services_mutex);
- kfree(desc);
- goto fail;
- }
-
- if (!hp && nrs_svc_has_hp(svc)) {
- hp = true;
- goto again;
- }
- }
-
- /**
- * No need to take a reference to other modules here, as we
- * will be calling from the module's init() function.
- */
- if (desc->pd_ops->op_lprocfs_init) {
- rc = desc->pd_ops->op_lprocfs_init(svc);
- if (rc != 0) {
- rc2 = nrs_policy_unregister_locked(desc);
- /**
- * Should not fail at this point
- */
- LASSERT(rc2 == 0);
- mutex_unlock(&ptlrpc_all_services_mutex);
- kfree(desc);
- goto fail;
- }
- }
- }
-
- mutex_unlock(&ptlrpc_all_services_mutex);
-internal:
- list_add_tail(&desc->pd_list, &nrs_core.nrs_policies);
-fail:
- mutex_unlock(&nrs_core.nrs_mutex);
-
- return rc;
-}
-
-/**
- * Setup NRS heads on all service partitions of service \a svc, and register
- * all compatible policies on those NRS heads.
- *
- * To be called from within ptl
- * \param[in] svc the service to setup
- *
- * \retval -ve error, the calling logic should eventually call
- * ptlrpc_service_nrs_cleanup() to undo any work performed
- * by this function.
- *
- * \see ptlrpc_register_service()
- * \see ptlrpc_service_nrs_cleanup()
- */
-int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- const struct ptlrpc_nrs_pol_desc *desc;
- int i;
- int rc = 0;
-
- mutex_lock(&nrs_core.nrs_mutex);
-
- /**
- * Initialize NRS heads on all service CPTs.
- */
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- rc = nrs_svcpt_setup_locked(svcpt);
- if (rc != 0)
- goto failed;
- }
-
- /**
- * Set up lprocfs interfaces for all supported policies for the
- * service.
- */
- list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
- if (!nrs_policy_compatible(svc, desc))
- continue;
-
- if (desc->pd_ops->op_lprocfs_init) {
- rc = desc->pd_ops->op_lprocfs_init(svc);
- if (rc != 0)
- goto failed;
- }
- }
-
-failed:
-
- mutex_unlock(&nrs_core.nrs_mutex);
-
- return rc;
-}
-
-/**
- * Unregisters all policies on all service partitions of service \a svc.
- *
- * \param[in] svc the PTLRPC service to unregister
- */
-void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- const struct ptlrpc_nrs_pol_desc *desc;
- int i;
-
- mutex_lock(&nrs_core.nrs_mutex);
-
- /**
- * Clean up NRS heads on all service partitions
- */
- ptlrpc_service_for_each_part(svcpt, i, svc)
- nrs_svcpt_cleanup_locked(svcpt);
-
- /**
- * Clean up lprocfs interfaces for all supported policies for the
- * service.
- */
- list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
- if (!nrs_policy_compatible(svc, desc))
- continue;
-
- if (desc->pd_ops->op_lprocfs_fini)
- desc->pd_ops->op_lprocfs_fini(svc);
- }
-
- mutex_unlock(&nrs_core.nrs_mutex);
-}
-
-/**
- * Obtains NRS head resources for request \a req.
- *
- * These could be either on the regular or HP NRS head of \a svcpt; resources
- * taken on the regular head can later be swapped for HP head resources by
- * ldlm_lock_reorder_req().
- *
- * \param[in] svcpt the service partition
- * \param[in] req the request
- * \param[in] hp which NRS head of \a svcpt to use
- */
-void ptlrpc_nrs_req_initialize(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req, bool hp)
-{
- struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp);
-
- memset(&req->rq_nrq, 0, sizeof(req->rq_nrq));
- nrs_resource_get_safe(nrs, &req->rq_nrq, req->rq_nrq.nr_res_ptrs,
- false);
-
- /**
- * It is fine to access \e nr_initialized without locking as there is
- * no contention at this early stage.
- */
- req->rq_nrq.nr_initialized = 1;
-}
-
-/**
- * Releases resources for a request; is called after the request has been
- * handled.
- *
- * \param[in] req the request
- *
- * \see ptlrpc_server_finish_request()
- */
-void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req)
-{
- if (req->rq_nrq.nr_initialized) {
- nrs_resource_put_safe(req->rq_nrq.nr_res_ptrs);
- /* no protection on bit nr_initialized because no
- * contention at this late stage
- */
- req->rq_nrq.nr_finalized = 1;
- }
-}
-
-void ptlrpc_nrs_req_stop_nolock(struct ptlrpc_request *req)
-{
- if (req->rq_nrq.nr_started)
- nrs_request_stop(&req->rq_nrq);
-}
-
-/**
- * Enqueues request \a req on either the regular or high-priority NRS head
- * of service partition \a svcpt.
- *
- * \param[in] svcpt the service partition
- * \param[in] req the request to be enqueued
- * \param[in] hp whether to enqueue the request on the regular or
- * high-priority NRS head.
- */
-void ptlrpc_nrs_req_add(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req, bool hp)
-{
- spin_lock(&svcpt->scp_req_lock);
-
- if (hp)
- ptlrpc_nrs_hpreq_add_nolock(req);
- else
- ptlrpc_nrs_req_add_nolock(req);
-
- spin_unlock(&svcpt->scp_req_lock);
-}
-
-static void nrs_request_removed(struct ptlrpc_nrs_policy *policy)
-{
- LASSERT(policy->pol_nrs->nrs_req_queued > 0);
- LASSERT(policy->pol_req_queued > 0);
-
- policy->pol_nrs->nrs_req_queued--;
- policy->pol_req_queued--;
-
- /**
- * If the policy has no more requests queued, remove it from
- * ptlrpc_nrs::nrs_policy_queued.
- */
- if (unlikely(policy->pol_req_queued == 0)) {
- list_del_init(&policy->pol_list_queued);
-
- /**
- * If there are other policies with queued requests, move the
- * current policy to the end so that we can round robin over
- * all policies and drain the requests.
- */
- } else if (policy->pol_req_queued != policy->pol_nrs->nrs_req_queued) {
- LASSERT(policy->pol_req_queued <
- policy->pol_nrs->nrs_req_queued);
-
- list_move_tail(&policy->pol_list_queued,
- &policy->pol_nrs->nrs_policy_queued);
- }
-}
-
-/**
- * Obtains a request for handling from an NRS head of service partition
- * \a svcpt.
- *
- * \param[in] svcpt the service partition
- * \param[in] hp whether to obtain a request from the regular or
- * high-priority NRS head.
- * \param[in] peek when set, signifies that we just want to examine the
- * request, and not handle it, so the request is not removed
- * from the policy.
- * \param[in] force when set, it will force a policy to return a request if it
- * has one pending
- *
- * \retval the request to be handled
- * \retval NULL the head has no requests to serve
- */
-struct ptlrpc_request *
-ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp,
- bool peek, bool force)
-{
- struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp);
- struct ptlrpc_nrs_policy *policy;
- struct ptlrpc_nrs_request *nrq;
-
- /**
- * Always try to drain requests from all NRS polices even if they are
- * inactive, because the user can change policy status at runtime.
- */
- list_for_each_entry(policy, &nrs->nrs_policy_queued, pol_list_queued) {
- nrq = nrs_request_get(policy, peek, force);
- if (nrq) {
- if (likely(!peek)) {
- nrq->nr_started = 1;
-
- policy->pol_req_started++;
- policy->pol_nrs->nrs_req_started++;
-
- nrs_request_removed(policy);
- }
-
- return container_of(nrq, struct ptlrpc_request, rq_nrq);
- }
- }
-
- return NULL;
-}
-
-/**
- * Returns whether there are any requests currently enqueued on any of the
- * policies of service partition's \a svcpt NRS head specified by \a hp. Should
- * be called while holding ptlrpc_service_part::scp_req_lock to get a reliable
- * result.
- *
- * \param[in] svcpt the service partition to enquire.
- * \param[in] hp whether the regular or high-priority NRS head is to be
- * enquired.
- *
- * \retval false the indicated NRS head has no enqueued requests.
- * \retval true the indicated NRS head has some enqueued requests.
- */
-bool ptlrpc_nrs_req_pending_nolock(struct ptlrpc_service_part *svcpt, bool hp)
-{
- struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp);
-
- return nrs->nrs_req_queued > 0;
-};
-
-/**
- * Carries out a control operation \a opc on the policy identified by the
- * human-readable \a name, on either all partitions, or only on the first
- * partition of service \a svc.
- *
- * \param[in] svc the service the policy belongs to.
- * \param[in] queue whether to carry out the command on the policy which
- * belongs to the regular, high-priority, or both NRS
- * heads of service partitions of \a svc.
- * \param[in] name the policy to act upon, by human-readable name
- * \param[in] opc the opcode of the operation to carry out
- * \param[in] single when set, the operation will only be carried out on the
- * NRS heads of the first service partition of \a svc.
- * This is useful for some policies which e.g. share
- * identical values on the same parameters of different
- * service partitions; when reading these parameters via
- * lprocfs, these policies may just want to obtain and
- * print out the values from the first service partition.
- * Storing these values centrally elsewhere then could be
- * another solution for this.
- * \param[in,out] arg can be used as a generic in/out buffer between control
- * operations and the user environment.
- *
- *\retval -ve error condition
- *\retval 0 operation was carried out successfully
- */
-int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc,
- enum ptlrpc_nrs_queue_type queue, char *name,
- enum ptlrpc_nrs_ctl opc, bool single, void *arg)
-{
- struct ptlrpc_service_part *svcpt;
- int i;
- int rc = 0;
-
- LASSERT(opc != PTLRPC_NRS_CTL_INVALID);
-
- if ((queue & PTLRPC_NRS_QUEUE_BOTH) == 0)
- return -EINVAL;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) {
- rc = nrs_policy_ctl(nrs_svcpt2nrs(svcpt, false), name,
- opc, arg);
- if (rc != 0 || (queue == PTLRPC_NRS_QUEUE_REG &&
- single))
- goto out;
- }
-
- if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) {
- /**
- * XXX: We could optionally check for
- * nrs_svc_has_hp(svc) here, and return an error if it
- * is false. Right now we rely on the policies' lprocfs
- * handlers that call the present function to make this
- * check; if they fail to do so, they might hit the
- * assertion inside nrs_svcpt2nrs() below.
- */
- rc = nrs_policy_ctl(nrs_svcpt2nrs(svcpt, true), name,
- opc, arg);
- if (rc != 0 || single)
- goto out;
- }
- }
-out:
- return rc;
-}
-
-/**
- * Adds all policies that ship with the ptlrpc module, to NRS core's list of
- * policies \e nrs_core.nrs_policies.
- *
- * \retval 0 all policies have been registered successfully
- * \retval -ve error
- */
-int ptlrpc_nrs_init(void)
-{
- int rc;
-
- mutex_init(&nrs_core.nrs_mutex);
- INIT_LIST_HEAD(&nrs_core.nrs_policies);
-
- rc = ptlrpc_nrs_policy_register(&nrs_conf_fifo);
- if (rc != 0)
- goto fail;
-
- return rc;
-fail:
- /**
- * Since no PTLRPC services have been started at this point, all we need
- * to do for cleanup is to free the descriptors.
- */
- ptlrpc_nrs_fini();
-
- return rc;
-}
-
-/**
- * Removes all policy descriptors from nrs_core::nrs_policies, and frees the
- * policy descriptors.
- *
- * Since all PTLRPC services are stopped at this point, there are no more
- * instances of any policies, because each service will have stopped its policy
- * instances in ptlrpc_service_nrs_cleanup(), so we just need to free the
- * descriptors here.
- */
-void ptlrpc_nrs_fini(void)
-{
- struct ptlrpc_nrs_pol_desc *desc;
- struct ptlrpc_nrs_pol_desc *tmp;
-
- list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, pd_list) {
- list_del_init(&desc->pd_list);
- kfree(desc);
- }
-}
-
-/** @} nrs */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
deleted file mode 100644
index 8251cbf2ad68..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
+++ /dev/null
@@ -1,269 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details. A copy is
- * included in the COPYING file that accompanied this code.
-
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2011 Intel Corporation
- *
- * Copyright 2012 Xyratex Technology Limited
- */
-/*
- * lustre/ptlrpc/nrs_fifo.c
- *
- * Network Request Scheduler (NRS) FIFO policy
- *
- * Handles RPCs in a FIFO manner, as received from the network. This policy is
- * a logical wrapper around previous, non-NRS functionality. It is used as the
- * default and fallback policy for all types of RPCs on all PTLRPC service
- * partitions, for both regular and high-priority NRS heads. Default here means
- * the policy is the one enabled at PTLRPC service partition startup time, and
- * fallback means the policy is used to handle RPCs that are not handled
- * successfully or are not handled at all by any primary policy that may be
- * enabled on a given NRS head.
- *
- * Author: Liang Zhen <liang@whamcloud.com>
- * Author: Nikitas Angelinas <nikitas_angelinas@xyratex.com>
- */
-/**
- * \addtogoup nrs
- * @{
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include <obd_support.h>
-#include <obd_class.h>
-#include <linux/libcfs/libcfs.h>
-#include "ptlrpc_internal.h"
-
-/**
- * \name fifo
- *
- * The FIFO policy is a logical wrapper around previous, non-NRS functionality.
- * It schedules RPCs in the same order as they are queued from LNet.
- *
- * @{
- */
-
-#define NRS_POL_NAME_FIFO "fifo"
-
-/**
- * Is called before the policy transitions into
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED; allocates and initializes a
- * policy-specific private data structure.
- *
- * \param[in] policy The policy to start
- *
- * \retval -ENOMEM OOM error
- * \retval 0 success
- *
- * \see nrs_policy_register()
- * \see nrs_policy_ctl()
- */
-static int nrs_fifo_start(struct ptlrpc_nrs_policy *policy)
-{
- struct nrs_fifo_head *head;
-
- head = kzalloc_node(sizeof(*head), GFP_NOFS,
- cfs_cpt_spread_node(nrs_pol2cptab(policy),
- nrs_pol2cptid(policy)));
- if (!head)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&head->fh_list);
- policy->pol_private = head;
- return 0;
-}
-
-/**
- * Is called before the policy transitions into
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED; deallocates the policy-specific
- * private data structure.
- *
- * \param[in] policy The policy to stop
- *
- * \see nrs_policy_stop0()
- */
-static void nrs_fifo_stop(struct ptlrpc_nrs_policy *policy)
-{
- struct nrs_fifo_head *head = policy->pol_private;
-
- LASSERT(head);
- LASSERT(list_empty(&head->fh_list));
-
- kfree(head);
-}
-
-/**
- * Is called for obtaining a FIFO policy resource.
- *
- * \param[in] policy The policy on which the request is being asked for
- * \param[in] nrq The request for which resources are being taken
- * \param[in] parent Parent resource, unused in this policy
- * \param[out] resp Resources references are placed in this array
- * \param[in] moving_req Signifies limited caller context; unused in this
- * policy
- *
- * \retval 1 The FIFO policy only has a one-level resource hierarchy, as since
- * it implements a simple scheduling algorithm in which request
- * priority is determined on the request arrival order, it does not
- * need to maintain a set of resources that would otherwise be used
- * to calculate a request's priority.
- *
- * \see nrs_resource_get_safe()
- */
-static int nrs_fifo_res_get(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp, bool moving_req)
-{
- /**
- * Just return the resource embedded inside nrs_fifo_head, and end this
- * resource hierarchy reference request.
- */
- *resp = &((struct nrs_fifo_head *)policy->pol_private)->fh_res;
- return 1;
-}
-
-/**
- * Called when getting a request from the FIFO policy for handling, or just
- * peeking; removes the request from the policy when it is to be handled.
- *
- * \param[in] policy The policy
- * \param[in] peek When set, signifies that we just want to examine the
- * request, and not handle it, so the request is not removed
- * from the policy.
- * \param[in] force Force the policy to return a request; unused in this
- * policy
- *
- * \retval The request to be handled; this is the next request in the FIFO
- * queue
- *
- * \see ptlrpc_nrs_req_get_nolock()
- * \see nrs_request_get()
- */
-static
-struct ptlrpc_nrs_request *nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy,
- bool peek, bool force)
-{
- struct nrs_fifo_head *head = policy->pol_private;
- struct ptlrpc_nrs_request *nrq;
-
- nrq = unlikely(list_empty(&head->fh_list)) ? NULL :
- list_entry(head->fh_list.next, struct ptlrpc_nrs_request,
- nr_u.fifo.fr_list);
-
- if (likely(!peek && nrq)) {
- struct ptlrpc_request *req = container_of(nrq,
- struct ptlrpc_request,
- rq_nrq);
-
- list_del_init(&nrq->nr_u.fifo.fr_list);
-
- CDEBUG(D_RPCTRACE, "NRS start %s request from %s, seq: %llu\n",
- policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer),
- nrq->nr_u.fifo.fr_sequence);
- }
-
- return nrq;
-}
-
-/**
- * Adds request \a nrq to \a policy's list of queued requests
- *
- * \param[in] policy The policy
- * \param[in] nrq The request to add
- *
- * \retval 0 success; nrs_request_enqueue() assumes this function will always
- * succeed
- */
-static int nrs_fifo_req_add(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq)
-{
- struct nrs_fifo_head *head;
-
- head = container_of(nrs_request_resource(nrq), struct nrs_fifo_head,
- fh_res);
- /**
- * Only used for debugging
- */
- nrq->nr_u.fifo.fr_sequence = head->fh_sequence++;
- list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
-
- return 0;
-}
-
-/**
- * Removes request \a nrq from \a policy's list of queued requests.
- *
- * \param[in] policy The policy
- * \param[in] nrq The request to remove
- */
-static void nrs_fifo_req_del(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq)
-{
- LASSERT(!list_empty(&nrq->nr_u.fifo.fr_list));
- list_del_init(&nrq->nr_u.fifo.fr_list);
-}
-
-/**
- * Prints a debug statement right before the request \a nrq stops being
- * handled.
- *
- * \param[in] policy The policy handling the request
- * \param[in] nrq The request being handled
- *
- * \see ptlrpc_server_finish_request()
- * \see ptlrpc_nrs_req_stop_nolock()
- */
-static void nrs_fifo_req_stop(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq)
-{
- struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request,
- rq_nrq);
-
- CDEBUG(D_RPCTRACE, "NRS stop %s request from %s, seq: %llu\n",
- policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer),
- nrq->nr_u.fifo.fr_sequence);
-}
-
-/**
- * FIFO policy operations
- */
-static const struct ptlrpc_nrs_pol_ops nrs_fifo_ops = {
- .op_policy_start = nrs_fifo_start,
- .op_policy_stop = nrs_fifo_stop,
- .op_res_get = nrs_fifo_res_get,
- .op_req_get = nrs_fifo_req_get,
- .op_req_enqueue = nrs_fifo_req_add,
- .op_req_dequeue = nrs_fifo_req_del,
- .op_req_stop = nrs_fifo_req_stop,
-};
-
-/**
- * FIFO policy configuration
- */
-struct ptlrpc_nrs_pol_conf nrs_conf_fifo = {
- .nc_name = NRS_POL_NAME_FIFO,
- .nc_ops = &nrs_fifo_ops,
- .nc_compat = nrs_policy_compat_all,
- .nc_flags = PTLRPC_NRS_FL_FALLBACK |
- PTLRPC_NRS_FL_REG_START
-};
-
-/** @} fifo */
-
-/** @} nrs */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
deleted file mode 100644
index f73463ac401f..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ /dev/null
@@ -1,2313 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/pack_generic.c
- *
- * (Un)packing of OST requests
- *
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eeb@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <linux/libcfs/libcfs.h>
-
-#include <uapi/linux/lustre/lustre_fiemap.h>
-
-#include <llog_swab.h>
-#include <lustre_net.h>
-#include <lustre_swab.h>
-#include <obd_cksum.h>
-#include <obd_support.h>
-#include <obd_class.h>
-
-#include "ptlrpc_internal.h"
-
-static inline u32 lustre_msg_hdr_size_v2(u32 count)
-{
- return cfs_size_round(offsetof(struct lustre_msg_v2,
- lm_buflens[count]));
-}
-
-u32 lustre_msg_hdr_size(__u32 magic, u32 count)
-{
- switch (magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_hdr_size_v2(count);
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", magic);
- return 0;
- }
-}
-
-void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
- u32 index)
-{
- if (inout)
- lustre_set_req_swabbed(req, index);
- else
- lustre_set_rep_swabbed(req, index);
-}
-
-int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
- u32 index)
-{
- if (inout)
- return (ptlrpc_req_need_swab(req) &&
- !lustre_req_swabbed(req, index));
- else
- return (ptlrpc_rep_need_swab(req) &&
- !lustre_rep_swabbed(req, index));
-}
-
-/* early reply size */
-u32 lustre_msg_early_size(void)
-{
- static u32 size;
-
- if (!size) {
- /* Always reply old ptlrpc_body_v2 to keep interoperability
- * with the old client (< 2.3) which doesn't have pb_jobid
- * in the ptlrpc_body.
- *
- * XXX Remove this whenever we drop interoperability with such
- * client.
- */
- __u32 pblen = sizeof(struct ptlrpc_body_v2);
-
- size = lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen);
- }
- return size;
-}
-EXPORT_SYMBOL(lustre_msg_early_size);
-
-u32 lustre_msg_size_v2(int count, __u32 *lengths)
-{
- u32 size;
- int i;
-
- size = lustre_msg_hdr_size_v2(count);
- for (i = 0; i < count; i++)
- size += cfs_size_round(lengths[i]);
-
- return size;
-}
-EXPORT_SYMBOL(lustre_msg_size_v2);
-
-/* This returns the size of the buffer that is required to hold a lustre_msg
- * with the given sub-buffer lengths.
- * NOTE: this should only be used for NEW requests, and should always be
- * in the form of a v2 request. If this is a connection to a v1
- * target then the first buffer will be stripped because the ptlrpc
- * data is part of the lustre_msg_v1 header. b=14043
- */
-u32 lustre_msg_size(__u32 magic, int count, __u32 *lens)
-{
- __u32 size[] = { sizeof(struct ptlrpc_body) };
-
- if (!lens) {
- LASSERT(count == 1);
- lens = size;
- }
-
- LASSERT(count > 0);
- LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2));
-
- switch (magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_size_v2(count, lens);
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", magic);
- return 0;
- }
-}
-
-/* This is used to determine the size of a buffer that was already packed
- * and will correctly handle the different message formats.
- */
-u32 lustre_packed_msg_size(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-
-void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
- char **bufs)
-{
- char *ptr;
- int i;
-
- msg->lm_bufcount = count;
- /* XXX: lm_secflvr uninitialized here */
- msg->lm_magic = LUSTRE_MSG_MAGIC_V2;
-
- for (i = 0; i < count; i++)
- msg->lm_buflens[i] = lens[i];
-
- if (!bufs)
- return;
-
- ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
- for (i = 0; i < count; i++) {
- char *tmp = bufs[i];
-
- if (tmp)
- memcpy(ptr, tmp, lens[i]);
- ptr += cfs_size_round(lens[i]);
- }
-}
-EXPORT_SYMBOL(lustre_init_msg_v2);
-
-static int lustre_pack_request_v2(struct ptlrpc_request *req,
- int count, __u32 *lens, char **bufs)
-{
- int reqlen, rc;
-
- reqlen = lustre_msg_size_v2(count, lens);
-
- rc = sptlrpc_cli_alloc_reqbuf(req, reqlen);
- if (rc)
- return rc;
-
- req->rq_reqlen = reqlen;
-
- lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs);
- lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION);
- return 0;
-}
-
-int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
- __u32 *lens, char **bufs)
-{
- __u32 size[] = { sizeof(struct ptlrpc_body) };
-
- if (!lens) {
- LASSERT(count == 1);
- lens = size;
- }
-
- LASSERT(count > 0);
- LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
-
- /* only use new format, we don't need to be compatible with 1.4 */
- return lustre_pack_request_v2(req, count, lens, bufs);
-}
-
-#if RS_DEBUG
-LIST_HEAD(ptlrpc_rs_debug_lru);
-spinlock_t ptlrpc_rs_debug_lock;
-
-#define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
-do { \
- spin_lock(&ptlrpc_rs_debug_lock); \
- list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
- spin_unlock(&ptlrpc_rs_debug_lock); \
-} while (0)
-
-#define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
-do { \
- spin_lock(&ptlrpc_rs_debug_lock); \
- list_del(&(rs)->rs_debug_list); \
- spin_unlock(&ptlrpc_rs_debug_lock); \
-} while (0)
-#else
-# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while (0)
-# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while (0)
-#endif
-
-struct ptlrpc_reply_state *
-lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_reply_state *rs = NULL;
-
- spin_lock(&svcpt->scp_rep_lock);
-
- /* See if we have anything in a pool, and wait if nothing */
- while (list_empty(&svcpt->scp_rep_idle)) {
- int rc;
-
- spin_unlock(&svcpt->scp_rep_lock);
- /* If we cannot get anything for some long time, we better
- * bail out instead of waiting infinitely
- */
- rc = wait_event_idle_timeout(svcpt->scp_rep_waitq,
- !list_empty(&svcpt->scp_rep_idle),
- 10 * HZ);
- if (rc == 0)
- goto out;
- spin_lock(&svcpt->scp_rep_lock);
- }
-
- rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state, rs_list);
- list_del(&rs->rs_list);
-
- spin_unlock(&svcpt->scp_rep_lock);
-
- memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
- rs->rs_size = svcpt->scp_service->srv_max_reply_size;
- rs->rs_svcpt = svcpt;
- rs->rs_prealloc = 1;
-out:
- return rs;
-}
-
-void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
-{
- struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
-
- spin_lock(&svcpt->scp_rep_lock);
- list_add(&rs->rs_list, &svcpt->scp_rep_idle);
- spin_unlock(&svcpt->scp_rep_lock);
- wake_up(&svcpt->scp_rep_waitq);
-}
-
-int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
- __u32 *lens, char **bufs, int flags)
-{
- struct ptlrpc_reply_state *rs;
- int msg_len, rc;
-
- LASSERT(!req->rq_reply_state);
-
- if ((flags & LPRFL_EARLY_REPLY) == 0) {
- spin_lock(&req->rq_lock);
- req->rq_packed_final = 1;
- spin_unlock(&req->rq_lock);
- }
-
- msg_len = lustre_msg_size_v2(count, lens);
- rc = sptlrpc_svc_alloc_rs(req, msg_len);
- if (rc)
- return rc;
-
- rs = req->rq_reply_state;
- atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
- rs->rs_cb_id.cbid_fn = reply_out_callback;
- rs->rs_cb_id.cbid_arg = rs;
- rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
- INIT_LIST_HEAD(&rs->rs_exp_list);
- INIT_LIST_HEAD(&rs->rs_obd_list);
- INIT_LIST_HEAD(&rs->rs_list);
- spin_lock_init(&rs->rs_lock);
-
- req->rq_replen = msg_len;
- req->rq_reply_state = rs;
- req->rq_repmsg = rs->rs_msg;
-
- lustre_init_msg_v2(rs->rs_msg, count, lens, bufs);
- lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION);
-
- PTLRPC_RS_DEBUG_LRU_ADD(rs);
-
- return 0;
-}
-EXPORT_SYMBOL(lustre_pack_reply_v2);
-
-int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
- char **bufs, int flags)
-{
- int rc = 0;
- __u32 size[] = { sizeof(struct ptlrpc_body) };
-
- if (!lens) {
- LASSERT(count == 1);
- lens = size;
- }
-
- LASSERT(count > 0);
- LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
-
- switch (req->rq_reqmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- rc = lustre_pack_reply_v2(req, count, lens, bufs, flags);
- break;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n",
- req->rq_reqmsg->lm_magic);
- rc = -EINVAL;
- }
- if (rc != 0)
- CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc,
- lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
- return rc;
-}
-
-int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
- char **bufs)
-{
- return lustre_pack_reply_flags(req, count, lens, bufs, 0);
-}
-EXPORT_SYMBOL(lustre_pack_reply);
-
-void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, u32 n, u32 min_size)
-{
- u32 i, offset, buflen, bufcount;
-
- bufcount = m->lm_bufcount;
- if (unlikely(n >= bufcount)) {
- CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n",
- m, n, bufcount);
- return NULL;
- }
-
- buflen = m->lm_buflens[n];
- if (unlikely(buflen < min_size)) {
- CERROR("msg %p buffer[%d] size %d too small (required %d, opc=%d)\n",
- m, n, buflen, min_size,
- n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
- return NULL;
- }
-
- offset = lustre_msg_hdr_size_v2(bufcount);
- for (i = 0; i < n; i++)
- offset += cfs_size_round(m->lm_buflens[i]);
-
- return (char *)m + offset;
-}
-
-void *lustre_msg_buf(struct lustre_msg *m, u32 n, u32 min_size)
-{
- switch (m->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_buf_v2(m, n, min_size);
- default:
- LASSERTF(0, "incorrect message magic: %08x (msg:%p)\n",
- m->lm_magic, m);
- return NULL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_buf);
-
-static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, u32 segment,
- unsigned int newlen, int move_data)
-{
- char *tail = NULL, *newpos;
- int tail_len = 0, n;
-
- LASSERT(msg);
- LASSERT(msg->lm_bufcount > segment);
- LASSERT(msg->lm_buflens[segment] >= newlen);
-
- if (msg->lm_buflens[segment] == newlen)
- goto out;
-
- if (move_data && msg->lm_bufcount > segment + 1) {
- tail = lustre_msg_buf_v2(msg, segment + 1, 0);
- for (n = segment + 1; n < msg->lm_bufcount; n++)
- tail_len += cfs_size_round(msg->lm_buflens[n]);
- }
-
- msg->lm_buflens[segment] = newlen;
-
- if (tail && tail_len) {
- newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
- LASSERT(newpos <= tail);
- if (newpos != tail)
- memmove(newpos, tail, tail_len);
- }
-out:
- return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
-}
-
-/*
- * for @msg, shrink @segment to size @newlen. if @move_data is non-zero,
- * we also move data forward from @segment + 1.
- *
- * if @newlen == 0, we remove the segment completely, but we still keep the
- * totally bufcount the same to save possible data moving. this will leave a
- * unused segment with size 0 at the tail, but that's ok.
- *
- * return new msg size after shrinking.
- *
- * CAUTION:
- * + if any buffers higher than @segment has been filled in, must call shrink
- * with non-zero @move_data.
- * + caller should NOT keep pointers to msg buffers which higher than @segment
- * after call shrink.
- */
-int lustre_shrink_msg(struct lustre_msg *msg, int segment,
- unsigned int newlen, int move_data)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_shrink_msg_v2(msg, segment, newlen, move_data);
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
- return 0;
-}
-EXPORT_SYMBOL(lustre_shrink_msg);
-
-void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
-{
- PTLRPC_RS_DEBUG_LRU_DEL(rs);
-
- LASSERT(atomic_read(&rs->rs_refcount) == 0);
- LASSERT(!rs->rs_difficult || rs->rs_handled);
- LASSERT(!rs->rs_on_net);
- LASSERT(!rs->rs_scheduled);
- LASSERT(!rs->rs_export);
- LASSERT(rs->rs_nlocks == 0);
- LASSERT(list_empty(&rs->rs_exp_list));
- LASSERT(list_empty(&rs->rs_obd_list));
-
- sptlrpc_svc_free_rs(rs);
-}
-
-static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
-{
- int swabbed, required_len, i;
-
- /* Now we know the sender speaks my language. */
- required_len = lustre_msg_hdr_size_v2(0);
- if (len < required_len) {
- /* can't even look inside the message */
- CERROR("message length %d too small for lustre_msg\n", len);
- return -EINVAL;
- }
-
- swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
-
- if (swabbed) {
- __swab32s(&m->lm_magic);
- __swab32s(&m->lm_bufcount);
- __swab32s(&m->lm_secflvr);
- __swab32s(&m->lm_repsize);
- __swab32s(&m->lm_cksum);
- __swab32s(&m->lm_flags);
- BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_2) == 0);
- BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_3) == 0);
- }
-
- required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
- if (len < required_len) {
- /* didn't receive all the buffer lengths */
- CERROR("message length %d too small for %d buflens\n",
- len, m->lm_bufcount);
- return -EINVAL;
- }
-
- for (i = 0; i < m->lm_bufcount; i++) {
- if (swabbed)
- __swab32s(&m->lm_buflens[i]);
- required_len += cfs_size_round(m->lm_buflens[i]);
- }
-
- if (len < required_len) {
- CERROR("len: %d, required_len %d\n", len, required_len);
- CERROR("bufcount: %d\n", m->lm_bufcount);
- for (i = 0; i < m->lm_bufcount; i++)
- CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
- return -EINVAL;
- }
-
- return swabbed;
-}
-
-int __lustre_unpack_msg(struct lustre_msg *m, int len)
-{
- int required_len, rc;
-
- /* We can provide a slightly better error log, if we check the
- * message magic and version first. In the future, struct
- * lustre_msg may grow, and we'd like to log a version mismatch,
- * rather than a short message.
- *
- */
- required_len = offsetof(struct lustre_msg, lm_magic) +
- sizeof(m->lm_magic);
- if (len < required_len) {
- /* can't even look inside the message */
- CERROR("message length %d too small for magic/version check\n",
- len);
- return -EINVAL;
- }
-
- rc = lustre_unpack_msg_v2(m, len);
-
- return rc;
-}
-EXPORT_SYMBOL(__lustre_unpack_msg);
-
-int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
-{
- int rc;
-
- rc = __lustre_unpack_msg(req->rq_reqmsg, len);
- if (rc == 1) {
- lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
- rc = 0;
- }
- return rc;
-}
-
-int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
-{
- int rc;
-
- rc = __lustre_unpack_msg(req->rq_repmsg, len);
- if (rc == 1) {
- lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
- rc = 0;
- }
- return rc;
-}
-
-static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
- const int inout, int offset)
-{
- struct ptlrpc_body *pb;
- struct lustre_msg_v2 *m = inout ? req->rq_reqmsg : req->rq_repmsg;
-
- pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2));
- if (!pb) {
- CERROR("error unpacking ptlrpc body\n");
- return -EFAULT;
- }
- if (ptlrpc_buf_need_swab(req, inout, offset)) {
- lustre_swab_ptlrpc_body(pb);
- ptlrpc_buf_set_swabbed(req, inout, offset);
- }
-
- if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
- CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
- return -EINVAL;
- }
-
- if (!inout)
- pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
-
- return 0;
-}
-
-int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset)
-{
- switch (req->rq_reqmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_unpack_ptlrpc_body_v2(req, 1, offset);
- default:
- CERROR("bad lustre msg magic: %08x\n",
- req->rq_reqmsg->lm_magic);
- return -EINVAL;
- }
-}
-
-int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
-{
- switch (req->rq_repmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_unpack_ptlrpc_body_v2(req, 0, offset);
- default:
- CERROR("bad lustre msg magic: %08x\n",
- req->rq_repmsg->lm_magic);
- return -EINVAL;
- }
-}
-
-static inline u32 lustre_msg_buflen_v2(struct lustre_msg_v2 *m, u32 n)
-{
- if (n >= m->lm_bufcount)
- return 0;
-
- return m->lm_buflens[n];
-}
-
-/**
- * lustre_msg_buflen - return the length of buffer \a n in message \a m
- * \param m lustre_msg (request or reply) to look at
- * \param n message index (base 0)
- *
- * returns zero for non-existent message indices
- */
-u32 lustre_msg_buflen(struct lustre_msg *m, u32 n)
-{
- switch (m->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_buflen_v2(m, n);
- default:
- CERROR("incorrect message magic: %08x\n", m->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_buflen);
-
-/* NB return the bufcount for lustre_msg_v2 format, so if message is packed
- * in V1 format, the result is one bigger. (add struct ptlrpc_body).
- */
-u32 lustre_msg_bufcount(struct lustre_msg *m)
-{
- switch (m->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return m->lm_bufcount;
- default:
- CERROR("incorrect message magic: %08x\n", m->lm_magic);
- return 0;
- }
-}
-
-char *lustre_msg_string(struct lustre_msg *m, u32 index, u32 max_len)
-{
- /* max_len == 0 means the string should fill the buffer */
- char *str;
- u32 slen, blen;
-
- switch (m->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- str = lustre_msg_buf_v2(m, index, 0);
- blen = lustre_msg_buflen_v2(m, index);
- break;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
- }
-
- if (!str) {
- CERROR("can't unpack string in msg %p buffer[%d]\n", m, index);
- return NULL;
- }
-
- slen = strnlen(str, blen);
-
- if (slen == blen) { /* not NULL terminated */
- CERROR("can't unpack non-NULL terminated string in msg %p buffer[%d] len %d\n",
- m, index, blen);
- return NULL;
- }
-
- if (max_len == 0) {
- if (slen != blen - 1) {
- CERROR("can't unpack short string in msg %p buffer[%d] len %d: strlen %d\n",
- m, index, blen, slen);
- return NULL;
- }
- } else if (slen > max_len) {
- CERROR("can't unpack oversized string in msg %p buffer[%d] len %d strlen %d: max %d expected\n",
- m, index, blen, slen, max_len);
- return NULL;
- }
-
- return str;
-}
-
-/* Wrap up the normal fixed length cases */
-static inline void *__lustre_swab_buf(struct lustre_msg *msg, u32 index,
- u32 min_size, void *swabber)
-{
- void *ptr = NULL;
-
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- ptr = lustre_msg_buf_v2(msg, index, min_size);
- break;
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- }
-
- if (ptr && swabber)
- ((void (*)(void *))swabber)(ptr);
-
- return ptr;
-}
-
-static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg)
-{
- return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
- sizeof(struct ptlrpc_body_v2));
-}
-
-__u32 lustre_msghdr_get_flags(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- /* already in host endian */
- return msg->lm_flags;
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msghdr_get_flags);
-
-void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- msg->lm_flags = flags;
- return;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-__u32 lustre_msg_get_flags(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (pb)
- return pb->pb_flags;
-
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- }
- /* fall through */
- default:
- /* flags might be printed in debug code while message
- * uninitialized
- */
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_flags);
-
-void lustre_msg_add_flags(struct lustre_msg *msg, u32 flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_flags |= flags;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_add_flags);
-
-void lustre_msg_set_flags(struct lustre_msg *msg, u32 flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_flags = flags;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_clear_flags(struct lustre_msg *msg, u32 flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_flags &= ~(flags & MSG_GEN_FLAG_MASK);
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_clear_flags);
-
-__u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (pb)
- return pb->pb_op_flags;
-
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- }
- /* fall through */
- default:
- return 0;
- }
-}
-
-void lustre_msg_add_op_flags(struct lustre_msg *msg, u32 flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_op_flags |= flags;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_add_op_flags);
-
-struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return NULL;
- }
- return &pb->pb_handle;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return NULL;
- }
-}
-
-__u32 lustre_msg_get_type(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return PTL_RPC_MSG_ERR;
- }
- return pb->pb_type;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return PTL_RPC_MSG_ERR;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_type);
-
-void lustre_msg_add_version(struct lustre_msg *msg, u32 version)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_version |= version;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-__u32 lustre_msg_get_opc(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_opc;
- }
- default:
- CERROR("incorrect message magic: %08x (msg:%p)\n",
- msg->lm_magic, msg);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_opc);
-
-__u16 lustre_msg_get_tag(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_tag;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_tag);
-
-__u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_last_committed;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_last_committed);
-
-__u64 *lustre_msg_get_versions(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return NULL;
- }
- return pb->pb_pre_versions;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return NULL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_versions);
-
-__u64 lustre_msg_get_transno(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_transno;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_transno);
-
-int lustre_msg_get_status(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (pb)
- return pb->pb_status;
-
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- }
- /* fall through */
- default:
- /* status might be printed in debug code while message
- * uninitialized
- */
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_status);
-
-__u64 lustre_msg_get_slv(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return -EINVAL;
- }
- return pb->pb_slv;
- }
- default:
- CERROR("invalid msg magic %08x\n", msg->lm_magic);
- return -EINVAL;
- }
-}
-
-void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return;
- }
- pb->pb_slv = slv;
- return;
- }
- default:
- CERROR("invalid msg magic %x\n", msg->lm_magic);
- return;
- }
-}
-
-__u32 lustre_msg_get_limit(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return -EINVAL;
- }
- return pb->pb_limit;
- }
- default:
- CERROR("invalid msg magic %x\n", msg->lm_magic);
- return -EINVAL;
- }
-}
-
-void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return;
- }
- pb->pb_limit = limit;
- return;
- }
- default:
- CERROR("invalid msg magic %08x\n", msg->lm_magic);
- return;
- }
-}
-
-__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_conn_cnt;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_conn_cnt);
-
-__u32 lustre_msg_get_magic(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return msg->lm_magic;
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-
-__u32 lustre_msg_get_timeout(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_timeout;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return -EPROTO;
- }
-}
-
-__u32 lustre_msg_get_service_time(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_service_time;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-
-__u32 lustre_msg_get_cksum(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return msg->lm_cksum;
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-
-__u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- __u32 crc;
- unsigned int hsize = 4;
-
- cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
- lustre_msg_buflen(msg,
- MSG_PTLRPC_BODY_OFF),
- NULL, 0, (unsigned char *)&crc, &hsize);
- return crc;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-
-void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_handle = *handle;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_type = type;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_opc = opc;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_last_xid = last_xid;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_tag = tag;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_tag);
-
-void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_pre_versions[0] = versions[0];
- pb->pb_pre_versions[1] = versions[1];
- pb->pb_pre_versions[2] = versions[2];
- pb->pb_pre_versions[3] = versions[3];
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_versions);
-
-void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_transno = transno;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_transno);
-
-void lustre_msg_set_status(struct lustre_msg *msg, __u32 status)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_status = status;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_status);
-
-void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_conn_cnt = conn_cnt;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_timeout = timeout;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_service_time = service_time;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- __u32 opc = lustre_msg_get_opc(msg);
- struct ptlrpc_body *pb;
-
- /* Don't set jobid for ldlm ast RPCs, they've been shrunk.
- * See the comment in ptlrpc_request_pack().
- */
- if (!opc || opc == LDLM_BL_CALLBACK ||
- opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
- return;
-
- pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
- sizeof(struct ptlrpc_body));
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
-
- if (jobid)
- memcpy(pb->pb_jobid, jobid, LUSTRE_JOBID_SIZE);
- else if (pb->pb_jobid[0] == '\0')
- lustre_get_jobid(pb->pb_jobid);
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_jobid);
-
-void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- msg->lm_cksum = cksum;
- return;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_mbits = mbits;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void ptlrpc_request_set_replen(struct ptlrpc_request *req)
-{
- int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
-
- req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count,
- req->rq_pill.rc_area[RCL_SERVER]);
- if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
- req->rq_reqmsg->lm_repsize = req->rq_replen;
-}
-EXPORT_SYMBOL(ptlrpc_request_set_replen);
-
-/**
- * Send a remote set_info_async.
- *
- * This may go from client to server or server to client.
- */
-int do_set_info_async(struct obd_import *imp,
- int opcode, int version,
- u32 keylen, void *key,
- u32 vallen, void *val,
- struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- char *tmp;
- int rc;
-
- req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
- if (!req)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT, keylen);
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
- RCL_CLIENT, vallen);
- rc = ptlrpc_request_pack(req, version, opcode);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
- memcpy(tmp, val, vallen);
-
- ptlrpc_request_set_replen(req);
-
- if (set) {
- ptlrpc_set_add_req(set, req);
- ptlrpc_check_set(NULL, set);
- } else {
- rc = ptlrpc_queue_wait(req);
- ptlrpc_req_finished(req);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(do_set_info_async);
-
-/* byte flipping routines for all wire types declared in
- * lustre_idl.h implemented here.
- */
-void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
-{
- __swab32s(&b->pb_type);
- __swab32s(&b->pb_version);
- __swab32s(&b->pb_opc);
- __swab32s(&b->pb_status);
- __swab64s(&b->pb_last_xid);
- __swab16s(&b->pb_tag);
- __swab64s(&b->pb_last_committed);
- __swab64s(&b->pb_transno);
- __swab32s(&b->pb_flags);
- __swab32s(&b->pb_op_flags);
- __swab32s(&b->pb_conn_cnt);
- __swab32s(&b->pb_timeout);
- __swab32s(&b->pb_service_time);
- __swab32s(&b->pb_limit);
- __swab64s(&b->pb_slv);
- __swab64s(&b->pb_pre_versions[0]);
- __swab64s(&b->pb_pre_versions[1]);
- __swab64s(&b->pb_pre_versions[2]);
- __swab64s(&b->pb_pre_versions[3]);
- __swab64s(&b->pb_mbits);
- BUILD_BUG_ON(offsetof(typeof(*b), pb_padding0) == 0);
- BUILD_BUG_ON(offsetof(typeof(*b), pb_padding1) == 0);
- BUILD_BUG_ON(offsetof(typeof(*b), pb_padding64_0) == 0);
- BUILD_BUG_ON(offsetof(typeof(*b), pb_padding64_1) == 0);
- BUILD_BUG_ON(offsetof(typeof(*b), pb_padding64_2) == 0);
- /* While we need to maintain compatibility between
- * clients and servers without ptlrpc_body_v2 (< 2.3)
- * do not swab any fields beyond pb_jobid, as we are
- * using this swab function for both ptlrpc_body
- * and ptlrpc_body_v2.
- */
- BUILD_BUG_ON(offsetof(typeof(*b), pb_jobid) == 0);
-}
-
-void lustre_swab_connect(struct obd_connect_data *ocd)
-{
- __swab64s(&ocd->ocd_connect_flags);
- __swab32s(&ocd->ocd_version);
- __swab32s(&ocd->ocd_grant);
- __swab64s(&ocd->ocd_ibits_known);
- __swab32s(&ocd->ocd_index);
- __swab32s(&ocd->ocd_brw_size);
- /* ocd_blocksize and ocd_inodespace don't need to be swabbed because
- * they are 8-byte values
- */
- __swab16s(&ocd->ocd_grant_extent);
- __swab32s(&ocd->ocd_unused);
- __swab64s(&ocd->ocd_transno);
- __swab32s(&ocd->ocd_group);
- __swab32s(&ocd->ocd_cksum_types);
- __swab32s(&ocd->ocd_instance);
- /* Fields after ocd_cksum_types are only accessible by the receiver
- * if the corresponding flag in ocd_connect_flags is set. Accessing
- * any field after ocd_maxbytes on the receiver without a valid flag
- * may result in out-of-bound memory access and kernel oops.
- */
- if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
- __swab32s(&ocd->ocd_max_easize);
- if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
- __swab64s(&ocd->ocd_maxbytes);
- if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
- __swab16s(&ocd->ocd_maxmodrpcs);
- BUILD_BUG_ON(!offsetof(typeof(*ocd), padding0));
- BUILD_BUG_ON(offsetof(typeof(*ocd), padding1) == 0);
- if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
- __swab64s(&ocd->ocd_connect_flags2);
- BUILD_BUG_ON(offsetof(typeof(*ocd), padding3) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), padding4) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), padding5) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), padding6) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), padding7) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), padding8) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), padding9) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), paddingA) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), paddingB) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), paddingC) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), paddingD) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), paddingE) == 0);
- BUILD_BUG_ON(offsetof(typeof(*ocd), paddingF) == 0);
-}
-
-static void lustre_swab_obdo(struct obdo *o)
-{
- __swab64s(&o->o_valid);
- lustre_swab_ost_id(&o->o_oi);
- __swab64s(&o->o_parent_seq);
- __swab64s(&o->o_size);
- __swab64s(&o->o_mtime);
- __swab64s(&o->o_atime);
- __swab64s(&o->o_ctime);
- __swab64s(&o->o_blocks);
- __swab64s(&o->o_grant);
- __swab32s(&o->o_blksize);
- __swab32s(&o->o_mode);
- __swab32s(&o->o_uid);
- __swab32s(&o->o_gid);
- __swab32s(&o->o_flags);
- __swab32s(&o->o_nlink);
- __swab32s(&o->o_parent_oid);
- __swab32s(&o->o_misc);
- __swab64s(&o->o_ioepoch);
- __swab32s(&o->o_stripe_idx);
- __swab32s(&o->o_parent_ver);
- /* o_handle is opaque */
- /* o_lcookie is swabbed elsewhere */
- __swab32s(&o->o_uid_h);
- __swab32s(&o->o_gid_h);
- __swab64s(&o->o_data_version);
- BUILD_BUG_ON(offsetof(typeof(*o), o_padding_4) == 0);
- BUILD_BUG_ON(offsetof(typeof(*o), o_padding_5) == 0);
- BUILD_BUG_ON(offsetof(typeof(*o), o_padding_6) == 0);
-}
-
-void lustre_swab_obd_statfs(struct obd_statfs *os)
-{
- __swab64s(&os->os_type);
- __swab64s(&os->os_blocks);
- __swab64s(&os->os_bfree);
- __swab64s(&os->os_bavail);
- __swab64s(&os->os_files);
- __swab64s(&os->os_ffree);
- /* no need to swab os_fsid */
- __swab32s(&os->os_bsize);
- __swab32s(&os->os_namelen);
- __swab64s(&os->os_maxbytes);
- __swab32s(&os->os_state);
- BUILD_BUG_ON(offsetof(typeof(*os), os_fprecreated) == 0);
- BUILD_BUG_ON(offsetof(typeof(*os), os_spare2) == 0);
- BUILD_BUG_ON(offsetof(typeof(*os), os_spare3) == 0);
- BUILD_BUG_ON(offsetof(typeof(*os), os_spare4) == 0);
- BUILD_BUG_ON(offsetof(typeof(*os), os_spare5) == 0);
- BUILD_BUG_ON(offsetof(typeof(*os), os_spare6) == 0);
- BUILD_BUG_ON(offsetof(typeof(*os), os_spare7) == 0);
- BUILD_BUG_ON(offsetof(typeof(*os), os_spare8) == 0);
- BUILD_BUG_ON(offsetof(typeof(*os), os_spare9) == 0);
-}
-
-void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
-{
- lustre_swab_ost_id(&ioo->ioo_oid);
- __swab32s(&ioo->ioo_max_brw);
- __swab32s(&ioo->ioo_bufcnt);
-}
-
-void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
-{
- __swab64s(&nbr->rnb_offset);
- __swab32s(&nbr->rnb_len);
- __swab32s(&nbr->rnb_flags);
-}
-
-void lustre_swab_ost_body(struct ost_body *b)
-{
- lustre_swab_obdo(&b->oa);
-}
-
-void lustre_swab_ost_last_id(u64 *id)
-{
- __swab64s(id);
-}
-
-void lustre_swab_generic_32s(__u32 *val)
-{
- __swab32s(val);
-}
-
-void lustre_swab_gl_desc(union ldlm_gl_desc *desc)
-{
- lustre_swab_lu_fid(&desc->lquota_desc.gl_id.qid_fid);
- __swab64s(&desc->lquota_desc.gl_flags);
- __swab64s(&desc->lquota_desc.gl_ver);
- __swab64s(&desc->lquota_desc.gl_hardlimit);
- __swab64s(&desc->lquota_desc.gl_softlimit);
- __swab64s(&desc->lquota_desc.gl_time);
- BUILD_BUG_ON(offsetof(typeof(desc->lquota_desc), gl_pad2) == 0);
-}
-
-void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb)
-{
- __swab64s(&lvb->lvb_size);
- __swab64s(&lvb->lvb_mtime);
- __swab64s(&lvb->lvb_atime);
- __swab64s(&lvb->lvb_ctime);
- __swab64s(&lvb->lvb_blocks);
-}
-EXPORT_SYMBOL(lustre_swab_ost_lvb_v1);
-
-void lustre_swab_ost_lvb(struct ost_lvb *lvb)
-{
- __swab64s(&lvb->lvb_size);
- __swab64s(&lvb->lvb_mtime);
- __swab64s(&lvb->lvb_atime);
- __swab64s(&lvb->lvb_ctime);
- __swab64s(&lvb->lvb_blocks);
- __swab32s(&lvb->lvb_mtime_ns);
- __swab32s(&lvb->lvb_atime_ns);
- __swab32s(&lvb->lvb_ctime_ns);
- __swab32s(&lvb->lvb_padding);
-}
-EXPORT_SYMBOL(lustre_swab_ost_lvb);
-
-void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
-{
- __swab64s(&lvb->lvb_flags);
- __swab64s(&lvb->lvb_id_may_rel);
- __swab64s(&lvb->lvb_id_rel);
- __swab64s(&lvb->lvb_id_qunit);
- __swab64s(&lvb->lvb_pad1);
-}
-EXPORT_SYMBOL(lustre_swab_lquota_lvb);
-
-void lustre_swab_mdt_body(struct mdt_body *b)
-{
- lustre_swab_lu_fid(&b->mbo_fid1);
- lustre_swab_lu_fid(&b->mbo_fid2);
- /* handle is opaque */
- __swab64s(&b->mbo_valid);
- __swab64s(&b->mbo_size);
- __swab64s(&b->mbo_mtime);
- __swab64s(&b->mbo_atime);
- __swab64s(&b->mbo_ctime);
- __swab64s(&b->mbo_blocks);
- __swab64s(&b->mbo_ioepoch);
- __swab64s(&b->mbo_t_state);
- __swab32s(&b->mbo_fsuid);
- __swab32s(&b->mbo_fsgid);
- __swab32s(&b->mbo_capability);
- __swab32s(&b->mbo_mode);
- __swab32s(&b->mbo_uid);
- __swab32s(&b->mbo_gid);
- __swab32s(&b->mbo_flags);
- __swab32s(&b->mbo_rdev);
- __swab32s(&b->mbo_nlink);
- BUILD_BUG_ON(offsetof(typeof(*b), mbo_unused2) == 0);
- __swab32s(&b->mbo_suppgid);
- __swab32s(&b->mbo_eadatasize);
- __swab32s(&b->mbo_aclsize);
- __swab32s(&b->mbo_max_mdsize);
- BUILD_BUG_ON(!offsetof(typeof(*b), mbo_unused3));
- __swab32s(&b->mbo_uid_h);
- __swab32s(&b->mbo_gid_h);
- BUILD_BUG_ON(offsetof(typeof(*b), mbo_padding_5) == 0);
-}
-
-void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
-{
- /* handle is opaque */
- /* mio_handle is opaque */
- BUILD_BUG_ON(!offsetof(typeof(*b), mio_unused1));
- BUILD_BUG_ON(!offsetof(typeof(*b), mio_unused2));
- BUILD_BUG_ON(!offsetof(typeof(*b), mio_padding));
-}
-
-void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
-{
- int i;
-
- __swab32s(&mti->mti_lustre_ver);
- __swab32s(&mti->mti_stripe_index);
- __swab32s(&mti->mti_config_ver);
- __swab32s(&mti->mti_flags);
- __swab32s(&mti->mti_instance);
- __swab32s(&mti->mti_nid_count);
- BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64));
- for (i = 0; i < MTI_NIDS_MAX; i++)
- __swab64s(&mti->mti_nids[i]);
-}
-
-void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
-{
- __u8 i;
-
- __swab64s(&entry->mne_version);
- __swab32s(&entry->mne_instance);
- __swab32s(&entry->mne_index);
- __swab32s(&entry->mne_length);
-
- /* mne_nid_(count|type) must be one byte size because we're gonna
- * access it w/o swapping. */
- BUILD_BUG_ON(sizeof(entry->mne_nid_count) != sizeof(__u8));
- BUILD_BUG_ON(sizeof(entry->mne_nid_type) != sizeof(__u8));
-
- /* remove this assertion if ipv6 is supported. */
- LASSERT(entry->mne_nid_type == 0);
- for (i = 0; i < entry->mne_nid_count; i++) {
- BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64));
- __swab64s(&entry->u.nids[i]);
- }
-}
-EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
-
-void lustre_swab_mgs_config_body(struct mgs_config_body *body)
-{
- __swab64s(&body->mcb_offset);
- __swab32s(&body->mcb_units);
- __swab16s(&body->mcb_type);
-}
-
-void lustre_swab_mgs_config_res(struct mgs_config_res *body)
-{
- __swab64s(&body->mcr_offset);
- __swab64s(&body->mcr_size);
-}
-
-static void lustre_swab_obd_dqinfo(struct obd_dqinfo *i)
-{
- __swab64s(&i->dqi_bgrace);
- __swab64s(&i->dqi_igrace);
- __swab32s(&i->dqi_flags);
- __swab32s(&i->dqi_valid);
-}
-
-static void lustre_swab_obd_dqblk(struct obd_dqblk *b)
-{
- __swab64s(&b->dqb_ihardlimit);
- __swab64s(&b->dqb_isoftlimit);
- __swab64s(&b->dqb_curinodes);
- __swab64s(&b->dqb_bhardlimit);
- __swab64s(&b->dqb_bsoftlimit);
- __swab64s(&b->dqb_curspace);
- __swab64s(&b->dqb_btime);
- __swab64s(&b->dqb_itime);
- __swab32s(&b->dqb_valid);
- BUILD_BUG_ON(offsetof(typeof(*b), dqb_padding) == 0);
-}
-
-void lustre_swab_obd_quotactl(struct obd_quotactl *q)
-{
- __swab32s(&q->qc_cmd);
- __swab32s(&q->qc_type);
- __swab32s(&q->qc_id);
- __swab32s(&q->qc_stat);
- lustre_swab_obd_dqinfo(&q->qc_dqinfo);
- lustre_swab_obd_dqblk(&q->qc_dqblk);
-}
-
-void lustre_swab_fid2path(struct getinfo_fid2path *gf)
-{
- lustre_swab_lu_fid(&gf->gf_fid);
- __swab64s(&gf->gf_recno);
- __swab32s(&gf->gf_linkno);
- __swab32s(&gf->gf_pathlen);
-}
-EXPORT_SYMBOL(lustre_swab_fid2path);
-
-static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
-{
- __swab64s(&fm_extent->fe_logical);
- __swab64s(&fm_extent->fe_physical);
- __swab64s(&fm_extent->fe_length);
- __swab32s(&fm_extent->fe_flags);
- __swab32s(&fm_extent->fe_device);
-}
-
-void lustre_swab_fiemap(struct fiemap *fiemap)
-{
- __u32 i;
-
- __swab64s(&fiemap->fm_start);
- __swab64s(&fiemap->fm_length);
- __swab32s(&fiemap->fm_flags);
- __swab32s(&fiemap->fm_mapped_extents);
- __swab32s(&fiemap->fm_extent_count);
- __swab32s(&fiemap->fm_reserved);
-
- for (i = 0; i < fiemap->fm_mapped_extents; i++)
- lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
-}
-
-void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
-{
- __swab32s(&rr->rr_opcode);
- __swab32s(&rr->rr_cap);
- __swab32s(&rr->rr_fsuid);
- /* rr_fsuid_h is unused */
- __swab32s(&rr->rr_fsgid);
- /* rr_fsgid_h is unused */
- __swab32s(&rr->rr_suppgid1);
- /* rr_suppgid1_h is unused */
- __swab32s(&rr->rr_suppgid2);
- /* rr_suppgid2_h is unused */
- lustre_swab_lu_fid(&rr->rr_fid1);
- lustre_swab_lu_fid(&rr->rr_fid2);
- __swab64s(&rr->rr_mtime);
- __swab64s(&rr->rr_atime);
- __swab64s(&rr->rr_ctime);
- __swab64s(&rr->rr_size);
- __swab64s(&rr->rr_blocks);
- __swab32s(&rr->rr_bias);
- __swab32s(&rr->rr_mode);
- __swab32s(&rr->rr_flags);
- __swab32s(&rr->rr_flags_h);
- __swab32s(&rr->rr_umask);
-
- BUILD_BUG_ON(offsetof(typeof(*rr), rr_padding_4) == 0);
-};
-
-void lustre_swab_lov_desc(struct lov_desc *ld)
-{
- __swab32s(&ld->ld_tgt_count);
- __swab32s(&ld->ld_active_tgt_count);
- __swab32s(&ld->ld_default_stripe_count);
- __swab32s(&ld->ld_pattern);
- __swab64s(&ld->ld_default_stripe_size);
- __swab64s(&ld->ld_default_stripe_offset);
- __swab32s(&ld->ld_qos_maxage);
- /* uuid endian insensitive */
-}
-EXPORT_SYMBOL(lustre_swab_lov_desc);
-
-/* This structure is always in little-endian */
-static void lustre_swab_lmv_mds_md_v1(struct lmv_mds_md_v1 *lmm1)
-{
- int i;
-
- __swab32s(&lmm1->lmv_magic);
- __swab32s(&lmm1->lmv_stripe_count);
- __swab32s(&lmm1->lmv_master_mdt_index);
- __swab32s(&lmm1->lmv_hash_type);
- __swab32s(&lmm1->lmv_layout_version);
- for (i = 0; i < lmm1->lmv_stripe_count; i++)
- lustre_swab_lu_fid(&lmm1->lmv_stripe_fids[i]);
-}
-
-void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm)
-{
- switch (lmm->lmv_magic) {
- case LMV_MAGIC_V1:
- lustre_swab_lmv_mds_md_v1(&lmm->lmv_md_v1);
- break;
- default:
- break;
- }
-}
-EXPORT_SYMBOL(lustre_swab_lmv_mds_md);
-
-void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
-{
- __swab32s(&lum->lum_magic);
- __swab32s(&lum->lum_stripe_count);
- __swab32s(&lum->lum_stripe_offset);
- __swab32s(&lum->lum_hash_type);
- __swab32s(&lum->lum_type);
- BUILD_BUG_ON(!offsetof(typeof(*lum), lum_padding1));
-}
-EXPORT_SYMBOL(lustre_swab_lmv_user_md);
-
-static void lustre_swab_lmm_oi(struct ost_id *oi)
-{
- __swab64s(&oi->oi.oi_id);
- __swab64s(&oi->oi.oi_seq);
-}
-
-static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
-{
- __swab32s(&lum->lmm_magic);
- __swab32s(&lum->lmm_pattern);
- lustre_swab_lmm_oi(&lum->lmm_oi);
- __swab32s(&lum->lmm_stripe_size);
- __swab16s(&lum->lmm_stripe_count);
- __swab16s(&lum->lmm_stripe_offset);
-}
-
-void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
-{
- CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
- lustre_swab_lov_user_md_common(lum);
-}
-EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
-
-void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
-{
- CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
- lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
- /* lmm_pool_name nothing to do with char */
-}
-EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
-
-void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
-{
- CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
- __swab32s(&lmm->lmm_magic);
- __swab32s(&lmm->lmm_pattern);
- lustre_swab_lmm_oi(&lmm->lmm_oi);
- __swab32s(&lmm->lmm_stripe_size);
- __swab16s(&lmm->lmm_stripe_count);
- __swab16s(&lmm->lmm_layout_gen);
-}
-EXPORT_SYMBOL(lustre_swab_lov_mds_md);
-
-void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
- int stripe_count)
-{
- int i;
-
- for (i = 0; i < stripe_count; i++) {
- lustre_swab_ost_id(&lod[i].l_ost_oi);
- __swab32s(&lod[i].l_ost_gen);
- __swab32s(&lod[i].l_ost_idx);
- }
-}
-EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
-
-static void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
-{
- int i;
-
- for (i = 0; i < RES_NAME_SIZE; i++)
- __swab64s(&id->name[i]);
-}
-
-static void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
-{
- /* the lock data is a union and the first two fields are always an
- * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
- * data the same way.
- */
- __swab64s(&d->l_extent.start);
- __swab64s(&d->l_extent.end);
- __swab64s(&d->l_extent.gid);
- __swab64s(&d->l_flock.lfw_owner);
- __swab32s(&d->l_flock.lfw_pid);
-}
-
-void lustre_swab_ldlm_intent(struct ldlm_intent *i)
-{
- __swab64s(&i->opc);
-}
-
-static void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
-{
- __swab32s(&r->lr_type);
- BUILD_BUG_ON(offsetof(typeof(*r), lr_padding) == 0);
- lustre_swab_ldlm_res_id(&r->lr_name);
-}
-
-static void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l)
-{
- lustre_swab_ldlm_resource_desc(&l->l_resource);
- __swab32s(&l->l_req_mode);
- __swab32s(&l->l_granted_mode);
- lustre_swab_ldlm_policy_data(&l->l_policy_data);
-}
-
-void lustre_swab_ldlm_request(struct ldlm_request *rq)
-{
- __swab32s(&rq->lock_flags);
- lustre_swab_ldlm_lock_desc(&rq->lock_desc);
- __swab32s(&rq->lock_count);
- /* lock_handle[] opaque */
-}
-
-void lustre_swab_ldlm_reply(struct ldlm_reply *r)
-{
- __swab32s(&r->lock_flags);
- BUILD_BUG_ON(offsetof(typeof(*r), lock_padding) == 0);
- lustre_swab_ldlm_lock_desc(&r->lock_desc);
- /* lock_handle opaque */
- __swab64s(&r->lock_policy_res1);
- __swab64s(&r->lock_policy_res2);
-}
-
-/* Dump functions */
-void dump_ioo(struct obd_ioobj *ioo)
-{
- CDEBUG(D_RPCTRACE,
- "obd_ioobj: ioo_oid=" DOSTID ", ioo_max_brw=%#x, ioo_bufct=%d\n",
- POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
- ioo->ioo_bufcnt);
-}
-
-void dump_rniobuf(struct niobuf_remote *nb)
-{
- CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
- nb->rnb_offset, nb->rnb_len, nb->rnb_flags);
-}
-
-static void dump_obdo(struct obdo *oa)
-{
- __u32 valid = oa->o_valid;
-
- CDEBUG(D_RPCTRACE, "obdo: o_valid = %08x\n", valid);
- if (valid & OBD_MD_FLID)
- CDEBUG(D_RPCTRACE, "obdo: id = " DOSTID "\n", POSTID(&oa->o_oi));
- if (valid & OBD_MD_FLFID)
- CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n",
- oa->o_parent_seq);
- if (valid & OBD_MD_FLSIZE)
- CDEBUG(D_RPCTRACE, "obdo: o_size = %lld\n", oa->o_size);
- if (valid & OBD_MD_FLMTIME)
- CDEBUG(D_RPCTRACE, "obdo: o_mtime = %lld\n", oa->o_mtime);
- if (valid & OBD_MD_FLATIME)
- CDEBUG(D_RPCTRACE, "obdo: o_atime = %lld\n", oa->o_atime);
- if (valid & OBD_MD_FLCTIME)
- CDEBUG(D_RPCTRACE, "obdo: o_ctime = %lld\n", oa->o_ctime);
- if (valid & OBD_MD_FLBLOCKS) /* allocation of space */
- CDEBUG(D_RPCTRACE, "obdo: o_blocks = %lld\n", oa->o_blocks);
- if (valid & OBD_MD_FLGRANT)
- CDEBUG(D_RPCTRACE, "obdo: o_grant = %lld\n", oa->o_grant);
- if (valid & OBD_MD_FLBLKSZ)
- CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize);
- if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE))
- CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n",
- oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) |
- (valid & OBD_MD_FLMODE ? ~S_IFMT : 0)));
- if (valid & OBD_MD_FLUID)
- CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid);
- if (valid & OBD_MD_FLUID)
- CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h);
- if (valid & OBD_MD_FLGID)
- CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid);
- if (valid & OBD_MD_FLGID)
- CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h);
- if (valid & OBD_MD_FLFLAGS)
- CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags);
- if (valid & OBD_MD_FLNLINK)
- CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink);
- else if (valid & OBD_MD_FLCKSUM)
- CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n",
- oa->o_nlink);
- if (valid & OBD_MD_FLGENER)
- CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n",
- oa->o_parent_oid);
- if (valid & OBD_MD_FLEPOCH)
- CDEBUG(D_RPCTRACE, "obdo: o_ioepoch = %lld\n",
- oa->o_ioepoch);
- if (valid & OBD_MD_FLFID) {
- CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n",
- oa->o_stripe_idx);
- CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n",
- oa->o_parent_ver);
- }
- if (valid & OBD_MD_FLHANDLE)
- CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
- oa->o_handle.cookie);
-}
-
-void dump_ost_body(struct ost_body *ob)
-{
- dump_obdo(&ob->oa);
-}
-
-void dump_rcs(__u32 *rc)
-{
- CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
-}
-
-static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
-{
- LASSERT(req->rq_reqmsg);
-
- switch (req->rq_reqmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_req_swabbed(req, MSG_PTLRPC_BODY_OFF);
- default:
- CERROR("bad lustre msg magic: %#08X\n",
- req->rq_reqmsg->lm_magic);
- }
- return 0;
-}
-
-static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
-{
- LASSERT(req->rq_repmsg);
-
- switch (req->rq_repmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_rep_swabbed(req, MSG_PTLRPC_BODY_OFF);
- default:
- /* uninitialized yet */
- return 0;
- }
-}
-
-void _debug_req(struct ptlrpc_request *req,
- struct libcfs_debug_msg_data *msgdata,
- const char *fmt, ...)
-{
- int req_ok = req->rq_reqmsg != NULL;
- int rep_ok = req->rq_repmsg != NULL;
- lnet_nid_t nid = LNET_NID_ANY;
- va_list args;
-
- if (ptlrpc_req_need_swab(req)) {
- req_ok = req_ok && req_ptlrpc_body_swabbed(req);
- rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req);
- }
-
- if (req->rq_import && req->rq_import->imp_connection)
- nid = req->rq_import->imp_connection->c_peer.nid;
- else if (req->rq_export && req->rq_export->exp_connection)
- nid = req->rq_export->exp_connection->c_peer.nid;
-
- va_start(args, fmt);
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
- req, req->rq_xid, req->rq_transno,
- req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
- req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
- req->rq_import ?
- req->rq_import->imp_obd->obd_name :
- req->rq_export ?
- req->rq_export->exp_client_uuid.uuid :
- "<?>",
- libcfs_nid2str(nid),
- req->rq_request_portal, req->rq_reply_portal,
- req->rq_reqlen, req->rq_replen,
- req->rq_early_count, (s64)req->rq_timedout,
- (s64)req->rq_deadline,
- atomic_read(&req->rq_refcount),
- DEBUG_REQ_FLAGS(req),
- req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
- rep_ok ? lustre_msg_get_flags(req->rq_repmsg) : -1,
- req->rq_status,
- rep_ok ? lustre_msg_get_status(req->rq_repmsg) : -1);
- va_end(args);
-}
-EXPORT_SYMBOL(_debug_req);
-
-void lustre_swab_lustre_capa(struct lustre_capa *c)
-{
- lustre_swab_lu_fid(&c->lc_fid);
- __swab64s(&c->lc_opc);
- __swab64s(&c->lc_uid);
- __swab64s(&c->lc_gid);
- __swab32s(&c->lc_flags);
- __swab32s(&c->lc_keyid);
- __swab32s(&c->lc_timeout);
- __swab32s(&c->lc_expiry);
-}
-
-void lustre_swab_hsm_user_state(struct hsm_user_state *state)
-{
- __swab32s(&state->hus_states);
- __swab32s(&state->hus_archive_id);
-}
-
-void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
-{
- __swab32s(&hss->hss_valid);
- __swab64s(&hss->hss_setmask);
- __swab64s(&hss->hss_clearmask);
- __swab32s(&hss->hss_archive_id);
-}
-EXPORT_SYMBOL(lustre_swab_hsm_state_set);
-
-static void lustre_swab_hsm_extent(struct hsm_extent *extent)
-{
- __swab64s(&extent->offset);
- __swab64s(&extent->length);
-}
-
-void lustre_swab_hsm_current_action(struct hsm_current_action *action)
-{
- __swab32s(&action->hca_state);
- __swab32s(&action->hca_action);
- lustre_swab_hsm_extent(&action->hca_location);
-}
-
-void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
-{
- lustre_swab_lu_fid(&hui->hui_fid);
- lustre_swab_hsm_extent(&hui->hui_extent);
-}
-
-void lustre_swab_layout_intent(struct layout_intent *li)
-{
- __swab32s(&li->li_opc);
- __swab32s(&li->li_flags);
- __swab64s(&li->li_start);
- __swab64s(&li->li_end);
-}
-
-void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
-{
- lustre_swab_lu_fid(&hpk->hpk_fid);
- __swab64s(&hpk->hpk_cookie);
- __swab64s(&hpk->hpk_extent.offset);
- __swab64s(&hpk->hpk_extent.length);
- __swab16s(&hpk->hpk_flags);
- __swab16s(&hpk->hpk_errval);
-}
-
-void lustre_swab_hsm_request(struct hsm_request *hr)
-{
- __swab32s(&hr->hr_action);
- __swab32s(&hr->hr_archive_id);
- __swab64s(&hr->hr_flags);
- __swab32s(&hr->hr_itemcount);
- __swab32s(&hr->hr_data_len);
-}
-
-void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
-{
- __swab64s(&msl->msl_flags);
-}
-EXPORT_SYMBOL(lustre_swab_swap_layouts);
-
-void lustre_swab_close_data(struct close_data *cd)
-{
- lustre_swab_lu_fid(&cd->cd_fid);
- __swab64s(&cd->cd_data_version);
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
deleted file mode 100644
index 2466868afb9c..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2014, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_lib.h>
-#include <lustre_ha.h>
-#include <lustre_import.h>
-
-#include "ptlrpc_internal.h"
-
-void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc,
- int mdidx)
-{
- int offset = mdidx * LNET_MAX_IOV;
-
- BUILD_BUG_ON(PTLRPC_MAX_BRW_PAGES >= LI_POISON);
-
- LASSERT(mdidx < desc->bd_md_max_brw);
- LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
- LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
- LNET_MD_PHYS)));
-
- md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
- md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
-
- if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
- md->options |= LNET_MD_KIOV;
- if (GET_ENC_KIOV(desc))
- md->start = &BD_GET_ENC_KIOV(desc, offset);
- else
- md->start = &BD_GET_KIOV(desc, offset);
- } else {
- md->options |= LNET_MD_IOVEC;
- if (GET_ENC_KVEC(desc))
- md->start = &BD_GET_ENC_KVEC(desc, offset);
- else
- md->start = &BD_GET_KVEC(desc, offset);
- }
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
deleted file mode 100644
index 0775b7a048bb..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ /dev/null
@@ -1,477 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/pinger.c
- *
- * Portal-RPC reconnection and replay operations, for use in recovery.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include "ptlrpc_internal.h"
-
-struct mutex pinger_mutex;
-static LIST_HEAD(pinger_imports);
-static struct list_head timeout_list = LIST_HEAD_INIT(timeout_list);
-
-struct ptlrpc_request *
-ptlrpc_prep_ping(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
- LUSTRE_OBD_VERSION, OBD_PING);
- if (req) {
- ptlrpc_request_set_replen(req);
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
- return req;
-}
-
-int ptlrpc_obd_ping(struct obd_device *obd)
-{
- int rc;
- struct ptlrpc_request *req;
-
- req = ptlrpc_prep_ping(obd->u.cli.cl_import);
- if (!req)
- return -ENOMEM;
-
- req->rq_send_state = LUSTRE_IMP_FULL;
-
- rc = ptlrpc_queue_wait(req);
-
- ptlrpc_req_finished(req);
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_obd_ping);
-
-static int ptlrpc_ping(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
-
- req = ptlrpc_prep_ping(imp);
- if (!req) {
- CERROR("OOM trying to ping %s->%s\n",
- imp->imp_obd->obd_uuid.uuid,
- obd2cli_tgt(imp->imp_obd));
- return -ENOMEM;
- }
-
- DEBUG_REQ(D_INFO, req, "pinging %s->%s",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
- ptlrpcd_add_req(req);
-
- return 0;
-}
-
-static void ptlrpc_update_next_ping(struct obd_import *imp, int soon)
-{
- int time = soon ? PING_INTERVAL_SHORT : PING_INTERVAL;
-
- if (imp->imp_state == LUSTRE_IMP_DISCON) {
- int dtime = max_t(int, CONNECTION_SWITCH_MIN,
- AT_OFF ? 0 :
- at_get(&imp->imp_at.iat_net_latency));
- time = min(time, dtime);
- }
- imp->imp_next_ping = cfs_time_shift(time);
-}
-
-static inline int imp_is_deactive(struct obd_import *imp)
-{
- return (imp->imp_deactive ||
- OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_IMP_DEACTIVE));
-}
-
-static inline int ptlrpc_next_reconnect(struct obd_import *imp)
-{
- if (imp->imp_server_timeout)
- return cfs_time_shift(obd_timeout / 2);
- else
- return cfs_time_shift(obd_timeout);
-}
-
-static long pinger_check_timeout(unsigned long time)
-{
- struct timeout_item *item;
- unsigned long timeout = PING_INTERVAL;
-
- /* The timeout list is a increase order sorted list */
- mutex_lock(&pinger_mutex);
- list_for_each_entry(item, &timeout_list, ti_chain) {
- int ti_timeout = item->ti_timeout;
-
- if (timeout > ti_timeout)
- timeout = ti_timeout;
- break;
- }
- mutex_unlock(&pinger_mutex);
-
- return cfs_time_sub(cfs_time_add(time, timeout * HZ),
- cfs_time_current());
-}
-
-static bool ir_up;
-
-void ptlrpc_pinger_ir_up(void)
-{
- CDEBUG(D_HA, "IR up\n");
- ir_up = true;
-}
-EXPORT_SYMBOL(ptlrpc_pinger_ir_up);
-
-void ptlrpc_pinger_ir_down(void)
-{
- CDEBUG(D_HA, "IR down\n");
- ir_up = false;
-}
-EXPORT_SYMBOL(ptlrpc_pinger_ir_down);
-
-static void ptlrpc_pinger_process_import(struct obd_import *imp,
- unsigned long this_ping)
-{
- int level;
- int force;
- int force_next;
- int suppress;
-
- spin_lock(&imp->imp_lock);
-
- level = imp->imp_state;
- force = imp->imp_force_verify;
- force_next = imp->imp_force_next_verify;
- /*
- * This will be used below only if the import is "FULL".
- */
- suppress = ir_up && OCD_HAS_FLAG(&imp->imp_connect_data, PINGLESS);
-
- imp->imp_force_verify = 0;
-
- if (cfs_time_aftereq(imp->imp_next_ping - 5 * CFS_TICK, this_ping) &&
- !force) {
- spin_unlock(&imp->imp_lock);
- return;
- }
-
- imp->imp_force_next_verify = 0;
-
- spin_unlock(&imp->imp_lock);
-
- CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA, "%s->%s: level %s/%u force %u force_next %u deactive %u pingable %u suppress %u\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
- ptlrpc_import_state_name(level), level, force, force_next,
- imp->imp_deactive, imp->imp_pingable, suppress);
-
- if (level == LUSTRE_IMP_DISCON && !imp_is_deactive(imp)) {
- /* wait for a while before trying recovery again */
- imp->imp_next_ping = ptlrpc_next_reconnect(imp);
- if (!imp->imp_no_pinger_recover)
- ptlrpc_initiate_recovery(imp);
- } else if (level != LUSTRE_IMP_FULL ||
- imp->imp_obd->obd_no_recov ||
- imp_is_deactive(imp)) {
- CDEBUG(D_HA, "%s->%s: not pinging (in recovery or recovery disabled: %s)\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
- ptlrpc_import_state_name(level));
- if (force) {
- spin_lock(&imp->imp_lock);
- imp->imp_force_verify = 1;
- spin_unlock(&imp->imp_lock);
- }
- } else if ((imp->imp_pingable && !suppress) || force_next || force) {
- ptlrpc_ping(imp);
- }
-}
-
-static struct workqueue_struct *pinger_wq;
-static void ptlrpc_pinger_main(struct work_struct *ws);
-static DECLARE_DELAYED_WORK(ping_work, ptlrpc_pinger_main);
-
-static void ptlrpc_pinger_main(struct work_struct *ws)
-{
- unsigned long this_ping = cfs_time_current();
- long time_to_next_wake;
- struct timeout_item *item;
- struct obd_import *imp;
-
- do {
- mutex_lock(&pinger_mutex);
- list_for_each_entry(item, &timeout_list, ti_chain) {
- item->ti_cb(item, item->ti_cb_data);
- }
- list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) {
- ptlrpc_pinger_process_import(imp, this_ping);
- /* obd_timeout might have changed */
- if (imp->imp_pingable && imp->imp_next_ping &&
- cfs_time_after(imp->imp_next_ping,
- cfs_time_add(this_ping,
- PING_INTERVAL * HZ)))
- ptlrpc_update_next_ping(imp, 0);
- }
- mutex_unlock(&pinger_mutex);
-
- /* Wait until the next ping time, or until we're stopped. */
- time_to_next_wake = pinger_check_timeout(this_ping);
- /* The ping sent by ptlrpc_send_rpc may get sent out
- * say .01 second after this.
- * ptlrpc_pinger_sending_on_import will then set the
- * next ping time to next_ping + .01 sec, which means
- * we will SKIP the next ping at next_ping, and the
- * ping will get sent 2 timeouts from now! Beware.
- */
- CDEBUG(D_INFO, "next wakeup in " CFS_DURATION_T " (%ld)\n",
- time_to_next_wake,
- cfs_time_add(this_ping,
- PING_INTERVAL * HZ));
- } while (time_to_next_wake <= 0);
-
- queue_delayed_work(pinger_wq, &ping_work,
- round_jiffies_up_relative(time_to_next_wake));
-}
-
-int ptlrpc_start_pinger(void)
-{
- if (pinger_wq)
- return -EALREADY;
-
- pinger_wq = alloc_workqueue("ptlrpc_pinger", WQ_MEM_RECLAIM, 1);
- if (!pinger_wq) {
- CERROR("cannot start pinger workqueue\n");
- return -ENOMEM;
- }
-
- queue_delayed_work(pinger_wq, &ping_work, 0);
- return 0;
-}
-
-static int ptlrpc_pinger_remove_timeouts(void);
-
-int ptlrpc_stop_pinger(void)
-{
- int rc = 0;
-
- if (!pinger_wq)
- return -EALREADY;
-
- ptlrpc_pinger_remove_timeouts();
- cancel_delayed_work_sync(&ping_work);
- destroy_workqueue(pinger_wq);
- pinger_wq = NULL;
-
- return rc;
-}
-
-void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
-{
- ptlrpc_update_next_ping(imp, 0);
-}
-
-void ptlrpc_pinger_commit_expected(struct obd_import *imp)
-{
- ptlrpc_update_next_ping(imp, 1);
- assert_spin_locked(&imp->imp_lock);
- /*
- * Avoid reading stale imp_connect_data. When not sure if pings are
- * expected or not on next connection, we assume they are not and force
- * one anyway to guarantee the chance of updating
- * imp_peer_committed_transno.
- */
- if (imp->imp_state != LUSTRE_IMP_FULL ||
- OCD_HAS_FLAG(&imp->imp_connect_data, PINGLESS))
- imp->imp_force_next_verify = 1;
-}
-
-int ptlrpc_pinger_add_import(struct obd_import *imp)
-{
- if (!list_empty(&imp->imp_pinger_chain))
- return -EALREADY;
-
- mutex_lock(&pinger_mutex);
- CDEBUG(D_HA, "adding pingable import %s->%s\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
- /* if we add to pinger we want recovery on this import */
- imp->imp_obd->obd_no_recov = 0;
- ptlrpc_update_next_ping(imp, 0);
- /* XXX sort, blah blah */
- list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
- class_import_get(imp);
-
- ptlrpc_pinger_wake_up();
- mutex_unlock(&pinger_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_pinger_add_import);
-
-int ptlrpc_pinger_del_import(struct obd_import *imp)
-{
- if (list_empty(&imp->imp_pinger_chain))
- return -ENOENT;
-
- mutex_lock(&pinger_mutex);
- list_del_init(&imp->imp_pinger_chain);
- CDEBUG(D_HA, "removing pingable import %s->%s\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
- /* if we remove from pinger we don't want recovery on this import */
- imp->imp_obd->obd_no_recov = 1;
- class_import_put(imp);
- mutex_unlock(&pinger_mutex);
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_pinger_del_import);
-
-/**
- * Register a timeout callback to the pinger list, and the callback will
- * be called when timeout happens.
- */
-static struct timeout_item *ptlrpc_new_timeout(int time,
- enum timeout_event event,
- timeout_cb_t cb, void *data)
-{
- struct timeout_item *ti;
-
- ti = kzalloc(sizeof(*ti), GFP_NOFS);
- if (!ti)
- return NULL;
-
- INIT_LIST_HEAD(&ti->ti_obd_list);
- INIT_LIST_HEAD(&ti->ti_chain);
- ti->ti_timeout = time;
- ti->ti_event = event;
- ti->ti_cb = cb;
- ti->ti_cb_data = data;
-
- return ti;
-}
-
-/**
- * Register timeout event on the pinger thread.
- * Note: the timeout list is an sorted list with increased timeout value.
- */
-static struct timeout_item*
-ptlrpc_pinger_register_timeout(int time, enum timeout_event event,
- timeout_cb_t cb, void *data)
-{
- struct timeout_item *item, *tmp;
-
- LASSERT(mutex_is_locked(&pinger_mutex));
-
- list_for_each_entry(item, &timeout_list, ti_chain)
- if (item->ti_event == event)
- goto out;
-
- item = ptlrpc_new_timeout(time, event, cb, data);
- if (item) {
- list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
- if (tmp->ti_timeout < time) {
- list_add(&item->ti_chain, &tmp->ti_chain);
- goto out;
- }
- }
- list_add(&item->ti_chain, &timeout_list);
- }
-out:
- return item;
-}
-
-/* Add a client_obd to the timeout event list, when timeout(@time)
- * happens, the callback(@cb) will be called.
- */
-int ptlrpc_add_timeout_client(int time, enum timeout_event event,
- timeout_cb_t cb, void *data,
- struct list_head *obd_list)
-{
- struct timeout_item *ti;
-
- mutex_lock(&pinger_mutex);
- ti = ptlrpc_pinger_register_timeout(time, event, cb, data);
- if (!ti) {
- mutex_unlock(&pinger_mutex);
- return -EINVAL;
- }
- list_add(obd_list, &ti->ti_obd_list);
- mutex_unlock(&pinger_mutex);
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_add_timeout_client);
-
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
- enum timeout_event event)
-{
- struct timeout_item *ti = NULL, *item;
-
- if (list_empty(obd_list))
- return 0;
- mutex_lock(&pinger_mutex);
- list_del_init(obd_list);
- /**
- * If there are no obd attached to the timeout event
- * list, remove this timeout event from the pinger
- */
- list_for_each_entry(item, &timeout_list, ti_chain) {
- if (item->ti_event == event) {
- ti = item;
- break;
- }
- }
- if (list_empty(&ti->ti_obd_list)) {
- list_del(&ti->ti_chain);
- kfree(ti);
- }
- mutex_unlock(&pinger_mutex);
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_del_timeout_client);
-
-static int ptlrpc_pinger_remove_timeouts(void)
-{
- struct timeout_item *item, *tmp;
-
- mutex_lock(&pinger_mutex);
- list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
- LASSERT(list_empty(&item->ti_obd_list));
- list_del(&item->ti_chain);
- kfree(item);
- }
- mutex_unlock(&pinger_mutex);
- return 0;
-}
-
-void ptlrpc_pinger_wake_up(void)
-{
- mod_delayed_work(pinger_wq, &ping_work, 0);
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
deleted file mode 100644
index b7a8d7537a66..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ /dev/null
@@ -1,371 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/* Intramodule declarations for ptlrpc. */
-
-#ifndef PTLRPC_INTERNAL_H
-#define PTLRPC_INTERNAL_H
-
-#include "../ldlm/ldlm_internal.h"
-
-struct ldlm_namespace;
-struct obd_import;
-struct ldlm_res_id;
-struct ptlrpc_request_set;
-extern int test_req_buffer_pressure;
-extern struct mutex ptlrpc_all_services_mutex;
-extern struct list_head ptlrpc_all_services;
-
-extern struct mutex ptlrpcd_mutex;
-extern struct mutex pinger_mutex;
-
-int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait);
-/* ptlrpcd.c */
-int ptlrpcd_start(struct ptlrpcd_ctl *pc);
-
-/* client.c */
-void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
- unsigned int service_time);
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
- unsigned int max_brw,
- enum ptlrpc_bulk_op_type type,
- unsigned int portal,
- const struct ptlrpc_bulk_frag_ops *ops);
-int ptlrpc_request_cache_init(void);
-void ptlrpc_request_cache_fini(void);
-struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags);
-void ptlrpc_request_cache_free(struct ptlrpc_request *req);
-void ptlrpc_init_xid(void);
-void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
- struct ptlrpc_request *req);
-void ptlrpc_expired_set(struct ptlrpc_request_set *set);
-int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set);
-void ptlrpc_resend_req(struct ptlrpc_request *request);
-void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req);
-void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req);
-__u64 ptlrpc_known_replied_xid(struct obd_import *imp);
-void ptlrpc_add_unreplied(struct ptlrpc_request *req);
-
-/* events.c */
-int ptlrpc_init_portals(void);
-void ptlrpc_exit_portals(void);
-
-void ptlrpc_request_handle_notconn(struct ptlrpc_request *req);
-void lustre_assert_wire_constants(void);
-int ptlrpc_import_in_recovery(struct obd_import *imp);
-int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt);
-int ptlrpc_replay_next(struct obd_import *imp, int *inflight);
-void ptlrpc_initiate_recovery(struct obd_import *imp);
-
-int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset);
-int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset);
-
-int ptlrpc_sysfs_register_service(struct kset *parent,
- struct ptlrpc_service *svc);
-void ptlrpc_sysfs_unregister_service(struct ptlrpc_service *svc);
-
-void ptlrpc_ldebugfs_register_service(struct dentry *debugfs_entry,
- struct ptlrpc_service *svc);
-void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc);
-void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount);
-
-/* NRS */
-
-/**
- * NRS core object.
- *
- * Holds NRS core fields.
- */
-struct nrs_core {
- /**
- * Protects nrs_core::nrs_policies, serializes external policy
- * registration/unregistration, and NRS core lprocfs operations.
- */
- struct mutex nrs_mutex;
- /**
- * List of all policy descriptors registered with NRS core; protected
- * by nrs_core::nrs_mutex.
- */
- struct list_head nrs_policies;
-
-};
-
-extern struct nrs_core nrs_core;
-
-int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc);
-void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc);
-
-void ptlrpc_nrs_req_initialize(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req, bool hp);
-void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req);
-void ptlrpc_nrs_req_stop_nolock(struct ptlrpc_request *req);
-void ptlrpc_nrs_req_add(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req, bool hp);
-
-struct ptlrpc_request *
-ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp,
- bool peek, bool force);
-
-static inline struct ptlrpc_request *
-ptlrpc_nrs_req_get_nolock(struct ptlrpc_service_part *svcpt, bool hp,
- bool force)
-{
- return ptlrpc_nrs_req_get_nolock0(svcpt, hp, false, force);
-}
-
-bool ptlrpc_nrs_req_pending_nolock(struct ptlrpc_service_part *svcpt, bool hp);
-
-int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc,
- enum ptlrpc_nrs_queue_type queue, char *name,
- enum ptlrpc_nrs_ctl opc, bool single, void *arg);
-
-int ptlrpc_nrs_init(void);
-void ptlrpc_nrs_fini(void);
-
-static inline bool nrs_svcpt_has_hp(const struct ptlrpc_service_part *svcpt)
-{
- return svcpt->scp_nrs_hp != NULL;
-}
-
-static inline bool nrs_svc_has_hp(const struct ptlrpc_service *svc)
-{
- /**
- * If the first service partition has an HP NRS head, all service
- * partitions will.
- */
- return nrs_svcpt_has_hp(svc->srv_parts[0]);
-}
-
-static inline
-struct ptlrpc_nrs *nrs_svcpt2nrs(struct ptlrpc_service_part *svcpt, bool hp)
-{
- LASSERT(ergo(hp, nrs_svcpt_has_hp(svcpt)));
- return hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg;
-}
-
-static inline int nrs_pol2cptid(const struct ptlrpc_nrs_policy *policy)
-{
- return policy->pol_nrs->nrs_svcpt->scp_cpt;
-}
-
-static inline
-struct ptlrpc_service *nrs_pol2svc(struct ptlrpc_nrs_policy *policy)
-{
- return policy->pol_nrs->nrs_svcpt->scp_service;
-}
-
-static inline
-struct ptlrpc_service_part *nrs_pol2svcpt(struct ptlrpc_nrs_policy *policy)
-{
- return policy->pol_nrs->nrs_svcpt;
-}
-
-static inline
-struct cfs_cpt_table *nrs_pol2cptab(struct ptlrpc_nrs_policy *policy)
-{
- return nrs_pol2svc(policy)->srv_cptable;
-}
-
-static inline struct ptlrpc_nrs_resource *
-nrs_request_resource(struct ptlrpc_nrs_request *nrq)
-{
- LASSERT(nrq->nr_initialized);
- LASSERT(!nrq->nr_finalized);
-
- return nrq->nr_res_ptrs[nrq->nr_res_idx];
-}
-
-static inline
-struct ptlrpc_nrs_policy *nrs_request_policy(struct ptlrpc_nrs_request *nrq)
-{
- return nrs_request_resource(nrq)->res_policy;
-}
-
-#define NRS_LPROCFS_QUANTUM_NAME_REG "reg_quantum:"
-#define NRS_LPROCFS_QUANTUM_NAME_HP "hp_quantum:"
-
-/**
- * the maximum size of nrs_crrn_client::cc_quantum and nrs_orr_data::od_quantum.
- */
-#define LPROCFS_NRS_QUANTUM_MAX 65535
-
-/**
- * Max valid command string is the size of the labels, plus "65535" twice, plus
- * a separating space character.
- */
-#define LPROCFS_NRS_WR_QUANTUM_MAX_CMD \
- sizeof(NRS_LPROCFS_QUANTUM_NAME_REG __stringify(LPROCFS_NRS_QUANTUM_MAX) " " \
- NRS_LPROCFS_QUANTUM_NAME_HP __stringify(LPROCFS_NRS_QUANTUM_MAX))
-
-/* ptlrpc/nrs_fifo.c */
-extern struct ptlrpc_nrs_pol_conf nrs_conf_fifo;
-
-/* recovd_thread.c */
-
-int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
-
-/* pers.c */
-void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc,
- int mdcnt);
-
-/* pack_generic.c */
-struct ptlrpc_reply_state *
-lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt);
-void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs);
-
-/* pinger.c */
-int ptlrpc_start_pinger(void);
-int ptlrpc_stop_pinger(void);
-void ptlrpc_pinger_sending_on_import(struct obd_import *imp);
-void ptlrpc_pinger_commit_expected(struct obd_import *imp);
-void ptlrpc_pinger_wake_up(void);
-
-/* sec_null.c */
-int sptlrpc_null_init(void);
-void sptlrpc_null_fini(void);
-
-/* sec_plain.c */
-int sptlrpc_plain_init(void);
-void sptlrpc_plain_fini(void);
-
-/* sec_bulk.c */
-int sptlrpc_enc_pool_init(void);
-void sptlrpc_enc_pool_fini(void);
-int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v);
-
-/* sec_lproc.c */
-int sptlrpc_lproc_init(void);
-void sptlrpc_lproc_fini(void);
-
-/* sec_gc.c */
-int sptlrpc_gc_init(void);
-void sptlrpc_gc_fini(void);
-
-/* sec_config.c */
-void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
- enum lustre_sec_part to,
- struct obd_uuid *target,
- lnet_nid_t nid,
- struct sptlrpc_flavor *sf);
-int sptlrpc_conf_init(void);
-void sptlrpc_conf_fini(void);
-
-/* sec.c */
-int sptlrpc_init(void);
-void sptlrpc_fini(void);
-
-static inline bool ptlrpc_recoverable_error(int rc)
-{
- return (rc == -ENOTCONN || rc == -ENODEV);
-}
-
-static inline int tgt_mod_init(void)
-{
- return 0;
-}
-
-static inline void tgt_mod_exit(void)
-{
- return;
-}
-
-static inline void ptlrpc_reqset_put(struct ptlrpc_request_set *set)
-{
- if (atomic_dec_and_test(&set->set_refcount))
- kfree(set);
-}
-
-/** initialise ptlrpc common fields */
-static inline void ptlrpc_req_comm_init(struct ptlrpc_request *req)
-{
- spin_lock_init(&req->rq_lock);
- atomic_set(&req->rq_refcount, 1);
- INIT_LIST_HEAD(&req->rq_list);
- INIT_LIST_HEAD(&req->rq_replay_list);
-}
-
-/** initialise client side ptlrpc request */
-static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_req *cr = &req->rq_cli;
-
- ptlrpc_req_comm_init(req);
-
- req->rq_receiving_reply = 0;
- req->rq_req_unlinked = 1;
- req->rq_reply_unlinked = 1;
-
- req->rq_receiving_reply = 0;
- req->rq_req_unlinked = 1;
- req->rq_reply_unlinked = 1;
-
- INIT_LIST_HEAD(&cr->cr_set_chain);
- INIT_LIST_HEAD(&cr->cr_ctx_chain);
- INIT_LIST_HEAD(&cr->cr_unreplied_list);
- init_waitqueue_head(&cr->cr_reply_waitq);
- init_waitqueue_head(&cr->cr_set_waitq);
-}
-
-/** initialise server side ptlrpc request */
-static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req)
-{
- struct ptlrpc_srv_req *sr = &req->rq_srv;
-
- ptlrpc_req_comm_init(req);
- req->rq_srv_req = 1;
- INIT_LIST_HEAD(&sr->sr_exp_list);
- INIT_LIST_HEAD(&sr->sr_timed_list);
- INIT_LIST_HEAD(&sr->sr_hist_list);
-}
-
-static inline bool ptlrpc_req_is_connect(struct ptlrpc_request *req)
-{
- if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CONNECT ||
- lustre_msg_get_opc(req->rq_reqmsg) == OST_CONNECT ||
- lustre_msg_get_opc(req->rq_reqmsg) == MGS_CONNECT)
- return true;
- else
- return false;
-}
-
-static inline bool ptlrpc_req_is_disconnect(struct ptlrpc_request *req)
-{
- if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_DISCONNECT ||
- lustre_msg_get_opc(req->rq_reqmsg) == OST_DISCONNECT ||
- lustre_msg_get_opc(req->rq_reqmsg) == MGS_DISCONNECT)
- return true;
- else
- return false;
-}
-
-#endif /* PTLRPC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
deleted file mode 100644
index 38923418669f..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lustre_req_layout.h>
-
-#include "ptlrpc_internal.h"
-
-extern spinlock_t ptlrpc_last_xid_lock;
-#if RS_DEBUG
-extern spinlock_t ptlrpc_rs_debug_lock;
-#endif
-
-DEFINE_MUTEX(ptlrpc_startup);
-static int ptlrpc_active = 0;
-
-int ptlrpc_inc_ref(void)
-{
- int rc = 0;
-
- mutex_lock(&ptlrpc_startup);
- if (ptlrpc_active++ == 0) {
- ptlrpc_put_connection_superhack = ptlrpc_connection_put;
-
- rc = ptlrpc_init_portals();
- if (!rc) {
- rc= ptlrpc_start_pinger();
- if (rc)
- ptlrpc_exit_portals();
- }
- if (rc)
- ptlrpc_active--;
- }
- mutex_unlock(&ptlrpc_startup);
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_inc_ref);
-
-void ptlrpc_dec_ref(void)
-{
- mutex_lock(&ptlrpc_startup);
- if (--ptlrpc_active == 0) {
- ptlrpc_stop_pinger();
- ptlrpc_exit_portals();
- }
- mutex_unlock(&ptlrpc_startup);
-}
-EXPORT_SYMBOL(ptlrpc_dec_ref);
-
-static int __init ptlrpc_init(void)
-{
- int rc, cleanup_phase = 0;
-
- lustre_assert_wire_constants();
-#if RS_DEBUG
- spin_lock_init(&ptlrpc_rs_debug_lock);
-#endif
- mutex_init(&ptlrpc_all_services_mutex);
- mutex_init(&pinger_mutex);
- mutex_init(&ptlrpcd_mutex);
- ptlrpc_init_xid();
-
- rc = req_layout_init();
- if (rc)
- return rc;
-
- rc = ptlrpc_hr_init();
- if (rc)
- return rc;
-
- cleanup_phase = 1;
- rc = ptlrpc_request_cache_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 3;
-
- rc = ptlrpc_connection_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 5;
- rc = ldlm_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 6;
- rc = sptlrpc_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 7;
- rc = ptlrpc_nrs_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 8;
- rc = tgt_mod_init();
- if (rc)
- goto cleanup;
- return 0;
-
-cleanup:
- switch (cleanup_phase) {
- case 8:
- ptlrpc_nrs_fini();
- /* Fall through */
- case 7:
- sptlrpc_fini();
- /* Fall through */
- case 6:
- ldlm_exit();
- /* Fall through */
- case 5:
- ptlrpc_connection_fini();
- /* Fall through */
- case 3:
- ptlrpc_request_cache_fini();
- /* Fall through */
- case 1:
- ptlrpc_hr_fini();
- req_layout_fini();
- /* Fall through */
- default:
- ;
- }
-
- return rc;
-}
-
-static void __exit ptlrpc_exit(void)
-{
- tgt_mod_exit();
- ptlrpc_nrs_fini();
- sptlrpc_fini();
- ldlm_exit();
- ptlrpc_request_cache_fini();
- ptlrpc_hr_fini();
- ptlrpc_connection_fini();
-}
-
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Request Processor and Lock Management");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(ptlrpc_init);
-module_exit(ptlrpc_exit);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
deleted file mode 100644
index c0fa13942bd8..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ /dev/null
@@ -1,912 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/ptlrpcd.c
- */
-
-/** \defgroup ptlrpcd PortalRPC daemon
- *
- * ptlrpcd is a special thread with its own set where other user might add
- * requests when they don't want to wait for their completion.
- * PtlRPCD will take care of sending such requests and then processing their
- * replies and calling completion callbacks as necessary.
- * The callbacks are called directly from ptlrpcd context.
- * It is important to never significantly block (esp. on RPCs!) within such
- * completion handler or a deadlock might occur where ptlrpcd enters some
- * callback that attempts to send another RPC and wait for it to return,
- * during which time ptlrpcd is completely blocked, so e.g. if import
- * fails, recovery cannot progress because connection requests are also
- * sent by ptlrpcd.
- *
- * @{
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <linux/libcfs/libcfs.h>
-
-#include <lustre_net.h>
-#include <lustre_lib.h>
-#include <lustre_ha.h>
-#include <obd_class.h> /* for obd_zombie */
-#include <obd_support.h> /* for OBD_FAIL_CHECK */
-#include <cl_object.h> /* cl_env_{get,put}() */
-#include <lprocfs_status.h>
-
-#include "ptlrpc_internal.h"
-
-/* One of these per CPT. */
-struct ptlrpcd {
- int pd_size;
- int pd_index;
- int pd_cpt;
- int pd_cursor;
- int pd_nthreads;
- int pd_groupsize;
- struct ptlrpcd_ctl pd_threads[0];
-};
-
-/*
- * max_ptlrpcds is obsolete, but retained to ensure that the kernel
- * module will load on a system where it has been tuned.
- * A value other than 0 implies it was tuned, in which case the value
- * is used to derive a setting for ptlrpcd_per_cpt_max.
- */
-static int max_ptlrpcds;
-module_param(max_ptlrpcds, int, 0644);
-MODULE_PARM_DESC(max_ptlrpcds,
- "Max ptlrpcd thread count to be started (obsolete).");
-
-/*
- * ptlrpcd_bind_policy is obsolete, but retained to ensure that
- * the kernel module will load on a system where it has been tuned.
- * A value other than 0 implies it was tuned, in which case the value
- * is used to derive a setting for ptlrpcd_partner_group_size.
- */
-static int ptlrpcd_bind_policy;
-module_param(ptlrpcd_bind_policy, int, 0644);
-MODULE_PARM_DESC(ptlrpcd_bind_policy,
- "Ptlrpcd threads binding mode (obsolete).");
-
-/*
- * ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run
- * in a CPT.
- */
-static int ptlrpcd_per_cpt_max;
-module_param(ptlrpcd_per_cpt_max, int, 0644);
-MODULE_PARM_DESC(ptlrpcd_per_cpt_max,
- "Max ptlrpcd thread count to be started per CPT.");
-
-/*
- * ptlrpcd_partner_group_size: The desired number of threads in each
- * ptlrpcd partner thread group. Default is 2, corresponding to the
- * old PDB_POLICY_PAIR. A negative value makes all ptlrpcd threads in
- * a CPT partners of each other.
- */
-static int ptlrpcd_partner_group_size;
-module_param(ptlrpcd_partner_group_size, int, 0644);
-MODULE_PARM_DESC(ptlrpcd_partner_group_size,
- "Number of ptlrpcd threads in a partner group.");
-
-/*
- * ptlrpcd_cpts: A CPT string describing the CPU partitions that
- * ptlrpcd threads should run on. Used to make ptlrpcd threads run on
- * a subset of all CPTs.
- *
- * ptlrpcd_cpts=2
- * ptlrpcd_cpts=[2]
- * run ptlrpcd threads only on CPT 2.
- *
- * ptlrpcd_cpts=0-3
- * ptlrpcd_cpts=[0-3]
- * run ptlrpcd threads on CPTs 0, 1, 2, and 3.
- *
- * ptlrpcd_cpts=[0-3,5,7]
- * run ptlrpcd threads on CPTS 0, 1, 2, 3, 5, and 7.
- */
-static char *ptlrpcd_cpts;
-module_param(ptlrpcd_cpts, charp, 0644);
-MODULE_PARM_DESC(ptlrpcd_cpts,
- "CPU partitions ptlrpcd threads should run in");
-
-/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */
-static int *ptlrpcds_cpt_idx;
-
-/* ptlrpcds_num is the number of entries in the ptlrpcds array. */
-static int ptlrpcds_num;
-static struct ptlrpcd **ptlrpcds;
-
-/*
- * In addition to the regular thread pool above, there is a single
- * global recovery thread. Recovery isn't critical for performance,
- * and doesn't block, but must always be able to proceed, and it is
- * possible that all normal ptlrpcd threads are blocked. Hence the
- * need for a dedicated thread.
- */
-static struct ptlrpcd_ctl ptlrpcd_rcv;
-
-struct mutex ptlrpcd_mutex;
-static int ptlrpcd_users;
-
-void ptlrpcd_wake(struct ptlrpc_request *req)
-{
- struct ptlrpc_request_set *set = req->rq_set;
-
- wake_up(&set->set_waitq);
-}
-EXPORT_SYMBOL(ptlrpcd_wake);
-
-static struct ptlrpcd_ctl *
-ptlrpcd_select_pc(struct ptlrpc_request *req)
-{
- struct ptlrpcd *pd;
- int cpt;
- int idx;
-
- if (req && req->rq_send_state != LUSTRE_IMP_FULL)
- return &ptlrpcd_rcv;
-
- cpt = cfs_cpt_current(cfs_cpt_table, 1);
- if (!ptlrpcds_cpt_idx)
- idx = cpt;
- else
- idx = ptlrpcds_cpt_idx[cpt];
- pd = ptlrpcds[idx];
-
- /* We do not care whether it is strict load balance. */
- idx = pd->pd_cursor;
- if (++idx == pd->pd_nthreads)
- idx = 0;
- pd->pd_cursor = idx;
-
- return &pd->pd_threads[idx];
-}
-
-/**
- * Return transferred RPCs count.
- */
-static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
- struct ptlrpc_request_set *src)
-{
- struct ptlrpc_request *req, *tmp;
- int rc = 0;
-
- spin_lock(&src->set_new_req_lock);
- if (likely(!list_empty(&src->set_new_requests))) {
- list_for_each_entry_safe(req, tmp, &src->set_new_requests, rq_set_chain)
- req->rq_set = des;
-
- list_splice_init(&src->set_new_requests, &des->set_requests);
- rc = atomic_read(&src->set_new_count);
- atomic_add(rc, &des->set_remaining);
- atomic_set(&src->set_new_count, 0);
- }
- spin_unlock(&src->set_new_req_lock);
- return rc;
-}
-
-/**
- * Requests that are added to the ptlrpcd queue are sent via
- * ptlrpcd_check->ptlrpc_check_set().
- */
-void ptlrpcd_add_req(struct ptlrpc_request *req)
-{
- struct ptlrpcd_ctl *pc;
-
- if (req->rq_reqmsg)
- lustre_msg_set_jobid(req->rq_reqmsg, NULL);
-
- spin_lock(&req->rq_lock);
- if (req->rq_invalid_rqset) {
- req->rq_invalid_rqset = 0;
- spin_unlock(&req->rq_lock);
- if (wait_event_idle_timeout(req->rq_set_waitq,
- !req->rq_set,
- 5 * HZ) == 0)
- wait_event_idle(req->rq_set_waitq,
- !req->rq_set);
- } else if (req->rq_set) {
- /* If we have a valid "rq_set", just reuse it to avoid double
- * linked.
- */
- LASSERT(req->rq_phase == RQ_PHASE_NEW);
- LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
-
- /* ptlrpc_check_set will decrease the count */
- atomic_inc(&req->rq_set->set_remaining);
- spin_unlock(&req->rq_lock);
- wake_up(&req->rq_set->set_waitq);
- return;
- } else {
- spin_unlock(&req->rq_lock);
- }
-
- pc = ptlrpcd_select_pc(req);
-
- DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
- req, pc->pc_name, pc->pc_index);
-
- ptlrpc_set_add_new_req(pc, req);
-}
-EXPORT_SYMBOL(ptlrpcd_add_req);
-
-static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
-{
- atomic_inc(&set->set_refcount);
-}
-
-/**
- * Check if there is more work to do on ptlrpcd set.
- * Returns 1 if yes.
- */
-static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
-{
- struct ptlrpc_request *req, *tmp;
- struct ptlrpc_request_set *set = pc->pc_set;
- int rc = 0;
- int rc2;
-
- if (atomic_read(&set->set_new_count)) {
- spin_lock(&set->set_new_req_lock);
- if (likely(!list_empty(&set->set_new_requests))) {
- list_splice_init(&set->set_new_requests,
- &set->set_requests);
- atomic_add(atomic_read(&set->set_new_count),
- &set->set_remaining);
- atomic_set(&set->set_new_count, 0);
- /*
- * Need to calculate its timeout.
- */
- rc = 1;
- }
- spin_unlock(&set->set_new_req_lock);
- }
-
- /* We should call lu_env_refill() before handling new requests to make
- * sure that env key the requests depending on really exists.
- */
- rc2 = lu_env_refill(env);
- if (rc2 != 0) {
- /*
- * XXX This is very awkward situation, because
- * execution can neither continue (request
- * interpreters assume that env is set up), nor repeat
- * the loop (as this potentially results in a tight
- * loop of -ENOMEM's).
- *
- * Fortunately, refill only ever does something when
- * new modules are loaded, i.e., early during boot up.
- */
- CERROR("Failure to refill session: %d\n", rc2);
- return rc;
- }
-
- if (atomic_read(&set->set_remaining))
- rc |= ptlrpc_check_set(env, set);
-
- /* NB: ptlrpc_check_set has already moved completed request at the
- * head of seq::set_requests
- */
- list_for_each_entry_safe(req, tmp, &set->set_requests, rq_set_chain) {
- if (req->rq_phase != RQ_PHASE_COMPLETE)
- break;
-
- list_del_init(&req->rq_set_chain);
- req->rq_set = NULL;
- ptlrpc_req_finished(req);
- }
-
- if (rc == 0) {
- /*
- * If new requests have been added, make sure to wake up.
- */
- rc = atomic_read(&set->set_new_count);
-
- /* If we have nothing to do, check whether we can take some
- * work from our partner threads.
- */
- if (rc == 0 && pc->pc_npartners > 0) {
- struct ptlrpcd_ctl *partner;
- struct ptlrpc_request_set *ps;
- int first = pc->pc_cursor;
-
- do {
- partner = pc->pc_partners[pc->pc_cursor++];
- if (pc->pc_cursor >= pc->pc_npartners)
- pc->pc_cursor = 0;
- if (!partner)
- continue;
-
- spin_lock(&partner->pc_lock);
- ps = partner->pc_set;
- if (!ps) {
- spin_unlock(&partner->pc_lock);
- continue;
- }
-
- ptlrpc_reqset_get(ps);
- spin_unlock(&partner->pc_lock);
-
- if (atomic_read(&ps->set_new_count)) {
- rc = ptlrpcd_steal_rqset(set, ps);
- if (rc > 0)
- CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n",
- rc, partner->pc_index,
- pc->pc_index);
- }
- ptlrpc_reqset_put(ps);
- } while (rc == 0 && pc->pc_cursor != first);
- }
- }
-
- return rc;
-}
-
-/**
- * Main ptlrpcd thread.
- * ptlrpc's code paths like to execute in process context, so we have this
- * thread which spins on a set which contains the rpcs and sends them.
- *
- */
-static int ptlrpcd(void *arg)
-{
- struct ptlrpcd_ctl *pc = arg;
- struct ptlrpc_request_set *set;
- struct lu_context ses = { 0 };
- struct lu_env env = { .le_ses = &ses };
- int rc = 0;
- int exit = 0;
-
- unshare_fs_struct();
- if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
- CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);
-
- /*
- * Allocate the request set after the thread has been bound
- * above. This is safe because no requests will be queued
- * until all ptlrpcd threads have confirmed that they have
- * successfully started.
- */
- set = ptlrpc_prep_set();
- if (!set) {
- rc = -ENOMEM;
- goto failed;
- }
- spin_lock(&pc->pc_lock);
- pc->pc_set = set;
- spin_unlock(&pc->pc_lock);
- /*
- * XXX So far only "client" ptlrpcd uses an environment. In
- * the future, ptlrpcd thread (or a thread-set) has to given
- * an argument, describing its "scope".
- */
- rc = lu_context_init(&env.le_ctx,
- LCT_CL_THREAD | LCT_REMEMBER | LCT_NOREF);
- if (rc == 0) {
- rc = lu_context_init(env.le_ses,
- LCT_SESSION | LCT_REMEMBER | LCT_NOREF);
- if (rc != 0)
- lu_context_fini(&env.le_ctx);
- }
-
- if (rc != 0)
- goto failed;
-
- complete(&pc->pc_starting);
-
- /*
- * This mainloop strongly resembles ptlrpc_set_wait() except that our
- * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
- * there are requests in the set. New requests come in on the set's
- * new_req_list and ptlrpcd_check() moves them into the set.
- */
- do {
- int timeout;
-
- timeout = ptlrpc_set_next_timeout(set);
-
- lu_context_enter(&env.le_ctx);
- lu_context_enter(env.le_ses);
- if (wait_event_idle_timeout(set->set_waitq,
- ptlrpcd_check(&env, pc),
- (timeout ? timeout : 1) * HZ) == 0)
- ptlrpc_expired_set(set);
-
- lu_context_exit(&env.le_ctx);
- lu_context_exit(env.le_ses);
-
- /*
- * Abort inflight rpcs for forced stop case.
- */
- if (test_bit(LIOD_STOP, &pc->pc_flags)) {
- if (test_bit(LIOD_FORCE, &pc->pc_flags))
- ptlrpc_abort_set(set);
- exit++;
- }
-
- /*
- * Let's make one more loop to make sure that ptlrpcd_check()
- * copied all raced new rpcs into the set so we can kill them.
- */
- } while (exit < 2);
-
- /*
- * Wait for inflight requests to drain.
- */
- if (!list_empty(&set->set_requests))
- ptlrpc_set_wait(set);
- lu_context_fini(&env.le_ctx);
- lu_context_fini(env.le_ses);
-
- complete(&pc->pc_finishing);
-
- return 0;
-failed:
- pc->pc_error = rc;
- complete(&pc->pc_starting);
- return rc;
-}
-
-static void ptlrpcd_ctl_init(struct ptlrpcd_ctl *pc, int index, int cpt)
-{
- pc->pc_index = index;
- pc->pc_cpt = cpt;
- init_completion(&pc->pc_starting);
- init_completion(&pc->pc_finishing);
- spin_lock_init(&pc->pc_lock);
-
- if (index < 0) {
- /* Recovery thread. */
- snprintf(pc->pc_name, sizeof(pc->pc_name), "ptlrpcd_rcv");
- } else {
- /* Regular thread. */
- snprintf(pc->pc_name, sizeof(pc->pc_name),
- "ptlrpcd_%02d_%02d", cpt, index);
- }
-}
-
-/* XXX: We want multiple CPU cores to share the async RPC load. So we
- * start many ptlrpcd threads. We also want to reduce the ptlrpcd
- * overhead caused by data transfer cross-CPU cores. So we bind
- * all ptlrpcd threads to a CPT, in the expectation that CPTs
- * will be defined in a way that matches these boundaries. Within
- * a CPT a ptlrpcd thread can be scheduled on any available core.
- *
- * Each ptlrpcd thread has its own request queue. This can cause
- * response delay if the thread is already busy. To help with
- * this we define partner threads: these are other threads bound
- * to the same CPT which will check for work in each other's
- * request queues if they have no work to do.
- *
- * The desired number of partner threads can be tuned by setting
- * ptlrpcd_partner_group_size. The default is to create pairs of
- * partner threads.
- */
-static int ptlrpcd_partners(struct ptlrpcd *pd, int index)
-{
- struct ptlrpcd_ctl *pc;
- struct ptlrpcd_ctl **ppc;
- int first;
- int i;
- int rc = 0;
- int size;
-
- LASSERT(index >= 0 && index < pd->pd_nthreads);
- pc = &pd->pd_threads[index];
- pc->pc_npartners = pd->pd_groupsize - 1;
-
- if (pc->pc_npartners <= 0)
- goto out;
-
- size = sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners;
- pc->pc_partners = kzalloc_node(size, GFP_NOFS,
- cfs_cpt_spread_node(cfs_cpt_table,
- pc->pc_cpt));
- if (!pc->pc_partners) {
- pc->pc_npartners = 0;
- rc = -ENOMEM;
- goto out;
- }
-
- first = index - index % pd->pd_groupsize;
- ppc = pc->pc_partners;
- for (i = first; i < first + pd->pd_groupsize; i++) {
- if (i != index)
- *ppc++ = &pd->pd_threads[i];
- }
-out:
- return rc;
-}
-
-int ptlrpcd_start(struct ptlrpcd_ctl *pc)
-{
- struct task_struct *task;
- int rc = 0;
-
- /*
- * Do not allow start second thread for one pc.
- */
- if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
- CWARN("Starting second thread (%s) for same pc %p\n",
- pc->pc_name, pc);
- return 0;
- }
-
- task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- goto out_set;
- }
-
- wait_for_completion(&pc->pc_starting);
- rc = pc->pc_error;
- if (rc != 0)
- goto out_set;
-
- return 0;
-
-out_set:
- if (pc->pc_set) {
- struct ptlrpc_request_set *set = pc->pc_set;
-
- spin_lock(&pc->pc_lock);
- pc->pc_set = NULL;
- spin_unlock(&pc->pc_lock);
- ptlrpc_set_destroy(set);
- }
- clear_bit(LIOD_START, &pc->pc_flags);
- return rc;
-}
-
-void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
-{
- if (!test_bit(LIOD_START, &pc->pc_flags)) {
- CWARN("Thread for pc %p was not started\n", pc);
- return;
- }
-
- set_bit(LIOD_STOP, &pc->pc_flags);
- if (force)
- set_bit(LIOD_FORCE, &pc->pc_flags);
- wake_up(&pc->pc_set->set_waitq);
-}
-
-void ptlrpcd_free(struct ptlrpcd_ctl *pc)
-{
- struct ptlrpc_request_set *set = pc->pc_set;
-
- if (!test_bit(LIOD_START, &pc->pc_flags)) {
- CWARN("Thread for pc %p was not started\n", pc);
- goto out;
- }
-
- wait_for_completion(&pc->pc_finishing);
-
- spin_lock(&pc->pc_lock);
- pc->pc_set = NULL;
- spin_unlock(&pc->pc_lock);
- ptlrpc_set_destroy(set);
-
- clear_bit(LIOD_START, &pc->pc_flags);
- clear_bit(LIOD_STOP, &pc->pc_flags);
- clear_bit(LIOD_FORCE, &pc->pc_flags);
-
-out:
- if (pc->pc_npartners > 0) {
- LASSERT(pc->pc_partners);
-
- kfree(pc->pc_partners);
- pc->pc_partners = NULL;
- }
- pc->pc_npartners = 0;
- pc->pc_error = 0;
-}
-
-static void ptlrpcd_fini(void)
-{
- int i;
- int j;
-
- if (ptlrpcds) {
- for (i = 0; i < ptlrpcds_num; i++) {
- if (!ptlrpcds[i])
- break;
- for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
- ptlrpcd_stop(&ptlrpcds[i]->pd_threads[j], 0);
- for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
- ptlrpcd_free(&ptlrpcds[i]->pd_threads[j]);
- kfree(ptlrpcds[i]);
- ptlrpcds[i] = NULL;
- }
- kfree(ptlrpcds);
- }
- ptlrpcds_num = 0;
-
- ptlrpcd_stop(&ptlrpcd_rcv, 0);
- ptlrpcd_free(&ptlrpcd_rcv);
-
- kfree(ptlrpcds_cpt_idx);
- ptlrpcds_cpt_idx = NULL;
-}
-
-static int ptlrpcd_init(void)
-{
- int nthreads;
- int groupsize;
- int size;
- int i;
- int j;
- int rc = 0;
- struct cfs_cpt_table *cptable;
- __u32 *cpts = NULL;
- int ncpts;
- int cpt;
- struct ptlrpcd *pd;
-
- /*
- * Determine the CPTs that ptlrpcd threads will run on.
- */
- cptable = cfs_cpt_table;
- ncpts = cfs_cpt_number(cptable);
- if (ptlrpcd_cpts) {
- struct cfs_expr_list *el;
-
- size = ncpts * sizeof(ptlrpcds_cpt_idx[0]);
- ptlrpcds_cpt_idx = kzalloc(size, GFP_KERNEL);
- if (!ptlrpcds_cpt_idx) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = cfs_expr_list_parse(ptlrpcd_cpts,
- strlen(ptlrpcd_cpts),
- 0, ncpts - 1, &el);
-
- if (rc != 0) {
- CERROR("ptlrpcd_cpts: invalid CPT pattern string: %s",
- ptlrpcd_cpts);
- rc = -EINVAL;
- goto out;
- }
-
- rc = cfs_expr_list_values(el, ncpts, &cpts);
- cfs_expr_list_free(el);
- if (rc <= 0) {
- CERROR("ptlrpcd_cpts: failed to parse CPT array %s: %d\n",
- ptlrpcd_cpts, rc);
- if (rc == 0)
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Create the cpt-to-index map. When there is no match
- * in the cpt table, pick a cpt at random. This could
- * be changed to take the topology of the system into
- * account.
- */
- for (cpt = 0; cpt < ncpts; cpt++) {
- for (i = 0; i < rc; i++)
- if (cpts[i] == cpt)
- break;
- if (i >= rc)
- i = cpt % rc;
- ptlrpcds_cpt_idx[cpt] = i;
- }
-
- cfs_expr_list_values_free(cpts, rc);
- ncpts = rc;
- }
- ptlrpcds_num = ncpts;
-
- size = ncpts * sizeof(ptlrpcds[0]);
- ptlrpcds = kzalloc(size, GFP_KERNEL);
- if (!ptlrpcds) {
- rc = -ENOMEM;
- goto out;
- }
-
- /*
- * The max_ptlrpcds parameter is obsolete, but do something
- * sane if it has been tuned, and complain if
- * ptlrpcd_per_cpt_max has also been tuned.
- */
- if (max_ptlrpcds != 0) {
- CWARN("max_ptlrpcds is obsolete.\n");
- if (ptlrpcd_per_cpt_max == 0) {
- ptlrpcd_per_cpt_max = max_ptlrpcds / ncpts;
- /* Round up if there is a remainder. */
- if (max_ptlrpcds % ncpts != 0)
- ptlrpcd_per_cpt_max++;
- CWARN("Setting ptlrpcd_per_cpt_max = %d\n",
- ptlrpcd_per_cpt_max);
- } else {
- CWARN("ptlrpd_per_cpt_max is also set!\n");
- }
- }
-
- /*
- * The ptlrpcd_bind_policy parameter is obsolete, but do
- * something sane if it has been tuned, and complain if
- * ptlrpcd_partner_group_size is also tuned.
- */
- if (ptlrpcd_bind_policy != 0) {
- CWARN("ptlrpcd_bind_policy is obsolete.\n");
- if (ptlrpcd_partner_group_size == 0) {
- switch (ptlrpcd_bind_policy) {
- case 1: /* PDB_POLICY_NONE */
- case 2: /* PDB_POLICY_FULL */
- ptlrpcd_partner_group_size = 1;
- break;
- case 3: /* PDB_POLICY_PAIR */
- ptlrpcd_partner_group_size = 2;
- break;
- case 4: /* PDB_POLICY_NEIGHBOR */
-#ifdef CONFIG_NUMA
- ptlrpcd_partner_group_size = -1; /* CPT */
-#else
- ptlrpcd_partner_group_size = 3; /* Triplets */
-#endif
- break;
- default: /* Illegal value, use the default. */
- ptlrpcd_partner_group_size = 2;
- break;
- }
- CWARN("Setting ptlrpcd_partner_group_size = %d\n",
- ptlrpcd_partner_group_size);
- } else {
- CWARN("ptlrpcd_partner_group_size is also set!\n");
- }
- }
-
- if (ptlrpcd_partner_group_size == 0)
- ptlrpcd_partner_group_size = 2;
- else if (ptlrpcd_partner_group_size < 0)
- ptlrpcd_partner_group_size = -1;
- else if (ptlrpcd_per_cpt_max > 0 &&
- ptlrpcd_partner_group_size > ptlrpcd_per_cpt_max)
- ptlrpcd_partner_group_size = ptlrpcd_per_cpt_max;
-
- /*
- * Start the recovery thread first.
- */
- set_bit(LIOD_RECOVERY, &ptlrpcd_rcv.pc_flags);
- ptlrpcd_ctl_init(&ptlrpcd_rcv, -1, CFS_CPT_ANY);
- rc = ptlrpcd_start(&ptlrpcd_rcv);
- if (rc < 0)
- goto out;
-
- for (i = 0; i < ncpts; i++) {
- if (!cpts)
- cpt = i;
- else
- cpt = cpts[i];
-
- nthreads = cfs_cpt_weight(cptable, cpt);
- if (ptlrpcd_per_cpt_max > 0 && ptlrpcd_per_cpt_max < nthreads)
- nthreads = ptlrpcd_per_cpt_max;
- if (nthreads < 2)
- nthreads = 2;
-
- if (ptlrpcd_partner_group_size <= 0) {
- groupsize = nthreads;
- } else if (nthreads <= ptlrpcd_partner_group_size) {
- groupsize = nthreads;
- } else {
- groupsize = ptlrpcd_partner_group_size;
- if (nthreads % groupsize != 0)
- nthreads += groupsize - (nthreads % groupsize);
- }
-
- size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
- pd = kzalloc_node(size, GFP_NOFS,
- cfs_cpt_spread_node(cfs_cpt_table, cpt));
- if (!pd) {
- rc = -ENOMEM;
- goto out;
- }
- pd->pd_size = size;
- pd->pd_index = i;
- pd->pd_cpt = cpt;
- pd->pd_cursor = 0;
- pd->pd_nthreads = nthreads;
- pd->pd_groupsize = groupsize;
- ptlrpcds[i] = pd;
-
- /*
- * The ptlrpcd threads in a partner group can access
- * each other's struct ptlrpcd_ctl, so these must be
- * initialized before any thread is started.
- */
- for (j = 0; j < nthreads; j++) {
- ptlrpcd_ctl_init(&pd->pd_threads[j], j, cpt);
- rc = ptlrpcd_partners(pd, j);
- if (rc < 0)
- goto out;
- }
-
- /* XXX: We start nthreads ptlrpc daemons.
- * Each of them can process any non-recovery
- * async RPC to improve overall async RPC
- * efficiency.
- *
- * But there are some issues with async I/O RPCs
- * and async non-I/O RPCs processed in the same
- * set under some cases. The ptlrpcd may be
- * blocked by some async I/O RPC(s), then will
- * cause other async non-I/O RPC(s) can not be
- * processed in time.
- *
- * Maybe we should distinguish blocked async RPCs
- * from non-blocked async RPCs, and process them
- * in different ptlrpcd sets to avoid unnecessary
- * dependency. But how to distribute async RPCs
- * load among all the ptlrpc daemons becomes
- * another trouble.
- */
- for (j = 0; j < nthreads; j++) {
- rc = ptlrpcd_start(&pd->pd_threads[j]);
- if (rc < 0)
- goto out;
- }
- }
-out:
- if (rc != 0)
- ptlrpcd_fini();
-
- return rc;
-}
-
-int ptlrpcd_addref(void)
-{
- int rc = 0;
-
- mutex_lock(&ptlrpcd_mutex);
- if (++ptlrpcd_users == 1) {
- rc = ptlrpcd_init();
- if (rc < 0)
- ptlrpcd_users--;
- }
- mutex_unlock(&ptlrpcd_mutex);
- return rc;
-}
-EXPORT_SYMBOL(ptlrpcd_addref);
-
-void ptlrpcd_decref(void)
-{
- mutex_lock(&ptlrpcd_mutex);
- if (--ptlrpcd_users == 0)
- ptlrpcd_fini();
- mutex_unlock(&ptlrpcd_mutex);
-}
-EXPORT_SYMBOL(ptlrpcd_decref);
-/** @} ptlrpcd */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
deleted file mode 100644
index 2ea0a7ff87dd..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ /dev/null
@@ -1,374 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/recover.c
- *
- * Author: Mike Shaver <shaver@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include <linux/libcfs/libcfs.h>
-
-#include <obd_support.h>
-#include <lustre_ha.h>
-#include <lustre_net.h>
-#include <lustre_import.h>
-#include <lustre_export.h>
-#include <obd.h>
-#include <obd_class.h>
-#include <linux/list.h>
-
-#include "ptlrpc_internal.h"
-
-/**
- * Start recovery on disconnected import.
- * This is done by just attempting a connect
- */
-void ptlrpc_initiate_recovery(struct obd_import *imp)
-{
- CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
- ptlrpc_connect_import(imp);
-}
-
-/**
- * Identify what request from replay list needs to be replayed next
- * (based on what we have already replayed) and send it to server.
- */
-int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
-{
- int rc = 0;
- struct ptlrpc_request *req = NULL, *pos;
- __u64 last_transno;
-
- *inflight = 0;
-
- /* It might have committed some after we last spoke, so make sure we
- * get rid of them now.
- */
- spin_lock(&imp->imp_lock);
- imp->imp_last_transno_checked = 0;
- ptlrpc_free_committed(imp);
- last_transno = imp->imp_last_replay_transno;
-
- CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
- imp, obd2cli_tgt(imp->imp_obd),
- imp->imp_peer_committed_transno, last_transno);
-
- /* Replay all the committed open requests on committed_list first */
- if (!list_empty(&imp->imp_committed_list)) {
- req = list_last_entry(&imp->imp_committed_list,
- struct ptlrpc_request, rq_replay_list);
-
- /* The last request on committed_list hasn't been replayed */
- if (req->rq_transno > last_transno) {
- if (!imp->imp_resend_replay ||
- imp->imp_replay_cursor == &imp->imp_committed_list)
- imp->imp_replay_cursor = imp->imp_replay_cursor->next;
-
- while (imp->imp_replay_cursor !=
- &imp->imp_committed_list) {
- req = list_entry(imp->imp_replay_cursor,
- struct ptlrpc_request,
- rq_replay_list);
- if (req->rq_transno > last_transno)
- break;
-
- req = NULL;
- LASSERT(!list_empty(imp->imp_replay_cursor));
- imp->imp_replay_cursor =
- imp->imp_replay_cursor->next;
- }
- } else {
- /* All requests on committed_list have been replayed */
- imp->imp_replay_cursor = &imp->imp_committed_list;
- req = NULL;
- }
- }
-
- /* All the requests in committed list have been replayed, let's replay
- * the imp_replay_list
- */
- if (!req) {
- struct ptlrpc_request *tmp;
- list_for_each_entry_safe(tmp, pos, &imp->imp_replay_list,
- rq_replay_list) {
- if (tmp->rq_transno > last_transno) {
- req = tmp;
- break;
- }
- }
- }
-
- /* If need to resend the last sent transno (because a reconnect
- * has occurred), then stop on the matching req and send it again.
- * If, however, the last sent transno has been committed then we
- * continue replay from the next request.
- */
- if (req && imp->imp_resend_replay)
- lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
-
- /* The resend replay request may have been removed from the
- * unreplied list.
- */
- if (req && imp->imp_resend_replay &&
- list_empty(&req->rq_unreplied_list)) {
- ptlrpc_add_unreplied(req);
- imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
- }
-
- imp->imp_resend_replay = 0;
- spin_unlock(&imp->imp_lock);
-
- if (req) {
- /* The request should have been added back in unreplied list
- * by ptlrpc_prepare_replay().
- */
- LASSERT(!list_empty(&req->rq_unreplied_list));
-
- rc = ptlrpc_replay_req(req);
- if (rc) {
- CERROR("recovery replay error %d for req %llu\n",
- rc, req->rq_xid);
- return rc;
- }
- *inflight = 1;
- }
- return rc;
-}
-
-/**
- * Schedule resending of request on sending_list. This is done after
- * we completed replaying of requests and locks.
- */
-int ptlrpc_resend(struct obd_import *imp)
-{
- struct ptlrpc_request *req, *next;
-
- /* As long as we're in recovery, nothing should be added to the sending
- * list, so we don't need to hold the lock during this iteration and
- * resend process.
- */
- /* Well... what if lctl recover is called twice at the same time?
- */
- spin_lock(&imp->imp_lock);
- if (imp->imp_state != LUSTRE_IMP_RECOVER) {
- spin_unlock(&imp->imp_lock);
- return -1;
- }
-
- list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
- LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
- "req %p bad\n", req);
- LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
-
- /*
- * If the request is allowed to be sent during replay and it
- * is not timeout yet, then it does not need to be resent.
- */
- if (!ptlrpc_no_resend(req) &&
- (req->rq_timedout || !req->rq_allow_replay))
- ptlrpc_resend_req(req);
- }
- spin_unlock(&imp->imp_lock);
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT, 2);
- return 0;
-}
-
-/**
- * Go through all requests in delayed list and wake their threads
- * for resending
- */
-void ptlrpc_wake_delayed(struct obd_import *imp)
-{
- struct ptlrpc_request *req, *pos;
-
- spin_lock(&imp->imp_lock);
- list_for_each_entry_safe(req, pos, &imp->imp_delayed_list, rq_list) {
- DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
- ptlrpc_client_wake_req(req);
- }
- spin_unlock(&imp->imp_lock);
-}
-
-void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
-{
- struct obd_import *imp = failed_req->rq_import;
-
- CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
- imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
-
- if (ptlrpc_set_import_discon(imp,
- lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
- if (!imp->imp_replayable) {
- CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_obd->obd_name);
- ptlrpc_deactivate_import(imp);
- }
- /* to control recovery via lctl {disable|enable}_recovery */
- if (imp->imp_deactive == 0)
- ptlrpc_connect_import(imp);
- }
-
- /* Wait for recovery to complete and resend. If evicted, then
- * this request will be errored out later.
- */
- spin_lock(&failed_req->rq_lock);
- if (!failed_req->rq_no_resend)
- failed_req->rq_resend = 1;
- spin_unlock(&failed_req->rq_lock);
-}
-
-/**
- * Administratively active/deactive a client.
- * This should only be called by the ioctl interface, currently
- * - the lctl deactivate and activate commands
- * - echo 0/1 >> /sys/fs/lustre/osc/XXX/active
- * - client umount -f (ll_umount_begin)
- */
-int ptlrpc_set_import_active(struct obd_import *imp, int active)
-{
- struct obd_device *obd = imp->imp_obd;
- int rc = 0;
-
- LASSERT(obd);
-
- /* When deactivating, mark import invalid, and abort in-flight
- * requests.
- */
- if (!active) {
- LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n",
- obd2cli_tgt(imp->imp_obd));
-
- /* set before invalidate to avoid messages about imp_inval
- * set without imp_deactive in ptlrpc_import_delay_req
- */
- spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- spin_unlock(&imp->imp_lock);
-
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
-
- ptlrpc_invalidate_import(imp);
- }
-
- /* When activating, mark import valid, and attempt recovery */
- if (active) {
- CDEBUG(D_HA, "setting import %s VALID\n",
- obd2cli_tgt(imp->imp_obd));
-
- spin_lock(&imp->imp_lock);
- imp->imp_deactive = 0;
- spin_unlock(&imp->imp_lock);
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
-
- rc = ptlrpc_recover_import(imp, NULL, 0);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_set_import_active);
-
-/* Attempt to reconnect an import */
-int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
-{
- int rc = 0;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
- atomic_read(&imp->imp_inval_count))
- rc = -EINVAL;
- spin_unlock(&imp->imp_lock);
- if (rc)
- goto out;
-
- /* force import to be disconnected. */
- ptlrpc_set_import_discon(imp, 0);
-
- if (new_uuid) {
- struct obd_uuid uuid;
-
- /* intruct import to use new uuid */
- obd_str2uuid(&uuid, new_uuid);
- rc = import_set_conn_priority(imp, &uuid);
- if (rc)
- goto out;
- }
-
- /* Check if reconnect is already in progress */
- spin_lock(&imp->imp_lock);
- if (imp->imp_state != LUSTRE_IMP_DISCON) {
- imp->imp_force_verify = 1;
- rc = -EALREADY;
- }
- spin_unlock(&imp->imp_lock);
- if (rc)
- goto out;
-
- rc = ptlrpc_connect_import(imp);
- if (rc)
- goto out;
-
- if (!async) {
- CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
- obd2cli_tgt(imp->imp_obd), obd_timeout);
-
- rc = wait_event_idle_timeout(imp->imp_recovery_waitq,
- !ptlrpc_import_in_recovery(imp),
- obd_timeout * HZ);
- CDEBUG(D_HA, "%s: recovery finished\n",
- obd2cli_tgt(imp->imp_obd));
- rc = rc ? 0 : -ETIMEDOUT;
- }
-
-out:
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_recover_import);
-
-int ptlrpc_import_in_recovery(struct obd_import *imp)
-{
- int in_recovery = 1;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL ||
- imp->imp_state == LUSTRE_IMP_CLOSED ||
- imp->imp_state == LUSTRE_IMP_DISCON ||
- imp->imp_obd->obd_no_recov)
- in_recovery = 0;
- spin_unlock(&imp->imp_lock);
-
- return in_recovery;
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
deleted file mode 100644
index 3cb1e075f077..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ /dev/null
@@ -1,2383 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/crypto.h>
-#include <linux/cred.h>
-#include <linux/key.h>
-#include <linux/sched/task.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_net.h>
-#include <lustre_import.h>
-#include <lustre_dlm.h>
-#include <lustre_sec.h>
-
-#include "ptlrpc_internal.h"
-
-/***********************************************
- * policy registers *
- ***********************************************/
-
-static rwlock_t policy_lock;
-static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
- NULL,
-};
-
-int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
-{
- __u16 number = policy->sp_policy;
-
- LASSERT(policy->sp_name);
- LASSERT(policy->sp_cops);
- LASSERT(policy->sp_sops);
-
- if (number >= SPTLRPC_POLICY_MAX)
- return -EINVAL;
-
- write_lock(&policy_lock);
- if (unlikely(policies[number])) {
- write_unlock(&policy_lock);
- return -EALREADY;
- }
- policies[number] = policy;
- write_unlock(&policy_lock);
-
- CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_register_policy);
-
-int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
-{
- __u16 number = policy->sp_policy;
-
- LASSERT(number < SPTLRPC_POLICY_MAX);
-
- write_lock(&policy_lock);
- if (unlikely(!policies[number])) {
- write_unlock(&policy_lock);
- CERROR("%s: already unregistered\n", policy->sp_name);
- return -EINVAL;
- }
-
- LASSERT(policies[number] == policy);
- policies[number] = NULL;
- write_unlock(&policy_lock);
-
- CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_unregister_policy);
-
-static
-struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
-{
- static DEFINE_MUTEX(load_mutex);
- static atomic_t loaded = ATOMIC_INIT(0);
- struct ptlrpc_sec_policy *policy;
- __u16 number = SPTLRPC_FLVR_POLICY(flavor);
- __u16 flag = 0;
-
- if (number >= SPTLRPC_POLICY_MAX)
- return NULL;
-
- while (1) {
- read_lock(&policy_lock);
- policy = policies[number];
- if (policy && !try_module_get(policy->sp_owner))
- policy = NULL;
- if (!policy)
- flag = atomic_read(&loaded);
- read_unlock(&policy_lock);
-
- if (policy || flag != 0 ||
- number != SPTLRPC_POLICY_GSS)
- break;
-
- /* try to load gss module, once */
- mutex_lock(&load_mutex);
- if (atomic_read(&loaded) == 0) {
- if (request_module("ptlrpc_gss") == 0)
- CDEBUG(D_SEC,
- "module ptlrpc_gss loaded on demand\n");
- else
- CERROR("Unable to load module ptlrpc_gss\n");
-
- atomic_set(&loaded, 1);
- }
- mutex_unlock(&load_mutex);
- }
-
- return policy;
-}
-
-__u32 sptlrpc_name2flavor_base(const char *name)
-{
- if (!strcmp(name, "null"))
- return SPTLRPC_FLVR_NULL;
- if (!strcmp(name, "plain"))
- return SPTLRPC_FLVR_PLAIN;
- if (!strcmp(name, "krb5n"))
- return SPTLRPC_FLVR_KRB5N;
- if (!strcmp(name, "krb5a"))
- return SPTLRPC_FLVR_KRB5A;
- if (!strcmp(name, "krb5i"))
- return SPTLRPC_FLVR_KRB5I;
- if (!strcmp(name, "krb5p"))
- return SPTLRPC_FLVR_KRB5P;
-
- return SPTLRPC_FLVR_INVALID;
-}
-EXPORT_SYMBOL(sptlrpc_name2flavor_base);
-
-const char *sptlrpc_flavor2name_base(__u32 flvr)
-{
- __u32 base = SPTLRPC_FLVR_BASE(flvr);
-
- if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
- return "null";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
- return "plain";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
- return "krb5n";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
- return "krb5a";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
- return "krb5i";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
- return "krb5p";
-
- CERROR("invalid wire flavor 0x%x\n", flvr);
- return "invalid";
-}
-EXPORT_SYMBOL(sptlrpc_flavor2name_base);
-
-char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
- char *buf, int bufsize)
-{
- if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
- snprintf(buf, bufsize, "hash:%s",
- sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
- else
- snprintf(buf, bufsize, "%s",
- sptlrpc_flavor2name_base(sf->sf_rpc));
-
- buf[bufsize - 1] = '\0';
- return buf;
-}
-EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
-
-char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
-{
- strlcpy(buf, sptlrpc_flavor2name_base(sf->sf_rpc), bufsize);
-
- /*
- * currently we don't support customized bulk specification for
- * flavors other than plain
- */
- if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
- char bspec[16];
-
- bspec[0] = '-';
- sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
- strlcat(buf, bspec, bufsize);
- }
-
- return buf;
-}
-EXPORT_SYMBOL(sptlrpc_flavor2name);
-
-static char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
-{
- buf[0] = '\0';
-
- if (flags & PTLRPC_SEC_FL_REVERSE)
- strlcat(buf, "reverse,", bufsize);
- if (flags & PTLRPC_SEC_FL_ROOTONLY)
- strlcat(buf, "rootonly,", bufsize);
- if (flags & PTLRPC_SEC_FL_UDESC)
- strlcat(buf, "udesc,", bufsize);
- if (flags & PTLRPC_SEC_FL_BULK)
- strlcat(buf, "bulk,", bufsize);
- if (buf[0] == '\0')
- strlcat(buf, "-,", bufsize);
-
- return buf;
-}
-
-/**************************************************
- * client context APIs *
- **************************************************/
-
-static
-struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
-{
- struct vfs_cred vcred;
- int create = 1, remove_dead = 1;
-
- LASSERT(sec);
- LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
-
- if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
- PTLRPC_SEC_FL_ROOTONLY)) {
- vcred.vc_uid = 0;
- vcred.vc_gid = 0;
- if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
- create = 0;
- remove_dead = 0;
- }
- } else {
- vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
- vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
- }
-
- return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
- create, remove_dead);
-}
-
-struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
-{
- atomic_inc(&ctx->cc_refcount);
- return ctx;
-}
-EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
-
-void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
-{
- struct ptlrpc_sec *sec = ctx->cc_sec;
-
- LASSERT(sec);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
-
- if (!atomic_dec_and_test(&ctx->cc_refcount))
- return;
-
- sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
-}
-EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
-
-static int import_sec_check_expire(struct obd_import *imp)
-{
- int adapt = 0;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_sec_expire &&
- imp->imp_sec_expire < ktime_get_real_seconds()) {
- adapt = 1;
- imp->imp_sec_expire = 0;
- }
- spin_unlock(&imp->imp_lock);
-
- if (!adapt)
- return 0;
-
- CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
- return sptlrpc_import_sec_adapt(imp, NULL, NULL);
-}
-
-/**
- * Get and validate the client side ptlrpc security facilities from
- * \a imp. There is a race condition on client reconnect when the import is
- * being destroyed while there are outstanding client bound requests. In
- * this case do not output any error messages if import secuity is not
- * found.
- *
- * \param[in] imp obd import associated with client
- * \param[out] sec client side ptlrpc security
- *
- * \retval 0 if security retrieved successfully
- * \retval -ve errno if there was a problem
- */
-static int import_sec_validate_get(struct obd_import *imp,
- struct ptlrpc_sec **sec)
-{
- int rc;
-
- if (unlikely(imp->imp_sec_expire)) {
- rc = import_sec_check_expire(imp);
- if (rc)
- return rc;
- }
-
- *sec = sptlrpc_import_sec_ref(imp);
- if (!*sec) {
- CERROR("import %p (%s) with no sec\n",
- imp, ptlrpc_import_state_name(imp->imp_state));
- return -EACCES;
- }
-
- if (unlikely((*sec)->ps_dying)) {
- CERROR("attempt to use dying sec %p\n", sec);
- sptlrpc_sec_put(*sec);
- return -EACCES;
- }
-
- return 0;
-}
-
-/**
- * Given a \a req, find or allocate a appropriate context for it.
- * \pre req->rq_cli_ctx == NULL.
- *
- * \retval 0 succeed, and req->rq_cli_ctx is set.
- * \retval -ev error number, and req->rq_cli_ctx == NULL.
- */
-int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
-{
- struct obd_import *imp = req->rq_import;
- struct ptlrpc_sec *sec;
- int rc;
-
- LASSERT(!req->rq_cli_ctx);
- LASSERT(imp);
-
- rc = import_sec_validate_get(imp, &sec);
- if (rc)
- return rc;
-
- req->rq_cli_ctx = get_my_ctx(sec);
-
- sptlrpc_sec_put(sec);
-
- if (!req->rq_cli_ctx) {
- CERROR("req %p: fail to get context\n", req);
- return -ECONNREFUSED;
- }
-
- return 0;
-}
-
-/**
- * Drop the context for \a req.
- * \pre req->rq_cli_ctx != NULL.
- * \post req->rq_cli_ctx == NULL.
- *
- * If \a sync == 0, this function should return quickly without sleep;
- * otherwise it might trigger and wait for the whole process of sending
- * an context-destroying rpc to server.
- */
-void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
-{
- LASSERT(req);
- LASSERT(req->rq_cli_ctx);
-
- /* request might be asked to release earlier while still
- * in the context waiting list.
- */
- if (!list_empty(&req->rq_ctx_chain)) {
- spin_lock(&req->rq_cli_ctx->cc_lock);
- list_del_init(&req->rq_ctx_chain);
- spin_unlock(&req->rq_cli_ctx->cc_lock);
- }
-
- sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
- req->rq_cli_ctx = NULL;
-}
-
-static
-int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
- struct ptlrpc_cli_ctx *oldctx,
- struct ptlrpc_cli_ctx *newctx)
-{
- struct sptlrpc_flavor old_flvr;
- char *reqmsg = NULL; /* to workaround old gcc */
- int reqmsg_size;
- int rc = 0;
-
- LASSERT(req->rq_reqmsg);
- LASSERT(req->rq_reqlen);
- LASSERT(req->rq_replen);
-
- CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
- req,
- oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
- newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
- oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
- newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
-
- /* save flavor */
- old_flvr = req->rq_flvr;
-
- /* save request message */
- reqmsg_size = req->rq_reqlen;
- if (reqmsg_size != 0) {
- reqmsg = kvzalloc(reqmsg_size, GFP_NOFS);
- if (!reqmsg)
- return -ENOMEM;
- memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
- }
-
- /* release old req/rep buf */
- req->rq_cli_ctx = oldctx;
- sptlrpc_cli_free_reqbuf(req);
- sptlrpc_cli_free_repbuf(req);
- req->rq_cli_ctx = newctx;
-
- /* recalculate the flavor */
- sptlrpc_req_set_flavor(req, 0);
-
- /* alloc new request buffer
- * we don't need to alloc reply buffer here, leave it to the
- * rest procedure of ptlrpc
- */
- if (reqmsg_size != 0) {
- rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
- if (!rc) {
- LASSERT(req->rq_reqmsg);
- memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
- } else {
- CWARN("failed to alloc reqbuf: %d\n", rc);
- req->rq_flvr = old_flvr;
- }
-
- kvfree(reqmsg);
- }
- return rc;
-}
-
-/**
- * If current context of \a req is dead somehow, e.g. we just switched flavor
- * thus marked original contexts dead, we'll find a new context for it. if
- * no switch is needed, \a req will end up with the same context.
- *
- * \note a request must have a context, to keep other parts of code happy.
- * In any case of failure during the switching, we must restore the old one.
- */
-static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
- struct ptlrpc_cli_ctx *newctx;
- int rc;
-
- LASSERT(oldctx);
-
- sptlrpc_cli_ctx_get(oldctx);
- sptlrpc_req_put_ctx(req, 0);
-
- rc = sptlrpc_req_get_ctx(req);
- if (unlikely(rc)) {
- LASSERT(!req->rq_cli_ctx);
-
- /* restore old ctx */
- req->rq_cli_ctx = oldctx;
- return rc;
- }
-
- newctx = req->rq_cli_ctx;
- LASSERT(newctx);
-
- if (unlikely(newctx == oldctx &&
- test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
- /*
- * still get the old dead ctx, usually means system too busy
- */
- CDEBUG(D_SEC,
- "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
- newctx, newctx->cc_flags);
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
- } else if (unlikely(!test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags))) {
- /*
- * new ctx not up to date yet
- */
- CDEBUG(D_SEC,
- "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
- newctx, newctx->cc_flags);
- } else {
- /*
- * it's possible newctx == oldctx if we're switching
- * subflavor with the same sec.
- */
- rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
- if (rc) {
- /* restore old ctx */
- sptlrpc_req_put_ctx(req, 0);
- req->rq_cli_ctx = oldctx;
- return rc;
- }
-
- LASSERT(req->rq_cli_ctx == newctx);
- }
-
- sptlrpc_cli_ctx_put(oldctx, 1);
- return 0;
-}
-
-static
-int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
-{
- if (cli_ctx_is_refreshed(ctx))
- return 1;
- return 0;
-}
-
-static
-int ctx_refresh_timeout(struct ptlrpc_request *req)
-{
- int rc;
-
- /* conn_cnt is needed in expire_one_request */
- lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
-
- rc = ptlrpc_expire_one_request(req, 1);
- /* if we started recovery, we should mark this ctx dead; otherwise
- * in case of lgssd died nobody would retire this ctx, following
- * connecting will still find the same ctx thus cause deadlock.
- * there's an assumption that expire time of the request should be
- * later than the context refresh expire time.
- */
- if (rc == 0)
- req->rq_cli_ctx->cc_ops->force_die(req->rq_cli_ctx, 0);
- return rc;
-}
-
-static
-void ctx_refresh_interrupt(struct ptlrpc_request *req)
-{
- spin_lock(&req->rq_lock);
- req->rq_intr = 1;
- spin_unlock(&req->rq_lock);
-}
-
-static
-void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
-{
- spin_lock(&ctx->cc_lock);
- if (!list_empty(&req->rq_ctx_chain))
- list_del_init(&req->rq_ctx_chain);
- spin_unlock(&ctx->cc_lock);
-}
-
-/**
- * To refresh the context of \req, if it's not up-to-date.
- * \param timeout
- * - < 0: don't wait
- * - = 0: wait until success or fatal error occur
- * - > 0: timeout value (in seconds)
- *
- * The status of the context could be subject to be changed by other threads
- * at any time. We allow this race, but once we return with 0, the caller will
- * suppose it's uptodated and keep using it until the owning rpc is done.
- *
- * \retval 0 only if the context is uptodated.
- * \retval -ev error number.
- */
-int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec *sec;
- int rc;
-
- LASSERT(ctx);
-
- if (req->rq_ctx_init || req->rq_ctx_fini)
- return 0;
-
- /*
- * during the process a request's context might change type even
- * (e.g. from gss ctx to null ctx), so each loop we need to re-check
- * everything
- */
-again:
- rc = import_sec_validate_get(req->rq_import, &sec);
- if (rc)
- return rc;
-
- if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
- CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
- req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
- req_off_ctx_list(req, ctx);
- sptlrpc_req_replace_dead_ctx(req);
- ctx = req->rq_cli_ctx;
- }
- sptlrpc_sec_put(sec);
-
- if (cli_ctx_is_eternal(ctx))
- return 0;
-
- if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
- LASSERT(ctx->cc_ops->refresh);
- ctx->cc_ops->refresh(ctx);
- }
- LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
-
- LASSERT(ctx->cc_ops->validate);
- if (ctx->cc_ops->validate(ctx) == 0) {
- req_off_ctx_list(req, ctx);
- return 0;
- }
-
- if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
- spin_lock(&req->rq_lock);
- req->rq_err = 1;
- spin_unlock(&req->rq_lock);
- req_off_ctx_list(req, ctx);
- return -EPERM;
- }
-
- /*
- * There's a subtle issue for resending RPCs, suppose following
- * situation:
- * 1. the request was sent to server.
- * 2. recovery was kicked start, after finished the request was
- * marked as resent.
- * 3. resend the request.
- * 4. old reply from server received, we accept and verify the reply.
- * this has to be success, otherwise the error will be aware
- * by application.
- * 5. new reply from server received, dropped by LNet.
- *
- * Note the xid of old & new request is the same. We can't simply
- * change xid for the resent request because the server replies on
- * it for reply reconstruction.
- *
- * Commonly the original context should be uptodate because we
- * have a expiry nice time; server will keep its context because
- * we at least hold a ref of old context which prevent context
- * destroying RPC being sent. So server still can accept the request
- * and finish the RPC. But if that's not the case:
- * 1. If server side context has been trimmed, a NO_CONTEXT will
- * be returned, gss_cli_ctx_verify/unseal will switch to new
- * context by force.
- * 2. Current context never be refreshed, then we are fine: we
- * never really send request with old context before.
- */
- if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
- unlikely(req->rq_reqmsg) &&
- lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
- req_off_ctx_list(req, ctx);
- return 0;
- }
-
- if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
- req_off_ctx_list(req, ctx);
- /*
- * don't switch ctx if import was deactivated
- */
- if (req->rq_import->imp_deactive) {
- spin_lock(&req->rq_lock);
- req->rq_err = 1;
- spin_unlock(&req->rq_lock);
- return -EINTR;
- }
-
- rc = sptlrpc_req_replace_dead_ctx(req);
- if (rc) {
- LASSERT(ctx == req->rq_cli_ctx);
- CERROR("req %p: failed to replace dead ctx %p: %d\n",
- req, ctx, rc);
- spin_lock(&req->rq_lock);
- req->rq_err = 1;
- spin_unlock(&req->rq_lock);
- return rc;
- }
-
- ctx = req->rq_cli_ctx;
- goto again;
- }
-
- /*
- * Now we're sure this context is during upcall, add myself into
- * waiting list
- */
- spin_lock(&ctx->cc_lock);
- if (list_empty(&req->rq_ctx_chain))
- list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
- spin_unlock(&ctx->cc_lock);
-
- if (timeout < 0)
- return -EWOULDBLOCK;
-
- /* Clear any flags that may be present from previous sends */
- LASSERT(req->rq_receiving_reply == 0);
- spin_lock(&req->rq_lock);
- req->rq_err = 0;
- req->rq_timedout = 0;
- req->rq_resend = 0;
- req->rq_restart = 0;
- spin_unlock(&req->rq_lock);
-
- rc = wait_event_idle_timeout(req->rq_reply_waitq,
- ctx_check_refresh(ctx),
- timeout * HZ);
- if (rc == 0 && ctx_refresh_timeout(req) == 0) {
- /* Keep waiting, but enable some signals */
- rc = l_wait_event_abortable(req->rq_reply_waitq,
- ctx_check_refresh(ctx));
- if (rc == 0)
- rc = 1;
- }
-
- if (rc > 0)
- /* condition is true */
- rc = 0;
- else if (rc == 0)
- /* Timed out */
- rc = -ETIMEDOUT;
- else {
- /* Aborted by signal */
- rc = -EINTR;
- ctx_refresh_interrupt(req);
- }
-
- /*
- * following cases could lead us here:
- * - successfully refreshed;
- * - interrupted;
- * - timedout, and we don't want recover from the failure;
- * - timedout, and waked up upon recovery finished;
- * - someone else mark this ctx dead by force;
- * - someone invalidate the req and call ptlrpc_client_wake_req(),
- * e.g. ptlrpc_abort_inflight();
- */
- if (!cli_ctx_is_refreshed(ctx)) {
- /* timed out or interrupted */
- req_off_ctx_list(req, ctx);
-
- LASSERT(rc != 0);
- return rc;
- }
-
- goto again;
-}
-
-/**
- * Initialize flavor settings for \a req, according to \a opcode.
- *
- * \note this could be called in two situations:
- * - new request from ptlrpc_pre_req(), with proper @opcode
- * - old request which changed ctx in the middle, with @opcode == 0
- */
-void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
-{
- struct ptlrpc_sec *sec;
-
- LASSERT(req->rq_import);
- LASSERT(req->rq_cli_ctx);
- LASSERT(req->rq_cli_ctx->cc_sec);
- LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
-
- /* special security flags according to opcode */
- switch (opcode) {
- case OST_READ:
- case MDS_READPAGE:
- case MGS_CONFIG_READ:
- case OBD_IDX_READ:
- req->rq_bulk_read = 1;
- break;
- case OST_WRITE:
- case MDS_WRITEPAGE:
- req->rq_bulk_write = 1;
- break;
- case SEC_CTX_INIT:
- req->rq_ctx_init = 1;
- break;
- case SEC_CTX_FINI:
- req->rq_ctx_fini = 1;
- break;
- case 0:
- /* init/fini rpc won't be resend, so can't be here */
- LASSERT(req->rq_ctx_init == 0);
- LASSERT(req->rq_ctx_fini == 0);
-
- /* cleanup flags, which should be recalculated */
- req->rq_pack_udesc = 0;
- req->rq_pack_bulk = 0;
- break;
- }
-
- sec = req->rq_cli_ctx->cc_sec;
-
- spin_lock(&sec->ps_lock);
- req->rq_flvr = sec->ps_flvr;
- spin_unlock(&sec->ps_lock);
-
- /* force SVC_NULL for context initiation rpc, SVC_INTG for context
- * destruction rpc
- */
- if (unlikely(req->rq_ctx_init))
- flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
- else if (unlikely(req->rq_ctx_fini))
- flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
-
- /* user descriptor flag, null security can't do it anyway */
- if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
- (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
- req->rq_pack_udesc = 1;
-
- /* bulk security flag */
- if ((req->rq_bulk_read || req->rq_bulk_write) &&
- sptlrpc_flavor_has_bulk(&req->rq_flvr))
- req->rq_pack_bulk = 1;
-}
-
-void sptlrpc_request_out_callback(struct ptlrpc_request *req)
-{
- if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
- return;
-
- LASSERT(req->rq_clrbuf);
- if (req->rq_pool || !req->rq_reqbuf)
- return;
-
- kvfree(req->rq_reqbuf);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
-}
-
-/**
- * Given an import \a imp, check whether current user has a valid context
- * or not. We may create a new context and try to refresh it, and try
- * repeatedly try in case of non-fatal errors. Return 0 means success.
- */
-int sptlrpc_import_check_ctx(struct obd_import *imp)
-{
- struct ptlrpc_sec *sec;
- struct ptlrpc_cli_ctx *ctx;
- struct ptlrpc_request *req = NULL;
- int rc;
-
- might_sleep();
-
- sec = sptlrpc_import_sec_ref(imp);
- ctx = get_my_ctx(sec);
- sptlrpc_sec_put(sec);
-
- if (!ctx)
- return -ENOMEM;
-
- if (cli_ctx_is_eternal(ctx) ||
- ctx->cc_ops->validate(ctx) == 0) {
- sptlrpc_cli_ctx_put(ctx, 1);
- return 0;
- }
-
- if (cli_ctx_is_error(ctx)) {
- sptlrpc_cli_ctx_put(ctx, 1);
- return -EACCES;
- }
-
- req = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (!req)
- return -ENOMEM;
-
- ptlrpc_cli_req_init(req);
- atomic_set(&req->rq_refcount, 10000);
-
- req->rq_import = imp;
- req->rq_flvr = sec->ps_flvr;
- req->rq_cli_ctx = ctx;
-
- rc = sptlrpc_req_refresh_ctx(req, 0);
- LASSERT(list_empty(&req->rq_ctx_chain));
- sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
- ptlrpc_request_cache_free(req);
-
- return rc;
-}
-
-/**
- * Used by ptlrpc client, to perform the pre-defined security transformation
- * upon the request message of \a req. After this function called,
- * req->rq_reqmsg is still accessible as clear text.
- */
-int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- int rc = 0;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(req->rq_reqbuf || req->rq_clrbuf);
-
- /* we wrap bulk request here because now we can be sure
- * the context is uptodate.
- */
- if (req->rq_bulk) {
- rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
- if (rc)
- return rc;
- }
-
- switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
- case SPTLRPC_SVC_NULL:
- case SPTLRPC_SVC_AUTH:
- case SPTLRPC_SVC_INTG:
- LASSERT(ctx->cc_ops->sign);
- rc = ctx->cc_ops->sign(ctx, req);
- break;
- case SPTLRPC_SVC_PRIV:
- LASSERT(ctx->cc_ops->seal);
- rc = ctx->cc_ops->seal(ctx, req);
- break;
- default:
- LBUG();
- }
-
- if (rc == 0) {
- LASSERT(req->rq_reqdata_len);
- LASSERT(req->rq_reqdata_len % 8 == 0);
- LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
- }
-
- return rc;
-}
-
-static int do_cli_unwrap_reply(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- int rc;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(req->rq_repbuf);
- LASSERT(req->rq_repdata);
- LASSERT(!req->rq_repmsg);
-
- req->rq_rep_swab_mask = 0;
-
- rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
- switch (rc) {
- case 1:
- lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
- case 0:
- break;
- default:
- CERROR("failed unpack reply: x%llu\n", req->rq_xid);
- return -EPROTO;
- }
-
- if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
- CERROR("replied data length %d too small\n",
- req->rq_repdata_len);
- return -EPROTO;
- }
-
- if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
- SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
- CERROR("reply policy %u doesn't match request policy %u\n",
- SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
- SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
- return -EPROTO;
- }
-
- switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
- case SPTLRPC_SVC_NULL:
- case SPTLRPC_SVC_AUTH:
- case SPTLRPC_SVC_INTG:
- LASSERT(ctx->cc_ops->verify);
- rc = ctx->cc_ops->verify(ctx, req);
- break;
- case SPTLRPC_SVC_PRIV:
- LASSERT(ctx->cc_ops->unseal);
- rc = ctx->cc_ops->unseal(ctx, req);
- break;
- default:
- LBUG();
- }
- LASSERT(rc || req->rq_repmsg || req->rq_resend);
-
- if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
- !req->rq_ctx_init)
- req->rq_rep_swab_mask = 0;
- return rc;
-}
-
-/**
- * Used by ptlrpc client, to perform security transformation upon the reply
- * message of \a req. After return successfully, req->rq_repmsg points to
- * the reply message in clear text.
- *
- * \pre the reply buffer should have been un-posted from LNet, so nothing is
- * going to change.
- */
-int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
-{
- LASSERT(req->rq_repbuf);
- LASSERT(!req->rq_repdata);
- LASSERT(!req->rq_repmsg);
- LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
-
- if (req->rq_reply_off == 0 &&
- (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- CERROR("real reply with offset 0\n");
- return -EPROTO;
- }
-
- if (req->rq_reply_off % 8 != 0) {
- CERROR("reply at odd offset %u\n", req->rq_reply_off);
- return -EPROTO;
- }
-
- req->rq_repdata = (struct lustre_msg *)
- (req->rq_repbuf + req->rq_reply_off);
- req->rq_repdata_len = req->rq_nob_received;
-
- return do_cli_unwrap_reply(req);
-}
-
-/**
- * Used by ptlrpc client, to perform security transformation upon the early
- * reply message of \a req. We expect the rq_reply_off is 0, and
- * rq_nob_received is the early reply size.
- *
- * Because the receive buffer might be still posted, the reply data might be
- * changed at any time, no matter we're holding rq_lock or not. For this reason
- * we allocate a separate ptlrpc_request and reply buffer for early reply
- * processing.
- *
- * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
- * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
- * \a *req_ret to release it.
- * \retval -ev error number, and \a req_ret will not be set.
- */
-int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
- struct ptlrpc_request **req_ret)
-{
- struct ptlrpc_request *early_req;
- char *early_buf;
- int early_bufsz, early_size;
- int rc;
-
- early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (!early_req)
- return -ENOMEM;
-
- ptlrpc_cli_req_init(early_req);
-
- early_size = req->rq_nob_received;
- early_bufsz = size_roundup_power2(early_size);
- early_buf = kvzalloc(early_bufsz, GFP_NOFS);
- if (!early_buf) {
- rc = -ENOMEM;
- goto err_req;
- }
-
- /* sanity checkings and copy data out, do it inside spinlock */
- spin_lock(&req->rq_lock);
-
- if (req->rq_replied) {
- spin_unlock(&req->rq_lock);
- rc = -EALREADY;
- goto err_buf;
- }
-
- LASSERT(req->rq_repbuf);
- LASSERT(!req->rq_repdata);
- LASSERT(!req->rq_repmsg);
-
- if (req->rq_reply_off != 0) {
- CERROR("early reply with offset %u\n", req->rq_reply_off);
- spin_unlock(&req->rq_lock);
- rc = -EPROTO;
- goto err_buf;
- }
-
- if (req->rq_nob_received != early_size) {
- /* even another early arrived the size should be the same */
- CERROR("data size has changed from %u to %u\n",
- early_size, req->rq_nob_received);
- spin_unlock(&req->rq_lock);
- rc = -EINVAL;
- goto err_buf;
- }
-
- if (req->rq_nob_received < sizeof(struct lustre_msg)) {
- CERROR("early reply length %d too small\n",
- req->rq_nob_received);
- spin_unlock(&req->rq_lock);
- rc = -EALREADY;
- goto err_buf;
- }
-
- memcpy(early_buf, req->rq_repbuf, early_size);
- spin_unlock(&req->rq_lock);
-
- early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
- early_req->rq_flvr = req->rq_flvr;
- early_req->rq_repbuf = early_buf;
- early_req->rq_repbuf_len = early_bufsz;
- early_req->rq_repdata = (struct lustre_msg *)early_buf;
- early_req->rq_repdata_len = early_size;
- early_req->rq_early = 1;
- early_req->rq_reqmsg = req->rq_reqmsg;
-
- rc = do_cli_unwrap_reply(early_req);
- if (rc) {
- DEBUG_REQ(D_ADAPTTO, early_req,
- "error %d unwrap early reply", rc);
- goto err_ctx;
- }
-
- LASSERT(early_req->rq_repmsg);
- *req_ret = early_req;
- return 0;
-
-err_ctx:
- sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
-err_buf:
- kvfree(early_buf);
-err_req:
- ptlrpc_request_cache_free(early_req);
- return rc;
-}
-
-/**
- * Used by ptlrpc client, to release a processed early reply \a early_req.
- *
- * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
- */
-void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
-{
- LASSERT(early_req->rq_repbuf);
- LASSERT(early_req->rq_repdata);
- LASSERT(early_req->rq_repmsg);
-
- sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
- kvfree(early_req->rq_repbuf);
- ptlrpc_request_cache_free(early_req);
-}
-
-/**************************************************
- * sec ID *
- **************************************************/
-
-/*
- * "fixed" sec (e.g. null) use sec_id < 0
- */
-static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
-
-int sptlrpc_get_next_secid(void)
-{
- return atomic_inc_return(&sptlrpc_sec_id);
-}
-EXPORT_SYMBOL(sptlrpc_get_next_secid);
-
-/**************************************************
- * client side high-level security APIs *
- **************************************************/
-
-static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
- int grace, int force)
-{
- struct ptlrpc_sec_policy *policy = sec->ps_policy;
-
- LASSERT(policy->sp_cops);
- LASSERT(policy->sp_cops->flush_ctx_cache);
-
- return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
-}
-
-static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
-{
- struct ptlrpc_sec_policy *policy = sec->ps_policy;
-
- LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
- LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
- LASSERT(policy->sp_cops->destroy_sec);
-
- CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
-
- policy->sp_cops->destroy_sec(sec);
- sptlrpc_policy_put(policy);
-}
-
-static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
-{
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
-
- if (sec->ps_policy->sp_cops->kill_sec) {
- sec->ps_policy->sp_cops->kill_sec(sec);
-
- sec_cop_flush_ctx_cache(sec, -1, 1, 1);
- }
-}
-
-static struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
-{
- if (sec)
- atomic_inc(&sec->ps_refcount);
-
- return sec;
-}
-
-void sptlrpc_sec_put(struct ptlrpc_sec *sec)
-{
- if (sec) {
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
-
- if (atomic_dec_and_test(&sec->ps_refcount)) {
- sptlrpc_gc_del_sec(sec);
- sec_cop_destroy_sec(sec);
- }
- }
-}
-EXPORT_SYMBOL(sptlrpc_sec_put);
-
-/*
- * policy module is responsible for taking reference of import
- */
-static
-struct ptlrpc_sec *sptlrpc_sec_create(struct obd_import *imp,
- struct ptlrpc_svc_ctx *svc_ctx,
- struct sptlrpc_flavor *sf,
- enum lustre_sec_part sp)
-{
- struct ptlrpc_sec_policy *policy;
- struct ptlrpc_sec *sec;
- char str[32];
-
- if (svc_ctx) {
- LASSERT(imp->imp_dlm_fake == 1);
-
- CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
- imp->imp_obd->obd_type->typ_name,
- imp->imp_obd->obd_name,
- sptlrpc_flavor2name(sf, str, sizeof(str)));
-
- policy = sptlrpc_policy_get(svc_ctx->sc_policy);
- sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
- } else {
- LASSERT(imp->imp_dlm_fake == 0);
-
- CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
- imp->imp_obd->obd_type->typ_name,
- imp->imp_obd->obd_name,
- sptlrpc_flavor2name(sf, str, sizeof(str)));
-
- policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
- if (!policy) {
- CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
- return NULL;
- }
- }
-
- sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
- if (sec) {
- atomic_inc(&sec->ps_refcount);
-
- sec->ps_part = sp;
-
- if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
- sptlrpc_gc_add_sec(sec);
- } else {
- sptlrpc_policy_put(policy);
- }
-
- return sec;
-}
-
-struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
-{
- struct ptlrpc_sec *sec;
-
- spin_lock(&imp->imp_lock);
- sec = sptlrpc_sec_get(imp->imp_sec);
- spin_unlock(&imp->imp_lock);
-
- return sec;
-}
-EXPORT_SYMBOL(sptlrpc_import_sec_ref);
-
-static void sptlrpc_import_sec_install(struct obd_import *imp,
- struct ptlrpc_sec *sec)
-{
- struct ptlrpc_sec *old_sec;
-
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
-
- spin_lock(&imp->imp_lock);
- old_sec = imp->imp_sec;
- imp->imp_sec = sec;
- spin_unlock(&imp->imp_lock);
-
- if (old_sec) {
- sptlrpc_sec_kill(old_sec);
-
- /* balance the ref taken by this import */
- sptlrpc_sec_put(old_sec);
- }
-}
-
-static inline
-int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
-{
- return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
-}
-
-static inline
-void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
-{
- *dst = *src;
-}
-
-static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
- struct ptlrpc_sec *sec,
- struct sptlrpc_flavor *sf)
-{
- char str1[32], str2[32];
-
- if (sec->ps_flvr.sf_flags != sf->sf_flags)
- CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
- sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
- str1, sizeof(str1)),
- sptlrpc_secflags2str(sf->sf_flags,
- str2, sizeof(str2)));
-
- spin_lock(&sec->ps_lock);
- flavor_copy(&sec->ps_flvr, sf);
- spin_unlock(&sec->ps_lock);
-}
-
-/**
- * To get an appropriate ptlrpc_sec for the \a imp, according to the current
- * configuration. Upon called, imp->imp_sec may or may not be NULL.
- *
- * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
- * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
- */
-int sptlrpc_import_sec_adapt(struct obd_import *imp,
- struct ptlrpc_svc_ctx *svc_ctx,
- struct sptlrpc_flavor *flvr)
-{
- struct ptlrpc_connection *conn;
- struct sptlrpc_flavor sf;
- struct ptlrpc_sec *sec, *newsec;
- enum lustre_sec_part sp;
- char str[24];
- int rc = 0;
-
- might_sleep();
-
- if (!imp)
- return 0;
-
- conn = imp->imp_connection;
-
- if (!svc_ctx) {
- struct client_obd *cliobd = &imp->imp_obd->u.cli;
- /*
- * normal import, determine flavor from rule set, except
- * for mgc the flavor is predetermined.
- */
- if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
- sf = cliobd->cl_flvr_mgc;
- else
- sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
- cliobd->cl_sp_to,
- &cliobd->cl_target_uuid,
- conn->c_self, &sf);
-
- sp = imp->imp_obd->u.cli.cl_sp_me;
- } else {
- /* reverse import, determine flavor from incoming request */
- sf = *flvr;
-
- if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
- sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
- PTLRPC_SEC_FL_ROOTONLY;
-
- sp = sptlrpc_target_sec_part(imp->imp_obd);
- }
-
- sec = sptlrpc_import_sec_ref(imp);
- if (sec) {
- char str2[24];
-
- if (flavor_equal(&sf, &sec->ps_flvr))
- goto out;
-
- CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
- imp->imp_obd->obd_name,
- obd_uuid2str(&conn->c_remote_uuid),
- sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
- sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
-
- if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
- SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
- SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
- SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
- sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
- goto out;
- }
- } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
- SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
- CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
- imp->imp_obd->obd_name,
- obd_uuid2str(&conn->c_remote_uuid),
- LNET_NIDNET(conn->c_self),
- sptlrpc_flavor2name(&sf, str, sizeof(str)));
- }
-
- mutex_lock(&imp->imp_sec_mutex);
-
- newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
- if (newsec) {
- sptlrpc_import_sec_install(imp, newsec);
- } else {
- CERROR("import %s->%s: failed to create new sec\n",
- imp->imp_obd->obd_name,
- obd_uuid2str(&conn->c_remote_uuid));
- rc = -EPERM;
- }
-
- mutex_unlock(&imp->imp_sec_mutex);
-out:
- sptlrpc_sec_put(sec);
- return rc;
-}
-
-void sptlrpc_import_sec_put(struct obd_import *imp)
-{
- if (imp->imp_sec) {
- sptlrpc_sec_kill(imp->imp_sec);
-
- sptlrpc_sec_put(imp->imp_sec);
- imp->imp_sec = NULL;
- }
-}
-
-static void import_flush_ctx_common(struct obd_import *imp,
- uid_t uid, int grace, int force)
-{
- struct ptlrpc_sec *sec;
-
- if (!imp)
- return;
-
- sec = sptlrpc_import_sec_ref(imp);
- if (!sec)
- return;
-
- sec_cop_flush_ctx_cache(sec, uid, grace, force);
- sptlrpc_sec_put(sec);
-}
-
-void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
-{
- import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
- 1, 1);
-}
-EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
-
-void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
-{
- import_flush_ctx_common(imp, -1, 1, 1);
-}
-EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
-
-/**
- * Used by ptlrpc client to allocate request buffer of \a req. Upon return
- * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
- */
-int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_policy *policy;
- int rc;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_policy);
- LASSERT(!req->rq_reqmsg);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
-
- policy = ctx->cc_sec->ps_policy;
- rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
- if (!rc) {
- LASSERT(req->rq_reqmsg);
- LASSERT(req->rq_reqbuf || req->rq_clrbuf);
-
- /* zeroing preallocated buffer */
- if (req->rq_pool)
- memset(req->rq_reqmsg, 0, msgsize);
- }
-
- return rc;
-}
-
-/**
- * Used by ptlrpc client to free request buffer of \a req. After this
- * req->rq_reqmsg is set to NULL and should not be accessed anymore.
- */
-void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_policy *policy;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_policy);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
-
- if (!req->rq_reqbuf && !req->rq_clrbuf)
- return;
-
- policy = ctx->cc_sec->ps_policy;
- policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
- req->rq_reqmsg = NULL;
-}
-
-/*
- * NOTE caller must guarantee the buffer size is enough for the enlargement
- */
-void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
- int segment, int newsize)
-{
- void *src, *dst;
- int oldsize, oldmsg_size, movesize;
-
- LASSERT(segment < msg->lm_bufcount);
- LASSERT(msg->lm_buflens[segment] <= newsize);
-
- if (msg->lm_buflens[segment] == newsize)
- return;
-
- /* nothing to do if we are enlarging the last segment */
- if (segment == msg->lm_bufcount - 1) {
- msg->lm_buflens[segment] = newsize;
- return;
- }
-
- oldsize = msg->lm_buflens[segment];
-
- src = lustre_msg_buf(msg, segment + 1, 0);
- msg->lm_buflens[segment] = newsize;
- dst = lustre_msg_buf(msg, segment + 1, 0);
- msg->lm_buflens[segment] = oldsize;
-
- /* move from segment + 1 to end segment */
- LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
- oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
- movesize = oldmsg_size - ((unsigned long)src - (unsigned long)msg);
- LASSERT(movesize >= 0);
-
- if (movesize)
- memmove(dst, src, movesize);
-
- /* note we don't clear the ares where old data live, not secret */
-
- /* finally set new segment size */
- msg->lm_buflens[segment] = newsize;
-}
-EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
-
-/**
- * Used by ptlrpc client to enlarge the \a segment of request message pointed
- * by req->rq_reqmsg to size \a newsize, all previously filled-in data will be
- * preserved after the enlargement. this must be called after original request
- * buffer being allocated.
- *
- * \note after this be called, rq_reqmsg and rq_reqlen might have been changed,
- * so caller should refresh its local pointers if needed.
- */
-int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
- int segment, int newsize)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_cops *cops;
- struct lustre_msg *msg = req->rq_reqmsg;
-
- LASSERT(ctx);
- LASSERT(msg);
- LASSERT(msg->lm_bufcount > segment);
- LASSERT(msg->lm_buflens[segment] <= newsize);
-
- if (msg->lm_buflens[segment] == newsize)
- return 0;
-
- cops = ctx->cc_sec->ps_policy->sp_cops;
- LASSERT(cops->enlarge_reqbuf);
- return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
-}
-EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
-
-/**
- * Used by ptlrpc client to allocate reply buffer of \a req.
- *
- * \note After this, req->rq_repmsg is still not accessible.
- */
-int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_policy *policy;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_policy);
-
- if (req->rq_repbuf)
- return 0;
-
- policy = ctx->cc_sec->ps_policy;
- return policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize);
-}
-
-/**
- * Used by ptlrpc client to free reply buffer of \a req. After this
- * req->rq_repmsg is set to NULL and should not be accessed anymore.
- */
-void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_policy *policy;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_policy);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
-
- if (!req->rq_repbuf)
- return;
- LASSERT(req->rq_repbuf_len);
-
- policy = ctx->cc_sec->ps_policy;
- policy->sp_cops->free_repbuf(ctx->cc_sec, req);
- req->rq_repmsg = NULL;
-}
-
-static int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx)
-{
- struct ptlrpc_sec_policy *policy = ctx->sc_policy;
-
- if (!policy->sp_sops->install_rctx)
- return 0;
- return policy->sp_sops->install_rctx(imp, ctx);
-}
-
-/****************************************
- * server side security *
- ****************************************/
-
-static int flavor_allowed(struct sptlrpc_flavor *exp,
- struct ptlrpc_request *req)
-{
- struct sptlrpc_flavor *flvr = &req->rq_flvr;
-
- if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
- return 1;
-
- if ((req->rq_ctx_init || req->rq_ctx_fini) &&
- SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
- SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
- SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
- return 1;
-
- return 0;
-}
-
-#define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10)
-
-/**
- * Given an export \a exp, check whether the flavor of incoming \a req
- * is allowed by the export \a exp. Main logic is about taking care of
- * changing configurations. Return 0 means success.
- */
-int sptlrpc_target_export_check(struct obd_export *exp,
- struct ptlrpc_request *req)
-{
- struct sptlrpc_flavor flavor;
-
- if (!exp)
- return 0;
-
- /* client side export has no imp_reverse, skip
- * FIXME maybe we should check flavor this as well???
- */
- if (!exp->exp_imp_reverse)
- return 0;
-
- /* don't care about ctx fini rpc */
- if (req->rq_ctx_fini)
- return 0;
-
- spin_lock(&exp->exp_lock);
-
- /* if flavor just changed (exp->exp_flvr_changed != 0), we wait for
- * the first req with the new flavor, then treat it as current flavor,
- * adapt reverse sec according to it.
- * note the first rpc with new flavor might not be with root ctx, in
- * which case delay the sec_adapt by leaving exp_flvr_adapt == 1.
- */
- if (unlikely(exp->exp_flvr_changed) &&
- flavor_allowed(&exp->exp_flvr_old[1], req)) {
- /* make the new flavor as "current", and old ones as
- * about-to-expire
- */
- CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
- exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
- flavor = exp->exp_flvr_old[1];
- exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
- exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
- exp->exp_flvr_old[0] = exp->exp_flvr;
- exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
- EXP_FLVR_UPDATE_EXPIRE;
- exp->exp_flvr = flavor;
-
- /* flavor change finished */
- exp->exp_flvr_changed = 0;
- LASSERT(exp->exp_flvr_adapt == 1);
-
- /* if it's gss, we only interested in root ctx init */
- if (req->rq_auth_gss &&
- !(req->rq_ctx_init &&
- (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
- req->rq_auth_usr_ost))) {
- spin_unlock(&exp->exp_lock);
- CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
- req->rq_auth_gss, req->rq_ctx_init,
- req->rq_auth_usr_root, req->rq_auth_usr_mdt,
- req->rq_auth_usr_ost);
- return 0;
- }
-
- exp->exp_flvr_adapt = 0;
- spin_unlock(&exp->exp_lock);
-
- return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
- req->rq_svc_ctx, &flavor);
- }
-
- /* if it equals to the current flavor, we accept it, but need to
- * dealing with reverse sec/ctx
- */
- if (likely(flavor_allowed(&exp->exp_flvr, req))) {
- /* most cases should return here, we only interested in
- * gss root ctx init
- */
- if (!req->rq_auth_gss || !req->rq_ctx_init ||
- (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
- !req->rq_auth_usr_ost)) {
- spin_unlock(&exp->exp_lock);
- return 0;
- }
-
- /* if flavor just changed, we should not proceed, just leave
- * it and current flavor will be discovered and replaced
- * shortly, and let _this_ rpc pass through
- */
- if (exp->exp_flvr_changed) {
- LASSERT(exp->exp_flvr_adapt);
- spin_unlock(&exp->exp_lock);
- return 0;
- }
-
- if (exp->exp_flvr_adapt) {
- exp->exp_flvr_adapt = 0;
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
- exp, exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc);
- flavor = exp->exp_flvr;
- spin_unlock(&exp->exp_lock);
-
- return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
- req->rq_svc_ctx,
- &flavor);
- } else {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
- exp, exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc);
- spin_unlock(&exp->exp_lock);
-
- return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
- req->rq_svc_ctx);
- }
- }
-
- if (exp->exp_flvr_expire[0]) {
- if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
- if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the middle one (%lld)\n", exp,
- exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc,
- (s64)(exp->exp_flvr_expire[0] -
- ktime_get_real_seconds()));
- spin_unlock(&exp->exp_lock);
- return 0;
- }
- } else {
- CDEBUG(D_SEC, "mark middle expired\n");
- exp->exp_flvr_expire[0] = 0;
- }
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
- exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
- req->rq_flvr.sf_rpc);
- }
-
- /* now it doesn't match the current flavor, the only chance we can
- * accept it is match the old flavors which is not expired.
- */
- if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
- if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
- if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
- exp,
- exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc,
- (s64)(exp->exp_flvr_expire[1] -
- ktime_get_real_seconds()));
- spin_unlock(&exp->exp_lock);
- return 0;
- }
- } else {
- CDEBUG(D_SEC, "mark oldest expired\n");
- exp->exp_flvr_expire[1] = 0;
- }
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
- exp, exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
- req->rq_flvr.sf_rpc);
- } else {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
- exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc);
- }
-
- spin_unlock(&exp->exp_lock);
-
- CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
- exp, exp->exp_obd->obd_name,
- req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
- req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
- req->rq_flvr.sf_rpc,
- exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_expire[0] ?
- (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
- exp->exp_flvr_old[1].sf_rpc,
- exp->exp_flvr_expire[1] ?
- (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
- return -EACCES;
-}
-EXPORT_SYMBOL(sptlrpc_target_export_check);
-
-static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
-{
- /* peer's claim is unreliable unless gss is being used */
- if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
- return svc_rc;
-
- switch (req->rq_sp_from) {
- case LUSTRE_SP_CLI:
- if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
- DEBUG_REQ(D_ERROR, req, "faked source CLI");
- svc_rc = SECSVC_DROP;
- }
- break;
- case LUSTRE_SP_MDT:
- if (!req->rq_auth_usr_mdt) {
- DEBUG_REQ(D_ERROR, req, "faked source MDT");
- svc_rc = SECSVC_DROP;
- }
- break;
- case LUSTRE_SP_OST:
- if (!req->rq_auth_usr_ost) {
- DEBUG_REQ(D_ERROR, req, "faked source OST");
- svc_rc = SECSVC_DROP;
- }
- break;
- case LUSTRE_SP_MGS:
- case LUSTRE_SP_MGC:
- if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
- !req->rq_auth_usr_ost) {
- DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
- svc_rc = SECSVC_DROP;
- }
- break;
- case LUSTRE_SP_ANY:
- default:
- DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
- svc_rc = SECSVC_DROP;
- }
-
- return svc_rc;
-}
-
-/**
- * Used by ptlrpc server, to perform transformation upon request message of
- * incoming \a req. This must be the first thing to do with a incoming
- * request in ptlrpc layer.
- *
- * \retval SECSVC_OK success, and req->rq_reqmsg point to request message in
- * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
- * \retval SECSVC_COMPLETE success, the request has been fully processed, and
- * reply message has been prepared.
- * \retval SECSVC_DROP failed, this request should be dropped.
- */
-int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
-{
- struct ptlrpc_sec_policy *policy;
- struct lustre_msg *msg = req->rq_reqbuf;
- int rc;
-
- LASSERT(msg);
- LASSERT(!req->rq_reqmsg);
- LASSERT(!req->rq_repmsg);
- LASSERT(!req->rq_svc_ctx);
-
- req->rq_req_swab_mask = 0;
-
- rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
- switch (rc) {
- case 1:
- lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
- case 0:
- break;
- default:
- CERROR("error unpacking request from %s x%llu\n",
- libcfs_id2str(req->rq_peer), req->rq_xid);
- return SECSVC_DROP;
- }
-
- req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
- req->rq_sp_from = LUSTRE_SP_ANY;
- req->rq_auth_uid = -1;
- req->rq_auth_mapped_uid = -1;
-
- policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
- if (!policy) {
- CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
- return SECSVC_DROP;
- }
-
- LASSERT(policy->sp_sops->accept);
- rc = policy->sp_sops->accept(req);
- sptlrpc_policy_put(policy);
- LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
- LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
-
- /*
- * if it's not null flavor (which means embedded packing msg),
- * reset the swab mask for the coming inner msg unpacking.
- */
- if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
- req->rq_req_swab_mask = 0;
-
- /* sanity check for the request source */
- rc = sptlrpc_svc_check_from(req, rc);
- return rc;
-}
-
-/**
- * Used by ptlrpc server, to allocate reply buffer for \a req. If succeed,
- * req->rq_reply_state is set, and req->rq_reply_state->rs_msg point to
- * a buffer of \a msglen size.
- */
-int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
-{
- struct ptlrpc_sec_policy *policy;
- struct ptlrpc_reply_state *rs;
- int rc;
-
- LASSERT(req->rq_svc_ctx);
- LASSERT(req->rq_svc_ctx->sc_policy);
-
- policy = req->rq_svc_ctx->sc_policy;
- LASSERT(policy->sp_sops->alloc_rs);
-
- rc = policy->sp_sops->alloc_rs(req, msglen);
- if (unlikely(rc == -ENOMEM)) {
- struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
-
- if (svcpt->scp_service->srv_max_reply_size <
- msglen + sizeof(struct ptlrpc_reply_state)) {
- /* Just return failure if the size is too big */
- CERROR("size of message is too big (%zd), %d allowed\n",
- msglen + sizeof(struct ptlrpc_reply_state),
- svcpt->scp_service->srv_max_reply_size);
- return -ENOMEM;
- }
-
- /* failed alloc, try emergency pool */
- rs = lustre_get_emerg_rs(svcpt);
- if (!rs)
- return -ENOMEM;
-
- req->rq_reply_state = rs;
- rc = policy->sp_sops->alloc_rs(req, msglen);
- if (rc) {
- lustre_put_emerg_rs(rs);
- req->rq_reply_state = NULL;
- }
- }
-
- LASSERT(rc != 0 ||
- (req->rq_reply_state && req->rq_reply_state->rs_msg));
-
- return rc;
-}
-
-/**
- * Used by ptlrpc server, to perform transformation upon reply message.
- *
- * \post req->rq_reply_off is set to appropriate server-controlled reply offset.
- * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
- */
-int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
-{
- struct ptlrpc_sec_policy *policy;
- int rc;
-
- LASSERT(req->rq_svc_ctx);
- LASSERT(req->rq_svc_ctx->sc_policy);
-
- policy = req->rq_svc_ctx->sc_policy;
- LASSERT(policy->sp_sops->authorize);
-
- rc = policy->sp_sops->authorize(req);
- LASSERT(rc || req->rq_reply_state->rs_repdata_len);
-
- return rc;
-}
-
-/**
- * Used by ptlrpc server, to free reply_state.
- */
-void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
-{
- struct ptlrpc_sec_policy *policy;
- unsigned int prealloc;
-
- LASSERT(rs->rs_svc_ctx);
- LASSERT(rs->rs_svc_ctx->sc_policy);
-
- policy = rs->rs_svc_ctx->sc_policy;
- LASSERT(policy->sp_sops->free_rs);
-
- prealloc = rs->rs_prealloc;
- policy->sp_sops->free_rs(rs);
-
- if (prealloc)
- lustre_put_emerg_rs(rs);
-}
-
-void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
-{
- struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
-
- if (ctx)
- atomic_inc(&ctx->sc_refcount);
-}
-
-void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
-{
- struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
-
- if (!ctx)
- return;
-
- LASSERT_ATOMIC_POS(&ctx->sc_refcount);
- if (atomic_dec_and_test(&ctx->sc_refcount)) {
- if (ctx->sc_policy->sp_sops->free_ctx)
- ctx->sc_policy->sp_sops->free_ctx(ctx);
- }
- req->rq_svc_ctx = NULL;
-}
-
-/****************************************
- * bulk security *
- ****************************************/
-
-/**
- * Perform transformation upon bulk data pointed by \a desc. This is called
- * before transforming the request message.
- */
-int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_cli_ctx *ctx;
-
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-
- if (!req->rq_pack_bulk)
- return 0;
-
- ctx = req->rq_cli_ctx;
- if (ctx->cc_ops->wrap_bulk)
- return ctx->cc_ops->wrap_bulk(ctx, req, desc);
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
-
-/**
- * This is called after unwrap the reply message.
- * return nob of actual plain text size received, or error code.
- */
-int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc,
- int nob)
-{
- struct ptlrpc_cli_ctx *ctx;
- int rc;
-
- LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
-
- if (!req->rq_pack_bulk)
- return desc->bd_nob_transferred;
-
- ctx = req->rq_cli_ctx;
- if (ctx->cc_ops->unwrap_bulk) {
- rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
- if (rc < 0)
- return rc;
- }
- return desc->bd_nob_transferred;
-}
-EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
-
-/**
- * This is called after unwrap the reply message.
- * return 0 for success or error code.
- */
-int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_cli_ctx *ctx;
- int rc;
-
- LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
-
- if (!req->rq_pack_bulk)
- return 0;
-
- ctx = req->rq_cli_ctx;
- if (ctx->cc_ops->unwrap_bulk) {
- rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
- if (rc < 0)
- return rc;
- }
-
- /*
- * if everything is going right, nob should equals to nob_transferred.
- * in case of privacy mode, nob_transferred needs to be adjusted.
- */
- if (desc->bd_nob != desc->bd_nob_transferred) {
- CERROR("nob %d doesn't match transferred nob %d\n",
- desc->bd_nob, desc->bd_nob_transferred);
- return -EPROTO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
-
-/****************************************
- * user descriptor helpers *
- ****************************************/
-
-int sptlrpc_current_user_desc_size(void)
-{
- int ngroups;
-
- ngroups = current_ngroups;
-
- if (ngroups > LUSTRE_MAX_GROUPS)
- ngroups = LUSTRE_MAX_GROUPS;
- return sptlrpc_user_desc_size(ngroups);
-}
-EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
-
-int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
-{
- struct ptlrpc_user_desc *pud;
-
- pud = lustre_msg_buf(msg, offset, 0);
-
- if (!pud)
- return -EINVAL;
-
- pud->pud_uid = from_kuid(&init_user_ns, current_uid());
- pud->pud_gid = from_kgid(&init_user_ns, current_gid());
- pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
- pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
- pud->pud_cap = cfs_curproc_cap_pack();
- pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
-
- task_lock(current);
- if (pud->pud_ngroups > current_ngroups)
- pud->pud_ngroups = current_ngroups;
- memcpy(pud->pud_groups, current_cred()->group_info->gid,
- pud->pud_ngroups * sizeof(__u32));
- task_unlock(current);
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_pack_user_desc);
-
-int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
-{
- struct ptlrpc_user_desc *pud;
- int i;
-
- pud = lustre_msg_buf(msg, offset, sizeof(*pud));
- if (!pud)
- return -EINVAL;
-
- if (swabbed) {
- __swab32s(&pud->pud_uid);
- __swab32s(&pud->pud_gid);
- __swab32s(&pud->pud_fsuid);
- __swab32s(&pud->pud_fsgid);
- __swab32s(&pud->pud_cap);
- __swab32s(&pud->pud_ngroups);
- }
-
- if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
- CERROR("%u groups is too large\n", pud->pud_ngroups);
- return -EINVAL;
- }
-
- if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
- msg->lm_buflens[offset]) {
- CERROR("%u groups are claimed but bufsize only %u\n",
- pud->pud_ngroups, msg->lm_buflens[offset]);
- return -EINVAL;
- }
-
- if (swabbed) {
- for (i = 0; i < pud->pud_ngroups; i++)
- __swab32s(&pud->pud_groups[i]);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
-
-/****************************************
- * misc helpers *
- ****************************************/
-
-const char *sec2target_str(struct ptlrpc_sec *sec)
-{
- if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
- return "*";
- if (sec_is_reverse(sec))
- return "c";
- return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
-}
-EXPORT_SYMBOL(sec2target_str);
-
-/*
- * return true if the bulk data is protected
- */
-bool sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
-{
- switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
- case SPTLRPC_BULK_SVC_INTG:
- case SPTLRPC_BULK_SVC_PRIV:
- return true;
- default:
- return false;
- }
-}
-EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
-
-/****************************************
- * crypto API helper/alloc blkciper *
- ****************************************/
-
-/****************************************
- * initialize/finalize *
- ****************************************/
-
-int sptlrpc_init(void)
-{
- int rc;
-
- rwlock_init(&policy_lock);
-
- rc = sptlrpc_gc_init();
- if (rc)
- goto out;
-
- rc = sptlrpc_conf_init();
- if (rc)
- goto out_gc;
-
- rc = sptlrpc_enc_pool_init();
- if (rc)
- goto out_conf;
-
- rc = sptlrpc_null_init();
- if (rc)
- goto out_pool;
-
- rc = sptlrpc_plain_init();
- if (rc)
- goto out_null;
-
- rc = sptlrpc_lproc_init();
- if (rc)
- goto out_plain;
-
- return 0;
-
-out_plain:
- sptlrpc_plain_fini();
-out_null:
- sptlrpc_null_fini();
-out_pool:
- sptlrpc_enc_pool_fini();
-out_conf:
- sptlrpc_conf_fini();
-out_gc:
- sptlrpc_gc_fini();
-out:
- return rc;
-}
-
-void sptlrpc_fini(void)
-{
- sptlrpc_lproc_fini();
- sptlrpc_plain_fini();
- sptlrpc_null_fini();
- sptlrpc_enc_pool_fini();
- sptlrpc_conf_fini();
- sptlrpc_gc_fini();
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
deleted file mode 100644
index 625b9520d78f..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ /dev/null
@@ -1,572 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_bulk.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd.h>
-#include <obd_cksum.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_net.h>
-#include <lustre_import.h>
-#include <lustre_dlm.h>
-#include <lustre_sec.h>
-
-#include "ptlrpc_internal.h"
-
-/****************************************
- * bulk encryption page pools *
- ****************************************/
-
-#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *))
-#define PAGES_PER_POOL (POINTERS_PER_PAGE)
-
-#define IDLE_IDX_MAX (100)
-#define IDLE_IDX_WEIGHT (3)
-
-#define CACHE_QUIESCENT_PERIOD (20)
-
-static struct ptlrpc_enc_page_pool {
- /*
- * constants
- */
- unsigned long epp_max_pages; /* maximum pages can hold, const */
- unsigned int epp_max_pools; /* number of pools, const */
-
- /*
- * wait queue in case of not enough free pages.
- */
- wait_queue_head_t epp_waitq; /* waiting threads */
- unsigned int epp_waitqlen; /* wait queue length */
- unsigned long epp_pages_short; /* # of pages wanted of in-q users */
- unsigned int epp_growing:1; /* during adding pages */
-
- /*
- * indicating how idle the pools are, from 0 to MAX_IDLE_IDX
- * this is counted based on each time when getting pages from
- * the pools, not based on time. which means in case that system
- * is idled for a while but the idle_idx might still be low if no
- * activities happened in the pools.
- */
- unsigned long epp_idle_idx;
-
- /* last shrink time due to mem tight */
- time64_t epp_last_shrink;
- time64_t epp_last_access;
-
- /*
- * in-pool pages bookkeeping
- */
- spinlock_t epp_lock; /* protect following fields */
- unsigned long epp_total_pages; /* total pages in pools */
- unsigned long epp_free_pages; /* current pages available */
-
- /*
- * statistics
- */
- unsigned long epp_st_max_pages; /* # of pages ever reached */
- unsigned int epp_st_grows; /* # of grows */
- unsigned int epp_st_grow_fails; /* # of add pages failures */
- unsigned int epp_st_shrinks; /* # of shrinks */
- unsigned long epp_st_access; /* # of access */
- unsigned long epp_st_missings; /* # of cache missing */
- unsigned long epp_st_lowfree; /* lowest free pages reached */
- unsigned int epp_st_max_wqlen; /* highest waitqueue length */
- unsigned long epp_st_max_wait; /* in jiffies */
- unsigned long epp_st_outofmem; /* # of out of mem requests */
- /*
- * pointers to pools
- */
- struct page ***epp_pools;
-} page_pools;
-
-/*
- * /sys/kernel/debug/lustre/sptlrpc/encrypt_page_pools
- */
-int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
-{
- spin_lock(&page_pools.epp_lock);
-
- seq_printf(m,
- "physical pages: %lu\n"
- "pages per pool: %lu\n"
- "max pages: %lu\n"
- "max pools: %u\n"
- "total pages: %lu\n"
- "total free: %lu\n"
- "idle index: %lu/100\n"
- "last shrink: %lds\n"
- "last access: %lds\n"
- "max pages reached: %lu\n"
- "grows: %u\n"
- "grows failure: %u\n"
- "shrinks: %u\n"
- "cache access: %lu\n"
- "cache missing: %lu\n"
- "low free mark: %lu\n"
- "max waitqueue depth: %u\n"
- "max wait time: %ld/%lu\n"
- "out of mem: %lu\n",
- totalram_pages,
- PAGES_PER_POOL,
- page_pools.epp_max_pages,
- page_pools.epp_max_pools,
- page_pools.epp_total_pages,
- page_pools.epp_free_pages,
- page_pools.epp_idle_idx,
- (long)(ktime_get_seconds() - page_pools.epp_last_shrink),
- (long)(ktime_get_seconds() - page_pools.epp_last_access),
- page_pools.epp_st_max_pages,
- page_pools.epp_st_grows,
- page_pools.epp_st_grow_fails,
- page_pools.epp_st_shrinks,
- page_pools.epp_st_access,
- page_pools.epp_st_missings,
- page_pools.epp_st_lowfree,
- page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC),
- page_pools.epp_st_outofmem);
-
- spin_unlock(&page_pools.epp_lock);
-
- return 0;
-}
-
-static void enc_pools_release_free_pages(long npages)
-{
- int p_idx, g_idx;
- int p_idx_max1, p_idx_max2;
-
- LASSERT(npages > 0);
- LASSERT(npages <= page_pools.epp_free_pages);
- LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages);
-
- /* max pool index before the release */
- p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL;
-
- page_pools.epp_free_pages -= npages;
- page_pools.epp_total_pages -= npages;
-
- /* max pool index after the release */
- p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 :
- ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);
-
- p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
- g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
- LASSERT(page_pools.epp_pools[p_idx]);
-
- while (npages--) {
- LASSERT(page_pools.epp_pools[p_idx]);
- LASSERT(page_pools.epp_pools[p_idx][g_idx]);
-
- __free_page(page_pools.epp_pools[p_idx][g_idx]);
- page_pools.epp_pools[p_idx][g_idx] = NULL;
-
- if (++g_idx == PAGES_PER_POOL) {
- p_idx++;
- g_idx = 0;
- }
- }
-
- /* free unused pools */
- while (p_idx_max1 < p_idx_max2) {
- LASSERT(page_pools.epp_pools[p_idx_max2]);
- kfree(page_pools.epp_pools[p_idx_max2]);
- page_pools.epp_pools[p_idx_max2] = NULL;
- p_idx_max2--;
- }
-}
-
-/*
- * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
- */
-static unsigned long enc_pools_shrink_count(struct shrinker *s,
- struct shrink_control *sc)
-{
- /*
- * if no pool access for a long time, we consider it's fully idle.
- * a little race here is fine.
- */
- if (unlikely(ktime_get_seconds() - page_pools.epp_last_access >
- CACHE_QUIESCENT_PERIOD)) {
- spin_lock(&page_pools.epp_lock);
- page_pools.epp_idle_idx = IDLE_IDX_MAX;
- spin_unlock(&page_pools.epp_lock);
- }
-
- LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
- return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
- (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
-}
-
-/*
- * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
- */
-static unsigned long enc_pools_shrink_scan(struct shrinker *s,
- struct shrink_control *sc)
-{
- spin_lock(&page_pools.epp_lock);
- sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
- page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
- if (sc->nr_to_scan > 0) {
- enc_pools_release_free_pages(sc->nr_to_scan);
- CDEBUG(D_SEC, "released %ld pages, %ld left\n",
- (long)sc->nr_to_scan, page_pools.epp_free_pages);
-
- page_pools.epp_st_shrinks++;
- page_pools.epp_last_shrink = ktime_get_seconds();
- }
- spin_unlock(&page_pools.epp_lock);
-
- /*
- * if no pool access for a long time, we consider it's fully idle.
- * a little race here is fine.
- */
- if (unlikely(ktime_get_seconds() - page_pools.epp_last_access >
- CACHE_QUIESCENT_PERIOD)) {
- spin_lock(&page_pools.epp_lock);
- page_pools.epp_idle_idx = IDLE_IDX_MAX;
- spin_unlock(&page_pools.epp_lock);
- }
-
- LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
- return sc->nr_to_scan;
-}
-
-static inline
-int npages_to_npools(unsigned long npages)
-{
- return (int)DIV_ROUND_UP(npages, PAGES_PER_POOL);
-}
-
-/*
- * return how many pages cleaned up.
- */
-static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
-{
- unsigned long cleaned = 0;
- int i, j;
-
- for (i = 0; i < npools; i++) {
- if (pools[i]) {
- for (j = 0; j < PAGES_PER_POOL; j++) {
- if (pools[i][j]) {
- __free_page(pools[i][j]);
- cleaned++;
- }
- }
- kfree(pools[i]);
- pools[i] = NULL;
- }
- }
-
- return cleaned;
-}
-
-static inline void enc_pools_wakeup(void)
-{
- assert_spin_locked(&page_pools.epp_lock);
-
- if (unlikely(page_pools.epp_waitqlen)) {
- LASSERT(waitqueue_active(&page_pools.epp_waitq));
- wake_up_all(&page_pools.epp_waitq);
- }
-}
-
-/*
- * Export the number of free pages in the pool
- */
-int get_free_pages_in_pool(void)
-{
- return page_pools.epp_free_pages;
-}
-
-/*
- * Let outside world know if enc_pool full capacity is reached
- */
-int pool_is_at_full_capacity(void)
-{
- return (page_pools.epp_total_pages == page_pools.epp_max_pages);
-}
-
-void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
-{
- int p_idx, g_idx;
- int i;
-
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-
- if (!GET_ENC_KIOV(desc))
- return;
-
- LASSERT(desc->bd_iov_count > 0);
-
- spin_lock(&page_pools.epp_lock);
-
- p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
- g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
-
- LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
- page_pools.epp_total_pages);
- LASSERT(page_pools.epp_pools[p_idx]);
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page);
- LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
- LASSERT(!page_pools.epp_pools[p_idx][g_idx]);
-
- page_pools.epp_pools[p_idx][g_idx] =
- BD_GET_ENC_KIOV(desc, i).bv_page;
-
- if (++g_idx == PAGES_PER_POOL) {
- p_idx++;
- g_idx = 0;
- }
- }
-
- page_pools.epp_free_pages += desc->bd_iov_count;
-
- enc_pools_wakeup();
-
- spin_unlock(&page_pools.epp_lock);
-
- kfree(GET_ENC_KIOV(desc));
- GET_ENC_KIOV(desc) = NULL;
-}
-
-static inline void enc_pools_alloc(void)
-{
- LASSERT(page_pools.epp_max_pools);
- page_pools.epp_pools =
- kvzalloc(page_pools.epp_max_pools *
- sizeof(*page_pools.epp_pools),
- GFP_KERNEL);
-}
-
-static inline void enc_pools_free(void)
-{
- LASSERT(page_pools.epp_max_pools);
- LASSERT(page_pools.epp_pools);
-
- kvfree(page_pools.epp_pools);
-}
-
-static struct shrinker pools_shrinker = {
- .count_objects = enc_pools_shrink_count,
- .scan_objects = enc_pools_shrink_scan,
- .seeks = DEFAULT_SEEKS,
-};
-
-int sptlrpc_enc_pool_init(void)
-{
- int rc;
-
- /*
- * maximum capacity is 1/8 of total physical memory.
- * is the 1/8 a good number?
- */
- page_pools.epp_max_pages = totalram_pages / 8;
- page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
-
- init_waitqueue_head(&page_pools.epp_waitq);
- page_pools.epp_waitqlen = 0;
- page_pools.epp_pages_short = 0;
-
- page_pools.epp_growing = 0;
-
- page_pools.epp_idle_idx = 0;
- page_pools.epp_last_shrink = ktime_get_seconds();
- page_pools.epp_last_access = ktime_get_seconds();
-
- spin_lock_init(&page_pools.epp_lock);
- page_pools.epp_total_pages = 0;
- page_pools.epp_free_pages = 0;
-
- page_pools.epp_st_max_pages = 0;
- page_pools.epp_st_grows = 0;
- page_pools.epp_st_grow_fails = 0;
- page_pools.epp_st_shrinks = 0;
- page_pools.epp_st_access = 0;
- page_pools.epp_st_missings = 0;
- page_pools.epp_st_lowfree = 0;
- page_pools.epp_st_max_wqlen = 0;
- page_pools.epp_st_max_wait = 0;
- page_pools.epp_st_outofmem = 0;
-
- enc_pools_alloc();
- if (!page_pools.epp_pools)
- return -ENOMEM;
-
- rc = register_shrinker(&pools_shrinker);
- if (rc)
- enc_pools_free();
-
- return rc;
-}
-
-void sptlrpc_enc_pool_fini(void)
-{
- unsigned long cleaned, npools;
-
- LASSERT(page_pools.epp_pools);
- LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
-
- unregister_shrinker(&pools_shrinker);
-
- npools = npages_to_npools(page_pools.epp_total_pages);
- cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
- LASSERT(cleaned == page_pools.epp_total_pages);
-
- enc_pools_free();
-
- if (page_pools.epp_st_access > 0) {
- CDEBUG(D_SEC,
- "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld, out of mem %lu\n",
- page_pools.epp_st_max_pages, page_pools.epp_st_grows,
- page_pools.epp_st_grow_fails,
- page_pools.epp_st_shrinks, page_pools.epp_st_access,
- page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC),
- page_pools.epp_st_outofmem);
- }
-}
-
-static int cfs_hash_alg_id[] = {
- [BULK_HASH_ALG_NULL] = CFS_HASH_ALG_NULL,
- [BULK_HASH_ALG_ADLER32] = CFS_HASH_ALG_ADLER32,
- [BULK_HASH_ALG_CRC32] = CFS_HASH_ALG_CRC32,
- [BULK_HASH_ALG_MD5] = CFS_HASH_ALG_MD5,
- [BULK_HASH_ALG_SHA1] = CFS_HASH_ALG_SHA1,
- [BULK_HASH_ALG_SHA256] = CFS_HASH_ALG_SHA256,
- [BULK_HASH_ALG_SHA384] = CFS_HASH_ALG_SHA384,
- [BULK_HASH_ALG_SHA512] = CFS_HASH_ALG_SHA512,
-};
-
-const char *sptlrpc_get_hash_name(__u8 hash_alg)
-{
- return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
-}
-
-__u8 sptlrpc_get_hash_alg(const char *algname)
-{
- return cfs_crypto_hash_alg(algname);
-}
-
-int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
-{
- struct ptlrpc_bulk_sec_desc *bsd;
- int size = msg->lm_buflens[offset];
-
- bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
- if (!bsd) {
- CERROR("Invalid bulk sec desc: size %d\n", size);
- return -EINVAL;
- }
-
- if (swabbed)
- __swab32s(&bsd->bsd_nob);
-
- if (unlikely(bsd->bsd_version != 0)) {
- CERROR("Unexpected version %u\n", bsd->bsd_version);
- return -EPROTO;
- }
-
- if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
- CERROR("Invalid type %u\n", bsd->bsd_type);
- return -EPROTO;
- }
-
- /* FIXME more sanity check here */
-
- if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
- bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
- bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
- CERROR("Invalid svc %u\n", bsd->bsd_svc);
- return -EPROTO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(bulk_sec_desc_unpack);
-
-int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
- void *buf, int buflen)
-{
- struct ahash_request *hdesc;
- int hashsize;
- unsigned int bufsize;
- int i, err;
-
- LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
- LASSERT(buflen >= 4);
-
- hdesc = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0);
- if (IS_ERR(hdesc)) {
- CERROR("Unable to initialize checksum hash %s\n",
- cfs_crypto_hash_name(cfs_hash_alg_id[alg]));
- return PTR_ERR(hdesc);
- }
-
- hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- cfs_crypto_hash_update_page(hdesc,
- BD_GET_KIOV(desc, i).bv_page,
- BD_GET_KIOV(desc, i).bv_offset &
- ~PAGE_MASK,
- BD_GET_KIOV(desc, i).bv_len);
- }
-
- if (hashsize > buflen) {
- unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
-
- bufsize = sizeof(hashbuf);
- LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
- bufsize, hashsize);
- err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
- memcpy(buf, hashbuf, buflen);
- } else {
- bufsize = buflen;
- err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
- }
-
- return err;
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
deleted file mode 100644
index 2389f9a8f534..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ /dev/null
@@ -1,850 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/crypto.h>
-#include <linux/key.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_import.h>
-#include <uapi/linux/lustre/lustre_param.h>
-#include <lustre_sec.h>
-
-#include "ptlrpc_internal.h"
-
-enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd)
-{
- const char *type = obd->obd_type->typ_name;
-
- if (!strcmp(type, LUSTRE_MDT_NAME))
- return LUSTRE_SP_MDT;
- if (!strcmp(type, LUSTRE_OST_NAME))
- return LUSTRE_SP_OST;
- if (!strcmp(type, LUSTRE_MGS_NAME))
- return LUSTRE_SP_MGS;
-
- CERROR("unknown target %p(%s)\n", obd, type);
- return LUSTRE_SP_ANY;
-}
-
-/****************************************
- * user supplied flavor string parsing *
- ****************************************/
-
-/*
- * format: <base_flavor>[-<bulk_type:alg_spec>]
- */
-int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr)
-{
- char buf[32];
- char *bulk, *alg;
-
- memset(flvr, 0, sizeof(*flvr));
-
- if (!str || str[0] == '\0') {
- flvr->sf_rpc = SPTLRPC_FLVR_INVALID;
- return 0;
- }
-
- strlcpy(buf, str, sizeof(buf));
-
- bulk = strchr(buf, '-');
- if (bulk)
- *bulk++ = '\0';
-
- flvr->sf_rpc = sptlrpc_name2flavor_base(buf);
- if (flvr->sf_rpc == SPTLRPC_FLVR_INVALID)
- goto err_out;
-
- /*
- * currently only base flavor "plain" can have bulk specification.
- */
- if (flvr->sf_rpc == SPTLRPC_FLVR_PLAIN) {
- flvr->u_bulk.hash.hash_alg = BULK_HASH_ALG_ADLER32;
- if (bulk) {
- /*
- * format: plain-hash:<hash_alg>
- */
- alg = strchr(bulk, ':');
- if (!alg)
- goto err_out;
- *alg++ = '\0';
-
- if (strcmp(bulk, "hash"))
- goto err_out;
-
- flvr->u_bulk.hash.hash_alg = sptlrpc_get_hash_alg(alg);
- if (flvr->u_bulk.hash.hash_alg >= BULK_HASH_ALG_MAX)
- goto err_out;
- }
-
- if (flvr->u_bulk.hash.hash_alg == BULK_HASH_ALG_NULL)
- flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_NULL);
- else
- flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_INTG);
- } else {
- if (bulk)
- goto err_out;
- }
-
- flvr->sf_flags = 0;
- return 0;
-
-err_out:
- CERROR("invalid flavor string: %s\n", str);
- return -EINVAL;
-}
-EXPORT_SYMBOL(sptlrpc_parse_flavor);
-
-/****************************************
- * configure rules *
- ****************************************/
-
-static void get_default_flavor(struct sptlrpc_flavor *sf)
-{
- memset(sf, 0, sizeof(*sf));
-
- sf->sf_rpc = SPTLRPC_FLVR_NULL;
- sf->sf_flags = 0;
-}
-
-static void sptlrpc_rule_init(struct sptlrpc_rule *rule)
-{
- rule->sr_netid = LNET_NIDNET(LNET_NID_ANY);
- rule->sr_from = LUSTRE_SP_ANY;
- rule->sr_to = LUSTRE_SP_ANY;
- rule->sr_padding = 0;
-
- get_default_flavor(&rule->sr_flvr);
-}
-
-/*
- * format: network[.direction]=flavor
- */
-static int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule)
-{
- char *flavor, *dir;
- int rc;
-
- sptlrpc_rule_init(rule);
-
- flavor = strchr(param, '=');
- if (!flavor) {
- CERROR("invalid param, no '='\n");
- return -EINVAL;
- }
- *flavor++ = '\0';
-
- dir = strchr(param, '.');
- if (dir)
- *dir++ = '\0';
-
- /* 1.1 network */
- if (strcmp(param, "default")) {
- rule->sr_netid = libcfs_str2net(param);
- if (rule->sr_netid == LNET_NIDNET(LNET_NID_ANY)) {
- CERROR("invalid network name: %s\n", param);
- return -EINVAL;
- }
- }
-
- /* 1.2 direction */
- if (dir) {
- if (!strcmp(dir, "mdt2ost")) {
- rule->sr_from = LUSTRE_SP_MDT;
- rule->sr_to = LUSTRE_SP_OST;
- } else if (!strcmp(dir, "mdt2mdt")) {
- rule->sr_from = LUSTRE_SP_MDT;
- rule->sr_to = LUSTRE_SP_MDT;
- } else if (!strcmp(dir, "cli2ost")) {
- rule->sr_from = LUSTRE_SP_CLI;
- rule->sr_to = LUSTRE_SP_OST;
- } else if (!strcmp(dir, "cli2mdt")) {
- rule->sr_from = LUSTRE_SP_CLI;
- rule->sr_to = LUSTRE_SP_MDT;
- } else {
- CERROR("invalid rule dir segment: %s\n", dir);
- return -EINVAL;
- }
- }
-
- /* 2.1 flavor */
- rc = sptlrpc_parse_flavor(flavor, &rule->sr_flvr);
- if (rc)
- return -EINVAL;
-
- return 0;
-}
-
-static void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset)
-{
- LASSERT(rset->srs_nslot ||
- (rset->srs_nrule == 0 && !rset->srs_rules));
-
- if (rset->srs_nslot) {
- kfree(rset->srs_rules);
- sptlrpc_rule_set_init(rset);
- }
-}
-
-/*
- * return 0 if the rule set could accommodate one more rule.
- */
-static int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset)
-{
- struct sptlrpc_rule *rules;
- int nslot;
-
- might_sleep();
-
- if (rset->srs_nrule < rset->srs_nslot)
- return 0;
-
- nslot = rset->srs_nslot + 8;
-
- /* better use realloc() if available */
- rules = kcalloc(nslot, sizeof(*rset->srs_rules), GFP_NOFS);
- if (!rules)
- return -ENOMEM;
-
- if (rset->srs_nrule) {
- LASSERT(rset->srs_nslot && rset->srs_rules);
- memcpy(rules, rset->srs_rules,
- rset->srs_nrule * sizeof(*rset->srs_rules));
-
- kfree(rset->srs_rules);
- }
-
- rset->srs_rules = rules;
- rset->srs_nslot = nslot;
- return 0;
-}
-
-static inline int rule_spec_dir(struct sptlrpc_rule *rule)
-{
- return (rule->sr_from != LUSTRE_SP_ANY ||
- rule->sr_to != LUSTRE_SP_ANY);
-}
-
-static inline int rule_spec_net(struct sptlrpc_rule *rule)
-{
- return (rule->sr_netid != LNET_NIDNET(LNET_NID_ANY));
-}
-
-static inline int rule_match_dir(struct sptlrpc_rule *r1,
- struct sptlrpc_rule *r2)
-{
- return (r1->sr_from == r2->sr_from && r1->sr_to == r2->sr_to);
-}
-
-static inline int rule_match_net(struct sptlrpc_rule *r1,
- struct sptlrpc_rule *r2)
-{
- return (r1->sr_netid == r2->sr_netid);
-}
-
-/*
- * merge @rule into @rset.
- * the @rset slots might be expanded.
- */
-static int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *rset,
- struct sptlrpc_rule *rule)
-{
- struct sptlrpc_rule *p = rset->srs_rules;
- int spec_dir, spec_net;
- int rc, n, match = 0;
-
- might_sleep();
-
- spec_net = rule_spec_net(rule);
- spec_dir = rule_spec_dir(rule);
-
- for (n = 0; n < rset->srs_nrule; n++) {
- p = &rset->srs_rules[n];
-
- /* test network match, if failed:
- * - spec rule: skip rules which is also spec rule match, until
- * we hit a wild rule, which means no more chance
- * - wild rule: skip until reach the one which is also wild
- * and matches
- */
- if (!rule_match_net(p, rule)) {
- if (spec_net) {
- if (rule_spec_net(p))
- continue;
- else
- break;
- } else {
- continue;
- }
- }
-
- /* test dir match, same logic as net matching */
- if (!rule_match_dir(p, rule)) {
- if (spec_dir) {
- if (rule_spec_dir(p))
- continue;
- else
- break;
- } else {
- continue;
- }
- }
-
- /* find a match */
- match = 1;
- break;
- }
-
- if (match) {
- LASSERT(n >= 0 && n < rset->srs_nrule);
-
- if (rule->sr_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
- /* remove this rule */
- if (n < rset->srs_nrule - 1)
- memmove(&rset->srs_rules[n],
- &rset->srs_rules[n + 1],
- (rset->srs_nrule - n - 1) *
- sizeof(*rule));
- rset->srs_nrule--;
- } else {
- /* override the rule */
- memcpy(&rset->srs_rules[n], rule, sizeof(*rule));
- }
- } else {
- LASSERT(n >= 0 && n <= rset->srs_nrule);
-
- if (rule->sr_flvr.sf_rpc != SPTLRPC_FLVR_INVALID) {
- rc = sptlrpc_rule_set_expand(rset);
- if (rc)
- return rc;
-
- if (n < rset->srs_nrule)
- memmove(&rset->srs_rules[n + 1],
- &rset->srs_rules[n],
- (rset->srs_nrule - n) * sizeof(*rule));
- memcpy(&rset->srs_rules[n], rule, sizeof(*rule));
- rset->srs_nrule++;
- } else {
- CDEBUG(D_CONFIG, "ignore the unmatched deletion\n");
- }
- }
-
- return 0;
-}
-
-/**
- * given from/to/nid, determine a matching flavor in ruleset.
- * return 1 if a match found, otherwise return 0.
- */
-static int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
- enum lustre_sec_part from,
- enum lustre_sec_part to,
- lnet_nid_t nid,
- struct sptlrpc_flavor *sf)
-{
- struct sptlrpc_rule *r;
- int n;
-
- for (n = 0; n < rset->srs_nrule; n++) {
- r = &rset->srs_rules[n];
-
- if (LNET_NIDNET(nid) != LNET_NIDNET(LNET_NID_ANY) &&
- r->sr_netid != LNET_NIDNET(LNET_NID_ANY) &&
- LNET_NIDNET(nid) != r->sr_netid)
- continue;
-
- if (from != LUSTRE_SP_ANY && r->sr_from != LUSTRE_SP_ANY &&
- from != r->sr_from)
- continue;
-
- if (to != LUSTRE_SP_ANY && r->sr_to != LUSTRE_SP_ANY &&
- to != r->sr_to)
- continue;
-
- *sf = r->sr_flvr;
- return 1;
- }
-
- return 0;
-}
-
-/**********************************
- * sptlrpc configuration support *
- **********************************/
-
-struct sptlrpc_conf_tgt {
- struct list_head sct_list;
- char sct_name[MAX_OBD_NAME];
- struct sptlrpc_rule_set sct_rset;
-};
-
-struct sptlrpc_conf {
- struct list_head sc_list;
- char sc_fsname[MTI_NAME_MAXLEN];
- unsigned int sc_modified; /* modified during updating */
- unsigned int sc_updated:1, /* updated copy from MGS */
- sc_local:1; /* local copy from target */
- struct sptlrpc_rule_set sc_rset; /* fs general rules */
- struct list_head sc_tgts; /* target-specific rules */
-};
-
-static struct mutex sptlrpc_conf_lock;
-static LIST_HEAD(sptlrpc_confs);
-
-static inline int is_hex(char c)
-{
- return ((c >= '0' && c <= '9') ||
- (c >= 'a' && c <= 'f'));
-}
-
-static void target2fsname(const char *tgt, char *fsname, int buflen)
-{
- const char *ptr;
- int len;
-
- ptr = strrchr(tgt, '-');
- if (ptr) {
- if ((strncmp(ptr, "-MDT", 4) != 0 &&
- strncmp(ptr, "-OST", 4) != 0) ||
- !is_hex(ptr[4]) || !is_hex(ptr[5]) ||
- !is_hex(ptr[6]) || !is_hex(ptr[7]))
- ptr = NULL;
- }
-
- /* if we didn't find the pattern, treat the whole string as fsname */
- if (!ptr)
- len = strlen(tgt);
- else
- len = ptr - tgt;
-
- len = min(len, buflen - 1);
- memcpy(fsname, tgt, len);
- fsname[len] = '\0';
-}
-
-static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf)
-{
- struct sptlrpc_conf_tgt *conf_tgt, *conf_tgt_next;
-
- sptlrpc_rule_set_free(&conf->sc_rset);
-
- list_for_each_entry_safe(conf_tgt, conf_tgt_next,
- &conf->sc_tgts, sct_list) {
- sptlrpc_rule_set_free(&conf_tgt->sct_rset);
- list_del(&conf_tgt->sct_list);
- kfree(conf_tgt);
- }
- LASSERT(list_empty(&conf->sc_tgts));
-
- conf->sc_updated = 0;
- conf->sc_local = 0;
-}
-
-static void sptlrpc_conf_free(struct sptlrpc_conf *conf)
-{
- CDEBUG(D_SEC, "free sptlrpc conf %s\n", conf->sc_fsname);
-
- sptlrpc_conf_free_rsets(conf);
- list_del(&conf->sc_list);
- kfree(conf);
-}
-
-static
-struct sptlrpc_conf_tgt *sptlrpc_conf_get_tgt(struct sptlrpc_conf *conf,
- const char *name,
- int create)
-{
- struct sptlrpc_conf_tgt *conf_tgt;
-
- list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
- if (strcmp(conf_tgt->sct_name, name) == 0)
- return conf_tgt;
- }
-
- if (!create)
- return NULL;
-
- conf_tgt = kzalloc(sizeof(*conf_tgt), GFP_NOFS);
- if (conf_tgt) {
- strlcpy(conf_tgt->sct_name, name, sizeof(conf_tgt->sct_name));
- sptlrpc_rule_set_init(&conf_tgt->sct_rset);
- list_add(&conf_tgt->sct_list, &conf->sc_tgts);
- }
-
- return conf_tgt;
-}
-
-static
-struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname,
- int create)
-{
- struct sptlrpc_conf *conf;
- size_t len;
-
- list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
- if (strcmp(conf->sc_fsname, fsname) == 0)
- return conf;
- }
-
- if (!create)
- return NULL;
-
- conf = kzalloc(sizeof(*conf), GFP_NOFS);
- if (!conf)
- return NULL;
-
- len = strlcpy(conf->sc_fsname, fsname, sizeof(conf->sc_fsname));
- if (len >= sizeof(conf->sc_fsname)) {
- kfree(conf);
- return NULL;
- }
- sptlrpc_rule_set_init(&conf->sc_rset);
- INIT_LIST_HEAD(&conf->sc_tgts);
- list_add(&conf->sc_list, &sptlrpc_confs);
-
- CDEBUG(D_SEC, "create sptlrpc conf %s\n", conf->sc_fsname);
- return conf;
-}
-
-/**
- * caller must hold conf_lock already.
- */
-static int sptlrpc_conf_merge_rule(struct sptlrpc_conf *conf,
- const char *target,
- struct sptlrpc_rule *rule)
-{
- struct sptlrpc_conf_tgt *conf_tgt;
- struct sptlrpc_rule_set *rule_set;
-
- /* fsname == target means general rules for the whole fs */
- if (strcmp(conf->sc_fsname, target) == 0) {
- rule_set = &conf->sc_rset;
- } else {
- conf_tgt = sptlrpc_conf_get_tgt(conf, target, 1);
- if (conf_tgt) {
- rule_set = &conf_tgt->sct_rset;
- } else {
- CERROR("out of memory, can't merge rule!\n");
- return -ENOMEM;
- }
- }
-
- return sptlrpc_rule_set_merge(rule_set, rule);
-}
-
-/**
- * process one LCFG_SPTLRPC_CONF record. if \a conf is NULL, we
- * find one through the target name in the record inside conf_lock;
- * otherwise means caller already hold conf_lock.
- */
-static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
- struct sptlrpc_conf *conf)
-{
- char *target, *param;
- char fsname[MTI_NAME_MAXLEN];
- struct sptlrpc_rule rule;
- int rc;
-
- target = lustre_cfg_string(lcfg, 1);
- if (!target) {
- CERROR("missing target name\n");
- return -EINVAL;
- }
-
- param = lustre_cfg_string(lcfg, 2);
- if (!param) {
- CERROR("missing parameter\n");
- return -EINVAL;
- }
-
- CDEBUG(D_SEC, "processing rule: %s.%s\n", target, param);
-
- /* parse rule to make sure the format is correct */
- if (strncmp(param, PARAM_SRPC_FLVR, sizeof(PARAM_SRPC_FLVR) - 1) != 0) {
- CERROR("Invalid sptlrpc parameter: %s\n", param);
- return -EINVAL;
- }
- param += sizeof(PARAM_SRPC_FLVR) - 1;
-
- rc = sptlrpc_parse_rule(param, &rule);
- if (rc)
- return -EINVAL;
-
- if (!conf) {
- target2fsname(target, fsname, sizeof(fsname));
-
- mutex_lock(&sptlrpc_conf_lock);
- conf = sptlrpc_conf_get(fsname, 0);
- if (!conf) {
- CERROR("can't find conf\n");
- rc = -ENOMEM;
- } else {
- rc = sptlrpc_conf_merge_rule(conf, target, &rule);
- }
- mutex_unlock(&sptlrpc_conf_lock);
- } else {
- LASSERT(mutex_is_locked(&sptlrpc_conf_lock));
- rc = sptlrpc_conf_merge_rule(conf, target, &rule);
- }
-
- if (rc == 0)
- conf->sc_modified++;
-
- return rc;
-}
-
-int sptlrpc_process_config(struct lustre_cfg *lcfg)
-{
- return __sptlrpc_process_config(lcfg, NULL);
-}
-EXPORT_SYMBOL(sptlrpc_process_config);
-
-static int logname2fsname(const char *logname, char *buf, int buflen)
-{
- char *ptr;
- int len;
-
- ptr = strrchr(logname, '-');
- if (!ptr || strcmp(ptr, "-sptlrpc")) {
- CERROR("%s is not a sptlrpc config log\n", logname);
- return -EINVAL;
- }
-
- len = min((int)(ptr - logname), buflen - 1);
-
- memcpy(buf, logname, len);
- buf[len] = '\0';
- return 0;
-}
-
-void sptlrpc_conf_log_update_begin(const char *logname)
-{
- struct sptlrpc_conf *conf;
- char fsname[16];
-
- if (logname2fsname(logname, fsname, sizeof(fsname)))
- return;
-
- mutex_lock(&sptlrpc_conf_lock);
-
- conf = sptlrpc_conf_get(fsname, 0);
- if (conf) {
- if (conf->sc_local) {
- LASSERT(conf->sc_updated == 0);
- sptlrpc_conf_free_rsets(conf);
- }
- conf->sc_modified = 0;
- }
-
- mutex_unlock(&sptlrpc_conf_lock);
-}
-EXPORT_SYMBOL(sptlrpc_conf_log_update_begin);
-
-/**
- * mark a config log has been updated
- */
-void sptlrpc_conf_log_update_end(const char *logname)
-{
- struct sptlrpc_conf *conf;
- char fsname[16];
-
- if (logname2fsname(logname, fsname, sizeof(fsname)))
- return;
-
- mutex_lock(&sptlrpc_conf_lock);
-
- conf = sptlrpc_conf_get(fsname, 0);
- if (conf) {
- /*
- * if original state is not updated, make sure the
- * modified counter > 0 to enforce updating local copy.
- */
- if (conf->sc_updated == 0)
- conf->sc_modified++;
-
- conf->sc_updated = 1;
- }
-
- mutex_unlock(&sptlrpc_conf_lock);
-}
-EXPORT_SYMBOL(sptlrpc_conf_log_update_end);
-
-void sptlrpc_conf_log_start(const char *logname)
-{
- char fsname[16];
-
- if (logname2fsname(logname, fsname, sizeof(fsname)))
- return;
-
- mutex_lock(&sptlrpc_conf_lock);
- sptlrpc_conf_get(fsname, 1);
- mutex_unlock(&sptlrpc_conf_lock);
-}
-EXPORT_SYMBOL(sptlrpc_conf_log_start);
-
-void sptlrpc_conf_log_stop(const char *logname)
-{
- struct sptlrpc_conf *conf;
- char fsname[16];
-
- if (logname2fsname(logname, fsname, sizeof(fsname)))
- return;
-
- mutex_lock(&sptlrpc_conf_lock);
- conf = sptlrpc_conf_get(fsname, 0);
- if (conf)
- sptlrpc_conf_free(conf);
- mutex_unlock(&sptlrpc_conf_lock);
-}
-EXPORT_SYMBOL(sptlrpc_conf_log_stop);
-
-static inline void flavor_set_flags(struct sptlrpc_flavor *sf,
- enum lustre_sec_part from,
- enum lustre_sec_part to,
- unsigned int fl_udesc)
-{
- /*
- * null flavor doesn't need to set any flavor, and in fact
- * we'd better not do that because everybody share a single sec.
- */
- if (sf->sf_rpc == SPTLRPC_FLVR_NULL)
- return;
-
- if (from == LUSTRE_SP_MDT) {
- /* MDT->MDT; MDT->OST */
- sf->sf_flags |= PTLRPC_SEC_FL_ROOTONLY;
- } else if (from == LUSTRE_SP_CLI && to == LUSTRE_SP_OST) {
- /* CLI->OST */
- sf->sf_flags |= PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_BULK;
- } else if (from == LUSTRE_SP_CLI && to == LUSTRE_SP_MDT) {
- /* CLI->MDT */
- if (fl_udesc && sf->sf_rpc != SPTLRPC_FLVR_NULL)
- sf->sf_flags |= PTLRPC_SEC_FL_UDESC;
- }
-}
-
-void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
- enum lustre_sec_part to,
- struct obd_uuid *target,
- lnet_nid_t nid,
- struct sptlrpc_flavor *sf)
-{
- struct sptlrpc_conf *conf;
- struct sptlrpc_conf_tgt *conf_tgt;
- char name[MTI_NAME_MAXLEN];
- int len, rc = 0;
-
- target2fsname(target->uuid, name, sizeof(name));
-
- mutex_lock(&sptlrpc_conf_lock);
-
- conf = sptlrpc_conf_get(name, 0);
- if (!conf)
- goto out;
-
- /* convert uuid name (supposed end with _UUID) to target name */
- len = strlen(target->uuid);
- LASSERT(len > 5);
- memcpy(name, target->uuid, len - 5);
- name[len - 5] = '\0';
-
- conf_tgt = sptlrpc_conf_get_tgt(conf, name, 0);
- if (conf_tgt) {
- rc = sptlrpc_rule_set_choose(&conf_tgt->sct_rset,
- from, to, nid, sf);
- if (rc)
- goto out;
- }
-
- rc = sptlrpc_rule_set_choose(&conf->sc_rset, from, to, nid, sf);
-out:
- mutex_unlock(&sptlrpc_conf_lock);
-
- if (rc == 0)
- get_default_flavor(sf);
-
- flavor_set_flags(sf, from, to, 1);
-}
-
-#define SEC_ADAPT_DELAY (10)
-
-/**
- * called by client devices, notify the sptlrpc config has changed and
- * do import_sec_adapt later.
- */
-void sptlrpc_conf_client_adapt(struct obd_device *obd)
-{
- struct obd_import *imp;
-
- LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
- strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0);
- CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
-
- /* serialize with connect/disconnect import */
- down_read_nested(&obd->u.cli.cl_sem, OBD_CLI_SEM_MDCOSC);
-
- imp = obd->u.cli.cl_import;
- if (imp) {
- spin_lock(&imp->imp_lock);
- if (imp->imp_sec)
- imp->imp_sec_expire = ktime_get_real_seconds() +
- SEC_ADAPT_DELAY;
- spin_unlock(&imp->imp_lock);
- }
-
- up_read(&obd->u.cli.cl_sem);
-}
-EXPORT_SYMBOL(sptlrpc_conf_client_adapt);
-
-int sptlrpc_conf_init(void)
-{
- mutex_init(&sptlrpc_conf_lock);
- return 0;
-}
-
-void sptlrpc_conf_fini(void)
-{
- struct sptlrpc_conf *conf, *conf_next;
-
- mutex_lock(&sptlrpc_conf_lock);
- list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
- sptlrpc_conf_free(conf);
- }
- LASSERT(list_empty(&sptlrpc_confs));
- mutex_unlock(&sptlrpc_conf_lock);
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
deleted file mode 100644
index 2c8bad7b7877..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ /dev/null
@@ -1,190 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_gc.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include <linux/libcfs/libcfs.h>
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lustre_sec.h>
-
-#include "ptlrpc_internal.h"
-
-#define SEC_GC_INTERVAL (30 * 60)
-
-static struct mutex sec_gc_mutex;
-static LIST_HEAD(sec_gc_list);
-static spinlock_t sec_gc_list_lock;
-
-static LIST_HEAD(sec_gc_ctx_list);
-static spinlock_t sec_gc_ctx_list_lock;
-
-static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
-
-void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
-{
- LASSERT(sec->ps_policy->sp_cops->gc_ctx);
- LASSERT(sec->ps_gc_interval > 0);
- LASSERT(list_empty(&sec->ps_gc_list));
-
- sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
-
- spin_lock(&sec_gc_list_lock);
- list_add_tail(&sec->ps_gc_list, &sec_gc_list);
- spin_unlock(&sec_gc_list_lock);
-
- CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
-}
-
-void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
-{
- if (list_empty(&sec->ps_gc_list))
- return;
-
- might_sleep();
-
- /* signal before list_del to make iteration in gc thread safe */
- atomic_inc(&sec_gc_wait_del);
-
- spin_lock(&sec_gc_list_lock);
- list_del_init(&sec->ps_gc_list);
- spin_unlock(&sec_gc_list_lock);
-
- /* barrier */
- mutex_lock(&sec_gc_mutex);
- mutex_unlock(&sec_gc_mutex);
-
- atomic_dec(&sec_gc_wait_del);
-
- CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
-}
-
-static void sec_process_ctx_list(void)
-{
- struct ptlrpc_cli_ctx *ctx;
-
- spin_lock(&sec_gc_ctx_list_lock);
-
- while (!list_empty(&sec_gc_ctx_list)) {
- ctx = list_entry(sec_gc_ctx_list.next,
- struct ptlrpc_cli_ctx, cc_gc_chain);
- list_del_init(&ctx->cc_gc_chain);
- spin_unlock(&sec_gc_ctx_list_lock);
-
- LASSERT(ctx->cc_sec);
- LASSERT(atomic_read(&ctx->cc_refcount) == 1);
- CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- sptlrpc_cli_ctx_put(ctx, 1);
-
- spin_lock(&sec_gc_ctx_list_lock);
- }
-
- spin_unlock(&sec_gc_ctx_list_lock);
-}
-
-static void sec_do_gc(struct ptlrpc_sec *sec)
-{
- LASSERT(sec->ps_policy->sp_cops->gc_ctx);
-
- if (unlikely(sec->ps_gc_next == 0)) {
- CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
- sec, sec->ps_policy->sp_name);
- return;
- }
-
- CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
-
- if (sec->ps_gc_next > ktime_get_real_seconds())
- return;
-
- sec->ps_policy->sp_cops->gc_ctx(sec);
- sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
-}
-
-static void sec_gc_main(struct work_struct *ws);
-static DECLARE_DELAYED_WORK(sec_gc_work, sec_gc_main);
-
-static void sec_gc_main(struct work_struct *ws)
-{
- struct ptlrpc_sec *sec;
-
- sec_process_ctx_list();
-again:
- /* go through sec list do gc.
- * FIXME here we iterate through the whole list each time which
- * is not optimal. we perhaps want to use balanced binary tree
- * to trace each sec as order of expiry time.
- * another issue here is we wakeup as fixed interval instead of
- * according to each sec's expiry time
- */
- mutex_lock(&sec_gc_mutex);
- list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
- /* if someone is waiting to be deleted, let it
- * proceed as soon as possible.
- */
- if (atomic_read(&sec_gc_wait_del)) {
- CDEBUG(D_SEC, "deletion pending, start over\n");
- mutex_unlock(&sec_gc_mutex);
- goto again;
- }
-
- sec_do_gc(sec);
- }
- mutex_unlock(&sec_gc_mutex);
-
- /* check ctx list again before sleep */
- sec_process_ctx_list();
- schedule_delayed_work(&sec_gc_work, SEC_GC_INTERVAL * HZ);
-}
-
-int sptlrpc_gc_init(void)
-{
- mutex_init(&sec_gc_mutex);
- spin_lock_init(&sec_gc_list_lock);
- spin_lock_init(&sec_gc_ctx_list_lock);
-
- schedule_delayed_work(&sec_gc_work, 0);
- return 0;
-}
-
-void sptlrpc_gc_fini(void)
-{
- cancel_delayed_work_sync(&sec_gc_work);
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
deleted file mode 100644
index fd609b63d2de..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_lproc.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include <linux/libcfs/libcfs.h>
-#include <linux/crypto.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <obd_support.h>
-#include <lustre_net.h>
-#include <lustre_import.h>
-#include <lustre_dlm.h>
-#include <lustre_sec.h>
-
-#include "ptlrpc_internal.h"
-
-static char *sec_flags2str(unsigned long flags, char *buf, int bufsize)
-{
- buf[0] = '\0';
-
- if (flags & PTLRPC_SEC_FL_REVERSE)
- strlcat(buf, "reverse,", bufsize);
- if (flags & PTLRPC_SEC_FL_ROOTONLY)
- strlcat(buf, "rootonly,", bufsize);
- if (flags & PTLRPC_SEC_FL_UDESC)
- strlcat(buf, "udesc,", bufsize);
- if (flags & PTLRPC_SEC_FL_BULK)
- strlcat(buf, "bulk,", bufsize);
- if (buf[0] == '\0')
- strlcat(buf, "-,", bufsize);
-
- return buf;
-}
-
-static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v)
-{
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
- struct ptlrpc_sec *sec = NULL;
- char str[32];
-
- LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 ||
- strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
- strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0);
-
- if (cli->cl_import)
- sec = sptlrpc_import_sec_ref(cli->cl_import);
- if (!sec)
- goto out;
-
- sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str));
-
- seq_printf(seq, "rpc flavor: %s\n",
- sptlrpc_flavor2name_base(sec->ps_flvr.sf_rpc));
- seq_printf(seq, "bulk flavor: %s\n",
- sptlrpc_flavor2name_bulk(&sec->ps_flvr, str, sizeof(str)));
- seq_printf(seq, "flags: %s\n",
- sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
- seq_printf(seq, "id: %d\n", sec->ps_id);
- seq_printf(seq, "refcount: %d\n",
- atomic_read(&sec->ps_refcount));
- seq_printf(seq, "nctx: %d\n", atomic_read(&sec->ps_nctx));
- seq_printf(seq, "gc internal %ld\n", sec->ps_gc_interval);
- seq_printf(seq, "gc next %lld\n",
- sec->ps_gc_interval ?
- (s64)(sec->ps_gc_next - ktime_get_real_seconds()) : 0ll);
-
- sptlrpc_sec_put(sec);
-out:
- return 0;
-}
-
-LPROC_SEQ_FOPS_RO(sptlrpc_info_lprocfs);
-
-static int sptlrpc_ctxs_lprocfs_seq_show(struct seq_file *seq, void *v)
-{
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
- struct ptlrpc_sec *sec = NULL;
-
- LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 ||
- strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
- strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0);
-
- if (cli->cl_import)
- sec = sptlrpc_import_sec_ref(cli->cl_import);
- if (!sec)
- goto out;
-
- if (sec->ps_policy->sp_cops->display)
- sec->ps_policy->sp_cops->display(sec, seq);
-
- sptlrpc_sec_put(sec);
-out:
- return 0;
-}
-
-LPROC_SEQ_FOPS_RO(sptlrpc_ctxs_lprocfs);
-
-int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev)
-{
- int rc;
-
- if (strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) != 0 &&
- strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) != 0 &&
- strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) != 0) {
- CERROR("can't register lproc for obd type %s\n",
- dev->obd_type->typ_name);
- return -EINVAL;
- }
-
- rc = ldebugfs_obd_seq_create(dev, "srpc_info", 0444,
- &sptlrpc_info_lprocfs_fops, dev);
- if (rc) {
- CERROR("create proc entry srpc_info for %s: %d\n",
- dev->obd_name, rc);
- return rc;
- }
-
- rc = ldebugfs_obd_seq_create(dev, "srpc_contexts", 0444,
- &sptlrpc_ctxs_lprocfs_fops, dev);
- if (rc) {
- CERROR("create proc entry srpc_contexts for %s: %d\n",
- dev->obd_name, rc);
- return rc;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_lprocfs_cliobd_attach);
-
-LPROC_SEQ_FOPS_RO(sptlrpc_proc_enc_pool);
-static struct lprocfs_vars sptlrpc_lprocfs_vars[] = {
- { "encrypt_page_pools", &sptlrpc_proc_enc_pool_fops },
- { NULL }
-};
-
-static struct dentry *sptlrpc_debugfs_dir;
-
-int sptlrpc_lproc_init(void)
-{
- int rc;
-
- LASSERT(!sptlrpc_debugfs_dir);
-
- sptlrpc_debugfs_dir = ldebugfs_register("sptlrpc", debugfs_lustre_root,
- sptlrpc_lprocfs_vars, NULL);
- if (IS_ERR_OR_NULL(sptlrpc_debugfs_dir)) {
- rc = sptlrpc_debugfs_dir ? PTR_ERR(sptlrpc_debugfs_dir)
- : -ENOMEM;
- sptlrpc_debugfs_dir = NULL;
- return rc;
- }
- return 0;
-}
-
-void sptlrpc_lproc_fini(void)
-{
- if (!IS_ERR_OR_NULL(sptlrpc_debugfs_dir))
- ldebugfs_remove(&sptlrpc_debugfs_dir);
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
deleted file mode 100644
index ecc387d1b9b4..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ /dev/null
@@ -1,459 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_null.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include <obd_support.h>
-#include <obd_cksum.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lustre_sec.h>
-
-#include "ptlrpc_internal.h"
-
-static struct ptlrpc_sec_policy null_policy;
-static struct ptlrpc_sec null_sec;
-static struct ptlrpc_cli_ctx null_cli_ctx;
-static struct ptlrpc_svc_ctx null_svc_ctx;
-
-/*
- * we can temporarily use the topmost 8-bits of lm_secflvr to identify
- * the source sec part.
- */
-static inline
-void null_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
-{
- msg->lm_secflvr |= (((__u32)sp) & 0xFF) << 24;
-}
-
-static inline
-enum lustre_sec_part null_decode_sec_part(struct lustre_msg *msg)
-{
- return (msg->lm_secflvr >> 24) & 0xFF;
-}
-
-static int null_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
-{
- /* should never reach here */
- LBUG();
- return 0;
-}
-
-static
-int null_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
-{
- req->rq_reqbuf->lm_secflvr = SPTLRPC_FLVR_NULL;
-
- if (!req->rq_import->imp_dlm_fake) {
- struct obd_device *obd = req->rq_import->imp_obd;
-
- null_encode_sec_part(req->rq_reqbuf,
- obd->u.cli.cl_sp_me);
- }
- req->rq_reqdata_len = req->rq_reqlen;
- return 0;
-}
-
-static
-int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
-{
- __u32 cksums, cksumc;
-
- LASSERT(req->rq_repdata);
-
- req->rq_repmsg = req->rq_repdata;
- req->rq_replen = req->rq_repdata_len;
-
- if (req->rq_early) {
- cksums = lustre_msg_get_cksum(req->rq_repdata);
- cksumc = lustre_msg_calc_cksum(req->rq_repmsg);
- if (cksumc != cksums) {
- CDEBUG(D_SEC,
- "early reply checksum mismatch: %08x != %08x\n",
- cksumc, cksums);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static
-struct ptlrpc_sec *null_create_sec(struct obd_import *imp,
- struct ptlrpc_svc_ctx *svc_ctx,
- struct sptlrpc_flavor *sf)
-{
- LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_NULL);
-
- /* general layer has take a module reference for us, because we never
- * really destroy the sec, simply release the reference here.
- */
- sptlrpc_policy_put(&null_policy);
- return &null_sec;
-}
-
-static
-void null_destroy_sec(struct ptlrpc_sec *sec)
-{
- LASSERT(sec == &null_sec);
-}
-
-static
-struct ptlrpc_cli_ctx *null_lookup_ctx(struct ptlrpc_sec *sec,
- struct vfs_cred *vcred,
- int create, int remove_dead)
-{
- atomic_inc(&null_cli_ctx.cc_refcount);
- return &null_cli_ctx;
-}
-
-static
-int null_flush_ctx_cache(struct ptlrpc_sec *sec,
- uid_t uid,
- int grace, int force)
-{
- return 0;
-}
-
-static
-int null_alloc_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
-{
- if (!req->rq_reqbuf) {
- int alloc_size = size_roundup_power2(msgsize);
-
- LASSERT(!req->rq_pool);
- req->rq_reqbuf = kvzalloc(alloc_size, GFP_NOFS);
- if (!req->rq_reqbuf)
- return -ENOMEM;
-
- req->rq_reqbuf_len = alloc_size;
- } else {
- LASSERT(req->rq_pool);
- LASSERT(req->rq_reqbuf_len >= msgsize);
- memset(req->rq_reqbuf, 0, msgsize);
- }
-
- req->rq_reqmsg = req->rq_reqbuf;
- return 0;
-}
-
-static
-void null_free_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req)
-{
- if (!req->rq_pool) {
- LASSERTF(req->rq_reqmsg == req->rq_reqbuf,
- "req %p: reqmsg %p is not reqbuf %p in null sec\n",
- req, req->rq_reqmsg, req->rq_reqbuf);
- LASSERTF(req->rq_reqbuf_len >= req->rq_reqlen,
- "req %p: reqlen %d should smaller than buflen %d\n",
- req, req->rq_reqlen, req->rq_reqbuf_len);
-
- kvfree(req->rq_reqbuf);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
- }
-}
-
-static
-int null_alloc_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
-{
- /* add space for early replied */
- msgsize += lustre_msg_early_size();
-
- msgsize = size_roundup_power2(msgsize);
-
- req->rq_repbuf = kvzalloc(msgsize, GFP_NOFS);
- if (!req->rq_repbuf)
- return -ENOMEM;
-
- req->rq_repbuf_len = msgsize;
- return 0;
-}
-
-static
-void null_free_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req)
-{
- LASSERT(req->rq_repbuf);
-
- kvfree(req->rq_repbuf);
- req->rq_repbuf = NULL;
- req->rq_repbuf_len = 0;
-}
-
-static
-int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int segment, int newsize)
-{
- struct lustre_msg *newbuf;
- struct lustre_msg *oldbuf = req->rq_reqmsg;
- int oldsize, newmsg_size, alloc_size;
-
- LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf == req->rq_reqmsg);
- LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
- LASSERT(req->rq_reqlen == lustre_packed_msg_size(oldbuf));
-
- /* compute new message size */
- oldsize = req->rq_reqbuf->lm_buflens[segment];
- req->rq_reqbuf->lm_buflens[segment] = newsize;
- newmsg_size = lustre_packed_msg_size(oldbuf);
- req->rq_reqbuf->lm_buflens[segment] = oldsize;
-
- /* request from pool should always have enough buffer */
- LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size);
-
- if (req->rq_reqbuf_len < newmsg_size) {
- alloc_size = size_roundup_power2(newmsg_size);
-
- newbuf = kvzalloc(alloc_size, GFP_NOFS);
- if (!newbuf)
- return -ENOMEM;
-
- /* Must lock this, so that otherwise unprotected change of
- * rq_reqmsg is not racing with parallel processing of
- * imp_replay_list traversing threads. See LU-3333
- * This is a bandaid at best, we really need to deal with this
- * in request enlarging code before unpacking that's already
- * there
- */
- if (req->rq_import)
- spin_lock(&req->rq_import->imp_lock);
- memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
-
- kvfree(req->rq_reqbuf);
- req->rq_reqbuf = newbuf;
- req->rq_reqmsg = newbuf;
- req->rq_reqbuf_len = alloc_size;
-
- if (req->rq_import)
- spin_unlock(&req->rq_import->imp_lock);
- }
-
- _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
- req->rq_reqlen = newmsg_size;
-
- return 0;
-}
-
-static struct ptlrpc_svc_ctx null_svc_ctx = {
- .sc_refcount = ATOMIC_INIT(1),
- .sc_policy = &null_policy,
-};
-
-static
-int null_accept(struct ptlrpc_request *req)
-{
- LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
- SPTLRPC_POLICY_NULL);
-
- if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
- CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
- return SECSVC_DROP;
- }
-
- req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);
-
- req->rq_reqmsg = req->rq_reqbuf;
- req->rq_reqlen = req->rq_reqdata_len;
-
- req->rq_svc_ctx = &null_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
-
- return SECSVC_OK;
-}
-
-static
-int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
-{
- struct ptlrpc_reply_state *rs;
- int rs_size = sizeof(*rs) + msgsize;
-
- LASSERT(msgsize % 8 == 0);
-
- rs = req->rq_reply_state;
-
- if (rs) {
- /* pre-allocated */
- LASSERT(rs->rs_size >= rs_size);
- } else {
- rs = kvzalloc(rs_size, GFP_NOFS);
- if (!rs)
- return -ENOMEM;
-
- rs->rs_size = rs_size;
- }
-
- rs->rs_svc_ctx = req->rq_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
-
- rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
- rs->rs_repbuf_len = rs_size - sizeof(*rs);
- rs->rs_msg = rs->rs_repbuf;
-
- req->rq_reply_state = rs;
- return 0;
-}
-
-static
-void null_free_rs(struct ptlrpc_reply_state *rs)
-{
- LASSERT_ATOMIC_GT(&rs->rs_svc_ctx->sc_refcount, 1);
- atomic_dec(&rs->rs_svc_ctx->sc_refcount);
-
- if (!rs->rs_prealloc)
- kvfree(rs);
-}
-
-static
-int null_authorize(struct ptlrpc_request *req)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
-
- LASSERT(rs);
-
- rs->rs_repbuf->lm_secflvr = SPTLRPC_FLVR_NULL;
- rs->rs_repdata_len = req->rq_replen;
-
- if (likely(req->rq_packed_final)) {
- if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
- req->rq_reply_off = lustre_msg_early_size();
- else
- req->rq_reply_off = 0;
- } else {
- __u32 cksum;
-
- cksum = lustre_msg_calc_cksum(rs->rs_repbuf);
- lustre_msg_set_cksum(rs->rs_repbuf, cksum);
- req->rq_reply_off = 0;
- }
-
- return 0;
-}
-
-static struct ptlrpc_ctx_ops null_ctx_ops = {
- .refresh = null_ctx_refresh,
- .sign = null_ctx_sign,
- .verify = null_ctx_verify,
-};
-
-static struct ptlrpc_sec_cops null_sec_cops = {
- .create_sec = null_create_sec,
- .destroy_sec = null_destroy_sec,
- .lookup_ctx = null_lookup_ctx,
- .flush_ctx_cache = null_flush_ctx_cache,
- .alloc_reqbuf = null_alloc_reqbuf,
- .alloc_repbuf = null_alloc_repbuf,
- .free_reqbuf = null_free_reqbuf,
- .free_repbuf = null_free_repbuf,
- .enlarge_reqbuf = null_enlarge_reqbuf,
-};
-
-static struct ptlrpc_sec_sops null_sec_sops = {
- .accept = null_accept,
- .alloc_rs = null_alloc_rs,
- .authorize = null_authorize,
- .free_rs = null_free_rs,
-};
-
-static struct ptlrpc_sec_policy null_policy = {
- .sp_owner = THIS_MODULE,
- .sp_name = "sec.null",
- .sp_policy = SPTLRPC_POLICY_NULL,
- .sp_cops = &null_sec_cops,
- .sp_sops = &null_sec_sops,
-};
-
-static void null_init_internal(void)
-{
- static HLIST_HEAD(__list);
-
- null_sec.ps_policy = &null_policy;
- atomic_set(&null_sec.ps_refcount, 1); /* always busy */
- null_sec.ps_id = -1;
- null_sec.ps_import = NULL;
- null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
- null_sec.ps_flvr.sf_flags = 0;
- null_sec.ps_part = LUSTRE_SP_ANY;
- null_sec.ps_dying = 0;
- spin_lock_init(&null_sec.ps_lock);
- atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
- INIT_LIST_HEAD(&null_sec.ps_gc_list);
- null_sec.ps_gc_interval = 0;
- null_sec.ps_gc_next = 0;
-
- hlist_add_head(&null_cli_ctx.cc_cache, &__list);
- atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
- null_cli_ctx.cc_sec = &null_sec;
- null_cli_ctx.cc_ops = &null_ctx_ops;
- null_cli_ctx.cc_expire = 0;
- null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
- PTLRPC_CTX_UPTODATE;
- null_cli_ctx.cc_vcred.vc_uid = 0;
- spin_lock_init(&null_cli_ctx.cc_lock);
- INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
- INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
-}
-
-int sptlrpc_null_init(void)
-{
- int rc;
-
- null_init_internal();
-
- rc = sptlrpc_register_policy(&null_policy);
- if (rc)
- CERROR("failed to register %s: %d\n", null_policy.sp_name, rc);
-
- return rc;
-}
-
-void sptlrpc_null_fini(void)
-{
- int rc;
-
- rc = sptlrpc_unregister_policy(&null_policy);
- if (rc)
- CERROR("failed to unregister %s: %d\n",
- null_policy.sp_name, rc);
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
deleted file mode 100644
index ec3d9af76b17..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ /dev/null
@@ -1,1023 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_plain.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include <obd_support.h>
-#include <obd_cksum.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lustre_sec.h>
-#include "ptlrpc_internal.h"
-
-struct plain_sec {
- struct ptlrpc_sec pls_base;
- rwlock_t pls_lock;
- struct ptlrpc_cli_ctx *pls_ctx;
-};
-
-static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
-{
- return container_of(sec, struct plain_sec, pls_base);
-}
-
-static struct ptlrpc_sec_policy plain_policy;
-static struct ptlrpc_ctx_ops plain_ctx_ops;
-static struct ptlrpc_svc_ctx plain_svc_ctx;
-
-static unsigned int plain_at_offset;
-
-/*
- * for simplicity, plain policy rpc use fixed layout.
- */
-#define PLAIN_PACK_SEGMENTS (4)
-
-#define PLAIN_PACK_HDR_OFF (0)
-#define PLAIN_PACK_MSG_OFF (1)
-#define PLAIN_PACK_USER_OFF (2)
-#define PLAIN_PACK_BULK_OFF (3)
-
-#define PLAIN_FL_USER (0x01)
-#define PLAIN_FL_BULK (0x02)
-
-struct plain_header {
- __u8 ph_ver; /* 0 */
- __u8 ph_flags;
- __u8 ph_sp; /* source */
- __u8 ph_bulk_hash_alg; /* complete flavor desc */
- __u8 ph_pad[4];
-};
-
-struct plain_bulk_token {
- __u8 pbt_hash[8];
-};
-
-#define PLAIN_BSD_SIZE \
- (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))
-
-/****************************************
- * bulk checksum helpers *
- ****************************************/
-
-static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
-{
- struct ptlrpc_bulk_sec_desc *bsd;
-
- if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
- return -EPROTO;
-
- bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
- if (!bsd) {
- CERROR("bulk sec desc has short size %d\n",
- lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
- return -EPROTO;
- }
-
- if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
- bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
- CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
- return -EPROTO;
- }
-
- return 0;
-}
-
-static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
- __u8 hash_alg,
- struct plain_bulk_token *token)
-{
- if (hash_alg == BULK_HASH_ALG_NULL)
- return 0;
-
- memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
- return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
- sizeof(token->pbt_hash));
-}
-
-static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
- __u8 hash_alg,
- struct plain_bulk_token *tokenr)
-{
- struct plain_bulk_token tokenv;
- int rc;
-
- if (hash_alg == BULK_HASH_ALG_NULL)
- return 0;
-
- memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
- rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
- sizeof(tokenv.pbt_hash));
- if (rc)
- return rc;
-
- if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
- return -EACCES;
- return 0;
-}
-
-static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
-{
- char *ptr;
- unsigned int off, i;
-
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- if (!BD_GET_KIOV(desc, i).bv_len)
- continue;
-
- ptr = kmap(BD_GET_KIOV(desc, i).bv_page);
- off = BD_GET_KIOV(desc, i).bv_offset & ~PAGE_MASK;
- ptr[off] ^= 0x1;
- kunmap(BD_GET_KIOV(desc, i).bv_page);
- return;
- }
-}
-
-/****************************************
- * cli_ctx apis *
- ****************************************/
-
-static
-int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
-{
- /* should never reach here */
- LBUG();
- return 0;
-}
-
-static
-int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
-{
- return 0;
-}
-
-static
-int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
-{
- struct lustre_msg *msg = req->rq_reqbuf;
- struct plain_header *phdr;
-
- msg->lm_secflvr = req->rq_flvr.sf_rpc;
-
- phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
- phdr->ph_ver = 0;
- phdr->ph_flags = 0;
- phdr->ph_sp = ctx->cc_sec->ps_part;
- phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
-
- if (req->rq_pack_udesc)
- phdr->ph_flags |= PLAIN_FL_USER;
- if (req->rq_pack_bulk)
- phdr->ph_flags |= PLAIN_FL_BULK;
-
- req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
- msg->lm_buflens);
- return 0;
-}
-
-static
-int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
-{
- struct lustre_msg *msg = req->rq_repdata;
- struct plain_header *phdr;
- __u32 cksum;
- int swabbed;
-
- if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
- CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
- return -EPROTO;
- }
-
- swabbed = ptlrpc_rep_need_swab(req);
-
- phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
- if (!phdr) {
- CERROR("missing plain header\n");
- return -EPROTO;
- }
-
- if (phdr->ph_ver != 0) {
- CERROR("Invalid header version\n");
- return -EPROTO;
- }
-
- /* expect no user desc in reply */
- if (phdr->ph_flags & PLAIN_FL_USER) {
- CERROR("Unexpected udesc flag in reply\n");
- return -EPROTO;
- }
-
- if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
- CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
- req->rq_flvr.u_bulk.hash.hash_alg);
- return -EPROTO;
- }
-
- if (unlikely(req->rq_early)) {
- unsigned int hsize = 4;
-
- cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
- lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
- 0),
- lustre_msg_buflen(msg,
- PLAIN_PACK_MSG_OFF),
- NULL, 0, (unsigned char *)&cksum,
- &hsize);
- if (cksum != msg->lm_cksum) {
- CDEBUG(D_SEC,
- "early reply checksum mismatch: %08x != %08x\n",
- cpu_to_le32(cksum), msg->lm_cksum);
- return -EINVAL;
- }
- } else {
- /* whether we sent with bulk or not, we expect the same
- * in reply, except for early reply
- */
- if (!req->rq_early &&
- !equi(req->rq_pack_bulk == 1,
- phdr->ph_flags & PLAIN_FL_BULK)) {
- CERROR("%s bulk checksum in reply\n",
- req->rq_pack_bulk ? "Missing" : "Unexpected");
- return -EPROTO;
- }
-
- if (phdr->ph_flags & PLAIN_FL_BULK) {
- if (plain_unpack_bsd(msg, swabbed))
- return -EPROTO;
- }
- }
-
- req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
- req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
- return 0;
-}
-
-static
-int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_bulk_sec_desc *bsd;
- struct plain_bulk_token *token;
- int rc;
-
- LASSERT(req->rq_pack_bulk);
- LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
-
- bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- token = (struct plain_bulk_token *)bsd->bsd_data;
-
- bsd->bsd_version = 0;
- bsd->bsd_flags = 0;
- bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
- bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
-
- if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
- return 0;
-
- if (req->rq_bulk_read)
- return 0;
-
- rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
- token);
- if (rc) {
- CERROR("bulk write: failed to compute checksum: %d\n", rc);
- } else {
- /*
- * for sending we only compute the wrong checksum instead
- * of corrupting the data so it is still correct on a redo
- */
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
- req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
- token->pbt_hash[0] ^= 0x1;
- }
-
- return rc;
-}
-
-static
-int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_bulk_sec_desc *bsdv;
- struct plain_bulk_token *tokenv;
- int rc;
- int i, nob;
-
- LASSERT(req->rq_pack_bulk);
- LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
- LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
-
- bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
- tokenv = (struct plain_bulk_token *)bsdv->bsd_data;
-
- if (req->rq_bulk_write) {
- if (bsdv->bsd_flags & BSD_FL_ERR)
- return -EIO;
- return 0;
- }
-
- /* fix the actual data size */
- for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- struct bio_vec bv_desc = BD_GET_KIOV(desc, i);
-
- if (bv_desc.bv_len + nob > desc->bd_nob_transferred)
- bv_desc.bv_len = desc->bd_nob_transferred - nob;
- nob += bv_desc.bv_len;
- }
-
- rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
- tokenv);
- if (rc)
- CERROR("bulk read: client verify failed: %d\n", rc);
-
- return rc;
-}
-
-/****************************************
- * sec apis *
- ****************************************/
-
-static
-struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
-{
- struct ptlrpc_cli_ctx *ctx, *ctx_new;
-
- ctx_new = kzalloc(sizeof(*ctx_new), GFP_NOFS);
-
- write_lock(&plsec->pls_lock);
-
- ctx = plsec->pls_ctx;
- if (ctx) {
- atomic_inc(&ctx->cc_refcount);
-
- kfree(ctx_new);
- } else if (ctx_new) {
- ctx = ctx_new;
-
- atomic_set(&ctx->cc_refcount, 1); /* for cache */
- ctx->cc_sec = &plsec->pls_base;
- ctx->cc_ops = &plain_ctx_ops;
- ctx->cc_expire = 0;
- ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
- ctx->cc_vcred.vc_uid = 0;
- spin_lock_init(&ctx->cc_lock);
- INIT_LIST_HEAD(&ctx->cc_req_list);
- INIT_LIST_HEAD(&ctx->cc_gc_chain);
-
- plsec->pls_ctx = ctx;
- atomic_inc(&plsec->pls_base.ps_nctx);
- atomic_inc(&plsec->pls_base.ps_refcount);
-
- atomic_inc(&ctx->cc_refcount); /* for caller */
- }
-
- write_unlock(&plsec->pls_lock);
-
- return ctx;
-}
-
-static
-void plain_destroy_sec(struct ptlrpc_sec *sec)
-{
- struct plain_sec *plsec = sec2plsec(sec);
-
- LASSERT(sec->ps_policy == &plain_policy);
- LASSERT(sec->ps_import);
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
- LASSERT(!plsec->pls_ctx);
-
- class_import_put(sec->ps_import);
-
- kfree(plsec);
-}
-
-static
-void plain_kill_sec(struct ptlrpc_sec *sec)
-{
- sec->ps_dying = 1;
-}
-
-static
-struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
- struct ptlrpc_svc_ctx *svc_ctx,
- struct sptlrpc_flavor *sf)
-{
- struct plain_sec *plsec;
- struct ptlrpc_sec *sec;
- struct ptlrpc_cli_ctx *ctx;
-
- LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
-
- plsec = kzalloc(sizeof(*plsec), GFP_NOFS);
- if (!plsec)
- return NULL;
-
- /*
- * initialize plain_sec
- */
- rwlock_init(&plsec->pls_lock);
- plsec->pls_ctx = NULL;
-
- sec = &plsec->pls_base;
- sec->ps_policy = &plain_policy;
- atomic_set(&sec->ps_refcount, 0);
- atomic_set(&sec->ps_nctx, 0);
- sec->ps_id = sptlrpc_get_next_secid();
- sec->ps_import = class_import_get(imp);
- sec->ps_flvr = *sf;
- spin_lock_init(&sec->ps_lock);
- INIT_LIST_HEAD(&sec->ps_gc_list);
- sec->ps_gc_interval = 0;
- sec->ps_gc_next = 0;
-
- /* install ctx immediately if this is a reverse sec */
- if (svc_ctx) {
- ctx = plain_sec_install_ctx(plsec);
- if (!ctx) {
- plain_destroy_sec(sec);
- return NULL;
- }
- sptlrpc_cli_ctx_put(ctx, 1);
- }
-
- return sec;
-}
-
-static
-struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
- struct vfs_cred *vcred,
- int create, int remove_dead)
-{
- struct plain_sec *plsec = sec2plsec(sec);
- struct ptlrpc_cli_ctx *ctx;
-
- read_lock(&plsec->pls_lock);
- ctx = plsec->pls_ctx;
- if (ctx)
- atomic_inc(&ctx->cc_refcount);
- read_unlock(&plsec->pls_lock);
-
- if (unlikely(!ctx))
- ctx = plain_sec_install_ctx(plsec);
-
- return ctx;
-}
-
-static
-void plain_release_ctx(struct ptlrpc_sec *sec,
- struct ptlrpc_cli_ctx *ctx, int sync)
-{
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(ctx->cc_sec == sec);
-
- kfree(ctx);
-
- atomic_dec(&sec->ps_nctx);
- sptlrpc_sec_put(sec);
-}
-
-static
-int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
- uid_t uid, int grace, int force)
-{
- struct plain_sec *plsec = sec2plsec(sec);
- struct ptlrpc_cli_ctx *ctx;
-
- /* do nothing unless caller want to flush for 'all' */
- if (uid != -1)
- return 0;
-
- write_lock(&plsec->pls_lock);
- ctx = plsec->pls_ctx;
- plsec->pls_ctx = NULL;
- write_unlock(&plsec->pls_lock);
-
- if (ctx)
- sptlrpc_cli_ctx_put(ctx, 1);
- return 0;
-}
-
-static
-int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
-{
- __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int alloc_len;
-
- buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
- buflens[PLAIN_PACK_MSG_OFF] = msgsize;
-
- if (req->rq_pack_udesc)
- buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();
-
- if (req->rq_pack_bulk) {
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
- }
-
- alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
-
- if (!req->rq_reqbuf) {
- LASSERT(!req->rq_pool);
-
- alloc_len = size_roundup_power2(alloc_len);
- req->rq_reqbuf = kvzalloc(alloc_len, GFP_NOFS);
- if (!req->rq_reqbuf)
- return -ENOMEM;
-
- req->rq_reqbuf_len = alloc_len;
- } else {
- LASSERT(req->rq_pool);
- LASSERT(req->rq_reqbuf_len >= alloc_len);
- memset(req->rq_reqbuf, 0, alloc_len);
- }
-
- lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
- req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);
-
- if (req->rq_pack_udesc) {
- int rc = sptlrpc_pack_user_desc(req->rq_reqbuf,
- PLAIN_PACK_USER_OFF);
- if (rc < 0)
- return rc;
- }
-
- return 0;
-}
-
-static
-void plain_free_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req)
-{
- if (!req->rq_pool) {
- kvfree(req->rq_reqbuf);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
- }
-}
-
-static
-int plain_alloc_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
-{
- __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int alloc_len;
-
- buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
- buflens[PLAIN_PACK_MSG_OFF] = msgsize;
-
- if (req->rq_pack_bulk) {
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
- }
-
- alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
-
- /* add space for early reply */
- alloc_len += plain_at_offset;
-
- alloc_len = size_roundup_power2(alloc_len);
-
- req->rq_repbuf = kvzalloc(alloc_len, GFP_NOFS);
- if (!req->rq_repbuf)
- return -ENOMEM;
-
- req->rq_repbuf_len = alloc_len;
- return 0;
-}
-
-static
-void plain_free_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req)
-{
- kvfree(req->rq_repbuf);
- req->rq_repbuf = NULL;
- req->rq_repbuf_len = 0;
-}
-
-static
-int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int segment, int newsize)
-{
- struct lustre_msg *newbuf;
- int oldsize;
- int newmsg_size, newbuf_size;
-
- LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
- LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
- req->rq_reqmsg);
-
- /* compute new embedded msg size. */
- oldsize = req->rq_reqmsg->lm_buflens[segment];
- req->rq_reqmsg->lm_buflens[segment] = newsize;
- newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
- req->rq_reqmsg->lm_buflens);
- req->rq_reqmsg->lm_buflens[segment] = oldsize;
-
- /* compute new wrapper msg size. */
- oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
- req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
- newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
- req->rq_reqbuf->lm_buflens);
- req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;
-
- /* request from pool should always have enough buffer */
- LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
-
- if (req->rq_reqbuf_len < newbuf_size) {
- newbuf_size = size_roundup_power2(newbuf_size);
-
- newbuf = kvzalloc(newbuf_size, GFP_NOFS);
- if (!newbuf)
- return -ENOMEM;
-
- /* Must lock this, so that otherwise unprotected change of
- * rq_reqmsg is not racing with parallel processing of
- * imp_replay_list traversing threads. See LU-3333
- * This is a bandaid at best, we really need to deal with this
- * in request enlarging code before unpacking that's already
- * there
- */
- if (req->rq_import)
- spin_lock(&req->rq_import->imp_lock);
-
- memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
-
- kvfree(req->rq_reqbuf);
- req->rq_reqbuf = newbuf;
- req->rq_reqbuf_len = newbuf_size;
- req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
- PLAIN_PACK_MSG_OFF, 0);
-
- if (req->rq_import)
- spin_unlock(&req->rq_import->imp_lock);
- }
-
- _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
- newmsg_size);
- _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
-
- req->rq_reqlen = newmsg_size;
- return 0;
-}
-
-/****************************************
- * service apis *
- ****************************************/
-
-static struct ptlrpc_svc_ctx plain_svc_ctx = {
- .sc_refcount = ATOMIC_INIT(1),
- .sc_policy = &plain_policy,
-};
-
-static
-int plain_accept(struct ptlrpc_request *req)
-{
- struct lustre_msg *msg = req->rq_reqbuf;
- struct plain_header *phdr;
- int swabbed;
-
- LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
- SPTLRPC_POLICY_PLAIN);
-
- if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
- SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
- SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
- SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
- CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
- return SECSVC_DROP;
- }
-
- if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
- CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
- return SECSVC_DROP;
- }
-
- swabbed = ptlrpc_req_need_swab(req);
-
- phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
- if (!phdr) {
- CERROR("missing plain header\n");
- return -EPROTO;
- }
-
- if (phdr->ph_ver != 0) {
- CERROR("Invalid header version\n");
- return -EPROTO;
- }
-
- if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
- CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
- return -EPROTO;
- }
-
- req->rq_sp_from = phdr->ph_sp;
- req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;
-
- if (phdr->ph_flags & PLAIN_FL_USER) {
- if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
- swabbed)) {
- CERROR("Mal-formed user descriptor\n");
- return SECSVC_DROP;
- }
-
- req->rq_pack_udesc = 1;
- req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
- }
-
- if (phdr->ph_flags & PLAIN_FL_BULK) {
- if (plain_unpack_bsd(msg, swabbed))
- return SECSVC_DROP;
-
- req->rq_pack_bulk = 1;
- }
-
- req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
- req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
-
- req->rq_svc_ctx = &plain_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
-
- return SECSVC_OK;
-}
-
-static
-int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
-{
- struct ptlrpc_reply_state *rs;
- __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int rs_size = sizeof(*rs);
-
- LASSERT(msgsize % 8 == 0);
-
- buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
- buflens[PLAIN_PACK_MSG_OFF] = msgsize;
-
- if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
- buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
-
- rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
-
- rs = req->rq_reply_state;
-
- if (rs) {
- /* pre-allocated */
- LASSERT(rs->rs_size >= rs_size);
- } else {
- rs = kvzalloc(rs_size, GFP_NOFS);
- if (!rs)
- return -ENOMEM;
-
- rs->rs_size = rs_size;
- }
-
- rs->rs_svc_ctx = req->rq_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
- rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
- rs->rs_repbuf_len = rs_size - sizeof(*rs);
-
- lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
- rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
-
- req->rq_reply_state = rs;
- return 0;
-}
-
-static
-void plain_free_rs(struct ptlrpc_reply_state *rs)
-{
- LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
- atomic_dec(&rs->rs_svc_ctx->sc_refcount);
-
- if (!rs->rs_prealloc)
- kvfree(rs);
-}
-
-static
-int plain_authorize(struct ptlrpc_request *req)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- struct lustre_msg_v2 *msg = rs->rs_repbuf;
- struct plain_header *phdr;
- int len;
-
- LASSERT(rs);
- LASSERT(msg);
-
- if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
- len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
- req->rq_replen, 1);
- else
- len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
-
- msg->lm_secflvr = req->rq_flvr.sf_rpc;
-
- phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
- phdr->ph_ver = 0;
- phdr->ph_flags = 0;
- phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
-
- if (req->rq_pack_bulk)
- phdr->ph_flags |= PLAIN_FL_BULK;
-
- rs->rs_repdata_len = len;
-
- if (likely(req->rq_packed_final)) {
- if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
- req->rq_reply_off = plain_at_offset;
- else
- req->rq_reply_off = 0;
- } else {
- unsigned int hsize = 4;
-
- cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
- lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
- 0),
- lustre_msg_buflen(msg,
- PLAIN_PACK_MSG_OFF),
- NULL, 0, (unsigned char *)&msg->lm_cksum,
- &hsize);
- req->rq_reply_off = 0;
- }
-
- return 0;
-}
-
-static
-int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
- struct plain_bulk_token *tokenr;
- int rc;
-
- LASSERT(req->rq_bulk_write);
- LASSERT(req->rq_pack_bulk);
-
- bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- tokenr = (struct plain_bulk_token *)bsdr->bsd_data;
- bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
-
- bsdv->bsd_version = 0;
- bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
- bsdv->bsd_svc = bsdr->bsd_svc;
- bsdv->bsd_flags = 0;
-
- if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
- return 0;
-
- rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
- tokenr);
- if (rc) {
- bsdv->bsd_flags |= BSD_FL_ERR;
- CERROR("bulk write: server verify failed: %d\n", rc);
- }
-
- return rc;
-}
-
-static
-int plain_svc_wrap_bulk(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
- struct plain_bulk_token *tokenv;
- int rc;
-
- LASSERT(req->rq_bulk_read);
- LASSERT(req->rq_pack_bulk);
-
- bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
- tokenv = (struct plain_bulk_token *)bsdv->bsd_data;
-
- bsdv->bsd_version = 0;
- bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
- bsdv->bsd_svc = bsdr->bsd_svc;
- bsdv->bsd_flags = 0;
-
- if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
- return 0;
-
- rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
- tokenv);
- if (rc) {
- CERROR("bulk read: server failed to compute checksum: %d\n",
- rc);
- } else {
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
- corrupt_bulk_data(desc);
- }
-
- return rc;
-}
-
-static struct ptlrpc_ctx_ops plain_ctx_ops = {
- .refresh = plain_ctx_refresh,
- .validate = plain_ctx_validate,
- .sign = plain_ctx_sign,
- .verify = plain_ctx_verify,
- .wrap_bulk = plain_cli_wrap_bulk,
- .unwrap_bulk = plain_cli_unwrap_bulk,
-};
-
-static struct ptlrpc_sec_cops plain_sec_cops = {
- .create_sec = plain_create_sec,
- .destroy_sec = plain_destroy_sec,
- .kill_sec = plain_kill_sec,
- .lookup_ctx = plain_lookup_ctx,
- .release_ctx = plain_release_ctx,
- .flush_ctx_cache = plain_flush_ctx_cache,
- .alloc_reqbuf = plain_alloc_reqbuf,
- .free_reqbuf = plain_free_reqbuf,
- .alloc_repbuf = plain_alloc_repbuf,
- .free_repbuf = plain_free_repbuf,
- .enlarge_reqbuf = plain_enlarge_reqbuf,
-};
-
-static struct ptlrpc_sec_sops plain_sec_sops = {
- .accept = plain_accept,
- .alloc_rs = plain_alloc_rs,
- .authorize = plain_authorize,
- .free_rs = plain_free_rs,
- .unwrap_bulk = plain_svc_unwrap_bulk,
- .wrap_bulk = plain_svc_wrap_bulk,
-};
-
-static struct ptlrpc_sec_policy plain_policy = {
- .sp_owner = THIS_MODULE,
- .sp_name = "plain",
- .sp_policy = SPTLRPC_POLICY_PLAIN,
- .sp_cops = &plain_sec_cops,
- .sp_sops = &plain_sec_sops,
-};
-
-int sptlrpc_plain_init(void)
-{
- __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int rc;
-
- buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
- plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
-
- rc = sptlrpc_register_policy(&plain_policy);
- if (rc)
- CERROR("failed to register: %d\n", rc);
-
- return rc;
-}
-
-void sptlrpc_plain_fini(void)
-{
- int rc;
-
- rc = sptlrpc_unregister_policy(&plain_policy);
- if (rc)
- CERROR("cannot unregister: %d\n", rc);
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
deleted file mode 100644
index f37364e00dfe..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ /dev/null
@@ -1,2804 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lu_object.h>
-#include <uapi/linux/lnet/lnet-types.h>
-#include "ptlrpc_internal.h"
-
-/* The following are visible and mutable through /sys/module/ptlrpc */
-int test_req_buffer_pressure;
-module_param(test_req_buffer_pressure, int, 0444);
-MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
-module_param(at_min, int, 0644);
-MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
-module_param(at_max, int, 0644);
-MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
-module_param(at_history, int, 0644);
-MODULE_PARM_DESC(at_history,
- "Adaptive timeouts remember the slowest event that took place within this period (sec)");
-module_param(at_early_margin, int, 0644);
-MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
-module_param(at_extra, int, 0644);
-MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
-
-/* forward ref */
-static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
-static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
-static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
-
-/** Holds a list of all PTLRPC services */
-LIST_HEAD(ptlrpc_all_services);
-/** Used to protect the \e ptlrpc_all_services list */
-struct mutex ptlrpc_all_services_mutex;
-
-static struct ptlrpc_request_buffer_desc *
-ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request_buffer_desc *rqbd;
-
- rqbd = kzalloc_node(sizeof(*rqbd), GFP_NOFS,
- cfs_cpt_spread_node(svc->srv_cptable,
- svcpt->scp_cpt));
- if (!rqbd)
- return NULL;
-
- rqbd->rqbd_svcpt = svcpt;
- rqbd->rqbd_refcount = 0;
- rqbd->rqbd_cbid.cbid_fn = request_in_callback;
- rqbd->rqbd_cbid.cbid_arg = rqbd;
- INIT_LIST_HEAD(&rqbd->rqbd_reqs);
- rqbd->rqbd_buffer = kvzalloc_node(svc->srv_buf_size, GFP_KERNEL,
- cfs_cpt_spread_node(svc->srv_cptable,
- svcpt->scp_cpt));
-
- if (!rqbd->rqbd_buffer) {
- kfree(rqbd);
- return NULL;
- }
-
- spin_lock(&svcpt->scp_lock);
- list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
- svcpt->scp_nrqbds_total++;
- spin_unlock(&svcpt->scp_lock);
-
- return rqbd;
-}
-
-static void
-ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
-{
- struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
-
- LASSERT(rqbd->rqbd_refcount == 0);
- LASSERT(list_empty(&rqbd->rqbd_reqs));
-
- spin_lock(&svcpt->scp_lock);
- list_del(&rqbd->rqbd_list);
- svcpt->scp_nrqbds_total--;
- spin_unlock(&svcpt->scp_lock);
-
- kvfree(rqbd->rqbd_buffer);
- kfree(rqbd);
-}
-
-static int
-ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
-{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request_buffer_desc *rqbd;
- int rc = 0;
- int i;
-
- if (svcpt->scp_rqbd_allocating)
- goto try_post;
-
- spin_lock(&svcpt->scp_lock);
- /* check again with lock */
- if (svcpt->scp_rqbd_allocating) {
- /* NB: we might allow more than one thread in the future */
- LASSERT(svcpt->scp_rqbd_allocating == 1);
- spin_unlock(&svcpt->scp_lock);
- goto try_post;
- }
-
- svcpt->scp_rqbd_allocating++;
- spin_unlock(&svcpt->scp_lock);
-
- for (i = 0; i < svc->srv_nbuf_per_group; i++) {
- /* NB: another thread might have recycled enough rqbds, we
- * need to make sure it wouldn't over-allocate, see LU-1212.
- */
- if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
- break;
-
- rqbd = ptlrpc_alloc_rqbd(svcpt);
-
- if (!rqbd) {
- CERROR("%s: Can't allocate request buffer\n",
- svc->srv_name);
- rc = -ENOMEM;
- break;
- }
- }
-
- spin_lock(&svcpt->scp_lock);
-
- LASSERT(svcpt->scp_rqbd_allocating == 1);
- svcpt->scp_rqbd_allocating--;
-
- spin_unlock(&svcpt->scp_lock);
-
- CDEBUG(D_RPCTRACE,
- "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
- svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted,
- svcpt->scp_nrqbds_total, rc);
-
- try_post:
- if (post && rc == 0)
- rc = ptlrpc_server_post_idle_rqbds(svcpt);
-
- return rc;
-}
-
-struct ptlrpc_hr_partition;
-
-struct ptlrpc_hr_thread {
- int hrt_id; /* thread ID */
- spinlock_t hrt_lock;
- wait_queue_head_t hrt_waitq;
- struct list_head hrt_queue; /* RS queue */
- struct ptlrpc_hr_partition *hrt_partition;
-};
-
-struct ptlrpc_hr_partition {
- /* # of started threads */
- atomic_t hrp_nstarted;
- /* # of stopped threads */
- atomic_t hrp_nstopped;
- /* cpu partition id */
- int hrp_cpt;
- /* round-robin rotor for choosing thread */
- int hrp_rotor;
- /* total number of threads on this partition */
- int hrp_nthrs;
- /* threads table */
- struct ptlrpc_hr_thread *hrp_thrs;
-};
-
-#define HRT_RUNNING 0
-#define HRT_STOPPING 1
-
-struct ptlrpc_hr_service {
- /* CPU partition table, it's just cfs_cpt_table for now */
- struct cfs_cpt_table *hr_cpt_table;
- /** controller sleep waitq */
- wait_queue_head_t hr_waitq;
- unsigned int hr_stopping;
- /** roundrobin rotor for non-affinity service */
- unsigned int hr_rotor;
- /* partition data */
- struct ptlrpc_hr_partition **hr_partitions;
-};
-
-/** reply handling service. */
-static struct ptlrpc_hr_service ptlrpc_hr;
-
-/**
- * Choose an hr thread to dispatch requests to.
- */
-static struct ptlrpc_hr_thread *
-ptlrpc_hr_select(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_hr_partition *hrp;
- unsigned int rotor;
-
- if (svcpt->scp_cpt >= 0 &&
- svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) {
- /* directly match partition */
- hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt];
-
- } else {
- rotor = ptlrpc_hr.hr_rotor++;
- rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table);
-
- hrp = ptlrpc_hr.hr_partitions[rotor];
- }
-
- rotor = hrp->hrp_rotor++;
- return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs];
-}
-
-/**
- * Put reply state into a queue for processing because we received
- * ACK from the client
- */
-void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
-{
- struct ptlrpc_hr_thread *hrt;
-
- LASSERT(list_empty(&rs->rs_list));
-
- hrt = ptlrpc_hr_select(rs->rs_svcpt);
-
- spin_lock(&hrt->hrt_lock);
- list_add_tail(&rs->rs_list, &hrt->hrt_queue);
- spin_unlock(&hrt->hrt_lock);
-
- wake_up(&hrt->hrt_waitq);
-}
-
-void
-ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
-{
- assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
- assert_spin_locked(&rs->rs_lock);
- LASSERT(rs->rs_difficult);
- rs->rs_scheduled_ever = 1; /* flag any notification attempt */
-
- if (rs->rs_scheduled) { /* being set up or already notified */
- return;
- }
-
- rs->rs_scheduled = 1;
- list_del_init(&rs->rs_list);
- ptlrpc_dispatch_difficult_reply(rs);
-}
-EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);
-
-static int
-ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_request_buffer_desc *rqbd;
- int rc;
- int posted = 0;
-
- for (;;) {
- spin_lock(&svcpt->scp_lock);
-
- if (list_empty(&svcpt->scp_rqbd_idle)) {
- spin_unlock(&svcpt->scp_lock);
- return posted;
- }
-
- rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
- list_del(&rqbd->rqbd_list);
-
- /* assume we will post successfully */
- svcpt->scp_nrqbds_posted++;
- list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
-
- spin_unlock(&svcpt->scp_lock);
-
- rc = ptlrpc_register_rqbd(rqbd);
- if (rc != 0)
- break;
-
- posted = 1;
- }
-
- spin_lock(&svcpt->scp_lock);
-
- svcpt->scp_nrqbds_posted--;
- list_del(&rqbd->rqbd_list);
- list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
-
- /* Don't complain if no request buffers are posted right now; LNET
- * won't drop requests because we set the portal lazy!
- */
-
- spin_unlock(&svcpt->scp_lock);
-
- return -1;
-}
-
-static void ptlrpc_at_timer(struct timer_list *t)
-{
- struct ptlrpc_service_part *svcpt;
-
- svcpt = from_timer(svcpt, t, scp_at_timer);
-
- svcpt->scp_at_check = 1;
- svcpt->scp_at_checktime = cfs_time_current();
- wake_up(&svcpt->scp_waitq);
-}
-
-static void
-ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
- struct ptlrpc_service_conf *conf)
-{
- struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
- unsigned int init;
- unsigned int total;
- unsigned int nthrs;
- int weight;
-
- /*
- * Common code for estimating & validating threads number.
- * CPT affinity service could have percpt thread-pool instead
- * of a global thread-pool, which means user might not always
- * get the threads number they give it in conf::tc_nthrs_user
- * even they did set. It's because we need to validate threads
- * number for each CPT to guarantee each pool will have enough
- * threads to keep the service healthy.
- */
- init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL);
- init = max_t(int, init, tc->tc_nthrs_init);
-
- /* NB: please see comments in lustre_lnet.h for definition
- * details of these members
- */
- LASSERT(tc->tc_nthrs_max != 0);
-
- if (tc->tc_nthrs_user != 0) {
- /* In case there is a reason to test a service with many
- * threads, we give a less strict check here, it can
- * be up to 8 * nthrs_max
- */
- total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
- nthrs = total / svc->srv_ncpts;
- init = max(init, nthrs);
- goto out;
- }
-
- total = tc->tc_nthrs_max;
- if (tc->tc_nthrs_base == 0) {
- /* don't care about base threads number per partition,
- * this is most for non-affinity service
- */
- nthrs = total / svc->srv_ncpts;
- goto out;
- }
-
- nthrs = tc->tc_nthrs_base;
- if (svc->srv_ncpts == 1) {
- int i;
-
- /* NB: Increase the base number if it's single partition
- * and total number of cores/HTs is larger or equal to 4.
- * result will always < 2 * nthrs_base
- */
- weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
- for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
- (tc->tc_nthrs_base >> i) != 0; i++)
- nthrs += tc->tc_nthrs_base >> i;
- }
-
- if (tc->tc_thr_factor != 0) {
- int factor = tc->tc_thr_factor;
- const int fade = 4;
-
- /*
- * User wants to increase number of threads with for
- * each CPU core/HT, most likely the factor is larger then
- * one thread/core because service threads are supposed to
- * be blocked by lock or wait for IO.
- */
- /*
- * Amdahl's law says that adding processors wouldn't give
- * a linear increasing of parallelism, so it's nonsense to
- * have too many threads no matter how many cores/HTs
- * there are.
- */
- /* weight is # of HTs */
- if (cpumask_weight(topology_sibling_cpumask(0)) > 1) {
- /* depress thread factor for hyper-thread */
- factor = factor - (factor >> 1) + (factor >> 3);
- }
-
- weight = cfs_cpt_weight(svc->srv_cptable, 0);
- LASSERT(weight > 0);
-
- for (; factor > 0 && weight > 0; factor--, weight -= fade)
- nthrs += min(weight, fade) * factor;
- }
-
- if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
- nthrs = max(tc->tc_nthrs_base,
- tc->tc_nthrs_max / svc->srv_ncpts);
- }
- out:
- nthrs = max(nthrs, tc->tc_nthrs_init);
- svc->srv_nthrs_cpt_limit = nthrs;
- svc->srv_nthrs_cpt_init = init;
-
- if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
- CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n",
- svc->srv_name, nthrs * svc->srv_ncpts,
- tc->tc_nthrs_max);
- }
-}
-
-/**
- * Initialize percpt data for a service
- */
-static int
-ptlrpc_service_part_init(struct ptlrpc_service *svc,
- struct ptlrpc_service_part *svcpt, int cpt)
-{
- struct ptlrpc_at_array *array;
- int size;
- int index;
- int rc;
-
- svcpt->scp_cpt = cpt;
- INIT_LIST_HEAD(&svcpt->scp_threads);
-
- /* rqbd and incoming request queue */
- spin_lock_init(&svcpt->scp_lock);
- INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
- INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
- INIT_LIST_HEAD(&svcpt->scp_req_incoming);
- init_waitqueue_head(&svcpt->scp_waitq);
- /* history request & rqbd list */
- INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
- INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
-
- /* active requests and hp requests */
- spin_lock_init(&svcpt->scp_req_lock);
-
- /* reply states */
- spin_lock_init(&svcpt->scp_rep_lock);
- INIT_LIST_HEAD(&svcpt->scp_rep_active);
- INIT_LIST_HEAD(&svcpt->scp_rep_idle);
- init_waitqueue_head(&svcpt->scp_rep_waitq);
- atomic_set(&svcpt->scp_nreps_difficult, 0);
-
- /* adaptive timeout */
- spin_lock_init(&svcpt->scp_at_lock);
- array = &svcpt->scp_at_array;
-
- size = at_est2timeout(at_max);
- array->paa_size = size;
- array->paa_count = 0;
- array->paa_deadline = -1;
-
- /* allocate memory for scp_at_array (ptlrpc_at_array) */
- array->paa_reqs_array =
- kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS,
- cfs_cpt_spread_node(svc->srv_cptable, cpt));
- if (!array->paa_reqs_array)
- return -ENOMEM;
-
- for (index = 0; index < size; index++)
- INIT_LIST_HEAD(&array->paa_reqs_array[index]);
-
- array->paa_reqs_count =
- kzalloc_node(sizeof(__u32) * size, GFP_NOFS,
- cfs_cpt_spread_node(svc->srv_cptable, cpt));
- if (!array->paa_reqs_count)
- goto free_reqs_array;
-
- timer_setup(&svcpt->scp_at_timer, ptlrpc_at_timer, 0);
-
- /* At SOW, service time should be quick; 10s seems generous. If client
- * timeout is less than this, we'll be sending an early reply.
- */
- at_init(&svcpt->scp_at_estimate, 10, 0);
-
- /* assign this before call ptlrpc_grow_req_bufs */
- svcpt->scp_service = svc;
- /* Now allocate the request buffers, but don't post them now */
- rc = ptlrpc_grow_req_bufs(svcpt, 0);
- /* We shouldn't be under memory pressure at startup, so
- * fail if we can't allocate all our buffers at this time.
- */
- if (rc != 0)
- goto free_reqs_count;
-
- return 0;
-
-free_reqs_count:
- kfree(array->paa_reqs_count);
- array->paa_reqs_count = NULL;
-free_reqs_array:
- kfree(array->paa_reqs_array);
- array->paa_reqs_array = NULL;
-
- return -ENOMEM;
-}
-
-/**
- * Initialize service on a given portal.
- * This includes starting serving threads , allocating and posting rqbds and
- * so on.
- */
-struct ptlrpc_service *
-ptlrpc_register_service(struct ptlrpc_service_conf *conf,
- struct kset *parent,
- struct dentry *debugfs_entry)
-{
- struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt;
- struct ptlrpc_service *service;
- struct ptlrpc_service_part *svcpt;
- struct cfs_cpt_table *cptable;
- __u32 *cpts = NULL;
- int ncpts;
- int cpt;
- int rc;
- int i;
-
- LASSERT(conf->psc_buf.bc_nbufs > 0);
- LASSERT(conf->psc_buf.bc_buf_size >=
- conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
- LASSERT(conf->psc_thr.tc_ctx_tags != 0);
-
- cptable = cconf->cc_cptable;
- if (!cptable)
- cptable = cfs_cpt_table;
-
- if (!conf->psc_thr.tc_cpu_affinity) {
- ncpts = 1;
- } else {
- ncpts = cfs_cpt_number(cptable);
- if (cconf->cc_pattern) {
- struct cfs_expr_list *el;
-
- rc = cfs_expr_list_parse(cconf->cc_pattern,
- strlen(cconf->cc_pattern),
- 0, ncpts - 1, &el);
- if (rc != 0) {
- CERROR("%s: invalid CPT pattern string: %s",
- conf->psc_name, cconf->cc_pattern);
- return ERR_PTR(-EINVAL);
- }
-
- rc = cfs_expr_list_values(el, ncpts, &cpts);
- cfs_expr_list_free(el);
- if (rc <= 0) {
- CERROR("%s: failed to parse CPT array %s: %d\n",
- conf->psc_name, cconf->cc_pattern, rc);
- kfree(cpts);
- return ERR_PTR(rc < 0 ? rc : -EINVAL);
- }
- ncpts = rc;
- }
- }
-
- service = kzalloc(offsetof(struct ptlrpc_service, srv_parts[ncpts]),
- GFP_NOFS);
- if (!service) {
- kfree(cpts);
- return ERR_PTR(-ENOMEM);
- }
-
- service->srv_cptable = cptable;
- service->srv_cpts = cpts;
- service->srv_ncpts = ncpts;
-
- service->srv_cpt_bits = 0; /* it's zero already, easy to read... */
- while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
- service->srv_cpt_bits++;
-
- /* public members */
- spin_lock_init(&service->srv_lock);
- service->srv_name = conf->psc_name;
- service->srv_watchdog_factor = conf->psc_watchdog_factor;
- INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
-
- /* buffer configuration */
- service->srv_nbuf_per_group = test_req_buffer_pressure ?
- 1 : conf->psc_buf.bc_nbufs;
- service->srv_max_req_size = conf->psc_buf.bc_req_max_size +
- SPTLRPC_MAX_PAYLOAD;
- service->srv_buf_size = conf->psc_buf.bc_buf_size;
- service->srv_rep_portal = conf->psc_buf.bc_rep_portal;
- service->srv_req_portal = conf->psc_buf.bc_req_portal;
-
- /* Increase max reply size to next power of two */
- service->srv_max_reply_size = 1;
- while (service->srv_max_reply_size <
- conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
- service->srv_max_reply_size <<= 1;
-
- service->srv_thread_name = conf->psc_thr.tc_thr_name;
- service->srv_ctx_tags = conf->psc_thr.tc_ctx_tags;
- service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
- service->srv_ops = conf->psc_ops;
-
- for (i = 0; i < ncpts; i++) {
- if (!conf->psc_thr.tc_cpu_affinity)
- cpt = CFS_CPT_ANY;
- else
- cpt = cpts ? cpts[i] : i;
-
- svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS,
- cfs_cpt_spread_node(cptable, cpt));
- if (!svcpt) {
- rc = -ENOMEM;
- goto failed;
- }
-
- service->srv_parts[i] = svcpt;
- rc = ptlrpc_service_part_init(service, svcpt, cpt);
- if (rc != 0)
- goto failed;
- }
-
- ptlrpc_server_nthreads_check(service, conf);
-
- rc = LNetSetLazyPortal(service->srv_req_portal);
- LASSERT(rc == 0);
-
- mutex_lock(&ptlrpc_all_services_mutex);
- list_add(&service->srv_list, &ptlrpc_all_services);
- mutex_unlock(&ptlrpc_all_services_mutex);
-
- if (parent) {
- rc = ptlrpc_sysfs_register_service(parent, service);
- if (rc)
- goto failed;
- }
-
- if (!IS_ERR_OR_NULL(debugfs_entry))
- ptlrpc_ldebugfs_register_service(debugfs_entry, service);
-
- rc = ptlrpc_service_nrs_setup(service);
- if (rc != 0)
- goto failed;
-
- CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
- service->srv_name, service->srv_req_portal);
-
- rc = ptlrpc_start_threads(service);
- if (rc != 0) {
- CERROR("Failed to start threads for service %s: %d\n",
- service->srv_name, rc);
- goto failed;
- }
-
- return service;
-failed:
- ptlrpc_unregister_service(service);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL(ptlrpc_register_service);
-
-/**
- * to actually free the request, must be called without holding svc_lock.
- * note it's caller's responsibility to unlink req->rq_list.
- */
-static void ptlrpc_server_free_request(struct ptlrpc_request *req)
-{
- LASSERT(atomic_read(&req->rq_refcount) == 0);
- LASSERT(list_empty(&req->rq_timed_list));
-
- /* DEBUG_REQ() assumes the reply state of a request with a valid
- * ref will not be destroyed until that reference is dropped.
- */
- ptlrpc_req_drop_rs(req);
-
- sptlrpc_svc_ctx_decref(req);
-
- if (req != &req->rq_rqbd->rqbd_req) {
- /* NB request buffers use an embedded
- * req if the incoming req unlinked the
- * MD; this isn't one of them!
- */
- ptlrpc_request_cache_free(req);
- }
-}
-
-/**
- * drop a reference count of the request. if it reaches 0, we either
- * put it into history list, or free it immediately.
- */
-static void ptlrpc_server_drop_request(struct ptlrpc_request *req)
-{
- struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
- struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- int refcount;
-
- if (!atomic_dec_and_test(&req->rq_refcount))
- return;
-
- if (req->rq_at_linked) {
- spin_lock(&svcpt->scp_at_lock);
- /* recheck with lock, in case it's unlinked by
- * ptlrpc_at_check_timed()
- */
- if (likely(req->rq_at_linked))
- ptlrpc_at_remove_timed(req);
- spin_unlock(&svcpt->scp_at_lock);
- }
-
- LASSERT(list_empty(&req->rq_timed_list));
-
- /* finalize request */
- if (req->rq_export) {
- class_export_put(req->rq_export);
- req->rq_export = NULL;
- }
-
- spin_lock(&svcpt->scp_lock);
-
- list_add(&req->rq_list, &rqbd->rqbd_reqs);
-
- refcount = --(rqbd->rqbd_refcount);
- if (refcount == 0) {
- /* request buffer is now idle: add to history */
- list_del(&rqbd->rqbd_list);
-
- list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
- svcpt->scp_hist_nrqbds++;
-
- /* cull some history?
- * I expect only about 1 or 2 rqbds need to be recycled here
- */
- while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
- rqbd = list_entry(svcpt->scp_hist_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
-
- list_del(&rqbd->rqbd_list);
- svcpt->scp_hist_nrqbds--;
-
- /* remove rqbd's reqs from svc's req history while
- * I've got the service lock
- */
- list_for_each_entry(req, &rqbd->rqbd_reqs, rq_list) {
- /* Track the highest culled req seq */
- if (req->rq_history_seq >
- svcpt->scp_hist_seq_culled) {
- svcpt->scp_hist_seq_culled =
- req->rq_history_seq;
- }
- list_del(&req->rq_history_list);
- }
-
- spin_unlock(&svcpt->scp_lock);
-
- while ((req = list_first_entry_or_null(
- &rqbd->rqbd_reqs,
- struct ptlrpc_request, rq_list))) {
- list_del(&req->rq_list);
- ptlrpc_server_free_request(req);
- }
-
- spin_lock(&svcpt->scp_lock);
- /*
- * now all reqs including the embedded req has been
- * disposed, schedule request buffer for re-use.
- */
- LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) ==
- 0);
- list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
- }
-
- spin_unlock(&svcpt->scp_lock);
- } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
- /* If we are low on memory, we are not interested in history */
- list_del(&req->rq_list);
- list_del_init(&req->rq_history_list);
-
- /* Track the highest culled req seq */
- if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
- svcpt->scp_hist_seq_culled = req->rq_history_seq;
-
- spin_unlock(&svcpt->scp_lock);
-
- ptlrpc_server_free_request(req);
- } else {
- spin_unlock(&svcpt->scp_lock);
- }
-}
-
-/**
- * to finish a request: stop sending more early replies, and release
- * the request.
- */
-static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- ptlrpc_server_hpreq_fini(req);
-
- if (req->rq_session.lc_thread) {
- lu_context_exit(&req->rq_session);
- lu_context_fini(&req->rq_session);
- }
-
- ptlrpc_server_drop_request(req);
-}
-
-/**
- * to finish a active request: stop sending more early replies, and release
- * the request. should be called after we finished handling the request.
- */
-static void ptlrpc_server_finish_active_request(
- struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- spin_lock(&svcpt->scp_req_lock);
- ptlrpc_nrs_req_stop_nolock(req);
- svcpt->scp_nreqs_active--;
- if (req->rq_hp)
- svcpt->scp_nhreqs_active--;
- spin_unlock(&svcpt->scp_req_lock);
-
- ptlrpc_nrs_req_finalize(req);
-
- if (req->rq_export)
- class_export_rpc_dec(req->rq_export);
-
- ptlrpc_server_finish_request(svcpt, req);
-}
-
-/**
- * Sanity check request \a req.
- * Return 0 if all is ok, error code otherwise.
- */
-static int ptlrpc_check_req(struct ptlrpc_request *req)
-{
- struct obd_device *obd = req->rq_export->exp_obd;
- int rc = 0;
-
- if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
- req->rq_export->exp_conn_cnt)) {
- DEBUG_REQ(D_RPCTRACE, req,
- "DROPPING req from old connection %d < %d",
- lustre_msg_get_conn_cnt(req->rq_reqmsg),
- req->rq_export->exp_conn_cnt);
- return -EEXIST;
- }
- if (unlikely(!obd || obd->obd_fail)) {
- /*
- * Failing over, don't handle any more reqs, send
- * error response instead.
- */
- CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
- req, obd ? obd->obd_name : "unknown");
- rc = -ENODEV;
- } else if (lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_REPLAY | MSG_REQ_REPLAY_DONE)) {
- DEBUG_REQ(D_ERROR, req, "Invalid replay without recovery");
- class_fail_export(req->rq_export);
- rc = -ENODEV;
- } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0) {
- DEBUG_REQ(D_ERROR, req,
- "Invalid req with transno %llu without recovery",
- lustre_msg_get_transno(req->rq_reqmsg));
- class_fail_export(req->rq_export);
- rc = -ENODEV;
- }
-
- if (unlikely(rc < 0)) {
- req->rq_status = rc;
- ptlrpc_error(req);
- }
- return rc;
-}
-
-static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_at_array *array = &svcpt->scp_at_array;
- __s32 next;
-
- if (array->paa_count == 0) {
- del_timer(&svcpt->scp_at_timer);
- return;
- }
-
- /* Set timer for closest deadline */
- next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
- at_early_margin);
- if (next <= 0) {
- ptlrpc_at_timer(&svcpt->scp_at_timer);
- } else {
- mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
- CDEBUG(D_INFO, "armed %s at %+ds\n",
- svcpt->scp_service->srv_name, next);
- }
-}
-
-/* Add rpc to early reply check list */
-static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
-{
- struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- struct ptlrpc_at_array *array = &svcpt->scp_at_array;
- struct ptlrpc_request *rq = NULL;
- __u32 index;
-
- if (AT_OFF)
- return 0;
-
- if (req->rq_no_reply)
- return 0;
-
- if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
- return -ENOSYS;
-
- spin_lock(&svcpt->scp_at_lock);
- LASSERT(list_empty(&req->rq_timed_list));
-
- div_u64_rem(req->rq_deadline, array->paa_size, &index);
- if (array->paa_reqs_count[index] > 0) {
- /* latest rpcs will have the latest deadlines in the list,
- * so search backward.
- */
- list_for_each_entry_reverse(rq, &array->paa_reqs_array[index],
- rq_timed_list) {
- if (req->rq_deadline >= rq->rq_deadline) {
- list_add(&req->rq_timed_list,
- &rq->rq_timed_list);
- break;
- }
- }
- }
-
- /* Add the request at the head of the list */
- if (list_empty(&req->rq_timed_list))
- list_add(&req->rq_timed_list, &array->paa_reqs_array[index]);
-
- spin_lock(&req->rq_lock);
- req->rq_at_linked = 1;
- spin_unlock(&req->rq_lock);
- req->rq_at_index = index;
- array->paa_reqs_count[index]++;
- array->paa_count++;
- if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
- array->paa_deadline = req->rq_deadline;
- ptlrpc_at_set_timer(svcpt);
- }
- spin_unlock(&svcpt->scp_at_lock);
-
- return 0;
-}
-
-static void
-ptlrpc_at_remove_timed(struct ptlrpc_request *req)
-{
- struct ptlrpc_at_array *array;
-
- array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;
-
- /* NB: must call with hold svcpt::scp_at_lock */
- LASSERT(!list_empty(&req->rq_timed_list));
- list_del_init(&req->rq_timed_list);
-
- spin_lock(&req->rq_lock);
- req->rq_at_linked = 0;
- spin_unlock(&req->rq_lock);
-
- array->paa_reqs_count[req->rq_at_index]--;
- array->paa_count--;
-}
-
-/*
- * Attempt to extend the request deadline by sending an early reply to the
- * client.
- */
-static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
-{
- struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- struct ptlrpc_request *reqcopy;
- struct lustre_msg *reqmsg;
- long olddl = req->rq_deadline - ktime_get_real_seconds();
- time64_t newdl;
- int rc;
-
- /* deadline is when the client expects us to reply, margin is the
- * difference between clients' and servers' expectations
- */
- DEBUG_REQ(D_ADAPTTO, req,
- "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d",
- AT_OFF ? "AT off - not " : "",
- olddl, olddl - at_get(&svcpt->scp_at_estimate),
- at_get(&svcpt->scp_at_estimate), at_extra);
-
- if (AT_OFF)
- return 0;
-
- if (olddl < 0) {
- DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), not sending early reply. Consider increasing at_early_margin (%d)?",
- olddl, at_early_margin);
-
- /* Return an error so we're not re-added to the timed list. */
- return -ETIMEDOUT;
- }
-
- if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, but no AT support");
- return -ENOSYS;
- }
-
- /*
- * We want to extend the request deadline by at_extra seconds,
- * so we set our service estimate to reflect how much time has
- * passed since this request arrived plus an additional
- * at_extra seconds. The client will calculate the new deadline
- * based on this service estimate (plus some additional time to
- * account for network latency). See ptlrpc_at_recv_early_reply
- */
- at_measured(&svcpt->scp_at_estimate, at_extra +
- ktime_get_real_seconds() - req->rq_arrival_time.tv_sec);
- newdl = req->rq_arrival_time.tv_sec + at_get(&svcpt->scp_at_estimate);
-
- /* Check to see if we've actually increased the deadline -
- * we may be past adaptive_max
- */
- if (req->rq_deadline >= newdl) {
- DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n",
- olddl, newdl - ktime_get_real_seconds());
- return -ETIMEDOUT;
- }
-
- reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (!reqcopy)
- return -ENOMEM;
- reqmsg = kvzalloc(req->rq_reqlen, GFP_NOFS);
- if (!reqmsg) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- *reqcopy = *req;
- reqcopy->rq_reply_state = NULL;
- reqcopy->rq_rep_swab_mask = 0;
- reqcopy->rq_pack_bulk = 0;
- reqcopy->rq_pack_udesc = 0;
- reqcopy->rq_packed_final = 0;
- sptlrpc_svc_ctx_addref(reqcopy);
- /* We only need the reqmsg for the magic */
- reqcopy->rq_reqmsg = reqmsg;
- memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
-
- LASSERT(atomic_read(&req->rq_refcount));
- /** if it is last refcount then early reply isn't needed */
- if (atomic_read(&req->rq_refcount) == 1) {
- DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, abort sending early reply\n");
- rc = -EINVAL;
- goto out;
- }
-
- /* Connection ref */
- reqcopy->rq_export = class_conn2export(
- lustre_msg_get_handle(reqcopy->rq_reqmsg));
- if (!reqcopy->rq_export) {
- rc = -ENODEV;
- goto out;
- }
-
- /* RPC ref */
- class_export_rpc_inc(reqcopy->rq_export);
- if (reqcopy->rq_export->exp_obd &&
- reqcopy->rq_export->exp_obd->obd_fail) {
- rc = -ENODEV;
- goto out_put;
- }
-
- rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
- if (rc)
- goto out_put;
-
- rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
-
- if (!rc) {
- /* Adjust our own deadline to what we told the client */
- req->rq_deadline = newdl;
- req->rq_early_count++; /* number sent, server side */
- } else {
- DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
- }
-
- /* Free the (early) reply state from lustre_pack_reply.
- * (ptlrpc_send_reply takes it's own rs ref, so this is safe here)
- */
- ptlrpc_req_drop_rs(reqcopy);
-
-out_put:
- class_export_rpc_dec(reqcopy->rq_export);
- class_export_put(reqcopy->rq_export);
-out:
- sptlrpc_svc_ctx_decref(reqcopy);
- kvfree(reqmsg);
-out_free:
- ptlrpc_request_cache_free(reqcopy);
- return rc;
-}
-
-/* Send early replies to everybody expiring within at_early_margin
- * asking for at_extra time
- */
-static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_at_array *array = &svcpt->scp_at_array;
- struct ptlrpc_request *rq, *n;
- struct list_head work_list;
- __u32 index, count;
- time64_t deadline;
- time64_t now = ktime_get_real_seconds();
- long delay;
- int first, counter = 0;
-
- spin_lock(&svcpt->scp_at_lock);
- if (svcpt->scp_at_check == 0) {
- spin_unlock(&svcpt->scp_at_lock);
- return;
- }
- delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
- svcpt->scp_at_check = 0;
-
- if (array->paa_count == 0) {
- spin_unlock(&svcpt->scp_at_lock);
- return;
- }
-
- /* The timer went off, but maybe the nearest rpc already completed. */
- first = array->paa_deadline - now;
- if (first > at_early_margin) {
- /* We've still got plenty of time. Reset the timer. */
- ptlrpc_at_set_timer(svcpt);
- spin_unlock(&svcpt->scp_at_lock);
- return;
- }
-
- /* We're close to a timeout, and we don't know how much longer the
- * server will take. Send early replies to everyone expiring soon.
- */
- INIT_LIST_HEAD(&work_list);
- deadline = -1;
- div_u64_rem(array->paa_deadline, array->paa_size, &index);
- count = array->paa_count;
- while (count > 0) {
- count -= array->paa_reqs_count[index];
- list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index],
- rq_timed_list) {
- if (rq->rq_deadline > now + at_early_margin) {
- /* update the earliest deadline */
- if (deadline == -1 ||
- rq->rq_deadline < deadline)
- deadline = rq->rq_deadline;
- break;
- }
-
- ptlrpc_at_remove_timed(rq);
- /**
- * ptlrpc_server_drop_request() may drop
- * refcount to 0 already. Let's check this and
- * don't add entry to work_list
- */
- if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
- list_add(&rq->rq_timed_list, &work_list);
- counter++;
- }
-
- if (++index >= array->paa_size)
- index = 0;
- }
- array->paa_deadline = deadline;
- /* we have a new earliest deadline, restart the timer */
- ptlrpc_at_set_timer(svcpt);
-
- spin_unlock(&svcpt->scp_at_lock);
-
- CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early replies\n",
- first, at_extra, counter);
- if (first < 0) {
- /* We're already past request deadlines before we even get a
- * chance to send early replies
- */
- LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
- svcpt->scp_service->srv_name);
- CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
- counter, svcpt->scp_nreqs_incoming,
- svcpt->scp_nreqs_active,
- at_get(&svcpt->scp_at_estimate), delay);
- }
-
- /* we took additional refcount so entries can't be deleted from list, no
- * locking is needed
- */
- while (!list_empty(&work_list)) {
- rq = list_entry(work_list.next, struct ptlrpc_request,
- rq_timed_list);
- list_del_init(&rq->rq_timed_list);
-
- if (ptlrpc_at_send_early_reply(rq) == 0)
- ptlrpc_at_add_timed(rq);
-
- ptlrpc_server_drop_request(rq);
- }
-}
-
-/**
- * Put the request to the export list if the request may become
- * a high priority one.
- */
-static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- int rc = 0;
-
- if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
- rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
- if (rc < 0)
- return rc;
- LASSERT(rc == 0);
- }
- if (req->rq_export && req->rq_ops) {
- /* Perform request specific check. We should do this check
- * before the request is added into exp_hp_rpcs list otherwise
- * it may hit swab race at LU-1044.
- */
- if (req->rq_ops->hpreq_check) {
- rc = req->rq_ops->hpreq_check(req);
- if (rc == -ESTALE) {
- req->rq_status = rc;
- ptlrpc_error(req);
- }
- /** can only return error,
- * 0 for normal request,
- * or 1 for high priority request
- */
- LASSERT(rc <= 1);
- }
-
- spin_lock_bh(&req->rq_export->exp_rpc_lock);
- list_add(&req->rq_exp_list, &req->rq_export->exp_hp_rpcs);
- spin_unlock_bh(&req->rq_export->exp_rpc_lock);
- }
-
- ptlrpc_nrs_req_initialize(svcpt, req, rc);
-
- return rc;
-}
-
-/** Remove the request from the export list. */
-static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
-{
- if (req->rq_export && req->rq_ops) {
- /* refresh lock timeout again so that client has more
- * room to send lock cancel RPC.
- */
- if (req->rq_ops->hpreq_fini)
- req->rq_ops->hpreq_fini(req);
-
- spin_lock_bh(&req->rq_export->exp_rpc_lock);
- list_del_init(&req->rq_exp_list);
- spin_unlock_bh(&req->rq_export->exp_rpc_lock);
- }
-}
-
-static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- int rc;
-
- rc = ptlrpc_server_hpreq_init(svcpt, req);
- if (rc < 0)
- return rc;
-
- ptlrpc_nrs_req_add(svcpt, req, !!rc);
-
- return 0;
-}
-
-/**
- * Allow to handle high priority request
- * User can call it w/o any lock but need to hold
- * ptlrpc_service_part::scp_req_lock to get reliable result
- */
-static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
- bool force)
-{
- int running = svcpt->scp_nthrs_running;
-
- if (!nrs_svcpt_has_hp(svcpt))
- return false;
-
- if (force)
- return true;
-
- if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
- CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
- /* leave just 1 thread for normal RPCs */
- running = PTLRPC_NTHRS_INIT;
- if (svcpt->scp_service->srv_ops.so_hpreq_handler)
- running += 1;
- }
-
- if (svcpt->scp_nreqs_active >= running - 1)
- return false;
-
- if (svcpt->scp_nhreqs_active == 0)
- return true;
-
- return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
- svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
-}
-
-static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
- bool force)
-{
- return ptlrpc_server_allow_high(svcpt, force) &&
- ptlrpc_nrs_req_pending_nolock(svcpt, true);
-}
-
-/**
- * Only allow normal priority requests on a service that has a high-priority
- * queue if forced (i.e. cleanup), if there are other high priority requests
- * already being processed (i.e. those threads can service more high-priority
- * requests), or if there are enough idle threads that a later thread can do
- * a high priority request.
- * User can call it w/o any lock but need to hold
- * ptlrpc_service_part::scp_req_lock to get reliable result
- */
-static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
- bool force)
-{
- int running = svcpt->scp_nthrs_running;
-
- if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
- CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
- /* leave just 1 thread for normal RPCs */
- running = PTLRPC_NTHRS_INIT;
- if (svcpt->scp_service->srv_ops.so_hpreq_handler)
- running += 1;
- }
-
- if (force ||
- svcpt->scp_nreqs_active < running - 2)
- return true;
-
- if (svcpt->scp_nreqs_active >= running - 1)
- return false;
-
- return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
-}
-
-static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
- bool force)
-{
- return ptlrpc_server_allow_normal(svcpt, force) &&
- ptlrpc_nrs_req_pending_nolock(svcpt, false);
-}
-
-/**
- * Returns true if there are requests available in incoming
- * request queue for processing and it is allowed to fetch them.
- * User can call it w/o any lock but need to hold ptlrpc_service::scp_req_lock
- * to get reliable result
- * \see ptlrpc_server_allow_normal
- * \see ptlrpc_server_allow high
- */
-static inline bool
-ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
-{
- return ptlrpc_server_high_pending(svcpt, force) ||
- ptlrpc_server_normal_pending(svcpt, force);
-}
-
-/**
- * Fetch a request for processing from queue of unprocessed requests.
- * Favors high-priority requests.
- * Returns a pointer to fetched request.
- */
-static struct ptlrpc_request *
-ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
-{
- struct ptlrpc_request *req = NULL;
-
- spin_lock(&svcpt->scp_req_lock);
-
- if (ptlrpc_server_high_pending(svcpt, force)) {
- req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
- if (req) {
- svcpt->scp_hreq_count++;
- goto got_request;
- }
- }
-
- if (ptlrpc_server_normal_pending(svcpt, force)) {
- req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
- if (req) {
- svcpt->scp_hreq_count = 0;
- goto got_request;
- }
- }
-
- spin_unlock(&svcpt->scp_req_lock);
- return NULL;
-
-got_request:
- svcpt->scp_nreqs_active++;
- if (req->rq_hp)
- svcpt->scp_nhreqs_active++;
-
- spin_unlock(&svcpt->scp_req_lock);
-
- if (likely(req->rq_export))
- class_export_rpc_inc(req->rq_export);
-
- return req;
-}
-
-/**
- * Handle freshly incoming reqs, add to timed early reply list,
- * pass on to regular request queue.
- * All incoming requests pass through here before getting into
- * ptlrpc_server_handle_req later on.
- */
-static int
-ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_thread *thread)
-{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request *req;
- __u32 deadline;
- int rc;
-
- spin_lock(&svcpt->scp_lock);
- if (list_empty(&svcpt->scp_req_incoming)) {
- spin_unlock(&svcpt->scp_lock);
- return 0;
- }
-
- req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
- list_del_init(&req->rq_list);
- svcpt->scp_nreqs_incoming--;
- /* Consider this still a "queued" request as far as stats are
- * concerned
- */
- spin_unlock(&svcpt->scp_lock);
-
- /* go through security check/transform */
- rc = sptlrpc_svc_unwrap_request(req);
- switch (rc) {
- case SECSVC_OK:
- break;
- case SECSVC_COMPLETE:
- target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
- goto err_req;
- case SECSVC_DROP:
- goto err_req;
- default:
- LBUG();
- }
-
- /*
- * for null-flavored rpc, msg has been unpacked by sptlrpc, although
- * redo it wouldn't be harmful.
- */
- if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
- rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
- if (rc != 0) {
- CERROR("error unpacking request: ptl %d from %s x%llu\n",
- svc->srv_req_portal, libcfs_id2str(req->rq_peer),
- req->rq_xid);
- goto err_req;
- }
- }
-
- rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
- if (rc) {
- CERROR("error unpacking ptlrpc body: ptl %d from %s x%llu\n",
- svc->srv_req_portal, libcfs_id2str(req->rq_peer),
- req->rq_xid);
- goto err_req;
- }
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
- lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
- CERROR("drop incoming rpc opc %u, x%llu\n",
- cfs_fail_val, req->rq_xid);
- goto err_req;
- }
-
- rc = -EINVAL;
- if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
- CERROR("wrong packet type received (type=%u) from %s\n",
- lustre_msg_get_type(req->rq_reqmsg),
- libcfs_id2str(req->rq_peer));
- goto err_req;
- }
-
- switch (lustre_msg_get_opc(req->rq_reqmsg)) {
- case MDS_WRITEPAGE:
- case OST_WRITE:
- req->rq_bulk_write = 1;
- break;
- case MDS_READPAGE:
- case OST_READ:
- case MGS_CONFIG_READ:
- req->rq_bulk_read = 1;
- break;
- }
-
- CDEBUG(D_RPCTRACE, "got req x%llu\n", req->rq_xid);
-
- req->rq_export = class_conn2export(
- lustre_msg_get_handle(req->rq_reqmsg));
- if (req->rq_export) {
- rc = ptlrpc_check_req(req);
- if (rc == 0) {
- rc = sptlrpc_target_export_check(req->rq_export, req);
- if (rc)
- DEBUG_REQ(D_ERROR, req, "DROPPING req with illegal security flavor,");
- }
-
- if (rc)
- goto err_req;
- }
-
- /* req_in handling should/must be fast */
- if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
- DEBUG_REQ(D_WARNING, req, "Slow req_in handling %llds",
- (s64)(ktime_get_real_seconds() -
- req->rq_arrival_time.tv_sec));
-
- /* Set rpc server deadline and add it to the timed list */
- deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
- MSGHDR_AT_SUPPORT) ?
- /* The max time the client expects us to take */
- lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
- req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
- if (unlikely(deadline == 0)) {
- DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
- goto err_req;
- }
-
- req->rq_svc_thread = thread;
- if (thread) {
- /* initialize request session, it is needed for request
- * processing by target
- */
- rc = lu_context_init(&req->rq_session,
- LCT_SERVER_SESSION | LCT_NOREF);
- if (rc) {
- CERROR("%s: failure to initialize session: rc = %d\n",
- thread->t_name, rc);
- goto err_req;
- }
- req->rq_session.lc_thread = thread;
- lu_context_enter(&req->rq_session);
- req->rq_svc_thread->t_env->le_ses = &req->rq_session;
- }
-
- ptlrpc_at_add_timed(req);
-
- /* Move it over to the request processing queue */
- rc = ptlrpc_server_request_add(svcpt, req);
- if (rc)
- goto err_req;
-
- wake_up(&svcpt->scp_waitq);
- return 1;
-
-err_req:
- ptlrpc_server_finish_request(svcpt, req);
-
- return 1;
-}
-
-/**
- * Main incoming request handling logic.
- * Calls handler function from service to do actual processing.
- */
-static int
-ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_thread *thread)
-{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request *request;
- struct timespec64 work_start;
- struct timespec64 work_end;
- struct timespec64 timediff;
- struct timespec64 arrived;
- unsigned long timediff_usecs;
- unsigned long arrived_usecs;
- int fail_opc = 0;
-
- request = ptlrpc_server_request_get(svcpt, false);
- if (!request)
- return 0;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
- fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
- else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
- fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
-
- if (unlikely(fail_opc)) {
- if (request->rq_export && request->rq_ops)
- OBD_FAIL_TIMEOUT(fail_opc, 4);
- }
-
- ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
- libcfs_debug_dumplog();
-
- ktime_get_real_ts64(&work_start);
- timediff = timespec64_sub(work_start, request->rq_arrival_time);
- timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
- timediff.tv_nsec / NSEC_PER_USEC;
- if (likely(svc->srv_stats)) {
- lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
- timediff_usecs);
- lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
- svcpt->scp_nreqs_incoming);
- lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
- svcpt->scp_nreqs_active);
- lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
- at_get(&svcpt->scp_at_estimate));
- }
-
- if (likely(request->rq_export)) {
- if (unlikely(ptlrpc_check_req(request)))
- goto put_conn;
- }
-
- /* Discard requests queued for longer than the deadline.
- * The deadline is increased if we send an early reply.
- */
- if (ktime_get_real_seconds() > request->rq_deadline) {
- DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline %lld:%llds ago\n",
- libcfs_id2str(request->rq_peer),
- request->rq_deadline -
- request->rq_arrival_time.tv_sec,
- ktime_get_real_seconds() - request->rq_deadline);
- goto put_conn;
- }
-
- CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d\n",
- current_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
- (request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
- lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
- libcfs_id2str(request->rq_peer),
- lustre_msg_get_opc(request->rq_reqmsg));
-
- if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
- CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
-
- CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
-
- /* re-assign request and sesson thread to the current one */
- request->rq_svc_thread = thread;
- if (thread) {
- LASSERT(request->rq_session.lc_thread);
- request->rq_session.lc_thread = thread;
- request->rq_session.lc_cookie = 0x55;
- thread->t_env->le_ses = &request->rq_session;
- }
- svc->srv_ops.so_req_handler(request);
-
- ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
-
-put_conn:
- if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
- DEBUG_REQ(D_WARNING, request,
- "Request took longer than estimated (%lld:%llds); "
- "client may timeout.",
- (s64)request->rq_deadline -
- request->rq_arrival_time.tv_sec,
- (s64)ktime_get_real_seconds() - request->rq_deadline);
- }
-
- ktime_get_real_ts64(&work_end);
- timediff = timespec64_sub(work_end, work_start);
- timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
- timediff.tv_nsec / NSEC_PER_USEC;
- arrived = timespec64_sub(work_end, request->rq_arrival_time);
- arrived_usecs = arrived.tv_sec * USEC_PER_SEC +
- arrived.tv_nsec / NSEC_PER_USEC;
- CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %ldus (%ldus total) trans %llu rc %d/%d\n",
- current_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
- (request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
- lustre_msg_get_status(request->rq_reqmsg),
- request->rq_xid,
- libcfs_id2str(request->rq_peer),
- lustre_msg_get_opc(request->rq_reqmsg),
- timediff_usecs,
- arrived_usecs,
- (request->rq_repmsg ?
- lustre_msg_get_transno(request->rq_repmsg) :
- request->rq_transno),
- request->rq_status,
- (request->rq_repmsg ?
- lustre_msg_get_status(request->rq_repmsg) : -999));
- if (likely(svc->srv_stats && request->rq_reqmsg)) {
- __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
- int opc = opcode_offset(op);
-
- if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
- LASSERT(opc < LUSTRE_MAX_OPCODES);
- lprocfs_counter_add(svc->srv_stats,
- opc + EXTRA_MAX_OPCODES,
- timediff_usecs);
- }
- }
- if (unlikely(request->rq_early_count)) {
- DEBUG_REQ(D_ADAPTTO, request,
- "sent %d early replies before finishing in %llds",
- request->rq_early_count,
- (s64)work_end.tv_sec -
- request->rq_arrival_time.tv_sec);
- }
-
- ptlrpc_server_finish_active_request(svcpt, request);
-
- return 1;
-}
-
-/**
- * An internal function to process a single reply state object.
- */
-static int
-ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
-{
- struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct obd_export *exp;
- int nlocks;
- int been_handled;
-
- exp = rs->rs_export;
-
- LASSERT(rs->rs_difficult);
- LASSERT(rs->rs_scheduled);
- LASSERT(list_empty(&rs->rs_list));
-
- spin_lock(&exp->exp_lock);
- /* Noop if removed already */
- list_del_init(&rs->rs_exp_list);
- spin_unlock(&exp->exp_lock);
-
- /* The disk commit callback holds exp_uncommitted_replies_lock while it
- * iterates over newly committed replies, removing them from
- * exp_uncommitted_replies. It then drops this lock and schedules the
- * replies it found for handling here.
- *
- * We can avoid contention for exp_uncommitted_replies_lock between the
- * HRT threads and further commit callbacks by checking rs_committed
- * which is set in the commit callback while it holds both
- * rs_lock and exp_uncommitted_reples.
- *
- * If we see rs_committed clear, the commit callback _may_ not have
- * handled this reply yet and we race with it to grab
- * exp_uncommitted_replies_lock before removing the reply from
- * exp_uncommitted_replies. Note that if we lose the race and the
- * reply has already been removed, list_del_init() is a noop.
- *
- * If we see rs_committed set, we know the commit callback is handling,
- * or has handled this reply since store reordering might allow us to
- * see rs_committed set out of sequence. But since this is done
- * holding rs_lock, we can be sure it has all completed once we hold
- * rs_lock, which we do right next.
- */
- if (!rs->rs_committed) {
- spin_lock(&exp->exp_uncommitted_replies_lock);
- list_del_init(&rs->rs_obd_list);
- spin_unlock(&exp->exp_uncommitted_replies_lock);
- }
-
- spin_lock(&rs->rs_lock);
-
- been_handled = rs->rs_handled;
- rs->rs_handled = 1;
-
- nlocks = rs->rs_nlocks; /* atomic "steal", but */
- rs->rs_nlocks = 0; /* locks still on rs_locks! */
-
- if (nlocks == 0 && !been_handled) {
- /* If we see this, we should already have seen the warning
- * in mds_steal_ack_locks()
- */
- CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n",
- rs,
- rs->rs_xid, rs->rs_transno, rs->rs_opc,
- libcfs_nid2str(exp->exp_connection->c_peer.nid));
- }
-
- if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
- spin_unlock(&rs->rs_lock);
-
- if (!been_handled && rs->rs_on_net) {
- LNetMDUnlink(rs->rs_md_h);
- /* Ignore return code; we're racing with completion */
- }
-
- while (nlocks-- > 0)
- ldlm_lock_decref(&rs->rs_locks[nlocks],
- rs->rs_modes[nlocks]);
-
- spin_lock(&rs->rs_lock);
- }
-
- rs->rs_scheduled = 0;
-
- if (!rs->rs_on_net) {
- /* Off the net */
- spin_unlock(&rs->rs_lock);
-
- class_export_put(exp);
- rs->rs_export = NULL;
- ptlrpc_rs_decref(rs);
- if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
- svc->srv_is_stopping)
- wake_up_all(&svcpt->scp_waitq);
- return 1;
- }
-
- /* still on the net; callback will schedule */
- spin_unlock(&rs->rs_lock);
- return 1;
-}
-
-static void
-ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
-{
- int avail = svcpt->scp_nrqbds_posted;
- int low_water = test_req_buffer_pressure ? 0 :
- svcpt->scp_service->srv_nbuf_per_group / 2;
-
- /* NB I'm not locking; just looking. */
-
- /* CAVEAT EMPTOR: We might be allocating buffers here because we've
- * allowed the request history to grow out of control. We could put a
- * sanity check on that here and cull some history if we need the
- * space.
- */
-
- if (avail <= low_water)
- ptlrpc_grow_req_bufs(svcpt, 1);
-
- if (svcpt->scp_service->srv_stats) {
- lprocfs_counter_add(svcpt->scp_service->srv_stats,
- PTLRPC_REQBUF_AVAIL_CNTR, avail);
- }
-}
-
-static inline int
-ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
-{
- return svcpt->scp_nreqs_active <
- svcpt->scp_nthrs_running - 1 -
- (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL);
-}
-
-/**
- * allowed to create more threads
- * user can call it w/o any lock but need to hold
- * ptlrpc_service_part::scp_lock to get reliable result
- */
-static inline int
-ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt)
-{
- return svcpt->scp_nthrs_running +
- svcpt->scp_nthrs_starting <
- svcpt->scp_service->srv_nthrs_cpt_limit;
-}
-
-/**
- * too many requests and allowed to create more threads
- */
-static inline int
-ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt)
-{
- return !ptlrpc_threads_enough(svcpt) &&
- ptlrpc_threads_increasable(svcpt);
-}
-
-static inline int
-ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
-{
- return thread_is_stopping(thread) ||
- thread->t_svcpt->scp_service->srv_is_stopping;
-}
-
-static inline int
-ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
-{
- return !list_empty(&svcpt->scp_rqbd_idle) &&
- svcpt->scp_rqbd_timeout == 0;
-}
-
-static inline int
-ptlrpc_at_check(struct ptlrpc_service_part *svcpt)
-{
- return svcpt->scp_at_check;
-}
-
-/**
- * requests wait on preprocessing
- * user can call it w/o any lock but need to hold
- * ptlrpc_service_part::scp_lock to get reliable result
- */
-static inline int
-ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
-{
- return !list_empty(&svcpt->scp_req_incoming);
-}
-
-/* We perfer lifo queuing, but kernel doesn't provide that yet. */
-#ifndef wait_event_idle_exclusive_lifo
-#define wait_event_idle_exclusive_lifo wait_event_idle_exclusive
-#define wait_event_idle_exclusive_lifo_timeout wait_event_idle_exclusive_timeout
-#endif
-
-static __attribute__((__noinline__)) int
-ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_thread *thread)
-{
- /* Don't exit while there are replies to be handled */
-
- /* XXX: Add this back when libcfs watchdog is merged upstream
- lc_watchdog_disable(thread->t_watchdog);
- */
-
- cond_resched();
-
- if (svcpt->scp_rqbd_timeout == 0)
- wait_event_idle_exclusive_lifo(
- svcpt->scp_waitq,
- ptlrpc_thread_stopping(thread) ||
- ptlrpc_server_request_incoming(svcpt) ||
- ptlrpc_server_request_pending(svcpt,
- false) ||
- ptlrpc_rqbd_pending(svcpt) ||
- ptlrpc_at_check(svcpt));
- else if (0 == wait_event_idle_exclusive_lifo_timeout(
- svcpt->scp_waitq,
- ptlrpc_thread_stopping(thread) ||
- ptlrpc_server_request_incoming(svcpt) ||
- ptlrpc_server_request_pending(svcpt,
- false) ||
- ptlrpc_rqbd_pending(svcpt) ||
- ptlrpc_at_check(svcpt),
- svcpt->scp_rqbd_timeout))
- svcpt->scp_rqbd_timeout = 0;
-
- if (ptlrpc_thread_stopping(thread))
- return -EINTR;
-
- /*
- lc_watchdog_touch(thread->t_watchdog,
- ptlrpc_server_get_timeout(svcpt));
- */
- return 0;
-}
-
-/**
- * Main thread body for service threads.
- * Waits in a loop waiting for new requests to process to appear.
- * Every time an incoming requests is added to its queue, a waitq
- * is woken up and one of the threads will handle it.
- */
-static int ptlrpc_main(void *arg)
-{
- struct ptlrpc_thread *thread = arg;
- struct ptlrpc_service_part *svcpt = thread->t_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_reply_state *rs;
- struct group_info *ginfo = NULL;
- struct lu_env *env;
- int counter = 0, rc = 0;
-
- thread->t_pid = current_pid();
- unshare_fs_struct();
-
- /* NB: we will call cfs_cpt_bind() for all threads, because we
- * might want to run lustre server only on a subset of system CPUs,
- * in that case ->scp_cpt is CFS_CPT_ANY
- */
- rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
- if (rc != 0) {
- CWARN("%s: failed to bind %s on CPT %d\n",
- svc->srv_name, thread->t_name, svcpt->scp_cpt);
- }
-
- ginfo = groups_alloc(0);
- if (!ginfo) {
- rc = -ENOMEM;
- goto out;
- }
-
- set_current_groups(ginfo);
- put_group_info(ginfo);
-
- if (svc->srv_ops.so_thr_init) {
- rc = svc->srv_ops.so_thr_init(thread);
- if (rc)
- goto out;
- }
-
- env = kzalloc(sizeof(*env), GFP_KERNEL);
- if (!env) {
- rc = -ENOMEM;
- goto out_srv_fini;
- }
-
- rc = lu_context_init(&env->le_ctx,
- svc->srv_ctx_tags | LCT_REMEMBER | LCT_NOREF);
- if (rc)
- goto out_srv_fini;
-
- thread->t_env = env;
- env->le_ctx.lc_thread = thread;
- env->le_ctx.lc_cookie = 0x6;
-
- while (!list_empty(&svcpt->scp_rqbd_idle)) {
- rc = ptlrpc_server_post_idle_rqbds(svcpt);
- if (rc >= 0)
- continue;
-
- CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
- svc->srv_name, svcpt->scp_cpt, rc);
- goto out_srv_fini;
- }
-
- /* Alloc reply state structure for this one */
- rs = kvzalloc(svc->srv_max_reply_size, GFP_KERNEL);
- if (!rs) {
- rc = -ENOMEM;
- goto out_srv_fini;
- }
-
- spin_lock(&svcpt->scp_lock);
-
- LASSERT(thread_is_starting(thread));
- thread_clear_flags(thread, SVC_STARTING);
-
- LASSERT(svcpt->scp_nthrs_starting == 1);
- svcpt->scp_nthrs_starting--;
-
- /* SVC_STOPPING may already be set here if someone else is trying
- * to stop the service while this new thread has been dynamically
- * forked. We still set SVC_RUNNING to let our creator know that
- * we are now running, however we will exit as soon as possible
- */
- thread_add_flags(thread, SVC_RUNNING);
- svcpt->scp_nthrs_running++;
- spin_unlock(&svcpt->scp_lock);
-
- /* wake up our creator in case he's still waiting. */
- wake_up(&thread->t_ctl_waitq);
-
- /*
- thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
- NULL, NULL);
- */
-
- spin_lock(&svcpt->scp_rep_lock);
- list_add(&rs->rs_list, &svcpt->scp_rep_idle);
- wake_up(&svcpt->scp_rep_waitq);
- spin_unlock(&svcpt->scp_rep_lock);
-
- CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
- svcpt->scp_nthrs_running);
-
- /* XXX maintain a list of all managed devices: insert here */
- while (!ptlrpc_thread_stopping(thread)) {
- if (ptlrpc_wait_event(svcpt, thread))
- break;
-
- ptlrpc_check_rqbd_pool(svcpt);
-
- if (ptlrpc_threads_need_create(svcpt)) {
- /* Ignore return code - we tried... */
- ptlrpc_start_thread(svcpt, 0);
- }
-
- /* Process all incoming reqs before handling any */
- if (ptlrpc_server_request_incoming(svcpt)) {
- lu_context_enter(&env->le_ctx);
- env->le_ses = NULL;
- ptlrpc_server_handle_req_in(svcpt, thread);
- lu_context_exit(&env->le_ctx);
-
- /* but limit ourselves in case of flood */
- if (counter++ < 100)
- continue;
- counter = 0;
- }
-
- if (ptlrpc_at_check(svcpt))
- ptlrpc_at_check_timed(svcpt);
-
- if (ptlrpc_server_request_pending(svcpt, false)) {
- lu_context_enter(&env->le_ctx);
- ptlrpc_server_handle_request(svcpt, thread);
- lu_context_exit(&env->le_ctx);
- }
-
- if (ptlrpc_rqbd_pending(svcpt) &&
- ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
- /* I just failed to repost request buffers.
- * Wait for a timeout (unless something else
- * happens) before I try again
- */
- svcpt->scp_rqbd_timeout = HZ / 10;
- CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
- svcpt->scp_nrqbds_posted);
- }
- }
-
- /*
- lc_watchdog_delete(thread->t_watchdog);
- thread->t_watchdog = NULL;
- */
-
-out_srv_fini:
- /*
- * deconstruct service specific state created by ptlrpc_start_thread()
- */
- if (svc->srv_ops.so_thr_done)
- svc->srv_ops.so_thr_done(thread);
-
- if (env) {
- lu_context_fini(&env->le_ctx);
- kfree(env);
- }
-out:
- CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
- thread, thread->t_pid, thread->t_id, rc);
-
- spin_lock(&svcpt->scp_lock);
- if (thread_test_and_clear_flags(thread, SVC_STARTING))
- svcpt->scp_nthrs_starting--;
-
- if (thread_test_and_clear_flags(thread, SVC_RUNNING)) {
- /* must know immediately */
- svcpt->scp_nthrs_running--;
- }
-
- thread->t_id = rc;
- thread_add_flags(thread, SVC_STOPPED);
-
- wake_up(&thread->t_ctl_waitq);
- spin_unlock(&svcpt->scp_lock);
-
- return rc;
-}
-
-static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
- struct list_head *replies)
-{
- int result;
-
- spin_lock(&hrt->hrt_lock);
-
- list_splice_init(&hrt->hrt_queue, replies);
- result = ptlrpc_hr.hr_stopping || !list_empty(replies);
-
- spin_unlock(&hrt->hrt_lock);
- return result;
-}
-
-/**
- * Main body of "handle reply" function.
- * It processes acked reply states
- */
-static int ptlrpc_hr_main(void *arg)
-{
- struct ptlrpc_hr_thread *hrt = arg;
- struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
- LIST_HEAD(replies);
- char threadname[20];
- int rc;
-
- snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
- hrp->hrp_cpt, hrt->hrt_id);
- unshare_fs_struct();
-
- rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
- if (rc != 0) {
- CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
- threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
- }
-
- atomic_inc(&hrp->hrp_nstarted);
- wake_up(&ptlrpc_hr.hr_waitq);
-
- while (!ptlrpc_hr.hr_stopping) {
- wait_event_idle(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
-
- while (!list_empty(&replies)) {
- struct ptlrpc_reply_state *rs;
-
- rs = list_entry(replies.prev, struct ptlrpc_reply_state,
- rs_list);
- list_del_init(&rs->rs_list);
- ptlrpc_handle_rs(rs);
- }
- }
-
- atomic_inc(&hrp->hrp_nstopped);
- wake_up(&ptlrpc_hr.hr_waitq);
-
- return 0;
-}
-
-static void ptlrpc_stop_hr_threads(void)
-{
- struct ptlrpc_hr_partition *hrp;
- int i;
- int j;
-
- ptlrpc_hr.hr_stopping = 1;
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- if (!hrp->hrp_thrs)
- continue; /* uninitialized */
- for (j = 0; j < hrp->hrp_nthrs; j++)
- wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
- }
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- if (!hrp->hrp_thrs)
- continue; /* uninitialized */
- wait_event(ptlrpc_hr.hr_waitq,
- atomic_read(&hrp->hrp_nstopped) ==
- atomic_read(&hrp->hrp_nstarted));
- }
-}
-
-static int ptlrpc_start_hr_threads(void)
-{
- struct ptlrpc_hr_partition *hrp;
- int i;
- int j;
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- int rc = 0;
-
- for (j = 0; j < hrp->hrp_nthrs; j++) {
- struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
- struct task_struct *task;
-
- task = kthread_run(ptlrpc_hr_main,
- &hrp->hrp_thrs[j],
- "ptlrpc_hr%02d_%03d",
- hrp->hrp_cpt, hrt->hrt_id);
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- break;
- }
- }
- wait_event(ptlrpc_hr.hr_waitq,
- atomic_read(&hrp->hrp_nstarted) == j);
-
- if (rc < 0) {
- CERROR("cannot start reply handler thread %d:%d: rc = %d\n",
- i, j, rc);
- ptlrpc_stop_hr_threads();
- return rc;
- }
- }
- return 0;
-}
-
-static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_thread *thread;
- LIST_HEAD(zombie);
-
- CDEBUG(D_INFO, "Stopping threads for service %s\n",
- svcpt->scp_service->srv_name);
-
- spin_lock(&svcpt->scp_lock);
- /* let the thread know that we would like it to stop asap */
- list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
- CDEBUG(D_INFO, "Stopping thread %s #%u\n",
- svcpt->scp_service->srv_thread_name, thread->t_id);
- thread_add_flags(thread, SVC_STOPPING);
- }
-
- wake_up_all(&svcpt->scp_waitq);
-
- while (!list_empty(&svcpt->scp_threads)) {
- thread = list_entry(svcpt->scp_threads.next,
- struct ptlrpc_thread, t_link);
- if (thread_is_stopped(thread)) {
- list_del(&thread->t_link);
- list_add(&thread->t_link, &zombie);
- continue;
- }
- spin_unlock(&svcpt->scp_lock);
-
- CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
- svcpt->scp_service->srv_thread_name, thread->t_id);
- wait_event_idle(thread->t_ctl_waitq,
- thread_is_stopped(thread));
-
- spin_lock(&svcpt->scp_lock);
- }
-
- spin_unlock(&svcpt->scp_lock);
-
- while (!list_empty(&zombie)) {
- thread = list_entry(zombie.next,
- struct ptlrpc_thread, t_link);
- list_del(&thread->t_link);
- kfree(thread);
- }
-}
-
-/**
- * Stops all threads of a particular service \a svc
- */
-static void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service)
- ptlrpc_svcpt_stop_threads(svcpt);
- }
-}
-
-int ptlrpc_start_threads(struct ptlrpc_service *svc)
-{
- int rc = 0;
- int i;
- int j;
-
- /* We require 2 threads min, see note in ptlrpc_server_handle_request */
- LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);
-
- for (i = 0; i < svc->srv_ncpts; i++) {
- for (j = 0; j < svc->srv_nthrs_cpt_init; j++) {
- rc = ptlrpc_start_thread(svc->srv_parts[i], 1);
- if (rc == 0)
- continue;
-
- if (rc != -EMFILE)
- goto failed;
- /* We have enough threads, don't start more. b=15759 */
- break;
- }
- }
-
- return 0;
- failed:
- CERROR("cannot start %s thread #%d_%d: rc %d\n",
- svc->srv_thread_name, i, j, rc);
- ptlrpc_stop_all_threads(svc);
- return rc;
-}
-
-int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
-{
- struct ptlrpc_thread *thread;
- struct ptlrpc_service *svc;
- struct task_struct *task;
- int rc;
-
- svc = svcpt->scp_service;
-
- CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
- svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running,
- svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit);
-
- again:
- if (unlikely(svc->srv_is_stopping))
- return -ESRCH;
-
- if (!ptlrpc_threads_increasable(svcpt) ||
- (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
- svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
- return -EMFILE;
-
- thread = kzalloc_node(sizeof(*thread), GFP_NOFS,
- cfs_cpt_spread_node(svc->srv_cptable,
- svcpt->scp_cpt));
- if (!thread)
- return -ENOMEM;
- init_waitqueue_head(&thread->t_ctl_waitq);
-
- spin_lock(&svcpt->scp_lock);
- if (!ptlrpc_threads_increasable(svcpt)) {
- spin_unlock(&svcpt->scp_lock);
- kfree(thread);
- return -EMFILE;
- }
-
- if (svcpt->scp_nthrs_starting != 0) {
- /* serialize starting because some modules (obdfilter)
- * might require unique and contiguous t_id
- */
- LASSERT(svcpt->scp_nthrs_starting == 1);
- spin_unlock(&svcpt->scp_lock);
- kfree(thread);
- if (wait) {
- CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
- svc->srv_thread_name, svcpt->scp_thr_nextid);
- schedule();
- goto again;
- }
-
- CDEBUG(D_INFO, "Creating thread %s #%d race, retry later\n",
- svc->srv_thread_name, svcpt->scp_thr_nextid);
- return -EAGAIN;
- }
-
- svcpt->scp_nthrs_starting++;
- thread->t_id = svcpt->scp_thr_nextid++;
- thread_add_flags(thread, SVC_STARTING);
- thread->t_svcpt = svcpt;
-
- list_add(&thread->t_link, &svcpt->scp_threads);
- spin_unlock(&svcpt->scp_lock);
-
- if (svcpt->scp_cpt >= 0) {
- snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
- svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
- } else {
- snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
- svc->srv_thread_name, thread->t_id);
- }
-
- CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
- task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name);
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- CERROR("cannot start thread '%s': rc = %d\n",
- thread->t_name, rc);
- spin_lock(&svcpt->scp_lock);
- --svcpt->scp_nthrs_starting;
- if (thread_is_stopping(thread)) {
- /* this ptlrpc_thread is being handled
- * by ptlrpc_svcpt_stop_threads now
- */
- thread_add_flags(thread, SVC_STOPPED);
- wake_up(&thread->t_ctl_waitq);
- spin_unlock(&svcpt->scp_lock);
- } else {
- list_del(&thread->t_link);
- spin_unlock(&svcpt->scp_lock);
- kfree(thread);
- }
- return rc;
- }
-
- if (!wait)
- return 0;
-
- wait_event_idle(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread));
-
- rc = thread_is_stopped(thread) ? thread->t_id : 0;
- return rc;
-}
-
-int ptlrpc_hr_init(void)
-{
- struct ptlrpc_hr_partition *hrp;
- struct ptlrpc_hr_thread *hrt;
- int rc;
- int i;
- int j;
- int weight;
-
- memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
- ptlrpc_hr.hr_cpt_table = cfs_cpt_table;
-
- ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
- sizeof(*hrp));
- if (!ptlrpc_hr.hr_partitions)
- return -ENOMEM;
-
- init_waitqueue_head(&ptlrpc_hr.hr_waitq);
-
- weight = cpumask_weight(topology_sibling_cpumask(0));
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- hrp->hrp_cpt = i;
-
- atomic_set(&hrp->hrp_nstarted, 0);
- atomic_set(&hrp->hrp_nstopped, 0);
-
- hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
- hrp->hrp_nthrs /= weight;
- if (hrp->hrp_nthrs == 0)
- hrp->hrp_nthrs = 1;
-
- hrp->hrp_thrs =
- kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
- cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
- i));
- if (!hrp->hrp_thrs) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (j = 0; j < hrp->hrp_nthrs; j++) {
- hrt = &hrp->hrp_thrs[j];
-
- hrt->hrt_id = j;
- hrt->hrt_partition = hrp;
- init_waitqueue_head(&hrt->hrt_waitq);
- spin_lock_init(&hrt->hrt_lock);
- INIT_LIST_HEAD(&hrt->hrt_queue);
- }
- }
-
- rc = ptlrpc_start_hr_threads();
-out:
- if (rc != 0)
- ptlrpc_hr_fini();
- return rc;
-}
-
-void ptlrpc_hr_fini(void)
-{
- struct ptlrpc_hr_partition *hrp;
- int i;
-
- if (!ptlrpc_hr.hr_partitions)
- return;
-
- ptlrpc_stop_hr_threads();
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- kfree(hrp->hrp_thrs);
- }
-
- cfs_percpt_free(ptlrpc_hr.hr_partitions);
- ptlrpc_hr.hr_partitions = NULL;
-}
-
-/**
- * Wait until all already scheduled replies are processed.
- */
-static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
-{
- while (1) {
- int rc;
-
- rc = wait_event_idle_timeout(
- svcpt->scp_waitq,
- atomic_read(&svcpt->scp_nreps_difficult) == 0,
- 10 * HZ);
- if (rc > 0)
- break;
- CWARN("Unexpectedly long timeout %s %p\n",
- svcpt->scp_service->srv_name, svcpt->scp_service);
- }
-}
-
-static void
-ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- int i;
-
- /* early disarm AT timer... */
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service)
- del_timer(&svcpt->scp_at_timer);
- }
-}
-
-static void
-ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_request_buffer_desc *rqbd;
- int cnt;
- int rc;
- int i;
-
- /* All history will be culled when the next request buffer is
- * freed in ptlrpc_service_purge_all()
- */
- svc->srv_hist_nrqbds_cpt_max = 0;
-
- rc = LNetClearLazyPortal(svc->srv_req_portal);
- LASSERT(rc == 0);
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (!svcpt->scp_service)
- break;
-
- /* Unlink all the request buffers. This forces a 'final'
- * event with its 'unlink' flag set for each posted rqbd
- */
- list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
- rqbd_list) {
- rc = LNetMDUnlink(rqbd->rqbd_md_h);
- LASSERT(rc == 0 || rc == -ENOENT);
- }
- }
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (!svcpt->scp_service)
- break;
-
- /* Wait for the network to release any buffers
- * it's currently filling
- */
- spin_lock(&svcpt->scp_lock);
- while (svcpt->scp_nrqbds_posted != 0) {
- spin_unlock(&svcpt->scp_lock);
- /* Network access will complete in finite time but
- * the HUGE timeout lets us CWARN for visibility
- * of sluggish LNDs
- */
- cnt = 0;
- while (cnt < LONG_UNLINK &&
- (rc = wait_event_idle_timeout(svcpt->scp_waitq,
- svcpt->scp_nrqbds_posted == 0,
- HZ)) == 0)
- cnt++;
- if (rc == 0) {
- CWARN("Service %s waiting for request buffers\n",
- svcpt->scp_service->srv_name);
- }
- spin_lock(&svcpt->scp_lock);
- }
- spin_unlock(&svcpt->scp_lock);
- }
-}
-
-static void
-ptlrpc_service_purge_all(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_request_buffer_desc *rqbd;
- struct ptlrpc_request *req;
- struct ptlrpc_reply_state *rs;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (!svcpt->scp_service)
- break;
-
- spin_lock(&svcpt->scp_rep_lock);
- while (!list_empty(&svcpt->scp_rep_active)) {
- rs = list_entry(svcpt->scp_rep_active.next,
- struct ptlrpc_reply_state, rs_list);
- spin_lock(&rs->rs_lock);
- ptlrpc_schedule_difficult_reply(rs);
- spin_unlock(&rs->rs_lock);
- }
- spin_unlock(&svcpt->scp_rep_lock);
-
- /* purge the request queue. NB No new replies (rqbds
- * all unlinked) and no service threads, so I'm the only
- * thread noodling the request queue now
- */
- while (!list_empty(&svcpt->scp_req_incoming)) {
- req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
-
- list_del(&req->rq_list);
- svcpt->scp_nreqs_incoming--;
- ptlrpc_server_finish_request(svcpt, req);
- }
-
- while (ptlrpc_server_request_pending(svcpt, true)) {
- req = ptlrpc_server_request_get(svcpt, true);
- ptlrpc_server_finish_active_request(svcpt, req);
- }
-
- LASSERT(list_empty(&svcpt->scp_rqbd_posted));
- LASSERT(svcpt->scp_nreqs_incoming == 0);
- LASSERT(svcpt->scp_nreqs_active == 0);
- /* history should have been culled by
- * ptlrpc_server_finish_request
- */
- LASSERT(svcpt->scp_hist_nrqbds == 0);
-
- /* Now free all the request buffers since nothing
- * references them any more...
- */
-
- while (!list_empty(&svcpt->scp_rqbd_idle)) {
- rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
- ptlrpc_free_rqbd(rqbd);
- }
- ptlrpc_wait_replies(svcpt);
-
- while (!list_empty(&svcpt->scp_rep_idle)) {
- rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state,
- rs_list);
- list_del(&rs->rs_list);
- kvfree(rs);
- }
- }
-}
-
-static void
-ptlrpc_service_free(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_at_array *array;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (!svcpt->scp_service)
- break;
-
- /* In case somebody rearmed this in the meantime */
- del_timer(&svcpt->scp_at_timer);
- array = &svcpt->scp_at_array;
-
- kfree(array->paa_reqs_array);
- array->paa_reqs_array = NULL;
- kfree(array->paa_reqs_count);
- array->paa_reqs_count = NULL;
- }
-
- ptlrpc_service_for_each_part(svcpt, i, svc)
- kfree(svcpt);
-
- if (svc->srv_cpts)
- cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);
-
- kfree(svc);
-}
-
-int ptlrpc_unregister_service(struct ptlrpc_service *service)
-{
- CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
-
- service->srv_is_stopping = 1;
-
- mutex_lock(&ptlrpc_all_services_mutex);
- list_del_init(&service->srv_list);
- mutex_unlock(&ptlrpc_all_services_mutex);
-
- ptlrpc_service_del_atimer(service);
- ptlrpc_stop_all_threads(service);
-
- ptlrpc_service_unlink_rqbd(service);
- ptlrpc_service_purge_all(service);
- ptlrpc_service_nrs_cleanup(service);
-
- ptlrpc_lprocfs_unregister_service(service);
- ptlrpc_sysfs_unregister_service(service);
-
- ptlrpc_service_free(service);
-
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_unregister_service);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
deleted file mode 100644
index 2f64eb417e77..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ /dev/null
@@ -1,4210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <linux/fs.h>
-#include <linux/posix_acl_xattr.h>
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lustre_disk.h>
-#include "ptlrpc_internal.h"
-
-void lustre_assert_wire_constants(void)
-{
- /* Wire protocol assertions generated by 'wirecheck'
- * (make -C lustre/utils newwiretest)
- * running on Linux centos6-bis 2.6.32-358.0.1.el6-head
- * #3 SMP Wed Apr 17 17:37:43 CEST 2013
- * with gcc version 4.4.6 20110731 (Red Hat 4.4.6-3) (GCC)
- */
-
- /* Constants... */
- LASSERTF(PTL_RPC_MSG_REQUEST == 4711, "found %lld\n",
- (long long)PTL_RPC_MSG_REQUEST);
- LASSERTF(PTL_RPC_MSG_ERR == 4712, "found %lld\n",
- (long long)PTL_RPC_MSG_ERR);
- LASSERTF(PTL_RPC_MSG_REPLY == 4713, "found %lld\n",
- (long long)PTL_RPC_MSG_REPLY);
- LASSERTF(MDS_DIR_END_OFF == 0xfffffffffffffffeULL, "found 0x%.16llxULL\n",
- MDS_DIR_END_OFF);
- LASSERTF(DEAD_HANDLE_MAGIC == 0xdeadbeefcafebabeULL, "found 0x%.16llxULL\n",
- DEAD_HANDLE_MAGIC);
- BUILD_BUG_ON(MTI_NAME_MAXLEN != 64);
- LASSERTF(OST_REPLY == 0, "found %lld\n",
- (long long)OST_REPLY);
- LASSERTF(OST_GETATTR == 1, "found %lld\n",
- (long long)OST_GETATTR);
- LASSERTF(OST_SETATTR == 2, "found %lld\n",
- (long long)OST_SETATTR);
- LASSERTF(OST_READ == 3, "found %lld\n",
- (long long)OST_READ);
- LASSERTF(OST_WRITE == 4, "found %lld\n",
- (long long)OST_WRITE);
- LASSERTF(OST_CREATE == 5, "found %lld\n",
- (long long)OST_CREATE);
- LASSERTF(OST_DESTROY == 6, "found %lld\n",
- (long long)OST_DESTROY);
- LASSERTF(OST_GET_INFO == 7, "found %lld\n",
- (long long)OST_GET_INFO);
- LASSERTF(OST_CONNECT == 8, "found %lld\n",
- (long long)OST_CONNECT);
- LASSERTF(OST_DISCONNECT == 9, "found %lld\n",
- (long long)OST_DISCONNECT);
- LASSERTF(OST_PUNCH == 10, "found %lld\n",
- (long long)OST_PUNCH);
- LASSERTF(OST_OPEN == 11, "found %lld\n",
- (long long)OST_OPEN);
- LASSERTF(OST_CLOSE == 12, "found %lld\n",
- (long long)OST_CLOSE);
- LASSERTF(OST_STATFS == 13, "found %lld\n",
- (long long)OST_STATFS);
- LASSERTF(OST_SYNC == 16, "found %lld\n",
- (long long)OST_SYNC);
- LASSERTF(OST_SET_INFO == 17, "found %lld\n",
- (long long)OST_SET_INFO);
- LASSERTF(OST_QUOTACHECK == 18, "found %lld\n",
- (long long)OST_QUOTACHECK);
- LASSERTF(OST_QUOTACTL == 19, "found %lld\n",
- (long long)OST_QUOTACTL);
- LASSERTF(OST_QUOTA_ADJUST_QUNIT == 20, "found %lld\n",
- (long long)OST_QUOTA_ADJUST_QUNIT);
- LASSERTF(OST_LAST_OPC == 21, "found %lld\n",
- (long long)OST_LAST_OPC);
- LASSERTF(OBD_OBJECT_EOF == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
- OBD_OBJECT_EOF);
- LASSERTF(OST_MIN_PRECREATE == 32, "found %lld\n",
- (long long)OST_MIN_PRECREATE);
- LASSERTF(OST_MAX_PRECREATE == 20000, "found %lld\n",
- (long long)OST_MAX_PRECREATE);
- LASSERTF(OST_LVB_ERR_INIT == 0xffbadbad80000000ULL, "found 0x%.16llxULL\n",
- OST_LVB_ERR_INIT);
- LASSERTF(OST_LVB_ERR_MASK == 0xffbadbad00000000ULL, "found 0x%.16llxULL\n",
- OST_LVB_ERR_MASK);
- LASSERTF(MDS_FIRST_OPC == 33, "found %lld\n",
- (long long)MDS_FIRST_OPC);
- LASSERTF(MDS_GETATTR == 33, "found %lld\n",
- (long long)MDS_GETATTR);
- LASSERTF(MDS_GETATTR_NAME == 34, "found %lld\n",
- (long long)MDS_GETATTR_NAME);
- LASSERTF(MDS_CLOSE == 35, "found %lld\n",
- (long long)MDS_CLOSE);
- LASSERTF(MDS_REINT == 36, "found %lld\n",
- (long long)MDS_REINT);
- LASSERTF(MDS_READPAGE == 37, "found %lld\n",
- (long long)MDS_READPAGE);
- LASSERTF(MDS_CONNECT == 38, "found %lld\n",
- (long long)MDS_CONNECT);
- LASSERTF(MDS_DISCONNECT == 39, "found %lld\n",
- (long long)MDS_DISCONNECT);
- LASSERTF(MDS_GETSTATUS == 40, "found %lld\n",
- (long long)MDS_GETSTATUS);
- LASSERTF(MDS_STATFS == 41, "found %lld\n",
- (long long)MDS_STATFS);
- LASSERTF(MDS_PIN == 42, "found %lld\n",
- (long long)MDS_PIN);
- LASSERTF(MDS_UNPIN == 43, "found %lld\n",
- (long long)MDS_UNPIN);
- LASSERTF(MDS_SYNC == 44, "found %lld\n",
- (long long)MDS_SYNC);
- LASSERTF(MDS_DONE_WRITING == 45, "found %lld\n",
- (long long)MDS_DONE_WRITING);
- LASSERTF(MDS_SET_INFO == 46, "found %lld\n",
- (long long)MDS_SET_INFO);
- LASSERTF(MDS_QUOTACHECK == 47, "found %lld\n",
- (long long)MDS_QUOTACHECK);
- LASSERTF(MDS_QUOTACTL == 48, "found %lld\n",
- (long long)MDS_QUOTACTL);
- LASSERTF(MDS_GETXATTR == 49, "found %lld\n",
- (long long)MDS_GETXATTR);
- LASSERTF(MDS_SETXATTR == 50, "found %lld\n",
- (long long)MDS_SETXATTR);
- LASSERTF(MDS_WRITEPAGE == 51, "found %lld\n",
- (long long)MDS_WRITEPAGE);
- LASSERTF(MDS_IS_SUBDIR == 52, "found %lld\n",
- (long long)MDS_IS_SUBDIR);
- LASSERTF(MDS_GET_INFO == 53, "found %lld\n",
- (long long)MDS_GET_INFO);
- LASSERTF(MDS_HSM_STATE_GET == 54, "found %lld\n",
- (long long)MDS_HSM_STATE_GET);
- LASSERTF(MDS_HSM_STATE_SET == 55, "found %lld\n",
- (long long)MDS_HSM_STATE_SET);
- LASSERTF(MDS_HSM_ACTION == 56, "found %lld\n",
- (long long)MDS_HSM_ACTION);
- LASSERTF(MDS_HSM_PROGRESS == 57, "found %lld\n",
- (long long)MDS_HSM_PROGRESS);
- LASSERTF(MDS_HSM_REQUEST == 58, "found %lld\n",
- (long long)MDS_HSM_REQUEST);
- LASSERTF(MDS_HSM_CT_REGISTER == 59, "found %lld\n",
- (long long)MDS_HSM_CT_REGISTER);
- LASSERTF(MDS_HSM_CT_UNREGISTER == 60, "found %lld\n",
- (long long)MDS_HSM_CT_UNREGISTER);
- LASSERTF(MDS_SWAP_LAYOUTS == 61, "found %lld\n",
- (long long)MDS_SWAP_LAYOUTS);
- LASSERTF(MDS_LAST_OPC == 62, "found %lld\n",
- (long long)MDS_LAST_OPC);
- LASSERTF(REINT_SETATTR == 1, "found %lld\n",
- (long long)REINT_SETATTR);
- LASSERTF(REINT_CREATE == 2, "found %lld\n",
- (long long)REINT_CREATE);
- LASSERTF(REINT_LINK == 3, "found %lld\n",
- (long long)REINT_LINK);
- LASSERTF(REINT_UNLINK == 4, "found %lld\n",
- (long long)REINT_UNLINK);
- LASSERTF(REINT_RENAME == 5, "found %lld\n",
- (long long)REINT_RENAME);
- LASSERTF(REINT_OPEN == 6, "found %lld\n",
- (long long)REINT_OPEN);
- LASSERTF(REINT_SETXATTR == 7, "found %lld\n",
- (long long)REINT_SETXATTR);
- LASSERTF(REINT_RMENTRY == 8, "found %lld\n",
- (long long)REINT_RMENTRY);
- LASSERTF(REINT_MIGRATE == 9, "found %lld\n",
- (long long)REINT_MIGRATE);
- LASSERTF(REINT_MAX == 10, "found %lld\n",
- (long long)REINT_MAX);
- LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_IT_EXECD);
- LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_LOOKUP_EXECD);
- LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_LOOKUP_NEG);
- LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_LOOKUP_POS);
- LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_OPEN_CREATE);
- LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_OPEN_OPEN);
- LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_ENQ_COMPLETE);
- LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_ENQ_OPEN_REF);
- LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_ENQ_CREATE_REF);
- LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n",
- (unsigned int)DISP_OPEN_LOCK);
- LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n",
- (long long)MDS_STATUS_CONN);
- LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n",
- (long long)MDS_STATUS_LOV);
- LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_MODE);
- LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_UID);
- LASSERTF(MDS_ATTR_GID == 0x0000000000000004ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_GID);
- LASSERTF(MDS_ATTR_SIZE == 0x0000000000000008ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_SIZE);
- LASSERTF(MDS_ATTR_ATIME == 0x0000000000000010ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_ATIME);
- LASSERTF(MDS_ATTR_MTIME == 0x0000000000000020ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_MTIME);
- LASSERTF(MDS_ATTR_CTIME == 0x0000000000000040ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_CTIME);
- LASSERTF(MDS_ATTR_ATIME_SET == 0x0000000000000080ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_ATIME_SET);
- LASSERTF(MDS_ATTR_MTIME_SET == 0x0000000000000100ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_MTIME_SET);
- LASSERTF(MDS_ATTR_FORCE == 0x0000000000000200ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_FORCE);
- LASSERTF(MDS_ATTR_ATTR_FLAG == 0x0000000000000400ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_ATTR_FLAG);
- LASSERTF(MDS_ATTR_KILL_SUID == 0x0000000000000800ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_KILL_SUID);
- LASSERTF(MDS_ATTR_KILL_SGID == 0x0000000000001000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_KILL_SGID);
- LASSERTF(MDS_ATTR_CTIME_SET == 0x0000000000002000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_CTIME_SET);
- LASSERTF(MDS_ATTR_FROM_OPEN == 0x0000000000004000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_FROM_OPEN);
- LASSERTF(MDS_ATTR_BLOCKS == 0x0000000000008000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_BLOCKS);
- LASSERTF(FLD_QUERY == 900, "found %lld\n",
- (long long)FLD_QUERY);
- LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n",
- (long long)FLD_FIRST_OPC);
- LASSERTF(FLD_READ == 901, "found %lld\n",
- (long long)FLD_READ);
- LASSERTF(FLD_LAST_OPC == 902, "found %lld\n",
- (long long)FLD_LAST_OPC);
- LASSERTF(SEQ_QUERY == 700, "found %lld\n",
- (long long)SEQ_QUERY);
- LASSERTF(SEQ_FIRST_OPC == 700, "found %lld\n",
- (long long)SEQ_FIRST_OPC);
- LASSERTF(SEQ_LAST_OPC == 701, "found %lld\n",
- (long long)SEQ_LAST_OPC);
- LASSERTF(SEQ_ALLOC_SUPER == 0, "found %lld\n",
- (long long)SEQ_ALLOC_SUPER);
- LASSERTF(SEQ_ALLOC_META == 1, "found %lld\n",
- (long long)SEQ_ALLOC_META);
- LASSERTF(LDLM_ENQUEUE == 101, "found %lld\n",
- (long long)LDLM_ENQUEUE);
- LASSERTF(LDLM_CONVERT == 102, "found %lld\n",
- (long long)LDLM_CONVERT);
- LASSERTF(LDLM_CANCEL == 103, "found %lld\n",
- (long long)LDLM_CANCEL);
- LASSERTF(LDLM_BL_CALLBACK == 104, "found %lld\n",
- (long long)LDLM_BL_CALLBACK);
- LASSERTF(LDLM_CP_CALLBACK == 105, "found %lld\n",
- (long long)LDLM_CP_CALLBACK);
- LASSERTF(LDLM_GL_CALLBACK == 106, "found %lld\n",
- (long long)LDLM_GL_CALLBACK);
- LASSERTF(LDLM_SET_INFO == 107, "found %lld\n",
- (long long)LDLM_SET_INFO);
- LASSERTF(LDLM_LAST_OPC == 108, "found %lld\n",
- (long long)LDLM_LAST_OPC);
- LASSERTF(LCK_MINMODE == 0, "found %lld\n",
- (long long)LCK_MINMODE);
- LASSERTF(LCK_EX == 1, "found %lld\n",
- (long long)LCK_EX);
- LASSERTF(LCK_PW == 2, "found %lld\n",
- (long long)LCK_PW);
- LASSERTF(LCK_PR == 4, "found %lld\n",
- (long long)LCK_PR);
- LASSERTF(LCK_CW == 8, "found %lld\n",
- (long long)LCK_CW);
- LASSERTF(LCK_CR == 16, "found %lld\n",
- (long long)LCK_CR);
- LASSERTF(LCK_NL == 32, "found %lld\n",
- (long long)LCK_NL);
- LASSERTF(LCK_GROUP == 64, "found %lld\n",
- (long long)LCK_GROUP);
- LASSERTF(LCK_COS == 128, "found %lld\n",
- (long long)LCK_COS);
- LASSERTF(LCK_MAXMODE == 129, "found %lld\n",
- (long long)LCK_MAXMODE);
- LASSERTF(LCK_MODE_NUM == 8, "found %lld\n",
- (long long)LCK_MODE_NUM);
- BUILD_BUG_ON(LDLM_PLAIN != 10);
- BUILD_BUG_ON(LDLM_EXTENT != 11);
- BUILD_BUG_ON(LDLM_FLOCK != 12);
- BUILD_BUG_ON(LDLM_IBITS != 13);
- BUILD_BUG_ON(LDLM_MAX_TYPE != 14);
- BUILD_BUG_ON(LUSTRE_RES_ID_SEQ_OFF != 0);
- BUILD_BUG_ON(LUSTRE_RES_ID_VER_OID_OFF != 1);
- BUILD_BUG_ON(LUSTRE_RES_ID_QUOTA_SEQ_OFF != 2);
- BUILD_BUG_ON(LUSTRE_RES_ID_QUOTA_VER_OID_OFF != 3);
- BUILD_BUG_ON(LUSTRE_RES_ID_HSH_OFF != 3);
- LASSERTF(OBD_PING == 400, "found %lld\n",
- (long long)OBD_PING);
- LASSERTF(OBD_LOG_CANCEL == 401, "found %lld\n",
- (long long)OBD_LOG_CANCEL);
- LASSERTF(OBD_QC_CALLBACK == 402, "found %lld\n",
- (long long)OBD_QC_CALLBACK);
- LASSERTF(OBD_IDX_READ == 403, "found %lld\n",
- (long long)OBD_IDX_READ);
- LASSERTF(OBD_LAST_OPC == 404, "found %lld\n",
- (long long)OBD_LAST_OPC);
- LASSERTF(QUOTA_DQACQ == 601, "found %lld\n",
- (long long)QUOTA_DQACQ);
- LASSERTF(QUOTA_DQREL == 602, "found %lld\n",
- (long long)QUOTA_DQREL);
- LASSERTF(QUOTA_LAST_OPC == 603, "found %lld\n",
- (long long)QUOTA_LAST_OPC);
- LASSERTF(MGS_CONNECT == 250, "found %lld\n",
- (long long)MGS_CONNECT);
- LASSERTF(MGS_DISCONNECT == 251, "found %lld\n",
- (long long)MGS_DISCONNECT);
- LASSERTF(MGS_EXCEPTION == 252, "found %lld\n",
- (long long)MGS_EXCEPTION);
- LASSERTF(MGS_TARGET_REG == 253, "found %lld\n",
- (long long)MGS_TARGET_REG);
- LASSERTF(MGS_TARGET_DEL == 254, "found %lld\n",
- (long long)MGS_TARGET_DEL);
- LASSERTF(MGS_SET_INFO == 255, "found %lld\n",
- (long long)MGS_SET_INFO);
- LASSERTF(MGS_LAST_OPC == 257, "found %lld\n",
- (long long)MGS_LAST_OPC);
- LASSERTF(SEC_CTX_INIT == 801, "found %lld\n",
- (long long)SEC_CTX_INIT);
- LASSERTF(SEC_CTX_INIT_CONT == 802, "found %lld\n",
- (long long)SEC_CTX_INIT_CONT);
- LASSERTF(SEC_CTX_FINI == 803, "found %lld\n",
- (long long)SEC_CTX_FINI);
- LASSERTF(SEC_LAST_OPC == 804, "found %lld\n",
- (long long)SEC_LAST_OPC);
- /* Sizes and Offsets */
-
- /* Checks for struct obd_uuid */
- LASSERTF((int)sizeof(struct obd_uuid) == 40, "found %lld\n",
- (long long)(int)sizeof(struct obd_uuid));
-
- /* Checks for struct lu_seq_range */
- LASSERTF((int)sizeof(struct lu_seq_range) == 24, "found %lld\n",
- (long long)(int)sizeof(struct lu_seq_range));
- LASSERTF((int)offsetof(struct lu_seq_range, lsr_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lu_seq_range, lsr_start));
- LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_start));
- LASSERTF((int)offsetof(struct lu_seq_range, lsr_end) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lu_seq_range, lsr_end));
- LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_end));
- LASSERTF((int)offsetof(struct lu_seq_range, lsr_index) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lu_seq_range, lsr_index));
- LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_index));
- LASSERTF((int)offsetof(struct lu_seq_range, lsr_flags) == 20, "found %lld\n",
- (long long)(int)offsetof(struct lu_seq_range, lsr_flags));
- LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_flags));
- LASSERTF(LU_SEQ_RANGE_MDT == 0, "found %lld\n",
- (long long)LU_SEQ_RANGE_MDT);
- LASSERTF(LU_SEQ_RANGE_OST == 1, "found %lld\n",
- (long long)LU_SEQ_RANGE_OST);
-
- /* Checks for struct lustre_mdt_attrs */
- LASSERTF((int)sizeof(struct lustre_mdt_attrs) == 24, "found %lld\n",
- (long long)(int)sizeof(struct lustre_mdt_attrs));
- LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_compat) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_mdt_attrs, lma_compat));
- LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_compat) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_compat));
- LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_incompat) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lustre_mdt_attrs, lma_incompat));
- LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_incompat) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_incompat));
- LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_self_fid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lustre_mdt_attrs, lma_self_fid));
- LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid));
- LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)LMAI_RELEASED);
- LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)LMAC_HSM);
- LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned int)LMAC_NOT_IN_OI);
- LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned int)LMAC_FID_ON_OST);
-
- /* Checks for struct ost_id */
- LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
- (long long)(int)sizeof(struct ost_id));
- LASSERTF((int)offsetof(struct ost_id, oi) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ost_id, oi));
- LASSERTF((int)sizeof(((struct ost_id *)0)->oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct ost_id *)0)->oi));
- LASSERTF(LUSTRE_FID_INIT_OID == 1, "found %lld\n",
- (long long)LUSTRE_FID_INIT_OID);
- LASSERTF(FID_SEQ_OST_MDT0 == 0, "found %lld\n",
- (long long)FID_SEQ_OST_MDT0);
- LASSERTF(FID_SEQ_LLOG == 1, "found %lld\n",
- (long long)FID_SEQ_LLOG);
- LASSERTF(FID_SEQ_ECHO == 2, "found %lld\n",
- (long long)FID_SEQ_ECHO);
- LASSERTF(FID_SEQ_OST_MDT1 == 3, "found %lld\n",
- (long long)FID_SEQ_OST_MDT1);
- LASSERTF(FID_SEQ_OST_MAX == 9, "found %lld\n",
- (long long)FID_SEQ_OST_MAX);
- LASSERTF(FID_SEQ_RSVD == 11, "found %lld\n",
- (long long)FID_SEQ_RSVD);
- LASSERTF(FID_SEQ_IGIF == 12, "found %lld\n",
- (long long)FID_SEQ_IGIF);
- LASSERTF(FID_SEQ_IGIF_MAX == 0x00000000ffffffffULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_IGIF_MAX);
- LASSERTF(FID_SEQ_IDIF == 0x0000000100000000ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_IDIF);
- LASSERTF(FID_SEQ_IDIF_MAX == 0x00000001ffffffffULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_IDIF_MAX);
- LASSERTF(FID_SEQ_START == 0x0000000200000000ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_START);
- LASSERTF(FID_SEQ_LOCAL_FILE == 0x0000000200000001ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_LOCAL_FILE);
- LASSERTF(FID_SEQ_DOT_LUSTRE == 0x0000000200000002ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_DOT_LUSTRE);
- LASSERTF(FID_SEQ_SPECIAL == 0x0000000200000004ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_SPECIAL);
- LASSERTF(FID_SEQ_QUOTA == 0x0000000200000005ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_QUOTA);
- LASSERTF(FID_SEQ_QUOTA_GLB == 0x0000000200000006ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_QUOTA_GLB);
- LASSERTF(FID_SEQ_ROOT == 0x0000000200000007ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_ROOT);
- LASSERTF(FID_SEQ_NORMAL == 0x0000000200000400ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_NORMAL);
- LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_LOV_DEFAULT);
- LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)FID_OID_SPECIAL_BFL);
- LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)FID_OID_DOT_LUSTRE);
- LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned int)FID_OID_DOT_LUSTRE_OBF);
-
- /* Checks for struct lu_dirent */
- LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lu_dirent));
- LASSERTF((int)offsetof(struct lu_dirent, lde_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_fid));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_fid));
- LASSERTF((int)offsetof(struct lu_dirent, lde_hash) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_hash));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_hash) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_hash));
- LASSERTF((int)offsetof(struct lu_dirent, lde_reclen) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_reclen));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_reclen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_reclen));
- LASSERTF((int)offsetof(struct lu_dirent, lde_namelen) == 26, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_namelen));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_namelen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_namelen));
- LASSERTF((int)offsetof(struct lu_dirent, lde_attrs) == 28, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_attrs));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_attrs) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_attrs));
- LASSERTF((int)offsetof(struct lu_dirent, lde_name[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_name[0]));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0]));
- LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)LUDA_FID);
- LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned int)LUDA_TYPE);
- LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned int)LUDA_64BITHASH);
-
- /* Checks for struct luda_type */
- LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n",
- (long long)(int)sizeof(struct luda_type));
- LASSERTF((int)offsetof(struct luda_type, lt_type) == 0, "found %lld\n",
- (long long)(int)offsetof(struct luda_type, lt_type));
- LASSERTF((int)sizeof(((struct luda_type *)0)->lt_type) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct luda_type *)0)->lt_type));
-
- /* Checks for struct lu_dirpage */
- LASSERTF((int)sizeof(struct lu_dirpage) == 24, "found %lld\n",
- (long long)(int)sizeof(struct lu_dirpage));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_hash_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_hash_start));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_hash_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_hash_start));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_hash_end) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_hash_end));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_hash_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_hash_end));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_flags));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_flags));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_pad0) == 20, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_pad0));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_pad0) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_pad0));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_entries[0]) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_entries[0]));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_entries[0]) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_entries[0]));
- LASSERTF(LDF_EMPTY == 1, "found %lld\n",
- (long long)LDF_EMPTY);
- LASSERTF(LDF_COLLIDE == 2, "found %lld\n",
- (long long)LDF_COLLIDE);
- LASSERTF(LU_PAGE_SIZE == 4096, "found %lld\n",
- (long long)LU_PAGE_SIZE);
-
- /* Checks for struct lustre_handle */
- LASSERTF((int)sizeof(struct lustre_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(struct lustre_handle));
- LASSERTF((int)offsetof(struct lustre_handle, cookie) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_handle, cookie));
- LASSERTF((int)sizeof(((struct lustre_handle *)0)->cookie) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_handle *)0)->cookie));
-
- /* Checks for struct lustre_msg_v2 */
- LASSERTF((int)sizeof(struct lustre_msg_v2) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lustre_msg_v2));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_bufcount) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_bufcount));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_bufcount) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_bufcount));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_secflvr) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_secflvr));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_secflvr) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_secflvr));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_magic) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_magic));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_magic));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_repsize) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_repsize));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_repsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_repsize));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_cksum) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_cksum));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_cksum) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_cksum));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_flags) == 20, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_flags));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_flags));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_padding_2) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_padding_2));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_2));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_padding_3) == 28, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_padding_3));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_3));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_buflens[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_buflens[0]));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0]) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0]));
- LASSERTF(LUSTRE_MSG_MAGIC_V2 == 0x0BD00BD3, "found 0x%.8x\n",
- LUSTRE_MSG_MAGIC_V2);
- LASSERTF(LUSTRE_MSG_MAGIC_V2_SWABBED == 0xD30BD00B, "found 0x%.8x\n",
- LUSTRE_MSG_MAGIC_V2_SWABBED);
-
- /* Checks for struct ptlrpc_body */
- LASSERTF((int)sizeof(struct ptlrpc_body_v3) == 184, "found %lld\n",
- (long long)(int)sizeof(struct ptlrpc_body_v3));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_handle) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_handle));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_type) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_type));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_version) == 12, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_version));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_opc) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_opc));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_status) == 20, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_status));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_xid) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_tag));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == 34, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding0));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == 36, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding1));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_transno) == 48, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_transno));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_flags) == 56, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_flags));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_op_flags) == 60, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_op_flags));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt) == 64, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_timeout) == 68, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_timeout));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_service_time) == 72, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_service_time));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_limit) == 76, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_limit));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_slv) == 80, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_slv));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv));
- BUILD_BUG_ON(PTLRPC_NUM_VERSIONS != 4);
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_pre_versions) == 88, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == 120, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_mbits));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == 128, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_0));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == 136, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_1));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == 144, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_2));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2));
- BUILD_BUG_ON(LUSTRE_JOBID_SIZE != 32);
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_jobid) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_jobid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_handle) == (int)offsetof(struct ptlrpc_body_v2, pb_handle), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_handle), (int)offsetof(struct ptlrpc_body_v2, pb_handle));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_handle), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_handle));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_type) == (int)offsetof(struct ptlrpc_body_v2, pb_type), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_type), (int)offsetof(struct ptlrpc_body_v2, pb_type));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_type), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_type));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_version) == (int)offsetof(struct ptlrpc_body_v2, pb_version), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_version), (int)offsetof(struct ptlrpc_body_v2, pb_version));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_version), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_version));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_opc) == (int)offsetof(struct ptlrpc_body_v2, pb_opc), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_opc), (int)offsetof(struct ptlrpc_body_v2, pb_opc));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_opc), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_opc));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_status) == (int)offsetof(struct ptlrpc_body_v2, pb_status), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_status), (int)offsetof(struct ptlrpc_body_v2, pb_status));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_status), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_status));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_xid) == (int)offsetof(struct ptlrpc_body_v2, pb_last_xid), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == (int)offsetof(struct ptlrpc_body_v2, pb_tag), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_tag), (int)offsetof(struct ptlrpc_body_v2, pb_tag));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding0), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_padding0), (int)offsetof(struct ptlrpc_body_v2, pb_padding0));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding1), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_padding1), (int)offsetof(struct ptlrpc_body_v2, pb_padding1));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_transno) == (int)offsetof(struct ptlrpc_body_v2, pb_transno), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_transno), (int)offsetof(struct ptlrpc_body_v2, pb_transno));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_transno), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_transno));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_flags) == (int)offsetof(struct ptlrpc_body_v2, pb_flags), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_flags), (int)offsetof(struct ptlrpc_body_v2, pb_flags));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_flags), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_flags));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_op_flags) == (int)offsetof(struct ptlrpc_body_v2, pb_op_flags), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_op_flags), (int)offsetof(struct ptlrpc_body_v2, pb_op_flags));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_op_flags), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_op_flags));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt) == (int)offsetof(struct ptlrpc_body_v2, pb_conn_cnt), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt), (int)offsetof(struct ptlrpc_body_v2, pb_conn_cnt));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_conn_cnt), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_conn_cnt));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_timeout) == (int)offsetof(struct ptlrpc_body_v2, pb_timeout), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_timeout), (int)offsetof(struct ptlrpc_body_v2, pb_timeout));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_timeout), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_timeout));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_service_time) == (int)offsetof(struct ptlrpc_body_v2, pb_service_time), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_service_time), (int)offsetof(struct ptlrpc_body_v2, pb_service_time));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_service_time), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_service_time));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_limit) == (int)offsetof(struct ptlrpc_body_v2, pb_limit), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_limit), (int)offsetof(struct ptlrpc_body_v2, pb_limit));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_limit), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_limit));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_slv) == (int)offsetof(struct ptlrpc_body_v2, pb_slv), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_slv), (int)offsetof(struct ptlrpc_body_v2, pb_slv));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_slv), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_slv));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_pre_versions) == (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == (int)offsetof(struct ptlrpc_body_v2, pb_mbits), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_mbits), (int)offsetof(struct ptlrpc_body_v2, pb_mbits));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_padding64_0), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_padding64_1), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_padding64_2), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2));
- LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n",
- (long long)MSG_PTLRPC_BODY_OFF);
- LASSERTF(REQ_REC_OFF == 1, "found %lld\n",
- (long long)REQ_REC_OFF);
- LASSERTF(REPLY_REC_OFF == 1, "found %lld\n",
- (long long)REPLY_REC_OFF);
- LASSERTF(DLM_LOCKREQ_OFF == 1, "found %lld\n",
- (long long)DLM_LOCKREQ_OFF);
- LASSERTF(DLM_REQ_REC_OFF == 2, "found %lld\n",
- (long long)DLM_REQ_REC_OFF);
- LASSERTF(DLM_INTENT_IT_OFF == 2, "found %lld\n",
- (long long)DLM_INTENT_IT_OFF);
- LASSERTF(DLM_INTENT_REC_OFF == 3, "found %lld\n",
- (long long)DLM_INTENT_REC_OFF);
- LASSERTF(DLM_LOCKREPLY_OFF == 1, "found %lld\n",
- (long long)DLM_LOCKREPLY_OFF);
- LASSERTF(DLM_REPLY_REC_OFF == 2, "found %lld\n",
- (long long)DLM_REPLY_REC_OFF);
- LASSERTF(MSG_PTLRPC_HEADER_OFF == 31, "found %lld\n",
- (long long)MSG_PTLRPC_HEADER_OFF);
- LASSERTF(PTLRPC_MSG_VERSION == 0x00000003, "found 0x%.8x\n",
- PTLRPC_MSG_VERSION);
- LASSERTF(LUSTRE_VERSION_MASK == 0xffff0000, "found 0x%.8x\n",
- LUSTRE_VERSION_MASK);
- LASSERTF(LUSTRE_OBD_VERSION == 0x00010000, "found 0x%.8x\n",
- LUSTRE_OBD_VERSION);
- LASSERTF(LUSTRE_MDS_VERSION == 0x00020000, "found 0x%.8x\n",
- LUSTRE_MDS_VERSION);
- LASSERTF(LUSTRE_OST_VERSION == 0x00030000, "found 0x%.8x\n",
- LUSTRE_OST_VERSION);
- LASSERTF(LUSTRE_DLM_VERSION == 0x00040000, "found 0x%.8x\n",
- LUSTRE_DLM_VERSION);
- LASSERTF(LUSTRE_LOG_VERSION == 0x00050000, "found 0x%.8x\n",
- LUSTRE_LOG_VERSION);
- LASSERTF(LUSTRE_MGS_VERSION == 0x00060000, "found 0x%.8x\n",
- LUSTRE_MGS_VERSION);
- LASSERTF(MSGHDR_AT_SUPPORT == 1, "found %lld\n",
- (long long)MSGHDR_AT_SUPPORT);
- LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n",
- (long long)MSGHDR_CKSUM_INCOMPAT18);
- LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_OP_FLAG_MASK);
- LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n",
- (long long)MSG_OP_FLAG_SHIFT);
- LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n",
- (unsigned int)MSG_GEN_FLAG_MASK);
- LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_LAST_REPLAY);
- LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_RESENT);
- LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_REPLAY);
- LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_DELAY_REPLAY);
- LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_VERSION_REPLAY);
- LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_REQ_REPLAY_DONE);
- LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_LOCK_REPLAY_DONE);
- LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_CONNECT_RECOVERING);
- LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_CONNECT_RECONNECT);
- LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_CONNECT_REPLAYABLE);
- LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_CONNECT_LIBCLIENT);
- LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_CONNECT_INITIAL);
- LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_CONNECT_ASYNC);
- LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_CONNECT_NEXT_VER);
- LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned int)MSG_CONNECT_TRANSNO);
-
- /* Checks for struct obd_connect_data */
- LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n",
- (long long)(int)sizeof(struct obd_connect_data));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_version) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_version));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_version));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_grant) == 12, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_grant));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_grant) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_grant));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_index) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_index));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_index));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_brw_size) == 20, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_brw_size));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_brw_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_brw_size));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_ibits_known) == 24, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_ibits_known));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_ibits_known) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_ibits_known));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_blocksize) == 32, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_blocksize));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_blocksize) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_blocksize));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_inodespace) == 33, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_inodespace));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_inodespace) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_inodespace));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_grant_extent) == 34, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_grant_extent));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_grant_extent) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_grant_extent));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_unused) == 36, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_unused));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_unused) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_unused));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_transno) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_transno));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_transno) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_transno));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_group) == 48, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_group));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_group) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_group));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_cksum_types) == 52, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_cksum_types));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_cksum_types) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_cksum_types));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_max_easize) == 56, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_max_easize));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_max_easize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_max_easize));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_instance) == 60, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_instance));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_instance) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_instance));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxbytes) == 64, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxmodrpcs) == 72, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_maxmodrpcs));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs));
- LASSERTF((int)offsetof(struct obd_connect_data, padding0) == 74, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding0));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding0) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding0));
- LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 76, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding1));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding1));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags2) == 80, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags2));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2));
- LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding3));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding3));
- LASSERTF((int)offsetof(struct obd_connect_data, padding4) == 96, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding4));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding4));
- LASSERTF((int)offsetof(struct obd_connect_data, padding5) == 104, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding5));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding5) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding5));
- LASSERTF((int)offsetof(struct obd_connect_data, padding6) == 112, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding6));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding6) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding6));
- LASSERTF((int)offsetof(struct obd_connect_data, padding7) == 120, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding7));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding7) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding7));
- LASSERTF((int)offsetof(struct obd_connect_data, padding8) == 128, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding8));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding8) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding8));
- LASSERTF((int)offsetof(struct obd_connect_data, padding9) == 136, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding9));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding9) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding9));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingA) == 144, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingA));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingA) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingA));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingB) == 152, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingB));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingB) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingB));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingC) == 160, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingC));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingC) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingC));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingD) == 168, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingD));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingD) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingD));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingE) == 176, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingE));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingE) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingE));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingF) == 184, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingF));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingF) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingF));
- LASSERTF(OBD_CONNECT_RDONLY == 0x1ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_RDONLY);
- LASSERTF(OBD_CONNECT_INDEX == 0x2ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_INDEX);
- LASSERTF(OBD_CONNECT_MDS == 0x4ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MDS);
- LASSERTF(OBD_CONNECT_GRANT == 0x8ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_GRANT);
- LASSERTF(OBD_CONNECT_SRVLOCK == 0x10ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_SRVLOCK);
- LASSERTF(OBD_CONNECT_VERSION == 0x20ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_VERSION);
- LASSERTF(OBD_CONNECT_REQPORTAL == 0x40ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_REQPORTAL);
- LASSERTF(OBD_CONNECT_ACL == 0x80ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_ACL);
- LASSERTF(OBD_CONNECT_XATTR == 0x100ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_XATTR);
- LASSERTF(OBD_CONNECT_CROW == 0x200ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_CROW);
- LASSERTF(OBD_CONNECT_TRUNCLOCK == 0x400ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_TRUNCLOCK);
- LASSERTF(OBD_CONNECT_TRANSNO == 0x800ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_TRANSNO);
- LASSERTF(OBD_CONNECT_IBITS == 0x1000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_IBITS);
- LASSERTF(OBD_CONNECT_JOIN == 0x2000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_JOIN);
- LASSERTF(OBD_CONNECT_ATTRFID == 0x4000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_ATTRFID);
- LASSERTF(OBD_CONNECT_NODEVOH == 0x8000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_NODEVOH);
- LASSERTF(OBD_CONNECT_RMT_CLIENT == 0x10000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_RMT_CLIENT);
- LASSERTF(OBD_CONNECT_RMT_CLIENT_FORCE == 0x20000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_RMT_CLIENT_FORCE);
- LASSERTF(OBD_CONNECT_BRW_SIZE == 0x40000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_BRW_SIZE);
- LASSERTF(OBD_CONNECT_QUOTA64 == 0x80000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_QUOTA64);
- LASSERTF(OBD_CONNECT_MDS_CAPA == 0x100000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MDS_CAPA);
- LASSERTF(OBD_CONNECT_OSS_CAPA == 0x200000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_OSS_CAPA);
- LASSERTF(OBD_CONNECT_CANCELSET == 0x400000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_CANCELSET);
- LASSERTF(OBD_CONNECT_SOM == 0x800000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_SOM);
- LASSERTF(OBD_CONNECT_AT == 0x1000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_AT);
- LASSERTF(OBD_CONNECT_LRU_RESIZE == 0x2000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LRU_RESIZE);
- LASSERTF(OBD_CONNECT_MDS_MDS == 0x4000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MDS_MDS);
- LASSERTF(OBD_CONNECT_REAL == 0x8000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_REAL);
- LASSERTF(OBD_CONNECT_CHANGE_QS == 0x10000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_CHANGE_QS);
- LASSERTF(OBD_CONNECT_CKSUM == 0x20000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_CKSUM);
- LASSERTF(OBD_CONNECT_FID == 0x40000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_FID);
- LASSERTF(OBD_CONNECT_VBR == 0x80000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_VBR);
- LASSERTF(OBD_CONNECT_LOV_V3 == 0x100000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LOV_V3);
- LASSERTF(OBD_CONNECT_GRANT_SHRINK == 0x200000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_GRANT_SHRINK);
- LASSERTF(OBD_CONNECT_SKIP_ORPHAN == 0x400000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_SKIP_ORPHAN);
- LASSERTF(OBD_CONNECT_MAX_EASIZE == 0x800000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MAX_EASIZE);
- LASSERTF(OBD_CONNECT_FULL20 == 0x1000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_FULL20);
- LASSERTF(OBD_CONNECT_LAYOUTLOCK == 0x2000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LAYOUTLOCK);
- LASSERTF(OBD_CONNECT_64BITHASH == 0x4000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_64BITHASH);
- LASSERTF(OBD_CONNECT_MAXBYTES == 0x8000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MAXBYTES);
- LASSERTF(OBD_CONNECT_IMP_RECOV == 0x10000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_IMP_RECOV);
- LASSERTF(OBD_CONNECT_JOBSTATS == 0x20000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_JOBSTATS);
- LASSERTF(OBD_CONNECT_UMASK == 0x40000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_UMASK);
- LASSERTF(OBD_CONNECT_EINPROGRESS == 0x80000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_EINPROGRESS);
- LASSERTF(OBD_CONNECT_GRANT_PARAM == 0x100000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_GRANT_PARAM);
- LASSERTF(OBD_CONNECT_FLOCK_OWNER == 0x200000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_FLOCK_OWNER);
- LASSERTF(OBD_CONNECT_LVB_TYPE == 0x400000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LVB_TYPE);
- LASSERTF(OBD_CONNECT_NANOSEC_TIME == 0x800000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_NANOSEC_TIME);
- LASSERTF(OBD_CONNECT_LIGHTWEIGHT == 0x1000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LIGHTWEIGHT);
- LASSERTF(OBD_CONNECT_SHORTIO == 0x2000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_SHORTIO);
- LASSERTF(OBD_CONNECT_PINGLESS == 0x4000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_PINGLESS);
- LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL,
- "found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD);
- LASSERTF(OBD_CONNECT_OPEN_BY_FID == 0x20000000000000ULL,
- "found 0x%.16llxULL\n", OBD_CONNECT_OPEN_BY_FID);
- LASSERTF(OBD_CONNECT_LFSCK == 0x40000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LFSCK);
- LASSERTF(OBD_CONNECT_UNLINK_CLOSE == 0x100000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_UNLINK_CLOSE);
- LASSERTF(OBD_CONNECT_MULTIMODRPCS == 0x200000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MULTIMODRPCS);
- LASSERTF(OBD_CONNECT_DIR_STRIPE == 0x400000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_DIR_STRIPE);
- LASSERTF(OBD_CONNECT_SUBTREE == 0x800000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_SUBTREE);
- LASSERTF(OBD_CONNECT_LOCK_AHEAD == 0x1000000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LOCK_AHEAD);
- LASSERTF(OBD_CONNECT_OBDOPACK == 0x4000000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_OBDOPACK);
- LASSERTF(OBD_CONNECT_FLAGS2 == 0x8000000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_FLAGS2);
- LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)OBD_CKSUM_CRC32);
- LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned int)OBD_CKSUM_ADLER);
- LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned int)OBD_CKSUM_CRC32C);
-
- /* Checks for struct obdo */
- LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n",
- (long long)(int)sizeof(struct obdo));
- LASSERTF((int)offsetof(struct obdo, o_valid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_valid));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_valid));
- LASSERTF((int)offsetof(struct obdo, o_oi) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_oi));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_oi));
- LASSERTF((int)offsetof(struct obdo, o_parent_seq) == 24, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_parent_seq));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_seq) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_parent_seq));
- LASSERTF((int)offsetof(struct obdo, o_size) == 32, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_size));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_size));
- LASSERTF((int)offsetof(struct obdo, o_mtime) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_mtime));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_mtime));
- LASSERTF((int)offsetof(struct obdo, o_atime) == 48, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_atime));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_atime));
- LASSERTF((int)offsetof(struct obdo, o_ctime) == 56, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_ctime));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_ctime));
- LASSERTF((int)offsetof(struct obdo, o_blocks) == 64, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_blocks));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_blocks));
- LASSERTF((int)offsetof(struct obdo, o_grant) == 72, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_grant));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_grant) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_grant));
- LASSERTF((int)offsetof(struct obdo, o_blksize) == 80, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_blksize));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_blksize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_blksize));
- LASSERTF((int)offsetof(struct obdo, o_mode) == 84, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_mode));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_mode));
- LASSERTF((int)offsetof(struct obdo, o_uid) == 88, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_uid));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_uid));
- LASSERTF((int)offsetof(struct obdo, o_gid) == 92, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_gid));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_gid));
- LASSERTF((int)offsetof(struct obdo, o_flags) == 96, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_flags));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_flags));
- LASSERTF((int)offsetof(struct obdo, o_nlink) == 100, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_nlink));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_nlink) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_nlink));
- LASSERTF((int)offsetof(struct obdo, o_parent_oid) == 104, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_parent_oid));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_oid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_parent_oid));
- LASSERTF((int)offsetof(struct obdo, o_misc) == 108, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_misc));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_misc) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_misc));
- LASSERTF((int)offsetof(struct obdo, o_ioepoch) == 112, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_ioepoch));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_ioepoch) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_ioepoch));
- LASSERTF((int)offsetof(struct obdo, o_stripe_idx) == 120, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_stripe_idx));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_stripe_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_stripe_idx));
- LASSERTF((int)offsetof(struct obdo, o_parent_ver) == 124, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_parent_ver));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_ver) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_parent_ver));
- LASSERTF((int)offsetof(struct obdo, o_handle) == 128, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_handle));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_handle));
- LASSERTF((int)offsetof(struct obdo, o_lcookie) == 136, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_lcookie));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_lcookie) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_lcookie));
- LASSERTF((int)offsetof(struct obdo, o_uid_h) == 168, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_uid_h));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_uid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_uid_h));
- LASSERTF((int)offsetof(struct obdo, o_gid_h) == 172, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_gid_h));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_gid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_gid_h));
- LASSERTF((int)offsetof(struct obdo, o_data_version) == 176, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_data_version));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_data_version) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_data_version));
- LASSERTF((int)offsetof(struct obdo, o_padding_4) == 184, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_padding_4));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_padding_4));
- LASSERTF((int)offsetof(struct obdo, o_padding_5) == 192, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_padding_5));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_5) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_padding_5));
- LASSERTF((int)offsetof(struct obdo, o_padding_6) == 200, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_padding_6));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_6) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_padding_6));
- LASSERTF(OBD_MD_FLID == (0x00000001ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLID);
- LASSERTF(OBD_MD_FLATIME == (0x00000002ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLATIME);
- LASSERTF(OBD_MD_FLMTIME == (0x00000004ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLMTIME);
- LASSERTF(OBD_MD_FLCTIME == (0x00000008ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCTIME);
- LASSERTF(OBD_MD_FLSIZE == (0x00000010ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLSIZE);
- LASSERTF(OBD_MD_FLBLOCKS == (0x00000020ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLBLOCKS);
- LASSERTF(OBD_MD_FLBLKSZ == (0x00000040ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLBLKSZ);
- LASSERTF(OBD_MD_FLMODE == (0x00000080ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLMODE);
- LASSERTF(OBD_MD_FLTYPE == (0x00000100ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLTYPE);
- LASSERTF(OBD_MD_FLUID == (0x00000200ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLUID);
- LASSERTF(OBD_MD_FLGID == (0x00000400ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGID);
- LASSERTF(OBD_MD_FLFLAGS == (0x00000800ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLFLAGS);
- LASSERTF(OBD_MD_FLNLINK == (0x00002000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLNLINK);
- LASSERTF(OBD_MD_FLGENER == (0x00004000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGENER);
- LASSERTF(OBD_MD_FLRDEV == (0x00010000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRDEV);
- LASSERTF(OBD_MD_FLEASIZE == (0x00020000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLEASIZE);
- LASSERTF(OBD_MD_LINKNAME == (0x00040000ULL), "found 0x%.16llxULL\n",
- OBD_MD_LINKNAME);
- LASSERTF(OBD_MD_FLHANDLE == (0x00080000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLHANDLE);
- LASSERTF(OBD_MD_FLCKSUM == (0x00100000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCKSUM);
- LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLQOS);
- LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGROUP);
- LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLFID);
- LASSERTF(OBD_MD_FLEPOCH == (0x04000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLEPOCH);
- LASSERTF(OBD_MD_FLGRANT == (0x08000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGRANT);
- LASSERTF(OBD_MD_FLDIREA == (0x10000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLDIREA);
- LASSERTF(OBD_MD_FLUSRQUOTA == (0x20000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLUSRQUOTA);
- LASSERTF(OBD_MD_FLGRPQUOTA == (0x40000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGRPQUOTA);
- LASSERTF(OBD_MD_FLMODEASIZE == (0x80000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLMODEASIZE);
- LASSERTF(OBD_MD_MDS == (0x0000000100000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_MDS);
- LASSERTF(OBD_MD_REINT == (0x0000000200000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_REINT);
- LASSERTF(OBD_MD_MEA == (0x0000000400000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_MEA);
- LASSERTF(OBD_MD_TSTATE == (0x0000000800000000ULL),
- "found 0x%.16llxULL\n", OBD_MD_TSTATE);
- LASSERTF(OBD_MD_FLXATTR == (0x0000001000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLXATTR);
- LASSERTF(OBD_MD_FLXATTRLS == (0x0000002000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLXATTRLS);
- LASSERTF(OBD_MD_FLXATTRRM == (0x0000004000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLXATTRRM);
- LASSERTF(OBD_MD_FLACL == (0x0000008000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLACL);
- LASSERTF(OBD_MD_FLMDSCAPA == (0x0000020000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLMDSCAPA);
- LASSERTF(OBD_MD_FLOSSCAPA == (0x0000040000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLOSSCAPA);
- LASSERTF(OBD_MD_FLCKSPLIT == (0x0000080000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCKSPLIT);
- LASSERTF(OBD_MD_FLCROSSREF == (0x0000100000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCROSSREF);
- LASSERTF(OBD_MD_FLGETATTRLOCK == (0x0000200000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGETATTRLOCK);
- LASSERTF(OBD_MD_FLDATAVERSION == (0x0010000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLDATAVERSION);
- BUILD_BUG_ON(OBD_FL_INLINEDATA != 0x00000001);
- BUILD_BUG_ON(OBD_FL_OBDMDEXISTS != 0x00000002);
- BUILD_BUG_ON(OBD_FL_DELORPHAN != 0x00000004);
- BUILD_BUG_ON(OBD_FL_NORPC != 0x00000008);
- BUILD_BUG_ON(OBD_FL_IDONLY != 0x00000010);
- BUILD_BUG_ON(OBD_FL_RECREATE_OBJS != 0x00000020);
- BUILD_BUG_ON(OBD_FL_DEBUG_CHECK != 0x00000040);
- BUILD_BUG_ON(OBD_FL_NO_USRQUOTA != 0x00000100);
- BUILD_BUG_ON(OBD_FL_NO_GRPQUOTA != 0x00000200);
- BUILD_BUG_ON(OBD_FL_CREATE_CROW != 0x00000400);
- BUILD_BUG_ON(OBD_FL_SRVLOCK != 0x00000800);
- BUILD_BUG_ON(OBD_FL_CKSUM_CRC32 != 0x00001000);
- BUILD_BUG_ON(OBD_FL_CKSUM_ADLER != 0x00002000);
- BUILD_BUG_ON(OBD_FL_CKSUM_CRC32C != 0x00004000);
- BUILD_BUG_ON(OBD_FL_CKSUM_RSVD2 != 0x00008000);
- BUILD_BUG_ON(OBD_FL_CKSUM_RSVD3 != 0x00010000);
- BUILD_BUG_ON(OBD_FL_SHRINK_GRANT != 0x00020000);
- BUILD_BUG_ON(OBD_FL_MMAP != 0x00040000);
- BUILD_BUG_ON(OBD_FL_RECOV_RESEND != 0x00080000);
- BUILD_BUG_ON(OBD_FL_NOSPC_BLK != 0x00100000);
- BUILD_BUG_ON(OBD_FL_LOCAL_MASK != 0xf0000000);
-
- /* Checks for struct lov_ost_data_v1 */
- LASSERTF((int)sizeof(struct lov_ost_data_v1) == 24, "found %lld\n",
- (long long)(int)sizeof(struct lov_ost_data_v1));
- LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_oi) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_oi));
- LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_oi));
- LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_gen) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_gen));
- LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_gen) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_gen));
- LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_idx) == 20, "found %lld\n",
- (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_idx));
- LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_idx));
-
- /* Checks for struct lov_mds_md_v1 */
- LASSERTF((int)sizeof(struct lov_mds_md_v1) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lov_mds_md_v1));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_magic));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_magic));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_pattern) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_pattern));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_pattern) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_pattern));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_oi) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_oi));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_oi));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_stripe_size) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_stripe_size));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_size));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_stripe_count) == 28, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_stripe_count));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_count) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_count));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_layout_gen) == 30, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_layout_gen));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_layout_gen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_layout_gen));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_objects[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_objects[0]));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0]) == 24, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0]));
- BUILD_BUG_ON(LOV_MAGIC_V1 != (0x0BD10000 | 0x0BD0));
-
- /* Checks for struct lov_mds_md_v3 */
- LASSERTF((int)sizeof(struct lov_mds_md_v3) == 48, "found %lld\n",
- (long long)(int)sizeof(struct lov_mds_md_v3));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_magic));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_magic));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_pattern) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_pattern));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pattern) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pattern));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_oi) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_oi));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_oi));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_stripe_size) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_stripe_size));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_size));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_stripe_count) == 28, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_stripe_count));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_count) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_count));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_layout_gen) == 30, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_layout_gen));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen));
- BUILD_BUG_ON(LOV_MAXPOOLNAME != 15);
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16]) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16]));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pool_name[16]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pool_name[16]));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_objects[0]) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_objects[0]));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]) == 24, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]));
- BUILD_BUG_ON(LOV_MAGIC_V3 != (0x0BD30000 | 0x0BD0));
- LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)LOV_PATTERN_RAID0);
- LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned int)LOV_PATTERN_RAID1);
- LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned int)LOV_PATTERN_FIRST);
- LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n",
- (unsigned int)LOV_PATTERN_CMOBD);
-
- /* Checks for struct lmv_mds_md_v1 */
- LASSERTF((int)sizeof(struct lmv_mds_md_v1) == 56, "found %lld\n",
- (long long)(int)sizeof(struct lmv_mds_md_v1));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_magic));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_magic));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_stripe_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_stripe_count));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_count));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_master_mdt_index) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_master_mdt_index));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_master_mdt_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_master_mdt_index));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_hash_type) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_hash_type));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_hash_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_hash_type));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_layout_version) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_layout_version));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_layout_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_layout_version));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_padding1) == 20, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_padding1));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding1));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_padding2) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_padding2));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding2));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_padding3) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_padding3));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding3));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_pool_name[16]) == 56, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_pool_name[16]));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_pool_name[16]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_pool_name[16]));
- LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_stripe_fids[0]) == 56, "found %lld\n",
- (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_stripe_fids[0]));
- LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_fids[0]) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_fids[0]));
- BUILD_BUG_ON(LMV_MAGIC_V1 != 0x0CD20CD0);
- BUILD_BUG_ON(LMV_MAGIC_STRIPE != 0x0CD40CD0);
- BUILD_BUG_ON(LMV_HASH_TYPE_MASK != 0x0000ffff);
- BUILD_BUG_ON(LMV_HASH_FLAG_MIGRATION != 0x80000000);
- BUILD_BUG_ON(LMV_HASH_FLAG_DEAD != 0x40000000);
-
- /* Checks for struct obd_statfs */
- LASSERTF((int)sizeof(struct obd_statfs) == 144, "found %lld\n",
- (long long)(int)sizeof(struct obd_statfs));
- LASSERTF((int)offsetof(struct obd_statfs, os_type) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_type));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_type) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_type));
- LASSERTF((int)offsetof(struct obd_statfs, os_blocks) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_blocks));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_blocks));
- LASSERTF((int)offsetof(struct obd_statfs, os_bfree) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_bfree));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bfree) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_bfree));
- LASSERTF((int)offsetof(struct obd_statfs, os_bavail) == 24, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_bavail));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bavail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_bavail));
- LASSERTF((int)offsetof(struct obd_statfs, os_ffree) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_ffree));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_ffree) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_ffree));
- LASSERTF((int)offsetof(struct obd_statfs, os_fsid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_fsid));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_fsid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_fsid));
- LASSERTF((int)offsetof(struct obd_statfs, os_bsize) == 88, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_bsize));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_bsize));
- LASSERTF((int)offsetof(struct obd_statfs, os_namelen) == 92, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_namelen));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_namelen) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_namelen));
- LASSERTF((int)offsetof(struct obd_statfs, os_state) == 104, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_state));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_state) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_state));
- LASSERTF((int)offsetof(struct obd_statfs, os_fprecreated) == 108, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_fprecreated));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_fprecreated) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_fprecreated));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare2) == 112, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare2));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare2));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare3) == 116, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare3));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare3));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare4) == 120, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare4));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare4));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare5) == 124, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare5));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare5));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare6) == 128, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare6));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare6) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare6));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare7) == 132, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare7));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare7) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare7));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare8) == 136, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare8));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare8));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare9) == 140, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare9));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare9) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare9));
-
- /* Checks for struct obd_ioobj */
- LASSERTF((int)sizeof(struct obd_ioobj) == 24, "found %lld\n",
- (long long)(int)sizeof(struct obd_ioobj));
- LASSERTF((int)offsetof(struct obd_ioobj, ioo_oid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_ioobj, ioo_oid));
- LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_oid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_oid));
- LASSERTF((int)offsetof(struct obd_ioobj, ioo_max_brw) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_ioobj, ioo_max_brw));
- LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_max_brw) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_max_brw));
- LASSERTF((int)offsetof(struct obd_ioobj, ioo_bufcnt) == 20, "found %lld\n",
- (long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt));
- LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt));
- LASSERTF(IOOBJ_MAX_BRW_BITS == 16, "found %lld\n",
- (long long)IOOBJ_MAX_BRW_BITS);
-
- /* Checks for union lquota_id */
- LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n",
- (long long)(int)sizeof(union lquota_id));
-
- /* Checks for struct obd_quotactl */
- LASSERTF((int)sizeof(struct obd_quotactl) == 112, "found %lld\n",
- (long long)(int)sizeof(struct obd_quotactl));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_cmd) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_cmd));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_cmd) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_cmd));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_type) == 4, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_type));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_type));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_id) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_id));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_id));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_stat) == 12, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_stat));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_stat) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_stat));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_dqinfo) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_dqinfo));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_dqinfo) == 24, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_dqinfo));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_dqblk) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_dqblk));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_dqblk) == 72, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_dqblk));
-
- /* Checks for struct obd_dqinfo */
- LASSERTF((int)sizeof(struct obd_dqinfo) == 24, "found %lld\n",
- (long long)(int)sizeof(struct obd_dqinfo));
- LASSERTF((int)offsetof(struct obd_dqinfo, dqi_bgrace) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqinfo, dqi_bgrace));
- LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_bgrace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_bgrace));
- LASSERTF((int)offsetof(struct obd_dqinfo, dqi_igrace) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqinfo, dqi_igrace));
- LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_igrace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_igrace));
- LASSERTF((int)offsetof(struct obd_dqinfo, dqi_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqinfo, dqi_flags));
- LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_flags));
- LASSERTF((int)offsetof(struct obd_dqinfo, dqi_valid) == 20, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqinfo, dqi_valid));
- LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_valid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_valid));
-
- /* Checks for struct obd_dqblk */
- LASSERTF((int)sizeof(struct obd_dqblk) == 72, "found %lld\n",
- (long long)(int)sizeof(struct obd_dqblk));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_bhardlimit) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_bhardlimit));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_bhardlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_bhardlimit));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_bsoftlimit) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_bsoftlimit));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_bsoftlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_bsoftlimit));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_curspace) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_curspace));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_curspace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_curspace));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_ihardlimit) == 24, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_ihardlimit));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_ihardlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_ihardlimit));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_isoftlimit) == 32, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_isoftlimit));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_isoftlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_isoftlimit));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_curinodes) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_curinodes));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_curinodes) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_curinodes));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_btime) == 48, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_btime));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_btime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_btime));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_itime) == 56, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_itime));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_itime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_itime));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_valid) == 64, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_valid));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_valid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_valid));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_padding) == 68, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_padding));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_padding));
- LASSERTF(Q_QUOTACHECK == 0x800100, "found 0x%.8x\n",
- Q_QUOTACHECK);
- LASSERTF(Q_INITQUOTA == 0x800101, "found 0x%.8x\n",
- Q_INITQUOTA);
- LASSERTF(Q_GETOINFO == 0x800102, "found 0x%.8x\n",
- Q_GETOINFO);
- LASSERTF(Q_GETOQUOTA == 0x800103, "found 0x%.8x\n",
- Q_GETOQUOTA);
- LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n",
- Q_FINVALIDATE);
-
- /* Checks for struct niobuf_remote */
- LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n",
- (long long)(int)sizeof(struct niobuf_remote));
- LASSERTF((int)offsetof(struct niobuf_remote, rnb_offset) == 0, "found %lld\n",
- (long long)(int)offsetof(struct niobuf_remote, rnb_offset));
- LASSERTF((int)sizeof(((struct niobuf_remote *)0)->rnb_offset) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct niobuf_remote *)0)->rnb_offset));
- LASSERTF((int)offsetof(struct niobuf_remote, rnb_len) == 8, "found %lld\n",
- (long long)(int)offsetof(struct niobuf_remote, rnb_len));
- LASSERTF((int)sizeof(((struct niobuf_remote *)0)->rnb_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct niobuf_remote *)0)->rnb_len));
- LASSERTF((int)offsetof(struct niobuf_remote, rnb_flags) == 12, "found %lld\n",
- (long long)(int)offsetof(struct niobuf_remote, rnb_flags));
- LASSERTF((int)sizeof(((struct niobuf_remote *)0)->rnb_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct niobuf_remote *)0)->rnb_flags));
- LASSERTF(OBD_BRW_READ == 0x01, "found 0x%.8x\n",
- OBD_BRW_READ);
- LASSERTF(OBD_BRW_WRITE == 0x02, "found 0x%.8x\n",
- OBD_BRW_WRITE);
- LASSERTF(OBD_BRW_SYNC == 0x08, "found 0x%.8x\n",
- OBD_BRW_SYNC);
- LASSERTF(OBD_BRW_CHECK == 0x10, "found 0x%.8x\n",
- OBD_BRW_CHECK);
- LASSERTF(OBD_BRW_FROM_GRANT == 0x20, "found 0x%.8x\n",
- OBD_BRW_FROM_GRANT);
- LASSERTF(OBD_BRW_GRANTED == 0x40, "found 0x%.8x\n",
- OBD_BRW_GRANTED);
- LASSERTF(OBD_BRW_NOCACHE == 0x80, "found 0x%.8x\n",
- OBD_BRW_NOCACHE);
- LASSERTF(OBD_BRW_NOQUOTA == 0x100, "found 0x%.8x\n",
- OBD_BRW_NOQUOTA);
- LASSERTF(OBD_BRW_SRVLOCK == 0x200, "found 0x%.8x\n",
- OBD_BRW_SRVLOCK);
- LASSERTF(OBD_BRW_ASYNC == 0x400, "found 0x%.8x\n",
- OBD_BRW_ASYNC);
- LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n",
- OBD_BRW_MEMALLOC);
- LASSERTF(OBD_BRW_OVER_USRQUOTA == 0x1000, "found 0x%.8x\n",
- OBD_BRW_OVER_USRQUOTA);
- LASSERTF(OBD_BRW_OVER_GRPQUOTA == 0x2000, "found 0x%.8x\n",
- OBD_BRW_OVER_GRPQUOTA);
- LASSERTF(OBD_BRW_SOFT_SYNC == 0x4000, "found 0x%.8x\n",
- OBD_BRW_SOFT_SYNC);
-
- /* Checks for struct ost_body */
- LASSERTF((int)sizeof(struct ost_body) == 208, "found %lld\n",
- (long long)(int)sizeof(struct ost_body));
- LASSERTF((int)offsetof(struct ost_body, oa) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ost_body, oa));
- LASSERTF((int)sizeof(((struct ost_body *)0)->oa) == 208, "found %lld\n",
- (long long)(int)sizeof(((struct ost_body *)0)->oa));
-
- /* Checks for struct ll_fid */
- LASSERTF((int)sizeof(struct ll_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(struct ll_fid));
- LASSERTF((int)offsetof(struct ll_fid, id) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ll_fid, id));
- LASSERTF((int)sizeof(((struct ll_fid *)0)->id) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fid *)0)->id));
- LASSERTF((int)offsetof(struct ll_fid, generation) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fid, generation));
- LASSERTF((int)sizeof(((struct ll_fid *)0)->generation) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fid *)0)->generation));
- LASSERTF((int)offsetof(struct ll_fid, f_type) == 12, "found %lld\n",
- (long long)(int)offsetof(struct ll_fid, f_type));
- LASSERTF((int)sizeof(((struct ll_fid *)0)->f_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fid *)0)->f_type));
-
- /* Checks for struct mdt_body */
- LASSERTF((int)sizeof(struct mdt_body) == 216, "found %lld\n",
- (long long)(int)sizeof(struct mdt_body));
- LASSERTF((int)offsetof(struct mdt_body, mbo_fid1) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_fid1));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fid1));
- LASSERTF((int)offsetof(struct mdt_body, mbo_fid2) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_fid2));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fid2));
- LASSERTF((int)offsetof(struct mdt_body, mbo_handle) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_handle));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_handle));
- LASSERTF((int)offsetof(struct mdt_body, mbo_valid) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_valid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_valid));
- LASSERTF((int)offsetof(struct mdt_body, mbo_size) == 48, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_size));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_size));
- LASSERTF((int)offsetof(struct mdt_body, mbo_mtime) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_mtime));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_mtime));
- LASSERTF((int)offsetof(struct mdt_body, mbo_atime) == 64, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_atime));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_atime));
- LASSERTF((int)offsetof(struct mdt_body, mbo_ctime) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_ctime));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_ctime));
- LASSERTF((int)offsetof(struct mdt_body, mbo_blocks) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_blocks));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_blocks));
- LASSERTF((int)offsetof(struct mdt_body, mbo_t_state) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_t_state));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_t_state) == 8,
- "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_t_state));
- LASSERTF((int)offsetof(struct mdt_body, mbo_fsuid) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_fsuid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fsuid));
- LASSERTF((int)offsetof(struct mdt_body, mbo_fsgid) == 108, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_fsgid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fsgid));
- LASSERTF((int)offsetof(struct mdt_body, mbo_capability) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_capability));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_capability) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_capability));
- LASSERTF((int)offsetof(struct mdt_body, mbo_mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_mode));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_mode));
- LASSERTF((int)offsetof(struct mdt_body, mbo_uid) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_uid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_uid));
- LASSERTF((int)offsetof(struct mdt_body, mbo_gid) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_gid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_gid));
- LASSERTF((int)offsetof(struct mdt_body, mbo_flags) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_flags));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_flags));
- LASSERTF((int)offsetof(struct mdt_body, mbo_rdev) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_rdev));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_rdev) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_rdev));
- LASSERTF((int)offsetof(struct mdt_body, mbo_nlink) == 136, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_nlink));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_nlink) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_nlink));
- LASSERTF((int)offsetof(struct mdt_body, mbo_unused2) == 140, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_unused2));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused2));
- LASSERTF((int)offsetof(struct mdt_body, mbo_suppgid) == 144, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_suppgid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_suppgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_suppgid));
- LASSERTF((int)offsetof(struct mdt_body, mbo_eadatasize) == 148, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_eadatasize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_eadatasize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_eadatasize));
- LASSERTF((int)offsetof(struct mdt_body, mbo_aclsize) == 152, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_aclsize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_aclsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_aclsize));
- LASSERTF((int)offsetof(struct mdt_body, mbo_max_mdsize) == 156, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_max_mdsize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize));
- LASSERTF((int)offsetof(struct mdt_body, mbo_unused3) == 160, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_unused3));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused3));
- LASSERTF((int)offsetof(struct mdt_body, mbo_uid_h) == 164, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_uid_h));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_uid_h));
- LASSERTF((int)offsetof(struct mdt_body, mbo_gid_h) == 168, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_gid_h));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_gid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_gid_h));
- LASSERTF((int)offsetof(struct mdt_body, mbo_padding_5) == 172, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_padding_5));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_5));
- LASSERTF((int)offsetof(struct mdt_body, mbo_padding_6) == 176, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_padding_6));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_6) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_6));
- LASSERTF((int)offsetof(struct mdt_body, mbo_padding_7) == 184, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_padding_7));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_7) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_7));
- LASSERTF((int)offsetof(struct mdt_body, mbo_padding_8) == 192, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_padding_8));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_8) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_8));
- LASSERTF((int)offsetof(struct mdt_body, mbo_padding_9) == 200, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_padding_9));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_9) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_9));
- LASSERTF((int)offsetof(struct mdt_body, mbo_padding_10) == 208, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_padding_10));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_10) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_10));
- LASSERTF(MDS_FMODE_CLOSED == 000000000000UL, "found 0%.11oUL\n",
- MDS_FMODE_CLOSED);
- LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n",
- MDS_FMODE_EXEC);
- LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n",
- MDS_OPEN_CREATED);
- LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n",
- MDS_OPEN_CROSS);
- LASSERTF(MDS_OPEN_CREAT == 000000000100UL, "found 0%.11oUL\n",
- MDS_OPEN_CREAT);
- LASSERTF(MDS_OPEN_EXCL == 000000000200UL, "found 0%.11oUL\n",
- MDS_OPEN_EXCL);
- LASSERTF(MDS_OPEN_TRUNC == 000000001000UL, "found 0%.11oUL\n",
- MDS_OPEN_TRUNC);
- LASSERTF(MDS_OPEN_APPEND == 000000002000UL, "found 0%.11oUL\n",
- MDS_OPEN_APPEND);
- LASSERTF(MDS_OPEN_SYNC == 000000010000UL, "found 0%.11oUL\n",
- MDS_OPEN_SYNC);
- LASSERTF(MDS_OPEN_DIRECTORY == 000000200000UL, "found 0%.11oUL\n",
- MDS_OPEN_DIRECTORY);
- LASSERTF(MDS_OPEN_BY_FID == 000040000000UL, "found 0%.11oUL\n",
- MDS_OPEN_BY_FID);
- LASSERTF(MDS_OPEN_DELAY_CREATE == 000100000000UL, "found 0%.11oUL\n",
- MDS_OPEN_DELAY_CREATE);
- LASSERTF(MDS_OPEN_OWNEROVERRIDE == 000200000000UL, "found 0%.11oUL\n",
- MDS_OPEN_OWNEROVERRIDE);
- LASSERTF(MDS_OPEN_JOIN_FILE == 000400000000UL, "found 0%.11oUL\n",
- MDS_OPEN_JOIN_FILE);
- LASSERTF(MDS_OPEN_LOCK == 004000000000UL, "found 0%.11oUL\n",
- MDS_OPEN_LOCK);
- LASSERTF(MDS_OPEN_HAS_EA == 010000000000UL, "found 0%.11oUL\n",
- MDS_OPEN_HAS_EA);
- LASSERTF(MDS_OPEN_HAS_OBJS == 020000000000UL, "found 0%.11oUL\n",
- MDS_OPEN_HAS_OBJS);
- LASSERTF(MDS_OPEN_NORESTORE == 00000000000100000000000ULL, "found 0%.22lloULL\n",
- (long long)MDS_OPEN_NORESTORE);
- LASSERTF(MDS_OPEN_NEWSTRIPE == 00000000000200000000000ULL, "found 0%.22lloULL\n",
- (long long)MDS_OPEN_NEWSTRIPE);
- LASSERTF(MDS_OPEN_VOLATILE == 00000000000400000000000ULL, "found 0%.22lloULL\n",
- (long long)MDS_OPEN_VOLATILE);
- LASSERTF(LUSTRE_SYNC_FL == 0x00000008, "found 0x%.8x\n",
- LUSTRE_SYNC_FL);
- LASSERTF(LUSTRE_IMMUTABLE_FL == 0x00000010, "found 0x%.8x\n",
- LUSTRE_IMMUTABLE_FL);
- LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n",
- LUSTRE_APPEND_FL);
- LASSERTF(LUSTRE_NODUMP_FL == 0x00000040, "found 0x%.8x\n",
- LUSTRE_NODUMP_FL);
- LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n",
- LUSTRE_NOATIME_FL);
- LASSERTF(LUSTRE_INDEX_FL == 0x00001000, "found 0x%.8x\n",
- LUSTRE_INDEX_FL);
- LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n",
- LUSTRE_DIRSYNC_FL);
- LASSERTF(LUSTRE_TOPDIR_FL == 0x00020000, "found 0x%.8x\n",
- LUSTRE_TOPDIR_FL);
- LASSERTF(LUSTRE_DIRECTIO_FL == 0x00100000, "found 0x%.8x\n",
- LUSTRE_DIRECTIO_FL);
- LASSERTF(LUSTRE_INLINE_DATA_FL == 0x10000000, "found 0x%.8x\n",
- LUSTRE_INLINE_DATA_FL);
- LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n",
- MDS_INODELOCK_LOOKUP);
- LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n",
- MDS_INODELOCK_UPDATE);
- LASSERTF(MDS_INODELOCK_OPEN == 0x000004, "found 0x%.8x\n",
- MDS_INODELOCK_OPEN);
- LASSERTF(MDS_INODELOCK_LAYOUT == 0x000008, "found 0x%.8x\n",
- MDS_INODELOCK_LAYOUT);
-
- /* Checks for struct mdt_ioepoch */
- LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n",
- (long long)(int)sizeof(struct mdt_ioepoch));
- LASSERTF((int)offsetof(struct mdt_ioepoch, mio_handle) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, mio_handle));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_handle));
- LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused1) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, mio_unused1));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1));
- LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused2) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, mio_unused2));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2));
- LASSERTF((int)offsetof(struct mdt_ioepoch, mio_padding) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, mio_padding));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_padding));
-
- /* Checks for struct mdt_rec_setattr */
- LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_setattr));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_cap));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_suppgid) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_suppgid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_suppgid_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_suppgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_1) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_1_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1_h));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fid) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_valid) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_valid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_valid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_uid) == 64, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_uid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_uid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_gid) == 68, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_gid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_gid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_size) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_size));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_size));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_blocks) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_blocks));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_blocks));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_mtime) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_mtime));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_mtime));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_atime) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_atime));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_atime));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_ctime) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_ctime));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_ctime));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_attr_flags) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_attr_flags));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_attr_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_attr_flags));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_mode));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_bias) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_bias));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_3) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_4) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_4));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_5) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_5));
-
- /* Checks for struct mdt_rec_create */
- LASSERTF((int)sizeof(struct mdt_rec_create) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_create));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_cap));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_old_handle) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_old_handle));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_old_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_old_handle));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_time) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_time));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_time));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_rdev) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_rdev));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_rdev) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_rdev));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_ioepoch) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_ioepoch));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_ioepoch) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_ioepoch));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_padding_1) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_mode) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_mode));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_bias) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_bias));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_flags_l) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_flags_l));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_flags_l) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_flags_l));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_flags_h) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_flags_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_flags_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_flags_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_umask) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_umask));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_umask) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_umask));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_padding_4) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_padding_4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_padding_4));
-
- /* Checks for struct mdt_rec_link */
- LASSERTF((int)sizeof(struct mdt_rec_link) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_link));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_cap));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_time) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_time));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_time));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_1) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_2) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_2));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_2));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_3) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_4) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_4));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_bias) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_bias));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_5) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_5));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_6) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_6));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_6) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_6));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_7) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_7));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_7) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_7));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_8) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_8));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_8));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_9) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_9));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_9) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_9));
-
- /* Checks for struct mdt_rec_unlink */
- LASSERTF((int)sizeof(struct mdt_rec_unlink) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_unlink));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_cap));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_time) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_time));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_time));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_2) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_2));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_2));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_3) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_4) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_4));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_5) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_5) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_5));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_bias) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_bias));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_mode));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_6) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_6));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_6) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_6));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_7) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_7));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_7) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_7));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_8) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_8));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_8));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_9) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_9));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_9) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_9));
-
- /* Checks for struct mdt_rec_rename */
- LASSERTF((int)sizeof(struct mdt_rec_rename) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_rename));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_cap));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_time) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_time));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_time));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_1) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_2) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_2));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_2));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_3) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_4) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_4));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_bias) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_bias));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_mode));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_5) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_5));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_6) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_6));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_6) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_6));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_7) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_7));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_7) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_7));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_8) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_8));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_8));
-
- /* Checks for struct mdt_rec_setxattr */
- LASSERTF((int)sizeof(struct mdt_rec_setxattr) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_setxattr));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_cap));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fid) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fid));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fid));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_1) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_2) == 64, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_2));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_2));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_3) == 68, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_valid) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_valid));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_valid));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_time) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_time));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_time));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_5) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_5) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_5));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_6) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_6));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_6) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_6));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_7) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_7));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_7) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_7));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_size) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_size));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_size));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_flags) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_flags));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_flags));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_8) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_8));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_8));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_9) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_9));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_9) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_9));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_10) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_10));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_10) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_10));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_11) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_11));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_11) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_11));
-
- /* Checks for struct mdt_rec_reint */
- LASSERTF((int)sizeof(struct mdt_rec_reint) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_reint));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_cap));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_mtime) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_mtime));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_mtime));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_atime) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_atime));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_atime));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_ctime) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_ctime));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_ctime));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_size) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_size));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_size));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_blocks) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_blocks));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_blocks));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_bias) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_bias));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_mode));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_flags) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_flags));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_flags));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_flags_h) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_flags_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_flags_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_flags_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_umask) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_umask));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_umask) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_umask));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_padding_4) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_padding_4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_padding_4));
-
- /* Checks for struct lmv_desc */
- LASSERTF((int)sizeof(struct lmv_desc) == 88, "found %lld\n",
- (long long)(int)sizeof(struct lmv_desc));
- LASSERTF((int)offsetof(struct lmv_desc, ld_tgt_count) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_tgt_count));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_tgt_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_tgt_count));
- LASSERTF((int)offsetof(struct lmv_desc, ld_active_tgt_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_active_tgt_count));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_active_tgt_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_active_tgt_count));
- LASSERTF((int)offsetof(struct lmv_desc, ld_default_stripe_count) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_default_stripe_count));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_default_stripe_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_default_stripe_count));
- LASSERTF((int)offsetof(struct lmv_desc, ld_pattern) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_pattern));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_pattern) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_pattern));
- LASSERTF((int)offsetof(struct lmv_desc, ld_default_hash_size) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_default_hash_size));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_default_hash_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_default_hash_size));
- LASSERTF((int)offsetof(struct lmv_desc, ld_padding_1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_padding_1));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_1));
- LASSERTF((int)offsetof(struct lmv_desc, ld_padding_2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_padding_2));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_2));
- LASSERTF((int)offsetof(struct lmv_desc, ld_qos_maxage) == 36, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_qos_maxage));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_qos_maxage) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_qos_maxage));
- LASSERTF((int)offsetof(struct lmv_desc, ld_padding_3) == 40, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_padding_3));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_3));
- LASSERTF((int)offsetof(struct lmv_desc, ld_padding_4) == 44, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_padding_4));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_4));
- LASSERTF((int)offsetof(struct lmv_desc, ld_uuid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_uuid));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_uuid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_uuid));
-
- /* Checks for struct lov_desc */
- LASSERTF((int)sizeof(struct lov_desc) == 88, "found %lld\n",
- (long long)(int)sizeof(struct lov_desc));
- LASSERTF((int)offsetof(struct lov_desc, ld_tgt_count) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_tgt_count));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_tgt_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_tgt_count));
- LASSERTF((int)offsetof(struct lov_desc, ld_active_tgt_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_active_tgt_count));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_active_tgt_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_active_tgt_count));
- LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_count) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_default_stripe_count));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_count));
- LASSERTF((int)offsetof(struct lov_desc, ld_pattern) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_pattern));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_pattern) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_pattern));
- LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_size) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_default_stripe_size));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_size));
- LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_offset) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_default_stripe_offset));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_offset) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_offset));
- LASSERTF((int)offsetof(struct lov_desc, ld_padding_0) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_padding_0));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_0) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_0));
- LASSERTF((int)offsetof(struct lov_desc, ld_qos_maxage) == 36, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_qos_maxage));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_qos_maxage) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_qos_maxage));
- LASSERTF((int)offsetof(struct lov_desc, ld_padding_1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_padding_1));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_1));
- LASSERTF((int)offsetof(struct lov_desc, ld_padding_2) == 44, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_padding_2));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_2));
- LASSERTF((int)offsetof(struct lov_desc, ld_uuid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_uuid));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_uuid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_uuid));
- BUILD_BUG_ON(LOV_DESC_MAGIC != 0xB0CCDE5C);
-
- /* Checks for struct ldlm_res_id */
- LASSERTF((int)sizeof(struct ldlm_res_id) == 32, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_res_id));
- BUILD_BUG_ON(RES_NAME_SIZE != 4);
- LASSERTF((int)offsetof(struct ldlm_res_id, name[4]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_res_id, name[4]));
- LASSERTF((int)sizeof(((struct ldlm_res_id *)0)->name[4]) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_res_id *)0)->name[4]));
-
- /* Checks for struct ldlm_extent */
- LASSERTF((int)sizeof(struct ldlm_extent) == 24, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_extent));
- LASSERTF((int)offsetof(struct ldlm_extent, start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_extent, start));
- LASSERTF((int)sizeof(((struct ldlm_extent *)0)->start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_extent *)0)->start));
- LASSERTF((int)offsetof(struct ldlm_extent, end) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_extent, end));
- LASSERTF((int)sizeof(((struct ldlm_extent *)0)->end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_extent *)0)->end));
- LASSERTF((int)offsetof(struct ldlm_extent, gid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_extent, gid));
- LASSERTF((int)sizeof(((struct ldlm_extent *)0)->gid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_extent *)0)->gid));
-
- /* Checks for struct ldlm_inodebits */
- LASSERTF((int)sizeof(struct ldlm_inodebits) == 8, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_inodebits));
- LASSERTF((int)offsetof(struct ldlm_inodebits, bits) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_inodebits, bits));
- LASSERTF((int)sizeof(((struct ldlm_inodebits *)0)->bits) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_inodebits *)0)->bits));
-
- /* Checks for struct ldlm_flock_wire */
- LASSERTF((int)sizeof(struct ldlm_flock_wire) == 32, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_flock_wire));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_start));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_start));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_end) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_end));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_end));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_owner) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_owner));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_owner) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_owner));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_padding) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_padding));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_padding));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_pid) == 28, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_pid));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_pid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_pid));
-
- /* Checks for struct ldlm_intent */
- LASSERTF((int)sizeof(struct ldlm_intent) == 8, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_intent));
- LASSERTF((int)offsetof(struct ldlm_intent, opc) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_intent, opc));
- LASSERTF((int)sizeof(((struct ldlm_intent *)0)->opc) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_intent *)0)->opc));
-
- /* Checks for struct ldlm_resource_desc */
- LASSERTF((int)sizeof(struct ldlm_resource_desc) == 40, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_resource_desc));
- LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_type) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_resource_desc, lr_type));
- LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_type));
- LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_padding) == 4, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_resource_desc, lr_padding));
- LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding));
- LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_name) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_resource_desc, lr_name));
- LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_name) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_name));
-
- /* Checks for struct ldlm_lock_desc */
- LASSERTF((int)sizeof(struct ldlm_lock_desc) == 80, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_lock_desc));
- LASSERTF((int)offsetof(struct ldlm_lock_desc, l_resource) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_lock_desc, l_resource));
- LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_resource) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_resource));
- LASSERTF((int)offsetof(struct ldlm_lock_desc, l_req_mode) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_lock_desc, l_req_mode));
- LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_req_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_req_mode));
- LASSERTF((int)offsetof(struct ldlm_lock_desc, l_granted_mode) == 44, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_lock_desc, l_granted_mode));
- LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_granted_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_granted_mode));
- LASSERTF((int)offsetof(struct ldlm_lock_desc, l_policy_data) == 48, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_lock_desc, l_policy_data));
- LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_policy_data) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_policy_data));
-
- /* Checks for struct ldlm_request */
- LASSERTF((int)sizeof(struct ldlm_request) == 104, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_request));
- LASSERTF((int)offsetof(struct ldlm_request, lock_flags) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_request, lock_flags));
- LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_request *)0)->lock_flags));
- LASSERTF((int)offsetof(struct ldlm_request, lock_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_request, lock_count));
- LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_request *)0)->lock_count));
- LASSERTF((int)offsetof(struct ldlm_request, lock_desc) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_request, lock_desc));
- LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_desc) == 80, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_request *)0)->lock_desc));
- LASSERTF((int)offsetof(struct ldlm_request, lock_handle) == 88, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_request, lock_handle));
- LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_handle) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_request *)0)->lock_handle));
-
- /* Checks for struct ldlm_reply */
- LASSERTF((int)sizeof(struct ldlm_reply) == 112, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_reply));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_flags) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_flags));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_flags));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_padding) == 4, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_padding));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_padding));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_desc) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_desc));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_desc) == 80, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_desc));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_handle) == 88, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_handle));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_handle));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_policy_res1) == 96, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_policy_res1));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_policy_res1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_policy_res1));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_policy_res2) == 104, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_policy_res2));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_policy_res2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_policy_res2));
-
- /* Checks for struct ost_lvb_v1 */
- LASSERTF((int)sizeof(struct ost_lvb_v1) == 40, "found %lld\n",
- (long long)(int)sizeof(struct ost_lvb_v1));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_size) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_size));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_size));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_mtime) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_mtime));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_mtime));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_atime) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_atime));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_atime));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_ctime) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_ctime));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_ctime));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_blocks) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_blocks));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_blocks));
-
- /* Checks for struct ost_lvb */
- LASSERTF((int)sizeof(struct ost_lvb) == 56, "found %lld\n",
- (long long)(int)sizeof(struct ost_lvb));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_size) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_size));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_size));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_mtime) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_mtime));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_mtime));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_atime) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_atime));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_atime));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_ctime) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_ctime));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_ctime));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_blocks) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_blocks));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_blocks));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_mtime_ns) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_mtime_ns));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_mtime_ns) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_mtime_ns));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_atime_ns) == 44, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_atime_ns));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_atime_ns) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_atime_ns));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_ctime_ns) == 48, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_ctime_ns));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_ctime_ns) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_ctime_ns));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_padding) == 52, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_padding));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_padding));
-
- /* Checks for struct lquota_lvb */
- LASSERTF((int)sizeof(struct lquota_lvb) == 40, "found %lld\n",
- (long long)(int)sizeof(struct lquota_lvb));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_flags) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_flags));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_flags));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_may_rel) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_id_may_rel));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_may_rel) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_may_rel));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_rel) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_id_rel));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_rel) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_rel));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_qunit) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_id_qunit));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_qunit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_qunit));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_pad1) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_pad1));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_pad1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_pad1));
- LASSERTF(LQUOTA_FL_EDQUOT == 1, "found %lld\n",
- (long long)LQUOTA_FL_EDQUOT);
-
- /* Checks for struct ldlm_gl_lquota_desc */
- LASSERTF((int)sizeof(struct ldlm_gl_lquota_desc) == 64, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_gl_lquota_desc));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_id) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_id));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_id) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_id));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_flags));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_flags));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_ver) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_ver));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_ver) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_ver));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_hardlimit) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_hardlimit));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_hardlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_hardlimit));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_softlimit) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_softlimit));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_softlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_softlimit));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_time) == 48, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_time));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_time));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_pad2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_pad2));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_pad2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_pad2));
-
- /* Checks for struct mgs_send_param */
- LASSERTF((int)sizeof(struct mgs_send_param) == 1024, "found %lld\n",
- (long long)(int)sizeof(struct mgs_send_param));
- BUILD_BUG_ON(MGS_PARAM_MAXLEN != 1024);
- LASSERTF((int)offsetof(struct mgs_send_param, mgs_param[1024]) == 1024, "found %lld\n",
- (long long)(int)offsetof(struct mgs_send_param, mgs_param[1024]));
- LASSERTF((int)sizeof(((struct mgs_send_param *)0)->mgs_param[1024]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_send_param *)0)->mgs_param[1024]));
-
- /* Checks for struct cfg_marker */
- LASSERTF((int)sizeof(struct cfg_marker) == 160, "found %lld\n",
- (long long)(int)sizeof(struct cfg_marker));
- LASSERTF((int)offsetof(struct cfg_marker, cm_step) == 0, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_step));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_step) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_step));
- LASSERTF((int)offsetof(struct cfg_marker, cm_flags) == 4, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_flags));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_flags));
- LASSERTF((int)offsetof(struct cfg_marker, cm_vers) == 8, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_vers));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_vers) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_vers));
- LASSERTF((int)offsetof(struct cfg_marker, cm_padding) == 12, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_padding));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_padding));
- LASSERTF((int)offsetof(struct cfg_marker, cm_createtime) == 16, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_createtime));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_createtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_createtime));
- LASSERTF((int)offsetof(struct cfg_marker, cm_canceltime) == 24, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_canceltime));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_canceltime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_canceltime));
- LASSERTF((int)offsetof(struct cfg_marker, cm_tgtname) == 32, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_tgtname));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_tgtname) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_tgtname));
- LASSERTF((int)offsetof(struct cfg_marker, cm_comment) == 96, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_comment));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_comment) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_comment));
-
- /* Checks for struct llog_logid */
- LASSERTF((int)sizeof(struct llog_logid) == 20, "found %lld\n",
- (long long)(int)sizeof(struct llog_logid));
- LASSERTF((int)offsetof(struct llog_logid, lgl_oi) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid, lgl_oi));
- LASSERTF((int)sizeof(((struct llog_logid *)0)->lgl_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid *)0)->lgl_oi));
- LASSERTF((int)offsetof(struct llog_logid, lgl_ogen) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid, lgl_ogen));
- LASSERTF((int)sizeof(((struct llog_logid *)0)->lgl_ogen) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid *)0)->lgl_ogen));
- BUILD_BUG_ON(OST_SZ_REC != 274730752);
- BUILD_BUG_ON(MDS_UNLINK_REC != 274801668);
- BUILD_BUG_ON(MDS_UNLINK64_REC != 275325956);
- BUILD_BUG_ON(MDS_SETATTR64_REC != 275325953);
- BUILD_BUG_ON(OBD_CFG_REC != 274857984);
- BUILD_BUG_ON(LLOG_GEN_REC != 274989056);
- BUILD_BUG_ON(CHANGELOG_REC != 275120128);
- BUILD_BUG_ON(CHANGELOG_USER_REC != 275185664);
- BUILD_BUG_ON(LLOG_HDR_MAGIC != 275010873);
- BUILD_BUG_ON(LLOG_LOGID_MAGIC != 275010875);
-
- /* Checks for struct llog_catid */
- LASSERTF((int)sizeof(struct llog_catid) == 32, "found %lld\n",
- (long long)(int)sizeof(struct llog_catid));
- LASSERTF((int)offsetof(struct llog_catid, lci_logid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_catid, lci_logid));
- LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_logid) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llog_catid *)0)->lci_logid));
- LASSERTF((int)offsetof(struct llog_catid, lci_padding1) == 20, "found %lld\n",
- (long long)(int)offsetof(struct llog_catid, lci_padding1));
- LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding1));
- LASSERTF((int)offsetof(struct llog_catid, lci_padding2) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_catid, lci_padding2));
- LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding2));
- LASSERTF((int)offsetof(struct llog_catid, lci_padding3) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llog_catid, lci_padding3));
- LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding3));
-
- /* Checks for struct llog_rec_hdr */
- LASSERTF((int)sizeof(struct llog_rec_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(struct llog_rec_hdr));
- LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_len) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_hdr, lrh_len));
- LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_len));
- LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_index) == 4, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_hdr, lrh_index));
- LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_index));
- LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_type) == 8, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_hdr, lrh_type));
- LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_type));
- LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_id) == 12, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_hdr, lrh_id));
- LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_id));
-
- /* Checks for struct llog_rec_tail */
- LASSERTF((int)sizeof(struct llog_rec_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(struct llog_rec_tail));
- LASSERTF((int)offsetof(struct llog_rec_tail, lrt_len) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_tail, lrt_len));
- LASSERTF((int)sizeof(((struct llog_rec_tail *)0)->lrt_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_tail *)0)->lrt_len));
- LASSERTF((int)offsetof(struct llog_rec_tail, lrt_index) == 4, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_tail, lrt_index));
- LASSERTF((int)sizeof(((struct llog_rec_tail *)0)->lrt_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_tail *)0)->lrt_index));
-
- /* Checks for struct llog_logid_rec */
- LASSERTF((int)sizeof(struct llog_logid_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_logid_rec));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_hdr));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_hdr));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_id) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_id));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_id) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_id));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding1) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_padding1));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding1));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding2) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_padding2));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding2));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding3) == 48, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_padding3));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding3));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_tail));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_tail));
-
- /* Checks for struct llog_unlink_rec */
- LASSERTF((int)sizeof(struct llog_unlink_rec) == 40, "found %lld\n",
- (long long)(int)sizeof(struct llog_unlink_rec));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_hdr));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_hdr));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_oid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_oid));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_oid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_oid));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_oseq) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_oseq));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_oseq) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_oseq));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_count) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_count));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_count));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_tail) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_tail));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_tail));
- /* Checks for struct llog_unlink64_rec */
- LASSERTF((int)sizeof(struct llog_unlink64_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_unlink64_rec));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_hdr));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_hdr));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_fid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_fid));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_fid));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_count) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_count));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_count));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_tail));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_tail));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding1) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding1));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding1));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding2) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding2));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding2));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding3) == 48, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding3));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding3));
-
- /* Checks for struct llog_setattr64_rec */
- LASSERTF((int)sizeof(struct llog_setattr64_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_setattr64_rec));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_hdr));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_hdr));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_oi) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_oi));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_oi));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_uid) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_uid));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_uid_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_uid_h));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid_h));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_gid) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_gid));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_gid_h) == 44, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_gid_h));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_valid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_valid));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_valid));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_tail));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_tail));
-
- /* Checks for struct llog_size_change_rec */
- LASSERTF((int)sizeof(struct llog_size_change_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_size_change_rec));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_hdr));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_hdr));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_fid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_fid));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_fid));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_ioepoch) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_ioepoch));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_ioepoch) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_ioepoch));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding1) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding1));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding1));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding2) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding2));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding2));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding3) == 48, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding3));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding3));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_tail));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_tail));
-
- /* Checks for struct changelog_rec */
- LASSERTF((int)sizeof(struct changelog_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct changelog_rec));
- LASSERTF((int)offsetof(struct changelog_rec, cr_namelen) == 0, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_namelen));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_namelen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_namelen));
- LASSERTF((int)offsetof(struct changelog_rec, cr_flags) == 2, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_flags));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_flags));
- LASSERTF((int)offsetof(struct changelog_rec, cr_type) == 4, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_type));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_type));
- LASSERTF((int)offsetof(struct changelog_rec, cr_index) == 8, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_index));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_index) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_index));
- LASSERTF((int)offsetof(struct changelog_rec, cr_prev) == 16, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_prev));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_prev) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_prev));
- LASSERTF((int)offsetof(struct changelog_rec, cr_time) == 24, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_time));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_time));
- LASSERTF((int)offsetof(struct changelog_rec, cr_tfid) == 32, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_tfid));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_tfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_tfid));
- LASSERTF((int)offsetof(struct changelog_rec, cr_pfid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_pfid));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_pfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_pfid));
-
- /* Checks for struct changelog_setinfo */
- LASSERTF((int)sizeof(struct changelog_setinfo) == 12, "found %lld\n",
- (long long)(int)sizeof(struct changelog_setinfo));
- LASSERTF((int)offsetof(struct changelog_setinfo, cs_recno) == 0, "found %lld\n",
- (long long)(int)offsetof(struct changelog_setinfo, cs_recno));
- LASSERTF((int)sizeof(((struct changelog_setinfo *)0)->cs_recno) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_setinfo *)0)->cs_recno));
- LASSERTF((int)offsetof(struct changelog_setinfo, cs_id) == 8, "found %lld\n",
- (long long)(int)offsetof(struct changelog_setinfo, cs_id));
- LASSERTF((int)sizeof(((struct changelog_setinfo *)0)->cs_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_setinfo *)0)->cs_id));
-
- /* Checks for struct llog_changelog_rec */
- LASSERTF((int)sizeof(struct llog_changelog_rec) == 88, "found %lld\n",
- (long long)(int)sizeof(struct llog_changelog_rec));
- LASSERTF((int)offsetof(struct llog_changelog_rec, cr_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_rec, cr_hdr));
- LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_hdr));
- LASSERTF((int)offsetof(struct llog_changelog_rec, cr) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_rec, cr));
- LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr));
- LASSERTF((int)offsetof(struct llog_changelog_rec, cr_do_not_use) == 80, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_rec, cr_do_not_use));
- LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_do_not_use) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_do_not_use));
-
- /* Checks for struct llog_changelog_user_rec */
- LASSERTF((int)sizeof(struct llog_changelog_user_rec) == 40, "found %lld\n",
- (long long)(int)sizeof(struct llog_changelog_user_rec));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_hdr));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_hdr));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_id) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_id));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_id));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_padding) == 20, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_padding));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_padding));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_endrec) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_endrec));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_endrec) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_endrec));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_tail) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_tail));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_tail));
-
- /* Checks for struct llog_gen */
- LASSERTF((int)sizeof(struct llog_gen) == 16, "found %lld\n",
- (long long)(int)sizeof(struct llog_gen));
- LASSERTF((int)offsetof(struct llog_gen, mnt_cnt) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen, mnt_cnt));
- LASSERTF((int)sizeof(((struct llog_gen *)0)->mnt_cnt) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen *)0)->mnt_cnt));
- LASSERTF((int)offsetof(struct llog_gen, conn_cnt) == 8, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen, conn_cnt));
- LASSERTF((int)sizeof(((struct llog_gen *)0)->conn_cnt) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen *)0)->conn_cnt));
-
- /* Checks for struct llog_gen_rec */
- LASSERTF((int)sizeof(struct llog_gen_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_gen_rec));
- LASSERTF((int)offsetof(struct llog_gen_rec, lgr_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen_rec, lgr_hdr));
- LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_hdr));
- LASSERTF((int)offsetof(struct llog_gen_rec, lgr_gen) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen_rec, lgr_gen));
- LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_gen) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_gen));
- LASSERTF((int)offsetof(struct llog_gen_rec, lgr_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen_rec, lgr_tail));
- LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_tail));
-
- /* Checks for struct llog_log_hdr */
- LASSERTF((int)sizeof(struct llog_log_hdr) == 8192, "found %lld\n",
- (long long)(int)sizeof(struct llog_log_hdr));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_hdr));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_hdr));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_timestamp) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_timestamp));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_timestamp) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_timestamp));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_count) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_count));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_count));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_bitmap_offset) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_bitmap_offset));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap_offset) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap_offset));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_size) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_size));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_size));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_flags) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_flags));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_flags));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_cat_idx) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_cat_idx));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_cat_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_cat_idx));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_tgtuuid) == 44, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_tgtuuid));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_tgtuuid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_tgtuuid));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_reserved) == 84, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_reserved));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_reserved) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_reserved));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_bitmap) == 88, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_bitmap));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap) == 8096, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_tail) == 8184, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_tail));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_tail));
-
- /* Checks for struct llog_cookie */
- LASSERTF((int)sizeof(struct llog_cookie) == 32, "found %lld\n",
- (long long)(int)sizeof(struct llog_cookie));
- LASSERTF((int)offsetof(struct llog_cookie, lgc_lgl) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_cookie, lgc_lgl));
- LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_lgl) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_lgl));
- LASSERTF((int)offsetof(struct llog_cookie, lgc_subsys) == 20, "found %lld\n",
- (long long)(int)offsetof(struct llog_cookie, lgc_subsys));
- LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_subsys) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_subsys));
- LASSERTF((int)offsetof(struct llog_cookie, lgc_index) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_cookie, lgc_index));
- LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_index));
- LASSERTF((int)offsetof(struct llog_cookie, lgc_padding) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llog_cookie, lgc_padding));
- LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_padding));
-
- /* Checks for struct llogd_body */
- LASSERTF((int)sizeof(struct llogd_body) == 48, "found %lld\n",
- (long long)(int)sizeof(struct llogd_body));
- LASSERTF((int)offsetof(struct llogd_body, lgd_logid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_logid));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_logid) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_logid));
- LASSERTF((int)offsetof(struct llogd_body, lgd_ctxt_idx) == 20, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_ctxt_idx));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_ctxt_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_ctxt_idx));
- LASSERTF((int)offsetof(struct llogd_body, lgd_llh_flags) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_llh_flags));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_llh_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_llh_flags));
- LASSERTF((int)offsetof(struct llogd_body, lgd_index) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_index));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_index));
- LASSERTF((int)offsetof(struct llogd_body, lgd_saved_index) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_saved_index));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_saved_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_saved_index));
- LASSERTF((int)offsetof(struct llogd_body, lgd_len) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_len));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_len));
- LASSERTF((int)offsetof(struct llogd_body, lgd_cur_offset) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_cur_offset));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_cur_offset) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_cur_offset));
- BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_CREATE != 501);
- BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_NEXT_BLOCK != 502);
- BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_READ_HEADER != 503);
- BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_WRITE_REC != 504);
- BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_CLOSE != 505);
- BUILD_BUG_ON(LLOG_ORIGIN_CONNECT != 506);
- BUILD_BUG_ON(LLOG_CATINFO != 507);
- BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_PREV_BLOCK != 508);
- BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_DESTROY != 509);
- BUILD_BUG_ON(LLOG_FIRST_OPC != 501);
- BUILD_BUG_ON(LLOG_LAST_OPC != 510);
- BUILD_BUG_ON(LLOG_CONFIG_ORIG_CTXT != 0);
- BUILD_BUG_ON(LLOG_CONFIG_REPL_CTXT != 1);
- BUILD_BUG_ON(LLOG_MDS_OST_ORIG_CTXT != 2);
- BUILD_BUG_ON(LLOG_MDS_OST_REPL_CTXT != 3);
- BUILD_BUG_ON(LLOG_SIZE_ORIG_CTXT != 4);
- BUILD_BUG_ON(LLOG_SIZE_REPL_CTXT != 5);
- BUILD_BUG_ON(LLOG_TEST_ORIG_CTXT != 8);
- BUILD_BUG_ON(LLOG_TEST_REPL_CTXT != 9);
- BUILD_BUG_ON(LLOG_CHANGELOG_ORIG_CTXT != 12);
- BUILD_BUG_ON(LLOG_CHANGELOG_REPL_CTXT != 13);
- BUILD_BUG_ON(LLOG_CHANGELOG_USER_ORIG_CTXT != 14);
- BUILD_BUG_ON(LLOG_AGENT_ORIG_CTXT != 15);
- BUILD_BUG_ON(LLOG_MAX_CTXTS != 16);
-
- /* Checks for struct llogd_conn_body */
- LASSERTF((int)sizeof(struct llogd_conn_body) == 40, "found %lld\n",
- (long long)(int)sizeof(struct llogd_conn_body));
- LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_gen) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llogd_conn_body, lgdc_gen));
- LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_gen) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_gen));
- LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_logid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llogd_conn_body, lgdc_logid));
- LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_logid) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_logid));
- LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_ctxt_idx) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llogd_conn_body, lgdc_ctxt_idx));
- LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx));
-
- /* Checks for struct fiemap_info_key */
- LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n",
- (long long)(int)sizeof(struct ll_fiemap_info_key));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_name[8]) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_name[8]));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_oa) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_oa));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa) == 208, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_fiemap) == 216, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_fiemap));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap));
-
- /* Checks for struct mgs_target_info */
- LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
- (long long)(int)sizeof(struct mgs_target_info));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_lustre_ver) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_lustre_ver));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_lustre_ver) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_lustre_ver));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_stripe_index) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_stripe_index));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_stripe_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_stripe_index));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_config_ver) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_config_ver));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_config_ver) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_config_ver));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_flags) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_flags));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_flags));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_nid_count) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_nid_count));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_nid_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_nid_count));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_instance) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_instance));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_instance) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_instance));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_fsname) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_fsname));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_fsname) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_fsname));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_svname) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_svname));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_svname) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_svname));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_uuid) == 152, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_uuid));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_uuid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_uuid));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_nids) == 192, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_nids));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_nids) == 256, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_nids));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_params) == 448, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_params));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_params) == 4096, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_params));
-
- /* Checks for struct lustre_capa */
- LASSERTF((int)sizeof(struct lustre_capa) == 120, "found %lld\n",
- (long long)(int)sizeof(struct lustre_capa));
- LASSERTF((int)offsetof(struct lustre_capa, lc_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_fid));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_fid));
- LASSERTF((int)offsetof(struct lustre_capa, lc_opc) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_opc));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_opc) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_opc));
- LASSERTF((int)offsetof(struct lustre_capa, lc_uid) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_uid));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_uid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_uid));
- LASSERTF((int)offsetof(struct lustre_capa, lc_gid) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_gid));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_gid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_gid));
- LASSERTF((int)offsetof(struct lustre_capa, lc_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_flags));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_flags));
- LASSERTF((int)offsetof(struct lustre_capa, lc_keyid) == 44, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_keyid));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_keyid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_keyid));
- LASSERTF((int)offsetof(struct lustre_capa, lc_timeout) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_timeout));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_timeout) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_timeout));
- LASSERTF((int)offsetof(struct lustre_capa, lc_expiry) == 52, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_expiry));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_expiry) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_expiry));
- BUILD_BUG_ON(CAPA_HMAC_MAX_LEN != 64);
- LASSERTF((int)offsetof(struct lustre_capa, lc_hmac[64]) == 120, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_hmac[64]));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_hmac[64]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_hmac[64]));
-
- /* Checks for struct lustre_capa_key */
- LASSERTF((int)sizeof(struct lustre_capa_key) == 72, "found %lld\n",
- (long long)(int)sizeof(struct lustre_capa_key));
- LASSERTF((int)offsetof(struct lustre_capa_key, lk_seq) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa_key, lk_seq));
- LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_seq) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_seq));
- LASSERTF((int)offsetof(struct lustre_capa_key, lk_keyid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa_key, lk_keyid));
- LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_keyid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_keyid));
- LASSERTF((int)offsetof(struct lustre_capa_key, lk_padding) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa_key, lk_padding));
- LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_padding));
- BUILD_BUG_ON(CAPA_HMAC_KEY_MAX_LEN != 56);
- LASSERTF((int)offsetof(struct lustre_capa_key, lk_key[56]) == 72, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa_key, lk_key[56]));
- LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_key[56]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_key[56]));
-
- /* Checks for struct getinfo_fid2path */
- LASSERTF((int)sizeof(struct getinfo_fid2path) == 32, "found %lld\n",
- (long long)(int)sizeof(struct getinfo_fid2path));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_fid));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_fid));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_recno) == 16, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_recno));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_recno) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_recno));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_linkno) == 24, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_linkno));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_linkno) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_linkno));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_pathlen) == 28, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_pathlen));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_pathlen) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_pathlen));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_path[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_path[0]));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]));
-
- /* Checks for struct fiemap */
- LASSERTF((int)sizeof(struct fiemap) == 32, "found %lld\n",
- (long long)(int)sizeof(struct fiemap));
- LASSERTF((int)offsetof(struct fiemap, fm_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct fiemap, fm_start));
- LASSERTF((int)sizeof(((struct fiemap *)0)->fm_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap *)0)->fm_start));
- LASSERTF((int)offsetof(struct fiemap, fm_length) == 8, "found %lld\n",
- (long long)(int)offsetof(struct fiemap, fm_length));
- LASSERTF((int)sizeof(((struct fiemap *)0)->fm_length) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap *)0)->fm_length));
- LASSERTF((int)offsetof(struct fiemap, fm_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct fiemap, fm_flags));
- LASSERTF((int)sizeof(((struct fiemap *)0)->fm_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap *)0)->fm_flags));
- LASSERTF((int)offsetof(struct fiemap, fm_mapped_extents) == 20, "found %lld\n",
- (long long)(int)offsetof(struct fiemap, fm_mapped_extents));
- LASSERTF((int)sizeof(((struct fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap *)0)->fm_mapped_extents));
- LASSERTF((int)offsetof(struct fiemap, fm_extent_count) == 24, "found %lld\n",
- (long long)(int)offsetof(struct fiemap, fm_extent_count));
- LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap *)0)->fm_extent_count));
- LASSERTF((int)offsetof(struct fiemap, fm_reserved) == 28, "found %lld\n",
- (long long)(int)offsetof(struct fiemap, fm_reserved));
- LASSERTF((int)sizeof(((struct fiemap *)0)->fm_reserved) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap *)0)->fm_reserved));
- LASSERTF((int)offsetof(struct fiemap, fm_extents) == 32, "found %lld\n",
- (long long)(int)offsetof(struct fiemap, fm_extents));
- LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extents) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap *)0)->fm_extents));
- BUILD_BUG_ON(FIEMAP_FLAG_SYNC != 0x00000001);
- BUILD_BUG_ON(FIEMAP_FLAG_XATTR != 0x00000002);
- BUILD_BUG_ON(FIEMAP_FLAG_DEVICE_ORDER != 0x40000000);
-
- /* Checks for struct fiemap_extent */
- LASSERTF((int)sizeof(struct fiemap_extent) == 56, "found %lld\n",
- (long long)(int)sizeof(struct fiemap_extent));
- LASSERTF((int)offsetof(struct fiemap_extent, fe_logical) == 0, "found %lld\n",
- (long long)(int)offsetof(struct fiemap_extent, fe_logical));
- LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_logical));
- LASSERTF((int)offsetof(struct fiemap_extent, fe_physical) == 8, "found %lld\n",
- (long long)(int)offsetof(struct fiemap_extent, fe_physical));
- LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_physical));
- LASSERTF((int)offsetof(struct fiemap_extent, fe_length) == 16, "found %lld\n",
- (long long)(int)offsetof(struct fiemap_extent, fe_length));
- LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_length));
- LASSERTF((int)offsetof(struct fiemap_extent, fe_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct fiemap_extent, fe_flags));
- LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_flags));
- LASSERTF((int)offsetof(struct fiemap_extent, fe_reserved[0]) == 44, "found %lld\n",
- (long long)(int)offsetof(struct fiemap_extent, fe_reserved[0]));
- LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]));
- BUILD_BUG_ON(FIEMAP_EXTENT_LAST != 0x00000001);
- BUILD_BUG_ON(FIEMAP_EXTENT_UNKNOWN != 0x00000002);
- BUILD_BUG_ON(FIEMAP_EXTENT_DELALLOC != 0x00000004);
- BUILD_BUG_ON(FIEMAP_EXTENT_ENCODED != 0x00000008);
- BUILD_BUG_ON(FIEMAP_EXTENT_DATA_ENCRYPTED != 0x00000080);
- BUILD_BUG_ON(FIEMAP_EXTENT_NOT_ALIGNED != 0x00000100);
- BUILD_BUG_ON(FIEMAP_EXTENT_DATA_INLINE != 0x00000200);
- BUILD_BUG_ON(FIEMAP_EXTENT_DATA_TAIL != 0x00000400);
- BUILD_BUG_ON(FIEMAP_EXTENT_UNWRITTEN != 0x00000800);
- BUILD_BUG_ON(FIEMAP_EXTENT_MERGED != 0x00001000);
- BUILD_BUG_ON(FIEMAP_EXTENT_NO_DIRECT != 0x40000000);
- BUILD_BUG_ON(FIEMAP_EXTENT_NET != 0x80000000);
-
- /* Checks for type posix_acl_xattr_entry */
- LASSERTF((int)sizeof(struct posix_acl_xattr_entry) == 8, "found %lld\n",
- (long long)(int)sizeof(struct posix_acl_xattr_entry));
- LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_tag) == 0, "found %lld\n",
- (long long)(int)offsetof(struct posix_acl_xattr_entry, e_tag));
- LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_tag) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_tag));
- LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_perm) == 2, "found %lld\n",
- (long long)(int)offsetof(struct posix_acl_xattr_entry, e_perm));
- LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_perm) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_perm));
- LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_id) == 4, "found %lld\n",
- (long long)(int)offsetof(struct posix_acl_xattr_entry, e_id));
- LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_id));
-
- /* Checks for type posix_acl_xattr_header */
- LASSERTF((int)sizeof(struct posix_acl_xattr_header) == 4, "found %lld\n",
- (long long)(int)sizeof(struct posix_acl_xattr_header));
- LASSERTF((int)offsetof(struct posix_acl_xattr_header, a_version) == 0, "found %lld\n",
- (long long)(int)offsetof(struct posix_acl_xattr_header, a_version));
- LASSERTF((int)sizeof(((struct posix_acl_xattr_header *)0)->a_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct posix_acl_xattr_header *)0)->a_version));
-
- /* Checks for struct link_ea_header */
- LASSERTF((int)sizeof(struct link_ea_header) == 24, "found %lld\n",
- (long long)(int)sizeof(struct link_ea_header));
- LASSERTF((int)offsetof(struct link_ea_header, leh_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, leh_magic));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->leh_magic));
- LASSERTF((int)offsetof(struct link_ea_header, leh_reccount) == 4, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, leh_reccount));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_reccount) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->leh_reccount));
- LASSERTF((int)offsetof(struct link_ea_header, leh_len) == 8, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, leh_len));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_len) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->leh_len));
- LASSERTF((int)offsetof(struct link_ea_header, leh_overflow_time) == 16, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, leh_overflow_time));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_overflow_time) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->leh_overflow_time));
- LASSERTF((int)offsetof(struct link_ea_header, leh_padding) == 20, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, leh_padding));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->leh_padding));
- BUILD_BUG_ON(LINK_EA_MAGIC != 0x11EAF1DFUL);
-
- /* Checks for struct link_ea_entry */
- LASSERTF((int)sizeof(struct link_ea_entry) == 18, "found %lld\n",
- (long long)(int)sizeof(struct link_ea_entry));
- LASSERTF((int)offsetof(struct link_ea_entry, lee_reclen) == 0, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_entry, lee_reclen));
- LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_reclen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_reclen));
- LASSERTF((int)offsetof(struct link_ea_entry, lee_parent_fid) == 2, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_entry, lee_parent_fid));
- LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_parent_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_parent_fid));
- LASSERTF((int)offsetof(struct link_ea_entry, lee_name) == 18, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_entry, lee_name));
- LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_name) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_name));
-
- /* Checks for struct layout_intent */
- LASSERTF((int)sizeof(struct layout_intent) == 24, "found %lld\n",
- (long long)(int)sizeof(struct layout_intent));
- LASSERTF((int)offsetof(struct layout_intent, li_opc) == 0, "found %lld\n",
- (long long)(int)offsetof(struct layout_intent, li_opc));
- LASSERTF((int)sizeof(((struct layout_intent *)0)->li_opc) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct layout_intent *)0)->li_opc));
- LASSERTF((int)offsetof(struct layout_intent, li_flags) == 4, "found %lld\n",
- (long long)(int)offsetof(struct layout_intent, li_flags));
- LASSERTF((int)sizeof(((struct layout_intent *)0)->li_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct layout_intent *)0)->li_flags));
- LASSERTF((int)offsetof(struct layout_intent, li_start) == 8, "found %lld\n",
- (long long)(int)offsetof(struct layout_intent, li_start));
- LASSERTF((int)sizeof(((struct layout_intent *)0)->li_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct layout_intent *)0)->li_start));
- LASSERTF((int)offsetof(struct layout_intent, li_end) == 16, "found %lld\n",
- (long long)(int)offsetof(struct layout_intent, li_end));
- LASSERTF((int)sizeof(((struct layout_intent *)0)->li_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct layout_intent *)0)->li_end));
- LASSERTF(LAYOUT_INTENT_ACCESS == 0, "found %lld\n",
- (long long)LAYOUT_INTENT_ACCESS);
- LASSERTF(LAYOUT_INTENT_READ == 1, "found %lld\n",
- (long long)LAYOUT_INTENT_READ);
- LASSERTF(LAYOUT_INTENT_WRITE == 2, "found %lld\n",
- (long long)LAYOUT_INTENT_WRITE);
- LASSERTF(LAYOUT_INTENT_GLIMPSE == 3, "found %lld\n",
- (long long)LAYOUT_INTENT_GLIMPSE);
- LASSERTF(LAYOUT_INTENT_TRUNC == 4, "found %lld\n",
- (long long)LAYOUT_INTENT_TRUNC);
- LASSERTF(LAYOUT_INTENT_RELEASE == 5, "found %lld\n",
- (long long)LAYOUT_INTENT_RELEASE);
- LASSERTF(LAYOUT_INTENT_RESTORE == 6, "found %lld\n",
- (long long)LAYOUT_INTENT_RESTORE);
-
- /* Checks for struct hsm_action_item */
- LASSERTF((int)sizeof(struct hsm_action_item) == 72, "found %lld\n",
- (long long)(int)sizeof(struct hsm_action_item));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_len) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_len));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_len));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_action) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_action));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_action) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_action));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_fid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_fid));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_fid));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_dfid) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_dfid));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_dfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_dfid));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_extent) == 40, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_extent));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_extent) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_extent));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_cookie) == 56, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_cookie));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_cookie) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_cookie));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_gid) == 64, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_gid));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_gid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_gid));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_data) == 72, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_data));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_data) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_data));
-
- /* Checks for struct hsm_action_list */
- LASSERTF((int)sizeof(struct hsm_action_list) == 32, "found %lld\n",
- (long long)(int)sizeof(struct hsm_action_list));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_version) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_version));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_version));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_count));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_count));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_compound_id) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_compound_id));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_compound_id) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_compound_id));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_flags));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_flags));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_archive_id) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_archive_id));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_archive_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_archive_id));
- LASSERTF((int)offsetof(struct hsm_action_list, padding1) == 28, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, padding1));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->padding1));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_fsname) == 32, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_fsname));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_fsname) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_fsname));
-
- /* Checks for struct hsm_progress */
- LASSERTF((int)sizeof(struct hsm_progress) == 48, "found %lld\n",
- (long long)(int)sizeof(struct hsm_progress));
- LASSERTF((int)offsetof(struct hsm_progress, hp_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_fid));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_fid));
- LASSERTF((int)offsetof(struct hsm_progress, hp_cookie) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_cookie));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_cookie) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_cookie));
- LASSERTF((int)offsetof(struct hsm_progress, hp_extent) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_extent));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_extent) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_extent));
- LASSERTF((int)offsetof(struct hsm_progress, hp_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_flags));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_flags));
- LASSERTF((int)offsetof(struct hsm_progress, hp_errval) == 42, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_errval));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_errval) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_errval));
- LASSERTF((int)offsetof(struct hsm_progress, padding) == 44, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, padding));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->padding));
- LASSERTF(HP_FLAG_COMPLETED == 0x01, "found 0x%.8x\n",
- HP_FLAG_COMPLETED);
- LASSERTF(HP_FLAG_RETRY == 0x02, "found 0x%.8x\n",
- HP_FLAG_RETRY);
-
- LASSERTF((int)offsetof(struct hsm_copy, hc_data_version) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, hc_data_version));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_data_version) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->hc_data_version));
- LASSERTF((int)offsetof(struct hsm_copy, hc_flags) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, hc_flags));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->hc_flags));
- LASSERTF((int)offsetof(struct hsm_copy, hc_errval) == 10, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, hc_errval));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_errval) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->hc_errval));
- LASSERTF((int)offsetof(struct hsm_copy, padding) == 12, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, padding));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->padding));
- LASSERTF((int)offsetof(struct hsm_copy, hc_hai) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, hc_hai));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_hai) == 72, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->hc_hai));
-
- /* Checks for struct hsm_progress_kernel */
- LASSERTF((int)sizeof(struct hsm_progress_kernel) == 64, "found %lld\n",
- (long long)(int)sizeof(struct hsm_progress_kernel));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_fid));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_fid));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_cookie) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_cookie));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_cookie) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_cookie));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_extent) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_extent));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_extent) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_extent));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_flags));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_flags));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_errval) == 42, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_errval));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_errval) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_errval));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_padding1) == 44, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_padding1));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding1));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_data_version) == 48, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_data_version));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_data_version) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_data_version));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_padding2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_padding2));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding2));
-
- /* Checks for struct hsm_user_item */
- LASSERTF((int)sizeof(struct hsm_user_item) == 32, "found %lld\n",
- (long long)(int)sizeof(struct hsm_user_item));
- LASSERTF((int)offsetof(struct hsm_user_item, hui_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_item, hui_fid));
- LASSERTF((int)sizeof(((struct hsm_user_item *)0)->hui_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_item *)0)->hui_fid));
- LASSERTF((int)offsetof(struct hsm_user_item, hui_extent) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_item, hui_extent));
- LASSERTF((int)sizeof(((struct hsm_user_item *)0)->hui_extent) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_item *)0)->hui_extent));
-
- /* Checks for struct hsm_user_state */
- LASSERTF((int)sizeof(struct hsm_user_state) == 32, "found %lld\n",
- (long long)(int)sizeof(struct hsm_user_state));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_states) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_states));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_states) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_states));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_archive_id) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_archive_id));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_archive_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_archive_id));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_state) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_state));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_state) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_state));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_action) == 12, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_action));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_action) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_action));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_location) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_location));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_location) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_location));
-
- /* Checks for struct hsm_state_set */
- LASSERTF((int)sizeof(struct hsm_state_set) == 24, "found %lld\n",
- (long long)(int)sizeof(struct hsm_state_set));
- LASSERTF((int)offsetof(struct hsm_state_set, hss_valid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_state_set, hss_valid));
- LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_valid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_valid));
- LASSERTF((int)offsetof(struct hsm_state_set, hss_archive_id) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_state_set, hss_archive_id));
- LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_archive_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_archive_id));
- LASSERTF((int)offsetof(struct hsm_state_set, hss_setmask) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_state_set, hss_setmask));
- LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_setmask) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_setmask));
- LASSERTF((int)offsetof(struct hsm_state_set, hss_clearmask) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_state_set, hss_clearmask));
- LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_clearmask) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_clearmask));
-
- /* Checks for struct hsm_current_action */
- LASSERTF((int)sizeof(struct hsm_current_action) == 24, "found %lld\n",
- (long long)(int)sizeof(struct hsm_current_action));
- LASSERTF((int)offsetof(struct hsm_current_action, hca_state) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_current_action, hca_state));
- LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_state) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_state));
- LASSERTF((int)offsetof(struct hsm_current_action, hca_action) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_current_action, hca_action));
- LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_action) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_action));
- LASSERTF((int)offsetof(struct hsm_current_action, hca_location) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_current_action, hca_location));
- LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_location) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_location));
-
- /* Checks for struct hsm_request */
- LASSERTF((int)sizeof(struct hsm_request) == 24, "found %lld\n",
- (long long)(int)sizeof(struct hsm_request));
- LASSERTF((int)offsetof(struct hsm_request, hr_action) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_action));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_action) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_action));
- LASSERTF((int)offsetof(struct hsm_request, hr_archive_id) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_archive_id));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_archive_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_archive_id));
- LASSERTF((int)offsetof(struct hsm_request, hr_flags) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_flags));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_flags));
- LASSERTF((int)offsetof(struct hsm_request, hr_itemcount) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_itemcount));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_itemcount) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_itemcount));
- LASSERTF((int)offsetof(struct hsm_request, hr_data_len) == 20, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_data_len));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len));
- LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned int)HSM_FORCE_ACTION);
- LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned int)HSM_GHOST_COPY);
-
- /* Checks for struct hsm_user_request */
- LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n",
- (long long)(int)sizeof(struct hsm_user_request));
- LASSERTF((int)offsetof(struct hsm_user_request, hur_request) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_request, hur_request));
- LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_request) == 24, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_request *)0)->hur_request));
- LASSERTF((int)offsetof(struct hsm_user_request, hur_user_item) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_request, hur_user_item));
- LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_user_item) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_request *)0)->hur_user_item));
-
- /* Checks for struct hsm_user_import */
- LASSERTF(sizeof(struct hsm_user_import) == 48, "found %lld\n",
- (long long)sizeof(struct hsm_user_import));
- LASSERTF(offsetof(struct hsm_user_import, hui_size) == 0,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_size));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_size) == 8,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_size));
- LASSERTF(offsetof(struct hsm_user_import, hui_uid) == 32,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_uid));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_uid) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_uid));
- LASSERTF(offsetof(struct hsm_user_import, hui_gid) == 36,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_gid));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_gid) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_gid));
- LASSERTF(offsetof(struct hsm_user_import, hui_mode) == 40,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_mode));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mode) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_mode));
- LASSERTF(offsetof(struct hsm_user_import, hui_atime) == 8,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_atime));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime) == 8,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_atime));
- LASSERTF(offsetof(struct hsm_user_import, hui_atime_ns) == 24,
- "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_import, hui_atime_ns));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime_ns) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_atime_ns));
- LASSERTF(offsetof(struct hsm_user_import, hui_mtime) == 16,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_mtime));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime) == 8,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime));
- LASSERTF(offsetof(struct hsm_user_import, hui_mtime_ns) == 28,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_mtime_ns));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime_ns) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime_ns));
- LASSERTF(offsetof(struct hsm_user_import, hui_archive_id) == 44,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_archive_id));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id));
-}
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
deleted file mode 100644
index 8691c6543a9c..000000000000
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ /dev/null
@@ -1,654 +0,0 @@
-What: /sys/fs/lustre/version
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows current running lustre version.
-
-What: /sys/fs/lustre/pinger
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows if the lustre module has pinger support.
- "on" means yes and "off" means no.
-
-What: /sys/fs/lustre/health_check
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows whenever current system state believed to be "healthy",
- "NOT HEALTHY", or "LBUG" whenever lustre has experienced
- an internal assertion failure
-
-What: /sys/fs/lustre/jobid_name
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Currently running job "name" for this node to be transferred
- to Lustre servers for purposes of QoS and statistics gathering.
- Writing into this file will change the name, reading outputs
- currently set value.
-
-What: /sys/fs/lustre/jobid_var
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Control file for lustre "jobstats" functionality, write new
- value from the list below to change the mode:
- disable - disable job name reporting to the servers (default)
- procname_uid - form the job name as the current running
- command name and pid with a dot in between
- e.g. dd.1253
- nodelocal - use jobid_name value from above.
-
-What: /sys/fs/lustre/timeout
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls "lustre timeout" variable, also known as obd_timeout
- in some old manual. In the past obd_timeout was of paramount
- importance as the timeout value used everywhere and where
- other timeouts were derived from. These days it's much less
- important as network timeouts are mostly determined by
- AT (adaptive timeouts).
- Unit: seconds, default: 100
-
-What: /sys/fs/lustre/max_dirty_mb
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls total number of dirty cache (in megabytes) allowed
- across all mounted lustre filesystems.
- Since writeout of dirty pages in Lustre is somewhat expensive,
- when you allow to many dirty pages, this might lead to
- performance degradations as kernel tries to desperately
- find some pages to free/writeout.
- Default 1/2 RAM. Min value 4, max value 9/10 of RAM.
-
-What: /sys/fs/lustre/debug_peer_on_timeout
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Control if lnet debug information should be printed when
- an RPC timeout occurs.
- 0 disabled (default)
- 1 enabled
-
-What: /sys/fs/lustre/dump_on_timeout
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls if Lustre debug log should be dumped when an RPC
- timeout occurs. This is useful if yout debug buffer typically
- rolls over by the time you notice RPC timeouts.
-
-What: /sys/fs/lustre/dump_on_eviction
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls if Lustre debug log should be dumped when an this
- client is evicted from one of the servers.
- This is useful if yout debug buffer typically rolls over
- by the time you notice the eviction event.
-
-What: /sys/fs/lustre/at_min
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls minimum adaptive timeout in seconds. If you encounter
- a case where clients timeout due to server-reported processing
- time being too short, you might consider increasing this value.
- One common case of this if the underlying network has
- unpredictable long delays.
- Default: 0
-
-What: /sys/fs/lustre/at_max
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls maximum adaptive timeout in seconds. If at_max timeout
- is reached for an RPC, the RPC will time out.
- Some genuinuely slow network hardware might warrant increasing
- this value.
- Setting this value to 0 disables Adaptive Timeouts
- functionality and old-style obd_timeout value is then used.
- Default: 600
-
-What: /sys/fs/lustre/at_extra
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls how much extra time to request for unfinished requests
- in processing in seconds. Normally a server-side parameter, it
- is also used on the client for responses to various LDLM ASTs
- that are handled with a special server thread on the client.
- This is a way for the servers to ask the clients not to time
- out the request that reached current servicing time estimate
- yet and give it some more time.
- Default: 30
-
-What: /sys/fs/lustre/at_early_margin
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls when to send the early reply for requests that are
- about to timeout as an offset to the estimated service time in
- seconds..
- Default: 5
-
-What: /sys/fs/lustre/at_history
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls for how many seconds to remember slowest events
- encountered by adaptive timeouts code.
- Default: 600
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/blocksize
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Biggest blocksize on object storage server for this filesystem.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/kbytestotal
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows total number of kilobytes of space on this filesystem
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/kbytesfree
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows total number of free kilobytes of space on this filesystem
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/kbytesavail
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows total number of free kilobytes of space on this filesystem
- actually available for use (taking into account per-client
- grants and filesystem reservations).
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/filestotal
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows total number of inodes on the filesystem.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/filesfree
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows estimated number of free inodes on the filesystem
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/client_type
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows whenever this filesystem considers this client to be
- compute cluster-local or remote. Remote clients have
- additional uid/gid convrting logic applied.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/fstype
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows filesystem type of the filesystem
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/uuid
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows this filesystem superblock uuid
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/max_read_ahead_mb
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Sets maximum number of megabytes in system memory to be
- given to read-ahead cache.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/max_read_ahead_per_file_mb
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Sets maximum number of megabytes to read-ahead for a single file
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/max_read_ahead_whole_mb
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- For small reads, how many megabytes to actually request from
- the server as initial read-ahead.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/checksum_pages
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Enables or disables per-page checksum at llite layer, before
- the pages are actually given to lower level for network transfer
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/stats_track_pid
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Limit Lustre vfs operations gathering to just a single pid.
- 0 to track everything.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/stats_track_ppid
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Limit Lustre vfs operations gathering to just a single ppid.
- 0 to track everything.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/stats_track_gid
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Limit Lustre vfs operations gathering to just a single gid.
- 0 to track everything.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/statahead_max
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls maximum number of statahead requests to send when
- sequential readdir+stat pattern is detected.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/statahead_agl
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls if AGL (async glimpse ahead - obtain object information
- from OSTs in parallel with MDS during statahead) should be
- enabled or disabled.
- 0 to disable, 1 to enable.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/lazystatfs
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls statfs(2) behaviour in the face of down servers.
- If 0, always wait for all servers to come online,
- if 1, ignote inactive servers.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/max_easize
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows maximum number of bytes file striping data could be
- in current configuration of storage.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/default_easize
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows maximum observed file striping data seen by this
- filesystem client instance.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/xattr_cache
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls extended attributes client-side cache.
- 1 to enable, 0 to disable.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/unstable_stats
-Date: Apr 2016
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows number of pages that were sent and acknowledged by
- server but were not yet committed and therefore still
- pinned in client memory even though no longer dirty.
-
-What: /sys/fs/lustre/ldlm/cancel_unused_locks_before_replay
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls if client should replay unused locks during recovery
- If a client tends to have a lot of unused locks in LRU,
- recovery times might become prolonged.
- 1 - just locally cancel unused locks (default)
- 0 - replay unused locks.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/resource_count
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Displays number of lock resources (objects on which individual
- locks are taken) currently allocated in this namespace.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/lock_count
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Displays number or locks allocated in this namespace.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/lru_size
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls and displays LRU size limit for unused locks for this
- namespace.
- 0 - LRU size is unlimited, controlled by server resources
- positive number - number of locks to allow in lock LRU list
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/lock_unused_count
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Display number of locks currently sitting in the LRU list
- of this namespace
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/lru_max_age
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Maximum number of milliseconds a lock could sit in LRU list
- before client would voluntarily cancel it as unused.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/early_lock_cancel
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls "early lock cancellation" feature on this namespace
- if supported by the server.
- When enabled, tries to preemtively cancel locks that would be
- cancelled by verious operations and bundle the cancellation
- requests in the same RPC as the main operation, which results
- in significant speedups due to reduced lock-pingpong RPCs.
- 0 - disabled
- 1 - enabled (default)
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/granted
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Displays number of granted locks in this namespace
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_rate
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of granted locks in this namespace during last
- time interval
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/cancel_rate
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of lock cancellations in this namespace during
- last time interval
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_speed
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Calculated speed of lock granting (grant_rate - cancel_rate)
- in this namespace
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_plan
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Estimated number of locks to be granted in the next time
- interval in this namespace
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/limit
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls number of allowed locks in this pool.
- When lru_size is 0, this is the actual limit then.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/lock_volume_factor
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Multiplier for all lock volume calculations above.
- Default is 1. Increase to make the client to more agressively
- clean it's lock LRU list for this namespace.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/server_lock_volume
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Calculated server lock volume.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/recalc_period
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls length of time between recalculation of above
- values (in seconds).
-
-What: /sys/fs/lustre/ldlm/services/ldlm_cbd/threads_min
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls minimum number of ldlm callback threads to start.
-
-What: /sys/fs/lustre/ldlm/services/ldlm_cbd/threads_max
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls maximum number of ldlm callback threads to start.
-
-What: /sys/fs/lustre/ldlm/services/ldlm_cbd/threads_started
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows actual number of ldlm callback threads running.
-
-What: /sys/fs/lustre/ldlm/services/ldlm_cbd/high_priority_ratio
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls what percentage of ldlm callback threads is dedicated
- to "high priority" incoming requests.
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/blocksize
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Blocksize on backend filesystem for service behind this obd
- device (or biggest blocksize for compound devices like lov
- and lmv)
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/kbytestotal
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Total number of kilobytes of space on backend filesystem
- for service behind this obd (or total amount for compound
- devices like lov lmv)
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/kbytesfree
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of free kilobytes on backend filesystem for service
- behind this obd (or total amount for compound devices
- like lov lmv)
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/kbytesavail
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of kilobytes of free space on backend filesystem
- for service behind this obd (or total amount for compound
- devices like lov lmv) that is actually available for use
- (taking into account per-client and filesystem reservations).
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/filestotal
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of inodes on backend filesystem for service behind this
- obd.
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/filesfree
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of free inodes on backend filesystem for service
- behind this obd.
-
-What: /sys/fs/lustre/mdc/{connection_name}/max_pages_per_rpc
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Maximum number of readdir pages to fit into a single readdir
- RPC.
-
-What: /sys/fs/lustre/{mdc,osc}/{connection_name}/max_rpcs_in_flight
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Maximum number of parallel RPCs on the wire to allow on
- this connection. Increasing this number would help on higher
- latency links, but has a chance of overloading a server
- if you have too many clients like this.
- Default: 8
-
-What: /sys/fs/lustre/osc/{connection_name}/max_pages_per_rpc
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Maximum number of pages to fit into a single RPC.
- Typically bigger RPCs allow for better performance.
- Default: however many pages to form 1M of data (256 pages
- for 4K page sized platforms)
-
-What: /sys/fs/lustre/osc/{connection_name}/active
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls accessibility of this connection. If set to 0,
- fail all accesses immediately.
-
-What: /sys/fs/lustre/osc/{connection_name}/checksums
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls whenever to checksum bulk RPC data over the wire
- to this target.
- 1: enable (default) ; 0: disable
-
-What: /sys/fs/lustre/osc/{connection_name}/contention_seconds
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls for how long to consider a file contended once
- indicated as such by the server.
- When a file is considered contended, all operations switch to
- synchronous lockless mode to avoid cache and lock pingpong.
-
-What: /sys/fs/lustre/osc/{connection_name}/cur_dirty_bytes
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Displays how many dirty bytes is presently in the cache for this
- target.
-
-What: /sys/fs/lustre/osc/{connection_name}/cur_grant_bytes
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows how many bytes we have as a "dirty cache" grant from the
- server. Writing a value smaller than shown allows to release
- some grant back to the server.
- Dirty cache grant is a way Lustre ensures that cached successful
- writes on client do not end up discarded by the server due to
- lack of space later on.
-
-What: /sys/fs/lustre/osc/{connection_name}/cur_lost_grant_bytes
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows how many granted bytes were released to the server due
- to lack of write activity on this client.
-
-What: /sys/fs/lustre/osc/{connection_name}/grant_shrink_interval
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of seconds with no write activity for this target
- to start releasing dirty grant back to the server.
-
-What: /sys/fs/lustre/osc/{connection_name}/destroys_in_flight
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of DESTROY RPCs currently in flight to this target.
-
-What: /sys/fs/lustre/osc/{connection_name}/lockless_truncate
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls whether lockless truncate RPCs are allowed to this
- target.
- Lockless truncate causes server to perform the locking which
- is beneficial if the truncate is not followed by a write
- immediately.
- 1: enable ; 0: disable (default)
-
-What: /sys/fs/lustre/osc/{connection_name}/max_dirty_mb
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls how much dirty data this client can accumulate
- for this target. This is orthogonal to dirty grant and is
- a hard limit even if the server would allow a bigger dirty
- cache.
- While allowing higher dirty cache is beneficial for write
- performance, flushing write cache takes longer and as such
- the node might be more prone to OOMs.
- Having this value set too low might result in not being able
- to sent too many parallel WRITE RPCs.
- Default: 32
-
-What: /sys/fs/lustre/osc/{connection_name}/resend_count
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls how many times to try and resend RPCs to this target
- that failed with "recoverable" status, such as EAGAIN,
- ENOMEM.
-
-What: /sys/fs/lustre/lov/{connection_name}/numobd
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of OSC targets managed by this LOV instance.
-
-What: /sys/fs/lustre/lov/{connection_name}/activeobd
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of OSC targets managed by this LOV instance that are
- actually active.
-
-What: /sys/fs/lustre/lmv/{connection_name}/numobd
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of MDC targets managed by this LMV instance.
-
-What: /sys/fs/lustre/lmv/{connection_name}/activeobd
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of MDC targets managed by this LMV instance that are
- actually active.
-
-What: /sys/fs/lustre/lmv/{connection_name}/placement
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Determines policy of inode placement in case of multiple
- metadata servers:
- CHAR - based on a hash of the file name used at creation time
- (Default)
- NID - based on a hash of creating client network id.
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index 4d7fce8731fe..4569838f27a0 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -18,6 +18,8 @@
#include <linux/idr.h>
#include "most/core.h"
+#define CHRDEV_REGION_SIZE 50
+
static struct cdev_component {
dev_t devno;
struct ida minor_id;
@@ -51,7 +53,7 @@ static inline bool ch_has_mbo(struct comp_channel *c)
return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}
-static inline bool ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
+static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
if (!kfifo_peek(&c->fifo, mbo)) {
*mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
@@ -242,7 +244,7 @@ static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
size_t to_copy, not_copied, copied;
- struct mbo *mbo;
+ struct mbo *mbo = NULL;
struct comp_channel *c = filp->private_data;
mutex_lock(&c->io_mutex);
@@ -290,13 +292,15 @@ static __poll_t comp_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &c->wq, wait);
+ mutex_lock(&c->io_mutex);
if (c->cfg->direction == MOST_CH_RX) {
- if (!kfifo_is_empty(&c->fifo))
+ if (!c->dev || !kfifo_is_empty(&c->fifo))
mask |= EPOLLIN | EPOLLRDNORM;
} else {
- if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
+ if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
mask |= EPOLLOUT | EPOLLWRNORM;
}
+ mutex_unlock(&c->io_mutex);
return mask;
}
@@ -513,7 +517,7 @@ static int __init mod_init(void)
spin_lock_init(&ch_list_lock);
ida_init(&comp.minor_id);
- err = alloc_chrdev_region(&comp.devno, 0, 50, "cdev");
+ err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
if (err < 0)
goto dest_ida;
comp.major = MAJOR(comp.devno);
@@ -523,7 +527,7 @@ static int __init mod_init(void)
return 0;
free_cdev:
- unregister_chrdev_region(comp.devno, 1);
+ unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
ida_destroy(&comp.minor_id);
class_destroy(comp.class);
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 8f2833526f7f..f4c464625a67 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -111,8 +111,10 @@ static void most_free_mbo_coherent(struct mbo *mbo)
struct most_channel *c = mbo->context;
u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
- dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
- mbo->bus_address);
+ if (c->iface->dma_free)
+ c->iface->dma_free(mbo, coherent_buf_size);
+ else
+ kfree(mbo->virt_address);
kfree(mbo);
if (atomic_sub_and_test(1, &c->mbo_ref))
complete(&c->cleanup);
@@ -420,6 +422,26 @@ static ssize_t set_packets_per_xact_store(struct device *dev,
return count;
}
+static ssize_t set_dbr_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
+}
+
+static ssize_t set_dbr_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct most_channel *c = to_channel(dev);
+ int ret = kstrtou16(buf, 0, &c->cfg.dbr_size);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
#define DEV_ATTR(_name) (&dev_attr_##_name.attr)
static DEVICE_ATTR_RO(available_directions);
@@ -435,6 +457,7 @@ static DEVICE_ATTR_RW(set_direction);
static DEVICE_ATTR_RW(set_datatype);
static DEVICE_ATTR_RW(set_subbuffer_size);
static DEVICE_ATTR_RW(set_packets_per_xact);
+static DEVICE_ATTR_RW(set_dbr_size);
static struct attribute *channel_attrs[] = {
DEV_ATTR(available_directions),
@@ -450,6 +473,7 @@ static struct attribute *channel_attrs[] = {
DEV_ATTR(set_datatype),
DEV_ATTR(set_subbuffer_size),
DEV_ATTR(set_packets_per_xact),
+ DEV_ATTR(set_dbr_size),
NULL,
};
@@ -952,45 +976,49 @@ static int arm_mbo_chain(struct most_channel *c, int dir,
void (*compl)(struct mbo *))
{
unsigned int i;
- int retval;
struct mbo *mbo;
+ unsigned long flags;
u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
atomic_set(&c->mbo_nq_level, 0);
for (i = 0; i < c->cfg.num_buffers; i++) {
mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
- if (!mbo) {
- retval = i;
- goto _exit;
- }
+ if (!mbo)
+ goto flush_fifos;
+
mbo->context = c;
mbo->ifp = c->iface;
mbo->hdm_channel_id = c->channel_id;
- mbo->virt_address = dma_alloc_coherent(NULL,
- coherent_buf_size,
- &mbo->bus_address,
- GFP_KERNEL);
- if (!mbo->virt_address) {
- pr_info("WARN: No DMA coherent buffer.\n");
- retval = i;
- goto _error1;
+ if (c->iface->dma_alloc) {
+ mbo->virt_address =
+ c->iface->dma_alloc(mbo, coherent_buf_size);
+ } else {
+ mbo->virt_address =
+ kzalloc(coherent_buf_size, GFP_KERNEL);
}
+ if (!mbo->virt_address)
+ goto release_mbo;
+
mbo->complete = compl;
mbo->num_buffers_ptr = &dummy_num_buffers;
if (dir == MOST_CH_RX) {
nq_hdm_mbo(mbo);
atomic_inc(&c->mbo_nq_level);
} else {
- arm_mbo(mbo);
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_add_tail(&mbo->list, &c->fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
}
}
- return i;
+ return c->cfg.num_buffers;
-_error1:
+release_mbo:
kfree(mbo);
-_exit:
- return retval;
+
+flush_fifos:
+ flush_channel_fifos(c);
+ return 0;
}
/**
diff --git a/drivers/staging/most/core.h b/drivers/staging/most/core.h
index 884bd71fafce..64cc02f161e7 100644
--- a/drivers/staging/most/core.h
+++ b/drivers/staging/most/core.h
@@ -128,6 +128,7 @@ struct most_channel_config {
u16 extra_len;
u16 subbuffer_size;
u16 packets_per_xact;
+ u16 dbr_size;
};
/*
@@ -229,11 +230,14 @@ struct mbo {
*/
struct most_interface {
struct device dev;
+ struct device *driver_dev;
struct module *mod;
enum most_interface_type interface;
const char *description;
unsigned int num_channels;
struct most_channel_capability *channel_vector;
+ void *(*dma_alloc)(struct mbo *mbo, u32 size);
+ void (*dma_free)(struct mbo *mbo, u32 size);
int (*configure)(struct most_interface *iface, int channel_idx,
struct most_channel_config *channel_config);
int (*enqueue)(struct most_interface *iface, int channel_idx,
diff --git a/drivers/staging/most/dim2/Kconfig b/drivers/staging/most/dim2/Kconfig
index e39c4e525cac..5aeef22c3cba 100644
--- a/drivers/staging/most/dim2/Kconfig
+++ b/drivers/staging/most/dim2/Kconfig
@@ -4,7 +4,7 @@
config MOST_DIM2
tristate "DIM2"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && OF
---help---
Say Y here if you want to connect via MediaLB to network transceiver.
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index f9bc7dea75b8..fe90a7cb56f7 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -15,13 +16,13 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include "most/core.h"
#include "hal.h"
-#include "dim2.h"
#include "errors.h"
#include "sysfs.h"
@@ -32,11 +33,6 @@
#define MAX_BUF_SIZE_PACKET 2048
#define MAX_BUF_SIZE_STREAMING (8 * 1024)
-/* command line parameter to select clock speed */
-static char *clock_speed;
-module_param(clock_speed, charp, 0000);
-MODULE_PARM_DESC(clock_speed, "MediaLB Clock Speed");
-
/*
* The parameter representing the number of frames per sub-buffer for
* synchronous channels. Valid values: [0 .. 6].
@@ -66,6 +62,7 @@ struct hdm_channel {
char name[sizeof "caNNN"];
bool is_initialized;
struct dim_channel ch;
+ u16 *reset_dbr_size;
struct list_head pending_list; /* before dim_enqueue_buffer() */
struct list_head started_list; /* after dim_enqueue_buffer() */
enum most_channel_direction direction;
@@ -78,7 +75,6 @@ struct hdm_channel {
* @most_iface: most interface structure
* @capabilities: an array of channel capability data
* @io_base: I/O register base address
- * @clk_speed: user selectable (through command line parameter) clock speed
* @netinfo_task: thread to deliver network status
* @netinfo_waitq: waitq for the thread to sleep
* @deliver_netinfo: to identify whether network status received
@@ -93,7 +89,9 @@ struct dim2_hdm {
struct most_interface most_iface;
char name[16 + sizeof "dim2-"];
void __iomem *io_base;
- int clk_speed;
+ u8 clk_speed;
+ struct clk *clk;
+ struct clk *clk_pll;
struct task_struct *netinfo_task;
wait_queue_head_t netinfo_waitq;
int deliver_netinfo;
@@ -103,6 +101,12 @@ struct dim2_hdm {
struct medialb_bus bus;
void (*on_netinfo)(struct most_interface *most_iface,
unsigned char link_state, unsigned char *addrs);
+ void (*disable_platform)(struct platform_device *);
+};
+
+struct dim2_platform_data {
+ int (*enable)(struct platform_device *);
+ void (*disable)(struct platform_device *);
};
#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
@@ -156,65 +160,6 @@ void dimcb_on_error(u8 error_id, const char *error_message)
}
/**
- * startup_dim - initialize the dim2 interface
- * @pdev: platform device
- *
- * Get the value of command line parameter "clock_speed" if given or use the
- * default value, enable the clock and PLL, and initialize the dim2 interface.
- */
-static int startup_dim(struct platform_device *pdev)
-{
- struct dim2_hdm *dev = platform_get_drvdata(pdev);
- struct dim2_platform_data *pdata = pdev->dev.platform_data;
- u8 hal_ret;
-
- dev->clk_speed = -1;
-
- if (clock_speed) {
- if (!strcmp(clock_speed, "256fs"))
- dev->clk_speed = CLK_256FS;
- else if (!strcmp(clock_speed, "512fs"))
- dev->clk_speed = CLK_512FS;
- else if (!strcmp(clock_speed, "1024fs"))
- dev->clk_speed = CLK_1024FS;
- else if (!strcmp(clock_speed, "2048fs"))
- dev->clk_speed = CLK_2048FS;
- else if (!strcmp(clock_speed, "3072fs"))
- dev->clk_speed = CLK_3072FS;
- else if (!strcmp(clock_speed, "4096fs"))
- dev->clk_speed = CLK_4096FS;
- else if (!strcmp(clock_speed, "6144fs"))
- dev->clk_speed = CLK_6144FS;
- else if (!strcmp(clock_speed, "8192fs"))
- dev->clk_speed = CLK_8192FS;
- }
-
- if (dev->clk_speed == -1) {
- pr_info("Bad or missing clock speed parameter, using default value: 3072fs\n");
- dev->clk_speed = CLK_3072FS;
- } else {
- pr_info("Selected clock speed: %s\n", clock_speed);
- }
- if (pdata && pdata->init) {
- int ret = pdata->init(pdata, dev->io_base, dev->clk_speed);
-
- if (ret)
- return ret;
- }
-
- pr_info("sync: num of frames per sub-buffer: %u\n", fcnt);
- hal_ret = dim_startup(dev->io_base, dev->clk_speed, fcnt);
- if (hal_ret != DIM_NO_ERROR) {
- pr_err("dim_startup failed: %d\n", hal_ret);
- if (pdata && pdata->destroy)
- pdata->destroy(pdata);
- return -ENODEV;
- }
-
- return 0;
-}
-
-/**
* try_start_dim_transfer - try to transfer a buffer on a channel
* @hdm_ch: channel specific data
*
@@ -528,6 +473,12 @@ static int configure_channel(struct most_interface *most_iface, int ch_idx,
if (hdm_ch->is_initialized)
return -EPERM;
+ /* do not reset if the property was set by user, see poison_channel */
+ hdm_ch->reset_dbr_size = ccfg->dbr_size ? NULL : &ccfg->dbr_size;
+
+ /* zero value is default dbr_size, see dim2 hal */
+ hdm_ch->ch.dbr_size = ccfg->dbr_size;
+
switch (ccfg->data_type) {
case MOST_CH_CONTROL:
new_size = dim_norm_ctrl_async_buffer_size(buf_size);
@@ -608,6 +559,7 @@ static int configure_channel(struct most_interface *most_iface, int ch_idx,
dev->atx_idx = ch_idx;
spin_unlock_irqrestore(&dim_lock, flags);
+ ccfg->dbr_size = hdm_ch->ch.dbr_size;
return 0;
}
@@ -723,10 +675,64 @@ static int poison_channel(struct most_interface *most_iface, int ch_idx)
complete_all_mbos(&hdm_ch->started_list);
complete_all_mbos(&hdm_ch->pending_list);
+ if (hdm_ch->reset_dbr_size)
+ *hdm_ch->reset_dbr_size = 0;
return ret;
}
+static void *dma_alloc(struct mbo *mbo, u32 size)
+{
+ struct device *dev = mbo->ifp->driver_dev;
+
+ return dma_alloc_coherent(dev, size, &mbo->bus_address, GFP_KERNEL);
+}
+
+static void dma_free(struct mbo *mbo, u32 size)
+{
+ struct device *dev = mbo->ifp->driver_dev;
+
+ dma_free_coherent(dev, size, mbo->virt_address, mbo->bus_address);
+}
+
+static const struct of_device_id dim2_of_match[];
+
+static struct {
+ const char *clock_speed;
+ u8 clk_speed;
+} clk_mt[] = {
+ { "256fs", CLK_256FS },
+ { "512fs", CLK_512FS },
+ { "1024fs", CLK_1024FS },
+ { "2048fs", CLK_2048FS },
+ { "3072fs", CLK_3072FS },
+ { "4096fs", CLK_4096FS },
+ { "6144fs", CLK_6144FS },
+ { "8192fs", CLK_8192FS },
+};
+
+/**
+ * get_dim2_clk_speed - converts string to DIM2 clock speed value
+ *
+ * @clock_speed: string in the format "{NUMBER}fs"
+ * @val: pointer to get one of the CLK_{NUMBER}FS values
+ *
+ * By success stores one of the CLK_{NUMBER}FS in the *val and returns 0,
+ * otherwise returns -EINVAL.
+ */
+static int get_dim2_clk_speed(const char *clock_speed, u8 *val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clk_mt); i++) {
+ if (!strcmp(clock_speed, clk_mt[i].clock_speed)) {
+ *val = clk_mt[i].clk_speed;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
/*
* dim2_probe - dim2 probe handler
* @pdev: platform device structure
@@ -736,11 +742,17 @@ static int poison_channel(struct most_interface *most_iface, int ch_idx)
*/
static int dim2_probe(struct platform_device *pdev)
{
+ const struct dim2_platform_data *pdata;
+ const struct of_device_id *of_id;
+ const char *clock_speed;
struct dim2_hdm *dev;
struct resource *res;
int ret, i;
+ u8 hal_ret;
int irq;
+ enum { MLB_INT_IDX, AHB0_INT_IDX };
+
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -748,43 +760,77 @@ static int dim2_probe(struct platform_device *pdev)
dev->atx_idx = -1;
platform_set_drvdata(pdev, dev);
+
+ ret = of_property_read_string(pdev->dev.of_node,
+ "microchip,clock-speed", &clock_speed);
+ if (ret) {
+ dev_err(&pdev->dev, "missing dt property clock-speed\n");
+ return ret;
+ }
+
+ ret = get_dim2_clk_speed(clock_speed, &dev->clk_speed);
+ if (ret) {
+ dev_err(&pdev->dev, "bad dt property clock-speed\n");
+ return ret;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->io_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dev->io_base))
return PTR_ERR(dev->io_base);
- irq = platform_get_irq(pdev, 0);
+ of_id = of_match_node(dim2_of_match, pdev->dev.of_node);
+ pdata = of_id->data;
+ ret = pdata && pdata->enable ? pdata->enable(pdev) : 0;
+ if (ret)
+ return ret;
+
+ dev->disable_platform = pdata ? pdata->disable : 0;
+
+ dev_info(&pdev->dev, "sync: num of frames per sub-buffer: %u\n", fcnt);
+ hal_ret = dim_startup(dev->io_base, dev->clk_speed, fcnt);
+ if (hal_ret != DIM_NO_ERROR) {
+ dev_err(&pdev->dev, "dim_startup failed: %d\n", hal_ret);
+ ret = -ENODEV;
+ goto err_disable_platform;
+ }
+
+ irq = platform_get_irq(pdev, AHB0_INT_IDX);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get ahb0_int irq: %d\n", irq);
- return irq;
+ ret = irq;
+ goto err_shutdown_dim;
}
ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
"dim2_ahb0_int", dev);
if (ret) {
dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
- return ret;
+ goto err_shutdown_dim;
}
- irq = platform_get_irq(pdev, 1);
+ irq = platform_get_irq(pdev, MLB_INT_IDX);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get mlb_int irq: %d\n", irq);
- return irq;
+ ret = irq;
+ goto err_shutdown_dim;
}
ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
"dim2_mlb_int", dev);
if (ret) {
dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
- return ret;
+ goto err_shutdown_dim;
}
init_waitqueue_head(&dev->netinfo_waitq);
dev->deliver_netinfo = 0;
- dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
+ dev->netinfo_task = kthread_run(&deliver_netinfo_thread, dev,
"dim2_netinfo");
- if (IS_ERR(dev->netinfo_task))
- return PTR_ERR(dev->netinfo_task);
+ if (IS_ERR(dev->netinfo_task)) {
+ ret = PTR_ERR(dev->netinfo_task);
+ goto err_shutdown_dim;
+ }
for (i = 0; i < DMA_CHANNELS; i++) {
struct most_channel_capability *cap = dev->capabilities + i;
@@ -824,8 +870,11 @@ static int dim2_probe(struct platform_device *pdev)
dev->most_iface.channel_vector = dev->capabilities;
dev->most_iface.configure = configure_channel;
dev->most_iface.enqueue = enqueue;
+ dev->most_iface.dma_alloc = dma_alloc;
+ dev->most_iface.dma_free = dma_free;
dev->most_iface.poison_channel = poison_channel;
dev->most_iface.request_netinfo = request_netinfo;
+ dev->most_iface.driver_dev = &pdev->dev;
dev->dev.init_name = "dim2_state";
dev->dev.parent = &dev->most_iface.dev;
@@ -841,20 +890,17 @@ static int dim2_probe(struct platform_device *pdev)
goto err_unreg_iface;
}
- ret = startup_dim(pdev);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize DIM2\n");
- goto err_destroy_bus;
- }
-
return 0;
-err_destroy_bus:
- dim2_sysfs_destroy(&dev->dev);
err_unreg_iface:
most_deregister_interface(&dev->most_iface);
err_stop_thread:
kthread_stop(dev->netinfo_task);
+err_shutdown_dim:
+ dim_shutdown();
+err_disable_platform:
+ if (dev->disable_platform)
+ dev->disable_platform(pdev);
return ret;
}
@@ -868,48 +914,197 @@ err_stop_thread:
static int dim2_remove(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
- struct dim2_platform_data *pdata = pdev->dev.platform_data;
unsigned long flags;
+ dim2_sysfs_destroy(&dev->dev);
+ most_deregister_interface(&dev->most_iface);
+ kthread_stop(dev->netinfo_task);
+
spin_lock_irqsave(&dim_lock, flags);
dim_shutdown();
spin_unlock_irqrestore(&dim_lock, flags);
- if (pdata && pdata->destroy)
- pdata->destroy(pdata);
+ if (dev->disable_platform)
+ dev->disable_platform(pdev);
- dim2_sysfs_destroy(&dev->dev);
- most_deregister_interface(&dev->most_iface);
- kthread_stop(dev->netinfo_task);
+ return 0;
+}
+
+/* platform specific functions [[ */
+
+static int fsl_mx6_enable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ int ret;
+
+ dev->clk = devm_clk_get(&pdev->dev, "mlb");
+ if (IS_ERR_OR_NULL(dev->clk)) {
+ dev_err(&pdev->dev, "unable to get mlb clock\n");
+ return -EFAULT;
+ }
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
+ return ret;
+ }
+
+ if (dev->clk_speed >= CLK_2048FS) {
+ /* enable pll */
+ dev->clk_pll = devm_clk_get(&pdev->dev, "pll8_mlb");
+ if (IS_ERR_OR_NULL(dev->clk_pll)) {
+ dev_err(&pdev->dev, "unable to get mlb pll clock\n");
+ clk_disable_unprepare(dev->clk);
+ return -EFAULT;
+ }
+
+ writel(0x888, dev->io_base + 0x38);
+ clk_prepare_enable(dev->clk_pll);
+ }
+
+ return 0;
+}
+
+static void fsl_mx6_disable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+
+ if (dev->clk_speed >= CLK_2048FS)
+ clk_disable_unprepare(dev->clk_pll);
+
+ clk_disable_unprepare(dev->clk);
+}
+
+static int rcar_h2_enable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ int ret;
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(dev->clk);
+ }
- /*
- * break link to local platform_device_id struct
- * to prevent crash by unload platform device module
- */
- pdev->id_entry = NULL;
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
+ return ret;
+ }
+
+ if (dev->clk_speed >= CLK_2048FS) {
+ /* enable MLP pll and LVDS drivers */
+ writel(0x03, dev->io_base + 0x600);
+ /* set bias */
+ writel(0x888, dev->io_base + 0x38);
+ } else {
+ /* PLL */
+ writel(0x04, dev->io_base + 0x600);
+ }
+
+
+ /* BBCR = 0b11 */
+ writel(0x03, dev->io_base + 0x500);
+ writel(0x0002FF02, dev->io_base + 0x508);
return 0;
}
-static const struct platform_device_id dim2_id[] = {
- { "medialb_dim2" },
- { }, /* Terminating entry */
+static void rcar_h2_disable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(dev->clk);
+
+ /* disable PLLs and LVDS drivers */
+ writel(0x0, dev->io_base + 0x600);
+}
+
+static int rcar_m3_enable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ u32 enable_512fs = dev->clk_speed == CLK_512FS;
+ int ret;
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(dev->clk);
+ }
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
+ return ret;
+ }
+
+ /* PLL */
+ writel(0x04, dev->io_base + 0x600);
+
+ writel(enable_512fs, dev->io_base + 0x604);
+
+ /* BBCR = 0b11 */
+ writel(0x03, dev->io_base + 0x500);
+ writel(0x0002FF02, dev->io_base + 0x508);
+
+ return 0;
+}
+
+static void rcar_m3_disable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(dev->clk);
+
+ /* disable PLLs and LVDS drivers */
+ writel(0x0, dev->io_base + 0x600);
+}
+
+/* ]] platform specific functions */
+
+enum dim2_platforms { FSL_MX6, RCAR_H2, RCAR_M3 };
+
+static struct dim2_platform_data plat_data[] = {
+ [FSL_MX6] = { .enable = fsl_mx6_enable, .disable = fsl_mx6_disable },
+ [RCAR_H2] = { .enable = rcar_h2_enable, .disable = rcar_h2_disable },
+ [RCAR_M3] = { .enable = rcar_m3_enable, .disable = rcar_m3_disable },
+};
+
+static const struct of_device_id dim2_of_match[] = {
+ {
+ .compatible = "fsl,imx6q-mlb150",
+ .data = plat_data + FSL_MX6
+ },
+ {
+ .compatible = "renesas,mlp",
+ .data = plat_data + RCAR_H2
+ },
+ {
+ .compatible = "rcar,medialb-dim2",
+ .data = plat_data + RCAR_M3
+ },
+ {
+ .compatible = "xlnx,axi4-os62420_3pin-1.00.a",
+ },
+ {
+ .compatible = "xlnx,axi4-os62420_6pin-1.00.a",
+ },
+ {},
};
-MODULE_DEVICE_TABLE(platform, dim2_id);
+MODULE_DEVICE_TABLE(of, dim2_of_match);
static struct platform_driver dim2_driver = {
.probe = dim2_probe,
.remove = dim2_remove,
- .id_table = dim2_id,
.driver = {
.name = "hdm_dim2",
+ .of_match_table = dim2_of_match,
},
};
module_platform_driver(dim2_driver);
-MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/dim2/dim2.h b/drivers/staging/most/dim2/dim2.h
deleted file mode 100644
index 6a9fc51a2eb4..000000000000
--- a/drivers/staging/most/dim2/dim2.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * dim2.h - MediaLB DIM2 HDM Header
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- */
-
-#ifndef DIM2_HDM_H
-#define DIM2_HDM_H
-
-struct device;
-
-/* platform dependent data for dim2 interface */
-struct dim2_platform_data {
- int (*init)(struct dim2_platform_data *pd, void __iomem *io_base,
- int clk_speed);
- void (*destroy)(struct dim2_platform_data *pd);
- void *priv;
-};
-
-#endif /* DIM2_HDM_H */
diff --git a/drivers/staging/most/dim2/hal.c b/drivers/staging/most/dim2/hal.c
index 17c04e1c5e62..699e02f83bd4 100644
--- a/drivers/staging/most/dim2/hal.c
+++ b/drivers/staging/most/dim2/hal.c
@@ -760,7 +760,8 @@ static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
if (!check_channel_address(ch_address))
return DIM_INIT_ERR_CHANNEL_ADDRESS;
- ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
+ if (!ch->dbr_size)
+ ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
ch->dbr_addr = alloc_dbr(ch->dbr_size);
if (ch->dbr_addr >= DBR_SIZE)
return DIM_INIT_ERR_OUT_OF_MEMORY;
@@ -846,7 +847,8 @@ u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
if (!check_packet_length(packet_length))
return DIM_ERR_BAD_CONFIG;
- ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
+ if (!ch->dbr_size)
+ ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
ch->dbr_addr = alloc_dbr(ch->dbr_size);
if (ch->dbr_addr >= DBR_SIZE)
return DIM_INIT_ERR_OUT_OF_MEMORY;
@@ -873,7 +875,8 @@ u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
if (!check_bytes_per_frame(bytes_per_frame))
return DIM_ERR_BAD_CONFIG;
- ch->dbr_size = bytes_per_frame << bd_factor;
+ if (!ch->dbr_size)
+ ch->dbr_size = bytes_per_frame << bd_factor;
ch->dbr_addr = alloc_dbr(ch->dbr_size);
if (ch->dbr_addr >= DBR_SIZE)
return DIM_INIT_ERR_OUT_OF_MEMORY;
diff --git a/drivers/staging/most/dim2/reg.h b/drivers/staging/most/dim2/reg.h
index 69cbf78239f1..4343a483017e 100644
--- a/drivers/staging/most/dim2/reg.h
+++ b/drivers/staging/most/dim2/reg.h
@@ -12,48 +12,48 @@
#include <linux/types.h>
struct dim2_regs {
- /* 0x00 */ u32 MLBC0;
- /* 0x01 */ u32 rsvd0[1];
- /* 0x02 */ u32 MLBPC0;
- /* 0x03 */ u32 MS0;
- /* 0x04 */ u32 rsvd1[1];
- /* 0x05 */ u32 MS1;
- /* 0x06 */ u32 rsvd2[2];
- /* 0x08 */ u32 MSS;
- /* 0x09 */ u32 MSD;
- /* 0x0A */ u32 rsvd3[1];
- /* 0x0B */ u32 MIEN;
- /* 0x0C */ u32 rsvd4[1];
- /* 0x0D */ u32 MLBPC2;
- /* 0x0E */ u32 MLBPC1;
- /* 0x0F */ u32 MLBC1;
- /* 0x10 */ u32 rsvd5[0x10];
- /* 0x20 */ u32 HCTL;
- /* 0x21 */ u32 rsvd6[1];
- /* 0x22 */ u32 HCMR0;
- /* 0x23 */ u32 HCMR1;
- /* 0x24 */ u32 HCER0;
- /* 0x25 */ u32 HCER1;
- /* 0x26 */ u32 HCBR0;
- /* 0x27 */ u32 HCBR1;
- /* 0x28 */ u32 rsvd7[8];
- /* 0x30 */ u32 MDAT0;
- /* 0x31 */ u32 MDAT1;
- /* 0x32 */ u32 MDAT2;
- /* 0x33 */ u32 MDAT3;
- /* 0x34 */ u32 MDWE0;
- /* 0x35 */ u32 MDWE1;
- /* 0x36 */ u32 MDWE2;
- /* 0x37 */ u32 MDWE3;
- /* 0x38 */ u32 MCTL;
- /* 0x39 */ u32 MADR;
- /* 0x3A */ u32 rsvd8[0xB6];
- /* 0xF0 */ u32 ACTL;
- /* 0xF1 */ u32 rsvd9[3];
- /* 0xF4 */ u32 ACSR0;
- /* 0xF5 */ u32 ACSR1;
- /* 0xF6 */ u32 ACMR0;
- /* 0xF7 */ u32 ACMR1;
+ u32 MLBC0; /* 0x00 */
+ u32 rsvd0[1]; /* 0x01 */
+ u32 MLBPC0; /* 0x02 */
+ u32 MS0; /* 0x03 */
+ u32 rsvd1[1]; /* 0x04 */
+ u32 MS1; /* 0x05 */
+ u32 rsvd2[2]; /* 0x06 */
+ u32 MSS; /* 0x08 */
+ u32 MSD; /* 0x09 */
+ u32 rsvd3[1]; /* 0x0A */
+ u32 MIEN; /* 0x0B */
+ u32 rsvd4[1]; /* 0x0C */
+ u32 MLBPC2; /* 0x0D */
+ u32 MLBPC1; /* 0x0E */
+ u32 MLBC1; /* 0x0F */
+ u32 rsvd5[0x10]; /* 0x10 */
+ u32 HCTL; /* 0x20 */
+ u32 rsvd6[1]; /* 0x21 */
+ u32 HCMR0; /* 0x22 */
+ u32 HCMR1; /* 0x23 */
+ u32 HCER0; /* 0x24 */
+ u32 HCER1; /* 0x25 */
+ u32 HCBR0; /* 0x26 */
+ u32 HCBR1; /* 0x27 */
+ u32 rsvd7[8]; /* 0x28 */
+ u32 MDAT0; /* 0x30 */
+ u32 MDAT1; /* 0x31 */
+ u32 MDAT2; /* 0x32 */
+ u32 MDAT3; /* 0x33 */
+ u32 MDWE0; /* 0x34 */
+ u32 MDWE1; /* 0x35 */
+ u32 MDWE2; /* 0x36 */
+ u32 MDWE3; /* 0x37 */
+ u32 MCTL; /* 0x38 */
+ u32 MADR; /* 0x39 */
+ u32 rsvd8[0xb6]; /* 0x3A */
+ u32 ACTL; /* 0xF0 */
+ u32 rsvd9[3]; /* 0xF1 */
+ u32 ACSR0; /* 0xF4 */
+ u32 ACSR1; /* 0xF5 */
+ u32 ACMR0; /* 0xF6 */
+ u32 ACMR1; /* 0xF7 */
};
#define DIM2_MASK(n) (~((~(u32)0) << (n)))
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
index 141239fc9f51..4a4fc1005932 100644
--- a/drivers/staging/most/i2c/i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/err.h>
@@ -29,33 +28,28 @@ enum { CH_RX, CH_TX, NUM_CHANNELS };
#define list_first_mbo(ptr) \
list_first_entry(ptr, struct mbo, list)
-/* IRQ / Polling option */
-static bool polling_req;
-module_param(polling_req, bool, 0444);
-MODULE_PARM_DESC(polling_req, "Request Polling. Default = 0 (use irq)");
-
-/* Polling Rate */
-static int scan_rate = 100;
-module_param(scan_rate, int, 0644);
-MODULE_PARM_DESC(scan_rate, "Polling rate in times/sec. Default = 100");
+static unsigned int polling_rate;
+module_param(polling_rate, uint, 0644);
+MODULE_PARM_DESC(polling_rate, "Polling rate [Hz]. Default = 0 (use IRQ)");
struct hdm_i2c {
- bool is_open[NUM_CHANNELS];
- bool polling_mode;
struct most_interface most_iface;
struct most_channel_capability capabilities[NUM_CHANNELS];
struct i2c_client *client;
struct rx {
struct delayed_work dwork;
- wait_queue_head_t waitq;
struct list_head list;
- struct mutex list_mutex;
+ bool int_disabled;
+ unsigned int delay;
} rx;
char name[64];
};
#define to_hdm(iface) container_of(iface, struct hdm_i2c, most_iface)
+static irqreturn_t most_irq_handler(int, void *);
+static void pending_rx_work(struct work_struct *);
+
/**
* configure_channel - called from MOST core to configure a channel
* @iface: interface the channel belongs to
@@ -71,10 +65,11 @@ static int configure_channel(struct most_interface *most_iface,
int ch_idx,
struct most_channel_config *channel_config)
{
+ int ret;
struct hdm_i2c *dev = to_hdm(most_iface);
+ unsigned int delay, pr;
BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
- BUG_ON(dev->is_open[ch_idx]);
if (channel_config->data_type != MOST_CH_CONTROL) {
pr_err("bad data type for channel %d\n", ch_idx);
@@ -86,11 +81,27 @@ static int configure_channel(struct most_interface *most_iface,
return -EPERM;
}
- if ((channel_config->direction == MOST_CH_RX) && (dev->polling_mode)) {
- schedule_delayed_work(&dev->rx.dwork,
- msecs_to_jiffies(MSEC_PER_SEC / 4));
+ if (channel_config->direction == MOST_CH_RX) {
+ if (!polling_rate) {
+ if (dev->client->irq <= 0) {
+ pr_err("bad irq: %d\n", dev->client->irq);
+ return -ENOENT;
+ }
+ dev->rx.int_disabled = false;
+ ret = request_irq(dev->client->irq, most_irq_handler, 0,
+ dev->client->name, dev);
+ if (ret) {
+ pr_err("request_irq(%d) failed: %d\n",
+ dev->client->irq, ret);
+ return ret;
+ }
+ } else {
+ delay = msecs_to_jiffies(MSEC_PER_SEC / polling_rate);
+ dev->rx.delay = delay ? delay : 1;
+ pr = MSEC_PER_SEC / jiffies_to_msecs(dev->rx.delay);
+ pr_info("polling rate is %u Hz\n", pr);
+ }
}
- dev->is_open[ch_idx] = true;
return 0;
}
@@ -113,14 +124,17 @@ static int enqueue(struct most_interface *most_iface,
int ret;
BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
- BUG_ON(!dev->is_open[ch_idx]);
if (ch_idx == CH_RX) {
/* RX */
- mutex_lock(&dev->rx.list_mutex);
+ if (!polling_rate)
+ disable_irq(dev->client->irq);
+ cancel_delayed_work_sync(&dev->rx.dwork);
list_add_tail(&mbo->list, &dev->rx.list);
- mutex_unlock(&dev->rx.list_mutex);
- wake_up_interruptible(&dev->rx.waitq);
+ if (dev->rx.int_disabled || polling_rate)
+ pending_rx_work(&dev->rx.dwork.work);
+ if (!polling_rate)
+ enable_irq(dev->client->irq);
} else {
/* TX */
ret = i2c_master_send(dev->client, mbo->virt_address,
@@ -155,25 +169,20 @@ static int poison_channel(struct most_interface *most_iface,
struct mbo *mbo;
BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
- BUG_ON(!dev->is_open[ch_idx]);
-
- dev->is_open[ch_idx] = false;
if (ch_idx == CH_RX) {
- mutex_lock(&dev->rx.list_mutex);
+ if (!polling_rate)
+ free_irq(dev->client->irq, dev);
+ cancel_delayed_work_sync(&dev->rx.dwork);
+
while (!list_empty(&dev->rx.list)) {
mbo = list_first_mbo(&dev->rx.list);
list_del(&mbo->list);
- mutex_unlock(&dev->rx.list_mutex);
mbo->processed_length = 0;
mbo->status = MBO_E_CLOSE;
mbo->complete(mbo);
-
- mutex_lock(&dev->rx.list_mutex);
}
- mutex_unlock(&dev->rx.list_mutex);
- wake_up_interruptible(&dev->rx.waitq);
}
return 0;
@@ -183,7 +192,7 @@ static void do_rx_work(struct hdm_i2c *dev)
{
struct mbo *mbo;
unsigned char msg[MAX_BUF_SIZE_CONTROL];
- int ret, ch_idx = CH_RX;
+ int ret;
u16 pml, data_size;
/* Read PML (2 bytes) */
@@ -206,32 +215,8 @@ static void do_rx_work(struct hdm_i2c *dev)
return;
}
- for (;;) {
- /* Conditions to wait for: poisoned channel or free buffer
- * available for reading
- */
- if (wait_event_interruptible(dev->rx.waitq,
- !dev->is_open[ch_idx] ||
- !list_empty(&dev->rx.list))) {
- pr_err("wait_event_interruptible() failed\n");
- return;
- }
-
- if (!dev->is_open[ch_idx])
- return;
-
- mutex_lock(&dev->rx.list_mutex);
-
- /* list may be empty if poison or remove is called */
- if (!list_empty(&dev->rx.list))
- break;
-
- mutex_unlock(&dev->rx.list_mutex);
- }
-
mbo = list_first_mbo(&dev->rx.list);
list_del(&mbo->list);
- mutex_unlock(&dev->rx.list_mutex);
mbo->processed_length = min(data_size, mbo->buffer_length);
memcpy(mbo->virt_address, msg, mbo->processed_length);
@@ -249,14 +234,15 @@ static void pending_rx_work(struct work_struct *work)
{
struct hdm_i2c *dev = container_of(work, struct hdm_i2c, rx.dwork.work);
+ if (list_empty(&dev->rx.list))
+ return;
+
do_rx_work(dev);
- if (dev->polling_mode) {
- if (dev->is_open[CH_RX])
- schedule_delayed_work(&dev->rx.dwork,
- msecs_to_jiffies(MSEC_PER_SEC
- / scan_rate));
+ if (polling_rate) {
+ schedule_delayed_work(&dev->rx.dwork, dev->rx.delay);
} else {
+ dev->rx.int_disabled = false;
enable_irq(dev->client->irq);
}
}
@@ -284,7 +270,7 @@ static irqreturn_t most_irq_handler(int irq, void *_dev)
struct hdm_i2c *dev = _dev;
disable_irq_nosync(irq);
-
+ dev->rx.int_disabled = true;
schedule_delayed_work(&dev->rx.dwork, 0);
return IRQ_HANDLED;
@@ -313,7 +299,6 @@ static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
client->adapter->nr, client->addr);
for (i = 0; i < NUM_CHANNELS; i++) {
- dev->is_open[i] = false;
dev->capabilities[i].data_type = MOST_CH_CONTROL;
dev->capabilities[i].num_buffers_packet = MAX_BUFFERS_CONTROL;
dev->capabilities[i].buffer_size_packet = MAX_BUF_SIZE_CONTROL;
@@ -332,8 +317,6 @@ static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev->most_iface.poison_channel = poison_channel;
INIT_LIST_HEAD(&dev->rx.list);
- mutex_init(&dev->rx.list_mutex);
- init_waitqueue_head(&dev->rx.waitq);
INIT_DELAYED_WORK(&dev->rx.dwork, pending_rx_work);
@@ -347,21 +330,6 @@ static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
return ret;
}
- dev->polling_mode = polling_req || client->irq <= 0;
- if (!dev->polling_mode) {
- pr_info("Requesting IRQ: %d\n", client->irq);
- ret = request_irq(client->irq, most_irq_handler, 0,
- client->name, dev);
- if (ret) {
- pr_info("IRQ request failed: %d, falling back to polling\n",
- ret);
- dev->polling_mode = true;
- }
- }
-
- if (dev->polling_mode)
- pr_info("Using polling at rate: %d times/sec\n", scan_rate);
-
return 0;
}
@@ -376,17 +344,8 @@ static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
static int i2c_remove(struct i2c_client *client)
{
struct hdm_i2c *dev = i2c_get_clientdata(client);
- int i;
-
- if (!dev->polling_mode)
- free_irq(client->irq, dev);
most_deregister_interface(&dev->most_iface);
-
- for (i = 0 ; i < NUM_CHANNELS; i++)
- if (dev->is_open[i])
- poison_channel(&dev->most_iface, i);
- cancel_delayed_work_sync(&dev->rx.dwork);
kfree(dev);
return 0;
@@ -410,7 +369,6 @@ static struct i2c_driver i2c_driver = {
module_i2c_driver(i2c_driver);
-MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("I2C Hardware Dependent Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 83cec21c85b8..04c18323c2ea 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -460,21 +460,68 @@ static const struct snd_pcm_ops pcm_ops = {
.mmap = snd_pcm_lib_mmap_vmalloc,
};
-static int split_arg_list(char *buf, char **card_name, char **pcm_format)
+static int split_arg_list(char *buf, char **card_name, u16 *ch_num,
+ char **sample_res)
{
+ char *num;
+ int ret;
+
*card_name = strsep(&buf, ".");
- if (!*card_name)
- return -EIO;
- *pcm_format = strsep(&buf, ".\n");
- if (!*pcm_format)
+ if (!*card_name) {
+ pr_err("Missing sound card name\n");
return -EIO;
+ }
+ num = strsep(&buf, "x");
+ if (!num)
+ goto err;
+ ret = kstrtou16(num, 0, ch_num);
+ if (ret)
+ goto err;
+ *sample_res = strsep(&buf, ".\n");
+ if (!*sample_res)
+ goto err;
return 0;
+
+err:
+ pr_err("Bad PCM format\n");
+ return -EIO;
}
+static const struct sample_resolution_info {
+ const char *sample_res;
+ int bytes;
+ u64 formats;
+} sinfo[] = {
+ { "8", 1, SNDRV_PCM_FMTBIT_S8 },
+ { "16", 2, SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE },
+ { "24", 3, SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE },
+ { "32", 4, SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE },
+};
+
static int audio_set_hw_params(struct snd_pcm_hardware *pcm_hw,
- char *pcm_format,
+ u16 ch_num, char *sample_res,
struct most_channel_config *cfg)
{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sinfo); i++) {
+ if (!strcmp(sample_res, sinfo[i].sample_res))
+ goto found;
+ }
+ pr_err("Unsupported PCM format\n");
+ return -EIO;
+
+found:
+ if (!ch_num) {
+ pr_err("Bad number of channels\n");
+ return -EINVAL;
+ }
+
+ if (cfg->subbuffer_size != ch_num * sinfo[i].bytes) {
+ pr_err("Audio resolution doesn't fit subbuffer size\n");
+ return -EINVAL;
+ }
+
pcm_hw->info = MOST_PCM_INFO;
pcm_hw->rates = SNDRV_PCM_RATE_48000;
pcm_hw->rate_min = 48000;
@@ -484,54 +531,10 @@ static int audio_set_hw_params(struct snd_pcm_hardware *pcm_hw,
pcm_hw->period_bytes_max = cfg->buffer_size;
pcm_hw->periods_min = 1;
pcm_hw->periods_max = cfg->num_buffers;
-
- if (!strcmp(pcm_format, "1x8")) {
- if (cfg->subbuffer_size != 1)
- goto error;
- pr_info("PCM format is 8-bit mono\n");
- pcm_hw->channels_min = 1;
- pcm_hw->channels_max = 1;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S8;
- } else if (!strcmp(pcm_format, "2x16")) {
- if (cfg->subbuffer_size != 4)
- goto error;
- pr_info("PCM format is 16-bit stereo\n");
- pcm_hw->channels_min = 2;
- pcm_hw->channels_max = 2;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S16_BE;
- } else if (!strcmp(pcm_format, "2x24")) {
- if (cfg->subbuffer_size != 6)
- goto error;
- pr_info("PCM format is 24-bit stereo\n");
- pcm_hw->channels_min = 2;
- pcm_hw->channels_max = 2;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S24_3LE |
- SNDRV_PCM_FMTBIT_S24_3BE;
- } else if (!strcmp(pcm_format, "2x32")) {
- if (cfg->subbuffer_size != 8)
- goto error;
- pr_info("PCM format is 32-bit stereo\n");
- pcm_hw->channels_min = 2;
- pcm_hw->channels_max = 2;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S32_LE |
- SNDRV_PCM_FMTBIT_S32_BE;
- } else if (!strcmp(pcm_format, "6x16")) {
- if (cfg->subbuffer_size != 12)
- goto error;
- pr_info("PCM format is 16-bit 5.1 multi channel\n");
- pcm_hw->channels_min = 6;
- pcm_hw->channels_max = 6;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S16_BE;
- } else {
- pr_err("PCM format %s not supported\n", pcm_format);
- return -EIO;
- }
+ pcm_hw->channels_min = ch_num;
+ pcm_hw->channels_max = ch_num;
+ pcm_hw->formats = sinfo[i].formats;
return 0;
-error:
- pr_err("Audio resolution doesn't fit subbuffer size\n");
- return -EINVAL;
}
/**
@@ -558,7 +561,8 @@ static int audio_probe_channel(struct most_interface *iface, int channel_id,
int ret;
int direction;
char *card_name;
- char *pcm_format;
+ u16 ch_num;
+ char *sample_res;
if (!iface)
return -EINVAL;
@@ -582,13 +586,11 @@ static int audio_probe_channel(struct most_interface *iface, int channel_id,
direction = SNDRV_PCM_STREAM_CAPTURE;
}
- ret = split_arg_list(arg_list, &card_name, &pcm_format);
- if (ret < 0) {
- pr_info("PCM format missing\n");
+ ret = split_arg_list(arg_list, &card_name, &ch_num, &sample_res);
+ if (ret < 0)
return ret;
- }
- ret = snd_card_new(NULL, -1, card_name, THIS_MODULE,
+ ret = snd_card_new(&iface->dev, -1, card_name, THIS_MODULE,
sizeof(*channel), &card);
if (ret < 0)
return ret;
@@ -600,7 +602,8 @@ static int audio_probe_channel(struct most_interface *iface, int channel_id,
channel->id = channel_id;
init_waitqueue_head(&channel->playback_waitq);
- ret = audio_set_hw_params(&channel->pcm_hardware, pcm_format, cfg);
+ ret = audio_set_hw_params(&channel->pcm_hardware, ch_num, sample_res,
+ cfg);
if (ret)
goto err_free_card;
diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c
index 31f184cfcd69..bc820f90bcb1 100644
--- a/drivers/staging/most/usb/usb.c
+++ b/drivers/staging/most/usb/usb.c
@@ -338,7 +338,6 @@ static void hdm_write_completion(struct urb *urb)
struct mbo *mbo = urb->context;
struct most_dev *mdev = to_mdev(mbo->ifp);
unsigned int channel = mbo->hdm_channel_id;
- struct device *dev = &mdev->usb_device->dev;
spinlock_t *lock = mdev->channel_lock + channel;
unsigned long flags;
@@ -354,7 +353,9 @@ static void hdm_write_completion(struct urb *urb)
mbo->status = MBO_SUCCESS;
break;
case -EPIPE:
- dev_warn(dev, "Broken OUT pipe detected\n");
+ dev_warn(&mdev->usb_device->dev,
+ "Broken pipe on ep%02x\n",
+ mdev->ep_address[channel]);
mdev->is_channel_healthy[channel] = false;
mdev->clear_work[channel].pipe = urb->pipe;
schedule_work(&mdev->clear_work[channel].ws);
@@ -507,7 +508,8 @@ static void hdm_read_completion(struct urb *urb)
}
break;
case -EPIPE:
- dev_warn(dev, "Broken IN pipe detected\n");
+ dev_warn(dev, "Broken pipe on ep%02x\n",
+ mdev->ep_address[channel]);
mdev->is_channel_healthy[channel] = false;
mdev->clear_work[channel].pipe = urb->pipe;
schedule_work(&mdev->clear_work[channel].ws);
@@ -517,7 +519,8 @@ static void hdm_read_completion(struct urb *urb)
mbo->status = MBO_E_CLOSE;
break;
case -EOVERFLOW:
- dev_warn(dev, "Babble on IN pipe detected\n");
+ dev_warn(dev, "Babble on ep%02x\n",
+ mdev->ep_address[channel]);
break;
}
}
@@ -549,7 +552,6 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
{
struct most_dev *mdev;
struct most_channel_config *conf;
- struct device *dev;
int retval = 0;
struct urb *urb;
unsigned long length;
@@ -562,14 +564,18 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
mdev = to_mdev(iface);
conf = &mdev->conf[channel];
- dev = &mdev->usb_device->dev;
- if (!mdev->usb_device)
- return -ENODEV;
+ mutex_lock(&mdev->io_mutex);
+ if (!mdev->usb_device) {
+ retval = -ENODEV;
+ goto _exit;
+ }
urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
+ if (!urb) {
+ retval = -ENOMEM;
+ goto _exit;
+ }
if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
hdm_add_padding(mdev, channel, mbo)) {
@@ -589,7 +595,8 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
length,
hdm_write_completion,
mbo);
- if (conf->data_type != MOST_CH_ISOC)
+ if (conf->data_type != MOST_CH_ISOC &&
+ conf->data_type != MOST_CH_SYNC)
urb->transfer_flags |= URB_ZERO_PACKET;
} else {
usb_fill_bulk_urb(urb, mdev->usb_device,
@@ -606,18 +613,37 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
- dev_err(dev, "URB submit failed with error %d.\n", retval);
+ dev_err(&mdev->usb_device->dev,
+ "URB submit failed with error %d.\n", retval);
goto _error_1;
}
- return 0;
+ goto _exit;
_error_1:
usb_unanchor_urb(urb);
_error:
usb_free_urb(urb);
+_exit:
+ mutex_unlock(&mdev->io_mutex);
return retval;
}
+static void *hdm_dma_alloc(struct mbo *mbo, u32 size)
+{
+ struct most_dev *mdev = to_mdev(mbo->ifp);
+
+ return usb_alloc_coherent(mdev->usb_device, size, GFP_KERNEL,
+ &mbo->bus_address);
+}
+
+static void hdm_dma_free(struct mbo *mbo, u32 size)
+{
+ struct most_dev *mdev = to_mdev(mbo->ifp);
+
+ usb_free_coherent(mdev->usb_device, size, mbo->virt_address,
+ mbo->bus_address);
+}
+
/**
* hdm_configure_channel - receive channel configuration from core
* @iface: interface
@@ -1027,11 +1053,14 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
mdev->iface.mod = hdm_usb_fops.owner;
+ mdev->iface.driver_dev = &interface->dev;
mdev->iface.interface = ITYPE_USB;
mdev->iface.configure = hdm_configure_channel;
mdev->iface.request_netinfo = hdm_request_netinfo;
mdev->iface.enqueue = hdm_enqueue;
mdev->iface.poison_channel = hdm_poison_channel;
+ mdev->iface.dma_alloc = hdm_dma_alloc;
+ mdev->iface.dma_free = hdm_dma_free;
mdev->iface.description = mdev->description;
mdev->iface.num_channels = num_endpoints;
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 9d7e747519d9..cf342eb58e10 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -73,8 +73,6 @@ static int comp_vdev_open(struct file *filp)
struct most_video_dev *mdev = video_drvdata(filp);
struct comp_fh *fh;
- v4l2_info(&mdev->v4l2_dev, "comp_vdev_open()\n");
-
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
break;
@@ -122,8 +120,6 @@ static int comp_vdev_close(struct file *filp)
struct most_video_dev *mdev = fh->mdev;
struct mbo *mbo, *tmp;
- v4l2_info(&mdev->v4l2_dev, "comp_vdev_close()\n");
-
/*
* We need to put MBOs back before we call most_stop_channel()
* to deallocate MBOs.
@@ -250,8 +246,6 @@ static int vidioc_querycap(struct file *file, void *priv,
struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
- v4l2_info(&mdev->v4l2_dev, "vidioc_querycap()\n");
-
strlcpy(cap->driver, "v4l2_component", sizeof(cap->driver));
strlcpy(cap->card, "MOST", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
@@ -267,11 +261,6 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct comp_fh *fh = priv;
- struct most_video_dev *mdev = fh->mdev;
-
- v4l2_info(&mdev->v4l2_dev, "vidioc_enum_fmt_vid_cap() %d\n", f->index);
-
if (f->index)
return -EINVAL;
@@ -286,11 +275,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct comp_fh *fh = priv;
- struct most_video_dev *mdev = fh->mdev;
-
- v4l2_info(&mdev->v4l2_dev, "vidioc_g_fmt_vid_cap()\n");
-
comp_set_format_struct(f);
return 0;
}
@@ -315,11 +299,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
- struct comp_fh *fh = priv;
- struct most_video_dev *mdev = fh->mdev;
-
- v4l2_info(&mdev->v4l2_dev, "vidioc_g_std()\n");
-
*norm = V4L2_STD_UNKNOWN;
return 0;
}
@@ -355,8 +334,6 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
- v4l2_info(&mdev->v4l2_dev, "vidioc_s_input(%d)\n", index);
-
if (index >= V4L2_CMP_MAX_INPUT)
return -EINVAL;
mdev->ctrl_input = index;
@@ -435,8 +412,6 @@ static int comp_register_videodev(struct most_video_dev *mdev)
{
int ret;
- v4l2_info(&mdev->v4l2_dev, "comp_register_videodev()\n");
-
init_waitqueue_head(&mdev->wait_data);
/* allocate and fill v4l2 video struct */
@@ -465,8 +440,6 @@ static int comp_register_videodev(struct most_video_dev *mdev)
static void comp_unregister_videodev(struct most_video_dev *mdev)
{
- v4l2_info(&mdev->v4l2_dev, "comp_unregister_videodev()\n");
-
video_unregister_device(mdev->vdev);
}
@@ -485,8 +458,6 @@ static int comp_probe_channel(struct most_interface *iface, int channel_idx,
int ret;
struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
- pr_info("comp_probe_channel(%s)\n", name);
-
if (mdev) {
pr_err("channel already linked\n");
return -EEXIST;
@@ -531,7 +502,6 @@ static int comp_probe_channel(struct most_interface *iface, int channel_idx,
spin_lock_irq(&list_lock);
list_add(&mdev->list, &video_devices);
spin_unlock_irq(&list_lock);
- v4l2_info(&mdev->v4l2_dev, "comp_probe_channel() done\n");
return 0;
err_unreg:
@@ -550,8 +520,6 @@ static int comp_disconnect_channel(struct most_interface *iface,
return -ENOENT;
}
- v4l2_info(&mdev->v4l2_dev, "comp_disconnect_channel()\n");
-
spin_lock_irq(&list_lock);
list_del(&mdev->list);
spin_unlock_irq(&list_lock);
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index 515c7cbdd15e..6b13d85d9d34 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -75,7 +75,6 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <10000000>;
- m25p,chunked-io = <32>;
partition@0 {
label = "u-boot";
@@ -97,7 +96,7 @@
partition@50000 {
label = "firmware";
- reg = <0x50000 0xFB0000>;
+ reg = <0x50000 0x1FB0000>;
};
};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index ebcaa8b1fc81..eb3966b7f033 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -60,30 +60,35 @@
reg = <0x100 0x100>;
};
- gpio@600 {
+ gpio: gpio@600 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "mtk,mt7621-gpio";
+ compatible = "mediatek,mt7621-gpio";
reg = <0x600 0x100>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
gpio0: bank@0 {
reg = <0>;
- compatible = "mtk,mt7621-gpio-bank";
+ compatible = "mediatek,mt7621-gpio-bank";
gpio-controller;
#gpio-cells = <2>;
};
gpio1: bank@1 {
reg = <1>;
- compatible = "mtk,mt7621-gpio-bank";
+ compatible = "mediatek,mt7621-gpio-bank";
gpio-controller;
#gpio-cells = <2>;
};
gpio2: bank@2 {
reg = <2>;
- compatible = "mtk,mt7621-gpio-bank";
+ compatible = "mediatek,mt7621-gpio-bank";
gpio-controller;
#gpio-cells = <2>;
};
@@ -429,10 +434,11 @@
0x01000000 0 0x00000000 0x1e160000 0 0x00010000 /* io space */
>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH
- GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH
- GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xF0000 0 0 1>;
+ interrupt-map = <0x10000 0 0 1 &gic GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0x20000 0 0 1 &gic GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0x30000 0 0 1 &gic GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
index 38ba0c040aba..40a7d47be913 100644
--- a/drivers/staging/mt7621-eth/ethtool.c
+++ b/drivers/staging/mt7621-eth/ethtool.c
@@ -13,13 +13,35 @@
*/
#include "mtk_eth_soc.h"
+#include "ethtool.h"
-static const char mtk_gdma_str[][ETH_GSTRING_LEN] = {
-#define _FE(x...) # x,
-MTK_STAT_REG_DECLARE
-#undef _FE
+struct mtk_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int idx;
};
+#define MTK_HW_STAT(stat) { \
+ .name = #stat, \
+ .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
+}
+
+static const struct mtk_stat mtk_ethtool_hw_stats[] = {
+ MTK_HW_STAT(tx_bytes),
+ MTK_HW_STAT(tx_packets),
+ MTK_HW_STAT(tx_skip),
+ MTK_HW_STAT(tx_collisions),
+ MTK_HW_STAT(rx_bytes),
+ MTK_HW_STAT(rx_packets),
+ MTK_HW_STAT(rx_overflow),
+ MTK_HW_STAT(rx_fcs_errors),
+ MTK_HW_STAT(rx_short_errors),
+ MTK_HW_STAT(rx_long_errors),
+ MTK_HW_STAT(rx_checksum_errors),
+ MTK_HW_STAT(rx_flow_control_packets),
+};
+
+#define MTK_HW_STATS_LEN ARRAY_SIZE(mtk_ethtool_hw_stats)
+
static int mtk_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -52,7 +74,8 @@ static int mtk_set_link_ksettings(struct net_device *dev,
mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
mac->phy_flags = MTK_PHY_FLAG_PORT;
} else if (mac->hw->mii_bus) {
- mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus, cmd->base.phy_address);
+ mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
+ cmd->base.phy_address);
if (!mac->phy_dev)
return -ENODEV;
mac->phy_flags = MTK_PHY_FLAG_ATTACH;
@@ -62,7 +85,6 @@ static int mtk_set_link_ksettings(struct net_device *dev,
}
return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
-
}
static void mtk_get_drvinfo(struct net_device *dev,
@@ -75,7 +97,7 @@ static void mtk_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
- info->n_stats = ARRAY_SIZE(mtk_gdma_str);
+ info->n_stats = MTK_HW_STATS_LEN;
}
static u32 mtk_get_msglevel(struct net_device *dev)
@@ -154,9 +176,15 @@ static void mtk_get_ringparam(struct net_device *dev,
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
+ int i;
+
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *mtk_gdma_str, sizeof(mtk_gdma_str));
+ for (i = 0; i < MTK_HW_STATS_LEN; i++) {
+ memcpy(data, mtk_ethtool_hw_stats[i].name,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
break;
}
}
@@ -165,7 +193,7 @@ static int mtk_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(mtk_gdma_str);
+ return MTK_HW_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -176,7 +204,6 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_hw_stats *hwstats = mac->hw_stats;
- u64 *data_src, *data_dst;
unsigned int start;
int i;
@@ -188,12 +215,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
}
do {
- data_src = &hwstats->tx_bytes;
- data_dst = data;
start = u64_stats_fetch_begin_irq(&hwstats->syncp);
-
- for (i = 0; i < ARRAY_SIZE(mtk_gdma_str); i++)
- *data_dst++ = *data_src++;
+ for (i = 0; i < MTK_HW_STATS_LEN; i++)
+ data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
index 1766939e2101..70f7e5481952 100644
--- a/drivers/staging/mt7621-eth/gsw_mt7620.h
+++ b/drivers/staging/mt7621-eth/gsw_mt7620.h
@@ -252,8 +252,8 @@ struct mt7620_gsw {
};
/* switch register I/O wrappers */
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned reg);
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned reg);
+void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
+u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
/* the callback used by the driver core to bringup the switch */
int mtk_gsw_init(struct mtk_eth *eth);
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
index ce8d7d7577c7..2c07b559bed7 100644
--- a/drivers/staging/mt7621-eth/gsw_mt7621.c
+++ b/drivers/staging/mt7621-eth/gsw_mt7621.c
@@ -24,13 +24,13 @@
#include "mtk_eth_soc.h"
#include "gsw_mt7620.h"
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned reg)
+void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
{
iowrite32(val, gsw->base + reg);
}
EXPORT_SYMBOL_GPL(mtk_switch_w32);
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned reg)
+u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
{
return ioread32(gsw->base + reg);
}
@@ -201,20 +201,20 @@ static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
/* This is copied from mt7530_apply_config in libreCMC driver */
{
int i;
+
for (i = 0; i < MT7530_NUM_PORTS; i++)
mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT), 0x00ff0000);
+ mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
+ 0x00ff0000);
for (i = 0; i < MT7530_NUM_PORTS; i++)
mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
-
}
/* enable irq */
mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
-
}
static const struct of_device_id mediatek_gsw_match[] = {
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
index 9d713078ef90..7ad0c4141205 100644
--- a/drivers/staging/mt7621-eth/mdio.c
+++ b/drivers/staging/mt7621-eth/mdio.c
@@ -57,6 +57,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
}
}
}
+ spin_unlock_irqrestore(&eth->phy->lock, flags);
}
int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
@@ -130,6 +131,7 @@ static int mtk_phy_connect(struct mtk_mac *mac)
}
} else if (eth->mii_bus) {
struct phy_device *phy;
+
phy = mdiobus_get_phy(eth->mii_bus, i);
if (phy) {
phy_init(eth, mac, phy);
@@ -160,7 +162,9 @@ static void mtk_phy_disconnect(struct mtk_mac *mac)
} else if (eth->phy->phy[i]) {
phy_disconnect(eth->phy->phy[i]);
} else if (eth->mii_bus) {
- struct phy_device *phy = mdiobus_get_phy(eth->mii_bus, i);
+ struct phy_device *phy =
+ mdiobus_get_phy(eth->mii_bus, i);
+
if (phy)
phy_detach(phy);
}
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
index cbc7339843a5..2c7a2e666bfb 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c
@@ -82,12 +82,12 @@ static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
static const u16 *mtk_reg_table = mtk_reg_table_default;
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
{
__raw_writel(val, eth->base + reg);
}
-u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
+u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
{
return __raw_readl(eth->base + reg);
}
@@ -315,7 +315,7 @@ static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
ring->rx_ring_size = eth->soc->dma_ring_size;
ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!ring->rx_data)
goto no_rx_mem;
@@ -325,10 +325,10 @@ static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
goto no_rx_mem;
}
- ring->rx_dma = dma_alloc_coherent(eth->dev,
- ring->rx_ring_size * sizeof(*ring->rx_dma),
- &ring->rx_phys,
- GFP_ATOMIC | __GFP_ZERO);
+ ring->rx_dma =
+ dma_alloc_coherent(eth->dev,
+ ring->rx_ring_size * sizeof(*ring->rx_dma),
+ &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
if (!ring->rx_dma)
goto no_rx_mem;
@@ -768,9 +768,8 @@ err_dma:
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
- unsigned int phy_ring_head, phy_ring_tail;
+ dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
int cnt = eth->soc->dma_ring_size;
- dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dev,
@@ -1195,7 +1194,6 @@ static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
int total = 0, done[MTK_MAX_DEVS];
unsigned int bytes[MTK_MAX_DEVS];
u32 cpu, dma;
- static int condition;
int i;
memset(done, 0, sizeof(done));
@@ -1220,10 +1218,8 @@ static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
tx_buf = mtk_desc_to_tx_buf(ring, desc);
skb = tx_buf->skb;
- if (!skb) {
- condition = 1;
+ if (!skb)
break;
- }
if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
bytes[mac] += skb->len;
@@ -1352,14 +1348,14 @@ static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
MAX_SKB_FRAGS);
ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!ring->tx_buf)
goto no_tx_mem;
- ring->tx_dma = dma_alloc_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- &ring->tx_phys,
- GFP_ATOMIC | __GFP_ZERO);
+ ring->tx_dma =
+ dma_alloc_coherent(eth->dev,
+ ring->tx_ring_size * sizeof(*ring->tx_dma),
+ &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
if (!ring->tx_dma)
goto no_tx_mem;
@@ -1540,8 +1536,8 @@ static void mtk_tx_timeout(struct net_device *dev)
if (eth->soc->dma_type & MTK_PDMA) {
netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
- netif_info(eth, drv, dev, "tx_ring=%d, "
- "base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
+ netif_info(eth, drv, dev,
+ "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
@@ -1552,8 +1548,8 @@ static void mtk_tx_timeout(struct net_device *dev)
if (eth->soc->dma_type & MTK_QDMA) {
netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
mtk_r32(eth, MTK_QDMA_GLO_CFG));
- netif_info(eth, drv, dev, "tx_ring=%d, "
- "ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
+ netif_info(eth, drv, dev,
+ "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
0, mtk_r32(eth, MTK_QTX_CTX_PTR),
mtk_r32(eth, MTK_QTX_DTX_PTR),
mtk_r32(eth, MTK_QTX_CRX_PTR),
@@ -2014,8 +2010,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
mac->hw_stats = devm_kzalloc(eth->dev,
- sizeof(*mac->hw_stats),
- GFP_KERNEL);
+ sizeof(*mac->hw_stats),
+ GFP_KERNEL);
if (!mac->hw_stats)
return -ENOMEM;
spin_lock_init(&mac->hw_stats->stats_lock);
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
index 443f88d8af65..e6ed80433f49 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.h
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.h
@@ -501,21 +501,7 @@ struct mtk_soc_data {
u32 has_switch:1;
};
-/* ugly macro hack to make sure hw_stats and ethtool strings are consistent */
#define MTK_STAT_OFFSET 0x40
-#define MTK_STAT_REG_DECLARE \
- _FE(tx_bytes) \
- _FE(tx_packets) \
- _FE(tx_skip) \
- _FE(tx_collisions) \
- _FE(rx_bytes) \
- _FE(rx_packets) \
- _FE(rx_overflow) \
- _FE(rx_fcs_errors) \
- _FE(rx_short_errors) \
- _FE(rx_long_errors) \
- _FE(rx_checksum_errors) \
- _FE(rx_flow_control_packets)
/* struct mtk_hw_stats - the structure that holds the traffic statistics.
* @stats_lock: make sure that stats operations are atomic
@@ -531,9 +517,18 @@ struct mtk_hw_stats {
u32 reg_offset;
struct u64_stats_sync syncp;
-#define _FE(x) u64 x;
- MTK_STAT_REG_DECLARE
-#undef _FE
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_skip;
+ u64 tx_collisions;
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_overflow;
+ u64 rx_fcs_errors;
+ u64 rx_short_errors;
+ u64 rx_long_errors;
+ u64 rx_checksum_errors;
+ u64 rx_flow_control_packets;
};
/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
@@ -706,8 +701,8 @@ void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
/* register i/o wrappers */
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
-u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
+u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
/* default clock calibration handler */
int mtk_set_clock_cycle(struct mtk_eth *eth);
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
index 743c0eed89b6..5d63b5d96f6b 100644
--- a/drivers/staging/mt7621-eth/soc_mt7621.c
+++ b/drivers/staging/mt7621-eth/soc_mt7621.c
@@ -50,7 +50,6 @@
#define GSW_REG_GDMA2_MAC_ADRL 0x1508
#define GSW_REG_GDMA2_MAC_ADRH 0x150C
-
#define MT7621_MTK_RST_GL 0x04
#define MT7620_MTK_INT_STATUS2 0x08
@@ -108,13 +107,15 @@ static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
spin_lock_irqsave(&mac->hw->page_lock, flags);
if (mac->id == 0) {
- mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1], GSW_REG_GDMA1_MAC_ADRH);
+ mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
+ GSW_REG_GDMA1_MAC_ADRH);
mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
(hwaddr[4] << 8) | hwaddr[5],
GSW_REG_GDMA1_MAC_ADRL);
}
if (mac->id == 1) {
- mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1], GSW_REG_GDMA2_MAC_ADRH);
+ mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
+ GSW_REG_GDMA2_MAC_ADRH);
mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
(hwaddr[4] << 8) | hwaddr[5],
GSW_REG_GDMA2_MAC_ADRL);
diff --git a/drivers/staging/mt7621-gpio/TODO b/drivers/staging/mt7621-gpio/TODO
index 71439054e2e4..674930a10716 100644
--- a/drivers/staging/mt7621-gpio/TODO
+++ b/drivers/staging/mt7621-gpio/TODO
@@ -1,5 +1,3 @@
-
- general code review and clean up
-- ensure device-tree requirements are documented
Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-gpio/gpio-mt7621.c b/drivers/staging/mt7621-gpio/gpio-mt7621.c
index 51235687ddb6..0c4fb4a1b4a9 100644
--- a/drivers/staging/mt7621-gpio/gpio-mt7621.c
+++ b/drivers/staging/mt7621-gpio/gpio-mt7621.c
@@ -1,24 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2013 John Crispin <blogic@openwrt.org>
*/
-#include <linux/io.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
-#include <linux/spinlock.h>
-#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
-#define MTK_MAX_BANK 3
+#define MTK_BANK_CNT 3
#define MTK_BANK_WIDTH 32
+#define PIN_MASK(nr) (1UL << ((nr % MTK_BANK_WIDTH)))
enum mediatek_gpio_reg {
GPIO_REG_CTRL = 0,
@@ -34,42 +33,47 @@ enum mediatek_gpio_reg {
GPIO_REG_EDGE,
};
-static void __iomem *mediatek_gpio_membase;
-static int mediatek_gpio_irq;
-static struct irq_domain *mediatek_gpio_irq_domain;
-
-static struct mtk_gc {
+struct mtk_gc {
struct gpio_chip chip;
spinlock_t lock;
int bank;
u32 rising;
u32 falling;
-} *gc_map[MTK_MAX_BANK];
-
-static inline struct mtk_gc
-*to_mediatek_gpio(struct gpio_chip *chip)
-{
- struct mtk_gc *mgc;
+};
- mgc = container_of(chip, struct mtk_gc, chip);
+struct mtk_data {
+ void __iomem *gpio_membase;
+ int gpio_irq;
+ struct irq_domain *gpio_irq_domain;
+ struct mtk_gc gc_map[MTK_BANK_CNT];
+};
- return mgc;
+static inline struct mtk_gc *
+to_mediatek_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct mtk_gc, chip);
}
static inline void
mtk_gpio_w32(struct mtk_gc *rg, u8 reg, u32 val)
{
- iowrite32(val, mediatek_gpio_membase + (reg * 0x10) + (rg->bank * 0x4));
+ struct mtk_data *gpio_data = gpiochip_get_data(&rg->chip);
+ u32 offset = (reg * 0x10) + (rg->bank * 0x4);
+
+ iowrite32(val, gpio_data->gpio_membase + offset);
}
static inline u32
mtk_gpio_r32(struct mtk_gc *rg, u8 reg)
{
- return ioread32(mediatek_gpio_membase + (reg * 0x10) + (rg->bank * 0x4));
+ struct mtk_data *gpio_data = gpiochip_get_data(&rg->chip);
+ u32 offset = (reg * 0x10) + (rg->bank * 0x4);
+
+ return ioread32(gpio_data->gpio_membase + offset);
}
static void
-mediatek_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+mediatek_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct mtk_gc *rg = to_mediatek_gpio(chip);
@@ -77,7 +81,7 @@ mediatek_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
}
static int
-mediatek_gpio_get(struct gpio_chip *chip, unsigned offset)
+mediatek_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct mtk_gc *rg = to_mediatek_gpio(chip);
@@ -85,7 +89,7 @@ mediatek_gpio_get(struct gpio_chip *chip, unsigned offset)
}
static int
-mediatek_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+mediatek_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct mtk_gc *rg = to_mediatek_gpio(chip);
unsigned long flags;
@@ -102,7 +106,7 @@ mediatek_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int
mediatek_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+ unsigned int offset, int value)
{
struct mtk_gc *rg = to_mediatek_gpio(chip);
unsigned long flags;
@@ -119,43 +123,37 @@ mediatek_gpio_direction_output(struct gpio_chip *chip,
}
static int
-mediatek_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+mediatek_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct mtk_gc *rg = to_mediatek_gpio(chip);
- unsigned long flags;
- u32 t;
+ u32 t = mtk_gpio_r32(rg, GPIO_REG_CTRL);
- spin_lock_irqsave(&rg->lock, flags);
- t = mtk_gpio_r32(rg, GPIO_REG_CTRL);
- spin_unlock_irqrestore(&rg->lock, flags);
-
- if (t & BIT(offset))
- return 0;
-
- return 1;
+ return (t & BIT(offset)) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
}
static int
-mediatek_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
+mediatek_gpio_to_irq(struct gpio_chip *chip, unsigned int pin)
{
+ struct mtk_data *gpio_data = gpiochip_get_data(chip);
struct mtk_gc *rg = to_mediatek_gpio(chip);
- return irq_create_mapping(mediatek_gpio_irq_domain, pin + (rg->bank * MTK_BANK_WIDTH));
+ return irq_create_mapping(gpio_data->gpio_irq_domain,
+ pin + (rg->bank * MTK_BANK_WIDTH));
}
static int
mediatek_gpio_bank_probe(struct platform_device *pdev, struct device_node *bank)
{
+ struct mtk_data *gpio_data = dev_get_drvdata(&pdev->dev);
const __be32 *id = of_get_property(bank, "reg", NULL);
- struct mtk_gc *rg = devm_kzalloc(&pdev->dev,
- sizeof(struct mtk_gc), GFP_KERNEL);
+ struct mtk_gc *rg;
+ int ret;
- if (!rg || !id || be32_to_cpu(*id) > MTK_MAX_BANK)
- return -ENOMEM;
+ if (!id || be32_to_cpu(*id) >= MTK_BANK_CNT)
+ return -EINVAL;
- gc_map[be32_to_cpu(*id)] = rg;
-
- memset(rg, 0, sizeof(struct mtk_gc));
+ rg = &gpio_data->gc_map[be32_to_cpu(*id)];
+ memset(rg, 0, sizeof(*rg));
spin_lock_init(&rg->lock);
@@ -169,25 +167,33 @@ mediatek_gpio_bank_probe(struct platform_device *pdev, struct device_node *bank)
rg->chip.get_direction = mediatek_gpio_get_direction;
rg->chip.get = mediatek_gpio_get;
rg->chip.set = mediatek_gpio_set;
- if (mediatek_gpio_irq_domain)
+ if (gpio_data->gpio_irq_domain)
rg->chip.to_irq = mediatek_gpio_to_irq;
rg->bank = be32_to_cpu(*id);
+ ret = devm_gpiochip_add_data(&pdev->dev, &rg->chip, gpio_data);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpio %d, ret=%d\n",
+ rg->chip.ngpio, ret);
+ return ret;
+ }
+
/* set polarity to low for all gpios */
mtk_gpio_w32(rg, GPIO_REG_POL, 0);
dev_info(&pdev->dev, "registering %d gpios\n", rg->chip.ngpio);
- return gpiochip_add(&rg->chip);
+ return 0;
}
static void
mediatek_gpio_irq_handler(struct irq_desc *desc)
{
+ struct mtk_data *gpio_data = irq_desc_get_handler_data(desc);
int i;
- for (i = 0; i < MTK_MAX_BANK; i++) {
- struct mtk_gc *rg = gc_map[i];
+ for (i = 0; i < MTK_BANK_CNT; i++) {
+ struct mtk_gc *rg = &gpio_data->gc_map[i];
unsigned long pending;
int bit;
@@ -197,7 +203,8 @@ mediatek_gpio_irq_handler(struct irq_desc *desc)
pending = mtk_gpio_r32(rg, GPIO_REG_STAT);
for_each_set_bit(bit, &pending, MTK_BANK_WIDTH) {
- u32 map = irq_find_mapping(mediatek_gpio_irq_domain, (MTK_BANK_WIDTH * i) + bit);
+ u32 map = irq_find_mapping(gpio_data->gpio_irq_domain,
+ (MTK_BANK_WIDTH * i) + bit);
generic_handle_irq(map);
mtk_gpio_w32(rg, GPIO_REG_STAT, BIT(bit));
@@ -208,52 +215,53 @@ mediatek_gpio_irq_handler(struct irq_desc *desc)
static void
mediatek_gpio_irq_unmask(struct irq_data *d)
{
+ struct mtk_data *gpio_data = irq_data_get_irq_chip_data(d);
int pin = d->hwirq;
- int bank = pin / 32;
- struct mtk_gc *rg = gc_map[bank];
+ int bank = pin / MTK_BANK_WIDTH;
+ struct mtk_gc *rg = &gpio_data->gc_map[bank];
unsigned long flags;
u32 rise, fall;
if (!rg)
return;
+ spin_lock_irqsave(&rg->lock, flags);
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
-
- spin_lock_irqsave(&rg->lock, flags);
- mtk_gpio_w32(rg, GPIO_REG_REDGE, rise | (BIT(d->hwirq) & rg->rising));
- mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(d->hwirq) & rg->falling));
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise | (PIN_MASK(pin) & rg->rising));
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (PIN_MASK(pin) & rg->falling));
spin_unlock_irqrestore(&rg->lock, flags);
}
static void
mediatek_gpio_irq_mask(struct irq_data *d)
{
+ struct mtk_data *gpio_data = irq_data_get_irq_chip_data(d);
int pin = d->hwirq;
- int bank = pin / 32;
- struct mtk_gc *rg = gc_map[bank];
+ int bank = pin / MTK_BANK_WIDTH;
+ struct mtk_gc *rg = &gpio_data->gc_map[bank];
unsigned long flags;
u32 rise, fall;
if (!rg)
return;
+ spin_lock_irqsave(&rg->lock, flags);
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
-
- spin_lock_irqsave(&rg->lock, flags);
- mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(d->hwirq));
- mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(d->hwirq));
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~PIN_MASK(pin));
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~PIN_MASK(pin));
spin_unlock_irqrestore(&rg->lock, flags);
}
static int
mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
{
+ struct mtk_data *gpio_data = irq_data_get_irq_chip_data(d);
int pin = d->hwirq;
- int bank = pin / 32;
- struct mtk_gc *rg = gc_map[bank];
- u32 mask = BIT(d->hwirq);
+ int bank = pin / MTK_BANK_WIDTH;
+ struct mtk_gc *rg = &gpio_data->gc_map[bank];
+ u32 mask = PIN_MASK(pin);
if (!rg)
return -1;
@@ -287,16 +295,23 @@ static struct irq_chip mediatek_gpio_irq_chip = {
};
static int
-mediatek_gpio_gpio_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+mediatek_gpio_gpio_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
{
- irq_set_chip_and_handler(irq, &mediatek_gpio_irq_chip, handle_level_irq);
+ int ret;
+
+ ret = irq_set_chip_data(irq, d->host_data);
+ if (ret < 0)
+ return ret;
+ irq_set_chip_and_handler(irq, &mediatek_gpio_irq_chip,
+ handle_level_irq);
irq_set_handler_data(irq, d);
return 0;
}
static const struct irq_domain_ops irq_domain_ops = {
- .xlate = irq_domain_xlate_onecell,
+ .xlate = irq_domain_xlate_twocell,
.map = mediatek_gpio_gpio_map,
};
@@ -305,32 +320,41 @@ mediatek_gpio_probe(struct platform_device *pdev)
{
struct device_node *bank, *np = pdev->dev.of_node;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct mtk_data *gpio_data;
+
+ gpio_data = devm_kzalloc(&pdev->dev, sizeof(*gpio_data), GFP_KERNEL);
+ if (!gpio_data)
+ return -ENOMEM;
- mediatek_gpio_membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mediatek_gpio_membase))
- return PTR_ERR(mediatek_gpio_membase);
+ gpio_data->gpio_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio_data->gpio_membase))
+ return PTR_ERR(gpio_data->gpio_membase);
- mediatek_gpio_irq = irq_of_parse_and_map(np, 0);
- if (mediatek_gpio_irq) {
- mediatek_gpio_irq_domain = irq_domain_add_linear(np,
- MTK_MAX_BANK * MTK_BANK_WIDTH,
- &irq_domain_ops, NULL);
- if (!mediatek_gpio_irq_domain)
+ gpio_data->gpio_irq = irq_of_parse_and_map(np, 0);
+ if (gpio_data->gpio_irq) {
+ gpio_data->gpio_irq_domain = irq_domain_add_linear(np,
+ MTK_BANK_CNT * MTK_BANK_WIDTH,
+ &irq_domain_ops, gpio_data);
+ if (!gpio_data->gpio_irq_domain)
dev_err(&pdev->dev, "irq_domain_add_linear failed\n");
}
+ platform_set_drvdata(pdev, gpio_data);
+
for_each_child_of_node(np, bank)
- if (of_device_is_compatible(bank, "mtk,mt7621-gpio-bank"))
+ if (of_device_is_compatible(bank, "mediatek,mt7621-gpio-bank"))
mediatek_gpio_bank_probe(pdev, bank);
- if (mediatek_gpio_irq_domain)
- irq_set_chained_handler(mediatek_gpio_irq, mediatek_gpio_irq_handler);
+ if (gpio_data->gpio_irq_domain)
+ irq_set_chained_handler_and_data(gpio_data->gpio_irq,
+ mediatek_gpio_irq_handler,
+ gpio_data);
return 0;
}
static const struct of_device_id mediatek_gpio_match[] = {
- { .compatible = "mtk,mt7621-gpio" },
+ { .compatible = "mediatek,mt7621-gpio" },
{},
};
MODULE_DEVICE_TABLE(of, mediatek_gpio_match);
@@ -343,10 +367,4 @@ static struct platform_driver mediatek_gpio_driver = {
},
};
-static int __init
-mediatek_gpio_init(void)
-{
- return platform_driver_register(&mediatek_gpio_driver);
-}
-
-subsys_initcall(mediatek_gpio_init);
+module_platform_driver(mediatek_gpio_driver);
diff --git a/drivers/staging/mt7621-gpio/mediatek,mt7621-gpio.txt b/drivers/staging/mt7621-gpio/mediatek,mt7621-gpio.txt
new file mode 100644
index 000000000000..30d8a0225aa1
--- /dev/null
+++ b/drivers/staging/mt7621-gpio/mediatek,mt7621-gpio.txt
@@ -0,0 +1,68 @@
+Mediatek SoC GPIO controller bindings
+
+The IP core used inside these SoCs has 3 banks of 32 GPIOs each.
+The registers of all the banks are interwoven inside one single IO range.
+We load one GPIO controller instance per bank. To make this possible
+we support 2 types of nodes. The parent node defines the memory I/O range and
+has 3 children, each describing a single bank. The GPIO controller can also receive
+interrupts on any of the GPIOs, either edge- or level-triggered. It then interrupts the CPU
+using GIC INT12.
+
+Required properties for the top level node:
+- compatible:
+ - "mediatek,mt7621-gpio" for Mediatek controllers
+- reg : Physical base address and length of the controller's registers
+- interrupt-parent : phandle of the parent interrupt controller.
+- interrupts : Interrupt specifier for the controller's interrupt.
+- interrupt-controller : Mark the device node as an interrupt controller.
+- #interrupt-cells : Should be 2. The first cell defines the interrupt number.
+ The second cell bits[3:0] is used to specify trigger type as follows:
+ - 1 = low-to-high edge triggered.
+ - 2 = high-to-low edge triggered.
+ - 4 = active high level-sensitive.
+ - 8 = active low level-sensitive.
+
+
+Required properties for the GPIO bank node:
+- compatible:
+ - "mediatek,mt7621-gpio-bank" for Mediatek banks
+- #gpio-cells : Should be two. The first cell is the GPIO pin number and the
+ second cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>.
+ Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
+- gpio-controller : Marks the device node as a GPIO controller.
+- reg : The id of the bank that the node describes.
+
+Example:
+ gpio@600 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "mediatek,mt7621-gpio";
+ reg = <0x600 0x100>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio0: bank@0 {
+ reg = <0>;
+ compatible = "mediatek,mt7621-gpio-bank";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio1: bank@1 {
+ reg = <1>;
+ compatible = "mediatek,mt7621-gpio-bank";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio2: bank@2 {
+ reg = <2>;
+ compatible = "mediatek,mt7621-gpio-bank";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
diff --git a/drivers/staging/mt7621-mmc/board.h b/drivers/staging/mt7621-mmc/board.h
index 33bfc7b9597a..a7d82f321b00 100644
--- a/drivers/staging/mt7621-mmc/board.h
+++ b/drivers/staging/mt7621-mmc/board.h
@@ -36,27 +36,10 @@
#ifndef __ARCH_ARM_MACH_BOARD_H
#define __ARCH_ARM_MACH_BOARD_H
-#include <generated/autoconf.h>
-#include <linux/pm.h>
-/* --- chhung */
-// #include <mach/mt6575.h>
-// #include <board-custom.h>
-/* end of chhung */
-
-typedef void (*sdio_irq_handler_t)(void*); /* external irq handler */
-typedef void (*pm_callback_t)(pm_message_t state, void *data);
-
#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */
#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */
#define MSDC_RST_PIN_EN (1 << 2) /* emmc reset pin is wired */
-#define MSDC_SDIO_IRQ (1 << 3) /* use internal sdio irq (bus) */
-#define MSDC_EXT_SDIO_IRQ (1 << 4) /* use external sdio irq */
#define MSDC_REMOVABLE (1 << 5) /* removable slot */
-#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */
-#define MSDC_HIGHSPEED (1 << 7) /* high-speed mode support */
-#define MSDC_UHS1 (1 << 8) /* uhs-1 mode support */
-#define MSDC_DDR (1 << 9) /* ddr mode support */
-
#define MSDC_SMPL_RISING (0)
#define MSDC_SMPL_FALLING (1)
@@ -67,71 +50,14 @@ typedef void (*pm_callback_t)(pm_message_t state, void *data);
#define MSDC_WP_PIN (3)
#define MSDC_RST_PIN (4)
-enum {
- MSDC_CLKSRC_48MHZ = 0,
-// MSDC_CLKSRC_26MHZ = 0,
-// MSDC_CLKSRC_197MHZ = 1,
-// MSDC_CLKSRC_208MHZ = 2
-};
-
struct msdc_hw {
- unsigned char clk_src; /* host clock source */
- unsigned char cmd_edge; /* command latch edge */
- unsigned char data_edge; /* data latch edge */
- unsigned char clk_drv; /* clock pad driving */
- unsigned char cmd_drv; /* command pad driving */
- unsigned char dat_drv; /* data pad driving */
- unsigned long flags; /* hardware capability flags */
- unsigned long data_pins; /* data pins */
- unsigned long data_offset; /* data address offset */
-
- /* config gpio pull mode */
- void (*config_gpio_pin)(int type, int pull);
-
- /* external power control for card */
- void (*ext_power_on)(void);
- void (*ext_power_off)(void);
+ unsigned char clk_src; /* host clock source */
+ unsigned long flags; /* hardware capability flags */
- /* external sdio irq operations */
- void (*request_sdio_eirq)(sdio_irq_handler_t sdio_irq_handler, void *data);
- void (*enable_sdio_eirq)(void);
- void (*disable_sdio_eirq)(void);
-
- /* external cd irq operations */
- void (*request_cd_eirq)(sdio_irq_handler_t cd_irq_handler, void *data);
- void (*enable_cd_eirq)(void);
- void (*disable_cd_eirq)(void);
- int (*get_cd_status)(void);
-
- /* power management callback for external module */
- void (*register_pm)(pm_callback_t pm_cb, void *data);
+ /* config gpio pull mode */
+ void (*config_gpio_pin)(int type, int pull);
};
extern struct msdc_hw msdc0_hw;
-extern struct msdc_hw msdc1_hw;
-extern struct msdc_hw msdc2_hw;
-extern struct msdc_hw msdc3_hw;
-
-/*GPS driver*/
-#define GPS_FLAG_FORCE_OFF 0x0001
-struct mt3326_gps_hardware {
- int (*ext_power_on)(int);
- int (*ext_power_off)(int);
-};
-extern struct mt3326_gps_hardware mt3326_gps_hw;
-
-/* NAND driver */
-struct mt6575_nand_host_hw {
- unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
- unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
- unsigned int nfi_cs_num; /* NFI_CS_NUM */
- unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
- unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
- unsigned int nand_ecc_size;
- unsigned int nand_ecc_bytes;
- unsigned int nand_ecc_mode;
-};
-extern struct mt6575_nand_host_hw mt6575_nand_hw;
#endif /* __ARCH_ARM_MACH_BOARD_H */
-
diff --git a/drivers/staging/mt7621-mmc/dbg.c b/drivers/staging/mt7621-mmc/dbg.c
index 4dc115b53993..6e4e223cddfb 100644
--- a/drivers/staging/mt7621-mmc/dbg.c
+++ b/drivers/staging/mt7621-mmc/dbg.c
@@ -32,7 +32,7 @@
* have been modified by MediaTek Inc. All revisions are subject to any receiver's
* applicable license agreements with MediaTek Inc.
*/
-
+
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -51,272 +51,237 @@
static char cmd_buf[256];
/* for debug zone */
-unsigned int sd_debug_zone[4]={
+unsigned int sd_debug_zone[4] = {
0,
0,
0,
0
};
-/* mode select */
-u32 dma_size[4]={
- 512,
- 512,
- 512,
- 512
-};
-msdc_mode drv_mode[4]={
- MODE_SIZE_DEP, /* using DMA or not depend on the size */
- MODE_SIZE_DEP,
- MODE_SIZE_DEP,
- MODE_SIZE_DEP
-};
-
-#if defined (MT6575_SD_DEBUG)
+#if defined(MT6575_SD_DEBUG)
/* for driver profile */
#define TICKS_ONE_MS (13000)
-u32 gpt_enable = 0;
-u32 sdio_pro_enable = 0; /* make sure gpt is enabled */
-u32 sdio_pro_time = 0; /* no more than 30s */
-struct sdio_profile sdio_perfomance = {0};
+u32 gpt_enable;
+u32 sdio_pro_enable; /* make sure gpt is enabled */
+u32 sdio_pro_time; /* no more than 30s */
+struct sdio_profile sdio_perfomance = {0};
#if 0 /* --- chhung */
void msdc_init_gpt(void)
{
- GPT_CONFIG config;
-
- config.num = GPT6;
- config.mode = GPT_FREE_RUN;
- config.clkSrc = GPT_CLK_SRC_SYS;
- config.clkDiv = GPT_CLK_DIV_1; /* 13MHz GPT6 */
-
- if (GPT_Config(config) == FALSE )
- return;
-
- GPT_Start(GPT6);
+ GPT_CONFIG config;
+
+ config.num = GPT6;
+ config.mode = GPT_FREE_RUN;
+ config.clkSrc = GPT_CLK_SRC_SYS;
+ config.clkDiv = GPT_CLK_DIV_1; /* 13MHz GPT6 */
+
+ if (GPT_Config(config) == FALSE)
+ return;
+
+ GPT_Start(GPT6);
}
#endif /* end of --- */
u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32)
{
- u32 ret = 0;
-
- if (new_H32 == old_H32) {
- ret = new_L32 - old_L32;
- } else if(new_H32 == (old_H32 + 1)) {
- if (new_L32 > old_L32) {
- printk("msdc old_L<0x%x> new_L<0x%x>\n", old_L32, new_L32);
- }
- ret = (0xffffffff - old_L32);
- ret += new_L32;
- } else {
- printk("msdc old_H<0x%x> new_H<0x%x>\n", old_H32, new_H32);
- }
-
- return ret;
+ u32 ret = 0;
+
+ if (new_H32 == old_H32) {
+ ret = new_L32 - old_L32;
+ } else if (new_H32 == (old_H32 + 1)) {
+ if (new_L32 > old_L32)
+ printk("msdc old_L<0x%x> new_L<0x%x>\n", old_L32, new_L32);
+ ret = (0xffffffff - old_L32);
+ ret += new_L32;
+ } else {
+ printk("msdc old_H<0x%x> new_H<0x%x>\n", old_H32, new_H32);
+ }
+
+ return ret;
}
-void msdc_sdio_profile(struct sdio_profile* result)
+void msdc_sdio_profile(struct sdio_profile *result)
{
- struct cmd_profile* cmd;
- u32 i;
-
- printk("sdio === performance dump ===\n");
- printk("sdio === total execute tick<%d> time<%dms> Tx<%dB> Rx<%dB>\n",
- result->total_tc, result->total_tc / TICKS_ONE_MS,
- result->total_tx_bytes, result->total_rx_bytes);
-
- /* CMD52 Dump */
- cmd = &result->cmd52_rx;
- printk("sdio === CMD52 Rx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count);
- cmd = &result->cmd52_tx;
- printk("sdio === CMD52 Tx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count);
-
- /* CMD53 Rx bytes + block mode */
- for (i=0; i<512; i++) {
- cmd = &result->cmd53_rx_byte[i];
- if (cmd->count) {
- printk("sdio<%6d><%3dB>_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10));
- }
- }
- for (i=0; i<100; i++) {
- cmd = &result->cmd53_rx_blk[i];
- if (cmd->count) {
- printk("sdio<%6d><%3d>B_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10));
- }
- }
-
- /* CMD53 Tx bytes + block mode */
- for (i=0; i<512; i++) {
- cmd = &result->cmd53_tx_byte[i];
- if (cmd->count) {
- printk("sdio<%6d><%3dB>_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10));
- }
- }
- for (i=0; i<100; i++) {
- cmd = &result->cmd53_tx_blk[i];
- if (cmd->count) {
- printk("sdio<%6d><%3d>B_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10));
- }
- }
-
- printk("sdio === performance dump done ===\n");
+ struct cmd_profile *cmd;
+ u32 i;
+
+ printk("sdio === performance dump ===\n");
+ printk("sdio === total execute tick<%d> time<%dms> Tx<%dB> Rx<%dB>\n",
+ result->total_tc, result->total_tc / TICKS_ONE_MS,
+ result->total_tx_bytes, result->total_rx_bytes);
+
+ /* CMD52 Dump */
+ cmd = &result->cmd52_rx;
+ printk("sdio === CMD52 Rx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count);
+ cmd = &result->cmd52_tx;
+ printk("sdio === CMD52 Tx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count);
+
+ /* CMD53 Rx bytes + block mode */
+ for (i = 0; i < 512; i++) {
+ cmd = &result->cmd53_rx_byte[i];
+ if (cmd->count) {
+ printk("sdio<%6d><%3dB>_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
+ cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
+ }
+ }
+ for (i = 0; i < 100; i++) {
+ cmd = &result->cmd53_rx_blk[i];
+ if (cmd->count) {
+ printk("sdio<%6d><%3d>B_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
+ cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
+ }
+ }
+
+ /* CMD53 Tx bytes + block mode */
+ for (i = 0; i < 512; i++) {
+ cmd = &result->cmd53_tx_byte[i];
+ if (cmd->count) {
+ printk("sdio<%6d><%3dB>_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
+ cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
+ }
+ }
+ for (i = 0; i < 100; i++) {
+ cmd = &result->cmd53_tx_blk[i];
+ if (cmd->count) {
+ printk("sdio<%6d><%3d>B_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
+ cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
+ }
+ }
+
+ printk("sdio === performance dump done ===\n");
}
//========= sdio command table ===========
void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks)
{
- struct sdio_profile* result = &sdio_perfomance;
- struct cmd_profile* cmd;
- u32 block;
-
- if (sdio_pro_enable == 0) {
- return;
- }
-
- if (opcode == 52) {
- cmd = bRx ? &result->cmd52_rx : &result->cmd52_tx;
- } else if (opcode == 53) {
- if (sizes < 512) {
- cmd = bRx ? &result->cmd53_rx_byte[sizes] : &result->cmd53_tx_byte[sizes];
- } else {
- block = sizes / 512;
- if (block >= 99) {
- printk("cmd53 error blocks\n");
- while(1);
- }
- cmd = bRx ? &result->cmd53_rx_blk[block] : &result->cmd53_tx_blk[block];
- }
- } else {
- return;
- }
-
- /* update the members */
- if (ticks > cmd->max_tc){
- cmd->max_tc = ticks;
- }
- if (cmd->min_tc == 0 || ticks < cmd->min_tc) {
- cmd->min_tc = ticks;
- }
- cmd->tot_tc += ticks;
- cmd->tot_bytes += sizes;
- cmd->count ++;
-
- if (bRx) {
- result->total_rx_bytes += sizes;
- } else {
- result->total_tx_bytes += sizes;
- }
- result->total_tc += ticks;
-
- /* dump when total_tc > 30s */
- if (result->total_tc >= sdio_pro_time * TICKS_ONE_MS * 1000) {
- msdc_sdio_profile(result);
- memset(result, 0 , sizeof(struct sdio_profile));
- }
+ struct sdio_profile *result = &sdio_perfomance;
+ struct cmd_profile *cmd;
+ u32 block;
+
+ if (sdio_pro_enable == 0)
+ return;
+
+ if (opcode == 52) {
+ cmd = bRx ? &result->cmd52_rx : &result->cmd52_tx;
+ } else if (opcode == 53) {
+ if (sizes < 512) {
+ cmd = bRx ? &result->cmd53_rx_byte[sizes] : &result->cmd53_tx_byte[sizes];
+ } else {
+ block = sizes / 512;
+ if (block >= 99) {
+ printk("cmd53 error blocks\n");
+ while (1)
+ ;
+ }
+ cmd = bRx ? &result->cmd53_rx_blk[block] : &result->cmd53_tx_blk[block];
+ }
+ } else {
+ return;
+ }
+
+ /* update the members */
+ if (ticks > cmd->max_tc)
+ cmd->max_tc = ticks;
+ if (cmd->min_tc == 0 || ticks < cmd->min_tc)
+ cmd->min_tc = ticks;
+ cmd->tot_tc += ticks;
+ cmd->tot_bytes += sizes;
+ cmd->count++;
+
+ if (bRx)
+ result->total_rx_bytes += sizes;
+ else
+ result->total_tx_bytes += sizes;
+ result->total_tc += ticks;
+
+ /* dump when total_tc > 30s */
+ if (result->total_tc >= sdio_pro_time * TICKS_ONE_MS * 1000) {
+ msdc_sdio_profile(result);
+ memset(result, 0, sizeof(struct sdio_profile));
+ }
}
//========== driver proc interface ===========
static int msdc_debug_proc_read(struct seq_file *s, void *p)
{
- seq_printf(s, "\n=========================================\n");
- seq_printf(s, "Index<0> + Id + Zone\n");
- seq_printf(s, "-> PWR<9> WRN<8> | FIO<7> OPS<6> FUN<5> CFG<4> | INT<3> RSP<2> CMD<1> DMA<0>\n");
- seq_printf(s, "-> echo 0 3 0x3ff >msdc_bebug -> host[3] debug zone set to 0x3ff\n");
+ seq_puts(s, "\n=========================================\n");
+ seq_puts(s, "Index<0> + Id + Zone\n");
+ seq_puts(s, "-> PWR<9> WRN<8> | FIO<7> OPS<6> FUN<5> CFG<4> | INT<3> RSP<2> CMD<1> DMA<0>\n");
+ seq_puts(s, "-> echo 0 3 0x3ff >msdc_bebug -> host[3] debug zone set to 0x3ff\n");
seq_printf(s, "-> MSDC[0] Zone: 0x%.8x\n", sd_debug_zone[0]);
seq_printf(s, "-> MSDC[1] Zone: 0x%.8x\n", sd_debug_zone[1]);
seq_printf(s, "-> MSDC[2] Zone: 0x%.8x\n", sd_debug_zone[2]);
seq_printf(s, "-> MSDC[3] Zone: 0x%.8x\n", sd_debug_zone[3]);
- seq_printf(s, "Index<1> + ID:4|Mode:4 + DMA_SIZE\n");
- seq_printf(s, "-> 0)PIO 1)DMA 2)SIZE\n");
- seq_printf(s, "-> echo 1 22 0x200 >msdc_bebug -> host[2] size mode, dma when >= 512\n");
- seq_printf(s, "-> MSDC[0] mode<%d> size<%d>\n", drv_mode[0], dma_size[0]);
- seq_printf(s, "-> MSDC[1] mode<%d> size<%d>\n", drv_mode[1], dma_size[1]);
- seq_printf(s, "-> MSDC[2] mode<%d> size<%d>\n", drv_mode[2], dma_size[2]);
- seq_printf(s, "-> MSDC[3] mode<%d> size<%d>\n", drv_mode[3], dma_size[3]);
-
- seq_printf(s, "Index<3> + SDIO_PROFILE + TIME\n");
- seq_printf(s, "-> echo 3 1 0x1E >msdc_bebug -> enable sdio_profile, 30s\n");
+ seq_puts(s, "Index<3> + SDIO_PROFILE + TIME\n");
+ seq_puts(s, "-> echo 3 1 0x1E >msdc_bebug -> enable sdio_profile, 30s\n");
seq_printf(s, "-> SDIO_PROFILE<%d> TIME<%ds>\n", sdio_pro_enable, sdio_pro_time);
- seq_printf(s, "=========================================\n\n");
+ seq_puts(s, "=========================================\n\n");
return 0;
}
-static ssize_t msdc_debug_proc_write(struct file *file,
- const char __user *buf, size_t count, loff_t *data)
+static ssize_t msdc_debug_proc_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *data)
{
int ret;
-
- int cmd, p1, p2;
+
+ int cmd, p1, p2;
int id, zone;
- int mode, size;
-
- if (count == 0)return -1;
- if(count > 255)count = 255;
-
- ret = copy_from_user(cmd_buf, buf, count);
- if (ret < 0)return -1;
-
+ int mode, size;
+
+ if (count == 0)
+ return -1;
+ if (count > 255)
+ count = 255;
+
+ if (copy_from_user(cmd_buf, buf, count))
+ return -EFAULT;
+
cmd_buf[count] = '\0';
printk("msdc Write %s\n", cmd_buf);
sscanf(cmd_buf, "%x %x %x", &cmd, &p1, &p2);
-
- if(cmd == SD_TOOL_ZONE) {
- id = p1; zone = p2; zone &= 0x3ff;
+
+ if (cmd == SD_TOOL_ZONE) {
+ id = p1;
+ zone = p2;
+ zone &= 0x3ff;
printk("msdc host_id<%d> zone<0x%.8x>\n", id, zone);
- if(id >=0 && id<=3){
+ if (id >= 0 && id <= 3) {
sd_debug_zone[id] = zone;
- }
- else if(id == 4){
+ } else if (id == 4) {
sd_debug_zone[0] = sd_debug_zone[1] = zone;
sd_debug_zone[2] = sd_debug_zone[3] = zone;
- }
- else{
+ } else {
printk("msdc host_id error when set debug zone\n");
}
- } else if (cmd == SD_TOOL_DMA_SIZE) {
- id = p1>>4; mode = (p1&0xf); size = p2;
- if(id >=0 && id<=3){
- drv_mode[id] = mode;
- dma_size[id] = p2;
- }
- else if(id == 4){
- drv_mode[0] = drv_mode[1] = mode;
- drv_mode[2] = drv_mode[3] = mode;
- dma_size[0] = dma_size[1] = p2;
- dma_size[2] = dma_size[3] = p2;
- }
- else{
- printk("msdc host_id error when select mode\n");
- }
} else if (cmd == SD_TOOL_SDIO_PROFILE) {
if (p1 == 1) { /* enable profile */
if (gpt_enable == 0) {
// msdc_init_gpt(); /* --- by chhung */
gpt_enable = 1;
- }
+ }
sdio_pro_enable = 1;
- if (p2 == 0) p2 = 1; if (p2 >= 30) p2 = 30;
- sdio_pro_time = p2 ;
- } else if (p1 == 0) {
+ if (p2 == 0)
+ p2 = 1;
+ if (p2 >= 30)
+ p2 = 30;
+ sdio_pro_time = p2;
+ } else if (p1 == 0) {
/* todo */
sdio_pro_enable = 0;
- }
+ }
}
-
+
return count;
}
@@ -326,22 +291,17 @@ static int msdc_debug_show(struct inode *inode, struct file *file)
}
static const struct file_operations msdc_debug_fops = {
- .owner = THIS_MODULE,
- .open = msdc_debug_show,
- .read = seq_read,
- .write = msdc_debug_proc_write,
- .llseek = seq_lseek,
- .release = single_release,
+ .owner = THIS_MODULE,
+ .open = msdc_debug_show,
+ .read = seq_read,
+ .write = msdc_debug_proc_write,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-int msdc_debug_proc_init(void)
-{
- struct proc_dir_entry *de = proc_create("msdc_debug", 0667, NULL, &msdc_debug_fops);
-
- if (!de || IS_ERR(de))
- printk("!! Create MSDC debug PROC fail !!\n");
-
- return 0 ;
+void msdc_debug_proc_init(void)
+{
+ proc_create("msdc_debug", 0660, NULL, &msdc_debug_fops);
}
EXPORT_SYMBOL_GPL(msdc_debug_proc_init);
#endif
diff --git a/drivers/staging/mt7621-mmc/dbg.h b/drivers/staging/mt7621-mmc/dbg.h
index e58c4312933e..5a25a69b00c9 100644
--- a/drivers/staging/mt7621-mmc/dbg.h
+++ b/drivers/staging/mt7621-mmc/dbg.h
@@ -39,47 +39,45 @@
extern u32 sdio_pro_enable;
/* for a type command, e.g. CMD53, 2 blocks */
struct cmd_profile {
- u32 max_tc; /* Max tick count */
- u32 min_tc;
- u32 tot_tc; /* total tick count */
- u32 tot_bytes;
- u32 count; /* the counts of the command */
+ u32 max_tc; /* Max tick count */
+ u32 min_tc;
+ u32 tot_tc; /* total tick count */
+ u32 tot_bytes;
+ u32 count; /* the counts of the command */
};
/* dump when total_tc and total_bytes */
struct sdio_profile {
- u32 total_tc; /* total tick count of CMD52 and CMD53 */
- u32 total_tx_bytes; /* total bytes of CMD53 Tx */
- u32 total_rx_bytes; /* total bytes of CMD53 Rx */
-
- /*CMD52*/
- struct cmd_profile cmd52_tx;
- struct cmd_profile cmd52_rx;
-
- /*CMD53 in byte unit */
- struct cmd_profile cmd53_tx_byte[512];
- struct cmd_profile cmd53_rx_byte[512];
-
- /*CMD53 in block unit */
- struct cmd_profile cmd53_tx_blk[100];
- struct cmd_profile cmd53_rx_blk[100];
+ u32 total_tc; /* total tick count of CMD52 and CMD53 */
+ u32 total_tx_bytes; /* total bytes of CMD53 Tx */
+ u32 total_rx_bytes; /* total bytes of CMD53 Rx */
+
+ /*CMD52*/
+ struct cmd_profile cmd52_tx;
+ struct cmd_profile cmd52_rx;
+
+ /*CMD53 in byte unit */
+ struct cmd_profile cmd53_tx_byte[512];
+ struct cmd_profile cmd53_rx_byte[512];
+
+ /*CMD53 in block unit */
+ struct cmd_profile cmd53_tx_blk[100];
+ struct cmd_profile cmd53_rx_blk[100];
};
//==========================
-typedef enum {
- SD_TOOL_ZONE = 0,
- SD_TOOL_DMA_SIZE = 1,
- SD_TOOL_PM_ENABLE = 2,
- SD_TOOL_SDIO_PROFILE = 3,
-} msdc_dbg;
-
-typedef enum {
- MODE_PIO = 0,
- MODE_DMA = 1,
- MODE_SIZE_DEP = 2,
-} msdc_mode;
-extern msdc_mode drv_mode[4];
-extern u32 dma_size[4];
+enum msdc_dbg {
+ SD_TOOL_ZONE = 0,
+ SD_TOOL_DMA_SIZE = 1,
+ SD_TOOL_PM_ENABLE = 2,
+ SD_TOOL_SDIO_PROFILE = 3,
+};
+
+enum msdc_mode {
+ MODE_PIO = 0,
+ MODE_DMA = 1,
+ MODE_SIZE_DEP = 2,
+};
/* Debug message event */
#define DBG_EVT_NONE (0) /* No event */
@@ -104,9 +102,10 @@ extern unsigned int sd_debug_zone[4];
do { \
if (x) { \
printk("[BUG] %s LINE:%d FILE:%s\n", #x, __LINE__, __FILE__); \
- while(1); \
+ while (1) \
+ ; \
} \
-}while(0)
+} while (0)
#endif /* end of +++ */
#define N_MSG(evt, fmt, args...)
@@ -121,36 +120,36 @@ do { \
#define ERR_MSG(fmt, args...) \
do { \
- printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
- host->id, ##args , __FUNCTION__, __LINE__, current->comm, current->pid); \
-} while(0);
+ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
+ host->id, ##args, __FUNCTION__, __LINE__, current->comm, current->pid); \
+} while (0);
#if 1
-//defined CONFIG_MTK_MMC_CD_POLL
+//defined CONFIG_MTK_MMC_CD_POLL
#define INIT_MSG(fmt, args...)
-#define IRQ_MSG(fmt, args...)
+#define IRQ_MSG(fmt, args...)
#else
#define INIT_MSG(fmt, args...) \
do { \
- printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
- host->id, ##args , __FUNCTION__, __LINE__, current->comm, current->pid); \
-} while(0);
+ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
+ host->id, ##args, __FUNCTION__, __LINE__, current->comm, current->pid); \
+} while (0);
/* PID in ISR in not corrent */
#define IRQ_MSG(fmt, args...) \
do { \
- printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d>\n", \
- host->id, ##args , __FUNCTION__, __LINE__); \
-} while(0);
+ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d>\n", \
+ host->id, ##args, __FUNCTION__, __LINE__); \
+} while (0);
#endif
-int msdc_debug_proc_init(void);
+void msdc_debug_proc_init(void);
#if 0 /* --- chhung */
void msdc_init_gpt(void);
extern void GPT_GetCounter64(UINT32 *cntL32, UINT32 *cntH32);
#endif /* end of --- */
u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32);
-void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks);
+void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks);
#endif
diff --git a/drivers/staging/mt7621-mmc/mt6575_sd.h b/drivers/staging/mt7621-mmc/mt6575_sd.h
index e90c4f1d1df7..33fa59a019ec 100644
--- a/drivers/staging/mt7621-mmc/mt6575_sd.h
+++ b/drivers/staging/mt7621-mmc/mt6575_sd.h
@@ -44,7 +44,7 @@
/*--------------------------------------------------------------------------*/
/* Common Macro */
/*--------------------------------------------------------------------------*/
-#define REG_ADDR(x) ((volatile u32*)(base + OFFSET_##x))
+#define REG_ADDR(x) (base + OFFSET_##x)
/*--------------------------------------------------------------------------*/
/* Common Definition */
@@ -88,15 +88,15 @@
#define MSDC_EMMC_BOOTMODE1 (1) /* Reset CMD mode */
enum {
- RESP_NONE = 0,
- RESP_R1,
- RESP_R2,
- RESP_R3,
- RESP_R4,
- RESP_R5,
- RESP_R6,
- RESP_R7,
- RESP_R1B
+ RESP_NONE = 0,
+ RESP_R1,
+ RESP_R2,
+ RESP_R3,
+ RESP_R4,
+ RESP_R5,
+ RESP_R6,
+ RESP_R7,
+ RESP_R1B
};
/*--------------------------------------------------------------------------*/
@@ -253,7 +253,7 @@ enum {
#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
#define MSDC_PS_DAT (0xff << 16) /* R */
#define MSDC_PS_CMD (0x1 << 24) /* R */
-#define MSDC_PS_WP (0x1UL<< 31) /* R */
+#define MSDC_PS_WP (0x1UL << 31) /* R */
/* MSDC_INT mask */
#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */
@@ -294,14 +294,14 @@ enum {
/* MSDC_FIFOCS mask */
#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */
#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */
-#define MSDC_FIFOCS_CLR (0x1UL<< 31) /* RW */
+#define MSDC_FIFOCS_CLR (0x1UL << 31) /* RW */
/* SDC_CFG mask */
#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */
#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */
#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */
#define SDC_CFG_SDIO (0x1 << 19) /* RW */
-#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
+#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */
#define SDC_CFG_DTOC (0xffUL << 24) /* RW */
@@ -314,7 +314,7 @@ enum {
#define SDC_CMD_RW (0x1 << 13) /* RW */
#define SDC_CMD_STOP (0x1 << 14) /* RW */
#define SDC_CMD_GOIRQ (0x1 << 15) /* RW */
-#define SDC_CMD_BLKLEN (0xfff<< 16) /* RW */
+#define SDC_CMD_BLKLEN (0xfff << 16) /* RW */
#define SDC_CMD_AUTOCMD (0x3 << 28) /* RW */
#define SDC_CMD_VOLSWTH (0x1 << 30) /* RW */
@@ -396,7 +396,7 @@ enum {
#define MSDC_PAD_CTL0_CLKSMT (0x1 << 18) /* RW */
#define MSDC_PAD_CTL0_CLKIES (0x1 << 19) /* RW */
#define MSDC_PAD_CTL0_CLKTDSEL (0xf << 20) /* RW */
-#define MSDC_PAD_CTL0_CLKRDSEL (0xffUL<< 24) /* RW */
+#define MSDC_PAD_CTL0_CLKRDSEL (0xffUL << 24) /* RW */
/* MSDC_PAD_CTL1 mask */
#define MSDC_PAD_CTL1_CMDDRVN (0x7 << 0) /* RW */
@@ -407,7 +407,7 @@ enum {
#define MSDC_PAD_CTL1_CMDSMT (0x1 << 18) /* RW */
#define MSDC_PAD_CTL1_CMDIES (0x1 << 19) /* RW */
#define MSDC_PAD_CTL1_CMDTDSEL (0xf << 20) /* RW */
-#define MSDC_PAD_CTL1_CMDRDSEL (0xffUL<< 24) /* RW */
+#define MSDC_PAD_CTL1_CMDRDSEL (0xffUL << 24) /* RW */
/* MSDC_PAD_CTL2 mask */
#define MSDC_PAD_CTL2_DATDRVN (0x7 << 0) /* RW */
@@ -418,7 +418,7 @@ enum {
#define MSDC_PAD_CTL2_DATIES (0x1 << 19) /* RW */
#define MSDC_PAD_CTL2_DATSMT (0x1 << 18) /* RW */
#define MSDC_PAD_CTL2_DATTDSEL (0xf << 20) /* RW */
-#define MSDC_PAD_CTL2_DATRDSEL (0xffUL<< 24) /* RW */
+#define MSDC_PAD_CTL2_DATRDSEL (0xffUL << 24) /* RW */
/* MSDC_PAD_TUNE mask */
#define MSDC_PAD_TUNE_DATWRDLY (0x1F << 0) /* RW */
@@ -438,564 +438,549 @@ enum {
#define MSDC_DAT_RDDLY1_D6 (0x1F << 16) /* RW */
#define MSDC_DAT_RDDLY1_D7 (0x1F << 24) /* RW */
-#define MSDC_CKGEN_MSDC_DLY_SEL (0x1F<<10)
-#define MSDC_INT_DAT_LATCH_CK_SEL (0x7<<7)
-#define MSDC_CKGEN_MSDC_CK_SEL (0x1<<6)
-#define CARD_READY_FOR_DATA (1<<8)
-#define CARD_CURRENT_STATE(x) ((x&0x00001E00)>>9)
+#define MSDC_CKGEN_MSDC_DLY_SEL (0x1F << 10)
+#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7)
+#define MSDC_CKGEN_MSDC_CK_SEL (0x1 << 6)
+#define CARD_READY_FOR_DATA (1 << 8)
+#define CARD_CURRENT_STATE(x) ((x & 0x00001E00) >> 9)
/*--------------------------------------------------------------------------*/
/* Descriptor Structure */
/*--------------------------------------------------------------------------*/
-typedef struct {
- u32 hwo:1; /* could be changed by hw */
- u32 bdp:1;
- u32 rsv0:6;
- u32 chksum:8;
- u32 intr:1;
- u32 rsv1:15;
- void *next;
- void *ptr;
- u32 buflen:16;
- u32 extlen:8;
- u32 rsv2:8;
- u32 arg;
- u32 blknum;
- u32 cmd;
-} gpd_t;
-
-typedef struct {
- u32 eol:1;
- u32 rsv0:7;
- u32 chksum:8;
- u32 rsv1:1;
- u32 blkpad:1;
- u32 dwpad:1;
- u32 rsv2:13;
- void *next;
- void *ptr;
- u32 buflen:16;
- u32 rsv3:16;
-} bd_t;
+struct gpd {
+ u32 hwo:1; /* could be changed by hw */
+ u32 bdp:1;
+ u32 rsv0:6;
+ u32 chksum:8;
+ u32 intr:1;
+ u32 rsv1:15;
+ void *next;
+ void *ptr;
+ u32 buflen:16;
+ u32 extlen:8;
+ u32 rsv2:8;
+ u32 arg;
+ u32 blknum;
+ u32 cmd;
+};
+
+struct bd {
+ u32 eol:1;
+ u32 rsv0:7;
+ u32 chksum:8;
+ u32 rsv1:1;
+ u32 blkpad:1;
+ u32 dwpad:1;
+ u32 rsv2:13;
+ void *next;
+ void *ptr;
+ u32 buflen:16;
+ u32 rsv3:16;
+};
/*--------------------------------------------------------------------------*/
/* Register Debugging Structure */
/*--------------------------------------------------------------------------*/
-typedef struct {
- u32 msdc:1;
- u32 ckpwn:1;
- u32 rst:1;
- u32 pio:1;
- u32 ckdrven:1;
- u32 start18v:1;
- u32 pass18v:1;
- u32 ckstb:1;
- u32 ckdiv:8;
- u32 ckmod:2;
- u32 pad:14;
-} msdc_cfg_reg;
-typedef struct {
- u32 sdr104cksel:1;
- u32 rsmpl:1;
- u32 dsmpl:1;
- u32 ddlysel:1;
- u32 ddr50ckd:1;
- u32 dsplsel:1;
- u32 pad1:10;
- u32 d0spl:1;
- u32 d1spl:1;
- u32 d2spl:1;
- u32 d3spl:1;
- u32 d4spl:1;
- u32 d5spl:1;
- u32 d6spl:1;
- u32 d7spl:1;
- u32 riscsz:1;
- u32 pad2:7;
-} msdc_iocon_reg;
-typedef struct {
- u32 cden:1;
- u32 cdsts:1;
- u32 pad1:10;
- u32 cddebounce:4;
- u32 dat:8;
- u32 cmd:1;
- u32 pad2:6;
- u32 wp:1;
-} msdc_ps_reg;
-typedef struct {
- u32 mmcirq:1;
- u32 cdsc:1;
- u32 pad1:1;
- u32 atocmdrdy:1;
- u32 atocmdtmo:1;
- u32 atocmdcrc:1;
- u32 dmaqempty:1;
- u32 sdioirq:1;
- u32 cmdrdy:1;
- u32 cmdtmo:1;
- u32 rspcrc:1;
- u32 csta:1;
- u32 xfercomp:1;
- u32 dxferdone:1;
- u32 dattmo:1;
- u32 datcrc:1;
- u32 atocmd19done:1;
- u32 pad2:15;
-} msdc_int_reg;
-typedef struct {
- u32 mmcirq:1;
- u32 cdsc:1;
- u32 pad1:1;
- u32 atocmdrdy:1;
- u32 atocmdtmo:1;
- u32 atocmdcrc:1;
- u32 dmaqempty:1;
- u32 sdioirq:1;
- u32 cmdrdy:1;
- u32 cmdtmo:1;
- u32 rspcrc:1;
- u32 csta:1;
- u32 xfercomp:1;
- u32 dxferdone:1;
- u32 dattmo:1;
- u32 datcrc:1;
- u32 atocmd19done:1;
- u32 pad2:15;
-} msdc_inten_reg;
-typedef struct {
- u32 rxcnt:8;
- u32 pad1:8;
- u32 txcnt:8;
- u32 pad2:7;
- u32 clr:1;
-} msdc_fifocs_reg;
-typedef struct {
- u32 val;
-} msdc_txdat_reg;
-typedef struct {
- u32 val;
-} msdc_rxdat_reg;
-typedef struct {
- u32 sdiowkup:1;
- u32 inswkup:1;
- u32 pad1:14;
- u32 buswidth:2;
- u32 pad2:1;
- u32 sdio:1;
- u32 sdioide:1;
- u32 intblkgap:1;
- u32 pad4:2;
- u32 dtoc:8;
-} sdc_cfg_reg;
-typedef struct {
- u32 cmd:6;
- u32 brk:1;
- u32 rsptyp:3;
- u32 pad1:1;
- u32 dtype:2;
- u32 rw:1;
- u32 stop:1;
- u32 goirq:1;
- u32 blklen:12;
- u32 atocmd:2;
- u32 volswth:1;
- u32 pad2:1;
-} sdc_cmd_reg;
-typedef struct {
- u32 arg;
-} sdc_arg_reg;
-typedef struct {
- u32 sdcbusy:1;
- u32 cmdbusy:1;
- u32 pad:29;
- u32 swrcmpl:1;
-} sdc_sts_reg;
-typedef struct {
- u32 val;
-} sdc_resp0_reg;
-typedef struct {
- u32 val;
-} sdc_resp1_reg;
-typedef struct {
- u32 val;
-} sdc_resp2_reg;
-typedef struct {
- u32 val;
-} sdc_resp3_reg;
-typedef struct {
- u32 num;
-} sdc_blknum_reg;
-typedef struct {
- u32 sts;
-} sdc_csts_reg;
-typedef struct {
- u32 sts;
-} sdc_cstsen_reg;
-typedef struct {
- u32 datcrcsts:8;
- u32 ddrcrcsts:4;
- u32 pad:20;
-} sdc_datcrcsts_reg;
-typedef struct {
- u32 bootstart:1;
- u32 bootstop:1;
- u32 bootmode:1;
- u32 pad1:9;
- u32 bootwaidly:3;
- u32 bootsupp:1;
- u32 pad2:16;
-} emmc_cfg0_reg;
-typedef struct {
- u32 bootcrctmc:16;
- u32 pad:4;
- u32 bootacktmc:12;
-} emmc_cfg1_reg;
-typedef struct {
- u32 bootcrcerr:1;
- u32 bootackerr:1;
- u32 bootdattmo:1;
- u32 bootacktmo:1;
- u32 bootupstate:1;
- u32 bootackrcv:1;
- u32 bootdatrcv:1;
- u32 pad:25;
-} emmc_sts_reg;
-typedef struct {
- u32 bootrst:1;
- u32 pad:31;
-} emmc_iocon_reg;
-typedef struct {
- u32 val;
-} msdc_acmd_resp_reg;
-typedef struct {
- u32 tunesel:4;
- u32 pad:28;
-} msdc_acmd19_trg_reg;
-typedef struct {
- u32 val;
-} msdc_acmd19_sts_reg;
-typedef struct {
- u32 addr;
-} msdc_dma_sa_reg;
-typedef struct {
- u32 addr;
-} msdc_dma_ca_reg;
-typedef struct {
- u32 start:1;
- u32 stop:1;
- u32 resume:1;
- u32 pad1:5;
- u32 mode:1;
- u32 pad2:1;
- u32 lastbuf:1;
- u32 pad3:1;
- u32 brustsz:3;
- u32 pad4:1;
- u32 xfersz:16;
-} msdc_dma_ctrl_reg;
-typedef struct {
- u32 status:1;
- u32 decsen:1;
- u32 pad1:2;
- u32 bdcsen:1;
- u32 gpdcsen:1;
- u32 pad2:26;
-} msdc_dma_cfg_reg;
-typedef struct {
- u32 sel:16;
- u32 pad2:16;
-} msdc_dbg_sel_reg;
-typedef struct {
- u32 val;
-} msdc_dbg_out_reg;
-typedef struct {
- u32 clkdrvn:3;
- u32 rsv0:1;
- u32 clkdrvp:3;
- u32 rsv1:1;
- u32 clksr:1;
- u32 rsv2:7;
- u32 clkpd:1;
- u32 clkpu:1;
- u32 clksmt:1;
- u32 clkies:1;
- u32 clktdsel:4;
- u32 clkrdsel:8;
-} msdc_pad_ctl0_reg;
-typedef struct {
- u32 cmddrvn:3;
- u32 rsv0:1;
- u32 cmddrvp:3;
- u32 rsv1:1;
- u32 cmdsr:1;
- u32 rsv2:7;
- u32 cmdpd:1;
- u32 cmdpu:1;
- u32 cmdsmt:1;
- u32 cmdies:1;
- u32 cmdtdsel:4;
- u32 cmdrdsel:8;
-} msdc_pad_ctl1_reg;
-typedef struct {
- u32 datdrvn:3;
- u32 rsv0:1;
- u32 datdrvp:3;
- u32 rsv1:1;
- u32 datsr:1;
- u32 rsv2:7;
- u32 datpd:1;
- u32 datpu:1;
- u32 datsmt:1;
- u32 daties:1;
- u32 dattdsel:4;
- u32 datrdsel:8;
-} msdc_pad_ctl2_reg;
-typedef struct {
- u32 wrrxdly:3;
- u32 pad1:5;
- u32 rdrxdly:8;
- u32 pad2:16;
-} msdc_pad_tune_reg;
-typedef struct {
- u32 dat0:5;
- u32 rsv0:3;
- u32 dat1:5;
- u32 rsv1:3;
- u32 dat2:5;
- u32 rsv2:3;
- u32 dat3:5;
- u32 rsv3:3;
-} msdc_dat_rddly0;
-typedef struct {
- u32 dat4:5;
- u32 rsv4:3;
- u32 dat5:5;
- u32 rsv5:3;
- u32 dat6:5;
- u32 rsv6:3;
- u32 dat7:5;
- u32 rsv7:3;
-} msdc_dat_rddly1;
-typedef struct {
- u32 dbg0sel:8;
- u32 dbg1sel:6;
- u32 pad1:2;
- u32 dbg2sel:6;
- u32 pad2:2;
- u32 dbg3sel:6;
- u32 pad3:2;
-} msdc_hw_dbg_reg;
-typedef struct {
- u32 val;
-} msdc_version_reg;
-typedef struct {
- u32 val;
-} msdc_eco_ver_reg;
+struct msdc_cfg_reg {
+ u32 msdc:1;
+ u32 ckpwn:1;
+ u32 rst:1;
+ u32 pio:1;
+ u32 ckdrven:1;
+ u32 start18v:1;
+ u32 pass18v:1;
+ u32 ckstb:1;
+ u32 ckdiv:8;
+ u32 ckmod:2;
+ u32 pad:14;
+};
+
+struct msdc_iocon_reg {
+ u32 sdr104cksel:1;
+ u32 rsmpl:1;
+ u32 dsmpl:1;
+ u32 ddlysel:1;
+ u32 ddr50ckd:1;
+ u32 dsplsel:1;
+ u32 pad1:10;
+ u32 d0spl:1;
+ u32 d1spl:1;
+ u32 d2spl:1;
+ u32 d3spl:1;
+ u32 d4spl:1;
+ u32 d5spl:1;
+ u32 d6spl:1;
+ u32 d7spl:1;
+ u32 riscsz:1;
+ u32 pad2:7;
+};
+
+struct msdc_ps_reg {
+ u32 cden:1;
+ u32 cdsts:1;
+ u32 pad1:10;
+ u32 cddebounce:4;
+ u32 dat:8;
+ u32 cmd:1;
+ u32 pad2:6;
+ u32 wp:1;
+};
+
+struct msdc_int_reg {
+ u32 mmcirq:1;
+ u32 cdsc:1;
+ u32 pad1:1;
+ u32 atocmdrdy:1;
+ u32 atocmdtmo:1;
+ u32 atocmdcrc:1;
+ u32 dmaqempty:1;
+ u32 sdioirq:1;
+ u32 cmdrdy:1;
+ u32 cmdtmo:1;
+ u32 rspcrc:1;
+ u32 csta:1;
+ u32 xfercomp:1;
+ u32 dxferdone:1;
+ u32 dattmo:1;
+ u32 datcrc:1;
+ u32 atocmd19done:1;
+ u32 pad2:15;
+};
+
+struct msdc_inten_reg {
+ u32 mmcirq:1;
+ u32 cdsc:1;
+ u32 pad1:1;
+ u32 atocmdrdy:1;
+ u32 atocmdtmo:1;
+ u32 atocmdcrc:1;
+ u32 dmaqempty:1;
+ u32 sdioirq:1;
+ u32 cmdrdy:1;
+ u32 cmdtmo:1;
+ u32 rspcrc:1;
+ u32 csta:1;
+ u32 xfercomp:1;
+ u32 dxferdone:1;
+ u32 dattmo:1;
+ u32 datcrc:1;
+ u32 atocmd19done:1;
+ u32 pad2:15;
+};
+
+struct msdc_fifocs_reg {
+ u32 rxcnt:8;
+ u32 pad1:8;
+ u32 txcnt:8;
+ u32 pad2:7;
+ u32 clr:1;
+};
+
+struct msdc_txdat_reg {
+ u32 val;
+};
+
+struct msdc_rxdat_reg {
+ u32 val;
+};
+
+struct sdc_cfg_reg {
+ u32 sdiowkup:1;
+ u32 inswkup:1;
+ u32 pad1:14;
+ u32 buswidth:2;
+ u32 pad2:1;
+ u32 sdio:1;
+ u32 sdioide:1;
+ u32 intblkgap:1;
+ u32 pad4:2;
+ u32 dtoc:8;
+};
+
+struct sdc_cmd_reg {
+ u32 cmd:6;
+ u32 brk:1;
+ u32 rsptyp:3;
+ u32 pad1:1;
+ u32 dtype:2;
+ u32 rw:1;
+ u32 stop:1;
+ u32 goirq:1;
+ u32 blklen:12;
+ u32 atocmd:2;
+ u32 volswth:1;
+ u32 pad2:1;
+};
+
+struct sdc_arg_reg {
+ u32 arg;
+};
+
+struct sdc_sts_reg {
+ u32 sdcbusy:1;
+ u32 cmdbusy:1;
+ u32 pad:29;
+ u32 swrcmpl:1;
+};
+
+struct sdc_resp0_reg {
+ u32 val;
+};
+
+struct sdc_resp1_reg {
+ u32 val;
+};
+
+struct sdc_resp2_reg {
+ u32 val;
+};
+
+struct sdc_resp3_reg {
+ u32 val;
+};
+
+struct sdc_blknum_reg {
+ u32 num;
+};
+
+struct sdc_csts_reg {
+ u32 sts;
+};
+
+struct sdc_cstsen_reg {
+ u32 sts;
+};
+
+struct sdc_datcrcsts_reg {
+ u32 datcrcsts:8;
+ u32 ddrcrcsts:4;
+ u32 pad:20;
+};
+
+struct emmc_cfg0_reg {
+ u32 bootstart:1;
+ u32 bootstop:1;
+ u32 bootmode:1;
+ u32 pad1:9;
+ u32 bootwaidly:3;
+ u32 bootsupp:1;
+ u32 pad2:16;
+};
+
+struct emmc_cfg1_reg {
+ u32 bootcrctmc:16;
+ u32 pad:4;
+ u32 bootacktmc:12;
+};
+
+struct emmc_sts_reg {
+ u32 bootcrcerr:1;
+ u32 bootackerr:1;
+ u32 bootdattmo:1;
+ u32 bootacktmo:1;
+ u32 bootupstate:1;
+ u32 bootackrcv:1;
+ u32 bootdatrcv:1;
+ u32 pad:25;
+};
+
+struct emmc_iocon_reg {
+ u32 bootrst:1;
+ u32 pad:31;
+};
+
+struct msdc_acmd_resp_reg {
+ u32 val;
+};
+
+struct msdc_acmd19_trg_reg {
+ u32 tunesel:4;
+ u32 pad:28;
+};
+
+struct msdc_acmd19_sts_reg {
+ u32 val;
+};
+
+struct msdc_dma_sa_reg {
+ u32 addr;
+};
+
+struct msdc_dma_ca_reg {
+ u32 addr;
+};
+
+struct msdc_dma_ctrl_reg {
+ u32 start:1;
+ u32 stop:1;
+ u32 resume:1;
+ u32 pad1:5;
+ u32 mode:1;
+ u32 pad2:1;
+ u32 lastbuf:1;
+ u32 pad3:1;
+ u32 brustsz:3;
+ u32 pad4:1;
+ u32 xfersz:16;
+};
+
+struct msdc_dma_cfg_reg {
+ u32 status:1;
+ u32 decsen:1;
+ u32 pad1:2;
+ u32 bdcsen:1;
+ u32 gpdcsen:1;
+ u32 pad2:26;
+};
+
+struct msdc_dbg_sel_reg {
+ u32 sel:16;
+ u32 pad2:16;
+};
+
+struct msdc_dbg_out_reg {
+ u32 val;
+};
+
+struct msdc_pad_ctl0_reg {
+ u32 clkdrvn:3;
+ u32 rsv0:1;
+ u32 clkdrvp:3;
+ u32 rsv1:1;
+ u32 clksr:1;
+ u32 rsv2:7;
+ u32 clkpd:1;
+ u32 clkpu:1;
+ u32 clksmt:1;
+ u32 clkies:1;
+ u32 clktdsel:4;
+ u32 clkrdsel:8;
+};
+
+struct msdc_pad_ctl1_reg {
+ u32 cmddrvn:3;
+ u32 rsv0:1;
+ u32 cmddrvp:3;
+ u32 rsv1:1;
+ u32 cmdsr:1;
+ u32 rsv2:7;
+ u32 cmdpd:1;
+ u32 cmdpu:1;
+ u32 cmdsmt:1;
+ u32 cmdies:1;
+ u32 cmdtdsel:4;
+ u32 cmdrdsel:8;
+};
+
+struct msdc_pad_ctl2_reg {
+ u32 datdrvn:3;
+ u32 rsv0:1;
+ u32 datdrvp:3;
+ u32 rsv1:1;
+ u32 datsr:1;
+ u32 rsv2:7;
+ u32 datpd:1;
+ u32 datpu:1;
+ u32 datsmt:1;
+ u32 daties:1;
+ u32 dattdsel:4;
+ u32 datrdsel:8;
+};
+
+struct msdc_pad_tune_reg {
+ u32 wrrxdly:3;
+ u32 pad1:5;
+ u32 rdrxdly:8;
+ u32 pad2:16;
+};
+
+struct msdc_dat_rddly0 {
+ u32 dat0:5;
+ u32 rsv0:3;
+ u32 dat1:5;
+ u32 rsv1:3;
+ u32 dat2:5;
+ u32 rsv2:3;
+ u32 dat3:5;
+ u32 rsv3:3;
+};
+
+struct msdc_dat_rddly1 {
+ u32 dat4:5;
+ u32 rsv4:3;
+ u32 dat5:5;
+ u32 rsv5:3;
+ u32 dat6:5;
+ u32 rsv6:3;
+ u32 dat7:5;
+ u32 rsv7:3;
+};
+
+struct msdc_hw_dbg_reg {
+ u32 dbg0sel:8;
+ u32 dbg1sel:6;
+ u32 pad1:2;
+ u32 dbg2sel:6;
+ u32 pad2:2;
+ u32 dbg3sel:6;
+ u32 pad3:2;
+};
+
+struct msdc_version_reg {
+ u32 val;
+};
+
+struct msdc_eco_ver_reg {
+ u32 val;
+};
struct msdc_regs {
- msdc_cfg_reg msdc_cfg; /* base+0x00h */
- msdc_iocon_reg msdc_iocon; /* base+0x04h */
- msdc_ps_reg msdc_ps; /* base+0x08h */
- msdc_int_reg msdc_int; /* base+0x0ch */
- msdc_inten_reg msdc_inten; /* base+0x10h */
- msdc_fifocs_reg msdc_fifocs; /* base+0x14h */
- msdc_txdat_reg msdc_txdat; /* base+0x18h */
- msdc_rxdat_reg msdc_rxdat; /* base+0x1ch */
- u32 rsv1[4];
- sdc_cfg_reg sdc_cfg; /* base+0x30h */
- sdc_cmd_reg sdc_cmd; /* base+0x34h */
- sdc_arg_reg sdc_arg; /* base+0x38h */
- sdc_sts_reg sdc_sts; /* base+0x3ch */
- sdc_resp0_reg sdc_resp0; /* base+0x40h */
- sdc_resp1_reg sdc_resp1; /* base+0x44h */
- sdc_resp2_reg sdc_resp2; /* base+0x48h */
- sdc_resp3_reg sdc_resp3; /* base+0x4ch */
- sdc_blknum_reg sdc_blknum; /* base+0x50h */
- u32 rsv2[1];
- sdc_csts_reg sdc_csts; /* base+0x58h */
- sdc_cstsen_reg sdc_cstsen; /* base+0x5ch */
- sdc_datcrcsts_reg sdc_dcrcsta; /* base+0x60h */
- u32 rsv3[3];
- emmc_cfg0_reg emmc_cfg0; /* base+0x70h */
- emmc_cfg1_reg emmc_cfg1; /* base+0x74h */
- emmc_sts_reg emmc_sts; /* base+0x78h */
- emmc_iocon_reg emmc_iocon; /* base+0x7ch */
- msdc_acmd_resp_reg acmd_resp; /* base+0x80h */
- msdc_acmd19_trg_reg acmd19_trg; /* base+0x84h */
- msdc_acmd19_sts_reg acmd19_sts; /* base+0x88h */
- u32 rsv4[1];
- msdc_dma_sa_reg dma_sa; /* base+0x90h */
- msdc_dma_ca_reg dma_ca; /* base+0x94h */
- msdc_dma_ctrl_reg dma_ctrl; /* base+0x98h */
- msdc_dma_cfg_reg dma_cfg; /* base+0x9ch */
- msdc_dbg_sel_reg dbg_sel; /* base+0xa0h */
- msdc_dbg_out_reg dbg_out; /* base+0xa4h */
- u32 rsv5[2];
- u32 patch0; /* base+0xb0h */
- u32 patch1; /* base+0xb4h */
- u32 rsv6[10];
- msdc_pad_ctl0_reg pad_ctl0; /* base+0xe0h */
- msdc_pad_ctl1_reg pad_ctl1; /* base+0xe4h */
- msdc_pad_ctl2_reg pad_ctl2; /* base+0xe8h */
- msdc_pad_tune_reg pad_tune; /* base+0xech */
- msdc_dat_rddly0 dat_rddly0; /* base+0xf0h */
- msdc_dat_rddly1 dat_rddly1; /* base+0xf4h */
- msdc_hw_dbg_reg hw_dbg; /* base+0xf8h */
- u32 rsv7[1];
- msdc_version_reg version; /* base+0x100h */
- msdc_eco_ver_reg eco_ver; /* base+0x104h */
-};
-
-struct scatterlist_ex {
- u32 cmd;
- u32 arg;
- u32 sglen;
- struct scatterlist *sg;
-};
-
-#define DMA_FLAG_NONE (0x00000000)
-#define DMA_FLAG_EN_CHKSUM (0x00000001)
-#define DMA_FLAG_PAD_BLOCK (0x00000002)
-#define DMA_FLAG_PAD_DWORD (0x00000004)
+ struct msdc_cfg_reg msdc_cfg; /* base+0x00h */
+ struct msdc_iocon_reg msdc_iocon; /* base+0x04h */
+ struct msdc_ps_reg msdc_ps; /* base+0x08h */
+ struct msdc_int_reg msdc_int; /* base+0x0ch */
+ struct msdc_inten_reg msdc_inten; /* base+0x10h */
+ struct msdc_fifocs_reg msdc_fifocs; /* base+0x14h */
+ struct msdc_txdat_reg msdc_txdat; /* base+0x18h */
+ struct msdc_rxdat_reg msdc_rxdat; /* base+0x1ch */
+ u32 rsv1[4];
+ struct sdc_cfg_reg sdc_cfg; /* base+0x30h */
+ struct sdc_cmd_reg sdc_cmd; /* base+0x34h */
+ struct sdc_arg_reg sdc_arg; /* base+0x38h */
+ struct sdc_sts_reg sdc_sts; /* base+0x3ch */
+ struct sdc_resp0_reg sdc_resp0; /* base+0x40h */
+ struct sdc_resp1_reg sdc_resp1; /* base+0x44h */
+ struct sdc_resp2_reg sdc_resp2; /* base+0x48h */
+ struct sdc_resp3_reg sdc_resp3; /* base+0x4ch */
+ struct sdc_blknum_reg sdc_blknum; /* base+0x50h */
+ u32 rsv2[1];
+ struct sdc_csts_reg sdc_csts; /* base+0x58h */
+ struct sdc_cstsen_reg sdc_cstsen; /* base+0x5ch */
+ struct sdc_datcrcsts_reg sdc_dcrcsta; /* base+0x60h */
+ u32 rsv3[3];
+ struct emmc_cfg0_reg emmc_cfg0; /* base+0x70h */
+ struct emmc_cfg1_reg emmc_cfg1; /* base+0x74h */
+ struct emmc_sts_reg emmc_sts; /* base+0x78h */
+ struct emmc_iocon_reg emmc_iocon; /* base+0x7ch */
+ struct msdc_acmd_resp_reg acmd_resp; /* base+0x80h */
+ struct msdc_acmd19_trg_reg acmd19_trg; /* base+0x84h */
+ struct msdc_acmd19_sts_reg acmd19_sts; /* base+0x88h */
+ u32 rsv4[1];
+ struct msdc_dma_sa_reg dma_sa; /* base+0x90h */
+ struct msdc_dma_ca_reg dma_ca; /* base+0x94h */
+ struct msdc_dma_ctrl_reg dma_ctrl; /* base+0x98h */
+ struct msdc_dma_cfg_reg dma_cfg; /* base+0x9ch */
+ struct msdc_dbg_sel_reg dbg_sel; /* base+0xa0h */
+ struct msdc_dbg_out_reg dbg_out; /* base+0xa4h */
+ u32 rsv5[2];
+ u32 patch0; /* base+0xb0h */
+ u32 patch1; /* base+0xb4h */
+ u32 rsv6[10];
+ struct msdc_pad_ctl0_reg pad_ctl0; /* base+0xe0h */
+ struct msdc_pad_ctl1_reg pad_ctl1; /* base+0xe4h */
+ struct msdc_pad_ctl2_reg pad_ctl2; /* base+0xe8h */
+ struct msdc_pad_tune_reg pad_tune; /* base+0xech */
+ struct msdc_dat_rddly0 dat_rddly0; /* base+0xf0h */
+ struct msdc_dat_rddly1 dat_rddly1; /* base+0xf4h */
+ struct msdc_hw_dbg_reg hw_dbg; /* base+0xf8h */
+ u32 rsv7[1];
+ struct msdc_version_reg version; /* base+0x100h */
+ struct msdc_eco_ver_reg eco_ver; /* base+0x104h */
+};
struct msdc_dma {
- u32 flags; /* flags */
- u32 xfersz; /* xfer size in bytes */
- u32 sglen; /* size of scatter list */
- u32 blklen; /* block size */
- struct scatterlist *sg; /* I/O scatter list */
- struct scatterlist_ex *esg; /* extended I/O scatter list */
- u8 mode; /* dma mode */
- u8 burstsz; /* burst size */
- u8 intr; /* dma done interrupt */
- u8 padding; /* padding */
- u32 cmd; /* enhanced mode command */
- u32 arg; /* enhanced mode arg */
- u32 rsp; /* enhanced mode command response */
- u32 autorsp; /* auto command response */
-
- gpd_t *gpd; /* pointer to gpd array */
- bd_t *bd; /* pointer to bd array */
- dma_addr_t gpd_addr; /* the physical address of gpd array */
- dma_addr_t bd_addr; /* the physical address of bd array */
- u32 used_gpd; /* the number of used gpd elements */
- u32 used_bd; /* the number of used bd elements */
-};
-
-struct msdc_host
-{
- struct msdc_hw *hw;
+ u32 sglen; /* size of scatter list */
+ struct scatterlist *sg; /* I/O scatter list */
+ u8 mode; /* dma mode */
+
+ struct gpd *gpd; /* pointer to gpd array */
+ struct bd *bd; /* pointer to bd array */
+ dma_addr_t gpd_addr; /* the physical address of gpd array */
+ dma_addr_t bd_addr; /* the physical address of bd array */
+};
- struct mmc_host *mmc; /* mmc structure */
- struct mmc_command *cmd;
- struct mmc_data *data;
- struct mmc_request *mrq;
- int cmd_rsp;
- int cmd_rsp_done;
- int cmd_r1b_done;
+struct msdc_host {
+ struct msdc_hw *hw;
- int error;
- spinlock_t lock; /* mutex */
- struct semaphore sem;
+ struct mmc_host *mmc; /* mmc structure */
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_request *mrq;
+ int cmd_rsp;
- u32 blksz; /* host block size */
- u32 base; /* host base address */
- int id; /* host id */
- int pwr_ref; /* core power reference count */
+ int error;
+ spinlock_t lock; /* mutex */
+ struct semaphore sem;
- u32 xfer_size; /* total transferred size */
+ u32 blksz; /* host block size */
+ void __iomem *base; /* host base address */
+ int id; /* host id */
+ int pwr_ref; /* core power reference count */
- struct msdc_dma dma; /* dma channel */
- u32 dma_addr; /* dma transfer address */
- u32 dma_left_size; /* dma transfer left size */
- u32 dma_xfer_size; /* dma transfer size in bytes */
- int dma_xfer; /* dma transfer mode */
+ u32 xfer_size; /* total transferred size */
- u32 timeout_ns; /* data timeout ns */
- u32 timeout_clks; /* data timeout clks */
+ struct msdc_dma dma; /* dma channel */
+ u32 dma_xfer_size; /* dma transfer size in bytes */
- atomic_t abort; /* abort transfer */
+ u32 timeout_ns; /* data timeout ns */
+ u32 timeout_clks; /* data timeout clks */
- int irq; /* host interrupt */
+ int irq; /* host interrupt */
- struct tasklet_struct card_tasklet;
-#if 0
- struct work_struct card_workqueue;
-#else
- struct delayed_work card_delaywork;
-#endif
+ struct delayed_work card_delaywork;
+
+ struct completion cmd_done;
+ struct completion xfer_done;
+ struct pm_message pm_state;
+
+ u32 mclk; /* mmc subsystem clock */
+ u32 hclk; /* host clock speed */
+ u32 sclk; /* SD/MS clock speed */
+ u8 core_clkon; /* Host core clock on ? */
+ u8 card_clkon; /* Card clock on ? */
+ u8 core_power; /* core power */
+ u8 power_mode; /* host power mode */
+ u8 card_inserted; /* card inserted ? */
+ u8 suspend; /* host suspended ? */
+ u8 app_cmd; /* for app command */
+ u32 app_cmd_arg;
+};
+
+#define sdr_read8(reg) readb(reg)
+#define sdr_read32(reg) readl(reg)
+#define sdr_write8(reg, val) writeb(val, reg)
+#define sdr_write32(reg, val) writel(val, reg)
- struct completion cmd_done;
- struct completion xfer_done;
- struct pm_message pm_state;
-
- u32 mclk; /* mmc subsystem clock */
- u32 hclk; /* host clock speed */
- u32 sclk; /* SD/MS clock speed */
- u8 core_clkon; /* Host core clock on ? */
- u8 card_clkon; /* Card clock on ? */
- u8 core_power; /* core power */
- u8 power_mode; /* host power mode */
- u8 card_inserted; /* card inserted ? */
- u8 suspend; /* host suspended ? */
- u8 reserved;
- u8 app_cmd; /* for app command */
- u32 app_cmd_arg;
- u64 starttime;
-};
-
-static inline unsigned int uffs(unsigned int x)
+static inline void sdr_set_bits(void __iomem *reg, u32 bs)
{
- unsigned int r = 1;
-
- if (!x)
- return 0;
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
+ u32 val = readl(reg);
+
+ val |= bs;
+ writel(val, reg);
}
-#define sdr_read8(reg) __raw_readb(reg)
-#define sdr_read16(reg) __raw_readw(reg)
-#define sdr_read32(reg) __raw_readl(reg)
-#define sdr_write8(reg,val) __raw_writeb(val,reg)
-#define sdr_write16(reg,val) __raw_writew(val,reg)
-#define sdr_write32(reg,val) __raw_writel(val,reg)
-
-#define sdr_set_bits(reg,bs) ((*(volatile u32*)(reg)) |= (u32)(bs))
-#define sdr_clr_bits(reg,bs) ((*(volatile u32*)(reg)) &= ~((u32)(bs)))
-
-#define sdr_set_field(reg,field,val) \
- do { \
- volatile unsigned int tv = sdr_read32(reg); \
- tv &= ~(field); \
- tv |= ((val) << (uffs((unsigned int)field) - 1)); \
- sdr_write32(reg,tv); \
- } while(0)
-#define sdr_get_field(reg,field,val) \
- do { \
- volatile unsigned int tv = sdr_read32(reg); \
- val = ((tv & (field)) >> (uffs((unsigned int)field) - 1)); \
- } while(0)
-#endif
+static inline void sdr_clr_bits(void __iomem *reg, u32 bs)
+{
+ u32 val = readl(reg);
+
+ val &= ~bs;
+ writel(val, reg);
+}
+
+static inline void sdr_set_field(void __iomem *reg, u32 field, u32 val)
+{
+ unsigned int tv = readl(reg);
+ tv &= ~field;
+ tv |= ((val) << (ffs((unsigned int)field) - 1));
+ writel(tv, reg);
+}
+
+static inline void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
+{
+ unsigned int tv = readl(reg);
+ *val = ((tv & field) >> (ffs((unsigned int)field) - 1));
+}
+
+#endif
diff --git a/drivers/staging/mt7621-mmc/sd.c b/drivers/staging/mt7621-mmc/sd.c
index a1d0173eba56..648a2dd1436e 100644
--- a/drivers/staging/mt7621-mmc/sd.c
+++ b/drivers/staging/mt7621-mmc/sd.c
@@ -34,38 +34,21 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/slab.h>
+
#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
-#include <linux/dma-mapping.h>
-/* +++ by chhung */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/pm.h>
-#include <linux/of.h>
-
-#define MSDC_SMPL_FALLING (1)
-#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */
-#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */
-#define MSDC_REMOVABLE (1 << 5) /* removable slot */
-#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */
-#define MSDC_HIGHSPEED (1 << 7)
+#include <asm/mach-ralink/ralink_regs.h>
+
+#include "board.h"
+#include "dbg.h"
+#include "mt6575_sd.h"
//#define IRQ_SDC 14 //MT7620 /*FIXME*/
#ifdef CONFIG_SOC_MT7621
@@ -77,46 +60,11 @@
#endif
#define IRQ_SDC 22 /*FIXME*/
-#include <asm/dma.h>
-/* end of +++ */
-
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#if 0 /* --- by chhung */
-#include <mach/board.h>
-#include <mach/mt6575_devs.h>
-#include <mach/mt6575_typedefs.h>
-#include <mach/mt6575_clock_manager.h>
-#include <mach/mt6575_pm_ldo.h>
-//#include <mach/mt6575_pll.h>
-//#include <mach/mt6575_gpio.h>
-//#include <mach/mt6575_gpt_sw.h>
-#include <asm/tcm.h>
-// #include <mach/mt6575_gpt.h>
-#endif /* end of --- */
-
-#include "mt6575_sd.h"
-#include "dbg.h"
-
-/* +++ by chhung */
-#include "board.h"
-/* end of +++ */
-
-#if 0 /* --- by chhung */
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
- : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
-#endif /* end of --- */
-
#define DRV_NAME "mtk-sd"
-#define HOST_MAX_NUM (1) /* +/- by chhung */
-
-#if defined (CONFIG_SOC_MT7620)
+#if defined(CONFIG_SOC_MT7620)
#define HOST_MAX_MCLK (48000000) /* +/- by chhung */
-#elif defined (CONFIG_SOC_MT7621)
+#elif defined(CONFIG_SOC_MT7621)
#define HOST_MAX_MCLK (50000000) /* +/- by chhung */
#endif
#define HOST_MIN_MCLK (260000)
@@ -130,14 +78,14 @@
#if 0 /* --- by chhung */
#define MSDC_CLKSRC_REG (0xf100000C)
-#define PDN_REG (0xF1000010)
+#define PDN_REG (0xF1000010)
#endif /* end of --- */
#define DEFAULT_DEBOUNCE (8) /* 8 cycles */
#define DEFAULT_DTOC (40) /* data timeout counter. 65536x40 sclk. */
-#define CMD_TIMEOUT (HZ/10) /* 100ms */
-#define DAT_TIMEOUT (HZ/2 * 5) /* 500ms x5 */
+#define CMD_TIMEOUT (HZ / 10) /* 100ms */
+#define DAT_TIMEOUT (HZ / 2 * 5) /* 500ms x5 */
#define MAX_DMA_CNT (64 * 1024 - 512) /* a single transaction for WIFI may be 50K*/
@@ -148,13 +96,7 @@
#define MAX_HW_SGMTS (MAX_BD_NUM)
#define MAX_PHY_SGMTS (MAX_BD_NUM)
#define MAX_SGMT_SZ (MAX_DMA_CNT)
-#define MAX_REQ_SZ (MAX_SGMT_SZ * 8)
-
-#ifdef MT6575_SD_DEBUG
-static struct msdc_regs *msdc_reg[HOST_MAX_NUM];
-#endif
-
-static int mtk_sw_poll;
+#define MAX_REQ_SZ (MAX_SGMT_SZ * 8)
static int cd_active_low = 1;
@@ -164,203 +106,149 @@ static int cd_active_low = 1;
//#define PERI_MSDC2_PDN (17)
//#define PERI_MSDC3_PDN (18)
-struct msdc_host *msdc_6575_host[] = {NULL,NULL,NULL,NULL};
#if 0 /* --- by chhung */
/* gate means clock power down */
-static int g_clk_gate = 0;
+static int g_clk_gate = 0;
#define msdc_gate_clock(id) \
- do { \
- g_clk_gate &= ~(1 << ((id) + PERI_MSDC0_PDN)); \
- } while(0)
+ do { \
+ g_clk_gate &= ~(1 << ((id) + PERI_MSDC0_PDN)); \
+ } while (0)
/* not like power down register. 1 means clock on. */
#define msdc_ungate_clock(id) \
- do { \
- g_clk_gate |= 1 << ((id) + PERI_MSDC0_PDN); \
- } while(0)
+ do { \
+ g_clk_gate |= 1 << ((id) + PERI_MSDC0_PDN); \
+ } while (0)
-// do we need sync object or not
-void msdc_clk_status(int * status)
+// do we need sync object or not
+void msdc_clk_status(int *status)
{
- *status = g_clk_gate;
+ *status = g_clk_gate;
}
#endif /* end of --- */
/* +++ by chhung */
struct msdc_hw msdc0_hw = {
.clk_src = 0,
- .cmd_edge = MSDC_SMPL_FALLING,
- .data_edge = MSDC_SMPL_FALLING,
- .clk_drv = 4,
- .cmd_drv = 4,
- .dat_drv = 4,
- .data_pins = 4,
- .data_offset = 0,
- .flags = MSDC_SYS_SUSPEND | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED,
-// .flags = MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE,
-};
-
-static struct resource mtk_sd_resources[] = {
- [0] = {
- .start = RALINK_MSDC_BASE,
- .end = RALINK_MSDC_BASE+0x3fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_SDC, /*FIXME*/
- .end = IRQ_SDC, /*FIXME*/
- .flags = IORESOURCE_IRQ,
- },
+ .flags = MSDC_CD_PIN_EN | MSDC_REMOVABLE,
+// .flags = MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE,
};
-static struct platform_device mtk_sd_device = {
- .name = "mtk-sd",
- .id = 0,
- .num_resources = ARRAY_SIZE(mtk_sd_resources),
- .resource = mtk_sd_resources,
-};
/* end of +++ */
static int msdc_rsp[] = {
- 0, /* RESP_NONE */
- 1, /* RESP_R1 */
- 2, /* RESP_R2 */
- 3, /* RESP_R3 */
- 4, /* RESP_R4 */
- 1, /* RESP_R5 */
- 1, /* RESP_R6 */
- 1, /* RESP_R7 */
- 7, /* RESP_R1b */
+ 0, /* RESP_NONE */
+ 1, /* RESP_R1 */
+ 2, /* RESP_R2 */
+ 3, /* RESP_R3 */
+ 4, /* RESP_R4 */
+ 1, /* RESP_R5 */
+ 1, /* RESP_R6 */
+ 1, /* RESP_R7 */
+ 7, /* RESP_R1b */
};
-/* For Inhanced DMA */
-#define msdc_init_gpd_ex(gpd,extlen,cmd,arg,blknum) \
- do { \
- ((gpd_t*)gpd)->extlen = extlen; \
- ((gpd_t*)gpd)->cmd = cmd; \
- ((gpd_t*)gpd)->arg = arg; \
- ((gpd_t*)gpd)->blknum = blknum; \
- }while(0)
-
-#define msdc_init_bd(bd, blkpad, dwpad, dptr, dlen) \
- do { \
- BUG_ON(dlen > 0xFFFFUL); \
- ((bd_t*)bd)->blkpad = blkpad; \
- ((bd_t*)bd)->dwpad = dwpad; \
- ((bd_t*)bd)->ptr = (void*)dptr; \
- ((bd_t*)bd)->buflen = dlen; \
- }while(0)
-
#define msdc_txfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16)
#define msdc_rxfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) >> 0)
#define msdc_fifo_write32(v) sdr_write32(MSDC_TXDATA, (v))
#define msdc_fifo_write8(v) sdr_write8(MSDC_TXDATA, (v))
#define msdc_fifo_read32() sdr_read32(MSDC_RXDATA)
-#define msdc_fifo_read8() sdr_read8(MSDC_RXDATA)
-
+#define msdc_fifo_read8() sdr_read8(MSDC_RXDATA)
#define msdc_dma_on() sdr_clr_bits(MSDC_CFG, MSDC_CFG_PIO)
-#define msdc_dma_off() sdr_set_bits(MSDC_CFG, MSDC_CFG_PIO)
-
-#define msdc_retry(expr,retry,cnt) \
- do { \
- int backup = cnt; \
- while (retry) { \
- if (!(expr)) break; \
- if (cnt-- == 0) { \
- retry--; mdelay(1); cnt = backup; \
- } \
- } \
- WARN_ON(retry == 0); \
- } while(0)
-#if 0 /* --- by chhung */
-#define msdc_reset() \
- do { \
- int retry = 3, cnt = 1000; \
- sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \
- dsb(); \
- msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \
- } while(0)
-#else
-#define msdc_reset() \
- do { \
- int retry = 3, cnt = 1000; \
- sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \
- msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \
- } while(0)
-#endif /* end of +/- */
+#define msdc_retry(expr, retry, cnt) \
+ do { \
+ int backup = cnt; \
+ while (retry) { \
+ if (!(expr)) \
+ break; \
+ if (cnt-- == 0) { \
+ retry--; mdelay(1); cnt = backup; \
+ } \
+ } \
+ WARN_ON(retry == 0); \
+ } while (0)
+
+static void msdc_reset_hw(struct msdc_host *host)
+{
+ void __iomem *base = host->base;
+
+ sdr_set_bits(MSDC_CFG, MSDC_CFG_RST);
+ while (sdr_read32(MSDC_CFG) & MSDC_CFG_RST)
+ cpu_relax();
+}
#define msdc_clr_int() \
- do { \
- volatile u32 val = sdr_read32(MSDC_INT); \
- sdr_write32(MSDC_INT, val); \
- } while(0)
+ do { \
+ volatile u32 val = sdr_read32(MSDC_INT); \
+ sdr_write32(MSDC_INT, val); \
+ } while (0)
#define msdc_clr_fifo() \
- do { \
- int retry = 3, cnt = 1000; \
- sdr_set_bits(MSDC_FIFOCS, MSDC_FIFOCS_CLR); \
- msdc_retry(sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_CLR, retry, cnt); \
- } while(0)
+ do { \
+ int retry = 3, cnt = 1000; \
+ sdr_set_bits(MSDC_FIFOCS, MSDC_FIFOCS_CLR); \
+ msdc_retry(sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_CLR, retry, cnt); \
+ } while (0)
#define msdc_irq_save(val) \
- do { \
- val = sdr_read32(MSDC_INTEN); \
- sdr_clr_bits(MSDC_INTEN, val); \
- } while(0)
-
+ do { \
+ val = sdr_read32(MSDC_INTEN); \
+ sdr_clr_bits(MSDC_INTEN, val); \
+ } while (0)
+
#define msdc_irq_restore(val) \
- do { \
- sdr_set_bits(MSDC_INTEN, val); \
- } while(0)
+ do { \
+ sdr_set_bits(MSDC_INTEN, val); \
+ } while (0)
/* clock source for host: global */
-#if defined (CONFIG_SOC_MT7620)
+#if defined(CONFIG_SOC_MT7620)
static u32 hclks[] = {48000000}; /* +/- by chhung */
-#elif defined (CONFIG_SOC_MT7621)
+#elif defined(CONFIG_SOC_MT7621)
static u32 hclks[] = {50000000}; /* +/- by chhung */
#endif
//============================================
// the power for msdc host controller: global
-// always keep the VMC on.
+// always keep the VMC on.
//============================================
#define msdc_vcore_on(host) \
- do { \
- INIT_MSG("[+]VMC ref. count<%d>", ++host->pwr_ref); \
- (void)hwPowerOn(MT65XX_POWER_LDO_VMC, VOL_3300, "SD"); \
- } while (0)
+ do { \
+ INIT_MSG("[+]VMC ref. count<%d>", ++host->pwr_ref); \
+ (void)hwPowerOn(MT65XX_POWER_LDO_VMC, VOL_3300, "SD"); \
+ } while (0)
#define msdc_vcore_off(host) \
- do { \
- INIT_MSG("[-]VMC ref. count<%d>", --host->pwr_ref); \
- (void)hwPowerDown(MT65XX_POWER_LDO_VMC, "SD"); \
- } while (0)
+ do { \
+ INIT_MSG("[-]VMC ref. count<%d>", --host->pwr_ref); \
+ (void)hwPowerDown(MT65XX_POWER_LDO_VMC, "SD"); \
+ } while (0)
//====================================
-// the vdd output for card: global
-// always keep the VMCH on.
-//====================================
+// the vdd output for card: global
+// always keep the VMCH on.
+//====================================
#define msdc_vdd_on(host) \
- do { \
- (void)hwPowerOn(MT65XX_POWER_LDO_VMCH, VOL_3300, "SD"); \
- } while (0)
+ do { \
+ (void)hwPowerOn(MT65XX_POWER_LDO_VMCH, VOL_3300, "SD"); \
+ } while (0)
#define msdc_vdd_off(host) \
- do { \
- (void)hwPowerDown(MT65XX_POWER_LDO_VMCH, "SD"); \
- } while (0)
+ do { \
+ (void)hwPowerDown(MT65XX_POWER_LDO_VMCH, "SD"); \
+ } while (0)
#define sdc_is_busy() (sdr_read32(SDC_STS) & SDC_STS_SDCBUSY)
#define sdc_is_cmd_busy() (sdr_read32(SDC_STS) & SDC_STS_CMDBUSY)
-#define sdc_send_cmd(cmd,arg) \
- do { \
- sdr_write32(SDC_ARG, (arg)); \
- sdr_write32(SDC_CMD, (cmd)); \
- } while(0)
+#define sdc_send_cmd(cmd, arg) \
+ do { \
+ sdr_write32(SDC_ARG, (arg)); \
+ sdr_write32(SDC_CMD, (cmd)); \
+ } while (0)
// can modify to read h/w register.
//#define is_card_present(h) ((sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1);
-#define is_card_present(h) (((struct msdc_host*)(h))->card_inserted)
+#define is_card_present(h) (((struct msdc_host *)(h))->card_inserted)
/* +++ by chhung */
#ifndef __ASSEMBLY__
@@ -369,2013 +257,1621 @@ static u32 hclks[] = {50000000}; /* +/- by chhung */
#define PHYSADDR(a) ((a) & 0x1fffffff)
#endif
/* end of +++ */
-static unsigned int msdc_do_command(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune,
- unsigned long timeout);
-
-static int msdc_tune_cmdrsp(struct msdc_host*host,struct mmc_command *cmd);
+static unsigned int msdc_do_command(struct msdc_host *host,
+ struct mmc_command *cmd,
+ int tune,
+ unsigned long timeout);
+
+static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd);
#ifdef MT6575_SD_DEBUG
static void msdc_dump_card_status(struct msdc_host *host, u32 status)
{
/* N_MSG is currently a no-op */
#if 0
- static char *state[] = {
- "Idle", /* 0 */
- "Ready", /* 1 */
- "Ident", /* 2 */
- "Stby", /* 3 */
- "Tran", /* 4 */
- "Data", /* 5 */
- "Rcv", /* 6 */
- "Prg", /* 7 */
- "Dis", /* 8 */
- "Reserved", /* 9 */
- "Reserved", /* 10 */
- "Reserved", /* 11 */
- "Reserved", /* 12 */
- "Reserved", /* 13 */
- "Reserved", /* 14 */
- "I/O mode", /* 15 */
- };
+ static char *state[] = {
+ "Idle", /* 0 */
+ "Ready", /* 1 */
+ "Ident", /* 2 */
+ "Stby", /* 3 */
+ "Tran", /* 4 */
+ "Data", /* 5 */
+ "Rcv", /* 6 */
+ "Prg", /* 7 */
+ "Dis", /* 8 */
+ "Reserved", /* 9 */
+ "Reserved", /* 10 */
+ "Reserved", /* 11 */
+ "Reserved", /* 12 */
+ "Reserved", /* 13 */
+ "Reserved", /* 14 */
+ "I/O mode", /* 15 */
+ };
#endif
- if (status & R1_OUT_OF_RANGE)
- N_MSG(RSP, "[CARD_STATUS] Out of Range");
- if (status & R1_ADDRESS_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Address Error");
- if (status & R1_BLOCK_LEN_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Block Len Error");
- if (status & R1_ERASE_SEQ_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Erase Seq Error");
- if (status & R1_ERASE_PARAM)
- N_MSG(RSP, "[CARD_STATUS] Erase Param");
- if (status & R1_WP_VIOLATION)
- N_MSG(RSP, "[CARD_STATUS] WP Violation");
- if (status & R1_CARD_IS_LOCKED)
- N_MSG(RSP, "[CARD_STATUS] Card is Locked");
- if (status & R1_LOCK_UNLOCK_FAILED)
- N_MSG(RSP, "[CARD_STATUS] Lock/Unlock Failed");
- if (status & R1_COM_CRC_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Command CRC Error");
- if (status & R1_ILLEGAL_COMMAND)
- N_MSG(RSP, "[CARD_STATUS] Illegal Command");
- if (status & R1_CARD_ECC_FAILED)
- N_MSG(RSP, "[CARD_STATUS] Card ECC Failed");
- if (status & R1_CC_ERROR)
- N_MSG(RSP, "[CARD_STATUS] CC Error");
- if (status & R1_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Error");
- if (status & R1_UNDERRUN)
- N_MSG(RSP, "[CARD_STATUS] Underrun");
- if (status & R1_OVERRUN)
- N_MSG(RSP, "[CARD_STATUS] Overrun");
- if (status & R1_CID_CSD_OVERWRITE)
- N_MSG(RSP, "[CARD_STATUS] CID/CSD Overwrite");
- if (status & R1_WP_ERASE_SKIP)
- N_MSG(RSP, "[CARD_STATUS] WP Eraser Skip");
- if (status & R1_CARD_ECC_DISABLED)
- N_MSG(RSP, "[CARD_STATUS] Card ECC Disabled");
- if (status & R1_ERASE_RESET)
- N_MSG(RSP, "[CARD_STATUS] Erase Reset");
- if (status & R1_READY_FOR_DATA)
- N_MSG(RSP, "[CARD_STATUS] Ready for Data");
- if (status & R1_SWITCH_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Switch error");
- if (status & R1_APP_CMD)
- N_MSG(RSP, "[CARD_STATUS] App Command");
-
- N_MSG(RSP, "[CARD_STATUS] '%s' State", state[R1_CURRENT_STATE(status)]);
+ if (status & R1_OUT_OF_RANGE)
+ N_MSG(RSP, "[CARD_STATUS] Out of Range");
+ if (status & R1_ADDRESS_ERROR)
+ N_MSG(RSP, "[CARD_STATUS] Address Error");
+ if (status & R1_BLOCK_LEN_ERROR)
+ N_MSG(RSP, "[CARD_STATUS] Block Len Error");
+ if (status & R1_ERASE_SEQ_ERROR)
+ N_MSG(RSP, "[CARD_STATUS] Erase Seq Error");
+ if (status & R1_ERASE_PARAM)
+ N_MSG(RSP, "[CARD_STATUS] Erase Param");
+ if (status & R1_WP_VIOLATION)
+ N_MSG(RSP, "[CARD_STATUS] WP Violation");
+ if (status & R1_CARD_IS_LOCKED)
+ N_MSG(RSP, "[CARD_STATUS] Card is Locked");
+ if (status & R1_LOCK_UNLOCK_FAILED)
+ N_MSG(RSP, "[CARD_STATUS] Lock/Unlock Failed");
+ if (status & R1_COM_CRC_ERROR)
+ N_MSG(RSP, "[CARD_STATUS] Command CRC Error");
+ if (status & R1_ILLEGAL_COMMAND)
+ N_MSG(RSP, "[CARD_STATUS] Illegal Command");
+ if (status & R1_CARD_ECC_FAILED)
+ N_MSG(RSP, "[CARD_STATUS] Card ECC Failed");
+ if (status & R1_CC_ERROR)
+ N_MSG(RSP, "[CARD_STATUS] CC Error");
+ if (status & R1_ERROR)
+ N_MSG(RSP, "[CARD_STATUS] Error");
+ if (status & R1_UNDERRUN)
+ N_MSG(RSP, "[CARD_STATUS] Underrun");
+ if (status & R1_OVERRUN)
+ N_MSG(RSP, "[CARD_STATUS] Overrun");
+ if (status & R1_CID_CSD_OVERWRITE)
+ N_MSG(RSP, "[CARD_STATUS] CID/CSD Overwrite");
+ if (status & R1_WP_ERASE_SKIP)
+ N_MSG(RSP, "[CARD_STATUS] WP Eraser Skip");
+ if (status & R1_CARD_ECC_DISABLED)
+ N_MSG(RSP, "[CARD_STATUS] Card ECC Disabled");
+ if (status & R1_ERASE_RESET)
+ N_MSG(RSP, "[CARD_STATUS] Erase Reset");
+ if (status & R1_READY_FOR_DATA)
+ N_MSG(RSP, "[CARD_STATUS] Ready for Data");
+ if (status & R1_SWITCH_ERROR)
+ N_MSG(RSP, "[CARD_STATUS] Switch error");
+ if (status & R1_APP_CMD)
+ N_MSG(RSP, "[CARD_STATUS] App Command");
+
+ N_MSG(RSP, "[CARD_STATUS] '%s' State", state[R1_CURRENT_STATE(status)]);
}
static void msdc_dump_ocr_reg(struct msdc_host *host, u32 resp)
{
- if (resp & (1 << 7))
- N_MSG(RSP, "[OCR] Low Voltage Range");
- if (resp & (1 << 15))
- N_MSG(RSP, "[OCR] 2.7-2.8 volt");
- if (resp & (1 << 16))
- N_MSG(RSP, "[OCR] 2.8-2.9 volt");
- if (resp & (1 << 17))
- N_MSG(RSP, "[OCR] 2.9-3.0 volt");
- if (resp & (1 << 18))
- N_MSG(RSP, "[OCR] 3.0-3.1 volt");
- if (resp & (1 << 19))
- N_MSG(RSP, "[OCR] 3.1-3.2 volt");
- if (resp & (1 << 20))
- N_MSG(RSP, "[OCR] 3.2-3.3 volt");
- if (resp & (1 << 21))
- N_MSG(RSP, "[OCR] 3.3-3.4 volt");
- if (resp & (1 << 22))
- N_MSG(RSP, "[OCR] 3.4-3.5 volt");
- if (resp & (1 << 23))
- N_MSG(RSP, "[OCR] 3.5-3.6 volt");
- if (resp & (1 << 24))
- N_MSG(RSP, "[OCR] Switching to 1.8V Accepted (S18A)");
- if (resp & (1 << 30))
- N_MSG(RSP, "[OCR] Card Capacity Status (CCS)");
- if (resp & (1 << 31))
- N_MSG(RSP, "[OCR] Card Power Up Status (Idle)");
- else
- N_MSG(RSP, "[OCR] Card Power Up Status (Busy)");
+ if (resp & (1 << 7))
+ N_MSG(RSP, "[OCR] Low Voltage Range");
+ if (resp & (1 << 15))
+ N_MSG(RSP, "[OCR] 2.7-2.8 volt");
+ if (resp & (1 << 16))
+ N_MSG(RSP, "[OCR] 2.8-2.9 volt");
+ if (resp & (1 << 17))
+ N_MSG(RSP, "[OCR] 2.9-3.0 volt");
+ if (resp & (1 << 18))
+ N_MSG(RSP, "[OCR] 3.0-3.1 volt");
+ if (resp & (1 << 19))
+ N_MSG(RSP, "[OCR] 3.1-3.2 volt");
+ if (resp & (1 << 20))
+ N_MSG(RSP, "[OCR] 3.2-3.3 volt");
+ if (resp & (1 << 21))
+ N_MSG(RSP, "[OCR] 3.3-3.4 volt");
+ if (resp & (1 << 22))
+ N_MSG(RSP, "[OCR] 3.4-3.5 volt");
+ if (resp & (1 << 23))
+ N_MSG(RSP, "[OCR] 3.5-3.6 volt");
+ if (resp & (1 << 24))
+ N_MSG(RSP, "[OCR] Switching to 1.8V Accepted (S18A)");
+ if (resp & (1 << 30))
+ N_MSG(RSP, "[OCR] Card Capacity Status (CCS)");
+ if (resp & (1 << 31))
+ N_MSG(RSP, "[OCR] Card Power Up Status (Idle)");
+ else
+ N_MSG(RSP, "[OCR] Card Power Up Status (Busy)");
}
static void msdc_dump_rca_resp(struct msdc_host *host, u32 resp)
{
- u32 status = (((resp >> 15) & 0x1) << 23) |
- (((resp >> 14) & 0x1) << 22) |
- (((resp >> 13) & 0x1) << 19) |
- (resp & 0x1fff);
-
- N_MSG(RSP, "[RCA] 0x%.4x", resp >> 16);
- msdc_dump_card_status(host, status);
+ u32 status = (((resp >> 15) & 0x1) << 23) |
+ (((resp >> 14) & 0x1) << 22) |
+ (((resp >> 13) & 0x1) << 19) |
+ (resp & 0x1fff);
+
+ N_MSG(RSP, "[RCA] 0x%.4x", resp >> 16);
+ msdc_dump_card_status(host, status);
}
static void msdc_dump_io_resp(struct msdc_host *host, u32 resp)
{
- u32 flags = (resp >> 8) & 0xFF;
+ u32 flags = (resp >> 8) & 0xFF;
#if 0
- char *state[] = {"DIS", "CMD", "TRN", "RFU"};
+ char *state[] = {"DIS", "CMD", "TRN", "RFU"};
#endif
- if (flags & (1 << 7))
- N_MSG(RSP, "[IO] COM_CRC_ERR");
- if (flags & (1 << 6))
- N_MSG(RSP, "[IO] Illgal command");
- if (flags & (1 << 3))
- N_MSG(RSP, "[IO] Error");
- if (flags & (1 << 2))
- N_MSG(RSP, "[IO] RFU");
- if (flags & (1 << 1))
- N_MSG(RSP, "[IO] Function number error");
- if (flags & (1 << 0))
- N_MSG(RSP, "[IO] Out of range");
-
- N_MSG(RSP, "[IO] State: %s, Data:0x%x", state[(resp >> 12) & 0x3], resp & 0xFF);
+ if (flags & (1 << 7))
+ N_MSG(RSP, "[IO] COM_CRC_ERR");
+ if (flags & (1 << 6))
+ N_MSG(RSP, "[IO] Illegal command");
+ if (flags & (1 << 3))
+ N_MSG(RSP, "[IO] Error");
+ if (flags & (1 << 2))
+ N_MSG(RSP, "[IO] RFU");
+ if (flags & (1 << 1))
+ N_MSG(RSP, "[IO] Function number error");
+ if (flags & (1 << 0))
+ N_MSG(RSP, "[IO] Out of range");
+
+ N_MSG(RSP, "[IO] State: %s, Data:0x%x", state[(resp >> 12) & 0x3], resp & 0xFF);
}
#endif
static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
{
- u32 base = host->base;
- u32 timeout, clk_ns;
-
- host->timeout_ns = ns;
- host->timeout_clks = clks;
-
- clk_ns = 1000000000UL / host->sclk;
- timeout = ns / clk_ns + clks;
- timeout = timeout >> 16; /* in 65536 sclk cycle unit */
- timeout = timeout > 1 ? timeout - 1 : 0;
- timeout = timeout > 255 ? 255 : timeout;
-
- sdr_set_field(SDC_CFG, SDC_CFG_DTOC, timeout);
-
- N_MSG(OPS, "Set read data timeout: %dns %dclks -> %d x 65536 cycles",
- ns, clks, timeout + 1);
-}
-
-/* msdc_eirq_sdio() will be called when EIRQ(for WIFI) */
-static void msdc_eirq_sdio(void *data)
-{
- struct msdc_host *host = (struct msdc_host *)data;
+ void __iomem *base = host->base;
+ u32 timeout, clk_ns;
- N_MSG(INT, "SDIO EINT");
+ host->timeout_ns = ns;
+ host->timeout_clks = clks;
- mmc_signal_sdio_irq(host->mmc);
-}
+ clk_ns = 1000000000UL / host->sclk;
+ timeout = ns / clk_ns + clks;
+ timeout = timeout >> 16; /* in 65536 sclk cycle unit */
+ timeout = timeout > 1 ? timeout - 1 : 0;
+ timeout = timeout > 255 ? 255 : timeout;
-/* msdc_eirq_cd will not be used! We not using EINT for card detection. */
-static void msdc_eirq_cd(void *data)
-{
- struct msdc_host *host = (struct msdc_host *)data;
+ sdr_set_field(SDC_CFG, SDC_CFG_DTOC, timeout);
- N_MSG(INT, "CD EINT");
-
-#if 0
- tasklet_hi_schedule(&host->card_tasklet);
-#else
- schedule_delayed_work(&host->card_delaywork, HZ);
-#endif
+ N_MSG(OPS, "Set read data timeout: %dns %dclks -> %d x 65536 cycles",
+ ns, clks, timeout + 1);
}
-#if 0
-static void msdc_tasklet_card(unsigned long arg)
-{
- struct msdc_host *host = (struct msdc_host *)arg;
-#else
static void msdc_tasklet_card(struct work_struct *work)
{
- struct msdc_host *host = (struct msdc_host *)container_of(work,
- struct msdc_host, card_delaywork.work);
-#endif
- struct msdc_hw *hw = host->hw;
- u32 base = host->base;
- u32 inserted;
- u32 status = 0;
+ struct msdc_host *host = (struct msdc_host *)container_of(work,
+ struct msdc_host, card_delaywork.work);
+ void __iomem *base = host->base;
+ u32 inserted;
+ u32 status = 0;
//u32 change = 0;
- spin_lock(&host->lock);
+ spin_lock(&host->lock);
- if (hw->get_cd_status) { // NULL
- inserted = hw->get_cd_status();
- } else {
- status = sdr_read32(MSDC_PS);
- if (cd_active_low)
+ status = sdr_read32(MSDC_PS);
+ if (cd_active_low)
inserted = (status & MSDC_PS_CDSTS) ? 0 : 1;
else
- inserted = (status & MSDC_PS_CDSTS) ? 1 : 0;
- }
+ inserted = (status & MSDC_PS_CDSTS) ? 1 : 0;
#if 0
- change = host->card_inserted ^ inserted;
- host->card_inserted = inserted;
-
- if (change && !host->suspend) {
- if (inserted) {
- host->mmc->f_max = HOST_MAX_MCLK; // work around
- }
- mmc_detect_change(host->mmc, msecs_to_jiffies(20));
- }
+ change = host->card_inserted ^ inserted;
+ host->card_inserted = inserted;
+
+ if (change && !host->suspend) {
+ if (inserted)
+ host->mmc->f_max = HOST_MAX_MCLK; // work around
+ mmc_detect_change(host->mmc, msecs_to_jiffies(20));
+ }
#else /* Make sure: handle the last interrupt */
- host->card_inserted = inserted;
-
- if (!host->suspend) {
- host->mmc->f_max = HOST_MAX_MCLK;
- mmc_detect_change(host->mmc, msecs_to_jiffies(20));
- }
-
- IRQ_MSG("card found<%s>", inserted ? "inserted" : "removed");
+ host->card_inserted = inserted;
+
+ if (!host->suspend) {
+ host->mmc->f_max = HOST_MAX_MCLK;
+ mmc_detect_change(host->mmc, msecs_to_jiffies(20));
+ }
+
+ IRQ_MSG("card found<%s>", inserted ? "inserted" : "removed");
#endif
- spin_unlock(&host->lock);
+ spin_unlock(&host->lock);
}
#if 0 /* --- by chhung */
/* For E2 only */
static u8 clk_src_bit[4] = {
- 0, 3, 5, 7
+ 0, 3, 5, 7
};
-static void msdc_select_clksrc(struct msdc_host* host, unsigned char clksrc)
+static void msdc_select_clksrc(struct msdc_host *host, unsigned char clksrc)
{
- u32 val;
- u32 base = host->base;
-
- BUG_ON(clksrc > 3);
- INIT_MSG("set clock source to <%d>", clksrc);
-
- val = sdr_read32(MSDC_CLKSRC_REG);
- if (sdr_read32(MSDC_ECO_VER) >= 4) {
- val &= ~(0x3 << clk_src_bit[host->id]);
- val |= clksrc << clk_src_bit[host->id];
- } else {
- val &= ~0x3; val |= clksrc;
- }
- sdr_write32(MSDC_CLKSRC_REG, val);
-
- host->hclk = hclks[clksrc];
- host->hw->clk_src = clksrc;
+ u32 val;
+ void __iomem *base = host->base;
+
+ BUG_ON(clksrc > 3);
+ INIT_MSG("set clock source to <%d>", clksrc);
+
+ val = sdr_read32(MSDC_CLKSRC_REG);
+ if (sdr_read32(MSDC_ECO_VER) >= 4) {
+ val &= ~(0x3 << clk_src_bit[host->id]);
+ val |= clksrc << clk_src_bit[host->id];
+ } else {
+ val &= ~0x3; val |= clksrc;
+ }
+ sdr_write32(MSDC_CLKSRC_REG, val);
+
+ host->hclk = hclks[clksrc];
+ host->hw->clk_src = clksrc;
}
#endif /* end of --- */
static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz)
{
- //struct msdc_hw *hw = host->hw;
- u32 base = host->base;
- u32 mode;
- u32 flags;
- u32 div;
- u32 sclk;
- u32 hclk = host->hclk;
- //u8 clksrc = hw->clk_src;
-
- if (!hz) { // set mmc system clock to 0 ?
- //ERR_MSG("set mclk to 0!!!");
- msdc_reset();
- return;
- }
-
- msdc_irq_save(flags);
-
-#if defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7628_FPGA)
- mode = 0x0; /* use divisor */
- if (hz >= (hclk >> 1)) {
- div = 0; /* mean div = 1/2 */
- sclk = hclk >> 1; /* sclk = clk / 2 */
- } else {
- div = (hclk + ((hz << 2) - 1)) / (hz << 2);
- sclk = (hclk >> 2) / div;
- }
-#else
- if (ddr) {
- mode = 0x2; /* ddr mode and use divisor */
- if (hz >= (hclk >> 2)) {
- div = 1; /* mean div = 1/4 */
- sclk = hclk >> 2; /* sclk = clk / 4 */
- } else {
- div = (hclk + ((hz << 2) - 1)) / (hz << 2);
- sclk = (hclk >> 2) / div;
- }
- } else if (hz >= hclk) { /* bug fix */
- mode = 0x1; /* no divisor and divisor is ignored */
- div = 0;
- sclk = hclk;
- } else {
- mode = 0x0; /* use divisor */
- if (hz >= (hclk >> 1)) {
- div = 0; /* mean div = 1/2 */
- sclk = hclk >> 1; /* sclk = clk / 2 */
- } else {
- div = (hclk + ((hz << 2) - 1)) / (hz << 2);
- sclk = (hclk >> 2) / div;
- }
- }
-#endif
- /* set clock mode and divisor */
- sdr_set_field(MSDC_CFG, MSDC_CFG_CKMOD, mode);
- sdr_set_field(MSDC_CFG, MSDC_CFG_CKDIV, div);
-
- /* wait clock stable */
- while (!(sdr_read32(MSDC_CFG) & MSDC_CFG_CKSTB));
-
- host->sclk = sclk;
- host->mclk = hz;
- msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); // need?
-
- INIT_MSG("================");
- INIT_MSG("!!! Set<%dKHz> Source<%dKHz> -> sclk<%dKHz>", hz/1000, hclk/1000, sclk/1000);
- INIT_MSG("================");
-
- msdc_irq_restore(flags);
+ //struct msdc_hw *hw = host->hw;
+ void __iomem *base = host->base;
+ u32 mode;
+ u32 flags;
+ u32 div;
+ u32 sclk;
+ u32 hclk = host->hclk;
+ //u8 clksrc = hw->clk_src;
+
+ if (!hz) { // set mmc system clock to 0 ?
+ //ERR_MSG("set mclk to 0!!!");
+ msdc_reset_hw(host);
+ return;
+ }
+
+ msdc_irq_save(flags);
+
+ if (ddr) {
+ mode = 0x2; /* ddr mode and use divisor */
+ if (hz >= (hclk >> 2)) {
+ div = 1; /* mean div = 1/4 */
+ sclk = hclk >> 2; /* sclk = clk / 4 */
+ } else {
+ div = (hclk + ((hz << 2) - 1)) / (hz << 2);
+ sclk = (hclk >> 2) / div;
+ }
+ } else if (hz >= hclk) { /* bug fix */
+ mode = 0x1; /* no divisor and divisor is ignored */
+ div = 0;
+ sclk = hclk;
+ } else {
+ mode = 0x0; /* use divisor */
+ if (hz >= (hclk >> 1)) {
+ div = 0; /* mean div = 1/2 */
+ sclk = hclk >> 1; /* sclk = clk / 2 */
+ } else {
+ div = (hclk + ((hz << 2) - 1)) / (hz << 2);
+ sclk = (hclk >> 2) / div;
+ }
+ }
+
+ /* set clock mode and divisor */
+ sdr_set_field(MSDC_CFG, MSDC_CFG_CKMOD, mode);
+ sdr_set_field(MSDC_CFG, MSDC_CFG_CKDIV, div);
+
+ /* wait clock stable */
+ while (!(sdr_read32(MSDC_CFG) & MSDC_CFG_CKSTB))
+ cpu_relax();
+
+ host->sclk = sclk;
+ host->mclk = hz;
+ msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); // need?
+
+ INIT_MSG("================");
+ INIT_MSG("!!! Set<%dKHz> Source<%dKHz> -> sclk<%dKHz>", hz / 1000, hclk / 1000, sclk / 1000);
+ INIT_MSG("================");
+
+ msdc_irq_restore(flags);
}
/* Fix me. when need to abort */
static void msdc_abort_data(struct msdc_host *host)
{
- u32 base = host->base;
- struct mmc_command *stop = host->mrq->stop;
-
- ERR_MSG("Need to Abort. dma<%d>", host->dma_xfer);
-
- msdc_reset();
- msdc_clr_fifo();
- msdc_clr_int();
-
- // need to check FIFO count 0 ?
-
- if (stop) { /* try to stop, but may not success */
- ERR_MSG("stop when abort CMD<%d>", stop->opcode);
- (void)msdc_do_command(host, stop, 0, CMD_TIMEOUT);
- }
-
- //if (host->mclk >= 25000000) {
- // msdc_set_mclk(host, 0, host->mclk >> 1);
- //}
+ void __iomem *base = host->base;
+ struct mmc_command *stop = host->mrq->stop;
+
+ ERR_MSG("Need to Abort.");
+
+ msdc_reset_hw(host);
+ msdc_clr_fifo();
+ msdc_clr_int();
+
+ // need to check FIFO count 0 ?
+
+ if (stop) { /* try to stop, but may not success */
+ ERR_MSG("stop when abort CMD<%d>", stop->opcode);
+ (void)msdc_do_command(host, stop, 0, CMD_TIMEOUT);
+ }
+
+ //if (host->mclk >= 25000000) {
+ // msdc_set_mclk(host, 0, host->mclk >> 1);
+ //}
}
#if 0 /* --- by chhung */
static void msdc_pin_config(struct msdc_host *host, int mode)
{
- struct msdc_hw *hw = host->hw;
- u32 base = host->base;
- int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
-
- /* Config WP pin */
- if (hw->flags & MSDC_WP_PIN_EN) {
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_WP_PIN, pull);
- }
-
- switch (mode) {
- case MSDC_PIN_PULL_UP:
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 1); /* Check & FIXME */
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 1);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 1);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
- break;
- case MSDC_PIN_PULL_DOWN:
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 1); /* Check & FIXME */
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 1);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 1);
- break;
- case MSDC_PIN_PULL_NONE:
- default:
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
- break;
- }
-
- N_MSG(CFG, "Pins mode(%d), down(%d), up(%d)",
- mode, MSDC_PIN_PULL_DOWN, MSDC_PIN_PULL_UP);
+ struct msdc_hw *hw = host->hw;
+ void __iomem *base = host->base;
+ int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
+
+ /* Config WP pin */
+ if (hw->flags & MSDC_WP_PIN_EN) {
+ if (hw->config_gpio_pin) /* NULL */
+ hw->config_gpio_pin(MSDC_WP_PIN, pull);
+ }
+
+ switch (mode) {
+ case MSDC_PIN_PULL_UP:
+ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 1); /* Check & FIXME */
+ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 1);
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 1);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
+ break;
+ case MSDC_PIN_PULL_DOWN:
+ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
+ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 1); /* Check & FIXME */
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 1);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 1);
+ break;
+ case MSDC_PIN_PULL_NONE:
+ default:
+ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
+ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
+ break;
+ }
+
+ N_MSG(CFG, "Pins mode(%d), down(%d), up(%d)",
+ mode, MSDC_PIN_PULL_DOWN, MSDC_PIN_PULL_UP);
}
void msdc_pin_reset(struct msdc_host *host, int mode)
{
- struct msdc_hw *hw = (struct msdc_hw *)host->hw;
- u32 base = host->base;
- int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
-
- /* Config reset pin */
- if (hw->flags & MSDC_RST_PIN_EN) {
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_RST_PIN, pull);
-
- if (mode == MSDC_PIN_PULL_UP) {
- sdr_clr_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST);
- } else {
- sdr_set_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST);
- }
- }
+ struct msdc_hw *hw = (struct msdc_hw *)host->hw;
+ void __iomem *base = host->base;
+ int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
+
+ /* Config reset pin */
+ if (hw->flags & MSDC_RST_PIN_EN) {
+ if (hw->config_gpio_pin) /* NULL */
+ hw->config_gpio_pin(MSDC_RST_PIN, pull);
+
+ if (mode == MSDC_PIN_PULL_UP)
+ sdr_clr_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST);
+ else
+ sdr_set_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST);
+ }
}
static void msdc_core_power(struct msdc_host *host, int on)
{
- N_MSG(CFG, "Turn %s %s power (copower: %d -> %d)",
- on ? "on" : "off", "core", host->core_power, on);
-
- if (on && host->core_power == 0) {
- msdc_vcore_on(host);
- host->core_power = 1;
- msleep(1);
- } else if (!on && host->core_power == 1) {
- msdc_vcore_off(host);
- host->core_power = 0;
- msleep(1);
- }
+ N_MSG(CFG, "Turn %s %s power (copower: %d -> %d)",
+ on ? "on" : "off", "core", host->core_power, on);
+
+ if (on && host->core_power == 0) {
+ msdc_vcore_on(host);
+ host->core_power = 1;
+ msleep(1);
+ } else if (!on && host->core_power == 1) {
+ msdc_vcore_off(host);
+ host->core_power = 0;
+ msleep(1);
+ }
}
static void msdc_host_power(struct msdc_host *host, int on)
{
- N_MSG(CFG, "Turn %s %s power ", on ? "on" : "off", "host");
-
- if (on) {
- //msdc_core_power(host, 1); // need do card detection.
- msdc_pin_reset(host, MSDC_PIN_PULL_UP);
- } else {
- msdc_pin_reset(host, MSDC_PIN_PULL_DOWN);
- //msdc_core_power(host, 0);
- }
+ N_MSG(CFG, "Turn %s %s power ", on ? "on" : "off", "host");
+
+ if (on) {
+ //msdc_core_power(host, 1); // need do card detection.
+ msdc_pin_reset(host, MSDC_PIN_PULL_UP);
+ } else {
+ msdc_pin_reset(host, MSDC_PIN_PULL_DOWN);
+ //msdc_core_power(host, 0);
+ }
}
static void msdc_card_power(struct msdc_host *host, int on)
{
- N_MSG(CFG, "Turn %s %s power ", on ? "on" : "off", "card");
-
- if (on) {
- msdc_pin_config(host, MSDC_PIN_PULL_UP);
- if (host->hw->ext_power_on) {
- host->hw->ext_power_on();
- } else {
- //msdc_vdd_on(host); // need todo card detection.
- }
- msleep(1);
- } else {
- if (host->hw->ext_power_off) {
- host->hw->ext_power_off();
- } else {
- //msdc_vdd_off(host);
- }
- msdc_pin_config(host, MSDC_PIN_PULL_DOWN);
- msleep(1);
- }
+ N_MSG(CFG, "Turn %s %s power ", on ? "on" : "off", "card");
+
+ if (on) {
+ msdc_pin_config(host, MSDC_PIN_PULL_UP);
+ //msdc_vdd_on(host); // need to do card detection.
+ msleep(1);
+ } else {
+ //msdc_vdd_off(host);
+ msdc_pin_config(host, MSDC_PIN_PULL_DOWN);
+ msleep(1);
+ }
}
static void msdc_set_power_mode(struct msdc_host *host, u8 mode)
{
- N_MSG(CFG, "Set power mode(%d)", mode);
-
- if (host->power_mode == MMC_POWER_OFF && mode != MMC_POWER_OFF) {
- msdc_host_power(host, 1);
- msdc_card_power(host, 1);
- } else if (host->power_mode != MMC_POWER_OFF && mode == MMC_POWER_OFF) {
- msdc_card_power(host, 0);
- msdc_host_power(host, 0);
- }
- host->power_mode = mode;
+ N_MSG(CFG, "Set power mode(%d)", mode);
+
+ if (host->power_mode == MMC_POWER_OFF && mode != MMC_POWER_OFF) {
+ msdc_host_power(host, 1);
+ msdc_card_power(host, 1);
+ } else if (host->power_mode != MMC_POWER_OFF && mode == MMC_POWER_OFF) {
+ msdc_card_power(host, 0);
+ msdc_host_power(host, 0);
+ }
+ host->power_mode = mode;
}
#endif /* end of --- */
#ifdef CONFIG_PM
/*
- register as callback function of WIFI(combo_sdio_register_pm) .
- can called by msdc_drv_suspend/resume too.
+ registered as a callback function of WIFI (combo_sdio_register_pm).
+ can be called by msdc_drv_suspend/resume too.
*/
static void msdc_pm(pm_message_t state, void *data)
{
- struct msdc_host *host = (struct msdc_host *)data;
- int evt = state.event;
-
- if (evt == PM_EVENT_USER_RESUME || evt == PM_EVENT_USER_SUSPEND) {
- INIT_MSG("USR_%s: suspend<%d> power<%d>",
- evt == PM_EVENT_USER_RESUME ? "EVENT_USER_RESUME" : "EVENT_USER_SUSPEND",
- host->suspend, host->power_mode);
- }
-
- if (evt == PM_EVENT_SUSPEND || evt == PM_EVENT_USER_SUSPEND) {
- if (host->suspend) /* already suspend */ /* default 0*/
- return;
-
- /* for memory card. already power off by mmc */
- if (evt == PM_EVENT_SUSPEND && host->power_mode == MMC_POWER_OFF)
- return;
-
- host->suspend = 1;
- host->pm_state = state; /* default PMSG_RESUME */
-
- INIT_MSG("%s Suspend", evt == PM_EVENT_SUSPEND ? "PM" : "USR");
- if(host->hw->flags & MSDC_SYS_SUSPEND) /* set for card */
- (void)mmc_suspend_host(host->mmc);
- else {
- // host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* just for double confirm */ /* --- by chhung */
- mmc_remove_host(host->mmc);
- }
- } else if (evt == PM_EVENT_RESUME || evt == PM_EVENT_USER_RESUME) {
- if (!host->suspend){
- //ERR_MSG("warning: already resume");
- return;
- }
-
- /* No PM resume when USR suspend */
- if (evt == PM_EVENT_RESUME && host->pm_state.event == PM_EVENT_USER_SUSPEND) {
- ERR_MSG("PM Resume when in USR Suspend"); /* won't happen. */
- return;
- }
-
- host->suspend = 0;
- host->pm_state = state;
-
- INIT_MSG("%s Resume", evt == PM_EVENT_RESUME ? "PM" : "USR");
- if(host->hw->flags & MSDC_SYS_SUSPEND) { /* will not set for WIFI */
- (void)mmc_resume_host(host->mmc);
- }
- else {
- // host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* --- by chhung */
- mmc_add_host(host->mmc);
- }
- }
+ struct msdc_host *host = (struct msdc_host *)data;
+ int evt = state.event;
+
+ if (evt == PM_EVENT_USER_RESUME || evt == PM_EVENT_USER_SUSPEND) {
+ INIT_MSG("USR_%s: suspend<%d> power<%d>",
+ evt == PM_EVENT_USER_RESUME ? "EVENT_USER_RESUME" : "EVENT_USER_SUSPEND",
+ host->suspend, host->power_mode);
+ }
+
+ if (evt == PM_EVENT_SUSPEND || evt == PM_EVENT_USER_SUSPEND) {
+ if (host->suspend) /* already suspend */ /* default 0*/
+ return;
+
+ /* for memory card. already power off by mmc */
+ if (evt == PM_EVENT_SUSPEND && host->power_mode == MMC_POWER_OFF)
+ return;
+
+ host->suspend = 1;
+ host->pm_state = state; /* default PMSG_RESUME */
+
+ } else if (evt == PM_EVENT_RESUME || evt == PM_EVENT_USER_RESUME) {
+ if (!host->suspend) {
+ //ERR_MSG("warning: already resume");
+ return;
+ }
+
+ /* No PM resume when USR suspend */
+ if (evt == PM_EVENT_RESUME && host->pm_state.event == PM_EVENT_USER_SUSPEND) {
+ ERR_MSG("PM Resume when in USR Suspend"); /* won't happen. */
+ return;
+ }
+
+ host->suspend = 0;
+ host->pm_state = state;
+
+ }
}
#endif
/*--------------------------------------------------------------------------*/
/* mmc_host_ops members */
/*--------------------------------------------------------------------------*/
-static unsigned int msdc_command_start(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune, /* not used */
- unsigned long timeout)
+static unsigned int msdc_command_start(struct msdc_host *host,
+ struct mmc_command *cmd,
+ int tune, /* not used */
+ unsigned long timeout)
{
- u32 base = host->base;
- u32 opcode = cmd->opcode;
- u32 rawcmd;
- u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
- MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
- MSDC_INT_ACMD19_DONE;
-
- u32 resp;
- unsigned long tmo;
-
- /* Protocol layer does not provide response type, but our hardware needs
- * to know exact type, not just size!
- */
- if (opcode == MMC_SEND_OP_COND || opcode == SD_APP_OP_COND)
- resp = RESP_R3;
- else if (opcode == MMC_SET_RELATIVE_ADDR || opcode == SD_SEND_RELATIVE_ADDR)
- resp = (mmc_cmd_type(cmd) == MMC_CMD_BCR) ? RESP_R6 : RESP_R1;
- else if (opcode == MMC_FAST_IO)
- resp = RESP_R4;
- else if (opcode == MMC_GO_IRQ_STATE)
- resp = RESP_R5;
- else if (opcode == MMC_SELECT_CARD)
- resp = (cmd->arg != 0) ? RESP_R1B : RESP_NONE;
- else if (opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED)
- resp = RESP_R1; /* SDIO workaround. */
- else if (opcode == SD_SEND_IF_COND && (mmc_cmd_type(cmd) == MMC_CMD_BCR))
- resp = RESP_R1;
- else {
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_R1:
- resp = RESP_R1;
- break;
- case MMC_RSP_R1B:
- resp = RESP_R1B;
- break;
- case MMC_RSP_R2:
- resp = RESP_R2;
- break;
- case MMC_RSP_R3:
- resp = RESP_R3;
- break;
- case MMC_RSP_NONE:
- default:
- resp = RESP_NONE;
- break;
- }
- }
-
- cmd->error = 0;
- /* rawcmd :
- * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
- * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
- */
- rawcmd = opcode | msdc_rsp[resp] << 7 | host->blksz << 16;
-
- if (opcode == MMC_READ_MULTIPLE_BLOCK) {
- rawcmd |= (2 << 11);
- } else if (opcode == MMC_READ_SINGLE_BLOCK) {
- rawcmd |= (1 << 11);
- } else if (opcode == MMC_WRITE_MULTIPLE_BLOCK) {
- rawcmd |= ((2 << 11) | (1 << 13));
- } else if (opcode == MMC_WRITE_BLOCK) {
- rawcmd |= ((1 << 11) | (1 << 13));
- } else if (opcode == SD_IO_RW_EXTENDED) {
- if (cmd->data->flags & MMC_DATA_WRITE)
- rawcmd |= (1 << 13);
- if (cmd->data->blocks > 1)
- rawcmd |= (2 << 11);
- else
- rawcmd |= (1 << 11);
- } else if (opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int)-1) {
- rawcmd |= (1 << 14);
- } else if ((opcode == SD_APP_SEND_SCR) ||
- (opcode == SD_APP_SEND_NUM_WR_BLKS) ||
- (opcode == SD_SWITCH && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
- (opcode == SD_APP_SD_STATUS && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
- (opcode == MMC_SEND_EXT_CSD && (mmc_cmd_type(cmd) == MMC_CMD_ADTC))) {
- rawcmd |= (1 << 11);
- } else if (opcode == MMC_STOP_TRANSMISSION) {
- rawcmd |= (1 << 14);
- rawcmd &= ~(0x0FFF << 16);
- }
-
- N_MSG(CMD, "CMD<%d><0x%.8x> Arg<0x%.8x>", opcode , rawcmd, cmd->arg);
-
- tmo = jiffies + timeout;
-
- if (opcode == MMC_SEND_STATUS) {
- for (;;) {
- if (!sdc_is_cmd_busy())
- break;
-
- if (time_after(jiffies, tmo)) {
- ERR_MSG("XXX cmd_busy timeout: before CMD<%d>", opcode);
- cmd->error = (unsigned int)-ETIMEDOUT;
- msdc_reset();
- goto end;
- }
- }
- }else {
- for (;;) {
- if (!sdc_is_busy())
- break;
- if (time_after(jiffies, tmo)) {
- ERR_MSG("XXX sdc_busy timeout: before CMD<%d>", opcode);
- cmd->error = (unsigned int)-ETIMEDOUT;
- msdc_reset();
- goto end;
- }
- }
- }
-
- //BUG_ON(in_interrupt());
- host->cmd = cmd;
- host->cmd_rsp = resp;
-
- init_completion(&host->cmd_done);
-
- sdr_set_bits(MSDC_INTEN, wints);
- sdc_send_cmd(rawcmd, cmd->arg);
-
-end:
- return cmd->error;
+ void __iomem *base = host->base;
+ u32 opcode = cmd->opcode;
+ u32 rawcmd;
+ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
+ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
+ MSDC_INT_ACMD19_DONE;
+
+ u32 resp;
+ unsigned long tmo;
+
+ /* Protocol layer does not provide response type, but our hardware needs
+ * to know exact type, not just size!
+ */
+ if (opcode == MMC_SEND_OP_COND || opcode == SD_APP_OP_COND) {
+ resp = RESP_R3;
+ } else if (opcode == MMC_SET_RELATIVE_ADDR) {
+ resp = (mmc_cmd_type(cmd) == MMC_CMD_BCR) ? RESP_R6 : RESP_R1;
+ } else if (opcode == MMC_FAST_IO) {
+ resp = RESP_R4;
+ } else if (opcode == MMC_GO_IRQ_STATE) {
+ resp = RESP_R5;
+ } else if (opcode == MMC_SELECT_CARD) {
+ resp = (cmd->arg != 0) ? RESP_R1B : RESP_NONE;
+ } else if (opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED) {
+ resp = RESP_R1; /* SDIO workaround. */
+ } else if (opcode == SD_SEND_IF_COND && (mmc_cmd_type(cmd) == MMC_CMD_BCR)) {
+ resp = RESP_R1;
+ } else {
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1:
+ resp = RESP_R1;
+ break;
+ case MMC_RSP_R1B:
+ resp = RESP_R1B;
+ break;
+ case MMC_RSP_R2:
+ resp = RESP_R2;
+ break;
+ case MMC_RSP_R3:
+ resp = RESP_R3;
+ break;
+ case MMC_RSP_NONE:
+ default:
+ resp = RESP_NONE;
+ break;
+ }
+ }
+
+ cmd->error = 0;
+ /* rawcmd :
+ * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
+ * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
+ */
+ rawcmd = opcode | msdc_rsp[resp] << 7 | host->blksz << 16;
+
+ if (opcode == MMC_READ_MULTIPLE_BLOCK) {
+ rawcmd |= (2 << 11);
+ } else if (opcode == MMC_READ_SINGLE_BLOCK) {
+ rawcmd |= (1 << 11);
+ } else if (opcode == MMC_WRITE_MULTIPLE_BLOCK) {
+ rawcmd |= ((2 << 11) | (1 << 13));
+ } else if (opcode == MMC_WRITE_BLOCK) {
+ rawcmd |= ((1 << 11) | (1 << 13));
+ } else if (opcode == SD_IO_RW_EXTENDED) {
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ rawcmd |= (1 << 13);
+ if (cmd->data->blocks > 1)
+ rawcmd |= (2 << 11);
+ else
+ rawcmd |= (1 << 11);
+ } else if (opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int)-1) {
+ rawcmd |= (1 << 14);
+ } else if ((opcode == SD_APP_SEND_SCR) ||
+ (opcode == SD_APP_SEND_NUM_WR_BLKS) ||
+ (opcode == SD_SWITCH && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
+ (opcode == SD_APP_SD_STATUS && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
+ (opcode == MMC_SEND_EXT_CSD && (mmc_cmd_type(cmd) == MMC_CMD_ADTC))) {
+ rawcmd |= (1 << 11);
+ } else if (opcode == MMC_STOP_TRANSMISSION) {
+ rawcmd |= (1 << 14);
+ rawcmd &= ~(0x0FFF << 16);
+ }
+
+ N_MSG(CMD, "CMD<%d><0x%.8x> Arg<0x%.8x>", opcode, rawcmd, cmd->arg);
+
+ tmo = jiffies + timeout;
+
+ if (opcode == MMC_SEND_STATUS) {
+ for (;;) {
+ if (!sdc_is_cmd_busy())
+ break;
+
+ if (time_after(jiffies, tmo)) {
+ ERR_MSG("XXX cmd_busy timeout: before CMD<%d>", opcode);
+ cmd->error = -ETIMEDOUT;
+ msdc_reset_hw(host);
+ goto end;
+ }
+ }
+ } else {
+ for (;;) {
+ if (!sdc_is_busy())
+ break;
+ if (time_after(jiffies, tmo)) {
+ ERR_MSG("XXX sdc_busy timeout: before CMD<%d>", opcode);
+ cmd->error = -ETIMEDOUT;
+ msdc_reset_hw(host);
+ goto end;
+ }
+ }
+ }
+
+ //BUG_ON(in_interrupt());
+ host->cmd = cmd;
+ host->cmd_rsp = resp;
+
+ init_completion(&host->cmd_done);
+
+ sdr_set_bits(MSDC_INTEN, wints);
+ sdc_send_cmd(rawcmd, cmd->arg);
+
+end:
+ return cmd->error;
}
-static unsigned int msdc_command_resp(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune,
- unsigned long timeout)
+static unsigned int msdc_command_resp(struct msdc_host *host,
+ struct mmc_command *cmd,
+ int tune,
+ unsigned long timeout)
+ __must_hold(&host->lock)
{
- u32 base = host->base;
- u32 opcode = cmd->opcode;
- //u32 rawcmd;
- u32 resp;
- u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
- MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
- MSDC_INT_ACMD19_DONE;
-
- resp = host->cmd_rsp;
-
- BUG_ON(in_interrupt());
- //init_completion(&host->cmd_done);
- //sdr_set_bits(MSDC_INTEN, wints);
-
- spin_unlock(&host->lock);
- if(!wait_for_completion_timeout(&host->cmd_done, 10*timeout)){
- ERR_MSG("XXX CMD<%d> wait_for_completion timeout ARG<0x%.8x>", opcode, cmd->arg);
- cmd->error = (unsigned int)-ETIMEDOUT;
- msdc_reset();
- }
- spin_lock(&host->lock);
-
- sdr_clr_bits(MSDC_INTEN, wints);
- host->cmd = NULL;
+ void __iomem *base = host->base;
+ u32 opcode = cmd->opcode;
+ //u32 rawcmd;
+ u32 resp;
+ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
+ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
+ MSDC_INT_ACMD19_DONE;
+
+ resp = host->cmd_rsp;
+
+ BUG_ON(in_interrupt());
+ //init_completion(&host->cmd_done);
+ //sdr_set_bits(MSDC_INTEN, wints);
+
+ spin_unlock(&host->lock);
+ if (!wait_for_completion_timeout(&host->cmd_done, 10 * timeout)) {
+ ERR_MSG("XXX CMD<%d> wait_for_completion timeout ARG<0x%.8x>", opcode, cmd->arg);
+ cmd->error = -ETIMEDOUT;
+ msdc_reset_hw(host);
+ }
+ spin_lock(&host->lock);
+
+ sdr_clr_bits(MSDC_INTEN, wints);
+ host->cmd = NULL;
//end:
#ifdef MT6575_SD_DEBUG
- switch (resp) {
- case RESP_NONE:
- N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)", opcode, cmd->error, resp);
- break;
- case RESP_R2:
- N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= %.8x %.8x %.8x %.8x",
- opcode, cmd->error, resp, cmd->resp[0], cmd->resp[1],
- cmd->resp[2], cmd->resp[3]);
- break;
- default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
- N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= 0x%.8x",
- opcode, cmd->error, resp, cmd->resp[0]);
- if (cmd->error == 0) {
- switch (resp) {
- case RESP_R1:
- case RESP_R1B:
- msdc_dump_card_status(host, cmd->resp[0]);
- break;
- case RESP_R3:
- msdc_dump_ocr_reg(host, cmd->resp[0]);
- break;
- case RESP_R5:
- msdc_dump_io_resp(host, cmd->resp[0]);
- break;
- case RESP_R6:
- msdc_dump_rca_resp(host, cmd->resp[0]);
- break;
- }
- }
- break;
- }
+ switch (resp) {
+ case RESP_NONE:
+ N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)", opcode, cmd->error, resp);
+ break;
+ case RESP_R2:
+ N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= %.8x %.8x %.8x %.8x",
+ opcode, cmd->error, resp, cmd->resp[0], cmd->resp[1],
+ cmd->resp[2], cmd->resp[3]);
+ break;
+ default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
+ N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= 0x%.8x",
+ opcode, cmd->error, resp, cmd->resp[0]);
+ if (cmd->error == 0) {
+ switch (resp) {
+ case RESP_R1:
+ case RESP_R1B:
+ msdc_dump_card_status(host, cmd->resp[0]);
+ break;
+ case RESP_R3:
+ msdc_dump_ocr_reg(host, cmd->resp[0]);
+ break;
+ case RESP_R5:
+ msdc_dump_io_resp(host, cmd->resp[0]);
+ break;
+ case RESP_R6:
+ msdc_dump_rca_resp(host, cmd->resp[0]);
+ break;
+ }
+ }
+ break;
+ }
#endif
- /* do we need to save card's RCA when SD_SEND_RELATIVE_ADDR */
-
- if (!tune) {
- return cmd->error;
- }
-
- /* memory card CRC */
- if(host->hw->flags & MSDC_REMOVABLE && cmd->error == (unsigned int)(-EIO) ) {
- if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */
- msdc_abort_data(host);
- } else {
- /* do basic: reset*/
- msdc_reset();
- msdc_clr_fifo();
- msdc_clr_int();
- }
- cmd->error = msdc_tune_cmdrsp(host,cmd);
- }
-
- // check DAT0
- /* if (resp == RESP_R1B) {
- while ((sdr_read32(MSDC_PS) & 0x10000) != 0x10000);
- } */
- /* CMD12 Error Handle */
-
- return cmd->error;
-}
-
-static unsigned int msdc_do_command(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune,
- unsigned long timeout)
-{
- if (msdc_command_start(host, cmd, tune, timeout))
- goto end;
+ /* do we need to save card's RCA when SD_SEND_RELATIVE_ADDR */
+
+ if (!tune)
+ return cmd->error;
+
+ /* memory card CRC */
+ if (host->hw->flags & MSDC_REMOVABLE && cmd->error == -EIO) {
+ if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */
+ msdc_abort_data(host);
+ } else {
+ /* do basic: reset*/
+ msdc_reset_hw(host);
+ msdc_clr_fifo();
+ msdc_clr_int();
+ }
+ cmd->error = msdc_tune_cmdrsp(host, cmd);
+ }
- if (msdc_command_resp(host, cmd, tune, timeout))
- goto end;
-
-end:
+ // check DAT0
+ /* if (resp == RESP_R1B) {
+ while ((sdr_read32(MSDC_PS) & 0x10000) != 0x10000);
+ } */
+ /* CMD12 Error Handle */
- N_MSG(CMD, " return<%d> resp<0x%.8x>", cmd->error, cmd->resp[0]);
- return cmd->error;
-}
-
-/* The abort condition when PIO read/write
- tmo:
-*/
-static int msdc_pio_abort(struct msdc_host *host, struct mmc_data *data, unsigned long tmo)
-{
- int ret = 0;
- u32 base = host->base;
-
- if (atomic_read(&host->abort)) {
- ret = 1;
- }
-
- if (time_after(jiffies, tmo)) {
- data->error = (unsigned int)-ETIMEDOUT;
- ERR_MSG("XXX PIO Data Timeout: CMD<%d>", host->mrq->cmd->opcode);
- ret = 1;
- }
-
- if(ret) {
- msdc_reset();
- msdc_clr_fifo();
- msdc_clr_int();
- ERR_MSG("msdc pio find abort");
- }
- return ret;
+ return cmd->error;
}
-/*
- Need to add a timeout, or WDT timeout, system reboot.
-*/
-// pio mode data read/write
-static int msdc_pio_read(struct msdc_host *host, struct mmc_data *data)
+static unsigned int msdc_do_command(struct msdc_host *host,
+ struct mmc_command *cmd,
+ int tune,
+ unsigned long timeout)
{
- struct scatterlist *sg = data->sg;
- u32 base = host->base;
- u32 num = data->sg_len;
- u32 *ptr;
- u8 *u8ptr;
- u32 left = 0;
- u32 count, size = 0;
- u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ;
- unsigned long tmo = jiffies + DAT_TIMEOUT;
-
- sdr_set_bits(MSDC_INTEN, wints);
- while (num) {
- left = sg_dma_len(sg);
- ptr = sg_virt(sg);
- while (left) {
- if ((left >= MSDC_FIFO_THD) && (msdc_rxfifocnt() >= MSDC_FIFO_THD)) {
- count = MSDC_FIFO_THD >> 2;
- do {
- *ptr++ = msdc_fifo_read32();
- } while (--count);
- left -= MSDC_FIFO_THD;
- } else if ((left < MSDC_FIFO_THD) && msdc_rxfifocnt() >= left) {
- while (left > 3) {
- *ptr++ = msdc_fifo_read32();
- left -= 4;
- }
-
- u8ptr = (u8 *)ptr;
- while(left) {
- * u8ptr++ = msdc_fifo_read8();
- left--;
- }
- }
-
- if (msdc_pio_abort(host, data, tmo)) {
- goto end;
- }
- }
- size += sg_dma_len(sg);
- sg = sg_next(sg); num--;
- }
+ if (msdc_command_start(host, cmd, tune, timeout))
+ goto end;
+
+ if (msdc_command_resp(host, cmd, tune, timeout))
+ goto end;
+
end:
- data->bytes_xfered += size;
- N_MSG(FIO, " PIO Read<%d>bytes", size);
-
- sdr_clr_bits(MSDC_INTEN, wints);
- if(data->error) ERR_MSG("read pio data->error<%d> left<%d> size<%d>", data->error, left, size);
- return data->error;
-}
-/* please make sure won't using PIO when size >= 512
- which means, memory card block read/write won't using pio
- then don't need to handle the CMD12 when data error.
-*/
-static int msdc_pio_write(struct msdc_host* host, struct mmc_data *data)
-{
- u32 base = host->base;
- struct scatterlist *sg = data->sg;
- u32 num = data->sg_len;
- u32 *ptr;
- u8 *u8ptr;
- u32 left;
- u32 count, size = 0;
- u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ;
- unsigned long tmo = jiffies + DAT_TIMEOUT;
-
- sdr_set_bits(MSDC_INTEN, wints);
- while (num) {
- left = sg_dma_len(sg);
- ptr = sg_virt(sg);
-
- while (left) {
- if (left >= MSDC_FIFO_SZ && msdc_txfifocnt() == 0) {
- count = MSDC_FIFO_SZ >> 2;
- do {
- msdc_fifo_write32(*ptr); ptr++;
- } while (--count);
- left -= MSDC_FIFO_SZ;
- } else if (left < MSDC_FIFO_SZ && msdc_txfifocnt() == 0) {
- while (left > 3) {
- msdc_fifo_write32(*ptr); ptr++;
- left -= 4;
- }
-
- u8ptr = (u8*)ptr;
- while(left){
- msdc_fifo_write8(*u8ptr); u8ptr++;
- left--;
- }
- }
-
- if (msdc_pio_abort(host, data, tmo)) {
- goto end;
- }
- }
- size += sg_dma_len(sg);
- sg = sg_next(sg); num--;
- }
-end:
- data->bytes_xfered += size;
- N_MSG(FIO, " PIO Write<%d>bytes", size);
- if(data->error) ERR_MSG("write pio data->error<%d>", data->error);
-
- sdr_clr_bits(MSDC_INTEN, wints);
- return data->error;
+ N_MSG(CMD, " return<%d> resp<0x%.8x>", cmd->error, cmd->resp[0]);
+ return cmd->error;
}
#if 0 /* --- by chhung */
-// DMA resume / start / stop
+// DMA resume / start / stop
static void msdc_dma_resume(struct msdc_host *host)
{
- u32 base = host->base;
+ void __iomem *base = host->base;
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_RESUME, 1);
+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_RESUME, 1);
- N_MSG(DMA, "DMA resume");
+ N_MSG(DMA, "DMA resume");
}
#endif /* end of --- */
static void msdc_dma_start(struct msdc_host *host)
{
- u32 base = host->base;
- u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ;
-
- sdr_set_bits(MSDC_INTEN, wints);
- //dsb(); /* --- by chhung */
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
-
- N_MSG(DMA, "DMA start");
+ void __iomem *base = host->base;
+ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR;
+
+ sdr_set_bits(MSDC_INTEN, wints);
+ //dsb(); /* --- by chhung */
+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
+
+ N_MSG(DMA, "DMA start");
}
static void msdc_dma_stop(struct msdc_host *host)
{
- u32 base = host->base;
- //u32 retries=500;
- u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ;
-
- N_MSG(DMA, "DMA status: 0x%.8x",sdr_read32(MSDC_DMA_CFG));
- //while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
+ void __iomem *base = host->base;
+ //u32 retries=500;
+ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR;
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1);
- while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
+ N_MSG(DMA, "DMA status: 0x%.8x", sdr_read32(MSDC_DMA_CFG));
+ //while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
- //dsb(); /* --- by chhung */
- sdr_clr_bits(MSDC_INTEN, wints); /* Not just xfer_comp */
+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1);
+ while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
+ ;
- N_MSG(DMA, "DMA stop");
-}
+ //dsb(); /* --- by chhung */
+ sdr_clr_bits(MSDC_INTEN, wints); /* Not just xfer_comp */
-#if 0 /* --- by chhung */
-/* dump a gpd list */
-static void msdc_dma_dump(struct msdc_host *host, struct msdc_dma *dma)
-{
- gpd_t *gpd = dma->gpd;
- bd_t *bd = dma->bd;
- bd_t *ptr;
- int i = 0;
- int p_to_v;
-
- if (dma->mode != MSDC_MODE_DMA_DESC) {
- return;
- }
-
- ERR_MSG("try to dump gpd and bd");
-
- /* dump gpd */
- ERR_MSG(".gpd<0x%.8x> gpd_phy<0x%.8x>", (int)gpd, (int)dma->gpd_addr);
- ERR_MSG("...hwo <%d>", gpd->hwo );
- ERR_MSG("...bdp <%d>", gpd->bdp );
- ERR_MSG("...chksum<0x%.8x>", gpd->chksum );
- //ERR_MSG("...intr <0x%.8x>", gpd->intr );
- ERR_MSG("...next <0x%.8x>", (int)gpd->next );
- ERR_MSG("...ptr <0x%.8x>", (int)gpd->ptr );
- ERR_MSG("...buflen<0x%.8x>", gpd->buflen );
- //ERR_MSG("...extlen<0x%.8x>", gpd->extlen );
- //ERR_MSG("...arg <0x%.8x>", gpd->arg );
- //ERR_MSG("...blknum<0x%.8x>", gpd->blknum );
- //ERR_MSG("...cmd <0x%.8x>", gpd->cmd );
-
- /* dump bd */
- ERR_MSG(".bd<0x%.8x> bd_phy<0x%.8x> gpd_ptr<0x%.8x>", (int)bd, (int)dma->bd_addr, (int)gpd->ptr);
- ptr = bd;
- p_to_v = ((u32)bd - (u32)dma->bd_addr);
- while (1) {
- ERR_MSG(".bd[%d]", i); i++;
- ERR_MSG("...eol <%d>", ptr->eol );
- ERR_MSG("...chksum<0x%.8x>", ptr->chksum );
- //ERR_MSG("...blkpad<0x%.8x>", ptr->blkpad );
- //ERR_MSG("...dwpad <0x%.8x>", ptr->dwpad );
- ERR_MSG("...next <0x%.8x>", (int)ptr->next );
- ERR_MSG("...ptr <0x%.8x>", (int)ptr->ptr );
- ERR_MSG("...buflen<0x%.8x>", (int)ptr->buflen );
-
- if (ptr->eol == 1) {
- break;
- }
-
- /* find the next bd, virtual address of ptr->next */
- /* don't need to enable when use malloc */
- //BUG_ON( (ptr->next + p_to_v)!=(ptr+1) );
- //ERR_MSG(".next bd<0x%.8x><0x%.8x>", (ptr->next + p_to_v), (ptr+1));
- ptr++;
- }
-
- ERR_MSG("dump gpd and bd finished");
+ N_MSG(DMA, "DMA stop");
}
-#endif /* end of --- */
/* calc checksum */
static u8 msdc_dma_calcs(u8 *buf, u32 len)
{
- u32 i, sum = 0;
- for (i = 0; i < len; i++) {
- sum += buf[i];
- }
- return 0xFF - (u8)sum;
+ u32 i, sum = 0;
+
+ for (i = 0; i < len; i++)
+ sum += buf[i];
+ return 0xFF - (u8)sum;
}
/* gpd bd setup + dma registers */
-static int msdc_dma_config(struct msdc_host *host, struct msdc_dma *dma)
+static void msdc_dma_config(struct msdc_host *host, struct msdc_dma *dma)
{
- u32 base = host->base;
- u32 sglen = dma->sglen;
- //u32 i, j, num, bdlen, arg, xfersz;
- u32 j, num, bdlen;
- u8 blkpad, dwpad, chksum;
- struct scatterlist *sg = dma->sg;
- gpd_t *gpd;
- bd_t *bd;
-
- switch (dma->mode) {
- case MSDC_MODE_DMA_BASIC:
- BUG_ON(dma->xfersz > 65535);
- BUG_ON(dma->sglen != 1);
- sdr_write32(MSDC_DMA_SA, PHYSADDR(sg_dma_address(sg)));
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_LASTBUF, 1);
+ void __iomem *base = host->base;
+ //u32 i, j, num, bdlen, arg, xfersz;
+ u32 j, num;
+ struct scatterlist *sg;
+ struct gpd *gpd;
+ struct bd *bd;
+
+ switch (dma->mode) {
+ case MSDC_MODE_DMA_BASIC:
+ BUG_ON(host->xfer_size > 65535);
+ BUG_ON(dma->sglen != 1);
+ sdr_write32(MSDC_DMA_SA, PHYSADDR(sg_dma_address(sg)));
+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_LASTBUF, 1);
//#if defined (CONFIG_RALINK_MT7620)
- if (ralink_soc == MT762X_SOC_MT7620A)
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_XFERSZ, sg_dma_len(sg));
+ if (ralink_soc == MT762X_SOC_MT7620A)
+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_XFERSZ, sg_dma_len(sg));
//#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628)
- else
- sdr_write32((volatile u32*)(RALINK_MSDC_BASE+0xa8), sg_dma_len(sg));
+ else
+ sdr_write32((void __iomem *)(RALINK_MSDC_BASE + 0xa8), sg_dma_len(sg));
//#endif
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz);
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 0);
- break;
- case MSDC_MODE_DMA_DESC:
- blkpad = (dma->flags & DMA_FLAG_PAD_BLOCK) ? 1 : 0;
- dwpad = (dma->flags & DMA_FLAG_PAD_DWORD) ? 1 : 0;
- chksum = (dma->flags & DMA_FLAG_EN_CHKSUM) ? 1 : 0;
-
- /* calculate the required number of gpd */
- num = (sglen + MAX_BD_PER_GPD - 1) / MAX_BD_PER_GPD;
- BUG_ON(num !=1 );
-
- gpd = dma->gpd;
- bd = dma->bd;
- bdlen = sglen;
-
- /* modify gpd*/
- //gpd->intr = 0;
- gpd->hwo = 1; /* hw will clear it */
- gpd->bdp = 1;
- gpd->chksum = 0; /* need to clear first. */
- gpd->chksum = (chksum ? msdc_dma_calcs((u8 *)gpd, 16) : 0);
-
- /* modify bd*/
- for (j = 0; j < bdlen; j++) {
- msdc_init_bd(&bd[j], blkpad, dwpad, sg_dma_address(sg), sg_dma_len(sg));
- if(j == bdlen - 1) {
- bd[j].eol = 1; /* the last bd */
- } else {
- bd[j].eol = 0;
- }
- bd[j].chksum = 0; /* checksume need to clear first */
- bd[j].chksum = (chksum ? msdc_dma_calcs((u8 *)(&bd[j]), 16) : 0);
- sg++;
- }
-
- dma->used_gpd += 2;
- dma->used_bd += bdlen;
-
- sdr_set_field(MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, chksum);
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz);
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1);
-
- sdr_write32(MSDC_DMA_SA, PHYSADDR((u32)dma->gpd_addr));
- break;
-
- default:
- break;
- }
-
- N_MSG(DMA, "DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL));
- N_MSG(DMA, "DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG));
- N_MSG(DMA, "DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA));
-
- return 0;
-}
-
-static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
- struct scatterlist *sg, unsigned int sglen)
-{
- BUG_ON(sglen > MAX_BD_NUM); /* not support currently */
-
- dma->sg = sg;
- dma->flags = DMA_FLAG_EN_CHKSUM;
- //dma->flags = DMA_FLAG_NONE; /* CHECKME */
- dma->sglen = sglen;
- dma->xfersz = host->xfer_size;
- dma->burstsz = MSDC_BRUST_64B;
-
- if (sglen == 1 && sg_dma_len(sg) <= MAX_DMA_CNT)
- dma->mode = MSDC_MODE_DMA_BASIC;
- else
- dma->mode = MSDC_MODE_DMA_DESC;
-
- N_MSG(DMA, "DMA mode<%d> sglen<%d> xfersz<%d>", dma->mode, dma->sglen, dma->xfersz);
-
- msdc_dma_config(host, dma);
-
- /*if (dma->mode == MSDC_MODE_DMA_DESC) {
- //msdc_dma_dump(host, dma);
- } */
+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ,
+ MSDC_BRUST_64B);
+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 0);
+ break;
+ case MSDC_MODE_DMA_DESC:
+
+ /* calculate the required number of gpd */
+ num = (dma->sglen + MAX_BD_PER_GPD - 1) / MAX_BD_PER_GPD;
+ BUG_ON(num != 1);
+
+ gpd = dma->gpd;
+ bd = dma->bd;
+
+ /* modify gpd*/
+ //gpd->intr = 0;
+ gpd->hwo = 1; /* hw will clear it */
+ gpd->bdp = 1;
+ gpd->chksum = 0; /* need to clear first. */
+ gpd->chksum = msdc_dma_calcs((u8 *)gpd, 16);
+
+ /* modify bd*/
+ for_each_sg(dma->sg, sg, dma->sglen, j) {
+ bd[j].blkpad = 0;
+ bd[j].dwpad = 0;
+ bd[j].ptr = (void *)sg_dma_address(sg);
+ bd[j].buflen = sg_dma_len(sg);
+
+ if (j == dma->sglen - 1)
+ bd[j].eol = 1; /* the last bd */
+ else
+ bd[j].eol = 0;
+
+ bd[j].chksum = 0; /* checksume need to clear first */
+ bd[j].chksum = msdc_dma_calcs((u8 *)(&bd[j]), 16);
+ }
+
+ sdr_set_field(MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ,
+ MSDC_BRUST_64B);
+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1);
+
+ sdr_write32(MSDC_DMA_SA, PHYSADDR((u32)dma->gpd_addr));
+ break;
+
+ default:
+ break;
+ }
+
+ N_MSG(DMA, "DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL));
+ N_MSG(DMA, "DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG));
+ N_MSG(DMA, "DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA));
+
}
-/* set block number before send command */
-static void msdc_set_blknum(struct msdc_host *host, u32 blknum)
+static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
+ struct scatterlist *sg, unsigned int sglen)
{
- u32 base = host->base;
+ BUG_ON(sglen > MAX_BD_NUM); /* not support currently */
- sdr_write32(SDC_BLK_NUM, blknum);
+ dma->sg = sg;
+ dma->sglen = sglen;
+
+ dma->mode = MSDC_MODE_DMA_DESC;
+
+ N_MSG(DMA, "DMA mode<%d> sglen<%d> xfersz<%d>", dma->mode, dma->sglen,
+ host->xfer_size);
+
+ msdc_dma_config(host, dma);
}
-static int msdc_do_request(struct mmc_host*mmc, struct mmc_request*mrq)
+static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ __must_hold(&host->lock)
{
- struct msdc_host *host = mmc_priv(mmc);
- struct mmc_command *cmd;
- struct mmc_data *data;
- u32 base = host->base;
- //u32 intsts = 0;
- unsigned int left=0;
- int dma = 0, read = 1, dir = DMA_FROM_DEVICE, send_type=0;
-
- #define SND_DAT 0
- #define SND_CMD 1
-
- BUG_ON(mmc == NULL);
- BUG_ON(mrq == NULL);
-
- host->error = 0;
- atomic_set(&host->abort, 0);
-
- cmd = mrq->cmd;
- data = mrq->cmd->data;
-
+ struct msdc_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ void __iomem *base = host->base;
+ //u32 intsts = 0;
+ int read = 1, send_type = 0;
+
+#define SND_DAT 0
+#define SND_CMD 1
+
+ BUG_ON(mmc == NULL);
+ BUG_ON(mrq == NULL);
+
+ host->error = 0;
+
+ cmd = mrq->cmd;
+ data = mrq->cmd->data;
+
#if 0 /* --- by chhung */
- //if(host->id ==1){
- N_MSG(OPS, "enable clock!");
- msdc_ungate_clock(host->id);
- //}
+ //if(host->id ==1){
+ N_MSG(OPS, "enable clock!");
+ msdc_ungate_clock(host->id);
+ //}
#endif /* end of --- */
-
- if (!data) {
- send_type=SND_CMD;
- if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) {
- goto done;
- }
- } else {
- BUG_ON(data->blksz > HOST_MAX_BLKSZ);
- send_type=SND_DAT;
-
- data->error = 0;
- read = data->flags & MMC_DATA_READ ? 1 : 0;
- host->data = data;
- host->xfer_size = data->blocks * data->blksz;
- host->blksz = data->blksz;
-
- /* deside the transfer mode */
- if (drv_mode[host->id] == MODE_PIO) {
- host->dma_xfer = dma = 0;
- } else if (drv_mode[host->id] == MODE_DMA) {
- host->dma_xfer = dma = 1;
- } else if (drv_mode[host->id] == MODE_SIZE_DEP) {
- host->dma_xfer = dma = ((host->xfer_size >= dma_size[host->id]) ? 1 : 0);
- }
-
- if (read) {
- if ((host->timeout_ns != data->timeout_ns) ||
- (host->timeout_clks != data->timeout_clks)) {
- msdc_set_timeout(host, data->timeout_ns, data->timeout_clks);
- }
- }
-
- msdc_set_blknum(host, data->blocks);
- //msdc_clr_fifo(); /* no need */
-
- if (dma) {
- msdc_dma_on(); /* enable DMA mode first!! */
- init_completion(&host->xfer_done);
-
- /* start the command first*/
- if (msdc_command_start(host, cmd, 1, CMD_TIMEOUT) != 0)
- goto done;
-
- dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- (void)dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, dir);
- msdc_dma_setup(host, &host->dma, data->sg, data->sg_len);
-
- /* then wait command done */
- if (msdc_command_resp(host, cmd, 1, CMD_TIMEOUT) != 0)
- goto done;
-
- /* for read, the data coming too fast, then CRC error
- start DMA no business with CRC. */
- //init_completion(&host->xfer_done);
- msdc_dma_start(host);
-
- spin_unlock(&host->lock);
- if(!wait_for_completion_timeout(&host->xfer_done, DAT_TIMEOUT)){
- ERR_MSG("XXX CMD<%d> wait xfer_done<%d> timeout!!", cmd->opcode, data->blocks * data->blksz);
- ERR_MSG(" DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA));
- ERR_MSG(" DMA_CA = 0x%x", sdr_read32(MSDC_DMA_CA));
- ERR_MSG(" DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL));
- ERR_MSG(" DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG));
- data->error = (unsigned int)-ETIMEDOUT;
-
- msdc_reset();
- msdc_clr_fifo();
- msdc_clr_int();
- }
- spin_lock(&host->lock);
- msdc_dma_stop(host);
- } else {
- /* Firstly: send command */
- if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) {
- goto done;
- }
-
- /* Secondly: pio data phase */
- if (read) {
- if (msdc_pio_read(host, data)){
- goto done;
- }
- } else {
- if (msdc_pio_write(host, data)) {
- goto done;
- }
- }
-
- /* For write case: make sure contents in fifo flushed to device */
- if (!read) {
- while (1) {
- left=msdc_txfifocnt();
- if (left == 0) {
- break;
- }
- if (msdc_pio_abort(host, data, jiffies + DAT_TIMEOUT)) {
- break;
- /* Fix me: what about if data error, when stop ? how to? */
- }
- }
- } else {
- /* Fix me: read case: need to check CRC error */
- }
-
- /* For write case: SDCBUSY and Xfer_Comp will assert when DAT0 not busy.
- For read case : SDCBUSY and Xfer_Comp will assert when last byte read out from FIFO.
- */
-
- /* try not to wait xfer_comp interrupt.
- the next command will check SDC_BUSY.
- SDC_BUSY means xfer_comp assert
- */
-
- } // PIO mode
-
- /* Last: stop transfer */
- if (data->stop){
- if (msdc_do_command(host, data->stop, 0, CMD_TIMEOUT) != 0) {
- goto done;
- }
- }
- }
+
+ if (!data) {
+ send_type = SND_CMD;
+ if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0)
+ goto done;
+ } else {
+ BUG_ON(data->blksz > HOST_MAX_BLKSZ);
+ send_type = SND_DAT;
+
+ data->error = 0;
+ read = data->flags & MMC_DATA_READ ? 1 : 0;
+ host->data = data;
+ host->xfer_size = data->blocks * data->blksz;
+ host->blksz = data->blksz;
+
+ if (read) {
+ if ((host->timeout_ns != data->timeout_ns) ||
+ (host->timeout_clks != data->timeout_clks)) {
+ msdc_set_timeout(host, data->timeout_ns, data->timeout_clks);
+ }
+ }
+
+ sdr_write32(SDC_BLK_NUM, data->blocks);
+ //msdc_clr_fifo(); /* no need */
+
+ msdc_dma_on(); /* enable DMA mode first!! */
+ init_completion(&host->xfer_done);
+
+ /* start the command first*/
+ if (msdc_command_start(host, cmd, 1, CMD_TIMEOUT) != 0)
+ goto done;
+
+ data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg,
+ data->sg_len,
+ mmc_get_dma_dir(data));
+ msdc_dma_setup(host, &host->dma, data->sg,
+ data->sg_count);
+
+ /* then wait command done */
+ if (msdc_command_resp(host, cmd, 1, CMD_TIMEOUT) != 0)
+ goto done;
+
+ /* for read, the data coming too fast, then CRC error
+ start DMA no business with CRC. */
+ //init_completion(&host->xfer_done);
+ msdc_dma_start(host);
+
+ spin_unlock(&host->lock);
+ if (!wait_for_completion_timeout(&host->xfer_done, DAT_TIMEOUT)) {
+ ERR_MSG("XXX CMD<%d> wait xfer_done<%d> timeout!!", cmd->opcode, data->blocks * data->blksz);
+ ERR_MSG(" DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA));
+ ERR_MSG(" DMA_CA = 0x%x", sdr_read32(MSDC_DMA_CA));
+ ERR_MSG(" DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL));
+ ERR_MSG(" DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG));
+ data->error = -ETIMEDOUT;
+
+ msdc_reset_hw(host);
+ msdc_clr_fifo();
+ msdc_clr_int();
+ }
+ spin_lock(&host->lock);
+ msdc_dma_stop(host);
+
+ /* Last: stop transfer */
+ if (data->stop) {
+ if (msdc_do_command(host, data->stop, 0, CMD_TIMEOUT) != 0)
+ goto done;
+ }
+ }
done:
- if (data != NULL) {
- host->data = NULL;
- host->dma_xfer = 0;
- if (dma != 0) {
- msdc_dma_off();
- host->dma.used_bd = 0;
- host->dma.used_gpd = 0;
- dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, dir);
- }
- host->blksz = 0;
-
+ if (data != NULL) {
+ host->data = NULL;
+ dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ host->blksz = 0;
+
#if 0 // don't stop twice!
- if(host->hw->flags & MSDC_REMOVABLE && data->error) {
- msdc_abort_data(host);
- /* reset in IRQ, stop command has issued. -> No need */
- }
-#endif
+ if (host->hw->flags & MSDC_REMOVABLE && data->error) {
+ msdc_abort_data(host);
+ /* reset in IRQ, stop command has issued. -> No need */
+ }
+#endif
- N_MSG(OPS, "CMD<%d> data<%s %s> blksz<%d> block<%d> error<%d>",cmd->opcode, (dma? "dma":"pio"),
- (read ? "read ":"write") ,data->blksz, data->blocks, data->error);
- }
+ N_MSG(OPS, "CMD<%d> data<%s %s> blksz<%d> block<%d> error<%d>", cmd->opcode, (dma ? "dma" : "pio"),
+ (read ? "read " : "write"), data->blksz, data->blocks, data->error);
+ }
#if 0 /* --- by chhung */
-#if 1
- //if(host->id==1) {
- if(send_type==SND_CMD) {
- if(cmd->opcode == MMC_SEND_STATUS) {
- if((cmd->resp[0] & CARD_READY_FOR_DATA) ||(CARD_CURRENT_STATE(cmd->resp[0]) != 7)){
- N_MSG(OPS,"disable clock, CMD13 IDLE");
- msdc_gate_clock(host->id);
- }
- } else {
- N_MSG(OPS,"disable clock, CMD<%d>", cmd->opcode);
- msdc_gate_clock(host->id);
- }
- } else {
- if(read) {
- N_MSG(OPS,"disable clock!!! Read CMD<%d>",cmd->opcode);
- msdc_gate_clock(host->id);
- }
- }
- //}
+#if 1
+ //if(host->id==1) {
+ if (send_type == SND_CMD) {
+ if (cmd->opcode == MMC_SEND_STATUS) {
+ if ((cmd->resp[0] & CARD_READY_FOR_DATA) || (CARD_CURRENT_STATE(cmd->resp[0]) != 7)) {
+ N_MSG(OPS, "disable clock, CMD13 IDLE");
+ msdc_gate_clock(host->id);
+ }
+ } else {
+ N_MSG(OPS, "disable clock, CMD<%d>", cmd->opcode);
+ msdc_gate_clock(host->id);
+ }
+ } else {
+ if (read) {
+ N_MSG(OPS, "disable clock!!! Read CMD<%d>", cmd->opcode);
+ msdc_gate_clock(host->id);
+ }
+ }
+ //}
#else
- msdc_gate_clock(host->id);
+ msdc_gate_clock(host->id);
#endif
#endif /* end of --- */
-
- if (mrq->cmd->error) host->error = 0x001;
- if (mrq->data && mrq->data->error) host->error |= 0x010;
- if (mrq->stop && mrq->stop->error) host->error |= 0x100;
- //if (host->error) ERR_MSG("host->error<%d>", host->error);
+ if (mrq->cmd->error)
+ host->error = 0x001;
+ if (mrq->data && mrq->data->error)
+ host->error |= 0x010;
+ if (mrq->stop && mrq->stop->error)
+ host->error |= 0x100;
+
+ //if (host->error) ERR_MSG("host->error<%d>", host->error);
- return host->error;
+ return host->error;
}
static int msdc_app_cmd(struct mmc_host *mmc, struct msdc_host *host)
{
- struct mmc_command cmd;
- struct mmc_request mrq;
- u32 err;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
- cmd.opcode = MMC_APP_CMD;
-#if 0 /* bug: we meet mmc->card is null when ACMD6 */
- cmd.arg = mmc->card->rca << 16;
-#else
- cmd.arg = host->app_cmd_arg;
-#endif
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
-
- memset(&mrq, 0, sizeof(struct mmc_request));
- mrq.cmd = &cmd; cmd.mrq = &mrq;
- cmd.data = NULL;
-
- err = msdc_do_command(host, &cmd, 0, CMD_TIMEOUT);
- return err;
+ struct mmc_command cmd;
+ struct mmc_request mrq;
+ u32 err;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = MMC_APP_CMD;
+#if 0 /* bug: we meet mmc->card is null when ACMD6 */
+ cmd.arg = mmc->card->rca << 16;
+#else
+ cmd.arg = host->app_cmd_arg;
+#endif
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+ mrq.cmd = &cmd; cmd.mrq = &mrq;
+ cmd.data = NULL;
+
+ err = msdc_do_command(host, &cmd, 0, CMD_TIMEOUT);
+ return err;
}
-static int msdc_tune_cmdrsp(struct msdc_host*host, struct mmc_command *cmd)
+static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd)
{
- int result = -1;
- u32 base = host->base;
- u32 rsmpl, cur_rsmpl, orig_rsmpl;
- u32 rrdly, cur_rrdly = 0xffffffff, orig_rrdly;
- u32 skip = 1;
-
- /* ==== don't support 3.0 now ====
- 1: R_SMPL[1]
- 2: PAD_CMD_RESP_RXDLY[26:22]
- ==========================*/
-
- // save the previous tune result
- sdr_get_field(MSDC_IOCON, MSDC_IOCON_RSPL, orig_rsmpl);
- sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, orig_rrdly);
-
- rrdly = 0;
- do {
- for (rsmpl = 0; rsmpl < 2; rsmpl++) {
- /* Lv1: R_SMPL[1] */
- cur_rsmpl = (orig_rsmpl + rsmpl) % 2;
- if (skip == 1) {
- skip = 0;
- continue;
- }
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, cur_rsmpl);
-
- if (host->app_cmd) {
- result = msdc_app_cmd(host->mmc, host);
- if (result) {
- ERR_MSG("TUNE_CMD app_cmd<%d> failed: RESP_RXDLY<%d>,R_SMPL<%d>",
- host->mrq->cmd->opcode, cur_rrdly, cur_rsmpl);
- continue;
- }
- }
- result = msdc_do_command(host, cmd, 0, CMD_TIMEOUT); // not tune.
- ERR_MSG("TUNE_CMD<%d> %s PAD_CMD_RESP_RXDLY[26:22]<%d> R_SMPL[1]<%d>", cmd->opcode,
- (result == 0) ? "PASS" : "FAIL", cur_rrdly, cur_rsmpl);
-
- if (result == 0) {
- return 0;
- }
- if (result != (unsigned int)(-EIO)) {
- ERR_MSG("TUNE_CMD<%d> Error<%d> not -EIO", cmd->opcode, result);
- return result;
- }
-
- /* should be EIO */
- if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */
- msdc_abort_data(host);
- }
- }
-
- /* Lv2: PAD_CMD_RESP_RXDLY[26:22] */
- cur_rrdly = (orig_rrdly + rrdly + 1) % 32;
- sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly);
- }while (++rrdly < 32);
-
- return result;
+ int result = -1;
+ void __iomem *base = host->base;
+ u32 rsmpl, cur_rsmpl, orig_rsmpl;
+ u32 rrdly, cur_rrdly = 0xffffffff, orig_rrdly;
+ u32 skip = 1;
+
+ /* ==== don't support 3.0 now ====
+ 1: R_SMPL[1]
+ 2: PAD_CMD_RESP_RXDLY[26:22]
+ ==========================*/
+
+ // save the previous tune result
+ sdr_get_field(MSDC_IOCON, MSDC_IOCON_RSPL, &orig_rsmpl);
+ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, &orig_rrdly);
+
+ rrdly = 0;
+ do {
+ for (rsmpl = 0; rsmpl < 2; rsmpl++) {
+ /* Lv1: R_SMPL[1] */
+ cur_rsmpl = (orig_rsmpl + rsmpl) % 2;
+ if (skip == 1) {
+ skip = 0;
+ continue;
+ }
+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, cur_rsmpl);
+
+ if (host->app_cmd) {
+ result = msdc_app_cmd(host->mmc, host);
+ if (result) {
+ ERR_MSG("TUNE_CMD app_cmd<%d> failed: RESP_RXDLY<%d>,R_SMPL<%d>",
+ host->mrq->cmd->opcode, cur_rrdly, cur_rsmpl);
+ continue;
+ }
+ }
+ result = msdc_do_command(host, cmd, 0, CMD_TIMEOUT); // not tune.
+ ERR_MSG("TUNE_CMD<%d> %s PAD_CMD_RESP_RXDLY[26:22]<%d> R_SMPL[1]<%d>", cmd->opcode,
+ (result == 0) ? "PASS" : "FAIL", cur_rrdly, cur_rsmpl);
+
+ if (result == 0)
+ return 0;
+ if (result != -EIO) {
+ ERR_MSG("TUNE_CMD<%d> Error<%d> not -EIO", cmd->opcode, result);
+ return result;
+ }
+
+ /* should be EIO */
+ if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */
+ msdc_abort_data(host);
+ }
+ }
+
+ /* Lv2: PAD_CMD_RESP_RXDLY[26:22] */
+ cur_rrdly = (orig_rrdly + rrdly + 1) % 32;
+ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly);
+ } while (++rrdly < 32);
+
+ return result;
}
/* Support SD2.0 Only */
static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
{
- struct msdc_host *host = mmc_priv(mmc);
- u32 base = host->base;
- u32 ddr=0;
- u32 dcrc=0;
- u32 rxdly, cur_rxdly0, cur_rxdly1;
- u32 dsmpl, cur_dsmpl, orig_dsmpl;
- u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
- u32 cur_dat4, cur_dat5, cur_dat6, cur_dat7;
- u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
- u32 orig_dat4, orig_dat5, orig_dat6, orig_dat7;
- int result = -1;
- u32 skip = 1;
-
- sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl);
-
- /* Tune Method 2. */
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
-
- rxdly = 0;
- do {
- for (dsmpl = 0; dsmpl < 2; dsmpl++) {
- cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
- if (skip == 1) {
- skip = 0;
- continue;
- }
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl);
-
- if (host->app_cmd) {
- result = msdc_app_cmd(host->mmc, host);
- if (result) {
- ERR_MSG("TUNE_BREAD app_cmd<%d> failed", host->mrq->cmd->opcode);
- continue;
- }
- }
- result = msdc_do_request(mmc,mrq);
-
- sdr_get_field(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc); /* RO */
- if (!ddr) dcrc &= ~SDC_DCRC_STS_NEG;
- ERR_MSG("TUNE_BREAD<%s> dcrc<0x%x> DATRDDLY0/1<0x%x><0x%x> dsmpl<0x%x>",
- (result == 0 && dcrc == 0) ? "PASS" : "FAIL", dcrc,
- sdr_read32(MSDC_DAT_RDDLY0), sdr_read32(MSDC_DAT_RDDLY1), cur_dsmpl);
-
- /* Fix me: result is 0, but dcrc is still exist */
- if (result == 0 && dcrc == 0) {
- goto done;
- } else {
- /* there is a case: command timeout, and data phase not processed */
- if (mrq->data->error != 0 && mrq->data->error != (unsigned int)(-EIO)) {
- ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>",
- result, mrq->cmd->error, mrq->data->error);
- goto done;
- }
- }
- }
-
- cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0);
- cur_rxdly1 = sdr_read32(MSDC_DAT_RDDLY1);
-
- /* E1 ECO. YD: Reverse */
- if (sdr_read32(MSDC_ECO_VER) >= 4) {
- orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
- orig_dat4 = (cur_rxdly1 >> 24) & 0x1F;
- orig_dat5 = (cur_rxdly1 >> 16) & 0x1F;
- orig_dat6 = (cur_rxdly1 >> 8) & 0x1F;
- orig_dat7 = (cur_rxdly1 >> 0) & 0x1F;
- } else {
- orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
- orig_dat4 = (cur_rxdly1 >> 0) & 0x1F;
- orig_dat5 = (cur_rxdly1 >> 8) & 0x1F;
- orig_dat6 = (cur_rxdly1 >> 16) & 0x1F;
- orig_dat7 = (cur_rxdly1 >> 24) & 0x1F;
- }
-
- if (ddr) {
- cur_dat0 = (dcrc & (1 << 0) || dcrc & (1 << 8)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
- cur_dat1 = (dcrc & (1 << 1) || dcrc & (1 << 9)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
- cur_dat2 = (dcrc & (1 << 2) || dcrc & (1 << 10)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
- cur_dat3 = (dcrc & (1 << 3) || dcrc & (1 << 11)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
- } else {
- cur_dat0 = (dcrc & (1 << 0)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
- cur_dat1 = (dcrc & (1 << 1)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
- cur_dat2 = (dcrc & (1 << 2)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
- cur_dat3 = (dcrc & (1 << 3)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
- }
- cur_dat4 = (dcrc & (1 << 4)) ? ((orig_dat4 + 1) % 32) : orig_dat4;
- cur_dat5 = (dcrc & (1 << 5)) ? ((orig_dat5 + 1) % 32) : orig_dat5;
- cur_dat6 = (dcrc & (1 << 6)) ? ((orig_dat6 + 1) % 32) : orig_dat6;
- cur_dat7 = (dcrc & (1 << 7)) ? ((orig_dat7 + 1) % 32) : orig_dat7;
-
- cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
- cur_rxdly1 = (cur_dat4 << 24) | (cur_dat5 << 16) | (cur_dat6 << 8) | (cur_dat7 << 0);
-
- sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0);
- sdr_write32(MSDC_DAT_RDDLY1, cur_rxdly1);
-
- } while (++rxdly < 32);
-
+ struct msdc_host *host = mmc_priv(mmc);
+ void __iomem *base = host->base;
+ u32 ddr = 0;
+ u32 dcrc = 0;
+ u32 rxdly, cur_rxdly0, cur_rxdly1;
+ u32 dsmpl, cur_dsmpl, orig_dsmpl;
+ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
+ u32 cur_dat4, cur_dat5, cur_dat6, cur_dat7;
+ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
+ u32 orig_dat4, orig_dat5, orig_dat6, orig_dat7;
+ int result = -1;
+ u32 skip = 1;
+
+ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
+
+ /* Tune Method 2. */
+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
+
+ rxdly = 0;
+ do {
+ for (dsmpl = 0; dsmpl < 2; dsmpl++) {
+ cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
+ if (skip == 1) {
+ skip = 0;
+ continue;
+ }
+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl);
+
+ if (host->app_cmd) {
+ result = msdc_app_cmd(host->mmc, host);
+ if (result) {
+ ERR_MSG("TUNE_BREAD app_cmd<%d> failed", host->mrq->cmd->opcode);
+ continue;
+ }
+ }
+ result = msdc_do_request(mmc, mrq);
+
+ sdr_get_field(SDC_DCRC_STS,
+ SDC_DCRC_STS_POS | SDC_DCRC_STS_NEG,
+ &dcrc); /* RO */
+ if (!ddr)
+ dcrc &= ~SDC_DCRC_STS_NEG;
+ ERR_MSG("TUNE_BREAD<%s> dcrc<0x%x> DATRDDLY0/1<0x%x><0x%x> dsmpl<0x%x>",
+ (result == 0 && dcrc == 0) ? "PASS" : "FAIL", dcrc,
+ sdr_read32(MSDC_DAT_RDDLY0), sdr_read32(MSDC_DAT_RDDLY1), cur_dsmpl);
+
+ /* Fix me: result is 0, but dcrc is still exist */
+ if (result == 0 && dcrc == 0) {
+ goto done;
+ } else {
+ /* there is a case: command timeout, and data phase not processed */
+ if (mrq->data->error != 0 &&
+ mrq->data->error != -EIO) {
+ ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>",
+ result, mrq->cmd->error, mrq->data->error);
+ goto done;
+ }
+ }
+ }
+
+ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0);
+ cur_rxdly1 = sdr_read32(MSDC_DAT_RDDLY1);
+
+ /* E1 ECO. YD: Reverse */
+ if (sdr_read32(MSDC_ECO_VER) >= 4) {
+ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
+ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
+ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
+ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
+ orig_dat4 = (cur_rxdly1 >> 24) & 0x1F;
+ orig_dat5 = (cur_rxdly1 >> 16) & 0x1F;
+ orig_dat6 = (cur_rxdly1 >> 8) & 0x1F;
+ orig_dat7 = (cur_rxdly1 >> 0) & 0x1F;
+ } else {
+ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
+ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
+ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
+ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
+ orig_dat4 = (cur_rxdly1 >> 0) & 0x1F;
+ orig_dat5 = (cur_rxdly1 >> 8) & 0x1F;
+ orig_dat6 = (cur_rxdly1 >> 16) & 0x1F;
+ orig_dat7 = (cur_rxdly1 >> 24) & 0x1F;
+ }
+
+ if (ddr) {
+ cur_dat0 = (dcrc & (1 << 0) || dcrc & (1 << 8)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
+ cur_dat1 = (dcrc & (1 << 1) || dcrc & (1 << 9)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
+ cur_dat2 = (dcrc & (1 << 2) || dcrc & (1 << 10)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
+ cur_dat3 = (dcrc & (1 << 3) || dcrc & (1 << 11)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
+ } else {
+ cur_dat0 = (dcrc & (1 << 0)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
+ cur_dat1 = (dcrc & (1 << 1)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
+ cur_dat2 = (dcrc & (1 << 2)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
+ cur_dat3 = (dcrc & (1 << 3)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
+ }
+ cur_dat4 = (dcrc & (1 << 4)) ? ((orig_dat4 + 1) % 32) : orig_dat4;
+ cur_dat5 = (dcrc & (1 << 5)) ? ((orig_dat5 + 1) % 32) : orig_dat5;
+ cur_dat6 = (dcrc & (1 << 6)) ? ((orig_dat6 + 1) % 32) : orig_dat6;
+ cur_dat7 = (dcrc & (1 << 7)) ? ((orig_dat7 + 1) % 32) : orig_dat7;
+
+ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
+ cur_rxdly1 = (cur_dat4 << 24) | (cur_dat5 << 16) | (cur_dat6 << 8) | (cur_dat7 << 0);
+
+ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0);
+ sdr_write32(MSDC_DAT_RDDLY1, cur_rxdly1);
+
+ } while (++rxdly < 32);
+
done:
- return result;
+ return result;
}
-static int msdc_tune_bwrite(struct mmc_host *mmc,struct mmc_request *mrq)
+static int msdc_tune_bwrite(struct mmc_host *mmc, struct mmc_request *mrq)
{
- struct msdc_host *host = mmc_priv(mmc);
- u32 base = host->base;
-
- u32 wrrdly, cur_wrrdly = 0xffffffff, orig_wrrdly;
- u32 dsmpl, cur_dsmpl, orig_dsmpl;
- u32 rxdly, cur_rxdly0;
- u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
- u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
- int result = -1;
- u32 skip = 1;
-
- // MSDC_IOCON_DDR50CKD need to check. [Fix me]
-
- sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, orig_wrrdly);
- sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl );
-
- /* Tune Method 2. just DAT0 */
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
- cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0);
-
- /* E1 ECO. YD: Reverse */
- if (sdr_read32(MSDC_ECO_VER) >= 4) {
- orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
- } else {
- orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
- }
-
- rxdly = 0;
- do {
- wrrdly = 0;
- do {
- for (dsmpl = 0; dsmpl < 2; dsmpl++) {
- cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
- if (skip == 1) {
- skip = 0;
- continue;
- }
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl);
-
- if (host->app_cmd) {
- result = msdc_app_cmd(host->mmc, host);
- if (result) {
- ERR_MSG("TUNE_BWRITE app_cmd<%d> failed", host->mrq->cmd->opcode);
- continue;
- }
- }
- result = msdc_do_request(mmc,mrq);
-
- ERR_MSG("TUNE_BWRITE<%s> DSPL<%d> DATWRDLY<%d> MSDC_DAT_RDDLY0<0x%x>",
- result == 0 ? "PASS" : "FAIL",
- cur_dsmpl, cur_wrrdly, cur_rxdly0);
-
- if (result == 0) {
- goto done;
- }
- else {
- /* there is a case: command timeout, and data phase not processed */
- if (mrq->data->error != (unsigned int)(-EIO)) {
- ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>",
- result, mrq->cmd->error, mrq->data->error);
- goto done;
- }
- }
- }
- cur_wrrdly = (orig_wrrdly + wrrdly + 1) % 32;
- sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly);
- } while (++wrrdly < 32);
-
- cur_dat0 = (orig_dat0 + rxdly) % 32; /* only adjust bit-1 for crc */
- cur_dat1 = orig_dat1;
- cur_dat2 = orig_dat2;
- cur_dat3 = orig_dat3;
-
- cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
- sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0);
- } while (++rxdly < 32);
+ struct msdc_host *host = mmc_priv(mmc);
+ void __iomem *base = host->base;
+
+ u32 wrrdly, cur_wrrdly = 0xffffffff, orig_wrrdly;
+ u32 dsmpl, cur_dsmpl, orig_dsmpl;
+ u32 rxdly, cur_rxdly0;
+ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
+ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
+ int result = -1;
+ u32 skip = 1;
+
+ // MSDC_IOCON_DDR50CKD need to check. [Fix me]
+
+ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, &orig_wrrdly);
+ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
+
+ /* Tune Method 2. just DAT0 */
+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
+ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0);
+
+ /* E1 ECO. YD: Reverse */
+ if (sdr_read32(MSDC_ECO_VER) >= 4) {
+ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
+ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
+ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
+ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
+ } else {
+ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
+ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
+ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
+ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
+ }
+
+ rxdly = 0;
+ do {
+ wrrdly = 0;
+ do {
+ for (dsmpl = 0; dsmpl < 2; dsmpl++) {
+ cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
+ if (skip == 1) {
+ skip = 0;
+ continue;
+ }
+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl);
+
+ if (host->app_cmd) {
+ result = msdc_app_cmd(host->mmc, host);
+ if (result) {
+ ERR_MSG("TUNE_BWRITE app_cmd<%d> failed", host->mrq->cmd->opcode);
+ continue;
+ }
+ }
+ result = msdc_do_request(mmc, mrq);
+
+ ERR_MSG("TUNE_BWRITE<%s> DSPL<%d> DATWRDLY<%d> MSDC_DAT_RDDLY0<0x%x>",
+ result == 0 ? "PASS" : "FAIL",
+ cur_dsmpl, cur_wrrdly, cur_rxdly0);
+
+ if (result == 0) {
+ goto done;
+ } else {
+ /* there is a case: command timeout, and data phase not processed */
+ if (mrq->data->error != -EIO) {
+ ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>",
+ result, mrq->cmd->error, mrq->data->error);
+ goto done;
+ }
+ }
+ }
+ cur_wrrdly = (orig_wrrdly + wrrdly + 1) % 32;
+ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly);
+ } while (++wrrdly < 32);
+
+ cur_dat0 = (orig_dat0 + rxdly) % 32; /* only adjust bit-1 for crc */
+ cur_dat1 = orig_dat1;
+ cur_dat2 = orig_dat2;
+ cur_dat3 = orig_dat3;
+
+ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
+ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0);
+ } while (++rxdly < 32);
done:
- return result;
+ return result;
}
static int msdc_get_card_status(struct mmc_host *mmc, struct msdc_host *host, u32 *status)
{
- struct mmc_command cmd;
- struct mmc_request mrq;
- u32 err;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
- cmd.opcode = MMC_SEND_STATUS;
- if (mmc->card) {
- cmd.arg = mmc->card->rca << 16;
- } else {
- ERR_MSG("cmd13 mmc card is null");
- cmd.arg = host->app_cmd_arg;
- }
- cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-
- memset(&mrq, 0, sizeof(struct mmc_request));
- mrq.cmd = &cmd; cmd.mrq = &mrq;
- cmd.data = NULL;
-
- err = msdc_do_command(host, &cmd, 1, CMD_TIMEOUT);
-
- if (status) {
- *status = cmd.resp[0];
- }
-
- return err;
+ struct mmc_command cmd;
+ struct mmc_request mrq;
+ u32 err;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = MMC_SEND_STATUS;
+ if (mmc->card) {
+ cmd.arg = mmc->card->rca << 16;
+ } else {
+ ERR_MSG("cmd13 mmc card is null");
+ cmd.arg = host->app_cmd_arg;
+ }
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+ mrq.cmd = &cmd; cmd.mrq = &mrq;
+ cmd.data = NULL;
+
+ err = msdc_do_command(host, &cmd, 1, CMD_TIMEOUT);
+
+ if (status)
+ *status = cmd.resp[0];
+
+ return err;
}
static int msdc_check_busy(struct mmc_host *mmc, struct msdc_host *host)
{
- u32 err = 0;
- u32 status = 0;
-
- do {
- err = msdc_get_card_status(mmc, host, &status);
- if (err) return err;
- /* need cmd12? */
- ERR_MSG("cmd<13> resp<0x%x>", status);
- } while (R1_CURRENT_STATE(status) == 7);
-
- return err;
+ u32 err = 0;
+ u32 status = 0;
+
+ do {
+ err = msdc_get_card_status(mmc, host, &status);
+ if (err)
+ return err;
+ /* need cmd12? */
+ ERR_MSG("cmd<13> resp<0x%x>", status);
+ } while (R1_CURRENT_STATE(status) == 7);
+
+ return err;
}
/* failed when msdc_do_request */
static int msdc_tune_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
- struct msdc_host *host = mmc_priv(mmc);
- struct mmc_command *cmd;
- struct mmc_data *data;
- //u32 base = host->base;
- int ret=0, read;
-
- cmd = mrq->cmd;
- data = mrq->cmd->data;
-
- read = data->flags & MMC_DATA_READ ? 1 : 0;
-
- if (read) {
- if (data->error == (unsigned int)(-EIO)) {
- ret = msdc_tune_bread(mmc,mrq);
- }
- } else {
- ret = msdc_check_busy(mmc, host);
- if (ret){
- ERR_MSG("XXX cmd13 wait program done failed");
- return ret;
- }
- /* CRC and TO */
- /* Fix me: don't care card status? */
- ret = msdc_tune_bwrite(mmc,mrq);
- }
-
- return ret;
+ struct msdc_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ //u32 base = host->base;
+ int ret = 0, read;
+
+ cmd = mrq->cmd;
+ data = mrq->cmd->data;
+
+ read = data->flags & MMC_DATA_READ ? 1 : 0;
+
+ if (read) {
+ if (data->error == -EIO)
+ ret = msdc_tune_bread(mmc, mrq);
+ } else {
+ ret = msdc_check_busy(mmc, host);
+ if (ret) {
+ ERR_MSG("XXX cmd13 wait program done failed");
+ return ret;
+ }
+ /* CRC and TO */
+ /* Fix me: don't care card status? */
+ ret = msdc_tune_bwrite(mmc, mrq);
+ }
+
+ return ret;
}
/* ops.request */
-static void msdc_ops_request(struct mmc_host *mmc,struct mmc_request *mrq)
-{
- struct msdc_host *host = mmc_priv(mmc);
+static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct msdc_host *host = mmc_priv(mmc);
- //=== for sdio profile ===
+ //=== for sdio profile ===
#if 0 /* --- by chhung */
- u32 old_H32, old_L32, new_H32, new_L32;
- u32 ticks = 0, opcode = 0, sizes = 0, bRx = 0;
+ u32 old_H32, old_L32, new_H32, new_L32;
+ u32 ticks = 0, opcode = 0, sizes = 0, bRx = 0;
#endif /* end of --- */
-
- if(host->mrq){
- ERR_MSG("XXX host->mrq<0x%.8x>", (int)host->mrq);
- BUG();
- }
-
- if (!is_card_present(host) || host->power_mode == MMC_POWER_OFF) {
- ERR_MSG("cmd<%d> card<%d> power<%d>", mrq->cmd->opcode, is_card_present(host), host->power_mode);
- mrq->cmd->error = (unsigned int)-ENOMEDIUM;
-
-#if 1
- mrq->done(mrq); // call done directly.
-#else
- mrq->cmd->retries = 0; // please don't retry.
- mmc_request_done(mmc, mrq);
-#endif
- return;
- }
-
- /* start to process */
- spin_lock(&host->lock);
+ WARN_ON(host->mrq);
+
+ /* start to process */
+ spin_lock(&host->lock);
#if 0 /* --- by chhung */
- if (sdio_pro_enable) { //=== for sdio profile ===
- if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53) {
- GPT_GetCounter64(&old_L32, &old_H32);
- }
- }
+ if (sdio_pro_enable) { //=== for sdio profile ===
+ if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53)
+ GPT_GetCounter64(&old_L32, &old_H32);
+ }
#endif /* end of --- */
-
- host->mrq = mrq;
-
- if (msdc_do_request(mmc,mrq)) {
- if(host->hw->flags & MSDC_REMOVABLE && ralink_soc == MT762X_SOC_MT7621AT && mrq->data && mrq->data->error) {
- msdc_tune_request(mmc,mrq);
- }
- }
-
- /* ==== when request done, check if app_cmd ==== */
- if (mrq->cmd->opcode == MMC_APP_CMD) {
- host->app_cmd = 1;
- host->app_cmd_arg = mrq->cmd->arg; /* save the RCA */
- } else {
- host->app_cmd = 0;
- //host->app_cmd_arg = 0;
- }
-
- host->mrq = NULL;
+
+ host->mrq = mrq;
+
+ if (msdc_do_request(mmc, mrq)) {
+ if (host->hw->flags & MSDC_REMOVABLE && ralink_soc == MT762X_SOC_MT7621AT && mrq->data && mrq->data->error)
+ msdc_tune_request(mmc, mrq);
+ }
+
+ /* ==== when request done, check if app_cmd ==== */
+ if (mrq->cmd->opcode == MMC_APP_CMD) {
+ host->app_cmd = 1;
+ host->app_cmd_arg = mrq->cmd->arg; /* save the RCA */
+ } else {
+ host->app_cmd = 0;
+ //host->app_cmd_arg = 0;
+ }
+
+ host->mrq = NULL;
#if 0 /* --- by chhung */
- //=== for sdio profile ===
- if (sdio_pro_enable) {
- if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53) {
- GPT_GetCounter64(&new_L32, &new_H32);
- ticks = msdc_time_calc(old_L32, old_H32, new_L32, new_H32);
-
- opcode = mrq->cmd->opcode;
- if (mrq->cmd->data) {
- sizes = mrq->cmd->data->blocks * mrq->cmd->data->blksz;
- bRx = mrq->cmd->data->flags & MMC_DATA_READ ? 1 : 0 ;
- } else {
- bRx = mrq->cmd->arg & 0x80000000 ? 1 : 0;
- }
-
- if (!mrq->cmd->error) {
- msdc_performance(opcode, sizes, bRx, ticks);
- }
- }
- }
+ //=== for sdio profile ===
+ if (sdio_pro_enable) {
+ if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53) {
+ GPT_GetCounter64(&new_L32, &new_H32);
+ ticks = msdc_time_calc(old_L32, old_H32, new_L32, new_H32);
+
+ opcode = mrq->cmd->opcode;
+ if (mrq->cmd->data) {
+ sizes = mrq->cmd->data->blocks * mrq->cmd->data->blksz;
+ bRx = mrq->cmd->data->flags & MMC_DATA_READ ? 1 : 0;
+ } else {
+ bRx = mrq->cmd->arg & 0x80000000 ? 1 : 0;
+ }
+
+ if (!mrq->cmd->error)
+ msdc_performance(opcode, sizes, bRx, ticks);
+ }
+ }
#endif /* end of --- */
- spin_unlock(&host->lock);
-
- mmc_request_done(mmc, mrq);
-
- return;
+ spin_unlock(&host->lock);
+
+ mmc_request_done(mmc, mrq);
+
+ return;
}
/* called by ops.set_ios */
static void msdc_set_buswidth(struct msdc_host *host, u32 width)
{
- u32 base = host->base;
- u32 val = sdr_read32(SDC_CFG);
-
- val &= ~SDC_CFG_BUSWIDTH;
-
- switch (width) {
- default:
- case MMC_BUS_WIDTH_1:
- width = 1;
- val |= (MSDC_BUS_1BITS << 16);
- break;
- case MMC_BUS_WIDTH_4:
- val |= (MSDC_BUS_4BITS << 16);
- break;
- case MMC_BUS_WIDTH_8:
- val |= (MSDC_BUS_8BITS << 16);
- break;
- }
-
- sdr_write32(SDC_CFG, val);
-
- N_MSG(CFG, "Bus Width = %d", width);
+ void __iomem *base = host->base;
+ u32 val = sdr_read32(SDC_CFG);
+
+ val &= ~SDC_CFG_BUSWIDTH;
+
+ switch (width) {
+ default:
+ case MMC_BUS_WIDTH_1:
+ width = 1;
+ val |= (MSDC_BUS_1BITS << 16);
+ break;
+ case MMC_BUS_WIDTH_4:
+ val |= (MSDC_BUS_4BITS << 16);
+ break;
+ case MMC_BUS_WIDTH_8:
+ val |= (MSDC_BUS_8BITS << 16);
+ break;
+ }
+
+ sdr_write32(SDC_CFG, val);
+
+ N_MSG(CFG, "Bus Width = %d", width);
}
/* ops.set_ios */
static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
- struct msdc_host *host = mmc_priv(mmc);
- struct msdc_hw *hw=host->hw;
- u32 base = host->base;
- u32 ddr = 0;
+ struct msdc_host *host = mmc_priv(mmc);
+ void __iomem *base = host->base;
+ u32 ddr = 0;
#ifdef MT6575_SD_DEBUG
- static char *vdd[] = {
- "1.50v", "1.55v", "1.60v", "1.65v", "1.70v", "1.80v", "1.90v",
- "2.00v", "2.10v", "2.20v", "2.30v", "2.40v", "2.50v", "2.60v",
- "2.70v", "2.80v", "2.90v", "3.00v", "3.10v", "3.20v", "3.30v",
- "3.40v", "3.50v", "3.60v"
- };
- static char *power_mode[] = {
- "OFF", "UP", "ON"
- };
- static char *bus_mode[] = {
- "UNKNOWN", "OPENDRAIN", "PUSHPULL"
- };
- static char *timing[] = {
- "LEGACY", "MMC_HS", "SD_HS"
- };
-
- printk("SET_IOS: CLK(%dkHz), BUS(%s), BW(%u), PWR(%s), VDD(%s), TIMING(%s)",
- ios->clock / 1000, bus_mode[ios->bus_mode],
- (ios->bus_width == MMC_BUS_WIDTH_4) ? 4 : 1,
- power_mode[ios->power_mode], vdd[ios->vdd], timing[ios->timing]);
+ static char *vdd[] = {
+ "1.50v", "1.55v", "1.60v", "1.65v", "1.70v", "1.80v", "1.90v",
+ "2.00v", "2.10v", "2.20v", "2.30v", "2.40v", "2.50v", "2.60v",
+ "2.70v", "2.80v", "2.90v", "3.00v", "3.10v", "3.20v", "3.30v",
+ "3.40v", "3.50v", "3.60v"
+ };
+ static char *power_mode[] = {
+ "OFF", "UP", "ON"
+ };
+ static char *bus_mode[] = {
+ "UNKNOWN", "OPENDRAIN", "PUSHPULL"
+ };
+ static char *timing[] = {
+ "LEGACY", "MMC_HS", "SD_HS"
+ };
+
+ printk("SET_IOS: CLK(%dkHz), BUS(%s), BW(%u), PWR(%s), VDD(%s), TIMING(%s)",
+ ios->clock / 1000, bus_mode[ios->bus_mode],
+ (ios->bus_width == MMC_BUS_WIDTH_4) ? 4 : 1,
+ power_mode[ios->power_mode], vdd[ios->vdd], timing[ios->timing]);
#endif
- msdc_set_buswidth(host, ios->bus_width);
-
- /* Power control ??? */
- switch (ios->power_mode) {
- case MMC_POWER_OFF:
- case MMC_POWER_UP:
- // msdc_set_power_mode(host, ios->power_mode); /* --- by chhung */
- break;
- case MMC_POWER_ON:
- host->power_mode = MMC_POWER_ON;
- break;
- default:
- break;
- }
-
- /* Clock control */
- if (host->mclk != ios->clock) {
- if(ios->clock > 25000000) {
- //if (!(host->hw->flags & MSDC_REMOVABLE)) {
- INIT_MSG("SD data latch edge<%d>", hw->data_edge);
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, hw->cmd_edge);
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, hw->data_edge);
- //} /* for tuning debug */
- } else { /* default value */
- sdr_write32(MSDC_IOCON, 0x00000000);
- // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000);
- sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward
- sdr_write32(MSDC_DAT_RDDLY1, 0x00000000);
- // sdr_write32(MSDC_PAD_TUNE, 0x00000000);
- sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward
- }
- msdc_set_mclk(host, ddr, ios->clock);
- }
+ msdc_set_buswidth(host, ios->bus_width);
+
+ /* Power control ??? */
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ case MMC_POWER_UP:
+ // msdc_set_power_mode(host, ios->power_mode); /* --- by chhung */
+ break;
+ case MMC_POWER_ON:
+ host->power_mode = MMC_POWER_ON;
+ break;
+ default:
+ break;
+ }
+
+ /* Clock control */
+ if (host->mclk != ios->clock) {
+ if (ios->clock > 25000000) {
+ //if (!(host->hw->flags & MSDC_REMOVABLE)) {
+ INIT_MSG("SD data latch edge<%d>", MSDC_SMPL_FALLING);
+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL,
+ MSDC_SMPL_FALLING);
+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL,
+ MSDC_SMPL_FALLING);
+ //} /* for tuning debug */
+ } else { /* default value */
+ sdr_write32(MSDC_IOCON, 0x00000000);
+ // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000);
+ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward
+ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000);
+ // sdr_write32(MSDC_PAD_TUNE, 0x00000000);
+ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward
+ }
+ msdc_set_mclk(host, ddr, ios->clock);
+ }
}
/* ops.get_ro */
static int msdc_ops_get_ro(struct mmc_host *mmc)
{
- struct msdc_host *host = mmc_priv(mmc);
- u32 base = host->base;
- unsigned long flags;
- int ro = 0;
-
- if (host->hw->flags & MSDC_WP_PIN_EN) { /* set for card */
- spin_lock_irqsave(&host->lock, flags);
- ro = (sdr_read32(MSDC_PS) >> 31);
- spin_unlock_irqrestore(&host->lock, flags);
- }
- return ro;
+ struct msdc_host *host = mmc_priv(mmc);
+ void __iomem *base = host->base;
+ unsigned long flags;
+ int ro = 0;
+
+ if (host->hw->flags & MSDC_WP_PIN_EN) { /* set for card */
+ spin_lock_irqsave(&host->lock, flags);
+ ro = (sdr_read32(MSDC_PS) >> 31);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return ro;
}
/* ops.get_cd */
static int msdc_ops_get_cd(struct mmc_host *mmc)
{
- struct msdc_host *host = mmc_priv(mmc);
- u32 base = host->base;
- unsigned long flags;
- int present = 1;
-
- /* for sdio, MSDC_REMOVABLE not set, always return 1 */
- if (!(host->hw->flags & MSDC_REMOVABLE)) {
- /* For sdio, read H/W always get<1>, but may timeout some times */
+ struct msdc_host *host = mmc_priv(mmc);
+ void __iomem *base = host->base;
+ unsigned long flags;
+ int present = 1;
+
+ /* for sdio, MSDC_REMOVABLE not set, always return 1 */
+ if (!(host->hw->flags & MSDC_REMOVABLE)) {
+ /* For sdio, read H/W always get<1>, but may timeout some times */
#if 1
- host->card_inserted = 1;
- return 1;
+ host->card_inserted = 1;
+ return 1;
#else
- host->card_inserted = (host->pm_state.event == PM_EVENT_USER_RESUME) ? 1 : 0;
- INIT_MSG("sdio ops_get_cd<%d>", host->card_inserted);
- return host->card_inserted;
+ host->card_inserted = (host->pm_state.event == PM_EVENT_USER_RESUME) ? 1 : 0;
+ INIT_MSG("sdio ops_get_cd<%d>", host->card_inserted);
+ return host->card_inserted;
#endif
- }
+ }
- /* MSDC_CD_PIN_EN set for card */
- if (host->hw->flags & MSDC_CD_PIN_EN) {
- spin_lock_irqsave(&host->lock, flags);
-#if 0
- present = host->card_inserted; /* why not read from H/W: Fix me*/
+ /* MSDC_CD_PIN_EN set for card */
+ if (host->hw->flags & MSDC_CD_PIN_EN) {
+ spin_lock_irqsave(&host->lock, flags);
+#if 0
+ present = host->card_inserted; /* why not read from H/W: Fix me*/
#else
- // CD
- if (cd_active_low)
- present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1;
- else
- present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 1 : 0;
- host->card_inserted = present;
-#endif
- spin_unlock_irqrestore(&host->lock, flags);
- } else {
- present = 0; /* TODO? Check DAT3 pins for card detection */
- }
-
- INIT_MSG("ops_get_cd return<%d>", present);
- return present;
-}
+ // CD
+ if (cd_active_low)
+ present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1;
+ else
+ present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 1 : 0;
+ host->card_inserted = present;
+#endif
+ spin_unlock_irqrestore(&host->lock, flags);
+ } else {
+ present = 0; /* TODO? Check DAT3 pins for card detection */
+ }
-/* ops.enable_sdio_irq */
-static void msdc_ops_enable_sdio_irq(struct mmc_host *mmc, int enable)
-{
- struct msdc_host *host = mmc_priv(mmc);
- struct msdc_hw *hw = host->hw;
- u32 base = host->base;
- u32 tmp;
-
- if (hw->flags & MSDC_EXT_SDIO_IRQ) { /* yes for sdio */
- if (enable) {
- hw->enable_sdio_eirq(); /* combo_sdio_enable_eirq */
- } else {
- hw->disable_sdio_eirq(); /* combo_sdio_disable_eirq */
- }
- } else {
- ERR_MSG("XXX "); /* so never enter here */
- tmp = sdr_read32(SDC_CFG);
- /* FIXME. Need to interrupt gap detection */
- if (enable) {
- tmp |= (SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP);
- } else {
- tmp &= ~(SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP);
- }
- sdr_write32(SDC_CFG, tmp);
- }
+ INIT_MSG("ops_get_cd return<%d>", present);
+ return present;
}
static struct mmc_host_ops mt_msdc_ops = {
- .request = msdc_ops_request,
- .set_ios = msdc_ops_set_ios,
- .get_ro = msdc_ops_get_ro,
- .get_cd = msdc_ops_get_cd,
- .enable_sdio_irq = msdc_ops_enable_sdio_irq,
+ .request = msdc_ops_request,
+ .set_ios = msdc_ops_set_ios,
+ .get_ro = msdc_ops_get_ro,
+ .get_cd = msdc_ops_get_cd,
};
/*--------------------------------------------------------------------------*/
@@ -2383,150 +1879,135 @@ static struct mmc_host_ops mt_msdc_ops = {
/*--------------------------------------------------------------------------*/
static irqreturn_t msdc_irq(int irq, void *dev_id)
{
- struct msdc_host *host = (struct msdc_host *)dev_id;
- struct mmc_data *data = host->data;
- struct mmc_command *cmd = host->cmd;
- u32 base = host->base;
-
- u32 cmdsts = MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | MSDC_INT_CMDRDY |
- MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | MSDC_INT_ACMDRDY |
- MSDC_INT_ACMD19_DONE;
- u32 datsts = MSDC_INT_DATCRCERR |MSDC_INT_DATTMO;
-
- u32 intsts = sdr_read32(MSDC_INT);
- u32 inten = sdr_read32(MSDC_INTEN); inten &= intsts;
-
- sdr_write32(MSDC_INT, intsts); /* clear interrupts */
- /* MSG will cause fatal error */
-
- /* card change interrupt */
- if (intsts & MSDC_INT_CDSC){
- if (mtk_sw_poll)
- return IRQ_HANDLED;
- IRQ_MSG("MSDC_INT_CDSC irq<0x%.8x>", intsts);
-#if 0 /* ---/+++ by chhung: fix slot mechanical bounce issue */
- tasklet_hi_schedule(&host->card_tasklet);
-#else
- schedule_delayed_work(&host->card_delaywork, HZ);
-#endif
- /* tuning when plug card ? */
- }
-
- /* sdio interrupt */
- if (intsts & MSDC_INT_SDIOIRQ){
- IRQ_MSG("XXX MSDC_INT_SDIOIRQ"); /* seems not sdio irq */
- //mmc_signal_sdio_irq(host->mmc);
- }
-
- /* transfer complete interrupt */
- if (data != NULL) {
- if (inten & MSDC_INT_XFER_COMPL) {
- data->bytes_xfered = host->dma.xfersz;
- complete(&host->xfer_done);
- }
-
- if (intsts & datsts) {
- /* do basic reset, or stop command will sdc_busy */
- msdc_reset();
- msdc_clr_fifo();
- msdc_clr_int();
- atomic_set(&host->abort, 1); /* For PIO mode exit */
-
- if (intsts & MSDC_INT_DATTMO){
- IRQ_MSG("XXX CMD<%d> MSDC_INT_DATTMO", host->mrq->cmd->opcode);
- data->error = (unsigned int)-ETIMEDOUT;
- }
- else if (intsts & MSDC_INT_DATCRCERR){
- IRQ_MSG("XXX CMD<%d> MSDC_INT_DATCRCERR, SDC_DCRC_STS<0x%x>", host->mrq->cmd->opcode, sdr_read32(SDC_DCRC_STS));
- data->error = (unsigned int)-EIO;
- }
-
- //if(sdr_read32(MSDC_INTEN) & MSDC_INT_XFER_COMPL) {
- if (host->dma_xfer) {
- complete(&host->xfer_done); /* Read CRC come fast, XFER_COMPL not enabled */
- } /* PIO mode can't do complete, because not init */
- }
- }
-
- /* command interrupts */
- if ((cmd != NULL) && (intsts & cmdsts)) {
- if ((intsts & MSDC_INT_CMDRDY) || (intsts & MSDC_INT_ACMDRDY) ||
- (intsts & MSDC_INT_ACMD19_DONE)) {
- u32 *rsp = &cmd->resp[0];
-
- switch (host->cmd_rsp) {
- case RESP_NONE:
- break;
- case RESP_R2:
- *rsp++ = sdr_read32(SDC_RESP3); *rsp++ = sdr_read32(SDC_RESP2);
- *rsp++ = sdr_read32(SDC_RESP1); *rsp++ = sdr_read32(SDC_RESP0);
- break;
- default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
- if ((intsts & MSDC_INT_ACMDRDY) || (intsts & MSDC_INT_ACMD19_DONE)) {
- *rsp = sdr_read32(SDC_ACMD_RESP);
- } else {
- *rsp = sdr_read32(SDC_RESP0);
- }
- break;
- }
- } else if ((intsts & MSDC_INT_RSPCRCERR) || (intsts & MSDC_INT_ACMDCRCERR)) {
- if(intsts & MSDC_INT_ACMDCRCERR){
- IRQ_MSG("XXX CMD<%d> MSDC_INT_ACMDCRCERR",cmd->opcode);
- }
- else {
- IRQ_MSG("XXX CMD<%d> MSDC_INT_RSPCRCERR",cmd->opcode);
- }
- cmd->error = (unsigned int)-EIO;
- } else if ((intsts & MSDC_INT_CMDTMO) || (intsts & MSDC_INT_ACMDTMO)) {
- if(intsts & MSDC_INT_ACMDTMO){
- IRQ_MSG("XXX CMD<%d> MSDC_INT_ACMDTMO",cmd->opcode);
- }
- else {
- IRQ_MSG("XXX CMD<%d> MSDC_INT_CMDTMO",cmd->opcode);
- }
- cmd->error = (unsigned int)-ETIMEDOUT;
- msdc_reset();
- msdc_clr_fifo();
- msdc_clr_int();
- }
- complete(&host->cmd_done);
- }
-
- /* mmc irq interrupts */
- if (intsts & MSDC_INT_MMCIRQ) {
- printk(KERN_INFO "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\r\n", host->id, sdr_read32(SDC_CSTS));
- }
-
+ struct msdc_host *host = (struct msdc_host *)dev_id;
+ struct mmc_data *data = host->data;
+ struct mmc_command *cmd = host->cmd;
+ void __iomem *base = host->base;
+
+ u32 cmdsts = MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | MSDC_INT_CMDRDY |
+ MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | MSDC_INT_ACMDRDY |
+ MSDC_INT_ACMD19_DONE;
+ u32 datsts = MSDC_INT_DATCRCERR | MSDC_INT_DATTMO;
+
+ u32 intsts = sdr_read32(MSDC_INT);
+ u32 inten = sdr_read32(MSDC_INTEN); inten &= intsts;
+
+ sdr_write32(MSDC_INT, intsts); /* clear interrupts */
+ /* MSG will cause fatal error */
+
+ /* card change interrupt */
+ if (intsts & MSDC_INT_CDSC) {
+ if (host->mmc->caps & MMC_CAP_NEEDS_POLL)
+ return IRQ_HANDLED;
+ IRQ_MSG("MSDC_INT_CDSC irq<0x%.8x>", intsts);
+ schedule_delayed_work(&host->card_delaywork, HZ);
+ /* tuning when plug card ? */
+ }
+
+ /* sdio interrupt */
+ if (intsts & MSDC_INT_SDIOIRQ) {
+ IRQ_MSG("XXX MSDC_INT_SDIOIRQ"); /* seems not sdio irq */
+ //mmc_signal_sdio_irq(host->mmc);
+ }
+
+ /* transfer complete interrupt */
+ if (data != NULL) {
+ if (inten & MSDC_INT_XFER_COMPL) {
+ data->bytes_xfered = host->xfer_size;
+ complete(&host->xfer_done);
+ }
+
+ if (intsts & datsts) {
+ /* do basic reset, or stop command will sdc_busy */
+ msdc_reset_hw(host);
+ msdc_clr_fifo();
+ msdc_clr_int();
+
+ if (intsts & MSDC_INT_DATTMO) {
+ IRQ_MSG("XXX CMD<%d> MSDC_INT_DATTMO", host->mrq->cmd->opcode);
+ data->error = -ETIMEDOUT;
+ } else if (intsts & MSDC_INT_DATCRCERR) {
+ IRQ_MSG("XXX CMD<%d> MSDC_INT_DATCRCERR, SDC_DCRC_STS<0x%x>", host->mrq->cmd->opcode, sdr_read32(SDC_DCRC_STS));
+ data->error = -EIO;
+ }
+
+ //if(sdr_read32(MSDC_INTEN) & MSDC_INT_XFER_COMPL) {
+ complete(&host->xfer_done); /* Read CRC come fast, XFER_COMPL not enabled */
+ }
+ }
+
+ /* command interrupts */
+ if ((cmd != NULL) && (intsts & cmdsts)) {
+ if ((intsts & MSDC_INT_CMDRDY) || (intsts & MSDC_INT_ACMDRDY) ||
+ (intsts & MSDC_INT_ACMD19_DONE)) {
+ u32 *rsp = &cmd->resp[0];
+
+ switch (host->cmd_rsp) {
+ case RESP_NONE:
+ break;
+ case RESP_R2:
+ *rsp++ = sdr_read32(SDC_RESP3); *rsp++ = sdr_read32(SDC_RESP2);
+ *rsp++ = sdr_read32(SDC_RESP1); *rsp++ = sdr_read32(SDC_RESP0);
+ break;
+ default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
+ if ((intsts & MSDC_INT_ACMDRDY) || (intsts & MSDC_INT_ACMD19_DONE))
+ *rsp = sdr_read32(SDC_ACMD_RESP);
+ else
+ *rsp = sdr_read32(SDC_RESP0);
+ break;
+ }
+ } else if ((intsts & MSDC_INT_RSPCRCERR) || (intsts & MSDC_INT_ACMDCRCERR)) {
+ if (intsts & MSDC_INT_ACMDCRCERR)
+ IRQ_MSG("XXX CMD<%d> MSDC_INT_ACMDCRCERR", cmd->opcode);
+ else
+ IRQ_MSG("XXX CMD<%d> MSDC_INT_RSPCRCERR", cmd->opcode);
+ cmd->error = -EIO;
+ } else if ((intsts & MSDC_INT_CMDTMO) || (intsts & MSDC_INT_ACMDTMO)) {
+ if (intsts & MSDC_INT_ACMDTMO)
+ IRQ_MSG("XXX CMD<%d> MSDC_INT_ACMDTMO", cmd->opcode);
+ else
+ IRQ_MSG("XXX CMD<%d> MSDC_INT_CMDTMO", cmd->opcode);
+ cmd->error = -ETIMEDOUT;
+ msdc_reset_hw(host);
+ msdc_clr_fifo();
+ msdc_clr_int();
+ }
+ complete(&host->cmd_done);
+ }
+
+ /* mmc irq interrupts */
+ if (intsts & MSDC_INT_MMCIRQ)
+ printk(KERN_INFO "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\r\n", host->id, sdr_read32(SDC_CSTS));
+
#ifdef MT6575_SD_DEBUG
- {
+ {
/* msdc_int_reg *int_reg = (msdc_int_reg*)&intsts;*/
- N_MSG(INT, "IRQ_EVT(0x%x): MMCIRQ(%d) CDSC(%d), ACRDY(%d), ACTMO(%d), ACCRE(%d) AC19DN(%d)",
- intsts,
- int_reg->mmcirq,
- int_reg->cdsc,
- int_reg->atocmdrdy,
- int_reg->atocmdtmo,
- int_reg->atocmdcrc,
- int_reg->atocmd19done);
- N_MSG(INT, "IRQ_EVT(0x%x): SDIO(%d) CMDRDY(%d), CMDTMO(%d), RSPCRC(%d), CSTA(%d)",
- intsts,
- int_reg->sdioirq,
- int_reg->cmdrdy,
- int_reg->cmdtmo,
- int_reg->rspcrc,
- int_reg->csta);
- N_MSG(INT, "IRQ_EVT(0x%x): XFCMP(%d) DXDONE(%d), DATTMO(%d), DATCRC(%d), DMAEMP(%d)",
- intsts,
- int_reg->xfercomp,
- int_reg->dxferdone,
- int_reg->dattmo,
- int_reg->datcrc,
- int_reg->dmaqempty);
-
- }
+ N_MSG(INT, "IRQ_EVT(0x%x): MMCIRQ(%d) CDSC(%d), ACRDY(%d), ACTMO(%d), ACCRE(%d) AC19DN(%d)",
+ intsts,
+ int_reg->mmcirq,
+ int_reg->cdsc,
+ int_reg->atocmdrdy,
+ int_reg->atocmdtmo,
+ int_reg->atocmdcrc,
+ int_reg->atocmd19done);
+ N_MSG(INT, "IRQ_EVT(0x%x): SDIO(%d) CMDRDY(%d), CMDTMO(%d), RSPCRC(%d), CSTA(%d)",
+ intsts,
+ int_reg->sdioirq,
+ int_reg->cmdrdy,
+ int_reg->cmdtmo,
+ int_reg->rspcrc,
+ int_reg->csta);
+ N_MSG(INT, "IRQ_EVT(0x%x): XFCMP(%d) DXDONE(%d), DATTMO(%d), DATCRC(%d), DMAEMP(%d)",
+ intsts,
+ int_reg->xfercomp,
+ int_reg->dxferdone,
+ int_reg->dattmo,
+ int_reg->datcrc,
+ int_reg->dmaqempty);
+ }
#endif
-
- return IRQ_HANDLED;
+
+ return IRQ_HANDLED;
}
/*--------------------------------------------------------------------------*/
@@ -2536,450 +2017,386 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_enable_cd_irq(struct msdc_host *host, int enable)
{
struct msdc_hw *hw = host->hw;
- u32 base = host->base;
+ void __iomem *base = host->base;
/* for sdio, not set */
if ((hw->flags & MSDC_CD_PIN_EN) == 0) {
/* Pull down card detection pin since it is not avaiable */
/*
- if (hw->config_gpio_pin)
- hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
- */
+ if (hw->config_gpio_pin)
+ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
+ */
sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP);
return;
}
- N_MSG(CFG, "CD IRQ Eanable(%d)", enable);
+ N_MSG(CFG, "CD IRQ Enable(%d)", enable);
if (enable) {
- if (hw->enable_cd_eirq) { /* not set, never enter */
- hw->enable_cd_eirq();
- } else {
- /* card detection circuit relies on the core power so that the core power
- * shouldn't be turned off. Here adds a reference count to keep
- * the core power alive.
- */
- //msdc_vcore_on(host); //did in msdc_init_hw()
-
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_UP);
-
- sdr_set_field(MSDC_PS, MSDC_PS_CDDEBOUNCE, DEFAULT_DEBOUNCE);
- sdr_set_bits(MSDC_PS, MSDC_PS_CDEN);
- sdr_set_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
- sdr_set_bits(SDC_CFG, SDC_CFG_INSWKUP); /* not in document! Fix me */
- }
- } else {
- if (hw->disable_cd_eirq) {
- hw->disable_cd_eirq();
- } else {
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
-
- sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP);
- sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
- sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
-
- /* Here decreases a reference count to core power since card
- * detection circuit is shutdown.
- */
- //msdc_vcore_off(host);
- }
- }
+ /* card detection circuit relies on the core power so that the core power
+ * shouldn't be turned off. Here adds a reference count to keep
+ * the core power alive.
+ */
+ //msdc_vcore_on(host); //did in msdc_init_hw()
+
+ if (hw->config_gpio_pin) /* NULL */
+ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_UP);
+
+ sdr_set_field(MSDC_PS, MSDC_PS_CDDEBOUNCE, DEFAULT_DEBOUNCE);
+ sdr_set_bits(MSDC_PS, MSDC_PS_CDEN);
+ sdr_set_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
+ sdr_set_bits(SDC_CFG, SDC_CFG_INSWKUP); /* not in document! Fix me */
+ } else {
+ if (hw->config_gpio_pin) /* NULL */
+ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
+
+ sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP);
+ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
+ sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
+
+ /* Here decreases a reference count to core power since card
+ * detection circuit is shutdown.
+ */
+ //msdc_vcore_off(host);
+ }
}
/* called by msdc_drv_probe */
static void msdc_init_hw(struct msdc_host *host)
{
- u32 base = host->base;
- struct msdc_hw *hw = host->hw;
-
-#ifdef MT6575_SD_DEBUG
- msdc_reg[host->id] = (struct msdc_regs *)host->base;
-#endif
+ void __iomem *base = host->base;
- /* Power on */
+ /* Power on */
#if 0 /* --- by chhung */
- msdc_vcore_on(host);
- msdc_pin_reset(host, MSDC_PIN_PULL_UP);
- msdc_select_clksrc(host, hw->clk_src);
- enable_clock(PERI_MSDC0_PDN + host->id, "SD");
- msdc_vdd_on(host);
+ msdc_vcore_on(host);
+ msdc_pin_reset(host, MSDC_PIN_PULL_UP);
+ msdc_select_clksrc(host, hw->clk_src);
+ enable_clock(PERI_MSDC0_PDN + host->id, "SD");
+ msdc_vdd_on(host);
#endif /* end of --- */
- /* Configure to MMC/SD mode */
- sdr_set_field(MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC);
-
- /* Reset */
- msdc_reset();
- msdc_clr_fifo();
-
- /* Disable card detection */
- sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
-
- /* Disable and clear all interrupts */
- sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN));
- sdr_write32(MSDC_INT, sdr_read32(MSDC_INT));
-
+ /* Configure to MMC/SD mode */
+ sdr_set_field(MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC);
+
+ /* Reset */
+ msdc_reset_hw(host);
+ msdc_clr_fifo();
+
+ /* Disable card detection */
+ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
+
+ /* Disable and clear all interrupts */
+ sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN));
+ sdr_write32(MSDC_INT, sdr_read32(MSDC_INT));
+
#if 1
/* reset tuning parameter */
- sdr_write32(MSDC_PAD_CTL0, 0x00090000);
- sdr_write32(MSDC_PAD_CTL1, 0x000A0000);
- sdr_write32(MSDC_PAD_CTL2, 0x000A0000);
- // sdr_write32(MSDC_PAD_TUNE, 0x00000000);
- sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward
- // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000);
- sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward
- sdr_write32(MSDC_DAT_RDDLY1, 0x00000000);
- sdr_write32(MSDC_IOCON, 0x00000000);
+ sdr_write32(MSDC_PAD_CTL0, 0x00090000);
+ sdr_write32(MSDC_PAD_CTL1, 0x000A0000);
+ sdr_write32(MSDC_PAD_CTL2, 0x000A0000);
+ // sdr_write32(MSDC_PAD_TUNE, 0x00000000);
+ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward
+ // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000);
+ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward
+ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000);
+ sdr_write32(MSDC_IOCON, 0x00000000);
#if 0 // use MT7620 default value: 0x403c004f
- sdr_write32(MSDC_PATCH_BIT0, 0x003C000F); /* bit0 modified: Rx Data Clock Source: 1 -> 2.0*/
+ sdr_write32(MSDC_PATCH_BIT0, 0x003C000F); /* bit0 modified: Rx Data Clock Source: 1 -> 2.0*/
+#endif
+
+ if (sdr_read32(MSDC_ECO_VER) >= 4) {
+ if (host->id == 1) {
+ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_WRDAT_CRCS, 1);
+ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMD_RSP, 1);
+
+ /* internal clock: latch read data */
+ sdr_set_bits(MSDC_PATCH_BIT0, MSDC_PATCH_BIT_CKGEN_CK);
+ }
+ }
#endif
- if (sdr_read32(MSDC_ECO_VER) >= 4) {
- if (host->id == 1) {
- sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_WRDAT_CRCS, 1);
- sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMD_RSP, 1);
-
- /* internal clock: latch read data */
- sdr_set_bits(MSDC_PATCH_BIT0, MSDC_PATCH_BIT_CKGEN_CK);
- }
- }
-#endif
-
- /* for safety, should clear SDC_CFG.SDIO_INT_DET_EN & set SDC_CFG.SDIO in
- pre-loader,uboot,kernel drivers. and SDC_CFG.SDIO_INT_DET_EN will be only
- set when kernel driver wants to use SDIO bus interrupt */
- /* Configure to enable SDIO mode. it's must otherwise sdio cmd5 failed */
- sdr_set_bits(SDC_CFG, SDC_CFG_SDIO);
-
- /* disable detect SDIO device interupt function */
- sdr_clr_bits(SDC_CFG, SDC_CFG_SDIOIDE);
-
- /* eneable SMT for glitch filter */
- sdr_set_bits(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT);
- sdr_set_bits(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT);
- sdr_set_bits(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT);
+ /* for safety, should clear SDC_CFG.SDIO_INT_DET_EN & set SDC_CFG.SDIO in
+ pre-loader,uboot,kernel drivers. and SDC_CFG.SDIO_INT_DET_EN will be only
+ set when kernel driver wants to use SDIO bus interrupt */
+ /* Configure to enable SDIO mode. it's must otherwise sdio cmd5 failed */
+ sdr_set_bits(SDC_CFG, SDC_CFG_SDIO);
+
+ /* disable detect SDIO device interrupt function */
+ sdr_clr_bits(SDC_CFG, SDC_CFG_SDIOIDE);
+
+ /* enable SMT for glitch filter */
+ sdr_set_bits(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT);
+ sdr_set_bits(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT);
+ sdr_set_bits(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT);
#if 1
- /* set clk, cmd, dat pad driving */
- sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, hw->clk_drv);
- sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, hw->clk_drv);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, hw->cmd_drv);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, hw->cmd_drv);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, hw->dat_drv);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, hw->dat_drv);
-#else
- sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0);
- sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0);
+ /* set clk, cmd, dat pad driving */
+ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 4);
+ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 4);
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 4);
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 4);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 4);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 4);
+#else
+ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0);
+ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0);
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0);
+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0);
+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0);
#endif
- /* set sampling edge */
+ /* set sampling edge */
- /* write crc timeout detection */
- sdr_set_field(MSDC_PATCH_BIT0, 1 << 30, 1);
+ /* write crc timeout detection */
+ sdr_set_field(MSDC_PATCH_BIT0, 1 << 30, 1);
- /* Configure to default data timeout */
- sdr_set_field(SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC);
+ /* Configure to default data timeout */
+ sdr_set_field(SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC);
- msdc_set_buswidth(host, MMC_BUS_WIDTH_1);
+ msdc_set_buswidth(host, MMC_BUS_WIDTH_1);
- N_MSG(FUC, "init hardware done!");
+ N_MSG(FUC, "init hardware done!");
}
/* called by msdc_drv_remove */
static void msdc_deinit_hw(struct msdc_host *host)
{
- u32 base = host->base;
+ void __iomem *base = host->base;
- /* Disable and clear all interrupts */
- sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN));
- sdr_write32(MSDC_INT, sdr_read32(MSDC_INT));
+ /* Disable and clear all interrupts */
+ sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN));
+ sdr_write32(MSDC_INT, sdr_read32(MSDC_INT));
- /* Disable card detection */
- msdc_enable_cd_irq(host, 0);
- // msdc_set_power_mode(host, MMC_POWER_OFF); /* make sure power down */ /* --- by chhung */
+ /* Disable card detection */
+ msdc_enable_cd_irq(host, 0);
+ // msdc_set_power_mode(host, MMC_POWER_OFF); /* make sure power down */ /* --- by chhung */
}
/* init gpd and bd list in msdc_drv_probe */
static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
{
- gpd_t *gpd = dma->gpd;
- bd_t *bd = dma->bd;
- bd_t *ptr, *prev;
-
- /* we just support one gpd */
- int bdlen = MAX_BD_PER_GPD;
-
- /* init the 2 gpd */
- memset(gpd, 0, sizeof(gpd_t) * 2);
- //gpd->next = (void *)virt_to_phys(gpd + 1); /* pointer to a null gpd, bug! kmalloc <-> virt_to_phys */
- //gpd->next = (dma->gpd_addr + 1); /* bug */
- gpd->next = (void *)((u32)dma->gpd_addr + sizeof(gpd_t));
-
- //gpd->intr = 0;
- gpd->bdp = 1; /* hwo, cs, bd pointer */
- //gpd->ptr = (void*)virt_to_phys(bd);
- gpd->ptr = (void *)dma->bd_addr; /* physical address */
-
- memset(bd, 0, sizeof(bd_t) * bdlen);
- ptr = bd + bdlen - 1;
- //ptr->eol = 1; /* 0 or 1 [Fix me]*/
- //ptr->next = 0;
-
- while (ptr != bd) {
- prev = ptr - 1;
- prev->next = (void *)(dma->bd_addr + sizeof(bd_t) *(ptr - bd));
- ptr = prev;
- }
+ struct gpd *gpd = dma->gpd;
+ struct bd *bd = dma->bd;
+ int i;
+
+ /* we just support one gpd, but gpd->next must be set for desc
+ * DMA. That's why we alloc 2 gpd structures.
+ */
+
+ memset(gpd, 0, sizeof(struct gpd) * 2);
+
+ gpd->bdp = 1; /* hwo, cs, bd pointer */
+ gpd->ptr = (void *)dma->bd_addr; /* physical address */
+ gpd->next = (void *)((u32)dma->gpd_addr + sizeof(struct gpd));
+
+ memset(bd, 0, sizeof(struct bd) * MAX_BD_NUM);
+ for (i = 0; i < (MAX_BD_NUM - 1); i++)
+ bd[i].next = (void *)(dma->bd_addr + sizeof(*bd) * (i + 1));
}
static int msdc_drv_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- __iomem void *base;
- struct mmc_host *mmc;
- struct resource *mem;
- struct msdc_host *host;
- struct msdc_hw *hw;
- int ret, irq;
-
- pdev->dev.platform_data = &msdc0_hw;
-
- if (of_property_read_bool(pdev->dev.of_node, "mtk,wp-en"))
- msdc0_hw.flags |= MSDC_WP_PIN_EN;
-
- /* Allocate MMC host for this device */
- mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
- if (!mmc) return -ENOMEM;
-
- hw = (struct msdc_hw*)pdev->dev.platform_data;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
-
- //BUG_ON((!hw) || (!mem) || (irq < 0)); /* --- by chhung */
-
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- /* Set host parameters to mmc */
- mmc->ops = &mt_msdc_ops;
- mmc->f_min = HOST_MIN_MCLK;
- mmc->f_max = HOST_MAX_MCLK;
- mmc->ocr_avail = MSDC_OCR_AVAIL;
-
- /* For sd card: MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED,
- For sdio : MSDC_EXT_SDIO_IRQ | MSDC_HIGHSPEED */
- if (hw->flags & MSDC_HIGHSPEED) {
- mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
- }
- if (hw->data_pins == 4) { /* current data_pins are all 4*/
- mmc->caps |= MMC_CAP_4_BIT_DATA;
- } else if (hw->data_pins == 8) {
- mmc->caps |= MMC_CAP_8_BIT_DATA;
- }
- if ((hw->flags & MSDC_SDIO_IRQ) || (hw->flags & MSDC_EXT_SDIO_IRQ))
- mmc->caps |= MMC_CAP_SDIO_IRQ; /* yes for sdio */
+ struct resource *res;
+ __iomem void *base;
+ struct mmc_host *mmc;
+ struct msdc_host *host;
+ struct msdc_hw *hw;
+ int ret;
+
+ hw = &msdc0_hw;
+
+ if (of_property_read_bool(pdev->dev.of_node, "mtk,wp-en"))
+ msdc0_hw.flags |= MSDC_WP_PIN_EN;
+
+ /* Allocate MMC host for this device */
+ mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto host_free;
+ }
+
+ /* Set host parameters to mmc */
+ mmc->ops = &mt_msdc_ops;
+ mmc->f_min = HOST_MIN_MCLK;
+ mmc->f_max = HOST_MAX_MCLK;
+ mmc->ocr_avail = MSDC_OCR_AVAIL;
+
+ mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
+
+ //TODO: read this as bus-width from dt (via mmc_of_parse)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
cd_active_low = !of_property_read_bool(pdev->dev.of_node, "mediatek,cd-high");
- mtk_sw_poll = of_property_read_bool(pdev->dev.of_node, "mediatek,cd-poll");
- if (mtk_sw_poll)
+ if (of_property_read_bool(pdev->dev.of_node, "mediatek,cd-poll"))
mmc->caps |= MMC_CAP_NEEDS_POLL;
- /* MMC core transfer sizes tunable parameters */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
- mmc->max_segs = MAX_HW_SGMTS;
-#else
- mmc->max_hw_segs = MAX_HW_SGMTS;
- mmc->max_phys_segs = MAX_PHY_SGMTS;
-#endif
- mmc->max_seg_size = MAX_SGMT_SZ;
- mmc->max_blk_size = HOST_MAX_BLKSZ;
- mmc->max_req_size = MAX_REQ_SZ;
- mmc->max_blk_count = mmc->max_req_size;
-
- host = mmc_priv(mmc);
- host->hw = hw;
- host->mmc = mmc;
- host->id = pdev->id;
- if (host->id < 0 || host->id >= 4)
- host->id = 0;
- host->error = 0;
- host->irq = irq;
- host->base = (unsigned long) base;
- host->mclk = 0; /* mclk: the request clock of mmc sub-system */
- host->hclk = hclks[hw->clk_src]; /* hclk: clock of clock source to msdc controller */
- host->sclk = 0; /* sclk: the really clock after divition */
- host->pm_state = PMSG_RESUME;
- host->suspend = 0;
- host->core_clkon = 0;
- host->card_clkon = 0;
- host->core_power = 0;
- host->power_mode = MMC_POWER_OFF;
-// host->card_inserted = hw->flags & MSDC_REMOVABLE ? 0 : 1;
- host->timeout_ns = 0;
- host->timeout_clks = DEFAULT_DTOC * 65536;
-
- host->mrq = NULL;
- //init_MUTEX(&host->sem); /* we don't need to support multiple threads access */
-
- host->dma.used_gpd = 0;
- host->dma.used_bd = 0;
- mmc_dev(mmc)->dma_mask = NULL;
-
- /* using dma_alloc_coherent*/ /* todo: using 1, for all 4 slots */
- host->dma.gpd = dma_alloc_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), &host->dma.gpd_addr, GFP_KERNEL);
- host->dma.bd = dma_alloc_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), &host->dma.bd_addr, GFP_KERNEL);
- BUG_ON((!host->dma.gpd) || (!host->dma.bd));
- msdc_init_gpd_bd(host, &host->dma);
- /*for emmc*/
- msdc_6575_host[pdev->id] = host;
-
-#if 0
- tasklet_init(&host->card_tasklet, msdc_tasklet_card, (ulong)host);
-#else
- INIT_DELAYED_WORK(&host->card_delaywork, msdc_tasklet_card);
-#endif
- spin_lock_init(&host->lock);
- msdc_init_hw(host);
-
- if (ralink_soc == MT762X_SOC_MT7621AT)
- ret = request_irq((unsigned int)irq, msdc_irq, 0, dev_name(&pdev->dev), host);
- else
- ret = request_irq((unsigned int)irq, msdc_irq, IRQF_TRIGGER_LOW, dev_name(&pdev->dev), host);
-
- if (ret) goto release;
- // mt65xx_irq_unmask(irq); /* --- by chhung */
-
- if (hw->flags & MSDC_CD_PIN_EN) { /* not set for sdio */
- if (hw->request_cd_eirq) { /* not set for MT6575 */
- hw->request_cd_eirq(msdc_eirq_cd, (void*)host); /* msdc_eirq_cd will not be used! */
- }
- }
-
- if (hw->request_sdio_eirq) /* set to combo_sdio_request_eirq() for WIFI */
- hw->request_sdio_eirq(msdc_eirq_sdio, (void*)host); /* msdc_eirq_sdio() will be called when EIRQ */
-
- if (hw->register_pm) {/* yes for sdio */
-#ifdef CONFIG_PM
- hw->register_pm(msdc_pm, (void*)host); /* combo_sdio_register_pm() */
-#endif
- if(hw->flags & MSDC_SYS_SUSPEND) { /* will not set for WIFI */
- ERR_MSG("MSDC_SYS_SUSPEND and register_pm both set");
- }
- //mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* pm not controlled by system but by client. */ /* --- by chhung */
- }
-
- platform_set_drvdata(pdev, mmc);
-
- ret = mmc_add_host(mmc);
- if (ret) goto free_irq;
-
- /* Config card detection pin and enable interrupts */
- if (hw->flags & MSDC_CD_PIN_EN) { /* set for card */
- msdc_enable_cd_irq(host, 1);
- } else {
- msdc_enable_cd_irq(host, 0);
- }
-
- return 0;
-
-free_irq:
- free_irq(irq, host);
-release:
- platform_set_drvdata(pdev, NULL);
- msdc_deinit_hw(host);
-
-#if 0
- tasklet_kill(&host->card_tasklet);
-#else
- cancel_delayed_work_sync(&host->card_delaywork);
-#endif
+ /* MMC core transfer sizes tunable parameters */
+ mmc->max_segs = MAX_HW_SGMTS;
+
+ mmc->max_seg_size = MAX_SGMT_SZ;
+ mmc->max_blk_size = HOST_MAX_BLKSZ;
+ mmc->max_req_size = MAX_REQ_SZ;
+ mmc->max_blk_count = mmc->max_req_size;
+
+ host = mmc_priv(mmc);
+ host->hw = hw;
+ host->mmc = mmc;
+ host->id = pdev->id;
+ if (host->id < 0 || host->id >= 4)
+ host->id = 0;
+ host->error = 0;
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0) {
+ ret = -EINVAL;
+ goto host_free;
+ }
- if (mem)
- release_mem_region(mem->start, mem->end - mem->start + 1);
+ host->base = base;
+ host->mclk = 0; /* mclk: the request clock of mmc sub-system */
+ host->hclk = hclks[hw->clk_src]; /* hclk: clock of clock source to msdc controller */
+ host->sclk = 0; /* sclk: the really clock after divition */
+ host->pm_state = PMSG_RESUME;
+ host->suspend = 0;
+ host->core_clkon = 0;
+ host->card_clkon = 0;
+ host->core_power = 0;
+ host->power_mode = MMC_POWER_OFF;
+// host->card_inserted = hw->flags & MSDC_REMOVABLE ? 0 : 1;
+ host->timeout_ns = 0;
+ host->timeout_clks = DEFAULT_DTOC * 65536;
+
+ host->mrq = NULL;
+ //init_MUTEX(&host->sem); /* we don't need to support multiple threads access */
+
+ mmc_dev(mmc)->dma_mask = NULL;
+
+ /* using dma_alloc_coherent*/ /* todo: using 1, for all 4 slots */
+ host->dma.gpd = dma_alloc_coherent(&pdev->dev,
+ MAX_GPD_NUM * sizeof(struct gpd),
+ &host->dma.gpd_addr, GFP_KERNEL);
+ host->dma.bd = dma_alloc_coherent(&pdev->dev,
+ MAX_BD_NUM * sizeof(struct bd),
+ &host->dma.bd_addr, GFP_KERNEL);
+ if (!host->dma.gpd || !host->dma.bd) {
+ ret = -ENOMEM;
+ goto release_mem;
+ }
+ msdc_init_gpd_bd(host, &host->dma);
+
+ INIT_DELAYED_WORK(&host->card_delaywork, msdc_tasklet_card);
+ spin_lock_init(&host->lock);
+ msdc_init_hw(host);
+
+ /* TODO check whether flags 0 is correct, the mtk-sd driver uses
+ * IRQF_TRIGGER_LOW | IRQF_ONESHOT for flags
+ *
+ * for flags 0 the trigger polarity is determined by the
+ * device tree, but not the oneshot flag, but maybe it is also
+ * not needed because the soc could be oneshot safe.
+ */
+ ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq, 0, pdev->name,
+ host);
+ if (ret)
+ goto release;
+
+ platform_set_drvdata(pdev, mmc);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto release;
+
+ /* Config card detection pin and enable interrupts */
+ if (hw->flags & MSDC_CD_PIN_EN) { /* set for card */
+ msdc_enable_cd_irq(host, 1);
+ } else {
+ msdc_enable_cd_irq(host, 0);
+ }
- mmc_free_host(mmc);
+ return 0;
- return ret;
+release:
+ platform_set_drvdata(pdev, NULL);
+ msdc_deinit_hw(host);
+ cancel_delayed_work_sync(&host->card_delaywork);
+
+release_mem:
+ if (host->dma.gpd)
+ dma_free_coherent(&pdev->dev, MAX_GPD_NUM * sizeof(struct gpd),
+ host->dma.gpd, host->dma.gpd_addr);
+ if (host->dma.bd)
+ dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct bd),
+ host->dma.bd, host->dma.bd_addr);
+host_free:
+ mmc_free_host(mmc);
+
+ return ret;
}
/* 4 device share one driver, using "drvdata" to show difference */
static int msdc_drv_remove(struct platform_device *pdev)
{
- struct mmc_host *mmc;
- struct msdc_host *host;
- struct resource *mem;
+ struct mmc_host *mmc;
+ struct msdc_host *host;
- mmc = platform_get_drvdata(pdev);
- BUG_ON(!mmc);
-
- host = mmc_priv(mmc);
- BUG_ON(!host);
+ mmc = platform_get_drvdata(pdev);
+ BUG_ON(!mmc);
- ERR_MSG("removed !!!");
+ host = mmc_priv(mmc);
+ BUG_ON(!host);
- platform_set_drvdata(pdev, NULL);
- mmc_remove_host(host->mmc);
- msdc_deinit_hw(host);
+ ERR_MSG("removed !!!");
-#if 0
- tasklet_kill(&host->card_tasklet);
-#else
- cancel_delayed_work_sync(&host->card_delaywork);
-#endif
- free_irq(host->irq, host);
+ platform_set_drvdata(pdev, NULL);
+ mmc_remove_host(host->mmc);
+ msdc_deinit_hw(host);
- dma_free_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), host->dma.gpd, host->dma.gpd_addr);
- dma_free_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), host->dma.bd, host->dma.bd_addr);
+ cancel_delayed_work_sync(&host->card_delaywork);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_free_coherent(&pdev->dev, MAX_GPD_NUM * sizeof(struct gpd),
+ host->dma.gpd, host->dma.gpd_addr);
+ dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct bd),
+ host->dma.bd, host->dma.bd_addr);
- if (mem)
- release_mem_region(mem->start, mem->end - mem->start + 1);
+ mmc_free_host(host->mmc);
- mmc_free_host(host->mmc);
-
- return 0;
+ return 0;
}
/* Fix me: Power Flow */
#ifdef CONFIG_PM
+
+static void msdc_drv_pm(struct platform_device *pdev, pm_message_t state)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ if (mmc) {
+ struct msdc_host *host = mmc_priv(mmc);
+ msdc_pm(state, (void *)host);
+ }
+}
+
static int msdc_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
- int ret = 0;
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct msdc_host *host = mmc_priv(mmc);
-
- if (mmc && state.event == PM_EVENT_SUSPEND && (host->hw->flags & MSDC_SYS_SUSPEND)) { /* will set for card */
- msdc_pm(state, (void*)host);
- }
-
- return ret;
+ if (state.event == PM_EVENT_SUSPEND)
+ msdc_drv_pm(pdev, state);
+ return 0;
}
static int msdc_drv_resume(struct platform_device *pdev)
{
- int ret = 0;
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct msdc_host *host = mmc_priv(mmc);
- struct pm_message state;
-
- state.event = PM_EVENT_RESUME;
- if (mmc && (host->hw->flags & MSDC_SYS_SUSPEND)) {/* will set for card */
- msdc_pm(state, (void*)host);
- }
-
- /* This mean WIFI not controller by PM */
-
- return ret;
+ struct pm_message state;
+
+ state.event = PM_EVENT_RESUME;
+ msdc_drv_pm(pdev, state);
+ return 0;
}
#endif
@@ -2990,17 +2407,16 @@ static const struct of_device_id mt7620_sdhci_match[] = {
MODULE_DEVICE_TABLE(of, mt7620_sdhci_match);
static struct platform_driver mt_msdc_driver = {
- .probe = msdc_drv_probe,
- .remove = msdc_drv_remove,
+ .probe = msdc_drv_probe,
+ .remove = msdc_drv_remove,
#ifdef CONFIG_PM
- .suspend = msdc_drv_suspend,
- .resume = msdc_drv_resume,
+ .suspend = msdc_drv_suspend,
+ .resume = msdc_drv_resume,
#endif
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- .of_match_table = mt7620_sdhci_match,
- },
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = mt7620_sdhci_match,
+ },
};
/*--------------------------------------------------------------------------*/
@@ -3008,61 +2424,29 @@ static struct platform_driver mt_msdc_driver = {
/*--------------------------------------------------------------------------*/
static int __init mt_msdc_init(void)
{
- int ret;
-/* +++ by chhung */
- u32 reg;
-
-#if defined (CONFIG_MTD_ANY_RALINK)
- extern int ra_check_flash_type(void);
- if(ra_check_flash_type() == 2) { /* NAND */
- printk("%s: !!!!! SDXC Module Initialize Fail !!!!!", __func__);
- return 0;
- }
-#endif
- printk("MTK MSDC device init.\n");
- mtk_sd_device.dev.platform_data = &msdc0_hw;
-if (ralink_soc == MT762X_SOC_MT7620A || ralink_soc == MT762X_SOC_MT7621AT) {
-//#if defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621)
- reg = sdr_read32((volatile u32*)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3<<18);
-//#if defined (CONFIG_RALINK_MT7620)
- if (ralink_soc == MT762X_SOC_MT7620A)
- reg |= 0x1<<18;
-//#endif
-} else {
-//#elif defined (CONFIG_RALINK_MT7628)
- /* TODO: maybe omitted when RAether already toggle AGPIO_CFG */
- reg = sdr_read32((volatile u32*)(RALINK_SYSCTL_BASE + 0x3c));
- reg |= 0x1e << 16;
- sdr_write32((volatile u32*)(RALINK_SYSCTL_BASE + 0x3c), reg);
-
- reg = sdr_read32((volatile u32*)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3<<10);
-#if defined (CONFIG_MTK_MMC_EMMC_8BIT)
- reg |= 0x3<<26 | 0x3<<28 | 0x3<<30;
- msdc0_hw.data_pins = 8,
-#endif
-//#endif
-}
- sdr_write32((volatile u32*)(RALINK_SYSCTL_BASE + 0x60), reg);
- //platform_device_register(&mtk_sd_device);
-/* end of +++ */
-
- ret = platform_driver_register(&mt_msdc_driver);
- if (ret) {
- printk(KERN_ERR DRV_NAME ": Can't register driver");
- return ret;
- }
- printk(KERN_INFO DRV_NAME ": MediaTek MT6575 MSDC Driver\n");
+ int ret;
+ u32 reg;
+
+ // Set the pins for sdxc to sdxc mode
+ //FIXME: this should be done by pinctrl and not by the sd driver
+ reg = sdr_read32((void __iomem *)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3 << 18);
+ sdr_write32((void __iomem *)(RALINK_SYSCTL_BASE + 0x60), reg);
+
+ ret = platform_driver_register(&mt_msdc_driver);
+ if (ret) {
+ printk(KERN_ERR DRV_NAME ": Can't register driver");
+ return ret;
+ }
-#if defined (MT6575_SD_DEBUG)
- msdc_debug_proc_init();
+#if defined(MT6575_SD_DEBUG)
+ msdc_debug_proc_init();
#endif
- return 0;
+ return 0;
}
static void __exit mt_msdc_exit(void)
{
-// platform_device_unregister(&mtk_sd_device);
- platform_driver_unregister(&mt_msdc_driver);
+ platform_driver_unregister(&mt_msdc_driver);
}
module_init(mt_msdc_init);
@@ -3070,5 +2454,3 @@ module_exit(mt_msdc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MediaTek MT6575 SD/MMC Card Driver");
MODULE_AUTHOR("Infinity Chen <infinity.chen@mediatek.com>");
-
-EXPORT_SYMBOL(msdc_6575_host);
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 1fa41eb8a87f..17f2105ec698 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -56,112 +56,99 @@
#include <ralink_regs.h>
-extern void pcie_phy_init(void);
-extern void chk_phy_pll(void);
-
/*
* These functions and structures provide the BIOS scan and mapping of the PCI
* devices.
*/
-#define CONFIG_PCIE_PORT0
-#define CONFIG_PCIE_PORT1
-#define CONFIG_PCIE_PORT2
-#define RALINK_PCIE0_CLK_EN (1<<24)
-#define RALINK_PCIE1_CLK_EN (1<<25)
-#define RALINK_PCIE2_CLK_EN (1<<26)
-
-#define RALINK_PCI_CONFIG_ADDR 0x20
-#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24
-#define SURFBOARDINT_PCIE0 11 /* PCIE0 */
-#define RALINK_INT_PCIE0 SURFBOARDINT_PCIE0
-#define RALINK_INT_PCIE1 SURFBOARDINT_PCIE1
-#define RALINK_INT_PCIE2 SURFBOARDINT_PCIE2
-#define SURFBOARDINT_PCIE1 31 /* PCIE1 */
-#define SURFBOARDINT_PCIE2 32 /* PCIE2 */
-#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028)
-#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C)
-#define RALINK_PCIE0_RST (1<<24)
-#define RALINK_PCIE1_RST (1<<25)
-#define RALINK_PCIE2_RST (1<<26)
-#define RALINK_SYSCTL_BASE 0xBE000000
-
-#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000)
-#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C)
-#define RALINK_PCI_BASE 0xBE140000
-
-#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000)
-#define RT6855_PCIE0_OFFSET 0x2000
-#define RT6855_PCIE1_OFFSET 0x3000
-#define RT6855_PCIE2_OFFSET 0x4000
-
-#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0010)
-#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0018)
-#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0030)
-#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0034)
-#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0038)
-#define RALINK_PCI0_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0050)
-#define RALINK_PCI0_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0060)
-#define RALINK_PCI0_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0064)
-
-#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0010)
-#define RALINK_PCI1_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0018)
-#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0030)
-#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0034)
-#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0038)
-#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0050)
-#define RALINK_PCI1_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0060)
-#define RALINK_PCI1_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0064)
-
-#define RALINK_PCI2_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0010)
-#define RALINK_PCI2_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0018)
-#define RALINK_PCI2_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0030)
-#define RALINK_PCI2_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0034)
-#define RALINK_PCI2_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0038)
-#define RALINK_PCI2_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0050)
-#define RALINK_PCI2_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0060)
-#define RALINK_PCI2_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0064)
-
-#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000)
-#define RALINK_PCIEPHY_P2_CTL_OFFSET (RALINK_PCI_BASE + 0xA000)
-
-
-#define MV_WRITE(ofs, data) \
- *(volatile u32 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le32(data)
-#define MV_READ(ofs, data) \
- *(data) = le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs)))
-#define MV_READ_DATA(ofs) \
- le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs)))
-
-#define MV_WRITE_16(ofs, data) \
- *(volatile u16 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le16(data)
-#define MV_READ_16(ofs, data) \
- *(data) = le16_to_cpu(*(volatile u16 *)(RALINK_PCI_BASE+(ofs)))
-
-#define MV_WRITE_8(ofs, data) \
- *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) = data
-#define MV_READ_8(ofs, data) \
- *(data) = *(volatile u8 *)(RALINK_PCI_BASE+(ofs))
-
-
-
-#define RALINK_PCI_MM_MAP_BASE 0x60000000
-#define RALINK_PCI_IO_MAP_BASE 0x1e160000
+#define RALINK_PCIE0_CLK_EN (1<<24)
+#define RALINK_PCIE1_CLK_EN (1<<25)
+#define RALINK_PCIE2_CLK_EN (1<<26)
+
+#define RALINK_PCI_CONFIG_ADDR 0x20
+#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24
+#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028)
+#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C)
+#define RALINK_PCIE0_RST (1<<24)
+#define RALINK_PCIE1_RST (1<<25)
+#define RALINK_PCIE2_RST (1<<26)
+#define RALINK_SYSCTL_BASE 0xBE000000
+
+#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000)
+#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C)
+#define RALINK_PCI_BASE 0xBE140000
+
+#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000)
+#define RT6855_PCIE0_OFFSET 0x2000
+#define RT6855_PCIE1_OFFSET 0x3000
+#define RT6855_PCIE2_OFFSET 0x4000
+
+#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0010)
+#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0018)
+#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0030)
+#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0034)
+#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0038)
+#define RALINK_PCI0_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0050)
+#define RALINK_PCI0_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0060)
+#define RALINK_PCI0_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0064)
+
+#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0010)
+#define RALINK_PCI1_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0018)
+#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0030)
+#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0034)
+#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0038)
+#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0050)
+#define RALINK_PCI1_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0060)
+#define RALINK_PCI1_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0064)
+
+#define RALINK_PCI2_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0010)
+#define RALINK_PCI2_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0018)
+#define RALINK_PCI2_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0030)
+#define RALINK_PCI2_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0034)
+#define RALINK_PCI2_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0038)
+#define RALINK_PCI2_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0050)
+#define RALINK_PCI2_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0060)
+#define RALINK_PCI2_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0064)
+
+#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000)
+#define RALINK_PCIEPHY_P2_CTL_OFFSET (RALINK_PCI_BASE + 0xA000)
+
+#define MV_WRITE(ofs, data) \
+ *(volatile u32 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le32(data)
+#define MV_READ(ofs, data) \
+ *(data) = le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs)))
+#define MV_READ_DATA(ofs) \
+ le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs)))
+
+#define MV_WRITE_16(ofs, data) \
+ *(volatile u16 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le16(data)
+#define MV_READ_16(ofs, data) \
+ *(data) = le16_to_cpu(*(volatile u16 *)(RALINK_PCI_BASE+(ofs)))
+
+#define MV_WRITE_8(ofs, data) \
+ *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) = data
+#define MV_READ_8(ofs, data) \
+ *(data) = *(volatile u8 *)(RALINK_PCI_BASE+(ofs))
+
+#define RALINK_PCI_MM_MAP_BASE 0x60000000
+#define RALINK_PCI_IO_MAP_BASE 0x1e160000
#define RALINK_SYSTEM_CONTROL_BASE 0xbe000000
-#define GPIO_PERST
-#define ASSERT_SYSRST_PCIE(val) do { \
- if (*(unsigned int *)(0xbe00000c) == 0x00030101) \
- RALINK_RSTCTRL |= val; \
- else \
- RALINK_RSTCTRL &= ~val; \
- } while(0)
-#define DEASSERT_SYSRST_PCIE(val) do { \
- if (*(unsigned int *)(0xbe00000c) == 0x00030101) \
- RALINK_RSTCTRL &= ~val; \
- else \
- RALINK_RSTCTRL |= val; \
- } while(0)
+
+#define ASSERT_SYSRST_PCIE(val) \
+ do { \
+ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \
+ RALINK_RSTCTRL |= val; \
+ else \
+ RALINK_RSTCTRL &= ~val; \
+ } while(0)
+#define DEASSERT_SYSRST_PCIE(val) \
+ do { \
+ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \
+ RALINK_RSTCTRL &= ~val; \
+ else \
+ RALINK_RSTCTRL |= val; \
+ } while(0)
#define RALINK_SYSCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x14)
#define RALINK_CLKCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x30)
#define RALINK_RSTCTRL *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x34)
@@ -207,7 +194,8 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,
address_reg = RALINK_PCI_CONFIG_ADDR;
data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG;
- address = (((where&0xF00)>>8)<<24) |(bus->number << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | 0x80000000;
+ address = (((where&0xF00)>>8)<<24) |(bus->number << 16) | (slot << 11) |
+ (func << 8) | (where & 0xfc) | 0x80000000;
MV_WRITE(address_reg, address);
switch(access_type) {
@@ -281,7 +269,6 @@ write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-
static int
pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val)
{
@@ -309,30 +296,16 @@ pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u
}
struct pci_ops mt7621_pci_ops= {
- .read = pci_config_read,
+ .read = pci_config_read,
.write = pci_config_write,
};
-static struct resource mt7621_res_pci_mem1 = {
- .name = "PCI MEM1",
- .start = RALINK_PCI_MM_MAP_BASE,
- .end = (u32)((RALINK_PCI_MM_MAP_BASE + (unsigned char *)0x0fffffff)),
- .flags = IORESOURCE_MEM,
-};
-static struct resource mt7621_res_pci_io1 = {
- .name = "PCI I/O1",
- .start = RALINK_PCI_IO_MAP_BASE,
- .end = (u32)((RALINK_PCI_IO_MAP_BASE + (unsigned char *)0x0ffff)),
- .flags = IORESOURCE_IO,
-};
-
+static struct resource mt7621_res_pci_mem1;
+static struct resource mt7621_res_pci_io1;
static struct pci_controller mt7621_controller = {
.pci_ops = &mt7621_pci_ops,
.mem_resource = &mt7621_res_pci_mem1,
.io_resource = &mt7621_res_pci_io1,
- .mem_offset = 0x00000000UL,
- .io_offset = 0x00000000UL,
- .io_map_base = 0xa0000000,
};
static void
@@ -341,10 +314,10 @@ read_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned l
unsigned int address_reg, data_reg, address;
address_reg = RALINK_PCI_CONFIG_ADDR;
- data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG;
+ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG;
address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ;
- MV_WRITE(address_reg, address);
- MV_READ(data_reg, val);
+ MV_WRITE(address_reg, address);
+ MV_READ(data_reg, val);
return;
}
@@ -361,74 +334,17 @@ write_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned
return;
}
-
int
pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
u16 cmd;
u32 val;
- int irq = 0;
-
- if ((dev->bus->number == 0) && (slot == 0)) {
- write_config(0, 0, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE);
- read_config(0, 0, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val);
- printk("BAR0 at slot 0 = %x\n", val);
- printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot);
- } else if((dev->bus->number == 0) && (slot == 0x1)) {
- write_config(0, 1, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE);
- read_config(0, 1, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val);
- printk("BAR0 at slot 1 = %x\n", val);
- printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot);
- } else if((dev->bus->number == 0) && (slot == 0x2)) {
- write_config(0, 2, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE);
- read_config(0, 2, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val);
- printk("BAR0 at slot 2 = %x\n", val);
- printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot);
- } else if ((dev->bus->number == 1) && (slot == 0x0)) {
- switch (pcie_link_status) {
- case 2:
- case 6:
- irq = RALINK_INT_PCIE1;
- break;
- case 4:
- irq = RALINK_INT_PCIE2;
- break;
- default:
- irq = RALINK_INT_PCIE0;
- }
- printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
- } else if ((dev->bus->number == 2) && (slot == 0x0)) {
- switch (pcie_link_status) {
- case 5:
- case 6:
- irq = RALINK_INT_PCIE2;
- break;
- default:
- irq = RALINK_INT_PCIE1;
- }
- printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
- } else if ((dev->bus->number == 2) && (slot == 0x1)) {
- switch (pcie_link_status) {
- case 5:
- case 6:
- irq = RALINK_INT_PCIE2;
- break;
- default:
- irq = RALINK_INT_PCIE1;
- }
- printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
- } else if ((dev->bus->number ==3) && (slot == 0x0)) {
- irq = RALINK_INT_PCIE2;
- printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
- } else if ((dev->bus->number ==3) && (slot == 0x1)) {
- irq = RALINK_INT_PCIE2;
- printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
- } else if ((dev->bus->number ==3) && (slot == 0x2)) {
- irq = RALINK_INT_PCIE2;
- printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
- } else {
- printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot);
- return 0;
+ int irq;
+
+ if (dev->bus->number == 0) {
+ write_config(0, slot, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE);
+ read_config(0, slot, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val);
+ printk("BAR0 at slot %d = %x\n", slot, val);
}
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14); //configure cache line size 0x14
@@ -436,46 +352,32 @@ pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ irq = of_irq_parse_and_map_pci(dev, slot, pin);
+
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
-#ifdef CONFIG_DTB_GNUBEE1
- /*
- * 'irq' here is a hwirq, but a virq is needed. Until we know how and where
- * to convert one to the other, we have this hack for the GNUBEE1
- */
- return irq == 11 ? 22 : irq;
-#else
return irq;
-#endif
}
void
set_pcie_phy(u32 *addr, int start_b, int bits, int val)
{
-// printk("0x%p:", addr);
-// printk(" %x", *addr);
*(unsigned int *)(addr) &= ~(((1<<bits) - 1)<<start_b);
*(unsigned int *)(addr) |= val << start_b;
-// printk(" -> %x\n", *addr);
}
void
bypass_pipe_rst(void)
{
-#if defined (CONFIG_PCIE_PORT0)
/* PCIe Port 0 */
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
-#endif
-#if defined (CONFIG_PCIE_PORT1)
/* PCIe Port 1 */
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
-#endif
-#if defined (CONFIG_PCIE_PORT2)
/* PCIe Port 2 */
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
-#endif
}
void
@@ -484,82 +386,79 @@ set_phy_for_ssc(void)
unsigned long reg = (*(volatile u32 *)(RALINK_SYSCTL_BASE + 0x10));
reg = (reg >> 6) & 0x7;
-#if defined (CONFIG_PCIE_PORT0) || defined (CONFIG_PCIE_PORT1)
/* Set PCIe Port0 & Port1 PHY to disable SSC */
/* Debug Xtal Type */
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 1 enable control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x00); // rg_pe1_phy_en //Port 1 disable
- if(reg <= 5 && reg >= 3) { // 40MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 1 enable control
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x00); // rg_pe1_phy_en //Port 1 disable
+ if(reg <= 5 && reg >= 3) { // 40MHz Xtal
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
printk("***** Xtal 40MHz *****\n");
} else { // 25MHz | 20MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
- if (reg >= 6) {
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
+ if (reg >= 6) {
printk("***** Xtal 25MHz *****\n");
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
} else {
printk("***** Xtal 20MHz *****\n");
}
}
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
- if(reg <= 5 && reg >= 3) { // 40MHz Xtal
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
+ if(reg <= 5 && reg >= 3) { // 40MHz Xtal
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
}
/* Enable PHY and disable force mode */
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x01); // rg_pe1_phy_en //Port 1 enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 1 disable control
-#endif
-#if defined (CONFIG_PCIE_PORT2)
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x01); // rg_pe1_phy_en //Port 1 enable
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 1 disable control
+
/* Set PCIe Port2 PHY to disable SSC */
/* Debug Xtal Type */
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
- if(reg <= 5 && reg >= 3) { // 40MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
+ if(reg <= 5 && reg >= 3) { // 40MHz Xtal
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
} else { // 25MHz | 20MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
- if (reg >= 6) { // 25MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
+ if (reg >= 6) { // 25MHz Xtal
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
}
}
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
- if(reg <= 5 && reg >= 3) { // 40MHz Xtal
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
+ if(reg <= 5 && reg >= 3) { // 40MHz Xtal
set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
}
/* Enable PHY and disable force mode */
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
-#endif
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
}
void setup_cm_memory_region(struct resource *mem_resource)
@@ -567,15 +466,15 @@ void setup_cm_memory_region(struct resource *mem_resource)
resource_size_t mask;
if (mips_cps_numiocu(0)) {
/* FIXME: hardware doesn't accept mask values with 1s after
- 0s (e.g. 0xffef), so it would be great to warn if that's
- about to happen */
+ * 0s (e.g. 0xffef), so it would be great to warn if that's
+ * about to happen */
mask = ~(mem_resource->end - mem_resource->start);
write_gcr_reg1_base(mem_resource->start);
write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
printk("PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n",
- (unsigned long long)read_gcr_reg1_base(),
- (unsigned long long)read_gcr_reg1_mask());
+ (unsigned long long)read_gcr_reg1_base(),
+ (unsigned long long)read_gcr_reg1_mask());
}
}
@@ -588,18 +487,12 @@ static int mt7621_pci_probe(struct platform_device *pdev)
ioport_resource.start= 0;
ioport_resource.end = ~0;
-#if defined (CONFIG_PCIE_PORT0)
val = RALINK_PCIE0_RST;
-#endif
-#if defined (CONFIG_PCIE_PORT1)
val |= RALINK_PCIE1_RST;
-#endif
-#if defined (CONFIG_PCIE_PORT2)
val |= RALINK_PCIE2_RST;
-#endif
+
ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST | RALINK_PCIE1_RST | RALINK_PCIE2_RST);
- printk("pull PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL);
-#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/
+
*(unsigned int *)(0xbe000060) &= ~(0x3<<10 | 0x3<<3);
*(unsigned int *)(0xbe000060) |= 0x1<<10 | 0x1<<3;
mdelay(100);
@@ -608,38 +501,25 @@ static int mt7621_pci_probe(struct platform_device *pdev)
*(unsigned int *)(0xbe000620) &= ~(0x1<<19 | 0x1<<8 | 0x1<<7); // clear DATA
mdelay(100);
-#else
- *(unsigned int *)(0xbe000060) &= ~0x00000c00;
-#endif
-#if defined (CONFIG_PCIE_PORT0)
+
val = RALINK_PCIE0_RST;
-#endif
-#if defined (CONFIG_PCIE_PORT1)
val |= RALINK_PCIE1_RST;
-#endif
-#if defined (CONFIG_PCIE_PORT2)
val |= RALINK_PCIE2_RST;
-#endif
+
DEASSERT_SYSRST_PCIE(val);
- printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL);
if ((*(unsigned int *)(0xbe00000c)&0xFFFF) == 0x0101) // MT7621 E2
bypass_pipe_rst();
set_phy_for_ssc();
- printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL);
-#if defined (CONFIG_PCIE_PORT0)
read_config(0, 0, 0, 0x70c, &val);
printk("Port 0 N_FTS = %x\n", (unsigned int)val);
-#endif
-#if defined (CONFIG_PCIE_PORT1)
+
read_config(0, 1, 0, 0x70c, &val);
printk("Port 1 N_FTS = %x\n", (unsigned int)val);
-#endif
-#if defined (CONFIG_PCIE_PORT2)
+
read_config(0, 2, 0, 0x70c, &val);
printk("Port 2 N_FTS = %x\n", (unsigned int)val);
-#endif
RALINK_RSTCTRL = (RALINK_RSTCTRL | RALINK_PCIE_RST);
RALINK_SYSCFG1 &= ~(0x30);
@@ -650,19 +530,11 @@ static int mt7621_pci_probe(struct platform_device *pdev)
RALINK_PCIE_CLK_GEN |= 0x80000000;
mdelay(50);
RALINK_RSTCTRL = (RALINK_RSTCTRL & ~RALINK_PCIE_RST);
-
-#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/
+ /* Use GPIO control instead of PERST_N */
*(unsigned int *)(0xbe000620) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // set DATA
- mdelay(100);
-#else
- RALINK_PCI_PCICFG_ADDR &= ~(1<<1); //de-assert PERST
-#endif
- mdelay(500);
-
+ mdelay(1000);
- mdelay(500);
-#if defined (CONFIG_PCIE_PORT0)
if(( RALINK_PCI0_STATUS & 0x1) == 0)
{
printk("PCIE0 no card, disable it(RST&CLK)\n");
@@ -673,8 +545,7 @@ static int mt7621_pci_probe(struct platform_device *pdev)
pcie_link_status |= 1<<0;
RALINK_PCI_PCIMSK_ADDR |= (1<<20); // enable pcie1 interrupt
}
-#endif
-#if defined (CONFIG_PCIE_PORT1)
+
if(( RALINK_PCI1_STATUS & 0x1) == 0)
{
printk("PCIE1 no card, disable it(RST&CLK)\n");
@@ -685,8 +556,7 @@ static int mt7621_pci_probe(struct platform_device *pdev)
pcie_link_status |= 1<<1;
RALINK_PCI_PCIMSK_ADDR |= (1<<21); // enable pcie1 interrupt
}
-#endif
-#if defined (CONFIG_PCIE_PORT2)
+
if (( RALINK_PCI2_STATUS & 0x1) == 0) {
printk("PCIE2 no card, disable it(RST&CLK)\n");
ASSERT_SYSRST_PCIE(RALINK_PCIE2_RST);
@@ -696,7 +566,7 @@ static int mt7621_pci_probe(struct platform_device *pdev)
pcie_link_status |= 1<<2;
RALINK_PCI_PCIMSK_ADDR |= (1<<22); // enable pcie2 interrupt
}
-#endif
+
if (pcie_link_status == 0)
return 0;
@@ -736,18 +606,15 @@ pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2
break;
}
- printk(" -> %x\n", RALINK_PCI_PCICFG_ADDR);
- //printk(" RALINK_PCI_ARBCTL = %x\n", RALINK_PCI_ARBCTL);
/*
ioport_resource.start = mt7621_res_pci_io1.start;
- ioport_resource.end = mt7621_res_pci_io1.end;
+ ioport_resource.end = mt7621_res_pci_io1.end;
*/
RALINK_PCI_MEMBASE = 0xffffffff; //RALINK_PCI_MM_MAP_BASE;
RALINK_PCI_IOBASE = RALINK_PCI_IO_MAP_BASE;
-#if defined (CONFIG_PCIE_PORT0)
//PCIe0
if((pcie_link_status & 0x1) != 0) {
RALINK_PCI0_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE
@@ -755,8 +622,7 @@ pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
RALINK_PCI0_CLASS = 0x06040001;
printk("PCIE0 enabled\n");
}
-#endif
-#if defined (CONFIG_PCIE_PORT1)
+
//PCIe1
if ((pcie_link_status & 0x2) != 0) {
RALINK_PCI1_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE
@@ -764,8 +630,7 @@ pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
RALINK_PCI1_CLASS = 0x06040001;
printk("PCIE1 enabled\n");
}
-#endif
-#if defined (CONFIG_PCIE_PORT2)
+
//PCIe2
if ((pcie_link_status & 0x4) != 0) {
RALINK_PCI2_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE
@@ -773,14 +638,11 @@ pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
RALINK_PCI2_CLASS = 0x06040001;
printk("PCIE2 enabled\n");
}
-#endif
-
switch(pcie_link_status) {
case 7:
read_config(0, 2, 0, 0x4, &val);
write_config(0, 2, 0, 0x4, val|0x4);
- // write_config(0, 1, 0, 0x4, val|0x7);
read_config(0, 2, 0, 0x70c, &val);
val &= ~(0xff)<<8;
val |= 0x50<<8;
@@ -790,7 +652,6 @@ pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
case 6:
read_config(0, 1, 0, 0x4, &val);
write_config(0, 1, 0, 0x4, val|0x4);
- // write_config(0, 1, 0, 0x4, val|0x7);
read_config(0, 1, 0, 0x70c, &val);
val &= ~(0xff)<<8;
val |= 0x50<<8;
@@ -798,7 +659,6 @@ pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
default:
read_config(0, 0, 0, 0x4, &val);
write_config(0, 0, 0, 0x4, val|0x4); //bus master enable
- // write_config(0, 0, 0, 0x4, val|0x7); //bus master enable
read_config(0, 0, 0, 0x70c, &val);
val &= ~(0xff)<<8;
val |= 0x50<<8;
@@ -827,7 +687,6 @@ static struct platform_driver mt7621_pci_driver = {
.probe = mt7621_pci_probe,
.driver = {
.name = "mt7621-pci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mt7621_pci_ids),
},
};
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index 3d2d1c2a006f..2d9ab2620b82 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -459,7 +459,6 @@ static struct platform_driver rt2880_pinmux_driver = {
.probe = rt2880_pinmux_probe,
.driver = {
.name = "rt2880-pinmux",
- .owner = THIS_MODULE,
.of_match_table = rt2880_pinmux_match,
},
};
diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c
index d95e0b32f1f0..37f299080410 100644
--- a/drivers/staging/mt7621-spi/spi-mt7621.c
+++ b/drivers/staging/mt7621-spi/spi-mt7621.c
@@ -65,7 +65,6 @@ struct mt7621_spi {
unsigned int sys_freq;
unsigned int speed;
struct clk *clk;
- spinlock_t lock;
struct mt7621_spi_ops *ops;
};
@@ -395,7 +394,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct spi_master *master;
struct mt7621_spi *rs;
- unsigned long flags;
void __iomem *base;
struct resource *r;
int status = 0;
@@ -447,7 +445,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
rs->sys_freq = clk_get_rate(rs->clk);
rs->ops = ops;
dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
- spin_lock_irqsave(&rs->lock, flags);
device_reset(&pdev->dev);
@@ -475,7 +472,6 @@ MODULE_ALIAS("platform:" DRIVER_NAME);
static struct platform_driver mt7621_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = mt7621_spi_match,
},
.probe = mt7621_spi_probe,
diff --git a/drivers/staging/ncpfs/Kconfig b/drivers/staging/ncpfs/Kconfig
deleted file mode 100644
index c931cf22a1f6..000000000000
--- a/drivers/staging/ncpfs/Kconfig
+++ /dev/null
@@ -1,108 +0,0 @@
-#
-# NCP Filesystem configuration
-#
-config NCP_FS
- tristate "NCP file system support (to mount NetWare volumes)"
- depends on IPX!=n || INET
- help
- NCP (NetWare Core Protocol) is a protocol that runs over IPX and is
- used by Novell NetWare clients to talk to file servers. It is to
- IPX what NFS is to TCP/IP, if that helps. Saying Y here allows you
- to mount NetWare file server volumes and to access them just like
- any other Unix directory. For details, please read the file
- <file:Documentation/filesystems/ncpfs.txt> in the kernel source and
- the IPX-HOWTO from <http://www.tldp.org/docs.html#howto>.
-
- You do not have to say Y here if you want your Linux box to act as a
- file *server* for Novell NetWare clients.
-
- General information about how to connect Linux, Windows machines and
- Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
-
- To compile this as a module, choose M here: the module will be called
- ncpfs. Say N unless you are connected to a Novell network.
-
-config NCPFS_PACKET_SIGNING
- bool "Packet signatures"
- depends on NCP_FS
- help
- NCP allows packets to be signed for stronger security. If you want
- security, say Y. Normal users can leave it off. To be able to use
- packet signing you must use ncpfs > 2.0.12.
-
-config NCPFS_IOCTL_LOCKING
- bool "Proprietary file locking"
- depends on NCP_FS
- help
- Allows locking of records on remote volumes. Say N unless you have
- special applications which are able to utilize this locking scheme.
-
-config NCPFS_STRONG
- bool "Clear remove/delete inhibit when needed"
- depends on NCP_FS
- help
- Allows manipulation of files flagged as Delete or Rename Inhibit.
- To use this feature you must mount volumes with the ncpmount
- parameter "-s" (ncpfs-2.0.12 and newer). Say Y unless you are not
- mounting volumes with -f 444.
-
-config NCPFS_NFS_NS
- bool "Use NFS namespace if available"
- depends on NCP_FS
- help
- Allows you to utilize NFS namespace on NetWare servers. It brings
- you case sensitive filenames. Say Y. You can disable it at
- mount-time with the `-N nfs' parameter of ncpmount.
-
-config NCPFS_OS2_NS
- bool "Use LONG (OS/2) namespace if available"
- depends on NCP_FS
- help
- Allows you to utilize OS2/LONG namespace on NetWare servers.
- Filenames in this namespace are limited to 255 characters, they are
- case insensitive, and case in names is preserved. Say Y. You can
- disable it at mount time with the -N os2 parameter of ncpmount.
-
-config NCPFS_SMALLDOS
- bool "Lowercase DOS filenames"
- depends on NCP_FS
- ---help---
- If you say Y here, every filename on a NetWare server volume using
- the OS2/LONG namespace and created under DOS or on a volume using
- DOS namespace will be converted to lowercase characters.
- Saying N here will give you these filenames in uppercase.
-
- This is only a cosmetic option since the OS2/LONG namespace is case
- insensitive. The only major reason for this option is backward
- compatibility when moving from DOS to OS2/LONG namespace support.
- Long filenames (created by Win95) will not be affected.
-
- This option does not solve the problem that filenames appear
- differently under Linux and under Windows, since Windows does an
- additional conversions on the client side. You can achieve similar
- effects by saying Y to "Allow using of Native Language Support"
- below.
-
-config NCPFS_NLS
- bool "Use Native Language Support"
- depends on NCP_FS
- select NLS
- help
- Allows you to use codepages and I/O charsets for file name
- translation between the server file system and input/output. This
- may be useful, if you want to access the server with other operating
- systems, e.g. Windows 95. See also NLS for more Information.
-
- To select codepages and I/O charsets use ncpfs-2.2.0.13 or newer.
-
-config NCPFS_EXTRAS
- bool "Enable symbolic links and execute flags"
- depends on NCP_FS
- help
- This enables the use of symbolic links and an execute permission
- bit on NCPFS. The file server need not have long name space or NFS
- name space loaded for these to work.
-
- To use the new attributes, it is recommended to use the flags
- '-f 600 -d 755' on the ncpmount command line.
-
diff --git a/drivers/staging/ncpfs/Makefile b/drivers/staging/ncpfs/Makefile
deleted file mode 100644
index 66fe5f878817..000000000000
--- a/drivers/staging/ncpfs/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the linux ncp filesystem routines.
-#
-
-obj-$(CONFIG_NCP_FS) += ncpfs.o
-
-ncpfs-y := dir.o file.o inode.o ioctl.o mmap.o ncplib_kernel.o sock.o \
- ncpsign_kernel.o getopt.o
-
-ncpfs-$(CONFIG_NCPFS_EXTRAS) += symlink.o
-ncpfs-$(CONFIG_NCPFS_NFS_NS) += symlink.o
-
-# If you want debugging output, please uncomment the following line
-# ccflags-y := -DDEBUG_NCP=1
-
-CFLAGS_ncplib_kernel.o := -finline-functions
diff --git a/drivers/staging/ncpfs/TODO b/drivers/staging/ncpfs/TODO
deleted file mode 100644
index 9b6d38b7e248..000000000000
--- a/drivers/staging/ncpfs/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-The ncpfs code will be removed soon from the kernel tree as it is old and
-obsolete and broken.
-
-Don't worry about fixing up anything here, it's not needed.
diff --git a/drivers/staging/ncpfs/dir.c b/drivers/staging/ncpfs/dir.c
deleted file mode 100644
index 072bcb12898f..000000000000
--- a/drivers/staging/ncpfs/dir.c
+++ /dev/null
@@ -1,1220 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * dir.c
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- * Modified for big endian by J.F. Chadima and David S. Miller
- * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
- * Modified 1998, 1999 Wolfram Pienkoss for NLS
- * Modified 1999 Wolfram Pienkoss for directory caching
- * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
- *
- */
-
-
-#include <linux/time.h>
-#include <linux/errno.h>
-#include <linux/stat.h>
-#include <linux/kernel.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/namei.h>
-#include <linux/uaccess.h>
-#include <asm/byteorder.h>
-
-#include "ncp_fs.h"
-
-static void ncp_read_volume_list(struct file *, struct dir_context *,
- struct ncp_cache_control *);
-static void ncp_do_readdir(struct file *, struct dir_context *,
- struct ncp_cache_control *);
-
-static int ncp_readdir(struct file *, struct dir_context *);
-
-static int ncp_create(struct inode *, struct dentry *, umode_t, bool);
-static struct dentry *ncp_lookup(struct inode *, struct dentry *, unsigned int);
-static int ncp_unlink(struct inode *, struct dentry *);
-static int ncp_mkdir(struct inode *, struct dentry *, umode_t);
-static int ncp_rmdir(struct inode *, struct dentry *);
-static int ncp_rename(struct inode *, struct dentry *,
- struct inode *, struct dentry *, unsigned int);
-static int ncp_mknod(struct inode * dir, struct dentry *dentry,
- umode_t mode, dev_t rdev);
-#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
-extern int ncp_symlink(struct inode *, struct dentry *, const char *);
-#else
-#define ncp_symlink NULL
-#endif
-
-const struct file_operations ncp_dir_operations =
-{
- .llseek = generic_file_llseek,
- .read = generic_read_dir,
- .iterate = ncp_readdir,
- .unlocked_ioctl = ncp_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ncp_compat_ioctl,
-#endif
-};
-
-const struct inode_operations ncp_dir_inode_operations =
-{
- .create = ncp_create,
- .lookup = ncp_lookup,
- .unlink = ncp_unlink,
- .symlink = ncp_symlink,
- .mkdir = ncp_mkdir,
- .rmdir = ncp_rmdir,
- .mknod = ncp_mknod,
- .rename = ncp_rename,
- .setattr = ncp_notify_change,
-};
-
-/*
- * Dentry operations routines
- */
-static int ncp_lookup_validate(struct dentry *, unsigned int);
-static int ncp_hash_dentry(const struct dentry *, struct qstr *);
-static int ncp_compare_dentry(const struct dentry *,
- unsigned int, const char *, const struct qstr *);
-static int ncp_delete_dentry(const struct dentry *);
-static void ncp_d_prune(struct dentry *dentry);
-
-const struct dentry_operations ncp_dentry_operations =
-{
- .d_revalidate = ncp_lookup_validate,
- .d_hash = ncp_hash_dentry,
- .d_compare = ncp_compare_dentry,
- .d_delete = ncp_delete_dentry,
- .d_prune = ncp_d_prune,
-};
-
-#define ncp_namespace(i) (NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber])
-
-static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator)
-{
-#ifdef CONFIG_NCPFS_SMALLDOS
- int ns = ncp_namespace(i);
-
- if ((ns == NW_NS_DOS)
-#ifdef CONFIG_NCPFS_OS2_NS
- || ((ns == NW_NS_OS2) && (nscreator == NW_NS_DOS))
-#endif /* CONFIG_NCPFS_OS2_NS */
- )
- return 0;
-#endif /* CONFIG_NCPFS_SMALLDOS */
- return 1;
-}
-
-#define ncp_preserve_case(i) (ncp_namespace(i) != NW_NS_DOS)
-
-static inline int ncp_case_sensitive(const struct inode *i)
-{
-#ifdef CONFIG_NCPFS_NFS_NS
- return ncp_namespace(i) == NW_NS_NFS;
-#else
- return 0;
-#endif /* CONFIG_NCPFS_NFS_NS */
-}
-
-/*
- * Note: leave the hash unchanged if the directory
- * is case-sensitive.
- */
-static int
-ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
-{
- struct inode *inode = d_inode_rcu(dentry);
-
- if (!inode)
- return 0;
-
- if (!ncp_case_sensitive(inode)) {
- struct nls_table *t;
- unsigned long hash;
- int i;
-
- t = NCP_IO_TABLE(dentry->d_sb);
- hash = init_name_hash(dentry);
- for (i=0; i<this->len ; i++)
- hash = partial_name_hash(ncp_tolower(t, this->name[i]),
- hash);
- this->hash = end_name_hash(hash);
- }
- return 0;
-}
-
-static int
-ncp_compare_dentry(const struct dentry *dentry,
- unsigned int len, const char *str, const struct qstr *name)
-{
- struct inode *pinode;
-
- if (len != name->len)
- return 1;
-
- pinode = d_inode_rcu(dentry->d_parent);
- if (!pinode)
- return 1;
-
- if (ncp_case_sensitive(pinode))
- return strncmp(str, name->name, len);
-
- return ncp_strnicmp(NCP_IO_TABLE(pinode->i_sb), str, name->name, len);
-}
-
-/*
- * This is the callback from dput() when d_count is going to 0.
- * We use this to unhash dentries with bad inodes.
- * Closing files can be safely postponed until iput() - it's done there anyway.
- */
-static int
-ncp_delete_dentry(const struct dentry * dentry)
-{
- struct inode *inode = d_inode(dentry);
-
- if (inode) {
- if (is_bad_inode(inode))
- return 1;
- } else
- {
- /* N.B. Unhash negative dentries? */
- }
- return 0;
-}
-
-static inline int
-ncp_single_volume(struct ncp_server *server)
-{
- return (server->m.mounted_vol[0] != '\0');
-}
-
-static inline int ncp_is_server_root(struct inode *inode)
-{
- return !ncp_single_volume(NCP_SERVER(inode)) &&
- is_root_inode(inode);
-}
-
-
-/*
- * This is the callback when the dcache has a lookup hit.
- */
-
-
-#ifdef CONFIG_NCPFS_STRONG
-/* try to delete a readonly file (NW R bit set) */
-
-static int
-ncp_force_unlink(struct inode *dir, struct dentry* dentry)
-{
- int res=0x9c,res2;
- struct nw_modify_dos_info info;
- __le32 old_nwattr;
- struct inode *inode;
-
- memset(&info, 0, sizeof(info));
-
- /* remove the Read-Only flag on the NW server */
- inode = d_inode(dentry);
-
- old_nwattr = NCP_FINFO(inode)->nwattr;
- info.attributes = old_nwattr & ~(aRONLY|aDELETEINHIBIT|aRENAMEINHIBIT);
- res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(inode), inode, NULL, DM_ATTRIBUTES, &info);
- if (res2)
- goto leave_me;
-
- /* now try again the delete operation */
- res = ncp_del_file_or_subdir2(NCP_SERVER(dir), dentry);
-
- if (res) /* delete failed, set R bit again */
- {
- info.attributes = old_nwattr;
- res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(inode), inode, NULL, DM_ATTRIBUTES, &info);
- if (res2)
- goto leave_me;
- }
-leave_me:
- return(res);
-}
-#endif /* CONFIG_NCPFS_STRONG */
-
-#ifdef CONFIG_NCPFS_STRONG
-static int
-ncp_force_rename(struct inode *old_dir, struct dentry* old_dentry, char *_old_name,
- struct inode *new_dir, struct dentry* new_dentry, char *_new_name)
-{
- struct nw_modify_dos_info info;
- int res=0x90,res2;
- struct inode *old_inode = d_inode(old_dentry);
- __le32 old_nwattr = NCP_FINFO(old_inode)->nwattr;
- __le32 new_nwattr = 0; /* shut compiler warning */
- int old_nwattr_changed = 0;
- int new_nwattr_changed = 0;
-
- memset(&info, 0, sizeof(info));
-
- /* remove the Read-Only flag on the NW server */
-
- info.attributes = old_nwattr & ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
- res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(old_inode), old_inode, NULL, DM_ATTRIBUTES, &info);
- if (!res2)
- old_nwattr_changed = 1;
- if (new_dentry && d_really_is_positive(new_dentry)) {
- new_nwattr = NCP_FINFO(d_inode(new_dentry))->nwattr;
- info.attributes = new_nwattr & ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
- res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(new_dir), new_dir, _new_name, DM_ATTRIBUTES, &info);
- if (!res2)
- new_nwattr_changed = 1;
- }
- /* now try again the rename operation */
- /* but only if something really happened */
- if (new_nwattr_changed || old_nwattr_changed) {
- res = ncp_ren_or_mov_file_or_subdir(NCP_SERVER(old_dir),
- old_dir, _old_name,
- new_dir, _new_name);
- }
- if (res)
- goto leave_me;
- /* file was successfully renamed, so:
- do not set attributes on old file - it no longer exists
- copy attributes from old file to new */
- new_nwattr_changed = old_nwattr_changed;
- new_nwattr = old_nwattr;
- old_nwattr_changed = 0;
-
-leave_me:;
- if (old_nwattr_changed) {
- info.attributes = old_nwattr;
- res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(old_inode), old_inode, NULL, DM_ATTRIBUTES, &info);
- /* ignore errors */
- }
- if (new_nwattr_changed) {
- info.attributes = new_nwattr;
- res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(new_dir), new_dir, _new_name, DM_ATTRIBUTES, &info);
- /* ignore errors */
- }
- return(res);
-}
-#endif /* CONFIG_NCPFS_STRONG */
-
-
-static int
-ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
-{
- struct ncp_server *server;
- struct dentry *parent;
- struct inode *dir;
- struct ncp_entry_info finfo;
- int res, val = 0, len;
- __u8 __name[NCP_MAXPATHLEN + 1];
-
- if (dentry == dentry->d_sb->s_root)
- return 1;
-
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- parent = dget_parent(dentry);
- dir = d_inode(parent);
-
- if (d_really_is_negative(dentry))
- goto finished;
-
- server = NCP_SERVER(dir);
-
- /*
- * Inspired by smbfs:
- * The default validation is based on dentry age:
- * We set the max age at mount time. (But each
- * successful server lookup renews the timestamp.)
- */
- val = NCP_TEST_AGE(server, dentry);
- if (val)
- goto finished;
-
- ncp_dbg(2, "%pd2 not valid, age=%ld, server lookup\n",
- dentry, NCP_GET_AGE(dentry));
-
- len = sizeof(__name);
- if (ncp_is_server_root(dir)) {
- res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
- dentry->d_name.len, 1);
- if (!res) {
- res = ncp_lookup_volume(server, __name, &(finfo.i));
- if (!res)
- ncp_update_known_namespace(server, finfo.i.volNumber, NULL);
- }
- } else {
- res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
- dentry->d_name.len, !ncp_preserve_case(dir));
- if (!res)
- res = ncp_obtain_info(server, dir, __name, &(finfo.i));
- }
- finfo.volume = finfo.i.volNumber;
- ncp_dbg(2, "looked for %pd/%s, res=%d\n",
- dentry->d_parent, __name, res);
- /*
- * If we didn't find it, or if it has a different dirEntNum to
- * what we remember, it's not valid any more.
- */
- if (!res) {
- struct inode *inode = d_inode(dentry);
-
- inode_lock(inode);
- if (finfo.i.dirEntNum == NCP_FINFO(inode)->dirEntNum) {
- ncp_new_dentry(dentry);
- val=1;
- } else
- ncp_dbg(2, "found, but dirEntNum changed\n");
-
- ncp_update_inode2(inode, &finfo);
- inode_unlock(inode);
- }
-
-finished:
- ncp_dbg(2, "result=%d\n", val);
- dput(parent);
- return val;
-}
-
-static time_t ncp_obtain_mtime(struct dentry *dentry)
-{
- struct inode *inode = d_inode(dentry);
- struct ncp_server *server = NCP_SERVER(inode);
- struct nw_info_struct i;
-
- if (!ncp_conn_valid(server) || ncp_is_server_root(inode))
- return 0;
-
- if (ncp_obtain_info(server, inode, NULL, &i))
- return 0;
-
- return ncp_date_dos2unix(i.modifyTime, i.modifyDate);
-}
-
-static inline void
-ncp_invalidate_dircache_entries(struct dentry *parent)
-{
- struct ncp_server *server = NCP_SERVER(d_inode(parent));
- struct dentry *dentry;
-
- spin_lock(&parent->d_lock);
- list_for_each_entry(dentry, &parent->d_subdirs, d_child) {
- dentry->d_fsdata = NULL;
- ncp_age_dentry(server, dentry);
- }
- spin_unlock(&parent->d_lock);
-}
-
-static int ncp_readdir(struct file *file, struct dir_context *ctx)
-{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = d_inode(dentry);
- struct page *page = NULL;
- struct ncp_server *server = NCP_SERVER(inode);
- union ncp_dir_cache *cache = NULL;
- struct ncp_cache_control ctl;
- int result, mtime_valid = 0;
- time_t mtime = 0;
-
- ctl.page = NULL;
- ctl.cache = NULL;
-
- ncp_dbg(2, "reading %pD2, pos=%d\n", file, (int)ctx->pos);
-
- result = -EIO;
- /* Do not generate '.' and '..' when server is dead. */
- if (!ncp_conn_valid(server))
- goto out;
-
- result = 0;
- if (!dir_emit_dots(file, ctx))
- goto out;
-
- page = grab_cache_page(&inode->i_data, 0);
- if (!page)
- goto read_really;
-
- ctl.cache = cache = kmap(page);
- ctl.head = cache->head;
-
- if (!PageUptodate(page) || !ctl.head.eof)
- goto init_cache;
-
- if (ctx->pos == 2) {
- if (jiffies - ctl.head.time >= NCP_MAX_AGE(server))
- goto init_cache;
-
- mtime = ncp_obtain_mtime(dentry);
- mtime_valid = 1;
- if ((!mtime) || (mtime != ctl.head.mtime))
- goto init_cache;
- }
-
- if (ctx->pos > ctl.head.end)
- goto finished;
-
- ctl.fpos = ctx->pos + (NCP_DIRCACHE_START - 2);
- ctl.ofs = ctl.fpos / NCP_DIRCACHE_SIZE;
- ctl.idx = ctl.fpos % NCP_DIRCACHE_SIZE;
-
- for (;;) {
- if (ctl.ofs != 0) {
- ctl.page = find_lock_page(&inode->i_data, ctl.ofs);
- if (!ctl.page)
- goto invalid_cache;
- ctl.cache = kmap(ctl.page);
- if (!PageUptodate(ctl.page))
- goto invalid_cache;
- }
- while (ctl.idx < NCP_DIRCACHE_SIZE) {
- struct dentry *dent;
- bool over;
-
- spin_lock(&dentry->d_lock);
- if (!(NCP_FINFO(inode)->flags & NCPI_DIR_CACHE)) {
- spin_unlock(&dentry->d_lock);
- goto invalid_cache;
- }
- dent = ctl.cache->dentry[ctl.idx];
- if (unlikely(!lockref_get_not_dead(&dent->d_lockref))) {
- spin_unlock(&dentry->d_lock);
- goto invalid_cache;
- }
- spin_unlock(&dentry->d_lock);
- if (d_really_is_negative(dent)) {
- dput(dent);
- goto invalid_cache;
- }
- over = !dir_emit(ctx, dent->d_name.name,
- dent->d_name.len,
- d_inode(dent)->i_ino, DT_UNKNOWN);
- dput(dent);
- if (over)
- goto finished;
- ctx->pos += 1;
- ctl.idx += 1;
- if (ctx->pos > ctl.head.end)
- goto finished;
- }
- if (ctl.page) {
- kunmap(ctl.page);
- SetPageUptodate(ctl.page);
- unlock_page(ctl.page);
- put_page(ctl.page);
- ctl.page = NULL;
- }
- ctl.idx = 0;
- ctl.ofs += 1;
- }
-invalid_cache:
- if (ctl.page) {
- kunmap(ctl.page);
- unlock_page(ctl.page);
- put_page(ctl.page);
- ctl.page = NULL;
- }
- ctl.cache = cache;
-init_cache:
- ncp_invalidate_dircache_entries(dentry);
- if (!mtime_valid) {
- mtime = ncp_obtain_mtime(dentry);
- mtime_valid = 1;
- }
- ctl.head.mtime = mtime;
- ctl.head.time = jiffies;
- ctl.head.eof = 0;
- ctl.fpos = 2;
- ctl.ofs = 0;
- ctl.idx = NCP_DIRCACHE_START;
- ctl.filled = 0;
- ctl.valid = 1;
-read_really:
- spin_lock(&dentry->d_lock);
- NCP_FINFO(inode)->flags |= NCPI_DIR_CACHE;
- spin_unlock(&dentry->d_lock);
- if (ncp_is_server_root(inode)) {
- ncp_read_volume_list(file, ctx, &ctl);
- } else {
- ncp_do_readdir(file, ctx, &ctl);
- }
- ctl.head.end = ctl.fpos - 1;
- ctl.head.eof = ctl.valid;
-finished:
- if (ctl.page) {
- kunmap(ctl.page);
- SetPageUptodate(ctl.page);
- unlock_page(ctl.page);
- put_page(ctl.page);
- }
- if (page) {
- cache->head = ctl.head;
- kunmap(page);
- SetPageUptodate(page);
- unlock_page(page);
- put_page(page);
- }
-out:
- return result;
-}
-
-static void ncp_d_prune(struct dentry *dentry)
-{
- if (!dentry->d_fsdata) /* not referenced from page cache */
- return;
- NCP_FINFO(d_inode(dentry->d_parent))->flags &= ~NCPI_DIR_CACHE;
-}
-
-static int
-ncp_fill_cache(struct file *file, struct dir_context *ctx,
- struct ncp_cache_control *ctrl, struct ncp_entry_info *entry,
- int inval_childs)
-{
- struct dentry *newdent, *dentry = file->f_path.dentry;
- struct inode *dir = d_inode(dentry);
- struct ncp_cache_control ctl = *ctrl;
- struct qstr qname;
- int valid = 0;
- int hashed = 0;
- ino_t ino = 0;
- __u8 __name[NCP_MAXPATHLEN + 1];
-
- qname.len = sizeof(__name);
- if (ncp_vol2io(NCP_SERVER(dir), __name, &qname.len,
- entry->i.entryName, entry->i.nameLen,
- !ncp_preserve_entry_case(dir, entry->i.NSCreator)))
- return 1; /* I'm not sure */
-
- qname.name = __name;
-
- newdent = d_hash_and_lookup(dentry, &qname);
- if (IS_ERR(newdent))
- goto end_advance;
- if (!newdent) {
- newdent = d_alloc(dentry, &qname);
- if (!newdent)
- goto end_advance;
- } else {
- hashed = 1;
-
- /* If case sensitivity changed for this volume, all entries below this one
- should be thrown away. This entry itself is not affected, as its case
- sensitivity is controlled by its own parent. */
- if (inval_childs)
- shrink_dcache_parent(newdent);
-
- /*
- * NetWare's OS2 namespace is case preserving yet case
- * insensitive. So we update dentry's name as received from
- * server. Parent dir's i_mutex is locked because we're in
- * readdir.
- */
- dentry_update_name_case(newdent, &qname);
- }
-
- if (d_really_is_negative(newdent)) {
- struct inode *inode;
-
- entry->opened = 0;
- entry->ino = iunique(dir->i_sb, 2);
- inode = ncp_iget(dir->i_sb, entry);
- if (inode) {
- d_instantiate(newdent, inode);
- if (!hashed)
- d_rehash(newdent);
- } else {
- spin_lock(&dentry->d_lock);
- NCP_FINFO(dir)->flags &= ~NCPI_DIR_CACHE;
- spin_unlock(&dentry->d_lock);
- }
- } else {
- struct inode *inode = d_inode(newdent);
-
- inode_lock_nested(inode, I_MUTEX_CHILD);
- ncp_update_inode2(inode, entry);
- inode_unlock(inode);
- }
-
- if (ctl.idx >= NCP_DIRCACHE_SIZE) {
- if (ctl.page) {
- kunmap(ctl.page);
- SetPageUptodate(ctl.page);
- unlock_page(ctl.page);
- put_page(ctl.page);
- }
- ctl.cache = NULL;
- ctl.idx -= NCP_DIRCACHE_SIZE;
- ctl.ofs += 1;
- ctl.page = grab_cache_page(&dir->i_data, ctl.ofs);
- if (ctl.page)
- ctl.cache = kmap(ctl.page);
- }
- if (ctl.cache) {
- if (d_really_is_positive(newdent)) {
- newdent->d_fsdata = newdent;
- ctl.cache->dentry[ctl.idx] = newdent;
- ino = d_inode(newdent)->i_ino;
- ncp_new_dentry(newdent);
- }
- valid = 1;
- }
- dput(newdent);
-end_advance:
- if (!valid)
- ctl.valid = 0;
- if (!ctl.filled && (ctl.fpos == ctx->pos)) {
- if (!ino)
- ino = iunique(dir->i_sb, 2);
- ctl.filled = !dir_emit(ctx, qname.name, qname.len,
- ino, DT_UNKNOWN);
- if (!ctl.filled)
- ctx->pos += 1;
- }
- ctl.fpos += 1;
- ctl.idx += 1;
- *ctrl = ctl;
- return (ctl.valid || !ctl.filled);
-}
-
-static void
-ncp_read_volume_list(struct file *file, struct dir_context *ctx,
- struct ncp_cache_control *ctl)
-{
- struct inode *inode = file_inode(file);
- struct ncp_server *server = NCP_SERVER(inode);
- struct ncp_volume_info info;
- struct ncp_entry_info entry;
- int i;
-
- ncp_dbg(1, "pos=%ld\n", (unsigned long)ctx->pos);
-
- for (i = 0; i < NCP_NUMBER_OF_VOLUMES; i++) {
- int inval_dentry;
-
- if (ncp_get_volume_info_with_number(server, i, &info) != 0)
- return;
- if (!strlen(info.volume_name))
- continue;
-
- ncp_dbg(1, "found vol: %s\n", info.volume_name);
-
- if (ncp_lookup_volume(server, info.volume_name,
- &entry.i)) {
- ncp_dbg(1, "could not lookup vol %s\n",
- info.volume_name);
- continue;
- }
- inval_dentry = ncp_update_known_namespace(server, entry.i.volNumber, NULL);
- entry.volume = entry.i.volNumber;
- if (!ncp_fill_cache(file, ctx, ctl, &entry, inval_dentry))
- return;
- }
-}
-
-static void
-ncp_do_readdir(struct file *file, struct dir_context *ctx,
- struct ncp_cache_control *ctl)
-{
- struct inode *dir = file_inode(file);
- struct ncp_server *server = NCP_SERVER(dir);
- struct nw_search_sequence seq;
- struct ncp_entry_info entry;
- int err;
- void* buf;
- int more;
- size_t bufsize;
-
- ncp_dbg(1, "%pD2, fpos=%ld\n", file, (unsigned long)ctx->pos);
- ncp_vdbg("init %pD, volnum=%d, dirent=%u\n",
- file, NCP_FINFO(dir)->volNumber, NCP_FINFO(dir)->dirEntNum);
-
- err = ncp_initialize_search(server, dir, &seq);
- if (err) {
- ncp_dbg(1, "init failed, err=%d\n", err);
- return;
- }
- /* We MUST NOT use server->buffer_size handshaked with server if we are
- using UDP, as for UDP server uses max. buffer size determined by
- MTU, and for TCP server uses hardwired value 65KB (== 66560 bytes).
- So we use 128KB, just to be sure, as there is no way how to know
- this value in advance. */
- bufsize = 131072;
- buf = vmalloc(bufsize);
- if (!buf)
- return;
- do {
- int cnt;
- char* rpl;
- size_t rpls;
-
- err = ncp_search_for_fileset(server, &seq, &more, &cnt, buf, bufsize, &rpl, &rpls);
- if (err) /* Error */
- break;
- if (!cnt) /* prevent endless loop */
- break;
- while (cnt--) {
- size_t onerpl;
-
- if (rpls < offsetof(struct nw_info_struct, entryName))
- break; /* short packet */
- ncp_extract_file_info(rpl, &entry.i);
- onerpl = offsetof(struct nw_info_struct, entryName) + entry.i.nameLen;
- if (rpls < onerpl)
- break; /* short packet */
- (void)ncp_obtain_nfs_info(server, &entry.i);
- rpl += onerpl;
- rpls -= onerpl;
- entry.volume = entry.i.volNumber;
- if (!ncp_fill_cache(file, ctx, ctl, &entry, 0))
- break;
- }
- } while (more);
- vfree(buf);
- return;
-}
-
-int ncp_conn_logged_in(struct super_block *sb)
-{
- struct ncp_server* server = NCP_SBP(sb);
- int result;
-
- if (ncp_single_volume(server)) {
- int len;
- struct dentry* dent;
- __u32 volNumber;
- __le32 dirEntNum;
- __le32 DosDirNum;
- __u8 __name[NCP_MAXPATHLEN + 1];
-
- len = sizeof(__name);
- result = ncp_io2vol(server, __name, &len, server->m.mounted_vol,
- strlen(server->m.mounted_vol), 1);
- if (result)
- goto out;
- result = -ENOENT;
- if (ncp_get_volume_root(server, __name, &volNumber, &dirEntNum, &DosDirNum)) {
- ncp_vdbg("%s not found\n", server->m.mounted_vol);
- goto out;
- }
- dent = sb->s_root;
- if (dent) {
- struct inode* ino = d_inode(dent);
- if (ino) {
- ncp_update_known_namespace(server, volNumber, NULL);
- NCP_FINFO(ino)->volNumber = volNumber;
- NCP_FINFO(ino)->dirEntNum = dirEntNum;
- NCP_FINFO(ino)->DosDirNum = DosDirNum;
- result = 0;
- } else {
- ncp_dbg(1, "d_inode(sb->s_root) == NULL!\n");
- }
- } else {
- ncp_dbg(1, "sb->s_root == NULL!\n");
- }
- } else
- result = 0;
-
-out:
- return result;
-}
-
-static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
-{
- struct ncp_server *server = NCP_SERVER(dir);
- struct inode *inode = NULL;
- struct ncp_entry_info finfo;
- int res, len;
- __u8 __name[NCP_MAXPATHLEN + 1];
-
- if (!ncp_conn_valid(server))
- return ERR_PTR(-EIO);
-
- ncp_vdbg("server lookup for %pd2\n", dentry);
-
- len = sizeof(__name);
- if (ncp_is_server_root(dir)) {
- res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
- dentry->d_name.len, 1);
- if (!res)
- res = ncp_lookup_volume(server, __name, &(finfo.i));
- if (!res)
- ncp_update_known_namespace(server, finfo.i.volNumber, NULL);
- } else {
- res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
- dentry->d_name.len, !ncp_preserve_case(dir));
- if (!res)
- res = ncp_obtain_info(server, dir, __name, &(finfo.i));
- }
- ncp_vdbg("looked for %pd2, res=%d\n", dentry, res);
- if (!res) {
- /*
- * Entry found; create an inode for it.
- */
- finfo.opened = 0;
- finfo.ino = iunique(dir->i_sb, 2);
- finfo.volume = finfo.i.volNumber;
- inode = ncp_iget(dir->i_sb, &finfo);
- if (unlikely(!inode))
- inode = ERR_PTR(-EACCES);
- else
- ncp_new_dentry(dentry);
- }
- return d_splice_alias(inode, dentry);
-}
-
-/*
- * This code is common to create, mkdir, and mknod.
- */
-static int ncp_instantiate(struct inode *dir, struct dentry *dentry,
- struct ncp_entry_info *finfo)
-{
- struct inode *inode;
- int error = -EINVAL;
-
- finfo->ino = iunique(dir->i_sb, 2);
- inode = ncp_iget(dir->i_sb, finfo);
- if (!inode)
- goto out_close;
- d_instantiate(dentry,inode);
- error = 0;
-out:
- return error;
-
-out_close:
- ncp_vdbg("%pd2 failed, closing file\n", dentry);
- ncp_close_file(NCP_SERVER(dir), finfo->file_handle);
- goto out;
-}
-
-int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t rdev, __le32 attributes)
-{
- struct ncp_server *server = NCP_SERVER(dir);
- struct ncp_entry_info finfo;
- int error, result, len;
- int opmode;
- __u8 __name[NCP_MAXPATHLEN + 1];
-
- ncp_vdbg("creating %pd2, mode=%hx\n", dentry, mode);
-
- ncp_age_dentry(server, dentry);
- len = sizeof(__name);
- error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
- dentry->d_name.len, !ncp_preserve_case(dir));
- if (error)
- goto out;
-
- error = -EACCES;
-
- if (S_ISREG(mode) &&
- (server->m.flags & NCP_MOUNT_EXTRAS) &&
- (mode & S_IXUGO))
- attributes |= aSYSTEM | aSHARED;
-
- result = ncp_open_create_file_or_subdir(server, dir, __name,
- OC_MODE_CREATE | OC_MODE_OPEN | OC_MODE_REPLACE,
- attributes, AR_READ | AR_WRITE, &finfo);
- opmode = O_RDWR;
- if (result) {
- result = ncp_open_create_file_or_subdir(server, dir, __name,
- OC_MODE_CREATE | OC_MODE_OPEN | OC_MODE_REPLACE,
- attributes, AR_WRITE, &finfo);
- if (result) {
- if (result == 0x87)
- error = -ENAMETOOLONG;
- else if (result < 0)
- error = result;
- ncp_dbg(1, "%pd2 failed\n", dentry);
- goto out;
- }
- opmode = O_WRONLY;
- }
- finfo.access = opmode;
- if (ncp_is_nfs_extras(server, finfo.volume)) {
- finfo.i.nfs.mode = mode;
- finfo.i.nfs.rdev = new_encode_dev(rdev);
- if (ncp_modify_nfs_info(server, finfo.volume,
- finfo.i.dirEntNum,
- mode, new_encode_dev(rdev)) != 0)
- goto out;
- }
-
- error = ncp_instantiate(dir, dentry, &finfo);
-out:
- return error;
-}
-
-static int ncp_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
-{
- return ncp_create_new(dir, dentry, mode, 0, 0);
-}
-
-static int ncp_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
-{
- struct ncp_entry_info finfo;
- struct ncp_server *server = NCP_SERVER(dir);
- int error, len;
- __u8 __name[NCP_MAXPATHLEN + 1];
-
- ncp_dbg(1, "making %pd2\n", dentry);
-
- ncp_age_dentry(server, dentry);
- len = sizeof(__name);
- error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
- dentry->d_name.len, !ncp_preserve_case(dir));
- if (error)
- goto out;
-
- error = ncp_open_create_file_or_subdir(server, dir, __name,
- OC_MODE_CREATE, aDIR,
- cpu_to_le16(0xffff),
- &finfo);
- if (error == 0) {
- if (ncp_is_nfs_extras(server, finfo.volume)) {
- mode |= S_IFDIR;
- finfo.i.nfs.mode = mode;
- if (ncp_modify_nfs_info(server,
- finfo.volume,
- finfo.i.dirEntNum,
- mode, 0) != 0)
- goto out;
- }
- error = ncp_instantiate(dir, dentry, &finfo);
- } else if (error > 0) {
- error = -EACCES;
- }
-out:
- return error;
-}
-
-static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
-{
- struct ncp_server *server = NCP_SERVER(dir);
- int error, result, len;
- __u8 __name[NCP_MAXPATHLEN + 1];
-
- ncp_dbg(1, "removing %pd2\n", dentry);
-
- len = sizeof(__name);
- error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
- dentry->d_name.len, !ncp_preserve_case(dir));
- if (error)
- goto out;
-
- result = ncp_del_file_or_subdir(server, dir, __name);
- switch (result) {
- case 0x00:
- error = 0;
- break;
- case 0x85: /* unauthorized to delete file */
- case 0x8A: /* unauthorized to delete file */
- error = -EACCES;
- break;
- case 0x8F:
- case 0x90: /* read only */
- error = -EPERM;
- break;
- case 0x9F: /* in use by another client */
- error = -EBUSY;
- break;
- case 0xA0: /* directory not empty */
- error = -ENOTEMPTY;
- break;
- case 0xFF: /* someone deleted file */
- error = -ENOENT;
- break;
- default:
- error = result < 0 ? result : -EACCES;
- break;
- }
-out:
- return error;
-}
-
-static int ncp_unlink(struct inode *dir, struct dentry *dentry)
-{
- struct inode *inode = d_inode(dentry);
- struct ncp_server *server;
- int error;
-
- server = NCP_SERVER(dir);
- ncp_dbg(1, "unlinking %pd2\n", dentry);
-
- /*
- * Check whether to close the file ...
- */
- if (inode) {
- ncp_vdbg("closing file\n");
- ncp_make_closed(inode);
- }
-
- error = ncp_del_file_or_subdir2(server, dentry);
-#ifdef CONFIG_NCPFS_STRONG
- /* 9C is Invalid path.. It should be 8F, 90 - read only, but
- it is not :-( */
- if ((error == 0x9C || error == 0x90) && server->m.flags & NCP_MOUNT_STRONG) { /* R/O */
- error = ncp_force_unlink(dir, dentry);
- }
-#endif
- switch (error) {
- case 0x00:
- ncp_dbg(1, "removed %pd2\n", dentry);
- break;
- case 0x85:
- case 0x8A:
- error = -EACCES;
- break;
- case 0x8D: /* some files in use */
- case 0x8E: /* all files in use */
- error = -EBUSY;
- break;
- case 0x8F: /* some read only */
- case 0x90: /* all read only */
- case 0x9C: /* !!! returned when in-use or read-only by NW4 */
- error = -EPERM;
- break;
- case 0xFF:
- error = -ENOENT;
- break;
- default:
- error = error < 0 ? error : -EACCES;
- break;
- }
- return error;
-}
-
-static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
-{
- struct ncp_server *server = NCP_SERVER(old_dir);
- int error;
- int old_len, new_len;
- __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
-
- if (flags)
- return -EINVAL;
-
- ncp_dbg(1, "%pd2 to %pd2\n", old_dentry, new_dentry);
-
- ncp_age_dentry(server, old_dentry);
- ncp_age_dentry(server, new_dentry);
-
- old_len = sizeof(__old_name);
- error = ncp_io2vol(server, __old_name, &old_len,
- old_dentry->d_name.name, old_dentry->d_name.len,
- !ncp_preserve_case(old_dir));
- if (error)
- goto out;
-
- new_len = sizeof(__new_name);
- error = ncp_io2vol(server, __new_name, &new_len,
- new_dentry->d_name.name, new_dentry->d_name.len,
- !ncp_preserve_case(new_dir));
- if (error)
- goto out;
-
- error = ncp_ren_or_mov_file_or_subdir(server, old_dir, __old_name,
- new_dir, __new_name);
-#ifdef CONFIG_NCPFS_STRONG
- if ((error == 0x90 || error == 0x8B || error == -EACCES) &&
- server->m.flags & NCP_MOUNT_STRONG) { /* RO */
- error = ncp_force_rename(old_dir, old_dentry, __old_name,
- new_dir, new_dentry, __new_name);
- }
-#endif
- switch (error) {
- case 0x00:
- ncp_dbg(1, "renamed %pd -> %pd\n",
- old_dentry, new_dentry);
- ncp_d_prune(old_dentry);
- ncp_d_prune(new_dentry);
- break;
- case 0x9E:
- error = -ENAMETOOLONG;
- break;
- case 0xFF:
- error = -ENOENT;
- break;
- default:
- error = error < 0 ? error : -EACCES;
- break;
- }
-out:
- return error;
-}
-
-static int ncp_mknod(struct inode * dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
-{
- if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber)) {
- ncp_dbg(1, "mode = 0%ho\n", mode);
- return ncp_create_new(dir, dentry, mode, rdev, 0);
- }
- return -EPERM; /* Strange, but true */
-}
-
-/* The following routines are taken directly from msdos-fs */
-
-/* Linear day numbers of the respective 1sts in non-leap years. */
-
-static int day_n[] =
-{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, 0};
-/* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */
-
-static int utc2local(int time)
-{
- return time - sys_tz.tz_minuteswest * 60;
-}
-
-static int local2utc(int time)
-{
- return time + sys_tz.tz_minuteswest * 60;
-}
-
-/* Convert a MS-DOS time/date pair to a UNIX date (seconds since 1 1 70). */
-int
-ncp_date_dos2unix(__le16 t, __le16 d)
-{
- unsigned short time = le16_to_cpu(t), date = le16_to_cpu(d);
- int month, year, secs;
-
- /* first subtract and mask after that... Otherwise, if
- date == 0, bad things happen */
- month = ((date >> 5) - 1) & 15;
- year = date >> 9;
- secs = (time & 31) * 2 + 60 * ((time >> 5) & 63) + (time >> 11) * 3600 +
- 86400 * ((date & 31) - 1 + day_n[month] + (year / 4) +
- year * 365 - ((year & 3) == 0 && month < 2 ? 1 : 0) + 3653);
- /* days since 1.1.70 plus 80's leap day */
- return local2utc(secs);
-}
-
-
-/* Convert linear UNIX date to a MS-DOS time/date pair. */
-void
-ncp_date_unix2dos(int unix_date, __le16 *time, __le16 *date)
-{
- int day, year, nl_day, month;
-
- unix_date = utc2local(unix_date);
- *time = cpu_to_le16(
- (unix_date % 60) / 2 + (((unix_date / 60) % 60) << 5) +
- (((unix_date / 3600) % 24) << 11));
- day = unix_date / 86400 - 3652;
- year = day / 365;
- if ((year + 3) / 4 + 365 * year > day)
- year--;
- day -= (year + 3) / 4 + 365 * year;
- if (day == 59 && !(year & 3)) {
- nl_day = day;
- month = 2;
- } else {
- nl_day = (year & 3) || day <= 59 ? day : day - 1;
- for (month = 1; month < 12; month++)
- if (day_n[month] > nl_day)
- break;
- }
- *date = cpu_to_le16(nl_day - day_n[month - 1] + 1 + (month << 5) + (year << 9));
-}
diff --git a/drivers/staging/ncpfs/file.c b/drivers/staging/ncpfs/file.c
deleted file mode 100644
index 8f8cc0334ddd..000000000000
--- a/drivers/staging/ncpfs/file.c
+++ /dev/null
@@ -1,263 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * file.c
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/uaccess.h>
-
-#include <linux/time.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/fcntl.h>
-#include <linux/stat.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/sched.h>
-
-#include "ncp_fs.h"
-
-static int ncp_fsync(struct file *file, loff_t start, loff_t end, int datasync)
-{
- return file_write_and_wait_range(file, start, end);
-}
-
-/*
- * Open a file with the specified read/write mode.
- */
-int ncp_make_open(struct inode *inode, int right)
-{
- int error;
- int access;
-
- error = -EINVAL;
- if (!inode) {
- pr_err("%s: got NULL inode\n", __func__);
- goto out;
- }
-
- ncp_dbg(1, "opened=%d, volume # %u, dir entry # %u\n",
- atomic_read(&NCP_FINFO(inode)->opened),
- NCP_FINFO(inode)->volNumber,
- NCP_FINFO(inode)->dirEntNum);
- error = -EACCES;
- mutex_lock(&NCP_FINFO(inode)->open_mutex);
- if (!atomic_read(&NCP_FINFO(inode)->opened)) {
- struct ncp_entry_info finfo;
- int result;
-
- /* tries max. rights */
- finfo.access = O_RDWR;
- result = ncp_open_create_file_or_subdir(NCP_SERVER(inode),
- inode, NULL, OC_MODE_OPEN,
- 0, AR_READ | AR_WRITE, &finfo);
- if (!result)
- goto update;
- /* RDWR did not succeeded, try readonly or writeonly as requested */
- switch (right) {
- case O_RDONLY:
- finfo.access = O_RDONLY;
- result = ncp_open_create_file_or_subdir(NCP_SERVER(inode),
- inode, NULL, OC_MODE_OPEN,
- 0, AR_READ, &finfo);
- break;
- case O_WRONLY:
- finfo.access = O_WRONLY;
- result = ncp_open_create_file_or_subdir(NCP_SERVER(inode),
- inode, NULL, OC_MODE_OPEN,
- 0, AR_WRITE, &finfo);
- break;
- }
- if (result) {
- ncp_vdbg("failed, result=%d\n", result);
- goto out_unlock;
- }
- /*
- * Update the inode information.
- */
- update:
- ncp_update_inode(inode, &finfo);
- atomic_set(&NCP_FINFO(inode)->opened, 1);
- }
-
- access = NCP_FINFO(inode)->access;
- ncp_vdbg("file open, access=%x\n", access);
- if (access == right || access == O_RDWR) {
- atomic_inc(&NCP_FINFO(inode)->opened);
- error = 0;
- }
-
-out_unlock:
- mutex_unlock(&NCP_FINFO(inode)->open_mutex);
-out:
- return error;
-}
-
-static ssize_t
-ncp_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
- size_t already_read = 0;
- off_t pos = iocb->ki_pos;
- size_t bufsize;
- int error;
- void *freepage;
- size_t freelen;
-
- ncp_dbg(1, "enter %pD2\n", file);
-
- if (!iov_iter_count(to))
- return 0;
- if (pos > inode->i_sb->s_maxbytes)
- return 0;
- iov_iter_truncate(to, inode->i_sb->s_maxbytes - pos);
-
- error = ncp_make_open(inode, O_RDONLY);
- if (error) {
- ncp_dbg(1, "open failed, error=%d\n", error);
- return error;
- }
-
- bufsize = NCP_SERVER(inode)->buffer_size;
-
- error = -EIO;
- freelen = ncp_read_bounce_size(bufsize);
- freepage = vmalloc(freelen);
- if (!freepage)
- goto outrel;
- error = 0;
- /* First read in as much as possible for each bufsize. */
- while (iov_iter_count(to)) {
- int read_this_time;
- size_t to_read = min_t(size_t,
- bufsize - (pos % bufsize),
- iov_iter_count(to));
-
- error = ncp_read_bounce(NCP_SERVER(inode),
- NCP_FINFO(inode)->file_handle,
- pos, to_read, to, &read_this_time,
- freepage, freelen);
- if (error) {
- error = -EIO; /* NW errno -> Linux errno */
- break;
- }
- pos += read_this_time;
- already_read += read_this_time;
-
- if (read_this_time != to_read)
- break;
- }
- vfree(freepage);
-
- iocb->ki_pos = pos;
-
- file_accessed(file);
-
- ncp_dbg(1, "exit %pD2\n", file);
-outrel:
- ncp_inode_close(inode);
- return already_read ? already_read : error;
-}
-
-static ssize_t
-ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
- size_t already_written = 0;
- size_t bufsize;
- int errno;
- void *bouncebuffer;
- off_t pos;
-
- ncp_dbg(1, "enter %pD2\n", file);
- errno = generic_write_checks(iocb, from);
- if (errno <= 0)
- return errno;
-
- errno = ncp_make_open(inode, O_WRONLY);
- if (errno) {
- ncp_dbg(1, "open failed, error=%d\n", errno);
- return errno;
- }
- bufsize = NCP_SERVER(inode)->buffer_size;
-
- errno = file_update_time(file);
- if (errno)
- goto outrel;
-
- bouncebuffer = vmalloc(bufsize);
- if (!bouncebuffer) {
- errno = -EIO; /* -ENOMEM */
- goto outrel;
- }
- pos = iocb->ki_pos;
- while (iov_iter_count(from)) {
- int written_this_time;
- size_t to_write = min_t(size_t,
- bufsize - (pos % bufsize),
- iov_iter_count(from));
-
- if (!copy_from_iter_full(bouncebuffer, to_write, from)) {
- errno = -EFAULT;
- break;
- }
- if (ncp_write_kernel(NCP_SERVER(inode),
- NCP_FINFO(inode)->file_handle,
- pos, to_write, bouncebuffer, &written_this_time) != 0) {
- errno = -EIO;
- break;
- }
- pos += written_this_time;
- already_written += written_this_time;
-
- if (written_this_time != to_write)
- break;
- }
- vfree(bouncebuffer);
-
- iocb->ki_pos = pos;
-
- if (pos > i_size_read(inode)) {
- inode_lock(inode);
- if (pos > i_size_read(inode))
- i_size_write(inode, pos);
- inode_unlock(inode);
- }
- ncp_dbg(1, "exit %pD2\n", file);
-outrel:
- ncp_inode_close(inode);
- return already_written ? already_written : errno;
-}
-
-static int ncp_release(struct inode *inode, struct file *file) {
- if (ncp_make_closed(inode)) {
- ncp_dbg(1, "failed to close\n");
- }
- return 0;
-}
-
-const struct file_operations ncp_file_operations =
-{
- .llseek = generic_file_llseek,
- .read_iter = ncp_file_read_iter,
- .write_iter = ncp_file_write_iter,
- .unlocked_ioctl = ncp_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ncp_compat_ioctl,
-#endif
- .mmap = ncp_mmap,
- .release = ncp_release,
- .fsync = ncp_fsync,
-};
-
-const struct inode_operations ncp_file_inode_operations =
-{
- .setattr = ncp_notify_change,
-};
diff --git a/drivers/staging/ncpfs/getopt.c b/drivers/staging/ncpfs/getopt.c
deleted file mode 100644
index 5c941bef14c4..000000000000
--- a/drivers/staging/ncpfs/getopt.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * getopt.c
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-#include <asm/errno.h>
-
-#include "getopt.h"
-
-/**
- * ncp_getopt - option parser
- * @caller: name of the caller, for error messages
- * @options: the options string
- * @opts: an array of &struct option entries controlling parser operations
- * @optopt: output; will contain the current option
- * @optarg: output; will contain the value (if one exists)
- * @value: output; may be NULL; will be overwritten with the integer value
- * of the current argument.
- *
- * Helper to parse options on the format used by mount ("a=b,c=d,e,f").
- * Returns opts->val if a matching entry in the 'opts' array is found,
- * 0 when no more tokens are found, -1 if an error is encountered.
- */
-int ncp_getopt(const char *caller, char **options, const struct ncp_option *opts,
- char **optopt, char **optarg, unsigned long *value)
-{
- char *token;
- char *val;
-
- do {
- if ((token = strsep(options, ",")) == NULL)
- return 0;
- } while (*token == '\0');
- if (optopt)
- *optopt = token;
-
- if ((val = strchr (token, '=')) != NULL) {
- *val++ = 0;
- }
- *optarg = val;
- for (; opts->name; opts++) {
- if (!strcmp(opts->name, token)) {
- if (!val) {
- if (opts->has_arg & OPT_NOPARAM) {
- return opts->val;
- }
- pr_info("%s: the %s option requires an argument\n",
- caller, token);
- return -EINVAL;
- }
- if (opts->has_arg & OPT_INT) {
- int rc = kstrtoul(val, 0, value);
-
- if (rc) {
- pr_info("%s: invalid numeric value in %s=%s\n",
- caller, token, val);
- return rc;
- }
- return opts->val;
- }
- if (opts->has_arg & OPT_STRING) {
- return opts->val;
- }
- pr_info("%s: unexpected argument %s to the %s option\n",
- caller, val, token);
- return -EINVAL;
- }
- }
- pr_info("%s: Unrecognized mount option %s\n", caller, token);
- return -EOPNOTSUPP;
-}
diff --git a/drivers/staging/ncpfs/getopt.h b/drivers/staging/ncpfs/getopt.h
deleted file mode 100644
index 30f0da317670..000000000000
--- a/drivers/staging/ncpfs/getopt.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_GETOPT_H
-#define _LINUX_GETOPT_H
-
-#define OPT_NOPARAM 1
-#define OPT_INT 2
-#define OPT_STRING 4
-struct ncp_option {
- const char *name;
- unsigned int has_arg;
- int val;
-};
-
-extern int ncp_getopt(const char *caller, char **options, const struct ncp_option *opts,
- char **optopt, char **optarg, unsigned long *value);
-
-#endif /* _LINUX_GETOPT_H */
diff --git a/drivers/staging/ncpfs/inode.c b/drivers/staging/ncpfs/inode.c
deleted file mode 100644
index bb411610a071..000000000000
--- a/drivers/staging/ncpfs/inode.c
+++ /dev/null
@@ -1,1067 +0,0 @@
-/*
- * inode.c
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- * Modified for big endian by J.F. Chadima and David S. Miller
- * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
- * Modified 1998 Wolfram Pienkoss for NLS
- * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-
-#include <linux/uaccess.h>
-#include <asm/byteorder.h>
-
-#include <linux/time.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/fcntl.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/vfs.h>
-#include <linux/mount.h>
-#include <linux/seq_file.h>
-#include <linux/sched/signal.h>
-#include <linux/namei.h>
-
-#include <net/sock.h>
-
-#include "ncp_fs.h"
-#include "getopt.h"
-
-#define NCP_DEFAULT_FILE_MODE 0600
-#define NCP_DEFAULT_DIR_MODE 0700
-#define NCP_DEFAULT_TIME_OUT 10
-#define NCP_DEFAULT_RETRY_COUNT 20
-
-static void ncp_evict_inode(struct inode *);
-static void ncp_put_super(struct super_block *);
-static int ncp_statfs(struct dentry *, struct kstatfs *);
-static int ncp_show_options(struct seq_file *, struct dentry *);
-
-static struct kmem_cache * ncp_inode_cachep;
-
-static struct inode *ncp_alloc_inode(struct super_block *sb)
-{
- struct ncp_inode_info *ei;
-
- ei = kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
- if (!ei)
- return NULL;
- return &ei->vfs_inode;
-}
-
-static void ncp_i_callback(struct rcu_head *head)
-{
- struct inode *inode = container_of(head, struct inode, i_rcu);
- kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
-}
-
-static void ncp_destroy_inode(struct inode *inode)
-{
- call_rcu(&inode->i_rcu, ncp_i_callback);
-}
-
-static void init_once(void *foo)
-{
- struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
-
- mutex_init(&ei->open_mutex);
- inode_init_once(&ei->vfs_inode);
-}
-
-static int init_inodecache(void)
-{
- ncp_inode_cachep = kmem_cache_create("ncp_inode_cache",
- sizeof(struct ncp_inode_info),
- 0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
- init_once);
- if (ncp_inode_cachep == NULL)
- return -ENOMEM;
- return 0;
-}
-
-static void destroy_inodecache(void)
-{
- /*
- * Make sure all delayed rcu free inodes are flushed before we
- * destroy cache.
- */
- rcu_barrier();
- kmem_cache_destroy(ncp_inode_cachep);
-}
-
-static int ncp_remount(struct super_block *sb, int *flags, char* data)
-{
- sync_filesystem(sb);
- *flags |= SB_NODIRATIME;
- return 0;
-}
-
-static const struct super_operations ncp_sops =
-{
- .alloc_inode = ncp_alloc_inode,
- .destroy_inode = ncp_destroy_inode,
- .drop_inode = generic_delete_inode,
- .evict_inode = ncp_evict_inode,
- .put_super = ncp_put_super,
- .statfs = ncp_statfs,
- .remount_fs = ncp_remount,
- .show_options = ncp_show_options,
-};
-
-/*
- * Fill in the ncpfs-specific information in the inode.
- */
-static void ncp_update_dirent(struct inode *inode, struct ncp_entry_info *nwinfo)
-{
- NCP_FINFO(inode)->DosDirNum = nwinfo->i.DosDirNum;
- NCP_FINFO(inode)->dirEntNum = nwinfo->i.dirEntNum;
- NCP_FINFO(inode)->volNumber = nwinfo->volume;
-}
-
-void ncp_update_inode(struct inode *inode, struct ncp_entry_info *nwinfo)
-{
- ncp_update_dirent(inode, nwinfo);
- NCP_FINFO(inode)->nwattr = nwinfo->i.attributes;
- NCP_FINFO(inode)->access = nwinfo->access;
- memcpy(NCP_FINFO(inode)->file_handle, nwinfo->file_handle,
- sizeof(nwinfo->file_handle));
- ncp_dbg(1, "updated %s, volnum=%d, dirent=%u\n",
- nwinfo->i.entryName, NCP_FINFO(inode)->volNumber,
- NCP_FINFO(inode)->dirEntNum);
-}
-
-static void ncp_update_dates(struct inode *inode, struct nw_info_struct *nwi)
-{
- /* NFS namespace mode overrides others if it's set. */
- ncp_dbg(1, "(%s) nfs.mode=0%o\n", nwi->entryName, nwi->nfs.mode);
- if (nwi->nfs.mode) {
- /* XXX Security? */
- inode->i_mode = nwi->nfs.mode;
- }
-
- inode->i_blocks = (i_size_read(inode) + NCP_BLOCK_SIZE - 1) >> NCP_BLOCK_SHIFT;
-
- inode->i_mtime.tv_sec = ncp_date_dos2unix(nwi->modifyTime, nwi->modifyDate);
- inode->i_ctime.tv_sec = ncp_date_dos2unix(nwi->creationTime, nwi->creationDate);
- inode->i_atime.tv_sec = ncp_date_dos2unix(0, nwi->lastAccessDate);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
-}
-
-static void ncp_update_attrs(struct inode *inode, struct ncp_entry_info *nwinfo)
-{
- struct nw_info_struct *nwi = &nwinfo->i;
- struct ncp_server *server = NCP_SERVER(inode);
-
- if (nwi->attributes & aDIR) {
- inode->i_mode = server->m.dir_mode;
- /* for directories dataStreamSize seems to be some
- Object ID ??? */
- i_size_write(inode, NCP_BLOCK_SIZE);
- } else {
- u32 size;
-
- inode->i_mode = server->m.file_mode;
- size = le32_to_cpu(nwi->dataStreamSize);
- i_size_write(inode, size);
-#ifdef CONFIG_NCPFS_EXTRAS
- if ((server->m.flags & (NCP_MOUNT_EXTRAS|NCP_MOUNT_SYMLINKS))
- && (nwi->attributes & aSHARED)) {
- switch (nwi->attributes & (aHIDDEN|aSYSTEM)) {
- case aHIDDEN:
- if (server->m.flags & NCP_MOUNT_SYMLINKS) {
- if (/* (size >= NCP_MIN_SYMLINK_SIZE)
- && */ (size <= NCP_MAX_SYMLINK_SIZE)) {
- inode->i_mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
- NCP_FINFO(inode)->flags |= NCPI_KLUDGE_SYMLINK;
- break;
- }
- }
- /* FALLTHROUGH */
- case 0:
- if (server->m.flags & NCP_MOUNT_EXTRAS)
- inode->i_mode |= S_IRUGO;
- break;
- case aSYSTEM:
- if (server->m.flags & NCP_MOUNT_EXTRAS)
- inode->i_mode |= (inode->i_mode >> 2) & S_IXUGO;
- break;
- /* case aSYSTEM|aHIDDEN: */
- default:
- /* reserved combination */
- break;
- }
- }
-#endif
- }
- if (nwi->attributes & aRONLY) inode->i_mode &= ~S_IWUGO;
-}
-
-void ncp_update_inode2(struct inode* inode, struct ncp_entry_info *nwinfo)
-{
- NCP_FINFO(inode)->flags = 0;
- if (!atomic_read(&NCP_FINFO(inode)->opened)) {
- NCP_FINFO(inode)->nwattr = nwinfo->i.attributes;
- ncp_update_attrs(inode, nwinfo);
- }
-
- ncp_update_dates(inode, &nwinfo->i);
- ncp_update_dirent(inode, nwinfo);
-}
-
-/*
- * Fill in the inode based on the ncp_entry_info structure. Used only for brand new inodes.
- */
-static void ncp_set_attr(struct inode *inode, struct ncp_entry_info *nwinfo)
-{
- struct ncp_server *server = NCP_SERVER(inode);
-
- NCP_FINFO(inode)->flags = 0;
-
- ncp_update_attrs(inode, nwinfo);
-
- ncp_dbg(2, "inode->i_mode = %u\n", inode->i_mode);
-
- set_nlink(inode, 1);
- inode->i_uid = server->m.uid;
- inode->i_gid = server->m.gid;
-
- ncp_update_dates(inode, &nwinfo->i);
- ncp_update_inode(inode, nwinfo);
-}
-
-#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
-static const struct inode_operations ncp_symlink_inode_operations = {
- .get_link = page_get_link,
- .setattr = ncp_notify_change,
-};
-#endif
-
-/*
- * Get a new inode.
- */
-struct inode *
-ncp_iget(struct super_block *sb, struct ncp_entry_info *info)
-{
- struct inode *inode;
-
- if (info == NULL) {
- pr_err("%s: info is NULL\n", __func__);
- return NULL;
- }
-
- inode = new_inode(sb);
- if (inode) {
- atomic_set(&NCP_FINFO(inode)->opened, info->opened);
-
- inode->i_ino = info->ino;
- ncp_set_attr(inode, info);
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &ncp_file_inode_operations;
- inode->i_fop = &ncp_file_operations;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &ncp_dir_inode_operations;
- inode->i_fop = &ncp_dir_operations;
-#ifdef CONFIG_NCPFS_NFS_NS
- } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- init_special_inode(inode, inode->i_mode,
- new_decode_dev(info->i.nfs.rdev));
-#endif
-#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
- } else if (S_ISLNK(inode->i_mode)) {
- inode->i_op = &ncp_symlink_inode_operations;
- inode_nohighmem(inode);
- inode->i_data.a_ops = &ncp_symlink_aops;
-#endif
- } else {
- make_bad_inode(inode);
- }
- insert_inode_hash(inode);
- } else
- pr_err("%s: iget failed!\n", __func__);
- return inode;
-}
-
-static void
-ncp_evict_inode(struct inode *inode)
-{
- truncate_inode_pages_final(&inode->i_data);
- clear_inode(inode);
-
- if (S_ISDIR(inode->i_mode)) {
- ncp_dbg(2, "put directory %ld\n", inode->i_ino);
- }
-
- if (ncp_make_closed(inode) != 0) {
- /* We can't do anything but complain. */
- pr_err("%s: could not close\n", __func__);
- }
-}
-
-static void ncp_stop_tasks(struct ncp_server *server) {
- struct sock* sk = server->ncp_sock->sk;
-
- lock_sock(sk);
- sk->sk_error_report = server->error_report;
- sk->sk_data_ready = server->data_ready;
- sk->sk_write_space = server->write_space;
- release_sock(sk);
- del_timer_sync(&server->timeout_tm);
-
- flush_work(&server->rcv.tq);
- if (sk->sk_socket->type == SOCK_STREAM)
- flush_work(&server->tx.tq);
- else
- flush_work(&server->timeout_tq);
-}
-
-static int ncp_show_options(struct seq_file *seq, struct dentry *root)
-{
- struct ncp_server *server = NCP_SBP(root->d_sb);
- unsigned int tmp;
-
- if (!uid_eq(server->m.uid, GLOBAL_ROOT_UID))
- seq_printf(seq, ",uid=%u",
- from_kuid_munged(&init_user_ns, server->m.uid));
- if (!gid_eq(server->m.gid, GLOBAL_ROOT_GID))
- seq_printf(seq, ",gid=%u",
- from_kgid_munged(&init_user_ns, server->m.gid));
- if (!uid_eq(server->m.mounted_uid, GLOBAL_ROOT_UID))
- seq_printf(seq, ",owner=%u",
- from_kuid_munged(&init_user_ns, server->m.mounted_uid));
- tmp = server->m.file_mode & S_IALLUGO;
- if (tmp != NCP_DEFAULT_FILE_MODE)
- seq_printf(seq, ",mode=0%o", tmp);
- tmp = server->m.dir_mode & S_IALLUGO;
- if (tmp != NCP_DEFAULT_DIR_MODE)
- seq_printf(seq, ",dirmode=0%o", tmp);
- if (server->m.time_out != NCP_DEFAULT_TIME_OUT * HZ / 100) {
- tmp = server->m.time_out * 100 / HZ;
- seq_printf(seq, ",timeout=%u", tmp);
- }
- if (server->m.retry_count != NCP_DEFAULT_RETRY_COUNT)
- seq_printf(seq, ",retry=%u", server->m.retry_count);
- if (server->m.flags != 0)
- seq_printf(seq, ",flags=%lu", server->m.flags);
- if (server->m.wdog_pid != NULL)
- seq_printf(seq, ",wdogpid=%u", pid_vnr(server->m.wdog_pid));
-
- return 0;
-}
-
-static const struct ncp_option ncp_opts[] = {
- { "uid", OPT_INT, 'u' },
- { "gid", OPT_INT, 'g' },
- { "owner", OPT_INT, 'o' },
- { "mode", OPT_INT, 'm' },
- { "dirmode", OPT_INT, 'd' },
- { "timeout", OPT_INT, 't' },
- { "retry", OPT_INT, 'r' },
- { "flags", OPT_INT, 'f' },
- { "wdogpid", OPT_INT, 'w' },
- { "ncpfd", OPT_INT, 'n' },
- { "infofd", OPT_INT, 'i' }, /* v5 */
- { "version", OPT_INT, 'v' },
- { NULL, 0, 0 } };
-
-static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options) {
- int optval;
- char *optarg;
- unsigned long optint;
- int version = 0;
- int ret;
-
- data->flags = 0;
- data->int_flags = 0;
- data->mounted_uid = GLOBAL_ROOT_UID;
- data->wdog_pid = NULL;
- data->ncp_fd = ~0;
- data->time_out = NCP_DEFAULT_TIME_OUT;
- data->retry_count = NCP_DEFAULT_RETRY_COUNT;
- data->uid = GLOBAL_ROOT_UID;
- data->gid = GLOBAL_ROOT_GID;
- data->file_mode = NCP_DEFAULT_FILE_MODE;
- data->dir_mode = NCP_DEFAULT_DIR_MODE;
- data->info_fd = -1;
- data->mounted_vol[0] = 0;
-
- while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) {
- ret = optval;
- if (ret < 0)
- goto err;
- switch (optval) {
- case 'u':
- data->uid = make_kuid(current_user_ns(), optint);
- if (!uid_valid(data->uid)) {
- ret = -EINVAL;
- goto err;
- }
- break;
- case 'g':
- data->gid = make_kgid(current_user_ns(), optint);
- if (!gid_valid(data->gid)) {
- ret = -EINVAL;
- goto err;
- }
- break;
- case 'o':
- data->mounted_uid = make_kuid(current_user_ns(), optint);
- if (!uid_valid(data->mounted_uid)) {
- ret = -EINVAL;
- goto err;
- }
- break;
- case 'm':
- data->file_mode = optint;
- break;
- case 'd':
- data->dir_mode = optint;
- break;
- case 't':
- data->time_out = optint;
- break;
- case 'r':
- data->retry_count = optint;
- break;
- case 'f':
- data->flags = optint;
- break;
- case 'w':
- data->wdog_pid = find_get_pid(optint);
- break;
- case 'n':
- data->ncp_fd = optint;
- break;
- case 'i':
- data->info_fd = optint;
- break;
- case 'v':
- ret = -ECHRNG;
- if (optint < NCP_MOUNT_VERSION_V4)
- goto err;
- if (optint > NCP_MOUNT_VERSION_V5)
- goto err;
- version = optint;
- break;
-
- }
- }
- return 0;
-err:
- put_pid(data->wdog_pid);
- data->wdog_pid = NULL;
- return ret;
-}
-
-static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
-{
- struct ncp_mount_data_kernel data;
- struct ncp_server *server;
- struct inode *root_inode;
- struct socket *sock;
- int error;
- int default_bufsize;
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
- int options;
-#endif
- struct ncp_entry_info finfo;
-
- memset(&data, 0, sizeof(data));
- server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
- if (!server)
- return -ENOMEM;
- sb->s_fs_info = server;
-
- error = -EFAULT;
- if (raw_data == NULL)
- goto out;
- switch (*(int*)raw_data) {
- case NCP_MOUNT_VERSION:
- {
- struct ncp_mount_data* md = (struct ncp_mount_data*)raw_data;
-
- data.flags = md->flags;
- data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE;
- data.mounted_uid = make_kuid(current_user_ns(), md->mounted_uid);
- data.wdog_pid = find_get_pid(md->wdog_pid);
- data.ncp_fd = md->ncp_fd;
- data.time_out = md->time_out;
- data.retry_count = md->retry_count;
- data.uid = make_kuid(current_user_ns(), md->uid);
- data.gid = make_kgid(current_user_ns(), md->gid);
- data.file_mode = md->file_mode;
- data.dir_mode = md->dir_mode;
- data.info_fd = -1;
- memcpy(data.mounted_vol, md->mounted_vol,
- NCP_VOLNAME_LEN+1);
- }
- break;
- case NCP_MOUNT_VERSION_V4:
- {
- struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data;
-
- data.flags = md->flags;
- data.mounted_uid = make_kuid(current_user_ns(), md->mounted_uid);
- data.wdog_pid = find_get_pid(md->wdog_pid);
- data.ncp_fd = md->ncp_fd;
- data.time_out = md->time_out;
- data.retry_count = md->retry_count;
- data.uid = make_kuid(current_user_ns(), md->uid);
- data.gid = make_kgid(current_user_ns(), md->gid);
- data.file_mode = md->file_mode;
- data.dir_mode = md->dir_mode;
- data.info_fd = -1;
- }
- break;
- default:
- error = -ECHRNG;
- if (memcmp(raw_data, "vers", 4) == 0) {
- error = ncp_parse_options(&data, raw_data);
- }
- if (error)
- goto out;
- break;
- }
- error = -EINVAL;
- if (!uid_valid(data.mounted_uid) || !uid_valid(data.uid) ||
- !gid_valid(data.gid))
- goto out;
- sock = sockfd_lookup(data.ncp_fd, &error);
- if (!sock)
- goto out;
-
- if (sock->type == SOCK_STREAM)
- default_bufsize = 0xF000;
- else
- default_bufsize = 1024;
-
- sb->s_flags |= SB_NODIRATIME; /* probably even noatime */
- sb->s_maxbytes = 0xFFFFFFFFU;
- sb->s_blocksize = 1024; /* Eh... Is this correct? */
- sb->s_blocksize_bits = 10;
- sb->s_magic = NCP_SUPER_MAGIC;
- sb->s_op = &ncp_sops;
- sb->s_d_op = &ncp_dentry_operations;
-
- server = NCP_SBP(sb);
- memset(server, 0, sizeof(*server));
-
- error = super_setup_bdi(sb);
- if (error)
- goto out_fput;
-
- server->ncp_sock = sock;
-
- if (data.info_fd != -1) {
- struct socket *info_sock = sockfd_lookup(data.info_fd, &error);
- if (!info_sock)
- goto out_fput;
- server->info_sock = info_sock;
- error = -EBADFD;
- if (info_sock->type != SOCK_STREAM)
- goto out_fput2;
- }
-
-/* server->lock = 0; */
- mutex_init(&server->mutex);
- server->packet = NULL;
-/* server->buffer_size = 0; */
-/* server->conn_status = 0; */
-/* server->root_dentry = NULL; */
-/* server->root_setuped = 0; */
- mutex_init(&server->root_setup_lock);
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
-/* server->sign_wanted = 0; */
-/* server->sign_active = 0; */
-#endif
- init_rwsem(&server->auth_rwsem);
- server->auth.auth_type = NCP_AUTH_NONE;
-/* server->auth.object_name_len = 0; */
-/* server->auth.object_name = NULL; */
-/* server->auth.object_type = 0; */
-/* server->priv.len = 0; */
-/* server->priv.data = NULL; */
-
- server->m = data;
- /* Although anything producing this is buggy, it happens
- now because of PATH_MAX changes.. */
- if (server->m.time_out < 1) {
- server->m.time_out = 10;
- pr_info("You need to recompile your ncpfs utils..\n");
- }
- server->m.time_out = server->m.time_out * HZ / 100;
- server->m.file_mode = (server->m.file_mode & S_IRWXUGO) | S_IFREG;
- server->m.dir_mode = (server->m.dir_mode & S_IRWXUGO) | S_IFDIR;
-
-#ifdef CONFIG_NCPFS_NLS
- /* load the default NLS charsets */
- server->nls_vol = load_nls_default();
- server->nls_io = load_nls_default();
-#endif /* CONFIG_NCPFS_NLS */
-
- atomic_set(&server->dentry_ttl, 0); /* no caching */
-
- INIT_LIST_HEAD(&server->tx.requests);
- mutex_init(&server->rcv.creq_mutex);
- server->tx.creq = NULL;
- server->rcv.creq = NULL;
-
- timer_setup(&server->timeout_tm, ncpdgram_timeout_call, 0);
-#undef NCP_PACKET_SIZE
-#define NCP_PACKET_SIZE 131072
- error = -ENOMEM;
- server->packet_size = NCP_PACKET_SIZE;
- server->packet = vmalloc(NCP_PACKET_SIZE);
- if (server->packet == NULL)
- goto out_nls;
- server->txbuf = vmalloc(NCP_PACKET_SIZE);
- if (server->txbuf == NULL)
- goto out_packet;
- server->rxbuf = vmalloc(NCP_PACKET_SIZE);
- if (server->rxbuf == NULL)
- goto out_txbuf;
-
- lock_sock(sock->sk);
- server->data_ready = sock->sk->sk_data_ready;
- server->write_space = sock->sk->sk_write_space;
- server->error_report = sock->sk->sk_error_report;
- sock->sk->sk_user_data = server;
- sock->sk->sk_data_ready = ncp_tcp_data_ready;
- sock->sk->sk_error_report = ncp_tcp_error_report;
- if (sock->type == SOCK_STREAM) {
- server->rcv.ptr = (unsigned char*)&server->rcv.buf;
- server->rcv.len = 10;
- server->rcv.state = 0;
- INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
- INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
- sock->sk->sk_write_space = ncp_tcp_write_space;
- } else {
- INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
- INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
- }
- release_sock(sock->sk);
-
- ncp_lock_server(server);
- error = ncp_connect(server);
- ncp_unlock_server(server);
- if (error < 0)
- goto out_rxbuf;
- ncp_dbg(1, "NCP_SBP(sb) = %p\n", NCP_SBP(sb));
-
- error = -EMSGSIZE; /* -EREMOTESIDEINCOMPATIBLE */
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
- if (ncp_negotiate_size_and_options(server, default_bufsize,
- NCP_DEFAULT_OPTIONS, &(server->buffer_size), &options) == 0)
- {
- if (options != NCP_DEFAULT_OPTIONS)
- {
- if (ncp_negotiate_size_and_options(server,
- default_bufsize,
- options & 2,
- &(server->buffer_size), &options) != 0)
-
- {
- goto out_disconnect;
- }
- }
- ncp_lock_server(server);
- if (options & 2)
- server->sign_wanted = 1;
- ncp_unlock_server(server);
- }
- else
-#endif /* CONFIG_NCPFS_PACKET_SIGNING */
- if (ncp_negotiate_buffersize(server, default_bufsize,
- &(server->buffer_size)) != 0)
- goto out_disconnect;
- ncp_dbg(1, "bufsize = %d\n", server->buffer_size);
-
- memset(&finfo, 0, sizeof(finfo));
- finfo.i.attributes = aDIR;
- finfo.i.dataStreamSize = 0; /* ignored */
- finfo.i.dirEntNum = 0;
- finfo.i.DosDirNum = 0;
-#ifdef CONFIG_NCPFS_SMALLDOS
- finfo.i.NSCreator = NW_NS_DOS;
-#endif
- finfo.volume = NCP_NUMBER_OF_VOLUMES;
- /* set dates of mountpoint to Jan 1, 1986; 00:00 */
- finfo.i.creationTime = finfo.i.modifyTime
- = cpu_to_le16(0x0000);
- finfo.i.creationDate = finfo.i.modifyDate
- = finfo.i.lastAccessDate
- = cpu_to_le16(0x0C21);
- finfo.i.nameLen = 0;
- finfo.i.entryName[0] = '\0';
-
- finfo.opened = 0;
- finfo.ino = 2; /* tradition */
-
- server->name_space[finfo.volume] = NW_NS_DOS;
-
- error = -ENOMEM;
- root_inode = ncp_iget(sb, &finfo);
- if (!root_inode)
- goto out_disconnect;
- ncp_dbg(1, "root vol=%d\n", NCP_FINFO(root_inode)->volNumber);
- sb->s_root = d_make_root(root_inode);
- if (!sb->s_root)
- goto out_disconnect;
- return 0;
-
-out_disconnect:
- ncp_lock_server(server);
- ncp_disconnect(server);
- ncp_unlock_server(server);
-out_rxbuf:
- ncp_stop_tasks(server);
- vfree(server->rxbuf);
-out_txbuf:
- vfree(server->txbuf);
-out_packet:
- vfree(server->packet);
-out_nls:
-#ifdef CONFIG_NCPFS_NLS
- unload_nls(server->nls_io);
- unload_nls(server->nls_vol);
-#endif
- mutex_destroy(&server->rcv.creq_mutex);
- mutex_destroy(&server->root_setup_lock);
- mutex_destroy(&server->mutex);
-out_fput2:
- if (server->info_sock)
- sockfd_put(server->info_sock);
-out_fput:
- sockfd_put(sock);
-out:
- put_pid(data.wdog_pid);
- sb->s_fs_info = NULL;
- kfree(server);
- return error;
-}
-
-static void delayed_free(struct rcu_head *p)
-{
- struct ncp_server *server = container_of(p, struct ncp_server, rcu);
-#ifdef CONFIG_NCPFS_NLS
- /* unload the NLS charsets */
- unload_nls(server->nls_vol);
- unload_nls(server->nls_io);
-#endif /* CONFIG_NCPFS_NLS */
- kfree(server);
-}
-
-static void ncp_put_super(struct super_block *sb)
-{
- struct ncp_server *server = NCP_SBP(sb);
-
- ncp_lock_server(server);
- ncp_disconnect(server);
- ncp_unlock_server(server);
-
- ncp_stop_tasks(server);
-
- mutex_destroy(&server->rcv.creq_mutex);
- mutex_destroy(&server->root_setup_lock);
- mutex_destroy(&server->mutex);
-
- if (server->info_sock)
- sockfd_put(server->info_sock);
- sockfd_put(server->ncp_sock);
- kill_pid(server->m.wdog_pid, SIGTERM, 1);
- put_pid(server->m.wdog_pid);
-
- kfree(server->priv.data);
- kfree(server->auth.object_name);
- vfree(server->rxbuf);
- vfree(server->txbuf);
- vfree(server->packet);
- call_rcu(&server->rcu, delayed_free);
-}
-
-static int ncp_statfs(struct dentry *dentry, struct kstatfs *buf)
-{
- struct dentry* d;
- struct inode* i;
- struct ncp_inode_info* ni;
- struct ncp_server* s;
- struct ncp_volume_info vi;
- struct super_block *sb = dentry->d_sb;
- int err;
- __u8 dh;
-
- d = sb->s_root;
- if (!d) {
- goto dflt;
- }
- i = d_inode(d);
- if (!i) {
- goto dflt;
- }
- ni = NCP_FINFO(i);
- if (!ni) {
- goto dflt;
- }
- s = NCP_SBP(sb);
- if (!s) {
- goto dflt;
- }
- if (!s->m.mounted_vol[0]) {
- goto dflt;
- }
-
- err = ncp_dirhandle_alloc(s, ni->volNumber, ni->DosDirNum, &dh);
- if (err) {
- goto dflt;
- }
- err = ncp_get_directory_info(s, dh, &vi);
- ncp_dirhandle_free(s, dh);
- if (err) {
- goto dflt;
- }
- buf->f_type = NCP_SUPER_MAGIC;
- buf->f_bsize = vi.sectors_per_block * 512;
- buf->f_blocks = vi.total_blocks;
- buf->f_bfree = vi.free_blocks;
- buf->f_bavail = vi.free_blocks;
- buf->f_files = vi.total_dir_entries;
- buf->f_ffree = vi.available_dir_entries;
- buf->f_namelen = 12;
- return 0;
-
- /* We cannot say how much disk space is left on a mounted
- NetWare Server, because free space is distributed over
- volumes, and the current user might have disk quotas. So
- free space is not that simple to determine. Our decision
- here is to err conservatively. */
-
-dflt:;
- buf->f_type = NCP_SUPER_MAGIC;
- buf->f_bsize = NCP_BLOCK_SIZE;
- buf->f_blocks = 0;
- buf->f_bfree = 0;
- buf->f_bavail = 0;
- buf->f_namelen = 12;
- return 0;
-}
-
-int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
-{
- struct inode *inode = d_inode(dentry);
- int result = 0;
- __le32 info_mask;
- struct nw_modify_dos_info info;
- struct ncp_server *server;
-
- result = -EIO;
-
- server = NCP_SERVER(inode);
- if (!server) /* How this could happen? */
- goto out;
-
- result = -EPERM;
- if (IS_DEADDIR(d_inode(dentry)))
- goto out;
-
- /* ageing the dentry to force validation */
- ncp_age_dentry(server, dentry);
-
- result = setattr_prepare(dentry, attr);
- if (result < 0)
- goto out;
-
- result = -EPERM;
- if ((attr->ia_valid & ATTR_UID) && !uid_eq(attr->ia_uid, server->m.uid))
- goto out;
-
- if ((attr->ia_valid & ATTR_GID) && !gid_eq(attr->ia_gid, server->m.gid))
- goto out;
-
- if (((attr->ia_valid & ATTR_MODE) &&
- (attr->ia_mode &
- ~(S_IFREG | S_IFDIR | S_IRWXUGO))))
- goto out;
-
- info_mask = 0;
- memset(&info, 0, sizeof(info));
-
-#if 1
- if ((attr->ia_valid & ATTR_MODE) != 0)
- {
- umode_t newmode = attr->ia_mode;
-
- info_mask |= DM_ATTRIBUTES;
-
- if (S_ISDIR(inode->i_mode)) {
- newmode &= server->m.dir_mode;
- } else {
-#ifdef CONFIG_NCPFS_EXTRAS
- if (server->m.flags & NCP_MOUNT_EXTRAS) {
- /* any non-default execute bit set */
- if (newmode & ~server->m.file_mode & S_IXUGO)
- info.attributes |= aSHARED | aSYSTEM;
- /* read for group/world and not in default file_mode */
- else if (newmode & ~server->m.file_mode & S_IRUGO)
- info.attributes |= aSHARED;
- } else
-#endif
- newmode &= server->m.file_mode;
- }
- if (newmode & S_IWUGO)
- info.attributes &= ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
- else
- info.attributes |= (aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
-
-#ifdef CONFIG_NCPFS_NFS_NS
- if (ncp_is_nfs_extras(server, NCP_FINFO(inode)->volNumber)) {
- result = ncp_modify_nfs_info(server,
- NCP_FINFO(inode)->volNumber,
- NCP_FINFO(inode)->dirEntNum,
- attr->ia_mode, 0);
- if (result != 0)
- goto out;
- info.attributes &= ~(aSHARED | aSYSTEM);
- {
- /* mark partial success */
- struct iattr tmpattr;
-
- tmpattr.ia_valid = ATTR_MODE;
- tmpattr.ia_mode = attr->ia_mode;
-
- setattr_copy(inode, &tmpattr);
- mark_inode_dirty(inode);
- }
- }
-#endif
- }
-#endif
-
- /* Do SIZE before attributes, otherwise mtime together with size does not work...
- */
- if ((attr->ia_valid & ATTR_SIZE) != 0) {
- int written;
-
- ncp_dbg(1, "trying to change size to %llu\n", attr->ia_size);
-
- if ((result = ncp_make_open(inode, O_WRONLY)) < 0) {
- result = -EACCES;
- goto out;
- }
- ncp_write_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle,
- attr->ia_size, 0, "", &written);
-
- /* According to ndir, the changes only take effect after
- closing the file */
- ncp_inode_close(inode);
- result = ncp_make_closed(inode);
- if (result)
- goto out;
-
- if (attr->ia_size != i_size_read(inode)) {
- truncate_setsize(inode, attr->ia_size);
- mark_inode_dirty(inode);
- }
- }
- if ((attr->ia_valid & ATTR_CTIME) != 0) {
- info_mask |= (DM_CREATE_TIME | DM_CREATE_DATE);
- ncp_date_unix2dos(attr->ia_ctime.tv_sec,
- &info.creationTime, &info.creationDate);
- }
- if ((attr->ia_valid & ATTR_MTIME) != 0) {
- info_mask |= (DM_MODIFY_TIME | DM_MODIFY_DATE);
- ncp_date_unix2dos(attr->ia_mtime.tv_sec,
- &info.modifyTime, &info.modifyDate);
- }
- if ((attr->ia_valid & ATTR_ATIME) != 0) {
- __le16 dummy;
- info_mask |= (DM_LAST_ACCESS_DATE);
- ncp_date_unix2dos(attr->ia_atime.tv_sec,
- &dummy, &info.lastAccessDate);
- }
- if (info_mask != 0) {
- result = ncp_modify_file_or_subdir_dos_info(NCP_SERVER(inode),
- inode, info_mask, &info);
- if (result != 0) {
- if (info_mask == (DM_CREATE_TIME | DM_CREATE_DATE)) {
- /* NetWare seems not to allow this. I
- do not know why. So, just tell the
- user everything went fine. This is
- a terrible hack, but I do not know
- how to do this correctly. */
- result = 0;
- } else
- goto out;
- }
-#ifdef CONFIG_NCPFS_STRONG
- if ((!result) && (info_mask & DM_ATTRIBUTES))
- NCP_FINFO(inode)->nwattr = info.attributes;
-#endif
- }
- if (result)
- goto out;
-
- setattr_copy(inode, attr);
- mark_inode_dirty(inode);
-
-out:
- if (result > 0)
- result = -EACCES;
- return result;
-}
-
-static struct dentry *ncp_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_nodev(fs_type, flags, data, ncp_fill_super);
-}
-
-static struct file_system_type ncp_fs_type = {
- .owner = THIS_MODULE,
- .name = "ncpfs",
- .mount = ncp_mount,
- .kill_sb = kill_anon_super,
- .fs_flags = FS_BINARY_MOUNTDATA,
-};
-MODULE_ALIAS_FS("ncpfs");
-
-static int __init init_ncp_fs(void)
-{
- int err;
- ncp_dbg(1, "called\n");
-
- err = init_inodecache();
- if (err)
- goto out1;
- err = register_filesystem(&ncp_fs_type);
- if (err)
- goto out;
- return 0;
-out:
- destroy_inodecache();
-out1:
- return err;
-}
-
-static void __exit exit_ncp_fs(void)
-{
- ncp_dbg(1, "called\n");
- unregister_filesystem(&ncp_fs_type);
- destroy_inodecache();
-}
-
-module_init(init_ncp_fs)
-module_exit(exit_ncp_fs)
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/ncpfs/ioctl.c b/drivers/staging/ncpfs/ioctl.c
deleted file mode 100644
index d378b98cd7b6..000000000000
--- a/drivers/staging/ncpfs/ioctl.c
+++ /dev/null
@@ -1,923 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ioctl.c
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
- * Modified 1998, 1999 Wolfram Pienkoss for NLS
- *
- */
-
-#include <linux/capability.h>
-#include <linux/compat.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/ioctl.h>
-#include <linux/time.h>
-#include <linux/mm.h>
-#include <linux/mount.h>
-#include <linux/slab.h>
-#include <linux/highuid.h>
-#include <linux/vmalloc.h>
-#include <linux/sched.h>
-#include <linux/cred.h>
-
-#include <linux/uaccess.h>
-
-#include "ncp_fs.h"
-
-/* maximum limit for ncp_objectname_ioctl */
-#define NCP_OBJECT_NAME_MAX_LEN 4096
-/* maximum limit for ncp_privatedata_ioctl */
-#define NCP_PRIVATE_DATA_MAX_LEN 8192
-/* maximum negotiable packet size */
-#define NCP_PACKET_SIZE_INTERNAL 65536
-
-static int
-ncp_get_fs_info(struct ncp_server * server, struct inode *inode,
- struct ncp_fs_info __user *arg)
-{
- struct ncp_fs_info info;
-
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
- if (info.version != NCP_GET_FS_INFO_VERSION) {
- ncp_dbg(1, "info.version invalid: %d\n", info.version);
- return -EINVAL;
- }
- /* TODO: info.addr = server->m.serv_addr; */
- SET_UID(info.mounted_uid, from_kuid_munged(current_user_ns(), server->m.mounted_uid));
- info.connection = server->connection;
- info.buffer_size = server->buffer_size;
- info.volume_number = NCP_FINFO(inode)->volNumber;
- info.directory_id = NCP_FINFO(inode)->DosDirNum;
-
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
-static int
-ncp_get_fs_info_v2(struct ncp_server * server, struct inode *inode,
- struct ncp_fs_info_v2 __user * arg)
-{
- struct ncp_fs_info_v2 info2;
-
- if (copy_from_user(&info2, arg, sizeof(info2)))
- return -EFAULT;
-
- if (info2.version != NCP_GET_FS_INFO_VERSION_V2) {
- ncp_dbg(1, "info.version invalid: %d\n", info2.version);
- return -EINVAL;
- }
- info2.mounted_uid = from_kuid_munged(current_user_ns(), server->m.mounted_uid);
- info2.connection = server->connection;
- info2.buffer_size = server->buffer_size;
- info2.volume_number = NCP_FINFO(inode)->volNumber;
- info2.directory_id = NCP_FINFO(inode)->DosDirNum;
- info2.dummy1 = info2.dummy2 = info2.dummy3 = 0;
-
- if (copy_to_user(arg, &info2, sizeof(info2)))
- return -EFAULT;
- return 0;
-}
-
-#ifdef CONFIG_COMPAT
-struct compat_ncp_objectname_ioctl
-{
- s32 auth_type;
- u32 object_name_len;
- compat_caddr_t object_name; /* a userspace data, in most cases user name */
-};
-
-struct compat_ncp_fs_info_v2 {
- s32 version;
- u32 mounted_uid;
- u32 connection;
- u32 buffer_size;
-
- u32 volume_number;
- u32 directory_id;
-
- u32 dummy1;
- u32 dummy2;
- u32 dummy3;
-};
-
-struct compat_ncp_ioctl_request {
- u32 function;
- u32 size;
- compat_caddr_t data;
-};
-
-struct compat_ncp_privatedata_ioctl
-{
- u32 len;
- compat_caddr_t data; /* ~1000 for NDS */
-};
-
-#define NCP_IOC_GET_FS_INFO_V2_32 _IOWR('n', 4, struct compat_ncp_fs_info_v2)
-#define NCP_IOC_NCPREQUEST_32 _IOR('n', 1, struct compat_ncp_ioctl_request)
-#define NCP_IOC_GETOBJECTNAME_32 _IOWR('n', 9, struct compat_ncp_objectname_ioctl)
-#define NCP_IOC_SETOBJECTNAME_32 _IOR('n', 9, struct compat_ncp_objectname_ioctl)
-#define NCP_IOC_GETPRIVATEDATA_32 _IOWR('n', 10, struct compat_ncp_privatedata_ioctl)
-#define NCP_IOC_SETPRIVATEDATA_32 _IOR('n', 10, struct compat_ncp_privatedata_ioctl)
-
-static int
-ncp_get_compat_fs_info_v2(struct ncp_server * server, struct inode *inode,
- struct compat_ncp_fs_info_v2 __user * arg)
-{
- struct compat_ncp_fs_info_v2 info2;
-
- if (copy_from_user(&info2, arg, sizeof(info2)))
- return -EFAULT;
-
- if (info2.version != NCP_GET_FS_INFO_VERSION_V2) {
- ncp_dbg(1, "info.version invalid: %d\n", info2.version);
- return -EINVAL;
- }
- info2.mounted_uid = from_kuid_munged(current_user_ns(), server->m.mounted_uid);
- info2.connection = server->connection;
- info2.buffer_size = server->buffer_size;
- info2.volume_number = NCP_FINFO(inode)->volNumber;
- info2.directory_id = NCP_FINFO(inode)->DosDirNum;
- info2.dummy1 = info2.dummy2 = info2.dummy3 = 0;
-
- if (copy_to_user(arg, &info2, sizeof(info2)))
- return -EFAULT;
- return 0;
-}
-#endif
-
-#define NCP_IOC_GETMOUNTUID16 _IOW('n', 2, u16)
-#define NCP_IOC_GETMOUNTUID32 _IOW('n', 2, u32)
-#define NCP_IOC_GETMOUNTUID64 _IOW('n', 2, u64)
-
-#ifdef CONFIG_NCPFS_NLS
-/* Here we are select the iocharset and the codepage for NLS.
- * Thanks Petr Vandrovec for idea and many hints.
- */
-static int
-ncp_set_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
-{
- struct ncp_nls_ioctl user;
- struct nls_table *codepage;
- struct nls_table *iocharset;
- struct nls_table *oldset_io;
- struct nls_table *oldset_cp;
- int utf8;
- int err;
-
- if (copy_from_user(&user, arg, sizeof(user)))
- return -EFAULT;
-
- codepage = NULL;
- user.codepage[NCP_IOCSNAME_LEN] = 0;
- if (!user.codepage[0] || !strcmp(user.codepage, "default"))
- codepage = load_nls_default();
- else {
- codepage = load_nls(user.codepage);
- if (!codepage) {
- return -EBADRQC;
- }
- }
-
- iocharset = NULL;
- user.iocharset[NCP_IOCSNAME_LEN] = 0;
- if (!user.iocharset[0] || !strcmp(user.iocharset, "default")) {
- iocharset = load_nls_default();
- utf8 = 0;
- } else if (!strcmp(user.iocharset, "utf8")) {
- iocharset = load_nls_default();
- utf8 = 1;
- } else {
- iocharset = load_nls(user.iocharset);
- if (!iocharset) {
- unload_nls(codepage);
- return -EBADRQC;
- }
- utf8 = 0;
- }
-
- mutex_lock(&server->root_setup_lock);
- if (server->root_setuped) {
- oldset_cp = codepage;
- oldset_io = iocharset;
- err = -EBUSY;
- } else {
- if (utf8)
- NCP_SET_FLAG(server, NCP_FLAG_UTF8);
- else
- NCP_CLR_FLAG(server, NCP_FLAG_UTF8);
- oldset_cp = server->nls_vol;
- server->nls_vol = codepage;
- oldset_io = server->nls_io;
- server->nls_io = iocharset;
- err = 0;
- }
- mutex_unlock(&server->root_setup_lock);
- unload_nls(oldset_cp);
- unload_nls(oldset_io);
-
- return err;
-}
-
-static int
-ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
-{
- struct ncp_nls_ioctl user;
- int len;
-
- memset(&user, 0, sizeof(user));
- mutex_lock(&server->root_setup_lock);
- if (server->nls_vol && server->nls_vol->charset) {
- len = strlen(server->nls_vol->charset);
- if (len > NCP_IOCSNAME_LEN)
- len = NCP_IOCSNAME_LEN;
- strncpy(user.codepage, server->nls_vol->charset, len);
- user.codepage[len] = 0;
- }
-
- if (NCP_IS_FLAG(server, NCP_FLAG_UTF8))
- strcpy(user.iocharset, "utf8");
- else if (server->nls_io && server->nls_io->charset) {
- len = strlen(server->nls_io->charset);
- if (len > NCP_IOCSNAME_LEN)
- len = NCP_IOCSNAME_LEN;
- strncpy(user.iocharset, server->nls_io->charset, len);
- user.iocharset[len] = 0;
- }
- mutex_unlock(&server->root_setup_lock);
-
- if (copy_to_user(arg, &user, sizeof(user)))
- return -EFAULT;
- return 0;
-}
-#endif /* CONFIG_NCPFS_NLS */
-
-static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg)
-{
- struct ncp_server *server = NCP_SERVER(inode);
- int result;
- struct ncp_ioctl_request request;
- char* bouncebuffer;
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
-#ifdef CONFIG_COMPAT
- case NCP_IOC_NCPREQUEST_32:
-#endif
- case NCP_IOC_NCPREQUEST:
-#ifdef CONFIG_COMPAT
- if (cmd == NCP_IOC_NCPREQUEST_32) {
- struct compat_ncp_ioctl_request request32;
- if (copy_from_user(&request32, argp, sizeof(request32)))
- return -EFAULT;
- request.function = request32.function;
- request.size = request32.size;
- request.data = compat_ptr(request32.data);
- } else
-#endif
- if (copy_from_user(&request, argp, sizeof(request)))
- return -EFAULT;
-
- if ((request.function > 255)
- || (request.size >
- NCP_PACKET_SIZE - sizeof(struct ncp_request_header))) {
- return -EINVAL;
- }
- bouncebuffer = vmalloc(NCP_PACKET_SIZE_INTERNAL);
- if (!bouncebuffer)
- return -ENOMEM;
- if (copy_from_user(bouncebuffer, request.data, request.size)) {
- vfree(bouncebuffer);
- return -EFAULT;
- }
- ncp_lock_server(server);
-
- /* FIXME: We hack around in the server's structures
- here to be able to use ncp_request */
-
- server->has_subfunction = 0;
- server->current_size = request.size;
- memcpy(server->packet, bouncebuffer, request.size);
-
- result = ncp_request2(server, request.function,
- bouncebuffer, NCP_PACKET_SIZE_INTERNAL);
- if (result < 0)
- result = -EIO;
- else
- result = server->reply_size;
- ncp_unlock_server(server);
- ncp_dbg(1, "copy %d bytes\n", result);
- if (result >= 0)
- if (copy_to_user(request.data, bouncebuffer, result))
- result = -EFAULT;
- vfree(bouncebuffer);
- return result;
-
- case NCP_IOC_CONN_LOGGED_IN:
-
- if (!(server->m.int_flags & NCP_IMOUNT_LOGGEDIN_POSSIBLE))
- return -EINVAL;
- mutex_lock(&server->root_setup_lock);
- if (server->root_setuped)
- result = -EBUSY;
- else {
- result = ncp_conn_logged_in(inode->i_sb);
- if (result == 0)
- server->root_setuped = 1;
- }
- mutex_unlock(&server->root_setup_lock);
- return result;
-
- case NCP_IOC_GET_FS_INFO:
- return ncp_get_fs_info(server, inode, argp);
-
- case NCP_IOC_GET_FS_INFO_V2:
- return ncp_get_fs_info_v2(server, inode, argp);
-
-#ifdef CONFIG_COMPAT
- case NCP_IOC_GET_FS_INFO_V2_32:
- return ncp_get_compat_fs_info_v2(server, inode, argp);
-#endif
- /* we have too many combinations of CONFIG_COMPAT,
- * CONFIG_64BIT and CONFIG_UID16, so just handle
- * any of the possible ioctls */
- case NCP_IOC_GETMOUNTUID16:
- {
- u16 uid;
-
- SET_UID(uid, from_kuid_munged(current_user_ns(), server->m.mounted_uid));
- if (put_user(uid, (u16 __user *)argp))
- return -EFAULT;
- return 0;
- }
- case NCP_IOC_GETMOUNTUID32:
- {
- uid_t uid = from_kuid_munged(current_user_ns(), server->m.mounted_uid);
- if (put_user(uid, (u32 __user *)argp))
- return -EFAULT;
- return 0;
- }
- case NCP_IOC_GETMOUNTUID64:
- {
- uid_t uid = from_kuid_munged(current_user_ns(), server->m.mounted_uid);
- if (put_user(uid, (u64 __user *)argp))
- return -EFAULT;
- return 0;
- }
- case NCP_IOC_GETROOT:
- {
- struct ncp_setroot_ioctl sr;
-
- result = -EACCES;
- mutex_lock(&server->root_setup_lock);
- if (server->m.mounted_vol[0]) {
- struct dentry* dentry = inode->i_sb->s_root;
-
- if (dentry) {
- struct inode* s_inode = d_inode(dentry);
-
- if (s_inode) {
- sr.volNumber = NCP_FINFO(s_inode)->volNumber;
- sr.dirEntNum = NCP_FINFO(s_inode)->dirEntNum;
- sr.namespace = server->name_space[sr.volNumber];
- result = 0;
- } else
- ncp_dbg(1, "d_inode(s_root)==NULL\n");
- } else
- ncp_dbg(1, "s_root==NULL\n");
- } else {
- sr.volNumber = -1;
- sr.namespace = 0;
- sr.dirEntNum = 0;
- result = 0;
- }
- mutex_unlock(&server->root_setup_lock);
- if (!result && copy_to_user(argp, &sr, sizeof(sr)))
- result = -EFAULT;
- return result;
- }
-
- case NCP_IOC_SETROOT:
- {
- struct ncp_setroot_ioctl sr;
- __u32 vnum;
- __le32 de;
- __le32 dosde;
- struct dentry* dentry;
-
- if (copy_from_user(&sr, argp, sizeof(sr)))
- return -EFAULT;
- mutex_lock(&server->root_setup_lock);
- if (server->root_setuped)
- result = -EBUSY;
- else {
- if (sr.volNumber < 0) {
- server->m.mounted_vol[0] = 0;
- vnum = NCP_NUMBER_OF_VOLUMES;
- de = 0;
- dosde = 0;
- result = 0;
- } else if (sr.volNumber >= NCP_NUMBER_OF_VOLUMES) {
- result = -EINVAL;
- } else if (ncp_mount_subdir(server, sr.volNumber,
- sr.namespace, sr.dirEntNum,
- &vnum, &de, &dosde)) {
- result = -ENOENT;
- } else
- result = 0;
-
- if (result == 0) {
- dentry = inode->i_sb->s_root;
- if (dentry) {
- struct inode* s_inode = d_inode(dentry);
-
- if (s_inode) {
- NCP_FINFO(s_inode)->volNumber = vnum;
- NCP_FINFO(s_inode)->dirEntNum = de;
- NCP_FINFO(s_inode)->DosDirNum = dosde;
- server->root_setuped = 1;
- } else {
- ncp_dbg(1, "d_inode(s_root)==NULL\n");
- result = -EIO;
- }
- } else {
- ncp_dbg(1, "s_root==NULL\n");
- result = -EIO;
- }
- }
- }
- mutex_unlock(&server->root_setup_lock);
-
- return result;
- }
-
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
- case NCP_IOC_SIGN_INIT:
- {
- struct ncp_sign_init sign;
-
- if (argp)
- if (copy_from_user(&sign, argp, sizeof(sign)))
- return -EFAULT;
- ncp_lock_server(server);
- mutex_lock(&server->rcv.creq_mutex);
- if (argp) {
- if (server->sign_wanted) {
- memcpy(server->sign_root,sign.sign_root,8);
- memcpy(server->sign_last,sign.sign_last,16);
- server->sign_active = 1;
- }
- /* ignore when signatures not wanted */
- } else {
- server->sign_active = 0;
- }
- mutex_unlock(&server->rcv.creq_mutex);
- ncp_unlock_server(server);
- return 0;
- }
-
- case NCP_IOC_SIGN_WANTED:
- {
- int state;
-
- ncp_lock_server(server);
- state = server->sign_wanted;
- ncp_unlock_server(server);
- if (put_user(state, (int __user *)argp))
- return -EFAULT;
- return 0;
- }
-
- case NCP_IOC_SET_SIGN_WANTED:
- {
- int newstate;
-
- /* get only low 8 bits... */
- if (get_user(newstate, (unsigned char __user *)argp))
- return -EFAULT;
- result = 0;
- ncp_lock_server(server);
- if (server->sign_active) {
- /* cannot turn signatures OFF when active */
- if (!newstate)
- result = -EINVAL;
- } else {
- server->sign_wanted = newstate != 0;
- }
- ncp_unlock_server(server);
- return result;
- }
-
-#endif /* CONFIG_NCPFS_PACKET_SIGNING */
-
-#ifdef CONFIG_NCPFS_IOCTL_LOCKING
- case NCP_IOC_LOCKUNLOCK:
- {
- struct ncp_lock_ioctl rqdata;
-
- if (copy_from_user(&rqdata, argp, sizeof(rqdata)))
- return -EFAULT;
- if (rqdata.origin != 0)
- return -EINVAL;
- /* check for cmd */
- switch (rqdata.cmd) {
- case NCP_LOCK_EX:
- case NCP_LOCK_SH:
- if (rqdata.timeout < 0)
- return -EINVAL;
- if (rqdata.timeout == 0)
- rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;
- else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
- rqdata.timeout = NCP_LOCK_MAX_TIMEOUT;
- break;
- case NCP_LOCK_LOG:
- rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; /* has no effect */
- case NCP_LOCK_CLEAR:
- break;
- default:
- return -EINVAL;
- }
- /* locking needs both read and write access */
- if ((result = ncp_make_open(inode, O_RDWR)) != 0)
- {
- return result;
- }
- result = -EISDIR;
- if (!S_ISREG(inode->i_mode))
- goto outrel;
- if (rqdata.cmd == NCP_LOCK_CLEAR)
- {
- result = ncp_ClearPhysicalRecord(NCP_SERVER(inode),
- NCP_FINFO(inode)->file_handle,
- rqdata.offset,
- rqdata.length);
- if (result > 0) result = 0; /* no such lock */
- }
- else
- {
- int lockcmd;
-
- switch (rqdata.cmd)
- {
- case NCP_LOCK_EX: lockcmd=1; break;
- case NCP_LOCK_SH: lockcmd=3; break;
- default: lockcmd=0; break;
- }
- result = ncp_LogPhysicalRecord(NCP_SERVER(inode),
- NCP_FINFO(inode)->file_handle,
- lockcmd,
- rqdata.offset,
- rqdata.length,
- rqdata.timeout);
- if (result > 0) result = -EAGAIN;
- }
-outrel:
- ncp_inode_close(inode);
- return result;
- }
-#endif /* CONFIG_NCPFS_IOCTL_LOCKING */
-
-#ifdef CONFIG_COMPAT
- case NCP_IOC_GETOBJECTNAME_32:
- {
- struct compat_ncp_objectname_ioctl user;
- size_t outl;
-
- if (copy_from_user(&user, argp, sizeof(user)))
- return -EFAULT;
- down_read(&server->auth_rwsem);
- user.auth_type = server->auth.auth_type;
- outl = user.object_name_len;
- user.object_name_len = server->auth.object_name_len;
- if (outl > user.object_name_len)
- outl = user.object_name_len;
- result = 0;
- if (outl) {
- if (copy_to_user(compat_ptr(user.object_name),
- server->auth.object_name,
- outl))
- result = -EFAULT;
- }
- up_read(&server->auth_rwsem);
- if (!result && copy_to_user(argp, &user, sizeof(user)))
- result = -EFAULT;
- return result;
- }
-#endif
-
- case NCP_IOC_GETOBJECTNAME:
- {
- struct ncp_objectname_ioctl user;
- size_t outl;
-
- if (copy_from_user(&user, argp, sizeof(user)))
- return -EFAULT;
- down_read(&server->auth_rwsem);
- user.auth_type = server->auth.auth_type;
- outl = user.object_name_len;
- user.object_name_len = server->auth.object_name_len;
- if (outl > user.object_name_len)
- outl = user.object_name_len;
- result = 0;
- if (outl) {
- if (copy_to_user(user.object_name,
- server->auth.object_name,
- outl))
- result = -EFAULT;
- }
- up_read(&server->auth_rwsem);
- if (!result && copy_to_user(argp, &user, sizeof(user)))
- result = -EFAULT;
- return result;
- }
-
-#ifdef CONFIG_COMPAT
- case NCP_IOC_SETOBJECTNAME_32:
-#endif
- case NCP_IOC_SETOBJECTNAME:
- {
- struct ncp_objectname_ioctl user;
- void* newname;
- void* oldname;
- size_t oldnamelen;
- void* oldprivate;
- size_t oldprivatelen;
-
-#ifdef CONFIG_COMPAT
- if (cmd == NCP_IOC_SETOBJECTNAME_32) {
- struct compat_ncp_objectname_ioctl user32;
- if (copy_from_user(&user32, argp, sizeof(user32)))
- return -EFAULT;
- user.auth_type = user32.auth_type;
- user.object_name_len = user32.object_name_len;
- user.object_name = compat_ptr(user32.object_name);
- } else
-#endif
- if (copy_from_user(&user, argp, sizeof(user)))
- return -EFAULT;
-
- if (user.object_name_len > NCP_OBJECT_NAME_MAX_LEN)
- return -ENOMEM;
- if (user.object_name_len) {
- newname = memdup_user(user.object_name,
- user.object_name_len);
- if (IS_ERR(newname))
- return PTR_ERR(newname);
- } else {
- newname = NULL;
- }
- down_write(&server->auth_rwsem);
- oldname = server->auth.object_name;
- oldnamelen = server->auth.object_name_len;
- oldprivate = server->priv.data;
- oldprivatelen = server->priv.len;
- server->auth.auth_type = user.auth_type;
- server->auth.object_name_len = user.object_name_len;
- server->auth.object_name = newname;
- server->priv.len = 0;
- server->priv.data = NULL;
- up_write(&server->auth_rwsem);
- kfree(oldprivate);
- kfree(oldname);
- return 0;
- }
-
-#ifdef CONFIG_COMPAT
- case NCP_IOC_GETPRIVATEDATA_32:
-#endif
- case NCP_IOC_GETPRIVATEDATA:
- {
- struct ncp_privatedata_ioctl user;
- size_t outl;
-
-#ifdef CONFIG_COMPAT
- if (cmd == NCP_IOC_GETPRIVATEDATA_32) {
- struct compat_ncp_privatedata_ioctl user32;
- if (copy_from_user(&user32, argp, sizeof(user32)))
- return -EFAULT;
- user.len = user32.len;
- user.data = compat_ptr(user32.data);
- } else
-#endif
- if (copy_from_user(&user, argp, sizeof(user)))
- return -EFAULT;
-
- down_read(&server->auth_rwsem);
- outl = user.len;
- user.len = server->priv.len;
- if (outl > user.len) outl = user.len;
- result = 0;
- if (outl) {
- if (copy_to_user(user.data,
- server->priv.data,
- outl))
- result = -EFAULT;
- }
- up_read(&server->auth_rwsem);
- if (result)
- return result;
-#ifdef CONFIG_COMPAT
- if (cmd == NCP_IOC_GETPRIVATEDATA_32) {
- struct compat_ncp_privatedata_ioctl user32;
- user32.len = user.len;
- user32.data = (unsigned long) user.data;
- if (copy_to_user(argp, &user32, sizeof(user32)))
- return -EFAULT;
- } else
-#endif
- if (copy_to_user(argp, &user, sizeof(user)))
- return -EFAULT;
-
- return 0;
- }
-
-#ifdef CONFIG_COMPAT
- case NCP_IOC_SETPRIVATEDATA_32:
-#endif
- case NCP_IOC_SETPRIVATEDATA:
- {
- struct ncp_privatedata_ioctl user;
- void* new;
- void* old;
- size_t oldlen;
-
-#ifdef CONFIG_COMPAT
- if (cmd == NCP_IOC_SETPRIVATEDATA_32) {
- struct compat_ncp_privatedata_ioctl user32;
- if (copy_from_user(&user32, argp, sizeof(user32)))
- return -EFAULT;
- user.len = user32.len;
- user.data = compat_ptr(user32.data);
- } else
-#endif
- if (copy_from_user(&user, argp, sizeof(user)))
- return -EFAULT;
-
- if (user.len > NCP_PRIVATE_DATA_MAX_LEN)
- return -ENOMEM;
- if (user.len) {
- new = memdup_user(user.data, user.len);
- if (IS_ERR(new))
- return PTR_ERR(new);
- } else {
- new = NULL;
- }
- down_write(&server->auth_rwsem);
- old = server->priv.data;
- oldlen = server->priv.len;
- server->priv.len = user.len;
- server->priv.data = new;
- up_write(&server->auth_rwsem);
- kfree(old);
- return 0;
- }
-
-#ifdef CONFIG_NCPFS_NLS
- case NCP_IOC_SETCHARSETS:
- return ncp_set_charsets(server, argp);
-
- case NCP_IOC_GETCHARSETS:
- return ncp_get_charsets(server, argp);
-
-#endif /* CONFIG_NCPFS_NLS */
-
- case NCP_IOC_SETDENTRYTTL:
- {
- u_int32_t user;
-
- if (copy_from_user(&user, argp, sizeof(user)))
- return -EFAULT;
- /* 20 secs at most... */
- if (user > 20000)
- return -EINVAL;
- user = (user * HZ) / 1000;
- atomic_set(&server->dentry_ttl, user);
- return 0;
- }
-
- case NCP_IOC_GETDENTRYTTL:
- {
- u_int32_t user = (atomic_read(&server->dentry_ttl) * 1000) / HZ;
- if (copy_to_user(argp, &user, sizeof(user)))
- return -EFAULT;
- return 0;
- }
-
- }
- return -EINVAL;
-}
-
-long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct inode *inode = file_inode(filp);
- struct ncp_server *server = NCP_SERVER(inode);
- kuid_t uid = current_uid();
- int need_drop_write = 0;
- long ret;
-
- switch (cmd) {
- case NCP_IOC_SETCHARSETS:
- case NCP_IOC_CONN_LOGGED_IN:
- case NCP_IOC_SETROOT:
- if (!capable(CAP_SYS_ADMIN)) {
- ret = -EPERM;
- goto out;
- }
- break;
- }
- if (!uid_eq(server->m.mounted_uid, uid)) {
- switch (cmd) {
- /*
- * Only mount owner can issue these ioctls. Information
- * necessary to authenticate to other NDS servers are
- * stored here.
- */
- case NCP_IOC_GETOBJECTNAME:
- case NCP_IOC_SETOBJECTNAME:
- case NCP_IOC_GETPRIVATEDATA:
- case NCP_IOC_SETPRIVATEDATA:
-#ifdef CONFIG_COMPAT
- case NCP_IOC_GETOBJECTNAME_32:
- case NCP_IOC_SETOBJECTNAME_32:
- case NCP_IOC_GETPRIVATEDATA_32:
- case NCP_IOC_SETPRIVATEDATA_32:
-#endif
- ret = -EACCES;
- goto out;
- /*
- * These require write access on the inode if user id
- * does not match. Note that they do not write to the
- * file... But old code did mnt_want_write, so I keep
- * it as is. Of course not for mountpoint owner, as
- * that breaks read-only mounts altogether as ncpmount
- * needs working NCP_IOC_NCPREQUEST and
- * NCP_IOC_GET_FS_INFO. Some of these codes (setdentryttl,
- * signinit, setsignwanted) should be probably restricted
- * to owner only, or even more to CAP_SYS_ADMIN).
- */
- case NCP_IOC_GET_FS_INFO:
- case NCP_IOC_GET_FS_INFO_V2:
- case NCP_IOC_NCPREQUEST:
- case NCP_IOC_SETDENTRYTTL:
- case NCP_IOC_SIGN_INIT:
- case NCP_IOC_LOCKUNLOCK:
- case NCP_IOC_SET_SIGN_WANTED:
-#ifdef CONFIG_COMPAT
- case NCP_IOC_GET_FS_INFO_V2_32:
- case NCP_IOC_NCPREQUEST_32:
-#endif
- ret = mnt_want_write_file(filp);
- if (ret)
- goto out;
- need_drop_write = 1;
- ret = inode_permission(inode, MAY_WRITE);
- if (ret)
- goto outDropWrite;
- break;
- /*
- * Read access required.
- */
- case NCP_IOC_GETMOUNTUID16:
- case NCP_IOC_GETMOUNTUID32:
- case NCP_IOC_GETMOUNTUID64:
- case NCP_IOC_GETROOT:
- case NCP_IOC_SIGN_WANTED:
- ret = inode_permission(inode, MAY_READ);
- if (ret)
- goto out;
- break;
- /*
- * Anybody can read these.
- */
- case NCP_IOC_GETCHARSETS:
- case NCP_IOC_GETDENTRYTTL:
- default:
- /* Three codes below are protected by CAP_SYS_ADMIN above. */
- case NCP_IOC_SETCHARSETS:
- case NCP_IOC_CONN_LOGGED_IN:
- case NCP_IOC_SETROOT:
- break;
- }
- }
- ret = __ncp_ioctl(inode, cmd, arg);
-outDropWrite:
- if (need_drop_write)
- mnt_drop_write_file(filp);
-out:
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-long ncp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- arg = (unsigned long) compat_ptr(arg);
- ret = ncp_ioctl(file, cmd, arg);
- return ret;
-}
-#endif
diff --git a/drivers/staging/ncpfs/mmap.c b/drivers/staging/ncpfs/mmap.c
deleted file mode 100644
index a5c5cf2ff007..000000000000
--- a/drivers/staging/ncpfs/mmap.c
+++ /dev/null
@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * mmap.c
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
- *
- */
-
-#include <linux/stat.h>
-#include <linux/time.h>
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/mm.h>
-#include <linux/shm.h>
-#include <linux/errno.h>
-#include <linux/mman.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/memcontrol.h>
-
-#include <linux/uaccess.h>
-
-#include "ncp_fs.h"
-
-/*
- * Fill in the supplied page for mmap
- * XXX: how are we excluding truncate/invalidate here? Maybe need to lock
- * page?
- */
-static int ncp_file_mmap_fault(struct vm_fault *vmf)
-{
- struct inode *inode = file_inode(vmf->vma->vm_file);
- char *pg_addr;
- unsigned int already_read;
- unsigned int count;
- int bufsize;
- int pos; /* XXX: loff_t ? */
-
- /*
- * ncpfs has nothing against high pages as long
- * as recvmsg and memset works on it
- */
- vmf->page = alloc_page(GFP_HIGHUSER);
- if (!vmf->page)
- return VM_FAULT_OOM;
- pg_addr = kmap(vmf->page);
- pos = vmf->pgoff << PAGE_SHIFT;
-
- count = PAGE_SIZE;
- /* what we can read in one go */
- bufsize = NCP_SERVER(inode)->buffer_size;
-
- already_read = 0;
- if (ncp_make_open(inode, O_RDONLY) >= 0) {
- while (already_read < count) {
- int read_this_time;
- int to_read;
-
- to_read = bufsize - (pos % bufsize);
-
- to_read = min_t(unsigned int, to_read, count - already_read);
-
- if (ncp_read_kernel(NCP_SERVER(inode),
- NCP_FINFO(inode)->file_handle,
- pos, to_read,
- pg_addr + already_read,
- &read_this_time) != 0) {
- read_this_time = 0;
- }
- pos += read_this_time;
- already_read += read_this_time;
-
- if (read_this_time < to_read) {
- break;
- }
- }
- ncp_inode_close(inode);
-
- }
-
- if (already_read < PAGE_SIZE)
- memset(pg_addr + already_read, 0, PAGE_SIZE - already_read);
- flush_dcache_page(vmf->page);
- kunmap(vmf->page);
-
- /*
- * If I understand ncp_read_kernel() properly, the above always
- * fetches from the network, here the analogue of disk.
- * -- nyc
- */
- count_vm_event(PGMAJFAULT);
- count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
- return VM_FAULT_MAJOR;
-}
-
-static const struct vm_operations_struct ncp_file_mmap =
-{
- .fault = ncp_file_mmap_fault,
-};
-
-
-/* This is used for a general mmap of a ncp file */
-int ncp_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct inode *inode = file_inode(file);
-
- ncp_dbg(1, "called\n");
-
- if (!ncp_conn_valid(NCP_SERVER(inode)))
- return -EIO;
-
- /* only PAGE_COW or read-only supported now */
- if (vma->vm_flags & VM_SHARED)
- return -EINVAL;
- /* we do not support files bigger than 4GB... We eventually
- supports just 4GB... */
- if (vma_pages(vma) + vma->vm_pgoff
- > (1U << (32 - PAGE_SHIFT)))
- return -EFBIG;
-
- vma->vm_ops = &ncp_file_mmap;
- file_accessed(file);
- return 0;
-}
diff --git a/drivers/staging/ncpfs/ncp_fs.h b/drivers/staging/ncpfs/ncp_fs.h
deleted file mode 100644
index bdd262b6c198..000000000000
--- a/drivers/staging/ncpfs/ncp_fs.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/ncp_fs.h>
-#include "ncp_fs_i.h"
-#include "ncp_fs_sb.h"
-
-#undef NCPFS_PARANOIA
-#ifdef NCPFS_PARANOIA
-#define ncp_vdbg(fmt, ...) \
- pr_debug(fmt, ##__VA_ARGS__)
-#else
-#define ncp_vdbg(fmt, ...) \
-do { \
- if (0) \
- pr_debug(fmt, ##__VA_ARGS__); \
-} while (0)
-#endif
-
-#ifndef DEBUG_NCP
-#define DEBUG_NCP 0
-#endif
-
-#if DEBUG_NCP > 0 && !defined(DEBUG)
-#define DEBUG
-#endif
-
-#define ncp_dbg(level, fmt, ...) \
-do { \
- if (level <= DEBUG_NCP) \
- pr_debug(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define NCP_MAX_RPC_TIMEOUT (6*HZ)
-
-
-struct ncp_entry_info {
- struct nw_info_struct i;
- ino_t ino;
- int opened;
- int access;
- unsigned int volume;
- __u8 file_handle[6];
-};
-
-static inline struct ncp_server *NCP_SBP(const struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
-#define NCP_SERVER(inode) NCP_SBP((inode)->i_sb)
-static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode)
-{
- return container_of(inode, struct ncp_inode_info, vfs_inode);
-}
-
-/* linux/fs/ncpfs/inode.c */
-int ncp_notify_change(struct dentry *, struct iattr *);
-struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *);
-void ncp_update_inode(struct inode *, struct ncp_entry_info *);
-void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
-
-/* linux/fs/ncpfs/dir.c */
-extern const struct inode_operations ncp_dir_inode_operations;
-extern const struct file_operations ncp_dir_operations;
-extern const struct dentry_operations ncp_dentry_operations;
-int ncp_conn_logged_in(struct super_block *);
-int ncp_date_dos2unix(__le16 time, __le16 date);
-void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
-
-/* linux/fs/ncpfs/ioctl.c */
-long ncp_ioctl(struct file *, unsigned int, unsigned long);
-long ncp_compat_ioctl(struct file *, unsigned int, unsigned long);
-
-/* linux/fs/ncpfs/sock.c */
-int ncp_request2(struct ncp_server *server, int function,
- void* reply, int max_reply_size);
-static inline int ncp_request(struct ncp_server *server, int function) {
- return ncp_request2(server, function, server->packet, server->packet_size);
-}
-int ncp_connect(struct ncp_server *server);
-int ncp_disconnect(struct ncp_server *server);
-void ncp_lock_server(struct ncp_server *server);
-void ncp_unlock_server(struct ncp_server *server);
-
-/* linux/fs/ncpfs/symlink.c */
-#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
-extern const struct address_space_operations ncp_symlink_aops;
-int ncp_symlink(struct inode*, struct dentry*, const char*);
-#endif
-
-/* linux/fs/ncpfs/file.c */
-extern const struct inode_operations ncp_file_inode_operations;
-extern const struct file_operations ncp_file_operations;
-int ncp_make_open(struct inode *, int);
-
-/* linux/fs/ncpfs/mmap.c */
-int ncp_mmap(struct file *, struct vm_area_struct *);
-
-/* linux/fs/ncpfs/ncplib_kernel.c */
-int ncp_make_closed(struct inode *);
-
-#include "ncplib_kernel.h"
diff --git a/drivers/staging/ncpfs/ncp_fs_i.h b/drivers/staging/ncpfs/ncp_fs_i.h
deleted file mode 100644
index 3432bafb53a5..000000000000
--- a/drivers/staging/ncpfs/ncp_fs_i.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ncp_fs_i.h
- *
- * Copyright (C) 1995 Volker Lendecke
- *
- */
-
-#ifndef _LINUX_NCP_FS_I
-#define _LINUX_NCP_FS_I
-
-/*
- * This is the ncpfs part of the inode structure. This must contain
- * all the information we need to work with an inode after creation.
- */
-struct ncp_inode_info {
- __le32 dirEntNum;
- __le32 DosDirNum;
- __u8 volNumber;
- __le32 nwattr;
- struct mutex open_mutex;
- atomic_t opened;
- int access;
- int flags;
-#define NCPI_KLUDGE_SYMLINK 0x0001
-#define NCPI_DIR_CACHE 0x0002
- __u8 file_handle[6];
- struct inode vfs_inode;
-};
-
-#endif /* _LINUX_NCP_FS_I */
diff --git a/drivers/staging/ncpfs/ncp_fs_sb.h b/drivers/staging/ncpfs/ncp_fs_sb.h
deleted file mode 100644
index f06cde4adf71..000000000000
--- a/drivers/staging/ncpfs/ncp_fs_sb.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ncp_fs_sb.h
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- *
- */
-
-#ifndef _NCP_FS_SB
-#define _NCP_FS_SB
-
-#include <linux/types.h>
-#include <linux/ncp_mount.h>
-#include <linux/net.h>
-#include <linux/mutex.h>
-#include <linux/backing-dev.h>
-#include <linux/workqueue.h>
-
-#define NCP_DEFAULT_OPTIONS 0 /* 2 for packet signatures */
-
-struct sock;
-
-struct ncp_mount_data_kernel {
- unsigned long flags; /* NCP_MOUNT_* flags */
- unsigned int int_flags; /* internal flags */
-#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001
- kuid_t mounted_uid; /* Who may umount() this filesystem? */
- struct pid *wdog_pid; /* Who cares for our watchdog packets? */
- unsigned int ncp_fd; /* The socket to the ncp port */
- unsigned int time_out; /* How long should I wait after
- sending a NCP request? */
- unsigned int retry_count; /* And how often should I retry? */
- unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
- kuid_t uid;
- kgid_t gid;
- umode_t file_mode;
- umode_t dir_mode;
- int info_fd;
-};
-
-struct ncp_server {
- struct rcu_head rcu;
- struct ncp_mount_data_kernel m; /* Nearly all of the mount data is of
- interest for us later, so we store
- it completely. */
-
- __u8 name_space[NCP_NUMBER_OF_VOLUMES + 2];
-
- struct socket *ncp_sock;/* ncp socket */
- struct socket *info_sock;
-
- u8 sequence;
- u8 task;
- u16 connection; /* Remote connection number */
-
- u8 completion; /* Status message from server */
- u8 conn_status; /* Bit 4 = 1 ==> Server going down, no
- requests allowed anymore.
- Bit 0 = 1 ==> Server is down. */
-
- int buffer_size; /* Negotiated bufsize */
-
- int reply_size; /* Size of last reply */
-
- int packet_size;
- unsigned char *packet; /* Here we prepare requests and
- receive replies */
- unsigned char *txbuf; /* Storage for current request */
- unsigned char *rxbuf; /* Storage for reply to current request */
-
- int lock; /* To prevent mismatch in protocols. */
- struct mutex mutex;
-
- int current_size; /* for packet preparation */
- int has_subfunction;
- int ncp_reply_size;
-
- int root_setuped;
- struct mutex root_setup_lock;
-
- /* info for packet signing */
- int sign_wanted; /* 1=Server needs signed packets */
- int sign_active; /* 0=don't do signing, 1=do */
- char sign_root[8]; /* generated from password and encr. key */
- char sign_last[16];
-
- /* Authentication info: NDS or BINDERY, username */
- struct {
- int auth_type;
- size_t object_name_len;
- void* object_name;
- int object_type;
- } auth;
- /* Password info */
- struct {
- size_t len;
- void* data;
- } priv;
- struct rw_semaphore auth_rwsem;
-
- /* nls info: codepage for volume and charset for I/O */
- struct nls_table *nls_vol;
- struct nls_table *nls_io;
-
- /* maximum age in jiffies */
- atomic_t dentry_ttl;
-
- /* miscellaneous */
- unsigned int flags;
-
- spinlock_t requests_lock; /* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */
-
- void (*data_ready)(struct sock* sk);
- void (*error_report)(struct sock* sk);
- void (*write_space)(struct sock* sk); /* STREAM mode only */
- struct {
- struct work_struct tq; /* STREAM/DGRAM: data/error ready */
- struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */
- struct mutex creq_mutex; /* DGRAM only: lock accesses to rcv.creq */
-
- unsigned int state; /* STREAM only: receiver state */
- struct {
- __u32 magic __packed;
- __u32 len __packed;
- __u16 type __packed;
- __u16 p1 __packed;
- __u16 p2 __packed;
- __u16 p3 __packed;
- __u16 type2 __packed;
- } buf; /* STREAM only: temporary buffer */
- unsigned char* ptr; /* STREAM only: pointer to data */
- size_t len; /* STREAM only: length of data to receive */
- } rcv;
- struct {
- struct list_head requests; /* STREAM only: queued requests */
- struct work_struct tq; /* STREAM only: transmitter ready */
- struct ncp_request_reply* creq; /* STREAM only: currently transmitted entry */
- } tx;
- struct timer_list timeout_tm; /* DGRAM only: timeout timer */
- struct work_struct timeout_tq; /* DGRAM only: associated queue, we run timers from process context */
- int timeout_last; /* DGRAM only: current timeout length */
- int timeout_retries; /* DGRAM only: retries left */
- struct {
- size_t len;
- __u8 data[128];
- } unexpected_packet;
-};
-
-extern void ncp_tcp_rcv_proc(struct work_struct *work);
-extern void ncp_tcp_tx_proc(struct work_struct *work);
-extern void ncpdgram_rcv_proc(struct work_struct *work);
-extern void ncpdgram_timeout_proc(struct work_struct *work);
-extern void ncpdgram_timeout_call(struct timer_list *t);
-extern void ncp_tcp_data_ready(struct sock* sk);
-extern void ncp_tcp_write_space(struct sock* sk);
-extern void ncp_tcp_error_report(struct sock* sk);
-
-#define NCP_FLAG_UTF8 1
-
-#define NCP_CLR_FLAG(server, flag) ((server)->flags &= ~(flag))
-#define NCP_SET_FLAG(server, flag) ((server)->flags |= (flag))
-#define NCP_IS_FLAG(server, flag) ((server)->flags & (flag))
-
-static inline int ncp_conn_valid(struct ncp_server *server)
-{
- return ((server->conn_status & 0x11) == 0);
-}
-
-static inline void ncp_invalidate_conn(struct ncp_server *server)
-{
- server->conn_status |= 0x01;
-}
-
-#endif
diff --git a/drivers/staging/ncpfs/ncplib_kernel.c b/drivers/staging/ncpfs/ncplib_kernel.c
deleted file mode 100644
index 3e047eb4cc7c..000000000000
--- a/drivers/staging/ncpfs/ncplib_kernel.c
+++ /dev/null
@@ -1,1326 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ncplib_kernel.c
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- * Modified for big endian by J.F. Chadima and David S. Miller
- * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
- * Modified 1999 Wolfram Pienkoss for NLS
- * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "ncp_fs.h"
-
-static inline void assert_server_locked(struct ncp_server *server)
-{
- if (server->lock == 0) {
- ncp_dbg(1, "server not locked!\n");
- }
-}
-
-static void ncp_add_byte(struct ncp_server *server, __u8 x)
-{
- assert_server_locked(server);
- *(__u8 *) (&(server->packet[server->current_size])) = x;
- server->current_size += 1;
- return;
-}
-
-static void ncp_add_word(struct ncp_server *server, __le16 x)
-{
- assert_server_locked(server);
- put_unaligned(x, (__le16 *) (&(server->packet[server->current_size])));
- server->current_size += 2;
- return;
-}
-
-static void ncp_add_be16(struct ncp_server *server, __u16 x)
-{
- assert_server_locked(server);
- put_unaligned(cpu_to_be16(x), (__be16 *) (&(server->packet[server->current_size])));
- server->current_size += 2;
-}
-
-static void ncp_add_dword(struct ncp_server *server, __le32 x)
-{
- assert_server_locked(server);
- put_unaligned(x, (__le32 *) (&(server->packet[server->current_size])));
- server->current_size += 4;
- return;
-}
-
-static void ncp_add_be32(struct ncp_server *server, __u32 x)
-{
- assert_server_locked(server);
- put_unaligned(cpu_to_be32(x), (__be32 *)(&(server->packet[server->current_size])));
- server->current_size += 4;
-}
-
-static inline void ncp_add_dword_lh(struct ncp_server *server, __u32 x) {
- ncp_add_dword(server, cpu_to_le32(x));
-}
-
-static void ncp_add_mem(struct ncp_server *server, const void *source, int size)
-{
- assert_server_locked(server);
- memcpy(&(server->packet[server->current_size]), source, size);
- server->current_size += size;
- return;
-}
-
-static void ncp_add_pstring(struct ncp_server *server, const char *s)
-{
- int len = strlen(s);
- assert_server_locked(server);
- if (len > 255) {
- ncp_dbg(1, "string too long: %s\n", s);
- len = 255;
- }
- ncp_add_byte(server, len);
- ncp_add_mem(server, s, len);
- return;
-}
-
-static inline void ncp_init_request(struct ncp_server *server)
-{
- ncp_lock_server(server);
-
- server->current_size = sizeof(struct ncp_request_header);
- server->has_subfunction = 0;
-}
-
-static inline void ncp_init_request_s(struct ncp_server *server, int subfunction)
-{
- ncp_lock_server(server);
-
- server->current_size = sizeof(struct ncp_request_header) + 2;
- ncp_add_byte(server, subfunction);
-
- server->has_subfunction = 1;
-}
-
-static inline char *
-ncp_reply_data(struct ncp_server *server, int offset)
-{
- return &(server->packet[sizeof(struct ncp_reply_header) + offset]);
-}
-
-static inline u8 BVAL(const void *data)
-{
- return *(const u8 *)data;
-}
-
-static u8 ncp_reply_byte(struct ncp_server *server, int offset)
-{
- return *(const u8 *)ncp_reply_data(server, offset);
-}
-
-static inline u16 WVAL_LH(const void *data)
-{
- return get_unaligned_le16(data);
-}
-
-static u16
-ncp_reply_le16(struct ncp_server *server, int offset)
-{
- return get_unaligned_le16(ncp_reply_data(server, offset));
-}
-
-static u16
-ncp_reply_be16(struct ncp_server *server, int offset)
-{
- return get_unaligned_be16(ncp_reply_data(server, offset));
-}
-
-static inline u32 DVAL_LH(const void *data)
-{
- return get_unaligned_le32(data);
-}
-
-static __le32
-ncp_reply_dword(struct ncp_server *server, int offset)
-{
- return get_unaligned((__le32 *)ncp_reply_data(server, offset));
-}
-
-static inline __u32 ncp_reply_dword_lh(struct ncp_server* server, int offset) {
- return le32_to_cpu(ncp_reply_dword(server, offset));
-}
-
-int
-ncp_negotiate_buffersize(struct ncp_server *server, int size, int *target)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_be16(server, size);
-
- if ((result = ncp_request(server, 33)) != 0) {
- ncp_unlock_server(server);
- return result;
- }
- *target = min_t(unsigned int, ncp_reply_be16(server, 0), size);
-
- ncp_unlock_server(server);
- return 0;
-}
-
-
-/* options:
- * bit 0 ipx checksum
- * bit 1 packet signing
- */
-int
-ncp_negotiate_size_and_options(struct ncp_server *server,
- int size, int options, int *ret_size, int *ret_options) {
- int result;
-
- /* there is minimum */
- if (size < NCP_BLOCK_SIZE) size = NCP_BLOCK_SIZE;
-
- ncp_init_request(server);
- ncp_add_be16(server, size);
- ncp_add_byte(server, options);
-
- if ((result = ncp_request(server, 0x61)) != 0)
- {
- ncp_unlock_server(server);
- return result;
- }
-
- /* NCP over UDP returns 0 (!!!) */
- result = ncp_reply_be16(server, 0);
- if (result >= NCP_BLOCK_SIZE)
- size = min(result, size);
- *ret_size = size;
- *ret_options = ncp_reply_byte(server, 4);
-
- ncp_unlock_server(server);
- return 0;
-}
-
-int ncp_get_volume_info_with_number(struct ncp_server* server,
- int n, struct ncp_volume_info* target) {
- int result;
- int len;
-
- ncp_init_request_s(server, 44);
- ncp_add_byte(server, n);
-
- if ((result = ncp_request(server, 22)) != 0) {
- goto out;
- }
- target->total_blocks = ncp_reply_dword_lh(server, 0);
- target->free_blocks = ncp_reply_dword_lh(server, 4);
- target->purgeable_blocks = ncp_reply_dword_lh(server, 8);
- target->not_yet_purgeable_blocks = ncp_reply_dword_lh(server, 12);
- target->total_dir_entries = ncp_reply_dword_lh(server, 16);
- target->available_dir_entries = ncp_reply_dword_lh(server, 20);
- target->sectors_per_block = ncp_reply_byte(server, 28);
-
- memset(&(target->volume_name), 0, sizeof(target->volume_name));
-
- result = -EIO;
- len = ncp_reply_byte(server, 29);
- if (len > NCP_VOLNAME_LEN) {
- ncp_dbg(1, "volume name too long: %d\n", len);
- goto out;
- }
- memcpy(&(target->volume_name), ncp_reply_data(server, 30), len);
- result = 0;
-out:
- ncp_unlock_server(server);
- return result;
-}
-
-int ncp_get_directory_info(struct ncp_server* server, __u8 n,
- struct ncp_volume_info* target) {
- int result;
- int len;
-
- ncp_init_request_s(server, 45);
- ncp_add_byte(server, n);
-
- if ((result = ncp_request(server, 22)) != 0) {
- goto out;
- }
- target->total_blocks = ncp_reply_dword_lh(server, 0);
- target->free_blocks = ncp_reply_dword_lh(server, 4);
- target->purgeable_blocks = 0;
- target->not_yet_purgeable_blocks = 0;
- target->total_dir_entries = ncp_reply_dword_lh(server, 8);
- target->available_dir_entries = ncp_reply_dword_lh(server, 12);
- target->sectors_per_block = ncp_reply_byte(server, 20);
-
- memset(&(target->volume_name), 0, sizeof(target->volume_name));
-
- result = -EIO;
- len = ncp_reply_byte(server, 21);
- if (len > NCP_VOLNAME_LEN) {
- ncp_dbg(1, "volume name too long: %d\n", len);
- goto out;
- }
- memcpy(&(target->volume_name), ncp_reply_data(server, 22), len);
- result = 0;
-out:
- ncp_unlock_server(server);
- return result;
-}
-
-int
-ncp_close_file(struct ncp_server *server, const char *file_id)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 0);
- ncp_add_mem(server, file_id, 6);
-
- result = ncp_request(server, 66);
- ncp_unlock_server(server);
- return result;
-}
-
-int
-ncp_make_closed(struct inode *inode)
-{
- int err;
-
- err = 0;
- mutex_lock(&NCP_FINFO(inode)->open_mutex);
- if (atomic_read(&NCP_FINFO(inode)->opened) == 1) {
- atomic_set(&NCP_FINFO(inode)->opened, 0);
- err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle);
-
- if (!err)
- ncp_vdbg("volnum=%d, dirent=%u, error=%d\n",
- NCP_FINFO(inode)->volNumber,
- NCP_FINFO(inode)->dirEntNum, err);
- }
- mutex_unlock(&NCP_FINFO(inode)->open_mutex);
- return err;
-}
-
-static void ncp_add_handle_path(struct ncp_server *server, __u8 vol_num,
- __le32 dir_base, int have_dir_base,
- const char *path)
-{
- ncp_add_byte(server, vol_num);
- ncp_add_dword(server, dir_base);
- if (have_dir_base != 0) {
- ncp_add_byte(server, 1); /* dir_base */
- } else {
- ncp_add_byte(server, 0xff); /* no handle */
- }
- if (path != NULL) {
- ncp_add_byte(server, 1); /* 1 component */
- ncp_add_pstring(server, path);
- } else {
- ncp_add_byte(server, 0);
- }
-}
-
-int ncp_dirhandle_alloc(struct ncp_server* server, __u8 volnum, __le32 dirent,
- __u8* dirhandle) {
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 12); /* subfunction */
- ncp_add_byte(server, NW_NS_DOS);
- ncp_add_byte(server, 0);
- ncp_add_word(server, 0);
- ncp_add_handle_path(server, volnum, dirent, 1, NULL);
- if ((result = ncp_request(server, 87)) == 0) {
- *dirhandle = ncp_reply_byte(server, 0);
- }
- ncp_unlock_server(server);
- return result;
-}
-
-int ncp_dirhandle_free(struct ncp_server* server, __u8 dirhandle) {
- int result;
-
- ncp_init_request_s(server, 20);
- ncp_add_byte(server, dirhandle);
- result = ncp_request(server, 22);
- ncp_unlock_server(server);
- return result;
-}
-
-void ncp_extract_file_info(const void *structure, struct nw_info_struct *target)
-{
- const __u8 *name_len;
- const int info_struct_size = offsetof(struct nw_info_struct, nameLen);
-
- memcpy(target, structure, info_struct_size);
- name_len = structure + info_struct_size;
- target->nameLen = *name_len;
- memcpy(target->entryName, name_len + 1, *name_len);
- target->entryName[*name_len] = '\0';
- target->volNumber = le32_to_cpu(target->volNumber);
- return;
-}
-
-#ifdef CONFIG_NCPFS_NFS_NS
-static inline void ncp_extract_nfs_info(const unsigned char *structure,
- struct nw_nfs_info *target)
-{
- target->mode = DVAL_LH(structure);
- target->rdev = DVAL_LH(structure + 8);
-}
-#endif
-
-int ncp_obtain_nfs_info(struct ncp_server *server,
- struct nw_info_struct *target)
-
-{
- int result = 0;
-#ifdef CONFIG_NCPFS_NFS_NS
- __u32 volnum = target->volNumber;
-
- if (ncp_is_nfs_extras(server, volnum)) {
- ncp_init_request(server);
- ncp_add_byte(server, 19); /* subfunction */
- ncp_add_byte(server, server->name_space[volnum]);
- ncp_add_byte(server, NW_NS_NFS);
- ncp_add_byte(server, 0);
- ncp_add_byte(server, volnum);
- ncp_add_dword(server, target->dirEntNum);
- /* We must retrieve both nlinks and rdev, otherwise some server versions
- report zeroes instead of valid data */
- ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV);
-
- if ((result = ncp_request(server, 87)) == 0) {
- ncp_extract_nfs_info(ncp_reply_data(server, 0), &target->nfs);
- ncp_dbg(1, "(%s) mode=0%o, rdev=0x%x\n",
- target->entryName, target->nfs.mode,
- target->nfs.rdev);
- } else {
- target->nfs.mode = 0;
- target->nfs.rdev = 0;
- }
- ncp_unlock_server(server);
-
- } else
-#endif
- {
- target->nfs.mode = 0;
- target->nfs.rdev = 0;
- }
- return result;
-}
-
-/*
- * Returns information for a (one-component) name relative to
- * the specified directory.
- */
-int ncp_obtain_info(struct ncp_server *server, struct inode *dir, const char *path,
- struct nw_info_struct *target)
-{
- __u8 volnum = NCP_FINFO(dir)->volNumber;
- __le32 dirent = NCP_FINFO(dir)->dirEntNum;
- int result;
-
- if (target == NULL) {
- pr_err("%s: invalid call\n", __func__);
- return -EINVAL;
- }
- ncp_init_request(server);
- ncp_add_byte(server, 6); /* subfunction */
- ncp_add_byte(server, server->name_space[volnum]);
- ncp_add_byte(server, server->name_space[volnum]); /* N.B. twice ?? */
- ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */
- ncp_add_dword(server, RIM_ALL);
- ncp_add_handle_path(server, volnum, dirent, 1, path);
-
- if ((result = ncp_request(server, 87)) != 0)
- goto out;
- ncp_extract_file_info(ncp_reply_data(server, 0), target);
- ncp_unlock_server(server);
-
- result = ncp_obtain_nfs_info(server, target);
- return result;
-
-out:
- ncp_unlock_server(server);
- return result;
-}
-
-#ifdef CONFIG_NCPFS_NFS_NS
-static int
-ncp_obtain_DOS_dir_base(struct ncp_server *server,
- __u8 ns, __u8 volnum, __le32 dirent,
- const char *path, /* At most 1 component */
- __le32 *DOS_dir_base)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 6); /* subfunction */
- ncp_add_byte(server, ns);
- ncp_add_byte(server, ns);
- ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */
- ncp_add_dword(server, RIM_DIRECTORY);
- ncp_add_handle_path(server, volnum, dirent, 1, path);
-
- if ((result = ncp_request(server, 87)) == 0)
- {
- if (DOS_dir_base) *DOS_dir_base=ncp_reply_dword(server, 0x34);
- }
- ncp_unlock_server(server);
- return result;
-}
-#endif /* CONFIG_NCPFS_NFS_NS */
-
-static inline int
-ncp_get_known_namespace(struct ncp_server *server, __u8 volume)
-{
-#if defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS)
- int result;
- __u8 *namespace;
- __u16 no_namespaces;
-
- ncp_init_request(server);
- ncp_add_byte(server, 24); /* Subfunction: Get Name Spaces Loaded */
- ncp_add_word(server, 0);
- ncp_add_byte(server, volume);
-
- if ((result = ncp_request(server, 87)) != 0) {
- ncp_unlock_server(server);
- return NW_NS_DOS; /* not result ?? */
- }
-
- result = NW_NS_DOS;
- no_namespaces = ncp_reply_le16(server, 0);
- namespace = ncp_reply_data(server, 2);
-
- while (no_namespaces > 0) {
- ncp_dbg(1, "found %d on %d\n", *namespace, volume);
-
-#ifdef CONFIG_NCPFS_NFS_NS
- if ((*namespace == NW_NS_NFS) && !(server->m.flags&NCP_MOUNT_NO_NFS))
- {
- result = NW_NS_NFS;
- break;
- }
-#endif /* CONFIG_NCPFS_NFS_NS */
-#ifdef CONFIG_NCPFS_OS2_NS
- if ((*namespace == NW_NS_OS2) && !(server->m.flags&NCP_MOUNT_NO_OS2))
- {
- result = NW_NS_OS2;
- }
-#endif /* CONFIG_NCPFS_OS2_NS */
- namespace += 1;
- no_namespaces -= 1;
- }
- ncp_unlock_server(server);
- return result;
-#else /* neither OS2 nor NFS - only DOS */
- return NW_NS_DOS;
-#endif /* defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS) */
-}
-
-int
-ncp_update_known_namespace(struct ncp_server *server, __u8 volume, int *ret_ns)
-{
- int ns = ncp_get_known_namespace(server, volume);
-
- if (ret_ns)
- *ret_ns = ns;
-
- ncp_dbg(1, "namespace[%d] = %d\n", volume, server->name_space[volume]);
-
- if (server->name_space[volume] == ns)
- return 0;
- server->name_space[volume] = ns;
- return 1;
-}
-
-static int
-ncp_ObtainSpecificDirBase(struct ncp_server *server,
- __u8 nsSrc, __u8 nsDst, __u8 vol_num, __le32 dir_base,
- const char *path, /* At most 1 component */
- __le32 *dirEntNum, __le32 *DosDirNum)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 6); /* subfunction */
- ncp_add_byte(server, nsSrc);
- ncp_add_byte(server, nsDst);
- ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */
- ncp_add_dword(server, RIM_ALL);
- ncp_add_handle_path(server, vol_num, dir_base, 1, path);
-
- if ((result = ncp_request(server, 87)) != 0)
- {
- ncp_unlock_server(server);
- return result;
- }
-
- if (dirEntNum)
- *dirEntNum = ncp_reply_dword(server, 0x30);
- if (DosDirNum)
- *DosDirNum = ncp_reply_dword(server, 0x34);
- ncp_unlock_server(server);
- return 0;
-}
-
-int
-ncp_mount_subdir(struct ncp_server *server,
- __u8 volNumber, __u8 srcNS, __le32 dirEntNum,
- __u32* volume, __le32* newDirEnt, __le32* newDosEnt)
-{
- int dstNS;
- int result;
-
- ncp_update_known_namespace(server, volNumber, &dstNS);
- if ((result = ncp_ObtainSpecificDirBase(server, srcNS, dstNS, volNumber,
- dirEntNum, NULL, newDirEnt, newDosEnt)) != 0)
- {
- return result;
- }
- *volume = volNumber;
- server->m.mounted_vol[1] = 0;
- server->m.mounted_vol[0] = 'X';
- return 0;
-}
-
-int
-ncp_get_volume_root(struct ncp_server *server,
- const char *volname, __u32* volume, __le32* dirent, __le32* dosdirent)
-{
- int result;
-
- ncp_dbg(1, "looking up vol %s\n", volname);
-
- ncp_init_request(server);
- ncp_add_byte(server, 22); /* Subfunction: Generate dir handle */
- ncp_add_byte(server, 0); /* DOS namespace */
- ncp_add_byte(server, 0); /* reserved */
- ncp_add_byte(server, 0); /* reserved */
- ncp_add_byte(server, 0); /* reserved */
-
- ncp_add_byte(server, 0); /* faked volume number */
- ncp_add_dword(server, 0); /* faked dir_base */
- ncp_add_byte(server, 0xff); /* Don't have a dir_base */
- ncp_add_byte(server, 1); /* 1 path component */
- ncp_add_pstring(server, volname);
-
- if ((result = ncp_request(server, 87)) != 0) {
- ncp_unlock_server(server);
- return result;
- }
- *dirent = *dosdirent = ncp_reply_dword(server, 4);
- *volume = ncp_reply_byte(server, 8);
- ncp_unlock_server(server);
- return 0;
-}
-
-int
-ncp_lookup_volume(struct ncp_server *server,
- const char *volname, struct nw_info_struct *target)
-{
- int result;
-
- memset(target, 0, sizeof(*target));
- result = ncp_get_volume_root(server, volname,
- &target->volNumber, &target->dirEntNum, &target->DosDirNum);
- if (result) {
- return result;
- }
- ncp_update_known_namespace(server, target->volNumber, NULL);
- target->nameLen = strlen(volname);
- memcpy(target->entryName, volname, target->nameLen+1);
- target->attributes = aDIR;
- /* set dates to Jan 1, 1986 00:00 */
- target->creationTime = target->modifyTime = cpu_to_le16(0x0000);
- target->creationDate = target->modifyDate = target->lastAccessDate = cpu_to_le16(0x0C21);
- target->nfs.mode = 0;
- return 0;
-}
-
-int ncp_modify_file_or_subdir_dos_info_path(struct ncp_server *server,
- struct inode *dir,
- const char *path,
- __le32 info_mask,
- const struct nw_modify_dos_info *info)
-{
- __u8 volnum = NCP_FINFO(dir)->volNumber;
- __le32 dirent = NCP_FINFO(dir)->dirEntNum;
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 7); /* subfunction */
- ncp_add_byte(server, server->name_space[volnum]);
- ncp_add_byte(server, 0); /* reserved */
- ncp_add_word(server, cpu_to_le16(0x8006)); /* search attribs: all */
-
- ncp_add_dword(server, info_mask);
- ncp_add_mem(server, info, sizeof(*info));
- ncp_add_handle_path(server, volnum, dirent, 1, path);
-
- result = ncp_request(server, 87);
- ncp_unlock_server(server);
- return result;
-}
-
-int ncp_modify_file_or_subdir_dos_info(struct ncp_server *server,
- struct inode *dir,
- __le32 info_mask,
- const struct nw_modify_dos_info *info)
-{
- return ncp_modify_file_or_subdir_dos_info_path(server, dir, NULL,
- info_mask, info);
-}
-
-#ifdef CONFIG_NCPFS_NFS_NS
-int ncp_modify_nfs_info(struct ncp_server *server, __u8 volnum, __le32 dirent,
- __u32 mode, __u32 rdev)
-
-{
- int result = 0;
-
- ncp_init_request(server);
- if (server->name_space[volnum] == NW_NS_NFS) {
- ncp_add_byte(server, 25); /* subfunction */
- ncp_add_byte(server, server->name_space[volnum]);
- ncp_add_byte(server, NW_NS_NFS);
- ncp_add_byte(server, volnum);
- ncp_add_dword(server, dirent);
- /* we must always operate on both nlinks and rdev, otherwise
- rdev is not set */
- ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV);
- ncp_add_dword_lh(server, mode);
- ncp_add_dword_lh(server, 1); /* nlinks */
- ncp_add_dword_lh(server, rdev);
- result = ncp_request(server, 87);
- }
- ncp_unlock_server(server);
- return result;
-}
-#endif
-
-
-static int
-ncp_DeleteNSEntry(struct ncp_server *server,
- __u8 have_dir_base, __u8 volnum, __le32 dirent,
- const char* name, __u8 ns, __le16 attr)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 8); /* subfunction */
- ncp_add_byte(server, ns);
- ncp_add_byte(server, 0); /* reserved */
- ncp_add_word(server, attr); /* search attribs: all */
- ncp_add_handle_path(server, volnum, dirent, have_dir_base, name);
-
- result = ncp_request(server, 87);
- ncp_unlock_server(server);
- return result;
-}
-
-int
-ncp_del_file_or_subdir2(struct ncp_server *server,
- struct dentry *dentry)
-{
- struct inode *inode = d_inode(dentry);
- __u8 volnum;
- __le32 dirent;
-
- if (!inode) {
- return 0xFF; /* Any error */
- }
- volnum = NCP_FINFO(inode)->volNumber;
- dirent = NCP_FINFO(inode)->DosDirNum;
- return ncp_DeleteNSEntry(server, 1, volnum, dirent, NULL, NW_NS_DOS, cpu_to_le16(0x8006));
-}
-
-int
-ncp_del_file_or_subdir(struct ncp_server *server,
- struct inode *dir, const char *name)
-{
- __u8 volnum = NCP_FINFO(dir)->volNumber;
- __le32 dirent = NCP_FINFO(dir)->dirEntNum;
- int name_space;
-
- name_space = server->name_space[volnum];
-#ifdef CONFIG_NCPFS_NFS_NS
- if (name_space == NW_NS_NFS)
- {
- int result;
-
- result=ncp_obtain_DOS_dir_base(server, name_space, volnum, dirent, name, &dirent);
- if (result) return result;
- name = NULL;
- name_space = NW_NS_DOS;
- }
-#endif /* CONFIG_NCPFS_NFS_NS */
- return ncp_DeleteNSEntry(server, 1, volnum, dirent, name, name_space, cpu_to_le16(0x8006));
-}
-
-static inline void ConvertToNWfromDWORD(__u16 v0, __u16 v1, __u8 ret[6])
-{
- __le16 *dest = (__le16 *) ret;
- dest[1] = cpu_to_le16(v0);
- dest[2] = cpu_to_le16(v1);
- dest[0] = cpu_to_le16(v0 + 1);
- return;
-}
-
-/* If both dir and name are NULL, then in target there's already a
- looked-up entry that wants to be opened. */
-int ncp_open_create_file_or_subdir(struct ncp_server *server,
- struct inode *dir, const char *name,
- int open_create_mode,
- __le32 create_attributes,
- __le16 desired_acc_rights,
- struct ncp_entry_info *target)
-{
- __le16 search_attribs = cpu_to_le16(0x0006);
- __u8 volnum;
- __le32 dirent;
- int result;
-
- volnum = NCP_FINFO(dir)->volNumber;
- dirent = NCP_FINFO(dir)->dirEntNum;
-
- if ((create_attributes & aDIR) != 0) {
- search_attribs |= cpu_to_le16(0x8000);
- }
- ncp_init_request(server);
- ncp_add_byte(server, 1); /* subfunction */
- ncp_add_byte(server, server->name_space[volnum]);
- ncp_add_byte(server, open_create_mode);
- ncp_add_word(server, search_attribs);
- ncp_add_dword(server, RIM_ALL);
- ncp_add_dword(server, create_attributes);
- /* The desired acc rights seem to be the inherited rights mask
- for directories */
- ncp_add_word(server, desired_acc_rights);
- ncp_add_handle_path(server, volnum, dirent, 1, name);
-
- if ((result = ncp_request(server, 87)) != 0)
- goto out;
- if (!(create_attributes & aDIR))
- target->opened = 1;
-
- /* in target there's a new finfo to fill */
- ncp_extract_file_info(ncp_reply_data(server, 6), &(target->i));
- target->volume = target->i.volNumber;
- ConvertToNWfromDWORD(ncp_reply_le16(server, 0),
- ncp_reply_le16(server, 2),
- target->file_handle);
-
- ncp_unlock_server(server);
-
- (void)ncp_obtain_nfs_info(server, &(target->i));
- return 0;
-
-out:
- ncp_unlock_server(server);
- return result;
-}
-
-int
-ncp_initialize_search(struct ncp_server *server, struct inode *dir,
- struct nw_search_sequence *target)
-{
- __u8 volnum = NCP_FINFO(dir)->volNumber;
- __le32 dirent = NCP_FINFO(dir)->dirEntNum;
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 2); /* subfunction */
- ncp_add_byte(server, server->name_space[volnum]);
- ncp_add_byte(server, 0); /* reserved */
- ncp_add_handle_path(server, volnum, dirent, 1, NULL);
-
- result = ncp_request(server, 87);
- if (result)
- goto out;
- memcpy(target, ncp_reply_data(server, 0), sizeof(*target));
-
-out:
- ncp_unlock_server(server);
- return result;
-}
-
-int ncp_search_for_fileset(struct ncp_server *server,
- struct nw_search_sequence *seq,
- int* more,
- int* cnt,
- char* buffer,
- size_t bufsize,
- char** rbuf,
- size_t* rsize)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 20);
- ncp_add_byte(server, server->name_space[seq->volNumber]);
- ncp_add_byte(server, 0); /* datastream */
- ncp_add_word(server, cpu_to_le16(0x8006));
- ncp_add_dword(server, RIM_ALL);
- ncp_add_word(server, cpu_to_le16(32767)); /* max returned items */
- ncp_add_mem(server, seq, 9);
-#ifdef CONFIG_NCPFS_NFS_NS
- if (server->name_space[seq->volNumber] == NW_NS_NFS) {
- ncp_add_byte(server, 0); /* 0 byte pattern */
- } else
-#endif
- {
- ncp_add_byte(server, 2); /* 2 byte pattern */
- ncp_add_byte(server, 0xff); /* following is a wildcard */
- ncp_add_byte(server, '*');
- }
- result = ncp_request2(server, 87, buffer, bufsize);
- if (result) {
- ncp_unlock_server(server);
- return result;
- }
- if (server->ncp_reply_size < 12) {
- ncp_unlock_server(server);
- return 0xFF;
- }
- *rsize = server->ncp_reply_size - 12;
- ncp_unlock_server(server);
- buffer = buffer + sizeof(struct ncp_reply_header);
- *rbuf = buffer + 12;
- *cnt = WVAL_LH(buffer + 10);
- *more = BVAL(buffer + 9);
- memcpy(seq, buffer, 9);
- return 0;
-}
-
-static int
-ncp_RenameNSEntry(struct ncp_server *server,
- struct inode *old_dir, const char *old_name, __le16 old_type,
- struct inode *new_dir, const char *new_name)
-{
- int result = -EINVAL;
-
- if ((old_dir == NULL) || (old_name == NULL) ||
- (new_dir == NULL) || (new_name == NULL))
- goto out;
-
- ncp_init_request(server);
- ncp_add_byte(server, 4); /* subfunction */
- ncp_add_byte(server, server->name_space[NCP_FINFO(old_dir)->volNumber]);
- ncp_add_byte(server, 1); /* rename flag */
- ncp_add_word(server, old_type); /* search attributes */
-
- /* source Handle Path */
- ncp_add_byte(server, NCP_FINFO(old_dir)->volNumber);
- ncp_add_dword(server, NCP_FINFO(old_dir)->dirEntNum);
- ncp_add_byte(server, 1);
- ncp_add_byte(server, 1); /* 1 source component */
-
- /* dest Handle Path */
- ncp_add_byte(server, NCP_FINFO(new_dir)->volNumber);
- ncp_add_dword(server, NCP_FINFO(new_dir)->dirEntNum);
- ncp_add_byte(server, 1);
- ncp_add_byte(server, 1); /* 1 destination component */
-
- /* source path string */
- ncp_add_pstring(server, old_name);
- /* dest path string */
- ncp_add_pstring(server, new_name);
-
- result = ncp_request(server, 87);
- ncp_unlock_server(server);
-out:
- return result;
-}
-
-int ncp_ren_or_mov_file_or_subdir(struct ncp_server *server,
- struct inode *old_dir, const char *old_name,
- struct inode *new_dir, const char *new_name)
-{
- int result;
- __le16 old_type = cpu_to_le16(0x06);
-
-/* If somebody can do it atomic, call me... vandrove@vc.cvut.cz */
- result = ncp_RenameNSEntry(server, old_dir, old_name, old_type,
- new_dir, new_name);
- if (result == 0xFF) /* File Not Found, try directory */
- {
- old_type = cpu_to_le16(0x16);
- result = ncp_RenameNSEntry(server, old_dir, old_name, old_type,
- new_dir, new_name);
- }
- if (result != 0x92) return result; /* All except NO_FILES_RENAMED */
- result = ncp_del_file_or_subdir(server, new_dir, new_name);
- if (result != 0) return -EACCES;
- result = ncp_RenameNSEntry(server, old_dir, old_name, old_type,
- new_dir, new_name);
- return result;
-}
-
-
-/* We have to transfer to/from user space */
-int
-ncp_read_kernel(struct ncp_server *server, const char *file_id,
- __u32 offset, __u16 to_read, char *target, int *bytes_read)
-{
- const char *source;
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 0);
- ncp_add_mem(server, file_id, 6);
- ncp_add_be32(server, offset);
- ncp_add_be16(server, to_read);
-
- if ((result = ncp_request(server, 72)) != 0) {
- goto out;
- }
- *bytes_read = ncp_reply_be16(server, 0);
- if (*bytes_read > to_read) {
- result = -EINVAL;
- goto out;
- }
- source = ncp_reply_data(server, 2 + (offset & 1));
-
- memcpy(target, source, *bytes_read);
-out:
- ncp_unlock_server(server);
- return result;
-}
-
-/* There is a problem... egrep and some other silly tools do:
- x = mmap(NULL, MAP_PRIVATE, PROT_READ|PROT_WRITE, <ncpfs fd>, 32768);
- read(<ncpfs fd>, x, 32768);
- Now copying read result by copy_to_user causes pagefault. This pagefault
- could not be handled because of server was locked due to read. So we have
- to use temporary buffer. So ncp_unlock_server must be done before
- copy_to_user (and for write, copy_from_user must be done before
- ncp_init_request... same applies for send raw packet ioctl). Because of
- file is normally read in bigger chunks, caller provides kmalloced
- (vmalloced) chunk of memory with size >= to_read...
- */
-int
-ncp_read_bounce(struct ncp_server *server, const char *file_id,
- __u32 offset, __u16 to_read, struct iov_iter *to,
- int *bytes_read, void *bounce, __u32 bufsize)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 0);
- ncp_add_mem(server, file_id, 6);
- ncp_add_be32(server, offset);
- ncp_add_be16(server, to_read);
- result = ncp_request2(server, 72, bounce, bufsize);
- ncp_unlock_server(server);
- if (!result) {
- int len = get_unaligned_be16((char *)bounce +
- sizeof(struct ncp_reply_header));
- result = -EIO;
- if (len <= to_read) {
- char* source;
-
- source = (char*)bounce +
- sizeof(struct ncp_reply_header) + 2 +
- (offset & 1);
- *bytes_read = len;
- result = 0;
- if (copy_to_iter(source, len, to) != len)
- result = -EFAULT;
- }
- }
- return result;
-}
-
-int
-ncp_write_kernel(struct ncp_server *server, const char *file_id,
- __u32 offset, __u16 to_write,
- const char *source, int *bytes_written)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 0);
- ncp_add_mem(server, file_id, 6);
- ncp_add_be32(server, offset);
- ncp_add_be16(server, to_write);
- ncp_add_mem(server, source, to_write);
-
- if ((result = ncp_request(server, 73)) == 0)
- *bytes_written = to_write;
- ncp_unlock_server(server);
- return result;
-}
-
-#ifdef CONFIG_NCPFS_IOCTL_LOCKING
-int
-ncp_LogPhysicalRecord(struct ncp_server *server, const char *file_id,
- __u8 locktype, __u32 offset, __u32 length, __u16 timeout)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, locktype);
- ncp_add_mem(server, file_id, 6);
- ncp_add_be32(server, offset);
- ncp_add_be32(server, length);
- ncp_add_be16(server, timeout);
-
- if ((result = ncp_request(server, 0x1A)) != 0)
- {
- ncp_unlock_server(server);
- return result;
- }
- ncp_unlock_server(server);
- return 0;
-}
-
-int
-ncp_ClearPhysicalRecord(struct ncp_server *server, const char *file_id,
- __u32 offset, __u32 length)
-{
- int result;
-
- ncp_init_request(server);
- ncp_add_byte(server, 0); /* who knows... lanalyzer says that */
- ncp_add_mem(server, file_id, 6);
- ncp_add_be32(server, offset);
- ncp_add_be32(server, length);
-
- if ((result = ncp_request(server, 0x1E)) != 0)
- {
- ncp_unlock_server(server);
- return result;
- }
- ncp_unlock_server(server);
- return 0;
-}
-#endif /* CONFIG_NCPFS_IOCTL_LOCKING */
-
-#ifdef CONFIG_NCPFS_NLS
-/* This are the NLS conversion routines with inspirations and code parts
- * from the vfat file system and hints from Petr Vandrovec.
- */
-
-int
-ncp__io2vol(struct ncp_server *server, unsigned char *vname, unsigned int *vlen,
- const unsigned char *iname, unsigned int ilen, int cc)
-{
- struct nls_table *in = server->nls_io;
- struct nls_table *out = server->nls_vol;
- unsigned char *vname_start;
- unsigned char *vname_end;
- const unsigned char *iname_end;
-
- iname_end = iname + ilen;
- vname_start = vname;
- vname_end = vname + *vlen - 1;
-
- while (iname < iname_end) {
- int chl;
- wchar_t ec;
-
- if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
- int k;
- unicode_t u;
-
- k = utf8_to_utf32(iname, iname_end - iname, &u);
- if (k < 0 || u > MAX_WCHAR_T)
- return -EINVAL;
- iname += k;
- ec = u;
- } else {
- if (*iname == NCP_ESC) {
- int k;
-
- if (iname_end - iname < 5)
- goto nospec;
-
- ec = 0;
- for (k = 1; k < 5; k++) {
- unsigned char nc;
-
- nc = iname[k] - '0';
- if (nc >= 10) {
- nc -= 'A' - '0' - 10;
- if ((nc < 10) || (nc > 15)) {
- goto nospec;
- }
- }
- ec = (ec << 4) | nc;
- }
- iname += 5;
- } else {
-nospec:;
- if ( (chl = in->char2uni(iname, iname_end - iname, &ec)) < 0)
- return chl;
- iname += chl;
- }
- }
-
- /* unitoupper should be here! */
-
- chl = out->uni2char(ec, vname, vname_end - vname);
- if (chl < 0)
- return chl;
-
- /* this is wrong... */
- if (cc) {
- int chi;
-
- for (chi = 0; chi < chl; chi++){
- vname[chi] = ncp_toupper(out, vname[chi]);
- }
- }
- vname += chl;
- }
-
- *vname = 0;
- *vlen = vname - vname_start;
- return 0;
-}
-
-int
-ncp__vol2io(struct ncp_server *server, unsigned char *iname, unsigned int *ilen,
- const unsigned char *vname, unsigned int vlen, int cc)
-{
- struct nls_table *in = server->nls_vol;
- struct nls_table *out = server->nls_io;
- const unsigned char *vname_end;
- unsigned char *iname_start;
- unsigned char *iname_end;
- unsigned char *vname_cc;
- int err;
-
- vname_cc = NULL;
-
- if (cc) {
- int i;
-
- /* this is wrong! */
- vname_cc = kmalloc(vlen, GFP_KERNEL);
- if (!vname_cc)
- return -ENOMEM;
- for (i = 0; i < vlen; i++)
- vname_cc[i] = ncp_tolower(in, vname[i]);
- vname = vname_cc;
- }
-
- iname_start = iname;
- iname_end = iname + *ilen - 1;
- vname_end = vname + vlen;
-
- while (vname < vname_end) {
- wchar_t ec;
- int chl;
-
- if ( (chl = in->char2uni(vname, vname_end - vname, &ec)) < 0) {
- err = chl;
- goto quit;
- }
- vname += chl;
-
- /* unitolower should be here! */
-
- if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
- int k;
-
- k = utf32_to_utf8(ec, iname, iname_end - iname);
- if (k < 0) {
- err = -ENAMETOOLONG;
- goto quit;
- }
- iname += k;
- } else {
- if ( (chl = out->uni2char(ec, iname, iname_end - iname)) >= 0) {
- iname += chl;
- } else {
- int k;
-
- if (iname_end - iname < 5) {
- err = -ENAMETOOLONG;
- goto quit;
- }
- *iname = NCP_ESC;
- for (k = 4; k > 0; k--) {
- unsigned char v;
-
- v = (ec & 0xF) + '0';
- if (v > '9') {
- v += 'A' - '9' - 1;
- }
- iname[k] = v;
- ec >>= 4;
- }
- iname += 5;
- }
- }
- }
-
- *iname = 0;
- *ilen = iname - iname_start;
- err = 0;
-quit:;
- if (cc)
- kfree(vname_cc);
- return err;
-}
-
-#else
-
-int
-ncp__io2vol(unsigned char *vname, unsigned int *vlen,
- const unsigned char *iname, unsigned int ilen, int cc)
-{
- int i;
-
- if (*vlen <= ilen)
- return -ENAMETOOLONG;
-
- if (cc)
- for (i = 0; i < ilen; i++) {
- *vname = toupper(*iname);
- vname++;
- iname++;
- }
- else {
- memmove(vname, iname, ilen);
- vname += ilen;
- }
-
- *vlen = ilen;
- *vname = 0;
- return 0;
-}
-
-int
-ncp__vol2io(unsigned char *iname, unsigned int *ilen,
- const unsigned char *vname, unsigned int vlen, int cc)
-{
- int i;
-
- if (*ilen <= vlen)
- return -ENAMETOOLONG;
-
- if (cc)
- for (i = 0; i < vlen; i++) {
- *iname = tolower(*vname);
- iname++;
- vname++;
- }
- else {
- memmove(iname, vname, vlen);
- iname += vlen;
- }
-
- *ilen = vlen;
- *iname = 0;
- return 0;
-}
-
-#endif
diff --git a/drivers/staging/ncpfs/ncplib_kernel.h b/drivers/staging/ncpfs/ncplib_kernel.h
deleted file mode 100644
index aaae8aa9bf7d..000000000000
--- a/drivers/staging/ncpfs/ncplib_kernel.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ncplib_kernel.h
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- * Modified for big endian by J.F. Chadima and David S. Miller
- * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
- * Modified 1998, 1999 Wolfram Pienkoss for NLS
- * Modified 1999 Wolfram Pienkoss for directory caching
- *
- */
-
-#ifndef _NCPLIB_H
-#define _NCPLIB_H
-
-
-#include <linux/fs.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
-#include <linux/pagemap.h>
-
-#include <linux/uaccess.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-#include <asm/string.h>
-
-#ifdef CONFIG_NCPFS_NLS
-#include <linux/nls.h>
-#else
-#include <linux/ctype.h>
-#endif /* CONFIG_NCPFS_NLS */
-
-#define NCP_MIN_SYMLINK_SIZE 8
-#define NCP_MAX_SYMLINK_SIZE 512
-
-#define NCP_BLOCK_SHIFT 9
-#define NCP_BLOCK_SIZE (1 << (NCP_BLOCK_SHIFT))
-
-int ncp_negotiate_buffersize(struct ncp_server *, int, int *);
-int ncp_negotiate_size_and_options(struct ncp_server *server, int size,
- int options, int *ret_size, int *ret_options);
-
-int ncp_get_volume_info_with_number(struct ncp_server* server, int n,
- struct ncp_volume_info *target);
-
-int ncp_get_directory_info(struct ncp_server* server, __u8 dirhandle,
- struct ncp_volume_info* target);
-
-int ncp_close_file(struct ncp_server *, const char *);
-static inline int ncp_read_bounce_size(__u32 size) {
- return sizeof(struct ncp_reply_header) + 2 + 2 + size + 8;
-};
-int ncp_read_bounce(struct ncp_server *, const char *, __u32, __u16,
- struct iov_iter *, int *, void *bounce, __u32 bouncelen);
-int ncp_read_kernel(struct ncp_server *, const char *, __u32, __u16,
- char *, int *);
-int ncp_write_kernel(struct ncp_server *, const char *, __u32, __u16,
- const char *, int *);
-
-static inline void ncp_inode_close(struct inode *inode) {
- atomic_dec(&NCP_FINFO(inode)->opened);
-}
-
-void ncp_extract_file_info(const void* src, struct nw_info_struct* target);
-int ncp_obtain_info(struct ncp_server *server, struct inode *, const char *,
- struct nw_info_struct *target);
-int ncp_obtain_nfs_info(struct ncp_server *server, struct nw_info_struct *target);
-int ncp_update_known_namespace(struct ncp_server *server, __u8 volume, int *ret_ns);
-int ncp_get_volume_root(struct ncp_server *server, const char *volname,
- __u32 *volume, __le32 *dirent, __le32 *dosdirent);
-int ncp_lookup_volume(struct ncp_server *, const char *, struct nw_info_struct *);
-int ncp_modify_file_or_subdir_dos_info(struct ncp_server *, struct inode *,
- __le32, const struct nw_modify_dos_info *info);
-int ncp_modify_file_or_subdir_dos_info_path(struct ncp_server *, struct inode *,
- const char* path, __le32, const struct nw_modify_dos_info *info);
-int ncp_modify_nfs_info(struct ncp_server *, __u8 volnum, __le32 dirent,
- __u32 mode, __u32 rdev);
-
-int ncp_del_file_or_subdir2(struct ncp_server *, struct dentry*);
-int ncp_del_file_or_subdir(struct ncp_server *, struct inode *, const char *);
-int ncp_open_create_file_or_subdir(struct ncp_server *, struct inode *, const char *,
- int, __le32, __le16, struct ncp_entry_info *);
-
-int ncp_initialize_search(struct ncp_server *, struct inode *,
- struct nw_search_sequence *target);
-int ncp_search_for_fileset(struct ncp_server *server,
- struct nw_search_sequence *seq,
- int* more, int* cnt,
- char* buffer, size_t bufsize,
- char** rbuf, size_t* rsize);
-
-int ncp_ren_or_mov_file_or_subdir(struct ncp_server *server,
- struct inode *, const char *, struct inode *, const char *);
-
-
-int
-ncp_LogPhysicalRecord(struct ncp_server *server,
- const char *file_id, __u8 locktype,
- __u32 offset, __u32 length, __u16 timeout);
-
-#ifdef CONFIG_NCPFS_IOCTL_LOCKING
-int
-ncp_ClearPhysicalRecord(struct ncp_server *server,
- const char *file_id,
- __u32 offset, __u32 length);
-#endif /* CONFIG_NCPFS_IOCTL_LOCKING */
-
-int
-ncp_mount_subdir(struct ncp_server *, __u8, __u8, __le32,
- __u32* volume, __le32* dirent, __le32* dosdirent);
-int ncp_dirhandle_alloc(struct ncp_server *, __u8 vol, __le32 dirent, __u8 *dirhandle);
-int ncp_dirhandle_free(struct ncp_server *, __u8 dirhandle);
-
-int ncp_create_new(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev, __le32 attributes);
-
-static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int volnum) {
-#ifdef CONFIG_NCPFS_NFS_NS
- return (server->m.flags & NCP_MOUNT_NFS_EXTRAS) &&
- (server->name_space[volnum] == NW_NS_NFS);
-#else
- return 0;
-#endif
-}
-
-#ifdef CONFIG_NCPFS_NLS
-
-int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
- const unsigned char *, unsigned int, int);
-int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
- const unsigned char *, unsigned int, int);
-
-#define NCP_ESC ':'
-#define NCP_IO_TABLE(sb) (NCP_SBP(sb)->nls_io)
-#define ncp_tolower(t, c) nls_tolower(t, c)
-#define ncp_toupper(t, c) nls_toupper(t, c)
-#define ncp_strnicmp(t, s1, s2, len) \
- nls_strnicmp(t, s1, s2, len)
-#define ncp_io2vol(S,m,i,n,k,U) ncp__io2vol(S,m,i,n,k,U)
-#define ncp_vol2io(S,m,i,n,k,U) ncp__vol2io(S,m,i,n,k,U)
-
-#else
-
-int ncp__io2vol(unsigned char *, unsigned int *,
- const unsigned char *, unsigned int, int);
-int ncp__vol2io(unsigned char *, unsigned int *,
- const unsigned char *, unsigned int, int);
-
-#define NCP_IO_TABLE(sb) NULL
-#define ncp_tolower(t, c) tolower(c)
-#define ncp_toupper(t, c) toupper(c)
-#define ncp_io2vol(S,m,i,n,k,U) ncp__io2vol(m,i,n,k,U)
-#define ncp_vol2io(S,m,i,n,k,U) ncp__vol2io(m,i,n,k,U)
-
-
-static inline int ncp_strnicmp(const struct nls_table *t,
- const unsigned char *s1, const unsigned char *s2, int len)
-{
- while (len--) {
- if (tolower(*s1++) != tolower(*s2++))
- return 1;
- }
-
- return 0;
-}
-
-#endif /* CONFIG_NCPFS_NLS */
-
-#define NCP_GET_AGE(dentry) (jiffies - (dentry)->d_time)
-#define NCP_MAX_AGE(server) atomic_read(&(server)->dentry_ttl)
-#define NCP_TEST_AGE(server,dentry) (NCP_GET_AGE(dentry) < NCP_MAX_AGE(server))
-
-static inline void
-ncp_age_dentry(struct ncp_server* server, struct dentry* dentry)
-{
- dentry->d_time = jiffies - NCP_MAX_AGE(server);
-}
-
-static inline void
-ncp_new_dentry(struct dentry* dentry)
-{
- dentry->d_time = jiffies;
-}
-
-struct ncp_cache_head {
- time_t mtime;
- unsigned long time; /* cache age */
- unsigned long end; /* last valid fpos in cache */
- int eof;
-};
-
-#define NCP_DIRCACHE_SIZE ((int)(PAGE_SIZE/sizeof(struct dentry *)))
-union ncp_dir_cache {
- struct ncp_cache_head head;
- struct dentry *dentry[NCP_DIRCACHE_SIZE];
-};
-
-#define NCP_FIRSTCACHE_SIZE ((int)((NCP_DIRCACHE_SIZE * \
- sizeof(struct dentry *) - sizeof(struct ncp_cache_head)) / \
- sizeof(struct dentry *)))
-
-#define NCP_DIRCACHE_START (NCP_DIRCACHE_SIZE - NCP_FIRSTCACHE_SIZE)
-
-struct ncp_cache_control {
- struct ncp_cache_head head;
- struct page *page;
- union ncp_dir_cache *cache;
- unsigned long fpos, ofs;
- int filled, valid, idx;
-};
-
-#endif /* _NCPLIB_H */
diff --git a/drivers/staging/ncpfs/ncpsign_kernel.c b/drivers/staging/ncpfs/ncpsign_kernel.c
deleted file mode 100644
index 8085b1a3ba47..000000000000
--- a/drivers/staging/ncpfs/ncpsign_kernel.c
+++ /dev/null
@@ -1,128 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ncpsign_kernel.c
- *
- * Arne de Bruijn (arne@knoware.nl), 1997
- *
- */
-
-
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
-
-#include <linux/string.h>
-#include <linux/ncp.h>
-#include <linux/bitops.h>
-#include "ncp_fs.h"
-#include "ncpsign_kernel.h"
-
-/* i386: 32-bit, little endian, handles mis-alignment */
-#ifdef __i386__
-#define GET_LE32(p) (*(const int *)(p))
-#define PUT_LE32(p,v) { *(int *)(p)=v; }
-#else
-/* from include/ncplib.h */
-#define BVAL(buf,pos) (((const __u8 *)(buf))[pos])
-#define PVAL(buf,pos) ((unsigned)BVAL(buf,pos))
-#define BSET(buf,pos,val) (((__u8 *)(buf))[pos] = (val))
-
-static inline __u16
-WVAL_LH(const __u8 * buf, int pos)
-{
- return PVAL(buf, pos) | PVAL(buf, pos + 1) << 8;
-}
-static inline __u32
-DVAL_LH(const __u8 * buf, int pos)
-{
- return WVAL_LH(buf, pos) | WVAL_LH(buf, pos + 2) << 16;
-}
-static inline void
-WSET_LH(__u8 * buf, int pos, __u16 val)
-{
- BSET(buf, pos, val & 0xff);
- BSET(buf, pos + 1, val >> 8);
-}
-static inline void
-DSET_LH(__u8 * buf, int pos, __u32 val)
-{
- WSET_LH(buf, pos, val & 0xffff);
- WSET_LH(buf, pos + 2, val >> 16);
-}
-
-#define GET_LE32(p) DVAL_LH(p,0)
-#define PUT_LE32(p,v) DSET_LH(p,0,v)
-#endif
-
-static void nwsign(char *r_data1, char *r_data2, char *outdata) {
- int i;
- unsigned int w0,w1,w2,w3;
- static int rbit[4]={0, 2, 1, 3};
-#ifdef __i386__
- unsigned int *data2=(unsigned int *)r_data2;
-#else
- unsigned int data2[16];
- for (i=0;i<16;i++)
- data2[i]=GET_LE32(r_data2+(i<<2));
-#endif
- w0=GET_LE32(r_data1);
- w1=GET_LE32(r_data1+4);
- w2=GET_LE32(r_data1+8);
- w3=GET_LE32(r_data1+12);
- for (i=0;i<16;i+=4) {
- w0=rol32(w0 + ((w1 & w2) | ((~w1) & w3)) + data2[i+0],3);
- w3=rol32(w3 + ((w0 & w1) | ((~w0) & w2)) + data2[i+1],7);
- w2=rol32(w2 + ((w3 & w0) | ((~w3) & w1)) + data2[i+2],11);
- w1=rol32(w1 + ((w2 & w3) | ((~w2) & w0)) + data2[i+3],19);
- }
- for (i=0;i<4;i++) {
- w0=rol32(w0 + (((w2 | w3) & w1) | (w2 & w3)) + 0x5a827999 + data2[i+0],3);
- w3=rol32(w3 + (((w1 | w2) & w0) | (w1 & w2)) + 0x5a827999 + data2[i+4],5);
- w2=rol32(w2 + (((w0 | w1) & w3) | (w0 & w1)) + 0x5a827999 + data2[i+8],9);
- w1=rol32(w1 + (((w3 | w0) & w2) | (w3 & w0)) + 0x5a827999 + data2[i+12],13);
- }
- for (i=0;i<4;i++) {
- w0=rol32(w0 + ((w1 ^ w2) ^ w3) + 0x6ed9eba1 + data2[rbit[i]+0],3);
- w3=rol32(w3 + ((w0 ^ w1) ^ w2) + 0x6ed9eba1 + data2[rbit[i]+8],9);
- w2=rol32(w2 + ((w3 ^ w0) ^ w1) + 0x6ed9eba1 + data2[rbit[i]+4],11);
- w1=rol32(w1 + ((w2 ^ w3) ^ w0) + 0x6ed9eba1 + data2[rbit[i]+12],15);
- }
- PUT_LE32(outdata,(w0+GET_LE32(r_data1)) & 0xffffffff);
- PUT_LE32(outdata+4,(w1+GET_LE32(r_data1+4)) & 0xffffffff);
- PUT_LE32(outdata+8,(w2+GET_LE32(r_data1+8)) & 0xffffffff);
- PUT_LE32(outdata+12,(w3+GET_LE32(r_data1+12)) & 0xffffffff);
-}
-
-/* Make a signature for the current packet and add it at the end of the */
-/* packet. */
-void __sign_packet(struct ncp_server *server, const char *packet, size_t size, __u32 totalsize, void *sign_buff) {
- unsigned char data[64];
-
- memcpy(data, server->sign_root, 8);
- *(__u32*)(data + 8) = totalsize;
- if (size < 52) {
- memcpy(data + 12, packet, size);
- memset(data + 12 + size, 0, 52 - size);
- } else {
- memcpy(data + 12, packet, 52);
- }
- nwsign(server->sign_last, data, server->sign_last);
- memcpy(sign_buff, server->sign_last, 8);
-}
-
-int sign_verify_reply(struct ncp_server *server, const char *packet, size_t size, __u32 totalsize, const void *sign_buff) {
- unsigned char data[64];
- unsigned char hash[16];
-
- memcpy(data, server->sign_root, 8);
- *(__u32*)(data + 8) = totalsize;
- if (size < 52) {
- memcpy(data + 12, packet, size);
- memset(data + 12 + size, 0, 52 - size);
- } else {
- memcpy(data + 12, packet, 52);
- }
- nwsign(server->sign_last, data, hash);
- return memcmp(sign_buff, hash, 8);
-}
-
-#endif /* CONFIG_NCPFS_PACKET_SIGNING */
-
diff --git a/drivers/staging/ncpfs/ncpsign_kernel.h b/drivers/staging/ncpfs/ncpsign_kernel.h
deleted file mode 100644
index 57ff0a0650b8..000000000000
--- a/drivers/staging/ncpfs/ncpsign_kernel.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ncpsign_kernel.h
- *
- * Arne de Bruijn (arne@knoware.nl), 1997
- *
- */
-
-#ifndef _NCPSIGN_KERNEL_H
-#define _NCPSIGN_KERNEL_H
-
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
-void __sign_packet(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, void *sign_buff);
-int sign_verify_reply(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, const void *sign_buff);
-#endif
-
-static inline size_t sign_packet(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, void *sign_buff) {
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
- if (server->sign_active) {
- __sign_packet(server, data, size, totalsize, sign_buff);
- return 8;
- }
-#endif
- return 0;
-}
-
-#endif
diff --git a/drivers/staging/ncpfs/sock.c b/drivers/staging/ncpfs/sock.c
deleted file mode 100644
index 4c13174d85b7..000000000000
--- a/drivers/staging/ncpfs/sock.c
+++ /dev/null
@@ -1,855 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/ncpfs/sock.c
- *
- * Copyright (C) 1992, 1993 Rick Sladkey
- *
- * Modified 1995, 1996 by Volker Lendecke to be usable for ncp
- * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/time.h>
-#include <linux/errno.h>
-#include <linux/socket.h>
-#include <linux/fcntl.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/sched/signal.h>
-#include <linux/uaccess.h>
-#include <linux/in.h>
-#include <linux/net.h>
-#include <linux/mm.h>
-#include <linux/netdevice.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
-#include <net/scm.h>
-#include <net/sock.h>
-#include <linux/ipx.h>
-#include <linux/poll.h>
-#include <linux/file.h>
-
-#include "ncp_fs.h"
-
-#include "ncpsign_kernel.h"
-
-static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
-{
- struct msghdr msg = {NULL, };
- struct kvec iov = {buf, size};
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
- return sock_recvmsg(sock, &msg, flags);
-}
-
-static int _send(struct socket *sock, const void *buff, int len)
-{
- struct msghdr msg = { .msg_flags = 0 };
- struct kvec vec = {.iov_base = (void *)buff, .iov_len = len};
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len);
- return sock_sendmsg(sock, &msg);
-}
-
-struct ncp_request_reply {
- struct list_head req;
- wait_queue_head_t wq;
- atomic_t refs;
- unsigned char* reply_buf;
- size_t datalen;
- int result;
- enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
- struct iov_iter from;
- struct kvec tx_iov[3];
- u_int16_t tx_type;
- u_int32_t sign[6];
-};
-
-static inline struct ncp_request_reply* ncp_alloc_req(void)
-{
- struct ncp_request_reply *req;
-
- req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
- if (!req)
- return NULL;
-
- init_waitqueue_head(&req->wq);
- atomic_set(&req->refs, (1));
- req->status = RQ_IDLE;
-
- return req;
-}
-
-static void ncp_req_get(struct ncp_request_reply *req)
-{
- atomic_inc(&req->refs);
-}
-
-static void ncp_req_put(struct ncp_request_reply *req)
-{
- if (atomic_dec_and_test(&req->refs))
- kfree(req);
-}
-
-void ncp_tcp_data_ready(struct sock *sk)
-{
- struct ncp_server *server = sk->sk_user_data;
-
- server->data_ready(sk);
- schedule_work(&server->rcv.tq);
-}
-
-void ncp_tcp_error_report(struct sock *sk)
-{
- struct ncp_server *server = sk->sk_user_data;
-
- server->error_report(sk);
- schedule_work(&server->rcv.tq);
-}
-
-void ncp_tcp_write_space(struct sock *sk)
-{
- struct ncp_server *server = sk->sk_user_data;
-
- /* We do not need any locking: we first set tx.creq, and then we do sendmsg,
- not vice versa... */
- server->write_space(sk);
- if (server->tx.creq)
- schedule_work(&server->tx.tq);
-}
-
-void ncpdgram_timeout_call(struct timer_list *t)
-{
- struct ncp_server *server = from_timer(server, t, timeout_tm);
-
- schedule_work(&server->timeout_tq);
-}
-
-static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
-{
- req->result = result;
- if (req->status != RQ_ABANDONED)
- memcpy(req->reply_buf, server->rxbuf, req->datalen);
- req->status = RQ_DONE;
- wake_up_all(&req->wq);
- ncp_req_put(req);
-}
-
-static void __abort_ncp_connection(struct ncp_server *server)
-{
- struct ncp_request_reply *req;
-
- ncp_invalidate_conn(server);
- del_timer(&server->timeout_tm);
- while (!list_empty(&server->tx.requests)) {
- req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
-
- list_del_init(&req->req);
- ncp_finish_request(server, req, -EIO);
- }
- req = server->rcv.creq;
- if (req) {
- server->rcv.creq = NULL;
- ncp_finish_request(server, req, -EIO);
- server->rcv.ptr = NULL;
- server->rcv.state = 0;
- }
- req = server->tx.creq;
- if (req) {
- server->tx.creq = NULL;
- ncp_finish_request(server, req, -EIO);
- }
-}
-
-static inline int get_conn_number(struct ncp_reply_header *rp)
-{
- return rp->conn_low | (rp->conn_high << 8);
-}
-
-static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
-{
- /* If req is done, we got signal, but we also received answer... */
- switch (req->status) {
- case RQ_IDLE:
- case RQ_DONE:
- break;
- case RQ_QUEUED:
- list_del_init(&req->req);
- ncp_finish_request(server, req, err);
- break;
- case RQ_INPROGRESS:
- req->status = RQ_ABANDONED;
- break;
- case RQ_ABANDONED:
- break;
- }
-}
-
-static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
-{
- mutex_lock(&server->rcv.creq_mutex);
- __ncp_abort_request(server, req, err);
- mutex_unlock(&server->rcv.creq_mutex);
-}
-
-static inline void __ncptcp_abort(struct ncp_server *server)
-{
- __abort_ncp_connection(server);
-}
-
-static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
-{
- struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT };
- return sock_sendmsg(sock, &msg);
-}
-
-static void __ncptcp_try_send(struct ncp_server *server)
-{
- struct ncp_request_reply *rq;
- struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
- int result;
-
- rq = server->tx.creq;
- if (!rq)
- return;
-
- msg.msg_iter = rq->from;
- result = sock_sendmsg(server->ncp_sock, &msg);
-
- if (result == -EAGAIN)
- return;
-
- if (result < 0) {
- pr_err("tcp: Send failed: %d\n", result);
- __ncp_abort_request(server, rq, result);
- return;
- }
- if (!msg_data_left(&msg)) {
- server->rcv.creq = rq;
- server->tx.creq = NULL;
- return;
- }
- rq->from = msg.msg_iter;
-}
-
-static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
-{
- req->status = RQ_INPROGRESS;
- h->conn_low = server->connection;
- h->conn_high = server->connection >> 8;
- h->sequence = ++server->sequence;
-}
-
-static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
- size_t signlen, len = req->tx_iov[1].iov_len;
- struct ncp_request_header *h = req->tx_iov[1].iov_base;
-
- ncp_init_header(server, req, h);
- signlen = sign_packet(server,
- req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
- len - sizeof(struct ncp_request_header) + 1,
- cpu_to_le32(len), req->sign);
- if (signlen) {
- /* NCP over UDP appends signature */
- req->tx_iov[2].iov_base = req->sign;
- req->tx_iov[2].iov_len = signlen;
- }
- iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
- req->tx_iov + 1, signlen ? 2 : 1, len + signlen);
- server->rcv.creq = req;
- server->timeout_last = server->m.time_out;
- server->timeout_retries = server->m.retry_count;
- ncpdgram_send(server->ncp_sock, req);
- mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
-}
-
-#define NCP_TCP_XMIT_MAGIC (0x446D6454)
-#define NCP_TCP_XMIT_VERSION (1)
-#define NCP_TCP_RCVD_MAGIC (0x744E6350)
-
-static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
- size_t signlen, len = req->tx_iov[1].iov_len;
- struct ncp_request_header *h = req->tx_iov[1].iov_base;
-
- ncp_init_header(server, req, h);
- signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
- len - sizeof(struct ncp_request_header) + 1,
- cpu_to_be32(len + 24), req->sign + 4) + 16;
-
- req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
- req->sign[1] = htonl(len + signlen);
- req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
- req->sign[3] = htonl(req->datalen + 8);
- /* NCP over TCP prepends signature */
- req->tx_iov[0].iov_base = req->sign;
- req->tx_iov[0].iov_len = signlen;
- iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
- req->tx_iov, 2, len + signlen);
-
- server->tx.creq = req;
- __ncptcp_try_send(server);
-}
-
-static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
- /* we copy the data so that we do not depend on the caller
- staying alive */
- memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
- req->tx_iov[1].iov_base = server->txbuf;
-
- if (server->ncp_sock->type == SOCK_STREAM)
- ncptcp_start_request(server, req);
- else
- ncpdgram_start_request(server, req);
-}
-
-static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
- mutex_lock(&server->rcv.creq_mutex);
- if (!ncp_conn_valid(server)) {
- mutex_unlock(&server->rcv.creq_mutex);
- pr_err("tcp: Server died\n");
- return -EIO;
- }
- ncp_req_get(req);
- if (server->tx.creq || server->rcv.creq) {
- req->status = RQ_QUEUED;
- list_add_tail(&req->req, &server->tx.requests);
- mutex_unlock(&server->rcv.creq_mutex);
- return 0;
- }
- __ncp_start_request(server, req);
- mutex_unlock(&server->rcv.creq_mutex);
- return 0;
-}
-
-static void __ncp_next_request(struct ncp_server *server)
-{
- struct ncp_request_reply *req;
-
- server->rcv.creq = NULL;
- if (list_empty(&server->tx.requests)) {
- return;
- }
- req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
- list_del_init(&req->req);
- __ncp_start_request(server, req);
-}
-
-static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
-{
- if (server->info_sock) {
- struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
- __be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)};
- struct kvec iov[2] = {
- {.iov_base = hdr, .iov_len = 8},
- {.iov_base = (void *)data, .iov_len = len},
- };
-
- iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE,
- iov, 2, len + 8);
-
- sock_sendmsg(server->info_sock, &msg);
- }
-}
-
-void ncpdgram_rcv_proc(struct work_struct *work)
-{
- struct ncp_server *server =
- container_of(work, struct ncp_server, rcv.tq);
- struct socket* sock;
-
- sock = server->ncp_sock;
-
- while (1) {
- struct ncp_reply_header reply;
- int result;
-
- result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
- if (result < 0) {
- break;
- }
- if (result >= sizeof(reply)) {
- struct ncp_request_reply *req;
-
- if (reply.type == NCP_WATCHDOG) {
- unsigned char buf[10];
-
- if (server->connection != get_conn_number(&reply)) {
- goto drop;
- }
- result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
- if (result < 0) {
- ncp_dbg(1, "recv failed with %d\n", result);
- continue;
- }
- if (result < 10) {
- ncp_dbg(1, "too short (%u) watchdog packet\n", result);
- continue;
- }
- if (buf[9] != '?') {
- ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
- continue;
- }
- buf[9] = 'Y';
- _send(sock, buf, sizeof(buf));
- continue;
- }
- if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
- result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
- if (result < 0) {
- continue;
- }
- info_server(server, 0, server->unexpected_packet.data, result);
- continue;
- }
- mutex_lock(&server->rcv.creq_mutex);
- req = server->rcv.creq;
- if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
- server->connection == get_conn_number(&reply)))) {
- if (reply.type == NCP_POSITIVE_ACK) {
- server->timeout_retries = server->m.retry_count;
- server->timeout_last = NCP_MAX_RPC_TIMEOUT;
- mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
- } else if (reply.type == NCP_REPLY) {
- result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
- if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
- if (result < 8 + 8) {
- result = -EIO;
- } else {
- unsigned int hdrl;
-
- result -= 8;
- hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
- if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
- pr_info("Signature violation\n");
- result = -EIO;
- }
- }
- }
-#endif
- del_timer(&server->timeout_tm);
- server->rcv.creq = NULL;
- ncp_finish_request(server, req, result);
- __ncp_next_request(server);
- mutex_unlock(&server->rcv.creq_mutex);
- continue;
- }
- }
- mutex_unlock(&server->rcv.creq_mutex);
- }
-drop:;
- _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
- }
-}
-
-static void __ncpdgram_timeout_proc(struct ncp_server *server)
-{
- /* If timer is pending, we are processing another request... */
- if (!timer_pending(&server->timeout_tm)) {
- struct ncp_request_reply* req;
-
- req = server->rcv.creq;
- if (req) {
- int timeout;
-
- if (server->m.flags & NCP_MOUNT_SOFT) {
- if (server->timeout_retries-- == 0) {
- __ncp_abort_request(server, req, -ETIMEDOUT);
- return;
- }
- }
- /* Ignore errors */
- ncpdgram_send(server->ncp_sock, req);
- timeout = server->timeout_last << 1;
- if (timeout > NCP_MAX_RPC_TIMEOUT) {
- timeout = NCP_MAX_RPC_TIMEOUT;
- }
- server->timeout_last = timeout;
- mod_timer(&server->timeout_tm, jiffies + timeout);
- }
- }
-}
-
-void ncpdgram_timeout_proc(struct work_struct *work)
-{
- struct ncp_server *server =
- container_of(work, struct ncp_server, timeout_tq);
- mutex_lock(&server->rcv.creq_mutex);
- __ncpdgram_timeout_proc(server);
- mutex_unlock(&server->rcv.creq_mutex);
-}
-
-static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
-{
- int result;
-
- if (buffer) {
- result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
- } else {
- static unsigned char dummy[1024];
-
- if (len > sizeof(dummy)) {
- len = sizeof(dummy);
- }
- result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
- }
- if (result < 0) {
- return result;
- }
- if (result > len) {
- pr_err("tcp: bug in recvmsg (%u > %zu)\n", result, len);
- return -EIO;
- }
- return result;
-}
-
-static int __ncptcp_rcv_proc(struct ncp_server *server)
-{
- /* We have to check the result, so store the complete header */
- while (1) {
- int result;
- struct ncp_request_reply *req;
- int datalen;
- int type;
-
- while (server->rcv.len) {
- result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
- if (result == -EAGAIN) {
- return 0;
- }
- if (result <= 0) {
- req = server->rcv.creq;
- if (req) {
- __ncp_abort_request(server, req, -EIO);
- } else {
- __ncptcp_abort(server);
- }
- if (result < 0) {
- pr_err("tcp: error in recvmsg: %d\n", result);
- } else {
- ncp_dbg(1, "tcp: EOF\n");
- }
- return -EIO;
- }
- if (server->rcv.ptr) {
- server->rcv.ptr += result;
- }
- server->rcv.len -= result;
- }
- switch (server->rcv.state) {
- case 0:
- if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
- pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
- __ncptcp_abort(server);
- return -EIO;
- }
- datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
- if (datalen < 10) {
- pr_err("tcp: Unexpected reply len %d\n", datalen);
- __ncptcp_abort(server);
- return -EIO;
- }
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
- if (server->sign_active) {
- if (datalen < 18) {
- pr_err("tcp: Unexpected reply len %d\n", datalen);
- __ncptcp_abort(server);
- return -EIO;
- }
- server->rcv.buf.len = datalen - 8;
- server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
- server->rcv.len = 8;
- server->rcv.state = 4;
- break;
- }
-#endif
- type = ntohs(server->rcv.buf.type);
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
-cont:;
-#endif
- if (type != NCP_REPLY) {
- if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
- *(__u16*)(server->unexpected_packet.data) = htons(type);
- server->unexpected_packet.len = datalen - 8;
-
- server->rcv.state = 5;
- server->rcv.ptr = server->unexpected_packet.data + 2;
- server->rcv.len = datalen - 10;
- break;
- }
- ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
-skipdata2:;
- server->rcv.state = 2;
-skipdata:;
- server->rcv.ptr = NULL;
- server->rcv.len = datalen - 10;
- break;
- }
- req = server->rcv.creq;
- if (!req) {
- ncp_dbg(1, "Reply without appropriate request\n");
- goto skipdata2;
- }
- if (datalen > req->datalen + 8) {
- pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen, req->datalen + 8);
- server->rcv.state = 3;
- goto skipdata;
- }
- req->datalen = datalen - 8;
- ((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
- server->rcv.ptr = server->rxbuf + 2;
- server->rcv.len = datalen - 10;
- server->rcv.state = 1;
- break;
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
- case 4:
- datalen = server->rcv.buf.len;
- type = ntohs(server->rcv.buf.type2);
- goto cont;
-#endif
- case 1:
- req = server->rcv.creq;
- if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
- if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
- pr_err("tcp: Bad sequence number\n");
- __ncp_abort_request(server, req, -EIO);
- return -EIO;
- }
- if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
- pr_err("tcp: Connection number mismatch\n");
- __ncp_abort_request(server, req, -EIO);
- return -EIO;
- }
- }
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
- if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
- if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
- pr_err("tcp: Signature violation\n");
- __ncp_abort_request(server, req, -EIO);
- return -EIO;
- }
- }
-#endif
- ncp_finish_request(server, req, req->datalen);
- nextreq:;
- __ncp_next_request(server);
- case 2:
- next:;
- server->rcv.ptr = (unsigned char*)&server->rcv.buf;
- server->rcv.len = 10;
- server->rcv.state = 0;
- break;
- case 3:
- ncp_finish_request(server, server->rcv.creq, -EIO);
- goto nextreq;
- case 5:
- info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
- goto next;
- }
- }
-}
-
-void ncp_tcp_rcv_proc(struct work_struct *work)
-{
- struct ncp_server *server =
- container_of(work, struct ncp_server, rcv.tq);
-
- mutex_lock(&server->rcv.creq_mutex);
- __ncptcp_rcv_proc(server);
- mutex_unlock(&server->rcv.creq_mutex);
-}
-
-void ncp_tcp_tx_proc(struct work_struct *work)
-{
- struct ncp_server *server =
- container_of(work, struct ncp_server, tx.tq);
-
- mutex_lock(&server->rcv.creq_mutex);
- __ncptcp_try_send(server);
- mutex_unlock(&server->rcv.creq_mutex);
-}
-
-static int do_ncp_rpc_call(struct ncp_server *server, int size,
- unsigned char* reply_buf, int max_reply_size)
-{
- int result;
- struct ncp_request_reply *req;
-
- req = ncp_alloc_req();
- if (!req)
- return -ENOMEM;
-
- req->reply_buf = reply_buf;
- req->datalen = max_reply_size;
- req->tx_iov[1].iov_base = server->packet;
- req->tx_iov[1].iov_len = size;
- req->tx_type = *(u_int16_t*)server->packet;
-
- result = ncp_add_request(server, req);
- if (result < 0)
- goto out;
-
- if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
- ncp_abort_request(server, req, -EINTR);
- result = -EINTR;
- goto out;
- }
-
- result = req->result;
-
-out:
- ncp_req_put(req);
-
- return result;
-}
-
-/*
- * We need the server to be locked here, so check!
- */
-
-static int ncp_do_request(struct ncp_server *server, int size,
- void* reply, int max_reply_size)
-{
- int result;
-
- if (server->lock == 0) {
- pr_err("Server not locked!\n");
- return -EIO;
- }
- if (!ncp_conn_valid(server)) {
- return -EIO;
- }
- {
- sigset_t old_set;
- unsigned long mask, flags;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- old_set = current->blocked;
- if (current->flags & PF_EXITING)
- mask = 0;
- else
- mask = sigmask(SIGKILL);
- if (server->m.flags & NCP_MOUNT_INTR) {
- /* FIXME: This doesn't seem right at all. So, like,
- we can't handle SIGINT and get whatever to stop?
- What if we've blocked it ourselves? What about
- alarms? Why, in fact, are we mucking with the
- sigmask at all? -- r~ */
- if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
- mask |= sigmask(SIGINT);
- if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
- mask |= sigmask(SIGQUIT);
- }
- siginitsetinv(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
- result = do_ncp_rpc_call(server, size, reply, max_reply_size);
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- current->blocked = old_set;
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- }
-
- ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);
-
- return result;
-}
-
-/* ncp_do_request assures that at least a complete reply header is
- * received. It assumes that server->current_size contains the ncp
- * request size
- */
-int ncp_request2(struct ncp_server *server, int function,
- void* rpl, int size)
-{
- struct ncp_request_header *h;
- struct ncp_reply_header* reply = rpl;
- int result;
-
- h = (struct ncp_request_header *) (server->packet);
- if (server->has_subfunction != 0) {
- *(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
- }
- h->type = NCP_REQUEST;
- /*
- * The server shouldn't know or care what task is making a
- * request, so we always use the same task number.
- */
- h->task = 2; /* (current->pid) & 0xff; */
- h->function = function;
-
- result = ncp_do_request(server, server->current_size, reply, size);
- if (result < 0) {
- ncp_dbg(1, "ncp_request_error: %d\n", result);
- goto out;
- }
- server->completion = reply->completion_code;
- server->conn_status = reply->connection_state;
- server->reply_size = result;
- server->ncp_reply_size = result - sizeof(struct ncp_reply_header);
-
- result = reply->completion_code;
-
- if (result != 0)
- ncp_vdbg("completion code=%x\n", result);
-out:
- return result;
-}
-
-int ncp_connect(struct ncp_server *server)
-{
- struct ncp_request_header *h;
- int result;
-
- server->connection = 0xFFFF;
- server->sequence = 255;
-
- h = (struct ncp_request_header *) (server->packet);
- h->type = NCP_ALLOC_SLOT_REQUEST;
- h->task = 2; /* see above */
- h->function = 0;
-
- result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
- if (result < 0)
- goto out;
- server->connection = h->conn_low + (h->conn_high * 256);
- result = 0;
-out:
- return result;
-}
-
-int ncp_disconnect(struct ncp_server *server)
-{
- struct ncp_request_header *h;
-
- h = (struct ncp_request_header *) (server->packet);
- h->type = NCP_DEALLOC_SLOT_REQUEST;
- h->task = 2; /* see above */
- h->function = 0;
-
- return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
-}
-
-void ncp_lock_server(struct ncp_server *server)
-{
- mutex_lock(&server->mutex);
- if (server->lock)
- pr_warn("%s: was locked!\n", __func__);
- server->lock = 1;
-}
-
-void ncp_unlock_server(struct ncp_server *server)
-{
- if (!server->lock) {
- pr_warn("%s: was not locked!\n", __func__);
- return;
- }
- server->lock = 0;
- mutex_unlock(&server->mutex);
-}
diff --git a/drivers/staging/ncpfs/symlink.c b/drivers/staging/ncpfs/symlink.c
deleted file mode 100644
index b6e16da4837a..000000000000
--- a/drivers/staging/ncpfs/symlink.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/ncpfs/symlink.c
- *
- * Code for allowing symbolic links on NCPFS (i.e. NetWare)
- * Symbolic links are not supported on native NetWare, so we use an
- * infrequently-used flag (Sh) and store a two-word magic header in
- * the file to make sure we don't accidentally use a non-link file
- * as a link.
- *
- * When using the NFS namespace, we set the mode to indicate a symlink and
- * don't bother with the magic numbers.
- *
- * from linux/fs/ext2/symlink.c
- *
- * Copyright (C) 1998-99, Frank A. Vorstenbosch
- *
- * ncpfs symlink handling code
- * NLS support (c) 1999 Petr Vandrovec
- * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
- *
- */
-
-
-#include <linux/uaccess.h>
-
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/time.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/stat.h>
-#include "ncp_fs.h"
-
-/* these magic numbers must appear in the symlink file -- this makes it a bit
- more resilient against the magic attributes being set on random files. */
-
-#define NCP_SYMLINK_MAGIC0 cpu_to_le32(0x6c6d7973) /* "symlnk->" */
-#define NCP_SYMLINK_MAGIC1 cpu_to_le32(0x3e2d6b6e)
-
-/* ----- read a symbolic link ------------------------------------------ */
-
-static int ncp_symlink_readpage(struct file *file, struct page *page)
-{
- struct inode *inode = page->mapping->host;
- int error, length, len;
- char *link, *rawlink;
- char *buf = kmap(page);
-
- error = -ENOMEM;
- rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
- if (!rawlink)
- goto fail;
-
- if (ncp_make_open(inode,O_RDONLY))
- goto failEIO;
-
- error=ncp_read_kernel(NCP_SERVER(inode),NCP_FINFO(inode)->file_handle,
- 0,NCP_MAX_SYMLINK_SIZE,rawlink,&length);
-
- ncp_inode_close(inode);
- /* Close file handle if no other users... */
- ncp_make_closed(inode);
- if (error)
- goto failEIO;
-
- if (NCP_FINFO(inode)->flags & NCPI_KLUDGE_SYMLINK) {
- if (length<NCP_MIN_SYMLINK_SIZE ||
- ((__le32 *)rawlink)[0]!=NCP_SYMLINK_MAGIC0 ||
- ((__le32 *)rawlink)[1]!=NCP_SYMLINK_MAGIC1)
- goto failEIO;
- link = rawlink + 8;
- length -= 8;
- } else {
- link = rawlink;
- }
-
- len = NCP_MAX_SYMLINK_SIZE;
- error = ncp_vol2io(NCP_SERVER(inode), buf, &len, link, length, 0);
- kfree(rawlink);
- if (error)
- goto fail;
- SetPageUptodate(page);
- kunmap(page);
- unlock_page(page);
- return 0;
-
-failEIO:
- error = -EIO;
- kfree(rawlink);
-fail:
- SetPageError(page);
- kunmap(page);
- unlock_page(page);
- return error;
-}
-
-/*
- * symlinks can't do much...
- */
-const struct address_space_operations ncp_symlink_aops = {
- .readpage = ncp_symlink_readpage,
-};
-
-/* ----- create a new symbolic link -------------------------------------- */
-
-int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname) {
- struct inode *inode;
- char *rawlink;
- int length, err, i, outlen;
- int kludge;
- umode_t mode;
- __le32 attr;
- unsigned int hdr;
-
- ncp_dbg(1, "dir=%p, dentry=%p, symname=%s\n", dir, dentry, symname);
-
- if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber))
- kludge = 0;
- else
-#ifdef CONFIG_NCPFS_EXTRAS
- if (NCP_SERVER(dir)->m.flags & NCP_MOUNT_SYMLINKS)
- kludge = 1;
- else
-#endif
- /* EPERM is returned by VFS if symlink procedure does not exist */
- return -EPERM;
-
- rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
- if (!rawlink)
- return -ENOMEM;
-
- if (kludge) {
- mode = 0;
- attr = aSHARED | aHIDDEN;
- ((__le32 *)rawlink)[0]=NCP_SYMLINK_MAGIC0;
- ((__le32 *)rawlink)[1]=NCP_SYMLINK_MAGIC1;
- hdr = 8;
- } else {
- mode = S_IFLNK | S_IRWXUGO;
- attr = 0;
- hdr = 0;
- }
-
- length = strlen(symname);
- /* map to/from server charset, do not touch upper/lower case as
- symlink can point out of ncp filesystem */
- outlen = NCP_MAX_SYMLINK_SIZE - hdr;
- err = ncp_io2vol(NCP_SERVER(dir), rawlink + hdr, &outlen, symname, length, 0);
- if (err)
- goto failfree;
-
- outlen += hdr;
-
- err = -EIO;
- if (ncp_create_new(dir,dentry,mode,0,attr)) {
- goto failfree;
- }
-
- inode=d_inode(dentry);
-
- if (ncp_make_open(inode, O_WRONLY))
- goto failfree;
-
- if (ncp_write_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle,
- 0, outlen, rawlink, &i) || i!=outlen) {
- goto fail;
- }
-
- ncp_inode_close(inode);
- ncp_make_closed(inode);
- kfree(rawlink);
- return 0;
-fail:;
- ncp_inode_close(inode);
- ncp_make_closed(inode);
-failfree:;
- kfree(rawlink);
- return err;
-}
-
-/* ----- EOF ----- */
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 52054a528723..08027a36e0bc 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -17,12 +17,11 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
@@ -236,8 +235,8 @@ static size_t nvec_msg_size(struct nvec_msg *msg)
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
- gpio_get_value(nvec->gpio), value);
- gpio_set_value(nvec->gpio, value);
+ gpiod_get_value(nvec->gpiod), value);
+ gpiod_set_value(nvec->gpiod, value);
}
/**
@@ -761,31 +760,11 @@ static void nvec_power_off(void)
nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}
-/*
- * Parse common device tree data
- */
-static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec)
-{
- nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
-
- if (nvec->gpio < 0) {
- dev_err(nvec->dev, "no gpio specified");
- return -ENODEV;
- }
-
- if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
- &nvec->i2c_addr)) {
- dev_err(nvec->dev, "no i2c address specified");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int tegra_nvec_probe(struct platform_device *pdev)
{
int err, ret;
struct clk *i2c_clk;
+ struct device *dev = &pdev->dev;
struct nvec_chip *nvec;
struct nvec_msg *msg;
struct resource *res;
@@ -794,42 +773,43 @@ static int tegra_nvec_probe(struct platform_device *pdev)
unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };
- if (!pdev->dev.of_node) {
- dev_err(&pdev->dev, "must be instantiated using device tree\n");
+ if (!dev->of_node) {
+ dev_err(dev, "must be instantiated using device tree\n");
return -ENODEV;
}
- nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
+ nvec = devm_kzalloc(dev, sizeof(struct nvec_chip), GFP_KERNEL);
if (!nvec)
return -ENOMEM;
platform_set_drvdata(pdev, nvec);
- nvec->dev = &pdev->dev;
+ nvec->dev = dev;
- err = nvec_i2c_parse_dt_pdata(nvec);
- if (err < 0)
- return err;
+ if (of_property_read_u32(dev->of_node, "slave-addr", &nvec->i2c_addr)) {
+ dev_err(dev, "no i2c address specified");
+ return -ENODEV;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
nvec->irq = platform_get_irq(pdev, 0);
if (nvec->irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
+ dev_err(dev, "no irq resource?\n");
return -ENODEV;
}
- i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
+ i2c_clk = devm_clk_get(dev, "div-clk");
if (IS_ERR(i2c_clk)) {
- dev_err(nvec->dev, "failed to get controller clock\n");
+ dev_err(dev, "failed to get controller clock\n");
return -ENODEV;
}
- nvec->rst = devm_reset_control_get_exclusive(&pdev->dev, "i2c");
+ nvec->rst = devm_reset_control_get_exclusive(dev, "i2c");
if (IS_ERR(nvec->rst)) {
- dev_err(nvec->dev, "failed to get controller reset\n");
+ dev_err(dev, "failed to get controller reset\n");
return PTR_ERR(nvec->rst);
}
@@ -849,17 +829,16 @@ static int tegra_nvec_probe(struct platform_device *pdev)
INIT_WORK(&nvec->rx_work, nvec_dispatch);
INIT_WORK(&nvec->tx_work, nvec_request_master);
- err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
- "nvec gpio");
- if (err < 0) {
- dev_err(nvec->dev, "couldn't request gpio\n");
- return -ENODEV;
+ nvec->gpiod = devm_gpiod_get(dev, "request", GPIOD_OUT_HIGH);
+ if (IS_ERR(nvec->gpiod)) {
+ dev_err(dev, "couldn't request gpio\n");
+ return PTR_ERR(nvec->gpiod);
}
- err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
+ err = devm_request_irq(dev, nvec->irq, nvec_interrupt, 0,
"nvec", nvec);
if (err) {
- dev_err(nvec->dev, "couldn't request irq\n");
+ dev_err(dev, "couldn't request irq\n");
return -ENODEV;
}
disable_irq(nvec->irq);
@@ -879,7 +858,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
if (!err) {
- dev_warn(nvec->dev,
+ dev_warn(dev,
"ec firmware version %02x.%02x.%02x / %02x\n",
msg->data[4], msg->data[5],
msg->data[6], msg->data[7]);
@@ -887,10 +866,10 @@ static int tegra_nvec_probe(struct platform_device *pdev)
nvec_msg_free(nvec, msg);
}
- ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
+ ret = mfd_add_devices(dev, 0, nvec_devices,
ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
if (ret)
- dev_err(nvec->dev, "error adding subdevices\n");
+ dev_err(dev, "error adding subdevices\n");
/* unmute speakers? */
nvec_write_async(nvec, unmute_speakers, 4);
@@ -925,8 +904,7 @@ static int tegra_nvec_remove(struct platform_device *pdev)
static int nvec_suspend(struct device *dev)
{
int err;
- struct platform_device *pdev = to_platform_device(dev);
- struct nvec_chip *nvec = platform_get_drvdata(pdev);
+ struct nvec_chip *nvec = dev_get_drvdata(dev);
struct nvec_msg *msg;
char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };
@@ -946,8 +924,7 @@ static int nvec_suspend(struct device *dev)
static int nvec_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct nvec_chip *nvec = platform_get_drvdata(pdev);
+ struct nvec_chip *nvec = dev_get_drvdata(dev);
dev_dbg(nvec->dev, "resuming\n");
tegra_init_i2c_slave(nvec);
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index 25efcdfa4f20..80c0353f141c 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -132,7 +132,7 @@ struct nvec_msg {
*/
struct nvec_chip {
struct device *dev;
- int gpio;
+ struct gpio_desc *gpiod;
int irq;
u32 i2c_addr;
void __iomem *base;
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index 61c2e65ac354..665a0b061719 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -1,6 +1,10 @@
TODO:
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
+ - convert all uses of the old GPIO API from <linux/gpio.h> to the
+ GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
+ lines from device tree, ACPI or board files, board files should
+ use <linux/gpio/machine.h>
- allow simultaneous XO-1 and XO-1.5 support
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
diff --git a/drivers/staging/pi433/Documentation/devicetree/pi433.txt b/drivers/staging/pi433/Documentation/devicetree/pi433.txt
index 9ff217fbcbbd..a810548c5857 100644
--- a/drivers/staging/pi433/Documentation/devicetree/pi433.txt
+++ b/drivers/staging/pi433/Documentation/devicetree/pi433.txt
@@ -46,10 +46,10 @@ It consists of the three gpio pins and an spi interface (here chip select 0)
For Raspbian users only
=======================
-Since Raspbian supports device tree overlays, you may use and overlay, instead
+Since Raspbian supports device tree overlays, you may use an overlay instead
of editing your boards device tree.
-For using the overlay, you need to compile the file pi433-overlay.dts you can
-find aside to this documentation.
+To use the overlay, you need to compile the file pi433-overlay.dts which can
+be found alongside this documentation.
The file needs to be compiled - either manually or by integration in your kernel
source tree. For a manual compile, you may use a command line like the following:
'linux/scripts/dtc/dtc -@ -I dts -O dtb -o pi433.dtbo pi433-overlay.dts'
diff --git a/drivers/staging/pi433/Documentation/pi433.txt b/drivers/staging/pi433/Documentation/pi433.txt
index d0b7000faafc..21cffdb86ecf 100644
--- a/drivers/staging/pi433/Documentation/pi433.txt
+++ b/drivers/staging/pi433/Documentation/pi433.txt
@@ -8,11 +8,11 @@ Introduction
This driver is for controlling pi433, a radio module for the Raspberry Pi
(www.pi433.de). It supports transmission and reception. It can be opened
by multiple applications for transmission and reception. While transmit
-jobs were queued and process automatically in the background, the first
+jobs are queued and processed automatically in the background, the first
application asking for reception will block out all other applications
until something gets received terminates the read request.
The driver supports on the fly reloading of the hardware fifo of the rf
-chip, thus enabling for much longer telegrams then hardware fifo size.
+chip, thus enabling much longer telegrams than the hardware fifo size.
Discription of driver operation
===============================
@@ -46,10 +46,10 @@ configuration set is written to the rf module and it gets set into receiving mod
Now the driver is waiting, that a predefined RSSI level (signal strength at the
receiver) is reached. Until this hasn't happened, the reception can be
interrupted by the transmission thread at any time to insert a transmission cycle.
-As soon as the predefined RSSI level is meat, a receiving cycle starts. Similar
+As soon as the predefined RSSI level is met, a receiving cycle starts. Similar
as described for the transmission cycle the read out of the hardware fifo is done
dynamically. Upon each hardware fifo threshold interrupt, a portion of data gets
-read. So also for reception it is possible to receive more data then the hardware
+read. So also for reception it is possible to receive more data than the hardware
fifo can hold.
@@ -225,7 +225,7 @@ rf params:
isn't found, telegram will be internally discarded
optionOff - sync detection is disabled.
enable_length_byte
- optionOn - First byte of payload will be used as length byte,
+ optionOn - First byte of payload will be used as a length byte,
regardless of the amount of bytes that were requested
by the read request.
optionOff - Number of bytes to be read will be set according to
diff --git a/drivers/staging/pi433/Kconfig b/drivers/staging/pi433/Kconfig
index 87c2ee192cca..c7340129dd4c 100644
--- a/drivers/staging/pi433/Kconfig
+++ b/drivers/staging/pi433/Kconfig
@@ -1,7 +1,7 @@
config PI433
tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
depends on SPI
- ---help---
+ help
This option allows you to enable support for the radio module Pi433.
Pi433 is a shield that fits onto the GPIO header of a Raspberry Pi
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index d1e0ddbc79ce..b061f77dda41 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -87,7 +87,7 @@ struct pi433_device {
/* tx related values */
STRUCT_KFIFO_REC_1(MSG_FIFO_SIZE) tx_fifo;
- struct mutex tx_fifo_lock; // TODO: check, whether necessary or obsolete
+ struct mutex tx_fifo_lock; /* serialize userspace writers */
struct task_struct *tx_task_struct;
wait_queue_head_t tx_wait_queue;
u8 free_in_fifo;
@@ -589,19 +589,19 @@ pi433_tx_thread(void *data)
* - size of message
* - message
*/
- mutex_lock(&device->tx_fifo_lock);
-
retval = kfifo_out(&device->tx_fifo, &tx_cfg, sizeof(tx_cfg));
if (retval != sizeof(tx_cfg)) {
- dev_dbg(device->dev, "reading tx_cfg from fifo failed: got %d byte(s), expected %d", retval, (unsigned int)sizeof(tx_cfg));
- mutex_unlock(&device->tx_fifo_lock);
+ dev_dbg(device->dev,
+ "reading tx_cfg from fifo failed: got %d byte(s), expected %d",
+ retval, (unsigned int)sizeof(tx_cfg));
continue;
}
retval = kfifo_out(&device->tx_fifo, &size, sizeof(size_t));
if (retval != sizeof(size_t)) {
- dev_dbg(device->dev, "reading msg size from fifo failed: got %d, expected %d", retval, (unsigned int)sizeof(size_t));
- mutex_unlock(&device->tx_fifo_lock);
+ dev_dbg(device->dev,
+ "reading msg size from fifo failed: got %d, expected %d",
+ retval, (unsigned int)sizeof(size_t));
continue;
}
@@ -623,7 +623,11 @@ pi433_tx_thread(void *data)
/* add length byte, if requested */
if (tx_cfg.enable_length_byte == OPTION_ON)
- device->buffer[position++] = size - 1; /* according to spec length byte itself must be excluded from the length calculation */
+ /*
+ * according to spec, length byte itself must be
+ * excluded from the length calculation
+ */
+ device->buffer[position++] = size - 1;
/* add adr byte, if requested */
if (tx_cfg.enable_address_byte == OPTION_ON)
@@ -634,7 +638,6 @@ pi433_tx_thread(void *data)
sizeof(device->buffer) - position);
dev_dbg(device->dev,
"read %d message byte(s) from fifo queue.", retval);
- mutex_unlock(&device->tx_fifo_lock);
/* if rx is active, we need to interrupt the waiting for
* incoming telegrams, to be able to send something.
@@ -818,7 +821,7 @@ pi433_write(struct file *filp, const char __user *buf,
struct pi433_instance *instance;
struct pi433_device *device;
int retval;
- unsigned int copied;
+ unsigned int required, available, copied;
instance = filp->private_data;
device = instance->device;
@@ -833,6 +836,16 @@ pi433_write(struct file *filp, const char __user *buf,
* - message
*/
mutex_lock(&device->tx_fifo_lock);
+
+ required = sizeof(instance->tx_cfg) + sizeof(size_t) + count;
+ available = kfifo_avail(&device->tx_fifo);
+ if (required > available) {
+ dev_dbg(device->dev, "write to fifo failed: %d bytes required but %d available",
+ required, available);
+ mutex_unlock(&device->tx_fifo_lock);
+ return -EAGAIN;
+ }
+
retval = kfifo_in(&device->tx_fifo, &instance->tx_cfg,
sizeof(instance->tx_cfg));
if (retval != sizeof(instance->tx_cfg))
@@ -855,8 +868,8 @@ pi433_write(struct file *filp, const char __user *buf,
return copied;
abort:
- dev_dbg(device->dev, "write to fifo failed: 0x%x", retval);
- kfifo_reset(&device->tx_fifo); // TODO: maybe find a solution, not to discard already stored, valid entries
+ dev_warn(device->dev,
+ "write to fifo failed, non recoverable: 0x%x", retval);
mutex_unlock(&device->tx_fifo_lock);
return -EAGAIN;
}
@@ -1042,7 +1055,7 @@ static int setup_gpio(struct pi433_device *device)
/* configure irq */
device->irq_num[i] = gpiod_to_irq(device->gpiod[i]);
if (device->irq_num[i] < 0) {
- device->gpiod[i] = ERR_PTR(-EINVAL);//(struct gpio_desc *)device->irq_num[i];
+ device->gpiod[i] = ERR_PTR(-EINVAL);
return device->irq_num[i];
}
retval = request_irq(device->irq_num[i],
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 5b0554823263..90280e9b006d 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -97,7 +97,8 @@ static int rf69_clear_bit(struct spi_device *spi, u8 reg, u8 mask)
return rf69_write_reg(spi, reg, tmp);
}
-static inline int rf69_read_mod_write(struct spi_device *spi, u8 reg, u8 mask, u8 value)
+static inline int rf69_read_mod_write(struct spi_device *spi, u8 reg,
+ u8 mask, u8 value)
{
u8 tmp;
@@ -112,15 +113,20 @@ int rf69_set_mode(struct spi_device *spi, enum mode mode)
{
switch (mode) {
case transmit:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_TRANSMIT);
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
+ OPMODE_MODE_TRANSMIT);
case receive:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_RECEIVE);
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
+ OPMODE_MODE_RECEIVE);
case synthesizer:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_SYNTHESIZER);
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
+ OPMODE_MODE_SYNTHESIZER);
case standby:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_STANDBY);
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
+ OPMODE_MODE_STANDBY);
case mode_sleep:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_SLEEP);
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
+ OPMODE_MODE_SLEEP);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -133,16 +139,21 @@ int rf69_set_mode(struct spi_device *spi, enum mode mode)
int rf69_set_data_mode(struct spi_device *spi, u8 data_mode)
{
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODE, data_mode);
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODE,
+ data_mode);
}
int rf69_set_modulation(struct spi_device *spi, enum modulation modulation)
{
switch (modulation) {
case OOK:
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_TYPE, DATAMODUL_MODULATION_TYPE_OOK);
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_TYPE,
+ DATAMODUL_MODULATION_TYPE_OOK);
case FSK:
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_TYPE, DATAMODUL_MODULATION_TYPE_FSK);
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_TYPE,
+ DATAMODUL_MODULATION_TYPE_FSK);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -172,13 +183,21 @@ int rf69_set_modulation_shaping(struct spi_device *spi,
case FSK:
switch (mod_shaping) {
case SHAPING_OFF:
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_NONE);
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_SHAPE,
+ DATAMODUL_MODULATION_SHAPE_NONE);
case SHAPING_1_0:
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_1_0);
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_SHAPE,
+ DATAMODUL_MODULATION_SHAPE_1_0);
case SHAPING_0_5:
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_0_5);
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_SHAPE,
+ DATAMODUL_MODULATION_SHAPE_0_5);
case SHAPING_0_3:
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_0_3);
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_SHAPE,
+ DATAMODUL_MODULATION_SHAPE_0_3);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -186,11 +205,17 @@ int rf69_set_modulation_shaping(struct spi_device *spi,
case OOK:
switch (mod_shaping) {
case SHAPING_OFF:
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_NONE);
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_SHAPE,
+ DATAMODUL_MODULATION_SHAPE_NONE);
case SHAPING_BR:
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_BR);
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_SHAPE,
+ DATAMODUL_MODULATION_SHAPE_BR);
case SHAPING_2BR:
- return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_2BR);
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_SHAPE,
+ DATAMODUL_MODULATION_SHAPE_2BR);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -342,7 +367,8 @@ int rf69_set_output_power_level(struct spi_device *spi, u8 power_level)
}
// write value
- return rf69_read_mod_write(spi, REG_PALEVEL, MASK_PALEVEL_OUTPUT_POWER, power_level);
+ return rf69_read_mod_write(spi, REG_PALEVEL, MASK_PALEVEL_OUTPUT_POWER,
+ power_level);
}
int rf69_set_pa_ramp(struct spi_device *spi, enum pa_ramp pa_ramp)
@@ -386,7 +412,8 @@ int rf69_set_pa_ramp(struct spi_device *spi, enum pa_ramp pa_ramp)
}
}
-int rf69_set_antenna_impedance(struct spi_device *spi, enum antenna_impedance antenna_impedance)
+int rf69_set_antenna_impedance(struct spi_device *spi,
+ enum antenna_impedance antenna_impedance)
{
switch (antenna_impedance) {
case fifty_ohm:
@@ -403,19 +430,26 @@ int rf69_set_lna_gain(struct spi_device *spi, enum lna_gain lna_gain)
{
switch (lna_gain) {
case automatic:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_AUTO);
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
+ LNA_GAIN_AUTO);
case max:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX);
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
+ LNA_GAIN_MAX);
case max_minus_6:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_6);
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
+ LNA_GAIN_MAX_MINUS_6);
case max_minus_12:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_12);
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
+ LNA_GAIN_MAX_MINUS_12);
case max_minus_24:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_24);
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
+ LNA_GAIN_MAX_MINUS_24);
case max_minus_36:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_36);
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
+ LNA_GAIN_MAX_MINUS_36);
case max_minus_48:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_48);
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
+ LNA_GAIN_MAX_MINUS_48);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -466,35 +500,55 @@ static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg,
return rf69_write_reg(spi, reg, bandwidth);
}
-int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse, u8 exponent)
+int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse,
+ u8 exponent)
{
return rf69_set_bandwidth_intern(spi, REG_RXBW, mantisse, exponent);
}
-int rf69_set_bandwidth_during_afc(struct spi_device *spi, enum mantisse mantisse, u8 exponent)
+int rf69_set_bandwidth_during_afc(struct spi_device *spi,
+ enum mantisse mantisse,
+ u8 exponent)
{
return rf69_set_bandwidth_intern(spi, REG_AFCBW, mantisse, exponent);
}
-int rf69_set_ook_threshold_dec(struct spi_device *spi, enum threshold_decrement threshold_decrement)
+int rf69_set_ook_threshold_dec(struct spi_device *spi,
+ enum threshold_decrement threshold_decrement)
{
switch (threshold_decrement) {
case dec_every8th:
- return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_EVERY_8TH);
+ return rf69_read_mod_write(spi, REG_OOKPEAK,
+ MASK_OOKPEAK_THRESDEC,
+ OOKPEAK_THRESHDEC_EVERY_8TH);
case dec_every4th:
- return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_EVERY_4TH);
+ return rf69_read_mod_write(spi, REG_OOKPEAK,
+ MASK_OOKPEAK_THRESDEC,
+ OOKPEAK_THRESHDEC_EVERY_4TH);
case dec_every2nd:
- return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_EVERY_2ND);
+ return rf69_read_mod_write(spi, REG_OOKPEAK,
+ MASK_OOKPEAK_THRESDEC,
+ OOKPEAK_THRESHDEC_EVERY_2ND);
case dec_once:
- return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_ONCE);
+ return rf69_read_mod_write(spi, REG_OOKPEAK,
+ MASK_OOKPEAK_THRESDEC,
+ OOKPEAK_THRESHDEC_ONCE);
case dec_twice:
- return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_TWICE);
+ return rf69_read_mod_write(spi, REG_OOKPEAK,
+ MASK_OOKPEAK_THRESDEC,
+ OOKPEAK_THRESHDEC_TWICE);
case dec_4times:
- return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_4_TIMES);
+ return rf69_read_mod_write(spi, REG_OOKPEAK,
+ MASK_OOKPEAK_THRESDEC,
+ OOKPEAK_THRESHDEC_4_TIMES);
case dec_8times:
- return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_8_TIMES);
+ return rf69_read_mod_write(spi, REG_OOKPEAK,
+ MASK_OOKPEAK_THRESDEC,
+ OOKPEAK_THRESHDEC_8_TIMES);
case dec_16times:
- return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_16_TIMES);
+ return rf69_read_mod_write(spi, REG_OOKPEAK,
+ MASK_OOKPEAK_THRESDEC,
+ OOKPEAK_THRESHDEC_16_TIMES);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -620,13 +674,16 @@ int rf69_disable_sync(struct spi_device *spi)
return rf69_clear_bit(spi, REG_SYNC_CONFIG, MASK_SYNC_CONFIG_SYNC_ON);
}
-int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifo_fill_condition fifo_fill_condition)
+int rf69_set_fifo_fill_condition(struct spi_device *spi,
+ enum fifo_fill_condition fifo_fill_condition)
{
switch (fifo_fill_condition) {
case always:
- return rf69_set_bit(spi, REG_SYNC_CONFIG, MASK_SYNC_CONFIG_FIFO_FILL_CONDITION);
+ return rf69_set_bit(spi, REG_SYNC_CONFIG,
+ MASK_SYNC_CONFIG_FIFO_FILL_CONDITION);
case after_sync_interrupt:
- return rf69_clear_bit(spi, REG_SYNC_CONFIG, MASK_SYNC_CONFIG_FIFO_FILL_CONDITION);
+ return rf69_clear_bit(spi, REG_SYNC_CONFIG,
+ MASK_SYNC_CONFIG_FIFO_FILL_CONDITION);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -642,7 +699,9 @@ int rf69_set_sync_size(struct spi_device *spi, u8 sync_size)
}
// write value
- return rf69_read_mod_write(spi, REG_SYNC_CONFIG, MASK_SYNC_CONFIG_SYNC_SIZE, (sync_size << 3));
+ return rf69_read_mod_write(spi, REG_SYNC_CONFIG,
+ MASK_SYNC_CONFIG_SYNC_SIZE,
+ (sync_size << 3));
}
int rf69_set_sync_values(struct spi_device *spi, u8 sync_values[8])
@@ -661,13 +720,16 @@ int rf69_set_sync_values(struct spi_device *spi, u8 sync_values[8])
return retval;
}
-int rf69_set_packet_format(struct spi_device *spi, enum packet_format packet_format)
+int rf69_set_packet_format(struct spi_device *spi,
+ enum packet_format packet_format)
{
switch (packet_format) {
case packet_length_var:
- return rf69_set_bit(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE);
+ return rf69_set_bit(spi, REG_PACKETCONFIG1,
+ MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE);
case packet_length_fix:
- return rf69_clear_bit(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE);
+ return rf69_clear_bit(spi, REG_PACKETCONFIG1,
+ MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -684,15 +746,22 @@ int rf69_disable_crc(struct spi_device *spi)
return rf69_clear_bit(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_CRC_ON);
}
-int rf69_set_address_filtering(struct spi_device *spi, enum address_filtering address_filtering)
+int rf69_set_address_filtering(struct spi_device *spi,
+ enum address_filtering address_filtering)
{
switch (address_filtering) {
case filtering_off:
- return rf69_read_mod_write(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_ADDRESSFILTERING, PACKETCONFIG1_ADDRESSFILTERING_OFF);
+ return rf69_read_mod_write(spi, REG_PACKETCONFIG1,
+ MASK_PACKETCONFIG1_ADDRESSFILTERING,
+ PACKETCONFIG1_ADDRESSFILTERING_OFF);
case node_address:
- return rf69_read_mod_write(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_ADDRESSFILTERING, PACKETCONFIG1_ADDRESSFILTERING_NODE);
+ return rf69_read_mod_write(spi, REG_PACKETCONFIG1,
+ MASK_PACKETCONFIG1_ADDRESSFILTERING,
+ PACKETCONFIG1_ADDRESSFILTERING_NODE);
case node_or_broadcast_address:
- return rf69_read_mod_write(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_ADDRESSFILTERING, PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST);
+ return rf69_read_mod_write(spi, REG_PACKETCONFIG1,
+ MASK_PACKETCONFIG1_ADDRESSFILTERING,
+ PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -714,13 +783,16 @@ int rf69_set_broadcast_address(struct spi_device *spi, u8 broadcast_address)
return rf69_write_reg(spi, REG_BROADCASTADRS, broadcast_address);
}
-int rf69_set_tx_start_condition(struct spi_device *spi, enum tx_start_condition tx_start_condition)
+int rf69_set_tx_start_condition(struct spi_device *spi,
+ enum tx_start_condition tx_start_condition)
{
switch (tx_start_condition) {
case fifo_level:
- return rf69_clear_bit(spi, REG_FIFO_THRESH, MASK_FIFO_THRESH_TXSTART);
+ return rf69_clear_bit(spi, REG_FIFO_THRESH,
+ MASK_FIFO_THRESH_TXSTART);
case fifo_not_empty:
- return rf69_set_bit(spi, REG_FIFO_THRESH, MASK_FIFO_THRESH_TXSTART);
+ return rf69_set_bit(spi, REG_FIFO_THRESH,
+ MASK_FIFO_THRESH_TXSTART);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -738,7 +810,9 @@ int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold)
}
/* write value */
- retval = rf69_read_mod_write(spi, REG_FIFO_THRESH, MASK_FIFO_THRESH_VALUE, threshold);
+ retval = rf69_read_mod_write(spi, REG_FIFO_THRESH,
+ MASK_FIFO_THRESH_VALUE,
+ threshold);
if (retval)
return retval;
@@ -775,7 +849,8 @@ int rf69_read_fifo(struct spi_device *spi, u8 *buffer, unsigned int size)
int retval;
if (size > FIFO_SIZE) {
- dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer\n");
+ dev_dbg(&spi->dev,
+ "read fifo: passed in buffer bigger then internal buffer\n");
return -EMSGSIZE;
}
@@ -807,7 +882,8 @@ int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size)
u8 local_buffer[FIFO_SIZE + 1];
if (size > FIFO_SIZE) {
- dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer\n");
+ dev_dbg(&spi->dev,
+ "read fifo: passed in buffer bigger then internal buffer\n");
return -EMSGSIZE;
}
diff --git a/drivers/staging/pi433/rf69.h b/drivers/staging/pi433/rf69.h
index b9f6850e3316..c131ffbdc2db 100644
--- a/drivers/staging/pi433/rf69.h
+++ b/drivers/staging/pi433/rf69.h
@@ -28,7 +28,8 @@
int rf69_set_mode(struct spi_device *spi, enum mode mode);
int rf69_set_data_mode(struct spi_device *spi, u8 data_mode);
int rf69_set_modulation(struct spi_device *spi, enum modulation modulation);
-int rf69_set_modulation_shaping(struct spi_device *spi, enum mod_shaping mod_shaping);
+int rf69_set_modulation_shaping(struct spi_device *spi,
+ enum mod_shaping mod_shaping);
int rf69_set_bit_rate(struct spi_device *spi, u16 bit_rate);
int rf69_set_deviation(struct spi_device *spi, u32 deviation);
int rf69_set_frequency(struct spi_device *spi, u32 frequency);
@@ -36,28 +37,37 @@ int rf69_enable_amplifier(struct spi_device *spi, u8 amplifier_mask);
int rf69_disable_amplifier(struct spi_device *spi, u8 amplifier_mask);
int rf69_set_output_power_level(struct spi_device *spi, u8 power_level);
int rf69_set_pa_ramp(struct spi_device *spi, enum pa_ramp pa_ramp);
-int rf69_set_antenna_impedance(struct spi_device *spi, enum antenna_impedance antenna_impedance);
+int rf69_set_antenna_impedance(struct spi_device *spi,
+ enum antenna_impedance antenna_impedance);
int rf69_set_lna_gain(struct spi_device *spi, enum lna_gain lna_gain);
-int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse, u8 exponent);
-int rf69_set_bandwidth_during_afc(struct spi_device *spi, enum mantisse mantisse, u8 exponent);
-int rf69_set_ook_threshold_dec(struct spi_device *spi, enum threshold_decrement threshold_decrement);
+int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse,
+ u8 exponent);
+int rf69_set_bandwidth_during_afc(struct spi_device *spi,
+ enum mantisse mantisse,
+ u8 exponent);
+int rf69_set_ook_threshold_dec(struct spi_device *spi,
+ enum threshold_decrement threshold_decrement);
int rf69_set_dio_mapping(struct spi_device *spi, u8 dio_number, u8 value);
bool rf69_get_flag(struct spi_device *spi, enum flag flag);
int rf69_set_rssi_threshold(struct spi_device *spi, u8 threshold);
int rf69_set_preamble_length(struct spi_device *spi, u16 preamble_length);
int rf69_enable_sync(struct spi_device *spi);
int rf69_disable_sync(struct spi_device *spi);
-int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifo_fill_condition fifo_fill_condition);
+int rf69_set_fifo_fill_condition(struct spi_device *spi,
+ enum fifo_fill_condition fifo_fill_condition);
int rf69_set_sync_size(struct spi_device *spi, u8 sync_size);
int rf69_set_sync_values(struct spi_device *spi, u8 sync_values[8]);
-int rf69_set_packet_format(struct spi_device *spi, enum packet_format packet_format);
+int rf69_set_packet_format(struct spi_device *spi,
+ enum packet_format packet_format);
int rf69_enable_crc(struct spi_device *spi);
int rf69_disable_crc(struct spi_device *spi);
-int rf69_set_address_filtering(struct spi_device *spi, enum address_filtering address_filtering);
+int rf69_set_address_filtering(struct spi_device *spi,
+ enum address_filtering address_filtering);
int rf69_set_payload_length(struct spi_device *spi, u8 payload_length);
int rf69_set_node_address(struct spi_device *spi, u8 node_address);
int rf69_set_broadcast_address(struct spi_device *spi, u8 broadcast_address);
-int rf69_set_tx_start_condition(struct spi_device *spi, enum tx_start_condition tx_start_condition);
+int rf69_set_tx_start_condition(struct spi_device *spi,
+ enum tx_start_condition tx_start_condition);
int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold);
int rf69_set_dagc(struct spi_device *spi, enum dagc dagc);
diff --git a/drivers/staging/rtl8188eu/TODO b/drivers/staging/rtl8188eu/TODO
index ce60f07b9977..7581e25f231d 100644
--- a/drivers/staging/rtl8188eu/TODO
+++ b/drivers/staging/rtl8188eu/TODO
@@ -9,11 +9,6 @@ TODO:
- merge Realtek's bugfixes and new features into the driver
- switch to use LIB80211
- switch to use MAC80211
-- figure out what to do with this code in rtw_recv_indicatepkt():
- rcu_read_lock();
- rcu_dereference(padapter->pnetdev->rx_handler_data);
- rcu_read_unlock();
- Perhaps delete it, perhaps assign to some local variable.
Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index fa611455109a..4140e37bf859 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -69,19 +69,19 @@ static void update_BCNTIM(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
- unsigned char *pie = pnetwork_mlmeext->IEs;
+ unsigned char *pie = pnetwork_mlmeext->ies;
u8 *p, *dst_ie, *premainder_ie = NULL;
u8 *pbackup_remainder_ie = NULL;
uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
/* update TIM IE */
p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen,
- pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_);
+ pnetwork_mlmeext->ie_length - _FIXED_IE_LENGTH_);
if (p && tim_ielen > 0) {
tim_ielen += 2;
premainder_ie = p + tim_ielen;
tim_ie_offset = (int)(p - pie);
- remainder_ielen = pnetwork_mlmeext->IELength -
+ remainder_ielen = pnetwork_mlmeext->ie_length -
tim_ie_offset - tim_ielen;
/* append TIM IE from dst_ie offset */
dst_ie = p;
@@ -94,7 +94,7 @@ static void update_BCNTIM(struct adapter *padapter)
/* get supported rates len */
p = rtw_get_ie(pie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_,
- &tmp_len, (pnetwork_mlmeext->IELength -
+ &tmp_len, (pnetwork_mlmeext->ie_length -
_BEACON_IE_OFFSET_));
if (p)
offset += tmp_len+2;
@@ -104,7 +104,7 @@ static void update_BCNTIM(struct adapter *padapter)
premainder_ie = pie + offset;
- remainder_ielen = pnetwork_mlmeext->IELength -
+ remainder_ielen = pnetwork_mlmeext->ie_length -
offset - tim_ielen;
/* append TIM IE from offset */
@@ -148,7 +148,7 @@ static void update_BCNTIM(struct adapter *padapter)
kfree(pbackup_remainder_ie);
}
offset = (uint)(dst_ie - pie);
- pnetwork_mlmeext->IELength = offset + remainder_ielen;
+ pnetwork_mlmeext->ie_length = offset + remainder_ielen;
set_tx_beacon_cmd(padapter);
}
@@ -158,13 +158,13 @@ void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
{
struct ndis_802_11_var_ie *pIE;
u8 bmatch = false;
- u8 *pie = pnetwork->IEs;
+ u8 *pie = pnetwork->ies;
u8 *p = NULL, *dst_ie = NULL, *premainder_ie = NULL;
u8 *pbackup_remainder_ie = NULL;
u32 i, offset, ielen = 0, ie_offset, remainder_ielen = 0;
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(pnetwork->IEs + i);
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->ie_length;) {
+ pIE = (struct ndis_802_11_var_ie *)(pnetwork->ies + i);
if (pIE->ElementID > index) {
break;
@@ -187,7 +187,7 @@ void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
ie_offset = (int)(p - pie);
- remainder_ielen = pnetwork->IELength - ie_offset - ielen;
+ remainder_ielen = pnetwork->ie_length - ie_offset - ielen;
if (bmatch)
dst_ie = p;
@@ -216,7 +216,7 @@ void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
}
offset = (uint)(dst_ie - pie);
- pnetwork->IELength = offset + remainder_ielen;
+ pnetwork->ie_length = offset + remainder_ielen;
}
void rtw_remove_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
@@ -225,10 +225,10 @@ void rtw_remove_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
u8 *p, *dst_ie = NULL, *premainder_ie = NULL;
u8 *pbackup_remainder_ie = NULL;
uint offset, ielen, ie_offset, remainder_ielen = 0;
- u8 *pie = pnetwork->IEs;
+ u8 *pie = pnetwork->ies;
p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, index, &ielen,
- pnetwork->IELength - _FIXED_IE_LENGTH_);
+ pnetwork->ie_length - _FIXED_IE_LENGTH_);
if (p && ielen > 0) {
ielen += 2;
@@ -236,7 +236,7 @@ void rtw_remove_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
ie_offset = (int)(p - pie);
- remainder_ielen = pnetwork->IELength - ie_offset - ielen;
+ remainder_ielen = pnetwork->ie_length - ie_offset - ielen;
dst_ie = p;
}
@@ -256,7 +256,7 @@ void rtw_remove_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
}
offset = (uint)(dst_ie - pie);
- pnetwork->IELength = offset + remainder_ielen;
+ pnetwork->ie_length = offset + remainder_ielen;
}
static u8 chk_sta_is_alive(struct sta_info *psta)
@@ -740,7 +740,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
* beacon twice when stating hostapd, and at first time the
* security ie (RSN/WPA IE) will not include in beacon.
*/
- if (!rtw_get_wps_ie(pnetwork->IEs + _FIXED_IE_LENGTH_, pnetwork->IELength - _FIXED_IE_LENGTH_, NULL, NULL))
+ if (!rtw_get_wps_ie(pnetwork->ies + _FIXED_IE_LENGTH_, pnetwork->ie_length - _FIXED_IE_LENGTH_, NULL, NULL))
pmlmeext->bstart_bss = true;
/* todo: update wmm, ht cap */
@@ -794,7 +794,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
}
/* set channel, bwmode */
- p = rtw_get_ie((pnetwork->IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - sizeof(struct ndis_802_11_fixed_ie)));
+ p = rtw_get_ie((pnetwork->ies + sizeof(struct ndis_802_11_fixed_ie)), _HT_ADD_INFO_IE_, &ie_len, (pnetwork->ie_length - sizeof(struct ndis_802_11_fixed_ie)));
if (p && ie_len) {
pht_info = (struct HT_info_element *)(p + 2);
@@ -866,7 +866,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pbss_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
- u8 *ie = pbss_network->IEs;
+ u8 *ie = pbss_network->ies;
/* SSID */
/* Supported rates */
@@ -888,11 +888,11 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
if (len < 0 || len > MAX_IE_SZ)
return _FAIL;
- pbss_network->IELength = len;
+ pbss_network->ie_length = len;
memset(ie, 0, MAX_IE_SZ);
- memcpy(ie, pbuf, pbss_network->IELength);
+ memcpy(ie, pbuf, pbss_network->ie_length);
if (pbss_network->InfrastructureMode != Ndis802_11APMode)
@@ -910,7 +910,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
cap = get_unaligned_le16(ie);
/* SSID */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0) {
memset(&pbss_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len);
@@ -920,7 +920,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* channel */
channel = 0;
pbss_network->Configuration.Length = 0;
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0)
channel = *(p + 2);
@@ -928,14 +928,14 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
/* get supported rates */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p) {
memcpy(supportRate, p + 2, ie_len);
supportRateNum = ie_len;
}
/* get ext_supported rates */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->IELength - _BEACON_IE_OFFSET_);
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p) {
memcpy(supportRate + supportRateNum, p + 2, ie_len);
supportRateNum += ie_len;
@@ -946,7 +946,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
/* parsing ERP_IE */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0)
ERP_IE_handler(padapter, (struct ndis_802_11_var_ie *)p);
@@ -963,7 +963,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pairwise_cipher = 0;
psecuritypriv->wpa2_group_cipher = _NO_PRIVACY_;
psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0) {
if (rtw_parse_wpa2_ie(p, ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
@@ -983,7 +983,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
psecuritypriv->wpa_pairwise_cipher = _NO_PRIVACY_;
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
- (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ (pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
if ((p) && (!memcmp(p + 2, OUI1, 4))) {
if (rtw_parse_wpa_ie(p, ie_len + 2, &group_cipher,
&pairwise_cipher, NULL) == _SUCCESS) {
@@ -1008,7 +1008,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
if (pregistrypriv->wmm_enable) {
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
- (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ (pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
pmlmepriv->qospriv.qos_option = 1;
@@ -1031,7 +1031,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
}
/* parsing HT_CAP_IE */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len,
- (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0) {
struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p + 2);
@@ -1055,7 +1055,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* parsing HT_INFO_IE */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len,
- (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0)
pHT_info_ie = p;
switch (network_type) {
@@ -1235,7 +1235,7 @@ static void update_bcn_erpinfo_ie(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- unsigned char *p, *ie = pnetwork->IEs;
+ unsigned char *p, *ie = pnetwork->ies;
u32 len = 0;
DBG_88E("%s, ERP_enable =%d\n", __func__, pmlmeinfo->ERP_enable);
@@ -1245,7 +1245,7 @@ static void update_bcn_erpinfo_ie(struct adapter *padapter)
/* parsing ERP_IE */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &len,
- (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ (pnetwork->ie_length - _BEACON_IE_OFFSET_));
if (p && len > 0) {
struct ndis_802_11_var_ie *pIE = (struct ndis_802_11_var_ie *)p;
@@ -1272,8 +1272,8 @@ static void update_bcn_wps_ie(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- unsigned char *ie = pnetwork->IEs;
- u32 ielen = pnetwork->IELength;
+ unsigned char *ie = pnetwork->ies;
+ u32 ielen = pnetwork->ie_length;
DBG_88E("%s\n", __func__);
@@ -1306,8 +1306,8 @@ static void update_bcn_wps_ie(struct adapter *padapter)
if (pbackup_remainder_ie)
memcpy(pwps_ie, pbackup_remainder_ie, remainder_ielen);
- /* update IELength */
- pnetwork->IELength = wps_offset + (wps_ielen+2) + remainder_ielen;
+ /* update ie_length */
+ pnetwork->ie_length = wps_offset + (wps_ielen+2) + remainder_ielen;
}
kfree(pbackup_remainder_ie);
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index be8542676adf..72099f5d6915 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -380,7 +380,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
res = _FAIL;
goto exit;
}
- /* for IEs is fix buf size */
+ /* for ies is fix buf size */
t_len = sizeof(struct wlan_bssid_ex);
/* for hidden ap to set fw_state here */
@@ -415,14 +415,14 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
memcpy(psecnetwork, &pnetwork->network, get_wlan_bssid_ex_sz(&pnetwork->network));
- psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
+ psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->ie_length;
- if ((psecnetwork->IELength-12) < (256-1))
- memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength-12);
+ if ((psecnetwork->ie_length-12) < (256-1))
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], psecnetwork->ie_length-12);
else
- memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], (256-1));
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], (256-1));
- psecnetwork->IELength = 0;
+ psecnetwork->ie_length = 0;
/* Added by Albert 2009/02/18 */
/* If the driver wants to use the bssid to create the connection. */
/* If not, we have to copy the connecting AP's MAC address to it so that */
@@ -431,17 +431,17 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
if (!pmlmepriv->assoc_by_bssid)
memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.MacAddress[0], ETH_ALEN);
- psecnetwork->IELength = rtw_restruct_sec_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength);
+ psecnetwork->ie_length = rtw_restruct_sec_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length);
pqospriv->qos_option = 0;
if (pregistrypriv->wmm_enable) {
u32 tmp_len;
- tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength, psecnetwork->IELength);
+ tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length, psecnetwork->ie_length);
- if (psecnetwork->IELength != tmp_len) {
- psecnetwork->IELength = tmp_len;
+ if (psecnetwork->ie_length != tmp_len) {
+ psecnetwork->ie_length = tmp_len;
pqospriv->qos_option = 1; /* There is WMM IE in this corresp. beacon */
} else {
pqospriv->qos_option = 0;/* There is no WMM IE in this corresp. beacon */
@@ -460,12 +460,12 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
(padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
/* rtw_restructure_ht_ie */
- rtw_restructure_ht_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0],
- pnetwork->network.IELength, &psecnetwork->IELength);
+ rtw_restructure_ht_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0],
+ pnetwork->network.ie_length, &psecnetwork->ie_length);
}
}
- pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->network.IEs, pnetwork->network.IELength);
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->network.ies, pnetwork->network.ie_length);
if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_TENDA)
padapter->pwrctrlpriv.smart_ps = 0;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 0b0fdccc7278..52ad085383a0 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -229,7 +229,7 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
int rateLen;
uint sz = 0;
struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
- u8 *ie = pdev_network->IEs;
+ u8 *ie = pdev_network->ies;
/* timestamp will be inserted by hardware */
@@ -590,8 +590,8 @@ u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
}
/**
- * rtw_get_wps_ie - Search WPS IE from a series of IEs
- * @in_ie: Address of IEs to search
+ * rtw_get_wps_ie - Search WPS IE from a series of ies
+ * @in_ie: Address of ies to search
* @in_len: Length limit from in_ie
* @wps_ie: If not NULL and WPS IE is found, WPS IE will be copied to the buf starting from wps_ie
* @wps_ielen: If not NULL and WPS IE is found, will set to the length of the entire WPS IE
@@ -798,7 +798,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
/**
* ieee802_11_parse_elems - Parse information elements in management frames
- * @start: Pointer to the start of IEs
+ * @start: Pointer to the start of ies
* @len: Length of IE buffer in octets
* @elems: Data structure for parsed elements
* @show_errors: Whether to show parsing errors in debug log
@@ -962,7 +962,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
int ret = _FAIL;
- pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
+ pbuf = rtw_get_wpa_ie(&pnetwork->network.ies[12], &wpa_ielen, pnetwork->network.ie_length - 12);
if (pbuf && (wpa_ielen > 0)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: wpa_ielen: %d", __func__, wpa_ielen));
@@ -975,7 +975,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
ret = _SUCCESS;
}
} else {
- pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
+ pbuf = rtw_get_wpa2_ie(&pnetwork->network.ies[12], &wpa_ielen, pnetwork->network.ie_length - 12);
if (pbuf && (wpa_ielen > 0)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE\n"));
@@ -1005,7 +1005,7 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
uint len;
unsigned char *p;
- memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
+ memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.ies), 2);
cap = le16_to_cpu(le_tmp);
if (cap & WLAN_CAPABILITY_PRIVACY) {
bencrypt = 1;
@@ -1013,7 +1013,7 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
} else {
pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
}
- rtw_get_sec_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &rsn_len, NULL, &wpa_len);
+ rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, NULL, &rsn_len, NULL, &wpa_len);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: ssid =%s\n", __func__, pnetwork->network.Ssid.Ssid));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: wpa_len =%d rsn_len =%d\n", __func__, wpa_len, rsn_len));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: ssid =%s\n", __func__, pnetwork->network.Ssid.Ssid));
@@ -1035,7 +1035,7 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
/* get bwmode and ch_offset */
/* parsing HT_CAP_IE */
- p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ p = rtw_get_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pnetwork->network.ie_length - _FIXED_IE_LENGTH_);
if (p && len > 0) {
struct ieee80211_ht_cap *ht_cap =
(struct ieee80211_ht_cap *)(p + 2);
@@ -1045,7 +1045,7 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
pnetwork->BcnInfo.ht_cap_info = 0;
}
/* parsing HT_INFO_IE */
- p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ p = rtw_get_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.ie_length - _FIXED_IE_LENGTH_);
if (p && len > 0) {
pht_info = (struct HT_info_element *)(p + 2);
pnetwork->BcnInfo.ht_info_infos_0 = pht_info->infos[0];
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index 2183c613e61b..2fca8ae68e05 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -577,7 +577,7 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
return 0;
if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N|WIRELESS_11_5N)) {
- p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
+ p = rtw_get_ie(&pcur_bss->ies[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->ie_length-12);
if (p && ht_ielen > 0) {
/* cur_bwmod is updated by beacon, pmlmeinfo is updated by association response */
bw_40MHz = (pmlmeext->cur_bwmode && (HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH & pmlmeinfo->HT_info.infos[0])) ? 1 : 0;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 8d49e3047201..24e92998a30c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -270,7 +270,7 @@ u16 rtw_get_capability(struct wlan_bssid_ex *bss)
{
__le16 val;
- memcpy((u8 *)&val, rtw_get_capability_from_ie(bss->IEs), 2);
+ memcpy((u8 *)&val, rtw_get_capability_from_ie(bss->ies), 2);
return le16_to_cpu(val);
}
@@ -318,8 +318,8 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
u16 s_cap, d_cap;
__le16 le_scap, le_dcap;
- memcpy((u8 *)&le_scap, rtw_get_capability_from_ie(src->IEs), 2);
- memcpy((u8 *)&le_dcap, rtw_get_capability_from_ie(dst->IEs), 2);
+ memcpy((u8 *)&le_scap, rtw_get_capability_from_ie(src->ies), 2);
+ memcpy((u8 *)&le_dcap, rtw_get_capability_from_ie(dst->ies), 2);
s_cap = le16_to_cpu(le_scap);
d_cap = le16_to_cpu(le_dcap);
@@ -399,8 +399,8 @@ static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex
if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) &&
(is_same_network(&(pmlmepriv->cur_network.network), pnetwork))) {
update_network(&(pmlmepriv->cur_network.network), pnetwork, adapter, true);
- rtw_update_protection(adapter, (pmlmepriv->cur_network.network.IEs) + sizeof(struct ndis_802_11_fixed_ie),
- pmlmepriv->cur_network.network.IELength);
+ rtw_update_protection(adapter, (pmlmepriv->cur_network.network.ies) + sizeof(struct ndis_802_11_fixed_ie),
+ pmlmepriv->cur_network.network.ie_length);
}
}
@@ -483,7 +483,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
pnetwork->last_scanned = jiffies;
/* target.Reserved[0]== 1, means that scanned network is a bcn frame. */
- if ((pnetwork->network.IELength > target->IELength) && (target->Reserved[0] == 1))
+ if ((pnetwork->network.ie_length > target->ie_length) && (target->Reserved[0] == 1))
update_ie = false;
update_network(&(pnetwork->network), target, adapter, update_ie);
@@ -524,7 +524,7 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
privacy = pnetwork->network.Privacy;
if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen))
+ if (rtw_get_wps_ie(pnetwork->network.ies+_FIXED_IE_LENGTH_, pnetwork->network.ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen))
return true;
else
return false;
@@ -576,11 +576,11 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
if (!memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
struct wlan_network *ibss_wlan = NULL;
- memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
+ memcpy(pmlmepriv->cur_network.network.ies, pnetwork->ies, 8);
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->MacAddress);
if (ibss_wlan) {
- memcpy(ibss_wlan->network.IEs, pnetwork->IEs, 8);
+ memcpy(ibss_wlan->network.ies, pnetwork->ies, 8);
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto exit;
}
@@ -947,9 +947,9 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
/* why not use ptarget_wlan?? */
memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
- /* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */
- cur_network->network.IELength = ptarget_wlan->network.IELength;
- memcpy(&cur_network->network.IEs[0], &ptarget_wlan->network.IEs[0], MAX_IE_SZ);
+ /* some ies in pnetwork is wrong, so we should use ptarget_wlan ies */
+ cur_network->network.ie_length = ptarget_wlan->network.ie_length;
+ memcpy(&cur_network->network.ies[0], &ptarget_wlan->network.ies[0], MAX_IE_SZ);
cur_network->aid = pnetwork->join_res;
@@ -977,10 +977,10 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
break;
}
- rtw_update_protection(padapter, (cur_network->network.IEs) +
+ rtw_update_protection(padapter, (cur_network->network.ies) +
sizeof(struct ndis_802_11_fixed_ie),
- (cur_network->network.IELength));
- rtw_update_ht_cap(padapter, cur_network->network.IEs, cur_network->network.IELength);
+ (cur_network->network.ie_length));
+ rtw_update_ht_cap(padapter, cur_network->network.ies, cur_network->network.ie_length);
}
/* Notes: the function could be > passive_level (the same context as Rx tasklet) */
@@ -1665,7 +1665,7 @@ err_free_cmd:
return res;
}
-/* adjust IEs for rtw_joinbss_cmd in WMM */
+/* adjust ies for rtw_joinbss_cmd in WMM */
int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len)
{
unsigned int ielength = 0;
@@ -1861,11 +1861,11 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
/* 2. IE */
sz = rtw_generate_ie(pregistrypriv);
- pdev_network->IELength = sz;
+ pdev_network->ie_length = sz;
pdev_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
- /* notes: translate IELength & Length after assign the Length to cmdsz in createbss_cmd(); */
- /* pdev_network->IELength = cpu_to_le32(sz); */
+ /* notes: translate ie_length & Length after assign the Length to cmdsz in createbss_cmd(); */
+ /* pdev_network->ie_length = cpu_to_le32(sz); */
}
void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter)
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 19266cf1edbd..59d862f67573 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -371,14 +371,14 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
uint wps_ielen;
u8 sr = 0;
- memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ memcpy(pframe, cur_network->ies, cur_network->ie_length);
len_diff = update_hidden_ssid(
pframe+_BEACON_IE_OFFSET_
- , cur_network->IELength-_BEACON_IE_OFFSET_
+ , cur_network->ie_length-_BEACON_IE_OFFSET_
, pmlmeinfo->hidden_ssid_mode
);
- pframe += (cur_network->IELength+len_diff);
- pattrib->pktlen += (cur_network->IELength+len_diff);
+ pframe += (cur_network->ie_length+len_diff);
+ pattrib->pktlen += (cur_network->ie_length+len_diff);
wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0)
@@ -399,14 +399,14 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
/* beacon interval: 2 bytes */
- memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->ies)), 2);
pframe += 2;
pattrib->pktlen += 2;
/* capability info: 2 bytes */
- memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->ies)), 2);
pframe += 2;
pattrib->pktlen += 2;
@@ -509,25 +509,25 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pattrib->pktlen = pattrib->hdrlen;
pframe += pattrib->hdrlen;
- if (cur_network->IELength > MAX_IE_SZ)
+ if (cur_network->ie_length > MAX_IE_SZ)
return;
#if defined(CONFIG_88EU_AP_MODE)
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
- pwps_ie = rtw_get_wps_ie(cur_network->IEs+_FIXED_IE_LENGTH_, cur_network->IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+ pwps_ie = rtw_get_wps_ie(cur_network->ies+_FIXED_IE_LENGTH_, cur_network->ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
/* inerset & update wps_probe_resp_ie */
if ((pmlmepriv->wps_probe_resp_ie != NULL) && pwps_ie && (wps_ielen > 0)) {
uint wps_offset, remainder_ielen;
u8 *premainder_ie;
- wps_offset = (uint)(pwps_ie - cur_network->IEs);
+ wps_offset = (uint)(pwps_ie - cur_network->ies);
premainder_ie = pwps_ie + wps_ielen;
- remainder_ielen = cur_network->IELength - wps_offset - wps_ielen;
+ remainder_ielen = cur_network->ie_length - wps_offset - wps_ielen;
- memcpy(pframe, cur_network->IEs, wps_offset);
+ memcpy(pframe, cur_network->ies, wps_offset);
pframe += wps_offset;
pattrib->pktlen += wps_offset;
@@ -544,9 +544,9 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pattrib->pktlen += remainder_ielen;
}
} else {
- memcpy(pframe, cur_network->IEs, cur_network->IELength);
- pframe += cur_network->IELength;
- pattrib->pktlen += cur_network->IELength;
+ memcpy(pframe, cur_network->ies, cur_network->ie_length);
+ pframe += cur_network->ie_length;
+ pattrib->pktlen += cur_network->ie_length;
}
} else
#endif
@@ -557,14 +557,14 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
/* beacon interval: 2 bytes */
- memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->ies)), 2);
pframe += 2;
pattrib->pktlen += 2;
/* capability info: 2 bytes */
- memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->ies)), 2);
pframe += 2;
pattrib->pktlen += 2;
@@ -906,7 +906,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
- u8 *ie = pnetwork->IEs;
+ u8 *ie = pnetwork->ies;
__le16 lestatus, leval;
DBG_88E("%s\n", __func__);
@@ -968,7 +968,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
uint ie_len = 0;
/* FILL HT CAP INFO IE */
- pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
if (pbuf && ie_len > 0) {
memcpy(pframe, pbuf, ie_len+2);
pframe += (ie_len+2);
@@ -976,7 +976,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
}
/* FILL HT ADD INFO IE */
- pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
if (pbuf && ie_len > 0) {
memcpy(pframe, pbuf, ie_len+2);
pframe += (ie_len+2);
@@ -990,7 +990,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
unsigned char WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
for (pbuf = ie + _BEACON_IE_OFFSET_;; pbuf += (ie_len + 2)) {
- pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
if (pbuf && !memcmp(pbuf+2, WMM_PARA_IE, 6)) {
memcpy(pframe, pbuf, ie_len+2);
pframe += (ie_len+2);
@@ -1065,7 +1065,7 @@ static void issue_assocreq(struct adapter *padapter)
/* caps */
- memcpy(pframe, rtw_get_capability_from_ie(pmlmeinfo->network.IEs), 2);
+ memcpy(pframe, rtw_get_capability_from_ie(pmlmeinfo->network.ies), 2);
pframe += 2;
pattrib->pktlen += 2;
@@ -1132,13 +1132,13 @@ static void issue_assocreq(struct adapter *padapter)
}
/* RSN */
- p = rtw_get_ie((pmlmeinfo->network.IEs + sizeof(struct ndis_802_11_fixed_ie)), _RSN_IE_2_, &ie_len, (pmlmeinfo->network.IELength - sizeof(struct ndis_802_11_fixed_ie)));
+ p = rtw_get_ie((pmlmeinfo->network.ies + sizeof(struct ndis_802_11_fixed_ie)), _RSN_IE_2_, &ie_len, (pmlmeinfo->network.ie_length - sizeof(struct ndis_802_11_fixed_ie)));
if (p)
pframe = rtw_set_ie(pframe, _RSN_IE_2_, ie_len, (p + 2), &(pattrib->pktlen));
/* HT caps */
if (padapter->mlmepriv.htpriv.ht_option) {
- p = rtw_get_ie((pmlmeinfo->network.IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.IELength - sizeof(struct ndis_802_11_fixed_ie)));
+ p = rtw_get_ie((pmlmeinfo->network.ies + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.ie_length - sizeof(struct ndis_802_11_fixed_ie)));
if ((p != NULL) && (!(is_ap_in_tkip(padapter)))) {
memcpy(&pmlmeinfo->HT_caps, p + 2, sizeof(struct ieee80211_ht_cap));
@@ -1159,8 +1159,8 @@ static void issue_assocreq(struct adapter *padapter)
}
/* vendor specific IE, such as WPA, WMM, WPS */
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength; i += (pIE->Length + 2)) {
- pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.ie_length; i += (pIE->Length + 2)) {
+ pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.ies + i);
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
@@ -1804,7 +1804,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
- p = rtw_get_ie(pbss_network->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->IELength - _FIXED_IE_LENGTH_);
+ p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
if ((p == NULL) || (len == 0)) { /* non-HT */
if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
continue;
@@ -2080,8 +2080,8 @@ static u8 collect_bss_info(struct adapter *padapter,
bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
/* below is to copy the information element */
- bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
+ bssid->ie_length = len;
+ memcpy(bssid->ies, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->ie_length);
/* get the signal strength in dBM.raw data */
bssid->Rssi = precv_frame->attrib.phy_info.recvpower;
@@ -2090,7 +2090,7 @@ static u8 collect_bss_info(struct adapter *padapter,
rtw_hal_get_def_var(padapter, HAL_DEF_CURRENT_ANTENNA, &bssid->PhyInfo.Optimum_antenna);
/* checking SSID */
- p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
+ p = rtw_get_ie(bssid->ies + ie_offset, _SSID_IE_, &len, bssid->ie_length - ie_offset);
if (!p) {
DBG_88E("marc: cannot find SSID for survey event\n");
return _FAIL;
@@ -2111,7 +2111,7 @@ static u8 collect_bss_info(struct adapter *padapter,
/* checking rate info... */
i = 0;
- p = rtw_get_ie(bssid->IEs + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ p = rtw_get_ie(bssid->ies + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
if (p != NULL) {
if (len > NDIS_802_11_LENGTH_RATES_EX) {
DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
@@ -2121,7 +2121,7 @@ static u8 collect_bss_info(struct adapter *padapter,
i = len;
}
- p = rtw_get_ie(bssid->IEs + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ p = rtw_get_ie(bssid->ies + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
if (p) {
if (len > (NDIS_802_11_LENGTH_RATES_EX-i)) {
DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
@@ -2133,11 +2133,11 @@ static u8 collect_bss_info(struct adapter *padapter,
/* todo: */
bssid->NetworkTypeInUse = Ndis802_11OFDM24;
- if (bssid->IELength < 12)
+ if (bssid->ie_length < 12)
return _FAIL;
/* Checking for DSConfig */
- p = rtw_get_ie(bssid->IEs + ie_offset, _DSSET_IE_, &len, bssid->IELength - ie_offset);
+ p = rtw_get_ie(bssid->ies + ie_offset, _DSSET_IE_, &len, bssid->ie_length - ie_offset);
bssid->Configuration.DSConfig = 0;
bssid->Configuration.Length = 0;
@@ -2146,7 +2146,7 @@ static u8 collect_bss_info(struct adapter *padapter,
bssid->Configuration.DSConfig = *(p + 2);
} else {/* In 5G, some ap do not have DSSET IE */
/* checking HT info for channel */
- p = rtw_get_ie(bssid->IEs + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->IELength - ie_offset);
+ p = rtw_get_ie(bssid->ies + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->ie_length - ie_offset);
if (p) {
struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
@@ -2165,7 +2165,7 @@ static u8 collect_bss_info(struct adapter *padapter,
}
bssid->Configuration.BeaconPeriod =
- get_unaligned_le16(rtw_get_beacon_interval_from_ie(bssid->IEs));
+ get_unaligned_le16(rtw_get_beacon_interval_from_ie(bssid->ies));
val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
@@ -2188,7 +2188,7 @@ static u8 collect_bss_info(struct adapter *padapter,
if ((pregistrypriv->wifi_spec == 1) && (!pmlmeinfo->bwmode_updated)) {
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- p = rtw_get_ie(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset);
+ p = rtw_get_ie(bssid->ies + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->ie_length - ie_offset);
if (p && len > 0) {
struct ieee80211_ht_cap *pHT_caps =
(struct ieee80211_ht_cap *)(p + 2);
@@ -2408,7 +2408,7 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
u8 noc; /* number of channel */
u8 j, k;
- ie = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _COUNTRY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ ie = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _COUNTRY_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
if (!ie)
return;
if (len < 6)
@@ -4970,13 +4970,13 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
/* clear CAM */
flush_all_cam_entry(padapter);
- memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength));
- pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
+ memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, ie_length));
+ pnetwork->ie_length = ((struct wlan_bssid_ex *)pbuf)->ie_length;
- if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ if (pnetwork->ie_length > MAX_IE_SZ)/* Check pbuf->ie_length */
return H2C_PARAMETERS_ERROR;
- memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
+ memcpy(pnetwork->ies, ((struct wlan_bssid_ex *)pbuf)->ies, pnetwork->ie_length);
start_create_ibss(padapter);
}
@@ -5029,18 +5029,18 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
pmlmeinfo->candidate_tid_bitmap = 0;
pmlmeinfo->bwmode_updated = false;
- memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength));
- pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
+ memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, ie_length));
+ pnetwork->ie_length = ((struct wlan_bssid_ex *)pbuf)->ie_length;
- if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ if (pnetwork->ie_length > MAX_IE_SZ)/* Check pbuf->ie_length */
return H2C_PARAMETERS_ERROR;
- memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
+ memcpy(pnetwork->ies, ((struct wlan_bssid_ex *)pbuf)->ies, pnetwork->ie_length);
/* Check AP vendor to move rtw_joinbss_cmd() */
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(pnetwork->IEs + i);
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->ie_length;) {
+ pIE = (struct ndis_802_11_var_ie *)(pnetwork->ies + i);
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:/* Get WMM IE. */
@@ -5409,10 +5409,10 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
goto exit;
}
- len_diff = update_hidden_ssid(ptxBeacon_parm->IEs+_BEACON_IE_OFFSET_,
- ptxBeacon_parm->IELength-_BEACON_IE_OFFSET_,
+ len_diff = update_hidden_ssid(ptxBeacon_parm->ies+_BEACON_IE_OFFSET_,
+ ptxBeacon_parm->ie_length-_BEACON_IE_OFFSET_,
pmlmeinfo->hidden_ssid_mode);
- ptxBeacon_parm->IELength += len_diff;
+ ptxBeacon_parm->ie_length += len_diff;
init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm, _TX_Beacon_CMD_);
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 9a130cbf6def..ec5a74df9f48 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -380,7 +380,7 @@ u16 get_beacon_interval(struct wlan_bssid_ex *bss)
{
__le16 val;
- memcpy((unsigned char *)&val, rtw_get_beacon_interval_from_ie(bss->IEs), 2);
+ memcpy((unsigned char *)&val, rtw_get_beacon_interval_from_ie(bss->ies), 2);
return le16_to_cpu(val);
}
@@ -897,12 +897,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
/* below is to copy the information element */
- bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
+ bssid->ie_length = len;
+ memcpy(bssid->ies, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->ie_length);
/* check bw and channel offset */
/* parsing HT_CAP_IE */
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
if (p && len > 0) {
struct ieee80211_ht_cap *ht_cap =
(struct ieee80211_ht_cap *)(p + 2);
@@ -912,7 +912,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
ht_cap_info = 0;
}
/* parsing HT_INFO_IE */
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
if (p && len > 0) {
pht_info = (struct HT_info_element *)(p + 2);
ht_info_infos_0 = pht_info->infos[0];
@@ -934,11 +934,11 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* Checking for channel */
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
if (p) {
bcn_channel = *(p + 2);
} else {/* In 5G, some ap do not have DSSET IE checking HT info for channel */
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
if (pht_info) {
bcn_channel = pht_info->primary_channel;
} else { /* we don't find channel IE, so don't check it */
@@ -954,7 +954,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
/* checking SSID */
ssid_len = 0;
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
if (p) {
ssid_len = *(p + 1);
if (ssid_len > NDIS_802_11_LENGTH_SSID)
@@ -992,7 +992,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
goto _mismatch;
}
- rtw_get_sec_ie(bssid->IEs, bssid->IELength, NULL, &rsn_len, NULL, &wpa_len);
+ rtw_get_sec_ie(bssid->ies, bssid->ie_length, NULL, &rsn_len, NULL, &wpa_len);
if (rsn_len > 0) {
encryp_protocol = ENCRYP_PROTOCOL_WPA2;
@@ -1009,7 +1009,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
if (encryp_protocol == ENCRYP_PROTOCOL_WPA || encryp_protocol == ENCRYP_PROTOCOL_WPA2) {
- pbuf = rtw_get_wpa_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength-12);
+ pbuf = rtw_get_wpa_ie(&bssid->ies[12], &wpa_ielen, bssid->ie_length-12);
if (pbuf && (wpa_ielen > 0)) {
if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is_8021x)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
@@ -1017,7 +1017,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
pairwise_cipher, group_cipher, is_8021x));
}
} else {
- pbuf = rtw_get_wpa2_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength-12);
+ pbuf = rtw_get_wpa2_ie(&bssid->ies[12], &wpa_ielen, bssid->ie_length-12);
if (pbuf && (wpa_ielen > 0)) {
if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is_8021x)) {
@@ -1088,8 +1088,8 @@ unsigned int is_ap_in_tkip(struct adapter *padapter)
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.ie_length;) {
+ pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.ies + i);
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
@@ -1119,8 +1119,8 @@ unsigned int should_forbid_n_rate(struct adapter *padapter)
struct wlan_bssid_ex *cur_network = &pmlmepriv->cur_network.network;
if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < cur_network->IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(cur_network->IEs + i);
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < cur_network->ie_length;) {
+ pIE = (struct ndis_802_11_var_ie *)(cur_network->ies + i);
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
@@ -1155,8 +1155,8 @@ unsigned int is_ap_in_wep(struct adapter *padapter)
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.ie_length;) {
+ pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.ies + i);
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 8d242adae4b3..eeb2d9f82e92 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -249,20 +249,20 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
pktlen += 8;
/* beacon interval: 2 bytes */
- memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->ies)), 2);
pframe += 2;
pktlen += 2;
/* capability info: 2 bytes */
- memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->ies)), 2);
pframe += 2;
pktlen += 2;
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
- pktlen += cur_network->IELength - sizeof(struct ndis_802_11_fixed_ie);
- memcpy(pframe, cur_network->IEs+sizeof(struct ndis_802_11_fixed_ie), pktlen);
+ pktlen += cur_network->ie_length - sizeof(struct ndis_802_11_fixed_ie);
+ memcpy(pframe, cur_network->ies+sizeof(struct ndis_802_11_fixed_ie), pktlen);
goto _ConstructBeacon;
}
@@ -425,12 +425,12 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe += pktlen;
- if (cur_network->IELength > MAX_IE_SZ)
+ if (cur_network->ie_length > MAX_IE_SZ)
return;
- memcpy(pframe, cur_network->IEs, cur_network->IELength);
- pframe += cur_network->IELength;
- pktlen += cur_network->IELength;
+ memcpy(pframe, cur_network->ies, cur_network->ie_length);
+ pframe += cur_network->ie_length;
+ pktlen += cur_network->ie_length;
*pLength = pktlen;
}
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index d7b25d2f933a..6000049bda8f 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -83,9 +83,9 @@ struct ndis_802_11_var_ie {
* [ETH_ALEN] + 2 + sizeof (struct ndis_802_11_ssid) + sizeof (u32)
* + sizeof (NDIS_802_11_RSSI) + sizeof (enum NDIS_802_11_NETWORK_TYPE)
* + sizeof (struct ndis_802_11_config)
- * + NDIS_802_11_LENGTH_RATES_EX + IELength
+ * + NDIS_802_11_LENGTH_RATES_EX + ie_length
*
- * Except the IELength, all other fields are fixed length.
+ * Except the ie_length, all other fields are fixed length.
* Therefore, we can define a macro to represent the partial sum.
*/
@@ -196,15 +196,15 @@ struct wlan_bssid_ex {
enum ndis_802_11_network_infra InfrastructureMode;
unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
struct wlan_phy_info PhyInfo;
- u32 IELength;
- u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and
+ u32 ie_length;
+ u8 ies[MAX_IE_SZ]; /* timestamp, beacon interval, and
* capability information)
*/
} __packed;
static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
{
- return sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->IELength;
+ return sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->ie_length;
}
struct wlan_network {
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 446310775e90..3d648cb55a6d 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -129,7 +129,7 @@ static char *translate_scan(struct adapter *padapter,
start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
/* parsing HT_CAP_IE */
- p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength-12);
+ p = rtw_get_ie(&pnetwork->network.ies[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.ie_length-12);
if (p && ht_ielen > 0) {
struct ieee80211_ht_cap *pht_capie;
@@ -174,7 +174,7 @@ static char *translate_scan(struct adapter *padapter,
/* Add mode */
iwe.cmd = SIOCGIWMODE;
- memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
+ memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.ies), 2);
cap = le16_to_cpu(le_tmp);
@@ -243,7 +243,7 @@ static char *translate_scan(struct adapter *padapter,
u16 wpa_len = 0, rsn_len = 0;
u8 *p;
- rtw_get_sec_ie(pnetwork->network.IEs, pnetwork->network.IELength, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
+ rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.Ssid.Ssid));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
@@ -286,9 +286,9 @@ static char *translate_scan(struct adapter *padapter,
uint cnt = 0, total_ielen;
u8 *wpsie_ptr = NULL;
uint wps_ielen = 0;
- u8 *ie_ptr = pnetwork->network.IEs + _FIXED_IE_LENGTH_;
+ u8 *ie_ptr = pnetwork->network.ies + _FIXED_IE_LENGTH_;
- total_ielen = pnetwork->network.IELength - _FIXED_IE_LENGTH_;
+ total_ielen = pnetwork->network.ie_length - _FIXED_IE_LENGTH_;
while (cnt < total_ielen) {
if (rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen > 2)) {
@@ -644,7 +644,7 @@ static int rtw_wx_get_name(struct net_device *dev,
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
/* parsing HT_CAP_IE */
- p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
+ p = rtw_get_ie(&pcur_bss->ies[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->ie_length-12);
if (p && ht_ielen > 0)
ht_cap = true;
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index bda4ab879f58..7ec53a9dfa27 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -121,10 +121,6 @@ int rtw_recv_indicatepkt(struct adapter *padapter,
}
}
- rcu_read_lock();
- rcu_dereference(padapter->pnetdev->rx_handler_data);
- rcu_read_unlock();
-
skb->ip_summed = CHECKSUM_NONE;
skb->dev = padapter->pnetdev;
skb->protocol = eth_type_trans(skb, padapter->pnetdev);
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 88f89d77b511..a1c096124683 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -50,7 +50,7 @@ void dot11d_init(struct rtllib_device *ieee)
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
RESET_CIE_WATCHDOG(ieee);
}
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index fc88d47dea43..d314b2f602e4 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -578,7 +578,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
.seq_ctl = 0,
.qos_ctl = 0
};
- int qos_actived = ieee->current_network.qos_data.active;
+ int qos_activated = ieee->current_network.qos_data.active;
u8 dest[ETH_ALEN];
u8 src[ETH_ALEN];
struct lib80211_crypt_data *crypt = NULL;
@@ -684,7 +684,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
else
fc = RTLLIB_FTYPE_DATA;
- if (qos_actived)
+ if (qos_activated)
fc |= RTLLIB_STYPE_QOS_DATA;
else
fc |= RTLLIB_STYPE_DATA;
@@ -727,7 +727,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
qos_ctl = 0;
}
- if (qos_actived) {
+ if (qos_activated) {
hdr_len = RTLLIB_3ADDR_LEN + 2;
/* in case we are a client verify acm is not set for this ac */
@@ -788,7 +788,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
txb->encrypted = encrypt;
txb->payload_size = cpu_to_le16(bytes);
- if (qos_actived)
+ if (qos_activated)
txb->queue_index = UP2AC(skb->priority);
else
txb->queue_index = WME_AC_BE;
@@ -797,7 +797,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
skb_frag = txb->fragments[i];
tcb_desc = (struct cb_desc *)(skb_frag->cb +
MAX_DEV_ADDR_SIZE);
- if (qos_actived) {
+ if (qos_activated) {
skb_frag->priority = skb->priority;
tcb_desc->queue_index = UP2AC(skb->priority);
} else {
@@ -831,7 +831,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
/* The last fragment has the remaining length */
bytes = bytes_last_frag;
}
- if ((qos_actived) && (!bIsMulticast)) {
+ if ((qos_activated) && (!bIsMulticast)) {
frag_hdr->seq_ctl =
cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
header.addr1));
@@ -866,7 +866,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
skb_put(skb_frag, 4);
}
- if ((qos_actived) && (!bIsMulticast)) {
+ if ((qos_activated) && (!bIsMulticast)) {
if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
else
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index a4b40422e5bd..041f1b123888 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -66,7 +66,7 @@ static void *ieee80211_ccmp_init(int key_idx)
{
struct ieee80211_ccmp_data *priv;
- priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto fail;
priv->key_idx = key_idx;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 60ecfec71112..a7efaae4e25a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -66,7 +66,7 @@ static void *ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
- priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto fail;
priv->key_idx = key_idx;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 7ba4b07aa842..b9f86be9e52b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -41,7 +41,7 @@ static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
- priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
priv->key_idx = keyidx;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 25c186a8bde3..21874e78d8a1 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -2684,7 +2684,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
for(i = 0; i < 5; i++) {
ieee->seq_ctrl[i] = 0;
}
- ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
+ ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_KERNEL);
if (!ieee->pDot11dInfo)
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n");
//added for AP roaming
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index bf7b7122d042..1b61a8de1edf 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -7,28 +7,28 @@ u8 MCS_FILTER_ALL[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0
u8 MCS_FILTER_1SS[16] = {0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-u16 MCS_DATA_RATE[2][2][77] =
- { { {13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78 ,104, 156, 208, 234, 260,
- 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520,
- 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182, 182, 208, 156, 195,
- 195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260,
- 286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429}, // Long GI, 20MHz
- {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
- 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578,
- 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217,
- 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289,
- 318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} }, // Short GI, 20MHz
- { {27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540,
- 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080,
- 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405,
- 405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540,
- 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, // Long GI, 40MHz
- {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600,
- 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200,
- 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450,
- 450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600,
- 660, 450, 540, 630, 540, 630, 720, 810, 720, 810, 900, 900, 990} } // Short GI, 40MHz
- };
+u16 MCS_DATA_RATE[2][2][77] = {
+ { {13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78, 104, 156, 208, 234, 260,
+ 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520,
+ 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182, 182, 208, 156, 195,
+ 195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260,
+ 286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429}, // Long GI, 20MHz
+ {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
+ 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578,
+ 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217,
+ 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289,
+ 318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} }, // Short GI, 20MHz
+ { {27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540,
+ 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080,
+ 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405,
+ 405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540,
+ 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, // Long GI, 40MHz
+ {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600,
+ 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200,
+ 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450,
+ 450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600,
+ 660, 450, 540, 630, 540, 630, 720, 810, 720, 810, 900, 900, 990} } // Short GI, 40MHz
+};
static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf};
static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70};
@@ -60,14 +60,14 @@ void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
//printk("pHTinfo:%p, &pHTinfo:%p, mptr:%p, offsetof:%x\n", pHTInfo, &pHTInfo, __mptr, offsetof(struct ieee80211_device, pHTInfo));
//printk("===>ieee:%p,\n", ieee);
// ShortGI support
- pHTInfo->bRegShortGI20MHz= 1;
- pHTInfo->bRegShortGI40MHz= 1;
+ pHTInfo->bRegShortGI20MHz = 1;
+ pHTInfo->bRegShortGI40MHz = 1;
// 40MHz channel support
pHTInfo->bRegBW40MHz = 1;
// CCK rate support in 40MHz channel
- if(pHTInfo->bRegBW40MHz)
+ if (pHTInfo->bRegBW40MHz)
pHTInfo->bRegSuppCCK = 1;
else
pHTInfo->bRegSuppCCK = true;
@@ -83,7 +83,7 @@ void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
// MIMO Power Save
pHTInfo->SelfMimoPs = 3;// 0: Static Mimo Ps, 1: Dynamic Mimo Ps, 3: No Limitation, 2: Reserved(Set to 3 automatically.)
- if(pHTInfo->SelfMimoPs == 2)
+ if (pHTInfo->SelfMimoPs == 2)
pHTInfo->SelfMimoPs = 3;
// 8190 only. Assign rate operation mode to firmware
ieee->bTxDisableRateFallBack = 0;
@@ -122,28 +122,27 @@ void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
* return: none
* notice: Driver should not print out this message by default.
* *****************************************************************************************************************/
-void HTDebugHTCapability(u8 *CapIE, u8 *TitleString )
+void HTDebugHTCapability(u8 *CapIE, u8 *TitleString)
{
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
PHT_CAPABILITY_ELE pCapELE;
- if(!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap)))
- {
+ if (!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap))) {
//EWC IE
IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __func__);
pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[4]);
- }else
+ } else {
pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[0]);
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Capability>. Called by %s\n", TitleString );
-
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupported Channel Width = %s\n", (pCapELE->ChlWidth)?"20MHz": "20/40MHz");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 20M = %s\n", (pCapELE->ShortGI20Mhz)?"YES": "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 40M = %s\n", (pCapELE->ShortGI40Mhz)?"YES": "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport TX STBC = %s\n", (pCapELE->TxSTBC)?"YES": "NO");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMSDU Size = %s\n", (pCapELE->MaxAMSDUSize)?"3839": "7935");
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport CCK in 20/40 mode = %s\n", (pCapELE->DssCCk)?"YES": "NO");
+ }
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Capability>. Called by %s\n", TitleString);
+
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupported Channel Width = %s\n", (pCapELE->ChlWidth) ? "20MHz" : "20/40MHz");
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 20M = %s\n", (pCapELE->ShortGI20Mhz) ? "YES" : "NO");
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 40M = %s\n", (pCapELE->ShortGI40Mhz) ? "YES" : "NO");
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport TX STBC = %s\n", (pCapELE->TxSTBC) ? "YES" : "NO");
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMSDU Size = %s\n", (pCapELE->MaxAMSDUSize) ? "3839" : "7935");
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport CCK in 20/40 mode = %s\n", (pCapELE->DssCCk) ? "YES" : "NO");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMPDU Factor = %d\n", pCapELE->MaxRxAMPDUFactor);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMPDU Density = %d\n", pCapELE->MPDUDensity);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMCS Rate Set = [%x][%x][%x][%x][%x]\n", pCapELE->MCS[0],\
@@ -165,51 +164,48 @@ void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString)
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
PHT_INFORMATION_ELE pHTInfoEle;
- if(!memcmp(InfoIE, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
- {
+ if (!memcmp(InfoIE, EWC11NHTInfo, sizeof(EWC11NHTInfo))) {
// Not EWC IE
IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __func__);
pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[4]);
- }else
+ } else {
pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[0]);
-
+ }
IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Information Element>. Called by %s\n", TitleString);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tPrimary channel = %d\n", pHTInfoEle->ControlChl);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSecondary channel =");
- switch (pHTInfoEle->ExtChlOffset)
- {
- case 0:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Not Present\n");
- break;
- case 1:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Upper channel\n");
- break;
- case 2:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Reserved. Eooro!!!\n");
- break;
- case 3:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Lower Channel\n");
- break;
+ switch (pHTInfoEle->ExtChlOffset) {
+ case 0:
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "Not Present\n");
+ break;
+ case 1:
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "Upper channel\n");
+ break;
+ case 2:
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "Reserved. Eooro!!!\n");
+ break;
+ case 3:
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "Lower Channel\n");
+ break;
}
- IEEE80211_DEBUG(IEEE80211_DL_HT, "\tRecommended channel width = %s\n", (pHTInfoEle->RecommemdedTxWidth)?"20Mhz": "40Mhz");
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "\tRecommended channel width = %s\n", (pHTInfoEle->RecommemdedTxWidth) ? "20Mhz" : "40Mhz");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tOperation mode for protection = ");
- switch (pHTInfoEle->OptMode)
- {
- case 0:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "No Protection\n");
- break;
- case 1:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "HT non-member protection mode\n");
- break;
- case 2:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "Suggest to open protection\n");
- break;
- case 3:
- IEEE80211_DEBUG(IEEE80211_DL_HT, "HT mixed mode\n");
- break;
+ switch (pHTInfoEle->OptMode) {
+ case 0:
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "No Protection\n");
+ break;
+ case 1:
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "HT non-member protection mode\n");
+ break;
+ case 2:
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "Suggest to open protection\n");
+ break;
+ case 3:
+ IEEE80211_DEBUG(IEEE80211_DL_HT, "HT mixed mode\n");
+ break;
}
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tBasic MCS Rate Set = [%x][%x][%x][%x][%x]\n", pHTInfoEle->BasicMSC[0],\
@@ -225,13 +221,13 @@ static bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- if(!pHTInfo->bCurrentHTSupport) // wireless is n mode
+ if (!pHTInfo->bCurrentHTSupport) // wireless is n mode
retValue = false;
- else if(!pHTInfo->bRegBW40MHz) // station supports 40 bw
+ else if (!pHTInfo->bRegBW40MHz) // station supports 40 bw
retValue = false;
- else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
+ else if (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
retValue = false;
- else if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ChlWidth) // ap support 40 bw
+ else if (((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ChlWidth) // ap support 40 bw
retValue = true;
else
retValue = false;
@@ -244,20 +240,17 @@ static bool IsHTHalfNmodeSGI(struct ieee80211_device *ieee, bool is40MHz)
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- if(!pHTInfo->bCurrentHTSupport) // wireless is n mode
+ if (!pHTInfo->bCurrentHTSupport) // wireless is n mode
retValue = false;
- else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
+ else if (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
retValue = false;
- else if(is40MHz) // ap support 40 bw
- {
- if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI40Mhz) // ap support 40 bw short GI
+ else if (is40MHz) { // ap support 40 bw
+ if (((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI40Mhz) // ap support 40 bw short GI
retValue = true;
else
retValue = false;
- }
- else
- {
- if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI20Mhz) // ap support 40 bw short GI
+ } else {
+ if (((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI20Mhz) // ap support 40 bw short GI
retValue = true;
else
retValue = false;
@@ -272,8 +265,8 @@ u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate)
u8 is40MHz;
u8 isShortGI;
- is40MHz = (IsHTHalfNmode40Bandwidth(ieee))?1:0;
- isShortGI = (IsHTHalfNmodeSGI(ieee, is40MHz))? 1:0;
+ is40MHz = (IsHTHalfNmode40Bandwidth(ieee)) ? 1 : 0;
+ isShortGI = (IsHTHalfNmodeSGI(ieee, is40MHz)) ? 1 : 0;
return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)];
}
@@ -283,10 +276,10 @@ u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u8 is40MHz = (pHTInfo->bCurBW40MHz)?1:0;
- u8 isShortGI = (pHTInfo->bCurBW40MHz)?
- ((pHTInfo->bCurShortGI40MHz)?1:0):
- ((pHTInfo->bCurShortGI20MHz)?1:0);
+ u8 is40MHz = (pHTInfo->bCurBW40MHz) ? 1 : 0;
+ u8 isShortGI = (pHTInfo->bCurBW40MHz) ?
+ ((pHTInfo->bCurShortGI40MHz) ? 1 : 0) :
+ ((pHTInfo->bCurShortGI20MHz) ? 1 : 0);
return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)];
}
@@ -301,39 +294,29 @@ u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate)
u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate)
{
//PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u16 CCKOFDMRate[12] = {0x02 , 0x04 , 0x0b , 0x16 , 0x0c , 0x12 , 0x18 , 0x24 , 0x30 , 0x48 , 0x60 , 0x6c};
+ u16 CCKOFDMRate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c};
u8 is40MHz = 0;
u8 isShortGI = 0;
- if(nDataRate < 12)
- {
+ if (nDataRate < 12) {
return CCKOFDMRate[nDataRate];
- }
- else
- {
- if (nDataRate >= 0x10 && nDataRate <= 0x1f)//if(nDataRate > 11 && nDataRate < 28 )
- {
+ } else {
+ if (nDataRate >= 0x10 && nDataRate <= 0x1f) { //if(nDataRate > 11 && nDataRate < 28 )
is40MHz = 0;
isShortGI = 0;
// nDataRate = nDataRate - 12;
- }
- else if(nDataRate >=0x20 && nDataRate <= 0x2f ) //(27, 44)
- {
+ } else if (nDataRate >= 0x20 && nDataRate <= 0x2f) { //(27, 44)
is40MHz = 1;
isShortGI = 0;
//nDataRate = nDataRate - 28;
- }
- else if(nDataRate >= 0x30 && nDataRate <= 0x3f ) //(43, 60)
- {
+ } else if (nDataRate >= 0x30 && nDataRate <= 0x3f) { //(43, 60)
is40MHz = 0;
isShortGI = 1;
//nDataRate = nDataRate - 44;
- }
- else if(nDataRate >= 0x40 && nDataRate <= 0x4f ) //(59, 76)
- {
+ } else if (nDataRate >= 0x40 && nDataRate <= 0x4f) { //(59, 76)
is40MHz = 1;
isShortGI = 1;
@@ -349,20 +332,20 @@ bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee)
{
bool retValue = false;
struct ieee80211_network *net = &ieee->current_network;
- if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) ||
- (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) ||
- (memcmp(net->bssid, PCI_RALINK, 3)==0) ||
- (memcmp(net->bssid, EDIMAX_RALINK, 3)==0) ||
- (memcmp(net->bssid, AIRLINK_RALINK, 3)==0) ||
- (net->ralink_cap_exist))
+ if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
+ (net->ralink_cap_exist))
+ retValue = true;
+ else if ((memcmp(net->bssid, UNKNOWN_BORADCOM, 3) == 0) ||
+ (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) == 0) ||
+ (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) == 0) ||
+ (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3) == 0) ||
+ (net->broadcom_cap_exist))
retValue = true;
- else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) ||
- (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)||
- (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)||
- (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) ||
- (net->broadcom_cap_exist))
- retValue = true;
- else if(net->bssht.bdRT2RTAggregation)
+ else if (net->bssht.bdRT2RTAggregation)
retValue = true;
else
retValue = false;
@@ -381,25 +364,25 @@ static void HTIOTPeerDetermine(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
struct ieee80211_network *net = &ieee->current_network;
- if(net->bssht.bdRT2RTAggregation)
+ if (net->bssht.bdRT2RTAggregation)
pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK;
- else if(net->broadcom_cap_exist)
+ else if (net->broadcom_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
- else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) ||
- (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)||
- (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)||
- (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) )
+ else if ((memcmp(net->bssid, UNKNOWN_BORADCOM, 3) == 0) ||
+ (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) == 0) ||
+ (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) == 0) ||
+ (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3) == 0))
pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
- else if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) ||
- (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) ||
- (memcmp(net->bssid, PCI_RALINK, 3)==0) ||
- (memcmp(net->bssid, EDIMAX_RALINK, 3)==0) ||
- (memcmp(net->bssid, AIRLINK_RALINK, 3)==0) ||
+ else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
net->ralink_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_RALINK;
- else if(net->atheros_cap_exist)
+ else if (net->atheros_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS;
- else if(memcmp(net->bssid, CISCO_BROADCOM, 3)==0)
+ else if (memcmp(net->bssid, CISCO_BROADCOM, 3) == 0)
pHTInfo->IOTPeer = HT_IOT_PEER_CISCO;
else
pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
@@ -438,7 +421,7 @@ static bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
#ifdef TODO
// Apply for 819u only
-#if (HAL_CODE_BASE==RTL8192)
+#if (HAL_CODE_BASE == RTL8192)
#if (DEV_BUS_TYPE == USB_INTERFACE)
// Alway disable MCS15 by Jerry Chang's request.by Emily, 2008.04.15
@@ -505,9 +488,7 @@ static u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
// 2008/01/28 MH We must prevent that we select null bssid to link.
if (network->broadcom_cap_exist)
- {
retValue = 1;
- }
return retValue;
}
@@ -515,12 +496,10 @@ static u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
static u8 HTIOTActIsCCDFsync(u8 *PeerMacAddr)
{
u8 retValue = 0;
- if( (memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) ||
- (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0) ||
- (memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) ==0))
- {
+ if ((memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3) == 0) ||
+ (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) == 0) ||
+ (memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) == 0))
retValue = 1;
- }
return retValue;
}
@@ -550,32 +529,26 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
PHT_CAPABILITY_ELE pCapELE = NULL;
//u8 bIsDeclareMCS13;
- if ((posHTCap == NULL) || (pHT == NULL))
- {
+ if ((posHTCap == NULL) || (pHT == NULL)) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTCap or pHTInfo can't be null in HTConstructCapabilityElement()\n");
return;
}
memset(posHTCap, 0, *len);
- if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
- {
+ if (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
pCapELE = (PHT_CAPABILITY_ELE)&(posHTCap[4]);
- }else
- {
+ } else {
pCapELE = (PHT_CAPABILITY_ELE)posHTCap;
}
//HT capability info
pCapELE->AdvCoding = 0; // This feature is not supported now!!
- if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
- {
+ if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
pCapELE->ChlWidth = 0;
- }
- else
- {
- pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
+ } else {
+ pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);
}
// pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
@@ -588,8 +561,8 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
pCapELE->TxSTBC = 1;
pCapELE->RxSTBC = 0;
pCapELE->DelayBA = 0; // Do not support now!!
- pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE>=7935)?1:0;
- pCapELE->DssCCk = ((pHT->bRegBW40MHz)?(pHT->bRegSuppCCK?1:0):0);
+ pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0;
+ pCapELE->DssCCk = ((pHT->bRegBW40MHz) ? (pHT->bRegSuppCCK ? 1 : 0) : 0);
pCapELE->PSMP = 0; // Do not support now!!
pCapELE->LSigTxopProtect = 0; // Do not support now!!
@@ -600,31 +573,28 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
if (IsEncrypt) {
pCapELE->MPDUDensity = 7; // 8us
- pCapELE->MaxRxAMPDUFactor = 2; // 2 is for 32 K and 3 is 64K
- }
- else
- {
- pCapELE->MaxRxAMPDUFactor = 3; // 2 is for 32 K and 3 is 64K
+ pCapELE->MaxRxAMPDUFactor = 2; // 2 is for 32 K and 3 is 64K
+ } else {
+ pCapELE->MaxRxAMPDUFactor = 3; // 2 is for 32 K and 3 is 64K
pCapELE->MPDUDensity = 0; // no density
}
//Supported MCS set
memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16);
- if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
+ if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
pCapELE->MCS[1] &= 0x7f;
- if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
+ if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
pCapELE->MCS[1] &= 0xbf;
- if(pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
+ if (pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
pCapELE->MCS[1] &= 0x00;
// 2008.06.12
// For RTL819X, if pairwisekey = wep/tkip, ap is ralink, we support only MCS0~7.
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
- {
+ if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
int i;
- for(i = 1; i< 16; i++)
+ for (i = 1; i < 16; i++)
pCapELE->MCS[i] = 0;
}
@@ -638,7 +608,7 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
//Antenna Selection Capabilities
pCapELE->ASCap = 0;
//add 2 to give space for element ID and len when construct frames
- if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
+ if (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
*len = 30 + 2;
else
*len = 26 + 2;
@@ -666,19 +636,17 @@ void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *le
{
PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
PHT_INFORMATION_ELE pHTInfoEle = (PHT_INFORMATION_ELE)posHTInfo;
- if ((posHTInfo == NULL) || (pHTInfoEle == NULL))
- {
+ if ((posHTInfo == NULL) || (pHTInfoEle == NULL)) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTInfo or pHTInfoEle can't be null in HTConstructInfoElement()\n");
return;
}
memset(posHTInfo, 0, *len);
- if ( (ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) //ap mode is not currently supported
- {
+ if ((ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) { //ap mode is not currently supported
pHTInfoEle->ControlChl = ieee->current_network.channel;
- pHTInfoEle->ExtChlOffset = ((!pHT->bRegBW40MHz)?HT_EXTCHNL_OFFSET_NO_EXT:
- (ieee->current_network.channel<=6)?
- HT_EXTCHNL_OFFSET_UPPER:HT_EXTCHNL_OFFSET_LOWER);
+ pHTInfoEle->ExtChlOffset = ((!pHT->bRegBW40MHz) ? HT_EXTCHNL_OFFSET_NO_EXT :
+ (ieee->current_network.channel <= 6) ?
+ HT_EXTCHNL_OFFSET_UPPER : HT_EXTCHNL_OFFSET_LOWER);
pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz;
pHTInfoEle->RIFS = 0;
pHTInfoEle->PSMPAccessOnly = 0;
@@ -696,9 +664,7 @@ void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *le
*len = 22 + 2; //same above
- }
- else
- {
+ } else {
//STA should not generate High Throughput Information Element
*len = 0;
}
@@ -791,23 +757,20 @@ void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg,
static u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
{
u8 i;
- if (pOperateMCS == NULL)
- {
+ if (pOperateMCS == NULL) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "pOperateMCS can't be null in HT_PickMCSRate()\n");
return false;
}
- switch (ieee->mode)
- {
+ switch (ieee->mode) {
case IEEE_A:
case IEEE_B:
case IEEE_G:
//legacy rate routine handled at selectedrate
//no MCS rate
- for(i=0;i<=15;i++){
+ for (i = 0; i <= 15; i++)
pOperateMCS[i] = 0;
- }
break;
case IEEE_N_24G: //assume CCK rate ok
@@ -817,9 +780,9 @@ static u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
//HT part
// TODO: may be different if we have different number of antenna
- pOperateMCS[0] &=RATE_ADPT_1SS_MASK; //support MCS 0~7
- pOperateMCS[1] &=RATE_ADPT_2SS_MASK;
- pOperateMCS[3] &=RATE_ADPT_MCS32_MASK;
+ pOperateMCS[0] &= RATE_ADPT_1SS_MASK; //support MCS 0~7
+ pOperateMCS[1] &= RATE_ADPT_2SS_MASK;
+ pOperateMCS[3] &= RATE_ADPT_MCS32_MASK;
break;
//should never reach here
@@ -857,32 +820,26 @@ u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSF
u8 bitMap;
u8 mcsRate = 0;
u8 availableMcsRate[16];
- if (pMCSRateSet == NULL || pMCSFilter == NULL)
- {
+ if (pMCSRateSet == NULL || pMCSFilter == NULL) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "pMCSRateSet or pMCSFilter can't be null in HTGetHighestMCSRate()\n");
return false;
}
- for(i=0; i<16; i++)
+ for (i = 0; i < 16; i++)
availableMcsRate[i] = pMCSRateSet[i] & pMCSFilter[i];
- for(i = 0; i < 16; i++)
- {
- if(availableMcsRate[i] != 0)
+ for (i = 0; i < 16; i++) {
+ if (availableMcsRate[i] != 0)
break;
}
- if(i == 16)
+ if (i == 16)
return false;
- for(i = 0; i < 16; i++)
- {
- if (availableMcsRate[i] != 0)
- {
+ for (i = 0; i < 16; i++) {
+ if (availableMcsRate[i] != 0) {
bitMap = availableMcsRate[i];
- for(j = 0; j < 8; j++)
- {
- if ((bitMap%2) != 0)
- {
- if(HTMcsToDataRate(ieee, (8*i+j)) > HTMcsToDataRate(ieee, mcsRate))
+ for (j = 0; j < 8; j++) {
+ if ((bitMap%2) != 0) {
+ if (HTMcsToDataRate(ieee, (8*i+j)) > HTMcsToDataRate(ieee, mcsRate))
mcsRate = (8*i+j);
}
bitMap >>= 1;
@@ -907,10 +864,10 @@ static u8 HTFilterMCSRate(struct ieee80211_device *ieee, u8 *pSupportMCS,
u8 *pOperateMCS)
{
- u8 i=0;
+ u8 i = 0;
// filter out operational rate set not supported by AP, the length of it is 16
- for(i=0;i<=15;i++){
+ for (i = 0; i <= 15; i++) {
pOperateMCS[i] = ieee->Regdot11HTOperationalRateSet[i]&pSupportMCS[i];
}
@@ -922,14 +879,14 @@ static u8 HTFilterMCSRate(struct ieee80211_device *ieee, u8 *pSupportMCS,
HT_PickMCSRate(ieee, pOperateMCS);
// For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
- if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
pOperateMCS[1] = 0;
//
// For RTL819X, we support only MCS0~15.
// And also, we do not know how to use MCS32 now.
//
- for(i=2; i<=15; i++)
+ for (i = 2; i <= 15; i++)
pOperateMCS[i] = 0;
return true;
@@ -962,7 +919,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
else
pPeerHTCap = (PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf);
- if(!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
+ if (!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
pPeerHTInfo = (PHT_INFORMATION_ELE)(&pHTInfo->PeerHTInfoBuf[4]);
else
pPeerHTInfo = (PHT_INFORMATION_ELE)(pHTInfo->PeerHTInfoBuf);
@@ -1006,9 +963,9 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
//
pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
- nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize==0)?3839:7935;
+ nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 3839 : 7935;
- if(pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize )
+ if (pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize)
pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
else
pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
@@ -1022,28 +979,24 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// <1> Decide AMPDU Factor
// By Emily
- if(!pHTInfo->bRegRT2RTAggregation)
- {
+ if (!pHTInfo->bRegRT2RTAggregation) {
// Decide AMPDU Factor according to protocol handshake
- if(pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
+ if (pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
else
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
- }else
- {
+ } else {
// Set MPDU density to 2 to Realtek AP, and set it to 0 for others
// Replace MPDU factor declared in original association response frame format. 2007.08.20 by Emily
- if (ieee->current_network.bssht.bdRT2RTAggregation)
- {
+ if (ieee->current_network.bssht.bdRT2RTAggregation) {
if (ieee->pairwise_key_type != KEY_TYPE_NA)
// Realtek may set 32k in security mode and 64k for others
pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
else
pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
- }else
- {
- if(pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
+ } else {
+ if (pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
else
pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K;
@@ -1052,22 +1005,20 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// <2> Set AMPDU Minimum MPDU Start Spacing
// 802.11n 3.0 section 9.7d.3
- if(pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
+ if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
else
pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
- if(ieee->pairwise_key_type != KEY_TYPE_NA )
+ if (ieee->pairwise_key_type != KEY_TYPE_NA)
pHTInfo->CurrentMPDUDensity = 7; // 8us
// Force TX AMSDU
// Lanhsin: mark for tmp to avoid deauth by ap from s3
//if(memcmp(pMgntInfo->Bssid, NETGEAR834Bv2_BROADCOM, 3)==0)
- if (0)
- {
-
- pHTInfo->bCurrentAMPDUEnable = false;
- pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
- pHTInfo->ForcedAMSDUMaxSize = 7935;
+ if (0) {
+ pHTInfo->bCurrentAMPDUEnable = false;
+ pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
+ pHTInfo->ForcedAMSDUMaxSize = 7935;
pHTInfo->IOTAction |= HT_IOT_ACT_TX_USE_AMSDU_8K;
}
@@ -1083,7 +1034,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// Handle Ralink AP bad MCS rate set condition. Joseph.
// This fix the bug of Ralink AP. This may be removed in the future.
- if(pPeerHTCap->MCS[0] == 0)
+ if (pPeerHTCap->MCS[0] == 0)
pPeerHTCap->MCS[0] = 0xff;
HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet);
@@ -1092,7 +1043,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// Config MIMO Power Save setting
//
pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave;
- if(pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
+ if (pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
pMcsFilter = MCS_FILTER_1SS;
else
pMcsFilter = MCS_FILTER_ALL;
@@ -1196,7 +1147,7 @@ void HTInitializeBssDesc(PBSS_HT pBssHT)
memset(pBssHT->bdHTInfoBuf, 0, sizeof(pBssHT->bdHTInfoBuf));
pBssHT->bdHTInfoLen = 0;
- pBssHT->bdHTSpecVer= HT_SPEC_VER_IEEE;
+ pBssHT->bdHTSpecVer = HT_SPEC_VER_IEEE;
pBssHT->bdRT2RTAggregation = false;
pBssHT->bdRT2RTLongSlotTime = false;
@@ -1224,26 +1175,22 @@ void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee802
IEEE80211_DEBUG(IEEE80211_DL_HT, "==============>%s()\n", __func__);
/*unmark bEnableHT flag here is the same reason why unmarked in function ieee80211_softmac_new_net. WB 2008.09.10*/
// if( pHTInfo->bEnableHT && pNetwork->bssht.bdSupportHT)
- if (pNetwork->bssht.bdSupportHT)
- {
+ if (pNetwork->bssht.bdSupportHT) {
pHTInfo->bCurrentHTSupport = true;
pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer;
// Save HTCap and HTInfo information Element
- if(pNetwork->bssht.bdHTCapLen > 0 && pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
+ if (pNetwork->bssht.bdHTCapLen > 0 && pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
memcpy(pHTInfo->PeerHTCapBuf, pNetwork->bssht.bdHTCapBuf, pNetwork->bssht.bdHTCapLen);
- if(pNetwork->bssht.bdHTInfoLen > 0 && pNetwork->bssht.bdHTInfoLen <= sizeof(pHTInfo->PeerHTInfoBuf))
+ if (pNetwork->bssht.bdHTInfoLen > 0 && pNetwork->bssht.bdHTInfoLen <= sizeof(pHTInfo->PeerHTInfoBuf))
memcpy(pHTInfo->PeerHTInfoBuf, pNetwork->bssht.bdHTInfoBuf, pNetwork->bssht.bdHTInfoLen);
// Check whether RT to RT aggregation mode is enabled
- if(pHTInfo->bRegRT2RTAggregation)
- {
+ if (pHTInfo->bRegRT2RTAggregation) {
pHTInfo->bCurrentRT2RTAggregation = pNetwork->bssht.bdRT2RTAggregation;
pHTInfo->bCurrentRT2RTLongSlotTime = pNetwork->bssht.bdRT2RTLongSlotTime;
- }
- else
- {
+ } else {
pHTInfo->bCurrentRT2RTAggregation = false;
pHTInfo->bCurrentRT2RTLongSlotTime = false;
}
@@ -1255,34 +1202,32 @@ void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee802
// Must be called after the parameter of pHTInfo->bCurrentRT2RTAggregation is decided
pHTInfo->IOTAction = 0;
bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
- if(bIOTAction)
+ if (bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14;
bIOTAction = HTIOTActIsDisableMCS15(ieee);
- if(bIOTAction)
+ if (bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15;
bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee, pNetwork->bssid);
- if(bIOTAction)
+ if (bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;
bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
- if(bIOTAction)
+ if (bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
bIOTAction = HTIOTActIsMgntUseCCK6M(pNetwork);
- if(bIOTAction)
+ if (bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M;
bIOTAction = HTIOTActIsCCDFsync(pNetwork->bssid);
- if(bIOTAction)
+ if (bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;
- }
- else
- {
+ } else {
pHTInfo->bCurrentHTSupport = false;
pHTInfo->bCurrentRT2RTAggregation = false;
pHTInfo->bCurrentRT2RTLongSlotTime = false;
@@ -1298,12 +1243,11 @@ void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_
// PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf;
PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf;
- if (pHTInfo->bCurrentHTSupport)
- {
+ if (pHTInfo->bCurrentHTSupport) {
//
// Config current operation mode.
//
- if(pNetwork->bssht.bdHTInfoLen != 0)
+ if (pNetwork->bssht.bdHTInfoLen != 0)
pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
//
@@ -1323,8 +1267,7 @@ EXPORT_SYMBOL(HTUpdateSelfAndPeerSetting);
********************************************************************************************************************/
u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame)
{
- if (ieee->pHTInfo->bCurrentHTSupport)
- {
+ if (ieee->pHTInfo->bCurrentHTSupport) {
if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) {
IEEE80211_DEBUG(IEEE80211_DL_HT, "HT CONTROL FILED EXIST!!\n");
return true;
@@ -1341,7 +1284,7 @@ void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidt
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u32 flags = 0;
- if(!pHTInfo->bRegBW40MHz)
+ if (!pHTInfo->bRegBW40MHz)
return;
@@ -1357,12 +1300,11 @@ void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidt
return;
}
//if in half N mode, set to 20M bandwidth please 09.08.2008 WB.
- if(Bandwidth==HT_CHANNEL_WIDTH_20_40 && (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)))
- {
+ if (Bandwidth == HT_CHANNEL_WIDTH_20_40 && (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))) {
// Handle Illegal extension channel offset!!
- if(ieee->current_network.channel<2 && Offset==HT_EXTCHNL_OFFSET_LOWER)
+ if (ieee->current_network.channel < 2 && Offset == HT_EXTCHNL_OFFSET_LOWER)
Offset = HT_EXTCHNL_OFFSET_NO_EXT;
- if(Offset==HT_EXTCHNL_OFFSET_UPPER || Offset==HT_EXTCHNL_OFFSET_LOWER) {
+ if (Offset == HT_EXTCHNL_OFFSET_UPPER || Offset == HT_EXTCHNL_OFFSET_LOWER) {
pHTInfo->bCurBW40MHz = true;
pHTInfo->CurSTAExtChnlOffset = Offset;
} else {
@@ -1391,11 +1333,10 @@ void HTSetConnectBwModeCallback(struct ieee80211_device *ieee)
IEEE80211_DEBUG(IEEE80211_DL_HT, "======>%s()\n", __func__);
- if(pHTInfo->bCurBW40MHz)
- {
- if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_UPPER)
+ if (pHTInfo->bCurBW40MHz) {
+ if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER)
ieee->set_chan(ieee->dev, ieee->current_network.channel+2);
- else if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_LOWER)
+ else if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_LOWER)
ieee->set_chan(ieee->dev, ieee->current_network.channel-2);
else
ieee->set_chan(ieee->dev, ieee->current_network.channel);
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 7a0dbc0fa18e..a4df95cc7f60 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -4996,7 +4996,7 @@ static void rtl8192_usb_disconnect(struct usb_interface *intf)
kfree(priv->pFirmware);
priv->pFirmware = NULL;
rtl8192_usb_deleteendpoints(dev);
- mdelay(10);
+ usleep_range(10000, 11000);
}
free_ieee80211(dev);
RT_TRACE(COMP_DOWN, "wlan driver removed\n");
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index e1b81d34f1ad..e25b058dec26 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -539,13 +539,13 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
rtStatus = SendTxCommandPacket(dev, &tx_cmd, 12);
if (rtStatus == RT_STATUS_FAILURE)
RT_TRACE(COMP_POWER_TRACKING, "Set configuration with tx cmd queue fail!\n");
- mdelay(1);
+ usleep_range(1000, 2000);
/*DbgPrint("hi, vivi, strange\n");*/
for (i = 0; i <= 30; i++) {
read_nic_byte(dev, 0x1ba, &Pwr_Flag);
if (Pwr_Flag == 0) {
- mdelay(1);
+ usleep_range(1000, 2000);
continue;
}
read_nic_word(dev, 0x13c, &Avg_TSSI_Meas);
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index f236acfd3afa..a12cf8dd8ed9 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -23,7 +23,7 @@ r8723bs-y = \
hal/hal_com_phycfg.o \
hal/hal_btcoex.o \
hal/hal_sdio.o \
- hal/Hal8723BPwrSeq.o \
+ hal/hal_pwr_seq.o \
hal/HalPhyRf.o \
hal/HalPwrSeqCmd.o \
hal/odm.o \
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 0b530ea7fd81..45c05527a57a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_AP_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
index 79aa02afad01..adac915a2153 100644
--- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c
+++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <drv_types.h>
#include <rtw_debug.h>
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 9e132f943687..830be63391b7 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_CMD_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c
index b5dd244fee8f..f852fde47350 100644
--- a/drivers/staging/rtl8723bs/core/rtw_debug.c
+++ b/drivers/staging/rtl8723bs/core/rtw_debug.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_DEBUG_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_eeprom.c b/drivers/staging/rtl8723bs/core/rtw_eeprom.c
index 35031a7a5dc5..5eea02cfce1f 100644
--- a/drivers/staging/rtl8723bs/core/rtw_eeprom.c
+++ b/drivers/staging/rtl8723bs/core/rtw_eeprom.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_EEPROM_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index 44b92ef5db92..bbf6f3fa21ea 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_EFUSE_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 74750dbce379..0822e440204e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _IEEE80211_C
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
index 6bd5a4741e79..d341069097e2 100644
--- a/drivers/staging/rtl8723bs/core/rtw_io.c
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/*
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
index e5354cec8dd5..35ca825a7e5a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_IOCTL_SET_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index c13e514a655a..cc4f115e082c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_MLME_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 589fab24bb25..a81e13011c49 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_MLME_EXT_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_odm.c b/drivers/staging/rtl8723bs/core/rtw_odm.c
index edbcaeb9f8c2..93e8f17d2574 100644
--- a/drivers/staging/rtl8723bs/core/rtw_odm.c
+++ b/drivers/staging/rtl8723bs/core/rtw_odm.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <drv_types.h>
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index 85f7769ecc2d..110bbe340b78 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_PWRCTRL_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 86f995b8a88b..02f821d42658 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_RECV_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_rf.c b/drivers/staging/rtl8723bs/core/rtw_rf.c
index 07f5577cc073..a5095a4ef690 100644
--- a/drivers/staging/rtl8723bs/core/rtw_rf.c
+++ b/drivers/staging/rtl8723bs/core/rtw_rf.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_RF_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index aadf67bd0559..612277a555d2 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_SECURITY_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 03dd6848daa1..82716fd5c6f0 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_STA_MGT_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index f6dc26c8bd3d..2b3eb6f8ddc5 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_WLAN_UTIL_C_
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index be54186fb223..aaabffb0a199 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_XMIT_C_
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
index 37f42bfc55ed..eb6e07ef5dad 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "Mp_Precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.h b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.h
index 880bd63d36a5..cdffa391bd9d 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.h
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/* The following is for 8723B 1ANT BT Co-exist definition */
#define BT_INFO_8723B_1ANT_B_FTP BIT7
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
index 33610d39333f..cb62fc0a0f9c 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "Mp_Precomp.h"
@@ -2198,7 +2190,7 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
BTC_MSG_ALGORITHM,
ALGO_TRACE_FW_DETAIL,
(
- "[BTCoex], PsTdma type dismatch!!!, curPsTdma =%d, recordPsTdma =%d\n",
+ "[BTCoex], PsTdma type mismatch!!!, curPsTdma =%d, recordPsTdma =%d\n",
pCoexDm->curPsTdma,
pCoexDm->psTdmaDuAdjType
)
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.h b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.h
index 5a0fed69eda7..df973fcda48c 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.h
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/* The following is for 8723B 2Ant BT Co-exist definition */
#define BT_INFO_8723B_2ANT_B_FTP BIT7
diff --git a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
index 38414dd8adbd..aad86570b59c 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
+++ b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HALBTC_OUT_SRC_H__
#define __HALBTC_OUT_SRC_H__
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
index 951585467ab1..bae59e515348 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
******************************************************************************/
#include <linux/kernel.h>
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.h b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.h
index 41fe0ba16914..c1fbe91cd4f3 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.h
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
******************************************************************************/
#ifndef __INC_MP_BB_HW_IMG_8723B_H
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
index 7f8afa1be1ca..3c8e26aba406 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
******************************************************************************/
#include <linux/kernel.h>
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.h b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.h
index ae5dd3ccf939..788fdca1337b 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.h
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
******************************************************************************/
#ifndef __INC_MP_MAC_HW_IMG_8723B_H
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
index fadfcbd91858..ba42b4d2a9c4 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
******************************************************************************/
#include <linux/kernel.h>
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.h b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.h
index 98aa2ba97fa9..36a47437f974 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.h
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
******************************************************************************/
#ifndef __INC_MP_RF_HW_IMG_8723B_H
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.c b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
index 9adcc3098463..beb4002a40e1 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/* include "Mp_Precomp.h" */
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.h b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
index bd7462d4ed91..3d6f68bc61d7 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.h
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_PHY_RF_H__
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index a99a863be656..2ee25b2471de 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <drv_types.h>
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h
index ae4eca144606..a4d5150007be 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_PHY_RF_8723B_H__
diff --git a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
index 334d680b9b1c..c24156873fed 100644
--- a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/*++
Copyright (c) Realtek Semiconductor Corp. All rights reserved.
diff --git a/drivers/staging/rtl8723bs/hal/Mp_Precomp.h b/drivers/staging/rtl8723bs/hal/Mp_Precomp.h
index 248b9fb3886a..3ed1142a9896 100644
--- a/drivers/staging/rtl8723bs/hal/Mp_Precomp.h
+++ b/drivers/staging/rtl8723bs/hal/Mp_Precomp.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __MP_PRECOMP_H__
#define __MP_PRECOMP_H__
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 7d4df5a8832e..14284b1867f3 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define __HAL_BTCOEX_C__
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 1cef1d77977c..7f8ec55b08f1 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HAL_COM_C_
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 93d6cc478706..3922d0308a81 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HAL_COM_PHYCFG_C_
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
index 3463f40d6a31..4a4d17b44ba6 100644
--- a/drivers/staging/rtl8723bs/hal/hal_intf.c
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HAL_INTF_C_
diff --git a/drivers/staging/rtl8723bs/hal/hal_phy.c b/drivers/staging/rtl8723bs/hal/hal_phy.c
index c0a899df7aaf..ebaefcaf5f2a 100644
--- a/drivers/staging/rtl8723bs/hal/hal_phy.c
+++ b/drivers/staging/rtl8723bs/hal/hal_phy.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HAL_PHY_C_
diff --git a/drivers/staging/rtl8723bs/hal/Hal8723BPwrSeq.c b/drivers/staging/rtl8723bs/hal/hal_pwr_seq.c
index 0376806335d3..589e7aae7bbd 100644
--- a/drivers/staging/rtl8723bs/hal/Hal8723BPwrSeq.c
+++ b/drivers/staging/rtl8723bs/hal/hal_pwr_seq.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/*
@@ -25,7 +17,7 @@ Major Change History:
*/
-#include "Hal8723BPwrSeq.h"
+#include "hal_pwr_seq.h"
/* drivers should parse below arrays and do the corresponding actions */
/* 3 Power on Array */
diff --git a/drivers/staging/rtl8723bs/hal/hal_sdio.c b/drivers/staging/rtl8723bs/hal/hal_sdio.c
index e147c69e7222..2d61b09ebce6 100644
--- a/drivers/staging/rtl8723bs/hal/hal_sdio.c
+++ b/drivers/staging/rtl8723bs/hal/hal_sdio.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HAL_SDIO_C_
diff --git a/drivers/staging/rtl8723bs/hal/odm.c b/drivers/staging/rtl8723bs/hal/odm.c
index ff43bb26950b..7de5161e2ac4 100644
--- a/drivers/staging/rtl8723bs/hal/odm.c
+++ b/drivers/staging/rtl8723bs/hal/odm.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index 1037b88e8f08..a4153a660d32 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8723bs/hal/odm_AntDiv.c b/drivers/staging/rtl8723bs/hal/odm_AntDiv.c
index e0b20562ccd6..d5415eecdd7f 100644
--- a/drivers/staging/rtl8723bs/hal/odm_AntDiv.c
+++ b/drivers/staging/rtl8723bs/hal/odm_AntDiv.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
//============================================================
diff --git a/drivers/staging/rtl8723bs/hal/odm_AntDiv.h b/drivers/staging/rtl8723bs/hal/odm_AntDiv.h
index 92cdad55b7a4..c9496d561c91 100644
--- a/drivers/staging/rtl8723bs/hal/odm_AntDiv.h
+++ b/drivers/staging/rtl8723bs/hal/odm_AntDiv.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODMANTDIV_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
index 178aaab3f76c..a73304639226 100644
--- a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.h b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.h
index 0c92899d9967..81db63efe286 100644
--- a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.h
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODMCFOTRACK_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index f02eb63a45ce..a12fdce77eae 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.h b/drivers/staging/rtl8723bs/hal/odm_DIG.h
index e27a69126086..f6777e97a24a 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.h
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODMDIG_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.c b/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.c
index 6af017562539..e18c9d65eee2 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.h b/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.h
index b435cae4ab63..dba19271d526 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.h
+++ b/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODMDYNAMICBBPOWERSAVING_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.c b/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.c
index d24e5f712c20..17f90f4cc1ad 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.h b/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.h
index 35cdbab6d965..e2d244324ebe 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.h
+++ b/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODMDYNAMICTXPOWER_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c
index 8d53cb73cea5..acc64fa8f166 100644
--- a/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c
+++ b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.h b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.h
index b0590abe87f0..bc574d2ad065 100644
--- a/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.h
+++ b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODMEDCATURBOCHECK_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
index 9e161f080c57..ee2c293e4f59 100644
--- a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.h b/drivers/staging/rtl8723bs/hal/odm_HWConfig.h
index fdb4f8579ff9..d3af1caaa73c 100644
--- a/drivers/staging/rtl8723bs/hal/odm_HWConfig.h
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
index af7b44e29092..6ca799816c08 100644
--- a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
+++ b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h
index 3f650912f4ab..a7f13a85559c 100644
--- a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h
+++ b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*****************************************************************************/
#ifndef __ODMNOISEMONITOR_H__
#define __ODMNOISEMONITOR_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_PathDiv.c b/drivers/staging/rtl8723bs/hal/odm_PathDiv.c
index 2735ebfb6be3..4d22360934f6 100644
--- a/drivers/staging/rtl8723bs/hal/odm_PathDiv.c
+++ b/drivers/staging/rtl8723bs/hal/odm_PathDiv.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_PathDiv.h b/drivers/staging/rtl8723bs/hal/odm_PathDiv.h
index becde2e2a9ee..7a5bc00c3682 100644
--- a/drivers/staging/rtl8723bs/hal/odm_PathDiv.h
+++ b/drivers/staging/rtl8723bs/hal/odm_PathDiv.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODMPATHDIV_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c
index 0e4ce2741737..ecf0045fcad9 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c
+++ b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_RTL8723B.h b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.h
index 0700351a2b74..96ef1cc41a96 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RTL8723B.h
+++ b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODM_RTL8723B_H__
#define __ODM_RTL8723B_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c
index cdc9f38438d1..39f989bf3410 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c
+++ b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h
index a6b3d21e6463..12dfc58a6da0 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h
+++ b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __INC_ODM_REGCONFIG_H_8723B
#define __INC_ODM_REGCONFIG_H_8723B
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h b/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
index dc20e6165911..f2c0707aad4c 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
+++ b/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODM_REGDEFINE11N_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_debug.c b/drivers/staging/rtl8723bs/hal/odm_debug.c
index 28cf0a66ff93..b92422c8fb8e 100644
--- a/drivers/staging/rtl8723bs/hal/odm_debug.c
+++ b/drivers/staging/rtl8723bs/hal/odm_debug.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/odm_debug.h b/drivers/staging/rtl8723bs/hal/odm_debug.h
index ff131361248c..3e58cb806c8c 100644
--- a/drivers/staging/rtl8723bs/hal/odm_debug.h
+++ b/drivers/staging/rtl8723bs/hal/odm_debug.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODM_DBG_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_interface.h b/drivers/staging/rtl8723bs/hal/odm_interface.h
index 8ad0a0afca57..63f374f0bda7 100644
--- a/drivers/staging/rtl8723bs/hal/odm_interface.h
+++ b/drivers/staging/rtl8723bs/hal/odm_interface.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8723bs/hal/odm_precomp.h b/drivers/staging/rtl8723bs/hal/odm_precomp.h
index f543bdb31a89..b5b0c0ed02fc 100644
--- a/drivers/staging/rtl8723bs/hal/odm_precomp.h
+++ b/drivers/staging/rtl8723bs/hal/odm_precomp.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODM_PRECOMP_H__
diff --git a/drivers/staging/rtl8723bs/hal/odm_reg.h b/drivers/staging/rtl8723bs/hal/odm_reg.h
index 2496dcef7a9f..1ec6ffd69dbe 100644
--- a/drivers/staging/rtl8723bs/hal/odm_reg.h
+++ b/drivers/staging/rtl8723bs/hal/odm_reg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/* File Name: odm_reg.h */
/* Description: */
diff --git a/drivers/staging/rtl8723bs/hal/odm_types.h b/drivers/staging/rtl8723bs/hal/odm_types.h
index 9e3d072b03db..28f42c9c3dd7 100644
--- a/drivers/staging/rtl8723bs/hal/odm_types.h
+++ b/drivers/staging/rtl8723bs/hal/odm_types.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODM_TYPES_H__
#define __ODM_TYPES_H__
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index 69eed62b7c94..9f4a10aaa774 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8723B_CMD_C_
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
index b16255984f4e..6b6fc835c601 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/* Description: */
/* This file is for 92CE/92CU dynamic mechanism only */
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 0ce9b47d644d..592917fc00aa 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HAL_INIT_C_
@@ -433,29 +425,29 @@ s32 rtl8723b_FirmwareDownload(struct adapter *padapter, bool bUsedWoWLANFw)
goto exit;
}
- pFirmware->szFwBuffer = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (!pFirmware->szFwBuffer) {
+ pFirmware->fw_buffer_sz = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (!pFirmware->fw_buffer_sz) {
rtStatus = _FAIL;
goto exit;
}
- pFirmware->ulFwLength = fw->size;
+ pFirmware->fw_length = fw->size;
release_firmware(fw);
- if (pFirmware->ulFwLength > FW_8723B_SIZE) {
+ if (pFirmware->fw_length > FW_8723B_SIZE) {
rtStatus = _FAIL;
- DBG_871X_LEVEL(_drv_emerg_, "Firmware size:%u exceed %u\n", pFirmware->ulFwLength, FW_8723B_SIZE);
+ DBG_871X_LEVEL(_drv_emerg_, "Firmware size:%u exceed %u\n", pFirmware->fw_length, FW_8723B_SIZE);
goto release_fw1;
}
- pFirmwareBuf = pFirmware->szFwBuffer;
- FirmwareLen = pFirmware->ulFwLength;
+ pFirmwareBuf = pFirmware->fw_buffer_sz;
+ FirmwareLen = pFirmware->fw_length;
/* To Check Fw header. Added by tynli. 2009.12.04. */
pFwHdr = (struct rt_firmware_hdr *)pFirmwareBuf;
- pHalData->FirmwareVersion = le16_to_cpu(pFwHdr->Version);
- pHalData->FirmwareSubVersion = le16_to_cpu(pFwHdr->Subversion);
- pHalData->FirmwareSignature = le16_to_cpu(pFwHdr->Signature);
+ pHalData->FirmwareVersion = le16_to_cpu(pFwHdr->version);
+ pHalData->FirmwareSubVersion = le16_to_cpu(pFwHdr->subversion);
+ pHalData->FirmwareSignature = le16_to_cpu(pFwHdr->signature);
DBG_871X(
"%s: fw_ver =%x fw_subver =%04x sig = 0x%x, Month =%02x, Date =%02x, Hour =%02x, Minute =%02x\n",
@@ -463,10 +455,10 @@ s32 rtl8723b_FirmwareDownload(struct adapter *padapter, bool bUsedWoWLANFw)
pHalData->FirmwareVersion,
pHalData->FirmwareSubVersion,
pHalData->FirmwareSignature,
- pFwHdr->Month,
- pFwHdr->Date,
- pFwHdr->Hour,
- pFwHdr->Minute
+ pFwHdr->month,
+ pFwHdr->date,
+ pFwHdr->hour,
+ pFwHdr->minute
);
if (IS_FW_HEADER_EXIST_8723B(pFwHdr)) {
@@ -518,7 +510,7 @@ fwdl_stat:
);
exit:
- kfree(pFirmware->szFwBuffer);
+ kfree(pFirmware->fw_buffer_sz);
kfree(pFirmware);
release_fw1:
kfree(pBTFirmware);
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index e34d133075c0..50428f688859 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8723B_PHYCFG_C_
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
index f6aeb2630398..aa45a8421ebe 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/******************************************************************************
*
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
index 14bfbe3be0ca..76c8e6e9e6bc 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8723B_REDESC_C_
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 5d5cd4d01156..85077947b9b8 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8723BS_RECV_C_
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index 6281dfa1a3ca..10b3f9733bad 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8723BS_XMIT_C_
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index 1af77add6af4..3c65a9c02bbd 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _SDIO_HALINIT_C_
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index ab2ff53a8e57..d6b93e1f78d8 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*******************************************************************************/
#define _SDIO_OPS_C_
diff --git a/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h b/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h
index fbb83db4fb60..fb80901f0788 100644
--- a/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h
+++ b/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/*****************************************************************************
*
diff --git a/drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h b/drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h
index 5bb9f5a04734..7040cfc507d8 100644
--- a/drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h
+++ b/drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HALPWRSEQCMD_H__
#define __HALPWRSEQCMD_H__
diff --git a/drivers/staging/rtl8723bs/include/HalVerDef.h b/drivers/staging/rtl8723bs/include/HalVerDef.h
index a9e8609303b9..160f34efbfd5 100644
--- a/drivers/staging/rtl8723bs/include/HalVerDef.h
+++ b/drivers/staging/rtl8723bs/include/HalVerDef.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_VERSION_DEF_H__
#define __HAL_VERSION_DEF_H__
diff --git a/drivers/staging/rtl8723bs/include/autoconf.h b/drivers/staging/rtl8723bs/include/autoconf.h
index 09ed29f4efbd..196aca3aed7b 100644
--- a/drivers/staging/rtl8723bs/include/autoconf.h
+++ b/drivers/staging/rtl8723bs/include/autoconf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8723bs/include/basic_types.h b/drivers/staging/rtl8723bs/include/basic_types.h
index abadea07466d..bab9811aeb5f 100644
--- a/drivers/staging/rtl8723bs/include/basic_types.h
+++ b/drivers/staging/rtl8723bs/include/basic_types.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __BASIC_TYPES_H__
#define __BASIC_TYPES_H__
diff --git a/drivers/staging/rtl8723bs/include/cmd_osdep.h b/drivers/staging/rtl8723bs/include/cmd_osdep.h
index 3ad3ace86fd4..0749936df032 100644
--- a/drivers/staging/rtl8723bs/include/cmd_osdep.h
+++ b/drivers/staging/rtl8723bs/include/cmd_osdep.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __CMD_OSDEP_H_
#define __CMD_OSDEP_H_
diff --git a/drivers/staging/rtl8723bs/include/drv_conf.h b/drivers/staging/rtl8723bs/include/drv_conf.h
index 3e1ed0717ed8..7fc88b07a25e 100644
--- a/drivers/staging/rtl8723bs/include/drv_conf.h
+++ b/drivers/staging/rtl8723bs/include/drv_conf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __DRV_CONF_H__
#define __DRV_CONF_H__
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 16b81b1a3f33..c57f290f605a 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/*-------------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8723bs/include/drv_types_sdio.h b/drivers/staging/rtl8723bs/include/drv_types_sdio.h
index aef9bf71ab25..23bf30ece2df 100644
--- a/drivers/staging/rtl8723bs/include/drv_types_sdio.h
+++ b/drivers/staging/rtl8723bs/include/drv_types_sdio.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __DRV_TYPES_SDIO_H__
#define __DRV_TYPES_SDIO_H__
diff --git a/drivers/staging/rtl8723bs/include/ethernet.h b/drivers/staging/rtl8723bs/include/ethernet.h
index bd7099497b95..59899ab52aab 100644
--- a/drivers/staging/rtl8723bs/include/ethernet.h
+++ b/drivers/staging/rtl8723bs/include/ethernet.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/*! \file */
#ifndef __INC_ETHERNET_H
diff --git a/drivers/staging/rtl8723bs/include/hal_btcoex.h b/drivers/staging/rtl8723bs/include/hal_btcoex.h
index 7ee59c07fbf9..4066b0a1450c 100644
--- a/drivers/staging/rtl8723bs/include/hal_btcoex.h
+++ b/drivers/staging/rtl8723bs/include/hal_btcoex.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_BTCOEX_H__
#define __HAL_BTCOEX_H__
diff --git a/drivers/staging/rtl8723bs/include/hal_com.h b/drivers/staging/rtl8723bs/include/hal_com.h
index 3e9ed3b66632..d1c5b3193043 100644
--- a/drivers/staging/rtl8723bs/include/hal_com.h
+++ b/drivers/staging/rtl8723bs/include/hal_com.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_COMMON_H__
#define __HAL_COMMON_H__
diff --git a/drivers/staging/rtl8723bs/include/hal_com_h2c.h b/drivers/staging/rtl8723bs/include/hal_com_h2c.h
index 86b0c42295c2..7dbae5e2050e 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_h2c.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_h2c.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __COMMON_H2C_H__
#define __COMMON_H2C_H__
diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
index bcd81f50581f..c5184315f82f 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_COM_PHYCFG_H__
#define __HAL_COM_PHYCFG_H__
diff --git a/drivers/staging/rtl8723bs/include/hal_com_reg.h b/drivers/staging/rtl8723bs/include/hal_com_reg.h
index fbf33dba1757..31a187b35810 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_reg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_reg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_COMMON_REG_H__
#define __HAL_COMMON_REG_H__
diff --git a/drivers/staging/rtl8723bs/include/hal_data.h b/drivers/staging/rtl8723bs/include/hal_data.h
index 74a1db1eac38..7d782659a84f 100644
--- a/drivers/staging/rtl8723bs/include/hal_data.h
+++ b/drivers/staging/rtl8723bs/include/hal_data.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_DATA_H__
#define __HAL_DATA_H__
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 276089ad4864..19ceb4aa753e 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_INTF_H__
#define __HAL_INTF_H__
diff --git a/drivers/staging/rtl8723bs/include/hal_pg.h b/drivers/staging/rtl8723bs/include/hal_pg.h
index ba2a0b0c5b2f..0b7a8adf5c74 100644
--- a/drivers/staging/rtl8723bs/include/hal_pg.h
+++ b/drivers/staging/rtl8723bs/include/hal_pg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_PG_H__
diff --git a/drivers/staging/rtl8723bs/include/hal_phy.h b/drivers/staging/rtl8723bs/include/hal_phy.h
index 15f192697957..c6b9bf139ef6 100644
--- a/drivers/staging/rtl8723bs/include/hal_phy.h
+++ b/drivers/staging/rtl8723bs/include/hal_phy.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_PHY_H__
#define __HAL_PHY_H__
diff --git a/drivers/staging/rtl8723bs/include/Hal8723BPhyCfg.h b/drivers/staging/rtl8723bs/include/hal_phy_cfg.h
index 22250411ed6c..640427f407e3 100644
--- a/drivers/staging/rtl8723bs/include/Hal8723BPhyCfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_phy_cfg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __INC_HAL8723BPHYCFG_H__
#define __INC_HAL8723BPHYCFG_H__
diff --git a/drivers/staging/rtl8723bs/include/hal_phy_reg.h b/drivers/staging/rtl8723bs/include/hal_phy_reg.h
index 518095269497..682cdd6655ab 100644
--- a/drivers/staging/rtl8723bs/include/hal_phy_reg.h
+++ b/drivers/staging/rtl8723bs/include/hal_phy_reg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_PHY_REG_H__
#define __HAL_PHY_REG_H__
diff --git a/drivers/staging/rtl8723bs/include/Hal8723BPhyReg.h b/drivers/staging/rtl8723bs/include/hal_phy_reg_8723b.h
index 84a08e0092dd..b0b1ac1090fc 100644
--- a/drivers/staging/rtl8723bs/include/Hal8723BPhyReg.h
+++ b/drivers/staging/rtl8723bs/include/hal_phy_reg_8723b.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __INC_HAL8723BPHYREG_H__
#define __INC_HAL8723BPHYREG_H__
diff --git a/drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
index 130a94879805..130a94879805 100644
--- a/drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h
+++ b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
diff --git a/drivers/staging/rtl8723bs/include/hal_sdio.h b/drivers/staging/rtl8723bs/include/hal_sdio.h
index 691a02e980a3..3fc8acb430e5 100644
--- a/drivers/staging/rtl8723bs/include/hal_sdio.h
+++ b/drivers/staging/rtl8723bs/include/hal_sdio.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_SDIO_H_
#define __HAL_SDIO_H_
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index c8e5251c2760..974e922f54fa 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __IEEE80211_H
#define __IEEE80211_H
diff --git a/drivers/staging/rtl8723bs/include/ioctl_cfg80211.h b/drivers/staging/rtl8723bs/include/ioctl_cfg80211.h
index 2d42e0c2deff..931599d8b08a 100644
--- a/drivers/staging/rtl8723bs/include/ioctl_cfg80211.h
+++ b/drivers/staging/rtl8723bs/include/ioctl_cfg80211.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __IOCTL_CFG80211_H__
#define __IOCTL_CFG80211_H__
diff --git a/drivers/staging/rtl8723bs/include/mlme_osdep.h b/drivers/staging/rtl8723bs/include/mlme_osdep.h
index 69fd5543a3b1..f0d19637fb0f 100644
--- a/drivers/staging/rtl8723bs/include/mlme_osdep.h
+++ b/drivers/staging/rtl8723bs/include/mlme_osdep.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __MLME_OSDEP_H_
#define __MLME_OSDEP_H_
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index cd738da3bec9..0ea91a111da3 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __OSDEP_INTF_H_
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index e62ed71e1d80..76d619585046 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __OSDEP_SERVICE_H_
#define __OSDEP_SERVICE_H_
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index 711863d74a01..58d1e1019241 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __OSDEP_LINUX_SERVICE_H_
#define __OSDEP_LINUX_SERVICE_H_
diff --git a/drivers/staging/rtl8723bs/include/recv_osdep.h b/drivers/staging/rtl8723bs/include/recv_osdep.h
index a480874a2f2a..6fea0e948271 100644
--- a/drivers/staging/rtl8723bs/include/recv_osdep.h
+++ b/drivers/staging/rtl8723bs/include/recv_osdep.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RECV_OSDEP_H_
#define __RECV_OSDEP_H_
diff --git a/drivers/staging/rtl8723bs/include/rtl8192c_recv.h b/drivers/staging/rtl8723bs/include/rtl8192c_recv.h
index 3e1be0092df4..c77d172de7d0 100644
--- a/drivers/staging/rtl8723bs/include/rtl8192c_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtl8192c_recv.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTL8192C_RECV_H_
#define _RTL8192C_RECV_H_
diff --git a/drivers/staging/rtl8723bs/include/rtl8192c_rf.h b/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
index 97900a31b326..ad684add8dd6 100644
--- a/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
+++ b/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTL8192C_RF_H_
#define _RTL8192C_RF_H_
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h b/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h
index 8d610646ad17..ecfd2d383ac2 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8723B_CMD_H__
#define __RTL8723B_CMD_H__
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_dm.h b/drivers/staging/rtl8723bs/include/rtl8723b_dm.h
index cc64b38ead2a..1d2da5286e7c 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_dm.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_dm.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8723B_DM_H__
#define __RTL8723B_DM_H__
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
index adaeea115e4d..8f00ced1c697 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8723B_HAL_H__
#define __RTL8723B_HAL_H__
@@ -24,165 +16,179 @@
#include "rtl8723b_xmit.h"
#include "rtl8723b_cmd.h"
#include "rtw_mp.h"
-#include "Hal8723BPwrSeq.h"
-#include "Hal8723BPhyReg.h"
-#include "Hal8723BPhyCfg.h"
+#include "hal_pwr_seq.h"
+#include "hal_phy_reg_8723b.h"
+#include "hal_phy_cfg.h"
/* */
-/* RTL8723B From file */
+/* RTL8723B From file */
/* */
- #define RTL8723B_FW_IMG "rtl8723b/FW_NIC.bin"
- #define RTL8723B_FW_WW_IMG "rtl8723b/FW_WoWLAN.bin"
- #define RTL8723B_PHY_REG "rtl8723b/PHY_REG.txt"
- #define RTL8723B_PHY_RADIO_A "rtl8723b/RadioA.txt"
- #define RTL8723B_PHY_RADIO_B "rtl8723b/RadioB.txt"
- #define RTL8723B_TXPWR_TRACK "rtl8723b/TxPowerTrack.txt"
- #define RTL8723B_AGC_TAB "rtl8723b/AGC_TAB.txt"
- #define RTL8723B_PHY_MACREG "rtl8723b/MAC_REG.txt"
- #define RTL8723B_PHY_REG_PG "rtl8723b/PHY_REG_PG.txt"
- #define RTL8723B_PHY_REG_MP "rtl8723b/PHY_REG_MP.txt"
- #define RTL8723B_TXPWR_LMT "rtl8723b/TXPWR_LMT.txt"
+#define RTL8723B_FW_IMG "rtl8723b/FW_NIC.bin"
+#define RTL8723B_FW_WW_IMG "rtl8723b/FW_WoWLAN.bin"
+#define RTL8723B_PHY_REG "rtl8723b/PHY_REG.txt"
+#define RTL8723B_PHY_RADIO_A "rtl8723b/RadioA.txt"
+#define RTL8723B_PHY_RADIO_B "rtl8723b/RadioB.txt"
+#define RTL8723B_TXPWR_TRACK "rtl8723b/TxPowerTrack.txt"
+#define RTL8723B_AGC_TAB "rtl8723b/AGC_TAB.txt"
+#define RTL8723B_PHY_MACREG "rtl8723b/MAC_REG.txt"
+#define RTL8723B_PHY_REG_PG "rtl8723b/PHY_REG_PG.txt"
+#define RTL8723B_PHY_REG_MP "rtl8723b/PHY_REG_MP.txt"
+#define RTL8723B_TXPWR_LMT "rtl8723b/TXPWR_LMT.txt"
/* */
-/* RTL8723B From header */
+/* RTL8723B From header */
/* */
-#define FW_8723B_SIZE 0x8000
-#define FW_8723B_START_ADDRESS 0x1000
-#define FW_8723B_END_ADDRESS 0x1FFF /* 0x5FFF */
+#define FW_8723B_SIZE 0x8000
+#define FW_8723B_START_ADDRESS 0x1000
+#define FW_8723B_END_ADDRESS 0x1FFF /* 0x5FFF */
-#define IS_FW_HEADER_EXIST_8723B(_pFwHdr) ((le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x5300)
+#define IS_FW_HEADER_EXIST_8723B(fw_hdr) \
+ ((le16_to_cpu(fw_hdr->signature) & 0xFFF0) == 0x5300)
struct rt_firmware {
- u32 ulFwLength;
- u8 *szFwBuffer;
+ u32 fw_length;
+ u8 *fw_buffer_sz;
};
-/* This structure must be cared byte-ordering */
+/* This structure must be carefully byte-ordered. */
struct rt_firmware_hdr {
/* 8-byte alinment required */
/* LONG WORD 0 ---- */
- __le16 Signature; /* 92C0: test chip; 92C, 88C0: test chip; 88C1: MP A-cut; 92C1: MP A-cut */
- u8 Category; /* AP/NIC and USB/PCI */
- u8 Function; /* Reserved for different FW function indcation, for further use when driver needs to download different FW in different conditions */
- __le16 Version; /* FW Version */
- __le16 Subversion; /* FW Subversion, default 0x00 */
+ __le16 signature; /* 92C0: test chip; 92C, 88C0: test chip;
+ * 88C1: MP A-cut; 92C1: MP A-cut */
+ u8 category; /* AP/NIC and USB/PCI */
+ u8 function; /* Reserved for different FW function indications,
+ * for further use when driver needs to download
+ * different FW in different conditions. */
+ __le16 version; /* FW Version */
+ __le16 subversion; /* FW Subversion, default 0x00 */
/* LONG WORD 1 ---- */
- u8 Month; /* Release time Month field */
- u8 Date; /* Release time Date field */
- u8 Hour; /* Release time Hour field */
- u8 Minute; /* Release time Minute field */
- __le16 RamCodeSize; /* The size of RAM code */
- __le16 Rsvd2;
+ u8 month; /* Release time Month field */
+ u8 date; /* Release time Date field */
+ u8 hour; /* Release time Hour field */
+ u8 minute; /* Release time Minute field */
+
+ __le16 ram_code_size; /* The size of RAM code */
+ __le16 rsvd2;
/* LONG WORD 2 ---- */
- __le32 SvnIdx; /* The SVN entry index */
- __le32 Rsvd3;
+ __le32 svn_idx; /* The SVN entry index */
+ __le32 rsvd3;
/* LONG WORD 3 ---- */
- __le32 Rsvd4;
- __le32 Rsvd5;
+ __le32 rsvd4;
+ __le32 rsvd5;
};
-#define DRIVER_EARLY_INT_TIME_8723B 0x05
-#define BCN_DMA_ATIME_INT_TIME_8723B 0x02
+#define DRIVER_EARLY_INT_TIME_8723B 0x05
+#define BCN_DMA_ATIME_INT_TIME_8723B 0x02
-/* for 8723B */
-/* TX 32K, RX 16K, Page size 128B for TX, 8B for RX */
-#define PAGE_SIZE_TX_8723B 128
-#define PAGE_SIZE_RX_8723B 8
+/* for 8723B */
+/* TX 32K, RX 16K, Page size 128B for TX, 8B for RX */
+#define PAGE_SIZE_TX_8723B 128
+#define PAGE_SIZE_RX_8723B 8
-#define RX_DMA_SIZE_8723B 0x4000 /* 16K */
-#define RX_DMA_RESERVED_SIZE_8723B 0x80 /* 128B, reserved for tx report */
-#define RX_DMA_BOUNDARY_8723B (RX_DMA_SIZE_8723B - RX_DMA_RESERVED_SIZE_8723B - 1)
+#define RX_DMA_SIZE_8723B 0x4000 /* 16K */
+#define RX_DMA_RESERVED_SIZE_8723B 0x80 /* 128B, reserved for tx report */
+#define RX_DMA_BOUNDARY_8723B \
+ (RX_DMA_SIZE_8723B - RX_DMA_RESERVED_SIZE_8723B - 1)
-
-/* Note: We will divide number of page equally for each queue other than public queue! */
+/* Note: We will divide number of pages equally for each queue other than the
+ * public queue!
+ */
/* For General Reserved Page Number(Beacon Queue is reserved page) */
/* Beacon:2, PS-Poll:1, Null Data:1, Qos Null Data:1, BT Qos Null Data:1 */
-#define BCNQ_PAGE_NUM_8723B 0x08
-#define BCNQ1_PAGE_NUM_8723B 0x00
+#define BCNQ_PAGE_NUM_8723B 0x08
+#define BCNQ1_PAGE_NUM_8723B 0x00
#ifdef CONFIG_PNO_SUPPORT
#undef BCNQ1_PAGE_NUM_8723B
-#define BCNQ1_PAGE_NUM_8723B 0x00 /* 0x04 */
+#define BCNQ1_PAGE_NUM_8723B 0x00 /* 0x04 */
#endif
-#define MAX_RX_DMA_BUFFER_SIZE_8723B 0x2800 /* RX 10K */
-/* For WoWLan , more reserved page */
+#define MAX_RX_DMA_BUFFER_SIZE_8723B 0x2800 /* RX 10K */
+
+/* For WoWLAN, more reserved pages */
/* ARP Rsp:1, RWC:1, GTK Info:1, GTK RSP:2, GTK EXT MEM:2, PNO: 6 */
#ifdef CONFIG_WOWLAN
-#define WOWLAN_PAGE_NUM_8723B 0x07
+#define WOWLAN_PAGE_NUM_8723B 0x07
#else
-#define WOWLAN_PAGE_NUM_8723B 0x00
+#define WOWLAN_PAGE_NUM_8723B 0x00
#endif
#ifdef CONFIG_PNO_SUPPORT
#undef WOWLAN_PAGE_NUM_8723B
-#define WOWLAN_PAGE_NUM_8723B 0x0d
+#define WOWLAN_PAGE_NUM_8723B 0x0d
#endif
#ifdef CONFIG_AP_WOWLAN
-#define AP_WOWLAN_PAGE_NUM_8723B 0x02
+#define AP_WOWLAN_PAGE_NUM_8723B 0x02
#endif
-#define TX_TOTAL_PAGE_NUMBER_8723B (0xFF - BCNQ_PAGE_NUM_8723B - BCNQ1_PAGE_NUM_8723B - WOWLAN_PAGE_NUM_8723B)
-#define TX_PAGE_BOUNDARY_8723B (TX_TOTAL_PAGE_NUMBER_8723B + 1)
+#define TX_TOTAL_PAGE_NUMBER_8723B \
+ (0xFF - BCNQ_PAGE_NUM_8723B - \
+ BCNQ1_PAGE_NUM_8723B - \
+ WOWLAN_PAGE_NUM_8723B)
+#define TX_PAGE_BOUNDARY_8723B (TX_TOTAL_PAGE_NUMBER_8723B + 1)
-#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER_8723B TX_TOTAL_PAGE_NUMBER_8723B
-#define WMM_NORMAL_TX_PAGE_BOUNDARY_8723B (WMM_NORMAL_TX_TOTAL_PAGE_NUMBER_8723B + 1)
+#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER_8723B TX_TOTAL_PAGE_NUMBER_8723B
+#define WMM_NORMAL_TX_PAGE_BOUNDARY_8723B \
+ (WMM_NORMAL_TX_TOTAL_PAGE_NUMBER_8723B + 1)
-/* For Normal Chip Setting */
-/* (HPQ + LPQ + NPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER_8723B */
-#define NORMAL_PAGE_NUM_HPQ_8723B 0x0C
-#define NORMAL_PAGE_NUM_LPQ_8723B 0x02
-#define NORMAL_PAGE_NUM_NPQ_8723B 0x02
+/* For Normal Chip Setting */
+/* (HPQ + LPQ + NPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER_8723B */
+#define NORMAL_PAGE_NUM_HPQ_8723B 0x0C
+#define NORMAL_PAGE_NUM_LPQ_8723B 0x02
+#define NORMAL_PAGE_NUM_NPQ_8723B 0x02
/* Note: For Normal Chip Setting, modify later */
-#define WMM_NORMAL_PAGE_NUM_HPQ_8723B 0x30
-#define WMM_NORMAL_PAGE_NUM_LPQ_8723B 0x20
-#define WMM_NORMAL_PAGE_NUM_NPQ_8723B 0x20
+#define WMM_NORMAL_PAGE_NUM_HPQ_8723B 0x30
+#define WMM_NORMAL_PAGE_NUM_LPQ_8723B 0x20
+#define WMM_NORMAL_PAGE_NUM_NPQ_8723B 0x20
#include "HalVerDef.h"
#include "hal_com.h"
-#define EFUSE_OOB_PROTECT_BYTES 15
+#define EFUSE_OOB_PROTECT_BYTES 15
#define HAL_EFUSE_MEMORY
-#define HWSET_MAX_SIZE_8723B 512
-#define EFUSE_REAL_CONTENT_LEN_8723B 512
-#define EFUSE_MAP_LEN_8723B 512
-#define EFUSE_MAX_SECTION_8723B 64
+#define HWSET_MAX_SIZE_8723B 512
+#define EFUSE_REAL_CONTENT_LEN_8723B 512
+#define EFUSE_MAP_LEN_8723B 512
+#define EFUSE_MAX_SECTION_8723B 64
-#define EFUSE_IC_ID_OFFSET 506 /* For some inferiority IC purpose. added by Roger, 2009.09.02. */
-#define AVAILABLE_EFUSE_ADDR(addr) (addr < EFUSE_REAL_CONTENT_LEN_8723B)
+#define EFUSE_IC_ID_OFFSET 506 /* For some inferior IC purposes.
+ * Added by Roger, 2009.09.02. */
+#define AVAILABLE_EFUSE_ADDR(addr) (addr < EFUSE_REAL_CONTENT_LEN_8723B)
-#define EFUSE_ACCESS_ON 0x69 /* For RTL8723 only. */
-#define EFUSE_ACCESS_OFF 0x00 /* For RTL8723 only. */
+#define EFUSE_ACCESS_ON 0x69 /* For RTL8723 only. */
+#define EFUSE_ACCESS_OFF 0x00 /* For RTL8723 only. */
/* */
-/* EFUSE for BT definition */
+/* EFUSE for BT definition */
/* */
-#define EFUSE_BT_REAL_BANK_CONTENT_LEN 512
-#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
-#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
-#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
+#define EFUSE_BT_REAL_BANK_CONTENT_LEN 512
+#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
+#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
+#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
-#define EFUSE_PROTECT_BYTES_BANK 16
+#define EFUSE_PROTECT_BYTES_BANK 16
-/* Description: Determine the types of C2H events that are the same in driver and Fw. */
-/* Fisrt constructed by tynli. 2009.10.09. */
-typedef enum _C2H_EVT
-{
+/* Description: Determine the types of C2H events that are the same in driver
+ * and FW; First constructed by tynli. 2009.10.09.
+ */
+typedef enum _C2H_EVT {
C2H_DBG = 0,
C2H_TSF = 1,
C2H_AP_RPT_RSP = 2,
- C2H_CCX_TX_RPT = 3, /* The FW notify the report of the specific tx packet. */
+ C2H_CCX_TX_RPT = 3, /* The FW notifies the report
+ * of the specific tx packet. */
C2H_BT_RSSI = 4,
C2H_BT_OP_MODE = 5,
C2H_EXT_RA_RPT = 6,
@@ -192,24 +198,24 @@ typedef enum _C2H_EVT
MAX_C2HEVENT
} C2H_EVT;
-typedef struct _C2H_EVT_HDR
-{
+typedef struct _C2H_EVT_HDR {
u8 CmdID;
u8 CmdLen;
u8 CmdSeq;
} __attribute__((__packed__)) C2H_EVT_HDR, *PC2H_EVT_HDR;
-typedef enum tag_Package_Definition
-{
- PACKAGE_DEFAULT,
- PACKAGE_QFN68,
- PACKAGE_TFBGA90,
- PACKAGE_TFBGA80,
- PACKAGE_TFBGA79
-}PACKAGE_TYPE_E;
+typedef enum tag_Package_Definition {
+ PACKAGE_DEFAULT,
+ PACKAGE_QFN68,
+ PACKAGE_TFBGA90,
+ PACKAGE_TFBGA80,
+ PACKAGE_TFBGA79
+} PACKAGE_TYPE_E;
-#define INCLUDE_MULTI_FUNC_BT(_Adapter) (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_BT)
-#define INCLUDE_MULTI_FUNC_GPS(_Adapter) (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_GPS)
+#define INCLUDE_MULTI_FUNC_BT(_Adapter) \
+ (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_BT)
+#define INCLUDE_MULTI_FUNC_GPS(_Adapter) \
+ (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_GPS)
/* rtl8723a_hal_init.c */
s32 rtl8723b_FirmwareDownload(struct adapter *padapter, bool bUsedWoWLANFw);
@@ -225,28 +231,40 @@ s32 rtl8723b_InitLLTTable(struct adapter *padapter);
u8 GetEEPROMSize8723B(struct adapter *padapter);
void Hal_InitPGData(struct adapter *padapter, u8 *PROMContent);
void Hal_EfuseParseIDCode(struct adapter *padapter, u8 *hwinfo);
-void Hal_EfuseParseTxPowerInfo_8723B(struct adapter *padapter, u8 *PROMContent, bool AutoLoadFail);
-void Hal_EfuseParseBTCoexistInfo_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseEEPROMVer_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseChnlPlan_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseCustomerID_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseAntennaDiversity_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseXtal_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseThermalMeter_8723B(struct adapter *padapter, u8 *hwinfo, u8 AutoLoadFail);
-void Hal_EfuseParsePackageType_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseVoltage_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseTxPowerInfo_8723B(struct adapter *padapter, u8 *PROMContent,
+ bool AutoLoadFail);
+void Hal_EfuseParseBTCoexistInfo_8723B(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseEEPROMVer_8723B(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseChnlPlan_8723B(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseCustomerID_8723B(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseAntennaDiversity_8723B(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseXtal_8723B(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseThermalMeter_8723B(struct adapter *padapter, u8 *hwinfo,
+ u8 AutoLoadFail);
+void Hal_EfuseParsePackageType_8723B(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseVoltage_8723B(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
void C2HPacketHandler_8723B(struct adapter *padapter, u8 *pbuffer, u16 length);
void rtl8723b_set_hal_ops(struct hal_ops *pHalFunc);
void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val);
void GetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val);
-u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval);
-u8 GetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval);
+u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable,
+ void *pval);
+u8 GetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable,
+ void *pval);
/* register */
void rtl8723b_InitBeaconParameters(struct adapter *padapter);
-void _InitBurstPktLen_8723BS(struct adapter * Adapter);
+void _InitBurstPktLen_8723BS(struct adapter *adapter);
void _8051Reset8723(struct adapter *padapter);
#ifdef CONFIG_WOWLAN
void Hal_DetectWoWMode(struct adapter *padapter);
@@ -256,24 +274,25 @@ void rtl8723b_start_thread(struct adapter *padapter);
void rtl8723b_stop_thread(struct adapter *padapter);
#if defined(CONFIG_CHECK_BT_HANG)
-void rtl8723bs_init_checkbthang_workqueue(struct adapter * adapter);
-void rtl8723bs_free_checkbthang_workqueue(struct adapter * adapter);
-void rtl8723bs_cancle_checkbthang_workqueue(struct adapter * adapter);
-void rtl8723bs_hal_check_bt_hang(struct adapter * adapter);
+void rtl8723bs_init_checkbthang_workqueue(struct adapter *adapter);
+void rtl8723bs_free_checkbthang_workqueue(struct adapter *adapter);
+void rtl8723bs_cancle_checkbthang_workqueue(struct adapter *adapter);
+void rtl8723bs_hal_check_bt_hang(struct adapter *adapter);
#endif
#ifdef CONFIG_GPIO_WAKEUP
void HalSetOutPutGPIO(struct adapter *padapter, u8 index, u8 OutPutValue);
#endif
-int FirmwareDownloadBT(struct adapter * Adapter, struct rt_firmware *firmware);
+int FirmwareDownloadBT(struct adapter *adapter, struct rt_firmware *firmware);
void CCX_FwC2HTxRpt_8723b(struct adapter *padapter, u8 *pdata, u8 len);
s32 c2h_id_filter_ccx_8723b(u8 *buf);
s32 c2h_handler_8723b(struct adapter *padapter, u8 *pC2hEvent);
-u8 MRateToHwRate8723B(u8 rate);
-u8 HwRateToMRate8723B(u8 rate);
+u8 MRateToHwRate8723B(u8 rate);
+u8 HwRateToMRate8723B(u8 rate);
-void Hal_ReadRFGainOffset(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_ReadRFGainOffset(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
#endif
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_recv.h b/drivers/staging/rtl8723bs/include/rtl8723b_recv.h
index 7218424dae99..31ae83f2557f 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_recv.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8723B_RECV_H__
#define __RTL8723B_RECV_H__
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_rf.h b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
index f5aa1b09a608..1c16183caf9b 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8723B_RF_H__
#define __RTL8723B_RF_H__
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_spec.h b/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
index 1906ff2038f5..9149fe598545 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*******************************************************************************/
#ifndef __RTL8723B_SPEC_H__
#define __RTL8723B_SPEC_H__
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
index 3bea5d5dd82b..23a6069c3e5d 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8723B_XMIT_H__
#define __RTL8723B_XMIT_H__
diff --git a/drivers/staging/rtl8723bs/include/rtw_ap.h b/drivers/staging/rtl8723bs/include/rtw_ap.h
index 3c2d1e912a9d..fd56c9db16a9 100644
--- a/drivers/staging/rtl8723bs/include/rtw_ap.h
+++ b/drivers/staging/rtl8723bs/include/rtw_ap.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_AP_H_
#define __RTW_AP_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_beamforming.h b/drivers/staging/rtl8723bs/include/rtw_beamforming.h
index 69711e41c50b..031496c8c02c 100644
--- a/drivers/staging/rtl8723bs/include/rtw_beamforming.h
+++ b/drivers/staging/rtl8723bs/include/rtw_beamforming.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_BEAMFORMING_H_
#define __RTW_BEAMFORMING_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_br_ext.h b/drivers/staging/rtl8723bs/include/rtw_br_ext.h
index c942535a7961..5eaeb3c9a97c 100644
--- a/drivers/staging/rtl8723bs/include/rtw_br_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_br_ext.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_BR_EXT_H_
#define _RTW_BR_EXT_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_btcoex.h b/drivers/staging/rtl8723bs/include/rtw_btcoex.h
index 9a5c3f40bddb..53f49c6b2fcd 100644
--- a/drivers/staging/rtl8723bs/include/rtw_btcoex.h
+++ b/drivers/staging/rtl8723bs/include/rtw_btcoex.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_BTCOEX_H__
#define __RTW_BTCOEX_H__
diff --git a/drivers/staging/rtl8723bs/include/rtw_byteorder.h b/drivers/staging/rtl8723bs/include/rtw_byteorder.h
index ffbbcec77a0d..f76fbfbed4c7 100644
--- a/drivers/staging/rtl8723bs/include/rtw_byteorder.h
+++ b/drivers/staging/rtl8723bs/include/rtw_byteorder.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTL871X_BYTEORDER_H_
#define _RTL871X_BYTEORDER_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h
index 286d329b7a4f..1530d0ea1d51 100644
--- a/drivers/staging/rtl8723bs/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_CMD_H_
#define __RTW_CMD_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_debug.h b/drivers/staging/rtl8723bs/include/rtw_debug.h
index 625e2a39a861..216d9492575e 100644
--- a/drivers/staging/rtl8723bs/include/rtw_debug.h
+++ b/drivers/staging/rtl8723bs/include/rtw_debug.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_DEBUG_H__
#define __RTW_DEBUG_H__
diff --git a/drivers/staging/rtl8723bs/include/rtw_eeprom.h b/drivers/staging/rtl8723bs/include/rtw_eeprom.h
index 2e292bf9cf51..1fcd79fa1c21 100644
--- a/drivers/staging/rtl8723bs/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8723bs/include/rtw_eeprom.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_EEPROM_H__
#define __RTW_EEPROM_H__
diff --git a/drivers/staging/rtl8723bs/include/rtw_efuse.h b/drivers/staging/rtl8723bs/include/rtw_efuse.h
index 5d3778a3204b..9d9a5a3e336d 100644
--- a/drivers/staging/rtl8723bs/include/rtw_efuse.h
+++ b/drivers/staging/rtl8723bs/include/rtw_efuse.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_EFUSE_H__
#define __RTW_EFUSE_H__
diff --git a/drivers/staging/rtl8723bs/include/rtw_event.h b/drivers/staging/rtl8723bs/include/rtw_event.h
index 2bf23de7e516..ee80aa21eb10 100644
--- a/drivers/staging/rtl8723bs/include/rtw_event.h
+++ b/drivers/staging/rtl8723bs/include/rtw_event.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_EVENT_H_
#define _RTW_EVENT_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_ht.h b/drivers/staging/rtl8723bs/include/rtw_ht.h
index 20ca0b7b481c..952c804dd0fd 100644
--- a/drivers/staging/rtl8723bs/include/rtw_ht.h
+++ b/drivers/staging/rtl8723bs/include/rtw_ht.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_HT_H_
#define _RTW_HT_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h
index 0341d0d35375..4f8be55da65d 100644
--- a/drivers/staging/rtl8723bs/include/rtw_io.h
+++ b/drivers/staging/rtl8723bs/include/rtw_io.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_IO_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_ioctl.h b/drivers/staging/rtl8723bs/include/rtw_ioctl.h
index c19e179fc426..7179591cb01d 100644
--- a/drivers/staging/rtl8723bs/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8723bs/include/rtw_ioctl.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_IOCTL_H_
#define _RTW_IOCTL_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_ioctl_set.h b/drivers/staging/rtl8723bs/include/rtw_ioctl_set.h
index ebf233559f67..f0457e91d00f 100644
--- a/drivers/staging/rtl8723bs/include/rtw_ioctl_set.h
+++ b/drivers/staging/rtl8723bs/include/rtw_ioctl_set.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_IOCTL_SET_H_
#define __RTW_IOCTL_SET_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index 2e4f12b54929..1ea9ea0e8d2e 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_MLME_H_
#define __RTW_MLME_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index 6613dea2b283..4c3141882143 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_MLME_EXT_H_
#define __RTW_MLME_EXT_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_mp.h b/drivers/staging/rtl8723bs/include/rtw_mp.h
index 88ace11e42e9..839084733201 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mp.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mp.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_MP_H_
#define _RTW_MP_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_odm.h b/drivers/staging/rtl8723bs/include/rtw_odm.h
index 961ae2c218f9..263e92cfea96 100644
--- a/drivers/staging/rtl8723bs/include/rtw_odm.h
+++ b/drivers/staging/rtl8723bs/include/rtw_odm.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_ODM_H__
#define __RTW_ODM_H__
diff --git a/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
index faf91022f54a..72df6cffe62e 100644
--- a/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_PWRCTRL_H_
#define __RTW_PWRCTRL_H_
@@ -38,19 +30,18 @@
#define BTCOEX_ALIVE BIT(4)
-enum Power_Mgnt
-{
- PS_MODE_ACTIVE = 0 ,
- PS_MODE_MIN ,
- PS_MODE_MAX ,
- PS_MODE_DTIM , /* PS_MODE_SELF_DEFINED */
- PS_MODE_VOIP ,
- PS_MODE_UAPSD_WMM ,
- PS_MODE_UAPSD ,
- PS_MODE_IBSS ,
- PS_MODE_WWLAN ,
- PM_Radio_Off ,
- PM_Card_Disable ,
+enum Power_Mgnt {
+ PS_MODE_ACTIVE = 0,
+ PS_MODE_MIN,
+ PS_MODE_MAX,
+ PS_MODE_DTIM, /* PS_MODE_SELF_DEFINED */
+ PS_MODE_VOIP,
+ PS_MODE_UAPSD_WMM,
+ PS_MODE_UAPSD,
+ PS_MODE_IBSS,
+ PS_MODE_WWLAN,
+ PM_Radio_Off,
+ PM_Card_Disable,
PS_MODE_NUM,
};
diff --git a/drivers/staging/rtl8723bs/include/rtw_qos.h b/drivers/staging/rtl8723bs/include/rtw_qos.h
index ce6d9142bc5a..1f28837f6c27 100644
--- a/drivers/staging/rtl8723bs/include/rtw_qos.h
+++ b/drivers/staging/rtl8723bs/include/rtw_qos.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index d4986f5685c5..1f53c1c7b0da 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_RECV_H_
#define _RTW_RECV_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_rf.h b/drivers/staging/rtl8723bs/include/rtw_rf.h
index f9becab7f7ad..d3a8e4b7069a 100644
--- a/drivers/staging/rtl8723bs/include/rtw_rf.h
+++ b/drivers/staging/rtl8723bs/include/rtw_rf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_RF_H_
#define __RTW_RF_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
index d5af72b2335d..bea184452edd 100644
--- a/drivers/staging/rtl8723bs/include/rtw_security.h
+++ b/drivers/staging/rtl8723bs/include/rtw_security.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_SECURITY_H_
#define __RTW_SECURITY_H_
diff --git a/drivers/staging/rtl8723bs/include/rtw_version.h b/drivers/staging/rtl8723bs/include/rtw_version.h
index 628d987c3e86..55e907b097ae 100644
--- a/drivers/staging/rtl8723bs/include/rtw_version.h
+++ b/drivers/staging/rtl8723bs/include/rtw_version.h
@@ -1,2 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define DRIVERVERSION "v4.3.5.5_12290.20140916_BTCOEX20140507-4E40"
#define BTCOEXVERSION "BTCOEX20140507-4E40"
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index 11571649cd2c..a75b668d09a6 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_XMIT_H_
#define _RTW_XMIT_H_
diff --git a/drivers/staging/rtl8723bs/include/sdio_hal.h b/drivers/staging/rtl8723bs/include/sdio_hal.h
index 8fd8bbeda2d8..6fae19dd0cbd 100644
--- a/drivers/staging/rtl8723bs/include/sdio_hal.h
+++ b/drivers/staging/rtl8723bs/include/sdio_hal.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __SDIO_HAL_H__
#define __SDIO_HAL_H__
diff --git a/drivers/staging/rtl8723bs/include/sdio_ops.h b/drivers/staging/rtl8723bs/include/sdio_ops.h
index 8fffc652cf0c..0f117ff1fbbe 100644
--- a/drivers/staging/rtl8723bs/include/sdio_ops.h
+++ b/drivers/staging/rtl8723bs/include/sdio_ops.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __SDIO_OPS_H__
#define __SDIO_OPS_H__
diff --git a/drivers/staging/rtl8723bs/include/sdio_ops_linux.h b/drivers/staging/rtl8723bs/include/sdio_ops_linux.h
index bd62caec409f..16a03adbc2cf 100644
--- a/drivers/staging/rtl8723bs/include/sdio_ops_linux.h
+++ b/drivers/staging/rtl8723bs/include/sdio_ops_linux.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __SDIO_OPS_LINUX_H__
#define __SDIO_OPS_LINUX_H__
diff --git a/drivers/staging/rtl8723bs/include/sdio_osintf.h b/drivers/staging/rtl8723bs/include/sdio_osintf.h
index 86673682fb4f..146b44f95e29 100644
--- a/drivers/staging/rtl8723bs/include/sdio_osintf.h
+++ b/drivers/staging/rtl8723bs/include/sdio_osintf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __SDIO_OSINTF_H__
#define __SDIO_OSINTF_H__
diff --git a/drivers/staging/rtl8723bs/include/sta_info.h b/drivers/staging/rtl8723bs/include/sta_info.h
index 84fa116fc5da..b9df42d0677e 100644
--- a/drivers/staging/rtl8723bs/include/sta_info.h
+++ b/drivers/staging/rtl8723bs/include/sta_info.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __STA_INFO_H_
#define __STA_INFO_H_
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
index 530d698f50d9..08bc79840b23 100644
--- a/drivers/staging/rtl8723bs/include/wifi.h
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _WIFI_H_
#define _WIFI_H_
@@ -976,7 +968,7 @@ enum HT_CAP_AMPDU_FACTOR {
#define P2P_STATUS_FAIL_INCOMPATIBLE_PROVSION 0x0A
#define P2P_STATUS_FAIL_USER_REJECT 0x0B
-/* Value of Inviation Flags Attribute */
+/* Value of Invitation Flags Attribute */
#define P2P_INVITATION_FLAGS_PERSISTENT BIT(0)
#define DMP_P2P_DEVCAP_SUPPORT (P2P_DEVCAP_SERVICE_DISCOVERY | \
@@ -1082,15 +1074,15 @@ enum P2P_STATE {
P2P_STATE_GONEGO_ING = 9, /* Doing the group owner negoitation handshake */
P2P_STATE_GONEGO_OK = 10, /* finish the group negoitation handshake with success */
P2P_STATE_GONEGO_FAIL = 11, /* finish the group negoitation handshake with failure */
- P2P_STATE_RECV_INVITE_REQ_MATCH = 12, /* receiving the P2P Inviation request and match with the profile. */
+ P2P_STATE_RECV_INVITE_REQ_MATCH = 12, /* receiving the P2P Invitation request and match with the profile. */
P2P_STATE_PROVISIONING_ING = 13, /* Doing the P2P WPS */
P2P_STATE_PROVISIONING_DONE = 14, /* Finish the P2P WPS */
P2P_STATE_TX_INVITE_REQ = 15, /* Transmit the P2P Invitation request */
P2P_STATE_RX_INVITE_RESP_OK = 16, /* Receiving the P2P Invitation response */
- P2P_STATE_RECV_INVITE_REQ_DISMATCH = 17, /* receiving the P2P Inviation request and dismatch with the profile. */
- P2P_STATE_RECV_INVITE_REQ_GO = 18, /* receiving the P2P Inviation request and this wifi is GO. */
- P2P_STATE_RECV_INVITE_REQ_JOIN = 19, /* receiving the P2P Inviation request to join an existing P2P Group. */
- P2P_STATE_RX_INVITE_RESP_FAIL = 20, /* recveing the P2P Inviation response with failure */
+ P2P_STATE_RECV_INVITE_REQ_DISMATCH = 17, /* receiving the P2P Invitation request and mismatch with the profile. */
+ P2P_STATE_RECV_INVITE_REQ_GO = 18, /* receiving the P2P Invitation request and this wifi is GO. */
+ P2P_STATE_RECV_INVITE_REQ_JOIN = 19, /* receiving the P2P Invitation request to join an existing P2P Group. */
+	P2P_STATE_RX_INVITE_RESP_FAIL = 20, /* receiving the P2P Invitation response with failure */
P2P_STATE_RX_INFOR_NOREADY = 21, /* receiving p2p negoitation response with information is not available */
P2P_STATE_TX_INFOR_NOREADY = 22, /* sending p2p negoitation response with information is not available */
};
diff --git a/drivers/staging/rtl8723bs/include/wlan_bssdef.h b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
index af78d97980fa..bdb14a84e5a5 100644
--- a/drivers/staging/rtl8723bs/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __WLAN_BSSDEF_H__
#define __WLAN_BSSDEF_H__
diff --git a/drivers/staging/rtl8723bs/include/xmit_osdep.h b/drivers/staging/rtl8723bs/include/xmit_osdep.h
index 46909ff7339c..377b453de199 100644
--- a/drivers/staging/rtl8723bs/include/xmit_osdep.h
+++ b/drivers/staging/rtl8723bs/include/xmit_osdep.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __XMIT_OSDEP_H_
#define __XMIT_OSDEP_H_
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 46bc2e512557..02178e25fbb8 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _IOCTL_CFG80211_C_
@@ -2472,7 +2464,7 @@ static int rtw_cfg80211_monitor_if_close(struct net_device *ndev)
return ret;
}
-static int rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
{
int ret = 0;
int rtap_len;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index b26533983864..39502156f652 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _IOCTL_LINUX_C_
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
index a4ef5789d794..da4bd5292b0a 100644
--- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index fc5e3d4739c0..ace68f023b49 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _OS_INTFS_C_
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index f4221952dd1b..e14d7cc411c9 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index b43e24c3a23a..67ec336264e5 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RECV_OSDEP_C_
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
index 49c8684dc25b..d8e7ad1ed842 100644
--- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
+++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <drv_types.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.h b/drivers/staging/rtl8723bs/os_dep/rtw_proc.h
index f633663fa790..c7e6f62b61ef 100644
--- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.h
+++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_PROC_H__
#define __RTW_PROC_H__
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 99c407ba0874..22191c9584ad 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HCI_INTF_C_
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
index 3108a625ada3..43a9d922e3aa 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*******************************************************************************/
#define _SDIO_OPS_LINUX_C_
diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
index 21e1b811f997..4da0c6f323d1 100644
--- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _XMIT_OSDEP_C_
diff --git a/drivers/staging/rtlwifi/base.c b/drivers/staging/rtlwifi/base.c
index eea00035a735..e46e47d93d7d 100644
--- a/drivers/staging/rtlwifi/base.c
+++ b/drivers/staging/rtlwifi/base.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/base.h b/drivers/staging/rtlwifi/base.h
index b7f92b32978e..4299ca181365 100644
--- a/drivers/staging/rtlwifi/base.h
+++ b/drivers/staging/rtlwifi/base.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/halbt_precomp.h b/drivers/staging/rtlwifi/btcoexist/halbt_precomp.h
index d78cd9394373..90d0f2462303 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbt_precomp.h
+++ b/drivers/staging/rtlwifi/btcoexist/halbt_precomp.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c b/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c
index 157395b85405..ade271cb4aab 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c
+++ b/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h b/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h
index 583e99dc5cc9..f1bf83001164 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h
+++ b/drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c b/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c
index 5b826403ed66..7e6071059a95 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c
+++ b/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h b/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h
index 212e0c8404fa..c99aa6ff1d7f 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h
+++ b/drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c b/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c
index 43d628a71611..ad7b6c42840b 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c
+++ b/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h b/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h
index 464774e6e7b4..5910fe1a1fb0 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h
+++ b/drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c
index 493011a54e64..4d1f9bf53c53 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h
index 8913983b8ad8..fd65de2ac8b5 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/rtl_btc.c b/drivers/staging/rtlwifi/btcoexist/rtl_btc.c
index 18a4f5b43b5a..dfd47b88e54b 100644
--- a/drivers/staging/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/staging/rtlwifi/btcoexist/rtl_btc.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2013 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/btcoexist/rtl_btc.h b/drivers/staging/rtlwifi/btcoexist/rtl_btc.h
index 8c996055de71..0141f4641ef0 100644
--- a/drivers/staging/rtlwifi/btcoexist/rtl_btc.h
+++ b/drivers/staging/rtlwifi/btcoexist/rtl_btc.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2010 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/cam.c b/drivers/staging/rtlwifi/cam.c
index ca1c9e36d976..e8572d654655 100644
--- a/drivers/staging/rtlwifi/cam.c
+++ b/drivers/staging/rtlwifi/cam.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/cam.h b/drivers/staging/rtlwifi/cam.h
index b25729e15b75..3f1d8b5a13a5 100644
--- a/drivers/staging/rtlwifi/cam.h
+++ b/drivers/staging/rtlwifi/cam.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/core.c b/drivers/staging/rtlwifi/core.c
index 3ec039498208..ca37f7511c4d 100644
--- a/drivers/staging/rtlwifi/core.c
+++ b/drivers/staging/rtlwifi/core.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/core.h b/drivers/staging/rtlwifi/core.h
index 4c2b69412621..991af1abf8ca 100644
--- a/drivers/staging/rtlwifi/core.h
+++ b/drivers/staging/rtlwifi/core.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/debug.c b/drivers/staging/rtlwifi/debug.c
index be8d72cb63db..8999feda29b4 100644
--- a/drivers/staging/rtlwifi/debug.c
+++ b/drivers/staging/rtlwifi/debug.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
@@ -106,7 +95,7 @@ static const struct file_operations file_ops_common = {
.open = dl_debug_open_common,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
static int rtl_debug_get_mac_page(struct seq_file *m, void *v)
@@ -496,18 +485,20 @@ static int rtl_debug_get_phydm_cmd(struct seq_file *m, void *v)
static int rtl_debugfs_open_rw(struct inode *inode, struct file *filp)
{
+ int ret = 0;
+
if (filp->f_mode & FMODE_READ)
- single_open(filp, rtl_debug_get_common, inode->i_private);
+ ret = single_open(filp, rtl_debug_get_common, inode->i_private);
else
filp->private_data = inode->i_private;
- return 0;
+ return ret;
}
static int rtl_debugfs_close_rw(struct inode *inode, struct file *filp)
{
if (filp->f_mode == FMODE_READ)
- seq_release(inode, filp);
+ single_release(inode, filp);
return 0;
}
@@ -530,12 +521,9 @@ static const struct file_operations file_ops_common_rw = {
#define RTL_DEBUGFS_ADD_CORE(name, mode, fopname) \
do { \
rtl_debug_priv_ ##name.rtlpriv = rtlpriv; \
- if (!debugfs_create_file(#name, mode, \
- parent, &rtl_debug_priv_ ##name, \
- &file_ops_ ##fopname)) \
- pr_err("Unable to initialize debugfs:%s/%s\n", \
- rtlpriv->dbg.debugfs_name, \
- #name); \
+ debugfs_create_file(#name, mode, parent, \
+ &rtl_debug_priv_ ##name, \
+ &file_ops_ ##fopname); \
} while (0)
#define RTL_DEBUGFS_ADD(name) \
diff --git a/drivers/staging/rtlwifi/debug.h b/drivers/staging/rtlwifi/debug.h
index ac942477f629..666d7bc80c48 100644
--- a/drivers/staging/rtlwifi/debug.h
+++ b/drivers/staging/rtlwifi/debug.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/efuse.c b/drivers/staging/rtlwifi/efuse.c
index d74c80d512c9..d7c7d146a84d 100644
--- a/drivers/staging/rtlwifi/efuse.c
+++ b/drivers/staging/rtlwifi/efuse.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/efuse.h b/drivers/staging/rtlwifi/efuse.h
index 0a23305b0b7a..5335d3ee6da7 100644
--- a/drivers/staging/rtlwifi/efuse.h
+++ b/drivers/staging/rtlwifi/efuse.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_2_platform.h b/drivers/staging/rtlwifi/halmac/halmac_2_platform.h
index 26e60cb873eb..262304deb7fc 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_2_platform.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_2_platform.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h
index 04e44aed9b45..9013baefcede 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c
index b2a5aed75dca..c68b9e82c2e7 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c
index 0edd1f5a04a8..08f6536840cf 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h
index 79a6072ef2ef..03bbec32a3e3 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c
index 6b729fe4c096..aea481bb2403 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h
index cf21e3d25607..072cd40fd339 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c
index e25e2b0ebb4c..a716fb532170 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h
index c68ea0039703..b47c50863f06 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c
index 4d708d841bad..a6b6d7fa2689 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h
index 07ffb3baf7c0..75c83f7f031e 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c
index 5f27eb172430..2eaf362ca8c3 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h
index 3a99fd5675e0..8ba7bee0a99b 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c
index 5f1dff8d9e3b..bcc402838bc0 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h
index 5ac2b15477c0..8488fc5f98ee 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h
index ea1206744902..ec9b10277450 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
index 448b1379d220..acd7930e417d 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h
index 5debd1ff3abd..6c6eb85a09a3 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c
index fa97cac34742..8462f23b652e 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h
index 34969fc5c03e..dc4d98bcc68c 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c
index 69b26a5a3cf3..979821ea54a1 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h
index ee441eee24d6..2a891b0f6ab8 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c
index 17d7c3cc62ec..0bd6abdd0a68 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h
index a3d2a6abd91b..befa4a5415db 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
index c4cb217d3d1f..53f55f129a76 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h
index 1b59301d1158..86d59d9b76f3 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_api.c b/drivers/staging/rtlwifi/halmac/halmac_api.c
index 0886a4611da0..e75eb42009c8 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_api.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_api.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_api.h b/drivers/staging/rtlwifi/halmac/halmac_api.h
index 917a64601053..4922cc8ce6f2 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_api.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_api.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_bit2.h b/drivers/staging/rtlwifi/halmac/halmac_bit2.h
index 1c7fe5d7df64..5f0f8528d136 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_bit2.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_bit2.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h b/drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h
index 7d02553f229e..481ea6d01ca5 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_fw_info.h b/drivers/staging/rtlwifi/halmac/halmac_fw_info.h
index dad8be311ff2..eb4558d0b62c 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_fw_info.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_fw_info.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h b/drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h
index 0e99967f3663..45b5f8780baf 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h b/drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h
index 7adc3cdb38c9..58e47584cedc 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h b/drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h
index 5f23cba6d067..04cee38c33f9 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h b/drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h
index 273d4c0e338a..bc9fb8e8bf5a 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h b/drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h
index 4331e2ae14c2..f58077539e33 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h b/drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h
index db7aac4de843..ce39f0868419 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h b/drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h
index 41780676508e..a2552b27367b 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h b/drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h
index 13a65a4754b0..802142be607d 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_reg2.h b/drivers/staging/rtlwifi/halmac/halmac_reg2.h
index bebf974ed949..34ab11d8d97b 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_reg2.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_reg2.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h b/drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h
index 4bc59b127412..48d5dc0df858 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h b/drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h
index 59ff1fecf73f..d5ff89c91ab3 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h b/drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h
index 62817d808fbb..c030f2597176 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h b/drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h
index 442120a14839..5f960e2ae8c7 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h b/drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h
index 8256c3605072..413004eef0b7 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h b/drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h
index 8967699e3784..7760a6b42d98 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h b/drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h
index d5c9da247ca3..e1cfcfdf1990 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h b/drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h
index 43c2261ab083..fd3f80bd752d 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h b/drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h
index fd1aa39c4bed..ca32f1b5f80a 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h b/drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h
index 02177c5faddf..73b973d90137 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_type.h b/drivers/staging/rtlwifi/halmac/halmac_type.h
index 0bf842435080..51d758b6433b 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_type.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_type.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/halmac_usb_reg.h b/drivers/staging/rtlwifi/halmac/halmac_usb_reg.h
index d6e721ea7463..27910a4adb4e 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_usb_reg.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_usb_reg.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/rtl_halmac.c b/drivers/staging/rtlwifi/halmac/rtl_halmac.c
index 66f0a6dfc52c..ae433aa6ebbb 100644
--- a/drivers/staging/rtlwifi/halmac/rtl_halmac.c
+++ b/drivers/staging/rtlwifi/halmac/rtl_halmac.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/halmac/rtl_halmac.h b/drivers/staging/rtlwifi/halmac/rtl_halmac.h
index 51a3684f30d8..aa511dad8d16 100644
--- a/drivers/staging/rtlwifi/halmac/rtl_halmac.h
+++ b/drivers/staging/rtlwifi/halmac/rtl_halmac.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/pci.c b/drivers/staging/rtlwifi/pci.c
index d56810eabde7..4bb5703bd715 100644
--- a/drivers/staging/rtlwifi/pci.c
+++ b/drivers/staging/rtlwifi/pci.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/pci.h b/drivers/staging/rtlwifi/pci.h
index 3fb56c845a61..7535ac24bfbb 100644
--- a/drivers/staging/rtlwifi/pci.h
+++ b/drivers/staging/rtlwifi/pci.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/halphyrf_ce.c b/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
index 5986892e767e..f77847c4206a 100644
--- a/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
+++ b/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/halphyrf_ce.h b/drivers/staging/rtlwifi/phydm/halphyrf_ce.h
index e5d6257efb2b..c542efc7d0e0 100644
--- a/drivers/staging/rtlwifi/phydm/halphyrf_ce.h
+++ b/drivers/staging/rtlwifi/phydm/halphyrf_ce.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/mp_precomp.h b/drivers/staging/rtlwifi/phydm/mp_precomp.h
index b313de511ed6..8e9caca695ff 100644
--- a/drivers/staging/rtlwifi/phydm/mp_precomp.h
+++ b/drivers/staging/rtlwifi/phydm/mp_precomp.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm.c b/drivers/staging/rtlwifi/phydm/phydm.c
index 8b2a180cc13c..985978d3decc 100644
--- a/drivers/staging/rtlwifi/phydm/phydm.c
+++ b/drivers/staging/rtlwifi/phydm/phydm.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm.h b/drivers/staging/rtlwifi/phydm/phydm.h
index 5812ff427ead..8c3ad3f56273 100644
--- a/drivers/staging/rtlwifi/phydm/phydm.h
+++ b/drivers/staging/rtlwifi/phydm/phydm.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_acs.c b/drivers/staging/rtlwifi/phydm/phydm_acs.c
index eae5a0a24b9b..f47b245e77e3 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_acs.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_acs.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_acs.h b/drivers/staging/rtlwifi/phydm/phydm_acs.h
index 51d72b72bd6f..c6516b871fdb 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_acs.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_acs.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
index 103a774f9c8f..58ec3999391c 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.h b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.h
index fdb39b4f9df2..a88c34cd30e0 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c b/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c
index 158dd5d05de4..42020101380a 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h b/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h
index 460931489be3..300a22b075e0 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_antdiv.c b/drivers/staging/rtlwifi/phydm/phydm_antdiv.c
index 39d3c6947556..5a62e6b73a10 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_antdiv.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_antdiv.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_antdiv.h b/drivers/staging/rtlwifi/phydm/phydm_antdiv.h
index ebbff2f56c5e..4a58163bda4c 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_antdiv.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_antdiv.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_beamforming.h b/drivers/staging/rtlwifi/phydm/phydm_beamforming.h
index adc04ba4e218..a0bcdb620698 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_beamforming.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_beamforming.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_ccx.c b/drivers/staging/rtlwifi/phydm/phydm_ccx.c
index 2e0dc68757dc..57138606d9be 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_ccx.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_ccx.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_ccx.h b/drivers/staging/rtlwifi/phydm/phydm_ccx.h
index a3517f4642f9..b3e3e0bae582 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_ccx.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_ccx.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_cfotracking.c b/drivers/staging/rtlwifi/phydm/phydm_cfotracking.c
index 2ec8444f31a7..cf35601efe94 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_cfotracking.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_cfotracking.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_cfotracking.h b/drivers/staging/rtlwifi/phydm/phydm_cfotracking.h
index e8436a31019d..1ab015669dea 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_cfotracking.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_cfotracking.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_debug.c b/drivers/staging/rtlwifi/phydm/phydm_debug.c
index e18ba2cca2bd..b5b69d5f1a41 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_debug.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_debug.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_debug.h b/drivers/staging/rtlwifi/phydm/phydm_debug.h
index f442f7c19595..1010bf61ca3c 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_debug.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_debug.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dfs.h b/drivers/staging/rtlwifi/phydm/phydm_dfs.h
index 59a1d08cf381..c0358253d79a 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dfs.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_dfs.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dig.c b/drivers/staging/rtlwifi/phydm/phydm_dig.c
index f851ff12dc35..3115e7bdc749 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dig.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_dig.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dig.h b/drivers/staging/rtlwifi/phydm/phydm_dig.h
index af70aaec3b19..f618b4dd1fd3 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dig.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_dig.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h b/drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h
index 9f3cb2468c02..61e29df5c3f9 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c b/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c
index 7661c499aeb1..d3f74d1506b4 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h b/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h
index e7394c475395..3ea68066ccdb 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c b/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c
index ebb43342b80b..afe650e5313f 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h b/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h
index 10bad1209db2..afde69db6ad2 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c b/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c
index 753a9b9834e4..cd12512628c0 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h b/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h
index 5845b108a001..c10b5fcc6f4e 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_features.h b/drivers/staging/rtlwifi/phydm/phydm_features.h
index a12361c6a1a0..b4ff293280f7 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_features.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_features.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_hwconfig.c b/drivers/staging/rtlwifi/phydm/phydm_hwconfig.c
index 0a1f11a926e4..4bf86e5a451f 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_hwconfig.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_hwconfig.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_hwconfig.h b/drivers/staging/rtlwifi/phydm/phydm_hwconfig.h
index ec94c61df2b9..6ad5e0292a97 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_hwconfig.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_hwconfig.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_interface.c b/drivers/staging/rtlwifi/phydm/phydm_interface.c
index 2f9bf6708c54..f5ecde505153 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_interface.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_interface.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_interface.h b/drivers/staging/rtlwifi/phydm/phydm_interface.h
index 53ba5585bf33..6ef289201d9d 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_interface.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_interface.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_iqk.h b/drivers/staging/rtlwifi/phydm/phydm_iqk.h
index 0d45bf099aeb..0ed21e06fc33 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_iqk.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_iqk.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_kfree.c b/drivers/staging/rtlwifi/phydm/phydm_kfree.c
index 5f3582341806..1a52500f97a1 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_kfree.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_kfree.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_kfree.h b/drivers/staging/rtlwifi/phydm/phydm_kfree.h
index fa1627e3662d..feeeb69d5202 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_kfree.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_kfree.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c b/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c
index 8d79a5add1b4..63f52623a8cf 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h b/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h
index a711b7954985..7bce088678d3 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c b/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c
index 48e73eb1622b..c98de4bb3c57 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h b/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h
index 757d7720d931..eb635de2d693 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_pre_define.h b/drivers/staging/rtlwifi/phydm/phydm_pre_define.h
index 6c301fe87b3d..ce9a076b32cb 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_pre_define.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_pre_define.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_precomp.h b/drivers/staging/rtlwifi/phydm/phydm_precomp.h
index bada15c4d2d8..39988d532340 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_precomp.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_precomp.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_psd.c b/drivers/staging/rtlwifi/phydm/phydm_psd.c
index 48f8776bc8f9..badc514ac0be 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_psd.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_psd.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_psd.h b/drivers/staging/rtlwifi/phydm/phydm_psd.h
index aeb70751d80b..0fd45c1cb6ec 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_psd.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_psd.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_rainfo.c b/drivers/staging/rtlwifi/phydm/phydm_rainfo.c
index e10a91aeebee..b46791a727c7 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_rainfo.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_rainfo.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
@@ -722,7 +711,7 @@ void phydm_update_hal_ra_mask(void *dm_void, u32 wireless_mode, u8 rf_type,
phydm_BW = phydm_trans_platform_bw(dm, BW);
ODM_RT_TRACE(dm, ODM_COMP_RA_MASK,
- "Platfoem original RA Mask = (( 0x %x | %x ))\n",
+ "Platform original RA Mask = (( 0x %x | %x ))\n",
ratr_bitmap_msb, ratr_bitmap);
switch (wireless_mode) {
@@ -996,7 +985,7 @@ static void phydm_ra_common_info_update(void *dm_void)
}
ODM_RT_TRACE(
dm, ODM_COMP_RATE_ADAPTIVE,
- "MACID[%d], Highest Tx order Update for power traking: %d\n",
+ "MACID[%d], Highest Tx order Update for power tracking: %d\n",
(ra_tab->highest_client_tx_rate_order),
(ra_tab->highest_client_tx_order));
}
diff --git a/drivers/staging/rtlwifi/phydm/phydm_rainfo.h b/drivers/staging/rtlwifi/phydm/phydm_rainfo.h
index c14ed9bda0af..6c1f30e758bc 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_rainfo.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_rainfo.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_reg.h b/drivers/staging/rtlwifi/phydm/phydm_reg.h
index d9d878e4c925..562c1199d669 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_reg.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_reg.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h b/drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h
index 28d48415ac99..5b59dffc72a5 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h b/drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h
index 0b6581c50ab3..765e0a0c8c7b 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/phydm_types.h b/drivers/staging/rtlwifi/phydm/phydm_types.h
index a34ebe876528..082bb03a99d4 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_types.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_types.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
index 29d19f2b300e..52a113d731d9 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h
index 53431998b47e..a12745051678 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
index 70924f002541..aed97e437e76 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h
index d02fdd7a4a53..2f8107bd0205 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
index 0ff3a9a712d6..b8d33d7637b5 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h
index 1340fa9f369b..5e259846c67f 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c
index ae3e2278fefd..9e92a81dc6d1 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
@@ -29,7 +18,7 @@
static bool
get_mix_mode_tx_agc_bb_swing_offset_8822b(void *dm_void,
enum pwrtrack_method method,
- u8 rf_path, u8 tx_power_index_offest)
+ u8 rf_path, u8 tx_power_index_offset)
{
struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
@@ -42,24 +31,24 @@ get_mix_mode_tx_agc_bb_swing_offset_8822b(void *dm_void,
ODM_RT_TRACE(
dm, ODM_COMP_TX_PWR_TRACK,
- "Path_%d cali_info->absolute_ofdm_swing_idx[rf_path]=%d, tx_power_index_offest=%d\n",
+ "Path_%d cali_info->absolute_ofdm_swing_idx[rf_path]=%d, tx_power_index_offset=%d\n",
rf_path, cali_info->absolute_ofdm_swing_idx[rf_path],
- tx_power_index_offest);
+ tx_power_index_offset);
- if (tx_power_index_offest > 0XF)
- tx_power_index_offest = 0XF;
+ if (tx_power_index_offset > 0XF)
+ tx_power_index_offset = 0XF;
if (cali_info->absolute_ofdm_swing_idx[rf_path] >= 0 &&
cali_info->absolute_ofdm_swing_idx[rf_path] <=
- tx_power_index_offest) {
+ tx_power_index_offset) {
tx_agc_index = cali_info->absolute_ofdm_swing_idx[rf_path];
tx_bb_swing_index = cali_info->default_ofdm_index;
} else if (cali_info->absolute_ofdm_swing_idx[rf_path] >
- tx_power_index_offest) {
- tx_agc_index = tx_power_index_offest;
+ tx_power_index_offset) {
+ tx_agc_index = tx_power_index_offset;
cali_info->remnant_ofdm_swing_idx[rf_path] =
cali_info->absolute_ofdm_swing_idx[rf_path] -
- tx_power_index_offest;
+ tx_power_index_offset;
tx_bb_swing_index = cali_info->default_ofdm_index +
cali_info->remnant_ofdm_swing_idx[rf_path];
@@ -85,9 +74,9 @@ get_mix_mode_tx_agc_bb_swing_offset_8822b(void *dm_void,
ODM_RT_TRACE(
dm, ODM_COMP_TX_PWR_TRACK,
- "MixMode Offset Path_%d cali_info->absolute_ofdm_swing_idx[rf_path]=%d cali_info->bb_swing_idx_ofdm[rf_path]=%d tx_power_index_offest=%d\n",
+ "MixMode Offset Path_%d cali_info->absolute_ofdm_swing_idx[rf_path]=%d cali_info->bb_swing_idx_ofdm[rf_path]=%d tx_power_index_offset=%d\n",
rf_path, cali_info->absolute_ofdm_swing_idx[rf_path],
- cali_info->bb_swing_idx_ofdm[rf_path], tx_power_index_offest);
+ cali_info->bb_swing_idx_ofdm[rf_path], tx_power_index_offset);
return true;
}
@@ -97,7 +86,7 @@ void odm_tx_pwr_track_set_pwr8822b(void *dm_void, enum pwrtrack_method method,
{
struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
struct dm_rf_calibration_struct *cali_info = &dm->rf_calibrate_info;
- u8 tx_power_index_offest = 0;
+ u8 tx_power_index_offset = 0;
u8 tx_power_index = 0;
struct rtl_priv *rtlpriv = (struct rtl_priv *)dm->adapter;
@@ -139,11 +128,11 @@ void odm_tx_pwr_track_set_pwr8822b(void *dm_void, enum pwrtrack_method method,
if (tx_power_index >= 63)
tx_power_index = 63;
- tx_power_index_offest = 63 - tx_power_index;
+ tx_power_index_offset = 63 - tx_power_index;
ODM_RT_TRACE(dm, ODM_COMP_TX_PWR_TRACK,
- "tx_power_index=%d tx_power_index_offest=%d rf_path=%d\n",
- tx_power_index, tx_power_index_offest, rf_path);
+ "tx_power_index=%d tx_power_index_offset=%d rf_path=%d\n",
+ tx_power_index, tx_power_index_offset, rf_path);
if (method ==
BBSWING) { /*use for mp driver clean power tracking status*/
@@ -178,7 +167,7 @@ void odm_tx_pwr_track_set_pwr8822b(void *dm_void, enum pwrtrack_method method,
switch (rf_path) {
case ODM_RF_PATH_A:
get_mix_mode_tx_agc_bb_swing_offset_8822b(
- dm, method, rf_path, tx_power_index_offest);
+ dm, method, rf_path, tx_power_index_offset);
odm_set_bb_reg(
dm, 0xC94, (BIT(29) | BIT(28) | BIT(27) |
BIT(26) | BIT(25)),
@@ -201,7 +190,7 @@ void odm_tx_pwr_track_set_pwr8822b(void *dm_void, enum pwrtrack_method method,
case ODM_RF_PATH_B:
get_mix_mode_tx_agc_bb_swing_offset_8822b(
- dm, method, rf_path, tx_power_index_offest);
+ dm, method, rf_path, tx_power_index_offset);
odm_set_bb_reg(
dm, 0xE94, (BIT(29) | BIT(28) | BIT(27) |
BIT(26) | BIT(25)),
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h
index 4f3bfe316ee9..794ee33ea7df 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c
index 26d1022e851c..776096164b80 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h
index 279ef06298e2..5c5370af6ece 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c
index e2c72af16150..3ce49322b686 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h
index ea19deb512d5..246518e8bf8f 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c
index 644fca822c61..8f96c77974cc 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h
index 4817cf6b1ed9..4606427bd273 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c
index 59adabda09de..a05c8aa53b0e 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h
index af91a6f958ed..788258e8c3d1 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h b/drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h
index ad0d32fce0a9..53fd51aacdf2 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
index 85e490d3601f..9930ed954abb 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.h b/drivers/staging/rtlwifi/phydm/rtl_phydm.h
index 483d2418699b..b98d502ef196 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.h
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h b/drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h
index 6cacca12d792..b85c5e17efdf 100644
--- a/drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h
+++ b/drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h b/drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h
index 5c92c4326f7e..2554fcc991de 100644
--- a/drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h
+++ b/drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h b/drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h
index 82aeac1ff3e0..cf1ced07e138 100644
--- a/drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h
+++ b/drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h b/drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h
index c5ddd9cb9cd5..4b30f062b7f7 100644
--- a/drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h
+++ b/drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h b/drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h
index 41358fce2875..278eb5d3bd4a 100644
--- a/drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h
+++ b/drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/ps.c b/drivers/staging/rtlwifi/ps.c
index 7856fc5d10bd..0ca0532c73da 100644
--- a/drivers/staging/rtlwifi/ps.c
+++ b/drivers/staging/rtlwifi/ps.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/ps.h b/drivers/staging/rtlwifi/ps.h
index 6c187daced4a..badd0fa7ece6 100644
--- a/drivers/staging/rtlwifi/ps.h
+++ b/drivers/staging/rtlwifi/ps.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/pwrseqcmd.h b/drivers/staging/rtlwifi/pwrseqcmd.h
index f411b7ebb08f..bd8ae84aca4f 100644
--- a/drivers/staging/rtlwifi/pwrseqcmd.h
+++ b/drivers/staging/rtlwifi/pwrseqcmd.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rc.c b/drivers/staging/rtlwifi/rc.c
index c835be91f398..3ebfc67ee345 100644
--- a/drivers/staging/rtlwifi/rc.c
+++ b/drivers/staging/rtlwifi/rc.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rc.h b/drivers/staging/rtlwifi/rc.h
index dcc8520866b7..7f631175fac3 100644
--- a/drivers/staging/rtlwifi/rc.h
+++ b/drivers/staging/rtlwifi/rc.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/regd.c b/drivers/staging/rtlwifi/regd.c
index e0a3ff85edb6..3afd206ce4b1 100644
--- a/drivers/staging/rtlwifi/regd.c
+++ b/drivers/staging/rtlwifi/regd.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/regd.h b/drivers/staging/rtlwifi/regd.h
index 5626015a6d0d..c19e87936ad3 100644
--- a/drivers/staging/rtlwifi/regd.h
+++ b/drivers/staging/rtlwifi/regd.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/def.h b/drivers/staging/rtlwifi/rtl8822be/def.h
index 7942ddfdcf43..596f73691d55 100644
--- a/drivers/staging/rtlwifi/rtl8822be/def.h
+++ b/drivers/staging/rtlwifi/rtl8822be/def.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index 483ea85943c3..efec7281511c 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.h b/drivers/staging/rtlwifi/rtl8822be/fw.h
index 3ad7a66e80a3..6e7eb52dba2d 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.h
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c
index 74386003044f..7947edb239a1 100644
--- a/drivers/staging/rtlwifi/rtl8822be/hw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/hw.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.h b/drivers/staging/rtlwifi/rtl8822be/hw.h
index a91c276c5794..cf3536113f06 100644
--- a/drivers/staging/rtlwifi/rtl8822be/hw.h
+++ b/drivers/staging/rtlwifi/rtl8822be/hw.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/led.c b/drivers/staging/rtlwifi/rtl8822be/led.c
index 0054c892dce6..6d6e1f271e18 100644
--- a/drivers/staging/rtlwifi/rtl8822be/led.c
+++ b/drivers/staging/rtlwifi/rtl8822be/led.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/led.h b/drivers/staging/rtlwifi/rtl8822be/led.h
index 9c0a2290df7d..9a19e17cf3a4 100644
--- a/drivers/staging/rtlwifi/rtl8822be/led.h
+++ b/drivers/staging/rtlwifi/rtl8822be/led.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/phy.c b/drivers/staging/rtlwifi/rtl8822be/phy.c
index 6697aee9317f..048904d783fc 100644
--- a/drivers/staging/rtlwifi/rtl8822be/phy.c
+++ b/drivers/staging/rtlwifi/rtl8822be/phy.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/phy.h b/drivers/staging/rtlwifi/rtl8822be/phy.h
index 5c33f16bcaa4..f33b086a0167 100644
--- a/drivers/staging/rtlwifi/rtl8822be/phy.h
+++ b/drivers/staging/rtlwifi/rtl8822be/phy.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/reg.h b/drivers/staging/rtlwifi/rtl8822be/reg.h
index 0dca5dccf49a..8f0ec5b18c33 100644
--- a/drivers/staging/rtlwifi/rtl8822be/reg.h
+++ b/drivers/staging/rtlwifi/rtl8822be/reg.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/sw.c b/drivers/staging/rtlwifi/rtl8822be/sw.c
index 91b784b6d1c5..7825e85ed091 100644
--- a/drivers/staging/rtlwifi/rtl8822be/sw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/sw.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/sw.h b/drivers/staging/rtlwifi/rtl8822be/sw.h
index 931eba98bd80..0983a8e9605b 100644
--- a/drivers/staging/rtlwifi/rtl8822be/sw.h
+++ b/drivers/staging/rtlwifi/rtl8822be/sw.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/trx.c b/drivers/staging/rtlwifi/rtl8822be/trx.c
index 87e15e419252..8fff2ea344eb 100644
--- a/drivers/staging/rtlwifi/rtl8822be/trx.c
+++ b/drivers/staging/rtlwifi/rtl8822be/trx.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/rtl8822be/trx.h b/drivers/staging/rtlwifi/rtl8822be/trx.h
index db769f3c4cd6..d7ba7f3e58b7 100644
--- a/drivers/staging/rtlwifi/rtl8822be/trx.h
+++ b/drivers/staging/rtlwifi/rtl8822be/trx.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2016 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/stats.c b/drivers/staging/rtlwifi/stats.c
index 96eb14c92c01..149b665a0e5c 100644
--- a/drivers/staging/rtlwifi/stats.c
+++ b/drivers/staging/rtlwifi/stats.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/stats.h b/drivers/staging/rtlwifi/stats.h
index bd0108f93182..aa4f30d40af0 100644
--- a/drivers/staging/rtlwifi/stats.h
+++ b/drivers/staging/rtlwifi/stats.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h
index a23bb1719e35..012fb618840b 100644
--- a/drivers/staging/rtlwifi/wifi.h
+++ b/drivers/staging/rtlwifi/wifi.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/sm750fb/TODO b/drivers/staging/sm750fb/TODO
index f710ab15abfe..481409eb3fb3 100644
--- a/drivers/staging/sm750fb/TODO
+++ b/drivers/staging/sm750fb/TODO
@@ -9,8 +9,11 @@ TODO:
- must be ported to the atomic kms framework in the drm subsystem (which will
give you a basic fbdev driver for free)
+Note:
+- This driver will be removed from staging after the drm driver is ready
+- The drm driver is getting ready at https://gitlab.com/sudipm/sm750/tree/sm750
+
Please send any patches to
Greg Kroah-Hartman <greg@kroah.com>
Sudip Mukherjee <sudipm.mukherjee@gmail.com>
Teddy Wang <teddy.wang@siliconmotion.com>
- Sudip Mukherjee <sudip@vectorindia.org>
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index 461f131644a2..1371ced2f5ca 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -77,6 +77,10 @@ void synth_buffer_add(u16 ch)
*buff_in++ = ch;
if (buff_in > buffer_end)
buff_in = synth_buffer;
+ /* We have written something to the speech synthesis, so we are not
+ * paused any more.
+ */
+ spk_paused = false;
}
u16 synth_buffer_getc(void)
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index af30b7099bed..869f40ebf1a7 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -67,6 +67,8 @@ short spk_punc_mask;
int spk_punc_level, spk_reading_punc;
char spk_str_caps_start[MAXVARLEN + 1] = "\0";
char spk_str_caps_stop[MAXVARLEN + 1] = "\0";
+char spk_str_pause[MAXVARLEN + 1] = "\0";
+bool spk_paused;
const struct st_bits_data spk_punc_info[] = {
{"none", "", 0},
{"some", "/$%&@", SOME},
@@ -1782,6 +1784,10 @@ static void speakup_con_update(struct vc_data *vc)
/* Speakup output, discard */
return;
speakup_date(vc);
+ if (vc->vc_mode == KD_GRAPHICS && !spk_paused && spk_str_pause[0]) {
+ synth_printf("%s", spk_str_pause);
+ spk_paused = true;
+ }
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index 3d8bda8b9620..e4f4f00be2dc 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -94,7 +94,8 @@ extern struct spk_synth *synth;
extern char spk_pitch_buff[];
extern u_char *spk_our_keys[];
extern short spk_punc_masks[];
-extern char spk_str_caps_start[], spk_str_caps_stop[];
+extern char spk_str_caps_start[], spk_str_caps_stop[], spk_str_pause[];
+extern bool spk_paused;
extern const struct st_bits_data spk_punc_info[];
extern u_char spk_key_buf[600];
extern char *spk_characters[];
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index aa0c900f79f2..7df1a84297f6 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -21,6 +21,7 @@
static struct var_t vars[] = {
{ CAPS_START, .u.s = {"CAPS_START\n" } },
{ CAPS_STOP, .u.s = {"CAPS_STOP\n" } },
+ { PAUSE, .u.s = {"PAUSE\n"} },
{ RATE, .u.n = {"RATE %d\n", 8, 1, 16, 0, 0, NULL } },
{ PITCH, .u.n = {"PITCH %d\n", 8, 0, 16, 0, 0, NULL } },
{ VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } },
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index 0a1a7c259ab0..a61bc41b82d7 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -35,6 +35,7 @@ static int misc_registered;
static struct var_t vars[] = {
{ CAPS_START, .u.s = {"\x01+3p" } },
{ CAPS_STOP, .u.s = {"\x01-3p" } },
+ { PAUSE, .u.s = {"\x01P" } },
{ RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } },
{ PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } },
{ VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
@@ -154,7 +155,7 @@ static char *get_initstring(void)
var = synth_soft.vars;
while (var->var_id != MAXVARS) {
if (var->var_id != CAPS_START && var->var_id != CAPS_STOP &&
- var->var_id != DIRECT)
+ var->var_id != PAUSE && var->var_id != DIRECT)
cp = cp + sprintf(cp, var->u.n.synth_fmt,
var->u.n.value);
var++;
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index 4203bed90b4f..3e082dc3d45c 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -42,7 +42,7 @@ enum var_id_t {
SAY_CONTROL, SAY_WORD_CTL, NO_INTERRUPT, KEY_ECHO,
SPELL_DELAY, PUNC_LEVEL, READING_PUNC,
ATTRIB_BLEEP, BLEEPS,
- RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG, DIRECT,
+ RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG, DIRECT, PAUSE,
CAPS_START, CAPS_STOP, CHARTAB,
MAXVARS
};
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 321405532a8e..54a76b6752ad 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -45,6 +45,7 @@ static struct st_var_header var_headers[] = {
{ "lang", LANG, VAR_NUM, NULL, NULL },
{ "chartab", CHARTAB, VAR_PROC, NULL, NULL },
{ "direct", DIRECT, VAR_NUM, NULL, NULL },
+ { "pause", PAUSE, VAR_STRING, spk_str_pause, NULL },
};
static struct st_var_header *var_ptrs[MAXVARS] = { NULL, NULL, NULL };
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 92dceb557886..3647b8f1ed28 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -849,7 +849,7 @@ static bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
*
* Return: NETDEV_TX_OK.
*/
-static int visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct visornic_devdata *devdata;
int len, firstfraglen, padlen;
@@ -2126,30 +2126,19 @@ static struct visor_driver visornic_driver = {
*/
static int visornic_init(void)
{
- struct dentry *ret;
- int err = -ENOMEM;
+ int err;
visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
- if (!visornic_debugfs_dir)
- return err;
- ret = debugfs_create_file("info", 0400, visornic_debugfs_dir, NULL,
- &debugfs_info_fops);
- if (!ret)
- goto cleanup_debugfs;
- ret = debugfs_create_file("enable_ints", 0200, visornic_debugfs_dir,
- NULL, &debugfs_enable_ints_fops);
- if (!ret)
- goto cleanup_debugfs;
+ debugfs_create_file("info", 0400, visornic_debugfs_dir, NULL,
+ &debugfs_info_fops);
+ debugfs_create_file("enable_ints", 0200, visornic_debugfs_dir, NULL,
+ &debugfs_enable_ints_fops);
err = visorbus_register_visor_driver(&visornic_driver);
if (err)
- goto cleanup_debugfs;
+ debugfs_remove_recursive(visornic_debugfs_dir);
- return 0;
-
-cleanup_debugfs:
- debugfs_remove_recursive(visornic_debugfs_dir);
return err;
}
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
index 973b3bcc04b1..9d2018cd544e 100644
--- a/drivers/staging/vboxvideo/vbox_main.c
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -61,7 +61,8 @@ void vbox_enable_accel(struct vbox_private *vbox)
if (vbox->vbva_info[i].vbva)
continue;
- vbva = (void __force *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
+ vbva = (void __force *)vbox->vbva_buffers +
+ i * VBVA_MIN_BUFFER_SIZE;
if (!vbva_enable(&vbox->vbva_info[i],
vbox->guest_pool, vbva, i)) {
/* very old host or driver error. */
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index b265fe924556..5c7ea237893e 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -573,7 +573,7 @@ static int vbox_get_modes(struct drm_connector *connector)
return num_modes;
}
-static int vbox_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status vbox_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index f5aaf7d629f0..98064ce2c2b4 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,6 +1,5 @@
menuconfig BCM_VIDEOCORE
tristate "Broadcom VideoCore support"
- depends on HAS_DMA
depends on OF
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
default y
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index fb26b826e640..afe43fa5a6d7 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -13,5 +13,5 @@ vchiq-objs := \
obj-$(CONFIG_SND_BCM2835) += bcm2835-audio/
obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-camera/
-ccflags-y += -DVCOS_VERIFY_BKPTS=1 -Idrivers/staging/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
+ccflags-y += -Idrivers/staging/vc04_services -D__VCCOREVER__=0x04000000
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index a4a48f31f1a3..f0cefa1b7b0f 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -22,7 +22,6 @@
/* ---- Include Files -------------------------------------------------------- */
-#include "interface/vchi/vchi.h"
#include "vc_vchi_audioserv_defs.h"
/* ---- Private Constants and Types ------------------------------------------ */
@@ -360,14 +359,46 @@ static int vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance)
return 0;
}
+int bcm2835_new_vchi_ctx(struct bcm2835_vchi_ctx *vchi_ctx)
+{
+ int ret;
+
+ /* Initialize and create a VCHI connection */
+ ret = vchi_initialise(&vchi_ctx->vchi_instance);
+ if (ret) {
+ LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
+ __func__, ret);
+
+ return -EIO;
+ }
+
+ ret = vchi_connect(NULL, 0, vchi_ctx->vchi_instance);
+ if (ret) {
+ LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
+ __func__, ret);
+
+ kfree(vchi_ctx->vchi_instance);
+ vchi_ctx->vchi_instance = NULL;
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void bcm2835_free_vchi_ctx(struct bcm2835_vchi_ctx *vchi_ctx)
+{
+ /* Close the VCHI connection - it will also free vchi_instance */
+ WARN_ON(vchi_disconnect(vchi_ctx->vchi_instance));
+
+ vchi_ctx->vchi_instance = NULL;
+}
+
static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream)
{
- static VCHI_INSTANCE_T vchi_instance;
- static VCHI_CONNECTION_T *vchi_connection;
- static int initted;
struct bcm2835_audio_instance *instance =
(struct bcm2835_audio_instance *)alsa_stream->instance;
- int ret;
+ struct bcm2835_vchi_ctx *vhci_ctx = alsa_stream->chip->vchi_ctx;
LOG_INFO("%s: start\n", __func__);
BUG_ON(instance);
@@ -379,28 +410,9 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
return 0;
}
- /* Initialize and create a VCHI connection */
- if (!initted) {
- ret = vchi_initialise(&vchi_instance);
- if (ret) {
- LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
- __func__, ret);
-
- return -EIO;
- }
- ret = vchi_connect(NULL, 0, vchi_instance);
- if (ret) {
- LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
- __func__, ret);
-
- kfree(vchi_instance);
- return -EIO;
- }
- initted = 1;
- }
-
/* Initialize an instance of the audio service */
- instance = vc_vchi_audio_init(vchi_instance, &vchi_connection, 1);
+ instance = vc_vchi_audio_init(vhci_ctx->vchi_instance,
+ &vhci_ctx->vchi_connection, 1);
if (IS_ERR(instance)) {
LOG_ERR("%s: failed to initialize audio service\n", __func__);
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
index 0ed21dd08170..da0fa34501fa 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
@@ -54,6 +54,36 @@ static int snd_devm_add_child(struct device *dev, struct device *child)
return 0;
}
+static void bcm2835_devm_free_vchi_ctx(struct device *dev, void *res)
+{
+ struct bcm2835_vchi_ctx *vchi_ctx = res;
+
+ bcm2835_free_vchi_ctx(vchi_ctx);
+}
+
+static int bcm2835_devm_add_vchi_ctx(struct device *dev)
+{
+ struct bcm2835_vchi_ctx *vchi_ctx;
+ int ret;
+
+ vchi_ctx = devres_alloc(bcm2835_devm_free_vchi_ctx, sizeof(*vchi_ctx),
+ GFP_KERNEL);
+ if (!vchi_ctx)
+ return -ENOMEM;
+
+ memset(vchi_ctx, 0, sizeof(*vchi_ctx));
+
+ ret = bcm2835_new_vchi_ctx(vchi_ctx);
+ if (ret) {
+ devres_free(vchi_ctx);
+ return ret;
+ }
+
+ devres_add(dev, vchi_ctx);
+
+ return 0;
+}
+
static void snd_bcm2835_release(struct device *dev)
{
struct bcm2835_chip *chip = dev_get_drvdata(dev);
@@ -95,8 +125,6 @@ static int snd_bcm2835_dev_free(struct snd_device *device)
struct bcm2835_chip *chip = device->device_data;
struct snd_card *card = chip->card;
- /* TODO: free pcm, ctl */
-
snd_device_free(card, chip);
return 0;
@@ -122,6 +150,13 @@ static int snd_bcm2835_create(struct snd_card *card,
chip->card = card;
+ chip->vchi_ctx = devres_find(card->dev->parent,
+ bcm2835_devm_free_vchi_ctx, NULL, NULL);
+ if (!chip->vchi_ctx) {
+ kfree(chip);
+ return -ENODEV;
+ }
+
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
if (err) {
kfree(chip);
@@ -392,6 +427,10 @@ static int snd_bcm2835_alsa_probe_dt(struct platform_device *pdev)
numchans);
}
+ err = bcm2835_devm_add_vchi_ctx(dev);
+ if (err)
+ return err;
+
err = snd_add_child_devices(dev, numchans);
if (err)
return err;
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index dc6ec915f9f5..5dc427240a1d 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -15,6 +15,8 @@
#include <sound/pcm-indirect.h>
#include <linux/workqueue.h>
+#include "interface/vchi/vchi.h"
+
/*
* #define AUDIO_DEBUG_ENABLE
* #define AUDIO_VERBOSE_DEBUG_ENABLE
@@ -86,6 +88,11 @@ enum snd_bcm2835_ctrl {
PCM_PLAYBACK_DEVICE,
};
+struct bcm2835_vchi_ctx {
+ VCHI_INSTANCE_T vchi_instance;
+ VCHI_CONNECTION_T *vchi_connection;
+};
+
/* definition of the chip-specific record */
struct bcm2835_chip {
struct snd_card *card;
@@ -104,6 +111,8 @@ struct bcm2835_chip {
unsigned int opened;
unsigned int spdif_status;
struct mutex audio_mutex;
+
+ struct bcm2835_vchi_ctx *vchi_ctx;
};
struct bcm2835_alsa_stream {
@@ -142,6 +151,9 @@ int snd_bcm2835_new_simple_pcm(struct bcm2835_chip *chip,
int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip);
int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip);
+int bcm2835_new_vchi_ctx(struct bcm2835_vchi_ctx *vchi_ctx);
+void bcm2835_free_vchi_ctx(struct bcm2835_vchi_ctx *vchi_ctx);
+
int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream);
int bcm2835_audio_close(struct bcm2835_alsa_stream *alsa_stream);
int bcm2835_audio_set_params(struct bcm2835_alsa_stream *alsa_stream,
diff --git a/drivers/staging/vc04_services/bcm2835-camera/TODO b/drivers/staging/vc04_services/bcm2835-camera/TODO
index 0ab9e88d769a..cefce72d814f 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/TODO
+++ b/drivers/staging/vc04_services/bcm2835-camera/TODO
@@ -21,14 +21,3 @@ less copy it needed to do.
The bulk_receive() does some manual cache flushing that are 32-bit ARM
only, which we should convert to proper cross-platform APIs.
-4) Convert to be a platform driver.
-
-Right now when the module probes, it tries to initialize VCHI and
-errors out if it wasn't ready yet. If bcm2835-v4l2 was built in, then
-VCHI generally isn't ready because it depends on both the firmware and
-mailbox drivers having already loaded.
-
-We should have VCHI create a platform device once it's initialized,
-and have this driver bind to it, so that we automatically load the
-v4l2 module after VCHI loads.
-
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index d2262275a870..ce26741ae9d9 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -23,6 +23,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
#include "mmal-common.h"
#include "mmal-encodings.h"
@@ -78,131 +79,132 @@ static const struct v4l2_fract
/* video formats */
static struct mmal_fmt formats[] = {
{
- .name = "4:2:0, planar, YUV",
- .fourcc = V4L2_PIX_FMT_YUV420,
- .flags = 0,
- .mmal = MMAL_ENCODING_I420,
- .depth = 12,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 1,
- },
- {
- .name = "4:2:2, packed, YUYV",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .flags = 0,
- .mmal = MMAL_ENCODING_YUYV,
- .depth = 16,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 2,
- },
- {
- .name = "RGB24 (LE)",
- .fourcc = V4L2_PIX_FMT_RGB24,
- .flags = 0,
- .mmal = MMAL_ENCODING_RGB24,
- .depth = 24,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 3,
- },
- {
- .name = "JPEG",
- .fourcc = V4L2_PIX_FMT_JPEG,
- .flags = V4L2_FMT_FLAG_COMPRESSED,
- .mmal = MMAL_ENCODING_JPEG,
- .depth = 8,
- .mmal_component = MMAL_COMPONENT_IMAGE_ENCODE,
- .ybbp = 0,
- },
- {
- .name = "H264",
- .fourcc = V4L2_PIX_FMT_H264,
- .flags = V4L2_FMT_FLAG_COMPRESSED,
- .mmal = MMAL_ENCODING_H264,
- .depth = 8,
- .mmal_component = MMAL_COMPONENT_VIDEO_ENCODE,
- .ybbp = 0,
- },
- {
- .name = "MJPEG",
- .fourcc = V4L2_PIX_FMT_MJPEG,
- .flags = V4L2_FMT_FLAG_COMPRESSED,
- .mmal = MMAL_ENCODING_MJPEG,
- .depth = 8,
- .mmal_component = MMAL_COMPONENT_VIDEO_ENCODE,
- .ybbp = 0,
- },
- {
- .name = "4:2:2, packed, YVYU",
- .fourcc = V4L2_PIX_FMT_YVYU,
- .flags = 0,
- .mmal = MMAL_ENCODING_YVYU,
- .depth = 16,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 2,
- },
- {
- .name = "4:2:2, packed, VYUY",
- .fourcc = V4L2_PIX_FMT_VYUY,
- .flags = 0,
- .mmal = MMAL_ENCODING_VYUY,
- .depth = 16,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 2,
- },
- {
- .name = "4:2:2, packed, UYVY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .flags = 0,
- .mmal = MMAL_ENCODING_UYVY,
- .depth = 16,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 2,
- },
- {
- .name = "4:2:0, planar, NV12",
- .fourcc = V4L2_PIX_FMT_NV12,
- .flags = 0,
- .mmal = MMAL_ENCODING_NV12,
- .depth = 12,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 1,
- },
- {
- .name = "RGB24 (BE)",
- .fourcc = V4L2_PIX_FMT_BGR24,
- .flags = 0,
- .mmal = MMAL_ENCODING_BGR24,
- .depth = 24,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 3,
- },
- {
- .name = "4:2:0, planar, YVU",
- .fourcc = V4L2_PIX_FMT_YVU420,
- .flags = 0,
- .mmal = MMAL_ENCODING_YV12,
- .depth = 12,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 1,
- },
- {
- .name = "4:2:0, planar, NV21",
- .fourcc = V4L2_PIX_FMT_NV21,
- .flags = 0,
- .mmal = MMAL_ENCODING_NV21,
- .depth = 12,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 1,
- },
- {
- .name = "RGB32 (BE)",
- .fourcc = V4L2_PIX_FMT_BGR32,
- .flags = 0,
- .mmal = MMAL_ENCODING_BGRA,
- .depth = 32,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 4,
- },
+ .name = "4:2:0, planar, YUV",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_I420,
+ .depth = 12,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 1,
+ .remove_padding = 1,
+ }, {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_YUYV,
+ .depth = 16,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 2,
+ .remove_padding = 0,
+ }, {
+ .name = "RGB24 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_RGB24,
+ .depth = 24,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 3,
+ .remove_padding = 0,
+ }, {
+ .name = "JPEG",
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .mmal = MMAL_ENCODING_JPEG,
+ .depth = 8,
+ .mmal_component = MMAL_COMPONENT_IMAGE_ENCODE,
+ .ybbp = 0,
+ .remove_padding = 0,
+ }, {
+ .name = "H264",
+ .fourcc = V4L2_PIX_FMT_H264,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .mmal = MMAL_ENCODING_H264,
+ .depth = 8,
+ .mmal_component = MMAL_COMPONENT_VIDEO_ENCODE,
+ .ybbp = 0,
+ .remove_padding = 0,
+ }, {
+ .name = "MJPEG",
+ .fourcc = V4L2_PIX_FMT_MJPEG,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .mmal = MMAL_ENCODING_MJPEG,
+ .depth = 8,
+ .mmal_component = MMAL_COMPONENT_VIDEO_ENCODE,
+ .ybbp = 0,
+ .remove_padding = 0,
+ }, {
+ .name = "4:2:2, packed, YVYU",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_YVYU,
+ .depth = 16,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 2,
+ .remove_padding = 0,
+ }, {
+ .name = "4:2:2, packed, VYUY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_VYUY,
+ .depth = 16,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 2,
+ .remove_padding = 0,
+ }, {
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_UYVY,
+ .depth = 16,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 2,
+ .remove_padding = 0,
+ }, {
+ .name = "4:2:0, planar, NV12",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_NV12,
+ .depth = 12,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 1,
+ .remove_padding = 1,
+ }, {
+ .name = "RGB24 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_BGR24,
+ .depth = 24,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 3,
+ .remove_padding = 0,
+ }, {
+ .name = "4:2:0, planar, YVU",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_YV12,
+ .depth = 12,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 1,
+ .remove_padding = 1,
+ }, {
+ .name = "4:2:0, planar, NV21",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_NV21,
+ .depth = 12,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 1,
+ .remove_padding = 1,
+ }, {
+ .name = "RGB32 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_BGRA,
+ .depth = 32,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 4,
+ .remove_padding = 0,
+ },
};
static struct mmal_fmt *get_format(struct v4l2_format *f)
@@ -245,8 +247,10 @@ static int queue_setup(struct vb2_queue *vq,
return -EINVAL;
}
- if (*nbuffers < (dev->capture.port->current_buffer.num + 2))
- *nbuffers = (dev->capture.port->current_buffer.num + 2);
+ if (*nbuffers < dev->capture.port->minimum_buffer.num)
+ *nbuffers = dev->capture.port->minimum_buffer.num;
+
+ dev->capture.port->current_buffer.num = *nbuffers;
*nplanes = 1;
@@ -263,16 +267,30 @@ static int queue_setup(struct vb2_queue *vq,
return 0;
}
+static int buffer_init(struct vb2_buffer *vb)
+{
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
+ struct mmal_buffer *buf = container_of(vb2, struct mmal_buffer, vb);
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
+ __func__, dev, vb);
+ buf->buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ buf->buffer_size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+
+ return mmal_vchi_buffer_init(dev->instance, buf);
+}
+
static int buffer_prepare(struct vb2_buffer *vb)
{
struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
unsigned long size;
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
- __func__, dev);
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
+ __func__, dev, vb);
- BUG_ON(!dev->capture.port);
- BUG_ON(!dev->capture.fmt);
+ if (!dev->capture.port || !dev->capture.fmt)
+ return -ENODEV;
size = dev->capture.stride * dev->capture.height;
if (vb2_plane_size(vb, 0) < size) {
@@ -285,6 +303,17 @@ static int buffer_prepare(struct vb2_buffer *vb)
return 0;
}
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
+ struct mmal_buffer *buf = container_of(vb2, struct mmal_buffer, vb);
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
+ __func__, dev, vb);
+ mmal_vchi_buffer_cleanup(buf);
+}
+
static inline bool is_capturing(struct bm2835_mmal_dev *dev)
{
return dev->capture.camera_port ==
@@ -450,10 +479,8 @@ static void buffer_queue(struct vb2_buffer *vb)
int ret;
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "%s: dev:%p buf:%p\n", __func__, dev, buf);
-
- buf->buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
- buf->buffer_size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+ "%s: dev:%p buf:%p, idx %u\n",
+ __func__, dev, buf, vb2->vb2_buf.index);
ret = vchiq_mmal_submit_buffer(dev->instance, dev->capture.port, buf);
if (ret < 0)
@@ -465,7 +492,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
int ret;
- int parameter_size;
+ u32 parameter_size;
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
__func__, dev);
@@ -617,7 +644,9 @@ static void bm2835_mmal_unlock(struct vb2_queue *vq)
static const struct vb2_ops bm2835_mmal_video_qops = {
.queue_setup = queue_setup,
+ .buf_init = buffer_init,
.buf_prepare = buffer_prepare,
+ .buf_cleanup = buffer_cleanup,
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
@@ -634,17 +663,19 @@ static int set_overlay_params(struct bm2835_mmal_dev *dev,
struct vchiq_mmal_port *port)
{
struct mmal_parameter_displayregion prev_config = {
- .set = MMAL_DISPLAY_SET_LAYER | MMAL_DISPLAY_SET_ALPHA |
- MMAL_DISPLAY_SET_DEST_RECT | MMAL_DISPLAY_SET_FULLSCREEN,
- .layer = PREVIEW_LAYER,
- .alpha = dev->overlay.global_alpha,
- .fullscreen = 0,
- .dest_rect = {
- .x = dev->overlay.w.left,
- .y = dev->overlay.w.top,
- .width = dev->overlay.w.width,
- .height = dev->overlay.w.height,
- },
+ .set = MMAL_DISPLAY_SET_LAYER |
+ MMAL_DISPLAY_SET_ALPHA |
+ MMAL_DISPLAY_SET_DEST_RECT |
+ MMAL_DISPLAY_SET_FULLSCREEN,
+ .layer = PREVIEW_LAYER,
+ .alpha = dev->overlay.global_alpha,
+ .fullscreen = 0,
+ .dest_rect = {
+ .x = dev->overlay.w.left,
+ .y = dev->overlay.w.top,
+ .width = dev->overlay.w.width,
+ .height = dev->overlay.w.height,
+ },
};
return vchiq_mmal_port_parameter_set(dev->instance, port,
MMAL_PARAMETER_DISPLAYREGION,
@@ -662,7 +693,7 @@ static int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
fmt = &formats[f->index];
- strlcpy(f->description, fmt->name, sizeof(f->description));
+ strlcpy((char *)f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
f->flags = fmt->flags;
@@ -820,7 +851,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
- sprintf(inp->name, "Camera %u", inp->index);
+ sprintf((char *)inp->name, "Camera %u", inp->index);
return 0;
}
@@ -848,11 +879,11 @@ static int vidioc_querycap(struct file *file, void *priv,
vchiq_mmal_version(dev->instance, &major, &minor);
- strcpy(cap->driver, "bm2835 mmal");
- snprintf(cap->card, sizeof(cap->card), "mmal service %d.%d",
+ strcpy((char *)cap->driver, "bm2835 mmal");
+ snprintf((char *)cap->card, sizeof(cap->card), "mmal service %d.%d",
major, minor);
- snprintf(cap->bus_info, sizeof(cap->bus_info),
+ snprintf((char *)cap->bus_info, sizeof(cap->bus_info),
"platform:%s", dev->v4l2_dev.name);
cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
@@ -871,7 +902,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
fmt = &formats[f->index];
- strlcpy(f->description, fmt->name, sizeof(f->description));
+ strlcpy((char *)f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
f->flags = fmt->flags;
@@ -928,9 +959,19 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
&f->fmt.pix.height, MIN_HEIGHT, dev->max_height,
1, 0);
f->fmt.pix.bytesperline = f->fmt.pix.width * mfmt->ybbp;
+ if (!mfmt->remove_padding) {
+ int align_mask = ((32 * mfmt->depth) >> 3) - 1;
+ /* GPU isn't removing padding, so stride is aligned to 32 */
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.bytesperline + align_mask) & ~align_mask;
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Not removing padding, so bytes/line = %d, "
+ "(align_mask %d)\n",
+ f->fmt.pix.bytesperline, align_mask);
+ }
/* Image buffer has to be padded to allow for alignment, even though
- * we then remove that padding before delivering the buffer.
+ * we sometimes then remove that padding before delivering the buffer.
*/
f->fmt.pix.sizeimage = ((f->fmt.pix.height + 15) & ~15) *
(((f->fmt.pix.width + 31) & ~31) * mfmt->depth) >> 3;
@@ -963,8 +1004,10 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
struct vchiq_mmal_port *port = NULL, *camera_port = NULL;
struct vchiq_mmal_component *encode_component = NULL;
struct mmal_fmt *mfmt = get_format(f);
+ u32 remove_padding;
- BUG_ON(!mfmt);
+ if (!mfmt)
+ return -EINVAL;
if (dev->capture.encode_component) {
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
@@ -1031,6 +1074,12 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
camera_port->format.encoding = MMAL_ENCODING_RGB24;
}
+ remove_padding = mfmt->remove_padding;
+ vchiq_mmal_port_parameter_set(dev->instance,
+ camera_port,
+ MMAL_PARAMETER_NO_IMAGE_PADDING,
+ &remove_padding, sizeof(remove_padding));
+
camera_port->format.encoding_variant = 0;
camera_port->es.video.width = f->fmt.pix.width;
camera_port->es.video.height = f->fmt.pix.height;
@@ -1348,7 +1397,6 @@ static int vidioc_s_parm(struct file *file, void *priv,
{
struct bm2835_mmal_dev *dev = video_drvdata(file);
struct v4l2_fract tpf;
- struct mmal_parameter_rational fps_param;
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1365,10 +1413,6 @@ static int vidioc_s_parm(struct file *file, void *priv,
parm->parm.capture.readbuffers = 1;
parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- fps_param.num = 0; /* Select variable fps, and then use
- * FPS_RANGE to select the actual limits.
- */
- fps_param.den = 1;
set_framerate_params(dev);
return 0;
@@ -1445,7 +1489,7 @@ static int get_num_cameras(struct vchiq_mmal_instance *instance,
int ret;
struct vchiq_mmal_component *cam_info_component;
struct mmal_parameter_camera_info_t cam_info = {0};
- int param_size = sizeof(cam_info);
+ u32 param_size = sizeof(cam_info);
int i;
/* create a camera_info component */
@@ -1504,13 +1548,12 @@ static int set_camera_parameters(struct vchiq_mmal_instance *instance,
#define MAX_SUPPORTED_ENCODINGS 20
/* MMAL instance and component init */
-static int __init mmal_init(struct bm2835_mmal_dev *dev)
+static int mmal_init(struct bm2835_mmal_dev *dev)
{
int ret;
struct mmal_es_format_local *format;
- u32 bool_true = 1;
u32 supported_encodings[MAX_SUPPORTED_ENCODINGS];
- int param_size;
+ u32 param_size;
struct vchiq_mmal_component *camera;
ret = vchiq_mmal_init(&dev->instance);
@@ -1592,11 +1635,6 @@ static int __init mmal_init(struct bm2835_mmal_dev *dev)
format->es->video.frame_rate.num = 0; /* Rely on fps_range */
format->es->video.frame_rate.den = 1;
- vchiq_mmal_port_parameter_set(dev->instance,
- &camera->output[MMAL_CAMERA_PORT_VIDEO],
- MMAL_PARAMETER_NO_IMAGE_PADDING,
- &bool_true, sizeof(bool_true));
-
format = &camera->output[MMAL_CAMERA_PORT_CAPTURE].format;
format->encoding = MMAL_ENCODING_OPAQUE;
@@ -1618,11 +1656,6 @@ static int __init mmal_init(struct bm2835_mmal_dev *dev)
dev->capture.enc_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
dev->capture.enc_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
- vchiq_mmal_port_parameter_set(dev->instance,
- &camera->output[MMAL_CAMERA_PORT_CAPTURE],
- MMAL_PARAMETER_NO_IMAGE_PADDING,
- &bool_true, sizeof(bool_true));
-
/* get the preview component ready */
ret = vchiq_mmal_component_init(
dev->instance, "ril.video_render",
@@ -1723,8 +1756,8 @@ unreg_mmal:
return ret;
}
-static int __init bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
- struct video_device *vfd)
+static int bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
+ struct video_device *vfd)
{
int ret;
@@ -1803,7 +1836,7 @@ static struct v4l2_format default_v4l2_format = {
.fmt.pix.sizeimage = 1024 * 768,
};
-static int __init bm2835_mmal_init(void)
+static int bcm2835_mmal_probe(struct platform_device *pdev)
{
int ret;
struct bm2835_mmal_dev *dev;
@@ -1923,7 +1956,7 @@ cleanup_gdev:
return ret;
}
-static void __exit bm2835_mmal_exit(void)
+static int bcm2835_mmal_remove(struct platform_device *pdev)
{
int camera;
struct vchiq_mmal_instance *instance = gdev[0]->instance;
@@ -1933,7 +1966,16 @@ static void __exit bm2835_mmal_exit(void)
gdev[camera] = NULL;
}
vchiq_mmal_finalise(instance);
+
+ return 0;
}
-module_init(bm2835_mmal_init);
-module_exit(bm2835_mmal_exit);
+static struct platform_driver bcm2835_camera_driver = {
+ .probe = bcm2835_mmal_probe,
+ .remove = bcm2835_mmal_remove,
+ .driver = {
+ .name = "bcm2835-camera",
+ },
+};
+
+module_platform_driver(bcm2835_camera_driver)
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
index 800e4e7e5f96..a20bf274a4fd 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
@@ -17,7 +17,9 @@
#define MMAL_MAGIC MMAL_FOURCC('m', 'm', 'a', 'l')
/** Special value signalling that time is not known */
-#define MMAL_TIME_UNKNOWN (1LL<<63)
+#define MMAL_TIME_UNKNOWN BIT_ULL(63)
+
+struct mmal_msg_context;
/* mapping between v4l and mmal video modes */
struct mmal_fmt {
@@ -28,6 +30,9 @@ struct mmal_fmt {
int depth;
u32 mmal_component; /* MMAL component index to be used to encode */
u32 ybbp; /* depth of first Y plane for planar formats */
+ bool remove_padding; /* Does the GPU have to remove padding,
+ * or can we do hide padding via bytesperline.
+ */
};
/* buffer for one video frame */
@@ -40,6 +45,8 @@ struct mmal_buffer {
void *buffer; /* buffer pointer */
unsigned long buffer_size; /* size of allocated buffer */
+
+ struct mmal_msg_context *msg_context;
};
/* */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h
index dd4b4ce72081..3b3ed79cadd9 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h
@@ -37,7 +37,7 @@ enum mmal_port_type {
*
* most elements are informational only, the pointer values for
* interogation messages are generally provided as additional
- * strucures within the message. When used to set values only teh
+ * structures within the message. When used to set values only the
* buffer_num, buffer_size and userdata parameters are writable.
*/
struct mmal_port {
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
index 1607bc4c0347..184024dfb8b7 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
@@ -18,6 +18,9 @@
* @{
*/
+#ifndef __MMAL_PARAMETERS_H
+#define __MMAL_PARAMETERS_H
+
/** Common parameter ID group, used with many types of component. */
#define MMAL_PARAMETER_GROUP_COMMON (0<<16)
/** Camera-specific parameter ID group. */
@@ -561,6 +564,14 @@ enum mmal_parameter_displayset {
MMAL_DISPLAY_SET_ALPHA = 0x400,
};
+/* rectangle, used lots so it gets its own struct */
+struct vchiq_mmal_rect {
+ s32 x;
+ s32 y;
+ s32 width;
+ s32 height;
+};
+
struct mmal_parameter_displayregion {
/** Bitfield that indicates which fields are set and should be
* used. All other fields will maintain their current value.
@@ -682,3 +693,5 @@ struct mmal_parameter_camera_info_t {
struct mmal_parameter_camera_info_flash_t
flashes[MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES];
};
+
+#endif
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index a91ef6ea29ce..f5b5ead6347c 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
-#include <linux/btree.h>
#include <asm/cacheflush.h>
#include <media/videobuf2-vmalloc.h>
@@ -111,7 +110,11 @@ struct vchiq_mmal_instance;
/* normal message context */
struct mmal_msg_context {
struct vchiq_mmal_instance *instance;
- u32 handle;
+
+ /* Index in the context_map idr so that we can find the
+ * mmal_msg_context again when servicing the VCHI reply.
+ */
+ int handle;
union {
struct {
@@ -149,111 +152,28 @@ struct mmal_msg_context {
};
-struct vchiq_mmal_context_map {
- /* ensure serialized access to the btree(contention should be low) */
- struct mutex lock;
- struct btree_head32 btree_head;
- u32 last_handle;
-};
-
struct vchiq_mmal_instance {
VCHI_SERVICE_HANDLE_T handle;
/* ensure serialised access to service */
struct mutex vchiq_mutex;
- /* ensure serialised access to bulk operations */
- struct mutex bulk_mutex;
-
/* vmalloc page to receive scratch bulk xfers into */
void *bulk_scratch;
- /* mapping table between context handles and mmal_msg_contexts */
- struct vchiq_mmal_context_map context_map;
+ struct idr context_map;
+ spinlock_t context_map_lock;
/* component to use next */
int component_idx;
struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
};
-static int __must_check
-mmal_context_map_init(struct vchiq_mmal_context_map *context_map)
-{
- mutex_init(&context_map->lock);
- context_map->last_handle = 0;
- return btree_init32(&context_map->btree_head);
-}
-
-static void mmal_context_map_destroy(struct vchiq_mmal_context_map *context_map)
-{
- mutex_lock(&context_map->lock);
- btree_destroy32(&context_map->btree_head);
- mutex_unlock(&context_map->lock);
-}
-
-static u32
-mmal_context_map_create_handle(struct vchiq_mmal_context_map *context_map,
- struct mmal_msg_context *msg_context,
- gfp_t gfp)
-{
- u32 handle;
-
- mutex_lock(&context_map->lock);
-
- while (1) {
- /* just use a simple count for handles, but do not use 0 */
- context_map->last_handle++;
- if (!context_map->last_handle)
- context_map->last_handle++;
-
- handle = context_map->last_handle;
-
- /* check if the handle is already in use */
- if (!btree_lookup32(&context_map->btree_head, handle))
- break;
- }
-
- if (btree_insert32(&context_map->btree_head, handle,
- msg_context, gfp)) {
- /* probably out of memory */
- mutex_unlock(&context_map->lock);
- return 0;
- }
-
- mutex_unlock(&context_map->lock);
- return handle;
-}
-
-static struct mmal_msg_context *
-mmal_context_map_lookup_handle(struct vchiq_mmal_context_map *context_map,
- u32 handle)
-{
- struct mmal_msg_context *msg_context;
-
- if (!handle)
- return NULL;
-
- mutex_lock(&context_map->lock);
-
- msg_context = btree_lookup32(&context_map->btree_head, handle);
-
- mutex_unlock(&context_map->lock);
- return msg_context;
-}
-
-static void
-mmal_context_map_destroy_handle(struct vchiq_mmal_context_map *context_map,
- u32 handle)
-{
- mutex_lock(&context_map->lock);
- btree_remove32(&context_map->btree_head, handle);
- mutex_unlock(&context_map->lock);
-}
-
static struct mmal_msg_context *
get_msg_context(struct vchiq_mmal_instance *instance)
{
struct mmal_msg_context *msg_context;
+ int handle;
/* todo: should this be allocated from a pool to avoid kzalloc */
msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
@@ -261,32 +181,40 @@ get_msg_context(struct vchiq_mmal_instance *instance)
if (!msg_context)
return ERR_PTR(-ENOMEM);
- msg_context->instance = instance;
- msg_context->handle =
- mmal_context_map_create_handle(&instance->context_map,
- msg_context,
- GFP_KERNEL);
+ /* Create an ID that will be passed along with our message so
+ * that when we service the VCHI reply, we can look up what
+ * message is being replied to.
+ */
+ spin_lock(&instance->context_map_lock);
+ handle = idr_alloc(&instance->context_map, msg_context,
+ 0, 0, GFP_KERNEL);
+ spin_unlock(&instance->context_map_lock);
- if (!msg_context->handle) {
+ if (handle < 0) {
kfree(msg_context);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(handle);
}
+ msg_context->instance = instance;
+ msg_context->handle = handle;
+
return msg_context;
}
static struct mmal_msg_context *
-lookup_msg_context(struct vchiq_mmal_instance *instance, u32 handle)
+lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
{
- return mmal_context_map_lookup_handle(&instance->context_map,
- handle);
+ return idr_find(&instance->context_map, handle);
}
static void
release_msg_context(struct mmal_msg_context *msg_context)
{
- mmal_context_map_destroy_handle(&msg_context->instance->context_map,
- msg_context->handle);
+ struct vchiq_mmal_instance *instance = msg_context->instance;
+
+ spin_lock(&instance->context_map_lock);
+ idr_remove(&instance->context_map, msg_context->handle);
+ spin_unlock(&instance->context_map_lock);
kfree(msg_context);
}
@@ -321,8 +249,6 @@ static void buffer_work_cb(struct work_struct *work)
msg_context->u.bulk.dts,
msg_context->u.bulk.pts);
- /* release message context */
- release_msg_context(msg_context);
}
/* enqueue a bulk receive for a given message context */
@@ -331,23 +257,12 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
struct mmal_msg_context *msg_context)
{
unsigned long rd_len;
- unsigned long flags = 0;
int ret;
- /* bulk mutex stops other bulk operations while we have a
- * receive in progress - released in callback
- */
- ret = mutex_lock_interruptible(&instance->bulk_mutex);
- if (ret != 0)
- return ret;
-
rd_len = msg->u.buffer_from_host.buffer_header.length;
- /* take buffer from queue */
- spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
- if (list_empty(&msg_context->u.bulk.port->buffers)) {
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
- pr_err("buffer list empty trying to submit bulk receive\n");
+ if (!msg_context->u.bulk.buffer) {
+ pr_err("bulk.buffer not configured - error in buffer_from_host\n");
/* todo: this is a serious error, we should never have
* committed a buffer_to_host operation to the mmal
@@ -359,18 +274,9 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
* waiting bulk receive?
*/
- mutex_unlock(&instance->bulk_mutex);
-
return -EINVAL;
}
- msg_context->u.bulk.buffer =
- list_entry(msg_context->u.bulk.port->buffers.next,
- struct mmal_buffer, list);
- list_del(&msg_context->u.bulk.buffer->list);
-
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
-
/* ensure we do not overrun the available buffer */
if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
rd_len = msg_context->u.bulk.buffer->buffer_size;
@@ -401,11 +307,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
vchi_service_release(instance->handle);
- if (ret != 0) {
- /* callback will not be clearing the mutex */
- mutex_unlock(&instance->bulk_mutex);
- }
-
return ret;
}
@@ -415,13 +316,6 @@ static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
{
int ret;
- /* bulk mutex stops other bulk operations while we have a
- * receive in progress - released in callback
- */
- ret = mutex_lock_interruptible(&instance->bulk_mutex);
- if (ret != 0)
- return ret;
-
/* zero length indicates this was a dummy transfer */
msg_context->u.bulk.buffer_used = 0;
@@ -437,11 +331,6 @@ static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
vchi_service_release(instance->handle);
- if (ret != 0) {
- /* callback will not be clearing the mutex */
- mutex_unlock(&instance->bulk_mutex);
- }
-
return ret;
}
@@ -450,31 +339,6 @@ static int inline_receive(struct vchiq_mmal_instance *instance,
struct mmal_msg *msg,
struct mmal_msg_context *msg_context)
{
- unsigned long flags = 0;
-
- /* take buffer from queue */
- spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
- if (list_empty(&msg_context->u.bulk.port->buffers)) {
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
- pr_err("buffer list empty trying to receive inline\n");
-
- /* todo: this is a serious error, we should never have
- * committed a buffer_to_host operation to the mmal
- * port without the buffer to back it up (with
- * underflow handling) and there is no obvious way to
- * deal with this. Less bad than the bulk case as we
- * can just drop this on the floor but...unhelpful
- */
- return -EINVAL;
- }
-
- msg_context->u.bulk.buffer =
- list_entry(msg_context->u.bulk.port->buffers.next,
- struct mmal_buffer, list);
- list_del(&msg_context->u.bulk.buffer->list);
-
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
-
memcpy(msg_context->u.bulk.buffer->buffer,
msg->u.buffer_from_host.short_data,
msg->u.buffer_from_host.payload_in_message);
@@ -494,25 +358,23 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
struct mmal_msg m;
int ret;
- pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
+ if (!port->enabled)
+ return -EINVAL;
- /* bulk mutex stops other bulk operations while we
- * have a receive in progress
- */
- if (mutex_lock_interruptible(&instance->bulk_mutex))
- return -EINTR;
+ pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
/* get context */
- msg_context = get_msg_context(instance);
- if (IS_ERR(msg_context)) {
- ret = PTR_ERR(msg_context);
- goto unlock;
+ if (!buf->msg_context) {
+ pr_err("%s: msg_context not allocated, buf %p\n", __func__,
+ buf);
+ return -EINVAL;
}
+ msg_context = buf->msg_context;
/* store bulk message context for when data arrives */
msg_context->u.bulk.instance = instance;
msg_context->u.bulk.port = port;
- msg_context->u.bulk.buffer = NULL; /* not valid until bulk xfer */
+ msg_context->u.bulk.buffer = buf;
msg_context->u.bulk.buffer_used = 0;
/* initialise work structure ready to schedule callback */
@@ -557,54 +419,8 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
sizeof(struct mmal_msg_header) +
sizeof(m.u.buffer_from_host));
- if (ret != 0) {
- release_msg_context(msg_context);
- /* todo: is this correct error value? */
- }
-
vchi_service_release(instance->handle);
-unlock:
- mutex_unlock(&instance->bulk_mutex);
-
- return ret;
-}
-
-/* submit a buffer to the mmal sevice
- *
- * the buffer_from_host uses size data from the ports next available
- * mmal_buffer and deals with there being no buffer available by
- * incrementing the underflow for later
- */
-static int port_buffer_from_host(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port)
-{
- int ret;
- struct mmal_buffer *buf;
- unsigned long flags = 0;
-
- if (!port->enabled)
- return -EINVAL;
-
- /* peek buffer from queue */
- spin_lock_irqsave(&port->slock, flags);
- if (list_empty(&port->buffers)) {
- port->buffer_underflow++;
- spin_unlock_irqrestore(&port->slock, flags);
- return -ENOSPC;
- }
-
- buf = list_entry(port->buffers.next, struct mmal_buffer, list);
-
- spin_unlock_irqrestore(&port->slock, flags);
-
- /* issue buffer to mmal service */
- ret = buffer_from_host(instance, port, buf);
- if (ret) {
- pr_err("adding buffer header failed\n");
- /* todo: how should this be dealt with */
- }
-
return ret;
}
@@ -680,9 +496,6 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
msg->u.buffer_from_host.payload_in_message;
}
- /* replace the buffer header */
- port_buffer_from_host(instance, msg_context->u.bulk.port);
-
/* schedule the port callback */
schedule_work(&msg_context->u.bulk.work);
}
@@ -690,13 +503,6 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
struct mmal_msg_context *msg_context)
{
- /* bulk receive operation complete */
- mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
-
- /* replace the buffer header */
- port_buffer_from_host(msg_context->u.bulk.instance,
- msg_context->u.bulk.port);
-
msg_context->u.bulk.status = 0;
/* schedule the port callback */
@@ -708,13 +514,6 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
{
pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
- /* bulk receive operation complete */
- mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
-
- /* replace the buffer header */
- port_buffer_from_host(msg_context->u.bulk.instance,
- msg_context->u.bulk.port);
-
msg_context->u.bulk.status = -EINTR;
schedule_work(&msg_context->u.bulk.work);
@@ -1482,7 +1281,14 @@ static int port_disable(struct vchiq_mmal_instance *instance,
ret = port_action_port(instance, port,
MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
if (ret == 0) {
- /* drain all queued buffers on port */
+ /*
+ * Drain all queued buffers on port. This should only
+ * apply to buffers that have been queued before the port
+ * has been enabled. If the port has been enabled and buffers
+ * passed, then the buffers should have been removed from this
+ * list, and we should get the relevant callbacks via VCHIQ
+ * to release the buffers.
+ */
spin_lock_irqsave(&port->slock, flags);
list_for_each_safe(buf_head, q, &port->buffers) {
@@ -1511,7 +1317,7 @@ static int port_enable(struct vchiq_mmal_instance *instance,
struct vchiq_mmal_port *port)
{
unsigned int hdr_count;
- struct list_head *buf_head;
+ struct list_head *q, *buf_head;
int ret;
if (port->enabled)
@@ -1537,7 +1343,7 @@ static int port_enable(struct vchiq_mmal_instance *instance,
if (port->buffer_cb) {
/* send buffer headers to videocore */
hdr_count = 1;
- list_for_each(buf_head, &port->buffers) {
+ list_for_each_safe(buf_head, q, &port->buffers) {
struct mmal_buffer *mmalbuf;
mmalbuf = list_entry(buf_head, struct mmal_buffer,
@@ -1546,6 +1352,7 @@ static int port_enable(struct vchiq_mmal_instance *instance,
if (ret)
goto done;
+ list_del(buf_head);
hdr_count++;
if (hdr_count > port->current_buffer.num)
break;
@@ -1758,23 +1565,42 @@ int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
struct mmal_buffer *buffer)
{
unsigned long flags = 0;
+ int ret;
- spin_lock_irqsave(&port->slock, flags);
- list_add_tail(&buffer->list, &port->buffers);
- spin_unlock_irqrestore(&port->slock, flags);
-
- /* the port previously underflowed because it was missing a
- * mmal_buffer which has just been added, submit that buffer
- * to the mmal service.
- */
- if (port->buffer_underflow) {
- port_buffer_from_host(instance, port);
- port->buffer_underflow--;
+ ret = buffer_from_host(instance, port, buffer);
+ if (ret == -EINVAL) {
+ /* Port is disabled. Queue for when it is enabled. */
+ spin_lock_irqsave(&port->slock, flags);
+ list_add_tail(&buffer->list, &port->buffers);
+ spin_unlock_irqrestore(&port->slock, flags);
}
return 0;
}
+int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
+ struct mmal_buffer *buf)
+{
+ struct mmal_msg_context *msg_context = get_msg_context(instance);
+
+ if (IS_ERR(msg_context))
+ return (PTR_ERR(msg_context));
+
+ buf->msg_context = msg_context;
+ return 0;
+}
+
+int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
+{
+ struct mmal_msg_context *msg_context = buf->msg_context;
+
+ if (msg_context)
+ release_msg_context(msg_context);
+ buf->msg_context = NULL;
+
+ return 0;
+}
+
/* Initialise a mmal component and its ports
*
*/
@@ -1965,7 +1791,7 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
vfree(instance->bulk_scratch);
- mmal_context_map_destroy(&instance->context_map);
+ idr_destroy(&instance->context_map);
kfree(instance);
@@ -2024,16 +1850,11 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
return -ENOMEM;
mutex_init(&instance->vchiq_mutex);
- mutex_init(&instance->bulk_mutex);
instance->bulk_scratch = vmalloc(PAGE_SIZE);
- status = mmal_context_map_init(&instance->context_map);
- if (status) {
- pr_err("Failed to init context map (status=%d)\n", status);
- kfree(instance);
- return status;
- }
+ spin_lock_init(&instance->context_map_lock);
+ idr_init_base(&instance->context_map, 1);
params.callback_param = instance;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
index b1f22b6dca10..22b839ecd5f0 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
@@ -32,14 +32,6 @@ enum vchiq_mmal_es_type {
MMAL_ES_TYPE_SUBPICTURE /**< Sub-picture elementary stream */
};
-/* rectangle, used lots so it gets its own struct */
-struct vchiq_mmal_rect {
- s32 x;
- s32 y;
- s32 width;
- s32 height;
-};
-
struct vchiq_mmal_port_buffer {
unsigned int num; /* number of buffers */
u32 size; /* size of buffers */
@@ -79,10 +71,6 @@ struct vchiq_mmal_port {
struct list_head buffers;
/* lock to serialise adding and removing buffers from list */
spinlock_t slock;
- /* count of how many buffer header refils have failed because
- * there was no buffer to satisfy them
- */
- int buffer_underflow;
/* callback on buffer completion */
vchiq_mmal_buffer_cb buffer_cb;
/* callback context */
@@ -168,4 +156,7 @@ int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
struct vchiq_mmal_port *port,
struct mmal_buffer *buf);
+int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
+ struct mmal_buffer *buf);
+int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf);
#endif /* MMAL_VCHIQ_H */
diff --git a/drivers/staging/vc04_services/interface/vchi/TODO b/drivers/staging/vc04_services/interface/vchi/TODO
index 86708c7c8ec3..0b3ec75ff627 100644
--- a/drivers/staging/vc04_services/interface/vchi/TODO
+++ b/drivers/staging/vc04_services/interface/vchi/TODO
@@ -40,17 +40,12 @@ should properly handle a module unload. This also includes that all
resouces must be freed (kthreads, debugfs entries, ...) and global
variables avoided.
-5) Fix stack hog
-
-Running make checkstack shows that vchiq_dump_service_use_state() has
-an extensive stack usage. Maybe other functions are also affected.
-
-6) Cleanup logging mechanism
+5) Cleanup logging mechanism
The driver should probably be using the standard kernel logging mechanisms
such as dev_info, dev_dbg, and friends.
-7) Documentation
+6) Documentation
A short top-down description of this driver's architecture (function of
kthreads, userspace, limitations) could be very helpful for reviewers.
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
index 76e10fe65d9b..8eb2bb9f0fe2 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_common.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
@@ -97,12 +97,10 @@ typedef enum {
VCHI_CALLBACK_PEER_RESUMED,
VCHI_CALLBACK_FORCED_POWER_OFF,
-#ifdef USE_VCHIQ_ARM
// some extra notifications provided by vchiq_arm
VCHI_CALLBACK_SERVICE_OPENED,
VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
-#endif
VCHI_CALLBACK_REASON_MAX
} VCHI_CALLBACK_REASON_T;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index afdd3e944f3f..e76720903064 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -475,7 +475,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
/* do not try and release vmalloc pages */
} else {
actual_pages = get_user_pages_fast(
- (unsigned long)buf & PAGE_MASK,
+ (unsigned long)buf & PAGE_MASK,
num_pages,
type == PAGELIST_READ,
pages);
@@ -582,8 +582,8 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
struct page **pages = pagelistinfo->pages;
unsigned int num_pages = pagelistinfo->num_pages;
- vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
- pagelistinfo->pagelist, actual);
+ vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
+ __func__, pagelistinfo->pagelist, actual);
/*
* NOTE: dma_unmap_sg must be called before the
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 24d456b0a6f0..bc05c69383b8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -168,6 +168,7 @@ static VCHIQ_STATE_T g_state;
static struct class *vchiq_class;
static struct device *vchiq_dev;
static DEFINE_SPINLOCK(msg_queue_spinlock);
+static struct platform_device *bcm2835_camera;
static const char *const ioctl_names[] = {
"CONNECT",
@@ -215,7 +216,7 @@ VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
state = vchiq_get_state();
if (state)
break;
- udelay(500);
+ usleep_range(500, 600);
}
if (i == VCHIQ_INIT_RETRIES) {
vchiq_log_error(vchiq_core_log_level,
@@ -563,7 +564,7 @@ add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
/* Out of space - wait for the client */
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
vchiq_log_trace(vchiq_arm_log_level,
- "add_completion - completion queue full");
+ "%s - completion queue full", __func__);
DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
if (down_interruptible(&instance->remove_event) != 0) {
vchiq_log_info(vchiq_arm_log_level,
@@ -641,9 +642,9 @@ service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
return VCHIQ_SUCCESS;
vchiq_log_trace(vchiq_arm_log_level,
- "service_callback - service %lx(%d,%p), reason %d, header %lx, "
+ "%s - service %lx(%d,%p), reason %d, header %lx, "
"instance %lx, bulk_userdata %lx",
- (unsigned long)user_service,
+ __func__, (unsigned long)user_service,
service->localport, user_service->userdata,
reason, (unsigned long)header,
(unsigned long)instance, (unsigned long)bulk_userdata);
@@ -679,12 +680,12 @@ service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
if (down_interruptible(&user_service->remove_event)
!= 0) {
vchiq_log_info(vchiq_arm_log_level,
- "service_callback interrupted");
+ "%s interrupted", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
return VCHIQ_RETRY;
} else if (instance->closing) {
vchiq_log_info(vchiq_arm_log_level,
- "service_callback closing");
+ "%s closing", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
return VCHIQ_ERROR;
}
@@ -740,8 +741,8 @@ user_service_free(void *userdata)
static void close_delivered(USER_SERVICE_T *user_service)
{
vchiq_log_info(vchiq_arm_log_level,
- "close_delivered(handle=%x)",
- user_service->service->handle);
+ "%s(handle=%x)",
+ __func__, user_service->service->handle);
if (user_service->close_pending) {
/* Allow the underlying service to be culled */
@@ -872,8 +873,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
DEBUG_INITIALISE(g_state.local)
vchiq_log_trace(vchiq_arm_log_level,
- "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
- instance,
+ "%s - instance %pK, cmd %s, arg %lx",
+ __func__, instance,
((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
@@ -2017,7 +2018,6 @@ vchiq_open(struct inode *inode, struct file *file)
vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
switch (dev) {
case VCHIQ_MINOR: {
- int ret;
VCHIQ_STATE_T *state = vchiq_get_state();
VCHIQ_INSTANCE_T instance;
@@ -2034,11 +2034,7 @@ vchiq_open(struct inode *inode, struct file *file)
instance->state = state;
instance->pid = current->tgid;
- ret = vchiq_debugfs_add_instance(instance);
- if (ret != 0) {
- kfree(instance);
- return ret;
- }
+ vchiq_debugfs_add_instance(instance);
sema_init(&instance->insert_event, 0);
sema_init(&instance->remove_event, 0);
@@ -2078,8 +2074,8 @@ vchiq_release(struct inode *inode, struct file *file)
int i;
vchiq_log_info(vchiq_arm_log_level,
- "vchiq_release: instance=%lx",
- (unsigned long)instance);
+ "%s: instance=%lx",
+ __func__, (unsigned long)instance);
if (!state) {
ret = -EPERM;
@@ -2128,9 +2124,11 @@ vchiq_release(struct inode *inode, struct file *file)
while (user_service->msg_remove !=
user_service->msg_insert) {
- VCHIQ_HEADER_T *header = user_service->
- msg_queue[user_service->msg_remove &
- (MSG_QUEUE_SIZE - 1)];
+ VCHIQ_HEADER_T *header;
+ int m = user_service->msg_remove &
+ (MSG_QUEUE_SIZE - 1);
+
+ header = user_service->msg_queue[m];
user_service->msg_remove++;
spin_unlock(&msg_queue_spinlock);
@@ -2666,8 +2664,7 @@ start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
del_timer(&arm_state->suspend_timer);
arm_state->suspend_timer.expires = jiffies +
- msecs_to_jiffies(arm_state->
- suspend_timer_timeout);
+ msecs_to_jiffies(arm_state->suspend_timer_timeout);
add_timer(&arm_state->suspend_timer);
arm_state->suspend_timer_running = 1;
}
@@ -3019,7 +3016,6 @@ vchiq_check_suspend(VCHIQ_STATE_T *state)
out:
vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
- return;
}
int
@@ -3414,13 +3410,18 @@ vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
return ret;
}
+struct service_data_struct {
+ int fourcc;
+ int clientid;
+ int use_count;
+};
+
void
vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
{
VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
- int i, j = 0;
- /* Only dump 64 services */
- static const int local_max_services = 64;
+ struct service_data_struct *service_data;
+ int i, found = 0;
/* If there's more than 64 services, only dump ones with
* non-zero counts */
int only_nonzero = 0;
@@ -3431,25 +3432,25 @@ vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
int peer_count;
int vc_use_count;
int active_services;
- struct service_data_struct {
- int fourcc;
- int clientid;
- int use_count;
- } service_data[local_max_services];
if (!arm_state)
return;
+ service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
+ GFP_KERNEL);
+ if (!service_data)
+ return;
+
read_lock_bh(&arm_state->susp_res_lock);
vc_suspend_state = arm_state->vc_suspend_state;
vc_resume_state = arm_state->vc_resume_state;
peer_count = arm_state->peer_use_count;
vc_use_count = arm_state->videocore_use_count;
active_services = state->unused_service;
- if (active_services > local_max_services)
+ if (active_services > MAX_SERVICES)
only_nonzero = 1;
- for (i = 0; (i < active_services) && (j < local_max_services); i++) {
+ for (i = 0; i < active_services; i++) {
VCHIQ_SERVICE_T *service_ptr = state->services[i];
if (!service_ptr)
@@ -3461,9 +3462,12 @@ vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
continue;
- service_data[j].fourcc = service_ptr->base.fourcc;
- service_data[j].clientid = service_ptr->client_id;
- service_data[j++].use_count = service_ptr->service_use_count;
+ service_data[found].fourcc = service_ptr->base.fourcc;
+ service_data[found].clientid = service_ptr->client_id;
+ service_data[found].use_count = service_ptr->service_use_count;
+ found++;
+ if (found >= MAX_SERVICES)
+ break;
}
read_unlock_bh(&arm_state->susp_res_lock);
@@ -3478,10 +3482,9 @@ vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
if (only_nonzero)
vchiq_log_warning(vchiq_susp_log_level, "Too many active "
"services (%d). Only dumping up to first %d services "
- "with non-zero use-count", active_services,
- local_max_services);
+ "with non-zero use-count", active_services, found);
- for (i = 0; i < j; i++) {
+ for (i = 0; i < found; i++) {
vchiq_log_warning(vchiq_susp_log_level,
"----- %c%c%c%c:%d service count %d %s",
VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
@@ -3494,6 +3497,8 @@ vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
vchiq_log_warning(vchiq_susp_log_level,
"--- Overall vchiq instance use count %d", vc_use_count);
+ kfree(service_data);
+
vchiq_dump_platform_use_state(state);
}
@@ -3620,19 +3625,19 @@ static int vchiq_probe(struct platform_device *pdev)
goto failed_device_create;
/* create debugfs entries */
- err = vchiq_debugfs_init();
- if (err != 0)
- goto failed_debugfs_init;
+ vchiq_debugfs_init();
vchiq_log_info(vchiq_arm_log_level,
"vchiq: initialised - version %d (min %d), device %d.%d",
VCHIQ_VERSION, VCHIQ_VERSION_MIN,
MAJOR(vchiq_devid), MINOR(vchiq_devid));
+ bcm2835_camera = platform_device_register_data(&pdev->dev,
+ "bcm2835-camera", -1,
+ NULL, 0);
+
return 0;
-failed_debugfs_init:
- device_destroy(vchiq_class, vchiq_devid);
failed_device_create:
class_destroy(vchiq_class);
failed_class_create:
@@ -3646,6 +3651,7 @@ failed_platform_init:
static int vchiq_remove(struct platform_device *pdev)
{
+ platform_device_unregister(bcm2835_camera);
vchiq_debugfs_deinit();
device_destroy(vchiq_class, vchiq_devid);
class_destroy(vchiq_class);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 5d28fff46557..7642ced31436 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -601,6 +601,7 @@ reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking)
}
if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
+ up(&state->slot_available_event);
pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
return NULL;
}
@@ -619,10 +620,9 @@ reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking)
/* Called by the recycle thread. */
static void
-process_free_queue(VCHIQ_STATE_T *state)
+process_free_queue(VCHIQ_STATE_T *state, BITSET_T *service_found, size_t length)
{
VCHIQ_SHARED_STATE_T *local = state->local;
- BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
int slot_queue_available;
/* Find slots which have been freed by the other side, and return them
@@ -655,7 +655,7 @@ process_free_queue(VCHIQ_STATE_T *state)
/* Initialise the bitmask for services which have used this
** slot */
- BITSET_ZERO(service_found);
+ memset(service_found, 0, length);
pos = 0;
@@ -1197,8 +1197,8 @@ release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
SLOT_INDEX_FROM_INFO(state, slot_info);
state->remote->slot_queue_recycle = slot_queue_recycle + 1;
vchiq_log_info(vchiq_core_log_level,
- "%d: release_slot %d - recycle->%x",
- state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
+ "%d: %s %d - recycle->%x", state->id, __func__,
+ SLOT_INDEX_FROM_INFO(state, slot_info),
state->remote->slot_queue_recycle);
/* A write barrier is necessary, but remote_event_signal
@@ -2182,11 +2182,20 @@ recycle_func(void *v)
{
VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
VCHIQ_SHARED_STATE_T *local = state->local;
+ BITSET_T *found;
+ size_t length;
+
+ length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
+
+ found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
+ GFP_KERNEL);
+ if (!found)
+ return -ENOMEM;
while (1) {
remote_event_wait(state, &local->recycle);
- process_free_queue(state);
+ process_free_queue(state, found, length);
}
return 0;
}
@@ -2329,8 +2338,8 @@ vchiq_init_slots(void *mem_base, int mem_size)
if (num_slots < 4) {
vchiq_log_error(vchiq_core_log_level,
- "vchiq_init_slots - insufficient memory %x bytes",
- mem_size);
+ "%s - insufficient memory %x bytes",
+ __func__, mem_size);
return NULL;
}
@@ -2544,7 +2553,6 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
return VCHIQ_ERROR;
}
set_user_nice(state->slot_handler_thread, -19);
- wake_up_process(state->slot_handler_thread);
snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
state->recycle_thread = kthread_create(&recycle_func,
@@ -2554,10 +2562,9 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
- return VCHIQ_ERROR;
+ goto fail_free_handler_thread;
}
set_user_nice(state->recycle_thread, -19);
- wake_up_process(state->recycle_thread);
snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
state->sync_thread = kthread_create(&sync_func,
@@ -2567,9 +2574,12 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
- return VCHIQ_ERROR;
+ goto fail_free_recycle_thread;
}
set_user_nice(state->sync_thread, -20);
+
+ wake_up_process(state->slot_handler_thread);
+ wake_up_process(state->recycle_thread);
wake_up_process(state->sync_thread);
vchiq_states[0] = state;
@@ -2578,6 +2588,13 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
local->initialised = 1;
return status;
+
+fail_free_recycle_thread:
+ kthread_stop(state->recycle_thread);
+fail_free_handler_thread:
+ kthread_stop(state->slot_handler_thread);
+
+ return VCHIQ_ERROR;
}
/* Called from application thread when a client or server service is created. */
@@ -2861,9 +2878,9 @@ close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
break;
default:
vchiq_log_error(vchiq_core_log_level,
- "close_service_complete(%x) called in state %s",
+ "%s(%x) called in state %s", __func__,
service->handle, srvstate_names[service->srvstate]);
- WARN(1, "close_service_complete in unexpected state\n");
+ WARN(1, "%s in unexpected state\n", __func__);
return VCHIQ_ERROR;
}
@@ -2915,9 +2932,9 @@ vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
case VCHIQ_SRVSTATE_CLOSEWAIT:
if (close_recvd)
vchiq_log_error(vchiq_core_log_level,
- "vchiq_close_service_internal(1) called "
+ "%s(1) called "
"in state %s",
- srvstate_names[service->srvstate]);
+ __func__, srvstate_names[service->srvstate]);
else if (is_server) {
if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
status = VCHIQ_ERROR;
@@ -3024,7 +3041,7 @@ vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
default:
vchiq_log_error(vchiq_core_log_level,
- "vchiq_close_service_internal(%d) called in state %s",
+ "%s(%d) called in state %s", __func__,
close_recvd, srvstate_names[service->srvstate]);
break;
}
@@ -3145,8 +3162,8 @@ vchiq_pause_internal(VCHIQ_STATE_T *state)
break;
default:
vchiq_log_error(vchiq_core_log_level,
- "vchiq_pause_internal in state %s\n",
- conn_state_names[state->conn_state]);
+ "%s in state %s\n",
+ __func__, conn_state_names[state->conn_state]);
status = VCHIQ_ERROR;
VCHIQ_STATS_INC(state, error_count);
break;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index afc1d8144a84..10deb5745dda 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -147,7 +147,6 @@ vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
#define BITSET_SIZE(b) ((b + 31) >> 5)
#define BITSET_WORD(b) (b >> 5)
#define BITSET_BIT(b) (1 << (b & 31))
-#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 766b4fe5f32c..38805504d462 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -51,25 +51,14 @@
#define VCHIQ_LOG_INFO_STR "info"
#define VCHIQ_LOG_TRACE_STR "trace"
-/* Top-level debug info */
-struct vchiq_debugfs_info {
- /* Global 'vchiq' debugfs entry used by all instances */
- struct dentry *vchiq_cfg_dir;
-
- /* one entry per client process */
- struct dentry *clients;
-
- /* log categories */
- struct dentry *log_categories;
-};
-
-static struct vchiq_debugfs_info debugfs_info;
+/* Global 'vchiq' debugfs and clients entry used by all instances */
+struct dentry *vchiq_dbg_dir;
+struct dentry *vchiq_dbg_clients;
/* Log category debugfs entries */
struct vchiq_debugfs_log_entry {
const char *name;
- int *plevel;
- struct dentry *dir;
+ void *plevel;
};
static struct vchiq_debugfs_log_entry vchiq_debugfs_log_entries[] = {
@@ -81,9 +70,6 @@ static struct vchiq_debugfs_log_entry vchiq_debugfs_log_entries[] = {
};
static int n_log_entries = ARRAY_SIZE(vchiq_debugfs_log_entries);
-static struct dentry *vchiq_clients_top(void);
-static struct dentry *vchiq_debugfs_top(void);
-
static int debugfs_log_show(struct seq_file *f, void *offset)
{
int *levp = f->private;
@@ -157,36 +143,6 @@ static const struct file_operations debugfs_log_fops = {
.release = single_release,
};
-/* create an entry under <debugfs>/vchiq/log for each log category */
-static int vchiq_debugfs_create_log_entries(struct dentry *top)
-{
- struct dentry *dir;
- size_t i;
- int ret = 0;
-
- dir = debugfs_create_dir("log", vchiq_debugfs_top());
- if (!dir)
- return -ENOMEM;
- debugfs_info.log_categories = dir;
-
- for (i = 0; i < n_log_entries; i++) {
- void *levp = (void *)vchiq_debugfs_log_entries[i].plevel;
-
- dir = debugfs_create_file(vchiq_debugfs_log_entries[i].name,
- 0644,
- debugfs_info.log_categories,
- levp,
- &debugfs_log_fops);
- if (!dir) {
- ret = -ENOMEM;
- break;
- }
-
- vchiq_debugfs_log_entries[i].dir = dir;
- }
- return ret;
-}
-
static int debugfs_usecount_show(struct seq_file *f, void *offset)
{
VCHIQ_INSTANCE_T instance = f->private;
@@ -268,43 +224,21 @@ static const struct file_operations debugfs_trace_fops = {
};
/* add an instance (process) to the debugfs entries */
-int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
{
char pidstr[16];
- struct dentry *top, *use_count, *trace;
- struct dentry *clients = vchiq_clients_top();
+ struct dentry *top;
snprintf(pidstr, sizeof(pidstr), "%d",
vchiq_instance_get_pid(instance));
- top = debugfs_create_dir(pidstr, clients);
- if (!top)
- goto fail_top;
+ top = debugfs_create_dir(pidstr, vchiq_dbg_clients);
- use_count = debugfs_create_file("use_count",
- 0444, top,
- instance,
- &debugfs_usecount_fops);
- if (!use_count)
- goto fail_use_count;
-
- trace = debugfs_create_file("trace",
- 0644, top,
- instance,
- &debugfs_trace_fops);
- if (!trace)
- goto fail_trace;
+ debugfs_create_file("use_count", 0444, top, instance,
+ &debugfs_usecount_fops);
+ debugfs_create_file("trace", 0644, top, instance, &debugfs_trace_fops);
vchiq_instance_get_debugfs_node(instance)->dentry = top;
-
- return 0;
-
-fail_trace:
- debugfs_remove(use_count);
-fail_use_count:
- debugfs_remove(top);
-fail_top:
- return -ENOMEM;
}
void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
@@ -314,64 +248,41 @@ void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
debugfs_remove_recursive(node->dentry);
}
-int vchiq_debugfs_init(void)
+void vchiq_debugfs_init(void)
{
- BUG_ON(debugfs_info.vchiq_cfg_dir != NULL);
-
- debugfs_info.vchiq_cfg_dir = debugfs_create_dir("vchiq", NULL);
- if (debugfs_info.vchiq_cfg_dir == NULL)
- goto fail;
-
- debugfs_info.clients = debugfs_create_dir("clients",
- vchiq_debugfs_top());
- if (!debugfs_info.clients)
- goto fail;
-
- if (vchiq_debugfs_create_log_entries(vchiq_debugfs_top()) != 0)
- goto fail;
+ struct dentry *dir;
+ int i;
- return 0;
+ vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL);
+ vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir);
-fail:
- vchiq_debugfs_deinit();
- vchiq_log_error(vchiq_arm_log_level,
- "%s: failed to create debugfs directory",
- __func__);
+ /* create an entry under <debugfs>/vchiq/log for each log category */
+ dir = debugfs_create_dir("log", vchiq_dbg_dir);
- return -ENOMEM;
+ for (i = 0; i < n_log_entries; i++)
+ debugfs_create_file(vchiq_debugfs_log_entries[i].name, 0644,
+ dir, vchiq_debugfs_log_entries[i].plevel,
+ &debugfs_log_fops);
}
/* remove all the debugfs entries */
void vchiq_debugfs_deinit(void)
{
- debugfs_remove_recursive(vchiq_debugfs_top());
-}
-
-static struct dentry *vchiq_clients_top(void)
-{
- return debugfs_info.clients;
-}
-
-static struct dentry *vchiq_debugfs_top(void)
-{
- BUG_ON(debugfs_info.vchiq_cfg_dir == NULL);
- return debugfs_info.vchiq_cfg_dir;
+ debugfs_remove_recursive(vchiq_dbg_dir);
}
#else /* CONFIG_DEBUG_FS */
-int vchiq_debugfs_init(void)
+void vchiq_debugfs_init(void)
{
- return 0;
}
void vchiq_debugfs_deinit(void)
{
}
-int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
{
- return 0;
}
void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
index 1d95e3d70621..3af6397ada19 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
@@ -40,11 +40,11 @@ typedef struct vchiq_debugfs_node_struct {
struct dentry *dentry;
} VCHIQ_DEBUGFS_NODE_T;
-int vchiq_debugfs_init(void);
+void vchiq_debugfs_init(void);
void vchiq_debugfs_deinit(void);
-int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance);
+void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance);
void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 29984f9795c7..dddc828390d0 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -590,7 +590,6 @@ static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
VCHI_CALLBACK_MSG_AVAILABLE, NULL);
goto done;
- break;
case VCHIQ_BULK_TRANSMIT_DONE:
service->callback(service->callback_param,
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index ea0a4b57852c..52e9e6b90b56 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -557,10 +557,7 @@ CARDvSafeResetTx(
*
* Return Value: none
*/
-void
-CARDvSafeResetRx(
- struct vnt_private *priv
-)
+void CARDvSafeResetRx(struct vnt_private *priv)
{
unsigned int uu;
struct vnt_rx_desc *pDesc;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index fbc4bc68144c..1ab0e8562d40 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -19,6 +19,7 @@
* device_print_info - print out resource
* device_rx_srv - rx service function
* device_alloc_rx_buf - rx buffer pre-allocated function
+ * device_free_rx_buf - free rx buffer function
* device_free_tx_buf - free tx buffer function
* device_init_rd0_ring- initial rd dma0 ring
* device_init_rd1_ring- initial rd dma1 ring
@@ -124,14 +125,15 @@ static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
static void device_free_info(struct vnt_private *priv);
static void device_print_info(struct vnt_private *priv);
-static void device_init_rd0_ring(struct vnt_private *priv);
-static void device_init_rd1_ring(struct vnt_private *priv);
-static void device_init_td0_ring(struct vnt_private *priv);
-static void device_init_td1_ring(struct vnt_private *priv);
+static int device_init_rd0_ring(struct vnt_private *priv);
+static int device_init_rd1_ring(struct vnt_private *priv);
+static int device_init_td0_ring(struct vnt_private *priv);
+static int device_init_td1_ring(struct vnt_private *priv);
static int device_rx_srv(struct vnt_private *priv, unsigned int idx);
static int device_tx_srv(struct vnt_private *priv, unsigned int idx);
static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rx_desc *);
+static void device_free_rx_buf(struct vnt_private *priv, struct vnt_rx_desc *rd);
static void device_init_registers(struct vnt_private *priv);
static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *);
static void device_free_td0_ring(struct vnt_private *priv);
@@ -528,20 +530,28 @@ static void device_free_rings(struct vnt_private *priv)
priv->tx0_bufs, priv->tx_bufs_dma0);
}
-static void device_init_rd0_ring(struct vnt_private *priv)
+static int device_init_rd0_ring(struct vnt_private *priv)
{
int i;
dma_addr_t curr = priv->rd0_pool_dma;
struct vnt_rx_desc *desc;
+ int ret;
/* Init the RD0 ring entries */
for (i = 0; i < priv->opts.rx_descs0;
i ++, curr += sizeof(struct vnt_rx_desc)) {
desc = &priv->aRD0Ring[i];
desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
+ if (!desc->rd_info) {
+ ret = -ENOMEM;
+ goto err_free_desc;
+ }
- if (!device_alloc_rx_buf(priv, desc))
+ if (!device_alloc_rx_buf(priv, desc)) {
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
+ ret = -ENOMEM;
+ goto err_free_rd;
+ }
desc->next = &priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0];
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
@@ -550,22 +560,44 @@ static void device_init_rd0_ring(struct vnt_private *priv)
if (i > 0)
priv->aRD0Ring[i-1].next_desc = cpu_to_le32(priv->rd0_pool_dma);
priv->pCurrRD[0] = &priv->aRD0Ring[0];
+
+ return 0;
+
+err_free_rd:
+ kfree(desc->rd_info);
+
+err_free_desc:
+ while (--i) {
+ desc = &priv->aRD0Ring[i];
+ device_free_rx_buf(priv, desc);
+ kfree(desc->rd_info);
+ }
+
+ return ret;
}
-static void device_init_rd1_ring(struct vnt_private *priv)
+static int device_init_rd1_ring(struct vnt_private *priv)
{
int i;
dma_addr_t curr = priv->rd1_pool_dma;
struct vnt_rx_desc *desc;
+ int ret;
/* Init the RD1 ring entries */
for (i = 0; i < priv->opts.rx_descs1;
i ++, curr += sizeof(struct vnt_rx_desc)) {
desc = &priv->aRD1Ring[i];
desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
+ if (!desc->rd_info) {
+ ret = -ENOMEM;
+ goto err_free_desc;
+ }
- if (!device_alloc_rx_buf(priv, desc))
+ if (!device_alloc_rx_buf(priv, desc)) {
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
+ ret = -ENOMEM;
+ goto err_free_rd;
+ }
desc->next = &priv->aRD1Ring[(i+1) % priv->opts.rx_descs1];
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
@@ -574,6 +606,20 @@ static void device_init_rd1_ring(struct vnt_private *priv)
if (i > 0)
priv->aRD1Ring[i-1].next_desc = cpu_to_le32(priv->rd1_pool_dma);
priv->pCurrRD[1] = &priv->aRD1Ring[0];
+
+ return 0;
+
+err_free_rd:
+ kfree(desc->rd_info);
+
+err_free_desc:
+ while (--i) {
+ desc = &priv->aRD1Ring[i];
+ device_free_rx_buf(priv, desc);
+ kfree(desc->rd_info);
+ }
+
+ return ret;
}
static void device_free_rd0_ring(struct vnt_private *priv)
@@ -582,13 +628,8 @@ static void device_free_rd0_ring(struct vnt_private *priv)
for (i = 0; i < priv->opts.rx_descs0; i++) {
struct vnt_rx_desc *desc = &priv->aRD0Ring[i];
- struct vnt_rd_info *rd_info = desc->rd_info;
-
- dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
- priv->rx_buf_sz, DMA_FROM_DEVICE);
-
- dev_kfree_skb(rd_info->skb);
+ device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
}
}
@@ -599,28 +640,28 @@ static void device_free_rd1_ring(struct vnt_private *priv)
for (i = 0; i < priv->opts.rx_descs1; i++) {
struct vnt_rx_desc *desc = &priv->aRD1Ring[i];
- struct vnt_rd_info *rd_info = desc->rd_info;
-
- dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
- priv->rx_buf_sz, DMA_FROM_DEVICE);
-
- dev_kfree_skb(rd_info->skb);
+ device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
}
}
-static void device_init_td0_ring(struct vnt_private *priv)
+static int device_init_td0_ring(struct vnt_private *priv)
{
int i;
dma_addr_t curr;
struct vnt_tx_desc *desc;
+ int ret;
curr = priv->td0_pool_dma;
for (i = 0; i < priv->opts.tx_descs[0];
i++, curr += sizeof(struct vnt_tx_desc)) {
desc = &priv->apTD0Rings[i];
desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
+ if (!desc->td_info) {
+ ret = -ENOMEM;
+ goto err_free_desc;
+ }
desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
@@ -632,13 +673,24 @@ static void device_init_td0_ring(struct vnt_private *priv)
if (i > 0)
priv->apTD0Rings[i-1].next_desc = cpu_to_le32(priv->td0_pool_dma);
priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
+
+ return 0;
+
+err_free_desc:
+ while (--i) {
+ desc = &priv->apTD0Rings[i];
+ kfree(desc->td_info);
+ }
+
+ return ret;
}
-static void device_init_td1_ring(struct vnt_private *priv)
+static int device_init_td1_ring(struct vnt_private *priv)
{
int i;
dma_addr_t curr;
struct vnt_tx_desc *desc;
+ int ret;
/* Init the TD ring entries */
curr = priv->td1_pool_dma;
@@ -646,6 +698,10 @@ static void device_init_td1_ring(struct vnt_private *priv)
i++, curr += sizeof(struct vnt_tx_desc)) {
desc = &priv->apTD1Rings[i];
desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
+ if (!desc->td_info) {
+ ret = -ENOMEM;
+ goto err_free_desc;
+ }
desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;
@@ -657,6 +713,16 @@ static void device_init_td1_ring(struct vnt_private *priv)
if (i > 0)
priv->apTD1Rings[i-1].next_desc = cpu_to_le32(priv->td1_pool_dma);
priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
+
+ return 0;
+
+err_free_desc:
+ while (--i) {
+ desc = &priv->apTD1Rings[i];
+ kfree(desc->td_info);
+ }
+
+ return ret;
}
static void device_free_td0_ring(struct vnt_private *priv)
@@ -745,6 +811,16 @@ static bool device_alloc_rx_buf(struct vnt_private *priv,
return true;
}
+static void device_free_rx_buf(struct vnt_private *priv,
+ struct vnt_rx_desc *rd)
+{
+ struct vnt_rd_info *rd_info = rd->rd_info;
+
+ dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
+ priv->rx_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb(rd_info->skb);
+}
+
static const u8 fallback_rate0[5][5] = {
{RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
{RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
@@ -1161,14 +1237,22 @@ static int vnt_start(struct ieee80211_hw *hw)
IRQF_SHARED, "vt6655", priv);
if (ret) {
dev_dbg(&priv->pcid->dev, "failed to start irq\n");
- return ret;
+ goto err_free_rings;
}
dev_dbg(&priv->pcid->dev, "call device init rd0 ring\n");
- device_init_rd0_ring(priv);
- device_init_rd1_ring(priv);
- device_init_td0_ring(priv);
- device_init_td1_ring(priv);
+ ret = device_init_rd0_ring(priv);
+ if (ret)
+ goto err_free_irq;
+ ret = device_init_rd1_ring(priv);
+ if (ret)
+ goto err_free_rd0_ring;
+ ret = device_init_td0_ring(priv);
+ if (ret)
+ goto err_free_rd1_ring;
+ ret = device_init_td1_ring(priv);
+ if (ret)
+ goto err_free_td0_ring;
device_init_registers(priv);
@@ -1178,6 +1262,18 @@ static int vnt_start(struct ieee80211_hw *hw)
ieee80211_wake_queues(hw);
return 0;
+
+err_free_td0_ring:
+ device_free_td0_ring(priv);
+err_free_rd1_ring:
+ device_free_rd1_ring(priv);
+err_free_rd0_ring:
+ device_free_rd0_ring(priv);
+err_free_irq:
+ free_irq(priv->pcid->irq, priv);
+err_free_rings:
+ device_free_rings(priv);
+ return ret;
}
static void vnt_stop(struct ieee80211_hw *hw)
diff --git a/drivers/staging/wilc1000/TODO b/drivers/staging/wilc1000/TODO
index ae61b55f14fd..d123324bd5c9 100644
--- a/drivers/staging/wilc1000/TODO
+++ b/drivers/staging/wilc1000/TODO
@@ -1,7 +1,4 @@
TODO:
-- remove the defined feature as kernel versions
-- remove OS wrapper functions
-- remove custom debug and tracing functions
- rework comments and function headers(also coding style)
- Move handling for each individual members of 'union message_body' out
into a separate 'struct work_struct' and completely remove the multiplexer
@@ -9,10 +6,9 @@ TODO:
implementation of each message handler into the callsite of the function
that currently queues the 'host_if_msg'.
- make spi and sdio components coexist in one build
-- turn compile-time platform configuration (BEAGLE_BOARD,
- PANDA_BOARD, PLAT_WMS8304, PLAT_RKXXXX, CUSTOMER_PLATFORM, ...)
- into run-time options that are read from DT
- support soft-ap and p2p mode
- support resume/suspend function
-- replace SIOCDEVPRIVATE commands with generic API functions
-- use wext-core handling instead of private SIOCSIWPRIV implementation
+- convert all uses of the old GPIO API from <linux/gpio.h> to the
+ GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
+ lines from device tree, ACPI or board files, board files should
+ use <linux/gpio/machine.h>
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index db66b1cc80b3..44816024f79c 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -1,9 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "coreconfigurator.h"
-#include "wilc_wlan_if.h"
-#include "wilc_wlan.h"
-#include <linux/errno.h>
-#include <linux/slab.h>
+
#define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \
BEACON_INTERVAL_LEN + CAP_INFO_LEN)
@@ -210,16 +207,6 @@ static inline u16 get_cap_info(u8 *data)
return cap_info;
}
-static inline u16 get_assoc_resp_cap_info(u8 *data)
-{
- u16 cap_info;
-
- cap_info = data[0];
- cap_info |= (data[1] << 8);
-
- return cap_info;
-}
-
static inline u16 get_asoc_status(u8 *data)
{
u16 asoc_status;
@@ -228,16 +215,6 @@ static inline u16 get_asoc_status(u8 *data)
return (asoc_status << 8) | data[2];
}
-static inline u16 get_asoc_id(u8 *data)
-{
- u16 asoc_id;
-
- asoc_id = data[4];
- asoc_id |= (data[5] << 8);
-
- return asoc_id;
-}
-
static u8 *get_tim_elm(u8 *msa, u16 rx_len, u16 tag_param_offset)
{
u16 index;
@@ -272,78 +249,68 @@ s32 wilc_parse_network_info(u8 *msg_buffer,
{
struct network_info *network_info = NULL;
u8 msg_type = 0;
- u8 msg_id = 0;
- u16 msg_len = 0;
-
- u16 wid_id = (u16)WID_NIL;
u16 wid_len = 0;
u8 *wid_val = NULL;
+ u8 *msa = NULL;
+ u16 rx_len = 0;
+ u8 *tim_elm = NULL;
+ u8 *ies = NULL;
+ u16 ies_len = 0;
+ u8 index = 0;
+ u32 tsf_lo;
+ u32 tsf_hi;
msg_type = msg_buffer[0];
if ('N' != msg_type)
return -EFAULT;
- msg_id = msg_buffer[1];
- msg_len = MAKE_WORD16(msg_buffer[2], msg_buffer[3]);
- wid_id = MAKE_WORD16(msg_buffer[4], msg_buffer[5]);
wid_len = MAKE_WORD16(msg_buffer[6], msg_buffer[7]);
wid_val = &msg_buffer[8];
- {
- u8 *msa = NULL;
- u16 rx_len = 0;
- u8 *tim_elm = NULL;
- u8 *ies = NULL;
- u16 ies_len = 0;
- u8 index = 0;
- u32 tsf_lo;
- u32 tsf_hi;
-
- network_info = kzalloc(sizeof(*network_info), GFP_KERNEL);
- if (!network_info)
- return -ENOMEM;
+ network_info = kzalloc(sizeof(*network_info), GFP_KERNEL);
+ if (!network_info)
+ return -ENOMEM;
- network_info->rssi = wid_val[0];
+ network_info->rssi = wid_val[0];
- msa = &wid_val[1];
+ msa = &wid_val[1];
- rx_len = wid_len - 1;
- network_info->cap_info = get_cap_info(msa);
- network_info->tsf_lo = get_beacon_timestamp_lo(msa);
+ rx_len = wid_len - 1;
+ network_info->cap_info = get_cap_info(msa);
+ network_info->tsf_lo = get_beacon_timestamp_lo(msa);
- tsf_lo = get_beacon_timestamp_lo(msa);
- tsf_hi = get_beacon_timestamp_hi(msa);
+ tsf_lo = get_beacon_timestamp_lo(msa);
+ tsf_hi = get_beacon_timestamp_hi(msa);
- network_info->tsf_hi = tsf_lo | ((u64)tsf_hi << 32);
+ network_info->tsf_hi = tsf_lo | ((u64)tsf_hi << 32);
- get_ssid(msa, network_info->ssid, &network_info->ssid_len);
- get_BSSID(msa, network_info->bssid);
+ get_ssid(msa, network_info->ssid, &network_info->ssid_len);
+ get_BSSID(msa, network_info->bssid);
- network_info->ch = get_current_channel_802_11n(msa, rx_len
- + FCS_LEN);
+ network_info->ch = get_current_channel_802_11n(msa, rx_len
+ + FCS_LEN);
- index = MAC_HDR_LEN + TIME_STAMP_LEN;
+ index = MAC_HDR_LEN + TIME_STAMP_LEN;
- network_info->beacon_period = get_beacon_period(msa + index);
+ network_info->beacon_period = get_beacon_period(msa + index);
- index += BEACON_INTERVAL_LEN + CAP_INFO_LEN;
+ index += BEACON_INTERVAL_LEN + CAP_INFO_LEN;
- tim_elm = get_tim_elm(msa, rx_len + FCS_LEN, index);
- if (tim_elm)
- network_info->dtim_period = tim_elm[3];
- ies = &msa[TAG_PARAM_OFFSET];
- ies_len = rx_len - TAG_PARAM_OFFSET;
+ tim_elm = get_tim_elm(msa, rx_len + FCS_LEN, index);
+ if (tim_elm)
+ network_info->dtim_period = tim_elm[3];
+ ies = &msa[TAG_PARAM_OFFSET];
+ ies_len = rx_len - TAG_PARAM_OFFSET;
- if (ies_len > 0) {
- network_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
- if (!network_info->ies) {
- kfree(network_info);
- return -ENOMEM;
- }
+ if (ies_len > 0) {
+ network_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
+ if (!network_info->ies) {
+ kfree(network_info);
+ return -ENOMEM;
}
- network_info->ies_len = ies_len;
}
+ network_info->ies_len = ies_len;
*ret_network_info = network_info;
@@ -351,38 +318,23 @@ s32 wilc_parse_network_info(u8 *msg_buffer,
}
s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
- struct connect_resp_info **ret_connect_resp_info)
+ struct connect_info *ret_conn_info)
{
- struct connect_resp_info *connect_resp_info = NULL;
- u16 assoc_resp_len = 0;
u8 *ies = NULL;
u16 ies_len = 0;
- connect_resp_info = kzalloc(sizeof(*connect_resp_info), GFP_KERNEL);
- if (!connect_resp_info)
- return -ENOMEM;
-
- assoc_resp_len = (u16)buffer_len;
-
- connect_resp_info->status = get_asoc_status(buffer);
- if (connect_resp_info->status == SUCCESSFUL_STATUSCODE) {
- connect_resp_info->capability = get_assoc_resp_cap_info(buffer);
- connect_resp_info->assoc_id = get_asoc_id(buffer);
-
+ ret_conn_info->status = get_asoc_status(buffer);
+ if (ret_conn_info->status == SUCCESSFUL_STATUSCODE) {
ies = &buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN];
- ies_len = assoc_resp_len - (CAP_INFO_LEN + STATUS_CODE_LEN +
- AID_LEN);
+ ies_len = buffer_len - (CAP_INFO_LEN + STATUS_CODE_LEN +
+ AID_LEN);
- connect_resp_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
- if (!connect_resp_info->ies) {
- kfree(connect_resp_info);
+ ret_conn_info->resp_ies = kmemdup(ies, ies_len, GFP_KERNEL);
+ if (!ret_conn_info->resp_ies)
return -ENOMEM;
- }
- connect_resp_info->ies_len = ies_len;
+ ret_conn_info->resp_ies_len = ies_len;
}
- *ret_connect_resp_info = connect_resp_info;
-
return 0;
}
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index 3f5da8c58815..55b5531856f8 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -1,34 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
-
-/*!
- * @file coreconfigurator.h
- * @brief
- * @author
- * @sa coreconfigurator.c
- * @date 1 Mar 2012
- * @version 1.0
- */
-
#ifndef CORECONFIGURATOR_H
#define CORECONFIGURATOR_H
#include "wilc_wlan_if.h"
-#define NUM_BASIC_SWITCHES 45
-#define NUM_FHSS_SWITCHES 0
-
#define NUM_RSSI 5
-#ifdef MAC_802_11N
-#define NUM_11N_BASIC_SWITCHES 25
-#define NUM_11N_HUT_SWITCHES 47
-#else
-#define NUM_11N_BASIC_SWITCHES 0
-#define NUM_11N_HUT_SWITCHES 0
-#endif
-
#define MAC_HDR_LEN 24
-#define MAX_SSID_LEN 33
#define FCS_LEN 4
#define TIME_STAMP_LEN 8
#define BEACON_INTERVAL_LEN 2
@@ -41,13 +19,8 @@
#define GET_CFG 1
#define MAX_STRING_LEN 256
-#define MAX_SURVEY_RESULT_FRAG_SIZE MAX_STRING_LEN
-#define SURVEY_RESULT_LENGTH 44
#define MAX_ASSOC_RESP_FRAME_SIZE MAX_STRING_LEN
-#define MAC_CONNECTED 1
-#define MAC_DISCONNECTED 0
-
#define MAKE_WORD16(lsb, msb) ((((u16)(msb) << 8) & 0xFF00) | (lsb))
#define MAKE_WORD32(lsw, msw) ((((u32)(msw) << 16) & 0xFFFF0000) | (lsw))
@@ -98,14 +71,6 @@ struct network_info {
u64 tsf_hi;
};
-struct connect_resp_info {
- u16 capability;
- u16 status;
- u16 assoc_id;
- u8 *ies;
- u16 ies_len;
-};
-
struct connect_info {
u8 bssid[6];
u8 *req_ies;
@@ -124,7 +89,7 @@ struct disconnect_info {
s32 wilc_parse_network_info(u8 *msg_buffer,
struct network_info **ret_network_info);
s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
- struct connect_resp_info **ret_connect_resp_info);
+ struct connect_info *ret_conn_info);
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 885f5fcead77..0aaae33f97b9 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1,18 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
-#include "host_interface.h"
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include "coreconfigurator.h"
-#include "wilc_wlan.h"
-#include "wilc_wlan_if.h"
-#include <linux/etherdevice.h>
#include "wilc_wfi_netdevice.h"
#define HOST_IF_MSG_SCAN 0
@@ -53,14 +39,13 @@
#define HOST_IF_SCAN_TIMEOUT 4000
#define HOST_IF_CONNECT_TIMEOUT 9500
-#define BA_SESSION_DEFAULT_BUFFER_SIZE 16
-#define BA_SESSION_DEFAULT_TIMEOUT 1000
-#define BLOCK_ACK_REQ_SIZE 0x14
#define FALSE_FRMWR_CHANNEL 100
#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
#define DEFAULT_LINK_SPEED 72
+#define REAL_JOIN_REQ 0
+
struct host_if_wpa_attr {
u8 *key;
const u8 *mac_addr;
@@ -232,7 +217,7 @@ struct join_bss_param {
static struct host_if_drv *terminated_handle;
bool wilc_optaining_ip;
-static u8 P2P_LISTEN_STATE;
+static u8 p2p_listen_state;
static struct workqueue_struct *hif_workqueue;
static struct completion hif_thread_comp;
static struct completion hif_driver_comp;
@@ -245,19 +230,12 @@ u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
static u8 rcv_assoc_resp[MAX_ASSOC_RESP_FRAME_SIZE];
-static bool scan_while_connected;
-
static s8 rssi;
static u8 set_ip[2][4];
static u8 get_ip[2][4];
static u32 inactive_time;
-static u8 del_beacon;
static u32 clients_count;
-#define REAL_JOIN_REQ 0
-#define FLUSHED_JOIN_REQ 1
-#define FLUSHED_BYTE_POS 79
-
static void *host_int_parse_join_bss_param(struct network_info *info);
static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
static s32 handle_scan_done(struct wilc_vif *vif, enum scan_event evt);
@@ -599,7 +577,7 @@ static void handle_cfg_param(struct wilc_vif *vif, struct cfg_param_attr *param)
wid_list[i].size = sizeof(char);
hif_drv->cfg_values.preamble_type = preamble_type;
} else {
- netdev_err(vif->ndev, "Preamle Range(0~2) over\n");
+ netdev_err(vif->ndev, "Preamble Range(0~2) over\n");
goto unlock;
}
i++;
@@ -842,11 +820,6 @@ static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
wid_list[index].val = (s8 *)&scan_info->src;
index++;
- if (hif_drv->hif_state == HOST_IF_CONNECTED)
- scan_while_connected = true;
- else if (hif_drv->hif_state == HOST_IF_IDLE)
- scan_while_connected = false;
-
result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
index,
wilc_get_vif_idx(vif));
@@ -1144,10 +1117,8 @@ error:
}
conn_attr->result(CONN_DISCONN_EVENT_CONN_RESP,
- &conn_info,
- MAC_DISCONNECTED,
- NULL,
- conn_attr->arg);
+ &conn_info, MAC_STATUS_DISCONNECTED,
+ NULL, conn_attr->arg);
hif_drv->hif_state = HOST_IF_IDLE;
kfree(conn_info.req_ies);
conn_info.req_ies = NULL;
@@ -1185,8 +1156,6 @@ static s32 handle_connect_timeout(struct wilc_vif *vif)
hif_drv->hif_state = HOST_IF_IDLE;
- scan_while_connected = false;
-
memset(&info, 0, sizeof(struct connect_info));
if (hif_drv->usr_conn_req.conn_result) {
@@ -1197,15 +1166,16 @@ static s32 handle_connect_timeout(struct wilc_vif *vif)
if (hif_drv->usr_conn_req.ies) {
info.req_ies_len = hif_drv->usr_conn_req.ies_len;
- info.req_ies = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL);
- memcpy(info.req_ies,
- hif_drv->usr_conn_req.ies,
- hif_drv->usr_conn_req.ies_len);
+ info.req_ies = kmemdup(hif_drv->usr_conn_req.ies,
+ hif_drv->usr_conn_req.ies_len,
+ GFP_KERNEL);
+ if (!info.req_ies)
+ return -ENOMEM;
}
hif_drv->usr_conn_req.conn_result(CONN_DISCONN_EVENT_CONN_RESP,
&info,
- MAC_DISCONNECTED,
+ MAC_STATUS_DISCONNECTED,
NULL,
hif_drv->usr_conn_req.arg);
@@ -1327,62 +1297,47 @@ static inline void host_int_free_user_conn_req(struct host_if_drv *hif_drv)
static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
u8 mac_status)
{
- struct connect_resp_info *connect_resp_info = NULL;
struct connect_info conn_info;
struct host_if_drv *hif_drv = vif->hif_drv;
memset(&conn_info, 0, sizeof(struct connect_info));
- if (mac_status == MAC_CONNECTED) {
- u32 rcvd_assoc_resp_info_len;
+ if (mac_status == MAC_STATUS_CONNECTED) {
+ u32 assoc_resp_info_len;
memset(rcv_assoc_resp, 0, MAX_ASSOC_RESP_FRAME_SIZE);
host_int_get_assoc_res_info(vif, rcv_assoc_resp,
MAX_ASSOC_RESP_FRAME_SIZE,
- &rcvd_assoc_resp_info_len);
+ &assoc_resp_info_len);
- if (rcvd_assoc_resp_info_len != 0) {
+ if (assoc_resp_info_len != 0) {
s32 err = 0;
- err = wilc_parse_assoc_resp_info(rcv_assoc_resp, rcvd_assoc_resp_info_len,
- &connect_resp_info);
- if (err) {
+ err = wilc_parse_assoc_resp_info(rcv_assoc_resp,
+ assoc_resp_info_len,
+ &conn_info);
+ if (err)
netdev_err(vif->ndev,
"wilc_parse_assoc_resp_info() returned error %d\n",
err);
- } else {
- conn_info.status = connect_resp_info->status;
-
- if (conn_info.status == SUCCESSFUL_STATUSCODE &&
- connect_resp_info->ies) {
- conn_info.resp_ies = kmemdup(connect_resp_info->ies,
- connect_resp_info->ies_len,
- GFP_KERNEL);
- if (conn_info.resp_ies)
- conn_info.resp_ies_len = connect_resp_info->ies_len;
- }
-
- kfree(connect_resp_info->ies);
- kfree(connect_resp_info);
- }
}
}
- if (mac_status == MAC_CONNECTED &&
+ if (mac_status == MAC_STATUS_CONNECTED &&
conn_info.status != SUCCESSFUL_STATUSCODE) {
netdev_err(vif->ndev,
- "Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n");
+ "Received MAC status is MAC_STATUS_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n");
eth_zero_addr(wilc_connected_ssid);
- } else if (mac_status == MAC_DISCONNECTED) {
- netdev_err(vif->ndev, "Received MAC status is MAC_DISCONNECTED\n");
+ } else if (mac_status == MAC_STATUS_DISCONNECTED) {
+ netdev_err(vif->ndev, "Received MAC status is MAC_STATUS_DISCONNECTED\n");
eth_zero_addr(wilc_connected_ssid);
}
if (hif_drv->usr_conn_req.bssid) {
memcpy(conn_info.bssid, hif_drv->usr_conn_req.bssid, 6);
- if (mac_status == MAC_CONNECTED &&
+ if (mac_status == MAC_STATUS_CONNECTED &&
conn_info.status == SUCCESSFUL_STATUSCODE) {
memcpy(hif_drv->assoc_bssid,
hif_drv->usr_conn_req.bssid, ETH_ALEN);
@@ -1402,7 +1357,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
&conn_info, mac_status, NULL,
hif_drv->usr_conn_req.arg);
- if (mac_status == MAC_CONNECTED &&
+ if (mac_status == MAC_STATUS_CONNECTED &&
conn_info.status == SUCCESSFUL_STATUSCODE) {
wilc_set_power_mgmt(vif, 0, 0);
@@ -1413,7 +1368,6 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
jiffies + msecs_to_jiffies(10000));
} else {
hif_drv->hif_state = HOST_IF_IDLE;
- scan_while_connected = false;
}
kfree(conn_info.resp_ies);
@@ -1428,6 +1382,7 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
{
struct disconnect_info disconn_info;
struct host_if_drv *hif_drv = vif->hif_drv;
+ wilc_connect_result conn_result = hif_drv->usr_conn_req.conn_result;
memset(&disconn_info, 0, sizeof(struct disconnect_info));
@@ -1440,13 +1395,12 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
disconn_info.ie = NULL;
disconn_info.ie_len = 0;
- if (hif_drv->usr_conn_req.conn_result) {
+ if (conn_result) {
wilc_optaining_ip = false;
wilc_set_power_mgmt(vif, 0, 0);
- hif_drv->usr_conn_req.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
- NULL, 0, &disconn_info,
- hif_drv->usr_conn_req.arg);
+ conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, NULL, 0,
+ &disconn_info, hif_drv->usr_conn_req.arg);
} else {
netdev_err(vif->ndev, "Connect result NULL\n");
}
@@ -1455,7 +1409,6 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
host_int_free_user_conn_req(hif_drv);
hif_drv->hif_state = HOST_IF_IDLE;
- scan_while_connected = false;
}
static s32 handle_rcvd_gnrl_async_info(struct wilc_vif *vif,
@@ -1463,13 +1416,7 @@ static s32 handle_rcvd_gnrl_async_info(struct wilc_vif *vif,
{
s32 result = 0;
u8 msg_type = 0;
- u8 msg_id = 0;
- u16 msg_len = 0;
- u16 wid_id = (u16)WID_NIL;
- u8 wid_len = 0;
u8 mac_status;
- u8 mac_status_reason_code;
- u8 mac_status_additional_info;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!rcvd_info->buffer) {
@@ -1503,19 +1450,13 @@ static s32 handle_rcvd_gnrl_async_info(struct wilc_vif *vif,
return -EFAULT;
}
- msg_id = rcvd_info->buffer[1];
- msg_len = MAKE_WORD16(rcvd_info->buffer[2], rcvd_info->buffer[3]);
- wid_id = MAKE_WORD16(rcvd_info->buffer[4], rcvd_info->buffer[5]);
- wid_len = rcvd_info->buffer[6];
mac_status = rcvd_info->buffer[7];
- mac_status_reason_code = rcvd_info->buffer[8];
- mac_status_additional_info = rcvd_info->buffer[9];
if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
host_int_parse_assoc_resp_info(vif, mac_status);
- } else if ((mac_status == MAC_DISCONNECTED) &&
+ } else if ((mac_status == MAC_STATUS_DISCONNECTED) &&
(hif_drv->hif_state == HOST_IF_CONNECTED)) {
host_int_handle_disconnect(vif);
- } else if ((mac_status == MAC_DISCONNECTED) &&
+ } else if ((mac_status == MAC_STATUS_DISCONNECTED) &&
(hif_drv->usr_scan_req.scan_result)) {
del_timer(&hif_drv->scan_timer);
if (hif_drv->usr_scan_req.scan_result)
@@ -1529,15 +1470,46 @@ static s32 handle_rcvd_gnrl_async_info(struct wilc_vif *vif,
return result;
}
+static int wilc_pmksa_key_copy(struct wilc_vif *vif, struct key_attr *hif_key)
+{
+ int i;
+ int ret;
+ struct wid wid;
+ u8 *key_buf;
+
+ key_buf = kmalloc((hif_key->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1,
+ GFP_KERNEL);
+ if (!key_buf)
+ return -ENOMEM;
+
+ key_buf[0] = hif_key->attr.pmkid.numpmkid;
+
+ for (i = 0; i < hif_key->attr.pmkid.numpmkid; i++) {
+ memcpy(key_buf + ((PMKSA_KEY_LEN * i) + 1),
+ hif_key->attr.pmkid.pmkidlist[i].bssid, ETH_ALEN);
+ memcpy(key_buf + ((PMKSA_KEY_LEN * i) + ETH_ALEN + 1),
+ hif_key->attr.pmkid.pmkidlist[i].pmkid, PMKID_LEN);
+ }
+
+ wid.id = (u16)WID_PMKID_INFO;
+ wid.type = WID_STR;
+ wid.val = (s8 *)key_buf;
+ wid.size = (hif_key->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1;
+
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
+
+ kfree(key_buf);
+
+ return ret;
+}
+
static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
{
- s32 result = 0;
+ int result = 0;
struct wid wid;
struct wid wid_list[5];
- u8 i;
u8 *key_buf;
- s8 s8idxarray[1];
- s8 ret = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
switch (hif_key->type) {
@@ -1556,8 +1528,10 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
key_buf = kmalloc(hif_key->attr.wep.key_len + 2,
GFP_KERNEL);
- if (!key_buf)
- return -ENOMEM;
+ if (!key_buf) {
+ result = -ENOMEM;
+ goto out_wep;
+ }
key_buf[0] = hif_key->attr.wep.index;
key_buf[1] = hif_key->attr.wep.key_len;
@@ -1577,9 +1551,12 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
wilc_get_vif_idx(vif));
kfree(key_buf);
} else if (hif_key->action & ADDKEY) {
- key_buf = kmalloc(hif_key->attr.wep.key_len + 2, GFP_KERNEL);
- if (!key_buf)
- return -ENOMEM;
+ key_buf = kmalloc(hif_key->attr.wep.key_len + 2,
+ GFP_KERNEL);
+ if (!key_buf) {
+ result = -ENOMEM;
+ goto out_wep;
+ }
key_buf[0] = hif_key->attr.wep.index;
memcpy(key_buf + 1, &hif_key->attr.wep.key_len, 1);
memcpy(key_buf + 2, hif_key->attr.wep.key,
@@ -1599,8 +1576,7 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
wid.id = (u16)WID_REMOVE_WEP_KEY;
wid.type = WID_STR;
- s8idxarray[0] = (s8)hif_key->attr.wep.index;
- wid.val = s8idxarray;
+ wid.val = (s8 *)&hif_key->attr.wep.index;
wid.size = 1;
result = wilc_send_config_pkt(vif, SET_CFG,
@@ -1616,6 +1592,7 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
&wid, 1,
wilc_get_vif_idx(vif));
}
+out_wep:
complete(&hif_drv->comp_test_key_block);
break;
@@ -1623,7 +1600,7 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
if (hif_key->action & ADDKEY_AP) {
key_buf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
if (!key_buf) {
- ret = -ENOMEM;
+ result = -ENOMEM;
goto out_wpa_rx_gtk;
}
@@ -1650,11 +1627,10 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
wilc_get_vif_idx(vif));
kfree(key_buf);
- complete(&hif_drv->comp_test_key_block);
} else if (hif_key->action & ADDKEY) {
key_buf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
if (!key_buf) {
- ret = -ENOMEM;
+ result = -ENOMEM;
goto out_wpa_rx_gtk;
}
@@ -1679,21 +1655,18 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
wilc_get_vif_idx(vif));
kfree(key_buf);
- complete(&hif_drv->comp_test_key_block);
}
out_wpa_rx_gtk:
+ complete(&hif_drv->comp_test_key_block);
kfree(hif_key->attr.wpa.key);
kfree(hif_key->attr.wpa.seq);
- if (ret)
- return ret;
-
break;
case WPA_PTK:
if (hif_key->action & ADDKEY_AP) {
key_buf = kmalloc(PTK_KEY_MSG_LEN + 1, GFP_KERNEL);
if (!key_buf) {
- ret = -ENOMEM;
+ result = -ENOMEM;
goto out_wpa_ptk;
}
@@ -1717,12 +1690,10 @@ out_wpa_rx_gtk:
wid_list, 2,
wilc_get_vif_idx(vif));
kfree(key_buf);
- complete(&hif_drv->comp_test_key_block);
} else if (hif_key->action & ADDKEY) {
key_buf = kmalloc(PTK_KEY_MSG_LEN, GFP_KERNEL);
if (!key_buf) {
- netdev_err(vif->ndev, "No buffer send PTK\n");
- ret = -ENOMEM;
+ result = -ENOMEM;
goto out_wpa_ptk;
}
@@ -1740,37 +1711,15 @@ out_wpa_rx_gtk:
&wid, 1,
wilc_get_vif_idx(vif));
kfree(key_buf);
- complete(&hif_drv->comp_test_key_block);
}
out_wpa_ptk:
+ complete(&hif_drv->comp_test_key_block);
kfree(hif_key->attr.wpa.key);
- if (ret)
- return ret;
-
break;
case PMKSA:
- key_buf = kmalloc((hif_key->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL);
- if (!key_buf)
- return -ENOMEM;
-
- key_buf[0] = hif_key->attr.pmkid.numpmkid;
-
- for (i = 0; i < hif_key->attr.pmkid.numpmkid; i++) {
- memcpy(key_buf + ((PMKSA_KEY_LEN * i) + 1), hif_key->attr.pmkid.pmkidlist[i].bssid, ETH_ALEN);
- memcpy(key_buf + ((PMKSA_KEY_LEN * i) + ETH_ALEN + 1), hif_key->attr.pmkid.pmkidlist[i].pmkid, PMKID_LEN);
- }
-
- wid.id = (u16)WID_PMKID_INFO;
- wid.type = WID_STR;
- wid.val = (s8 *)key_buf;
- wid.size = (hif_key->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1;
-
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
-
- kfree(key_buf);
+ result = wilc_pmksa_key_copy(vif, hif_key);
break;
}
@@ -1833,8 +1782,6 @@ static void handle_disconnect(struct wilc_vif *vif)
netdev_err(vif->ndev, "conn_result = NULL\n");
}
- scan_while_connected = false;
-
hif_drv->hif_state = HOST_IF_IDLE;
eth_zero_addr(hif_drv->assoc_bssid);
@@ -2035,18 +1982,13 @@ static void handle_del_beacon(struct wilc_vif *vif)
{
s32 result = 0;
struct wid wid;
- u8 *cur_byte;
+ u8 del_beacon = 0;
wid.id = (u16)WID_DEL_BEACON;
wid.type = WID_CHAR;
wid.size = sizeof(char);
wid.val = &del_beacon;
- if (!wid.val)
- return;
-
- cur_byte = wid.val;
-
result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
if (result)
@@ -2156,7 +2098,6 @@ static void handle_del_station(struct wilc_vif *vif, struct del_sta *param)
{
s32 result = 0;
struct wid wid;
- u8 *cur_byte;
wid.id = (u16)WID_REMOVE_STA;
wid.type = WID_BIN;
@@ -2166,9 +2107,7 @@ static void handle_del_station(struct wilc_vif *vif, struct del_sta *param)
if (!wid.val)
goto error;
- cur_byte = wid.val;
-
- ether_addr_copy(cur_byte, param->mac_addr);
+ ether_addr_copy(wid.val, param->mac_addr);
result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
@@ -2260,19 +2199,16 @@ static int handle_remain_on_chan(struct wilc_vif *vif,
netdev_err(vif->ndev, "Failed to set remain on channel\n");
error:
- {
- P2P_LISTEN_STATE = 1;
- hif_drv->remain_on_ch_timer_vif = vif;
- mod_timer(&hif_drv->remain_on_ch_timer,
- jiffies +
- msecs_to_jiffies(hif_remain_ch->duration));
+ p2p_listen_state = 1;
+ hif_drv->remain_on_ch_timer_vif = vif;
+ mod_timer(&hif_drv->remain_on_ch_timer,
+ jiffies + msecs_to_jiffies(hif_remain_ch->duration));
- if (hif_drv->remain_on_ch.ready)
- hif_drv->remain_on_ch.ready(hif_drv->remain_on_ch.arg);
+ if (hif_drv->remain_on_ch.ready)
+ hif_drv->remain_on_ch.ready(hif_drv->remain_on_ch.arg);
- if (hif_drv->remain_on_ch_pending)
- hif_drv->remain_on_ch_pending = 0;
- }
+ if (hif_drv->remain_on_ch_pending)
+ hif_drv->remain_on_ch_pending = 0;
return result;
}
@@ -2317,7 +2253,7 @@ static u32 handle_listen_state_expired(struct wilc_vif *vif,
s32 result = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
- if (P2P_LISTEN_STATE) {
+ if (p2p_listen_state) {
remain_on_chan_flag = false;
wid.id = (u16)WID_REMAIN_ON_CHAN;
wid.type = WID_STR;
@@ -2335,20 +2271,19 @@ static u32 handle_listen_state_expired(struct wilc_vif *vif,
kfree(wid.val);
if (result != 0) {
netdev_err(vif->ndev, "Failed to set remain channel\n");
- goto _done_;
+ return result;
}
if (hif_drv->remain_on_ch.expired) {
hif_drv->remain_on_ch.expired(hif_drv->remain_on_ch.arg,
hif_remain_ch->id);
}
- P2P_LISTEN_STATE = 0;
+ p2p_listen_state = 0;
} else {
netdev_dbg(vif->ndev, "Not in listen state\n");
result = -EFAULT;
}
-_done_:
return result;
}
@@ -2664,18 +2599,6 @@ static void timer_connect_cb(struct timer_list *t)
wilc_enqueue_cmd(&msg);
}
-s32 wilc_remove_key(struct host_if_drv *hif_drv, const u8 *sta_addr)
-{
- struct wid wid;
-
- wid.id = (u16)WID_REMOVE_KEY;
- wid.type = WID_STR;
- wid.val = (s8 *)sta_addr;
- wid.size = 6;
-
- return 0;
-}
-
int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
{
int result = 0;
@@ -3335,7 +3258,6 @@ static void get_periodic_rssi(struct timer_list *unused)
int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
{
- int result = 0;
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
struct wilc *wilc;
@@ -3344,15 +3266,12 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
vif = netdev_priv(dev);
wilc = vif->wilc;
- scan_while_connected = false;
-
init_completion(&hif_wait_response);
hif_drv = kzalloc(sizeof(*hif_drv), GFP_KERNEL);
- if (!hif_drv) {
- result = -ENOMEM;
- goto _fail_;
- }
+ if (!hif_drv)
+ return -ENOMEM;
+
*hif_drv_handler = hif_drv;
for (i = 0; i < wilc->vif_num; i++)
if (dev == wilc->vif[i]->ndev) {
@@ -3363,7 +3282,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
wilc_optaining_ip = false;
- if (clients_count == 0) {
+ if (clients_count == 0) {
init_completion(&hif_thread_comp);
init_completion(&hif_driver_comp);
mutex_init(&hif_deinit_lock);
@@ -3374,12 +3293,12 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
init_completion(&hif_drv->comp_get_rssi);
init_completion(&hif_drv->comp_inactive_time);
- if (clients_count == 0) {
+ if (clients_count == 0) {
hif_workqueue = create_singlethread_workqueue("WILC_wq");
if (!hif_workqueue) {
netdev_err(vif->ndev, "Failed to create workqueue\n");
- result = -ENOMEM;
- goto _fail_;
+ kfree(hif_drv);
+ return -ENOMEM;
}
periodic_rssi_vif = vif;
@@ -3407,8 +3326,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
clients_count++;
-_fail_:
- return result;
+ return 0;
}
int wilc_deinit(struct wilc_vif *vif)
@@ -3443,8 +3361,6 @@ int wilc_deinit(struct wilc_vif *vif)
hif_drv->hif_state = HOST_IF_IDLE;
- scan_while_connected = false;
-
memset(&msg, 0, sizeof(struct host_if_msg));
if (clients_count == 1) {
@@ -3895,150 +3811,167 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
return result;
}
-static void *host_int_parse_join_bss_param(struct network_info *info)
+static void host_int_fill_join_bss_param(struct join_bss_param *param, u8 *ies,
+ u16 *out_index, u8 *pcipher_tc,
+ u8 *auth_total_cnt, u32 tsf_lo,
+ u8 *rates_no)
{
- struct join_bss_param *param = NULL;
- u8 *ies;
- u16 ies_len;
- u16 index = 0;
- u8 rates_no = 0;
u8 ext_rates_no;
u16 offset;
u8 pcipher_cnt;
u8 auth_cnt;
- u8 pcipher_total_cnt = 0;
- u8 auth_total_cnt = 0;
u8 i, j;
+ u16 index = *out_index;
- ies = info->ies;
- ies_len = info->ies_len;
+ if (ies[index] == SUPP_RATES_IE) {
+ *rates_no = ies[index + 1];
+ param->supp_rates[0] = *rates_no;
+ index += 2;
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param)
- return NULL;
+ for (i = 0; i < *rates_no; i++)
+ param->supp_rates[i + 1] = ies[index + i];
- param->dtim_period = info->dtim_period;
- param->beacon_period = info->beacon_period;
- param->cap_info = info->cap_info;
- memcpy(param->bssid, info->bssid, 6);
- memcpy((u8 *)param->ssid, info->ssid, info->ssid_len + 1);
- param->ssid_len = info->ssid_len;
- memset(param->rsn_pcip_policy, 0xFF, 3);
- memset(param->rsn_auth_policy, 0xFF, 3);
+ index += *rates_no;
+ } else if (ies[index] == EXT_SUPP_RATES_IE) {
+ ext_rates_no = ies[index + 1];
+ if (ext_rates_no > (MAX_RATES_SUPPORTED - *rates_no))
+ param->supp_rates[0] = MAX_RATES_SUPPORTED;
+ else
+ param->supp_rates[0] += ext_rates_no;
+ index += 2;
+ for (i = 0; i < (param->supp_rates[0] - *rates_no); i++)
+ param->supp_rates[*rates_no + i + 1] = ies[index + i];
+
+ index += ext_rates_no;
+ } else if (ies[index] == HT_CAPABILITY_IE) {
+ param->ht_capable = true;
+ index += ies[index + 1] + 2;
+ } else if ((ies[index] == WMM_IE) &&
+ (ies[index + 2] == 0x00) && (ies[index + 3] == 0x50) &&
+ (ies[index + 4] == 0xF2) && (ies[index + 5] == 0x02) &&
+ ((ies[index + 6] == 0x00) || (ies[index + 6] == 0x01)) &&
+ (ies[index + 7] == 0x01)) {
+ param->wmm_cap = true;
+
+ if (ies[index + 8] & BIT(7))
+ param->uapsd_cap = true;
+ index += ies[index + 1] + 2;
+ } else if ((ies[index] == P2P_IE) &&
+ (ies[index + 2] == 0x50) && (ies[index + 3] == 0x6f) &&
+ (ies[index + 4] == 0x9a) &&
+ (ies[index + 5] == 0x09) && (ies[index + 6] == 0x0c)) {
+ u16 p2p_cnt;
+
+ param->tsf = tsf_lo;
+ param->noa_enabled = 1;
+ param->idx = ies[index + 9];
+
+ if (ies[index + 10] & BIT(7)) {
+ param->opp_enabled = 1;
+ param->ct_window = ies[index + 10];
+ } else {
+ param->opp_enabled = 0;
+ }
- while (index < ies_len) {
- if (ies[index] == SUPP_RATES_IE) {
- rates_no = ies[index + 1];
- param->supp_rates[0] = rates_no;
- index += 2;
+ param->cnt = ies[index + 11];
+ p2p_cnt = index + 12;
- for (i = 0; i < rates_no; i++)
- param->supp_rates[i + 1] = ies[index + i];
+ memcpy(param->duration, ies + p2p_cnt, 4);
+ p2p_cnt += 4;
- index += rates_no;
- } else if (ies[index] == EXT_SUPP_RATES_IE) {
- ext_rates_no = ies[index + 1];
- if (ext_rates_no > (MAX_RATES_SUPPORTED - rates_no))
- param->supp_rates[0] = MAX_RATES_SUPPORTED;
- else
- param->supp_rates[0] += ext_rates_no;
- index += 2;
- for (i = 0; i < (param->supp_rates[0] - rates_no); i++)
- param->supp_rates[rates_no + i + 1] = ies[index + i];
-
- index += ext_rates_no;
- } else if (ies[index] == HT_CAPABILITY_IE) {
- param->ht_capable = true;
- index += ies[index + 1] + 2;
- } else if ((ies[index] == WMM_IE) &&
- (ies[index + 2] == 0x00) && (ies[index + 3] == 0x50) &&
- (ies[index + 4] == 0xF2) &&
- (ies[index + 5] == 0x02) &&
- ((ies[index + 6] == 0x00) || (ies[index + 6] == 0x01)) &&
- (ies[index + 7] == 0x01)) {
- param->wmm_cap = true;
-
- if (ies[index + 8] & BIT(7))
- param->uapsd_cap = true;
- index += ies[index + 1] + 2;
- } else if ((ies[index] == P2P_IE) &&
- (ies[index + 2] == 0x50) && (ies[index + 3] == 0x6f) &&
- (ies[index + 4] == 0x9a) &&
- (ies[index + 5] == 0x09) && (ies[index + 6] == 0x0c)) {
- u16 p2p_cnt;
-
- param->tsf = info->tsf_lo;
- param->noa_enabled = 1;
- param->idx = ies[index + 9];
-
- if (ies[index + 10] & BIT(7)) {
- param->opp_enabled = 1;
- param->ct_window = ies[index + 10];
- } else {
- param->opp_enabled = 0;
- }
+ memcpy(param->interval, ies + p2p_cnt, 4);
+ p2p_cnt += 4;
- param->cnt = ies[index + 11];
- p2p_cnt = index + 12;
+ memcpy(param->start_time, ies + p2p_cnt, 4);
- memcpy(param->duration, ies + p2p_cnt, 4);
- p2p_cnt += 4;
+ index += ies[index + 1] + 2;
+ } else if ((ies[index] == RSN_IE) ||
+ ((ies[index] == WPA_IE) && (ies[index + 2] == 0x00) &&
+ (ies[index + 3] == 0x50) && (ies[index + 4] == 0xF2) &&
+ (ies[index + 5] == 0x01))) {
+ u16 rsn_idx = index;
- memcpy(param->interval, ies + p2p_cnt, 4);
- p2p_cnt += 4;
+ if (ies[rsn_idx] == RSN_IE) {
+ param->mode_802_11i = 2;
+ } else {
+ if (param->mode_802_11i == 0)
+ param->mode_802_11i = 1;
+ rsn_idx += 4;
+ }
- memcpy(param->start_time, ies + p2p_cnt, 4);
+ rsn_idx += 7;
+ param->rsn_grp_policy = ies[rsn_idx];
+ rsn_idx++;
+ offset = ies[rsn_idx] * 4;
+ pcipher_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
+ rsn_idx += 2;
- index += ies[index + 1] + 2;
- } else if ((ies[index] == RSN_IE) ||
- ((ies[index] == WPA_IE) && (ies[index + 2] == 0x00) &&
- (ies[index + 3] == 0x50) && (ies[index + 4] == 0xF2) &&
- (ies[index + 5] == 0x01))) {
- u16 rsn_idx = index;
+ i = *pcipher_tc;
+ j = 0;
+ for (; i < (pcipher_cnt + *pcipher_tc) && i < 3; i++, j++) {
+ u8 *policy = &param->rsn_pcip_policy[i];
- if (ies[rsn_idx] == RSN_IE) {
- param->mode_802_11i = 2;
- } else {
- if (param->mode_802_11i == 0)
- param->mode_802_11i = 1;
- rsn_idx += 4;
- }
+ *policy = ies[rsn_idx + ((j + 1) * 4) - 1];
+ }
- rsn_idx += 7;
- param->rsn_grp_policy = ies[rsn_idx];
- rsn_idx++;
- offset = ies[rsn_idx] * 4;
- pcipher_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
- rsn_idx += 2;
+ *pcipher_tc += pcipher_cnt;
+ rsn_idx += offset;
+
+ offset = ies[rsn_idx] * 4;
- for (i = pcipher_total_cnt, j = 0; i < pcipher_cnt + pcipher_total_cnt && i < 3; i++, j++)
- param->rsn_pcip_policy[i] = ies[rsn_idx + ((j + 1) * 4) - 1];
+ auth_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
+ rsn_idx += 2;
+ i = *auth_total_cnt;
+ j = 0;
+ for (; i < (*auth_total_cnt + auth_cnt); i++, j++) {
+ u8 *policy = &param->rsn_auth_policy[i];
- pcipher_total_cnt += pcipher_cnt;
- rsn_idx += offset;
+ *policy = ies[rsn_idx + ((j + 1) * 4) - 1];
+ }
- offset = ies[rsn_idx] * 4;
+ *auth_total_cnt += auth_cnt;
+ rsn_idx += offset;
- auth_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
+ if (ies[index] == RSN_IE) {
+ param->rsn_cap[0] = ies[rsn_idx];
+ param->rsn_cap[1] = ies[rsn_idx + 1];
rsn_idx += 2;
+ }
+ param->rsn_found = true;
+ index += ies[index + 1] + 2;
+ } else {
+ index += ies[index + 1] + 2;
+ }
+
+ *out_index = index;
+}
+
+static void *host_int_parse_join_bss_param(struct network_info *info)
+{
+ struct join_bss_param *param = NULL;
+ u16 index = 0;
+ u8 rates_no = 0;
+ u8 pcipher_total_cnt = 0;
+ u8 auth_total_cnt = 0;
- for (i = auth_total_cnt, j = 0; i < auth_total_cnt + auth_cnt; i++, j++)
- param->rsn_auth_policy[i] = ies[rsn_idx + ((j + 1) * 4) - 1];
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param)
+ return NULL;
- auth_total_cnt += auth_cnt;
- rsn_idx += offset;
+ param->dtim_period = info->dtim_period;
+ param->beacon_period = info->beacon_period;
+ param->cap_info = info->cap_info;
+ memcpy(param->bssid, info->bssid, 6);
+ memcpy((u8 *)param->ssid, info->ssid, info->ssid_len + 1);
+ param->ssid_len = info->ssid_len;
+ memset(param->rsn_pcip_policy, 0xFF, 3);
+ memset(param->rsn_auth_policy, 0xFF, 3);
- if (ies[index] == RSN_IE) {
- param->rsn_cap[0] = ies[rsn_idx];
- param->rsn_cap[1] = ies[rsn_idx + 1];
- rsn_idx += 2;
- }
- param->rsn_found = true;
- index += ies[index + 1] + 2;
- } else {
- index += ies[index + 1] + 2;
- }
- }
+ while (index < info->ies_len)
+ host_int_fill_join_bss_param(param, info->ies, &index,
+ &pcipher_total_cnt,
+ &auth_total_cnt, info->tsf_lo,
+ &rates_no);
return (void *)param;
}
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 4b60b1822e91..068b587a9df4 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -49,7 +49,6 @@
#define PMKID_LEN 16
#define WILC_MAX_NUM_PMKIDS 16
#define WILC_ADD_STA_LENGTH 40
-#define SCAN_EVENT_DONE_ABORTED
#define NUM_CONCURRENT_IFC 2
#define DRV_HANDLER_SIZE 5
#define DRV_HANDLER_MASK 0x000000FF
@@ -272,7 +271,7 @@ struct host_if_drv {
u8 assoc_bssid[ETH_ALEN];
struct cfg_param_attr cfg_values;
-
+ /*lock to protect concurrent setting of cfg params*/
struct mutex cfg_values_lock;
struct completion comp_test_key_block;
struct completion comp_test_disconn_block;
@@ -304,7 +303,6 @@ struct add_sta_param {
};
struct wilc_vif;
-s32 wilc_remove_key(struct host_if_drv *hif_drv, const u8 *sta_addr);
int wilc_remove_wep_key(struct wilc_vif *vif, u8 index);
int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index);
int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
@@ -315,7 +313,7 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic,
u8 mode, u8 cipher_mode, u8 index);
s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
- u32 *pu32InactiveTime);
+ u32 *out_val);
int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
u8 index, u32 key_rsc_len, const u8 *key_rsc,
const u8 *rx_mic, const u8 *tx_mic, u8 mode,
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 169213f24faf..1c7e6e15809c 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -1,15 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*!
- * @file linux_mon.c
- * @brief File Operations OS wrapper functionality
- * @author mdaftedar
- * @sa wilc_wfi_netdevice.h
- * @date 01 MAR 2012
- * @version 1.0
- */
#include "wilc_wfi_cfgoperations.h"
-#include "wilc_wlan_if.h"
-#include "wilc_wlan.h"
struct wilc_wfi_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
@@ -27,22 +17,15 @@ static struct net_device *wilc_wfi_mon; /* global monitor netdev */
static u8 srcadd[6];
static u8 bssid[6];
-static u8 broadcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-/**
- * @brief WILC_WFI_monitor_rx
- * @details
- * @param[in]
- * @return int : Return 0 on Success
- * @author mdaftedar
- * @date 12 JUL 2012
- * @version 1.0
- */
#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */
#define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive*/
#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
-void WILC_WFI_monitor_rx(u8 *buff, u32 size)
+#define TX_RADIOTAP_PRESENT ((1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_TX_FLAGS))
+
+void wilc_wfi_monitor_rx(u8 *buff, u32 size)
{
u32 header, pkt_offset;
struct sk_buff *skb = NULL;
@@ -66,22 +49,20 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
/* hostapd callback mgmt frame */
- skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_cb_hdr));
+ skb = dev_alloc_skb(size + sizeof(*cb_hdr));
if (!skb)
return;
skb_put_data(skb, buff, size);
cb_hdr = skb_push(skb, sizeof(*cb_hdr));
- memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));
+ memset(cb_hdr, 0, sizeof(*cb_hdr));
cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
- cb_hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_cb_hdr));
+ cb_hdr->hdr.it_len = cpu_to_le16(sizeof(*cb_hdr));
- cb_hdr->hdr.it_present = cpu_to_le32(
- (1 << IEEE80211_RADIOTAP_RATE) |
- (1 << IEEE80211_RADIOTAP_TX_FLAGS));
+ cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT);
cb_hdr->rate = 5; /* txrate->bitrate / 5; */
@@ -93,7 +74,7 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
}
} else {
- skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_hdr));
+ skb = dev_alloc_skb(size + sizeof(*hdr));
if (!skb)
return;
@@ -102,7 +83,7 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
hdr = skb_push(skb, sizeof(*hdr));
memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr));
hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
- hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
+ hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
hdr->hdr.it_present = cpu_to_le32
(1 << IEEE80211_RADIOTAP_RATE); /* | */
hdr->rate = 5; /* txrate->bitrate / 5; */
@@ -162,20 +143,11 @@ static int mon_mgmt_tx(struct net_device *dev, const u8 *buf, size_t len)
return 0;
}
-/**
- * @brief WILC_WFI_mon_xmit
- * @details
- * @param[in]
- * @return int : Return 0 on Success
- * @author mdaftedar
- * @date 12 JUL 2012
- * @version 1.0
- */
-static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
+static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
{
u32 rtap_len, ret = 0;
- struct WILC_WFI_mon_priv *mon_priv;
+ struct wilc_wfi_mon_priv *mon_priv;
struct sk_buff *skb2;
struct wilc_wfi_radiotap_cb_hdr *cb_hdr;
@@ -192,8 +164,8 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
skb_pull(skb, rtap_len);
- if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) {
- skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr));
+ if (skb->data[0] == 0xc0 && is_broadcast_ether_addr(&skb->data[4])) {
+ skb2 = dev_alloc_skb(skb->len + sizeof(*cb_hdr));
if (!skb2)
return -ENOMEM;
@@ -204,11 +176,9 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
- cb_hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_cb_hdr));
+ cb_hdr->hdr.it_len = cpu_to_le16(sizeof(*cb_hdr));
- cb_hdr->hdr.it_present = cpu_to_le32(
- (1 << IEEE80211_RADIOTAP_RATE) |
- (1 << IEEE80211_RADIOTAP_TX_FLAGS));
+ cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT);
cb_hdr->rate = 5; /* txrate->bitrate / 5; */
cb_hdr->tx_flags = 0x0004;
@@ -244,29 +214,20 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
}
static const struct net_device_ops wilc_wfi_netdev_ops = {
- .ndo_start_xmit = WILC_WFI_mon_xmit,
+ .ndo_start_xmit = wilc_wfi_mon_xmit,
};
-/**
- * @brief WILC_WFI_init_mon_interface
- * @details
- * @param[in]
- * @return Pointer to net_device
- * @author mdaftedar
- * @date 12 JUL 2012
- * @version 1.0
- */
-struct net_device *WILC_WFI_init_mon_interface(const char *name,
+struct net_device *wilc_wfi_init_mon_interface(const char *name,
struct net_device *real_dev)
{
- struct WILC_WFI_mon_priv *priv;
+ struct wilc_wfi_mon_priv *priv;
/*If monitor interface is already initialized, return it*/
if (wilc_wfi_mon)
return wilc_wfi_mon;
- wilc_wfi_mon = alloc_etherdev(sizeof(struct WILC_WFI_mon_priv));
+ wilc_wfi_mon = alloc_etherdev(sizeof(struct wilc_wfi_mon_priv));
if (!wilc_wfi_mon)
return NULL;
wilc_wfi_mon->type = ARPHRD_IEEE80211_RADIOTAP;
@@ -287,16 +248,7 @@ struct net_device *WILC_WFI_init_mon_interface(const char *name,
return wilc_wfi_mon;
}
-/**
- * @brief WILC_WFI_deinit_mon_interface
- * @details
- * @param[in]
- * @return int : Return 0 on Success
- * @author mdaftedar
- * @date 12 JUL 2012
- * @version 1.0
- */
-int WILC_WFI_deinit_mon_interface(void)
+int wilc_wfi_deinit_mon_interface(void)
{
bool rollback_lock = false;
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 38a83bd31671..02e6b1338440 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1,60 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
-#include "wilc_wfi_cfgoperations.h"
-#include "wilc_wlan_if.h"
-#include "wilc_wlan.h"
-
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
-
#include <linux/kthread.h>
#include <linux/firmware.h>
-
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/mutex.h>
-#include <linux/completion.h>
-
-static int dev_state_ev_handler(struct notifier_block *this,
- unsigned long event, void *ptr);
-
-static struct notifier_block g_dev_notifier = {
- .notifier_call = dev_state_ev_handler
-};
-static int wlan_deinit_locks(struct net_device *dev);
-static void wlan_deinitialize_threads(struct net_device *dev);
-
-static void linux_wlan_tx_complete(void *priv, int status);
-static int mac_init_fn(struct net_device *ndev);
-static struct net_device_stats *mac_stats(struct net_device *dev);
-static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd);
-static int wilc_mac_open(struct net_device *ndev);
-static int wilc_mac_close(struct net_device *ndev);
-static void wilc_set_multicast_list(struct net_device *dev);
+#include "wilc_wfi_cfgoperations.h"
bool wilc_enable_ps = true;
-static const struct net_device_ops wilc_netdev_ops = {
- .ndo_init = mac_init_fn,
- .ndo_open = wilc_mac_open,
- .ndo_stop = wilc_mac_close,
- .ndo_start_xmit = wilc_mac_xmit,
- .ndo_do_ioctl = mac_ioctl,
- .ndo_get_stats = mac_stats,
- .ndo_set_rx_mode = wilc_set_multicast_list,
-
-};
-
static int dev_state_ev_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -221,19 +176,16 @@ static void deinit_irq(struct net_device *dev)
}
}
-void wilc_mac_indicate(struct wilc *wilc, int flag)
+void wilc_mac_indicate(struct wilc *wilc)
{
int status;
- if (flag == WILC_MAC_INDICATE_STATUS) {
- wilc_wlan_cfg_get_val(WID_STATUS,
- (unsigned char *)&status, 4);
- if (wilc->mac_status == WILC_MAC_STATUS_INIT) {
- wilc->mac_status = status;
- complete(&wilc->sync_event);
- } else {
- wilc->mac_status = status;
- }
+ wilc_wlan_cfg_get_val(WID_STATUS, (unsigned char *)&status, 4);
+ if (wilc->mac_status == MAC_STATUS_INIT) {
+ wilc->mac_status = status;
+ complete(&wilc->sync_event);
+ } else {
+ wilc->mac_status = status;
}
}
@@ -317,7 +269,7 @@ static int linux_wlan_txq_task(void *vp)
return 0;
}
-int wilc_wlan_get_firmware(struct net_device *dev)
+static int wilc_wlan_get_firmware(struct net_device *dev)
{
struct wilc_vif *vif;
struct wilc *wilc;
@@ -338,16 +290,16 @@ int wilc_wlan_get_firmware(struct net_device *dev)
netdev_info(dev, "loading firmware %s\n", firmware);
if (!(&vif->ndev->dev))
- goto _fail_;
+ goto fail;
if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
netdev_err(dev, "%s - firmware not available\n", firmware);
ret = -1;
- goto _fail_;
+ goto fail;
}
wilc->firmware = wilc_firmware;
-_fail_:
+fail:
return ret;
}
@@ -416,197 +368,230 @@ static int linux_wlan_init_test_config(struct net_device *dev,
*(int *)c_val = 1;
if (!wilc_wlan_cfg_set(vif, 1, WID_SET_DRV_HANDLER, c_val, 4, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 0;
if (!wilc_wlan_cfg_set(vif, 0, WID_PC_TEST_MODE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = INFRASTRUCTURE;
if (!wilc_wlan_cfg_set(vif, 0, WID_BSS_TYPE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = RATE_AUTO;
if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = G_MIXED_11B_2_MODE;
if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, c_val, 1, 0,
0))
- goto _fail_;
+ goto fail;
c_val[0] = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_CHANNEL, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = G_SHORT_PREAMBLE;
if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = AUTO_PROT;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_PROT_MECH, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = ACTIVE_SCAN;
if (!wilc_wlan_cfg_set(vif, 0, WID_SCAN_TYPE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = SITE_SURVEY_OFF;
if (!wilc_wlan_cfg_set(vif, 0, WID_SITE_SURVEY, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
*((int *)c_val) = 0xffff;
if (!wilc_wlan_cfg_set(vif, 0, WID_RTS_THRESHOLD, c_val, 2, 0, 0))
- goto _fail_;
+ goto fail;
*((int *)c_val) = 2346;
if (!wilc_wlan_cfg_set(vif, 0, WID_FRAG_THRESHOLD, c_val, 2, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 0;
if (!wilc_wlan_cfg_set(vif, 0, WID_BCAST_SSID, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_QOS_ENABLE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = NO_POWERSAVE;
if (!wilc_wlan_cfg_set(vif, 0, WID_POWER_MANAGEMENT, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = NO_SECURITY; /* NO_ENCRYPT, 0x79 */
if (!wilc_wlan_cfg_set(vif, 0, WID_11I_MODE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = OPEN_SYSTEM;
if (!wilc_wlan_cfg_set(vif, 0, WID_AUTH_TYPE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
strcpy(c_val, "123456790abcdef1234567890");
if (!wilc_wlan_cfg_set(vif, 0, WID_WEP_KEY_VALUE, c_val,
(strlen(c_val) + 1), 0, 0))
- goto _fail_;
+ goto fail;
strcpy(c_val, "12345678");
if (!wilc_wlan_cfg_set(vif, 0, WID_11I_PSK, c_val, (strlen(c_val)), 0,
0))
- goto _fail_;
+ goto fail;
strcpy(c_val, "password");
if (!wilc_wlan_cfg_set(vif, 0, WID_1X_KEY, c_val, (strlen(c_val) + 1),
0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 192;
c_val[1] = 168;
c_val[2] = 1;
c_val[3] = 112;
if (!wilc_wlan_cfg_set(vif, 0, WID_1X_SERV_ADDR, c_val, 4, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 3;
if (!wilc_wlan_cfg_set(vif, 0, WID_LISTEN_INTERVAL, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 3;
if (!wilc_wlan_cfg_set(vif, 0, WID_DTIM_PERIOD, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = NORMAL_ACK;
if (!wilc_wlan_cfg_set(vif, 0, WID_ACK_POLICY, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 0;
if (!wilc_wlan_cfg_set(vif, 0, WID_USER_CONTROL_ON_TX_POWER, c_val, 1,
0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 48;
if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11A, c_val, 1, 0,
0))
- goto _fail_;
+ goto fail;
c_val[0] = 28;
if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11B, c_val, 1, 0,
0))
- goto _fail_;
+ goto fail;
*((int *)c_val) = 100;
if (!wilc_wlan_cfg_set(vif, 0, WID_BEACON_INTERVAL, c_val, 2, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = REKEY_DISABLE;
if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_POLICY, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
*((int *)c_val) = 84600;
if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PERIOD, c_val, 4, 0, 0))
- goto _fail_;
+ goto fail;
*((int *)c_val) = 500;
if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PACKET_COUNT, c_val, 4, 0,
0))
- goto _fail_;
+ goto fail;
c_val[0] = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_SHORT_SLOT_ALLOWED, c_val, 1, 0,
0))
- goto _fail_;
+ goto fail;
c_val[0] = G_SELF_CTS_PROT;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ERP_PROT_TYPE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ENABLE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = HT_MIXED_MODE;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OPERATING_MODE, c_val, 1, 0,
0))
- goto _fail_;
+ goto fail;
c_val[0] = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_TXOP_PROT_DISABLE, c_val, 1, 0,
0))
- goto _fail_;
+ goto fail;
c_val[0] = DETECT_PROTECT_REPORT;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = RTS_CTS_NONHT_PROT;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_HT_PROT_TYPE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 0;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_RIFS_PROT_ENABLE, c_val, 1, 0,
0))
- goto _fail_;
+ goto fail;
c_val[0] = MIMO_MODE;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_SMPS_MODE, c_val, 1, 0, 0))
- goto _fail_;
+ goto fail;
c_val[0] = 7;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_CURRENT_TX_MCS, c_val, 1, 0,
0))
- goto _fail_;
+ goto fail;
c_val[0] = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, c_val, 1,
1, 1))
- goto _fail_;
+ goto fail;
return 0;
-_fail_:
+fail:
return -1;
}
-void wilc1000_wlan_deinit(struct net_device *dev)
+static int wlan_deinit_locks(struct net_device *dev)
+{
+ struct wilc_vif *vif;
+ struct wilc *wilc;
+
+ vif = netdev_priv(dev);
+ wilc = vif->wilc;
+
+ mutex_destroy(&wilc->hif_cs);
+ mutex_destroy(&wilc->rxq_cs);
+ mutex_destroy(&wilc->txq_add_to_head_cs);
+
+ return 0;
+}
+
+static void wlan_deinitialize_threads(struct net_device *dev)
+{
+ struct wilc_vif *vif;
+ struct wilc *wl;
+
+ vif = netdev_priv(dev);
+ wl = vif->wilc;
+
+ wl->close = 1;
+
+ complete(&wl->txq_event);
+
+ if (wl->txq_thread) {
+ kthread_stop(wl->txq_thread);
+ wl->txq_thread = NULL;
+ }
+}
+
+static void wilc_wlan_deinitialize(struct net_device *dev)
{
struct wilc_vif *vif;
struct wilc *wl;
@@ -668,21 +653,6 @@ static int wlan_init_locks(struct net_device *dev)
return 0;
}
-static int wlan_deinit_locks(struct net_device *dev)
-{
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
-
- mutex_destroy(&wilc->hif_cs);
- mutex_destroy(&wilc->rxq_cs);
- mutex_destroy(&wilc->txq_add_to_head_cs);
-
- return 0;
-}
-
static int wlan_initialize_threads(struct net_device *dev)
{
struct wilc_vif *vif;
@@ -703,31 +673,13 @@ static int wlan_initialize_threads(struct net_device *dev)
return 0;
}
-static void wlan_deinitialize_threads(struct net_device *dev)
-{
- struct wilc_vif *vif;
- struct wilc *wl;
-
- vif = netdev_priv(dev);
- wl = vif->wilc;
-
- wl->close = 1;
-
- complete(&wl->txq_event);
-
- if (wl->txq_thread) {
- kthread_stop(wl->txq_thread);
- wl->txq_thread = NULL;
- }
-}
-
-int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif)
+static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
{
int ret = 0;
struct wilc *wl = vif->wilc;
if (!wl->initialized) {
- wl->mac_status = WILC_MAC_STATUS_INIT;
+ wl->mac_status = MAC_STATUS_INIT;
wl->close = 0;
wlan_init_locks(dev);
@@ -735,42 +687,42 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif)
ret = wilc_wlan_init(dev);
if (ret < 0) {
ret = -EIO;
- goto _fail_locks_;
+ goto fail_locks;
}
if (wl->gpio >= 0 && init_irq(dev)) {
ret = -EIO;
- goto _fail_locks_;
+ goto fail_locks;
}
ret = wlan_initialize_threads(dev);
if (ret < 0) {
ret = -EIO;
- goto _fail_wilc_wlan_;
+ goto fail_wilc_wlan;
}
if (!wl->dev_irq_num &&
wl->hif_func->enable_interrupt &&
wl->hif_func->enable_interrupt(wl)) {
ret = -EIO;
- goto _fail_irq_init_;
+ goto fail_irq_init;
}
if (wilc_wlan_get_firmware(dev)) {
ret = -EIO;
- goto _fail_irq_enable_;
+ goto fail_irq_enable;
}
ret = wilc1000_firmware_download(dev);
if (ret < 0) {
ret = -EIO;
- goto _fail_irq_enable_;
+ goto fail_irq_enable;
}
ret = linux_wlan_start_firmware(dev);
if (ret < 0) {
ret = -EIO;
- goto _fail_irq_enable_;
+ goto fail_irq_enable;
}
if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) {
@@ -788,27 +740,27 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif)
if (ret < 0) {
netdev_err(dev, "Failed to configure firmware\n");
ret = -EIO;
- goto _fail_fw_start_;
+ goto fail_fw_start;
}
wl->initialized = true;
return 0;
-_fail_fw_start_:
+fail_fw_start:
wilc_wlan_stop(wl);
-_fail_irq_enable_:
+fail_irq_enable:
if (!wl->dev_irq_num &&
wl->hif_func->disable_interrupt)
wl->hif_func->disable_interrupt(wl);
-_fail_irq_init_:
+fail_irq_init:
if (wl->dev_irq_num)
deinit_irq(dev);
wlan_deinitialize_threads(dev);
-_fail_wilc_wlan_:
+fail_wilc_wlan:
wilc_wlan_cleanup(dev);
-_fail_locks_:
+fail_locks:
wlan_deinit_locks(dev);
netdev_err(dev, "WLAN initialization FAILED\n");
} else {
@@ -848,7 +800,7 @@ static int wilc_mac_open(struct net_device *ndev)
if (ret < 0)
return ret;
- ret = wilc1000_wlan_init(ndev, vif);
+ ret = wilc_wlan_initialize(ndev, vif);
if (ret < 0) {
wilc_deinit_host_int(ndev);
return ret;
@@ -871,7 +823,7 @@ static int wilc_mac_open(struct net_device *ndev)
if (!is_valid_ether_addr(ndev->dev_addr)) {
netdev_err(ndev, "Wrong MAC address\n");
wilc_deinit_host_int(ndev);
- wilc1000_wlan_deinit(ndev);
+ wilc_wlan_deinitialize(ndev);
return -EINVAL;
}
@@ -941,7 +893,7 @@ static void linux_wlan_tx_complete(void *priv, int status)
kfree(pv_data);
}
-int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
+netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wilc_vif *vif;
struct tx_complete_data *tx_data = NULL;
@@ -1040,8 +992,8 @@ static int wilc_mac_close(struct net_device *ndev)
if (wl->open_ifcs == 0) {
netdev_dbg(ndev, "Deinitializing wilc1000\n");
wl->close = 1;
- wilc1000_wlan_deinit(ndev);
- WILC_WFI_deinit_mon_interface();
+ wilc_wlan_deinitialize(ndev);
+ wilc_wfi_deinit_mon_interface();
}
vif->mac_opened = 0;
@@ -1049,67 +1001,6 @@ static int wilc_mac_close(struct net_device *ndev)
return 0;
}
-static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
-{
- u8 *buff = NULL;
- s8 rssi;
- u32 size = 0;
- struct wilc_vif *vif;
- s32 ret = 0;
- struct wilc *wilc;
-
- vif = netdev_priv(ndev);
- wilc = vif->wilc;
-
- if (!wilc->initialized)
- return 0;
-
- switch (cmd) {
- case SIOCSIWPRIV:
- {
- struct iwreq *wrq = (struct iwreq *)req;
-
- size = wrq->u.data.length;
-
- if (size && wrq->u.data.pointer) {
- buff = memdup_user(wrq->u.data.pointer,
- wrq->u.data.length);
- if (IS_ERR(buff))
- return PTR_ERR(buff);
-
- if (strncasecmp(buff, "RSSI", size) == 0) {
- ret = wilc_get_rssi(vif, &rssi);
- netdev_info(ndev, "RSSI :%d\n", rssi);
-
- rssi += 5;
-
- snprintf(buff, size, "rssi %d", rssi);
-
- if (copy_to_user(wrq->u.data.pointer, buff, size)) {
- netdev_err(ndev, "failed to copy\n");
- ret = -EFAULT;
- goto done;
- }
- }
- }
- }
- break;
-
- default:
- {
- netdev_info(ndev, "Command - %d - has been received\n", cmd);
- ret = -EOPNOTSUPP;
- goto done;
- }
- }
-
-done:
-
- kfree(buff);
-
- return ret;
-}
-
void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset)
{
unsigned int frame_len = 0;
@@ -1158,7 +1049,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
for (i = 0; i < wilc->vif_num; i++) {
vif = netdev_priv(wilc->vif[i]->ndev);
if (vif->monitor_flag) {
- WILC_WFI_monitor_rx(buff, size);
+ wilc_wfi_monitor_rx(buff, size);
return;
}
}
@@ -1166,9 +1057,13 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
vif = netdev_priv(wilc->vif[1]->ndev);
if ((buff[0] == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
(buff[0] == vif->frame_reg[1].type && vif->frame_reg[1].reg))
- WILC_WFI_p2p_rx(wilc->vif[1]->ndev, buff, size);
+ wilc_wfi_p2p_rx(wilc->vif[1]->ndev, buff, size);
}
+static struct notifier_block g_dev_notifier = {
+ .notifier_call = dev_state_ev_handler
+};
+
void wilc_netdev_cleanup(struct wilc *wilc)
{
int i;
@@ -1198,6 +1093,15 @@ void wilc_netdev_cleanup(struct wilc *wilc)
}
EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
+static const struct net_device_ops wilc_netdev_ops = {
+ .ndo_init = mac_init_fn,
+ .ndo_open = wilc_mac_open,
+ .ndo_stop = wilc_mac_close,
+ .ndo_start_xmit = wilc_mac_xmit,
+ .ndo_get_stats = mac_stats,
+ .ndo_set_rx_mode = wilc_set_multicast_list,
+};
+
int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
int gpio, const struct wilc_hif_func *ops)
{
@@ -1218,6 +1122,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
register_inetaddr_notifier(&g_dev_notifier);
for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
+ struct wireless_dev *wdev;
+
ndev = alloc_etherdev(sizeof(struct wilc_vif));
if (!ndev)
return -ENOMEM;
@@ -1240,28 +1146,24 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
ndev->netdev_ops = &wilc_netdev_ops;
- {
- struct wireless_dev *wdev;
+ wdev = wilc_create_wiphy(ndev, dev);
- wdev = wilc_create_wiphy(ndev, dev);
+ if (dev)
+ SET_NETDEV_DEV(ndev, dev);
- if (dev)
- SET_NETDEV_DEV(ndev, dev);
-
- if (!wdev) {
- netdev_err(ndev, "Can't register WILC Wiphy\n");
- return -1;
- }
-
- vif->ndev->ieee80211_ptr = wdev;
- vif->ndev->ml_priv = vif;
- wdev->netdev = vif->ndev;
- vif->netstats.rx_packets = 0;
- vif->netstats.tx_packets = 0;
- vif->netstats.rx_bytes = 0;
- vif->netstats.tx_bytes = 0;
+ if (!wdev) {
+ netdev_err(ndev, "Can't register WILC Wiphy\n");
+ return -1;
}
+ vif->ndev->ieee80211_ptr = wdev;
+ vif->ndev->ml_priv = vif;
+ wdev->netdev = vif->ndev;
+ vif->netstats.rx_packets = 0;
+ vif->netstats.tx_packets = 0;
+ vif->netstats.rx_bytes = 0;
+ vif->netstats.tx_bytes = 0;
+
ret = register_netdev(ndev);
if (ret)
return ret;
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index 0deb61a21b27..287c11b58160 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -13,8 +13,6 @@
#if defined(WILC_DEBUGFS)
#include <linux/module.h>
#include <linux/debugfs.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
#include "wilc_wlan_if.h"
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index a08899941491..4ab43f97646a 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -5,17 +5,11 @@
* Module Name: wilc_sdio.c
*/
-#include <linux/string.h>
-#include "wilc_wlan_if.h"
-#include "wilc_wlan.h"
-#include "wilc_wfi_netdevice.h"
#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/host.h>
#include <linux/of_gpio.h>
+#include "wilc_wfi_netdevice.h"
+
#define SDIO_MODALIAS "wilc1000_sdio"
#define SDIO_VENDOR_ID_WILC 0x0296
@@ -32,7 +26,8 @@ struct wilc_sdio {
bool irq_gpio;
u32 block_size;
int nint;
-#define MAX_NUN_INT_THRPT_ENH2 (5) /* Max num interrupts allowed in registers 0xf7, 0xf8 */
+/* Max num interrupts allowed in registers 0xf7, 0xf8 */
+#define MAX_NUN_INT_THRPT_ENH2 (5)
int has_thrpt_enh3;
};
@@ -274,7 +269,7 @@ static int sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10c data...\n");
- goto _fail_;
+ goto fail;
}
cmd.address = 0x10d;
@@ -282,7 +277,7 @@ static int sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10d data...\n");
- goto _fail_;
+ goto fail;
}
cmd.address = 0x10e;
@@ -290,11 +285,11 @@ static int sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10e data...\n");
- goto _fail_;
+ goto fail;
}
return 1;
-_fail_:
+fail:
return 0;
}
@@ -312,7 +307,7 @@ static int sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10 data...\n");
- goto _fail_;
+ goto fail;
}
cmd.address = 0x11;
@@ -320,11 +315,11 @@ static int sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x11 data...\n");
- goto _fail_;
+ goto fail;
}
return 1;
-_fail_:
+fail:
return 0;
}
@@ -348,18 +343,18 @@ static int sdio_set_func1_block_size(struct wilc *wilc, u32 block_size)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x110 data...\n");
- goto _fail_;
+ goto fail;
}
cmd.address = 0x111;
cmd.data = (u8)(block_size >> 8);
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x111 data...\n");
- goto _fail_;
+ goto fail;
}
return 1;
-_fail_:
+fail:
return 0;
}
@@ -387,7 +382,7 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
if (ret) {
dev_err(&func->dev,
"Failed cmd 52, read reg (%08x) ...\n", addr);
- goto _fail_;
+ goto fail;
}
} else {
struct sdio_cmd53 cmd;
@@ -396,7 +391,7 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
* set the AHB address
**/
if (!sdio_set_func0_csa_address(wilc, addr))
- goto _fail_;
+ goto fail;
cmd.read_write = 1;
cmd.function = 0;
@@ -410,13 +405,13 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
if (ret) {
dev_err(&func->dev,
"Failed cmd53, write reg (%08x)...\n", addr);
- goto _fail_;
+ goto fail;
}
}
return 1;
-_fail_:
+fail:
return 0;
}
@@ -470,13 +465,13 @@ static int sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.block_size = block_size;
if (addr > 0) {
if (!sdio_set_func0_csa_address(wilc, addr))
- goto _fail_;
+ goto fail;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], block send...\n", addr);
- goto _fail_;
+ goto fail;
}
if (addr > 0)
addr += nblk * block_size;
@@ -493,19 +488,19 @@ static int sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
if (addr > 0) {
if (!sdio_set_func0_csa_address(wilc, addr))
- goto _fail_;
+ goto fail;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], bytes send...\n", addr);
- goto _fail_;
+ goto fail;
}
}
return 1;
-_fail_:
+fail:
return 0;
}
@@ -526,14 +521,14 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
if (ret) {
dev_err(&func->dev,
"Failed cmd 52, read reg (%08x) ...\n", addr);
- goto _fail_;
+ goto fail;
}
*data = cmd.data;
} else {
struct sdio_cmd53 cmd;
if (!sdio_set_func0_csa_address(wilc, addr))
- goto _fail_;
+ goto fail;
cmd.read_write = 0;
cmd.function = 0;
@@ -548,7 +543,7 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
if (ret) {
dev_err(&func->dev,
"Failed cmd53, read reg (%08x)...\n", addr);
- goto _fail_;
+ goto fail;
}
}
@@ -556,7 +551,7 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
return 1;
-_fail_:
+fail:
return 0;
}
@@ -610,13 +605,13 @@ static int sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.block_size = block_size;
if (addr > 0) {
if (!sdio_set_func0_csa_address(wilc, addr))
- goto _fail_;
+ goto fail;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], block read...\n", addr);
- goto _fail_;
+ goto fail;
}
if (addr > 0)
addr += nblk * block_size;
@@ -633,19 +628,19 @@ static int sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
if (addr > 0) {
if (!sdio_set_func0_csa_address(wilc, addr))
- goto _fail_;
+ goto fail;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], bytes read...\n", addr);
- goto _fail_;
+ goto fail;
}
}
return 1;
-_fail_:
+fail:
return 0;
}
@@ -684,7 +679,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, enable csa...\n");
- goto _fail_;
+ goto fail;
}
/**
@@ -692,7 +687,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
**/
if (!sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
dev_err(&func->dev, "Fail cmd 52, set func 0 block size...\n");
- goto _fail_;
+ goto fail;
}
g_sdio.block_size = WILC_SDIO_BLOCK_SIZE;
@@ -708,7 +703,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
if (ret) {
dev_err(&func->dev,
"Fail cmd 52, set IOE register...\n");
- goto _fail_;
+ goto fail;
}
/**
@@ -725,7 +720,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
if (ret) {
dev_err(&func->dev,
"Fail cmd 52, get IOR register...\n");
- goto _fail_;
+ goto fail;
}
if (cmd.data == 0x2)
break;
@@ -733,7 +728,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
if (loop <= 0) {
dev_err(&func->dev, "Fail func 1 is not ready...\n");
- goto _fail_;
+ goto fail;
}
/**
@@ -741,7 +736,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
**/
if (!sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
dev_err(&func->dev, "Fail set func 1 block size...\n");
- goto _fail_;
+ goto fail;
}
/**
@@ -755,7 +750,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, set IEN register...\n");
- goto _fail_;
+ goto fail;
}
/**
@@ -764,7 +759,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
if (!resume) {
if (!sdio_read_reg(wilc, 0x1000, &chipid)) {
dev_err(&func->dev, "Fail cmd read chip id...\n");
- goto _fail_;
+ goto fail;
}
dev_err(&func->dev, "chipid (%08x)\n", chipid);
if ((chipid & 0xfff) > 0x2a0)
@@ -777,7 +772,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
return 1;
-_fail_:
+fail:
return 0;
}
@@ -885,13 +880,13 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
reg = 0;
}
/* select VMM table 0 */
- if ((val & SEL_VMM_TBL0) == SEL_VMM_TBL0)
+ if (val & SEL_VMM_TBL0)
reg |= BIT(5);
/* select VMM table 1 */
- if ((val & SEL_VMM_TBL1) == SEL_VMM_TBL1)
+ if (val & SEL_VMM_TBL1)
reg |= BIT(6);
/* enable VMM */
- if ((val & EN_VMM) == EN_VMM)
+ if (val & EN_VMM)
reg |= BIT(7);
if (reg) {
struct sdio_cmd52 cmd;
@@ -907,7 +902,7 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&func->dev,
"Failed cmd52, set 0xf8 data (%d) ...\n",
__LINE__);
- goto _fail_;
+ goto fail;
}
}
return 1;
@@ -940,7 +935,7 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&func->dev,
"Failed cmd52, set 0xf8 data (%d) ...\n",
__LINE__);
- goto _fail_;
+ goto fail;
}
}
if (!ret)
@@ -948,7 +943,7 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
flags >>= 1;
}
if (!ret)
- goto _fail_;
+ goto fail;
for (i = g_sdio.nint; i < MAX_NUM_INT; i++) {
if (flags & 1)
dev_err(&func->dev,
@@ -961,13 +956,13 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
vmm_ctl = 0;
/* select VMM table 0 */
- if ((val & SEL_VMM_TBL0) == SEL_VMM_TBL0)
+ if (val & SEL_VMM_TBL0)
vmm_ctl |= BIT(0);
/* select VMM table 1 */
- if ((val & SEL_VMM_TBL1) == SEL_VMM_TBL1)
+ if (val & SEL_VMM_TBL1)
vmm_ctl |= BIT(1);
/* enable VMM */
- if ((val & EN_VMM) == EN_VMM)
+ if (val & EN_VMM)
vmm_ctl |= BIT(2);
if (vmm_ctl) {
@@ -983,11 +978,11 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&func->dev,
"Failed cmd52, set 0xf6 data (%d) ...\n",
__LINE__);
- goto _fail_;
+ goto fail;
}
}
return 1;
-_fail_:
+fail:
return 0;
}
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 6b392c946a6f..647526387784 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -5,21 +5,9 @@
* Module Name: wilc_spi.c
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/cdev.h>
-#include <linux/uaccess.h>
-#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
-#include <linux/string.h>
-#include "wilc_wlan_if.h"
-#include "wilc_wlan.h"
#include "wilc_wfi_netdevice.h"
struct wilc_spi {
@@ -31,9 +19,6 @@ struct wilc_spi {
static struct wilc_spi g_spi;
static const struct wilc_hif_func wilc_hif_spi;
-static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32);
-static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32);
-
/********************************************
*
* Crc7
@@ -95,29 +80,29 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
#define CMD_DMA_WRITE 0xc1
#define CMD_DMA_READ 0xc2
-#define CMD_INTERNAL_WRITE 0xc3
-#define CMD_INTERNAL_READ 0xc4
+#define CMD_INTERNAL_WRITE 0xc3
+#define CMD_INTERNAL_READ 0xc4
#define CMD_TERMINATE 0xc5
-#define CMD_REPEAT 0xc6
-#define CMD_DMA_EXT_WRITE 0xc7
-#define CMD_DMA_EXT_READ 0xc8
+#define CMD_REPEAT 0xc6
+#define CMD_DMA_EXT_WRITE 0xc7
+#define CMD_DMA_EXT_READ 0xc8
#define CMD_SINGLE_WRITE 0xc9
-#define CMD_SINGLE_READ 0xca
-#define CMD_RESET 0xcf
+#define CMD_SINGLE_READ 0xca
+#define CMD_RESET 0xcf
-#define N_OK 1
-#define N_FAIL 0
-#define N_RESET -1
-#define N_RETRY -2
+#define N_OK 1
+#define N_FAIL 0
+#define N_RESET -1
+#define N_RETRY -2
#define DATA_PKT_SZ_256 256
-#define DATA_PKT_SZ_512 512
+#define DATA_PKT_SZ_512 512
#define DATA_PKT_SZ_1K 1024
#define DATA_PKT_SZ_4K (4 * 1024)
#define DATA_PKT_SZ_8K (8 * 1024)
-#define DATA_PKT_SZ DATA_PKT_SZ_8K
+#define DATA_PKT_SZ DATA_PKT_SZ_8K
-#define USE_SPI_DMA 0
+#define USE_SPI_DMA 0
static int wilc_bus_probe(struct spi_device *spi)
{
@@ -527,8 +512,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
dev_err(&spi->dev,
"Failed block read, bus err\n");
- result = N_FAIL;
- goto _error_;
+ return N_FAIL;
}
/*
@@ -537,8 +521,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (!g_spi.crc_off && wilc_spi_rx(wilc, crc, 2)) {
dev_err(&spi->dev,
"Failed block crc read, bus err\n");
- result = N_FAIL;
- goto _error_;
+ return N_FAIL;
}
ix += nbytes;
@@ -604,7 +587,6 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
sz -= nbytes;
}
}
-_error_:
return result;
}
@@ -620,28 +602,23 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
*/
ix = 0;
do {
- if (sz <= DATA_PKT_SZ)
+ if (sz <= DATA_PKT_SZ) {
nbytes = sz;
- else
+ order = 0x3;
+ } else {
nbytes = DATA_PKT_SZ;
+ if (ix == 0)
+ order = 0x1;
+ else
+ order = 0x02;
+ }
/*
* Write command
*/
cmd = 0xf0;
- if (ix == 0) {
- if (sz <= DATA_PKT_SZ)
-
- order = 0x3;
- else
- order = 0x1;
- } else {
- if (sz <= DATA_PKT_SZ)
- order = 0x3;
- else
- order = 0x2;
- }
cmd |= order;
+
if (wilc_spi_tx(wilc, &cmd, 1)) {
dev_err(&spi->dev,
"Failed data block cmd write, bus error...\n");
@@ -748,7 +725,6 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
{
struct spi_device *spi = to_spi_device(wilc->dev);
int result;
- u8 cmd = CMD_DMA_EXT_WRITE;
/*
* has to be greated than 4
@@ -756,7 +732,7 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
if (size <= 4)
return 0;
- result = spi_cmd_complete(wilc, cmd, addr, NULL, size, 0);
+ result = spi_cmd_complete(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size, 0);
if (result != N_OK) {
dev_err(&spi->dev,
"Failed cmd, write block (%08x)...\n", addr);
@@ -801,13 +777,12 @@ static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
{
struct spi_device *spi = to_spi_device(wilc->dev);
- u8 cmd = CMD_DMA_EXT_READ;
int result;
if (size <= 4)
return 0;
- result = spi_cmd_complete(wilc, cmd, addr, buf, size, 0);
+ result = spi_cmd_complete(wilc, CMD_DMA_EXT_READ, addr, buf, size, 0);
if (result != N_OK) {
dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr);
return 0;
@@ -920,13 +895,12 @@ static int wilc_spi_read_size(struct wilc *wilc, u32 *size)
if (!ret) {
dev_err(&spi->dev,
"Failed read WILC_VMM_TO_HOST_SIZE ...\n");
- goto _fail_;
+ return ret;
}
tmp = (byte_cnt >> 2) & IRQ_DMA_WD_CNT_MASK;
*size = tmp;
}
-_fail_:
return ret;
}
@@ -950,7 +924,7 @@ static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
if (!ret) {
dev_err(&spi->dev,
"Failed read WILC_VMM_TO_HOST_SIZE ...\n");
- goto _fail_;
+ return ret;
}
tmp = (byte_cnt >> 2) & IRQ_DMA_WD_CNT_MASK;
@@ -980,7 +954,6 @@ static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
*int_status = tmp;
-_fail_:
return ret;
}
@@ -1018,7 +991,7 @@ static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&spi->dev,
"Failed wilc_spi_write_reg, set reg %x ...\n",
0x10c8 + i * 4);
- goto _fail_;
+ return ret;
}
for (i = g_spi.nint; i < MAX_NUM_INT; i++) {
if (flags & 1)
@@ -1031,29 +1004,29 @@ static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val)
tbl_ctl = 0;
/* select VMM table 0 */
- if ((val & SEL_VMM_TBL0) == SEL_VMM_TBL0)
+ if (val & SEL_VMM_TBL0)
tbl_ctl |= BIT(0);
/* select VMM table 1 */
- if ((val & SEL_VMM_TBL1) == SEL_VMM_TBL1)
+ if (val & SEL_VMM_TBL1)
tbl_ctl |= BIT(1);
ret = wilc_spi_write_reg(wilc, WILC_VMM_TBL_CTL, tbl_ctl);
if (!ret) {
dev_err(&spi->dev, "fail write reg vmm_tbl_ctl...\n");
- goto _fail_;
+ return ret;
}
- if ((val & EN_VMM) == EN_VMM) {
+ if (val & EN_VMM) {
/*
* enable vmm transfer.
*/
ret = wilc_spi_write_reg(wilc, WILC_VMM_CORE_CTL, 1);
if (!ret) {
dev_err(&spi->dev, "fail write reg vmm_core_ctl...\n");
- goto _fail_;
+ return ret;
}
}
-_fail_:
+
return ret;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 730d64f2f46a..e248702ee519 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include "wilc_wfi_cfgoperations.h"
-#include "host_interface.h"
-#include <linux/errno.h>
#define NO_ENCRYPT 0
#define ENCRYPT_ENABLED BIT(0)
@@ -79,9 +77,6 @@ static const struct wiphy_wowlan_support wowlan_support = {
.flags = WIPHY_WOWLAN_ANY
};
-#define WILC_WFI_DWELL_PASSIVE 100
-#define WILC_WFI_DWELL_ACTIVE 40
-
#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
#define DEFAULT_LINK_SPEED 72
@@ -90,7 +85,7 @@ static const struct wiphy_wowlan_support wowlan_support = {
static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
static u32 last_scanned_cnt;
struct timer_list wilc_during_ip_timer;
-static struct timer_list hAgingTimer;
+static struct timer_list aging_timer;
static u8 op_ifcs;
#define CHAN2G(_channel, _freq, _flags) { \
@@ -153,49 +148,35 @@ static u8 p2p_recv_random;
static u8 p2p_vendor_spec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03};
static bool wilc_ie;
-static struct ieee80211_supported_band WILC_WFI_band_2ghz = {
+static struct ieee80211_supported_band wilc_band_2ghz = {
.channels = ieee80211_2ghz_channels,
.n_channels = ARRAY_SIZE(ieee80211_2ghz_channels),
.bitrates = ieee80211_bitrates,
.n_bitrates = ARRAY_SIZE(ieee80211_bitrates),
};
-struct add_key_params {
- u8 key_idx;
- bool pairwise;
- u8 *mac_addr;
-};
-
-static struct add_key_params g_add_gtk_key_params;
-static struct wilc_wfi_key g_key_gtk_params;
-static struct add_key_params g_add_ptk_key_params;
-static struct wilc_wfi_key g_key_ptk_params;
-static struct wilc_wfi_wep_key g_key_wep_params;
-static bool g_ptk_keys_saved;
-static bool g_gtk_keys_saved;
-static bool g_wep_keys_saved;
-
#define AGING_TIME (9 * 1000)
-#define during_ip_time 15000
+#define DURING_IP_TIME_OUT 15000
static void clear_shadow_scan(void)
{
int i;
- if (op_ifcs == 0) {
- del_timer_sync(&hAgingTimer);
+ if (op_ifcs != 0)
+ return;
- for (i = 0; i < last_scanned_cnt; i++) {
- if (last_scanned_shadow[last_scanned_cnt].ies) {
- kfree(last_scanned_shadow[i].ies);
- last_scanned_shadow[last_scanned_cnt].ies = NULL;
- }
+ del_timer_sync(&aging_timer);
- kfree(last_scanned_shadow[i].join_params);
- last_scanned_shadow[i].join_params = NULL;
+ for (i = 0; i < last_scanned_cnt; i++) {
+ if (last_scanned_shadow[last_scanned_cnt].ies) {
+ kfree(last_scanned_shadow[i].ies);
+ last_scanned_shadow[last_scanned_cnt].ies = NULL;
}
- last_scanned_cnt = 0;
+
+ kfree(last_scanned_shadow[i].join_params);
+ last_scanned_shadow[i].join_params = NULL;
}
+ last_scanned_cnt = 0;
}
static u32 get_rssi_avg(struct network_info *network_info)
@@ -270,26 +251,25 @@ static void remove_network_from_shadow(struct timer_list *unused)
int i, j;
for (i = 0; i < last_scanned_cnt; i++) {
- if (time_after(now, last_scanned_shadow[i].time_scan +
- (unsigned long)(SCAN_RESULT_EXPIRE))) {
- kfree(last_scanned_shadow[i].ies);
- last_scanned_shadow[i].ies = NULL;
+ if (!time_after(now, last_scanned_shadow[i].time_scan +
+ (unsigned long)(SCAN_RESULT_EXPIRE)))
+ continue;
+ kfree(last_scanned_shadow[i].ies);
+ last_scanned_shadow[i].ies = NULL;
- kfree(last_scanned_shadow[i].join_params);
+ kfree(last_scanned_shadow[i].join_params);
- for (j = i; (j < last_scanned_cnt - 1); j++)
- last_scanned_shadow[j] = last_scanned_shadow[j + 1];
+ for (j = i; (j < last_scanned_cnt - 1); j++)
+ last_scanned_shadow[j] = last_scanned_shadow[j + 1];
- last_scanned_cnt--;
- }
+ last_scanned_cnt--;
}
- if (last_scanned_cnt != 0) {
- mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
- }
+ if (last_scanned_cnt != 0)
+ mod_timer(&aging_timer, jiffies + msecs_to_jiffies(AGING_TIME));
}
-static void clear_duringIP(struct timer_list *unused)
+static void clear_during_ip(struct timer_list *unused)
{
wilc_optaining_ip = false;
}
@@ -300,7 +280,7 @@ static int is_network_in_shadow(struct network_info *nw_info, void *user_void)
int i;
if (last_scanned_cnt == 0) {
- mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
+ mod_timer(&aging_timer, jiffies + msecs_to_jiffies(AGING_TIME));
state = -1;
} else {
for (i = 0; i < last_scanned_cnt; i++) {
@@ -320,6 +300,7 @@ static void add_network_to_shadow(struct network_info *nw_info,
int ap_found = is_network_in_shadow(nw_info, user_void);
u32 ap_index = 0;
u8 rssi_index = 0;
+ struct network_info *shadow_nw_info;
if (last_scanned_cnt >= MAX_NUM_SCANNED_NETWORKS_SHADOW)
return;
@@ -330,37 +311,37 @@ static void add_network_to_shadow(struct network_info *nw_info,
} else {
ap_index = ap_found;
}
- rssi_index = last_scanned_shadow[ap_index].rssi_history.index;
- last_scanned_shadow[ap_index].rssi_history.samples[rssi_index++] = nw_info->rssi;
+ shadow_nw_info = &last_scanned_shadow[ap_index];
+ rssi_index = shadow_nw_info->rssi_history.index;
+ shadow_nw_info->rssi_history.samples[rssi_index++] = nw_info->rssi;
if (rssi_index == NUM_RSSI) {
rssi_index = 0;
- last_scanned_shadow[ap_index].rssi_history.full = true;
+ shadow_nw_info->rssi_history.full = true;
}
- last_scanned_shadow[ap_index].rssi_history.index = rssi_index;
- last_scanned_shadow[ap_index].rssi = nw_info->rssi;
- last_scanned_shadow[ap_index].cap_info = nw_info->cap_info;
- last_scanned_shadow[ap_index].ssid_len = nw_info->ssid_len;
- memcpy(last_scanned_shadow[ap_index].ssid,
- nw_info->ssid, nw_info->ssid_len);
- memcpy(last_scanned_shadow[ap_index].bssid,
- nw_info->bssid, ETH_ALEN);
- last_scanned_shadow[ap_index].beacon_period = nw_info->beacon_period;
- last_scanned_shadow[ap_index].dtim_period = nw_info->dtim_period;
- last_scanned_shadow[ap_index].ch = nw_info->ch;
- last_scanned_shadow[ap_index].ies_len = nw_info->ies_len;
- last_scanned_shadow[ap_index].tsf_hi = nw_info->tsf_hi;
+ shadow_nw_info->rssi_history.index = rssi_index;
+ shadow_nw_info->rssi = nw_info->rssi;
+ shadow_nw_info->cap_info = nw_info->cap_info;
+ shadow_nw_info->ssid_len = nw_info->ssid_len;
+ memcpy(shadow_nw_info->ssid, nw_info->ssid, nw_info->ssid_len);
+ memcpy(shadow_nw_info->bssid, nw_info->bssid, ETH_ALEN);
+ shadow_nw_info->beacon_period = nw_info->beacon_period;
+ shadow_nw_info->dtim_period = nw_info->dtim_period;
+ shadow_nw_info->ch = nw_info->ch;
+ shadow_nw_info->tsf_hi = nw_info->tsf_hi;
if (ap_found != -1)
- kfree(last_scanned_shadow[ap_index].ies);
- last_scanned_shadow[ap_index].ies = kmalloc(nw_info->ies_len,
- GFP_KERNEL);
- memcpy(last_scanned_shadow[ap_index].ies,
- nw_info->ies, nw_info->ies_len);
- last_scanned_shadow[ap_index].time_scan = jiffies;
- last_scanned_shadow[ap_index].time_scan_cached = jiffies;
- last_scanned_shadow[ap_index].found = 1;
+ kfree(shadow_nw_info->ies);
+ shadow_nw_info->ies = kmemdup(nw_info->ies, nw_info->ies_len,
+ GFP_KERNEL);
+ if (shadow_nw_info->ies)
+ shadow_nw_info->ies_len = nw_info->ies_len;
+ else
+ shadow_nw_info->ies_len = 0;
+ shadow_nw_info->time_scan = jiffies;
+ shadow_nw_info->time_scan_cached = jiffies;
+ shadow_nw_info->found = 1;
if (ap_found != -1)
- kfree(last_scanned_shadow[ap_index].join_params);
- last_scanned_shadow[ap_index].join_params = join_params;
+ kfree(shadow_nw_info->join_params);
+ shadow_nw_info->join_params = join_params;
}
static void cfg_scan_result(enum scan_event scan_event,
@@ -369,7 +350,7 @@ static void cfg_scan_result(enum scan_event scan_event,
{
struct wilc_priv *priv;
struct wiphy *wiphy;
- s32 s32Freq;
+ s32 freq;
struct ieee80211_channel *channel;
struct cfg80211_bss *bss = NULL;
@@ -388,9 +369,9 @@ static void cfg_scan_result(enum scan_event scan_event,
((s32)network_info->rssi * 100) > 100))
return;
- s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch,
- NL80211_BAND_2GHZ);
- channel = ieee80211_get_channel(wiphy, s32Freq);
+ freq = ieee80211_channel_to_frequency((s32)network_info->ch,
+ NL80211_BAND_2GHZ);
+ channel = ieee80211_get_channel(wiphy, freq);
if (!channel)
return;
@@ -468,6 +449,17 @@ static void cfg_scan_result(enum scan_event scan_event,
}
}
+static inline bool wilc_wfi_cfg_scan_time_expired(int i)
+{
+ unsigned long now = jiffies;
+
+ if (time_after(now, last_scanned_shadow[i].time_scan_cached +
+ (unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ))))
+ return true;
+ else
+ return false;
+}
+
int wilc_connecting;
static void cfg_connect_result(enum conn_event conn_disconn_evt,
@@ -496,7 +488,7 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt,
connect_status = conn_info->status;
- if (mac_status == MAC_DISCONNECTED &&
+ if (mac_status == MAC_STATUS_DISCONNECTED &&
conn_info->status == SUCCESSFUL_STATUSCODE) {
connect_status = WLAN_STATUS_UNSPECIFIED_FAILURE;
wilc_wlan_set_bssid(priv->dev, null_bssid,
@@ -513,17 +505,14 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt,
bool scan_refresh = false;
u32 i;
- memcpy(priv->associated_bss, conn_info->bssid, ETH_ALEN);
+ memcpy(priv->associated_bss, conn_info->bssid,
+ ETH_ALEN);
for (i = 0; i < last_scanned_cnt; i++) {
if (memcmp(last_scanned_shadow[i].bssid,
conn_info->bssid,
ETH_ALEN) == 0) {
- unsigned long now = jiffies;
-
- if (time_after(now,
- last_scanned_shadow[i].time_scan_cached +
- (unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ))))
+ if (wilc_wfi_cfg_scan_time_expired(i))
scan_refresh = true;
break;
@@ -535,9 +524,11 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt,
}
cfg80211_connect_result(dev, conn_info->bssid,
- conn_info->req_ies, conn_info->req_ies_len,
- conn_info->resp_ies, conn_info->resp_ies_len,
- connect_status, GFP_KERNEL);
+ conn_info->req_ies,
+ conn_info->req_ies_len,
+ conn_info->resp_ies,
+ conn_info->resp_ies_len, connect_status,
+ GFP_KERNEL);
} else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
wilc_optaining_ip = false;
p2p_local_random = 0x01;
@@ -554,9 +545,9 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt,
else if (!wfi_drv->IFC_UP && dev == wl->vif[1]->ndev)
disconn_info->reason = 1;
- cfg80211_disconnected(dev, disconn_info->reason, disconn_info->ie,
- disconn_info->ie_len, false,
- GFP_KERNEL);
+ cfg80211_disconnected(dev, disconn_info->reason,
+ disconn_info->ie, disconn_info->ie_len,
+ false, GFP_KERNEL);
}
}
@@ -582,6 +573,49 @@ static int set_channel(struct wiphy *wiphy,
return result;
}
+static inline int
+wilc_wfi_cfg_alloc_fill_ssid(struct cfg80211_scan_request *request,
+ struct hidden_network *ntwk)
+{
+ int i;
+ int slot_id = 0;
+
+ ntwk->net_info = kcalloc(request->n_ssids, sizeof(*ntwk->net_info),
+ GFP_KERNEL);
+ if (!ntwk->net_info)
+ goto out;
+
+ ntwk->n_ssids = request->n_ssids;
+
+ for (i = 0; i < request->n_ssids; i++) {
+ if (request->ssids[i].ssid_len > 0) {
+ struct hidden_net_info *info = &ntwk->net_info[slot_id];
+
+ info->ssid = kmemdup(request->ssids[i].ssid,
+ request->ssids[i].ssid_len,
+ GFP_KERNEL);
+ if (!info->ssid)
+ goto out_free;
+
+ info->ssid_len = request->ssids[i].ssid_len;
+ slot_id++;
+ } else {
+ ntwk->n_ssids -= 1;
+ }
+ }
+ return 0;
+
+out_free:
+
+ for (i = 0; i < slot_id; i++)
+ kfree(ntwk->net_info[i].ssid);
+
+ kfree(ntwk->net_info);
+out:
+
+ return -ENOMEM;
+}
+
static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct wilc_priv *priv;
@@ -602,27 +636,17 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
priv->cfg_scanning = true;
if (request->n_channels <= MAX_NUM_SCANNED_NETWORKS) {
- for (i = 0; i < request->n_channels; i++)
- scan_ch_list[i] = (u8)ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+ for (i = 0; i < request->n_channels; i++) {
+ u16 freq = request->channels[i]->center_freq;
+
+ scan_ch_list[i] = ieee80211_frequency_to_channel(freq);
+ }
if (request->n_ssids >= 1) {
- hidden_ntwk.net_info =
- kmalloc_array(request->n_ssids,
- sizeof(struct hidden_network),
- GFP_KERNEL);
- if (!hidden_ntwk.net_info)
+ if (wilc_wfi_cfg_alloc_fill_ssid(request,
+ &hidden_ntwk))
return -ENOMEM;
- hidden_ntwk.n_ssids = request->n_ssids;
-
- for (i = 0; i < request->n_ssids; i++) {
- if (request->ssids[i].ssid_len != 0) {
- hidden_ntwk.net_info[i].ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL);
- memcpy(hidden_ntwk.net_info[i].ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
- hidden_ntwk.net_info[i].ssid_len = request->ssids[i].ssid_len;
- } else {
- hidden_ntwk.n_ssids -= 1;
- }
- }
+
ret = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN,
scan_ch_list,
request->n_channels,
@@ -653,9 +677,9 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
s32 ret = 0;
u32 i;
u32 sel_bssi_idx = UINT_MAX;
- u8 u8security = NO_ENCRYPT;
+ u8 security = NO_ENCRYPT;
enum AUTHTYPE auth_type = ANY;
-
+ u32 cipher_group;
struct wilc_priv *priv;
struct host_if_drv *wfi_drv;
struct network_info *nw_info = NULL;
@@ -700,50 +724,41 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
- memset(priv->WILC_WFI_wep_key, 0, sizeof(priv->WILC_WFI_wep_key));
- memset(priv->WILC_WFI_wep_key_len, 0, sizeof(priv->WILC_WFI_wep_key_len));
-
- if (sme->crypto.cipher_group != NO_ENCRYPT) {
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) {
- u8security = ENCRYPT_ENABLED | WEP;
+ memset(priv->wep_key, 0, sizeof(priv->wep_key));
+ memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
- priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
- memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
+ cipher_group = sme->crypto.cipher_group;
+ if (cipher_group != NO_ENCRYPT) {
+ if (cipher_group == WLAN_CIPHER_SUITE_WEP40) {
+ security = ENCRYPT_ENABLED | WEP;
- g_key_wep_params.key_len = sme->key_len;
- g_key_wep_params.key = kmalloc(sme->key_len, GFP_KERNEL);
- memcpy(g_key_wep_params.key, sme->key, sme->key_len);
- g_key_wep_params.key_idx = sme->key_idx;
- g_wep_keys_saved = true;
+ priv->wep_key_len[sme->key_idx] = sme->key_len;
+ memcpy(priv->wep_key[sme->key_idx], sme->key,
+ sme->key_len);
wilc_set_wep_default_keyid(vif, sme->key_idx);
wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
sme->key_idx);
- } else if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104) {
- u8security = ENCRYPT_ENABLED | WEP | WEP_EXTENDED;
+ } else if (cipher_group == WLAN_CIPHER_SUITE_WEP104) {
+ security = ENCRYPT_ENABLED | WEP | WEP_EXTENDED;
- priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
- memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
-
- g_key_wep_params.key_len = sme->key_len;
- g_key_wep_params.key = kmalloc(sme->key_len, GFP_KERNEL);
- memcpy(g_key_wep_params.key, sme->key, sme->key_len);
- g_key_wep_params.key_idx = sme->key_idx;
- g_wep_keys_saved = true;
+ priv->wep_key_len[sme->key_idx] = sme->key_len;
+ memcpy(priv->wep_key[sme->key_idx], sme->key,
+ sme->key_len);
wilc_set_wep_default_keyid(vif, sme->key_idx);
wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
sme->key_idx);
- } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
- u8security = ENCRYPT_ENABLED | WPA2 | TKIP;
+ } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
+ if (cipher_group == WLAN_CIPHER_SUITE_TKIP)
+ security = ENCRYPT_ENABLED | WPA2 | TKIP;
else
- u8security = ENCRYPT_ENABLED | WPA2 | AES;
- } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
- u8security = ENCRYPT_ENABLED | WPA | TKIP;
+ security = ENCRYPT_ENABLED | WPA2 | AES;
+ } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
+ if (cipher_group == WLAN_CIPHER_SUITE_TKIP)
+ security = ENCRYPT_ENABLED | WPA | TKIP;
else
- u8security = ENCRYPT_ENABLED | WPA | AES;
+ security = ENCRYPT_ENABLED | WPA | AES;
} else {
ret = -ENOTSUPP;
netdev_err(dev, "Not supported cipher\n");
@@ -755,14 +770,16 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
if ((sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) ||
(sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) {
- if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP)
- u8security = u8security | TKIP;
+ u32 ciphers_pairwise = sme->crypto.ciphers_pairwise[i];
+
+ if (ciphers_pairwise == WLAN_CIPHER_SUITE_TKIP)
+ security = security | TKIP;
else
- u8security = u8security | AES;
+ security = security | AES;
}
}
- switch (sme->auth_type) {
+ switch (sme->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
auth_type = OPEN_SYSTEM;
break;
@@ -790,7 +807,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
ret = wilc_set_join_req(vif, nw_info->bssid, sme->ssid,
sme->ssid_len, sme->ie, sme->ie_len,
cfg_connect_result, (void *)priv,
- u8security, auth_type,
+ security, auth_type,
nw_info->ch,
nw_info->join_params);
if (ret != 0) {
@@ -803,7 +820,8 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code)
+static int disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
{
s32 ret = 0;
struct wilc_priv *priv;
@@ -845,203 +863,161 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co
return ret;
}
+static inline void wilc_wfi_cfg_copy_wep_info(struct wilc_priv *priv,
+ u8 key_index,
+ struct key_params *params)
+{
+ priv->wep_key_len[key_index] = params->key_len;
+ memcpy(priv->wep_key[key_index], params->key, params->key_len);
+}
+
+static int wilc_wfi_cfg_allocate_wpa_entry(struct wilc_priv *priv, u8 idx)
+{
+ if (!priv->wilc_gtk[idx]) {
+ priv->wilc_gtk[idx] = kzalloc(sizeof(*priv->wilc_gtk[idx]),
+ GFP_KERNEL);
+ if (!priv->wilc_gtk[idx])
+ return -ENOMEM;
+ }
+
+ if (!priv->wilc_ptk[idx]) {
+ priv->wilc_ptk[idx] = kzalloc(sizeof(*priv->wilc_ptk[idx]),
+ GFP_KERNEL);
+ if (!priv->wilc_ptk[idx])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int wilc_wfi_cfg_copy_wpa_info(struct wilc_wfi_key *key_info,
+ struct key_params *params)
+{
+ kfree(key_info->key);
+
+ key_info->key = kmemdup(params->key, params->key_len, GFP_KERNEL);
+ if (!key_info->key)
+ return -ENOMEM;
+
+ kfree(key_info->seq);
+
+ if (params->seq_len > 0) {
+ key_info->seq = kmemdup(params->seq, params->seq_len,
+ GFP_KERNEL);
+ if (!key_info->seq)
+ return -ENOMEM;
+ }
+
+ key_info->cipher = params->cipher;
+ key_info->key_len = params->key_len;
+ key_info->seq_len = params->seq_len;
+
+ return 0;
+}
+
static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
- bool pairwise,
- const u8 *mac_addr, struct key_params *params)
+ bool pairwise, const u8 *mac_addr, struct key_params *params)
{
s32 ret = 0, keylen = params->key_len;
struct wilc_priv *priv;
const u8 *rx_mic = NULL;
const u8 *tx_mic = NULL;
- u8 u8mode = NO_ENCRYPT;
- u8 u8gmode = NO_ENCRYPT;
- u8 u8pmode = NO_ENCRYPT;
- enum AUTHTYPE auth_type = ANY;
- struct wilc *wl;
+ u8 mode = NO_ENCRYPT;
+ u8 op_mode;
struct wilc_vif *vif;
priv = wiphy_priv(wiphy);
vif = netdev_priv(netdev);
- wl = vif->wilc;
- switch (params->cipher) {
+ switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
if (priv->wdev->iftype == NL80211_IFTYPE_AP) {
- priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
- memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
-
- auth_type = OPEN_SYSTEM;
+ wilc_wfi_cfg_copy_wep_info(priv, key_index, params);
if (params->cipher == WLAN_CIPHER_SUITE_WEP40)
- u8mode = ENCRYPT_ENABLED | WEP;
+ mode = ENCRYPT_ENABLED | WEP;
else
- u8mode = ENCRYPT_ENABLED | WEP | WEP_EXTENDED;
+ mode = ENCRYPT_ENABLED | WEP | WEP_EXTENDED;
- wilc_add_wep_key_bss_ap(vif, params->key,
- params->key_len, key_index,
- u8mode, auth_type);
+ ret = wilc_add_wep_key_bss_ap(vif, params->key,
+ params->key_len,
+ key_index, mode,
+ OPEN_SYSTEM);
break;
}
- if (memcmp(params->key, priv->WILC_WFI_wep_key[key_index], params->key_len)) {
- priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
- memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
+ if (memcmp(params->key, priv->wep_key[key_index],
+ params->key_len)) {
+ wilc_wfi_cfg_copy_wep_info(priv, key_index, params);
- wilc_add_wep_key_bss_sta(vif, params->key,
- params->key_len, key_index);
+ ret = wilc_add_wep_key_bss_sta(vif, params->key,
+ params->key_len,
+ key_index);
}
break;
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
- if (priv->wdev->iftype == NL80211_IFTYPE_AP || priv->wdev->iftype == NL80211_IFTYPE_P2P_GO) {
- if (!priv->wilc_gtk[key_index]) {
- priv->wilc_gtk[key_index] = kmalloc(sizeof(struct wilc_wfi_key), GFP_KERNEL);
- priv->wilc_gtk[key_index]->key = NULL;
- priv->wilc_gtk[key_index]->seq = NULL;
- }
- if (!priv->wilc_ptk[key_index]) {
- priv->wilc_ptk[key_index] = kmalloc(sizeof(struct wilc_wfi_key), GFP_KERNEL);
- priv->wilc_ptk[key_index]->key = NULL;
- priv->wilc_ptk[key_index]->seq = NULL;
+ if (priv->wdev->iftype == NL80211_IFTYPE_AP ||
+ priv->wdev->iftype == NL80211_IFTYPE_P2P_GO) {
+ struct wilc_wfi_key *key;
+
+ ret = wilc_wfi_cfg_allocate_wpa_entry(priv, key_index);
+ if (ret)
+ return -ENOMEM;
+
+ if (params->key_len > 16 &&
+ params->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ tx_mic = params->key + 24;
+ rx_mic = params->key + 16;
+ keylen = params->key_len - 16;
}
if (!pairwise) {
if (params->cipher == WLAN_CIPHER_SUITE_TKIP)
- u8gmode = ENCRYPT_ENABLED | WPA | TKIP;
+ mode = ENCRYPT_ENABLED | WPA | TKIP;
else
- u8gmode = ENCRYPT_ENABLED | WPA2 | AES;
+ mode = ENCRYPT_ENABLED | WPA2 | AES;
- priv->wilc_groupkey = u8gmode;
-
- if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- tx_mic = params->key + 24;
- rx_mic = params->key + 16;
- keylen = params->key_len - 16;
- }
- kfree(priv->wilc_gtk[key_index]->key);
-
- priv->wilc_gtk[key_index]->key = kmalloc(params->key_len, GFP_KERNEL);
- memcpy(priv->wilc_gtk[key_index]->key, params->key, params->key_len);
- kfree(priv->wilc_gtk[key_index]->seq);
-
- if (params->seq_len > 0) {
- priv->wilc_gtk[key_index]->seq = kmalloc(params->seq_len, GFP_KERNEL);
- memcpy(priv->wilc_gtk[key_index]->seq, params->seq, params->seq_len);
- }
-
- priv->wilc_gtk[key_index]->cipher = params->cipher;
- priv->wilc_gtk[key_index]->key_len = params->key_len;
- priv->wilc_gtk[key_index]->seq_len = params->seq_len;
-
- wilc_add_rx_gtk(vif, params->key, keylen,
- key_index, params->seq_len,
- params->seq, rx_mic,
- tx_mic, AP_MODE, u8gmode);
+ priv->wilc_groupkey = mode;
+ key = priv->wilc_gtk[key_index];
} else {
if (params->cipher == WLAN_CIPHER_SUITE_TKIP)
- u8pmode = ENCRYPT_ENABLED | WPA | TKIP;
+ mode = ENCRYPT_ENABLED | WPA | TKIP;
else
- u8pmode = priv->wilc_groupkey | AES;
-
- if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- tx_mic = params->key + 24;
- rx_mic = params->key + 16;
- keylen = params->key_len - 16;
- }
-
- kfree(priv->wilc_ptk[key_index]->key);
- priv->wilc_ptk[key_index]->key = kmalloc(params->key_len, GFP_KERNEL);
- memcpy(priv->wilc_ptk[key_index]->key, params->key, params->key_len);
-
- kfree(priv->wilc_ptk[key_index]->seq);
- if (params->seq_len > 0) {
- priv->wilc_ptk[key_index]->seq = kmalloc(params->seq_len, GFP_KERNEL);
- memcpy(priv->wilc_ptk[key_index]->seq, params->seq, params->seq_len);
- }
+ mode = priv->wilc_groupkey | AES;
- priv->wilc_ptk[key_index]->cipher = params->cipher;
- priv->wilc_ptk[key_index]->key_len = params->key_len;
- priv->wilc_ptk[key_index]->seq_len = params->seq_len;
-
- wilc_add_ptk(vif, params->key, keylen,
- mac_addr, rx_mic, tx_mic,
- AP_MODE, u8pmode, key_index);
+ key = priv->wilc_ptk[key_index];
}
- break;
- }
-
- {
- u8mode = 0;
- if (!pairwise) {
- if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- rx_mic = params->key + 24;
- tx_mic = params->key + 16;
- keylen = params->key_len - 16;
- }
+ ret = wilc_wfi_cfg_copy_wpa_info(key, params);
+ if (ret)
+ return -ENOMEM;
- if (!g_gtk_keys_saved && netdev == wl->vif[0]->ndev) {
- g_add_gtk_key_params.key_idx = key_index;
- g_add_gtk_key_params.pairwise = pairwise;
- if (!mac_addr) {
- g_add_gtk_key_params.mac_addr = NULL;
- } else {
- g_add_gtk_key_params.mac_addr = kmalloc(ETH_ALEN, GFP_KERNEL);
- memcpy(g_add_gtk_key_params.mac_addr, mac_addr, ETH_ALEN);
- }
- g_key_gtk_params.key_len = params->key_len;
- g_key_gtk_params.seq_len = params->seq_len;
- g_key_gtk_params.key = kmalloc(params->key_len, GFP_KERNEL);
- memcpy(g_key_gtk_params.key, params->key, params->key_len);
- if (params->seq_len > 0) {
- g_key_gtk_params.seq = kmalloc(params->seq_len, GFP_KERNEL);
- memcpy(g_key_gtk_params.seq, params->seq, params->seq_len);
- }
- g_key_gtk_params.cipher = params->cipher;
- g_gtk_keys_saved = true;
- }
+ op_mode = AP_MODE;
+ } else {
+ if (params->key_len > 16 &&
+ params->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ rx_mic = params->key + 24;
+ tx_mic = params->key + 16;
+ keylen = params->key_len - 16;
+ }
- wilc_add_rx_gtk(vif, params->key, keylen,
- key_index, params->seq_len,
- params->seq, rx_mic,
- tx_mic, STATION_MODE,
- u8mode);
- } else {
- if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- rx_mic = params->key + 24;
- tx_mic = params->key + 16;
- keylen = params->key_len - 16;
- }
+ op_mode = STATION_MODE;
+ }
- if (!g_ptk_keys_saved && netdev == wl->vif[0]->ndev) {
- g_add_ptk_key_params.key_idx = key_index;
- g_add_ptk_key_params.pairwise = pairwise;
- if (!mac_addr) {
- g_add_ptk_key_params.mac_addr = NULL;
- } else {
- g_add_ptk_key_params.mac_addr = kmalloc(ETH_ALEN, GFP_KERNEL);
- memcpy(g_add_ptk_key_params.mac_addr, mac_addr, ETH_ALEN);
- }
- g_key_ptk_params.key_len = params->key_len;
- g_key_ptk_params.seq_len = params->seq_len;
- g_key_ptk_params.key = kmalloc(params->key_len, GFP_KERNEL);
- memcpy(g_key_ptk_params.key, params->key, params->key_len);
- if (params->seq_len > 0) {
- g_key_ptk_params.seq = kmalloc(params->seq_len, GFP_KERNEL);
- memcpy(g_key_ptk_params.seq, params->seq, params->seq_len);
- }
- g_key_ptk_params.cipher = params->cipher;
- g_ptk_keys_saved = true;
- }
+ if (!pairwise)
+ ret = wilc_add_rx_gtk(vif, params->key, keylen,
+ key_index, params->seq_len,
+ params->seq, rx_mic, tx_mic,
+ op_mode, mode);
+ else
+ ret = wilc_add_ptk(vif, params->key, keylen, mac_addr,
+ rx_mic, tx_mic, op_mode, mode,
+ key_index);
- wilc_add_ptk(vif, params->key, keylen,
- mac_addr, rx_mic, tx_mic,
- STATION_MODE, u8mode, key_index);
- }
- }
break;
default:
@@ -1066,14 +1042,7 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
wl = vif->wilc;
if (netdev == wl->vif[0]->ndev) {
- g_ptk_keys_saved = false;
- g_gtk_keys_saved = false;
- g_wep_keys_saved = false;
-
- kfree(g_key_wep_params.key);
- g_key_wep_params.key = NULL;
-
- if (priv->wilc_gtk[key_index] != NULL) {
+ if (priv->wilc_gtk[key_index]) {
kfree(priv->wilc_gtk[key_index]->key);
priv->wilc_gtk[key_index]->key = NULL;
kfree(priv->wilc_gtk[key_index]->seq);
@@ -1083,7 +1052,7 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
priv->wilc_gtk[key_index] = NULL;
}
- if (priv->wilc_ptk[key_index] != NULL) {
+ if (priv->wilc_ptk[key_index]) {
kfree(priv->wilc_ptk[key_index]->key);
priv->wilc_ptk[key_index]->key = NULL;
kfree(priv->wilc_ptk[key_index]->seq);
@@ -1091,27 +1060,13 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
kfree(priv->wilc_ptk[key_index]);
priv->wilc_ptk[key_index] = NULL;
}
-
- kfree(g_key_ptk_params.key);
- g_key_ptk_params.key = NULL;
- kfree(g_key_ptk_params.seq);
- g_key_ptk_params.seq = NULL;
-
- kfree(g_key_gtk_params.key);
- g_key_gtk_params.key = NULL;
- kfree(g_key_gtk_params.seq);
- g_key_gtk_params.seq = NULL;
}
- if (key_index >= 0 && key_index <= 3) {
- if (priv->WILC_WFI_wep_key_len[key_index]) {
- memset(priv->WILC_WFI_wep_key[key_index], 0,
- priv->WILC_WFI_wep_key_len[key_index]);
- priv->WILC_WFI_wep_key_len[key_index] = 0;
- wilc_remove_wep_key(vif, key_index);
- }
- } else {
- wilc_remove_key(priv->hif_drv, mac_addr);
+ if (key_index <= 3 && priv->wep_key_len[key_index]) {
+ memset(priv->wep_key[key_index], 0,
+ priv->wep_key_len[key_index]);
+ priv->wep_key_len[key_index] = 0;
+ wilc_remove_wep_key(vif, key_index);
}
return 0;
@@ -1173,7 +1128,9 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
for (i = 0; i < NUM_STA_ASSOCIATED; i++) {
- if (!(memcmp(mac, priv->assoc_stainfo.sta_associated_bss[i], ETH_ALEN))) {
+ if (!(memcmp(mac,
+ priv->assoc_stainfo.sta_associated_bss[i],
+ ETH_ALEN))) {
associatedsta = i;
break;
}
@@ -1188,18 +1145,16 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
wilc_get_inactive_time(vif, mac, &inactive_time);
sinfo->inactive_time = 1000 * inactive_time;
- }
-
- if (vif->iftype == STATION_MODE) {
+ } else if (vif->iftype == STATION_MODE) {
struct rf_info stats;
wilc_get_statistics(vif, &stats);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
- BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_FAILED) |
- BIT(NL80211_STA_INFO_TX_BITRATE);
+ BIT(NL80211_STA_INFO_RX_PACKETS) |
+ BIT(NL80211_STA_INFO_TX_PACKETS) |
+ BIT(NL80211_STA_INFO_TX_FAILED) |
+ BIT(NL80211_STA_INFO_TX_BITRATE);
sinfo->signal = stats.rssi;
sinfo->rx_packets = stats.rx_cnt;
@@ -1236,20 +1191,20 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
if (changed & WIPHY_PARAM_RETRY_SHORT) {
cfg_param_val.flag |= RETRY_SHORT;
- cfg_param_val.short_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_short;
+ cfg_param_val.short_retry_limit = wiphy->retry_short;
}
if (changed & WIPHY_PARAM_RETRY_LONG) {
cfg_param_val.flag |= RETRY_LONG;
- cfg_param_val.long_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_long;
+ cfg_param_val.long_retry_limit = wiphy->retry_long;
}
if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
cfg_param_val.flag |= FRAG_THRESHOLD;
- cfg_param_val.frag_threshold = priv->dev->ieee80211_ptr->wiphy->frag_threshold;
+ cfg_param_val.frag_threshold = wiphy->frag_threshold;
}
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
cfg_param_val.flag |= RTS_THRESHOLD;
- cfg_param_val.rts_threshold = priv->dev->ieee80211_ptr->wiphy->rts_threshold;
+ cfg_param_val.rts_threshold = wiphy->rts_threshold;
}
ret = wilc_hif_set_cfg(vif, &cfg_param_val);
@@ -1306,7 +1261,8 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
ETH_ALEN)) {
- memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(struct host_if_pmkid));
+ memset(&priv->pmkid_list.pmkidlist[i], 0,
+ sizeof(struct host_if_pmkid));
break;
}
}
@@ -1409,13 +1365,50 @@ static void wilc_wfi_cfg_parse_tx_action(u8 *buf, u32 len, bool oper_ch,
op_channel_attr_index);
}
-void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
+static void wilc_wfi_cfg_parse_rx_vendor_spec(struct wilc_priv *priv, u8 *buff,
+ u32 size)
+{
+ int i;
+ u8 subtype;
+ struct wilc_vif *vif = netdev_priv(priv->dev);
+
+ subtype = buff[P2P_PUB_ACTION_SUBTYPE];
+ if ((subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) && !wilc_ie) {
+ for (i = P2P_PUB_ACTION_SUBTYPE; i < size; i++) {
+ if (!memcmp(p2p_vendor_spec, &buff[i], 6)) {
+ p2p_recv_random = buff[i + 6];
+ wilc_ie = true;
+ break;
+ }
+ }
+ }
+
+ if (p2p_local_random <= p2p_recv_random) {
+ netdev_dbg(vif->ndev,
+ "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n",
+ p2p_local_random, p2p_recv_random);
+ return;
+ }
+
+ if (subtype == GO_NEG_REQ || subtype == GO_NEG_RSP ||
+ subtype == P2P_INV_REQ || subtype == P2P_INV_RSP) {
+ for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < size; i++) {
+ if (buff[i] == P2PELEM_ATTR_ID &&
+ !(memcmp(p2p_oui, &buff[i + 2], 4))) {
+ wilc_wfi_cfg_parse_rx_action(&buff[i + 6],
+ size - (i + 6));
+ break;
+ }
+ }
+ }
+}
+
+void wilc_wfi_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
{
struct wilc_priv *priv;
u32 header, pkt_offset;
struct host_if_drv *wfi_drv;
- u32 i = 0;
- s32 s32Freq;
+ s32 freq;
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
wfi_drv = (struct host_if_drv *)priv->hif_drv;
@@ -1425,75 +1418,57 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
pkt_offset = GET_PKT_OFFSET(header);
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
- if (buff[FRAME_TYPE_ID] == IEEE80211_STYPE_PROBE_RESP) {
- cfg80211_mgmt_tx_status(priv->wdev, priv->tx_cookie, buff, size, true, GFP_KERNEL);
- return;
- } else {
- if (pkt_offset & IS_MGMT_STATUS_SUCCES)
- cfg80211_mgmt_tx_status(priv->wdev, priv->tx_cookie, buff, size, true, GFP_KERNEL);
- else
- cfg80211_mgmt_tx_status(priv->wdev, priv->tx_cookie, buff, size, false, GFP_KERNEL);
- return;
- }
- } else {
- s32Freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ);
+ bool ack = false;
- if (ieee80211_is_action(buff[FRAME_TYPE_ID])) {
- if (priv->cfg_scanning && time_after_eq(jiffies, (unsigned long)wfi_drv->p2p_timeout)) {
- netdev_dbg(dev, "Receiving action wrong ch\n");
- return;
- }
- if (buff[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
- switch (buff[ACTION_SUBTYPE_ID]) {
- case GAS_INITIAL_REQ:
- break;
+ if (buff[FRAME_TYPE_ID] == IEEE80211_STYPE_PROBE_RESP ||
+ pkt_offset & IS_MGMT_STATUS_SUCCES)
+ ack = true;
- case GAS_INITIAL_RSP:
- break;
+ cfg80211_mgmt_tx_status(priv->wdev, priv->tx_cookie, buff, size,
+ ack, GFP_KERNEL);
+ return;
+ }
- case PUBLIC_ACT_VENDORSPEC:
- if (!memcmp(p2p_oui, &buff[ACTION_SUBTYPE_ID + 1], 4)) {
- if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP)) {
- if (!wilc_ie) {
- for (i = P2P_PUB_ACTION_SUBTYPE; i < size; i++) {
- if (!memcmp(p2p_vendor_spec, &buff[i], 6)) {
- p2p_recv_random = buff[i + 6];
- wilc_ie = true;
- break;
- }
- }
- }
- }
- if (p2p_local_random > p2p_recv_random) {
- if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP ||
- buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
- for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < size; i++) {
- if (buff[i] == P2PELEM_ATTR_ID && !(memcmp(p2p_oui, &buff[i + 2], 4))) {
- wilc_wfi_cfg_parse_rx_action(&buff[i + 6], size - (i + 6));
- break;
- }
- }
- }
- } else {
- netdev_dbg(dev, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random);
- }
- }
-
- if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP) && (wilc_ie)) {
- cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size - 7, 0);
- return;
- }
- break;
+ freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ);
- default:
- netdev_dbg(dev, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buff[ACTION_SUBTYPE_ID]);
- break;
- }
- }
- }
+ if (!ieee80211_is_action(buff[FRAME_TYPE_ID])) {
+ cfg80211_rx_mgmt(priv->wdev, freq, 0, buff, size, 0);
+ return;
+ }
- cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size, 0);
+ if (priv->cfg_scanning &&
+ time_after_eq(jiffies, (unsigned long)wfi_drv->p2p_timeout)) {
+ netdev_dbg(dev, "Receiving action wrong ch\n");
+ return;
}
+ if (buff[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
+ u8 subtype = buff[P2P_PUB_ACTION_SUBTYPE];
+
+ switch (buff[ACTION_SUBTYPE_ID]) {
+ case GAS_INITIAL_REQ:
+ case GAS_INITIAL_RSP:
+ break;
+
+ case PUBLIC_ACT_VENDORSPEC:
+ if (!memcmp(p2p_oui, &buff[ACTION_SUBTYPE_ID + 1], 4))
+ wilc_wfi_cfg_parse_rx_vendor_spec(priv, buff,
+ size);
+
+ if ((subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) &&
+ wilc_ie)
+ size -= 7;
+
+ break;
+
+ default:
+ netdev_dbg(dev,
+ "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n",
+ buff[ACTION_SUBTYPE_ID]);
+ break;
+ }
+ }
+
+ cfg80211_rx_mgmt(priv->wdev, freq, 0, buff, size, 0);
}
static void wilc_wfi_mgmt_tx_complete(void *priv, int status)
@@ -1521,18 +1496,16 @@ static void wilc_wfi_remain_on_channel_ready(void *priv_data)
static void wilc_wfi_remain_on_channel_expired(void *data, u32 session_id)
{
- struct wilc_priv *priv;
+ struct wilc_priv *priv = data;
+ struct wilc_wfi_p2p_listen_params *params = &priv->remain_on_ch_params;
- priv = data;
+ if (session_id != params->listen_session_id)
+ return;
- if (session_id == priv->remain_on_ch_params.listen_session_id) {
- priv->p2p_listen_state = false;
+ priv->p2p_listen_state = false;
- cfg80211_remain_on_channel_expired(priv->wdev,
- priv->remain_on_ch_params.listen_cookie,
- priv->remain_on_ch_params.listen_ch,
- GFP_KERNEL);
- }
+ cfg80211_remain_on_channel_expired(priv->wdev, params->listen_cookie,
+ params->listen_ch, GFP_KERNEL);
}
static int remain_on_channel(struct wiphy *wiphy,
@@ -1580,6 +1553,55 @@ static int cancel_remain_on_channel(struct wiphy *wiphy,
priv->remain_on_ch_params.listen_session_id);
}
+static void wilc_wfi_cfg_tx_vendor_spec(struct p2p_mgmt_data *mgmt_tx,
+ struct cfg80211_mgmt_tx_params *params,
+ u8 iftype, u32 buf_len)
+{
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ u32 i;
+ u8 subtype = buf[P2P_PUB_ACTION_SUBTYPE];
+
+ if (subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) {
+ if (p2p_local_random == 1 &&
+ p2p_recv_random < p2p_local_random) {
+ get_random_bytes(&p2p_local_random, 1);
+ p2p_local_random++;
+ }
+ }
+
+ if (p2p_local_random <= p2p_recv_random || !(subtype == GO_NEG_REQ ||
+ subtype == GO_NEG_RSP ||
+ subtype == P2P_INV_REQ ||
+ subtype == P2P_INV_RSP))
+ return;
+
+ for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) {
+ if (buf[i] == P2PELEM_ATTR_ID &&
+ !memcmp(p2p_oui, &buf[i + 2], 4)) {
+ bool oper_ch = false;
+ u8 *tx_buff = &mgmt_tx->buff[i + 6];
+
+ if (subtype == P2P_INV_REQ || subtype == P2P_INV_RSP)
+ oper_ch = true;
+
+ wilc_wfi_cfg_parse_tx_action(tx_buff, len - (i + 6),
+ oper_ch, iftype);
+
+ break;
+ }
+ }
+
+ if (subtype != P2P_INV_REQ && subtype != P2P_INV_RSP) {
+ int vendor_spec_len = sizeof(p2p_vendor_spec);
+
+ memcpy(&mgmt_tx->buff[len], p2p_vendor_spec,
+ vendor_spec_len);
+ mgmt_tx->buff[len + vendor_spec_len] = p2p_local_random;
+ mgmt_tx->size = buf_len;
+ }
+}
+
static int mgmt_tx(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
@@ -1593,9 +1615,9 @@ static int mgmt_tx(struct wiphy *wiphy,
struct p2p_mgmt_data *mgmt_tx;
struct wilc_priv *priv;
struct host_if_drv *wfi_drv;
- u32 i;
struct wilc_vif *vif;
u32 buf_len = len + sizeof(p2p_vendor_spec) + sizeof(p2p_local_random);
+ int ret = 0;
vif = netdev_priv(wdev->netdev);
priv = wiphy_priv(wiphy);
@@ -1605,92 +1627,75 @@ static int mgmt_tx(struct wiphy *wiphy,
priv->tx_cookie = *cookie;
mgmt = (const struct ieee80211_mgmt *)buf;
- if (ieee80211_is_mgmt(mgmt->frame_control)) {
- mgmt_tx = kmalloc(sizeof(struct p2p_mgmt_data), GFP_KERNEL);
- if (!mgmt_tx)
- return -EFAULT;
+ if (!ieee80211_is_mgmt(mgmt->frame_control))
+ goto out;
- mgmt_tx->buff = kmalloc(buf_len, GFP_KERNEL);
- if (!mgmt_tx->buff) {
- kfree(mgmt_tx);
- return -ENOMEM;
- }
+ mgmt_tx = kmalloc(sizeof(*mgmt_tx), GFP_KERNEL);
+ if (!mgmt_tx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mgmt_tx->buff = kmalloc(buf_len, GFP_KERNEL);
+ if (!mgmt_tx->buff) {
+ ret = -ENOMEM;
+ kfree(mgmt_tx);
+ goto out;
+ }
+
+ memcpy(mgmt_tx->buff, buf, len);
+ mgmt_tx->size = len;
+
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ wilc_set_mac_chnl_num(vif, chan->hw_value);
+ curr_channel = chan->hw_value;
+ goto out_txq_add_pkt;
+ }
- memcpy(mgmt_tx->buff, buf, len);
- mgmt_tx->size = len;
+ if (!ieee80211_is_action(mgmt->frame_control))
+ goto out_txq_add_pkt;
- if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ if (buf[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
+ if (buf[ACTION_SUBTYPE_ID] != PUBLIC_ACT_VENDORSPEC ||
+ buf[P2P_PUB_ACTION_SUBTYPE] != GO_NEG_CONF) {
wilc_set_mac_chnl_num(vif, chan->hw_value);
curr_channel = chan->hw_value;
- } else if (ieee80211_is_action(mgmt->frame_control)) {
- if (buf[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
- if (buf[ACTION_SUBTYPE_ID] != PUBLIC_ACT_VENDORSPEC ||
- buf[P2P_PUB_ACTION_SUBTYPE] != GO_NEG_CONF) {
- wilc_set_mac_chnl_num(vif,
- chan->hw_value);
- curr_channel = chan->hw_value;
- }
- switch (buf[ACTION_SUBTYPE_ID]) {
- case GAS_INITIAL_REQ:
- break;
+ }
+ switch (buf[ACTION_SUBTYPE_ID]) {
+ case GAS_INITIAL_REQ:
+ case GAS_INITIAL_RSP:
+ break;
- case GAS_INITIAL_RSP:
- break;
+ case PUBLIC_ACT_VENDORSPEC:
+ if (!memcmp(p2p_oui, &buf[ACTION_SUBTYPE_ID + 1], 4))
+ wilc_wfi_cfg_tx_vendor_spec(mgmt_tx, params,
+ vif->iftype,
+ buf_len);
+ else
+ netdev_dbg(vif->ndev,
+ "Not a P2P public action frame\n");
- case PUBLIC_ACT_VENDORSPEC:
- {
- if (!memcmp(p2p_oui, &buf[ACTION_SUBTYPE_ID + 1], 4)) {
- if ((buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP)) {
- if (p2p_local_random == 1 && p2p_recv_random < p2p_local_random) {
- get_random_bytes(&p2p_local_random, 1);
- p2p_local_random++;
- }
- }
-
- if ((buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP ||
- buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
- if (p2p_local_random > p2p_recv_random) {
- for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) {
- if (buf[i] == P2PELEM_ATTR_ID && !(memcmp(p2p_oui, &buf[i + 2], 4))) {
- if (buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)
- wilc_wfi_cfg_parse_tx_action(&mgmt_tx->buff[i + 6], len - (i + 6), true, vif->iftype);
- else
- wilc_wfi_cfg_parse_tx_action(&mgmt_tx->buff[i + 6], len - (i + 6), false, vif->iftype);
- break;
- }
- }
-
- if (buf[P2P_PUB_ACTION_SUBTYPE] != P2P_INV_REQ && buf[P2P_PUB_ACTION_SUBTYPE] != P2P_INV_RSP) {
- memcpy(&mgmt_tx->buff[len], p2p_vendor_spec, sizeof(p2p_vendor_spec));
- mgmt_tx->buff[len + sizeof(p2p_vendor_spec)] = p2p_local_random;
- mgmt_tx->size = buf_len;
- }
- }
- }
-
- } else {
- netdev_dbg(vif->ndev, "Not a P2P public action frame\n");
- }
+ break;
- break;
- }
+ default:
+ netdev_dbg(vif->ndev,
+ "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n",
+ buf[ACTION_SUBTYPE_ID]);
+ break;
+ }
+ }
- default:
- {
- netdev_dbg(vif->ndev, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buf[ACTION_SUBTYPE_ID]);
- break;
- }
- }
- }
+ wfi_drv->p2p_timeout = (jiffies + msecs_to_jiffies(wait));
- wfi_drv->p2p_timeout = (jiffies + msecs_to_jiffies(wait));
- }
+out_txq_add_pkt:
- wilc_wlan_txq_add_mgmt_pkt(wdev->netdev, mgmt_tx,
- mgmt_tx->buff, mgmt_tx->size,
- wilc_wfi_mgmt_tx_complete);
- }
- return 0;
+ wilc_wlan_txq_add_mgmt_pkt(wdev->netdev, mgmt_tx,
+ mgmt_tx->buff, mgmt_tx->size,
+ wilc_wfi_mgmt_tx_complete);
+
+out:
+
+ return ret;
}
static int mgmt_tx_cancel_wait(struct wiphy *wiphy,
@@ -1734,24 +1739,18 @@ void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
switch (frame_type) {
case PROBE_REQ:
- {
vif->frame_reg[0].type = frame_type;
vif->frame_reg[0].reg = reg;
- }
- break;
+ break;
case ACTION:
- {
vif->frame_reg[1].type = frame_type;
vif->frame_reg[1].reg = reg;
- }
- break;
+ break;
default:
- {
break;
}
- }
if (!wl->initialized)
return;
@@ -1866,7 +1865,7 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
case NL80211_IFTYPE_P2P_GO:
wilc_optaining_ip = true;
mod_timer(&wilc_during_ip_timer,
- jiffies + msecs_to_jiffies(during_ip_time));
+ jiffies + msecs_to_jiffies(DURING_IP_TIME_OUT));
wilc_set_operation_mode(vif, AP_MODE);
dev->ieee80211_ptr->iftype = type;
priv->wdev->iftype = type;
@@ -1888,12 +1887,10 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings)
{
struct cfg80211_beacon_data *beacon = &settings->beacon;
- struct wilc_priv *priv;
s32 ret = 0;
struct wilc *wl;
struct wilc_vif *vif;
- priv = wiphy_priv(wiphy);
vif = netdev_priv(dev);
wl = vif->wilc;
@@ -1964,7 +1961,8 @@ static int add_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
memcpy(sta_params.bssid, mac, ETH_ALEN);
- memcpy(priv->assoc_stainfo.sta_associated_bss[params->aid], mac, ETH_ALEN);
+ memcpy(priv->assoc_stainfo.sta_associated_bss[params->aid], mac,
+ ETH_ALEN);
sta_params.aid = params->aid;
sta_params.rates_len = params->supported_rates_len;
sta_params.rates = params->supported_rates;
@@ -1994,6 +1992,7 @@ static int del_station(struct wiphy *wiphy, struct net_device *dev,
s32 ret = 0;
struct wilc_priv *priv;
struct wilc_vif *vif;
+ struct sta_info *info;
if (!wiphy)
return -EFAULT;
@@ -2001,16 +2000,17 @@ static int del_station(struct wiphy *wiphy, struct net_device *dev,
priv = wiphy_priv(wiphy);
vif = netdev_priv(dev);
- if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
- if (!mac)
- ret = wilc_del_allstation(vif,
- priv->assoc_stainfo.sta_associated_bss);
+ if (!(vif->iftype == AP_MODE || vif->iftype == GO_MODE))
+ return ret;
- ret = wilc_del_station(vif, mac);
+ info = &priv->assoc_stainfo;
- if (ret)
- netdev_err(dev, "Host delete station fail\n");
- }
+ if (!mac)
+ ret = wilc_del_allstation(vif, info->sta_associated_bss);
+
+ ret = wilc_del_station(vif, mac);
+ if (ret)
+ netdev_err(dev, "Host delete station fail\n");
return ret;
}
@@ -2018,14 +2018,12 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
s32 ret = 0;
- struct wilc_priv *priv;
struct add_sta_param sta_params = { {0} };
struct wilc_vif *vif;
if (!wiphy)
return -EFAULT;
- priv = wiphy_priv(wiphy);
vif = netdev_priv(dev);
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
@@ -2065,7 +2063,7 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
vif = netdev_priv(priv->wdev->netdev);
if (type == NL80211_IFTYPE_MONITOR) {
- new_ifc = WILC_WFI_init_mon_interface(name, vif->ndev);
+ new_ifc = wilc_wfi_init_mon_interface(name, vif->ndev);
if (new_ifc) {
vif = netdev_priv(priv->wdev->netdev);
vif->monitor_flag = 1;
@@ -2196,7 +2194,7 @@ static struct wireless_dev *wilc_wfi_cfg_alloc(void)
{
struct wireless_dev *wdev;
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
if (!wdev)
goto _fail_;
@@ -2204,13 +2202,13 @@ static struct wireless_dev *wilc_wfi_cfg_alloc(void)
if (!wdev->wiphy)
goto _fail_mem_;
- WILC_WFI_band_2ghz.ht_cap.ht_supported = 1;
- WILC_WFI_band_2ghz.ht_cap.cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
- WILC_WFI_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
- WILC_WFI_band_2ghz.ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
- WILC_WFI_band_2ghz.ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ wilc_band_2ghz.ht_cap.ht_supported = 1;
+ wilc_band_2ghz.ht_cap.cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ wilc_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ wilc_band_2ghz.ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
+ wilc_band_2ghz.ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
- wdev->wiphy->bands[NL80211_BAND_2GHZ] = &WILC_WFI_band_2ghz;
+ wdev->wiphy->bands[NL80211_BAND_2GHZ] = &wilc_band_2ghz;
return wdev;
@@ -2220,7 +2218,8 @@ _fail_:
return NULL;
}
-struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *dev)
+struct wireless_dev *wilc_create_wiphy(struct net_device *net,
+ struct device *dev)
{
struct wilc_priv *priv;
struct wireless_dev *wdev;
@@ -2272,8 +2271,8 @@ int wilc_init_host_int(struct net_device *net)
priv = wdev_priv(net->ieee80211_ptr);
if (op_ifcs == 0) {
- timer_setup(&hAgingTimer, remove_network_from_shadow, 0);
- timer_setup(&wilc_during_ip_timer, clear_duringIP, 0);
+ timer_setup(&aging_timer, remove_network_from_shadow, 0);
+ timer_setup(&wilc_during_ip_timer, clear_during_ip, 0);
}
op_ifcs++;
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
index dfb7ec272935..a69103b44958 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
@@ -1,23 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*!
- * @file wilc_wfi_cfgoperations.h
- * @brief Definitions for the network module
- * @author syounan
- * @sa wilc_oswrapper.h top level OS wrapper file
- * @date 31 Aug 2010
- * @version 1.0
- */
#ifndef NM_WFI_CFGOPERATIONS
#define NM_WFI_CFGOPERATIONS
#include "wilc_wfi_netdevice.h"
-struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *dev);
+struct wireless_dev *wilc_create_wiphy(struct net_device *net,
+ struct device *dev);
void wilc_free_wiphy(struct net_device *net);
int wilc_deinit_host_int(struct net_device *net);
int wilc_init_host_int(struct net_device *net);
-void WILC_WFI_monitor_rx(u8 *buff, u32 size);
-int WILC_WFI_deinit_mon_interface(void);
-struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_device *real_dev);
+void wilc_wfi_monitor_rx(u8 *buff, u32 size);
+int wilc_wfi_deinit_mon_interface(void);
+struct net_device *wilc_wfi_init_mon_interface(const char *name,
+ struct net_device *real_dev);
void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
u16 frame_type, bool reg);
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index d62c4f1cddc6..f2b07e8aedd7 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -1,60 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*!
- * @file wilc_wfi_netdevice.h
- * @brief Definitions for the network module
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
#ifndef WILC_WFI_NETDEVICE
#define WILC_WFI_NETDEVICE
-#define WILC_WFI_RX_INTR 0x0001
-#define WILC_WFI_TX_INTR 0x0002
-
-#define WILC_WFI_TIMEOUT 5
-#define WILC_MAX_NUM_PMKIDS 16
-#define PMKID_LEN 16
-#define PMKID_FOUND 1
- #define NUM_STA_ASSOCIATED 8
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/in.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
#include <linux/tcp.h>
-#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
-#include <linux/in6.h>
-#include <asm/checksum.h>
+
#include "host_interface.h"
#include "wilc_wlan.h"
-#include <linux/wireless.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#define FLOW_CONTROL_LOWER_THRESHOLD 128
-#define FLOW_CONTROL_UPPER_THRESHOLD 256
+#define FLOW_CONTROL_LOWER_THRESHOLD 128
+#define FLOW_CONTROL_UPPER_THRESHOLD 256
-enum stats_flags {
- WILC_WFI_RX_PKT = BIT(0),
- WILC_WFI_TX_PKT = BIT(1),
-};
+#define WILC_MAX_NUM_PMKIDS 16
+#define PMKID_LEN 16
+#define PMKID_FOUND 1
+#define NUM_STA_ASSOCIATED 8
+
+#define NUM_REG_FRAME 2
-struct WILC_WFI_stats {
+struct wilc_wfi_stats {
unsigned long rx_packets;
unsigned long tx_packets;
unsigned long rx_bytes;
@@ -69,8 +36,6 @@ struct WILC_WFI_stats {
* packets in and out, so there is place for a packet
*/
-#define num_reg_frame 2
-
struct wilc_wfi_key {
u8 *key;
u8 *seq;
@@ -112,20 +77,13 @@ struct wilc_priv {
struct net_device_stats stats;
u8 monitor_flag;
int status;
- struct WILC_WFI_packet *ppool;
- struct WILC_WFI_packet *rx_queue; /* List of incoming packets */
- int rx_int_enabled;
- int tx_packetlen;
- u8 *tx_packetdata;
struct sk_buff *skb;
- spinlock_t lock;
struct net_device *dev;
- struct napi_struct napi;
struct host_if_drv *hif_drv;
struct host_if_pmkid_attr pmkid_list;
- struct WILC_WFI_stats netstats;
- u8 WILC_WFI_wep_key[4][WLAN_KEY_LEN_WEP104];
- u8 WILC_WFI_wep_key_len[4];
+ struct wilc_wfi_stats netstats;
+ u8 wep_key[4][WLAN_KEY_LEN_WEP104];
+ u8 wep_key_len[4];
/* The real interface that the monitor is on */
struct net_device *real_ndev;
struct wilc_wfi_key *wilc_gtk[MAX_NUM_STA];
@@ -150,7 +108,7 @@ struct wilc_vif {
u8 iftype;
int monitor_flag;
int mac_opened;
- struct frame_reg frame_reg[num_reg_frame];
+ struct frame_reg frame_reg[NUM_REG_FRAME];
struct net_device_stats netstats;
struct wilc *wilc;
u8 src_addr[ETH_ALEN];
@@ -172,10 +130,11 @@ struct wilc {
u8 vif_num;
struct wilc_vif *vif[NUM_CONCURRENT_IFC];
u8 open_ifcs;
-
+ /*protect head of transmit queue*/
struct mutex txq_add_to_head_cs;
+ /*protect txq_entry_t transmit queue*/
spinlock_t txq_spinlock;
-
+ /*protect rxq_entry_t receiver queue*/
struct mutex rxq_cs;
struct mutex hif_cs;
@@ -218,20 +177,16 @@ struct wilc {
struct rf_info dummy_statistics;
};
-struct WILC_WFI_mon_priv {
+struct wilc_wfi_mon_priv {
struct net_device *real_ndev;
};
-int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif);
-
void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
-void wilc_mac_indicate(struct wilc *wilc, int flag);
+void wilc_mac_indicate(struct wilc *wilc);
void wilc_netdev_cleanup(struct wilc *wilc);
int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
int gpio, const struct wilc_hif_func *ops);
-void wilc1000_wlan_deinit(struct net_device *dev);
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size);
-int wilc_wlan_get_firmware(struct net_device *dev);
int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode);
#endif
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index bcbb92323a0a..d4ebbf67e50b 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -1,7 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/completion.h>
-#include "wilc_wlan_if.h"
-#include "wilc_wlan.h"
#include "wilc_wfi_netdevice.h"
#include "wilc_wlan_cfg.h"
@@ -819,12 +816,7 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
if (wilc->cfg_seq_no == rsp.seq_no)
complete(&wilc->cfg_event);
} else if (rsp.type == WILC_CFG_RSP_STATUS) {
- wilc_mac_indicate(wilc,
- WILC_MAC_INDICATE_STATUS);
-
- } else if (rsp.type == WILC_CFG_RSP_SCAN) {
- wilc_mac_indicate(wilc,
- WILC_MAC_INDICATE_SCAN);
+ wilc_mac_indicate(wilc);
}
}
}
@@ -996,11 +988,11 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
if (!ret) {
ret = -EIO;
- goto _fail_;
+ goto fail;
}
} while (offset < buffer_size);
-_fail_:
+fail:
kfree(dma_buffer);
@@ -1421,12 +1413,12 @@ int wilc_wlan_init(struct net_device *dev)
if (!wilc->hif_func->hif_init(wilc, false)) {
ret = -EIO;
- goto _fail_;
+ goto fail;
}
if (!wilc_wlan_cfg_init()) {
ret = -ENOBUFS;
- goto _fail_;
+ goto fail;
}
if (!wilc->tx_buffer)
@@ -1434,7 +1426,7 @@ int wilc_wlan_init(struct net_device *dev)
if (!wilc->tx_buffer) {
ret = -ENOBUFS;
- goto _fail_;
+ goto fail;
}
if (!wilc->rx_buffer)
@@ -1442,17 +1434,17 @@ int wilc_wlan_init(struct net_device *dev)
if (!wilc->rx_buffer) {
ret = -ENOBUFS;
- goto _fail_;
+ goto fail;
}
if (!init_chip(dev)) {
ret = -EIO;
- goto _fail_;
+ goto fail;
}
return 1;
-_fail_:
+fail:
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index fa157a67b045..a5b9c68e1b9c 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -202,13 +202,6 @@
/********************************************
*
- * Debug Type
- *
- ********************************************/
-typedef void (*wilc_debug_func)(u32, char *, ...);
-
-/********************************************
- *
* Tx/Rx Queue Structure
*
********************************************/
@@ -222,7 +215,7 @@ struct txq_entry_t {
int buffer_size;
void *priv;
int status;
- void (*tx_complete_func)(void *, int);
+ void (*tx_complete_func)(void *priv, int status);
};
struct rxq_entry_t {
@@ -238,18 +231,18 @@ struct rxq_entry_t {
********************************************/
struct wilc;
struct wilc_hif_func {
- int (*hif_init)(struct wilc *, bool resume);
- int (*hif_deinit)(struct wilc *);
- int (*hif_read_reg)(struct wilc *, u32, u32 *);
- int (*hif_write_reg)(struct wilc *, u32, u32);
- int (*hif_block_rx)(struct wilc *, u32, u8 *, u32);
- int (*hif_block_tx)(struct wilc *, u32, u8 *, u32);
- int (*hif_read_int)(struct wilc *, u32 *);
- int (*hif_clear_int_ext)(struct wilc *, u32);
- int (*hif_read_size)(struct wilc *, u32 *);
- int (*hif_block_tx_ext)(struct wilc *, u32, u8 *, u32);
- int (*hif_block_rx_ext)(struct wilc *, u32, u8 *, u32);
- int (*hif_sync_ext)(struct wilc *, int);
+ int (*hif_init)(struct wilc *wilc, bool resume);
+ int (*hif_deinit)(struct wilc *wilc);
+ int (*hif_read_reg)(struct wilc *wilc, u32 addr, u32 *data);
+ int (*hif_write_reg)(struct wilc *wilc, u32 addr, u32 data);
+ int (*hif_block_rx)(struct wilc *wilc, u32 addr, u8 *buf, u32 size);
+ int (*hif_block_tx)(struct wilc *wilc, u32 addr, u8 *buf, u32 size);
+ int (*hif_read_int)(struct wilc *wilc, u32 *int_status);
+ int (*hif_clear_int_ext)(struct wilc *wilc, u32 val);
+ int (*hif_read_size)(struct wilc *wilc, u32 *size);
+ int (*hif_block_tx_ext)(struct wilc *wilc, u32 addr, u8 *buf, u32 size);
+ int (*hif_block_rx_ext)(struct wilc *wilc, u32 addr, u8 *buf, u32 size);
+ int (*hif_sync_ext)(struct wilc *wilc, int nint);
int (*enable_interrupt)(struct wilc *nic);
void (*disable_interrupt)(struct wilc *nic);
};
@@ -298,9 +291,9 @@ void wilc_chip_sleep_manually(struct wilc *wilc);
void wilc_enable_tcp_ack_filter(bool value);
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc);
-int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
-void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size);
+void wilc_wfi_p2p_rx(struct net_device *dev, u8 *buff, u32 size);
void host_wakeup_notify(struct wilc *wilc);
void host_sleep_notify(struct wilc *wilc);
extern bool wilc_enable_ps;
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index 2b44f4cc56b7..c0b9b700f4d7 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -8,7 +8,6 @@
/* */
/* ///////////////////////////////////////////////////////////////////////// */
-#include <linux/string.h>
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
#include "wilc_wlan_cfg.h"
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index e186509ad334..e4a7bf5df65b 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -21,7 +21,6 @@
#define HIF_SDIO (0)
#define HIF_SPI BIT(0)
-#define HIF_SDIO_GPIO_IRQ BIT(2)
/********************************************
*
@@ -48,12 +47,9 @@ struct sdio_cmd53 {
u32 block_size;
};
-#define WILC_MAC_INDICATE_STATUS 0x1
-#define WILC_MAC_STATUS_INIT -1
-#define WILC_MAC_STATUS_READY 0
-#define WILC_MAC_STATUS_CONNECT 1
-
-#define WILC_MAC_INDICATE_SCAN 0x2
+#define MAC_STATUS_INIT -1
+#define MAC_STATUS_CONNECTED 1
+#define MAC_STATUS_DISCONNECTED 0
struct tx_complete_data {
int size;
@@ -120,10 +116,6 @@ enum {
G_AUTO_PREAMBLE = 2, /* Auto Preamble Selection */
};
-#define MAC_CONNECTED 1
-#define MAC_DISCONNECTED 0
-
-#define SCAN_DONE TRUE
enum {
PASSIVE_SCAN = 0,
ACTIVE_SCAN = 1,
@@ -741,7 +733,7 @@ enum {
WID_DEL_BEACON = 0x00CA,
- WID_LOGTerminal_Switch = 0x00CD,
+ WID_LOG_TERMINAL_SWITCH = 0x00CD,
WID_TX_POWER = 0x00CE,
/* EMAC Short WID list */
/* RTS Threshold */
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 85c3af00abd2..67a944c0d690 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* hfa384x.h
*
* Defines the constants and data structures for the hfa384x
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 555711bc12f0..33e97ffbb436 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -202,7 +202,7 @@ static void unlocked_usbctlx_complete(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx);
struct usbctlx_completor {
- int (*complete)(struct usbctlx_completor *);
+ int (*complete)(struct usbctlx_completor *completor);
};
static int
@@ -3417,7 +3417,7 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
/* Attach the rxmeta, set some stuff */
p80211skb_rxmeta_attach(wlandev, skb);
- rxmeta = P80211SKB_RXMETA(skb);
+ rxmeta = p80211skb_rxmeta(skb);
rxmeta->mactime = usbin->rxfrm.desc.time;
rxmeta->rxrate = usbin->rxfrm.desc.rate;
rxmeta->signal = usbin->rxfrm.desc.signal - hw->dbmadjust;
@@ -3439,8 +3439,7 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
default:
netdev_warn(hw->wlandev->netdev, "Received frame on unsupported port=%d\n",
- HFA384x_RXSTATUS_MACPORT_GET(
- usbin->rxfrm.desc.status));
+ HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status));
break;
}
}
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 855b424f6423..91debcf20646 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -497,7 +497,7 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
/* jkriegl: only process signal/noise if requested by iwspy */
if (wlandev->spy_number)
orinoco_spy_gather(wlandev, eth_hdr(skb)->h_source,
- P80211SKB_RXMETA(skb));
+ p80211skb_rxmeta(skb));
/* Free the metadata */
p80211skb_rxmeta_detach(skb);
@@ -563,7 +563,7 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb)
pr_debug("Called w/ null skb.\n");
return;
}
- frmmeta = P80211SKB_FRMMETA(skb);
+ frmmeta = p80211skb_frmmeta(skb);
if (!frmmeta) { /* no magic */
pr_debug("Called w/ bad frmmeta magic.\n");
return;
@@ -605,7 +605,7 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
struct p80211_frmmeta *frmmeta;
/* If these already have metadata, we error out! */
- if (P80211SKB_RXMETA(skb)) {
+ if (p80211skb_rxmeta(skb)) {
netdev_err(wlandev->netdev,
"%s: RXmeta already attached!\n", wlandev->name);
result = 0;
@@ -654,7 +654,7 @@ void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
{
struct p80211_frmmeta *meta;
- meta = P80211SKB_FRMMETA(skb);
+ meta = p80211skb_frmmeta(skb);
if (meta && meta->rx)
p80211skb_rxmeta_detach(skb);
else
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index 28459dcea4b1..827002ca9930 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* p80211conv.h
*
* Ether/802.11 conversions and packet buffer routines
@@ -63,16 +63,6 @@
#define P80211_FRMMETA_MAGIC 0x802110
-#define P80211SKB_FRMMETA(s) \
- (((((struct p80211_frmmeta *)((s)->cb))->magic) == \
- P80211_FRMMETA_MAGIC) ? \
- ((struct p80211_frmmeta *)((s)->cb)) : \
- (NULL))
-
-#define P80211SKB_RXMETA(s) \
- (P80211SKB_FRMMETA((s)) ? P80211SKB_FRMMETA((s))->rx : \
- ((struct p80211_rxmeta *)(NULL)))
-
struct p80211_rxmeta {
struct wlandevice *wlandev;
@@ -98,6 +88,20 @@ void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb);
int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb);
void p80211skb_rxmeta_detach(struct sk_buff *skb);
+static inline struct p80211_frmmeta *p80211skb_frmmeta(struct sk_buff *skb)
+{
+ struct p80211_frmmeta *frmmeta = (struct p80211_frmmeta *)skb->cb;
+
+ return frmmeta->magic == P80211_FRMMETA_MAGIC ? frmmeta : NULL;
+}
+
+static inline struct p80211_rxmeta *p80211skb_rxmeta(struct sk_buff *skb)
+{
+ struct p80211_frmmeta *frmmeta = p80211skb_frmmeta(skb);
+
+ return frmmeta ? frmmeta->rx : NULL;
+}
+
/*
* Frame capture header. (See doc/capturefrm.txt)
*/
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 133d70c08ecf..26b178721414 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* p80211hdr.h
*
* Macros, types, and functions for handling 802.11 MAC headers
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index d8cde1d8870b..ed65ac57adbe 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* p80211ioctl.h
*
* Declares constants and types for the p80211 ioctls
diff --git a/drivers/staging/wlan-ng/p80211metadef.h b/drivers/staging/wlan-ng/p80211metadef.h
index 4ac2f08a520a..e63b4b557d0a 100644
--- a/drivers/staging/wlan-ng/p80211metadef.h
+++ b/drivers/staging/wlan-ng/p80211metadef.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* This file is GENERATED AUTOMATICALLY. DO NOT EDIT OR MODIFY.
* --------------------------------------------------------------------
*
diff --git a/drivers/staging/wlan-ng/p80211metastruct.h b/drivers/staging/wlan-ng/p80211metastruct.h
index 15b7c08e210d..5602ec606074 100644
--- a/drivers/staging/wlan-ng/p80211metastruct.h
+++ b/drivers/staging/wlan-ng/p80211metastruct.h
@@ -1,49 +1,49 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* This file is GENERATED AUTOMATICALLY. DO NOT EDIT OR MODIFY.
-* --------------------------------------------------------------------
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ * --------------------------------------------------------------------
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211MKMETASTRUCT_H
#define _P80211MKMETASTRUCT_H
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index 3c12929858cb..c045c08e1991 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* p80211mgmt.h
*
* Macros, types, and functions to handle 802.11 mgmt frames
diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h
index ae119ecd74b0..114066526df4 100644
--- a/drivers/staging/wlan-ng/p80211msg.h
+++ b/drivers/staging/wlan-ng/p80211msg.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* p80211msg.h
*
* Macros, constants, types, and funcs for req and ind messages
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index ec9cc00ee241..8258cb5a335d 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -95,8 +95,8 @@
static int p80211knetdev_init(struct net_device *netdev);
static int p80211knetdev_open(struct net_device *netdev);
static int p80211knetdev_stop(struct net_device *netdev);
-static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
- struct net_device *netdev);
+static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev);
static void p80211knetdev_set_multicast_list(struct net_device *dev);
static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd);
@@ -321,8 +321,8 @@ static void p80211netdev_rx_bh(unsigned long arg)
* zero on success, non-zero on failure.
*----------------------------------------------------------------
*/
-static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
int result = 0;
int txresult = -1;
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index cebbe746a52f..d48466d943b4 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* p80211netdev.h
*
* WLAN net device structure and functions
@@ -180,11 +180,11 @@ struct wlandevice {
int (*close)(struct wlandevice *wlandev);
void (*reset)(struct wlandevice *wlandev);
int (*txframe)(struct wlandevice *wlandev, struct sk_buff *skb,
- union p80211_hdr *p80211_hdr,
- struct p80211_metawep *p80211_wep);
+ union p80211_hdr *p80211_hdr,
+ struct p80211_metawep *p80211_wep);
int (*mlmerequest)(struct wlandevice *wlandev, struct p80211msg *msg);
int (*set_multicast_list)(struct wlandevice *wlandev,
- struct net_device *dev);
+ struct net_device *dev);
void (*tx_timeout)(struct wlandevice *wlandev);
/* 802.11 State */
diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h
index 20be2c3af4c1..c04053f3b02b 100644
--- a/drivers/staging/wlan-ng/p80211req.h
+++ b/drivers/staging/wlan-ng/p80211req.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* p80211req.h
*
* Request handling functions
diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h
index 94420562c418..7c37d56dd9b7 100644
--- a/drivers/staging/wlan-ng/p80211types.h
+++ b/drivers/staging/wlan-ng/p80211types.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/*
* p80211types.h
*
@@ -119,11 +119,6 @@
/* is a DID-LEN-DATA triple */
/* with a max size of 4+4+384 */
-/*----------------------------------------------------------------*/
-/* The following macro creates a name for an enum */
-
-#define MKENUMNAME(name) p80211enum_ ## name
-
/*----------------------------------------------------------------
* The following constants and macros are used to construct and
* deconstruct the Data ID codes. The coding is as follows:
@@ -348,30 +343,4 @@ typedef void (*p80211_fromtext_t) (struct catlistitem *, u32 did, u8 *itembuf,
char *textbuf);
typedef u32(*p80211_valid_t) (struct catlistitem *, u32 did, u8 *itembuf);
-/*----------------------------------------------------------------*/
-/* Enumeration Lists */
-/* The following are the external declarations */
-/* for all enumerations */
-
-extern struct p80211enum MKENUMNAME(truth);
-extern struct p80211enum MKENUMNAME(ifstate);
-extern struct p80211enum MKENUMNAME(powermgmt);
-extern struct p80211enum MKENUMNAME(bsstype);
-extern struct p80211enum MKENUMNAME(authalg);
-extern struct p80211enum MKENUMNAME(phytype);
-extern struct p80211enum MKENUMNAME(temptype);
-extern struct p80211enum MKENUMNAME(regdomain);
-extern struct p80211enum MKENUMNAME(ccamode);
-extern struct p80211enum MKENUMNAME(diversity);
-extern struct p80211enum MKENUMNAME(scantype);
-extern struct p80211enum MKENUMNAME(resultcode);
-extern struct p80211enum MKENUMNAME(reason);
-extern struct p80211enum MKENUMNAME(status);
-extern struct p80211enum MKENUMNAME(msgcode);
-extern struct p80211enum MKENUMNAME(msgitem_status);
-
-extern struct p80211enum MKENUMNAME(lnxroam_reason);
-
-extern struct p80211enum MKENUMNAME(p2preamble);
-
#endif /* _P80211TYPES_H */
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 564c3f4a3e03..17bc1ee0d498 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/* prism2mgmt.h
*
* Declares the mgmt command handler functions
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index fed0b8ceca6f..914970249680 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -764,16 +764,16 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
if (hw->cap_sup_sta.id == 0x04) {
netdev_info(wlandev->netdev,
- "STA:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_sup_sta.role, hw->cap_sup_sta.id,
- hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
- hw->cap_sup_sta.top);
+ "STA:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_sup_sta.role, hw->cap_sup_sta.id,
+ hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
+ hw->cap_sup_sta.top);
} else {
netdev_info(wlandev->netdev,
- "AP:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_sup_sta.role, hw->cap_sup_sta.id,
- hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
- hw->cap_sup_sta.top);
+ "AP:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_sup_sta.role, hw->cap_sup_sta.id,
+ hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
+ hw->cap_sup_sta.top);
}
/* Compatibility range, primary f/w actor, CFI supplier */
@@ -1189,7 +1189,6 @@ void prism2sta_processing_defer(struct work_struct *data)
inf = (struct hfa384x_inf_frame *)skb->data;
prism2sta_inf_authreq_defer(wlandev, inf);
}
-
}
/* Now let's handle the linkstatus stuff */
@@ -1241,9 +1240,9 @@ void prism2sta_processing_defer(struct work_struct *data)
/* Collect the BSSID, and set state to allow tx */
result = hfa384x_drvr_getconfig(hw,
- HFA384x_RID_CURRENTBSSID,
- wlandev->bssid,
- WLAN_BSSID_LEN);
+ HFA384x_RID_CURRENTBSSID,
+ wlandev->bssid,
+ WLAN_BSSID_LEN);
if (result) {
pr_debug
("getconfig(0x%02x) failed, result = %d\n",
@@ -1260,14 +1259,13 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID, result);
return;
}
- prism2mgmt_bytestr2pstr(
- (struct hfa384x_bytestr *)&ssid,
- (struct p80211pstrd *)&wlandev->ssid);
+ prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *)&ssid,
+ (struct p80211pstrd *)&wlandev->ssid);
/* Collect the port status */
result = hfa384x_drvr_getconfig16(hw,
- HFA384x_RID_PORTSTATUS,
- &portstatus);
+ HFA384x_RID_PORTSTATUS,
+ &portstatus);
if (result) {
pr_debug
("getconfig(0x%02x) failed, result = %d\n",
@@ -1404,7 +1402,7 @@ void prism2sta_processing_defer(struct work_struct *data)
&joinreq,
HFA384x_RID_JOINREQUEST_LEN);
netdev_info(wlandev->netdev,
- "linkstatus=ASSOCFAIL (re-submitting join)\n");
+ "linkstatus=ASSOCFAIL (re-submitting join)\n");
} else {
netdev_info(wlandev->netdev, "linkstatus=ASSOCFAIL (unhandled)\n");
}
@@ -1501,7 +1499,7 @@ static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
if (i >= hw->authlist.cnt) {
if (rec.assocstatus != HFA384x_ASSOCSTATUS_AUTHFAIL)
netdev_warn(wlandev->netdev,
- "assocstatus info frame received for non-authenticated station.\n");
+ "assocstatus info frame received for non-authenticated station.\n");
} else {
hw->authlist.assoc[i] =
(rec.assocstatus == HFA384x_ASSOCSTATUS_STAASSOC ||
@@ -1509,7 +1507,7 @@ static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
if (rec.assocstatus == HFA384x_ASSOCSTATUS_AUTHFAIL)
netdev_warn(wlandev->netdev,
-"authfail assocstatus info frame received for authenticated station.\n");
+ "authfail assocstatus info frame received for authenticated station.\n");
}
}
@@ -1674,9 +1672,8 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
if (hw->authlist.cnt >= WLAN_AUTH_MAX) {
rec.status = cpu_to_le16(P80211ENUM_status_ap_full);
} else {
- ether_addr_copy(
- hw->authlist.addr[hw->authlist.cnt],
- rec.address);
+ ether_addr_copy(hw->authlist.addr[hw->authlist.cnt],
+ rec.address);
hw->authlist.cnt++;
added = 1;
}
@@ -1697,8 +1694,8 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
if (added)
hw->authlist.cnt--;
netdev_err(wlandev->netdev,
- "setconfig(authenticatestation) failed, result=%d\n",
- result);
+ "setconfig(authenticatestation) failed, result=%d\n",
+ result);
}
}
@@ -1937,9 +1934,8 @@ void prism2sta_commsqual_defer(struct work_struct *data)
/* It only makes sense to poll these in non-IBSS */
if (wlandev->macmode != WLAN_MACMODE_IBSS_STA) {
- result = hfa384x_drvr_getconfig(
- hw, HFA384x_RID_DBMCOMMSQUALITY,
- &hw->qual, HFA384x_RID_DBMCOMMSQUALITY_LEN);
+ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DBMCOMMSQUALITY,
+ &hw->qual, HFA384x_RID_DBMCOMMSQUALITY_LEN);
if (result) {
netdev_err(wlandev->netdev, "error fetching commsqual\n");
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3f4bf126eed0..5ccef7d597fa 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -155,6 +155,8 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
mutex_unlock(&g_tf_lock);
+ pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
+
return read_bytes;
}
@@ -3213,6 +3215,27 @@ void target_setup_backend_cits(struct target_backend *tb)
target_core_setup_dev_stat_cit(tb);
}
+static void target_init_dbroot(void)
+{
+ struct file *fp;
+
+ snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
+ fp = filp_open(db_root_stage, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ pr_err("db_root: cannot open: %s\n", db_root_stage);
+ return;
+ }
+ if (!S_ISDIR(file_inode(fp)->i_mode)) {
+ filp_close(fp, NULL);
+ pr_err("db_root: not a valid directory: %s\n", db_root_stage);
+ return;
+ }
+ filp_close(fp, NULL);
+
+ strncpy(db_root, db_root_stage, DB_ROOT_LEN);
+ pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
+}
+
static int __init target_core_init_configfs(void)
{
struct configfs_subsystem *subsys = &target_core_fabrics;
@@ -3293,6 +3316,8 @@ static int __init target_core_init_configfs(void)
if (ret < 0)
goto out;
+ target_init_dbroot();
+
return 0;
out:
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 9b2c0c773022..16751ae55d7b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -250,6 +250,84 @@ static void fd_destroy_device(struct se_device *dev)
}
}
+struct target_core_file_cmd {
+ unsigned long len;
+ struct se_cmd *cmd;
+ struct kiocb iocb;
+};
+
+static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+{
+ struct target_core_file_cmd *cmd;
+
+ cmd = container_of(iocb, struct target_core_file_cmd, iocb);
+
+ if (ret != cmd->len)
+ target_complete_cmd(cmd->cmd, SAM_STAT_CHECK_CONDITION);
+ else
+ target_complete_cmd(cmd->cmd, SAM_STAT_GOOD);
+
+ kfree(cmd);
+}
+
+static sense_reason_t
+fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
+{
+ int is_write = !(data_direction == DMA_FROM_DEVICE);
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *file = fd_dev->fd_file;
+ struct target_core_file_cmd *aio_cmd;
+ struct iov_iter iter = {};
+ struct scatterlist *sg;
+ struct bio_vec *bvec;
+ ssize_t len = 0;
+ int ret = 0, i;
+
+ aio_cmd = kmalloc(sizeof(struct target_core_file_cmd), GFP_KERNEL);
+ if (!aio_cmd)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
+ if (!bvec) {
+ kfree(aio_cmd);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ for_each_sg(sgl, sg, sgl_nents, i) {
+ bvec[i].bv_page = sg_page(sg);
+ bvec[i].bv_len = sg->length;
+ bvec[i].bv_offset = sg->offset;
+
+ len += sg->length;
+ }
+
+ iov_iter_bvec(&iter, ITER_BVEC | is_write, bvec, sgl_nents, len);
+
+ aio_cmd->cmd = cmd;
+ aio_cmd->len = len;
+ aio_cmd->iocb.ki_pos = cmd->t_task_lba * dev->dev_attrib.block_size;
+ aio_cmd->iocb.ki_filp = file;
+ aio_cmd->iocb.ki_complete = cmd_rw_aio_complete;
+ aio_cmd->iocb.ki_flags = IOCB_DIRECT;
+
+ if (is_write && (cmd->se_cmd_flags & SCF_FUA))
+ aio_cmd->iocb.ki_flags |= IOCB_DSYNC;
+
+ if (is_write)
+ ret = call_write_iter(file, &aio_cmd->iocb, &iter);
+ else
+ ret = call_read_iter(file, &aio_cmd->iocb, &iter);
+
+ kfree(bvec);
+
+ if (ret != -EIOCBQUEUED)
+ cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
+
+ return 0;
+}
+
static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
u32 block_size, struct scatterlist *sgl,
u32 sgl_nents, u32 data_length, int is_write)
@@ -527,7 +605,7 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
}
static sense_reason_t
-fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+fd_execute_rw_buffered(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
@@ -537,16 +615,6 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
sense_reason_t rc;
int ret = 0;
/*
- * We are currently limited by the number of iovecs (2048) per
- * single vfs_[writev,readv] call.
- */
- if (cmd->data_length > FD_MAX_BYTES) {
- pr_err("FILEIO: Not able to process I/O of %u bytes due to"
- "FD_MAX_BYTES: %u iovec count limitation\n",
- cmd->data_length, FD_MAX_BYTES);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
- /*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
*/
@@ -620,14 +688,39 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
return 0;
}
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+
+ /*
+ * We are currently limited by the number of iovecs (2048) per
+ * single vfs_[writev,readv] call.
+ */
+ if (cmd->data_length > FD_MAX_BYTES) {
+ pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+ "FD_MAX_BYTES: %u iovec count limitation\n",
+ cmd->data_length, FD_MAX_BYTES);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ if (fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO)
+ return fd_execute_rw_aio(cmd, sgl, sgl_nents, data_direction);
+ return fd_execute_rw_buffered(cmd, sgl, sgl_nents, data_direction);
+}
+
enum {
- Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+ Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io,
+ Opt_fd_async_io, Opt_err
};
static match_table_t tokens = {
{Opt_fd_dev_name, "fd_dev_name=%s"},
{Opt_fd_dev_size, "fd_dev_size=%s"},
{Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_fd_async_io, "fd_async_io=%d"},
{Opt_err, NULL}
};
@@ -693,6 +786,21 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
break;
+ case Opt_fd_async_io:
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
+ if (arg != 1) {
+ pr_err("bogus fd_async_io=%d value\n", arg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pr_debug("FILEIO: Using async I/O"
+ " operations for struct fd_dev\n");
+
+ fd_dev->fbd_flags |= FDBD_HAS_ASYNC_IO;
+ break;
default:
break;
}
@@ -709,10 +817,11 @@ static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
- bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
+ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s Async: %d\n",
fd_dev->fd_dev_name, fd_dev->fd_dev_size,
(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
- "Buffered-WCE" : "O_DSYNC");
+ "Buffered-WCE" : "O_DSYNC",
+ !!(fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO));
return bl;
}
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 53be5ffd3261..929b1ecd544e 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -22,6 +22,7 @@
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+#define FDBD_HAS_ASYNC_IO 0x08
#define FDBD_FORMAT_UNIT_SIZE 2048
struct fd_dev {
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 1d5afc3ae017..dead30b1d32c 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -166,6 +166,7 @@ extern struct se_portal_group xcopy_pt_tpg;
/* target_core_configfs.c */
#define DB_ROOT_LEN 4096
#define DB_ROOT_DEFAULT "/var/target"
+#define DB_ROOT_PREFERRED "/etc/target"
extern char db_root[];
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 668934ea74cb..47d76c862014 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -689,8 +689,29 @@ after_mode_sense:
}
after_mode_select:
- if (scsi_status == SAM_STAT_CHECK_CONDITION)
+ if (scsi_status == SAM_STAT_CHECK_CONDITION) {
transport_copy_sense_to_cmd(cmd, req_sense);
+
+ /*
+ * check for TAPE device reads with
+ * FM/EOM/ILI set, so that we can get data
+ * back despite framework assumption that a
+ * check condition means there is no data
+ */
+ if (sd->type == TYPE_TAPE &&
+ cmd->data_direction == DMA_FROM_DEVICE) {
+ /*
+ * is sense data valid, fixed format,
+ * and have FM, EOM, or ILI set?
+ */
+ if (req_sense[0] == 0xf0 && /* valid, fixed format */
+ req_sense[2] & 0xe0 && /* FM, EOM, or ILI */
+ (req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */
+ pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n");
+ cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+ }
+ }
+ }
}
enum {
@@ -1062,7 +1083,8 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
switch (host_byte(result)) {
case DID_OK:
- target_complete_cmd(cmd, scsi_status);
+ target_complete_cmd_with_length(cmd, scsi_status,
+ cmd->data_length - scsi_req(req)->resid_len);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4558f2e1fe1b..f0e8f0f4ccb4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -779,7 +779,9 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+ if ((scsi_status == SAM_STAT_GOOD ||
+ cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
+ length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -1431,7 +1433,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
return 0;
}
-/*
+/**
* target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
* se_cmd + use pre-allocated SGL memory.
*
@@ -1441,7 +1443,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
- * @task_addr: SAM task attribute
+ * @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
* @sgl: struct scatterlist memory for unidirectional mapping
@@ -1578,7 +1580,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);
-/*
+/**
* target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
*
* @se_cmd: command descriptor to submit
@@ -1587,7 +1589,7 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
- * @task_addr: SAM task attribute
+ * @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
*
@@ -1654,7 +1656,7 @@ static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
* @se_sess: associated se_sess for endpoint
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
- * @fabric_context: fabric context for TMR req
+ * @fabric_tmr_ptr: fabric context for TMR req
* @tm_type: Type of TM request
* @gfp: gfp type for caller
* @tag: referenced task tag for TMR_ABORT_TASK
@@ -2084,12 +2086,24 @@ static void transport_complete_qf(struct se_cmd *cmd)
goto queue_status;
}
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+ /*
+ * Check if we need to send a sense buffer from
+ * the struct se_cmd in question. We do NOT want
+ * to take this path if the IO has been marked as
+ * needing to be treated like a "normal read". This
+ * is the case if it's a tape read, and either the
+ * FM, EOM, or ILI bits are set, but there is no
+ * sense data.
+ */
+ if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
+ cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
goto queue_status;
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
- if (cmd->scsi_status)
+ /* queue status if not treating this as a normal read */
+ if (cmd->scsi_status &&
+ !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status;
trace_target_cmd_complete(cmd);
@@ -2194,9 +2208,15 @@ static void target_complete_ok_work(struct work_struct *work)
/*
* Check if we need to send a sense buffer from
- * the struct se_cmd in question.
+ * the struct se_cmd in question. We do NOT want
+ * to take this path if the IO has been marked as
+ * needing to be treated like a "normal read". This
+ * is the case if it's a tape read, and either the
+ * FM, EOM, or ILI bits are set, but there is no
+ * sense data.
*/
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
+ cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
WARN_ON(!cmd->scsi_status);
ret = transport_send_check_condition_and_sense(
cmd, 0, 1);
@@ -2238,7 +2258,18 @@ static void target_complete_ok_work(struct work_struct *work)
queue_rsp:
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
- if (cmd->scsi_status)
+ /*
+ * if this is a READ-type IO, but SCSI status
+ * is set, then skip returning data and just
+ * return the status -- unless this IO is marked
+ * as needing to be treated as a normal read,
+ * in which case we want to go ahead and return
+ * the data. This happens, for example, for tape
+ * reads with the FM, EOM, or ILI bits set, with
+ * no sense data.
+ */
+ if (cmd->scsi_status &&
+ !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status;
atomic_long_add(cmd->data_length,
@@ -2606,7 +2637,8 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
}
EXPORT_SYMBOL(transport_generic_free_cmd);
-/* target_get_sess_cmd - Add command to active ->sess_cmd_list
+/**
+ * target_get_sess_cmd - Add command to active ->sess_cmd_list
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
@@ -2800,7 +2832,8 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
}
EXPORT_SYMBOL(target_show_cmd);
-/* target_sess_cmd_list_set_waiting - Flag all commands in
+/**
+ * target_sess_cmd_list_set_waiting - Flag all commands in
* sess_cmd_list to complete cmd_wait_comp. Set
* sess_tearing_down so no more commands are queued.
* @se_sess: session to flag
@@ -2835,7 +2868,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
-/* target_wait_for_sess_cmds - Wait for outstanding descriptors
+/**
+ * target_wait_for_sess_cmds - Wait for outstanding descriptors
* @se_sess: session to wait for active I/O
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
@@ -3332,7 +3366,7 @@ static void target_tmr_work(struct work_struct *work)
tmr->response = TMR_FUNCTION_REJECTED;
break;
default:
- pr_err("Uknown TMR function: 0x%02x.\n",
+ pr_err("Unknown TMR function: 0x%02x.\n",
tmr->function);
tmr->response = TMR_FUNCTION_REJECTED;
break;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4f26bdc3d1dc..94b183efd236 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -42,7 +42,11 @@
#include <linux/target_core_user.h>
-/*
+/**
+ * DOC: Userspace I/O
+ * Userspace I/O
+ * -------------
+ *
* Define a shared-memory interface for LIO to pass SCSI commands and
* data to userspace for processing. This is to allow backends that
* are too complex for in-kernel support to be possible.
@@ -53,7 +57,7 @@
* See the .h file for how the ring is laid out. Note that while the
* command ring is defined, the particulars of the data area are
* not. Offset values in the command entry point to other locations
- * internal to the mmap()ed area. There is separate space outside the
+ * internal to the mmap-ed area. There is separate space outside the
* command ring for data buffers. This leaves maximum flexibility for
* moving buffer allocations, or even page flipping or other
* allocation techniques, without altering the command ring layout.
@@ -1382,7 +1386,7 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
return page;
}
-static int tcmu_vma_fault(struct vm_fault *vmf)
+static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
{
struct tcmu_dev *udev = vmf->vma->vm_private_data;
struct uio_info *info = &udev->uio_info;
@@ -1586,8 +1590,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
return ret;
}
-static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
- int reconfig_attr, const void *reconfig_data)
+static int tcmu_netlink_event_init(struct tcmu_dev *udev,
+ enum tcmu_genl_cmd cmd,
+ struct sk_buff **buf, void **hdr)
{
struct sk_buff *skb;
void *msg_header;
@@ -1613,46 +1618,66 @@ static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
if (ret < 0)
goto free_skb;
- if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
- switch (reconfig_attr) {
- case TCMU_ATTR_DEV_CFG:
- ret = nla_put_string(skb, reconfig_attr, reconfig_data);
- break;
- case TCMU_ATTR_DEV_SIZE:
- ret = nla_put_u64_64bit(skb, reconfig_attr,
- *((u64 *)reconfig_data),
- TCMU_ATTR_PAD);
- break;
- case TCMU_ATTR_WRITECACHE:
- ret = nla_put_u8(skb, reconfig_attr,
- *((u8 *)reconfig_data));
- break;
- default:
- BUG();
- }
+ *buf = skb;
+ *hdr = msg_header;
+ return ret;
- if (ret < 0)
- goto free_skb;
- }
+free_skb:
+ nlmsg_free(skb);
+ return ret;
+}
+
+static int tcmu_netlink_event_send(struct tcmu_dev *udev,
+ enum tcmu_genl_cmd cmd,
+ struct sk_buff **buf, void **hdr)
+{
+ int ret = 0;
+ struct sk_buff *skb = *buf;
+ void *msg_header = *hdr;
genlmsg_end(skb, msg_header);
tcmu_init_genl_cmd_reply(udev, cmd);
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
- TCMU_MCGRP_CONFIG, GFP_KERNEL);
- /* We don't care if no one is listening */
+ TCMU_MCGRP_CONFIG, GFP_KERNEL);
+ /* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
if (!ret)
ret = tcmu_wait_genl_cmd_reply(udev);
-
- return ret;
-free_skb:
- nlmsg_free(skb);
return ret;
}
+static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
+ &msg_header);
+ if (ret < 0)
+ return ret;
+ return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb,
+ &msg_header);
+
+}
+
+static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
+ &skb, &msg_header);
+}
+
static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
struct tcmu_hba *hba = udev->hba->hba_ptr;
@@ -1762,7 +1787,7 @@ static int tcmu_configure_device(struct se_device *dev)
*/
kref_get(&udev->kref);
- ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
+ ret = tcmu_send_dev_add_event(udev);
if (ret)
goto err_netlink;
@@ -1812,7 +1837,7 @@ static void tcmu_destroy_device(struct se_device *dev)
list_del(&udev->node);
mutex_unlock(&root_udev_mutex);
- tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
+ tcmu_send_dev_remove_event(udev);
uio_unregister_device(&udev->uio_info);
@@ -2151,6 +2176,27 @@ static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}
+static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
+ const char *reconfig_data)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
+ if (ret < 0) {
+ nlmsg_free(skb);
+ return ret;
+ }
+ return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+}
+
+
static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
size_t count)
{
@@ -2165,8 +2211,7 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
/* Check if device has been configured before */
if (tcmu_dev_configured(udev)) {
- ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
- TCMU_ATTR_DEV_CFG, page);
+ ret = tcmu_send_dev_config_event(udev, page);
if (ret) {
pr_err("Unable to reconfigure device\n");
return ret;
@@ -2193,6 +2238,26 @@ static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
}
+static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
+ size, TCMU_ATTR_PAD);
+ if (ret < 0) {
+ nlmsg_free(skb);
+ return ret;
+ }
+ return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+}
+
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
size_t count)
{
@@ -2208,8 +2273,7 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
/* Check if device has been configured before */
if (tcmu_dev_configured(udev)) {
- ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
- TCMU_ATTR_DEV_SIZE, &val);
+ ret = tcmu_send_dev_size_event(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
return ret;
@@ -2257,6 +2321,25 @@ static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}
+static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
+ if (ret < 0) {
+ nlmsg_free(skb);
+ return ret;
+ }
+ return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+}
+
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
const char *page, size_t count)
{
@@ -2272,8 +2355,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
/* Check if device has been configured before */
if (tcmu_dev_configured(udev)) {
- ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
- TCMU_ATTR_WRITECACHE, &val);
+ ret = tcmu_send_emulate_write_cache(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
return ret;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 1c909183c42a..8ae0349d9f0a 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -33,6 +33,7 @@
#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
+#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
@@ -722,6 +723,134 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0;
}
+struct remap_pfn {
+ struct mm_struct *mm;
+ struct page **pages;
+ pgprot_t prot;
+ unsigned long i;
+};
+
+static int remap_pfn_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct remap_pfn *r = data;
+ struct page *page = r->pages[r->i];
+ pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
+
+ set_pte_at(r->mm, addr, ptep, pte);
+ r->i++;
+
+ return 0;
+}
+
+static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+{
+ struct privcmd_data *data = file->private_data;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct privcmd_mmap_resource kdata;
+ xen_pfn_t *pfns = NULL;
+ struct xen_mem_acquire_resource xdata;
+ int rc;
+
+ if (copy_from_user(&kdata, udata, sizeof(kdata)))
+ return -EFAULT;
+
+ /* If restriction is in place, check the domid matches */
+ if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
+ return -EPERM;
+
+ down_write(&mm->mmap_sem);
+
+ vma = find_vma(mm, kdata.addr);
+ if (!vma || vma->vm_ops != &privcmd_vm_ops) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
+ struct page **pages;
+ unsigned int i;
+
+ rc = alloc_empty_pages(vma, nr);
+ if (rc < 0)
+ goto out;
+
+ pages = vma->vm_private_data;
+ for (i = 0; i < kdata.num; i++) {
+ xen_pfn_t pfn =
+ page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
+
+ pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
+ }
+ } else
+ vma->vm_private_data = PRIV_VMA_LOCKED;
+
+ memset(&xdata, 0, sizeof(xdata));
+ xdata.domid = kdata.dom;
+ xdata.type = kdata.type;
+ xdata.id = kdata.id;
+ xdata.frame = kdata.idx;
+ xdata.nr_frames = kdata.num;
+ set_xen_guest_handle(xdata.frame_list, pfns);
+
+ xen_preemptible_hcall_begin();
+ rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
+ xen_preemptible_hcall_end();
+
+ if (rc)
+ goto out;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .pages = vma->vm_private_data,
+ .prot = vma->vm_page_prot,
+ };
+
+ rc = apply_to_page_range(r.mm, kdata.addr,
+ kdata.num << PAGE_SHIFT,
+ remap_pfn_fn, &r);
+ } else {
+ unsigned int domid =
+ (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
+ DOMID_SELF : kdata.dom;
+ int num;
+
+ num = xen_remap_domain_mfn_array(vma,
+ kdata.addr & PAGE_MASK,
+ pfns, kdata.num, (int *)pfns,
+ vma->vm_page_prot,
+ domid,
+ vma->vm_private_data);
+ if (num < 0)
+ rc = num;
+ else if (num != kdata.num) {
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ rc = pfns[i];
+ if (rc < 0)
+ break;
+ }
+ } else
+ rc = 0;
+ }
+
+out:
+ up_write(&mm->mmap_sem);
+ kfree(pfns);
+
+ return rc;
+}
+
static long privcmd_ioctl(struct file *file,
unsigned int cmd, unsigned long data)
{
@@ -753,6 +882,10 @@ static long privcmd_ioctl(struct file *file,
ret = privcmd_ioctl_restrict(file, udata);
break;
+ case IOCTL_PRIVCMD_MMAP_RESOURCE:
+ ret = privcmd_ioctl_mmap_resource(file, udata);
+ break;
+
default:
break;
}
@@ -801,7 +934,7 @@ static void privcmd_close(struct vm_area_struct *vma)
kfree(pages);
}
-static int privcmd_fault(struct vm_fault *vmf)
+static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index ec9eb4fba59c..f2088838f690 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -710,7 +710,7 @@ static int __init xenstored_local_init(void)
if (!page)
goto out_err;
- xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
+ xen_store_gfn = virt_to_gfn((void *)page);
/* Next allocate a local port which xenstored can bind to */
alloc_unbound.dom = DOMID_SELF;
@@ -722,8 +722,7 @@ static int __init xenstored_local_init(void)
goto out_err;
BUG_ON(err);
- xen_store_evtchn = xen_start_info->store_evtchn =
- alloc_unbound.port;
+ xen_store_evtchn = alloc_unbound.port;
return 0;
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index e622f0f10502..0429c8ee58f1 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -210,12 +210,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
p9_debug(P9_DEBUG_ERROR,
"integer field, but no integer?\n");
ret = r;
- continue;
- }
- v9ses->debug = option;
+ } else {
+ v9ses->debug = option;
#ifdef CONFIG_NET_9P_DEBUG
- p9_debug_level = option;
+ p9_debug_level = option;
#endif
+ }
break;
case Opt_dfltuid:
@@ -231,7 +231,6 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
p9_debug(P9_DEBUG_ERROR,
"uid field, but not a uid?\n");
ret = -EINVAL;
- continue;
}
break;
case Opt_dfltgid:
@@ -247,7 +246,6 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
p9_debug(P9_DEBUG_ERROR,
"gid field, but not a gid?\n");
ret = -EINVAL;
- continue;
}
break;
case Opt_afid:
@@ -256,9 +254,9 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
p9_debug(P9_DEBUG_ERROR,
"integer field, but no integer?\n");
ret = r;
- continue;
+ } else {
+ v9ses->afid = option;
}
- v9ses->afid = option;
break;
case Opt_uname:
kfree(v9ses->uname);
@@ -306,13 +304,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
"problem allocating copy of cache arg\n");
goto free_and_return;
}
- ret = get_cache_mode(s);
- if (ret == -EINVAL) {
- kfree(s);
- goto free_and_return;
- }
+ r = get_cache_mode(s);
+ if (r < 0)
+ ret = r;
+ else
+ v9ses->cache = r;
- v9ses->cache = ret;
kfree(s);
break;
@@ -341,14 +338,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
pr_info("Unknown access argument %s\n",
s);
kfree(s);
- goto free_and_return;
+ continue;
}
v9ses->uid = make_kuid(current_user_ns(), uid);
if (!uid_valid(v9ses->uid)) {
ret = -EINVAL;
pr_info("Uknown uid %s\n", s);
- kfree(s);
- goto free_and_return;
}
}
diff --git a/fs/Kconfig b/fs/Kconfig
index ac4ac908f001..ab2d96d1abee 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -38,6 +38,7 @@ config FS_DAX
bool "Direct Access (DAX) support"
depends on MMU
depends on !(ARM || MIPS || SPARC)
+ select DEV_PAGEMAP_OPS if (ZONE_DEVICE && !FS_DAX_LIMITED)
select FS_IOMAP
select DAX
help
@@ -108,6 +109,7 @@ source "fs/notify/Kconfig"
source "fs/quota/Kconfig"
+source "fs/autofs/Kconfig"
source "fs/autofs4/Kconfig"
source "fs/fuse/Kconfig"
source "fs/overlayfs/Kconfig"
@@ -203,6 +205,9 @@ config HUGETLBFS
config HUGETLB_PAGE
def_bool HUGETLBFS
+config MEMFD_CREATE
+ def_bool TMPFS || HUGETLBFS
+
config ARCH_HAS_GIGANTIC_PAGE
bool
diff --git a/fs/Makefile b/fs/Makefile
index c9375fd2c8c4..2e005525cc19 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_AFFS_FS) += affs/
obj-$(CONFIG_ROMFS_FS) += romfs/
obj-$(CONFIG_QNX4FS_FS) += qnx4/
obj-$(CONFIG_QNX6FS_FS) += qnx6/
+obj-$(CONFIG_AUTOFS_FS) += autofs/
obj-$(CONFIG_AUTOFS4_FS) += autofs4/
obj-$(CONFIG_ADFS_FS) += adfs/
obj-$(CONFIG_FUSE_FS) += fuse/
diff --git a/fs/aio.c b/fs/aio.c
index b850e92ee0d5..134e5b635d64 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1434,7 +1434,23 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
req->ki_flags = iocb_flags(req->ki_filp);
if (iocb->aio_flags & IOCB_FLAG_RESFD)
req->ki_flags |= IOCB_EVENTFD;
- req->ki_hint = file_write_hint(req->ki_filp);
+ req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
+ if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
+ /*
+ * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
+ * aio_reqprio is interpreted as an I/O scheduling
+ * class and priority.
+ */
+ ret = ioprio_check_cap(iocb->aio_reqprio);
+ if (ret) {
+ pr_debug("aio ioprio check cap error: %d\n", ret);
+ return ret;
+ }
+
+ req->ki_ioprio = iocb->aio_reqprio;
+ } else
+ req->ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+
ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
if (unlikely(ret))
fput(req->ki_filp);
diff --git a/fs/autofs/Kconfig b/fs/autofs/Kconfig
new file mode 100644
index 000000000000..6a2064eb3b27
--- /dev/null
+++ b/fs/autofs/Kconfig
@@ -0,0 +1,20 @@
+config AUTOFS_FS
+ tristate "Kernel automounter support (supports v3, v4 and v5)"
+ default n
+ help
+ The automounter is a tool to automatically mount remote file systems
+ on demand. This implementation is partially kernel-based to reduce
+ overhead in the already-mounted case; this is unlike the BSD
+ automounter (amd), which is a pure user space daemon.
+
+ To use the automounter you need the user-space tools from
+ <https://www.kernel.org/pub/linux/daemons/autofs/>; you also want
+ to answer Y to "NFS file system support", below.
+
+ To compile this support as a module, choose M here: the module will be
+ called autofs.
+
+ If you are not a part of a fairly large, distributed network or
+ don't have a laptop which needs to dynamically reconfigure to the
+ local network, you probably do not need an automounter, and can say
+ N here.
diff --git a/fs/autofs/Makefile b/fs/autofs/Makefile
new file mode 100644
index 000000000000..43fedde15c26
--- /dev/null
+++ b/fs/autofs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux autofs-filesystem routines.
+#
+
+obj-$(CONFIG_AUTOFS_FS) += autofs.o
+
+autofs-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs/autofs_i.h
index 4737615f0eaa..9400a9f6318a 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -9,7 +9,7 @@
/* Internal header file for autofs */
-#include <linux/auto_fs4.h>
+#include <linux/auto_fs.h>
#include <linux/auto_dev-ioctl.h>
#include <linux/kernel.h>
@@ -25,7 +25,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/completion.h>
-#include <asm/current.h>
+#include <linux/file.h>
/* This is the range of ioctl() numbers we claim as ours */
#define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
@@ -122,44 +122,44 @@ struct autofs_sb_info {
struct rcu_head rcu;
};
-static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
+static inline struct autofs_sb_info *autofs_sbi(struct super_block *sb)
{
return (struct autofs_sb_info *)(sb->s_fs_info);
}
-static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)
+static inline struct autofs_info *autofs_dentry_ino(struct dentry *dentry)
{
return (struct autofs_info *)(dentry->d_fsdata);
}
-/* autofs4_oz_mode(): do we see the man behind the curtain? (The
+/* autofs_oz_mode(): do we see the man behind the curtain? (The
* processes which do manipulations for us in user space sees the raw
* filesystem without "magic".)
*/
-static inline int autofs4_oz_mode(struct autofs_sb_info *sbi)
+static inline int autofs_oz_mode(struct autofs_sb_info *sbi)
{
return sbi->catatonic || task_pgrp(current) == sbi->oz_pgrp;
}
-struct inode *autofs4_get_inode(struct super_block *, umode_t);
-void autofs4_free_ino(struct autofs_info *);
+struct inode *autofs_get_inode(struct super_block *, umode_t);
+void autofs_free_ino(struct autofs_info *);
/* Expiration */
-int is_autofs4_dentry(struct dentry *);
-int autofs4_expire_wait(const struct path *path, int rcu_walk);
-int autofs4_expire_run(struct super_block *, struct vfsmount *,
- struct autofs_sb_info *,
- struct autofs_packet_expire __user *);
-int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
- struct autofs_sb_info *sbi, int when);
-int autofs4_expire_multi(struct super_block *, struct vfsmount *,
- struct autofs_sb_info *, int __user *);
-struct dentry *autofs4_expire_direct(struct super_block *sb,
- struct vfsmount *mnt,
- struct autofs_sb_info *sbi, int how);
-struct dentry *autofs4_expire_indirect(struct super_block *sb,
- struct vfsmount *mnt,
- struct autofs_sb_info *sbi, int how);
+int is_autofs_dentry(struct dentry *);
+int autofs_expire_wait(const struct path *path, int rcu_walk);
+int autofs_expire_run(struct super_block *, struct vfsmount *,
+ struct autofs_sb_info *,
+ struct autofs_packet_expire __user *);
+int autofs_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
+ struct autofs_sb_info *sbi, int when);
+int autofs_expire_multi(struct super_block *, struct vfsmount *,
+ struct autofs_sb_info *, int __user *);
+struct dentry *autofs_expire_direct(struct super_block *sb,
+ struct vfsmount *mnt,
+ struct autofs_sb_info *sbi, int how);
+struct dentry *autofs_expire_indirect(struct super_block *sb,
+ struct vfsmount *mnt,
+ struct autofs_sb_info *sbi, int how);
/* Device node initialization */
@@ -168,11 +168,11 @@ void autofs_dev_ioctl_exit(void);
/* Operations structures */
-extern const struct inode_operations autofs4_symlink_inode_operations;
-extern const struct inode_operations autofs4_dir_inode_operations;
-extern const struct file_operations autofs4_dir_operations;
-extern const struct file_operations autofs4_root_operations;
-extern const struct dentry_operations autofs4_dentry_operations;
+extern const struct inode_operations autofs_symlink_inode_operations;
+extern const struct inode_operations autofs_dir_inode_operations;
+extern const struct file_operations autofs_dir_operations;
+extern const struct file_operations autofs_root_operations;
+extern const struct dentry_operations autofs_dentry_operations;
/* VFS automount flags management functions */
static inline void __managed_dentry_set_managed(struct dentry *dentry)
@@ -201,9 +201,9 @@ static inline void managed_dentry_clear_managed(struct dentry *dentry)
/* Initializing function */
-int autofs4_fill_super(struct super_block *, void *, int);
-struct autofs_info *autofs4_new_ino(struct autofs_sb_info *);
-void autofs4_clean_ino(struct autofs_info *);
+int autofs_fill_super(struct super_block *, void *, int);
+struct autofs_info *autofs_new_ino(struct autofs_sb_info *);
+void autofs_clean_ino(struct autofs_info *);
static inline int autofs_prepare_pipe(struct file *pipe)
{
@@ -218,25 +218,25 @@ static inline int autofs_prepare_pipe(struct file *pipe)
/* Queue management functions */
-int autofs4_wait(struct autofs_sb_info *,
+int autofs_wait(struct autofs_sb_info *,
const struct path *, enum autofs_notify);
-int autofs4_wait_release(struct autofs_sb_info *, autofs_wqt_t, int);
-void autofs4_catatonic_mode(struct autofs_sb_info *);
+int autofs_wait_release(struct autofs_sb_info *, autofs_wqt_t, int);
+void autofs_catatonic_mode(struct autofs_sb_info *);
-static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi)
+static inline u32 autofs_get_dev(struct autofs_sb_info *sbi)
{
return new_encode_dev(sbi->sb->s_dev);
}
-static inline u64 autofs4_get_ino(struct autofs_sb_info *sbi)
+static inline u64 autofs_get_ino(struct autofs_sb_info *sbi)
{
return d_inode(sbi->sb->s_root)->i_ino;
}
-static inline void __autofs4_add_expiring(struct dentry *dentry)
+static inline void __autofs_add_expiring(struct dentry *dentry)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
if (ino) {
if (list_empty(&ino->expiring))
@@ -244,10 +244,10 @@ static inline void __autofs4_add_expiring(struct dentry *dentry)
}
}
-static inline void autofs4_add_expiring(struct dentry *dentry)
+static inline void autofs_add_expiring(struct dentry *dentry)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
if (ino) {
spin_lock(&sbi->lookup_lock);
@@ -257,10 +257,10 @@ static inline void autofs4_add_expiring(struct dentry *dentry)
}
}
-static inline void autofs4_del_expiring(struct dentry *dentry)
+static inline void autofs_del_expiring(struct dentry *dentry)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
if (ino) {
spin_lock(&sbi->lookup_lock);
@@ -270,4 +270,4 @@ static inline void autofs4_del_expiring(struct dentry *dentry)
}
}
-void autofs4_kill_sb(struct super_block *);
+void autofs_kill_sb(struct super_block *);
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs/dev-ioctl.c
index 26f6b4f41ce6..ea4ca1445ab7 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs/dev-ioctl.c
@@ -7,23 +7,10 @@
* option, any later version, incorporated herein by reference.
*/
-#include <linux/module.h>
-#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/namei.h>
-#include <linux/fcntl.h>
-#include <linux/file.h>
-#include <linux/fdtable.h>
-#include <linux/sched.h>
-#include <linux/cred.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/magic.h>
-#include <linux/dcache.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
#include "autofs_i.h"
@@ -166,7 +153,7 @@ static struct autofs_sb_info *autofs_dev_ioctl_sbi(struct file *f)
if (f) {
inode = file_inode(f);
- sbi = autofs4_sbi(inode->i_sb);
+ sbi = autofs_sbi(inode->i_sb);
}
return sbi;
}
@@ -236,7 +223,7 @@ static int test_by_dev(const struct path *path, void *p)
static int test_by_type(const struct path *path, void *p)
{
- struct autofs_info *ino = autofs4_dentry_ino(path->dentry);
+ struct autofs_info *ino = autofs_dentry_ino(path->dentry);
return ino && ino->sbi->type & *(unsigned *)p;
}
@@ -324,7 +311,7 @@ static int autofs_dev_ioctl_ready(struct file *fp,
autofs_wqt_t token;
token = (autofs_wqt_t) param->ready.token;
- return autofs4_wait_release(sbi, token, 0);
+ return autofs_wait_release(sbi, token, 0);
}
/*
@@ -340,7 +327,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
token = (autofs_wqt_t) param->fail.token;
status = param->fail.status < 0 ? param->fail.status : -ENOENT;
- return autofs4_wait_release(sbi, token, status);
+ return autofs_wait_release(sbi, token, status);
}
/*
@@ -412,7 +399,7 @@ static int autofs_dev_ioctl_catatonic(struct file *fp,
struct autofs_sb_info *sbi,
struct autofs_dev_ioctl *param)
{
- autofs4_catatonic_mode(sbi);
+ autofs_catatonic_mode(sbi);
return 0;
}
@@ -459,10 +446,10 @@ static int autofs_dev_ioctl_requester(struct file *fp,
if (err)
goto out;
- ino = autofs4_dentry_ino(path.dentry);
+ ino = autofs_dentry_ino(path.dentry);
if (ino) {
err = 0;
- autofs4_expire_wait(&path, 0);
+ autofs_expire_wait(&path, 0);
spin_lock(&sbi->fs_lock);
param->requester.uid =
from_kuid_munged(current_user_ns(), ino->uid);
@@ -489,7 +476,7 @@ static int autofs_dev_ioctl_expire(struct file *fp,
how = param->expire.how;
mnt = fp->f_path.mnt;
- return autofs4_do_expire_multi(sbi->sb, mnt, sbi, how);
+ return autofs_do_expire_multi(sbi->sb, mnt, sbi, how);
}
/* Check if autofs mount point is in use */
@@ -686,7 +673,7 @@ static int _autofs_dev_ioctl(unsigned int command,
* Admin needs to be able to set the mount catatonic in
* order to be able to perform the re-open.
*/
- if (!autofs4_oz_mode(sbi) &&
+ if (!autofs_oz_mode(sbi) &&
cmd != AUTOFS_DEV_IOCTL_CATATONIC_CMD) {
err = -EACCES;
fput(fp);
diff --git a/fs/autofs4/expire.c b/fs/autofs/expire.c
index 57725d4a8c59..b332d3f6e730 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs/expire.c
@@ -13,10 +13,10 @@
static unsigned long now;
/* Check if a dentry can be expired */
-static inline int autofs4_can_expire(struct dentry *dentry,
- unsigned long timeout, int do_now)
+static inline int autofs_can_expire(struct dentry *dentry,
+ unsigned long timeout, int do_now)
{
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
/* dentry in the process of being deleted */
if (ino == NULL)
@@ -31,7 +31,7 @@ static inline int autofs4_can_expire(struct dentry *dentry,
}
/* Check a mount point for busyness */
-static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
+static int autofs_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
{
struct dentry *top = dentry;
struct path path = {.mnt = mnt, .dentry = dentry};
@@ -44,8 +44,8 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
if (!follow_down_one(&path))
goto done;
- if (is_autofs4_dentry(path.dentry)) {
- struct autofs_sb_info *sbi = autofs4_sbi(path.dentry->d_sb);
+ if (is_autofs_dentry(path.dentry)) {
+ struct autofs_sb_info *sbi = autofs_sbi(path.dentry->d_sb);
/* This is an autofs submount, we can't expire it */
if (autofs_type_indirect(sbi->type))
@@ -56,7 +56,7 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
if (!may_umount_tree(path.mnt)) {
struct autofs_info *ino;
- ino = autofs4_dentry_ino(top);
+ ino = autofs_dentry_ino(top);
ino->last_used = jiffies;
goto done;
}
@@ -74,7 +74,7 @@ done:
static struct dentry *get_next_positive_subdir(struct dentry *prev,
struct dentry *root)
{
- struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(root->d_sb);
struct list_head *next;
struct dentry *q;
@@ -121,7 +121,7 @@ cont:
static struct dentry *get_next_positive_dentry(struct dentry *prev,
struct dentry *root)
{
- struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(root->d_sb);
struct list_head *next;
struct dentry *p, *ret;
@@ -184,10 +184,10 @@ again:
* The tree is not busy iff no mountpoints are busy and there are no
* autofs submounts.
*/
-static int autofs4_direct_busy(struct vfsmount *mnt,
- struct dentry *top,
- unsigned long timeout,
- int do_now)
+static int autofs_direct_busy(struct vfsmount *mnt,
+ struct dentry *top,
+ unsigned long timeout,
+ int do_now)
{
pr_debug("top %p %pd\n", top, top);
@@ -195,14 +195,14 @@ static int autofs4_direct_busy(struct vfsmount *mnt,
if (!may_umount_tree(mnt)) {
struct autofs_info *ino;
- ino = autofs4_dentry_ino(top);
+ ino = autofs_dentry_ino(top);
if (ino)
ino->last_used = jiffies;
return 1;
}
/* Timeout of a direct mount is determined by its top dentry */
- if (!autofs4_can_expire(top, timeout, do_now))
+ if (!autofs_can_expire(top, timeout, do_now))
return 1;
return 0;
@@ -212,12 +212,12 @@ static int autofs4_direct_busy(struct vfsmount *mnt,
* Check a directory tree of mount points for busyness
* The tree is not busy iff no mountpoints are busy
*/
-static int autofs4_tree_busy(struct vfsmount *mnt,
- struct dentry *top,
- unsigned long timeout,
- int do_now)
+static int autofs_tree_busy(struct vfsmount *mnt,
+ struct dentry *top,
+ unsigned long timeout,
+ int do_now)
{
- struct autofs_info *top_ino = autofs4_dentry_ino(top);
+ struct autofs_info *top_ino = autofs_dentry_ino(top);
struct dentry *p;
pr_debug("top %p %pd\n", top, top);
@@ -237,13 +237,13 @@ static int autofs4_tree_busy(struct vfsmount *mnt,
* If the fs is busy update the expiry counter.
*/
if (d_mountpoint(p)) {
- if (autofs4_mount_busy(mnt, p)) {
+ if (autofs_mount_busy(mnt, p)) {
top_ino->last_used = jiffies;
dput(p);
return 1;
}
} else {
- struct autofs_info *ino = autofs4_dentry_ino(p);
+ struct autofs_info *ino = autofs_dentry_ino(p);
unsigned int ino_count = atomic_read(&ino->count);
/* allow for dget above and top is already dgot */
@@ -261,16 +261,16 @@ static int autofs4_tree_busy(struct vfsmount *mnt,
}
/* Timeout of a tree mount is ultimately determined by its top dentry */
- if (!autofs4_can_expire(top, timeout, do_now))
+ if (!autofs_can_expire(top, timeout, do_now))
return 1;
return 0;
}
-static struct dentry *autofs4_check_leaves(struct vfsmount *mnt,
- struct dentry *parent,
- unsigned long timeout,
- int do_now)
+static struct dentry *autofs_check_leaves(struct vfsmount *mnt,
+ struct dentry *parent,
+ unsigned long timeout,
+ int do_now)
{
struct dentry *p;
@@ -282,11 +282,11 @@ static struct dentry *autofs4_check_leaves(struct vfsmount *mnt,
if (d_mountpoint(p)) {
/* Can we umount this guy */
- if (autofs4_mount_busy(mnt, p))
+ if (autofs_mount_busy(mnt, p))
continue;
/* Can we expire this guy */
- if (autofs4_can_expire(p, timeout, do_now))
+ if (autofs_can_expire(p, timeout, do_now))
return p;
}
}
@@ -294,10 +294,10 @@ static struct dentry *autofs4_check_leaves(struct vfsmount *mnt,
}
/* Check if we can expire a direct mount (possibly a tree) */
-struct dentry *autofs4_expire_direct(struct super_block *sb,
- struct vfsmount *mnt,
- struct autofs_sb_info *sbi,
- int how)
+struct dentry *autofs_expire_direct(struct super_block *sb,
+ struct vfsmount *mnt,
+ struct autofs_sb_info *sbi,
+ int how)
{
unsigned long timeout;
struct dentry *root = dget(sb->s_root);
@@ -310,9 +310,9 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
now = jiffies;
timeout = sbi->exp_timeout;
- if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
+ if (!autofs_direct_busy(mnt, root, timeout, do_now)) {
spin_lock(&sbi->fs_lock);
- ino = autofs4_dentry_ino(root);
+ ino = autofs_dentry_ino(root);
/* No point expiring a pending mount */
if (ino->flags & AUTOFS_INF_PENDING) {
spin_unlock(&sbi->fs_lock);
@@ -321,7 +321,7 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
- if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
+ if (!autofs_direct_busy(mnt, root, timeout, do_now)) {
spin_lock(&sbi->fs_lock);
ino->flags |= AUTOFS_INF_EXPIRING;
init_completion(&ino->expire_complete);
@@ -350,7 +350,7 @@ static struct dentry *should_expire(struct dentry *dentry,
{
int do_now = how & AUTOFS_EXP_IMMEDIATE;
int exp_leaves = how & AUTOFS_EXP_LEAVES;
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
unsigned int ino_count;
/* No point expiring a pending mount */
@@ -367,11 +367,11 @@ static struct dentry *should_expire(struct dentry *dentry,
pr_debug("checking mountpoint %p %pd\n", dentry, dentry);
/* Can we umount this guy */
- if (autofs4_mount_busy(mnt, dentry))
+ if (autofs_mount_busy(mnt, dentry))
return NULL;
/* Can we expire this guy */
- if (autofs4_can_expire(dentry, timeout, do_now))
+ if (autofs_can_expire(dentry, timeout, do_now))
return dentry;
return NULL;
}
@@ -382,7 +382,7 @@ static struct dentry *should_expire(struct dentry *dentry,
* A symlink can't be "busy" in the usual sense so
* just check last used for expire timeout.
*/
- if (autofs4_can_expire(dentry, timeout, do_now))
+ if (autofs_can_expire(dentry, timeout, do_now))
return dentry;
return NULL;
}
@@ -397,7 +397,7 @@ static struct dentry *should_expire(struct dentry *dentry,
if (d_count(dentry) > ino_count)
return NULL;
- if (!autofs4_tree_busy(mnt, dentry, timeout, do_now))
+ if (!autofs_tree_busy(mnt, dentry, timeout, do_now))
return dentry;
/*
* Case 3: pseudo direct mount, expire individual leaves
@@ -411,7 +411,7 @@ static struct dentry *should_expire(struct dentry *dentry,
if (d_count(dentry) > ino_count)
return NULL;
- expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
+ expired = autofs_check_leaves(mnt, dentry, timeout, do_now);
if (expired) {
if (expired == dentry)
dput(dentry);
@@ -427,10 +427,10 @@ static struct dentry *should_expire(struct dentry *dentry,
* - it is unused by any user process
* - it has been unused for exp_timeout time
*/
-struct dentry *autofs4_expire_indirect(struct super_block *sb,
- struct vfsmount *mnt,
- struct autofs_sb_info *sbi,
- int how)
+struct dentry *autofs_expire_indirect(struct super_block *sb,
+ struct vfsmount *mnt,
+ struct autofs_sb_info *sbi,
+ int how)
{
unsigned long timeout;
struct dentry *root = sb->s_root;
@@ -450,7 +450,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
int flags = how;
spin_lock(&sbi->fs_lock);
- ino = autofs4_dentry_ino(dentry);
+ ino = autofs_dentry_ino(dentry);
if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
spin_unlock(&sbi->fs_lock);
continue;
@@ -462,7 +462,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
continue;
spin_lock(&sbi->fs_lock);
- ino = autofs4_dentry_ino(expired);
+ ino = autofs_dentry_ino(expired);
ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
@@ -498,11 +498,11 @@ found:
return expired;
}
-int autofs4_expire_wait(const struct path *path, int rcu_walk)
+int autofs_expire_wait(const struct path *path, int rcu_walk)
{
struct dentry *dentry = path->dentry;
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
int status;
int state;
@@ -529,7 +529,7 @@ retry:
pr_debug("waiting for expire %p name=%pd\n", dentry, dentry);
- status = autofs4_wait(sbi, path, NFY_NONE);
+ status = autofs_wait(sbi, path, NFY_NONE);
wait_for_completion(&ino->expire_complete);
pr_debug("expire done status=%d\n", status);
@@ -545,10 +545,10 @@ retry:
}
/* Perform an expiry operation */
-int autofs4_expire_run(struct super_block *sb,
- struct vfsmount *mnt,
- struct autofs_sb_info *sbi,
- struct autofs_packet_expire __user *pkt_p)
+int autofs_expire_run(struct super_block *sb,
+ struct vfsmount *mnt,
+ struct autofs_sb_info *sbi,
+ struct autofs_packet_expire __user *pkt_p)
{
struct autofs_packet_expire pkt;
struct autofs_info *ino;
@@ -560,7 +560,7 @@ int autofs4_expire_run(struct super_block *sb,
pkt.hdr.proto_version = sbi->version;
pkt.hdr.type = autofs_ptype_expire;
- dentry = autofs4_expire_indirect(sb, mnt, sbi, 0);
+ dentry = autofs_expire_indirect(sb, mnt, sbi, 0);
if (!dentry)
return -EAGAIN;
@@ -573,7 +573,7 @@ int autofs4_expire_run(struct super_block *sb,
ret = -EFAULT;
spin_lock(&sbi->fs_lock);
- ino = autofs4_dentry_ino(dentry);
+ ino = autofs_dentry_ino(dentry);
/* avoid rapid-fire expire attempts if expiry fails */
ino->last_used = now;
ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
@@ -583,25 +583,25 @@ int autofs4_expire_run(struct super_block *sb,
return ret;
}
-int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
- struct autofs_sb_info *sbi, int when)
+int autofs_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
+ struct autofs_sb_info *sbi, int when)
{
struct dentry *dentry;
int ret = -EAGAIN;
if (autofs_type_trigger(sbi->type))
- dentry = autofs4_expire_direct(sb, mnt, sbi, when);
+ dentry = autofs_expire_direct(sb, mnt, sbi, when);
else
- dentry = autofs4_expire_indirect(sb, mnt, sbi, when);
+ dentry = autofs_expire_indirect(sb, mnt, sbi, when);
if (dentry) {
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
const struct path path = { .mnt = mnt, .dentry = dentry };
/* This is synchronous because it makes the daemon a
* little easier
*/
- ret = autofs4_wait(sbi, &path, NFY_EXPIRE);
+ ret = autofs_wait(sbi, &path, NFY_EXPIRE);
spin_lock(&sbi->fs_lock);
/* avoid rapid-fire expire attempts if expiry fails */
@@ -619,7 +619,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
* Call repeatedly until it returns -EAGAIN, meaning there's nothing
* more to be done.
*/
-int autofs4_expire_multi(struct super_block *sb, struct vfsmount *mnt,
+int autofs_expire_multi(struct super_block *sb, struct vfsmount *mnt,
struct autofs_sb_info *sbi, int __user *arg)
{
int do_now = 0;
@@ -627,6 +627,5 @@ int autofs4_expire_multi(struct super_block *sb, struct vfsmount *mnt,
if (arg && get_user(do_now, arg))
return -EFAULT;
- return autofs4_do_expire_multi(sb, mnt, sbi, do_now);
+ return autofs_do_expire_multi(sb, mnt, sbi, do_now);
}
-
diff --git a/fs/autofs4/init.c b/fs/autofs/init.c
index 8cf0e63389ae..16fb61315843 100644
--- a/fs/autofs4/init.c
+++ b/fs/autofs/init.c
@@ -13,18 +13,18 @@
static struct dentry *autofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- return mount_nodev(fs_type, flags, data, autofs4_fill_super);
+ return mount_nodev(fs_type, flags, data, autofs_fill_super);
}
static struct file_system_type autofs_fs_type = {
.owner = THIS_MODULE,
.name = "autofs",
.mount = autofs_mount,
- .kill_sb = autofs4_kill_sb,
+ .kill_sb = autofs_kill_sb,
};
MODULE_ALIAS_FS("autofs");
-static int __init init_autofs4_fs(void)
+static int __init init_autofs_fs(void)
{
int err;
@@ -37,12 +37,12 @@ static int __init init_autofs4_fs(void)
return err;
}
-static void __exit exit_autofs4_fs(void)
+static void __exit exit_autofs_fs(void)
{
autofs_dev_ioctl_exit();
unregister_filesystem(&autofs_fs_type);
}
-module_init(init_autofs4_fs)
-module_exit(exit_autofs4_fs)
+module_init(init_autofs_fs)
+module_exit(exit_autofs_fs)
MODULE_LICENSE("GPL");
diff --git a/fs/autofs4/inode.c b/fs/autofs/inode.c
index 09e7d68dff02..b51980fc274e 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs/inode.c
@@ -7,18 +7,14 @@
* option, any later version, incorporated herein by reference.
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
-#include <linux/bitops.h>
#include <linux/magic.h>
+
#include "autofs_i.h"
-#include <linux/module.h>
-struct autofs_info *autofs4_new_ino(struct autofs_sb_info *sbi)
+struct autofs_info *autofs_new_ino(struct autofs_sb_info *sbi)
{
struct autofs_info *ino;
@@ -32,21 +28,21 @@ struct autofs_info *autofs4_new_ino(struct autofs_sb_info *sbi)
return ino;
}
-void autofs4_clean_ino(struct autofs_info *ino)
+void autofs_clean_ino(struct autofs_info *ino)
{
ino->uid = GLOBAL_ROOT_UID;
ino->gid = GLOBAL_ROOT_GID;
ino->last_used = jiffies;
}
-void autofs4_free_ino(struct autofs_info *ino)
+void autofs_free_ino(struct autofs_info *ino)
{
kfree(ino);
}
-void autofs4_kill_sb(struct super_block *sb)
+void autofs_kill_sb(struct super_block *sb)
{
- struct autofs_sb_info *sbi = autofs4_sbi(sb);
+ struct autofs_sb_info *sbi = autofs_sbi(sb);
/*
* In the event of a failure in get_sb_nodev the superblock
@@ -56,7 +52,7 @@ void autofs4_kill_sb(struct super_block *sb)
*/
if (sbi) {
/* Free wait queues, close pipe */
- autofs4_catatonic_mode(sbi);
+ autofs_catatonic_mode(sbi);
put_pid(sbi->oz_pgrp);
}
@@ -66,9 +62,9 @@ void autofs4_kill_sb(struct super_block *sb)
kfree_rcu(sbi, rcu);
}
-static int autofs4_show_options(struct seq_file *m, struct dentry *root)
+static int autofs_show_options(struct seq_file *m, struct dentry *root)
{
- struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(root->d_sb);
struct inode *root_inode = d_inode(root->d_sb->s_root);
if (!sbi)
@@ -101,16 +97,16 @@ static int autofs4_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
-static void autofs4_evict_inode(struct inode *inode)
+static void autofs_evict_inode(struct inode *inode)
{
clear_inode(inode);
kfree(inode->i_private);
}
-static const struct super_operations autofs4_sops = {
+static const struct super_operations autofs_sops = {
.statfs = simple_statfs,
- .show_options = autofs4_show_options,
- .evict_inode = autofs4_evict_inode,
+ .show_options = autofs_show_options,
+ .evict_inode = autofs_evict_inode,
};
enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto,
@@ -206,7 +202,7 @@ static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid,
return (*pipefd < 0);
}
-int autofs4_fill_super(struct super_block *s, void *data, int silent)
+int autofs_fill_super(struct super_block *s, void *data, int silent)
{
struct inode *root_inode;
struct dentry *root;
@@ -246,19 +242,19 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = AUTOFS_SUPER_MAGIC;
- s->s_op = &autofs4_sops;
- s->s_d_op = &autofs4_dentry_operations;
+ s->s_op = &autofs_sops;
+ s->s_d_op = &autofs_dentry_operations;
s->s_time_gran = 1;
/*
* Get the root inode and dentry, but defer checking for errors.
*/
- ino = autofs4_new_ino(sbi);
+ ino = autofs_new_ino(sbi);
if (!ino) {
ret = -ENOMEM;
goto fail_free;
}
- root_inode = autofs4_get_inode(s, S_IFDIR | 0755);
+ root_inode = autofs_get_inode(s, S_IFDIR | 0755);
root = d_make_root(root_inode);
if (!root)
goto fail_ino;
@@ -305,8 +301,8 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
if (autofs_type_trigger(sbi->type))
__managed_dentry_set_managed(root);
- root_inode->i_fop = &autofs4_root_operations;
- root_inode->i_op = &autofs4_dir_inode_operations;
+ root_inode->i_fop = &autofs_root_operations;
+ root_inode->i_op = &autofs_dir_inode_operations;
pr_debug("pipe fd = %d, pgrp = %u\n", pipefd, pid_nr(sbi->oz_pgrp));
pipe = fget(pipefd);
@@ -340,14 +336,14 @@ fail_dput:
dput(root);
goto fail_free;
fail_ino:
- autofs4_free_ino(ino);
+ autofs_free_ino(ino);
fail_free:
kfree(sbi);
s->s_fs_info = NULL;
return ret;
}
-struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode)
+struct inode *autofs_get_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode = new_inode(sb);
@@ -364,10 +360,10 @@ struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode)
if (S_ISDIR(mode)) {
set_nlink(inode, 2);
- inode->i_op = &autofs4_dir_inode_operations;
- inode->i_fop = &autofs4_dir_operations;
+ inode->i_op = &autofs_dir_inode_operations;
+ inode->i_fop = &autofs_dir_operations;
} else if (S_ISLNK(mode)) {
- inode->i_op = &autofs4_symlink_inode_operations;
+ inode->i_op = &autofs_symlink_inode_operations;
} else
WARN_ON(1);
diff --git a/fs/autofs4/root.c b/fs/autofs/root.c
index b12e37f27530..a3d414150578 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs/root.c
@@ -9,72 +9,66 @@
*/
#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/stat.h>
-#include <linux/slab.h>
-#include <linux/param.h>
-#include <linux/time.h>
#include <linux/compat.h>
-#include <linux/mutex.h>
#include "autofs_i.h"
-static int autofs4_dir_symlink(struct inode *, struct dentry *, const char *);
-static int autofs4_dir_unlink(struct inode *, struct dentry *);
-static int autofs4_dir_rmdir(struct inode *, struct dentry *);
-static int autofs4_dir_mkdir(struct inode *, struct dentry *, umode_t);
-static long autofs4_root_ioctl(struct file *, unsigned int, unsigned long);
+static int autofs_dir_symlink(struct inode *, struct dentry *, const char *);
+static int autofs_dir_unlink(struct inode *, struct dentry *);
+static int autofs_dir_rmdir(struct inode *, struct dentry *);
+static int autofs_dir_mkdir(struct inode *, struct dentry *, umode_t);
+static long autofs_root_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
-static long autofs4_root_compat_ioctl(struct file *,
- unsigned int, unsigned long);
+static long autofs_root_compat_ioctl(struct file *,
+ unsigned int, unsigned long);
#endif
-static int autofs4_dir_open(struct inode *inode, struct file *file);
-static struct dentry *autofs4_lookup(struct inode *,
- struct dentry *, unsigned int);
-static struct vfsmount *autofs4_d_automount(struct path *);
-static int autofs4_d_manage(const struct path *, bool);
-static void autofs4_dentry_release(struct dentry *);
-
-const struct file_operations autofs4_root_operations = {
+static int autofs_dir_open(struct inode *inode, struct file *file);
+static struct dentry *autofs_lookup(struct inode *,
+ struct dentry *, unsigned int);
+static struct vfsmount *autofs_d_automount(struct path *);
+static int autofs_d_manage(const struct path *, bool);
+static void autofs_dentry_release(struct dentry *);
+
+const struct file_operations autofs_root_operations = {
.open = dcache_dir_open,
.release = dcache_dir_close,
.read = generic_read_dir,
.iterate_shared = dcache_readdir,
.llseek = dcache_dir_lseek,
- .unlocked_ioctl = autofs4_root_ioctl,
+ .unlocked_ioctl = autofs_root_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = autofs4_root_compat_ioctl,
+ .compat_ioctl = autofs_root_compat_ioctl,
#endif
};
-const struct file_operations autofs4_dir_operations = {
- .open = autofs4_dir_open,
+const struct file_operations autofs_dir_operations = {
+ .open = autofs_dir_open,
.release = dcache_dir_close,
.read = generic_read_dir,
.iterate_shared = dcache_readdir,
.llseek = dcache_dir_lseek,
};
-const struct inode_operations autofs4_dir_inode_operations = {
- .lookup = autofs4_lookup,
- .unlink = autofs4_dir_unlink,
- .symlink = autofs4_dir_symlink,
- .mkdir = autofs4_dir_mkdir,
- .rmdir = autofs4_dir_rmdir,
+const struct inode_operations autofs_dir_inode_operations = {
+ .lookup = autofs_lookup,
+ .unlink = autofs_dir_unlink,
+ .symlink = autofs_dir_symlink,
+ .mkdir = autofs_dir_mkdir,
+ .rmdir = autofs_dir_rmdir,
};
-const struct dentry_operations autofs4_dentry_operations = {
- .d_automount = autofs4_d_automount,
- .d_manage = autofs4_d_manage,
- .d_release = autofs4_dentry_release,
+const struct dentry_operations autofs_dentry_operations = {
+ .d_automount = autofs_d_automount,
+ .d_manage = autofs_d_manage,
+ .d_release = autofs_dentry_release,
};
-static void autofs4_add_active(struct dentry *dentry)
+static void autofs_add_active(struct dentry *dentry)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
struct autofs_info *ino;
- ino = autofs4_dentry_ino(dentry);
+ ino = autofs_dentry_ino(dentry);
if (ino) {
spin_lock(&sbi->lookup_lock);
if (!ino->active_count) {
@@ -86,12 +80,12 @@ static void autofs4_add_active(struct dentry *dentry)
}
}
-static void autofs4_del_active(struct dentry *dentry)
+static void autofs_del_active(struct dentry *dentry)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
struct autofs_info *ino;
- ino = autofs4_dentry_ino(dentry);
+ ino = autofs_dentry_ino(dentry);
if (ino) {
spin_lock(&sbi->lookup_lock);
ino->active_count--;
@@ -103,14 +97,14 @@ static void autofs4_del_active(struct dentry *dentry)
}
}
-static int autofs4_dir_open(struct inode *inode, struct file *file)
+static int autofs_dir_open(struct inode *inode, struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
pr_debug("file=%p dentry=%p %pd\n", file, dentry, dentry);
- if (autofs4_oz_mode(sbi))
+ if (autofs_oz_mode(sbi))
goto out;
/*
@@ -133,10 +127,10 @@ out:
return dcache_dir_open(inode, file);
}
-static void autofs4_dentry_release(struct dentry *de)
+static void autofs_dentry_release(struct dentry *de)
{
- struct autofs_info *ino = autofs4_dentry_ino(de);
- struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(de);
+ struct autofs_sb_info *sbi = autofs_sbi(de->d_sb);
pr_debug("releasing %p\n", de);
@@ -152,12 +146,12 @@ static void autofs4_dentry_release(struct dentry *de)
spin_unlock(&sbi->lookup_lock);
}
- autofs4_free_ino(ino);
+ autofs_free_ino(ino);
}
-static struct dentry *autofs4_lookup_active(struct dentry *dentry)
+static struct dentry *autofs_lookup_active(struct dentry *dentry)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
struct dentry *parent = dentry->d_parent;
const struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
@@ -209,10 +203,10 @@ next:
return NULL;
}
-static struct dentry *autofs4_lookup_expiring(struct dentry *dentry,
- bool rcu_walk)
+static struct dentry *autofs_lookup_expiring(struct dentry *dentry,
+ bool rcu_walk)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
struct dentry *parent = dentry->d_parent;
const struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
@@ -269,17 +263,17 @@ next:
return NULL;
}
-static int autofs4_mount_wait(const struct path *path, bool rcu_walk)
+static int autofs_mount_wait(const struct path *path, bool rcu_walk)
{
- struct autofs_sb_info *sbi = autofs4_sbi(path->dentry->d_sb);
- struct autofs_info *ino = autofs4_dentry_ino(path->dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(path->dentry->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(path->dentry);
int status = 0;
if (ino->flags & AUTOFS_INF_PENDING) {
if (rcu_walk)
return -ECHILD;
pr_debug("waiting for mount name=%pd\n", path->dentry);
- status = autofs4_wait(sbi, path, NFY_MOUNT);
+ status = autofs_wait(sbi, path, NFY_MOUNT);
pr_debug("mount wait done status=%d\n", status);
}
ino->last_used = jiffies;
@@ -291,11 +285,11 @@ static int do_expire_wait(const struct path *path, bool rcu_walk)
struct dentry *dentry = path->dentry;
struct dentry *expiring;
- expiring = autofs4_lookup_expiring(dentry, rcu_walk);
+ expiring = autofs_lookup_expiring(dentry, rcu_walk);
if (IS_ERR(expiring))
return PTR_ERR(expiring);
if (!expiring)
- return autofs4_expire_wait(path, rcu_walk);
+ return autofs_expire_wait(path, rcu_walk);
else {
const struct path this = { .mnt = path->mnt, .dentry = expiring };
/*
@@ -303,17 +297,17 @@ static int do_expire_wait(const struct path *path, bool rcu_walk)
* be quite complete, but the directory has been removed
* so it must have been successful, just wait for it.
*/
- autofs4_expire_wait(&this, 0);
- autofs4_del_expiring(expiring);
+ autofs_expire_wait(&this, 0);
+ autofs_del_expiring(expiring);
dput(expiring);
}
return 0;
}
-static struct dentry *autofs4_mountpoint_changed(struct path *path)
+static struct dentry *autofs_mountpoint_changed(struct path *path)
{
struct dentry *dentry = path->dentry;
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
/*
* If this is an indirect mount the dentry could have gone away
@@ -327,7 +321,7 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
new = d_lookup(parent, &dentry->d_name);
if (!new)
return NULL;
- ino = autofs4_dentry_ino(new);
+ ino = autofs_dentry_ino(new);
ino->last_used = jiffies;
dput(path->dentry);
path->dentry = new;
@@ -335,17 +329,17 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
return path->dentry;
}
-static struct vfsmount *autofs4_d_automount(struct path *path)
+static struct vfsmount *autofs_d_automount(struct path *path)
{
struct dentry *dentry = path->dentry;
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
int status;
pr_debug("dentry=%p %pd\n", dentry, dentry);
/* The daemon never triggers a mount. */
- if (autofs4_oz_mode(sbi))
+ if (autofs_oz_mode(sbi))
return NULL;
/*
@@ -364,7 +358,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
spin_lock(&sbi->fs_lock);
if (ino->flags & AUTOFS_INF_PENDING) {
spin_unlock(&sbi->fs_lock);
- status = autofs4_mount_wait(path, 0);
+ status = autofs_mount_wait(path, 0);
if (status)
return ERR_PTR(status);
goto done;
@@ -405,7 +399,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
}
ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&sbi->fs_lock);
- status = autofs4_mount_wait(path, 0);
+ status = autofs_mount_wait(path, 0);
spin_lock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_PENDING;
if (status) {
@@ -416,24 +410,24 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
spin_unlock(&sbi->fs_lock);
done:
/* Mount succeeded, check if we ended up with a new dentry */
- dentry = autofs4_mountpoint_changed(path);
+ dentry = autofs_mountpoint_changed(path);
if (!dentry)
return ERR_PTR(-ENOENT);
return NULL;
}
-static int autofs4_d_manage(const struct path *path, bool rcu_walk)
+static int autofs_d_manage(const struct path *path, bool rcu_walk)
{
struct dentry *dentry = path->dentry;
- struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
int status;
pr_debug("dentry=%p %pd\n", dentry, dentry);
/* The daemon never waits. */
- if (autofs4_oz_mode(sbi)) {
+ if (autofs_oz_mode(sbi)) {
if (!path_is_mountpoint(path))
return -EISDIR;
return 0;
@@ -447,7 +441,7 @@ static int autofs4_d_manage(const struct path *path, bool rcu_walk)
* This dentry may be under construction so wait on mount
* completion.
*/
- status = autofs4_mount_wait(path, rcu_walk);
+ status = autofs_mount_wait(path, rcu_walk);
if (status)
return status;
@@ -500,8 +494,8 @@ static int autofs4_d_manage(const struct path *path, bool rcu_walk)
}
/* Lookups in the root directory */
-static struct dentry *autofs4_lookup(struct inode *dir,
- struct dentry *dentry, unsigned int flags)
+static struct dentry *autofs_lookup(struct inode *dir,
+ struct dentry *dentry, unsigned int flags)
{
struct autofs_sb_info *sbi;
struct autofs_info *ino;
@@ -513,13 +507,13 @@ static struct dentry *autofs4_lookup(struct inode *dir,
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
- sbi = autofs4_sbi(dir->i_sb);
+ sbi = autofs_sbi(dir->i_sb);
pr_debug("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d\n",
current->pid, task_pgrp_nr(current), sbi->catatonic,
- autofs4_oz_mode(sbi));
+ autofs_oz_mode(sbi));
- active = autofs4_lookup_active(dentry);
+ active = autofs_lookup_active(dentry);
if (active)
return active;
else {
@@ -529,7 +523,7 @@ static struct dentry *autofs4_lookup(struct inode *dir,
* can return fail immediately. The daemon however does need
* to create directories within the file system.
*/
- if (!autofs4_oz_mode(sbi) && !IS_ROOT(dentry->d_parent))
+ if (!autofs_oz_mode(sbi) && !IS_ROOT(dentry->d_parent))
return ERR_PTR(-ENOENT);
/* Mark entries in the root as mount triggers */
@@ -537,24 +531,24 @@ static struct dentry *autofs4_lookup(struct inode *dir,
autofs_type_indirect(sbi->type))
__managed_dentry_set_managed(dentry);
- ino = autofs4_new_ino(sbi);
+ ino = autofs_new_ino(sbi);
if (!ino)
return ERR_PTR(-ENOMEM);
dentry->d_fsdata = ino;
ino->dentry = dentry;
- autofs4_add_active(dentry);
+ autofs_add_active(dentry);
}
return NULL;
}
-static int autofs4_dir_symlink(struct inode *dir,
+static int autofs_dir_symlink(struct inode *dir,
struct dentry *dentry,
const char *symname)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
struct inode *inode;
size_t size = strlen(symname);
@@ -562,14 +556,14 @@ static int autofs4_dir_symlink(struct inode *dir,
pr_debug("%s <- %pd\n", symname, dentry);
- if (!autofs4_oz_mode(sbi))
+ if (!autofs_oz_mode(sbi))
return -EACCES;
BUG_ON(!ino);
- autofs4_clean_ino(ino);
+ autofs_clean_ino(ino);
- autofs4_del_active(dentry);
+ autofs_del_active(dentry);
cp = kmalloc(size + 1, GFP_KERNEL);
if (!cp)
@@ -577,7 +571,7 @@ static int autofs4_dir_symlink(struct inode *dir,
strcpy(cp, symname);
- inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555);
+ inode = autofs_get_inode(dir->i_sb, S_IFLNK | 0555);
if (!inode) {
kfree(cp);
return -ENOMEM;
@@ -588,7 +582,7 @@ static int autofs4_dir_symlink(struct inode *dir,
dget(dentry);
atomic_inc(&ino->count);
- p_ino = autofs4_dentry_ino(dentry->d_parent);
+ p_ino = autofs_dentry_ino(dentry->d_parent);
if (p_ino && !IS_ROOT(dentry))
atomic_inc(&p_ino->count);
@@ -610,20 +604,20 @@ static int autofs4_dir_symlink(struct inode *dir,
* If a process is blocked on the dentry waiting for the expire to finish,
* it will invalidate the dentry and try to mount with a new one.
*
- * Also see autofs4_dir_rmdir()..
+ * Also see autofs_dir_rmdir().
*/
-static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
+static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
/* This allows root to remove symlinks */
- if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
+ if (!autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (atomic_dec_and_test(&ino->count)) {
- p_ino = autofs4_dentry_ino(dentry->d_parent);
+ p_ino = autofs_dentry_ino(dentry->d_parent);
if (p_ino && !IS_ROOT(dentry))
atomic_dec(&p_ino->count);
}
@@ -635,7 +629,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
dir->i_mtime = current_time(dir);
spin_lock(&sbi->lookup_lock);
- __autofs4_add_expiring(dentry);
+ __autofs_add_expiring(dentry);
d_drop(dentry);
spin_unlock(&sbi->lookup_lock);
@@ -692,15 +686,15 @@ static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
managed_dentry_set_managed(parent);
}
-static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
+static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
pr_debug("dentry %p, removing %pd\n", dentry, dentry);
- if (!autofs4_oz_mode(sbi))
+ if (!autofs_oz_mode(sbi))
return -EACCES;
spin_lock(&sbi->lookup_lock);
@@ -708,7 +702,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
spin_unlock(&sbi->lookup_lock);
return -ENOTEMPTY;
}
- __autofs4_add_expiring(dentry);
+ __autofs_add_expiring(dentry);
d_drop(dentry);
spin_unlock(&sbi->lookup_lock);
@@ -716,7 +710,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
autofs_clear_leaf_automount_flags(dentry);
if (atomic_dec_and_test(&ino->count)) {
- p_ino = autofs4_dentry_ino(dentry->d_parent);
+ p_ino = autofs_dentry_ino(dentry->d_parent);
if (p_ino && dentry->d_parent != dentry)
atomic_dec(&p_ino->count);
}
@@ -730,26 +724,26 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
return 0;
}
-static int autofs4_dir_mkdir(struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static int autofs_dir_mkdir(struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
- struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
struct inode *inode;
- if (!autofs4_oz_mode(sbi))
+ if (!autofs_oz_mode(sbi))
return -EACCES;
pr_debug("dentry %p, creating %pd\n", dentry, dentry);
BUG_ON(!ino);
- autofs4_clean_ino(ino);
+ autofs_clean_ino(ino);
- autofs4_del_active(dentry);
+ autofs_del_active(dentry);
- inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
+ inode = autofs_get_inode(dir->i_sb, S_IFDIR | mode);
if (!inode)
return -ENOMEM;
d_add(dentry, inode);
@@ -759,7 +753,7 @@ static int autofs4_dir_mkdir(struct inode *dir,
dget(dentry);
atomic_inc(&ino->count);
- p_ino = autofs4_dentry_ino(dentry->d_parent);
+ p_ino = autofs_dentry_ino(dentry->d_parent);
if (p_ino && !IS_ROOT(dentry))
atomic_inc(&p_ino->count);
inc_nlink(dir);
@@ -770,7 +764,7 @@ static int autofs4_dir_mkdir(struct inode *dir,
/* Get/set timeout ioctl() operation */
#ifdef CONFIG_COMPAT
-static inline int autofs4_compat_get_set_timeout(struct autofs_sb_info *sbi,
+static inline int autofs_compat_get_set_timeout(struct autofs_sb_info *sbi,
compat_ulong_t __user *p)
{
unsigned long ntimeout;
@@ -795,7 +789,7 @@ error:
}
#endif
-static inline int autofs4_get_set_timeout(struct autofs_sb_info *sbi,
+static inline int autofs_get_set_timeout(struct autofs_sb_info *sbi,
unsigned long __user *p)
{
unsigned long ntimeout;
@@ -820,14 +814,14 @@ error:
}
/* Return protocol version */
-static inline int autofs4_get_protover(struct autofs_sb_info *sbi,
+static inline int autofs_get_protover(struct autofs_sb_info *sbi,
int __user *p)
{
return put_user(sbi->version, p);
}
/* Return protocol sub version */
-static inline int autofs4_get_protosubver(struct autofs_sb_info *sbi,
+static inline int autofs_get_protosubver(struct autofs_sb_info *sbi,
int __user *p)
{
return put_user(sbi->sub_version, p);
@@ -836,7 +830,7 @@ static inline int autofs4_get_protosubver(struct autofs_sb_info *sbi,
/*
* Tells the daemon whether it can umount the autofs mount.
*/
-static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p)
+static inline int autofs_ask_umount(struct vfsmount *mnt, int __user *p)
{
int status = 0;
@@ -850,14 +844,14 @@ static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p)
return status;
}
-/* Identify autofs4_dentries - this is so we can tell if there's
+/* Identify autofs_dentries - this is so we can tell if there's
* an extra dentry refcount or not. We only hold a refcount on the
* dentry if its non-negative (ie, d_inode != NULL)
*/
-int is_autofs4_dentry(struct dentry *dentry)
+int is_autofs_dentry(struct dentry *dentry)
{
return dentry && d_really_is_positive(dentry) &&
- dentry->d_op == &autofs4_dentry_operations &&
+ dentry->d_op == &autofs_dentry_operations &&
dentry->d_fsdata != NULL;
}
@@ -865,10 +859,10 @@ int is_autofs4_dentry(struct dentry *dentry)
* ioctl()'s on the root directory is the chief method for the daemon to
* generate kernel reactions
*/
-static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
+static int autofs_root_ioctl_unlocked(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- struct autofs_sb_info *sbi = autofs4_sbi(inode->i_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb);
void __user *p = (void __user *)arg;
pr_debug("cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u\n",
@@ -878,64 +872,63 @@ static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
_IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT)
return -ENOTTY;
- if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
+ if (!autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
return -EPERM;
switch (cmd) {
case AUTOFS_IOC_READY: /* Wait queue: go ahead and retry */
- return autofs4_wait_release(sbi, (autofs_wqt_t) arg, 0);
+ return autofs_wait_release(sbi, (autofs_wqt_t) arg, 0);
case AUTOFS_IOC_FAIL: /* Wait queue: fail with ENOENT */
- return autofs4_wait_release(sbi, (autofs_wqt_t) arg, -ENOENT);
+ return autofs_wait_release(sbi, (autofs_wqt_t) arg, -ENOENT);
case AUTOFS_IOC_CATATONIC: /* Enter catatonic mode (daemon shutdown) */
- autofs4_catatonic_mode(sbi);
+ autofs_catatonic_mode(sbi);
return 0;
case AUTOFS_IOC_PROTOVER: /* Get protocol version */
- return autofs4_get_protover(sbi, p);
+ return autofs_get_protover(sbi, p);
case AUTOFS_IOC_PROTOSUBVER: /* Get protocol sub version */
- return autofs4_get_protosubver(sbi, p);
+ return autofs_get_protosubver(sbi, p);
case AUTOFS_IOC_SETTIMEOUT:
- return autofs4_get_set_timeout(sbi, p);
+ return autofs_get_set_timeout(sbi, p);
#ifdef CONFIG_COMPAT
case AUTOFS_IOC_SETTIMEOUT32:
- return autofs4_compat_get_set_timeout(sbi, p);
+ return autofs_compat_get_set_timeout(sbi, p);
#endif
case AUTOFS_IOC_ASKUMOUNT:
- return autofs4_ask_umount(filp->f_path.mnt, p);
+ return autofs_ask_umount(filp->f_path.mnt, p);
/* return a single thing to expire */
case AUTOFS_IOC_EXPIRE:
- return autofs4_expire_run(inode->i_sb,
- filp->f_path.mnt, sbi, p);
+ return autofs_expire_run(inode->i_sb, filp->f_path.mnt, sbi, p);
/* same as above, but can send multiple expires through pipe */
case AUTOFS_IOC_EXPIRE_MULTI:
- return autofs4_expire_multi(inode->i_sb,
- filp->f_path.mnt, sbi, p);
+ return autofs_expire_multi(inode->i_sb,
+ filp->f_path.mnt, sbi, p);
default:
return -EINVAL;
}
}
-static long autofs4_root_ioctl(struct file *filp,
+static long autofs_root_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- return autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
+ return autofs_root_ioctl_unlocked(inode, filp, cmd, arg);
}
#ifdef CONFIG_COMPAT
-static long autofs4_root_compat_ioctl(struct file *filp,
+static long autofs_root_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
int ret;
if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
- ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
+ ret = autofs_root_ioctl_unlocked(inode, filp, cmd, arg);
else
- ret = autofs4_root_ioctl_unlocked(inode, filp, cmd,
+ ret = autofs_root_ioctl_unlocked(inode, filp, cmd,
(unsigned long) compat_ptr(arg));
return ret;
diff --git a/fs/autofs4/symlink.c b/fs/autofs/symlink.c
index ab0b4285a202..aad3902c0cc1 100644
--- a/fs/autofs4/symlink.c
+++ b/fs/autofs/symlink.c
@@ -8,22 +8,22 @@
#include "autofs_i.h"
-static const char *autofs4_get_link(struct dentry *dentry,
- struct inode *inode,
- struct delayed_call *done)
+static const char *autofs_get_link(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
{
struct autofs_sb_info *sbi;
struct autofs_info *ino;
if (!dentry)
return ERR_PTR(-ECHILD);
- sbi = autofs4_sbi(dentry->d_sb);
- ino = autofs4_dentry_ino(dentry);
- if (ino && !autofs4_oz_mode(sbi))
+ sbi = autofs_sbi(dentry->d_sb);
+ ino = autofs_dentry_ino(dentry);
+ if (ino && !autofs_oz_mode(sbi))
ino->last_used = jiffies;
return d_inode(dentry)->i_private;
}
-const struct inode_operations autofs4_symlink_inode_operations = {
- .get_link = autofs4_get_link
+const struct inode_operations autofs_symlink_inode_operations = {
+ .get_link = autofs_get_link
};
diff --git a/fs/autofs4/waitq.c b/fs/autofs/waitq.c
index be9c3dc048ab..f6385c6ef0a5 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs/waitq.c
@@ -7,19 +7,15 @@
* option, any later version, incorporated herein by reference.
*/
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/signal.h>
#include <linux/sched/signal.h>
-#include <linux/file.h>
#include "autofs_i.h"
/* We make this a static variable rather than a part of the superblock; it
* is better if we don't reassign numbers easily even across filesystems
*/
-static autofs_wqt_t autofs4_next_wait_queue = 1;
+static autofs_wqt_t autofs_next_wait_queue = 1;
-void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
+void autofs_catatonic_mode(struct autofs_sb_info *sbi)
{
struct autofs_wait_queue *wq, *nwq;
@@ -49,8 +45,8 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
mutex_unlock(&sbi->wq_mutex);
}
-static int autofs4_write(struct autofs_sb_info *sbi,
- struct file *file, const void *addr, int bytes)
+static int autofs_write(struct autofs_sb_info *sbi,
+ struct file *file, const void *addr, int bytes)
{
unsigned long sigpipe, flags;
const char *data = (const char *)addr;
@@ -82,7 +78,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
}
-static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+static void autofs_notify_daemon(struct autofs_sb_info *sbi,
struct autofs_wait_queue *wq,
int type)
{
@@ -167,23 +163,23 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
mutex_unlock(&sbi->wq_mutex);
- switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
+ switch (ret = autofs_write(sbi, pipe, &pkt, pktsz)) {
case 0:
break;
case -ENOMEM:
case -ERESTARTSYS:
/* Just fail this one */
- autofs4_wait_release(sbi, wq->wait_queue_token, ret);
+ autofs_wait_release(sbi, wq->wait_queue_token, ret);
break;
default:
- autofs4_catatonic_mode(sbi);
+ autofs_catatonic_mode(sbi);
break;
}
fput(pipe);
}
-static int autofs4_getpath(struct autofs_sb_info *sbi,
- struct dentry *dentry, char **name)
+static int autofs_getpath(struct autofs_sb_info *sbi,
+ struct dentry *dentry, char *name)
{
struct dentry *root = sbi->sb->s_root;
struct dentry *tmp;
@@ -193,7 +189,7 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
unsigned seq;
rename_retry:
- buf = *name;
+ buf = name;
len = 0;
seq = read_seqbegin(&rename_lock);
@@ -228,7 +224,7 @@ rename_retry:
}
static struct autofs_wait_queue *
-autofs4_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr)
+autofs_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr)
{
struct autofs_wait_queue *wq;
@@ -263,7 +259,7 @@ static int validate_request(struct autofs_wait_queue **wait,
return -ENOENT;
/* Wait in progress, continue; */
- wq = autofs4_find_wait(sbi, qstr);
+ wq = autofs_find_wait(sbi, qstr);
if (wq) {
*wait = wq;
return 1;
@@ -272,7 +268,7 @@ static int validate_request(struct autofs_wait_queue **wait,
*wait = NULL;
/* If we don't yet have any info this is a new request */
- ino = autofs4_dentry_ino(dentry);
+ ino = autofs_dentry_ino(dentry);
if (!ino)
return 1;
@@ -297,7 +293,7 @@ static int validate_request(struct autofs_wait_queue **wait,
if (sbi->catatonic)
return -ENOENT;
- wq = autofs4_find_wait(sbi, qstr);
+ wq = autofs_find_wait(sbi, qstr);
if (wq) {
*wait = wq;
return 1;
@@ -351,7 +347,7 @@ static int validate_request(struct autofs_wait_queue **wait,
return 1;
}
-int autofs4_wait(struct autofs_sb_info *sbi,
+int autofs_wait(struct autofs_sb_info *sbi,
const struct path *path, enum autofs_notify notify)
{
struct dentry *dentry = path->dentry;
@@ -399,7 +395,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
qstr.len = sprintf(name, "%p", dentry);
else {
- qstr.len = autofs4_getpath(sbi, dentry, &name);
+ qstr.len = autofs_getpath(sbi, dentry, name);
if (!qstr.len) {
kfree(name);
return -ENOENT;
@@ -430,15 +426,15 @@ int autofs4_wait(struct autofs_sb_info *sbi,
return -ENOMEM;
}
- wq->wait_queue_token = autofs4_next_wait_queue;
- if (++autofs4_next_wait_queue == 0)
- autofs4_next_wait_queue = 1;
+ wq->wait_queue_token = autofs_next_wait_queue;
+ if (++autofs_next_wait_queue == 0)
+ autofs_next_wait_queue = 1;
wq->next = sbi->queues;
sbi->queues = wq;
init_waitqueue_head(&wq->queue);
memcpy(&wq->name, &qstr, sizeof(struct qstr));
- wq->dev = autofs4_get_dev(sbi);
- wq->ino = autofs4_get_ino(sbi);
+ wq->dev = autofs_get_dev(sbi);
+ wq->ino = autofs_get_ino(sbi);
wq->uid = current_uid();
wq->gid = current_gid();
wq->pid = pid;
@@ -467,9 +463,9 @@ int autofs4_wait(struct autofs_sb_info *sbi,
wq->name.name, notify);
/*
- * autofs4_notify_daemon() may block; it will unlock ->wq_mutex
+ * autofs_notify_daemon() may block; it will unlock ->wq_mutex
*/
- autofs4_notify_daemon(sbi, wq, type);
+ autofs_notify_daemon(sbi, wq, type);
} else {
wq->wait_ctr++;
pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
@@ -500,12 +496,12 @@ int autofs4_wait(struct autofs_sb_info *sbi,
struct dentry *de = NULL;
/* direct mount or browsable map */
- ino = autofs4_dentry_ino(dentry);
+ ino = autofs_dentry_ino(dentry);
if (!ino) {
/* If not lookup actual dentry used */
de = d_lookup(dentry->d_parent, &dentry->d_name);
if (de)
- ino = autofs4_dentry_ino(de);
+ ino = autofs_dentry_ino(de);
}
/* Set mount requester */
@@ -530,7 +526,8 @@ int autofs4_wait(struct autofs_sb_info *sbi,
}
-int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status)
+int autofs_wait_release(struct autofs_sb_info *sbi,
+ autofs_wqt_t wait_queue_token, int status)
{
struct autofs_wait_queue *wq, **wql;
diff --git a/fs/autofs4/Kconfig b/fs/autofs4/Kconfig
index 44727bf18297..99fda4d6da25 100644
--- a/fs/autofs4/Kconfig
+++ b/fs/autofs4/Kconfig
@@ -1,5 +1,7 @@
config AUTOFS4_FS
- tristate "Kernel automounter version 4 support (also supports v3)"
+ tristate "Kernel automounter version 4 support (also supports v3 and v5)"
+ default n
+ depends on AUTOFS_FS = n
help
The automounter is a tool to automatically mount remote file systems
on demand. This implementation is partially kernel-based to reduce
@@ -7,14 +9,38 @@ config AUTOFS4_FS
automounter (amd), which is a pure user space daemon.
To use the automounter you need the user-space tools from
- <https://www.kernel.org/pub/linux/daemons/autofs/v4/>; you also
- want to answer Y to "NFS file system support", below.
+ <https://www.kernel.org/pub/linux/daemons/autofs/>; you also want
+ to answer Y to "NFS file system support", below.
- To compile this support as a module, choose M here: the module will be
- called autofs4. You will need to add "alias autofs autofs4" to your
- modules configuration file.
+ This module is in the process of being renamed from autofs4 to
+ autofs. Since autofs is now the only module that provides the
+ autofs file system the module is not version 4 specific.
- If you are not a part of a fairly large, distributed network or
- don't have a laptop which needs to dynamically reconfigure to the
- local network, you probably do not need an automounter, and can say
- N here.
+ The autofs4 module is now built from the source located in
+ fs/autofs. The autofs4 directory and its configuration entry
+ will be removed two kernel versions from the inclusion of this
+ change.
+
+ Changes that will need to be made should be limited to:
+ - source include statements should be changed from autofs_fs4.h to
+ autofs_fs.h since these two header files have been merged.
+ - user space scripts that manually load autofs4.ko should be
+ changed to load autofs.ko. But since the module directory name
+ and the module name are the same as the file system name there
+ is no need to manually load module.
+ - any "alias autofs autofs4" will need to be removed.
+ - due to the autofs4 module directory name not being the same as
+ its file system name autoloading didn't work properly. Because
+ of this kernel configurations would often build the module into
+ the kernel. This may have resulted in selinux policies that will
+ prevent the autofs module from autoloading and will need to be
+ updated.
+
+ Please configure AUTOFS_FS instead of AUTOFS4_FS from now on.
+
+ NOTE: Since the modules autofs and autofs4 use the same file system
+ type name of "autofs" only one can be built. The "depends"
+ above will result in AUTOFS4_FS not appearing in .config for
+ any setting of AUTOFS_FS other than n and AUTOFS4_FS will
+ appear under the AUTOFS_FS entry otherwise which is intended
+ to draw attention to the module rename change.
diff --git a/fs/autofs4/Makefile b/fs/autofs4/Makefile
index a811c1f7d9ab..417dd726d9ef 100644
--- a/fs/autofs4/Makefile
+++ b/fs/autofs4/Makefile
@@ -4,4 +4,6 @@
obj-$(CONFIG_AUTOFS4_FS) += autofs4.o
-autofs4-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
+autofs4-objs := ../autofs/init.o ../autofs/inode.o ../autofs/root.o \
+ ../autofs/symlink.o ../autofs/waitq.o ../autofs/expire.o \
+ ../autofs/dev-ioctl.o
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index a41b48f82a70..4de191563261 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -387,8 +387,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
s = strchr(p, del);
if (!s)
goto einval;
- *s++ = '\0';
- e->offset = simple_strtoul(p, &p, 10);
+ *s = '\0';
+ if (p != s) {
+ int r = kstrtoint(p, 10, &e->offset);
+ if (r != 0 || e->offset < 0)
+ goto einval;
+ }
+ p = s;
if (*p++)
goto einval;
pr_debug("register: offset: %#x\n", e->offset);
@@ -428,7 +433,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
if (e->mask &&
string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
goto einval;
- if (e->size + e->offset > BINPRM_BUF_SIZE)
+ if (e->size > BINPRM_BUF_SIZE ||
+ BINPRM_BUF_SIZE - e->size < e->offset)
goto einval;
pr_debug("register: magic/mask length: %i\n", e->size);
if (USE_DEBUG) {
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bef6934b6189..05e12aea2404 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -216,6 +216,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
bio.bi_write_hint = iocb->ki_hint;
bio.bi_private = current;
bio.bi_end_io = blkdev_bio_end_io_simple;
+ bio.bi_ioprio = iocb->ki_ioprio;
ret = bio_iov_iter_get_pages(&bio, iter);
if (unlikely(ret))
@@ -355,6 +356,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bio->bi_write_hint = iocb->ki_hint;
bio->bi_private = dio;
bio->bi_end_io = blkdev_bio_end_io;
+ bio->bi_ioprio = iocb->ki_ioprio;
ret = bio_iov_iter_get_pages(bio, iter);
if (unlikely(ret)) {
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index 4f3884835267..dd95a6fa24bf 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -98,4 +98,18 @@ struct cifs_ace {
struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
} __attribute__((packed));
+/*
+ * Minimum security identifier can be one for system defined Users
+ * and Groups such as NULL SID and World or Built-in accounts such
+ * as Administrator and Guest and consists of
+ * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority)
+ */
+#define MIN_SID_LEN (1 + 1 + 6 + 4) /* in bytes */
+
+/*
+ * Minimum security descriptor can be one without any SACL and DACL and can
+ * consist of revision, type, and two sids of minimum size for owner and group
+ */
+#define MIN_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
+
#endif /* _CIFSACL_H */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a6ef088e057b..937251cc61c0 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -37,6 +37,7 @@
#include <crypto/aead.h>
int __cifs_calc_signature(struct smb_rqst *rqst,
+ int start,
struct TCP_Server_Info *server, char *signature,
struct shash_desc *shash)
{
@@ -45,10 +46,7 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
- if (n_vec < 2 || iov[0].iov_len != 4)
- return -EIO;
-
- for (i = 1; i < n_vec; i++) {
+ for (i = start; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
@@ -68,11 +66,12 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
/* now hash over the rq_pages array */
for (i = 0; i < rqst->rq_npages; i++) {
- void *kaddr = kmap(rqst->rq_pages[i]);
- size_t len = rqst->rq_pagesz;
+ void *kaddr;
+ unsigned int len, offset;
+
+ rqst_page_get_length(rqst, i, &len, &offset);
- if (i == rqst->rq_npages - 1)
- len = rqst->rq_tailsz;
+ kaddr = (char *) kmap(rqst->rq_pages[i]) + offset;
crypto_shash_update(shash, kaddr, len);
@@ -119,7 +118,7 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
return rc;
}
- return __cifs_calc_signature(rqst, server, signature,
+ return __cifs_calc_signature(rqst, 1, server, signature,
&server->secmech.sdescmd5->shash);
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index eb7b6573f322..d5aa7ae917bf 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -698,8 +698,8 @@ static int cifs_set_super(struct super_block *sb, void *data)
}
static struct dentry *
-cifs_do_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+cifs_smb3_do_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, bool is_smb3)
{
int rc;
struct super_block *sb;
@@ -710,7 +710,7 @@ cifs_do_mount(struct file_system_type *fs_type,
cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
- volume_info = cifs_get_volume_info((char *)data, dev_name);
+ volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
if (IS_ERR(volume_info))
return ERR_CAST(volume_info);
@@ -790,6 +790,20 @@ out_nls:
goto out;
}
+static struct dentry *
+smb3_do_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return cifs_smb3_do_mount(fs_type, flags, dev_name, data, true);
+}
+
+static struct dentry *
+cifs_do_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return cifs_smb3_do_mount(fs_type, flags, dev_name, data, false);
+}
+
static ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
@@ -925,7 +939,7 @@ MODULE_ALIAS_FS("cifs");
static struct file_system_type smb3_fs_type = {
.owner = THIS_MODULE,
.name = "smb3",
- .mount = cifs_do_mount,
+ .mount = smb3_do_mount,
.kill_sb = cifs_kill_sb,
/* .fs_flags */
};
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 08d1cdd96701..1efa2e65bc1a 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1019,6 +1019,12 @@ tlink_tcon(struct tcon_link *tlink)
return tlink->tl_tcon;
}
+static inline struct tcon_link *
+cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
+{
+ return cifs_sb->master_tlink;
+}
+
extern void cifs_put_tlink(struct tcon_link *tlink);
static inline struct tcon_link *
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 7933c5f9c076..4e0d183c3d10 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -211,7 +211,7 @@ extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
extern int cifs_match_super(struct super_block *, void *);
extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
extern struct smb_vol *cifs_get_volume_info(char *mount_data,
- const char *devname);
+ const char *devname, bool is_smb3);
extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
extern void cifs_umount(struct cifs_sb_info *);
extern void cifs_mark_open_files_invalid(struct cifs_tcon *tcon);
@@ -544,7 +544,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const unsigned char *path, char *pbuf,
unsigned int *pbytes_written);
-int __cifs_calc_signature(struct smb_rqst *rqst,
+int __cifs_calc_signature(struct smb_rqst *rqst, int start,
struct TCP_Server_Info *server, char *signature,
struct shash_desc *shash);
enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
@@ -557,4 +557,7 @@ int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
struct sdesc **sdesc);
void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
+extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
+ unsigned int *len, unsigned int *offset);
+
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e5a2fe7f0dd4..96645a7d8f27 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -320,7 +320,7 @@ static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);
static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
- const char *devname);
+ const char *devname, bool is_smb3);
/*
* cifs tcp session reconnection
@@ -1166,7 +1166,7 @@ cifs_parse_cache_flavor(char *value, struct smb_vol *vol)
}
static int
-cifs_parse_smb_version(char *value, struct smb_vol *vol)
+cifs_parse_smb_version(char *value, struct smb_vol *vol, bool is_smb3)
{
substring_t args[MAX_OPT_ARGS];
@@ -1176,6 +1176,10 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol)
cifs_dbg(VFS, "mount with legacy dialect disabled\n");
return 1;
}
+ if (is_smb3) {
+ cifs_dbg(VFS, "vers=1.0 (cifs) not permitted when mounting with smb3\n");
+ return 1;
+ }
vol->ops = &smb1_operations;
vol->vals = &smb1_values;
break;
@@ -1184,6 +1188,10 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol)
cifs_dbg(VFS, "mount with legacy dialect disabled\n");
return 1;
}
+ if (is_smb3) {
+ cifs_dbg(VFS, "vers=2.0 not permitted when mounting with smb3\n");
+ return 1;
+ }
vol->ops = &smb20_operations;
vol->vals = &smb20_values;
break;
@@ -1272,7 +1280,7 @@ cifs_parse_devname(const char *devname, struct smb_vol *vol)
static int
cifs_parse_mount_options(const char *mountdata, const char *devname,
- struct smb_vol *vol)
+ struct smb_vol *vol, bool is_smb3)
{
char *data, *end;
char *mountdata_copy = NULL, *options;
@@ -1985,7 +1993,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (string == NULL)
goto out_nomem;
- if (cifs_parse_smb_version(string, vol) != 0)
+ if (cifs_parse_smb_version(string, vol, is_smb3) != 0)
goto cifs_parse_mount_err;
got_version = true;
break;
@@ -3116,12 +3124,6 @@ cifs_put_tlink(struct tcon_link *tlink)
return;
}
-static inline struct tcon_link *
-cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
-{
- return cifs_sb->master_tlink;
-}
-
static int
compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
@@ -3803,7 +3805,7 @@ expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
} else {
cleanup_volume_info_contents(volume_info);
rc = cifs_setup_volume_info(volume_info, mdata,
- fake_devname);
+ fake_devname, false);
}
kfree(fake_devname);
kfree(cifs_sb->mountdata);
@@ -3816,11 +3818,11 @@ expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
static int
cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
- const char *devname)
+ const char *devname, bool is_smb3)
{
int rc = 0;
- if (cifs_parse_mount_options(mount_data, devname, volume_info))
+ if (cifs_parse_mount_options(mount_data, devname, volume_info, is_smb3))
return -EINVAL;
if (volume_info->nullauth) {
@@ -3854,7 +3856,7 @@ cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
}
struct smb_vol *
-cifs_get_volume_info(char *mount_data, const char *devname)
+cifs_get_volume_info(char *mount_data, const char *devname, bool is_smb3)
{
int rc;
struct smb_vol *volume_info;
@@ -3863,7 +3865,7 @@ cifs_get_volume_info(char *mount_data, const char *devname)
if (!volume_info)
return ERR_PTR(-ENOMEM);
- rc = cifs_setup_volume_info(volume_info, mount_data, devname);
+ rc = cifs_setup_volume_info(volume_info, mount_data, devname, is_smb3);
if (rc) {
cifs_cleanup_volume_info(volume_info);
volume_info = ERR_PTR(rc);
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 889a840172eb..de41f96aba49 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -421,7 +421,8 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
return -ENOMEM;
}
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, pfile_info, NULL);
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, pfile_info, NULL,
+ NULL);
if (rc)
goto qmf_out_open_fail;
@@ -478,7 +479,8 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ NULL);
if (rc) {
kfree(utf16_path);
return rc;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index aba3fc3058da..f90d4ad6624c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -905,3 +905,20 @@ cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
crypto_free_shash(*shash);
*shash = NULL;
}
+
+/**
+ * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
+ * Input: rqst - a smb_rqst, page - a page index for rqst
+ * Output: *len - the length for this page, *offset - the offset for this page
+ */
+void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
+ unsigned int *len, unsigned int *offset)
+{
+ *len = rqst->rq_pagesz;
+ *offset = (page == 0) ? rqst->rq_offset : 0;
+
+ if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
+ *len = rqst->rq_tailsz;
+ else if (page == 0)
+ *len = rqst->rq_pagesz - rqst->rq_offset;
+}
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 12af5dba742b..788412675723 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -64,7 +64,8 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
if (oparms->tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
memcpy(smb2_oplock + 1, fid->lease_key, SMB2_LEASE_KEY_SIZE);
- rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL);
+ rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL,
+ NULL);
if (rc)
goto out;
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index a6e786e39248..d01ad706d7fc 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -71,7 +71,8 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ NULL);
if (rc) {
kfree(utf16_path);
return rc;
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index cb5728e3d87d..e2bec47c6845 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -453,8 +453,10 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
start_of_path = from + 1;
#ifdef CONFIG_CIFS_SMB311
/* SMB311 POSIX extensions paths do not include leading slash */
- else if (cifs_sb_master_tcon(cifs_sb)->posix_extensions)
+ else if (cifs_sb_master_tlink(cifs_sb) &&
+ cifs_sb_master_tcon(cifs_sb)->posix_extensions) {
start_of_path = from + 1;
+ }
#endif /* 311 */
else
start_of_path = from;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 950d0ab2cc61..b15f5957d645 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -348,7 +348,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
oparams.fid = pfid;
oparams.reconnect = false;
- rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
if (rc == 0) {
memcpy(tcon->prfid, pfid, sizeof(struct cifs_fid));
tcon->valid_root_fid = true;
@@ -375,7 +375,8 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
oparms.reconnect = false;
if (no_cached_open)
- rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
+ NULL);
else
rc = open_shroot(xid, tcon, &fid);
@@ -413,7 +414,7 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
if (rc)
return;
@@ -449,7 +450,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
if (rc) {
kfree(utf16_path);
return rc;
@@ -598,7 +599,7 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
kfree(utf16_path);
if (rc) {
cifs_dbg(FYI, "open failed rc=%d\n", rc);
@@ -677,7 +678,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
kfree(utf16_path);
if (rc) {
cifs_dbg(FYI, "open failed rc=%d\n", rc);
@@ -1261,7 +1262,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
kfree(utf16_path);
if (rc) {
cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
@@ -1361,7 +1362,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
if (rc)
return rc;
buf->f_type = SMB2_MAGIC_NUMBER;
@@ -1515,7 +1516,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct cifs_fid fid;
struct kvec err_iov = {NULL, 0};
- struct smb2_err_rsp *err_buf;
+ struct smb2_err_rsp *err_buf = NULL;
+ int resp_buftype;
struct smb2_symlink_err_rsp *symlink;
unsigned int sub_len;
unsigned int sub_offset;
@@ -1535,18 +1537,18 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov);
-
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
+ &resp_buftype);
if (!rc || !err_iov.iov_base) {
- kfree(utf16_path);
- return -ENOENT;
+ rc = -ENOENT;
+ goto querty_exit;
}
err_buf = err_iov.iov_base;
if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
- kfree(utf16_path);
- return -ENOENT;
+ rc = -ENOENT;
+ goto querty_exit;
}
/* open must fail on symlink - reset rc */
@@ -1558,25 +1560,28 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
print_offset = le16_to_cpu(symlink->PrintNameOffset);
if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
- kfree(utf16_path);
- return -ENOENT;
+ rc = -ENOENT;
+ goto querty_exit;
}
if (err_iov.iov_len <
SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
- kfree(utf16_path);
- return -ENOENT;
+ rc = -ENOENT;
+ goto querty_exit;
}
*target_path = cifs_strndup_from_utf16(
(char *)symlink->PathBuffer + sub_offset,
sub_len, true, cifs_sb->local_nls);
if (!(*target_path)) {
- kfree(utf16_path);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto querty_exit;
}
convert_delimiter(*target_path, '/');
cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+
+ querty_exit:
+ free_rsp_buf(resp_buftype, err_buf);
kfree(utf16_path);
return rc;
}
@@ -1649,7 +1654,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
kfree(utf16_path);
if (!rc) {
rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
@@ -1712,7 +1717,7 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
kfree(utf16_path);
if (!rc) {
rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
@@ -2189,9 +2194,10 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
smb2_sg_set_buf(&sg[i], rqst->rq_iov[i+1].iov_base,
rqst->rq_iov[i+1].iov_len);
for (j = 0; i < sg_len - 1; i++, j++) {
- unsigned int len = (j < rqst->rq_npages - 1) ? rqst->rq_pagesz
- : rqst->rq_tailsz;
- sg_set_page(&sg[i], rqst->rq_pages[j], len, 0);
+ unsigned int len, offset;
+
+ rqst_page_get_length(rqst, j, &len, &offset);
+ sg_set_page(&sg[i], rqst->rq_pages[j], len, offset);
}
smb2_sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE);
return sg;
@@ -2229,7 +2235,7 @@ static int
crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
{
struct smb2_transform_hdr *tr_hdr =
- (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
+ (struct smb2_transform_hdr *)rqst->rq_iov[1].iov_base;
unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int rc = 0;
struct scatterlist *sg;
@@ -2338,6 +2344,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
return rc;
new_rq->rq_pages = pages;
+ new_rq->rq_offset = old_rq->rq_offset;
new_rq->rq_npages = old_rq->rq_npages;
new_rq->rq_pagesz = old_rq->rq_pagesz;
new_rq->rq_tailsz = old_rq->rq_tailsz;
@@ -2379,10 +2386,14 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
/* copy pages form the old */
for (i = 0; i < npages; i++) {
- char *dst = kmap(new_rq->rq_pages[i]);
- char *src = kmap(old_rq->rq_pages[i]);
- unsigned int len = (i < npages - 1) ? new_rq->rq_pagesz :
- new_rq->rq_tailsz;
+ char *dst, *src;
+ unsigned int offset, len;
+
+ rqst_page_get_length(new_rq, i, &len, &offset);
+
+ dst = (char *) kmap(new_rq->rq_pages[i]) + offset;
+ src = (char *) kmap(old_rq->rq_pages[i]) + offset;
+
memcpy(dst, src, len);
kunmap(new_rq->rq_pages[i]);
kunmap(old_rq->rq_pages[i]);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 281fbc1dc720..48e2004c75fb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1889,7 +1889,7 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
__u8 *oplock, struct smb2_file_all_info *buf,
- struct kvec *err_iov)
+ struct kvec *err_iov, int *buftype)
{
struct smb2_create_req *req;
struct smb2_create_rsp *rsp;
@@ -2052,6 +2052,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
if (err_iov && rsp) {
*err_iov = rsp_iov;
+ *buftype = resp_buftype;
resp_buftype = CIFS_NO_BUFFER;
rsp = NULL;
}
@@ -2492,8 +2493,7 @@ SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
return query_info(xid, tcon, persistent_fid, volatile_fid,
0, SMB2_O_INFO_SECURITY, additional_info,
- SMB2_MAX_BUFFER_SIZE,
- sizeof(struct smb2_file_all_info), data, plen);
+ SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
}
int
@@ -2721,8 +2721,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
rdata->mr = smbd_register_mr(
server->smbd_conn, rdata->pages,
- rdata->nr_pages, rdata->tailsz,
- true, need_invalidate);
+ rdata->nr_pages, rdata->page_offset,
+ rdata->tailsz, true, need_invalidate);
if (!rdata->mr)
return -ENOBUFS;
@@ -3108,16 +3108,22 @@ smb2_async_writev(struct cifs_writedata *wdata,
wdata->mr = smbd_register_mr(
server->smbd_conn, wdata->pages,
- wdata->nr_pages, wdata->tailsz,
- false, need_invalidate);
+ wdata->nr_pages, wdata->page_offset,
+ wdata->tailsz, false, need_invalidate);
if (!wdata->mr) {
rc = -ENOBUFS;
goto async_writev_out;
}
req->Length = 0;
req->DataOffset = 0;
- req->RemainingBytes =
- cpu_to_le32((wdata->nr_pages-1)*PAGE_SIZE + wdata->tailsz);
+ if (wdata->nr_pages > 1)
+ req->RemainingBytes =
+ cpu_to_le32(
+ (wdata->nr_pages - 1) * wdata->pagesz -
+ wdata->page_offset + wdata->tailsz
+ );
+ else
+ req->RemainingBytes = cpu_to_le32(wdata->tailsz);
req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
if (need_invalidate)
req->Channel = SMB2_CHANNEL_RDMA_V1;
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 908555b1c6b5..c84020057bd8 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -125,7 +125,7 @@ extern int SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon);
extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
__le16 *path, __u8 *oplock,
struct smb2_file_all_info *buf,
- struct kvec *err_iov);
+ struct kvec *err_iov, int *resp_buftype);
extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen,
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 2c671123a6bf..349d5ccf854c 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -171,7 +171,9 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
unsigned char *sigptr = smb2_signature;
struct kvec *iov = rqst->rq_iov;
- struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[1].iov_base;
+ int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
+ struct smb2_sync_hdr *shdr =
+ (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
struct cifs_ses *ses;
ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -202,7 +204,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
return rc;
}
- rc = __cifs_calc_signature(rqst, server, sigptr,
+ rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
&server->secmech.sdeschmacsha256->shash);
if (!rc)
@@ -412,7 +414,9 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
unsigned char smb3_signature[SMB2_CMACAES_SIZE];
unsigned char *sigptr = smb3_signature;
struct kvec *iov = rqst->rq_iov;
- struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[1].iov_base;
+ int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
+ struct smb2_sync_hdr *shdr =
+ (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
struct cifs_ses *ses;
ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -443,7 +447,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
return rc;
}
- rc = __cifs_calc_signature(rqst, server, sigptr,
+ rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
&server->secmech.sdesccmacaes->shash);
if (!rc)
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index c62f7c95683c..e459c97151b3 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -17,6 +17,7 @@
#include <linux/highmem.h>
#include "smbdirect.h"
#include "cifs_debug.h"
+#include "cifsproto.h"
static struct smbd_response *get_empty_queue_buffer(
struct smbd_connection *info);
@@ -2003,10 +2004,12 @@ read_rfc1002_done:
* return value: actual data read
*/
static int smbd_recv_page(struct smbd_connection *info,
- struct page *page, unsigned int to_read)
+ struct page *page, unsigned int page_offset,
+ unsigned int to_read)
{
int ret;
char *to_address;
+ void *page_address;
/* make sure we have the page ready for read */
ret = wait_event_interruptible(
@@ -2014,16 +2017,17 @@ static int smbd_recv_page(struct smbd_connection *info,
info->reassembly_data_length >= to_read ||
info->transport_status != SMBD_CONNECTED);
if (ret)
- return 0;
+ return ret;
/* now we can read from reassembly queue and not sleep */
- to_address = kmap_atomic(page);
+ page_address = kmap_atomic(page);
+ to_address = (char *) page_address + page_offset;
log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
page, to_address, to_read);
ret = smbd_recv_buf(info, to_address, to_read);
- kunmap_atomic(to_address);
+ kunmap_atomic(page_address);
return ret;
}
@@ -2037,7 +2041,7 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
{
char *buf;
struct page *page;
- unsigned int to_read;
+ unsigned int to_read, page_offset;
int rc;
info->smbd_recv_pending++;
@@ -2051,15 +2055,16 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
case READ | ITER_BVEC:
page = msg->msg_iter.bvec->bv_page;
+ page_offset = msg->msg_iter.bvec->bv_offset;
to_read = msg->msg_iter.bvec->bv_len;
- rc = smbd_recv_page(info, page, to_read);
+ rc = smbd_recv_page(info, page, page_offset, to_read);
break;
default:
/* It's a bug in upper layer to get there */
cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
msg->msg_iter.type);
- rc = -EIO;
+ rc = -EINVAL;
}
info->smbd_recv_pending--;
@@ -2082,7 +2087,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
struct kvec vec;
int nvecs;
int size;
- int buflen = 0, remaining_data_length;
+ unsigned int buflen = 0, remaining_data_length;
int start, i, j;
int max_iov_size =
info->max_send_size - sizeof(struct smbd_data_transfer);
@@ -2113,10 +2118,17 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
buflen += iov[i].iov_len;
}
- /* add in the page array if there is one */
+ /*
+ * Add in the page array if there is one. The caller needs to set
+ * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
+ * ends at page boundary
+ */
if (rqst->rq_npages) {
- buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
- buflen += rqst->rq_tailsz;
+ if (rqst->rq_npages == 1)
+ buflen += rqst->rq_tailsz;
+ else
+ buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
+ rqst->rq_offset + rqst->rq_tailsz;
}
if (buflen + sizeof(struct smbd_data_transfer) >
@@ -2213,8 +2225,9 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
/* now sending pages if there are any */
for (i = 0; i < rqst->rq_npages; i++) {
- buflen = (i == rqst->rq_npages-1) ?
- rqst->rq_tailsz : rqst->rq_pagesz;
+ unsigned int offset;
+
+ rqst_page_get_length(rqst, i, &buflen, &offset);
nvecs = (buflen + max_iov_size - 1) / max_iov_size;
log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
buflen, nvecs);
@@ -2225,9 +2238,11 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
remaining_data_length -= size;
log_write(INFO, "sending pages i=%d offset=%d size=%d"
" remaining_data_length=%d\n",
- i, j*max_iov_size, size, remaining_data_length);
+ i, j*max_iov_size+offset, size,
+ remaining_data_length);
rc = smbd_post_send_page(
- info, rqst->rq_pages[i], j*max_iov_size,
+ info, rqst->rq_pages[i],
+ j*max_iov_size + offset,
size, remaining_data_length);
if (rc)
goto done;
@@ -2284,37 +2299,37 @@ static void smbd_mr_recovery_work(struct work_struct *work)
if (smbdirect_mr->state == MR_INVALIDATED ||
smbdirect_mr->state == MR_ERROR) {
- if (smbdirect_mr->state == MR_INVALIDATED) {
+ /* recover this MR entry */
+ rc = ib_dereg_mr(smbdirect_mr->mr);
+ if (rc) {
+ log_rdma_mr(ERR,
+ "ib_dereg_mr failed rc=%x\n",
+ rc);
+ smbd_disconnect_rdma_connection(info);
+ continue;
+ }
+
+ smbdirect_mr->mr = ib_alloc_mr(
+ info->pd, info->mr_type,
+ info->max_frmr_depth);
+ if (IS_ERR(smbdirect_mr->mr)) {
+ log_rdma_mr(ERR,
+ "ib_alloc_mr failed mr_type=%x "
+ "max_frmr_depth=%x\n",
+ info->mr_type,
+ info->max_frmr_depth);
+ smbd_disconnect_rdma_connection(info);
+ continue;
+ }
+
+ if (smbdirect_mr->state == MR_INVALIDATED)
ib_dma_unmap_sg(
info->id->device, smbdirect_mr->sgl,
smbdirect_mr->sgl_count,
smbdirect_mr->dir);
- smbdirect_mr->state = MR_READY;
- } else if (smbdirect_mr->state == MR_ERROR) {
-
- /* recover this MR entry */
- rc = ib_dereg_mr(smbdirect_mr->mr);
- if (rc) {
- log_rdma_mr(ERR,
- "ib_dereg_mr failed rc=%x\n",
- rc);
- smbd_disconnect_rdma_connection(info);
- }
- smbdirect_mr->mr = ib_alloc_mr(
- info->pd, info->mr_type,
- info->max_frmr_depth);
- if (IS_ERR(smbdirect_mr->mr)) {
- log_rdma_mr(ERR,
- "ib_alloc_mr failed mr_type=%x "
- "max_frmr_depth=%x\n",
- info->mr_type,
- info->max_frmr_depth);
- smbd_disconnect_rdma_connection(info);
- }
+ smbdirect_mr->state = MR_READY;
- smbdirect_mr->state = MR_READY;
- }
/* smbdirect_mr->state is updated by this function
* and is read and updated by I/O issuing CPUs trying
* to get a MR, the call to atomic_inc_return
@@ -2460,7 +2475,7 @@ again:
*/
struct smbd_mr *smbd_register_mr(
struct smbd_connection *info, struct page *pages[], int num_pages,
- int tailsz, bool writing, bool need_invalidate)
+ int offset, int tailsz, bool writing, bool need_invalidate)
{
struct smbd_mr *smbdirect_mr;
int rc, i;
@@ -2483,17 +2498,31 @@ struct smbd_mr *smbd_register_mr(
smbdirect_mr->sgl_count = num_pages;
sg_init_table(smbdirect_mr->sgl, num_pages);
- for (i = 0; i < num_pages - 1; i++)
- sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
+ log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
+ num_pages, offset, tailsz);
+
+ if (num_pages == 1) {
+ sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
+ goto skip_multiple_pages;
+ }
+ /* We have at least two pages to register */
+ sg_set_page(
+ &smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
+ i = 1;
+ while (i < num_pages - 1) {
+ sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
+ i++;
+ }
sg_set_page(&smbdirect_mr->sgl[i], pages[i],
tailsz ? tailsz : PAGE_SIZE, 0);
+skip_multiple_pages:
dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
smbdirect_mr->dir = dir;
rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
if (!rc) {
- log_rdma_mr(INFO, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
+ log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
num_pages, dir, rc);
goto dma_map_error;
}
@@ -2501,8 +2530,8 @@ struct smbd_mr *smbd_register_mr(
rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
NULL, PAGE_SIZE);
if (rc != num_pages) {
- log_rdma_mr(INFO,
- "ib_map_mr_sg failed rc = %x num_pages = %x\n",
+ log_rdma_mr(ERR,
+ "ib_map_mr_sg failed rc = %d num_pages = %x\n",
rc, num_pages);
goto map_mr_error;
}
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
index f9038daea194..1e419c21dc60 100644
--- a/fs/cifs/smbdirect.h
+++ b/fs/cifs/smbdirect.h
@@ -321,7 +321,7 @@ struct smbd_mr {
/* Interfaces to register and deregister MR for RDMA read/write */
struct smbd_mr *smbd_register_mr(
struct smbd_connection *info, struct page *pages[], int num_pages,
- int tailsz, bool writing, bool need_invalidate);
+ int offset, int tailsz, bool writing, bool need_invalidate);
int smbd_deregister_mr(struct smbd_mr *mr);
#else
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index e7254e386b79..24887a0898c0 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -212,10 +212,24 @@ rqst_len(struct smb_rqst *rqst)
for (i = 0; i < rqst->rq_nvec; i++)
buflen += iov[i].iov_len;
- /* add in the page array if there is one */
+ /*
+ * Add in the page array if there is one. The caller needs to make
+ * sure rq_offset and rq_tailsz are set correctly. If a buffer of
+ * multiple pages ends at page boundary, rq_tailsz needs to be set to
+ * PAGE_SIZE.
+ */
if (rqst->rq_npages) {
- buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
- buflen += rqst->rq_tailsz;
+ if (rqst->rq_npages == 1)
+ buflen += rqst->rq_tailsz;
+ else {
+ /*
+ * If there is more than one page, calculate the
+ * buffer length based on rq_offset and rq_tailsz
+ */
+ buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
+ rqst->rq_offset;
+ buflen += rqst->rq_tailsz;
+ }
}
return buflen;
@@ -274,15 +288,13 @@ __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
/* now walk the page array and send each page in it */
for (i = 0; i < rqst->rq_npages; i++) {
- size_t len = i == rqst->rq_npages - 1
- ? rqst->rq_tailsz
- : rqst->rq_pagesz;
- struct bio_vec bvec = {
- .bv_page = rqst->rq_pages[i],
- .bv_len = len
- };
+ struct bio_vec bvec;
+
+ bvec.bv_page = rqst->rq_pages[i];
+ rqst_page_get_length(rqst, i, &bvec.bv_len, &bvec.bv_offset);
+
iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
- &bvec, 1, len);
+ &bvec, 1, bvec.bv_len);
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
break;
diff --git a/fs/compat.c b/fs/compat.c
index 190b38b39d9e..4a0aaaf53217 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -16,79 +16,12 @@
*/
#include <linux/compat.h>
-#include <linux/ncp_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "internal.h"
-struct compat_ncp_mount_data {
- compat_int_t version;
- compat_uint_t ncp_fd;
- __compat_uid_t mounted_uid;
- compat_pid_t wdog_pid;
- unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
- compat_uint_t time_out;
- compat_uint_t retry_count;
- compat_uint_t flags;
- __compat_uid_t uid;
- __compat_gid_t gid;
- compat_mode_t file_mode;
- compat_mode_t dir_mode;
-};
-
-struct compat_ncp_mount_data_v4 {
- compat_int_t version;
- compat_ulong_t flags;
- compat_ulong_t mounted_uid;
- compat_long_t wdog_pid;
- compat_uint_t ncp_fd;
- compat_uint_t time_out;
- compat_uint_t retry_count;
- compat_ulong_t uid;
- compat_ulong_t gid;
- compat_ulong_t file_mode;
- compat_ulong_t dir_mode;
-};
-
-static void *do_ncp_super_data_conv(void *raw_data)
-{
- int version = *(unsigned int *)raw_data;
-
- if (version == 3) {
- struct compat_ncp_mount_data *c_n = raw_data;
- struct ncp_mount_data *n = raw_data;
-
- n->dir_mode = c_n->dir_mode;
- n->file_mode = c_n->file_mode;
- n->gid = c_n->gid;
- n->uid = c_n->uid;
- memmove (n->mounted_vol, c_n->mounted_vol, (sizeof (c_n->mounted_vol) + 3 * sizeof (unsigned int)));
- n->wdog_pid = c_n->wdog_pid;
- n->mounted_uid = c_n->mounted_uid;
- } else if (version == 4) {
- struct compat_ncp_mount_data_v4 *c_n = raw_data;
- struct ncp_mount_data_v4 *n = raw_data;
-
- n->dir_mode = c_n->dir_mode;
- n->file_mode = c_n->file_mode;
- n->gid = c_n->gid;
- n->uid = c_n->uid;
- n->retry_count = c_n->retry_count;
- n->time_out = c_n->time_out;
- n->ncp_fd = c_n->ncp_fd;
- n->wdog_pid = c_n->wdog_pid;
- n->mounted_uid = c_n->mounted_uid;
- n->flags = c_n->flags;
- } else if (version != 5) {
- return NULL;
- }
-
- return raw_data;
-}
-
-
struct compat_nfs_string {
compat_uint_t len;
compat_uptr_t data;
@@ -154,7 +87,6 @@ static int do_nfs4_super_data_conv(void *raw_data)
return 0;
}
-#define NCPFS_NAME "ncpfs"
#define NFS4_NAME "nfs4"
COMPAT_SYSCALL_DEFINE5(mount, const char __user *, dev_name,
@@ -183,9 +115,7 @@ COMPAT_SYSCALL_DEFINE5(mount, const char __user *, dev_name,
goto out2;
if (kernel_type && options) {
- if (!strcmp(kernel_type, NCPFS_NAME)) {
- do_ncp_super_data_conv(options);
- } else if (!strcmp(kernel_type, NFS4_NAME)) {
+ if (!strcmp(kernel_type, NFS4_NAME)) {
retval = -EINVAL;
if (do_nfs4_super_data_conv(options))
goto out3;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index ef80085ed564..9907475b4226 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -38,8 +38,6 @@
#include <linux/ppp-ioctl.h>
#include <linux/if_pppox.h>
#include <linux/mtio.h>
-#include <linux/auto_fs.h>
-#include <linux/auto_fs4.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
#include <linux/fb.h>
diff --git a/fs/dax.c b/fs/dax.c
index aa86d9f971a4..641192808bb6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -351,6 +351,19 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
}
}
+static struct page *dax_busy_page(void *entry)
+{
+ unsigned long pfn;
+
+ for_each_mapped_pfn(entry, pfn) {
+ struct page *page = pfn_to_page(pfn);
+
+ if (page_ref_count(page) > 1)
+ return page;
+ }
+ return NULL;
+}
+
/*
* Find radix tree entry at given index. If it points to an exceptional entry,
* return it with the radix tree entry locked. If the radix tree doesn't
@@ -492,6 +505,90 @@ restart:
return entry;
}
+/**
+ * dax_layout_busy_page - find first pinned page in @mapping
+ * @mapping: address space to scan for a page with ref count > 1
+ *
+ * DAX requires ZONE_DEVICE mapped pages. These pages are never
+ * 'onlined' to the page allocator so they are considered idle when
+ * page->count == 1. A filesystem uses this interface to determine if
+ * any page in the mapping is busy, i.e. for DMA, or other
+ * get_user_pages() usages.
+ *
+ * It is expected that the filesystem is holding locks to block the
+ * establishment of new mappings in this address_space. I.e. it expects
+ * to be able to run unmap_mapping_range() and subsequently not race
+ * mapping_mapped() becoming true.
+ */
+struct page *dax_layout_busy_page(struct address_space *mapping)
+{
+ pgoff_t indices[PAGEVEC_SIZE];
+ struct page *page = NULL;
+ struct pagevec pvec;
+ pgoff_t index, end;
+ unsigned i;
+
+ /*
+ * In the 'limited' case get_user_pages() for dax is disabled.
+ */
+ if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+ return NULL;
+
+ if (!dax_mapping(mapping) || !mapping_mapped(mapping))
+ return NULL;
+
+ pagevec_init(&pvec);
+ index = 0;
+ end = -1;
+
+ /*
+ * If we race get_user_pages_fast() here either we'll see the
+ * elevated page count in the pagevec_lookup and wait, or
+ * get_user_pages_fast() will see that the page it took a reference
+ * against is no longer mapped in the page tables and bail to the
+ * get_user_pages() slow path. The slow path is protected by
+ * pte_lock() and pmd_lock(). New references are not taken without
+ * holding those locks, and unmap_mapping_range() will not zero the
+ * pte or pmd without holding the respective lock, so we are
+ * guaranteed to either see new references or prevent new
+ * references from being established.
+ */
+ unmap_mapping_range(mapping, 0, 0, 1);
+
+ while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
+ indices)) {
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *pvec_ent = pvec.pages[i];
+ void *entry;
+
+ index = indices[i];
+ if (index >= end)
+ break;
+
+ if (!radix_tree_exceptional_entry(pvec_ent))
+ continue;
+
+ xa_lock_irq(&mapping->i_pages);
+ entry = get_unlocked_mapping_entry(mapping, index, NULL);
+ if (entry)
+ page = dax_busy_page(entry);
+ put_unlocked_mapping_entry(mapping, index, entry);
+ xa_unlock_irq(&mapping->i_pages);
+ if (page)
+ break;
+ }
+ pagevec_remove_exceptionals(&pvec);
+ pagevec_release(&pvec);
+ index++;
+
+ if (page)
+ break;
+ }
+ return page;
+}
+EXPORT_SYMBOL_GPL(dax_layout_busy_page);
+
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
pgoff_t index, bool trunc)
{
@@ -905,14 +1002,13 @@ out:
* If this page is ever written to we will re-fault and change the mapping to
* point to real DAX storage instead.
*/
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
struct vm_fault *vmf)
{
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
- int ret = VM_FAULT_NOPAGE;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
struct page *zero_page;
- void *entry2;
pfn_t pfn;
zero_page = ZERO_PAGE(0);
@@ -922,14 +1018,9 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
}
pfn = page_to_pfn_t(zero_page);
- entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
- RADIX_DAX_ZERO_PAGE, false);
- if (IS_ERR(entry2)) {
- ret = VM_FAULT_SIGBUS;
- goto out;
- }
-
- vm_insert_mixed(vmf->vma, vaddr, pfn);
+ dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
+ false);
+ ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
out:
trace_dax_load_hole(inode, vmf, ret);
return ret;
@@ -991,6 +1082,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
struct iov_iter *iter = data;
loff_t end = pos + length, done = 0;
ssize_t ret = 0;
+ size_t xfer;
int id;
if (iov_iter_rw(iter) == READ) {
@@ -1054,18 +1146,20 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
* vfs_write(), depending on which operation we are doing.
*/
if (iov_iter_rw(iter) == WRITE)
- map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
+ xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
map_len, iter);
else
- map_len = copy_to_iter(kaddr, map_len, iter);
- if (map_len <= 0) {
- ret = map_len ? map_len : -EFAULT;
- break;
- }
+ xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
+ map_len, iter);
+
+ pos += xfer;
+ length -= xfer;
+ done += xfer;
- pos += map_len;
- length -= map_len;
- done += map_len;
+ if (xfer == 0)
+ ret = -EFAULT;
+ if (xfer < map_len)
+ break;
}
dax_read_unlock(id);
@@ -1112,7 +1206,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
-static int dax_fault_return(int error)
+static vm_fault_t dax_fault_return(int error)
{
if (error == 0)
return VM_FAULT_NOPAGE;
@@ -1132,7 +1226,7 @@ static bool dax_fault_is_synchronous(unsigned long flags,
&& (iomap->flags & IOMAP_F_DIRTY);
}
-static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
int *iomap_errp, const struct iomap_ops *ops)
{
struct vm_area_struct *vma = vmf->vma;
@@ -1145,18 +1239,18 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
int error, major = 0;
bool write = vmf->flags & FAULT_FLAG_WRITE;
bool sync;
- int vmf_ret = 0;
+ vm_fault_t ret = 0;
void *entry;
pfn_t pfn;
- trace_dax_pte_fault(inode, vmf, vmf_ret);
+ trace_dax_pte_fault(inode, vmf, ret);
/*
* Check whether offset isn't beyond end of file now. Caller is supposed
* to hold locks serializing us with truncate / punch hole so this is
* a reliable test.
*/
if (pos >= i_size_read(inode)) {
- vmf_ret = VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
goto out;
}
@@ -1165,7 +1259,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
if (IS_ERR(entry)) {
- vmf_ret = dax_fault_return(PTR_ERR(entry));
+ ret = dax_fault_return(PTR_ERR(entry));
goto out;
}
@@ -1176,7 +1270,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
* retried.
*/
if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
- vmf_ret = VM_FAULT_NOPAGE;
+ ret = VM_FAULT_NOPAGE;
goto unlock_entry;
}
@@ -1189,7 +1283,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
if (iomap_errp)
*iomap_errp = error;
if (error) {
- vmf_ret = dax_fault_return(error);
+ ret = dax_fault_return(error);
goto unlock_entry;
}
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
@@ -1219,9 +1313,9 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
goto error_finish_iomap;
__SetPageUptodate(vmf->cow_page);
- vmf_ret = finish_fault(vmf);
- if (!vmf_ret)
- vmf_ret = VM_FAULT_DONE_COW;
+ ret = finish_fault(vmf);
+ if (!ret)
+ ret = VM_FAULT_DONE_COW;
goto finish_iomap;
}
@@ -1240,10 +1334,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
0, write && !sync);
- if (IS_ERR(entry)) {
- error = PTR_ERR(entry);
- goto error_finish_iomap;
- }
/*
* If we are doing synchronous page fault and inode needs fsync,
@@ -1257,23 +1347,20 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
goto error_finish_iomap;
}
*pfnp = pfn;
- vmf_ret = VM_FAULT_NEEDDSYNC | major;
+ ret = VM_FAULT_NEEDDSYNC | major;
goto finish_iomap;
}
trace_dax_insert_mapping(inode, vmf, entry);
if (write)
- error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
+ ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
else
- error = vm_insert_mixed(vma, vaddr, pfn);
+ ret = vmf_insert_mixed(vma, vaddr, pfn);
- /* -EBUSY is fine, somebody else faulted on the same PTE */
- if (error == -EBUSY)
- error = 0;
- break;
+ goto finish_iomap;
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
if (!write) {
- vmf_ret = dax_load_hole(mapping, entry, vmf);
+ ret = dax_load_hole(mapping, entry, vmf);
goto finish_iomap;
}
/*FALLTHRU*/
@@ -1284,12 +1371,12 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
}
error_finish_iomap:
- vmf_ret = dax_fault_return(error) | major;
+ ret = dax_fault_return(error);
finish_iomap:
if (ops->iomap_end) {
int copied = PAGE_SIZE;
- if (vmf_ret & VM_FAULT_ERROR)
+ if (ret & VM_FAULT_ERROR)
copied = 0;
/*
* The fault is done by now and there's no way back (other
@@ -1302,12 +1389,12 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
unlock_entry:
put_locked_mapping_entry(mapping, vmf->pgoff);
out:
- trace_dax_pte_fault_done(inode, vmf, vmf_ret);
- return vmf_ret;
+ trace_dax_pte_fault_done(inode, vmf, ret);
+ return ret | major;
}
#ifdef CONFIG_FS_DAX_PMD
-static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
+static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
void *entry)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1327,8 +1414,6 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
pfn = page_to_pfn_t(zero_page);
ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
- if (IS_ERR(ret))
- goto fallback;
ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (!pmd_none(*(vmf->pmd))) {
@@ -1348,7 +1433,7 @@ fallback:
return VM_FAULT_FALLBACK;
}
-static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
const struct iomap_ops *ops)
{
struct vm_area_struct *vma = vmf->vma;
@@ -1358,7 +1443,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
bool sync;
unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
struct inode *inode = mapping->host;
- int result = VM_FAULT_FALLBACK;
+ vm_fault_t result = VM_FAULT_FALLBACK;
struct iomap iomap = { 0 };
pgoff_t max_pgoff, pgoff;
void *entry;
@@ -1450,8 +1535,6 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
RADIX_DAX_PMD, write && !sync);
- if (IS_ERR(entry))
- goto finish_iomap;
/*
* If we are doing synchronous page fault and inode needs fsync,
@@ -1509,7 +1592,7 @@ out:
return result;
}
#else
-static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
const struct iomap_ops *ops)
{
return VM_FAULT_FALLBACK;
@@ -1529,7 +1612,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* has done all the necessary locking for page fault to proceed
* successfully.
*/
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
switch (pe_size) {
@@ -1553,14 +1636,14 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault);
* DAX file. It takes care of marking corresponding radix tree entry as dirty
* as well.
*/
-static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
+static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
enum page_entry_size pe_size,
pfn_t pfn)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
void *entry, **slot;
pgoff_t index = vmf->pgoff;
- int vmf_ret, error;
+ vm_fault_t ret;
xa_lock_irq(&mapping->i_pages);
entry = get_unlocked_mapping_entry(mapping, index, &slot);
@@ -1579,21 +1662,20 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
xa_unlock_irq(&mapping->i_pages);
switch (pe_size) {
case PE_SIZE_PTE:
- error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
- vmf_ret = dax_fault_return(error);
+ ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
break;
#ifdef CONFIG_FS_DAX_PMD
case PE_SIZE_PMD:
- vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
+ ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
pfn, true);
break;
#endif
default:
- vmf_ret = VM_FAULT_FALLBACK;
+ ret = VM_FAULT_FALLBACK;
}
put_locked_mapping_entry(mapping, index);
- trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret);
- return vmf_ret;
+ trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
+ return ret;
}
/**
@@ -1606,8 +1688,8 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
* stored persistently on the media and handles inserting of appropriate page
* table entry.
*/
-int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
- pfn_t pfn)
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size, pfn_t pfn)
{
int err;
loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
diff --git a/fs/exec.c b/fs/exec.c
index 30a36c2a39bf..2d4e0075bd24 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1824,6 +1824,7 @@ static int __do_execve_file(int fd, struct filename *filename,
current->fs->in_exec = 0;
current->in_execve = 0;
membarrier_execve(current);
+ rseq_execve(current);
acct_update_integrals(current);
task_numa_free(current);
free_bprm(bprm);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index c42169459298..12273b6ea56d 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -23,7 +23,7 @@
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
-#include <linux/shmem_fs.h>
+#include <linux/memfd.h>
#include <linux/compat.h>
#include <linux/poll.h>
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 2577ef1034ef..2a153aed4c19 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -26,8 +26,7 @@
#include "hpfs.h"
#define EIOERROR EIO
-#define EFSERROR EPERM
-#define EMEMERROR ENOMEM
+#define EFSERROR EUCLEAN
#define ANODE_ALLOC_FWD 512
#define FNODE_ALLOC_FWD 0
diff --git a/fs/iomap.c b/fs/iomap.c
index 206539d369a8..7d1e9f45f098 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1046,6 +1046,7 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
bio_set_dev(bio, iomap->bdev);
bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
bio->bi_write_hint = dio->iocb->ki_hint;
+ bio->bi_ioprio = dio->iocb->ki_ioprio;
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 97a972efab83..68728de12864 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -788,35 +788,34 @@ static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
spin_unlock(&lockres->l_lock);
}
-static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
- struct ocfs2_lock_holder *oh)
-{
- spin_lock(&lockres->l_lock);
- list_del(&oh->oh_list);
- spin_unlock(&lockres->l_lock);
-
- put_pid(oh->oh_owner_pid);
-}
-
-static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
+static struct ocfs2_lock_holder *
+ocfs2_pid_holder(struct ocfs2_lock_res *lockres,
+ struct pid *pid)
{
struct ocfs2_lock_holder *oh;
- struct pid *pid;
- /* look in the list of holders for one with the current task as owner */
spin_lock(&lockres->l_lock);
- pid = task_pid(current);
list_for_each_entry(oh, &lockres->l_holders, oh_list) {
if (oh->oh_owner_pid == pid) {
spin_unlock(&lockres->l_lock);
- return 1;
+ return oh;
}
}
spin_unlock(&lockres->l_lock);
+ return NULL;
+}
- return 0;
+static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
+ struct ocfs2_lock_holder *oh)
+{
+ spin_lock(&lockres->l_lock);
+ list_del(&oh->oh_list);
+ spin_unlock(&lockres->l_lock);
+
+ put_pid(oh->oh_owner_pid);
}
+
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
int level)
{
@@ -2610,34 +2609,93 @@ void ocfs2_inode_unlock(struct inode *inode,
*
* return < 0 on error, return == 0 if there's no lock holder on the stack
* before this call, return == 1 if this call would be a recursive locking.
+ * return == -EINVAL if this lock attempt would cause a forbidden lock upgrade.
+ *
+ * When taking lock levels into account,we face some different situations.
+ *
+ * 1. no lock is held
+ * In this case, just lock the inode as requested and return 0
+ *
+ * 2. We are holding a lock
+ * For this situation, things diverge into several cases
+ *
+ * wanted holding what to do
+ * ex ex see 2.1 below
+ * ex pr see 2.2 below
+ * pr ex see 2.1 below
+ * pr pr see 2.1 below
+ *
+ * 2.1 The lock level that is being held is compatible
+ * with the wanted level, so no lock action will be taken.
+ *
+ * 2.2 Otherwise, an upgrade is needed, but it is forbidden.
+ *
+ * Reason why upgrade within a process is forbidden is that
+ * lock upgrade may cause deadlock. The following illustrates
+ * how it happens.
+ *
+ * thread on node1 thread on node2
+ * ocfs2_inode_lock_tracker(ex=0)
+ *
+ * <====== ocfs2_inode_lock_tracker(ex=1)
+ *
+ * ocfs2_inode_lock_tracker(ex=1)
*/
int ocfs2_inode_lock_tracker(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
struct ocfs2_lock_holder *oh)
{
- int status;
- int arg_flags = 0, has_locked;
+ int status = 0;
struct ocfs2_lock_res *lockres;
+ struct ocfs2_lock_holder *tmp_oh;
+ struct pid *pid = task_pid(current);
+
lockres = &OCFS2_I(inode)->ip_inode_lockres;
- has_locked = ocfs2_is_locked_by_me(lockres);
- /* Just get buffer head if the cluster lock has been taken */
- if (has_locked)
- arg_flags = OCFS2_META_LOCK_GETBH;
+ tmp_oh = ocfs2_pid_holder(lockres, pid);
- if (likely(!has_locked || ret_bh)) {
- status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
+ if (!tmp_oh) {
+ /*
+ * This corresponds to the case 1.
+ * We haven't got any lock before.
+ */
+ status = ocfs2_inode_lock_full(inode, ret_bh, ex, 0);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
return status;
}
- }
- if (!has_locked)
+
+ oh->oh_ex = ex;
ocfs2_add_holder(lockres, oh);
+ return 0;
+ }
- return has_locked;
+ if (unlikely(ex && !tmp_oh->oh_ex)) {
+ /*
+ * case 2.2: an upgrade may cause deadlock, so forbid it.
+ */
+ mlog(ML_ERROR, "Recursive locking is not permitted to "
+ "upgrade to EX level from PR level.\n");
+ dump_stack();
+ return -EINVAL;
+ }
+
+ /*
+ * case 2.1: the OCFS2_META_LOCK_GETBH flag makes ocfs2_inode_lock_full()
+ * ignore the lock level and just update the buffer head.
+ */
+ if (ret_bh) {
+ status = ocfs2_inode_lock_full(inode, ret_bh, ex,
+ OCFS2_META_LOCK_GETBH);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ return status;
+ }
+ }
+ return tmp_oh ? 1 : 0;
}
void ocfs2_inode_unlock_tracker(struct inode *inode,
@@ -2649,12 +2707,13 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
lockres = &OCFS2_I(inode)->ip_inode_lockres;
/* had_lock means that the currect process already takes the cluster
- * lock previously. If had_lock is 1, we have nothing to do here, and
- * it will get unlocked where we got the lock.
+ * lock previously.
+ * If had_lock is 1, we have nothing to do here.
+ * If had_lock is 0, we will release the lock.
*/
if (!had_lock) {
+ ocfs2_inode_unlock(inode, oh->oh_ex);
ocfs2_remove_holder(lockres, oh);
- ocfs2_inode_unlock(inode, ex);
}
}
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 256e0a9067b8..4ec1c828f6e0 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -96,6 +96,7 @@ struct ocfs2_trim_fs_info {
struct ocfs2_lock_holder {
struct list_head oh_list;
struct pid *oh_owner_pid;
+ int oh_ex;
};
/* ocfs2_inode_lock_full() 'arg_flags' flags */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 6ee94bc23f5b..a2a8603d27e0 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -563,8 +563,8 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb,
return ret;
}
-static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
- u32 clusters_to_add, int mark_unwritten)
+static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
+ u32 clusters_to_add, int mark_unwritten)
{
int status = 0;
int restart_func = 0;
@@ -1035,8 +1035,8 @@ int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
clusters_to_add -= oi->ip_clusters;
if (clusters_to_add) {
- ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
- clusters_to_add, 0);
+ ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
+ clusters_to_add, 0);
if (ret) {
mlog_errno(ret);
goto out;
@@ -1493,7 +1493,7 @@ static int ocfs2_allocate_unwritten_extents(struct inode *inode,
goto next;
}
- ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
+ ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 1fdc9839cd93..7eb7f03531f6 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -65,8 +65,6 @@ int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
u64 new_i_size, u64 zero_to);
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
loff_t zero_to);
-int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
- u32 clusters_to_add, int mark_unwritten);
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
int ocfs2_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index ab30c005cc4b..994726ada857 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -402,7 +402,7 @@ out_err:
static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist,
unsigned int chunksize)
{
- int index;
+ u32 index;
index = __ilog2_u32(chunksize);
if (index >= OCFS2_INFO_MAX_HIST)
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index fb9a20e3d608..05220b365fb9 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -44,11 +44,11 @@
#include "ocfs2_trace.h"
-static int ocfs2_fault(struct vm_fault *vmf)
+static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
sigset_t oldset;
- int ret;
+ vm_fault_t ret;
ocfs2_block_signals(&oldset);
ret = filemap_fault(vmf);
@@ -59,10 +59,11 @@ static int ocfs2_fault(struct vm_fault *vmf)
return ret;
}
-static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
- struct page *page)
+static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
+ struct buffer_head *di_bh, struct page *page)
{
- int ret = VM_FAULT_NOPAGE;
+ int err;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
loff_t pos = page_offset(page);
@@ -105,15 +106,12 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
if (page->index == last_index)
len = ((size - 1) & ~PAGE_MASK) + 1;
- ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
+ err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
&locked_page, &fsdata, di_bh, page);
- if (ret) {
- if (ret != -ENOSPC)
- mlog_errno(ret);
- if (ret == -ENOMEM)
- ret = VM_FAULT_OOM;
- else
- ret = VM_FAULT_SIGBUS;
+ if (err) {
+ if (err != -ENOSPC)
+ mlog_errno(err);
+ ret = vmf_error(err);
goto out;
}
@@ -121,20 +119,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
ret = VM_FAULT_NOPAGE;
goto out;
}
- ret = ocfs2_write_end_nolock(mapping, pos, len, len, fsdata);
- BUG_ON(ret != len);
+ err = ocfs2_write_end_nolock(mapping, pos, len, len, fsdata);
+ BUG_ON(err != len);
ret = VM_FAULT_LOCKED;
out:
return ret;
}
-static int ocfs2_page_mkwrite(struct vm_fault *vmf)
+static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
struct buffer_head *di_bh = NULL;
sigset_t oldset;
- int ret;
+ int err;
+ vm_fault_t ret;
sb_start_pagefault(inode->i_sb);
ocfs2_block_signals(&oldset);
@@ -144,13 +143,10 @@ static int ocfs2_page_mkwrite(struct vm_fault *vmf)
* node. Taking the data lock will also ensure that we don't
* attempt page truncation as part of a downconvert.
*/
- ret = ocfs2_inode_lock(inode, &di_bh, 1);
- if (ret < 0) {
- mlog_errno(ret);
- if (ret == -ENOMEM)
- ret = VM_FAULT_OOM;
- else
- ret = VM_FAULT_SIGBUS;
+ err = ocfs2_inode_lock(inode, &di_bh, 1);
+ if (err < 0) {
+ mlog_errno(err);
+ ret = vmf_error(err);
goto out;
}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 8dd6f703c819..b7ca84bc3df7 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -2332,8 +2332,7 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
struct buffer_head *orphan_dir_bh,
bool dio)
{
- const int namelen = OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN;
- char name[namelen + 1];
+ char name[OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN + 1];
struct ocfs2_dinode *orphan_fe;
int status = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 5bb4a89f9045..7071ad0dec90 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -807,11 +807,11 @@ struct ocfs2_dir_block_trailer {
* in this block. (unused) */
/*10*/ __u8 db_signature[8]; /* Signature for verification */
__le64 db_reserved2;
- __le64 db_free_next; /* Next block in list (unused) */
-/*20*/ __le64 db_blkno; /* Offset on disk, in blocks */
- __le64 db_parent_dinode; /* dinode which owns me, in
+/*20*/ __le64 db_free_next; /* Next block in list (unused) */
+ __le64 db_blkno; /* Offset on disk, in blocks */
+/*30*/ __le64 db_parent_dinode; /* dinode which owns me, in
blocks */
-/*30*/ struct ocfs2_block_check db_check; /* Error checking */
+ struct ocfs2_block_check db_check; /* Error checking */
/*40*/
};
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 004077f1a7bf..0ceb3b6b37e7 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -268,7 +268,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
unsigned long flags;
sigset_t pending, shpending, blocked, ignored, caught;
int num_threads = 0;
- unsigned long qsize = 0;
+ unsigned int qsize = 0;
unsigned long qlim = 0;
sigemptyset(&pending);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index af128b374143..4aa9ce5df02f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -205,171 +205,129 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
return result;
}
-static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
- size_t _count, loff_t *pos)
+static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct task_struct *tsk;
- struct mm_struct *mm;
- char *page;
- unsigned long count = _count;
unsigned long arg_start, arg_end, env_start, env_end;
- unsigned long len1, len2, len;
- unsigned long p;
- char c;
- ssize_t rv;
-
- BUG_ON(*pos < 0);
+ unsigned long pos, len;
+ char *page;
- tsk = get_proc_task(file_inode(file));
- if (!tsk)
- return -ESRCH;
- mm = get_task_mm(tsk);
- put_task_struct(tsk);
- if (!mm)
- return 0;
/* Check if process spawned far enough to have cmdline. */
- if (!mm->env_end) {
- rv = 0;
- goto out_mmput;
- }
-
- page = (char *)__get_free_page(GFP_KERNEL);
- if (!page) {
- rv = -ENOMEM;
- goto out_mmput;
- }
+ if (!mm->env_end)
+ return 0;
- down_read(&mm->mmap_sem);
+ spin_lock(&mm->arg_lock);
arg_start = mm->arg_start;
arg_end = mm->arg_end;
env_start = mm->env_start;
env_end = mm->env_end;
- up_read(&mm->mmap_sem);
-
- BUG_ON(arg_start > arg_end);
- BUG_ON(env_start > env_end);
+ spin_unlock(&mm->arg_lock);
- len1 = arg_end - arg_start;
- len2 = env_end - env_start;
+ if (arg_start >= arg_end)
+ return 0;
- /* Empty ARGV. */
- if (len1 == 0) {
- rv = 0;
- goto out_free_page;
- }
/*
- * Inherently racy -- command line shares address space
- * with code and data.
+ * We have traditionally allowed the user to re-write
+ * the argument strings and overflow the end result
+ * into the environment section. But only do that if
+ * the environment area is contiguous to the arguments.
*/
- rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
- if (rv <= 0)
- goto out_free_page;
-
- rv = 0;
-
- if (c == '\0') {
- /* Command line (set of strings) occupies whole ARGV. */
- if (len1 <= *pos)
- goto out_free_page;
-
- p = arg_start + *pos;
- len = len1 - *pos;
- while (count > 0 && len > 0) {
- unsigned int _count;
- int nr_read;
-
- _count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
- if (nr_read < 0)
- rv = nr_read;
- if (nr_read <= 0)
- goto out_free_page;
-
- if (copy_to_user(buf, page, nr_read)) {
- rv = -EFAULT;
- goto out_free_page;
- }
+ if (env_start != arg_end || env_start >= env_end)
+ env_start = env_end = arg_end;
- p += nr_read;
- len -= nr_read;
- buf += nr_read;
- count -= nr_read;
- rv += nr_read;
- }
- } else {
- /*
- * Command line (1 string) occupies ARGV and
- * extends into ENVP.
- */
- struct {
- unsigned long p;
- unsigned long len;
- } cmdline[2] = {
- { .p = arg_start, .len = len1 },
- { .p = env_start, .len = len2 },
- };
- loff_t pos1 = *pos;
- unsigned int i;
+ /* We're not going to care if "*ppos" has high bits set */
+ pos = arg_start + *ppos;
+
+ /* .. but we do check the result is in the proper range */
+ if (pos < arg_start || pos >= env_end)
+ return 0;
+
+ /* .. and we never go past env_end */
+ if (env_end - pos < count)
+ count = env_end - pos;
+
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ len = 0;
+ while (count) {
+ int got;
+ size_t size = min_t(size_t, PAGE_SIZE, count);
+
+ got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
+ if (got <= 0)
+ break;
- i = 0;
- while (i < 2 && pos1 >= cmdline[i].len) {
- pos1 -= cmdline[i].len;
- i++;
+ /* Don't walk past a NUL character once you hit arg_end */
+ if (pos + got >= arg_end) {
+ int n = 0;
+
+ /*
+ * If we started before 'arg_end' but ended up
+ * at or after it, we start the NUL character
+ * check at arg_end-1 (where we expect the normal
+ * EOF to be).
+ *
+ * NOTE! This is smaller than 'got', because
+ * pos + got >= arg_end
+ */
+ if (pos < arg_end)
+ n = arg_end - pos - 1;
+
+ /* Cut off at first NUL after 'n' */
+ got = n + strnlen(page+n, got-n);
+ if (!got)
+ break;
}
- while (i < 2) {
- p = cmdline[i].p + pos1;
- len = cmdline[i].len - pos1;
- while (count > 0 && len > 0) {
- unsigned int _count, l;
- int nr_read;
- bool final;
-
- _count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
- if (nr_read < 0)
- rv = nr_read;
- if (nr_read <= 0)
- goto out_free_page;
-
- /*
- * Command line can be shorter than whole ARGV
- * even if last "marker" byte says it is not.
- */
- final = false;
- l = strnlen(page, nr_read);
- if (l < nr_read) {
- nr_read = l;
- final = true;
- }
-
- if (copy_to_user(buf, page, nr_read)) {
- rv = -EFAULT;
- goto out_free_page;
- }
-
- p += nr_read;
- len -= nr_read;
- buf += nr_read;
- count -= nr_read;
- rv += nr_read;
-
- if (final)
- goto out_free_page;
- }
- /* Only first chunk can be read partially. */
- pos1 = 0;
- i++;
+ got -= copy_to_user(buf, page, got);
+ if (unlikely(!got)) {
+ if (!len)
+ len = -EFAULT;
+ break;
}
+ pos += got;
+ buf += got;
+ len += got;
+ count -= got;
}
-out_free_page:
free_page((unsigned long)page);
-out_mmput:
+ return len;
+}
+
+static ssize_t get_task_cmdline(struct task_struct *tsk, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mm_struct *mm;
+ ssize_t ret;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ return 0;
+
+ ret = get_mm_cmdline(mm, buf, count, pos);
mmput(mm);
- if (rv > 0)
- *pos += rv;
- return rv;
+ return ret;
+}
+
+static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct task_struct *tsk;
+ ssize_t ret;
+
+ BUG_ON(*pos < 0);
+
+ tsk = get_proc_task(file_inode(file));
+ if (!tsk)
+ return -ESRCH;
+ ret = get_task_cmdline(tsk, buf, count, pos);
+ put_task_struct(tsk);
+ if (ret > 0)
+ *pos += ret;
+ return ret;
}
static const struct file_operations proc_pid_cmdline_ops = {
@@ -430,7 +388,6 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
struct stack_trace trace;
unsigned long *entries;
int err;
- int i;
entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
if (!entries)
@@ -443,6 +400,8 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
err = lock_trace(task);
if (!err) {
+ unsigned int i;
+
save_stack_trace_tsk(task, &trace);
for (i = 0; i < trace.nr_entries; i++) {
@@ -927,10 +886,10 @@ static ssize_t environ_read(struct file *file, char __user *buf,
if (!mmget_not_zero(mm))
goto free;
- down_read(&mm->mmap_sem);
+ spin_lock(&mm->arg_lock);
env_start = mm->env_start;
env_end = mm->env_end;
- up_read(&mm->mmap_sem);
+ spin_unlock(&mm->arg_lock);
while (count > 0) {
size_t this_len, max_len;
@@ -1784,9 +1743,9 @@ int pid_getattr(const struct path *path, struct kstat *stat,
generic_fillattr(inode, stat);
- rcu_read_lock();
stat->uid = GLOBAL_ROOT_UID;
stat->gid = GLOBAL_ROOT_GID;
+ rcu_read_lock();
task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task) {
if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) {
@@ -1875,7 +1834,7 @@ const struct dentry_operations pid_dentry_operations =
* by stat.
*/
bool proc_fill_cache(struct file *file, struct dir_context *ctx,
- const char *name, int len,
+ const char *name, unsigned int len,
instantiate_t instantiate, struct task_struct *task, const void *ptr)
{
struct dentry *child, *dir = file->f_path.dentry;
@@ -1894,19 +1853,19 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
struct dentry *res;
res = instantiate(child, task, ptr);
d_lookup_done(child);
- if (IS_ERR(res))
- goto end_instantiate;
if (unlikely(res)) {
dput(child);
child = res;
+ if (IS_ERR(child))
+ goto end_instantiate;
}
}
}
inode = d_inode(child);
ino = inode->i_ino;
type = inode->i_mode >> 12;
-end_instantiate:
dput(child);
+end_instantiate:
return dir_emit(ctx, name, len, ino, type);
}
@@ -3251,7 +3210,7 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
iter.task;
iter.tgid += 1, iter = next_tgid(ns, iter)) {
char name[10 + 1];
- int len;
+ unsigned int len;
cond_resched();
if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE))
@@ -3578,7 +3537,7 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx)
task;
task = next_tid(task), ctx->pos++) {
char name[10 + 1];
- int len;
+ unsigned int len;
tid = task_pid_nr_ns(task, ns);
len = snprintf(name, sizeof(name), "%u", tid);
if (!proc_fill_cache(file, ctx, name, len,
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 05b9893e9a22..81882a13212d 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -248,7 +248,7 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
struct file *f;
struct fd_data data;
char name[10 + 1];
- int len;
+ unsigned int len;
f = fcheck_files(files, fd);
if (!f)
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 93eb1906c28d..50cb22a08c2f 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -163,7 +163,7 @@ extern loff_t mem_lseek(struct file *, loff_t, int);
/* Lookups */
typedef struct dentry *instantiate_t(struct dentry *,
struct task_struct *, const void *);
-extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, int,
+bool proc_fill_cache(struct file *, struct dir_context *, const char *, unsigned int,
instantiate_t, struct task_struct *, const void *);
/*
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 1491918a33c3..792c78a49174 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -154,6 +154,8 @@ u64 stable_page_flags(struct page *page)
if (PageBalloon(page))
u |= 1 << KPF_BALLOON;
+ if (PageTable(page))
+ u |= 1 << KPF_PGTABLE;
if (page_is_idle(page))
u |= 1 << KPF_IDLE;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7e074138d2f2..597969db9e90 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1259,8 +1259,9 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
if (pte_swp_soft_dirty(pte))
flags |= PM_SOFT_DIRTY;
entry = pte_to_swp_entry(pte);
- frame = swp_type(entry) |
- (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+ if (pm->show_pfn)
+ frame = swp_type(entry) |
+ (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags |= PM_SWAP;
if (is_migration_entry(entry))
page = migration_entry_to_page(entry);
@@ -1311,11 +1312,14 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
else if (is_swap_pmd(pmd)) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
- unsigned long offset = swp_offset(entry);
+ unsigned long offset;
- offset += (addr & ~PMD_MASK) >> PAGE_SHIFT;
- frame = swp_type(entry) |
- (offset << MAX_SWAPFILES_SHIFT);
+ if (pm->show_pfn) {
+ offset = swp_offset(entry) +
+ ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ frame = swp_type(entry) |
+ (offset << MAX_SWAPFILES_SHIFT);
+ }
flags |= PM_SWAP;
if (pmd_swp_soft_dirty(pmd))
flags |= PM_SOFT_DIRTY;
@@ -1333,10 +1337,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
err = add_to_pagemap(addr, &pme, pm);
if (err)
break;
- if (pm->show_pfn && (flags & PM_PRESENT))
- frame++;
- else if (flags & PM_SWAP)
- frame += (1 << MAX_SWAPFILES_SHIFT);
+ if (pm->show_pfn) {
+ if (flags & PM_PRESENT)
+ frame++;
+ else if (flags & PM_SWAP)
+ frame += (1 << MAX_SWAPFILES_SHIFT);
+ }
}
spin_unlock(ptl);
return err;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 1acb2ff505e6..28b80713a163 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1513,7 +1513,7 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
* mmap()d file has taken write protection fault and is being made writable.
* UBIFS must ensure page is budgeted for.
*/
-static int ubifs_vm_page_mkwrite(struct vm_fault *vmf)
+static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -1567,8 +1567,7 @@ static int ubifs_vm_page_mkwrite(struct vm_fault *vmf)
if (unlikely(page->mapping != inode->i_mapping ||
page_offset(page) > i_size_read(inode))) {
/* Page got truncated out from underneath us */
- err = -EINVAL;
- goto out_unlock;
+ goto sigbus;
}
if (PagePrivate(page))
@@ -1597,12 +1596,10 @@ static int ubifs_vm_page_mkwrite(struct vm_fault *vmf)
wait_for_stable_page(page);
return VM_FAULT_LOCKED;
-out_unlock:
+sigbus:
unlock_page(page);
ubifs_release_budget(c, &req);
- if (err)
- err = VM_FAULT_SIGBUS;
- return err;
+ return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct ubifs_file_vm_ops = {
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 7b35e3d6cde7..a03a47cf880d 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -49,7 +49,7 @@
* maximum size. So dark watermark is the amount of free + dirty space in LEB
* which are guaranteed to be reclaimable. If LEB has less space, the GC might
* be unable to reclaim it. So, LEBs with free + dirty greater than dark
- * watermark are "good" LEBs from GC's point of few. The other LEBs are not so
+ * watermark are "good" LEBs from GC's point of view. The other LEBs are not so
* good, and GC takes extra care when moving them.
*/
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 04c4ec6483e5..da8afdfccaa6 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -98,9 +98,8 @@ static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
*
* This function reserves space in journal head @head. If the reservation
* succeeded, the journal head stays locked and later has to be unlocked using
- * 'release_head()'. 'write_node()' and 'write_head()' functions also unlock
- * it. Returns zero in case of success, %-EAGAIN if commit has to be done, and
- * other negative error codes in case of other failures.
+ * 'release_head()'. Returns zero in case of success, %-EAGAIN if commit has to
+ * be done, and other negative error codes in case of other failures.
*/
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index 8c795e6392b1..7cffa120a750 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -167,10 +167,10 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
* @lnum: LEB number of the bud
* @offs: starting offset of the bud
*
- * This function writes reference node for the new bud LEB @lnum it to the log,
- * and adds it to the buds tress. It also makes sure that log size does not
+ * This function writes a reference node for the new bud LEB @lnum to the log,
+ * and adds it to the buds trees. It also makes sure that log size does not
* exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
- * %-EAGAIN if commit is required, and a negative error codes in case of
+ * %-EAGAIN if commit is required, and a negative error code in case of
* failure.
*/
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 235654c2fe89..78da65b2fb85 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -619,7 +619,7 @@ static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
/**
* pnode_lookup - lookup a pnode in the LPT.
* @c: UBIFS file-system description object
- * @i: pnode number (0 to main_lebs - 1)
+ * @i: pnode number (0 to (main_lebs - 1) / UBIFS_LPT_FANOUT))
*
* This function returns a pointer to the pnode on success or a negative
* error code on failure.
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index ae5c02f22f3e..85c2a43082b7 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -223,9 +223,6 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
dbg_mntk(&r->key, "LEB %d:%d len %d deletion %d sqnum %llu key ",
r->lnum, r->offs, r->len, r->deletion, r->sqnum);
- /* Set c->replay_sqnum to help deal with dangling branches. */
- c->replay_sqnum = r->sqnum;
-
if (is_hash_key(c, &r->key)) {
if (r->deletion)
err = ubifs_tnc_remove_nm(c, &r->key, &r->nm);
@@ -1037,7 +1034,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
* The head of the log must always start with the
* "commit start" node on a properly formatted UBIFS.
* But we found no nodes at all, which means that
- * someting went wrong and we cannot proceed mounting
+ * something went wrong and we cannot proceed mounting
* the file-system.
*/
ubifs_err(c, "no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted",
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 5ee7af879cc4..209d6369ae71 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1206,7 +1206,6 @@ struct ubifs_debug_info;
* @replay_list: temporary list used during journal replay
* @replay_buds: list of buds to replay
* @cs_sqnum: sequence number of first node in the log (commit start node)
- * @replay_sqnum: sequence number of node currently being replayed
* @unclean_leb_list: LEBs to recover when re-mounting R/O mounted FS to R/W
* mode
* @rcvrd_mst_node: recovered master node to write when re-mounting R/O mounted
@@ -1438,7 +1437,6 @@ struct ubifs_info {
struct list_head replay_list;
struct list_head replay_buds;
unsigned long long cs_sqnum;
- unsigned long long replay_sqnum;
struct list_head unclean_leb_list;
struct ubifs_mst_node *rcvrd_mst_node;
struct rb_root size_tree;
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 759f1a209dbb..6f720fdf5020 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -139,7 +139,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
inode->i_op = &empty_iops;
inode->i_fop = &empty_fops;
- inode->i_flags |= S_SYNC | S_NOATIME | S_NOCMTIME | S_NOQUOTA;
+ inode->i_flags |= S_SYNC | S_NOATIME | S_NOCMTIME;
ui = ubifs_inode(inode);
ui->xattr = 1;
ui->flags |= UBIFS_XATTR_FL;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index cec550c8468f..123bf7d516fc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -62,6 +62,8 @@ struct userfaultfd_ctx {
enum userfaultfd_state state;
/* released */
bool released;
+ /* memory mappings are changing because of non-cooperative event */
+ bool mmap_changing;
/* mm with one ore more vmas attached to this userfaultfd_ctx */
struct mm_struct *mm;
};
@@ -641,6 +643,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
* already released.
*/
out:
+ WRITE_ONCE(ctx->mmap_changing, false);
userfaultfd_ctx_put(ctx);
}
@@ -686,10 +689,12 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
ctx->state = UFFD_STATE_RUNNING;
ctx->features = octx->features;
ctx->released = false;
+ ctx->mmap_changing = false;
ctx->mm = vma->vm_mm;
mmgrab(ctx->mm);
userfaultfd_ctx_get(octx);
+ WRITE_ONCE(octx->mmap_changing, true);
fctx->orig = octx;
fctx->new = ctx;
list_add_tail(&fctx->list, fcs);
@@ -732,6 +737,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
vm_ctx->ctx = ctx;
userfaultfd_ctx_get(ctx);
+ WRITE_ONCE(ctx->mmap_changing, true);
}
}
@@ -772,6 +778,7 @@ bool userfaultfd_remove(struct vm_area_struct *vma,
return true;
userfaultfd_ctx_get(ctx);
+ WRITE_ONCE(ctx->mmap_changing, true);
up_read(&mm->mmap_sem);
msg_init(&ewq.msg);
@@ -815,6 +822,7 @@ int userfaultfd_unmap_prep(struct vm_area_struct *vma,
return -ENOMEM;
userfaultfd_ctx_get(ctx);
+ WRITE_ONCE(ctx->mmap_changing, true);
unmap_ctx->ctx = ctx;
unmap_ctx->start = start;
unmap_ctx->end = end;
@@ -1653,6 +1661,10 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
user_uffdio_copy = (struct uffdio_copy __user *) arg;
+ ret = -EAGAIN;
+ if (READ_ONCE(ctx->mmap_changing))
+ goto out;
+
ret = -EFAULT;
if (copy_from_user(&uffdio_copy, user_uffdio_copy,
/* don't copy "copy" last field */
@@ -1674,7 +1686,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
goto out;
if (mmget_not_zero(ctx->mm)) {
ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
- uffdio_copy.len);
+ uffdio_copy.len, &ctx->mmap_changing);
mmput(ctx->mm);
} else {
return -ESRCH;
@@ -1705,6 +1717,10 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
+ ret = -EAGAIN;
+ if (READ_ONCE(ctx->mmap_changing))
+ goto out;
+
ret = -EFAULT;
if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
/* don't copy "zeropage" last field */
@@ -1721,7 +1737,8 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
if (mmget_not_zero(ctx->mm)) {
ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
- uffdio_zeropage.range.len);
+ uffdio_zeropage.range.len,
+ &ctx->mmap_changing);
mmput(ctx->mm);
} else {
return -ESRCH;
@@ -1900,6 +1917,7 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
ctx->features = 0;
ctx->state = UFFD_STATE_WAIT_API;
ctx->released = false;
+ ctx->mmap_changing = false;
ctx->mm = current->mm;
/* prevent the mm struct to be freed */
mmgrab(ctx->mm);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 0e3fb8978344..bed07dfbb85e 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -312,7 +312,7 @@ restart:
if (error <= 0)
return error;
- error = xfs_break_layouts(inode, iolock);
+ error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
if (error)
return error;
@@ -731,6 +731,69 @@ xfs_file_write_iter(
return xfs_file_buffered_aio_write(iocb, from);
}
+static void
+xfs_wait_dax_page(
+ struct inode *inode,
+ bool *did_unlock)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+
+ *did_unlock = true;
+ xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
+ schedule();
+ xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+}
+
+static int
+xfs_break_dax_layouts(
+ struct inode *inode,
+ uint iolock,
+ bool *did_unlock)
+{
+ struct page *page;
+
+ ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));
+
+ page = dax_layout_busy_page(inode->i_mapping);
+ if (!page)
+ return 0;
+
+ return ___wait_var_event(&page->_refcount,
+ atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
+ 0, 0, xfs_wait_dax_page(inode, did_unlock));
+}
+
+int
+xfs_break_layouts(
+ struct inode *inode,
+ uint *iolock,
+ enum layout_break_reason reason)
+{
+ bool retry;
+ int error;
+
+ ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
+
+ do {
+ retry = false;
+ switch (reason) {
+ case BREAK_UNMAP:
+ error = xfs_break_dax_layouts(inode, *iolock, &retry);
+ if (error || retry)
+ break;
+ /* fall through */
+ case BREAK_WRITE:
+ error = xfs_break_leased_layouts(inode, iolock, &retry);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ error = -EINVAL;
+ }
+ } while (error == 0 && retry);
+
+ return error;
+}
+
#define XFS_FALLOC_FL_SUPPORTED \
(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
@@ -747,7 +810,7 @@ xfs_file_fallocate(
struct xfs_inode *ip = XFS_I(inode);
long error;
enum xfs_prealloc_flags flags = 0;
- uint iolock = XFS_IOLOCK_EXCL;
+ uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
loff_t new_size = 0;
bool do_file_insert = false;
@@ -757,13 +820,10 @@ xfs_file_fallocate(
return -EOPNOTSUPP;
xfs_ilock(ip, iolock);
- error = xfs_break_layouts(inode, &iolock);
+ error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
if (error)
goto out_unlock;
- xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
- iolock |= XFS_MMAPLOCK_EXCL;
-
if (mode & FALLOC_FL_PUNCH_HOLE) {
error = xfs_free_file_space(ip, offset, len);
if (error)
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 00fee6824745..a91d9fb1effc 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -379,6 +379,20 @@ static inline void xfs_ifunlock(struct xfs_inode *ip)
>> XFS_ILOCK_SHIFT)
/*
+ * Layouts are broken in the BREAK_WRITE case to ensure that
+ * layout-holders do not collide with local writes. Additionally,
+ * layouts are broken in the BREAK_UNMAP case to make sure the
+ * layout-holder has a consistent view of the file's extent map. While
+ * BREAK_WRITE breaks can be satisfied by recalling FL_LAYOUT leases,
+ * BREAK_UNMAP breaks additionally require waiting for busy dax-pages to
+ * go idle.
+ */
+enum layout_break_reason {
+ BREAK_WRITE,
+ BREAK_UNMAP,
+};
+
+/*
* For multiple groups support: if S_ISGID bit is set in the parent
* directory, group of new file is set to that of the parent, and
* new subdirectory gets S_ISGID bit from parent.
@@ -453,6 +467,8 @@ enum xfs_prealloc_flags {
int xfs_update_prealloc_flags(struct xfs_inode *ip,
enum xfs_prealloc_flags flags);
+int xfs_break_layouts(struct inode *inode, uint *iolock,
+ enum layout_break_reason reason);
/* from xfs_iops.c */
extern void xfs_setup_inode(struct xfs_inode *ip);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 5dd9e22b4a4c..32b680522abd 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -39,7 +39,6 @@
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans.h"
-#include "xfs_pnfs.h"
#include "xfs_acl.h"
#include "xfs_btree.h"
#include <linux/fsmap.h>
@@ -614,7 +613,7 @@ xfs_ioc_space(
struct xfs_inode *ip = XFS_I(inode);
struct iattr iattr;
enum xfs_prealloc_flags flags = 0;
- uint iolock = XFS_IOLOCK_EXCL;
+ uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
int error;
/*
@@ -644,13 +643,10 @@ xfs_ioc_space(
return error;
xfs_ilock(ip, iolock);
- error = xfs_break_layouts(inode, &iolock);
+ error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
if (error)
goto out_unlock;
- xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
- iolock |= XFS_MMAPLOCK_EXCL;
-
switch (bf->l_whence) {
case 0: /*SEEK_SET*/
break;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index b0eb49bb4918..3b4be06fdaa5 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -37,7 +37,6 @@
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_trans_space.h"
-#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include <linux/capability.h>
@@ -1030,14 +1029,19 @@ xfs_vn_setattr(
int error;
if (iattr->ia_valid & ATTR_SIZE) {
- struct xfs_inode *ip = XFS_I(d_inode(dentry));
- uint iolock = XFS_IOLOCK_EXCL;
+ struct inode *inode = d_inode(dentry);
+ struct xfs_inode *ip = XFS_I(inode);
+ uint iolock;
- error = xfs_break_layouts(d_inode(dentry), &iolock);
- if (error)
+ xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+ iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
+
+ error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
+ if (error) {
+ xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
return error;
+ }
- xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
error = xfs_vn_setattr_size(dentry, iattr);
xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
} else {
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index aa6c5c193f45..f44c3599527d 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -31,19 +31,20 @@
* rules in the page fault path we don't bother.
*/
int
-xfs_break_layouts(
+xfs_break_leased_layouts(
struct inode *inode,
- uint *iolock)
+ uint *iolock,
+ bool *did_unlock)
{
struct xfs_inode *ip = XFS_I(inode);
int error;
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
-
while ((error = break_layout(inode, false) == -EWOULDBLOCK)) {
xfs_iunlock(ip, *iolock);
+ *did_unlock = true;
error = break_layout(inode, true);
- *iolock = XFS_IOLOCK_EXCL;
+ *iolock &= ~XFS_IOLOCK_SHARED;
+ *iolock |= XFS_IOLOCK_EXCL;
xfs_ilock(ip, *iolock);
}
@@ -120,8 +121,8 @@ xfs_fs_map_blocks(
* Lock out any other I/O before we flush and invalidate the pagecache,
* and then hand out a layout to the remote system. This is very
* similar to direct I/O, except that the synchronization is much more
- * complicated. See the comment near xfs_break_layouts for a detailed
- * explanation.
+ * complicated. See the comment near xfs_break_leased_layouts
+ * for a detailed explanation.
*/
xfs_ilock(ip, XFS_IOLOCK_EXCL);
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
index bf45951e28fe..940c6c2ad88c 100644
--- a/fs/xfs/xfs_pnfs.h
+++ b/fs/xfs/xfs_pnfs.h
@@ -9,10 +9,11 @@ int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps,
struct iattr *iattr);
-int xfs_break_layouts(struct inode *inode, uint *iolock);
+int xfs_break_leased_layouts(struct inode *inode, uint *iolock,
+ bool *did_unlock);
#else
static inline int
-xfs_break_layouts(struct inode *inode, uint *iolock)
+xfs_break_leased_layouts(struct inode *inode, uint *iolock, bool *did_unlock)
{
return 0;
}
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
index ffb68d67be5f..a248545f1e18 100644
--- a/include/asm-generic/int-ll64.h
+++ b/include/asm-generic/int-ll64.h
@@ -13,17 +13,14 @@
#ifndef __ASSEMBLY__
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long long s64;
-typedef unsigned long long u64;
+typedef __s8 s8;
+typedef __u8 u8;
+typedef __s16 s16;
+typedef __u16 u16;
+typedef __s32 s32;
+typedef __u32 u32;
+typedef __s64 s64;
+typedef __u64 u64;
#define S8_C(x) x
#define U8_C(x) x ## U
diff --git a/include/dt-bindings/clock/actions,s900-cmu.h b/include/dt-bindings/clock/actions,s900-cmu.h
new file mode 100644
index 000000000000..7c1251565f43
--- /dev/null
+++ b/include/dt-bindings/clock/actions,s900-cmu.h
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Device Tree binding constants for Actions Semi S900 Clock Management Unit
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Copyright (c) 2018 Linaro Ltd.
+
+#ifndef __DT_BINDINGS_CLOCK_S900_CMU_H
+#define __DT_BINDINGS_CLOCK_S900_CMU_H
+
+#define CLK_NONE 0
+
+/* fixed rate clocks */
+#define CLK_LOSC 1
+#define CLK_HOSC 2
+
+/* pll clocks */
+#define CLK_CORE_PLL 3
+#define CLK_DEV_PLL 4
+#define CLK_DDR_PLL 5
+#define CLK_NAND_PLL 6
+#define CLK_DISPLAY_PLL 7
+#define CLK_DSI_PLL 8
+#define CLK_ASSIST_PLL 9
+#define CLK_AUDIO_PLL 10
+
+/* system clock */
+#define CLK_CPU 15
+#define CLK_DEV 16
+#define CLK_NOC 17
+#define CLK_NOC_MUX 18
+#define CLK_NOC_DIV 19
+#define CLK_AHB 20
+#define CLK_APB 21
+#define CLK_DMAC 22
+
+/* peripheral device clock */
+#define CLK_GPIO 23
+
+#define CLK_BISP 24
+#define CLK_CSI0 25
+#define CLK_CSI1 26
+
+#define CLK_DE0 27
+#define CLK_DE1 28
+#define CLK_DE2 29
+#define CLK_DE3 30
+#define CLK_DSI 32
+
+#define CLK_GPU 33
+#define CLK_GPU_CORE 34
+#define CLK_GPU_MEM 35
+#define CLK_GPU_SYS 36
+
+#define CLK_HDE 37
+#define CLK_I2C0 38
+#define CLK_I2C1 39
+#define CLK_I2C2 40
+#define CLK_I2C3 41
+#define CLK_I2C4 42
+#define CLK_I2C5 43
+#define CLK_I2SRX 44
+#define CLK_I2STX 45
+#define CLK_IMX 46
+#define CLK_LCD 47
+#define CLK_NAND0 48
+#define CLK_NAND1 49
+#define CLK_PWM0 50
+#define CLK_PWM1 51
+#define CLK_PWM2 52
+#define CLK_PWM3 53
+#define CLK_PWM4 54
+#define CLK_PWM5 55
+#define CLK_SD0 56
+#define CLK_SD1 57
+#define CLK_SD2 58
+#define CLK_SD3 59
+#define CLK_SENSOR 60
+#define CLK_SPEED_SENSOR 61
+#define CLK_SPI0 62
+#define CLK_SPI1 63
+#define CLK_SPI2 64
+#define CLK_SPI3 65
+#define CLK_THERMAL_SENSOR 66
+#define CLK_UART0 67
+#define CLK_UART1 68
+#define CLK_UART2 69
+#define CLK_UART3 70
+#define CLK_UART4 71
+#define CLK_UART5 72
+#define CLK_UART6 73
+#define CLK_VCE 74
+#define CLK_VDE 75
+
+#define CLK_USB3_480MPLL0 76
+#define CLK_USB3_480MPHY0 77
+#define CLK_USB3_5GPHY 78
+#define CLK_USB3_CCE 79
+#define CLK_USB3_MAC 80
+
+#define CLK_TIMER 83
+
+#define CLK_HDMI_AUDIO 84
+
+#define CLK_24M 85
+
+#define CLK_EDP 86
+
+#define CLK_24M_EDP 87
+#define CLK_EDP_PLL 88
+#define CLK_EDP_LINK 89
+
+#define CLK_USB2H0_PLLEN 90
+#define CLK_USB2H0_PHY 91
+#define CLK_USB2H0_CCE 92
+#define CLK_USB2H1_PLLEN 93
+#define CLK_USB2H1_PHY 94
+#define CLK_USB2H1_CCE 95
+
+#define CLK_DDR0 96
+#define CLK_DDR1 97
+#define CLK_DMM 98
+
+#define CLK_ETH_MAC 99
+#define CLK_RMII_REF 100
+
+#define CLK_NR_CLKS (CLK_RMII_REF + 1)
+
+#endif /* __DT_BINDINGS_CLOCK_S900_CMU_H */
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index d3558d897a4d..44761849fcbe 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -38,6 +38,7 @@
#define ASPEED_CLK_MAC 32
#define ASPEED_CLK_BCLK 33
#define ASPEED_CLK_MPLL 34
+#define ASPEED_CLK_24M 35
#define ASPEED_RESET_XDMA 0
#define ASPEED_RESET_MCTP 1
@@ -45,8 +46,9 @@
#define ASPEED_RESET_JTAG_MASTER 3
#define ASPEED_RESET_MIC 4
#define ASPEED_RESET_PWM 5
-#define ASPEED_RESET_PCIVGA 6
+#define ASPEED_RESET_PECI 6
#define ASPEED_RESET_I2C 7
#define ASPEED_RESET_AHB 8
+#define ASPEED_RESET_CRT1 9
#endif
diff --git a/include/dt-bindings/clock/axg-aoclkc.h b/include/dt-bindings/clock/axg-aoclkc.h
new file mode 100644
index 000000000000..61955016a55b
--- /dev/null
+++ b/include/dt-bindings/clock/axg-aoclkc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ */
+
+#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_AXG_AOCLK
+#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_AXG_AOCLK
+
+#define CLKID_AO_REMOTE 0
+#define CLKID_AO_I2C_MASTER 1
+#define CLKID_AO_I2C_SLAVE 2
+#define CLKID_AO_UART1 3
+#define CLKID_AO_UART2 4
+#define CLKID_AO_IR_BLASTER 5
+#define CLKID_AO_SAR_ADC 6
+#define CLKID_AO_CLK81 7
+#define CLKID_AO_SAR_ADC_SEL 8
+#define CLKID_AO_SAR_ADC_DIV 9
+#define CLKID_AO_SAR_ADC_CLK 10
+#define CLKID_AO_ALT_XTAL 11
+
+#endif
diff --git a/include/dt-bindings/clock/bcm-sr.h b/include/dt-bindings/clock/bcm-sr.h
index cff6c6fe2947..419011ba1a94 100644
--- a/include/dt-bindings/clock/bcm-sr.h
+++ b/include/dt-bindings/clock/bcm-sr.h
@@ -35,7 +35,7 @@
/* GENPLL 0 clock channel ID SCR HSLS FS PCIE */
#define BCM_SR_GENPLL0 0
-#define BCM_SR_GENPLL0_SATA_CLK 1
+#define BCM_SR_GENPLL0_125M_CLK 1
#define BCM_SR_GENPLL0_SCR_CLK 2
#define BCM_SR_GENPLL0_250M_CLK 3
#define BCM_SR_GENPLL0_PCIE_AXI_CLK 4
@@ -50,9 +50,11 @@
/* GENPLL 2 clock channel ID NITRO MHB*/
#define BCM_SR_GENPLL2 0
#define BCM_SR_GENPLL2_NIC_CLK 1
-#define BCM_SR_GENPLL2_250_NITRO_CLK 2
+#define BCM_SR_GENPLL2_TS_500_CLK 2
#define BCM_SR_GENPLL2_125_NITRO_CLK 3
#define BCM_SR_GENPLL2_CHIMP_CLK 4
+#define BCM_SR_GENPLL2_NIC_FLASH_CLK 5
+#define BCM_SR_GENPLL2_FS4_CLK 6
/* GENPLL 3 HSLS clock channel ID */
#define BCM_SR_GENPLL3 0
@@ -62,11 +64,16 @@
/* GENPLL 4 SCR clock channel ID */
#define BCM_SR_GENPLL4 0
#define BCM_SR_GENPLL4_CCN_CLK 1
+#define BCM_SR_GENPLL4_TPIU_PLL_CLK 2
+#define BCM_SR_GENPLL4_NOC_CLK 3
+#define BCM_SR_GENPLL4_CHCLK_FS4_CLK 4
+#define BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK 5
/* GENPLL 5 FS4 clock channel ID */
#define BCM_SR_GENPLL5 0
-#define BCM_SR_GENPLL5_FS_CLK 1
-#define BCM_SR_GENPLL5_SPU_CLK 2
+#define BCM_SR_GENPLL5_FS4_HF_CLK 1
+#define BCM_SR_GENPLL5_CRYPTO_AE_CLK 2
+#define BCM_SR_GENPLL5_RAID_AE_CLK 3
/* GENPLL 6 NITRO clock channel ID */
#define BCM_SR_GENPLL6 0
@@ -74,13 +81,16 @@
/* LCPLL0 clock channel ID */
#define BCM_SR_LCPLL0 0
-#define BCM_SR_LCPLL0_SATA_REF_CLK 1
-#define BCM_SR_LCPLL0_USB_REF_CLK 2
-#define BCM_SR_LCPLL0_SATA_REFPN_CLK 3
+#define BCM_SR_LCPLL0_SATA_REFP_CLK 1
+#define BCM_SR_LCPLL0_SATA_REFN_CLK 2
+#define BCM_SR_LCPLL0_SATA_350_CLK 3
+#define BCM_SR_LCPLL0_SATA_500_CLK 4
/* LCPLL1 clock channel ID */
#define BCM_SR_LCPLL1 0
#define BCM_SR_LCPLL1_WAN_CLK 1
+#define BCM_SR_LCPLL1_USB_REF_CLK 2
+#define BCM_SR_LCPLL1_CRMU_TS_CLK 3
/* LCPLL PCIE clock channel ID */
#define BCM_SR_LCPLL_PCIE 0
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 8ba99a5e3fd3..7a892be90549 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -125,5 +125,7 @@
#define CLKID_VAPB_1 138
#define CLKID_VAPB_SEL 139
#define CLKID_VAPB 140
+#define CLKID_VDEC_1 153
+#define CLKID_VDEC_HEVC 156
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/histb-clock.h b/include/dt-bindings/clock/histb-clock.h
index fab30b3f78b2..136de24733be 100644
--- a/include/dt-bindings/clock/histb-clock.h
+++ b/include/dt-bindings/clock/histb-clock.h
@@ -62,6 +62,14 @@
#define HISTB_USB2_PHY1_REF_CLK 40
#define HISTB_USB2_PHY2_REF_CLK 41
#define HISTB_COMBPHY0_CLK 42
+#define HISTB_USB3_BUS_CLK 43
+#define HISTB_USB3_UTMI_CLK 44
+#define HISTB_USB3_PIPE_CLK 45
+#define HISTB_USB3_SUSPEND_CLK 46
+#define HISTB_USB3_BUS_CLK1 47
+#define HISTB_USB3_UTMI_CLK1 48
+#define HISTB_USB3_PIPE_CLK1 49
+#define HISTB_USB3_SUSPEND_CLK1 50
/* clocks provided by mcu CRG */
#define HISTB_MCE_CLK 1
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index da59fd9cdb5e..7ad171b8f3bf 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -271,6 +271,8 @@
#define IMX6QDL_CLK_PRE_AXI 258
#define IMX6QDL_CLK_MLB_SEL 259
#define IMX6QDL_CLK_MLB_PODF 260
-#define IMX6QDL_CLK_END 261
+#define IMX6QDL_CLK_EPIT1 261
+#define IMX6QDL_CLK_EPIT2 262
+#define IMX6QDL_CLK_END 263
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h
index 36f0324902a5..cd2d6c570e86 100644
--- a/include/dt-bindings/clock/imx6sx-clock.h
+++ b/include/dt-bindings/clock/imx6sx-clock.h
@@ -275,6 +275,10 @@
#define IMX6SX_PLL6_BYPASS 262
#define IMX6SX_PLL7_BYPASS 263
#define IMX6SX_CLK_SPDIF_GCLK 264
-#define IMX6SX_CLK_CLK_END 265
+#define IMX6SX_CLK_LVDS2_SEL 265
+#define IMX6SX_CLK_LVDS2_OUT 266
+#define IMX6SX_CLK_LVDS2_IN 267
+#define IMX6SX_CLK_ANACLK2 268
+#define IMX6SX_CLK_CLK_END 269
#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index ee9f1a508d2f..9564597cbfac 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -235,20 +235,27 @@
#define IMX6UL_CLK_CSI_PODF 222
#define IMX6UL_CLK_PLL3_120M 223
#define IMX6UL_CLK_KPP 224
+#define IMX6UL_CLK_CKO1_SEL 225
+#define IMX6UL_CLK_CKO1_PODF 226
+#define IMX6UL_CLK_CKO1 227
+#define IMX6UL_CLK_CKO2_SEL 228
+#define IMX6UL_CLK_CKO2_PODF 229
+#define IMX6UL_CLK_CKO2 230
+#define IMX6UL_CLK_CKO 231
/* For i.MX6ULL */
-#define IMX6ULL_CLK_ESAI_PRED 225
-#define IMX6ULL_CLK_ESAI_PODF 226
-#define IMX6ULL_CLK_ESAI_EXTAL 227
-#define IMX6ULL_CLK_ESAI_MEM 228
-#define IMX6ULL_CLK_ESAI_IPG 229
-#define IMX6ULL_CLK_DCP_CLK 230
-#define IMX6ULL_CLK_EPDC_PRE_SEL 231
-#define IMX6ULL_CLK_EPDC_SEL 232
-#define IMX6ULL_CLK_EPDC_PODF 233
-#define IMX6ULL_CLK_EPDC_ACLK 234
-#define IMX6ULL_CLK_EPDC_PIX 235
-#define IMX6ULL_CLK_ESAI_SEL 236
-#define IMX6UL_CLK_END 237
+#define IMX6ULL_CLK_ESAI_PRED 232
+#define IMX6ULL_CLK_ESAI_PODF 233
+#define IMX6ULL_CLK_ESAI_EXTAL 234
+#define IMX6ULL_CLK_ESAI_MEM 235
+#define IMX6ULL_CLK_ESAI_IPG 236
+#define IMX6ULL_CLK_DCP_CLK 237
+#define IMX6ULL_CLK_EPDC_PRE_SEL 238
+#define IMX6ULL_CLK_EPDC_SEL 239
+#define IMX6ULL_CLK_EPDC_PODF 240
+#define IMX6ULL_CLK_EPDC_ACLK 241
+#define IMX6ULL_CLK_EPDC_PIX 242
+#define IMX6ULL_CLK_ESAI_SEL 243
+#define IMX6UL_CLK_END 244
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
index b2325d3e236a..0d67f53bba93 100644
--- a/include/dt-bindings/clock/imx7d-clock.h
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -168,7 +168,7 @@
#define IMX7D_SPDIF_ROOT_SRC 155
#define IMX7D_SPDIF_ROOT_CG 156
#define IMX7D_SPDIF_ROOT_DIV 157
-#define IMX7D_ENET1_REF_ROOT_CLK 158
+#define IMX7D_ENET1_IPG_ROOT_CLK 158
#define IMX7D_ENET1_REF_ROOT_SRC 159
#define IMX7D_ENET1_REF_ROOT_CG 160
#define IMX7D_ENET1_REF_ROOT_DIV 161
@@ -176,7 +176,7 @@
#define IMX7D_ENET1_TIME_ROOT_SRC 163
#define IMX7D_ENET1_TIME_ROOT_CG 164
#define IMX7D_ENET1_TIME_ROOT_DIV 165
-#define IMX7D_ENET2_REF_ROOT_CLK 166
+#define IMX7D_ENET2_IPG_ROOT_CLK 166
#define IMX7D_ENET2_REF_ROOT_SRC 167
#define IMX7D_ENET2_REF_ROOT_CG 168
#define IMX7D_ENET2_REF_ROOT_DIV 169
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index dea9d46d4fa7..a60f47b49231 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -102,5 +102,6 @@
#define CLKID_MPLL0 93
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
+#define CLKID_NAND_CLK 112
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
index 24e93dfcee9f..9ac2f2b5710a 100644
--- a/include/dt-bindings/clock/mt2701-clk.h
+++ b/include/dt-bindings/clock/mt2701-clk.h
@@ -171,13 +171,12 @@
#define CLK_TOP_8BDAC 151
#define CLK_TOP_WBG_DIG_416M 152
#define CLK_TOP_DPI 153
-#define CLK_TOP_HDMITX_CLKDIG_CTS 154
-#define CLK_TOP_DSI0_LNTC_DSI 155
-#define CLK_TOP_AUD_EXT1 156
-#define CLK_TOP_AUD_EXT2 157
-#define CLK_TOP_NFI1X_PAD 158
-#define CLK_TOP_AXISEL_D4 159
-#define CLK_TOP_NR 160
+#define CLK_TOP_DSI0_LNTC_DSI 154
+#define CLK_TOP_AUD_EXT1 155
+#define CLK_TOP_AUD_EXT2 156
+#define CLK_TOP_NFI1X_PAD 157
+#define CLK_TOP_AXISEL_D4 158
+#define CLK_TOP_NR 159
/* APMIXEDSYS */
@@ -194,7 +193,8 @@
#define CLK_APMIXED_HADDS2PLL 11
#define CLK_APMIXED_AUD2PLL 12
#define CLK_APMIXED_TVD2PLL 13
-#define CLK_APMIXED_NR 14
+#define CLK_APMIXED_HDMI_REF 14
+#define CLK_APMIXED_NR 15
/* DDRPHY */
@@ -431,6 +431,10 @@
#define CLK_ETHSYS_CRYPTO 8
#define CLK_ETHSYS_NR 9
+/* G3DSYS */
+#define CLK_G3DSYS_CORE 1
+#define CLK_G3DSYS_NR 2
+
/* BDP */
#define CLK_BDP_BRG_BA 1
diff --git a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
new file mode 100644
index 000000000000..f21522605b94
--- /dev/null
+++ b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Nuvoton NPCM7xx Clock Generator binding
+ * clock binding number for all clocks supported by nuvoton,npcm7xx-clk
+ *
+ * Copyright (C) 2018 Nuvoton Technologies tali.perry@nuvoton.com
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_NPCM7XX_H
+#define __DT_BINDINGS_CLOCK_NPCM7XX_H
+
+
+#define NPCM7XX_CLK_CPU 0
+#define NPCM7XX_CLK_GFX_PIXEL 1
+#define NPCM7XX_CLK_MC 2
+#define NPCM7XX_CLK_ADC 3
+#define NPCM7XX_CLK_AHB 4
+#define NPCM7XX_CLK_TIMER 5
+#define NPCM7XX_CLK_UART 6
+#define NPCM7XX_CLK_MMC 7
+#define NPCM7XX_CLK_SPI3 8
+#define NPCM7XX_CLK_PCI 9
+#define NPCM7XX_CLK_AXI 10
+#define NPCM7XX_CLK_APB4 11
+#define NPCM7XX_CLK_APB3 12
+#define NPCM7XX_CLK_APB2 13
+#define NPCM7XX_CLK_APB1 14
+#define NPCM7XX_CLK_APB5 15
+#define NPCM7XX_CLK_CLKOUT 16
+#define NPCM7XX_CLK_GFX 17
+#define NPCM7XX_CLK_SU 18
+#define NPCM7XX_CLK_SU48 19
+#define NPCM7XX_CLK_SDHC 20
+#define NPCM7XX_CLK_SPI0 21
+#define NPCM7XX_CLK_SPIX 22
+
+#define NPCM7XX_CLK_REFCLK 23
+#define NPCM7XX_CLK_SYSBYPCK 24
+#define NPCM7XX_CLK_MCBYPCK 25
+
+#define NPCM7XX_NUM_CLOCKS (NPCM7XX_CLK_MCBYPCK+1)
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
new file mode 100644
index 000000000000..58a242e656b1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_COBALT_H
+#define _DT_BINDINGS_CLK_MSM_GCC_COBALT_H
+
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 0
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 1
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 2
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 3
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 4
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 5
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 6
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 7
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 8
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 9
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 10
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 11
+#define BLSP1_UART1_APPS_CLK_SRC 12
+#define BLSP1_UART2_APPS_CLK_SRC 13
+#define BLSP1_UART3_APPS_CLK_SRC 14
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 15
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 16
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 17
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 18
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 19
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 20
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 21
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 22
+#define BLSP2_QUP5_I2C_APPS_CLK_SRC 23
+#define BLSP2_QUP5_SPI_APPS_CLK_SRC 24
+#define BLSP2_QUP6_I2C_APPS_CLK_SRC 25
+#define BLSP2_QUP6_SPI_APPS_CLK_SRC 26
+#define BLSP2_UART1_APPS_CLK_SRC 27
+#define BLSP2_UART2_APPS_CLK_SRC 28
+#define BLSP2_UART3_APPS_CLK_SRC 29
+#define GCC_AGGRE1_NOC_XO_CLK 30
+#define GCC_AGGRE1_UFS_AXI_CLK 31
+#define GCC_AGGRE1_USB3_AXI_CLK 32
+#define GCC_APSS_QDSS_TSCTR_DIV2_CLK 33
+#define GCC_APSS_QDSS_TSCTR_DIV8_CLK 34
+#define GCC_BIMC_HMSS_AXI_CLK 35
+#define GCC_BIMC_MSS_Q6_AXI_CLK 36
+#define GCC_BLSP1_AHB_CLK 37
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 38
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 39
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 40
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 41
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 42
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 43
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 44
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 45
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 46
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 47
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 48
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 49
+#define GCC_BLSP1_SLEEP_CLK 50
+#define GCC_BLSP1_UART1_APPS_CLK 51
+#define GCC_BLSP1_UART2_APPS_CLK 52
+#define GCC_BLSP1_UART3_APPS_CLK 53
+#define GCC_BLSP2_AHB_CLK 54
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 55
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 56
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 57
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 58
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 59
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 60
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 61
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 62
+#define GCC_BLSP2_QUP5_I2C_APPS_CLK 63
+#define GCC_BLSP2_QUP5_SPI_APPS_CLK 64
+#define GCC_BLSP2_QUP6_I2C_APPS_CLK 65
+#define GCC_BLSP2_QUP6_SPI_APPS_CLK 66
+#define GCC_BLSP2_SLEEP_CLK 67
+#define GCC_BLSP2_UART1_APPS_CLK 68
+#define GCC_BLSP2_UART2_APPS_CLK 69
+#define GCC_BLSP2_UART3_APPS_CLK 70
+#define GCC_CFG_NOC_USB3_AXI_CLK 71
+#define GCC_GP1_CLK 72
+#define GCC_GP2_CLK 73
+#define GCC_GP3_CLK 74
+#define GCC_GPU_BIMC_GFX_CLK 75
+#define GCC_GPU_BIMC_GFX_SRC_CLK 76
+#define GCC_GPU_CFG_AHB_CLK 77
+#define GCC_GPU_SNOC_DVM_GFX_CLK 78
+#define GCC_HMSS_AHB_CLK 79
+#define GCC_HMSS_AT_CLK 80
+#define GCC_HMSS_DVM_BUS_CLK 81
+#define GCC_HMSS_RBCPR_CLK 82
+#define GCC_HMSS_TRIG_CLK 83
+#define GCC_LPASS_AT_CLK 84
+#define GCC_LPASS_TRIG_CLK 85
+#define GCC_MMSS_NOC_CFG_AHB_CLK 86
+#define GCC_MMSS_QM_AHB_CLK 87
+#define GCC_MMSS_QM_CORE_CLK 88
+#define GCC_MMSS_SYS_NOC_AXI_CLK 89
+#define GCC_MSS_AT_CLK 90
+#define GCC_PCIE_0_AUX_CLK 91
+#define GCC_PCIE_0_CFG_AHB_CLK 92
+#define GCC_PCIE_0_MSTR_AXI_CLK 93
+#define GCC_PCIE_0_PIPE_CLK 94
+#define GCC_PCIE_0_SLV_AXI_CLK 95
+#define GCC_PCIE_PHY_AUX_CLK 96
+#define GCC_PDM2_CLK 97
+#define GCC_PDM_AHB_CLK 98
+#define GCC_PDM_XO4_CLK 99
+#define GCC_PRNG_AHB_CLK 100
+#define GCC_SDCC2_AHB_CLK 101
+#define GCC_SDCC2_APPS_CLK 102
+#define GCC_SDCC4_AHB_CLK 103
+#define GCC_SDCC4_APPS_CLK 104
+#define GCC_TSIF_AHB_CLK 105
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 106
+#define GCC_TSIF_REF_CLK 107
+#define GCC_UFS_AHB_CLK 108
+#define GCC_UFS_AXI_CLK 109
+#define GCC_UFS_ICE_CORE_CLK 110
+#define GCC_UFS_PHY_AUX_CLK 111
+#define GCC_UFS_RX_SYMBOL_0_CLK 112
+#define GCC_UFS_RX_SYMBOL_1_CLK 113
+#define GCC_UFS_TX_SYMBOL_0_CLK 114
+#define GCC_UFS_UNIPRO_CORE_CLK 115
+#define GCC_USB30_MASTER_CLK 116
+#define GCC_USB30_MOCK_UTMI_CLK 117
+#define GCC_USB30_SLEEP_CLK 118
+#define GCC_USB3_PHY_AUX_CLK 119
+#define GCC_USB3_PHY_PIPE_CLK 120
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 121
+#define GP1_CLK_SRC 122
+#define GP2_CLK_SRC 123
+#define GP3_CLK_SRC 124
+#define GPLL0 125
+#define GPLL0_OUT_EVEN 126
+#define GPLL0_OUT_MAIN 127
+#define GPLL0_OUT_ODD 128
+#define GPLL0_OUT_TEST 129
+#define GPLL1 130
+#define GPLL1_OUT_EVEN 131
+#define GPLL1_OUT_MAIN 132
+#define GPLL1_OUT_ODD 133
+#define GPLL1_OUT_TEST 134
+#define GPLL2 135
+#define GPLL2_OUT_EVEN 136
+#define GPLL2_OUT_MAIN 137
+#define GPLL2_OUT_ODD 138
+#define GPLL2_OUT_TEST 139
+#define GPLL3 140
+#define GPLL3_OUT_EVEN 141
+#define GPLL3_OUT_MAIN 142
+#define GPLL3_OUT_ODD 143
+#define GPLL3_OUT_TEST 144
+#define GPLL4 145
+#define GPLL4_OUT_EVEN 146
+#define GPLL4_OUT_MAIN 147
+#define GPLL4_OUT_ODD 148
+#define GPLL4_OUT_TEST 149
+#define GPLL6 150
+#define GPLL6_OUT_EVEN 151
+#define GPLL6_OUT_MAIN 152
+#define GPLL6_OUT_ODD 153
+#define GPLL6_OUT_TEST 154
+#define HMSS_AHB_CLK_SRC 155
+#define HMSS_RBCPR_CLK_SRC 156
+#define PCIE_AUX_CLK_SRC 157
+#define PDM2_CLK_SRC 158
+#define SDCC2_APPS_CLK_SRC 159
+#define SDCC4_APPS_CLK_SRC 160
+#define TSIF_REF_CLK_SRC 161
+#define UFS_AXI_CLK_SRC 162
+#define USB30_MASTER_CLK_SRC 163
+#define USB30_MOCK_UTMI_CLK_SRC 164
+#define USB3_PHY_AUX_CLK_SRC 165
+
+#define PCIE_0_GDSC 0
+#define UFS_GDSC 1
+#define USB_30_GDSC 2
+
+#define GCC_BLSP1_QUP1_BCR 0
+#define GCC_BLSP1_QUP2_BCR 1
+#define GCC_BLSP1_QUP3_BCR 2
+#define GCC_BLSP1_QUP4_BCR 3
+#define GCC_BLSP1_QUP5_BCR 4
+#define GCC_BLSP1_QUP6_BCR 5
+#define GCC_BLSP2_QUP1_BCR 6
+#define GCC_BLSP2_QUP2_BCR 7
+#define GCC_BLSP2_QUP3_BCR 8
+#define GCC_BLSP2_QUP4_BCR 9
+#define GCC_BLSP2_QUP5_BCR 10
+#define GCC_BLSP2_QUP6_BCR 11
+#define GCC_PCIE_0_BCR 12
+#define GCC_PDM_BCR 13
+#define GCC_SDCC2_BCR 14
+#define GCC_SDCC4_BCR 15
+#define GCC_TSIF_BCR 16
+#define GCC_UFS_BCR 17
+#define GCC_USB_30_BCR 18
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
new file mode 100644
index 000000000000..aca61264f12c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 4
+#define GCC_BOOT_ROM_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CE1_AHB_CLK 9
+#define GCC_CE1_AXI_CLK 10
+#define GCC_CE1_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_RBCPR_CLK 16
+#define GCC_CPUSS_RBCPR_CLK_SRC 17
+#define GCC_DDRSS_GPU_AXI_CLK 18
+#define GCC_DISP_AHB_CLK 19
+#define GCC_DISP_AXI_CLK 20
+#define GCC_DISP_GPLL0_CLK_SRC 21
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 22
+#define GCC_DISP_XO_CLK 23
+#define GCC_GP1_CLK 24
+#define GCC_GP1_CLK_SRC 25
+#define GCC_GP2_CLK 26
+#define GCC_GP2_CLK_SRC 27
+#define GCC_GP3_CLK 28
+#define GCC_GP3_CLK_SRC 29
+#define GCC_GPU_CFG_AHB_CLK 30
+#define GCC_GPU_GPLL0_CLK_SRC 31
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 32
+#define GCC_GPU_MEMNOC_GFX_CLK 33
+#define GCC_GPU_SNOC_DVM_GFX_CLK 34
+#define GCC_MSS_AXIS2_CLK 35
+#define GCC_MSS_CFG_AHB_CLK 36
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 37
+#define GCC_MSS_MFAB_AXIS_CLK 38
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 39
+#define GCC_MSS_SNOC_AXI_CLK 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_CLKREF_CLK 44
+#define GCC_PCIE_0_MSTR_AXI_CLK 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_CLKREF_CLK 52
+#define GCC_PCIE_1_MSTR_AXI_CLK 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_SLV_AXI_CLK 55
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_PHY_AUX_CLK 57
+#define GCC_PCIE_PHY_REFGEN_CLK 58
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_PRNG_AHB_CLK 64
+#define GCC_QMIP_CAMERA_AHB_CLK 65
+#define GCC_QMIP_DISP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_S0_CLK 68
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S1_CLK 70
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S2_CLK 72
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S3_CLK 74
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S4_CLK 76
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S5_CLK 78
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S6_CLK 80
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S7_CLK 82
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S0_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S1_CLK 86
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S2_CLK 88
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S3_CLK 90
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S4_CLK 92
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S5_CLK 94
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S6_CLK 96
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S7_CLK 98
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 99
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 100
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 101
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 102
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 103
+#define GCC_SDCC2_AHB_CLK 104
+#define GCC_SDCC2_APPS_CLK 105
+#define GCC_SDCC2_APPS_CLK_SRC 106
+#define GCC_SDCC4_AHB_CLK 107
+#define GCC_SDCC4_APPS_CLK 108
+#define GCC_SDCC4_APPS_CLK_SRC 109
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 110
+#define GCC_TSIF_AHB_CLK 111
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 112
+#define GCC_TSIF_REF_CLK 113
+#define GCC_TSIF_REF_CLK_SRC 114
+#define GCC_UFS_CARD_AHB_CLK 115
+#define GCC_UFS_CARD_AXI_CLK 116
+#define GCC_UFS_CARD_AXI_CLK_SRC 117
+#define GCC_UFS_CARD_CLKREF_CLK 118
+#define GCC_UFS_CARD_ICE_CORE_CLK 119
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 120
+#define GCC_UFS_CARD_PHY_AUX_CLK 121
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 122
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 123
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 124
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 126
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 127
+#define GCC_UFS_MEM_CLKREF_CLK 128
+#define GCC_UFS_PHY_AHB_CLK 129
+#define GCC_UFS_PHY_AXI_CLK 130
+#define GCC_UFS_PHY_AXI_CLK_SRC 131
+#define GCC_UFS_PHY_ICE_CORE_CLK 132
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 133
+#define GCC_UFS_PHY_PHY_AUX_CLK 134
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 140
+#define GCC_USB30_PRIM_MASTER_CLK 141
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 142
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 143
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 144
+#define GCC_USB30_PRIM_SLEEP_CLK 145
+#define GCC_USB30_SEC_MASTER_CLK 146
+#define GCC_USB30_SEC_MASTER_CLK_SRC 147
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 148
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 149
+#define GCC_USB30_SEC_SLEEP_CLK 150
+#define GCC_USB3_PRIM_CLKREF_CLK 151
+#define GCC_USB3_PRIM_PHY_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 153
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 154
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 155
+#define GCC_USB3_SEC_CLKREF_CLK 156
+#define GCC_USB3_SEC_PHY_AUX_CLK 157
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 158
+#define GCC_USB3_SEC_PHY_PIPE_CLK 159
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 160
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 161
+#define GCC_VIDEO_AHB_CLK 162
+#define GCC_VIDEO_AXI_CLK 163
+#define GCC_VIDEO_XO_CLK 164
+#define GPLL0 165
+#define GPLL0_OUT_EVEN 166
+#define GPLL0_OUT_MAIN 167
+#define GCC_GPU_IREF_CLK 168
+#define GCC_SDCC1_AHB_CLK 169
+#define GCC_SDCC1_APPS_CLK 170
+#define GCC_SDCC1_ICE_CORE_CLK 171
+#define GCC_SDCC1_APPS_CLK_SRC 172
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 173
+#define GCC_APC_VS_CLK 174
+#define GCC_GPU_VS_CLK 175
+#define GCC_MSS_VS_CLK 176
+#define GCC_VDDA_VS_CLK 177
+#define GCC_VDDCX_VS_CLK 178
+#define GCC_VDDMX_VS_CLK 179
+#define GCC_VS_CTRL_AHB_CLK 180
+#define GCC_VS_CTRL_CLK 181
+#define GCC_VS_CTRL_CLK_SRC 182
+#define GCC_VSENSOR_CLK_SRC 183
+#define GPLL4 184
+
+/* GCC Resets */
+#define GCC_MMSS_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_PHY_BCR 3
+#define GCC_PDM_BCR 4
+#define GCC_PRNG_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_QUPV3_WRAPPER_1_BCR 7
+#define GCC_QUSB2PHY_PRIM_BCR 8
+#define GCC_QUSB2PHY_SEC_BCR 9
+#define GCC_SDCC2_BCR 10
+#define GCC_SDCC4_BCR 11
+#define GCC_TSIF_BCR 12
+#define GCC_UFS_CARD_BCR 13
+#define GCC_UFS_PHY_BCR 14
+#define GCC_USB30_PRIM_BCR 15
+#define GCC_USB30_SEC_BCR 16
+#define GCC_USB3_PHY_PRIM_BCR 17
+#define GCC_USB3PHY_PHY_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_SEC_BCR 20
+#define GCC_USB3PHY_PHY_SEC_BCR 21
+#define GCC_USB3_DP_PHY_SEC_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_PCIE_0_PHY_BCR 24
+#define GCC_PCIE_1_PHY_BCR 25
+
+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 6
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 7
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 8
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 10
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 12
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
new file mode 100644
index 000000000000..f48fbd6f2095
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+
+#ifndef _DT_BINDINGS_CLK_MSM_RPMH_H
+#define _DT_BINDINGS_CLK_MSM_RPMH_H
+
+/* RPMh controlled clocks */
+#define RPMH_CXO_CLK 0
+#define RPMH_CXO_CLK_A 1
+#define RPMH_LN_BB_CLK2 2
+#define RPMH_LN_BB_CLK2_A 3
+#define RPMH_LN_BB_CLK3 4
+#define RPMH_LN_BB_CLK3_A 5
+#define RPMH_RF_CLK1 6
+#define RPMH_RF_CLK1_A 7
+#define RPMH_RF_CLK2 8
+#define RPMH_RF_CLK2_A 9
+#define RPMH_RF_CLK3 10
+#define RPMH_RF_CLK3_A 11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sdm845.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h
new file mode 100644
index 000000000000..1b868165e8ce
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_VIDEO_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_VIDEO_CC_SDM845_H
+
+/* VIDEO_CC clock registers */
+#define VIDEO_CC_APB_CLK 0
+#define VIDEO_CC_AT_CLK 1
+#define VIDEO_CC_QDSS_TRIG_CLK 2
+#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK 3
+#define VIDEO_CC_VCODEC0_AXI_CLK 4
+#define VIDEO_CC_VCODEC0_CORE_CLK 5
+#define VIDEO_CC_VCODEC1_AXI_CLK 6
+#define VIDEO_CC_VCODEC1_CORE_CLK 7
+#define VIDEO_CC_VENUS_AHB_CLK 8
+#define VIDEO_CC_VENUS_CLK_SRC 9
+#define VIDEO_CC_VENUS_CTL_AXI_CLK 10
+#define VIDEO_CC_VENUS_CTL_CORE_CLK 11
+#define VIDEO_PLL0 12
+
+/* VIDEO_CC Resets */
+#define VIDEO_CC_VENUS_BCR 0
+#define VIDEO_CC_VCODEC0_BCR 1
+#define VIDEO_CC_VCODEC1_BCR 2
+#define VIDEO_CC_INTERFACE_BCR 3
+
+/* VIDEO_CC GDSCRs */
+#define VENUS_GDSC 0
+#define VCODEC0_GDSC 1
+#define VCODEC1_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/r8a77470-cpg-mssr.h b/include/dt-bindings/clock/r8a77470-cpg-mssr.h
new file mode 100644
index 000000000000..34cba49d0f84
--- /dev/null
+++ b/include/dt-bindings/clock/r8a77470-cpg-mssr.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77470 CPG Core Clocks */
+#define R8A77470_CLK_Z2 0
+#define R8A77470_CLK_ZTR 1
+#define R8A77470_CLK_ZTRD2 2
+#define R8A77470_CLK_ZT 3
+#define R8A77470_CLK_ZX 4
+#define R8A77470_CLK_ZS 5
+#define R8A77470_CLK_HP 6
+#define R8A77470_CLK_B 7
+#define R8A77470_CLK_LB 8
+#define R8A77470_CLK_P 9
+#define R8A77470_CLK_CL 10
+#define R8A77470_CLK_CP 11
+#define R8A77470_CLK_M2 12
+#define R8A77470_CLK_ZB3 13
+#define R8A77470_CLK_SDH 14
+#define R8A77470_CLK_SD0 15
+#define R8A77470_CLK_SD1 16
+#define R8A77470_CLK_SD2 17
+#define R8A77470_CLK_MP 18
+#define R8A77470_CLK_QSPI 19
+#define R8A77470_CLK_CPEX 20
+#define R8A77470_CLK_RCAN 21
+#define R8A77470_CLK_R 22
+#define R8A77470_CLK_OSC 23
+
+#endif /* __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a77990-cpg-mssr.h b/include/dt-bindings/clock/r8a77990-cpg-mssr.h
new file mode 100644
index 000000000000..a596a482f3a9
--- /dev/null
+++ b/include/dt-bindings/clock/r8a77990-cpg-mssr.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77990 CPG Core Clocks */
+#define R8A77990_CLK_Z2 0
+#define R8A77990_CLK_ZR 1
+#define R8A77990_CLK_ZG 2
+#define R8A77990_CLK_ZTR 3
+#define R8A77990_CLK_ZT 4
+#define R8A77990_CLK_ZX 5
+#define R8A77990_CLK_S0D1 6
+#define R8A77990_CLK_S0D3 7
+#define R8A77990_CLK_S0D6 8
+#define R8A77990_CLK_S0D12 9
+#define R8A77990_CLK_S0D24 10
+#define R8A77990_CLK_S1D1 11
+#define R8A77990_CLK_S1D2 12
+#define R8A77990_CLK_S1D4 13
+#define R8A77990_CLK_S2D1 14
+#define R8A77990_CLK_S2D2 15
+#define R8A77990_CLK_S2D4 16
+#define R8A77990_CLK_S3D1 17
+#define R8A77990_CLK_S3D2 18
+#define R8A77990_CLK_S3D4 19
+#define R8A77990_CLK_S0D6C 20
+#define R8A77990_CLK_S3D1C 21
+#define R8A77990_CLK_S3D2C 22
+#define R8A77990_CLK_S3D4C 23
+#define R8A77990_CLK_LB 24
+#define R8A77990_CLK_CL 25
+#define R8A77990_CLK_ZB3 26
+#define R8A77990_CLK_ZB3D2 27
+#define R8A77990_CLK_CR 28
+#define R8A77990_CLK_CRD2 29
+#define R8A77990_CLK_SD0H 30
+#define R8A77990_CLK_SD0 31
+#define R8A77990_CLK_SD1H 32
+#define R8A77990_CLK_SD1 33
+#define R8A77990_CLK_SD3H 34
+#define R8A77990_CLK_SD3 35
+#define R8A77990_CLK_RPC 36
+#define R8A77990_CLK_RPCD2 37
+#define R8A77990_CLK_ZA2 38
+#define R8A77990_CLK_ZA8 39
+#define R8A77990_CLK_Z2D 40
+#define R8A77990_CLK_CANFD 41
+#define R8A77990_CLK_MSO 42
+#define R8A77990_CLK_R 43
+#define R8A77990_CLK_OSC 44
+#define R8A77990_CLK_LV0 45
+#define R8A77990_CLK_LV1 46
+#define R8A77990_CLK_CSI0 47
+#define R8A77990_CLK_CP 48
+#define R8A77990_CLK_CPEX 49
+
+#endif /* __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/sun50i-h6-r-ccu.h b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
new file mode 100644
index 000000000000..76136132a13e
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_
+
+#define CLK_AR100 0
+
+#define CLK_R_APB1 2
+
+#define CLK_R_APB1_TIMER 4
+#define CLK_R_APB1_TWD 5
+#define CLK_R_APB1_PWM 6
+#define CLK_R_APB2_UART 7
+#define CLK_R_APB2_I2C 8
+#define CLK_R_APB1_IR 9
+#define CLK_R_APB1_W1 10
+
+#define CLK_IR 11
+#define CLK_W1 12
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
index 4878a67a844c..604fe781c465 100644
--- a/include/dt-bindings/pinctrl/mt7623-pinfunc.h
+++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
@@ -23,20 +23,26 @@
#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1)
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_ANT_SEL1 (MTK_PIN_NO(5) | 5)
#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1)
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_ANT_SEL0 (MTK_PIN_NO(6) | 5)
#define MT7623_PIN_7_SPI1_CSN_FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
#define MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1)
+#define MT7623_PIN_7_SPI1_CSN_FUNC_KCOL0 (MTK_PIN_NO(7) | 4)
#define MT7623_PIN_8_SPI1_MI_FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MI (MTK_PIN_NO(8) | 1)
#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MO (MTK_PIN_NO(8) | 2)
+#define MT7623_PIN_8_SPI1_MI_FUNC_KCOL1 (MTK_PIN_NO(8) | 4)
#define MT7623_PIN_9_SPI1_MO_FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MO (MTK_PIN_NO(9) | 1)
#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MI (MTK_PIN_NO(9) | 2)
+#define MT7623_PIN_9_SPI1_MO_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3)
+#define MT7623_PIN_9_SPI1_MO_FUNC_KCOL2 (MTK_PIN_NO(9) | 4)
#define MT7623_PIN_10_RTC32K_CK_FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
#define MT7623_PIN_10_RTC32K_CK_FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1)
@@ -53,6 +59,7 @@
#define MT7623_PIN_14_GPIO14_FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
#define MT7623_PIN_14_GPIO14_FUNC_URXD2 (MTK_PIN_NO(14) | 1)
#define MT7623_PIN_14_GPIO14_FUNC_UTXD2 (MTK_PIN_NO(14) | 2)
+#define MT7623_PIN_14_GPIO14_FUNC_SRCCLKENAI2 (MTK_PIN_NO(14) | 5)
#define MT7623_PIN_15_GPIO15_FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
#define MT7623_PIN_15_GPIO15_FUNC_UTXD2 (MTK_PIN_NO(15) | 1)
@@ -60,88 +67,139 @@
#define MT7623_PIN_18_PCM_CLK_FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
#define MT7623_PIN_18_PCM_CLK_FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1)
+#define MT7623_PIN_18_PCM_CLK_FUNC_MRG_CLK (MTK_PIN_NO(18) | 2)
+#define MT7623_PIN_18_PCM_CLK_FUNC_MM_TEST_CK (MTK_PIN_NO(18) | 4)
+#define MT7623_PIN_18_PCM_CLK_FUNC_CONN_DSP_JCK (MTK_PIN_NO(18) | 5)
#define MT7623_PIN_18_PCM_CLK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(18) | 6)
#define MT7623_PIN_19_PCM_SYNC_FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
#define MT7623_PIN_19_PCM_SYNC_FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_MRG_SYNC (MTK_PIN_NO(19) | 2)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_CONN_DSP_JINTP (MTK_PIN_NO(19) | 5)
#define MT7623_PIN_19_PCM_SYNC_FUNC_AP_PCM_SYNC (MTK_PIN_NO(19) | 6)
#define MT7623_PIN_20_PCM_RX_FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
#define MT7623_PIN_20_PCM_RX_FUNC_PCM_RX (MTK_PIN_NO(20) | 1)
+#define MT7623_PIN_20_PCM_RX_FUNC_MRG_RX (MTK_PIN_NO(20) | 2)
+#define MT7623_PIN_20_PCM_RX_FUNC_MRG_TX (MTK_PIN_NO(20) | 3)
#define MT7623_PIN_20_PCM_RX_FUNC_PCM_TX (MTK_PIN_NO(20) | 4)
+#define MT7623_PIN_20_PCM_RX_FUNC_CONN_DSP_JDI (MTK_PIN_NO(20) | 5)
#define MT7623_PIN_20_PCM_RX_FUNC_AP_PCM_RX (MTK_PIN_NO(20) | 6)
#define MT7623_PIN_21_PCM_TX_FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
#define MT7623_PIN_21_PCM_TX_FUNC_PCM_TX (MTK_PIN_NO(21) | 1)
+#define MT7623_PIN_21_PCM_TX_FUNC_MRG_TX (MTK_PIN_NO(21) | 2)
+#define MT7623_PIN_21_PCM_TX_FUNC_MRG_RX (MTK_PIN_NO(21) | 3)
#define MT7623_PIN_21_PCM_TX_FUNC_PCM_RX (MTK_PIN_NO(21) | 4)
+#define MT7623_PIN_21_PCM_TX_FUNC_CONN_DSP_JMS (MTK_PIN_NO(21) | 5)
#define MT7623_PIN_21_PCM_TX_FUNC_AP_PCM_TX (MTK_PIN_NO(21) | 6)
#define MT7623_PIN_22_EINT0_FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
#define MT7623_PIN_22_EINT0_FUNC_UCTS0 (MTK_PIN_NO(22) | 1)
#define MT7623_PIN_22_EINT0_FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 2)
+#define MT7623_PIN_22_EINT0_FUNC_KCOL3 (MTK_PIN_NO(22) | 3)
+#define MT7623_PIN_22_EINT0_FUNC_CONN_DSP_JDO (MTK_PIN_NO(22) | 4)
+#define MT7623_PIN_22_EINT0_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(22) | 5)
#define MT7623_PIN_23_EINT1_FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
#define MT7623_PIN_23_EINT1_FUNC_URTS0 (MTK_PIN_NO(23) | 1)
#define MT7623_PIN_23_EINT1_FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 2)
+#define MT7623_PIN_23_EINT1_FUNC_KCOL2 (MTK_PIN_NO(23) | 3)
+#define MT7623_PIN_23_EINT1_FUNC_CONN_MCU_TDO (MTK_PIN_NO(23) | 4)
+#define MT7623_PIN_23_EINT1_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5)
#define MT7623_PIN_24_EINT2_FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
#define MT7623_PIN_24_EINT2_FUNC_UCTS1 (MTK_PIN_NO(24) | 1)
#define MT7623_PIN_24_EINT2_FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 2)
+#define MT7623_PIN_24_EINT2_FUNC_KCOL1 (MTK_PIN_NO(24) | 3)
+#define MT7623_PIN_24_EINT2_FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(24) | 4)
#define MT7623_PIN_25_EINT3_FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
#define MT7623_PIN_25_EINT3_FUNC_URTS1 (MTK_PIN_NO(25) | 1)
+#define MT7623_PIN_25_EINT3_FUNC_KCOL0 (MTK_PIN_NO(25) | 3)
+#define MT7623_PIN_25_EINT3_FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(25) | 4)
#define MT7623_PIN_26_EINT4_FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
#define MT7623_PIN_26_EINT4_FUNC_UCTS3 (MTK_PIN_NO(26) | 1)
+#define MT7623_PIN_26_EINT4_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(26) | 2)
+#define MT7623_PIN_26_EINT4_FUNC_KROW3 (MTK_PIN_NO(26) | 3)
+#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_TCK0 (MTK_PIN_NO(26) | 4)
+#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(26) | 5)
#define MT7623_PIN_26_EINT4_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6)
#define MT7623_PIN_27_EINT5_FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
#define MT7623_PIN_27_EINT5_FUNC_URTS3 (MTK_PIN_NO(27) | 1)
+#define MT7623_PIN_27_EINT5_FUNC_IDDIG_P1 (MTK_PIN_NO(27) | 2)
+#define MT7623_PIN_27_EINT5_FUNC_KROW2 (MTK_PIN_NO(27) | 3)
+#define MT7623_PIN_27_EINT5_FUNC_CONN_MCU_TDI (MTK_PIN_NO(27) | 4)
#define MT7623_PIN_27_EINT5_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6)
#define MT7623_PIN_28_EINT6_FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
#define MT7623_PIN_28_EINT6_FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1)
+#define MT7623_PIN_28_EINT6_FUNC_KROW1 (MTK_PIN_NO(28) | 3)
+#define MT7623_PIN_28_EINT6_FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(28) | 4)
#define MT7623_PIN_28_EINT6_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6)
#define MT7623_PIN_29_EINT7_FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
#define MT7623_PIN_29_EINT7_FUNC_IDDIG (MTK_PIN_NO(29) | 1)
#define MT7623_PIN_29_EINT7_FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2)
+#define MT7623_PIN_29_EINT7_FUNC_KROW0 (MTK_PIN_NO(29) | 3)
+#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 4)
+#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(29) | 5)
#define MT7623_PIN_29_EINT7_FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 6)
#define MT7623_PIN_33_I2S1_DATA_FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA_BYPS (MTK_PIN_NO(33) | 2)
#define MT7623_PIN_33_I2S1_DATA_FUNC_PCM_TX (MTK_PIN_NO(33) | 3)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_IMG_TEST_CK (MTK_PIN_NO(33) | 4)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_G1_RXD0 (MTK_PIN_NO(33) | 5)
#define MT7623_PIN_33_I2S1_DATA_FUNC_AP_PCM_TX (MTK_PIN_NO(33) | 6)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(34) | 3)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_VDEC_TEST_CK (MTK_PIN_NO(34) | 4)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_G1_RXD1 (MTK_PIN_NO(34) | 5)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_AP_PCM_RX (MTK_PIN_NO(34) | 6)
#define MT7623_PIN_35_I2S1_BCK_FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
#define MT7623_PIN_35_I2S1_BCK_FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1)
#define MT7623_PIN_35_I2S1_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3)
+#define MT7623_PIN_35_I2S1_BCK_FUNC_G1_RXD2 (MTK_PIN_NO(35) | 5)
#define MT7623_PIN_35_I2S1_BCK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(35) | 6)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3)
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_G1_RXD3 (MTK_PIN_NO(36) | 5)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_AP_PCM_SYNC (MTK_PIN_NO(36) | 6)
#define MT7623_PIN_37_I2S1_MCLK_FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
#define MT7623_PIN_37_I2S1_MCLK_FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1)
+#define MT7623_PIN_37_I2S1_MCLK_FUNC_G1_RXDV (MTK_PIN_NO(37) | 5)
#define MT7623_PIN_39_JTMS_FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
#define MT7623_PIN_39_JTMS_FUNC_JTMS (MTK_PIN_NO(39) | 1)
+#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_TMS (MTK_PIN_NO(39) | 2)
+#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(39) | 3)
+#define MT7623_PIN_39_JTMS_FUNC_DFD_TMS_XI (MTK_PIN_NO(39) | 4)
#define MT7623_PIN_40_JTCK_FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
#define MT7623_PIN_40_JTCK_FUNC_JTCK (MTK_PIN_NO(40) | 1)
+#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_TCK1 (MTK_PIN_NO(40) | 2)
+#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(40) | 3)
+#define MT7623_PIN_40_JTCK_FUNC_DFD_TCK_XI (MTK_PIN_NO(40) | 4)
#define MT7623_PIN_41_JTDI_FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
#define MT7623_PIN_41_JTDI_FUNC_JTDI (MTK_PIN_NO(41) | 1)
+#define MT7623_PIN_41_JTDI_FUNC_CONN_MCU_TDI (MTK_PIN_NO(41) | 2)
+#define MT7623_PIN_41_JTDI_FUNC_DFD_TDI_XI (MTK_PIN_NO(41) | 4)
#define MT7623_PIN_42_JTDO_FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
#define MT7623_PIN_42_JTDO_FUNC_JTDO (MTK_PIN_NO(42) | 1)
+#define MT7623_PIN_42_JTDO_FUNC_CONN_MCU_TDO (MTK_PIN_NO(42) | 2)
+#define MT7623_PIN_42_JTDO_FUNC_DFD_TDO (MTK_PIN_NO(42) | 4)
#define MT7623_PIN_43_NCLE_FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
#define MT7623_PIN_43_NCLE_FUNC_NCLE (MTK_PIN_NO(43) | 1)
@@ -160,31 +218,40 @@
#define MT7623_PIN_47_NREB_FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
#define MT7623_PIN_47_NREB_FUNC_NREB (MTK_PIN_NO(47) | 1)
+#define MT7623_PIN_47_NREB_FUNC_IDDIG_P1 (MTK_PIN_NO(47) | 2)
#define MT7623_PIN_48_NRNB_FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
#define MT7623_PIN_48_NRNB_FUNC_NRNB (MTK_PIN_NO(48) | 1)
+#define MT7623_PIN_48_NRNB_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(48) | 2)
#define MT7623_PIN_49_I2S0_DATA_FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1)
+#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA_BYPS (MTK_PIN_NO(49) | 2)
#define MT7623_PIN_49_I2S0_DATA_FUNC_PCM_TX (MTK_PIN_NO(49) | 3)
#define MT7623_PIN_49_I2S0_DATA_FUNC_AP_I2S_DO (MTK_PIN_NO(49) | 6)
#define MT7623_PIN_53_SPI0_CSN_FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
#define MT7623_PIN_53_SPI0_CSN_FUNC_SPI0_CS (MTK_PIN_NO(53) | 1)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_SPDIF (MTK_PIN_NO(53) | 3)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_ADC_CK (MTK_PIN_NO(53) | 4)
#define MT7623_PIN_53_SPI0_CSN_FUNC_PWM1 (MTK_PIN_NO(53) | 5)
#define MT7623_PIN_54_SPI0_CK_FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
#define MT7623_PIN_54_SPI0_CK_FUNC_SPI0_CK (MTK_PIN_NO(54) | 1)
+#define MT7623_PIN_54_SPI0_CK_FUNC_SPDIF_IN1 (MTK_PIN_NO(54) | 3)
+#define MT7623_PIN_54_SPI0_CK_FUNC_ADC_DAT_IN (MTK_PIN_NO(54) | 4)
#define MT7623_PIN_55_SPI0_MI_FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MI (MTK_PIN_NO(55) | 1)
#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MO (MTK_PIN_NO(55) | 2)
#define MT7623_PIN_55_SPI0_MI_FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3)
+#define MT7623_PIN_55_SPI0_MI_FUNC_ADC_WS (MTK_PIN_NO(55) | 4)
#define MT7623_PIN_55_SPI0_MI_FUNC_PWM2 (MTK_PIN_NO(55) | 5)
#define MT7623_PIN_56_SPI0_MO_FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1)
#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2)
+#define MT7623_PIN_56_SPI0_MO_FUNC_SPDIF_IN0 (MTK_PIN_NO(56) | 3)
#define MT7623_PIN_57_SDA1_FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
#define MT7623_PIN_57_SDA1_FUNC_SDA1 (MTK_PIN_NO(57) | 1)
@@ -275,10 +342,23 @@
#define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
#define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1)
+#define MT7623_PIN_83_LCM_RST_FUNC_VDAC_CK_XI (MTK_PIN_NO(83) | 2)
#define MT7623_PIN_84_DSI_TE_FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
#define MT7623_PIN_84_DSI_TE_FUNC_DSI_TE (MTK_PIN_NO(84) | 1)
+#define MT7623_PIN_91_MIPI_TDN3_FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT7623_PIN_91_MIPI_TDN3_FUNC_TDN3 (MTK_PIN_NO(91) | 1)
+
+#define MT7623_PIN_92_MIPI_TDP3_FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT7623_PIN_92_MIPI_TDP3_FUNC_TDP3 (MTK_PIN_NO(92) | 1)
+
+#define MT7623_PIN_93_MIPI_TDN2_FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT7623_PIN_93_MIPI_TDN2_FUNC_TDN2 (MTK_PIN_NO(93) | 1)
+
+#define MT7623_PIN_94_MIPI_TDP2_FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT7623_PIN_94_MIPI_TDP2_FUNC_TDP2 (MTK_PIN_NO(94) | 1)
+
#define MT7623_PIN_95_MIPI_TCN_FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
#define MT7623_PIN_95_MIPI_TCN_FUNC_TCN (MTK_PIN_NO(95) | 1)
@@ -300,20 +380,24 @@
#define MT7623_PIN_101_SPI2_CSN_FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
#define MT7623_PIN_101_SPI2_CSN_FUNC_SPI2_CS (MTK_PIN_NO(101) | 1)
#define MT7623_PIN_101_SPI2_CSN_FUNC_SCL3 (MTK_PIN_NO(101) | 3)
+#define MT7623_PIN_101_SPI2_CSN_FUNC_KROW0 (MTK_PIN_NO(101) | 4)
#define MT7623_PIN_102_SPI2_MI_FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MI (MTK_PIN_NO(102) | 1)
#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MO (MTK_PIN_NO(102) | 2)
#define MT7623_PIN_102_SPI2_MI_FUNC_SDA3 (MTK_PIN_NO(102) | 3)
+#define MT7623_PIN_102_SPI2_MI_FUNC_KROW1 (MTK_PIN_NO(102) | 4)
#define MT7623_PIN_103_SPI2_MO_FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MO (MTK_PIN_NO(103) | 1)
#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MI (MTK_PIN_NO(103) | 2)
#define MT7623_PIN_103_SPI2_MO_FUNC_SCL3 (MTK_PIN_NO(103) | 3)
+#define MT7623_PIN_103_SPI2_MO_FUNC_KROW2 (MTK_PIN_NO(103) | 4)
#define MT7623_PIN_104_SPI2_CK_FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
#define MT7623_PIN_104_SPI2_CK_FUNC_SPI2_CK (MTK_PIN_NO(104) | 1)
#define MT7623_PIN_104_SPI2_CK_FUNC_SDA3 (MTK_PIN_NO(104) | 3)
+#define MT7623_PIN_104_SPI2_CK_FUNC_KROW3 (MTK_PIN_NO(104) | 4)
#define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
#define MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1)
@@ -394,7 +478,7 @@
#define MT7623_PIN_121_MSDC0_DAT0_FUNC_WATCHDOG (MTK_PIN_NO(121) | 5)
#define MT7623_PIN_122_GPIO122_FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
-#define MT7623_PIN_122_GPIO122_FUNC_TEST (MTK_PIN_NO(122) | 1)
+#define MT7623_PIN_122_GPIO122_FUNC_CEC (MTK_PIN_NO(122) | 1)
#define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4)
#define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5)
@@ -404,12 +488,12 @@
#define MT7623_PIN_123_HTPLG_FUNC_UTXD0 (MTK_PIN_NO(123) | 5)
#define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
-#define MT7623_PIN_124_GPIO124_FUNC_TEST (MTK_PIN_NO(124) | 1)
+#define MT7623_PIN_124_GPIO124_FUNC_HDMISCK (MTK_PIN_NO(124) | 1)
#define MT7623_PIN_124_GPIO124_FUNC_SDA1 (MTK_PIN_NO(124) | 4)
#define MT7623_PIN_124_GPIO124_FUNC_PWM3 (MTK_PIN_NO(124) | 5)
#define MT7623_PIN_125_GPIO125_FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
-#define MT7623_PIN_125_GPIO125_FUNC_TEST (MTK_PIN_NO(125) | 1)
+#define MT7623_PIN_125_GPIO125_FUNC_HDMISD (MTK_PIN_NO(125) | 1)
#define MT7623_PIN_125_GPIO125_FUNC_SCL1 (MTK_PIN_NO(125) | 4)
#define MT7623_PIN_125_GPIO125_FUNC_PWM4 (MTK_PIN_NO(125) | 5)
diff --git a/include/dt-bindings/reset/axg-aoclkc.h b/include/dt-bindings/reset/axg-aoclkc.h
new file mode 100644
index 000000000000..d342c0b6b2a7
--- /dev/null
+++ b/include/dt-bindings/reset/axg-aoclkc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ */
+
+#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_AXG_AOCLK
+#define DT_BINDINGS_RESET_AMLOGIC_MESON_AXG_AOCLK
+
+#define RESET_AO_REMOTE 0
+#define RESET_AO_I2C_MASTER 1
+#define RESET_AO_I2C_SLAVE 2
+#define RESET_AO_UART1 3
+#define RESET_AO_UART2 4
+#define RESET_AO_IR_BLASTER 5
+
+#endif
diff --git a/include/dt-bindings/reset/mt2701-resets.h b/include/dt-bindings/reset/mt2701-resets.h
index 21deb547cfa4..50b7f066da9a 100644
--- a/include/dt-bindings/reset/mt2701-resets.h
+++ b/include/dt-bindings/reset/mt2701-resets.h
@@ -87,4 +87,7 @@
#define MT2701_ETHSYS_GMAC_RST 23
#define MT2701_ETHSYS_PPE_RST 31
+/* G3DSYS resets */
+#define MT2701_G3DSYS_CORE_RST 0
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2701 */
diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
new file mode 100644
index 000000000000..01c84dba49a4
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_
+
+#define RST_R_APB1_TIMER 0
+#define RST_R_APB1_TWD 1
+#define RST_R_APB1_PWM 2
+#define RST_R_APB2_UART 3
+#define RST_R_APB2_I2C 4
+#define RST_R_APB1_IR 5
+#define RST_R_APB1_W1 6
+
+#endif /* _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fd0ea6af9e36..4b35a66383f9 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -506,7 +506,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
-#define OSC_PCI_CONTROL_MASKS 0x0000001f
+#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
+#define OSC_PCI_CONTROL_MASKS 0x0000003f
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
@@ -1298,4 +1299,23 @@ static inline int lpit_read_residency_count_address(u64 *address)
}
#endif
+#ifdef CONFIG_ACPI_PPTT
+int find_acpi_cpu_topology(unsigned int cpu, int level);
+int find_acpi_cpu_topology_package(unsigned int cpu);
+int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
+#else
+static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology_package(unsigned int cpu)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
+{
+ return -EINVAL;
+}
+#endif
+
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 8f87bbeceef4..514bffa11dbb 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -14,6 +14,7 @@
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
+#define DPC_FATAL 3
struct pci_dev;
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index a031897fca76..ca1d2cc2cdfa 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -80,6 +80,11 @@
ARM_SMCCC_SMC_32, \
0, 0x8000)
+#define ARM_SMCCC_ARCH_WORKAROUND_2 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x7fff)
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -291,5 +296,10 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+/* Return codes defined in ARM DEN 0070A */
+#define SMCCC_RET_SUCCESS 0
+#define SMCCC_RET_NOT_SUPPORTED -1
+#define SMCCC_RET_NOT_REQUIRED -2
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 397a38aca182..f08f5fe7bd08 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -422,6 +422,7 @@ enum {
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
+extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 3d9805297cda..70e19bc6cc9f 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -34,9 +34,8 @@ enum cache_type {
* @shared_cpu_map: logical cpumask representing all the cpus sharing
* this cache node
* @attributes: bitfield representing various cache attributes
- * @of_node: if devicetree is used, this represents either the cpu node in
- * case there's no explicit cache node or the cache node itself in the
- * device tree
+ * @fw_token: Unique value used to determine if different cacheinfo
+ * structures represent a single hardware cache instance.
* @disable_sysfs: indicates whether this node is visible to the user via
* sysfs or not
* @priv: pointer to any private data structure specific to particular
@@ -65,8 +64,7 @@ struct cacheinfo {
#define CACHE_ALLOCATE_POLICY_MASK \
(CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
#define CACHE_ID BIT(4)
-
- struct device_node *of_node;
+ void *fw_token;
bool disable_sysfs;
void *priv;
};
@@ -99,6 +97,23 @@ int func(unsigned int cpu) \
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
int init_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
+int cache_setup_acpi(unsigned int cpu);
+#ifndef CONFIG_ACPI_PPTT
+/*
+ * acpi_find_last_cache_level is only called on ACPI enabled
+ * platforms using the PPTT for topology. This means that if
+ * the platform supports other firmware configuration methods
+ * we need to stub out the call when ACPI is disabled.
+ * ACPI enabled platforms not using PPTT won't be making calls
+ * to this function so we need not worry about them.
+ */
+static inline int acpi_find_last_cache_level(unsigned int cpu)
+{
+ return 0;
+}
+#else
+int acpi_find_last_cache_level(unsigned int cpu);
+#endif
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index b454dfce60d9..4060004968c8 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: cfag12864b.h
* Version: 0.1.0
* Description: cfag12864b LCD driver header
- * License: GPLv2
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-12
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _CFAG12864B_H_
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 1d25e149c1c5..b7cfa037e593 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_clk.h>
#ifdef CONFIG_COMMON_CLK
@@ -218,7 +219,7 @@ struct clk_ops {
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
void (*init)(struct clk_hw *hw);
- int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+ void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};
/**
@@ -805,8 +806,6 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
struct of_device_id;
-typedef void (*of_clk_init_cb_t)(struct device_node *);
-
struct clk_onecell_data {
struct clk **clks;
unsigned int clk_num;
@@ -893,13 +892,10 @@ struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec,
void *data);
-unsigned int of_clk_get_parent_count(struct device_node *np);
int of_clk_parent_fill(struct device_node *np, const char **parents,
unsigned int size);
-const char *of_clk_get_parent_name(struct device_node *np, int index);
int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags);
-void of_clk_init(const struct of_device_id *matches);
#else /* !CONFIG_OF */
@@ -946,26 +942,16 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
return ERR_PTR(-ENOENT);
}
-static inline unsigned int of_clk_get_parent_count(struct device_node *np)
-{
- return 0;
-}
static inline int of_clk_parent_fill(struct device_node *np,
const char **parents, unsigned int size)
{
return 0;
}
-static inline const char *of_clk_get_parent_name(struct device_node *np,
- int index)
-{
- return NULL;
-}
static inline int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags)
{
return 0;
}
-static inline void of_clk_init(const struct of_device_id *matches) {}
#endif /* CONFIG_OF */
/*
@@ -999,10 +985,5 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
#endif /* platform dependent I/O accessors */
-#ifdef CONFIG_DEBUG_FS
-struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
- void *data, const struct file_operations *fops);
-#endif
-
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h
new file mode 100644
index 000000000000..8a7b5cd7eac0
--- /dev/null
+++ b/include/linux/clk/davinci.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock drivers for TI DaVinci PLL and PSC controllers
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_CLK_DAVINCI_PLL_H___
+#define __LINUX_CLK_DAVINCI_PLL_H___
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* function for registering clocks in early boot */
+
+#ifdef CONFIG_ARCH_DAVINCI_DA830
+int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DA850
+int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM355
+int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm355_psc_init(struct device *dev, void __iomem *base);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM365
+int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm365_psc_init(struct device *dev, void __iomem *base);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM644x
+int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm644x_psc_init(struct device *dev, void __iomem *base);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DM646x
+int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+int dm646x_psc_init(struct device *dev, void __iomem *base);
+#endif
+
+#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index c99692ddd4b5..3855e3800f48 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -20,6 +20,9 @@ struct dax_operations {
/* copy_from_iter: required operation for fs-dax direct-i/o */
size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
struct iov_iter *);
+ /* copy_to_iter: required operation for fs-dax direct-i/o */
+ size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
+ struct iov_iter *);
};
extern struct attribute_group dax_attribute_group;
@@ -83,6 +86,8 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);
+
+struct page *dax_layout_busy_page(struct address_space *mapping);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
@@ -104,6 +109,11 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
return NULL;
}
+static inline struct page *dax_layout_busy_page(struct address_space *mapping)
+{
+ return NULL;
+}
+
static inline int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc)
{
@@ -119,14 +129,16 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
+size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
-int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
- pfn_t pfn);
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 31fef7c34185..6fb0808e87c8 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -133,7 +133,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
+typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
#define PAGE_SECTORS (PAGE_SIZE / 512)
@@ -184,7 +184,8 @@ struct target_type {
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
- dm_dax_copy_from_iter_fn dax_copy_from_iter;
+ dm_dax_copy_iter_fn dax_copy_from_iter;
+ dm_dax_copy_iter_fn dax_copy_to_iter;
/* For internal device-mapper use. */
struct list_head list;
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
new file mode 100644
index 000000000000..b0115e340fbc
--- /dev/null
+++ b/include/linux/dma/sprd-dma.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _SPRD_DMA_H_
+#define _SPRD_DMA_H_
+
+#define SPRD_DMA_REQ_SHIFT 16
+#define SPRD_DMA_FLAGS(req_mode, int_type) \
+ ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
+
+/*
+ * enum sprd_dma_req_mode: define the DMA request mode
+ * @SPRD_DMA_FRAG_REQ: fragment request mode
+ * @SPRD_DMA_BLK_REQ: block request mode
+ * @SPRD_DMA_TRANS_REQ: transaction request mode
+ * @SPRD_DMA_LIST_REQ: link-list request mode
+ *
+ * We have 4 types of request mode: fragment mode, block mode, transaction mode
+ * and link-list mode. One transaction can contain several blocks, one block can
+ * contain several fragments. Link-list mode means we can save several DMA
+ * configuration into one reserved memory, then DMA can fetch each DMA
+ * configuration automatically to start transfer.
+ */
+enum sprd_dma_req_mode {
+ SPRD_DMA_FRAG_REQ,
+ SPRD_DMA_BLK_REQ,
+ SPRD_DMA_TRANS_REQ,
+ SPRD_DMA_LIST_REQ,
+};
+
+/*
+ * enum sprd_dma_int_type: define the DMA interrupt type
+ * @SPRD_DMA_NO_INT: do not need generate DMA interrupts.
+ * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
+ * is done.
+ * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
+ * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
+ * or one block request is done.
+ * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
+ * request is done.
+ * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
+ * transaction request or fragment request is done.
+ * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
+ * transaction request or block request is done.
+ * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
+ * is done.
+ * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is
+ * incorrect.
+ */
+enum sprd_dma_int_type {
+ SPRD_DMA_NO_INT,
+ SPRD_DMA_FRAG_INT,
+ SPRD_DMA_BLK_INT,
+ SPRD_DMA_BLK_FRAG_INT,
+ SPRD_DMA_TRANS_INT,
+ SPRD_DMA_TRANS_FRAG_INT,
+ SPRD_DMA_TRANS_BLK_INT,
+ SPRD_DMA_LIST_INT,
+ SPRD_DMA_CFGERR_INT,
+};
+
+#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 77333ed3a488..7207de8c4e9a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -36,6 +36,7 @@
#include <linux/delayed_call.h>
#include <linux/uuid.h>
#include <linux/errseq.h>
+#include <linux/ioprio.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -299,7 +300,8 @@ struct kiocb {
void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
void *private;
int ki_flags;
- enum rw_hint ki_hint;
+ u16 ki_hint;
+ u16 ki_ioprio; /* See linux/ioprio.h */
} __randomize_layout;
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -1934,12 +1936,22 @@ static inline enum rw_hint file_write_hint(struct file *file)
static inline int iocb_flags(struct file *file);
+static inline u16 ki_hint_validate(enum rw_hint hint)
+{
+ typeof(((struct kiocb *)0)->ki_hint) max_hint = -1;
+
+ if (hint <= max_hint)
+ return hint;
+ return 0;
+}
+
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = iocb_flags(filp),
- .ki_hint = file_write_hint(filp),
+ .ki_hint = ki_hint_validate(file_write_hint(filp)),
+ .ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0),
};
}
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fc5ab85278d5..a6afcec53795 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -24,6 +24,7 @@ struct vm_area_struct;
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
+#define ___GFP_WRITE 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_RETRY_MAYFAIL 0x400u
#define ___GFP_NOFAIL 0x800u
@@ -36,11 +37,10 @@ struct vm_area_struct;
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_ACCOUNT 0x100000u
-#define ___GFP_DIRECT_RECLAIM 0x400000u
-#define ___GFP_WRITE 0x800000u
-#define ___GFP_KSWAPD_RECLAIM 0x1000000u
+#define ___GFP_DIRECT_RECLAIM 0x200000u
+#define ___GFP_KSWAPD_RECLAIM 0x400000u
#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x2000000u
+#define ___GFP_NOLOCKDEP 0x800000u
#else
#define ___GFP_NOLOCKDEP 0
#endif
@@ -205,7 +205,7 @@ struct vm_area_struct;
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -343,7 +343,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x1 => DMA or NORMAL
* 0x2 => HIGHMEM or NORMAL
* 0x3 => BAD (DMA+HIGHMEM)
- * 0x4 => DMA32 or DMA or NORMAL
+ * 0x4 => DMA32 or NORMAL
* 0x5 => BAD (DMA+DMA32)
* 0x6 => BAD (HIGHMEM+DMA32)
* 0x7 => BAD (HIGHMEM+DMA32+DMA)
@@ -351,7 +351,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x9 => DMA or NORMAL (MOVABLE+DMA)
* 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
* 0xb => BAD (MOVABLE+HIGHMEM+DMA)
- * 0xc => DMA32 (MOVABLE+DMA32)
+ * 0xc => DMA32 or NORMAL (MOVABLE+DMA32)
* 0xd => BAD (MOVABLE+DMA32+DMA)
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index dbd065963296..243112c7fa7d 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -116,7 +116,7 @@ int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_value(unsigned int array_size,
+int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
@@ -134,7 +134,7 @@ int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
@@ -369,12 +369,13 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array_value(unsigned int array_size,
+static inline int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return 0;
}
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
@@ -423,12 +424,13 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return 0;
}
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index b2f2dc638463..daa44eac9241 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -39,6 +39,23 @@ struct gpiod_lookup_table {
struct gpiod_lookup table[];
};
+/**
+ * struct gpiod_hog - GPIO line hog table
+ * @chip_label: name of the chip the GPIO belongs to
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @line_name: consumer name for the hogged line
+ * @lflags: mask of GPIO lookup flags
+ * @dflags: GPIO flags used to specify the direction and value
+ */
+struct gpiod_hog {
+ struct list_head list;
+ const char *chip_label;
+ u16 chip_hwnum;
+ const char *line_name;
+ enum gpio_lookup_flags lflags;
+ int dflags;
+};
+
/*
* Simple definition of a single GPIO under a con_id
*/
@@ -59,10 +76,23 @@ struct gpiod_lookup_table {
.flags = _flags, \
}
+/*
+ * Simple definition of a single GPIO hog in an array.
+ */
+#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \
+{ \
+ .chip_label = _chip_label, \
+ .chip_hwnum = _chip_hwnum, \
+ .line_name = _line_name, \
+ .lflags = _lflags, \
+ .dflags = _dflags, \
+}
+
#ifdef CONFIG_GPIOLIB
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_add_hogs(struct gpiod_hog *hogs);
#else
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
@@ -70,6 +100,7 @@ static inline
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
+static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {}
#endif
#endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 26240a22978a..41a3d5775394 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -292,9 +292,12 @@ struct hid_item {
#define HID_DG_CONTACTCOUNT 0x000d0054
#define HID_DG_CONTACTMAX 0x000d0055
#define HID_DG_SCANTIME 0x000d0056
+#define HID_DG_SURFACESWITCH 0x000d0057
+#define HID_DG_BUTTONSWITCH 0x000d0058
#define HID_DG_BUTTONTYPE 0x000d0059
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
+#define HID_DG_LATENCYMODE 0x000d0060
#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
/*
@@ -341,10 +344,12 @@ struct hid_item {
/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */
/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
#define HID_QUIRK_ALWAYS_POLL BIT(10)
+#define HID_QUIRK_INPUT_PER_APP BIT(11)
#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
+#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -367,6 +372,7 @@ struct hid_item {
#define HID_GROUP_RMI 0x0100
#define HID_GROUP_WACOM 0x0101
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
+#define HID_GROUP_STEAM 0x0103
/*
* HID protocol status
@@ -463,8 +469,10 @@ struct hid_field {
struct hid_report {
struct list_head list;
- unsigned id; /* id of this report */
- unsigned type; /* report type */
+ struct list_head hidinput_list;
+ unsigned int id; /* id of this report */
+ unsigned int type; /* report type */
+ unsigned int application; /* application usage for this report */
struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
unsigned maxfield; /* maximum valid field index */
unsigned size; /* size of the report (bits) */
@@ -502,12 +510,15 @@ struct hid_output_fifo {
#define HID_STAT_ADDED BIT(0)
#define HID_STAT_PARSED BIT(1)
+#define HID_STAT_DUP_DETECTED BIT(2)
struct hid_input {
struct list_head list;
struct hid_report *report;
struct input_dev *input;
+ const char *name;
bool registered;
+ struct list_head reports; /* the list of reports */
};
enum hid_type {
@@ -864,7 +875,9 @@ void hid_output_report(struct hid_report *report, __u8 *data);
void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
-struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
+struct hid_report *hid_register_report(struct hid_device *device,
+ unsigned int type, unsigned int id,
+ unsigned int application);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
unsigned int type, unsigned int id,
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 2f1327c37a63..4c92e3ba3e16 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -522,9 +522,7 @@ void hmm_devmem_remove(struct hmm_devmem *devmem);
static inline void hmm_devmem_page_set_drvdata(struct page *page,
unsigned long data)
{
- unsigned long *drvdata = (unsigned long *)&page->pgmap;
-
- drvdata[1] = data;
+ page->hmm_data = data;
}
/*
@@ -535,9 +533,7 @@ static inline void hmm_devmem_page_set_drvdata(struct page *page,
*/
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
- const unsigned long *drvdata = (const unsigned long *)&page->pgmap;
-
- return drvdata[1];
+ return page->hmm_data;
}
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 11b5612dc066..3a3012f57be4 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -35,6 +35,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
+#include <linux/reciprocal_div.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -120,6 +121,7 @@ struct hv_ring_buffer {
struct hv_ring_buffer_info {
struct hv_ring_buffer *ring_buffer;
u32 ring_size; /* Include the shared header */
+ struct reciprocal_value ring_size_div10_reciprocal;
spinlock_t ring_lock;
u32 ring_datasize; /* < ring_size */
@@ -154,6 +156,16 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
return write;
}
+static inline u32 hv_get_avail_to_write_percent(
+ const struct hv_ring_buffer_info *rbi)
+{
+ u32 avail_write = hv_get_bytes_to_write(rbi);
+
+ return reciprocal_divide(
+ (avail_write << 3) + (avail_write << 1),
+ rbi->ring_size_div10_reciprocal);
+}
+
/*
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 1fc7abd28b0b..730ead1a46df 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -127,7 +127,7 @@ void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, _extend_name, _type) \
+ _storagebits, _shift, _extend_name, _type, _mask_all) \
{ \
.type = (_type), \
.differential = (_channel2 == -1 ? 0 : 1), \
@@ -139,7 +139,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all = _mask_all, \
.scan_index = (_si), \
.scan_type = { \
.sign = 'u', \
@@ -153,25 +153,35 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
_storagebits, _shift) \
__AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE)
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
_storagebits, _shift) \
__AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
- _storagebits, _shift, "shorted", IIO_VOLTAGE)
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
_storagebits, _shift) \
__AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE)
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, 0)
#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
__AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_TEMP)
+ _storagebits, _shift, NULL, IIO_TEMP, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
_shift) \
__AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, "supply", IIO_VOLTAGE)
+ _storagebits, _shift, "supply", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
#endif
diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h
index e7dc7a542a4e..0da298b41737 100644
--- a/include/linux/iio/adc/stm32-dfsdm-adc.h
+++ b/include/linux/iio/adc/stm32-dfsdm-adc.h
@@ -9,6 +9,8 @@
#ifndef STM32_DFSDM_ADC_H
#define STM32_DFSDM_ADC_H
+#include <linux/iio/iio.h>
+
int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
int (*cb)(const void *data, size_t size,
void *private),
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
index 2edf68dc7336..ce16445411ac 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -16,7 +16,9 @@
#ifndef __CROS_EC_SENSORS_CORE_H
#define __CROS_EC_SENSORS_CORE_H
+#include <linux/iio/iio.h>
#include <linux/irqreturn.h>
+#include <linux/mfd/cros_ec.h>
enum {
CROS_EC_SENSOR_X,
@@ -103,6 +105,7 @@ int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
s16 *data);
+struct platform_device;
/**
* cros_ec_sensors_core_init() - basic initialization of the core structure
* @pdev: platform device created for the sensors
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 11579fd4126e..a74cb177dc6f 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -183,18 +183,18 @@ struct iio_event_spec {
* @address: Driver specific identifier.
* @scan_index: Monotonic index to give ordering in scans when read
* from a buffer.
- * @scan_type: sign: 's' or 'u' to specify signed or unsigned
- * realbits: Number of valid bits of data
- * storagebits: Realbits + padding
- * shift: Shift right by this before masking out
- * realbits.
- * repeat: Number of times real/storage bits
- * repeats. When the repeat element is
- * more than 1, then the type element in
- * sysfs will show a repeat value.
- * Otherwise, the number of repetitions is
- * omitted.
- * endianness: little or big endian
+ * @scan_type: struct describing the scan type
+ * @scan_type.sign: 's' or 'u' to specify signed or unsigned
+ * @scan_type.realbits: Number of valid bits of data
+ * @scan_type.storagebits: Realbits + padding
+ * @scan_type.shift: Shift right by this before masking out
+ * realbits.
+ * @scan_type.repeat: Number of times real/storage bits repeats.
+ * When the repeat element is more than 1, then
+ * the type element in sysfs will show a repeat
+ * value. Otherwise, the number of repetitions
+ * is omitted.
+ * @scan_type.endianness: little or big endian
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
* @info_mask_separate_available: What availability information is to be
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index ef169d67df92..1df940196ab2 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -121,7 +121,6 @@
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
-#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 627efac73e6d..9e30ed6443db 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -77,4 +77,13 @@ extern int ioprio_best(unsigned short aprio, unsigned short bprio);
extern int set_task_ioprio(struct task_struct *task, int ioprio);
+#ifdef CONFIG_BLOCK
+extern int ioprio_check_cap(int ioprio);
+#else
+static inline int ioprio_check_cap(int ioprio)
+{
+ return -ENOTBLK;
+}
+#endif /* CONFIG_BLOCK */
+
#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b2067083aa94..4bd2f34947f4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -552,7 +552,12 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
-void irq_move_irq(struct irq_data *data);
+void __irq_move_irq(struct irq_data *data);
+static inline void irq_move_irq(struct irq_data *data)
+{
+ if (unlikely(irqd_is_setaffinity_pending(data)))
+ __irq_move_irq(data);
+}
void irq_move_masked_irq(struct irq_data *data);
void irq_force_complete_move(struct irq_desc *desc);
#else
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7aed92624531..d23123238534 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -29,6 +29,7 @@
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
+#define PHYS_ADDR_MAX (~(phys_addr_t)0)
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
@@ -965,6 +966,22 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
"pointer type mismatch in container_of()"); \
((type *)(__mptr - offsetof(type, member))); })
+/**
+ * container_of_safe - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged.
+ */
+#define container_of_safe(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
+ ((type *)(__mptr - offsetof(type, member))); })
+
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h
index cb311798e0bc..0738389b42b6 100644
--- a/include/linux/ks0108.h
+++ b/include/linux/ks0108.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: ks0108.h
* Version: 0.1.0
* Description: ks0108 LCD Controller driver header
- * License: GPLv2
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _KS0108_H_
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 44368b19b27e..161e8164abcf 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -37,17 +37,6 @@ static inline void ksm_exit(struct mm_struct *mm)
__ksm_exit(mm);
}
-static inline struct stable_node *page_stable_node(struct page *page)
-{
- return PageKsm(page) ? page_rmapping(page) : NULL;
-}
-
-static inline void set_page_stable_node(struct page *page,
- struct stable_node *stable_node)
-{
- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
-}
-
/*
* When do_swap_page() first faults in from swap what used to be a KSM page,
* no problem, it will be assigned to this vma's anon_vma; but thereafter,
@@ -89,12 +78,6 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
return page;
}
-static inline int page_referenced_ksm(struct page *page,
- struct mem_cgroup *memcg, unsigned long *vm_flags)
-{
- return 0;
-}
-
static inline void rmap_walk_ksm(struct page *page,
struct rmap_walk_control *rwc)
{
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 9db904344c75..8b8946dd63b9 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1130,6 +1130,8 @@ extern void ata_sas_async_probe(struct ata_port *ap);
extern int ata_sas_sync_probe(struct ata_port *ap);
extern int ata_sas_port_init(struct ata_port *);
extern int ata_sas_port_start(struct ata_port *ap);
+extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_sas_tport_delete(struct ata_port *ap);
extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 41a1ae010993..2af7f77866d0 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -72,16 +72,13 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
}
/**
- * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value
* @n: parameter
*
- * constant-capable log of base 2 calculation
- * - this can be used to initialise global variables from constant data, hence
- * the massive ternary operator construction
- *
- * selects the appropriately-sized optimised version depending on sizeof(n)
+ * Use this where sparse expects a true constant expression, e.g. for array
+ * indices.
*/
-#define ilog2(n) \
+#define const_ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 2 ? 0 : \
@@ -147,10 +144,26 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- 1 ) : \
- (sizeof(n) <= 4) ? \
- __ilog2_u32(n) : \
- __ilog2_u64(n) \
+ 1) : \
+ -1)
+
+/**
+ * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * @n: parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ __builtin_constant_p(n) ? \
+ const_ilog2(n) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
)
/**
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d99b71bc2c66..4f52ec755725 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,9 +53,17 @@ enum memcg_memory_event {
MEMCG_HIGH,
MEMCG_MAX,
MEMCG_OOM,
+ MEMCG_SWAP_MAX,
+ MEMCG_SWAP_FAIL,
MEMCG_NR_MEMORY_EVENTS,
};
+enum mem_cgroup_protection {
+ MEMCG_PROT_NONE,
+ MEMCG_PROT_LOW,
+ MEMCG_PROT_MIN,
+};
+
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
int priority;
@@ -158,6 +166,15 @@ enum memcg_kmem_state {
KMEM_ONLINE,
};
+#if defined(CONFIG_SMP)
+struct memcg_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define MEMCG_PADDING(name) struct memcg_padding name;
+#else
+#define MEMCG_PADDING(name)
+#endif
+
/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
@@ -179,8 +196,7 @@ struct mem_cgroup {
struct page_counter kmem;
struct page_counter tcpmem;
- /* Normal memory consumption range */
- unsigned long low;
+ /* Upper bound of normal memory consumption range */
unsigned long high;
/* Range enforcement for interrupt charges */
@@ -205,9 +221,11 @@ struct mem_cgroup {
int oom_kill_disable;
/* memory.events */
- atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
struct cgroup_file events_file;
+ /* handle for "memory.swap.events" */
+ struct cgroup_file swap_events_file;
+
/* protect arrays of thresholds */
struct mutex thresholds_lock;
@@ -225,19 +243,26 @@ struct mem_cgroup {
* mem_cgroup ? And what type of charges should we move ?
*/
unsigned long move_charge_at_immigrate;
+ /* taken only while moving_account > 0 */
+ spinlock_t move_lock;
+ unsigned long move_lock_flags;
+
+ MEMCG_PADDING(_pad1_);
+
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
*/
atomic_t moving_account;
- /* taken only while moving_account > 0 */
- spinlock_t move_lock;
struct task_struct *move_lock_task;
- unsigned long move_lock_flags;
/* memory.stat */
struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+
+ MEMCG_PADDING(_pad2_);
+
atomic_long_t stat[MEMCG_NR_STAT];
atomic_long_t events[NR_VM_EVENT_ITEMS];
+ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
unsigned long socket_pressure;
@@ -285,7 +310,8 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
+ struct mem_cgroup *memcg);
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp,
@@ -462,7 +488,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
void mem_cgroup_handle_over_high(void);
-unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -730,10 +756,10 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
{
}
-static inline bool mem_cgroup_low(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
+static inline enum mem_cgroup_protection mem_cgroup_protected(
+ struct mem_cgroup *root, struct mem_cgroup *memcg)
{
- return false;
+ return MEMCG_PROT_NONE;
}
static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
@@ -853,7 +879,7 @@ mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
return 0;
}
-static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
return 0;
}
@@ -1093,7 +1119,6 @@ static inline void dec_lruvec_page_state(struct page *page,
#ifdef CONFIG_CGROUP_WRITEBACK
-struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
new file mode 100644
index 000000000000..4f1600413f91
--- /dev/null
+++ b/include/linux/memfd.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MEMFD_H
+#define __LINUX_MEMFD_H
+
+#include <linux/file.h>
+
+#ifdef CONFIG_MEMFD_CREATE
+extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
+#else
+static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_MEMFD_H */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 2b0265265c28..4e9828cda7a2 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -107,7 +107,6 @@ static inline bool movable_node_is_enabled(void)
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size,
struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 74ea5e2310a8..f91f9e763557 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
-#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
@@ -30,13 +29,6 @@ struct vmem_altmap {
* Specialize ZONE_DEVICE memory into multiple types each having differents
* usage.
*
- * MEMORY_DEVICE_HOST:
- * Persistent device memory (pmem): struct page might be allocated in different
- * memory and architecture might want to perform special actions. It is similar
- * to regular memory, in that the CPU can access it transparently. However,
- * it is likely to have different bandwidth and latency than regular memory.
- * See Documentation/nvdimm/nvdimm.txt for more information.
- *
* MEMORY_DEVICE_PRIVATE:
* Device memory that is not directly addressable by the CPU: CPU can neither
* read nor write private memory. In this case, we do still have struct pages
@@ -53,11 +45,19 @@ struct vmem_altmap {
* driver can hotplug the device memory using ZONE_DEVICE and with that memory
* type. Any page of a process can be migrated to such memory. However no one
* should be allow to pin such memory so that it can always be evicted.
+ *
+ * MEMORY_DEVICE_FS_DAX:
+ * Host memory that has similar access semantics as System RAM i.e. DMA
+ * coherent and supports page pinning. In support of coordinating page
+ * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a
+ * wakeup event whenever a page is unpinned and becomes idle. This
+ * wakeup is used to coordinate physical address space management (ex:
+ * fs truncate/hole punch) vs pinned pages (ex: device dma).
*/
enum memory_type {
- MEMORY_DEVICE_HOST = 0,
- MEMORY_DEVICE_PRIVATE,
+ MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_PUBLIC,
+ MEMORY_DEVICE_FS_DAX,
};
/*
@@ -129,8 +129,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
-
-static inline bool is_zone_device_page(const struct page *page);
#else
static inline void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
@@ -161,20 +159,6 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
}
#endif /* CONFIG_ZONE_DEVICE */
-#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
-static inline bool is_device_private_page(const struct page *page)
-{
- return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PRIVATE;
-}
-
-static inline bool is_device_public_page(const struct page *page)
-{
- return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PUBLIC;
-}
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 44412c9d26e1..aa09414756db 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -271,7 +271,6 @@ struct abx500_bm_data {
bool autopower_cfg;
bool ac_enabled;
bool usb_enabled;
- bool usb_power_path;
bool no_maintenance;
bool capacity_scaling;
bool chg_unknown_bat;
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
index e63681eb6c62..c06daf3d490a 100644
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -248,8 +248,6 @@ enum bup_vch_sel {
#define BAT_CTRL_20U_ENA 0x02
#define BAT_CTRL_18U_ENA 0x01
#define BAT_CTRL_16U_ENA 0x02
-#define BAT_CTRL_60U_ENA 0x01
-#define BAT_CTRL_120U_ENA 0x02
#define BAT_CTRL_CMP_ENA 0x04
#define FORCE_BAT_CTRL_CMP_HIGH 0x08
#define BAT_CTRL_PULL_UP_ENA 0x10
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
index 67703f23e7ba..669894f434f5 100644
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -25,8 +25,6 @@ struct ux500_charger_ops {
int (*check_enable) (struct ux500_charger *, int, int);
int (*kick_wd) (struct ux500_charger *);
int (*update_curr) (struct ux500_charger *, int);
- int (*pp_enable) (struct ux500_charger *, bool);
- int (*pre_chg_enable) (struct ux500_charger *, bool);
};
/**
@@ -37,7 +35,6 @@ struct ux500_charger_ops {
* @max_out_curr maximum output charger current in mA
* @enabled indicates if this charger is used or not
* @external external charger unit (pm2xxx)
- * @power_path USB power path support
*/
struct ux500_charger {
struct power_supply *psy;
@@ -47,7 +44,6 @@ struct ux500_charger {
int wdt_refresh;
bool enabled;
bool external;
- bool power_path;
};
extern struct blocking_notifier_head charger_notifier_list;
diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h
index f0708ba4cbba..eb05569f752b 100644
--- a/include/linux/mfd/bd9571mwv.h
+++ b/include/linux/mfd/bd9571mwv.h
@@ -33,6 +33,11 @@
#define BD9571MWV_I2C_MD2_E1_BIT_2 0x12
#define BD9571MWV_BKUP_MODE_CNT 0x20
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK GENMASK(3, 0)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0 BIT(0)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1 BIT(1)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0C BIT(2)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1C BIT(3)
#define BD9571MWV_BKUP_MODE_STATUS 0x21
#define BD9571MWV_BKUP_RECOVERY_CNT 0x22
#define BD9571MWV_BKUP_CTRL_TIM_CNT 0x23
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
index 786bf6679a28..2010e0de3e34 100644
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -182,20 +182,6 @@ struct lp8788_buck2_dvs {
};
/*
- * struct lp8788_ldo_enable_pin
- *
- * Basically, all LDOs are enabled through the I2C commands.
- * But ALDO 1 ~ 5, 7, DLDO 7, 9, 11 can be enabled by external gpio pins.
- *
- * @gpio : gpio number which is used for enabling ldos
- * @init_state : initial gpio state (ex. GPIOF_OUT_INIT_LOW)
- */
-struct lp8788_ldo_enable_pin {
- int gpio;
- int init_state;
-};
-
-/*
* struct lp8788_chg_param
* @addr : charging control register address (range : 0x11 ~ 0x1C)
* @val : charging parameter value
@@ -288,7 +274,6 @@ struct lp8788_vib_platform_data {
* @aldo_data : regulator initial data for analog ldo
* @buck1_dvs : gpio configurations for buck1 dvs
* @buck2_dvs : gpio configurations for buck2 dvs
- * @ldo_pin : gpio configurations for enabling LDOs
* @chg_pdata : platform data for charger driver
* @alarm_sel : rtc alarm selection (1 or 2)
* @bl_pdata : configurable data for backlight driver
@@ -306,7 +291,6 @@ struct lp8788_platform_data {
struct regulator_init_data *aldo_data[LP8788_NUM_ALDOS];
struct lp8788_buck1_dvs *buck1_dvs;
struct lp8788_buck2_dvs *buck2_dvs;
- struct lp8788_ldo_enable_pin *ldo_pin[EN_LDOS_MAX];
/* charger */
struct lp8788_charger_platform_data *chg_pdata;
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 5a23dd4df432..28f4ae76271d 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -39,6 +39,8 @@
#define STEP_12_5_MV 12500
#define STEP_6_25_MV 6250
+struct gpio_desc;
+
enum sec_device_type {
S5M8751X,
S5M8763X,
@@ -151,7 +153,7 @@ struct sec_regulator_data {
int id;
struct regulator_init_data *initdata;
struct device_node *reg_node;
- int ext_control_gpio;
+ struct gpio_desc *ext_control_gpiod;
};
/*
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 67d144b3b8f9..f05bf4a146e2 100644
--- a/include/linux/mfd/tps65090.h
+++ b/include/linux/mfd/tps65090.h
@@ -83,6 +83,8 @@ enum {
#define TPS65090_MAX_REG TPS65090_REG_AD_OUT2
#define TPS65090_NUM_REGS (TPS65090_MAX_REG + 1)
+struct gpio_desc;
+
struct tps65090 {
struct device *dev;
struct regmap *rmap;
@@ -95,8 +97,8 @@ struct tps65090 {
* @reg_init_data: The regulator init data.
* @enable_ext_control: Enable extrenal control or not. Only available for
* DCDC1, DCDC2 and DCDC3.
- * @gpio: Gpio number if external control is enabled and controlled through
- * gpio.
+ * @gpiod: Gpio descriptor if external control is enabled and controlled through
+ * gpio
* @overcurrent_wait_valid: True if the overcurrent_wait should be applied.
* @overcurrent_wait: Value to set as the overcurrent wait time. This is the
* actual bitfield value, not a time in ms (valid value are 0 - 3).
@@ -104,7 +106,7 @@ struct tps65090 {
struct tps65090_regulator_plat_data {
struct regulator_init_data *reg_init_data;
bool enable_ext_control;
- int gpio;
+ struct gpio_desc *gpiod;
bool overcurrent_wait_valid;
int overcurrent_wait;
};
diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h
index 2fe68e481230..b19c2801a30e 100644
--- a/include/linux/mfd/tps6586x.h
+++ b/include/linux/mfd/tps6586x.h
@@ -18,6 +18,7 @@
#define TPS658621A 0x15
#define TPS658621CD 0x2c
#define TPS658623 0x1b
+#define TPS658624 0x0a
#define TPS658640 0x01
#define TPS658640v2 0x02
#define TPS658643 0x03
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0f006cf8343d..02f72ebf31a7 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1002,6 +1002,18 @@ enum mlx5_wol_mode {
MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};
+enum mlx5_mpls_supported_fields {
+ MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
+ MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1,
+ MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
+ MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3
+};
+
+enum mlx5_flex_parser_protos {
+ MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
+ MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
+};
+
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 9f4d32e41c06..757b4a30281e 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -160,6 +160,7 @@ struct mlx5_flow_act {
u32 modify_id;
uintptr_t esp_id;
struct mlx5_fs_vlan vlan;
+ struct ib_counters *counters;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
@@ -186,6 +187,9 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
+int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
+ u64 *packets, u64 *bytes);
+
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 8e0b8865f91e..27134c4fcb76 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -299,9 +299,15 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_dport[0x1];
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
- u8 reserved_at_40[0x17];
+
+ u8 reserved_at_40[0x5];
+ u8 outer_first_mpls_over_udp[0x4];
+ u8 outer_first_mpls_over_gre[0x4];
+ u8 inner_first_mpls[0x4];
+ u8 outer_first_mpls[0x4];
+ u8 reserved_at_55[0x2];
u8 outer_esp_spi[0x1];
- u8 reserved_at_58[0x2];
+ u8 reserved_at_58[0x2];
u8 bth_dst_qp[0x1];
u8 reserved_at_5b[0x25];
@@ -435,6 +441,29 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_1a0[0x60];
};
+struct mlx5_ifc_fte_match_mpls_bits {
+ u8 mpls_label[0x14];
+ u8 mpls_exp[0x3];
+ u8 mpls_s_bos[0x1];
+ u8 mpls_ttl[0x8];
+};
+
+struct mlx5_ifc_fte_match_set_misc2_bits {
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
+
+ u8 reserved_at_80[0x100];
+
+ u8 metadata_reg_a[0x20];
+
+ u8 reserved_at_1a0[0x60];
+};
+
struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20];
@@ -1097,9 +1126,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_500[0x20];
u8 num_of_uars_per_page[0x20];
- u8 reserved_at_540[0x40];
- u8 reserved_at_580[0x3d];
+ u8 flex_parser_protocols[0x20];
+ u8 reserved_at_560[0x20];
+
+ u8 reserved_at_580[0x3c];
+ u8 mini_cqe_resp_stride_index[0x1];
u8 cqe_128_always[0x1];
u8 cqe_compression_128[0x1];
u8 cqe_compression[0x1];
@@ -1159,7 +1191,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
- u8 reserved_at_600[0xa00];
+ struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
+
+ u8 reserved_at_800[0x800];
};
enum {
@@ -4568,6 +4602,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -6958,9 +6993,10 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_create_flow_group_in_bits {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0495e6f97fae..0e493884e6e1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -386,7 +386,7 @@ enum page_entry_size {
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
- * to the functions called when a no-page or a wp-page exception occurs.
+ * to the functions called when a no-page or a wp-page exception occurs.
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
@@ -830,27 +830,65 @@ static inline bool is_zone_device_page(const struct page *page)
}
#endif
-#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
-void put_zone_device_private_or_public_page(struct page *page);
-DECLARE_STATIC_KEY_FALSE(device_private_key);
-#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
-static inline bool is_device_private_page(const struct page *page);
-static inline bool is_device_public_page(const struct page *page);
-#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-static inline void put_zone_device_private_or_public_page(struct page *page)
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+void dev_pagemap_get_ops(void);
+void dev_pagemap_put_ops(void);
+void __put_devmap_managed_page(struct page *page);
+DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
+static inline bool put_devmap_managed_page(struct page *page)
{
+ if (!static_branch_unlikely(&devmap_managed_key))
+ return false;
+ if (!is_zone_device_page(page))
+ return false;
+ switch (page->pgmap->type) {
+ case MEMORY_DEVICE_PRIVATE:
+ case MEMORY_DEVICE_PUBLIC:
+ case MEMORY_DEVICE_FS_DAX:
+ __put_devmap_managed_page(page);
+ return true;
+ default:
+ break;
+ }
+ return false;
}
-#define IS_HMM_ENABLED 0
+
static inline bool is_device_private_page(const struct page *page)
{
- return false;
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
+
static inline bool is_device_public_page(const struct page *page)
{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PUBLIC;
+}
+
+#else /* CONFIG_DEV_PAGEMAP_OPS */
+static inline void dev_pagemap_get_ops(void)
+{
+}
+
+static inline void dev_pagemap_put_ops(void)
+{
+}
+
+static inline bool put_devmap_managed_page(struct page *page)
+{
return false;
}
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+static inline bool is_device_private_page(const struct page *page)
+{
+ return false;
+}
+
+static inline bool is_device_public_page(const struct page *page)
+{
+ return false;
+}
+#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline void get_page(struct page *page)
{
@@ -868,16 +906,13 @@ static inline void put_page(struct page *page)
page = compound_head(page);
/*
- * For private device pages we need to catch refcount transition from
- * 2 to 1, when refcount reach one it means the private device page is
- * free and we need to inform the device driver through callback. See
+ * For devmap managed pages we need to catch refcount transition from
+ * 2 to 1, when refcount reach one it means the page is free and we
+ * need to inform the device driver through callback. See
* include/linux/memremap.h and HMM for details.
*/
- if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
- unlikely(is_device_public_page(page)))) {
- put_zone_device_private_or_public_page(page);
+ if (put_devmap_managed_page(page))
return;
- }
if (put_page_testzero(page))
__put_page(page);
@@ -1276,10 +1311,10 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
-int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
+void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
+ unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long start, unsigned long end);
@@ -1851,6 +1886,7 @@ static inline bool pgtable_page_ctor(struct page *page)
{
if (!ptlock_init(page))
return false;
+ __SetPageTable(page);
inc_zone_page_state(page, NR_PAGETABLE);
return true;
}
@@ -1858,6 +1894,7 @@ static inline bool pgtable_page_ctor(struct page *page)
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
+ __ClearPageTable(page);
dec_zone_page_state(page, NR_PAGETABLE);
}
@@ -2303,10 +2340,10 @@ extern void truncate_inode_pages_range(struct address_space *,
extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
-extern int filemap_fault(struct vm_fault *vmf);
+extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
-extern int filemap_page_mkwrite(struct vm_fault *vmf);
+extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
@@ -2431,8 +2468,8 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
-int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn);
+vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
+ unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
@@ -2530,12 +2567,10 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
-extern bool page_is_poisoned(struct page *page);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
-static inline bool page_is_poisoned(struct page *page) { return false; }
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 21612347d311..99ce070e7dcb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -33,29 +33,27 @@ struct hmm;
* it to keep track of whatever it is we are using the page for at the
* moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us
- * who is mapping it. If you allocate the page using alloc_pages(), you
- * can use some of the space in struct page for your own purposes.
+ * who is mapping it.
*
- * Pages that were once in the page cache may be found under the RCU lock
- * even after they have been recycled to a different purpose. The page
- * cache reads and writes some of the fields in struct page to pin the
- * page before checking that it's still in the page cache. It is vital
- * that all users of struct page:
- * 1. Use the first word as PageFlags.
- * 2. Clear or preserve bit 0 of page->compound_head. It is used as
- * PageTail for compound pages, and the page cache must not see false
- * positives. Some users put a pointer here (guaranteed to be at least
- * 4-byte aligned), other users avoid using the field altogether.
- * 3. page->_refcount must either not be used, or must be used in such a
- * way that other CPUs temporarily incrementing and then decrementing the
- * refcount does not cause problems. On receiving the page from
- * alloc_pages(), the refcount will be positive.
- * 4. Either preserve page->_mapcount or restore it to -1 before freeing it.
+ * If you allocate the page using alloc_pages(), you can use some of the
+ * space in struct page for your own purposes. The five words in the main
+ * union are available, except for bit 0 of the first word which must be
+ * kept clear. Many users use this word to store a pointer to an object
+ * which is guaranteed to be aligned. If you use the same storage as
+ * page->mapping, you must restore it to NULL before freeing the page.
*
- * If you allocate pages of order > 0, you can use the fields in the struct
- * page associated with each page, but bear in mind that the pages may have
- * been inserted individually into the page cache, so you must use the above
- * four fields in a compatible way for each struct page.
+ * If your page will not be mapped to userspace, you can also use the four
+ * bytes in the mapcount union, but you must call page_mapcount_reset()
+ * before freeing it.
+ *
+ * If you want to use the refcount field, it must be used in such a way
+ * that other CPUs temporarily incrementing and then decrementing the
+ * refcount does not cause problems. On receiving the page from
+ * alloc_pages(), the refcount will be positive.
+ *
+ * If you allocate pages of order > 0, you can use some of the fields
+ * in each subpage, but you may need to restore some of their values
+ * afterwards.
*
* SLUB uses cmpxchg_double() to atomically update its freelist and
* counters. That requires that freelist & counters be adjacent and
@@ -65,135 +63,122 @@ struct hmm;
*/
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
-#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
-#define _slub_counter_t unsigned long
#else
-#define _slub_counter_t unsigned int
-#endif
-#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
#define _struct_page_alignment
-#define _slub_counter_t unsigned int
-#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+#endif
struct page {
- /* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
- union {
- /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
- struct address_space *mapping;
-
- void *s_mem; /* slab first object */
- atomic_t compound_mapcount; /* first tail page */
- /* page_deferred_list().next -- second tail page */
- };
-
- /* Second double word */
- union {
- pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* sl[aou]b first free object */
- /* page_deferred_list().prev -- second tail page */
- };
-
- union {
- _slub_counter_t counters;
- unsigned int active; /* SLAB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
- int units; /* SLOB */
-
- struct { /* Page cache */
- /*
- * Count of ptes mapped in mms, to show when
- * page is mapped & limit reverse map searches.
- *
- * Extra information about page type may be
- * stored here for pages that are never mapped,
- * in which case the value MUST BE <= -2.
- * See page-flags.h for more details.
- */
- atomic_t _mapcount;
-
- /*
- * Usage count, *USE WRAPPER FUNCTION* when manual
- * accounting. See page_ref.h
- */
- atomic_t _refcount;
- };
- };
-
/*
- * WARNING: bit 0 of the first word encode PageTail(). That means
- * the rest users of the storage space MUST NOT use the bit to
+ * Five words (20/40 bytes) are available in this union.
+ * WARNING: bit 0 of the first word is used for PageTail(). That
+ * means the other users of this union MUST NOT use the bit to
* avoid collision and false-positive PageTail().
*/
union {
- struct list_head lru; /* Pageout list, eg. active_list
- * protected by zone_lru_lock !
- * Can be used as a generic list
- * by the page owner.
- */
- struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
- * lru or handled by a slab
- * allocator, this points to the
- * hosting device page map.
- */
- struct { /* slub per cpu partial pages */
- struct page *next; /* Next partial slab */
+ struct { /* Page cache and anonymous pages */
+ /**
+ * @lru: Pageout list, eg. active_list protected by
+ * zone_lru_lock. Sometimes used as a generic list
+ * by the page owner.
+ */
+ struct list_head lru;
+ /* See page-flags.h for PAGE_MAPPING_FLAGS */
+ struct address_space *mapping;
+ pgoff_t index; /* Our offset within mapping. */
+ /**
+ * @private: Mapping-private opaque data.
+ * Usually used for buffer_heads if PagePrivate.
+ * Used for swp_entry_t if PageSwapCache.
+ * Indicates order in the buddy system if PageBuddy.
+ */
+ unsigned long private;
+ };
+ struct { /* slab, slob and slub */
+ union {
+ struct list_head slab_list; /* uses lru */
+ struct { /* Partial pages */
+ struct page *next;
#ifdef CONFIG_64BIT
- int pages; /* Nr of partial slabs left */
- int pobjects; /* Approximate # of objects */
+ int pages; /* Nr of pages left */
+ int pobjects; /* Approximate count */
#else
- short int pages;
- short int pobjects;
+ short int pages;
+ short int pobjects;
#endif
+ };
+ };
+ struct kmem_cache *slab_cache; /* not slob */
+ /* Double-word boundary */
+ void *freelist; /* first free object */
+ union {
+ void *s_mem; /* slab: first object */
+ unsigned long counters; /* SLUB */
+ struct { /* SLUB */
+ unsigned inuse:16;
+ unsigned objects:15;
+ unsigned frozen:1;
+ };
+ };
};
-
- struct rcu_head rcu_head; /* Used by SLAB
- * when destroying via RCU
- */
- /* Tail pages of compound page */
- struct {
- unsigned long compound_head; /* If bit zero is set */
+ struct { /* Tail pages of compound page */
+ unsigned long compound_head; /* Bit zero is set */
/* First tail page only */
unsigned char compound_dtor;
unsigned char compound_order;
- /* two/six bytes available here */
+ atomic_t compound_mapcount;
};
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
- struct {
- unsigned long __pad; /* do not overlay pmd_huge_pte
- * with compound_head to avoid
- * possible bit 0 collision.
- */
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ struct { /* Second tail page of compound page */
+ unsigned long _compound_pad_1; /* compound_head */
+ unsigned long _compound_pad_2;
+ struct list_head deferred_list;
};
+ struct { /* Page table pages */
+ unsigned long _pt_pad_1; /* compound_head */
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ unsigned long _pt_pad_2; /* mapping */
+ struct mm_struct *pt_mm; /* x86 pgds only */
+#if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
+#else
+ spinlock_t ptl;
#endif
+ };
+ struct { /* ZONE_DEVICE pages */
+ /** @pgmap: Points to the hosting device page map. */
+ struct dev_pagemap *pgmap;
+ unsigned long hmm_data;
+ unsigned long _zd_pad_1; /* uses mapping */
+ };
+
+ /** @rcu_head: You can use this to free a page by RCU. */
+ struct rcu_head rcu_head;
};
- union {
+ union { /* This union is 4 bytes in size. */
/*
- * Mapping-private opaque data:
- * Usually used for buffer_heads if PagePrivate
- * Used for swp_entry_t if PageSwapCache
- * Indicates order in the buddy system if PageBuddy
+ * If the page can be mapped to userspace, encodes the number
+ * of times this page is referenced by a page table.
*/
- unsigned long private;
-#if USE_SPLIT_PTE_PTLOCKS
-#if ALLOC_SPLIT_PTLOCKS
- spinlock_t *ptl;
-#else
- spinlock_t ptl;
-#endif
-#endif
- struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
+ atomic_t _mapcount;
+
+ /*
+ * If the page is neither PageSlab nor mappable to userspace,
+ * the value stored here may help determine what this page
+ * is used for. See page-flags.h for a list of page types
+ * which are currently stored here.
+ */
+ unsigned int page_type;
+
+ unsigned int active; /* SLAB */
+ int units; /* SLOB */
};
+ /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
+ atomic_t _refcount;
+
#ifdef CONFIG_MEMCG
struct mem_cgroup *mem_cgroup;
#endif
@@ -413,6 +398,8 @@ struct mm_struct {
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
+
+ spinlock_t arg_lock; /* protect the below fields */
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
@@ -627,9 +614,9 @@ struct vm_special_mapping {
* If non-NULL, then this is called to resolve page faults
* on the special mapping. If used, .pages is not checked.
*/
- int (*fault)(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma,
- struct vm_fault *vmf);
+ vm_fault_t (*fault)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf);
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 1cc5ffb769af..7cd1473c64a4 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -53,93 +53,32 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
-#define mpi_is_neg(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
-MPI mpi_alloc_secure(unsigned nlimbs);
-MPI mpi_alloc_like(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
-int mpi_copy(MPI *copy, const MPI a);
-void mpi_clear(MPI a);
-int mpi_set(MPI w, MPI u);
-int mpi_set_ui(MPI w, ulong u);
-MPI mpi_alloc_set_ui(unsigned long u);
-void mpi_m_check(MPI a);
-void mpi_swap(MPI a, MPI b);
/*-- mpicoder.c --*/
-MPI do_encode_md(const void *sha_buffer, unsigned nbits);
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
-int mpi_fromstr(MPI val, const char *str);
-u32 mpi_get_keyid(MPI a, u32 *keyid);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
-void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
-#define log_mpidump g10_log_mpidump
-
-/*-- mpi-add.c --*/
-int mpi_add_ui(MPI w, MPI u, ulong v);
-int mpi_add(MPI w, MPI u, MPI v);
-int mpi_addm(MPI w, MPI u, MPI v, MPI m);
-int mpi_sub_ui(MPI w, MPI u, ulong v);
-int mpi_sub(MPI w, MPI u, MPI v);
-int mpi_subm(MPI w, MPI u, MPI v, MPI m);
-
-/*-- mpi-mul.c --*/
-int mpi_mul_ui(MPI w, MPI u, ulong v);
-int mpi_mul_2exp(MPI w, MPI u, ulong cnt);
-int mpi_mul(MPI w, MPI u, MPI v);
-int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
-
-/*-- mpi-div.c --*/
-ulong mpi_fdiv_r_ui(MPI rem, MPI dividend, ulong divisor);
-int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
-int mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
-int mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
-int mpi_tdiv_r(MPI rem, MPI num, MPI den);
-int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
-int mpi_tdiv_q_2exp(MPI w, MPI u, unsigned count);
-int mpi_divisible_ui(const MPI dividend, ulong divisor);
-
-/*-- mpi-gcd.c --*/
-int mpi_gcd(MPI g, const MPI a, const MPI b);
-
/*-- mpi-pow.c --*/
-int mpi_pow(MPI w, MPI u, MPI v);
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
-/*-- mpi-mpow.c --*/
-int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI mod);
-
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
-/*-- mpi-scan.c --*/
-int mpi_getbyte(MPI a, unsigned idx);
-void mpi_putbyte(MPI a, unsigned idx, int value);
-unsigned mpi_trailing_zeros(MPI a);
-
/*-- mpi-bit.c --*/
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
-int mpi_test_bit(MPI a, unsigned n);
-int mpi_set_bit(MPI a, unsigned n);
-int mpi_set_highbit(MPI a, unsigned n);
-void mpi_clear_highbit(MPI a, unsigned n);
-void mpi_clear_bit(MPI a, unsigned n);
-int mpi_rshift(MPI x, MPI a, unsigned n);
-
-/*-- mpi-inv.c --*/
-int mpi_invm(MPI x, MPI u, MPI v);
/* inline functions */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 792ea5c26329..abe975c87b90 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -86,6 +86,7 @@ struct nand_pos {
* @ooboffs: the OOB offset within the page
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
+ * @mode: one of the %MTD_OPS_XXX mode
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
* way all useful information are already formatted in a useful way and
@@ -106,6 +107,7 @@ struct nand_page_io_req {
const void *out;
void *in;
} oobbuf;
+ int mode;
};
/**
@@ -599,6 +601,7 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
+ iter->req.mode = req->mode;
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
iter->req.ooboffs = req->ooboffs;
iter->oobbytes_per_page = mtd_oobavail(mtd, req);
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 17c919436f48..3e8ec3b8a39c 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -28,7 +28,14 @@ struct nand_flash_dev;
struct device_node;
/* Scan and identify a NAND device */
-int nand_scan(struct mtd_info *mtd, int max_chips);
+int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
+ struct nand_flash_dev *ids);
+
+static inline int nand_scan(struct mtd_info *mtd, int max_chips)
+{
+ return nand_scan_with_ids(mtd, max_chips, NULL);
+}
+
/*
* Separate phases of nand_scan(), allowing board driver to intervene
* and override command or ECC setup according to flash type.
@@ -740,8 +747,9 @@ enum nand_data_interface_type {
/**
* struct nand_data_interface - NAND interface timing
- * @type: type of the timing
- * @timings: The timing, type according to @type
+ * @type: type of the timing
+ * @timings: The timing, type according to @type
+ * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
*/
struct nand_data_interface {
enum nand_data_interface_type type;
@@ -798,8 +806,9 @@ struct nand_op_addr_instr {
/**
* struct nand_op_data_instr - Definition of a data instruction
* @len: number of data bytes to move
- * @in: buffer to fill when reading from the NAND chip
- * @out: buffer to read from when writing to the NAND chip
+ * @buf: buffer to fill
+ * @buf.in: buffer to fill when reading from the NAND chip
+ * @buf.out: buffer to read from when writing to the NAND chip
* @force_8bit: force 8-bit access
*
* Please note that "in" and "out" are inverted from the ONFI specification
@@ -842,9 +851,13 @@ enum nand_op_instr_type {
/**
* struct nand_op_instr - Instruction object
* @type: the instruction type
- * @cmd/@addr/@data/@waitrdy: extra data associated to the instruction.
- * You'll have to use the appropriate element
- * depending on @type
+ * @ctx: extra data associated to the instruction. You'll have to use the
+ * appropriate element depending on @type
+ * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
+ * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
+ * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
+ * or %NAND_OP_DATA_OUT_INSTR
+ * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
* @delay_ns: delay the controller should apply after the instruction has been
* issued on the bus. Most modern controllers have internal timings
* control logic, and in this case, the controller driver can ignore
@@ -1003,7 +1016,9 @@ struct nand_op_parser_data_constraints {
* struct nand_op_parser_pattern_elem - One element of a pattern
* @type: the instructuction type
* @optional: whether this element of the pattern is optional or mandatory
- * @addr/@data: address or data constraint (number of cycles or data length)
+ * @ctx: address or data constraint
+ * @ctx.addr: address constraint (number of cycles)
+ * @ctx.data: data constraint (data length)
*/
struct nand_op_parser_pattern_elem {
enum nand_op_instr_type type;
@@ -1230,6 +1245,8 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* devices.
* @priv: [OPTIONAL] pointer to private chip data
* @manufacturer: [INTERN] Contains manufacturer information
+ * @manufacturer.desc: [INTERN] Contains manufacturer's description
+ * @manufacturer.priv: [INTERN] Contains manufacturer private information
*/
struct nand_chip {
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index de36969eb359..e60da0d34cc1 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -62,6 +62,8 @@
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
+#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
+#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h
new file mode 100644
index 000000000000..b27da9f164cb
--- /dev/null
+++ b/include/linux/of_clk.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * OF clock helpers
+ */
+
+#ifndef __LINUX_OF_CLK_H
+#define __LINUX_OF_CLK_H
+
+#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
+
+unsigned int of_clk_get_parent_count(struct device_node *np);
+const char *of_clk_get_parent_name(struct device_node *np, int index);
+void of_clk_init(const struct of_device_id *matches);
+
+#else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
+
+static inline unsigned int of_clk_get_parent_count(struct device_node *np)
+{
+ return 0;
+}
+static inline const char *of_clk_get_parent_name(struct device_node *np,
+ int index)
+{
+ return NULL;
+}
+static inline void of_clk_init(const struct of_device_id *matches) {}
+
+#endif /* !CONFIG_COMMON_CLK || !CONFIG_OF */
+
+#endif /* __LINUX_OF_CLK_H */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 091033a6b836..e83d87fc5673 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -13,9 +13,6 @@ struct device_node;
struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_pci_get_devfn(struct device_node *np);
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
-int of_get_pci_domain_nr(struct device_node *node);
-int of_pci_get_max_link_speed(struct device_node *node);
void of_pci_check_probe_only(void);
int of_pci_map_rid(struct device_node *np, u32 rid,
const char *map_name, const char *map_mask_name,
@@ -32,18 +29,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
return -EINVAL;
}
-static inline int
-of_pci_parse_bus_range(struct device_node *node, struct resource *res)
-{
- return -EINVAL;
-}
-
-static inline int
-of_get_pci_domain_nr(struct device_node *node)
-{
- return -1;
-}
-
static inline int of_pci_map_rid(struct device_node *np, u32 rid,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
@@ -51,12 +36,6 @@ static inline int of_pci_map_rid(struct device_node *np, u32 rid,
return -EINVAL;
}
-static inline int
-of_pci_get_max_link_speed(struct device_node *node)
-{
- return -EINVAL;
-}
-
static inline void of_pci_check_probe_only(void) { }
#endif
@@ -70,17 +49,4 @@ of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
}
#endif
-#if defined(CONFIG_OF_ADDRESS)
-int of_pci_get_host_bridge_resources(struct device_node *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base);
-#else
-static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
-{
- return -EINVAL;
-}
-#endif
-
#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e34a27727b9a..901943e4754b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -642,49 +642,62 @@ PAGEFLAG_FALSE(DoubleMap)
#endif
/*
- * For pages that are never mapped to userspace, page->mapcount may be
- * used for storing extra information about page type. Any value used
- * for this purpose must be <= -2, but it's better start not too close
- * to -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a special page.
+ * For pages that are never mapped to userspace (and aren't PageSlab),
+ * page_type may be used. Because it is initialised to -1, we invert the
+ * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
+ * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
+ * low bits so that an underflow or overflow of page_mapcount() won't be
+ * mistaken for a page type value.
*/
-#define PAGE_MAPCOUNT_OPS(uname, lname) \
+
+#define PAGE_TYPE_BASE 0xf0000000
+/* Reserve 0x0000007f to catch underflows of page_mapcount */
+#define PG_buddy 0x00000080
+#define PG_balloon 0x00000100
+#define PG_kmemcg 0x00000200
+#define PG_table 0x00000400
+
+#define PageType(page, flag) \
+ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+
+#define PAGE_TYPE_OPS(uname, lname) \
static __always_inline int Page##uname(struct page *page) \
{ \
- return atomic_read(&page->_mapcount) == \
- PAGE_##lname##_MAPCOUNT_VALUE; \
+ return PageType(page, PG_##lname); \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \
- atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \
+ VM_BUG_ON_PAGE(!PageType(page, 0), page); \
+ page->page_type &= ~PG_##lname; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
- atomic_set(&page->_mapcount, -1); \
+ page->page_type |= PG_##lname; \
}
/*
- * PageBuddy() indicate that the page is free and in the buddy system
+ * PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
*/
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-PAGE_MAPCOUNT_OPS(Buddy, BUDDY)
+PAGE_TYPE_OPS(Buddy, buddy)
/*
- * PageBalloon() is set on pages that are on the balloon page list
+ * PageBalloon() is true for pages that are on the balloon page list
* (see mm/balloon_compaction.c).
*/
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-PAGE_MAPCOUNT_OPS(Balloon, BALLOON)
+PAGE_TYPE_OPS(Balloon, balloon)
/*
* If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
* pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
*/
-#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512)
-PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)
+PAGE_TYPE_OPS(Kmemcg, kmemcg)
+
+/*
+ * Marks pages in use as page tables.
+ */
+PAGE_TYPE_OPS(Table, table)
extern bool is_free_buddy_page(struct page *page);
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index c15ab80ad32d..bab7e57f659b 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -7,10 +7,22 @@
#include <asm/page.h>
struct page_counter {
- atomic_long_t count;
- unsigned long limit;
+ atomic_long_t usage;
+ unsigned long min;
+ unsigned long low;
+ unsigned long max;
struct page_counter *parent;
+ /* effective memory.min and memory.min usage tracking */
+ unsigned long emin;
+ atomic_long_t min_usage;
+ atomic_long_t children_min_usage;
+
+ /* effective memory.low and memory.low usage tracking */
+ unsigned long elow;
+ atomic_long_t low_usage;
+ atomic_long_t children_low_usage;
+
/* legacy */
unsigned long watermark;
unsigned long failcnt;
@@ -25,14 +37,14 @@ struct page_counter {
static inline void page_counter_init(struct page_counter *counter,
struct page_counter *parent)
{
- atomic_long_set(&counter->count, 0);
- counter->limit = PAGE_COUNTER_MAX;
+ atomic_long_set(&counter->usage, 0);
+ counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
}
static inline unsigned long page_counter_read(struct page_counter *counter)
{
- return atomic_long_read(&counter->count);
+ return atomic_long_read(&counter->usage);
}
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
@@ -41,7 +53,9 @@ bool page_counter_try_charge(struct page_counter *counter,
unsigned long nr_pages,
struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
-int page_counter_limit(struct page_counter *counter, unsigned long limit);
+void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
unsigned long *nr_pages);
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index baadad1aabbc..29efa09d686b 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -62,5 +62,6 @@ extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops);
+int pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index af657ca58b70..243eaa5a66ff 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -90,8 +90,16 @@ struct pci_epc {
struct config_group *group;
/* spinlock to protect against concurrent access of EP controller */
spinlock_t lock;
+ unsigned int features;
};
+#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
+#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
+#define EPC_FEATURE_SET_BAR(features, bar) \
+ (features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
+#define EPC_FEATURE_GET_BAR(features) \
+ ((features & EPC_FEATURE_BAR_MASK) >> 1)
+
#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
#define pci_epc_create(dev, ops) \
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index f7d6f4883f8b..4e7764935fa8 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -72,7 +72,7 @@ struct pci_epf_ops {
* @driver: PCI EPF driver
* @ops: set of function pointers for performing EPF operations
* @owner: the owner of the module that registers the PCI EPF driver
- * @group: configfs group corresponding to the PCI EPF driver
+ * @epf_group: list of configfs group corresponding to the PCI EPF driver
* @id_table: identifies EPF devices for probing
*/
struct pci_epf_driver {
@@ -82,7 +82,7 @@ struct pci_epf_driver {
struct device_driver driver;
struct pci_epf_ops *ops;
struct module *owner;
- struct config_group *group;
+ struct list_head epf_group;
const struct pci_epf_device_id *id_table;
};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 55371cb827ad..340029b2fb38 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -217,6 +217,7 @@ enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
+ PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
};
/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
@@ -406,6 +407,9 @@ struct pci_dev {
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+ unsigned int broken_cmd_compl:1; /* No compl for some cmds */
+#endif
#ifdef CONFIG_PCIE_PTM
unsigned int ptm_root:1;
unsigned int ptm_enabled:1;
@@ -471,8 +475,10 @@ struct pci_host_bridge {
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
unsigned int native_aer:1; /* OS may use PCIe AER */
- unsigned int native_hotplug:1; /* OS may use PCIe hotplug */
+ unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
+ unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
unsigned int native_pme:1; /* OS may use PCIe PME */
+ unsigned int native_ltr:1; /* OS may use PCIe LTR */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
const struct resource *res,
@@ -1079,8 +1085,6 @@ int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
-int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
@@ -1451,8 +1455,10 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
+extern bool pcie_ports_native;
#else
#define pcie_ports_disabled true
+#define pcie_ports_native false
#endif
#ifdef CONFIG_PCIEASPM
@@ -1479,6 +1485,8 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
+bool pci_ats_disabled(void);
+
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
void pci_ats_init(struct pci_dev *dev);
@@ -1510,12 +1518,10 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
*/
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
-int pci_get_new_domain_nr(void);
#else
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
-static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#endif /* CONFIG_PCI_DOMAINS */
/*
@@ -1670,7 +1676,6 @@ static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
-static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
@@ -1954,6 +1959,7 @@ int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
+int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
@@ -1986,6 +1992,7 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
+#define pci_sriov_configure_simple NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
@@ -2284,7 +2291,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
return false;
}
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 26213024e81b..cf5e22103f68 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -162,8 +162,9 @@ struct hotplug_params {
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
-bool pciehp_is_native(struct pci_dev *pdev);
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
+bool pciehp_is_native(struct pci_dev *bridge);
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
+bool shpchp_is_native(struct pci_dev *bridge);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(acpi_handle handle);
#else
@@ -172,6 +173,17 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
{
return -ENODEV;
}
-static inline bool pciehp_is_native(struct pci_dev *pdev) { return true; }
+
+static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge)
+{
+ return 0;
+}
+static inline bool pciehp_is_native(struct pci_dev *bridge) { return true; }
+static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; }
#endif
+
+static inline bool hotplug_is_native(struct pci_dev *bridge)
+{
+ return pciehp_is_native(bridge) || shpchp_is_native(bridge);
+}
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cc608fc55334..29502238e510 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -561,6 +561,7 @@
#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
+#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
@@ -2119,6 +2120,8 @@
#define PCI_VENDOR_ID_MYRICOM 0x14c1
+#define PCI_VENDOR_ID_MEDIATEK 0x14c3
+
#define PCI_VENDOR_ID_TITAN 0x14D2
#define PCI_DEVICE_ID_TITAN_010L 0x8001
#define PCI_DEVICE_ID_TITAN_100L 0x8010
@@ -2387,6 +2390,8 @@
#define PCI_VENDOR_ID_LENOVO 0x17aa
+#define PCI_VENDOR_ID_QCOM 0x17cb
+
#define PCI_VENDOR_ID_CDNS 0x17cd
#define PCI_VENDOR_ID_ARECA 0x17d3
@@ -2552,6 +2557,8 @@
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001
+#define PCI_VENDOR_ID_AMAZON 0x1d0f
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
@@ -2672,6 +2679,7 @@
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
+#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
@@ -2776,6 +2784,7 @@
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
+#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0
#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 40036a57d072..ad5444491975 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -78,7 +78,7 @@ struct arm_pmu {
struct pmu pmu;
cpumask_t supported_cpus;
char *name;
- irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ irqreturn_t (*handle_irq)(struct arm_pmu *pmu);
void (*enable)(struct perf_event *event);
void (*disable)(struct perf_event *event);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index a03c2642a87c..21713dc14ce2 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -122,7 +122,7 @@ pud_t pud_mkdevmap(pud_t pud);
#endif
#endif /* __HAVE_ARCH_PTE_DEVMAP */
-#ifdef __HAVE_ARCH_PTE_SPECIAL
+#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline bool pfn_t_special(pfn_t pfn)
{
return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
@@ -132,5 +132,5 @@ static inline bool pfn_t_special(pfn_t pfn)
{
return false;
}
-#endif /* __HAVE_ARCH_PTE_SPECIAL */
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
index 2dc7f4a8ab09..419cfacb4b42 100644
--- a/include/linux/platform_data/gpio-dwapb.h
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -19,7 +19,8 @@ struct dwapb_port_property {
unsigned int idx;
unsigned int ngpio;
unsigned int gpio_base;
- unsigned int irq;
+ int irq[32];
+ bool has_irq;
bool irq_shared;
};
diff --git a/include/linux/platform_data/tsl2772.h b/include/linux/platform_data/tsl2772.h
new file mode 100644
index 000000000000..f8ade15a35e2
--- /dev/null
+++ b/include/linux/platform_data/tsl2772.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Device driver for monitoring ambient light intensity (lux)
+ * and proximity (prox) within the TAOS TSL2772 family of devices.
+ *
+ * Copyright (c) 2012, TAOS Corporation.
+ * Copyright (c) 2017-2018 Brian Masney <masneyb@onstation.org>
+ */
+
+#ifndef __TSL2772_H
+#define __TSL2772_H
+
+struct tsl2772_lux {
+ unsigned int ch0;
+ unsigned int ch1;
+};
+
+/* Max number of segments allowable in LUX table */
+#define TSL2772_MAX_LUX_TABLE_SIZE 6
+/* The default LUX tables all have 3 elements. */
+#define TSL2772_DEF_LUX_TABLE_SZ 3
+#define TSL2772_DEFAULT_TABLE_BYTES (sizeof(struct tsl2772_lux) * \
+ TSL2772_DEF_LUX_TABLE_SZ)
+
+/* Proximity diode to use */
+#define TSL2772_DIODE0 0x01
+#define TSL2772_DIODE1 0x02
+#define TSL2772_DIODE_BOTH 0x03
+
+/* LED Power */
+#define TSL2772_100_mA 0x00
+#define TSL2772_50_mA 0x01
+#define TSL2772_25_mA 0x02
+#define TSL2772_13_mA 0x03
+
+/**
+ * struct tsl2772_settings - Settings for the tsl2772 driver
+ * @als_time: Integration time of the ALS channel ADCs in 2.73 ms
+ * increments. Total integration time is
+ * (256 - als_time) * 2.73.
+ * @als_gain: Index into the tsl2772_als_gain array.
+ * @als_gain_trim: Default gain trim to account for aperture effects.
+ * @wait_time: Time between proximity and ALS cycles in 2.73
+ * periods.
+ * @prox_time: Integration time of the proximity ADC in 2.73 ms
+ * increments. Total integration time is
+ * (256 - prx_time) * 2.73.
+ * @prox_gain: Index into the tsl2772_prx_gain array.
+ * @als_prox_config: The value of the ALS / Proximity configuration
+ * register.
+ * @als_cal_target: Known external ALS reading for calibration.
+ * @als_persistence: H/W Filters, Number of 'out of limits' ALS readings.
+ * @als_interrupt_en: Enable/Disable ALS interrupts
+ * @als_thresh_low: CH0 'low' count to trigger interrupt.
+ * @als_thresh_high: CH0 'high' count to trigger interrupt.
+ * @prox_persistence: H/W Filters, Number of 'out of limits' proximity
+ * readings.
+ * @prox_interrupt_en: Enable/Disable proximity interrupts.
+ * @prox_thres_low: Low threshold proximity detection.
+ * @prox_thres_high: High threshold proximity detection.
+ * @prox_pulse_count: Number if proximity emitter pulses.
+ * @prox_max_samples_cal: The number of samples that are taken when performing
+ * a proximity calibration.
+ * @prox_diode Which diode(s) to use for driving the external
+ * LED(s) for proximity sensing.
+ * @prox_power The amount of power to use for the external LED(s).
+ */
+struct tsl2772_settings {
+ int als_time;
+ int als_gain;
+ int als_gain_trim;
+ int wait_time;
+ int prox_time;
+ int prox_gain;
+ int als_prox_config;
+ int als_cal_target;
+ u8 als_persistence;
+ bool als_interrupt_en;
+ int als_thresh_low;
+ int als_thresh_high;
+ u8 prox_persistence;
+ bool prox_interrupt_en;
+ int prox_thres_low;
+ int prox_thres_high;
+ int prox_pulse_count;
+ int prox_max_samples_cal;
+ int prox_diode;
+ int prox_power;
+};
+
+/**
+ * struct tsl2772_platform_data - Platform callback, glass and defaults
+ * @platform_lux_table: Device specific glass coefficents
+ * @platform_default_settings: Device specific power on defaults
+ */
+struct tsl2772_platform_data {
+ struct tsl2772_lux platform_lux_table[TSL2772_MAX_LUX_TABLE_SIZE];
+ struct tsl2772_settings *platform_default_settings;
+};
+
+#endif /* __TSL2772_H */
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 01fbf1b16258..d6355f49fbae 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -24,8 +24,9 @@ enum bq27xxx_chip {
BQ27546,
BQ27742,
BQ27545, /* bq27545 */
- BQ27421, /* bq27421, bq27425, bq27441, bq27621 */
+ BQ27421, /* bq27421, bq27441, bq27621 */
BQ27425,
+ BQ27426,
BQ27441,
BQ27621,
};
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e679b175b411..65163aa0bb04 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -652,9 +652,7 @@ static inline void rcu_read_lock(void)
* Unfortunately, this function acquires the scheduler's runqueue and
* priority-inheritance spinlocks. This means that deadlock could result
* if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them; or any lock which
- * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
- * does not disable irqs while taking ->wait_lock.
+ * any lock that is ever acquired while holding them.
*
* That said, RCU readers are never priority boosted unless they were
* preempted. Therefore, one way to avoid deadlock is to make sure
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index d8ecefaf63ca..6d46f962685d 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -49,47 +49,7 @@ enum ab8505_regulator_id {
AB8505_NUM_REGULATORS,
};
-/* AB9540 regulators */
-enum ab9540_regulator_id {
- AB9540_LDO_AUX1,
- AB9540_LDO_AUX2,
- AB9540_LDO_AUX3,
- AB9540_LDO_AUX4,
- AB9540_LDO_INTCORE,
- AB9540_LDO_TVOUT,
- AB9540_LDO_USB,
- AB9540_LDO_AUDIO,
- AB9540_LDO_ANAMIC1,
- AB9540_LDO_ANAMIC2,
- AB9540_LDO_DMIC,
- AB9540_LDO_ANA,
- AB9540_SYSCLKREQ_2,
- AB9540_SYSCLKREQ_4,
- AB9540_NUM_REGULATORS,
-};
-
-/* AB8540 regulators */
-enum ab8540_regulator_id {
- AB8540_LDO_AUX1,
- AB8540_LDO_AUX2,
- AB8540_LDO_AUX3,
- AB8540_LDO_AUX4,
- AB8540_LDO_AUX5,
- AB8540_LDO_AUX6,
- AB8540_LDO_INTCORE,
- AB8540_LDO_TVOUT,
- AB8540_LDO_AUDIO,
- AB8540_LDO_ANAMIC1,
- AB8540_LDO_ANAMIC2,
- AB8540_LDO_DMIC,
- AB8540_LDO_ANA,
- AB8540_LDO_SDIO,
- AB8540_SYSCLKREQ_2,
- AB8540_SYSCLKREQ_4,
- AB8540_NUM_REGULATORS,
-};
-
-/* AB8500, AB8505, and AB9540 register initialization */
+/* AB8500 and AB8505 register initialization */
struct ab8500_regulator_reg_init {
int id;
u8 mask;
@@ -185,121 +145,6 @@ enum ab8505_regulator_reg {
AB8505_NUM_REGULATOR_REGISTERS,
};
-/* AB9540 registers */
-enum ab9540_regulator_reg {
- AB9540_REGUREQUESTCTRL1,
- AB9540_REGUREQUESTCTRL2,
- AB9540_REGUREQUESTCTRL3,
- AB9540_REGUREQUESTCTRL4,
- AB9540_REGUSYSCLKREQ1HPVALID1,
- AB9540_REGUSYSCLKREQ1HPVALID2,
- AB9540_REGUHWHPREQ1VALID1,
- AB9540_REGUHWHPREQ1VALID2,
- AB9540_REGUHWHPREQ2VALID1,
- AB9540_REGUHWHPREQ2VALID2,
- AB9540_REGUSWHPREQVALID1,
- AB9540_REGUSWHPREQVALID2,
- AB9540_REGUSYSCLKREQVALID1,
- AB9540_REGUSYSCLKREQVALID2,
- AB9540_REGUVAUX4REQVALID,
- AB9540_REGUMISC1,
- AB9540_VAUDIOSUPPLY,
- AB9540_REGUCTRL1VAMIC,
- AB9540_VSMPS1REGU,
- AB9540_VSMPS2REGU,
- AB9540_VSMPS3REGU, /* NOTE! PRCMU register */
- AB9540_VPLLVANAREGU,
- AB9540_EXTSUPPLYREGU,
- AB9540_VAUX12REGU,
- AB9540_VRF1VAUX3REGU,
- AB9540_VSMPS1SEL1,
- AB9540_VSMPS1SEL2,
- AB9540_VSMPS1SEL3,
- AB9540_VSMPS2SEL1,
- AB9540_VSMPS2SEL2,
- AB9540_VSMPS2SEL3,
- AB9540_VSMPS3SEL1, /* NOTE! PRCMU register */
- AB9540_VSMPS3SEL2, /* NOTE! PRCMU register */
- AB9540_VAUX1SEL,
- AB9540_VAUX2SEL,
- AB9540_VRF1VAUX3SEL,
- AB9540_REGUCTRL2SPARE,
- AB9540_VAUX4REQCTRL,
- AB9540_VAUX4REGU,
- AB9540_VAUX4SEL,
- AB9540_REGUCTRLDISCH,
- AB9540_REGUCTRLDISCH2,
- AB9540_REGUCTRLDISCH3,
- AB9540_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8540 registers */
-enum ab8540_regulator_reg {
- AB8540_REGUREQUESTCTRL1,
- AB8540_REGUREQUESTCTRL2,
- AB8540_REGUREQUESTCTRL3,
- AB8540_REGUREQUESTCTRL4,
- AB8540_REGUSYSCLKREQ1HPVALID1,
- AB8540_REGUSYSCLKREQ1HPVALID2,
- AB8540_REGUHWHPREQ1VALID1,
- AB8540_REGUHWHPREQ1VALID2,
- AB8540_REGUHWHPREQ2VALID1,
- AB8540_REGUHWHPREQ2VALID2,
- AB8540_REGUSWHPREQVALID1,
- AB8540_REGUSWHPREQVALID2,
- AB8540_REGUSYSCLKREQVALID1,
- AB8540_REGUSYSCLKREQVALID2,
- AB8540_REGUVAUX4REQVALID,
- AB8540_REGUVAUX5REQVALID,
- AB8540_REGUVAUX6REQVALID,
- AB8540_REGUVCLKBREQVALID,
- AB8540_REGUVRF1REQVALID,
- AB8540_REGUMISC1,
- AB8540_VAUDIOSUPPLY,
- AB8540_REGUCTRL1VAMIC,
- AB8540_VHSIC,
- AB8540_VSDIO,
- AB8540_VSMPS1REGU,
- AB8540_VSMPS2REGU,
- AB8540_VSMPS3REGU,
- AB8540_VPLLVANAREGU,
- AB8540_EXTSUPPLYREGU,
- AB8540_VAUX12REGU,
- AB8540_VRF1VAUX3REGU,
- AB8540_VSMPS1SEL1,
- AB8540_VSMPS1SEL2,
- AB8540_VSMPS1SEL3,
- AB8540_VSMPS2SEL1,
- AB8540_VSMPS2SEL2,
- AB8540_VSMPS2SEL3,
- AB8540_VSMPS3SEL1,
- AB8540_VSMPS3SEL2,
- AB8540_VAUX1SEL,
- AB8540_VAUX2SEL,
- AB8540_VRF1VAUX3SEL,
- AB8540_REGUCTRL2SPARE,
- AB8540_VAUX4REQCTRL,
- AB8540_VAUX4REGU,
- AB8540_VAUX4SEL,
- AB8540_VAUX5REQCTRL,
- AB8540_VAUX5REGU,
- AB8540_VAUX5SEL,
- AB8540_VAUX6REQCTRL,
- AB8540_VAUX6REGU,
- AB8540_VAUX6SEL,
- AB8540_VCLKBREQCTRL,
- AB8540_VCLKBREGU,
- AB8540_VCLKBSEL,
- AB8540_VRF1REQCTRL,
- AB8540_REGUCTRLDISCH,
- AB8540_REGUCTRLDISCH2,
- AB8540_REGUCTRLDISCH3,
- AB8540_REGUCTRLDISCH4,
- AB8540_VSIMSYSCLKCTRL,
- AB8540_VANAVPLLSEL,
- AB8540_NUM_REGULATOR_REGISTERS,
-};
-
/* AB8500 external regulators */
struct ab8500_ext_regulator_cfg {
bool hwreq; /* requires hw mode or high power mode */
diff --git a/include/linux/regulator/arizona-ldo1.h b/include/linux/regulator/arizona-ldo1.h
index c685f1277c63..fe74ab9990e6 100644
--- a/include/linux/regulator/arizona-ldo1.h
+++ b/include/linux/regulator/arizona-ldo1.h
@@ -14,9 +14,6 @@
struct regulator_init_data;
struct arizona_ldo1_pdata {
- /** GPIO controlling LDOENA, if any */
- int ldoena;
-
/** Regulator configuration for LDO1 */
const struct regulator_init_data *init_data;
};
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index df176d7c2b87..25602afd4844 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -80,6 +80,7 @@ struct regmap;
* These modes can be OR'ed together to make up a mask of valid register modes.
*/
+#define REGULATOR_MODE_INVALID 0x0
#define REGULATOR_MODE_FAST 0x1
#define REGULATOR_MODE_NORMAL 0x2
#define REGULATOR_MODE_IDLE 0x4
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4fc96cb8e5d7..fc2dc8df476f 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -15,6 +15,8 @@
#ifndef __LINUX_REGULATOR_DRIVER_H_
#define __LINUX_REGULATOR_DRIVER_H_
+#define MAX_COUPLED 4
+
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>
@@ -81,9 +83,12 @@ struct regulator_linear_range {
* @set_voltage_sel: Set the voltage for the regulator using the specified
* selector.
* @map_voltage: Convert a voltage into a selector
- * @get_voltage: Return the currently configured voltage for the regulator.
+ * @get_voltage: Return the currently configured voltage for the regulator;
+ * return -ENOTRECOVERABLE if regulator can't be read at
+ * bootup and hasn't been set yet.
* @get_voltage_sel: Return the currently configured voltage selector for the
- * regulator.
+ * regulator; return -ENOTRECOVERABLE if regulator can't
+ * be read at bootup and hasn't been set yet.
* @list_voltage: Return one of the supported voltages, in microvolts; zero
* if the selector indicates a voltage that is unusable on this system;
* or negative errno. Selectors range from zero to one less than
@@ -407,6 +412,20 @@ struct regulator_config {
};
/*
+ * struct coupling_desc
+ *
+ * Describes coupling of regulators. Each regulator should have
+ * at least a pointer to itself in coupled_rdevs array.
+ * When a new coupled regulator is resolved, n_resolved is
+ * incremented.
+ */
+struct coupling_desc {
+ struct regulator_dev *coupled_rdevs[MAX_COUPLED];
+ int n_resolved;
+ int n_coupled;
+};
+
+/*
* struct regulator_dev
*
* Voltage / Current regulator class device. One for each
@@ -429,8 +448,12 @@ struct regulator_dev {
/* lists we own */
struct list_head consumer_list; /* consumers we supply */
+ struct coupling_desc coupling_desc;
+
struct blocking_notifier_head notifier;
struct mutex mutex; /* consumer lock */
+ struct task_struct *mutex_owner;
+ int ref_cnt;
struct module *owner;
struct device dev;
struct regulation_constraints *constraints;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 93a04893c739..3468703d663a 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -103,6 +103,7 @@ struct regulator_state {
* @ilim_uA: Maximum input current.
* @system_load: Load that isn't captured by any consumer requests.
*
+ * @max_spread: Max possible spread between coupled regulators
* @valid_modes_mask: Mask of modes which may be configured by consumers.
* @valid_ops_mask: Operations which may be performed by consumers.
*
@@ -154,6 +155,9 @@ struct regulation_constraints {
int system_load;
+ /* used for coupled regulators */
+ int max_spread;
+
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h
index 4dbb63a1d4ab..686c42c041b5 100644
--- a/include/linux/regulator/max8952.h
+++ b/include/linux/regulator/max8952.h
@@ -120,7 +120,6 @@ enum {
struct max8952_platform_data {
int gpio_vid0;
int gpio_vid1;
- int gpio_en;
u32 default_mode;
u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 4c007f69082f..6268208760e9 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -285,7 +285,7 @@ void rtc_nvmem_unregister(struct rtc_device *rtc);
static inline int rtc_nvmem_register(struct rtc_device *rtc,
struct nvmem_config *nvmem_config)
{
- return -ENODEV;
+ return 0;
}
static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {}
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 14e4f9c12337..3aa4fcb74e76 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -27,6 +27,7 @@
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
+#include <linux/rseq.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -1047,6 +1048,17 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_RSEQ
+ struct rseq __user *rseq;
+ u32 rseq_len;
+ u32 rseq_sig;
+ /*
+ * RmW on rseq_event_mask must be performed atomically
+ * with respect to preemption.
+ */
+ unsigned long rseq_event_mask;
+#endif
+
struct tlbflush_unmap_batch tlb_ubc;
struct rcu_head rcu;
@@ -1757,4 +1769,126 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
+#ifdef CONFIG_RSEQ
+
+/*
+ * Map the event mask on the user-space ABI enum rseq_cs_flags
+ * for direct mask checks.
+ */
+enum rseq_event_mask_bits {
+ RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
+ RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
+ RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
+};
+
+enum rseq_event_mask {
+ RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
+ RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
+ RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
+};
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+ if (t->rseq)
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+}
+
+void __rseq_handle_notify_resume(struct pt_regs *regs);
+
+static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+{
+ if (current->rseq)
+ __rseq_handle_notify_resume(regs);
+}
+
+static inline void rseq_signal_deliver(struct pt_regs *regs)
+{
+ preempt_disable();
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ preempt_enable();
+ rseq_handle_notify_resume(regs);
+}
+
+/* rseq_preempt() requires preemption to be disabled. */
+static inline void rseq_preempt(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/* rseq_migrate() requires preemption to be disabled. */
+static inline void rseq_migrate(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/*
+ * If parent process has a registered restartable sequences area, the
+ * child inherits. Only applies when forking a process, not a thread. In
+ * case a parent does fork() in the middle of a restartable sequence, set the
+ * resume notifier to force the child to retry.
+ */
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+ if (clone_flags & CLONE_THREAD) {
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+ } else {
+ t->rseq = current->rseq;
+ t->rseq_len = current->rseq_len;
+ t->rseq_sig = current->rseq_sig;
+ t->rseq_event_mask = current->rseq_event_mask;
+ rseq_preempt(t);
+ }
+}
+
+static inline void rseq_execve(struct task_struct *t)
+{
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+}
+
+#else
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+}
+static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+{
+}
+static inline void rseq_signal_deliver(struct pt_regs *regs)
+{
+}
+static inline void rseq_preempt(struct task_struct *t)
+{
+}
+static inline void rseq_migrate(struct task_struct *t)
+{
+}
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+}
+static inline void rseq_execve(struct task_struct *t)
+{
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_RSEQ
+
+void rseq_syscall(struct pt_regs *regs);
+
+#else
+
+static inline void rseq_syscall(struct pt_regs *regs)
+{
+}
+
+#endif
+
#endif
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 76a8cb4ef178..44d356f5e47c 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -163,9 +163,13 @@ static inline gfp_t current_gfp_context(gfp_t flags)
}
#ifdef CONFIG_LOCKDEP
+extern void __fs_reclaim_acquire(void);
+extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
+static inline void __fs_reclaim_acquire(void) { }
+static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 73b5e655a76e..f155dc607112 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -110,19 +110,6 @@ static inline bool shmem_file(struct file *file)
extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);
-#ifdef CONFIG_TMPFS
-
-extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
-
-#else
-
-static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
-{
- return -EINVAL;
-}
-
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
extern bool shmem_huge_enabled(struct vm_area_struct *vma);
#else
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index d9228e4d0320..3485c58cfd1c 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -67,9 +67,10 @@ struct kmem_cache {
/*
* If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. size contains the total
- * object size including these internal fields, the following two
- * variables contain the offset to the user object and its size.
+ * fields and/or padding to every object. 'size' contains the total
+ * object size including these internal fields, while 'obj_offset'
+ * and 'object_size' contain the offset to the user object and its
+ * size.
*/
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 3773e26c08c1..09fa2c6f0e68 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -101,7 +101,6 @@ struct kmem_cache {
void (*ctor)(void *);
unsigned int inuse; /* Offset to metadata */
unsigned int align; /* Alignment */
- unsigned int reserved; /* Reserved bytes at the end of slabs */
unsigned int red_left_pad; /* Left redzone padding size */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 390e814fdc8d..73810808cdf2 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -66,6 +66,7 @@ struct old_linux_dirent;
struct perf_event_attr;
struct file_handle;
struct sigaltstack;
+struct rseq;
union bpf_attr;
#include <linux/types.h>
@@ -897,7 +898,8 @@ asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
asmlinkage long sys_pkey_free(int pkey);
asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
unsigned mask, struct statx __user *buffer);
-
+asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
+ int flags, uint32_t sig);
/*
* Architecture-specific system calls
diff --git a/include/linux/types.h b/include/linux/types.h
index ec13d02b3481..9834e90aa010 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -10,14 +10,14 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
-typedef __u32 __kernel_dev_t;
+typedef u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef unsigned short umode_t;
-typedef __u32 nlink_t;
+typedef u32 nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
@@ -95,29 +95,29 @@ typedef unsigned long ulong;
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
-typedef __u8 u_int8_t;
-typedef __s8 int8_t;
-typedef __u16 u_int16_t;
-typedef __s16 int16_t;
-typedef __u32 u_int32_t;
-typedef __s32 int32_t;
+typedef u8 u_int8_t;
+typedef s8 int8_t;
+typedef u16 u_int16_t;
+typedef s16 int16_t;
+typedef u32 u_int32_t;
+typedef s32 int32_t;
#endif /* !(__BIT_TYPES_DEFINED__) */
-typedef __u8 uint8_t;
-typedef __u16 uint16_t;
-typedef __u32 uint32_t;
+typedef u8 uint8_t;
+typedef u16 uint16_t;
+typedef u32 uint32_t;
#if defined(__GNUC__)
-typedef __u64 uint64_t;
-typedef __u64 u_int64_t;
-typedef __s64 int64_t;
+typedef u64 uint64_t;
+typedef u64 u_int64_t;
+typedef s64 int64_t;
#endif
/* this is a special 64bit data type that is 8-byte aligned */
-#define aligned_u64 __u64 __attribute__((aligned(8)))
-#define aligned_be64 __be64 __attribute__((aligned(8)))
-#define aligned_le64 __le64 __attribute__((aligned(8)))
+#define aligned_u64 __aligned_u64
+#define aligned_be64 __aligned_be64
+#define aligned_le64 __aligned_le64
/**
* The type used for indexing onto a disc or disc partition.
diff --git a/include/linux/uio.h b/include/linux/uio.h
index f5766e853a77..409c845d4cd3 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -155,7 +155,7 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#endif
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
-size_t _copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i);
+size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index f2f3b68ba910..e091f0a11b11 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -31,10 +31,12 @@
extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
- unsigned long src_start, unsigned long len);
+ unsigned long src_start, unsigned long len,
+ bool *mmap_changing);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
unsigned long dst_start,
- unsigned long len);
+ unsigned long len,
+ bool *mmap_changing);
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 9c689868eb4d..a0794632fd01 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -298,30 +298,44 @@ TRACE_EVENT(non_standard_event,
TRACE_EVENT(aer_event,
TP_PROTO(const char *dev_name,
const u32 status,
- const u8 severity),
+ const u8 severity,
+ const u8 tlp_header_valid,
+ struct aer_header_log_regs *tlp),
- TP_ARGS(dev_name, status, severity),
+ TP_ARGS(dev_name, status, severity, tlp_header_valid, tlp),
TP_STRUCT__entry(
__string( dev_name, dev_name )
__field( u32, status )
__field( u8, severity )
+ __field( u8, tlp_header_valid)
+ __array( u32, tlp_header, 4 )
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
__entry->status = status;
__entry->severity = severity;
+ __entry->tlp_header_valid = tlp_header_valid;
+ if (tlp_header_valid) {
+ __entry->tlp_header[0] = tlp->dw0;
+ __entry->tlp_header[1] = tlp->dw1;
+ __entry->tlp_header[2] = tlp->dw2;
+ __entry->tlp_header[3] = tlp->dw3;
+ }
),
- TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
+ TP_printk("%s PCIe Bus Error: severity=%s, %s, TLP Header=%s\n",
__get_str(dev_name),
__entry->severity == AER_CORRECTABLE ? "Corrected" :
__entry->severity == AER_FATAL ?
"Fatal" : "Uncorrected, non-fatal",
__entry->severity == AER_CORRECTABLE ?
__print_flags(__entry->status, "|", aer_correctable_errors) :
- __print_flags(__entry->status, "|", aer_uncorrectable_errors))
+ __print_flags(__entry->status, "|", aer_uncorrectable_errors),
+ __entry->tlp_header_valid ?
+ __print_array(__entry->tlp_header, 4, 4) :
+ "Not available")
);
/*
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index a08cc7278980..c2c8b1fdeead 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -49,22 +49,6 @@
#include <net/ipv6.h>
#include <net/net_namespace.h>
-struct rdma_addr_client {
- atomic_t refcount;
- struct completion comp;
-};
-
-/**
- * rdma_addr_register_client - Register an address client.
- */
-void rdma_addr_register_client(struct rdma_addr_client *client);
-
-/**
- * rdma_addr_unregister_client - Deregister an address client.
- * @client: Client object to deregister.
- */
-void rdma_addr_unregister_client(struct rdma_addr_client *client);
-
/**
* struct rdma_dev_addr - Contains resolved RDMA hardware addresses
* @src_dev_addr: Source MAC address.
@@ -99,7 +83,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
/**
* rdma_resolve_ip - Resolve source and destination IP addresses to
* RDMA hardware addresses.
- * @client: Address client associated with request.
* @src_addr: An optional source address to use in the resolution. If a
* source address is not provided, a usable address will be returned via
* the callback.
@@ -112,8 +95,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
* or been canceled. A status of 0 indicates success.
* @context: User-specified context associated with the call.
*/
-int rdma_resolve_ip(struct rdma_addr_client *client,
- struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index eb49cc8d1f95..a5f249828115 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -149,4 +149,5 @@ int ib_get_cached_port_state(struct ib_device *device,
u8 port_num,
enum ib_port_state *port_active);
+bool rdma_is_zero_gid(const union ib_gid *gid);
#endif /* _IB_CACHE_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9fc8a825aa28..2043e1a8f851 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -861,6 +861,19 @@ enum ib_sig_err_type {
};
/**
+ * Signature check masks (8 bytes in total) according to the T10-PI standard:
+ * -------- -------- ------------
+ * | GUARD | APPTAG | REFTAG |
+ * | 2B | 2B | 4B |
+ * -------- -------- ------------
+ */
+enum {
+ IB_SIG_CHECK_GUARD = 0xc0,
+ IB_SIG_CHECK_APPTAG = 0x30,
+ IB_SIG_CHECK_REFTAG = 0x0f,
+};
+
+/**
* struct ib_sig_err - signature error descriptor
*/
struct ib_sig_err {
@@ -1852,14 +1865,17 @@ enum ib_flow_spec_type {
IB_FLOW_SPEC_TCP = 0x40,
IB_FLOW_SPEC_UDP = 0x41,
IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
+ IB_FLOW_SPEC_GRE = 0x51,
+ IB_FLOW_SPEC_MPLS = 0x60,
IB_FLOW_SPEC_INNER = 0x100,
/* Actions */
IB_FLOW_SPEC_ACTION_TAG = 0x1000,
IB_FLOW_SPEC_ACTION_DROP = 0x1001,
IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
+ IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
-#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
+#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
/* Flow steering rule priority is set according to it's domain.
* Lower domain value means higher priority.
@@ -1994,6 +2010,34 @@ struct ib_flow_spec_esp {
struct ib_flow_esp_filter mask;
};
+struct ib_flow_gre_filter {
+ __be16 c_ks_res0_ver;
+ __be16 protocol;
+ __be32 key;
+ /* Must be last */
+ u8 real_sz[0];
+};
+
+struct ib_flow_spec_gre {
+ u32 type;
+ u16 size;
+ struct ib_flow_gre_filter val;
+ struct ib_flow_gre_filter mask;
+};
+
+struct ib_flow_mpls_filter {
+ __be32 tag;
+ /* Must be last */
+ u8 real_sz[0];
+};
+
+struct ib_flow_spec_mpls {
+ u32 type;
+ u16 size;
+ struct ib_flow_mpls_filter val;
+ struct ib_flow_mpls_filter mask;
+};
+
struct ib_flow_spec_action_tag {
enum ib_flow_spec_type type;
u16 size;
@@ -2011,6 +2055,17 @@ struct ib_flow_spec_action_handle {
struct ib_flow_action *act;
};
+enum ib_counters_description {
+ IB_COUNTER_PACKETS,
+ IB_COUNTER_BYTES,
+};
+
+struct ib_flow_spec_action_count {
+ enum ib_flow_spec_type type;
+ u16 size;
+ struct ib_counters *counters;
+};
+
union ib_flow_spec {
struct {
u32 type;
@@ -2023,9 +2078,12 @@ union ib_flow_spec {
struct ib_flow_spec_ipv6 ipv6;
struct ib_flow_spec_tunnel tunnel;
struct ib_flow_spec_esp esp;
+ struct ib_flow_spec_gre gre;
+ struct ib_flow_spec_mpls mpls;
struct ib_flow_spec_action_tag flow_tag;
struct ib_flow_spec_action_drop drop;
struct ib_flow_spec_action_handle action;
+ struct ib_flow_spec_action_count flow_count;
};
struct ib_flow_attr {
@@ -2180,6 +2238,24 @@ struct ib_port_pkey_list {
struct list_head pkey_list;
};
+struct ib_counters {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ /* num of objects attached */
+ atomic_t usecnt;
+};
+
+enum ib_read_counters_flags {
+ /* prefer read values from driver cache */
+ IB_READ_COUNTERS_ATTR_PREFER_CACHED = 1 << 0,
+};
+
+struct ib_counters_read_attr {
+ u64 *counters_buff;
+ u32 ncounters;
+ u32 flags; /* use enum ib_read_counters_flags */
+};
+
struct uverbs_attr_bundle;
struct ib_device {
@@ -2409,7 +2485,8 @@ struct ib_device {
struct ib_flow * (*create_flow)(struct ib_qp *qp,
struct ib_flow_attr
*flow_attr,
- int domain);
+ int domain,
+ struct ib_udata *udata);
int (*destroy_flow)(struct ib_flow *flow_id);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
@@ -2451,6 +2528,13 @@ struct ib_device {
struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
+ struct ib_counters * (*create_counters)(struct ib_device *device,
+ struct uverbs_attr_bundle *attrs);
+ int (*destroy_counters)(struct ib_counters *counters);
+ int (*read_counters)(struct ib_counters *counters,
+ struct ib_counters_read_attr *counters_read_attr,
+ struct uverbs_attr_bundle *attrs);
+
/**
* rdma netdev operation
*
@@ -3734,6 +3818,20 @@ static inline int ib_check_mr_access(int flags)
return 0;
}
+static inline bool ib_access_writable(int access_flags)
+{
+ /*
+ * We have writable memory backing the MR if any of the following
+ * access flags are set. "Local write" and "remote write" obviously
+ * require write access. "Remote atomic" can do things like fetch and
+ * add, which will modify memory, and "MW bind" can change permissions
+ * by binding a window.
+ */
+ return access_flags &
+ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
+}
+
/**
* ib_check_mr_status: lightweight check of MR status.
* This routine may provide status checks on a selected
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 690934733ba7..c5c1435c129a 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -420,4 +420,7 @@ const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
union ib_gid *dgid);
+struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *cm_id);
+struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res);
+
#endif /* RDMA_CM_H */
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 3f4c187e435d..e79229a0cf01 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -2,7 +2,7 @@
#define DEF_RDMA_VT_H
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -167,7 +167,6 @@ struct rvt_driver_params {
int qpn_res_end;
int nports;
int npkeys;
- char cq_name[RVT_CQN_MAX];
int node;
int psn_mask;
int psn_shift;
@@ -347,6 +346,9 @@ struct rvt_driver_provided {
/* Notify driver to restart rc */
void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);
+
+ /* Get and return CPU to pin CQ processing thread */
+ int (*comp_vect_cpu_lookup)(struct rvt_dev_info *rdi, int comp_vect);
};
struct rvt_dev_info {
@@ -402,7 +404,6 @@ struct rvt_dev_info {
spinlock_t pending_lock; /* protect pending mmap list */
/* CQ */
- struct kthread_worker *worker; /* per device cq worker */
u32 n_cqs_allocated; /* number of CQs allocated for device */
spinlock_t n_cqs_lock; /* protect count of in use cqs */
diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h
index 51fd00b243d0..75dc65c0bfb8 100644
--- a/include/rdma/rdmavt_cq.h
+++ b/include/rdma/rdmavt_cq.h
@@ -8,7 +8,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -80,10 +80,11 @@ struct rvt_cq_wc {
*/
struct rvt_cq {
struct ib_cq ibcq;
- struct kthread_work comptask;
+ struct work_struct comptask;
spinlock_t lock; /* protect changes in this struct */
u8 notify;
u8 triggered;
+ int comp_vector_cpu;
struct rvt_dev_info *rdi;
struct rvt_cq_wc *queue;
struct rvt_mmap_info *ip;
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 89ab88c342b6..1145a4c154b2 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -663,6 +663,7 @@ static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
+int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index f3b3e3576f6a..9654d33edd98 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
*/
@@ -12,6 +12,7 @@
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/sched/task.h>
+#include <uapi/rdma/rdma_netlink.h>
/**
* enum rdma_restrack_type - HW objects to track
@@ -44,6 +45,8 @@ enum rdma_restrack_type {
};
#define RDMA_RESTRACK_HASH_BITS 8
+struct rdma_restrack_entry;
+
/**
* struct rdma_restrack_root - main resource tracking management
* entity, per-device
@@ -57,6 +60,13 @@ struct rdma_restrack_root {
* @hash: global database for all resources per-device
*/
DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
+ /**
+ * @fill_res_entry: driver-specific fill function
+ *
+ * Allows rdma drivers to add their own restrack attributes.
+ */
+ int (*fill_res_entry)(struct sk_buff *msg,
+ struct rdma_restrack_entry *entry);
};
/**
@@ -174,4 +184,14 @@ static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res,
res->task = task;
}
+/*
+ * Helper functions for rdma drivers when filling out
+ * nldev driver attributes.
+ */
+int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value);
+int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
+ u32 value);
+int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value);
+int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name,
+ u64 value);
#endif /* _RDMA_RESTRACK_H_ */
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 095383a4bd1a..bd6bba3a6e04 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -420,6 +420,17 @@ static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_b
return attr->obj_attr.uobject->object;
}
+static inline struct ib_uobject *uverbs_attr_get_uobject(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+
+ if (IS_ERR(attr))
+ return ERR_CAST(attr);
+
+ return attr->obj_attr.uobject;
+}
+
static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, const void *from, size_t size)
{
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 7ae177c8e399..4c36af6edd79 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -15,7 +15,7 @@ struct scsi_cmnd;
struct scsi_lun;
struct scsi_sense_hdr;
-typedef unsigned int __bitwise blist_flags_t;
+typedef __u64 __bitwise blist_flags_t;
struct scsi_mode_data {
__u32 length;
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index ea67c32e870e..3fdb322d4c4b 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -6,55 +6,80 @@
*/
/* Only scan LUN 0 */
-#define BLIST_NOLUN ((__force blist_flags_t)(1 << 0))
+#define BLIST_NOLUN ((__force blist_flags_t)(1ULL << 0))
/* Known to have LUNs, force scanning.
* DEPRECATED: Use max_luns=N */
-#define BLIST_FORCELUN ((__force blist_flags_t)(1 << 1))
+#define BLIST_FORCELUN ((__force blist_flags_t)(1ULL << 1))
/* Flag for broken handshaking */
-#define BLIST_BORKEN ((__force blist_flags_t)(1 << 2))
+#define BLIST_BORKEN ((__force blist_flags_t)(1ULL << 2))
/* unlock by special command */
-#define BLIST_KEY ((__force blist_flags_t)(1 << 3))
+#define BLIST_KEY ((__force blist_flags_t)(1ULL << 3))
/* Do not use LUNs in parallel */
-#define BLIST_SINGLELUN ((__force blist_flags_t)(1 << 4))
+#define BLIST_SINGLELUN ((__force blist_flags_t)(1ULL << 4))
/* Buggy Tagged Command Queuing */
-#define BLIST_NOTQ ((__force blist_flags_t)(1 << 5))
+#define BLIST_NOTQ ((__force blist_flags_t)(1ULL << 5))
/* Non consecutive LUN numbering */
-#define BLIST_SPARSELUN ((__force blist_flags_t)(1 << 6))
+#define BLIST_SPARSELUN ((__force blist_flags_t)(1ULL << 6))
/* Avoid LUNS >= 5 */
-#define BLIST_MAX5LUN ((__force blist_flags_t)(1 << 7))
+#define BLIST_MAX5LUN ((__force blist_flags_t)(1ULL << 7))
/* Treat as (removable) CD-ROM */
-#define BLIST_ISROM ((__force blist_flags_t)(1 << 8))
+#define BLIST_ISROM ((__force blist_flags_t)(1ULL << 8))
/* LUNs past 7 on a SCSI-2 device */
-#define BLIST_LARGELUN ((__force blist_flags_t)(1 << 9))
+#define BLIST_LARGELUN ((__force blist_flags_t)(1ULL << 9))
/* override additional length field */
-#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1 << 10))
+#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1ULL << 10))
+#define __BLIST_UNUSED_11 ((__force blist_flags_t)(1ULL << 11))
/* do not do automatic start on add */
-#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1 << 12))
+#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1ULL << 12))
+#define __BLIST_UNUSED_13 ((__force blist_flags_t)(1ULL << 13))
+#define __BLIST_UNUSED_14 ((__force blist_flags_t)(1ULL << 14))
+#define __BLIST_UNUSED_15 ((__force blist_flags_t)(1ULL << 15))
+#define __BLIST_UNUSED_16 ((__force blist_flags_t)(1ULL << 16))
/* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */
-#define BLIST_REPORTLUN2 ((__force blist_flags_t)(1 << 17))
+#define BLIST_REPORTLUN2 ((__force blist_flags_t)(1ULL << 17))
/* don't try REPORT_LUNS scan (SCSI-3 devs) */
-#define BLIST_NOREPORTLUN ((__force blist_flags_t)(1 << 18))
+#define BLIST_NOREPORTLUN ((__force blist_flags_t)(1ULL << 18))
/* don't use PREVENT-ALLOW commands */
-#define BLIST_NOT_LOCKABLE ((__force blist_flags_t)(1 << 19))
+#define BLIST_NOT_LOCKABLE ((__force blist_flags_t)(1ULL << 19))
/* device is actually for RAID config */
-#define BLIST_NO_ULD_ATTACH ((__force blist_flags_t)(1 << 20))
+#define BLIST_NO_ULD_ATTACH ((__force blist_flags_t)(1ULL << 20))
/* select without ATN */
-#define BLIST_SELECT_NO_ATN ((__force blist_flags_t)(1 << 21))
+#define BLIST_SELECT_NO_ATN ((__force blist_flags_t)(1ULL << 21))
/* retry HARDWARE_ERROR */
-#define BLIST_RETRY_HWERROR ((__force blist_flags_t)(1 << 22))
+#define BLIST_RETRY_HWERROR ((__force blist_flags_t)(1ULL << 22))
/* maximum 512 sector cdb length */
-#define BLIST_MAX_512 ((__force blist_flags_t)(1 << 23))
+#define BLIST_MAX_512 ((__force blist_flags_t)(1ULL << 23))
+#define __BLIST_UNUSED_24 ((__force blist_flags_t)(1ULL << 24))
/* Disable T10 PI (DIF) */
-#define BLIST_NO_DIF ((__force blist_flags_t)(1 << 25))
+#define BLIST_NO_DIF ((__force blist_flags_t)(1ULL << 25))
/* Ignore SBC-3 VPD pages */
-#define BLIST_SKIP_VPD_PAGES ((__force blist_flags_t)(1 << 26))
+#define BLIST_SKIP_VPD_PAGES ((__force blist_flags_t)(1ULL << 26))
+#define __BLIST_UNUSED_27 ((__force blist_flags_t)(1ULL << 27))
/* Attempt to read VPD pages */
-#define BLIST_TRY_VPD_PAGES ((__force blist_flags_t)(1 << 28))
+#define BLIST_TRY_VPD_PAGES ((__force blist_flags_t)(1ULL << 28))
/* don't try to issue RSOC */
-#define BLIST_NO_RSOC ((__force blist_flags_t)(1 << 29))
+#define BLIST_NO_RSOC ((__force blist_flags_t)(1ULL << 29))
/* maximum 1024 sector cdb length */
-#define BLIST_MAX_1024 ((__force blist_flags_t)(1 << 30))
+#define BLIST_MAX_1024 ((__force blist_flags_t)(1ULL << 30))
/* Use UNMAP limit for WRITE SAME */
-#define BLIST_UNMAP_LIMIT_WS ((__force blist_flags_t)(1 << 31))
+#define BLIST_UNMAP_LIMIT_WS ((__force blist_flags_t)(1ULL << 31))
+/* Always retry ABORTED_COMMAND with Internal Target Failure */
+#define BLIST_RETRY_ITF ((__force blist_flags_t)(1ULL << 32))
+/* Always retry ABORTED_COMMAND with ASC 0xc1 */
+#define BLIST_RETRY_ASC_C1 ((__force blist_flags_t)(1ULL << 33))
+
+#define __BLIST_LAST_USED BLIST_RETRY_ASC_C1
+
+#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
+ (__force blist_flags_t) \
+ ((__force __u64)__BLIST_LAST_USED - 1ULL)))
+#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_11 | \
+ __BLIST_UNUSED_13 | \
+ __BLIST_UNUSED_14 | \
+ __BLIST_UNUSED_15 | \
+ __BLIST_UNUSED_16 | \
+ __BLIST_UNUSED_24 | \
+ __BLIST_UNUSED_27 | \
+ __BLIST_HIGH_UNUSED)
#endif
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 9f9f5902af38..922a39f45abc 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -143,6 +143,7 @@ enum se_cmd_flags_table {
SCF_ACK_KREF = 0x00400000,
SCF_USE_CPUID = 0x00800000,
SCF_TASK_ATTR_SET = 0x01000000,
+ SCF_TREAT_READ_AS_NORMAL = 0x02000000,
};
/*
diff --git a/include/trace/events/rseq.h b/include/trace/events/rseq.h
new file mode 100644
index 000000000000..a04a64bc1a00
--- /dev/null
+++ b/include/trace/events/rseq.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rseq
+
+#if !defined(_TRACE_RSEQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RSEQ_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+TRACE_EVENT(rseq_update,
+
+ TP_PROTO(struct task_struct *t),
+
+ TP_ARGS(t),
+
+ TP_STRUCT__entry(
+ __field(s32, cpu_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = raw_smp_processor_id();
+ ),
+
+ TP_printk("cpu_id=%d", __entry->cpu_id)
+);
+
+TRACE_EVENT(rseq_ip_fixup,
+
+ TP_PROTO(unsigned long regs_ip, unsigned long start_ip,
+ unsigned long post_commit_offset, unsigned long abort_ip),
+
+ TP_ARGS(regs_ip, start_ip, post_commit_offset, abort_ip),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, regs_ip)
+ __field(unsigned long, start_ip)
+ __field(unsigned long, post_commit_offset)
+ __field(unsigned long, abort_ip)
+ ),
+
+ TP_fast_assign(
+ __entry->regs_ip = regs_ip;
+ __entry->start_ip = start_ip;
+ __entry->post_commit_offset = post_commit_offset;
+ __entry->abort_ip = abort_ip;
+ ),
+
+ TP_printk("regs_ip=0x%lx start_ip=0x%lx post_commit_offset=%lu abort_ip=0x%lx",
+ __entry->regs_ip, __entry->start_ip,
+ __entry->post_commit_offset, __entry->abort_ip)
+);
+
+#endif /* _TRACE_RSEQ_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index ed0185945bb2..75846164290e 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -53,6 +53,7 @@ enum {
* is valid.
*/
#define IOCB_FLAG_RESFD (1 << 0)
+#define IOCB_FLAG_IOPRIO (1 << 1)
/* read() from /dev/aio returns these structures. */
struct io_event {
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 04f9bd249094..c35aee9ad4a6 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -147,6 +147,7 @@
#define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */
#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
+#define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index 2a4432c7a4b4..e13eec3dfb2f 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
- * Copyright 1997 Transmeta Corporation - All Rights Reserved
+ * Copyright 1997 Transmeta Corporation - All Rights Reserved
+ * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
+ * Copyright 2005-2006,2013,2017-2018 Ian Kent <raven@themaw.net>
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
@@ -8,7 +10,6 @@
*
* ----------------------------------------------------------------------- */
-
#ifndef _UAPI_LINUX_AUTO_FS_H
#define _UAPI_LINUX_AUTO_FS_H
@@ -18,13 +19,11 @@
#include <sys/ioctl.h>
#endif /* __KERNEL__ */
+#define AUTOFS_PROTO_VERSION 5
+#define AUTOFS_MIN_PROTO_VERSION 3
+#define AUTOFS_MAX_PROTO_VERSION 5
-/* This file describes autofs v3 */
-#define AUTOFS_PROTO_VERSION 3
-
-/* Range of protocol versions defined */
-#define AUTOFS_MAX_PROTO_VERSION AUTOFS_PROTO_VERSION
-#define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
+#define AUTOFS_PROTO_SUBVERSION 2
/*
* The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
@@ -76,9 +75,155 @@ enum {
#define AUTOFS_IOC_READY _IO(AUTOFS_IOCTL, AUTOFS_IOC_READY_CMD)
#define AUTOFS_IOC_FAIL _IO(AUTOFS_IOCTL, AUTOFS_IOC_FAIL_CMD)
#define AUTOFS_IOC_CATATONIC _IO(AUTOFS_IOCTL, AUTOFS_IOC_CATATONIC_CMD)
-#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOVER_CMD, int)
-#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, compat_ulong_t)
-#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, unsigned long)
-#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_CMD, struct autofs_packet_expire)
+#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_PROTOVER_CMD, int)
+#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_SETTIMEOUT_CMD, \
+ compat_ulong_t)
+#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_SETTIMEOUT_CMD, \
+ unsigned long)
+#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_EXPIRE_CMD, \
+ struct autofs_packet_expire)
+
+/* autofs version 4 and later definitions */
+
+/* Mask for expire behaviour */
+#define AUTOFS_EXP_IMMEDIATE 1
+#define AUTOFS_EXP_LEAVES 2
+
+#define AUTOFS_TYPE_ANY 0U
+#define AUTOFS_TYPE_INDIRECT 1U
+#define AUTOFS_TYPE_DIRECT 2U
+#define AUTOFS_TYPE_OFFSET 4U
+
+static inline void set_autofs_type_indirect(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_INDIRECT;
+}
+
+static inline unsigned int autofs_type_indirect(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_INDIRECT);
+}
+
+static inline void set_autofs_type_direct(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_DIRECT;
+}
+
+static inline unsigned int autofs_type_direct(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_DIRECT);
+}
+
+static inline void set_autofs_type_offset(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_OFFSET;
+}
+
+static inline unsigned int autofs_type_offset(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_OFFSET);
+}
+
+static inline unsigned int autofs_type_trigger(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_DIRECT || type == AUTOFS_TYPE_OFFSET);
+}
+
+/*
+ * This isn't really a type as we use it to say "no type set" to
+ * indicate we want to search for "any" mount in the
+ * autofs_dev_ioctl_ismountpoint() device ioctl function.
+ */
+static inline void set_autofs_type_any(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_ANY;
+}
+
+static inline unsigned int autofs_type_any(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_ANY);
+}
+
+/* Daemon notification packet types */
+enum autofs_notify {
+ NFY_NONE,
+ NFY_MOUNT,
+ NFY_EXPIRE
+};
+
+/* Kernel protocol version 4 packet types */
+
+/* Expire entry (umount request) */
+#define autofs_ptype_expire_multi 2
+
+/* Kernel protocol version 5 packet types */
+
+/* Indirect mount missing and expire requests. */
+#define autofs_ptype_missing_indirect 3
+#define autofs_ptype_expire_indirect 4
+
+/* Direct mount missing and expire requests */
+#define autofs_ptype_missing_direct 5
+#define autofs_ptype_expire_direct 6
+
+/* v4 multi expire (via pipe) */
+struct autofs_packet_expire_multi {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ int len;
+ char name[NAME_MAX+1];
+};
+
+union autofs_packet_union {
+ struct autofs_packet_hdr hdr;
+ struct autofs_packet_missing missing;
+ struct autofs_packet_expire expire;
+ struct autofs_packet_expire_multi expire_multi;
+};
+
+/* autofs v5 common packet struct */
+struct autofs_v5_packet {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ __u32 dev;
+ __u64 ino;
+ __u32 uid;
+ __u32 gid;
+ __u32 pid;
+ __u32 tgid;
+ __u32 len;
+ char name[NAME_MAX+1];
+};
+
+typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
+typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
+typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
+typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
+
+union autofs_v5_packet_union {
+ struct autofs_packet_hdr hdr;
+ struct autofs_v5_packet v5_packet;
+ autofs_packet_missing_indirect_t missing_indirect;
+ autofs_packet_expire_indirect_t expire_indirect;
+ autofs_packet_missing_direct_t missing_direct;
+ autofs_packet_expire_direct_t expire_direct;
+};
+
+enum {
+ AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */
+ AUTOFS_IOC_PROTOSUBVER_CMD,
+ AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */
+};
+
+#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, \
+ AUTOFS_IOC_EXPIRE_MULTI_CMD, int)
+#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_PROTOSUBVER_CMD, int)
+#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_ASKUMOUNT_CMD, int)
#endif /* _UAPI_LINUX_AUTO_FS_H */
diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h
index 1f608e27a06f..d01ef0a0189c 100644
--- a/include/uapi/linux/auto_fs4.h
+++ b/include/uapi/linux/auto_fs4.h
@@ -7,156 +7,9 @@
* option, any later version, incorporated herein by reference.
*/
-#ifndef _LINUX_AUTO_FS4_H
-#define _LINUX_AUTO_FS4_H
+#ifndef _UAPI_LINUX_AUTO_FS4_H
+#define _UAPI_LINUX_AUTO_FS4_H
-/* Include common v3 definitions */
-#include <linux/types.h>
#include <linux/auto_fs.h>
-/* autofs v4 definitions */
-#undef AUTOFS_PROTO_VERSION
-#undef AUTOFS_MIN_PROTO_VERSION
-#undef AUTOFS_MAX_PROTO_VERSION
-
-#define AUTOFS_PROTO_VERSION 5
-#define AUTOFS_MIN_PROTO_VERSION 3
-#define AUTOFS_MAX_PROTO_VERSION 5
-
-#define AUTOFS_PROTO_SUBVERSION 2
-
-/* Mask for expire behaviour */
-#define AUTOFS_EXP_IMMEDIATE 1
-#define AUTOFS_EXP_LEAVES 2
-
-#define AUTOFS_TYPE_ANY 0U
-#define AUTOFS_TYPE_INDIRECT 1U
-#define AUTOFS_TYPE_DIRECT 2U
-#define AUTOFS_TYPE_OFFSET 4U
-
-static inline void set_autofs_type_indirect(unsigned int *type)
-{
- *type = AUTOFS_TYPE_INDIRECT;
-}
-
-static inline unsigned int autofs_type_indirect(unsigned int type)
-{
- return (type == AUTOFS_TYPE_INDIRECT);
-}
-
-static inline void set_autofs_type_direct(unsigned int *type)
-{
- *type = AUTOFS_TYPE_DIRECT;
-}
-
-static inline unsigned int autofs_type_direct(unsigned int type)
-{
- return (type == AUTOFS_TYPE_DIRECT);
-}
-
-static inline void set_autofs_type_offset(unsigned int *type)
-{
- *type = AUTOFS_TYPE_OFFSET;
-}
-
-static inline unsigned int autofs_type_offset(unsigned int type)
-{
- return (type == AUTOFS_TYPE_OFFSET);
-}
-
-static inline unsigned int autofs_type_trigger(unsigned int type)
-{
- return (type == AUTOFS_TYPE_DIRECT || type == AUTOFS_TYPE_OFFSET);
-}
-
-/*
- * This isn't really a type as we use it to say "no type set" to
- * indicate we want to search for "any" mount in the
- * autofs_dev_ioctl_ismountpoint() device ioctl function.
- */
-static inline void set_autofs_type_any(unsigned int *type)
-{
- *type = AUTOFS_TYPE_ANY;
-}
-
-static inline unsigned int autofs_type_any(unsigned int type)
-{
- return (type == AUTOFS_TYPE_ANY);
-}
-
-/* Daemon notification packet types */
-enum autofs_notify {
- NFY_NONE,
- NFY_MOUNT,
- NFY_EXPIRE
-};
-
-/* Kernel protocol version 4 packet types */
-
-/* Expire entry (umount request) */
-#define autofs_ptype_expire_multi 2
-
-/* Kernel protocol version 5 packet types */
-
-/* Indirect mount missing and expire requests. */
-#define autofs_ptype_missing_indirect 3
-#define autofs_ptype_expire_indirect 4
-
-/* Direct mount missing and expire requests */
-#define autofs_ptype_missing_direct 5
-#define autofs_ptype_expire_direct 6
-
-/* v4 multi expire (via pipe) */
-struct autofs_packet_expire_multi {
- struct autofs_packet_hdr hdr;
- autofs_wqt_t wait_queue_token;
- int len;
- char name[NAME_MAX+1];
-};
-
-union autofs_packet_union {
- struct autofs_packet_hdr hdr;
- struct autofs_packet_missing missing;
- struct autofs_packet_expire expire;
- struct autofs_packet_expire_multi expire_multi;
-};
-
-/* autofs v5 common packet struct */
-struct autofs_v5_packet {
- struct autofs_packet_hdr hdr;
- autofs_wqt_t wait_queue_token;
- __u32 dev;
- __u64 ino;
- __u32 uid;
- __u32 gid;
- __u32 pid;
- __u32 tgid;
- __u32 len;
- char name[NAME_MAX+1];
-};
-
-typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
-typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
-typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
-typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
-
-union autofs_v5_packet_union {
- struct autofs_packet_hdr hdr;
- struct autofs_v5_packet v5_packet;
- autofs_packet_missing_indirect_t missing_indirect;
- autofs_packet_expire_indirect_t expire_indirect;
- autofs_packet_missing_direct_t missing_direct;
- autofs_packet_expire_direct_t expire_direct;
-};
-
-enum {
- AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */
- AUTOFS_IOC_PROTOSUBVER_CMD,
- AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */
-};
-
-#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_MULTI_CMD, int)
-#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOSUBVER_CMD, int)
-#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, AUTOFS_IOC_ASKUMOUNT_CMD, int)
-
-#endif /* _LINUX_AUTO_FS4_H */
+#endif /* _UAPI_LINUX_AUTO_FS4_H */
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index fa139841ec18..21b9113c69da 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -35,6 +35,6 @@
#define KPF_BALLOON 23
#define KPF_ZERO_PAGE 24
#define KPF_IDLE 25
-
+#define KPF_PGTABLE 26
#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/uapi/linux/ncp.h b/include/uapi/linux/ncp.h
deleted file mode 100644
index ca6f3d42c88f..000000000000
--- a/include/uapi/linux/ncp.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * ncp.h
- *
- * Copyright (C) 1995 by Volker Lendecke
- * Modified for sparc by J.F. Chadima
- * Modified for __constant_ntoh by Frank A. Vorstenbosch
- *
- */
-
-#ifndef _LINUX_NCP_H
-#define _LINUX_NCP_H
-
-#include <linux/types.h>
-
-#define NCP_PTYPE (0x11)
-#define NCP_PORT (0x0451)
-
-#define NCP_ALLOC_SLOT_REQUEST (0x1111)
-#define NCP_REQUEST (0x2222)
-#define NCP_DEALLOC_SLOT_REQUEST (0x5555)
-
-struct ncp_request_header {
- __u16 type;
- __u8 sequence;
- __u8 conn_low;
- __u8 task;
- __u8 conn_high;
- __u8 function;
- __u8 data[0];
-} __attribute__((packed));
-
-#define NCP_REPLY (0x3333)
-#define NCP_WATCHDOG (0x3E3E)
-#define NCP_POSITIVE_ACK (0x9999)
-
-struct ncp_reply_header {
- __u16 type;
- __u8 sequence;
- __u8 conn_low;
- __u8 task;
- __u8 conn_high;
- __u8 completion_code;
- __u8 connection_state;
- __u8 data[0];
-} __attribute__((packed));
-
-#define NCP_VOLNAME_LEN (16)
-#define NCP_NUMBER_OF_VOLUMES (256)
-struct ncp_volume_info {
- __u32 total_blocks;
- __u32 free_blocks;
- __u32 purgeable_blocks;
- __u32 not_yet_purgeable_blocks;
- __u32 total_dir_entries;
- __u32 available_dir_entries;
- __u8 sectors_per_block;
- char volume_name[NCP_VOLNAME_LEN + 1];
-};
-
-#define AR_READ (cpu_to_le16(1))
-#define AR_WRITE (cpu_to_le16(2))
-#define AR_EXCLUSIVE (cpu_to_le16(0x20))
-
-#define NCP_FILE_ID_LEN 6
-
-/* Defines for Name Spaces */
-#define NW_NS_DOS 0
-#define NW_NS_MAC 1
-#define NW_NS_NFS 2
-#define NW_NS_FTAM 3
-#define NW_NS_OS2 4
-
-/* Defines for ReturnInformationMask */
-#define RIM_NAME (cpu_to_le32(1))
-#define RIM_SPACE_ALLOCATED (cpu_to_le32(2))
-#define RIM_ATTRIBUTES (cpu_to_le32(4))
-#define RIM_DATA_SIZE (cpu_to_le32(8))
-#define RIM_TOTAL_SIZE (cpu_to_le32(0x10))
-#define RIM_EXT_ATTR_INFO (cpu_to_le32(0x20))
-#define RIM_ARCHIVE (cpu_to_le32(0x40))
-#define RIM_MODIFY (cpu_to_le32(0x80))
-#define RIM_CREATION (cpu_to_le32(0x100))
-#define RIM_OWNING_NAMESPACE (cpu_to_le32(0x200))
-#define RIM_DIRECTORY (cpu_to_le32(0x400))
-#define RIM_RIGHTS (cpu_to_le32(0x800))
-#define RIM_ALL (cpu_to_le32(0xFFF))
-#define RIM_COMPRESSED_INFO (cpu_to_le32(0x80000000))
-
-/* Defines for NSInfoBitMask */
-#define NSIBM_NFS_NAME 0x0001
-#define NSIBM_NFS_MODE 0x0002
-#define NSIBM_NFS_GID 0x0004
-#define NSIBM_NFS_NLINKS 0x0008
-#define NSIBM_NFS_RDEV 0x0010
-#define NSIBM_NFS_LINK 0x0020
-#define NSIBM_NFS_CREATED 0x0040
-#define NSIBM_NFS_UID 0x0080
-#define NSIBM_NFS_ACSFLAG 0x0100
-#define NSIBM_NFS_MYFLAG 0x0200
-
-/* open/create modes */
-#define OC_MODE_OPEN 0x01
-#define OC_MODE_TRUNCATE 0x02
-#define OC_MODE_REPLACE 0x02
-#define OC_MODE_CREATE 0x08
-
-/* open/create results */
-#define OC_ACTION_NONE 0x00
-#define OC_ACTION_OPEN 0x01
-#define OC_ACTION_CREATE 0x02
-#define OC_ACTION_TRUNCATE 0x04
-#define OC_ACTION_REPLACE 0x04
-
-/* access rights attributes */
-#ifndef AR_READ_ONLY
-#define AR_READ_ONLY 0x0001
-#define AR_WRITE_ONLY 0x0002
-#define AR_DENY_READ 0x0004
-#define AR_DENY_WRITE 0x0008
-#define AR_COMPATIBILITY 0x0010
-#define AR_WRITE_THROUGH 0x0040
-#define AR_OPEN_COMPRESSED 0x0100
-#endif
-
-struct nw_nfs_info {
- __u32 mode;
- __u32 rdev;
-};
-
-struct nw_info_struct {
- __u32 spaceAlloc;
- __le32 attributes;
- __u16 flags;
- __le32 dataStreamSize;
- __le32 totalStreamSize;
- __u16 numberOfStreams;
- __le16 creationTime;
- __le16 creationDate;
- __u32 creatorID;
- __le16 modifyTime;
- __le16 modifyDate;
- __u32 modifierID;
- __le16 lastAccessDate;
- __u16 archiveTime;
- __u16 archiveDate;
- __u32 archiverID;
- __u16 inheritedRightsMask;
- __le32 dirEntNum;
- __le32 DosDirNum;
- __u32 volNumber;
- __u32 EADataSize;
- __u32 EAKeyCount;
- __u32 EAKeySize;
- __u32 NSCreator;
- __u8 nameLen;
- __u8 entryName[256];
- /* libncp may depend on there being nothing after entryName */
-#ifdef __KERNEL__
- struct nw_nfs_info nfs;
-#endif
-} __attribute__((packed));
-
-/* modify mask - use with MODIFY_DOS_INFO structure */
-#define DM_ATTRIBUTES (cpu_to_le32(0x02))
-#define DM_CREATE_DATE (cpu_to_le32(0x04))
-#define DM_CREATE_TIME (cpu_to_le32(0x08))
-#define DM_CREATOR_ID (cpu_to_le32(0x10))
-#define DM_ARCHIVE_DATE (cpu_to_le32(0x20))
-#define DM_ARCHIVE_TIME (cpu_to_le32(0x40))
-#define DM_ARCHIVER_ID (cpu_to_le32(0x80))
-#define DM_MODIFY_DATE (cpu_to_le32(0x0100))
-#define DM_MODIFY_TIME (cpu_to_le32(0x0200))
-#define DM_MODIFIER_ID (cpu_to_le32(0x0400))
-#define DM_LAST_ACCESS_DATE (cpu_to_le32(0x0800))
-#define DM_INHERITED_RIGHTS_MASK (cpu_to_le32(0x1000))
-#define DM_MAXIMUM_SPACE (cpu_to_le32(0x2000))
-
-struct nw_modify_dos_info {
- __le32 attributes;
- __le16 creationDate;
- __le16 creationTime;
- __u32 creatorID;
- __le16 modifyDate;
- __le16 modifyTime;
- __u32 modifierID;
- __u16 archiveDate;
- __u16 archiveTime;
- __u32 archiverID;
- __le16 lastAccessDate;
- __u16 inheritanceGrantMask;
- __u16 inheritanceRevokeMask;
- __u32 maximumSpace;
-} __attribute__((packed));
-
-struct nw_search_sequence {
- __u8 volNumber;
- __u32 dirBase;
- __u32 sequence;
-} __attribute__((packed));
-
-#endif /* _LINUX_NCP_H */
diff --git a/include/uapi/linux/ncp_fs.h b/include/uapi/linux/ncp_fs.h
deleted file mode 100644
index e76a44229d2f..000000000000
--- a/include/uapi/linux/ncp_fs.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * ncp_fs.h
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- *
- */
-
-#ifndef _LINUX_NCP_FS_H
-#define _LINUX_NCP_FS_H
-
-#include <linux/fs.h>
-#include <linux/in.h>
-#include <linux/types.h>
-#include <linux/magic.h>
-
-#include <linux/ipx.h>
-#include <linux/ncp_no.h>
-
-/*
- * ioctl commands
- */
-
-struct ncp_ioctl_request {
- unsigned int function;
- unsigned int size;
- char __user *data;
-};
-
-struct ncp_fs_info {
- int version;
- struct sockaddr_ipx addr;
- __kernel_uid_t mounted_uid;
- int connection; /* Connection number the server assigned us */
- int buffer_size; /* The negotiated buffer size, to be
- used for read/write requests! */
-
- int volume_number;
- __le32 directory_id;
-};
-
-struct ncp_fs_info_v2 {
- int version;
- unsigned long mounted_uid;
- unsigned int connection;
- unsigned int buffer_size;
-
- unsigned int volume_number;
- __le32 directory_id;
-
- __u32 dummy1;
- __u32 dummy2;
- __u32 dummy3;
-};
-
-struct ncp_sign_init
-{
- char sign_root[8];
- char sign_last[16];
-};
-
-struct ncp_lock_ioctl
-{
-#define NCP_LOCK_LOG 0
-#define NCP_LOCK_SH 1
-#define NCP_LOCK_EX 2
-#define NCP_LOCK_CLEAR 256
- int cmd;
- int origin;
- unsigned int offset;
- unsigned int length;
-#define NCP_LOCK_DEFAULT_TIMEOUT 18
-#define NCP_LOCK_MAX_TIMEOUT 180
- int timeout;
-};
-
-struct ncp_setroot_ioctl
-{
- int volNumber;
- int namespace;
- __le32 dirEntNum;
-};
-
-struct ncp_objectname_ioctl
-{
-#define NCP_AUTH_NONE 0x00
-#define NCP_AUTH_BIND 0x31
-#define NCP_AUTH_NDS 0x32
- int auth_type;
- size_t object_name_len;
- void __user * object_name; /* a userspace data, in most cases user name */
-};
-
-struct ncp_privatedata_ioctl
-{
- size_t len;
- void __user * data; /* ~1000 for NDS */
-};
-
-/* NLS charsets by ioctl */
-#define NCP_IOCSNAME_LEN 20
-struct ncp_nls_ioctl
-{
- unsigned char codepage[NCP_IOCSNAME_LEN+1];
- unsigned char iocharset[NCP_IOCSNAME_LEN+1];
-};
-
-#define NCP_IOC_NCPREQUEST _IOR('n', 1, struct ncp_ioctl_request)
-#define NCP_IOC_GETMOUNTUID _IOW('n', 2, __kernel_old_uid_t)
-#define NCP_IOC_GETMOUNTUID2 _IOW('n', 2, unsigned long)
-
-#define NCP_IOC_CONN_LOGGED_IN _IO('n', 3)
-
-#define NCP_GET_FS_INFO_VERSION (1)
-#define NCP_IOC_GET_FS_INFO _IOWR('n', 4, struct ncp_fs_info)
-#define NCP_GET_FS_INFO_VERSION_V2 (2)
-#define NCP_IOC_GET_FS_INFO_V2 _IOWR('n', 4, struct ncp_fs_info_v2)
-
-#define NCP_IOC_SIGN_INIT _IOR('n', 5, struct ncp_sign_init)
-#define NCP_IOC_SIGN_WANTED _IOR('n', 6, int)
-#define NCP_IOC_SET_SIGN_WANTED _IOW('n', 6, int)
-
-#define NCP_IOC_LOCKUNLOCK _IOR('n', 7, struct ncp_lock_ioctl)
-
-#define NCP_IOC_GETROOT _IOW('n', 8, struct ncp_setroot_ioctl)
-#define NCP_IOC_SETROOT _IOR('n', 8, struct ncp_setroot_ioctl)
-
-#define NCP_IOC_GETOBJECTNAME _IOWR('n', 9, struct ncp_objectname_ioctl)
-#define NCP_IOC_SETOBJECTNAME _IOR('n', 9, struct ncp_objectname_ioctl)
-#define NCP_IOC_GETPRIVATEDATA _IOWR('n', 10, struct ncp_privatedata_ioctl)
-#define NCP_IOC_SETPRIVATEDATA _IOR('n', 10, struct ncp_privatedata_ioctl)
-
-#define NCP_IOC_GETCHARSETS _IOWR('n', 11, struct ncp_nls_ioctl)
-#define NCP_IOC_SETCHARSETS _IOR('n', 11, struct ncp_nls_ioctl)
-
-#define NCP_IOC_GETDENTRYTTL _IOW('n', 12, __u32)
-#define NCP_IOC_SETDENTRYTTL _IOR('n', 12, __u32)
-
-/*
- * The packet size to allocate. One page should be enough.
- */
-#define NCP_PACKET_SIZE 4070
-
-#define NCP_MAXPATHLEN 255
-#define NCP_MAXNAMELEN 14
-
-#endif /* _LINUX_NCP_FS_H */
diff --git a/include/uapi/linux/ncp_mount.h b/include/uapi/linux/ncp_mount.h
deleted file mode 100644
index 9bdbcd68c329..000000000000
--- a/include/uapi/linux/ncp_mount.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * ncp_mount.h
- *
- * Copyright (C) 1995, 1996 by Volker Lendecke
- *
- */
-
-#ifndef _LINUX_NCP_MOUNT_H
-#define _LINUX_NCP_MOUNT_H
-
-#include <linux/types.h>
-#include <linux/ncp.h>
-
-#define NCP_MOUNT_VERSION 3 /* Binary */
-
-/* Values for flags */
-#define NCP_MOUNT_SOFT 0x0001
-#define NCP_MOUNT_INTR 0x0002
-#define NCP_MOUNT_STRONG 0x0004 /* enable delete/rename of r/o files */
-#define NCP_MOUNT_NO_OS2 0x0008 /* do not use OS/2 (LONG) namespace */
-#define NCP_MOUNT_NO_NFS 0x0010 /* do not use NFS namespace */
-#define NCP_MOUNT_EXTRAS 0x0020
-#define NCP_MOUNT_SYMLINKS 0x0040 /* enable symlinks */
-#define NCP_MOUNT_NFS_EXTRAS 0x0080 /* Enable use of NFS NS meta-info */
-
-struct ncp_mount_data {
- int version;
- unsigned int ncp_fd; /* The socket to the ncp port */
- __kernel_uid_t mounted_uid; /* Who may umount() this filesystem? */
- __kernel_pid_t wdog_pid; /* Who cares for our watchdog packets? */
-
- unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
- unsigned int time_out; /* How long should I wait after
- sending a NCP request? */
- unsigned int retry_count; /* And how often should I retry? */
- unsigned int flags;
-
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_mode_t file_mode;
- __kernel_mode_t dir_mode;
-};
-
-#define NCP_MOUNT_VERSION_V4 (4) /* Binary or text */
-
-struct ncp_mount_data_v4 {
- int version;
- unsigned long flags; /* NCP_MOUNT_* flags */
- /* MIPS uses long __kernel_uid_t, but... */
- /* we neever pass -1, so it is safe */
- unsigned long mounted_uid; /* Who may umount() this filesystem? */
- /* MIPS uses long __kernel_pid_t */
- long wdog_pid; /* Who cares for our watchdog packets? */
-
- unsigned int ncp_fd; /* The socket to the ncp port */
- unsigned int time_out; /* How long should I wait after
- sending a NCP request? */
- unsigned int retry_count; /* And how often should I retry? */
-
- /* MIPS uses long __kernel_uid_t... */
- /* we never pass -1, so it is safe */
- unsigned long uid;
- unsigned long gid;
- /* MIPS uses unsigned long __kernel_mode_t */
- unsigned long file_mode;
- unsigned long dir_mode;
-};
-
-#define NCP_MOUNT_VERSION_V5 (5) /* Text only */
-
-#endif
diff --git a/include/uapi/linux/ncp_no.h b/include/uapi/linux/ncp_no.h
deleted file mode 100644
index 654d7c7f5d92..000000000000
--- a/include/uapi/linux/ncp_no.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _NCP_NO
-#define _NCP_NO
-
-/* these define the attribute byte as seen by NCP */
-#define aRONLY (__cpu_to_le32(1))
-#define aHIDDEN (__cpu_to_le32(2))
-#define aSYSTEM (__cpu_to_le32(4))
-#define aEXECUTE (__cpu_to_le32(8))
-#define aDIR (__cpu_to_le32(0x10))
-#define aARCH (__cpu_to_le32(0x20))
-#define aSHARED (__cpu_to_le32(0x80))
-#define aDONTSUBALLOCATE (__cpu_to_le32(1L<<11))
-#define aTRANSACTIONAL (__cpu_to_le32(1L<<12))
-#define aPURGE (__cpu_to_le32(1L<<16))
-#define aRENAMEINHIBIT (__cpu_to_le32(1L<<17))
-#define aDELETEINHIBIT (__cpu_to_le32(1L<<18))
-#define aDONTCOMPRESS (__cpu_to_le32(1L<<27))
-
-#endif /* _NCP_NO */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 83ade9b5cf95..4da87e2ef8a8 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -657,6 +657,11 @@
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+#define PCI_EXP_LNKCTL2_TLS 0x000f
+#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
+#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
@@ -983,6 +988,7 @@
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
#define PCI_EXP_DPC_CTL 6 /* DPC control */
+#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
new file mode 100644
index 000000000000..d620fa43756c
--- /dev/null
+++ b/include/uapi/linux/rseq.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_RSEQ_H
+#define _UAPI_LINUX_RSEQ_H
+
+/*
+ * linux/rseq.h
+ *
+ * Restartable sequences system call API
+ *
+ * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include <stdint.h>
+#endif
+
+#include <linux/types_32_64.h>
+
+enum rseq_cpu_id_state {
+ RSEQ_CPU_ID_UNINITIALIZED = -1,
+ RSEQ_CPU_ID_REGISTRATION_FAILED = -2,
+};
+
+enum rseq_flags {
+ RSEQ_FLAG_UNREGISTER = (1 << 0),
+};
+
+enum rseq_cs_flags_bit {
+ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0,
+ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1,
+ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2,
+};
+
+enum rseq_cs_flags {
+ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT =
+ (1U << RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT),
+ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL =
+ (1U << RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT),
+ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE =
+ (1U << RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT),
+};
+
+/*
+ * struct rseq_cs is aligned on 4 * 8 bytes to ensure it is always
+ * contained within a single cache-line. It is usually declared as
+ * link-time constant data.
+ */
+struct rseq_cs {
+ /* Version of this structure. */
+ __u32 version;
+ /* enum rseq_cs_flags */
+ __u32 flags;
+ LINUX_FIELD_u32_u64(start_ip);
+ /* Offset from start_ip. */
+ LINUX_FIELD_u32_u64(post_commit_offset);
+ LINUX_FIELD_u32_u64(abort_ip);
+} __attribute__((aligned(4 * sizeof(__u64))));
+
+/*
+ * struct rseq is aligned on 4 * 8 bytes to ensure it is always
+ * contained within a single cache-line.
+ *
+ * A single struct rseq per thread is allowed.
+ */
+struct rseq {
+ /*
+ * Restartable sequences cpu_id_start field. Updated by the
+ * kernel, and read by user-space with single-copy atomicity
+ * semantics. Aligned on 32-bit. Always contains a value in the
+ * range of possible CPUs, although the value may not be the
+ * actual current CPU (e.g. if rseq is not initialized). This
+ * CPU number value should always be compared against the value
+ * of the cpu_id field before performing a rseq commit or
+ * returning a value read from a data structure indexed using
+ * the cpu_id_start value.
+ */
+ __u32 cpu_id_start;
+ /*
+ * Restartable sequences cpu_id field. Updated by the kernel,
+ * and read by user-space with single-copy atomicity semantics.
+ * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and
+ * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the
+ * former means "rseq uninitialized", and the latter means "rseq
+ * initialization failed". This value is meant to be read within
+ * rseq critical sections and compared with the cpu_id_start
+ * value previously read, before performing the commit instruction,
+ * or read and compared with the cpu_id_start value before returning
+ * a value loaded from a data structure indexed using the
+ * cpu_id_start value.
+ */
+ __u32 cpu_id;
+ /*
+ * Restartable sequences rseq_cs field.
+ *
+ * Contains NULL when no critical section is active for the current
+ * thread, or holds a pointer to the currently active struct rseq_cs.
+ *
+ * Updated by user-space, which sets the address of the currently
+ * active rseq_cs at the beginning of assembly instruction sequence
+ * block, and set to NULL by the kernel when it restarts an assembly
+ * instruction sequence block, as well as when the kernel detects that
+ * it is preempting or delivering a signal outside of the range
+ * targeted by the rseq_cs. Also needs to be set to NULL by user-space
+ * before reclaiming memory that contains the targeted struct rseq_cs.
+ *
+ * Read and set by the kernel with single-copy atomicity semantics.
+ * Set by user-space with single-copy atomicity semantics. Aligned
+ * on 64-bit.
+ */
+ LINUX_FIELD_u32_u64(rseq_cs);
+ /*
+ * - RSEQ_DISABLE flag:
+ *
+ * Fallback fast-track flag for single-stepping.
+ * Set by user-space if lack of progress is detected.
+ * Cleared by user-space after rseq finish.
+ * Read by the kernel.
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
+ * Inhibit instruction sequence block restart and event
+ * counter increment on preemption for this thread.
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
+ * Inhibit instruction sequence block restart and event
+ * counter increment on signal delivery for this thread.
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
+ * Inhibit instruction sequence block restart and event
+ * counter increment on migration for this thread.
+ */
+ __u32 flags;
+} __attribute__((aligned(4 * sizeof(__u64))));
+
+#endif /* _UAPI_LINUX_RSEQ_H */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 0be80f72646b..6e299349b158 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -9,21 +9,22 @@
#define TCMU_VERSION "2.0"
-/*
+/**
+ * DOC: Ring Design
* Ring Design
* -----------
*
* The mmaped area is divided into three parts:
- * 1) The mailbox (struct tcmu_mailbox, below)
- * 2) The command ring
- * 3) Everything beyond the command ring (data)
+ * 1) The mailbox (struct tcmu_mailbox, below);
+ * 2) The command ring;
+ * 3) Everything beyond the command ring (data).
*
* The mailbox tells userspace the offset of the command ring from the
* start of the shared memory region, and how big the command ring is.
*
* The kernel passes SCSI commands to userspace by putting a struct
* tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
- * userspace via uio's interrupt mechanism.
+ * userspace via UIO's interrupt mechanism.
*
* tcmu_cmd_entry contains a header. If the header type is PAD,
* userspace should skip hdr->length bytes (mod cmdr_size) to find the
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h
new file mode 100644
index 000000000000..0a87ace34a57
--- /dev/null
+++ b/include/uapi/linux/types_32_64.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_TYPES_32_64_H
+#define _UAPI_LINUX_TYPES_32_64_H
+
+/*
+ * linux/types_32_64.h
+ *
+ * Integer type declaration for pointers across 32-bit and 64-bit systems.
+ *
+ * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include <stdint.h>
+#endif
+
+#include <asm/byteorder.h>
+
+#ifdef __BYTE_ORDER
+# if (__BYTE_ORDER == __BIG_ENDIAN)
+# define LINUX_BYTE_ORDER_BIG_ENDIAN
+# else
+# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
+# endif
+#else
+# ifdef __BIG_ENDIAN
+# define LINUX_BYTE_ORDER_BIG_ENDIAN
+# else
+# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
+# endif
+#endif
+
+#ifdef __LP64__
+# define LINUX_FIELD_u32_u64(field) __u64 field
+# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v
+#else
+# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
+# define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field
+# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
+ field ## _padding = 0, field = (intptr_t)v
+# else
+# define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding
+# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
+ field = (intptr_t)v, field ## _padding = 0
+# endif
+#endif
+
+#endif /* _UAPI_LINUX_TYPES_32_64_H */
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 83e3890eef20..888ac5975a6c 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -55,6 +55,7 @@ enum uverbs_default_objects {
UVERBS_OBJECT_WQ,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_OBJECT_DM,
+ UVERBS_OBJECT_COUNTERS,
};
enum {
@@ -131,4 +132,24 @@ enum uverbs_methods_mr {
UVERBS_METHOD_DM_MR_REG,
};
+enum uverbs_attrs_create_counters_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
+};
+
+enum uverbs_attrs_destroy_counters_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
+};
+
+enum uverbs_attrs_read_counters_cmd_attr_ids {
+ UVERBS_ATTR_READ_COUNTERS_HANDLE,
+ UVERBS_ATTR_READ_COUNTERS_BUFF,
+ UVERBS_ATTR_READ_COUNTERS_FLAGS,
+};
+
+enum uverbs_methods_actions_counters_ops {
+ UVERBS_METHOD_COUNTERS_CREATE,
+ UVERBS_METHOD_COUNTERS_DESTROY,
+ UVERBS_METHOD_COUNTERS_READ,
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 6aeb03315b0b..4f9991de8e3a 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -998,6 +998,19 @@ struct ib_uverbs_flow_spec_action_handle {
__u32 reserved1;
};
+struct ib_uverbs_flow_spec_action_count {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ __u32 handle;
+ __u32 reserved1;
+};
+
struct ib_uverbs_flow_tunnel_filter {
__be32 tunnel_id;
};
@@ -1033,6 +1046,56 @@ struct ib_uverbs_flow_spec_esp {
struct ib_uverbs_flow_spec_esp_filter mask;
};
+struct ib_uverbs_flow_gre_filter {
+ /* c_ks_res0_ver field is bits 0-15 in offset 0 of a standard GRE header:
+ * bit 0 - C - checksum bit.
+ * bit 1 - reserved. set to 0.
+ * bit 2 - key bit.
+ * bit 3 - sequence number bit.
+ * bits 4:12 - reserved. set to 0.
+ * bits 13:15 - GRE version.
+ */
+ __be16 c_ks_res0_ver;
+ __be16 protocol;
+ __be32 key;
+};
+
+struct ib_uverbs_flow_spec_gre {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_gre_filter val;
+ struct ib_uverbs_flow_gre_filter mask;
+};
+
+struct ib_uverbs_flow_mpls_filter {
+ /* The field includes the entire MPLS label:
+ * bits 0:19 - label field.
+ * bits 20:22 - traffic class field.
+ * bits 23 - bottom of stack bit.
+ * bits 24:31 - ttl field.
+ */
+ __be32 label;
+};
+
+struct ib_uverbs_flow_spec_mpls {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_mpls_filter val;
+ struct ib_uverbs_flow_mpls_filter mask;
+};
+
struct ib_uverbs_flow_attr {
__u32 type;
__u16 size;
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index fdaf00e20649..8daec1fa49cf 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <linux/if_ether.h> /* For ETH_ALEN. */
+#include <rdma/ib_user_ioctl_verbs.h>
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -163,7 +164,7 @@ struct mlx5_ib_rss_caps {
enum mlx5_ib_cqe_comp_res_format {
MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
- MLX5_IB_CQE_RES_RESERVED = 1 << 2,
+ MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};
struct mlx5_ib_cqe_comp_caps {
@@ -233,7 +234,9 @@ enum mlx5_ib_query_dev_resp_flags {
enum mlx5_ib_tunnel_offloads {
MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0,
MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1,
- MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
struct mlx5_ib_query_device_resp {
@@ -441,4 +444,27 @@ enum {
enum {
MLX5_IB_CLOCK_INFO_V1 = 0,
};
+
+struct mlx5_ib_flow_counters_desc {
+ __u32 description;
+ __u32 index;
+};
+
+struct mlx5_ib_flow_counters_data {
+ RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
+ __u32 ncounters;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_flow {
+ __u32 ncounters_data;
+ __u32 reserved;
+ /*
+ * Following are counters data based on ncounters_data, each
+ * entry in the data[] should match a corresponding counter object
+ * that was pointed by a counters spec upon the flow creation
+ */
+ struct mlx5_ib_flow_counters_data data[];
+};
+
#endif /* MLX5_ABI_USER_H */
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 0ce0943fc808..edba6351ac13 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -249,10 +249,22 @@ enum rdma_nldev_command {
RDMA_NLDEV_NUM_OPS
};
+enum {
+ RDMA_NLDEV_ATTR_ENTRY_STRLEN = 16,
+};
+
+enum rdma_nldev_print_type {
+ RDMA_NLDEV_PRINT_TYPE_UNSPEC,
+ RDMA_NLDEV_PRINT_TYPE_HEX,
+};
+
enum rdma_nldev_attr {
/* don't change the order or add anything between, this is ABI! */
RDMA_NLDEV_ATTR_UNSPEC,
+ /* Pad attribute for 64b alignment */
+ RDMA_NLDEV_ATTR_PAD = RDMA_NLDEV_ATTR_UNSPEC,
+
/* Identifier for ib_device */
RDMA_NLDEV_ATTR_DEV_INDEX, /* u32 */
@@ -387,7 +399,6 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_RES_PD_ENTRY, /* nested table */
RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, /* u32 */
RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, /* u32 */
-
/*
* Provides logical name and index of netdevice which is
* connected to physical port. This information is relevant
@@ -400,7 +411,24 @@ enum rdma_nldev_attr {
*/
RDMA_NLDEV_ATTR_NDEV_INDEX, /* u32 */
RDMA_NLDEV_ATTR_NDEV_NAME, /* string */
+ /*
+ * driver-specific attributes.
+ */
+ RDMA_NLDEV_ATTR_DRIVER, /* nested table */
+ RDMA_NLDEV_ATTR_DRIVER_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_DRIVER_STRING, /* string */
+ /*
+ * u8 values from enum rdma_nldev_print_type
+ */
+ RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_DRIVER_S32, /* s32 */
+ RDMA_NLDEV_ATTR_DRIVER_U32, /* u32 */
+ RDMA_NLDEV_ATTR_DRIVER_S64, /* s64 */
+ RDMA_NLDEV_ATTR_DRIVER_U64, /* u64 */
+ /*
+ * Always the end
+ */
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index 39d3e7b8e993..d2029556083e 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -89,6 +89,15 @@ struct privcmd_dm_op {
const struct privcmd_dm_op_buf __user *ubufs;
};
+struct privcmd_mmap_resource {
+ domid_t dom;
+ __u32 type;
+ __u32 id;
+ __u32 idx;
+ __u64 num;
+ __u64 addr;
+};
+
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
@@ -114,5 +123,7 @@ struct privcmd_dm_op {
_IOC(_IOC_NONE, 'P', 5, sizeof(struct privcmd_dm_op))
#define IOCTL_PRIVCMD_RESTRICT \
_IOC(_IOC_NONE, 'P', 6, sizeof(domid_t))
+#define IOCTL_PRIVCMD_MMAP_RESOURCE \
+ _IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource))
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 583dd93b3016..4c5751c26f87 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -265,4 +265,70 @@ struct xen_remove_from_physmap {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_remove_from_physmap);
+/*
+ * Get the pages for a particular guest resource, so that they can be
+ * mapped directly by a tools domain.
+ */
+#define XENMEM_acquire_resource 28
+struct xen_mem_acquire_resource {
+ /* IN - The domain whose resource is to be mapped */
+ domid_t domid;
+ /* IN - the type of resource */
+ uint16_t type;
+
+#define XENMEM_resource_ioreq_server 0
+#define XENMEM_resource_grant_table 1
+
+ /*
+ * IN - a type-specific resource identifier, which must be zero
+ * unless stated otherwise.
+ *
+ * type == XENMEM_resource_ioreq_server -> id == ioreq server id
+ * type == XENMEM_resource_grant_table -> id defined below
+ */
+ uint32_t id;
+
+#define XENMEM_resource_grant_table_id_shared 0
+#define XENMEM_resource_grant_table_id_status 1
+
+ /* IN/OUT - As an IN parameter number of frames of the resource
+ * to be mapped. However, if the specified value is 0 and
+ * frame_list is NULL then this field will be set to the
+ * maximum value supported by the implementation on return.
+ */
+ uint32_t nr_frames;
+ /*
+ * OUT - Must be zero on entry. On return this may contain a bitwise
+ * OR of the following values.
+ */
+ uint32_t flags;
+
+ /* The resource pages have been assigned to the calling domain */
+#define _XENMEM_rsrc_acq_caller_owned 0
+#define XENMEM_rsrc_acq_caller_owned (1u << _XENMEM_rsrc_acq_caller_owned)
+
+ /*
+ * IN - the index of the initial frame to be mapped. This parameter
+ * is ignored if nr_frames is 0.
+ */
+ uint64_t frame;
+
+#define XENMEM_resource_ioreq_server_frame_bufioreq 0
+#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
+
+ /*
+ * IN/OUT - If the tools domain is PV then, upon return, frame_list
+ * will be populated with the MFNs of the resource.
+ * If the tools domain is HVM then it is expected that, on
+ * entry, frame_list will be populated with a list of GFNs
+ * that will be mapped to the MFNs of the resource.
+ * If -EIO is returned then the frame_list has only been
+ * partially mapped and it is up to the caller to unmap all
+ * the GFNs.
+ * This parameter may be NULL if nr_frames is 0.
+ */
+ GUEST_HANDLE(xen_pfn_t) frame_list;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_mem_acquire_resource);
+
#endif /* __XEN_PUBLIC_MEMORY_H__ */
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 4f4830ef8f93..8bfb242f433e 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -265,9 +265,10 @@
*
* PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
*/
-#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
-#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
-#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
+#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+#define MMU_PT_UPDATE_NO_TRANSLATE 3 /* checked '*ptr = val'. ptr is MA. */
/*
* MMU EXTENDED OPERATIONS
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index fd23e42c6024..fd18c974a619 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -63,7 +63,7 @@ static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
struct vm_area_struct;
/*
- * xen_remap_domain_gfn_array() - map an array of foreign frames
+ * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
* @gfn: Array of GFNs to map
@@ -86,6 +86,28 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned domid,
struct page **pages);
+/*
+ * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @mfn: Array of MFNs to map
+ * @nr: Number entries in the MFN array
+ * @err_ptr: Returns per-MFN error status.
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * @mfn and @err_ptr may point to the same buffer, the MFNs will be
+ * overwritten by the error codes after they are mapped.
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr, xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid, struct page **pages);
+
/* xen_remap_domain_gfn_range() - map a range of foreign frames
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
diff --git a/init/Kconfig b/init/Kconfig
index 22ca30f6a6bd..d2b8b2ea097e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1428,6 +1428,29 @@ config ARCH_HAS_MEMBARRIER_CALLBACKS
config ARCH_HAS_MEMBARRIER_SYNC_CORE
bool
+config RSEQ
+ bool "Enable rseq() system call" if EXPERT
+ default y
+ depends on HAVE_RSEQ
+ select MEMBARRIER
+ help
+ Enable the restartable sequences system call. It provides a
+ user-space cache for the current CPU number value, which
+ speeds up getting the current CPU number from user-space,
+ as well as an ABI to speed up user-space operations on
+ per-CPU data.
+
+ If unsure, say Y.
+
+config DEBUG_RSEQ
+ default n
+ bool "Enabled debugging of rseq() system call" if EXPERT
+ depends on RSEQ && DEBUG_KERNEL
+ help
+ Enable extra debugging checks for the rseq system call.
+
+ If unsure, say N.
+
config EMBEDDED
bool "Embedded system"
option allnoconfig_y
diff --git a/kernel/Makefile b/kernel/Makefile
index f85ae5dfa474..d2001624fe7a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -112,7 +112,9 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
obj-$(CONFIG_TORTURE_TEST) += torture.o
-obj-$(CONFIG_HAS_IOMEM) += memremap.o
+obj-$(CONFIG_HAS_IOMEM) += iomem.o
+obj-$(CONFIG_ZONE_DEVICE) += memremap.o
+obj-$(CONFIG_RSEQ) += rseq.o
$(obj)/configs.o: $(obj)/config_data.h
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index f7674d676889..b66aced5e8c2 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -460,6 +460,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_hwpoison);
#endif
VMCOREINFO_NUMBER(PG_head_mask);
+#define PAGE_BUDDY_MAPCOUNT_VALUE (~PG_buddy)
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_HUGETLB_PAGE
VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
diff --git a/kernel/fork.c b/kernel/fork.c
index 80b48a8fb47b..08c6e5e217a0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -899,6 +899,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->pinned_vm = 0;
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
+ spin_lock_init(&mm->arg_lock);
mm_init_cpumask(mm);
mm_init_aio(mm);
mm_init_owner(mm, p);
@@ -1899,6 +1900,8 @@ static __latent_entropy struct task_struct *copy_process(
*/
copy_seccomp(p);
+ rseq_fork(p, clone_flags);
+
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 751593ed7c0b..32b479468e4d 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -44,6 +44,7 @@ int __read_mostly sysctl_hung_task_warnings = 10;
static int __read_mostly did_panic;
static bool hung_task_show_lock;
+static bool hung_task_call_panic;
static struct task_struct *watchdog_task;
@@ -127,10 +128,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
touch_nmi_watchdog();
if (sysctl_hung_task_panic) {
- if (hung_task_show_lock)
- debug_show_all_locks();
- trigger_all_cpu_backtrace();
- panic("hung_task: blocked tasks");
+ hung_task_show_lock = true;
+ hung_task_call_panic = true;
}
}
@@ -193,6 +192,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
rcu_read_unlock();
if (hung_task_show_lock)
debug_show_all_locks();
+ if (hung_task_call_panic) {
+ trigger_all_cpu_backtrace();
+ panic("hung_task: blocked tasks");
+ }
}
static long hung_timeout_jiffies(unsigned long last_checked,
diff --git a/kernel/iomem.c b/kernel/iomem.c
new file mode 100644
index 000000000000..f7525e14ebc6
--- /dev/null
+++ b/kernel/iomem.c
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
+#ifndef ioremap_cache
+/* temporary while we convert existing ioremap_cache users to memremap */
+__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
+{
+ return ioremap(offset, size);
+}
+#endif
+
+#ifndef arch_memremap_wb
+static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
+{
+ return (__force void *)ioremap_cache(offset, size);
+}
+#endif
+
+#ifndef arch_memremap_can_ram_remap
+static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags)
+{
+ return true;
+}
+#endif
+
+static void *try_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags)
+{
+ unsigned long pfn = PHYS_PFN(offset);
+
+ /* In the simple case just return the existing linear address */
+ if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
+ arch_memremap_can_ram_remap(offset, size, flags))
+ return __va(offset);
+
+ return NULL; /* fallback to arch_memremap_wb */
+}
+
+/**
+ * memremap() - remap an iomem_resource as cacheable memory
+ * @offset: iomem resource start address
+ * @size: size of remap
+ * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
+ * MEMREMAP_ENC, MEMREMAP_DEC
+ *
+ * memremap() is "ioremap" for cases where it is known that the resource
+ * being mapped does not have i/o side effects and the __iomem
+ * annotation is not applicable. In the case of multiple flags, the different
+ * mapping types will be attempted in the order listed below until one of
+ * them succeeds.
+ *
+ * MEMREMAP_WB - matches the default mapping for System RAM on
+ * the architecture. This is usually a read-allocate write-back cache.
+ * Morever, if MEMREMAP_WB is specified and the requested remap region is RAM
+ * memremap() will bypass establishing a new mapping and instead return
+ * a pointer into the direct map.
+ *
+ * MEMREMAP_WT - establish a mapping whereby writes either bypass the
+ * cache or are written through to memory and never exist in a
+ * cache-dirty state with respect to program visibility. Attempts to
+ * map System RAM with this mapping type will fail.
+ *
+ * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
+ * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
+ * uncached. Attempts to map System RAM with this mapping type will fail.
+ */
+void *memremap(resource_size_t offset, size_t size, unsigned long flags)
+{
+ int is_ram = region_intersects(offset, size,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
+ void *addr = NULL;
+
+ if (!flags)
+ return NULL;
+
+ if (is_ram == REGION_MIXED) {
+ WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
+ &offset, (unsigned long) size);
+ return NULL;
+ }
+
+ /* Try all mapping types requested until one returns non-NULL */
+ if (flags & MEMREMAP_WB) {
+ /*
+ * MEMREMAP_WB is special in that it can be satisifed
+ * from the direct map. Some archs depend on the
+ * capability of memremap() to autodetect cases where
+ * the requested range is potentially in System RAM.
+ */
+ if (is_ram == REGION_INTERSECTS)
+ addr = try_ram_remap(offset, size, flags);
+ if (!addr)
+ addr = arch_memremap_wb(offset, size);
+ }
+
+ /*
+ * If we don't have a mapping yet and other request flags are
+ * present then we will be attempting to establish a new virtual
+ * address mapping. Enforce that this mapping is not aliasing
+ * System RAM.
+ */
+ if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
+ WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
+ &offset, (unsigned long) size);
+ return NULL;
+ }
+
+ if (!addr && (flags & MEMREMAP_WT))
+ addr = ioremap_wt(offset, size);
+
+ if (!addr && (flags & MEMREMAP_WC))
+ addr = ioremap_wc(offset, size);
+
+ return addr;
+}
+EXPORT_SYMBOL(memremap);
+
+void memunmap(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ iounmap((void __iomem *) addr);
+}
+EXPORT_SYMBOL(memunmap);
+
+static void devm_memremap_release(struct device *dev, void *res)
+{
+ memunmap(*(void **)res);
+}
+
+static int devm_memremap_match(struct device *dev, void *res, void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+void *devm_memremap(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags)
+{
+ void **ptr, *addr;
+
+ ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ addr = memremap(offset, size, flags);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ return ERR_PTR(-ENXIO);
+ }
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_memremap);
+
+void devm_memunmap(struct device *dev, void *addr)
+{
+ WARN_ON(devres_release(dev, devm_memremap_release,
+ devm_memremap_match, addr));
+}
+EXPORT_SYMBOL(devm_memunmap);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index e3336d904f64..daeabd791d58 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -24,6 +24,7 @@
#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;
+EXPORT_SYMBOL_GPL(force_irqthreads);
static int __init setup_forced_irqthreads(char *arg)
{
@@ -204,6 +205,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
return ret;
}
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline int irq_set_affinity_pending(struct irq_data *data,
+ const struct cpumask *dest)
+{
+ struct irq_desc *desc = irq_data_to_desc(data);
+
+ irqd_set_move_pending(data);
+ irq_copy_pending(desc, dest);
+ return 0;
+}
+#else
+static inline int irq_set_affinity_pending(struct irq_data *data,
+ const struct cpumask *dest)
+{
+ return -EBUSY;
+}
+#endif
+
+static int irq_try_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
+{
+ int ret = irq_do_set_affinity(data, dest, force);
+
+ /*
+ * In case that the underlying vector management is busy and the
+ * architecture supports the generic pending mechanism then utilize
+ * this to avoid returning an error to user space.
+ */
+ if (ret == -EBUSY && !force)
+ ret = irq_set_affinity_pending(data, dest);
+ return ret;
+}
+
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
bool force)
{
@@ -214,8 +248,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
- if (irq_can_move_pcntxt(data)) {
- ret = irq_do_set_affinity(data, mask, force);
+ if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
+ ret = irq_try_set_affinity(data, mask, force);
} else {
irqd_set_move_pending(data);
irq_copy_pending(desc, mask);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 86ae0eb80b53..def48589ea48 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -38,17 +38,18 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
void irq_move_masked_irq(struct irq_data *idata)
{
struct irq_desc *desc = irq_data_to_desc(idata);
- struct irq_chip *chip = desc->irq_data.chip;
+ struct irq_data *data = &desc->irq_data;
+ struct irq_chip *chip = data->chip;
- if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
+ if (likely(!irqd_is_setaffinity_pending(data)))
return;
- irqd_clr_move_pending(&desc->irq_data);
+ irqd_clr_move_pending(data);
/*
* Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
*/
- if (irqd_is_per_cpu(&desc->irq_data)) {
+ if (irqd_is_per_cpu(data)) {
WARN_ON(1);
return;
}
@@ -73,13 +74,24 @@ void irq_move_masked_irq(struct irq_data *idata)
* For correct operation this depends on the caller
* masking the irqs.
*/
- if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
- irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
-
+ if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
+ int ret;
+
+ ret = irq_do_set_affinity(data, desc->pending_mask, false);
+ /*
+ * If the there is a cleanup pending in the underlying
+ * vector management, reschedule the move for the next
+ * interrupt. Leave desc->pending_mask intact.
+ */
+ if (ret == -EBUSY) {
+ irqd_set_move_pending(data);
+ return;
+ }
+ }
cpumask_clear(desc->pending_mask);
}
-void irq_move_irq(struct irq_data *idata)
+void __irq_move_irq(struct irq_data *idata)
{
bool masked;
@@ -90,9 +102,6 @@ void irq_move_irq(struct irq_data *idata)
*/
idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
- if (likely(!irqd_is_setaffinity_pending(idata)))
- return;
-
if (unlikely(irqd_irq_disabled(idata)))
return;
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 895e6b76b25e..5857267a4af5 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -1,15 +1,5 @@
-/*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
@@ -19,170 +9,8 @@
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/wait_bit.h>
-#ifndef ioremap_cache
-/* temporary while we convert existing ioremap_cache users to memremap */
-__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
-{
- return ioremap(offset, size);
-}
-#endif
-
-#ifndef arch_memremap_wb
-static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
-{
- return (__force void *)ioremap_cache(offset, size);
-}
-#endif
-
-#ifndef arch_memremap_can_ram_remap
-static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
- unsigned long flags)
-{
- return true;
-}
-#endif
-
-static void *try_ram_remap(resource_size_t offset, size_t size,
- unsigned long flags)
-{
- unsigned long pfn = PHYS_PFN(offset);
-
- /* In the simple case just return the existing linear address */
- if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
- arch_memremap_can_ram_remap(offset, size, flags))
- return __va(offset);
-
- return NULL; /* fallback to arch_memremap_wb */
-}
-
-/**
- * memremap() - remap an iomem_resource as cacheable memory
- * @offset: iomem resource start address
- * @size: size of remap
- * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
- * MEMREMAP_ENC, MEMREMAP_DEC
- *
- * memremap() is "ioremap" for cases where it is known that the resource
- * being mapped does not have i/o side effects and the __iomem
- * annotation is not applicable. In the case of multiple flags, the different
- * mapping types will be attempted in the order listed below until one of
- * them succeeds.
- *
- * MEMREMAP_WB - matches the default mapping for System RAM on
- * the architecture. This is usually a read-allocate write-back cache.
- * Morever, if MEMREMAP_WB is specified and the requested remap region is RAM
- * memremap() will bypass establishing a new mapping and instead return
- * a pointer into the direct map.
- *
- * MEMREMAP_WT - establish a mapping whereby writes either bypass the
- * cache or are written through to memory and never exist in a
- * cache-dirty state with respect to program visibility. Attempts to
- * map System RAM with this mapping type will fail.
- *
- * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
- * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
- * uncached. Attempts to map System RAM with this mapping type will fail.
- */
-void *memremap(resource_size_t offset, size_t size, unsigned long flags)
-{
- int is_ram = region_intersects(offset, size,
- IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
- void *addr = NULL;
-
- if (!flags)
- return NULL;
-
- if (is_ram == REGION_MIXED) {
- WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
- &offset, (unsigned long) size);
- return NULL;
- }
-
- /* Try all mapping types requested until one returns non-NULL */
- if (flags & MEMREMAP_WB) {
- /*
- * MEMREMAP_WB is special in that it can be satisifed
- * from the direct map. Some archs depend on the
- * capability of memremap() to autodetect cases where
- * the requested range is potentially in System RAM.
- */
- if (is_ram == REGION_INTERSECTS)
- addr = try_ram_remap(offset, size, flags);
- if (!addr)
- addr = arch_memremap_wb(offset, size);
- }
-
- /*
- * If we don't have a mapping yet and other request flags are
- * present then we will be attempting to establish a new virtual
- * address mapping. Enforce that this mapping is not aliasing
- * System RAM.
- */
- if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
- WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
- &offset, (unsigned long) size);
- return NULL;
- }
-
- if (!addr && (flags & MEMREMAP_WT))
- addr = ioremap_wt(offset, size);
-
- if (!addr && (flags & MEMREMAP_WC))
- addr = ioremap_wc(offset, size);
-
- return addr;
-}
-EXPORT_SYMBOL(memremap);
-
-void memunmap(void *addr)
-{
- if (is_vmalloc_addr(addr))
- iounmap((void __iomem *) addr);
-}
-EXPORT_SYMBOL(memunmap);
-
-static void devm_memremap_release(struct device *dev, void *res)
-{
- memunmap(*(void **)res);
-}
-
-static int devm_memremap_match(struct device *dev, void *res, void *match_data)
-{
- return *(void **)res == match_data;
-}
-
-void *devm_memremap(struct device *dev, resource_size_t offset,
- size_t size, unsigned long flags)
-{
- void **ptr, *addr;
-
- ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
- dev_to_node(dev));
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- addr = memremap(offset, size, flags);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- return ERR_PTR(-ENXIO);
- }
-
- return addr;
-}
-EXPORT_SYMBOL(devm_memremap);
-
-void devm_memunmap(struct device *dev, void *addr)
-{
- WARN_ON(devres_release(dev, devm_memremap_release,
- devm_memremap_match, addr));
-}
-EXPORT_SYMBOL(devm_memunmap);
-
-#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
@@ -473,10 +301,32 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
return pgmap;
}
-#endif /* CONFIG_ZONE_DEVICE */
+EXPORT_SYMBOL_GPL(get_dev_pagemap);
+
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+EXPORT_SYMBOL_GPL(devmap_managed_key);
+static atomic_t devmap_enable;
+
+/*
+ * Toggle the static key for ->page_free() callbacks when dev_pagemap
+ * pages go idle.
+ */
+void dev_pagemap_get_ops(void)
+{
+ if (atomic_inc_return(&devmap_enable) == 1)
+ static_branch_enable(&devmap_managed_key);
+}
+EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);
+
+void dev_pagemap_put_ops(void)
+{
+ if (atomic_dec_and_test(&devmap_enable))
+ static_branch_disable(&devmap_managed_key);
+}
+EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);
-#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
-void put_zone_device_private_or_public_page(struct page *page)
+void __put_devmap_managed_page(struct page *page)
{
int count = page_ref_dec_return(page);
@@ -496,5 +346,5 @@ void put_zone_device_private_or_public_page(struct page *page)
} else if (!count)
__put_page(page);
}
-EXPORT_SYMBOL(put_zone_device_private_or_public_page);
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+EXPORT_SYMBOL_GPL(__put_devmap_managed_page);
+#endif /* CONFIG_DEV_PAGEMAP_OPS */
diff --git a/kernel/resource.c b/kernel/resource.c
index b589dda910b3..30e1bc68503b 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -415,6 +415,7 @@ int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
return __walk_iomem_res_desc(&res, desc, false, arg, func);
}
+EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
/*
* This function calls the @func callback against all memory ranges of type
diff --git a/kernel/rseq.c b/kernel/rseq.c
new file mode 100644
index 000000000000..ae306f90c514
--- /dev/null
+++ b/kernel/rseq.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Restartable sequences system call
+ *
+ * Copyright (C) 2015, Google, Inc.,
+ * Paul Turner <pjt@google.com> and Andrew Hunter <ahh@google.com>
+ * Copyright (C) 2015-2018, EfficiOS Inc.,
+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/rseq.h>
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/rseq.h>
+
+#define RSEQ_CS_PREEMPT_MIGRATE_FLAGS (RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE | \
+ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT)
+
+/*
+ *
+ * Restartable sequences are a lightweight interface that allows
+ * user-level code to be executed atomically relative to scheduler
+ * preemption and signal delivery. Typically used for implementing
+ * per-cpu operations.
+ *
+ * It allows user-space to perform update operations on per-cpu data
+ * without requiring heavy-weight atomic operations.
+ *
+ * Detailed algorithm of rseq user-space assembly sequences:
+ *
+ * init(rseq_cs)
+ * cpu = TLS->rseq::cpu_id_start
+ * [1] TLS->rseq::rseq_cs = rseq_cs
+ * [start_ip] ----------------------------
+ * [2] if (cpu != TLS->rseq::cpu_id)
+ * goto abort_ip;
+ * [3] <last_instruction_in_cs>
+ * [post_commit_ip] ----------------------------
+ *
+ * The address of jump target abort_ip must be outside the critical
+ * region, i.e.:
+ *
+ * [abort_ip] < [start_ip] || [abort_ip] >= [post_commit_ip]
+ *
+ * Steps [2]-[3] (inclusive) need to be a sequence of instructions in
+ * userspace that can handle being interrupted between any of those
+ * instructions, and then resumed to the abort_ip.
+ *
+ * 1. Userspace stores the address of the struct rseq_cs assembly
+ * block descriptor into the rseq_cs field of the registered
+ * struct rseq TLS area. This update is performed through a single
+ * store within the inline assembly instruction sequence.
+ * [start_ip]
+ *
+ * 2. Userspace tests to check whether the current cpu_id field matches
+ * the cpu number loaded before start_ip, branching to abort_ip
+ * in case of a mismatch.
+ *
+ * If the sequence is preempted or interrupted by a signal
+ * at or after start_ip and before post_commit_ip, then the kernel
+ * clears TLS->__rseq_abi::rseq_cs, and sets the user-space return
+ * ip to abort_ip before returning to user-space, so the preempted
+ * execution resumes at abort_ip.
+ *
+ * 3. Userspace critical section final instruction before
+ * post_commit_ip is the commit. The critical section is
+ * self-terminating.
+ * [post_commit_ip]
+ *
+ * 4. <success>
+ *
+ * On failure at [2], or if interrupted by preempt or signal delivery
+ * between [1] and [3]:
+ *
+ * [abort_ip]
+ * F1. <failure>
+ */
+
+static int rseq_update_cpu_id(struct task_struct *t)
+{
+ u32 cpu_id = raw_smp_processor_id();
+
+ if (__put_user(cpu_id, &t->rseq->cpu_id_start))
+ return -EFAULT;
+ if (__put_user(cpu_id, &t->rseq->cpu_id))
+ return -EFAULT;
+ trace_rseq_update(t);
+ return 0;
+}
+
+static int rseq_reset_rseq_cpu_id(struct task_struct *t)
+{
+ u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED;
+
+ /*
+ * Reset cpu_id_start to its initial state (0).
+ */
+ if (__put_user(cpu_id_start, &t->rseq->cpu_id_start))
+ return -EFAULT;
+ /*
+ * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming
+ * in after unregistration can figure out that rseq needs to be
+ * registered again.
+ */
+ if (__put_user(cpu_id, &t->rseq->cpu_id))
+ return -EFAULT;
+ return 0;
+}
+
+static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
+{
+ struct rseq_cs __user *urseq_cs;
+ unsigned long ptr;
+ u32 __user *usig;
+ u32 sig;
+ int ret;
+
+ ret = __get_user(ptr, &t->rseq->rseq_cs);
+ if (ret)
+ return ret;
+ if (!ptr) {
+ memset(rseq_cs, 0, sizeof(*rseq_cs));
+ return 0;
+ }
+ urseq_cs = (struct rseq_cs __user *)ptr;
+ if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
+ return -EFAULT;
+ if (rseq_cs->version > 0)
+ return -EINVAL;
+
+ /* Ensure that abort_ip is not in the critical section. */
+ if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
+ return -EINVAL;
+
+ usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32));
+ ret = get_user(sig, usig);
+ if (ret)
+ return ret;
+
+ if (current->rseq_sig != sig) {
+ printk_ratelimited(KERN_WARNING
+ "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
+ sig, current->rseq_sig, current->pid, usig);
+ return -EPERM;
+ }
+ return 0;
+}
+
+static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
+{
+ u32 flags, event_mask;
+ int ret;
+
+ /* Get thread flags. */
+ ret = __get_user(flags, &t->rseq->flags);
+ if (ret)
+ return ret;
+
+ /* Take critical section flags into account. */
+ flags |= cs_flags;
+
+ /*
+ * Restart on signal can only be inhibited when restart on
+ * preempt and restart on migrate are inhibited too. Otherwise,
+ * a preempted signal handler could fail to restart the prior
+ * execution context on sigreturn.
+ */
+ if (unlikely((flags & RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL) &&
+ (flags & RSEQ_CS_PREEMPT_MIGRATE_FLAGS) !=
+ RSEQ_CS_PREEMPT_MIGRATE_FLAGS))
+ return -EINVAL;
+
+ /*
+ * Load and clear event mask atomically with respect to
+ * scheduler preemption.
+ */
+ preempt_disable();
+ event_mask = t->rseq_event_mask;
+ t->rseq_event_mask = 0;
+ preempt_enable();
+
+ return !!(event_mask & ~flags);
+}
+
+static int clear_rseq_cs(struct task_struct *t)
+{
+ /*
+ * The rseq_cs field is set to NULL on preemption or signal
+ * delivery on top of rseq assembly block, as well as on top
+ * of code outside of the rseq assembly block. This performs
+ * a lazy clear of the rseq_cs field.
+ *
+ * Set rseq_cs to NULL with single-copy atomicity.
+ */
+ return __put_user(0UL, &t->rseq->rseq_cs);
+}
+
+/*
+ * Unsigned comparison will be true when ip >= start_ip, and when
+ * ip < start_ip + post_commit_offset.
+ */
+static bool in_rseq_cs(unsigned long ip, struct rseq_cs *rseq_cs)
+{
+ return ip - rseq_cs->start_ip < rseq_cs->post_commit_offset;
+}
+
+static int rseq_ip_fixup(struct pt_regs *regs)
+{
+ unsigned long ip = instruction_pointer(regs);
+ struct task_struct *t = current;
+ struct rseq_cs rseq_cs;
+ int ret;
+
+ ret = rseq_get_rseq_cs(t, &rseq_cs);
+ if (ret)
+ return ret;
+
+ /*
+ * Handle potentially not being within a critical section.
+ * If not nested over a rseq critical section, restart is useless.
+ * Clear the rseq_cs pointer and return.
+ */
+ if (!in_rseq_cs(ip, &rseq_cs))
+ return clear_rseq_cs(t);
+ ret = rseq_need_restart(t, rseq_cs.flags);
+ if (ret <= 0)
+ return ret;
+ ret = clear_rseq_cs(t);
+ if (ret)
+ return ret;
+ trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset,
+ rseq_cs.abort_ip);
+ instruction_pointer_set(regs, (unsigned long)rseq_cs.abort_ip);
+ return 0;
+}
+
+/*
+ * This resume handler must always be executed between any of:
+ * - preemption,
+ * - signal delivery,
+ * and return to user-space.
+ *
+ * This is how we can ensure that the entire rseq critical section,
+ * consisting of both the C part and the assembly instruction sequence,
+ * will issue the commit instruction only if executed atomically with
+ * respect to other threads scheduled on the same CPU, and with respect
+ * to signal handlers.
+ */
+void __rseq_handle_notify_resume(struct pt_regs *regs)
+{
+ struct task_struct *t = current;
+ int ret;
+
+ if (unlikely(t->flags & PF_EXITING))
+ return;
+ if (unlikely(!access_ok(VERIFY_WRITE, t->rseq, sizeof(*t->rseq))))
+ goto error;
+ ret = rseq_ip_fixup(regs);
+ if (unlikely(ret < 0))
+ goto error;
+ if (unlikely(rseq_update_cpu_id(t)))
+ goto error;
+ return;
+
+error:
+ force_sig(SIGSEGV, t);
+}
+
+#ifdef CONFIG_DEBUG_RSEQ
+
+/*
+ * Terminate the process if a syscall is issued within a restartable
+ * sequence.
+ */
+void rseq_syscall(struct pt_regs *regs)
+{
+ unsigned long ip = instruction_pointer(regs);
+ struct task_struct *t = current;
+ struct rseq_cs rseq_cs;
+
+ if (!t->rseq)
+ return;
+ if (!access_ok(VERIFY_READ, t->rseq, sizeof(*t->rseq)) ||
+ rseq_get_rseq_cs(t, &rseq_cs) || in_rseq_cs(ip, &rseq_cs))
+ force_sig(SIGSEGV, t);
+}
+
+#endif
+
+/*
+ * sys_rseq - setup restartable sequences for caller thread.
+ */
+SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
+ int, flags, u32, sig)
+{
+ int ret;
+
+ if (flags & RSEQ_FLAG_UNREGISTER) {
+ /* Unregister rseq for current thread. */
+ if (current->rseq != rseq || !current->rseq)
+ return -EINVAL;
+ if (current->rseq_len != rseq_len)
+ return -EINVAL;
+ if (current->rseq_sig != sig)
+ return -EPERM;
+ ret = rseq_reset_rseq_cpu_id(current);
+ if (ret)
+ return ret;
+ current->rseq = NULL;
+ current->rseq_len = 0;
+ current->rseq_sig = 0;
+ return 0;
+ }
+
+ if (unlikely(flags))
+ return -EINVAL;
+
+ if (current->rseq) {
+ /*
+ * If rseq is already registered, check whether
+ * the provided address differs from the prior
+ * one.
+ */
+ if (current->rseq != rseq || current->rseq_len != rseq_len)
+ return -EINVAL;
+ if (current->rseq_sig != sig)
+ return -EPERM;
+ /* Already registered. */
+ return -EBUSY;
+ }
+
+ /*
+ * If there was no rseq previously registered,
+ * ensure the provided rseq is properly aligned and valid.
+ */
+ if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) ||
+ rseq_len != sizeof(*rseq))
+ return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, rseq, rseq_len))
+ return -EFAULT;
+ current->rseq = rseq;
+ current->rseq_len = rseq_len;
+ current->rseq_sig = sig;
+ /*
+ * If rseq was previously inactive, and has just been
+ * registered, ensure the cpu_id_start and cpu_id fields
+ * are updated before returning to user-space.
+ */
+ rseq_set_notify_resume(current);
+
+ return 0;
+}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e9866f86f304..a98d54cd5535 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1191,6 +1191,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p);
p->se.nr_migrations++;
+ rseq_migrate(p);
perf_event_task_migrate(p);
}
@@ -2634,6 +2635,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
{
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
+ rseq_preempt(prev);
fire_sched_out_preempt_notifiers(prev, next);
prepare_task(next);
prepare_arch_switch(next);
diff --git a/kernel/signal.c b/kernel/signal.c
index 0f865d67415d..8d8a940422a8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1244,19 +1244,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
{
struct sighand_struct *sighand;
+ rcu_read_lock();
for (;;) {
- /*
- * Disable interrupts early to avoid deadlocks.
- * See rcu_read_unlock() comment header for details.
- */
- local_irq_save(*flags);
- rcu_read_lock();
sighand = rcu_dereference(tsk->sighand);
- if (unlikely(sighand == NULL)) {
- rcu_read_unlock();
- local_irq_restore(*flags);
+ if (unlikely(sighand == NULL))
break;
- }
+
/*
* This sighand can be already freed and even reused, but
* we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
@@ -1268,15 +1261,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
* __exit_signal(). In the latter case the next iteration
* must see ->sighand == NULL.
*/
- spin_lock(&sighand->siglock);
- if (likely(sighand == tsk->sighand)) {
- rcu_read_unlock();
+ spin_lock_irqsave(&sighand->siglock, *flags);
+ if (likely(sighand == tsk->sighand))
break;
- }
- spin_unlock(&sighand->siglock);
- rcu_read_unlock();
- local_irq_restore(*flags);
+ spin_unlock_irqrestore(&sighand->siglock, *flags);
}
+ rcu_read_unlock();
return sighand;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index d1b2b8d934bb..38509dc1f77b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2018,7 +2018,11 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
return error;
}
- down_write(&mm->mmap_sem);
+ /*
+ * arg_lock protects concurrent updates but we still need mmap_sem for
+ * read to exclude races with sys_brk.
+ */
+ down_read(&mm->mmap_sem);
/*
* We don't validate if these members are pointing to
@@ -2032,6 +2036,7 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
* to any problem in kernel itself
*/
+ spin_lock(&mm->arg_lock);
mm->start_code = prctl_map.start_code;
mm->end_code = prctl_map.end_code;
mm->start_data = prctl_map.start_data;
@@ -2043,6 +2048,7 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
mm->arg_end = prctl_map.arg_end;
mm->env_start = prctl_map.env_start;
mm->env_end = prctl_map.env_end;
+ spin_unlock(&mm->arg_lock);
/*
* Note this update of @saved_auxv is lockless thus
@@ -2055,7 +2061,7 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
if (prctl_map.auxv_size)
memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
- up_write(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 06b4ccee0047..df556175be50 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -432,3 +432,6 @@ COND_SYSCALL(setresgid16);
COND_SYSCALL(setresuid16);
COND_SYSCALL(setreuid16);
COND_SYSCALL(setuid16);
+
+/* restartable sequence */
+COND_SYSCALL(rseq);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9f9983b0a27d..465a28b4cd32 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4362,6 +4362,7 @@ void set_worker_desc(const char *fmt, ...)
va_end(args);
}
}
+EXPORT_SYMBOL_GPL(set_worker_desc);
/**
* print_worker_info - print out worker information and description
diff --git a/lib/Kconfig b/lib/Kconfig
index 7a913937888b..abc111eb5054 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -621,6 +621,9 @@ config ARCH_HAS_PMEM_API
config ARCH_HAS_UACCESS_FLUSHCACHE
bool
+config ARCH_HAS_UACCESS_MCSAFE
+ bool
+
config STACKDEPOT
bool
select STACKTRACE
diff --git a/lib/bitmap.c b/lib/bitmap.c
index a42eff7e8c48..58f9750e49c6 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -64,12 +64,9 @@ EXPORT_SYMBOL(__bitmap_equal);
void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
- unsigned int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = BITS_TO_LONGS(bits);
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
-
- if (bits % BITS_PER_LONG)
- dst[k] = ~src[k];
}
EXPORT_SYMBOL(__bitmap_complement);
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
index 266a97c5708b..ade3ce6c4af6 100644
--- a/lib/bucket_locks.c
+++ b/lib/bucket_locks.c
@@ -30,10 +30,7 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
}
if (sizeof(spinlock_t) != 0) {
- if (gfpflags_allow_blocking(gfp))
- tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
- else
- tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+ tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
if (!tlocks)
return -ENOMEM;
for (i = 0; i < size; i++)
diff --git a/lib/idr.c b/lib/idr.c
index 823b813f08f8..ed9c169c12bd 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -4,9 +4,9 @@
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/xarray.h>
DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-static DEFINE_SPINLOCK(simple_ida_lock);
/**
* idr_alloc_u32() - Allocate an ID.
@@ -581,7 +581,7 @@ again:
if (!ida_pre_get(ida, gfp_mask))
return -ENOMEM;
- spin_lock_irqsave(&simple_ida_lock, flags);
+ xa_lock_irqsave(&ida->ida_rt, flags);
ret = ida_get_new_above(ida, start, &id);
if (!ret) {
if (id > max) {
@@ -591,7 +591,7 @@ again:
ret = id;
}
}
- spin_unlock_irqrestore(&simple_ida_lock, flags);
+ xa_unlock_irqrestore(&ida->ida_rt, flags);
if (unlikely(ret == -EAGAIN))
goto again;
@@ -615,8 +615,8 @@ void ida_simple_remove(struct ida *ida, unsigned int id)
unsigned long flags;
BUG_ON((int)id < 0);
- spin_lock_irqsave(&simple_ida_lock, flags);
+ xa_lock_irqsave(&ida->ida_rt, flags);
ida_remove(ida, id);
- spin_unlock_irqrestore(&simple_ida_lock, flags);
+ xa_unlock_irqrestore(&ida->ida_rt, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h
index 7eceeddb3fb8..c2d6f4efcfbc 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/mpi/mpi-internal.h
@@ -65,13 +65,6 @@
typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
typedef int mpi_size_t; /* (must be a signed type) */
-static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
-{
- if (a->alloced < b)
- return mpi_resize(a, b);
- return 0;
-}
-
/* Copy N limbs from S to D. */
#define MPN_COPY(d, s, n) \
do { \
@@ -80,13 +73,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
(d)[_i] = (s)[_i]; \
} while (0)
-#define MPN_COPY_INCR(d, s, n) \
- do { \
- mpi_size_t _i; \
- for (_i = 0; _i < (n); _i++) \
- (d)[_i] = (s)[_i]; \
- } while (0)
-
#define MPN_COPY_DECR(d, s, n) \
do { \
mpi_size_t _i; \
@@ -111,15 +97,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
} \
} while (0)
-#define MPN_NORMALIZE_NOT_ZERO(d, n) \
- do { \
- for (;;) { \
- if ((d)[(n)-1]) \
- break; \
- (n)--; \
- } \
- } while (0)
-
#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
do { \
if ((size) < KARATSUBA_THRESHOLD) \
@@ -128,46 +105,11 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
mul_n(prodp, up, vp, size, tspace); \
} while (0);
-/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
- * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
- * If this would yield overflow, DI should be the largest possible number
- * (i.e., only ones). For correct operation, the most significant bit of D
- * has to be set. Put the quotient in Q and the remainder in R.
- */
-#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
- do { \
- mpi_limb_t _q, _ql, _r; \
- mpi_limb_t _xh, _xl; \
- umul_ppmm(_q, _ql, (nh), (di)); \
- _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \
- umul_ppmm(_xh, _xl, _q, (d)); \
- sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl); \
- if (_xh) { \
- sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
- _q++; \
- if (_xh) { \
- sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
- _q++; \
- } \
- } \
- if (_r >= (d)) { \
- _r -= (d); \
- _q++; \
- } \
- (r) = _r; \
- (q) = _q; \
- } while (0)
-
/*-- mpiutil.c --*/
mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs);
void mpi_free_limb_space(mpi_ptr_t a);
void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs);
-/*-- mpi-bit.c --*/
-void mpi_rshift_limbs(MPI a, unsigned int count);
-int mpi_lshift_limbs(MPI a, unsigned int count);
-
-/*-- mpihelp-add.c --*/
static inline mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
@@ -175,7 +117,6 @@ mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
static inline mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
mpi_ptr_t s2_ptr, mpi_size_t s2_size);
-/*-- mpihelp-sub.c --*/
static inline mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
@@ -183,10 +124,10 @@ mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
static inline mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
mpi_ptr_t s2_ptr, mpi_size_t s2_size);
-/*-- mpihelp-cmp.c --*/
+/*-- mpih-cmp.c --*/
int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size);
-/*-- mpihelp-mul.c --*/
+/*-- mpih-mul.c --*/
struct karatsuba_ctx {
struct karatsuba_ctx *next;
@@ -202,7 +143,6 @@ mpi_limb_t mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
mpi_limb_t mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
-int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result);
void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
@@ -214,21 +154,16 @@ int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t vp, mpi_size_t vsize,
struct karatsuba_ctx *ctx);
-/*-- mpihelp-mul_1.c (or xxx/cpu/ *.S) --*/
+/*-- generic_mpih-mul1.c --*/
mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
-/*-- mpihelp-div.c --*/
-mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
- mpi_limb_t divisor_limb);
+/*-- mpih-div.c --*/
mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
mpi_ptr_t np, mpi_size_t nsize,
mpi_ptr_t dp, mpi_size_t dsize);
-mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr,
- mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
- mpi_limb_t divisor_limb);
-/*-- mpihelp-shift.c --*/
+/*-- generic_mpih-[lr]shift.c --*/
mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
unsigned cnt);
mpi_limb_t mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 6016f1deb1f5..9bbd9c5d375a 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -112,18 +112,6 @@ static inline void alloc_global_tags(struct percpu_ida *pool,
min(pool->nr_free, pool->percpu_batch_size));
}
-static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
-{
- int tag = -ENOSPC;
-
- spin_lock(&tags->lock);
- if (tags->nr_free)
- tag = tags->freelist[--tags->nr_free];
- spin_unlock(&tags->lock);
-
- return tag;
-}
-
/**
* percpu_ida_alloc - allocate a tag
* @pool: pool to allocate from
@@ -147,20 +135,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
DEFINE_WAIT(wait);
struct percpu_ida_cpu *tags;
unsigned long flags;
- int tag;
+ int tag = -ENOSPC;
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
+ tags = raw_cpu_ptr(pool->tag_cpu);
+ spin_lock_irqsave(&tags->lock, flags);
/* Fastpath */
- tag = alloc_local_tag(tags);
- if (likely(tag >= 0)) {
- local_irq_restore(flags);
+ if (likely(tags->nr_free >= 0)) {
+ tag = tags->freelist[--tags->nr_free];
+ spin_unlock_irqrestore(&tags->lock, flags);
return tag;
}
+ spin_unlock_irqrestore(&tags->lock, flags);
while (1) {
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
+ tags = this_cpu_ptr(pool->tag_cpu);
/*
* prepare_to_wait() must come before steal_tags(), in case
@@ -184,8 +174,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
&pool->cpus_have_tags);
}
- spin_unlock(&pool->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&pool->lock, flags);
if (tag >= 0 || state == TASK_RUNNING)
break;
@@ -196,9 +185,6 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
}
schedule();
-
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
}
if (state != TASK_RUNNING)
finish_wait(&pool->wait, &wait);
@@ -222,28 +208,24 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
BUG_ON(tag >= pool->nr_tags);
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
+ tags = raw_cpu_ptr(pool->tag_cpu);
- spin_lock(&tags->lock);
+ spin_lock_irqsave(&tags->lock, flags);
tags->freelist[tags->nr_free++] = tag;
nr_free = tags->nr_free;
- spin_unlock(&tags->lock);
if (nr_free == 1) {
cpumask_set_cpu(smp_processor_id(),
&pool->cpus_have_tags);
wake_up(&pool->wait);
}
+ spin_unlock_irqrestore(&tags->lock, flags);
if (nr_free == pool->percpu_max_size) {
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
+ spin_lock(&tags->lock);
- /*
- * Global lock held and irqs disabled, don't need percpu
- * lock
- */
if (tags->nr_free == pool->percpu_max_size) {
move_tags(pool->freelist, &pool->nr_free,
tags->freelist, &tags->nr_free,
@@ -251,10 +233,9 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
wake_up(&pool->wait);
}
- spin_unlock(&pool->lock);
+ spin_unlock(&tags->lock);
+ spin_unlock_irqrestore(&pool->lock, flags);
}
-
- local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(percpu_ida_free);
@@ -346,29 +327,27 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
struct percpu_ida_cpu *remote;
unsigned cpu, i, err = 0;
- local_irq_save(flags);
for_each_possible_cpu(cpu) {
remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock(&remote->lock);
+ spin_lock_irqsave(&remote->lock, flags);
for (i = 0; i < remote->nr_free; i++) {
err = fn(remote->freelist[i], data);
if (err)
break;
}
- spin_unlock(&remote->lock);
+ spin_unlock_irqrestore(&remote->lock, flags);
if (err)
goto out;
}
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
for (i = 0; i < pool->nr_free; i++) {
err = fn(pool->freelist[i], data);
if (err)
break;
}
- spin_unlock(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, flags);
out:
- local_irq_restore(flags);
return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index d7e06b28de38..0a559a42359b 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -112,3 +112,5 @@ ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
return j;
}
EXPORT_SYMBOL(ucs2_as_utf8);
+
+MODULE_LICENSE("GPL v2");
diff --git a/mm/Kconfig b/mm/Kconfig
index 3e0b6e87f65d..ce95491abd6a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -694,6 +694,9 @@ config ARCH_HAS_HMM
config MIGRATE_VMA_HELPER
bool
+config DEV_PAGEMAP_OPS
+ bool
+
config HMM
bool
select MIGRATE_VMA_HELPER
@@ -714,6 +717,7 @@ config DEVICE_PRIVATE
bool "Unaddressable device memory (GPU memory, ...)"
depends on ARCH_HAS_HMM
select HMM
+ select DEV_PAGEMAP_OPS
help
Allows creation of struct pages to represent unaddressable device
@@ -724,6 +728,7 @@ config DEVICE_PUBLIC
bool "Addressable device memory (like GPU memory)"
depends on ARCH_HAS_HMM
select HMM
+ select DEV_PAGEMAP_OPS
help
Allows creation of struct pages to represent addressable device
@@ -754,3 +759,6 @@ config GUP_BENCHMARK
performance of get_user_pages_fast().
See tools/testing/selftests/vm/gup_benchmark.c
+
+config ARCH_HAS_PTE_SPECIAL
+ bool
diff --git a/mm/Makefile b/mm/Makefile
index b4e54a9ae9c5..8716bdabe1e6 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -105,3 +105,4 @@ obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
obj-$(CONFIG_HMM) += hmm.o
+obj-$(CONFIG_MEMFD_CREATE) += memfd.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8fe3ebd6ac00..347cc834c04a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -557,7 +557,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
memcg = mem_cgroup_from_css(memcg_css);
blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
blkcg = css_to_blkcg(blkcg_css);
- memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
+ memcg_cgwb_list = &memcg->cgwb_list;
blkcg_cgwb_list = &blkcg->cgwb_list;
/* look up again under lock and discard on blkcg mismatch */
@@ -736,7 +736,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
*/
void wb_memcg_offline(struct mem_cgroup *memcg)
{
- struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
+ struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
struct bdi_writeback *wb, *next;
spin_lock_irq(&cgwb_lock);
diff --git a/mm/filemap.c b/mm/filemap.c
index 0604cb02e6f3..52517f28e6f4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2489,7 +2489,7 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
*
* We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
*/
-int filemap_fault(struct vm_fault *vmf)
+vm_fault_t filemap_fault(struct vm_fault *vmf)
{
int error;
struct file *file = vmf->vma->vm_file;
@@ -2499,7 +2499,7 @@ int filemap_fault(struct vm_fault *vmf)
pgoff_t offset = vmf->pgoff;
pgoff_t max_off;
struct page *page;
- int ret = 0;
+ vm_fault_t ret = 0;
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (unlikely(offset >= max_off))
@@ -2693,11 +2693,11 @@ next:
}
EXPORT_SYMBOL(filemap_map_pages);
-int filemap_page_mkwrite(struct vm_fault *vmf)
+vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
- int ret = VM_FAULT_LOCKED;
+ vm_fault_t ret = VM_FAULT_LOCKED;
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
diff --git a/mm/gup.c b/mm/gup.c
index 541904a7c60f..b70d7ba7cc13 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -212,53 +212,69 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
unsigned int flags, unsigned int *page_mask)
{
- pmd_t *pmd;
+ pmd_t *pmd, pmdval;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
pmd = pmd_offset(pudp, address);
- if (pmd_none(*pmd))
+ /*
+ * The READ_ONCE() will stabilize the pmdval in a register or
+ * on the stack so that it will stop changing under the code.
+ */
+ pmdval = READ_ONCE(*pmd);
+ if (pmd_none(pmdval))
return no_page_table(vma, flags);
- if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
+ if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
page = follow_huge_pmd(mm, address, pmd, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
- if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
+ if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
page = follow_huge_pd(vma, address,
- __hugepd(pmd_val(*pmd)), flags,
+ __hugepd(pmd_val(pmdval)), flags,
PMD_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
retry:
- if (!pmd_present(*pmd)) {
+ if (!pmd_present(pmdval)) {
if (likely(!(flags & FOLL_MIGRATION)))
return no_page_table(vma, flags);
VM_BUG_ON(thp_migration_supported() &&
- !is_pmd_migration_entry(*pmd));
- if (is_pmd_migration_entry(*pmd))
+ !is_pmd_migration_entry(pmdval));
+ if (is_pmd_migration_entry(pmdval))
pmd_migration_entry_wait(mm, pmd);
+ pmdval = READ_ONCE(*pmd);
+ /*
+ * MADV_DONTNEED may convert the pmd to null because
+ * mmap_sem is held in read mode
+ */
+ if (pmd_none(pmdval))
+ return no_page_table(vma, flags);
goto retry;
}
- if (pmd_devmap(*pmd)) {
+ if (pmd_devmap(pmdval)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags);
spin_unlock(ptl);
if (page)
return page;
}
- if (likely(!pmd_trans_huge(*pmd)))
+ if (likely(!pmd_trans_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags);
- if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
+ if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
return no_page_table(vma, flags);
retry_locked:
ptl = pmd_lock(mm, pmd);
+ if (unlikely(pmd_none(*pmd))) {
+ spin_unlock(ptl);
+ return no_page_table(vma, flags);
+ }
if (unlikely(!pmd_present(*pmd))) {
spin_unlock(ptl);
if (likely(!(flags & FOLL_MIGRATION)))
@@ -1354,7 +1370,7 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
}
}
-#ifdef __HAVE_ARCH_PTE_SPECIAL
+#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
@@ -1430,7 +1446,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
{
return 0;
}
-#endif /* __HAVE_ARCH_PTE_SPECIAL */
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
@@ -1459,32 +1475,48 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
return 1;
}
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
unsigned long fault_pfn;
+ int nr_start = *nr;
+
+ fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
+ return 0;
- fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+ if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
+ undo_dev_pagemap(nr, nr_start, pages);
+ return 0;
+ }
+ return 1;
}
-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
+static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
unsigned long fault_pfn;
+ int nr_start = *nr;
- fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+ fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
+ return 0;
+
+ if (unlikely(pud_val(orig) != pud_val(*pudp))) {
+ undo_dev_pagemap(nr, nr_start, pages);
+ return 0;
+ }
+ return 1;
}
#else
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
BUILD_BUG();
return 0;
}
-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
+static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
BUILD_BUG();
@@ -1502,7 +1534,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
return 0;
if (pmd_devmap(orig))
- return __gup_device_huge_pmd(orig, addr, end, pages, nr);
+ return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
refs = 0;
page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
@@ -1540,7 +1572,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
return 0;
if (pud_devmap(orig))
- return __gup_device_huge_pud(orig, addr, end, pages, nr);
+ return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
refs = 0;
page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
diff --git a/mm/hmm.c b/mm/hmm.c
index e63e353830e8..de7b6bf77201 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -35,15 +35,6 @@
#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
-#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
-/*
- * Device private memory see HMM (Documentation/vm/hmm.rst) or hmm.h
- */
-DEFINE_STATIC_KEY_FALSE(device_private_key);
-EXPORT_SYMBOL(device_private_key);
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-
-
#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
@@ -1167,7 +1158,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
resource_size_t addr;
int ret;
- static_branch_enable(&device_private_key);
+ dev_pagemap_get_ops();
devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
GFP_KERNEL, dev_to_node(device));
@@ -1261,7 +1252,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
return ERR_PTR(-EINVAL);
- static_branch_enable(&device_private_key);
+ dev_pagemap_get_ops();
devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
GFP_KERNEL, dev_to_node(device));
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ac5591d8622c..ba8fdc0b6e7f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -483,11 +483,8 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
static inline struct list_head *page_deferred_list(struct page *page)
{
- /*
- * ->lru in the tail pages is occupied by compound_head.
- * Let's use ->mapping + ->index in the second tail page as list_head.
- */
- return (struct list_head *)&page[2].mapping;
+ /* ->lru in the tail pages is occupied by compound_head. */
+ return &page[2].deferred_list;
}
void prep_transhuge_page(struct page *page)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 129088710510..696befffe6f7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3159,7 +3159,7 @@ static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
* hugegpage VMA. do_page_fault() is supposed to trap this, so BUG is we get
* this far.
*/
-static int hugetlb_vm_op_fault(struct vm_fault *vmf)
+static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{
BUG();
return 0;
@@ -3686,6 +3686,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page;
pte_t new_pte;
spinlock_t *ptl;
+ unsigned long haddr = address & huge_page_mask(h);
/*
* Currently, we are forced to kill the process in the event the
@@ -3716,7 +3717,7 @@ retry:
u32 hash;
struct vm_fault vmf = {
.vma = vma,
- .address = address,
+ .address = haddr,
.flags = flags,
/*
* Hard to debug if it ends up being
@@ -3733,14 +3734,14 @@ retry:
* fault to make calling code simpler.
*/
hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
- idx, address);
+ idx, haddr);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
ret = handle_userfault(&vmf, VM_UFFD_MISSING);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
goto out;
}
- page = alloc_huge_page(vma, address, 0);
+ page = alloc_huge_page(vma, haddr, 0);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
if (ret == -ENOMEM)
@@ -3789,12 +3790,12 @@ retry:
* the spinlock.
*/
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
- if (vma_needs_reservation(h, vma, address) < 0) {
+ if (vma_needs_reservation(h, vma, haddr) < 0) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
}
/* Just decrements count, does not deallocate */
- vma_end_reservation(h, vma, address);
+ vma_end_reservation(h, vma, haddr);
}
ptl = huge_pte_lock(h, mm, ptep);
@@ -3808,17 +3809,17 @@ retry:
if (anon_rmap) {
ClearPagePrivate(page);
- hugepage_add_new_anon_rmap(page, vma, address);
+ hugepage_add_new_anon_rmap(page, vma, haddr);
} else
page_dup_rmap(page, true);
new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
- set_huge_pte_at(mm, address, ptep, new_pte);
+ set_huge_pte_at(mm, haddr, ptep, new_pte);
hugetlb_count_add(pages_per_huge_page(h), mm);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
- ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
+ ret = hugetlb_cow(mm, vma, haddr, ptep, page, ptl);
}
spin_unlock(ptl);
@@ -3830,7 +3831,7 @@ backout:
spin_unlock(ptl);
backout_unlocked:
unlock_page(page);
- restore_reserve_on_error(h, vma, address, page);
+ restore_reserve_on_error(h, vma, haddr, page);
put_page(page);
goto out;
}
@@ -3883,10 +3884,9 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
struct hstate *h = hstate_vma(vma);
struct address_space *mapping;
int need_wait_lock = 0;
+ unsigned long haddr = address & huge_page_mask(h);
- address &= huge_page_mask(h);
-
- ptep = huge_pte_offset(mm, address, huge_page_size(h));
+ ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (ptep) {
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
@@ -3896,20 +3896,20 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
} else {
- ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+ ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
if (!ptep)
return VM_FAULT_OOM;
}
mapping = vma->vm_file->f_mapping;
- idx = vma_hugecache_offset(h, vma, address);
+ idx = vma_hugecache_offset(h, vma, haddr);
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
- hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
+ hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
entry = huge_ptep_get(ptep);
@@ -3939,16 +3939,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* consumed.
*/
if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
- if (vma_needs_reservation(h, vma, address) < 0) {
+ if (vma_needs_reservation(h, vma, haddr) < 0) {
ret = VM_FAULT_OOM;
goto out_mutex;
}
/* Just decrements count, does not deallocate */
- vma_end_reservation(h, vma, address);
+ vma_end_reservation(h, vma, haddr);
if (!(vma->vm_flags & VM_MAYSHARE))
pagecache_page = hugetlbfs_pagecache_page(h,
- vma, address);
+ vma, haddr);
}
ptl = huge_pte_lock(h, mm, ptep);
@@ -3973,16 +3973,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (flags & FAULT_FLAG_WRITE) {
if (!huge_pte_write(entry)) {
- ret = hugetlb_cow(mm, vma, address, ptep,
+ ret = hugetlb_cow(mm, vma, haddr, ptep,
pagecache_page, ptl);
goto out_put_page;
}
entry = huge_pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
- if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+ if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
flags & FAULT_FLAG_WRITE))
- update_mmu_cache(vma, address, ptep);
+ update_mmu_cache(vma, haddr, ptep);
out_put_page:
if (page != pagecache_page)
unlock_page(page);
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index eec1150125b9..68c2f2f3c05b 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -84,7 +84,7 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
limit = round_down(PAGE_COUNTER_MAX,
1 << huge_page_order(&hstates[idx]));
- ret = page_counter_limit(counter, limit);
+ ret = page_counter_set_max(counter, limit);
VM_BUG_ON(ret);
}
}
@@ -273,7 +273,7 @@ static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
case RES_USAGE:
return (u64)page_counter_read(counter) * PAGE_SIZE;
case RES_LIMIT:
- return (u64)counter->limit * PAGE_SIZE;
+ return (u64)counter->max * PAGE_SIZE;
case RES_MAX_USAGE:
return (u64)counter->watermark * PAGE_SIZE;
case RES_FAILCNT:
@@ -306,7 +306,7 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
switch (MEMFILE_ATTR(of_cft(of)->private)) {
case RES_LIMIT:
mutex_lock(&hugetlb_limit_mutex);
- ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
+ ret = page_counter_set_max(&h_cg->hugepage[idx], nr_pages);
mutex_unlock(&hugetlb_limit_mutex);
break;
default:
diff --git a/mm/init-mm.c b/mm/init-mm.c
index f94d5d15ebc0..f0179c9c04c2 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -22,6 +22,7 @@ struct mm_struct init_mm = {
.mm_count = ATOMIC_INIT(1),
.mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
+ .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
.user_ns = &init_user_ns,
INIT_MM_CONTEXT(init_mm)
diff --git a/mm/ksm.c b/mm/ksm.c
index 7d6558f3bac9..e2d2886fb1df 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -840,6 +840,17 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
return err;
}
+static inline struct stable_node *page_stable_node(struct page *page)
+{
+ return PageKsm(page) ? page_rmapping(page) : NULL;
+}
+
+static inline void set_page_stable_node(struct page *page,
+ struct stable_node *stable_node)
+{
+ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+}
+
#ifdef CONFIG_SYSFS
/*
* Only called through the sysfs control interface:
diff --git a/mm/memblock.c b/mm/memblock.c
index 5108356ad8aa..93ad42bc8a73 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -68,7 +68,7 @@ ulong __init_memblock choose_memblock_flags(void)
/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
- return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
+ return *size = min(*size, PHYS_ADDR_MAX - base);
}
/*
@@ -697,6 +697,11 @@ static int __init_memblock memblock_remove_range(struct memblock_type *type,
int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
+ phys_addr_t end = base + size - 1;
+
+ memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
+ &base, &end, (void *)_RET_IP_);
+
return memblock_remove_range(&memblock.memory, base, size);
}
@@ -925,7 +930,7 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
r = &type_b->regions[idx_b];
r_start = idx_b ? r[-1].base + r[-1].size : 0;
r_end = idx_b < type_b->cnt ?
- r->base : (phys_addr_t)ULLONG_MAX;
+ r->base : PHYS_ADDR_MAX;
/*
* if idx_b advanced past idx_a,
@@ -1041,7 +1046,7 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
r = &type_b->regions[idx_b];
r_start = idx_b ? r[-1].base + r[-1].size : 0;
r_end = idx_b < type_b->cnt ?
- r->base : (phys_addr_t)ULLONG_MAX;
+ r->base : PHYS_ADDR_MAX;
/*
* if idx_b advanced past idx_a,
* break out to advance idx_a
@@ -1516,13 +1521,13 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void)
static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
- phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
+ phys_addr_t max_addr = PHYS_ADDR_MAX;
struct memblock_region *r;
/*
* translate the memory @limit size into the max address within one of
* the memory memblock regions, if the @limit exceeds the total size
- * of those regions, max_addr will keep original value ULLONG_MAX
+ * of those regions, max_addr will keep original value PHYS_ADDR_MAX
*/
for_each_memblock(memory, r) {
if (limit <= r->size) {
@@ -1537,7 +1542,7 @@ static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
- phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
+ phys_addr_t max_addr = PHYS_ADDR_MAX;
if (!limit)
return;
@@ -1545,14 +1550,14 @@ void __init memblock_enforce_memory_limit(phys_addr_t limit)
max_addr = __find_max_addr(limit);
/* @limit exceeds the total size of the memory, do nothing */
- if (max_addr == (phys_addr_t)ULLONG_MAX)
+ if (max_addr == PHYS_ADDR_MAX)
return;
/* truncate both memory and reserved regions */
memblock_remove_range(&memblock.memory, max_addr,
- (phys_addr_t)ULLONG_MAX);
+ PHYS_ADDR_MAX);
memblock_remove_range(&memblock.reserved, max_addr,
- (phys_addr_t)ULLONG_MAX);
+ PHYS_ADDR_MAX);
}
void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
@@ -1580,7 +1585,7 @@ void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
/* truncate the reserved regions */
memblock_remove_range(&memblock.reserved, 0, base);
memblock_remove_range(&memblock.reserved,
- base + size, (phys_addr_t)ULLONG_MAX);
+ base + size, PHYS_ADDR_MAX);
}
void __init memblock_mem_limit_remove_map(phys_addr_t limit)
@@ -1593,7 +1598,7 @@ void __init memblock_mem_limit_remove_map(phys_addr_t limit)
max_addr = __find_max_addr(limit);
/* @limit exceeds the total size of the memory, do nothing */
- if (max_addr == (phys_addr_t)ULLONG_MAX)
+ if (max_addr == PHYS_ADDR_MAX)
return;
memblock_cap_memory_range(0, max_addr);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1695f38630f1..c1e64d60ed02 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1034,13 +1034,13 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
unsigned long limit;
count = page_counter_read(&memcg->memory);
- limit = READ_ONCE(memcg->memory.limit);
+ limit = READ_ONCE(memcg->memory.max);
if (count < limit)
margin = limit - count;
if (do_memsw_account()) {
count = page_counter_read(&memcg->memsw);
- limit = READ_ONCE(memcg->memsw.limit);
+ limit = READ_ONCE(memcg->memsw.max);
if (count <= limit)
margin = min(margin, limit - count);
else
@@ -1148,13 +1148,13 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memory)),
- K((u64)memcg->memory.limit), memcg->memory.failcnt);
+ K((u64)memcg->memory.max), memcg->memory.failcnt);
pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memsw)),
- K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
+ K((u64)memcg->memsw.max), memcg->memsw.failcnt);
pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->kmem)),
- K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
+ K((u64)memcg->kmem.max), memcg->kmem.failcnt);
for_each_mem_cgroup_tree(iter, memcg) {
pr_info("Memory cgroup stats for ");
@@ -1179,21 +1179,21 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
/*
* Return the memory (and swap, if configured) limit for a memcg.
*/
-unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
- unsigned long limit;
+ unsigned long max;
- limit = memcg->memory.limit;
+ max = memcg->memory.max;
if (mem_cgroup_swappiness(memcg)) {
- unsigned long memsw_limit;
- unsigned long swap_limit;
+ unsigned long memsw_max;
+ unsigned long swap_max;
- memsw_limit = memcg->memsw.limit;
- swap_limit = memcg->swap.limit;
- swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
- limit = min(limit + swap_limit, memsw_limit);
+ memsw_max = memcg->memsw.max;
+ swap_max = memcg->swap.max;
+ swap_max = min(swap_max, (unsigned long)total_swap_pages);
+ max = min(max + swap_max, memsw_max);
}
- return limit;
+ return max;
}
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
@@ -2444,12 +2444,13 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
}
#endif
-static DEFINE_MUTEX(memcg_limit_mutex);
+static DEFINE_MUTEX(memcg_max_mutex);
-static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
- unsigned long limit, bool memsw)
+static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
+ unsigned long max, bool memsw)
{
bool enlarge = false;
+ bool drained = false;
int ret;
bool limits_invariant;
struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
@@ -2460,26 +2461,32 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
break;
}
- mutex_lock(&memcg_limit_mutex);
+ mutex_lock(&memcg_max_mutex);
/*
* Make sure that the new limit (memsw or memory limit) doesn't
- * break our basic invariant rule memory.limit <= memsw.limit.
+ * break our basic invariant rule memory.max <= memsw.max.
*/
- limits_invariant = memsw ? limit >= memcg->memory.limit :
- limit <= memcg->memsw.limit;
+ limits_invariant = memsw ? max >= memcg->memory.max :
+ max <= memcg->memsw.max;
if (!limits_invariant) {
- mutex_unlock(&memcg_limit_mutex);
+ mutex_unlock(&memcg_max_mutex);
ret = -EINVAL;
break;
}
- if (limit > counter->limit)
+ if (max > counter->max)
enlarge = true;
- ret = page_counter_limit(counter, limit);
- mutex_unlock(&memcg_limit_mutex);
+ ret = page_counter_set_max(counter, max);
+ mutex_unlock(&memcg_max_mutex);
if (!ret)
break;
+ if (!drained) {
+ drain_all_stock(memcg);
+ drained = true;
+ continue;
+ }
+
if (!try_to_free_mem_cgroup_pages(memcg, 1,
GFP_KERNEL, !memsw)) {
ret = -EBUSY;
@@ -2603,6 +2610,9 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
/* we call try-to-free pages for make this cgroup empty */
lru_add_drain_all();
+
+ drain_all_stock(memcg);
+
/* try to free all pages in this cgroup */
while (nr_retries && page_counter_read(&memcg->memory)) {
int progress;
@@ -2757,7 +2767,7 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
return (u64)page_counter_read(counter) * PAGE_SIZE;
case RES_LIMIT:
- return (u64)counter->limit * PAGE_SIZE;
+ return (u64)counter->max * PAGE_SIZE;
case RES_MAX_USAGE:
return (u64)counter->watermark * PAGE_SIZE;
case RES_FAILCNT:
@@ -2871,24 +2881,24 @@ static void memcg_free_kmem(struct mem_cgroup *memcg)
}
#endif /* !CONFIG_SLOB */
-static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
- unsigned long limit)
+static int memcg_update_kmem_max(struct mem_cgroup *memcg,
+ unsigned long max)
{
int ret;
- mutex_lock(&memcg_limit_mutex);
- ret = page_counter_limit(&memcg->kmem, limit);
- mutex_unlock(&memcg_limit_mutex);
+ mutex_lock(&memcg_max_mutex);
+ ret = page_counter_set_max(&memcg->kmem, max);
+ mutex_unlock(&memcg_max_mutex);
return ret;
}
-static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
+static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
{
int ret;
- mutex_lock(&memcg_limit_mutex);
+ mutex_lock(&memcg_max_mutex);
- ret = page_counter_limit(&memcg->tcpmem, limit);
+ ret = page_counter_set_max(&memcg->tcpmem, max);
if (ret)
goto out;
@@ -2913,7 +2923,7 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
memcg->tcpmem_active = true;
}
out:
- mutex_unlock(&memcg_limit_mutex);
+ mutex_unlock(&memcg_max_mutex);
return ret;
}
@@ -2941,16 +2951,16 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
}
switch (MEMFILE_TYPE(of_cft(of)->private)) {
case _MEM:
- ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
+ ret = mem_cgroup_resize_max(memcg, nr_pages, false);
break;
case _MEMSWAP:
- ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
+ ret = mem_cgroup_resize_max(memcg, nr_pages, true);
break;
case _KMEM:
- ret = memcg_update_kmem_limit(memcg, nr_pages);
+ ret = memcg_update_kmem_max(memcg, nr_pages);
break;
case _TCP:
- ret = memcg_update_tcp_limit(memcg, nr_pages);
+ ret = memcg_update_tcp_max(memcg, nr_pages);
break;
}
break;
@@ -3083,7 +3093,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
#endif /* CONFIG_NUMA */
/* Universal VM events cgroup1 shows, original sort order */
-unsigned int memcg1_events[] = {
+static const unsigned int memcg1_events[] = {
PGPGIN,
PGPGOUT,
PGFAULT,
@@ -3126,8 +3136,8 @@ static int memcg_stat_show(struct seq_file *m, void *v)
/* Hierarchical information */
memory = memsw = PAGE_COUNTER_MAX;
for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
- memory = min(memory, mi->memory.limit);
- memsw = min(memsw, mi->memsw.limit);
+ memory = min(memory, mi->memory.max);
+ memsw = min(memsw, mi->memsw.max);
}
seq_printf(m, "hierarchical_memory_limit %llu\n",
(u64)memory * PAGE_SIZE);
@@ -3562,11 +3572,6 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
#ifdef CONFIG_CGROUP_WRITEBACK
-struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
-{
- return &memcg->cgwb_list;
-}
-
static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
return wb_domain_init(&memcg->cgwb_domain, gfp);
@@ -3626,7 +3631,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
*pheadroom = PAGE_COUNTER_MAX;
while ((parent = parent_mem_cgroup(memcg))) {
- unsigned long ceiling = min(memcg->memory.limit, memcg->high);
+ unsigned long ceiling = min(memcg->memory.max, memcg->high);
unsigned long used = page_counter_read(&memcg->memory);
*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
@@ -4270,7 +4275,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
}
spin_unlock(&memcg->event_list_lock);
- memcg->low = 0;
+ page_counter_set_min(&memcg->memory, 0);
+ page_counter_set_low(&memcg->memory, 0);
memcg_offline_kmem(memcg);
wb_memcg_offline(memcg);
@@ -4319,12 +4325,13 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
- page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
- page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
- page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
- page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
- page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
- memcg->low = 0;
+ page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
+ page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
+ page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
+ page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
+ page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
+ page_counter_set_min(&memcg->memory, 0);
+ page_counter_set_low(&memcg->memory, 0);
memcg->high = PAGE_COUNTER_MAX;
memcg->soft_limit = PAGE_COUNTER_MAX;
memcg_wb_domain_size_changed(memcg);
@@ -5061,10 +5068,40 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
}
+static int memory_min_show(struct seq_file *m, void *v)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ unsigned long min = READ_ONCE(memcg->memory.min);
+
+ if (min == PAGE_COUNTER_MAX)
+ seq_puts(m, "max\n");
+ else
+ seq_printf(m, "%llu\n", (u64)min * PAGE_SIZE);
+
+ return 0;
+}
+
+static ssize_t memory_min_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ unsigned long min;
+ int err;
+
+ buf = strstrip(buf);
+ err = page_counter_memparse(buf, "max", &min);
+ if (err)
+ return err;
+
+ page_counter_set_min(&memcg->memory, min);
+
+ return nbytes;
+}
+
static int memory_low_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long low = READ_ONCE(memcg->low);
+ unsigned long low = READ_ONCE(memcg->memory.low);
if (low == PAGE_COUNTER_MAX)
seq_puts(m, "max\n");
@@ -5086,7 +5123,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
if (err)
return err;
- memcg->low = low;
+ page_counter_set_low(&memcg->memory, low);
return nbytes;
}
@@ -5131,7 +5168,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
static int memory_max_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long max = READ_ONCE(memcg->memory.limit);
+ unsigned long max = READ_ONCE(memcg->memory.max);
if (max == PAGE_COUNTER_MAX)
seq_puts(m, "max\n");
@@ -5155,7 +5192,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
if (err)
return err;
- xchg(&memcg->memory.limit, max);
+ xchg(&memcg->memory.max, max);
for (;;) {
unsigned long nr_pages = page_counter_read(&memcg->memory);
@@ -5296,6 +5333,12 @@ static struct cftype memory_files[] = {
.read_u64 = memory_current_read,
},
{
+ .name = "min",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = memory_min_show,
+ .write = memory_min_write,
+ },
+ {
.name = "low",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = memory_low_show,
@@ -5344,54 +5387,140 @@ struct cgroup_subsys memory_cgrp_subsys = {
};
/**
- * mem_cgroup_low - check if memory consumption is below the normal range
+ * mem_cgroup_protected - check if memory consumption is in the normal range
* @root: the top ancestor of the sub-tree being checked
* @memcg: the memory cgroup to check
*
- * Returns %true if memory consumption of @memcg, and that of all
- * ancestors up to (but not including) @root, is below the normal range.
+ * WARNING: This function is not stateless! It can only be used as part
+ * of a top-down tree iteration, not for isolated queries.
+ *
+ * Returns one of the following:
+ * MEMCG_PROT_NONE: cgroup memory is not protected
+ * MEMCG_PROT_LOW: cgroup memory is protected as long as there is
+ * an unprotected supply of reclaimable memory from other cgroups.
+ * MEMCG_PROT_MIN: cgroup memory is protected
*
- * @root is exclusive; it is never low when looked at directly and isn't
- * checked when traversing the hierarchy.
+ * @root is exclusive; it is never protected when looked at directly
*
- * Excluding @root enables using memory.low to prioritize memory usage
- * between cgroups within a subtree of the hierarchy that is limited by
- * memory.high or memory.max.
+ * To provide a proper hierarchical behavior, effective memory.min/low values
+ * are used. Below is the description of how effective memory.low is calculated.
+ * Effective memory.min values is calculated in the same way.
*
- * For example, given cgroup A with children B and C:
+ * Effective memory.low is always equal or less than the original memory.low.
+ * If there is no memory.low overcommitment (which is always true for
+ * top-level memory cgroups), these two values are equal.
+ * Otherwise, it's a part of parent's effective memory.low,
+ * calculated as a cgroup's memory.low usage divided by sum of sibling's
+ * memory.low usages, where memory.low usage is the size of actually
+ * protected memory.
*
- * A
- * / \
- * B C
+ * low_usage
+ * elow = min( memory.low, parent->elow * ------------------ ),
+ * siblings_low_usage
*
- * and
+ * | memory.current, if memory.current < memory.low
+ * low_usage = |
+ * | 0, otherwise.
*
- * 1. A/memory.current > A/memory.high
- * 2. A/B/memory.current < A/B/memory.low
- * 3. A/C/memory.current >= A/C/memory.low
*
- * As 'A' is high, i.e. triggers reclaim from 'A', and 'B' is low, we
- * should reclaim from 'C' until 'A' is no longer high or until we can
- * no longer reclaim from 'C'. If 'A', i.e. @root, isn't excluded by
- * mem_cgroup_low when reclaming from 'A', then 'B' won't be considered
- * low and we will reclaim indiscriminately from both 'B' and 'C'.
+ * Such definition of the effective memory.low provides the expected
+ * hierarchical behavior: parent's memory.low value is limiting
+ * children, unprotected memory is reclaimed first and cgroups,
+ * which are not using their guarantee do not affect actual memory
+ * distribution.
+ *
+ * For example, if there are memcgs A, A/B, A/C, A/D and A/E:
+ *
+ * A A/memory.low = 2G, A/memory.current = 6G
+ * //\\
+ * BC DE B/memory.low = 3G B/memory.current = 2G
+ * C/memory.low = 1G C/memory.current = 2G
+ * D/memory.low = 0 D/memory.current = 2G
+ * E/memory.low = 10G E/memory.current = 0
+ *
+ * and the memory pressure is applied, the following memory distribution
+ * is expected (approximately):
+ *
+ * A/memory.current = 2G
+ *
+ * B/memory.current = 1.3G
+ * C/memory.current = 0.6G
+ * D/memory.current = 0
+ * E/memory.current = 0
+ *
+ * These calculations require constant tracking of the actual low usages
+ * (see propagate_protected_usage()), as well as recursive calculation of
+ * effective memory.low values. But as we do call mem_cgroup_protected()
+ * path for each memory cgroup top-down from the reclaim,
+ * it's possible to optimize this part, and save calculated elow
+ * for next usage. This part is intentionally racy, but it's ok,
+ * as memory.low is a best-effort mechanism.
*/
-bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
+enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
{
+ struct mem_cgroup *parent;
+ unsigned long emin, parent_emin;
+ unsigned long elow, parent_elow;
+ unsigned long usage;
+
if (mem_cgroup_disabled())
- return false;
+ return MEMCG_PROT_NONE;
if (!root)
root = root_mem_cgroup;
if (memcg == root)
- return false;
+ return MEMCG_PROT_NONE;
+
+ usage = page_counter_read(&memcg->memory);
+ if (!usage)
+ return MEMCG_PROT_NONE;
+
+ emin = memcg->memory.min;
+ elow = memcg->memory.low;
+
+ parent = parent_mem_cgroup(memcg);
+ if (parent == root)
+ goto exit;
- for (; memcg != root; memcg = parent_mem_cgroup(memcg)) {
- if (page_counter_read(&memcg->memory) >= memcg->low)
- return false;
+ parent_emin = READ_ONCE(parent->memory.emin);
+ emin = min(emin, parent_emin);
+ if (emin && parent_emin) {
+ unsigned long min_usage, siblings_min_usage;
+
+ min_usage = min(usage, memcg->memory.min);
+ siblings_min_usage = atomic_long_read(
+ &parent->memory.children_min_usage);
+
+ if (min_usage && siblings_min_usage)
+ emin = min(emin, parent_emin * min_usage /
+ siblings_min_usage);
}
- return true;
+ parent_elow = READ_ONCE(parent->memory.elow);
+ elow = min(elow, parent_elow);
+ if (elow && parent_elow) {
+ unsigned long low_usage, siblings_low_usage;
+
+ low_usage = min(usage, memcg->memory.low);
+ siblings_low_usage = atomic_long_read(
+ &parent->memory.children_low_usage);
+
+ if (low_usage && siblings_low_usage)
+ elow = min(elow, parent_elow * low_usage /
+ siblings_low_usage);
+ }
+
+exit:
+ memcg->memory.emin = emin;
+ memcg->memory.elow = elow;
+
+ if (usage <= emin)
+ return MEMCG_PROT_MIN;
+ else if (usage <= elow)
+ return MEMCG_PROT_LOW;
+ else
+ return MEMCG_PROT_NONE;
}
/**
@@ -6012,10 +6141,17 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
if (!memcg)
return 0;
+ if (!entry.val) {
+ memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
+ return 0;
+ }
+
memcg = mem_cgroup_id_get_online(memcg);
if (!mem_cgroup_is_root(memcg) &&
!page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
+ memcg_memory_event(memcg, MEMCG_SWAP_MAX);
+ memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
mem_cgroup_id_put(memcg);
return -ENOMEM;
}
@@ -6067,7 +6203,7 @@ long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
return nr_swap_pages;
for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
nr_swap_pages = min_t(long, nr_swap_pages,
- READ_ONCE(memcg->swap.limit) -
+ READ_ONCE(memcg->swap.max) -
page_counter_read(&memcg->swap));
return nr_swap_pages;
}
@@ -6088,7 +6224,7 @@ bool mem_cgroup_swap_full(struct page *page)
return false;
for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
- if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
+ if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
return true;
return false;
@@ -6122,7 +6258,7 @@ static u64 swap_current_read(struct cgroup_subsys_state *css,
static int swap_max_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long max = READ_ONCE(memcg->swap.limit);
+ unsigned long max = READ_ONCE(memcg->swap.max);
if (max == PAGE_COUNTER_MAX)
seq_puts(m, "max\n");
@@ -6144,15 +6280,23 @@ static ssize_t swap_max_write(struct kernfs_open_file *of,
if (err)
return err;
- mutex_lock(&memcg_limit_mutex);
- err = page_counter_limit(&memcg->swap, max);
- mutex_unlock(&memcg_limit_mutex);
- if (err)
- return err;
+ xchg(&memcg->swap.max, max);
return nbytes;
}
+static int swap_events_show(struct seq_file *m, void *v)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+
+ seq_printf(m, "max %lu\n",
+ atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
+ seq_printf(m, "fail %lu\n",
+ atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
+
+ return 0;
+}
+
static struct cftype swap_files[] = {
{
.name = "swap.current",
@@ -6165,6 +6309,12 @@ static struct cftype swap_files[] = {
.seq_show = swap_max_show,
.write = swap_max_write,
},
+ {
+ .name = "swap.events",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .file_offset = offsetof(struct mem_cgroup, swap_events_file),
+ .seq_show = swap_events_show,
+ },
{ } /* terminate */
};
diff --git a/mm/memfd.c b/mm/memfd.c
new file mode 100644
index 000000000000..27069518e3c5
--- /dev/null
+++ b/mm/memfd.c
@@ -0,0 +1,345 @@
+/*
+ * memfd_create system call and file sealing support
+ *
+ * Code was originally included in shmem.c, and broken out to facilitate
+ * use by hugetlbfs as well as tmpfs.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/khugepaged.h>
+#include <linux/syscalls.h>
+#include <linux/hugetlb.h>
+#include <linux/shmem_fs.h>
+#include <linux/memfd.h>
+#include <uapi/linux/memfd.h>
+
+/*
+ * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
+ * so reuse a tag which we firmly believe is never set or cleared on tmpfs
+ * or hugetlbfs because they are memory only filesystems.
+ */
+#define MEMFD_TAG_PINNED PAGECACHE_TAG_TOWRITE
+#define LAST_SCAN 4 /* about 150ms max */
+
+static void memfd_tag_pins(struct address_space *mapping)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ pgoff_t start;
+ struct page *page;
+
+ lru_add_drain();
+ start = 0;
+ rcu_read_lock();
+
+ radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
+ page = radix_tree_deref_slot(slot);
+ if (!page || radix_tree_exception(page)) {
+ if (radix_tree_deref_retry(page)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ } else if (page_count(page) - page_mapcount(page) > 1) {
+ xa_lock_irq(&mapping->i_pages);
+ radix_tree_tag_set(&mapping->i_pages, iter.index,
+ MEMFD_TAG_PINNED);
+ xa_unlock_irq(&mapping->i_pages);
+ }
+
+ if (need_resched()) {
+ slot = radix_tree_iter_resume(slot, &iter);
+ cond_resched_rcu();
+ }
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
+ * via get_user_pages(), drivers might have some pending I/O without any active
+ * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all pages
+ * and see whether it has an elevated ref-count. If so, we tag them and wait for
+ * them to be dropped.
+ * The caller must guarantee that no new user will acquire writable references
+ * to those pages to avoid races.
+ */
+static int memfd_wait_for_pins(struct address_space *mapping)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ pgoff_t start;
+ struct page *page;
+ int error, scan;
+
+ memfd_tag_pins(mapping);
+
+ error = 0;
+ for (scan = 0; scan <= LAST_SCAN; scan++) {
+ if (!radix_tree_tagged(&mapping->i_pages, MEMFD_TAG_PINNED))
+ break;
+
+ if (!scan)
+ lru_add_drain_all();
+ else if (schedule_timeout_killable((HZ << scan) / 200))
+ scan = LAST_SCAN;
+
+ start = 0;
+ rcu_read_lock();
+ radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter,
+ start, MEMFD_TAG_PINNED) {
+
+ page = radix_tree_deref_slot(slot);
+ if (radix_tree_exception(page)) {
+ if (radix_tree_deref_retry(page)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+
+ page = NULL;
+ }
+
+ if (page &&
+ page_count(page) - page_mapcount(page) != 1) {
+ if (scan < LAST_SCAN)
+ goto continue_resched;
+
+ /*
+ * On the last scan, we clean up all those tags
+ * we inserted; but make a note that we still
+ * found pages pinned.
+ */
+ error = -EBUSY;
+ }
+
+ xa_lock_irq(&mapping->i_pages);
+ radix_tree_tag_clear(&mapping->i_pages,
+ iter.index, MEMFD_TAG_PINNED);
+ xa_unlock_irq(&mapping->i_pages);
+continue_resched:
+ if (need_resched()) {
+ slot = radix_tree_iter_resume(slot, &iter);
+ cond_resched_rcu();
+ }
+ }
+ rcu_read_unlock();
+ }
+
+ return error;
+}
+
+static unsigned int *memfd_file_seals_ptr(struct file *file)
+{
+ if (shmem_file(file))
+ return &SHMEM_I(file_inode(file))->seals;
+
+#ifdef CONFIG_HUGETLBFS
+ if (is_file_hugepages(file))
+ return &HUGETLBFS_I(file_inode(file))->seals;
+#endif
+
+ return NULL;
+}
+
+#define F_ALL_SEALS (F_SEAL_SEAL | \
+ F_SEAL_SHRINK | \
+ F_SEAL_GROW | \
+ F_SEAL_WRITE)
+
+static int memfd_add_seals(struct file *file, unsigned int seals)
+{
+ struct inode *inode = file_inode(file);
+ unsigned int *file_seals;
+ int error;
+
+ /*
+ * SEALING
+ * Sealing allows multiple parties to share a tmpfs or hugetlbfs file
+ * but restrict access to a specific subset of file operations. Seals
+ * can only be added, but never removed. This way, mutually untrusted
+ * parties can share common memory regions with a well-defined policy.
+ * A malicious peer can thus never perform unwanted operations on a
+ * shared object.
+ *
+ * Seals are only supported on special tmpfs or hugetlbfs files and
+ * always affect the whole underlying inode. Once a seal is set, it
+ * may prevent some kinds of access to the file. Currently, the
+ * following seals are defined:
+ * SEAL_SEAL: Prevent further seals from being set on this file
+ * SEAL_SHRINK: Prevent the file from shrinking
+ * SEAL_GROW: Prevent the file from growing
+ * SEAL_WRITE: Prevent write access to the file
+ *
+ * As we don't require any trust relationship between two parties, we
+ * must prevent seals from being removed. Therefore, sealing a file
+ * only adds a given set of seals to the file, it never touches
+ * existing seals. Furthermore, the "setting seals"-operation can be
+ * sealed itself, which basically prevents any further seal from being
+ * added.
+ *
+ * Semantics of sealing are only defined on volatile files. Only
+ * anonymous tmpfs and hugetlbfs files support sealing. More
+ * importantly, seals are never written to disk. Therefore, there's
+ * no plan to support it on other file types.
+ */
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+ if (seals & ~(unsigned int)F_ALL_SEALS)
+ return -EINVAL;
+
+ inode_lock(inode);
+
+ file_seals = memfd_file_seals_ptr(file);
+ if (!file_seals) {
+ error = -EINVAL;
+ goto unlock;
+ }
+
+ if (*file_seals & F_SEAL_SEAL) {
+ error = -EPERM;
+ goto unlock;
+ }
+
+ if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) {
+ error = mapping_deny_writable(file->f_mapping);
+ if (error)
+ goto unlock;
+
+ error = memfd_wait_for_pins(file->f_mapping);
+ if (error) {
+ mapping_allow_writable(file->f_mapping);
+ goto unlock;
+ }
+ }
+
+ *file_seals |= seals;
+ error = 0;
+
+unlock:
+ inode_unlock(inode);
+ return error;
+}
+
+static int memfd_get_seals(struct file *file)
+{
+ unsigned int *seals = memfd_file_seals_ptr(file);
+
+ return seals ? *seals : -EINVAL;
+}
+
+long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long error;
+
+ switch (cmd) {
+ case F_ADD_SEALS:
+ /* disallow upper 32bit */
+ if (arg > UINT_MAX)
+ return -EINVAL;
+
+ error = memfd_add_seals(file, arg);
+ break;
+ case F_GET_SEALS:
+ error = memfd_get_seals(file);
+ break;
+ default:
+ error = -EINVAL;
+ break;
+ }
+
+ return error;
+}
+
+#define MFD_NAME_PREFIX "memfd:"
+#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
+#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
+
+#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB)
+
+SYSCALL_DEFINE2(memfd_create,
+ const char __user *, uname,
+ unsigned int, flags)
+{
+ unsigned int *file_seals;
+ struct file *file;
+ int fd, error;
+ char *name;
+ long len;
+
+ if (!(flags & MFD_HUGETLB)) {
+ if (flags & ~(unsigned int)MFD_ALL_FLAGS)
+ return -EINVAL;
+ } else {
+ /* Allow huge page size encoding in flags. */
+ if (flags & ~(unsigned int)(MFD_ALL_FLAGS |
+ (MFD_HUGE_MASK << MFD_HUGE_SHIFT)))
+ return -EINVAL;
+ }
+
+ /* length includes terminating zero */
+ len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
+ if (len <= 0)
+ return -EFAULT;
+ if (len > MFD_NAME_MAX_LEN + 1)
+ return -EINVAL;
+
+ name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strcpy(name, MFD_NAME_PREFIX);
+ if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
+ error = -EFAULT;
+ goto err_name;
+ }
+
+ /* terminating-zero may have changed after strnlen_user() returned */
+ if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
+ error = -EFAULT;
+ goto err_name;
+ }
+
+ fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
+ if (fd < 0) {
+ error = fd;
+ goto err_name;
+ }
+
+ if (flags & MFD_HUGETLB) {
+ struct user_struct *user = NULL;
+
+ file = hugetlb_file_setup(name, 0, VM_NORESERVE, &user,
+ HUGETLB_ANONHUGE_INODE,
+ (flags >> MFD_HUGE_SHIFT) &
+ MFD_HUGE_MASK);
+ } else
+ file = shmem_file_setup(name, 0, VM_NORESERVE);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ goto err_fd;
+ }
+ file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
+ file->f_flags |= O_RDWR | O_LARGEFILE;
+
+ if (flags & MFD_ALLOW_SEALING) {
+ file_seals = memfd_file_seals_ptr(file);
+ *file_seals &= ~F_SEAL_SEAL;
+ }
+
+ fd_install(fd, file);
+ kfree(name);
+ return fd;
+
+err_fd:
+ put_unused_fd(fd);
+err_name:
+ kfree(name);
+ return error;
+}
diff --git a/mm/memory.c b/mm/memory.c
index 01f5464e0fd2..7206a634270b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -817,17 +817,12 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
* PFNMAP mappings in order to support COWable mappings.
*
*/
-#ifdef __HAVE_ARCH_PTE_SPECIAL
-# define HAVE_PTE_SPECIAL 1
-#else
-# define HAVE_PTE_SPECIAL 0
-#endif
struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte, bool with_public_device)
{
unsigned long pfn = pte_pfn(pte);
- if (HAVE_PTE_SPECIAL) {
+ if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
if (likely(!pte_special(pte)))
goto check_pfn;
if (vma->vm_ops && vma->vm_ops->find_special_page)
@@ -862,7 +857,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
return NULL;
}
- /* !HAVE_PTE_SPECIAL case follows: */
+ /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
@@ -881,6 +876,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
if (is_zero_pfn(pfn))
return NULL;
+
check_pfn:
if (unlikely(pfn > highest_memmap_pfn)) {
print_bad_pte(vma, addr, pte, NULL);
@@ -904,7 +900,7 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
/*
* There is no pmd_special() but there may be special pmds, e.g.
* in a direct-access (dax) mapping, so let's just replicate the
- * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+ * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
*/
if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
@@ -1660,16 +1656,15 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
*
* The entire address range must be fully contained within the vma.
*
- * Returns 0 if successful.
*/
-int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size)
{
if (address < vma->vm_start || address + size > vma->vm_end ||
!(vma->vm_flags & VM_PFNMAP))
- return -1;
+ return;
+
zap_page_range_single(vma, address, size, NULL);
- return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
@@ -1933,7 +1928,8 @@ static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
* than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
* without pte special, it would there be refcounted as a normal page.
*/
- if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
+ !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
struct page *page;
/*
@@ -1955,12 +1951,25 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
}
EXPORT_SYMBOL(vm_insert_mixed);
-int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn)
+/*
+ * If the insertion of PTE failed because someone else already added a
+ * different entry in the mean time, we treat that as success as we assume
+ * the same entry was actually inserted.
+ */
+
+vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
+ unsigned long addr, pfn_t pfn)
{
- return __vm_insert_mixed(vma, addr, pfn, true);
+ int err;
+
+ err = __vm_insert_mixed(vma, addr, pfn, true);
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+ return VM_FAULT_NOPAGE;
}
-EXPORT_SYMBOL(vm_insert_mixed_mkwrite);
+EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
/*
* maps a range of physical memory into the requested pages. the old
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 25982467800b..7deb49f69e27 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1237,6 +1237,29 @@ static struct page *next_active_pageblock(struct page *page)
return page + pageblock_nr_pages;
}
+static bool is_pageblock_removable_nolock(struct page *page)
+{
+ struct zone *zone;
+ unsigned long pfn;
+
+ /*
+ * We have to be careful here because we are iterating over memory
+ * sections which are not zone aware so we might end up outside of
+ * the zone but still within the section.
+ * We have to take care about the node as well. If the node is offline
+ * its NODE_DATA will be NULL - see page_zone.
+ */
+ if (!node_online(page_to_nid(page)))
+ return false;
+
+ zone = page_zone(page);
+ pfn = page_to_pfn(page);
+ if (!zone_spans_pfn(zone, pfn))
+ return false;
+
+ return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
+}
+
/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
diff --git a/mm/mmap.c b/mm/mmap.c
index d817764a9974..d1eb87ef4b1a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3277,7 +3277,7 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
mm->data_vm += npages;
}
-static int special_mapping_fault(struct vm_fault *vmf);
+static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
/*
* Having a close hook prevents vma merging regardless of flags.
@@ -3316,7 +3316,7 @@ static const struct vm_operations_struct legacy_special_mapping_vmops = {
.fault = special_mapping_fault,
};
-static int special_mapping_fault(struct vm_fault *vmf)
+static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgoff_t pgoff;
diff --git a/mm/nommu.c b/mm/nommu.c
index 13723736d38f..4452d8bd9ae4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1763,7 +1763,7 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
return -ENOMEM;
}
-int filemap_fault(struct vm_fault *vmf)
+vm_fault_t filemap_fault(struct vm_fault *vmf)
{
BUG();
return 0;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 8ba6cb88cf58..6694348b27e9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -256,7 +256,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
int nid;
if (is_memcg_oom(oc)) {
- oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
+ oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
return CONSTRAINT_MEMCG;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 22320ea27489..07b3c23762ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -705,16 +705,14 @@ static inline void rmv_page_order(struct page *page)
/*
* This function checks whether a page is free && is the buddy
- * we can do coalesce a page and its buddy if
+ * we can coalesce a page and its buddy if
* (a) the buddy is not in a hole (check before calling!) &&
* (b) the buddy is in the buddy system &&
* (c) a page and its buddy have the same order &&
* (d) a page and its buddy are in the same zone.
*
- * For recording whether a page is in the buddy system, we set ->_mapcount
- * PAGE_BUDDY_MAPCOUNT_VALUE.
- * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
- * serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set PageBuddy.
+ * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
*
* For recording page's order, we use page_private(page).
*/
@@ -759,9 +757,8 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
* as necessary, plus some accounting needed to play nicely with other
* parts of the VM system.
* At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with _mapcount
- * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
- * field.
+ * free pages of length of (1 << order) and marked with PageBuddy.
+ * Page's order is recorded in page_private(page) field.
* So when we are allocating or freeing one, we can derive the state of the
* other. That is, if we allocate a small block, and both were
* free, the remainder of the region must be split into blocks.
@@ -946,7 +943,7 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
}
switch (page - head_page) {
case 1:
- /* the first tail page: ->mapping is compound_mapcount() */
+ /* the first tail page: ->mapping may be compound_mapcount() */
if (unlikely(compound_mapcount(page))) {
bad_page(page, "nonzero compound_mapcount", 0);
goto out;
@@ -955,7 +952,7 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
case 2:
/*
* the second tail page: ->mapping is
- * page_deferred_list().next -- ignore value.
+ * deferred_list.next -- ignore value.
*/
break;
default:
@@ -3701,7 +3698,7 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
#endif /* CONFIG_COMPACTION */
#ifdef CONFIG_LOCKDEP
-struct lockdep_map __fs_reclaim_map =
+static struct lockdep_map __fs_reclaim_map =
STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
static bool __need_fs_reclaim(gfp_t gfp_mask)
@@ -3726,17 +3723,27 @@ static bool __need_fs_reclaim(gfp_t gfp_mask)
return true;
}
+void __fs_reclaim_acquire(void)
+{
+ lock_map_acquire(&__fs_reclaim_map);
+}
+
+void __fs_reclaim_release(void)
+{
+ lock_map_release(&__fs_reclaim_map);
+}
+
void fs_reclaim_acquire(gfp_t gfp_mask)
{
if (__need_fs_reclaim(gfp_mask))
- lock_map_acquire(&__fs_reclaim_map);
+ __fs_reclaim_acquire();
}
EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
void fs_reclaim_release(gfp_t gfp_mask)
{
if (__need_fs_reclaim(gfp_mask))
- lock_map_release(&__fs_reclaim_map);
+ __fs_reclaim_release();
}
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif
@@ -3754,8 +3761,8 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
/* We now go into synchronous reclaim */
cpuset_memory_pressure_bump();
- noreclaim_flag = memalloc_noreclaim_save();
fs_reclaim_acquire(gfp_mask);
+ noreclaim_flag = memalloc_noreclaim_save();
reclaim_state.reclaimed_slab = 0;
current->reclaim_state = &reclaim_state;
@@ -3763,8 +3770,8 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
ac->nodemask);
current->reclaim_state = NULL;
- fs_reclaim_release(gfp_mask);
memalloc_noreclaim_restore(noreclaim_flag);
+ fs_reclaim_release(gfp_mask);
cond_resched();
@@ -4162,7 +4169,6 @@ retry:
* orientated.
*/
if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
- ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
ac->high_zoneidx, ac->nodemask);
}
@@ -4326,8 +4332,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
}
/* Determine whether to spread dirty pages and what the first usable zone */
-static inline void finalise_ac(gfp_t gfp_mask,
- unsigned int order, struct alloc_context *ac)
+static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
{
/* Dirty zone balancing only done in the fast path */
ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
@@ -4358,7 +4363,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
return NULL;
- finalise_ac(gfp_mask, order, &ac);
+ finalise_ac(gfp_mask, &ac);
/* First allocation attempt */
page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
@@ -6229,18 +6234,18 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
- unsigned long size, realsize, freesize, memmap_pages;
+ unsigned long size, freesize, memmap_pages;
unsigned long zone_start_pfn = zone->zone_start_pfn;
size = zone->spanned_pages;
- realsize = freesize = zone->present_pages;
+ freesize = zone->present_pages;
/*
* Adjust freesize so that it accounts for how much memory
* is used by this zone for memmap. This affects the watermark
* and per-cpu initialisations
*/
- memmap_pages = calc_memmap_size(size, realsize);
+ memmap_pages = calc_memmap_size(size, freesize);
if (!is_highmem_idx(j)) {
if (freesize >= memmap_pages) {
freesize -= memmap_pages;
@@ -6272,7 +6277,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
* when the bootmem allocator frees pages into the buddy system.
* And all highmem pages will be managed by the buddy system.
*/
- zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
+ zone->managed_pages = freesize;
#ifdef CONFIG_NUMA
zone->node = nid;
#endif
@@ -7682,29 +7687,6 @@ unmovable:
return true;
}
-bool is_pageblock_removable_nolock(struct page *page)
-{
- struct zone *zone;
- unsigned long pfn;
-
- /*
- * We have to be careful here because we are iterating over memory
- * sections which are not zone aware so we might end up outside of
- * the zone but still within the section.
- * We have to take care about the node as well. If the node is offline
- * its NODE_DATA will be NULL - see page_zone.
- */
- if (!node_online(page_to_nid(page)))
- return false;
-
- zone = page_zone(page);
- pfn = page_to_pfn(page);
- if (!zone_spans_pfn(zone, pfn))
- return false;
-
- return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
-}
-
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static unsigned long pfn_max_align_down(unsigned long pfn)
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 2a8df3ad60a4..de31470655f6 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -13,6 +13,40 @@
#include <linux/bug.h>
#include <asm/page.h>
+static void propagate_protected_usage(struct page_counter *c,
+ unsigned long usage)
+{
+ unsigned long protected, old_protected;
+ long delta;
+
+ if (!c->parent)
+ return;
+
+ if (c->min || atomic_long_read(&c->min_usage)) {
+ if (usage <= c->min)
+ protected = usage;
+ else
+ protected = 0;
+
+ old_protected = atomic_long_xchg(&c->min_usage, protected);
+ delta = protected - old_protected;
+ if (delta)
+ atomic_long_add(delta, &c->parent->children_min_usage);
+ }
+
+ if (c->low || atomic_long_read(&c->low_usage)) {
+ if (usage <= c->low)
+ protected = usage;
+ else
+ protected = 0;
+
+ old_protected = atomic_long_xchg(&c->low_usage, protected);
+ delta = protected - old_protected;
+ if (delta)
+ atomic_long_add(delta, &c->parent->children_low_usage);
+ }
+}
+
/**
* page_counter_cancel - take pages out of the local counter
* @counter: counter
@@ -22,7 +56,8 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
long new;
- new = atomic_long_sub_return(nr_pages, &counter->count);
+ new = atomic_long_sub_return(nr_pages, &counter->usage);
+ propagate_protected_usage(counter, new);
/* More uncharges than charges? */
WARN_ON_ONCE(new < 0);
}
@@ -41,7 +76,8 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
for (c = counter; c; c = c->parent) {
long new;
- new = atomic_long_add_return(nr_pages, &c->count);
+ new = atomic_long_add_return(nr_pages, &c->usage);
+ propagate_protected_usage(counter, new);
/*
* This is indeed racy, but we can live with some
* inaccuracy in the watermark.
@@ -82,9 +118,10 @@ bool page_counter_try_charge(struct page_counter *counter,
* we either see the new limit or the setter sees the
* counter has changed and retries.
*/
- new = atomic_long_add_return(nr_pages, &c->count);
- if (new > c->limit) {
- atomic_long_sub(nr_pages, &c->count);
+ new = atomic_long_add_return(nr_pages, &c->usage);
+ if (new > c->max) {
+ atomic_long_sub(nr_pages, &c->usage);
+ propagate_protected_usage(counter, new);
/*
* This is racy, but we can live with some
* inaccuracy in the failcnt.
@@ -93,6 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter,
*fail = c;
goto failed;
}
+ propagate_protected_usage(counter, new);
/*
* Just like with failcnt, we can live with some
* inaccuracy in the watermark.
@@ -123,20 +161,20 @@ void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
}
/**
- * page_counter_limit - limit the number of pages allowed
+ * page_counter_set_max - set the maximum number of pages allowed
* @counter: counter
- * @limit: limit to set
+ * @nr_pages: limit to set
*
* Returns 0 on success, -EBUSY if the current number of pages on the
* counter already exceeds the specified limit.
*
* The caller must serialize invocations on the same counter.
*/
-int page_counter_limit(struct page_counter *counter, unsigned long limit)
+int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
for (;;) {
unsigned long old;
- long count;
+ long usage;
/*
* Update the limit while making sure that it's not
@@ -149,22 +187,56 @@ int page_counter_limit(struct page_counter *counter, unsigned long limit)
* the limit, so if it sees the old limit, we see the
* modified counter and retry.
*/
- count = atomic_long_read(&counter->count);
+ usage = atomic_long_read(&counter->usage);
- if (count > limit)
+ if (usage > nr_pages)
return -EBUSY;
- old = xchg(&counter->limit, limit);
+ old = xchg(&counter->max, nr_pages);
- if (atomic_long_read(&counter->count) <= count)
+ if (atomic_long_read(&counter->usage) <= usage)
return 0;
- counter->limit = old;
+ counter->max = old;
cond_resched();
}
}
/**
+ * page_counter_set_min - set the amount of protected memory
+ * @counter: counter
+ * @nr_pages: value to set
+ *
+ * The caller must serialize invocations on the same counter.
+ */
+void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
+{
+ struct page_counter *c;
+
+ counter->min = nr_pages;
+
+ for (c = counter; c; c = c->parent)
+ propagate_protected_usage(c, atomic_long_read(&c->usage));
+}
+
+/**
+ * page_counter_set_low - set the amount of protected memory
+ * @counter: counter
+ * @nr_pages: value to set
+ *
+ * The caller must serialize invocations on the same counter.
+ */
+void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
+{
+ struct page_counter *c;
+
+ counter->low = nr_pages;
+
+ for (c = counter; c; c = c->parent)
+ propagate_protected_usage(c, atomic_long_read(&c->usage));
+}
+
+/**
* page_counter_memparse - memparse() for page counter limits
* @buf: string to parse
* @max: string meaning maximum possible value
diff --git a/mm/shmem.c b/mm/shmem.c
index 9d6c7e595415..e9a7ac74823d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -327,7 +327,7 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
pgoff_t index, void *expected, void *replacement)
{
struct radix_tree_node *node;
- void **pslot;
+ void __rcu **pslot;
void *item;
VM_BUG_ON(!expected);
@@ -395,7 +395,7 @@ static bool shmem_confirm_swap(struct address_space *mapping,
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */
-int shmem_huge __read_mostly;
+static int shmem_huge __read_mostly;
#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
@@ -571,6 +571,15 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
+{
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+ (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
+ shmem_huge != SHMEM_HUGE_DENY)
+ return true;
+ return false;
+}
+
/*
* Like add_to_page_cache_locked, but error if expected item has gone.
*/
@@ -682,7 +691,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
struct radix_tree_iter iter;
- void **slot;
+ void __rcu **slot;
struct page *page;
unsigned long swapped = 0;
@@ -988,6 +997,7 @@ static int shmem_getattr(const struct path *path, struct kstat *stat,
{
struct inode *inode = path->dentry->d_inode;
struct shmem_inode_info *info = SHMEM_I(inode);
+ struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
spin_lock_irq(&info->lock);
@@ -995,6 +1005,10 @@ static int shmem_getattr(const struct path *path, struct kstat *stat,
spin_unlock_irq(&info->lock);
}
generic_fillattr(inode, stat);
+
+ if (is_huge_enabled(sb_info))
+ stat->blksize = HPAGE_PMD_SIZE;
+
return 0;
}
@@ -1098,13 +1112,19 @@ static void shmem_evict_inode(struct inode *inode)
static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
{
struct radix_tree_iter iter;
- void **slot;
+ void __rcu **slot;
unsigned long found = -1;
unsigned int checked = 0;
rcu_read_lock();
radix_tree_for_each_slot(slot, root, &iter, 0) {
- if (*slot == item) {
+ void *entry = radix_tree_deref_slot(slot);
+
+ if (radix_tree_deref_retry(entry)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ if (entry == item) {
found = iter.index;
break;
}
@@ -1322,9 +1342,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (!swap.val)
goto redirty;
- if (mem_cgroup_try_charge_swap(page, swap))
- goto free_swap;
-
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
* if it's not already there. Do it now before the page is
@@ -1353,7 +1370,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
}
mutex_unlock(&shmem_swaplist_mutex);
-free_swap:
put_swap_page(page, swap);
redirty:
set_page_dirty(page);
@@ -1404,10 +1420,9 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
struct shmem_inode_info *info, pgoff_t index)
{
/* Create a pseudo vma that just contains the policy */
- vma->vm_start = 0;
+ memset(vma, 0, sizeof(*vma));
/* Bias interleave by inode number to distribute better across nodes */
vma->vm_pgoff = index + info->vfs_inode.i_ino;
- vma->vm_ops = NULL;
vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
}
@@ -1931,14 +1946,14 @@ static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, in
return ret;
}
-static int shmem_fault(struct vm_fault *vmf)
+static vm_fault_t shmem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
enum sgp_type sgp;
- int error;
- int ret = VM_FAULT_LOCKED;
+ int err;
+ vm_fault_t ret = VM_FAULT_LOCKED;
/*
* Trinity finds that probing a hole which tmpfs is punching can
@@ -2006,10 +2021,10 @@ static int shmem_fault(struct vm_fault *vmf)
else if (vma->vm_flags & VM_HUGEPAGE)
sgp = SGP_HUGE;
- error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
+ err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
gfp, vma, vmf, &ret);
- if (error)
- return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+ if (err)
+ return vmf_error(err);
return ret;
}
@@ -2616,241 +2631,6 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
return offset;
}
-/*
- * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
- * so reuse a tag which we firmly believe is never set or cleared on shmem.
- */
-#define SHMEM_TAG_PINNED PAGECACHE_TAG_TOWRITE
-#define LAST_SCAN 4 /* about 150ms max */
-
-static void shmem_tag_pins(struct address_space *mapping)
-{
- struct radix_tree_iter iter;
- void **slot;
- pgoff_t start;
- struct page *page;
-
- lru_add_drain();
- start = 0;
- rcu_read_lock();
-
- radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
- page = radix_tree_deref_slot(slot);
- if (!page || radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
- } else if (page_count(page) - page_mapcount(page) > 1) {
- xa_lock_irq(&mapping->i_pages);
- radix_tree_tag_set(&mapping->i_pages, iter.index,
- SHMEM_TAG_PINNED);
- xa_unlock_irq(&mapping->i_pages);
- }
-
- if (need_resched()) {
- slot = radix_tree_iter_resume(slot, &iter);
- cond_resched_rcu();
- }
- }
- rcu_read_unlock();
-}
-
-/*
- * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
- * via get_user_pages(), drivers might have some pending I/O without any active
- * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all pages
- * and see whether it has an elevated ref-count. If so, we tag them and wait for
- * them to be dropped.
- * The caller must guarantee that no new user will acquire writable references
- * to those pages to avoid races.
- */
-static int shmem_wait_for_pins(struct address_space *mapping)
-{
- struct radix_tree_iter iter;
- void **slot;
- pgoff_t start;
- struct page *page;
- int error, scan;
-
- shmem_tag_pins(mapping);
-
- error = 0;
- for (scan = 0; scan <= LAST_SCAN; scan++) {
- if (!radix_tree_tagged(&mapping->i_pages, SHMEM_TAG_PINNED))
- break;
-
- if (!scan)
- lru_add_drain_all();
- else if (schedule_timeout_killable((HZ << scan) / 200))
- scan = LAST_SCAN;
-
- start = 0;
- rcu_read_lock();
- radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter,
- start, SHMEM_TAG_PINNED) {
-
- page = radix_tree_deref_slot(slot);
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- slot = radix_tree_iter_retry(&iter);
- continue;
- }
-
- page = NULL;
- }
-
- if (page &&
- page_count(page) - page_mapcount(page) != 1) {
- if (scan < LAST_SCAN)
- goto continue_resched;
-
- /*
- * On the last scan, we clean up all those tags
- * we inserted; but make a note that we still
- * found pages pinned.
- */
- error = -EBUSY;
- }
-
- xa_lock_irq(&mapping->i_pages);
- radix_tree_tag_clear(&mapping->i_pages,
- iter.index, SHMEM_TAG_PINNED);
- xa_unlock_irq(&mapping->i_pages);
-continue_resched:
- if (need_resched()) {
- slot = radix_tree_iter_resume(slot, &iter);
- cond_resched_rcu();
- }
- }
- rcu_read_unlock();
- }
-
- return error;
-}
-
-static unsigned int *memfd_file_seals_ptr(struct file *file)
-{
- if (file->f_op == &shmem_file_operations)
- return &SHMEM_I(file_inode(file))->seals;
-
-#ifdef CONFIG_HUGETLBFS
- if (file->f_op == &hugetlbfs_file_operations)
- return &HUGETLBFS_I(file_inode(file))->seals;
-#endif
-
- return NULL;
-}
-
-#define F_ALL_SEALS (F_SEAL_SEAL | \
- F_SEAL_SHRINK | \
- F_SEAL_GROW | \
- F_SEAL_WRITE)
-
-static int memfd_add_seals(struct file *file, unsigned int seals)
-{
- struct inode *inode = file_inode(file);
- unsigned int *file_seals;
- int error;
-
- /*
- * SEALING
- * Sealing allows multiple parties to share a shmem-file but restrict
- * access to a specific subset of file operations. Seals can only be
- * added, but never removed. This way, mutually untrusted parties can
- * share common memory regions with a well-defined policy. A malicious
- * peer can thus never perform unwanted operations on a shared object.
- *
- * Seals are only supported on special shmem-files and always affect
- * the whole underlying inode. Once a seal is set, it may prevent some
- * kinds of access to the file. Currently, the following seals are
- * defined:
- * SEAL_SEAL: Prevent further seals from being set on this file
- * SEAL_SHRINK: Prevent the file from shrinking
- * SEAL_GROW: Prevent the file from growing
- * SEAL_WRITE: Prevent write access to the file
- *
- * As we don't require any trust relationship between two parties, we
- * must prevent seals from being removed. Therefore, sealing a file
- * only adds a given set of seals to the file, it never touches
- * existing seals. Furthermore, the "setting seals"-operation can be
- * sealed itself, which basically prevents any further seal from being
- * added.
- *
- * Semantics of sealing are only defined on volatile files. Only
- * anonymous shmem files support sealing. More importantly, seals are
- * never written to disk. Therefore, there's no plan to support it on
- * other file types.
- */
-
- if (!(file->f_mode & FMODE_WRITE))
- return -EPERM;
- if (seals & ~(unsigned int)F_ALL_SEALS)
- return -EINVAL;
-
- inode_lock(inode);
-
- file_seals = memfd_file_seals_ptr(file);
- if (!file_seals) {
- error = -EINVAL;
- goto unlock;
- }
-
- if (*file_seals & F_SEAL_SEAL) {
- error = -EPERM;
- goto unlock;
- }
-
- if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) {
- error = mapping_deny_writable(file->f_mapping);
- if (error)
- goto unlock;
-
- error = shmem_wait_for_pins(file->f_mapping);
- if (error) {
- mapping_allow_writable(file->f_mapping);
- goto unlock;
- }
- }
-
- *file_seals |= seals;
- error = 0;
-
-unlock:
- inode_unlock(inode);
- return error;
-}
-
-static int memfd_get_seals(struct file *file)
-{
- unsigned int *seals = memfd_file_seals_ptr(file);
-
- return seals ? *seals : -EINVAL;
-}
-
-long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long error;
-
- switch (cmd) {
- case F_ADD_SEALS:
- /* disallow upper 32bit */
- if (arg > UINT_MAX)
- return -EINVAL;
-
- error = memfd_add_seals(file, arg);
- break;
- case F_GET_SEALS:
- error = memfd_get_seals(file);
- break;
- default:
- error = -EINVAL;
- break;
- }
-
- return error;
-}
-
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
@@ -3428,6 +3208,15 @@ static int shmem_match(struct inode *ino, void *vfh)
return ino->i_ino == inum && fh[0] == ino->i_generation;
}
+/* Find any alias of inode, but prefer a hashed alias */
+static struct dentry *shmem_find_alias(struct inode *inode)
+{
+ struct dentry *alias = d_find_alias(inode);
+
+ return alias ?: d_find_any_alias(inode);
+}
+
+
static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
@@ -3444,7 +3233,7 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
shmem_match, fid->raw);
if (inode) {
- dentry = d_find_alias(inode);
+ dentry = shmem_find_alias(inode);
iput(inode);
}
@@ -3673,93 +3462,6 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
return 0;
}
-#define MFD_NAME_PREFIX "memfd:"
-#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
-#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
-
-#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB)
-
-SYSCALL_DEFINE2(memfd_create,
- const char __user *, uname,
- unsigned int, flags)
-{
- unsigned int *file_seals;
- struct file *file;
- int fd, error;
- char *name;
- long len;
-
- if (!(flags & MFD_HUGETLB)) {
- if (flags & ~(unsigned int)MFD_ALL_FLAGS)
- return -EINVAL;
- } else {
- /* Allow huge page size encoding in flags. */
- if (flags & ~(unsigned int)(MFD_ALL_FLAGS |
- (MFD_HUGE_MASK << MFD_HUGE_SHIFT)))
- return -EINVAL;
- }
-
- /* length includes terminating zero */
- len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
- if (len <= 0)
- return -EFAULT;
- if (len > MFD_NAME_MAX_LEN + 1)
- return -EINVAL;
-
- name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
-
- strcpy(name, MFD_NAME_PREFIX);
- if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
- error = -EFAULT;
- goto err_name;
- }
-
- /* terminating-zero may have changed after strnlen_user() returned */
- if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
- error = -EFAULT;
- goto err_name;
- }
-
- fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
- if (fd < 0) {
- error = fd;
- goto err_name;
- }
-
- if (flags & MFD_HUGETLB) {
- struct user_struct *user = NULL;
-
- file = hugetlb_file_setup(name, 0, VM_NORESERVE, &user,
- HUGETLB_ANONHUGE_INODE,
- (flags >> MFD_HUGE_SHIFT) &
- MFD_HUGE_MASK);
- } else
- file = shmem_file_setup(name, 0, VM_NORESERVE);
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto err_fd;
- }
- file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
- file->f_flags |= O_RDWR | O_LARGEFILE;
-
- if (flags & MFD_ALLOW_SEALING) {
- file_seals = memfd_file_seals_ptr(file);
- *file_seals &= ~F_SEAL_SEAL;
- }
-
- fd_install(fd, file);
- kfree(name);
- return fd;
-
-err_fd:
- put_unused_fd(fd);
-err_name:
- kfree(name);
- return error;
-}
-
#endif /* CONFIG_TMPFS */
static void shmem_put_super(struct super_block *sb)
diff --git a/mm/slab.c b/mm/slab.c
index 2f308253c3d7..36688f6c87eb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1235,8 +1235,6 @@ void __init kmem_cache_init(void)
{
int i;
- BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
- sizeof(struct rcu_head));
kmem_cache = &kmem_cache_boot;
if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
@@ -2665,6 +2663,7 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
invalid_mask, &invalid_mask, flags, &flags);
dump_stack();
}
+ WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
check_irq_off();
@@ -3071,6 +3070,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
gfp_t flags, void *objp, unsigned long caller)
{
+ WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
if (!objp)
return objp;
if (cachep->flags & SLAB_POISON) {
diff --git a/mm/slob.c b/mm/slob.c
index 623e8a5c46ce..307c2c9feb44 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -555,8 +555,10 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
flags, node);
}
- if (b && c->ctor)
+ if (b && c->ctor) {
+ WARN_ON_ONCE(flags & __GFP_ZERO);
c->ctor(b);
+ }
kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
return b;
diff --git a/mm/slub.c b/mm/slub.c
index 44aa7847324a..15505479c3ab 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -52,11 +52,11 @@
* and to synchronize major metadata changes to slab cache structures.
*
* The slab_lock is only used for debugging and on arches that do not
- * have the ability to do a cmpxchg_double. It only protects the second
- * double word in the page struct. Meaning
+ * have the ability to do a cmpxchg_double. It only protects:
* A. page->freelist -> List of object free in a page
- * B. page->counters -> Counters of objects
- * C. page->frozen -> frozen state
+ * B. page->inuse -> Number of objects in use
+ * C. page->objects -> Number of objects in page
+ * D. page->frozen -> frozen state
*
* If a slab is frozen then it is exempt from list management. It is not
* on any list. The processor that froze the slab is the one who can
@@ -316,16 +316,16 @@ static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
return (p - addr) / s->size;
}
-static inline unsigned int order_objects(unsigned int order, unsigned int size, unsigned int reserved)
+static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
- return (((unsigned int)PAGE_SIZE << order) - reserved) / size;
+ return ((unsigned int)PAGE_SIZE << order) / size;
}
static inline struct kmem_cache_order_objects oo_make(unsigned int order,
- unsigned int size, unsigned int reserved)
+ unsigned int size)
{
struct kmem_cache_order_objects x = {
- (order << OO_SHIFT) + order_objects(order, size, reserved)
+ (order << OO_SHIFT) + order_objects(order, size)
};
return x;
@@ -356,21 +356,6 @@ static __always_inline void slab_unlock(struct page *page)
__bit_spin_unlock(PG_locked, &page->flags);
}
-static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
-{
- struct page tmp;
- tmp.counters = counters_new;
- /*
- * page->counters can cover frozen/inuse/objects as well
- * as page->_refcount. If we assign to ->counters directly
- * we run the risk of losing updates to page->_refcount, so
- * be careful and only assign to the fields we need.
- */
- page->frozen = tmp.frozen;
- page->inuse = tmp.inuse;
- page->objects = tmp.objects;
-}
-
/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
@@ -392,7 +377,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
if (page->freelist == freelist_old &&
page->counters == counters_old) {
page->freelist = freelist_new;
- set_page_slub_counters(page, counters_new);
+ page->counters = counters_new;
slab_unlock(page);
return true;
}
@@ -431,7 +416,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
if (page->freelist == freelist_old &&
page->counters == counters_old) {
page->freelist = freelist_new;
- set_page_slub_counters(page, counters_new);
+ page->counters = counters_new;
slab_unlock(page);
local_irq_restore(flags);
return true;
@@ -711,7 +696,7 @@ void object_err(struct kmem_cache *s, struct page *page,
print_trailer(s, page, object);
}
-static void slab_err(struct kmem_cache *s, struct page *page,
+static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
const char *fmt, ...)
{
va_list args;
@@ -847,7 +832,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
return 1;
start = page_address(page);
- length = (PAGE_SIZE << compound_order(page)) - s->reserved;
+ length = PAGE_SIZE << compound_order(page);
end = start + length;
remainder = length % s->size;
if (!remainder)
@@ -936,7 +921,7 @@ static int check_slab(struct kmem_cache *s, struct page *page)
return 0;
}
- maxobj = order_objects(compound_order(page), s->size, s->reserved);
+ maxobj = order_objects(compound_order(page), s->size);
if (page->objects > maxobj) {
slab_err(s, page, "objects %u > max %u",
page->objects, maxobj);
@@ -986,7 +971,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
nr++;
}
- max_objects = order_objects(compound_order(page), s->size, s->reserved);
+ max_objects = order_objects(compound_order(page), s->size);
if (max_objects > MAX_OBJS_PER_PAGE)
max_objects = MAX_OBJS_PER_PAGE;
@@ -1694,24 +1679,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
- page_mapcount_reset(page);
+ page->mapping = NULL;
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
memcg_uncharge_slab(page, order, s);
__free_pages(page, order);
}
-#define need_reserve_slab_rcu \
- (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-
static void rcu_free_slab(struct rcu_head *h)
{
- struct page *page;
-
- if (need_reserve_slab_rcu)
- page = virt_to_head_page(h);
- else
- page = container_of((struct list_head *)h, struct page, lru);
+ struct page *page = container_of(h, struct page, rcu_head);
__free_slab(page->slab_cache, page);
}
@@ -1719,19 +1696,7 @@ static void rcu_free_slab(struct rcu_head *h)
static void free_slab(struct kmem_cache *s, struct page *page)
{
if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
- struct rcu_head *head;
-
- if (need_reserve_slab_rcu) {
- int order = compound_order(page);
- int offset = (PAGE_SIZE << order) - s->reserved;
-
- VM_BUG_ON(s->reserved != sizeof(*head));
- head = page_address(page) + offset;
- } else {
- head = &page->rcu_head;
- }
-
- call_rcu(head, rcu_free_slab);
+ call_rcu(&page->rcu_head, rcu_free_slab);
} else
__free_slab(s, page);
}
@@ -2444,6 +2409,8 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
struct kmem_cache_cpu *c = *pc;
struct page *page;
+ WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
+
freelist = get_partial(s, flags, node, c);
if (freelist)
@@ -3226,21 +3193,21 @@ static unsigned int slub_min_objects;
*/
static inline unsigned int slab_order(unsigned int size,
unsigned int min_objects, unsigned int max_order,
- unsigned int fract_leftover, unsigned int reserved)
+ unsigned int fract_leftover)
{
unsigned int min_order = slub_min_order;
unsigned int order;
- if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
+ if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
return get_order(size * MAX_OBJS_PER_PAGE) - 1;
- for (order = max(min_order, (unsigned int)get_order(min_objects * size + reserved));
+ for (order = max(min_order, (unsigned int)get_order(min_objects * size));
order <= max_order; order++) {
unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
unsigned int rem;
- rem = (slab_size - reserved) % size;
+ rem = slab_size % size;
if (rem <= slab_size / fract_leftover)
break;
@@ -3249,7 +3216,7 @@ static inline unsigned int slab_order(unsigned int size,
return order;
}
-static inline int calculate_order(unsigned int size, unsigned int reserved)
+static inline int calculate_order(unsigned int size)
{
unsigned int order;
unsigned int min_objects;
@@ -3266,7 +3233,7 @@ static inline int calculate_order(unsigned int size, unsigned int reserved)
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
- max_objects = order_objects(slub_max_order, size, reserved);
+ max_objects = order_objects(slub_max_order, size);
min_objects = min(min_objects, max_objects);
while (min_objects > 1) {
@@ -3275,7 +3242,7 @@ static inline int calculate_order(unsigned int size, unsigned int reserved)
fraction = 16;
while (fraction >= 4) {
order = slab_order(size, min_objects,
- slub_max_order, fraction, reserved);
+ slub_max_order, fraction);
if (order <= slub_max_order)
return order;
fraction /= 2;
@@ -3287,14 +3254,14 @@ static inline int calculate_order(unsigned int size, unsigned int reserved)
* We were unable to place multiple objects in a slab. Now
* lets see if we can place a single object there.
*/
- order = slab_order(size, 1, slub_max_order, 1, reserved);
+ order = slab_order(size, 1, slub_max_order, 1);
if (order <= slub_max_order)
return order;
/*
* Doh this slab cannot be placed using slub_max_order.
*/
- order = slab_order(size, 1, MAX_ORDER, 1, reserved);
+ order = slab_order(size, 1, MAX_ORDER, 1);
if (order < MAX_ORDER)
return order;
return -ENOSYS;
@@ -3562,7 +3529,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
if (forced_order >= 0)
order = forced_order;
else
- order = calculate_order(size, s->reserved);
+ order = calculate_order(size);
if ((int)order < 0)
return 0;
@@ -3580,8 +3547,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
/*
* Determine the number of objects per slab
*/
- s->oo = oo_make(order, size, s->reserved);
- s->min = oo_make(get_order(size), size, s->reserved);
+ s->oo = oo_make(order, size);
+ s->min = oo_make(get_order(size), size);
if (oo_objects(s->oo) > oo_objects(s->max))
s->max = s->oo;
@@ -3591,14 +3558,10 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
- s->reserved = 0;
#ifdef CONFIG_SLAB_FREELIST_HARDENED
s->random = get_random_long();
#endif
- if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
- s->reserved = sizeof(struct rcu_head);
-
if (!calculate_sizes(s, -1))
goto error;
if (disable_higher_order_debug) {
@@ -4239,12 +4202,6 @@ void __init kmem_cache_init(void)
SLAB_HWCACHE_ALIGN, 0, 0);
kmem_cache = bootstrap(&boot_kmem_cache);
-
- /*
- * Allocate kmem_cache_node properly from the kmem_cache slab.
- * kmem_cache_node is separately allocated so no need to
- * update any list pointers.
- */
kmem_cache_node = bootstrap(&boot_kmem_cache_node);
/* Now we can use the kmem_cache to allocate kmalloc slabs */
@@ -5117,12 +5074,6 @@ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(destroy_by_rcu);
-static ssize_t reserved_show(struct kmem_cache *s, char *buf)
-{
- return sprintf(buf, "%u\n", s->reserved);
-}
-SLAB_ATTR_RO(reserved);
-
#ifdef CONFIG_SLUB_DEBUG
static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
@@ -5435,7 +5386,6 @@ static struct attribute *slab_attrs[] = {
&reclaim_account_attr.attr,
&destroy_by_rcu_attr.attr,
&shrink_attr.attr,
- &reserved_attr.attr,
&slabs_cpu_partial_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
&total_objects_attr.attr,
diff --git a/mm/sparse.c b/mm/sparse.c
index 73dc2fcc0eab..f13f2723950a 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -190,15 +190,13 @@ static inline int next_present_section_nr(int section_nr)
section_nr++;
if (present_section_nr(section_nr))
return section_nr;
- } while ((section_nr < NR_MEM_SECTIONS) &&
- (section_nr <= __highest_present_section_nr));
+ } while ((section_nr <= __highest_present_section_nr));
return -1;
}
#define for_each_present_section_nr(start, section_nr) \
for (section_nr = next_present_section_nr(start-1); \
((section_nr >= 0) && \
- (section_nr < NR_MEM_SECTIONS) && \
(section_nr <= __highest_present_section_nr)); \
section_nr = next_present_section_nr(section_nr))
@@ -524,7 +522,7 @@ static void __init alloc_usemap_and_memmap(void (*alloc_func)
map_count = 1;
}
/* ok, last chunk */
- alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
+ alloc_func(data, pnum_begin, __highest_present_section_nr+1,
map_count, nodeid_begin);
}
diff --git a/mm/swap.c b/mm/swap.c
index 3dd518832096..26fc9b5f1b6c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -29,6 +29,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
+#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
@@ -743,7 +744,7 @@ void release_pages(struct page **pages, int nr)
flags);
locked_pgdat = NULL;
}
- put_zone_device_private_or_public_page(page);
+ put_devmap_managed_page(page);
continue;
}
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index f2641894f440..f51ac051c0c9 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -317,7 +317,7 @@ swp_entry_t get_swap_page(struct page *page)
if (PageTransHuge(page)) {
if (IS_ENABLED(CONFIG_THP_SWAP))
get_swap_pages(1, true, &entry);
- return entry;
+ goto out;
}
/*
@@ -347,10 +347,14 @@ repeat:
}
mutex_unlock(&cache->alloc_lock);
if (entry.val)
- return entry;
+ goto out;
}
get_swap_pages(1, false, &entry);
-
+out:
+ if (mem_cgroup_try_charge_swap(page, entry)) {
+ put_swap_page(page, entry);
+ entry.val = 0;
+ }
return entry;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 07f9aa2340c3..ab8e59cd18ea 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -216,9 +216,6 @@ int add_to_swap(struct page *page)
if (!entry.val)
return 0;
- if (mem_cgroup_try_charge_swap(page, entry))
- goto fail;
-
/*
* Radix-tree node allocations from PF_MEMALLOC contexts could
* completely exhaust the page allocator. __GFP_NOMEMALLOC
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 39791b81ede7..5029f241908f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -404,7 +404,8 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
- bool zeropage)
+ bool zeropage,
+ bool *mmap_changing)
{
struct vm_area_struct *dst_vma;
ssize_t err;
@@ -431,6 +432,15 @@ retry:
down_read(&dst_mm->mmap_sem);
/*
+ * If memory mappings are changing because of non-cooperative
+ * operation (e.g. mremap) running in parallel, bail out and
+ * request the user to retry later
+ */
+ err = -EAGAIN;
+ if (mmap_changing && READ_ONCE(*mmap_changing))
+ goto out_unlock;
+
+ /*
* Make sure the vma is not shared, that the dst range is
* both valid and fully within a single existing vma.
*/
@@ -563,13 +573,15 @@ out:
}
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
- unsigned long src_start, unsigned long len)
+ unsigned long src_start, unsigned long len,
+ bool *mmap_changing)
{
- return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
+ return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
+ mmap_changing);
}
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
- unsigned long len)
+ unsigned long len, bool *mmap_changing)
{
- return __mcopy_atomic(dst_mm, start, 0, len, true);
+ return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
}
diff --git a/mm/util.c b/mm/util.c
index c2d0a7cdb189..3351659200e6 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -391,7 +391,8 @@ EXPORT_SYMBOL(vm_mmap);
* __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
* preferable to the vmalloc fallback, due to visible performance drawbacks.
*
- * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people.
+ * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
+ * fall back to vmalloc.
*/
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -402,7 +403,8 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
* vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
* so the given set of flags has to be compatible.
*/
- WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
+ if ((flags & GFP_KERNEL) != GFP_KERNEL)
+ return kmalloc_node(size, flags, node);
/*
* We want to attempt a large physically contiguous block first because
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 63a5f502da08..89efac3a020e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -603,26 +603,6 @@ static void unmap_vmap_area(struct vmap_area *va)
vunmap_page_range(va->va_start, va->va_end);
}
-static void vmap_debug_free_range(unsigned long start, unsigned long end)
-{
- /*
- * Unmap page tables and force a TLB flush immediately if pagealloc
- * debugging is enabled. This catches use after free bugs similarly to
- * those in linear kernel virtual address space after a page has been
- * freed.
- *
- * All the lazy freeing logic is still retained, in order to minimise
- * intrusiveness of this debugging feature.
- *
- * This is going to be *slow* (linear kernel virtual address debugging
- * doesn't do a broadcast TLB flush so it is a lot faster).
- */
- if (debug_pagealloc_enabled()) {
- vunmap_page_range(start, end);
- flush_tlb_kernel_range(start, end);
- }
-}
-
/*
* lazy_max_pages is the maximum amount of virtual address space we gather up
* before attempting to purge with a TLB flush.
@@ -756,6 +736,9 @@ static void free_unmap_vmap_area(struct vmap_area *va)
{
flush_cache_vunmap(va->va_start, va->va_end);
unmap_vmap_area(va);
+ if (debug_pagealloc_enabled())
+ flush_tlb_kernel_range(va->va_start, va->va_end);
+
free_vmap_area_noflush(va);
}
@@ -1053,6 +1036,10 @@ static void vb_free(const void *addr, unsigned long size)
vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
+ if (debug_pagealloc_enabled())
+ flush_tlb_kernel_range((unsigned long)addr,
+ (unsigned long)addr + size);
+
spin_lock(&vb->lock);
/* Expand dirty range */
@@ -1141,16 +1128,16 @@ void vm_unmap_ram(const void *mem, unsigned int count)
BUG_ON(addr > VMALLOC_END);
BUG_ON(!PAGE_ALIGNED(addr));
- debug_check_no_locks_freed(mem, size);
- vmap_debug_free_range(addr, addr+size);
-
if (likely(count <= VMAP_MAX_ALLOC)) {
+ debug_check_no_locks_freed(mem, size);
vb_free(mem, size);
return;
}
va = find_vmap_area(addr);
BUG_ON(!va);
+ debug_check_no_locks_freed((void *)va->va_start,
+ (va->va_end - va->va_start));
free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
@@ -1499,7 +1486,6 @@ struct vm_struct *remove_vm_area(const void *addr)
va->flags |= VM_LAZY_FREE;
spin_unlock(&vmap_area_lock);
- vmap_debug_free_range(va->va_start, va->va_end);
kasan_free_shadow(vm);
free_unmap_vmap_area(va);
@@ -1519,16 +1505,17 @@ static void __vunmap(const void *addr, int deallocate_pages)
addr))
return;
- area = remove_vm_area(addr);
+ area = find_vmap_area((unsigned long)addr)->vm;
if (unlikely(!area)) {
WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
addr);
return;
}
- debug_check_no_locks_freed(addr, get_vm_area_size(area));
- debug_check_no_obj_freed(addr, get_vm_area_size(area));
+ debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
+ debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
+ remove_vm_area(addr);
if (deallocate_pages) {
int i;
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 85350ce2d25d..4854584ec436 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -342,26 +342,6 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
vmpressure(gfp, memcg, true, vmpressure_win, 0);
}
-static enum vmpressure_levels str_to_level(const char *arg)
-{
- enum vmpressure_levels level;
-
- for (level = 0; level < VMPRESSURE_NUM_LEVELS; level++)
- if (!strcmp(vmpressure_str_levels[level], arg))
- return level;
- return -1;
-}
-
-static enum vmpressure_modes str_to_mode(const char *arg)
-{
- enum vmpressure_modes mode;
-
- for (mode = 0; mode < VMPRESSURE_NUM_MODES; mode++)
- if (!strcmp(vmpressure_str_modes[mode], arg))
- return mode;
- return -1;
-}
-
#define MAX_VMPRESSURE_ARGS_LEN (strlen("critical") + strlen("hierarchy") + 2)
/**
@@ -390,27 +370,26 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
char *token;
int ret = 0;
- spec_orig = spec = kzalloc(MAX_VMPRESSURE_ARGS_LEN + 1, GFP_KERNEL);
+ spec_orig = spec = kstrndup(args, MAX_VMPRESSURE_ARGS_LEN, GFP_KERNEL);
if (!spec) {
ret = -ENOMEM;
goto out;
}
- strncpy(spec, args, MAX_VMPRESSURE_ARGS_LEN);
/* Find required level */
token = strsep(&spec, ",");
- level = str_to_level(token);
- if (level == -1) {
- ret = -EINVAL;
+ level = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
+ if (level < 0) {
+ ret = level;
goto out;
}
/* Find optional mode */
token = strsep(&spec, ",");
if (token) {
- mode = str_to_mode(token);
- if (mode == -1) {
- ret = -EINVAL;
+ mode = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
+ if (mode < 0) {
+ ret = mode;
goto out;
}
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9270a4370d54..03822f86f288 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2544,12 +2544,28 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
unsigned long reclaimed;
unsigned long scanned;
- if (mem_cgroup_low(root, memcg)) {
+ switch (mem_cgroup_protected(root, memcg)) {
+ case MEMCG_PROT_MIN:
+ /*
+ * Hard protection.
+ * If there is no reclaimable memory, OOM.
+ */
+ continue;
+ case MEMCG_PROT_LOW:
+ /*
+ * Soft protection.
+ * Respect the protection only as long as
+ * there is an unprotected supply
+ * of reclaimable memory from other cgroups.
+ */
if (!sc->memcg_low_reclaim) {
sc->memcg_low_skipped = 1;
continue;
}
memcg_memory_event(memcg, MEMCG_LOW);
+ break;
+ case MEMCG_PROT_NONE:
+ break;
}
reclaimed = sc->nr_reclaimed;
@@ -3318,11 +3334,15 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
.may_unmap = 1,
.may_swap = 1,
};
+
+ __fs_reclaim_acquire();
+
count_vm_event(PAGEOUTRUN);
do {
unsigned long nr_reclaimed = sc.nr_reclaimed;
bool raise_priority = true;
+ bool ret;
sc.reclaim_idx = classzone_idx;
@@ -3395,7 +3415,10 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
wake_up_all(&pgdat->pfmemalloc_wait);
/* Check if kswapd should be suspending */
- if (try_to_freeze() || kthread_should_stop())
+ __fs_reclaim_release();
+ ret = try_to_freeze();
+ __fs_reclaim_acquire();
+ if (ret || kthread_should_stop())
break;
/*
@@ -3412,6 +3435,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
out:
snapshot_refaults(NULL, pgdat);
+ __fs_reclaim_release();
/*
* Return the order kswapd stopped reclaiming at as
* prepare_kswapd_sleep() takes it into account. If another caller
@@ -3600,9 +3624,7 @@ kswapd_try_sleep:
*/
trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
alloc_order);
- fs_reclaim_acquire(GFP_KERNEL);
reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
- fs_reclaim_release(GFP_KERNEL);
if (reclaim_order < alloc_order)
goto kswapd_try_sleep;
}
@@ -3684,16 +3706,16 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
unsigned long nr_reclaimed;
unsigned int noreclaim_flag;
- noreclaim_flag = memalloc_noreclaim_save();
fs_reclaim_acquire(sc.gfp_mask);
+ noreclaim_flag = memalloc_noreclaim_save();
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
p->reclaim_state = NULL;
- fs_reclaim_release(sc.gfp_mask);
memalloc_noreclaim_restore(noreclaim_flag);
+ fs_reclaim_release(sc.gfp_mask);
return nr_reclaimed;
}
@@ -3870,6 +3892,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
};
cond_resched();
+ fs_reclaim_acquire(sc.gfp_mask);
/*
* We need to be able to allocate from the reserves for RECLAIM_UNMAP
* and we also need to be able to write out pages for RECLAIM_WRITE
@@ -3877,7 +3900,6 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
*/
noreclaim_flag = memalloc_noreclaim_save();
p->flags |= PF_SWAPWRITE;
- fs_reclaim_acquire(sc.gfp_mask);
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
@@ -3892,9 +3914,9 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
}
p->reclaim_state = NULL;
- fs_reclaim_release(gfp_mask);
current->flags &= ~PF_SWAPWRITE;
memalloc_noreclaim_restore(noreclaim_flag);
+ fs_reclaim_release(sc.gfp_mask);
return sc.nr_reclaimed >= nr_pages;
}
diff --git a/net/9p/client.c b/net/9p/client.c
index 21e6df1cc70f..18c5271910dc 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -198,8 +198,6 @@ static int parse_opts(char *opts, struct p9_client *clnt)
pr_info("Could not find request transport: %s\n",
s);
ret = -EINVAL;
- kfree(s);
- goto free_and_return;
}
kfree(s);
break;
@@ -214,13 +212,12 @@ static int parse_opts(char *opts, struct p9_client *clnt)
"problem allocating copy of version arg\n");
goto free_and_return;
}
- ret = get_protocol_version(s);
- if (ret == -EINVAL) {
- kfree(s);
- goto free_and_return;
- }
+ r = get_protocol_version(s);
+ if (r < 0)
+ ret = r;
+ else
+ clnt->proto_version = r;
kfree(s);
- clnt->proto_version = ret;
break;
default:
continue;
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 0f19960390a6..2e2b8bca54f3 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -38,7 +38,6 @@
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <linux/rwlock.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
diff --git a/samples/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c
index e7823ffb1ca0..85571e90191f 100644
--- a/samples/auxdisplay/cfag12864b-example.c
+++ b/samples/auxdisplay/cfag12864b-example.c
@@ -1,25 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Filename: cfag12864b-example.c
* Version: 0.1.0
* Description: cfag12864b LCD userspace example program
- * License: GPLv2
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/*
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 5af34a2b0cd9..1bb594fcfe12 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -251,6 +251,9 @@ DTC_FLAGS += -Wno-unit_address_vs_reg \
-Wno-unit_address_format \
-Wno-avoid_unnecessary_addr_size \
-Wno-alias_paths \
+ -Wno-graph_child_address \
+ -Wno-graph_port \
+ -Wno-unique_unit_address \
-Wno-pci_device_reg
endif
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e6033d3c48d3..e3b7362b0ee4 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1,9 +1,11 @@
#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+#
# (c) 2001, Dave Jones. (the file handling bit)
# (c) 2005, Joel Schopp <jschopp@austin.ibm.com> (the ugly bit)
# (c) 2007,2008, Andy Whitcroft <apw@uk.ibm.com> (new conditions, test suite)
# (c) 2008-2010 Andy Whitcroft <apw@canonical.com>
-# Licensed under the terms of the GNU GPL License version 2
+# (c) 2010-2018 Joe Perches <joe@perches.com>
use strict;
use warnings;
@@ -2375,6 +2377,14 @@ sub process {
my $rawline = $rawlines[$linenr - 1];
+# check if it's a mode change, rename or start of a patch
+ if (!$in_commit_log &&
+ ($line =~ /^ mode change [0-7]+ => [0-7]+ \S+\s*$/ ||
+ ($line =~ /^rename (?:from|to) \S+\s*$/ ||
+ $line =~ /^diff --git a\/[\w\/\.\_\-]+ b\/\S+\s*$/))) {
+ $is_patch = 1;
+ }
+
#extract the line range in the file after the patch is applied
if (!$in_commit_log &&
$line =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@(.*)/) {
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 815eaf140ab5..a2cc1036c915 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -255,7 +255,7 @@ static void check_duplicate_node_names(struct check *c, struct dt_info *dti,
child2;
child2 = child2->next_sibling)
if (streq(child->name, child2->name))
- FAIL(c, dti, node, "Duplicate node name");
+ FAIL(c, dti, child2, "Duplicate node name");
}
ERROR(duplicate_node_names, check_duplicate_node_names, NULL);
@@ -317,6 +317,11 @@ static void check_unit_address_vs_reg(struct check *c, struct dt_info *dti,
const char *unitname = get_unitname(node);
struct property *prop = get_property(node, "reg");
+ if (get_subnode(node, "__overlay__")) {
+ /* HACK: Overlay fragments are a special case */
+ return;
+ }
+
if (!prop) {
prop = get_property(node, "ranges");
if (prop && !prop->val.len)
@@ -579,6 +584,8 @@ static void fixup_phandle_references(struct check *c, struct dt_info *dti,
phandle = get_node_phandle(dt, refnode);
*((fdt32_t *)(prop->val.val + m->offset)) = cpu_to_fdt32(phandle);
+
+ reference_node(refnode);
}
}
}
@@ -609,11 +616,21 @@ static void fixup_path_references(struct check *c, struct dt_info *dti,
path = refnode->fullpath;
prop->val = data_insert_at_marker(prop->val, m, path,
strlen(path) + 1);
+
+ reference_node(refnode);
}
}
}
ERROR(path_references, fixup_path_references, NULL, &duplicate_node_names);
+static void fixup_omit_unused_nodes(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ if (node->omit_if_unused && !node->is_referenced)
+ delete_node(node);
+}
+ERROR(omit_unused_nodes, fixup_omit_unused_nodes, NULL, &phandle_references, &path_references);
+
/*
* Semantic checks
*/
@@ -1017,6 +1034,36 @@ static void check_avoid_unnecessary_addr_size(struct check *c, struct dt_info *d
}
WARNING(avoid_unnecessary_addr_size, check_avoid_unnecessary_addr_size, NULL, &avoid_default_addr_size);
+static void check_unique_unit_address(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ struct node *childa;
+
+ if (node->addr_cells < 0 || node->size_cells < 0)
+ return;
+
+ if (!node->children)
+ return;
+
+ for_each_child(node, childa) {
+ struct node *childb;
+ const char *addr_a = get_unitname(childa);
+
+ if (!strlen(addr_a))
+ continue;
+
+ for_each_child(node, childb) {
+ const char *addr_b = get_unitname(childb);
+ if (childa == childb)
+ break;
+
+ if (streq(addr_a, addr_b))
+ FAIL(c, dti, childb, "duplicate unit-address (also used in node %s)", childa->fullpath);
+ }
+ }
+}
+WARNING(unique_unit_address, check_unique_unit_address, NULL, &avoid_default_addr_size);
+
static void check_obsolete_chosen_interrupt_controller(struct check *c,
struct dt_info *dti,
struct node *node)
@@ -1357,6 +1404,152 @@ static void check_interrupts_property(struct check *c,
}
WARNING(interrupts_property, check_interrupts_property, &phandle_references);
+static const struct bus_type graph_port_bus = {
+ .name = "graph-port",
+};
+
+static const struct bus_type graph_ports_bus = {
+ .name = "graph-ports",
+};
+
+static void check_graph_nodes(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ struct node *child;
+
+ for_each_child(node, child) {
+ if (!(strprefixeq(child->name, child->basenamelen, "endpoint") ||
+ get_property(child, "remote-endpoint")))
+ continue;
+
+ node->bus = &graph_port_bus;
+
+ /* The parent of 'port' nodes can be either 'ports' or a device */
+ if (!node->parent->bus &&
+ (streq(node->parent->name, "ports") || get_property(node, "reg")))
+ node->parent->bus = &graph_ports_bus;
+
+ break;
+ }
+
+}
+WARNING(graph_nodes, check_graph_nodes, NULL);
+
+static void check_graph_child_address(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ int cnt = 0;
+ struct node *child;
+
+ if (node->bus != &graph_ports_bus && node->bus != &graph_port_bus)
+ return;
+
+ for_each_child(node, child) {
+ struct property *prop = get_property(child, "reg");
+
+ /* No error if we have any non-zero unit address */
+ if (prop && propval_cell(prop) != 0)
+ return;
+
+ cnt++;
+ }
+
+ if (cnt == 1 && node->addr_cells != -1)
+ FAIL(c, dti, node, "graph node has single child node '%s', #address-cells/#size-cells are not necessary",
+ node->children->name);
+}
+WARNING(graph_child_address, check_graph_child_address, NULL, &graph_nodes);
+
+static void check_graph_reg(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ char unit_addr[9];
+ const char *unitname = get_unitname(node);
+ struct property *prop;
+
+ prop = get_property(node, "reg");
+ if (!prop || !unitname)
+ return;
+
+ if (!(prop->val.val && prop->val.len == sizeof(cell_t))) {
+ FAIL(c, dti, node, "graph node malformed 'reg' property");
+ return;
+ }
+
+ snprintf(unit_addr, sizeof(unit_addr), "%x", propval_cell(prop));
+ if (!streq(unitname, unit_addr))
+ FAIL(c, dti, node, "graph node unit address error, expected \"%s\"",
+ unit_addr);
+
+ if (node->parent->addr_cells != 1)
+ FAIL_PROP(c, dti, node, get_property(node, "#address-cells"),
+ "graph node '#address-cells' is %d, must be 1",
+ node->parent->addr_cells);
+ if (node->parent->size_cells != 0)
+ FAIL_PROP(c, dti, node, get_property(node, "#size-cells"),
+ "graph node '#size-cells' is %d, must be 0",
+ node->parent->size_cells);
+}
+
+static void check_graph_port(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ if (node->bus != &graph_port_bus)
+ return;
+
+ if (!strprefixeq(node->name, node->basenamelen, "port"))
+ FAIL(c, dti, node, "graph port node name should be 'port'");
+
+ check_graph_reg(c, dti, node);
+}
+WARNING(graph_port, check_graph_port, NULL, &graph_nodes);
+
+static struct node *get_remote_endpoint(struct check *c, struct dt_info *dti,
+ struct node *endpoint)
+{
+ int phandle;
+ struct node *node;
+ struct property *prop;
+
+ prop = get_property(endpoint, "remote-endpoint");
+ if (!prop)
+ return NULL;
+
+ phandle = propval_cell(prop);
+ /* Give up if this is an overlay with external references */
+ if (phandle == 0 || phandle == -1)
+ return NULL;
+
+ node = get_node_by_phandle(dti->dt, phandle);
+ if (!node)
+ FAIL_PROP(c, dti, endpoint, prop, "graph phandle is not valid");
+
+ return node;
+}
+
+static void check_graph_endpoint(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ struct node *remote_node;
+
+ if (!node->parent || node->parent->bus != &graph_port_bus)
+ return;
+
+ if (!strprefixeq(node->name, node->basenamelen, "endpoint"))
+ FAIL(c, dti, node, "graph endpont node name should be 'endpoint'");
+
+ check_graph_reg(c, dti, node);
+
+ remote_node = get_remote_endpoint(c, dti, node);
+ if (!remote_node)
+ return;
+
+ if (get_remote_endpoint(c, dti, remote_node) != node)
+ FAIL(c, dti, node, "graph connection to node '%s' is not bidirectional",
+ remote_node->fullpath);
+}
+WARNING(graph_endpoint, check_graph_endpoint, NULL, &graph_nodes);
+
static struct check *check_table[] = {
&duplicate_node_names, &duplicate_property_names,
&node_name_chars, &node_name_format, &property_name_chars,
@@ -1366,6 +1559,7 @@ static struct check *check_table[] = {
&explicit_phandles,
&phandle_references, &path_references,
+ &omit_unused_nodes,
&address_cells_is_cell, &size_cells_is_cell, &interrupt_cells_is_cell,
&device_type_is_string, &model_is_string, &status_is_string,
@@ -1390,6 +1584,7 @@ static struct check *check_table[] = {
&avoid_default_addr_size,
&avoid_unnecessary_addr_size,
+ &unique_unit_address,
&obsolete_chosen_interrupt_controller,
&chosen_node_is_root, &chosen_node_bootargs, &chosen_node_stdout_path,
@@ -1416,6 +1611,8 @@ static struct check *check_table[] = {
&alias_paths,
+ &graph_nodes, &graph_child_address, &graph_port, &graph_endpoint,
+
&always_fail,
};
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index fd825ebba69c..615b7ec6588f 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -153,6 +153,13 @@ static void PRINTF(1, 2) lexical_error(const char *fmt, ...);
return DT_DEL_NODE;
}
+<*>"/omit-if-no-ref/" {
+ DPRINT("Keyword: /omit-if-no-ref/\n");
+ DPRINT("<PROPNODENAME>\n");
+ BEGIN(PROPNODENAME);
+ return DT_OMIT_NO_REF;
+ }
+
<*>{LABEL}: {
DPRINT("Label: %s\n", yytext);
yylval.labelref = xstrdup(yytext);
diff --git a/scripts/dtc/dtc-parser.y b/scripts/dtc/dtc-parser.y
index 44af170abfea..011a5b25539a 100644
--- a/scripts/dtc/dtc-parser.y
+++ b/scripts/dtc/dtc-parser.y
@@ -63,6 +63,7 @@ extern bool treesource_error;
%token DT_BITS
%token DT_DEL_PROP
%token DT_DEL_NODE
+%token DT_OMIT_NO_REF
%token <propnodename> DT_PROPNODENAME
%token <integer> DT_LITERAL
%token <integer> DT_CHAR_LITERAL
@@ -190,18 +191,18 @@ devicetree:
}
| devicetree DT_REF nodedef
{
- struct node *target = get_node_by_ref($1, $2);
-
- if (target) {
- merge_nodes(target, $3);
+ /*
+ * We rely on the rule being always:
+ * versioninfo plugindecl memreserves devicetree
+ * so $-1 is what we want (plugindecl)
+ */
+ if ($<flags>-1 & DTSF_PLUGIN) {
+ add_orphan_node($1, $3, $2);
} else {
- /*
- * We rely on the rule being always:
- * versioninfo plugindecl memreserves devicetree
- * so $-1 is what we want (plugindecl)
- */
- if ($<flags>-1 & DTSF_PLUGIN)
- add_orphan_node($1, $3, $2);
+ struct node *target = get_node_by_ref($1, $2);
+
+ if (target)
+ merge_nodes(target, $3);
else
ERROR(&@2, "Label or path %s not found", $2);
}
@@ -219,6 +220,18 @@ devicetree:
$$ = $1;
}
+ | devicetree DT_OMIT_NO_REF DT_REF ';'
+ {
+ struct node *target = get_node_by_ref($1, $3);
+
+ if (target)
+ omit_node_if_unused(target);
+ else
+ ERROR(&@3, "Label or path %s not found", $3);
+
+
+ $$ = $1;
+ }
;
nodedef:
@@ -523,6 +536,10 @@ subnode:
{
$$ = name_node(build_node_delete(), $2);
}
+ | DT_OMIT_NO_REF subnode
+ {
+ $$ = omit_node_if_unused($2);
+ }
| DT_LABEL subnode
{
add_label(&$2->labels, $1);
diff --git a/scripts/dtc/dtc.h b/scripts/dtc/dtc.h
index 3b18a42b866e..6d667701ab6a 100644
--- a/scripts/dtc/dtc.h
+++ b/scripts/dtc/dtc.h
@@ -168,6 +168,8 @@ struct node {
struct label *labels;
const struct bus_type *bus;
+
+ bool omit_if_unused, is_referenced;
};
#define for_each_label_withdel(l0, l) \
@@ -202,6 +204,8 @@ struct property *reverse_properties(struct property *first);
struct node *build_node(struct property *proplist, struct node *children);
struct node *build_node_delete(void);
struct node *name_node(struct node *node, char *name);
+struct node *omit_node_if_unused(struct node *node);
+struct node *reference_node(struct node *node);
struct node *chain_node(struct node *first, struct node *list);
struct node *merge_nodes(struct node *old_node, struct node *new_node);
struct node *add_orphan_node(struct node *old_node, struct node *new_node, char *ref);
diff --git a/scripts/dtc/livetree.c b/scripts/dtc/livetree.c
index 57b7db2ed153..6e4c367f54b3 100644
--- a/scripts/dtc/livetree.c
+++ b/scripts/dtc/livetree.c
@@ -134,6 +134,20 @@ struct node *name_node(struct node *node, char *name)
return node;
}
+struct node *omit_node_if_unused(struct node *node)
+{
+ node->omit_if_unused = 1;
+
+ return node;
+}
+
+struct node *reference_node(struct node *node)
+{
+ node->is_referenced = 1;
+
+ return node;
+}
+
struct node *merge_nodes(struct node *old_node, struct node *new_node)
{
struct property *new_prop, *old_prop;
@@ -224,10 +238,16 @@ struct node * add_orphan_node(struct node *dt, struct node *new_node, char *ref)
struct data d = empty_data;
char *name;
- d = data_add_marker(d, REF_PHANDLE, ref);
- d = data_append_integer(d, 0xffffffff, 32);
+ if (ref[0] == '/') {
+ d = data_append_data(d, ref, strlen(ref) + 1);
- p = build_property("target", d);
+ p = build_property("target-path", d);
+ } else {
+ d = data_add_marker(d, REF_PHANDLE, ref);
+ d = data_append_integer(d, 0xffffffff, 32);
+
+ p = build_property("target", d);
+ }
xasprintf(&name, "fragment@%u",
next_orphan_fragment++);
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index ad87849e333b..b00f14ff7a17 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.4.6-gaadd0b65"
+#define DTC_VERSION "DTC 1.4.6-g84e414b0"
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 99c96e86eccb..c87fa734e3e1 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -1,4 +1,6 @@
#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+#
# (c) 2007, Joe Perches <joe@perches.com>
# created from checkpatch.pl
#
@@ -7,8 +9,6 @@
#
# usage: perl scripts/get_maintainer.pl [OPTIONS] <patch>
# perl scripts/get_maintainer.pl [OPTIONS] -f <file>
-#
-# Licensed under the terms of the GNU GPL License version 2
use warnings;
use strict;
@@ -542,7 +542,18 @@ foreach my $file (@ARGV) {
while (<$patch>) {
my $patch_line = $_;
- if (m/^\+\+\+\s+(\S+)/ or m/^---\s+(\S+)/) {
+ if (m/^ mode change [0-7]+ => [0-7]+ (\S+)\s*$/) {
+ my $filename = $1;
+ push(@files, $filename);
+ } elsif (m/^rename (?:from|to) (\S+)\s*$/) {
+ my $filename = $1;
+ push(@files, $filename);
+ } elsif (m/^diff --git a\/(\S+) b\/(\S+)\s*$/) {
+ my $filename1 = $1;
+ my $filename2 = $2;
+ push(@files, $filename1);
+ push(@files, $filename2);
+ } elsif (m/^\+\+\+\s+(\S+)/ or m/^---\s+(\S+)/) {
my $filename = $1;
$filename =~ s@^[^/]*/@@;
$filename =~ s@\n@@;
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
index ffe8179f5d41..073fe7537f6c 100644
--- a/scripts/selinux/mdp/mdp.c
+++ b/scripts/selinux/mdp/mdp.c
@@ -124,7 +124,6 @@ int main(int argc, char *argv[])
fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n");
fprintf(fout, "fs_use_xattr jffs2 user_u:base_r:base_t;\n");
fprintf(fout, "fs_use_xattr gfs2 user_u:base_r:base_t;\n");
- fprintf(fout, "fs_use_xattr lustre user_u:base_r:base_t;\n");
fprintf(fout, "fs_use_task eventpollfs user_u:base_r:base_t;\n");
fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n");
diff --git a/scripts/tags.sh b/scripts/tags.sh
index e587610d1492..66f08bb1cce9 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -179,9 +179,9 @@ regex_c=(
'/\<CLEARPAGEFLAG_NOOP(\([[:alnum:]_]*\).*/ClearPage\1/'
'/\<__CLEARPAGEFLAG_NOOP(\([[:alnum:]_]*\).*/__ClearPage\1/'
'/\<TESTCLEARFLAG_FALSE(\([[:alnum:]_]*\).*/TestClearPage\1/'
- '/^PAGE_MAPCOUNT_OPS(\([[:alnum:]_]*\).*/Page\1/'
- '/^PAGE_MAPCOUNT_OPS(\([[:alnum:]_]*\).*/__SetPage\1/'
- '/^PAGE_MAPCOUNT_OPS(\([[:alnum:]_]*\).*/__ClearPage\1/'
+ '/^PAGE_TYPE_OPS(\([[:alnum:]_]*\).*/Page\1/'
+ '/^PAGE_TYPE_OPS(\([[:alnum:]_]*\).*/__SetPage\1/'
+ '/^PAGE_TYPE_OPS(\([[:alnum:]_]*\).*/__ClearPage\1/'
'/^TASK_PFA_TEST([^,]*, *\([[:alnum:]_]*\))/task_\1/'
'/^TASK_PFA_SET([^,]*, *\([[:alnum:]_]*\))/task_set_\1/'
'/^TASK_PFA_CLEAR([^,]*, *\([[:alnum:]_]*\))/task_clear_\1/'
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
index e825e0ae78e7..d593346d0bba 100644
--- a/security/integrity/evm/Kconfig
+++ b/security/integrity/evm/Kconfig
@@ -42,6 +42,17 @@ config EVM_EXTRA_SMACK_XATTRS
additional info to the calculation, requires existing EVM
labeled file systems to be relabeled.
+config EVM_ADD_XATTRS
+ bool "Add additional EVM extended attributes at runtime"
+ depends on EVM
+ default n
+ help
+ Allow userland to provide additional xattrs for HMAC calculation.
+
+ When this option is enabled, root can add additional xattrs to the
+ list used by EVM by writing them into
+ /sys/kernel/security/integrity/evm/evm_xattrs.
+
config EVM_LOAD_X509
bool "Load an X509 certificate onto the '.evm' trusted keyring"
depends on EVM && INTEGRITY_TRUSTED_KEYRING
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index 45c4a89c02ff..1257c3c24723 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -30,6 +30,11 @@
#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP_COMPLETE | \
EVM_ALLOW_METADATA_WRITES)
+struct xattr_list {
+ struct list_head list;
+ char *name;
+};
+
extern int evm_initialized;
#define EVM_ATTR_FSUUID 0x0001
@@ -40,7 +45,7 @@ extern struct crypto_shash *hmac_tfm;
extern struct crypto_shash *hash_tfm;
/* List of EVM protected security xattrs */
-extern char *evm_config_xattrnames[];
+extern struct list_head evm_config_xattrnames;
int evm_init_key(void);
int evm_update_evmxattr(struct dentry *dentry,
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index facf9cdd577d..b60524310855 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -192,8 +192,8 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
char type, char *digest)
{
struct inode *inode = d_backing_inode(dentry);
+ struct xattr_list *xattr;
struct shash_desc *desc;
- char **xattrname;
size_t xattr_size = 0;
char *xattr_value = NULL;
int error;
@@ -209,14 +209,14 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
return PTR_ERR(desc);
error = -ENODATA;
- for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
bool is_ima = false;
- if (strcmp(*xattrname, XATTR_NAME_IMA) == 0)
+ if (strcmp(xattr->name, XATTR_NAME_IMA) == 0)
is_ima = true;
if ((req_xattr_name && req_xattr_value)
- && !strcmp(*xattrname, req_xattr_name)) {
+ && !strcmp(xattr->name, req_xattr_name)) {
error = 0;
crypto_shash_update(desc, (const u8 *)req_xattr_value,
req_xattr_value_len);
@@ -224,7 +224,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
ima_present = true;
continue;
}
- size = vfs_getxattr_alloc(dentry, *xattrname,
+ size = vfs_getxattr_alloc(dentry, xattr->name,
&xattr_value, xattr_size, GFP_NOFS);
if (size == -ENOMEM) {
error = -ENOMEM;
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 9ea9c19a545c..f9eff5041e4c 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -35,28 +35,29 @@ static const char * const integrity_status_msg[] = {
};
int evm_hmac_attrs;
-char *evm_config_xattrnames[] = {
+static struct xattr_list evm_config_default_xattrnames[] = {
#ifdef CONFIG_SECURITY_SELINUX
- XATTR_NAME_SELINUX,
+ {.name = XATTR_NAME_SELINUX},
#endif
#ifdef CONFIG_SECURITY_SMACK
- XATTR_NAME_SMACK,
+ {.name = XATTR_NAME_SMACK},
#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
- XATTR_NAME_SMACKEXEC,
- XATTR_NAME_SMACKTRANSMUTE,
- XATTR_NAME_SMACKMMAP,
+ {.name = XATTR_NAME_SMACKEXEC},
+ {.name = XATTR_NAME_SMACKTRANSMUTE},
+ {.name = XATTR_NAME_SMACKMMAP},
#endif
#endif
#ifdef CONFIG_SECURITY_APPARMOR
- XATTR_NAME_APPARMOR,
+ {.name = XATTR_NAME_APPARMOR},
#endif
#ifdef CONFIG_IMA_APPRAISE
- XATTR_NAME_IMA,
+ {.name = XATTR_NAME_IMA},
#endif
- XATTR_NAME_CAPS,
- NULL
+ {.name = XATTR_NAME_CAPS},
};
+LIST_HEAD(evm_config_xattrnames);
+
static int evm_fixmode;
static int __init evm_set_fixmode(char *str)
{
@@ -68,6 +69,17 @@ __setup("evm=", evm_set_fixmode);
static void __init evm_init_config(void)
{
+ int i, xattrs;
+
+ xattrs = ARRAY_SIZE(evm_config_default_xattrnames);
+
+ pr_info("Initialising EVM extended attributes:\n");
+ for (i = 0; i < xattrs; i++) {
+ pr_info("%s\n", evm_config_default_xattrnames[i].name);
+ list_add_tail(&evm_config_default_xattrnames[i].list,
+ &evm_config_xattrnames);
+ }
+
#ifdef CONFIG_EVM_ATTR_FSUUID
evm_hmac_attrs |= EVM_ATTR_FSUUID;
#endif
@@ -82,15 +94,15 @@ static bool evm_key_loaded(void)
static int evm_find_protected_xattrs(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
- char **xattr;
+ struct xattr_list *xattr;
int error;
int count = 0;
if (!(inode->i_opflags & IOP_XATTR))
return -EOPNOTSUPP;
- for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
- error = __vfs_getxattr(dentry, inode, *xattr, NULL, 0);
+ list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+ error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0);
if (error < 0) {
if (error == -ENODATA)
continue;
@@ -211,24 +223,25 @@ out:
static int evm_protected_xattr(const char *req_xattr_name)
{
- char **xattrname;
int namelen;
int found = 0;
+ struct xattr_list *xattr;
namelen = strlen(req_xattr_name);
- for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
- if ((strlen(*xattrname) == namelen)
- && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) {
+ list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+ if ((strlen(xattr->name) == namelen)
+ && (strncmp(req_xattr_name, xattr->name, namelen) == 0)) {
found = 1;
break;
}
if (strncmp(req_xattr_name,
- *xattrname + XATTR_SECURITY_PREFIX_LEN,
+ xattr->name + XATTR_SECURITY_PREFIX_LEN,
strlen(req_xattr_name)) == 0) {
found = 1;
break;
}
}
+
return found;
}
@@ -544,35 +557,35 @@ void __init evm_load_x509(void)
static int __init init_evm(void)
{
int error;
+ struct list_head *pos, *q;
+ struct xattr_list *xattr;
evm_init_config();
error = integrity_init_keyring(INTEGRITY_KEYRING_EVM);
if (error)
- return error;
+ goto error;
error = evm_init_secfs();
if (error < 0) {
pr_info("Error registering secfs\n");
- return error;
+ goto error;
}
- return 0;
-}
-
-/*
- * evm_display_config - list the EVM protected security extended attributes
- */
-static int __init evm_display_config(void)
-{
- char **xattrname;
+error:
+ if (error != 0) {
+ if (!list_empty(&evm_config_xattrnames)) {
+ list_for_each_safe(pos, q, &evm_config_xattrnames) {
+ xattr = list_entry(pos, struct xattr_list,
+ list);
+ list_del(pos);
+ }
+ }
+ }
- for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++)
- pr_info("%s\n", *xattrname);
- return 0;
+ return error;
}
-pure_initcall(evm_display_config);
late_initcall(init_evm);
MODULE_DESCRIPTION("Extended Verification Module");
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index feba03bbedae..637eb999e340 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -15,11 +15,21 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include "evm.h"
+static struct dentry *evm_dir;
static struct dentry *evm_init_tpm;
+static struct dentry *evm_symlink;
+
+#ifdef CONFIG_EVM_ADD_XATTRS
+static struct dentry *evm_xattrs;
+static DEFINE_MUTEX(xattr_list_mutex);
+static int evm_xattrs_locked;
+#endif
/**
* evm_read_key - read() for <securityfs>/evm
@@ -107,13 +117,203 @@ static const struct file_operations evm_key_ops = {
.write = evm_write_key,
};
+#ifdef CONFIG_EVM_ADD_XATTRS
+/**
+ * evm_read_xattrs - read() for <securityfs>/evm_xattrs
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_xattrs(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *temp;
+ int offset = 0;
+ ssize_t rc, size = 0;
+ struct xattr_list *xattr;
+
+ if (*ppos != 0)
+ return 0;
+
+ rc = mutex_lock_interruptible(&xattr_list_mutex);
+ if (rc)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(xattr, &evm_config_xattrnames, list)
+ size += strlen(xattr->name) + 1;
+
+ temp = kmalloc(size + 1, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&xattr_list_mutex);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry(xattr, &evm_config_xattrnames, list) {
+ sprintf(temp + offset, "%s\n", xattr->name);
+ offset += strlen(xattr->name) + 1;
+ }
+
+ mutex_unlock(&xattr_list_mutex);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ kfree(temp);
+
+ return rc;
+}
+
+/**
+ * evm_write_xattrs - write() for <securityfs>/evm_xattrs
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct xattr_list *xattr, *tmp;
+ struct audit_buffer *ab;
+ struct iattr newattrs;
+ struct inode *inode;
+
+ if (!capable(CAP_SYS_ADMIN) || evm_xattrs_locked)
+ return -EPERM;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (count > XATTR_NAME_MAX)
+ return -E2BIG;
+
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_EVM_XATTR);
+ if (IS_ERR(ab))
+ return PTR_ERR(ab);
+
+ xattr = kmalloc(sizeof(struct xattr_list), GFP_KERNEL);
+ if (!xattr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ xattr->name = memdup_user_nul(buf, count);
+ if (IS_ERR(xattr->name)) {
+ err = PTR_ERR(xattr->name);
+ xattr->name = NULL;
+ goto out;
+ }
+
+ /* Remove any trailing newline */
+ len = strlen(xattr->name);
+ if (len && xattr->name[len-1] == '\n')
+ xattr->name[len-1] = '\0';
+
+ if (strcmp(xattr->name, ".") == 0) {
+ evm_xattrs_locked = 1;
+ newattrs.ia_mode = S_IFREG | 0440;
+ newattrs.ia_valid = ATTR_MODE;
+ inode = evm_xattrs->d_inode;
+ inode_lock(inode);
+ err = simple_setattr(evm_xattrs, &newattrs);
+ inode_unlock(inode);
+ audit_log_format(ab, "locked");
+ if (!err)
+ err = count;
+ goto out;
+ }
+
+ audit_log_format(ab, "xattr=");
+ audit_log_untrustedstring(ab, xattr->name);
+
+ if (strncmp(xattr->name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN) != 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Guard against races in evm_read_xattrs */
+ mutex_lock(&xattr_list_mutex);
+ list_for_each_entry(tmp, &evm_config_xattrnames, list) {
+ if (strcmp(xattr->name, tmp->name) == 0) {
+ err = -EEXIST;
+ mutex_unlock(&xattr_list_mutex);
+ goto out;
+ }
+ }
+ list_add_tail_rcu(&xattr->list, &evm_config_xattrnames);
+ mutex_unlock(&xattr_list_mutex);
+
+ audit_log_format(ab, " res=0");
+ audit_log_end(ab);
+ return count;
+out:
+ audit_log_format(ab, " res=%d", err);
+ audit_log_end(ab);
+ if (xattr) {
+ kfree(xattr->name);
+ kfree(xattr);
+ }
+ return err;
+}
+
+static const struct file_operations evm_xattr_ops = {
+ .read = evm_read_xattrs,
+ .write = evm_write_xattrs,
+};
+
+static int evm_init_xattrs(void)
+{
+ evm_xattrs = securityfs_create_file("evm_xattrs", 0660, evm_dir, NULL,
+ &evm_xattr_ops);
+ if (!evm_xattrs || IS_ERR(evm_xattrs))
+ return -EFAULT;
+
+ return 0;
+}
+#else
+static int evm_init_xattrs(void)
+{
+ return 0;
+}
+#endif
+
int __init evm_init_secfs(void)
{
int error = 0;
- evm_init_tpm = securityfs_create_file("evm", S_IRUSR | S_IRGRP,
- NULL, NULL, &evm_key_ops);
- if (!evm_init_tpm || IS_ERR(evm_init_tpm))
+ evm_dir = securityfs_create_dir("evm", integrity_dir);
+ if (!evm_dir || IS_ERR(evm_dir))
+ return -EFAULT;
+
+ evm_init_tpm = securityfs_create_file("evm", 0660,
+ evm_dir, NULL, &evm_key_ops);
+ if (!evm_init_tpm || IS_ERR(evm_init_tpm)) {
error = -EFAULT;
+ goto out;
+ }
+
+ evm_symlink = securityfs_create_symlink("evm", NULL,
+ "integrity/evm/evm", NULL);
+ if (!evm_symlink || IS_ERR(evm_symlink)) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ if (evm_init_xattrs() != 0) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+out:
+ securityfs_remove(evm_symlink);
+ securityfs_remove(evm_init_tpm);
+ securityfs_remove(evm_dir);
return error;
}
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index f266e4b3b7d4..149faa81f6f0 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -21,12 +21,15 @@
#include <linux/rbtree.h>
#include <linux/file.h>
#include <linux/uaccess.h>
+#include <linux/security.h>
#include "integrity.h"
static struct rb_root integrity_iint_tree = RB_ROOT;
static DEFINE_RWLOCK(integrity_iint_lock);
static struct kmem_cache *iint_cache __read_mostly;
+struct dentry *integrity_dir;
+
/*
* __integrity_iint_find - return the iint associated with an inode
*/
@@ -211,3 +214,18 @@ void __init integrity_load_keys(void)
ima_load_x509();
evm_load_x509();
}
+
+static int __init integrity_fs_init(void)
+{
+ integrity_dir = securityfs_create_dir("integrity", NULL);
+ if (IS_ERR(integrity_dir)) {
+ pr_err("Unable to create integrity sysfs dir: %ld\n",
+ PTR_ERR(integrity_dir));
+ integrity_dir = NULL;
+ return PTR_ERR(integrity_dir);
+ }
+
+ return 0;
+}
+
+late_initcall(integrity_fs_init)
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 35fe91aa1fc9..354bb5716ce3 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -53,7 +53,6 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
extern int ima_policy_flag;
/* set during initialization */
-extern int ima_initialized;
extern int ima_used_chip;
extern int ima_hash_algo;
extern int ima_appraise;
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index fa540c0469da..ae9d5c766a3c 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -15,6 +15,9 @@
* implemenents security file system for reporting
* current measurement list and IMA statistics
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -336,7 +339,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf,
if (data[0] == '/') {
result = ima_read_policy(data);
} else if (ima_appraise & IMA_APPRAISE_POLICY) {
- pr_err("IMA: signed policy file (specified as an absolute pathname) required\n");
+ pr_err("signed policy file (specified as an absolute pathname) required\n");
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
"policy_update", "signed policy required",
1, 0);
@@ -356,6 +359,7 @@ out:
}
static struct dentry *ima_dir;
+static struct dentry *ima_symlink;
static struct dentry *binary_runtime_measurements;
static struct dentry *ascii_runtime_measurements;
static struct dentry *runtime_measurements_count;
@@ -417,7 +421,7 @@ static int ima_release_policy(struct inode *inode, struct file *file)
valid_policy = 0;
}
- pr_info("IMA: policy update %s\n", cause);
+ pr_info("policy update %s\n", cause);
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
"policy_update", cause, !valid_policy, 0);
@@ -434,6 +438,8 @@ static int ima_release_policy(struct inode *inode, struct file *file)
ima_policy = NULL;
#elif defined(CONFIG_IMA_WRITE_POLICY)
clear_bit(IMA_FS_BUSY, &ima_fs_flags);
+#elif defined(CONFIG_IMA_READ_POLICY)
+ inode->i_mode &= ~S_IWUSR;
#endif
return 0;
}
@@ -448,10 +454,15 @@ static const struct file_operations ima_measure_policy_ops = {
int __init ima_fs_init(void)
{
- ima_dir = securityfs_create_dir("ima", NULL);
+ ima_dir = securityfs_create_dir("ima", integrity_dir);
if (IS_ERR(ima_dir))
return -1;
+ ima_symlink = securityfs_create_symlink("ima", NULL, "integrity/ima",
+ NULL);
+ if (IS_ERR(ima_symlink))
+ goto out;
+
binary_runtime_measurements =
securityfs_create_file("binary_runtime_measurements",
S_IRUSR | S_IRGRP, ima_dir, NULL,
@@ -491,6 +502,7 @@ out:
securityfs_remove(runtime_measurements_count);
securityfs_remove(ascii_runtime_measurements);
securityfs_remove(binary_runtime_measurements);
+ securityfs_remove(ima_symlink);
securityfs_remove(ima_dir);
securityfs_remove(ima_policy);
return -1;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index e473eee913cb..16bd18747cfa 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -10,6 +10,8 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kexec.h>
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 74d0bd7e76d7..dca44cf7838e 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -32,8 +32,6 @@
#include "ima.h"
-int ima_initialized;
-
#ifdef CONFIG_IMA_APPRAISE
int ima_appraise = IMA_APPRAISE_ENFORCE;
#else
@@ -61,14 +59,11 @@ static int __init hash_setup(char *str)
goto out;
}
- for (i = 0; i < HASH_ALGO__LAST; i++) {
- if (strcmp(str, hash_algo_name[i]) == 0) {
- ima_hash_algo = i;
- break;
- }
- }
- if (i == HASH_ALGO__LAST)
+ i = match_string(hash_algo_name, HASH_ALGO__LAST, str);
+ if (i < 0)
return 1;
+
+ ima_hash_algo = i;
out:
hash_setup_done = 1;
return 1;
@@ -449,6 +444,7 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
static int read_idmap[READING_MAX_ID] = {
[READING_FIRMWARE] = FIRMWARE_CHECK,
+ [READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
[READING_MODULE] = MODULE_CHECK,
[READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK,
[READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK,
@@ -517,10 +513,9 @@ static int __init init_ima(void)
error = ima_init();
}
- if (!error) {
- ima_initialized = 1;
+ if (!error)
ima_update_policy_flag();
- }
+
return error;
}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index d89bebf85421..cdcc9a7b4e24 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -33,6 +33,7 @@
#define IMA_INMASK 0x0040
#define IMA_EUID 0x0080
#define IMA_PCR 0x0100
+#define IMA_FSNAME 0x0200
#define UNKNOWN 0
#define MEASURE 0x0001 /* same as IMA_MEASURE */
@@ -74,6 +75,7 @@ struct ima_rule_entry {
void *args_p; /* audit value */
int type; /* audit type */
} lsm[MAX_LSM_RULES];
+ char *fsname;
};
/*
@@ -273,6 +275,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
if ((rule->flags & IMA_FSMAGIC)
&& rule->fsmagic != inode->i_sb->s_magic)
return false;
+ if ((rule->flags & IMA_FSNAME)
+ && strcmp(rule->fsname, inode->i_sb->s_type->name))
+ return false;
if ((rule->flags & IMA_FSUUID) &&
!uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
return false;
@@ -435,6 +440,17 @@ void ima_update_policy_flag(void)
ima_policy_flag &= ~IMA_APPRAISE;
}
+static int ima_appraise_flag(enum ima_hooks func)
+{
+ if (func == MODULE_CHECK)
+ return IMA_APPRAISE_MODULES;
+ else if (func == FIRMWARE_CHECK)
+ return IMA_APPRAISE_FIRMWARE;
+ else if (func == POLICY_CHECK)
+ return IMA_APPRAISE_POLICY;
+ return 0;
+}
+
/**
* ima_init_policy - initialize the default measure rules.
*
@@ -473,9 +489,11 @@ void __init ima_init_policy(void)
* Insert the appraise rules requiring file signatures, prior to
* any other appraise rules.
*/
- for (i = 0; i < secure_boot_entries; i++)
- list_add_tail(&secure_boot_rules[i].list,
- &ima_default_rules);
+ for (i = 0; i < secure_boot_entries; i++) {
+ list_add_tail(&secure_boot_rules[i].list, &ima_default_rules);
+ temp_ima_appraise |=
+ ima_appraise_flag(secure_boot_rules[i].func);
+ }
for (i = 0; i < appraise_entries; i++) {
list_add_tail(&default_appraise_rules[i].list,
@@ -509,22 +527,9 @@ int ima_check_policy(void)
*/
void ima_update_policy(void)
{
- struct list_head *first, *last, *policy;
-
- /* append current policy with the new rules */
- first = (&ima_temp_rules)->next;
- last = (&ima_temp_rules)->prev;
- policy = &ima_policy_rules;
-
- synchronize_rcu();
+ struct list_head *policy = &ima_policy_rules;
- last->next = policy;
- rcu_assign_pointer(list_next_rcu(policy->prev), first);
- first->prev = policy->prev;
- policy->prev = last;
-
- /* prepare for the next policy rules addition */
- INIT_LIST_HEAD(&ima_temp_rules);
+ list_splice_tail_init_rcu(&ima_temp_rules, policy, synchronize_rcu);
if (ima_rules != policy) {
ima_policy_flag = 0;
@@ -540,7 +545,7 @@ enum {
Opt_audit, Opt_hash, Opt_dont_hash,
Opt_obj_user, Opt_obj_role, Opt_obj_type,
Opt_subj_user, Opt_subj_role, Opt_subj_type,
- Opt_func, Opt_mask, Opt_fsmagic,
+ Opt_func, Opt_mask, Opt_fsmagic, Opt_fsname,
Opt_fsuuid, Opt_uid_eq, Opt_euid_eq, Opt_fowner_eq,
Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
@@ -565,6 +570,7 @@ static match_table_t policy_tokens = {
{Opt_func, "func=%s"},
{Opt_mask, "mask=%s"},
{Opt_fsmagic, "fsmagic=%s"},
+ {Opt_fsname, "fsname=%s"},
{Opt_fsuuid, "fsuuid=%s"},
{Opt_uid_eq, "uid=%s"},
{Opt_euid_eq, "euid=%s"},
@@ -776,6 +782,17 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
if (!result)
entry->flags |= IMA_FSMAGIC;
break;
+ case Opt_fsname:
+ ima_log_string(ab, "fsname", args[0].from);
+
+ entry->fsname = kstrdup(args[0].from, GFP_KERNEL);
+ if (!entry->fsname) {
+ result = -ENOMEM;
+ break;
+ }
+ result = 0;
+ entry->flags |= IMA_FSNAME;
+ break;
case Opt_fsuuid:
ima_log_string(ab, "fsuuid", args[0].from);
@@ -917,12 +934,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
}
if (!result && (entry->action == UNKNOWN))
result = -EINVAL;
- else if (entry->func == MODULE_CHECK)
- temp_ima_appraise |= IMA_APPRAISE_MODULES;
- else if (entry->func == FIRMWARE_CHECK)
- temp_ima_appraise |= IMA_APPRAISE_FIRMWARE;
- else if (entry->func == POLICY_CHECK)
- temp_ima_appraise |= IMA_APPRAISE_POLICY;
+ else if (entry->action == APPRAISE)
+ temp_ima_appraise |= ima_appraise_flag(entry->func);
+
audit_log_format(ab, "res=%d", !result);
audit_log_end(ab);
return result;
@@ -1104,6 +1118,12 @@ int ima_policy_show(struct seq_file *m, void *v)
seq_puts(m, " ");
}
+ if (entry->flags & IMA_FSNAME) {
+ snprintf(tbuf, sizeof(tbuf), "%s", entry->fsname);
+ seq_printf(m, pt(Opt_fsname), tbuf);
+ seq_puts(m, " ");
+ }
+
if (entry->flags & IMA_PCR) {
snprintf(tbuf, sizeof(tbuf), "%d", entry->pcr);
seq_printf(m, pt(Opt_pcr), tbuf);
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 5afaa53decc5..43752002c222 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -13,6 +13,8 @@
* Library of supported template fields.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ima_template_lib.h"
static bool ima_template_hash_algo_allowed(u8 algo)
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 5e58e02ba8dc..0bb372eed62a 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -143,6 +143,8 @@ int integrity_kernel_read(struct file *file, loff_t offset,
#define INTEGRITY_KEYRING_MODULE 2
#define INTEGRITY_KEYRING_MAX 3
+extern struct dentry *integrity_dir;
+
#ifdef CONFIG_INTEGRITY_SIGNATURE
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index dcb976f98df2..7ad226018f51 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1545,9 +1545,9 @@ static int smack_inode_listsecurity(struct inode *inode, char *buffer,
*/
static void smack_inode_getsecid(struct inode *inode, u32 *secid)
{
- struct inode_smack *isp = inode->i_security;
+ struct smack_known *skp = smk_of_inode(inode);
- *secid = isp->smk_inode->smk_secid;
+ *secid = skp->smk_secid;
}
/*
@@ -4559,12 +4559,10 @@ static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
- int len = 0;
- len = smack_inode_getsecurity(inode, XATTR_SMACK_SUFFIX, ctx, true);
+ struct smack_known *skp = smk_of_inode(inode);
- if (len < 0)
- return len;
- *ctxlen = len;
+ *ctx = skp->smk_known;
+ *ctxlen = strlen(skp->smk_known);
return 0;
}
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index f0c6f54a8b2f..3040830d7797 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -248,7 +248,7 @@ void print_usage(void)
"Capture, convert and output data from IIO device buffer\n"
" -a Auto-activate all available channels\n"
" -A Force-activate ALL channels\n"
- " -c <n> Do n conversions\n"
+ " -c <n> Do n conversions, or loop forever if n < 0\n"
" -e Disable wait for event (new data)\n"
" -g Use trigger-less mode\n"
" -l <n> Set buffer length to n samples\n"
@@ -330,11 +330,14 @@ static const struct option longopts[] = {
int main(int argc, char **argv)
{
- unsigned long num_loops = 2;
+ unsigned long long num_loops = 2;
unsigned long timedelay = 1000000;
unsigned long buf_len = 128;
- int ret, c, i, j, toread;
+ ssize_t i;
+ unsigned long long j;
+ unsigned long toread;
+ int ret, c;
int fp = -1;
int num_channels = 0;
@@ -366,7 +369,7 @@ int main(int argc, char **argv)
break;
case 'c':
errno = 0;
- num_loops = strtoul(optarg, &dummy, 10);
+ num_loops = strtoll(optarg, &dummy, 10);
if (errno) {
ret = -errno;
goto error;
@@ -634,7 +637,7 @@ int main(int argc, char **argv)
goto error;
}
- for (j = 0; j < num_loops; j++) {
+ for (j = 0; j < num_loops || num_loops < 0; j++) {
if (!noevents) {
struct pollfd pfd = {
.fd = fp,
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 3a31b238f885..38047c6aa575 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -543,6 +543,28 @@ static int add_jump_destinations(struct objtool_file *file)
dest_off);
return -1;
}
+
+ /*
+ * For GCC 8+, create parent/child links for any cold
+ * subfunctions. This is _mostly_ redundant with a similar
+ * initialization in read_symbols().
+ *
+ * If a function has aliases, we want the *first* such function
+ * in the symbol table to be the subfunction's parent. In that
+ * case we overwrite the initialization done in read_symbols().
+ *
+ * However this code can't completely replace the
+ * read_symbols() code because this doesn't detect the case
+ * where the parent function's only reference to a subfunction
+ * is through a switch table.
+ */
+ if (insn->func && insn->jump_dest->func &&
+ insn->func != insn->jump_dest->func &&
+ !strstr(insn->func->name, ".cold.") &&
+ strstr(insn->jump_dest->func->name, ".cold.")) {
+ insn->func->cfunc = insn->jump_dest->func;
+ insn->jump_dest->func->pfunc = insn->func;
+ }
}
return 0;
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 2549c34a7895..11300dbe35c5 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -124,7 +124,11 @@ The available PMUs and their raw parameters can be listed with
For example the raw event "LSD.UOPS" core pmu event above could
be specified as
- perf stat -e cpu/event=0xa8,umask=0x1,name=LSD.UOPS_CYCLES,cmask=1/ ...
+ perf stat -e cpu/event=0xa8,umask=0x1,name=LSD.UOPS_CYCLES,cmask=0x1/ ...
+
+ or using extended name syntax
+
+ perf stat -e cpu/event=0xa8,umask=0x1,cmask=0x1,name=\'LSD.UOPS_CYCLES:cmask=0x1\'/ ...
PER SOCKET PMUS
---------------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index cc37b3a4be76..04168da4268e 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -57,6 +57,9 @@ OPTIONS
FP mode, "dwarf" for DWARF mode, "lbr" for LBR mode and
"no" for disable callgraph.
- 'stack-size': user stack size for dwarf mode
+ - 'name' : User defined event name. Single quotes (') may be used to
+ escape symbols in the name from parsing by shell and tool
+ like this: name=\'CPU_CLK_UNHALTED.THREAD:cmask=0x1\'.
See the linkperf:perf-list[1] man page for more parameters.
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 51ec2d20068a..0fb9eda3cbca 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -610,6 +610,32 @@ Various utility functions for use with perf script:
nsecs_str(nsecs) - returns printable string in the form secs.nsecs
avg(total, n) - returns average given a sum and a total number of values
+SUPPORTED FIELDS
+----------------
+
+Currently supported fields:
+
+ev_name, comm, pid, tid, cpu, ip, time, period, phys_addr, addr,
+symbol, dso, time_enabled, time_running, values, callchain,
+brstack, brstacksym, datasrc, datasrc_decode, iregs, uregs,
+weight, transaction, raw_buf, attr.
+
+Some fields have sub items:
+
+brstack:
+ from, to, from_dsoname, to_dsoname, mispred,
+ predicted, in_tx, abort, cycles.
+
+brstacksym:
+ items: from, to, pred, in_tx, abort (converted string)
+
+For example,
+We can use this code to print brstack "from", "to", "cycles".
+
+if 'brstack' in dict:
+ for entry in dict['brstack']:
+ print "from %s, to %s, cycles %s" % (entry["from"], entry["to"], entry["cycles"])
+
SEE ALSO
--------
linkperf:perf-script[1]
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 3a822f308e6d..5dfe102fb5b5 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -310,20 +310,38 @@ Users who wants to get the actual value can apply --no-metric-only.
EXAMPLES
--------
-$ perf stat -- make -j
+$ perf stat -- make
- Performance counter stats for 'make -j':
+ Performance counter stats for 'make':
- 8117.370256 task clock ticks # 11.281 CPU utilization factor
- 678 context switches # 0.000 M/sec
- 133 CPU migrations # 0.000 M/sec
- 235724 pagefaults # 0.029 M/sec
- 24821162526 CPU cycles # 3057.784 M/sec
- 18687303457 instructions # 2302.138 M/sec
- 172158895 cache references # 21.209 M/sec
- 27075259 cache misses # 3.335 M/sec
+ 83723.452481 task-clock:u (msec) # 1.004 CPUs utilized
+ 0 context-switches:u # 0.000 K/sec
+ 0 cpu-migrations:u # 0.000 K/sec
+ 3,228,188 page-faults:u # 0.039 M/sec
+ 229,570,665,834 cycles:u # 2.742 GHz
+ 313,163,853,778 instructions:u # 1.36 insn per cycle
+ 69,704,684,856 branches:u # 832.559 M/sec
+ 2,078,861,393 branch-misses:u # 2.98% of all branches
- Wall-clock time elapsed: 719.554352 msecs
+ 83.409183620 seconds time elapsed
+
+ 74.684747000 seconds user
+ 8.739217000 seconds sys
+
+TIMINGS
+-------
+As displayed in the example above we can display 3 types of timings.
+We always display the time the counters were enabled/alive:
+
+ 83.409183620 seconds time elapsed
+
+For workload sessions we also display time the workloads spent in
+user/system lands:
+
+ 74.684747000 seconds user
+ 8.739217000 seconds sys
+
+Those times are the very same as displayed by the 'time' tool.
CSV FORMAT
----------
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index c6f373508a4f..82657c01a3b8 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -189,7 +189,7 @@ out_error:
return -1;
}
-int perf_env__lookup_objdump(struct perf_env *env)
+int perf_env__lookup_objdump(struct perf_env *env, const char **path)
{
/*
* For live mode, env->arch will be NULL and we can use
@@ -198,5 +198,5 @@ int perf_env__lookup_objdump(struct perf_env *env)
if (env->arch == NULL)
return 0;
- return perf_env__lookup_binutils_path(env, "objdump", &objdump_path);
+ return perf_env__lookup_binutils_path(env, "objdump", path);
}
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index 2d875baa92e6..2167001b18c5 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -4,8 +4,6 @@
#include "../util/env.h"
-extern const char *objdump_path;
-
-int perf_env__lookup_objdump(struct perf_env *env);
+int perf_env__lookup_objdump(struct perf_env *env, const char **path);
#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index da5704240239..5eb22cc56363 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -40,9 +40,8 @@
struct perf_annotate {
struct perf_tool tool;
struct perf_session *session;
+ struct annotation_options opts;
bool use_tui, use_stdio, use_stdio2, use_gtk;
- bool full_paths;
- bool print_line;
bool skip_missing;
bool has_br_stack;
bool group_set;
@@ -162,12 +161,12 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
hist__account_cycles(sample->branch_stack, al, sample, false);
bi = he->branch_info;
- err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx);
+ err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
if (err)
goto out;
- err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx);
+ err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
out:
return err;
@@ -249,7 +248,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
if (he == NULL)
return -ENOMEM;
- ret = hist_entry__inc_addr_samples(he, sample, evsel->idx, al->addr);
+ ret = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
hists__inc_nr_samples(hists, true);
return ret;
}
@@ -289,10 +288,9 @@ static int hist_entry__tty_annotate(struct hist_entry *he,
struct perf_annotate *ann)
{
if (!ann->use_stdio2)
- return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel,
- ann->print_line, ann->full_paths, 0, 0);
- return symbol__tty_annotate2(he->ms.sym, he->ms.map, evsel,
- ann->print_line, ann->full_paths);
+ return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel, &ann->opts);
+
+ return symbol__tty_annotate2(he->ms.sym, he->ms.map, evsel, &ann->opts);
}
static void hists__find_annotations(struct hists *hists,
@@ -343,7 +341,7 @@ find_next:
/* skip missing symbols */
nd = rb_next(nd);
} else if (use_browser == 1) {
- key = hist_entry__tui_annotate(he, evsel, NULL);
+ key = hist_entry__tui_annotate(he, evsel, NULL, &ann->opts);
switch (key) {
case -1:
@@ -390,8 +388,9 @@ static int __cmd_annotate(struct perf_annotate *ann)
goto out;
}
- if (!objdump_path) {
- ret = perf_env__lookup_objdump(&session->header.env);
+ if (!ann->opts.objdump_path) {
+ ret = perf_env__lookup_objdump(&session->header.env,
+ &ann->opts.objdump_path);
if (ret)
goto out;
}
@@ -476,6 +475,7 @@ int cmd_annotate(int argc, const char **argv)
.ordered_events = true,
.ordering_requires_timestamps = true,
},
+ .opts = annotation__default_options,
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
@@ -503,9 +503,9 @@ int cmd_annotate(int argc, const char **argv)
"file", "vmlinux pathname"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
- OPT_BOOLEAN('l', "print-line", &annotate.print_line,
+ OPT_BOOLEAN('l', "print-line", &annotate.opts.print_lines,
"print matching source lines (may be slow)"),
- OPT_BOOLEAN('P', "full-paths", &annotate.full_paths,
+ OPT_BOOLEAN('P', "full-paths", &annotate.opts.full_path,
"Don't shorten the displayed pathnames"),
OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
"Skip symbols that cannot be annotated"),
@@ -516,13 +516,13 @@ int cmd_annotate(int argc, const char **argv)
OPT_CALLBACK(0, "symfs", NULL, "directory",
"Look for files with symbols relative to this directory",
symbol__config_symfs),
- OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
+ OPT_BOOLEAN(0, "source", &annotate.opts.annotate_src,
"Interleave source code with assembly code (default)"),
- OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
+ OPT_BOOLEAN(0, "asm-raw", &annotate.opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
- OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+ OPT_STRING('M', "disassembler-style", &annotate.opts.disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
- OPT_STRING(0, "objdump", &objdump_path, "path",
+ OPT_STRING(0, "objdump", &annotate.opts.objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
"Show event group information together"),
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 2126bfbcb385..307b3594525f 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -1976,7 +1976,7 @@ static int filter_cb(struct hist_entry *he)
c2c_he = container_of(he, struct c2c_hist_entry, he);
if (c2c.show_src && !he->srcline)
- he->srcline = hist_entry__get_srcline(he);
+ he->srcline = hist_entry__srcline(he);
calc_width(c2c_he);
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 72e2ca096bf5..2b1ef704169f 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1438,8 +1438,6 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
goto out;
}
- symbol_conf.nr_events = kvm->evlist->nr_entries;
-
if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
usage_with_options(live_usage, live_options);
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index c0065923a525..99de91698de1 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -81,8 +81,7 @@ static int parse_probe_event(const char *str)
params.target_used = true;
}
- if (params.nsi)
- pev->nsi = nsinfo__get(params.nsi);
+ pev->nsi = nsinfo__get(params.nsi);
/* Parse a perf-probe command into event */
ret = parse_perf_probe_command(str, pev);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index ad978e3ee2b8..cdb5b6949832 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -71,6 +71,7 @@ struct report {
bool group_set;
int max_stack;
struct perf_read_values show_threads_values;
+ struct annotation_options annotation_opts;
const char *pretty_printing_style;
const char *cpu_list;
const char *symbol_filter_str;
@@ -136,26 +137,25 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
if (sort__mode == SORT_MODE__BRANCH) {
bi = he->branch_info;
- err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx);
+ err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
if (err)
goto out;
- err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx);
+ err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
} else if (rep->mem_mode) {
mi = he->mem_info;
- err = addr_map_symbol__inc_samples(&mi->daddr, sample, evsel->idx);
+ err = addr_map_symbol__inc_samples(&mi->daddr, sample, evsel);
if (err)
goto out;
- err = hist_entry__inc_addr_samples(he, sample, evsel->idx, al->addr);
+ err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
} else if (symbol_conf.cumulate_callchain) {
if (single)
- err = hist_entry__inc_addr_samples(he, sample, evsel->idx,
- al->addr);
+ err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
} else {
- err = hist_entry__inc_addr_samples(he, sample, evsel->idx, al->addr);
+ err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
}
out:
@@ -181,11 +181,11 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
rep->nonany_branch_mode);
bi = he->branch_info;
- err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx);
+ err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
if (err)
goto out;
- err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx);
+ err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
branch_type_count(&rep->brtype_stat, &bi->flags,
bi->from.addr, bi->to.addr);
@@ -561,7 +561,7 @@ static int report__browse_hists(struct report *rep)
ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
rep->min_percent,
&session->header.env,
- true);
+ true, &rep->annotation_opts);
/*
* Usually "ret" is the last pressed key, and we only
* care if the key notifies us to switch data file.
@@ -946,12 +946,6 @@ parse_percent_limit(const struct option *opt, const char *str,
return 0;
}
-#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
-
-const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
- CALLCHAIN_REPORT_HELP
- "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
-
int cmd_report(int argc, const char **argv)
{
struct perf_session *session;
@@ -960,6 +954,10 @@ int cmd_report(int argc, const char **argv)
bool has_br_stack = false;
int branch_mode = -1;
bool branch_call_mode = false;
+#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
+ const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
+ CALLCHAIN_REPORT_HELP
+ "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
const char * const report_usage[] = {
"perf report [<options>]",
@@ -989,6 +987,7 @@ int cmd_report(int argc, const char **argv)
.max_stack = PERF_MAX_STACK_DEPTH,
.pretty_printing_style = "normal",
.socket_filter = -1,
+ .annotation_opts = annotation__default_options,
};
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
@@ -1078,11 +1077,11 @@ int cmd_report(int argc, const char **argv)
"list of cpus to profile"),
OPT_BOOLEAN('I', "show-info", &report.show_full_info,
"Display extended information about perf.data file"),
- OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
+ OPT_BOOLEAN(0, "source", &report.annotation_opts.annotate_src,
"Interleave source code with assembly code (default)"),
- OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
+ OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
- OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+ OPT_STRING('M', "disassembler-style", &report.annotation_opts.disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
@@ -1093,7 +1092,7 @@ int cmd_report(int argc, const char **argv)
parse_branch_mode),
OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
"add last branch records to call history"),
- OPT_STRING(0, "objdump", &objdump_path, "path",
+ OPT_STRING(0, "objdump", &report.annotation_opts.objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
"Disable symbol demangling"),
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 4dfdee668b0c..cbf39dab19c1 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2143,7 +2143,7 @@ static void save_task_callchain(struct perf_sched *sched,
return;
}
- if (!symbol_conf.use_callchain || sample->callchain == NULL)
+ if (!sched->show_callchain || sample->callchain == NULL)
return;
if (thread__resolve_callchain(thread, cursor, evsel, sample,
@@ -2271,10 +2271,11 @@ static struct thread *get_idle_thread(int cpu)
return idle_threads[cpu];
}
-static void save_idle_callchain(struct idle_thread_runtime *itr,
+static void save_idle_callchain(struct perf_sched *sched,
+ struct idle_thread_runtime *itr,
struct perf_sample *sample)
{
- if (!symbol_conf.use_callchain || sample->callchain == NULL)
+ if (!sched->show_callchain || sample->callchain == NULL)
return;
callchain_cursor__copy(&itr->cursor, &callchain_cursor);
@@ -2320,7 +2321,7 @@ static struct thread *timehist_get_thread(struct perf_sched *sched,
/* copy task callchain when entering to idle */
if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
- save_idle_callchain(itr, sample);
+ save_idle_callchain(sched, itr, sample);
}
}
@@ -2849,7 +2850,7 @@ static void timehist_print_summary(struct perf_sched *sched,
printf(" CPU %2d idle entire time window\n", i);
}
- if (sched->idle_hist && symbol_conf.use_callchain) {
+ if (sched->idle_hist && sched->show_callchain) {
callchain_param.mode = CHAIN_FOLDED;
callchain_param.value = CCVAL_PERIOD;
@@ -2933,8 +2934,7 @@ static int timehist_check_attr(struct perf_sched *sched,
return -1;
}
- if (sched->show_callchain &&
- !(evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (sched->show_callchain && !evsel__has_callchain(evsel)) {
pr_info("Samples do not have callchains.\n");
sched->show_callchain = 0;
symbol_conf.use_callchain = 0;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index cefc8813e91e..b3bf35512d21 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -517,7 +517,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
evlist__for_each_entry(session->evlist, evsel) {
not_pipe = true;
- if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+ if (evsel__has_callchain(evsel)) {
use_callchain = true;
break;
}
@@ -532,22 +532,18 @@ static int perf_session__check_output_opt(struct perf_session *session)
*/
if (symbol_conf.use_callchain &&
!output[PERF_TYPE_TRACEPOINT].user_set) {
- struct perf_event_attr *attr;
-
j = PERF_TYPE_TRACEPOINT;
evlist__for_each_entry(session->evlist, evsel) {
if (evsel->attr.type != j)
continue;
- attr = &evsel->attr;
-
- if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) {
+ if (evsel__has_callchain(evsel)) {
output[j].fields |= PERF_OUTPUT_IP;
output[j].fields |= PERF_OUTPUT_SYM;
output[j].fields |= PERF_OUTPUT_SYMOFFSET;
output[j].fields |= PERF_OUTPUT_DSO;
- set_print_ip_opts(attr);
+ set_print_ip_opts(&evsel->attr);
goto out;
}
}
@@ -610,7 +606,7 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
if (PRINT_FIELD(COMM)) {
if (latency_format)
printed += fprintf(fp, "%8.8s ", thread__comm_str(thread));
- else if (PRINT_FIELD(IP) && symbol_conf.use_callchain)
+ else if (PRINT_FIELD(IP) && evsel__has_callchain(evsel) && symbol_conf.use_callchain)
printed += fprintf(fp, "%s ", thread__comm_str(thread));
else
printed += fprintf(fp, "%16s ", thread__comm_str(thread));
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a4f662a462c6..096ccb25c11f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -80,6 +80,9 @@
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
#include "sane_ctype.h"
@@ -175,6 +178,8 @@ static int output_fd;
static int print_free_counters_hint;
static int print_mixed_hw_group_error;
static u64 *walltime_run;
+static bool ru_display = false;
+static struct rusage ru_data;
struct perf_stat {
bool record;
@@ -726,7 +731,7 @@ try_again:
break;
}
}
- waitpid(child_pid, &status, 0);
+ wait4(child_pid, &status, 0, &ru_data);
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
@@ -1804,6 +1809,11 @@ static void print_table(FILE *output, int precision, double avg)
fprintf(output, "\n%*s# Final result:\n", indent, "");
}
+static double timeval2double(struct timeval *t)
+{
+ return t->tv_sec + (double) t->tv_usec/USEC_PER_SEC;
+}
+
static void print_footer(void)
{
double avg = avg_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
@@ -1815,6 +1825,15 @@ static void print_footer(void)
if (run_count == 1) {
fprintf(output, " %17.9f seconds time elapsed", avg);
+
+ if (ru_display) {
+ double ru_utime = timeval2double(&ru_data.ru_utime);
+ double ru_stime = timeval2double(&ru_data.ru_stime);
+
+ fprintf(output, "\n\n");
+ fprintf(output, " %17.9f seconds user\n", ru_utime);
+ fprintf(output, " %17.9f seconds sys\n", ru_stime);
+ }
} else {
double sd = stddev_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
/*
@@ -2950,6 +2969,13 @@ int cmd_stat(int argc, const char **argv)
setup_system_wide(argc);
+ /*
+ * Display user/system times only for single
+ * run and when there's specified tracee.
+ */
+ if ((run_count == 1) && target__none(&target))
+ ru_display = true;
+
if (run_count < 0) {
pr_err("Run count must be a positive number\n");
parse_options_usage(stat_usage, stat_options, "r", 1);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7a349fcd3864..ffdc2769ff9f 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -123,14 +123,9 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
}
notes = symbol__annotation(sym);
- if (notes->src != NULL) {
- pthread_mutex_lock(&notes->lock);
- goto out_assign;
- }
-
pthread_mutex_lock(&notes->lock);
- if (symbol__alloc_hist(sym) < 0) {
+ if (!symbol__hists(sym, top->evlist->nr_entries)) {
pthread_mutex_unlock(&notes->lock);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
@@ -138,9 +133,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__annotate(sym, map, evsel, 0, NULL);
+ err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL);
if (err == 0) {
-out_assign:
top->sym_filter_entry = he;
} else {
char msg[BUFSIZ];
@@ -188,7 +182,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
static void perf_top__record_precise_ip(struct perf_top *top,
struct hist_entry *he,
struct perf_sample *sample,
- int counter, u64 ip)
+ struct perf_evsel *evsel, u64 ip)
{
struct annotation *notes;
struct symbol *sym = he->ms.sym;
@@ -204,7 +198,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
if (pthread_mutex_trylock(&notes->lock))
return;
- err = hist_entry__inc_addr_samples(he, sample, counter, ip);
+ err = hist_entry__inc_addr_samples(he, sample, evsel, ip);
pthread_mutex_unlock(&notes->lock);
@@ -249,10 +243,9 @@ static void perf_top__show_details(struct perf_top *top)
goto out_unlock;
printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
- printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
+ printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
- more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel,
- 0, top->sym_pcnt_filter, top->print_entries, 4);
+ more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts);
if (top->evlist->enabled) {
if (top->zero)
@@ -412,7 +405,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
- fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
+ fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
@@ -515,7 +508,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
prompt_integer(&top->count_filter, "Enter display event count filter");
break;
case 'F':
- prompt_percent(&top->sym_pcnt_filter,
+ prompt_percent(&top->annotation_opts.min_pcnt,
"Enter details display event filter (percent)");
break;
case 'K':
@@ -613,7 +606,8 @@ static void *display_thread_tui(void *arg)
perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
top->min_percent,
&top->session->header.env,
- !top->record_opts.overwrite);
+ !top->record_opts.overwrite,
+ &top->annotation_opts);
done = 1;
return NULL;
@@ -691,7 +685,7 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
struct perf_evsel *evsel = iter->evsel;
if (perf_hpp_list.sym && single)
- perf_top__record_precise_ip(top, he, iter->sample, evsel->idx, al->addr);
+ perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
!(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
@@ -1083,8 +1077,9 @@ static int __cmd_top(struct perf_top *top)
if (top->session == NULL)
return -1;
- if (!objdump_path) {
- ret = perf_env__lookup_objdump(&top->session->header.env);
+ if (!top->annotation_opts.objdump_path) {
+ ret = perf_env__lookup_objdump(&top->session->header.env,
+ &top->annotation_opts.objdump_path);
if (ret)
goto out_delete;
}
@@ -1265,7 +1260,7 @@ int cmd_top(int argc, const char **argv)
.overwrite = 1,
},
.max_stack = sysctl__max_stack(),
- .sym_pcnt_filter = 5,
+ .annotation_opts = annotation__default_options,
.nr_threads_synthesize = UINT_MAX,
};
struct record_opts *opts = &top.record_opts;
@@ -1347,15 +1342,15 @@ int cmd_top(int argc, const char **argv)
"only consider symbols in these comms"),
OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
- OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
+ OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
"Interleave source code with assembly code (default)"),
- OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
+ OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
- OPT_STRING(0, "objdump", &objdump_path, "path",
+ OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
- OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+ OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
OPT_CALLBACK(0, "percent-limit", &top, "percent",
@@ -1391,6 +1386,9 @@ int cmd_top(int argc, const char **argv)
if (status < 0)
return status;
+ top.annotation_opts.min_pcnt = 5;
+ top.annotation_opts.context = 4;
+
top.evlist = perf_evlist__new();
if (top.evlist == NULL)
return -ENOMEM;
@@ -1468,8 +1466,6 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
- symbol_conf.nr_events = top.evlist->nr_entries;
-
if (top.delay_secs < 1)
top.delay_secs = 1;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 560aed7da36a..6a748eca2edb 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2491,7 +2491,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
* to override an explicitely set --max-stack global setting.
*/
evlist__for_each_entry(evlist, evsel) {
- if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) &&
+ if (evsel__has_callchain(evsel) &&
evsel->attr.sample_max_stack == 0)
evsel->attr.sample_max_stack = trace->max_stack;
}
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 51c81509a315..a11cb006f968 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -12,7 +12,6 @@
#include "util/env.h"
#include <subcmd/exec-cmd.h>
#include "util/config.h"
-#include "util/quote.h"
#include <subcmd/run-command.h>
#include "util/parse-events.h"
#include <subcmd/parse-options.h>
diff --git a/tools/perf/scripts/python/bin/powerpc-hcalls-record b/tools/perf/scripts/python/bin/powerpc-hcalls-record
new file mode 100644
index 000000000000..b7402aa9147d
--- /dev/null
+++ b/tools/perf/scripts/python/bin/powerpc-hcalls-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e "{powerpc:hcall_entry,powerpc:hcall_exit}" $@
diff --git a/tools/perf/scripts/python/bin/powerpc-hcalls-report b/tools/perf/scripts/python/bin/powerpc-hcalls-report
new file mode 100644
index 000000000000..dd32ad7465f6
--- /dev/null
+++ b/tools/perf/scripts/python/bin/powerpc-hcalls-report
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/powerpc-hcalls.py
diff --git a/tools/perf/scripts/python/powerpc-hcalls.py b/tools/perf/scripts/python/powerpc-hcalls.py
new file mode 100644
index 000000000000..00e0e7476e55
--- /dev/null
+++ b/tools/perf/scripts/python/powerpc-hcalls.py
@@ -0,0 +1,200 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018 Ravi Bangoria, IBM Corporation
+#
+# Hypervisor call statisics
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+# output: {
+# opcode: {
+# 'min': minimum time nsec
+# 'max': maximum time nsec
+# 'time': average time nsec
+# 'cnt': counter
+# } ...
+# }
+output = {}
+
+# d_enter: {
+# cpu: {
+# opcode: nsec
+# } ...
+# }
+d_enter = {}
+
+hcall_table = {
+ 4: 'H_REMOVE',
+ 8: 'H_ENTER',
+ 12: 'H_READ',
+ 16: 'H_CLEAR_MOD',
+ 20: 'H_CLEAR_REF',
+ 24: 'H_PROTECT',
+ 28: 'H_GET_TCE',
+ 32: 'H_PUT_TCE',
+ 36: 'H_SET_SPRG0',
+ 40: 'H_SET_DABR',
+ 44: 'H_PAGE_INIT',
+ 48: 'H_SET_ASR',
+ 52: 'H_ASR_ON',
+ 56: 'H_ASR_OFF',
+ 60: 'H_LOGICAL_CI_LOAD',
+ 64: 'H_LOGICAL_CI_STORE',
+ 68: 'H_LOGICAL_CACHE_LOAD',
+ 72: 'H_LOGICAL_CACHE_STORE',
+ 76: 'H_LOGICAL_ICBI',
+ 80: 'H_LOGICAL_DCBF',
+ 84: 'H_GET_TERM_CHAR',
+ 88: 'H_PUT_TERM_CHAR',
+ 92: 'H_REAL_TO_LOGICAL',
+ 96: 'H_HYPERVISOR_DATA',
+ 100: 'H_EOI',
+ 104: 'H_CPPR',
+ 108: 'H_IPI',
+ 112: 'H_IPOLL',
+ 116: 'H_XIRR',
+ 120: 'H_MIGRATE_DMA',
+ 124: 'H_PERFMON',
+ 220: 'H_REGISTER_VPA',
+ 224: 'H_CEDE',
+ 228: 'H_CONFER',
+ 232: 'H_PROD',
+ 236: 'H_GET_PPP',
+ 240: 'H_SET_PPP',
+ 244: 'H_PURR',
+ 248: 'H_PIC',
+ 252: 'H_REG_CRQ',
+ 256: 'H_FREE_CRQ',
+ 260: 'H_VIO_SIGNAL',
+ 264: 'H_SEND_CRQ',
+ 272: 'H_COPY_RDMA',
+ 276: 'H_REGISTER_LOGICAL_LAN',
+ 280: 'H_FREE_LOGICAL_LAN',
+ 284: 'H_ADD_LOGICAL_LAN_BUFFER',
+ 288: 'H_SEND_LOGICAL_LAN',
+ 292: 'H_BULK_REMOVE',
+ 304: 'H_MULTICAST_CTRL',
+ 308: 'H_SET_XDABR',
+ 312: 'H_STUFF_TCE',
+ 316: 'H_PUT_TCE_INDIRECT',
+ 332: 'H_CHANGE_LOGICAL_LAN_MAC',
+ 336: 'H_VTERM_PARTNER_INFO',
+ 340: 'H_REGISTER_VTERM',
+ 344: 'H_FREE_VTERM',
+ 348: 'H_RESET_EVENTS',
+ 352: 'H_ALLOC_RESOURCE',
+ 356: 'H_FREE_RESOURCE',
+ 360: 'H_MODIFY_QP',
+ 364: 'H_QUERY_QP',
+ 368: 'H_REREGISTER_PMR',
+ 372: 'H_REGISTER_SMR',
+ 376: 'H_QUERY_MR',
+ 380: 'H_QUERY_MW',
+ 384: 'H_QUERY_HCA',
+ 388: 'H_QUERY_PORT',
+ 392: 'H_MODIFY_PORT',
+ 396: 'H_DEFINE_AQP1',
+ 400: 'H_GET_TRACE_BUFFER',
+ 404: 'H_DEFINE_AQP0',
+ 408: 'H_RESIZE_MR',
+ 412: 'H_ATTACH_MCQP',
+ 416: 'H_DETACH_MCQP',
+ 420: 'H_CREATE_RPT',
+ 424: 'H_REMOVE_RPT',
+ 428: 'H_REGISTER_RPAGES',
+ 432: 'H_DISABLE_AND_GETC',
+ 436: 'H_ERROR_DATA',
+ 440: 'H_GET_HCA_INFO',
+ 444: 'H_GET_PERF_COUNT',
+ 448: 'H_MANAGE_TRACE',
+ 468: 'H_FREE_LOGICAL_LAN_BUFFER',
+ 472: 'H_POLL_PENDING',
+ 484: 'H_QUERY_INT_STATE',
+ 580: 'H_ILLAN_ATTRIBUTES',
+ 592: 'H_MODIFY_HEA_QP',
+ 596: 'H_QUERY_HEA_QP',
+ 600: 'H_QUERY_HEA',
+ 604: 'H_QUERY_HEA_PORT',
+ 608: 'H_MODIFY_HEA_PORT',
+ 612: 'H_REG_BCMC',
+ 616: 'H_DEREG_BCMC',
+ 620: 'H_REGISTER_HEA_RPAGES',
+ 624: 'H_DISABLE_AND_GET_HEA',
+ 628: 'H_GET_HEA_INFO',
+ 632: 'H_ALLOC_HEA_RESOURCE',
+ 644: 'H_ADD_CONN',
+ 648: 'H_DEL_CONN',
+ 664: 'H_JOIN',
+ 676: 'H_VASI_STATE',
+ 688: 'H_ENABLE_CRQ',
+ 696: 'H_GET_EM_PARMS',
+ 720: 'H_SET_MPP',
+ 724: 'H_GET_MPP',
+ 748: 'H_HOME_NODE_ASSOCIATIVITY',
+ 756: 'H_BEST_ENERGY',
+ 764: 'H_XIRR_X',
+ 768: 'H_RANDOM',
+ 772: 'H_COP',
+ 788: 'H_GET_MPP_X',
+ 796: 'H_SET_MODE',
+ 61440: 'H_RTAS',
+}
+
+def hcall_table_lookup(opcode):
+ if (hcall_table.has_key(opcode)):
+ return hcall_table[opcode]
+ else:
+ return opcode
+
+print_ptrn = '%-28s%10s%10s%10s%10s'
+
+def trace_end():
+ print print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)')
+ print '-' * 68
+ for opcode in output:
+ h_name = hcall_table_lookup(opcode)
+ time = output[opcode]['time']
+ cnt = output[opcode]['cnt']
+ min_t = output[opcode]['min']
+ max_t = output[opcode]['max']
+
+ print print_ptrn % (h_name, cnt, min_t, max_t, time/cnt)
+
+def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
+ opcode, retval):
+ if (d_enter.has_key(cpu) and d_enter[cpu].has_key(opcode)):
+ diff = nsecs(sec, nsec) - d_enter[cpu][opcode]
+
+ if (output.has_key(opcode)):
+ output[opcode]['time'] += diff
+ output[opcode]['cnt'] += 1
+ if (output[opcode]['min'] > diff):
+ output[opcode]['min'] = diff
+ if (output[opcode]['max'] < diff):
+ output[opcode]['max'] = diff
+ else:
+ output[opcode] = {
+ 'time': diff,
+ 'cnt': 1,
+ 'min': diff,
+ 'max': diff,
+ }
+
+ del d_enter[cpu][opcode]
+# else:
+# print "Can't find matching hcall_enter event. Ignoring sample"
+
+def powerpc__hcall_entry(event_name, context, cpu, sec, nsec, pid, comm,
+ callchain, opcode):
+ if (d_enter.has_key(cpu)):
+ d_enter[cpu][opcode] = nsecs(sec, nsec)
+ else:
+ d_enter[cpu] = {opcode: nsecs(sec, nsec)}
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index afa4ce21ba7c..4892bd2dc33e 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -560,6 +560,7 @@ static int do_test_code_reading(bool try_kcore)
pid = getpid();
machine = machine__new_host();
+ machine->env = &perf_env;
ret = machine__create_kernel_maps(machine);
if (ret < 0) {
diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c
index 8e57d46109de..148dd31cc201 100644
--- a/tools/perf/tests/kmod-path.c
+++ b/tools/perf/tests/kmod-path.c
@@ -127,6 +127,22 @@ int test__kmod_path__parse(struct test *t __maybe_unused, int subtest __maybe_un
M("[vdso]", PERF_RECORD_MISC_KERNEL, false);
M("[vdso]", PERF_RECORD_MISC_USER, false);
+ T("[vdso32]", true , true , false, false, "[vdso32]", NULL);
+ T("[vdso32]", false , true , false, false, NULL , NULL);
+ T("[vdso32]", true , false , false, false, "[vdso32]", NULL);
+ T("[vdso32]", false , false , false, false, NULL , NULL);
+ M("[vdso32]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+ M("[vdso32]", PERF_RECORD_MISC_KERNEL, false);
+ M("[vdso32]", PERF_RECORD_MISC_USER, false);
+
+ T("[vdsox32]", true , true , false, false, "[vdsox32]", NULL);
+ T("[vdsox32]", false , true , false, false, NULL , NULL);
+ T("[vdsox32]", true , false , false, false, "[vdsox32]", NULL);
+ T("[vdsox32]", false , false , false, false, NULL , NULL);
+ M("[vdsox32]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+ M("[vdsox32]", PERF_RECORD_MISC_KERNEL, false);
+ M("[vdsox32]", PERF_RECORD_MISC_USER, false);
+
/* path alloc_name alloc_ext kmod comp name ext */
T("[vsyscall]", true , true , false, false, "[vsyscall]", NULL);
T("[vsyscall]", false , true , false, false, NULL , NULL);
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index b9ebe15afb13..7d4077068454 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -499,7 +499,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct perf_evlist *evlis
* while this test executes only parse events method.
*/
TEST_ASSERT_VAL("wrong period", 0 == evsel->attr.sample_period);
- TEST_ASSERT_VAL("wrong callgraph", !(PERF_SAMPLE_CALLCHAIN & evsel->attr.sample_type));
+ TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->attr.sample_type));
/* cpu/config=2,call-graph=no,time=0,period=2000/ */
@@ -512,7 +512,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct perf_evlist *evlis
* while this test executes only parse events method.
*/
TEST_ASSERT_VAL("wrong period", 0 == evsel->attr.sample_period);
- TEST_ASSERT_VAL("wrong callgraph", !(PERF_SAMPLE_CALLCHAIN & evsel->attr.sample_type));
+ TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->attr.sample_type));
return 0;
diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c
index 5d2df65ada6a..40ab72149ce1 100644
--- a/tools/perf/tests/python-use.c
+++ b/tools/perf/tests/python-use.c
@@ -7,8 +7,7 @@
#include <stdlib.h>
#include <linux/compiler.h>
#include "tests.h"
-
-extern int verbose;
+#include "util/debug.h"
int test__python_use(struct test *test __maybe_unused, int subtest __maybe_unused)
{
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 650b208f700f..263057039693 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -11,7 +11,7 @@
. $(dirname $0)/lib/probe.sh
libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
-nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
+nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
trace_libc_inet_pton_backtrace() {
idx=0
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 8be40fa903aa..3b4f1c10ff57 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -29,6 +29,7 @@ struct annotate_browser {
struct rb_node *curr_hot;
struct annotation_line *selection;
struct arch *arch;
+ struct annotation_options *opts;
bool searching_backwards;
char search_bf[128];
};
@@ -410,7 +411,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
notes = symbol__annotation(dl->ops.target.sym);
pthread_mutex_lock(&notes->lock);
- if (notes->src == NULL && symbol__alloc_hist(dl->ops.target.sym) < 0) {
+ if (!symbol__hists(dl->ops.target.sym, evsel->evlist->nr_entries)) {
pthread_mutex_unlock(&notes->lock);
ui__warning("Not enough memory for annotating '%s' symbol!\n",
dl->ops.target.sym->name);
@@ -418,7 +419,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
}
pthread_mutex_unlock(&notes->lock);
- symbol__tui_annotate(dl->ops.target.sym, ms->map, evsel, hbt);
+ symbol__tui_annotate(dl->ops.target.sym, ms->map, evsel, hbt, browser->opts);
sym_title(ms->sym, ms->map, title, sizeof(title));
ui_browser__show_title(&browser->b, title);
return true;
@@ -817,24 +818,27 @@ out:
}
int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
- struct hist_browser_timer *hbt)
+ struct hist_browser_timer *hbt,
+ struct annotation_options *opts)
{
- return symbol__tui_annotate(ms->sym, ms->map, evsel, hbt);
+ return symbol__tui_annotate(ms->sym, ms->map, evsel, hbt, opts);
}
int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
- struct hist_browser_timer *hbt)
+ struct hist_browser_timer *hbt,
+ struct annotation_options *opts)
{
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
- return map_symbol__tui_annotate(&he->ms, evsel, hbt);
+ return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts);
}
int symbol__tui_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
- struct hist_browser_timer *hbt)
+ struct hist_browser_timer *hbt,
+ struct annotation_options *opts)
{
struct annotation *notes = symbol__annotation(sym);
struct map_symbol ms = {
@@ -851,6 +855,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
.priv = &ms,
.use_navkeypressed = true,
},
+ .opts = opts,
};
int ret = -1, err;
@@ -860,7 +865,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
- err = symbol__annotate2(sym, map, evsel, &annotation__default_options, &browser.arch);
+ err = symbol__annotate2(sym, map, evsel, opts, &browser.arch);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index e5f247247daa..a96f62ca984a 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1231,6 +1231,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
int width = browser->b.width;
char folded_sign = ' ';
bool current_entry = ui_browser__is_current_entry(&browser->b, row);
+ bool use_callchain = hist_entry__has_callchains(entry) && symbol_conf.use_callchain;
off_t row_offset = entry->row_offset;
bool first = true;
struct perf_hpp_fmt *fmt;
@@ -1240,7 +1241,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
browser->selection = &entry->ms;
}
- if (symbol_conf.use_callchain) {
+ if (use_callchain) {
hist_entry__init_have_children(entry);
folded_sign = hist_entry__folded(entry);
}
@@ -1276,7 +1277,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
}
if (first) {
- if (symbol_conf.use_callchain) {
+ if (use_callchain) {
ui_browser__printf(&browser->b, "%c ", folded_sign);
width -= 2;
}
@@ -1583,7 +1584,7 @@ hists_browser__scnprintf_headers(struct hist_browser *browser, char *buf,
int column = 0;
int span = 0;
- if (symbol_conf.use_callchain) {
+ if (hists__has_callchains(hists) && symbol_conf.use_callchain) {
ret = scnprintf(buf, size, " ");
if (advance_hpp_check(&dummy_hpp, ret))
return ret;
@@ -1987,7 +1988,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
bool first = true;
int ret;
- if (symbol_conf.use_callchain) {
+ if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
folded_sign = hist_entry__folded(he);
printed += fprintf(fp, "%c ", folded_sign);
}
@@ -2175,7 +2176,8 @@ struct hist_browser *hist_browser__new(struct hists *hists)
static struct hist_browser *
perf_evsel_browser__new(struct perf_evsel *evsel,
struct hist_browser_timer *hbt,
- struct perf_env *env)
+ struct perf_env *env,
+ struct annotation_options *annotation_opts)
{
struct hist_browser *browser = hist_browser__new(evsel__hists(evsel));
@@ -2183,6 +2185,7 @@ perf_evsel_browser__new(struct perf_evsel *evsel,
browser->hbt = hbt;
browser->env = env;
browser->title = hists_browser__scnprintf_title;
+ browser->annotation_opts = annotation_opts;
}
return browser;
}
@@ -2336,7 +2339,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
struct hist_entry *he;
int err;
- if (!objdump_path && perf_env__lookup_objdump(browser->env))
+ if (!browser->annotation_opts->objdump_path &&
+ perf_env__lookup_objdump(browser->env, &browser->annotation_opts->objdump_path))
return 0;
notes = symbol__annotation(act->ms.sym);
@@ -2344,7 +2348,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
return 0;
evsel = hists_to_evsel(browser->hists);
- err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt);
+ err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt,
+ browser->annotation_opts);
he = hist_browser__selected_entry(browser);
/*
* offer option to annotate the other branch source or target
@@ -2667,7 +2672,7 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb,
he->nr_rows = 0;
}
- if (!he->leaf || !symbol_conf.use_callchain)
+ if (!he->leaf || !hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
goto next;
if (callchain_param.mode == CHAIN_GRAPH_REL) {
@@ -2697,10 +2702,11 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
struct hist_browser_timer *hbt,
float min_pcnt,
struct perf_env *env,
- bool warn_lost_event)
+ bool warn_lost_event,
+ struct annotation_options *annotation_opts)
{
struct hists *hists = evsel__hists(evsel);
- struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env);
+ struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
struct branch_info *bi;
#define MAX_OPTIONS 16
char *options[MAX_OPTIONS];
@@ -3062,6 +3068,7 @@ out:
struct perf_evsel_menu {
struct ui_browser b;
struct perf_evsel *selection;
+ struct annotation_options *annotation_opts;
bool lost_events, lost_events_warned;
float min_pcnt;
struct perf_env *env;
@@ -3163,7 +3170,8 @@ browse_hists:
true, hbt,
menu->min_pcnt,
menu->env,
- warn_lost_event);
+ warn_lost_event,
+ menu->annotation_opts);
ui_browser__show_title(&menu->b, title);
switch (key) {
case K_TAB:
@@ -3222,7 +3230,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
struct hist_browser_timer *hbt,
float min_pcnt,
struct perf_env *env,
- bool warn_lost_event)
+ bool warn_lost_event,
+ struct annotation_options *annotation_opts)
{
struct perf_evsel *pos;
struct perf_evsel_menu menu = {
@@ -3237,6 +3246,7 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
},
.min_pcnt = min_pcnt,
.env = env,
+ .annotation_opts = annotation_opts,
};
ui_helpline__push("Press ESC to exit");
@@ -3257,7 +3267,8 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
struct hist_browser_timer *hbt,
float min_pcnt,
struct perf_env *env,
- bool warn_lost_event)
+ bool warn_lost_event,
+ struct annotation_options *annotation_opts)
{
int nr_entries = evlist->nr_entries;
@@ -3267,7 +3278,8 @@ single_entry:
return perf_evsel__hists_browse(first, nr_entries, help,
false, hbt, min_pcnt,
- env, warn_lost_event);
+ env, warn_lost_event,
+ annotation_opts);
}
if (symbol_conf.event_group) {
@@ -3285,5 +3297,6 @@ single_entry:
return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
hbt, min_pcnt, env,
- warn_lost_event);
+ warn_lost_event,
+ annotation_opts);
}
diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h
index 9428bee076f2..91d3e18b50aa 100644
--- a/tools/perf/ui/browsers/hists.h
+++ b/tools/perf/ui/browsers/hists.h
@@ -4,6 +4,8 @@
#include "ui/browser.h"
+struct annotation_options;
+
struct hist_browser {
struct ui_browser b;
struct hists *hists;
@@ -12,6 +14,7 @@ struct hist_browser {
struct hist_browser_timer *hbt;
struct pstack *pstack;
struct perf_env *env;
+ struct annotation_options *annotation_opts;
int print_seq;
bool show_dso;
bool show_headers;
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index aeeaf15029f0..48428c9acd89 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -169,7 +169,7 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
- err = symbol__annotate(sym, map, evsel, 0, NULL);
+ err = symbol__annotate(sym, map, evsel, 0, &annotation__default_options, NULL);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 24e1ec201ffd..b085f1b3e34d 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -382,7 +382,8 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
gtk_tree_store_set(store, &iter, col_idx++, s, -1);
}
- if (symbol_conf.use_callchain && hists__has(hists, sym)) {
+ if (hists__has_callchains(hists) &&
+ symbol_conf.use_callchain && hists__has(hists, sym)) {
if (callchain_param.mode == CHAIN_GRAPH_REL)
total = symbol_conf.cumulate_callchain ?
h->stat_acc->period : h->stat.period;
@@ -479,7 +480,7 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
}
}
- if (symbol_conf.use_callchain && he->leaf) {
+ if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
if (callchain_param.mode == CHAIN_GRAPH_REL)
total = symbol_conf.cumulate_callchain ?
he->stat_acc->period : he->stat.period;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 706f6f1e9c7d..fe3dfaa64a91 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -207,7 +207,7 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
if (ret)
return ret;
- if (a->thread != b->thread || !symbol_conf.use_callchain)
+ if (a->thread != b->thread || !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
return 0;
ret = b->callchain->max_depth - a->callchain->max_depth;
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index c1eb476da91b..69b7a28f7a1c 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -516,7 +516,7 @@ static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
}
printed += putc('\n', fp);
- if (symbol_conf.use_callchain && he->leaf) {
+ if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
u64 total = hists__total_period(hists);
printed += hist_entry_callchain__fprintf(he, total, 0, fp);
@@ -550,7 +550,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
ret = fprintf(fp, "%s\n", bf);
- if (use_callchain)
+ if (hist_entry__has_callchains(he) && use_callchain)
callchain_ret = hist_entry_callchain__fprintf(he, total_period,
0, fp);
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 5d4c45b76895..b604ef334dc9 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -24,7 +24,6 @@ libperf-y += libstring.o
libperf-y += bitmap.o
libperf-y += hweight.o
libperf-y += smt.o
-libperf-y += quote.o
libperf-y += strbuf.o
libperf-y += string.o
libperf-y += strlist.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 71897689dacf..f91775b4bc3c 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -21,6 +21,7 @@
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
+#include "evlist.h"
#include "block-range.h"
#include "string2.h"
#include "arch/common.h"
@@ -46,11 +47,10 @@
struct annotation_options annotation__default_options = {
.use_offset = true,
.jump_arrows = true,
+ .annotate_src = true,
.offset_level = ANNOTATION__OFFSET_JUMP_TARGETS,
};
-const char *disassembler_style;
-const char *objdump_path;
static regex_t file_lineno;
static struct ins_ops *ins__find(struct arch *arch, const char *name);
@@ -678,10 +678,28 @@ static struct arch *arch__find(const char *name)
return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}
-int symbol__alloc_hist(struct symbol *sym)
+static struct annotated_source *annotated_source__new(void)
+{
+ struct annotated_source *src = zalloc(sizeof(*src));
+
+ if (src != NULL)
+ INIT_LIST_HEAD(&src->source);
+
+ return src;
+}
+
+static __maybe_unused void annotated_source__delete(struct annotated_source *src)
+{
+ if (src == NULL)
+ return;
+ zfree(&src->histograms);
+ zfree(&src->cycles_hist);
+ free(src);
+}
+
+static int annotated_source__alloc_histograms(struct annotated_source *src,
+ size_t size, int nr_hists)
{
- struct annotation *notes = symbol__annotation(sym);
- size_t size = symbol__size(sym);
size_t sizeof_sym_hist;
/*
@@ -701,17 +719,13 @@ int symbol__alloc_hist(struct symbol *sym)
sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry));
/* Check for overflow in zalloc argument */
- if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
- / symbol_conf.nr_events)
+ if (sizeof_sym_hist > SIZE_MAX / nr_hists)
return -1;
- notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
- if (notes->src == NULL)
- return -1;
- notes->src->sizeof_sym_hist = sizeof_sym_hist;
- notes->src->nr_histograms = symbol_conf.nr_events;
- INIT_LIST_HEAD(&notes->src->source);
- return 0;
+ src->sizeof_sym_hist = sizeof_sym_hist;
+ src->nr_histograms = nr_hists;
+ src->histograms = calloc(nr_hists, sizeof_sym_hist) ;
+ return src->histograms ? 0 : -1;
}
/* The cycles histogram is lazily allocated. */
@@ -741,14 +755,11 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
pthread_mutex_unlock(&notes->lock);
}
-static int __symbol__account_cycles(struct annotation *notes,
+static int __symbol__account_cycles(struct cyc_hist *ch,
u64 start,
unsigned offset, unsigned cycles,
unsigned have_start)
{
- struct cyc_hist *ch;
-
- ch = notes->src->cycles_hist;
/*
* For now we can only account one basic block per
* final jump. But multiple could be overlapping.
@@ -791,7 +802,7 @@ static int __symbol__account_cycles(struct annotation *notes,
}
static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
- struct annotation *notes, int evidx, u64 addr,
+ struct annotated_source *src, int evidx, u64 addr,
struct perf_sample *sample)
{
unsigned offset;
@@ -807,7 +818,12 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
}
offset = addr - sym->start;
- h = annotation__histogram(notes, evidx);
+ h = annotated_source__histogram(src, evidx);
+ if (h == NULL) {
+ pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
+ __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
+ return -ENOMEM;
+ }
h->nr_samples++;
h->addr[offset].nr_samples++;
h->period += sample->period;
@@ -820,45 +836,69 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
return 0;
}
-static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles)
+static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
if (notes->src == NULL) {
- if (symbol__alloc_hist(sym) < 0)
+ notes->src = annotated_source__new();
+ if (notes->src == NULL)
return NULL;
+ goto alloc_cycles_hist;
+ }
+
+ if (!notes->src->cycles_hist) {
+alloc_cycles_hist:
+ symbol__alloc_hist_cycles(sym);
}
- if (!notes->src->cycles_hist && cycles) {
- if (symbol__alloc_hist_cycles(sym) < 0)
+
+ return notes->src->cycles_hist;
+}
+
+struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
+{
+ struct annotation *notes = symbol__annotation(sym);
+
+ if (notes->src == NULL) {
+ notes->src = annotated_source__new();
+ if (notes->src == NULL)
return NULL;
+ goto alloc_histograms;
}
- return notes;
+
+ if (notes->src->histograms == NULL) {
+alloc_histograms:
+ annotated_source__alloc_histograms(notes->src, symbol__size(sym),
+ nr_hists);
+ }
+
+ return notes->src;
}
static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
- int evidx, u64 addr,
+ struct perf_evsel *evsel, u64 addr,
struct perf_sample *sample)
{
- struct annotation *notes;
+ struct annotated_source *src;
if (sym == NULL)
return 0;
- notes = symbol__get_annotation(sym, false);
- if (notes == NULL)
+ src = symbol__hists(sym, evsel->evlist->nr_entries);
+ if (src == NULL)
return -ENOMEM;
- return __symbol__inc_addr_samples(sym, map, notes, evidx, addr, sample);
+ return __symbol__inc_addr_samples(sym, map, src, evsel->idx, addr, sample);
}
static int symbol__account_cycles(u64 addr, u64 start,
struct symbol *sym, unsigned cycles)
{
- struct annotation *notes;
+ struct cyc_hist *cycles_hist;
unsigned offset;
if (sym == NULL)
return 0;
- notes = symbol__get_annotation(sym, true);
- if (notes == NULL)
+ cycles_hist = symbol__cycles_hist(sym);
+ if (cycles_hist == NULL)
return -ENOMEM;
if (addr < sym->start || addr >= sym->end)
return -ERANGE;
@@ -870,7 +910,7 @@ static int symbol__account_cycles(u64 addr, u64 start,
start = 0;
}
offset = addr - sym->start;
- return __symbol__account_cycles(notes,
+ return __symbol__account_cycles(cycles_hist,
start ? start - sym->start : 0,
offset, cycles,
!!start);
@@ -974,15 +1014,15 @@ void annotation__compute_ipc(struct annotation *notes, size_t size)
}
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
- int evidx)
+ struct perf_evsel *evsel)
{
- return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr, sample);
+ return symbol__inc_addr_samples(ams->sym, ams->map, evsel, ams->al_addr, sample);
}
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
- int evidx, u64 ip)
+ struct perf_evsel *evsel, u64 ip)
{
- return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip, sample);
+ return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evsel, ip, sample);
}
static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
@@ -1031,6 +1071,7 @@ struct annotate_args {
struct arch *arch;
struct map_symbol ms;
struct perf_evsel *evsel;
+ struct annotation_options *options;
s64 offset;
char *line;
int line_nr;
@@ -1572,6 +1613,7 @@ fallback:
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
+ struct annotation_options *opts = args->options;
struct map *map = args->ms.map;
struct dso *dso = map->dso;
char *command;
@@ -1619,13 +1661,13 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
" -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
- objdump_path ? objdump_path : "objdump",
- disassembler_style ? "-M " : "",
- disassembler_style ? disassembler_style : "",
+ opts->objdump_path ?: "objdump",
+ opts->disassembler_style ? "-M " : "",
+ opts->disassembler_style ?: "",
map__rip_2objdump(map, sym->start),
map__rip_2objdump(map, sym->end),
- symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
- symbol_conf.annotate_src ? "-S" : "",
+ opts->show_asm_raw ? "" : "--no-show-raw",
+ opts->annotate_src ? "-S" : "",
symfs_filename, symfs_filename);
if (err < 0) {
@@ -1767,11 +1809,13 @@ void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel)
int symbol__annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, size_t privsize,
+ struct annotation_options *options,
struct arch **parch)
{
struct annotate_args args = {
.privsize = privsize,
.evsel = evsel,
+ .options = options,
};
struct perf_env *env = perf_evsel__env(evsel);
const char *arch_name = perf_env__arch(env);
@@ -1949,8 +1993,8 @@ static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
}
int symbol__annotate_printf(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel, bool full_paths,
- int min_pcnt, int max_lines, int context)
+ struct perf_evsel *evsel,
+ struct annotation_options *opts)
{
struct dso *dso = map->dso;
char *filename;
@@ -1962,6 +2006,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
u64 start = map__rip_2objdump(map, sym->start);
int printed = 2, queue_len = 0, addr_fmt_width;
int more = 0;
+ bool context = opts->context;
u64 len;
int width = symbol_conf.show_total_period ? 12 : 8;
int graph_dotted_len;
@@ -1971,7 +2016,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
if (!filename)
return -ENOMEM;
- if (full_paths)
+ if (opts->full_path)
d_filename = filename;
else
d_filename = basename(filename);
@@ -2006,7 +2051,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
}
err = annotation_line__print(pos, sym, start, evsel, len,
- min_pcnt, printed, max_lines,
+ opts->min_pcnt, printed, opts->max_lines,
queue, addr_fmt_width);
switch (err) {
@@ -2339,20 +2384,19 @@ static void symbol__calc_lines(struct symbol *sym, struct map *map,
}
int symbol__tty_annotate2(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel, bool print_lines,
- bool full_paths)
+ struct perf_evsel *evsel,
+ struct annotation_options *opts)
{
struct dso *dso = map->dso;
struct rb_root source_line = RB_ROOT;
- struct annotation_options opts = annotation__default_options;
struct annotation *notes = symbol__annotation(sym);
char buf[1024];
- if (symbol__annotate2(sym, map, evsel, &opts, NULL) < 0)
+ if (symbol__annotate2(sym, map, evsel, opts, NULL) < 0)
return -1;
- if (print_lines) {
- srcline_full_filename = full_paths;
+ if (opts->print_lines) {
+ srcline_full_filename = opts->full_path;
symbol__calc_lines(sym, map, &source_line);
print_summary(&source_line, dso->long_name);
}
@@ -2367,25 +2411,24 @@ int symbol__tty_annotate2(struct symbol *sym, struct map *map,
}
int symbol__tty_annotate(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel, bool print_lines,
- bool full_paths, int min_pcnt, int max_lines)
+ struct perf_evsel *evsel,
+ struct annotation_options *opts)
{
struct dso *dso = map->dso;
struct rb_root source_line = RB_ROOT;
- if (symbol__annotate(sym, map, evsel, 0, NULL) < 0)
+ if (symbol__annotate(sym, map, evsel, 0, opts, NULL) < 0)
return -1;
symbol__calc_percent(sym, evsel);
- if (print_lines) {
- srcline_full_filename = full_paths;
+ if (opts->print_lines) {
+ srcline_full_filename = opts->full_path;
symbol__calc_lines(sym, map, &source_line);
print_summary(&source_line, dso->long_name);
}
- symbol__annotate_printf(sym, map, evsel, full_paths,
- min_pcnt, max_lines, 0);
+ symbol__annotate_printf(sym, map, evsel, opts);
annotated_source__purge(symbol__annotation(sym)->src);
@@ -2620,7 +2663,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->nr_members;
- err = symbol__annotate(sym, map, evsel, 0, parch);
+ err = symbol__annotate(sym, map, evsel, 0, options, parch);
if (err)
goto out_free_offsets;
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 5080b6dd98b8..a4c0d91907e6 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -67,12 +67,21 @@ struct annotation_options {
bool hide_src_code,
use_offset,
jump_arrows,
+ print_lines,
+ full_path,
show_linenr,
show_nr_jumps,
show_nr_samples,
show_total_period,
- show_minmax_cycle;
+ show_minmax_cycle,
+ show_asm_raw,
+ annotate_src;
u8 offset_level;
+ int min_pcnt;
+ int max_lines;
+ int context;
+ const char *objdump_path;
+ const char *disassembler_style;
};
enum {
@@ -201,7 +210,11 @@ struct cyc_hist {
/** struct annotated_source - symbols with hits have this attached as in sannotation
*
- * @histogram: Array of addr hit histograms per event being monitored
+ * @histograms: Array of addr hit histograms per event being monitored
+ * nr_histograms: This may not be the same as evsel->evlist->nr_entries if
+ *		   we have more than one group in an evlist, where we will want
+ * to see each group separately, that is why symbol__annotate2()
+ * sets src->nr_histograms to evsel->nr_members.
* @lines: If 'print_lines' is specified, per source code line percentages
* @source: source parsed from a disassembler like objdump -dS
* @cyc_hist: Average cycles per basic block
@@ -217,7 +230,7 @@ struct annotated_source {
int nr_histograms;
size_t sizeof_sym_hist;
struct cyc_hist *cycles_hist;
- struct sym_hist histograms[0];
+ struct sym_hist *histograms;
};
struct annotation {
@@ -267,10 +280,14 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
void annotation__update_column_widths(struct annotation *notes);
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym);
+static inline struct sym_hist *annotated_source__histogram(struct annotated_source *src, int idx)
+{
+ return ((void *)src->histograms) + (src->sizeof_sym_hist * idx);
+}
+
static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
{
- return (((void *)&notes->src->histograms) +
- (notes->src->sizeof_sym_hist * idx));
+ return annotated_source__histogram(notes->src, idx);
}
static inline struct annotation *symbol__annotation(struct symbol *sym)
@@ -279,20 +296,21 @@ static inline struct annotation *symbol__annotation(struct symbol *sym)
}
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
- int evidx);
+ struct perf_evsel *evsel);
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
struct addr_map_symbol *start,
unsigned cycles);
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
- int evidx, u64 addr);
+ struct perf_evsel *evsel, u64 addr);
-int symbol__alloc_hist(struct symbol *sym);
+struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists);
void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, size_t privsize,
+ struct annotation_options *options,
struct arch **parch);
int symbol__annotate2(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
@@ -320,8 +338,8 @@ int symbol__strerror_disassemble(struct symbol *sym, struct map *map,
int errnum, char *buf, size_t buflen);
int symbol__annotate_printf(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel, bool full_paths,
- int min_pcnt, int max_lines, int context);
+ struct perf_evsel *evsel,
+ struct annotation_options *options);
int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
@@ -332,30 +350,27 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel)
bool ui__has_annotation(void);
int symbol__tty_annotate(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel, bool print_lines,
- bool full_paths, int min_pcnt, int max_lines);
+ struct perf_evsel *evsel, struct annotation_options *opts);
int symbol__tty_annotate2(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel, bool print_lines,
- bool full_paths);
+ struct perf_evsel *evsel, struct annotation_options *opts);
#ifdef HAVE_SLANG_SUPPORT
int symbol__tui_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
- struct hist_browser_timer *hbt);
+ struct hist_browser_timer *hbt,
+ struct annotation_options *opts);
#else
static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
struct map *map __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt
- __maybe_unused)
+ struct hist_browser_timer *hbt __maybe_unused,
+ struct annotation_options *opts __maybe_unused)
{
return 0;
}
#endif
-extern const char *disassembler_style;
-
void annotation_config__init(void);
#endif /* __PERF_ANNOTATE_H */
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index decb91f9da82..ccd02634a616 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -93,20 +93,17 @@ static int open_cgroup(const char *name)
static struct cgroup *evlist__find_cgroup(struct perf_evlist *evlist, const char *str)
{
struct perf_evsel *counter;
- struct cgroup *cgrp = NULL;
/*
* check if cgrp is already defined, if so we reuse it
*/
evlist__for_each_entry(evlist, counter) {
if (!counter->cgrp)
continue;
- if (!strcmp(counter->cgrp->name, str)) {
- cgrp = cgroup__get(counter->cgrp);
- break;
- }
+ if (!strcmp(counter->cgrp->name, str))
+ return cgroup__get(counter->cgrp);
}
- return cgrp;
+ return NULL;
}
static struct cgroup *cgroup__new(const char *name)
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index cdfc2e5f55f5..51cf82cf1882 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -354,6 +354,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
(strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
(strncmp(name, "[vdso]", 6) == 0) ||
+ (strncmp(name, "[vdso32]", 8) == 0) ||
+ (strncmp(name, "[vdsox32]", 9) == 0) ||
(strncmp(name, "[vsyscall]", 10) == 0)) {
m->kmod = false;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 150db5ed7400..94fce4f537e9 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -2197,7 +2197,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
}
}
- if (type & PERF_SAMPLE_CALLCHAIN) {
+ if (evsel__has_callchain(evsel)) {
const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
OVERFLOW_CHECK_u64(array);
@@ -2857,7 +2857,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
"Hint: Try again after reducing the number of events.\n"
"Hint: Try increasing the limit with 'ulimit -n <limit>'");
case ENOMEM:
- if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
+ if (evsel__has_callchain(evsel) &&
access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
return scnprintf(msg, size,
"Not enough memory to setup event with callchain.\n"
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b13f5f234c8f..d277930b19a1 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -459,6 +459,11 @@ static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evs
return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}
+static inline bool evsel__has_callchain(const struct perf_evsel *evsel)
+{
+ return (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0;
+}
+
typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index a8bff2178fbc..540cd2dcd3e7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1459,8 +1459,24 @@ static void print_cmdline(struct feat_fd *ff, FILE *fp)
fprintf(fp, "# cmdline : ");
- for (i = 0; i < nr; i++)
- fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
+ for (i = 0; i < nr; i++) {
+ char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
+ if (!argv_i) {
+ fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
+ } else {
+ char *mem = argv_i;
+ do {
+ char *quote = strchr(argv_i, '\'');
+ if (!quote)
+ break;
+ *quote++ = '\0';
+ fprintf(fp, "%s\\\'", argv_i);
+ argv_i = quote;
+ } while (1);
+ fprintf(fp, "%s ", argv_i);
+ free(mem);
+ }
+ }
fputc('\n', fp);
}
@@ -3312,8 +3328,6 @@ int perf_session__read_header(struct perf_session *session)
lseek(fd, tmp, SEEK_SET);
}
- symbol_conf.nr_events = nr_attrs;
-
perf_header__process_sections(header, fd, &session->tevent,
perf_file_section__process);
@@ -3739,8 +3753,6 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
}
- symbol_conf.nr_events = evlist->nr_entries;
-
return 0;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 4d602fba40b2..52e8fda93a47 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -410,7 +410,7 @@ static int hist_entry__init(struct hist_entry *he,
map__get(he->mem_info->daddr.map);
}
- if (symbol_conf.use_callchain)
+ if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
callchain_init(he->callchain);
if (he->raw_data) {
@@ -492,7 +492,7 @@ static u8 symbol__parent_filter(const struct symbol *parent)
static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
- if (!symbol_conf.use_callchain)
+ if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
return;
he->hists->callchain_period += period;
@@ -986,7 +986,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
iter->he = he;
he_cache[iter->curr++] = he;
- if (symbol_conf.use_callchain)
+ if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
callchain_append(he->callchain, &cursor, sample->period);
return 0;
}
@@ -1039,7 +1039,7 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
int err, err2;
struct map *alm = NULL;
- if (al && al->map)
+ if (al)
alm = map__get(al->map);
err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
@@ -1373,7 +1373,8 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
if (new_he) {
new_he->leaf = true;
- if (symbol_conf.use_callchain) {
+ if (hist_entry__has_callchains(new_he) &&
+ symbol_conf.use_callchain) {
callchain_cursor_reset(&callchain_cursor);
if (callchain_merge(&callchain_cursor,
new_he->callchain,
@@ -1414,7 +1415,7 @@ static int hists__collapse_insert_entry(struct hists *hists,
if (symbol_conf.cumulate_callchain)
he_stat__add_stat(iter->stat_acc, he->stat_acc);
- if (symbol_conf.use_callchain) {
+ if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
callchain_cursor_reset(&callchain_cursor);
if (callchain_merge(&callchain_cursor,
iter->callchain,
@@ -1757,7 +1758,7 @@ void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *pro
bool use_callchain;
if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
- use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
+ use_callchain = evsel__has_callchain(evsel);
else
use_callchain = symbol_conf.use_callchain;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index fbabfd8a215d..06607c434949 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -220,6 +220,12 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
return &hevsel->hists;
}
+static __pure inline bool hists__has_callchains(struct hists *hists)
+{
+ const struct perf_evsel *evsel = hists_to_evsel(hists);
+ return evsel__has_callchain(evsel);
+}
+
int hists__init(void);
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
@@ -419,19 +425,24 @@ struct hist_browser_timer {
int refresh;
};
+struct annotation_options;
+
#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
- struct hist_browser_timer *hbt);
+ struct hist_browser_timer *hbt,
+ struct annotation_options *annotation_opts);
int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
- struct hist_browser_timer *hbt);
+ struct hist_browser_timer *hbt,
+ struct annotation_options *annotation_opts);
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
struct hist_browser_timer *hbt,
float min_pcnt,
struct perf_env *env,
- bool warn_lost_event);
+ bool warn_lost_event,
+ struct annotation_options *annotation_options);
int script_browse(const char *script_opt);
#else
static inline
@@ -440,20 +451,23 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
struct hist_browser_timer *hbt __maybe_unused,
float min_pcnt __maybe_unused,
struct perf_env *env __maybe_unused,
- bool warn_lost_event __maybe_unused)
+ bool warn_lost_event __maybe_unused,
+ struct annotation_options *annotation_options __maybe_unused)
{
return 0;
}
static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused)
+ struct hist_browser_timer *hbt __maybe_unused,
+ struct annotation_options *annotation_options __maybe_unused)
{
return 0;
}
static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused)
+ struct hist_browser_timer *hbt __maybe_unused,
+ struct annotation_options *annotation_opts __maybe_unused)
{
return 0;
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index f9157aed1289..d404bed7003a 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -113,6 +113,7 @@ struct intel_pt_decoder {
bool have_cyc;
bool fixup_last_mtc;
bool have_last_ip;
+ enum intel_pt_param_flags flags;
uint64_t pos;
uint64_t last_ip;
uint64_t ip;
@@ -226,6 +227,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
decoder->return_compression = params->return_compression;
decoder->branch_enable = params->branch_enable;
+ decoder->flags = params->flags;
+
decoder->period = params->period;
decoder->period_type = params->period_type;
@@ -1097,6 +1100,15 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
return ret;
}
+static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
+ struct intel_pt_insn *intel_pt_insn,
+ uint64_t ip, int err)
+{
+ return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
+ intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
+ ip == decoder->ip + intel_pt_insn->length;
+}
+
static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
{
struct intel_pt_insn intel_pt_insn;
@@ -1109,10 +1121,11 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
if (err == INTEL_PT_RETURN)
return 0;
- if (err == -EAGAIN) {
+ if (err == -EAGAIN ||
+ intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
if (intel_pt_fup_event(decoder))
return 0;
- return err;
+ return -EAGAIN;
}
decoder->set_fup_tx_flags = false;
if (err)
@@ -1376,7 +1389,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Buffer overflow\n");
intel_pt_clear_tx_flags(decoder);
- decoder->have_tma = false;
decoder->cbr = 0;
decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
@@ -1604,7 +1616,6 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
case INTEL_PT_PSB:
case INTEL_PT_TSC:
case INTEL_PT_TMA:
- case INTEL_PT_CBR:
case INTEL_PT_MODE_TSX:
case INTEL_PT_BAD:
case INTEL_PT_PSBEND:
@@ -1620,6 +1631,10 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
decoder->pkt_step = 0;
return -ENOENT;
+ case INTEL_PT_CBR:
+ intel_pt_calc_cbr(decoder);
+ break;
+
case INTEL_PT_OVF:
return intel_pt_overflow(decoder);
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index fc1752d50019..51c18d67f4ca 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -60,6 +60,14 @@ enum {
INTEL_PT_ERR_MAX,
};
+enum intel_pt_param_flags {
+ /*
+ * FUP packet can contain next linear instruction pointer instead of
+ * current linear instruction pointer.
+ */
+ INTEL_PT_FUP_WITH_NLIP = 1 << 0,
+};
+
struct intel_pt_state {
enum intel_pt_sample_type type;
int err;
@@ -106,6 +114,7 @@ struct intel_pt_params {
unsigned int mtc_period;
uint32_t tsc_ctc_ratio_n;
uint32_t tsc_ctc_ratio_d;
+ enum intel_pt_param_flags flags;
};
struct intel_pt_decoder;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 492986a25ef6..aec68908d604 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -749,6 +749,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
unsigned int queue_nr)
{
struct intel_pt_params params = { .get_trace = 0, };
+ struct perf_env *env = pt->machine->env;
struct intel_pt_queue *ptq;
ptq = zalloc(sizeof(struct intel_pt_queue));
@@ -830,6 +831,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
}
}
+ if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
+ params.flags |= INTEL_PT_FUP_WITH_NLIP;
+
ptq->decoder = intel_pt_decoder_new(&params);
if (!ptq->decoder)
goto out_free;
@@ -1521,6 +1525,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
switch (ptq->switch_state) {
+ case INTEL_PT_SS_NOT_TRACING:
case INTEL_PT_SS_UNKNOWN:
case INTEL_PT_SS_EXPECTING_SWITCH_IP:
err = intel_pt_next_tid(pt, ptq);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6ae97eda370b..89ac5b5dc218 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -415,16 +415,20 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp)
return fprintf(fp, "%s", dsoname);
}
+char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
+{
+ if (map == NULL)
+ return SRCLINE_UNKNOWN;
+ return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
+}
+
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp)
{
- char *srcline;
int ret = 0;
if (map && map->dso) {
- srcline = get_srcline(map->dso,
- map__rip_2objdump(map, addr), NULL,
- true, true, addr);
+ char *srcline = map__srcline(map, addr, NULL);
if (srcline != SRCLINE_UNKNOWN)
ret = fprintf(fp, "%s%s", prefix, srcline);
free_srcline(srcline);
@@ -445,6 +449,20 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
*/
u64 map__rip_2objdump(struct map *map, u64 rip)
{
+ struct kmap *kmap = __map__kmap(map);
+
+ /*
+ * vmlinux does not have program headers for PTI entry trampolines and
+ * kcore may not either. However the trampoline object code is on the
+ * main kernel map, so just use that instead.
+ */
+ if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
+ struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);
+
+ if (kernel_map)
+ map = kernel_map;
+ }
+
if (!map->dso->adjust_symbols)
return rip;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 97e2a063bd65..4cb90f242bed 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -169,6 +169,7 @@ static inline void __map__zput(struct map **map)
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *map, FILE *fp);
size_t map__fprintf_dsoname(struct map *map, FILE *fp);
+char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index a1a01b1ac8b8..5f761f3ed0f3 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -53,7 +53,21 @@ static int str(yyscan_t scanner, int token)
YYSTYPE *yylval = parse_events_get_lval(scanner);
char *text = parse_events_get_text(scanner);
- yylval->str = strdup(text);
+ if (text[0] != '\'') {
+ yylval->str = strdup(text);
+ } else {
+ /*
+ * If a text tag specified on the command line
+ * contains opening single quote ' then it is
+ * expected that the tag ends with single quote
+ * as well, like this:
+ * name=\'CPU_CLK_UNHALTED.THREAD:cmask=1\'
+ * quotes need to be escaped to bypass shell
+ * processing.
+ */
+ yylval->str = strndup(&text[1], strlen(text) - 2);
+ }
+
return token;
}
@@ -176,6 +190,7 @@ num_dec [0-9]+
num_hex 0x[a-fA-F0-9]+
num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]]*
+name_tag [\'][a-zA-Z_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\']
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
/* If you add a modifier you need to update check_modifier() */
@@ -344,6 +359,7 @@ r{num_raw_hex} { return raw(yyscanner); }
{bpf_object} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_OBJECT); }
{bpf_source} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_SOURCE); }
{name} { return pmu_str_check(yyscanner); }
+{name_tag} { return str(yyscanner, PE_NAME); }
"/" { BEGIN(config); return '/'; }
- { return '-'; }
, { BEGIN(event); return ','; }
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index e37608a87dba..155d2570274f 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -73,6 +73,7 @@ static void inc_group_count(struct list_head *list,
%type <num> value_sym
%type <head> event_config
%type <head> opt_event_config
+%type <head> opt_pmu_config
%type <term> event_term
%type <head> event_pmu
%type <head> event_legacy_symbol
@@ -224,7 +225,7 @@ event_def: event_pmu |
event_bpf_file
event_pmu:
-PE_NAME opt_event_config
+PE_NAME opt_pmu_config
{
struct list_head *list, *orig_terms, *terms;
@@ -496,6 +497,17 @@ opt_event_config:
$$ = NULL;
}
+opt_pmu_config:
+'/' event_config '/'
+{
+ $$ = $2;
+}
+|
+'/' '/'
+{
+ $$ = NULL;
+}
+
start_terms: event_config
{
struct parse_events_state *parse_state = _parse_state;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 3094f11e7d81..f119eb628dbb 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -165,8 +165,7 @@ static struct map *kernel_get_module_map(const char *module)
if (strncmp(pos->dso->short_name + 1, module,
pos->dso->short_name_len - 2) == 0 &&
module[pos->dso->short_name_len - 2] == '\0') {
- map__get(pos);
- return pos;
+ return map__get(pos);
}
}
return NULL;
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
deleted file mode 100644
index 22eaa201aa27..000000000000
--- a/tools/perf/util/quote.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <errno.h>
-#include <stdlib.h>
-#include "strbuf.h"
-#include "quote.h"
-#include "util.h"
-
-/* Help to copy the thing properly quoted for the shell safety.
- * any single quote is replaced with '\'', any exclamation point
- * is replaced with '\!', and the whole thing is enclosed in a
- *
- * E.g.
- * original sq_quote result
- * name ==> name ==> 'name'
- * a b ==> a b ==> 'a b'
- * a'b ==> a'\''b ==> 'a'\''b'
- * a!b ==> a'\!'b ==> 'a'\!'b'
- */
-static inline int need_bs_quote(char c)
-{
- return (c == '\'' || c == '!');
-}
-
-static int sq_quote_buf(struct strbuf *dst, const char *src)
-{
- char *to_free = NULL;
- int ret;
-
- if (dst->buf == src)
- to_free = strbuf_detach(dst, NULL);
-
- ret = strbuf_addch(dst, '\'');
- while (!ret && *src) {
- size_t len = strcspn(src, "'!");
- ret = strbuf_add(dst, src, len);
- src += len;
- while (!ret && need_bs_quote(*src))
- ret = strbuf_addf(dst, "'\\%c\'", *src++);
- }
- if (!ret)
- ret = strbuf_addch(dst, '\'');
- free(to_free);
-
- return ret;
-}
-
-int sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
-{
- int i, ret;
-
- /* Copy into destination buffer. */
- ret = strbuf_grow(dst, 255);
- for (i = 0; !ret && argv[i]; ++i) {
- ret = strbuf_addch(dst, ' ');
- if (ret)
- break;
- ret = sq_quote_buf(dst, argv[i]);
- if (maxlen && dst->len > maxlen)
- return -ENOSPC;
- }
- return ret;
-}
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
deleted file mode 100644
index 274bf26d3511..000000000000
--- a/tools/perf/util/quote.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PERF_QUOTE_H
-#define __PERF_QUOTE_H
-
-#include <stddef.h>
-
-/* Help to copy the thing properly quoted for the shell safety.
- * any single quote is replaced with '\'', any exclamation point
- * is replaced with '\!', and the whole thing is enclosed in a
- * single quote pair.
- *
- * For example, if you are passing the result to system() as an
- * argument:
- *
- * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1))
- *
- * would be appropriate. If the system() is going to call ssh to
- * run the command on the other side:
- *
- * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1));
- * sprintf(rcmd, "ssh %s %s", sq_util/quote.host), sq_quote(cmd));
- *
- * Note that the above examples leak memory! Remember to free result from
- * sq_quote() in a real application.
- */
-
-struct strbuf;
-
-int sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
-
-#endif /* __PERF_QUOTE_H */
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 7f8afacd08ee..46e9e19ab1ac 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -48,6 +48,7 @@
#include "cpumap.h"
#include "print_binary.h"
#include "stat.h"
+#include "mem-events.h"
#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
@@ -372,6 +373,19 @@ static PyObject *get_field_numeric_entry(struct event_format *event,
return obj;
}
+static const char *get_dsoname(struct map *map)
+{
+ const char *dsoname = "[unknown]";
+
+ if (map && map->dso) {
+ if (symbol_conf.show_kernel_path && map->dso->long_name)
+ dsoname = map->dso->long_name;
+ else
+ dsoname = map->dso->name;
+ }
+
+ return dsoname;
+}
static PyObject *python_process_callchain(struct perf_sample *sample,
struct perf_evsel *evsel,
@@ -427,14 +441,8 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
}
if (node->map) {
- struct map *map = node->map;
- const char *dsoname = "[unknown]";
- if (map && map->dso) {
- if (symbol_conf.show_kernel_path && map->dso->long_name)
- dsoname = map->dso->long_name;
- else
- dsoname = map->dso->name;
- }
+ const char *dsoname = get_dsoname(node->map);
+
pydict_set_item_string_decref(pyelem, "dso",
_PyUnicode_FromString(dsoname));
}
@@ -448,6 +456,166 @@ exit:
return pylist;
}
+static PyObject *python_process_brstack(struct perf_sample *sample,
+ struct thread *thread)
+{
+ struct branch_stack *br = sample->branch_stack;
+ PyObject *pylist;
+ u64 i;
+
+ pylist = PyList_New(0);
+ if (!pylist)
+ Py_FatalError("couldn't create Python list");
+
+ if (!(br && br->nr))
+ goto exit;
+
+ for (i = 0; i < br->nr; i++) {
+ PyObject *pyelem;
+ struct addr_location al;
+ const char *dsoname;
+
+ pyelem = PyDict_New();
+ if (!pyelem)
+ Py_FatalError("couldn't create Python dictionary");
+
+ pydict_set_item_string_decref(pyelem, "from",
+ PyLong_FromUnsignedLongLong(br->entries[i].from));
+ pydict_set_item_string_decref(pyelem, "to",
+ PyLong_FromUnsignedLongLong(br->entries[i].to));
+ pydict_set_item_string_decref(pyelem, "mispred",
+ PyBool_FromLong(br->entries[i].flags.mispred));
+ pydict_set_item_string_decref(pyelem, "predicted",
+ PyBool_FromLong(br->entries[i].flags.predicted));
+ pydict_set_item_string_decref(pyelem, "in_tx",
+ PyBool_FromLong(br->entries[i].flags.in_tx));
+ pydict_set_item_string_decref(pyelem, "abort",
+ PyBool_FromLong(br->entries[i].flags.abort));
+ pydict_set_item_string_decref(pyelem, "cycles",
+ PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
+
+ thread__find_map(thread, sample->cpumode,
+ br->entries[i].from, &al);
+ dsoname = get_dsoname(al.map);
+ pydict_set_item_string_decref(pyelem, "from_dsoname",
+ _PyUnicode_FromString(dsoname));
+
+ thread__find_map(thread, sample->cpumode,
+ br->entries[i].to, &al);
+ dsoname = get_dsoname(al.map);
+ pydict_set_item_string_decref(pyelem, "to_dsoname",
+ _PyUnicode_FromString(dsoname));
+
+ PyList_Append(pylist, pyelem);
+ Py_DECREF(pyelem);
+ }
+
+exit:
+ return pylist;
+}
+
+static unsigned long get_offset(struct symbol *sym, struct addr_location *al)
+{
+ unsigned long offset;
+
+ if (al->addr < sym->end)
+ offset = al->addr - sym->start;
+ else
+ offset = al->addr - al->map->start - sym->start;
+
+ return offset;
+}
+
+static int get_symoff(struct symbol *sym, struct addr_location *al,
+ bool print_off, char *bf, int size)
+{
+ unsigned long offset;
+
+ if (!sym || !sym->name[0])
+ return scnprintf(bf, size, "%s", "[unknown]");
+
+ if (!print_off)
+ return scnprintf(bf, size, "%s", sym->name);
+
+ offset = get_offset(sym, al);
+
+ return scnprintf(bf, size, "%s+0x%x", sym->name, offset);
+}
+
+static int get_br_mspred(struct branch_flags *flags, char *bf, int size)
+{
+ if (!flags->mispred && !flags->predicted)
+ return scnprintf(bf, size, "%s", "-");
+
+ if (flags->mispred)
+ return scnprintf(bf, size, "%s", "M");
+
+ return scnprintf(bf, size, "%s", "P");
+}
+
+static PyObject *python_process_brstacksym(struct perf_sample *sample,
+ struct thread *thread)
+{
+ struct branch_stack *br = sample->branch_stack;
+ PyObject *pylist;
+ u64 i;
+ char bf[512];
+ struct addr_location al;
+
+ pylist = PyList_New(0);
+ if (!pylist)
+ Py_FatalError("couldn't create Python list");
+
+ if (!(br && br->nr))
+ goto exit;
+
+ for (i = 0; i < br->nr; i++) {
+ PyObject *pyelem;
+
+ pyelem = PyDict_New();
+ if (!pyelem)
+ Py_FatalError("couldn't create Python dictionary");
+
+ thread__find_symbol(thread, sample->cpumode,
+ br->entries[i].from, &al);
+ get_symoff(al.sym, &al, true, bf, sizeof(bf));
+ pydict_set_item_string_decref(pyelem, "from",
+ _PyUnicode_FromString(bf));
+
+ thread__find_symbol(thread, sample->cpumode,
+ br->entries[i].to, &al);
+ get_symoff(al.sym, &al, true, bf, sizeof(bf));
+ pydict_set_item_string_decref(pyelem, "to",
+ _PyUnicode_FromString(bf));
+
+ get_br_mspred(&br->entries[i].flags, bf, sizeof(bf));
+ pydict_set_item_string_decref(pyelem, "pred",
+ _PyUnicode_FromString(bf));
+
+ if (br->entries[i].flags.in_tx) {
+ pydict_set_item_string_decref(pyelem, "in_tx",
+ _PyUnicode_FromString("X"));
+ } else {
+ pydict_set_item_string_decref(pyelem, "in_tx",
+ _PyUnicode_FromString("-"));
+ }
+
+ if (br->entries[i].flags.abort) {
+ pydict_set_item_string_decref(pyelem, "abort",
+ _PyUnicode_FromString("A"));
+ } else {
+ pydict_set_item_string_decref(pyelem, "abort",
+ _PyUnicode_FromString("-"));
+ }
+
+ PyList_Append(pylist, pyelem);
+ Py_DECREF(pyelem);
+ }
+
+exit:
+ return pylist;
+}
+
static PyObject *get_sample_value_as_tuple(struct sample_read_value *value)
{
PyObject *t;
@@ -498,12 +666,63 @@ static void set_sample_read_in_dict(PyObject *dict_sample,
pydict_set_item_string_decref(dict_sample, "values", values);
}
+static void set_sample_datasrc_in_dict(PyObject *dict,
+ struct perf_sample *sample)
+{
+ struct mem_info mi = { .data_src.val = sample->data_src };
+ char decode[100];
+
+ pydict_set_item_string_decref(dict, "datasrc",
+ PyLong_FromUnsignedLongLong(sample->data_src));
+
+ perf_script__meminfo_scnprintf(decode, 100, &mi);
+
+ pydict_set_item_string_decref(dict, "datasrc_decode",
+ _PyUnicode_FromString(decode));
+}
+
+static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
+{
+ unsigned int i = 0, r;
+ int printed = 0;
+
+ bf[0] = 0;
+
+ for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
+ u64 val = regs->regs[i++];
+
+ printed += scnprintf(bf + printed, size - printed,
+ "%5s:0x%" PRIx64 " ",
+ perf_reg_name(r), val);
+ }
+
+ return printed;
+}
+
+static void set_regs_in_dict(PyObject *dict,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ char bf[512];
+
+ regs_map(&sample->intr_regs, attr->sample_regs_intr, bf, sizeof(bf));
+
+ pydict_set_item_string_decref(dict, "iregs",
+ _PyUnicode_FromString(bf));
+
+ regs_map(&sample->user_regs, attr->sample_regs_user, bf, sizeof(bf));
+
+ pydict_set_item_string_decref(dict, "uregs",
+ _PyUnicode_FromString(bf));
+}
+
static PyObject *get_perf_sample_dict(struct perf_sample *sample,
struct perf_evsel *evsel,
struct addr_location *al,
PyObject *callchain)
{
- PyObject *dict, *dict_sample;
+ PyObject *dict, *dict_sample, *brstack, *brstacksym;
dict = PyDict_New();
if (!dict)
@@ -534,6 +753,11 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
pydict_set_item_string_decref(dict_sample, "addr",
PyLong_FromUnsignedLongLong(sample->addr));
set_sample_read_in_dict(dict_sample, sample, evsel);
+ pydict_set_item_string_decref(dict_sample, "weight",
+ PyLong_FromUnsignedLongLong(sample->weight));
+ pydict_set_item_string_decref(dict_sample, "transaction",
+ PyLong_FromUnsignedLongLong(sample->transaction));
+ set_sample_datasrc_in_dict(dict_sample, sample);
pydict_set_item_string_decref(dict, "sample", dict_sample);
pydict_set_item_string_decref(dict, "raw_buf", _PyBytes_FromStringAndSize(
@@ -551,6 +775,14 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
pydict_set_item_string_decref(dict, "callchain", callchain);
+ brstack = python_process_brstack(sample, al->thread);
+ pydict_set_item_string_decref(dict, "brstack", brstack);
+
+ brstacksym = python_process_brstacksym(sample, al->thread);
+ pydict_set_item_string_decref(dict, "brstacksym", brstacksym);
+
+ set_regs_in_dict(dict, sample, evsel);
+
return dict;
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index b998bb475589..8b9369303561 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1094,7 +1094,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
sample_type = evsel->attr.sample_type;
- if (sample_type & PERF_SAMPLE_CALLCHAIN)
+ if (evsel__has_callchain(evsel))
callchain__printf(evsel, sample);
if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 4058ade352a5..fed2952ab45a 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -331,24 +331,18 @@ struct sort_entry sort_sym = {
/* --sort srcline */
-char *hist_entry__get_srcline(struct hist_entry *he)
+char *hist_entry__srcline(struct hist_entry *he)
{
- struct map *map = he->ms.map;
-
- if (!map)
- return SRCLINE_UNKNOWN;
-
- return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
- he->ms.sym, true, true, he->ip);
+ return map__srcline(he->ms.map, he->ip, he->ms.sym);
}
static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->srcline)
- left->srcline = hist_entry__get_srcline(left);
+ left->srcline = hist_entry__srcline(left);
if (!right->srcline)
- right->srcline = hist_entry__get_srcline(right);
+ right->srcline = hist_entry__srcline(right);
return strcmp(right->srcline, left->srcline);
}
@@ -357,7 +351,7 @@ static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (!he->srcline)
- he->srcline = hist_entry__get_srcline(he);
+ he->srcline = hist_entry__srcline(he);
return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}
@@ -371,33 +365,20 @@ struct sort_entry sort_srcline = {
/* --sort srcline_from */
+static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
+{
+ return map__srcline(ams->map, ams->al_addr, ams->sym);
+}
+
static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
- if (!left->branch_info->srcline_from) {
- struct map *map = left->branch_info->from.map;
- if (!map)
- left->branch_info->srcline_from = SRCLINE_UNKNOWN;
- else
- left->branch_info->srcline_from = get_srcline(map->dso,
- map__rip_2objdump(map,
- left->branch_info->from.al_addr),
- left->branch_info->from.sym,
- true, true,
- left->branch_info->from.al_addr);
- }
- if (!right->branch_info->srcline_from) {
- struct map *map = right->branch_info->from.map;
- if (!map)
- right->branch_info->srcline_from = SRCLINE_UNKNOWN;
- else
- right->branch_info->srcline_from = get_srcline(map->dso,
- map__rip_2objdump(map,
- right->branch_info->from.al_addr),
- right->branch_info->from.sym,
- true, true,
- right->branch_info->from.al_addr);
- }
+ if (!left->branch_info->srcline_from)
+ left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
+
+ if (!right->branch_info->srcline_from)
+ right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
+
return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}
@@ -419,30 +400,12 @@ struct sort_entry sort_srcline_from = {
static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
- if (!left->branch_info->srcline_to) {
- struct map *map = left->branch_info->to.map;
- if (!map)
- left->branch_info->srcline_to = SRCLINE_UNKNOWN;
- else
- left->branch_info->srcline_to = get_srcline(map->dso,
- map__rip_2objdump(map,
- left->branch_info->to.al_addr),
- left->branch_info->from.sym,
- true, true,
- left->branch_info->to.al_addr);
- }
- if (!right->branch_info->srcline_to) {
- struct map *map = right->branch_info->to.map;
- if (!map)
- right->branch_info->srcline_to = SRCLINE_UNKNOWN;
- else
- right->branch_info->srcline_to = get_srcline(map->dso,
- map__rip_2objdump(map,
- right->branch_info->to.al_addr),
- right->branch_info->to.sym,
- true, true,
- right->branch_info->to.al_addr);
- }
+ if (!left->branch_info->srcline_to)
+ left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
+
+ if (!right->branch_info->srcline_to)
+ right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
+
return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 9e6896293bbd..7cf2d5cc038e 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -151,6 +151,11 @@ struct hist_entry {
struct callchain_root callchain[0]; /* must be last member */
};
+static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
+{
+ return hists__has_callchains(he->hists);
+}
+
static inline bool hist_entry__has_pairs(struct hist_entry *he)
{
return !list_empty(&he->pairs.node);
@@ -292,5 +297,5 @@ int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right);
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right);
-char *hist_entry__get_srcline(struct hist_entry *he);
+char *hist_entry__srcline(struct hist_entry *he);
#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 8c84437f2a10..d188b7588152 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -40,7 +40,6 @@ char **vmlinux_path;
struct symbol_conf symbol_conf = {
.use_modules = true,
.try_vmlinux_path = true,
- .annotate_src = true,
.demangle = true,
.demangle_kernel = false,
.cumulate_callchain = true,
@@ -74,7 +73,7 @@ static enum dso_binary_type binary_type_symtab[] = {
static bool symbol_type__filter(char symbol_type)
{
symbol_type = toupper(symbol_type);
- return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D';
+ return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
}
static int prefix_underscores_count(const char *str)
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 1a16438eb3ce..f25fae4b5743 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -90,7 +90,6 @@ struct intlist;
struct symbol_conf {
unsigned short priv_size;
- unsigned short nr_events;
bool try_vmlinux_path,
init_annotation,
force,
@@ -109,8 +108,6 @@ struct symbol_conf {
show_cpu_utilization,
initialized,
kptr_restrict,
- annotate_asm_raw,
- annotate_src,
event_group,
demangle,
demangle_kernel,
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 9892323cdd7c..9add1f72ce95 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -3,6 +3,7 @@
#define __PERF_TOP_H 1
#include "tool.h"
+#include "annotate.h"
#include <linux/types.h>
#include <stddef.h>
#include <stdbool.h>
@@ -16,6 +17,7 @@ struct perf_top {
struct perf_tool tool;
struct perf_evlist *evlist;
struct record_opts record_opts;
+ struct annotation_options annotation_opts;
/*
* Symbols will be added here in perf_event__process_sample and will
* get out after decayed.
@@ -35,7 +37,6 @@ struct perf_top {
struct perf_session *session;
struct winsize winsize;
int realtime_prio;
- int sym_pcnt_filter;
const char *sym_filter;
float min_percent;
unsigned int nr_threads_synthesize;
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 4ea385be528f..a8fb63edcf89 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -29,6 +29,8 @@
#include "nfit_test.h"
#include "../watermark.h"
+#include <asm/mcsafe_test.h>
+
/*
* Generate an NFIT table to describe the following topology:
*
@@ -2681,6 +2683,107 @@ static struct platform_driver nfit_test_driver = {
.id_table = nfit_test_id,
};
+static char mcsafe_buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+
+enum INJECT {
+ INJECT_NONE,
+ INJECT_SRC,
+ INJECT_DST,
+};
+
+static void mcsafe_test_init(char *dst, char *src, size_t size)
+{
+ size_t i;
+
+ memset(dst, 0xff, size);
+ for (i = 0; i < size; i++)
+ src[i] = (char) i;
+}
+
+static bool mcsafe_test_validate(unsigned char *dst, unsigned char *src,
+ size_t size, unsigned long rem)
+{
+ size_t i;
+
+ for (i = 0; i < size - rem; i++)
+ if (dst[i] != (unsigned char) i) {
+ pr_info_once("%s:%d: offset: %zd got: %#x expect: %#x\n",
+ __func__, __LINE__, i, dst[i],
+ (unsigned char) i);
+ return false;
+ }
+ for (i = size - rem; i < size; i++)
+ if (dst[i] != 0xffU) {
+ pr_info_once("%s:%d: offset: %zd got: %#x expect: 0xff\n",
+ __func__, __LINE__, i, dst[i]);
+ return false;
+ }
+ return true;
+}
+
+void mcsafe_test(void)
+{
+ char *inject_desc[] = { "none", "source", "destination" };
+ enum INJECT inj;
+
+ if (IS_ENABLED(CONFIG_MCSAFE_TEST)) {
+ pr_info("%s: run...\n", __func__);
+ } else {
+ pr_info("%s: disabled, skip.\n", __func__);
+ return;
+ }
+
+ for (inj = INJECT_NONE; inj <= INJECT_DST; inj++) {
+ int i;
+
+ pr_info("%s: inject: %s\n", __func__, inject_desc[inj]);
+ for (i = 0; i < 512; i++) {
+ unsigned long expect, rem;
+ void *src, *dst;
+ bool valid;
+
+ switch (inj) {
+ case INJECT_NONE:
+ mcsafe_inject_src(NULL);
+ mcsafe_inject_dst(NULL);
+ dst = &mcsafe_buf[2048];
+ src = &mcsafe_buf[1024 - i];
+ expect = 0;
+ break;
+ case INJECT_SRC:
+ mcsafe_inject_src(&mcsafe_buf[1024]);
+ mcsafe_inject_dst(NULL);
+ dst = &mcsafe_buf[2048];
+ src = &mcsafe_buf[1024 - i];
+ expect = 512 - i;
+ break;
+ case INJECT_DST:
+ mcsafe_inject_src(NULL);
+ mcsafe_inject_dst(&mcsafe_buf[2048]);
+ dst = &mcsafe_buf[2048 - i];
+ src = &mcsafe_buf[1024];
+ expect = 512 - i;
+ break;
+ }
+
+ mcsafe_test_init(dst, src, 512);
+ rem = __memcpy_mcsafe(dst, src, 512);
+ valid = mcsafe_test_validate(dst, src, 512, expect);
+ if (rem == expect && valid)
+ continue;
+ pr_info("%s: copy(%#lx, %#lx, %d) off: %d rem: %ld %s expect: %ld\n",
+ __func__,
+ ((unsigned long) dst) & ~PAGE_MASK,
+ ((unsigned long ) src) & ~PAGE_MASK,
+ 512, i, rem, valid ? "valid" : "bad",
+ expect);
+ }
+ }
+
+ mcsafe_inject_src(NULL);
+ mcsafe_inject_dst(NULL);
+}
+
static __init int nfit_test_init(void)
{
int rc, i;
@@ -2689,6 +2792,7 @@ static __init int nfit_test_init(void)
libnvdimm_test();
acpi_nfit_test();
device_dax_test();
+ mcsafe_test();
nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 305130de910c..f1fe492c8e17 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -29,10 +29,12 @@ TARGETS += powerpc
TARGETS += proc
TARGETS += pstore
TARGETS += ptrace
+TARGETS += rseq
TARGETS += rtc
TARGETS += seccomp
TARGETS += sigaltstack
TARGETS += size
+TARGETS += sparc64
TARGETS += splice
TARGETS += static_keys
TARGETS += sync
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 6466294366dc..17ab36605a8e 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -133,6 +133,9 @@ COMPILE.S = $(CC) $(ASFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c
LINK.S = $(CC) $(ASFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH)
endif
+# Selftest makefiles can override those targets by setting
+# OVERRIDE_TARGETS = 1.
+ifeq ($(OVERRIDE_TARGETS),)
$(OUTPUT)/%:%.c
$(LINK.c) $^ $(LDLIBS) -o $@
@@ -141,5 +144,6 @@ $(OUTPUT)/%.o:%.S
$(OUTPUT)/%:%.S
$(LINK.S) $^ $(LDLIBS) -o $@
+endif
.PHONY: run_tests all clean install emit_tests
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 6c16f77c722c..74e5912e9f2e 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -1,3 +1,6 @@
+/fd-001-lookup
+/fd-002-posix-eq
+/fd-003-kthread
/proc-loadavg-001
/proc-self-map-files-001
/proc-self-map-files-002
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index dbb87e56264c..db310eedc268 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -1,6 +1,9 @@
-CFLAGS += -Wall -O2
+CFLAGS += -Wall -O2 -Wno-unused-function
TEST_GEN_PROGS :=
+TEST_GEN_PROGS += fd-001-lookup
+TEST_GEN_PROGS += fd-002-posix-eq
+TEST_GEN_PROGS += fd-003-kthread
TEST_GEN_PROGS += proc-loadavg-001
TEST_GEN_PROGS += proc-self-map-files-001
TEST_GEN_PROGS += proc-self-map-files-002
diff --git a/tools/testing/selftests/proc/fd-001-lookup.c b/tools/testing/selftests/proc/fd-001-lookup.c
new file mode 100644
index 000000000000..a2010dfb2110
--- /dev/null
+++ b/tools/testing/selftests/proc/fd-001-lookup.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+// Test /proc/*/fd lookup.
+#define _GNU_SOURCE
+#undef NDEBUG
+#include <assert.h>
+#include <dirent.h>
+#include <errno.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "proc.h"
+
+/* lstat(2) has more "coverage" in case non-symlink pops up somehow. */
+static void test_lookup_pass(const char *pathname)
+{
+ struct stat st;
+ ssize_t rv;
+
+ memset(&st, 0, sizeof(struct stat));
+ rv = lstat(pathname, &st);
+ assert(rv == 0);
+ assert(S_ISLNK(st.st_mode));
+}
+
+static void test_lookup_fail(const char *pathname)
+{
+ struct stat st;
+ ssize_t rv;
+
+ rv = lstat(pathname, &st);
+ assert(rv == -1 && errno == ENOENT);
+}
+
+static void test_lookup(unsigned int fd)
+{
+ char buf[64];
+ unsigned int c;
+ unsigned int u;
+ int i;
+
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%u", fd);
+ test_lookup_pass(buf);
+
+ /* leading junk */
+ for (c = 1; c <= 255; c++) {
+ if (c == '/')
+ continue;
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%c%u", c, fd);
+ test_lookup_fail(buf);
+ }
+
+ /* trailing junk */
+ for (c = 1; c <= 255; c++) {
+ if (c == '/')
+ continue;
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%u%c", fd, c);
+ test_lookup_fail(buf);
+ }
+
+ for (i = INT_MIN; i < INT_MIN + 1024; i++) {
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", i);
+ test_lookup_fail(buf);
+ }
+ for (i = -1024; i < 0; i++) {
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", i);
+ test_lookup_fail(buf);
+ }
+ for (u = INT_MAX - 1024; u <= (unsigned int)INT_MAX + 1024; u++) {
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%u", u);
+ test_lookup_fail(buf);
+ }
+ for (u = UINT_MAX - 1024; u != 0; u++) {
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%u", u);
+ test_lookup_fail(buf);
+ }
+
+
+}
+
+int main(void)
+{
+ struct dirent *de;
+ unsigned int fd, target_fd;
+
+ if (unshare(CLONE_FILES) == -1)
+ return 1;
+
+ /* Wipe fdtable. */
+ do {
+ DIR *d;
+
+ d = opendir("/proc/self/fd");
+ if (!d)
+ return 1;
+
+ de = xreaddir(d);
+ assert(de->d_type == DT_DIR);
+ assert(streq(de->d_name, "."));
+
+ de = xreaddir(d);
+ assert(de->d_type == DT_DIR);
+ assert(streq(de->d_name, ".."));
+next:
+ de = xreaddir(d);
+ if (de) {
+ unsigned long long fd_ull;
+ unsigned int fd;
+ char *end;
+
+ assert(de->d_type == DT_LNK);
+
+ fd_ull = xstrtoull(de->d_name, &end);
+ assert(*end == '\0');
+ assert(fd_ull == (unsigned int)fd_ull);
+
+ fd = fd_ull;
+ if (fd == dirfd(d))
+ goto next;
+ close(fd);
+ }
+
+ closedir(d);
+ } while (de);
+
+ /* Now fdtable is clean. */
+
+ fd = open("/", O_PATH|O_DIRECTORY);
+ assert(fd == 0);
+ test_lookup(fd);
+ close(fd);
+
+ /* Clean again! */
+
+ fd = open("/", O_PATH|O_DIRECTORY);
+ assert(fd == 0);
+ /* Default RLIMIT_NOFILE-1 */
+ target_fd = 1023;
+ while (target_fd > 0) {
+ if (dup2(fd, target_fd) == target_fd)
+ break;
+ target_fd /= 2;
+ }
+ assert(target_fd > 0);
+ close(fd);
+ test_lookup(target_fd);
+ close(target_fd);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/proc/fd-002-posix-eq.c b/tools/testing/selftests/proc/fd-002-posix-eq.c
new file mode 100644
index 000000000000..417322ca9c53
--- /dev/null
+++ b/tools/testing/selftests/proc/fd-002-posix-eq.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+// Test that open(/proc/*/fd/*) opens the same file.
+#undef NDEBUG
+#include <assert.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int main(void)
+{
+ int fd0, fd1, fd2;
+ struct stat st0, st1, st2;
+ char buf[64];
+ int rv;
+
+ fd0 = open("/", O_DIRECTORY|O_RDONLY);
+ assert(fd0 >= 0);
+
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%u", fd0);
+ fd1 = open(buf, O_RDONLY);
+ assert(fd1 >= 0);
+
+ snprintf(buf, sizeof(buf), "/proc/thread-self/fd/%u", fd0);
+ fd2 = open(buf, O_RDONLY);
+ assert(fd2 >= 0);
+
+ rv = fstat(fd0, &st0);
+ assert(rv == 0);
+ rv = fstat(fd1, &st1);
+ assert(rv == 0);
+ rv = fstat(fd2, &st2);
+ assert(rv == 0);
+
+ assert(st0.st_dev == st1.st_dev);
+ assert(st0.st_ino == st1.st_ino);
+
+ assert(st0.st_dev == st2.st_dev);
+ assert(st0.st_ino == st2.st_ino);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/proc/fd-003-kthread.c b/tools/testing/selftests/proc/fd-003-kthread.c
new file mode 100644
index 000000000000..1d659d55368c
--- /dev/null
+++ b/tools/testing/selftests/proc/fd-003-kthread.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+// Test that /proc/$KERNEL_THREAD/fd/ is empty.
+#define _GNU_SOURCE
+#undef NDEBUG
+#include <sys/syscall.h>
+#include <assert.h>
+#include <dirent.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "proc.h"
+
+#define PF_KHTREAD 0x00200000
+
+/*
+ * Test for kernel threadness atomically with openat().
+ *
+ * Return /proc/$PID/fd descriptor if process is kernel thread.
+ * Return -1 if a process is userspace process.
+ */
+static int kernel_thread_fd(unsigned int pid)
+{
+ unsigned int flags = 0;
+ char buf[4096];
+ int dir_fd, fd;
+ ssize_t rv;
+
+ snprintf(buf, sizeof(buf), "/proc/%u", pid);
+ dir_fd = open(buf, O_RDONLY|O_DIRECTORY);
+ if (dir_fd == -1)
+ return -1;
+
+ /*
+ * Believe it or not, struct task_struct::flags is directly exposed
+ * to userspace!
+ */
+ fd = openat(dir_fd, "stat", O_RDONLY);
+ if (fd == -1) {
+ close(dir_fd);
+ return -1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (0 < rv && rv <= sizeof(buf)) {
+ unsigned long long flags_ull;
+ char *p, *end;
+ int i;
+
+ assert(buf[rv - 1] == '\n');
+ buf[rv - 1] = '\0';
+
+ /* Search backwards: ->comm can contain whitespace and ')'. */
+ for (i = 0; i < 43; i++) {
+ p = strrchr(buf, ' ');
+ assert(p);
+ *p = '\0';
+ }
+
+ p = strrchr(buf, ' ');
+ assert(p);
+
+ flags_ull = xstrtoull(p + 1, &end);
+ assert(*end == '\0');
+ assert(flags_ull == (unsigned int)flags_ull);
+
+ flags = flags_ull;
+ }
+
+ fd = -1;
+ if (flags & PF_KHTREAD) {
+ fd = openat(dir_fd, "fd", O_RDONLY|O_DIRECTORY);
+ }
+ close(dir_fd);
+ return fd;
+}
+
+static void test_readdir(int fd)
+{
+ DIR *d;
+ struct dirent *de;
+
+ d = fdopendir(fd);
+ assert(d);
+
+ de = xreaddir(d);
+ assert(streq(de->d_name, "."));
+ assert(de->d_type == DT_DIR);
+
+ de = xreaddir(d);
+ assert(streq(de->d_name, ".."));
+ assert(de->d_type == DT_DIR);
+
+ de = xreaddir(d);
+ assert(!de);
+}
+
/* Raw statx(2) wrapper; glibc did not expose a statx() wrapper here. */
static inline int sys_statx(int dirfd, const char *pathname, int flags,
			    unsigned int mask, void *stx)
{
	long ret;

	ret = syscall(SYS_statx, dirfd, pathname, flags, mask, stx);
	return (int)ret;
}
+
+static void test_lookup_fail(int fd, const char *pathname)
+{
+ char stx[256] __attribute__((aligned(8)));
+ int rv;
+
+ rv = sys_statx(fd, pathname, AT_SYMLINK_NOFOLLOW, 0, (void *)stx);
+ assert(rv == -1 && errno == ENOENT);
+}
+
/*
 * Probe fd-directory lookups with numeric names around the interesting
 * integer boundaries; all of them must fail for a kernel thread.
 */
static void test_lookup(int fd)
{
	char name[64];
	unsigned int u;
	int i;

	/* Around INT_MIN. */
	for (i = INT_MIN; i < INT_MIN + 1024; i++) {
		snprintf(name, sizeof(name), "%d", i);
		test_lookup_fail(fd, name);
	}
	/* Small negative and small positive values. */
	for (i = -1024; i < 1024; i++) {
		snprintf(name, sizeof(name), "%d", i);
		test_lookup_fail(fd, name);
	}
	/* Around INT_MAX. */
	for (u = INT_MAX - 1024; u < (unsigned int)INT_MAX + 1024; u++) {
		snprintf(name, sizeof(name), "%u", u);
		test_lookup_fail(fd, name);
	}
	/* Around UINT_MAX; loop ends when u wraps to 0 (defined for unsigned). */
	for (u = UINT_MAX - 1024; u != 0; u++) {
		snprintf(name, sizeof(name), "%u", u);
		test_lookup_fail(fd, name);
	}
}
+
int main(void)
{
	unsigned int pid;
	int fd;

	/*
	 * Scan upwards for a kernel thread, starting at kthreadd (PID 2).
	 * In theory this will loop indefinitely if kernel threads are
	 * exiled from /proc.
	 */
	pid = 2;
	for (;;) {
		fd = kernel_thread_fd(pid);
		if (fd != -1 || pid >= 1024)
			break;
		pid++;
	}
	/* EACCES if run as non-root. */
	if (pid >= 1024)
		return 1;

	test_readdir(fd);
	test_lookup(fd);

	return 0;
}
diff --git a/tools/testing/selftests/proc/proc-uptime.h b/tools/testing/selftests/proc/proc-uptime.h
index 0e464b50e9d9..dc6a42b1d6b0 100644
--- a/tools/testing/selftests/proc/proc-uptime.h
+++ b/tools/testing/selftests/proc/proc-uptime.h
@@ -20,21 +20,7 @@
#include <stdlib.h>
#include <unistd.h>
-static unsigned long long xstrtoull(const char *p, char **end)
-{
- if (*p == '0') {
- *end = (char *)p + 1;
- return 0;
- } else if ('1' <= *p && *p <= '9') {
- unsigned long long val;
-
- errno = 0;
- val = strtoull(p, end, 10);
- assert(errno == 0);
- return val;
- } else
- assert(0);
-}
+#include "proc.h"
static void proc_uptime(int fd, uint64_t *uptime, uint64_t *idle)
{
diff --git a/tools/testing/selftests/proc/proc.h b/tools/testing/selftests/proc/proc.h
new file mode 100644
index 000000000000..4e178166fd84
--- /dev/null
+++ b/tools/testing/selftests/proc/proc.h
@@ -0,0 +1,39 @@
+#pragma once
+#undef NDEBUG
+#include <assert.h>
+#include <dirent.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+
/* True when both NUL-terminated strings contain exactly the same bytes. */
static inline bool streq(const char *s1, const char *s2)
{
	return !strcmp(s1, s2);
}
+
/*
 * Strict decimal parser: accepts "0" (a single zero digit only, so no
 * leading zeros and no "0x") or a number starting with 1-9; aborts on
 * anything else or on overflow. *end is set to the first unparsed char.
 */
static unsigned long long xstrtoull(const char *p, char **end)
{
	if (*p == '0') {
		*end = (char *)p + 1;
		return 0;
	}
	if ('1' <= *p && *p <= '9') {
		unsigned long long val;

		errno = 0;
		val = strtoull(p, end, 10);
		/* strtoull() sets ERANGE on overflow. */
		assert(errno == 0);
		return val;
	}
	/* Not a digit at all: hard failure. */
	assert(0);
	return 0;	/* unreachable; silences -Wreturn-type */
}
+
/*
 * readdir() returns NULL both at end-of-directory and on error;
 * distinguish the two via errno and abort on real errors.
 */
static struct dirent *xreaddir(DIR *d)
{
	struct dirent *entry;

	errno = 0;
	entry = readdir(d);
	assert(entry != NULL || errno == 0);
	return entry;
}
diff --git a/tools/testing/selftests/proc/read.c b/tools/testing/selftests/proc/read.c
index 1e73c2232097..563e752e6eba 100644
--- a/tools/testing/selftests/proc/read.c
+++ b/tools/testing/selftests/proc/read.c
@@ -31,22 +31,7 @@
#include <fcntl.h>
#include <unistd.h>
-static inline bool streq(const char *s1, const char *s2)
-{
- return strcmp(s1, s2) == 0;
-}
-
-static struct dirent *xreaddir(DIR *d)
-{
- struct dirent *de;
-
- errno = 0;
- de = readdir(d);
- if (!de && errno != 0) {
- exit(1);
- }
- return de;
-}
+#include "proc.h"
static void f_reg(DIR *d, const char *filename)
{
diff --git a/tools/testing/selftests/rseq/.gitignore b/tools/testing/selftests/rseq/.gitignore
new file mode 100644
index 000000000000..cc610da7e369
--- /dev/null
+++ b/tools/testing/selftests/rseq/.gitignore
@@ -0,0 +1,6 @@
+basic_percpu_ops_test
+basic_test
+basic_rseq_op_test
+param_test
+param_test_benchmark
+param_test_compare_twice
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
new file mode 100644
index 000000000000..c30c52e1d0d2
--- /dev/null
+++ b/tools/testing/selftests/rseq/Makefile
@@ -0,0 +1,30 @@
# SPDX-License-Identifier: GPL-2.0+ OR MIT
# -L./ -Wl,-rpath=./ : tests link against librseq.so built in this directory.
CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./
LDLIBS += -lpthread

# Own dependencies because we only want to build against 1st prerequisite, but
# still track changes to header files and depend on shared object.
OVERRIDE_TARGETS = 1

TEST_GEN_PROGS = basic_test basic_percpu_ops_test param_test \
		param_test_benchmark param_test_compare_twice

# Shared object built first; every test binary links against it via -lrseq.
TEST_GEN_PROGS_EXTENDED = librseq.so

TEST_PROGS = run_param_test.sh

include ../lib.mk

$(OUTPUT)/librseq.so: rseq.c rseq.h rseq-*.h
	$(CC) $(CFLAGS) -shared -fPIC $< $(LDLIBS) -o $@

# Generic rule: compile only $< (the .c file); headers and librseq.so are
# change-tracking prerequisites.
$(OUTPUT)/%: %.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
	$(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@

# Same param_test source built with -DBENCHMARK (disables the
# instruction-injection test scaffolding in param_test.c).
$(OUTPUT)/param_test_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
					rseq.h rseq-*.h
	$(CC) $(CFLAGS) -DBENCHMARK $< $(LDLIBS) -lrseq -o $@

# Same source with -DRSEQ_COMPARE_TWICE (extra compare pass in rseq ops).
$(OUTPUT)/param_test_compare_twice: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
					rseq.h rseq-*.h
	$(CC) $(CFLAGS) -DRSEQ_COMPARE_TWICE $< $(LDLIBS) -lrseq -o $@
diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
new file mode 100644
index 000000000000..eb3f6db36d36
--- /dev/null
+++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: LGPL-2.1
+#define _GNU_SOURCE
+#include <assert.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stddef.h>
+
+#include "rseq.h"
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
/*
 * Per-cpu lock word. The 128-byte alignment keeps each entry in its own
 * cache-line region (presumably to avoid false sharing between cpus).
 */
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

/* One lock word per possible cpu. */
struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};

/* Per-cpu counter slot, padded like percpu_lock_entry. */
struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

/* Shared state for the spinlock test: lock, per-cpu counts, reps per thread. */
struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};

/* Singly-linked list node carrying an integer payload. */
struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

/* Per-cpu list head, cache-line padded. */
struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

/* One independent list per possible cpu. */
struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};
+
/*
 * A simple percpu spinlock. Returns the cpu lock was acquired on.
 * Spins, retrying the rseq critical section, until the current cpu's
 * lock word transitions 0 -> 1.
 */
int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = rseq_cpu_start();
		/* Store 1 only if the lock word is still 0 on this cpu. */
		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
					 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}
+
/* Release the per-cpu lock previously taken by rseq_this_cpu_lock(). */
void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	/* Caller must actually hold the lock on @cpu. */
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}
+
+void *test_percpu_spinlock_thread(void *arg)
+{
+ struct spinlock_test_data *data = arg;
+ int i, cpu;
+
+ if (rseq_register_current_thread()) {
+ fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
+ errno, strerror(errno));
+ abort();
+ }
+ for (i = 0; i < data->reps; i++) {
+ cpu = rseq_this_cpu_lock(&data->lock);
+ data->c[cpu].count++;
+ rseq_percpu_unlock(&data->lock, cpu);
+ }
+ if (rseq_unregister_current_thread()) {
+ fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
+ errno, strerror(errno));
+ abort();
+ }
+
+ return NULL;
+}
+
+/*
+ * A simple test which implements a sharded counter using a per-cpu
+ * lock. Obviously real applications might prefer to simply use a
+ * per-cpu increment; however, this is reasonable for a test and the
+ * lock can be extended to synchronize more complicated operations.
+ */
+void test_percpu_spinlock(void)
+{
+ const int num_threads = 200;
+ int i;
+ uint64_t sum;
+ pthread_t test_threads[num_threads];
+ struct spinlock_test_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.reps = 5000;
+
+ for (i = 0; i < num_threads; i++)
+ pthread_create(&test_threads[i], NULL,
+ test_percpu_spinlock_thread, &data);
+
+ for (i = 0; i < num_threads; i++)
+ pthread_join(test_threads[i], NULL);
+
+ sum = 0;
+ for (i = 0; i < CPU_SETSIZE; i++)
+ sum += data.c[i].count;
+
+ assert(sum == (uint64_t)data.reps * num_threads);
+}
+
/*
 * Push @node onto the list of the cpu the caller currently runs on.
 * rseq_cmpeqv_storev() stores @node as the new head only if the head is
 * still the value loaded above; otherwise (or if rseq aborts, e.g. on
 * migration — see rseq.h) the whole sequence is retried. If @_cpu is
 * non-NULL, the cpu the push happened on is reported through it.
 */
void this_cpu_list_push(struct percpu_list *list,
			struct percpu_list_node *node,
			int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = rseq_cpu_start();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}
+
+/*
+ * Unlike a traditional lock-less linked list; the availability of a
+ * rseq primitive allows us to implement pop without concerns over
+ * ABA-type races.
+ */
+struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
+ int *_cpu)
+{
+ for (;;) {
+ struct percpu_list_node *head;
+ intptr_t *targetptr, expectnot, *load;
+ off_t offset;
+ int ret, cpu;
+
+ cpu = rseq_cpu_start();
+ targetptr = (intptr_t *)&list->c[cpu].head;
+ expectnot = (intptr_t)NULL;
+ offset = offsetof(struct percpu_list_node, next);
+ load = (intptr_t *)&head;
+ ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
+ offset, load, cpu);
+ if (rseq_likely(!ret)) {
+ if (_cpu)
+ *_cpu = cpu;
+ return head;
+ }
+ if (ret > 0)
+ return NULL;
+ /* Retry if rseq aborts. */
+ }
+}
+
+/*
+ * __percpu_list_pop is not safe against concurrent accesses. Should
+ * only be used on lists that are not concurrently modified.
+ */
+struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
+{
+ struct percpu_list_node *node;
+
+ node = list->c[cpu].head;
+ if (!node)
+ return NULL;
+ list->c[cpu].head = node->next;
+ return node;
+}
+
/* Worker: repeatedly pop a node from the current cpu's list and push it back. */
void *test_percpu_list_thread(void *arg)
{
	struct percpu_list *list = arg;
	int iter;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	for (iter = 0; iter < 100000; iter++) {
		struct percpu_list_node *node = this_cpu_list_pop(list, NULL);

		sched_yield(); /* encourage shuffling */
		if (node)
			this_cpu_list_push(list, node, NULL);
	}

	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}
+
+/* Simultaneous modification to a per-cpu linked list from many threads. */
+void test_percpu_list(void)
+{
+ int i, j;
+ uint64_t sum = 0, expected_sum = 0;
+ struct percpu_list list;
+ pthread_t test_threads[200];
+ cpu_set_t allowed_cpus;
+
+ memset(&list, 0, sizeof(list));
+
+ /* Generate list entries for every usable cpu. */
+ sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (!CPU_ISSET(i, &allowed_cpus))
+ continue;
+ for (j = 1; j <= 100; j++) {
+ struct percpu_list_node *node;
+
+ expected_sum += j;
+
+ node = malloc(sizeof(*node));
+ assert(node);
+ node->data = j;
+ node->next = list.c[i].head;
+ list.c[i].head = node;
+ }
+ }
+
+ for (i = 0; i < 200; i++)
+ pthread_create(&test_threads[i], NULL,
+ test_percpu_list_thread, &list);
+
+ for (i = 0; i < 200; i++)
+ pthread_join(test_threads[i], NULL);
+
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ struct percpu_list_node *node;
+
+ if (!CPU_ISSET(i, &allowed_cpus))
+ continue;
+
+ while ((node = __percpu_list_pop(&list, i))) {
+ sum += node->data;
+ free(node);
+ }
+ }
+
+ /*
+ * All entries should now be accounted for (unless some external
+ * actor is interfering with our allowed affinity while this
+ * test is running).
+ */
+ assert(sum == expected_sum);
+}
+
/* Register the main thread with rseq, run both tests, unregister. */
int main(int argc, char **argv)
{
	int status = -1;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		return status;
	}
	printf("spinlock\n");
	test_percpu_spinlock();
	printf("percpu_list\n");
	test_percpu_list();
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		return status;
	}
	status = 0;
	return status;
}
diff --git a/tools/testing/selftests/rseq/basic_test.c b/tools/testing/selftests/rseq/basic_test.c
new file mode 100644
index 000000000000..d8efbfb89193
--- /dev/null
+++ b/tools/testing/selftests/rseq/basic_test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * Basic test coverage for critical regions and rseq_current_cpu().
+ */
+
+#define _GNU_SOURCE
+#include <assert.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+
+#include "rseq.h"
+
+void test_cpu_pointer(void)
+{
+ cpu_set_t affinity, test_affinity;
+ int i;
+
+ sched_getaffinity(0, sizeof(affinity), &affinity);
+ CPU_ZERO(&test_affinity);
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, &affinity)) {
+ CPU_SET(i, &test_affinity);
+ sched_setaffinity(0, sizeof(test_affinity),
+ &test_affinity);
+ assert(sched_getcpu() == i);
+ assert(rseq_current_cpu() == i);
+ assert(rseq_current_cpu_raw() == i);
+ assert(rseq_cpu_start() == i);
+ CPU_CLR(i, &test_affinity);
+ }
+ }
+ sched_setaffinity(0, sizeof(affinity), &affinity);
+}
+
int main(int argc, char **argv)
{
	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		return -1;
	}
	printf("testing current cpu\n");
	test_cpu_pointer();
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		return -1;
	}
	return 0;
}
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
new file mode 100644
index 000000000000..6a9f602a8718
--- /dev/null
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -0,0 +1,1260 @@
+// SPDX-License-Identifier: LGPL-2.1
+#define _GNU_SOURCE
+#include <assert.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <unistd.h>
+#include <poll.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <errno.h>
+#include <stddef.h>
+
/* glibc provides no gettid() wrapper here; invoke the syscall directly. */
static inline pid_t gettid(void)
{
	return (pid_t)syscall(__NR_gettid);
}
+
+#define NR_INJECT 9
+static int loop_cnt[NR_INJECT + 1];
+
+static int loop_cnt_1 asm("asm_loop_cnt_1") __attribute__((used));
+static int loop_cnt_2 asm("asm_loop_cnt_2") __attribute__((used));
+static int loop_cnt_3 asm("asm_loop_cnt_3") __attribute__((used));
+static int loop_cnt_4 asm("asm_loop_cnt_4") __attribute__((used));
+static int loop_cnt_5 asm("asm_loop_cnt_5") __attribute__((used));
+static int loop_cnt_6 asm("asm_loop_cnt_6") __attribute__((used));
+
+static int opt_modulo, verbose;
+
+static int opt_yield, opt_signal, opt_sleep,
+ opt_disable_rseq, opt_threads = 200,
+ opt_disable_mod = 0, opt_test = 's', opt_mb = 0;
+
+#ifndef RSEQ_SKIP_FASTPATH
+static long long opt_reps = 5000;
+#else
+static long long opt_reps = 100;
+#endif
+
+static __thread __attribute__((tls_model("initial-exec")))
+unsigned int signals_delivered;
+
+#ifndef BENCHMARK
+
+static __thread __attribute__((tls_model("initial-exec"), unused))
+unsigned int yield_mod_cnt, nr_abort;
+
+#define printf_verbose(fmt, ...) \
+ do { \
+ if (verbose) \
+ printf(fmt, ## __VA_ARGS__); \
+ } while (0)
+
+#if defined(__x86_64__) || defined(__i386__)
+
+#define INJECT_ASM_REG "eax"
+
+#define RSEQ_INJECT_CLOBBER \
+ , INJECT_ASM_REG
+
+#ifdef __i386__
+
+#define RSEQ_INJECT_ASM(n) \
+ "mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
+ "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
+ "jz 333f\n\t" \
+ "222:\n\t" \
+ "dec %%" INJECT_ASM_REG "\n\t" \
+ "jnz 222b\n\t" \
+ "333:\n\t"
+
+#elif defined(__x86_64__)
+
+#define RSEQ_INJECT_ASM(n) \
+ "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG "\n\t" \
+ "mov (%%" INJECT_ASM_REG "), %%" INJECT_ASM_REG "\n\t" \
+ "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
+ "jz 333f\n\t" \
+ "222:\n\t" \
+ "dec %%" INJECT_ASM_REG "\n\t" \
+ "jnz 222b\n\t" \
+ "333:\n\t"
+
+#else
+#error "Unsupported architecture"
+#endif
+
+#elif defined(__ARMEL__)
+
+#define RSEQ_INJECT_INPUT \
+ , [loop_cnt_1]"m"(loop_cnt[1]) \
+ , [loop_cnt_2]"m"(loop_cnt[2]) \
+ , [loop_cnt_3]"m"(loop_cnt[3]) \
+ , [loop_cnt_4]"m"(loop_cnt[4]) \
+ , [loop_cnt_5]"m"(loop_cnt[5]) \
+ , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "r4"
+
+#define RSEQ_INJECT_CLOBBER \
+ , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+ "ldr " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+ "cmp " INJECT_ASM_REG ", #0\n\t" \
+ "beq 333f\n\t" \
+ "222:\n\t" \
+ "subs " INJECT_ASM_REG ", #1\n\t" \
+ "bne 222b\n\t" \
+ "333:\n\t"
+
+#elif __PPC__
+
+#define RSEQ_INJECT_INPUT \
+ , [loop_cnt_1]"m"(loop_cnt[1]) \
+ , [loop_cnt_2]"m"(loop_cnt[2]) \
+ , [loop_cnt_3]"m"(loop_cnt[3]) \
+ , [loop_cnt_4]"m"(loop_cnt[4]) \
+ , [loop_cnt_5]"m"(loop_cnt[5]) \
+ , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "r18"
+
+#define RSEQ_INJECT_CLOBBER \
+ , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+ "lwz %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+ "cmpwi %%" INJECT_ASM_REG ", 0\n\t" \
+ "beq 333f\n\t" \
+ "222:\n\t" \
+ "subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \
+ "bne 222b\n\t" \
+ "333:\n\t"
+#else
+#error unsupported target
+#endif
+
+#define RSEQ_INJECT_FAILED \
+ nr_abort++;
+
+#define RSEQ_INJECT_C(n) \
+{ \
+ int loc_i, loc_nr_loops = loop_cnt[n]; \
+ \
+ for (loc_i = 0; loc_i < loc_nr_loops; loc_i++) { \
+ rseq_barrier(); \
+ } \
+ if (loc_nr_loops == -1 && opt_modulo) { \
+ if (yield_mod_cnt == opt_modulo - 1) { \
+ if (opt_sleep > 0) \
+ poll(NULL, 0, opt_sleep); \
+ if (opt_yield) \
+ sched_yield(); \
+ if (opt_signal) \
+ raise(SIGUSR1); \
+ yield_mod_cnt = 0; \
+ } else { \
+ yield_mod_cnt++; \
+ } \
+ } \
+}
+
+#else
+
+#define printf_verbose(fmt, ...)
+
+#endif /* BENCHMARK */
+
+#include "rseq.h"
+
/*
 * Per-cpu lock word; 128-byte alignment keeps entries in separate
 * cache-line regions (presumably to avoid false sharing).
 */
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

/* One lock word per possible cpu. */
struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};

/* Per-cpu counter slot, padded like percpu_lock_entry. */
struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

/* Shared state for the spinlock test. */
struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
};

/* Per-thread arguments for the spinlock workers. */
struct spinlock_thread_test_data {
	struct spinlock_test_data *data;
	long long reps;		/* iterations this thread performs */
	int reg;		/* whether this thread registers with rseq */
};

/* Shared state for the plain per-cpu increment test. */
struct inc_test_data {
	struct test_data_entry c[CPU_SETSIZE];
};

/* Per-thread arguments for the increment workers. */
struct inc_thread_test_data {
	struct inc_test_data *data;
	long long reps;		/* iterations this thread performs */
	int reg;		/* whether this thread registers with rseq */
};

/* Singly-linked list node carrying an integer payload. */
struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

/* Per-cpu list head, cache-line padded. */
struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

/* One independent list per possible cpu. */
struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};

#define BUFFER_ITEM_PER_CPU 100

/* Element stored (by pointer) in the per-cpu array-backed buffer. */
struct percpu_buffer_node {
	intptr_t data;
};

/* Per-cpu LIFO buffer: array of node pointers plus current fill offset. */
struct percpu_buffer_entry {
	intptr_t offset;	/* number of occupied slots */
	intptr_t buflen;	/* capacity of @array */
	struct percpu_buffer_node **array;
} __attribute__((aligned(128)));

/* One buffer per possible cpu. */
struct percpu_buffer {
	struct percpu_buffer_entry c[CPU_SETSIZE];
};

#define MEMCPY_BUFFER_ITEM_PER_CPU 100

/* Element copied by value into the memcpy-based per-cpu buffer. */
struct percpu_memcpy_buffer_node {
	intptr_t data1;
	uint64_t data2;
};

/* Per-cpu buffer holding nodes by value rather than by pointer. */
struct percpu_memcpy_buffer_entry {
	intptr_t offset;	/* number of occupied slots */
	intptr_t buflen;	/* capacity of @array */
	struct percpu_memcpy_buffer_node *array;
} __attribute__((aligned(128)));

/* One memcpy buffer per possible cpu. */
struct percpu_memcpy_buffer {
	struct percpu_memcpy_buffer_entry c[CPU_SETSIZE];
};
+
/*
 * A simple percpu spinlock. Grabs lock on current cpu.
 * Returns the cpu number the lock was acquired on; spins (retrying the
 * rseq critical section) until this cpu's lock word goes 0 -> 1.
 */
static int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = rseq_cpu_start();
		/* Store 1 only if the lock word is still 0 on this cpu. */
		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
					 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}
+
/* Release the per-cpu lock previously taken by rseq_this_cpu_lock(). */
static void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	/* Caller must actually hold the lock on @cpu. */
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}
+
+void *test_percpu_spinlock_thread(void *arg)
+{
+ struct spinlock_thread_test_data *thread_data = arg;
+ struct spinlock_test_data *data = thread_data->data;
+ long long i, reps;
+
+ if (!opt_disable_rseq && thread_data->reg &&
+ rseq_register_current_thread())
+ abort();
+ reps = thread_data->reps;
+ for (i = 0; i < reps; i++) {
+ int cpu = rseq_cpu_start();
+
+ cpu = rseq_this_cpu_lock(&data->lock);
+ data->c[cpu].count++;
+ rseq_percpu_unlock(&data->lock, cpu);
+#ifndef BENCHMARK
+ if (i != 0 && !(i % (reps / 10)))
+ printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+#endif
+ }
+ printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
+ (int) gettid(), nr_abort, signals_delivered);
+ if (!opt_disable_rseq && thread_data->reg &&
+ rseq_unregister_current_thread())
+ abort();
+ return NULL;
+}
+
+/*
+ * A simple test which implements a sharded counter using a per-cpu
+ * lock. Obviously real applications might prefer to simply use a
+ * per-cpu increment; however, this is reasonable for a test and the
+ * lock can be extended to synchronize more complicated operations.
+ */
+void test_percpu_spinlock(void)
+{
+ const int num_threads = opt_threads;
+ int i, ret;
+ uint64_t sum;
+ pthread_t test_threads[num_threads];
+ struct spinlock_test_data data;
+ struct spinlock_thread_test_data thread_data[num_threads];
+
+ memset(&data, 0, sizeof(data));
+ for (i = 0; i < num_threads; i++) {
+ thread_data[i].reps = opt_reps;
+ if (opt_disable_mod <= 0 || (i % opt_disable_mod))
+ thread_data[i].reg = 1;
+ else
+ thread_data[i].reg = 0;
+ thread_data[i].data = &data;
+ ret = pthread_create(&test_threads[i], NULL,
+ test_percpu_spinlock_thread,
+ &thread_data[i]);
+ if (ret) {
+ errno = ret;
+ perror("pthread_create");
+ abort();
+ }
+ }
+
+ for (i = 0; i < num_threads; i++) {
+ ret = pthread_join(test_threads[i], NULL);
+ if (ret) {
+ errno = ret;
+ perror("pthread_join");
+ abort();
+ }
+ }
+
+ sum = 0;
+ for (i = 0; i < CPU_SETSIZE; i++)
+ sum += data.c[i].count;
+
+ assert(sum == (uint64_t)opt_reps * num_threads);
+}
+
+void *test_percpu_inc_thread(void *arg)
+{
+ struct inc_thread_test_data *thread_data = arg;
+ struct inc_test_data *data = thread_data->data;
+ long long i, reps;
+
+ if (!opt_disable_rseq && thread_data->reg &&
+ rseq_register_current_thread())
+ abort();
+ reps = thread_data->reps;
+ for (i = 0; i < reps; i++) {
+ int ret;
+
+ do {
+ int cpu;
+
+ cpu = rseq_cpu_start();
+ ret = rseq_addv(&data->c[cpu].count, 1, cpu);
+ } while (rseq_unlikely(ret));
+#ifndef BENCHMARK
+ if (i != 0 && !(i % (reps / 10)))
+ printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+#endif
+ }
+ printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
+ (int) gettid(), nr_abort, signals_delivered);
+ if (!opt_disable_rseq && thread_data->reg &&
+ rseq_unregister_current_thread())
+ abort();
+ return NULL;
+}
+
+void test_percpu_inc(void)
+{
+ const int num_threads = opt_threads;
+ int i, ret;
+ uint64_t sum;
+ pthread_t test_threads[num_threads];
+ struct inc_test_data data;
+ struct inc_thread_test_data thread_data[num_threads];
+
+ memset(&data, 0, sizeof(data));
+ for (i = 0; i < num_threads; i++) {
+ thread_data[i].reps = opt_reps;
+ if (opt_disable_mod <= 0 || (i % opt_disable_mod))
+ thread_data[i].reg = 1;
+ else
+ thread_data[i].reg = 0;
+ thread_data[i].data = &data;
+ ret = pthread_create(&test_threads[i], NULL,
+ test_percpu_inc_thread,
+ &thread_data[i]);
+ if (ret) {
+ errno = ret;
+ perror("pthread_create");
+ abort();
+ }
+ }
+
+ for (i = 0; i < num_threads; i++) {
+ ret = pthread_join(test_threads[i], NULL);
+ if (ret) {
+ errno = ret;
+ perror("pthread_join");
+ abort();
+ }
+ }
+
+ sum = 0;
+ for (i = 0; i < CPU_SETSIZE; i++)
+ sum += data.c[i].count;
+
+ assert(sum == (uint64_t)opt_reps * num_threads);
+}
+
/*
 * Push @node onto the list of the cpu the caller currently runs on.
 * rseq_cmpeqv_storev() stores @node as the new head only if the head is
 * still the value loaded above; otherwise (or if rseq aborts) the whole
 * sequence is retried. Reports the cpu used via @_cpu when non-NULL.
 */
void this_cpu_list_push(struct percpu_list *list,
			struct percpu_list_node *node,
			int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = rseq_cpu_start();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}
+
/*
 * Unlike a traditional lock-less linked list; the availability of a
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 *
 * Returns the popped head, or NULL when the list was empty. Reports
 * the cpu operated on through @_cpu when non-NULL.
 */
struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
					   int *_cpu)
{
	struct percpu_list_node *node = NULL;
	int cpu;

	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		off_t offset;
		int ret;

		cpu = rseq_cpu_start();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
		ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
						 offset, load, cpu);
		if (rseq_likely(!ret)) {
			node = head;
			break;
		}
		if (ret > 0)
			break;	/* Comparison matched NULL: list was empty. */
		/* Retry if rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
	return node;
}
+
+/*
+ * __percpu_list_pop is not safe against concurrent accesses. Should
+ * only be used on lists that are not concurrently modified.
+ */
+struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
+{
+ struct percpu_list_node *node;
+
+ node = list->c[cpu].head;
+ if (!node)
+ return NULL;
+ list->c[cpu].head = node->next;
+ return node;
+}
+
+void *test_percpu_list_thread(void *arg)
+{
+ long long i, reps;
+ struct percpu_list *list = (struct percpu_list *)arg;
+
+ if (!opt_disable_rseq && rseq_register_current_thread())
+ abort();
+
+ reps = opt_reps;
+ for (i = 0; i < reps; i++) {
+ struct percpu_list_node *node;
+
+ node = this_cpu_list_pop(list, NULL);
+ if (opt_yield)
+ sched_yield(); /* encourage shuffling */
+ if (node)
+ this_cpu_list_push(list, node, NULL);
+ }
+
+ printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
+ (int) gettid(), nr_abort, signals_delivered);
+ if (!opt_disable_rseq && rseq_unregister_current_thread())
+ abort();
+
+ return NULL;
+}
+
+/* Simultaneous modification to a per-cpu linked list from many threads. */
+void test_percpu_list(void)
+{
+ const int num_threads = opt_threads;
+ int i, j, ret;
+ uint64_t sum = 0, expected_sum = 0;
+ struct percpu_list list;
+ pthread_t test_threads[num_threads];
+ cpu_set_t allowed_cpus;
+
+ memset(&list, 0, sizeof(list));
+
+ /* Generate list entries for every usable cpu. */
+ sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (!CPU_ISSET(i, &allowed_cpus))
+ continue;
+ for (j = 1; j <= 100; j++) {
+ struct percpu_list_node *node;
+
+ expected_sum += j;
+
+ node = malloc(sizeof(*node));
+ assert(node);
+ node->data = j;
+ node->next = list.c[i].head;
+ list.c[i].head = node;
+ }
+ }
+
+ for (i = 0; i < num_threads; i++) {
+ ret = pthread_create(&test_threads[i], NULL,
+ test_percpu_list_thread, &list);
+ if (ret) {
+ errno = ret;
+ perror("pthread_create");
+ abort();
+ }
+ }
+
+ for (i = 0; i < num_threads; i++) {
+ ret = pthread_join(test_threads[i], NULL);
+ if (ret) {
+ errno = ret;
+ perror("pthread_join");
+ abort();
+ }
+ }
+
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ struct percpu_list_node *node;
+
+ if (!CPU_ISSET(i, &allowed_cpus))
+ continue;
+
+ while ((node = __percpu_list_pop(&list, i))) {
+ sum += node->data;
+ free(node);
+ }
+ }
+
+ /*
+ * All entries should now be accounted for (unless some external
+ * actor is interfering with our allowed affinity while this
+ * test is running).
+ */
+ assert(sum == expected_sum);
+}
+
/*
 * Push @node into the current cpu's buffer array.
 * Returns true on success, false when the buffer is full. The rseq
 * operation stores the node pointer into the free slot and publishes
 * the new offset in one critical section; with opt_mb (-M) the
 * store-release variant is used instead. Reports the cpu used via
 * @_cpu when non-NULL.
 */
bool this_cpu_buffer_push(struct percpu_buffer *buffer,
			  struct percpu_buffer_node *node,
			  int *_cpu)
{
	bool result = false;
	int cpu;

	for (;;) {
		intptr_t *targetptr_spec, newval_spec;
		intptr_t *targetptr_final, newval_final;
		intptr_t offset;
		int ret;

		cpu = rseq_cpu_start();
		/* Load the fill offset with single-copy atomicity. */
		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
		if (offset == buffer->c[cpu].buflen)
			break;	/* Buffer full: fail the push. */
		newval_spec = (intptr_t)node;
		targetptr_spec = (intptr_t *)&buffer->c[cpu].array[offset];
		newval_final = offset + 1;
		targetptr_final = &buffer->c[cpu].offset;
		if (opt_mb)
			ret = rseq_cmpeqv_trystorev_storev_release(
				targetptr_final, offset, targetptr_spec,
				newval_spec, newval_final, cpu);
		else
			ret = rseq_cmpeqv_trystorev_storev(targetptr_final,
				offset, targetptr_spec, newval_spec,
				newval_final, cpu);
		if (rseq_likely(!ret)) {
			result = true;
			break;
		}
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
	return result;
}
+
+struct percpu_buffer_node *this_cpu_buffer_pop(struct percpu_buffer *buffer,
+ int *_cpu)
+{
+ struct percpu_buffer_node *head;
+ int cpu;
+
+ for (;;) {
+ intptr_t *targetptr, newval;
+ intptr_t offset;
+ int ret;
+
+ cpu = rseq_cpu_start();
+ /* Load offset with single-copy atomicity. */
+ offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
+ if (offset == 0) {
+ head = NULL;
+ break;
+ }
+ head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]);
+ newval = offset - 1;
+ targetptr = (intptr_t *)&buffer->c[cpu].offset;
+ ret = rseq_cmpeqv_cmpeqv_storev(targetptr, offset,
+ (intptr_t *)&buffer->c[cpu].array[offset - 1],
+ (intptr_t)head, newval, cpu);
+ if (rseq_likely(!ret))
+ break;
+ /* Retry if comparison fails or rseq aborts. */
+ }
+ if (_cpu)
+ *_cpu = cpu;
+ return head;
+}
+
+/*
+ * __percpu_buffer_pop is not safe against concurrent accesses. Should
+ * only be used on buffers that are not concurrently modified.
+ */
+struct percpu_buffer_node *__percpu_buffer_pop(struct percpu_buffer *buffer,
+ int cpu)
+{
+ struct percpu_buffer_node *head;
+ intptr_t offset;
+
+ offset = buffer->c[cpu].offset;
+ if (offset == 0)
+ return NULL;
+ head = buffer->c[cpu].array[offset - 1];
+ buffer->c[cpu].offset = offset - 1;
+ return head;
+}
+
+void *test_percpu_buffer_thread(void *arg)
+{
+ long long i, reps;
+ struct percpu_buffer *buffer = (struct percpu_buffer *)arg;
+
+ if (!opt_disable_rseq && rseq_register_current_thread())
+ abort();
+
+ reps = opt_reps;
+ for (i = 0; i < reps; i++) {
+ struct percpu_buffer_node *node;
+
+ node = this_cpu_buffer_pop(buffer, NULL);
+ if (opt_yield)
+ sched_yield(); /* encourage shuffling */
+ if (node) {
+ if (!this_cpu_buffer_push(buffer, node, NULL)) {
+ /* Should increase buffer size. */
+ abort();
+ }
+ }
+ }
+
+ printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
+ (int) gettid(), nr_abort, signals_delivered);
+ if (!opt_disable_rseq && rseq_unregister_current_thread())
+ abort();
+
+ return NULL;
+}
+
+/* Simultaneous modification to a per-cpu buffer from many threads. */
+void test_percpu_buffer(void)
+{
+ const int num_threads = opt_threads;
+ int i, j, ret;
+ uint64_t sum = 0, expected_sum = 0;
+ struct percpu_buffer buffer;
+ pthread_t test_threads[num_threads];
+ cpu_set_t allowed_cpus;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+	/* Generate buffer entries for every usable cpu. */
+ sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (!CPU_ISSET(i, &allowed_cpus))
+ continue;
+		/* Worst-case is every item in same CPU. */
+ buffer.c[i].array =
+ malloc(sizeof(*buffer.c[i].array) * CPU_SETSIZE *
+ BUFFER_ITEM_PER_CPU);
+ assert(buffer.c[i].array);
+ buffer.c[i].buflen = CPU_SETSIZE * BUFFER_ITEM_PER_CPU;
+ for (j = 1; j <= BUFFER_ITEM_PER_CPU; j++) {
+ struct percpu_buffer_node *node;
+
+ expected_sum += j;
+
+ /*
+ * We could theoretically put the word-sized
+ * "data" directly in the buffer. However, we
+ * want to model objects that would not fit
+ * within a single word, so allocate an object
+ * for each node.
+ */
+ node = malloc(sizeof(*node));
+ assert(node);
+ node->data = j;
+ buffer.c[i].array[j - 1] = node;
+ buffer.c[i].offset++;
+ }
+ }
+
+ for (i = 0; i < num_threads; i++) {
+ ret = pthread_create(&test_threads[i], NULL,
+ test_percpu_buffer_thread, &buffer);
+ if (ret) {
+ errno = ret;
+ perror("pthread_create");
+ abort();
+ }
+ }
+
+ for (i = 0; i < num_threads; i++) {
+ ret = pthread_join(test_threads[i], NULL);
+ if (ret) {
+ errno = ret;
+ perror("pthread_join");
+ abort();
+ }
+ }
+
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ struct percpu_buffer_node *node;
+
+ if (!CPU_ISSET(i, &allowed_cpus))
+ continue;
+
+ while ((node = __percpu_buffer_pop(&buffer, i))) {
+ sum += node->data;
+ free(node);
+ }
+ free(buffer.c[i].array);
+ }
+
+ /*
+ * All entries should now be accounted for (unless some external
+ * actor is interfering with our allowed affinity while this
+ * test is running).
+ */
+ assert(sum == expected_sum);
+}
+
+bool this_cpu_memcpy_buffer_push(struct percpu_memcpy_buffer *buffer,
+ struct percpu_memcpy_buffer_node item,
+ int *_cpu)
+{
+ bool result = false;
+ int cpu;
+
+ for (;;) {
+ intptr_t *targetptr_final, newval_final, offset;
+ char *destptr, *srcptr;
+ size_t copylen;
+ int ret;
+
+ cpu = rseq_cpu_start();
+ /* Load offset with single-copy atomicity. */
+ offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
+ if (offset == buffer->c[cpu].buflen)
+ break;
+ destptr = (char *)&buffer->c[cpu].array[offset];
+ srcptr = (char *)&item;
+ /* copylen must be <= 4kB. */
+ copylen = sizeof(item);
+ newval_final = offset + 1;
+ targetptr_final = &buffer->c[cpu].offset;
+ if (opt_mb)
+ ret = rseq_cmpeqv_trymemcpy_storev_release(
+ targetptr_final, offset,
+ destptr, srcptr, copylen,
+ newval_final, cpu);
+ else
+ ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final,
+ offset, destptr, srcptr, copylen,
+ newval_final, cpu);
+ if (rseq_likely(!ret)) {
+ result = true;
+ break;
+ }
+ /* Retry if comparison fails or rseq aborts. */
+ }
+ if (_cpu)
+ *_cpu = cpu;
+ return result;
+}
+
+bool this_cpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer,
+ struct percpu_memcpy_buffer_node *item,
+ int *_cpu)
+{
+ bool result = false;
+ int cpu;
+
+ for (;;) {
+ intptr_t *targetptr_final, newval_final, offset;
+ char *destptr, *srcptr;
+ size_t copylen;
+ int ret;
+
+ cpu = rseq_cpu_start();
+ /* Load offset with single-copy atomicity. */
+ offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
+ if (offset == 0)
+ break;
+ destptr = (char *)item;
+ srcptr = (char *)&buffer->c[cpu].array[offset - 1];
+ /* copylen must be <= 4kB. */
+ copylen = sizeof(*item);
+ newval_final = offset - 1;
+ targetptr_final = &buffer->c[cpu].offset;
+ ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final,
+ offset, destptr, srcptr, copylen,
+ newval_final, cpu);
+ if (rseq_likely(!ret)) {
+ result = true;
+ break;
+ }
+ /* Retry if comparison fails or rseq aborts. */
+ }
+ if (_cpu)
+ *_cpu = cpu;
+ return result;
+}
+
+/*
+ * __percpu_memcpy_buffer_pop is not safe against concurrent accesses. Should
+ * only be used on buffers that are not concurrently modified.
+ */
+bool __percpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer,
+ struct percpu_memcpy_buffer_node *item,
+ int cpu)
+{
+ intptr_t offset;
+
+ offset = buffer->c[cpu].offset;
+ if (offset == 0)
+ return false;
+ memcpy(item, &buffer->c[cpu].array[offset - 1], sizeof(*item));
+ buffer->c[cpu].offset = offset - 1;
+ return true;
+}
+
+void *test_percpu_memcpy_buffer_thread(void *arg)
+{
+ long long i, reps;
+ struct percpu_memcpy_buffer *buffer = (struct percpu_memcpy_buffer *)arg;
+
+ if (!opt_disable_rseq && rseq_register_current_thread())
+ abort();
+
+ reps = opt_reps;
+ for (i = 0; i < reps; i++) {
+ struct percpu_memcpy_buffer_node item;
+ bool result;
+
+ result = this_cpu_memcpy_buffer_pop(buffer, &item, NULL);
+ if (opt_yield)
+ sched_yield(); /* encourage shuffling */
+ if (result) {
+ if (!this_cpu_memcpy_buffer_push(buffer, item, NULL)) {
+ /* Should increase buffer size. */
+ abort();
+ }
+ }
+ }
+
+ printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
+ (int) gettid(), nr_abort, signals_delivered);
+ if (!opt_disable_rseq && rseq_unregister_current_thread())
+ abort();
+
+ return NULL;
+}
+
+/* Simultaneous modification to a per-cpu buffer from many threads. */
+void test_percpu_memcpy_buffer(void)
+{
+ const int num_threads = opt_threads;
+ int i, j, ret;
+ uint64_t sum = 0, expected_sum = 0;
+ struct percpu_memcpy_buffer buffer;
+ pthread_t test_threads[num_threads];
+ cpu_set_t allowed_cpus;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+	/* Generate buffer entries for every usable cpu. */
+ sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (!CPU_ISSET(i, &allowed_cpus))
+ continue;
+		/* Worst-case is every item in same CPU. */
+ buffer.c[i].array =
+ malloc(sizeof(*buffer.c[i].array) * CPU_SETSIZE *
+ MEMCPY_BUFFER_ITEM_PER_CPU);
+ assert(buffer.c[i].array);
+ buffer.c[i].buflen = CPU_SETSIZE * MEMCPY_BUFFER_ITEM_PER_CPU;
+ for (j = 1; j <= MEMCPY_BUFFER_ITEM_PER_CPU; j++) {
+ expected_sum += 2 * j + 1;
+
+			/*
+			 * Items of the memcpy buffer are stored by
+			 * value directly in the array (no per-node
+			 * allocation): this models copying objects
+			 * larger than a single word with the rseq
+			 * memcpy operation.
+			 */
+ buffer.c[i].array[j - 1].data1 = j;
+ buffer.c[i].array[j - 1].data2 = j + 1;
+ buffer.c[i].offset++;
+ }
+ }
+
+ for (i = 0; i < num_threads; i++) {
+ ret = pthread_create(&test_threads[i], NULL,
+ test_percpu_memcpy_buffer_thread,
+ &buffer);
+ if (ret) {
+ errno = ret;
+ perror("pthread_create");
+ abort();
+ }
+ }
+
+ for (i = 0; i < num_threads; i++) {
+ ret = pthread_join(test_threads[i], NULL);
+ if (ret) {
+ errno = ret;
+ perror("pthread_join");
+ abort();
+ }
+ }
+
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ struct percpu_memcpy_buffer_node item;
+
+ if (!CPU_ISSET(i, &allowed_cpus))
+ continue;
+
+ while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) {
+ sum += item.data1;
+ sum += item.data2;
+ }
+ free(buffer.c[i].array);
+ }
+
+ /*
+ * All entries should now be accounted for (unless some external
+ * actor is interfering with our allowed affinity while this
+ * test is running).
+ */
+ assert(sum == expected_sum);
+}
+
+static void test_signal_interrupt_handler(int signo)
+{
+ signals_delivered++;
+}
+
+static int set_signal_handler(void)
+{
+ int ret = 0;
+ struct sigaction sa;
+ sigset_t sigset;
+
+ ret = sigemptyset(&sigset);
+ if (ret < 0) {
+ perror("sigemptyset");
+ return ret;
+ }
+
+ sa.sa_handler = test_signal_interrupt_handler;
+ sa.sa_mask = sigset;
+ sa.sa_flags = 0;
+ ret = sigaction(SIGUSR1, &sa, NULL);
+ if (ret < 0) {
+ perror("sigaction");
+ return ret;
+ }
+
+ printf_verbose("Signal handler set for SIGUSR1\n");
+
+ return ret;
+}
+
+static void show_usage(int argc, char **argv)
+{
+ printf("Usage : %s <OPTIONS>\n",
+ argv[0]);
+ printf("OPTIONS:\n");
+ printf(" [-1 loops] Number of loops for delay injection 1\n");
+ printf(" [-2 loops] Number of loops for delay injection 2\n");
+ printf(" [-3 loops] Number of loops for delay injection 3\n");
+ printf(" [-4 loops] Number of loops for delay injection 4\n");
+ printf(" [-5 loops] Number of loops for delay injection 5\n");
+ printf(" [-6 loops] Number of loops for delay injection 6\n");
+ printf(" [-7 loops] Number of loops for delay injection 7 (-1 to enable -m)\n");
+ printf(" [-8 loops] Number of loops for delay injection 8 (-1 to enable -m)\n");
+ printf(" [-9 loops] Number of loops for delay injection 9 (-1 to enable -m)\n");
+ printf(" [-m N] Yield/sleep/kill every modulo N (default 0: disabled) (>= 0)\n");
+ printf(" [-y] Yield\n");
+ printf(" [-k] Kill thread with signal\n");
+ printf(" [-s S] S: =0: disabled (default), >0: sleep time (ms)\n");
+ printf(" [-t N] Number of threads (default 200)\n");
+ printf(" [-r N] Number of repetitions per thread (default 5000)\n");
+ printf(" [-d] Disable rseq system call (no initialization)\n");
+ printf(" [-D M] Disable rseq for each M threads\n");
+ printf(" [-T test] Choose test: (s)pinlock, (l)ist, (b)uffer, (m)emcpy, (i)ncrement\n");
+ printf(" [-M] Push into buffer and memcpy buffer with memory barriers.\n");
+ printf(" [-v] Verbose output.\n");
+ printf(" [-h] Show this help.\n");
+ printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+ int i;
+
+ for (i = 1; i < argc; i++) {
+ if (argv[i][0] != '-')
+ continue;
+ switch (argv[i][1]) {
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ loop_cnt[argv[i][1] - '0'] = atol(argv[i + 1]);
+ i++;
+ break;
+ case 'm':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ opt_modulo = atol(argv[i + 1]);
+ if (opt_modulo < 0) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ i++;
+ break;
+ case 's':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ opt_sleep = atol(argv[i + 1]);
+ if (opt_sleep < 0) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ i++;
+ break;
+ case 'y':
+ opt_yield = 1;
+ break;
+ case 'k':
+ opt_signal = 1;
+ break;
+ case 'd':
+ opt_disable_rseq = 1;
+ break;
+ case 'D':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ opt_disable_mod = atol(argv[i + 1]);
+ if (opt_disable_mod < 0) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ i++;
+ break;
+ case 't':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ opt_threads = atol(argv[i + 1]);
+ if (opt_threads < 0) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ i++;
+ break;
+ case 'r':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ opt_reps = atoll(argv[i + 1]);
+ if (opt_reps < 0) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ i++;
+ break;
+ case 'h':
+ show_usage(argc, argv);
+ goto end;
+ case 'T':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ goto error;
+ }
+ opt_test = *argv[i + 1];
+ switch (opt_test) {
+ case 's':
+ case 'l':
+ case 'i':
+ case 'b':
+ case 'm':
+ break;
+ default:
+ show_usage(argc, argv);
+ goto error;
+ }
+ i++;
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ case 'M':
+ opt_mb = 1;
+ break;
+ default:
+ show_usage(argc, argv);
+ goto error;
+ }
+ }
+
+ loop_cnt_1 = loop_cnt[1];
+ loop_cnt_2 = loop_cnt[2];
+ loop_cnt_3 = loop_cnt[3];
+ loop_cnt_4 = loop_cnt[4];
+ loop_cnt_5 = loop_cnt[5];
+ loop_cnt_6 = loop_cnt[6];
+
+ if (set_signal_handler())
+ goto error;
+
+ if (!opt_disable_rseq && rseq_register_current_thread())
+ goto error;
+ switch (opt_test) {
+ case 's':
+ printf_verbose("spinlock\n");
+ test_percpu_spinlock();
+ break;
+ case 'l':
+ printf_verbose("linked list\n");
+ test_percpu_list();
+ break;
+ case 'b':
+ printf_verbose("buffer\n");
+ test_percpu_buffer();
+ break;
+ case 'm':
+ printf_verbose("memcpy buffer\n");
+ test_percpu_memcpy_buffer();
+ break;
+ case 'i':
+ printf_verbose("counter increment\n");
+ test_percpu_inc();
+ break;
+ }
+ if (!opt_disable_rseq && rseq_unregister_current_thread())
+ abort();
+end:
+ return 0;
+
+error:
+ return -1;
+}
diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h
new file mode 100644
index 000000000000..3b055f9aeaab
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-arm.h
@@ -0,0 +1,715 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-arm.h
+ *
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define RSEQ_SIG 0x53053053
+
+#define rseq_smp_mb() __asm__ __volatile__ ("dmb" ::: "memory", "cc")
+#define rseq_smp_rmb() __asm__ __volatile__ ("dmb" ::: "memory", "cc")
+#define rseq_smp_wmb() __asm__ __volatile__ ("dmb" ::: "memory", "cc")
+
+#define rseq_smp_load_acquire(p) \
+__extension__ ({ \
+ __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_smp_mb(); \
+ ____p1; \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v) \
+do { \
+ rseq_smp_mb(); \
+ RSEQ_WRITE_ONCE(*p, v); \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \
+ post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ "adr r0, " __rseq_str(cs_label) "\n\t" \
+ "str r0, %[" __rseq_str(rseq_cs) "]\n\t" \
+ __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+ RSEQ_INJECT_ASM(2) \
+ "ldr r0, %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "cmp %[" __rseq_str(cpu_id) "], r0\n\t" \
+ "bne " __rseq_str(label) "\n\t"
+
+#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+ abort_label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ __rseq_str(table_label) ":\n\t" \
+ ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
+ ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "b %l[" __rseq_str(abort_label) "]\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \
+ start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+ abort_label, 0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "b %l[" __rseq_str(cmpfail_label) "]\n\t"
+
+#define rseq_workaround_gcc_asm_size_guess() __asm__ __volatile__("")
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[error2]\n\t"
+#endif
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expectnot], r0\n\t"
+ "beq %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "ldr r0, %[v]\n\t"
+ "cmp %[expectnot], r0\n\t"
+ "beq %l[error2]\n\t"
+#endif
+ "str r0, %[load]\n\t"
+ "add r0, %[voffp]\n\t"
+ "ldr r0, [r0]\n\t"
+ /* final store */
+ "str r0, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "Ir" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ "ldr r0, %[v]\n\t"
+ "add r0, %[count]\n\t"
+ /* final store */
+ "str r0, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [count] "Ir" (count)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[error2]\n\t"
+#endif
+ /* try store */
+ "str %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[error2]\n\t"
+#endif
+ /* try store */
+ "str %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ "dmb\n\t" /* full mb provides store-release */
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ "ldr r0, %[v2]\n\t"
+ "cmp %[expect2], r0\n\t"
+ "bne %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[error2]\n\t"
+ "ldr r0, %[v2]\n\t"
+ "cmp %[expect2], r0\n\t"
+ "bne %l[error3]\n\t"
+#endif
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint32_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ "str %[src], %[rseq_scratch0]\n\t"
+ "str %[dst], %[rseq_scratch1]\n\t"
+ "str %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne 7f\n\t"
+#endif
+ /* try memcpy */
+ "cmp %[len], #0\n\t" \
+ "beq 333f\n\t" \
+ "222:\n\t" \
+ "ldrb %%r0, [%[src]]\n\t" \
+ "strb %%r0, [%[dst]]\n\t" \
+ "adds %[src], #1\n\t" \
+ "adds %[dst], #1\n\t" \
+ "subs %[len], #1\n\t" \
+ "bne 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t"
+ "b 8f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ abort, 1b, 2b, 4f)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ "8:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint32_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ "str %[src], %[rseq_scratch0]\n\t"
+ "str %[dst], %[rseq_scratch1]\n\t"
+ "str %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne 7f\n\t"
+#endif
+ /* try memcpy */
+ "cmp %[len], #0\n\t" \
+ "beq 333f\n\t" \
+ "222:\n\t" \
+ "ldrb %%r0, [%[src]]\n\t" \
+ "strb %%r0, [%[dst]]\n\t" \
+ "adds %[src], #1\n\t" \
+ "adds %[dst], #1\n\t" \
+ "subs %[len], #1\n\t" \
+ "bne 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+ "dmb\n\t" /* full mb provides store-release */
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t"
+ "b 8f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ abort, 1b, 2b, 4f)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ "8:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
diff --git a/tools/testing/selftests/rseq/rseq-ppc.h b/tools/testing/selftests/rseq/rseq-ppc.h
new file mode 100644
index 000000000000..52630c9f42be
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-ppc.h
@@ -0,0 +1,671 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-ppc.h
+ *
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * (C) Copyright 2016-2018 - Boqun Feng <boqun.feng@gmail.com>
+ */
+
+#define RSEQ_SIG 0x53053053
+
+#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory", "cc")
+#define rseq_smp_lwsync() __asm__ __volatile__ ("lwsync" ::: "memory", "cc")
+#define rseq_smp_rmb() rseq_smp_lwsync()
+#define rseq_smp_wmb() rseq_smp_lwsync()
+
+#define rseq_smp_load_acquire(p) \
+__extension__ ({ \
+ __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_smp_lwsync(); \
+ ____p1; \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_lwsync()
+
+#define rseq_smp_store_release(p, v) \
+do { \
+ rseq_smp_lwsync(); \
+ RSEQ_WRITE_ONCE(*p, v); \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+/*
+ * The __rseq_table section can be used by debuggers to better handle
+ * single-stepping through the restartable critical sections.
+ */
+
+#ifdef __PPC64__
+
+#define STORE_WORD "std "
+#define LOAD_WORD "ld "
+#define LOADX_WORD "ldx "
+#define CMP_WORD "cmpd "
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
+ ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ "lis %%r17, (" __rseq_str(cs_label) ")@highest\n\t" \
+ "ori %%r17, %%r17, (" __rseq_str(cs_label) ")@higher\n\t" \
+ "rldicr %%r17, %%r17, 32, 31\n\t" \
+ "oris %%r17, %%r17, (" __rseq_str(cs_label) ")@high\n\t" \
+ "ori %%r17, %%r17, (" __rseq_str(cs_label) ")@l\n\t" \
+ "std %%r17, %[" __rseq_str(rseq_cs) "]\n\t" \
+ __rseq_str(label) ":\n\t"
+
+#else /* #ifdef __PPC64__ */
+
+#define STORE_WORD "stw "
+#define LOAD_WORD "lwz "
+#define LOADX_WORD "lwzx "
+#define CMP_WORD "cmpw "
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
+ ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ /* 32-bit only supported on BE */ \
+ ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ "lis %%r17, (" __rseq_str(cs_label) ")@ha\n\t" \
+ "addi %%r17, %%r17, (" __rseq_str(cs_label) ")@l\n\t" \
+ "stw %%r17, %[" __rseq_str(rseq_cs) "]\n\t" \
+ __rseq_str(label) ":\n\t"
+
+#endif /* #ifdef __PPC64__ */
+
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+ RSEQ_INJECT_ASM(2) \
+ "lwz %%r17, %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "cmpw cr7, %[" __rseq_str(cpu_id) "], %%r17\n\t" \
+ "bne- cr7, " __rseq_str(label) "\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(label, abort_label) \
+ ".pushsection __rseq_failure, \"ax\"\n\t" \
+ ".long " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ "b %l[" __rseq_str(abort_label) "]\n\t" \
+ ".popsection\n\t"
+
+/*
+ * RSEQ_ASM_OPs: asm operations for rseq
+ * RSEQ_ASM_OP_R_*: has hard-coded registers in it
+ * RSEQ_ASM_OP_* (else): doesn't have hard-coded registers (unless cr7)
+ */
+#define RSEQ_ASM_OP_CMPEQ(var, expect, label) \
+ LOAD_WORD "%%r17, %[" __rseq_str(var) "]\n\t" \
+ CMP_WORD "cr7, %%r17, %[" __rseq_str(expect) "]\n\t" \
+ "bne- cr7, " __rseq_str(label) "\n\t"
+
+#define RSEQ_ASM_OP_CMPNE(var, expectnot, label) \
+ LOAD_WORD "%%r17, %[" __rseq_str(var) "]\n\t" \
+ CMP_WORD "cr7, %%r17, %[" __rseq_str(expectnot) "]\n\t" \
+ "beq- cr7, " __rseq_str(label) "\n\t"
+
+#define RSEQ_ASM_OP_STORE(value, var) \
+ STORE_WORD "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t"
+
+/* Load @var to r17 */
+#define RSEQ_ASM_OP_R_LOAD(var) \
+ LOAD_WORD "%%r17, %[" __rseq_str(var) "]\n\t"
+
+/* Store r17 to @var */
+#define RSEQ_ASM_OP_R_STORE(var) \
+ STORE_WORD "%%r17, %[" __rseq_str(var) "]\n\t"
+
+/* Add @count to r17 */
+#define RSEQ_ASM_OP_R_ADD(count) \
+ "add %%r17, %[" __rseq_str(count) "], %%r17\n\t"
+
+/* Load (r17 + voffp) to r17 */
+#define RSEQ_ASM_OP_R_LOADX(voffp) \
+ LOADX_WORD "%%r17, %[" __rseq_str(voffp) "], %%r17\n\t"
+
+/* TODO: implement a faster memcpy. */
+#define RSEQ_ASM_OP_R_MEMCPY() \
+ "cmpdi %%r19, 0\n\t" \
+ "beq 333f\n\t" \
+ "addi %%r20, %%r20, -1\n\t" \
+ "addi %%r21, %%r21, -1\n\t" \
+ "222:\n\t" \
+ "lbzu %%r18, 1(%%r20)\n\t" \
+ "stbu %%r18, 1(%%r21)\n\t" \
+ "addi %%r19, %%r19, -1\n\t" \
+ "cmpdi %%r19, 0\n\t" \
+ "bne 222b\n\t" \
+ "333:\n\t" \
+
+#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label) \
+ STORE_WORD "%%r17, %[" __rseq_str(var) "]\n\t" \
+ __rseq_str(post_commit_label) ":\n\t"
+
+#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \
+ STORE_WORD "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" \
+ __rseq_str(post_commit_label) ":\n\t"
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v not equal to @expectnot */
+ RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v not equal to @expectnot */
+ RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2])
+#endif
+ /* load the value of @v */
+ RSEQ_ASM_OP_R_LOAD(v)
+ /* store it in @load */
+ RSEQ_ASM_OP_R_STORE(load)
+ /* dereference voffp(v) */
+ RSEQ_ASM_OP_R_LOADX(voffp)
+ /* final store the value at voffp(v) */
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 2)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "b" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ /* load the value of @v */
+ RSEQ_ASM_OP_R_LOAD(v)
+ /* add @count to it */
+ RSEQ_ASM_OP_R_ADD(count)
+ /* final store */
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 2)
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [count] "r" (count)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ /* try store */
+ RSEQ_ASM_OP_STORE(newv2, v2)
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ /* try store */
+ RSEQ_ASM_OP_STORE(newv2, v2)
+ RSEQ_INJECT_ASM(5)
+ /* for 'release' */
+ "lwsync\n\t"
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+ /* cmp @v2 equal to @expect2 */
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail])
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+ /* cmp @v2 equal to @expect2 */
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3])
+#endif
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* setup for memcpy */
+ "mr %%r19, %[len]\n\t"
+ "mr %%r20, %[src]\n\t"
+ "mr %%r21, %[dst]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ /* try memcpy */
+ RSEQ_ASM_OP_R_MEMCPY()
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17", "r18", "r19", "r20", "r21"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* setup for memcpy */
+ "mr %%r19, %[len]\n\t"
+ "mr %%r20, %[src]\n\t"
+ "mr %%r21, %[dst]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ /* try memcpy */
+ RSEQ_ASM_OP_R_MEMCPY()
+ RSEQ_INJECT_ASM(5)
+ /* for 'release' */
+ "lwsync\n\t"
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17", "r18", "r19", "r20", "r21"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#undef STORE_WORD
+#undef LOAD_WORD
+#undef LOADX_WORD
+#undef CMP_WORD
+
+#endif /* !RSEQ_SKIP_FASTPATH */
diff --git a/tools/testing/selftests/rseq/rseq-skip.h b/tools/testing/selftests/rseq/rseq-skip.h
new file mode 100644
index 000000000000..72750b5905a9
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-skip.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-skip.h
+ *
+ * (C) Copyright 2017-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ return -1;
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ return -1;
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+ return -1;
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ return -1;
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ return -1;
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ return -1;
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ return -1;
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ return -1;
+}
diff --git a/tools/testing/selftests/rseq/rseq-x86.h b/tools/testing/selftests/rseq/rseq-x86.h
new file mode 100644
index 000000000000..089410a314e9
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-x86.h
@@ -0,0 +1,1132 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-x86.h
+ *
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <stdint.h>
+
+#define RSEQ_SIG 0x53053053
+
+#ifdef __x86_64__
+
+#define rseq_smp_mb() \
+ __asm__ __volatile__ ("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
+#define rseq_smp_rmb() rseq_barrier()
+#define rseq_smp_wmb() rseq_barrier()
+
+#define rseq_smp_load_acquire(p) \
+__extension__ ({ \
+ __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_barrier(); \
+ ____p1; \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v) \
+do { \
+ rseq_barrier(); \
+ RSEQ_WRITE_ONCE(*p, v); \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
+ ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ "leaq " __rseq_str(cs_label) "(%%rip), %%rax\n\t" \
+ "movq %%rax, %[" __rseq_str(rseq_cs) "]\n\t" \
+ __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+ RSEQ_INJECT_ASM(2) \
+ "cmpl %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "jnz " __rseq_str(label) "\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
+ ".pushsection __rseq_failure, \"ax\"\n\t" \
+ /* Disassembler-friendly signature: nopl <sig>(%rip). */\
+ ".byte 0x0f, 0x1f, 0x05\n\t" \
+ ".long " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "jmp %l[" __rseq_str(abort_label) "]\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+ ".pushsection __rseq_failure, \"ax\"\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "jmp %l[" __rseq_str(cmpfail_label) "]\n\t" \
+ ".popsection\n\t"
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* final store */
+ "movq %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/*
+ * Compare @v against @expectnot. When it does _not_ match, load @v
+ * into @load, and store the content of *@v + voffp into @v.
+ */
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "movq %[v], %%rbx\n\t"
+ "cmpq %%rbx, %[expectnot]\n\t"
+ "je %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "movq %[v], %%rbx\n\t"
+ "cmpq %%rbx, %[expectnot]\n\t"
+ "je %l[error2]\n\t"
+#endif
+ "movq %%rbx, %[load]\n\t"
+ "addq %[voffp], %%rbx\n\t"
+ "movq (%%rbx), %%rbx\n\t"
+ /* final store */
+ "movq %%rbx, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "er" (voffp),
+ [load] "m" (*load)
+ : "memory", "cc", "rax", "rbx"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ /* final store */
+ "addq %[count], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [count] "er" (count)
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* try store */
+ "movq %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ "movq %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/* x86-64 is TSO. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu);
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ "cmpq %[v2], %[expect2]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+ "cmpq %[v2], %[expect2]\n\t"
+ "jnz %l[error3]\n\t"
+#endif
+ /* final store */
+ "movq %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint64_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ "movq %[src], %[rseq_scratch0]\n\t"
+ "movq %[dst], %[rseq_scratch1]\n\t"
+ "movq %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz 7f\n\t"
+#endif
+ /* try memcpy */
+ "test %[len], %[len]\n\t" \
+ "jz 333f\n\t" \
+ "222:\n\t" \
+ "movb (%[src]), %%al\n\t" \
+ "movb %%al, (%[dst])\n\t" \
+ "inc %[src]\n\t" \
+ "inc %[dst]\n\t" \
+ "dec %[len]\n\t" \
+ "jnz 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ "movq %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t"
+ RSEQ_ASM_DEFINE_ABORT(4,
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t",
+ abort)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t",
+ error2)
+#endif
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/* x86-64 is TSO. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu) /* release variant: identical to the plain version on x86-64 */
+{
+ return rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
+ newv, cpu); /* TSO already orders prior stores before the final store */
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
+
+#elif __i386__
+
+#define rseq_smp_mb() \
+ __asm__ __volatile__ ("lock; addl $0,-128(%%esp)" ::: "memory", "cc")
+#define rseq_smp_rmb() \
+ __asm__ __volatile__ ("lock; addl $0,-128(%%esp)" ::: "memory", "cc")
+#define rseq_smp_wmb() \
+ __asm__ __volatile__ ("lock; addl $0,-128(%%esp)" ::: "memory", "cc")
+
+#define rseq_smp_load_acquire(p) \
+__extension__ ({ \
+ __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_smp_mb(); \
+ ____p1; \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v) \
+do { \
+ rseq_smp_mb(); \
+ RSEQ_WRITE_ONCE(*p, v); \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+/*
+ * Use eax as scratch register and take memory operands as input to
+ * lessen register pressure. Especially needed when compiling in O0.
+ */
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
+ ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".long " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ "movl $" __rseq_str(cs_label) ", %[rseq_cs]\n\t" \
+ __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+ RSEQ_INJECT_ASM(2) \
+ "cmpl %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "jnz " __rseq_str(label) "\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
+ ".pushsection __rseq_failure, \"ax\"\n\t" \
+ /* Disassembler-friendly signature: nopl <sig>. */ \
+ ".byte 0x0f, 0x1f, 0x05\n\t" \
+ ".long " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "jmp %l[" __rseq_str(abort_label) "]\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+ ".pushsection __rseq_failure, \"ax\"\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "jmp %l[" __rseq_str(cmpfail_label) "]\n\t" \
+ ".popsection\n\t"
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* final store */
+ "movl %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/*
+ * Compare @v against @expectnot. When it does _not_ match, load @v
+ * into @load, and store the content of *@v + voffp into @v.
+ */
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "movl %[v], %%ebx\n\t"
+ "cmpl %%ebx, %[expectnot]\n\t"
+ "je %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "movl %[v], %%ebx\n\t"
+ "cmpl %%ebx, %[expectnot]\n\t"
+ "je %l[error2]\n\t"
+#endif
+ "movl %%ebx, %[load]\n\t"
+ "addl %[voffp], %%ebx\n\t"
+ "movl (%%ebx), %%ebx\n\t"
+ /* final store */
+ "movl %%ebx, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "ir" (voffp),
+ [load] "m" (*load)
+ : "memory", "cc", "eax", "ebx"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu) /* add @count to per-cpu *@v iff still on @cpu; 0 = committed, -1 = aborted */
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) /* cpu mismatch -> abort handler (label 4) */
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) /* debug build: re-check cpu before commit */
+#endif
+ /* final store */
+ "addl %[count], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [count] "ir" (count)
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1; /* critical section was aborted (preemption/migration/signal) */
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* try store */
+ "movl %[newv2], %%eax\n\t"
+ "movl %%eax, %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ "movl %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "m" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "movl %[expect], %%eax\n\t"
+ "cmpl %[v], %%eax\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "movl %[expect], %%eax\n\t"
+ "cmpl %[v], %%eax\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* try store */
+ "movl %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ "lock; addl $0,-128(%%esp)\n\t"
+ /* final store */
+ "movl %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "m" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ "cmpl %[expect2], %[v2]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+ "cmpl %[expect2], %[v2]\n\t"
+ "jnz %l[error3]\n\t"
+#endif
+ "movl %[newv], %%eax\n\t"
+ /* final store */
+ "movl %%eax, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "m" (newv)
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+/* TODO: implement a faster memcpy. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint32_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ "movl %[src], %[rseq_scratch0]\n\t"
+ "movl %[dst], %[rseq_scratch1]\n\t"
+ "movl %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "movl %[expect], %%eax\n\t"
+ "cmpl %%eax, %[v]\n\t"
+ "jnz 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ "movl %[expect], %%eax\n\t"
+ "cmpl %%eax, %[v]\n\t"
+ "jnz 7f\n\t"
+#endif
+ /* try memcpy */
+ "test %[len], %[len]\n\t" \
+ "jz 333f\n\t" \
+ "222:\n\t" \
+ "movb (%[src]), %%al\n\t" \
+ "movb %%al, (%[dst])\n\t" \
+ "inc %[src]\n\t" \
+ "inc %[dst]\n\t" \
+ "dec %[len]\n\t" \
+ "jnz 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+ "movl %[newv], %%eax\n\t"
+ /* final store */
+ "movl %%eax, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t"
+ RSEQ_ASM_DEFINE_ABORT(4,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ abort)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ error2)
+#endif
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "m" (expect),
+ [newv] "m" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/* TODO: implement a faster memcpy. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint32_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ "movl %[src], %[rseq_scratch0]\n\t"
+ "movl %[dst], %[rseq_scratch1]\n\t"
+ "movl %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "movl %[expect], %%eax\n\t"
+ "cmpl %%eax, %[v]\n\t"
+ "jnz 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ "movl %[expect], %%eax\n\t"
+ "cmpl %%eax, %[v]\n\t"
+ "jnz 7f\n\t"
+#endif
+ /* try memcpy */
+ "test %[len], %[len]\n\t" \
+ "jz 333f\n\t" \
+ "222:\n\t" \
+ "movb (%[src]), %%al\n\t" \
+ "movb %%al, (%[dst])\n\t" \
+ "inc %[src]\n\t" \
+ "inc %[dst]\n\t" \
+ "dec %[len]\n\t" \
+ "jnz 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+ "lock; addl $0,-128(%%esp)\n\t"
+ "movl %[newv], %%eax\n\t"
+ /* final store */
+ "movl %%eax, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t"
+ RSEQ_ASM_DEFINE_ABORT(4,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ abort)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ error2)
+#endif
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "m" (expect),
+ [newv] "m" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
+
+#endif
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
new file mode 100644
index 000000000000..4847e97ed049
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * rseq.c
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <syscall.h>
+#include <assert.h>
+#include <signal.h>
+
+#include "rseq.h"
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+__attribute__((tls_model("initial-exec"))) __thread
+volatile struct rseq __rseq_abi = {
+ .cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
+};
+
+static __attribute__((tls_model("initial-exec"))) __thread
+volatile int refcount;
+
+static void signal_off_save(sigset_t *oldset) /* block all signals; previous mask saved into *oldset */
+{
+ sigset_t set;
+ int ret;
+
+ sigfillset(&set);
+ ret = pthread_sigmask(SIG_BLOCK, &set, oldset);
+ if (ret)
+ abort(); /* no meaningful recovery if the mask cannot be set */
+}
+
+static void signal_restore(sigset_t oldset) /* restore the signal mask saved by signal_off_save() */
+{
+ int ret;
+
+ ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+ if (ret)
+ abort();
+}
+
+static int sys_rseq(volatile struct rseq *rseq_abi, uint32_t rseq_len,
+ int flags, uint32_t sig) /* thin wrapper around the rseq(2) syscall; no glibc wrapper exists */
+{
+ return syscall(__NR_rseq, rseq_abi, rseq_len, flags, sig);
+}
+
+int rseq_register_current_thread(void)
+{
+ int rc, ret = 0;
+ sigset_t oldset;
+
+ signal_off_save(&oldset); /* block signals while mutating refcount/registration state */
+ if (refcount++)
+ goto end; /* already registered by this thread: just bump the per-thread count */
+ rc = sys_rseq(&__rseq_abi, sizeof(struct rseq), 0, RSEQ_SIG);
+ if (!rc) {
+ assert(rseq_current_cpu_raw() >= 0); /* kernel populates cpu_id on successful registration */
+ goto end;
+ }
+ if (errno != EBUSY)
+ __rseq_abi.cpu_id = -2; /* -2 marks permanent rseq initialization failure */
+ ret = -1;
+ refcount--; /* undo the optimistic increment on failure */
+end:
+ signal_restore(oldset);
+ return ret;
+}
+
+int rseq_unregister_current_thread(void)
+{
+ int rc, ret = 0;
+ sigset_t oldset;
+
+ signal_off_save(&oldset); /* block signals while mutating refcount/registration state */
+ if (--refcount)
+ goto end; /* other users remain on this thread: defer actual unregistration */
+ rc = sys_rseq(&__rseq_abi, sizeof(struct rseq),
+ RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
+ if (!rc)
+ goto end;
+ ret = -1;
+end:
+ signal_restore(oldset);
+ return ret;
+}
+
+int32_t rseq_fallback_current_cpu(void)
+{
+ int32_t cpu;
+
+ cpu = sched_getcpu(); /* fallback path used when the rseq cpu_id field is unusable */
+ if (cpu < 0) {
+ perror("sched_getcpu()");
+ abort();
+ }
+ return cpu;
+}
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
new file mode 100644
index 000000000000..0a808575cbc4
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq.h
+ *
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef RSEQ_H
+#define RSEQ_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sched.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sched.h>
+#include <linux/rseq.h>
+
+/*
+ * Empty code injection macros, override when testing.
+ * It is important to consider that the ASM injection macros need to be
+ * fully reentrant (e.g. do not modify the stack).
+ */
+#ifndef RSEQ_INJECT_ASM
+#define RSEQ_INJECT_ASM(n)
+#endif
+
+#ifndef RSEQ_INJECT_C
+#define RSEQ_INJECT_C(n)
+#endif
+
+#ifndef RSEQ_INJECT_INPUT
+#define RSEQ_INJECT_INPUT
+#endif
+
+#ifndef RSEQ_INJECT_CLOBBER
+#define RSEQ_INJECT_CLOBBER
+#endif
+
+#ifndef RSEQ_INJECT_FAILED
+#define RSEQ_INJECT_FAILED
+#endif
+
+extern __thread volatile struct rseq __rseq_abi;
+
+#define rseq_likely(x) __builtin_expect(!!(x), 1)
+#define rseq_unlikely(x) __builtin_expect(!!(x), 0)
+#define rseq_barrier() __asm__ __volatile__("" : : : "memory")
+
+#define RSEQ_ACCESS_ONCE(x) (*(__volatile__ __typeof__(x) *)&(x))
+#define RSEQ_WRITE_ONCE(x, v) __extension__ ({ RSEQ_ACCESS_ONCE(x) = (v); })
+#define RSEQ_READ_ONCE(x) RSEQ_ACCESS_ONCE(x)
+
+#define __rseq_str_1(x) #x
+#define __rseq_str(x) __rseq_str_1(x)
+
+#define rseq_log(fmt, args...) /* printf-style log to stderr, suffixed with caller, file and line */ \
+ fprintf(stderr, fmt "(in %s() at " __FILE__ ":" __rseq_str(__LINE__)"\n", /* NOTE(review): the "(" before "in" is never closed in the output — cosmetic */ \
+ ## args, __func__)
+
+#define rseq_bug(fmt, args...) \
+ do { \
+ rseq_log(fmt, ##args); \
+ abort(); \
+ } while (0)
+
+#if defined(__x86_64__) || defined(__i386__)
+#include <rseq-x86.h>
+#elif defined(__ARMEL__)
+#include <rseq-arm.h>
+#elif defined(__PPC__)
+#include <rseq-ppc.h>
+#else
+#error unsupported target
+#endif
+
+/*
+ * Register rseq for the current thread. This needs to be called once
+ * by any thread which uses restartable sequences, before they start
+ * using restartable sequences, to ensure restartable sequences
+ * succeed. A restartable sequence executed from a non-registered
+ * thread will always fail.
+ */
+int rseq_register_current_thread(void);
+
+/*
+ * Unregister rseq for current thread.
+ */
+int rseq_unregister_current_thread(void);
+
+/*
+ * Restartable sequence fallback for reading the current CPU number.
+ */
+int32_t rseq_fallback_current_cpu(void);
+
+/*
+ * Values returned can be either the current CPU number, -1 (rseq is
+ * uninitialized), or -2 (rseq initialization has failed).
+ */
+static inline int32_t rseq_current_cpu_raw(void)
+{
+ return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id);
+}
+
+/*
+ * Returns a possible CPU number, which is typically the current CPU.
+ * The returned CPU number can be used to prepare for an rseq critical
+ * section, which will confirm whether the cpu number is indeed the
+ * current one, and whether rseq is initialized.
+ *
+ * The CPU number returned by rseq_cpu_start should always be validated
+ * by passing it to a rseq asm sequence, or by comparing it to the
+ * return value of rseq_current_cpu_raw() if the rseq asm sequence
+ * does not need to be invoked.
+ */
+static inline uint32_t rseq_cpu_start(void)
+{
+ return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id_start);
+}
+
+static inline uint32_t rseq_current_cpu(void) /* always returns a valid cpu number, even without rseq */
+{
+ int32_t cpu;
+
+ cpu = rseq_current_cpu_raw(); /* fast path: read cpu_id from the rseq TLS area */
+ if (rseq_unlikely(cpu < 0))
+ cpu = rseq_fallback_current_cpu(); /* rseq unavailable (-1/-2): fall back to sched_getcpu() */
+ return cpu;
+}
+
+/*
+ * rseq_prepare_unload() should be invoked by each thread using rseq_finish*()
+ * at least once between their last rseq_finish*() and library unload of the
+ * library defining the rseq critical section (struct rseq_cs). This also
+ * applies to use of rseq in code generated by JIT: rseq_prepare_unload()
+ * should be invoked at least once by each thread using rseq_finish*() before
+ * reclaim of the memory holding the struct rseq_cs.
+ */
+static inline void rseq_prepare_unload(void)
+{
+ __rseq_abi.rseq_cs = 0;
+}
+
+#endif /* RSEQ_H */
diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh
new file mode 100644
index 000000000000..3acd6d75ff9f
--- /dev/null
+++ b/tools/testing/selftests/rseq/run_param_test.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+ OR MIT
+
+EXTRA_ARGS=${@}
+
+OLDIFS="$IFS"
+IFS=$'\n'
+TEST_LIST=(
+ "-T s"
+ "-T l"
+ "-T b"
+ "-T b -M"
+ "-T m"
+ "-T m -M"
+ "-T i"
+)
+
+TEST_NAME=(
+ "spinlock"
+ "list"
+ "buffer"
+ "buffer with barrier"
+ "memcpy"
+ "memcpy with barrier"
+ "increment"
+)
+IFS="$OLDIFS"
+
+REPS=1000
+SLOW_REPS=100
+
+function do_tests()
+{
+ local i=0
+ while [ "$i" -lt "${#TEST_LIST[@]}" ]; do # walk TEST_LIST/TEST_NAME in lockstep
+ echo "Running test ${TEST_NAME[$i]}"
+ ./param_test ${TEST_LIST[$i]} -r ${REPS} ${@} ${EXTRA_ARGS} || exit 1 # fail fast on first broken variant
+ echo "Running compare-twice test ${TEST_NAME[$i]}"
+ ./param_test_compare_twice ${TEST_LIST[$i]} -r ${REPS} ${@} ${EXTRA_ARGS} || exit 1
+ let "i++"
+ done
+}
+
+echo "Default parameters"
+do_tests
+
+echo "Loop injection: 10000 loops"
+
+OLDIFS="$IFS"
+IFS=$'\n'
+INJECT_LIST=(
+ "1"
+ "2"
+ "3"
+ "4"
+ "5"
+ "6"
+ "7"
+ "8"
+ "9"
+)
+IFS="$OLDIFS"
+
+NR_LOOPS=10000
+
+i=0
+while [ "$i" -lt "${#INJECT_LIST[@]}" ]; do
+ echo "Injecting at <${INJECT_LIST[$i]}>"
+ do_tests -${INJECT_LIST[i]} ${NR_LOOPS}
+ let "i++"
+done
+NR_LOOPS=
+
+function inject_blocking()
+{
+ OLDIFS="$IFS"
+ IFS=$'\n'
+ INJECT_LIST=(
+ "7"
+ "8"
+ "9"
+ )
+ IFS="$OLDIFS"
+
+ NR_LOOPS=-1
+
+ i=0
+ while [ "$i" -lt "${#INJECT_LIST[@]}" ]; do
+ echo "Injecting at <${INJECT_LIST[$i]}>"
+ do_tests -${INJECT_LIST[i]} -1 ${@}
+ let "i++"
+ done
+ NR_LOOPS=
+}
+
+echo "Yield injection (25%)"
+inject_blocking -m 4 -y
+
+echo "Yield injection (50%)"
+inject_blocking -m 2 -y
+
+echo "Yield injection (100%)"
+inject_blocking -m 1 -y
+
+echo "Kill injection (25%)"
+inject_blocking -m 4 -k
+
+echo "Kill injection (50%)"
+inject_blocking -m 2 -k
+
+echo "Kill injection (100%)"
+inject_blocking -m 1 -k
+
+echo "Sleep injection (1ms, 25%)"
+inject_blocking -m 4 -s 1
+
+echo "Sleep injection (1ms, 50%)"
+inject_blocking -m 2 -s 1
+
+echo "Sleep injection (1ms, 100%)"
+inject_blocking -m 1 -s 1
diff --git a/tools/testing/selftests/sparc64/Makefile b/tools/testing/selftests/sparc64/Makefile
new file mode 100644
index 000000000000..2082eeffd779
--- /dev/null
+++ b/tools/testing/selftests/sparc64/Makefile
@@ -0,0 +1,46 @@
+SUBDIRS := drivers
+
+TEST_PROGS := run.sh
+
+.PHONY: all clean
+
+include ../lib.mk
+
+all:
+ @for DIR in $(SUBDIRS); do \
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
+ mkdir $$BUILD_TARGET -p; \
+ make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+ #SUBDIR test prog name should be in the form: SUBDIR_test.sh \
+ TEST=$$DIR"_test.sh"; \
+ if [ -e $$DIR/$$TEST ]; then \
+ rsync -a $$DIR/$$TEST $$BUILD_TARGET/; \
+ fi \
+ done
+
+override define RUN_TESTS
+ @cd $(OUTPUT); ./run.sh
+endef
+
+override define INSTALL_RULE
+ mkdir -p $(INSTALL_PATH)
+ install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
+
+ @for SUBDIR in $(SUBDIRS); do \
+ BUILD_TARGET=$(OUTPUT)/$$SUBDIR; \
+ mkdir $$BUILD_TARGET -p; \
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$SUBDIR INSTALL_PATH=$(INSTALL_PATH)/$$SUBDIR install; \
+ done;
+endef
+
+override define EMIT_TESTS
+ echo "./run.sh"
+endef
+
+override define CLEAN
+ @for DIR in $(SUBDIRS); do \
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
+ mkdir $$BUILD_TARGET -p; \
+ make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+ done
+endef
diff --git a/tools/testing/selftests/sparc64/drivers/.gitignore b/tools/testing/selftests/sparc64/drivers/.gitignore
new file mode 100644
index 000000000000..90e835ed74e6
--- /dev/null
+++ b/tools/testing/selftests/sparc64/drivers/.gitignore
@@ -0,0 +1 @@
+adi-test
diff --git a/tools/testing/selftests/sparc64/drivers/Makefile b/tools/testing/selftests/sparc64/drivers/Makefile
new file mode 100644
index 000000000000..6264f40bbdbc
--- /dev/null
+++ b/tools/testing/selftests/sparc64/drivers/Makefile
@@ -0,0 +1,15 @@
+
+INCLUDEDIR := -I.
+CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
+
+TEST_GEN_FILES := adi-test
+
+all: $(TEST_GEN_FILES)
+
+$(TEST_GEN_FILES): adi-test.c
+
+TEST_PROGS := drivers_test.sh
+
+include ../../lib.mk
+
+$(OUTPUT)/adi-test: adi-test.c
diff --git a/tools/testing/selftests/sparc64/drivers/adi-test.c b/tools/testing/selftests/sparc64/drivers/adi-test.c
new file mode 100644
index 000000000000..95d93c6a88a5
--- /dev/null
+++ b/tools/testing/selftests/sparc64/drivers/adi-test.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * selftest for sparc64's privileged ADI driver
+ *
+ * Author: Tom Hromatka <tom.hromatka@oracle.com>
+ */
+#include <linux/kernel.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "../../kselftest.h"
+
+#define DEBUG_LEVEL_1_BIT (0x0001)
+#define DEBUG_LEVEL_2_BIT (0x0002)
+#define DEBUG_LEVEL_3_BIT (0x0004)
+#define DEBUG_LEVEL_4_BIT (0x0008)
+#define DEBUG_TIMING_BIT (0x1000)
+
+#ifndef ARRAY_SIZE
+# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+/* bit mask of enabled bits to print */
+#define DEBUG 0x0001
+
+#define DEBUG_PRINT_L1(...) debug_print(DEBUG_LEVEL_1_BIT, __VA_ARGS__)
+#define DEBUG_PRINT_L2(...) debug_print(DEBUG_LEVEL_2_BIT, __VA_ARGS__)
+#define DEBUG_PRINT_L3(...) debug_print(DEBUG_LEVEL_3_BIT, __VA_ARGS__)
+#define DEBUG_PRINT_L4(...) debug_print(DEBUG_LEVEL_4_BIT, __VA_ARGS__)
+#define DEBUG_PRINT_T(...) debug_print(DEBUG_TIMING_BIT, __VA_ARGS__)
+
+/*
+ * Print a formatted message to stdout iff the given debug-level bit is
+ * set in the compile-time DEBUG mask; otherwise the message is dropped.
+ * The va_list is always started/ended so varargs handling stays valid
+ * on both paths.
+ */
+static void debug_print(int level, const char *s, ...)
+{
+ va_list args;
+
+ va_start(args, s);
+
+ if (DEBUG & level)
+ vfprintf(stdout, s, args);
+ va_end(args);
+}
+
+#ifndef min
+#define min(x, y) ((x) < (y) ? x : y)
+#endif
+
+#define RETURN_FROM_TEST(_ret) \
+ do { \
+ DEBUG_PRINT_L1( \
+ "\tTest %s returned %d\n", __func__, _ret); \
+ return _ret; \
+ } while (0)
+
+#define ADI_BLKSZ 64
+#define ADI_MAX_VERSION 15
+
+#define TEST_STEP_FAILURE(_ret) \
+ do { \
+ fprintf(stderr, "\tTest step failure: %d at %s:%d\n", \
+ _ret, __func__, __LINE__); \
+ goto out; \
+ } while (0)
+
+#define RDTICK(_x) \
+ asm volatile(" rd %%tick, %0\n" : "=r" (_x))
+
+/*
+ * Return a pseudo-random ADI version tag in [0, ADI_MAX_VERSION].
+ * Uses the low bits of the sparc64 %tick register as a cheap entropy
+ * source -- adequate for a selftest, not cryptographically random.
+ */
+static int random_version(void)
+{
+ long tick;
+
+ RDTICK(tick);
+
+ return tick % (ADI_MAX_VERSION + 1);
+}
+
+#define MAX_RANGES_SUPPORTED 5
+static const char system_ram_str[] = "System RAM\n";
+static int range_count;
+static unsigned long long int start_addr[MAX_RANGES_SUPPORTED];
+static unsigned long long int end_addr[MAX_RANGES_SUPPORTED];
+
+struct stats {
+ char name[16];
+ unsigned long total;
+ unsigned long count;
+ unsigned long bytes;
+};
+
+static struct stats read_stats = {
+ .name = "read", .total = 0, .count = 0, .bytes = 0};
+static struct stats pread_stats = {
+ .name = "pread", .total = 0, .count = 0, .bytes = 0};
+static struct stats write_stats = {
+ .name = "write", .total = 0, .count = 0, .bytes = 0};
+static struct stats pwrite_stats = {
+ .name = "pwrite", .total = 0, .count = 0, .bytes = 0};
+static struct stats seek_stats = {
+ .name = "seek", .total = 0, .count = 0, .bytes = 0};
+
+/*
+ * Fold one sample into a stats accumulator: add the elapsed-tick
+ * measurement and the transferred byte count, bump the sample count.
+ */
+static void update_stats(struct stats * const ustats,
+ unsigned long measurement, unsigned long bytes)
+{
+ ustats->total += measurement;
+ ustats->bytes += bytes;
+ ustats->count++;
+}
+
+/*
+ * Print one syscall's aggregated stats: sample count, average elapsed
+ * ticks per sample and average bytes per sample.
+ * NOTE: a zero sample count yields 0.0/0.0 averages (NaN under IEEE
+ * float); that only happens for syscalls never exercised.
+ */
+static void print_ustats(const struct stats * const ustats)
+{
+	/* count is unsigned long: %lu, not %d (format mismatch is UB) */
+	DEBUG_PRINT_L1("%s\t%7lu\t%7.0f\t%7.0f\n",
+		       ustats->name, ustats->count,
+		       (float)ustats->total / (float)ustats->count,
+		       (float)ustats->bytes / (float)ustats->count);
+}
+
+/*
+ * Print the summary table of per-syscall timing and transfer-size
+ * averages collected during the run.
+ */
+static void print_stats(void)
+{
+ DEBUG_PRINT_L1("\nSyscall\tCall\tAvgTime\tAvgSize\n"
+ "\tCount\t(ticks)\t(bytes)\n"
+ "-------------------------------\n");
+
+ print_ustats(&read_stats);
+ print_ustats(&pread_stats);
+ print_ustats(&write_stats);
+ print_ustats(&pwrite_stats);
+ print_ustats(&seek_stats);
+}
+
+/*
+ * Parse /proc/iomem and record the physical start/end address of each
+ * "System RAM" range into start_addr[]/end_addr[], up to
+ * MAX_RANGES_SUPPORTED entries.
+ *
+ * Returns 0 on success, -errno if /proc/iomem cannot be opened, or -1
+ * if no RAM ranges were found.
+ */
+static int build_memory_map(void)
+{
+	char line[256];
+	FILE *fp;
+	int i;
+
+	range_count = 0;
+
+	fp = fopen("/proc/iomem", "r");
+	if (!fp) {
+		fprintf(stderr, "/proc/iomem: error %d: %s\n",
+			errno, strerror(errno));
+		return -errno;
+	}
+
+	while (fgets(line, sizeof(line), fp) != 0) {
+		if (strstr(line, system_ram_str)) {
+			char *dash, *end_ptr;
+
+			/* Stop before overflowing the fixed-size arrays;
+			 * the tests only need the first few ranges.
+			 */
+			if (range_count >= MAX_RANGES_SUPPORTED) {
+				fprintf(stderr,
+					"Only using the first %d RAM ranges\n",
+					MAX_RANGES_SUPPORTED);
+				break;
+			}
+
+			/* Given a line like this:
+			 * d0400000-10ffaffff : System RAM
+			 * replace the "-" with a space
+			 */
+			dash = strstr(line, "-");
+			if (!dash)	/* malformed line: skip, don't crash */
+				continue;
+			dash[0] = ' ';
+
+			start_addr[range_count] = strtoull(line, &end_ptr, 16);
+			end_addr[range_count] = strtoull(end_ptr, NULL, 16);
+			range_count++;
+		}
+	}
+
+	fclose(fp);
+
+	DEBUG_PRINT_L1("RAM Ranges\n");
+	for (i = 0; i < range_count; i++)
+		DEBUG_PRINT_L1("\trange %d: 0x%llx\t- 0x%llx\n",
+			       i, start_addr[i], end_addr[i]);
+
+	if (range_count == 0) {
+		fprintf(stderr, "No valid address ranges found. Error.\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Read buf_sz bytes of ADI version data from fd into buf, looping on
+ * short reads.  Returns the number of bytes read or -errno on error.
+ * Timing for the whole transfer is folded into read_stats exactly once.
+ */
+static int read_adi(int fd, unsigned char *buf, int buf_sz)
+{
+	int ret, bytes_read = 0;
+	long start, end, elapsed_time = 0;
+
+	do {
+		RDTICK(start);
+		ret = read(fd, buf + bytes_read, buf_sz - bytes_read);
+		RDTICK(end);
+		if (ret < 0)
+			return -errno;
+		if (ret == 0)	/* EOF: don't spin forever on a short read */
+			break;
+
+		elapsed_time += end - start;
+		bytes_read += ret;
+	} while (bytes_read < buf_sz);
+
+	/* One stats sample per call.  Updating inside the loop with the
+	 * cumulative elapsed time double-counted both time and bytes.
+	 */
+	update_stats(&read_stats, elapsed_time, bytes_read);
+
+	DEBUG_PRINT_T("\tread elapsed timed = %ld\n", elapsed_time);
+	DEBUG_PRINT_L3("\tRead %d bytes\n", bytes_read);
+
+	return bytes_read;
+}
+
+/*
+ * Positional read of buf_sz bytes starting at 'offset', looping on
+ * short reads.  Returns bytes read or -errno on error.  One
+ * pread_stats sample is recorded per call.
+ */
+static int pread_adi(int fd, unsigned char *buf,
+		     int buf_sz, unsigned long offset)
+{
+	int ret, i, bytes_read = 0;
+	unsigned long cur_offset;
+	long start, end, elapsed_time = 0;
+
+	cur_offset = offset;
+	do {
+		RDTICK(start);
+		ret = pread(fd, buf + bytes_read, buf_sz - bytes_read,
+			    cur_offset);
+		RDTICK(end);
+		if (ret < 0)
+			return -errno;
+		if (ret == 0)	/* EOF: don't spin forever */
+			break;
+
+		elapsed_time += end - start;
+		bytes_read += ret;
+		cur_offset += ret;
+	} while (bytes_read < buf_sz);
+
+	/* One stats sample per call (was per-iteration with cumulative
+	 * elapsed time, inflating both the time and byte totals).
+	 */
+	update_stats(&pread_stats, elapsed_time, bytes_read);
+
+	DEBUG_PRINT_T("\tpread elapsed timed = %ld\n", elapsed_time);
+	DEBUG_PRINT_L3("\tRead %d bytes starting at offset 0x%lx\n",
+		       bytes_read, offset);
+	for (i = 0; i < bytes_read; i++)
+		DEBUG_PRINT_L4("\t\t0x%lx\t%d\n", offset + i, buf[i]);
+
+	return bytes_read;
+}
+
+/*
+ * Write buf_sz bytes of ADI version data from buf to fd, looping on
+ * short writes.  Returns bytes written or -errno on error.  One
+ * write_stats sample is recorded per call.
+ */
+static int write_adi(int fd, const unsigned char * const buf, int buf_sz)
+{
+	int ret, bytes_written = 0;
+	long start, end, elapsed_time = 0;
+
+	do {
+		RDTICK(start);
+		ret = write(fd, buf + bytes_written, buf_sz - bytes_written);
+		RDTICK(end);
+		if (ret < 0)
+			return -errno;
+		if (ret == 0)	/* no progress: avoid an infinite loop */
+			break;
+
+		elapsed_time += (end - start);
+		bytes_written += ret;
+	} while (bytes_written < buf_sz);
+
+	/* One stats sample per call (was per-iteration with cumulative
+	 * elapsed time, double-counting time and bytes).
+	 */
+	update_stats(&write_stats, elapsed_time, bytes_written);
+
+	DEBUG_PRINT_T("\twrite elapsed timed = %ld\n", elapsed_time);
+	DEBUG_PRINT_L3("\tWrote %d of %d bytes\n", bytes_written, buf_sz);
+
+	return bytes_written;
+}
+
+/*
+ * Positional write of buf_sz bytes starting at 'offset', looping on
+ * short writes.  Returns bytes written or -errno on error (also logs
+ * the errno to stderr).  One pwrite_stats sample per call.
+ */
+static int pwrite_adi(int fd, const unsigned char * const buf,
+		      int buf_sz, unsigned long offset)
+{
+	int ret, bytes_written = 0;
+	unsigned long cur_offset;
+	long start, end, elapsed_time = 0;
+
+	cur_offset = offset;
+
+	do {
+		RDTICK(start);
+		ret = pwrite(fd, buf + bytes_written,
+			     buf_sz - bytes_written, cur_offset);
+		RDTICK(end);
+		if (ret < 0) {
+			fprintf(stderr, "pwrite(): error %d: %s\n",
+				errno, strerror(errno));
+			return -errno;
+		}
+		if (ret == 0)	/* no progress: avoid an infinite loop */
+			break;
+
+		elapsed_time += (end - start);
+		bytes_written += ret;
+		cur_offset += ret;
+	} while (bytes_written < buf_sz);
+
+	/* One stats sample per call (was per-iteration with cumulative
+	 * elapsed time, double-counting time and bytes).
+	 */
+	update_stats(&pwrite_stats, elapsed_time, bytes_written);
+
+	DEBUG_PRINT_T("\tpwrite elapsed timed = %ld\n", elapsed_time);
+	DEBUG_PRINT_L3("\tWrote %d of %d bytes starting at address 0x%lx\n",
+		       bytes_written, buf_sz, offset);
+
+	return bytes_written;
+}
+
+/*
+ * lseek() wrapper that times a successful seek into seek_stats.
+ * Returns lseek()'s result: the new offset, or -1 with errno set.
+ */
+static off_t seek_adi(int fd, off_t offset, int whence)
+{
+	long start, end;
+	off_t ret;
+
+	RDTICK(start);
+	ret = lseek(fd, offset, whence);
+	RDTICK(end);
+	/* off_t is not unsigned long long; cast to match %llx */
+	DEBUG_PRINT_L2("\tlseek ret = 0x%llx\n", (unsigned long long)ret);
+	if (ret < 0)
+		goto out;
+
+	DEBUG_PRINT_T("\tlseek elapsed timed = %ld\n", end - start);
+	update_stats(&seek_stats, end - start, 0);
+
+out:
+	/* NOTE(review): unconditionally re-seek to SEEK_END afterwards;
+	 * presumably resets driver seek state between test steps --
+	 * confirm against the adi driver's llseek implementation.
+	 */
+	(void)lseek(fd, 0, SEEK_END);
+	return ret;
+}
+
+/*
+ * Test 0: pwrite then pread of a single ADI version byte at an
+ * ADI_BLKSZ-aligned physical address near the top of the last RAM
+ * range; verifies the value read back matches the value written.
+ * Returns 0 on success, non-zero on any step failure.
+ */
+static int test0_prpw_aligned_1byte(int fd)
+{
+ /* somewhat arbitrarily chosen address */
+ unsigned long paddr =
+ (end_addr[range_count - 1] - 0x1000) & ~(ADI_BLKSZ - 1);
+ unsigned char version[1], expected_version;
+ loff_t offset;
+ int ret;
+
+ version[0] = random_version();
+ expected_version = version[0];
+
+ /* the driver addresses versions by ADI block index, not byte */
+ offset = paddr / ADI_BLKSZ;
+
+ ret = pwrite_adi(fd, version, sizeof(version), offset);
+ if (ret != sizeof(version))
+ TEST_STEP_FAILURE(ret);
+
+ ret = pread_adi(fd, version, sizeof(version), offset);
+ if (ret != sizeof(version))
+ TEST_STEP_FAILURE(ret);
+
+ if (expected_version != version[0]) {
+ DEBUG_PRINT_L2("\tExpected version %d but read version %d\n",
+ expected_version, version[0]);
+ TEST_STEP_FAILURE(-expected_version);
+ }
+
+ ret = 0;
+out:
+ RETURN_FROM_TEST(ret);
+}
+
+#define TEST1_VERSION_SZ 4096
+/*
+ * Test 1: pwrite then pread of 4096 version bytes at an aligned
+ * address near the top of the last RAM range; verifies every byte
+ * read back matches what was written.
+ */
+static int test1_prpw_aligned_4096bytes(int fd)
+{
+	/* somewhat arbitrarily chosen address */
+	unsigned long paddr =
+		(end_addr[range_count - 1] - 0x6000) & ~(ADI_BLKSZ - 1);
+	unsigned char version[TEST1_VERSION_SZ],
+		expected_version[TEST1_VERSION_SZ];
+	loff_t offset;
+	int ret, i;
+
+	for (i = 0; i < TEST1_VERSION_SZ; i++) {
+		version[i] = random_version();
+		expected_version[i] = version[i];
+	}
+
+	offset = paddr / ADI_BLKSZ;
+
+	ret = pwrite_adi(fd, version, sizeof(version), offset);
+	if (ret != sizeof(version))
+		TEST_STEP_FAILURE(ret);
+
+	ret = pread_adi(fd, version, sizeof(version), offset);
+	if (ret != sizeof(version))
+		TEST_STEP_FAILURE(ret);
+
+	for (i = 0; i < TEST1_VERSION_SZ; i++) {
+		if (expected_version[i] != version[i]) {
+			/* print the mismatching element; previously the
+			 * array pointer and element 0 were passed to %d
+			 */
+			DEBUG_PRINT_L2(
+				"\tExpected version %d but read version %d\n",
+				expected_version[i], version[i]);
+			TEST_STEP_FAILURE(-expected_version[i]);
+		}
+	}
+
+	ret = 0;
+out:
+	RETURN_FROM_TEST(ret);
+}
+
+#define TEST2_VERSION_SZ 10327
+/*
+ * Test 2: pwrite then pread of an odd-sized (10327 byte) version
+ * buffer at an aligned address near the bottom of the first RAM
+ * range; verifies readback byte-for-byte.
+ */
+static int test2_prpw_aligned_10327bytes(int fd)
+{
+	/* somewhat arbitrarily chosen address */
+	unsigned long paddr =
+		(start_addr[0] + 0x6000) & ~(ADI_BLKSZ - 1);
+	unsigned char version[TEST2_VERSION_SZ],
+		expected_version[TEST2_VERSION_SZ];
+	loff_t offset;
+	int ret, i;
+
+	for (i = 0; i < TEST2_VERSION_SZ; i++) {
+		version[i] = random_version();
+		expected_version[i] = version[i];
+	}
+
+	offset = paddr / ADI_BLKSZ;
+
+	ret = pwrite_adi(fd, version, sizeof(version), offset);
+	if (ret != sizeof(version))
+		TEST_STEP_FAILURE(ret);
+
+	ret = pread_adi(fd, version, sizeof(version), offset);
+	if (ret != sizeof(version))
+		TEST_STEP_FAILURE(ret);
+
+	for (i = 0; i < TEST2_VERSION_SZ; i++) {
+		if (expected_version[i] != version[i]) {
+			/* print the mismatching element; previously the
+			 * array pointer and element 0 were passed to %d
+			 */
+			DEBUG_PRINT_L2(
+				"\tExpected version %d but read version %d\n",
+				expected_version[i], version[i]);
+			TEST_STEP_FAILURE(-expected_version[i]);
+		}
+	}
+
+	ret = 0;
+out:
+	RETURN_FROM_TEST(ret);
+}
+
+#define TEST3_VERSION_SZ 12541
+/*
+ * Test 3: pwrite then pread of 12541 version bytes starting at a
+ * deliberately UNaligned address (aligned base + 17); verifies
+ * readback byte-for-byte.
+ */
+static int test3_prpw_unaligned_12541bytes(int fd)
+{
+	/* somewhat arbitrarily chosen address */
+	unsigned long paddr =
+		((start_addr[0] + 0xC000) & ~(ADI_BLKSZ - 1)) + 17;
+	unsigned char version[TEST3_VERSION_SZ],
+		expected_version[TEST3_VERSION_SZ];
+	loff_t offset;
+	int ret, i;
+
+	for (i = 0; i < TEST3_VERSION_SZ; i++) {
+		version[i] = random_version();
+		expected_version[i] = version[i];
+	}
+
+	offset = paddr / ADI_BLKSZ;
+
+	ret = pwrite_adi(fd, version, sizeof(version), offset);
+	if (ret != sizeof(version))
+		TEST_STEP_FAILURE(ret);
+
+	ret = pread_adi(fd, version, sizeof(version), offset);
+	if (ret != sizeof(version))
+		TEST_STEP_FAILURE(ret);
+
+	for (i = 0; i < TEST3_VERSION_SZ; i++) {
+		if (expected_version[i] != version[i]) {
+			/* print the mismatching element; previously the
+			 * array pointer and element 0 were passed to %d
+			 */
+			DEBUG_PRINT_L2(
+				"\tExpected version %d but read version %d\n",
+				expected_version[i], version[i]);
+			TEST_STEP_FAILURE(-expected_version[i]);
+		}
+	}
+
+	ret = 0;
+out:
+	RETURN_FROM_TEST(ret);
+}
+
+/*
+ * Test 4: exercise /dev/adi lseek() semantics: SEEK_SET to a new
+ * offset, SEEK_SET to the current offset (the driver rejects this
+ * with EINVAL), then SEEK_CUR with zero, positive and negative
+ * deltas.  Each step returns a distinct negative code on failure.
+ */
+static int test4_lseek(int fd)
+{
+/* OFFSET_SUBTRACT is a negative SEEK_CUR delta expressed as its
+ * 64-bit two's-complement bit pattern.
+ */
+#define OFFSET_ADD (0x100)
+#define OFFSET_SUBTRACT (0xFFFFFFF000000000)
+
+ off_t offset_out, offset_in;
+ int ret;
+
+
+ offset_in = 0x123456789abcdef0;
+ offset_out = seek_adi(fd, offset_in, SEEK_SET);
+ if (offset_out != offset_in) {
+ ret = -1;
+ TEST_STEP_FAILURE(ret);
+ }
+
+ /* seek to the current offset. this should return EINVAL */
+ offset_out = seek_adi(fd, offset_in, SEEK_SET);
+ if (offset_out < 0 && errno == EINVAL)
+ DEBUG_PRINT_L2(
+ "\tSEEK_SET failed as designed. Not an error\n");
+ else {
+ ret = -2;
+ TEST_STEP_FAILURE(ret);
+ }
+
+ offset_out = seek_adi(fd, 0, SEEK_CUR);
+ if (offset_out != offset_in) {
+ ret = -3;
+ TEST_STEP_FAILURE(ret);
+ }
+
+ offset_out = seek_adi(fd, OFFSET_ADD, SEEK_CUR);
+ if (offset_out != (offset_in + OFFSET_ADD)) {
+ ret = -4;
+ TEST_STEP_FAILURE(ret);
+ }
+
+ offset_out = seek_adi(fd, OFFSET_SUBTRACT, SEEK_CUR);
+ if (offset_out != (offset_in + OFFSET_ADD + OFFSET_SUBTRACT)) {
+ ret = -5;
+ TEST_STEP_FAILURE(ret);
+ }
+
+ ret = 0;
+out:
+ RETURN_FROM_TEST(ret);
+}
+
+/*
+ * Test 5: seek + write then seek + read of a single version byte at
+ * an aligned address near the top of the last RAM range; verifies the
+ * value read back matches the value written.
+ */
+static int test5_rw_aligned_1byte(int fd)
+{
+ /* somewhat arbitrarily chosen address */
+ unsigned long paddr =
+ (end_addr[range_count - 1] - 0xF000) & ~(ADI_BLKSZ - 1);
+ unsigned char version, expected_version;
+ loff_t offset;
+ off_t oret;
+ int ret;
+
+ offset = paddr / ADI_BLKSZ;
+ version = expected_version = random_version();
+
+ oret = seek_adi(fd, offset, SEEK_SET);
+ if (oret != offset) {
+ ret = -1;
+ TEST_STEP_FAILURE(ret);
+ }
+
+ ret = write_adi(fd, &version, sizeof(version));
+ if (ret != sizeof(version))
+ TEST_STEP_FAILURE(ret);
+
+ /* re-seek to the same block before reading the value back */
+ oret = seek_adi(fd, offset, SEEK_SET);
+ if (oret != offset) {
+ ret = -1;
+ TEST_STEP_FAILURE(ret);
+ }
+
+ ret = read_adi(fd, &version, sizeof(version));
+ if (ret != sizeof(version))
+ TEST_STEP_FAILURE(ret);
+
+ if (expected_version != version) {
+ DEBUG_PRINT_L2("\tExpected version %d but read version %d\n",
+ expected_version, version);
+ TEST_STEP_FAILURE(-expected_version);
+ }
+
+ ret = 0;
+out:
+ RETURN_FROM_TEST(ret);
+}
+
+#define TEST6_VERSION_SZ 9434
+/*
+ * Test 6: seek + write then seek + read of 9434 version bytes at an
+ * aligned address; the buffer is zeroed between write and read so a
+ * passing comparison proves the data really came from the driver.
+ */
+static int test6_rw_aligned_9434bytes(int fd)
+{
+ /* somewhat arbitrarily chosen address */
+ unsigned long paddr =
+ (end_addr[range_count - 1] - 0x5F000) & ~(ADI_BLKSZ - 1);
+ unsigned char version[TEST6_VERSION_SZ],
+ expected_version[TEST6_VERSION_SZ];
+ loff_t offset;
+ off_t oret;
+ int ret, i;
+
+ offset = paddr / ADI_BLKSZ;
+ for (i = 0; i < TEST6_VERSION_SZ; i++)
+ version[i] = expected_version[i] = random_version();
+
+ oret = seek_adi(fd, offset, SEEK_SET);
+ if (oret != offset) {
+ ret = -1;
+ TEST_STEP_FAILURE(ret);
+ }
+
+ ret = write_adi(fd, version, sizeof(version));
+ if (ret != sizeof(version))
+ TEST_STEP_FAILURE(ret);
+
+ /* clear the buffer so the readback can't trivially match */
+ memset(version, 0, TEST6_VERSION_SZ);
+
+ oret = seek_adi(fd, offset, SEEK_SET);
+ if (oret != offset) {
+ ret = -1;
+ TEST_STEP_FAILURE(ret);
+ }
+
+ ret = read_adi(fd, version, sizeof(version));
+ if (ret != sizeof(version))
+ TEST_STEP_FAILURE(ret);
+
+ for (i = 0; i < TEST6_VERSION_SZ; i++) {
+ if (expected_version[i] != version[i]) {
+ DEBUG_PRINT_L2(
+ "\tExpected version %d but read version %d\n",
+ expected_version[i], version[i]);
+ TEST_STEP_FAILURE(-expected_version[i]);
+ }
+ }
+
+ ret = 0;
+out:
+ RETURN_FROM_TEST(ret);
+}
+
+#define TEST7_VERSION_SZ 14963
+/*
+ * Test 7: seek + write then seek + read of 14963 version bytes at a
+ * deliberately unaligned address (aligned base + 39); the buffer is
+ * zeroed between write and read, then verified byte-for-byte.
+ */
+static int test7_rw_aligned_14963bytes(int fd)
+{
+	/* somewhat arbitrarily chosen address */
+	unsigned long paddr =
+		((start_addr[range_count - 1] + 0xF000) & ~(ADI_BLKSZ - 1)) + 39;
+	unsigned char version[TEST7_VERSION_SZ],
+		expected_version[TEST7_VERSION_SZ];
+	loff_t offset;
+	off_t oret;
+	int ret, i;
+
+	offset = paddr / ADI_BLKSZ;
+	for (i = 0; i < TEST7_VERSION_SZ; i++) {
+		version[i] = random_version();
+		expected_version[i] = version[i];
+	}
+
+	oret = seek_adi(fd, offset, SEEK_SET);
+	if (oret != offset) {
+		ret = -1;
+		TEST_STEP_FAILURE(ret);
+	}
+
+	ret = write_adi(fd, version, sizeof(version));
+	if (ret != sizeof(version))
+		TEST_STEP_FAILURE(ret);
+
+	/* clear the buffer so the readback can't trivially match */
+	memset(version, 0, TEST7_VERSION_SZ);
+
+	oret = seek_adi(fd, offset, SEEK_SET);
+	if (oret != offset) {
+		ret = -1;
+		TEST_STEP_FAILURE(ret);
+	}
+
+	ret = read_adi(fd, version, sizeof(version));
+	if (ret != sizeof(version))
+		TEST_STEP_FAILURE(ret);
+
+	/* the dead "paddr += ADI_BLKSZ" that used to live in this loop
+	 * was removed: paddr is never read again after this point
+	 */
+	for (i = 0; i < TEST7_VERSION_SZ; i++) {
+		if (expected_version[i] != version[i]) {
+			DEBUG_PRINT_L2(
+				"\tExpected version %d but read version %d\n",
+				expected_version[i], version[i]);
+			TEST_STEP_FAILURE(-expected_version[i]);
+		}
+	}
+
+	ret = 0;
+out:
+	RETURN_FROM_TEST(ret);
+}
+
+/* Table of all test cases; main() runs them in order. */
+static int (*tests[])(int fd) = {
+ test0_prpw_aligned_1byte,
+ test1_prpw_aligned_4096bytes,
+ test2_prpw_aligned_10327bytes,
+ test3_prpw_unaligned_12541bytes,
+ test4_lseek,
+ test5_rw_aligned_1byte,
+ test6_rw_aligned_9434bytes,
+ test7_rw_aligned_14963bytes,
+};
+#define TEST_COUNT ARRAY_SIZE(tests)
+
+/*
+ * Build the RAM map, open the privileged ADI driver, run every test
+ * in tests[], report per-test pass/fail through the kselftest
+ * framework, and exit with the aggregate kselftest status.
+ */
+int main(int argc, char *argv[])
+{
+ int fd, ret, test;
+
+ ret = build_memory_map();
+ if (ret < 0)
+ /* NOTE(review): a negative errno becomes the process exit
+ * code, which the shell truncates to 8 bits -- confirm
+ * callers only test for non-zero.
+ */
+ return ret;
+
+ fd = open("/dev/adi", O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, "open: error %d: %s\n",
+ errno, strerror(errno));
+ return -errno;
+ }
+
+ /* a failing test does not stop the run; failures are tallied */
+ for (test = 0; test < TEST_COUNT; test++) {
+ DEBUG_PRINT_L1("Running test #%d\n", test);
+
+ ret = (*tests[test])(fd);
+ if (ret != 0)
+ ksft_test_result_fail("Test #%d failed: error %d\n",
+ test, ret);
+ else
+ ksft_test_result_pass("Test #%d passed\n", test);
+ }
+
+ print_stats();
+ close(fd);
+
+ if (ksft_get_fail_cnt() > 0)
+ ksft_exit_fail();
+ else
+ ksft_exit_pass();
+
+ /* it's impossible to get here, but the compiler throws a warning
+ * about control reaching the end of non-void function. bah.
+ */
+ return 0;
+}
diff --git a/tools/testing/selftests/sparc64/drivers/drivers_test.sh b/tools/testing/selftests/sparc64/drivers/drivers_test.sh
new file mode 100755
index 000000000000..6d08273b7532
--- /dev/null
+++ b/tools/testing/selftests/sparc64/drivers/drivers_test.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# Load the adi driver (from the build tree if present, otherwise via
+# modprobe), run the adi-test binary, then unload the driver.  Exits
+# non-zero if module loading or the test binary fails.
+
+SRC_TREE=../../../../
+
+test_run()
+{
+	if [ -f ${SRC_TREE}/drivers/char/adi.ko ]; then
+		insmod ${SRC_TREE}/drivers/char/adi.ko 2> /dev/null
+		if [ $? -ne 0 ]; then
+			rc=1
+		fi
+	else
+		# Use modprobe dry run to check for missing adi module
+		if ! /sbin/modprobe -q -n adi; then
+			echo "adi: [SKIP]"
+		elif /sbin/modprobe -q adi; then
+			echo "adi: ok"
+		else
+			echo "adi: [FAIL]"
+			rc=1
+		fi
+	fi
+	# Propagate the test binary's status; previously its exit code
+	# was ignored and the script could exit 0 on a failing test.
+	./adi-test
+	if [ $? -ne 0 ]; then
+		rc=1
+	fi
+	rmmod adi 2> /dev/null
+}
+
+rc=0
+test_run
+exit $rc
diff --git a/tools/testing/selftests/sparc64/run.sh b/tools/testing/selftests/sparc64/run.sh
new file mode 100755
index 000000000000..38ad61f9328e
--- /dev/null
+++ b/tools/testing/selftests/sparc64/run.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+# Run the drivers selftest from inside its directory so the script's
+# relative paths (./adi-test) resolve.
+(cd drivers; ./drivers_test.sh)
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index a8783f48f77f..cce853dca691 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -131,6 +131,7 @@ static const char * const page_flag_names[] = {
[KPF_KSM] = "x:ksm",
[KPF_THP] = "t:thp",
[KPF_BALLOON] = "o:balloon",
+ [KPF_PGTABLE] = "g:pgtable",
[KPF_ZERO_PAGE] = "z:zero_page",
[KPF_IDLE] = "i:idle_page",
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a4c1b76240df..2d9b4795edb2 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1490,6 +1490,10 @@ static int init_hyp_mode(void)
}
}
+ err = hyp_map_aux_data();
+ if (err)
+ kvm_err("Cannot map host auxilary data: %d\n", err);
+
return 0;
out_err:
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index c4762bef13c6..c95ab4c5a475 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -405,7 +405,7 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu)
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
u32 func_id = smccc_get_function(vcpu);
- u32 val = PSCI_RET_NOT_SUPPORTED;
+ u32 val = SMCCC_RET_NOT_SUPPORTED;
u32 feature;
switch (func_id) {
@@ -417,7 +417,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
switch(feature) {
case ARM_SMCCC_ARCH_WORKAROUND_1:
if (kvm_arm_harden_branch_predictor())
- val = 0;
+ val = SMCCC_RET_SUCCESS;
+ break;
+ case ARM_SMCCC_ARCH_WORKAROUND_2:
+ switch (kvm_arm_have_ssbd()) {
+ case KVM_SSBD_FORCE_DISABLE:
+ case KVM_SSBD_UNKNOWN:
+ break;
+ case KVM_SSBD_KERNEL:
+ val = SMCCC_RET_SUCCESS;
+ break;
+ case KVM_SSBD_FORCE_ENABLE:
+ case KVM_SSBD_MITIGATED:
+ val = SMCCC_RET_NOT_REQUIRED;
+ break;
+ }
break;
}
break;